git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'input/next'
author    Thierry Reding <treding@nvidia.com>
          Thu, 24 Oct 2013 12:36:17 +0000 (14:36 +0200)
committer Thierry Reding <treding@nvidia.com>
          Thu, 24 Oct 2013 12:36:17 +0000 (14:36 +0200)
3456 files changed:
Documentation/ABI/stable/sysfs-bus-usb
Documentation/ABI/testing/sysfs-class-mtd
Documentation/ABI/testing/sysfs-class-net-batman-adv
Documentation/ABI/testing/sysfs-class-net-mesh
Documentation/ABI/testing/sysfs-class-powercap [new file with mode: 0644]
Documentation/ABI/testing/sysfs-devices-power
Documentation/ABI/testing/sysfs-power
Documentation/DMA-API-HOWTO.txt
Documentation/DMA-API.txt
Documentation/PCI/pci.txt
Documentation/acpi/dsdt-override.txt
Documentation/connector/ucon.c
Documentation/devices.txt
Documentation/devicetree/bindings/arm/cci.txt
Documentation/devicetree/bindings/dma/atmel-dma.txt
Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
Documentation/devicetree/bindings/i2c/i2c-rcar.txt [new file with mode: 0644]
Documentation/devicetree/bindings/memory.txt [deleted file]
Documentation/devicetree/bindings/misc/nvidia,tegra114-mipi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/cpsw-phy-sel.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/auo,b101aw03.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt [new file with mode: 0644]
Documentation/devicetree/bindings/panel/simple-panel.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/designware-pcie.txt
Documentation/devicetree/bindings/sound/cs42l73.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/davinci-evm-audio.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
Documentation/devicetree/bindings/sound/tlv320aic3x.txt
Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt [new file with mode: 0644]
Documentation/filesystems/Locking
Documentation/filesystems/caching/netfs-api.txt
Documentation/filesystems/vfs.txt
Documentation/hwmon/lm25066
Documentation/hwmon/ltc2978
Documentation/ioctl/ioctl-number.txt
Documentation/laptops/thinkpad-acpi.txt
Documentation/networking/batman-adv.txt
Documentation/networking/bonding.txt
Documentation/power/opp.txt
Documentation/power/powercap/powercap.txt [new file with mode: 0644]
Documentation/power/runtime_pm.txt
Documentation/ptp/testptp.c
Documentation/s390/s390dbf.txt
Documentation/scheduler/sched-arch.txt
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/sound/alsa/soc/DPCM.txt [new file with mode: 0644]
Documentation/sound/alsa/soc/codec.txt
Documentation/sound/alsa/soc/dapm.txt
Documentation/sound/alsa/soc/machine.txt
Documentation/sound/alsa/soc/platform.txt
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/include/uapi/asm/socket.h
arch/arc/kernel/kprobes.c
arch/arc/kernel/ptrace.c
arch/arc/kernel/time.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/boot/Makefile
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/omap3-beagle-xm.dts
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/install.sh
arch/arm/common/Makefile
arch/arm/common/bL_switcher.c [new file with mode: 0644]
arch/arm/common/bL_switcher_dummy_if.c [new file with mode: 0644]
arch/arm/common/mcpm_entry.c
arch/arm/common/mcpm_head.S
arch/arm/common/sharpsl_param.c
arch/arm/common/timer-sp.c
arch/arm/configs/h3600_defconfig
arch/arm/crypto/.gitignore [new file with mode: 0644]
arch/arm/crypto/Makefile
arch/arm/crypto/aes_glue.c
arch/arm/crypto/aes_glue.h [new file with mode: 0644]
arch/arm/crypto/aesbs-core.S_shipped [new file with mode: 0644]
arch/arm/crypto/aesbs-glue.c [new file with mode: 0644]
arch/arm/crypto/bsaes-armv7.pl [new file with mode: 0644]
arch/arm/include/asm/Kbuild
arch/arm/include/asm/atomic.h
arch/arm/include/asm/bL_switcher.h [new file with mode: 0644]
arch/arm/include/asm/cmpxchg.h
arch/arm/include/asm/cputype.h
arch/arm/include/asm/dma-mapping.h
arch/arm/include/asm/hardirq.h
arch/arm/include/asm/jump_label.h
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/mcpm.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/processor.h
arch/arm/include/asm/smp.h
arch/arm/include/asm/spinlock.h
arch/arm/include/asm/spinlock_types.h
arch/arm/include/asm/syscall.h
arch/arm/include/asm/tlbflush.h
arch/arm/include/asm/unified.h
arch/arm/include/debug/efm32.S [new file with mode: 0644]
arch/arm/include/uapi/asm/Kbuild
arch/arm/include/uapi/asm/perf_regs.h [new file with mode: 0644]
arch/arm/kernel/Makefile
arch/arm/kernel/armksyms.c
arch/arm/kernel/head.S
arch/arm/kernel/perf_event.c
arch/arm/kernel/perf_regs.c [new file with mode: 0644]
arch/arm/kernel/setup.c
arch/arm/kernel/sleep.S
arch/arm/kernel/smp.c
arch/arm/kernel/smp_tlb.c
arch/arm/kernel/suspend.c
arch/arm/lib/bitops.h
arch/arm/lib/uaccess_with_memcpy.c
arch/arm/mach-at91/Makefile
arch/arm/mach-at91/at91rm9200.c
arch/arm/mach-at91/at91sam9260.c
arch/arm/mach-at91/at91sam9261.c
arch/arm/mach-at91/at91sam9263.c
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9rl.c
arch/arm/mach-at91/board-sam9260ek.c
arch/arm/mach-at91/board-sam9263ek.c
arch/arm/mach-at91/pm.c
arch/arm/mach-at91/pm.h
arch/arm/mach-at91/setup.c
arch/arm/mach-davinci/Kconfig
arch/arm/mach-davinci/board-da830-evm.c
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/board-dm644x-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/board-mityomapl138.c
arch/arm/mach-davinci/board-sffsdr.c
arch/arm/mach-exynos/common.c
arch/arm/mach-exynos/common.h
arch/arm/mach-exynos/cpuidle.c
arch/arm/mach-exynos/mach-exynos4-dt.c
arch/arm/mach-exynos/mach-exynos5-dt.c
arch/arm/mach-highbank/Kconfig
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-imx/mach-pca100.c
arch/arm/mach-imx/mach-pcm037.c
arch/arm/mach-imx/mach-pcm038.c
arch/arm/mach-imx/mach-pcm043.c
arch/arm/mach-imx/mach-vpr200.c
arch/arm/mach-kirkwood/lacie_v2-common.c
arch/arm/mach-omap1/board-osk.c
arch/arm/mach-omap2/board-cm-t35.c
arch/arm/mach-omap2/board-generic.c
arch/arm/mach-omap2/board-h4.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-omap3stalker.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/gpmc-onenand.c
arch/arm/mach-omap2/mux.h
arch/arm/mach-omap2/omap-pm.h
arch/arm/mach-omap2/opp.c
arch/arm/mach-omap2/pm.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-pxa/Kconfig
arch/arm/mach-pxa/stargate2.c
arch/arm/mach-s3c24xx/mach-mini2440.c
arch/arm/mach-sa1100/assabet.c
arch/arm/mach-sa1100/generic.c
arch/arm/mach-sa1100/generic.h
arch/arm/mach-sa1100/include/mach/gpio.h [deleted file]
arch/arm/mach-sa1100/include/mach/h3xxx.h
arch/arm/mach-tegra/Kconfig
arch/arm/mach-tegra/fuse.c
arch/arm/mach-ux500/Kconfig
arch/arm/mach-zynq/common.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/idmap.c
arch/arm/mm/init.c
arch/arm/mm/mm.h
arch/arm/mm/mmap.c
arch/arm/mm/mmu.c
arch/arm/net/bpf_jit_32.c
arch/avr32/include/uapi/asm/socket.h
arch/blackfin/Kconfig
arch/c6x/Kconfig
arch/cris/Kconfig
arch/cris/include/uapi/asm/socket.h
arch/frv/include/uapi/asm/socket.h
arch/h8300/Kconfig [deleted file]
arch/h8300/Kconfig.cpu [deleted file]
arch/h8300/Kconfig.debug [deleted file]
arch/h8300/Kconfig.ide [deleted file]
arch/h8300/Makefile [deleted file]
arch/h8300/README [deleted file]
arch/h8300/boot/Makefile [deleted file]
arch/h8300/boot/compressed/Makefile [deleted file]
arch/h8300/boot/compressed/head.S [deleted file]
arch/h8300/boot/compressed/misc.c [deleted file]
arch/h8300/boot/compressed/vmlinux.lds [deleted file]
arch/h8300/boot/compressed/vmlinux.scr [deleted file]
arch/h8300/defconfig [deleted file]
arch/h8300/include/asm/Kbuild [deleted file]
arch/h8300/include/asm/asm-offsets.h [deleted file]
arch/h8300/include/asm/atomic.h [deleted file]
arch/h8300/include/asm/barrier.h [deleted file]
arch/h8300/include/asm/bitops.h [deleted file]
arch/h8300/include/asm/bootinfo.h [deleted file]
arch/h8300/include/asm/bug.h [deleted file]
arch/h8300/include/asm/bugs.h [deleted file]
arch/h8300/include/asm/cache.h [deleted file]
arch/h8300/include/asm/cachectl.h [deleted file]
arch/h8300/include/asm/cacheflush.h [deleted file]
arch/h8300/include/asm/checksum.h [deleted file]
arch/h8300/include/asm/cmpxchg.h [deleted file]
arch/h8300/include/asm/cputime.h [deleted file]
arch/h8300/include/asm/current.h [deleted file]
arch/h8300/include/asm/dbg.h [deleted file]
arch/h8300/include/asm/delay.h [deleted file]
arch/h8300/include/asm/device.h [deleted file]
arch/h8300/include/asm/div64.h [deleted file]
arch/h8300/include/asm/dma.h [deleted file]
arch/h8300/include/asm/elf.h [deleted file]
arch/h8300/include/asm/emergency-restart.h [deleted file]
arch/h8300/include/asm/fb.h [deleted file]
arch/h8300/include/asm/flat.h [deleted file]
arch/h8300/include/asm/fpu.h [deleted file]
arch/h8300/include/asm/ftrace.h [deleted file]
arch/h8300/include/asm/futex.h [deleted file]
arch/h8300/include/asm/gpio-internal.h [deleted file]
arch/h8300/include/asm/hardirq.h [deleted file]
arch/h8300/include/asm/hw_irq.h [deleted file]
arch/h8300/include/asm/io.h [deleted file]
arch/h8300/include/asm/irq.h [deleted file]
arch/h8300/include/asm/irq_regs.h [deleted file]
arch/h8300/include/asm/irqflags.h [deleted file]
arch/h8300/include/asm/kdebug.h [deleted file]
arch/h8300/include/asm/kmap_types.h [deleted file]
arch/h8300/include/asm/local.h [deleted file]
arch/h8300/include/asm/local64.h [deleted file]
arch/h8300/include/asm/mc146818rtc.h [deleted file]
arch/h8300/include/asm/mmu_context.h [deleted file]
arch/h8300/include/asm/mutex.h [deleted file]
arch/h8300/include/asm/page.h [deleted file]
arch/h8300/include/asm/page_offset.h [deleted file]
arch/h8300/include/asm/param.h [deleted file]
arch/h8300/include/asm/pci.h [deleted file]
arch/h8300/include/asm/percpu.h [deleted file]
arch/h8300/include/asm/pgalloc.h [deleted file]
arch/h8300/include/asm/pgtable.h [deleted file]
arch/h8300/include/asm/processor.h [deleted file]
arch/h8300/include/asm/ptrace.h [deleted file]
arch/h8300/include/asm/regs267x.h [deleted file]
arch/h8300/include/asm/regs306x.h [deleted file]
arch/h8300/include/asm/scatterlist.h [deleted file]
arch/h8300/include/asm/sections.h [deleted file]
arch/h8300/include/asm/segment.h [deleted file]
arch/h8300/include/asm/sh_bios.h [deleted file]
arch/h8300/include/asm/shm.h [deleted file]
arch/h8300/include/asm/shmparam.h [deleted file]
arch/h8300/include/asm/signal.h [deleted file]
arch/h8300/include/asm/smp.h [deleted file]
arch/h8300/include/asm/spinlock.h [deleted file]
arch/h8300/include/asm/string.h [deleted file]
arch/h8300/include/asm/switch_to.h [deleted file]
arch/h8300/include/asm/target_time.h [deleted file]
arch/h8300/include/asm/termios.h [deleted file]
arch/h8300/include/asm/thread_info.h [deleted file]
arch/h8300/include/asm/timer.h [deleted file]
arch/h8300/include/asm/timex.h [deleted file]
arch/h8300/include/asm/tlb.h [deleted file]
arch/h8300/include/asm/tlbflush.h [deleted file]
arch/h8300/include/asm/topology.h [deleted file]
arch/h8300/include/asm/traps.h [deleted file]
arch/h8300/include/asm/types.h [deleted file]
arch/h8300/include/asm/uaccess.h [deleted file]
arch/h8300/include/asm/ucontext.h [deleted file]
arch/h8300/include/asm/unaligned.h [deleted file]
arch/h8300/include/asm/unistd.h [deleted file]
arch/h8300/include/asm/user.h [deleted file]
arch/h8300/include/asm/virtconvert.h [deleted file]
arch/h8300/include/uapi/asm/Kbuild [deleted file]
arch/h8300/include/uapi/asm/auxvec.h [deleted file]
arch/h8300/include/uapi/asm/bitsperlong.h [deleted file]
arch/h8300/include/uapi/asm/byteorder.h [deleted file]
arch/h8300/include/uapi/asm/errno.h [deleted file]
arch/h8300/include/uapi/asm/fcntl.h [deleted file]
arch/h8300/include/uapi/asm/ioctl.h [deleted file]
arch/h8300/include/uapi/asm/ioctls.h [deleted file]
arch/h8300/include/uapi/asm/ipcbuf.h [deleted file]
arch/h8300/include/uapi/asm/kvm_para.h [deleted file]
arch/h8300/include/uapi/asm/mman.h [deleted file]
arch/h8300/include/uapi/asm/msgbuf.h [deleted file]
arch/h8300/include/uapi/asm/param.h [deleted file]
arch/h8300/include/uapi/asm/poll.h [deleted file]
arch/h8300/include/uapi/asm/posix_types.h [deleted file]
arch/h8300/include/uapi/asm/ptrace.h [deleted file]
arch/h8300/include/uapi/asm/resource.h [deleted file]
arch/h8300/include/uapi/asm/sembuf.h [deleted file]
arch/h8300/include/uapi/asm/setup.h [deleted file]
arch/h8300/include/uapi/asm/shmbuf.h [deleted file]
arch/h8300/include/uapi/asm/sigcontext.h [deleted file]
arch/h8300/include/uapi/asm/siginfo.h [deleted file]
arch/h8300/include/uapi/asm/signal.h [deleted file]
arch/h8300/include/uapi/asm/socket.h [deleted file]
arch/h8300/include/uapi/asm/sockios.h [deleted file]
arch/h8300/include/uapi/asm/stat.h [deleted file]
arch/h8300/include/uapi/asm/statfs.h [deleted file]
arch/h8300/include/uapi/asm/swab.h [deleted file]
arch/h8300/include/uapi/asm/termbits.h [deleted file]
arch/h8300/include/uapi/asm/termios.h [deleted file]
arch/h8300/include/uapi/asm/types.h [deleted file]
arch/h8300/include/uapi/asm/unistd.h [deleted file]
arch/h8300/kernel/Makefile [deleted file]
arch/h8300/kernel/asm-offsets.c [deleted file]
arch/h8300/kernel/entry.S [deleted file]
arch/h8300/kernel/gpio.c [deleted file]
arch/h8300/kernel/h8300_ksyms.c [deleted file]
arch/h8300/kernel/irq.c [deleted file]
arch/h8300/kernel/module.c [deleted file]
arch/h8300/kernel/process.c [deleted file]
arch/h8300/kernel/ptrace.c [deleted file]
arch/h8300/kernel/setup.c [deleted file]
arch/h8300/kernel/signal.c [deleted file]
arch/h8300/kernel/sys_h8300.c [deleted file]
arch/h8300/kernel/syscalls.S [deleted file]
arch/h8300/kernel/time.c [deleted file]
arch/h8300/kernel/timer/Makefile [deleted file]
arch/h8300/kernel/timer/itu.c [deleted file]
arch/h8300/kernel/timer/timer16.c [deleted file]
arch/h8300/kernel/timer/timer8.c [deleted file]
arch/h8300/kernel/timer/tpu.c [deleted file]
arch/h8300/kernel/traps.c [deleted file]
arch/h8300/kernel/vmlinux.lds.S [deleted file]
arch/h8300/lib/Makefile [deleted file]
arch/h8300/lib/abs.S [deleted file]
arch/h8300/lib/ashrdi3.c [deleted file]
arch/h8300/lib/checksum.c [deleted file]
arch/h8300/lib/memcpy.S [deleted file]
arch/h8300/lib/memset.S [deleted file]
arch/h8300/lib/romfs.S [deleted file]
arch/h8300/mm/Makefile [deleted file]
arch/h8300/mm/fault.c [deleted file]
arch/h8300/mm/init.c [deleted file]
arch/h8300/mm/kmap.c [deleted file]
arch/h8300/mm/memory.c [deleted file]
arch/h8300/platform/h8300h/Makefile [deleted file]
arch/h8300/platform/h8300h/aki3068net/Makefile [deleted file]
arch/h8300/platform/h8300h/aki3068net/crt0_ram.S [deleted file]
arch/h8300/platform/h8300h/generic/Makefile [deleted file]
arch/h8300/platform/h8300h/generic/crt0_ram.S [deleted file]
arch/h8300/platform/h8300h/generic/crt0_rom.S [deleted file]
arch/h8300/platform/h8300h/h8max/Makefile [deleted file]
arch/h8300/platform/h8300h/h8max/crt0_ram.S [deleted file]
arch/h8300/platform/h8300h/irq.c [deleted file]
arch/h8300/platform/h8300h/ptrace_h8300h.c [deleted file]
arch/h8300/platform/h8s/Makefile [deleted file]
arch/h8300/platform/h8s/edosk2674/Makefile [deleted file]
arch/h8300/platform/h8s/edosk2674/crt0_ram.S [deleted file]
arch/h8300/platform/h8s/edosk2674/crt0_rom.S [deleted file]
arch/h8300/platform/h8s/generic/Makefile [deleted file]
arch/h8300/platform/h8s/generic/crt0_ram.S [deleted file]
arch/h8300/platform/h8s/generic/crt0_rom.S [deleted file]
arch/h8300/platform/h8s/irq.c [deleted file]
arch/h8300/platform/h8s/ptrace_h8s.c [deleted file]
arch/ia64/Kconfig
arch/ia64/include/uapi/asm/socket.h
arch/ia64/kernel/acpi.c
arch/m32r/include/uapi/asm/socket.h
arch/m68k/Kconfig
arch/m68k/include/asm/floppy.h
arch/m68k/include/asm/sun3xflop.h
arch/m68k/include/asm/uaccess.h
arch/m68k/platform/68000/timers.c
arch/m68k/platform/68360/config.c
arch/m68k/platform/coldfire/pit.c
arch/m68k/platform/coldfire/sltimers.c
arch/m68k/platform/coldfire/timers.c
arch/microblaze/Kconfig
arch/mips/Kbuild.platforms
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/Makefile
arch/mips/alchemy/devboards/db1235.c
arch/mips/ath79/dev-common.c
arch/mips/bcm47xx/Makefile
arch/mips/bcm47xx/board.c [new file with mode: 0644]
arch/mips/bcm47xx/nvram.c
arch/mips/bcm47xx/prom.c
arch/mips/bcm47xx/setup.c
arch/mips/bcm47xx/time.c
arch/mips/boot/compressed/Makefile
arch/mips/boot/compressed/decompress.c
arch/mips/boot/compressed/ld.script
arch/mips/cavium-octeon/setup.c
arch/mips/cobalt/Makefile
arch/mips/cobalt/console.c [deleted file]
arch/mips/cobalt/setup.c
arch/mips/configs/powertv_defconfig [deleted file]
arch/mips/dec/int-handler.S
arch/mips/dec/ioasic-irq.c
arch/mips/dec/prom/call_o32.S
arch/mips/dec/prom/init.c
arch/mips/dec/prom/memory.c
arch/mips/dec/setup.c
arch/mips/include/asm/cacheops.h
arch/mips/include/asm/dec/ioasic.h
arch/mips/include/asm/dec/ioasic_addrs.h
arch/mips/include/asm/dec/kn01.h
arch/mips/include/asm/dec/kn02ca.h
arch/mips/include/asm/dec/prom.h
arch/mips/include/asm/elf.h
arch/mips/include/asm/jump_label.h
arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h [deleted file]
arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h [new file with mode: 0644]
arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
arch/mips/include/asm/mach-dec/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-generic/dma-coherence.h
arch/mips/include/asm/mach-ip27/dma-coherence.h
arch/mips/include/asm/mach-ip32/dma-coherence.h
arch/mips/include/asm/mach-jazz/dma-coherence.h
arch/mips/include/asm/mach-loongson/dma-coherence.h
arch/mips/include/asm/mach-powertv/asic.h [deleted file]
arch/mips/include/asm/mach-powertv/asic_reg_map.h [deleted file]
arch/mips/include/asm/mach-powertv/asic_regs.h [deleted file]
arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h [deleted file]
arch/mips/include/asm/mach-powertv/dma-coherence.h [deleted file]
arch/mips/include/asm/mach-powertv/interrupts.h [deleted file]
arch/mips/include/asm/mach-powertv/ioremap.h [deleted file]
arch/mips/include/asm/mach-powertv/irq.h [deleted file]
arch/mips/include/asm/mach-powertv/powertv-clock.h [deleted file]
arch/mips/include/asm/mach-powertv/war.h [deleted file]
arch/mips/include/asm/mips-boards/piix4.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/ptrace.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/setup.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/syscall.h [new file with mode: 0644]
arch/mips/include/asm/thread_info.h
arch/mips/include/asm/time.h
arch/mips/include/asm/unistd.h
arch/mips/include/uapi/asm/siginfo.h
arch/mips/include/uapi/asm/socket.h
arch/mips/kernel/Makefile
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/csrc-powertv.c [deleted file]
arch/mips/kernel/early_printk_8250.c [new file with mode: 0644]
arch/mips/kernel/ftrace.c
arch/mips/kernel/genex.S
arch/mips/kernel/irq_cpu.c
arch/mips/kernel/module.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/ptrace.c
arch/mips/kernel/r2300_switch.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/setup.c
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/lantiq/irq.c
arch/mips/lantiq/xway/sysctrl.c
arch/mips/mm/c-r4k.c
arch/mips/mm/dma-default.c
arch/mips/mm/tlb-funcs.S
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mti-malta/malta-int.c
arch/mips/netlogic/common/smp.c
arch/mips/pci/fixup-malta.c
arch/mips/pci/pci-ar71xx.c
arch/mips/pci/pci-ar724x.c
arch/mips/pci/pci.c
arch/mips/powertv/Kconfig [deleted file]
arch/mips/powertv/Makefile [deleted file]
arch/mips/powertv/Platform [deleted file]
arch/mips/powertv/asic/Makefile [deleted file]
arch/mips/powertv/asic/asic-calliope.c [deleted file]
arch/mips/powertv/asic/asic-cronus.c [deleted file]
arch/mips/powertv/asic/asic-gaia.c [deleted file]
arch/mips/powertv/asic/asic-zeus.c [deleted file]
arch/mips/powertv/asic/asic_devices.c [deleted file]
arch/mips/powertv/asic/asic_int.c [deleted file]
arch/mips/powertv/asic/irq_asic.c [deleted file]
arch/mips/powertv/asic/prealloc-calliope.c [deleted file]
arch/mips/powertv/asic/prealloc-cronus.c [deleted file]
arch/mips/powertv/asic/prealloc-cronuslite.c [deleted file]
arch/mips/powertv/asic/prealloc-gaia.c [deleted file]
arch/mips/powertv/asic/prealloc-zeus.c [deleted file]
arch/mips/powertv/asic/prealloc.h [deleted file]
arch/mips/powertv/init.c [deleted file]
arch/mips/powertv/init.h [deleted file]
arch/mips/powertv/ioremap.c [deleted file]
arch/mips/powertv/memory.c [deleted file]
arch/mips/powertv/pci/Makefile [deleted file]
arch/mips/powertv/pci/fixup-powertv.c [deleted file]
arch/mips/powertv/pci/powertv-pci.h [deleted file]
arch/mips/powertv/powertv-clock.h [deleted file]
arch/mips/powertv/powertv-usb.c [deleted file]
arch/mips/powertv/powertv_setup.c [deleted file]
arch/mips/powertv/reset.c [deleted file]
arch/mips/powertv/reset.h [deleted file]
arch/mips/powertv/time.c [deleted file]
arch/mips/ralink/clk.c
arch/mips/ralink/mt7620.c
arch/mips/ralink/of.c
arch/mips/ralink/rt305x.c
arch/mn10300/include/uapi/asm/socket.h
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/configs/712_defconfig
arch/parisc/configs/a500_defconfig
arch/parisc/configs/b180_defconfig
arch/parisc/configs/c3000_defconfig
arch/parisc/configs/c8000_defconfig
arch/parisc/configs/default_defconfig
arch/parisc/configs/generic-32bit_defconfig [new file with mode: 0644]
arch/parisc/configs/generic-64bit_defconfig [new file with mode: 0644]
arch/parisc/include/asm/assembly.h
arch/parisc/include/asm/ptrace.h
arch/parisc/include/asm/socket.h [new file with mode: 0644]
arch/parisc/include/asm/thread_info.h
arch/parisc/include/asm/traps.h
arch/parisc/include/asm/uaccess.h
arch/parisc/include/uapi/asm/socket.h
arch/parisc/install.sh
arch/parisc/kernel/Makefile
arch/parisc/kernel/audit.c [new file with mode: 0644]
arch/parisc/kernel/compat_audit.c [new file with mode: 0644]
arch/parisc/kernel/ptrace.c
arch/parisc/kernel/smp.c
arch/parisc/kernel/syscall.S
arch/parisc/kernel/traps.c
arch/parisc/lib/lusercopy.S
arch/parisc/lib/memcpy.c
arch/parisc/mm/fault.c
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/boot/Makefile
arch/powerpc/include/asm/archrandom.h [new file with mode: 0644]
arch/powerpc/include/asm/checksum.h
arch/powerpc/include/asm/fsl_ifc.h
arch/powerpc/include/asm/hvsi.h
arch/powerpc/include/asm/io.h
arch/powerpc/include/asm/jump_label.h
arch/powerpc/include/asm/machdep.h
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/pgtable-ppc64.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/scom.h
arch/powerpc/include/asm/sfp-machine.h
arch/powerpc/include/asm/string.h
arch/powerpc/include/asm/word-at-a-time.h
arch/powerpc/include/uapi/asm/byteorder.h
arch/powerpc/include/uapi/asm/socket.h
arch/powerpc/kernel/align.c
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64e.S
arch/powerpc/kernel/fpu.S
arch/powerpc/kernel/ftrace.c
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/ptrace32.c
arch/powerpc/kernel/rtas_pci.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/tm.S
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vdso32/vdso32.lds.S
arch/powerpc/kernel/vdso64/vdso64.lds.S
arch/powerpc/kernel/vecemu.c
arch/powerpc/kernel/vector.S
arch/powerpc/kernel/vio.c
arch/powerpc/kvm/Kconfig
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/e500_mmu_host.c
arch/powerpc/lib/Makefile
arch/powerpc/lib/copyuser_power7.S
arch/powerpc/lib/memcpy_power7.S
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/init_64.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/platforms/512x/mpc512x_shared.c
arch/powerpc/platforms/52xx/mpc52xx_pic.c
arch/powerpc/platforms/8xx/tqm8xx_setup.c
arch/powerpc/platforms/powernv/Kconfig
arch/powerpc/platforms/powernv/Makefile
arch/powerpc/platforms/powernv/eeh-ioda.c
arch/powerpc/platforms/powernv/eeh-powernv.c
arch/powerpc/platforms/powernv/opal-nvram.c
arch/powerpc/platforms/powernv/opal-rtc.c
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/platforms/powernv/opal-xscom.c [new file with mode: 0644]
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci-p5ioc2.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/powernv/pci.h
arch/powerpc/platforms/powernv/rng.c [new file with mode: 0644]
arch/powerpc/platforms/pseries/Makefile
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/rng.c [new file with mode: 0644]
arch/powerpc/platforms/wsp/scom_smp.c
arch/powerpc/platforms/wsp/scom_wsp.c
arch/powerpc/platforms/wsp/wsp.c
arch/powerpc/sysdev/Kconfig
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/sysdev/mpic.c
arch/powerpc/sysdev/scom.c
arch/powerpc/sysdev/xics/ics-opal.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/appldata/appldata_base.c
arch/s390/configs/default_defconfig [new file with mode: 0644]
arch/s390/configs/gcov_defconfig [new file with mode: 0644]
arch/s390/configs/performance_defconfig [new file with mode: 0644]
arch/s390/configs/zfcpdump_defconfig [new file with mode: 0644]
arch/s390/crypto/aes_s390.c
arch/s390/defconfig
arch/s390/include/asm/atomic.h
arch/s390/include/asm/bitops.h
arch/s390/include/asm/compat.h
arch/s390/include/asm/ctl_reg.h
arch/s390/include/asm/debug.h
arch/s390/include/asm/dis.h [new file with mode: 0644]
arch/s390/include/asm/fcx.h
arch/s390/include/asm/ipl.h
arch/s390/include/asm/jump_label.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pci_debug.h
arch/s390/include/asm/pci_insn.h
arch/s390/include/asm/percpu.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/ptrace.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/smp.h
arch/s390/include/asm/switch_to.h
arch/s390/include/asm/timex.h
arch/s390/include/asm/uaccess.h
arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/sigcontext.h
arch/s390/include/uapi/asm/socket.h
arch/s390/kernel/Makefile
arch/s390/kernel/bitmap.c [deleted file]
arch/s390/kernel/cache.c
arch/s390/kernel/compat_linux.c
arch/s390/kernel/compat_linux.h
arch/s390/kernel/compat_signal.c
arch/s390/kernel/crash_dump.c
arch/s390/kernel/debug.c
arch/s390/kernel/dis.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/kernel/ftrace.c
arch/s390/kernel/head.S
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/process.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/runtime_instr.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/smp.c
arch/s390/kernel/vdso.c
arch/s390/kernel/vtime.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/trace.h
arch/s390/lib/Makefile
arch/s390/lib/delay.c
arch/s390/lib/find.c [new file with mode: 0644]
arch/s390/lib/uaccess_mvcos.c
arch/s390/lib/uaccess_pt.c
arch/s390/lib/uaccess_std.c [deleted file]
arch/s390/math-emu/math.c
arch/s390/mm/cmm.c
arch/s390/mm/fault.c
arch/s390/mm/gup.c
arch/s390/mm/pageattr.c
arch/s390/mm/pgtable.c
arch/s390/net/bpf_jit_comp.c
arch/s390/pci/pci.c
arch/s390/pci/pci_clp.c
arch/s390/pci/pci_dma.c
arch/s390/pci/pci_event.c
arch/sh/Kconfig
arch/sh/include/asm/hw_breakpoint.h
arch/sh/include/cpu-common/cpu/ubc.h [new file with mode: 0644]
arch/sh/include/cpu-sh2a/cpu/ubc.h [new file with mode: 0644]
arch/sh/kernel/cpu/sh2a/Makefile
arch/sh/kernel/cpu/sh2a/ubc.c [new file with mode: 0644]
arch/sh/kernel/hw_breakpoint.c
arch/sparc/Kconfig
arch/sparc/include/asm/jump_label.h
arch/sparc/include/uapi/asm/socket.h
arch/sparc/net/bpf_jit_comp.c
arch/tile/include/asm/atomic.h
arch/tile/include/asm/atomic_32.h
arch/tile/include/asm/cmpxchg.h
arch/tile/include/asm/percpu.h
arch/tile/kernel/hardwall.c
arch/tile/kernel/intvec_32.S
arch/tile/kernel/intvec_64.S
arch/tile/kernel/stack.c
arch/tile/lib/atomic_32.c
arch/unicore32/Kconfig
arch/x86/Kconfig
arch/x86/crypto/Makefile
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/camellia_aesni_avx2_glue.c
arch/x86/crypto/camellia_aesni_avx_glue.c
arch/x86/crypto/cast5_avx_glue.c
arch/x86/crypto/cast6_avx_glue.c
arch/x86/crypto/serpent_avx2_glue.c
arch/x86/crypto/serpent_avx_glue.c
arch/x86/crypto/serpent_sse2_glue.c
arch/x86/crypto/sha256_ssse3_glue.c
arch/x86/crypto/twofish_avx_glue.c
arch/x86/include/asm/acpi.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/jump_label.h
arch/x86/include/asm/mpspec.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/mutex_64.h
arch/x86/include/asm/simd.h [new file with mode: 0644]
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/jump_label.c
arch/x86/kernel/kvm.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/topology.c
arch/x86/kvm/vmx.c
arch/x86/lib/msr-smp.c
arch/x86/net/bpf_jit_comp.c
arch/x86/pci/fixup.c
arch/x86/platform/olpc/olpc-xo15-sci.c
arch/x86/xen/smp.c
arch/xtensa/include/uapi/asm/socket.h
block/blk-settings.c
block/partitions/efi.c
crypto/Kconfig
crypto/Makefile
crypto/ablk_helper.c [moved from arch/x86/crypto/ablk_helper.c with 95% similarity]
crypto/ansi_cprng.c
crypto/asymmetric_keys/rsa.c
crypto/async_tx/async_tx.c
crypto/authenc.c
crypto/authencesn.c
crypto/ccm.c
crypto/gcm.c
crypto/memneq.c [new file with mode: 0644]
drivers/Kconfig
drivers/Makefile
drivers/acpi/Kconfig
drivers/acpi/Makefile
drivers/acpi/ac.c
drivers/acpi/acpi_ipmi.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_memhotplug.c
drivers/acpi/acpi_platform.c
drivers/acpi/acpi_processor.c
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/evgpeutil.c
drivers/acpi/acpica/evmisc.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/evsci.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/hwxface.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/acpica/nsdump.c
drivers/acpi/acpica/nsxfeval.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbprint.c
drivers/acpi/acpica/tbxfroot.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/apei/apei-base.c
drivers/acpi/battery.c
drivers/acpi/blacklist.c
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/cm_sbs.c [deleted file]
drivers/acpi/device_pm.c
drivers/acpi/dock.c
drivers/acpi/ec.c
drivers/acpi/fan.c
drivers/acpi/internal.h
drivers/acpi/numa.c
drivers/acpi/osl.c
drivers/acpi/pci_root.c
drivers/acpi/power.c
drivers/acpi/proc.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_idle.c
drivers/acpi/sbs.c
drivers/acpi/scan.c
drivers/acpi/sysfs.c
drivers/acpi/thermal.c
drivers/acpi/utils.c
drivers/acpi/video.c
drivers/acpi/video_detect.c
drivers/amba/bus.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_imx.c
drivers/ata/ahci_platform.c
drivers/ata/ata_piix.c
drivers/ata/libahci.c
drivers/ata/libata-acpi.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/pata_isapnp.c
drivers/ata/pata_ixp4xx_cf.c
drivers/ata/pata_octeon_cf.c
drivers/atm/firestream.h
drivers/base/cpu.c
drivers/base/memory.c
drivers/base/power/main.c
drivers/base/power/opp.c
drivers/base/power/runtime.c
drivers/base/regmap/internal.h
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap.c
drivers/bcma/host_pci.c
drivers/block/loop.c
drivers/block/nvme-core.c
drivers/block/virtio_blk.c
drivers/bluetooth/Makefile
drivers/bluetooth/ath3k.c
drivers/bluetooth/bfusb.c
drivers/bluetooth/bluecard_cs.c
drivers/bluetooth/bpa10x.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btmrvl_main.c
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btmrvl_sdio.h
drivers/bluetooth/btsdio.c
drivers/bluetooth/btuart_cs.c
drivers/bluetooth/btusb.c
drivers/bluetooth/btwilink.c
drivers/bluetooth/dtl1_cs.c
drivers/bluetooth/hci_bcsp.c
drivers/bluetooth/hci_h4.c
drivers/bluetooth/hci_h5.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_ll.c
drivers/bluetooth/hci_vhci.c
drivers/bus/arm-cci.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/Makefile
drivers/char/hw_random/omap3-rom-rng.c [new file with mode: 0644]
drivers/char/hw_random/powernv-rng.c [new file with mode: 0644]
drivers/char/hw_random/pseries-rng.c
drivers/char/hw_random/via-rng.c
drivers/char/hw_random/virtio-rng.c
drivers/char/random.c
drivers/char/raw.c
drivers/char/tpm/xen-tpmfront.c
drivers/char/virtio_console.c
drivers/connector/cn_proc.c
drivers/connector/connector.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Kconfig.powerpc
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/Makefile
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/arm_big_little.c
drivers/cpufreq/arm_big_little_dt.c
drivers/cpufreq/at32ap-cpufreq.c
drivers/cpufreq/blackfin-cpufreq.c
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq-nforce2.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_governor.h
drivers/cpufreq/cris-artpec3-cpufreq.c
drivers/cpufreq/cris-etraxfs-cpufreq.c
drivers/cpufreq/davinci-cpufreq.c
drivers/cpufreq/dbx500-cpufreq.c
drivers/cpufreq/e_powersaver.c
drivers/cpufreq/elanfreq.c
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/exynos4210-cpufreq.c
drivers/cpufreq/exynos4x12-cpufreq.c
drivers/cpufreq/exynos5440-cpufreq.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/gx-suspmod.c
drivers/cpufreq/highbank-cpufreq.c
drivers/cpufreq/ia64-acpi-cpufreq.c
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/integrator-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/longrun.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/cpufreq/maple-cpufreq.c
drivers/cpufreq/omap-cpufreq.c
drivers/cpufreq/p4-clockmod.c
drivers/cpufreq/pasemi-cpufreq.c
drivers/cpufreq/pcc-cpufreq.c
drivers/cpufreq/pmac32-cpufreq.c
drivers/cpufreq/pmac64-cpufreq.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/cpufreq/ppc_cbe_cpufreq.c
drivers/cpufreq/pxa2xx-cpufreq.c
drivers/cpufreq/pxa3xx-cpufreq.c
drivers/cpufreq/s3c2416-cpufreq.c
drivers/cpufreq/s3c24xx-cpufreq.c
drivers/cpufreq/s3c64xx-cpufreq.c
drivers/cpufreq/s5pv210-cpufreq.c
drivers/cpufreq/sa1100-cpufreq.c
drivers/cpufreq/sa1110-cpufreq.c
drivers/cpufreq/sc520_freq.c
drivers/cpufreq/sh-cpufreq.c
drivers/cpufreq/sparc-us2e-cpufreq.c
drivers/cpufreq/sparc-us3-cpufreq.c
drivers/cpufreq/spear-cpufreq.c
drivers/cpufreq/speedstep-centrino.c
drivers/cpufreq/speedstep-ich.c
drivers/cpufreq/speedstep-smi.c
drivers/cpufreq/tegra-cpufreq.c
drivers/cpufreq/unicore2-cpufreq.c
drivers/cpuidle/Kconfig.arm
drivers/cpuidle/Makefile
drivers/cpuidle/cpuidle-at91.c [moved from arch/arm/mach-at91/cpuidle.c with 79% similarity]
drivers/cpuidle/cpuidle-ux500.c
drivers/cpuidle/cpuidle-zynq.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c
drivers/crypto/caam/caamrng.c
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/desc.h
drivers/crypto/caam/intern.h
drivers/crypto/caam/regs.h
drivers/crypto/caam/sg_sw_sec4.h
drivers/crypto/dcp.c
drivers/crypto/ixp4xx_crypto.c
drivers/crypto/mv_cesa.c
drivers/crypto/omap-aes.c
drivers/crypto/picoxcell_crypto.c
drivers/crypto/sahara.c
drivers/crypto/talitos.c
drivers/crypto/tegra-aes.c
drivers/devfreq/devfreq.c
drivers/devfreq/exynos/exynos4_bus.c
drivers/devfreq/exynos/exynos5_bus.c
drivers/dma/amba-pl08x.c
drivers/dma/at_hdmac.c
drivers/dma/coh901318.c
drivers/dma/cppi41.c
drivers/dma/dma-jz4740.c
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/dw/core.c
drivers/dma/dw/platform.c
drivers/dma/edma.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/intel_mid_dma.c
drivers/dma/ioat/dma.c
drivers/dma/ioat/dma_v3.c
drivers/dma/iop-adma.c
drivers/dma/k3dma.c
drivers/dma/mmp_pdma.c
drivers/dma/mmp_tdma.c
drivers/dma/mv_xor.c
drivers/dma/mxs-dma.c
drivers/dma/omap-dma.c
drivers/dma/pl330.c
drivers/dma/ppc4xx/adma.c
drivers/dma/sa11x0-dma.c
drivers/dma/sh/rcar-hpbdma.c
drivers/dma/sh/shdma-base.c
drivers/dma/sh/shdmac.c
drivers/dma/ste_dma40.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/txx9dmac.c
drivers/firmware/dcdbas.c
drivers/firmware/google/gsmi.c
drivers/gpio/gpio-lynxpoint.c
drivers/gpio/gpio-sa1100.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/armada/Kconfig [new file with mode: 0644]
drivers/gpu/drm/armada/Makefile [new file with mode: 0644]
drivers/gpu/drm/armada/armada_510.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_crtc.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_crtc.h [new file with mode: 0644]
drivers/gpu/drm/armada/armada_debugfs.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_drm.h [new file with mode: 0644]
drivers/gpu/drm/armada/armada_drv.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_fb.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_fb.h [new file with mode: 0644]
drivers/gpu/drm/armada/armada_fbdev.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_gem.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_gem.h [new file with mode: 0644]
drivers/gpu/drm/armada/armada_hw.h [new file with mode: 0644]
drivers/gpu/drm/armada/armada_ioctlP.h [new file with mode: 0644]
drivers/gpu/drm/armada/armada_output.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_output.h [new file with mode: 0644]
drivers/gpu/drm/armada/armada_overlay.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_slave.c [new file with mode: 0644]
drivers/gpu/drm/armada/armada_slave.h [new file with mode: 0644]
drivers/gpu/drm/ast/Kconfig
drivers/gpu/drm/ast/ast_drv.c
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/cirrus/Kconfig
drivers/gpu/drm/cirrus/cirrus_drv.c
drivers/gpu/drm/cirrus/cirrus_drv.h
drivers/gpu/drm/cirrus/cirrus_main.c
drivers/gpu/drm/drm_context.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_edid_load.c
drivers/gpu/drm/drm_encoder_slave.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_global.c
drivers/gpu/drm/drm_info.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_lock.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_panel.c [new file with mode: 0644]
drivers/gpu/drm/drm_pci.c
drivers/gpu/drm/drm_platform.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/drm_stub.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/drm_usb.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/gma500/Kconfig
drivers/gpu/drm/gma500/cdv_intel_dp.c
drivers/gpu/drm/gma500/gem.c
drivers/gpu/drm/gma500/gtt.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/gma500/psb_irq.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i915/Kconfig [new file with mode: 0644]
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/dvo.h
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_acpi.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_dsi.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_dsi_cmd.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_dsi_cmd.h [new file with mode: 0644]
drivers/gpu/drm/i915/intel_dsi_pll.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_fbdev.c [moved from drivers/gpu/drm/i915/intel_fb.c with 89% similarity]
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sideband.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/mga/mga_dma.c
drivers/gpu/drm/mga/mga_irq.c
drivers/gpu/drm/mgag200/Kconfig
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/mgag200/mgag200_drv.h
drivers/gpu/drm/mgag200/mgag200_main.c
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/nouveau/Kconfig
drivers/gpu/drm/nouveau/core/subdev/mc/base.c
drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
drivers/gpu/drm/nouveau/dispnv04/arb.c
drivers/gpu/drm/nouveau/dispnv04/crtc.c
drivers/gpu/drm/nouveau/dispnv04/dfp.c
drivers/gpu/drm/nouveau/dispnv04/disp.h
drivers/gpu/drm/nouveau/dispnv04/hw.c
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_backlight.c
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_bo.h
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_gem.h
drivers/gpu/drm/nouveau/nouveau_prime.c
drivers/gpu/drm/omapdrm/Kconfig
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/omapdrm/omap_irq.c
drivers/gpu/drm/panel/Kconfig [new file with mode: 0644]
drivers/gpu/drm/panel/Makefile [new file with mode: 0644]
drivers/gpu/drm/panel/panel-simple.c [new file with mode: 0644]
drivers/gpu/drm/qxl/Kconfig
drivers/gpu/drm/qxl/qxl_drv.c
drivers/gpu/drm/qxl/qxl_drv.h
drivers/gpu/drm/qxl/qxl_gem.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/btc_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/dce6_afmt.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600_hdmi.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_test.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/radeon/sid.h
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/gpu/drm/radeon/uvd_v1_0.c
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/shmobile/Kconfig
drivers/gpu/drm/tegra/Kconfig [moved from drivers/gpu/host1x/drm/Kconfig with 84% similarity]
drivers/gpu/drm/tegra/Makefile [new file with mode: 0644]
drivers/gpu/drm/tegra/bus.c [new file with mode: 0644]
drivers/gpu/drm/tegra/dc.c [moved from drivers/gpu/host1x/drm/dc.c with 93% similarity]
drivers/gpu/drm/tegra/dc.h [moved from drivers/gpu/host1x/drm/dc.h with 98% similarity]
drivers/gpu/drm/tegra/drm.c [moved from drivers/gpu/host1x/drm/drm.c with 51% similarity]
drivers/gpu/drm/tegra/drm.h [moved from drivers/gpu/host1x/drm/drm.h with 71% similarity]
drivers/gpu/drm/tegra/dsi.c [new file with mode: 0644]
drivers/gpu/drm/tegra/fb.c [moved from drivers/gpu/host1x/drm/fb.c with 92% similarity]
drivers/gpu/drm/tegra/gem.c [moved from drivers/gpu/host1x/drm/gem.c with 86% similarity]
drivers/gpu/drm/tegra/gem.h [moved from drivers/gpu/host1x/drm/gem.h with 84% similarity]
drivers/gpu/drm/tegra/gr2d.c [new file with mode: 0644]
drivers/gpu/drm/tegra/gr2d.h [new file with mode: 0644]
drivers/gpu/drm/tegra/gr3d.c [new file with mode: 0644]
drivers/gpu/drm/tegra/gr3d.h [new file with mode: 0644]
drivers/gpu/drm/tegra/hdmi.c [moved from drivers/gpu/host1x/drm/hdmi.c with 83% similarity]
drivers/gpu/drm/tegra/hdmi.h [moved from drivers/gpu/host1x/drm/hdmi.h with 72% similarity]
drivers/gpu/drm/tegra/output.c [moved from drivers/gpu/host1x/drm/output.c with 81% similarity]
drivers/gpu/drm/tegra/rgb.c [moved from drivers/gpu/host1x/drm/rgb.c with 96% similarity]
drivers/gpu/drm/tilcdc/Kconfig
drivers/gpu/drm/udl/Kconfig
drivers/gpu/drm/udl/udl_drv.c
drivers/gpu/drm/udl/udl_drv.h
drivers/gpu/drm/udl/udl_gem.c
drivers/gpu/drm/via/via_mm.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/host1x/Kconfig
drivers/gpu/host1x/Makefile
drivers/gpu/host1x/bus.c [new file with mode: 0644]
drivers/gpu/host1x/bus.h [moved from drivers/gpu/host1x/host1x_client.h with 60% similarity]
drivers/gpu/host1x/cdma.c
drivers/gpu/host1x/channel.h
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/dev.h
drivers/gpu/host1x/drm/gr2d.c [deleted file]
drivers/gpu/host1x/host1x.h [deleted file]
drivers/gpu/host1x/host1x_bo.h [deleted file]
drivers/gpu/host1x/hw/Makefile [deleted file]
drivers/gpu/host1x/hw/cdma_hw.c
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/host1x/hw/debug_hw.c
drivers/gpu/host1x/hw/host1x01.c
drivers/gpu/host1x/hw/host1x02.c [new file with mode: 0644]
drivers/gpu/host1x/hw/host1x02.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x02_channel.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x02_sync.h [new file with mode: 0644]
drivers/gpu/host1x/hw/hw_host1x02_uclass.h [new file with mode: 0644]
drivers/gpu/host1x/hw/intr_hw.c
drivers/gpu/host1x/hw/syncpt_hw.c
drivers/gpu/host1x/job.c
drivers/gpu/host1x/job.h
drivers/gpu/host1x/mipi.c [new file with mode: 0644]
drivers/gpu/host1x/syncpt.c
drivers/gpu/host1x/syncpt.h
drivers/hid/Kconfig
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-elo.c
drivers/hid/hid-holtek-mouse.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-lenovo-tpkbd.c
drivers/hid/hid-lg.c
drivers/hid/hid-lg2ff.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-roccat-kone.c
drivers/hid/hid-roccat-koneplus.c
drivers/hid/hid-roccat-kovaplus.c
drivers/hid/hid-roccat-pyra.c
drivers/hid/hid-sony.c
drivers/hid/hid-wiimote-core.c
drivers/hid/hid-wiimote-modules.c
drivers/hid/hid-wiimote.h
drivers/hid/hidraw.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/uhid.c
drivers/hid/usbhid/hid-quirks.c
drivers/hwmon/abituguru.c
drivers/hwmon/abituguru3.c
drivers/hwmon/acpi_power_meter.c
drivers/hwmon/adcxx.c
drivers/hwmon/adm1026.c
drivers/hwmon/adt7462.c
drivers/hwmon/applesmc.c
drivers/hwmon/asc7621.c
drivers/hwmon/asus_atk0110.c
drivers/hwmon/atxp1.c
drivers/hwmon/ds1621.c
drivers/hwmon/emc1403.c
drivers/hwmon/f71882fg.c
drivers/hwmon/f75375s.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/hwmon.c
drivers/hwmon/ina209.c
drivers/hwmon/ina2xx.c
drivers/hwmon/jc42.c
drivers/hwmon/lm70.c
drivers/hwmon/lm73.c
drivers/hwmon/lm95234.c
drivers/hwmon/ltc4245.c
drivers/hwmon/ltc4261.c
drivers/hwmon/max16065.c
drivers/hwmon/max6642.c
drivers/hwmon/max6650.c
drivers/hwmon/max6697.c
drivers/hwmon/mc13783-adc.c
drivers/hwmon/nct6775.c
drivers/hwmon/pmbus/lm25066.c
drivers/hwmon/pmbus/ltc2978.c
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwmon/tmp401.c
drivers/hwmon/w83791d.c
drivers/hwmon/w83792d.c
drivers/hwmon/w83793.c
drivers/i2c/busses/i2c-bfin-twi.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-stu300.c
drivers/i2c/busses/i2c-xiic.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-dev.c
drivers/i2c/i2c-smbus.c
drivers/i2c/muxes/i2c-arb-gpio-challenge.c
drivers/i2c/muxes/i2c-mux-gpio.c
drivers/i2c/muxes/i2c-mux-pinctrl.c
drivers/ide/Kconfig
drivers/ide/Makefile
drivers/ide/ide-h8300.c [deleted file]
drivers/idle/intel_idle.c
drivers/iio/frequency/adf4350.c
drivers/iio/industrialio-buffer.c
drivers/infiniband/Kconfig
drivers/infiniband/core/cma.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/amso1100/c2_ae.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/srq.c
drivers/infiniband/hw/mthca/mthca_eq.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/input/input.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/misc/cm109.c
drivers/input/serio/i8042.c
drivers/input/tablet/wacom_sys.c
drivers/input/tablet/wacom_wac.c
drivers/iommu/Kconfig
drivers/irqchip/irq-gic.c
drivers/isdn/hardware/eicon/divasmain.c
drivers/isdn/hardware/eicon/um_idi.c
drivers/isdn/sc/init.c
drivers/md/bcache/request.c
drivers/md/dm-snap-persistent.c
drivers/md/md.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/dvb-frontends/tda10071.c
drivers/media/i2c/ad9389b.c
drivers/media/i2c/adv7511.c
drivers/media/i2c/adv7842.c
drivers/media/i2c/s5c73m3/s5c73m3-core.c
drivers/media/i2c/ths8200.c
drivers/media/pci/saa7134/saa7134-video.c
drivers/media/platform/exynos4-is/media-dev.c
drivers/media/platform/marvell-ccic/mcam-core.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/omap3isp/isp.h
drivers/media/platform/s5p-jpeg/jpeg-core.c
drivers/media/platform/sh_vou.c
drivers/media/platform/soc_camera/mx3_camera.c
drivers/media/tuners/e4000.c
drivers/media/usb/gspca/conex.c
drivers/media/usb/gspca/cpia1.c
drivers/media/usb/gspca/gspca.c
drivers/media/usb/gspca/gspca.h
drivers/media/usb/gspca/jeilinj.c
drivers/media/usb/gspca/jl2005bcd.c
drivers/media/usb/gspca/m5602/m5602_mt9m111.c
drivers/media/usb/gspca/mars.c
drivers/media/usb/gspca/mr97310a.c
drivers/media/usb/gspca/nw80x.c
drivers/media/usb/gspca/ov519.c
drivers/media/usb/gspca/ov534.c
drivers/media/usb/gspca/pac207.c
drivers/media/usb/gspca/pac7311.c
drivers/media/usb/gspca/se401.c
drivers/media/usb/gspca/sn9c20x.c
drivers/media/usb/gspca/sonixb.c
drivers/media/usb/gspca/sonixj.c
drivers/media/usb/gspca/spca1528.c
drivers/media/usb/gspca/spca500.c
drivers/media/usb/gspca/sq905c.c
drivers/media/usb/gspca/sq930x.c
drivers/media/usb/gspca/stk014.c
drivers/media/usb/gspca/stk1135.c
drivers/media/usb/gspca/stv06xx/stv06xx.c
drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
drivers/media/usb/gspca/sunplus.c
drivers/media/usb/gspca/topro.c
drivers/media/usb/gspca/tv8532.c
drivers/media/usb/gspca/vicam.c
drivers/media/usb/gspca/w996Xcf.c
drivers/media/usb/gspca/xirlink_cit.c
drivers/media/usb/gspca/zc3xx.c
drivers/media/usb/stkwebcam/stk-webcam.c
drivers/media/usb/uvc/uvc_driver.c
drivers/media/v4l2-core/tuner-core.c
drivers/media/v4l2-core/v4l2-common.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-dma-contig.c
drivers/media/v4l2-core/videobuf2-dma-sg.c
drivers/mfd/mc13xxx-core.c
drivers/mfd/mc13xxx-spi.c
drivers/misc/eeprom/at24.c
drivers/mmc/card/queue.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mmci.h
drivers/mmc/host/sdhci-acpi.c
drivers/mtd/bcm47xxpart.c
drivers/mtd/devices/block2mtd.c
drivers/mtd/devices/docg3.c
drivers/mtd/devices/m25p80.c
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/devices/phram.c
drivers/mtd/devices/sst25l.c
drivers/mtd/inftlcore.c
drivers/mtd/lpddr/lpddr_cmds.c
drivers/mtd/maps/intel_vr_nor.c
drivers/mtd/maps/pci.c
drivers/mtd/maps/plat-ram.c
drivers/mtd/maps/scb2_flash.c
drivers/mtd/mtdblock.c
drivers/mtd/mtdblock_ro.c
drivers/mtd/mtdchar.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdsuper.c
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/bcm47xxnflash/main.c
drivers/mtd/nand/denali.c
drivers/mtd/nand/denali_pci.c
drivers/mtd/nand/diskonchip.c
drivers/mtd/nand/docg4.c
drivers/mtd/nand/fsl_elbc_nand.c
drivers/mtd/nand/fsl_ifc_nand.c
drivers/mtd/nand/gpmi-nand/gpmi-lib.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/gpmi-nand/gpmi-regs.h
drivers/mtd/nand/lpc32xx_mlc.c
drivers/mtd/nand/lpc32xx_slc.c
drivers/mtd/nand/nand_base.c
drivers/mtd/nand/nand_bbt.c
drivers/mtd/nand/nandsim.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/nand/socrates_nand.c
drivers/mtd/nftlcore.c
drivers/mtd/onenand/omap2.c
drivers/mtd/onenand/onenand_base.c
drivers/mtd/ssfdc.c
drivers/mtd/tests/nandbiterrs.c
drivers/mtd/tests/oobtest.c
drivers/mtd/tests/pagetest.c
drivers/mtd/tests/subpagetest.c
drivers/mtd/ubi/attach.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/wl.c
drivers/net/Space.c
drivers/net/bonding/Makefile
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_alb.h
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c [new file with mode: 0644]
drivers/net/bonding/bond_options.c [new file with mode: 0644]
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/caif/caif_virtio.c
drivers/net/can/at91_can.c
drivers/net/can/bfin_can.c
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/cc770/cc770_platform.c
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/janz-ican3.c
drivers/net/can/mcp251x.c
drivers/net/can/mscan/mscan.h
drivers/net/can/pch_can.c
drivers/net/can/sja1000/ems_pci.c
drivers/net/can/sja1000/kvaser_pci.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/can/sja1000/plx_pci.c
drivers/net/can/sja1000/sja1000_platform.c
drivers/net/can/softing/softing.h
drivers/net/can/softing/softing_main.c
drivers/net/can/ti_hecc.c
drivers/net/ethernet/3com/Kconfig
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/8390/8390.h
drivers/net/ethernet/8390/Kconfig
drivers/net/ethernet/8390/Makefile
drivers/net/ethernet/8390/ax88796.c
drivers/net/ethernet/8390/ne-h8300.c [deleted file]
drivers/net/ethernet/8390/ne2k-pci.c
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/adi/bfin_mac.h
drivers/net/ethernet/amd/7990.h
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/atarilance.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/declance.c
drivers/net/ethernet/amd/lance.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/apple/bmac.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c.h
drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
drivers/net/ethernet/atheros/atl1e/atl1e.h
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl2.h
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/bnad.h
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/chelsio/cxgb/common.h
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
drivers/net/ethernet/chelsio/cxgb/pm3393.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb3/regs.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/dec/tulip/dmfe.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/dec/tulip/xircom_cb.c
drivers/net/ethernet/dlink/dl2k.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/fealnx.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/fujitsu/Kconfig
drivers/net/ethernet/hp/hp100.c
drivers/net/ethernet/i825xx/82596.c
drivers/net/ethernet/i825xx/lib82596.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/debug.h
drivers/net/ethernet/ibm/emac/rgmii.h
drivers/net/ethernet/ibm/emac/tah.h
drivers/net/ethernet/ibm/emac/zmii.h
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/icplus/ipg.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000.h
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/igb/e1000_82575.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_mac.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/igbvf.h
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/igbvf/vf.c
drivers/net/ethernet/intel/ixgb/ixgb.h
drivers/net/ethernet/intel/ixgb/ixgb_hw.h
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/korina.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/srq.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/packetengines/hamachi.c
drivers/net/ethernet/packetengines/yellowfin.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/netxen/netxen_nic.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/ef10_regs.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/io.h
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/mdio_10g.h
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/phy.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/selftest.h
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sgi/meth.c
drivers/net/ethernet/sis/sis190.c
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/smsc/smc9194.c
drivers/net/ethernet/smsc/smc91x.h
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/smsc/smsc9420.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/mmc.h
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunhme.c
drivers/net/ethernet/sun/sunqe.c
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/Makefile
drivers/net/ethernet/ti/cpsw-phy-sel.c [new file with mode: 0644]
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw.h
drivers/net/ethernet/ti/cpts.h
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/toshiba/ps3_gelic_net.h
drivers/net/ethernet/toshiba/ps3_gelic_wireless.h
drivers/net/ethernet/toshiba/spider_net.c
drivers/net/ethernet/toshiba/spider_net.h
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/fddi/skfp/fplustm.c
drivers/net/fddi/skfp/h/smc.h
drivers/net/fddi/skfp/skfddi.c
drivers/net/hamradio/baycom_ser_fdx.c
drivers/net/hamradio/baycom_ser_hdx.c
drivers/net/hamradio/scc.c
drivers/net/hamradio/yam.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/irda/bfin_sir.c
drivers/net/irda/donauboe.c
drivers/net/irda/sh_irda.c
drivers/net/irda/sh_sir.c
drivers/net/irda/sir-dev.h
drivers/net/macvlan.c
drivers/net/phy/at803x.c
drivers/net/phy/marvell.c
drivers/net/phy/micrel.c
drivers/net/plip/plip.c
drivers/net/tun.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/catc.c
drivers/net/usb/cdc-phonet.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wan/farsync.c
drivers/net/wan/hostess_sv11.c
drivers/net/wan/sealevel.c
drivers/net/wan/wanxl.c
drivers/net/wan/x25_asy.h
drivers/net/wan/z85230.h
drivers/net/wimax/i2400m/i2400m-usb.h
drivers/net/wimax/i2400m/i2400m.h
drivers/net/wireless/adm8211.c
drivers/net/wireless/airo.c
drivers/net/wireless/ath/Kconfig
drivers/net/wireless/ath/Makefile
drivers/net/wireless/ath/ar5523/ar5523.c
drivers/net/wireless/ath/ath10k/bmi.c
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htc.h
drivers/net/wireless/ath/ath10k/htt.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/mac.h
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/rx_desc.h
drivers/net/wireless/ath/ath10k/trace.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/txrx.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath5k/ahb.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath6kl/common.h
drivers/net/wireless/ath/ath6kl/debug.h
drivers/net/wireless/ath/ath6kl/htc.h
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ahb.c
drivers/net/wireless/ath/ath9k/ani.c
drivers/net/wireless/ath/ath9k/antenna.c
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_calib.c
drivers/net/wireless/ath/ath9k/ar9002_hw.c
drivers/net/wireless/ath/ath9k/ar9002_phy.c
drivers/net/wireless/ath/ath9k/ar9003_calib.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
drivers/net/wireless/ath/ath9k/ar9003_hw.c
drivers/net/wireless/ath/ath9k/ar9003_mci.c
drivers/net/wireless/ath/ath9k/ar9003_phy.c
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/ar9003_rtt.c
drivers/net/wireless/ath/ath9k/ar9485_initvals.h
drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/beacon.c
drivers/net/wireless/ath/ath9k/calib.c
drivers/net/wireless/ath/ath9k/calib.h
drivers/net/wireless/ath/ath9k/common.c
drivers/net/wireless/ath/ath9k/common.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/dfs.h
drivers/net/wireless/ath/ath9k/dfs_debug.c
drivers/net/wireless/ath/ath9k/dfs_debug.h
drivers/net/wireless/ath/ath9k/eeprom_4k.c
drivers/net/wireless/ath/ath9k/eeprom_9287.c
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/gpio.c
drivers/net/wireless/ath/ath9k/htc_drv_debug.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/hw-ops.h
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/mci.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/wmi.h
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/dfs_pattern_detector.c [moved from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c with 95% similarity]
drivers/net/wireless/ath/dfs_pattern_detector.h [moved from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h with 87% similarity]
drivers/net/wireless/ath/dfs_pri_detector.c [moved from drivers/net/wireless/ath/ath9k/dfs_pri_detector.c with 98% similarity]
drivers/net/wireless/ath/dfs_pri_detector.h [moved from drivers/net/wireless/ath/ath9k/dfs_pri_detector.h with 97% similarity]
drivers/net/wireless/ath/regd.c
drivers/net/wireless/ath/wcn36xx/Kconfig [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/Makefile [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/debug.c [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/debug.h [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/dxe.c [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/dxe.h [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/hal.h [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/main.c [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/pmc.c [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/pmc.h [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/smd.c [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/smd.h [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/txrx.c [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/txrx.h [new file with mode: 0644]
drivers/net/wireless/ath/wcn36xx/wcn36xx.h [new file with mode: 0644]
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/atmel.c
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/phy_n.c
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/b43legacy/dma.c
drivers/net/wireless/b43legacy/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
drivers/net/wireless/brcm80211/brcmfmac/dhd.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
drivers/net/wireless/brcm80211/brcmfmac/fweh.h
drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
drivers/net/wireless/brcm80211/brcmsmac/ampdu.h
drivers/net/wireless/brcm80211/brcmsmac/antsel.h
drivers/net/wireless/brcm80211/brcmsmac/channel.h
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/brcm80211/brcmsmac/main.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
drivers/net/wireless/brcm80211/brcmsmac/phy_shim.h
drivers/net/wireless/brcm80211/brcmsmac/pmu.h
drivers/net/wireless/brcm80211/brcmsmac/pub.h
drivers/net/wireless/brcm80211/brcmsmac/rate.h
drivers/net/wireless/brcm80211/brcmsmac/stf.h
drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.h
drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
drivers/net/wireless/brcm80211/include/brcmu_d11.h
drivers/net/wireless/brcm80211/include/brcmu_utils.h
drivers/net/wireless/cw1200/cw1200_spi.c
drivers/net/wireless/hostap/hostap_info.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/ipw2x00/libipw.h
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/3945.h
drivers/net/wireless/iwlegacy/4965-mac.c
drivers/net/wireless/iwlegacy/4965.h
drivers/net/wireless/iwlegacy/common.h
drivers/net/wireless/iwlwifi/dvm/agn.h
drivers/net/wireless/iwlwifi/dvm/dev.h
drivers/net/wireless/iwlwifi/dvm/rs.h
drivers/net/wireless/iwlwifi/dvm/tx.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-config.h
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-drv.c
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/iwl-prph.h
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/bt-coex.c
drivers/net/wireless/iwlwifi/mvm/constants.h
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/debugfs.c
drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/nvm.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/power.c
drivers/net/wireless/iwlwifi/mvm/quota.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rs.h
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/sta.h
drivers/net/wireless/iwlwifi/mvm/testmode.h [new file with mode: 0644]
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/time-event.h
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/mvm/utils.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/libertas/firmware.c
drivers/net/wireless/libertas/if_cs.c
drivers/net/wireless/libertas/if_sdio.c
drivers/net/wireless/libertas/if_spi.c
drivers/net/wireless/libertas/if_usb.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwifiex/wmm.h
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/orinoco.h
drivers/net/wireless/orinoco/orinoco_nortel.c
drivers/net/wireless/orinoco/orinoco_pci.c
drivers/net/wireless/orinoco/orinoco_plx.c
drivers/net/wireless/orinoco/orinoco_tmd.c
drivers/net/wireless/p54/p54pci.c
drivers/net/wireless/p54/p54spi.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/prism54/islpci_dev.c
drivers/net/wireless/prism54/oid_mgt.c
drivers/net/wireless/rt2x00/Kconfig
drivers/net/wireless/rt2x00/Makefile
drivers/net/wireless/rt2x00/rt2400pci.c
drivers/net/wireless/rt2x00/rt2800.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800mmio.c [new file with mode: 0644]
drivers/net/wireless/rt2x00/rt2800mmio.h [new file with mode: 0644]
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/wireless/rt2x00/rt2800pci.h
drivers/net/wireless/rt2x00/rt2800soc.c [new file with mode: 0644]
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00crypto.c
drivers/net/wireless/rt2x00/rt2x00debug.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00link.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00pci.c
drivers/net/wireless/rt2x00/rt2x00queue.c
drivers/net/wireless/rt2x00/rt2x00usb.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rt2x00/rt73usb.c
drivers/net/wireless/rtl818x/rtl8180/dev.c
drivers/net/wireless/rtlwifi/base.c
drivers/net/wireless/rtlwifi/base.h
drivers/net/wireless/rtlwifi/cam.h
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/efuse.c
drivers/net/wireless/rtlwifi/efuse.h
drivers/net/wireless/rtlwifi/pci.c
drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/rtlwifi/rtl8188ee/phy.c
drivers/net/wireless/rtlwifi/rtl8188ee/phy.h
drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
drivers/net/wireless/rtlwifi/rtl8192c/phy_common.h
drivers/net/wireless/rtlwifi/rtl8192ce/def.h
drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192cu/rf.h
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/rtlwifi/rtl8192de/dm.c
drivers/net/wireless/rtlwifi/rtl8192de/hw.c
drivers/net/wireless/rtlwifi/rtl8192de/hw.h
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/rtlwifi/rtl8192de/phy.h
drivers/net/wireless/rtlwifi/rtl8192de/rf.h
drivers/net/wireless/rtlwifi/rtl8192de/sw.c
drivers/net/wireless/rtlwifi/rtl8192de/trx.c
drivers/net/wireless/rtlwifi/rtl8192se/reg.h
drivers/net/wireless/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/rtlwifi/rtl8723ae/phy.c
drivers/net/wireless/rtlwifi/rtl8723ae/phy.h
drivers/net/wireless/rtlwifi/rtl8723ae/rf.h
drivers/net/wireless/rtlwifi/rtl8723ae/sw.c
drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/wireless/rtlwifi/wifi.h
drivers/net/wireless/ti/wl1251/spi.c
drivers/net/wireless/ti/wl1251/wl1251.h
drivers/net/wireless/ti/wl12xx/main.c
drivers/net/wireless/ti/wl18xx/main.c
drivers/net/wireless/ti/wl18xx/reg.h
drivers/net/wireless/ti/wlcore/cmd.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/ps.c
drivers/net/wireless/ti/wlcore/scan.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/wireless/ti/wlcore/testmode.c
drivers/net/wireless/ti/wlcore/tx.c
drivers/net/wireless/ti/wlcore/tx.h
drivers/net/wireless/ti/wlcore/wlcore.h
drivers/net/wireless/ti/wlcore/wlcore_i.h
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nfc/Kconfig
drivers/nfc/Makefile
drivers/nfc/mei_phy.c
drivers/nfc/microread/i2c.c
drivers/nfc/microread/mei.c
drivers/nfc/microread/microread.c
drivers/nfc/nfcsim.c
drivers/nfc/nfcwilink.c
drivers/nfc/pn533.c
drivers/nfc/pn544/i2c.c
drivers/nfc/pn544/pn544.c
drivers/nfc/port100.c [new file with mode: 0644]
drivers/of/Kconfig
drivers/of/Makefile
drivers/of/base.c
drivers/of/fdt.c
drivers/of/of_reserved_mem.c [deleted file]
drivers/of/platform.c
drivers/parport/Kconfig
drivers/parport/parport_pc.c
drivers/pci/host/Kconfig
drivers/pci/host/Makefile
drivers/pci/host/pci-exynos.c
drivers/pci/host/pci-imx6.c [new file with mode: 0644]
drivers/pci/host/pci-tegra.c
drivers/pci/host/pcie-designware.c
drivers/pci/host/pcie-designware.h
drivers/pci/hotplug/acpi_pcihp.c
drivers/pci/hotplug/acpiphp.h
drivers/pci/hotplug/acpiphp_core.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/acpiphp_ibm.c
drivers/pci/hotplug/s390_pci_hpc.c
drivers/pci/hotplug/shpchp.h
drivers/pci/pci-acpi.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/platform/x86/Kconfig
drivers/platform/x86/eeepc-laptop.c
drivers/platform/x86/fujitsu-laptop.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel-rst.c
drivers/platform/x86/intel-smartconnect.c
drivers/platform/x86/intel_menlow.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/topstar-laptop.c
drivers/platform/x86/toshiba_acpi.c
drivers/platform/x86/wmi.c
drivers/pnp/pnpacpi/core.c
drivers/powercap/Kconfig [new file with mode: 0644]
drivers/powercap/Makefile [new file with mode: 0644]
drivers/powercap/intel_rapl.c [new file with mode: 0644]
drivers/powercap/powercap_sys.c [new file with mode: 0644]
drivers/s390/block/dasd_eckd.c
drivers/s390/block/scm_blk.h
drivers/s390/char/monwriter.c
drivers/s390/char/raw3270.c
drivers/s390/char/sclp.c
drivers/s390/char/sclp_cmd.c
drivers/s390/char/tty3270.c
drivers/s390/char/vmlogrdr.c
drivers/s390/char/zcore.c
drivers/s390/cio/airq.c
drivers/s390/cio/cio.c
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/eadm_sch.h
drivers/s390/cio/qdio_debug.h
drivers/s390/cio/qdio_main.c
drivers/s390/crypto/zcrypt_debug.h
drivers/s390/net/claw.h
drivers/s390/net/ctcm_dbug.c
drivers/s390/net/lcs.h
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core_main.c
drivers/s390/scsi/zfcp_dbf.h
drivers/scsi/BusLogic.c
drivers/scsi/bfa/bfad.c
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/virtio_scsi.c
drivers/spi/spi-atmel.c
drivers/spi/spi-clps711x.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-mpc512x-psc.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-s3c64xx.c
drivers/spi/spi-sh-hspi.c
drivers/spi/spi.c
drivers/staging/dwc2/platform.c
drivers/staging/et131x/et131x.c
drivers/staging/imx-drm/Kconfig
drivers/staging/imx-drm/imx-drm-core.c
drivers/staging/imx-drm/ipuv3-crtc.c
drivers/staging/media/dt3155v4l/dt3155v4l.c
drivers/staging/media/msi3101/Kconfig
drivers/staging/media/msi3101/sdr-msi3101.c
drivers/staging/media/solo6x10/solo6x10-v4l2-enc.c
drivers/thermal/Kconfig
drivers/thermal/intel_powerclamp.c
drivers/thermal/samsung/exynos_thermal_common.c
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/samsung/exynos_tmu.h
drivers/thermal/samsung/exynos_tmu_data.c
drivers/thermal/samsung/exynos_tmu_data.h
drivers/thermal/thermal_hwmon.c
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
drivers/thermal/x86_pkg_temp_thermal.c
drivers/tty/hvc/hvc_opal.c
drivers/tty/hvc/hvsi_lib.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/imx.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/vt8500_serial.c
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/chipidea/host.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/gadget/lpc32xx_udc.c
drivers/usb/gadget/storage_common.c
drivers/usb/host/bcma-hcd.c
drivers/usb/host/ehci-atmel.c
drivers/usb/host/ehci-octeon.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/ehci-orion.c
drivers/usb/host/ehci-platform.c
drivers/usb/host/ehci-s5p.c
drivers/usb/host/ehci-spear.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/ohci-exynos.c
drivers/usb/host/ohci-nxp.c
drivers/usb/host/ohci-octeon.c
drivers/usb/host/ohci-omap3.c
drivers/usb/host/ohci-pxa27x.c
drivers/usb/host/ohci-sa1111.c
drivers/usb/host/ohci-spear.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/ssb-hcd.c
drivers/usb/host/uhci-platform.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/Kconfig
drivers/usb/musb/am35x.c
drivers/usb/musb/da8xx.c
drivers/usb/musb/davinci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_virthub.c
drivers/usb/musb/tusb6010.c
drivers/usb/serial/option.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/unusual_devs.h
drivers/vfio/vfio_iommu_type1.c
drivers/video/Kconfig
drivers/video/amba-clcd.c
drivers/video/backlight/backlight.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_ring.c
drivers/w1/w1.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/hpwdt.c
drivers/watchdog/kempld_wdt.c
drivers/watchdog/sunxi_wdt.c
drivers/watchdog/ts72xx_wdt.c
fs/9p/cache.c
fs/9p/cache.h
fs/9p/vfs_addr.c
fs/9p/vfs_file.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/Makefile
fs/adfs/file.c
fs/affs/file.c
fs/afs/cell.c
fs/afs/file.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/vlocation.c
fs/afs/volume.c
fs/afs/write.c
fs/aio.c
fs/bad_inode.c
fs/befs/linuxvfs.c
fs/bfs/file.c
fs/block_dev.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/root-tree.c
fs/buffer.c
fs/cachefiles/interface.c
fs/ceph/addr.c
fs/ceph/cache.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/super.c
fs/ceph/super.h
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/file.c
fs/cifs/fscache.c
fs/cifs/ioctl.c
fs/cifs/link.c
fs/cifs/netmisc.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smbfsctl.h
fs/cifs/transport.c
fs/dcache.c
fs/direct-io.c
fs/dlm/lockspace.c
fs/ecryptfs/dentry.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/main.c
fs/exofs/file.c
fs/ext2/file.c
fs/ext2/inode.c
fs/ext3/file.c
fs/ext3/inode.c
fs/ext3/namei.c
fs/ext4/ext4.h
fs/ext4/file.c
fs/ext4/indirect.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/fat/file.c
fs/fat/inode.c
fs/fscache/cookie.c
fs/fscache/fsdef.c
fs/fscache/netfs.c
fs/fscache/object.c
fs/fscache/page.c
fs/fuse/cuse.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/aops.c
fs/gfs2/bmap.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glock.h
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/ops_fstype.c
fs/gfs2/quota.c
fs/gfs2/rgrp.c
fs/gfs2/rgrp.h
fs/gfs2/super.c
fs/gfs2/sys.c
fs/gfs2/util.c
fs/gfs2/util.h
fs/gfs2/xattr.c
fs/hfs/inode.c
fs/hfsplus/inode.c
fs/hostfs/hostfs_kern.c
fs/hpfs/file.c
fs/internal.h
fs/iov-iter.c [new file with mode: 0644]
fs/jffs2/file.c
fs/jffs2/fs.c
fs/jfs/file.c
fs/jfs/inode.c
fs/jfs/jfs_inode.c
fs/libfs.c
fs/logfs/dev_mtd.c
fs/logfs/file.c
fs/logfs/super.c
fs/minix/Kconfig
fs/minix/file.c
fs/namei.c
fs/namespace.c
fs/ncpfs/dir.c
fs/ncpfs/file.c
fs/nfs/client.c
fs/nfs/dir.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/fscache.c
fs/nfs/fscache.h
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/namespace.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4client.c
fs/nfs/nfs4file.c
fs/nfs/nfs4namespace.c
fs/nfs/nfs4proc.c
fs/nfs/proc.c
fs/nfs/unlink.c
fs/nfs/write.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsfh.c
fs/nfsd/nfsfh.h
fs/nfsd/vfs.c
fs/nilfs2/file.c
fs/nilfs2/inode.c
fs/ocfs2/aops.c
fs/ocfs2/aops.h
fs/ocfs2/file.c
fs/ocfs2/ocfs2_trace.h
fs/omfs/file.c
fs/proc/inode.c
fs/proc/self.c
fs/proc/task_mmu.c
fs/ramfs/file-mmu.c
fs/ramfs/file-nommu.c
fs/read_write.c
fs/reiserfs/file.c
fs/reiserfs/inode.c
fs/romfs/mmap-nommu.c
fs/statfs.c
fs/sysv/file.c
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/gc.c
fs/ubifs/journal.c
fs/ubifs/xattr.c
fs/udf/file.c
fs/udf/inode.c
fs/ufs/file.c
fs/xfs/Makefile
fs/xfs/xfs_acl.c
fs/xfs/xfs_ag.h
fs/xfs/xfs_alloc.c
fs/xfs/xfs_alloc.h
fs/xfs/xfs_alloc_btree.c
fs/xfs/xfs_alloc_btree.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr_inactive.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_attr_leaf.h
fs/xfs/xfs_attr_list.c
fs/xfs/xfs_attr_remote.c
fs/xfs/xfs_attr_remote.h
fs/xfs/xfs_bit.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap_btree.c
fs/xfs/xfs_bmap_btree.h
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_bmap_util.h
fs/xfs/xfs_btree.c
fs/xfs/xfs_btree.h
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_buf_item.h
fs/xfs/xfs_da_btree.c
fs/xfs/xfs_da_btree.h
fs/xfs/xfs_da_format.h [moved from fs/xfs/xfs_dir2_format.h with 65% similarity]
fs/xfs/xfs_dir2.c
fs/xfs/xfs_dir2_block.c
fs/xfs/xfs_dir2_data.c
fs/xfs/xfs_dir2_leaf.c
fs/xfs/xfs_dir2_node.c
fs/xfs/xfs_dir2_readdir.c
fs/xfs/xfs_dir2_sf.c
fs/xfs/xfs_discard.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_dquot.h
fs/xfs/xfs_dquot_buf.c [new file with mode: 0644]
fs/xfs/xfs_dquot_item.c
fs/xfs/xfs_error.c
fs/xfs/xfs_export.c
fs/xfs/xfs_extent_busy.c
fs/xfs/xfs_extent_busy.h
fs/xfs/xfs_extfree_item.c
fs/xfs/xfs_file.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_format.h
fs/xfs/xfs_fs.h
fs/xfs/xfs_fsops.c
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_ialloc.h
fs/xfs/xfs_ialloc_btree.c
fs/xfs/xfs_ialloc_btree.h
fs/xfs/xfs_icache.c
fs/xfs/xfs_icreate_item.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_buf.c
fs/xfs/xfs_inode_buf.h
fs/xfs/xfs_inode_fork.c
fs/xfs/xfs_inode_fork.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_ioctl32.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iomap.h
fs/xfs/xfs_iops.c
fs/xfs/xfs_iops.h
fs/xfs/xfs_itable.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log.h
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_format.h
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_log_rlimit.c
fs/xfs/xfs_message.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_qm.h
fs/xfs/xfs_qm_bhv.c
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quota.h
fs/xfs/xfs_quota_defs.h
fs/xfs/xfs_quotaops.c
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_rtalloc.h
fs/xfs/xfs_rtbitmap.c [new file with mode: 0644]
fs/xfs/xfs_sb.c
fs/xfs/xfs_sb.h
fs/xfs/xfs_shared.h [new file with mode: 0644]
fs/xfs/xfs_super.c
fs/xfs/xfs_symlink.c
fs/xfs/xfs_symlink.h
fs/xfs/xfs_symlink_remote.c
fs/xfs/xfs_trace.c
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans.h
fs/xfs/xfs_trans_ail.c
fs/xfs/xfs_trans_buf.c
fs/xfs/xfs_trans_dquot.c
fs/xfs/xfs_trans_extfree.c
fs/xfs/xfs_trans_inode.c
fs/xfs/xfs_trans_priv.h
fs/xfs/xfs_trans_resv.c
fs/xfs/xfs_vnode.h
fs/xfs/xfs_xattr.c
include/acpi/acexcep.h
include/acpi/acpi_bus.h
include/acpi/acpixf.h
include/acpi/actypes.h
include/acpi/platform/aclinux.h
include/acpi/processor.h
include/asm-generic/simd.h [new file with mode: 0644]
include/asm-generic/vmlinux.lds.h
include/crypto/ablk_helper.h [moved from arch/x86/include/asm/crypto/ablk_helper.h with 100% similarity]
include/crypto/algapi.h
include/crypto/authenc.h
include/drm/drmP.h
include/drm/drm_crtc.h
include/drm/drm_dp_helper.h
include/drm/drm_panel.h [new file with mode: 0644]
include/dt-bindings/pinctrl/omap.h
include/linux/acpi.h
include/linux/aio.h
include/linux/amba/bus.h
include/linux/backlight.h
include/linux/bio.h
include/linux/bitops.h
include/linux/blk_types.h
include/linux/compiler-gcc4.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/dcache.h
include/linux/devfreq.h
include/linux/dma-mapping.h
include/linux/dmaengine.h
include/linux/etherdevice.h
include/linux/fcdevice.h
include/linux/fddidevice.h
include/linux/filter.h
include/linux/fs.h
include/linux/fscache-cache.h
include/linux/fscache.h
include/linux/hashtable.h
include/linux/hippidevice.h
include/linux/host1x.h [new file with mode: 0644]
include/linux/hwmon-vid.h
include/linux/hwmon.h
include/linux/i2c.h
include/linux/ieee80211.h
include/linux/inetdevice.h
include/linux/ipv6.h
include/linux/irqchip/arm-gic.h
include/linux/jump_label.h
include/linux/jump_label_ratelimit.h
include/linux/lockref.h
include/linux/memcontrol.h
include/linux/mfd/mc13xxx.h
include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
include/linux/miscdevice.h
include/linux/mlx4/cmd.h
include/linux/mlx4/device.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mm.h
include/linux/module.h
include/linux/mtd/map.h
include/linux/mtd/mtd.h
include/linux/mtd/nand.h
include/linux/net.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/ipset/ip_set_comment.h [new file with mode: 0644]
include/linux/netfilter/ipset/ip_set_timeout.h
include/linux/netfilter/nf_conntrack_common.h
include/linux/netfilter/nf_conntrack_h323.h
include/linux/netfilter/nf_conntrack_proto_gre.h
include/linux/netfilter/nf_conntrack_sip.h
include/linux/netfilter/nfnetlink.h
include/linux/netfilter/nfnetlink_acct.h
include/linux/netfilter/x_tables.h
include/linux/netfilter_bridge.h
include/linux/netfilter_ipv4.h
include/linux/netfilter_ipv6.h
include/linux/nfs_fs.h
include/linux/nfs_fs_sb.h
include/linux/of_mtd.h
include/linux/of_reserved_mem.h [deleted file]
include/linux/opp.h [deleted file]
include/linux/page-flags.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/platform_data/at24.h [moved from include/linux/i2c/at24.h with 97% similarity]
include/linux/platform_data/davinci_asp.h
include/linux/pm_opp.h [new file with mode: 0644]
include/linux/powercap.h [new file with mode: 0644]
include/linux/random.h
include/linux/regmap.h
include/linux/sched.h
include/linux/serial_sci.h
include/linux/skbuff.h
include/linux/ssb/ssb_driver_gige.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/sched.h
include/linux/sunrpc/xprt.h
include/linux/thinkpad_acpi.h [new file with mode: 0644]
include/linux/timex.h
include/linux/usb/usb_phy_gen_xceiv.h
include/linux/usb_usual.h
include/linux/vgaarb.h
include/linux/virtio_config.h
include/linux/yam.h
include/media/v4l2-common.h
include/media/videobuf2-core.h
include/media/videobuf2-dma-sg.h
include/net/bluetooth/bluetooth.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bluetooth/mgmt.h
include/net/bluetooth/rfcomm.h
include/net/bluetooth/sco.h
include/net/caif/caif_hsi.h
include/net/cfg80211.h
include/net/cipso_ipv4.h
include/net/compat.h
include/net/dcbevent.h
include/net/dn.h
include/net/dn_dev.h
include/net/dn_fib.h
include/net/dn_neigh.h
include/net/dn_nsp.h
include/net/dn_route.h
include/net/dst.h
include/net/esp.h
include/net/fib_rules.h
include/net/flow.h
include/net/flow_keys.h
include/net/garp.h
include/net/gen_stats.h
include/net/genetlink.h
include/net/gre.h
include/net/icmp.h
include/net/inet6_connection_sock.h
include/net/inet6_hashtables.h
include/net/inet_common.h
include/net/inet_connection_sock.h
include/net/inet_frag.h
include/net/inet_hashtables.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/inetpeer.h
include/net/ip.h
include/net/ip6_checksum.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/ip_fib.h
include/net/ip_tunnels.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/ipx.h
include/net/irda/ircomm_tty.h
include/net/irda/irda.h
include/net/irda/irda_device.h
include/net/irda/irlap_event.h
include/net/irda/irlap_frame.h
include/net/iw_handler.h
include/net/lapb.h
include/net/llc.h
include/net/llc_c_ac.h
include/net/llc_c_ev.h
include/net/llc_conn.h
include/net/llc_if.h
include/net/llc_pdu.h
include/net/llc_s_ac.h
include/net/llc_s_ev.h
include/net/llc_sap.h
include/net/mac80211.h
include/net/mac802154.h
include/net/mrp.h
include/net/ndisc.h
include/net/net_namespace.h
include/net/netevent.h
include/net/netfilter/ipv4/nf_conntrack_ipv4.h
include/net/netfilter/ipv4/nf_defrag_ipv4.h
include/net/netfilter/ipv6/nf_defrag_ipv6.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_acct.h
include/net/netfilter/nf_conntrack_core.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_conntrack_extend.h
include/net/netfilter/nf_conntrack_helper.h
include/net/netfilter/nf_conntrack_l3proto.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netfilter/nf_conntrack_seqadj.h
include/net/netfilter/nf_conntrack_synproxy.h
include/net/netfilter/nf_conntrack_timeout.h
include/net/netfilter/nf_conntrack_timestamp.h
include/net/netfilter/nf_nat.h
include/net/netfilter/nf_nat_core.h
include/net/netfilter/nf_nat_helper.h
include/net/netfilter/nf_nat_l3proto.h
include/net/netfilter/nf_nat_l4proto.h
include/net/netfilter/nf_queue.h
include/net/netfilter/nf_tables.h [new file with mode: 0644]
include/net/netfilter/nf_tables_core.h [new file with mode: 0644]
include/net/netfilter/nf_tables_ipv4.h [new file with mode: 0644]
include/net/netfilter/nf_tables_ipv6.h [new file with mode: 0644]
include/net/netfilter/xt_rateest.h
include/net/netlink.h
include/net/netns/ipv4.h
include/net/netns/nftables.h [new file with mode: 0644]
include/net/netrom.h
include/net/nfc/digital.h [new file with mode: 0644]
include/net/nfc/hci.h
include/net/nfc/nci.h
include/net/nfc/nci_core.h
include/net/nfc/nfc.h
include/net/p8022.h
include/net/ping.h
include/net/protocol.h
include/net/psnap.h
include/net/raw.h
include/net/rawv6.h
include/net/request_sock.h
include/net/rose.h
include/net/route.h
include/net/rtnetlink.h
include/net/sch_generic.h
include/net/scm.h
include/net/sctp/sctp.h
include/net/secure_seq.h
include/net/sock.h
include/net/stp.h
include/net/tcp.h
include/net/tcp_memcontrol.h
include/net/udp.h
include/net/udplite.h
include/net/wext.h
include/net/wimax.h
include/net/x25.h
include/net/xfrm.h
include/sound/cs42l73.h [new file with mode: 0644]
include/sound/dmaengine_pcm.h
include/sound/rcar_snd.h
include/sound/soc-dai.h
include/sound/soc-dapm.h
include/sound/soc.h
include/trace/events/asoc.h
include/trace/events/power_cpu_migrate.h [new file with mode: 0644]
include/uapi/asm-generic/socket.h
include/uapi/drm/armada_drm.h [new file with mode: 0644]
include/uapi/drm/drm.h
include/uapi/drm/drm_mode.h
include/uapi/drm/i915_drm.h
include/uapi/drm/tegra_drm.h
include/uapi/linux/audit.h
include/uapi/linux/can/bcm.h
include/uapi/linux/can/error.h
include/uapi/linux/can/gw.h
include/uapi/linux/can/netlink.h
include/uapi/linux/can/raw.h
include/uapi/linux/elf-em.h
include/uapi/linux/if_bonding.h
include/uapi/linux/if_link.h
include/uapi/linux/loop.h
include/uapi/linux/major.h
include/uapi/linux/netfilter/Kbuild
include/uapi/linux/netfilter/ipset/ip_set.h
include/uapi/linux/netfilter/nf_conntrack_common.h
include/uapi/linux/netfilter/nf_tables.h [new file with mode: 0644]
include/uapi/linux/netfilter/nf_tables_compat.h [new file with mode: 0644]
include/uapi/linux/netfilter/nfnetlink.h
include/uapi/linux/netfilter/nfnetlink_cttimeout.h
include/uapi/linux/nfc.h
include/uapi/linux/pci_regs.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/tc_act/Kbuild
include/uapi/linux/tc_act/tc_defact.h [moved from include/linux/tc_act/tc_defact.h with 75% similarity]
include/uapi/mtd/mtd-abi.h
include/uapi/rdma/ib_user_verbs.h
include/uapi/sound/Kbuild
include/uapi/sound/asound.h
include/uapi/sound/firewire.h [new file with mode: 0644]
include/xen/interface/io/netif.h
init/Kconfig
init/main.c
ipc/sem.c
ipc/util.c
kernel/cgroup.c
kernel/events/core.c
kernel/jump_label.c
kernel/module.c
kernel/power/Kconfig
kernel/power/qos.c
kernel/power/user.c
lib/kobject.c
lib/lockref.c
lib/percpu-refcount.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/migrate.c
mm/mprotect.c
mm/mremap.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_io.c
mm/shmem.c
mm/slab_common.c
mm/swapfile.c
mm/vmscan.c
mm/zswap.c
net/8021q/vlan.c
net/8021q/vlan.h
net/8021q/vlan_netlink.c
net/9p/trans_virtio.c
net/ax25/af_ax25.c
net/batman-adv/Makefile
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c [new file with mode: 0644]
net/batman-adv/fragmentation.h [new file with mode: 0644]
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/gateway_common.h
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/icmp_socket.c
net/batman-adv/icmp_socket.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/network-coding.c
net/batman-adv/network-coding.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/sysfs.c
net/batman-adv/sysfs.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/batman-adv/unicast.c [deleted file]
net/batman-adv/unicast.h [deleted file]
net/batman-adv/vis.c [deleted file]
net/batman-adv/vis.h [deleted file]
net/bluetooth/Makefile
net/bluetooth/a2mp.c
net/bluetooth/a2mp.h [moved from include/net/bluetooth/a2mp.h with 100% similarity]
net/bluetooth/af_bluetooth.c
net/bluetooth/amp.c
net/bluetooth/amp.h [moved from include/net/bluetooth/amp.h with 100% similarity]
net/bluetooth/bnep/core.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/hci_sysfs.c
net/bluetooth/hidp/core.c
net/bluetooth/hidp/hidp.h
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/rfcomm/core.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bluetooth/smp.c
net/bluetooth/smp.h [moved from include/net/bluetooth/smp.h with 100% similarity]
net/bridge/br_fdb.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/br_private_stp.h
net/bridge/br_stp_if.c
net/bridge/br_vlan.c
net/bridge/netfilter/Kconfig
net/bridge/netfilter/Makefile
net/bridge/netfilter/ebt_among.c
net/bridge/netfilter/ebt_ulog.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/nf_tables_bridge.c [new file with mode: 0644]
net/can/af_can.h
net/ceph/auth_none.h
net/ceph/auth_x.h
net/ceph/crypto.h
net/compat.c
net/core/dev.c
net/core/ethtool.c
net/core/filter.c
net/core/flow_dissector.c
net/core/neighbour.c
net/core/netprio_cgroup.c
net/core/rtnetlink.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/core/utils.c
net/dccp/ackvec.h
net/dccp/ccid.h
net/dccp/ccids/lib/loss_interval.h
net/dccp/ccids/lib/packet_history.h
net/dccp/ccids/lib/tfrc.h
net/dccp/dccp.h
net/dccp/feat.h
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/ipv6.h
net/dccp/minisocks.c
net/dccp/output.c
net/dccp/proto.c
net/decnet/netfilter/dn_rtmsg.c
net/ethernet/eth.c
net/ieee802154/6lowpan.c
net/ipv4/af_inet.c
net/ipv4/fib_frontend.c
net/ipv4/fib_lookup.h
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/gre_demux.c
net/ipv4/gre_offload.c
net/ipv4/icmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_fragment.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_fragment.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_nat.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_tables_arp.c [new file with mode: 0644]
net/ipv4/netfilter/nf_tables_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nft_chain_nat_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nft_chain_route_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nft_reject_ipv4.c [new file with mode: 0644]
net/ipv4/ping.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_memcontrol.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_output.c
net/ipv4/tcp_probe.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_vegas.h
net/ipv4/udp.c
net/ipv4/udp_impl.h
net/ipv4/udp_offload.c
net/ipv4/xfrm4_mode_tunnel.c
net/ipv4/xfrm4_policy.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/af_inet6.c
net/ipv6/ah6.c
net/ipv6/datagram.c
net/ipv6/esp6.c
net/ipv6/inet6_connection_sock.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c [new file with mode: 0644]
net/ipv6/ipcomp6.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_nat.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_tables_ipv6.c [new file with mode: 0644]
net/ipv6/netfilter/nft_chain_nat_ipv6.c [new file with mode: 0644]
net/ipv6/netfilter/nft_chain_route_ipv6.c [new file with mode: 0644]
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/tcpv6_offload.c
net/ipv6/udp.c
net/ipv6/udp_impl.h
net/ipv6/udp_offload.c
net/ipv6/xfrm6_mode_tunnel.c
net/ipv6/xfrm6_policy.c
net/irda/irnet/irnet.h
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/l2tp/l2tp_ppp.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/driver-ops.h
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rate.c
net/mac80211/rate.h
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rc80211_pid_debugfs.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/spectmgmt.c
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mpls/mpls_gso.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipset/Kconfig
net/netfilter/ipset/Makefile
net/netfilter/ipset/ip_set_bitmap_gen.h
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_getport.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c [new file with mode: 0644]
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/nf_conntrack_h323_main.c
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_internals.h
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_sip.c
net/netfilter/nf_tables_api.c [new file with mode: 0644]
net/netfilter/nf_tables_core.c [new file with mode: 0644]
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/nft_bitwise.c [new file with mode: 0644]
net/netfilter/nft_byteorder.c [new file with mode: 0644]
net/netfilter/nft_cmp.c [new file with mode: 0644]
net/netfilter/nft_compat.c [new file with mode: 0644]
net/netfilter/nft_counter.c [new file with mode: 0644]
net/netfilter/nft_ct.c [new file with mode: 0644]
net/netfilter/nft_expr_template.c [new file with mode: 0644]
net/netfilter/nft_exthdr.c [new file with mode: 0644]
net/netfilter/nft_hash.c [new file with mode: 0644]
net/netfilter/nft_immediate.c [new file with mode: 0644]
net/netfilter/nft_limit.c [new file with mode: 0644]
net/netfilter/nft_log.c [new file with mode: 0644]
net/netfilter/nft_lookup.c [new file with mode: 0644]
net/netfilter/nft_meta.c [new file with mode: 0644]
net/netfilter/nft_meta_target.c [new file with mode: 0644]
net/netfilter/nft_nat.c [new file with mode: 0644]
net/netfilter/nft_payload.c [new file with mode: 0644]
net/netfilter/nft_rbtree.c [new file with mode: 0644]
net/netfilter/x_tables.c
net/netfilter/xt_TCPMSS.c
net/netfilter/xt_TPROXY.c
net/netfilter/xt_set.c
net/netfilter/xt_socket.c
net/netlabel/netlabel_kapi.c
net/nfc/Kconfig
net/nfc/Makefile
net/nfc/core.c
net/nfc/digital.h [new file with mode: 0644]
net/nfc/digital_core.c [new file with mode: 0644]
net/nfc/digital_dep.c [new file with mode: 0644]
net/nfc/digital_technology.c [new file with mode: 0644]
net/nfc/nci/spi.c
net/nfc/netlink.c
net/nfc/rawsock.c
net/openvswitch/vport-vxlan.c
net/rds/connection.c
net/rds/rds.h
net/rxrpc/ar-internal.h
net/sched/act_police.c
net/sched/cls_basic.c
net/sched/cls_cgroup.c
net/sched/em_ipset.c
net/sched/em_meta.c
net/sched/sch_api.c
net/sched/sch_fq.c
net/sched/sch_generic.c
net/sched/sch_htb.c
net/sched/sch_netem.c
net/sched/sch_tbf.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/socket.c
net/socket.c
net/sunrpc/clnt.c
net/sunrpc/rpc_pipe.c
net/sunrpc/svcsock.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/core.h
net/tipc/eth_media.c
net/tipc/ib_media.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/port.c
net/tipc/port.h
net/tipc/socket.c
net/unix/af_unix.c
net/unix/diag.c
net/wimax/wimax-internal.h
net/wireless/chan.c
net/wireless/core.c
net/wireless/core.h
net/wireless/debugfs.c
net/wireless/genregdb.awk
net/wireless/ibss.c
net/wireless/nl80211.c
net/wireless/radiotap.c
net/wireless/reg.c
net/wireless/sysfs.h
net/wireless/util.c
net/xfrm/xfrm_hash.h
net/xfrm/xfrm_ipcomp.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
scripts/Makefile.modpost
scripts/coccinelle/api/devm_request_and_ioremap.cocci [deleted file]
scripts/mod/modpost.c
scripts/show_delta
scripts/tags.sh
security/apparmor/apparmorfs.c
security/apparmor/policy.c
security/lsm_audit.c
security/selinux/hooks.c
sound/arm/pxa2xx-ac97-lib.c
sound/arm/pxa2xx-pcm.c
sound/firewire/Kconfig
sound/firewire/Makefile
sound/firewire/amdtp.c
sound/firewire/amdtp.h
sound/firewire/cmp.c
sound/firewire/dice-interface.h [new file with mode: 0644]
sound/firewire/dice.c [new file with mode: 0644]
sound/firewire/fcp.c
sound/firewire/isight.c
sound/firewire/lib.c
sound/firewire/lib.h
sound/firewire/scs1x.c
sound/firewire/speakers.c
sound/oss/sb_ess.c
sound/pci/asihpi/asihpi.c
sound/pci/au88x0/au88x0_pcm.c
sound/pci/au88x0/au88x0_synth.c
sound/pci/azt3328.c
sound/pci/ctxfi/ctdaio.c
sound/pci/emu10k1/emufx.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_eld.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_local.h
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/ppc/keywest.c
sound/soc/Makefile
sound/soc/atmel/atmel-pcm.c
sound/soc/atmel/atmel_ssc_dai.c
sound/soc/atmel/atmel_wm8904.c
sound/soc/atmel/sam9g20_wm8731.c
sound/soc/blackfin/bf5xx-ac97-pcm.c
sound/soc/blackfin/bf5xx-i2s-pcm.c
sound/soc/cirrus/Kconfig
sound/soc/cirrus/ep93xx-pcm.c
sound/soc/codecs/88pm860x-codec.c
sound/soc/codecs/88pm860x-codec.h
sound/soc/codecs/ab8500-codec.c
sound/soc/codecs/adau1373.c
sound/soc/codecs/adav80x.c
sound/soc/codecs/ak4104.c
sound/soc/codecs/ak4641.c
sound/soc/codecs/ak4642.c
sound/soc/codecs/arizona.c
sound/soc/codecs/cq93vc.c
sound/soc/codecs/cs4271.c
sound/soc/codecs/cs42l73.c
sound/soc/codecs/cs42l73.h
sound/soc/codecs/max98088.c
sound/soc/codecs/max98095.c
sound/soc/codecs/max9850.c
sound/soc/codecs/mc13783.c
sound/soc/codecs/pcm1681.c
sound/soc/codecs/pcm1792a.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/si476x.c
sound/soc/codecs/sn95031.c
sound/soc/codecs/tas5086.c
sound/soc/codecs/tlv320aic23.c
sound/soc/codecs/tlv320aic26.c
sound/soc/codecs/tlv320aic26.h
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/twl4030.c
sound/soc/codecs/twl6040.c
sound/soc/codecs/wm8400.c
sound/soc/codecs/wm8962.c
sound/soc/davinci/Kconfig
sound/soc/davinci/Makefile
sound/soc/davinci/davinci-evm.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/davinci/davinci-mcasp.h
sound/soc/davinci/davinci-pcm.c
sound/soc/fsl/fsl_dma.c
sound/soc/fsl/fsl_spdif.c
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/imx-audmux.c
sound/soc/fsl/imx-mc13783.c
sound/soc/fsl/imx-pcm-fiq.c
sound/soc/fsl/imx-sgtl5000.c
sound/soc/fsl/imx-spdif.c
sound/soc/fsl/imx-ssi.c
sound/soc/fsl/imx-ssi.h
sound/soc/fsl/imx-wm8962.c
sound/soc/fsl/mpc5200_dma.c
sound/soc/generic/simple-card.c
sound/soc/jz4740/jz4740-pcm.c
sound/soc/kirkwood/kirkwood-dma.c
sound/soc/kirkwood/kirkwood-i2s.c
sound/soc/kirkwood/kirkwood.h
sound/soc/mid-x86/mfld_machine.c
sound/soc/mxs/mxs-saif.c
sound/soc/mxs/mxs-saif.h
sound/soc/mxs/mxs-sgtl5000.c
sound/soc/nuc900/nuc900-pcm.c
sound/soc/omap/Kconfig
sound/soc/omap/omap-mcpdm.c
sound/soc/omap/omap-pcm.c
sound/soc/omap/omap-twl4030.c
sound/soc/pxa/brownstone.c
sound/soc/pxa/corgi.c
sound/soc/pxa/e740_wm9705.c
sound/soc/pxa/e750_wm9705.c
sound/soc/pxa/e800_wm9712.c
sound/soc/pxa/imote2.c
sound/soc/pxa/mioa701_wm9713.c
sound/soc/pxa/mmp-sspa.c
sound/soc/pxa/palm27x.c
sound/soc/pxa/poodle.c
sound/soc/pxa/pxa2xx-ac97.c
sound/soc/pxa/pxa2xx-pcm.c
sound/soc/pxa/tosa.c
sound/soc/pxa/ttc-dkb.c
sound/soc/s6000/s6000-pcm.c
sound/soc/samsung/bells.c
sound/soc/samsung/dma.c
sound/soc/samsung/i2s.c
sound/soc/samsung/idma.c
sound/soc/samsung/smdk_wm8994.c
sound/soc/sh/rcar/adg.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/gen.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/scu.c
sound/soc/sh/rcar/ssi.c
sound/soc/soc-cache.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-devres.c [new file with mode: 0644]
sound/soc/soc-generic-dmaengine-pcm.c
sound/soc/soc-io.c
sound/soc/soc-jack.c
sound/soc/soc-pcm.c
sound/soc/soc-utils.c
sound/soc/spear/spdif_in.c
sound/soc/spear/spdif_out.c
sound/soc/tegra/tegra20_i2s.c
sound/soc/tegra/tegra20_spdif.c
sound/soc/tegra/tegra30_ahub.c
sound/soc/tegra/tegra30_ahub.h
sound/soc/tegra/tegra30_i2s.c
sound/soc/tegra/tegra30_i2s.h
sound/soc/tegra/tegra_asoc_utils.c
sound/soc/tegra/tegra_asoc_utils.h
sound/soc/tegra/tegra_pcm.c
sound/usb/caiaq/control.c
sound/usb/caiaq/device.c
sound/usb/caiaq/device.h
sound/usb/card.c
sound/usb/card.h
sound/usb/endpoint.c
sound/usb/endpoint.h
sound/usb/helper.c
sound/usb/mixer.c
sound/usb/pcm.c
sound/usb/usbaudio.h
sound/usb/usx2y/us122l.c
sound/usb/usx2y/usbusx2yaudio.c
sound/usb/usx2y/usx2yhwdeppcm.c
tools/perf/Makefile
tools/perf/arch/arm/Makefile
tools/perf/arch/arm/include/perf_regs.h [new file with mode: 0644]
tools/perf/arch/arm/util/unwind.c [new file with mode: 0644]
tools/perf/builtin-stat.c
tools/perf/config/Makefile
tools/perf/config/feature-tests.mak
tools/perf/util/dwarf-aux.c
tools/perf/util/dwarf-aux.h
tools/perf/util/header.c
tools/perf/util/probe-finder.c
tools/perf/util/session.c
tools/perf/util/unwind.c
tools/power/x86/turbostat/turbostat.c
tools/testing/ktest/examples/crosstests.conf
tools/testing/selftests/timers/posix_timers.c

index 2be603c52a240fa55f1a630d4f3567cc9c134d19..a6b68572474062bf04329a434cb27bbf825d19b8 100644 (file)
@@ -37,8 +37,8 @@ Description:
                that the USB device has been connected to the machine.  This
                file is read-only.
 Users:
-               PowerTOP <power@bughost.org>
-               http://www.lesswatts.org/projects/powertop/
+               PowerTOP <powertop@lists.01.org>
+               https://01.org/powertop/
 
 What:          /sys/bus/usb/device/.../power/active_duration
 Date:          January 2008
@@ -57,8 +57,8 @@ Description:
                will give an integer percentage.  Note that this does not
                account for counter wrap.
 Users:
-               PowerTOP <power@bughost.org>
-               http://www.lesswatts.org/projects/powertop/
+               PowerTOP <powertop@lists.01.org>
+               https://01.org/powertop/
 
 What:          /sys/bus/usb/devices/<busnum>-<port[.port]>...:<config num>-<interface num>/supports_autosuspend
 Date:          January 2008
index bfd119ace6ad00c2ee56c4c16b25a78ec6b5d7f0..1399bb2da3ebf762ffd0c392c07121d1687ee1dd 100644 (file)
@@ -104,7 +104,7 @@ Description:
                One of the following ASCII strings, representing the device
                type:
 
-               absent, ram, rom, nor, nand, dataflash, ubi, unknown
+               absent, ram, rom, nor, nand, mlc-nand, dataflash, ubi, unknown
 
 What:          /sys/class/mtd/mtdX/writesize
 Date:          April 2009
index bdc00707c751b955d997aa7ce0c51bd8b0222ca5..7f34a95bb9634c9d8d1d9505d31b68c36bc58f76 100644 (file)
@@ -1,13 +1,13 @@
 
 What:           /sys/class/net/<iface>/batman-adv/iface_status
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Indicates the status of <iface> as it is seen by batman.
 
 What:           /sys/class/net/<iface>/batman-adv/mesh_iface
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 The /sys/class/net/<iface>/batman-adv/mesh_iface file
                 displays the batman mesh interface this <iface>
index bdcd8b4e38f2dd404acdc36bcd123f72ff5d2ed3..0baa657b18c4719048bf7048affea6fba8f79221 100644 (file)
@@ -1,22 +1,23 @@
 
 What:           /sys/class/net/<mesh_iface>/mesh/aggregated_ogms
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Indicates whether the batman protocol messages of the
                 mesh <mesh_iface> shall be aggregated or not.
 
-What:           /sys/class/net/<mesh_iface>/mesh/ap_isolation
+What:           /sys/class/net/<mesh_iface>/mesh/<vlan_subdir>/ap_isolation
 Date:           May 2011
-Contact:        Antonio Quartulli <ordex@autistici.org>
+Contact:        Antonio Quartulli <antonio@meshcoding.com>
 Description:
                 Indicates whether the data traffic going from a
                 wireless client to another wireless client will be
-                silently dropped.
+                silently dropped. <vlan_subdir> is empty when referring
+                to the untagged LAN.
 
 What:           /sys/class/net/<mesh_iface>/mesh/bonding
 Date:           June 2010
-Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Contact:        Simon Wunderlich <sw@simonwunderlich.de>
 Description:
                 Indicates whether the data traffic going through the
                 mesh will be sent using multiple interfaces at the
@@ -24,7 +25,7 @@ Description:
 
 What:           /sys/class/net/<mesh_iface>/mesh/bridge_loop_avoidance
 Date:           November 2011
-Contact:        Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
+Contact:        Simon Wunderlich <sw@simonwunderlich.de>
 Description:
                 Indicates whether the bridge loop avoidance feature
                 is enabled. This feature detects and avoids loops
@@ -41,21 +42,21 @@ Description:
 
 What:           /sys/class/net/<mesh_iface>/mesh/gw_bandwidth
 Date:           October 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the bandwidth which is propagated by this
                 node if gw_mode was set to 'server'.
 
 What:           /sys/class/net/<mesh_iface>/mesh/gw_mode
 Date:           October 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the state of the gateway features. Can be
                 either 'off', 'client' or 'server'.
 
 What:           /sys/class/net/<mesh_iface>/mesh/gw_sel_class
 Date:           October 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the selection criteria this node will use
                 to choose a gateway if gw_mode was set to 'client'.
@@ -77,25 +78,14 @@ Description:
 
 What:           /sys/class/net/<mesh_iface>/mesh/orig_interval
 Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the interval in milliseconds in which batman
                 sends its protocol messages.
 
 What:           /sys/class/net/<mesh_iface>/mesh/routing_algo
 Date:           Dec 2011
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
+Contact:        Marek Lindner <mareklindner@neomailbox.ch>
 Description:
                 Defines the routing protocol this mesh instance
                 uses to find the optimal paths through the mesh.
-
-What:           /sys/class/net/<mesh_iface>/mesh/vis_mode
-Date:           May 2010
-Contact:        Marek Lindner <lindner_marek@yahoo.de>
-Description:
-                Each batman node only maintains information about its
-                own local neighborhood, therefore generating graphs
-                showing the topology of the entire mesh is not easily
-                feasible without having a central instance to collect
-                the local topologies from all nodes. This file allows
-                to activate the collecting (server) mode.
diff --git a/Documentation/ABI/testing/sysfs-class-powercap b/Documentation/ABI/testing/sysfs-class-powercap
new file mode 100644 (file)
index 0000000..db3b3ff
--- /dev/null
@@ -0,0 +1,152 @@
+What:          /sys/class/powercap/
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               The powercap/ class sub directory belongs to the power cap
+               subsystem. Refer to
+               Documentation/power/powercap/powercap.txt for details.
+
+What:          /sys/class/powercap/<control type>
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               A <control type> is a unique name under /sys/class/powercap.
+               Here <control type> determines how the power is going to be
+               controlled. A <control type> can contain multiple power zones.
+
+What:          /sys/class/powercap/<control type>/enabled
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               This allows enabling/disabling power capping for a "control type".
+               This status affects every power zone using this "control type".
+
+What:          /sys/class/powercap/<control type>/<power zone>
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               A power zone is a single device or a collection of devices, which can
+               be independently monitored and controlled. A power zone sysfs
+               entry is qualified with the name of the <control type>.
+               E.g. intel-rapl:0:1:1.
+
+What:          /sys/class/powercap/<control type>/<power zone>/<child power zone>
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Power zones may be organized in a hierarchy in which child
+               power zones provide monitoring and control for a subset of
+               devices under the parent. For example, if there is a parent
+               power zone for a whole CPU package, each CPU core in it can
+               be a child power zone.
+
+What:          /sys/class/powercap/.../<power zone>/name
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Specifies the name of this power zone.
+
+What:          /sys/class/powercap/.../<power zone>/energy_uj
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Current energy counter in micro-joules. Write "0" to reset.
+               If the counter can not be reset, then this attribute is
+               read-only.
+
+What:          /sys/class/powercap/.../<power zone>/max_energy_range_uj
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Range of the above energy counter in micro-joules.
+
+
+What:          /sys/class/powercap/.../<power zone>/power_uw
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Current power in micro-watts.
+
+What:          /sys/class/powercap/.../<power zone>/max_power_range_uw
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Range of the above power value in micro-watts.
+
+What:          /sys/class/powercap/.../<power zone>/constraint_X_name
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Each power zone can define one or more constraints. Each
+               constraint can have an optional name. Here "X" can have values
+               from 0 to max integer.
+
+What:          /sys/class/powercap/.../<power zone>/constraint_X_power_limit_uw
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Power limit in micro-watts, applied over the time window
+               specified by "constraint_X_time_window_us".
+               Here "X" can have values from 0 to max integer.
+
+What:          /sys/class/powercap/.../<power zone>/constraint_X_time_window_us
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Time window in micro seconds. This is used along with
+               constraint_X_power_limit_uw to define a power constraint.
+               Here "X" can have values from 0 to max integer.
+
+
+What:          /sys/class/powercap/<control type>/.../constraint_X_max_power_uw
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Maximum allowed power in micro-watts for this constraint.
+               Here "X" can have values from 0 to max integer.
+
+What:          /sys/class/powercap/<control type>/.../constraint_X_min_power_uw
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Minimum allowed power in micro-watts for this constraint.
+               Here "X" can have values from 0 to max integer.
+
+What:          /sys/class/powercap/.../<power zone>/constraint_X_max_time_window_us
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Maximum allowed time window in micro seconds for this
+               constraint. Here "X" can have values from 0 to max integer.
+
+What:          /sys/class/powercap/.../<power zone>/constraint_X_min_time_window_us
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               Minimum allowed time window in micro seconds for this
+               constraint. Here "X" can have values from 0 to max integer.
+
+What:          /sys/class/powercap/.../<power zone>/enabled
+Date:          September 2013
+KernelVersion: 3.13
+Contact:       linux-pm@vger.kernel.org
+Description:
+               This allows enabling/disabling power capping at the power zone
+               level. This applies to the current power zone and its children.
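
For reference, a minimal user-space sketch that reads two of the attributes
described above; the "intel-rapl:0" zone path is only an illustrative
assumption (control type and zone names differ between systems), and error
handling is kept to a bare minimum:

	#include <stdio.h>

	/* Read a single integer attribute, returning -1 on any failure. */
	static long long read_attr(const char *path)
	{
		FILE *f = fopen(path, "r");
		long long v = -1;

		if (f) {
			if (fscanf(f, "%lld", &v) != 1)
				v = -1;
			fclose(f);
		}
		return v;
	}

	int main(void)
	{
		const char *zone = "/sys/class/powercap/intel-rapl:0";
		char path[128];

		/* Current energy counter in micro-joules. */
		snprintf(path, sizeof(path), "%s/energy_uj", zone);
		printf("energy_uj: %lld\n", read_attr(path));

		/* Power limit of constraint 0, in micro-watts. */
		snprintf(path, sizeof(path), "%s/constraint_0_power_limit_uw", zone);
		printf("constraint_0_power_limit_uw: %lld\n", read_attr(path));

		return 0;
	}
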
index 9d43e76708413bdf6b3d25a0b1179714b06680ba..efe449bdf811db7a1c9f74f38a0371d2c0dd692e 100644 (file)
@@ -1,6 +1,6 @@
 What:          /sys/devices/.../power/
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power directory contains attributes
                allowing the user space to check and modify some power
@@ -8,7 +8,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/wakeup attribute allows the user
                space to check if the device is enabled to wake up the system
@@ -34,7 +34,7 @@ Description:
 
 What:          /sys/devices/.../power/control
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/control attribute allows the user
                space to control the run-time power management of the device.
@@ -53,7 +53,7 @@ Description:
 
 What:          /sys/devices/.../power/async
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../async attribute allows the user space to
                enable or disable the device's suspend and resume callbacks to
@@ -79,7 +79,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_count
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_count attribute contains the number
                of signaled wakeup events associated with the device.  This
@@ -88,7 +88,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_active_count
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_active_count attribute contains the
                number of times the processing of wakeup events associated with
@@ -98,7 +98,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_abort_count
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_abort_count attribute contains the
                number of times the processing of a wakeup event associated with
@@ -109,7 +109,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_expire_count
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_expire_count attribute contains the
                number of times a wakeup event associated with the device has
@@ -119,7 +119,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_active
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_active attribute contains either 1,
                or 0, depending on whether or not a wakeup event associated with
@@ -129,7 +129,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_total_time_ms
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_total_time_ms attribute contains
                the total time of processing wakeup events associated with the
@@ -139,7 +139,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_max_time_ms
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_max_time_ms attribute contains
                the maximum time of processing a single wakeup event associated
@@ -149,7 +149,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_last_time_ms
 Date:          September 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_last_time_ms attribute contains
                the value of the monotonic clock corresponding to the time of
@@ -160,7 +160,7 @@ Description:
 
 What:          /sys/devices/.../power/wakeup_prevent_sleep_time_ms
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute
                contains the total time the device has been preventing
@@ -189,7 +189,7 @@ Description:
 
 What:          /sys/devices/.../power/pm_qos_latency_us
 Date:          March 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/pm_qos_resume_latency_us attribute
                contains the PM QoS resume latency limit for the given device,
@@ -207,7 +207,7 @@ Description:
 
 What:          /sys/devices/.../power/pm_qos_no_power_off
 Date:          September 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/pm_qos_no_power_off attribute
                is used for manipulating the PM QoS "no power off" flag.  If
@@ -222,7 +222,7 @@ Description:
 
 What:          /sys/devices/.../power/pm_qos_remote_wakeup
 Date:          September 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../power/pm_qos_remote_wakeup attribute
                is used for manipulating the PM QoS "remote wakeup required"
index 217772615d0288f996e05cc128b98c9a40d95042..205a7387844106804de496ec9b3a8c372b725977 100644 (file)
@@ -1,6 +1,6 @@
 What:          /sys/power/
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power directory will contain files that will
                provide a unified interface to the power management
@@ -8,7 +8,7 @@ Description:
 
 What:          /sys/power/state
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/state file controls the system power state.
                Reading from this file returns what states are supported,
@@ -22,7 +22,7 @@ Description:
 
 What:          /sys/power/disk
 Date:          September 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/disk file controls the operating mode of the
                suspend-to-disk mechanism.  Reading from this file returns
@@ -67,7 +67,7 @@ Description:
 
 What:          /sys/power/image_size
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/image_size file controls the size of the image
                created by the suspend-to-disk mechanism.  It can be written a
@@ -84,7 +84,7 @@ Description:
 
 What:          /sys/power/pm_trace
 Date:          August 2006
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/pm_trace file controls the code which saves the
                last PM event point in the RTC across reboots, so that you can
@@ -133,7 +133,7 @@ Description:
 
 What:          /sys/power/pm_async
 Date:          January 2009
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/pm_async file controls the switch allowing the
                user space to enable or disable asynchronous suspend and resume
@@ -146,7 +146,7 @@ Description:
 
 What:          /sys/power/wakeup_count
 Date:          July 2010
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/wakeup_count file allows user space to put the
                system into a sleep state while taking into account the
@@ -161,7 +161,7 @@ Description:
 
 What:          /sys/power/reserved_size
 Date:          May 2011
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/reserved_size file allows user space to control
                the amount of memory reserved for allocations made by device
@@ -175,7 +175,7 @@ Description:
 
 What:          /sys/power/autosleep
 Date:          April 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/autosleep file can be written one of the strings
                returned by reads from /sys/power/state.  If that happens, a
@@ -192,7 +192,7 @@ Description:
 
 What:          /sys/power/wake_lock
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/wake_lock file allows user space to create
                wakeup source objects and activate them on demand (if one of
@@ -219,7 +219,7 @@ Description:
 
 What:          /sys/power/wake_unlock
 Date:          February 2012
-Contact:       Rafael J. Wysocki <rjw@sisk.pl>
+Contact:       Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/power/wake_unlock file allows user space to deactivate
                wakeup sources created with the help of /sys/power/wake_lock.
index 14129f149a75432589f3bc925f7776a59354ef62..5e983031cc11be35fba1aab90546db5a7de1eeb3 100644 (file)
@@ -101,14 +101,23 @@ style to do this even if your device holds the default setting,
 because this shows that you did think about these issues wrt. your
 device.
 
-The query is performed via a call to dma_set_mask():
+The query is performed via a call to dma_set_mask_and_coherent():
 
-       int dma_set_mask(struct device *dev, u64 mask);
+       int dma_set_mask_and_coherent(struct device *dev, u64 mask);
 
-The query for consistent allocations is performed via a call to
-dma_set_coherent_mask():
+which will query the mask for both streaming and coherent APIs together.
+If you have some special requirements, then the following two separate
+queries can be used instead:
 
-       int dma_set_coherent_mask(struct device *dev, u64 mask);
+       The query for streaming mappings is performed via a call to
+       dma_set_mask():
+
+               int dma_set_mask(struct device *dev, u64 mask);
+
+       The query for consistent allocations is performed via a call
+       to dma_set_coherent_mask():
+
+               int dma_set_coherent_mask(struct device *dev, u64 mask);
 
 Here, dev is a pointer to the device struct of your device, and mask
 is a bit mask describing which bits of an address your device
@@ -137,7 +146,7 @@ exactly why.
 
 The standard 32-bit addressing device would do something like this:
 
-       if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
+       if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                printk(KERN_WARNING
                       "mydev: No suitable DMA available.\n");
                goto ignore_this_device;
@@ -171,22 +180,20 @@ the case would look like this:
 
        int using_dac, consistent_using_dac;
 
-       if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
                using_dac = 1;
                consistent_using_dac = 1;
-               dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
-       } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
+       } else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                using_dac = 0;
                consistent_using_dac = 0;
-               dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
        } else {
                printk(KERN_WARNING
                       "mydev: No suitable DMA available.\n");
                goto ignore_this_device;
        }
 
-dma_set_coherent_mask() will always be able to set the same or a
-smaller mask as dma_set_mask(). However for the rare case that a
+The coherent mask can always be set to the same or a smaller mask than
+the streaming mask. However, for the rare case that a
 device driver only uses consistent allocations, one would have to
 check the return value from dma_set_coherent_mask().
 
@@ -199,9 +206,9 @@ address you might do something like:
                goto ignore_this_device;
        }
 
-When dma_set_mask() is successful, and returns zero, the kernel saves
-away this mask you have provided.  The kernel will use this
-information later when you make DMA mappings.
+When dma_set_mask() or dma_set_mask_and_coherent() is successful, and
+returns zero, the kernel saves away this mask you have provided.  The
+kernel will use this information later when you make DMA mappings.
 
 There is a case which we are aware of at this time, which is worth
 mentioning in this documentation.  If your device supports multiple
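
Putting the pieces above together, a probe path following this advice might
look like the sketch below; "mydev" is the same placeholder name used in the
text, and whether trying the 64-bit mask first is worthwhile depends on the
hardware:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int mydev_setup_dma(struct device *dev)
	{
		/* Try 64-bit streaming + coherent masks, then fall back to 32-bit. */
		if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
			return 0;
		if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
			return 0;

		dev_warn(dev, "mydev: No suitable DMA available\n");
		return -EIO;
	}
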
index 78a6c569d204bc0073e33fe093d34a8137e5eaf4..e865279cec5855818d83eb281fcce06ee0510040 100644 (file)
@@ -141,6 +141,14 @@ won't change the current mask settings.  It is more intended as an
 internal API for use by the platform than an external API for use by
 driver writers.
 
+int
+dma_set_mask_and_coherent(struct device *dev, u64 mask)
+
+Checks to see if the mask is possible and updates the device
+streaming and coherent DMA mask parameters if it is.
+
+Returns: 0 if successful and a negative error if not.
+
 int
 dma_set_mask(struct device *dev, u64 mask)
 
index bccf602a87f5c2054b146c826725ac183863a8ad..6f458564d625a3d82601703ceed6a814e9e14585 100644 (file)
@@ -525,8 +525,9 @@ corresponding register block for you.
 6. Other interesting functions
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-pci_find_slot()                        Find pci_dev corresponding to given bus and
-                               slot numbers.
+pci_get_domain_bus_and_slot()  Find pci_dev corresponding to given domain,
+                               bus and slot numbers. If the device is
+                               found, its reference count is increased.
 pci_set_power_state()          Set PCI Power Management state (0=D0 ... 3=D3)
 pci_find_capability()          Find specified capability in device's capability
                                list.
@@ -582,7 +583,8 @@ having sane locking.
 
 pci_find_device()      Superseded by pci_get_device()
 pci_find_subsys()      Superseded by pci_get_subsys()
-pci_find_slot()                Superseded by pci_get_slot()
+pci_find_slot()                Superseded by pci_get_domain_bus_and_slot()
+pci_get_slot()         Superseded by pci_get_domain_bus_and_slot()
 
 
 The alternative is the traditional PCI device driver that walks PCI
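
As a hedged illustration of the replacement suggested above, a lookup by
domain, bus and slot could be written as follows; the domain/bus/devfn values
are placeholders, and the reference taken by pci_get_domain_bus_and_slot()
must be dropped with pci_dev_put():

	#include <linux/pci.h>

	static bool mydev_peer_present(void)
	{
		/* Domain 0, bus 0, device 3, function 0 -- placeholder values. */
		struct pci_dev *pdev =
			pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(3, 0));

		if (!pdev)
			return false;

		/* ... inspect pdev as needed ... */

		pci_dev_put(pdev);	/* balance the reference from the lookup */
		return true;
	}
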
index febbb1ba4d2317b984e7217796ac39151f867531..784841caa6e63824ff2503351fb12e682f01e3b3 100644 (file)
@@ -4,4 +4,4 @@ CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel.
 
 When to use this method is described in detail on the
 Linux/ACPI home page:
-http://www.lesswatts.org/projects/acpi/overridingDSDT.php
+https://01.org/linux-acpi/documentation/overriding-dsdt
index 4848db8c71ff5118244888b620251f49831c92f2..8a4da64e02a8b08f3ad7ed6359ef713d7afd3b54 100644 (file)
@@ -71,7 +71,7 @@ static int netlink_send(int s, struct cn_msg *msg)
        nlh->nlmsg_seq = seq++;
        nlh->nlmsg_pid = getpid();
        nlh->nlmsg_type = NLMSG_DONE;
-       nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
+       nlh->nlmsg_len = size;
        nlh->nlmsg_flags = 0;
 
        m = NLMSG_DATA(nlh);
index 23721d3be3e6160e2057b0b11ef002e2828f458f..80b72419ffd8a83dfc8e150d5f5088ec69e4f0d9 100644 (file)
@@ -414,6 +414,7 @@ Your cooperation is appreciated.
                200 = /dev/net/tun      TAP/TUN network device
                201 = /dev/button/gulpb Transmeta GULP-B buttons
                202 = /dev/emd/ctl      Enhanced Metadisk RAID (EMD) control
+               203 = /dev/cuse         Cuse (character device in user-space)
                204 = /dev/video/em8300         EM8300 DVD decoder control
                205 = /dev/video/em8300_mv      EM8300 DVD decoder video
                206 = /dev/video/em8300_ma      EM8300 DVD decoder audio
index 92d36e2aa87791c75af70f23da1da3169c94f96c..f28d82bbbc56b3f3dff7716ede2f200f315d8e70 100644 (file)
@@ -36,14 +36,18 @@ specific to ARM.
 
        - reg
                Usage: required
-               Value type: <prop-encoded-array>
+               Value type: Integer cells. A register entry, expressed as a pair
+                           of cells, containing base and size.
                Definition: A standard property. Specifies base physical
                            address of CCI control registers common to all
                            interfaces.
 
        - ranges:
                Usage: required
-               Value type: <prop-encoded-array>
+               Value type: Integer cells. An array of range entries, expressed
+                           as a tuple of cells, containing child address,
+                           parent address and the size of the region in the
+                           child address space.
                Definition: A standard property. Follow rules in the ePAPR for
                            hierarchical bus addressing. CCI interfaces
                            addresses refer to the parent node addressing
@@ -74,11 +78,49 @@ specific to ARM.
 
                - reg:
                        Usage: required
-                       Value type: <prop-encoded-array>
+                       Value type: Integer cells. A register entry, expressed
+                                   as a pair of cells, containing base and
+                                   size.
                        Definition: the base address and size of the
                                    corresponding interface programming
                                    registers.
 
+       - CCI PMU node
+
+               Parent node must be CCI interconnect node.
+
+               A CCI pmu node must contain the following properties:
+
+               - compatible
+                       Usage: required
+                       Value type: <string>
+                       Definition: must be "arm,cci-400-pmu"
+
+               - reg:
+                       Usage: required
+                       Value type: Integer cells. A register entry, expressed
+                                   as a pair of cells, containing base and
+                                   size.
+                       Definition: the base address and size of the
+                                   corresponding interface programming
+                                   registers.
+
+               - interrupts:
+                       Usage: required
+                       Value type: Integer cells. Array of interrupt specifier
+                                   entries, as defined in
+                                   ../interrupt-controller/interrupts.txt.
+                       Definition: list of counter overflow interrupts, one per
+                                   counter. The interrupts must be specified
+                                   starting with the cycle counter overflow
+                                   interrupt, followed by counter0 overflow
+                                   interrupt, counter1 overflow interrupt,...
+                                   ,counterN overflow interrupt.
+
+                                   The CCI PMU has an interrupt signal for each
+                                   counter. The number of interrupts must be
+                                   equal to the number of counters.
+
 * CCI interconnect bus masters
 
        Description: masters in the device tree connected to a CCI port
@@ -144,7 +186,7 @@ Example:
                #address-cells = <1>;
                #size-cells = <1>;
                reg = <0x0 0x2c090000 0 0x1000>;
-               ranges = <0x0 0x0 0x2c090000 0x6000>;
+               ranges = <0x0 0x0 0x2c090000 0x10000>;
 
                cci_control0: slave-if@1000 {
                        compatible = "arm,cci-400-ctrl-if";
@@ -163,6 +205,16 @@ Example:
                        interface-type = "ace";
                        reg = <0x5000 0x1000>;
                };
+
+               pmu@9000 {
+                        compatible = "arm,cci-400-pmu";
+                        reg = <0x9000 0x5000>;
+                        interrupts = <0 101 4>,
+                                     <0 102 4>,
+                                     <0 103 4>,
+                                     <0 104 4>,
+                                     <0 105 4>;
+               };
        };
 
 This CCI node corresponds to a CCI component whose control registers sits
index e1f343c7a34b7b10ea462a39b5e9a320ef463ba6..f69bcf5a6343bf314b5eef199999c5a676131e74 100644 (file)
@@ -28,7 +28,7 @@ The three cells in order are:
 dependent:
   - bit 7-0: peripheral identifier for the hardware handshaking interface. The
   identifier can be different for tx and rx.
-  - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 1 for ASAP.
+  - bit 11-8: FIFO configuration. 0 for half FIFO, 1 for ALAP, 2 for ASAP.
 
 Example:
 
index b4fa934ae3a2a2fa0e68e85199d676a4b8a682ff..3f14e81c55bd6fd81b938b32ff860440ba03fa3d 100644 (file)
@@ -67,6 +67,7 @@ of the following host1x client modules:
   - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
   - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
   - nvidia,edid: supplies a binary EDID blob
+  - nvidia,panel: phandle of a display panel
 
 - hdmi: High Definition Multimedia Interface
 
@@ -81,6 +82,7 @@ of the following host1x client modules:
   - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
   - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
   - nvidia,edid: supplies a binary EDID blob
+  - nvidia,panel: phandle of a display panel
 
 - tvo: TV encoder output
 
@@ -94,6 +96,20 @@ of the following host1x client modules:
   Required properties:
   - compatible: "nvidia,tegra<chip>-dsi"
   - reg: Physical base address and length of the controller's registers.
+  - clocks: Should contain phandle and clock specifiers for two clocks:
+    the DSI controller clock and the parent clock of the controller.
+  - clock-names: A list of strings containing the name for each clock in
+    the clocks property. Must contain the following two entries:
+    - "dsi": the DSI controller clock
+    - "parent": parent of the DSI controller clock
+  - calibrate: Should contain a phandle and a specifier specifying which
+    pads are used by this DSI output and need to be calibrated.
+
+  Optional properties:
+  - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+  - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+  - nvidia,edid: supplies a binary EDID blob
+  - nvidia,panel: phandle of a display panel
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rcar.txt b/Documentation/devicetree/bindings/i2c/i2c-rcar.txt
new file mode 100644 (file)
index 0000000..897cfcd
--- /dev/null
@@ -0,0 +1,23 @@
+I2C for R-Car platforms
+
+Required properties:
+- compatible: Must be one of
+       "renesas,i2c-rcar"
+       "renesas,i2c-r8a7778"
+       "renesas,i2c-r8a7779"
+       "renesas,i2c-r8a7790"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- interrupts: interrupt specifier.
+
+Optional properties:
+- clock-frequency: desired I2C bus clock frequency in Hz. The absence of this
+  property indicates the default frequency of 100 kHz.
+
+Examples :
+
+i2c0: i2c@e6500000 {
+       compatible = "renesas,i2c-r8a7790";
+       reg = <0 0xe6500000 0 0x428>;
+       interrupts = <0 174 0x4>;
+};
diff --git a/Documentation/devicetree/bindings/memory.txt b/Documentation/devicetree/bindings/memory.txt
deleted file mode 100644 (file)
index eb24693..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-*** Memory binding ***
-
-The /memory node provides basic information about the address and size
-of the physical memory. This node is usually filled or updated by the
-bootloader, depending on the actual memory configuration of the given
-hardware.
-
-The memory layout is described by the following node:
-
-/ {
-       #address-cells = <(n)>;
-       #size-cells = <(m)>;
-       memory {
-               device_type = "memory";
-               reg =  <(baseaddr1) (size1)
-                       (baseaddr2) (size2)
-                       ...
-                       (baseaddrN) (sizeN)>;
-       };
-       ...
-};
-
-A memory node follows the typical device tree rules for "reg" property:
-n:             number of cells used to store base address value
-m:             number of cells used to store size value
-baseaddrX:     defines a base address of the defined memory bank
-sizeX:         the size of the defined memory bank
-
-
-More than one memory bank can be defined.
-
-
-*** Reserved memory regions ***
-
-In /memory/reserved-memory node one can create child nodes describing
-particular reserved (excluded from normal use) memory regions. Such
-memory regions are usually designed for the special usage by various
-device drivers. A good example are contiguous memory allocations or
-memory sharing with other operating system on the same hardware board.
-Those special memory regions might depend on the board configuration and
-devices used on the target system.
-
-Parameters for each memory region can be encoded into the device tree
-with the following convention:
-
-[(label):] (name) {
-       compatible = "linux,contiguous-memory-region", "reserved-memory-region";
-       reg = <(address) (size)>;
-       (linux,default-contiguous-region);
-};
-
-compatible:    one or more of:
-       - "linux,contiguous-memory-region" - enables binding of this
-         region to Contiguous Memory Allocator (special region for
-         contiguous memory allocations, shared with movable system
-         memory, Linux kernel-specific).
-       - "reserved-memory-region" - compatibility is defined, given
-         region is assigned for exclusive usage for by the respective
-         devices.
-
-reg:   standard property defining the base address and size of
-       the memory region
-
-linux,default-contiguous-region: property indicating that the region
-       is the default region for all contiguous memory
-       allocations, Linux specific (optional)
-
-It is optional to specify the base address, so if one wants to use
-autoconfiguration of the base address, '0' can be specified as a base
-address in the 'reg' property.
-
-The /memory/reserved-memory node must contain the same #address-cells
-and #size-cells value as the root node.
-
-
-*** Device node's properties ***
-
-Once regions in the /memory/reserved-memory node have been defined, they
-may be referenced by other device nodes. Bindings that wish to reference
-memory regions should explicitly document their use of the following
-property:
-
-memory-region = <&phandle_to_defined_region>;
-
-This property indicates that the device driver should use the memory
-region pointed by the given phandle.
-
-
-*** Example ***
-
-This example defines a memory consisting of 4 memory banks. 3 contiguous
-regions are defined for Linux kernel, one default of all device drivers
-(named contig_mem, placed at 0x72000000, 64MiB), one dedicated to the
-framebuffer device (labelled display_mem, placed at 0x78000000, 8MiB)
-and one for multimedia processing (labelled multimedia_mem, placed at
-0x77000000, 64MiB). 'display_mem' region is then assigned to fb@12300000
-device for DMA memory allocations (Linux kernel drivers will use CMA is
-available or dma-exclusive usage otherwise). 'multimedia_mem' is
-assigned to scaler@12500000 and codec@12600000 devices for contiguous
-memory allocations when CMA driver is enabled.
-
-The reason for creating a separate region for framebuffer device is to
-match the framebuffer base address to the one configured by bootloader,
-so once Linux kernel drivers starts no glitches on the displayed boot
-logo appears. Scaller and codec drivers should share the memory
-allocations.
-
-/ {
-       #address-cells = <1>;
-       #size-cells = <1>;
-
-       /* ... */
-
-       memory {
-               reg =  <0x40000000 0x10000000
-                       0x50000000 0x10000000
-                       0x60000000 0x10000000
-                       0x70000000 0x10000000>;
-
-               reserved-memory {
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-
-                       /*
-                        * global autoconfigured region for contiguous allocations
-                        * (used only with Contiguous Memory Allocator)
-                        */
-                       contig_region@0 {
-                               compatible = "linux,contiguous-memory-region";
-                               reg = <0x0 0x4000000>;
-                               linux,default-contiguous-region;
-                       };
-
-                       /*
-                        * special region for framebuffer
-                        */
-                       display_region: region@78000000 {
-                               compatible = "linux,contiguous-memory-region", "reserved-memory-region";
-                               reg = <0x78000000 0x800000>;
-                       };
-
-                       /*
-                        * special region for multimedia processing devices
-                        */
-                       multimedia_region: region@77000000 {
-                               compatible = "linux,contiguous-memory-region";
-                               reg = <0x77000000 0x4000000>;
-                       };
-               };
-       };
-
-       /* ... */
-
-       fb0: fb@12300000 {
-               status = "okay";
-               memory-region = <&display_region>;
-       };
-
-       scaler: scaler@12500000 {
-               status = "okay";
-               memory-region = <&multimedia_region>;
-       };
-
-       codec: codec@12600000 {
-               status = "okay";
-               memory-region = <&multimedia_region>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/misc/nvidia,tegra114-mipi.txt b/Documentation/devicetree/bindings/misc/nvidia,tegra114-mipi.txt
new file mode 100644 (file)
index 0000000..642c5d8
--- /dev/null
@@ -0,0 +1,37 @@
+NVIDIA Tegra MIPI pad calibration controller
+
+Required properties:
+- compatible: "nvidia,tegra<chip>-mipi"
+- reg: Physical base address and length of the controller's registers.
+- clocks: The clock consumed by the controller.
+- #calibrate-cells: Should be 1. The cell is a bitmask of the pads that need
+  to be calibrated by a given device.
+
+User nodes need to contain a calibrate property that has a phandle to refer
+to the calibration controller node and a bitmask of the pads that need to be
+calibrated.
+
+Example:
+
+       mipi: mipi@700e3000 {
+               compatible = "nvidia,tegra114-mipi";
+               reg = <0x700e3000 0x100>;
+               clocks = <&tegra_car TEGRA114_CLK_MIPI_CAL>;
+               #calibrate-cells = <1>;
+       };
+
+       ...
+
+       host1x@50000000 {
+               ...
+
+               dsi@54300000 {
+                       ...
+
+                       calibrate = <&mipi 0x060>;
+
+                       ...
+               };
+
+               ...
+       };
diff --git a/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt b/Documentation/devicetree/bindings/net/cpsw-phy-sel.txt
new file mode 100644 (file)
index 0000000..7ff57a1
--- /dev/null
@@ -0,0 +1,28 @@
+TI CPSW Phy mode Selection Device Tree Bindings
+-----------------------------------------------
+
+Required properties:
+- compatible           : Should be "ti,am3352-cpsw-phy-sel"
+- reg                  : physical base address and size of the cpsw
+                         registers map
+- reg-names            : names of the register map given in "reg" node
+
+Optional properties:
+- rmii-clock-ext       : If present, the driver will configure the RMII
+                         interface for external clock usage
+
+Examples:
+
+       phy_sel: cpsw-phy-sel@44e10650 {
+               compatible = "ti,am3352-cpsw-phy-sel";
+               reg = <0x44e10650 0x4>;
+               reg-names = "gmii-sel";
+       };
+
+(or)
+       phy_sel: cpsw-phy-sel@44e10650 {
+               compatible = "ti,am3352-cpsw-phy-sel";
+               reg = <0x44e10650 0x4>;
+               reg-names = "gmii-sel";
+               rmii-clock-ext;
+       };
diff --git a/Documentation/devicetree/bindings/panel/auo,b101aw03.txt b/Documentation/devicetree/bindings/panel/auo,b101aw03.txt
new file mode 100644 (file)
index 0000000..72e088a
--- /dev/null
@@ -0,0 +1,7 @@
+AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,b101aw03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt b/Documentation/devicetree/bindings/panel/chunghwa,claa101wb03.txt
new file mode 100644 (file)
index 0000000..0ab2c05
--- /dev/null
@@ -0,0 +1,7 @@
+Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "chunghwa,claa101wb03"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt b/Documentation/devicetree/bindings/panel/panasonic,vvx10f004b00.txt
new file mode 100644 (file)
index 0000000..d328b03
--- /dev/null
@@ -0,0 +1,7 @@
+Panasonic Corporation 10.1" WUXGA TFT LCD panel
+
+Required properties:
+- compatible: should be "panasonic,vvx10f004b00"
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/simple-panel.txt b/Documentation/devicetree/bindings/panel/simple-panel.txt
new file mode 100644 (file)
index 0000000..1341bbf
--- /dev/null
@@ -0,0 +1,21 @@
+Simple display panel
+
+Required properties:
+- power-supply: regulator to provide the supply voltage
+
+Optional properties:
+- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+
+       panel: panel {
+               compatible = "cptt,claa101wb01";
+               ddc-i2c-bus = <&panelddc>;
+
+               power-supply = <&vdd_pnl_reg>;
+               enable-gpios = <&gpio 90 0>;
+
+               backlight = <&backlight>;
+       };
index e216af356847c05ac4ab9f2e9a6ae887b1939f2a..d5d26d443693ce70db814c9466bbc72d87d6b7fe 100644 (file)
@@ -3,7 +3,7 @@
 Required properties:
 - compatible: should contain "snps,dw-pcie" to identify the
        core, plus an identifier for the specific instance, such
-       as "samsung,exynos5440-pcie".
+       as "samsung,exynos5440-pcie" or "fsl,imx6q-pcie".
 - reg: base addresses and lengths of the pcie controller,
        the phy controller, additional register for the phy controller.
 - interrupts: interrupt values for level interrupt,
@@ -21,6 +21,11 @@ Required properties:
 - num-lanes: number of lanes to use
 - reset-gpio: gpio pin number of power good signal
 
+Optional properties for fsl,imx6q-pcie
+- power-on-gpio: gpio pin number of power-enable signal
+- wake-up-gpio: gpio pin number of incoming wakeup signal
+- disable-gpio: gpio pin number of outgoing rfkill/endpoint disable signal
+
 Example:
 
 SoC specific DT Entry:
diff --git a/Documentation/devicetree/bindings/sound/cs42l73.txt b/Documentation/devicetree/bindings/sound/cs42l73.txt
new file mode 100644 (file)
index 0000000..80ae910
--- /dev/null
@@ -0,0 +1,22 @@
+CS42L73 audio CODEC
+
+Required properties:
+
+  - compatible : "cirrus,cs42l73"
+
+  - reg : the I2C address of the device for I2C
+
+Optional properties:
+
+  - reset_gpio : a GPIO spec for the reset pin.
+  - chgfreq    : Charge Pump Frequency values 0x00-0x0F
+
+
+Example:
+
+codec: cs42l73@4a {
+       compatible = "cirrus,cs42l73";
+       reg = <0x4a>;
+       reset_gpio = <&gpio 10 0>;
+       chgfreq = <0x05>;
+};
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/sound/davinci-evm-audio.txt b/Documentation/devicetree/bindings/sound/davinci-evm-audio.txt
new file mode 100644 (file)
index 0000000..865178d
--- /dev/null
@@ -0,0 +1,42 @@
+* Texas Instruments SoC audio setups with TLV320AIC3X Codec
+
+Required properties:
+- compatible : "ti,da830-evm-audio" : for DM365/DA8xx/OMAPL1x/AM33xx
+- ti,model : The user-visible name of this sound complex.
+- ti,audio-codec : The phandle of the TLV320AIC3x audio codec
+- ti,mcasp-controller : The phandle of the McASP controller
+- ti,codec-clock-rate : The Codec Clock rate (in Hz) applied to the Codec
+- ti,audio-routing : A list of the connections between audio components.
+  Each entry is a pair of strings, the first being the connection's sink,
+  the second being the connection's source. Valid names for sources and
+  sinks are the codec's pins, and the jacks on the board:
+
+  Board connectors:
+
+  * Headphone Jack
+  * Line Out
+  * Mic Jack
+  * Line In
+
+
+Example:
+
+sound {
+       compatible = "ti,da830-evm-audio";
+       ti,model = "DA830 EVM";
+       ti,audio-codec = <&tlv320aic3x>;
+       ti,mcasp-controller = <&mcasp1>;
+       ti,codec-clock-rate = <12000000>;
+       ti,audio-routing =
+               "Headphone Jack",       "HPLOUT",
+               "Headphone Jack",       "HPROUT",
+               "Line Out",             "LLOUT",
+               "Line Out",             "RLOUT",
+               "MIC3L",                "Mic Bias 2V",
+               "MIC3R",                "Mic Bias 2V",
+               "Mic Bias 2V",          "Mic Jack",
+               "LINE1L",               "Line In",
+               "LINE2L",               "Line In",
+               "LINE1R",               "Line In",
+               "LINE2R",               "Line In";
+};
index 374e145c2ef170ceaa32055cf01836bfb3f8c3ef..ed785b3f67beacb6973ff1758b1b0d5b9b5f16dd 100644 (file)
@@ -4,17 +4,25 @@ Required properties:
 - compatible :
        "ti,dm646x-mcasp-audio" : for DM646x platforms
        "ti,da830-mcasp-audio"  : for both DA830 & DA850 platforms
-       "ti,omap2-mcasp-audio"  : for OMAP2 platforms (TI81xx, AM33xx)
-
-- reg : Should contain McASP registers offset and length
-- interrupts : Interrupt number for McASP
-- op-mode : I2S/DIT ops mode.
-- tdm-slots : Slots for TDM operation.
-- num-serializer : Serializers used by McASP.
-- serial-dir : A list of serializer pin mode. The list number should be equal
-               to "num-serializer" parameter. Each entry is a number indication
-               serializer pin direction. (0 - INACTIVE, 1 - TX, 2 - RX)
+       "ti,am33xx-mcasp-audio" : for AM33xx platforms (AM33xx, TI81xx)
 
+- reg : Should contain reg specifiers for the entries in the reg-names property.
+- reg-names : Should contain:
+         * "mpu" for the main registers (required). For compatibility with
+           existing software, it is recommended this is the first entry.
+         * "dat" for separate data port register access (optional).
+- op-mode : I2S/DIT ops mode. 0 for I2S mode. 1 for DIT mode used for S/PDIF,
+           IEC60958-1, and AES-3 formats.
+- tdm-slots : Slots for TDM operation. Indicates number of channels transmitted
+             or received over one serializer.
+- serial-dir : A list of serializer configurations. Each entry is a number
+               indicating the serializer pin direction.
+               (0 - INACTIVE, 1 - TX, 2 - RX)
+- dmas: two element list of DMA controller phandles and DMA request line
+        ordered pairs.
+- dma-names: identifier string for each DMA request line in the dmas property.
+            These strings correspond 1:1 with the ordered pairs in dmas. The dma
+            identifiers must be "rx" and "tx".
 
 Optional properties:
 
@@ -23,18 +31,23 @@ Optional properties:
 - rx-num-evt : FIFO levels.
 - sram-size-playback : size of sram to be allocated during playback
 - sram-size-capture  : size of sram to be allocated during capture
+- interrupts : Interrupt numbers for McASP, currently not used by the driver
+- interrupt-names : Known interrupt names are "tx" and "rx"
+- pinctrl-0: Should specify pin control group used for this controller.
+- pinctrl-names: Should contain only one value - "default", for more details
+                please refer to pinctrl-bindings.txt
+  
 
 Example:
 
 mcasp0: mcasp0@1d00000 {
        compatible = "ti,da830-mcasp-audio";
-       #address-cells = <1>;
-       #size-cells = <0>;
        reg = <0x100000 0x3000>;
-       interrupts = <82 83>;
+       reg-names = "mpu";
+       interrupts = <82>, <83>;
+       interrupt-names = "tx", "rx";
        op-mode = <0>;          /* MCASP_IIS_MODE */
        tdm-slots = <2>;
-       num-serializer = <16>;
        serial-dir = <
                        0 0 0 0 /* 0: INACTIVE, 1: TX, 2: RX */
                        0 0 0 0
index 705a6b156c6c4fc00713443f5fa631996b259401..5e6040c2c2e9c34019dceb93e439504e4e5af9ad 100644 (file)
@@ -24,10 +24,36 @@ Optional properties:
        3 - MICBIAS output is connected to AVDD,
        If this node is not mentioned or if the value is incorrect, then MicBias
        is powered down.
+- AVDD-supply, IOVDD-supply, DRVDD-supply, DVDD-supply : power supplies for the
+  device as covered in Documentation/devicetree/bindings/regulator/regulator.txt
+
+CODEC output pins:
+  * LLOUT
+  * RLOUT
+  * MONO_LOUT
+  * HPLOUT
+  * HPROUT
+  * HPLCOM
+  * HPRCOM
+
+CODEC input pins:
+  * MIC3L
+  * MIC3R
+  * LINE1L
+  * LINE2L
+  * LINE1R
+  * LINE2R
+
+These pins can be referenced in the sound node's audio-routing property.
 
 Example:
 
 tlv320aic3x: tlv320aic3x@1b {
        compatible = "ti,tlv320aic3x";
        reg = <0x1b>;
+
+       AVDD-supply = <&regulator>;
+       IOVDD-supply = <&regulator>;
+       DRVDD-supply = <&regulator>;
+       DVDD-supply = <&regulator>;
 };
diff --git a/Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/tty/serial/renesas,sci-serial.txt
new file mode 100644 (file)
index 0000000..6ad1adf
--- /dev/null
@@ -0,0 +1,53 @@
+* Renesas SH-Mobile Serial Communication Interface
+
+Required properties:
+- compatible : Should be "renesas,sci-<port type>-uart", where <port type> may be
+  SCI, SCIF, IRDA, SCIFA or SCIFB.
+- reg : Address and length of the register set for the device
+- interrupts : Should contain the following IRQs: ERI, RXI, TXI and BRI.
+- cell-index : The device id.
+- renesas,scscr : Should contain a bitfield used by the Serial Control Register.
+  b7 = SCSCR_TIE
+  b6 = SCSCR_RIE
+  b5 = SCSCR_TE
+  b4 = SCSCR_RE
+  b3 = SCSCR_REIE
+  b2 = SCSCR_TOIE
+  b1 = SCSCR_CKE1
+  b0 = SCSCR_CKE0
+- renesas,scbrr-algo-id : Algorithm ID for the Bit Rate Register
+  1 = SCBRR_ALGO_1 ((clk + 16 * bps) / (16 * bps) - 1)
+  2 = SCBRR_ALGO_2 ((clk + 16 * bps) / (32 * bps) - 1)
+  3 = SCBRR_ALGO_3 (((clk * 2) + 16 * bps) / (16 * bps) - 1)
+  4 = SCBRR_ALGO_4 (((clk * 2) + 16 * bps) / (32 * bps) - 1)
+  5 = SCBRR_ALGO_5 (((clk * 1000 / 32) / bps) - 1)
+
+Optional properties:
+- renesas,autoconf : Set if device is capable of auto configuration
+- renesas,regtype : Override the register layout. In most cases you can rely
+  on auto-probing (omit this property or set to 0) but some legacy devices
+  use a non-default register layout. Possible layouts are
+  0 = SCIx_PROBE_REGTYPE (default)
+  1 = SCIx_SCI_REGTYPE
+  2 = SCIx_IRDA_REGTYPE
+  3 = SCIx_SCIFA_REGTYPE
+  4 = SCIx_SCIFB_REGTYPE
+  5 = SCIx_SH2_SCIF_FIFODATA_REGTYPE
+  6 = SCIx_SH3_SCIF_REGTYPE
+  7 = SCIx_SH4_SCIF_REGTYPE
+  8 = SCIx_SH4_SCIF_NO_SCSPTR_REGTYPE
+  9 = SCIx_SH4_SCIF_FIFODATA_REGTYPE
+ 10 = SCIx_SH7705_SCIF_REGTYPE
+
+
+Example:
+       sci@0xe6c50000 {
+               compatible = "renesas,sci-SCIFA-uart";
+               interrupt-parent = <&intca>;
+               reg = <0xe6c50000 0x100>;
+               interrupts = <0x0c20>, <0x0c20>, <0x0c20>, <0x0c20>;
+               cell-index = <1>;
+               renesas,scscr = <0x30>;
+               renesas,scbrr-algo-id = <4>;
+               renesas,autoconf;
+       };
index fe7afe22538149706eab5727c989d91a8530c387..21ef48f0778f25f9415fd856408aeb78747f260c 100644 (file)
@@ -192,8 +192,8 @@ prototypes:
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
-       int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs);
+       int (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+                       loff_t offset);
        int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
                                unsigned long *);
        int (*migratepage)(struct address_space *, struct page *, struct page *);
@@ -426,7 +426,9 @@ prototypes:
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
        int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
index 11a0a40ce445fa5c2f6fb0276dd63907eb55574d..aed6b94160b1cec74face20a05f9c03f03a057f2 100644 (file)
@@ -29,15 +29,16 @@ This document contains the following sections:
         (6) Index registration
         (7) Data file registration
         (8) Miscellaneous object registration
-        (9) Setting the data file size
+        (9) Setting the data file size
        (10) Page alloc/read/write
        (11) Page uncaching
        (12) Index and data file consistency
-       (13) Miscellaneous cookie operations
-       (14) Cookie unregistration
-       (15) Index invalidation
-       (16) Data file invalidation
-       (17) FS-Cache specific page flags.
+       (13) Cookie enablement
+       (14) Miscellaneous cookie operations
+       (15) Cookie unregistration
+       (16) Index invalidation
+       (17) Data file invalidation
+       (18) FS-Cache specific page flags.
 
 
 =============================
@@ -334,7 +335,8 @@ the path to the file:
        struct fscache_cookie *
        fscache_acquire_cookie(struct fscache_cookie *parent,
                               const struct fscache_object_def *def,
-                              void *netfs_data);
+                              void *netfs_data,
+                              bool enable);
 
 This function creates an index entry in the index represented by parent,
 filling in the index entry by calling the operations pointed to by def.
@@ -350,6 +352,10 @@ object needs to be created somewhere down the hierarchy.  Furthermore, an index
 may be created in several different caches independently at different times.
 This is all handled transparently, and the netfs doesn't see any of it.
 
+A cookie will be created in the disabled state if enable is false.  A cookie
+must be enabled to do anything with it.  A disabled cookie can be enabled by
+calling fscache_enable_cookie() (see below).
+
 For example, with AFS, a cell would be added to the primary index.  This index
 entry would have a dependent inode containing a volume location index for the
 volume mappings within this cell:
@@ -357,7 +363,7 @@ volume mappings within this cell:
        cell->cache =
                fscache_acquire_cookie(afs_cache_netfs.primary_index,
                                       &afs_cell_cache_index_def,
-                                      cell);
+                                      cell, true);
 
 Then when a volume location was accessed, it would be entered into the cell's
 index and an inode would be allocated that acts as a volume type and hash chain
@@ -366,7 +372,7 @@ combination:
        vlocation->cache =
                fscache_acquire_cookie(cell->cache,
                                       &afs_vlocation_cache_index_def,
-                                      vlocation);
+                                      vlocation, true);
 
 And then a particular flavour of volume (R/O for example) could be added to
 that index, creating another index for vnodes (AFS inode equivalents):
@@ -374,7 +380,7 @@ that index, creating another index for vnodes (AFS inode equivalents):
        volume->cache =
                fscache_acquire_cookie(vlocation->cache,
                                       &afs_volume_cache_index_def,
-                                      volume);
+                                      volume, true);
 
 
 ======================
@@ -388,7 +394,7 @@ the object definition should be something other than index type.
        vnode->cache =
                fscache_acquire_cookie(volume->cache,
                                       &afs_vnode_cache_object_def,
-                                      vnode);
+                                      vnode, true);
 
 
 =================================
@@ -404,7 +410,7 @@ it would be some other type of object such as a data file.
        xattr->cache =
                fscache_acquire_cookie(vnode->cache,
                                       &afs_xattr_cache_object_def,
-                                      xattr);
+                                      xattr, true);
 
 Miscellaneous objects might be used to store extended attributes or directory
 entries for example.
@@ -733,6 +739,47 @@ Note that partial updates may happen automatically at other times, such as when
 data blocks are added to a data file object.
 
 
+=================
+COOKIE ENABLEMENT
+=================
+
+Cookies exist in one of two states: enabled and disabled.  If a cookie is
+disabled, it ignores all attempts to acquire child cookies; check, update or
+invalidate its state; allocate, read or write backing pages - though it is
+still possible to uncache pages and relinquish the cookie.
+
+The initial enablement state is set by fscache_acquire_cookie(), but the cookie
+can be enabled or disabled later.  To disable a cookie, call:
+    
+       void fscache_disable_cookie(struct fscache_cookie *cookie,
+                                   bool invalidate);
+    
+If the cookie is not already disabled, this locks the cookie against other
+enable and disable ops, marks the cookie as being disabled, discards or
+invalidates any backing objects and waits for cessation of activity on any
+associated object before unlocking the cookie.
+
+All possible failures are handled internally.  The caller should consider
+calling fscache_uncache_all_inode_pages() afterwards to make sure all page
+markings are cleared up.
+    
+Cookies can be enabled or reenabled with:
+    
+       void fscache_enable_cookie(struct fscache_cookie *cookie,
+                                  bool (*can_enable)(void *data),
+                                  void *data)
+    
+If the cookie is not already enabled, this locks the cookie against other
+enable and disable ops, invokes can_enable() and, if the cookie is not an index
+cookie, will begin the procedure of acquiring backing objects.
+
+The optional can_enable() function is passed the data argument and returns a
+ruling as to whether or not enablement should actually be permitted to begin.
+
+All possible failures are handled internally.  The cookie will only be marked
+as enabled if provisional backing objects are allocated.
+
+
 ===============================
 MISCELLANEOUS COOKIE OPERATIONS
 ===============================
@@ -778,7 +825,7 @@ COOKIE UNREGISTRATION
 To get rid of a cookie, this function should be called.
 
        void fscache_relinquish_cookie(struct fscache_cookie *cookie,
-                                      int retire);
+                                      bool retire);
 
 If retire is non-zero, then the object will be marked for recycling, and all
 copies of it will be removed from all active caches in which it is present.
index deb48b5fd88327d8249b19aa8de16aab6855c191..47fa5a3e91858fedf5d6a0009e35f0e6edf920e8 100644 (file)
@@ -573,8 +573,8 @@ struct address_space_operations {
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
-       ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs);
+       ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+                       loff_t offset);
        struct page* (*get_xip_page)(struct address_space *, sector_t,
                        int);
        /* migrate the contents of a page to the specified target */
@@ -790,7 +790,9 @@ struct file_operations {
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
        int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
@@ -825,10 +827,16 @@ otherwise noted.
 
   aio_read: called by io_submit(2) and other asynchronous I/O operations
 
+  read_iter: aio_read replacement, called by io_submit(2) and other
+       asynchronous I/O operations
+
   write: called by write(2) and related system calls
 
   aio_write: called by io_submit(2) and other asynchronous I/O operations
 
+  write_iter: aio_write replacement, called by io_submit(2) and other
+       asynchronous I/O operations
+
   iterate: called when the VFS needs to read the directory contents
 
   poll: called by the VFS when a process wants to check if there is
index c1b57d72efc33c69c05b1856c7631930e5ac28bd..b34c3de5c1bcfccf76f64b3442cf3433bc6ff472 100644 (file)
@@ -8,6 +8,11 @@ Supported chips:
     Datasheets:
        http://www.ti.com/lit/gpn/lm25056
        http://www.ti.com/lit/gpn/lm25056a
+  * TI LM25063
+    Prefix: 'lm25063'
+    Addresses scanned: -
+    Datasheet:
+       To be announced
   * National Semiconductor LM25066
     Prefix: 'lm25066'
     Addresses scanned: -
@@ -32,7 +37,7 @@ Description
 -----------
 
 This driver supports hardware monitoring for National Semiconductor / TI LM25056,
-LM25066, LM5064, and LM5064 Power Management, Monitoring, Control, and
+LM25063, LM25066, LM5064, and LM5066 Power Management, Monitoring, Control, and
 Protection ICs.
 
 The driver is a client driver to the core PMBus driver. Please see
@@ -64,8 +69,12 @@ in1_input            Measured input voltage.
 in1_average            Average measured input voltage.
 in1_min                        Minimum input voltage.
 in1_max                        Maximum input voltage.
+in1_crit               Critical high input voltage (LM25063 only).
+in1_lcrit              Critical low input voltage (LM25063 only).
 in1_min_alarm          Input voltage low alarm.
 in1_max_alarm          Input voltage high alarm.
+in1_lcrit_alarm                Input voltage critical low alarm (LM25063 only).
+in1_crit_alarm         Input voltage critical high alarm (LM25063 only).
 
 in2_label              "vmon"
 in2_input              Measured voltage on VAUX pin
@@ -80,12 +89,16 @@ in3_input           Measured output voltage.
 in3_average            Average measured output voltage.
 in3_min                        Minimum output voltage.
 in3_min_alarm          Output voltage low alarm.
+in3_highest            Historical maximum output voltage (LM25063 only).
+in3_lowest             Historical minimum output voltage (LM25063 only).
 
 curr1_label            "iin"
 curr1_input            Measured input current.
 curr1_average          Average measured input current.
 curr1_max              Maximum input current.
+curr1_crit             Critical input current (LM25063 only).
 curr1_max_alarm                Input current high alarm.
+curr1_crit_alarm       Input current critical high alarm (LM25063 only).
 
 power1_label           "pin"
 power1_input           Measured input power.
@@ -95,6 +108,11 @@ power1_alarm                Input power alarm
 power1_input_highest   Historical maximum power.
 power1_reset_history   Write any value to reset maximum power history.
 
+power2_label           "pout". LM25063 only.
+power2_input           Measured output power.
+power2_max             Maximum output power limit.
+power2_crit            Critical output power limit.
+
 temp1_input            Measured temperature.
 temp1_max              Maximum temperature.
 temp1_crit             Critical high temperature.
index dc0d08c61305469305d86f2f3cd04a32fa2e56c6..a0546fc42273bb7fcdad6389c7afcdded4171ec2 100644 (file)
@@ -6,10 +6,15 @@ Supported chips:
     Prefix: 'ltc2974'
     Addresses scanned: -
     Datasheet: http://www.linear.com/product/ltc2974
-  * Linear Technology LTC2978
+  * Linear Technology LTC2977
+    Prefix: 'ltc2977'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltc2977
+  * Linear Technology LTC2978, LTC2978A
     Prefix: 'ltc2978'
     Addresses scanned: -
     Datasheet: http://www.linear.com/product/ltc2978
+              http://www.linear.com/product/ltc2978a
   * Linear Technology LTC3880
     Prefix: 'ltc3880'
     Addresses scanned: -
@@ -26,8 +31,9 @@ Description
 -----------
 
 LTC2974 is a quad digital power supply manager. LTC2978 is an octal power supply
-monitor. LTC3880 is a dual output poly-phase step-down DC/DC controller. LTC3883
-is a single phase step-down DC/DC controller.
+monitor. LTC2977 is a pin compatible replacement for LTC2978. LTC3880 is a dual
+output poly-phase step-down DC/DC controller. LTC3883 is a single phase
+step-down DC/DC controller.
 
 
 Usage Notes
@@ -49,21 +55,25 @@ Sysfs attributes
 in1_label              "vin"
 in1_input              Measured input voltage.
 in1_min                        Minimum input voltage.
-in1_max                        Maximum input voltage. LTC2974 and LTC2978 only.
-in1_lcrit              Critical minimum input voltage. LTC2974 and LTC2978
-                       only.
+in1_max                        Maximum input voltage.
+                       LTC2974, LTC2977, and LTC2978 only.
+in1_lcrit              Critical minimum input voltage.
+                       LTC2974, LTC2977, and LTC2978 only.
 in1_crit               Critical maximum input voltage.
 in1_min_alarm          Input voltage low alarm.
-in1_max_alarm          Input voltage high alarm. LTC2974 and LTC2978 only.
-in1_lcrit_alarm                Input voltage critical low alarm. LTC2974 and LTC2978
-                       only.
+in1_max_alarm          Input voltage high alarm.
+                       LTC2974, LTC2977, and LTC2978 only.
+in1_lcrit_alarm                Input voltage critical low alarm.
+                       LTC2974, LTC2977, and LTC2978 only.
 in1_crit_alarm         Input voltage critical high alarm.
-in1_lowest             Lowest input voltage. LTC2974 and LTC2978 only.
+in1_lowest             Lowest input voltage.
+                       LTC2974, LTC2977, and LTC2978 only.
 in1_highest            Highest input voltage.
 in1_reset_history      Reset input voltage history.
 
 in[N]_label            "vout[1-8]".
                        LTC2974: N=2-5
+                       LTC2977: N=2-9
                        LTC2978: N=2-9
                        LTC3880: N=2-3
                        LTC3883: N=2
@@ -83,21 +93,23 @@ in[N]_reset_history Reset output voltage history.
 temp[N]_input          Measured temperature.
                        On LTC2974, temp[1-4] report external temperatures,
                        and temp5 reports the chip temperature.
-                       On LTC2978, only one temperature measurement is
-                       supported and reports the chip temperature.
+                       On LTC2977 and LTC2978, only one temperature measurement
+                       is supported and reports the chip temperature.
                        On LTC3880, temp1 and temp2 report external
                        temperatures, and temp3 reports the chip temperature.
                        On LTC3883, temp1 reports an external temperature,
                        and temp2 reports the chip temperature.
-temp[N]_min            Mimimum temperature. LTC2974 and LTC2978 only.
+temp[N]_min            Minimum temperature. LTC2974, LTC2977, and LTC2978 only.
 temp[N]_max            Maximum temperature.
 temp[N]_lcrit          Critical low temperature.
 temp[N]_crit           Critical high temperature.
-temp[N]_min_alarm      Temperature low alarm. LTC2974 and LTC2978 only.
+temp[N]_min_alarm      Temperature low alarm.
+                       LTC2974, LTC2977, and LTC2978 only.
 temp[N]_max_alarm      Temperature high alarm.
 temp[N]_lcrit_alarm    Temperature critical low alarm.
 temp[N]_crit_alarm     Temperature critical high alarm.
-temp[N]_lowest         Lowest measured temperature. LTC2974 and LTC2978 only.
+temp[N]_lowest         Lowest measured temperature.
+                       LTC2974, LTC2977, and LTC2978 only.
                        Not supported for chip temperature sensor on LTC2974.
 temp[N]_highest                Highest measured temperature. Not supported for chip
                        temperature sensor on LTC2974.
@@ -109,6 +121,7 @@ power1_input                Measured input power.
 
 power[N]_label         "pout[1-4]".
                        LTC2974: N=1-4
+                       LTC2977: Not supported
                        LTC2978: Not supported
                        LTC3880: N=1-2
                        LTC3883: N=2
@@ -123,6 +136,7 @@ curr1_reset_history Reset input current history. LTC3883 only.
 
 curr[N]_label          "iout[1-4]".
                        LTC2974: N=1-4
+                       LTC2977: not supported
                        LTC2978: not supported
                        LTC3880: N=2-3
                        LTC3883: N=2
index 2a5f0e14efa351a73ef1eb53c392ecef7c9b4f6b..7cbfa3c4fc3d327c8b1d9c3ef8a4c1228998a69b 100644 (file)
@@ -138,6 +138,7 @@ Code  Seq#(hex)     Include File            Comments
 'H'    C0-DF   net/bluetooth/cmtp/cmtp.h       conflict!
 'H'    C0-DF   net/bluetooth/bnep/bnep.h       conflict!
 'H'    F1      linux/hid-roccat.h      <mailto:erazor_de@users.sourceforge.net>
+'H'    F8-FA   sound/firewire.h
 'I'    all     linux/isdn.h            conflict!
 'I'    00-0F   drivers/isdn/divert/isdn_divert.h       conflict!
 'I'    40-4F   linux/mISDNif.h         conflict!
index 86c52360ffe7326cce53541897d4aa98b86f0e8a..fc04c14de4bbc3705e14eddb97a214ca8a7f6331 100644 (file)
@@ -1,7 +1,7 @@
                     ThinkPad ACPI Extras Driver
 
-                            Version 0.24
-                        December 11th,  2009
+                            Version 0.25
+                        October 16th,  2013
 
                Borislav Deianov <borislav@users.sf.net>
              Henrique de Moraes Holschuh <hmh@hmh.eng.br>
@@ -741,6 +741,9 @@ compiled with the CONFIG_THINKPAD_ACPI_UNSAFE_LEDS option enabled.
 Distributions must never enable this option.  Individual users that
 are aware of the consequences are welcome to enabling it.
 
+Audio mute and microphone mute LEDs are supported, but currently not
+visible to userspace. They are used by the snd-hda-intel audio driver.
+
 procfs notes:
 
 The available commands are:
index c1d82047a4b151c35983656e5841e261ff22fd7f..89490beb3c0bf6bd0160c891b51e3cf07497f1ca 100644 (file)
@@ -69,8 +69,7 @@ folder:
 # aggregated_ogms        gw_bandwidth           log_level
 # ap_isolation           gw_mode                orig_interval
 # bonding                gw_sel_class           routing_algo
-# bridge_loop_avoidance  hop_penalty            vis_mode
-# fragmentation
+# bridge_loop_avoidance  hop_penalty            fragmentation
 
 
 There is a special folder for debugging information:
@@ -78,7 +77,7 @@ There is a special folder for debugging information:
 # ls /sys/kernel/debug/batman_adv/bat0/
 # bla_backbone_table  log                 transtable_global
 # bla_claim_table     originators         transtable_local
-# gateways            socket              vis_data
+# gateways            socket
 
 Some of the files contain all sort of status information  regard-
 ing  the  mesh  network.  For  example, you can view the table of
@@ -127,51 +126,6 @@ ously assigned to interfaces now used by batman advanced, e.g.
 # ifconfig eth0 0.0.0.0
 
 
-VISUALIZATION
--------------
-
-If you want topology visualization, at least one mesh  node  must
-be configured as VIS-server:
-
-# echo "server" > /sys/class/net/bat0/mesh/vis_mode
-
-Each  node  is  either configured as "server" or as "client" (de-
-fault: "client").  Clients send their topology data to the server
-next to them, and server synchronize with other servers. If there
-is no server configured (default) within the  mesh,  no  topology
-information   will  be  transmitted.  With  these  "synchronizing
-servers", there can be 1 or more vis servers sharing the same (or
-at least very similar) data.
-
-When  configured  as  server,  you can get a topology snapshot of
-your mesh:
-
-# cat /sys/kernel/debug/batman_adv/bat0/vis_data
-
-This raw output is intended to be easily parsable and convertable
-with  other tools. Have a look at the batctl README if you want a
-vis output in dot or json format for instance and how those  out-
-puts could then be visualised in an image.
-
-The raw format consists of comma separated values per entry where
-each entry is giving information about a  certain  source  inter-
-face.  Each  entry can/has to have the following values:
--> "mac" - mac address of an originator's source interface
-           (each line begins with it)
--> "TQ mac  value"  -  src mac's link quality towards mac address
-                       of a neighbor originator's interface which
-                       is being used for routing
--> "TT mac" - TT announced by source mac
--> "PRIMARY" - this  is a primary interface
--> "SEC mac" - secondary mac address of source
-               (requires preceding PRIMARY)
-
-The TQ value has a range from 4 to 255 with 255 being  the  best.
-The TT entries are showing which hosts are connected to the mesh
-via bat0 or being bridged into the mesh network.  The PRIMARY/SEC
-values are only applied on primary interfaces
-
-
 LOGGING/DEBUGGING
 -----------------
 
@@ -245,5 +199,5 @@ Mailing-list:   b.a.t.m.a.n@open-mesh.org (optional  subscription
 
 You can also contact the Authors:
 
-Marek  Lindner  <lindner_marek@yahoo.de>
-Simon  Wunderlich  <siwu@hrz.tu-chemnitz.de>
+Marek  Lindner  <mareklindner@neomailbox.ch>
+Simon  Wunderlich  <sw@simonwunderlich.de>
index 9b28e714831ae35fd0664debe4a1091825701024..3856ed2c45a9ffda4918ab3615af8dfc1862890f 100644 (file)
@@ -743,21 +743,16 @@ xmit_hash_policy
                protocol information to generate the hash.
 
                Uses XOR of hardware MAC addresses and IP addresses to
-               generate the hash.  The IPv4 formula is
+               generate the hash.  The formula is
 
-               (((source IP XOR dest IP) AND 0xffff) XOR
-                       ( source MAC XOR destination MAC ))
-                               modulo slave count
+               hash = source MAC XOR destination MAC
+               hash = hash XOR source IP XOR destination IP
+               hash = hash XOR (hash RSHIFT 16)
+               hash = hash XOR (hash RSHIFT 8)
+               And then hash is reduced modulo slave count.
 
-               The IPv6 formula is
-
-               hash = (source ip quad 2 XOR dest IP quad 2) XOR
-                      (source ip quad 3 XOR dest IP quad 3) XOR
-                      (source ip quad 4 XOR dest IP quad 4)
-
-               (((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
-                       XOR (source MAC XOR destination MAC))
-                               modulo slave count
+               If the protocol is IPv6 then the source and destination
+               addresses are first hashed using ipv6_addr_hash.
 
                This algorithm will place all traffic to a particular
                network peer on the same slave.  For non-IP traffic,
@@ -779,21 +774,16 @@ xmit_hash_policy
                slaves, although a single connection will not span
                multiple slaves.
 
-               The formula for unfragmented IPv4 TCP and UDP packets is
-
-               ((source port XOR dest port) XOR
-                        ((source IP XOR dest IP) AND 0xffff)
-                               modulo slave count
+               The formula for unfragmented TCP and UDP packets is
 
-               The formula for unfragmented IPv6 TCP and UDP packets is
+               hash = source port, destination port (as in the header)
+               hash = hash XOR source IP XOR destination IP
+               hash = hash XOR (hash RSHIFT 16)
+               hash = hash XOR (hash RSHIFT 8)
+               And then hash is reduced modulo slave count.
 
-               hash = (source port XOR dest port) XOR
-                      ((source ip quad 2 XOR dest IP quad 2) XOR
-                       (source ip quad 3 XOR dest IP quad 3) XOR
-                       (source ip quad 4 XOR dest IP quad 4))
-
-               ((hash >> 24) XOR (hash >> 16) XOR (hash >> 8) XOR hash)
-                       modulo slave count
+               If the protocol is IPv6 then the source and destination
+               addresses are first hashed using ipv6_addr_hash.
 
                For fragmented TCP or UDP packets and all other IPv4 and
                IPv6 protocol traffic, the source and destination port
@@ -801,10 +791,6 @@ xmit_hash_policy
                formula is the same as for the layer2 transmit hash
                policy.
 
-               The IPv4 policy is intended to mimic the behavior of
-               certain switches, notably Cisco switches with PFC2 as
-               well as some Foundry and IBM products.
-
                This algorithm is not fully 802.3ad compliant.  A
                single TCP or UDP conversation containing both
                fragmented and unfragmented packets will see packets
@@ -815,6 +801,26 @@ xmit_hash_policy
                conversations.  Other implementations of 802.3ad may
                or may not tolerate this noncompliance.
 
+       encap2+3
+
+               This policy uses the same formula as layer2+3 but it
+               relies on skb_flow_dissect to obtain the header fields
+               which might result in the use of inner headers if an
+               encapsulation protocol is used. For example this will
+               improve the performance for tunnel users because the
+               packets will be distributed according to the encapsulated
+               flows.
+
+       encap3+4
+
+               This policy uses the same formula as layer3+4 but it
+               relies on skb_flow_dissect to obtain the header fields
+               which might result in the use of inner headers if an
+               encapsulation protocol is used. For example this will
+               improve the performance for tunnel users because the
+               packets will be distributed according to the encapsulated
+               flows.
+
        The default value is layer2.  This option was added in bonding
        version 2.6.3.  In earlier versions of bonding, this parameter
        does not exist, and the layer2 policy is the only policy.  The
index 425c51d56aefb8b991b3d97b5d3f01564f9baaf6..b8a907dc01697890747ead021b836b4c60c24ae0 100644 (file)
@@ -42,7 +42,7 @@ We can represent these as three OPPs as the following {Hz, uV} tuples:
 
 OPP library provides a set of helper functions to organize and query the OPP
 information. The library is located in drivers/base/power/opp.c and the header
-is located in include/linux/opp.h. OPP library can be enabled by enabling
+is located in include/linux/pm_opp.h. OPP library can be enabled by enabling
 CONFIG_PM_OPP from power management menuconfig menu. OPP library depends on
 CONFIG_PM as certain SoCs such as Texas Instrument's OMAP framework allows to
 optionally boot at a certain OPP without needing cpufreq.
@@ -71,14 +71,14 @@ operations until that OPP could be re-enabled if possible.
 
 OPP library facilitates this concept in it's implementation. The following
 operational functions operate only on available opps:
-opp_find_freq_{ceil, floor}, opp_get_voltage, opp_get_freq, opp_get_opp_count
-and opp_init_cpufreq_table
+opp_find_freq_{ceil, floor}, dev_pm_opp_get_voltage, dev_pm_opp_get_freq, dev_pm_opp_get_opp_count
+and dev_pm_opp_init_cpufreq_table
 
-opp_find_freq_exact is meant to be used to find the opp pointer which can then
-be used for opp_enable/disable functions to make an opp available as required.
+dev_pm_opp_find_freq_exact is meant to be used to find the opp pointer which can then
+be used for dev_pm_opp_enable/disable functions to make an opp available as required.
 
 WARNING: Users of OPP library should refresh their availability count using
-get_opp_count if opp_enable/disable functions are invoked for a device, the
+get_opp_count if dev_pm_opp_enable/disable functions are invoked for a device, the
 exact mechanism to trigger these or the notification mechanism to other
 dependent subsystems such as cpufreq are left to the discretion of the SoC
 specific framework which uses the OPP library. Similar care needs to be taken
@@ -96,24 +96,24 @@ using RCU read locks. The opp_find_freq_{exact,ceil,floor},
 opp_get_{voltage, freq, opp_count} fall into this category.
 
 opp_{add,enable,disable} are updaters which use mutex and implement it's own
-RCU locking mechanisms. opp_init_cpufreq_table acts as an updater and uses
+RCU locking mechanisms. dev_pm_opp_init_cpufreq_table acts as an updater and uses
 mutex to implment RCU updater strategy. These functions should *NOT* be called
 under RCU locks and other contexts that prevent blocking functions in RCU or
 mutex operations from working.
 
 2. Initial OPP List Registration
 ================================
-The SoC implementation calls opp_add function iteratively to add OPPs per
+The SoC implementation calls dev_pm_opp_add function iteratively to add OPPs per
 device. It is expected that the SoC framework will register the OPP entries
 optimally- typical numbers range to be less than 5. The list generated by
 registering the OPPs is maintained by OPP library throughout the device
 operation. The SoC framework can subsequently control the availability of the
-OPPs dynamically using the opp_enable / disable functions.
+OPPs dynamically using the dev_pm_opp_enable / disable functions.
 
-opp_add - Add a new OPP for a specific domain represented by the device pointer.
+dev_pm_opp_add - Add a new OPP for a specific domain represented by the device pointer.
        The OPP is defined using the frequency and voltage. Once added, the OPP
        is assumed to be available and control of it's availability can be done
-       with the opp_enable/disable functions. OPP library internally stores
+       with the dev_pm_opp_enable/disable functions. OPP library internally stores
        and manages this information in the opp struct. This function may be
        used by SoC framework to define a optimal list as per the demands of
        SoC usage environment.
@@ -124,7 +124,7 @@ opp_add - Add a new OPP for a specific domain represented by the device pointer.
         soc_pm_init()
         {
                /* Do things */
-               r = opp_add(mpu_dev, 1000000, 900000);
+               r = dev_pm_opp_add(mpu_dev, 1000000, 900000);
                if (!r) {
                        pr_err("%s: unable to register mpu opp(%d)\n", r);
                        goto no_cpufreq;
@@ -143,44 +143,44 @@ functions return the matching pointer representing the opp if a match is
 found, else returns error. These errors are expected to be handled by standard
 error checks such as IS_ERR() and appropriate actions taken by the caller.
 
-opp_find_freq_exact - Search for an OPP based on an *exact* frequency and
+dev_pm_opp_find_freq_exact - Search for an OPP based on an *exact* frequency and
        availability. This function is especially useful to enable an OPP which
        is not available by default.
        Example: In a case when SoC framework detects a situation where a
        higher frequency could be made available, it can use this function to
-       find the OPP prior to call the opp_enable to actually make it available.
+       find the OPP prior to call the dev_pm_opp_enable to actually make it available.
         rcu_read_lock();
-        opp = opp_find_freq_exact(dev, 1000000000, false);
+        opp = dev_pm_opp_find_freq_exact(dev, 1000000000, false);
         rcu_read_unlock();
         /* dont operate on the pointer.. just do a sanity check.. */
         if (IS_ERR(opp)) {
                pr_err("frequency not disabled!\n");
                /* trigger appropriate actions.. */
         } else {
-               opp_enable(dev,1000000000);
+               dev_pm_opp_enable(dev,1000000000);
         }
 
        NOTE: This is the only search function that operates on OPPs which are
        not available.
 
-opp_find_freq_floor - Search for an available OPP which is *at most* the
+dev_pm_opp_find_freq_floor - Search for an available OPP which is *at most* the
        provided frequency. This function is useful while searching for a lesser
        match OR operating on OPP information in the order of decreasing
        frequency.
        Example: To find the highest opp for a device:
         freq = ULONG_MAX;
         rcu_read_lock();
-        opp_find_freq_floor(dev, &freq);
+        dev_pm_opp_find_freq_floor(dev, &freq);
         rcu_read_unlock();
 
-opp_find_freq_ceil - Search for an available OPP which is *at least* the
+dev_pm_opp_find_freq_ceil - Search for an available OPP which is *at least* the
        provided frequency. This function is useful while searching for a
        higher match OR operating on OPP information in the order of increasing
        frequency.
        Example 1: To find the lowest opp for a device:
         freq = 0;
         rcu_read_lock();
-        opp_find_freq_ceil(dev, &freq);
+        dev_pm_opp_find_freq_ceil(dev, &freq);
         rcu_read_unlock();
        Example 2: A simplified implementation of a SoC cpufreq_driver->target:
         soc_cpufreq_target(..)
@@ -188,7 +188,7 @@ opp_find_freq_ceil - Search for an available OPP which is *at least* the
                /* Do stuff like policy checks etc. */
                /* Find the best frequency match for the req */
                rcu_read_lock();
-               opp = opp_find_freq_ceil(dev, &freq);
+               opp = dev_pm_opp_find_freq_ceil(dev, &freq);
                rcu_read_unlock();
                if (!IS_ERR(opp))
                        soc_switch_to_freq_voltage(freq);
@@ -208,34 +208,34 @@ as thermal considerations (e.g. don't use OPPx until the temperature drops).
 
 WARNING: Do not use these functions in interrupt context.
 
-opp_enable - Make a OPP available for operation.
+dev_pm_opp_enable - Make a OPP available for operation.
        Example: Lets say that 1GHz OPP is to be made available only if the
        SoC temperature is lower than a certain threshold. The SoC framework
        implementation might choose to do something as follows:
         if (cur_temp < temp_low_thresh) {
                /* Enable 1GHz if it was disabled */
                rcu_read_lock();
-               opp = opp_find_freq_exact(dev, 1000000000, false);
+               opp = dev_pm_opp_find_freq_exact(dev, 1000000000, false);
                rcu_read_unlock();
                /* just error check */
                if (!IS_ERR(opp))
-                       ret = opp_enable(dev, 1000000000);
+                       ret = dev_pm_opp_enable(dev, 1000000000);
                else
                        goto try_something_else;
         }
 
-opp_disable - Make an OPP to be not available for operation
+dev_pm_opp_disable - Make an OPP to be not available for operation
        Example: Lets say that 1GHz OPP is to be disabled if the temperature
        exceeds a threshold value. The SoC framework implementation might
        choose to do something as follows:
         if (cur_temp > temp_high_thresh) {
                /* Disable 1GHz if it was enabled */
                rcu_read_lock();
-               opp = opp_find_freq_exact(dev, 1000000000, true);
+               opp = dev_pm_opp_find_freq_exact(dev, 1000000000, true);
                rcu_read_unlock();
                /* just error check */
                if (!IS_ERR(opp))
-                       ret = opp_disable(dev, 1000000000);
+                       ret = dev_pm_opp_disable(dev, 1000000000);
                else
                        goto try_something_else;
         }
@@ -247,7 +247,7 @@ information from the OPP structure is necessary. Once an OPP pointer is
 retrieved using the search functions, the following functions can be used by SoC
 framework to retrieve the information represented inside the OPP layer.
 
-opp_get_voltage - Retrieve the voltage represented by the opp pointer.
+dev_pm_opp_get_voltage - Retrieve the voltage represented by the opp pointer.
        Example: At a cpufreq transition to a different frequency, SoC
        framework requires to set the voltage represented by the OPP using
        the regulator framework to the Power Management chip providing the
@@ -256,15 +256,15 @@ opp_get_voltage - Retrieve the voltage represented by the opp pointer.
         {
                /* do things */
                rcu_read_lock();
-               opp = opp_find_freq_ceil(dev, &freq);
-               v = opp_get_voltage(opp);
+               opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+               v = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
                if (v)
                        regulator_set_voltage(.., v);
                /* do other things */
         }
 
-opp_get_freq - Retrieve the freq represented by the opp pointer.
+dev_pm_opp_get_freq - Retrieve the freq represented by the opp pointer.
        Example: Lets say the SoC framework uses a couple of helper functions
        we could pass opp pointers instead of doing additional parameters to
        handle quiet a bit of data parameters.
@@ -273,8 +273,8 @@ opp_get_freq - Retrieve the freq represented by the opp pointer.
                /* do things.. */
                 max_freq = ULONG_MAX;
                 rcu_read_lock();
-                max_opp = opp_find_freq_floor(dev,&max_freq);
-                requested_opp = opp_find_freq_ceil(dev,&freq);
+                max_opp = dev_pm_opp_find_freq_floor(dev,&max_freq);
+                requested_opp = dev_pm_opp_find_freq_ceil(dev,&freq);
                 if (!IS_ERR(max_opp) && !IS_ERR(requested_opp))
                        r = soc_test_validity(max_opp, requested_opp);
                 rcu_read_unlock();
@@ -282,25 +282,25 @@ opp_get_freq - Retrieve the freq represented by the opp pointer.
         }
         soc_test_validity(..)
         {
-                if(opp_get_voltage(max_opp) < opp_get_voltage(requested_opp))
+                if(dev_pm_opp_get_voltage(max_opp) < dev_pm_opp_get_voltage(requested_opp))
                         return -EINVAL;
-                if(opp_get_freq(max_opp) < opp_get_freq(requested_opp))
+                if(dev_pm_opp_get_freq(max_opp) < dev_pm_opp_get_freq(requested_opp))
                         return -EINVAL;
                /* do things.. */
         }
 
-opp_get_opp_count - Retrieve the number of available opps for a device
+dev_pm_opp_get_opp_count - Retrieve the number of available opps for a device
        Example: Lets say a co-processor in the SoC needs to know the available
        frequencies in a table, the main processor can notify as following:
         soc_notify_coproc_available_frequencies()
         {
                /* Do things */
                rcu_read_lock();
-               num_available = opp_get_opp_count(dev);
+               num_available = dev_pm_opp_get_opp_count(dev);
                speeds = kzalloc(sizeof(u32) * num_available, GFP_KERNEL);
                /* populate the table in increasing order */
                freq = 0;
-               while (!IS_ERR(opp = opp_find_freq_ceil(dev, &freq))) {
+               while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(dev, &freq))) {
                        speeds[i] = freq;
                        freq++;
                        i++;
@@ -313,7 +313,7 @@ opp_get_opp_count - Retrieve the number of available opps for a device
 
 6. Cpufreq Table Generation
 ===========================
-opp_init_cpufreq_table - cpufreq framework typically is initialized with
+dev_pm_opp_init_cpufreq_table - cpufreq framework typically is initialized with
        cpufreq_frequency_table_cpuinfo which is provided with the list of
        frequencies that are available for operation. This function provides
        a ready to use conversion routine to translate the OPP layer's internal
@@ -326,7 +326,7 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with
         soc_pm_init()
         {
                /* Do things */
-               r = opp_init_cpufreq_table(dev, &freq_table);
+               r = dev_pm_opp_init_cpufreq_table(dev, &freq_table);
                if (!r)
                        cpufreq_frequency_table_cpuinfo(policy, freq_table);
                /* Do other things */
@@ -336,7 +336,7 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with
        addition to CONFIG_PM as power management feature is required to
        dynamically scale voltage and frequency in a system.
 
-opp_free_cpufreq_table - Free up the table allocated by opp_init_cpufreq_table
+dev_pm_opp_free_cpufreq_table - Free up the table allocated by dev_pm_opp_init_cpufreq_table
 
 7. Data Structures
 ==================
@@ -358,16 +358,16 @@ accessed by various functions as described above. However, the structures
 representing the actual OPPs and domains are internal to the OPP library itself
 to allow for suitable abstraction reusable across systems.
 
-struct opp - The internal data structure of OPP library which is used to
+struct dev_pm_opp - The internal data structure of OPP library which is used to
        represent an OPP. In addition to the freq, voltage, availability
        information, it also contains internal book keeping information required
        for the OPP library to operate on.  Pointer to this structure is
        provided back to the users such as SoC framework to be used as a
        identifier for OPP in the interactions with OPP layer.
 
-       WARNING: The struct opp pointer should not be parsed or modified by the
-       users. The defaults of for an instance is populated by opp_add, but the
-       availability of the OPP can be modified by opp_enable/disable functions.
+       WARNING: The struct dev_pm_opp pointer should not be parsed or modified by the
+       users. The defaults of for an instance is populated by dev_pm_opp_add, but the
+       availability of the OPP can be modified by dev_pm_opp_enable/disable functions.
 
 struct device - This is used to identify a domain to the OPP layer. The
        nature of the device and it's implementation is left to the user of
@@ -377,19 +377,19 @@ Overall, in a simplistic view, the data structure operations is represented as
 following:
 
 Initialization / modification:
-            +-----+        /- opp_enable
-opp_add --> | opp | <-------
-  |         +-----+        \- opp_disable
+            +-----+        /- dev_pm_opp_enable
+dev_pm_opp_add --> | opp | <-------
+  |         +-----+        \- dev_pm_opp_disable
   \-------> domain_info(device)
 
 Search functions:
-             /-- opp_find_freq_ceil  ---\   +-----+
-domain_info<---- opp_find_freq_exact -----> | opp |
-             \-- opp_find_freq_floor ---/   +-----+
+             /-- dev_pm_opp_find_freq_ceil  ---\   +-----+
+domain_info<---- dev_pm_opp_find_freq_exact -----> | opp |
+             \-- dev_pm_opp_find_freq_floor ---/   +-----+
 
 Retrieval functions:
-+-----+     /- opp_get_voltage
++-----+     /- dev_pm_opp_get_voltage
 | opp | <---
-+-----+     \- opp_get_freq
++-----+     \- dev_pm_opp_get_freq
 
-domain_info <- opp_get_opp_count
+domain_info <- dev_pm_opp_get_opp_count
diff --git a/Documentation/power/powercap/powercap.txt b/Documentation/power/powercap/powercap.txt
new file mode 100644 (file)
index 0000000..1e6ef16
--- /dev/null
@@ -0,0 +1,236 @@
+Power Capping Framework
+==================================
+
+The power capping framework provides a consistent interface between the kernel
+and the user space that allows power capping drivers to expose the settings to
+user space in a uniform way.
+
+Terminology
+=========================
+The framework exposes power capping devices to user space via sysfs in the
+form of a tree of objects. The objects at the root level of the tree represent
+'control types', which correspond to different methods of power capping.  For
+example, the intel-rapl control type represents the Intel "Running Average
+Power Limit" (RAPL) technology, whereas the 'idle-injection' control type
+corresponds to the use of idle injection for controlling power.
+
+Power zones represent different parts of the system, which can be controlled and
+monitored using the power capping method determined by the control type the
+given zone belongs to. They each contain attributes for monitoring power, as
+well as controls represented in the form of power constraints.  If the parts of
+the system represented by different power zones are hierarchical (that is, one
+bigger part consists of multiple smaller parts that each have their own power
+controls), those power zones may also be organized in a hierarchy with one
+parent power zone containing multiple subzones and so on to reflect the power
+control topology of the system.  In that case, it is possible to apply power
+capping to a set of devices together using the parent power zone and if more
+fine grained control is required, it can be applied through the subzones.
+
+
+Example sysfs interface tree:
+
+/sys/devices/virtual/powercap
+└── intel-rapl
+    ├── intel-rapl:0
+    │   ├── constraint_0_name
+    │   ├── constraint_0_power_limit_uw
+    │   ├── constraint_0_time_window_us
+    │   ├── constraint_1_name
+    │   ├── constraint_1_power_limit_uw
+    │   ├── constraint_1_time_window_us
+    │   ├── device -> ../../intel-rapl
+    │   ├── energy_uj
+    │   ├── intel-rapl:0:0
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:0
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── intel-rapl:0:1
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:0
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── max_energy_range_uj
+    │   ├── max_power_range_uw
+    │   ├── name
+    │   ├── enabled
+    │   ├── power
+    │   │   ├── async
+    │   │   []
+    │   ├── subsystem -> ../../../../../class/power_cap
+    │   ├── enabled
+    │   └── uevent
+    ├── intel-rapl:1
+    │   ├── constraint_0_name
+    │   ├── constraint_0_power_limit_uw
+    │   ├── constraint_0_time_window_us
+    │   ├── constraint_1_name
+    │   ├── constraint_1_power_limit_uw
+    │   ├── constraint_1_time_window_us
+    │   ├── device -> ../../intel-rapl
+    │   ├── energy_uj
+    │   ├── intel-rapl:1:0
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:1
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── intel-rapl:1:1
+    │   │   ├── constraint_0_name
+    │   │   ├── constraint_0_power_limit_uw
+    │   │   ├── constraint_0_time_window_us
+    │   │   ├── constraint_1_name
+    │   │   ├── constraint_1_power_limit_uw
+    │   │   ├── constraint_1_time_window_us
+    │   │   ├── device -> ../../intel-rapl:1
+    │   │   ├── energy_uj
+    │   │   ├── max_energy_range_uj
+    │   │   ├── name
+    │   │   ├── enabled
+    │   │   ├── power
+    │   │   │   ├── async
+    │   │   │   []
+    │   │   ├── subsystem -> ../../../../../../class/power_cap
+    │   │   └── uevent
+    │   ├── max_energy_range_uj
+    │   ├── max_power_range_uw
+    │   ├── name
+    │   ├── enabled
+    │   ├── power
+    │   │   ├── async
+    │   │   []
+    │   ├── subsystem -> ../../../../../class/power_cap
+    │   └── uevent
+    ├── power
+    │   ├── async
+    │   []
+    ├── subsystem -> ../../../../class/power_cap
+    ├── enabled
+    └── uevent
+
+The above example illustrates a case in which the Intel RAPL technology,
+available in Intel® 64 and IA-32 Processor Architectures, is used. There is one
+control type called intel-rapl which contains two power zones, intel-rapl:0 and
+intel-rapl:1, representing CPU packages.  Each of these power zones contains
+two subzones, intel-rapl:j:0 and intel-rapl:j:1 (j = 0, 1), representing the
+"core" and the "uncore" parts of the given CPU package, respectively.  All of
+the zones and subzones contain energy monitoring attributes (energy_uj,
+max_energy_range_uj) and constraint attributes (constraint_*) allowing controls
+to be applied (the constraints in the 'package' power zones apply to the whole
+CPU packages and the subzone constraints only apply to the respective parts of
+the given package individually). Since Intel RAPL doesn't provide an
+instantaneous power value, there is no power_uw attribute.
+
+In addition to that, each power zone contains a name attribute, allowing the
+part of the system represented by that zone to be identified.
+For example:
+
+cat /sys/class/power_cap/intel-rapl/intel-rapl:0/name
+package-0
+
+The Intel RAPL technology allows two constraints, short term and long term,
+with two different time windows to be applied to each power zone.  Thus for
+each zone there are 2 attributes representing the constraint names, 2 power
+limits and 2 attributes representing the sizes of the time windows, such that
+the constraint_j_* attributes correspond to the jth constraint (j = 0, 1).
+
+For example:
+       constraint_0_name
+       constraint_0_power_limit_uw
+       constraint_0_time_window_us
+       constraint_1_name
+       constraint_1_power_limit_uw
+       constraint_1_time_window_us
+
+Power Zone Attributes
+=================================
+Monitoring attributes
+----------------------
+
+energy_uj (rw): Current energy counter in micro joules. Write "0" to reset.
+If the counter cannot be reset, then this attribute is read-only.
+
+max_energy_range_uj (ro): Range of the above energy counter in micro-joules.
+
+power_uw (ro): Current power in micro watts.
+
+max_power_range_uw (ro): Range of the above power value in micro-watts.
+
+name (ro): Name of this power zone.
+
+It is possible that some domains have both power ranges and energy counter ranges;
+however, only one is mandatory.
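+
+For instance, a user space monitor might sample the energy counter of the
+package-0 zone from the example tree above (a sketch only; the sysfs path is
+taken from that example and will differ on other systems):
+
+        #include <stdio.h>
+
+        int main(void)
+        {
+                unsigned long long energy_uj;
+                FILE *f = fopen("/sys/class/power_cap/intel-rapl/intel-rapl:0/energy_uj", "r");
+
+                if (!f) {
+                        perror("energy_uj");
+                        return 1;
+                }
+                if (fscanf(f, "%llu", &energy_uj) == 1)
+                        printf("package-0 energy: %llu uJ\n", energy_uj);
+                fclose(f);
+                return 0;
+        }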
+
+Constraints
+----------------
+constraint_X_power_limit_uw (rw): Power limit in micro watts, which should be
+applicable for the time window specified by "constraint_X_time_window_us".
+
+constraint_X_time_window_us (rw): Time window in micro seconds.
+
+constraint_X_name (ro): An optional name of the constraint.
+
+constraint_X_max_power_uw(ro): Maximum allowed power in micro watts.
+
+constraint_X_min_power_uw(ro): Minimum allowed power in micro watts.
+
+constraint_X_max_time_window_us(ro): Maximum allowed time window in micro seconds.
+
+constraint_X_min_time_window_us(ro): Minimum allowed time window in micro seconds.
+
+Except power_limit_uw and time_window_us other fields are optional.
+
+Common zone and control type attributes
+----------------------------------------
+enabled (rw): Enable/disable controls at the zone level, or for all zones
+belonging to a given control type.
+
+Power Cap Client Driver Interface
+==================================
+The API summary:
+
+Call powercap_register_control_type() to register a control type object.
+Call powercap_register_zone() to register a power zone (under a given
+control type), either as a top-level power zone or as a subzone of another
+power zone registered earlier.
+The number of constraints in a power zone and the corresponding callbacks have
+to be defined prior to calling powercap_register_zone() to register that zone.
+
+To free a power zone, call powercap_unregister_zone().
+To free a control type object, call powercap_unregister_control_type().
+Detailed API documentation can be generated using kernel-doc on include/linux/powercap.h.
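
As a rough illustration of that sequence, the sketch below registers one
control type and a single zone that only exposes an energy counter. The
function signatures follow include/linux/powercap.h; the "demo" names, the
constant energy value and the omission of constraints are purely illustrative,
and a real driver needs proper callbacks and error handling.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/powercap.h>

static struct powercap_control_type *demo_ct;	/* allocated by the core */
static struct powercap_zone demo_zone;		/* storage supplied by us */

/* Report the zone's energy counter (a fixed value here, for illustration). */
static int demo_get_energy_uj(struct powercap_zone *zone, u64 *energy_uj)
{
	*energy_uj = 42;
	return 0;
}

/* Called when the zone device goes away; nothing to free in this sketch. */
static int demo_release_zone(struct powercap_zone *zone)
{
	return 0;
}

static const struct powercap_zone_ops demo_zone_ops = {
	.get_energy_uj	= demo_get_energy_uj,
	.release	= demo_release_zone,
};

static int __init demo_powercap_init(void)
{
	struct powercap_zone *zone;

	/* Passing NULL asks the core to allocate the control type itself. */
	demo_ct = powercap_register_control_type(NULL, "demo", NULL);
	if (IS_ERR(demo_ct))
		return PTR_ERR(demo_ct);

	/* Top-level zone (parent == NULL), no constraints in this sketch. */
	zone = powercap_register_zone(&demo_zone, demo_ct, "demo-zone",
				      NULL, &demo_zone_ops, 0, NULL);
	if (IS_ERR(zone)) {
		powercap_unregister_control_type(demo_ct);
		return PTR_ERR(zone);
	}
	return 0;
}

static void __exit demo_powercap_exit(void)
{
	powercap_unregister_zone(demo_ct, &demo_zone);
	powercap_unregister_control_type(demo_ct);
}

module_init(demo_powercap_init);
module_exit(demo_powercap_exit);
MODULE_LICENSE("GPL");

When loaded, the new control type should appear under
/sys/class/power_cap/demo/ with the zone's name attribute reading "demo-zone".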
index 71d8fe4e75d3e5301e25037a28d91600d633dba7..0f54333b0ff2990ce090f88087e17417a894b46c 100644 (file)
@@ -145,11 +145,13 @@ The action performed by the idle callback is totally dependent on the subsystem
 if the device can be suspended (i.e. if all of the conditions necessary for
 suspending the device are satisfied) and to queue up a suspend request for the
 device in that case.  If there is no idle callback, or if the callback returns
-0, then the PM core will attempt to carry out a runtime suspend of the device;
-in essence, it will call pm_runtime_suspend() directly.  To prevent this (for
-example, if the callback routine has started a delayed suspend), the routine
-should return a non-zero value.  Negative error return codes are ignored by the
-PM core.
+0, then the PM core will attempt to carry out a runtime suspend of the device,
+also respecting devices configured for autosuspend.  In essence this means a
+call to pm_runtime_autosuspend() (do note that drivers need to update the
+device last busy mark, pm_runtime_mark_last_busy(), to control the delay under
+this circumstance).  To prevent this (for example, if the callback routine has
+started a delayed suspend), the routine must return a non-zero value.  Negative
+error return codes are ignored by the PM core.
 
 The helper functions provided by the PM core, described in Section 4, guarantee
 that the following constraints are met with respect to runtime PM callbacks for
@@ -308,7 +310,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
     - execute the subsystem-level idle callback for the device; returns an
       error code on failure, where -EINPROGRESS means that ->runtime_idle() is
       already being executed; if there is no callback or the callback returns 0
-      then run pm_runtime_suspend(dev) and return its result
+      then run pm_runtime_autosuspend(dev) and return its result
 
   int pm_runtime_suspend(struct device *dev);
     - execute the subsystem-level suspend callback for the device; returns 0 on
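
As context for the change above, a driver that wants the autosuspend delay to
be honoured typically opts in during probe and refreshes the last-busy
timestamp whenever the device is used.  A minimal sketch, assuming
<linux/pm_runtime.h> and a valid struct device *dev (the 2000 ms delay is
arbitrary):

	/* probe(): enable autosuspend with an (arbitrary) 2 second delay */
	pm_runtime_set_autosuspend_delay(dev, 2000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	/* I/O completion path: refresh the last-busy mark, then drop the
	   usage count so the delayed suspend can be scheduled */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);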
index f59ded06610813f058d30ae468dbcf3922fc34b7..a74d0a84d329a2909186256d9ad5fdb5f5327a2e 100644 (file)
@@ -100,6 +100,11 @@ static long ppb_to_scaled_ppm(int ppb)
        return (long) (ppb * 65.536);
 }
 
+static int64_t pctns(struct ptp_clock_time *t)
+{
+       return t->sec * 1000000000LL + t->nsec;
+}
+
 static void usage(char *progname)
 {
        fprintf(stderr,
@@ -112,6 +117,8 @@ static void usage(char *progname)
                " -f val     adjust the ptp clock frequency by 'val' ppb\n"
                " -g         get the ptp clock time\n"
                " -h         prints this message\n"
+               " -k val     measure the time offset between system and phc clock\n"
+               "            for 'val' times (Maximum 25)\n"
                " -p val     enable output with a period of 'val' nanoseconds\n"
                " -P val     enable or disable (val=1|0) the system clock PPS\n"
                " -s         set the ptp clock time from the system time\n"
@@ -133,8 +140,12 @@ int main(int argc, char *argv[])
        struct itimerspec timeout;
        struct sigevent sigevent;
 
+       struct ptp_clock_time *pct;
+       struct ptp_sys_offset *sysoff;
+
+
        char *progname;
-       int c, cnt, fd;
+       int i, c, cnt, fd;
 
        char *device = DEVICE;
        clockid_t clkid;
@@ -144,14 +155,19 @@ int main(int argc, char *argv[])
        int extts = 0;
        int gettime = 0;
        int oneshot = 0;
+       int pct_offset = 0;
+       int n_samples = 0;
        int periodic = 0;
        int perout = -1;
        int pps = -1;
        int settime = 0;
 
+       int64_t t1, t2, tp;
+       int64_t interval, offset;
+
        progname = strrchr(argv[0], '/');
        progname = progname ? 1+progname : argv[0];
-       while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghp:P:sSt:v"))) {
+       while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghk:p:P:sSt:v"))) {
                switch (c) {
                case 'a':
                        oneshot = atoi(optarg);
@@ -174,6 +190,10 @@ int main(int argc, char *argv[])
                case 'g':
                        gettime = 1;
                        break;
+               case 'k':
+                       pct_offset = 1;
+                       n_samples = atoi(optarg);
+                       break;
                case 'p':
                        perout = atoi(optarg);
                        break;
@@ -376,6 +396,47 @@ int main(int argc, char *argv[])
                }
        }
 
+       if (pct_offset) {
+               if (n_samples <= 0 || n_samples > 25) {
+                       puts("n_samples should be between 1 and 25");
+                       usage(progname);
+                       return -1;
+               }
+
+               sysoff = calloc(1, sizeof(*sysoff));
+               if (!sysoff) {
+                       perror("calloc");
+                       return -1;
+               }
+               sysoff->n_samples = n_samples;
+
+               if (ioctl(fd, PTP_SYS_OFFSET, sysoff))
+                       perror("PTP_SYS_OFFSET");
+               else
+                       puts("system and phc clock time offset request okay");
+
+               pct = &sysoff->ts[0];
+               for (i = 0; i < sysoff->n_samples; i++) {
+                       t1 = pctns(pct+2*i);
+                       tp = pctns(pct+2*i+1);
+                       t2 = pctns(pct+2*i+2);
+                       interval = t2 - t1;
+                       offset = (t2 + t1) / 2 - tp;
+
+                       printf("system time: %ld.%ld\n",
+                               (pct+2*i)->sec, (pct+2*i)->nsec);
+                       printf("phc    time: %ld.%ld\n",
+                               (pct+2*i+1)->sec, (pct+2*i+1)->nsec);
+                       printf("system time: %ld.%ld\n",
+                               (pct+2*i+2)->sec, (pct+2*i+2)->nsec);
+                       printf("system/phc clock time offset is %ld ns\n"
+                               "system     clock time delay  is %ld ns\n",
+                               offset, interval);
+               }
+
+               free(sysoff);
+       }
+
        close(fd);
        return 0;
 }
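
For reference, the loop above applies the usual midpoint estimate to each
(t1, tp, t2) sample, where t1 and t2 are system timestamps taken around the
PHC timestamp tp: delay = t2 - t1 and offset = (t1 + t2)/2 - tp.  With
made-up values, purely for illustration:

	t1 = 1000 ns, tp = 1730 ns, t2 = 1200 ns
	delay  = 1200 - 1000            =  200 ns
	offset = (1000 + 1200)/2 - 1730 = -630 ns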
index fcaf0b4efba21cfe51ed1aa4a6d9303aa614dea5..3da163383c931258641c69ce01f4f3812c97aafd 100644 (file)
@@ -157,6 +157,16 @@ Return Value:  none
 
 Description:   Sets new actual debug level if new_level is valid. 
 
+---------------------------------------------------------------------------
+bool debug_level_enabled (debug_info_t * id, int level);
+
+Parameter:    id:        handle for debug log
+             level:      debug level
+
+Return Value: True if level is less than or equal to the current debug level.
+
+Description:  Returns true if debug events for the specified level would be
+             logged. Otherwise returns false.
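
A typical use of this helper is to guard the construction of expensive debug
data.  A minimal sketch (the debug_info handle, level 4 and the message text
are illustrative):

	if (debug_level_enabled(debug_info, 4))
		debug_text_event(debug_info, 4, "expensive state dump");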
 ---------------------------------------------------------------------------
 void debug_stop_all(void);
 
index b1b8587b86f0cc8153c2cedda5a3308f980828aa..9290de7034504186a1fedbd9a60ad3690abbfed6 100644 (file)
@@ -65,11 +65,6 @@ Possible arch/ problems
 
 Possible arch problems I found (and either tried to fix or didn't):
 
-h8300 - Is such sleeping racy vs interrupts? (See #4a).
-        The H8/300 manual I found indicates yes, however disabling IRQs
-        over the sleep mean only NMIs can wake it up, so can't fix easily
-        without doing spin waiting.
-
 ia64 - is safe_halt call racy vs interrupts? (does it sleep?) (See #4a)
 
 sh64 - Is sleeping racy vs interrupts? (See #4a)
index f911e3656209f6c1bdc49a156f322b64dfd3a824..85c362d8ea349350947a8363d36dbe7e622ea536 100644 (file)
@@ -28,6 +28,7 @@ ALC269/270/275/276/28x/29x
   alc269-dmic          Enable ALC269(VA) digital mic workaround
   alc271-dmic          Enable ALC271X digital mic workaround
   inv-dmic             Inverted internal mic workaround
+  headset-mic          Indicates a combined headset (headphone+mic) jack
   lenovo-dock          Enables docking station I/O for some Lenovos
   dell-headset-multi   Headset jack, which can also be used as mic-in
   dell-headset-dock    Headset jack (without mic-in), and also dock I/O
diff --git a/Documentation/sound/alsa/soc/DPCM.txt b/Documentation/sound/alsa/soc/DPCM.txt
new file mode 100644 (file)
index 0000000..aa8546f
--- /dev/null
@@ -0,0 +1,380 @@
+Dynamic PCM
+===========
+
+1. Description
+==============
+
+Dynamic PCM allows an ALSA PCM device to digitally route its PCM audio to
+various digital endpoints during the PCM stream runtime. e.g. PCM0 can route
+digital audio to I2S DAI0, I2S DAI1 or PDM DAI2. This is useful for on-SoC DSP
+drivers that expose several ALSA PCMs and can route to multiple DAIs.
+
+The DPCM runtime routing is determined by the ALSA mixer settings in the same
+way as the analog signal is routed in an ASoC codec driver. DPCM uses a DAPM
+graph representing the DSP internal audio paths and uses the mixer settings to
+determine the path used by each ALSA PCM.
+
+DPCM re-uses all the existing component codec, platform and DAI drivers without
+any modifications.
+
+
+Phone Audio System with SoC based DSP
+-------------------------------------
+
+Consider the following phone audio subsystem. This will be used in this
+document for all examples :-
+
+| Front End PCMs    |  SoC DSP  | Back End DAIs | Audio devices |
+
+                    *************
+PCM0 <------------> *           * <----DAI0-----> Codec Headset
+                    *           *
+PCM1 <------------> *           * <----DAI1-----> Codec Speakers
+                    *   DSP     *
+PCM2 <------------> *           * <----DAI2-----> MODEM
+                    *           *
+PCM3 <------------> *           * <----DAI3-----> BT
+                    *           *
+                    *           * <----DAI4-----> DMIC
+                    *           *
+                    *           * <----DAI5-----> FM
+                    *************
+
+This diagram shows a simple smart phone audio subsystem. It supports Bluetooth,
+FM digital radio, Speakers, Headset Jack, digital microphones and cellular
+modem. This sound card exposes 4 DSP front end (FE) ALSA PCM devices and
+supports 6 back end (BE) DAIs. Each FE PCM can digitally route audio data to any
+of the BE DAIs. The FE PCM devices can also route audio to more than 1 BE DAI.
+
+
+
+Example - DPCM Switching playback from DAI0 to DAI1
+---------------------------------------------------
+
+Audio is being played to the Headset. After a while the user removes the headset
+and audio continues playing on the speakers.
+
+Playback on PCM0 to Headset would look like :-
+
+                    *************
+PCM0 <============> *           * <====DAI0=====> Codec Headset
+                    *           *
+PCM1 <------------> *           * <----DAI1-----> Codec Speakers
+                    *   DSP     *
+PCM2 <------------> *           * <----DAI2-----> MODEM
+                    *           *
+PCM3 <------------> *           * <----DAI3-----> BT
+                    *           *
+                    *           * <----DAI4-----> DMIC
+                    *           *
+                    *           * <----DAI5-----> FM
+                    *************
+
+The headset is removed from the jack by the user so the speakers must now be used :-
+
+                    *************
+PCM0 <============> *           * <----DAI0-----> Codec Headset
+                    *           *
+PCM1 <------------> *           * <====DAI1=====> Codec Speakers
+                    *   DSP     *
+PCM2 <------------> *           * <----DAI2-----> MODEM
+                    *           *
+PCM3 <------------> *           * <----DAI3-----> BT
+                    *           *
+                    *           * <----DAI4-----> DMIC
+                    *           *
+                    *           * <----DAI5-----> FM
+                    *************
+
+The audio driver processes this as follows :-
+
+ 1) Machine driver receives Jack removal event.
+
+ 2) Machine driver OR audio HAL disables the Headset path.
+
+ 3) DPCM runs the PCM trigger(stop), hw_free(), shutdown() operations on DAI0
+    for headset since the path is now disabled.
+
+ 4) Machine driver or audio HAL enables the speaker path.
+
+ 5) DPCM runs the PCM ops for startup(), hw_params(), prepare() and
+    trigger(start) for DAI1 Speakers since the path is enabled.
+
+In this example, the machine driver or userspace audio HAL can alter the routing
+and then DPCM will take care of managing the DAI PCM operations to either bring
+the link up or down. Audio playback does not stop during this transition.
+
+
+
+DPCM machine driver
+===================
+
+The DPCM enabled ASoC machine driver is similar to normal machine drivers
+except that we also have to :-
+
+ 1) Define the FE and BE DAI links.
+
+ 2) Define any FE/BE PCM operations.
+
+ 3) Define widget graph connections.
+
+
+1 FE and BE DAI links
+---------------------
+
+| Front End PCMs    |  SoC DSP  | Back End DAIs | Audio devices |
+
+                    *************
+PCM0 <------------> *           * <----DAI0-----> Codec Headset
+                    *           *
+PCM1 <------------> *           * <----DAI1-----> Codec Speakers
+                    *   DSP     *
+PCM2 <------------> *           * <----DAI2-----> MODEM
+                    *           *
+PCM3 <------------> *           * <----DAI3-----> BT
+                    *           *
+                    *           * <----DAI4-----> DMIC
+                    *           *
+                    *           * <----DAI5-----> FM
+                    *************
+
+For the example above we have to define 4 FE DAI links and 6 BE DAI links. The
+FE DAI links are defined as follows :-
+
+static struct snd_soc_dai_link machine_dais[] = {
+       {
+               .name = "PCM0 System",
+               .stream_name = "System Playback",
+               .cpu_dai_name = "System Pin",
+               .platform_name = "dsp-audio",
+               .codec_name = "snd-soc-dummy",
+               .codec_dai_name = "snd-soc-dummy-dai",
+               .dynamic = 1,
+               .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+               .dpcm_playback = 1,
+       },
+       .....< other FE and BE DAI links here >
+};
+
+This FE DAI link is pretty similar to a regular DAI link except that we also
+mark the DAI link as a DPCM FE with "dynamic = 1". The supported FE stream
+directions should also be set with the "dpcm_playback" and "dpcm_capture"
+flags. There is also an option to specify the ordering of the trigger call for
+each FE. This allows the ASoC core to trigger the DSP before or after the other
+components (as some DSPs have strong requirements for the ordering of DAI/DSP
+start and stop sequences).
+
+The FE DAI above sets the codec and codec DAI to dummy devices since the BE is
+dynamic and will change depending on runtime config.
+
+The BE DAIs are configured as follows :-
+
+static struct snd_soc_dai_link machine_dais[] = {
+       .....< FE DAI links here >
+       {
+               .name = "Codec Headset",
+               .cpu_dai_name = "ssp-dai.0",
+               .platform_name = "snd-soc-dummy",
+               .no_pcm = 1,
+               .codec_name = "rt5640.0-001c",
+               .codec_dai_name = "rt5640-aif1",
+               .ignore_suspend = 1,
+               .ignore_pmdown_time = 1,
+               .be_hw_params_fixup = hswult_ssp0_fixup,
+               .ops = &haswell_ops,
+               .dpcm_playback = 1,
+               .dpcm_capture = 1,
+       },
+       .....< other BE DAI links here >
+};
+
+This BE DAI link connects DAI0 to the codec (in this case RT5640 AIF1). It sets
+the "no_pcm" flag to mark it as a BE and sets flags for supported stream
+directions using "dpcm_playback" and "dpcm_capture" above.
+
+The BE also has flags set for ignoring suspend and PM down time. This allows
+the BE to work in a hostless mode where the host CPU is not transferring data
+like a BT phone call :-
+
+                    *************
+PCM0 <------------> *           * <----DAI0-----> Codec Headset
+                    *           *
+PCM1 <------------> *           * <----DAI1-----> Codec Speakers
+                    *   DSP     *
+PCM2 <------------> *           * <====DAI2=====> MODEM
+                    *           *
+PCM3 <------------> *           * <====DAI3=====> BT
+                    *           *
+                    *           * <----DAI4-----> DMIC
+                    *           *
+                    *           * <----DAI5-----> FM
+                    *************
+
+This allows the host CPU to sleep whilst the DSP, MODEM DAI and the BT DAI are
+still in operation.
+
+A BE DAI link can also set the codec to a dummy device if the codec is a device
+that is managed externally.
+
+Likewise a BE DAI can also set a dummy cpu DAI if the CPU DAI is managed by the
+DSP firmware.
+
+
+2 FE/BE PCM operations
+----------------------
+
+The BE above also exports some PCM operations and a "fixup" callback. The fixup
+callback is used by the machine driver to (re)configure the DAI based upon the
+FE hw params. i.e. the DSP may perform SRC or ASRC from the FE to BE.
+
+e.g. the DSP converts all FE hw params to run at a fixed rate of 48k, 16bit,
+stereo for DAI0. This means all FE hw_params have to be fixed in the machine
+driver for DAI0 so that the DAI is running at the desired configuration
+regardless of the FE configuration.
+
+static int dai0_fixup(struct snd_soc_pcm_runtime *rtd,
+                       struct snd_pcm_hw_params *params)
+{
+       struct snd_interval *rate = hw_param_interval(params,
+                       SNDRV_PCM_HW_PARAM_RATE);
+       struct snd_interval *channels = hw_param_interval(params,
+                                               SNDRV_PCM_HW_PARAM_CHANNELS);
+
+       /* The DSP will convert the FE rate to 48k, stereo */
+       rate->min = rate->max = 48000;
+       channels->min = channels->max = 2;
+
+       /* set DAI0 to 16 bit */
+       snd_mask_set(&params->masks[SNDRV_PCM_HW_PARAM_FORMAT -
+                                   SNDRV_PCM_HW_PARAM_FIRST_MASK],
+                                   SNDRV_PCM_FORMAT_S16_LE);
+       return 0;
+}
+
+The other PCM operations are the same as for regular DAI links. Use as necessary.
+
+
+3 Widget graph connections
+--------------------------
+
+The BE DAI links will normally be connected to the graph at initialisation time
+by the ASoC DAPM core. However, if the BE codec or BE DAI is a dummy then this
+has to be set explicitly in the driver :-
+
+/* BE for codec Headset -  DAI0 is dummy and managed by DSP FW */
+{"DAI0 CODEC IN", NULL, "AIF1 Capture"},
+{"AIF1 Playback", NULL, "DAI0 CODEC OUT"},
+
+
+Writing a DPCM DSP driver
+=========================
+
+The DPCM DSP driver looks much like a standard platform class ASoC driver
+combined with elements from a codec class driver. A DSP platform driver must
+implement :-
+
+ 1) Front End PCM DAIs - i.e. struct snd_soc_dai_driver.
+
+ 2) DAPM graph showing DSP audio routing from FE DAIs to BEs.
+
+ 3) DAPM widgets from DSP graph.
+
+ 4) Mixers for gains, routing, etc.
+
+ 5) DMA configuration.
+
+ 6) BE AIF widgets.
+
+Item 6 is important for routing the audio outside of the DSP. AIFs need to be
+defined for each BE and each stream direction. e.g. for BE DAI0 above we would
+have :-
+
+SND_SOC_DAPM_AIF_IN("DAI0 RX", NULL, 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("DAI0 TX", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+The BE AIF are used to connect the DSP graph to the graphs for the other
+component drivers (e.g. codec graph).
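
To complement the AIF widgets above, the front end PCM DAIs from item 1 are
declared with struct snd_soc_dai_driver like any other DAI.  A minimal sketch
matching the "System Pin" FE used earlier in this document (the rates, formats
and channel counts are illustrative):

static struct snd_soc_dai_driver dsp_fe_dais[] = {
	{
		.name = "System Pin",
		.playback = {
			.stream_name	= "System Playback",
			.channels_min	= 2,
			.channels_max	= 2,
			.rates		= SNDRV_PCM_RATE_8000_48000,
			.formats	= SNDRV_PCM_FMTBIT_S16_LE,
		},
	},
	/* ... more FE DAIs, one per DSP front end ... */
};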
+
+
+Hostless PCM streams
+====================
+
+A hostless PCM stream is a stream that is not routed through the host CPU. An
+example of this would be a phone call from handset to modem.
+
+
+                    *************
+PCM0 <------------> *           * <----DAI0-----> Codec Headset
+                    *           *
+PCM1 <------------> *           * <====DAI1=====> Codec Speakers/Mic
+                    *   DSP     *
+PCM2 <------------> *           * <====DAI2=====> MODEM
+                    *           *
+PCM3 <------------> *           * <----DAI3-----> BT
+                    *           *
+                    *           * <----DAI4-----> DMIC
+                    *           *
+                    *           * <----DAI5-----> FM
+                    *************
+
+In this case the PCM data is routed via the DSP. The host CPU in this use case
+is only used for control and can sleep during the runtime of the stream.
+
+The host can control the hostless link either by :-
+
+ 1) Configuring the link as a CODEC <-> CODEC style link. In this case the link
+    is enabled or disabled by the state of the DAPM graph. This usually means
+    there is a mixer control that can be used to connect or disconnect the path
+    between both DAIs.
+
+ 2) Hostless FE. This FE has a virtual connection to the BE DAI links on the DAPM
+    graph. Control is then carried out by the FE as regular PCM operations.
+    This method gives more control over the DAI links, but requires much more
+    userspace code to control the link. It's recommended to use CODEC<->CODEC
+    unless your HW needs more fine-grained sequencing of the PCM ops.
+
+
+CODEC <-> CODEC link
+--------------------
+
+This DAI link is enabled when DAPM detects a valid path within the DAPM graph.
+The machine driver sets some additional parameters on the DAI link, i.e.
+
+static const struct snd_soc_pcm_stream dai_params = {
+       .formats = SNDRV_PCM_FMTBIT_S32_LE,
+       .rate_min = 8000,
+       .rate_max = 8000,
+       .channels_min = 2,
+       .channels_max = 2,
+};
+
+static struct snd_soc_dai_link dais[] = {
+       < ... more DAI links above ... >
+       {
+               .name = "MODEM",
+               .stream_name = "MODEM",
+               .cpu_dai_name = "dai2",
+               .codec_dai_name = "modem-aif1",
+               .codec_name = "modem",
+               .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+                               | SND_SOC_DAIFMT_CBM_CFM,
+               .params = &dai_params,
+       }
+       < ... more DAI links here ... >
+
+These parameters are used to configure the DAI hw_params() when DAPM detects a
+valid path and then calls the PCM operations to start the link. DAPM will also
+call the appropriate PCM operations to disable the DAI when the path is no
+longer valid.
+
+
+Hostless FE
+-----------
+
+The DAI link(s) are enabled by a FE that does not read or write any PCM data.
+This means creating a new FE that is connected with a virtual path to both
+DAI links. The DAI links will be started when the FE PCM is started and stopped
+when the FE PCM is stopped. Note that the FE PCM cannot read or write data in
+this configuration.
+
+
index bce23a4a78750c630becfc9f6cf9e109ed6721c7..db5f9c9ae1495186e4439705d59f24ea3ce04b49 100644 (file)
@@ -1,22 +1,23 @@
-ASoC Codec Driver
-=================
+ASoC Codec Class Driver
+=======================
 
-The codec driver is generic and hardware independent code that configures the
-codec to provide audio capture and playback. It should contain no code that is
-specific to the target platform or machine. All platform and machine specific
-code should be added to the platform and machine drivers respectively.
+The codec class driver is generic and hardware independent code that configures
+the codec, FM, MODEM, BT or external DSP to provide audio capture and playback.
+It should contain no code that is specific to the target platform or machine.
+All platform and machine specific code should be added to the platform and
+machine drivers respectively.
 
-Each codec driver *must* provide the following features:-
+Each codec class driver *must* provide the following features:-
 
  1) Codec DAI and PCM configuration
- 2) Codec control IO - using I2C, 3 Wire(SPI) or both APIs
+ 2) Codec control IO - using RegMap API
  3) Mixers and audio controls
  4) Codec audio operations
+ 5) DAPM description.
+ 6) DAPM event handler.
 
 Optionally, codec drivers can also provide:-
 
- 5) DAPM description.
- 6) DAPM event handler.
  7) DAC Digital mute control.
 
 Its probably best to use this guide in conjunction with the existing codec
@@ -64,26 +65,9 @@ struct snd_soc_dai_driver wm8731_dai = {
 2 - Codec control IO
 --------------------
 The codec can usually be controlled via an I2C or SPI style interface
-(AC97 combines control with data in the DAI). The codec drivers provide
-functions to read and write the codec registers along with supplying a
-register cache:-
-
-       /* IO control data and register cache */
-       void *control_data; /* codec control (i2c/3wire) data */
-       void *reg_cache;
-
-Codec read/write should do any data formatting and call the hardware
-read write below to perform the IO. These functions are called by the
-core and ALSA when performing DAPM or changing the mixer:-
-
-    unsigned int (*read)(struct snd_soc_codec *, unsigned int);
-    int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
-
-Codec hardware IO functions - usually points to either the I2C, SPI or AC97
-read/write:-
-
-       hw_write_t hw_write;
-       hw_read_t hw_read;
+(AC97 combines control with data in the DAI). The codec driver should use the
+Regmap API for all codec IO. Please see include/linux/regmap.h and existing
+codec drivers for example regmap usage.
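
For orientation only, the regmap of an I2C controlled codec is usually
described with a regmap_config and created in the bus probe() function.  A
rough sketch (the register/value widths, max_register and the "foo" names are
illustrative, not taken from any particular codec):

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 16,
	.max_register	= 0x7f,			/* illustrative */
	.cache_type	= REGCACHE_RBTREE,	/* cache register values */
};

/* in the I2C probe() path */
foo->regmap = devm_regmap_init_i2c(i2c, &foo_regmap_config);
if (IS_ERR(foo->regmap))
	return PTR_ERR(foo->regmap);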
 
 
 3 - Mixers and audio controls
@@ -127,7 +111,7 @@ Defines a stereo enumerated control
 
 4 - Codec Audio Operations
 --------------------------
-The codec driver also supports the following ALSA operations:-
+The codec driver also supports the following ALSA PCM operations:-
 
 /* SoC audio ops */
 struct snd_soc_ops {
index 05bf5a0eee415b8ad29a25a61d90b54e145d9096..7dfd88ce31ac195633b89d728e0dc7787a2f41bf 100644 (file)
@@ -21,7 +21,7 @@ level power systems.
 
 There are 4 power domains within DAPM
 
-   1. Codec domain - VREF, VMID (core codec and audio power)
+   1. Codec bias domain - VREF, VMID (core codec and audio power)
       Usually controlled at codec probe/remove and suspend/resume, although
       can be set at stream time if power is not needed for sidetone, etc.
 
@@ -63,14 +63,22 @@ Audio DAPM widgets fall into a number of types:-
  o Line       - Line Input/Output (and optional Jack)
  o Speaker    - Speaker
  o Supply     - Power or clock supply widget used by other widgets.
+ o Regulator  - External regulator that supplies power to audio components.
+ o Clock      - External clock that supplies clock to audio components.
+ o AIF IN     - Audio Interface Input (with TDM slot mask).
+ o AIF OUT    - Audio Interface Output (with TDM slot mask).
+ o Siggen     - Signal Generator.
+ o DAI IN     - Digital Audio Interface Input.
+ o DAI OUT    - Digital Audio Interface Output.
+ o DAI Link   - DAI Link between two DAI structures
  o Pre        - Special PRE widget (exec before all others)
  o Post       - Special POST widget (exec after all others)
 
 (Widgets are defined in include/sound/soc-dapm.h)
 
-Widgets are usually added in the codec driver and the machine driver. There are
-convenience macros defined in soc-dapm.h that can be used to quickly build a
-list of widgets of the codecs and machines DAPM widgets.
+Widgets can be added to the sound card by any of the component driver types.
+There are convenience macros defined in soc-dapm.h that can be used to quickly
+build a list of the codec and machine DAPM widgets.
 
 Most widgets have a name, register, shift and invert. Some widgets have extra
 parameters for stream name and kcontrols.
@@ -80,11 +88,13 @@ parameters for stream name and kcontrols.
 -------------------------
 
 Stream Widgets relate to the stream power domain and only consist of ADCs
-(analog to digital converters) and DACs (digital to analog converters).
+(analog to digital converters), DACs (digital to analog converters),
+AIF IN and AIF OUT.
 
 Stream widgets have the following format:-
 
 SND_SOC_DAPM_DAC(name, stream name, reg, shift, invert),
+SND_SOC_DAPM_AIF_IN(name, stream, slot, reg, shift, invert)
 
 NOTE: the stream name must match the corresponding stream name in your codec
 snd_soc_codec_dai.
@@ -94,6 +104,11 @@ e.g. stream widgets for HiFi playback and capture
 SND_SOC_DAPM_DAC("HiFi DAC", "HiFi Playback", REG, 3, 1),
 SND_SOC_DAPM_ADC("HiFi ADC", "HiFi Capture", REG, 2, 1),
 
+e.g. stream widgets for AIF
+
+SND_SOC_DAPM_AIF_IN("AIF1RX", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+
 
 2.2 Path Domain Widgets
 -----------------------
@@ -121,12 +136,14 @@ If you dont want the mixer elements prefixed with the name of the mixer widget,
 you can use SND_SOC_DAPM_MIXER_NAMED_CTL instead. the parameters are the same
 as for SND_SOC_DAPM_MIXER.
 
-2.3 Platform/Machine domain Widgets
------------------------------------
+
+2.3 Machine domain Widgets
+--------------------------
 
 Machine widgets are different from codec widgets in that they don't have a
 codec register bit associated with them. A machine widget is assigned to each
-machine audio component (non codec) that can be independently powered. e.g.
+machine audio component (non codec or DSP) that can be independently
+powered. e.g.
 
  o Speaker Amp
  o Microphone Bias
@@ -146,12 +163,12 @@ static int spitz_mic_bias(struct snd_soc_dapm_widget* w, int event)
 SND_SOC_DAPM_MIC("Mic Jack", spitz_mic_bias),
 
 
-2.4 Codec Domain
-----------------
+2.4 Codec (BIAS) Domain
+-----------------------
 
-The codec power domain has no widgets and is handled by the codecs DAPM event
-handler. This handler is called when the codec powerstate is changed wrt to any
-stream event or by kernel PM events.
+The codec bias power domain has no widgets and is handled by the codecs DAPM
+event handler. This handler is called when the codec powerstate is changed wrt
+to any stream event or by kernel PM events.
 
 
 2.5 Virtual Widgets
@@ -169,15 +186,16 @@ After all the widgets have been defined, they can then be added to the DAPM
 subsystem individually with a call to snd_soc_dapm_new_control().
 
 
-3. Codec Widget Interconnections
-================================
+3. Codec/DSP Widget Interconnections
+====================================
 
-Widgets are connected to each other within the codec and machine by audio paths
-(called interconnections). Each interconnection must be defined in order to
-create a map of all audio paths between widgets.
+Widgets are connected to each other within the codec, platform and machine by
+audio paths (called interconnections). Each interconnection must be defined in
+order to create a map of all audio paths between widgets.
 
-This is easiest with a diagram of the codec (and schematic of the machine audio
-system), as it requires joining widgets together via their audio signal paths.
+This is easiest with a diagram of the codec or DSP (and schematic of the machine
+audio system), as it requires joining widgets together via their audio signal
+paths.
 
 e.g., from the WM8731 output mixer (wm8731.c)
 
@@ -247,16 +265,9 @@ machine and includes the codec. e.g.
  o Mic Jack
  o Codec Pins
 
-When a codec pin is NC it can be marked as not used with a call to
-
-snd_soc_dapm_set_endpoint(codec, "Widget Name", 0);
-
-The last argument is 0 for inactive and 1 for active. This way the pin and its
-input widget will never be powered up and consume power.
-
-This also applies to machine widgets. e.g. if a headphone is connected to a
-jack then the jack can be marked active. If the headphone is removed, then
-the headphone jack can be marked inactive.
+Endpoints are added to the DAPM graph so that their usage can be determined in
+order to save power. e.g. NC codec pins will be switched OFF, unconnected
+jacks can also be switched OFF.
 
 
 5 DAPM Widget Events
index d50c14df34112ed2095942062bcaab90d90697bd..74056dba52be7aab7adbcadd45064fa71d79534f 100644 (file)
@@ -1,8 +1,10 @@
 ASoC Machine Driver
 ===================
 
-The ASoC machine (or board) driver is the code that glues together the platform
-and codec drivers.
+The ASoC machine (or board) driver is the code that glues together all the
+component drivers (e.g. codecs, platforms and DAIs). It also describes the
+relationships between each component, which include audio paths, GPIOs,
+interrupts, clocking, jacks and voltage regulators.
 
 The machine driver can contain codec and platform specific code. It registers
 the audio subsystem with the kernel as a platform device and is represented by
index d57efad37e0a5b33606599f6735f180706fd228d..3a08a2c9150c991ec6ff9be44633d592578f0b94 100644 (file)
@@ -1,9 +1,9 @@
 ASoC Platform Driver
 ====================
 
-An ASoC platform driver can be divided into audio DMA and SoC DAI configuration
-and control. The platform drivers only target the SoC CPU and must have no board
-specific code.
+An ASoC platform driver class can be divided into audio DMA drivers, SoC DAI
+drivers and DSP drivers. The platform drivers only target the SoC CPU and must
+have no board specific code.
 
 Audio DMA
 =========
@@ -64,3 +64,16 @@ Each SoC DAI driver must provide the following features:-
  5) Suspend and resume (optional)
 
 Please see codec.txt for a description of items 1 - 4.
+
+
+SoC DSP Drivers
+===============
+
+Each SoC DSP driver usually supplies the following features :-
+
+ 1) DAPM graph
+ 2) Mixer controls
+ 3) DMA IO to/from DSP buffers (if applicable)
+ 4) Definition of DSP front end (FE) PCM devices.
+
+Please see DPCM.txt for a description of item 4.
index 8a0cbf3cf2c8c01c5be662eef372eff4060daffc..480d77d216200178df0f3c6a2111d4164be49287 100644 (file)
@@ -237,11 +237,11 @@ F:        drivers/platform/x86/acer-wmi.c
 
 ACPI
 M:     Len Brown <lenb@kernel.org>
-M:     Rafael J. Wysocki <rjw@sisk.pl>
+M:     Rafael J. Wysocki <rjw@rjwysocki.net>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
-Q:     http://patchwork.kernel.org/project/linux-acpi/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
+W:     https://01.org/linux-acpi
+Q:     https://patchwork.kernel.org/project/linux-acpi/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
 S:     Supported
 F:     drivers/acpi/
 F:     drivers/pnp/pnpacpi/
@@ -253,24 +253,38 @@ F:        drivers/pci/*acpi*
 F:     drivers/pci/*/*acpi*
 F:     drivers/pci/*/*/*acpi*
 
+ACPI COMPONENT ARCHITECTURE (ACPICA)
+M:     Robert Moore <robert.moore@intel.com>
+M:     Lv Zheng <lv.zheng@intel.com>
+M:     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+L:     linux-acpi@vger.kernel.org
+L:     devel@acpica.org
+W:     https://acpica.org/
+W:     https://github.com/acpica/acpica/
+Q:     https://patchwork.kernel.org/project/linux-acpi/list/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
+S:     Supported
+F:     drivers/acpi/acpica/
+F:     include/acpi/
+
 ACPI FAN DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/acpi/fan.c
 
 ACPI THERMAL DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/acpi/*thermal*
 
 ACPI VIDEO DRIVER
 M:     Zhang Rui <rui.zhang@intel.com>
 L:     linux-acpi@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/acpi/video.c
 
@@ -1402,7 +1416,7 @@ M:        Wolfram Sang <wsa@the-dreams.de>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/misc/eeprom/at24.c
-F:     include/linux/i2c/at24.h
+F:     include/linux/platform_data/at24.h
 
 ATA OVER ETHERNET (AOE) DRIVER
 M:     "Ed L. Cashin" <ecashin@coraid.com>
@@ -1658,9 +1672,9 @@ F:        drivers/video/backlight/
 F:     include/linux/backlight.h
 
 BATMAN ADVANCED
-M:     Marek Lindner <lindner_marek@yahoo.de>
-M:     Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
-M:     Antonio Quartulli <ordex@autistici.org>
+M:     Marek Lindner <mareklindner@neomailbox.ch>
+M:     Simon Wunderlich <sw@simonwunderlich.de>
+M:     Antonio Quartulli <antonio@meshcoding.com>
 L:     b.a.t.m.a.n@lists.open-mesh.org
 W:     http://www.open-mesh.org/
 S:     Maintained
@@ -1791,6 +1805,7 @@ F:        include/net/bluetooth/
 
 BONDING DRIVER
 M:     Jay Vosburgh <fubar@us.ibm.com>
+M:     Veaceslav Falico <vfalico@redhat.com>
 M:     Andy Gospodarek <andy@greyhouse.net>
 L:     netdev@vger.kernel.org
 W:     http://sourceforge.net/projects/bonding/
@@ -2300,7 +2315,7 @@ S:        Maintained
 F:     drivers/net/ethernet/ti/cpmac.c
 
 CPU FREQUENCY DRIVERS
-M:     Rafael J. Wysocki <rjw@sisk.pl>
+M:     Rafael J. Wysocki <rjw@rjwysocki.net>
 M:     Viresh Kumar <viresh.kumar@linaro.org>
 L:     cpufreq@vger.kernel.org
 L:     linux-pm@vger.kernel.org
@@ -2331,7 +2346,7 @@ S:      Maintained
 F:      drivers/cpuidle/cpuidle-big_little.c
 
 CPUIDLE DRIVERS
-M:     Rafael J. Wysocki <rjw@sisk.pl>
+M:     Rafael J. Wysocki <rjw@rjwysocki.net>
 M:     Daniel Lezcano <daniel.lezcano@linaro.org>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
@@ -2718,6 +2733,8 @@ T:        git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:     Vinod Koul <vinod.koul@intel.com>
 M:     Dan Williams <dan.j.williams@intel.com>
+L:     dmaengine@vger.kernel.org
+Q:     https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:     Supported
 F:     drivers/dma/
 F:     include/linux/dma*
@@ -2822,7 +2839,9 @@ L:        dri-devel@lists.freedesktop.org
 L:     linux-tegra@vger.kernel.org
 T:     git git://anongit.freedesktop.org/tegra/linux.git
 S:     Maintained
+F:     drivers/gpu/drm/tegra/
 F:     drivers/gpu/host1x/
+F:     include/linux/host1x.h
 F:     include/uapi/drm/tegra_drm.h
 F:     Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
 
@@ -3553,7 +3572,7 @@ F:        fs/freevxfs/
 
 FREEZER
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@sisk.pl>
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/power/freezing-of-tasks.txt
@@ -3624,6 +3643,12 @@ L:       linux-scsi@vger.kernel.org
 S:     Odd Fixes (e.g., new signatures)
 F:     drivers/scsi/fdomain.*
 
+GCOV BASED KERNEL PROFILING
+M:     Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
+S:     Maintained
+F:     kernel/gcov/
+F:     Documentation/gcov.txt
+
 GDT SCSI DISK ARRAY CONTROLLER DRIVER
 M:     Achim Leubner <achim_leubner@adaptec.com>
 L:     linux-scsi@vger.kernel.org
@@ -3889,7 +3914,7 @@ F:        drivers/video/hgafb.c
 
 HIBERNATION (aka Software Suspend, aka swsusp)
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@sisk.pl>
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     arch/x86/power/
@@ -4339,7 +4364,7 @@ F:        drivers/video/i810/
 INTEL MENLOW THERMAL DRIVER
 M:     Sujith Thomas <sujith.thomas@intel.com>
 L:     platform-driver-x86@vger.kernel.org
-W:     http://www.lesswatts.org/projects/acpi/
+W:     https://01.org/linux-acpi
 S:     Supported
 F:     drivers/platform/x86/intel_menlow.c
 
@@ -4351,7 +4376,10 @@ F:       arch/x86/kernel/microcode_intel.c
 
 INTEL I/OAT DMA DRIVER
 M:     Dan Williams <dan.j.williams@intel.com>
-S:     Maintained
+M:     Dave Jiang <dave.jiang@intel.com>
+L:     dmaengine@vger.kernel.org
+Q:     https://patchwork.kernel.org/project/linux-dmaengine/list/
+S:     Supported
 F:     drivers/dma/ioat*
 
 INTEL IOMMU (VT-d)
@@ -6372,6 +6400,12 @@ S:       Supported
 F:     Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
 F:     drivers/pci/host/pci-tegra.c
 
+PCI DRIVER FOR SAMSUNG EXYNOS
+M:     Jingoo Han <jg1.han@samsung.com>
+L:     linux-pci@vger.kernel.org
+S:     Maintained
+F:     drivers/pci/host/pci-exynos.c
+
 PCMCIA SUBSYSTEM
 P:     Linux PCMCIA Team
 L:     linux-pcmcia@lists.infradead.org
@@ -6842,6 +6876,14 @@ L:       linux-hexagon@vger.kernel.org
 S:     Supported
 F:     arch/hexagon/
 
+QUALCOMM WCN36XX WIRELESS DRIVER
+M:     Eugene Krasnikov <k.eugene.e@gmail.com>
+L:     wcn36xx@lists.infradead.org
+W:     http://wireless.kernel.org/en/users/Drivers/wcn36xx
+T:     git git://github.com/KrasnikovEugene/wcn36xx.git
+S:     Supported
+F:     drivers/net/wireless/ath/wcn36xx/
+
 QUICKCAM PARALLEL PORT WEBCAMS
 M:     Hans Verkuil <hverkuil@xs4all.nl>
 L:     linux-media@vger.kernel.org
@@ -7816,6 +7858,13 @@ F:       Documentation/sound/alsa/soc/
 F:     sound/soc/
 F:     include/sound/soc*
 
+SOUND - DMAENGINE HELPERS
+M:     Lars-Peter Clausen <lars@metafoo.de>
+S:     Supported
+F:     include/sound/dmaengine_pcm.h
+F:     sound/core/pcm_dmaengine.c
+F:     sound/soc/soc-generic-dmaengine-pcm.c
+
 SPARC + UltraSPARC (sparc/sparc64)
 M:     "David S. Miller" <davem@davemloft.net>
 L:     sparclinux@vger.kernel.org
@@ -8095,7 +8144,7 @@ F:        drivers/sh/
 SUSPEND TO RAM
 M:     Len Brown <len.brown@intel.com>
 M:     Pavel Machek <pavel@ucw.cz>
-M:     "Rafael J. Wysocki" <rjw@sisk.pl>
+M:     "Rafael J. Wysocki" <rjw@rjwysocki.net>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     Documentation/power/
@@ -8596,14 +8645,6 @@ S:       Maintained
 F:     arch/m68k/*/*_no.*
 F:     arch/m68k/include/asm/*_no.*
 
-UCLINUX FOR RENESAS H8/300 (H8300)
-M:     Yoshinori Sato <ysato@users.sourceforge.jp>
-W:     http://uclinux-h8.sourceforge.jp/
-S:     Supported
-F:     arch/h8300/
-F:     drivers/ide/ide-h8300.c
-F:     drivers/net/ethernet/8390/ne-h8300.c
-
 UDF FILESYSTEM
 M:     Jan Kara <jack@suse.cz>
 S:     Maintained
index 9de9aba21bf9cf9d2a7c467e816417c02b217ecc..7ab985d60cc08f6eef19f75ce8cadf9920a89e48 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
@@ -659,6 +659,12 @@ KBUILD_CFLAGS      += $(call cc-option,-fno-strict-overflow)
 # conserve stack if available
 KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
 
+# disallow errors like 'EXPORT_GPL(foo);' with missing header
+KBUILD_CFLAGS   += $(call cc-option,-Werror=implicit-int)
+
+# require functions to have arguments in prototypes, not empty 'int foo()'
+KBUILD_CFLAGS   += $(call cc-option,-Werror=strict-prototypes)
+
 # use the deterministic mode of AR if available
 KBUILD_ARFLAGS := $(call ar-option,D)
 
index 35a300d4a9fb37f3a08e3f63365c72bcc2a3ecbf..84803f88a1693afebb1ecea98ec4a165be264db6 100644 (file)
@@ -1,6 +1,7 @@
 config ALPHA
        bool
        default y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_AOUT
        select HAVE_IDE
        select HAVE_OPROFILE
index 467de010ea7ee130cdbda0b532f10c885f8d1308..e3a1491d5073a0b59a6b9eb207c4a684ae6c9495 100644 (file)
@@ -81,6 +81,8 @@
 
 #define SO_SELECT_ERR_QUEUE    45
 
-#define SO_BUSY_POLL                   46
+#define SO_BUSY_POLL           46
+
+#define SO_MAX_PACING_RATE     47
 
 #endif /* _UAPI_ASM_SOCKET_H */
index 72f97822784a4627da66e835002c309b84dc14d9..eb1c2ee5eaf0a4e0b8811092568943ed96be5388 100644 (file)
@@ -87,13 +87,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
 }
 
 static inline void __kprobes set_current_kprobe(struct kprobe *p)
 {
-       __get_cpu_var(current_kprobe) = p;
+       __this_cpu_write(current_kprobe, p);
 }
 
 static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
@@ -237,7 +237,7 @@ int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
 
                return 1;
        } else if (kprobe_running()) {
-               p = __get_cpu_var(current_kprobe);
+               p = __this_cpu_read(current_kprobe);
                if (p->break_handler && p->break_handler(p, regs)) {
                        setup_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_HIT_SS;
index 333238564b67f1239afde4ab4d584f16d8a48572..5d76706139dd36a246eb545f36fe42c1bf44ee9d 100644 (file)
@@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
        REG_IGNORE_ONE(pad2);
        REG_IN_CHUNK(callee, efa, cregs);       /* callee_regs[r25..r13] */
        REG_IGNORE_ONE(efa);                    /* efa update invalid */
-       REG_IN_ONE(stop_pc, &ptregs->ret);      /* stop_pc: PC update */
+       REG_IGNORE_ONE(stop_pc);                        /* PC updated via @ret */
 
        return ret;
 }
index 3fde7de3ea670351ac69e0f441f35f776ebbb7d8..4c21dde2f6a9625bc4ff4bc57126d4783fc73214 100644 (file)
@@ -206,7 +206,7 @@ static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
 
 static irqreturn_t timer_irq_handler(int irq, void *dev_id)
 {
-       struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
+       struct clock_event_device *clk = this_cpu_ptr(&arc_clockevent_device);
 
        arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
        clk->event_handler(clk);
index 1ad6fb6c094db415ec76a72a28356e75bdfd7d17..b6a708ef6067124e3b8b7e3d0cc8aebccb72787d 100644 (file)
@@ -5,6 +5,8 @@ config ARM
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAVE_CUSTOM_GPIO_H
+       select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT if MMU
        select CLONE_BACKWARDS
@@ -51,6 +53,8 @@ config ARM
        select HAVE_MOD_ARCH_SPECIFIC if ARM_UNWIND
        select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
        select HAVE_PERF_EVENTS
+       select HAVE_PERF_REGS
+       select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_UID16
@@ -692,7 +696,6 @@ config ARCH_SA1100
        select GENERIC_CLOCKEVENTS
        select HAVE_IDE
        select ISA
-       select NEED_MACH_GPIO_H
        select NEED_MACH_MEMORY_H
        select SPARSE_IRQ
        help
@@ -1549,6 +1552,32 @@ config MCPM
          for (multi-)cluster based systems, such as big.LITTLE based
          systems.
 
+config BIG_LITTLE
+       bool "big.LITTLE support (Experimental)"
+       depends on CPU_V7 && SMP
+       select MCPM
+       help
+         This option enables support selections for the big.LITTLE
+         system architecture.
+
+config BL_SWITCHER
+       bool "big.LITTLE switcher support"
+       depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
+       select CPU_PM
+       select ARM_CPU_SUSPEND
+       help
+         The big.LITTLE "switcher" provides the core functionality to
+         transparently handle transition between a cluster of A15's
+         and a cluster of A7's in a big.LITTLE system.
+
+config BL_SWITCHER_DUMMY_IF
+       tristate "Simple big.LITTLE switcher user interface"
+       depends on BL_SWITCHER && DEBUG_KERNEL
+       help
+         This is a simple and dummy char dev interface to control
+         the big.LITTLE switcher core code.  It is meant for
+         debugging purposes only.
+
 choice
        prompt "Memory split"
        default VMSPLIT_3G
index 9762c84b419845f05ee6a7d1f9f95084dde310b5..2b3206824353f612d3c72e81eae0b1a157f9d1c7 100644 (file)
@@ -834,6 +834,20 @@ choice
                  options; the platform specific options are deprecated
                  and will be soon removed.
 
+       config DEBUG_LL_UART_EFM32
+               bool "Kernel low-level debugging via efm32 UART"
+               depends on ARCH_EFM32
+               help
+                 Say Y here if you want the debug print routines to direct
+                 their output to an UART or USART port on efm32 based
+                 their output to a UART or USART port on efm32 based
+
+                   0x4000c000 | USART0
+                   0x4000c400 | USART1
+                   0x4000c800 | USART2
+                   0x4000e000 | UART0
+                   0x4000e400 | UART1
+
        config DEBUG_LL_UART_PL01X
                bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
                help
@@ -885,6 +899,7 @@ config DEBUG_LL_INCLUDE
        default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
        default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
        default "debug/exynos.S" if DEBUG_EXYNOS_UART
+       default "debug/efm32.S" if DEBUG_LL_UART_EFM32
        default "debug/icedcc.S" if DEBUG_ICEDCC
        default "debug/imx.S" if DEBUG_IMX1_UART || \
                                 DEBUG_IMX25_UART || \
@@ -951,6 +966,7 @@ config DEBUG_UART_PHYS
        default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
        default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
        default 0x20201000 if DEBUG_BCM2835
+       default 0x4000e400 if DEBUG_LL_UART_EFM32
        default 0x40090000 if ARCH_LPC32XX
        default 0x40100000 if DEBUG_PXA_UART1
        default 0x42000000 if ARCH_GEMINI
@@ -981,6 +997,7 @@ config DEBUG_UART_PHYS
        default 0xfff36000 if DEBUG_HIGHBANK_UART
        default 0xfffff700 if ARCH_IOP33X
        depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+               DEBUG_LL_UART_EFM32 || \
                DEBUG_UART_8250 || DEBUG_UART_PL01X
 
 config DEBUG_UART_VIRT
index a37a50f575a27af2c95abca5c473d6a60233d8b9..db50b626be9871f7426ee394a9b189d8d504c8a3 100644 (file)
@@ -296,10 +296,15 @@ archprepare:
 # Convert bzImage to zImage
 bzImage: zImage
 
-zImage Image xipImage bootpImage uImage: vmlinux
+BOOT_TARGETS   = zImage Image xipImage bootpImage uImage
+INSTALL_TARGETS        = zinstall uinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+$(BOOT_TARGETS): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-zinstall uinstall install: vmlinux
+$(INSTALL_TARGETS):
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
 %.dtb: | scripts
index 84aa2caf07ed203fb810220258401a1b51f7cab3..ec2f8065f955c5c31a69888bf261c58ea56ffb6d 100644 (file)
@@ -95,24 +95,24 @@ initrd:
        @test "$(INITRD)" != "" || \
        (echo You must specify INITRD; exit -1)
 
-install: $(obj)/Image
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+install:
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
        $(obj)/Image System.map "$(INSTALL_PATH)"
 
-zinstall: $(obj)/zImage
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+zinstall:
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
        $(obj)/zImage System.map "$(INSTALL_PATH)"
 
-uinstall: $(obj)/uImage
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+uinstall:
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
        $(obj)/uImage System.map "$(INSTALL_PATH)"
 
 zi:
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
        $(obj)/zImage System.map "$(INSTALL_PATH)"
 
 i:
-       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
        $(obj)/Image System.map "$(INSTALL_PATH)"
 
 subdir-            := bootp compressed dts
index f9c5da9c7fe1ce7d56557fb4582a0d9a53bbcfde..d76ae24c97453ff94ee9bc7232ed8283c8dbc01c 100644 (file)
                                /* Filled in by U-Boot */
                                mac-address = [ 00 00 00 00 00 00 ];
                        };
+
+                       phy_sel: cpsw-phy-sel@44e10650 {
+                               compatible = "ti,am3352-cpsw-phy-sel";
+                               reg= <0x44e10650 0x4>;
+                               reg-names = "gmii-sel";
+                       };
                };
 
                ocmcram: ocmcram@40300000 {
index 7d7cc777ff7b76099e8de5098a4a3a044a208ca9..bbac42a78ce543c2790a7943697afda1eba2e2aa 100644 (file)
                             <1 14 0xf08>,
                             <1 11 0xf08>,
                             <1 10 0xf08>;
+               /* Unfortunately we need this since some versions of U-Boot
+                * on Exynos don't set the CNTFRQ register, so we need the
+                * value from DT.
+                */
+               clock-frequency = <24000000>;
        };
 
        mct@101C0000 {
index 0c514dc8460c2423299848748278a452638e535c..2816bf61267231fd7ec2dd11f87c6da9ed4b3311 100644 (file)
@@ -11,7 +11,7 @@
 
 / {
        model = "TI OMAP3 BeagleBoard xM";
-       compatible = "ti,omap3-beagle-xm", "ti,omap3-beagle", "ti,omap3";
+       compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
 
        cpus {
                cpu@0 {
index 7d95cda1fae4f0349bdfb582de99be2baf36dbf6..b41bd57f43287a048b73bfefe35b262ee139600e 100644 (file)
                        #address-cells = <1>;
                        #size-cells = <0>;
                        pinctrl-single,register-width = <16>;
-                       pinctrl-single,function-mask = <0x7f1f>;
+                       pinctrl-single,function-mask = <0xff1f>;
                };
 
                omap3_pmx_wkup: pinmux@0x48002a00 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        pinctrl-single,register-width = <16>;
-                       pinctrl-single,function-mask = <0x7f1f>;
+                       pinctrl-single,function-mask = <0xff1f>;
                };
 
                gpio1: gpio@48310000 {
index 06ea7d42ce8e6bb9f72633a14abd2ace113fa712..2a45092a40e3251e44447f3349e80ee0a69e97a4 100644 (file)
 #   $4 - default install path (blank if root directory)
 #
 
+verify () {
+       if [ ! -f "$1" ]; then
+               echo ""                                                   1>&2
+               echo " *** Missing file: $1"                              1>&2
+               echo ' *** You need to run "make" before "make install".' 1>&2
+               echo ""                                                   1>&2
+               exit 1
+       fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
 # User may have a custom install script
 if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
 if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
index 8c60f473e97625cb8c0bb068375da7368d1ff6fe..5c8584c4944d780f90bcedb40b8224fb60fa364f 100644 (file)
@@ -17,3 +17,5 @@ obj-$(CONFIG_MCPM)            += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
 AFLAGS_mcpm_head.o             := -march=armv7-a
 AFLAGS_vlock.o                 := -march=armv7-a
 obj-$(CONFIG_TI_PRIV_EDMA)     += edma.o
+obj-$(CONFIG_BL_SWITCHER)      += bL_switcher.o
+obj-$(CONFIG_BL_SWITCHER_DUMMY_IF) += bL_switcher_dummy_if.o
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
new file mode 100644 (file)
index 0000000..63bbc4f
--- /dev/null
@@ -0,0 +1,822 @@
+/*
+ * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
+ *
+ * Created by: Nicolas Pitre, March 2012
+ * Copyright:  (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/atomic.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/clockchips.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/notifier.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/moduleparam.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cputype.h>
+#include <asm/suspend.h>
+#include <asm/mcpm.h>
+#include <asm/bL_switcher.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/power_cpu_migrate.h>
+
+
+/*
+ * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
+ * __attribute_const__ and we don't want the compiler to assume any
+ * constness here as the value _does_ change along some code paths.
+ */
+
+static int read_mpidr(void)
+{
+       unsigned int id;
+       asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
+       return id & MPIDR_HWID_BITMASK;
+}
+
+/*
+ * Get a global nanosecond time stamp for tracing.
+ */
+static s64 get_ns(void)
+{
+       struct timespec ts;
+       getnstimeofday(&ts);
+       return timespec_to_ns(&ts);
+}
+
+/*
+ * bL switcher core code.
+ */
+
+static void bL_do_switch(void *_arg)
+{
+       unsigned ib_mpidr, ib_cpu, ib_cluster;
+       long volatile handshake, **handshake_ptr = _arg;
+
+       pr_debug("%s\n", __func__);
+
+       ib_mpidr = cpu_logical_map(smp_processor_id());
+       ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+       ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+       /* Advertise our handshake location */
+       if (handshake_ptr) {
+               handshake = 0;
+               *handshake_ptr = &handshake;
+       } else
+               handshake = -1;
+
+       /*
+        * Our state has been saved at this point.  Let's release our
+        * inbound CPU.
+        */
+       mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
+       sev();
+
+       /*
+        * From this point, we must assume that our counterpart CPU might
+        * have taken over in its parallel world already, as if execution
+        * just returned from cpu_suspend().  It is therefore important to
+        * be very careful not to make any change the other guy is not
+        * expecting.  This is why we need stack isolation.
+        *
+        * Fancy undercover tasks could be performed here.  For now
+        * we have none.
+        */
+
+       /*
+        * Let's wait until our inbound is alive.
+        */
+       while (!handshake) {
+               wfe();
+               smp_mb();
+       }
+
+       /* Let's put ourselves down. */
+       mcpm_cpu_power_down();
+
+       /* should never get here */
+       BUG();
+}
+
+/*
+ * Stack isolation.  To ensure 'current' remains valid, we just use another
+ * piece of our thread's stack space which should be fairly lightly used.
+ * The selected area starts just above the thread_info structure located
+ * at the very bottom of the stack, aligned to a cache line, and indexed
+ * with the cluster number.
+ */
+#define STACK_SIZE 512
+extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
+static int bL_switchpoint(unsigned long _arg)
+{
+       unsigned int mpidr = read_mpidr();
+       unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+       void *stack = current_thread_info() + 1;
+       stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
+       stack += clusterid * STACK_SIZE + STACK_SIZE;
+       call_with_stack(bL_do_switch, (void *)_arg, stack);
+       BUG();
+}
+
+/*
+ * Generic switcher interface
+ */
+
+static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
+static int bL_switcher_cpu_pairing[NR_CPUS];
+
+/*
+ * bL_switch_to - Switch to a specific cluster for the current CPU
+ * @new_cluster_id: the ID of the cluster to switch to.
+ *
+ * This function must be called on the CPU to be switched.
+ * Returns 0 on success, else a negative status code.
+ */
+static int bL_switch_to(unsigned int new_cluster_id)
+{
+       unsigned int mpidr, this_cpu, that_cpu;
+       unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
+       struct completion inbound_alive;
+       struct tick_device *tdev;
+       enum clock_event_mode tdev_mode;
+       long volatile *handshake_ptr;
+       int ipi_nr, ret;
+
+       this_cpu = smp_processor_id();
+       ob_mpidr = read_mpidr();
+       ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
+       ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
+       BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);
+
+       if (new_cluster_id == ob_cluster)
+               return 0;
+
+       that_cpu = bL_switcher_cpu_pairing[this_cpu];
+       ib_mpidr = cpu_logical_map(that_cpu);
+       ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
+       ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);
+
+       pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
+                this_cpu, ob_mpidr, ib_mpidr);
+
+       this_cpu = smp_processor_id();
+
+       /* Close the gate for our entry vectors */
+       mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
+       mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
+
+       /* Install our "inbound alive" notifier. */
+       init_completion(&inbound_alive);
+       ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
+       ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
+       mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
+
+       /*
+        * Let's wake up the inbound CPU now in case it requires some delay
+        * to come online, but leave it gated in our entry vector code.
+        */
+       ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
+       if (ret) {
+               pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
+               return ret;
+       }
+
+       /*
+        * Raise a SGI on the inbound CPU to make sure it doesn't stall
+        * in a possible WFI, such as in bL_power_down().
+        */
+       gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
+
+       /*
+        * Wait for the inbound to come up.  This allows for other
+        * tasks to be scheduled in the meantime.
+        */
+       wait_for_completion(&inbound_alive);
+       mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
+
+       /*
+        * From this point we are entering the switch critical zone
+        * and can't take any interrupts anymore.
+        */
+       local_irq_disable();
+       local_fiq_disable();
+       trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+
+       /* redirect GIC's SGIs to our counterpart */
+       gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
+
+       tdev = tick_get_device(this_cpu);
+       if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
+               tdev = NULL;
+       if (tdev) {
+               tdev_mode = tdev->evtdev->mode;
+               clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
+       }
+
+       ret = cpu_pm_enter();
+
+       /* we can not tolerate errors at this point */
+       if (ret)
+               panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);
+
+       /* Swap the physical CPUs in the logical map for this logical CPU. */
+       cpu_logical_map(this_cpu) = ib_mpidr;
+       cpu_logical_map(that_cpu) = ob_mpidr;
+
+       /* Let's do the actual CPU switch. */
+       ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
+       if (ret > 0)
+               panic("%s: cpu_suspend() returned %d\n", __func__, ret);
+
+       /* We are executing on the inbound CPU at this point */
+       mpidr = read_mpidr();
+       pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
+       BUG_ON(mpidr != ib_mpidr);
+
+       mcpm_cpu_powered_up();
+
+       ret = cpu_pm_exit();
+
+       if (tdev) {
+               clockevents_set_mode(tdev->evtdev, tdev_mode);
+               clockevents_program_event(tdev->evtdev,
+                                         tdev->evtdev->next_event, 1);
+       }
+
+       trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+       local_fiq_enable();
+       local_irq_enable();
+
+       *handshake_ptr = 1;
+       dsb_sev();
+
+       if (ret)
+               pr_err("%s exiting with error %d\n", __func__, ret);
+       return ret;
+}
+
+struct bL_thread {
+       spinlock_t lock;
+       struct task_struct *task;
+       wait_queue_head_t wq;
+       int wanted_cluster;
+       struct completion started;
+       bL_switch_completion_handler completer;
+       void *completer_cookie;
+};
+
+static struct bL_thread bL_threads[NR_CPUS];
+
+static int bL_switcher_thread(void *arg)
+{
+       struct bL_thread *t = arg;
+       struct sched_param param = { .sched_priority = 1 };
+       int cluster;
+       bL_switch_completion_handler completer;
+       void *completer_cookie;
+
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+       complete(&t->started);
+
+       do {
+               if (signal_pending(current))
+                       flush_signals(current);
+               wait_event_interruptible(t->wq,
+                               t->wanted_cluster != -1 ||
+                               kthread_should_stop());
+
+               spin_lock(&t->lock);
+               cluster = t->wanted_cluster;
+               completer = t->completer;
+               completer_cookie = t->completer_cookie;
+               t->wanted_cluster = -1;
+               t->completer = NULL;
+               spin_unlock(&t->lock);
+
+               if (cluster != -1) {
+                       bL_switch_to(cluster);
+
+                       if (completer)
+                               completer(completer_cookie);
+               }
+       } while (!kthread_should_stop());
+
+       return 0;
+}
+
+static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
+{
+       struct task_struct *task;
+
+       task = kthread_create_on_node(bL_switcher_thread, arg,
+                                     cpu_to_node(cpu), "kswitcher_%d", cpu);
+       if (!IS_ERR(task)) {
+               kthread_bind(task, cpu);
+               wake_up_process(task);
+       } else
+               pr_err("%s failed for CPU %d\n", __func__, cpu);
+       return task;
+}
+
+/*
+ * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
+ *      with completion notification via a callback
+ *
+ * @cpu: the CPU to switch
+ * @new_cluster_id: the ID of the cluster to switch to.
+ * @completer: switch completion callback.  If non-NULL,
+ *     @completer(@completer_cookie) will be called on completion of
+ *     the switch, in non-atomic context.
+ * @completer_cookie: opaque context argument for @completer.
+ *
+ * This function causes a cluster switch on the given CPU by waking up
+ * the appropriate switcher thread.  This function may or may not return
+ * before the switch has occurred.
+ *
+ * If a @completer callback function is supplied, it will be called when
+ * the switch is complete.  This can be used to determine asynchronously
+ * when the switch is complete, regardless of when bL_switch_request()
+ * returns.  When @completer is supplied, no new switch request is permitted
+ * for the affected CPU until after the switch is complete, and @completer
+ * has returned.
+ */
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+                        bL_switch_completion_handler completer,
+                        void *completer_cookie)
+{
+       struct bL_thread *t;
+
+       if (cpu >= ARRAY_SIZE(bL_threads)) {
+               pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
+               return -EINVAL;
+       }
+
+       t = &bL_threads[cpu];
+
+       if (IS_ERR(t->task))
+               return PTR_ERR(t->task);
+       if (!t->task)
+               return -ESRCH;
+
+       spin_lock(&t->lock);
+       if (t->completer) {
+               spin_unlock(&t->lock);
+               return -EBUSY;
+       }
+       t->completer = completer;
+       t->completer_cookie = completer_cookie;
+       t->wanted_cluster = new_cluster_id;
+       spin_unlock(&t->lock);
+       wake_up(&t->wq);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(bL_switch_request_cb);
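As a rough illustration of the calling convention documented above, here is a hypothetical client sketch (not part of this merge; my_switch_done() and my_switch_cpu0_to() are made-up names) that requests a switch and blocks until the completer fires:

/* Hypothetical caller sketch -- not part of this patch. */
#include <linux/completion.h>
#include <asm/bL_switcher.h>

static void my_switch_done(void *cookie)
{
	/* Runs in non-atomic context once the switch has completed. */
	complete(cookie);
}

static int my_switch_cpu0_to(unsigned int cluster)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	ret = bL_switch_request_cb(0, cluster, my_switch_done, &done);
	if (ret)
		return ret;	/* e.g. -EINVAL, -ESRCH or -EBUSY as above */
	wait_for_completion(&done);
	return 0;
}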
+
+/*
+ * Activation and configuration code.
+ */
+
+static DEFINE_MUTEX(bL_switcher_activation_lock);
+static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
+static unsigned int bL_switcher_active;
+static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
+static cpumask_t bL_switcher_removed_logical_cpus;
+
+int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_register(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);
+
+int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);
+
+static int bL_activation_notify(unsigned long val)
+{
+       int ret;
+
+       ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
+       if (ret & NOTIFY_STOP_MASK)
+               pr_err("%s: notifier chain failed with status 0x%x\n",
+                       __func__, ret);
+       return notifier_to_errno(ret);
+}
+
+static void bL_switcher_restore_cpus(void)
+{
+       int i;
+
+       for_each_cpu(i, &bL_switcher_removed_logical_cpus)
+               cpu_up(i);
+}
+
+static int bL_switcher_halve_cpus(void)
+{
+       int i, j, cluster_0, gic_id, ret;
+       unsigned int cpu, cluster, mask;
+       cpumask_t available_cpus;
+
+       /* First pass to validate what we have */
+       mask = 0;
+       for_each_online_cpu(i) {
+               cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+               cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+               if (cluster >= 2) {
+                       pr_err("%s: only dual cluster systems are supported\n", __func__);
+                       return -EINVAL;
+               }
+               if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
+                       return -EINVAL;
+               mask |= (1 << cluster);
+       }
+       if (mask != 3) {
+               pr_err("%s: no CPU pairing possible\n", __func__);
+               return -EINVAL;
+       }
+
+       /*
+        * Now let's do the pairing.  We match each CPU with another CPU
+        * from a different cluster.  To get a uniform scheduling behavior
+        * without fiddling with CPU topology and compute capacity data,
+        * we'll use logical CPUs initially belonging to the same cluster.
+        */
+       memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
+       cpumask_copy(&available_cpus, cpu_online_mask);
+       cluster_0 = -1;
+       for_each_cpu(i, &available_cpus) {
+               int match = -1;
+               cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+               if (cluster_0 == -1)
+                       cluster_0 = cluster;
+               if (cluster != cluster_0)
+                       continue;
+               cpumask_clear_cpu(i, &available_cpus);
+               for_each_cpu(j, &available_cpus) {
+                       cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
+                       /*
+                        * Let's remember the last match to create "odd"
+                        * pairings on purpose in order for other code not
+                        * to assume any relation between physical and
+                        * logical CPU numbers.
+                        */
+                       if (cluster != cluster_0)
+                               match = j;
+               }
+               if (match != -1) {
+                       bL_switcher_cpu_pairing[i] = match;
+                       cpumask_clear_cpu(match, &available_cpus);
+                       pr_info("CPU%d paired with CPU%d\n", i, match);
+               }
+       }
+
+       /*
+        * Now we disable the unwanted CPUs i.e. everything that has no
+        * pairing information (that includes the pairing counterparts).
+        */
+       cpumask_clear(&bL_switcher_removed_logical_cpus);
+       for_each_online_cpu(i) {
+               cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
+               cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
+
+               /* Let's take note of the GIC ID for this CPU */
+               gic_id = gic_get_cpu_id(i);
+               if (gic_id < 0) {
+                       pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
+                       bL_switcher_restore_cpus();
+                       return -EINVAL;
+               }
+               bL_gic_id[cpu][cluster] = gic_id;
+               pr_info("GIC ID for CPU %u cluster %u is %u\n",
+                       cpu, cluster, gic_id);
+
+               if (bL_switcher_cpu_pairing[i] != -1) {
+                       bL_switcher_cpu_original_cluster[i] = cluster;
+                       continue;
+               }
+
+               ret = cpu_down(i);
+               if (ret) {
+                       bL_switcher_restore_cpus();
+                       return ret;
+               }
+               cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
+       }
+
+       return 0;
+}
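To make the pairing policy above concrete, a worked illustration (assumption only: a 2+2 system with logical CPUs 0-1 on one cluster and 2-3 on the other, all online):

/*
 * Illustration only, not part of this patch: with logical CPUs 0-1 on
 * cluster A and CPUs 2-3 on cluster B, the pairing loop above matches
 * CPU0 with CPU3 (the *last* cross-cluster candidate seen) and CPU1
 * with CPU2, deliberately avoiding the "obvious" 0<->2 / 1<->3 layout
 * so nothing can rely on a fixed relation between logical and physical
 * CPU numbering.  CPUs 2 and 3 then end up in
 * bL_switcher_removed_logical_cpus and are taken offline.
 */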
+
+/* Determine the logical CPU a given physical CPU is grouped on. */
+int bL_switcher_get_logical_index(u32 mpidr)
+{
+       int cpu;
+
+       if (!bL_switcher_active)
+               return -EUNATCH;
+
+       mpidr &= MPIDR_HWID_BITMASK;
+       for_each_online_cpu(cpu) {
+               int pairing = bL_switcher_cpu_pairing[cpu];
+               if (pairing == -1)
+                       continue;
+               if ((mpidr == cpu_logical_map(cpu)) ||
+                   (mpidr == cpu_logical_map(pairing)))
+                       return cpu;
+       }
+       return -EINVAL;
+}
+
+static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
+{
+       trace_cpu_migrate_current(get_ns(), read_mpidr());
+}
+
+int bL_switcher_trace_trigger(void)
+{
+       int ret;
+
+       preempt_disable();
+
+       bL_switcher_trace_trigger_cpu(NULL);
+       ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);
+
+       preempt_enable();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);
+
+static int bL_switcher_enable(void)
+{
+       int cpu, ret;
+
+       mutex_lock(&bL_switcher_activation_lock);
+       cpu_hotplug_driver_lock();
+       if (bL_switcher_active) {
+               cpu_hotplug_driver_unlock();
+               mutex_unlock(&bL_switcher_activation_lock);
+               return 0;
+       }
+
+       pr_info("big.LITTLE switcher initializing\n");
+
+       ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
+       if (ret)
+               goto error;
+
+       ret = bL_switcher_halve_cpus();
+       if (ret)
+               goto error;
+
+       bL_switcher_trace_trigger();
+
+       for_each_online_cpu(cpu) {
+               struct bL_thread *t = &bL_threads[cpu];
+               spin_lock_init(&t->lock);
+               init_waitqueue_head(&t->wq);
+               init_completion(&t->started);
+               t->wanted_cluster = -1;
+               t->task = bL_switcher_thread_create(cpu, t);
+       }
+
+       bL_switcher_active = 1;
+       bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+       pr_info("big.LITTLE switcher initialized\n");
+       goto out;
+
+error:
+       pr_warn("big.LITTLE switcher initialization failed\n");
+       bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+       cpu_hotplug_driver_unlock();
+       mutex_unlock(&bL_switcher_activation_lock);
+       return ret;
+}
+
+#ifdef CONFIG_SYSFS
+
+static void bL_switcher_disable(void)
+{
+       unsigned int cpu, cluster;
+       struct bL_thread *t;
+       struct task_struct *task;
+
+       mutex_lock(&bL_switcher_activation_lock);
+       cpu_hotplug_driver_lock();
+
+       if (!bL_switcher_active)
+               goto out;
+
+       if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
+               bL_activation_notify(BL_NOTIFY_POST_ENABLE);
+               goto out;
+       }
+
+       bL_switcher_active = 0;
+
+       /*
+        * To deactivate the switcher, we must shut down the switcher
+        * threads to prevent any other requests from being accepted.
+        * Then, if the final cluster for a given logical CPU is not the
+        * same as the original one, we'll recreate a switcher thread
+        * just for the purpose of switching the CPU back without any
+        * possibility for interference from external requests.
+        */
+       for_each_online_cpu(cpu) {
+               t = &bL_threads[cpu];
+               task = t->task;
+               t->task = NULL;
+               if (!task || IS_ERR(task))
+                       continue;
+               kthread_stop(task);
+               /* no more switch may happen on this CPU at this point */
+               cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+               if (cluster == bL_switcher_cpu_original_cluster[cpu])
+                       continue;
+               init_completion(&t->started);
+               t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
+               task = bL_switcher_thread_create(cpu, t);
+               if (!IS_ERR(task)) {
+                       wait_for_completion(&t->started);
+                       kthread_stop(task);
+                       cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
+                       if (cluster == bL_switcher_cpu_original_cluster[cpu])
+                               continue;
+               }
+               /* If execution gets here, we're in trouble. */
+               pr_crit("%s: unable to restore original cluster for CPU %d\n",
+                       __func__, cpu);
+               pr_crit("%s: CPU %d can't be restored\n",
+                       __func__, bL_switcher_cpu_pairing[cpu]);
+               cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
+                                 &bL_switcher_removed_logical_cpus);
+       }
+
+       bL_switcher_restore_cpus();
+       bL_switcher_trace_trigger();
+
+       bL_activation_notify(BL_NOTIFY_POST_DISABLE);
+
+out:
+       cpu_hotplug_driver_unlock();
+       mutex_unlock(&bL_switcher_activation_lock);
+}
+
+static ssize_t bL_switcher_active_show(struct kobject *kobj,
+               struct kobj_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", bL_switcher_active);
+}
+
+static ssize_t bL_switcher_active_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       int ret;
+
+       switch (buf[0]) {
+       case '0':
+               bL_switcher_disable();
+               ret = 0;
+               break;
+       case '1':
+               ret = bL_switcher_enable();
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       return (ret >= 0) ? count : ret;
+}
+
+static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
+               struct kobj_attribute *attr, const char *buf, size_t count)
+{
+       int ret = bL_switcher_trace_trigger();
+
+       return ret ? ret : count;
+}
+
+static struct kobj_attribute bL_switcher_active_attr =
+       __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);
+
+static struct kobj_attribute bL_switcher_trace_trigger_attr =
+       __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);
+
+static struct attribute *bL_switcher_attrs[] = {
+       &bL_switcher_active_attr.attr,
+       &bL_switcher_trace_trigger_attr.attr,
+       NULL,
+};
+
+static struct attribute_group bL_switcher_attr_group = {
+       .attrs = bL_switcher_attrs,
+};
+
+static struct kobject *bL_switcher_kobj;
+
+static int __init bL_switcher_sysfs_init(void)
+{
+       int ret;
+
+       bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
+       if (!bL_switcher_kobj)
+               return -ENOMEM;
+       ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
+       if (ret)
+               kobject_put(bL_switcher_kobj);
+       return ret;
+}
+
+#endif  /* CONFIG_SYSFS */
+
+bool bL_switcher_get_enabled(void)
+{
+       mutex_lock(&bL_switcher_activation_lock);
+
+       return bL_switcher_active;
+}
+EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);
+
+void bL_switcher_put_enabled(void)
+{
+       mutex_unlock(&bL_switcher_activation_lock);
+}
+EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);
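A hypothetical user of this get/put pair (sketch only, not part of this merge) brackets work that must not race with switcher enable/disable:

/* Hypothetical client sketch -- not part of this patch. */
#include <linux/printk.h>
#include <asm/bL_switcher.h>

static void my_switcher_aware_work(void)
{
	if (bL_switcher_get_enabled()) {
		/* The activation lock is held here, so the switcher
		 * cannot be disabled (or enabled) underneath us. */
		pr_info("bL switcher currently active\n");
	}
	/* Always drop the lock taken by bL_switcher_get_enabled(). */
	bL_switcher_put_enabled();
}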
+
+/*
+ * Veto any CPU hotplug operation on those CPUs we've removed
+ * while the switcher is active.
+ * We're just not ready to deal with that given the trickery involved.
+ */
+static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
+                                       unsigned long action, void *hcpu)
+{
+       if (bL_switcher_active) {
+               int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
+               switch (action & 0xf) {
+               case CPU_UP_PREPARE:
+               case CPU_DOWN_PREPARE:
+                       if (pairing == -1)
+                               return NOTIFY_BAD;
+               }
+       }
+       return NOTIFY_DONE;
+}
+
+static bool no_bL_switcher;
+core_param(no_bL_switcher, no_bL_switcher, bool, 0644);
+
+static int __init bL_switcher_init(void)
+{
+       int ret;
+
+       if (MAX_NR_CLUSTERS != 2) {
+               pr_err("%s: only dual cluster systems are supported\n", __func__);
+               return -EINVAL;
+       }
+
+       cpu_notifier(bL_switcher_hotplug_callback, 0);
+
+       if (!no_bL_switcher) {
+               ret = bL_switcher_enable();
+               if (ret)
+                       return ret;
+       }
+
+#ifdef CONFIG_SYSFS
+       ret = bL_switcher_sysfs_init();
+       if (ret)
+               pr_err("%s: unable to create sysfs entry\n", __func__);
+#endif
+
+       return 0;
+}
+
+late_initcall(bL_switcher_init);
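Given the sysfs wiring above, runtime control lands under /sys/kernel/bL_switcher/ (active is read/write, trace_trigger is write-only), and the no_bL_switcher core_param allows skipping the initial enable from the kernel command line. A minimal userspace sketch under those assumptions:

/* Hypothetical userspace sketch -- not part of this patch. */
#include <stdio.h>

static int set_switcher_active(int on)
{
	/* active_store() only looks at the first character: '0' or '1'. */
	FILE *f = fopen("/sys/kernel/bL_switcher/active", "w");

	if (!f)
		return -1;
	fprintf(f, "%c\n", on ? '1' : '0');
	return fclose(f);
}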
diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
new file mode 100644 (file)
index 0000000..3f47f12
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * arch/arm/common/bL_switcher_dummy_if.c -- b.L switcher dummy interface
+ *
+ * Created by: Nicolas Pitre, November 2012
+ * Copyright:  (C) 2012-2013  Linaro Limited
+ *
+ * Dummy interface to user space for debugging purposes only.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <asm/uaccess.h>
+#include <asm/bL_switcher.h>
+
+static ssize_t bL_switcher_write(struct file *file, const char __user *buf,
+                       size_t len, loff_t *pos)
+{
+       unsigned char val[3];
+       unsigned int cpu, cluster;
+       int ret;
+
+       pr_debug("%s\n", __func__);
+
+       if (len < 3)
+               return -EINVAL;
+
+       if (copy_from_user(val, buf, 3))
+               return -EFAULT;
+
+       /* format: <cpu#>,<cluster#> */
+       if (val[0] < '0' || val[0] > '9' ||
+           val[1] != ',' ||
+           val[2] < '0' || val[2] > '1')
+               return -EINVAL;
+
+       cpu = val[0] - '0';
+       cluster = val[2] - '0';
+       ret = bL_switch_request(cpu, cluster);
+
+       return ret ? : len;
+}
+
+static const struct file_operations bL_switcher_fops = {
+       .write          = bL_switcher_write,
+       .owner  = THIS_MODULE,
+};
+
+static struct miscdevice bL_switcher_device = {
+       MISC_DYNAMIC_MINOR,
+       "b.L_switcher",
+       &bL_switcher_fops
+};
+
+static int __init bL_switcher_dummy_if_init(void)
+{
+       return misc_register(&bL_switcher_device);
+}
+
+static void __exit bL_switcher_dummy_if_exit(void)
+{
+       misc_deregister(&bL_switcher_device);
+}
+
+module_init(bL_switcher_dummy_if_init);
+module_exit(bL_switcher_dummy_if_exit);
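For completeness, a hypothetical userspace test of this debug node (not part of this merge; it assumes udev exposes the misc device under its registered name as /dev/b.L_switcher):

/* Hypothetical userspace test -- not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/b.L_switcher", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Format checked above: "<cpu#>,<cluster#>", e.g. CPU 0 -> cluster 1 */
	if (write(fd, "0,1", 3) != 3)
		perror("write");
	close(fd);
	return 0;
}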
index 370236dd1a03309ee3b123e705aa80132c908427..24a9804b8f5e062613532c8a678b1b826b9e7620 100644 (file)
@@ -27,6 +27,18 @@ void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
        sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
 }
 
+extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];
+
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+                        unsigned long poke_phys_addr, unsigned long poke_val)
+{
+       unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
+       poke[0] = poke_phys_addr;
+       poke[1] = poke_val;
+       __cpuc_flush_dcache_area((void *)poke, 8);
+       outer_clean_range(__pa(poke), __pa(poke + 2));
+}
+
 static const struct mcpm_platform_ops *platform_ops;
 
 int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
@@ -51,7 +63,8 @@ void mcpm_cpu_power_down(void)
 {
        phys_reset_t phys_reset;
 
-       BUG_ON(!platform_ops);
+       if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
+               return;
        BUG_ON(!irqs_disabled());
 
        /*
@@ -93,7 +106,8 @@ void mcpm_cpu_suspend(u64 expected_residency)
 {
        phys_reset_t phys_reset;
 
-       BUG_ON(!platform_ops);
+       if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+               return;
        BUG_ON(!irqs_disabled());
 
        /* Very similar to mcpm_cpu_power_down() */
index 39c96df3477a41549d71e1a18733dd85f4a168df..49dd5352fe70336ff023d3c40097a03d4370902f 100644 (file)
@@ -71,12 +71,19 @@ ENTRY(mcpm_entry_point)
         * position independent way.
         */
        adr     r5, 3f
-       ldmia   r5, {r6, r7, r8, r11}
+       ldmia   r5, {r0, r6, r7, r8, r11}
+       add     r0, r5, r0                      @ r0 = mcpm_entry_early_pokes
        add     r6, r5, r6                      @ r6 = mcpm_entry_vectors
        ldr     r7, [r5, r7]                    @ r7 = mcpm_power_up_setup_phys
        add     r8, r5, r8                      @ r8 = mcpm_sync
        add     r11, r5, r11                    @ r11 = first_man_locks
 
+       @ Perform an early poke, if any
+       add     r0, r0, r4, lsl #3
+       ldmia   r0, {r0, r1}
+       teq     r0, #0
+       strne   r1, [r0]
+
        mov     r0, #MCPM_SYNC_CLUSTER_SIZE
        mla     r8, r0, r10, r8                 @ r8 = sync cluster base
 
@@ -195,7 +202,8 @@ mcpm_entry_gated:
 
        .align  2
 
-3:     .word   mcpm_entry_vectors - .
+3:     .word   mcpm_entry_early_pokes - .
+       .word   mcpm_entry_vectors - 3b
        .word   mcpm_power_up_setup_phys - 3b
        .word   mcpm_sync - 3b
        .word   first_man_locks - 3b
@@ -214,6 +222,10 @@ first_man_locks:
 ENTRY(mcpm_entry_vectors)
        .space  4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
 
+       .type   mcpm_entry_early_pokes, #object
+ENTRY(mcpm_entry_early_pokes)
+       .space  8 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
        .type   mcpm_power_up_setup_phys, #object
 ENTRY(mcpm_power_up_setup_phys)
        .space  4               @ set by mcpm_sync_init()
index d56c932580eb201439c8a63aa401abddd157cbbf..025f6ce38596736eea2c929a211b23fd4eaf0d56 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <asm/mach/sharpsl_param.h>
+#include <asm/memory.h>
 
 /*
  * Certain hardware parameters determined at the time of device manufacture,
  */
 #ifdef CONFIG_ARCH_SA1100
 #define PARAM_BASE     0xe8ffc000
+#define param_start(x) (void *)(x)
 #else
 #define PARAM_BASE     0xa0000a00
+#define param_start(x) __va(x)
 #endif
 #define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 )  | ( b << 8 ) | a )
 
@@ -41,7 +44,7 @@ EXPORT_SYMBOL(sharpsl_param);
 
 void sharpsl_save_param(void)
 {
-       memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info));
+       memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
 
        if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
                sharpsl_param.comadj=-1;
index e901d0f3e0bbcd735f5cf7e62bd653536ff8aa71..ce922d0ea7aa85daa59c408ac5cd79beab5459a6 100644 (file)
@@ -175,7 +175,7 @@ static struct clock_event_device sp804_clockevent = {
 
 static struct irqaction sp804_timer_irq = {
        .name           = "timer",
-       .flags          = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+       .flags          = IRQF_TIMER | IRQF_IRQPOLL,
        .handler        = sp804_timer_interrupt,
        .dev_id         = &sp804_clockevent,
 };
index 317960f1248893d6200650ec9c01e8132b627c16..0142ec37e0be26d8c1480aa4929a722896b3b397 100644 (file)
@@ -1,5 +1,6 @@
-CONFIG_EXPERIMENTAL=y
 CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_MODULES=y
@@ -11,11 +12,11 @@ CONFIG_ARCH_SA1100=y
 CONFIG_SA1100_H3600=y
 CONFIG_PCCARD=y
 CONFIG_PCMCIA_SA1100=y
+CONFIG_PREEMPT=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 # CONFIG_CPU_FREQ_STAT is not set
 CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
@@ -24,13 +25,10 @@ CONFIG_IRDA=m
 CONFIG_IRLAN=m
 CONFIG_IRNET=m
 CONFIG_IRCOMM=m
-CONFIG_SA1100_FIR=m
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_ADV_OPTIONS=y
@@ -41,19 +39,15 @@ CONFIG_MTD_SA1100=y
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
 CONFIG_IDE=y
 CONFIG_BLK_DEV_IDECS=y
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_WLAN is not set
-CONFIG_NET_PCMCIA=y
 CONFIG_PCMCIA_PCNET=y
 CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
 CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_ASYNC=m
+# CONFIG_WLAN is not set
 # CONFIG_KEYBOARD_ATKBD is not set
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -64,8 +58,6 @@ CONFIG_SERIAL_SA1100_CONSOLE=y
 # CONFIG_HWMON is not set
 CONFIG_FB=y
 CONFIG_FB_SA1100=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_MSDOS_FS=m
@@ -74,6 +66,4 @@ CONFIG_JFFS2_FS=y
 CONFIG_CRAMFS=m
 CONFIG_NFS_FS=y
 CONFIG_NFSD=m
-CONFIG_SMB_FS=m
 CONFIG_NLS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore
new file mode 100644 (file)
index 0000000..6231d36
--- /dev/null
@@ -0,0 +1 @@
+aesbs-core.S
index a2c83851bc90a29f5f1d06415cb4a0db4dd726e1..81cda39860c5c7ad90a6710727011ec79296e5d8 100644 (file)
@@ -3,7 +3,17 @@
 #
 
 obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
+obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
 obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
 
-aes-arm-y  := aes-armv4.o aes_glue.o
-sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+aes-arm-y      := aes-armv4.o aes_glue.o
+aes-arm-bs-y   := aesbs-core.o aesbs-glue.o
+sha1-arm-y     := sha1-armv4-large.o sha1_glue.o
+
+quiet_cmd_perl = PERL    $@
+      cmd_perl = $(PERL) $(<) > $(@)
+
+$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
+       $(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S
index 59f7877ead6ac9ee3f8a31b43c6e0458de26cd8f..3003fa1f6fb4b9395c77340fbf83011b5cb0e419 100644 (file)
@@ -6,22 +6,12 @@
 #include <linux/crypto.h>
 #include <crypto/aes.h>
 
-#define AES_MAXNR 14
+#include "aes_glue.h"
 
-typedef struct {
-       unsigned int rd_key[4 *(AES_MAXNR + 1)];
-       int rounds;
-} AES_KEY;
-
-struct AES_CTX {
-       AES_KEY enc_key;
-       AES_KEY dec_key;
-};
-
-asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
-asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
+EXPORT_SYMBOL(AES_encrypt);
+EXPORT_SYMBOL(AES_decrypt);
+EXPORT_SYMBOL(private_AES_set_encrypt_key);
+EXPORT_SYMBOL(private_AES_set_decrypt_key);
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
@@ -81,7 +71,7 @@ static struct crypto_alg aes_alg = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
-                       .cia_setkey                     = aes_set_key,
+                       .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
diff --git a/arch/arm/crypto/aes_glue.h b/arch/arm/crypto/aes_glue.h
new file mode 100644 (file)
index 0000000..cca3e51
--- /dev/null
@@ -0,0 +1,19 @@
+
+#define AES_MAXNR 14
+
+struct AES_KEY {
+       unsigned int rd_key[4 * (AES_MAXNR + 1)];
+       int rounds;
+};
+
+struct AES_CTX {
+       struct AES_KEY enc_key;
+       struct AES_KEY dec_key;
+};
+
+asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
+                                          const int bits, struct AES_KEY *key);
+asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
+                                          const int bits, struct AES_KEY *key);
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
new file mode 100644 (file)
index 0000000..64205d4
--- /dev/null
@@ -0,0 +1,2544 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+@ granted.
+@ ====================================================================
+
+@ Bit-sliced AES for ARM NEON
+@
+@ February 2012.
+@
+@ This implementation is direct adaptation of bsaes-x86_64 module for
+@ ARM NEON. Except that this module is endian-neutral [in sense that
+@ it can be compiled for either endianness] by courtesy of vld1.8's
+@ neutrality. Initial version doesn't implement interface to OpenSSL,
+@ only low-level primitives and unsupported entry points, just enough
+@ to collect performance results, which for Cortex-A8 core are:
+@
+@ encrypt      19.5 cycles per byte processed with 128-bit key
+@ decrypt      22.1 cycles per byte processed with 128-bit key
+@ key conv.    440  cycles per 128-bit key/0.18 of 8x block
+@
+@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
+@ which is [much] worse than anticipated (for further details see
+@ http://www.openssl.org/~appro/Snapdragon-S4.html).
+@
+@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+@ manages in 20.0 cycles].
+@
+@ When comparing to x86_64 results keep in mind that NEON unit is
+@ [mostly] single-issue and thus can't [fully] benefit from
+@ instruction-level parallelism. And when comparing to aes-armv4
+@ results keep in mind key schedule conversion overhead (see
+@ bsaes-x86_64.pl for further details)...
+@
+@                                              <appro@openssl.org>
+
+@ April-August 2013
+@
+@ Add CBC, CTR and XTS subroutines, adapt for kernel use.
+@
+@                                      <ard.biesheuvel@linaro.org>
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH  vstmdb  sp!,{d8-d15}
+# define VFP_ABI_POP   vldmia  sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax        unified         @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code   32
+#endif
+
+.fpu   neon
+
+.type  _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+       adr     r6,_bsaes_decrypt8
+       vldmia  r4!, {q9}               @ round 0 key
+       add     r6,r6,#.LM0ISR-_bsaes_decrypt8
+
+       vldmia  r6!, {q8}               @ .LM0ISR
+       veor    q10, q0, q9     @ xor with round0 key
+       veor    q11, q1, q9
+        vtbl.8 d0, {q10}, d16
+        vtbl.8 d1, {q10}, d17
+       veor    q12, q2, q9
+        vtbl.8 d2, {q11}, d16
+        vtbl.8 d3, {q11}, d17
+       veor    q13, q3, q9
+        vtbl.8 d4, {q12}, d16
+        vtbl.8 d5, {q12}, d17
+       veor    q14, q4, q9
+        vtbl.8 d6, {q13}, d16
+        vtbl.8 d7, {q13}, d17
+       veor    q15, q5, q9
+        vtbl.8 d8, {q14}, d16
+        vtbl.8 d9, {q14}, d17
+       veor    q10, q6, q9
+        vtbl.8 d10, {q15}, d16
+        vtbl.8 d11, {q15}, d17
+       veor    q11, q7, q9
+        vtbl.8 d12, {q10}, d16
+        vtbl.8 d13, {q10}, d17
+        vtbl.8 d14, {q11}, d16
+        vtbl.8 d15, {q11}, d17
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q6, #1
+        vshr.u64       q11, q4, #1
+       veor            q10, q10, q7
+        veor           q11, q11, q5
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #1
+        veor           q5, q5, q11
+        vshl.u64       q11, q11, #1
+       veor            q6, q6, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q2, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q3
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q2, q2, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q5, #2
+        vshr.u64       q11, q4, #2
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #2
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #2
+       veor            q5, q5, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q3
+        veor           q11, q11, q2
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #2
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q3, #4
+        vshr.u64       q11, q2, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #4
+       veor            q3, q3, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q4
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q4, q4, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       sub     r5,r5,#1
+       b       .Ldec_sbox
+.align 4
+.Ldec_loop:
+       vldmia  r4!, {q8-q11}
+       veor    q8, q8, q0
+       veor    q9, q9, q1
+       vtbl.8  d0, {q8}, d24
+       vtbl.8  d1, {q8}, d25
+       vldmia  r4!, {q8}
+       veor    q10, q10, q2
+       vtbl.8  d2, {q9}, d24
+       vtbl.8  d3, {q9}, d25
+       vldmia  r4!, {q9}
+       veor    q11, q11, q3
+       vtbl.8  d4, {q10}, d24
+       vtbl.8  d5, {q10}, d25
+       vldmia  r4!, {q10}
+       vtbl.8  d6, {q11}, d24
+       vtbl.8  d7, {q11}, d25
+       vldmia  r4!, {q11}
+       veor    q8, q8, q4
+       veor    q9, q9, q5
+       vtbl.8  d8, {q8}, d24
+       vtbl.8  d9, {q8}, d25
+       veor    q10, q10, q6
+       vtbl.8  d10, {q9}, d24
+       vtbl.8  d11, {q9}, d25
+       veor    q11, q11, q7
+       vtbl.8  d12, {q10}, d24
+       vtbl.8  d13, {q10}, d25
+       vtbl.8  d14, {q11}, d24
+       vtbl.8  d15, {q11}, d25
+.Ldec_sbox:
+        veor   q1, q1, q4
+       veor    q3, q3, q4
+
+       veor    q4, q4, q7
+        veor   q1, q1, q6
+       veor    q2, q2, q7
+       veor    q6, q6, q4
+
+       veor    q0, q0, q1
+       veor    q2, q2, q5
+        veor   q7, q7, q6
+       veor    q3, q3, q0
+       veor    q5, q5, q0
+       veor    q1, q1, q3
+       veor    q11, q3, q0
+       veor    q10, q7, q4
+       veor    q9, q1, q6
+       veor    q13, q4, q0
+        vmov   q8, q10
+       veor    q12, q5, q2
+
+       vorr    q10, q10, q9
+       veor    q15, q11, q8
+       vand    q14, q11, q12
+       vorr    q11, q11, q12
+       veor    q12, q12, q9
+       vand    q8, q8, q9
+       veor    q9, q6, q2
+       vand    q15, q15, q12
+       vand    q13, q13, q9
+       veor    q9, q3, q7
+       veor    q12, q1, q5
+       veor    q11, q11, q13
+       veor    q10, q10, q13
+       vand    q13, q9, q12
+       vorr    q9, q9, q12
+       veor    q11, q11, q15
+       veor    q8, q8, q13
+       veor    q10, q10, q14
+       veor    q9, q9, q15
+       veor    q8, q8, q14
+       vand    q12, q4, q6
+       veor    q9, q9, q14
+       vand    q13, q0, q2
+       vand    q14, q7, q1
+       vorr    q15, q3, q5
+       veor    q11, q11, q12
+       veor    q9, q9, q14
+       veor    q8, q8, q15
+       veor    q10, q10, q13
+
+       @ Inv_GF16      0,      1,      2,      3, s0, s1, s2, s3
+
+       @ new smaller inversion
+
+       vand    q14, q11, q9
+       vmov    q12, q8
+
+       veor    q13, q10, q14
+       veor    q15, q8, q14
+       veor    q14, q8, q14    @ q14=q15
+
+       vbsl    q13, q9, q8
+       vbsl    q15, q11, q10
+       veor    q11, q11, q10
+
+       vbsl    q12, q13, q14
+       vbsl    q8, q14, q13
+
+       vand    q14, q12, q15
+       veor    q9, q9, q8
+
+       veor    q14, q14, q11
+       veor    q12, q5, q2
+       veor    q8, q1, q6
+       veor    q10, q15, q14
+       vand    q10, q10, q5
+       veor    q5, q5, q1
+       vand    q11, q1, q15
+       vand    q5, q5, q14
+       veor    q1, q11, q10
+       veor    q5, q5, q11
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q2
+       veor    q12, q12, q8
+        veor   q2, q2, q6
+       vand    q8, q8, q15
+        vand   q6, q6, q13
+       vand    q12, q12, q14
+        vand   q2, q2, q9
+       veor    q8, q8, q12
+        veor   q2, q2, q6
+       veor    q12, q12, q11
+        veor   q6, q6, q10
+       veor    q5, q5, q12
+       veor    q2, q2, q12
+       veor    q1, q1, q8
+       veor    q6, q6, q8
+
+       veor    q12, q3, q0
+       veor    q8, q7, q4
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q0
+       veor    q12, q12, q8
+        veor   q0, q0, q4
+       vand    q8, q8, q15
+        vand   q4, q4, q13
+       vand    q12, q12, q14
+        vand   q0, q0, q9
+       veor    q8, q8, q12
+        veor   q0, q0, q4
+       veor    q12, q12, q11
+        veor   q4, q4, q10
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q10, q15, q14
+       vand    q10, q10, q3
+       veor    q3, q3, q7
+       vand    q11, q7, q15
+       vand    q3, q3, q14
+       veor    q7, q11, q10
+       veor    q3, q3, q11
+       veor    q3, q3, q12
+       veor    q0, q0, q12
+       veor    q7, q7, q8
+       veor    q4, q4, q8
+       veor    q1, q1, q7
+       veor    q6, q6, q5
+
+       veor    q4, q4, q1
+       veor    q2, q2, q7
+       veor    q5, q5, q7
+       veor    q4, q4, q2
+        veor   q7, q7, q0
+       veor    q4, q4, q5
+        veor   q3, q3, q6
+        veor   q6, q6, q1
+       veor    q3, q3, q4
+
+       veor    q4, q4, q0
+       veor    q7, q7, q3
+       subs    r5,r5,#1
+       bcc     .Ldec_done
+       @ multiplication by 0x05-0x00-0x04-0x00
+       vext.8  q8, q0, q0, #8
+       vext.8  q14, q3, q3, #8
+       vext.8  q15, q5, q5, #8
+       veor    q8, q8, q0
+       vext.8  q9, q1, q1, #8
+       veor    q14, q14, q3
+       vext.8  q10, q6, q6, #8
+       veor    q15, q15, q5
+       vext.8  q11, q4, q4, #8
+       veor    q9, q9, q1
+       vext.8  q12, q2, q2, #8
+       veor    q10, q10, q6
+       vext.8  q13, q7, q7, #8
+       veor    q11, q11, q4
+       veor    q12, q12, q2
+       veor    q13, q13, q7
+
+        veor   q0, q0, q14
+        veor   q1, q1, q14
+        veor   q6, q6, q8
+        veor   q2, q2, q10
+        veor   q4, q4, q9
+        veor   q1, q1, q15
+        veor   q6, q6, q15
+        veor   q2, q2, q14
+        veor   q7, q7, q11
+        veor   q4, q4, q14
+        veor   q3, q3, q12
+        veor   q2, q2, q15
+        veor   q7, q7, q15
+        veor   q5, q5, q13
+       vext.8  q8, q0, q0, #12 @ x0 <<< 32
+       vext.8  q9, q1, q1, #12
+        veor   q0, q0, q8              @ x0 ^ (x0 <<< 32)
+       vext.8  q10, q6, q6, #12
+        veor   q1, q1, q9
+       vext.8  q11, q4, q4, #12
+        veor   q6, q6, q10
+       vext.8  q12, q2, q2, #12
+        veor   q4, q4, q11
+       vext.8  q13, q7, q7, #12
+        veor   q2, q2, q12
+       vext.8  q14, q3, q3, #12
+        veor   q7, q7, q13
+       vext.8  q15, q5, q5, #12
+        veor   q3, q3, q14
+
+       veor    q9, q9, q0
+        veor   q5, q5, q15
+        vext.8 q0, q0, q0, #8          @ (x0 ^ (x0 <<< 32)) <<< 64)
+       veor    q10, q10, q1
+       veor    q8, q8, q5
+       veor    q9, q9, q5
+        vext.8 q1, q1, q1, #8
+       veor    q13, q13, q2
+        veor   q0, q0, q8
+       veor    q14, q14, q7
+        veor   q1, q1, q9
+        vext.8 q8, q2, q2, #8
+       veor    q12, q12, q4
+        vext.8 q9, q7, q7, #8
+       veor    q15, q15, q3
+        vext.8 q2, q4, q4, #8
+       veor    q11, q11, q6
+        vext.8 q7, q5, q5, #8
+       veor    q12, q12, q5
+        vext.8 q4, q3, q3, #8
+       veor    q11, q11, q5
+        vext.8 q3, q6, q6, #8
+       veor    q5, q9, q13
+       veor    q11, q11, q2
+       veor    q7, q7, q15
+       veor    q6, q4, q14
+       veor    q4, q8, q12
+       veor    q2, q3, q10
+       vmov    q3, q11
+        @ vmov q5, q9
+       vldmia  r6, {q12}               @ .LISR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   r6,r6,#0x10
+       bne     .Ldec_loop
+       vldmia  r6, {q12}               @ .LISRM0
+       b       .Ldec_loop
+.align 4
+.Ldec_done:
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q3, #1
+        vshr.u64       q11, q2, #1
+       veor            q10, q10, q5
+        veor           q11, q11, q7
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #1
+        veor           q7, q7, q11
+        vshl.u64       q11, q11, #1
+       veor            q3, q3, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q6, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q4
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q4, q4, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q6, q6, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q7, #2
+        vshr.u64       q11, q2, #2
+       veor            q10, q10, q5
+        veor           q11, q11, q3
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #2
+        veor           q3, q3, q11
+        vshl.u64       q11, q11, #2
+       veor            q7, q7, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q4
+        veor           q11, q11, q6
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q4, q4, q10
+       vshl.u64        q10, q10, #2
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q4, #4
+        vshr.u64       q11, q6, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q3
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q3, q3, q11
+        vshl.u64       q11, q11, #4
+       veor            q4, q4, q10
+        veor           q6, q6, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q2
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vldmia  r4, {q8}                        @ last round key
+       veor    q6, q6, q8
+       veor    q4, q4, q8
+       veor    q2, q2, q8
+       veor    q7, q7, q8
+       veor    q3, q3, q8
+       veor    q5, q5, q8
+       veor    q0, q0, q8
+       veor    q1, q1, q8
+       bx      lr
+.size  _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type  _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR:       @ InvShiftRows constants
+       .quad   0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+       .quad   0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+       .quad   0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:                @ ShiftRows constants
+       .quad   0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+       .quad   0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+       .quad   0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+       .quad   0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+       .quad   0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 6
+.size  _bsaes_const,.-_bsaes_const
+
+.type  _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+       adr     r6,_bsaes_encrypt8
+       vldmia  r4!, {q9}               @ round 0 key
+       sub     r6,r6,#_bsaes_encrypt8-.LM0SR
+
+       vldmia  r6!, {q8}               @ .LM0SR
+_bsaes_encrypt8_alt:
+       veor    q10, q0, q9     @ xor with round0 key
+       veor    q11, q1, q9
+        vtbl.8 d0, {q10}, d16
+        vtbl.8 d1, {q10}, d17
+       veor    q12, q2, q9
+        vtbl.8 d2, {q11}, d16
+        vtbl.8 d3, {q11}, d17
+       veor    q13, q3, q9
+        vtbl.8 d4, {q12}, d16
+        vtbl.8 d5, {q12}, d17
+       veor    q14, q4, q9
+        vtbl.8 d6, {q13}, d16
+        vtbl.8 d7, {q13}, d17
+       veor    q15, q5, q9
+        vtbl.8 d8, {q14}, d16
+        vtbl.8 d9, {q14}, d17
+       veor    q10, q6, q9
+        vtbl.8 d10, {q15}, d16
+        vtbl.8 d11, {q15}, d17
+       veor    q11, q7, q9
+        vtbl.8 d12, {q10}, d16
+        vtbl.8 d13, {q10}, d17
+        vtbl.8 d14, {q11}, d16
+        vtbl.8 d15, {q11}, d17
+_bsaes_encrypt8_bitslice:
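+       @ bit-slice the eight blocks: swap bit groups between the q registers
+       @ with the 0x55/0x33/0x0f masks (an 8x8 bit transpose across q0-q7)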
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q6, #1
+        vshr.u64       q11, q4, #1
+       veor            q10, q10, q7
+        veor           q11, q11, q5
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #1
+        veor           q5, q5, q11
+        vshl.u64       q11, q11, #1
+       veor            q6, q6, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q2, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q3
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q2, q2, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q5, #2
+        vshr.u64       q11, q4, #2
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #2
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #2
+       veor            q5, q5, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q3
+        veor           q11, q11, q2
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #2
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q3, #4
+        vshr.u64       q11, q2, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #4
+       veor            q3, q3, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q4
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q4, q4, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       sub     r5,r5,#1
+       b       .Lenc_sbox
+.align 4
+.Lenc_loop:
+       vldmia  r4!, {q8-q11}
+       veor    q8, q8, q0
+       veor    q9, q9, q1
+       vtbl.8  d0, {q8}, d24
+       vtbl.8  d1, {q8}, d25
+       vldmia  r4!, {q8}
+       veor    q10, q10, q2
+       vtbl.8  d2, {q9}, d24
+       vtbl.8  d3, {q9}, d25
+       vldmia  r4!, {q9}
+       veor    q11, q11, q3
+       vtbl.8  d4, {q10}, d24
+       vtbl.8  d5, {q10}, d25
+       vldmia  r4!, {q10}
+       vtbl.8  d6, {q11}, d24
+       vtbl.8  d7, {q11}, d25
+       vldmia  r4!, {q11}
+       veor    q8, q8, q4
+       veor    q9, q9, q5
+       vtbl.8  d8, {q8}, d24
+       vtbl.8  d9, {q8}, d25
+       veor    q10, q10, q6
+       vtbl.8  d10, {q9}, d24
+       vtbl.8  d11, {q9}, d25
+       veor    q11, q11, q7
+       vtbl.8  d12, {q10}, d24
+       vtbl.8  d13, {q10}, d25
+       vtbl.8  d14, {q11}, d24
+       vtbl.8  d15, {q11}, d25
+.Lenc_sbox:
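+       @ bit-sliced SubBytes: the AES S-box evaluated as a Boolean circuit
+       @ over q0-q7 (GF(2^8) inversion, see the Inv_GF16 comment below)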
+       veor    q2, q2, q1
+       veor    q5, q5, q6
+       veor    q3, q3, q0
+       veor    q6, q6, q2
+       veor    q5, q5, q0
+
+       veor    q6, q6, q3
+       veor    q3, q3, q7
+       veor    q7, q7, q5
+       veor    q3, q3, q4
+       veor    q4, q4, q5
+
+       veor    q2, q2, q7
+       veor    q3, q3, q1
+       veor    q1, q1, q5
+       veor    q11, q7, q4
+       veor    q10, q1, q2
+       veor    q9, q5, q3
+       veor    q13, q2, q4
+        vmov   q8, q10
+       veor    q12, q6, q0
+
+       vorr    q10, q10, q9
+       veor    q15, q11, q8
+       vand    q14, q11, q12
+       vorr    q11, q11, q12
+       veor    q12, q12, q9
+       vand    q8, q8, q9
+       veor    q9, q3, q0
+       vand    q15, q15, q12
+       vand    q13, q13, q9
+       veor    q9, q7, q1
+       veor    q12, q5, q6
+       veor    q11, q11, q13
+       veor    q10, q10, q13
+       vand    q13, q9, q12
+       vorr    q9, q9, q12
+       veor    q11, q11, q15
+       veor    q8, q8, q13
+       veor    q10, q10, q14
+       veor    q9, q9, q15
+       veor    q8, q8, q14
+       vand    q12, q2, q3
+       veor    q9, q9, q14
+       vand    q13, q4, q0
+       vand    q14, q1, q5
+       vorr    q15, q7, q6
+       veor    q11, q11, q12
+       veor    q9, q9, q14
+       veor    q8, q8, q15
+       veor    q10, q10, q13
+
+       @ Inv_GF16      0,      1,      2,      3, s0, s1, s2, s3
+
+       @ new smaller inversion
+
+       vand    q14, q11, q9
+       vmov    q12, q8
+
+       veor    q13, q10, q14
+       veor    q15, q8, q14
+       veor    q14, q8, q14    @ q14=q15
+
+       vbsl    q13, q9, q8
+       vbsl    q15, q11, q10
+       veor    q11, q11, q10
+
+       vbsl    q12, q13, q14
+       vbsl    q8, q14, q13
+
+       vand    q14, q12, q15
+       veor    q9, q9, q8
+
+       veor    q14, q14, q11
+       veor    q12, q6, q0
+       veor    q8, q5, q3
+       veor    q10, q15, q14
+       vand    q10, q10, q6
+       veor    q6, q6, q5
+       vand    q11, q5, q15
+       vand    q6, q6, q14
+       veor    q5, q11, q10
+       veor    q6, q6, q11
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q0
+       veor    q12, q12, q8
+        veor   q0, q0, q3
+       vand    q8, q8, q15
+        vand   q3, q3, q13
+       vand    q12, q12, q14
+        vand   q0, q0, q9
+       veor    q8, q8, q12
+        veor   q0, q0, q3
+       veor    q12, q12, q11
+        veor   q3, q3, q10
+       veor    q6, q6, q12
+       veor    q0, q0, q12
+       veor    q5, q5, q8
+       veor    q3, q3, q8
+
+       veor    q12, q7, q4
+       veor    q8, q1, q2
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q4
+       veor    q12, q12, q8
+        veor   q4, q4, q2
+       vand    q8, q8, q15
+        vand   q2, q2, q13
+       vand    q12, q12, q14
+        vand   q4, q4, q9
+       veor    q8, q8, q12
+        veor   q4, q4, q2
+       veor    q12, q12, q11
+        veor   q2, q2, q10
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q10, q15, q14
+       vand    q10, q10, q7
+       veor    q7, q7, q1
+       vand    q11, q1, q15
+       vand    q7, q7, q14
+       veor    q1, q11, q10
+       veor    q7, q7, q11
+       veor    q7, q7, q12
+       veor    q4, q4, q12
+       veor    q1, q1, q8
+       veor    q2, q2, q8
+       veor    q7, q7, q0
+       veor    q1, q1, q6
+       veor    q6, q6, q0
+       veor    q4, q4, q7
+       veor    q0, q0, q1
+
+       veor    q1, q1, q5
+       veor    q5, q5, q2
+       veor    q2, q2, q3
+       veor    q3, q3, q5
+       veor    q4, q4, q5
+
+       veor    q6, q6, q3
+       subs    r5,r5,#1
+       bcc     .Lenc_done
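+       @ bit-sliced MixColumns, built from 32-/64-bit rotations of each slice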
+       vext.8  q8, q0, q0, #12 @ x0 <<< 32
+       vext.8  q9, q1, q1, #12
+        veor   q0, q0, q8              @ x0 ^ (x0 <<< 32)
+       vext.8  q10, q4, q4, #12
+        veor   q1, q1, q9
+       vext.8  q11, q6, q6, #12
+        veor   q4, q4, q10
+       vext.8  q12, q3, q3, #12
+        veor   q6, q6, q11
+       vext.8  q13, q7, q7, #12
+        veor   q3, q3, q12
+       vext.8  q14, q2, q2, #12
+        veor   q7, q7, q13
+       vext.8  q15, q5, q5, #12
+        veor   q2, q2, q14
+
+       veor    q9, q9, q0
+        veor   q5, q5, q15
+        vext.8 q0, q0, q0, #8          @ (x0 ^ (x0 <<< 32)) <<< 64
+       veor    q10, q10, q1
+       veor    q8, q8, q5
+       veor    q9, q9, q5
+        vext.8 q1, q1, q1, #8
+       veor    q13, q13, q3
+        veor   q0, q0, q8
+       veor    q14, q14, q7
+        veor   q1, q1, q9
+        vext.8 q8, q3, q3, #8
+       veor    q12, q12, q6
+        vext.8 q9, q7, q7, #8
+       veor    q15, q15, q2
+        vext.8 q3, q6, q6, #8
+       veor    q11, q11, q4
+        vext.8 q7, q5, q5, #8
+       veor    q12, q12, q5
+        vext.8 q6, q2, q2, #8
+       veor    q11, q11, q5
+        vext.8 q2, q4, q4, #8
+       veor    q5, q9, q13
+       veor    q4, q8, q12
+       veor    q3, q3, q11
+       veor    q7, q7, q15
+       veor    q6, q6, q14
+        @ vmov q4, q8
+       veor    q2, q2, q10
+        @ vmov q5, q9
+       vldmia  r6, {q12}               @ .LSR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   r6,r6,#0x10
+       bne     .Lenc_loop
+       vldmia  r6, {q12}               @ .LSRM0
+       b       .Lenc_loop
+.align 4
+.Lenc_done:
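+       @ convert back from the bit-sliced representation and apply the last round key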
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q2, #1
+        vshr.u64       q11, q3, #1
+       veor            q10, q10, q5
+        veor           q11, q11, q7
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #1
+        veor           q7, q7, q11
+        vshl.u64       q11, q11, #1
+       veor            q2, q2, q10
+        veor           q3, q3, q11
+       vshr.u64        q10, q4, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q6
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q6, q6, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q4, q4, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q7, #2
+        vshr.u64       q11, q3, #2
+       veor            q10, q10, q5
+        veor           q11, q11, q2
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #2
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #2
+       veor            q7, q7, q10
+        veor           q3, q3, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q6
+        veor           q11, q11, q4
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q6, q6, q10
+       vshl.u64        q10, q10, #2
+        veor           q4, q4, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q6, #4
+        vshr.u64       q11, q4, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q2
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #4
+       veor            q6, q6, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q3
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q3, q3, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vldmia  r4, {q8}                        @ last round key
+       veor    q4, q4, q8
+       veor    q6, q6, q8
+       veor    q3, q3, q8
+       veor    q7, q7, q8
+       veor    q2, q2, q8
+       veor    q5, q5, q8
+       veor    q0, q0, q8
+       veor    q1, q1, q8
+       bx      lr
+.size  _bsaes_encrypt8,.-_bsaes_encrypt8
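+@ _bsaes_key_convert: convert the AES key schedule at [r4] (r5 = # of rounds)
+@ into bit-sliced form at [r12].  Returns with the last round key in q15 and
+@ the 0x63 constant in q7 for the caller to fix up the round 0 or last
+@ round key.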
+.type  _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+       adr     r6,_bsaes_key_convert
+       vld1.8  {q7},  [r4]!            @ load round 0 key
+       sub     r6,r6,#_bsaes_key_convert-.LM0
+       vld1.8  {q15}, [r4]!            @ load round 1 key
+
+       vmov.i8 q8,  #0x01                      @ bit masks
+       vmov.i8 q9,  #0x02
+       vmov.i8 q10, #0x04
+       vmov.i8 q11, #0x08
+       vmov.i8 q12, #0x10
+       vmov.i8 q13, #0x20
+       vldmia  r6, {q14}               @ .LM0
+
+#ifdef __ARMEL__
+       vrev32.8        q7,  q7
+       vrev32.8        q15, q15
+#endif
+       sub     r5,r5,#1
+       vstmia  r12!, {q7}              @ save round 0 key
+       b       .Lkey_loop
+
+.align 4
+.Lkey_loop:
+       vtbl.8  d14,{q15},d28
+       vtbl.8  d15,{q15},d29
+       vmov.i8 q6,  #0x40
+       vmov.i8 q15, #0x80
+
+       vtst.8  q0, q7, q8
+       vtst.8  q1, q7, q9
+       vtst.8  q2, q7, q10
+       vtst.8  q3, q7, q11
+       vtst.8  q4, q7, q12
+       vtst.8  q5, q7, q13
+       vtst.8  q6, q7, q6
+       vtst.8  q7, q7, q15
+       vld1.8  {q15}, [r4]!            @ load next round key
+       vmvn    q0, q0          @ "pnot"
+       vmvn    q1, q1
+       vmvn    q5, q5
+       vmvn    q6, q6
+#ifdef __ARMEL__
+       vrev32.8        q15, q15
+#endif
+       subs    r5,r5,#1
+       vstmia  r12!,{q0-q7}            @ write bit-sliced round key
+       bne     .Lkey_loop
+
+       vmov.i8 q7,#0x63                        @ compose .L63
+       @ don't save last round key
+       bx      lr
+.size  _bsaes_key_convert,.-_bsaes_key_convert
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
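+@ bsaes_cbc_encrypt: CBC decryption only (the caller must pass enc == 0).
+@ r0 = in, r1 = out, r2 = length in bytes, r3 = key, IV pointer on the stack.
+@ Eight blocks are decrypted per iteration via _bsaes_decrypt8; a single
+@ leftover block falls back to AES_decrypt.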
+.global        bsaes_cbc_encrypt
+.type  bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef        __KERNEL__
+       cmp     r2, #128
+#ifndef        __thumb__
+       blo     AES_cbc_encrypt
+#else
+       bhs     1f
+       b       AES_cbc_encrypt
+1:
+#endif
+#endif
+
+       @ it is up to the caller to make sure we are called with enc == 0
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     r8, [ip]                        @ IV is 1st arg on the stack
+       mov     r2, r2, lsr#4           @ len in 16 byte blocks
+       sub     sp, #0x10                       @ scratch space to carry over the IV
+       mov     r9, sp                          @ save sp
+
+       ldr     r10, [r3, #240]         @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r10, lsl#7             @ 128 bytes per inner round key
+       add     r12, #96                        @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       mov     sp, r12                         @ sp now points to the key schedule
+       bl      _bsaes_key_convert
+       vldmia  sp, {q6}
+       vstmia  r12,  {q15}             @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  sp, {q7}
+#else
+       ldr     r12, [r3, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [r3, #244]
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       add     r12, r3, #248                   @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, r3, #248
+       vldmia  r4, {q6}
+       vstmia  r12, {q15}                      @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  r4, {q7}
+
+.align 2
+0:
+#endif
+
+       vld1.8  {q15}, [r8]             @ load IV
+       b       .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+       subs    r2, r2, #0x8
+       bmi     .Lcbc_dec_loop_finish
+
+       vld1.8  {q0-q1}, [r0]!  @ load input
+       vld1.8  {q2-q3}, [r0]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, sp                  @ pass the key
+#else
+       add     r4, r3, #248
+#endif
+       vld1.8  {q4-q5}, [r0]!
+       mov     r5, r10
+       vld1.8  {q6-q7}, [r0]
+       sub     r0, r0, #0x60
+       vstmia  r9, {q15}                       @ put aside IV
+
+       bl      _bsaes_decrypt8
+
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q12-q13}, [r0]!
+       veor    q4, q4, q10
+       veor    q2, q2, q11
+       vld1.8  {q14-q15}, [r0]!
+       veor    q7, q7, q12
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       veor    q3, q3, q13
+       vst1.8  {q6}, [r1]!
+       veor    q5, q5, q14
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       vst1.8  {q7}, [r1]!
+       vst1.8  {q3}, [r1]!
+       vst1.8  {q5}, [r1]!
+
+       b       .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
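+       @ fewer than eight blocks left: dispatch on the remaining count (1..7)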
+       adds    r2, r2, #8
+       beq     .Lcbc_dec_done
+
+       vld1.8  {q0}, [r0]!             @ load input
+       cmp     r2, #2
+       blo     .Lcbc_dec_one
+       vld1.8  {q1}, [r0]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, sp                  @ pass the key
+#else
+       add     r4, r3, #248
+#endif
+       mov     r5, r10
+       vstmia  r9, {q15}                       @ put aside IV
+       beq     .Lcbc_dec_two
+       vld1.8  {q2}, [r0]!
+       cmp     r2, #4
+       blo     .Lcbc_dec_three
+       vld1.8  {q3}, [r0]!
+       beq     .Lcbc_dec_four
+       vld1.8  {q4}, [r0]!
+       cmp     r2, #6
+       blo     .Lcbc_dec_five
+       vld1.8  {q5}, [r0]!
+       beq     .Lcbc_dec_six
+       vld1.8  {q6}, [r0]!
+       sub     r0, r0, #0x70
+
+       bl      _bsaes_decrypt8
+
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q12-q13}, [r0]!
+       veor    q4, q4, q10
+       veor    q2, q2, q11
+       vld1.8  {q15}, [r0]!
+       veor    q7, q7, q12
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       veor    q3, q3, q13
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       vst1.8  {q7}, [r1]!
+       vst1.8  {q3}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+       sub     r0, r0, #0x60
+       bl      _bsaes_decrypt8
+       vldmia  r9,{q14}                        @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q12}, [r0]!
+       veor    q4, q4, q10
+       veor    q2, q2, q11
+       vld1.8  {q15}, [r0]!
+       veor    q7, q7, q12
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       vst1.8  {q7}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+       sub     r0, r0, #0x50
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q15}, [r0]!
+       veor    q4, q4, q10
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       veor    q2, q2, q11
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+       sub     r0, r0, #0x40
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q15}, [r0]!
+       veor    q4, q4, q10
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+       sub     r0, r0, #0x30
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q15}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       vst1.8  {q6}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+       sub     r0, r0, #0x20
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8}, [r0]!             @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q15}, [r0]!            @ reload input
+       veor    q1, q1, q8
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+       sub     r0, r0, #0x10
+       mov     r10, r1                 @ save original out pointer
+       mov     r1, r9                  @ use the iv scratch space as out buffer
+       mov     r2, r3
+       vmov    q4,q15          @ just in case ensure that IV
+       vmov    q5,q0                   @ and input are preserved
+       bl      AES_decrypt
+       vld1.8  {q0}, [r9,:64]          @ load result
+       veor    q0, q0, q4      @ ^= IV
+       vmov    q15, q5         @ q5 holds input
+       vst1.8  {q0}, [r10]             @ write output
+
+.Lcbc_dec_done:
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+.Lcbc_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r9
+       bne             .Lcbc_dec_bzero
+#endif
+
+       mov     sp, r9
+       add     sp, #0x10                       @ add sp,r9,#0x10 is no good for thumb
+       vst1.8  {q15}, [r8]             @ return IV
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}
+.size  bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+.extern        AES_encrypt
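+@ bsaes_ctr32_encrypt_blocks: CTR mode with a 32-bit counter in the last
+@ word of the counter block.  r0 = in, r1 = out, r2 = number of 16-byte
+@ blocks, r3 = key, counter block pointer on the stack.  Fewer than eight
+@ blocks are handled one at a time with AES_encrypt.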
+.global        bsaes_ctr32_encrypt_blocks
+.type  bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+       cmp     r2, #8                  @ use plain AES for
+       blo     .Lctr_enc_short                 @ small sizes
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     r8, [ip]                        @ ctr is 1st arg on the stack
+       sub     sp, sp, #0x10                   @ scratch space to carry over the ctr
+       mov     r9, sp                          @ save sp
+
+       ldr     r10, [r3, #240]         @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r10, lsl#7             @ 128 bytes per inner round key
+       add     r12, #96                        @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       mov     sp, r12                         @ sp now points to the key schedule
+       bl      _bsaes_key_convert
+       veor    q7,q7,q15       @ fix up last round key
+       vstmia  r12, {q7}                       @ save last round key
+
+       vld1.8  {q0}, [r8]              @ load counter
+       add     r8, r6, #.LREVM0SR-.LM0 @ borrow r8
+       vldmia  sp, {q4}                @ load round0 key
+#else
+       ldr     r12, [r3, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [r3, #244]
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       add     r12, r3, #248                   @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    q7,q7,q15       @ fix up last round key
+       vstmia  r12, {q7}                       @ save last round key
+
+.align 2
+0:     add     r12, r3, #248
+       vld1.8  {q0}, [r8]              @ load counter
+       adrl    r8, .LREVM0SR                   @ borrow r8
+       vldmia  r12, {q4}                       @ load round0 key
+       sub     sp, #0x10                       @ place for adjusted round0 key
+#endif
+
+       vmov.i32        q8,#1           @ compose 1<<96
+       veor            q9,q9,q9
+       vrev32.8        q0,q0
+       vext.8          q8,q9,q8,#4
+       vrev32.8        q4,q4
+       vadd.u32        q9,q8,q8        @ compose 2<<96
+       vstmia  sp, {q4}                @ save adjusted round0 key
+       b       .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+       vadd.u32        q10, q8, q9     @ compose 3<<96
+       vadd.u32        q1, q0, q8      @ +1
+       vadd.u32        q2, q0, q9      @ +2
+       vadd.u32        q3, q0, q10     @ +3
+       vadd.u32        q4, q1, q10
+       vadd.u32        q5, q2, q10
+       vadd.u32        q6, q3, q10
+       vadd.u32        q7, q4, q10
+       vadd.u32        q10, q5, q10    @ next counter
+
+       @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+       @ to flip byte order in 32-bit counter
+
+       vldmia          sp, {q9}                @ load round0 key
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x10           @ pass next round key
+#else
+       add             r4, r3, #264
+#endif
+       vldmia          r8, {q8}                        @ .LREVM0SR
+       mov             r5, r10                 @ pass rounds
+       vstmia          r9, {q10}                       @ save next counter
+       sub             r6, r8, #.LREVM0SR-.LSR @ pass constants
+
+       bl              _bsaes_encrypt8_alt
+
+       subs            r2, r2, #8
+       blo             .Lctr_enc_loop_done
+
+       vld1.8          {q8-q9}, [r0]!  @ load input
+       vld1.8          {q10-q11}, [r0]!
+       veor            q0, q8
+       veor            q1, q9
+       vld1.8          {q12-q13}, [r0]!
+       veor            q4, q10
+       veor            q6, q11
+       vld1.8          {q14-q15}, [r0]!
+       veor            q3, q12
+       vst1.8          {q0-q1}, [r1]!  @ write output
+       veor            q7, q13
+       veor            q2, q14
+       vst1.8          {q4}, [r1]!
+       veor            q5, q15
+       vst1.8          {q6}, [r1]!
+       vmov.i32        q8, #1                  @ compose 1<<96
+       vst1.8          {q3}, [r1]!
+       veor            q9, q9, q9
+       vst1.8          {q7}, [r1]!
+       vext.8          q8, q9, q8, #4
+       vst1.8          {q2}, [r1]!
+       vadd.u32        q9,q8,q8                @ compose 2<<96
+       vst1.8          {q5}, [r1]!
+       vldmia          r9, {q0}                        @ load counter
+
+       bne             .Lctr_enc_loop
+       b               .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+       add             r2, r2, #8
+       vld1.8          {q8}, [r0]!     @ load input
+       veor            q0, q8
+       vst1.8          {q0}, [r1]!     @ write output
+       cmp             r2, #2
+       blo             .Lctr_enc_done
+       vld1.8          {q9}, [r0]!
+       veor            q1, q9
+       vst1.8          {q1}, [r1]!
+       beq             .Lctr_enc_done
+       vld1.8          {q10}, [r0]!
+       veor            q4, q10
+       vst1.8          {q4}, [r1]!
+       cmp             r2, #4
+       blo             .Lctr_enc_done
+       vld1.8          {q11}, [r0]!
+       veor            q6, q11
+       vst1.8          {q6}, [r1]!
+       beq             .Lctr_enc_done
+       vld1.8          {q12}, [r0]!
+       veor            q3, q12
+       vst1.8          {q3}, [r1]!
+       cmp             r2, #6
+       blo             .Lctr_enc_done
+       vld1.8          {q13}, [r0]!
+       veor            q7, q13
+       vst1.8          {q7}, [r1]!
+       beq             .Lctr_enc_done
+       vld1.8          {q14}, [r0]
+       veor            q2, q14
+       vst1.8          {q2}, [r1]!
+
+.Lctr_enc_done:
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifndef        BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:                       @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r9
+       bne             .Lctr_enc_bzero
+#else
+       vstmia          sp, {q0-q1}
+#endif
+
+       mov     sp, r9
+       add     sp, #0x10               @ add sp,r9,#0x10 is no good for thumb
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}       @ return
+
+.align 4
+.Lctr_enc_short:
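+       @ fewer than eight blocks: encrypt the counter one block at a time
+       @ with AES_encrypt and xor the result with the input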
+       ldr     ip, [sp]                @ ctr pointer is passed on stack
+       stmdb   sp!, {r4-r8, lr}
+
+       mov     r4, r0          @ copy arguments
+       mov     r5, r1
+       mov     r6, r2
+       mov     r7, r3
+       ldr     r8, [ip, #12]           @ load counter LSW
+       vld1.8  {q1}, [ip]              @ load whole counter value
+#ifdef __ARMEL__
+       rev     r8, r8
+#endif
+       sub     sp, sp, #0x10
+       vst1.8  {q1}, [sp,:64]  @ copy counter value
+       sub     sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+       add     r0, sp, #0x10           @ input counter value
+       mov     r1, sp                  @ output on the stack
+       mov     r2, r7                  @ key
+
+       bl      AES_encrypt
+
+       vld1.8  {q0}, [r4]!     @ load input
+       vld1.8  {q1}, [sp,:64]  @ load encrypted counter
+       add     r8, r8, #1
+#ifdef __ARMEL__
+       rev     r0, r8
+       str     r0, [sp, #0x1c]         @ next counter value
+#else
+       str     r8, [sp, #0x1c]         @ next counter value
+#endif
+       veor    q0,q0,q1
+       vst1.8  {q0}, [r5]!     @ store output
+       subs    r6, r6, #1
+       bne     .Lctr_enc_short_loop
+
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+       vstmia          sp!, {q0-q1}
+
+       ldmia   sp!, {r4-r8, pc}
+.size  bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
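+@ bsaes_xts_encrypt: r0 = in, r1 = out, r2 = length in bytes, r3 = key1;
+@ the key2 and iv pointers are passed on the stack.  The initial tweak is
+@ generated by encrypting the IV with key2 (or taken from the caller when
+@ XTS_CHAIN_TWEAK is defined).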
+.globl bsaes_xts_encrypt
+.type  bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future r3
+
+       mov     r7, r0
+       mov     r8, r1
+       mov     r9, r2
+       mov     r10, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0,sp                           @ pointer to initial tweak
+#endif
+
+       ldr     r1, [r10, #240]         @ get # of rounds
+       mov     r3, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r1, lsl#7              @ 128 bytes per inner round key
+       @ add   r12, #96                        @ size of bit-sliced key schedule
+       sub     r12, #48                        @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    q7, q7, q15     @ fix up last round key
+       vstmia  r12, {q7}                       @ save last round key
+#else
+       ldr     r12, [r10, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [r10, #244]
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       add     r12, r10, #248                  @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    q7, q7, q15     @ fix up last round key
+       vstmia  r12, {q7}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+
+       vld1.8  {q8}, [r0]                      @ initial tweak
+       adr     r2, .Lxts_magic
+
+       subs    r9, #0x80
+       blo     .Lxts_enc_short
+       b       .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
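+       @ generate eight consecutive tweaks (each the previous one multiplied
+       @ by x in GF(2^128)) and stash them at [sp] while loading the input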
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q6, q8, #63
+       mov             r0, sp
+       vand            q6, q6, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q9, #63
+       veor            q9, q9, q6
+       vand            q7, q7, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q10, #63
+       veor            q10, q10, q7
+       vand            q6, q6, q5
+       vld1.8          {q0}, [r7]!
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q11, #63
+       veor            q11, q11, q6
+       vand            q7, q7, q5
+       vld1.8          {q1}, [r7]!
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q12, #63
+       veor            q12, q12, q7
+       vand            q6, q6, q5
+       vld1.8          {q2}, [r7]!
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q13, #63
+       veor            q13, q13, q6
+       vand            q7, q7, q5
+       vld1.8          {q3}, [r7]!
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q14, #63
+       veor            q14, q14, q7
+       vand            q6, q6, q5
+       vld1.8          {q4}, [r7]!
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q15, #63
+       veor            q15, q15, q6
+       vand            q7, q7, q5
+       vld1.8          {q5}, [r7]!
+       veor            q4, q4, q12
+       vadd.u64        q8, q15, q15
+       vst1.64         {q15}, [r0,:128]!
+       vswp            d15,d14
+       veor            q8, q8, q7
+       vst1.64         {q8}, [r0,:128]         @ next round tweak
+
+       vld1.8          {q6-q7}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       veor            q7, q7, q15
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       vld1.64         {q14-q15}, [r0,:128]!
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q2, q14
+       vst1.8          {q10-q11}, [r8]!
+       veor            q13, q5, q15
+       vst1.8          {q12-q13}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+
+       subs            r9, #0x80
+       bpl             .Lxts_enc_loop
+
+.Lxts_enc_short:
+       adds            r9, #0x70
+       bmi             .Lxts_enc_done
+
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q7, q8, #63
+       mov             r0, sp
+       vand            q7, q7, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q9, #63
+       veor            q9, q9, q7
+       vand            q6, q6, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q10, #63
+       veor            q10, q10, q6
+       vand            q7, q7, q5
+       vld1.8          {q0}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_1
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q11, #63
+       veor            q11, q11, q7
+       vand            q6, q6, q5
+       vld1.8          {q1}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_2
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q12, #63
+       veor            q12, q12, q6
+       vand            q7, q7, q5
+       vld1.8          {q2}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_3
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q13, #63
+       veor            q13, q13, q7
+       vand            q6, q6, q5
+       vld1.8          {q3}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_4
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q14, #63
+       veor            q14, q14, q6
+       vand            q7, q7, q5
+       vld1.8          {q4}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_5
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q15, #63
+       veor            q15, q15, q7
+       vand            q6, q6, q5
+       vld1.8          {q5}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_6
+       veor            q4, q4, q12
+       sub             r9, #0x10
+       vst1.64         {q15}, [r0,:128]                @ next round tweak
+
+       vld1.8          {q6}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       vld1.64         {q14}, [r0,:128]!
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q2, q14
+       vst1.8          {q10-q11}, [r8]!
+       vst1.8          {q12}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+       vst1.64         {q14}, [r0,:128]                @ next round tweak
+
+       veor            q4, q4, q12
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q5, q5, q13
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       vst1.8          {q10-q11}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
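+@ tweak doubling constants: the carry between the 64-bit halves (1) and the
+@ GF(2^128) reduction byte (0x87)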
+.Lxts_magic:
+       .quad   1, 0x87
+
+.align 5
+.Lxts_enc_5:
+       vst1.64         {q13}, [r0,:128]                @ next round tweak
+
+       veor            q3, q3, q11
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q4, q4, q12
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       vst1.8          {q10}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+       vst1.64         {q12}, [r0,:128]                @ next round tweak
+
+       veor            q2, q2, q10
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q3, q3, q11
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       vst1.8          {q8-q9}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+       vst1.64         {q11}, [r0,:128]                @ next round tweak
+
+       veor            q1, q1, q9
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q2, q2, q10
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       vst1.8          {q8}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+       vst1.64         {q10}, [r0,:128]                @ next round tweak
+
+       veor            q0, q0, q8
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q1, q1, q9
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       vst1.8          {q0-q1}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+       mov             r0, sp
+       veor            q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                          @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r8]!
+       mov             r3, r4
+
+       vmov            q8, q9          @ next round tweak
+
+.Lxts_enc_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            r9, #0x10
+       beq             .Lxts_enc_ret
+       sub             r6, r8, #0x10
+
+.Lxts_enc_steal:
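+       @ ciphertext stealing: the stolen leading bytes of the last full
+       @ ciphertext block form the short final output block; they are
+       @ replaced with the trailing plaintext bytes and the block is
+       @ encrypted again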
+       ldrb            r0, [r7], #1
+       ldrb            r1, [r8, #-0x10]
+       strb            r0, [r8, #-0x10]
+       strb            r1, [r8], #1
+
+       subs            r9, #1
+       bhi             .Lxts_enc_steal
+
+       vld1.8          {q0}, [r6]
+       mov             r0, sp
+       veor            q0, q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                  @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r6]
+       mov             r3, r4
+#endif
+
+.Lxts_enc_ret:
+       bic             r0, r3, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [r3, #0x20+VFP_ABI_FRAME]   @ chain tweak
+#endif
+.Lxts_enc_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_enc_bzero
+
+       mov             sp, r3
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {q8}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
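+@ bsaes_xts_decrypt: same argument layout as bsaes_xts_encrypt; when the
+@ length is not a multiple of 16, one extra block is held back so the tail
+@ can be handled with ciphertext stealing.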
+.globl bsaes_xts_decrypt
+.type  bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future r3
+
+       mov     r7, r0
+       mov     r8, r1
+       mov     r9, r2
+       mov     r10, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0, sp                          @ pointer to initial tweak
+#endif
+
+       ldr     r1, [r10, #240]         @ get # of rounds
+       mov     r3, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r1, lsl#7              @ 128 bytes per inner round key
+       @ add   r12, #96                        @ size of bit-sliced key schedule
+       sub     r12, #48                        @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, sp, #0x90
+       vldmia  r4, {q6}
+       vstmia  r12,  {q15}             @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  r4, {q7}
+#else
+       ldr     r12, [r10, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [r10, #244]
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       add     r12, r10, #248                  @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, r10, #248
+       vldmia  r4, {q6}
+       vstmia  r12,  {q15}             @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  r4, {q7}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+       vld1.8  {q8}, [r0]                      @ initial tweak
+       adr     r2, .Lxts_magic
+
+       tst     r9, #0xf                        @ if not multiple of 16
+       it      ne                              @ Thumb2 thing, sanity check in ARM
+       subne   r9, #0x10                       @ subtract another 16 bytes
+       subs    r9, #0x80
+
+       blo     .Lxts_dec_short
+       b       .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q6, q8, #63
+       mov             r0, sp
+       vand            q6, q6, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q9, #63
+       veor            q9, q9, q6
+       vand            q7, q7, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q10, #63
+       veor            q10, q10, q7
+       vand            q6, q6, q5
+       vld1.8          {q0}, [r7]!
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q11, #63
+       veor            q11, q11, q6
+       vand            q7, q7, q5
+       vld1.8          {q1}, [r7]!
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q12, #63
+       veor            q12, q12, q7
+       vand            q6, q6, q5
+       vld1.8          {q2}, [r7]!
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q13, #63
+       veor            q13, q13, q6
+       vand            q7, q7, q5
+       vld1.8          {q3}, [r7]!
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q14, #63
+       veor            q14, q14, q7
+       vand            q6, q6, q5
+       vld1.8          {q4}, [r7]!
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q15, #63
+       veor            q15, q15, q6
+       vand            q7, q7, q5
+       vld1.8          {q5}, [r7]!
+       veor            q4, q4, q12
+       vadd.u64        q8, q15, q15
+       vst1.64         {q15}, [r0,:128]!
+       vswp            d15,d14
+       veor            q8, q8, q7
+       vst1.64         {q8}, [r0,:128]         @ next round tweak
+
+       vld1.8          {q6-q7}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       veor            q7, q7, q15
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       vld1.64         {q14-q15}, [r0,:128]!
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q3, q14
+       vst1.8          {q10-q11}, [r8]!
+       veor            q13, q5, q15
+       vst1.8          {q12-q13}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+
+       subs            r9, #0x80
+       bpl             .Lxts_dec_loop
+
+.Lxts_dec_short:
+       adds            r9, #0x70
+       bmi             .Lxts_dec_done
+
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q7, q8, #63
+       mov             r0, sp
+       vand            q7, q7, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q9, #63
+       veor            q9, q9, q7
+       vand            q6, q6, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q10, #63
+       veor            q10, q10, q6
+       vand            q7, q7, q5
+       vld1.8          {q0}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_1
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q11, #63
+       veor            q11, q11, q7
+       vand            q6, q6, q5
+       vld1.8          {q1}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_2
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q12, #63
+       veor            q12, q12, q6
+       vand            q7, q7, q5
+       vld1.8          {q2}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_3
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q13, #63
+       veor            q13, q13, q7
+       vand            q6, q6, q5
+       vld1.8          {q3}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_4
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q14, #63
+       veor            q14, q14, q6
+       vand            q7, q7, q5
+       vld1.8          {q4}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_5
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q15, #63
+       veor            q15, q15, q7
+       vand            q6, q6, q5
+       vld1.8          {q5}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_6
+       veor            q4, q4, q12
+       sub             r9, #0x10
+       vst1.64         {q15}, [r0,:128]                @ next round tweak
+
+       vld1.8          {q6}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       vld1.64         {q14}, [r0,:128]!
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q3, q14
+       vst1.8          {q10-q11}, [r8]!
+       vst1.8          {q12}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+       vst1.64         {q14}, [r0,:128]                @ next round tweak
+
+       veor            q4, q4, q12
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q5, q5, q13
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       vst1.8          {q10-q11}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+       vst1.64         {q13}, [r0,:128]                @ next round tweak
+
+       veor            q3, q3, q11
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q4, q4, q12
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       vst1.8          {q10}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+       vst1.64         {q12}, [r0,:128]                @ next round tweak
+
+       veor            q2, q2, q10
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q3, q3, q11
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       vst1.8          {q8-q9}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+       vst1.64         {q11}, [r0,:128]                @ next round tweak
+
+       veor            q1, q1, q9
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q2, q2, q10
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       vst1.8          {q8}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+       vst1.64         {q10}, [r0,:128]                @ next round tweak
+
+       veor            q0, q0, q8
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q1, q1, q9
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       vst1.8          {q0-q1}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+       mov             r0, sp
+       veor            q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                          @ preserve fp
+       mov             r5, r2                  @ preserve magic
+
+       bl              AES_decrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r8]!
+       mov             r3, r4
+       mov             r2, r5
+
+       vmov            q8, q9          @ next round tweak
+
+.Lxts_dec_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            r9, #0x10
+       beq             .Lxts_dec_ret
+
+       @ calculate one round of extra tweak for the stolen ciphertext
+       vldmia          r2, {q5}
+       vshr.s64        q6, q8, #63
+       vand            q6, q6, q5
+       vadd.u64        q9, q8, q8
+       vswp            d13,d12
+       veor            q9, q9, q6
+
+       @ perform the final decryption with the last tweak value
+       vld1.8          {q0}, [r7]!
+       mov             r0, sp
+       veor            q0, q0, q9
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                  @ preserve fp
+
+       bl              AES_decrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q9
+       vst1.8          {q0}, [r8]
+
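+       @ ciphertext stealing: swap the tail bytes of the last full output
+       @ block with the remaining input bytes before the final decryption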
+       mov             r6, r8
+.Lxts_dec_steal:
+       ldrb            r1, [r8]
+       ldrb            r0, [r7], #1
+       strb            r1, [r8, #0x10]
+       strb            r0, [r8], #1
+
+       subs            r9, #1
+       bhi             .Lxts_dec_steal
+
+       vld1.8          {q0}, [r6]
+       mov             r0, sp
+       veor            q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+
+       bl              AES_decrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r6]
+       mov             r3, r4
+#endif
+
+.Lxts_dec_ret:
+       bic             r0, r3, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [r3, #0x20+VFP_ABI_FRAME]   @ chain tweak
+#endif
+.Lxts_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_dec_bzero
+
+       mov             sp, r3
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {q8}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_decrypt,.-bsaes_xts_decrypt
+#endif
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
new file mode 100644 (file)
index 0000000..4522366
--- /dev/null
@@ -0,0 +1,434 @@
+/*
+ * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+#include "aes_glue.h"
+
+#define BIT_SLICED_KEY_MAXSIZE (128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)
+
+struct BS_KEY {
+       struct AES_KEY  rk;
+       int             converted;
+       u8 __aligned(8) bs[BIT_SLICED_KEY_MAXSIZE];
+} __aligned(8);
+
+asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
+asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);
+
+asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
+                                 struct BS_KEY *key, u8 iv[]);
+
+asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
+                                          struct BS_KEY *key, u8 const iv[]);
+
+asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
+                                 struct BS_KEY *key, u8 tweak[]);
+
+asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
+                                 struct BS_KEY *key, u8 tweak[]);
+
+struct aesbs_cbc_ctx {
+       struct AES_KEY  enc;
+       struct BS_KEY   dec;
+};
+
+struct aesbs_ctr_ctx {
+       struct BS_KEY   enc;
+};
+
+struct aesbs_xts_ctx {
+       struct BS_KEY   enc;
+       struct BS_KEY   dec;
+       struct AES_KEY  twkey;
+};
+
+static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                            unsigned int key_len)
+{
+       struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+       int bits = key_len * 8;
+
+       if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       ctx->dec.rk = ctx->enc;
+       private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+       ctx->dec.converted = 0;
+       return 0;
+}
+
+static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                            unsigned int key_len)
+{
+       struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+       int bits = key_len * 8;
+
+       if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       ctx->enc.converted = 0;
+       return 0;
+}
+
+static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                            unsigned int key_len)
+{
+       struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
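+       /* key_len covers both XTS keys, so each half is key_len * 4 bits */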
+       int bits = key_len * 4;
+
+       if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       ctx->dec.rk = ctx->enc.rk;
+       private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+       private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
+       ctx->enc.converted = ctx->dec.converted = 0;
+       return 0;
+}
+
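+/*
+ * CBC encryption is inherently serial (each block depends on the previous
+ * ciphertext block), so it cannot use the eight-way bit-sliced NEON path
+ * and is done with the scalar AES_encrypt() routine instead.
+ */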
+static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while (walk.nbytes) {
+               u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+               u8 *src = walk.src.virt.addr;
+
+               if (walk.dst.virt.addr == walk.src.virt.addr) {
+                       u8 *iv = walk.iv;
+
+                       do {
+                               crypto_xor(src, iv, AES_BLOCK_SIZE);
+                               AES_encrypt(src, src, &ctx->enc);
+                               iv = src;
+                               src += AES_BLOCK_SIZE;
+                       } while (--blocks);
+                       memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+               } else {
+                       u8 *dst = walk.dst.virt.addr;
+
+                       do {
+                               crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
+                               AES_encrypt(walk.iv, dst, &ctx->enc);
+                               memcpy(walk.iv, dst, AES_BLOCK_SIZE);
+                               src += AES_BLOCK_SIZE;
+                               dst += AES_BLOCK_SIZE;
+                       } while (--blocks);
+               }
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
+
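+/*
+ * CBC decryption is parallelizable: full groups of eight blocks go through
+ * the bit-sliced NEON routine, and any remaining blocks fall back to the
+ * scalar AES_decrypt() loop below.
+ */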
+static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
+               kernel_neon_begin();
+               bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+                                 walk.nbytes, &ctx->dec, walk.iv);
+               kernel_neon_end();
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       while (walk.nbytes) {
+               u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+               u8 *dst = walk.dst.virt.addr;
+               u8 *src = walk.src.virt.addr;
+               u8 bk[2][AES_BLOCK_SIZE];
+               u8 *iv = walk.iv;
+
+               do {
+                       if (walk.dst.virt.addr == walk.src.virt.addr)
+                               memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
+
+                       AES_decrypt(src, dst, &ctx->dec.rk);
+                       crypto_xor(dst, iv, AES_BLOCK_SIZE);
+
+                       if (walk.dst.virt.addr == walk.src.virt.addr)
+                               iv = bk[blocks & 1];
+                       else
+                               iv = src;
+
+                       dst += AES_BLOCK_SIZE;
+                       src += AES_BLOCK_SIZE;
+               } while (--blocks);
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
+
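+/*
+ * Add 'addend' to a 128-bit big-endian counter, rippling a carry of one
+ * into the higher words only when a word wraps, e.g. adding 2 to
+ * ...00000000 ffffffff yields ...00000001 00000001.
+ */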
+static void inc_be128_ctr(__be32 ctr[], u32 addend)
+{
+       int i;
+
+       for (i = 3; i >= 0; i--, addend = 1) {
+               u32 n = be32_to_cpu(ctr[i]) + addend;
+
+               ctr[i] = cpu_to_be32(n);
+               if (n >= addend)
+                       break;
+       }
+}
+
+static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst, struct scatterlist *src,
+                            unsigned int nbytes)
+{
+       struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       u32 blocks;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
+               u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+               __be32 *ctr = (__be32 *)walk.iv;
+               u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);
+
+               /* avoid 32 bit counter overflow in the NEON code */
+               if (unlikely(headroom < blocks)) {
+                       blocks = headroom + 1;
+                       tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
+               }
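+               /*
+                * bsaes_ctr32_encrypt_blocks() only increments the low
+                * 32-bit counter word, so the request is clipped at the
+                * 2^32 boundary here; inc_be128_ctr() below carries into
+                * the upper words, and the rest of the request is revisited
+                * on a later iteration of the walk.
+                */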
+               kernel_neon_begin();
+               bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
+                                          walk.dst.virt.addr, blocks,
+                                          &ctx->enc, walk.iv);
+               kernel_neon_end();
+               inc_be128_ctr(ctr, blocks);
+
+               nbytes -= blocks * AES_BLOCK_SIZE;
+               if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
+                       break;
+
+               err = blkcipher_walk_done(desc, &walk, tail);
+       }
+       if (walk.nbytes) {
+               u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 ks[AES_BLOCK_SIZE];
+
+               AES_encrypt(walk.iv, ks, &ctx->enc.rk);
+               if (tdst != tsrc)
+                       memcpy(tdst, tsrc, nbytes);
+               crypto_xor(tdst, ks, nbytes);
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
+
+static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       /* generate the initial tweak */
+       AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+       while (walk.nbytes) {
+               kernel_neon_begin();
+               bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+                                 walk.nbytes, &ctx->enc, walk.iv);
+               kernel_neon_end();
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
+
+static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       /* generate the initial tweak */
+       AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+       while (walk.nbytes) {
+               kernel_neon_begin();
+               bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+                                 walk.nbytes, &ctx->dec, walk.iv);
+               kernel_neon_end();
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
+
+static struct crypto_alg aesbs_algs[] = { {
+       .cra_name               = "__cbc-aes-neonbs",
+       .cra_driver_name        = "__driver-cbc-aes-neonbs",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct aesbs_cbc_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_blkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = aesbs_cbc_set_key,
+               .encrypt        = aesbs_cbc_encrypt,
+               .decrypt        = aesbs_cbc_decrypt,
+       },
+}, {
+       .cra_name               = "__ctr-aes-neonbs",
+       .cra_driver_name        = "__driver-ctr-aes-neonbs",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct aesbs_ctr_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_blkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = aesbs_ctr_set_key,
+               .encrypt        = aesbs_ctr_encrypt,
+               .decrypt        = aesbs_ctr_encrypt,
+       },
+}, {
+       .cra_name               = "__xts-aes-neonbs",
+       .cra_driver_name        = "__driver-xts-aes-neonbs",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct aesbs_xts_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_blkcipher = {
+               .min_keysize    = 2 * AES_MIN_KEY_SIZE,
+               .max_keysize    = 2 * AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = aesbs_xts_set_key,
+               .encrypt        = aesbs_xts_encrypt,
+               .decrypt        = aesbs_xts_decrypt,
+       },
+}, {
+       .cra_name               = "cbc(aes)",
+       .cra_driver_name        = "cbc-aes-neonbs",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct async_helper_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_init               = ablk_init,
+       .cra_exit               = ablk_exit,
+       .cra_ablkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = ablk_set_key,
+               .encrypt        = __ablk_encrypt,
+               .decrypt        = ablk_decrypt,
+       }
+}, {
+       .cra_name               = "ctr(aes)",
+       .cra_driver_name        = "ctr-aes-neonbs",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct async_helper_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_init               = ablk_init,
+       .cra_exit               = ablk_exit,
+       .cra_ablkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = ablk_set_key,
+               .encrypt        = ablk_encrypt,
+               .decrypt        = ablk_decrypt,
+       }
+}, {
+       .cra_name               = "xts(aes)",
+       .cra_driver_name        = "xts-aes-neonbs",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct async_helper_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_init               = ablk_init,
+       .cra_exit               = ablk_exit,
+       .cra_ablkcipher = {
+               .min_keysize    = 2 * AES_MIN_KEY_SIZE,
+               .max_keysize    = 2 * AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = ablk_set_key,
+               .encrypt        = ablk_encrypt,
+               .decrypt        = ablk_decrypt,
+       }
+} };
+
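+/*
+ * A minimal usage sketch (assuming the ablkcipher interface of this kernel
+ * generation; the variables below are illustrative only):
+ *
+ *     struct crypto_ablkcipher *tfm;
+ *
+ *     tfm = crypto_alloc_ablkcipher("xts(aes)", 0, 0);
+ *     if (IS_ERR(tfm))
+ *             return PTR_ERR(tfm);
+ *     err = crypto_ablkcipher_setkey(tfm, key, 2 * AES_KEYSIZE_256);
+ *     ...
+ *     crypto_free_ablkcipher(tfm);
+ *
+ * where "key" holds the two concatenated XTS keys.
+ */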
+static int __init aesbs_mod_init(void)
+{
+       if (!cpu_has_neon())
+               return -ENODEV;
+
+       return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+static void __exit aesbs_mod_exit(void)
+{
+       crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+module_init(aesbs_mod_init);
+module_exit(aesbs_mod_exit);
+
+MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
new file mode 100644 (file)
index 0000000..f3d96d9
--- /dev/null
@@ -0,0 +1,2467 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+# granted.
+# ====================================================================
+
+# Bit-sliced AES for ARM NEON
+#
+# February 2012.
+#
+# This implementation is a direct adaptation of the bsaes-x86_64 module
+# for ARM NEON, except that this module is endian-neutral [in the sense
+# that it can be compiled for either endianness], courtesy of vld1.8's
+# neutrality. The initial version doesn't implement the interface to
+# OpenSSL, only low-level primitives and unsupported entry points, just
+# enough to collect performance results, which for a Cortex-A8 core are:
+#
+# encrypt      19.5 cycles per byte processed with 128-bit key
+# decrypt      22.1 cycles per byte processed with 128-bit key
+# key conv.    440  cycles per 128-bit key/0.18 of 8x block
+#
+# Snapdragon S4 encrypts a byte in 17.6 cycles and decrypts it in 19.7,
+# which is [much] worse than anticipated (for further details see
+# http://www.openssl.org/~appro/Snapdragon-S4.html).
+#
+# Cortex-A15 manages 14.2/16.1 cycles [where the integer-only code
+# manages 20.0 cycles].
+#
+# When comparing to x86_64 results keep in mind that the NEON unit is
+# [mostly] single-issue and thus can't [fully] benefit from
+# instruction-level parallelism. And when comparing to aes-armv4
+# results keep in mind the key schedule conversion overhead (see
+# bsaes-x86_64.pl for further details)...
+#
+#                                              <appro@openssl.org>
+
+# April-August 2013
+#
+# Add CBC, CTR and XTS subroutines, adapt for kernel use.
+#
+#                                      <ard.biesheuvel@linaro.org>
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
+my @XMM=map("q$_",(0..15));
+
+{
+my ($key,$rounds,$const)=("r4","r5","r6");
+
+sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
+sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
+
+sub Sbox {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+       &InBasisChange  (@b);
+       &Inv_GF256      (@b[6,5,0,3,7,1,4,2],@t,@s);
+       &OutBasisChange (@b[7,1,4,2,6,5,0,3]);
+}
+
+sub InBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb 
+my @b=@_[0..7];
+$code.=<<___;
+       veor    @b[2], @b[2], @b[1]
+       veor    @b[5], @b[5], @b[6]
+       veor    @b[3], @b[3], @b[0]
+       veor    @b[6], @b[6], @b[2]
+       veor    @b[5], @b[5], @b[0]
+
+       veor    @b[6], @b[6], @b[3]
+       veor    @b[3], @b[3], @b[7]
+       veor    @b[7], @b[7], @b[5]
+       veor    @b[3], @b[3], @b[4]
+       veor    @b[4], @b[4], @b[5]
+
+       veor    @b[2], @b[2], @b[7]
+       veor    @b[3], @b[3], @b[1]
+       veor    @b[1], @b[1], @b[5]
+___
+}
+
+sub OutBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+       veor    @b[0], @b[0], @b[6]
+       veor    @b[1], @b[1], @b[4]
+       veor    @b[4], @b[4], @b[6]
+       veor    @b[2], @b[2], @b[0]
+       veor    @b[6], @b[6], @b[1]
+
+       veor    @b[1], @b[1], @b[5]
+       veor    @b[5], @b[5], @b[3]
+       veor    @b[3], @b[3], @b[7]
+       veor    @b[7], @b[7], @b[5]
+       veor    @b[2], @b[2], @b[5]
+
+       veor    @b[4], @b[4], @b[7]
+___
+}
+
+sub InvSbox {
+# input in lsb         > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb        > [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+       &InvInBasisChange       (@b);
+       &Inv_GF256              (@b[5,1,2,6,3,7,0,4],@t,@s);
+       &InvOutBasisChange      (@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange {         # OutBasisChange in reverse (with twist)
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___
+        veor   @b[1], @b[1], @b[7]
+       veor    @b[4], @b[4], @b[7]
+
+       veor    @b[7], @b[7], @b[5]
+        veor   @b[1], @b[1], @b[3]
+       veor    @b[2], @b[2], @b[5]
+       veor    @b[3], @b[3], @b[7]
+
+       veor    @b[6], @b[6], @b[1]
+       veor    @b[2], @b[2], @b[0]
+        veor   @b[5], @b[5], @b[3]
+       veor    @b[4], @b[4], @b[6]
+       veor    @b[0], @b[0], @b[6]
+       veor    @b[1], @b[1], @b[4]
+___
+}
+
+sub InvOutBasisChange {                # InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+       veor    @b[1], @b[1], @b[5]
+       veor    @b[2], @b[2], @b[7]
+
+       veor    @b[3], @b[3], @b[1]
+       veor    @b[4], @b[4], @b[5]
+       veor    @b[7], @b[7], @b[5]
+       veor    @b[3], @b[3], @b[4]
+        veor   @b[5], @b[5], @b[0]
+       veor    @b[3], @b[3], @b[7]
+        veor   @b[6], @b[6], @b[2]
+        veor   @b[2], @b[2], @b[1]
+       veor    @b[6], @b[6], @b[3]
+
+       veor    @b[3], @b[3], @b[0]
+       veor    @b[5], @b[5], @b[6]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
+$code.=<<___;
+       veor    $t0, $y0, $y1
+       vand    $t0, $t0, $x0
+       veor    $x0, $x0, $x1
+       vand    $t1, $x1, $y0
+       vand    $x0, $x0, $y1
+       veor    $x1, $t1, $t0
+       veor    $x0, $x0, $t1
+___
+}
+
+sub Mul_GF4_N {                                # not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+       veor    $t0, $y0, $y1
+       vand    $t0, $t0, $x0
+       veor    $x0, $x0, $x1
+       vand    $x1, $x1, $y0
+       vand    $x0, $x0, $y1
+       veor    $x1, $x1, $x0
+       veor    $x0, $x0, $t0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+    $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+       veor    $t0, $y0, $y1
+        veor   $t1, $y2, $y3
+       vand    $t0, $t0, $x0
+        vand   $t1, $t1, $x2
+       veor    $x0, $x0, $x1
+        veor   $x2, $x2, $x3
+       vand    $x1, $x1, $y0
+        vand   $x3, $x3, $y2
+       vand    $x0, $x0, $y1
+        vand   $x2, $x2, $y3
+       veor    $x1, $x1, $x0
+        veor   $x2, $x2, $x3
+       veor    $x0, $x0, $t0
+        veor   $x3, $x3, $t1
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+       veor    @t[0], @x[0], @x[2]
+       veor    @t[1], @x[1], @x[3]
+___
+       &Mul_GF4        (@x[0], @x[1], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+       veor    @y[0], @y[0], @y[2]
+       veor    @y[1], @y[1], @y[3]
+___
+       Mul_GF4_N_GF4   (@t[0], @t[1], @y[0], @y[1], @t[3],
+                        @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+       veor    @x[0], @x[0], @t[0]
+       veor    @x[2], @x[2], @t[0]
+       veor    @x[1], @x[1], @t[1]
+       veor    @x[3], @x[3], @t[1]
+
+       veor    @t[0], @x[4], @x[6]
+       veor    @t[1], @x[5], @x[7]
+___
+       &Mul_GF4_N_GF4  (@t[0], @t[1], @y[0], @y[1], @t[3],
+                        @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+       veor    @y[0], @y[0], @y[2]
+       veor    @y[1], @y[1], @y[3]
+___
+       &Mul_GF4        (@x[4], @x[5], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+       veor    @x[4], @x[4], @t[0]
+       veor    @x[6], @x[6], @t[0]
+       veor    @x[5], @x[5], @t[1]
+       veor    @x[7], @x[7], @t[1]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144)       *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+       veor    @t[3], @x[4], @x[6]
+       veor    @t[2], @x[5], @x[7]
+       veor    @t[1], @x[1], @x[3]
+       veor    @s[1], @x[7], @x[6]
+        vmov   @t[0], @t[2]
+       veor    @s[0], @x[0], @x[2]
+
+       vorr    @t[2], @t[2], @t[1]
+       veor    @s[3], @t[3], @t[0]
+       vand    @s[2], @t[3], @s[0]
+       vorr    @t[3], @t[3], @s[0]
+       veor    @s[0], @s[0], @t[1]
+       vand    @t[0], @t[0], @t[1]
+       veor    @t[1], @x[3], @x[2]
+       vand    @s[3], @s[3], @s[0]
+       vand    @s[1], @s[1], @t[1]
+       veor    @t[1], @x[4], @x[5]
+       veor    @s[0], @x[1], @x[0]
+       veor    @t[3], @t[3], @s[1]
+       veor    @t[2], @t[2], @s[1]
+       vand    @s[1], @t[1], @s[0]
+       vorr    @t[1], @t[1], @s[0]
+       veor    @t[3], @t[3], @s[3]
+       veor    @t[0], @t[0], @s[1]
+       veor    @t[2], @t[2], @s[2]
+       veor    @t[1], @t[1], @s[3]
+       veor    @t[0], @t[0], @s[2]
+       vand    @s[0], @x[7], @x[3]
+       veor    @t[1], @t[1], @s[2]
+       vand    @s[1], @x[6], @x[2]
+       vand    @s[2], @x[5], @x[1]
+       vorr    @s[3], @x[4], @x[0]
+       veor    @t[3], @t[3], @s[0]
+       veor    @t[1], @t[1], @s[2]
+       veor    @t[0], @t[0], @s[3]
+       veor    @t[2], @t[2], @s[1]
+
+       @ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+       @ new smaller inversion
+
+       vand    @s[2], @t[3], @t[1]
+       vmov    @s[0], @t[0]
+
+       veor    @s[1], @t[2], @s[2]
+       veor    @s[3], @t[0], @s[2]
+       veor    @s[2], @t[0], @s[2]     @ @s[2]=@s[3]
+
+       vbsl    @s[1], @t[1], @t[0]
+       vbsl    @s[3], @t[3], @t[2]
+       veor    @t[3], @t[3], @t[2]
+
+       vbsl    @s[0], @s[1], @s[2]
+       vbsl    @t[0], @s[2], @s[1]
+
+       vand    @s[2], @s[0], @s[3]
+       veor    @t[1], @t[1], @t[0]
+
+       veor    @s[2], @s[2], @t[3]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+       &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
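+# ShiftRows below also folds in AddRoundKey: the round key words are
+# loaded with vldmia and xored into the state before the vtbl lookups
+# apply the byte permutation selected by $mask.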
+sub ShiftRows {
+my @x=@_[0..7];
+my @t=@_[8..11];
+my $mask=pop;
+$code.=<<___;
+       vldmia  $key!, {@t[0]-@t[3]}
+       veor    @t[0], @t[0], @x[0]
+       veor    @t[1], @t[1], @x[1]
+       vtbl.8  `&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[0]}
+       veor    @t[2], @t[2], @x[2]
+       vtbl.8  `&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[1]}
+       veor    @t[3], @t[3], @x[3]
+       vtbl.8  `&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[2]}
+       vtbl.8  `&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[3]}
+       veor    @t[0], @t[0], @x[4]
+       veor    @t[1], @t[1], @x[5]
+       vtbl.8  `&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
+       veor    @t[2], @t[2], @x[6]
+       vtbl.8  `&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
+       veor    @t[3], @t[3], @x[7]
+       vtbl.8  `&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
+       vtbl.8  `&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
+___
+}
+
+sub MixColumns {
+# modified to emit output in order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16];        # optional
+$code.=<<___;
+       vext.8  @t[0], @x[0], @x[0], #12        @ x0 <<< 32
+       vext.8  @t[1], @x[1], @x[1], #12
+        veor   @x[0], @x[0], @t[0]             @ x0 ^ (x0 <<< 32)
+       vext.8  @t[2], @x[2], @x[2], #12
+        veor   @x[1], @x[1], @t[1]
+       vext.8  @t[3], @x[3], @x[3], #12
+        veor   @x[2], @x[2], @t[2]
+       vext.8  @t[4], @x[4], @x[4], #12
+        veor   @x[3], @x[3], @t[3]
+       vext.8  @t[5], @x[5], @x[5], #12
+        veor   @x[4], @x[4], @t[4]
+       vext.8  @t[6], @x[6], @x[6], #12
+        veor   @x[5], @x[5], @t[5]
+       vext.8  @t[7], @x[7], @x[7], #12
+        veor   @x[6], @x[6], @t[6]
+
+       veor    @t[1], @t[1], @x[0]
+        veor   @x[7], @x[7], @t[7]
+        vext.8 @x[0], @x[0], @x[0], #8         @ (x0 ^ (x0 <<< 32)) <<< 64)
+       veor    @t[2], @t[2], @x[1]
+       veor    @t[0], @t[0], @x[7]
+       veor    @t[1], @t[1], @x[7]
+        vext.8 @x[1], @x[1], @x[1], #8
+       veor    @t[5], @t[5], @x[4]
+        veor   @x[0], @x[0], @t[0]
+       veor    @t[6], @t[6], @x[5]
+        veor   @x[1], @x[1], @t[1]
+        vext.8 @t[0], @x[4], @x[4], #8
+       veor    @t[4], @t[4], @x[3]
+        vext.8 @t[1], @x[5], @x[5], #8
+       veor    @t[7], @t[7], @x[6]
+        vext.8 @x[4], @x[3], @x[3], #8
+       veor    @t[3], @t[3], @x[2]
+        vext.8 @x[5], @x[7], @x[7], #8
+       veor    @t[4], @t[4], @x[7]
+        vext.8 @x[3], @x[6], @x[6], #8
+       veor    @t[3], @t[3], @x[7]
+        vext.8 @x[6], @x[2], @x[2], #8
+       veor    @x[7], @t[1], @t[5]
+___
+$code.=<<___ if (!$inv);
+       veor    @x[2], @t[0], @t[4]
+       veor    @x[4], @x[4], @t[3]
+       veor    @x[5], @x[5], @t[7]
+       veor    @x[3], @x[3], @t[6]
+        @ vmov @x[2], @t[0]
+       veor    @x[6], @x[6], @t[2]
+        @ vmov @x[7], @t[1]
+___
+$code.=<<___ if ($inv);
+       veor    @t[3], @t[3], @x[4]
+       veor    @x[5], @x[5], @t[7]
+       veor    @x[2], @x[3], @t[6]
+       veor    @x[3], @t[0], @t[4]
+       veor    @x[4], @x[6], @t[2]
+       vmov    @x[6], @t[3]
+        @ vmov @x[7], @t[1]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+       @ multiplication by 0x0e
+       vext.8  @t[7], @x[7], @x[7], #12
+       vmov    @t[2], @x[2]
+       veor    @x[2], @x[2], @x[5]             @ 2 5
+       veor    @x[7], @x[7], @x[5]             @ 7 5
+       vext.8  @t[0], @x[0], @x[0], #12
+       vmov    @t[5], @x[5]
+       veor    @x[5], @x[5], @x[0]             @ 5 0           [1]
+       veor    @x[0], @x[0], @x[1]             @ 0 1
+       vext.8  @t[1], @x[1], @x[1], #12
+       veor    @x[1], @x[1], @x[2]             @ 1 25
+       veor    @x[0], @x[0], @x[6]             @ 01 6          [2]
+       vext.8  @t[3], @x[3], @x[3], #12
+       veor    @x[1], @x[1], @x[3]             @ 125 3         [4]
+       veor    @x[2], @x[2], @x[0]             @ 25 016        [3]
+       veor    @x[3], @x[3], @x[7]             @ 3 75
+       veor    @x[7], @x[7], @x[6]             @ 75 6          [0]
+       vext.8  @t[6], @x[6], @x[6], #12
+       vmov    @t[4], @x[4]
+       veor    @x[6], @x[6], @x[4]             @ 6 4
+       veor    @x[4], @x[4], @x[3]             @ 4 375         [6]
+       veor    @x[3], @x[3], @x[7]             @ 375 756=36
+       veor    @x[6], @x[6], @t[5]             @ 64 5          [7]
+       veor    @x[3], @x[3], @t[2]             @ 36 2
+       vext.8  @t[5], @t[5], @t[5], #12
+       veor    @x[3], @x[3], @t[4]             @ 362 4         [5]
+___
+                                       my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+       @ multiplication by 0x0b
+       veor    @y[1], @y[1], @y[0]
+       veor    @y[0], @y[0], @t[0]
+       vext.8  @t[2], @t[2], @t[2], #12
+       veor    @y[1], @y[1], @t[1]
+       veor    @y[0], @y[0], @t[5]
+       vext.8  @t[4], @t[4], @t[4], #12
+       veor    @y[1], @y[1], @t[6]
+       veor    @y[0], @y[0], @t[7]
+       veor    @t[7], @t[7], @t[6]             @ clobber t[7]
+
+       veor    @y[3], @y[3], @t[0]
+        veor   @y[1], @y[1], @y[0]
+       vext.8  @t[0], @t[0], @t[0], #12
+       veor    @y[2], @y[2], @t[1]
+       veor    @y[4], @y[4], @t[1]
+       vext.8  @t[1], @t[1], @t[1], #12
+       veor    @y[2], @y[2], @t[2]
+       veor    @y[3], @y[3], @t[2]
+       veor    @y[5], @y[5], @t[2]
+       veor    @y[2], @y[2], @t[7]
+       vext.8  @t[2], @t[2], @t[2], #12
+       veor    @y[3], @y[3], @t[3]
+       veor    @y[6], @y[6], @t[3]
+       veor    @y[4], @y[4], @t[3]
+       veor    @y[7], @y[7], @t[4]
+       vext.8  @t[3], @t[3], @t[3], #12
+       veor    @y[5], @y[5], @t[4]
+       veor    @y[7], @y[7], @t[7]
+       veor    @t[7], @t[7], @t[5]             @ clobber t[7] even more
+       veor    @y[3], @y[3], @t[5]
+       veor    @y[4], @y[4], @t[4]
+
+       veor    @y[5], @y[5], @t[7]
+       vext.8  @t[4], @t[4], @t[4], #12
+       veor    @y[6], @y[6], @t[7]
+       veor    @y[4], @y[4], @t[7]
+
+       veor    @t[7], @t[7], @t[5]
+       vext.8  @t[5], @t[5], @t[5], #12
+
+       @ multiplication by 0x0d
+       veor    @y[4], @y[4], @y[7]
+        veor   @t[7], @t[7], @t[6]             @ restore t[7]
+       veor    @y[7], @y[7], @t[4]
+       vext.8  @t[6], @t[6], @t[6], #12
+       veor    @y[2], @y[2], @t[0]
+       veor    @y[7], @y[7], @t[5]
+       vext.8  @t[7], @t[7], @t[7], #12
+       veor    @y[2], @y[2], @t[2]
+
+       veor    @y[3], @y[3], @y[1]
+       veor    @y[1], @y[1], @t[1]
+       veor    @y[0], @y[0], @t[0]
+       veor    @y[3], @y[3], @t[0]
+       veor    @y[1], @y[1], @t[5]
+       veor    @y[0], @y[0], @t[5]
+       vext.8  @t[0], @t[0], @t[0], #12
+       veor    @y[1], @y[1], @t[7]
+       veor    @y[0], @y[0], @t[6]
+       veor    @y[3], @y[3], @y[1]
+       veor    @y[4], @y[4], @t[1]
+       vext.8  @t[1], @t[1], @t[1], #12
+
+       veor    @y[7], @y[7], @t[7]
+       veor    @y[4], @y[4], @t[2]
+       veor    @y[5], @y[5], @t[2]
+       veor    @y[2], @y[2], @t[6]
+       veor    @t[6], @t[6], @t[3]             @ clobber t[6]
+       vext.8  @t[2], @t[2], @t[2], #12
+       veor    @y[4], @y[4], @y[7]
+       veor    @y[3], @y[3], @t[6]
+
+       veor    @y[6], @y[6], @t[6]
+       veor    @y[5], @y[5], @t[5]
+       vext.8  @t[5], @t[5], @t[5], #12
+       veor    @y[6], @y[6], @t[4]
+       vext.8  @t[4], @t[4], @t[4], #12
+       veor    @y[5], @y[5], @t[6]
+       veor    @y[6], @y[6], @t[7]
+       vext.8  @t[7], @t[7], @t[7], #12
+       veor    @t[6], @t[6], @t[3]             @ restore t[6]
+       vext.8  @t[3], @t[3], @t[3], #12
+
+       @ multiplication by 0x09
+       veor    @y[4], @y[4], @y[1]
+       veor    @t[1], @t[1], @y[1]             @ t[1]=y[1]
+       veor    @t[0], @t[0], @t[5]             @ clobber t[0]
+       vext.8  @t[6], @t[6], @t[6], #12
+       veor    @t[1], @t[1], @t[5]
+       veor    @y[3], @y[3], @t[0]
+       veor    @t[0], @t[0], @y[0]             @ t[0]=y[0]
+       veor    @t[1], @t[1], @t[6]
+       veor    @t[6], @t[6], @t[7]             @ clobber t[6]
+       veor    @y[4], @y[4], @t[1]
+       veor    @y[7], @y[7], @t[4]
+       veor    @y[6], @y[6], @t[3]
+       veor    @y[5], @y[5], @t[2]
+       veor    @t[4], @t[4], @y[4]             @ t[4]=y[4]
+       veor    @t[3], @t[3], @y[3]             @ t[3]=y[3]
+       veor    @t[5], @t[5], @y[5]             @ t[5]=y[5]
+       veor    @t[2], @t[2], @y[2]             @ t[2]=y[2]
+       veor    @t[3], @t[3], @t[7]
+       veor    @XMM[5], @t[5], @t[6]
+       veor    @XMM[6], @t[6], @y[6]           @ t[6]=y[6]
+       veor    @XMM[2], @t[2], @t[6]
+       veor    @XMM[7], @t[7], @y[7]           @ t[7]=y[7]
+
+       vmov    @XMM[0], @t[0]
+       vmov    @XMM[1], @t[1]
+       @ vmov  @XMM[2], @t[2]
+       vmov    @XMM[3], @t[3]
+       vmov    @XMM[4], @t[4]
+       @ vmov  @XMM[5], @t[5]
+       @ vmov  @XMM[6], @t[6]
+       @ vmov  @XMM[7], @t[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing pointer to
+#
+# | 0e 0b 0d 09 |   | 02 03 01 01 |   | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b |   | 01 01 02 03 |   | 04 00 05 00 |
+# | 0b 0d 09 0e |   | 03 01 01 02 |   | 00 04 00 05 |
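+#
+# so InvMixColumns is computed as a cheap multiply by the 05/00/04/00
+# circulant followed by a reuse of the forward MixColumns code.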
+
+$code.=<<___;
+       @ multiplication by 0x05-0x00-0x04-0x00
+       vext.8  @t[0], @x[0], @x[0], #8
+       vext.8  @t[6], @x[6], @x[6], #8
+       vext.8  @t[7], @x[7], @x[7], #8
+       veor    @t[0], @t[0], @x[0]
+       vext.8  @t[1], @x[1], @x[1], #8
+       veor    @t[6], @t[6], @x[6]
+       vext.8  @t[2], @x[2], @x[2], #8
+       veor    @t[7], @t[7], @x[7]
+       vext.8  @t[3], @x[3], @x[3], #8
+       veor    @t[1], @t[1], @x[1]
+       vext.8  @t[4], @x[4], @x[4], #8
+       veor    @t[2], @t[2], @x[2]
+       vext.8  @t[5], @x[5], @x[5], #8
+       veor    @t[3], @t[3], @x[3]
+       veor    @t[4], @t[4], @x[4]
+       veor    @t[5], @t[5], @x[5]
+
+        veor   @x[0], @x[0], @t[6]
+        veor   @x[1], @x[1], @t[6]
+        veor   @x[2], @x[2], @t[0]
+        veor   @x[4], @x[4], @t[2]
+        veor   @x[3], @x[3], @t[1]
+        veor   @x[1], @x[1], @t[7]
+        veor   @x[2], @x[2], @t[7]
+        veor   @x[4], @x[4], @t[6]
+        veor   @x[5], @x[5], @t[3]
+        veor   @x[3], @x[3], @t[6]
+        veor   @x[6], @x[6], @t[4]
+        veor   @x[4], @x[4], @t[7]
+        veor   @x[5], @x[5], @t[7]
+        veor   @x[7], @x[7], @t[5]
+___
+       &MixColumns     (@x,@t,1);      # flipped 2<->3 and 4<->6
+}
+
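+# swapmove: exchange the bits of $a selected by $mask with the bits of $b
+# that sit $n positions higher (a shift/xor/and "delta swap"); swapmove2x
+# does the same for two register pairs with interleaved scheduling.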
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+       vshr.u64        $t, $b, #$n
+       veor            $t, $t, $a
+       vand            $t, $t, $mask
+       veor            $a, $a, $t
+       vshl.u64        $t, $t, #$n
+       veor            $b, $b, $t
+___
+}
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+       vshr.u64        $t0, $b0, #$n
+        vshr.u64       $t1, $b1, #$n
+       veor            $t0, $t0, $a0
+        veor           $t1, $t1, $a1
+       vand            $t0, $t0, $mask
+        vand           $t1, $t1, $mask
+       veor            $a0, $a0, $t0
+       vshl.u64        $t0, $t0, #$n
+        veor           $a1, $a1, $t1
+        vshl.u64       $t1, $t1, #$n
+       veor            $b0, $b0, $t0
+        veor           $b1, $b1, $t1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+       vmov.i8 $t0,#0x55                       @ compose .LBS0
+       vmov.i8 $t1,#0x33                       @ compose .LBS1
+___
+       &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+       &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+       vmov.i8 $t0,#0x0f                       @ compose .LBS2
+___
+       &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+       &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+       &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+       &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH  vstmdb  sp!,{d8-d15}
+# define VFP_ABI_POP   vldmia  sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax        unified         @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code   32
+#endif
+
+.fpu   neon
+
+.type  _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+       adr     $const,_bsaes_decrypt8
+       vldmia  $key!, {@XMM[9]}                @ round 0 key
+       add     $const,$const,#.LM0ISR-_bsaes_decrypt8
+
+       vldmia  $const!, {@XMM[8]}              @ .LM0ISR
+       veor    @XMM[10], @XMM[0], @XMM[9]      @ xor with round0 key
+       veor    @XMM[11], @XMM[1], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+       veor    @XMM[12], @XMM[2], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+       veor    @XMM[13], @XMM[3], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+       veor    @XMM[14], @XMM[4], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+       veor    @XMM[15], @XMM[5], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+       veor    @XMM[10], @XMM[6], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+       veor    @XMM[11], @XMM[7], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+        vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+___
+       &bitslice       (@XMM[0..7, 8..11]);
+$code.=<<___;
+       sub     $rounds,$rounds,#1
+       b       .Ldec_sbox
+.align 4
+.Ldec_loop:
+___
+       &ShiftRows      (@XMM[0..7, 8..12]);
+$code.=".Ldec_sbox:\n";
+       &InvSbox        (@XMM[0..7, 8..15]);
+$code.=<<___;
+       subs    $rounds,$rounds,#1
+       bcc     .Ldec_done
+___
+       &InvMixColumns  (@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+       vldmia  $const, {@XMM[12]}              @ .LISR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   $const,$const,#0x10
+       bne     .Ldec_loop
+       vldmia  $const, {@XMM[12]}              @ .LISRM0
+       b       .Ldec_loop
+.align 4
+.Ldec_done:
+___
+       &bitslice       (@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+       vldmia  $key, {@XMM[8]}                 @ last round key
+       veor    @XMM[6], @XMM[6], @XMM[8]
+       veor    @XMM[4], @XMM[4], @XMM[8]
+       veor    @XMM[2], @XMM[2], @XMM[8]
+       veor    @XMM[7], @XMM[7], @XMM[8]
+       veor    @XMM[3], @XMM[3], @XMM[8]
+       veor    @XMM[5], @XMM[5], @XMM[8]
+       veor    @XMM[0], @XMM[0], @XMM[8]
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       bx      lr
+.size  _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type  _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR:       @ InvShiftRows constants
+       .quad   0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+       .quad   0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+       .quad   0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:                @ ShiftRows constants
+       .quad   0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+       .quad   0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+       .quad   0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+       .quad   0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+       .quad   0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 6
+.size  _bsaes_const,.-_bsaes_const
+
+.type  _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+       adr     $const,_bsaes_encrypt8
+       vldmia  $key!, {@XMM[9]}                @ round 0 key
+       sub     $const,$const,#_bsaes_encrypt8-.LM0SR
+
+       vldmia  $const!, {@XMM[8]}              @ .LM0SR
+_bsaes_encrypt8_alt:
+       veor    @XMM[10], @XMM[0], @XMM[9]      @ xor with round0 key
+       veor    @XMM[11], @XMM[1], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+       veor    @XMM[12], @XMM[2], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+       veor    @XMM[13], @XMM[3], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+       veor    @XMM[14], @XMM[4], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+       veor    @XMM[15], @XMM[5], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+       veor    @XMM[10], @XMM[6], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+       veor    @XMM[11], @XMM[7], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+        vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+_bsaes_encrypt8_bitslice:
+___
+       &bitslice       (@XMM[0..7, 8..11]);
+$code.=<<___;
+       sub     $rounds,$rounds,#1
+       b       .Lenc_sbox
+.align 4
+.Lenc_loop:
+___
+       &ShiftRows      (@XMM[0..7, 8..12]);
+$code.=".Lenc_sbox:\n";
+       &Sbox           (@XMM[0..7, 8..15]);
+$code.=<<___;
+       subs    $rounds,$rounds,#1
+       bcc     .Lenc_done
+___
+       &MixColumns     (@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+       vldmia  $const, {@XMM[12]}              @ .LSR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   $const,$const,#0x10
+       bne     .Lenc_loop
+       vldmia  $const, {@XMM[12]}              @ .LSRM0
+       b       .Lenc_loop
+.align 4
+.Lenc_done:
+___
+       # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+       &bitslice       (@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+       vldmia  $key, {@XMM[8]}                 @ last round key
+       veor    @XMM[4], @XMM[4], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[8]
+       veor    @XMM[3], @XMM[3], @XMM[8]
+       veor    @XMM[7], @XMM[7], @XMM[8]
+       veor    @XMM[2], @XMM[2], @XMM[8]
+       veor    @XMM[5], @XMM[5], @XMM[8]
+       veor    @XMM[0], @XMM[0], @XMM[8]
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       bx      lr
+.size  _bsaes_encrypt8,.-_bsaes_encrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
+
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+       &swapmove       (@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+       @ &swapmove(@x[2,3],1,$t0,$t2,$t3);
+       vmov    @x[2], @x[0]
+       vmov    @x[3], @x[1]
+___
+       #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+       &swapmove2x     (@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+       @ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+       vmov    @x[4], @x[0]
+       vmov    @x[6], @x[2]
+       vmov    @x[5], @x[1]
+       vmov    @x[7], @x[3]
+___
+       &swapmove2x     (@x[0,4,1,5],4,$bs2,$t2,$t3);
+       &swapmove2x     (@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
+$code.=<<___;
+.type  _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+       adr     $const,_bsaes_key_convert
+       vld1.8  {@XMM[7]},  [$inp]!             @ load round 0 key
+       sub     $const,$const,#_bsaes_key_convert-.LM0
+       vld1.8  {@XMM[15]}, [$inp]!             @ load round 1 key
+
+       vmov.i8 @XMM[8],  #0x01                 @ bit masks
+       vmov.i8 @XMM[9],  #0x02
+       vmov.i8 @XMM[10], #0x04
+       vmov.i8 @XMM[11], #0x08
+       vmov.i8 @XMM[12], #0x10
+       vmov.i8 @XMM[13], #0x20
+       vldmia  $const, {@XMM[14]}              @ .LM0
+
+#ifdef __ARMEL__
+       vrev32.8        @XMM[7],  @XMM[7]
+       vrev32.8        @XMM[15], @XMM[15]
+#endif
+       sub     $rounds,$rounds,#1
+       vstmia  $out!, {@XMM[7]}                @ save round 0 key
+       b       .Lkey_loop
+
+.align 4
+.Lkey_loop:
+       vtbl.8  `&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
+       vtbl.8  `&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
+       vmov.i8 @XMM[6],  #0x40
+       vmov.i8 @XMM[15], #0x80
+
+       vtst.8  @XMM[0], @XMM[7], @XMM[8]
+       vtst.8  @XMM[1], @XMM[7], @XMM[9]
+       vtst.8  @XMM[2], @XMM[7], @XMM[10]
+       vtst.8  @XMM[3], @XMM[7], @XMM[11]
+       vtst.8  @XMM[4], @XMM[7], @XMM[12]
+       vtst.8  @XMM[5], @XMM[7], @XMM[13]
+       vtst.8  @XMM[6], @XMM[7], @XMM[6]
+       vtst.8  @XMM[7], @XMM[7], @XMM[15]
+       vld1.8  {@XMM[15]}, [$inp]!             @ load next round key
+       vmvn    @XMM[0], @XMM[0]                @ "pnot"
+       vmvn    @XMM[1], @XMM[1]
+       vmvn    @XMM[5], @XMM[5]
+       vmvn    @XMM[6], @XMM[6]
+#ifdef __ARMEL__
+       vrev32.8        @XMM[15], @XMM[15]
+#endif
+       subs    $rounds,$rounds,#1
+       vstmia  $out!,{@XMM[0]-@XMM[7]}         @ write bit-sliced round key
+       bne     .Lkey_loop
+
+       vmov.i8 @XMM[7],#0x63                   @ compose .L63
+       @ don't save last round key
+       bx      lr
+.size  _bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0) {               # the following four functions are an unsupported interface
+                       # used for benchmarking...
+$code.=<<___;
+.globl bsaes_enc_key_convert
+.type  bsaes_enc_key_convert,%function
+.align 4
+bsaes_enc_key_convert:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+
+       ldr     r5,[$inp,#240]                  @ pass rounds
+       mov     r4,$inp                         @ pass key
+       mov     r12,$out                        @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7],@XMM[7],@XMM[15]        @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl bsaes_encrypt_128
+.type  bsaes_encrypt_128,%function
+.align 4
+bsaes_encrypt_128:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+.Lenc128_loop:
+       vld1.8  {@XMM[0]-@XMM[1]}, [$inp]!      @ load input
+       vld1.8  {@XMM[2]-@XMM[3]}, [$inp]!
+       mov     r4,$key                         @ pass the key
+       vld1.8  {@XMM[4]-@XMM[5]}, [$inp]!
+       mov     r5,#10                          @ pass rounds
+       vld1.8  {@XMM[6]-@XMM[7]}, [$inp]!
+
+       bl      _bsaes_encrypt8
+
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       subs    $len,$len,#0x80
+       vst1.8  {@XMM[5]}, [$out]!
+       bhi     .Lenc128_loop
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl bsaes_dec_key_convert
+.type  bsaes_dec_key_convert,%function
+.align 4
+bsaes_dec_key_convert:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+
+       ldr     r5,[$inp,#240]                  @ pass rounds
+       mov     r4,$inp                         @ pass key
+       mov     r12,$out                        @ pass key schedule
+       bl      _bsaes_key_convert
+       vldmia  $out, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  $out, {@XMM[7]}
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl bsaes_decrypt_128
+.type  bsaes_decrypt_128,%function
+.align 4
+bsaes_decrypt_128:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+.Ldec128_loop:
+       vld1.8  {@XMM[0]-@XMM[1]}, [$inp]!      @ load input
+       vld1.8  {@XMM[2]-@XMM[3]}, [$inp]!
+       mov     r4,$key                         @ pass the key
+       vld1.8  {@XMM[4]-@XMM[5]}, [$inp]!
+       mov     r5,#10                          @ pass rounds
+       vld1.8  {@XMM[6]-@XMM[7]}, [$inp]!
+
+       bl      _bsaes_decrypt8
+
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       subs    $len,$len,#0x80
+       vst1.8  {@XMM[5]}, [$out]!
+       bhi     .Ldec128_loop
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
+my ($keysched)=("sp");
+
+$code.=<<___;
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global        bsaes_cbc_encrypt
+.type  bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef        __KERNEL__
+       cmp     $len, #128
+#ifndef        __thumb__
+       blo     AES_cbc_encrypt
+#else
+       bhs     1f
+       b       AES_cbc_encrypt
+1:
+#endif
+#endif
+
+       @ it is up to the caller to make sure we are called with enc == 0
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     $ivp, [ip]                      @ IV is 1st arg on the stack
+       mov     $len, $len, lsr#4               @ len in 16 byte blocks
+       sub     sp, #0x10                       @ scratch space to carry over the IV
+       mov     $fp, sp                         @ save sp
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       add     r12, #`128-32`                  @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12                         @ sp is $keysched
+       bl      _bsaes_key_convert
+       vldmia  $keysched, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  $keysched, {@XMM[7]}
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, $key, #248
+       vldmia  r4, {@XMM[6]}
+       vstmia  r12, {@XMM[15]}                 @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  r4, {@XMM[7]}
+
+.align 2
+0:
+#endif
+
+       vld1.8  {@XMM[15]}, [$ivp]              @ load IV
+       b       .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+       subs    $len, $len, #0x8
+       bmi     .Lcbc_dec_loop_finish
+
+       vld1.8  {@XMM[0]-@XMM[1]}, [$inp]!      @ load input
+       vld1.8  {@XMM[2]-@XMM[3]}, [$inp]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, $keysched                   @ pass the key
+#else
+       add     r4, $key, #248
+#endif
+       vld1.8  {@XMM[4]-@XMM[5]}, [$inp]!
+       mov     r5, $rounds
+       vld1.8  {@XMM[6]-@XMM[7]}, [$inp]
+       sub     $inp, $inp, #0x60
+       vstmia  $fp, {@XMM[15]}                 @ put aside IV
+
+       bl      _bsaes_decrypt8
+
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[12]-@XMM[13]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vld1.8  {@XMM[14]-@XMM[15]}, [$inp]!
+       veor    @XMM[7], @XMM[7], @XMM[12]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor    @XMM[3], @XMM[3], @XMM[13]
+       vst1.8  {@XMM[6]}, [$out]!
+       veor    @XMM[5], @XMM[5], @XMM[14]
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       vst1.8  {@XMM[5]}, [$out]!
+
+       b       .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+       adds    $len, $len, #8
+       beq     .Lcbc_dec_done
+
+       vld1.8  {@XMM[0]}, [$inp]!              @ load input
+       cmp     $len, #2
+       blo     .Lcbc_dec_one
+       vld1.8  {@XMM[1]}, [$inp]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, $keysched                   @ pass the key
+#else
+       add     r4, $key, #248
+#endif
+       mov     r5, $rounds
+       vstmia  $fp, {@XMM[15]}                 @ put aside IV
+       beq     .Lcbc_dec_two
+       vld1.8  {@XMM[2]}, [$inp]!
+       cmp     $len, #4
+       blo     .Lcbc_dec_three
+       vld1.8  {@XMM[3]}, [$inp]!
+       beq     .Lcbc_dec_four
+       vld1.8  {@XMM[4]}, [$inp]!
+       cmp     $len, #6
+       blo     .Lcbc_dec_five
+       vld1.8  {@XMM[5]}, [$inp]!
+       beq     .Lcbc_dec_six
+       vld1.8  {@XMM[6]}, [$inp]!
+       sub     $inp, $inp, #0x70
+
+       bl      _bsaes_decrypt8
+
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[12]-@XMM[13]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[7], @XMM[7], @XMM[12]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor    @XMM[3], @XMM[3], @XMM[13]
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+       sub     $inp, $inp, #0x60
+       bl      _bsaes_decrypt8
+       vldmia  $fp,{@XMM[14]}                  @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[12]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[7], @XMM[7], @XMM[12]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+       sub     $inp, $inp, #0x50
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+       sub     $inp, $inp, #0x40
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+       sub     $inp, $inp, #0x30
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+       sub     $inp, $inp, #0x20
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]}, [$inp]!              @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[15]}, [$inp]!             @ reload input
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+       sub     $inp, $inp, #0x10
+       mov     $rounds, $out                   @ save original out pointer
+       mov     $out, $fp                       @ use the iv scratch space as out buffer
+       mov     r2, $key
+       vmov    @XMM[4],@XMM[15]                @ just in case ensure that IV
+       vmov    @XMM[5],@XMM[0]                 @ and input are preserved
+       bl      AES_decrypt
+       vld1.8  {@XMM[0]}, [$fp,:64]            @ load result
+       veor    @XMM[0], @XMM[0], @XMM[4]       @ ^= IV
+       vmov    @XMM[15], @XMM[5]               @ @XMM[5] holds input
+       vst1.8  {@XMM[0]}, [$rounds]            @ write output
+
+.Lcbc_dec_done:
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+.Lcbc_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          $keysched!, {q0-q1}
+       cmp             $keysched, $fp
+       bne             .Lcbc_dec_bzero
+#endif
+
+       mov     sp, $fp
+       add     sp, #0x10                       @ add sp,$fp,#0x10 is no good for thumb
+       vst1.8  {@XMM[15]}, [$ivp]              @ return IV
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}
+.size  bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+___
+}
+{
+my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
+my $const = "r6";      # shared with _bsaes_encrypt8_alt
+my $keysched = "sp";
+
+$code.=<<___;
+.extern        AES_encrypt
+.global        bsaes_ctr32_encrypt_blocks
+.type  bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+       cmp     $len, #8                        @ use plain AES for
+       blo     .Lctr_enc_short                 @ small sizes
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     $ctr, [ip]                      @ ctr is 1st arg on the stack
+       sub     sp, sp, #0x10                   @ scratch space to carry over the ctr
+       mov     $fp, sp                         @ save sp
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       add     r12, #`128-32`                  @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12                         @ sp is $keysched
+       bl      _bsaes_key_convert
+       veor    @XMM[7],@XMM[7],@XMM[15]        @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+
+       vld1.8  {@XMM[0]}, [$ctr]               @ load counter
+       add     $ctr, $const, #.LREVM0SR-.LM0   @ borrow $ctr
+       vldmia  $keysched, {@XMM[4]}            @ load round0 key
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7],@XMM[7],@XMM[15]        @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+
+.align 2
+0:     add     r12, $key, #248
+       vld1.8  {@XMM[0]}, [$ctr]               @ load counter
+       adrl    $ctr, .LREVM0SR                 @ borrow $ctr
+       vldmia  r12, {@XMM[4]}                  @ load round0 key
+       sub     sp, #0x10                       @ place for adjusted round0 key
+#endif
+
+       vmov.i32        @XMM[8],#1              @ compose 1<<96
+       veor            @XMM[9],@XMM[9],@XMM[9]
+       vrev32.8        @XMM[0],@XMM[0]
+       vext.8          @XMM[8],@XMM[9],@XMM[8],#4
+       vrev32.8        @XMM[4],@XMM[4]
+       vadd.u32        @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
+       vstmia  $keysched, {@XMM[4]}            @ save adjusted round0 key
+       b       .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+       vadd.u32        @XMM[10], @XMM[8], @XMM[9]      @ compose 3<<96
+       vadd.u32        @XMM[1], @XMM[0], @XMM[8]       @ +1
+       vadd.u32        @XMM[2], @XMM[0], @XMM[9]       @ +2
+       vadd.u32        @XMM[3], @XMM[0], @XMM[10]      @ +3
+       vadd.u32        @XMM[4], @XMM[1], @XMM[10]
+       vadd.u32        @XMM[5], @XMM[2], @XMM[10]
+       vadd.u32        @XMM[6], @XMM[3], @XMM[10]
+       vadd.u32        @XMM[7], @XMM[4], @XMM[10]
+       vadd.u32        @XMM[10], @XMM[5], @XMM[10]     @ next counter
+
+       @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+       @ to flip byte order in 32-bit counter
+
+       vldmia          $keysched, {@XMM[9]}            @ load round0 key
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, $keysched, #0x10            @ pass next round key
+#else
+       add             r4, $key, #`248+16`
+#endif
+       vldmia          $ctr, {@XMM[8]}                 @ .LREVM0SR
+       mov             r5, $rounds                     @ pass rounds
+       vstmia          $fp, {@XMM[10]}                 @ save next counter
+       sub             $const, $ctr, #.LREVM0SR-.LSR   @ pass constants
+
+       bl              _bsaes_encrypt8_alt
+
+       subs            $len, $len, #8
+       blo             .Lctr_enc_loop_done
+
+       vld1.8          {@XMM[8]-@XMM[9]}, [$inp]!      @ load input
+       vld1.8          {@XMM[10]-@XMM[11]}, [$inp]!
+       veor            @XMM[0], @XMM[8]
+       veor            @XMM[1], @XMM[9]
+       vld1.8          {@XMM[12]-@XMM[13]}, [$inp]!
+       veor            @XMM[4], @XMM[10]
+       veor            @XMM[6], @XMM[11]
+       vld1.8          {@XMM[14]-@XMM[15]}, [$inp]!
+       veor            @XMM[3], @XMM[12]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor            @XMM[7], @XMM[13]
+       veor            @XMM[2], @XMM[14]
+       vst1.8          {@XMM[4]}, [$out]!
+       veor            @XMM[5], @XMM[15]
+       vst1.8          {@XMM[6]}, [$out]!
+       vmov.i32        @XMM[8], #1                     @ compose 1<<96
+       vst1.8          {@XMM[3]}, [$out]!
+       veor            @XMM[9], @XMM[9], @XMM[9]
+       vst1.8          {@XMM[7]}, [$out]!
+       vext.8          @XMM[8], @XMM[9], @XMM[8], #4
+       vst1.8          {@XMM[2]}, [$out]!
+       vadd.u32        @XMM[9],@XMM[8],@XMM[8]         @ compose 2<<96
+       vst1.8          {@XMM[5]}, [$out]!
+       vldmia          $fp, {@XMM[0]}                  @ load counter
+
+       bne             .Lctr_enc_loop
+       b               .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+       add             $len, $len, #8
+       vld1.8          {@XMM[8]}, [$inp]!      @ load input
+       veor            @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [$out]!      @ write output
+       cmp             $len, #2
+       blo             .Lctr_enc_done
+       vld1.8          {@XMM[9]}, [$inp]!
+       veor            @XMM[1], @XMM[9]
+       vst1.8          {@XMM[1]}, [$out]!
+       beq             .Lctr_enc_done
+       vld1.8          {@XMM[10]}, [$inp]!
+       veor            @XMM[4], @XMM[10]
+       vst1.8          {@XMM[4]}, [$out]!
+       cmp             $len, #4
+       blo             .Lctr_enc_done
+       vld1.8          {@XMM[11]}, [$inp]!
+       veor            @XMM[6], @XMM[11]
+       vst1.8          {@XMM[6]}, [$out]!
+       beq             .Lctr_enc_done
+       vld1.8          {@XMM[12]}, [$inp]!
+       veor            @XMM[3], @XMM[12]
+       vst1.8          {@XMM[3]}, [$out]!
+       cmp             $len, #6
+       blo             .Lctr_enc_done
+       vld1.8          {@XMM[13]}, [$inp]!
+       veor            @XMM[7], @XMM[13]
+       vst1.8          {@XMM[7]}, [$out]!
+       beq             .Lctr_enc_done
+       vld1.8          {@XMM[14]}, [$inp]
+       veor            @XMM[2], @XMM[14]
+       vst1.8          {@XMM[2]}, [$out]!
+
+.Lctr_enc_done:
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifndef        BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:                       @ wipe key schedule [if any]
+       vstmia          $keysched!, {q0-q1}
+       cmp             $keysched, $fp
+       bne             .Lctr_enc_bzero
+#else
+       vstmia          $keysched, {q0-q1}
+#endif
+
+       mov     sp, $fp
+       add     sp, #0x10               @ add sp,$fp,#0x10 is no good for thumb
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}       @ return
+
+.align 4
+.Lctr_enc_short:
+       ldr     ip, [sp]                @ ctr pointer is passed on stack
+       stmdb   sp!, {r4-r8, lr}
+
+       mov     r4, $inp                @ copy arguments
+       mov     r5, $out
+       mov     r6, $len
+       mov     r7, $key
+       ldr     r8, [ip, #12]           @ load counter LSW
+       vld1.8  {@XMM[1]}, [ip]         @ load whole counter value
+#ifdef __ARMEL__
+       rev     r8, r8
+#endif
+       sub     sp, sp, #0x10
+       vst1.8  {@XMM[1]}, [sp,:64]     @ copy counter value
+       sub     sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+       add     r0, sp, #0x10           @ input counter value
+       mov     r1, sp                  @ output on the stack
+       mov     r2, r7                  @ key
+
+       bl      AES_encrypt
+
+       vld1.8  {@XMM[0]}, [r4]!        @ load input
+       vld1.8  {@XMM[1]}, [sp,:64]     @ load encrypted counter
+       add     r8, r8, #1
+#ifdef __ARMEL__
+       rev     r0, r8
+       str     r0, [sp, #0x1c]         @ next counter value
+#else
+       str     r8, [sp, #0x1c]         @ next counter value
+#endif
+       veor    @XMM[0],@XMM[0],@XMM[1]
+       vst1.8  {@XMM[0]}, [r5]!        @ store output
+       subs    r6, r6, #1
+       bne     .Lctr_enc_short_loop
+
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+       vstmia          sp!, {q0-q1}
+
+       ldmia   sp!, {r4-r8, pc}
+.size  bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+}
+{
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#      const AES_KEY *key1, const AES_KEY *key2,
+#      const unsigned char iv[16]);
+#
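For orientation, here is a minimal C sketch of how these entry points could be driven. It is illustrative only: the wrapper name and the 128-bit key size are assumptions, the prototype comes from the comment above, and AES_set_encrypt_key() is the usual OpenSSL key setup (note that key2, the XTS tweak key, is always expanded as an encryption key, even on the decrypt path).

    /* Hypothetical caller of the bit-sliced XTS entry point declared above. */
    #include <stddef.h>
    #include <openssl/aes.h>

    void bsaes_xts_encrypt(const char *inp, char *out, size_t len,
                           const AES_KEY *key1, const AES_KEY *key2,
                           const unsigned char iv[16]);

    static int xts_encrypt_region(const unsigned char *in, unsigned char *out,
                                  size_t len, const unsigned char *k1,
                                  const unsigned char *k2,
                                  const unsigned char iv[16])
    {
            AES_KEY data_key, tweak_key;

            if (len < 16)           /* XTS needs at least one full block */
                    return -1;
            AES_set_encrypt_key(k1, 128, &data_key);    /* key1: data key */
            AES_set_encrypt_key(k2, 128, &tweak_key);   /* key2: tweak key */
            bsaes_xts_encrypt((const char *)in, (char *)out, len,
                              &data_key, &tweak_key, iv);
            return 0;
    }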
+my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
+my $const="r6";                # returned by _bsaes_key_convert
+my $twmask=@XMM[5];
+my @T=@XMM[6..7];
+
+$code.=<<___;
+.globl bsaes_xts_encrypt
+.type  bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future $fp
+
+       mov     $inp, r0
+       mov     $out, r1
+       mov     $len, r2
+       mov     $key, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0,sp                           @ pointer to initial tweak
+#endif
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+       mov     $fp, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       @ add   r12, #`128-32`                  @ size of bit-sliced key schedule
+       sub     r12, #`32+16`                   @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7], @XMM[7], @XMM[15]      @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7], @XMM[7], @XMM[15]      @ fix up last round key
+       vstmia  r12, {@XMM[7]}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+
+       vld1.8  {@XMM[8]}, [r0]                 @ initial tweak
+       adr     $magic, .Lxts_magic
+
+       subs    $len, #0x80
+       blo     .Lxts_enc_short
+       b       .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       vadd.u64        @XMM[8], @XMM[15], @XMM[15]
+       vst1.64         {@XMM[15]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       veor            @XMM[8], @XMM[8], @T[0]
+       vst1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       vld1.8          {@XMM[6]-@XMM[7]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       veor            @XMM[7], @XMM[7], @XMM[15]
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       vld1.64         {@XMM[14]-@XMM[15]}, [r0,:128]!
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[2], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       veor            @XMM[13], @XMM[5], @XMM[15]
+       vst1.8          {@XMM[12]-@XMM[13]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       subs            $len, #0x80
+       bpl             .Lxts_enc_loop
+
+.Lxts_enc_short:
+       adds            $len, #0x70
+       bmi             .Lxts_enc_done
+
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+       subs            $len, #0x10
+       bmi             .Lxts_enc_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       sub             $len, #0x10
+       vst1.64         {@XMM[15]}, [r0,:128]           @ next round tweak
+
+       vld1.8          {@XMM[6]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       vld1.64         {@XMM[14]}, [r0,:128]!
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[2], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       vst1.8          {@XMM[12]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+       vst1.64         {@XMM[14]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[4], @XMM[4], @XMM[12]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[5], @XMM[5], @XMM[13]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+       .quad   1, 0x87
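The pair `1, 0x87` holds the per-half correction values for the XTS tweak update: between blocks the 128-bit tweak is multiplied by alpha in GF(2^128), the 1 propagates the carry out of the low 64-bit half into the high half, and 0x87 folds the bit shifted out of bit 127 back into the low byte (reduction by x^128 + x^7 + x^2 + x + 1). The vshr/vand/vadd/vswp/veor sequences above do this on whole NEON vectors; a scalar C sketch of the same step, for reference only:

    /* One XTS tweak doubling (multiply by alpha) on a little-endian 16-byte tweak. */
    static void xts_tweak_double(unsigned char t[16])
    {
            unsigned int carry = 0;

            for (int i = 0; i < 16; i++) {
                    unsigned int b = t[i];

                    t[i] = (unsigned char)((b << 1) | carry);
                    carry = b >> 7;
            }
            if (carry)
                    t[0] ^= 0x87;   /* reduce modulo x^128 + x^7 + x^2 + x + 1 */
    }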
+
+.align 5
+.Lxts_enc_5:
+       vst1.64         {@XMM[13]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[3], @XMM[3], @XMM[11]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[4], @XMM[4], @XMM[12]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       vst1.8          {@XMM[10]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+       vst1.64         {@XMM[12]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[2], @XMM[2], @XMM[10]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[3], @XMM[3], @XMM[11]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+       vst1.64         {@XMM[11]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[1], @XMM[1], @XMM[9]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[2], @XMM[2], @XMM[10]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       vld1.64         {@XMM[10]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       vst1.8          {@XMM[8]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+       vst1.64         {@XMM[10]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[0], @XMM[0], @XMM[8]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[1], @XMM[1], @XMM[9]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+       mov             r0, sp
+       veor            @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                         @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [$out]!
+       mov             $fp, r4
+
+       vmov            @XMM[8], @XMM[9]                @ next round tweak
+
+.Lxts_enc_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            $len, #0x10
+       beq             .Lxts_enc_ret
+       sub             r6, $out, #0x10
+
+.Lxts_enc_steal:
+       ldrb            r0, [$inp], #1
+       ldrb            r1, [$out, #-0x10]
+       strb            r0, [$out, #-0x10]
+       strb            r1, [$out], #1
+
+       subs            $len, #1
+       bhi             .Lxts_enc_steal
+
+       vld1.8          {@XMM[0]}, [r6]
+       mov             r0, sp
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                 @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [r6]
+       mov             $fp, r4
+#endif
+
+.Lxts_enc_ret:
+       bic             r0, $fp, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [$fp, #0x20+VFP_ABI_FRAME]  @ chain tweak
+#endif
+.Lxts_enc_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_enc_bzero
+
+       mov             sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {@XMM[8]}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl bsaes_xts_decrypt
+.type  bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future $fp
+
+       mov     $inp, r0
+       mov     $out, r1
+       mov     $len, r2
+       mov     $key, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0, sp                          @ pointer to initial tweak
+#endif
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+       mov     $fp, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       @ add   r12, #`128-32`                  @ size of bit-sliced key schedule
+       sub     r12, #`32+16`                   @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, sp, #0x90
+       vldmia  r4, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  r4, {@XMM[7]}
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, $key, #248
+       vldmia  r4, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  r4, {@XMM[7]}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+       vld1.8  {@XMM[8]}, [r0]                 @ initial tweak
+       adr     $magic, .Lxts_magic
+
+       tst     $len, #0xf                      @ if not multiple of 16
+       it      ne                              @ Thumb2 thing, sanity check in ARM
+       subne   $len, #0x10                     @ subtract another 16 bytes
+       subs    $len, #0x80
+
+       blo     .Lxts_dec_short
+       b       .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       vadd.u64        @XMM[8], @XMM[15], @XMM[15]
+       vst1.64         {@XMM[15]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       veor            @XMM[8], @XMM[8], @T[0]
+       vst1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       vld1.8          {@XMM[6]-@XMM[7]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       veor            @XMM[7], @XMM[7], @XMM[15]
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       vld1.64         {@XMM[14]-@XMM[15]}, [r0,:128]!
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[3], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       veor            @XMM[13], @XMM[5], @XMM[15]
+       vst1.8          {@XMM[12]-@XMM[13]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       subs            $len, #0x80
+       bpl             .Lxts_dec_loop
+
+.Lxts_dec_short:
+       adds            $len, #0x70
+       bmi             .Lxts_dec_done
+
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+       subs            $len, #0x10
+       bmi             .Lxts_dec_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       sub             $len, #0x10
+       vst1.64         {@XMM[15]}, [r0,:128]           @ next round tweak
+
+       vld1.8          {@XMM[6]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       vld1.64         {@XMM[14]}, [r0,:128]!
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[3], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       vst1.8          {@XMM[12]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+       vst1.64         {@XMM[14]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[4], @XMM[4], @XMM[12]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[5], @XMM[5], @XMM[13]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+       vst1.64         {@XMM[13]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[3], @XMM[3], @XMM[11]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[4], @XMM[4], @XMM[12]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       vst1.8          {@XMM[10]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+       vst1.64         {@XMM[12]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[2], @XMM[2], @XMM[10]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[3], @XMM[3], @XMM[11]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+       vst1.64         {@XMM[11]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[1], @XMM[1], @XMM[9]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[2], @XMM[2], @XMM[10]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       vld1.64         {@XMM[10]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       vst1.8          {@XMM[8]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+       vst1.64         {@XMM[10]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[0], @XMM[0], @XMM[8]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[1], @XMM[1], @XMM[9]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+       mov             r0, sp
+       veor            @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                         @ preserve fp
+       mov             r5, $magic                      @ preserve magic
+
+       bl              AES_decrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [$out]!
+       mov             $fp, r4
+       mov             $magic, r5
+
+       vmov            @XMM[8], @XMM[9]                @ next round tweak
+
+.Lxts_dec_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            $len, #0x10
+       beq             .Lxts_dec_ret
+
+       @ calculate one round of extra tweak for the stolen ciphertext
+       vldmia          $magic, {$twmask}
+       vshr.s64        @XMM[6], @XMM[8], #63
+       vand            @XMM[6], @XMM[6], $twmask
+       vadd.u64        @XMM[9], @XMM[8], @XMM[8]
+       vswp            `&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
+       veor            @XMM[9], @XMM[9], @XMM[6]
+
+       @ perform the final decryption with the last tweak value
+       vld1.8          {@XMM[0]}, [$inp]!
+       mov             r0, sp
+       veor            @XMM[0], @XMM[0], @XMM[9]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                 @ preserve fp
+
+       bl              AES_decrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[9]
+       vst1.8          {@XMM[0]}, [$out]
+
+       mov             r6, $out
+.Lxts_dec_steal:
+       ldrb            r1, [$out]
+       ldrb            r0, [$inp], #1
+       strb            r1, [$out, #0x10]
+       strb            r0, [$out], #1
+
+       subs            $len, #1
+       bhi             .Lxts_dec_steal
+
+       vld1.8          {@XMM[0]}, [r6]
+       mov             r0, sp
+       veor            @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+
+       bl              AES_decrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [r6]
+       mov             $fp, r4
+#endif
+
+.Lxts_dec_ret:
+       bic             r0, $fp, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [$fp, #0x20+VFP_ABI_FRAME]  @ chain tweak
+#endif
+.Lxts_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_dec_bzero
+
+       mov             sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {@XMM[8]}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+open SELF,$0;
+while(<SELF>) {
+       next if (/^#!/);
+        last if (!s/^#/@/ and !/^$/);
+        print;
+}
+close SELF;
+
+print $code;
+
+close STDOUT;
index d3db39860b9cc5eb83053f99f9430b926443609c..a6395c0277152f74645b8fceb572abc9b1c5a2db 100644 (file)
@@ -24,6 +24,7 @@ generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
 generic-y += siginfo.h
+generic-y += simd.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
@@ -31,5 +32,4 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += timex.h
 generic-y += trace_clock.h
-generic-y += types.h
 generic-y += unaligned.h
index da1c77d39327963ab10e633aeb8809aac7da2dec..55ffc3b850f43e17b4337c83af16c1c0bdf81afc 100644 (file)
@@ -12,6 +12,7 @@
 #define __ASM_ARM_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <linux/prefetch.h>
 #include <linux/types.h>
 #include <linux/irqflags.h>
 #include <asm/barrier.h>
@@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)
        unsigned long tmp;
        int result;
 
+       prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic_add\n"
 "1:    ldrex   %0, [%3]\n"
 "      add     %0, %0, %4\n"
@@ -79,6 +81,7 @@ static inline void atomic_sub(int i, atomic_t *v)
        unsigned long tmp;
        int result;
 
+       prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic_sub\n"
 "1:    ldrex   %0, [%3]\n"
 "      sub     %0, %0, %4\n"
@@ -138,6 +141,7 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
        unsigned long tmp, tmp2;
 
+       prefetchw(addr);
        __asm__ __volatile__("@ atomic_clear_mask\n"
 "1:    ldrex   %0, [%3]\n"
 "      bic     %0, %0, %4\n"
@@ -283,6 +287,7 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 {
        u64 tmp;
 
+       prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_set\n"
 "1:    ldrexd  %0, %H0, [%2]\n"
 "      strexd  %0, %3, %H3, [%2]\n"
@@ -299,6 +304,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
        u64 result;
        unsigned long tmp;
 
+       prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_add\n"
 "1:    ldrexd  %0, %H0, [%3]\n"
 "      adds    %0, %0, %4\n"
@@ -339,6 +345,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
        u64 result;
        unsigned long tmp;
 
+       prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_sub\n"
 "1:    ldrexd  %0, %H0, [%3]\n"
 "      subs    %0, %0, %4\n"
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644 (file)
index 0000000..1714800
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/include/asm/bL_switcher.h
+ *
+ * Created by:  Nicolas Pitre, April 2012
+ * Copyright:   (C) 2012-2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_BL_SWITCHER_H
+#define ASM_BL_SWITCHER_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef void (*bL_switch_completion_handler)(void *cookie);
+
+int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
+                        bL_switch_completion_handler completer,
+                        void *completer_cookie);
+static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
+{
+       return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
+}
+
+/*
+ * Register here to be notified about runtime enabling/disabling of
+ * the switcher.
+ *
+ * The notifier chain is called with the switcher activation lock held:
+ * the switcher will not be enabled or disabled during callbacks.
+ * Callbacks must not call bL_switcher_{get,put}_enabled().
+ */
+#define BL_NOTIFY_PRE_ENABLE   0
+#define BL_NOTIFY_POST_ENABLE  1
+#define BL_NOTIFY_PRE_DISABLE  2
+#define BL_NOTIFY_POST_DISABLE 3
+
+#ifdef CONFIG_BL_SWITCHER
+
+int bL_switcher_register_notifier(struct notifier_block *nb);
+int bL_switcher_unregister_notifier(struct notifier_block *nb);
+
+/*
+ * Use these functions to temporarily prevent enabling/disabling of
+ * the switcher.
+ * bL_switcher_get_enabled() returns true if the switcher is currently
+ * enabled.  Each call to bL_switcher_get_enabled() must be followed
+ * by a call to bL_switcher_put_enabled().  These functions are not
+ * recursive.
+ */
+bool bL_switcher_get_enabled(void);
+void bL_switcher_put_enabled(void);
+
+int bL_switcher_trace_trigger(void);
+int bL_switcher_get_logical_index(u32 mpidr);
+
+#else
+static inline int bL_switcher_register_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline bool bL_switcher_get_enabled(void) { return false; }
+static inline void bL_switcher_put_enabled(void) { }
+static inline int bL_switcher_trace_trigger(void) { return 0; }
+static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
+#endif /* CONFIG_BL_SWITCHER */
+
+#endif
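
Illustration only, not part of the merge: a minimal sketch of how a client might consume the bL_switcher API added above, registering for the enable/disable notifications and pinning the switcher state with the get/put pair. The client-side names (my_bL_notify, my_bL_client_init) are hypothetical; only the bL_switcher_* calls and BL_NOTIFY_* events come from the header itself.

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/bL_switcher.h>

/* Hypothetical client: quiesce per-CPU work around switcher transitions. */
static int my_bL_notify(struct notifier_block *nb, unsigned long event, void *p)
{
        switch (event) {
        case BL_NOTIFY_PRE_ENABLE:
        case BL_NOTIFY_PRE_DISABLE:
                /* pause work here; must not call bL_switcher_get_enabled() */
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_bL_nb = {
        .notifier_call = my_bL_notify,
};

static int __init my_bL_client_init(void)
{
        bL_switcher_register_notifier(&my_bL_nb);

        if (bL_switcher_get_enabled()) {
                /* switcher cannot be disabled until the matching put */
                bL_switcher_put_enabled();
        }
        return 0;
}
late_initcall(my_bL_client_init);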
index 4f009c10540dff2a2e7efd08b0671c2369547b90..df2fbba7efc80d57074a6053704a9c70119aae03 100644 (file)
@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
        return ret;
 }
 
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+                                            unsigned long long old,
+                                            unsigned long long new)
+{
+       unsigned long long oldval;
+       unsigned long res;
+
+       __asm__ __volatile__(
+"1:    ldrexd          %1, %H1, [%3]\n"
+"      teq             %1, %4\n"
+"      teqeq           %H1, %H4\n"
+"      bne             2f\n"
+"      strexd          %0, %5, %H5, [%3]\n"
+"      teq             %0, #0\n"
+"      bne             1b\n"
+"2:"
+       : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+       : "r" (ptr), "r" (old), "r" (new)
+       : "cc");
+
+       return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+                                               unsigned long long old,
+                                               unsigned long long new)
+{
+       unsigned long long ret;
+
+       smp_mb();
+       ret = __cmpxchg64(ptr, old, new);
+       smp_mb();
+
+       return ret;
+}
+
 #define cmpxchg_local(ptr,o,n)                                         \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr),                     \
                                       (unsigned long)(o),              \
@@ -230,18 +266,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                       sizeof(*(ptr))))
 
 #define cmpxchg64(ptr, o, n)                                           \
-       ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),       \
-                                               atomic64_t,             \
-                                               counter),               \
-                                             (unsigned long long)(o),  \
-                                             (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n)                                     \
-       ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),        \
-                                               local64_t,              \
-                                               a),                     \
-                                            (unsigned long long)(o),   \
-                                            (unsigned long long)(n)))
+       ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),                      \
+                                       (unsigned long long)(o),        \
+                                       (unsigned long long)(n)))
+
+#define cmpxchg64_relaxed(ptr, o, n)                                   \
+       ((__typeof__(*(ptr)))__cmpxchg64((ptr),                         \
+                                       (unsigned long long)(o),        \
+                                       (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n)     cmpxchg64_relaxed((ptr), (o), (n))
 
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
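
A usage sketch, not from the patch: with the ldrexd/strexd-based __cmpxchg64() above, cmpxchg64() now operates on any naturally aligned 64-bit variable instead of routing through atomic64_cmpxchg(), and cmpxchg64_relaxed() is the same operation without the barriers. The variable and helper names below are illustrative.

#include <linux/types.h>
#include <asm/cmpxchg.h>

static u64 last_stamp;

/* Publish a new 64-bit value only if nobody raced us; cmpxchg64() is
 * fully ordered, cmpxchg64_relaxed() would skip the smp_mb() pair. */
static bool update_stamp(u64 old, u64 new)
{
        return cmpxchg64(&last_stamp, old, new) == old;
}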
 
index 9672e978d50df67d94c3dd86d23f3bcdd187c54d..acdde76b39bbae3064034fff78b9dd2b95bbd39c 100644 (file)
@@ -10,6 +10,7 @@
 #define CPUID_TLBTYPE  3
 #define CPUID_MPUIR    4
 #define CPUID_MPIDR    5
+#define CPUID_REVIDR   6
 
 #ifdef CONFIG_CPU_V7M
 #define CPUID_EXT_PFR0 0x40
index 5b579b951503e51a436683d6fb0be964d983bf67..863cd84eb1a24955e9af8253dcfa8f0b710c95ae 100644 (file)
@@ -64,6 +64,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 {
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }
+
 #else
 static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
@@ -86,6 +87,13 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif
 
+/* The ARM override for dma_max_pfn() */
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+       return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
+}
+#define dma_max_pfn(dev) dma_max_pfn(dev)
+
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
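
Illustrative only: the dma_max_pfn() override a few lines above reports the highest physical page frame the device can reach through DMA, so a subsystem might compare it against the top of lowmem when deciding whether bouncing is needed. max_low_pfn is the kernel's existing global; the helper name is made up.

#include <linux/device.h>
#include <linux/bootmem.h>      /* max_low_pfn */
#include <linux/dma-mapping.h>

/* Sketch: can this device DMA to every directly mapped (lowmem) page? */
static bool dev_dma_covers_lowmem(struct device *dev)
{
        return dev->dma_mask && dma_max_pfn(dev) >= max_low_pfn;
}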
index 2740c2a2df639361617f6fe484ead14f8625eaf2..3d7351c844aac0ae2392d441796ce9904dcaf717 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI 6
+#define NR_IPI 7
 
 typedef struct {
        unsigned int __softirq_pending;
index bfc198c759130109d44b73b4506ea853d01fba28..863c892b4aaa7403c3bde97b7de3b2cd9a75519c 100644 (file)
@@ -16,7 +16,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm goto("1:\n\t"
+       asm_volatile_goto("1:\n\t"
                 JUMP_LABEL_NOP "\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".word 1b, %l[l_yes], %c0\n\t"
index 402a2bc6aa687b94b09af6efa039c58fb80436d2..17a3fa2979e8ae5c5a56f88eda448f635c3e132c 100644 (file)
@@ -49,6 +49,7 @@ struct machine_desc {
        bool                    (*smp_init)(void);
        void                    (*fixup)(struct tag *, char **,
                                         struct meminfo *);
+       void                    (*init_meminfo)(void);
        void                    (*reserve)(void);/* reserve mem blocks  */
        void                    (*map_io)(void);/* IO mapping function  */
        void                    (*init_early)(void);
index 0f7b7620e9a554b0b4a0ba4939a60c905e0a435c..5506618119f923b3fa6e0484b3552c6d0dbc1e5d 100644 (file)
@@ -41,6 +41,14 @@ extern void mcpm_entry_point(void);
  */
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
 
+/*
+ * This sets an early poke, i.e. a value to be poked into some address
+ * from very early assembly code before the CPU is ungated.  The
+ * address must be physical, and if 0 then nothing will happen.
+ */
+void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
+                        unsigned long poke_phys_addr, unsigned long poke_val);
+
 /*
  * CPU/cluster power operations API for higher subsystems to use.
  */
@@ -76,8 +84,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
  *
  * This must be called with interrupts disabled.
  *
- * This does not return.  Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously, in which case the caller should take appropriate action.
  */
 void mcpm_cpu_power_down(void);
 
@@ -98,8 +109,11 @@ void mcpm_cpu_power_down(void);
  *
  * This must be called with interrupts disabled.
  *
- * This does not return.  Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously, in which case the caller should take appropriate action.
  */
 void mcpm_cpu_suspend(u64 expected_residency);
 
index e750a938fd3ce283ccd351cb73ed9c3d8df84e75..6748d6295a1a07ec23738d660dafb88489f99595 100644 (file)
  * so that all we need to do is modify the 8-bit constant field.
  */
 #define __PV_BITS_31_24        0x81000000
+#define __PV_BITS_7_0  0x81
+
+extern phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
+extern u64 __pv_phys_offset;
+extern u64 __pv_offset;
+extern void fixup_pv_table(const void *, unsigned long);
+extern const void *__pv_table_begin, *__pv_table_end;
 
-extern unsigned long __pv_phys_offset;
 #define PHYS_OFFSET __pv_phys_offset
 
 #define __pv_stub(from,to,instr,type)                  \
@@ -185,22 +191,58 @@ extern unsigned long __pv_phys_offset;
        : "=r" (to)                                     \
        : "r" (from), "I" (type))
 
-static inline unsigned long __virt_to_phys(unsigned long x)
+#define __pv_stub_mov_hi(t)                            \
+       __asm__ volatile("@ __pv_stub_mov\n"            \
+       "1:     mov     %R0, %1\n"                      \
+       "       .pushsection .pv_table,\"a\"\n"         \
+       "       .long   1b\n"                           \
+       "       .popsection\n"                          \
+       : "=r" (t)                                      \
+       : "I" (__PV_BITS_7_0))
+
+#define __pv_add_carry_stub(x, y)                      \
+       __asm__ volatile("@ __pv_add_carry_stub\n"      \
+       "1:     adds    %Q0, %1, %2\n"                  \
+       "       adc     %R0, %R0, #0\n"                 \
+       "       .pushsection .pv_table,\"a\"\n"         \
+       "       .long   1b\n"                           \
+       "       .popsection\n"                          \
+       : "+r" (y)                                      \
+       : "r" (x), "I" (__PV_BITS_31_24)                \
+       : "cc")
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
 {
-       unsigned long t;
-       __pv_stub(x, t, "add", __PV_BITS_31_24);
+       phys_addr_t t;
+
+       if (sizeof(phys_addr_t) == 4) {
+               __pv_stub(x, t, "add", __PV_BITS_31_24);
+       } else {
+               __pv_stub_mov_hi(t);
+               __pv_add_carry_stub(x, t);
+       }
        return t;
 }
 
-static inline unsigned long __phys_to_virt(unsigned long x)
+static inline unsigned long __phys_to_virt(phys_addr_t x)
 {
        unsigned long t;
        __pv_stub(x, t, "sub", __PV_BITS_31_24);
        return t;
 }
+
 #else
-#define __virt_to_phys(x)      ((x) - PAGE_OFFSET + PHYS_OFFSET)
-#define __phys_to_virt(x)      ((x) - PHYS_OFFSET + PAGE_OFFSET)
+
+static inline phys_addr_t __virt_to_phys(unsigned long x)
+{
+       return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
+}
+
+static inline unsigned long __phys_to_virt(phys_addr_t x)
+{
+       return x - PHYS_OFFSET + PAGE_OFFSET;
+}
+
 #endif
 #endif
 #endif /* __ASSEMBLY__ */
@@ -238,16 +280,31 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
 
 static inline void *phys_to_virt(phys_addr_t x)
 {
-       return (void *)(__phys_to_virt((unsigned long)(x)));
+       return (void *)__phys_to_virt(x);
 }
 
 /*
  * Drivers should NOT use these either.
  */
 #define __pa(x)                        __virt_to_phys((unsigned long)(x))
-#define __va(x)                        ((void *)__phys_to_virt((unsigned long)(x)))
+#define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 
+/*
+ * These are for systems where the hardware interconnect supports an alias of
+ * physical memory for idmap purposes.  Most cases should leave these
+ * untouched.
+ */
+static inline phys_addr_t __virt_to_idmap(unsigned long x)
+{
+       if (arch_virt_to_idmap)
+               return arch_virt_to_idmap(x);
+       else
+               return __virt_to_phys(x);
+}
+
+#define virt_to_idmap(x)       __virt_to_idmap((unsigned long)(x))
+
 /*
  * Virtual <-> DMA view memory address translations
  * Again, these are *only* valid on the kernel direct mapped RAM
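
A hedged sketch, not in the merge: a platform whose interconnect exposes an alias of DRAM for identity-map purposes could hook arch_virt_to_idmap roughly as below. The SoC name, alias offset, and init hook are invented; only arch_virt_to_idmap and __virt_to_phys() come from the header changes above.

#include <linux/init.h>
#include <asm/memory.h>

/* Hypothetical SoC where DRAM is also visible at a fixed interconnect alias. */
#define MY_SOC_IDMAP_ALIAS_OFFSET       0x80000000UL    /* made-up value */

static phys_addr_t my_soc_virt_to_idmap(unsigned long x)
{
        return __virt_to_phys(x) + MY_SOC_IDMAP_ALIAS_OFFSET;
}

static void __init my_soc_init_early(void)
{
        /* from now on virt_to_idmap() resolves to the aliased view */
        arch_virt_to_idmap = my_soc_virt_to_idmap;
}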
index f97ee02386ee063ba12b78786c9a6cd8a4106676..86a659a19526c75a2ba3b91ce839925cc52d26de 100644 (file)
@@ -181,6 +181,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 
+/*
+ * We don't have huge page support for short descriptors; for the moment,
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd) (0)
+#define pmd_thp_or_huge(pmd)   (0)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_PGTABLE_2LEVEL_H */
index 5689c18c85f5ebafb95a9bb2cc99fda227992976..39c54cfa03e9b103ef39982a43bac74d5436b791 100644 (file)
@@ -206,6 +206,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         (!(pmd_val(pmd) & PMD_SECT_RDONLY))
 
+#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd)   (pmd_huge(pmd) || pmd_trans_huge(pmd))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)    (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
 #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
index 413f3876341cd6fd2e7bc4b1c6a71873cadaa887..c3d5fc124a054c6309ffacdb2845ff22fd5bfa56 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/hw_breakpoint.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
+#include <asm/unified.h>
 
 #ifdef __KERNEL__
 #define STACK_TOP      ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -87,6 +88,17 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_EIP(tsk)  task_pt_regs(tsk)->ARM_pc
 #define KSTK_ESP(tsk)  task_pt_regs(tsk)->ARM_sp
 
+#ifdef CONFIG_SMP
+#define __ALT_SMP_ASM(smp, up)                                         \
+       "9998:  " smp "\n"                                              \
+       "       .pushsection \".alt.smp.init\", \"a\"\n"                \
+       "       .long   9998b\n"                                        \
+       "       " up "\n"                                               \
+       "       .popsection\n"
+#else
+#define __ALT_SMP_ASM(smp, up) up
+#endif
+
 /*
  * Prefetching support - only ARMv5.
  */
@@ -97,17 +109,22 @@ static inline void prefetch(const void *ptr)
 {
        __asm__ __volatile__(
                "pld\t%a0"
-               :
-               : "p" (ptr)
-               : "cc");
+               :: "p" (ptr));
 }
 
+#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
 #define ARCH_HAS_PREFETCHW
-#define prefetchw(ptr) prefetch(ptr)
-
-#define ARCH_HAS_SPINLOCK_PREFETCH
-#define spin_lock_prefetch(x) do { } while (0)
-
+static inline void prefetchw(const void *ptr)
+{
+       __asm__ __volatile__(
+               ".arch_extension        mp\n"
+               __ALT_SMP_ASM(
+                       WASM(pldw)              "\t%a0",
+                       WASM(pld)               "\t%a0"
+               )
+               :: "p" (ptr));
+}
+#endif
 #endif
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
index a8cae71caceb3fb89c1ec949063b7a0d621dbdca..22a3b9b5d4a16fd4ece50bdfc83859f6ea38352f 100644 (file)
@@ -84,6 +84,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
 
+extern int register_ipi_completion(struct completion *completion, int cpu);
+
 struct smp_operations {
 #ifdef CONFIG_SMP
        /*
index 4f2c28060c9aa227c47e73ac6557c45add91128f..ef3c6072aa45345ae4594f22aebbe9a9ebc538f1 100644 (file)
@@ -5,21 +5,13 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#include <asm/processor.h>
+#include <linux/prefetch.h>
 
 /*
  * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
  * extensions, so when running on UP, we have to patch these instructions away.
  */
-#define ALT_SMP(smp, up)                                       \
-       "9998:  " smp "\n"                                      \
-       "       .pushsection \".alt.smp.init\", \"a\"\n"        \
-       "       .long   9998b\n"                                \
-       "       " up "\n"                                       \
-       "       .popsection\n"
-
 #ifdef CONFIG_THUMB2_KERNEL
-#define SEV            ALT_SMP("sev.w", "nop.w")
 /*
  * For Thumb-2, special care is needed to ensure that the conditional WFE
  * instruction really does assemble to exactly 4 bytes (as required by
  * the assembler won't change IT instructions which are explicitly present
  * in the input.
  */
-#define WFE(cond)      ALT_SMP(                \
+#define WFE(cond)      __ALT_SMP_ASM(          \
        "it " cond "\n\t"                       \
        "wfe" cond ".n",                        \
                                                \
        "nop.w"                                 \
 )
 #else
-#define SEV            ALT_SMP("sev", "nop")
-#define WFE(cond)      ALT_SMP("wfe" cond, "nop")
+#define WFE(cond)      __ALT_SMP_ASM("wfe" cond, "nop")
 #endif
 
+#define SEV            __ALT_SMP_ASM(WASM(sev), WASM(nop))
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
@@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
        u32 newval;
        arch_spinlock_t lockval;
 
+       prefetchw(&lock->slock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%3]\n"
 "      add     %1, %0, %4\n"
@@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
        unsigned long contended, res;
        u32 slock;
 
+       prefetchw(&lock->slock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%3]\n"
@@ -127,10 +122,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        dsb_sev();
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+       return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-       struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-       return tickets.owner != tickets.next;
+       return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -152,6 +151,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp;
 
+       prefetchw(&rw->lock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%1]\n"
 "      teq     %0, #0\n"
@@ -170,6 +170,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
        unsigned long contended, res;
 
+       prefetchw(&rw->lock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%2]\n"
@@ -203,7 +204,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 }
 
 /* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)         ((x)->lock == 0)
+#define arch_write_can_lock(x)         (ACCESS_ONCE((x)->lock) == 0)
 
 /*
  * Read locks are a bit more hairy:
@@ -221,6 +222,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
        unsigned long tmp, tmp2;
 
+       prefetchw(&rw->lock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%2]\n"
 "      adds    %0, %0, #1\n"
@@ -241,6 +243,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
        smp_mb();
 
+       prefetchw(&rw->lock);
        __asm__ __volatile__(
 "1:    ldrex   %0, [%2]\n"
 "      sub     %0, %0, #1\n"
@@ -259,6 +262,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
        unsigned long contended, res;
 
+       prefetchw(&rw->lock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%2]\n"
@@ -280,7 +284,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 }
 
 /* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)          ((x)->lock < 0x80000000)
+#define arch_read_can_lock(x)          (ACCESS_ONCE((x)->lock) < 0x80000000)
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
index b262d2f8b4784eba5b6805d431468c285c434b88..47663fcb10ad7aad7e3bc87f31636a7a77342e36 100644 (file)
@@ -25,7 +25,7 @@ typedef struct {
 #define __ARCH_SPIN_LOCK_UNLOCKED      { { 0 } }
 
 typedef struct {
-       volatile unsigned int lock;
+       u32 lock;
 } arch_rwlock_t;
 
 #define __ARCH_RW_LOCK_UNLOCKED                { 0 }
index f1d96d4e8092a652aeb84f5825082a8f9064cd20..73ddd7239b33aa77d178ae1341c0c46c736a08e5 100644 (file)
@@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
                                         unsigned int i, unsigned int n,
                                         unsigned long *args)
 {
+       if (n == 0)
+               return;
+
        if (i + n > SYSCALL_MAX_ARGS) {
                unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
                unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
                                         unsigned int i, unsigned int n,
                                         const unsigned long *args)
 {
+       if (n == 0)
+               return;
+
        if (i + n > SYSCALL_MAX_ARGS) {
                pr_warning("%s called with max args %d, handling only %d\n",
                           __func__, i + n, SYSCALL_MAX_ARGS);
index 38960264040cd989068b7e897979194bd5d30bc4..def9e570199f90a0c42dc7da0f8998fba6a0ab39 100644 (file)
@@ -560,37 +560,6 @@ static inline void __flush_bp_all(void)
                asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
 }
 
-#include <asm/cputype.h>
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline int erratum_a15_798181(void)
-{
-       unsigned int midr = read_cpuid_id();
-
-       /* Cortex-A15 r0p0..r3p2 affected */
-       if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-               return 0;
-       return 1;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-       /*
-        * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
-        */
-       asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
-       dsb(ish);
-}
-#else
-static inline int erratum_a15_798181(void)
-{
-       return 0;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-}
-#endif
-
 /*
  *     flush_pmd_entry
  *
@@ -697,4 +666,21 @@ extern void flush_bp_all(void);
 
 #endif
 
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+       if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+               erratum_a15_798181_handler))
+               return erratum_a15_798181_handler();
+       return false;
+}
+#endif
+
 #endif
index f5989f46b4d2d450f18b24faa946de750394ec13..b88beaba6b4a5cf9c0ccded73723c4ea41539167 100644 (file)
@@ -38,6 +38,8 @@
 #ifdef __ASSEMBLY__
 #define W(instr)       instr.w
 #define BSYM(sym)      sym + 1
+#else
+#define WASM(instr)    #instr ".w"
 #endif
 
 #else  /* !CONFIG_THUMB2_KERNEL */
@@ -50,6 +52,8 @@
 #ifdef __ASSEMBLY__
 #define W(instr)       instr
 #define BSYM(sym)      sym
+#else
+#define WASM(instr)    #instr
 #endif
 
 #endif /* CONFIG_THUMB2_KERNEL */
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
new file mode 100644 (file)
index 0000000..2265a19
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define UARTn_CMD              0x000c
+#define UARTn_CMD_TXEN                 0x0004
+
+#define        UARTn_STATUS            0x0010
+#define        UARTn_STATUS_TXC                0x0020
+#define        UARTn_STATUS_TXBL               0x0040
+
+#define        UARTn_TXDATA            0x0034
+
+               .macro  addruart, rx, tmp
+               ldr     \rx, =(CONFIG_DEBUG_UART_PHYS)
+
+               /*
+                * Enable TX. The driver might disable it to save energy. We
+                * don't bother disabling it again at the end, since power
+                * consumption isn't that important while debugging.
+                */
+               ldr     \tmp, =(UARTn_CMD_TXEN)
+               str     \tmp, [\rx, #UARTn_CMD]
+               .endm
+
+               .macro  senduart,rd,rx
+               strb    \rd, [\rx, #UARTn_TXDATA]
+               .endm
+
+               .macro  waituart,rd,rx
+1001:          ldr     \rd, [\rx, #UARTn_STATUS]
+               tst     \rd, #UARTn_STATUS_TXBL
+               beq     1001b
+               .endm
+
+               .macro  busyuart,rd,rx
+1001:          ldr     \rd, [\rx, UARTn_STATUS]
+               tst     \rd, #UARTn_STATUS_TXC
+               bne     1001b
+               .endm
index 18d76fd5a2afb2bf27b91980f29c77094e6638c0..70a1c9da30ca39d4d4d79e8e73c3b8aec0d607e3 100644 (file)
@@ -7,6 +7,7 @@ header-y += hwcap.h
 header-y += ioctls.h
 header-y += kvm_para.h
 header-y += mman.h
+header-y += perf_regs.h
 header-y += posix_types.h
 header-y += ptrace.h
 header-y += setup.h
diff --git a/arch/arm/include/uapi/asm/perf_regs.h b/arch/arm/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..ce59448
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _ASM_ARM_PERF_REGS_H
+#define _ASM_ARM_PERF_REGS_H
+
+enum perf_event_arm_regs {
+       PERF_REG_ARM_R0,
+       PERF_REG_ARM_R1,
+       PERF_REG_ARM_R2,
+       PERF_REG_ARM_R3,
+       PERF_REG_ARM_R4,
+       PERF_REG_ARM_R5,
+       PERF_REG_ARM_R6,
+       PERF_REG_ARM_R7,
+       PERF_REG_ARM_R8,
+       PERF_REG_ARM_R9,
+       PERF_REG_ARM_R10,
+       PERF_REG_ARM_FP,
+       PERF_REG_ARM_IP,
+       PERF_REG_ARM_SP,
+       PERF_REG_ARM_LR,
+       PERF_REG_ARM_PC,
+       PERF_REG_ARM_MAX,
+};
+#endif /* _ASM_ARM_PERF_REGS_H */
index 5140df5f23aa485214914a8dfbfdf31dc04a5691..9b818ca3610bce4d61cf149ec62c0ebd2de8673c 100644 (file)
@@ -78,6 +78,7 @@ obj-$(CONFIG_CPU_XSC3)                += xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)       += xscale-cp0.o
 obj-$(CONFIG_CPU_PJ4)          += pj4-cp0.o
 obj-$(CONFIG_IWMMXT)           += iwmmxt.o
+obj-$(CONFIG_PERF_EVENTS)      += perf_regs.o
 obj-$(CONFIG_HW_PERF_EVENTS)   += perf_event.o perf_event_cpu.o
 AFLAGS_iwmmxt.o                        := -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)  += topology.o
index 60d3b738d4200987e76c75b2e9da513920087a89..1f031ddd0667a3e842317a90c59db3acc2284894 100644 (file)
@@ -155,4 +155,5 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
 
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
 EXPORT_SYMBOL(__pv_phys_offset);
+EXPORT_SYMBOL(__pv_offset);
 #endif
index 2c7cc1e03473aee9463e86d7dbedfc999f6e51f7..0f6c6d1fe447918dd5d98193b53a95a6e0a0fc1e 100644 (file)
@@ -487,7 +487,26 @@ __fixup_smp:
        mrc     p15, 0, r0, c0, c0, 5   @ read MPIDR
        and     r0, r0, #0xc0000000     @ multiprocessing extensions and
        teq     r0, #0x80000000         @ not part of a uniprocessor system?
-       moveq   pc, lr                  @ yes, assume SMP
+       bne    __fixup_smp_on_up        @ no, assume UP
+
+       @ Core indicates it is SMP. Check for Aegis SOC where a single
+       @ Cortex-A9 CPU is present but SMP operations fault.
+       mov     r4, #0x41000000
+       orr     r4, r4, #0x0000c000
+       orr     r4, r4, #0x00000090
+       teq     r3, r4                  @ Check for ARM Cortex-A9
+       movne   pc, lr                  @ Not ARM Cortex-A9,
+
+       @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
+       @ below address check will need to be #ifdef'd or equivalent
+       @ for the Aegis platform.
+       mrc     p15, 4, r0, c15, c0     @ get SCU base address
+       teq     r0, #0x0                @ '0' on actual UP A9 hardware
+       beq     __fixup_smp_on_up       @ So it's an A9 UP
+       ldr     r0, [r0, #4]            @ read SCU Config
+       and     r0, r0, #0x3            @ number of CPUs
+       teq     r0, #0x0                @ is 1?
+       movne   pc, lr
 
 __fixup_smp_on_up:
        adr     r0, 1f
@@ -536,6 +555,14 @@ ENTRY(fixup_smp)
        ldmfd   sp!, {r4 - r6, pc}
 ENDPROC(fixup_smp)
 
+#ifdef __ARMEB__
+#define LOW_OFFSET     0x4
+#define HIGH_OFFSET    0x0
+#else
+#define LOW_OFFSET     0x0
+#define HIGH_OFFSET    0x4
+#endif
+
 #ifdef CONFIG_ARM_PATCH_PHYS_VIRT
 
 /* __fixup_pv_table - patch the stub instructions with the delta between
@@ -546,17 +573,20 @@ ENDPROC(fixup_smp)
        __HEAD
 __fixup_pv_table:
        adr     r0, 1f
-       ldmia   r0, {r3-r5, r7}
-       sub     r3, r0, r3      @ PHYS_OFFSET - PAGE_OFFSET
+       ldmia   r0, {r3-r7}
+       mvn     ip, #0
+       subs    r3, r0, r3      @ PHYS_OFFSET - PAGE_OFFSET
        add     r4, r4, r3      @ adjust table start address
        add     r5, r5, r3      @ adjust table end address
-       add     r7, r7, r3      @ adjust __pv_phys_offset address
-       str     r8, [r7]        @ save computed PHYS_OFFSET to __pv_phys_offset
+       add     r6, r6, r3      @ adjust __pv_phys_offset address
+       add     r7, r7, r3      @ adjust __pv_offset address
+       str     r8, [r6, #LOW_OFFSET]   @ save computed PHYS_OFFSET to __pv_phys_offset
+       strcc   ip, [r7, #HIGH_OFFSET]  @ save to __pv_offset high bits
        mov     r6, r3, lsr #24 @ constant for add/sub instructions
        teq     r3, r6, lsl #24 @ must be 16MiB aligned
 THUMB( it      ne              @ cross section branch )
        bne     __error
-       str     r6, [r7, #4]    @ save to __pv_offset
+       str     r3, [r7, #LOW_OFFSET]   @ save to __pv_offset low bits
        b       __fixup_a_pv_table
 ENDPROC(__fixup_pv_table)
 
@@ -565,10 +595,19 @@ ENDPROC(__fixup_pv_table)
        .long   __pv_table_begin
        .long   __pv_table_end
 2:     .long   __pv_phys_offset
+       .long   __pv_offset
 
        .text
 __fixup_a_pv_table:
+       adr     r0, 3f
+       ldr     r6, [r0]
+       add     r6, r6, r3
+       ldr     r0, [r6, #HIGH_OFFSET]  @ pv_offset high word
+       ldr     r6, [r6, #LOW_OFFSET]   @ pv_offset low word
+       mov     r6, r6, lsr #24
+       cmn     r0, #1
 #ifdef CONFIG_THUMB2_KERNEL
+       moveq   r0, #0x200000   @ set bit 21, mov to mvn instruction
        lsls    r6, #24
        beq     2f
        clz     r7, r6
@@ -582,18 +621,28 @@ __fixup_a_pv_table:
        b       2f
 1:     add     r7, r3
        ldrh    ip, [r7, #2]
-       and     ip, 0x8f00
-       orr     ip, r6  @ mask in offset bits 31-24
+       tst     ip, #0x4000
+       and     ip, #0x8f00
+       orrne   ip, r6  @ mask in offset bits 31-24
+       orreq   ip, r0  @ mask in offset bits 7-0
        strh    ip, [r7, #2]
+       ldrheq  ip, [r7]
+       biceq   ip, #0x20
+       orreq   ip, ip, r0, lsr #16
+       strheq  ip, [r7]
 2:     cmp     r4, r5
        ldrcc   r7, [r4], #4    @ use branch for delay slot
        bcc     1b
        bx      lr
 #else
+       moveq   r0, #0x400000   @ set bit 22, mov to mvn instruction
        b       2f
 1:     ldr     ip, [r7, r3]
        bic     ip, ip, #0x000000ff
-       orr     ip, ip, r6      @ mask in offset bits 31-24
+       tst     ip, #0xf00      @ check the rotation field
+       orrne   ip, ip, r6      @ mask in offset bits 31-24
+       biceq   ip, ip, #0x400000       @ clear bit 22
+       orreq   ip, ip, r0      @ mask in offset bits 7-0
        str     ip, [r7, r3]
 2:     cmp     r4, r5
        ldrcc   r7, [r4], #4    @ use branch for delay slot
@@ -602,28 +651,29 @@ __fixup_a_pv_table:
 #endif
 ENDPROC(__fixup_a_pv_table)
 
+3:     .long __pv_offset
+
 ENTRY(fixup_pv_table)
        stmfd   sp!, {r4 - r7, lr}
-       ldr     r2, 2f                  @ get address of __pv_phys_offset
        mov     r3, #0                  @ no offset
        mov     r4, r0                  @ r0 = table start
        add     r5, r0, r1              @ r1 = table size
-       ldr     r6, [r2, #4]            @ get __pv_offset
        bl      __fixup_a_pv_table
        ldmfd   sp!, {r4 - r7, pc}
 ENDPROC(fixup_pv_table)
 
-       .align
-2:     .long   __pv_phys_offset
-
        .data
        .globl  __pv_phys_offset
        .type   __pv_phys_offset, %object
 __pv_phys_offset:
-       .long   0
-       .size   __pv_phys_offset, . - __pv_phys_offset
+       .quad   0
+       .size   __pv_phys_offset, . -__pv_phys_offset
+
+       .globl  __pv_offset
+       .type   __pv_offset, %object
 __pv_offset:
-       .long   0
+       .quad   0
+       .size   __pv_offset, . -__pv_offset
 #endif
 
 #include "head-common.S"
index e186ee1e63f6c85261f96844a594080e719c2e07..bc3f2efa0d86b4ff55d6b19833eae688b111fd27 100644 (file)
@@ -256,12 +256,11 @@ validate_event(struct pmu_hw_events *hw_events,
               struct perf_event *event)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
-       struct pmu *leader_pmu = event->group_leader->pmu;
 
        if (is_software_event(event))
                return 1;
 
-       if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+       if (event->state < PERF_EVENT_STATE_OFF)
                return 1;
 
        if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
new file mode 100644 (file)
index 0000000..6e4379c
--- /dev/null
@@ -0,0 +1,30 @@
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+       if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM_MAX))
+               return 0;
+
+       return regs->uregs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+       if (!mask || mask & REG_RESERVED)
+               return -EINVAL;
+
+       return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+       return PERF_SAMPLE_REGS_ABI_32;
+}
index 0e1e2b3afa45864b5776c177ceb5001402e31681..6b4ce802ac4ea3e2d88bbe04adce6713651fb066 100644 (file)
@@ -73,6 +73,8 @@ __setup("fpe=", fpe_setup);
 #endif
 
 extern void paging_init(const struct machine_desc *desc);
+extern void early_paging_init(const struct machine_desc *,
+                             struct proc_info_list *);
 extern void sanity_check_meminfo(void);
 extern enum reboot_mode reboot_mode;
 extern void setup_dma_zone(const struct machine_desc *desc);
@@ -599,6 +601,8 @@ static void __init setup_processor(void)
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
 
+       erratum_a15_798181_init();
+
        feat_v6_fixup();
 
        cacheid_init();
@@ -878,6 +882,8 @@ void __init setup_arch(char **cmdline_p)
        parse_early_param();
 
        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
+       early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);
 
index db1536b8b30b497fe11923dfe9f67a72f86f7954..62246020191138e7d3bcdc90cf5d7c645b7ec1b9 100644 (file)
@@ -55,6 +55,7 @@
  * specific registers and some other data for resume.
  *  r0 = suspend function arg0
  *  r1 = suspend function
+ *  r2 = MPIDR value the resuming CPU will use
  */
 ENTRY(__cpu_suspend)
        stmfd   sp!, {r4 - r11, lr}
@@ -67,23 +68,18 @@ ENTRY(__cpu_suspend)
        mov     r5, sp                  @ current virtual SP
        add     r4, r4, #12             @ Space for pgd, virt sp, phys resume fn
        sub     sp, sp, r4              @ allocate CPU state on stack
-       stmfd   sp!, {r0, r1}           @ save suspend func arg and pointer
-       add     r0, sp, #8              @ save pointer to save block
-       mov     r1, r4                  @ size of save block
-       mov     r2, r5                  @ virtual SP
        ldr     r3, =sleep_save_sp
+       stmfd   sp!, {r0, r1}           @ save suspend func arg and pointer
        ldr     r3, [r3, #SLEEP_SAVE_SP_VIRT]
-       ALT_SMP(mrc p15, 0, r9, c0, c0, 5)
-        ALT_UP_B(1f)
-       ldr     r8, =mpidr_hash
-       /*
-        * This ldmia relies on the memory layout of the mpidr_hash
-        * struct mpidr_hash.
-        */
-       ldmia   r8, {r4-r7}     @ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts
-       compute_mpidr_hash      lr, r5, r6, r7, r9, r4
-       add     r3, r3, lr, lsl #2
-1:
+       ALT_SMP(ldr r0, =mpidr_hash)
+       ALT_UP_B(1f)
+       /* This ldmia relies on the memory layout of the mpidr_hash struct */
+       ldmia   r0, {r1, r6-r8} @ r1 = mpidr mask (r6,r7,r8) = l[0,1,2] shifts
+       compute_mpidr_hash      r0, r6, r7, r8, r2, r1
+       add     r3, r3, r0, lsl #2
+1:     mov     r2, r5                  @ virtual SP
+       mov     r1, r4                  @ size of save block
+       add     r0, sp, #8              @ pointer to save block
        bl      __cpu_suspend_save
        adr     lr, BSYM(cpu_suspend_abort)
        ldmfd   sp!, {r0, pc}           @ call suspend fn
index 72024ea8a3a6c07038103e153527cb2454e4552f..5c820cbcf918140873650f9a4a61376d4c951bcf 100644 (file)
@@ -66,6 +66,7 @@ enum ipi_msg_type {
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
+       IPI_COMPLETION,
 };
 
 static DECLARE_COMPLETION(cpu_running);
@@ -80,7 +81,7 @@ void __init smp_set_ops(struct smp_operations *ops)
 
 static unsigned long get_arch_pgd(pgd_t *pgd)
 {
-       phys_addr_t pgdir = virt_to_phys(pgd);
+       phys_addr_t pgdir = virt_to_idmap(pgd);
        BUG_ON(pgdir & ARCH_PGD_MASK);
        return pgdir >> ARCH_PGD_SHIFT;
 }
@@ -456,6 +457,7 @@ static const char *ipi_types[NR_IPI] = {
        S(IPI_CALL_FUNC, "Function call interrupts"),
        S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
        S(IPI_CPU_STOP, "CPU stop interrupts"),
+       S(IPI_COMPLETION, "completion interrupts"),
 };
 
 void show_ipi_list(struct seq_file *p, int prec)
@@ -515,6 +517,19 @@ static void ipi_cpu_stop(unsigned int cpu)
                cpu_relax();
 }
 
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+       per_cpu(cpu_completion, cpu) = completion;
+       return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+       complete(per_cpu(cpu_completion, cpu));
+}
+
 /*
  * Main handler for inter-processor interrupts
  */
@@ -565,6 +580,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                irq_exit();
                break;
 
+       case IPI_COMPLETION:
+               irq_enter();
+               ipi_complete(cpu);
+               irq_exit();
+               break;
+
        default:
                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                       cpu, ipinr);
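
For illustration only (the in-tree consumer is the bL switcher, which is outside this hunk): a caller registers a completion for its own CPU, hands the returned IPI number to the remote CPU by some platform-specific means, and sleeps until the remote side raises IPI_COMPLETION, at which point ipi_complete() above wakes it.

#include <linux/completion.h>
#include <asm/smp.h>

static void wait_for_remote_signal(int this_cpu)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int ipi_nr;

        ipi_nr = register_ipi_completion(&done, this_cpu);

        /*
         * Hand 'ipi_nr' to the remote CPU (e.g. through shared state) so it
         * can raise that SGI at us; the mechanism is platform specific.
         */
        (void)ipi_nr;

        wait_for_completion(&done);     /* completed by ipi_complete() */
}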
index 83ccca303df83c4a1f40dce35f324153d979ad16..95d063620b76a6f706bccc23635537ed4bceb01a 100644 (file)
@@ -70,6 +70,40 @@ static inline void ipi_flush_bp_all(void *ignored)
        local_flush_bp_all();
 }
 
+#ifdef CONFIG_ARM_ERRATA_798181
+bool (*erratum_a15_798181_handler)(void);
+
+static bool erratum_a15_798181_partial(void)
+{
+       asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+       dsb(ish);
+       return false;
+}
+
+static bool erratum_a15_798181_broadcast(void)
+{
+       asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+       dsb(ish);
+       return true;
+}
+
+void erratum_a15_798181_init(void)
+{
+       unsigned int midr = read_cpuid_id();
+       unsigned int revidr = read_cpuid(CPUID_REVIDR);
+
+       /* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+       if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
+           (revidr & 0x210) == 0x210) {
+               return;
+       }
+       if (revidr & 0x10)
+               erratum_a15_798181_handler = erratum_a15_798181_partial;
+       else
+               erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+}
+#endif
+
 static void ipi_flush_tlb_a15_erratum(void *arg)
 {
        dmb();
@@ -80,7 +114,6 @@ static void broadcast_tlb_a15_erratum(void)
        if (!erratum_a15_798181())
                return;
 
-       dummy_flush_tlb_a15_erratum();
        smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
 }
 
@@ -92,7 +125,6 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
        if (!erratum_a15_798181())
                return;
 
-       dummy_flush_tlb_a15_erratum();
        this_cpu = get_cpu();
        a15_erratum_get_cpumask(this_cpu, mm, &mask);
        smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
index 41cf3cbf756de473be3bb1ebb268445aeacadc5d..2835d35234ca459f4d7086a6f811ff35652a6a11 100644 (file)
@@ -10,7 +10,7 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
+extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
 extern void cpu_resume_mmu(void);
 
 #ifdef CONFIG_MMU
@@ -21,6 +21,7 @@ extern void cpu_resume_mmu(void);
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
        struct mm_struct *mm = current->active_mm;
+       u32 __mpidr = cpu_logical_map(smp_processor_id());
        int ret;
 
        if (!idmap_pgd)
@@ -32,7 +33,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
         * resume (indicated by a zero return code), we need to switch
         * back to the correct page tables.
         */
-       ret = __cpu_suspend(arg, fn);
+       ret = __cpu_suspend(arg, fn, __mpidr);
        if (ret == 0) {
                cpu_switch_mm(mm->pgd, mm);
                local_flush_bp_all();
@@ -44,7 +45,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 #else
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-       return __cpu_suspend(arg, fn);
+       u32 __mpidr = cpu_logical_map(smp_processor_id());
+       return __cpu_suspend(arg, fn, __mpidr);
 }
 #define        idmap_pgd       NULL
 #endif
index d6408d1ee543fe5e3ceabbcda01b25efb07676ba..e0c68d5bb7dc25dd3fa93dc0fa1b3899f5b09019 100644 (file)
@@ -10,6 +10,11 @@ UNWIND(      .fnstart        )
        and     r3, r0, #31             @ Get bit offset
        mov     r0, r0, lsr #5
        add     r1, r1, r0, lsl #2      @ Get word offset
+#if __LINUX_ARM_ARCH__ >= 7
+       .arch_extension mp
+       ALT_SMP(W(pldw) [r1])
+       ALT_UP(W(nop))
+#endif
        mov     r3, r2, lsl r3
 1:     ldrex   r2, [r1]
        \instr  r2, r2, r3
index 025f742dd4df6bf79b279babd264980d851f01d5..3e58d710013c3ad9b377fc76e6dad58f377e88a7 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/hardirq.h> /* for in_atomic() */
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/hugetlb.h>
 #include <asm/current.h>
 #include <asm/page.h>
 
@@ -40,7 +41,35 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
                return 0;
 
        pmd = pmd_offset(pud, addr);
-       if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+       if (unlikely(pmd_none(*pmd)))
+               return 0;
+
+       /*
+        * A pmd can be bad if it refers to a HugeTLB or THP page.
+        *
+        * Both THP and HugeTLB pages have the same pmd layout
+        * and should not be manipulated by the pte functions.
+        *
+        * Lock the page table for the destination and check
+        * to see that it's still huge and whether or not we will
+        * need to fault on write, or if we have a splitting THP.
+        */
+       if (unlikely(pmd_thp_or_huge(*pmd))) {
+               ptl = &current->mm->page_table_lock;
+               spin_lock(ptl);
+               if (unlikely(!pmd_thp_or_huge(*pmd)
+                       || pmd_hugewillfault(*pmd)
+                       || pmd_trans_splitting(*pmd))) {
+                       spin_unlock(ptl);
+                       return 0;
+               }
+
+               *ptep = NULL;
+               *ptlp = ptl;
+               return 1;
+       }
+
+       if (unlikely(pmd_bad(*pmd)))
                return 0;
 
        pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
@@ -94,7 +123,10 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
                from += tocopy;
                n -= tocopy;
 
-               pte_unmap_unlock(pte, ptl);
+               if (pte)
+                       pte_unmap_unlock(pte, ptl);
+               else
+                       spin_unlock(ptl);
        }
        if (!atomic)
                up_read(&current->mm->mmap_sem);
@@ -147,7 +179,10 @@ __clear_user_memset(void __user *addr, unsigned long n)
                addr += tocopy;
                n -= tocopy;
 
-               pte_unmap_unlock(pte, ptl);
+               if (pte)
+                       pte_unmap_unlock(pte, ptl);
+               else
+                       spin_unlock(ptl);
        }
        up_read(&current->mm->mmap_sem);
 
index 3b0a9538093c168cb4cd80f4ff559b0458d0dfb3..c1b737097c9543faf67c4ed78f2617e3add0606f 100644 (file)
@@ -98,7 +98,6 @@ obj-y                         += leds.o
 # Power Management
 obj-$(CONFIG_PM)               += pm.o
 obj-$(CONFIG_AT91_SLOW_CLOCK)  += pm_slowclock.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
 
 ifeq ($(CONFIG_PM_DEBUG),y)
 CFLAGS_pm.o += -DDEBUG
index 4aad93d54d6f059cc275ac819d7c55df196df4b1..25805f2f6010f3d7b98035f9c8e3b1e09524a4e3 100644 (file)
@@ -27,6 +27,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -327,6 +328,7 @@ static void __init at91rm9200_ioremap_registers(void)
 {
        at91rm9200_ioremap_st(AT91RM9200_BASE_ST);
        at91_ioremap_ramc(0, AT91RM9200_BASE_MC, 256);
+       at91_pm_set_standby(at91rm9200_standby);
 }
 
 static void __init at91rm9200_initialize(void)
index 5de6074b4f4f3681d308cfb34a8554a05550b2f2..f8629a3fa2452791b6215c19f77124813e1d3f5f 100644 (file)
@@ -28,6 +28,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -342,6 +343,7 @@ static void __init at91sam9260_ioremap_registers(void)
        at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC);
        at91_ioremap_matrix(AT91SAM9260_BASE_MATRIX);
+       at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9260_initialize(void)
index 0e0793241ab7e1938dc4ec3fd961557344342369..1f3867a17a289beee4bb1a44ae64ca3bc96b044d 100644 (file)
@@ -27,6 +27,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -284,6 +285,7 @@ static void __init at91sam9261_ioremap_registers(void)
        at91sam926x_ioremap_pit(AT91SAM9261_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9261_BASE_SMC);
        at91_ioremap_matrix(AT91SAM9261_BASE_MATRIX);
+       at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9261_initialize(void)
index 6ce7d18508934a428ea2ac94ab51162b683b43d1..90d455d294a1814e7e72722b5614b4d15548c298 100644 (file)
@@ -26,6 +26,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -321,6 +322,7 @@ static void __init at91sam9263_ioremap_registers(void)
        at91sam9_ioremap_smc(0, AT91SAM9263_BASE_SMC0);
        at91sam9_ioremap_smc(1, AT91SAM9263_BASE_SMC1);
        at91_ioremap_matrix(AT91SAM9263_BASE_MATRIX);
+       at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9263_initialize(void)
index 474ee04d24b93729bdf32e2e4d64a7db0ea9bed8..e9bf0b8f40eb745d317e2d1d50c500eea1491aa4 100644 (file)
@@ -26,6 +26,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -370,6 +371,7 @@ static void __init at91sam9g45_ioremap_registers(void)
        at91sam926x_ioremap_pit(AT91SAM9G45_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9G45_BASE_SMC);
        at91_ioremap_matrix(AT91SAM9G45_BASE_MATRIX);
+       at91_pm_set_standby(at91_ddr_standby);
 }
 
 static void __init at91sam9g45_initialize(void)
index d4ec0d9a9872660e22f2e3c95fc2e8c154cf38e6..88995af09c043abf6198124d1c651d60ac03b0ff 100644 (file)
@@ -27,6 +27,7 @@
 #include "generic.h"
 #include "clock.h"
 #include "sam9_smc.h"
+#include "pm.h"
 
 /* --------------------------------------------------------------------
  *  Clocks
@@ -287,6 +288,7 @@ static void __init at91sam9rl_ioremap_registers(void)
        at91sam926x_ioremap_pit(AT91SAM9RL_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9RL_BASE_SMC);
        at91_ioremap_matrix(AT91SAM9RL_BASE_MATRIX);
+       at91_pm_set_standby(at91sam9_sdram_standby);
 }
 
 static void __init at91sam9rl_initialize(void)
index 0b153c87521d8e73feaf6b9bc53291431bc6b7a8..f4f8735315dafd8247f686e756423d8b9608a013 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/at73c213.h>
 #include <linux/clk.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
 
index 3284df05df14be82b8bf7acce98ef090ccf09b56..947e134ac4c3996700a6b075d24ab5afbeb2dc3f 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/fb.h>
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
index 15afb5d9271f2237e3c61acd920b2f9c6d6e0a78..9986542e8060119fcad4eee37564fdf3d8d164a6 100644 (file)
@@ -39,6 +39,8 @@
 #include "at91_rstc.h"
 #include "at91_shdwc.h"
 
+static void (*at91_pm_standby)(void);
+
 static void __init show_reset_status(void)
 {
        static char reset[] __initdata = "reset";
@@ -266,14 +268,8 @@ static int at91_pm_enter(suspend_state_t state)
                         * For ARM 926 based chips, this requirement is weaker
                         * as at91sam9 can access a RAM in self-refresh mode.
                         */
-                       if (cpu_is_at91rm9200())
-                               at91rm9200_standby();
-                       else if (cpu_is_at91sam9g45())
-                               at91sam9g45_standby();
-                       else if (cpu_is_at91sam9263())
-                               at91sam9263_standby();
-                       else
-                               at91sam9_standby();
+                       if (at91_pm_standby)
+                               at91_pm_standby();
                        break;
 
                case PM_SUSPEND_ON:
@@ -314,6 +310,18 @@ static const struct platform_suspend_ops at91_pm_ops = {
        .end    = at91_pm_end,
 };
 
+static struct platform_device at91_cpuidle_device = {
+       .name = "cpuidle-at91",
+};
+
+void at91_pm_set_standby(void (*at91_standby)(void))
+{
+       if (at91_standby) {
+               at91_cpuidle_device.dev.platform_data = at91_standby;
+               at91_pm_standby = at91_standby;
+       }
+}
+
 static int __init at91_pm_init(void)
 {
 #ifdef CONFIG_AT91_SLOW_CLOCK
@@ -325,6 +333,9 @@ static int __init at91_pm_init(void)
        /* AT91RM9200 SDRAM low-power mode cannot be used with self-refresh. */
        if (cpu_is_at91rm9200())
                at91_ramc_write(0, AT91RM9200_SDRAMC_LPR, 0);
+       
+       if (at91_cpuidle_device.dev.platform_data)
+               platform_device_register(&at91_cpuidle_device);
 
        suspend_set_ops(&at91_pm_ops);
 
index 2f5908f0b8c5ec6dc9481f85bcba758d98f00d88..3ed190ce062bd5add5a426e07b1cf6c5d1e5b89a 100644 (file)
 #ifndef __ARCH_ARM_MACH_AT91_PM
 #define __ARCH_ARM_MACH_AT91_PM
 
+#include <asm/proc-fns.h>
+
 #include <mach/at91_ramc.h>
 #include <mach/at91rm9200_sdramc.h>
 
+extern void at91_pm_set_standby(void (*at91_standby)(void));
+
 /*
  * The AT91RM9200 goes into self-refresh mode with this command, and will
  * terminate self-refresh automatically on the next SDRAM access.
@@ -45,16 +49,18 @@ static inline void at91rm9200_standby(void)
 /* We manage both DDRAM/SDRAM controllers, so we need more than one value to
  * remember.
  */
-static inline void at91sam9g45_standby(void)
+static inline void at91_ddr_standby(void)
 {
        /* Those two values allow us to delay self-refresh activation
         * to the maximum. */
-       u32 lpr0, lpr1;
-       u32 saved_lpr0, saved_lpr1;
+       u32 lpr0, lpr1 = 0;
+       u32 saved_lpr0, saved_lpr1 = 0;
 
-       saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
-       lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
-       lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+       if (at91_ramc_base[1]) {
+               saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
+               lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
+               lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
+       }
 
        saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
        lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
@@ -62,25 +68,29 @@ static inline void at91sam9g45_standby(void)
 
        /* self-refresh mode now */
        at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
-       at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
+       if (at91_ramc_base[1])
+               at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
 
        cpu_do_idle();
 
        at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
-       at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
+       if (at91_ramc_base[1])
+               at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
-static inline void at91sam9263_standby(void)
+static inline void at91sam9_sdram_standby(void)
 {
-       u32 lpr0, lpr1;
-       u32 saved_lpr0, saved_lpr1;
+       u32 lpr0, lpr1 = 0;
+       u32 saved_lpr0, saved_lpr1 = 0;
 
-       saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
-       lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
-       lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+       if (at91_ramc_base[1]) {
+               saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
+               lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
+               lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
+       }
 
        saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
        lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
@@ -88,27 +98,14 @@ static inline void at91sam9263_standby(void)
 
        /* self-refresh mode now */
        at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
-       at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
+       if (at91_ramc_base[1])
+               at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
 
        cpu_do_idle();
 
        at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
-       at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
-}
-
-static inline void at91sam9_standby(void)
-{
-       u32 saved_lpr, lpr;
-
-       saved_lpr = at91_ramc_read(0, AT91_SDRAMC_LPR);
-
-       lpr = saved_lpr & ~AT91_SDRAMC_LPCB;
-       at91_ramc_write(0, AT91_SDRAMC_LPR, lpr |
-                       AT91_SDRAMC_LPCB_SELF_REFRESH);
-
-       cpu_do_idle();
-
-       at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr);
+       if (at91_ramc_base[1])
+               at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
 }
 
 #endif
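
The pm.c and pm.h hunks above replace the cpu_is_*() chain in at91_pm_enter() with a single at91_pm_standby callback registered through at91_pm_set_standby(), and teach the DDR/SDRAM standby helpers to skip the second RAM controller when at91_ramc_base[1] is not mapped. A minimal sketch of how a SoC init path might register its routine; only at91_pm_set_standby() and at91sam9_sdram_standby() come from the hunks above, the setup function name is hypothetical:

	/* Illustrative only, not part of this commit. */
	#include "pm.h"

	static void __init at91_example_pm_setup(void)
	{
		/*
		 * Stores the callback used by at91_pm_enter() and also hands
		 * it to the "cpuidle-at91" device as platform data (pm.c hunk).
		 */
		at91_pm_set_standby(at91sam9_sdram_standby);
	}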
index b17fbcf4d9e8f34dbd876ea96af334dae4780efc..094b3459c288e37700c42ea85a57ced905323eda 100644 (file)
@@ -23,6 +23,7 @@
 #include "at91_shdwc.h"
 #include "soc.h"
 #include "generic.h"
+#include "pm.h"
 
 struct at91_init_soc __initdata at91_boot_soc;
 
@@ -376,15 +377,16 @@ static void at91_dt_rstc(void)
 }
 
 static struct of_device_id ramc_ids[] = {
-       { .compatible = "atmel,at91rm9200-sdramc" },
-       { .compatible = "atmel,at91sam9260-sdramc" },
-       { .compatible = "atmel,at91sam9g45-ddramc" },
+       { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
+       { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
+       { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
        { /*sentinel*/ }
 };
 
 static void at91_dt_ramc(void)
 {
        struct device_node *np;
+       const struct of_device_id *of_id;
 
        np = of_find_matching_node(NULL, ramc_ids);
        if (!np)
@@ -396,6 +398,12 @@ static void at91_dt_ramc(void)
        /* the controller may have 2 banks */
        at91_ramc_base[1] = of_iomap(np, 1);
 
+       of_id = of_match_node(ramc_ids, np);
+       if (!of_id)
+               pr_warn("AT91: ramc no standby function available\n");
+       else
+               at91_pm_set_standby(of_id->data);
+
        of_node_put(np);
 }
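
at91_dt_ramc() above now recovers the standby routine from the matched entry's .data pointer instead of testing SoC IDs. The general shape of that of_device_id/.data pattern, sketched with hypothetical names everywhere except the OF helpers and at91_pm_set_standby():

	#include <linux/of.h>

	static void example_standby(void) { }	/* hypothetical callback */

	static const struct of_device_id example_ids[] = {
		{ .compatible = "vendor,example-ramc", .data = example_standby },
		{ /* sentinel */ }
	};

	static void example_dt_setup(void)
	{
		struct device_node *np = of_find_matching_node(NULL, example_ids);
		const struct of_device_id *of_id;

		if (!np)
			return;

		of_id = of_match_node(example_ids, np);
		if (of_id && of_id->data)
			at91_pm_set_standby((void (*)(void))of_id->data);

		of_node_put(np);
	}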
 
index e026b19b23eaf1d8546f4996a387c13e1b8ed670..a075b3e0c5c7a5229aa87163e732d9c6b49a7a50 100644 (file)
@@ -40,7 +40,6 @@ config ARCH_DAVINCI_DA850
        bool "DA850/OMAP-L138/AM18x based system"
        select ARCH_DAVINCI_DA8XX
        select ARCH_HAS_CPUFREQ
-       select CPU_FREQ_TABLE
        select CP_INTC
 
 config ARCH_DAVINCI_DA8XX
index c4bdc0a1c36e7795a21862f8a69a7e8ead71a43b..66b5b3cb53768630812e0ef61429644ecdb7373e 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
 #include <linux/i2c/pcf857x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/spi/spi.h>
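
This hunk, and the many identical ones that follow, only move the AT24 EEPROM platform-data header from <linux/i2c/at24.h> to <linux/platform_data/at24.h>; the structure itself is unchanged. A sketch of typical board code against the new path (device name, address, size and flags here are illustrative, not taken from any of these boards):

	#include <linux/i2c.h>
	#include <linux/init.h>
	#include <linux/platform_data/at24.h>
	#include <linux/sizes.h>

	static struct at24_platform_data example_eeprom = {
		.byte_len	= SZ_32K / 8,		/* 24c32: 32 kbit */
		.page_size	= 32,
		.flags		= AT24_FLAG_ADDR16,
	};

	static struct i2c_board_info example_i2c_devices[] __initdata = {
		{
			I2C_BOARD_INFO("24c32", 0x50),
			.platform_data = &example_eeprom,
		},
	};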
index dd1fb24521aa85b16baa9d200b40d28774591cfc..f25a569b000989120d758245f01929c1cb7ff461 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/platform_data/pca953x.h>
 #include <linux/input.h>
 #include <linux/input/tps6507x-ts.h>
index 4078ba93776b24ef9438f0dcc73c2f42dd859bd9..7f260b77157a3749851350cbca30272efecb0f5d 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/i2c.h>
 #include <linux/io.h>
 #include <linux/clk.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/leds.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
index 40bb9b5b87e829c2d4b741625141a58732921d9f..f21fde9dce007a6dcc249d0e3458572b8b16977c 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/i2c/pcf857x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
index 2bc3651d56cc8f52757b3703c14daadf2360abeb..db2df32da6a887d4042d810c525b1cf9e59c8753 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/i2c/pcf857x.h>
 
 #include <media/tvp514x.h>
index cd0f58730c2ba63234fb5e673156c10214300a80..7aa105b1fd0f7553170134c9a8dae654c931784d 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/regulator/machine.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/etherdevice.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
index d84360148100831265561696b1e661f16a035361..41c7c961579133d9a80070a2c46ce56c07f1d8d9 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
index ba95e5db25011a0801c2ef1f7c27270b713280ee..c17407b16d7c687292fa6e064978d9ca1699ea0a 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/clk-provider.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/irqchip/chained_irq.h>
+#include <linux/platform_device.h>
 
 #include <asm/proc-fns.h>
 #include <asm/exception.h>
@@ -294,6 +295,16 @@ void exynos5_restart(enum reboot_mode mode, const char *cmd)
        __raw_writel(val, addr);
 }
 
+static struct platform_device exynos_cpuidle = {
+       .name           = "exynos_cpuidle",
+       .id             = -1,
+};
+
+void __init exynos_cpuidle_init(void)
+{
+       platform_device_register(&exynos_cpuidle);
+}
+
 void __init exynos_init_late(void)
 {
        if (of_machine_is_compatible("samsung,exynos5440"))
index 8646a141ae467b8175aed00d0acf39ae73f02097..b2ac1885d381d6581717a111a92f6cec25723abe 100644 (file)
@@ -22,6 +22,7 @@ struct map_desc;
 void exynos_init_io(void);
 void exynos4_restart(enum reboot_mode mode, const char *cmd);
 void exynos5_restart(enum reboot_mode mode, const char *cmd);
+void exynos_cpuidle_init(void);
 void exynos_init_late(void);
 
 void exynos_firmware_init(void);
index ac139226d63c1d1aefe86a7e25087ad7e6da36da..1bde6ad07d93e5a0fe6ed69d7ecc840268be2586 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/io.h>
 #include <linux/export.h>
 #include <linux/time.h>
+#include <linux/platform_device.h>
 
 #include <asm/proc-fns.h>
 #include <asm/smp_scu.h>
@@ -192,7 +193,7 @@ static void __init exynos5_core_down_clk(void)
        __raw_writel(tmp, EXYNOS5_PWR_CTRL2);
 }
 
-static int __init exynos4_init_cpuidle(void)
+static int __init exynos_cpuidle_probe(struct platform_device *pdev)
 {
        int cpu_id, ret;
        struct cpuidle_device *device;
@@ -226,4 +227,13 @@ static int __init exynos4_init_cpuidle(void)
 
        return 0;
 }
-device_initcall(exynos4_init_cpuidle);
+
+static struct platform_driver exynos_cpuidle_driver = {
+       .probe  = exynos_cpuidle_probe,
+       .driver = {
+               .name = "exynos_cpuidle",
+               .owner = THIS_MODULE,
+       },
+};
+
+module_platform_driver(exynos_cpuidle_driver);
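
The hunk above turns the Exynos cpuidle setup from a device_initcall() into a platform driver, so its probe only runs when the machine code registers the matching "exynos_cpuidle" device (exynos_cpuidle_init() earlier, called from the mach-exynos DT hunks that follow). The generic shape of that pattern, with hypothetical names:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int example_feature_probe(struct platform_device *pdev)
	{
		dev_info(&pdev->dev, "feature enabled\n");
		return 0;
	}

	static struct platform_driver example_feature_driver = {
		.probe	= example_feature_probe,
		.driver	= {
			.name	= "example-feature",	/* must match device name */
			.owner	= THIS_MODULE,
		},
	};
	module_platform_driver(example_feature_driver);

	/* SoC/board init side: registering the device triggers probe(). */
	static struct platform_device example_feature_device = {
		.name	= "example-feature",
		.id	= -1,
	};
	/* platform_device_register(&example_feature_device); */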
index 0099c6c13bbaa9504dccd6fa760c21273de41d3f..53a3dc37a7303b0a5bbdbdc202fbe211e6b0dd9e 100644 (file)
@@ -25,6 +25,8 @@
 
 static void __init exynos4_dt_machine_init(void)
 {
+       exynos_cpuidle_init();
+
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
index f874b773ca134e231df3e21d3c5c5f65caac33a9..c9f7dd1cdc8f9fd0f7a9c75fd4bb625ad42366f7 100644 (file)
@@ -47,6 +47,8 @@ static void __init exynos5_dt_machine_init(void)
                }
        }
 
+       exynos_cpuidle_init();
+
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
index 8e8437dea3ce7742da2cd8cae56df82efa2dfe93..e2ca238cf0ea7fef2d22d027eb04550356cbba26 100644 (file)
@@ -8,7 +8,7 @@ config ARCH_HIGHBANK
        select ARM_AMBA
        select ARM_ERRATA_764369
        select ARM_ERRATA_775420
-       select ARM_ERRATA_798181
+       select ARM_ERRATA_798181 if SMP
        select ARM_GIC
        select ARM_TIMER_SP804
        select CACHE_L2X0
index 90372a21087f9ef38535479ccc35aac9e37977dc..699aabe296e1e62b91f7ca1b0a65c5e635ed8515 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/phy.h>
 #include <linux/reboot.h>
 #include <linux/regmap.h>
@@ -226,7 +226,7 @@ static void __init imx6q_opp_check_1p2ghz(struct device *cpu_dev)
        val = readl_relaxed(base + OCOTP_CFG3);
        val >>= OCOTP_CFG3_SPEED_SHIFT;
        if ((val & 0x3) != OCOTP_CFG3_SPEED_1P2GHZ)
-               if (opp_disable(cpu_dev, 1200000000))
+               if (dev_pm_opp_disable(cpu_dev, 1200000000))
                        pr_warn("failed to disable 1.2 GHz OPP\n");
 
 put_node:
index 19bb6441a7d4aaddb106afa71634606acfae7466..c5f95674e9b72c8caa61052a25c7e90521b3ce42 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/dma-mapping.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/eeprom.h>
index bc0261e99d398f1632986cccf9785a58519c809c..20cc53f4cee1f1372c619925d0fe74e47eafd075 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/smsc911x.h>
 #include <linux/interrupt.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/irq.h>
index e805ac273e9c6cc952e400a4f542f9727881650c..592ddbe031ac714bf7d2d0f0519ba38a3957d0ee 100644 (file)
@@ -18,7 +18,7 @@
  */
 
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/io.h>
 #include <linux/mtd/plat-ram.h>
 #include <linux/mtd/physmap.h>
index b726cb1c5fdd638326148cb4ce66783f69dd7659..ac504b67326b12b51ba9c1ccea470b85e74b1dd3 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/ulpi.h>
 
index 0910761e8280894918927554598c1f4d1d1f4b79..8825d1217d189e7d026dff24f7b2e41af54fde4a 100644 (file)
@@ -29,7 +29,7 @@
 #include <asm/mach/time.h>
 
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/mfd/mc13xxx.h>
 
 #include "common.h"
index 489495976fcd794e211d10c82e59e087bd40b79c..8e3e4331c380dabd5c3a818626b4eabc7922776b 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/spi/flash.h>
 #include <linux/spi/spi.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/gpio.h>
 #include <asm/mach/time.h>
 #include <mach/kirkwood.h>
index a7ce69286688434e0e195e3c8a2e24728ff6dc60..d68909b095f1c06b135bac8e07dee46408dd5d40 100644 (file)
@@ -300,7 +300,7 @@ static struct omap_lcd_config osk_lcd_config __initdata = {
 #ifdef CONFIG_OMAP_OSK_MISTRAL
 
 #include <linux/input.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
 
index 33d159e2386e6a86801d7decced3522125c3b8fb..8dd0ec858cf1cc71372cb1e9d97d23232f7ed0de 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_data/gpio-omap.h>
 
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/i2c/twl.h>
 #include <linux/regulator/fixed.h>
 #include <linux/regulator/machine.h>
index 39c78387ddecb1b287ebfe732d7de0f3c62ca4db..87162e1b94a59104a2bca07114da5d8a02816fba 100644 (file)
@@ -129,6 +129,24 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
        .restart        = omap3xxx_restart,
 MACHINE_END
 
+static const char *omap36xx_boards_compat[] __initdata = {
+       "ti,omap36xx",
+       NULL,
+};
+
+DT_MACHINE_START(OMAP36XX_DT, "Generic OMAP36xx (Flattened Device Tree)")
+       .reserve        = omap_reserve,
+       .map_io         = omap3_map_io,
+       .init_early     = omap3630_init_early,
+       .init_irq       = omap_intc_of_init,
+       .handle_irq     = omap3_intc_handle_irq,
+       .init_machine   = omap_generic_init,
+       .init_late      = omap3_init_late,
+       .init_time      = omap3_sync32k_timer_init,
+       .dt_compat      = omap36xx_boards_compat,
+       .restart        = omap3xxx_restart,
+MACHINE_END
+
 static const char *omap3_gp_boards_compat[] __initdata = {
        "ti,omap3-beagle",
        "timll,omap3-devkit8000",
index 87e41a8b8d4666059c43a49c2f4ea953b3327fb2..f7808349a7346642a58994c2b5a7b43f3dddb5da 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/delay.h>
 #include <linux/workqueue.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/input.h>
 #include <linux/err.h>
 #include <linux/clk.h>
index f26918467efcf42cc13639317965ebdb6d34bf14..6432ab8d92078ac4779627d9ae143761bd78ebef 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/gpio.h>
 #include <linux/input.h>
 #include <linux/gpio_keys.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/cpu.h>
 
 #include <linux/mtd/mtd.h>
@@ -522,11 +522,11 @@ static int __init beagle_opp_init(void)
                        return -ENODEV;
                }
                /* Enable MPU 1GHz and lower opps */
-               r = opp_enable(mpu_dev, 800000000);
+               r = dev_pm_opp_enable(mpu_dev, 800000000);
                /* TODO: MPU 1GHz needs SR and ABB */
 
                /* Enable IVA 800MHz and lower opps */
-               r |= opp_enable(iva_dev, 660000000);
+               r |= dev_pm_opp_enable(iva_dev, 660000000);
                /* TODO: DSP 800MHz needs SR and ABB */
                if (r) {
                        pr_err("%s: failed to enable higher opp %d\n",
@@ -535,8 +535,8 @@ static int __init beagle_opp_init(void)
                         * Cleanup - disable the higher freqs - we dont care
                         * about the results
                         */
-                       opp_disable(mpu_dev, 800000000);
-                       opp_disable(iva_dev, 660000000);
+                       dev_pm_opp_disable(mpu_dev, 800000000);
+                       dev_pm_opp_disable(iva_dev, 660000000);
                }
        }
        return 0;
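
This hunk, like the imx6q and omap2 ones in this series, tracks the OPP library rename: <linux/opp.h> becomes <linux/pm_opp.h>, struct opp becomes struct dev_pm_opp, and the opp_*() calls gain a dev_pm_ prefix with unchanged semantics. A sketch of the renamed API with an illustrative CPU device and frequency values:

	#include <linux/cpu.h>
	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <linux/pm_opp.h>
	#include <linux/rcupdate.h>

	static int example_opp_setup(void)
	{
		struct device *cpu_dev = get_cpu_device(0);
		struct dev_pm_opp *opp;
		unsigned long freq = 800000000;

		if (!cpu_dev)
			return -ENODEV;

		/* Register an 800 MHz / 1.2 V OPP, then make it selectable. */
		dev_pm_opp_add(cpu_dev, 800000000, 1200000);
		dev_pm_opp_enable(cpu_dev, 800000000);

		/* Lookups remain RCU-protected in this kernel (see pm.c hunk). */
		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq);
		if (!IS_ERR(opp))
			pr_info("OPP %lu Hz at %lu uV\n", freq,
				dev_pm_opp_get_voltage(opp));
		rcu_read_unlock();

		return 0;
	}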
index ba8342fef799ee10c3248bc75188bad47b5971bf..119efaf5808ab3df5291969f311e555704e84ee2 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/spi/spi.h>
 #include <linux/interrupt.h>
 #include <linux/smsc911x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/usb/phy.h>
 
 #include <asm/mach-types.h>
index c3270c0f1fce47724b0aba71d8f1ea388b94f445..f6fe388af9895ef8c8b2859f9177a146a30fb965 100644 (file)
@@ -167,38 +167,47 @@ static struct lp55xx_led_config rx51_lp5523_led_config[] = {
                .name           = "lp5523:kb1",
                .chan_nr        = 0,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:kb2",
                .chan_nr        = 1,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:kb3",
                .chan_nr        = 2,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:kb4",
                .chan_nr        = 3,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:b",
                .chan_nr        = 4,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:g",
                .chan_nr        = 5,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:r",
                .chan_nr        = 6,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:kb5",
                .chan_nr        = 7,
                .led_current    = 50,
+               .max_current    = 100,
        }, {
                .name           = "lp5523:kb6",
                .chan_nr        = 8,
                .led_current    = 50,
+               .max_current    = 100,
        }
 };
 
index 64b5a83469822ad53693c2ae8ffdd3b227fa2a7b..8b6876c98ce1a320c793e7485c35d23bd3a5b923 100644 (file)
@@ -272,9 +272,19 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
        struct gpmc_timings t;
        int ret;
 
-       if (gpmc_onenand_data->of_node)
+       if (gpmc_onenand_data->of_node) {
                gpmc_read_settings_dt(gpmc_onenand_data->of_node,
                                      &onenand_async);
+               if (onenand_async.sync_read || onenand_async.sync_write) {
+                       if (onenand_async.sync_write)
+                               gpmc_onenand_data->flags |=
+                                       ONENAND_SYNC_READWRITE;
+                       else
+                               gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
+                       onenand_async.sync_read = false;
+                       onenand_async.sync_write = false;
+               }
+       }
 
        omap2_onenand_set_async_mode(onenand_base);
 
index 5d2080ef7923585c1313f598a40961c3b241d1ce..16f78a990d04cafbd7dd1fcaa81b7d7dd061e979 100644 (file)
@@ -28,7 +28,7 @@
 #define OMAP_PULL_UP                   (1 << 4)
 #define OMAP_ALTELECTRICALSEL          (1 << 5)
 
-/* 34xx specific mux bit defines */
+/* omap3/4/5 specific mux bit defines */
 #define OMAP_INPUT_EN                  (1 << 8)
 #define OMAP_OFF_EN                    (1 << 9)
 #define OMAP_OFFOUT_EN                 (1 << 10)
@@ -36,8 +36,6 @@
 #define OMAP_OFF_PULL_EN               (1 << 12)
 #define OMAP_OFF_PULL_UP               (1 << 13)
 #define OMAP_WAKEUP_EN                 (1 << 14)
-
-/* 44xx specific mux bit defines */
 #define OMAP_WAKEUP_EVENT              (1 << 15)
 
 /* Active pin states */
index 67faa7b8fe92fd932889b65d55adbe0c5a97e5de..1d777e63e05c94ccf754bab89a65ca681638c67f 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/device.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 
 /*
  * agent_id values for use with omap_pm_set_min_bus_tput():
index bd41d59a7cab08ecbca342a665f5f944aa67b6bc..ec21e6eb03e133be1c8b1ff2a4733d0f34052abc 100644 (file)
@@ -17,7 +17,7 @@
  * GNU General Public License for more details.
  */
 #include <linux/module.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/cpu.h>
 
 #include "omap_device.h"
@@ -81,14 +81,14 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
                        dev = &oh->od->pdev->dev;
                }
 
-               r = opp_add(dev, opp_def->freq, opp_def->u_volt);
+               r = dev_pm_opp_add(dev, opp_def->freq, opp_def->u_volt);
                if (r) {
                        dev_err(dev, "%s: add OPP %ld failed for %s [%d] result=%d\n",
                                __func__, opp_def->freq,
                                opp_def->hwmod_name, i, r);
                } else {
                        if (!opp_def->default_available)
-                               r = opp_disable(dev, opp_def->freq);
+                               r = dev_pm_opp_disable(dev, opp_def->freq);
                        if (r)
                                dev_err(dev, "%s: disable %ld failed for %s [%d] result=%d\n",
                                        __func__, opp_def->freq,
index e742118fcfd2b69adc367057e6f17670a5136903..2f569b3c3092dbdaf4d3bbc2ec6a85253402db74 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/err.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/export.h>
 #include <linux/suspend.h>
 #include <linux/cpu.h>
@@ -131,7 +131,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
 {
        struct voltagedomain *voltdm;
        struct clk *clk;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long freq, bootup_volt;
        struct device *dev;
 
@@ -172,7 +172,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
        clk_put(clk);
 
        rcu_read_lock();
-       opp = opp_find_freq_ceil(dev, &freq);
+       opp = dev_pm_opp_find_freq_ceil(dev, &freq);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                pr_err("%s: unable to find boot up OPP for vdd_%s\n",
@@ -180,7 +180,7 @@ static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
                goto exit;
        }
 
-       bootup_volt = opp_get_voltage(opp);
+       bootup_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        if (!bootup_volt) {
                pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
index fa74a0625da1a033335ea5fb76d6738889a9a1aa..ead48fa5715e16fb197dfa4fac56b0f71069bd20 100644 (file)
@@ -628,7 +628,7 @@ void __init omap4_local_timer_init(void)
 #endif /* CONFIG_HAVE_ARM_TWD */
 #endif /* CONFIG_ARCH_OMAP4 */
 
-#ifdef CONFIG_SOC_OMAP5
+#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
 void __init omap5_realtime_timer_init(void)
 {
        omap4_sync32k_timer_init();
@@ -636,7 +636,7 @@ void __init omap5_realtime_timer_init(void)
 
        clocksource_of_init();
 }
-#endif /* CONFIG_SOC_OMAP5 */
+#endif /* CONFIG_SOC_OMAP5 || CONFIG_SOC_DRA7XX */
 
 /**
  * omap_timer_init - build and register timer device with an
index a8427115ee07b9a986d62dceac6006f8c9cecafd..96100dbf5a2e8353e9fad34ded41ec5e83dda7ad 100644 (file)
@@ -615,14 +615,12 @@ endmenu
 config PXA25x
        bool
        select CPU_XSCALE
-       select CPU_FREQ_TABLE if CPU_FREQ
        help
          Select code specific to PXA21x/25x/26x variants
 
 config PXA27x
        bool
        select CPU_XSCALE
-       select CPU_FREQ_TABLE if CPU_FREQ
        help
          Select code specific to PXA27x variants
 
@@ -635,7 +633,6 @@ config CPU_PXA26x
 config PXA3xx
        bool
        select CPU_XSC3
-       select CPU_FREQ_TABLE if CPU_FREQ
        help
          Select code specific to PXA3xx variants
 
index 62aea3e835f315266ba887367310ec249503cd21..01de542432a6107b896277da2c5c392c65e9aff6 100644 (file)
@@ -27,7 +27,7 @@
 
 #include <linux/i2c/pxa-i2c.h>
 #include <linux/i2c/pcf857x.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/smc91x.h>
 #include <linux/gpio.h>
 #include <linux/leds.h>
index a83db46320bc6d1e8345ebee8aefd3845f9b347f..4a18d49a63e0b5d7e84dc0018f9473d8f12f164a 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/io.h>
 #include <linux/serial_core.h>
 #include <linux/dm9000.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 #include <linux/platform_device.h>
 #include <linux/gpio_keys.h>
 #include <linux/i2c.h>
index e838ba27e443c1dd0b54042c3e4afd882ae22265..c9808c6841526204144e36a273596e6955696dc8 100644 (file)
@@ -512,6 +512,9 @@ static void __init assabet_map_io(void)
         * Its called GPCLKR0 in my SA1110 manual.
         */
        Ser1SDCR0 |= SDCR0_SUS;
+       MSC1 = (MSC1 & ~0xffff) |
+               MSC_NonBrst | MSC_32BitStMem |
+               MSC_RdAcc(2) | MSC_WrAcc(2) | MSC_Rec(0);
 
        if (!machine_has_neponset())
                sa1100_register_uart_fns(&assabet_port_fns);
index f25b6119e028cc42ee564712fb3ef746961e0574..cb4b2ca3cf6b9e3f4238b13b0aa678c8f8fed7b5 100644 (file)
@@ -42,23 +42,24 @@ EXPORT_SYMBOL(reset_status);
 /*
  * This table is setup for a 3.6864MHz Crystal.
  */
-static const unsigned short cclk_frequency_100khz[NR_FREQS] = {
-        590,   /*  59.0 MHz */
-        737,   /*  73.7 MHz */
-        885,   /*  88.5 MHz */
-       1032,   /* 103.2 MHz */
-       1180,   /* 118.0 MHz */
-       1327,   /* 132.7 MHz */
-       1475,   /* 147.5 MHz */
-       1622,   /* 162.2 MHz */
-       1769,   /* 176.9 MHz */
-       1917,   /* 191.7 MHz */
-       2064,   /* 206.4 MHz */
-       2212,   /* 221.2 MHz */
-       2359,   /* 235.9 MHz */
-       2507,   /* 250.7 MHz */
-       2654,   /* 265.4 MHz */
-       2802    /* 280.2 MHz */
+struct cpufreq_frequency_table sa11x0_freq_table[NR_FREQS+1] = {
+       { .frequency = 59000,   /*  59.0 MHz */},
+       { .frequency = 73700,   /*  73.7 MHz */},
+       { .frequency = 88500,   /*  88.5 MHz */},
+       { .frequency = 103200,  /* 103.2 MHz */},
+       { .frequency = 118000,  /* 118.0 MHz */},
+       { .frequency = 132700,  /* 132.7 MHz */},
+       { .frequency = 147500,  /* 147.5 MHz */},
+       { .frequency = 162200,  /* 162.2 MHz */},
+       { .frequency = 176900,  /* 176.9 MHz */},
+       { .frequency = 191700,  /* 191.7 MHz */},
+       { .frequency = 206400,  /* 206.4 MHz */},
+       { .frequency = 221200,  /* 221.2 MHz */},
+       { .frequency = 235900,  /* 235.9 MHz */},
+       { .frequency = 250700,  /* 250.7 MHz */},
+       { .frequency = 265400,  /* 265.4 MHz */},
+       { .frequency = 280200,  /* 280.2 MHz */},
+       { .frequency = CPUFREQ_TABLE_END, },
 };
 
 /* rounds up(!)  */
@@ -66,10 +67,8 @@ unsigned int sa11x0_freq_to_ppcr(unsigned int khz)
 {
        int i;
 
-       khz /= 100;
-
        for (i = 0; i < NR_FREQS; i++)
-               if (cclk_frequency_100khz[i] >= khz)
+               if (sa11x0_freq_table[i].frequency >= khz)
                        break;
 
        return i;
@@ -79,37 +78,15 @@ unsigned int sa11x0_ppcr_to_freq(unsigned int idx)
 {
        unsigned int freq = 0;
        if (idx < NR_FREQS)
-               freq = cclk_frequency_100khz[idx] * 100;
+               freq = sa11x0_freq_table[idx].frequency;
        return freq;
 }
 
-
-/* make sure that only the "userspace" governor is run -- anything else wouldn't make sense on
- * this platform, anyway.
- */
-int sa11x0_verify_speed(struct cpufreq_policy *policy)
-{
-       unsigned int tmp;
-       if (policy->cpu)
-               return -EINVAL;
-
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
-       /* make sure that at least one frequency is within the policy */
-       tmp = cclk_frequency_100khz[sa11x0_freq_to_ppcr(policy->min)] * 100;
-       if (tmp > policy->max)
-               policy->max = tmp;
-
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
-       return 0;
-}
-
 unsigned int sa11x0_getspeed(unsigned int cpu)
 {
        if (cpu)
                return 0;
-       return cclk_frequency_100khz[PPCR & 0xf] * 100;
+       return sa11x0_freq_table[PPCR & 0xf].frequency;
 }
 
 /*
index 9a33695c9492d238879a43dd64002a88d878df23..cbdfae744dc522183fb3b45918e5461a04aeab67 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Author: Nicolas Pitre
  */
+#include <linux/cpufreq.h>
 #include <linux/reboot.h>
 
 extern void sa1100_timer_init(void);
@@ -19,10 +20,8 @@ extern void sa11x0_init_late(void);
 extern void sa1110_mb_enable(void);
 extern void sa1110_mb_disable(void);
 
-struct cpufreq_policy;
-
+extern struct cpufreq_frequency_table sa11x0_freq_table[];
 extern unsigned int sa11x0_freq_to_ppcr(unsigned int khz);
-extern int sa11x0_verify_speed(struct cpufreq_policy *policy);
 extern unsigned int sa11x0_getspeed(unsigned int cpu);
 extern unsigned int sa11x0_ppcr_to_freq(unsigned int idx);
 
diff --git a/arch/arm/mach-sa1100/include/mach/gpio.h b/arch/arm/mach-sa1100/include/mach/gpio.h
deleted file mode 100644 (file)
index 6a9eecf..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * arch/arm/mach-sa1100/include/mach/gpio.h
- *
- * SA1100 GPIO wrappers for arch-neutral GPIO calls
- *
- * Written by Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ASM_ARCH_SA1100_GPIO_H
-#define __ASM_ARCH_SA1100_GPIO_H
-
-#include <linux/io.h>
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm-generic/gpio.h>
-
-#define __ARM_GPIOLIB_COMPLEX
-
-static inline int gpio_get_value(unsigned gpio)
-{
-       if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
-               return GPLR & GPIO_GPIO(gpio);
-       else
-               return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
-       if (__builtin_constant_p(gpio) && (gpio <= GPIO_MAX))
-               if (value)
-                       GPSR = GPIO_GPIO(gpio);
-               else
-                       GPCR = GPIO_GPIO(gpio);
-       else
-               __gpio_set_value(gpio, value);
-}
-
-#define gpio_cansleep  __gpio_cansleep
-
-#endif
index 7d9df16f04a2276ccceba5b8315cf296988a2334..c810620db53d60235916e8ecf25c6402f2b984b1 100644 (file)
@@ -13,6 +13,8 @@
 #ifndef _INCLUDE_H3XXX_H_
 #define _INCLUDE_H3XXX_H_
 
+#include "hardware.h" /* Gives GPIO_MAX */
+
 /* Physical memory regions corresponding to chip selects */
 #define H3600_EGPIO_PHYS       (SA1100_CS5_PHYS + 0x01000000)
 #define H3600_BANK_2_PHYS      SA1100_CS2_PHYS
index 67a76f2dfb9f62b99351e0e7ecb4d8c877b08d4b..f26428d8b62a18828a08cd0ff42bc08f35e5b206 100644 (file)
@@ -54,7 +54,7 @@ config ARCH_TEGRA_3x_SOC
 config ARCH_TEGRA_114_SOC
        bool "Enable support for Tegra114 family"
        select HAVE_ARM_ARCH_TIMER
-       select ARM_ERRATA_798181
+       select ARM_ERRATA_798181 if SMP
        select ARM_L1_CACHE_SHIFT_6
        select PINCTRL_TEGRA114
        help
index e035cd284a6eb5e3bf1268acb00da3c5784e5ced..64652b37488627e1a3feea9d7b3224dd7773c7a3 100644 (file)
@@ -155,13 +155,3 @@ void tegra_init_fuse(void)
                tegra_sku_id, tegra_cpu_process_id,
                tegra_core_process_id);
 }
-
-unsigned long long tegra_chip_uid(void)
-{
-       unsigned long long lo, hi;
-
-       lo = tegra_fuse_readl(FUSE_UID_LOW);
-       hi = tegra_fuse_readl(FUSE_UID_HIGH);
-       return (hi << 32ull) | lo;
-}
-EXPORT_SYMBOL(tegra_chip_uid);
index 99a28d62829742d9103764c47abcb5ff24894fbf..7a3fc1af6944b2cd9175a46f7c1749c83a090220 100644 (file)
@@ -34,7 +34,6 @@ config UX500_SOC_COMMON
 
 config UX500_SOC_DB8500
        bool
-       select CPU_FREQ_TABLE if CPU_FREQ
        select MFD_DB8500_PRCMU
        select PINCTRL_DB8500
        select PINCTRL_DB8540
index 5f252569c68987d908546b55bb921d5f7693c7af..9a7bd137c8fd27d60f42a6b78c6dbf3cc234d348 100644 (file)
@@ -44,6 +44,10 @@ static struct of_device_id zynq_of_bus_ids[] __initdata = {
        {}
 };
 
+static struct platform_device zynq_cpuidle_device = {
+       .name = "cpuidle-zynq",
+};
+
 /**
  * zynq_init_machine - System specific initialization, intended to be
  *                    called from board specific initialization.
@@ -56,6 +60,8 @@ static void __init zynq_init_machine(void)
        l2x0_of_init(0x02060000, 0xF0F0FFFF);
 
        of_platform_bus_probe(NULL, zynq_of_bus_ids, NULL);
+
+       platform_device_register(&zynq_cpuidle_device);
 }
 
 static void __init zynq_timer_init(void)
index f5e1a8471714cd421e2ab9dee54ae88f144316d1..644d91f73b00f3c5663b9e273d91d51a50218dd5 100644 (file)
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(arm_coherent_dma_ops);
 
 static u64 get_coherent_dma_mask(struct device *dev)
 {
-       u64 mask = (u64)arm_dma_limit;
+       u64 mask = (u64)DMA_BIT_MASK(32);
 
        if (dev) {
                mask = dev->coherent_dma_mask;
@@ -173,10 +173,30 @@ static u64 get_coherent_dma_mask(struct device *dev)
                        return 0;
                }
 
-               if ((~mask) & (u64)arm_dma_limit) {
-                       dev_warn(dev, "coherent DMA mask %#llx is smaller "
-                                "than system GFP_DMA mask %#llx\n",
-                                mask, (u64)arm_dma_limit);
+               /*
+                * If the mask allows for more memory than we can address,
+                * and we actually have that much memory, then fail the
+                * allocation.
+                */
+               if (sizeof(mask) != sizeof(dma_addr_t) &&
+                   mask > (dma_addr_t)~0 &&
+                   dma_to_pfn(dev, ~0) > arm_dma_pfn_limit) {
+                       dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+                                mask);
+                       dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+                       return 0;
+               }
+
+               /*
+                * Now check that the mask, when translated to a PFN,
+                * fits within the allowable addresses which we can
+                * allocate.
+                */
+               if (dma_to_pfn(dev, mask) < arm_dma_pfn_limit) {
+                       dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+                                mask,
+                                dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+                                arm_dma_pfn_limit + 1);
                        return 0;
                }
        }
@@ -1007,8 +1027,27 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-       if (mask < (u64)arm_dma_limit)
+       unsigned long limit;
+
+       /*
+        * If the mask allows for more memory than we can address,
+        * and we actually have that much memory, then we must
+        * indicate that DMA to this device is not supported.
+        */
+       if (sizeof(mask) != sizeof(dma_addr_t) &&
+           mask > (dma_addr_t)~0 &&
+           dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
                return 0;
+
+       /*
+        * Translate the device's DMA mask to a PFN limit.  This
+        * PFN number includes the page which we can DMA to.
+        */
+       limit = dma_to_pfn(dev, mask);
+
+       if (limit < arm_dma_pfn_limit)
+               return 0;
+
        return 1;
 }
 EXPORT_SYMBOL(dma_supported);
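
The rewritten checks above compare a device's coherent DMA mask against arm_dma_pfn_limit via dma_to_pfn() instead of the raw arm_dma_limit address, and reject masks wider than dma_addr_t when that much memory is actually present. On the driver side the contract is unchanged and is sketched here with an illustrative device; the mask set below is what get_coherent_dma_mask() validates at allocation time:

	#include <linux/dma-mapping.h>

	static int example_setup_dma(struct device *dev)
	{
		/* Fails if the platform cannot cover this range for DMA. */
		int ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));

		if (ret)
			dev_warn(dev, "unusable coherent DMA mask\n");
		return ret;
	}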
@@ -1232,7 +1271,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
                                break;
 
                len = (j - i) << PAGE_SHIFT;
-               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               ret = iommu_map(mapping->domain, iova, phys, len,
+                               IOMMU_READ|IOMMU_WRITE);
                if (ret < 0)
                        goto fail;
                iova += len;
@@ -1431,6 +1471,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                                         GFP_KERNEL);
 }
 
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+       int prot;
+
+       switch (dir) {
+       case DMA_BIDIRECTIONAL:
+               prot = IOMMU_READ | IOMMU_WRITE;
+               break;
+       case DMA_TO_DEVICE:
+               prot = IOMMU_READ;
+               break;
+       case DMA_FROM_DEVICE:
+               prot = IOMMU_WRITE;
+               break;
+       default:
+               prot = 0;
+       }
+
+       return prot;
+}
+
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1444,6 +1505,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
        int ret = 0;
        unsigned int count;
        struct scatterlist *s;
+       int prot;
 
        size = PAGE_ALIGN(size);
        *handle = DMA_ERROR_CODE;
@@ -1460,7 +1522,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                        !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                        __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-               ret = iommu_map(mapping->domain, iova, phys, len, 0);
+               prot = __dma_direction_to_prot(dir);
+
+               ret = iommu_map(mapping->domain, iova, phys, len, prot);
                if (ret < 0)
                        goto fail;
                count += len >> PAGE_SHIFT;
@@ -1665,19 +1729,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;
 
-       switch (dir) {
-       case DMA_BIDIRECTIONAL:
-               prot = IOMMU_READ | IOMMU_WRITE;
-               break;
-       case DMA_TO_DEVICE:
-               prot = IOMMU_READ;
-               break;
-       case DMA_FROM_DEVICE:
-               prot = IOMMU_WRITE;
-               break;
-       default:
-               prot = 0;
-       }
+       prot = __dma_direction_to_prot(dir);
 
        ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
        if (ret < 0)
index 83cb3ac27095146f3f60c04047c6b212856a73b2..8e0e52eb76b57d7f9d4208fafbcdf024be369c75 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/system_info.h>
 
 pgd_t *idmap_pgd;
+phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
 
 #ifdef CONFIG_ARM_LPAE
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
@@ -67,8 +68,9 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start,
        unsigned long addr, end;
        unsigned long next;
 
-       addr = virt_to_phys(text_start);
-       end = virt_to_phys(text_end);
+       addr = virt_to_idmap(text_start);
+       end = virt_to_idmap(text_end);
+       pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end);
 
        prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 
@@ -90,8 +92,6 @@ static int __init init_static_idmap(void)
        if (!idmap_pgd)
                return -ENOMEM;
 
-       pr_info("Setting up static identity map for 0x%p - 0x%p\n",
-               __idmap_text_start, __idmap_text_end);
        identity_mapping_add(idmap_pgd, __idmap_text_start,
                             __idmap_text_end, 0);
 
index febaee7ca57be76487290a2ac45c6cf74e53759c..2a3fa425c52c4062addaf897951d3b8265819764 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
 #include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
@@ -218,6 +217,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
  * so a successful GFP_DMA allocation will always satisfy this.
  */
 phys_addr_t arm_dma_limit;
+unsigned long arm_dma_pfn_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
@@ -240,6 +240,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
+       arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
 #endif
 }
 
@@ -379,8 +380,6 @@ void __init arm_memblock_init(struct meminfo *mi,
        if (mdesc->reserve)
                mdesc->reserve();
 
-       early_init_dt_scan_reserved_mem();
-
        /*
         * reserve memory for DMA contigouos allocations,
         * must come from DMA area inside low memory
@@ -424,12 +423,10 @@ void __init bootmem_init(void)
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
-        *
-        * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
-        * the system, not the maximum PFN.
         */
-       max_low_pfn = max_low - PHYS_PFN_OFFSET;
-       max_pfn = max_high - PHYS_PFN_OFFSET;
+       min_low_pfn = min;
+       max_low_pfn = max_low;
+       max_pfn = max_high;
 }
 
 /*
@@ -535,7 +532,7 @@ static inline void free_area_high(unsigned long pfn, unsigned long end)
 static void __init free_highpages(void)
 {
 #ifdef CONFIG_HIGHMEM
-       unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
+       unsigned long max_low = max_low_pfn;
        struct memblock_region *mem, *res;
 
        /* set highmem page free */
index d5a4e9ad8f0f68549947100081e3b9f8ac2e1b3a..d5a982d15a88e2a37a524267f31633f497aa0d4b 100644 (file)
@@ -81,8 +81,10 @@ extern __init void add_static_vm_early(struct static_vm *svm);
 
 #ifdef CONFIG_ZONE_DMA
 extern phys_addr_t arm_dma_limit;
+extern unsigned long arm_dma_pfn_limit;
 #else
 #define arm_dma_limit ((phys_addr_t)~0)
+#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
 #endif
 
 extern phys_addr_t arm_lowmem_limit;
index 0c6356255fe31122f2527a0a2947439e69b0e953..d27158c38eb0b190b869e028b93d8265fb90969e 100644 (file)
@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
 }
 
 /*
- * We don't use supersection mappings for mmap() on /dev/mem, which
- * means that we can't map the memory area above the 4G barrier into
- * userspace.
+ * Do not allow /dev/mem mappings beyond the supported physical range.
  */
 int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 {
-       return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
+       return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
 }
 
 #ifdef CONFIG_STRICT_DEVMEM
index b1d17eeb59b895cd429e762d082d6c5c56c3ff57..78eeeca78f5ab331707fcd73b4956c503c2d880b 100644 (file)
@@ -28,6 +28,8 @@
 #include <asm/highmem.h>
 #include <asm/system_info.h>
 #include <asm/traps.h>
+#include <asm/procinfo.h>
+#include <asm/memory.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -1315,6 +1317,86 @@ static void __init map_lowmem(void)
        }
 }
 
+#ifdef CONFIG_ARM_LPAE
+/*
+ * early_paging_init() recreates boot time page table setup, allowing machines
+ * to switch over to a high (>4G) address space on LPAE systems
+ */
+void __init early_paging_init(const struct machine_desc *mdesc,
+                             struct proc_info_list *procinfo)
+{
+       pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
+       unsigned long map_start, map_end;
+       pgd_t *pgd0, *pgdk;
+       pud_t *pud0, *pudk, *pud_start;
+       pmd_t *pmd0, *pmdk;
+       phys_addr_t phys;
+       int i;
+
+       if (!(mdesc->init_meminfo))
+               return;
+
+       /* remap kernel code and data */
+       map_start = init_mm.start_code;
+       map_end   = init_mm.brk;
+
+       /* get a handle on things... */
+       pgd0 = pgd_offset_k(0);
+       pud_start = pud0 = pud_offset(pgd0, 0);
+       pmd0 = pmd_offset(pud0, 0);
+
+       pgdk = pgd_offset_k(map_start);
+       pudk = pud_offset(pgdk, map_start);
+       pmdk = pmd_offset(pudk, map_start);
+
+       mdesc->init_meminfo();
+
+       /* Run the patch stub to update the constants */
+       fixup_pv_table(&__pv_table_begin,
+               (&__pv_table_end - &__pv_table_begin) << 2);
+
+       /*
+        * Cache cleaning operations for self-modifying code
+        * We should clean the entries by MVA but running a
+        * for loop over every pv_table entry pointer would
+        * just complicate the code.
+        */
+       flush_cache_louis();
+       dsb();
+       isb();
+
+       /* remap level 1 table */
+       for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
+               set_pud(pud0,
+                       __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
+               pmd0 += PTRS_PER_PMD;
+       }
+
+       /* remap pmds for kernel mapping */
+       phys = __pa(map_start) & PMD_MASK;
+       do {
+               *pmdk++ = __pmd(phys | pmdprot);
+               phys += PMD_SIZE;
+       } while (phys < map_end);
+
+       flush_cache_all();
+       cpu_switch_mm(pgd0, &init_mm);
+       cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+       local_flush_bp_all();
+       local_flush_tlb_all();
+}
+
+#else
+
+void __init early_paging_init(const struct machine_desc *mdesc,
+                             struct proc_info_list *procinfo)
+{
+       if (mdesc->init_meminfo)
+               mdesc->init_meminfo();
+}
+
+#endif
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
index f50d223a0bd31271ed73beabd60679c137ccb440..99b44e0e8d866983dd60d4b8324bf29010679e77 100644 (file)
@@ -930,4 +930,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter)
                module_free(NULL, fp->bpf_func);
+       kfree(fp);
 }
index 11c4259c62fb146b8139b7045958f68d6adc3765..4399364214349674999c872ca4779c7089c22160 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* __ASM_AVR32_SOCKET_H */
index f78c9a2c7e281f2d441a40719e6735226231ceaa..eb382aedd9a232c9d85173356bf4ed44d33fb6b5 100644 (file)
@@ -1429,7 +1429,6 @@ source "drivers/cpufreq/Kconfig"
 config BFIN_CPU_FREQ
        bool
        depends on CPU_FREQ
-       select CPU_FREQ_TABLE
        default y
 
 config CPU_VOLTAGE
index 957dd00ea561ce3881a0af87394d349fab04f449..b4f77258caccf5fa9de1231e6ab4b1716ddc5ac6 100644 (file)
@@ -105,10 +105,6 @@ menu "Processor type and features"
 
 source "arch/c6x/platforms/Kconfig"
 
-config TMS320C6X_CACHES_ON
-       bool "L2 cache support"
-       default y
-
 config KERNEL_RAM_BASE_ADDRESS
        hex "Virtual address of memory base"
        default 0xe0000000 if SOC_TMS320C6455
index 02380bed189c6fda4920412a18f1baab6203d1c1..9c957c81c6885acd595c42d1c470a1390cef6feb 100644 (file)
@@ -130,13 +130,11 @@ config SVINTO_SIM
 
 config ETRAXFS
        bool "ETRAX-FS-V32"
-       select CPU_FREQ_TABLE if CPU_FREQ
        help
          Support CRIS V32.
 
 config CRIS_MACH_ARTPEC3
         bool "ARTPEC-3"
-       select CPU_FREQ_TABLE if CPU_FREQ
         help
           Support Axis ARTPEC-3.
 
index eb723e51554e559ef73d1196a455a96583a5cf51..13829aaaeec565b8e53726e95fcf4e7fde638c25 100644 (file)
@@ -78,6 +78,8 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
 
 
index f0cb1c3411638e7b33bee915739881635c59c9bc..5d4299762426b108057a33faf71b85efdfe4f74b 100644 (file)
@@ -76,5 +76,7 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
 
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
deleted file mode 100644 (file)
index 24b1dc2..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-config H8300
-       bool
-       default y
-       select HAVE_IDE
-       select GENERIC_ATOMIC64
-       select HAVE_UID16
-       select VIRT_TO_BUS
-       select ARCH_WANT_IPC_PARSE_VERSION
-       select GENERIC_IRQ_SHOW
-       select GENERIC_CPU_DEVICES
-       select MODULES_USE_ELF_RELA
-       select OLD_SIGSUSPEND3
-       select OLD_SIGACTION
-       select HAVE_UNDERSCORE_SYMBOL_PREFIX
-
-config MMU
-       bool
-       default n
-
-config SWAP
-       bool
-       default n
-
-config ZONE_DMA
-       bool
-       default y
-
-config FPU
-       bool
-       default n
-
-config RWSEM_GENERIC_SPINLOCK
-       bool
-       default y
-
-config RWSEM_XCHGADD_ALGORITHM
-       bool
-       default n
-
-config ARCH_HAS_ILOG2_U32
-       bool
-       default n
-
-config ARCH_HAS_ILOG2_U64
-       bool
-       default n
-
-config GENERIC_HWEIGHT
-       bool
-       default y
-
-config GENERIC_CALIBRATE_DELAY
-       bool
-       default y
-
-config GENERIC_BUG
-        bool
-        depends on BUG
-
-config TIME_LOW_RES
-       bool
-       default y
-
-config NO_IOPORT
-       def_bool y
-
-config NO_DMA
-       def_bool y
-
-config ISA
-       bool
-       default y
-
-config PCI
-       bool
-       default n
-
-config HZ
-       int
-       default 100
-
-source "init/Kconfig"
-
-source "kernel/Kconfig.freezer"
-
-source "arch/h8300/Kconfig.cpu"
-
-menu "Executable file formats"
-
-source "fs/Kconfig.binfmt"
-
-endmenu
-
-source "net/Kconfig"
-
-source "drivers/Kconfig"
-
-source "arch/h8300/Kconfig.ide"
-
-source "fs/Kconfig"
-
-source "arch/h8300/Kconfig.debug"
-
-source "security/Kconfig"
-
-source "crypto/Kconfig"
-
-source "lib/Kconfig"
diff --git a/arch/h8300/Kconfig.cpu b/arch/h8300/Kconfig.cpu
deleted file mode 100644 (file)
index cdee771..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-menu "Processor type and features"
-
-choice
-       prompt "H8/300 platform"
-       default H8300H_GENERIC
-
-config H8300H_GENERIC
-       bool "H8/300H Generic"
-       help
-         H8/300H CPU Generic Hardware Support
-
-config H8300H_AKI3068NET
-       bool "AE-3068/69"
-       select H83068
-       help
-         AKI-H8/3068F / AKI-H8/3069F Flashmicom LAN Board Support
-         More Information. (Japanese Only)
-         <http://akizukidenshi.com/catalog/default.aspx>
-         AE-3068/69 Evaluation Board Support
-         More Information.
-         <http://www.microtronique.com/ae3069lan.htm>
-
-config H8300H_H8MAX
-       bool "H8MAX"
-       select H83068
-       help
-         H8MAX Evaluation Board Support
-         More Information. (Japanese Only)
-         <http://strawberry-linux.com/h8/index.html>
-
-config H8300H_SIM
-       bool "H8/300H Simulator"
-       select H83007
-       help
-         GDB Simulator Support
-         More Information.
-         <http://sourceware.org/sid/>
-
-config H8S_GENERIC
-       bool "H8S Generic"
-       help
-         H8S CPU Generic Hardware Support
-
-config H8S_EDOSK2674
-       bool "EDOSK-2674"
-       select H8S2678
-       help
-         Renesas EDOSK-2674 Evaluation Board Support
-         More Information.
-         <http://www.azpower.com/H8-uClinux/index.html>
-         <http://www.renesas.eu/products/tools/introductory_evaluation_tools/evaluation_development_os_kits/edosk2674r/edosk2674r_software_tools_root.jsp>
-
-config H8S_SIM
-       bool "H8S Simulator"
-       help
-         GDB Simulator Support
-         More Information.
-         <http://sourceware.org/sid/>
-
-endchoice
-
-choice
-       prompt "CPU Selection"
-
-config H83002
-       bool "H8/3001,3002,3003"
-       depends on BROKEN
-       select CPU_H8300H
-
-config H83007
-       bool "H8/3006,3007"
-       select CPU_H8300H
-
-config H83048
-       bool "H8/3044,3045,3046,3047,3048,3052"
-       depends on BROKEN
-       select CPU_H8300H
-
-config H83068
-       bool "H8/3065,3066,3067,3068,3069"
-       select CPU_H8300H
-
-config H8S2678
-       bool "H8S/2670,2673,2674R,2675,2676"
-       select CPU_H8S
-
-endchoice
-
-config CPU_CLOCK
-       int "CPU Clock Frequency (/1KHz)"
-       default "20000"
-       help
-         CPU Clock Frequency divide to 1000
-
-choice
-       prompt "Kernel executes from"
-       ---help---
-         Choose the memory type that the kernel will be running in.
-
-config RAMKERNEL
-       bool "RAM"
-       help
-         The kernel will be resident in RAM when running.
-
-config ROMKERNEL
-       bool "ROM"
-       help
-         The kernel will be resident in FLASH/ROM when running.
-endchoice
-
-
-config CPU_H8300H
-       bool
-       depends on (H83002 || H83007 || H83048 || H83068)
-       default y
-
-config CPU_H8S
-       bool
-       depends on H8S2678
-       default y
-
-choice
-       prompt "Timer"
-config H8300_TIMER8
-       bool "8bit timer (2ch cascade)"
-       depends on (H83007 || H83068 || H8S2678)
-
-config H8300_TIMER16
-       bool "16bit timer"
-       depends on (H83007 || H83068)
-
-config H8300_ITU
-       bool "ITU"
-       depends on (H83002 || H83048)
-
-config H8300_TPU
-       bool "TPU"
-       depends on H8S2678
-endchoice
-
-if H8300_TIMER8
-choice
-       prompt "Timer Channel"
-config H8300_TIMER8_CH0
-       bool "Channel 0"
-config H8300_TIMER8_CH2
-       bool "Channel 2"
-       depends on CPU_H8300H
-endchoice
-endif
-
-config H8300_TIMER16_CH
-       int "16bit timer channel (0 - 2)"
-       depends on H8300_TIMER16
-       range 0 2
-
-config H8300_ITU_CH
-       int "ITU channel"
-       depends on H8300_ITU
-       range 0 4
-
-config H8300_TPU_CH
-       int "TPU channel"
-       depends on H8300_TPU
-       range 0 4
-
-source "kernel/Kconfig.preempt"
-
-source "mm/Kconfig"
-
-endmenu
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
deleted file mode 100644 (file)
index e8d1b23..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-menu "Kernel hacking"
-
-source "lib/Kconfig.debug"
-
-config FULLDEBUG
-       bool "Full Symbolic/Source Debugging support"
-       help
-         Enable debugging symbols on kernel build.
-
-config HIGHPROFILE
-       bool "Use fast second timer for profiling"
-       help
-         Use a fast secondary clock to produce profiling information.
-
-config NO_KERNEL_MSG
-       bool "Suppress Kernel BUG Messages"
-       help
-         Do not output any debug BUG messages within the kernel.
-
-config GDB_MAGICPRINT
-       bool "Message Output for GDB MagicPrint service"
-       depends on (H8300H_SIM || H8S_SIM)
-       help
-         kernel messages output using MagicPrint service from GDB
-
-config SYSCALL_PRINT
-       bool "SystemCall trace print"
-       help
-         output history of systemcall
-
-config GDB_DEBUG
-       bool "Use gdb stub"
-       depends on (!H8300H_SIM && !H8S_SIM)
-       help
-         gdb stub exception support
-
-config SH_STANDARD_BIOS
-       bool "Use gdb protocol serial console"
-       depends on (!H8300H_SIM && !H8S_SIM)
-       help
-         serial console output using GDB protocol.
-         Require eCos/RedBoot
-
-config DEFAULT_CMDLINE
-       bool "Use builtin commandline"
-       default n
-       help
-         builtin kernel commandline enabled.
-
-config KERNEL_COMMAND
-       string "Buildin command string"
-       depends on DEFAULT_CMDLINE
-       help
-         builtin kernel commandline strings.
-
-config BLKDEV_RESERVE
-       bool "BLKDEV Reserved Memory"
-       default n
-       help
-         Reserved BLKDEV area.
-
-config BLKDEV_RESERVE_ADDRESS
-       hex 'start address'
-       depends on BLKDEV_RESERVE
-       help
-         BLKDEV start address.
-
-endmenu
diff --git a/arch/h8300/Kconfig.ide b/arch/h8300/Kconfig.ide
deleted file mode 100644 (file)
index a38a630..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# uClinux H8/300 Target Board Selection Menu (IDE)
-
-if (H8300H_AKI3068NET)
-menu "IDE Extra configuration"
-
-config H8300_IDE_BASE
-       hex "IDE register base address"
-       depends on IDE
-       default 0
-       help
-         IDE registers base address
-
-config H8300_IDE_ALT
-       hex "IDE register alternate address"
-       depends on IDE
-       default 0
-       help
-         IDE alternate registers address
-
-config H8300_IDE_IRQ
-       int "IDE IRQ no"
-       depends on IDE
-       default 0
-       help
-         IDE use IRQ no
-endmenu
-endif
-
-if (H8300H_H8MAX)
-config H8300_IDE_BASE
-       hex
-       depends on IDE
-       default 0x200000
-
-config H8300_IDE_ALT
-       hex
-       depends on IDE
-       default 0x60000c
-
-config H8300_IDE_IRQ
-       int
-       depends on IDE
-       default 5
-endif
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
deleted file mode 100644 (file)
index a556447..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# arch/h8300/Makefile
-#
-# This file is subject to the terms and conditions of the GNU General Public
-# License.  See the file "COPYING" in the main directory of this archive
-# for more details.
-#
-# (C) Copyright 2002,2003 Yoshinori Sato <ysato@users.sourceforge.jp>
-#
-
-platform-$(CONFIG_CPU_H8300H)  := h8300h
-platform-$(CONFIG_CPU_H8S)     := h8s
-PLATFORM := $(platform-y)
-
-board-$(CONFIG_H8300H_GENERIC)         := generic
-board-$(CONFIG_H8300H_AKI3068NET)      := aki3068net
-board-$(CONFIG_H8300H_H8MAX)           := h8max
-board-$(CONFIG_H8300H_SIM)             := generic
-board-$(CONFIG_H8S_GENERIC)            := generic
-board-$(CONFIG_H8S_EDOSK2674)          := edosk2674
-board-$(CONFIG_H8S_SIM)                        := generic
-BOARD := $(board-y)
-
-model-$(CONFIG_RAMKERNEL)      := ram
-model-$(CONFIG_ROMKERNEL)      := rom
-MODEL := $(model-y)
-
-cflags-$(CONFIG_CPU_H8300H)    := -mh
-ldflags-$(CONFIG_CPU_H8300H)   := -mh8300helf
-cflags-$(CONFIG_CPU_H8S)       := -ms
-ldflags-$(CONFIG_CPU_H8S)      := -mh8300self
-
-KBUILD_CFLAGS += $(cflags-y)
-KBUILD_CFLAGS += -mint32 -fno-builtin
-KBUILD_CFLAGS += -g
-KBUILD_CFLAGS += -D__linux__
-KBUILD_CFLAGS += -DUTS_SYSNAME=\"uClinux\"
-KBUILD_AFLAGS += -DPLATFORM=$(PLATFORM) -DMODEL=$(MODEL) $(cflags-y)
-LDFLAGS += $(ldflags-y)
-
-CROSS_COMPILE = h8300-elf-
-LIBGCC := $(shell $(CROSS_COMPILE)$(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
-head-y := arch/$(ARCH)/platform/$(PLATFORM)/$(BOARD)/crt0_$(MODEL).o
-
-core-y += arch/$(ARCH)/kernel/ \
-          arch/$(ARCH)/mm/
-ifdef PLATFORM
-core-y += arch/$(ARCH)/platform/$(PLATFORM)/ \
-          arch/$(ARCH)/platform/$(PLATFORM)/$(BOARD)/
-endif
-
-libs-y += arch/$(ARCH)/lib/ $(LIBGCC)
-
-boot := arch/h8300/boot
-
-export MODEL PLATFORM BOARD
-
-archmrproper:
-
-archclean:
-       $(Q)$(MAKE) $(clean)=$(boot)
-
-vmlinux.srec vmlinux.bin zImage: vmlinux
-       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-
-define archhelp
-  @echo  'vmlinux.bin  - Create raw binary'
-  @echo  'vmlinux.srec - Create srec binary'
-  @echo  'zImage       - Compressed kernel image'
-endef
diff --git a/arch/h8300/README b/arch/h8300/README
deleted file mode 100644 (file)
index efa805f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-linux-2.6 for H8/300 README
-Yoshinori Sato <ysato@users.sourceforge.jp>
-
-* Supported CPUs
-H8/300H and H8S
-
-* Supported Targets
-1. GDB simulator
-  Requires patches.
-
-2. AE 3068/AE 3069
-  For more information, see:
-  MICROTRONIQUE <http://www.microtronique.com/>
-  Akizuki Denshi Tsusho Ltd. <http://akizukidenshi.com/> (Japanese only)
-
-3. H8MAX
-  See http://ip-sol.jp/h8max/ (Japanese only)
-
-4. EDOSK2674
-  See http://www.eu.renesas.com/products/mpumcu/tool/edk/support/edosk2674.html
-      http://www.uclinux.org/pub/uClinux/ports/h8/HITACHI-EDOSK2674-HOWTO
-      http://www.azpower.com/H8-uClinux/
-
-* Toolchain Versions
-gcc-3.1 or higher, with the patch described in
-arch/h8300/tools_patch/README
-binutils-2.12 or higher
-gdb-5.2 or higher
-An environment that can compile h8300-elf binaries is required.
-
-* Userland development environment
-Use the h8300-elf toolchains;
-see http://www.uclinux.org/pub/uClinux/ports/h8/
-
-* A few words of thanks
-The port to the H8/300 series was supported by the
-Information-technology Promotion Agency, Japan.
-Thanks for that support, and to all developers and users.
diff --git a/arch/h8300/boot/Makefile b/arch/h8300/boot/Makefile
deleted file mode 100644 (file)
index 0bb62e0..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-# arch/h8300/boot/Makefile
-
-targets := vmlinux.srec vmlinux.bin zImage
-subdir- := compressed
-
-OBJCOPYFLAGS_vmlinux.srec := -Osrec
-OBJCOPYFLAGS_vmlinux.bin  := -Obinary
-OBJCOPYFLAGS_zImage := -O binary -R .note -R .comment -R .stab -R .stabstr -S
-
-$(obj)/vmlinux.srec $(obj)/vmlinux.bin:  vmlinux FORCE
-       $(call if_changed,objcopy)
-       @echo '  Kernel: $@ is ready'
-
-$(obj)/zImage: $(obj)/compressed/vmlinux FORCE
-       $(call if_changed,objcopy)
-       @echo '  Kernel: $@ is ready'
-
-$(obj)/compressed/vmlinux: FORCE
-       $(Q)$(MAKE) $(build)=$(obj)/compressed $@
-
-CLEAN_FILES += arch/$(ARCH)/vmlinux.bin arch/$(ARCH)/vmlinux.srec
-
diff --git a/arch/h8300/boot/compressed/Makefile b/arch/h8300/boot/compressed/Makefile
deleted file mode 100644 (file)
index a6c98fe..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-# linux/arch/h8300/boot/compressed/Makefile
-#
-# create a compressed vmlinux image from the original vmlinux
-#
-
-targets                := vmlinux vmlinux.bin vmlinux.bin.gz head.o misc.o piggy.o
-asflags-y      := -traditional
-
-OBJECTS = $(obj)/head.o $(obj)/misc.o
-
-#
-# IMAGE_OFFSET is the load offset of the compression loader
-# Assign dummy values if these 2 variables are not defined,
-# in order to suppress error message.
-#
-CONFIG_MEMORY_START     ?= 0x00400000
-CONFIG_BOOT_LINK_OFFSET ?= 0x00140000
-IMAGE_OFFSET := $(shell printf "0x%08x" $$(($(CONFIG_MEMORY_START)+$(CONFIG_BOOT_LINK_OFFSET))))
-
-LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -estartup $(obj)/vmlinux.lds
-
-$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o FORCE
-       $(call if_changed,ld)
-       @:
-
-$(obj)/vmlinux.bin: vmlinux FORCE
-       $(call if_changed,objcopy)
-
-$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
-       $(call if_changed,gzip)
-
-LDFLAGS_piggy.o := -r --format binary --oformat elf32-h8300 -T
-OBJCOPYFLAGS := -O binary
-
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
-       $(call if_changed,ld)
diff --git a/arch/h8300/boot/compressed/head.S b/arch/h8300/boot/compressed/head.S
deleted file mode 100644 (file)
index 10e9a2d..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- *  linux/arch/h8300/boot/compressed/head.S
- *
- *  Copyright (C) 2006 Yoshinori Sato
- */
-
-       .h8300h
-#include <linux/linkage.h>
-
-#define SRAM_START 0xff4000
-
-       .section        .text..startup
-       .global startup
-startup:
-       mov.l   #SRAM_START+0x8000, sp
-       mov.l   #__sbss, er0
-       mov.l   #__ebss, er1
-       sub.l   er0, er1
-       shlr    er1
-       shlr    er1
-       sub.l   er2, er2
-1:
-       mov.l   er2, @er0
-       adds    #4, er0
-       dec.l   #1, er1
-       bne     1b
-       jsr     @_decompress_kernel
-       jmp     @0x400000
-
-       .align  9
-fake_headers_as_bzImage:
-       .word   0
-       .ascii  "HdrS"          ; header signature
-       .word   0x0202          ; header version number (>= 0x0105,
-                               ; or else old loadlin-1.5 will fail)
-       .word   0               ; default_switch
-       .word   0               ; SETUPSEG
-       .word   0x1000
-       .word   0               ; pointing to kernel version string
-       .byte   0               ; = 0, old one (LILO, Loadlin,
-                               ; 0xTV: T=0 for LILO
-                               ;       V = version
-       .byte   1               ; Load flags bzImage=1
-       .word   0x8000          ; size to move, when setup is not
-       .long   0x100000        ; 0x100000 = default for big kernel
-       .long   0               ; address of loaded ramdisk image
-       .long   0               ; its size in bytes
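The startup stub above zeroes the decompressor's BSS before anything else runs: it takes the byte span between __sbss and __ebss, shifts it right twice to turn it into a count of 32-bit words, stores zero in a tight loop, then calls decompress_kernel() and jumps to the kernel at 0x400000. The standalone C sketch below only mirrors that clearing loop for illustration; the array standing in for the BSS region and the main() harness are invented here and are not part of the removed code.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the region between the __sbss and __ebss linker symbols. */
    static uint32_t fake_bss[16] = { 1, 2, 3, 4 };

    static void clear_bss(uint32_t *sbss, uint32_t *ebss)
    {
        /* byte length shifted right twice -> number of 32-bit words,
         * matching the shlr/shlr pair in the assembly above */
        uint32_t words = (uint32_t)((char *)ebss - (char *)sbss) >> 2;

        while (words--)
            *sbss++ = 0;        /* mirrors the mov.l er2,@er0 / dec.l loop */
    }

    int main(void)
    {
        clear_bss(fake_bss, fake_bss + 16);
        printf("first word after clearing: %u\n", (unsigned)fake_bss[0]);
        return 0;
    }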
diff --git a/arch/h8300/boot/compressed/misc.c b/arch/h8300/boot/compressed/misc.c
deleted file mode 100644 (file)
index 4a1e3dd..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * arch/h8300/boot/compressed/misc.c
- *
- * This is a collection of several routines from gzip-1.0.3
- * adapted for Linux.
- *
- * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
- *
- * Adapted for h8300 by Yoshinori Sato 2006
- */
-
-#include <asm/uaccess.h>
-
-/*
- * gzip declarations
- */
-
-#define OF(args)  args
-#define STATIC static
-
-#undef memset
-#undef memcpy
-#define memzero(s, n)     memset ((s), 0, (n))
-
-typedef unsigned char  uch;
-typedef unsigned short ush;
-typedef unsigned long  ulg;
-
-#define WSIZE 0x8000           /* Window size must be at least 32k, */
-                               /* and a power of two */
-
-static uch *inbuf;          /* input buffer */
-static uch window[WSIZE];    /* Sliding window buffer */
-
-static unsigned insize = 0;  /* valid bytes in inbuf */
-static unsigned inptr = 0;   /* index of next byte to be processed in inbuf */
-static unsigned outcnt = 0;  /* bytes in output buffer */
-
-/* gzip flag byte */
-#define ASCII_FLAG   0x01 /* bit 0 set: file probably ASCII text */
-#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
-#define EXTRA_FIELD  0x04 /* bit 2 set: extra field present */
-#define ORIG_NAME    0x08 /* bit 3 set: original file name present */
-#define COMMENT      0x10 /* bit 4 set: file comment present */
-#define ENCRYPTED    0x20 /* bit 5 set: file is encrypted */
-#define RESERVED     0xC0 /* bit 6,7:   reserved */
-
-#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
-
-/* Diagnostic functions */
-#ifdef DEBUG
-#  define Assert(cond,msg) {if(!(cond)) error(msg);}
-#  define Trace(x) fprintf x
-#  define Tracev(x) {if (verbose) fprintf x ;}
-#  define Tracevv(x) {if (verbose>1) fprintf x ;}
-#  define Tracec(c,x) {if (verbose && (c)) fprintf x ;}
-#  define Tracecv(c,x) {if (verbose>1 && (c)) fprintf x ;}
-#else
-#  define Assert(cond,msg)
-#  define Trace(x)
-#  define Tracev(x)
-#  define Tracevv(x)
-#  define Tracec(c,x)
-#  define Tracecv(c,x)
-#endif
-
-static int  fill_inbuf(void);
-static void flush_window(void);
-static void error(char *m);
-
-extern char input_data[];
-extern int input_len;
-
-static long bytes_out = 0;
-static uch *output_data;
-static unsigned long output_ptr = 0;
-
-static void error(char *m);
-
-int puts(const char *);
-
-extern int _end;
-static unsigned long free_mem_ptr;
-static unsigned long free_mem_end_ptr;
-
-#define HEAP_SIZE             0x10000
-
-#include "../../../../lib/inflate.c"
-
-#define SCR *((volatile unsigned char *)0xffff8a)
-#define TDR *((volatile unsigned char *)0xffff8b)
-#define SSR *((volatile unsigned char *)0xffff8c)
-
-int puts(const char *s)
-{
-       return 0;
-}
-
-void* memset(void* s, int c, size_t n)
-{
-       int i;
-       char *ss = (char*)s;
-
-       for (i=0;i<n;i++) ss[i] = c;
-       return s;
-}
-
-void* memcpy(void* __dest, __const void* __src,
-                           size_t __n)
-{
-       int i;
-       char *d = (char *)__dest, *s = (char *)__src;
-
-       for (i=0;i<__n;i++) d[i] = s[i];
-       return __dest;
-}
-
-/* ===========================================================================
- * Fill the input buffer. This is called only when the buffer is empty
- * and at least one byte is really needed.
- */
-static int fill_inbuf(void)
-{
-       if (insize != 0) {
-               error("ran out of input data");
-       }
-
-       inbuf = input_data;
-       insize = input_len;
-       inptr = 1;
-       return inbuf[0];
-}
-
-/* ===========================================================================
- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
- * (Used for the decompressed data only.)
- */
-static void flush_window(void)
-{
-    ulg c = crc;         /* temporary variable */
-    unsigned n;
-    uch *in, *out, ch;
-
-    in = window;
-    out = &output_data[output_ptr];
-    for (n = 0; n < outcnt; n++) {
-           ch = *out++ = *in++;
-           c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-    }
-    crc = c;
-    bytes_out += (ulg)outcnt;
-    output_ptr += (ulg)outcnt;
-    outcnt = 0;
-}
-
-static void error(char *x)
-{
-       puts("\n\n");
-       puts(x);
-       puts("\n\n -- System halted");
-
-       while(1);       /* Halt */
-}
-
-#define STACK_SIZE (4096)
-long user_stack [STACK_SIZE];
-long* stack_start = &user_stack[STACK_SIZE];
-
-void decompress_kernel(void)
-{
-       output_data = 0;
-       output_ptr = (unsigned long)0x400000;
-       free_mem_ptr = (unsigned long)&_end;
-       free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
-
-       makecrc();
-       puts("Uncompressing Linux... ");
-       gunzip();
-       puts("Ok, booting the kernel.\n");
-}
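misc.c reads the compressed stream through get_byte(), which falls back to fill_inbuf() exactly once: the whole gzip image is linked in behind input_data/input_len by piggy.o, so a second refill means the data ran out. A minimal userspace sketch of that single-refill pattern follows; the buffer contents, the error handling via exit(), and the main() loop are illustrative assumptions rather than the kernel code.

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the input_data/input_len symbols normally emitted by piggy.o. */
    static unsigned char input_data[] = { 0x1f, 0x8b, 0x08, 0x00 };
    static int input_len = sizeof(input_data);

    static unsigned char *inbuf;
    static unsigned insize, inptr;

    static int fill_inbuf(void)
    {
        if (insize != 0) {              /* already refilled once: stream exhausted */
            fprintf(stderr, "ran out of input data\n");
            exit(1);
        }
        inbuf = input_data;
        insize = input_len;
        inptr = 1;                      /* byte 0 is returned directly */
        return inbuf[0];
    }

    #define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())

    int main(void)
    {
        for (int i = 0; i < input_len; i++)
            printf("%02x ", get_byte());
        putchar('\n');
        return 0;
    }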
diff --git a/arch/h8300/boot/compressed/vmlinux.lds b/arch/h8300/boot/compressed/vmlinux.lds
deleted file mode 100644 (file)
index a0a3a0e..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-SECTIONS
-{
-        .text :
-        {
-        __stext = . ;
-       __text = .;
-              *(.text..startup)
-              *(.text)
-        __etext = . ;
-        }
-
-       .rodata :
-       {
-               *(.rodata)
-       }
-        .data :
-
-        {
-        __sdata = . ;
-        ___data_start = . ;
-                *(.data.*)
-       }
-        .bss :
-        {
-        . = ALIGN(0x4) ;
-        __sbss = . ;
-                *(.bss*)
-        . = ALIGN(0x4) ;
-        __ebss = . ;
-        __end = . ;
-        }
-}
diff --git a/arch/h8300/boot/compressed/vmlinux.scr b/arch/h8300/boot/compressed/vmlinux.scr
deleted file mode 100644 (file)
index a0f6962..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-SECTIONS
-{
-  .data : {
-       _input_len = .;
-       LONG(_input_data_end - _input_data) _input_data = .;
-       *(.data)
-       _input_data_end = .;
-       }
-}
diff --git a/arch/h8300/defconfig b/arch/h8300/defconfig
deleted file mode 100644 (file)
index 042425a..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-# CONFIG_UID16 is not set
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
-# CONFIG_BASE_FULL is not set
-# CONFIG_FUTEX is not set
-# CONFIG_EPOLL is not set
-# CONFIG_SIGNALFD is not set
-# CONFIG_TIMERFD is not set
-# CONFIG_EVENTFD is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLOB=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_H83007=y
-CONFIG_BINFMT_FLAT=y
-CONFIG_BINFMT_ZFLAT=y
-CONFIG_BINFMT_MISC=y
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_REDBOOT_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_RAM=y
-CONFIG_MTD_ROM=y
-CONFIG_MTD_UCLINUX=y
-# CONFIG_BLK_DEV is not set
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_HWMON is not set
-# CONFIG_USB_SUPPORT is not set
-# CONFIG_DNOTIFY is not set
-CONFIG_ROMFS_FS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-# CONFIG_CRC32 is not set
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
deleted file mode 100644 (file)
index 8ada3cf..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-
-generic-y += clkdev.h
-generic-y += exec.h
-generic-y += linkage.h
-generic-y += mmu.h
-generic-y += module.h
-generic-y += trace_clock.h
-generic-y += xor.h
diff --git a/arch/h8300/include/asm/asm-offsets.h b/arch/h8300/include/asm/asm-offsets.h
deleted file mode 100644 (file)
index d370ee3..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <generated/asm-offsets.h>
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
deleted file mode 100644 (file)
index 40901e3..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-#ifndef __ARCH_H8300_ATOMIC__
-#define __ARCH_H8300_ATOMIC__
-
-#include <linux/types.h>
-#include <asm/cmpxchg.h>
-
-/*
- * Atomic operations that C can't guarantee us.  Useful for
- * resource counting etc..
- */
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v)         (*(volatile int *)&(v)->counter)
-#define atomic_set(v, i)       (((v)->counter) = i)
-
-#include <linux/kernel.h>
-
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
-       unsigned long flags;
-       int ret;
-       local_irq_save(flags);
-       ret = v->counter += i;
-       local_irq_restore(flags);
-       return ret;
-}
-
-#define atomic_add(i, v) atomic_add_return(i, v)
-#define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
-
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
-{
-       unsigned long flags;
-       int ret;
-       local_irq_save(flags);
-       ret = v->counter -= i;
-       local_irq_restore(flags);
-       return ret;
-}
-
-#define atomic_sub(i, v) atomic_sub_return(i, v)
-#define atomic_sub_and_test(i,v) (atomic_sub_return(i, v) == 0)
-
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
-       unsigned long flags;
-       int ret;
-       local_irq_save(flags);
-       v->counter++;
-       ret = v->counter;
-       local_irq_restore(flags);
-       return ret;
-}
-
-#define atomic_inc(v) atomic_inc_return(v)
-
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
-       unsigned long flags;
-       int ret;
-       local_irq_save(flags);
-       --v->counter;
-       ret = v->counter;
-       local_irq_restore(flags);
-       return ret;
-}
-
-#define atomic_dec(v) atomic_dec_return(v)
-
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
-       unsigned long flags;
-       int ret;
-       local_irq_save(flags);
-       --v->counter;
-       ret = v->counter;
-       local_irq_restore(flags);
-       return ret == 0;
-}
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-       int ret;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       ret = v->counter;
-       if (likely(ret == old))
-               v->counter = new;
-       local_irq_restore(flags);
-       return ret;
-}
-
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       int ret;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       ret = v->counter;
-       if (ret != u)
-               v->counter += a;
-       local_irq_restore(flags);
-       return ret;
-}
-
-static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
-{
-       __asm__ __volatile__("stc ccr,r1l\n\t"
-                            "orc #0x80,ccr\n\t"
-                            "mov.l %0,er0\n\t"
-                            "and.l %1,er0\n\t"
-                            "mov.l er0,%0\n\t"
-                            "ldc r1l,ccr" 
-                             : "=m" (*v) : "g" (~(mask)) :"er0","er1");
-}
-
-static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
-{
-       __asm__ __volatile__("stc ccr,r1l\n\t"
-                            "orc #0x80,ccr\n\t"
-                            "mov.l %0,er0\n\t"
-                            "or.l %1,er0\n\t"
-                            "mov.l er0,%0\n\t"
-                            "ldc r1l,ccr" 
-                             : "=m" (*v) : "g" (mask) :"er0","er1");
-}
-
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec()    barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc()    barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
-#endif /* __ARCH_H8300_ATOMIC__ */
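On this uniprocessor port the atomic_*() helpers above get their atomicity from local_irq_save()/local_irq_restore() alone; there is no SMP path. The userspace model below shows the common atomic_dec_and_test() refcount idiom such helpers exist for. A plain int stands in for atomic_t, the interrupt masking is only indicated in comments since it has no userspace equivalent, and the struct and its fields are invented for the example.

    #include <stdio.h>
    #include <stdlib.h>

    struct object {
        int refcount;                   /* models atomic_t.counter */
        char payload[32];
    };

    /* Models atomic_dec_and_test(): on the H8/300 the decrement and the test
     * run with interrupts masked, so no other context sees a torn update. */
    static int ref_dec_and_test(struct object *obj)
    {
        /* local_irq_save(flags); */
        int ret = --obj->refcount;
        /* local_irq_restore(flags); */
        return ret == 0;
    }

    int main(void)
    {
        struct object *obj = calloc(1, sizeof(*obj));

        obj->refcount = 2;              /* two owners hold a reference */
        if (ref_dec_and_test(obj))      /* first put: object still in use */
            puts("unexpected free");
        if (ref_dec_and_test(obj)) {    /* last put: tear the object down */
            puts("refcount hit zero, freeing");
            free(obj);
        }
        return 0;
    }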
diff --git a/arch/h8300/include/asm/barrier.h b/arch/h8300/include/asm/barrier.h
deleted file mode 100644 (file)
index 9e0aa9f..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _H8300_BARRIER_H
-#define _H8300_BARRIER_H
-
-#define nop()  asm volatile ("nop"::)
-
-/*
- * Force strict CPU ordering.
- * Not really required on H8...
- */
-#define mb()   asm volatile (""   : : :"memory")
-#define rmb()  asm volatile (""   : : :"memory")
-#define wmb()  asm volatile (""   : : :"memory")
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
-
-#define read_barrier_depends() do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()       mb()
-#define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
-#define smp_read_barrier_depends()     read_barrier_depends()
-#else
-#define smp_mb()       barrier()
-#define smp_rmb()      barrier()
-#define smp_wmb()      barrier()
-#define smp_read_barrier_depends()     do { } while(0)
-#endif
-
-#endif /* _H8300_BARRIER_H */
diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h
deleted file mode 100644 (file)
index eb34e0c..0000000
+++ /dev/null
@@ -1,211 +0,0 @@
-#ifndef _H8300_BITOPS_H
-#define _H8300_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- * Copyright 2002, Yoshinori Sato
- */
-
-#include <linux/compiler.h>
-
-#ifdef __KERNEL__
-
-#ifndef _LINUX_BITOPS_H
-#error only <linux/bitops.h> can be included directly
-#endif
-
-/*
- * Function prototypes to keep gcc -Wall happy
- */
-
-/*
- * ffz = Find First Zero in word. Undefined if no zero exists,
- * so code should check against ~0UL first..
- */
-static __inline__ unsigned long ffz(unsigned long word)
-{
-       unsigned long result;
-
-       result = -1;
-       __asm__("1:\n\t"
-               "shlr.l %2\n\t"
-               "adds #1,%0\n\t"
-               "bcs 1b"
-               : "=r" (result)
-               : "0"  (result),"r" (word));
-       return result;
-}
-
-#define H8300_GEN_BITOP_CONST(OP,BIT)                      \
-       case BIT:                                           \
-       __asm__(OP " #" #BIT ",@%0"::"r"(b_addr):"memory"); \
-       break;
-
-#define H8300_GEN_BITOP(FNAME,OP)                                    \
-static __inline__ void FNAME(int nr, volatile unsigned long* addr)    \
-{                                                                    \
-       volatile unsigned char *b_addr;                               \
-       b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3);    \
-       if (__builtin_constant_p(nr)) {                               \
-               switch(nr & 7) {                                      \
-                       H8300_GEN_BITOP_CONST(OP,0)                   \
-                       H8300_GEN_BITOP_CONST(OP,1)                   \
-                       H8300_GEN_BITOP_CONST(OP,2)                   \
-                       H8300_GEN_BITOP_CONST(OP,3)                   \
-                       H8300_GEN_BITOP_CONST(OP,4)                   \
-                       H8300_GEN_BITOP_CONST(OP,5)                   \
-                       H8300_GEN_BITOP_CONST(OP,6)                   \
-                       H8300_GEN_BITOP_CONST(OP,7)                   \
-               }                                                     \
-       } else {                                                      \
-               __asm__(OP " %w0,@%1"::"r"(nr),"r"(b_addr):"memory"); \
-       }                                                             \
-}
-
-/*
- * clear_bit() doesn't provide any barrier for the compiler.
- */
-#define smp_mb__before_clear_bit()     barrier()
-#define smp_mb__after_clear_bit()      barrier()
-
-H8300_GEN_BITOP(set_bit          ,"bset")
-H8300_GEN_BITOP(clear_bit ,"bclr")
-H8300_GEN_BITOP(change_bit,"bnot")
-#define __set_bit(nr,addr)    set_bit((nr),(addr))
-#define __clear_bit(nr,addr)  clear_bit((nr),(addr))
-#define __change_bit(nr,addr) change_bit((nr),(addr))
-
-#undef H8300_GEN_BITOP
-#undef H8300_GEN_BITOP_CONST
-
-static __inline__ int test_bit(int nr, const unsigned long* addr)
-{
-       return (*((volatile unsigned char *)addr + 
-               ((nr >> 3) ^ 3)) & (1UL << (nr & 7))) != 0;
-}
-
-#define __test_bit(nr, addr) test_bit(nr, addr)
-
-#define H8300_GEN_TEST_BITOP_CONST_INT(OP,BIT)                      \
-       case BIT:                                                    \
-       __asm__("stc ccr,%w1\n\t"                                    \
-               "orc #0x80,ccr\n\t"                                  \
-               "bld #" #BIT ",@%4\n\t"                              \
-               OP " #" #BIT ",@%4\n\t"                              \
-               "rotxl.l %0\n\t"                                     \
-               "ldc %w1,ccr"                                        \
-               : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr)          \
-               : "0" (retval),"r" (b_addr)                          \
-               : "memory");                                         \
-        break;
-
-#define H8300_GEN_TEST_BITOP_CONST(OP,BIT)                          \
-       case BIT:                                                    \
-       __asm__("bld #" #BIT ",@%3\n\t"                              \
-               OP " #" #BIT ",@%3\n\t"                              \
-               "rotxl.l %0\n\t"                                     \
-               : "=r"(retval),"=m"(*b_addr)                         \
-               : "0" (retval),"r" (b_addr)                          \
-               : "memory");                                         \
-        break;
-
-#define H8300_GEN_TEST_BITOP(FNNAME,OP)                                     \
-static __inline__ int FNNAME(int nr, volatile void * addr)          \
-{                                                                   \
-       int retval = 0;                                              \
-       char ccrsave;                                                \
-       volatile unsigned char *b_addr;                              \
-       b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3);   \
-       if (__builtin_constant_p(nr)) {                              \
-               switch(nr & 7) {                                     \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,0)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,1)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,2)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,3)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,4)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,5)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,6)         \
-                       H8300_GEN_TEST_BITOP_CONST_INT(OP,7)         \
-               }                                                    \
-       } else {                                                     \
-               __asm__("stc ccr,%w1\n\t"                            \
-                       "orc #0x80,ccr\n\t"                          \
-                       "btst %w5,@%4\n\t"                           \
-                       OP " %w5,@%4\n\t"                            \
-                       "beq 1f\n\t"                                 \
-                       "inc.l #1,%0\n"                              \
-                       "1:\n\t"                                     \
-                       "ldc %w1,ccr"                                \
-                       : "=r"(retval),"=&r"(ccrsave),"=m"(*b_addr)  \
-                       : "0" (retval),"r" (b_addr),"r"(nr)          \
-                       : "memory");                                 \
-       }                                                            \
-       return retval;                                               \
-}                                                                   \
-                                                                    \
-static __inline__ int __ ## FNNAME(int nr, volatile void * addr)     \
-{                                                                   \
-       int retval = 0;                                              \
-       volatile unsigned char *b_addr;                              \
-       b_addr = (volatile unsigned char *)addr + ((nr >> 3) ^ 3);   \
-       if (__builtin_constant_p(nr)) {                              \
-               switch(nr & 7) {                                     \
-                       H8300_GEN_TEST_BITOP_CONST(OP,0)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,1)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,2)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,3)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,4)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,5)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,6)             \
-                       H8300_GEN_TEST_BITOP_CONST(OP,7)             \
-               }                                                    \
-       } else {                                                     \
-               __asm__("btst %w4,@%3\n\t"                           \
-                       OP " %w4,@%3\n\t"                            \
-                       "beq 1f\n\t"                                 \
-                       "inc.l #1,%0\n"                              \
-                       "1:"                                         \
-                       : "=r"(retval),"=m"(*b_addr)                 \
-                       : "0" (retval),"r" (b_addr),"r"(nr)          \
-                       : "memory");                                 \
-       }                                                            \
-       return retval;                                               \
-}
-
-H8300_GEN_TEST_BITOP(test_and_set_bit,  "bset")
-H8300_GEN_TEST_BITOP(test_and_clear_bit, "bclr")
-H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot")
-#undef H8300_GEN_TEST_BITOP_CONST
-#undef H8300_GEN_TEST_BITOP_CONST_INT
-#undef H8300_GEN_TEST_BITOP
-
-#include <asm-generic/bitops/ffs.h>
-
-static __inline__ unsigned long __ffs(unsigned long word)
-{
-       unsigned long result;
-
-       result = -1;
-       __asm__("1:\n\t"
-               "shlr.l %2\n\t"
-               "adds #1,%0\n\t"
-               "bcc 1b"
-               : "=r" (result)
-               : "0"(result),"r"(word));
-       return result;
-}
-
-#include <asm-generic/bitops/find.h>
-#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
-#include <asm-generic/bitops/le.h>
-#include <asm-generic/bitops/ext2-atomic.h>
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
-#include <asm-generic/bitops/fls64.h>
-
-#endif /* _H8300_BITOPS_H */
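The comment on ffz() above warns that the result is undefined when no zero bit exists, so callers are expected to compare against ~0UL first. The portable sketch below shows an equivalent of ffz() and that guard in plain C; the bitmap value and the name ffz_generic are chosen here for illustration only.

    #include <stdio.h>

    /* Portable equivalent of ffz(): index of the lowest clear bit.
     * Not meaningful if every bit is set (it would return the word width). */
    static unsigned long ffz_generic(unsigned long word)
    {
        unsigned long bit = 0;

        while (word & 1) {
            word >>= 1;
            bit++;
        }
        return bit;
    }

    int main(void)
    {
        unsigned long map = 0x0000001fUL;       /* bits 0..4 already allocated */

        if (map != ~0UL)                        /* the guard the comment asks for */
            printf("first free bit: %lu\n", ffz_generic(map));  /* prints 5 */
        return 0;
    }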
diff --git a/arch/h8300/include/asm/bootinfo.h b/arch/h8300/include/asm/bootinfo.h
deleted file mode 100644 (file)
index 5bed7e7..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-
-/* Nothing for h8300 */
diff --git a/arch/h8300/include/asm/bug.h b/arch/h8300/include/asm/bug.h
deleted file mode 100644 (file)
index 1e1be81..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _H8300_BUG_H
-#define _H8300_BUG_H
-
-/* always true */
-#define is_valid_bugaddr(addr) (1)
-
-#include <asm-generic/bug.h>
-
-struct pt_regs;
-extern void die(const char *str, struct pt_regs *fp, unsigned long err);
-
-#endif
diff --git a/arch/h8300/include/asm/bugs.h b/arch/h8300/include/asm/bugs.h
deleted file mode 100644 (file)
index 1cb4afb..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- *  include/asm-h8300/bugs.h
- *
- *  Copyright (C) 1994  Linus Torvalds
- */
-
-/*
- * This is included by init/main.c to check for architecture-dependent bugs.
- *
- * Needs:
- *     void check_bugs(void);
- */
-
-static void check_bugs(void)
-{
-}
diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
deleted file mode 100644 (file)
index 05887a1..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ARCH_H8300_CACHE_H
-#define __ARCH_H8300_CACHE_H
-
-/* bytes per L1 cache line */
-#define        L1_CACHE_SHIFT  2
-#define        L1_CACHE_BYTES  (1 << L1_CACHE_SHIFT)
-
-/* m68k-elf-gcc  2.95.2 doesn't like these */
-
-#define __cacheline_aligned
-#define ____cacheline_aligned
-
-#endif
diff --git a/arch/h8300/include/asm/cachectl.h b/arch/h8300/include/asm/cachectl.h
deleted file mode 100644 (file)
index c464022..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _H8300_CACHECTL_H
-#define _H8300_CACHECTL_H
-
-/* Definitions for the cacheflush system call.  */
-
-#define FLUSH_SCOPE_LINE    0  /* Flush a cache line */
-#define FLUSH_SCOPE_PAGE    0  /* Flush a page */
-#define FLUSH_SCOPE_ALL     0  /* Flush the whole cache -- superuser only */
-
-#define FLUSH_CACHE_DATA    0  /* Writeback and flush data cache */
-#define FLUSH_CACHE_INSN    0  /* Flush instruction cache */
-#define FLUSH_CACHE_BOTH    0  /* Flush both caches */
-
-#endif /* _H8300_CACHECTL_H */
diff --git a/arch/h8300/include/asm/cacheflush.h b/arch/h8300/include/asm/cacheflush.h
deleted file mode 100644 (file)
index 4cf2df2..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * (C) Copyright 2002, Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-#ifndef _ASM_H8300_CACHEFLUSH_H
-#define _ASM_H8300_CACHEFLUSH_H
-
-/*
- * Cache handling functions
- * No Cache memory all dummy functions
- */
-
-#define flush_cache_all()
-#define        flush_cache_mm(mm)
-#define        flush_cache_dup_mm(mm)          do { } while (0)
-#define        flush_cache_range(vma,a,b)
-#define        flush_cache_page(vma,p,pfn)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define        flush_dcache_page(page)
-#define        flush_dcache_mmap_lock(mapping)
-#define        flush_dcache_mmap_unlock(mapping)
-#define        flush_icache()
-#define        flush_icache_page(vma,page)
-#define        flush_icache_range(start,len)
-#define flush_cache_vmap(start, end)
-#define flush_cache_vunmap(start, end)
-#define        cache_push_v(vaddr,len)
-#define        cache_push(paddr,len)
-#define        cache_clear(paddr,len)
-
-#define        flush_dcache_range(a,b)
-
-#define        flush_icache_user_range(vma,page,addr,len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy(dst, src, len)
-
-#endif /* _ASM_H8300_CACHEFLUSH_H */
diff --git a/arch/h8300/include/asm/checksum.h b/arch/h8300/include/asm/checksum.h
deleted file mode 100644 (file)
index 98724e1..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-#ifndef _H8300_CHECKSUM_H
-#define _H8300_CHECKSUM_H
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here it is even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
-
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here it is even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-                                               int len, __wsum sum, int *csum_err);
-
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-
-/*
- *     Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
-       __asm__("mov.l %0,er0\n\t"
-               "add.w e0,r0\n\t"
-               "xor.w e0,e0\n\t"
-               "rotxl.w e0\n\t"
-               "add.w e0,r0\n\t"
-               "sub.w e0,e0\n\t"
-               "mov.l er0,%0"
-               : "=r"(sum)
-               : "0"(sum)
-               : "er0");
-       return (__force __sum16)~sum;
-}
-
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
-                 unsigned short proto, __wsum sum)
-{
-       __asm__ ("sub.l er0,er0\n\t"
-                "add.l %2,%0\n\t"
-                "addx  #0,r0l\n\t"
-                "add.l %3,%0\n\t"
-                "addx  #0,r0l\n\t"
-                "add.l %4,%0\n\t"
-                "addx  #0,r0l\n\t"
-                "add.l er0,%0\n\t"
-                "bcc   1f\n\t"
-                "inc.l #1,%0\n"
-                "1:"
-                : "=&r" (sum)
-                : "0" (sum), "r" (daddr), "r" (saddr), "r" (len + proto)
-                :"er0");
-       return sum;
-}
-
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
-                 unsigned short proto, __wsum sum)
-{
-       return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
-extern __sum16 ip_compute_csum(const void *buff, int len);
-
-#endif /* _H8300_CHECKSUM_H */
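csum_fold() above folds a 32-bit running one's-complement sum into 16 bits with an end-around carry and then inverts it; the inline assembly just does that with H8 register halves. A portable C version of the same folding step follows, with an arbitrary example value standing in for a csum_partial() result.

    #include <stdio.h>
    #include <stdint.h>

    /* Fold a 32-bit one's-complement sum into 16 bits and invert it,
     * the same operation csum_fold() performs in assembly. */
    static uint16_t csum_fold_generic(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);     /* fold the high half in */
        sum = (sum & 0xffff) + (sum >> 16);     /* absorb a possible carry */
        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint32_t partial = 0x0002fffd;          /* made-up partial sum */

        printf("folded checksum: 0x%04x\n", csum_fold_generic(partial));
        return 0;
    }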
diff --git a/arch/h8300/include/asm/cmpxchg.h b/arch/h8300/include/asm/cmpxchg.h
deleted file mode 100644 (file)
index cdb203e..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef __ARCH_H8300_CMPXCHG__
-#define __ARCH_H8300_CMPXCHG__
-
-#include <linux/irqflags.h>
-
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
-{
-  unsigned long tmp, flags;
-
-  local_irq_save(flags);
-
-  switch (size) {
-  case 1:
-    __asm__ __volatile__
-    ("mov.b %2,%0\n\t"
-     "mov.b %1,%2"
-    : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
-    break;
-  case 2:
-    __asm__ __volatile__
-    ("mov.w %2,%0\n\t"
-     "mov.w %1,%2"
-    : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
-    break;
-  case 4:
-    __asm__ __volatile__
-    ("mov.l %2,%0\n\t"
-     "mov.l %1,%2"
-    : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
-    break;
-  default:
-    tmp = 0;     
-  }
-  local_irq_restore(flags);
-  return tmp;
-}
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)                                              \
-       ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-                       (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include <asm-generic/cmpxchg.h>
-#endif
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#endif /* __ARCH_H8300_CMPXCHG__ */
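xchg() above swaps in a new value and returns the old one atomically, again by masking interrupts. A classic use of that primitive is draining a pending-work bitmask in a single step. In the userspace sketch below, C11 atomic_exchange() stands in for xchg(); the bitmask value and the notion of "work items" are invented for the example.

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
        atomic_ulong pending_work = 0x05;       /* bits 0 and 2 queued */

        /* Grab everything pending and clear the mask in one atomic step,
         * the pattern xchg(&pending_work, 0) would be used for. */
        unsigned long work = atomic_exchange(&pending_work, 0UL);

        while (work) {
            int bit = __builtin_ctzl(work);     /* lowest pending bit (GCC/Clang builtin) */
            printf("handling work item %d\n", bit);
            work &= work - 1;                   /* clear that bit */
        }
        return 0;
    }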
diff --git a/arch/h8300/include/asm/cputime.h b/arch/h8300/include/asm/cputime.h
deleted file mode 100644 (file)
index 092e187..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __H8300_CPUTIME_H
-#define __H8300_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __H8300_CPUTIME_H */
diff --git a/arch/h8300/include/asm/current.h b/arch/h8300/include/asm/current.h
deleted file mode 100644 (file)
index 57d74ee..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _H8300_CURRENT_H
-#define _H8300_CURRENT_H
-/*
- *     current.h
- *     (C) Copyright 2000, Lineo, David McCullough <davidm@lineo.com>
- *     (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
- *
- *     Rather than dedicate a register (as the m68k source does), we
- *     just keep a global; we should probably change it all to use
- *     current and lose _current_task.
- */
-
-#include <linux/thread_info.h>
-#include <asm/thread_info.h>
-
-struct task_struct;
-
-static inline struct task_struct *get_current(void)
-{
-       return(current_thread_info()->task);
-}
-
-#define        current get_current()
-
-#endif /* _H8300_CURRENT_H */
diff --git a/arch/h8300/include/asm/dbg.h b/arch/h8300/include/asm/dbg.h
deleted file mode 100644 (file)
index 2c6d1cb..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#define DEBUG 1
-#define        BREAK asm volatile ("trap #3")
diff --git a/arch/h8300/include/asm/delay.h b/arch/h8300/include/asm/delay.h
deleted file mode 100644 (file)
index 743beba..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _H8300_DELAY_H
-#define _H8300_DELAY_H
-
-#include <asm/param.h>
-
-/*
- * Copyright (C) 2002 Yoshinori Sato <ysato@sourceforge.jp>
- *
- * Delay routines, using a pre-computed "loops_per_second" value.
- */
-
-static inline void __delay(unsigned long loops)
-{
-       __asm__ __volatile__ ("1:\n\t"
-                             "dec.l #1,%0\n\t"
-                             "bne 1b"
-                             :"=r" (loops):"0"(loops));
-}
-
-/*
- * Use only for very small delays ( < 1 msec).  Should probably use a
- * lookup table, really, as the multiplications take much too long with
- * short delays.  This is a "reasonable" implementation, though (and the
- * first constant multiplication gets optimized away if the delay is
- * a constant)  
- */
-
-extern unsigned long loops_per_jiffy;
-
-static inline void udelay(unsigned long usecs)
-{
-       usecs *= 4295;          /* 2**32 / 1000000 */
-       usecs /= (loops_per_jiffy*HZ);
-       if (usecs)
-               __delay(usecs);
-}
-
-#endif /* _H8300_DELAY_H */
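__delay() above is a plain busy loop: it decrements its argument until it hits zero, so its unit is iterations of a two-instruction loop, and udelay() has to convert microseconds into that unit via the boot-time loops_per_jiffy calibration. A userspace model of the busy loop itself follows; the iteration count is arbitrary and the volatile counter merely keeps a C compiler from deleting the loop.

    #include <stdio.h>

    /* Userspace model of __delay(): burn 'loops' iterations doing nothing. */
    static void delay_loops(unsigned long loops)
    {
        volatile unsigned long n = loops;

        while (n--)
            ;                           /* corresponds to the dec.l/bne pair */
    }

    int main(void)
    {
        delay_loops(1000000UL);         /* arbitrary iteration count */
        puts("done spinning");
        return 0;
    }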
diff --git a/arch/h8300/include/asm/device.h b/arch/h8300/include/asm/device.h
deleted file mode 100644 (file)
index d8f9872..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-/*
- * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
- */
-#include <asm-generic/device.h>
-
diff --git a/arch/h8300/include/asm/div64.h b/arch/h8300/include/asm/div64.h
deleted file mode 100644 (file)
index 6cd978c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/h8300/include/asm/dma.h b/arch/h8300/include/asm/dma.h
deleted file mode 100644 (file)
index 3edbaaa..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _H8300_DMA_H
-#define _H8300_DMA_H 
-
-/*
- * Set the number of available DMA channels; this differs between implementations.
- */
-#define MAX_DMA_CHANNELS 0
-#define MAX_DMA_ADDRESS PAGE_OFFSET
-
-/* These are in kernel/dma.c: */
-extern int request_dma(unsigned int dmanr, const char *device_id);     /* reserve a DMA channel */
-extern void free_dma(unsigned int dmanr);      /* release it again */
-#endif /* _H8300_DMA_H */
diff --git a/arch/h8300/include/asm/elf.h b/arch/h8300/include/asm/elf.h
deleted file mode 100644 (file)
index 6db7124..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-#ifndef __ASMH8300_ELF_H
-#define __ASMH8300_ELF_H
-
-/*
- * ELF register definitions..
- */
-
-#include <asm/ptrace.h>
-#include <asm/user.h>
-
-typedef unsigned long elf_greg_t;
-
-#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-typedef unsigned long elf_fpregset_t;
-
-/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) ((x)->e_machine == EM_H8_300)
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_CLASS      ELFCLASS32
-#define ELF_DATA       ELFDATA2MSB
-#define ELF_ARCH       EM_H8_300
-#if defined(__H8300H__)
-#define ELF_CORE_EFLAGS 0x810000
-#endif
-#if defined(__H8300S__)
-#define ELF_CORE_EFLAGS 0x820000
-#endif
-
-#define ELF_PLAT_INIT(_r)      _r->er1 = 0
-
-#define ELF_EXEC_PAGESIZE      4096
-
-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
-   use of this is to invoke "./ld.so someprog" to test out a new version of
-   the loader.  We need to make sure that it is out of the way of the program
-   that it will "exec", and that there is sufficient room for the brk.  */
-
-#define ELF_ET_DYN_BASE         0xD0000000UL
-
-/* This yields a mask that user programs can use to figure out what
-   instruction set this cpu supports.  */
-
-#define ELF_HWCAP      (0)
-
-/* This yields a string that ld.so will use to load implementation
-   specific libraries for optimization.  This is more specific in
-   intent than poking at uname or /proc/cpuinfo.  */
-
-#define ELF_PLATFORM  (NULL)
-
-#define R_H8_NONE       0
-#define R_H8_DIR32      1
-#define R_H8_DIR32_28   2
-#define R_H8_DIR32_24   3
-#define R_H8_DIR32_16   4
-#define R_H8_DIR32U     6
-#define R_H8_DIR32U_28  7
-#define R_H8_DIR32U_24  8
-#define R_H8_DIR32U_20  9
-#define R_H8_DIR32U_16 10
-#define R_H8_DIR24     11
-#define R_H8_DIR24_20  12
-#define R_H8_DIR24_16  13
-#define R_H8_DIR24U    14
-#define R_H8_DIR24U_20 15
-#define R_H8_DIR24U_16 16
-#define R_H8_DIR16     17
-#define R_H8_DIR16U    18
-#define R_H8_DIR16S_32 19
-#define R_H8_DIR16S_28 20
-#define R_H8_DIR16S_24 21
-#define R_H8_DIR16S_20 22
-#define R_H8_DIR16S    23
-#define R_H8_DIR8      24
-#define R_H8_DIR8U     25
-#define R_H8_DIR8Z_32  26
-#define R_H8_DIR8Z_28  27
-#define R_H8_DIR8Z_24  28
-#define R_H8_DIR8Z_20  29
-#define R_H8_DIR8Z_16  30
-#define R_H8_PCREL16   31
-#define R_H8_PCREL8    32
-#define R_H8_BPOS      33
-#define R_H8_PCREL32   34
-#define R_H8_GOT32O    35
-#define R_H8_GOT16O    36
-#define R_H8_DIR16A8   59
-#define R_H8_DIR16R8   60
-#define R_H8_DIR24A8   61
-#define R_H8_DIR24R8   62
-#define R_H8_DIR32A16  63
-#define R_H8_ABS32     65
-#define R_H8_ABS32A16 127
-
-#endif
diff --git a/arch/h8300/include/asm/emergency-restart.h b/arch/h8300/include/asm/emergency-restart.h
deleted file mode 100644 (file)
index 108d8c4..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/h8300/include/asm/fb.h b/arch/h8300/include/asm/fb.h
deleted file mode 100644 (file)
index c7df380..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_FB_H_
-#define _ASM_FB_H_
-#include <linux/fb.h>
-
-#define fb_pgprotect(...) do {} while (0)
-
-static inline int fb_is_primary_device(struct fb_info *info)
-{
-       return 0;
-}
-
-#endif /* _ASM_FB_H_ */
diff --git a/arch/h8300/include/asm/flat.h b/arch/h8300/include/asm/flat.h
deleted file mode 100644 (file)
index bd12b31..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * include/asm-h8300/flat.h -- uClinux flat-format executables
- */
-
-#ifndef __H8300_FLAT_H__
-#define __H8300_FLAT_H__
-
-#define        flat_argvp_envp_on_stack()              1
-#define        flat_old_ram_flag(flags)                1
-#define        flat_reloc_valid(reloc, size)           ((reloc) <= (size))
-#define        flat_set_persistent(relval, p)          0
-
-/*
- * on the H8 a couple of the relocations have an instruction in the
- * top byte.  As there can only be 24 bits of address space, we just
- * always preserve those 8 bits at the top; when it isn't an instruction
- * it is 0 (davidm@snapgear.com)
- */
-
-#define        flat_get_relocate_addr(rel)             (rel)
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
-        (get_unaligned(rp) & ((flags & FLAT_FLAG_GOTPIC) ? 0xffffffff: 0x00ffffff))
-#define flat_put_addr_at_rp(rp, addr, rel) \
-       put_unaligned (((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), rp)
-
-#endif /* __H8300_FLAT_H__ */
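The comment in flat.h explains the trick behind these macros: with only 24 bits of address space, the top byte of a relocated 32-bit word may hold an instruction opcode, so reads mask it off and writes preserve it. The sketch below models that packing; the opcode byte 0x5a, the addresses and the helper names are made up for illustration.

    #include <stdio.h>
    #include <stdint.h>

    /* Read a 24-bit address out of a word whose top byte may be an opcode. */
    static uint32_t get_addr(uint32_t word)
    {
        return word & 0x00ffffff;
    }

    /* Write a new 24-bit address back while preserving the top byte,
     * as flat_put_addr_at_rp() does. */
    static uint32_t put_addr(uint32_t old_word, uint32_t addr)
    {
        return (old_word & 0xff000000) | (addr & 0x00ffffff);
    }

    int main(void)
    {
        uint32_t word = 0x5a123456;     /* hypothetical opcode 0x5a + old address */
        uint32_t relocated = put_addr(word, get_addr(word) + 0x1000);

        printf("0x%08x -> 0x%08x\n", (unsigned)word, (unsigned)relocated);
        return 0;
    }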
diff --git a/arch/h8300/include/asm/fpu.h b/arch/h8300/include/asm/fpu.h
deleted file mode 100644 (file)
index 4fc416e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* Nothing do */
diff --git a/arch/h8300/include/asm/ftrace.h b/arch/h8300/include/asm/ftrace.h
deleted file mode 100644 (file)
index 40a8c17..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* empty */
diff --git a/arch/h8300/include/asm/futex.h b/arch/h8300/include/asm/futex.h
deleted file mode 100644 (file)
index 6a332a9..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
-
-#include <asm-generic/futex.h>
-
-#endif
diff --git a/arch/h8300/include/asm/gpio-internal.h b/arch/h8300/include/asm/gpio-internal.h
deleted file mode 100644 (file)
index a714f0c..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef _H8300_GPIO_H
-#define _H8300_GPIO_H
-
-#define H8300_GPIO_P1 0
-#define H8300_GPIO_P2 1
-#define H8300_GPIO_P3 2
-#define H8300_GPIO_P4 3
-#define H8300_GPIO_P5 4
-#define H8300_GPIO_P6 5
-#define H8300_GPIO_P7 6
-#define H8300_GPIO_P8 7
-#define H8300_GPIO_P9 8
-#define H8300_GPIO_PA 9
-#define H8300_GPIO_PB 10
-#define H8300_GPIO_PC 11
-#define H8300_GPIO_PD 12
-#define H8300_GPIO_PE 13
-#define H8300_GPIO_PF 14
-#define H8300_GPIO_PG 15
-#define H8300_GPIO_PH 16
-
-#define H8300_GPIO_B7 0x80
-#define H8300_GPIO_B6 0x40
-#define H8300_GPIO_B5 0x20
-#define H8300_GPIO_B4 0x10
-#define H8300_GPIO_B3 0x08
-#define H8300_GPIO_B2 0x04
-#define H8300_GPIO_B1 0x02
-#define H8300_GPIO_B0 0x01
-
-#define H8300_GPIO_INPUT 0
-#define H8300_GPIO_OUTPUT 1
-
-#define H8300_GPIO_RESERVE(port, bits) \
-        h8300_reserved_gpio(port, bits)
-
-#define H8300_GPIO_FREE(port, bits) \
-        h8300_free_gpio(port, bits)
-
-#define H8300_GPIO_DDR(port, bit, dir) \
-        h8300_set_gpio_dir(((port) << 8) | (bit), dir)
-
-#define H8300_GPIO_GETDIR(port, bit) \
-        h8300_get_gpio_dir(((port) << 8) | (bit))
-
-extern int h8300_reserved_gpio(int port, int bits);
-extern int h8300_free_gpio(int port, int bits);
-extern int h8300_set_gpio_dir(int port_bit, int dir);
-extern int h8300_get_gpio_dir(int port_bit);
-extern int h8300_init_gpio(void);
-
-#endif
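The H8300_GPIO_DDR()/H8300_GPIO_GETDIR() macros above pack a port index and a bit mask into one integer as (port << 8) | bit and leave it to the callee to split them apart. A small sketch of that encoding follows, using the H8300_GPIO_PA and H8300_GPIO_B4 values defined above; the decode helper and the printout are invented here.

    #include <stdio.h>

    /* Split the (port << 8) | bit encoding used by the macros above. */
    static void show_port_bit(int port_bit)
    {
        int port = port_bit >> 8;
        int bit  = port_bit & 0xff;

        printf("port %d, bit mask 0x%02x\n", port, bit);
    }

    int main(void)
    {
        int h8300_gpio_pa = 9;          /* value of H8300_GPIO_PA */
        int h8300_gpio_b4 = 0x10;       /* value of H8300_GPIO_B4 */

        show_port_bit((h8300_gpio_pa << 8) | h8300_gpio_b4);   /* port 9, mask 0x10 */
        return 0;
    }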
diff --git a/arch/h8300/include/asm/hardirq.h b/arch/h8300/include/asm/hardirq.h
deleted file mode 100644 (file)
index c2e1aa0..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef __H8300_HARDIRQ_H
-#define __H8300_HARDIRQ_H
-
-#include <asm/irq.h>
-
-#define HARDIRQ_BITS   8
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
-#include <asm-generic/hardirq.h>
-
-#endif
diff --git a/arch/h8300/include/asm/hw_irq.h b/arch/h8300/include/asm/hw_irq.h
deleted file mode 100644 (file)
index d75a5a1..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* Do Nothing */
diff --git a/arch/h8300/include/asm/io.h b/arch/h8300/include/asm/io.h
deleted file mode 100644 (file)
index c1a8df2..0000000
+++ /dev/null
@@ -1,358 +0,0 @@
-#ifndef _H8300_IO_H
-#define _H8300_IO_H
-
-#ifdef __KERNEL__
-
-#include <asm/virtconvert.h>
-
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
-#include <asm/regs306x.h>
-#elif defined(CONFIG_H8S2678)
-#include <asm/regs267x.h>
-#else
-#error UNKNOWN CPU TYPE
-#endif
-
-
-/*
- * These are for ISA/PCI shared memory _only_ and should never be used
- * on any other type of memory. They are meant to access the bus in the
- * bus byte order, which is little-endian.
- *
- * readX/writeX() are used to access memory-mapped devices. On some
- * architectures memory-mapped I/O needs to be accessed differently;
- * on the H8/300 we just read and write the memory location
- * directly.
- */
-/* ++roman: The assignments to temp. vars avoid that gcc sometimes generates
- * two accesses to memory, which may be undesirable for some devices.
- */
-
-/*
- * swap functions are sometimes needed to interface little-endian hardware
- */
-
-static inline unsigned short _swapw(volatile unsigned short v)
-{
-#ifndef H8300_IO_NOSWAP
-       unsigned short r;
-       __asm__("xor.b %w0,%x0\n\t"
-               "xor.b %x0,%w0\n\t"
-               "xor.b %w0,%x0"
-               :"=r"(r)
-               :"0"(v));
-       return r;
-#else
-       return v;
-#endif
-}
-
-static inline unsigned long _swapl(volatile unsigned long v)
-{
-#ifndef H8300_IO_NOSWAP
-       unsigned long r;
-       __asm__("xor.b %w0,%x0\n\t"
-               "xor.b %x0,%w0\n\t"
-               "xor.b %w0,%x0\n\t"
-               "xor.w %e0,%f0\n\t"
-               "xor.w %f0,%e0\n\t"
-               "xor.w %e0,%f0\n\t"
-               "xor.b %w0,%x0\n\t"
-               "xor.b %x0,%w0\n\t"
-               "xor.b %w0,%x0"
-               :"=r"(r)
-               :"0"(v));
-       return r;
-#else
-       return v;
-#endif
-}
-
-#define readb(addr) \
-    ({ unsigned char __v = \
-     *(volatile unsigned char *)((unsigned long)(addr) & 0x00ffffff); \
-     __v; })
-#define readw(addr) \
-    ({ unsigned short __v = \
-     *(volatile unsigned short *)((unsigned long)(addr) & 0x00ffffff); \
-     __v; })
-#define readl(addr) \
-    ({ unsigned long __v = \
-     *(volatile unsigned long *)((unsigned long)(addr) & 0x00ffffff); \
-     __v; })
-
-#define writeb(b,addr) (void)((*(volatile unsigned char *) \
-                             ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define writew(b,addr) (void)((*(volatile unsigned short *) \
-                             ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define writel(b,addr) (void)((*(volatile unsigned long *) \
-                             ((unsigned long)(addr) & 0x00ffffff)) = (b))
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
-
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-
-static inline int h8300_buswidth(unsigned int addr)
-{
-       return (*(volatile unsigned char *)ABWCR & (1 << ((addr >> 21) & 7))) == 0;
-}
-
-static inline void io_outsb(unsigned int addr, const void *buf, int len)
-{
-       volatile unsigned char  *ap_b = (volatile unsigned char *) addr;
-       volatile unsigned short *ap_w = (volatile unsigned short *) addr;
-       unsigned char *bp = (unsigned char *) buf;
-
-       if(h8300_buswidth(addr) && (addr & 1)) {
-               while (len--)
-                       *ap_w = *bp++;
-       } else {
-               while (len--)
-                       *ap_b = *bp++;
-       }
-}
-
-static inline void io_outsw(unsigned int addr, const void *buf, int len)
-{
-       volatile unsigned short *ap = (volatile unsigned short *) addr;
-       unsigned short *bp = (unsigned short *) buf;
-       while (len--)
-               *ap = _swapw(*bp++);
-}
-
-static inline void io_outsl(unsigned int addr, const void *buf, int len)
-{
-       volatile unsigned long *ap = (volatile unsigned long *) addr;
-       unsigned long *bp = (unsigned long *) buf;
-       while (len--)
-               *ap = _swapl(*bp++);
-}
-
-static inline void io_outsw_noswap(unsigned int addr, const void *buf, int len)
-{
-       volatile unsigned short *ap = (volatile unsigned short *) addr;
-       unsigned short *bp = (unsigned short *) buf;
-       while (len--)
-               *ap = *bp++;
-}
-
-static inline void io_outsl_noswap(unsigned int addr, const void *buf, int len)
-{
-       volatile unsigned long *ap = (volatile unsigned long *) addr;
-       unsigned long *bp = (unsigned long *) buf;
-       while (len--)
-               *ap = *bp++;
-}
-
-static inline void io_insb(unsigned int addr, void *buf, int len)
-{
-       volatile unsigned char  *ap_b;
-       volatile unsigned short *ap_w;
-       unsigned char *bp = (unsigned char *) buf;
-
-       if(h8300_buswidth(addr)) {
-               ap_w = (volatile unsigned short *)(addr & ~1);
-               while (len--)
-                       *bp++ = *ap_w & 0xff;
-       } else {
-               ap_b = (volatile unsigned char *)addr;
-               while (len--)
-                       *bp++ = *ap_b;
-       }
-}
-
-static inline void io_insw(unsigned int addr, void *buf, int len)
-{
-       volatile unsigned short *ap = (volatile unsigned short *) addr;
-       unsigned short *bp = (unsigned short *) buf;
-       while (len--)
-               *bp++ = _swapw(*ap);
-}
-
-static inline void io_insl(unsigned int addr, void *buf, int len)
-{
-       volatile unsigned long *ap = (volatile unsigned long *) addr;
-       unsigned long *bp = (unsigned long *) buf;
-       while (len--)
-               *bp++ = _swapl(*ap);
-}
-
-static inline void io_insw_noswap(unsigned int addr, void *buf, int len)
-{
-       volatile unsigned short *ap = (volatile unsigned short *) addr;
-       unsigned short *bp = (unsigned short *) buf;
-       while (len--)
-               *bp++ = *ap;
-}
-
-static inline void io_insl_noswap(unsigned int addr, void *buf, int len)
-{
-       volatile unsigned long *ap = (volatile unsigned long *) addr;
-       unsigned long *bp = (unsigned long *) buf;
-       while (len--)
-               *bp++ = *ap;
-}
-
-/*
- *     make the short names macros so specific devices
- *     can override them as required
- */
-
-#define memset_io(a,b,c)       memset((void *)(a),(b),(c))
-#define memcpy_fromio(a,b,c)   memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c)     memcpy((void *)(a),(b),(c))
-
-#define mmiowb()
-
-#define inb(addr)    ((h8300_buswidth(addr))?readw((addr) & ~1) & 0xff:readb(addr))
-#define inw(addr)    _swapw(readw(addr))
-#define inl(addr)    _swapl(readl(addr))
-#define outb(x,addr) ((void)((h8300_buswidth(addr) && \
-                      ((addr) & 1))?writew(x,(addr) & ~1):writeb(x,addr)))
-#define outw(x,addr) ((void) writew(_swapw(x),addr))
-#define outl(x,addr) ((void) writel(_swapl(x),addr))
-
-#define inb_p(addr)    inb(addr)
-#define inw_p(addr)    inw(addr)
-#define inl_p(addr)    inl(addr)
-#define outb_p(x,addr) outb(x,addr)
-#define outw_p(x,addr) outw(x,addr)
-#define outl_p(x,addr) outl(x,addr)
-
-#define outsb(a,b,l) io_outsb(a,b,l)
-#define outsw(a,b,l) io_outsw(a,b,l)
-#define outsl(a,b,l) io_outsl(a,b,l)
-
-#define insb(a,b,l) io_insb(a,b,l)
-#define insw(a,b,l) io_insw(a,b,l)
-#define insl(a,b,l) io_insl(a,b,l)
-
-#define IO_SPACE_LIMIT 0xffffff
-
-
-/* Values for nocacheflag and cmode */
-#define IOMAP_FULL_CACHING             0
-#define IOMAP_NOCACHE_SER              1
-#define IOMAP_NOCACHE_NONSER           2
-#define IOMAP_WRITETHROUGH             3
-
-extern void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag);
-extern void __iounmap(void *addr, unsigned long size);
-
-static inline void *ioremap(unsigned long physaddr, unsigned long size)
-{
-       return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
-{
-       return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
-{
-       return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
-}
-static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size)
-{
-       return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
-}
-
-extern void iounmap(void *addr);
-
-/* H8/300 internal I/O functions */
-static __inline__ unsigned char ctrl_inb(unsigned long addr)
-{
-       return *(volatile unsigned char*)addr;
-}
-
-static __inline__ unsigned short ctrl_inw(unsigned long addr)
-{
-       return *(volatile unsigned short*)addr;
-}
-
-static __inline__ unsigned long ctrl_inl(unsigned long addr)
-{
-       return *(volatile unsigned long*)addr;
-}
-
-static __inline__ void ctrl_outb(unsigned char b, unsigned long addr)
-{
-       *(volatile unsigned char*)addr = b;
-}
-
-static __inline__ void ctrl_outw(unsigned short b, unsigned long addr)
-{
-       *(volatile unsigned short*)addr = b;
-}
-
-static __inline__ void ctrl_outl(unsigned long b, unsigned long addr)
-{
-        *(volatile unsigned long*)addr = b;
-}
-
-static __inline__ void ctrl_bclr(int b, unsigned long addr)
-{
-       if (__builtin_constant_p(b))
-               switch (b) {
-               case 0: __asm__("bclr #0,@%0"::"r"(addr)); break;
-               case 1: __asm__("bclr #1,@%0"::"r"(addr)); break;
-               case 2: __asm__("bclr #2,@%0"::"r"(addr)); break;
-               case 3: __asm__("bclr #3,@%0"::"r"(addr)); break;
-               case 4: __asm__("bclr #4,@%0"::"r"(addr)); break;
-               case 5: __asm__("bclr #5,@%0"::"r"(addr)); break;
-               case 6: __asm__("bclr #6,@%0"::"r"(addr)); break;
-               case 7: __asm__("bclr #7,@%0"::"r"(addr)); break;
-               }
-       else
-               __asm__("bclr %w0,@%1"::"r"(b), "r"(addr));
-}
-
-static __inline__ void ctrl_bset(int b, unsigned long addr)
-{
-       if (__builtin_constant_p(b))
-               switch (b) {
-               case 0: __asm__("bset #0,@%0"::"r"(addr)); break;
-               case 1: __asm__("bset #1,@%0"::"r"(addr)); break;
-               case 2: __asm__("bset #2,@%0"::"r"(addr)); break;
-               case 3: __asm__("bset #3,@%0"::"r"(addr)); break;
-               case 4: __asm__("bset #4,@%0"::"r"(addr)); break;
-               case 5: __asm__("bset #5,@%0"::"r"(addr)); break;
-               case 6: __asm__("bset #6,@%0"::"r"(addr)); break;
-               case 7: __asm__("bset #7,@%0"::"r"(addr)); break;
-               }
-       else
-               __asm__("bset %w0,@%1"::"r"(b), "r"(addr));
-}
-
-/* Pages to physical address... */
-#define page_to_phys(page)      ((page - mem_map) << PAGE_SHIFT)
-#define page_to_bus(page)       ((page - mem_map) << PAGE_SHIFT)
-
-/*
- * Macros used for converting between virtual and physical mappings.
- */
-#define phys_to_virt(vaddr)    ((void *) (vaddr))
-#define virt_to_phys(vaddr)    ((unsigned long) (vaddr))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)   __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)  p
-
-#endif /* __KERNEL__ */
-
-#endif /* _H8300_IO_H */
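The ctrl_* accessors and ctrl_bset()/ctrl_bclr() above are how this port touches on-chip registers. A minimal sketch of how a caller might use them, assuming invented register addresses (DEMO_STATUS and DEMO_CONTROL are not taken from this header):

/* Illustration only: both register addresses below are assumed. */
#define DEMO_STATUS  0xffff40UL         /* assumed 8-bit status register  */
#define DEMO_CONTROL 0xffff41UL         /* assumed 8-bit control register */

static void demo_start(void)
{
        /* spin until bit 0 of the status register clears */
        while (ctrl_inb(DEMO_STATUS) & 0x01)
                ;
        /* constant bit number, so ctrl_bset() can emit the "bset #3" form */
        ctrl_bset(3, DEMO_CONTROL);
}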
diff --git a/arch/h8300/include/asm/irq.h b/arch/h8300/include/asm/irq.h
deleted file mode 100644 (file)
index 13d7c60..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef _H8300_IRQ_H_
-#define _H8300_IRQ_H_
-
-#include <asm/ptrace.h>
-
-#if defined(CONFIG_CPU_H8300H)
-#define NR_IRQS 64
-#define EXT_IRQ0 12
-#define EXT_IRQ1 13
-#define EXT_IRQ2 14
-#define EXT_IRQ3 15
-#define EXT_IRQ4 16
-#define EXT_IRQ5 17
-#define EXT_IRQ6 18
-#define EXT_IRQ7 19
-#define EXT_IRQS 5
-#define IER_REGS *(volatile unsigned char *)IER
-#endif
-#if defined(CONFIG_CPU_H8S)
-#define NR_IRQS 128
-#define EXT_IRQ0 16
-#define EXT_IRQ1 17
-#define EXT_IRQ2 18
-#define EXT_IRQ3 19
-#define EXT_IRQ4 20
-#define EXT_IRQ5 21
-#define EXT_IRQ6 22
-#define EXT_IRQ7 23
-#define EXT_IRQ8 24
-#define EXT_IRQ9 25
-#define EXT_IRQ10 26
-#define EXT_IRQ11 27
-#define EXT_IRQ12 28
-#define EXT_IRQ13 29
-#define EXT_IRQ14 30
-#define EXT_IRQ15 31
-#define EXT_IRQS 15
-
-#define IER_REGS *(volatile unsigned short *)IER
-#endif
-
-static __inline__ int irq_canonicalize(int irq)
-{
-       return irq;
-}
-
-typedef void (*h8300_vector)(void);
-
-#endif /* _H8300_IRQ_H_ */
diff --git a/arch/h8300/include/asm/irq_regs.h b/arch/h8300/include/asm/irq_regs.h
deleted file mode 100644 (file)
index 3dd9c0b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
diff --git a/arch/h8300/include/asm/irqflags.h b/arch/h8300/include/asm/irqflags.h
deleted file mode 100644 (file)
index 9617cd5..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef _H8300_IRQFLAGS_H
-#define _H8300_IRQFLAGS_H
-
-static inline unsigned long arch_local_save_flags(void)
-{
-       unsigned long flags;
-       asm volatile ("stc ccr,%w0" : "=r" (flags));
-       return flags;
-}
-
-static inline void arch_local_irq_disable(void)
-{
-       asm volatile ("orc  #0x80,ccr" : : : "memory");
-}
-
-static inline void arch_local_irq_enable(void)
-{
-       asm volatile ("andc #0x7f,ccr" : : : "memory");
-}
-
-static inline unsigned long arch_local_irq_save(void)
-{
-       unsigned long flags = arch_local_save_flags();
-       arch_local_irq_disable();
-       return flags;
-}
-
-static inline void arch_local_irq_restore(unsigned long flags)
-{
-       asm volatile ("ldc %w0,ccr" : : "r" (flags) : "memory");
-}
-
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
-{
-       return (flags & 0x80) == 0x80;
-}
-
-static inline bool arch_irqs_disabled(void)
-{
-       return arch_irqs_disabled_flags(arch_local_save_flags());
-}
-
-#endif /* _H8300_IRQFLAGS_H */
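arch_local_irq_save() reads CCR and then sets the I bit, and arch_local_irq_restore() writes the saved CCR back. A short sketch of the usual save/restore pattern around a critical section (demo_counter is an assumed variable, not from this file):

static unsigned long demo_counter;              /* assumed shared state */

static void demo_increment(void)
{
        unsigned long flags;

        flags = arch_local_irq_save();          /* stc ccr, then orc #0x80 */
        demo_counter++;                         /* not interruptible here */
        arch_local_irq_restore(flags);          /* ldc restores the old I bit */
}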
diff --git a/arch/h8300/include/asm/kdebug.h b/arch/h8300/include/asm/kdebug.h
deleted file mode 100644 (file)
index 6ece1b0..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kdebug.h>
diff --git a/arch/h8300/include/asm/kmap_types.h b/arch/h8300/include/asm/kmap_types.h
deleted file mode 100644 (file)
index be12a71..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_H8300_KMAP_TYPES_H
-#define _ASM_H8300_KMAP_TYPES_H
-
-#include <asm-generic/kmap_types.h>
-
-#endif
diff --git a/arch/h8300/include/asm/local.h b/arch/h8300/include/asm/local.h
deleted file mode 100644 (file)
index fdd4efe..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_LOCAL_H_
-#define _H8300_LOCAL_H_
-
-#include <asm-generic/local.h>
-
-#endif
diff --git a/arch/h8300/include/asm/local64.h b/arch/h8300/include/asm/local64.h
deleted file mode 100644 (file)
index 36c93b5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/h8300/include/asm/mc146818rtc.h b/arch/h8300/include/asm/mc146818rtc.h
deleted file mode 100644 (file)
index ab9d964..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Machine dependent access functions for RTC registers.
- */
-#ifndef _H8300_MC146818RTC_H
-#define _H8300_MC146818RTC_H
-
-/* empty include file to satisfy the include in genrtc.c/ide-geometry.c */
-
-#endif /* _H8300_MC146818RTC_H */
diff --git a/arch/h8300/include/asm/mmu_context.h b/arch/h8300/include/asm/mmu_context.h
deleted file mode 100644 (file)
index f44b730..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-#ifndef __H8300_MMU_CONTEXT_H
-#define __H8300_MMU_CONTEXT_H
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm-generic/mm_hooks.h>
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-static inline int
-init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-       // mm->context = virt_to_phys(mm->pgd);
-       return(0);
-}
-
-#define destroy_context(mm)            do { } while(0)
-#define deactivate_mm(tsk,mm)           do { } while(0)
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
-{
-}
-
-static inline void activate_mm(struct mm_struct *prev_mm,
-                              struct mm_struct *next_mm)
-{
-}
-
-#endif
diff --git a/arch/h8300/include/asm/mutex.h b/arch/h8300/include/asm/mutex.h
deleted file mode 100644 (file)
index 458c1f7..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/h8300/include/asm/page.h b/arch/h8300/include/asm/page.h
deleted file mode 100644 (file)
index 837381a..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _H8300_PAGE_H
-#define _H8300_PAGE_H
-
-/* PAGE_SHIFT determines the page size */
-
-#define PAGE_SHIFT     (12)
-#define PAGE_SIZE      (1UL << PAGE_SHIFT)
-#define PAGE_MASK      (~(PAGE_SIZE-1))
-
-#include <asm/setup.h>
-
-#ifndef __ASSEMBLY__
-#define get_user_page(vaddr)           __get_free_page(GFP_KERNEL)
-#define free_user_page(page, addr)     free_page(addr)
-
-#define clear_page(page)       memset((page), 0, PAGE_SIZE)
-#define copy_page(to,from)     memcpy((to), (from), PAGE_SIZE)
-
-#define clear_user_page(page, vaddr, pg)       clear_page(page)
-#define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
-
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
-       alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd[16]; } pmd_t;
-typedef struct { unsigned long pgd; } pgd_t;
-typedef struct { unsigned long pgprot; } pgprot_t;
-typedef struct page *pgtable_t;
-
-#define pte_val(x)     ((x).pte)
-#define pmd_val(x)     ((&x)->pmd[0])
-#define pgd_val(x)     ((x).pgd)
-#define pgprot_val(x)  ((x).pgprot)
-
-#define __pte(x)       ((pte_t) { (x) } )
-#define __pmd(x)       ((pmd_t) { (x) } )
-#define __pgd(x)       ((pgd_t) { (x) } )
-#define __pgprot(x)    ((pgprot_t) { (x) } )
-
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
-#endif /* !__ASSEMBLY__ */
-
-#include <asm/page_offset.h>
-
-#define PAGE_OFFSET            (PAGE_OFFSET_RAW)
-
-#ifndef __ASSEMBLY__
-
-#define __pa(vaddr)            virt_to_phys(vaddr)
-#define __va(paddr)            phys_to_virt((unsigned long)paddr)
-
-#define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn)       __va((pfn) << PAGE_SHIFT)
-
-#define MAP_NR(addr)           (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
-#define virt_to_page(addr)     (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
-#define page_to_virt(page)     ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
-#define pfn_valid(page)                (page < max_mapnr)
-
-#define ARCH_PFN_OFFSET                (PAGE_OFFSET >> PAGE_SHIFT)
-
-#define        virt_addr_valid(kaddr)  (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
-                               ((void *)(kaddr) < (void *)memory_end))
-
-#endif /* __ASSEMBLY__ */
-
-#include <asm-generic/memory_model.h>
-#include <asm-generic/getorder.h>
-
-#endif /* _H8300_PAGE_H */
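With PAGE_OFFSET_RAW equal to 0, the virt/phys and pfn conversions above reduce to plain shifts. A standalone sketch of that arithmetic, compilable on its own (the sample address is assumed):

#include <stdio.h>

#define DEMO_PAGE_SHIFT  12
#define DEMO_PAGE_OFFSET 0UL            /* PAGE_OFFSET_RAW is 0 on this port */

/* virt -> pfn -> virt round trip, mirroring virt_to_pfn()/pfn_to_virt() */
int main(void)
{
        unsigned long vaddr = 0x400000UL;                       /* assumed */
        unsigned long pfn   = (vaddr - DEMO_PAGE_OFFSET) >> DEMO_PAGE_SHIFT;
        unsigned long back  = (pfn << DEMO_PAGE_SHIFT) + DEMO_PAGE_OFFSET;

        printf("vaddr=%#lx pfn=%lu back=%#lx\n", vaddr, pfn, back);
        return 0;
}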
diff --git a/arch/h8300/include/asm/page_offset.h b/arch/h8300/include/asm/page_offset.h
deleted file mode 100644 (file)
index f870646..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-
-#define PAGE_OFFSET_RAW                0x00000000
-
diff --git a/arch/h8300/include/asm/param.h b/arch/h8300/include/asm/param.h
deleted file mode 100644 (file)
index c3909e7..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _H8300_PARAM_H
-#define _H8300_PARAM_H
-
-#include <uapi/asm/param.h>
-
-#define HZ             CONFIG_HZ
-#define        USER_HZ         HZ
-#define        CLOCKS_PER_SEC  (USER_HZ)
-#endif /* _H8300_PARAM_H */
diff --git a/arch/h8300/include/asm/pci.h b/arch/h8300/include/asm/pci.h
deleted file mode 100644 (file)
index 0b2acaa..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_H8300_PCI_H
-#define _ASM_H8300_PCI_H
-
-/*
- * asm-h8300/pci.h - H8/300 specific PCI declarations.
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-#define pcibios_assign_all_busses()    0
-
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
-       /* We don't do dynamic PCI IRQ allocation */
-}
-
-#define PCI_DMA_BUS_IS_PHYS    (1)
-
-#endif /* _ASM_H8300_PCI_H */
diff --git a/arch/h8300/include/asm/percpu.h b/arch/h8300/include/asm/percpu.h
deleted file mode 100644 (file)
index 72c03e3..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ARCH_H8300_PERCPU__
-#define __ARCH_H8300_PERCPU__
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ARCH_H8300_PERCPU__ */
diff --git a/arch/h8300/include/asm/pgalloc.h b/arch/h8300/include/asm/pgalloc.h
deleted file mode 100644 (file)
index c2e89a2..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _H8300_PGALLOC_H
-#define _H8300_PGALLOC_H
-
-#include <asm/setup.h>
-
-#define check_pgt_cache()      do { } while (0)
-
-#endif /* _H8300_PGALLOC_H */
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
deleted file mode 100644 (file)
index 7ca20f8..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-#ifndef _H8300_PGTABLE_H
-#define _H8300_PGTABLE_H
-
-#include <asm-generic/4level-fixup.h>
-
-#include <linux/slab.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/io.h>
-
-#define pgd_present(pgd)     (1)       /* pages are always present on NO_MM */
-#define pgd_none(pgd)          (0)
-#define pgd_bad(pgd)           (0)
-#define pgd_clear(pgdp)
-#define kern_addr_valid(addr)  (1)
-#define        pmd_offset(a, b)        ((void *)0)
-#define pmd_none(pmd)           (1)
-#define pgd_offset_k(address)  ((pgd_t *)0)
-#define pte_offset_kernel(dir, address) ((pte_t *)0)
-
-#define PAGE_NONE              __pgprot(0)    /* these mean nothing to NO_MM */
-#define PAGE_SHARED            __pgprot(0)    /* these mean nothing to NO_MM */
-#define PAGE_COPY              __pgprot(0)    /* these mean nothing to NO_MM */
-#define PAGE_READONLY  __pgprot(0)    /* these mean nothing to NO_MM */
-#define PAGE_KERNEL            __pgprot(0)    /* these mean nothing to NO_MM */
-
-extern void paging_init(void);
-#define swapper_pg_dir ((pgd_t *) 0)
-
-#define __swp_type(x)          (0)
-#define __swp_offset(x)                (0)
-#define __swp_entry(typ,off)   ((swp_entry_t) { ((typ) | ((off) << 7)) })
-#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
-
-static inline int pte_file(pte_t pte) { return 0; }
-
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-#define ZERO_PAGE(vaddr)       (virt_to_page(0))
-
-/*
- * These would be in other places but having them here reduces the diffs.
- */
-extern unsigned int kobjsize(const void *objp);
-extern int is_in_rom(unsigned long);
-
-/*
- * No page table caches to initialise
- */
-#define pgtable_cache_init()   do { } while (0)
-
-/*
- * All 32bit addresses are effectively valid for vmalloc...
- * Sort of meaningless for non-VM targets.
- */
-#define        VMALLOC_START   0
-#define        VMALLOC_END     0xffffffff
-
-#define arch_enter_lazy_cpu_mode()    do {} while (0)
-
-#include <asm-generic/pgtable.h>
-
-#endif /* _H8300_PGTABLE_H */
diff --git a/arch/h8300/include/asm/processor.h b/arch/h8300/include/asm/processor.h
deleted file mode 100644 (file)
index 4b0ca49..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * include/asm-h8300/processor.h
- *
- * Copyright (C) 2002 Yoshinori Sato
- *
- * Based on: linux/asm-m68nommu/processor.h
- *
- * Copyright (C) 1995 Hamish Macdonald
- */
-
-#ifndef __ASM_H8300_PROCESSOR_H
-#define __ASM_H8300_PROCESSOR_H
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr() ({ __label__ _l; _l: &&_l;})
-
-#include <linux/compiler.h>
-#include <asm/segment.h>
-#include <asm/fpu.h>
-#include <asm/ptrace.h>
-#include <asm/current.h>
-
-static inline unsigned long rdusp(void) {
-       extern unsigned int     sw_usp;
-       return(sw_usp);
-}
-
-static inline void wrusp(unsigned long usp) {
-       extern unsigned int     sw_usp;
-       sw_usp = usp;
-}
-
-/*
- * User space process size: the full 4GB 32-bit address space. This is
- * hardcoded into a few places, so don't change it unless you know what
- * you are doing.
- */
-#define TASK_SIZE      (0xFFFFFFFFUL)
-
-#ifdef __KERNEL__
-#define STACK_TOP      TASK_SIZE
-#define STACK_TOP_MAX  STACK_TOP
-#endif
-
-/*
- * This decides where the kernel will search for a free chunk of vm
- * space during mmap's. We won't be using it
- */
-#define TASK_UNMAPPED_BASE     0
-
-struct thread_struct {
-       unsigned long  ksp;             /* kernel stack pointer */
-       unsigned long  usp;             /* user stack pointer */
-       unsigned long  ccr;             /* saved status register */
-       unsigned long  esp0;            /* points to SR of stack frame */
-       struct {
-               unsigned short *addr;
-               unsigned short inst;
-       } breakinfo;
-};
-
-#define INIT_THREAD  {                                         \
-       .ksp  = sizeof(init_stack) + (unsigned long)init_stack, \
-       .usp  = 0,                                              \
-       .ccr  = PS_S,                                           \
-       .esp0 = 0,                                              \
-       .breakinfo = {                                          \
-               .addr = (unsigned short *)-1,                   \
-               .inst = 0                                       \
-       }                                                       \
-}
-
-/*
- * Do necessary setup to start up a newly executed thread.
- *
- * pass the data segment into user programs if it exists;
- * it can't hurt anything as far as I can tell
- */
-#if defined(__H8300H__)
-#define start_thread(_regs, _pc, _usp)                         \
-do {                                                           \
-       (_regs)->pc = (_pc);                                    \
-       (_regs)->ccr = 0x00;       /* clear all flags */        \
-       (_regs)->er5 = current->mm->start_data; /* GOT base */  \
-       wrusp((unsigned long)(_usp) - sizeof(unsigned long)*3); \
-} while(0)
-#endif
-#if defined(__H8300S__)
-#define start_thread(_regs, _pc, _usp)                         \
-do {                                                           \
-       (_regs)->pc = (_pc);                                    \
-       (_regs)->ccr = 0x00;       /* clear kernel flag */      \
-       (_regs)->exr = 0x78;       /* enable all interrupts */  \
-       (_regs)->er5 = current->mm->start_data; /* GOT base */  \
-       /* 14 = space for retaddr(4), vector(4), er0(4) and ext(2) on stack */ \
-       wrusp(((unsigned long)(_usp)) - 14);                    \
-} while(0)
-#endif
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-
-/* Free all resources held by a thread. */
-static inline void release_thread(struct task_struct *dead_task)
-{
-}
-
-/*
- * Free current thread data structures etc..
- */
-static inline void exit_thread(void)
-{
-}
-
-/*
- * Return saved PC of a blocked thread.
- */
-unsigned long thread_saved_pc(struct task_struct *tsk);
-unsigned long get_wchan(struct task_struct *p);
-
-#define        KSTK_EIP(tsk)   \
-    ({                 \
-       unsigned long eip = 0;   \
-       if ((tsk)->thread.esp0 > PAGE_SIZE && \
-           MAP_NR((tsk)->thread.esp0) < max_mapnr) \
-             eip = ((struct pt_regs *) (tsk)->thread.esp0)->pc; \
-       eip; })
-#define        KSTK_ESP(tsk)   ((tsk) == current ? rdusp() : (tsk)->thread.usp)
-
-#define cpu_relax()    barrier()
-
-#define HARD_RESET_NOW() ({            \
-        local_irq_disable();           \
-        asm("jmp @@0");                        \
-})
-
-#endif
diff --git a/arch/h8300/include/asm/ptrace.h b/arch/h8300/include/asm/ptrace.h
deleted file mode 100644 (file)
index c1826b9..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef _H8300_PTRACE_H
-#define _H8300_PTRACE_H
-
-#include <uapi/asm/ptrace.h>
-
-#ifndef __ASSEMBLY__
-#if defined(CONFIG_CPU_H8S)
-#endif
-#ifndef PS_S
-#define PS_S  (0x10)
-#endif
-
-#if defined(__H8300H__)
-#define H8300_REGS_NO 11
-#endif
-#if defined(__H8300S__)
-#define H8300_REGS_NO 12
-#endif
-
-/* Find the stack offset for a register, relative to thread.esp0. */
-#define PT_REG(reg)    ((long)&((struct pt_regs *)0)->reg)
-
-#define arch_has_single_step() (1)
-
-#define user_mode(regs) (!((regs)->ccr & PS_S))
-#define instruction_pointer(regs) ((regs)->pc)
-#define profile_pc(regs) instruction_pointer(regs)
-#define current_pt_regs() ((struct pt_regs *) \
-       (THREAD_SIZE + (unsigned long)current_thread_info()) - 1)
-#define signal_pt_regs() ((struct pt_regs *)current->thread.esp0)
-#define current_user_stack_pointer() rdusp()
-#endif /* __ASSEMBLY__ */
-#endif /* _H8300_PTRACE_H */
diff --git a/arch/h8300/include/asm/regs267x.h b/arch/h8300/include/asm/regs267x.h
deleted file mode 100644 (file)
index 1bff731..0000000
+++ /dev/null
@@ -1,336 +0,0 @@
-/* Internal peripheral register address definitions */
-/* CPU: H8S/267x                                     */
-
-#if !defined(__REGS_H8S267x__)
-#define __REGS_H8S267x__ 
-
-#if defined(__KERNEL__)
-
-#define DASTCR 0xFEE01A
-#define DADR0  0xFFFFA4
-#define DADR1  0xFFFFA5
-#define DACR01 0xFFFFA6
-#define DADR2  0xFFFFA8
-#define DADR3  0xFFFFA9
-#define DACR23 0xFFFFAA
-
-#define ADDRA  0xFFFF90
-#define ADDRAH 0xFFFF90
-#define ADDRAL 0xFFFF91
-#define ADDRB  0xFFFF92
-#define ADDRBH 0xFFFF92
-#define ADDRBL 0xFFFF93
-#define ADDRC  0xFFFF94
-#define ADDRCH 0xFFFF94
-#define ADDRCL 0xFFFF95
-#define ADDRD  0xFFFF96
-#define ADDRDH 0xFFFF96
-#define ADDRDL 0xFFFF97
-#define ADDRE  0xFFFF98
-#define ADDREH 0xFFFF98
-#define ADDREL 0xFFFF99
-#define ADDRF  0xFFFF9A
-#define ADDRFH 0xFFFF9A
-#define ADDRFL 0xFFFF9B
-#define ADDRG  0xFFFF9C
-#define ADDRGH 0xFFFF9C
-#define ADDRGL 0xFFFF9D
-#define ADDRH  0xFFFF9E
-#define ADDRHH 0xFFFF9E
-#define ADDRHL 0xFFFF9F
-
-#define ADCSR  0xFFFFA0
-#define ADCR   0xFFFFA1
-
-#define ABWCR  0xFFFEC0
-#define ASTCR  0xFFFEC1
-#define WTCRAH 0xFFFEC2
-#define WTCRAL 0xFFFEC3
-#define WTCRBH 0xFFFEC4
-#define WTCRBL 0xFFFEC5
-#define RDNCR  0xFFFEC6
-#define CSACRH 0xFFFEC8
-#define CSACRL 0xFFFEC9
-#define BROMCRH 0xFFFECA
-#define BROMCRL 0xFFFECB
-#define BCR    0xFFFECC
-#define DRAMCR 0xFFFED0
-#define DRACCR 0xFFFED2
-#define REFCR  0xFFFED4
-#define RTCNT  0xFFFED6
-#define RTCOR  0xFFFED7
-
-#define MAR0AH  0xFFFEE0
-#define MAR0AL  0xFFFEE2
-#define IOAR0A  0xFFFEE4
-#define ETCR0A  0xFFFEE6
-#define MAR0BH  0xFFFEE8
-#define MAR0BL  0xFFFEEA
-#define IOAR0B  0xFFFEEC
-#define ETCR0B  0xFFFEEE
-#define MAR1AH  0xFFFEF0
-#define MAR1AL  0xFFFEF2
-#define IOAR1A  0xFFFEF4
-#define ETCR1A  0xFFFEF6
-#define MAR1BH  0xFFFEF8
-#define MAR1BL  0xFFFEFA
-#define IOAR1B  0xFFFEFC
-#define ETCR1B  0xFFFEFE
-#define DMAWER  0xFFFF20
-#define DMATCR  0xFFFF21
-#define DMACR0A 0xFFFF22
-#define DMACR0B 0xFFFF23
-#define DMACR1A 0xFFFF24
-#define DMACR1B 0xFFFF25
-#define DMABCRH 0xFFFF26
-#define DMABCRL 0xFFFF27
-
-#define EDSAR0  0xFFFDC0
-#define EDDAR0  0xFFFDC4
-#define EDTCR0  0xFFFDC8
-#define EDMDR0  0xFFFDCC
-#define EDMDR0H 0xFFFDCC
-#define EDMDR0L 0xFFFDCD
-#define EDACR0  0xFFFDCE
-#define EDSAR1  0xFFFDD0
-#define EDDAR1  0xFFFDD4
-#define EDTCR1  0xFFFDD8
-#define EDMDR1  0xFFFDDC
-#define EDMDR1H 0xFFFDDC
-#define EDMDR1L 0xFFFDDD
-#define EDACR1  0xFFFDDE
-#define EDSAR2  0xFFFDE0
-#define EDDAR2  0xFFFDE4
-#define EDTCR2  0xFFFDE8
-#define EDMDR2  0xFFFDEC
-#define EDMDR2H 0xFFFDEC
-#define EDMDR2L 0xFFFDED
-#define EDACR2  0xFFFDEE
-#define EDSAR3  0xFFFDF0
-#define EDDAR3  0xFFFDF4
-#define EDTCR3  0xFFFDF8
-#define EDMDR3  0xFFFDFC
-#define EDMDR3H 0xFFFDFC
-#define EDMDR3L 0xFFFDFD
-#define EDACR3  0xFFFDFE
-
-#define IPRA  0xFFFE00
-#define IPRB  0xFFFE02
-#define IPRC  0xFFFE04
-#define IPRD  0xFFFE06
-#define IPRE  0xFFFE08
-#define IPRF  0xFFFE0A
-#define IPRG  0xFFFE0C
-#define IPRH  0xFFFE0E
-#define IPRI  0xFFFE10
-#define IPRJ  0xFFFE12
-#define IPRK  0xFFFE14
-#define ITSR  0xFFFE16
-#define SSIER 0xFFFE18
-#define ISCRH 0xFFFE1A
-#define ISCRL 0xFFFE1C
-
-#define INTCR 0xFFFF31
-#define IER   0xFFFF32
-#define IERH  0xFFFF32
-#define IERL  0xFFFF33
-#define ISR   0xFFFF34
-#define ISRH  0xFFFF34
-#define ISRL  0xFFFF35
-
-#define P1DDR 0xFFFE20
-#define P2DDR 0xFFFE21
-#define P3DDR 0xFFFE22
-#define P4DDR 0xFFFE23
-#define P5DDR 0xFFFE24
-#define P6DDR 0xFFFE25
-#define P7DDR 0xFFFE26
-#define P8DDR 0xFFFE27
-#define P9DDR 0xFFFE28
-#define PADDR 0xFFFE29
-#define PBDDR 0xFFFE2A
-#define PCDDR 0xFFFE2B
-#define PDDDR 0xFFFE2C
-#define PEDDR 0xFFFE2D
-#define PFDDR 0xFFFE2E
-#define PGDDR 0xFFFE2F
-#define PHDDR 0xFFFF74
-
-#define PFCR0 0xFFFE32
-#define PFCR1 0xFFFE33
-#define PFCR2 0xFFFE34
-
-#define PAPCR 0xFFFE36
-#define PBPCR 0xFFFE37
-#define PCPCR 0xFFFE38
-#define PDPCR 0xFFFE39
-#define PEPCR 0xFFFE3A
-
-#define P3ODR 0xFFFE3C
-#define PAODR 0xFFFE3D
-
-#define P1DR  0xFFFF60
-#define P2DR  0xFFFF61
-#define P3DR  0xFFFF62
-#define P4DR  0xFFFF63
-#define P5DR  0xFFFF64
-#define P6DR  0xFFFF65
-#define P7DR  0xFFFF66
-#define P8DR  0xFFFF67
-#define P9DR  0xFFFF68
-#define PADR  0xFFFF69
-#define PBDR  0xFFFF6A
-#define PCDR  0xFFFF6B
-#define PDDR  0xFFFF6C
-#define PEDR  0xFFFF6D
-#define PFDR  0xFFFF6E
-#define PGDR  0xFFFF6F
-#define PHDR  0xFFFF72
-
-#define PORT1 0xFFFF50
-#define PORT2 0xFFFF51
-#define PORT3 0xFFFF52
-#define PORT4 0xFFFF53
-#define PORT5 0xFFFF54
-#define PORT6 0xFFFF55
-#define PORT7 0xFFFF56
-#define PORT8 0xFFFF57
-#define PORT9 0xFFFF58
-#define PORTA 0xFFFF59
-#define PORTB 0xFFFF5A
-#define PORTC 0xFFFF5B
-#define PORTD 0xFFFF5C
-#define PORTE 0xFFFF5D
-#define PORTF 0xFFFF5E
-#define PORTG 0xFFFF5F
-#define PORTH 0xFFFF70
-
-#define PCR   0xFFFF46
-#define PMR   0xFFFF47
-#define NDERH 0xFFFF48
-#define NDERL 0xFFFF49
-#define PODRH 0xFFFF4A
-#define PODRL 0xFFFF4B
-#define NDRH1 0xFFFF4C
-#define NDRL1 0xFFFF4D
-#define NDRH2 0xFFFF4E
-#define NDRL2 0xFFFF4F
-
-#define SMR0  0xFFFF78
-#define BRR0  0xFFFF79
-#define SCR0  0xFFFF7A
-#define TDR0  0xFFFF7B
-#define SSR0  0xFFFF7C
-#define RDR0  0xFFFF7D
-#define SCMR0 0xFFFF7E
-#define SMR1  0xFFFF80
-#define BRR1  0xFFFF81
-#define SCR1  0xFFFF82
-#define TDR1  0xFFFF83
-#define SSR1  0xFFFF84
-#define RDR1  0xFFFF85
-#define SCMR1 0xFFFF86
-#define SMR2  0xFFFF88
-#define BRR2  0xFFFF89
-#define SCR2  0xFFFF8A
-#define TDR2  0xFFFF8B
-#define SSR2  0xFFFF8C
-#define RDR2  0xFFFF8D
-#define SCMR2 0xFFFF8E
-
-#define IRCR0 0xFFFE1E
-#define SEMR  0xFFFDA8
-
-#define MDCR    0xFFFF3E
-#define SYSCR   0xFFFF3D
-#define MSTPCRH 0xFFFF40
-#define MSTPCRL 0xFFFF41
-#define FLMCR1  0xFFFFC8
-#define FLMCR2  0xFFFFC9
-#define EBR1    0xFFFFCA
-#define EBR2    0xFFFFCB
-#define CTGARC_RAMCR   0xFFFECE
-#define SBYCR   0xFFFF3A
-#define SCKCR   0xFFFF3B
-#define PLLCR   0xFFFF45
-
-#define TSTR   0xFFFFC0
-#define TSNC   0xFFFFC1
-
-#define TCR0   0xFFFFD0
-#define TMDR0  0xFFFFD1
-#define TIORH0 0xFFFFD2
-#define TIORL0 0xFFFFD3
-#define TIER0  0xFFFFD4
-#define TSR0   0xFFFFD5
-#define TCNT0  0xFFFFD6
-#define GRA0   0xFFFFD8
-#define GRB0   0xFFFFDA
-#define GRC0   0xFFFFDC
-#define GRD0   0xFFFFDE
-#define TCR1   0xFFFFE0
-#define TMDR1  0xFFFFE1
-#define TIORH1 0xFFFFE2
-#define TIORL1 0xFFFFE3
-#define TIER1  0xFFFFE4
-#define TSR1   0xFFFFE5
-#define TCNT1  0xFFFFE6
-#define GRA1   0xFFFFE8
-#define GRB1   0xFFFFEA
-#define TCR2   0xFFFFF0
-#define TMDR2  0xFFFFF1
-#define TIORH2 0xFFFFF2
-#define TIORL2 0xFFFFF3
-#define TIER2  0xFFFFF4
-#define TSR2   0xFFFFF5
-#define TCNT2  0xFFFFF6
-#define GRA2   0xFFFFF8
-#define GRB2   0xFFFFFA
-#define TCR3   0xFFFE80
-#define TMDR3  0xFFFE81
-#define TIORH3 0xFFFE82
-#define TIORL3 0xFFFE83
-#define TIER3  0xFFFE84
-#define TSR3   0xFFFE85
-#define TCNT3  0xFFFE86
-#define GRA3   0xFFFE88
-#define GRB3   0xFFFE8A
-#define GRC3   0xFFFE8C
-#define GRD3   0xFFFE8E
-#define TCR4   0xFFFE90
-#define TMDR4  0xFFFE91
-#define TIORH4 0xFFFE92
-#define TIORL4 0xFFFE93
-#define TIER4  0xFFFE94
-#define TSR4   0xFFFE95
-#define TCNT4  0xFFFE96
-#define GRA4   0xFFFE98
-#define GRB4   0xFFFE9A
-#define TCR5   0xFFFEA0
-#define TMDR5  0xFFFEA1
-#define TIORH5 0xFFFEA2
-#define TIORL5 0xFFFEA3
-#define TIER5  0xFFFEA4
-#define TSR5   0xFFFEA5
-#define TCNT5  0xFFFEA6
-#define GRA5   0xFFFEA8
-#define GRB5   0xFFFEAA
-
-#define _8TCR0   0xFFFFB0
-#define _8TCR1   0xFFFFB1
-#define _8TCSR0  0xFFFFB2
-#define _8TCSR1  0xFFFFB3
-#define _8TCORA0 0xFFFFB4
-#define _8TCORA1 0xFFFFB5
-#define _8TCORB0 0xFFFFB6
-#define _8TCORB1 0xFFFFB7
-#define _8TCNT0  0xFFFFB8
-#define _8TCNT1  0xFFFFB9
-
-#define TCSR    0xFFFFBC
-#define TCNT    0xFFFFBD
-#define RSTCSRW 0xFFFFBE
-#define RSTCSRR 0xFFFFBF
-
-#endif /* __KERNEL__ */
-#endif /* __REGS_H8S267x__ */
diff --git a/arch/h8300/include/asm/regs306x.h b/arch/h8300/include/asm/regs306x.h
deleted file mode 100644 (file)
index 027dd63..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-/* Internal peripheral register address definitions */
-/* CPU: H8/306x                                      */
-
-#if !defined(__REGS_H8306x__)
-#define __REGS_H8306x__ 
-
-#if defined(__KERNEL__)
-
-#define DASTCR 0xFEE01A
-#define DADR0  0xFEE09C
-#define DADR1  0xFEE09D
-#define DACR   0xFEE09E
-
-#define ADDRAH 0xFFFFE0
-#define ADDRAL 0xFFFFE1
-#define ADDRBH 0xFFFFE2
-#define ADDRBL 0xFFFFE3
-#define ADDRCH 0xFFFFE4
-#define ADDRCL 0xFFFFE5
-#define ADDRDH 0xFFFFE6
-#define ADDRDL 0xFFFFE7
-#define ADCSR  0xFFFFE8
-#define ADCR   0xFFFFE9
-
-#define BRCR   0xFEE013
-#define ADRCR  0xFEE01E
-#define CSCR   0xFEE01F
-#define ABWCR  0xFEE020
-#define ASTCR  0xFEE021
-#define WCRH   0xFEE022
-#define WCRL   0xFEE023
-#define BCR    0xFEE024
-#define DRCRA  0xFEE026
-#define DRCRB  0xFEE027
-#define RTMCSR 0xFEE028
-#define RTCNT  0xFEE029
-#define RTCOR  0xFEE02A
-
-#define MAR0AR  0xFFFF20
-#define MAR0AE  0xFFFF21
-#define MAR0AH  0xFFFF22
-#define MAR0AL  0xFFFF23
-#define ETCR0AL 0xFFFF24
-#define ETCR0AH 0xFFFF25
-#define IOAR0A  0xFFFF26
-#define DTCR0A  0xFFFF27
-#define MAR0BR  0xFFFF28
-#define MAR0BE  0xFFFF29
-#define MAR0BH  0xFFFF2A
-#define MAR0BL  0xFFFF2B
-#define ETCR0BL 0xFFFF2C
-#define ETCR0BH 0xFFFF2D
-#define IOAR0B  0xFFFF2E
-#define DTCR0B  0xFFFF2F
-#define MAR1AR  0xFFFF30
-#define MAR1AE  0xFFFF31
-#define MAR1AH  0xFFFF32
-#define MAR1AL  0xFFFF33
-#define ETCR1AL 0xFFFF34
-#define ETCR1AH 0xFFFF35
-#define IOAR1A  0xFFFF36
-#define DTCR1A  0xFFFF37
-#define MAR1BR  0xFFFF38
-#define MAR1BE  0xFFFF39
-#define MAR1BH  0xFFFF3A
-#define MAR1BL  0xFFFF3B
-#define ETCR1BL 0xFFFF3C
-#define ETCR1BH 0xFFFF3D
-#define IOAR1B  0xFFFF3E
-#define DTCR1B  0xFFFF3F
-
-#define ISCR 0xFEE014
-#define IER  0xFEE015
-#define ISR  0xFEE016
-#define IPRA 0xFEE018
-#define IPRB 0xFEE019
-
-#define P1DDR 0xFEE000
-#define P2DDR 0xFEE001
-#define P3DDR 0xFEE002
-#define P4DDR 0xFEE003
-#define P5DDR 0xFEE004
-#define P6DDR 0xFEE005
-/*#define P7DDR 0xFEE006*/
-#define P8DDR 0xFEE007
-#define P9DDR 0xFEE008
-#define PADDR 0xFEE009
-#define PBDDR 0xFEE00A
-
-#define P1DR  0xFFFFD0
-#define P2DR  0xFFFFD1
-#define P3DR  0xFFFFD2
-#define P4DR  0xFFFFD3
-#define P5DR  0xFFFFD4
-#define P6DR  0xFFFFD5
-/*#define P7DR  0xFFFFD6*/
-#define P8DR  0xFFFFD7
-#define P9DR  0xFFFFD8
-#define PADR  0xFFFFD9
-#define PBDR  0xFFFFDA
-
-#define P2CR  0xFEE03C
-#define P4CR  0xFEE03E
-#define P5CR  0xFEE03F
-
-#define SMR0  0xFFFFB0
-#define BRR0  0xFFFFB1
-#define SCR0  0xFFFFB2
-#define TDR0  0xFFFFB3
-#define SSR0  0xFFFFB4
-#define RDR0  0xFFFFB5
-#define SCMR0 0xFFFFB6
-#define SMR1  0xFFFFB8
-#define BRR1  0xFFFFB9
-#define SCR1  0xFFFFBA
-#define TDR1  0xFFFFBB
-#define SSR1  0xFFFFBC
-#define RDR1  0xFFFFBD
-#define SCMR1 0xFFFFBE
-#define SMR2  0xFFFFC0
-#define BRR2  0xFFFFC1
-#define SCR2  0xFFFFC2
-#define TDR2  0xFFFFC3
-#define SSR2  0xFFFFC4
-#define RDR2  0xFFFFC5
-#define SCMR2 0xFFFFC6
-
-#define MDCR   0xFEE011
-#define SYSCR  0xFEE012
-#define DIVCR  0xFEE01B
-#define MSTCRH 0xFEE01C
-#define MSTCRL 0xFEE01D
-#define FLMCR1 0xFEE030
-#define FLMCR2 0xFEE031
-#define EBR1   0xFEE032
-#define EBR2   0xFEE033
-#define RAMCR  0xFEE077
-
-#define TSTR   0xFFFF60
-#define TSNC   0xFFFF61
-#define TMDR   0xFFFF62
-#define TOLR   0xFFFF63
-#define TISRA  0xFFFF64
-#define TISRB  0xFFFF65
-#define TISRC  0xFFFF66
-#define TCR0   0xFFFF68
-#define TIOR0  0xFFFF69
-#define TCNT0H 0xFFFF6A
-#define TCNT0L 0xFFFF6B
-#define GRA0H  0xFFFF6C
-#define GRA0L  0xFFFF6D
-#define GRB0H  0xFFFF6E
-#define GRB0L  0xFFFF6F
-#define TCR1   0xFFFF70
-#define TIOR1  0xFFFF71
-#define TCNT1H 0xFFFF72
-#define TCNT1L 0xFFFF73
-#define GRA1H  0xFFFF74
-#define GRA1L  0xFFFF75
-#define GRB1H  0xFFFF76
-#define GRB1L  0xFFFF77
-#define TCR3   0xFFFF78
-#define TIOR3  0xFFFF79
-#define TCNT3H 0xFFFF7A
-#define TCNT3L 0xFFFF7B
-#define GRA3H  0xFFFF7C
-#define GRA3L  0xFFFF7D
-#define GRB3H  0xFFFF7E
-#define GRB3L  0xFFFF7F
-
-#define _8TCR0  0xFFFF80
-#define _8TCR1  0xFFFF81
-#define _8TCSR0 0xFFFF82
-#define _8TCSR1 0xFFFF83
-#define TCORA0 0xFFFF84
-#define TCORA1 0xFFFF85
-#define TCORB0 0xFFFF86
-#define TCORB1 0xFFFF87
-#define _8TCNT0 0xFFFF88
-#define _8TCNT1 0xFFFF89
-
-#define _8TCR2  0xFFFF90
-#define _8TCR3  0xFFFF91
-#define _8TCSR2 0xFFFF92
-#define _8TCSR3 0xFFFF93
-#define TCORA2 0xFFFF94
-#define TCORA3 0xFFFF95
-#define TCORB2 0xFFFF96
-#define TCORB3 0xFFFF97
-#define _8TCNT2 0xFFFF98
-#define _8TCNT3 0xFFFF99
-
-#define TCSR   0xFFFF8C
-#define TCNT   0xFFFF8D
-#define RSTCSR 0xFFFF8F
-
-#define TPMR  0xFFFFA0
-#define TPCR  0xFFFFA1
-#define NDERB 0xFFFFA2
-#define NDERA 0xFFFFA3
-#define NDRB1 0xFFFFA4
-#define NDRA1 0xFFFFA5
-#define NDRB2 0xFFFFA6
-#define NDRA2 0xFFFFA7
-
-#define TCSR    0xFFFF8C
-#define TCNT    0xFFFF8D
-#define RSTCSRW 0xFFFF8E
-#define RSTCSRR 0xFFFF8F
-
-#endif /* __KERNEL__ */
-#endif /* __REGS_H8306x__ */
diff --git a/arch/h8300/include/asm/scatterlist.h b/arch/h8300/include/asm/scatterlist.h
deleted file mode 100644 (file)
index 82130ed..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SCATTERLIST_H
-#define _H8300_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* !(_H8300_SCATTERLIST_H) */
diff --git a/arch/h8300/include/asm/sections.h b/arch/h8300/include/asm/sections.h
deleted file mode 100644 (file)
index a81743e..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SECTIONS_H_
-#define _H8300_SECTIONS_H_
-
-#include <asm-generic/sections.h>
-
-#endif
diff --git a/arch/h8300/include/asm/segment.h b/arch/h8300/include/asm/segment.h
deleted file mode 100644 (file)
index b79a82d..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef _H8300_SEGMENT_H
-#define _H8300_SEGMENT_H
-
-/* define constants */
-#define USER_DATA     (1)
-#ifndef __USER_DS
-#define __USER_DS     (USER_DATA)
-#endif
-#define USER_PROGRAM  (2)
-#define SUPER_DATA    (3)
-#ifndef __KERNEL_DS
-#define __KERNEL_DS   (SUPER_DATA)
-#endif
-#define SUPER_PROGRAM (4)
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
-       unsigned long seg;
-} mm_segment_t;
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-#define USER_DS                MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS      MAKE_MM_SEG(__KERNEL_DS)
-
-/*
- * Get/set the SFC/DFC registers for MOVES instructions
- */
-
-static inline mm_segment_t get_fs(void)
-{
-    return USER_DS;
-}
-
-static inline mm_segment_t get_ds(void)
-{
-    /* return the supervisor data space code */
-    return KERNEL_DS;
-}
-
-static inline void set_fs(mm_segment_t val)
-{
-}
-
-#define segment_eq(a,b)        ((a).seg == (b).seg)
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _H8300_SEGMENT_H */
diff --git a/arch/h8300/include/asm/sh_bios.h b/arch/h8300/include/asm/sh_bios.h
deleted file mode 100644 (file)
index b6bb6e5..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* eCos HAL interface header */
-
-#ifndef SH_BIOS_H
-#define SH_BIOS_H
-
-#define HAL_IF_VECTOR_TABLE 0xfffe20
-#define CALL_IF_SET_CONSOLE_COMM  13
-#define QUERY_CURRENT -1
-#define MANGLER       -3
-
-/* Check whether the GDB stub is active */
-/* (suggested by Jonathan Larmour) */
-static int sh_bios_in_gdb_mode(void)
-{
-       static int gdb_active = -1;
-       if (gdb_active == -1) {
-               int (*set_console_comm)(int);
-               set_console_comm = ((void **)HAL_IF_VECTOR_TABLE)[CALL_IF_SET_CONSOLE_COMM];
-               gdb_active = (set_console_comm(QUERY_CURRENT) == MANGLER);
-       }
-       return gdb_active;
-}
-
-static void sh_bios_gdb_detach(void)
-{
-
-}
-
-#endif
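sh_bios_in_gdb_mode() calls through the eCos HAL vector table to ask whether the console channel is currently the GDB mangler, and caches the answer. A hedged sketch of a typical call site (demo_console_write() is an invented name):

/* Hypothetical caller: stay quiet while a GDB stub owns the channel. */
static void demo_console_write(const char *s)
{
        if (sh_bios_in_gdb_mode())
                return;         /* GDB mangler active: skip raw output */
        /* ... normal polled serial output would go here ... */
}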
diff --git a/arch/h8300/include/asm/shm.h b/arch/h8300/include/asm/shm.h
deleted file mode 100644 (file)
index ed6623c..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _H8300_SHM_H
-#define _H8300_SHM_H
-
-
-/* format of page table entries that correspond to shared memory pages
-   currently out in swap space (see also mm/swap.c):
-   bits 0-1 (PAGE_PRESENT) is  = 0
-   bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE
-   bits 31..9 are used like this:
-   bits 15..9 (SHM_ID) the id of the shared memory segment
-   bits 30..16 (SHM_IDX) the index of the page within the shared memory segment
-                    (actually only bits 25..16 get used since SHMMAX is so low)
-   bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach
-*/
-/* on the m68k both bits 0 and 1 must be zero */
-/* format on the sun3 is similar, but bits 30, 31 are set to zero and all
-   others are reduced by 2. --m */
-
-#ifndef CONFIG_SUN3
-#define SHM_ID_SHIFT   9
-#else
-#define SHM_ID_SHIFT   7
-#endif
-#define _SHM_ID_BITS   7
-#define SHM_ID_MASK    ((1<<_SHM_ID_BITS)-1)
-
-#define SHM_IDX_SHIFT  (SHM_ID_SHIFT+_SHM_ID_BITS)
-#define _SHM_IDX_BITS  15
-#define SHM_IDX_MASK   ((1<<_SHM_IDX_BITS)-1)
-
-#endif /* _H8300_SHM_H */
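The SHM_ID/SHM_IDX shifts and masks above describe how a swapped-out shared-memory page is packed into a page-table entry. A standalone sketch of that pack/unpack arithmetic using the same shift and width values (the sample id/idx are assumed):

#include <stdio.h>

#define DEMO_SHM_ID_SHIFT   9
#define DEMO_SHM_ID_BITS    7
#define DEMO_SHM_ID_MASK    ((1UL << DEMO_SHM_ID_BITS) - 1)
#define DEMO_SHM_IDX_SHIFT  (DEMO_SHM_ID_SHIFT + DEMO_SHM_ID_BITS)
#define DEMO_SHM_IDX_BITS   15
#define DEMO_SHM_IDX_MASK   ((1UL << DEMO_SHM_IDX_BITS) - 1)

int main(void)
{
        unsigned long id = 5, idx = 123;        /* assumed values */
        unsigned long entry = (id << DEMO_SHM_ID_SHIFT) |
                              (idx << DEMO_SHM_IDX_SHIFT);

        printf("id=%lu idx=%lu\n",
               (entry >> DEMO_SHM_ID_SHIFT) & DEMO_SHM_ID_MASK,
               (entry >> DEMO_SHM_IDX_SHIFT) & DEMO_SHM_IDX_MASK);
        return 0;
}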
diff --git a/arch/h8300/include/asm/shmparam.h b/arch/h8300/include/asm/shmparam.h
deleted file mode 100644 (file)
index d186395..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SHMPARAM_H
-#define _H8300_SHMPARAM_H
-
-#define        SHMLBA PAGE_SIZE                 /* attach addr a multiple of this */
-
-#endif /* _H8300_SHMPARAM_H */
diff --git a/arch/h8300/include/asm/signal.h b/arch/h8300/include/asm/signal.h
deleted file mode 100644 (file)
index 6341e36..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _H8300_SIGNAL_H
-#define _H8300_SIGNAL_H
-
-#include <uapi/asm/signal.h>
-
-/* Most things should be clean enough to redefine this at will, if care
-   is taken to make libc match.  */
-
-#define _NSIG          64
-#define _NSIG_BPW      32
-#define _NSIG_WORDS    (_NSIG / _NSIG_BPW)
-
-typedef unsigned long old_sigset_t;            /* at least 32 bits */
-
-typedef struct {
-       unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-#define __ARCH_HAS_SA_RESTORER
-
-#include <asm/sigcontext.h>
-#undef __HAVE_ARCH_SIG_BITOPS
-
-#endif /* _H8300_SIGNAL_H */
diff --git a/arch/h8300/include/asm/smp.h b/arch/h8300/include/asm/smp.h
deleted file mode 100644 (file)
index 9e9bd7e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-/* nothing required here yet */
diff --git a/arch/h8300/include/asm/spinlock.h b/arch/h8300/include/asm/spinlock.h
deleted file mode 100644 (file)
index d5407fa..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __H8300_SPINLOCK_H
-#define __H8300_SPINLOCK_H
-
-#error "H8/300 doesn't do SMP yet"
-
-#endif
diff --git a/arch/h8300/include/asm/string.h b/arch/h8300/include/asm/string.h
deleted file mode 100644 (file)
index ca50348..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _H8300_STRING_H_
-#define _H8300_STRING_H_
-
-#ifdef __KERNEL__ /* only set these up for kernel code */
-
-#include <asm/setup.h>
-#include <asm/page.h>
-
-#define __HAVE_ARCH_MEMSET
-extern void * memset(void * s, int c, size_t count);
-
-#define __HAVE_ARCH_MEMCPY
-extern void * memcpy(void *d, const void *s, size_t count);
-
-#else /* KERNEL */
-
-/*
- *     let user libraries deal with these;
- *     IMHO the kernel has no place defining these functions for user apps
- */
-
-#define __HAVE_ARCH_STRCPY 1
-#define __HAVE_ARCH_STRNCPY 1
-#define __HAVE_ARCH_STRCAT 1
-#define __HAVE_ARCH_STRNCAT 1
-#define __HAVE_ARCH_STRCMP 1
-#define __HAVE_ARCH_STRNCMP 1
-#define __HAVE_ARCH_STRNICMP 1
-#define __HAVE_ARCH_STRCHR 1
-#define __HAVE_ARCH_STRRCHR 1
-#define __HAVE_ARCH_STRSTR 1
-#define __HAVE_ARCH_STRLEN 1
-#define __HAVE_ARCH_STRNLEN 1
-#define __HAVE_ARCH_MEMSET 1
-#define __HAVE_ARCH_MEMCPY 1
-#define __HAVE_ARCH_MEMMOVE 1
-#define __HAVE_ARCH_MEMSCAN 1
-#define __HAVE_ARCH_MEMCMP 1
-#define __HAVE_ARCH_MEMCHR 1
-#define __HAVE_ARCH_STRTOK 1
-
-#endif /* KERNEL */
-
-#endif /* _H8300_STRING_H_ */
diff --git a/arch/h8300/include/asm/switch_to.h b/arch/h8300/include/asm/switch_to.h
deleted file mode 100644 (file)
index cdd8731..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef _H8300_SWITCH_TO_H
-#define _H8300_SWITCH_TO_H
-
-/*
- * switch_to(prev, next, last) should switch tasks to "next", first checking
- * that "next" isn't the current task, in which case it does nothing.  This
- * also clears the TS-flag if the task we switched to used the
- * math co-processor most recently.
- */
-/*
- * switch_to() saves the extra registers, that are not saved
- * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
- * a0-a1. Some of these are used by schedule() and its predecessors
- * and so we might see unexpected behaviors when a task returns
- * with unexpected register values.
- *
- * syscall stores these registers itself and none of them are used
- * by syscall after the function in the syscall has been called.
- *
- * Beware that resume now expects *next to be in d1 and the offset of
- * tss to be in a1. This saves a few instructions as we no longer have
- * to push them onto the stack and read them back right after.
- *
- * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
- *
- * Changed 96/09/19 by Andreas Schwab
- * pass prev in a0, next in a1, offset of tss in d1, and whether
- * the mm structures are shared in d2 (to avoid atc flushing).
- *
- * H8/300 Porting 2002/09/04 Yoshinori Sato
- */
-
-asmlinkage void resume(void);
-#define switch_to(prev,next,last) {                         \
-  void *_last;                                             \
-  __asm__ __volatile__(                                            \
-                       "mov.l  %1, er0\n\t"                \
-                       "mov.l  %2, er1\n\t"                \
-                        "mov.l  %3, er2\n\t"                \
-                       "jsr @_resume\n\t"                  \
-                        "mov.l  er2,%0\n\t"                 \
-                      : "=r" (_last)                       \
-                      : "r" (&(prev->thread)),             \
-                        "r" (&(next->thread)),             \
-                         "g" (prev)                         \
-                      : "cc", "er0", "er1", "er2", "er3"); \
-  (last) = _last;                                          \
-}
-
-#endif /* _H8300_SWITCH_TO_H */
diff --git a/arch/h8300/include/asm/target_time.h b/arch/h8300/include/asm/target_time.h
deleted file mode 100644 (file)
index 9f2a9aa..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-extern int platform_timer_setup(void (*timer_int)(int, void *, struct pt_regs *));
-extern void platform_timer_eoi(void);
-extern void platform_gettod(unsigned int *year, unsigned int *mon, unsigned int *day, 
-                            unsigned int *hour, unsigned int *min, unsigned int *sec);
diff --git a/arch/h8300/include/asm/termios.h b/arch/h8300/include/asm/termios.h
deleted file mode 100644 (file)
index 93a63df..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-#ifndef _H8300_TERMIOS_H
-#define _H8300_TERMIOS_H
-
-#include <uapi/asm/termios.h>
-
-/*     intr=^C         quit=^|         erase=del       kill=^U
-       eof=^D          vtime=\0        vmin=\1         sxtc=\0
-       start=^Q        stop=^S         susp=^Z         eol=\0
-       reprint=^R      discard=^U      werase=^W       lnext=^V
-       eol2=\0
-*/
-#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
-
-/*
- * Translate a "termio" structure into a "termios". Ugh.
- */
-#define user_termio_to_kernel_termios(termios, termio) \
-({ \
-       unsigned short tmp; \
-       get_user(tmp, &(termio)->c_iflag); \
-       (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \
-       get_user(tmp, &(termio)->c_oflag); \
-       (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \
-       get_user(tmp, &(termio)->c_cflag); \
-       (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \
-       get_user(tmp, &(termio)->c_lflag); \
-       (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \
-       get_user((termios)->c_line, &(termio)->c_line); \
-       copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
-})
-
-/*
- * Translate a "termios" structure into a "termio". Ugh.
- */
-#define kernel_termios_to_user_termio(termio, termios) \
-({ \
-       put_user((termios)->c_iflag, &(termio)->c_iflag); \
-       put_user((termios)->c_oflag, &(termio)->c_oflag); \
-       put_user((termios)->c_cflag, &(termio)->c_cflag); \
-       put_user((termios)->c_lflag, &(termio)->c_lflag); \
-       put_user((termios)->c_line,  &(termio)->c_line); \
-       copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
-})
-
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2))
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2))
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
-
-#endif /* _H8300_TERMIOS_H */
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
deleted file mode 100644 (file)
index ec2f777..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/* thread_info.h: h8300 low-level thread information
- * adapted from the i386 and PPC versions by Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * Copyright (C) 2002  David Howells (dhowells@redhat.com)
- * - Incorporating suggestions made by Linus Torvalds and Dave Miller
- */
-
-#ifndef _ASM_THREAD_INFO_H
-#define _ASM_THREAD_INFO_H
-
-#include <asm/page.h>
-
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-
-/*
- * low level task data.
- * If you change this, change the TI_* offsets below to match.
- */
-struct thread_info {
-       struct task_struct *task;               /* main task structure */
-       struct exec_domain *exec_domain;        /* execution domain */
-       unsigned long      flags;               /* low level flags */
-       int                cpu;                 /* cpu we're on */
-       int                preempt_count;       /* 0 => preemptable, <0 => BUG */
-       struct restart_block restart_block;
-};
-
-/*
- * macros/functions for gaining access to the thread information structure
- */
-#define INIT_THREAD_INFO(tsk)                  \
-{                                              \
-       .task =         &tsk,                   \
-       .exec_domain =  &default_exec_domain,   \
-       .flags =        0,                      \
-       .cpu =          0,                      \
-       .preempt_count = INIT_PREEMPT_COUNT,    \
-       .restart_block  = {                     \
-               .fn = do_no_restart_syscall,    \
-       },                                      \
-}
-
-#define init_thread_info       (init_thread_union.thread_info)
-#define init_stack             (init_thread_union.stack)
-
-
-/*
- * Size of kernel stack for each process. This must be a power of 2...
- */
-#define THREAD_SIZE_ORDER      1
-#define THREAD_SIZE            8192    /* 2 pages */
-
-
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-       struct thread_info *ti;
-       __asm__(
-               "mov.l  sp, %0 \n\t"
-               "and.l  %1, %0"
-               : "=&r"(ti)
-               : "i" (~(THREAD_SIZE-1))
-               );
-       return ti;
-}
-
-#endif /* __ASSEMBLY__ */
-
-/*
- * Offsets in thread_info structure, used in assembly code
- */
-#define TI_TASK                0
-#define TI_EXECDOMAIN  4
-#define TI_FLAGS       8
-#define TI_CPU         12
-#define TI_PRE_COUNT   16
-
-#define        PREEMPT_ACTIVE  0x4000000
-
-/*
- * thread information flag bit numbers
- */
-#define TIF_SYSCALL_TRACE      0       /* syscall trace active */
-#define TIF_SIGPENDING         1       /* signal pending */
-#define TIF_NEED_RESCHED       2       /* rescheduling necessary */
-#define TIF_MEMDIE             4       /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
-#define TIF_NOTIFY_RESUME      6       /* callback before returning to user */
-
-/* as above, but as bit values */
-#define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
-#define _TIF_SIGPENDING                (1<<TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
-#define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
-
-#define _TIF_WORK_MASK         (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
-                                _TIF_NOTIFY_RESUME)
-
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_THREAD_INFO_H */
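current_thread_info() depends on the kernel stack being THREAD_SIZE-aligned: masking the stack pointer with ~(THREAD_SIZE-1) lands on the thread_info at the base of the stack. The same masking shown as standalone arithmetic (the sample stack pointer value is assumed):

#include <stdio.h>

#define DEMO_THREAD_SIZE 8192UL         /* 2 pages, as in the header above */

int main(void)
{
        unsigned long sp   = 0x00403f2cUL;              /* assumed SP */
        unsigned long base = sp & ~(DEMO_THREAD_SIZE - 1);

        /* base is where struct thread_info would sit for this stack */
        printf("sp=%#lx thread_info=%#lx\n", sp, base);
        return 0;
}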
diff --git a/arch/h8300/include/asm/timer.h b/arch/h8300/include/asm/timer.h
deleted file mode 100644 (file)
index def8046..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __H8300_TIMER_H
-#define __H8300_TIMER_H
-
-void h8300_timer_tick(void);
-void h8300_timer_setup(void);
-void h8300_gettod(unsigned int *year, unsigned int *mon, unsigned int *day,
-                  unsigned int *hour, unsigned int *min, unsigned int *sec);
-
-#define TIMER_FREQ (CONFIG_CPU_CLOCK*10000) /* Timer input freq. */
-
-#define calc_param(cnt, div, rate, limit)                      \
-do {                                                           \
-       cnt = TIMER_FREQ / HZ;                                  \
-       for (div = 0; div < ARRAY_SIZE(divide_rate); div++) {   \
-               if (rate[div] == 0)                             \
-                       continue;                               \
-               if ((cnt / rate[div]) > limit)                  \
-                       break;                                  \
-       }                                                       \
-       if (div == ARRAY_SIZE(divide_rate))                     \
-               panic("Timer counter overflow");                \
-       cnt /= divide_rate[div];                                \
-} while(0)
-
-#endif
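calc_param() derives a reload count from TIMER_FREQ/HZ and scans a board-supplied divide_rate[] table for a usable prescaler, panicking if no entry qualifies. A standalone illustration of that kind of divider search with invented numbers (it does not reproduce the macro verbatim):

#include <stdio.h>

int main(void)
{
        /* invented values, not taken from any board support file */
        const unsigned long divide_rate[] = { 1, 8, 64, 8192 };
        const unsigned long nrates = sizeof(divide_rate) / sizeof(divide_rate[0]);
        const unsigned long timer_freq = 20000000;      /* assumed 20 MHz input   */
        const unsigned long hz = 100;                   /* assumed tick rate      */
        const unsigned long limit = 65535;              /* assumed 16-bit counter */
        unsigned long cnt = timer_freq / hz;            /* 200000 */
        unsigned long div;

        for (div = 0; div < nrates; div++)
                if (divide_rate[div] && cnt / divide_rate[div] <= limit)
                        break;
        if (div == nrates)
                printf("no usable divider\n");
        else
                printf("divider=%lu count=%lu\n",
                       divide_rate[div], cnt / divide_rate[div]);
        return 0;
}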
diff --git a/arch/h8300/include/asm/timex.h b/arch/h8300/include/asm/timex.h
deleted file mode 100644 (file)
index 23e6701..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * linux/include/asm-h8300/timex.h
- *
- * H8/300 architecture timex specifications
- */
-#ifndef _ASM_H8300_TIMEX_H
-#define _ASM_H8300_TIMEX_H
-
-#define CLOCK_TICK_RATE (CONFIG_CPU_CLOCK*1000/8192) /* Timer input freq. */
-
-typedef unsigned long cycles_t;
-extern short h8300_timer_count;
-
-static inline cycles_t get_cycles(void)
-{
-       return 0;
-}
-
-#endif
diff --git a/arch/h8300/include/asm/tlb.h b/arch/h8300/include/asm/tlb.h
deleted file mode 100644 (file)
index 7f07430..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __H8300_TLB_H__
-#define __H8300_TLB_H__
-
-#define tlb_flush(tlb) do { } while(0)
-
-#include <asm-generic/tlb.h>
-
-#endif
diff --git a/arch/h8300/include/asm/tlbflush.h b/arch/h8300/include/asm/tlbflush.h
deleted file mode 100644 (file)
index 41c148a..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _H8300_TLBFLUSH_H
-#define _H8300_TLBFLUSH_H
-
-/*
- * Copyright (C) 2000 Lineo, David McCullough <davidm@uclinux.org>
- * Copyright (C) 2000-2002, Greg Ungerer <gerg@snapgear.com>
- */
-
-#include <asm/setup.h>
-
-/*
- * flush all user-space atc entries.
- */
-static inline void __flush_tlb(void)
-{
-       BUG();
-}
-
-static inline void __flush_tlb_one(unsigned long addr)
-{
-       BUG();
-}
-
-#define flush_tlb() __flush_tlb()
-
-/*
- * flush all atc entries (both kernel and user-space entries).
- */
-static inline void flush_tlb_all(void)
-{
-       BUG();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-       BUG();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
-{
-       BUG();
-}
-
-static inline void flush_tlb_range(struct mm_struct *mm,
-                                  unsigned long start, unsigned long end)
-{
-       BUG();
-}
-
-static inline void flush_tlb_kernel_page(unsigned long addr)
-{
-       BUG();
-}
-
-#endif /* _H8300_TLBFLUSH_H */
diff --git a/arch/h8300/include/asm/topology.h b/arch/h8300/include/asm/topology.h
deleted file mode 100644 (file)
index fdc1219..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_H8300_TOPOLOGY_H
-#define _ASM_H8300_TOPOLOGY_H
-
-#include <asm-generic/topology.h>
-
-#endif /* _ASM_H8300_TOPOLOGY_H */
diff --git a/arch/h8300/include/asm/traps.h b/arch/h8300/include/asm/traps.h
deleted file mode 100644 (file)
index 41cf6be..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- *  linux/include/asm-h8300/traps.h
- *
- *  Copyright (C) 2003 Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#ifndef _H8300_TRAPS_H
-#define _H8300_TRAPS_H
-
-extern void system_call(void);
-extern void interrupt_entry(void);
-extern void trace_break(void);
-
-#define JMP_OP 0x5a000000
-#define JSR_OP 0x5e000000
-#define VECTOR(address) ((JMP_OP)|((unsigned long)address))
-#define REDIRECT(address) ((JSR_OP)|((unsigned long)address))
-
-#define TRACE_VEC 5
-
-#define TRAP0_VEC 8
-#define TRAP1_VEC 9
-#define TRAP2_VEC 10
-#define TRAP3_VEC 11
-
-#if defined(__H8300H__)
-#define NR_TRAPS 12
-#endif
-#if defined(__H8300S__)
-#define NR_TRAPS 16
-#endif
-
-#endif /* _H8300_TRAPS_H */
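VECTOR() and REDIRECT() build vector-table entries by OR-ing the JMP/JSR opcode byte into the top of a 24-bit handler address. A standalone sketch of that composition (the handler address is assumed):

#include <stdio.h>

#define DEMO_JMP_OP 0x5a000000UL
#define DEMO_JSR_OP 0x5e000000UL

int main(void)
{
        unsigned long handler = 0x001234UL;     /* assumed 24-bit address */

        printf("VECTOR   -> %#010lx\n", DEMO_JMP_OP | handler); /* jmp @aa:24 */
        printf("REDIRECT -> %#010lx\n", DEMO_JSR_OP | handler); /* jsr @aa:24 */
        return 0;
}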
diff --git a/arch/h8300/include/asm/types.h b/arch/h8300/include/asm/types.h
deleted file mode 100644 (file)
index c012707..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _H8300_TYPES_H
-#define _H8300_TYPES_H
-
-#include <uapi/asm/types.h>
-
-
-#define BITS_PER_LONG 32
-
-#endif /* _H8300_TYPES_H */
diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h
deleted file mode 100644 (file)
index 8725d1a..0000000
+++ /dev/null
@@ -1,163 +0,0 @@
-#ifndef __H8300_UACCESS_H
-#define __H8300_UACCESS_H
-
-/*
- * User space memory access functions
- */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-
-#include <asm/segment.h>
-
-#define VERIFY_READ    0
-#define VERIFY_WRITE   1
-
-/* We let the MMU do all checking */
-#define access_ok(type, addr, size) __access_ok((unsigned long)addr,size)
-static inline int __access_ok(unsigned long addr, unsigned long size)
-{
-#define        RANGE_CHECK_OK(addr, size, lower, upper) \
-       (((addr) >= (lower)) && (((addr) + (size)) < (upper)))
-
-       extern unsigned long _ramend;
-       return(RANGE_CHECK_OK(addr, size, 0L, (unsigned long)&_ramend));
-}
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
-       unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_exception_table(unsigned long);
-
-
-/*
- * These are the main single-value transfer routines.  They automatically
- * use the right size if we just have the right pointer type.
- */
-
-#define put_user(x, ptr)                               \
-({                                                     \
-    int __pu_err = 0;                                  \
-    typeof(*(ptr)) __pu_val = (x);                     \
-    switch (sizeof (*(ptr))) {                         \
-    case 1:                                            \
-    case 2:                                            \
-    case 4:                                            \
-       *(ptr) = (__pu_val);                            \
-       break;                                          \
-    case 8:                                            \
-       memcpy(ptr, &__pu_val, sizeof (*(ptr)));        \
-       break;                                          \
-    default:                                           \
-       __pu_err = __put_user_bad();                    \
-       break;                                          \
-    }                                                  \
-    __pu_err;                                          \
-})
-#define __put_user(x, ptr) put_user(x, ptr)
-
-extern int __put_user_bad(void);
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-
-#define __ptr(x) ((unsigned long *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-
-#define get_user(x, ptr)                                       \
-({                                                             \
-    int __gu_err = 0;                                          \
-    typeof(*(ptr)) __gu_val = *ptr;                            \
-    switch (sizeof(*(ptr))) {                                  \
-    case 1:                                                    \
-    case 2:                                                    \
-    case 4:                                                    \
-    case 8:                                                    \
-       break;                                                  \
-    default:                                                   \
-       __gu_err = __get_user_bad();                            \
-       break;                                                  \
-    }                                                          \
-    (x) = __gu_val;                                            \
-    __gu_err;                                                  \
-})
-#define __get_user(x, ptr) get_user(x, ptr)
-
-extern int __get_user_bad(void);
-
-#define copy_from_user(to, from, n)            (memcpy(to, from, n), 0)
-#define copy_to_user(to, from, n)              (memcpy(to, from, n), 0)
-
-#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
-#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
-
-#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
-
-/*
- * Copy a null terminated string from userspace.
- */
-
-static inline long
-strncpy_from_user(char *dst, const char *src, long count)
-{
-       char *tmp;
-       strncpy(dst, src, count);
-       for (tmp = dst; *tmp && count > 0; tmp++, count--)
-               ;
-       return(tmp - dst); /* DAVIDM should we count a NUL ?  check getname */
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-static inline long strnlen_user(const char *src, long n)
-{
-       return(strlen(src) + 1); /* DAVIDM make safer */
-}
-
-#define strlen_user(str) strnlen_user(str, 32767)
-
-/*
- * Zero Userspace
- */
-
-static inline unsigned long
-clear_user(void *to, unsigned long n)
-{
-       memset(to, 0, n);
-       return 0;
-}
-
-#define __clear_user   clear_user
-
-#endif /* _H8300_UACCESS_H */
diff --git a/arch/h8300/include/asm/ucontext.h b/arch/h8300/include/asm/ucontext.h
deleted file mode 100644 (file)
index 0bcf8f8..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _H8300_UCONTEXT_H
-#define _H8300_UCONTEXT_H
-
-struct ucontext {
-       unsigned long     uc_flags;
-       struct ucontext  *uc_link;
-       stack_t           uc_stack;
-       struct sigcontext uc_mcontext;
-       sigset_t          uc_sigmask;   /* mask last for extensibility */
-};
-
-#endif
diff --git a/arch/h8300/include/asm/unaligned.h b/arch/h8300/include/asm/unaligned.h
deleted file mode 100644 (file)
index b8d06c7..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_H8300_UNALIGNED_H
-#define _ASM_H8300_UNALIGNED_H
-
-#include <linux/unaligned/be_memmove.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned  __get_unaligned_be
-#define put_unaligned  __put_unaligned_be
-
-#endif /* _ASM_H8300_UNALIGNED_H */
diff --git a/arch/h8300/include/asm/unistd.h b/arch/h8300/include/asm/unistd.h
deleted file mode 100644 (file)
index ab671ec..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-#ifndef _ASM_H8300_UNISTD_H_
-#define _ASM_H8300_UNISTD_H_
-
-#include <uapi/asm/unistd.h>
-
-
-#define NR_syscalls 321
-
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_OLD_STAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_IPC
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
-#define __ARCH_WANT_SYS_OLD_MMAP
-#define __ARCH_WANT_SYS_OLD_SELECT
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_FORK
-#define __ARCH_WANT_SYS_VFORK
-#define __ARCH_WANT_SYS_CLONE
-
-#endif /* _ASM_H8300_UNISTD_H_ */
diff --git a/arch/h8300/include/asm/user.h b/arch/h8300/include/asm/user.h
deleted file mode 100644 (file)
index 14a9e18..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-#ifndef _H8300_USER_H
-#define _H8300_USER_H
-
-#include <asm/page.h>
-
-/* Core file format: The core file is written in such a way that gdb
-   can understand it and provide useful information to the user (under
-   linux we use the 'trad-core' bfd).  There are quite a number of
-   obstacles to being able to view the contents of the floating point
-   registers, and until these are solved you will not be able to view the
-   contents of them.  Actually, you can read in the core file and look at
-   the contents of the user struct to find out what the floating point
-   registers contain.
-   The actual file contents are as follows:
-   UPAGE: 1 page consisting of a user struct that tells gdb what is present
-   in the file.  Directly after this is a copy of the task_struct, which
-   is currently not used by gdb, but it may come in useful at some point.
-   All of the registers are stored as part of the upage.  The upage should
-   always be only one page.
-   DATA: The data area is stored.  We use current->end_text to
-   current->brk to pick up all of the user variables, plus any memory
-   that may have been malloced.  No attempt is made to determine if a page
-   is demand-zero or if a page is totally unused, we just cover the entire
-   range.  All of the addresses are rounded in such a way that an integral
-   number of pages is written.
-   STACK: We need the stack information in order to get a meaningful
-   backtrace.  We need to write the data from (esp) to
-   current->start_stack, so we round each of these off in order to be able
-   to write an integer number of pages.
-   The minimum core file size is 3 pages, or 12288 bytes.
-*/
-
-/* This is the old layout of "struct pt_regs" as of Linux 1.x, and
-   is still the layout used by user (the new pt_regs doesn't have
-   all registers). */
-struct user_regs_struct {
-       long er1,er2,er3,er4,er5,er6;
-       long er0;
-       long usp;
-       long orig_er0;
-       short ccr;
-       long pc;
-};
-
-       
-/* When the kernel dumps core, it starts by dumping the user struct -
-   this will be used by gdb to figure out where the data and stack segments
-   are within the file, and what virtual addresses to use. */
-struct user{
-/* We start with the registers, to mimic the way that "memory" is returned
-   from the ptrace(3,...) function.  */
-  struct user_regs_struct regs;        /* Where the registers are actually stored */
-/* ptrace does not yet supply these.  Someday.... */
-/* The rest of this junk is to help gdb figure out what goes where */
-  unsigned long int u_tsize;   /* Text segment size (pages). */
-  unsigned long int u_dsize;   /* Data segment size (pages). */
-  unsigned long int u_ssize;   /* Stack segment size (pages). */
-  unsigned long start_code;     /* Starting virtual address of text. */
-  unsigned long start_stack;   /* Starting virtual address of stack area.
-                                  This is actually the bottom of the stack,
-                                  the top of the stack is always found in the
-                                  esp register.  */
-  long int signal;                     /* Signal that caused the core dump. */
-  int reserved;                        /* No longer used */
-  unsigned long u_ar0;         /* Used by gdb to help find the values for */
-                               /* the registers. */
-  unsigned long magic;         /* To uniquely identify a core file */
-  char u_comm[32];             /* User command that was responsible */
-};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
-
-#endif
diff --git a/arch/h8300/include/asm/virtconvert.h b/arch/h8300/include/asm/virtconvert.h
deleted file mode 100644 (file)
index 19cfd62..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __H8300_VIRT_CONVERT__
-#define __H8300_VIRT_CONVERT__
-
-/*
- * Macros used for converting between virtual and physical mappings.
- */
-
-#ifdef __KERNEL__
-
-#include <asm/setup.h>
-#include <asm/page.h>
-
-#define phys_to_virt(vaddr)    ((void *) (vaddr))
-#define virt_to_phys(vaddr)    ((unsigned long) (vaddr))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-#endif
-#endif
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild
deleted file mode 100644 (file)
index 040178c..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-# UAPI Header export list
-include include/uapi/asm-generic/Kbuild.asm
-
-header-y += auxvec.h
-header-y += bitsperlong.h
-header-y += byteorder.h
-header-y += errno.h
-header-y += fcntl.h
-header-y += ioctl.h
-header-y += ioctls.h
-header-y += ipcbuf.h
-header-y += kvm_para.h
-header-y += mman.h
-header-y += msgbuf.h
-header-y += param.h
-header-y += poll.h
-header-y += posix_types.h
-header-y += ptrace.h
-header-y += resource.h
-header-y += sembuf.h
-header-y += setup.h
-header-y += shmbuf.h
-header-y += sigcontext.h
-header-y += siginfo.h
-header-y += signal.h
-header-y += socket.h
-header-y += sockios.h
-header-y += stat.h
-header-y += statfs.h
-header-y += swab.h
-header-y += termbits.h
-header-y += termios.h
-header-y += types.h
-header-y += unistd.h
diff --git a/arch/h8300/include/uapi/asm/auxvec.h b/arch/h8300/include/uapi/asm/auxvec.h
deleted file mode 100644 (file)
index 1d36fe3..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef __ASMH8300_AUXVEC_H
-#define __ASMH8300_AUXVEC_H
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/bitsperlong.h b/arch/h8300/include/uapi/asm/bitsperlong.h
deleted file mode 100644 (file)
index 6dc0bb0..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
diff --git a/arch/h8300/include/uapi/asm/byteorder.h b/arch/h8300/include/uapi/asm/byteorder.h
deleted file mode 100644 (file)
index 13539da..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_BYTEORDER_H
-#define _H8300_BYTEORDER_H
-
-#include <linux/byteorder/big_endian.h>
-
-#endif /* _H8300_BYTEORDER_H */
diff --git a/arch/h8300/include/uapi/asm/errno.h b/arch/h8300/include/uapi/asm/errno.h
deleted file mode 100644 (file)
index 0c2f564..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_ERRNO_H
-#define _H8300_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* _H8300_ERRNO_H */
diff --git a/arch/h8300/include/uapi/asm/fcntl.h b/arch/h8300/include/uapi/asm/fcntl.h
deleted file mode 100644 (file)
index 1952cb2..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _H8300_FCNTL_H
-#define _H8300_FCNTL_H
-
-#define O_DIRECTORY    040000  /* must be a directory */
-#define O_NOFOLLOW     0100000 /* don't follow links */
-#define O_DIRECT       0200000 /* direct disk access hint - currently ignored */
-#define O_LARGEFILE    0400000
-
-#include <asm-generic/fcntl.h>
-
-#endif /* _H8300_FCNTL_H */
diff --git a/arch/h8300/include/uapi/asm/ioctl.h b/arch/h8300/include/uapi/asm/ioctl.h
deleted file mode 100644 (file)
index b279fe0..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
diff --git a/arch/h8300/include/uapi/asm/ioctls.h b/arch/h8300/include/uapi/asm/ioctls.h
deleted file mode 100644 (file)
index 30eaed2..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __ARCH_H8300_IOCTLS_H__
-#define __ARCH_H8300_IOCTLS_H__
-
-#define FIOQSIZE       0x545E
-
-#include <asm-generic/ioctls.h>
-
-#endif /* __ARCH_H8300_IOCTLS_H__ */
diff --git a/arch/h8300/include/uapi/asm/ipcbuf.h b/arch/h8300/include/uapi/asm/ipcbuf.h
deleted file mode 100644 (file)
index 84c7e51..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/h8300/include/uapi/asm/kvm_para.h b/arch/h8300/include/uapi/asm/kvm_para.h
deleted file mode 100644 (file)
index 14fab8f..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
diff --git a/arch/h8300/include/uapi/asm/mman.h b/arch/h8300/include/uapi/asm/mman.h
deleted file mode 100644 (file)
index 8eebf89..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
diff --git a/arch/h8300/include/uapi/asm/msgbuf.h b/arch/h8300/include/uapi/asm/msgbuf.h
deleted file mode 100644 (file)
index 6b148cd..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _H8300_MSGBUF_H
-#define _H8300_MSGBUF_H
-
-/* 
- * The msqid64_ds structure for H8/300 architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct msqid64_ds {
-       struct ipc64_perm msg_perm;
-       __kernel_time_t msg_stime;      /* last msgsnd time */
-       unsigned long   __unused1;
-       __kernel_time_t msg_rtime;      /* last msgrcv time */
-       unsigned long   __unused2;
-       __kernel_time_t msg_ctime;      /* last change time */
-       unsigned long   __unused3;
-       unsigned long  msg_cbytes;      /* current number of bytes on queue */
-       unsigned long  msg_qnum;        /* number of messages in queue */
-       unsigned long  msg_qbytes;      /* max number of bytes on queue */
-       __kernel_pid_t msg_lspid;       /* pid of last msgsnd */
-       __kernel_pid_t msg_lrpid;       /* last receive pid */
-       unsigned long  __unused4;
-       unsigned long  __unused5;
-};
-
-#endif /* _H8300_MSGBUF_H */
diff --git a/arch/h8300/include/uapi/asm/param.h b/arch/h8300/include/uapi/asm/param.h
deleted file mode 100644 (file)
index 3dd18ae..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _UAPI_H8300_PARAM_H
-#define _UAPI_H8300_PARAM_H
-
-#ifndef __KERNEL__
-#define HZ             100
-#endif
-
-#define EXEC_PAGESIZE  4096
-
-#ifndef NOGROUP
-#define NOGROUP                (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64      /* max length of hostname */
-
-#endif /* _UAPI_H8300_PARAM_H */
diff --git a/arch/h8300/include/uapi/asm/poll.h b/arch/h8300/include/uapi/asm/poll.h
deleted file mode 100644 (file)
index f61540c..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef __H8300_POLL_H
-#define __H8300_POLL_H
-
-#define POLLWRNORM     POLLOUT
-#define POLLWRBAND     256
-
-#include <asm-generic/poll.h>
-
-#undef POLLREMOVE
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/posix_types.h b/arch/h8300/include/uapi/asm/posix_types.h
deleted file mode 100644 (file)
index 91e62ba..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ARCH_H8300_POSIX_TYPES_H
-#define __ARCH_H8300_POSIX_TYPES_H
-
-/*
- * This file is generally used by user-level software, so you need to
- * be a little careful about namespace pollution etc.  Also, we cannot
- * assume GCC is being used.
- */
-
-typedef unsigned short __kernel_mode_t;
-#define __kernel_mode_t __kernel_mode_t
-
-typedef unsigned short __kernel_ipc_pid_t;
-#define __kernel_ipc_pid_t __kernel_ipc_pid_t
-
-typedef unsigned short __kernel_uid_t;
-typedef unsigned short __kernel_gid_t;
-#define __kernel_uid_t __kernel_uid_t
-
-typedef unsigned short __kernel_old_uid_t;
-typedef unsigned short __kernel_old_gid_t;
-#define __kernel_old_uid_t __kernel_old_uid_t
-
-#include <asm-generic/posix_types.h>
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/ptrace.h b/arch/h8300/include/uapi/asm/ptrace.h
deleted file mode 100644 (file)
index ef39ec5..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _UAPI_H8300_PTRACE_H
-#define _UAPI_H8300_PTRACE_H
-
-#ifndef __ASSEMBLY__
-
-#define PT_ER1    0
-#define PT_ER2    1
-#define PT_ER3    2
-#define PT_ER4    3
-#define PT_ER5    4
-#define PT_ER6    5
-#define PT_ER0    6
-#define PT_ORIG_ER0       7
-#define PT_CCR    8
-#define PT_PC     9
-#define PT_USP    10
-#define PT_EXR     12
-
-/* this struct defines the way the registers are stored on the
-   stack during a system call. */
-
-struct pt_regs {
-       long     retpc;
-       long     er4;
-       long     er5;
-       long     er6;
-       long     er3;
-       long     er2;
-       long     er1;
-       long     orig_er0;
-       unsigned short ccr;
-       long     er0;
-       long     vector;
-#if defined(CONFIG_CPU_H8S)
-       unsigned short exr;
-#endif
-       unsigned long  pc;
-} __attribute__((aligned(2),packed));
-
-#define PTRACE_GETREGS            12
-#define PTRACE_SETREGS            13
-
-#endif /* __ASSEMBLY__ */
-#endif /* _UAPI_H8300_PTRACE_H */
diff --git a/arch/h8300/include/uapi/asm/resource.h b/arch/h8300/include/uapi/asm/resource.h
deleted file mode 100644 (file)
index 46c5f43..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_RESOURCE_H
-#define _H8300_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* _H8300_RESOURCE_H */
diff --git a/arch/h8300/include/uapi/asm/sembuf.h b/arch/h8300/include/uapi/asm/sembuf.h
deleted file mode 100644 (file)
index e04a3ec..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _H8300_SEMBUF_H
-#define _H8300_SEMBUF_H
-
-/* 
- * The semid64_ds structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct semid64_ds {
-       struct ipc64_perm sem_perm;             /* permissions .. see ipc.h */
-       __kernel_time_t sem_otime;              /* last semop time */
-       unsigned long   __unused1;
-       __kernel_time_t sem_ctime;              /* last change time */
-       unsigned long   __unused2;
-       unsigned long   sem_nsems;              /* no. of semaphores in array */
-       unsigned long   __unused3;
-       unsigned long   __unused4;
-};
-
-#endif /* _H8300_SEMBUF_H */
diff --git a/arch/h8300/include/uapi/asm/setup.h b/arch/h8300/include/uapi/asm/setup.h
deleted file mode 100644 (file)
index e2c600e..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __H8300_SETUP_H
-#define __H8300_SETUP_H
-
-#define COMMAND_LINE_SIZE      512
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/shmbuf.h b/arch/h8300/include/uapi/asm/shmbuf.h
deleted file mode 100644 (file)
index 64e7799..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-#ifndef _H8300_SHMBUF_H
-#define _H8300_SHMBUF_H
-
-/* 
- * The shmid64_ds structure for m68k architecture.
- * Note extra padding because this structure is passed back and forth
- * between kernel and user space.
- *
- * Pad space is left for:
- * - 64-bit time_t to solve y2038 problem
- * - 2 miscellaneous 32-bit values
- */
-
-struct shmid64_ds {
-       struct ipc64_perm       shm_perm;       /* operation perms */
-       size_t                  shm_segsz;      /* size of segment (bytes) */
-       __kernel_time_t         shm_atime;      /* last attach time */
-       unsigned long           __unused1;
-       __kernel_time_t         shm_dtime;      /* last detach time */
-       unsigned long           __unused2;
-       __kernel_time_t         shm_ctime;      /* last change time */
-       unsigned long           __unused3;
-       __kernel_pid_t          shm_cpid;       /* pid of creator */
-       __kernel_pid_t          shm_lpid;       /* pid of last operator */
-       unsigned long           shm_nattch;     /* no. of current attaches */
-       unsigned long           __unused4;
-       unsigned long           __unused5;
-};
-
-struct shminfo64 {
-       unsigned long   shmmax;
-       unsigned long   shmmin;
-       unsigned long   shmmni;
-       unsigned long   shmseg;
-       unsigned long   shmall;
-       unsigned long   __unused1;
-       unsigned long   __unused2;
-       unsigned long   __unused3;
-       unsigned long   __unused4;
-};
-
-#endif /* _H8300_SHMBUF_H */
diff --git a/arch/h8300/include/uapi/asm/sigcontext.h b/arch/h8300/include/uapi/asm/sigcontext.h
deleted file mode 100644 (file)
index e4b8150..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _ASM_H8300_SIGCONTEXT_H
-#define _ASM_H8300_SIGCONTEXT_H
-
-struct sigcontext {
-       unsigned long  sc_mask;         /* old sigmask */
-       unsigned long  sc_usp;          /* old user stack pointer */
-       unsigned long  sc_er0;
-       unsigned long  sc_er1;
-       unsigned long  sc_er2;
-       unsigned long  sc_er3;
-       unsigned long  sc_er4;
-       unsigned long  sc_er5;
-       unsigned long  sc_er6;
-       unsigned short sc_ccr;
-       unsigned long  sc_pc;
-};
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/siginfo.h b/arch/h8300/include/uapi/asm/siginfo.h
deleted file mode 100644 (file)
index bc8fbea..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_SIGINFO_H
-#define _H8300_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif
diff --git a/arch/h8300/include/uapi/asm/signal.h b/arch/h8300/include/uapi/asm/signal.h
deleted file mode 100644 (file)
index af3a6c3..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef _UAPI_H8300_SIGNAL_H
-#define _UAPI_H8300_SIGNAL_H
-
-#include <linux/types.h>
-
-/* Avoid too many header ordering problems.  */
-struct siginfo;
-
-#ifndef __KERNEL__
-/* Here we must cater to libcs that poke about in kernel headers.  */
-
-#define NSIG           32
-typedef unsigned long sigset_t;
-
-#endif /* __KERNEL__ */
-
-#define SIGHUP          1
-#define SIGINT          2
-#define SIGQUIT                 3
-#define SIGILL          4
-#define SIGTRAP                 5
-#define SIGABRT                 6
-#define SIGIOT          6
-#define SIGBUS          7
-#define SIGFPE          8
-#define SIGKILL                 9
-#define SIGUSR1                10
-#define SIGSEGV                11
-#define SIGUSR2                12
-#define SIGPIPE                13
-#define SIGALRM                14
-#define SIGTERM                15
-#define SIGSTKFLT      16
-#define SIGCHLD                17
-#define SIGCONT                18
-#define SIGSTOP                19
-#define SIGTSTP                20
-#define SIGTTIN                21
-#define SIGTTOU                22
-#define SIGURG         23
-#define SIGXCPU                24
-#define SIGXFSZ                25
-#define SIGVTALRM      26
-#define SIGPROF                27
-#define SIGWINCH       28
-#define SIGIO          29
-#define SIGPOLL                SIGIO
-/*
-#define SIGLOST                29
-*/
-#define SIGPWR         30
-#define SIGSYS         31
-#define        SIGUNUSED       31
-
-/* These should not be considered constants from userland.  */
-#define SIGRTMIN       32
-#define SIGRTMAX       _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP   0x00000001
-#define SA_NOCLDWAIT   0x00000002 /* not supported yet */
-#define SA_SIGINFO     0x00000004
-#define SA_ONSTACK     0x08000000
-#define SA_RESTART     0x10000000
-#define SA_NODEFER     0x40000000
-#define SA_RESETHAND   0x80000000
-
-#define SA_NOMASK      SA_NODEFER
-#define SA_ONESHOT     SA_RESETHAND
-
-#define SA_RESTORER    0x04000000
-
-#define MINSIGSTKSZ    2048
-#define SIGSTKSZ       8192
-
-#include <asm-generic/signal-defs.h>
-
-#ifndef __KERNEL__
-/* Here we must cater to libcs that poke about in kernel headers.  */
-
-struct sigaction {
-       union {
-         __sighandler_t _sa_handler;
-         void (*_sa_sigaction)(int, struct siginfo *, void *);
-       } _u;
-       sigset_t sa_mask;
-       unsigned long sa_flags;
-       void (*sa_restorer)(void);
-};
-
-#define sa_handler     _u._sa_handler
-#define sa_sigaction   _u._sa_sigaction
-
-#endif /* __KERNEL__ */
-
-typedef struct sigaltstack {
-       void *ss_sp;
-       int ss_flags;
-       size_t ss_size;
-} stack_t;
-
-
-#endif /* _UAPI_H8300_SIGNAL_H */
diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h
deleted file mode 100644 (file)
index 9490758..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
-
-#include <asm/sockios.h>
-
-/* For setsockoptions(2) */
-#define SOL_SOCKET     1
-
-#define SO_DEBUG       1
-#define SO_REUSEADDR   2
-#define SO_TYPE                3
-#define SO_ERROR       4
-#define SO_DONTROUTE   5
-#define SO_BROADCAST   6
-#define SO_SNDBUF      7
-#define SO_RCVBUF      8
-#define SO_SNDBUFFORCE 32
-#define SO_RCVBUFFORCE 33
-#define SO_KEEPALIVE   9
-#define SO_OOBINLINE   10
-#define SO_NO_CHECK    11
-#define SO_PRIORITY    12
-#define SO_LINGER      13
-#define SO_BSDCOMPAT   14
-#define SO_REUSEPORT   15
-#define SO_PASSCRED    16
-#define SO_PEERCRED    17
-#define SO_RCVLOWAT    18
-#define SO_SNDLOWAT    19
-#define SO_RCVTIMEO    20
-#define SO_SNDTIMEO    21
-
-/* Security levels - as per NRL IPv6 - don't actually do anything */
-#define SO_SECURITY_AUTHENTICATION             22
-#define SO_SECURITY_ENCRYPTION_TRANSPORT       23
-#define SO_SECURITY_ENCRYPTION_NETWORK         24
-
-#define SO_BINDTODEVICE        25
-
-/* Socket filtering */
-#define SO_ATTACH_FILTER        26
-#define SO_DETACH_FILTER        27
-#define SO_GET_FILTER          SO_ATTACH_FILTER
-
-#define SO_PEERNAME             28
-#define SO_TIMESTAMP           29
-#define SCM_TIMESTAMP          SO_TIMESTAMP
-
-#define SO_ACCEPTCONN          30
-
-#define SO_PEERSEC             31
-#define SO_PASSSEC             34
-#define SO_TIMESTAMPNS         35
-#define SCM_TIMESTAMPNS                SO_TIMESTAMPNS
-
-#define SO_MARK                        36
-
-#define SO_TIMESTAMPING                37
-#define SCM_TIMESTAMPING       SO_TIMESTAMPING
-
-#define SO_PROTOCOL            38
-#define SO_DOMAIN              39
-
-#define SO_RXQ_OVFL             40
-
-#define SO_WIFI_STATUS         41
-#define SCM_WIFI_STATUS                SO_WIFI_STATUS
-#define SO_PEEK_OFF            42
-
-/* Instruct lower device to use last 4-bytes of skb data as FCS */
-#define SO_NOFCS               43
-
-#define SO_LOCK_FILTER         44
-
-#define SO_SELECT_ERR_QUEUE    45
-
-#define SO_BUSY_POLL           46
-
-#endif /* _ASM_SOCKET_H */
diff --git a/arch/h8300/include/uapi/asm/sockios.h b/arch/h8300/include/uapi/asm/sockios.h
deleted file mode 100644 (file)
index e9c7ec8..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ARCH_H8300_SOCKIOS__
-#define __ARCH_H8300_SOCKIOS__
-
-/* Socket-level I/O control calls. */
-#define FIOSETOWN      0x8901
-#define SIOCSPGRP      0x8902
-#define FIOGETOWN      0x8903
-#define SIOCGPGRP      0x8904
-#define SIOCATMARK     0x8905
-#define SIOCGSTAMP     0x8906          /* Get stamp (timeval) */
-#define SIOCGSTAMPNS   0x8907          /* Get stamp (timespec) */
-
-#endif /* __ARCH_H8300_SOCKIOS__ */
diff --git a/arch/h8300/include/uapi/asm/stat.h b/arch/h8300/include/uapi/asm/stat.h
deleted file mode 100644 (file)
index 62c3cc2..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _H8300_STAT_H
-#define _H8300_STAT_H
-
-struct __old_kernel_stat {
-       unsigned short st_dev;
-       unsigned short st_ino;
-       unsigned short st_mode;
-       unsigned short st_nlink;
-       unsigned short st_uid;
-       unsigned short st_gid;
-       unsigned short st_rdev;
-       unsigned long  st_size;
-       unsigned long  st_atime;
-       unsigned long  st_mtime;
-       unsigned long  st_ctime;
-};
-
-struct stat {
-       unsigned short st_dev;
-       unsigned short __pad1;
-       unsigned long st_ino;
-       unsigned short st_mode;
-       unsigned short st_nlink;
-       unsigned short st_uid;
-       unsigned short st_gid;
-       unsigned short st_rdev;
-       unsigned short __pad2;
-       unsigned long  st_size;
-       unsigned long  st_blksize;
-       unsigned long  st_blocks;
-       unsigned long  st_atime;
-       unsigned long  __unused1;
-       unsigned long  st_mtime;
-       unsigned long  __unused2;
-       unsigned long  st_ctime;
-       unsigned long  __unused3;
-       unsigned long  __unused4;
-       unsigned long  __unused5;
-};
-
-/* This matches struct stat64 in glibc2.1, hence the absolutely
- * insane amounts of padding around dev_t's.
- */
-struct stat64 {
-       unsigned long long      st_dev;
-       unsigned char   __pad1[2];
-
-#define STAT64_HAS_BROKEN_ST_INO       1
-       unsigned long   __st_ino;
-
-       unsigned int    st_mode;
-       unsigned int    st_nlink;
-
-       unsigned long   st_uid;
-       unsigned long   st_gid;
-
-       unsigned long long      st_rdev;
-       unsigned char   __pad3[2];
-
-       long long       st_size;
-       unsigned long   st_blksize;
-
-       unsigned long   __pad4;         /* future possible st_blocks high bits */
-       unsigned long   st_blocks;      /* Number 512-byte blocks allocated. */
-
-       unsigned long   st_atime;
-       unsigned long   st_atime_nsec;
-
-       unsigned long   st_mtime;
-       unsigned long   st_mtime_nsec;
-
-       unsigned long   st_ctime;
-       unsigned long   st_ctime_nsec;
-
-       unsigned long long      st_ino;
-};
-
-#endif /* _H8300_STAT_H */
diff --git a/arch/h8300/include/uapi/asm/statfs.h b/arch/h8300/include/uapi/asm/statfs.h
deleted file mode 100644 (file)
index b96efa7..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _H8300_STATFS_H
-#define _H8300_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* _H8300_STATFS_H */
diff --git a/arch/h8300/include/uapi/asm/swab.h b/arch/h8300/include/uapi/asm/swab.h
deleted file mode 100644 (file)
index 39abbf5..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _H8300_SWAB_H
-#define _H8300_SWAB_H
-
-#include <linux/types.h>
-
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-#  define __SWAB_64_THRU_32__
-#endif
-
-#endif /* _H8300_SWAB_H */
diff --git a/arch/h8300/include/uapi/asm/termbits.h b/arch/h8300/include/uapi/asm/termbits.h
deleted file mode 100644 (file)
index 3287a62..0000000
+++ /dev/null
@@ -1,201 +0,0 @@
-#ifndef __ARCH_H8300_TERMBITS_H__
-#define __ARCH_H8300_TERMBITS_H__
-
-#include <linux/posix_types.h>
-
-typedef unsigned char  cc_t;
-typedef unsigned int   speed_t;
-typedef unsigned int   tcflag_t;
-
-#define NCCS 19
-struct termios {
-       tcflag_t c_iflag;               /* input mode flags */
-       tcflag_t c_oflag;               /* output mode flags */
-       tcflag_t c_cflag;               /* control mode flags */
-       tcflag_t c_lflag;               /* local mode flags */
-       cc_t c_line;                    /* line discipline */
-       cc_t c_cc[NCCS];                /* control characters */
-};
-
-struct termios2 {
-       tcflag_t c_iflag;               /* input mode flags */
-       tcflag_t c_oflag;               /* output mode flags */
-       tcflag_t c_cflag;               /* control mode flags */
-       tcflag_t c_lflag;               /* local mode flags */
-       cc_t c_line;                    /* line discipline */
-       cc_t c_cc[NCCS];                /* control characters */
-       speed_t c_ispeed;               /* input speed */
-       speed_t c_ospeed;               /* output speed */
-};
-
-struct ktermios {
-       tcflag_t c_iflag;               /* input mode flags */
-       tcflag_t c_oflag;               /* output mode flags */
-       tcflag_t c_cflag;               /* control mode flags */
-       tcflag_t c_lflag;               /* local mode flags */
-       cc_t c_line;                    /* line discipline */
-       cc_t c_cc[NCCS];                /* control characters */
-       speed_t c_ispeed;               /* input speed */
-       speed_t c_ospeed;               /* output speed */
-};
-
-/* c_cc characters */
-#define VINTR 0
-#define VQUIT 1
-#define VERASE 2
-#define VKILL 3
-#define VEOF 4
-#define VTIME 5
-#define VMIN 6
-#define VSWTC 7
-#define VSTART 8
-#define VSTOP 9
-#define VSUSP 10
-#define VEOL 11
-#define VREPRINT 12
-#define VDISCARD 13
-#define VWERASE 14
-#define VLNEXT 15
-#define VEOL2 16
-
-
-/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK  0000020
-#define ISTRIP 0000040
-#define INLCR  0000100
-#define IGNCR  0000200
-#define ICRNL  0000400
-#define IUCLC  0001000
-#define IXON   0002000
-#define IXANY  0004000
-#define IXOFF  0010000
-#define IMAXBEL        0020000
-#define IUTF8  0040000
-
-/* c_oflag bits */
-#define OPOST  0000001
-#define OLCUC  0000002
-#define ONLCR  0000004
-#define OCRNL  0000010
-#define ONOCR  0000020
-#define ONLRET 0000040
-#define OFILL  0000100
-#define OFDEL  0000200
-#define NLDLY  0000400
-#define   NL0  0000000
-#define   NL1  0000400
-#define CRDLY  0003000
-#define   CR0  0000000
-#define   CR1  0001000
-#define   CR2  0002000
-#define   CR3  0003000
-#define TABDLY 0014000
-#define   TAB0 0000000
-#define   TAB1 0004000
-#define   TAB2 0010000
-#define   TAB3 0014000
-#define   XTABS        0014000
-#define BSDLY  0020000
-#define   BS0  0000000
-#define   BS1  0020000
-#define VTDLY  0040000
-#define   VT0  0000000
-#define   VT1  0040000
-#define FFDLY  0100000
-#define   FF0  0000000
-#define   FF1  0100000
-
-/* c_cflag bit meaning */
-#define CBAUD  0010017
-#define  B0    0000000         /* hang up */
-#define  B50   0000001
-#define  B75   0000002
-#define  B110  0000003
-#define  B134  0000004
-#define  B150  0000005
-#define  B200  0000006
-#define  B300  0000007
-#define  B600  0000010
-#define  B1200 0000011
-#define  B1800 0000012
-#define  B2400 0000013
-#define  B4800 0000014
-#define  B9600 0000015
-#define  B19200        0000016
-#define  B38400        0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CSIZE  0000060
-#define   CS5  0000000
-#define   CS6  0000020
-#define   CS7  0000040
-#define   CS8  0000060
-#define CSTOPB 0000100
-#define CREAD  0000200
-#define PARENB 0000400
-#define PARODD 0001000
-#define HUPCL  0002000
-#define CLOCAL 0004000
-#define CBAUDEX 0010000
-#define    BOTHER 0010000
-#define    B57600 0010001
-#define   B115200 0010002
-#define   B230400 0010003
-#define   B460800 0010004
-#define   B500000 0010005
-#define   B576000 0010006
-#define   B921600 0010007
-#define  B1000000 0010010
-#define  B1152000 0010011
-#define  B1500000 0010012
-#define  B2000000 0010013
-#define  B2500000 0010014
-#define  B3000000 0010015
-#define  B3500000 0010016
-#define  B4000000 0010017
-#define CIBAUD   002003600000          /* input baud rate */
-#define CMSPAR   010000000000          /* mark or space (stick) parity */
-#define CRTSCTS          020000000000          /* flow control */
-
-#define IBSHIFT          16                    /* shift from CBAUD to CIBAUD */
-
-/* c_lflag bits */
-#define ISIG   0000001
-#define ICANON 0000002
-#define XCASE  0000004
-#define ECHO   0000010
-#define ECHOE  0000020
-#define ECHOK  0000040
-#define ECHONL 0000100
-#define NOFLSH 0000200
-#define TOSTOP 0000400
-#define ECHOCTL        0001000
-#define ECHOPRT        0002000
-#define ECHOKE 0004000
-#define FLUSHO 0010000
-#define PENDIN 0040000
-#define IEXTEN 0100000
-#define EXTPROC        0200000
-
-
-/* tcflow() and TCXONC use these */
-#define        TCOOFF          0
-#define        TCOON           1
-#define        TCIOFF          2
-#define        TCION           3
-
-/* tcflush() and TCFLSH use these */
-#define        TCIFLUSH        0
-#define        TCOFLUSH        1
-#define        TCIOFLUSH       2
-
-/* tcsetattr uses these */
-#define        TCSANOW         0
-#define        TCSADRAIN       1
-#define        TCSAFLUSH       2
-
-#endif /* __ARCH_H8300_TERMBITS_H__ */
diff --git a/arch/h8300/include/uapi/asm/termios.h b/arch/h8300/include/uapi/asm/termios.h
deleted file mode 100644 (file)
index 5a67d7e..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _UAPI_H8300_TERMIOS_H
-#define _UAPI_H8300_TERMIOS_H
-
-#include <asm/termbits.h>
-#include <asm/ioctls.h>
-struct winsize {
-       unsigned short ws_row;
-       unsigned short ws_col;
-       unsigned short ws_xpixel;
-       unsigned short ws_ypixel;
-};
-
-#define NCC 8
-struct termio {
-       unsigned short c_iflag;         /* input mode flags */
-       unsigned short c_oflag;         /* output mode flags */
-       unsigned short c_cflag;         /* control mode flags */
-       unsigned short c_lflag;         /* local mode flags */
-       unsigned char c_line;           /* line discipline */
-       unsigned char c_cc[NCC];        /* control characters */
-};
-
-
-/* modem lines */
-#define TIOCM_LE       0x001
-#define TIOCM_DTR      0x002
-#define TIOCM_RTS      0x004
-#define TIOCM_ST       0x008
-#define TIOCM_SR       0x010
-#define TIOCM_CTS      0x020
-#define TIOCM_CAR      0x040
-#define TIOCM_RNG      0x080
-#define TIOCM_DSR      0x100
-#define TIOCM_CD       TIOCM_CAR
-#define TIOCM_RI       TIOCM_RNG
-#define TIOCM_OUT1     0x2000
-#define TIOCM_OUT2     0x4000
-#define TIOCM_LOOP     0x8000
-
-/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-
-
-#endif /* _UAPI_H8300_TERMIOS_H */
diff --git a/arch/h8300/include/uapi/asm/types.h b/arch/h8300/include/uapi/asm/types.h
deleted file mode 100644 (file)
index 9ec9d4c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/int-ll64.h>
diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h
deleted file mode 100644 (file)
index 8cb5d42..0000000
+++ /dev/null
@@ -1,330 +0,0 @@
-#ifndef _UAPI_ASM_H8300_UNISTD_H_
-#define _UAPI_ASM_H8300_UNISTD_H_
-
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall      0
-#define __NR_exit                1
-#define __NR_fork                2
-#define __NR_read                3
-#define __NR_write               4
-#define __NR_open                5
-#define __NR_close               6
-#define __NR_waitpid             7
-#define __NR_creat               8
-#define __NR_link                9
-#define __NR_unlink             10
-#define __NR_execve             11
-#define __NR_chdir              12
-#define __NR_time               13
-#define __NR_mknod              14
-#define __NR_chmod              15
-#define __NR_lchown             16
-#define __NR_break              17
-#define __NR_oldstat            18
-#define __NR_lseek              19
-#define __NR_getpid             20
-#define __NR_mount              21
-#define __NR_umount             22
-#define __NR_setuid             23
-#define __NR_getuid             24
-#define __NR_stime              25
-#define __NR_ptrace             26
-#define __NR_alarm              27
-#define __NR_oldfstat           28
-#define __NR_pause              29
-#define __NR_utime              30
-#define __NR_stty               31
-#define __NR_gtty               32
-#define __NR_access             33
-#define __NR_nice               34
-#define __NR_ftime              35
-#define __NR_sync               36
-#define __NR_kill               37
-#define __NR_rename             38
-#define __NR_mkdir              39
-#define __NR_rmdir              40
-#define __NR_dup                41
-#define __NR_pipe               42
-#define __NR_times              43
-#define __NR_prof               44
-#define __NR_brk                45
-#define __NR_setgid             46
-#define __NR_getgid             47
-#define __NR_signal             48
-#define __NR_geteuid            49
-#define __NR_getegid            50
-#define __NR_acct               51
-#define __NR_umount2            52
-#define __NR_lock               53
-#define __NR_ioctl              54
-#define __NR_fcntl              55
-#define __NR_mpx                56
-#define __NR_setpgid            57
-#define __NR_ulimit             58
-#define __NR_oldolduname        59
-#define __NR_umask              60
-#define __NR_chroot             61
-#define __NR_ustat              62
-#define __NR_dup2               63
-#define __NR_getppid            64
-#define __NR_getpgrp            65
-#define __NR_setsid             66
-#define __NR_sigaction          67
-#define __NR_sgetmask           68
-#define __NR_ssetmask           69
-#define __NR_setreuid           70
-#define __NR_setregid           71
-#define __NR_sigsuspend                 72
-#define __NR_sigpending                 73
-#define __NR_sethostname        74
-#define __NR_setrlimit          75
-#define __NR_getrlimit          76
-#define __NR_getrusage          77
-#define __NR_gettimeofday       78
-#define __NR_settimeofday       79
-#define __NR_getgroups          80
-#define __NR_setgroups          81
-#define __NR_select             82
-#define __NR_symlink            83
-#define __NR_oldlstat           84
-#define __NR_readlink           85
-#define __NR_uselib             86
-#define __NR_swapon             87
-#define __NR_reboot             88
-#define __NR_readdir            89
-#define __NR_mmap               90
-#define __NR_munmap             91
-#define __NR_truncate           92
-#define __NR_ftruncate          93
-#define __NR_fchmod             94
-#define __NR_fchown             95
-#define __NR_getpriority        96
-#define __NR_setpriority        97
-#define __NR_profil             98
-#define __NR_statfs             99
-#define __NR_fstatfs           100
-#define __NR_ioperm            101
-#define __NR_socketcall                102
-#define __NR_syslog            103
-#define __NR_setitimer         104
-#define __NR_getitimer         105
-#define __NR_stat              106
-#define __NR_lstat             107
-#define __NR_fstat             108
-#define __NR_olduname          109
-#define __NR_iopl              110
-#define __NR_vhangup           111
-#define __NR_idle              112
-#define __NR_vm86old           113
-#define __NR_wait4             114
-#define __NR_swapoff           115
-#define __NR_sysinfo           116
-#define __NR_ipc               117
-#define __NR_fsync             118
-#define __NR_sigreturn         119
-#define __NR_clone             120
-#define __NR_setdomainname     121
-#define __NR_uname             122
-#define __NR_modify_ldt                123
-#define __NR_adjtimex          124
-#define __NR_mprotect          125
-#define __NR_sigprocmask       126
-#define __NR_create_module     127
-#define __NR_init_module       128
-#define __NR_delete_module     129
-#define __NR_get_kernel_syms   130
-#define __NR_quotactl          131
-#define __NR_getpgid           132
-#define __NR_fchdir            133
-#define __NR_bdflush           134
-#define __NR_sysfs             135
-#define __NR_personality       136
-#define __NR_afs_syscall       137 /* Syscall for Andrew File System */
-#define __NR_setfsuid          138
-#define __NR_setfsgid          139
-#define __NR__llseek           140
-#define __NR_getdents          141
-#define __NR__newselect                142
-#define __NR_flock             143
-#define __NR_msync             144
-#define __NR_readv             145
-#define __NR_writev            146
-#define __NR_getsid            147
-#define __NR_fdatasync         148
-#define __NR__sysctl           149
-#define __NR_mlock             150
-#define __NR_munlock           151
-#define __NR_mlockall          152
-#define __NR_munlockall                153
-#define __NR_sched_setparam            154
-#define __NR_sched_getparam            155
-#define __NR_sched_setscheduler                156
-#define __NR_sched_getscheduler                157
-#define __NR_sched_yield               158
-#define __NR_sched_get_priority_max    159
-#define __NR_sched_get_priority_min    160
-#define __NR_sched_rr_get_interval     161
-#define __NR_nanosleep         162
-#define __NR_mremap            163
-#define __NR_setresuid         164
-#define __NR_getresuid         165
-#define __NR_vm86              166
-#define __NR_query_module      167
-#define __NR_poll              168
-#define __NR_nfsservctl                169
-#define __NR_setresgid         170
-#define __NR_getresgid         171
-#define __NR_prctl             172
-#define __NR_rt_sigreturn      173
-#define __NR_rt_sigaction      174
-#define __NR_rt_sigprocmask    175
-#define __NR_rt_sigpending     176
-#define __NR_rt_sigtimedwait   177
-#define __NR_rt_sigqueueinfo   178
-#define __NR_rt_sigsuspend     179
-#define __NR_pread64           180
-#define __NR_pwrite64          181
-#define __NR_chown             182
-#define __NR_getcwd            183
-#define __NR_capget            184
-#define __NR_capset            185
-#define __NR_sigaltstack       186
-#define __NR_sendfile          187
-#define __NR_getpmsg           188     /* some people actually want streams */
-#define __NR_putpmsg           189     /* some people actually want streams */
-#define __NR_vfork             190
-#define __NR_ugetrlimit                191
-#define __NR_mmap2             192
-#define __NR_truncate64                193
-#define __NR_ftruncate64       194
-#define __NR_stat64            195
-#define __NR_lstat64           196
-#define __NR_fstat64           197
-#define __NR_lchown32          198
-#define __NR_getuid32          199
-#define __NR_getgid32          200
-#define __NR_geteuid32         201
-#define __NR_getegid32         202
-#define __NR_setreuid32                203
-#define __NR_setregid32                204
-#define __NR_getgroups32       205
-#define __NR_setgroups32       206
-#define __NR_fchown32          207
-#define __NR_setresuid32       208
-#define __NR_getresuid32       209
-#define __NR_setresgid32       210
-#define __NR_getresgid32       211
-#define __NR_chown32           212
-#define __NR_setuid32          213
-#define __NR_setgid32          214
-#define __NR_setfsuid32                215
-#define __NR_setfsgid32                216
-#define __NR_pivot_root                217
-#define __NR_mincore           218
-#define __NR_madvise           219
-#define __NR_madvise1          219
-#define __NR_getdents64                220
-#define __NR_fcntl64           221
-/* 223 is unused */
-#define __NR_gettid            224
-#define __NR_readahead         225
-#define __NR_setxattr          226
-#define __NR_lsetxattr         227
-#define __NR_fsetxattr         228
-#define __NR_getxattr          229
-#define __NR_lgetxattr         230
-#define __NR_fgetxattr         231
-#define __NR_listxattr         232
-#define __NR_llistxattr                233
-#define __NR_flistxattr                234
-#define __NR_removexattr       235
-#define __NR_lremovexattr      236
-#define __NR_fremovexattr      237
-#define __NR_tkill             238
-#define __NR_sendfile64                239
-#define __NR_futex             240
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
-#define __NR_set_thread_area   243
-#define __NR_get_thread_area   244
-#define __NR_io_setup          245
-#define __NR_io_destroy                246
-#define __NR_io_getevents      247
-#define __NR_io_submit         248
-#define __NR_io_cancel         249
-#define __NR_fadvise64         250
-/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
-#define __NR_exit_group                252
-#define __NR_lookup_dcookie    253
-#define __NR_epoll_create      254
-#define __NR_epoll_ctl         255
-#define __NR_epoll_wait                256
-#define __NR_remap_file_pages  257
-#define __NR_set_tid_address   258
-#define __NR_timer_create      259
-#define __NR_timer_settime     (__NR_timer_create+1)
-#define __NR_timer_gettime     (__NR_timer_create+2)
-#define __NR_timer_getoverrun  (__NR_timer_create+3)
-#define __NR_timer_delete      (__NR_timer_create+4)
-#define __NR_clock_settime     (__NR_timer_create+5)
-#define __NR_clock_gettime     (__NR_timer_create+6)
-#define __NR_clock_getres      (__NR_timer_create+7)
-#define __NR_clock_nanosleep   (__NR_timer_create+8)
-#define __NR_statfs64          268
-#define __NR_fstatfs64         269
-#define __NR_tgkill            270
-#define __NR_utimes            271
-#define __NR_fadvise64_64      272
-#define __NR_vserver           273
-#define __NR_mbind             274
-#define __NR_get_mempolicy     275
-#define __NR_set_mempolicy     276
-#define __NR_mq_open           277
-#define __NR_mq_unlink         (__NR_mq_open+1)
-#define __NR_mq_timedsend      (__NR_mq_open+2)
-#define __NR_mq_timedreceive   (__NR_mq_open+3)
-#define __NR_mq_notify         (__NR_mq_open+4)
-#define __NR_mq_getsetattr     (__NR_mq_open+5)
-#define __NR_kexec_load                283
-#define __NR_waitid            284
-/* #define __NR_sys_setaltroot 285 */
-#define __NR_add_key           286
-#define __NR_request_key       287
-#define __NR_keyctl            288
-#define __NR_ioprio_set                289
-#define __NR_ioprio_get                290
-#define __NR_inotify_init      291
-#define __NR_inotify_add_watch 292
-#define __NR_inotify_rm_watch  293
-#define __NR_migrate_pages     294
-#define __NR_openat            295
-#define __NR_mkdirat           296
-#define __NR_mknodat           297
-#define __NR_fchownat          298
-#define __NR_futimesat         299
-#define __NR_fstatat64         300
-#define __NR_unlinkat          301
-#define __NR_renameat          302
-#define __NR_linkat            303
-#define __NR_symlinkat         304
-#define __NR_readlinkat                305
-#define __NR_fchmodat          306
-#define __NR_faccessat         307
-#define __NR_pselect6          308
-#define __NR_ppoll             309
-#define __NR_unshare           310
-#define __NR_set_robust_list   311
-#define __NR_get_robust_list   312
-#define __NR_splice            313
-#define __NR_sync_file_range   314
-#define __NR_tee               315
-#define __NR_vmsplice          316
-#define __NR_move_pages                317
-#define __NR_getcpu            318
-#define __NR_epoll_pwait       319
-#define __NR_setns             320
-
-#endif /* _UAPI_ASM_H8300_UNISTD_H_ */
diff --git a/arch/h8300/kernel/Makefile b/arch/h8300/kernel/Makefile
deleted file mode 100644 (file)
index 1cc57f8..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := vmlinux.lds
-
-obj-y := process.o traps.o ptrace.o irq.o \
-        sys_h8300.o time.o signal.o \
-         setup.o gpio.o syscalls.o \
-        entry.o timer/
-
-obj-$(CONFIG_MODULES) += module.o h8300_ksyms.o 
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
deleted file mode 100644 (file)
index fd961e0..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * This program is used to generate definitions needed by
- * assembly language modules.
- *
- * We use the technique used in the OSF Mach kernel code:
- * generate asm statements containing #defines,
- * compile this file to assembler, and then extract the
- * #defines from the assembly-language output.
- */
-
-#include <linux/stddef.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/ptrace.h>
-#include <linux/hardirq.h>
-#include <linux/kbuild.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/ptrace.h>
-
-int main(void)
-{
-       /* offsets into the task struct */
-       DEFINE(TASK_STATE, offsetof(struct task_struct, state));
-       DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
-       DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
-       DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
-       DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
-       DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
-       DEFINE(TASK_MM, offsetof(struct task_struct, mm));
-       DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
-
-       /* offsets into the irq_cpustat_t struct */
-       DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
-
-       /* offsets into the thread struct */
-       DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
-       DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
-       DEFINE(THREAD_CCR, offsetof(struct thread_struct, ccr));
-
-       /* offsets into the pt_regs struct */
-       DEFINE(LER0,  offsetof(struct pt_regs, er0)      - sizeof(long));
-       DEFINE(LER1,  offsetof(struct pt_regs, er1)      - sizeof(long));
-       DEFINE(LER2,  offsetof(struct pt_regs, er2)      - sizeof(long));
-       DEFINE(LER3,  offsetof(struct pt_regs, er3)      - sizeof(long));
-       DEFINE(LER4,  offsetof(struct pt_regs, er4)      - sizeof(long));
-       DEFINE(LER5,  offsetof(struct pt_regs, er5)      - sizeof(long));
-       DEFINE(LER6,  offsetof(struct pt_regs, er6)      - sizeof(long));
-       DEFINE(LORIG, offsetof(struct pt_regs, orig_er0) - sizeof(long));
-       DEFINE(LCCR,  offsetof(struct pt_regs, ccr)      - sizeof(long));
-       DEFINE(LVEC,  offsetof(struct pt_regs, vector)   - sizeof(long));
-#if defined(__H8300S__)
-       DEFINE(LEXR,  offsetof(struct pt_regs, exr)      - sizeof(long));
-#endif
-       DEFINE(LRET,  offsetof(struct pt_regs, pc)       - sizeof(long));
-
-       DEFINE(PT_PTRACED, PT_PTRACED);
-
-       return 0;
-}
diff --git a/arch/h8300/kernel/entry.S b/arch/h8300/kernel/entry.S
deleted file mode 100644 (file)
index 94bd30f..0000000
+++ /dev/null
@@ -1,402 +0,0 @@
-/* -*- mode: asm -*-
- *
- *  linux/arch/h8300/platform/h8300h/entry.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *  David McCullough <davidm@snapgear.com>
- *
- */
-
-/*
- *  entry.S
- *  include exception/interrupt gateway
- *          system call entry
- */
-
-#include <linux/sys.h>
-#include <asm/unistd.h>
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/errno.h>
-
-#if defined(CONFIG_CPU_H8300H)
-#define USERRET 8
-INTERRUPTS = 64
-       .h8300h
-       .macro  SHLL2 reg
-       shll.l  \reg
-       shll.l  \reg
-       .endm
-       .macro  SHLR2 reg
-       shlr.l  \reg
-       shlr.l  \reg
-       .endm
-       .macro  SAVEREGS
-       mov.l   er0,@-sp
-       mov.l   er1,@-sp
-       mov.l   er2,@-sp
-       mov.l   er3,@-sp
-       .endm
-       .macro  RESTOREREGS
-       mov.l   @sp+,er3
-       mov.l   @sp+,er2
-       .endm
-       .macro  SAVEEXR
-       .endm
-       .macro  RESTOREEXR
-       .endm
-#endif
-#if defined(CONFIG_CPU_H8S)
-#define USERRET 10
-#define USEREXR 8
-INTERRUPTS = 128
-       .h8300s
-       .macro  SHLL2 reg
-       shll.l  #2,\reg
-       .endm
-       .macro  SHLR2 reg
-       shlr.l  #2,\reg
-       .endm
-       .macro  SAVEREGS
-       stm.l   er0-er3,@-sp
-       .endm
-       .macro  RESTOREREGS
-       ldm.l   @sp+,er2-er3
-       .endm
-       .macro  SAVEEXR
-       mov.w   @(USEREXR:16,er0),r1
-       mov.w   r1,@(LEXR-LER3:16,sp)           /* copy EXR */
-       .endm
-       .macro  RESTOREEXR
-       mov.w   @(LEXR-LER1:16,sp),r1           /* restore EXR */
-       mov.b   r1l,r1h
-       mov.w   r1,@(USEREXR:16,er0)
-       .endm
-#endif
-
-
-/* CPU context save/restore macros. */
-
-       .macro  SAVE_ALL
-       mov.l   er0,@-sp
-       stc     ccr,r0l                         /* check kernel mode */
-       btst    #4,r0l
-       bne     5f
-
-       /* user mode */
-       mov.l   sp,@_sw_usp
-       mov.l   @sp,er0                         /* restore saved er0 */
-       orc     #0x10,ccr                       /* switch kernel stack */
-       mov.l   @_sw_ksp,sp
-       sub.l   #(LRET-LORIG),sp                /* allocate LORIG - LRET */
-       SAVEREGS
-       mov.l   @_sw_usp,er0
-       mov.l   @(USERRET:16,er0),er1           /* copy the RET addr */
-       mov.l   er1,@(LRET-LER3:16,sp)
-       SAVEEXR
-
-       mov.l   @(LORIG-LER3:16,sp),er0
-       mov.l   er0,@(LER0-LER3:16,sp)          /* copy ER0 */
-       mov.w   e1,r1                           /* e1 highbyte = ccr */
-       and     #0xef,r1h                       /* mask mode? flag */
-       bra     6f
-5:
-       /* kernel mode */
-       mov.l   @sp,er0                         /* restore saved er0 */
-       subs    #2,sp                           /* set dummy ccr */
-       SAVEREGS
-       mov.w   @(LRET-LER3:16,sp),r1           /* copy old ccr */
-6:
-       mov.b   r1h,r1l
-       mov.b   #0,r1h
-       mov.w   r1,@(LCCR-LER3:16,sp)           /* set ccr */
-       mov.l   er6,@-sp                        /* syscall arg #6 */
-       mov.l   er5,@-sp                        /* syscall arg #5 */
-       mov.l   er4,@-sp                        /* syscall arg #4 */
-       .endm                                   /* r1 = ccr */
-
-       .macro  RESTORE_ALL
-       mov.l   @sp+,er4
-       mov.l   @sp+,er5
-       mov.l   @sp+,er6
-       RESTOREREGS
-       mov.w   @(LCCR-LER1:16,sp),r0           /* check kernel mode */
-       btst    #4,r0l
-       bne     7f
-
-       orc     #0x80,ccr
-       mov.l   @_sw_usp,er0
-       mov.l   @(LER0-LER1:16,sp),er1          /* restore ER0 */
-       mov.l   er1,@er0
-       RESTOREEXR
-       mov.w   @(LCCR-LER1:16,sp),r1           /* restore the RET addr */
-       mov.b   r1l,r1h
-       mov.b   @(LRET+1-LER1:16,sp),r1l
-       mov.w   r1,e1
-       mov.w   @(LRET+2-LER1:16,sp),r1
-       mov.l   er1,@(USERRET:16,er0)
-
-       mov.l   @sp+,er1
-       add.l   #(LRET-LER1),sp                 /* remove LORIG - LRET */
-       mov.l   sp,@_sw_ksp
-       andc    #0xef,ccr                       /* switch to user mode */
-       mov.l   er0,sp
-       bra     8f
-7:
-       mov.l   @sp+,er1
-       adds    #4,sp
-       adds    #2,sp
-8:
-       mov.l   @sp+,er0
-       adds    #4,sp                           /* remove the sw created LVEC */
-       rte
-       .endm
-
-.globl _system_call
-.globl _ret_from_exception
-.globl _ret_from_fork
-.globl _ret_from_kernel_thread
-.globl _ret_from_interrupt
-.globl _interrupt_redirect_table
-.globl _sw_ksp,_sw_usp
-.globl _resume
-.globl _interrupt_entry
-.globl _trace_break
-
-#if defined(CONFIG_ROMKERNEL)
-       .section .int_redirect,"ax"
-_interrupt_redirect_table:
-#if defined(CONFIG_CPU_H8300H)
-       .rept   7
-       .long   0
-       .endr
-#endif
-#if defined(CONFIG_CPU_H8S)
-       .rept   5
-       .long   0
-       .endr
-       jmp     @_trace_break
-       .long   0
-#endif
-
-       jsr     @_interrupt_entry               /* NMI */
-       jmp     @_system_call                   /* TRAPA #0 (System call) */
-       .long   0
-       .long   0
-       jmp     @_trace_break                   /* TRAPA #3 (breakpoint) */
-       .rept   INTERRUPTS-12
-       jsr     @_interrupt_entry
-       .endr
-#endif
-#if defined(CONFIG_RAMKERNEL)
-.globl _interrupt_redirect_table
-       .section .bss
-_interrupt_redirect_table:
-       .space  4
-#endif
-
-       .section .text
-       .align  2
-_interrupt_entry:
-       SAVE_ALL
-       mov.l   sp,er0
-       add.l   #LVEC,er0
-       btst    #4,r1l
-       bne     1f
-       /* user LVEC */
-       mov.l   @_sw_usp,er0
-       adds    #4,er0
-1:
-       mov.l   @er0,er0                        /* LVEC address */
-#if defined(CONFIG_ROMKERNEL)
-       sub.l   #_interrupt_redirect_table,er0
-#endif
-#if defined(CONFIG_RAMKERNEL)
-       mov.l   @_interrupt_redirect_table,er1
-       sub.l   er1,er0
-#endif
-       SHLR2   er0
-       dec.l   #1,er0
-       mov.l   sp,er1
-       subs    #4,er1                          /* adjust ret_pc */
-       jsr     @_do_IRQ
-       jmp     @_ret_from_interrupt
-
-_system_call:
-       subs    #4,sp                           /* dummy LVEC */
-       SAVE_ALL
-       andc    #0x7f,ccr
-       mov.l   er0,er4
-
-       /* save top of frame */
-       mov.l   sp,er0
-       jsr     @_set_esp0
-       mov.l   sp,er2
-       and.w   #0xe000,r2
-       mov.b   @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
-       btst    #(TIF_SYSCALL_TRACE & 7),r2l
-       beq     1f
-       jsr     @_do_syscall_trace
-1:
-       cmp.l   #NR_syscalls,er4
-       bcc     badsys
-       SHLL2   er4
-       mov.l   #_sys_call_table,er0
-       add.l   er4,er0
-       mov.l   @er0,er4
-       beq     _ret_from_exception:16
-       mov.l   @(LER1:16,sp),er0
-       mov.l   @(LER2:16,sp),er1
-       mov.l   @(LER3:16,sp),er2
-       jsr     @er4
-       mov.l   er0,@(LER0:16,sp)               /* save the return value */
-       mov.l   sp,er2
-       and.w   #0xe000,r2
-       mov.b   @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
-       btst    #(TIF_SYSCALL_TRACE & 7),r2l
-       beq     2f
-       jsr     @_do_syscall_trace
-2:
-#if defined(CONFIG_SYSCALL_PRINT)
-       jsr     @_syscall_print
-#endif
-       orc     #0x80,ccr
-       bra     resume_userspace
-
-badsys:
-       mov.l   #-ENOSYS,er0
-       mov.l   er0,@(LER0:16,sp)
-       bra     resume_userspace
-
-#if !defined(CONFIG_PREEMPT)
-#define resume_kernel restore_all
-#endif
-
-_ret_from_exception:
-#if defined(CONFIG_PREEMPT)
-       orc     #0x80,ccr
-#endif
-_ret_from_interrupt:
-       mov.b   @(LCCR+1:16,sp),r0l
-       btst    #4,r0l
-       bne     resume_kernel:8         /* return from kernel */
-resume_userspace:
-       andc    #0x7f,ccr
-       mov.l   sp,er4
-       and.w   #0xe000,r4              /* er4 <- current thread info */
-       mov.l   @(TI_FLAGS:16,er4),er1
-       and.l   #_TIF_WORK_MASK,er1
-       beq     restore_all:8
-work_pending:
-       btst    #TIF_NEED_RESCHED,r1l
-       bne     work_resched:8
-       /* work notifysig */
-       mov.l   sp,er0
-       subs    #4,er0                  /* er0: pt_regs */
-       jsr     @_do_notify_resume
-       bra     restore_all:8
-work_resched:
-       mov.l   sp,er0
-       jsr     @_set_esp0
-       jsr     @_schedule
-       bra     resume_userspace:8
-restore_all:
-       RESTORE_ALL                     /* Does RTE */
-
-#if defined(CONFIG_PREEMPT)
-resume_kernel:
-       mov.l   @(TI_PRE_COUNT:16,er4),er0
-       bne     restore_all:8
-need_resched:
-       mov.l   @(TI_FLAGS:16,er4),er0
-       btst    #TIF_NEED_RESCHED,r0l
-       beq     restore_all:8
-       mov.b   @(LCCR+1:16,sp),r0l     /* Interrupt Enabled? */
-       bmi     restore_all:8
-       mov.l   #PREEMPT_ACTIVE,er0
-       mov.l   er0,@(TI_PRE_COUNT:16,er4)
-       andc    #0x7f,ccr
-       mov.l   sp,er0
-       jsr     @_set_esp0
-       jsr     @_schedule
-       orc     #0x80,ccr
-       bra     need_resched:8
-#endif
-
-_ret_from_fork:
-       mov.l   er2,er0
-       jsr     @_schedule_tail
-       jmp     @_ret_from_exception
-
-_ret_from_kernel_thread:
-       mov.l   er2,er0
-       jsr     @_schedule_tail
-       mov.l   @(LER4:16,sp),er0
-       mov.l   @(LER5:16,sp),er1
-       jsr     @er1
-       jmp     @_ret_from_exception
-
-_resume:
-       /*
-        * Beware - when entering resume, offset of tss is in d1,
-        * prev (the current task) is in a0, next (the new task)
-        * is in a1 and d2.b is non-zero if the mm structure is
-        * shared between the tasks, so don't change these
-        * registers until their contents are no longer needed.
-        */
-
-       /* save sr */
-       sub.w   r3,r3
-       stc     ccr,r3l
-       mov.w   r3,@(THREAD_CCR+2:16,er0)
-
-       /* disable interrupts */
-       orc     #0x80,ccr
-       mov.l   @_sw_usp,er3
-       mov.l   er3,@(THREAD_USP:16,er0)
-       mov.l   sp,@(THREAD_KSP:16,er0)
-
-       /* Skip address space switching if they are the same. */
-       /* FIXME: what did we hack out of here, this does nothing! */
-
-       mov.l   @(THREAD_USP:16,er1),er0
-       mov.l   er0,@_sw_usp
-       mov.l   @(THREAD_KSP:16,er1),sp
-
-       /* restore status register */
-       mov.w   @(THREAD_CCR+2:16,er1),r3
-
-       ldc     r3l,ccr
-       rts
-
-_trace_break:
-       subs    #4,sp
-       SAVE_ALL
-       sub.l   er1,er1
-       dec.l   #1,er1
-       mov.l   er1,@(LORIG,sp)
-       mov.l   sp,er0
-       jsr     @_set_esp0
-       mov.l   @_sw_usp,er0
-       mov.l   @er0,er1
-       mov.w   @(-2:16,er1),r2
-       cmp.w   #0x5730,r2
-       beq     1f
-       subs    #2,er1
-       mov.l   er1,@er0
-1:
-       and.w   #0xff,e1
-       mov.l   er1,er0
-       jsr     @_trace_trap
-       jmp     @_ret_from_exception
-
-       .section        .bss
-_sw_ksp:
-       .space  4
-_sw_usp:
-       .space  4
-
-       .end
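
Several sequences above ("mov.l sp,er2 / and.w #0xe000,r2" in _system_call, and the same pattern with er4 in resume_userspace, commented "er4 <- current thread info") recover the current thread_info by masking the stack pointer: the kernel stack is THREAD_SIZE-aligned and thread_info sits at its base, so clearing the low 13 bits of sp lands on it. The C below illustrates just that arithmetic; the 8 KiB THREAD_SIZE is what the 0xe000 mask implies, and the sample stack address is invented.

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 8192u   /* 8 KiB: ~(THREAD_SIZE - 1) is ...e000 in the low word */

/* thread_info sits at the bottom of the THREAD_SIZE-aligned kernel stack,
 * so masking any in-stack address down to that boundary recovers it. */
static uintptr_t thread_info_base(uintptr_t sp)
{
        return sp & ~(uintptr_t)(THREAD_SIZE - 1);
}

int main(void)
{
        uintptr_t sp = 0xffd3a6;    /* some address inside a stack based at 0xffc000 */

        printf("sp=%#lx -> thread_info at %#lx\n",
               (unsigned long)sp, (unsigned long)thread_info_base(sp));
        return 0;
}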
diff --git a/arch/h8300/kernel/gpio.c b/arch/h8300/kernel/gpio.c
deleted file mode 100644 (file)
index 084bfd0..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/gpio.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- */
-
-/*
- * Internal I/O Port Management
- */
-
-#include <linux/stddef.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/fs.h>
-#include <linux/init.h>
-
-#define _(addr) (volatile unsigned char *)(addr)
-#if defined(CONFIG_H83007) || defined(CONFIG_H83068)
-#include <asm/regs306x.h>
-static volatile unsigned char *ddrs[] = {
-       _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR),
-       NULL,    _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR),
-};
-#define MAX_PORT 11
-#endif
-
- #if defined(CONFIG_H83002) || defined(CONFIG_H8048)
-/* Fix me!! */
-#include <asm/regs306x.h>
-static volatile unsigned char *ddrs[] = {
-       _(P1DDR),_(P2DDR),_(P3DDR),_(P4DDR),_(P5DDR),_(P6DDR),
-       NULL,    _(P8DDR),_(P9DDR),_(PADDR),_(PBDDR),
-};
-#define MAX_PORT 11
-#endif
-
-#if defined(CONFIG_H8S2678)
-#include <asm/regs267x.h>
-static volatile unsigned char *ddrs[] = {
-       _(P1DDR),_(P2DDR),_(P3DDR),NULL    ,_(P5DDR),_(P6DDR),
-       _(P7DDR),_(P8DDR),NULL,    _(PADDR),_(PBDDR),_(PCDDR),
-       _(PDDDR),_(PEDDR),_(PFDDR),_(PGDDR),_(PHDDR),
-       _(PADDR),_(PBDDR),_(PCDDR),_(PDDDR),_(PEDDR),_(PFDDR),
-       _(PGDDR),_(PHDDR)
-};
-#define MAX_PORT 17
-#endif
-#undef _
-#if !defined(P1DDR)
-#error Unsupported CPU Selection
-#endif
-
-static struct {
-       unsigned char used;
-       unsigned char ddr;
-} gpio_regs[MAX_PORT];
-
-extern char *_platform_gpio_table(int length);
-
-int h8300_reserved_gpio(int port, unsigned int bits)
-{
-       unsigned char *used;
-
-       if (port < 0 || port >= MAX_PORT)
-               return -1;
-       used = &(gpio_regs[port].used);
-       if ((*used & bits) != 0)
-               return 0;
-       *used |= bits;
-       return 1;
-}
-
-int h8300_free_gpio(int port, unsigned int bits)
-{
-       unsigned char *used;
-
-       if (port < 0 || port >= MAX_PORT)
-               return -1;
-       used = &(gpio_regs[port].used);
-       if ((*used & bits) != bits)
-               return 0;
-       *used &= (~bits);
-       return 1;
-}
-
-int h8300_set_gpio_dir(int port_bit,int dir)
-{
-       int port = (port_bit >> 8) & 0xff;
-       int bit  = port_bit & 0xff;
-
-       if (ddrs[port] == NULL)
-               return 0;
-       if (gpio_regs[port].used & bit) {
-               if (dir)
-                       gpio_regs[port].ddr |= bit;
-               else
-                       gpio_regs[port].ddr &= ~bit;
-               *ddrs[port] = gpio_regs[port].ddr;
-               return 1;
-       } else
-               return 0;
-}
-
-int h8300_get_gpio_dir(int port_bit)
-{
-       int port = (port_bit >> 8) & 0xff;
-       int bit  = port_bit & 0xff;
-
-       if (ddrs[port] == NULL)
-               return 0;
-       if (gpio_regs[port].used & bit) {
-               return (gpio_regs[port].ddr & bit) != 0;
-       } else
-               return -1;
-}
-
-#if defined(CONFIG_PROC_FS)
-static char *port_status(int portno)
-{
-       static char result[10];
-       static const char io[2]={'I','O'};
-       char *rp;
-       int c;
-       unsigned char used,ddr;
-       
-       used = gpio_regs[portno].used;
-       ddr  = gpio_regs[portno].ddr;
-       result[8]='\0';
-       rp = result + 7;
-       for (c = 8; c > 0; c--,rp--,used >>= 1, ddr >>= 1)
-               if (used & 0x01)
-                       *rp = io[ ddr & 0x01];
-               else    
-                       *rp = '-';
-       return result;
-}
-
-static int gpio_proc_show(struct seq_file *m, void *v)
-{
-       static const char port_name[]="123456789ABCDEFGH";
-       int c;
-
-       for (c = 0; c < MAX_PORT; c++) {
-               if (ddrs[c] == NULL)
-                       continue;
-               seq_printf(m, "P%c: %s\n", port_name[c], port_status(c));
-       }
-       return 0;
-}
-
-static int gpio_proc_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, gpio_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations gpio_proc_fops = {
-       .open           = gpio_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static __init int register_proc(void)
-{
-       return proc_create("gpio", S_IRUGO, NULL, &gpio_proc_fops) != NULL;
-}
-
-__initcall(register_proc);
-#endif
-
-void __init h8300_gpio_init(void)
-{
-       memcpy(gpio_regs,_platform_gpio_table(sizeof(gpio_regs)),sizeof(gpio_regs));
-}
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
deleted file mode 100644 (file)
index 53d7c0e..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-#include <linux/module.h>
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/user.h>
-#include <linux/elfcore.h>
-#include <linux/in6.h>
-#include <linux/interrupt.h>
-
-#include <asm/setup.h>
-#include <asm/pgalloc.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/checksum.h>
-#include <asm/current.h>
-#include <asm/gpio.h>
-
-//asmlinkage long long __ashrdi3 (long long, int);
-//asmlinkage long long __lshrdi3 (long long, int);
-extern char h8300_debug_device[];
-
-/* platform dependent support */
-
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
-
-EXPORT_SYMBOL(ip_fast_csum);
-
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-
-/* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-
-/* The following are special because they're not called
-   explicitly (the C compiler generates them).  Fortunately,
-   their interface isn't gonna change any time soon now, so
-   it's OK to leave it out of version control.  */
-//EXPORT_SYMBOL(__ashrdi3);
-//EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(memmove);
-
-/*
- * libgcc functions - functions that are used internally by the
- * compiler...  (prototypes are not correct though, but that
- * doesn't really matter since they're not versioned).
- */
-extern void __gcc_bcmp(void);
-extern void __ashldi3(void);
-extern void __ashrdi3(void);
-extern void __cmpdi2(void);
-extern void __divdi3(void);
-extern void __divsi3(void);
-extern void __lshrdi3(void);
-extern void __moddi3(void);
-extern void __modsi3(void);
-extern void __muldi3(void);
-extern void __mulsi3(void);
-extern void __negdi2(void);
-extern void __ucmpdi2(void);
-extern void __udivdi3(void);
-extern void __udivmoddi4(void);
-extern void __udivsi3(void);
-extern void __umoddi3(void);
-extern void __umodsi3(void);
-
-        /* gcc lib functions */
-EXPORT_SYMBOL(__gcc_bcmp);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__cmpdi2);
-EXPORT_SYMBOL(__divdi3);
-EXPORT_SYMBOL(__divsi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__moddi3);
-EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
-EXPORT_SYMBOL(__mulsi3);
-EXPORT_SYMBOL(__negdi2);
-EXPORT_SYMBOL(__ucmpdi2);
-EXPORT_SYMBOL(__udivdi3);
-EXPORT_SYMBOL(__udivmoddi4);
-EXPORT_SYMBOL(__udivsi3);
-EXPORT_SYMBOL(__umoddi3);
-EXPORT_SYMBOL(__umodsi3);
-
-EXPORT_SYMBOL(h8300_reserved_gpio);
-EXPORT_SYMBOL(h8300_free_gpio);
-EXPORT_SYMBOL(h8300_set_gpio_dir);
diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c
deleted file mode 100644 (file)
index 2fa8ac7..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * linux/arch/h8300/kernel/irq.c
- *
- * Copyright 2007 Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/kernel_stat.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/random.h>
-#include <linux/bootmem.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-
-#include <asm/traps.h>
-#include <asm/io.h>
-#include <asm/setup.h>
-#include <asm/errno.h>
-
-/*#define DEBUG*/
-
-extern unsigned long *interrupt_redirect_table;
-extern const int h8300_saved_vectors[];
-extern const h8300_vector h8300_trap_table[];
-int h8300_enable_irq_pin(unsigned int irq);
-void h8300_disable_irq_pin(unsigned int irq);
-
-#define CPU_VECTOR ((unsigned long *)0x000000)
-#define ADDR_MASK (0xffffff)
-
-static inline int is_ext_irq(unsigned int irq)
-{
-       return (irq >= EXT_IRQ0 && irq <= (EXT_IRQ0 + EXT_IRQS));
-}
-
-static void h8300_enable_irq(struct irq_data *data)
-{
-       if (is_ext_irq(data->irq))
-               IER_REGS |= 1 << (data->irq - EXT_IRQ0);
-}
-
-static void h8300_disable_irq(struct irq_data *data)
-{
-       if (is_ext_irq(data->irq))
-               IER_REGS &= ~(1 << (data->irq - EXT_IRQ0));
-}
-
-static unsigned int h8300_startup_irq(struct irq_data *data)
-{
-       if (is_ext_irq(data->irq))
-               return h8300_enable_irq_pin(data->irq);
-       else
-               return 0;
-}
-
-static void h8300_shutdown_irq(struct irq_data *data)
-{
-       if (is_ext_irq(data->irq))
-               h8300_disable_irq_pin(data->irq);
-}
-
-/*
- * h8300 interrupt controller implementation
- */
-struct irq_chip h8300irq_chip = {
-       .name           = "H8300-INTC",
-       .irq_startup    = h8300_startup_irq,
-       .irq_shutdown   = h8300_shutdown_irq,
-       .irq_enable     = h8300_enable_irq,
-       .irq_disable    = h8300_disable_irq,
-};
-
-#if defined(CONFIG_RAMKERNEL)
-static unsigned long __init *get_vector_address(void)
-{
-       unsigned long *rom_vector = CPU_VECTOR;
-       unsigned long base,tmp;
-       int vec_no;
-
-       base = rom_vector[EXT_IRQ0] & ADDR_MASK;
-
-       /* check romvector format */
-       for (vec_no = EXT_IRQ1; vec_no <= EXT_IRQ0+EXT_IRQS; vec_no++) {
-               if ((base+(vec_no - EXT_IRQ0)*4) != (rom_vector[vec_no] & ADDR_MASK))
-                       return NULL;
-       }
-
-       /* ramvector base address */
-       base -= EXT_IRQ0*4;
-
-       /* writable check */
-       tmp = ~(*(volatile unsigned long *)base);
-       (*(volatile unsigned long *)base) = tmp;
-       if ((*(volatile unsigned long *)base) != tmp)
-               return NULL;
-       return (unsigned long *)base;
-}
-
-static void __init setup_vector(void)
-{
-       int i;
-       unsigned long *ramvec,*ramvec_p;
-       const h8300_vector *trap_entry;
-       const int *saved_vector;
-
-       ramvec = get_vector_address();
-       if (ramvec == NULL)
-               panic("interrupt vector setup failed.");
-       else
-               printk(KERN_INFO "virtual vector at 0x%08lx\n",(unsigned long)ramvec);
-
-       /* create redirect table */
-       ramvec_p = ramvec;
-       trap_entry = h8300_trap_table;
-       saved_vector = h8300_saved_vectors;
-       for ( i = 0; i < NR_IRQS; i++) {
-               if (i == *saved_vector) {
-                       ramvec_p++;
-                       saved_vector++;
-               } else {
-                       if ( i < NR_TRAPS ) {
-                               if (*trap_entry)
-                                       *ramvec_p = VECTOR(*trap_entry);
-                               ramvec_p++;
-                               trap_entry++;
-                       } else
-                               *ramvec_p++ = REDIRECT(interrupt_entry);
-               }
-       }
-       interrupt_redirect_table = ramvec;
-#ifdef DEBUG
-       ramvec_p = ramvec;
-       for (i = 0; i < NR_IRQS; i++) {
-               if ((i % 8) == 0)
-                       printk(KERN_DEBUG "\n%p: ",ramvec_p);
-               printk(KERN_DEBUG "%p ",*ramvec_p);
-               ramvec_p++;
-       }
-       printk(KERN_DEBUG "\n");
-#endif
-}
-#else
-#define setup_vector() do { } while(0)
-#endif
-
-void __init init_IRQ(void)
-{
-       int c;
-
-       setup_vector();
-
-       for (c = 0; c < NR_IRQS; c++)
-               irq_set_chip_and_handler(c, &h8300irq_chip, handle_simple_irq);
-}
-
-asmlinkage void do_IRQ(int irq)
-{
-       irq_enter();
-       generic_handle_irq(irq);
-       irq_exit();
-}
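
get_vector_address() above ends with a write-and-read-back probe: it complements the word at the candidate RAM vector base, writes it, and treats the address as usable only if the new value sticks. The sketch below shows that probe in isolation; the function name, the restore of the original word, and the userspace demo in main() are additions for clarity rather than kernel code.

#include <stdint.h>

/* Returns non-zero if the 32-bit word at addr accepts writes (RAM),
 * zero if the write does not stick (ROM or unmapped). */
static int word_is_writable(volatile uint32_t *addr)
{
        uint32_t old = *addr;
        uint32_t probe = ~old;

        *addr = probe;
        if (*addr != probe)
                return 0;

        *addr = old;            /* put the original contents back */
        return 1;
}

int main(void)
{
        uint32_t ram_word = 0x12345678;   /* an ordinary RAM location */

        return word_is_writable(&ram_word) ? 0 : 1;
}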
diff --git a/arch/h8300/kernel/module.c b/arch/h8300/kernel/module.c
deleted file mode 100644 (file)
index 1d526e0..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-#include <linux/moduleloader.h>
-#include <linux/elf.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(fmt...)
-#endif
-
-int apply_relocate_add(Elf32_Shdr *sechdrs,
-                      const char *strtab,
-                      unsigned int symindex,
-                      unsigned int relsec,
-                      struct module *me)
-{
-       unsigned int i;
-       Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
-
-       DEBUGP("Applying relocate section %u to %u\n", relsec,
-              sechdrs[relsec].sh_info);
-       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
-               /* This is where to make the change */
-               uint32_t *loc = (uint32_t *)(sechdrs[sechdrs[relsec].sh_info].sh_addr
-                                            + rela[i].r_offset);
-               /* This is the symbol it is referring to.  Note that all
-                  undefined symbols have been resolved.  */
-               Elf32_Sym *sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
-                       + ELF32_R_SYM(rela[i].r_info);
-               uint32_t v = sym->st_value + rela[i].r_addend;
-
-               switch (ELF32_R_TYPE(rela[i].r_info)) {
-               case R_H8_DIR24R8:
-                       loc = (uint32_t *)((uint32_t)loc - 1);
-                       *loc = (*loc & 0xff000000) | ((*loc & 0xffffff) + v);
-                       break;
-               case R_H8_DIR24A8:
-                       if (ELF32_R_SYM(rela[i].r_info))
-                               *loc += v;
-                       break;
-               case R_H8_DIR32:
-               case R_H8_DIR32A16:
-                       *loc += v;
-                       break;
-               case R_H8_PCREL16:
-                       v -= (unsigned long)loc + 2;
-                       if ((Elf32_Sword)v > 0x7fff || 
-                           (Elf32_Sword)v < -(Elf32_Sword)0x8000)
-                               goto overflow;
-                       else 
-                               *(unsigned short *)loc = v;
-                       break;
-               case R_H8_PCREL8:
-                       v -= (unsigned long)loc + 1;
-                       if ((Elf32_Sword)v > 0x7f || 
-                           (Elf32_Sword)v < -(Elf32_Sword)0x80)
-                               goto overflow;
-                       else 
-                               *(unsigned char *)loc = v;
-                       break;
-               default:
-                       printk(KERN_ERR "module %s: Unknown relocation: %u\n",
-                              me->name, ELF32_R_TYPE(rela[i].r_info));
-                       return -ENOEXEC;
-               }
-       }
-       return 0;
- overflow:
-       printk(KERN_ERR "module %s: relocation offset overflow: %08x\n",
-              me->name, rela[i].r_offset);
-       return -ENOEXEC;
-}
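
The R_H8_PCREL16 and R_H8_PCREL8 cases above follow one pattern: compute value = symbol + addend - (address of the field + field size), reject the relocation if it does not fit the signed field, otherwise store it. Below is a host-side sketch of the 16-bit case with made-up addresses; the function name and the explicit big-endian byte store are illustrative, not part of the kernel's relocation code.

#include <stdint.h>
#include <stdio.h>

/* Returns 0 on success, -1 if the displacement does not fit in 16 signed bits. */
static int apply_pcrel16(uint8_t *field, uint32_t place,
                         uint32_t sym_value, int32_t addend)
{
        int32_t v = (int32_t)(sym_value + addend) - (int32_t)(place + 2);

        if (v > 0x7fff || v < -0x8000)
                return -1;                      /* overflow: target too far away */

        field[0] = (v >> 8) & 0xff;             /* H8/300 stores big-endian */
        field[1] = v & 0xff;
        return 0;
}

int main(void)
{
        uint8_t insn_field[2];

        /* A branch whose 16-bit displacement field sits at 0x1000, aimed at a
         * symbol resolved to 0x1234 with no addend. */
        if (apply_pcrel16(insn_field, 0x1000, 0x1234, 0) == 0)
                printf("encoded displacement: %02x %02x\n",
                       insn_field[0], insn_field[1]);
        return 0;
}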
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
deleted file mode 100644 (file)
index 1a744ab..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/process.c
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Based on:
- *
- *  linux/arch/m68knommu/kernel/process.c
- *
- *  Copyright (C) 1998  D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>,
- *                      Kenneth Albanowski <kjahds@kjahds.com>,
- *                      The Silver Hammer Group, Ltd.
- *
- *  linux/arch/m68k/kernel/process.c
- *
- *  Copyright (C) 1995  Hamish Macdonald
- *
- *  68060 fixes by Jesper Skov
- */
-
-/*
- * This file handles the architecture-dependent parts of process handling..
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/interrupt.h>
-#include <linux/reboot.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/rcupdate.h>
-
-#include <asm/uaccess.h>
-#include <asm/traps.h>
-#include <asm/setup.h>
-#include <asm/pgtable.h>
-
-void (*pm_power_off)(void) = NULL;
-EXPORT_SYMBOL(pm_power_off);
-
-asmlinkage void ret_from_fork(void);
-asmlinkage void ret_from_kernel_thread(void);
-
-/*
- * The idle loop on an H8/300..
- */
-#if !defined(CONFIG_H8300H_SIM) && !defined(CONFIG_H8S_SIM)
-void arch_cpu_idle(void)
-{
-       local_irq_enable();
-       /* XXX: race here! What if need_resched() gets set now? */
-       __asm__("sleep");
-}
-#endif
-
-void machine_restart(char * __unused)
-{
-       local_irq_disable();
-       __asm__("jmp @@0"); 
-}
-
-void machine_halt(void)
-{
-       local_irq_disable();
-       __asm__("sleep");
-       for (;;);
-}
-
-void machine_power_off(void)
-{
-       local_irq_disable();
-       __asm__("sleep");
-       for (;;);
-}
-
-void show_regs(struct pt_regs * regs)
-{
-       show_regs_print_info(KERN_DEFAULT);
-
-       printk("\nPC: %08lx  Status: %02x",
-              regs->pc, regs->ccr);
-       printk("\nORIG_ER0: %08lx ER0: %08lx ER1: %08lx",
-              regs->orig_er0, regs->er0, regs->er1);
-       printk("\nER2: %08lx ER3: %08lx ER4: %08lx ER5: %08lx",
-              regs->er2, regs->er3, regs->er4, regs->er5);
-       printk("\nER6: %08lx ",regs->er6);
-       if (user_mode(regs))
-               printk("USP: %08lx\n", rdusp());
-       else
-               printk("\n");
-}
-
-void flush_thread(void)
-{
-}
-
-int copy_thread(unsigned long clone_flags,
-                unsigned long usp, unsigned long topstk,
-                struct task_struct * p)
-{
-       struct pt_regs * childregs;
-
-       childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
-
-       if (unlikely(p->flags & PF_KTHREAD)) {
-               memset(childregs, 0, sizeof(struct pt_regs));
-               childregs->retpc = (unsigned long) ret_from_kernel_thread;
-               childregs->er4 = topstk; /* arg */
-               childregs->er5 = usp; /* fn */
-               p->thread.ksp = (unsigned long)childregs;
-       }
-       *childregs = *current_pt_regs();
-       childregs->retpc = (unsigned long) ret_from_fork;
-       childregs->er0 = 0;
-       p->thread.usp = usp ?: rdusp();
-       p->thread.ksp = (unsigned long)childregs;
-
-       return 0;
-}
-
-unsigned long thread_saved_pc(struct task_struct *tsk)
-{
-       return ((struct pt_regs *)tsk->thread.esp0)->pc;
-}
-
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long fp, pc;
-       unsigned long stack_page;
-       int count = 0;
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-
-       stack_page = (unsigned long)p;
-       fp = ((struct pt_regs *)p->thread.ksp)->er6;
-       do {
-               if (fp < stack_page+sizeof(struct thread_info) ||
-                   fp >= 8184+stack_page)
-                       return 0;
-               pc = ((unsigned long *)fp)[1];
-               if (!in_sched_functions(pc))
-                       return pc;
-               fp = *(unsigned long *) fp;
-       } while (count++ < 16);
-       return 0;
-}
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
deleted file mode 100644 (file)
index 748cf65..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/ptrace.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Based on:
- *  linux/arch/m68k/kernel/ptrace.c
- *
- *  Copyright (C) 1994 by Hamish Macdonald
- *  Taken from linux/kernel/ptrace.c and modified for M680x0.
- *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/user.h>
-#include <linux/signal.h>
-
-#include <asm/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/signal.h>
-
-/* cpu depend functions */
-extern long h8300_get_reg(struct task_struct *task, int regno);
-extern int  h8300_put_reg(struct task_struct *task, int regno, unsigned long data);
-
-
-void user_disable_single_step(struct task_struct *child)
-{
-}
-
-/*
- * does not yet catch signals sent when the child dies.
- * in exit.c or in signal.c.
- */
-
-void ptrace_disable(struct task_struct *child)
-{
-       user_disable_single_step(child);
-}
-
-long arch_ptrace(struct task_struct *child, long request,
-                unsigned long addr, unsigned long data)
-{
-       int ret;
-       int regno = addr >> 2;
-       unsigned long __user *datap = (unsigned long __user *) data;
-
-       switch (request) {
-       /* read the word at location addr in the USER area. */
-               case PTRACE_PEEKUSR: {
-                       unsigned long tmp = 0;
-                       
-                       if ((addr & 3) || addr >= sizeof(struct user)) {
-                               ret = -EIO;
-                               break ;
-                       }
-                       
-                       ret = 0;  /* Default return condition */
-
-                       if (regno < H8300_REGS_NO)
-                               tmp = h8300_get_reg(child, regno);
-                       else {
-                               switch (regno) {
-                               case 49:
-                                       tmp = child->mm->start_code;
-                                       break ;
-                               case 50:
-                                       tmp = child->mm->start_data;
-                                       break ;
-                               case 51:
-                                       tmp = child->mm->end_code;
-                                       break ;
-                               case 52:
-                                       tmp = child->mm->end_data;
-                                       break ;
-                               default:
-                                       ret = -EIO;
-                               }
-                       }
-                       if (!ret)
-                               ret = put_user(tmp, datap);
-                       break ;
-               }
-
-      /* when I and D space are separate, this will have to be fixed. */
-               case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
-                       if ((addr & 3) || addr >= sizeof(struct user)) {
-                               ret = -EIO;
-                               break ;
-                       }
-                           
-                       if (regno == PT_ORIG_ER0) {
-                               ret = -EIO;
-                               break ;
-                       }
-                       if (regno < H8300_REGS_NO) {
-                               ret = h8300_put_reg(child, regno, data);
-                               break ;
-                       }
-                       ret = -EIO;
-                       break ;
-
-               case PTRACE_GETREGS: { /* Get all gp regs from the child. */
-                       int i;
-                       unsigned long tmp;
-                       for (i = 0; i < H8300_REGS_NO; i++) {
-                           tmp = h8300_get_reg(child, i);
-                           if (put_user(tmp, datap)) {
-                               ret = -EFAULT;
-                               break;
-                           }
-                           datap++;
-                       }
-                       ret = 0;
-                       break;
-               }
-
-               case PTRACE_SETREGS: { /* Set all gp regs in the child. */
-                       int i;
-                       unsigned long tmp;
-                       for (i = 0; i < H8300_REGS_NO; i++) {
-                           if (get_user(tmp, datap)) {
-                               ret = -EFAULT;
-                               break;
-                           }
-                           h8300_put_reg(child, i, tmp);
-                           datap++;
-                       }
-                       ret = 0;
-                       break;
-               }
-
-               default:
-                       ret = ptrace_request(child, request, addr, data);
-                       break;
-       }
-       return ret;
-}
-
-asmlinkage void do_syscall_trace(void)
-{
-       if (!test_thread_flag(TIF_SYSCALL_TRACE))
-               return;
-       if (!(current->ptrace & PT_PTRACED))
-               return;
-       ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
-                                ? 0x80 : 0));
-       /*
-        * this isn't the same as continuing with a signal, but it will do
-        * for normal use.  strace only continues with a signal if the
-        * stopping signal is not SIGTRAP.  -brl
-        */
-       if (current->exit_code) {
-               send_sig(current->exit_code, current, 1);
-               current->exit_code = 0;
-       }
-}
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
deleted file mode 100644 (file)
index d0b1607..0000000
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/setup.c
- *
- *  Copyleft  ()) 2000       James D. Schettine {james@telos-systems.com}
- *  Copyright (C) 1999,2000  Greg Ungerer (gerg@snapgear.com)
- *  Copyright (C) 1998,1999  D. Jeff Dionne <jeff@lineo.ca>
- *  Copyright (C) 1998       Kenneth Albanowski <kjahds@kjahds.com>
- *  Copyright (C) 1995       Hamish Macdonald
- *  Copyright (C) 2000       Lineo Inc. (www.lineo.com) 
- *  Copyright (C) 2001              Lineo, Inc. <www.lineo.com>
- *
- *  H8/300 porting Yoshinori Sato <ysato@users.sourceforge.jp>
- */
-
-/*
- * This file handles the architecture-dependent parts of system setup
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/fb.h>
-#include <linux/console.h>
-#include <linux/genhd.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/major.h>
-#include <linux/bootmem.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-
-#include <asm/setup.h>
-#include <asm/irq.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-
-#if defined(__H8300H__)
-#define CPU "H8/300H"
-#include <asm/regs306x.h>
-#endif
-
-#if defined(__H8300S__)
-#define CPU "H8S"
-#include <asm/regs267x.h>
-#endif
-
-#define STUBSIZE 0xc000
-
-unsigned long rom_length;
-unsigned long memory_start;
-unsigned long memory_end;
-
-char __initdata command_line[COMMAND_LINE_SIZE];
-
-extern int _ramstart, _ramend;
-extern char _target_name[];
-extern void h8300_gpio_init(void);
-
-#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) \
-    && defined(CONFIG_GDB_MAGICPRINT)
-/* printk with gdb service */
-static void gdb_console_output(struct console *c, const char *msg, unsigned len)
-{
-       for (; len > 0; len--) {
-               asm("mov.w %0,r2\n\t"
-                    "jsr @0xc4"::"r"(*msg++):"er2");
-       }
-}
-
-/*
- *     Setup initial baud/bits/parity. We do two things here:
- *     - construct a cflag setting for the first rs_open()
- *     - initialize the serial port
- *     Return non-zero if we didn't find a serial port.
- */
-static int __init gdb_console_setup(struct console *co, char *options)
-{
-       return 0;
-}
-
-static const struct console gdb_console = {
-       .name           = "gdb_con",
-       .write          = gdb_console_output,
-       .device         = NULL,
-       .setup          = gdb_console_setup,
-       .flags          = CON_PRINTBUFFER,
-       .index          = -1,
-};
-#endif
-
-void __init setup_arch(char **cmdline_p)
-{
-       int bootmap_size;
-
-       memory_start = (unsigned long) &_ramstart;
-
-       /* allow for ROMFS on the end of the kernel */
-       if (memcmp((void *)memory_start, "-rom1fs-", 8) == 0) {
-#if defined(CONFIG_BLK_DEV_INITRD)
-               initrd_start = memory_start;
-               initrd_end = memory_start += be32_to_cpu(((unsigned long *) (memory_start))[2]);
-#else
-               memory_start += be32_to_cpu(((unsigned long *) memory_start)[2]);
-#endif
-       }
-       memory_start = PAGE_ALIGN(memory_start);
-#if !defined(CONFIG_BLKDEV_RESERVE)
-       memory_end = (unsigned long) &_ramend; /* by now the stack is part of the init task */
-#if defined(CONFIG_GDB_DEBUG)
-       memory_end -= STUBSIZE;
-#endif
-#else
-       if ((memory_end < CONFIG_BLKDEV_RESERVE_ADDRESS) && 
-           (memory_end > CONFIG_BLKDEV_RESERVE_ADDRESS))
-           /* overlap userarea */
-           memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; 
-#endif
-
-       init_mm.start_code = (unsigned long) _stext;
-       init_mm.end_code = (unsigned long) _etext;
-       init_mm.end_data = (unsigned long) _edata;
-       init_mm.brk = (unsigned long) 0; 
-
-#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT)
-       register_console((struct console *)&gdb_console);
-#endif
-
-       printk(KERN_INFO "\r\n\nuClinux " CPU "\n");
-       printk(KERN_INFO "Target Hardware: %s\n",_target_name);
-       printk(KERN_INFO "Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
-       printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n");
-
-#ifdef DEBUG
-       printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p "
-               "BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start,
-               __bss_stop);
-       printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx "
-               "STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start,
-               memory_end, memory_end, &_ramend);
-#endif
-
-#ifdef CONFIG_DEFAULT_CMDLINE
-       /* set from default command line */
-       if (*command_line == '\0')
-               strcpy(command_line,CONFIG_KERNEL_COMMAND);
-#endif
-       /* Keep a copy of command line */
-       *cmdline_p = &command_line[0];
-       memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
-       boot_command_line[COMMAND_LINE_SIZE-1] = 0;
-
-#ifdef DEBUG
-       if (strlen(*cmdline_p)) 
-               printk(KERN_DEBUG "Command line: '%s'\n", *cmdline_p);
-#endif
-
-       /*
-        * give all the memory to the bootmap allocator,  tell it to put the
-        * boot mem_map at the start of memory
-        */
-       bootmap_size = init_bootmem_node(
-                       NODE_DATA(0),
-                       memory_start >> PAGE_SHIFT, /* map goes here */
-                       PAGE_OFFSET >> PAGE_SHIFT,      /* 0 on coldfire */
-                       memory_end >> PAGE_SHIFT);
-       /*
-        * free the usable memory,  we have to make sure we do not free
-        * the bootmem bitmap so we then reserve it after freeing it :-)
-        */
-       free_bootmem(memory_start, memory_end - memory_start);
-       reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
-       /*
-        * get kmalloc into gear
-        */
-       paging_init();
-       h8300_gpio_init();
-#if defined(CONFIG_H8300_AKI3068NET) && defined(CONFIG_IDE)
-       {
-#define AREABIT(addr) (1 << (((addr) >> 21) & 7))
-               /* setup BSC */
-               volatile unsigned char *abwcr = (volatile unsigned char *)ABWCR;
-               volatile unsigned char *cscr = (volatile unsigned char *)CSCR;
-               *abwcr &= ~(AREABIT(CONFIG_H8300_IDE_BASE) | AREABIT(CONFIG_H8300_IDE_ALT));
-               *cscr  |= (AREABIT(CONFIG_H8300_IDE_BASE) | AREABIT(CONFIG_H8300_IDE_ALT)) | 0x0f;
-       }
-#endif
-#ifdef DEBUG
-       printk(KERN_DEBUG "Done setup_arch\n");
-#endif
-}
-
-/*
- *     Get CPU information for use by the procfs.
- */
-
-static int show_cpuinfo(struct seq_file *m, void *v)
-{
-    char *cpu;
-    int mode;
-    u_long clockfreq;
-
-    cpu = CPU;
-    mode = *(volatile unsigned char *)MDCR & 0x07;
-
-    clockfreq = CONFIG_CPU_CLOCK;
-
-    seq_printf(m,  "CPU:\t\t%s (mode:%d)\n"
-                  "Clock:\t\t%lu.%1luMHz\n"
-                  "BogoMips:\t%lu.%02lu\n"
-                  "Calibration:\t%lu loops\n",
-                  cpu,mode,
-                  clockfreq/1000,clockfreq%1000,
-                  (loops_per_jiffy*HZ)/500000,((loops_per_jiffy*HZ)/5000)%100,
-                  (loops_per_jiffy*HZ));
-
-    return 0;
-}
-
-static void *c_start(struct seq_file *m, loff_t *pos)
-{
-       return *pos < NR_CPUS ? ((void *) 0x12345678) : NULL;
-}
-
-static void *c_next(struct seq_file *m, void *v, loff_t *pos)
-{
-       ++*pos;
-       return c_start(m, pos);
-}
-
-static void c_stop(struct seq_file *m, void *v)
-{
-}
-
-const struct seq_operations cpuinfo_op = {
-       .start  = c_start,
-       .next   = c_next,
-       .stop   = c_stop,
-       .show   = show_cpuinfo,
-};
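
The bootmem comments in setup_arch() above compress a three-step bootstrap: hand the whole range to the allocator (which starts with every page reserved), free the usable range, then immediately re-reserve the pages that hold the allocator's own map so they can never be handed out. The toy model below replays that ordering with invented sizes and a byte-per-page "map" instead of the real bit-per-page bitmap; none of it is kernel API.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE    4096u
#define TOTAL_PAGES  1024u                        /* pretend 4 MiB of RAM */

static unsigned char page_reserved[TOTAL_PAGES];  /* toy map: one byte per page */

static void mark_range(unsigned int start_pfn, unsigned int pages, int reserved)
{
        memset(&page_reserved[start_pfn], reserved, pages);
}

int main(void)
{
        /* The map itself occupies the first pages of usable memory, the way
         * init_bootmem_node() returns a bitmap size in the deleted code. */
        unsigned int map_bytes = sizeof(page_reserved);
        unsigned int map_pages = (map_bytes + PAGE_SIZE - 1) / PAGE_SIZE;

        memset(page_reserved, 1, sizeof(page_reserved)); /* everything reserved */
        mark_range(0, TOTAL_PAGES, 0);                   /* free_bootmem() step  */
        mark_range(0, map_pages, 1);                     /* re-reserve the map   */

        printf("usable pages: %u, pages holding the map: %u\n",
               TOTAL_PAGES - map_pages, map_pages);
        return 0;
}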
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
deleted file mode 100644 (file)
index a65ff3b..0000000
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/signal.c
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-/*
- * uClinux H8/300 support by Yoshinori Sato <ysato@users.sourceforge.jp>
- *                and David McCullough <davidm@snapgear.com>
- *
- * Based on
- * Linux/m68k by Hamish Macdonald
- */
-
-/*
- * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
- * Atari :-) Current limitation: Only one sigstack can be active at one time.
- * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
- * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
- * signal handlers!
- */
-
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/syscalls.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/unistd.h>
-#include <linux/stddef.h>
-#include <linux/highuid.h>
-#include <linux/personality.h>
-#include <linux/tty.h>
-#include <linux/binfmts.h>
-#include <linux/tracehook.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/traps.h>
-#include <asm/ucontext.h>
-
-/*
- * Do a signal return; undo the signal stack.
- *
- * Keep the return code on the stack quadword aligned!
- * That makes the cache flush below easier.
- */
-
-struct sigframe
-{
-       long dummy_er0;
-       long dummy_vector;
-#if defined(CONFIG_CPU_H8S)
-       short dummy_exr;
-#endif
-       long dummy_pc;
-       char *pretcode;
-       unsigned char retcode[8];
-       unsigned long extramask[_NSIG_WORDS-1];
-       struct sigcontext sc;
-       int sig;
-} __attribute__((aligned(2),packed));
-
-struct rt_sigframe
-{
-       long dummy_er0;
-       long dummy_vector;
-#if defined(CONFIG_CPU_H8S)
-       short dummy_exr;
-#endif
-       long dummy_pc;
-       char *pretcode;
-       struct siginfo *pinfo;
-       void *puc;
-       unsigned char retcode[8];
-       struct siginfo info;
-       struct ucontext uc;
-       int sig;
-} __attribute__((aligned(2),packed));
-
-static inline int
-restore_sigcontext(struct sigcontext *usc, int *pd0)
-{
-       struct pt_regs *regs = current_pt_regs();
-       int err = 0;
-       unsigned int ccr;
-       unsigned int usp;
-       unsigned int er0;
-
-       /* Always make any pending restarted system calls return -EINTR */
-       current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-#define COPY(r) err |= __get_user(regs->r, &usc->sc_##r)    /* restore passed registers */
-       COPY(er1);
-       COPY(er2);
-       COPY(er3);
-       COPY(er5);
-       COPY(pc);
-       ccr = regs->ccr & 0x10;
-       COPY(ccr);
-#undef COPY
-       regs->ccr &= 0xef;
-       regs->ccr |= ccr;
-       regs->orig_er0 = -1;            /* disable syscall checks */
-       err |= __get_user(usp, &usc->sc_usp);
-       wrusp(usp);
-
-       err |= __get_user(er0, &usc->sc_er0);
-       *pd0 = er0;
-       return err;
-}
-
-asmlinkage int sys_sigreturn(void)
-{
-       unsigned long usp = rdusp();
-       struct sigframe *frame = (struct sigframe *)(usp - 4);
-       sigset_t set;
-       int er0;
-
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
-       if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
-           (_NSIG_WORDS > 1 &&
-            __copy_from_user(&set.sig[1], &frame->extramask,
-                             sizeof(frame->extramask))))
-               goto badframe;
-
-       set_current_blocked(&set);
-       
-       if (restore_sigcontext(&frame->sc, &er0))
-               goto badframe;
-       return er0;
-
-badframe:
-       force_sig(SIGSEGV, current);
-       return 0;
-}
-
-asmlinkage int sys_rt_sigreturn(void)
-{
-       unsigned long usp = rdusp();
-       struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
-       sigset_t set;
-       int er0;
-
-       if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
-               goto badframe;
-       if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
-               goto badframe;
-
-       set_current_blocked(&set);
-       
-       if (restore_sigcontext(&frame->uc.uc_mcontext, &er0))
-               goto badframe;
-
-       if (restore_altstack(&frame->uc.uc_stack))
-               goto badframe;
-
-       return er0;
-
-badframe:
-       force_sig(SIGSEGV, current);
-       return 0;
-}
-
-static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
-                            unsigned long mask)
-{
-       int err = 0;
-
-       err |= __put_user(regs->er0, &sc->sc_er0);
-       err |= __put_user(regs->er1, &sc->sc_er1);
-       err |= __put_user(regs->er2, &sc->sc_er2);
-       err |= __put_user(regs->er3, &sc->sc_er3);
-       err |= __put_user(regs->er4, &sc->sc_er4);
-       err |= __put_user(regs->er5, &sc->sc_er5);
-       err |= __put_user(regs->er6, &sc->sc_er6);
-       err |= __put_user(rdusp(),   &sc->sc_usp);
-       err |= __put_user(regs->pc,  &sc->sc_pc);
-       err |= __put_user(regs->ccr, &sc->sc_ccr);
-       err |= __put_user(mask,      &sc->sc_mask);
-
-       return err;
-}
-
-static inline void *
-get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
-{
-       unsigned long usp;
-
-       /* Default to using normal stack.  */
-       usp = rdusp();
-
-       /* This is the X/Open sanctioned signal stack switching.  */
-       if (ka->sa.sa_flags & SA_ONSTACK) {
-               if (!sas_ss_flags(usp))
-                       usp = current->sas_ss_sp + current->sas_ss_size;
-       }
-       return (void *)((usp - frame_size) & -8UL);
-}
-
-static int setup_frame (int sig, struct k_sigaction *ka,
-                        sigset_t *set, struct pt_regs *regs)
-{
-       struct sigframe *frame;
-       int err = 0;
-       int usig;
-       unsigned char *ret;
-
-       frame = get_sigframe(ka, regs, sizeof(*frame));
-
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-               goto give_sigsegv;
-
-       usig = current_thread_info()->exec_domain
-               && current_thread_info()->exec_domain->signal_invmap
-               && sig < 32
-               ? current_thread_info()->exec_domain->signal_invmap[sig]
-               : sig;
-
-       err |= __put_user(usig, &frame->sig);
-       if (err)
-               goto give_sigsegv;
-
-       err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
-       if (err)
-               goto give_sigsegv;
-
-       if (_NSIG_WORDS > 1) {
-               err |= copy_to_user(frame->extramask, &set->sig[1],
-                                   sizeof(frame->extramask));
-               if (err)
-                       goto give_sigsegv;
-       }
-
-       ret = frame->retcode;
-       if (ka->sa.sa_flags & SA_RESTORER)
-               ret = (unsigned char *)(ka->sa.sa_restorer);
-       else {
-               /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */
-               err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff),
-                                 (unsigned long *)(frame->retcode + 0));
-               err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
-       }
-
-       /* Set up to return from userspace.  */
-       err |= __put_user(ret, &frame->pretcode);
-
-       if (err)
-               goto give_sigsegv;
-
-       /* Set up registers for signal handler */
-       wrusp ((unsigned long) frame);
-       regs->pc = (unsigned long) ka->sa.sa_handler;
-       regs->er0 = (current_thread_info()->exec_domain
-                          && current_thread_info()->exec_domain->signal_invmap
-                          && sig < 32
-                          ? current_thread_info()->exec_domain->signal_invmap[sig]
-                         : sig);
-       regs->er1 = (unsigned long)&(frame->sc);
-       regs->er5 = current->mm->start_data;    /* GOT base */
-
-       return 0;
-
-give_sigsegv:
-       force_sigsegv(sig, current);
-       return -EFAULT;
-}
-
-static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
-                           sigset_t *set, struct pt_regs *regs)
-{
-       struct rt_sigframe *frame;
-       int err = 0;
-       int usig;
-       unsigned char *ret;
-
-       frame = get_sigframe(ka, regs, sizeof(*frame));
-
-       if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
-               goto give_sigsegv;
-
-       usig = current_thread_info()->exec_domain
-               && current_thread_info()->exec_domain->signal_invmap
-               && sig < 32
-               ? current_thread_info()->exec_domain->signal_invmap[sig]
-               : sig;
-
-       err |= __put_user(usig, &frame->sig);
-       if (err)
-               goto give_sigsegv;
-
-       err |= __put_user(&frame->info, &frame->pinfo);
-       err |= __put_user(&frame->uc, &frame->puc);
-       err |= copy_siginfo_to_user(&frame->info, info);
-       if (err)
-               goto give_sigsegv;
-
-       /* Create the ucontext.  */
-       err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __save_altstack(&frame->uc.uc_stack, rdusp());
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
-       err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
-       if (err)
-               goto give_sigsegv;
-
-       /* Set up to return from userspace.  */
-       ret = frame->retcode;
-       if (ka->sa.sa_flags & SA_RESTORER)
-               ret = (unsigned char *)(ka->sa.sa_restorer);
-       else {
-               /* sub.l er0,er0; mov.b #__NR_sigreturn,r0l; trapa #0 */
-               err |= __put_user(0x1a80f800 + (__NR_sigreturn & 0xff),
-                                 (unsigned long *)(frame->retcode + 0));
-               err |= __put_user(0x5700, (unsigned short *)(frame->retcode + 4));
-       }
-       err |= __put_user(ret, &frame->pretcode);
-
-       if (err)
-               goto give_sigsegv;
-
-       /* Set up registers for signal handler */
-       wrusp ((unsigned long) frame);
-       regs->pc  = (unsigned long) ka->sa.sa_handler;
-       regs->er0 = (current_thread_info()->exec_domain
-                    && current_thread_info()->exec_domain->signal_invmap
-                    && sig < 32
-                    ? current_thread_info()->exec_domain->signal_invmap[sig]
-                    : sig);
-       regs->er1 = (unsigned long)&(frame->info);
-       regs->er2 = (unsigned long)&frame->uc;
-       regs->er5 = current->mm->start_data;    /* GOT base */
-
-       return 0;
-
-give_sigsegv:
-       force_sigsegv(sig, current);
-       return -EFAULT;
-}
-
-/*
- * OK, we're invoking a handler
- */
-static void
-handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
-             struct pt_regs * regs)
-{
-       sigset_t *oldset = sigmask_to_save();
-       int ret;
-       /* are we from a system call? */
-       if (regs->orig_er0 >= 0) {
-               switch (regs->er0) {
-                       case -ERESTART_RESTARTBLOCK:
-                       case -ERESTARTNOHAND:
-                               regs->er0 = -EINTR;
-                               break;
-
-                       case -ERESTARTSYS:
-                               if (!(ka->sa.sa_flags & SA_RESTART)) {
-                                       regs->er0 = -EINTR;
-                                       break;
-                               }
-                       /* fallthrough */
-                       case -ERESTARTNOINTR:
-                               regs->er0 = regs->orig_er0;
-                               regs->pc -= 2;
-               }
-       }
-
-       /* set up the stack frame */
-       if (ka->sa.sa_flags & SA_SIGINFO)
-               ret = setup_rt_frame(sig, ka, info, oldset, regs);
-       else
-               ret = setup_frame(sig, ka, oldset, regs);
-
-       if (!ret)
-               signal_delivered(sig, info, ka, regs, 0);
-}
-
-/*
- * Note that 'init' is a special process: it doesn't get signals it doesn't
- * want to handle. Thus you cannot kill init even with a SIGKILL even by
- * mistake.
- */
-static void do_signal(struct pt_regs *regs)
-{
-       siginfo_t info;
-       int signr;
-       struct k_sigaction ka;
-
-       /*
-        * We want the common case to go fast, which
-        * is why we may in certain cases get here from
-        * kernel mode. Just return without doing anything
-        * if so.
-        */
-       if ((regs->ccr & 0x10))
-               return;
-
-       current->thread.esp0 = (unsigned long) regs;
-
-       signr = get_signal_to_deliver(&info, &ka, regs, NULL);
-       if (signr > 0) {
-               /* Whee!  Actually deliver the signal.  */
-               handle_signal(signr, &info, &ka, regs);
-               return;
-       }
-       /* Did we come from a system call? */
-       if (regs->orig_er0 >= 0) {
-               /* Restart the system call - no handlers present */
-               if (regs->er0 == -ERESTARTNOHAND ||
-                   regs->er0 == -ERESTARTSYS ||
-                   regs->er0 == -ERESTARTNOINTR) {
-                       regs->er0 = regs->orig_er0;
-                       regs->pc -= 2;
-               }
-               if (regs->er0 == -ERESTART_RESTARTBLOCK){
-                       regs->er0 = __NR_restart_syscall;
-                       regs->pc -= 2;
-               }
-       }
-
-       /* If there's no signal to deliver, we just restore the saved mask.  */
-       restore_saved_sigmask();
-}
-
-asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
-{
-       if (thread_info_flags & _TIF_SIGPENDING)
-               do_signal(regs);
-
-       if (thread_info_flags & _TIF_NOTIFY_RESUME) {
-               clear_thread_flag(TIF_NOTIFY_RESUME);
-               tracehook_notify_resume(regs);
-       }
-}
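
Taken on its own, the restart logic in handle_signal() above reduces to a small decision table. The sketch below is a standalone userspace rendering of that table, not the kernel code: the error constants follow the usual Linux internal values, SA_RESTART is reduced to a flag, and the actual restart (restoring er0 and rewinding pc over the 2-byte trap) is only described in comments.

#include <stdio.h>

#define ERESTARTSYS           512
#define ERESTARTNOINTR        513
#define ERESTARTNOHAND        514
#define ERESTART_RESTARTBLOCK 516

/* Return 1 if the interrupted syscall should be restarted (er0 restored,
 * pc -= 2), 0 if it should complete with -EINTR instead. */
static int should_restart(long syscall_err, int handler_has_sa_restart)
{
        switch (syscall_err) {
        case ERESTART_RESTARTBLOCK:
        case ERESTARTNOHAND:
                return 0;                        /* a handler always breaks these */
        case ERESTARTSYS:
                return handler_has_sa_restart;   /* restart only with SA_RESTART */
        case ERESTARTNOINTR:
                return 1;                        /* always restarted */
        default:
                return 0;                        /* not a restart code */
        }
}

int main(void)
{
        printf("%d %d %d\n",
               should_restart(ERESTARTSYS, 1),     /* 1 */
               should_restart(ERESTARTSYS, 0),     /* 0 */
               should_restart(ERESTARTNOINTR, 0)); /* 1 */
        return 0;
}
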
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
deleted file mode 100644 (file)
index bf350cb..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * linux/arch/h8300/kernel/sys_h8300.c
- *
- * This file contains various random system calls that
- * have a non-standard calling sequence on the H8/300
- * platform.
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/msg.h>
-#include <linux/shm.h>
-#include <linux/stat.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/ipc.h>
-
-#include <asm/setup.h>
-#include <asm/uaccess.h>
-#include <asm/cachectl.h>
-#include <asm/traps.h>
-#include <asm/unistd.h>
-
-/* sys_cacheflush -- no support.  */
-asmlinkage int
-sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
-{
-       return -EINVAL;
-}
-
-asmlinkage int sys_getpagesize(void)
-{
-       return PAGE_SIZE;
-}
-
-#if defined(CONFIG_SYSCALL_PRINT)
-asmlinkage void syscall_print(void *dummy,...)
-{
-       struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4);
-       printk("call %06lx:%ld 1:%08lx,2:%08lx,3:%08lx,ret:%08lx\n",
-               ((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0);
-}
-#endif
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
deleted file mode 100644 (file)
index c55e0ed..0000000
+++ /dev/null
@@ -1,338 +0,0 @@
-/* Systemcall Entry Table */
-#include <linux/sys.h>
-#include <asm/linkage.h>
-#include <asm/unistd.h>
-
-#define CALL(x)        .long _ ## x
-
-.globl _sys_call_table
-
-#if defined(CONFIG_CPU_H8300H)
-       .h8300h
-#endif
-#if defined(CONFIG_CPU_H8S)
-       .h8300s
-#endif
-       .section .text
-       .align  2
-_sys_call_table:
-       CALL(sys_ni_syscall)            /* 0  -  old "setup()" system call*/
-       CALL(sys_exit)
-       CALL(sys_fork)
-       CALL(sys_read)
-       CALL(sys_write)
-       CALL(sys_open)                  /* 5 */
-       CALL(sys_close)
-       CALL(sys_waitpid)
-       CALL(sys_creat)
-       CALL(sys_link)
-       CALL(sys_unlink)                /* 10 */
-       CALL(sys_execve)
-       CALL(sys_chdir)
-       CALL(sys_time)
-       CALL(sys_mknod)
-       CALL(sys_chmod)                 /* 15 */
-       CALL(sys_chown16)
-       CALL(sys_ni_syscall)            /* old break syscall holder */
-       CALL(sys_stat)
-       CALL(sys_lseek)
-       CALL(sys_getpid)                /* 20 */
-       CALL(sys_mount)
-       CALL(sys_oldumount)
-       CALL(sys_setuid16)
-       CALL(sys_getuid16)
-       CALL(sys_stime)                 /* 25 */
-       CALL(sys_ptrace)
-       CALL(sys_alarm)
-       CALL(sys_fstat)
-       CALL(sys_pause)
-       CALL(sys_utime)                 /* 30 */
-       CALL(sys_ni_syscall)            /* old stty syscall holder */
-       CALL(sys_ni_syscall)            /* old gtty syscall holder */
-       CALL(sys_access)
-       CALL(sys_nice)
-       CALL(sys_ni_syscall)            /* 35 old ftime syscall holder */
-       CALL(sys_sync)
-       CALL(sys_kill)
-       CALL(sys_rename)
-       CALL(sys_mkdir)
-       CALL(sys_rmdir)                 /* 40 */
-       CALL(sys_dup)
-       CALL(sys_pipe)
-       CALL(sys_times)
-       CALL(sys_ni_syscall)            /* old prof syscall holder */
-       CALL(sys_brk)                   /* 45 */
-       CALL(sys_setgid16)
-       CALL(sys_getgid16)
-       CALL(sys_signal)
-       CALL(sys_geteuid16)
-       CALL(sys_getegid16)             /* 50 */
-       CALL(sys_acct)
-       CALL(sys_umount)                /* recycled never used phys() */
-       CALL(sys_ni_syscall)            /* old lock syscall holder */
-       CALL(sys_ioctl)
-       CALL(sys_fcntl)                 /* 55 */
-       CALL(sys_ni_syscall)            /* old mpx syscall holder */
-       CALL(sys_setpgid)
-       CALL(sys_ni_syscall)            /* old ulimit syscall holder */
-       CALL(sys_ni_syscall)
-       CALL(sys_umask)                 /* 60 */
-       CALL(sys_chroot)
-       CALL(sys_ustat)
-       CALL(sys_dup2)
-       CALL(sys_getppid)
-       CALL(sys_getpgrp)               /* 65 */
-       CALL(sys_setsid)
-       CALL(sys_sigaction)
-       CALL(sys_sgetmask)
-       CALL(sys_ssetmask)
-       CALL(sys_setreuid16)            /* 70 */
-       CALL(sys_setregid16)
-       CALL(sys_sigsuspend)
-       CALL(sys_sigpending)
-       CALL(sys_sethostname)
-       CALL(sys_setrlimit)             /* 75 */
-       CALL(sys_old_getrlimit)
-       CALL(sys_getrusage)
-       CALL(sys_gettimeofday)
-       CALL(sys_settimeofday)
-       CALL(sys_getgroups16)           /* 80 */
-       CALL(sys_setgroups16)
-       CALL(sys_old_select)
-       CALL(sys_symlink)
-       CALL(sys_lstat)
-       CALL(sys_readlink)              /* 85 */
-       CALL(sys_uselib)
-       CALL(sys_swapon)
-       CALL(sys_reboot)
-       CALL(sys_old_readdir)
-       CALL(sys_old_mmap)              /* 90 */
-       CALL(sys_munmap)
-       CALL(sys_truncate)
-       CALL(sys_ftruncate)
-       CALL(sys_fchmod)
-       CALL(sys_fchown16)              /* 95 */
-       CALL(sys_getpriority)
-       CALL(sys_setpriority)
-       CALL(sys_ni_syscall)            /* old profil syscall holder */
-       CALL(sys_statfs)
-       CALL(sys_fstatfs)               /* 100 */
-       CALL(sys_ni_syscall)            /* ioperm for i386 */
-       CALL(sys_socketcall)
-       CALL(sys_syslog)
-       CALL(sys_setitimer)
-       CALL(sys_getitimer)             /* 105 */
-       CALL(sys_newstat)
-       CALL(sys_newlstat)
-       CALL(sys_newfstat)
-       CALL(sys_ni_syscall)
-       CALL(sys_ni_syscall)            /* iopl for i386 */ /* 110 */
-       CALL(sys_vhangup)
-       CALL(sys_ni_syscall)            /* obsolete idle() syscall */
-       CALL(sys_ni_syscall)            /* vm86old for i386 */
-       CALL(sys_wait4)
-       CALL(sys_swapoff)               /* 115 */
-       CALL(sys_sysinfo)
-       CALL(sys_ipc)
-       CALL(sys_fsync)
-       CALL(sys_sigreturn)
-       CALL(sys_clone)                 /* 120 */
-       CALL(sys_setdomainname)
-       CALL(sys_newuname)
-       CALL(sys_cacheflush)            /* modify_ldt for i386 */
-       CALL(sys_adjtimex)
-       CALL(sys_ni_syscall)            /* 125 sys_mprotect */
-       CALL(sys_sigprocmask)
-       CALL(sys_ni_syscall)            /* sys_create_module */
-       CALL(sys_init_module)
-       CALL(sys_delete_module)
-       CALL(sys_ni_syscall)            /* 130 sys_get_kernel_syms */
-       CALL(sys_quotactl)
-       CALL(sys_getpgid)
-       CALL(sys_fchdir)
-       CALL(sys_bdflush)
-       CALL(sys_sysfs)                 /* 135 */
-       CALL(sys_personality)
-       CALL(sys_ni_syscall)            /* for afs_syscall */
-       CALL(sys_setfsuid16)
-       CALL(sys_setfsgid16)
-       CALL(sys_llseek)                /* 140 */
-       CALL(sys_getdents)
-       CALL(sys_select)
-       CALL(sys_flock)
-       CALL(sys_ni_syscall)            /* sys_msync */
-       CALL(sys_readv)                 /* 145 */
-       CALL(sys_writev)
-       CALL(sys_getsid)
-       CALL(sys_fdatasync)
-       CALL(sys_sysctl)
-       CALL(sys_ni_syscall)            /* 150 sys_mlock */
-       CALL(sys_ni_syscall)            /* sys_munlock */
-       CALL(sys_ni_syscall)            /* sys_mlockall */
-       CALL(sys_ni_syscall)            /* sys_munlockall */
-       CALL(sys_sched_setparam)
-       CALL(sys_sched_getparam)        /* 155 */
-       CALL(sys_sched_setscheduler)
-       CALL(sys_sched_getscheduler)
-       CALL(sys_sched_yield)
-       CALL(sys_sched_get_priority_max)
-       CALL(sys_sched_get_priority_min)  /* 160 */
-       CALL(sys_sched_rr_get_interval)
-       CALL(sys_nanosleep)
-       CALL(sys_ni_syscall)            /* sys_mremap */
-       CALL(sys_setresuid16)
-       CALL(sys_getresuid16)           /* 165 */
-       CALL(sys_ni_syscall)            /* for vm86 */
-       CALL(sys_ni_syscall)            /* sys_query_module */
-       CALL(sys_poll)
-       CALL(sys_ni_syscall)            /* old nfsservctl */
-       CALL(sys_setresgid16)           /* 170 */
-       CALL(sys_getresgid16)
-       CALL(sys_prctl)
-       CALL(sys_rt_sigreturn)
-       CALL(sys_rt_sigaction)
-       CALL(sys_rt_sigprocmask)        /* 175 */
-       CALL(sys_rt_sigpending)
-       CALL(sys_rt_sigtimedwait)
-       CALL(sys_rt_sigqueueinfo)
-       CALL(sys_rt_sigsuspend)
-       CALL(sys_pread64)               /* 180 */
-       CALL(sys_pwrite64)
-       CALL(sys_lchown16);
-       CALL(sys_getcwd)
-       CALL(sys_capget)
-       CALL(sys_capset)                /* 185 */
-       CALL(sys_sigaltstack)
-       CALL(sys_sendfile)
-       CALL(sys_ni_syscall)            /* streams1 */
-       CALL(sys_ni_syscall)            /* streams2 */
-       CALL(sys_vfork)                 /* 190 */
-       CALL(sys_getrlimit)
-       CALL(sys_mmap_pgoff)
-       CALL(sys_truncate64)
-       CALL(sys_ftruncate64)
-       CALL(sys_stat64)                /* 195 */
-       CALL(sys_lstat64)
-       CALL(sys_fstat64)
-       CALL(sys_chown)
-       CALL(sys_getuid)
-       CALL(sys_getgid)                /* 200 */
-       CALL(sys_geteuid)
-       CALL(sys_getegid)
-       CALL(sys_setreuid)
-       CALL(sys_setregid)
-       CALL(sys_getgroups)             /* 205 */
-       CALL(sys_setgroups)
-       CALL(sys_fchown)
-       CALL(sys_setresuid)
-       CALL(sys_getresuid)
-       CALL(sys_setresgid)             /* 210 */
-       CALL(sys_getresgid)
-       CALL(sys_lchown)
-       CALL(sys_setuid)
-       CALL(sys_setgid)
-       CALL(sys_setfsuid)              /* 215 */
-       CALL(sys_setfsgid)
-       CALL(sys_pivot_root)
-       CALL(sys_ni_syscall)
-       CALL(sys_ni_syscall)
-       CALL(sys_getdents64)            /* 220 */
-       CALL(sys_fcntl64)
-       CALL(sys_ni_syscall)            /* reserved TUX */
-       CALL(sys_ni_syscall)            /* reserved Security */
-       CALL(sys_gettid)
-       CALL(sys_readahead)             /* 225 */
-       CALL(sys_setxattr)
-       CALL(sys_lsetxattr)
-       CALL(sys_fsetxattr)
-       CALL(sys_getxattr)
-       CALL(sys_lgetxattr)             /* 230 */
-       CALL(sys_fgetxattr)
-       CALL(sys_listxattr)
-       CALL(sys_llistxattr)
-       CALL(sys_flistxattr)
-       CALL(sys_removexattr)           /* 235 */
-       CALL(sys_lremovexattr)
-       CALL(sys_fremovexattr)
-       CALL(sys_tkill)
-       CALL(sys_sendfile64)
-       CALL(sys_futex)                 /* 240 */
-       CALL(sys_sched_setaffinity)
-       CALL(sys_sched_getaffinity)
-       CALL(sys_ni_syscall)
-       CALL(sys_ni_syscall)
-       CALL(sys_io_setup)              /* 245 */
-       CALL(sys_io_destroy)
-       CALL(sys_io_getevents)
-       CALL(sys_io_submit)
-       CALL(sys_io_cancel)
-       CALL(sys_fadvise64)             /* 250 */
-       CALL(sys_ni_syscall)
-       CALL(sys_exit_group)
-       CALL(sys_lookup_dcookie)
-       CALL(sys_epoll_create)
-       CALL(sys_epoll_ctl)             /* 255 */
-       CALL(sys_epoll_wait)
-       CALL(sys_ni_syscall)            /* sys_remap_file_pages */
-       CALL(sys_set_tid_address)
-       CALL(sys_timer_create)
-       CALL(sys_timer_settime)         /* 260 */
-       CALL(sys_timer_gettime)
-       CALL(sys_timer_getoverrun)
-       CALL(sys_timer_delete)
-       CALL(sys_clock_settime)
-       CALL(sys_clock_gettime)         /* 265 */
-       CALL(sys_clock_getres)
-       CALL(sys_clock_nanosleep)
-       CALL(sys_statfs64)
-       CALL(sys_fstatfs64)
-       CALL(sys_tgkill)                /* 270 */
-       CALL(sys_utimes)
-       CALL(sys_fadvise64_64)
-       CALL(sys_ni_syscall)            /* sys_vserver */
-       CALL(sys_ni_syscall)
-       CALL(sys_get_mempolicy)         /* 275 */
-       CALL(sys_set_mempolicy)
-       CALL(sys_mq_open)
-       CALL(sys_mq_unlink)
-       CALL(sys_mq_timedsend)
-       CALL(sys_mq_timedreceive)       /* 280 */
-       CALL(sys_mq_notify)
-       CALL(sys_mq_getsetattr)
-       CALL(sys_waitid)
-       CALL(sys_ni_syscall)            /* sys_kexec_load */
-       CALL(sys_add_key)               /* 285 */
-       CALL(sys_request_key)
-       CALL(sys_keyctl)
-       CALL(sys_ioprio_set)
-       CALL(sys_ioprio_get)            /* 290 */
-       CALL(sys_inotify_init)
-       CALL(sys_inotify_add_watch)
-       CALL(sys_inotify_rm_watch)
-       CALL(sys_migrate_pages)
-       CALL(sys_openat)                /* 295 */
-       CALL(sys_mkdirat)
-       CALL(sys_mknodat)
-       CALL(sys_fchownat)
-       CALL(sys_futimesat)
-       CALL(sys_fstatat64)             /* 300 */
-       CALL(sys_unlinkat)
-       CALL(sys_renameat)
-       CALL(sys_linkat)
-       CALL(sys_symlinkat)
-       CALL(sys_readlinkat)            /* 305 */
-       CALL(sys_fchmodat)
-       CALL(sys_faccessat)
-       CALL(sys_ni_syscall)            /* sys_pselect6 */
-       CALL(sys_ni_syscall)            /* sys_ppoll */
-       CALL(sys_unshare)               /* 310 */
-       CALL(sys_set_robust_list)
-       CALL(sys_get_robust_list)
-       CALL(sys_splice)
-       CALL(sys_sync_file_range)
-       CALL(sys_tee)                   /* 315 */
-       CALL(sys_vmsplice)
-       CALL(sys_ni_syscall)            /* sys_move_pages */
-       CALL(sys_getcpu)
-       CALL(sys_ni_syscall)            /* sys_epoll_pwait */
-       CALL(sys_setns)                 /* 320 */
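
The table above is nothing more than an array of entry points indexed by syscall number, with sys_ni_syscall plugging every unimplemented slot. Below is a tiny userspace analogue of that dispatch, with invented demo handlers and an invented table size:

#include <stdio.h>

#define NR_DEMO_SYSCALLS 321
#define DEMO_ENOSYS       38   /* value of ENOSYS, returned for empty slots */

typedef long (*syscall_fn)(void);

static long sys_ni_syscall_demo(void) { return -DEMO_ENOSYS; }
static long sys_getpid_demo(void)     { return 1234; }

/* Sparse initialisation: untouched slots stay NULL here, whereas the real
 * table plugs every hole with sys_ni_syscall explicitly. */
static syscall_fn demo_call_table[NR_DEMO_SYSCALLS] = {
        [0]  = sys_ni_syscall_demo,   /* old "setup()" slot */
        [20] = sys_getpid_demo,       /* slot 20, sys_getpid in the table above */
};

static long dispatch(unsigned int nr)
{
        if (nr >= NR_DEMO_SYSCALLS || !demo_call_table[nr])
                return -DEMO_ENOSYS;
        return demo_call_table[nr]();
}

int main(void)
{
        printf("nr 20 -> %ld, nr 7 -> %ld\n", dispatch(20), dispatch(7));
        return 0;
}
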
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
deleted file mode 100644 (file)
index e0f7419..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/time.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Copied/hacked from:
- *
- *  linux/arch/m68k/kernel/time.c
- *
- *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
- *
- * This file contains the m68k-specific time handling details.
- * Most of the stuff is located in the machine specific files.
- *
- * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
- *             "A Kernel Model for Precision Timekeeping" by Dave Mills
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/timex.h>
-#include <linux/profile.h>
-
-#include <asm/io.h>
-#include <asm/irq_regs.h>
-#include <asm/timer.h>
-
-#define        TICK_SIZE (tick_nsec / 1000)
-
-void h8300_timer_tick(void)
-{
-       if (current->pid)
-               profile_tick(CPU_PROFILING);
-       xtime_update(1);
-       update_process_times(user_mode(get_irq_regs()));
-}
-
-void read_persistent_clock(struct timespec *ts)
-{
-       unsigned int year, mon, day, hour, min, sec;
-
-       /* FIX by dqg : Set to zero for platforms that don't have tod */
-       /* without this time is undefined and can overflow time_t, causing  */
-       /* very strange errors */
-       year = 1980;
-       mon = day = 1;
-       hour = min = sec = 0;
-#ifdef CONFIG_H8300_GETTOD
-       h8300_gettod (&year, &mon, &day, &hour, &min, &sec);
-#endif
-       if ((year += 1900) < 1970)
-               year += 100;
-       ts->tv_sec = mktime(year, mon, day, hour, min, sec);
-       ts->tv_nsec = 0;
-}
-
-void __init time_init(void)
-{
-
-       h8300_timer_setup();
-}
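
The year handling in read_persistent_clock() above assumes the TOD hardware reports years as an offset from 1900 and pushes anything before the Unix epoch into 20xx. A standalone rendering of just that fixup (helper name invented):

#include <stdio.h>

static unsigned int normalize_rtc_year(unsigned int rtc_year)
{
        unsigned int year = rtc_year + 1900;

        if (year < 1970)        /* 00..69 is taken to mean 20xx */
                year += 100;
        return year;
}

int main(void)
{
        printf("%u %u %u\n",
               normalize_rtc_year(99),   /* 1999 */
               normalize_rtc_year(5),    /* 2005 */
               normalize_rtc_year(113)); /* 2013 */
        return 0;
}
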
diff --git a/arch/h8300/kernel/timer/Makefile b/arch/h8300/kernel/timer/Makefile
deleted file mode 100644 (file)
index bef0510..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-# h8300 internal timer handler
-
-obj-$(CONFIG_H8300_TIMER8)  := timer8.o
-obj-$(CONFIG_H8300_TIMER16) := timer16.o
-obj-$(CONFIG_H8300_ITU)     := itu.o
-obj-$(CONFIG_H8300_TPU)     := tpu.o
diff --git a/arch/h8300/kernel/timer/itu.c b/arch/h8300/kernel/timer/itu.c
deleted file mode 100644 (file)
index 0a8b5cd..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/timer/itu.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  ITU Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/regs306x.h>
-
-#if CONFIG_H8300_ITU_CH == 0
-#define ITUBASE        0xffff64
-#define ITUIRQ 24
-#elif CONFIG_H8300_ITU_CH == 1
-#define ITUBASE        0xffff6e
-#define ITUIRQ 28
-#elif CONFIG_H8300_ITU_CH == 2
-#define ITUBASE        0xffff78
-#define ITUIRQ 32
-#elif CONFIG_H8300_ITU_CH == 3
-#define ITUBASE        0xffff82
-#define ITUIRQ 36
-#elif CONFIG_H8300_ITU_CH == 4
-#define ITUBASE        0xffff92
-#define ITUIRQ 40
-#else
-#error Unknown timer channel.
-#endif
-
-#define TCR    0
-#define TIOR   1
-#define TIER   2
-#define TSR    3
-#define TCNT   4
-#define GRA    6
-#define GRB    8
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-       h8300_timer_tick();
-       ctrl_bclr(IMFA, ITUBASE + TSR);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction itu_irq = {
-       .name           = "itu",
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {1, 2, 4, 8};
-
-void __init h8300_timer_setup(void)
-{
-       unsigned int div;
-       unsigned int cnt;
-
-       calc_param(cnt, div, divide_rate, 0x10000);
-
-       setup_irq(ITUIRQ, &itu_irq);
-
-       /* initialize timer */
-       ctrl_outb(0, TSTR);
-       ctrl_outb(CCLR0 | div, ITUBASE + TCR);
-       ctrl_outb(0x01, ITUBASE + TIER);
-       ctrl_outw(cnt, ITUBASE + GRA);
-       ctrl_bset(CONFIG_H8300_ITU_CH, TSTR);
-}
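
calc_param() itself is not part of this diff; the sketch below shows what it is assumed to do for the setup above: walk divide_rate and pick the first prescaler whose tick count fits the 16-bit GRA compare register. The helper name, clock frequency and HZ value are illustrative.

#include <stdio.h>

/* Pick the first prescaler whose resulting tick count fits the compare
 * register; zero entries (used by some channels) are skipped. */
static int pick_timer_divider(unsigned long input_hz, unsigned long hz,
                              const int *rates, int nrates,
                              unsigned long limit,
                              unsigned int *cnt, unsigned int *div)
{
        for (int i = 0; i < nrates; i++) {
                unsigned long c;

                if (!rates[i])
                        continue;
                c = input_hz / (unsigned long)rates[i] / hz;
                if (c < limit) {
                        *cnt = (unsigned int)c;
                        *div = (unsigned int)i;
                        return 0;
                }
        }
        return -1;   /* no divider keeps the count in range */
}

int main(void)
{
        static const int divide_rate[] = {1, 2, 4, 8};
        unsigned int cnt, div;

        /* illustrative numbers: 20 MHz timer input, HZ = 100 */
        if (pick_timer_divider(20000000UL, 100, divide_rate, 4, 0x10000,
                               &cnt, &div) == 0)
                printf("divider index %u, count %u\n", div, cnt);
        return 0;
}
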
diff --git a/arch/h8300/kernel/timer/timer16.c b/arch/h8300/kernel/timer/timer16.c
deleted file mode 100644 (file)
index 462d9f5..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/timer/timer16.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  16bit Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/regs306x.h>
-
-/* 16bit timer */
-#if CONFIG_H8300_TIMER16_CH == 0
-#define _16BASE        0xffff78
-#define _16IRQ 24
-#elif CONFIG_H8300_TIMER16_CH == 1
-#define _16BASE        0xffff80
-#define _16IRQ 28
-#elif CONFIG_H8300_TIMER16_CH == 2
-#define _16BASE        0xffff88
-#define _16IRQ 32
-#else
-#error Unknown timer channel.
-#endif
-
-#define TCR    0
-#define TIOR   1
-#define TCNT   2
-#define GRA    4
-#define GRB    6
-
-#define H8300_TIMER_FREQ CONFIG_CPU_CLOCK*10000 /* Timer input freq. */
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-       h8300_timer_tick();
-       ctrl_bclr(CONFIG_H8300_TIMER16_CH, TISRA);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction timer16_irq = {
-       .name           = "timer-16",
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {1, 2, 4, 8};
-
-void __init h8300_timer_setup(void)
-{
-       unsigned int div;
-       unsigned int cnt;
-
-       calc_param(cnt, div, divide_rate, 0x10000);
-
-       setup_irq(_16IRQ, &timer16_irq);
-
-       /* initialize timer */
-       ctrl_outb(0, TSTR);
-       ctrl_outb(CCLR0 | div, _16BASE + TCR);
-       ctrl_outw(cnt, _16BASE + GRA);
-       ctrl_bset(4 + CONFIG_H8300_TIMER16_CH, TISRA);
-       ctrl_bset(CONFIG_H8300_TIMER16_CH, TSTR);
-}
diff --git a/arch/h8300/kernel/timer/timer8.c b/arch/h8300/kernel/timer/timer8.c
deleted file mode 100644 (file)
index 505f341..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/cpu/timer/timer8.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  8bit Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/profile.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/timer.h>
-#if defined(CONFIG_CPU_H8300H)
-#include <asm/regs306x.h>
-#endif
-#if defined(CONFIG_CPU_H8S)
-#include <asm/regs267x.h>
-#endif
-
-/* 8bit timer x2 */
-#define CMFA   6
-
-#if defined(CONFIG_H8300_TIMER8_CH0)
-#define _8BASE _8TCR0
-#ifdef CONFIG_CPU_H8300H
-#define _8IRQ  36
-#endif
-#ifdef CONFIG_CPU_H8S
-#define _8IRQ  72
-#endif
-#elif defined(CONFIG_H8300_TIMER8_CH2)
-#ifdef CONFIG_CPU_H8300H
-#define _8BASE _8TCR2
-#define _8IRQ  40
-#endif
-#endif
-
-#ifndef _8BASE
-#error Unknown timer channel.
-#endif
-
-#define _8TCR  0
-#define _8TCSR 2
-#define TCORA  4
-#define TCORB  6
-#define _8TCNT 8
-
-#define CMIEA  0x40
-#define CCLR_CMA 0x08
-#define CKS2   0x04
-
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
- */
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-       h8300_timer_tick();
-       ctrl_bclr(CMFA, _8BASE + _8TCSR);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction timer8_irq = {
-       .name           = "timer-8",
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {8, 64, 8192};
-
-void __init h8300_timer_setup(void)
-{
-       unsigned int div;
-       unsigned int cnt;
-
-       calc_param(cnt, div, divide_rate, 0x10000);
-       div++;
-
-       setup_irq(_8IRQ, &timer8_irq);
-
-#if defined(CONFIG_CPU_H8S)
-       /* Timer module enable */
-       ctrl_bclr(0, MSTPCRL)
-#endif
-
-       /* initialize timer */
-       ctrl_outw(cnt, _8BASE + TCORA);
-       ctrl_outw(0x0000, _8BASE + _8TCSR);
-       ctrl_outw((CMIEA|CCLR_CMA|CKS2) << 8 | div,
-                 _8BASE + _8TCR);
-}
diff --git a/arch/h8300/kernel/timer/tpu.c b/arch/h8300/kernel/timer/tpu.c
deleted file mode 100644 (file)
index 0350f62..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- *  linux/arch/h8300/kernel/timer/tpu.c
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  TPU Timer Handler
- *
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/timex.h>
-
-#include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/regs267x.h>
-
-/* TPU */
-#if CONFIG_H8300_TPU_CH == 0
-#define TPUBASE        0xffffd0
-#define TPUIRQ 40
-#elif CONFIG_H8300_TPU_CH == 1
-#define TPUBASE        0xffffe0
-#define TPUIRQ 48
-#elif CONFIG_H8300_TPU_CH == 2
-#define TPUBASE        0xfffff0
-#define TPUIRQ 52
-#elif CONFIG_H8300_TPU_CH == 3
-#define TPUBASE        0xfffe80
-#define TPUIRQ 56
-#elif CONFIG_H8300_TPU_CH == 4
-#define TPUBASE        0xfffe90
-#define TPUIRQ 64
-#else
-#error Unknown timer channel.
-#endif
-
-#define _TCR   0
-#define _TMDR  1
-#define _TIOR  2
-#define _TIER  4
-#define _TSR   5
-#define _TCNT  6
-#define _GRA   8
-#define _GRB   10
-
-#define CCLR0  0x20
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-       h8300_timer_tick();
-       ctrl_bclr(0, TPUBASE + _TSR);
-       return IRQ_HANDLED;
-}
-
-static struct irqaction tpu_irq = {
-       .name           = "tpu",
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_TIMER,
-};
-
-static const int __initconst divide_rate[] = {
-#if CONFIG_H8300_TPU_CH == 0
-       1,4,16,64,0,0,0,0,
-#elif (CONFIG_H8300_TPU_CH == 1) || (CONFIG_H8300_TPU_CH == 5)
-       1,4,16,64,0,0,256,0,
-#elif (CONFIG_H8300_TPU_CH == 2) || (CONFIG_H8300_TPU_CH == 4)
-       1,4,16,64,0,0,0,1024,
-#elif CONFIG_H8300_TPU_CH == 3
-       1,4,16,64,0,1024,256,4096,
-#endif
-};
-
-void __init h8300_timer_setup(void)
-{
-       unsigned int cnt;
-       unsigned int div;
-
-       calc_param(cnt, div, divide_rate, 0x10000);
-
-       setup_irq(TPUIRQ, &tpu_irq);
-
-       /* TPU module enabled */
-       ctrl_bclr(3, MSTPCRH);
-
-       ctrl_outb(0, TSTR);
-       ctrl_outb(CCLR0 | div, TPUBASE + _TCR);
-       ctrl_outb(0, TPUBASE + _TMDR);
-       ctrl_outw(0, TPUBASE + _TIOR);
-       ctrl_outb(0x01, TPUBASE + _TIER);
-       ctrl_outw(cnt, TPUBASE + _GRA);
-       ctrl_bset(CONFIG_H8300_TPU_CH, TSTR);
-}
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
deleted file mode 100644 (file)
index cfe494d..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * linux/arch/h8300/boot/traps.c -- general exception handling code
- * H8/300 support Yoshinori Sato <ysato@users.sourceforge.jp>
- * 
- * Cloned from Linux/m68k.
- *
- * No original Copyright holder listed,
- * Probable original (C) Roman Zippel (assigned DJD, 1999)
- *
- * Copyright 1999-2000 D. Jeff Dionne, <jeff@rt-control.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/bug.h>
-
-#include <asm/irq.h>
-#include <asm/traps.h>
-#include <asm/page.h>
-
-static DEFINE_SPINLOCK(die_lock);
-
-/*
- * this must be called very early as the kernel might
- * use some instructions that are emulated on the 060
- */
-
-void __init base_trap_init(void)
-{
-}
-
-void __init trap_init (void)
-{
-}
-
-asmlinkage void set_esp0 (unsigned long ssp)
-{
-       current->thread.esp0 = ssp;
-}
-
-/*
- *     Generic dumping code. Used for panic and debug.
- */
-
-static void dump(struct pt_regs *fp)
-{
-       unsigned long   *sp;
-       unsigned char   *tp;
-       int             i;
-
-       printk("\nCURRENT PROCESS:\n\n");
-       printk("COMM=%s PID=%d\n", current->comm, current->pid);
-       if (current->mm) {
-               printk("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
-                       (int) current->mm->start_code,
-                       (int) current->mm->end_code,
-                       (int) current->mm->start_data,
-                       (int) current->mm->end_data,
-                       (int) current->mm->end_data,
-                       (int) current->mm->brk);
-               printk("USER-STACK=%08x  KERNEL-STACK=%08lx\n\n",
-                       (int) current->mm->start_stack,
-                       (int) PAGE_SIZE+(unsigned long)current);
-       }
-
-       show_regs(fp);
-       printk("\nCODE:");
-       tp = ((unsigned char *) fp->pc) - 0x20;
-       for (sp = (unsigned long *) tp, i = 0; (i < 0x40);  i += 4) {
-               if ((i % 0x10) == 0)
-                       printk("\n%08x: ", (int) (tp + i));
-               printk("%08x ", (int) *sp++);
-       }
-       printk("\n");
-
-       printk("\nKERNEL STACK:");
-       tp = ((unsigned char *) fp) - 0x40;
-       for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
-               if ((i % 0x10) == 0)
-                       printk("\n%08x: ", (int) (tp + i));
-               printk("%08x ", (int) *sp++);
-       }
-       printk("\n");
-       if (STACK_MAGIC != *(unsigned long *)((unsigned long)current+PAGE_SIZE))
-                printk("(Possibly corrupted stack page??)\n");
-
-       printk("\n\n");
-}
-
-void die(const char *str, struct pt_regs *fp, unsigned long err)
-{
-       static int diecount;
-
-       oops_enter();
-
-       console_verbose();
-       spin_lock_irq(&die_lock);
-       report_bug(fp->pc, fp);
-       printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++diecount);
-       dump(fp);
-
-       spin_unlock_irq(&die_lock);
-       do_exit(SIGSEGV);
-}
-
-extern char _start, _etext;
-#define check_kernel_text(addr) \
-        ((addr >= (unsigned long)(&_start)) && \
-         (addr <  (unsigned long)(&_etext))) 
-
-static int kstack_depth_to_print = 24;
-
-void show_stack(struct task_struct *task, unsigned long *esp)
-{
-       unsigned long *stack,  addr;
-       int i;
-
-       if (esp == NULL)
-               esp = (unsigned long *) &esp;
-
-       stack = esp;
-
-       printk("Stack from %08lx:", (unsigned long)stack);
-       for (i = 0; i < kstack_depth_to_print; i++) {
-               if (((unsigned long)stack & (THREAD_SIZE - 1)) == 0)
-                       break;
-               if (i % 8 == 0)
-                       printk("\n       ");
-               printk(" %08lx", *stack++);
-       }
-
-       printk("\nCall Trace:");
-       i = 0;
-       stack = esp;
-       while (((unsigned long)stack & (THREAD_SIZE - 1)) != 0) {
-               addr = *stack++;
-               /*
-                * If the address is either in the text segment of the
-                * kernel, or in the region which contains vmalloc'ed
-                * memory, it *may* be the address of a calling
-                * routine; if so, print it so that someone tracing
-                * down the cause of the crash will be able to figure
-                * out the call path that was taken.
-                */
-               if (check_kernel_text(addr)) {
-                       if (i % 4 == 0)
-                               printk("\n       ");
-                       printk(" [<%08lx>]", addr);
-                       i++;
-               }
-       }
-       printk("\n");
-}
-
-void show_trace_task(struct task_struct *tsk)
-{
-       show_stack(tsk,(unsigned long *)tsk->thread.esp0);
-}
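
The call-trace loop in show_stack() above relies on a simple heuristic: a stack word can only be a return address if it falls inside the kernel text segment. A standalone illustration with an invented stack dump and invented text bounds:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Report only the words that land inside [text_start, text_end): anything
 * else on the stack cannot be a kernel return address. */
static void show_trace_sketch(const uint32_t *stack, int words,
                              uint32_t text_start, uint32_t text_end)
{
        for (int i = 0; i < words; i++) {
                if (stack[i] >= text_start && stack[i] < text_end)
                        printf(" [<%08" PRIx32 ">]", stack[i]);
        }
        printf("\n");
}

int main(void)
{
        /* invented stack contents: two plausible return addresses among data */
        uint32_t stack[] = { 0xdeadbeef, 0x0000142a, 0x00400000, 0x000018f6 };

        show_trace_sketch(stack, 4, 0x00001000, 0x00004000);
        return 0;
}
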
diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S
deleted file mode 100644 (file)
index 3253fed..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-#include <asm-generic/vmlinux.lds.h>
-#include <asm/page.h>
-
-/* target memory map */
-#ifdef CONFIG_H8300H_GENERIC
-#define ROMTOP  0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x400000
-#endif
-
-#ifdef CONFIG_H8300H_AKI3068NET
-#define ROMTOP  0x000000
-#define ROMSIZE 0x080000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x200000
-#endif
-
-#ifdef CONFIG_H8300H_H8MAX
-#define ROMTOP  0x000000
-#define ROMSIZE 0x080000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x200000
-#endif
-
-#ifdef CONFIG_H8300H_SIM
-#define ROMTOP  0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x400000
-#endif
-
-#ifdef CONFIG_H8S_SIM
-#define ROMTOP  0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x800000
-#endif
-
-#ifdef CONFIG_H8S_EDOSK2674
-#define ROMTOP  0x000000
-#define ROMSIZE 0x400000
-#define RAMTOP  0x400000
-#define RAMSIZE 0x800000
-#endif
-
-#if defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)
-INPUT(romfs.o)
-#endif
-
-_jiffies = _jiffies_64 + 4;
-
-ENTRY(__start)
-
-SECTIONS
-{
-#if defined(CONFIG_ROMKERNEL)
-       . = ROMTOP; 
-       .vectors :
-       {
-       __vector = . ;
-               *(.vectors*)
-       }
-#else
-       . = RAMTOP; 
-       .bootvec :      
-       {
-               *(.bootvec)
-       }
-#endif
-        .text :
-       {
-       _text = .;
-#if defined(CONFIG_ROMKERNEL)
-       *(.int_redirect)
-#endif
-       __stext = . ;
-       TEXT_TEXT
-       SCHED_TEXT
-       LOCK_TEXT
-       __etext = . ;
-       }
-       EXCEPTION_TABLE(16)
-
-       RODATA
-#if defined(CONFIG_ROMKERNEL)
-       SECURITY_INIT
-#endif
-       ROEND = .; 
-#if defined(CONFIG_ROMKERNEL)
-       . = RAMTOP;
-       .data : AT(ROEND)
-#else
-       .data : 
-#endif
-       {
-       __sdata = . ;
-       ___data_start = . ;
-
-       INIT_TASK_DATA(0x2000)
-       . = ALIGN(0x4) ;
-               DATA_DATA
-       . = ALIGN(0x4) ;
-               *(.data.*)      
-
-       . = ALIGN(0x4) ;
-       ___init_begin = .;
-       __sinittext = .; 
-               INIT_TEXT
-       __einittext = .; 
-               INIT_DATA
-       . = ALIGN(0x4) ;
-       INIT_SETUP(0x4)
-       ___setup_start = .;
-               *(.init.setup)
-       . = ALIGN(0x4) ;
-       ___setup_end = .;
-       INIT_CALLS
-       CON_INITCALL
-               EXIT_TEXT
-               EXIT_DATA
-       INIT_RAM_FS
-       . = ALIGN(0x4) ;
-       ___init_end = .;
-       __edata = . ;
-       }
-#if defined(CONFIG_RAMKERNEL)
-       SECURITY_INIT
-#endif
-       __begin_data = LOADADDR(.data);
-        .bss : 
-        {
-       . = ALIGN(0x4) ;
-       __sbss = . ;
-       ___bss_start = . ;
-               *(.bss*)
-       . = ALIGN(0x4) ;
-               *(COMMON)
-       . = ALIGN(0x4) ;
-       ___bss_stop = . ;
-       __ebss = . ;
-       __end = . ;
-       __ramstart = .;
-       }
-        .romfs :       
-       {
-               *(.romfs*)
-       }
-       . = RAMTOP+RAMSIZE;
-        .dummy :
-        {
-       COMMAND_START = . - 0x200 ;
-       __ramend = . ;
-       }
-
-       DISCARDS
-}
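
For the ROM-kernel case, the AT(ROEND) placement above gives .data a ROM load address (exported as __begin_data) that differs from its RAM run address (__sdata..__edata), which implies an early copy from ROM to RAM. The routine below is a hedged sketch of that step, not code from this diff; the buffers stand in for the linker symbols.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Copy the initialised data image from its ROM load address to its RAM
 * run address; in the real layout the three pointers would come from
 * __begin_data, __sdata and __edata. */
static void copy_data_from_rom(const uint8_t *load_addr,
                               uint8_t *run_start, uint8_t *run_end)
{
        memcpy(run_start, load_addr, (size_t)(run_end - run_start));
}

int main(void)
{
        uint8_t rom_image[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };  /* stand-in ROM */
        uint8_t ram_data[8]  = { 0 };                        /* stand-in RAM */

        copy_data_from_rom(rom_image, ram_data, ram_data + sizeof ram_data);
        printf("ram_data[3] = %d\n", ram_data[3]);  /* prints 4 */
        return 0;
}
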
diff --git a/arch/h8300/lib/Makefile b/arch/h8300/lib/Makefile
deleted file mode 100644 (file)
index 1577f50..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for H8/300-specific library files..
-#
-
-lib-y  = ashrdi3.o checksum.o memcpy.o memset.o abs.o romfs.o
diff --git a/arch/h8300/lib/abs.S b/arch/h8300/lib/abs.S
deleted file mode 100644 (file)
index ddd1fb3..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-;;; abs.S
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__) 
-       .h8300h
-#endif
-#if defined(__H8300S__) 
-       .h8300s
-#endif
-       .text
-.global _abs
-
-;;; int abs(int n)
-_abs:
-       mov.l   er0,er0
-       bpl     1f
-       neg.l   er0
-1:
-       rts
-       
diff --git a/arch/h8300/lib/ashrdi3.c b/arch/h8300/lib/ashrdi3.c
deleted file mode 100644 (file)
index 78efb65..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/* ashrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */
-/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
-
-This file is part of GNU CC.
-
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING.  If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA.  */
-
-#define BITS_PER_UNIT 8
-
-typedef         int SItype     __attribute__ ((mode (SI)));
-typedef unsigned int USItype   __attribute__ ((mode (SI)));
-typedef                 int DItype     __attribute__ ((mode (DI)));
-typedef int word_type __attribute__ ((mode (__word__)));
-
-struct DIstruct {SItype high, low;};
-
-typedef union
-{
-  struct DIstruct s;
-  DItype ll;
-} DIunion;
-
-DItype
-__ashrdi3 (DItype u, word_type b)
-{
-  DIunion w;
-  word_type bm;
-  DIunion uu;
-
-  if (b == 0)
-    return u;
-
-  uu.ll = u;
-
-  bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
-  if (bm <= 0)
-    {
-      /* w.s.high = 1..1 or 0..0 */
-      w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
-      w.s.low = uu.s.high >> -bm;
-    }
-  else
-    {
-      USItype carries = (USItype)uu.s.high << bm;
-      w.s.high = uu.s.high >> b;
-      w.s.low = ((USItype)uu.s.low >> b) | carries;
-    }
-
-  return w.ll;
-}
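
A quick userspace check of the two branches in __ashrdi3() above (shift within one 32-bit word versus across the word boundary), written with plain 64/32-bit types instead of the mode attributes and compared against the compiler's native arithmetic shift:

#include <stdio.h>

/* Same logic as __ashrdi3(), with right shifts of negative values assumed
 * to be arithmetic, as gcc and clang implement them. */
static long long ashrdi3_sketch(long long u, int b)
{
        int hi = (int)((unsigned long long)u >> 32);
        unsigned int lo = (unsigned int)u;
        int whi;
        unsigned int wlo;

        if (b == 0)
                return u;
        if (b >= 32) {                  /* the "bm <= 0" branch */
                whi = hi >> 31;         /* all sign bits */
                wlo = (unsigned int)(hi >> (b - 32));
        } else {
                unsigned int carries = (unsigned int)hi << (32 - b);

                whi = hi >> b;
                wlo = (lo >> b) | carries;
        }
        return (long long)(((unsigned long long)(unsigned int)whi << 32) | wlo);
}

int main(void)
{
        long long v = -0x123456789abLL;
        int mismatches = 0;

        for (int b = 0; b < 64; b++)
                if (ashrdi3_sketch(v, b) != (v >> b))
                        mismatches++;
        printf("%d mismatches\n", mismatches);   /* expect 0 */
        return 0;
}
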
diff --git a/arch/h8300/lib/checksum.c b/arch/h8300/lib/checksum.c
deleted file mode 100644 (file)
index bdc5b03..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * INET                An implementation of the TCP/IP protocol suite for the LINUX
- *             operating system.  INET is implemented using the  BSD Socket
- *             interface as the means of communication with the user level.
- *
- *             IP/TCP/UDP checksumming routines
- *
- * Authors:    Jorge Cwik, <jorge@laser.satlink.net>
- *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *             Tom May, <ftom@netcom.com>
- *             Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
- *             Lots of code moved from tcp.c and ip.c; see those files
- *             for more names.
- *
- * 03/02/96    Jes Sorensen, Andreas Schwab, Roman Hodek:
- *             Fixed some nasty bugs, causing some horrible crashes.
- *             A: At some points, the sum (%0) was used as
- *             length-counter instead of the length counter
- *             (%1). Thanks to Roman Hodek for pointing this out.
- *             B: GCC seems to mess up if one uses too many
- *             data-registers to hold input values and one tries to
- *             specify d0 and d1 as scratch registers. Letting gcc choose these
- *      registers itself solves the problem.
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- */
-/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most
-   of the assembly has to go. */
-
-#include <net/checksum.h>
-#include <linux/module.h>
-
-static inline unsigned short from32to16(unsigned long x)
-{
-       /* add up 16-bit and 16-bit for 16+c bit */
-       x = (x & 0xffff) + (x >> 16);
-       /* add up carry.. */
-       x = (x & 0xffff) + (x >> 16);
-       return x;
-}
-
-static unsigned long do_csum(const unsigned char * buff, int len)
-{
-       int odd, count;
-       unsigned long result = 0;
-
-       if (len <= 0)
-               goto out;
-       odd = 1 & (unsigned long) buff;
-       if (odd) {
-               result = *buff;
-               len--;
-               buff++;
-       }
-       count = len >> 1;               /* nr of 16-bit words.. */
-       if (count) {
-               if (2 & (unsigned long) buff) {
-                       result += *(unsigned short *) buff;
-                       count--;
-                       len -= 2;
-                       buff += 2;
-               }
-               count >>= 1;            /* nr of 32-bit words.. */
-               if (count) {
-                       unsigned long carry = 0;
-                       do {
-                               unsigned long w = *(unsigned long *) buff;
-                               count--;
-                               buff += 4;
-                               result += carry;
-                               result += w;
-                               carry = (w > result);
-                       } while (count);
-                       result += carry;
-                       result = (result & 0xffff) + (result >> 16);
-               }
-               if (len & 2) {
-                       result += *(unsigned short *) buff;
-                       buff += 2;
-               }
-       }
-       if (len & 1)
-               result += (*buff << 8);
-       result = from32to16(result);
-       if (odd)
-               result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
-       return result;
-}
-
-/*
- *     This is a version of ip_compute_csum() optimized for IP headers,
- *     which always checksum on 4 octet boundaries.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
-       return (__force __sum16)~do_csum(iph,ihl*4);
-}
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-/*
- * Egads...  That thing apparently assumes that *all* checksums it ever sees will
- * be folded.  Very likely a bug.
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
-       unsigned int result = do_csum(buff, len);
-
-       /* add in old sum, and carry.. */
-       result += (__force u32)sum;
-       /* 16+c bits -> 16 bits */
-       result = (result & 0xffff) + (result >> 16);
-       return (__force __wsum)result;
-}
-
-EXPORT_SYMBOL(csum_partial);
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-__sum16 ip_compute_csum(const void *buff, int len)
-{
-       return (__force __sum16)~do_csum(buff,len);
-}
-
-/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len,
-                           __wsum sum, int *csum_err)
-{
-       if (csum_err) *csum_err = 0;
-       memcpy(dst, (__force const void *)src, len);
-       return csum_partial(dst, len, sum);
-}
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
-       memcpy(dst, src, len);
-       return csum_partial(dst, len, sum);
-}
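
The core of do_csum()/csum_partial() above is ones'-complement addition with the 32-to-16-bit fold done by from32to16(). The sketch below applies the same fold over a small buffer; it sums in network byte order for clarity rather than word-at-a-time as the kernel version does, and the sample bytes are purely illustrative.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t fold32to16(uint32_t x)
{
        x = (x & 0xffff) + (x >> 16);   /* add the high half into the low half */
        x = (x & 0xffff) + (x >> 16);   /* absorb the carry that may create */
        return (uint16_t)x;
}

static uint16_t inet_checksum(const uint8_t *buf, size_t len)
{
        uint32_t sum = 0;

        for (size_t i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)buf[i] << 8 | buf[i + 1];
        if (len & 1)
                sum += (uint32_t)buf[len - 1] << 8;   /* odd trailing byte */
        return (uint16_t)~fold32to16(sum);
}

int main(void)
{
        /* illustrative IP-header-like bytes with the checksum field zeroed */
        uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
                           0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
                           0xac, 0x10, 0x0a, 0x0c };

        printf("checksum: 0x%04x\n", inet_checksum(data, sizeof data));
        return 0;
}
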
diff --git a/arch/h8300/lib/memcpy.S b/arch/h8300/lib/memcpy.S
deleted file mode 100644 (file)
index cad325e..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-;;; memcpy.S
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__) 
-       .h8300h
-#endif
-#if defined(__H8300S__) 
-       .h8300s
-#endif
-
-       .text
-.global _memcpy
-
-;;; void *memcpy(void *to, void *from, size_t n)
-_memcpy:
-       mov.l   er2,er2
-       bne     1f
-       rts     
-1:     
-       ;; address check
-       bld     #0,r0l
-       bxor    #0,r1l
-       bcs     4f
-       mov.l   er4,@-sp
-       mov.l   er0,@-sp
-       btst    #0,r0l
-       beq     1f
-       ;; (aligned even) odd address
-       mov.b   @er1,r3l
-       mov.b   r3l,@er0
-       adds    #1,er1
-       adds    #1,er0
-       dec.l   #1,er2
-       beq     3f
-1:     
-       ;; n < sizeof(unsigned long) check
-       sub.l   er4,er4
-       adds    #4,er4          ; loop count check value
-       cmp.l   er4,er2
-       blo     2f
-       ;; unsigned long copy
-1:     
-       mov.l   @er1,er3
-       mov.l   er3,@er0
-       adds    #4,er0
-       adds    #4,er1
-       subs    #4,er2
-       cmp.l   er4,er2
-       bcc     1b      
-       ;; rest
-2:     
-       mov.l   er2,er2
-       beq     3f
-1:     
-       mov.b   @er1,r3l
-       mov.b   r3l,@er0
-       adds    #1,er1
-       adds    #1,er0
-       dec.l   #1,er2
-       bne     1b
-3:
-       mov.l   @sp+,er0
-       mov.l   @sp+,er4
-       rts
-
-       ;; odd <- even / even <- odd
-4:     
-       mov.l   er4,er3
-       mov.l   er2,er4
-       mov.l   er5,er2
-       mov.l   er1,er5
-       mov.l   er6,er1
-       mov.l   er0,er6
-1:
-       eepmov.w
-       mov.w   r4,r4
-       bne     1b
-       dec.w   #1,e4
-       bpl     1b
-       mov.l   er1,er6
-       mov.l   er2,er5
-       mov.l   er3,er4
-       rts
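
A C rendering of the copy strategy in the assembly above, assuming the usual reading of its alignment checks: when source and destination share parity, copy one leading byte to reach an even address, then 32-bit words, then the byte tail; the eepmov block-move path used for mismatched parity is reduced to a plain byte loop here.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stddef.h>

static void *memcpy_sketch(void *to, const void *from, size_t n)
{
        uint8_t *d = to;
        const uint8_t *s = from;

        /* Word copy is only attempted when both pointers share parity,
         * mirroring the bld/bxor check at the top of the assembly. */
        if (n && ((uintptr_t)d & 1) == ((uintptr_t)s & 1)) {
                if ((uintptr_t)d & 1) {    /* one byte to reach an even address */
                        *d++ = *s++;
                        n--;
                }
                while (n >= 4) {           /* stands in for the mov.l loop */
                        memcpy(d, s, 4);
                        d += 4;
                        s += 4;
                        n -= 4;
                }
        }
        while (n) {                        /* tail, or the mismatched case */
                *d++ = *s++;
                n--;
        }
        return to;
}

int main(void)
{
        char src[] = "h8300 memcpy sketch";
        char dst[sizeof src];

        memcpy_sketch(dst, src, sizeof src);
        printf("%s\n", dst);
        return 0;
}
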
diff --git a/arch/h8300/lib/memset.S b/arch/h8300/lib/memset.S
deleted file mode 100644 (file)
index 4549a64..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/* memset.S */
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__) 
-       .h8300h
-#endif
-#if defined(__H8300S__) 
-       .h8300s
-#endif
-       .text
-
-.global        _memset
-
-;;void *memset(*ptr, int c, size_t count)
-;; ptr = er0
-;; c   = er1(r1l)
-;; count = er2
-_memset:
-       btst    #0,r0l
-       beq     2f
-
-       ;; odd address
-1:
-       mov.b   r1l,@er0
-       adds    #1,er0
-       dec.l   #1,er2
-       beq     6f
-
-       ;; even address
-2:
-       mov.l   er2,er3
-       cmp.l   #4,er2
-       blo     4f
-       ;; count>=4 -> count/4
-#if defined(__H8300H__)
-       shlr.l  er2
-       shlr.l  er2
-#endif
-#if defined(__H8300S__)
-       shlr.l  #2,er2
-#endif
-       ;; byte -> long
-       mov.b   r1l,r1h
-       mov.w   r1,e1
-3:
-       mov.l   er1,@er0
-       adds    #4,er0
-       dec.l   #1,er2
-       bne     3b
-4:
-       ;; count % 4
-       and.b   #3,r3l
-       beq     6f
-5:
-       mov.b   r1l,@er0
-       adds    #1,er0
-       dec.b   r3l
-       bne     5b
-6:
-       rts
diff --git a/arch/h8300/lib/romfs.S b/arch/h8300/lib/romfs.S
deleted file mode 100644 (file)
index 68910d8..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/* romfs move to __ebss */
-
-#include <asm/linkage.h>
-
-#if defined(__H8300H__) 
-       .h8300h
-#endif
-#if defined(__H8300S__) 
-       .h8300s
-#endif
-
-#define BLKOFFSET 512
-
-       .text
-.globl __move_romfs
-_romfs_sig_len = 8
-
-__move_romfs:  
-       mov.l   #__sbss,er0
-       mov.l   #_romfs_sig,er1
-       mov.b   #_romfs_sig_len,r3l
-1:                                     /* check romfs image */
-       mov.b   @er0+,r2l
-       mov.b   @er1+,r2h
-       cmp.b   r2l,r2h
-       bne     2f
-       dec.b   r3l
-       bne     1b
-
-       /* find romfs image */
-       mov.l   @__sbss+8,er0           /* romfs length(be) */
-       mov.l   #__sbss,er1
-       add.l   er0,er1                 /* romfs image end */
-       mov.l   #__ebss,er2
-       add.l   er0,er2                 /* destination address */
-#if defined(CONFIG_INTELFLASH)
-       add.l   #BLKOFFSET,er2
-#endif
-       adds    #2,er0
-       adds    #1,er0
-       shlr    er0
-       shlr    er0                     /* transfer length */
-1:
-       mov.l   @er1,er3                /* copy image */
-       mov.l   er3,@er2
-       subs    #4,er1
-       subs    #4,er2
-       dec.l   #1,er0
-       bpl     1b
-2:
-       rts
-
-       .section        .rodata
-_romfs_sig:    
-       .ascii  "-rom1fs-"
-
-       .end
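
__move_romfs above first matches the 8-byte "-rom1fs-" signature and then reads the big-endian image length at offset 8 before relocating the image past __ebss. A userspace sketch of just that header check, with an invented sample buffer:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Returns 0 and fills *size when the buffer starts with a romfs image,
 * -1 otherwise.  The length word at offset 8 is big-endian, exactly as
 * the "romfs length(be)" load in the assembly assumes. */
static int romfs_image_size(const uint8_t *img, uint32_t *size)
{
        if (memcmp(img, "-rom1fs-", 8) != 0)
                return -1;
        *size = (uint32_t)img[8] << 24 | (uint32_t)img[9] << 16 |
                (uint32_t)img[10] << 8 | img[11];
        return 0;
}

int main(void)
{
        uint8_t fake[16] = "-rom1fs-";       /* invented sample buffer */
        uint32_t size;

        fake[10] = 0x10;                     /* big-endian 0x00001000 = 4096 */
        if (romfs_image_size(fake, &size) == 0)
                printf("romfs image, %u bytes\n", (unsigned int)size);
        return 0;
}
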
diff --git a/arch/h8300/mm/Makefile b/arch/h8300/mm/Makefile
deleted file mode 100644 (file)
index 5f4bc42..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux m68k-specific parts of the memory manager.
-#
-
-obj-y   := init.o fault.o memory.o kmap.o
diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c
deleted file mode 100644 (file)
index 4725359..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  linux/arch/h8300/mm/fault.c
- *
- *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
- *  Copyright (C) 2000  Lineo, Inc.  (www.lineo.com) 
- *
- *  Based on:
- *
- *  linux/arch/m68knommu/mm/fault.c
- *  linux/arch/m68k/mm/fault.c
- *
- *  Copyright (C) 1995  Hamish Macdonald
- */
-
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-
-#include <asm/pgtable.h>
-
-/*
- * This routine handles page faults.  It determines the problem, and
- * then passes it off to one of the appropriate routines.
- *
- * error_code:
- *     bit 0 == 0 means no page found, 1 means protection fault
- *     bit 1 == 0 means read, 1 means write
- *
- * If this routine detects a bad access, it returns 1, otherwise it
- * returns 0.
- */
-asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
-                             unsigned long error_code)
-{
-#ifdef DEBUG
-       printk ("regs->sr=%#x, regs->pc=%#lx, address=%#lx, %ld\n",
-               regs->sr, regs->pc, address, error_code);
-#endif
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-       if ((unsigned long) address < PAGE_SIZE) {
-               printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
-       } else
-               printk(KERN_ALERT "Unable to handle kernel access");
-       printk(" at virtual address %08lx\n",address);
-       if (!user_mode(regs))
-               die("Oops", regs, error_code);
-       do_exit(SIGKILL);
-
-       return 1;
-}
-
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
deleted file mode 100644 (file)
index 6c1251e..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- *  linux/arch/h8300/mm/init.c
- *
- *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
- *                      Kenneth Albanowski <kjahds@kjahds.com>,
- *  Copyright (C) 2000  Lineo, Inc.  (www.lineo.com) 
- *
- *  Based on:
- *
- *  linux/arch/m68knommu/mm/init.c
- *  linux/arch/m68k/mm/init.c
- *
- *  Copyright (C) 1995  Hamish Macdonald
- *
- *  JAN/1999 -- hacked to support ColdFire (gerg@snapgear.com)
- *  DEC/2000 -- linux 2.4 support <davidm@snapgear.com>
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/init.h>
-#include <linux/highmem.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/gfp.h>
-
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/sections.h>
-
-#undef DEBUG
-
-/*
- * BAD_PAGE is the page that is used for page faults when linux
- * is out-of-memory. Older versions of linux just did a
- * do_exit(), but using this instead means there is less risk
- * for a process dying in kernel mode, possibly leaving an inode
- * unused etc..
- *
- * BAD_PAGETABLE is the accompanying page-table: it is initialized
- * to point to BAD_PAGE entries.
- *
- * ZERO_PAGE is a special page that is used for zero-initialized
- * data and COW.
- */
-static unsigned long empty_bad_page_table;
-
-static unsigned long empty_bad_page;
-
-unsigned long empty_zero_page;
-
-extern unsigned long rom_length;
-
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
-/*
- * paging_init() continues the virtual memory environment setup which
- * was begun by the code in arch/head.S.
- * The parameters are pointers to where to stick the starting and ending
- * addresses of available kernel virtual memory.
- */
-void __init paging_init(void)
-{
-       /*
-        * Make sure start_mem is page aligned,  otherwise bootmem and
-        * page_alloc get different views of the world.
-        */
-#ifdef DEBUG
-       unsigned long start_mem = PAGE_ALIGN(memory_start);
-#endif
-       unsigned long end_mem   = memory_end & PAGE_MASK;
-
-#ifdef DEBUG
-       printk ("start_mem is %#lx\nvirtual_end is %#lx\n",
-               start_mem, end_mem);
-#endif
-
-       /*
-        * Initialize the bad page table and bad page to point
-        * to a couple of allocated pages.
-        */
-       empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-       empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-       empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
-       memset((void *)empty_zero_page, 0, PAGE_SIZE);
-
-       /*
-        * Set up SFC/DFC registers (user data space).
-        */
-       set_fs (USER_DS);
-
-#ifdef DEBUG
-       printk ("before free_area_init\n");
-
-       printk ("free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
-               start_mem, end_mem);
-#endif
-
-       {
-               unsigned long zones_size[MAX_NR_ZONES] = {0, };
-
-               zones_size[ZONE_DMA]     = 0 >> PAGE_SHIFT;
-               zones_size[ZONE_NORMAL]  = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
-#ifdef CONFIG_HIGHMEM
-               zones_size[ZONE_HIGHMEM] = 0;
-#endif
-               free_area_init(zones_size);
-       }
-}
-
-void __init mem_init(void)
-{
-       unsigned long codesize = _etext - _stext;
-
-       pr_devel("Mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
-
-       high_memory = (void *) (memory_end & PAGE_MASK);
-       max_mapnr = MAP_NR(high_memory);
-
-       /* this will put all low memory onto the freelists */
-       free_all_bootmem();
-
-       mem_init_print_info(NULL);
-       if (rom_length > 0 && rom_length > codesize)
-               pr_info("Memory available: %luK/%luK ROM\n",
-                       (rom_length - codesize) >> 10, rom_length >> 10);
-}
-
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void
-free_initmem(void)
-{
-#ifdef CONFIG_RAMKERNEL
-       free_initmem_default(-1);
-#endif
-}
-
diff --git a/arch/h8300/mm/kmap.c b/arch/h8300/mm/kmap.c
deleted file mode 100644 (file)
index f79edcd..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- *  linux/arch/h8300/mm/kmap.c
- *  
- *  Based on
- *  linux/arch/m68knommu/mm/kmap.c
- *
- *  Copyright (C) 2000 Lineo, <davidm@snapgear.com>
- *  Copyright (C) 2000-2002 David McCullough <davidm@snapgear.com>
- */
-
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/io.h>
-
-#undef DEBUG
-
-#define VIRT_OFFSET (0x01000000)
-
-/*
- * Map some physical address range into the kernel address space.
- */
-void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
-{
-       return (void *)(physaddr + VIRT_OFFSET);
-}
-
-/*
- * Unmap a ioremap()ed region again.
- */
-void iounmap(void *addr)
-{
-}
-
-/*
- * __iounmap unmaps nearly everything, so be careful:
- * it no longer frees any pointer/page tables, but that
- * wasn't used anyway and might be added later.
- */
-void __iounmap(void *addr, unsigned long size)
-{
-}
-
-/*
- * Set new cache mode for some kernel address space.
- * The caller must push data for that range itself, if such data may already
- * be in the cache.
- */
-void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
-{
-}
diff --git a/arch/h8300/mm/memory.c b/arch/h8300/mm/memory.c
deleted file mode 100644 (file)
index 06e3646..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *  linux/arch/h8300/mm/memory.c
- *
- *  Copyright (C) 2002  Yoshinori Sato <ysato@users.sourceforge.jp>,
- *
- *  Based on:
- *
- *  linux/arch/m68knommu/mm/memory.c
- *
- *  Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
- *  Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
- *
- *  Based on:
- *
- *  linux/arch/m68k/mm/memory.c
- *
- *  Copyright (C) 1995  Hamish Macdonald
- */
-
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/traps.h>
-#include <asm/io.h>
-
-void cache_clear (unsigned long paddr, int len)
-{
-}
-
-
-void cache_push (unsigned long paddr, int len)
-{
-}
-
-void cache_push_v (unsigned long vaddr, int len)
-{
-}
-
-/*
- * Map some physical address range into the kernel address space.
- */
-
-unsigned long kernel_map(unsigned long paddr, unsigned long size,
-                        int nocacheflag, unsigned long *memavailp )
-{
-       return paddr;
-}
-
diff --git a/arch/h8300/platform/h8300h/Makefile b/arch/h8300/platform/h8300h/Makefile
deleted file mode 100644 (file)
index 420f73b..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-# Reuse any files we can from the H8/300H
-#
-
-obj-y := irq.o ptrace_h8300h.o
diff --git a/arch/h8300/platform/h8300h/aki3068net/Makefile b/arch/h8300/platform/h8300h/aki3068net/Makefile
deleted file mode 100644 (file)
index b7ff780..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := crt0_ram.o
diff --git a/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S b/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S
deleted file mode 100644 (file)
index b2ad0f2..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8300h/aki3068net/crt0_ram.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       AE-3068 (aka. aki3068net)
- *  Memory Layout     :        RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-       
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-       
-       .global __start
-       .global _command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300h
-
-       .section .text
-       .file   "crt0_ram.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #RAMEND,sp
-       ldc     #0x80,ccr
-
-       /* Peripheral Setup */
-       
-#if defined(CONFIG_MTD_UCLINUX)
-       /* move romfs image */
-       jsr     @__move_romfs   
-#endif
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    er4
-       shlr    er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #_command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* uClinux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       .byte   0xff,0xff
-       ;; P2DDR
-       .byte   0xff,0xff
-       ;; P3DDR
-       .byte   0xff,0x00
-       ;; P4DDR
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x01,0x01
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x0c,0x0c
-       ;; P9DDR
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x30,0x30
-
-__target_name: 
-       .asciz  "AE-3068"
-       
-       .section .bootvec,"ax"
-       jmp     @__start
diff --git a/arch/h8300/platform/h8300h/generic/Makefile b/arch/h8300/platform/h8300h/generic/Makefile
deleted file mode 100644 (file)
index 2b12a17..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y :=  crt0_$(MODEL).o
diff --git a/arch/h8300/platform/h8300h/generic/crt0_ram.S b/arch/h8300/platform/h8300h/generic/crt0_ram.S
deleted file mode 100644 (file)
index 5ab7d9c..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8300h/generic/crt0_ram.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       AE-3068 (aka. aki3068net)
- *  Memory Layout     :        RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-       
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-       
-       .global __start
-       .global _command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300h
-
-       .section .text
-       .file   "crt0_ram.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #RAMEND,sp
-       ldc     #0x80,ccr
-
-       /* Peripheral Setup */
-       
-#if defined(CONFIG_BLK_DEV_BLKMEM)
-       /* move romfs image */
-       jsr     @__move_romfs   
-#endif
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    er4
-       shlr    er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #_command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* uClinux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; P4DDR
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; P9DDR
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x00,0x00
-
-__target_name: 
-       .asciz  "generic"
diff --git a/arch/h8300/platform/h8300h/generic/crt0_rom.S b/arch/h8300/platform/h8300h/generic/crt0_rom.S
deleted file mode 100644 (file)
index dda1dfa..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8300h/generic/crt0_rom.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       generic
- *  Memory Layout     :        ROM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-       
-       .global __start
-       .global __command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300h
-       .section .text
-       .file   "crt0_rom.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #__ramend,sp
-       ldc     #0x80,ccr
-
-       /* Peripheral Setup */
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    er4
-       shlr    er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy .data */
-#if !defined(CONFIG_H8300H_SIM)
-       /* copy .data */
-       mov.l   #__begin_data,er5
-       mov.l   #__sdata,er6
-       mov.l   #__edata,er4
-       sub.l   er6,er4
-       shlr.l  er4
-       shlr.l  er4
-1:     
-       mov.l   @er5+,er0
-       mov.l   er0,@er6
-       adds    #4,er6
-       dec.l   #1,er4
-       bne     1b      
-#endif
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #__command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* linux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; P4DDR
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; P9DDR
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x00,0x00
-
-       .section .rodata
-__target_name: 
-       .asciz  "generic"
-       
-       .section .bss
-__command_line:        
-       .space  512
-
-       /* interrupt vector */
-       .section .vectors,"ax"
-       .long   __start
-vector =       1
-       .rept   64-1
-       .long   _interrupt_redirect_table+vector*4
-vector =       vector + 1
-       .endr
diff --git a/arch/h8300/platform/h8300h/h8max/Makefile b/arch/h8300/platform/h8300h/h8max/Makefile
deleted file mode 100644 (file)
index b7ff780..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := crt0_ram.o
diff --git a/arch/h8300/platform/h8300h/h8max/crt0_ram.S b/arch/h8300/platform/h8300h/h8max/crt0_ram.S
deleted file mode 100644 (file)
index 6a0d4e2..0000000
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8300h/h8max/crt0_ram.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       H8MAX
- *  Memory Layout     :        RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-       
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-       
-       .global __start
-       .global _command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300h
-
-       .section .text
-       .file   "crt0_ram.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #RAMEND,sp
-       ldc     #0x80,ccr
-
-       /* Peripheral Setup */
-       
-#if defined(CONFIG_MTD_UCLINUX)
-       /* move romfs image */
-       jsr     @__move_romfs   
-#endif
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    er4
-       shlr    er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #_command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* uClinux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       .byte   0xff,0xff
-       ;; P2DDR
-       .byte   0xff,0xff
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; P4DDR
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x01,0x01
-       ;; P6DDR
-       .byte   0xf6,0xf6
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0xee,0xee
-       ;; P9DDR
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x30,0x30
-
-__target_name: 
-       .asciz  "H8MAX"
-       
-       .section .bootvec,"ax"
-       jmp     @__start
diff --git a/arch/h8300/platform/h8300h/irq.c b/arch/h8300/platform/h8300h/irq.c
deleted file mode 100644 (file)
index 0a50353..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * H8/300H-dependent interrupt handling.
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- */
-
-#include <linux/init.h>
-#include <linux/errno.h>
-
-#include <asm/ptrace.h>
-#include <asm/traps.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/gpio-internal.h>
-#include <asm/regs306x.h>
-
-const int __initconst h8300_saved_vectors[] = {
-#if defined(CONFIG_GDB_DEBUG)
-       TRAP3_VEC,      /* TRAPA #3 is GDB breakpoint */
-#endif
-       -1,
-};
-
-const h8300_vector __initconst h8300_trap_table[] = {
-       0, 0, 0, 0, 0, 0, 0, 0,
-       system_call,
-       0,
-       0,
-       trace_break,
-};
-
-int h8300_enable_irq_pin(unsigned int irq)
-{
-       int bitmask;
-       if (irq < EXT_IRQ0 || irq > EXT_IRQ5)
-               return 0;
-
-       /* initialize IRQ pin */
-       bitmask = 1 << (irq - EXT_IRQ0);
-       switch(irq) {
-       case EXT_IRQ0:
-       case EXT_IRQ1:
-       case EXT_IRQ2:
-       case EXT_IRQ3:
-               if (H8300_GPIO_RESERVE(H8300_GPIO_P8, bitmask) == 0)
-                       return -EBUSY;
-               H8300_GPIO_DDR(H8300_GPIO_P8, bitmask, H8300_GPIO_INPUT);
-               break;
-       case EXT_IRQ4:
-       case EXT_IRQ5:
-               if (H8300_GPIO_RESERVE(H8300_GPIO_P9, bitmask) == 0)
-                       return -EBUSY;
-               H8300_GPIO_DDR(H8300_GPIO_P9, bitmask, H8300_GPIO_INPUT);
-               break;
-       }
-
-       return 0;
-}
-
-void h8300_disable_irq_pin(unsigned int irq)
-{
-       int bitmask;
-       if (irq < EXT_IRQ0 || irq > EXT_IRQ5)
-               return;
-
-       /* disable interrupt & release IRQ pin */
-       bitmask = 1 << (irq - EXT_IRQ0);
-       switch(irq) {
-       case EXT_IRQ0:
-       case EXT_IRQ1:
-       case EXT_IRQ2:
-       case EXT_IRQ3:
-               *(volatile unsigned char *)IER &= ~bitmask;
-               H8300_GPIO_FREE(H8300_GPIO_P8, bitmask);
-               break ;
-       case EXT_IRQ4:
-       case EXT_IRQ5:
-               *(volatile unsigned char *)IER &= ~bitmask;
-               H8300_GPIO_FREE(H8300_GPIO_P9, bitmask);
-               break;
-       }
-}
diff --git a/arch/h8300/platform/h8300h/ptrace_h8300h.c b/arch/h8300/platform/h8300h/ptrace_h8300h.c
deleted file mode 100644 (file)
index 4f1ed02..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8300h/ptrace_h8300h.c
- *    ptrace CPU-dependent helper functions
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <asm/ptrace.h>
-
-#define CCR_MASK 0x6f    /* mode/imask not set */
-#define BREAKINST 0x5730 /* trapa #3 */
-
-/* Mapping from PT_xxx to the stack offset at which the register is
-   saved.  Notice that usp has no stack-slot and needs to be treated
-   specially (see get_reg/put_reg below). */
-static const int h8300_register_offset[] = {
-       PT_REG(er1), PT_REG(er2), PT_REG(er3), PT_REG(er4),
-       PT_REG(er5), PT_REG(er6), PT_REG(er0), PT_REG(orig_er0),
-       PT_REG(ccr), PT_REG(pc)
-};
-
-/* read register */
-long h8300_get_reg(struct task_struct *task, int regno)
-{
-       switch (regno) {
-       case PT_USP:
-               return task->thread.usp + sizeof(long)*2;
-       case PT_CCR:
-           return *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
-       default:
-           return *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]);
-       }
-}
-
-/* write register */
-int h8300_put_reg(struct task_struct *task, int regno, unsigned long data)
-{
-       unsigned short oldccr;
-       switch (regno) {
-       case PT_USP:
-               task->thread.usp = data - sizeof(long)*2;
-       case PT_CCR:
-               oldccr = *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
-               oldccr &= ~CCR_MASK;
-               data &= CCR_MASK;
-               data |= oldccr;
-               *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
-               break;
-       default:
-               *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
-               break;
-       }
-       return 0;
-}
-
-/* disable singlestep */
-void user_disable_single_step(struct task_struct *child)
-{
-       if((long)child->thread.breakinfo.addr != -1L) {
-               *child->thread.breakinfo.addr = child->thread.breakinfo.inst;
-               child->thread.breakinfo.addr = (unsigned short *)-1L;
-       }
-}
-
-/* calculate next pc */
-enum jump_type {none,    /* normal instruction */
-               jabs,    /* absolute address jump */
-               ind,     /* indirect address jump */
-		ret,     /* return from subroutine */
-               reg,     /* register indexed jump */
-               relb,    /* pc relative jump (byte offset) */
-               relw,    /* pc relative jump (word offset) */
-               };
-
-/* opcode decode table define
-   ptn: opcode pattern
-   msk: opcode bitmask
-   len: instruction length (<0 next table index)
-   jmp: jump operation mode */
-struct optable {
-       unsigned char bitpattern;
-       unsigned char bitmask;
-       signed char length;
-       signed char type;
-} __attribute__((aligned(1),packed));
-
-#define OPTABLE(ptn,msk,len,jmp)   \
-        {                          \
-               .bitpattern = ptn, \
-               .bitmask    = msk, \
-               .length     = len, \
-               .type       = jmp, \
-       }
-
-static const struct optable optable_0[] = {
-       OPTABLE(0x00,0xff, 1,none), /* 0x00 */
-       OPTABLE(0x01,0xff,-1,none), /* 0x01 */
-       OPTABLE(0x02,0xfe, 1,none), /* 0x02-0x03 */
-       OPTABLE(0x04,0xee, 1,none), /* 0x04-0x05/0x14-0x15 */
-       OPTABLE(0x06,0xfe, 1,none), /* 0x06-0x07 */
-       OPTABLE(0x08,0xea, 1,none), /* 0x08-0x09/0x0c-0x0d/0x18-0x19/0x1c-0x1d */
-       OPTABLE(0x0a,0xee, 1,none), /* 0x0a-0x0b/0x1a-0x1b */
-       OPTABLE(0x0e,0xee, 1,none), /* 0x0e-0x0f/0x1e-0x1f */
-       OPTABLE(0x10,0xfc, 1,none), /* 0x10-0x13 */
-       OPTABLE(0x16,0xfe, 1,none), /* 0x16-0x17 */
-       OPTABLE(0x20,0xe0, 1,none), /* 0x20-0x3f */
-       OPTABLE(0x40,0xf0, 1,relb), /* 0x40-0x4f */
-       OPTABLE(0x50,0xfc, 1,none), /* 0x50-0x53 */
-       OPTABLE(0x54,0xfd, 1,ret ), /* 0x54/0x56 */
-       OPTABLE(0x55,0xff, 1,relb), /* 0x55 */
-       OPTABLE(0x57,0xff, 1,none), /* 0x57 */
-       OPTABLE(0x58,0xfb, 2,relw), /* 0x58/0x5c */
-       OPTABLE(0x59,0xfb, 1,reg ), /* 0x59/0x5b */
-       OPTABLE(0x5a,0xfb, 2,jabs), /* 0x5a/0x5e */
-       OPTABLE(0x5b,0xfb, 2,ind ), /* 0x5b/0x5f */
-       OPTABLE(0x60,0xe8, 1,none), /* 0x60-0x67/0x70-0x77 */
-       OPTABLE(0x68,0xfa, 1,none), /* 0x68-0x69/0x6c-0x6d */
-       OPTABLE(0x6a,0xfe,-2,none), /* 0x6a-0x6b */
-       OPTABLE(0x6e,0xfe, 2,none), /* 0x6e-0x6f */
-       OPTABLE(0x78,0xff, 4,none), /* 0x78 */
-       OPTABLE(0x79,0xff, 2,none), /* 0x79 */
-       OPTABLE(0x7a,0xff, 3,none), /* 0x7a */
-       OPTABLE(0x7b,0xff, 2,none), /* 0x7b */
-       OPTABLE(0x7c,0xfc, 2,none), /* 0x7c-0x7f */
-       OPTABLE(0x80,0x80, 1,none), /* 0x80-0xff */
-};
-
-static const struct optable optable_1[] = {
-       OPTABLE(0x00,0xff,-3,none), /* 0x0100 */
-       OPTABLE(0x40,0xf0,-3,none), /* 0x0140-0x14f */
-       OPTABLE(0x80,0xf0, 1,none), /* 0x0180-0x018f */
-       OPTABLE(0xc0,0xc0, 2,none), /* 0x01c0-0x01ff */
-};
-
-static const struct optable optable_2[] = {
-       OPTABLE(0x00,0x20, 2,none), /* 0x6a0?/0x6a8?/0x6b0?/0x6b8? */
-       OPTABLE(0x20,0x20, 3,none), /* 0x6a2?/0x6aa?/0x6b2?/0x6ba? */
-};
-
-static const struct optable optable_3[] = {
-       OPTABLE(0x69,0xfb, 2,none), /* 0x010069/0x01006d/014069/0x01406d */
-       OPTABLE(0x6b,0xff,-4,none), /* 0x01006b/0x01406b */
-       OPTABLE(0x6f,0xff, 3,none), /* 0x01006f/0x01406f */
-       OPTABLE(0x78,0xff, 5,none), /* 0x010078/0x014078 */
-};
-
-static const struct optable optable_4[] = {
-       OPTABLE(0x00,0x78, 3,none), /* 0x0100690?/0x01006d0?/0140690/0x01406d0?/0x0100698?/0x01006d8?/0140698?/0x01406d8? */
-       OPTABLE(0x20,0x78, 4,none), /* 0x0100692?/0x01006d2?/0140692/0x01406d2?/0x010069a?/0x01006da?/014069a?/0x01406da? */
-};
-
-static const struct optables_list {
-       const struct optable *ptr;
-       int size;
-} optables[] = {
-#define OPTABLES(no)                                                   \
-        {                                                              \
-               .ptr  = optable_##no,                                  \
-               .size = sizeof(optable_##no) / sizeof(struct optable), \
-       }
-       OPTABLES(0),
-       OPTABLES(1),
-       OPTABLES(2),
-       OPTABLES(3),
-       OPTABLES(4),
-
-};
-
-const unsigned char condmask[] = {
-       0x00,0x40,0x01,0x04,0x02,0x08,0x10,0x20
-};
-
-static int isbranch(struct task_struct *task,int reson)
-{
-       unsigned char cond = h8300_get_reg(task, PT_CCR);
-       /* encode complex conditions */
-       /* B4: N^V
-          B5: Z|(N^V)
-          B6: C|Z */
-       __asm__("bld #3,%w0\n\t"
-               "bxor #1,%w0\n\t"
-               "bst #4,%w0\n\t"
-               "bor #2,%w0\n\t"
-               "bst #5,%w0\n\t"
-               "bld #2,%w0\n\t"
-               "bor #0,%w0\n\t"
-               "bst #6,%w0\n\t"
-               :"=&r"(cond)::"cc");
-       cond &= condmask[reson >> 1];
-       if (!(reson & 1))
-               return cond == 0;
-       else
-               return cond != 0;
-}
-
-static unsigned short *getnextpc(struct task_struct *child, unsigned short *pc)
-{
-       const struct optable *op;
-       unsigned char *fetch_p;
-       unsigned char inst;
-       unsigned long addr;
-       unsigned long *sp;
-       int op_len,regno;
-       op = optables[0].ptr;
-       op_len = optables[0].size;
-       fetch_p = (unsigned char *)pc;
-       inst = *fetch_p++;
-       do {
-               if ((inst & op->bitmask) == op->bitpattern) {
-                       if (op->length < 0) {
-                               op = optables[-op->length].ptr;
-                               op_len = optables[-op->length].size + 1;
-                               inst = *fetch_p++;
-                       } else {
-                               switch (op->type) {
-                               case none:
-                                       return pc + op->length;
-                               case jabs:
-                                       addr = *(unsigned long *)pc;
-                                       return (unsigned short *)(addr & 0x00ffffff);
-                               case ind:
-                                       addr = *pc & 0xff;
-                                       return (unsigned short *)(*(unsigned long *)addr);
-                               case ret:
-                                       sp = (unsigned long *)h8300_get_reg(child, PT_USP);
-                                       /* user stack frames
-                                          |   er0  | temporary saved
-                                          +--------+
-                                          |   exp  | exception stack frames
-                                          +--------+
-                                          | ret pc | userspace return address
-                                       */
-                                       return (unsigned short *)(*(sp+2) & 0x00ffffff);
-                               case reg:
-                                       regno = (*pc >> 4) & 0x07;
-                                       if (regno == 0)
-                                               addr = h8300_get_reg(child, PT_ER0);
-                                       else
-                                               addr = h8300_get_reg(child, regno-1+PT_ER1);
-                                       return (unsigned short *)addr;
-                               case relb:
-                                       if (inst == 0x55 || isbranch(child,inst & 0x0f))
-                                               pc = (unsigned short *)((unsigned long)pc +
-                                                                      ((signed char)(*fetch_p)));
-                                       return pc+1; /* skip myself */
-                               case relw:
-                                       if (inst == 0x5c || isbranch(child,(*fetch_p & 0xf0) >> 4))
-                                               pc = (unsigned short *)((unsigned long)pc +
-                                                                      ((signed short)(*(pc+1))));
-                                       return pc+2; /* skip myself */
-                               }
-                       }
-               } else
-                       op++;
-       } while(--op_len > 0);
-       return NULL;
-}
-
-/* Set breakpoint(s) to simulate a single step from the current PC.  */
-
-void user_enable_single_step(struct task_struct *child)
-{
-       unsigned short *nextpc;
-       nextpc = getnextpc(child,(unsigned short *)h8300_get_reg(child, PT_PC));
-       child->thread.breakinfo.addr = nextpc;
-       child->thread.breakinfo.inst = *nextpc;
-       *nextpc = BREAKINST;
-}
-
-asmlinkage void trace_trap(unsigned long bp)
-{
-       if ((unsigned long)current->thread.breakinfo.addr == bp) {
-               user_disable_single_step(current);
-               force_sig(SIGTRAP,current);
-       } else
-               force_sig(SIGILL,current);
-}
-
diff --git a/arch/h8300/platform/h8s/Makefile b/arch/h8300/platform/h8s/Makefile
deleted file mode 100644 (file)
index bf12418..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-# Reuse any files we can from the H8S
-#
-
-obj-y := ints_h8s.o ptrace_h8s.o
diff --git a/arch/h8300/platform/h8s/edosk2674/Makefile b/arch/h8300/platform/h8s/edosk2674/Makefile
deleted file mode 100644 (file)
index 8e34972..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y := crt0_$(MODEL).o
diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_ram.S b/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
deleted file mode 100644 (file)
index 5ed191b..0000000
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       EDOSK-2674
- *  Memory Layout     :        RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-                       
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-       
-       .global __start
-       .global __command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300s
-
-       .section .text
-       .file   "crt0_ram.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #RAMEND,sp
-       ldc     #0x80,ccr
-       ldc     #0x00,exr
-
-       /* Peripheral Setup */
-       bclr    #4,@INTCR:8     /* interrupt mode 2 */
-       bset    #5,@INTCR:8
-       bclr    #0,@IER+1:16
-       bset    #1,@ISCRL+1:16  /* IRQ0 Positive Edge */
-       bclr    #0,@ISCRL+1:16
-
-#if defined(CONFIG_MTD_UCLINUX)
-       /* move romfs image */
-       jsr     @__move_romfs   
-#endif
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   er5,er6
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    #2,er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #_command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* uClinux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       ;;      used,ddr
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x3f,0x3a
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; P7DDR
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0xff,0xff
-       ;; PBDDR
-       .byte   0xff,0x00
-       ;; PCDDR
-       .byte   0xff,0x00
-       ;; PDDDR
-       .byte   0xff,0x00
-       ;; PEDDR
-       .byte   0xff,0x00
-       ;; PFDDR
-       .byte   0xff,0xff
-       ;; PGDDR
-       .byte   0x0f,0x0f
-       ;; PHDDR
-       .byte   0x0f,0x0f
-
-__target_name: 
-       .asciz  "EDOSK-2674"
-       
-       .section .bootvec,"ax"
-       jmp     @__start
diff --git a/arch/h8300/platform/h8s/edosk2674/crt0_rom.S b/arch/h8300/platform/h8s/edosk2674/crt0_rom.S
deleted file mode 100644 (file)
index 06d1d7f..0000000
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8s/edosk2674/crt0_rom.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       EDOSK-2674
- *  Memory Layout     :        ROM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-               
-       .global __start
-       .global __command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300s
-       .section .text
-       .file   "crt0_rom.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #__ramend,sp
-       ldc     #0x80,ccr
-       ldc     #0,exr
-       
-       /* Peripheral Setup */
-;BSC/GPIO setup
-       mov.l   #init_regs,er0
-       mov.w   #0xffff,e2
-1:
-       mov.w   @er0+,r2
-       beq     2f
-       mov.w   @er0+,r1
-       mov.b   r1l,@er2
-       bra     1b
-
-2:
-;SDRAM setup
-#define SDRAM_SMR 0x400040
-
-       mov.b   #0,r0l
-       mov.b   r0l,@DRACCR:16
-       mov.w   #0x188,r0
-       mov.w   r0,@REFCR:16
-       mov.w   #0x85b4,r0
-       mov.w   r0,@DRAMCR:16
-       mov.b   #0,r1l
-       mov.b   r1l,@SDRAM_SMR
-       mov.w   #0x84b4,r0
-       mov.w   r0,@DRAMCR:16
-;special thanks to Arizona Cooperative Power
-       
-       /* copy .data */
-       mov.l   #__begin_data,er5
-       mov.l   #__sdata,er6
-       mov.l   #__edata,er4
-       sub.l   er6,er4
-       shlr.l  #2,er4
-1:     
-       mov.l   @er5+,er0
-       mov.l   er0,@er6
-       adds    #4,er6
-       dec.l   #1,er4
-       bne     1b      
-
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr.l  #2,er4          
-       sub.l   er0,er0
-1:
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #__command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* linux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-#define INIT_REGS_DATA(REGS,DATA) \
-       .word   ((REGS) & 0xffff),DATA
-
-init_regs:
-INIT_REGS_DATA(ASTCR,0xff)
-INIT_REGS_DATA(RDNCR,0x00)
-INIT_REGS_DATA(ABWCR,0x80)
-INIT_REGS_DATA(WTCRAH,0x27)
-INIT_REGS_DATA(WTCRAL,0x77)
-INIT_REGS_DATA(WTCRBH,0x71)
-INIT_REGS_DATA(WTCRBL,0x22)
-INIT_REGS_DATA(CSACRH,0x80)
-INIT_REGS_DATA(CSACRL,0x80)
-INIT_REGS_DATA(BROMCRH,0xa0)
-INIT_REGS_DATA(BROMCRL,0xa0)
-INIT_REGS_DATA(P3DDR,0x3a)
-INIT_REGS_DATA(P3ODR,0x06)
-INIT_REGS_DATA(PADDR,0xff)
-INIT_REGS_DATA(PFDDR,0xfe)
-INIT_REGS_DATA(PGDDR,0x0f)
-INIT_REGS_DATA(PHDDR,0x0f)
-INIT_REGS_DATA(PFCR0,0xff)
-INIT_REGS_DATA(PFCR2,0x0d)
-INIT_REGS_DATA(ITSR, 0x00)
-INIT_REGS_DATA(ITSR+1,0x3f)
-INIT_REGS_DATA(INTCR,0x20)
-               
-       .word   0
-
-gpio_table:
-       ;; P1DDR
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; P7DDR
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x00,0x00
-       ;; PCDDR
-       .byte   0x00,0x00
-       ;; PDDDR
-       .byte   0x00,0x00
-       ;; PEDDR
-       .byte   0x00,0x00
-       ;; PFDDR
-       .byte   0x00,0x00
-       ;; PGDDR
-       .byte   0x00,0x00
-       ;; PHDDR
-       .byte   0x00,0x00
-
-       .section .rodata
-__target_name: 
-       .asciz  "EDOSK-2674"
-       
-       .section .bss
-__command_line:        
-       .space  512
-
-       /* interrupt vector */
-       .section .vectors,"ax"
-       .long   __start
-       .long   __start
-vector =       2
-       .rept   126
-       .long   _interrupt_redirect_table+vector*4
-vector =       vector + 1
-       .endr
diff --git a/arch/h8300/platform/h8s/generic/Makefile b/arch/h8300/platform/h8s/generic/Makefile
deleted file mode 100644 (file)
index 44b4685..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-extra-y =  crt0_$(MODEL).o
diff --git a/arch/h8300/platform/h8s/generic/crt0_ram.S b/arch/h8300/platform/h8s/generic/crt0_ram.S
deleted file mode 100644 (file)
index 7018915..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8s/edosk2674/crt0_ram.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       generic
- *  Memory Layout     :        RAM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-                       
-#if !defined(CONFIG_BLKDEV_RESERVE)
-#if defined(CONFIG_GDB_DEBUG)
-#define RAMEND (__ramend - 0xc000)
-#else
-#define RAMEND __ramend
-#endif
-#else
-#define RAMEND CONFIG_BLKDEV_RESERVE_ADDRESS
-#endif
-       
-       .global __start
-       .global __command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300s
-
-       .section .text
-       .file   "crt0_ram.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #RAMEND,sp
-       ldc     #0x80,ccr
-       ldc     #0x00,exr
-
-       /* Peripheral Setup */
-       bclr    #4,@INTCR:8     /* interrupt mode 2 */
-       bset    #5,@INTCR:8
-
-#if defined(CONFIG_MTD_UCLINUX)
-       /* move romfs image */
-       jsr     @__move_romfs   
-#endif
-       
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   er5,er6
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr    #2,er4
-       sub.l   er0,er0
-1:     
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* copy kernel commandline */
-       mov.l   #COMMAND_START,er5
-       mov.l   #_command_line,er6
-       mov.w   #512,r4
-       eepmov.w
-
-       /* uClinux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       ;;      used,ddr
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; P7DDR
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x00,0x00
-       ;; PCDDR
-       .byte   0x00,0x00
-       ;; PDDDR
-       .byte   0x00,0x00
-       ;; PEDDR
-       .byte   0x00,0x00
-       ;; PFDDR
-       .byte   0x00,0x00
-       ;; PGDDR
-       .byte   0x00,0x00
-       ;; PHDDR
-       .byte   0x00,0x00
-
-__target_name: 
-       .asciz  "generic"
-       
-       .section .bootvec,"ax"
-       jmp     @__start
diff --git a/arch/h8300/platform/h8s/generic/crt0_rom.S b/arch/h8300/platform/h8s/generic/crt0_rom.S
deleted file mode 100644 (file)
index 623ba78..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8s/generic/crt0_rom.S
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- *  Platform-dependent startup
- *  Target Architecture:       generic
- *  Memory Layout     :        ROM
- */
-
-#define ASSEMBLY
-
-#include <asm/linkage.h>
-#include <asm/regs267x.h>
-       
-       .global __start
-       .global __command_line
-       .global __platform_gpio_table
-       .global __target_name
-       
-       .h8300s
-       .section .text
-       .file   "crt0_rom.S"
-
-       /* CPU Reset entry */
-__start:
-       mov.l   #__ramend,sp
-       ldc     #0x80,ccr
-       ldc     #0,exr
-       bclr    #4,@INTCR:8
-       bset    #5,@INTCR:8     /* Interrupt mode 2 */
-       
-       /* Peripheral Setup */
-       
-       /* copy .data */
-#if !defined(CONFIG_H8S_SIM)
-       mov.l   #__begin_data,er5
-       mov.l   #__sdata,er6
-       mov.l   #__edata,er4
-       sub.l   er6,er4
-       shlr.l  #2,er4
-1:     
-       mov.l   @er5+,er0
-       mov.l   er0,@er6
-       adds    #4,er6
-       dec.l   #1,er4
-       bne     1b      
-#endif
-
-       /* .bss clear */
-       mov.l   #__sbss,er5
-       mov.l   #__ebss,er4
-       sub.l   er5,er4
-       shlr.l  #2,er4          
-       sub.l   er0,er0
-1:
-       mov.l   er0,@er5
-       adds    #4,er5
-       dec.l   #1,er4
-       bne     1b
-
-       /* linux kernel start */
-       ldc     #0x90,ccr       /* running kernel */
-       mov.l   #_init_thread_union,sp
-       add.l   #0x2000,sp
-       jsr     @_start_kernel
-_exit:
-
-       jmp     _exit
-
-       rts
-
-       /* I/O port assign information */
-__platform_gpio_table: 
-       mov.l   #gpio_table,er0
-       rts
-
-gpio_table:
-       ;; P1DDR
-       .byte   0x00,0x00
-       ;; P2DDR
-       .byte   0x00,0x00
-       ;; P3DDR
-       .byte   0x00,0x00
-       ;; P4DDR
-       .byte   0x00,0x00
-       ;; P5DDR
-       .byte   0x00,0x00
-       ;; P6DDR
-       .byte   0x00,0x00
-       ;; dummy
-       .byte   0x00,0x00
-       ;; P8DDR
-       .byte   0x00,0x00
-       ;; PADDR
-       .byte   0x00,0x00
-       ;; PBDDR
-       .byte   0x00,0x00
-       ;; PCDDR
-       .byte   0x00,0x00
-       ;; PDDDR
-       .byte   0x00,0x00
-       ;; PEDDR
-       .byte   0x00,0x00
-       ;; PFDDR
-       .byte   0x00,0x00
-       ;; PGDDR
-       .byte   0x00,0x00
-       ;; PHDDR
-       .byte   0x00,0x00
-
-       .section .rodata
-__target_name: 
-       .asciz  "generic"
-       
-       .section .bss
-__command_line:        
-       .space  512
-
-       /* interrupt vector */
-       .section .vectors,"ax"
-       .long   __start
-       .long   __start
-vector =       2
-       .rept   126-1
-       .long   _interrupt_redirect_table+vector*4
-vector =       vector + 1
-       .endr
diff --git a/arch/h8300/platform/h8s/irq.c b/arch/h8300/platform/h8s/irq.c
deleted file mode 100644 (file)
index f3a5511..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * linux/arch/h8300/platform/h8s/ints_h8s.c
- * Interrupt handling CPU variants
- *
- * Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- */
-
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-
-#include <asm/ptrace.h>
-#include <asm/traps.h>
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/gpio-internal.h>
-#include <asm/regs267x.h>
-
-/* saved vector list */
-const int __initconst h8300_saved_vectors[] = {
-#if defined(CONFIG_GDB_DEBUG)
-       TRACE_VEC,
-       TRAP3_VEC,
-#endif
-       -1
-};
-
-/* trap entry table */
-const H8300_VECTOR __initconst h8300_trap_table[] = {
-       0,0,0,0,0,
-       trace_break,  /* TRACE */
-       0,0,
-       system_call,  /* TRAPA #0 */
-       0,0,0,0,0,0,0
-};
-
-/* IRQ pin assignment */
-struct irq_pins {
-       unsigned char port_no;
-       unsigned char bit_no;
-} __attribute__((aligned(1),packed));
-/* ISTR = 0 */
-static const struct irq_pins irq_assign_table0[16]={
-        {H8300_GPIO_P5,H8300_GPIO_B0},{H8300_GPIO_P5,H8300_GPIO_B1},
-       {H8300_GPIO_P5,H8300_GPIO_B2},{H8300_GPIO_P5,H8300_GPIO_B3},
-       {H8300_GPIO_P5,H8300_GPIO_B4},{H8300_GPIO_P5,H8300_GPIO_B5},
-       {H8300_GPIO_P5,H8300_GPIO_B6},{H8300_GPIO_P5,H8300_GPIO_B7},
-       {H8300_GPIO_P6,H8300_GPIO_B0},{H8300_GPIO_P6,H8300_GPIO_B1},
-       {H8300_GPIO_P6,H8300_GPIO_B2},{H8300_GPIO_P6,H8300_GPIO_B3},
-       {H8300_GPIO_P6,H8300_GPIO_B4},{H8300_GPIO_P6,H8300_GPIO_B5},
-       {H8300_GPIO_PF,H8300_GPIO_B1},{H8300_GPIO_PF,H8300_GPIO_B2},
-};
-/* ISTR = 1 */
-static const struct irq_pins irq_assign_table1[16]={
-       {H8300_GPIO_P8,H8300_GPIO_B0},{H8300_GPIO_P8,H8300_GPIO_B1},
-       {H8300_GPIO_P8,H8300_GPIO_B2},{H8300_GPIO_P8,H8300_GPIO_B3},
-       {H8300_GPIO_P8,H8300_GPIO_B4},{H8300_GPIO_P8,H8300_GPIO_B5},
-       {H8300_GPIO_PH,H8300_GPIO_B2},{H8300_GPIO_PH,H8300_GPIO_B3},
-       {H8300_GPIO_P2,H8300_GPIO_B0},{H8300_GPIO_P2,H8300_GPIO_B1},
-       {H8300_GPIO_P2,H8300_GPIO_B2},{H8300_GPIO_P2,H8300_GPIO_B3},
-       {H8300_GPIO_P2,H8300_GPIO_B4},{H8300_GPIO_P2,H8300_GPIO_B5},
-       {H8300_GPIO_P2,H8300_GPIO_B6},{H8300_GPIO_P2,H8300_GPIO_B7},
-};
-
-/* IRQ to GPIO pin translation */
-#define IRQ_GPIO_MAP(irqbit,irq,port,bit)                        \
-do {                                                             \
-       if (*(volatile unsigned short *)ITSR & irqbit) {          \
-               port = irq_assign_table1[irq - EXT_IRQ0].port_no; \
-               bit  = irq_assign_table1[irq - EXT_IRQ0].bit_no;  \
-       } else {                                                  \
-               port = irq_assign_table0[irq - EXT_IRQ0].port_no; \
-               bit  = irq_assign_table0[irq - EXT_IRQ0].bit_no;  \
-       }                                                         \
-} while(0)
-
-int h8300_enable_irq_pin(unsigned int irq)
-{
-       if (irq >= EXT_IRQ0 && irq <= EXT_IRQ15) {
-               unsigned short ptn = 1 << (irq - EXT_IRQ0);
-               unsigned int port_no,bit_no;
-               IRQ_GPIO_MAP(ptn, irq, port_no, bit_no);
-               if (H8300_GPIO_RESERVE(port_no, bit_no) == 0)
-			return -EBUSY;                   /* pin already in use */
-               H8300_GPIO_DDR(port_no, bit_no, H8300_GPIO_INPUT);
-               *(volatile unsigned short *)ISR &= ~ptn; /* ISR clear */
-       }
-
-       return 0;
-}
-
-void h8300_disable_irq_pin(unsigned int irq)
-{
-       if (irq >= EXT_IRQ0 && irq <= EXT_IRQ15) {
-               /* disable interrupt & release IRQ pin */
-               unsigned short ptn = 1 << (irq - EXT_IRQ0);
-               unsigned short port_no,bit_no;
-               *(volatile unsigned short *)ISR &= ~ptn;
-               *(volatile unsigned short *)IER &= ~ptn;
-               IRQ_GPIO_MAP(ptn, irq, port_no, bit_no);
-               H8300_GPIO_FREE(port_no, bit_no);
-       }
-}
diff --git a/arch/h8300/platform/h8s/ptrace_h8s.c b/arch/h8300/platform/h8s/ptrace_h8s.c
deleted file mode 100644 (file)
index c058ab1..0000000
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- *  linux/arch/h8300/platform/h8s/ptrace_h8s.c
- *    ptrace CPU-dependent helper functions
- *
- *  Yoshinori Sato <ysato@users.sourceforge.jp>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file COPYING in the main directory of
- * this archive for more details.
- */
-
-#include <linux/linkage.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-
-#define CCR_MASK  0x6f
-#define EXR_TRACE 0x80
-
-/* Mapping from PT_xxx to the stack offset at which the register is
-   saved.  Notice that usp has no stack-slot and needs to be treated
-   specially (see get_reg/put_reg below). */
-static const int h8300_register_offset[] = {
-       PT_REG(er1), PT_REG(er2), PT_REG(er3), PT_REG(er4),
-       PT_REG(er5), PT_REG(er6), PT_REG(er0), PT_REG(orig_er0),
-       PT_REG(ccr), PT_REG(pc),  0,           PT_REG(exr)
-};
-
-/* read register */
-long h8300_get_reg(struct task_struct *task, int regno)
-{
-       switch (regno) {
-       case PT_USP:
-               return task->thread.usp + sizeof(long)*2 + 2;
-       case PT_CCR:
-       case PT_EXR:
-           return *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
-       default:
-           return *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]);
-       }
-}
-
-/* write register */
-int h8300_put_reg(struct task_struct *task, int regno, unsigned long data)
-{
-       unsigned short oldccr;
-       switch (regno) {
-       case PT_USP:
-               task->thread.usp = data - sizeof(long)*2 - 2;
-       case PT_CCR:
-               oldccr = *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]);
-               oldccr &= ~CCR_MASK;
-               data &= CCR_MASK;
-               data |= oldccr;
-               *(unsigned short *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
-               break;
-       case PT_EXR:
-		/* modifying exr is not supported */
-               return -EIO;
-       default:
-               *(unsigned long *)(task->thread.esp0 + h8300_register_offset[regno]) = data;
-               break;
-       }
-       return 0;
-}
-
-/* disable singlestep */
-void user_disable_single_step(struct task_struct *child)
-{
-       *(unsigned short *)(child->thread.esp0 + h8300_register_offset[PT_EXR]) &= ~EXR_TRACE;
-}
-
-/* enable singlestep */
-void user_enable_single_step(struct task_struct *child)
-{
-       *(unsigned short *)(child->thread.esp0 + h8300_register_offset[PT_EXR]) |= EXR_TRACE;
-}
-
-asmlinkage void trace_trap(unsigned long bp)
-{
-       (void)bp;
-       force_sig(SIGTRAP,current);
-}
-
index 7740ab10a17192cb0596d0ee442282d549b3ba33..b10d61bc0f2ad4e0d721b25b40b0a205619fc7b7 100644 (file)
@@ -6,6 +6,7 @@ menu "Processor type and features"
 
 config IA64
        bool
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select PCI if (!IA64_HP_SIM)
        select ACPI if (!IA64_HP_SIM)
        select PM if (!IA64_HP_SIM)
index 556d0701a155351e844960aa00b51c55c820a3b9..c25302fb48d95636b59670efa423a59ff2d055c0 100644 (file)
@@ -85,4 +85,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_IA64_SOCKET_H */
index 5eb71d22c3d5901030eec5c9ead78dc1279d815e..59d52e3aef125b79b67afdaf1fcffd2b892367ff 100644 (file)
@@ -882,40 +882,10 @@ __init void prefill_possible_map(void)
                set_cpu_possible(i, true);
 }
 
-static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *obj;
-       struct acpi_madt_local_sapic *lsapic;
        cpumask_t tmp_map;
-       int cpu, physid;
-
-       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-               return -EINVAL;
-
-       if (!buffer.length || !buffer.pointer)
-               return -EINVAL;
-
-       obj = buffer.pointer;
-       if (obj->type != ACPI_TYPE_BUFFER)
-       {
-               kfree(buffer.pointer);
-               return -EINVAL;
-       }
-
-       lsapic = (struct acpi_madt_local_sapic *)obj->buffer.pointer;
-
-       if ((lsapic->header.type != ACPI_MADT_TYPE_LOCAL_SAPIC) ||
-           (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))) {
-               kfree(buffer.pointer);
-               return -EINVAL;
-       }
-
-       physid = ((lsapic->id << 8) | (lsapic->eid));
-
-       kfree(buffer.pointer);
-       buffer.length = ACPI_ALLOCATE_BUFFER;
-       buffer.pointer = NULL;
+       int cpu;
 
        cpumask_complement(&tmp_map, cpu_present_mask);
        cpu = cpumask_first(&tmp_map);
@@ -934,9 +904,9 @@ static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-       return _acpi_map_lsapic(handle, pcpu);
+       return _acpi_map_lsapic(handle, physid, pcpu);
 }
 EXPORT_SYMBOL(acpi_map_lsapic);
 
index 24be7c8da86ad3cbbdee6be0933fe302c9adaf02..52966650114f3198df49c109c02d70ae82d8adcb 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_M32R_SOCKET_H */
index 311a300d48cca82b82a5faa0bf6abbaee42cacef..75f25a8e300170ce85130da8fd2f739faa983059 100644 (file)
@@ -1,6 +1,7 @@
 config M68K
        bool
        default y
+       select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
        select HAVE_IDE
        select HAVE_AOUT if MMU
        select HAVE_DEBUG_BUGVERBOSE
index 697d50393dd07d8db81b73e22544aa3fc325d24d..47365b1ccbecfeb9c87f807ad5ab345649572114 100644 (file)
@@ -85,7 +85,7 @@ static int fd_request_irq(void)
 {
        if(MACH_IS_Q40)
                return request_irq(FLOPPY_IRQ, floppy_hardint,
-                                  IRQF_DISABLED, "floppy", floppy_hardint);
+                                  0, "floppy", floppy_hardint);
        else if(MACH_IS_SUN3X)
                return sun3xflop_request_irq();
        return -ENXIO;
index 95231e2f9d646efb00181fbcac41d1cbc637002c..a02ea3a7bb20a0299e0f1bd2bf2306c484a4bc92 100644 (file)
@@ -207,7 +207,7 @@ static int sun3xflop_request_irq(void)
        if(!once) {
                once = 1;
                error = request_irq(FLOPPY_IRQ, sun3xflop_hardint,
-                                   IRQF_DISABLED, "floppy", NULL);
+                                   0, "floppy", NULL);
                return ((error == 0) ? 0 : -1);
        } else return 0;
 }
index 639c731568b0046d2150af8da76c991a55a45855..3fadc4a93d977ec96456bf194b7a34a97daa495e 100644 (file)
@@ -3,3 +3,10 @@
 #else
 #include <asm/uaccess_mm.h>
 #endif
+
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
+#include <asm-generic/uaccess-unaligned.h>
+#else
+#define __get_user_unaligned(x, ptr)   __get_user((x), (ptr))
+#define __put_user_unaligned(x, ptr)   __put_user((x), (ptr))
+#endif
index ec30acbfe6db0507a74e2495db5ea24b31ca0e8d..99a98698bc959035fb89c2d004a02800abcf81a4 100644 (file)
@@ -70,7 +70,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
 
 static struct irqaction m68328_timer_irq = {
        .name    = "timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = hw_tick,
 };
 
index 0570741e5500b221003e3295f4596673cfa53b4b..d493ac43fe3f900fe1b6ecd999e3452e96bdbfd3 100644 (file)
@@ -59,7 +59,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
 
 static struct irqaction m68360_timer_irq = {
        .name    = "timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = hw_tick,
 };
 
index e8f3b97b0f7706ddcbc8dfa33bd412d4c4f84c20..493b3111d4c12b6d96e139a2e96afb84d022f558 100644 (file)
@@ -118,7 +118,7 @@ static irqreturn_t pit_tick(int irq, void *dummy)
 
 static struct irqaction pit_irq = {
        .name    = "timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = pit_tick,
 };
 
index bb5a25ada84872420e1ac518e85ac647dc5aa818..831a08cf6f40d7e6e6c28594035a75e0c6a8b1c7 100644 (file)
@@ -51,7 +51,7 @@ irqreturn_t mcfslt_profile_tick(int irq, void *dummy)
 
 static struct irqaction mcfslt_profile_irq = {
        .name    = "profile timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = mcfslt_profile_tick,
 };
 
@@ -93,7 +93,7 @@ static irqreturn_t mcfslt_tick(int irq, void *dummy)
 
 static struct irqaction mcfslt_timer_irq = {
        .name    = "timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = mcfslt_tick,
 };
 
index d06068e457643fc804f0b068679da230e640becb..cd496a20fcc7ced7b802e705badcae2417eb2256 100644 (file)
@@ -83,7 +83,7 @@ static irqreturn_t mcftmr_tick(int irq, void *dummy)
 
 static struct irqaction mcftmr_timer_irq = {
        .name    = "timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = mcftmr_tick,
 };
 
@@ -171,7 +171,7 @@ irqreturn_t coldfire_profile_tick(int irq, void *dummy)
 
 static struct irqaction coldfire_profile_irq = {
        .name    = "profile timer",
-       .flags   = IRQF_DISABLED | IRQF_TIMER,
+       .flags   = IRQF_TIMER,
        .handler = coldfire_profile_tick,
 };
 
index b82f82b743199ac4ff5b54d8cdbe6cc8c1246078..8370114e78aa4cea65a3ab5c44abbd19c4d39236 100644 (file)
@@ -1,5 +1,6 @@
 config MICROBLAZE
        def_bool y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_FUNCTION_TRACER
index d9d81c2192534b48062fb3f4923ee8ed994dcc7a..6e239123d6fe05e4a4ab2118f76816971c9a522a 100644 (file)
@@ -20,7 +20,6 @@ platforms += mti-sead3
 platforms += netlogic
 platforms += pmcs-msp71xx
 platforms += pnx833x
-platforms += powertv
 platforms += ralink
 platforms += rb532
 platforms += sgi-ip22
index f75ab4a2f2460a0d5652cbcacf25c2b68a2d24c3..04957828d1b2e19915bcf0dd659d7319e0d66ae0 100644 (file)
@@ -1,6 +1,7 @@
 config MIPS
        bool
        default y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_CONTEXT_TRACKING
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_IDE
@@ -8,6 +9,7 @@ config MIPS
        select HAVE_PERF_EVENTS
        select PERF_USE_VMALLOC
        select HAVE_ARCH_KGDB
+       select HAVE_ARCH_TRACEHOOK
        select ARCH_HAVE_CUSTOM_GPIO_H
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -18,6 +20,7 @@ config MIPS
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_DEBUG_KMEMLEAK
+       select HAVE_SYSCALL_TRACEPOINTS
        select ARCH_BINFMT_ELF_RANDOMIZE_PIE
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
        select RTC_LIB if !MACH_LOONGSON
@@ -146,6 +149,7 @@ config MIPS_COBALT
        select CSRC_R4K
        select CEVT_GT641XX
        select DMA_NONCOHERENT
+       select EARLY_PRINTK_8250 if EARLY_PRINTK
        select HW_HAS_PCI
        select I8253
        select I8259
@@ -412,23 +416,6 @@ config PMC_MSP
          of integrated peripherals, interfaces and DSPs in addition to
          a variety of MIPS cores.
 
-config POWERTV
-       bool "Cisco PowerTV"
-       select BOOT_ELF32
-       select CEVT_R4K
-       select CPU_MIPSR2_IRQ_VI
-       select CPU_MIPSR2_IRQ_EI
-       select CSRC_POWERTV
-       select DMA_NONCOHERENT
-       select HW_HAS_PCI
-       select SYS_HAS_CPU_MIPS32_R2
-       select SYS_SUPPORTS_32BIT_KERNEL
-       select SYS_SUPPORTS_BIG_ENDIAN
-       select SYS_SUPPORTS_HIGHMEM
-       select USB_OHCI_LITTLE_ENDIAN
-       help
-         This enables support for the Cisco PowerTV Platform.
-
 config RALINK
        bool "Ralink based machines"
        select CEVT_R4K
@@ -811,7 +798,6 @@ source "arch/mips/jz4740/Kconfig"
 source "arch/mips/lantiq/Kconfig"
 source "arch/mips/lasat/Kconfig"
 source "arch/mips/pmcs-msp71xx/Kconfig"
-source "arch/mips/powertv/Kconfig"
 source "arch/mips/ralink/Kconfig"
 source "arch/mips/sgi-ip27/Kconfig"
 source "arch/mips/sibyte/Kconfig"
@@ -890,9 +876,6 @@ config CSRC_BCM1480
 config CSRC_IOASIC
        bool
 
-config CSRC_POWERTV
-       bool
-
 config CSRC_R4K
        bool
 
@@ -1489,8 +1472,10 @@ config SYS_SUPPORTS_ZBOOT
        bool
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_BZIP2
+       select HAVE_KERNEL_LZ4
        select HAVE_KERNEL_LZMA
        select HAVE_KERNEL_LZO
+       select HAVE_KERNEL_XZ
 
 config SYS_SUPPORTS_ZBOOT_UART16550
        bool
@@ -1977,6 +1962,7 @@ config MIPS_VPE_APSP_API
 config MIPS_CMP
        bool "MIPS CMP framework support"
        depends on SYS_SUPPORTS_MIPS_CMP
+       select SMP
        select SYNC_R4K
        select SYS_SUPPORTS_SMP
        select SYS_SUPPORTS_SCHED_SMT if SMP
index 37871f0de15eca8c817f38b6dcdbb2b3b9c056b4..b147e7038ff0cb26042555ee6d10eafd8d070ca2 100644 (file)
@@ -20,6 +20,14 @@ config EARLY_PRINTK
          doesn't cooperate with an X server. You should normally say N here,
          unless you want to debug such a crash.
 
+config EARLY_PRINTK_8250
+       bool "8250/16550 and compatible serial early printk driver"
+       depends on EARLY_PRINTK
+       default n
+       help
+         If you say Y here, it will be possible to use an 8250/16550 serial
+         port as the boot console.
+
 config CMDLINE_BOOL
        bool "Built-in kernel command line"
        default n
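
EARLY_PRINTK_8250 only supplies the console output routine; a platform still has to register its physical UART with it. A minimal sketch of the expected hook-up, assuming the setup_8250_early_printk_port() helper declared in <asm/setup.h> (the base address and function name below are placeholders; the Cobalt hunk later in this series passes its real UART address the same way):

#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/setup.h>

#define MY_UART_BASE	0x1c800000	/* placeholder physical address */

void __init my_plat_prom_init(void)	/* hypothetical platform hook */
{
	/* ... firmware and memory setup ... */

	/* Arguments are base, register shift, timeout; the Cobalt caller passes 0 for the last two. */
	setup_8250_early_printk_port(CKSEG1ADDR(MY_UART_BASE), 0, 0);
}
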
index ca8f8340d75f535b1cad5c1c8eed14bbc71f9634..de300b9936076faad63463c884687d9e19ea914f 100644 (file)
@@ -285,15 +285,19 @@ endif
 # Others need ECOFF, so we build a 32-bit ELF binary for them which we then
 # convert to ECOFF using elf2ecoff.
 #
+quiet_cmd_32 = OBJCOPY $@
+       cmd_32 = $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
 vmlinux.32: vmlinux
-       $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
+       $(call cmd,32)
 
 #
 # The 64-bit ELF tools are pretty broken so at this time we generate 64-bit
 # ELF files from 32-bit files by conversion.
 #
+quiet_cmd_64 = OBJCOPY $@
+       cmd_64 = $(OBJCOPY) -O $(64bit-bfd) $(OBJCOPYFLAGS) $< $@
 vmlinux.64: vmlinux
-       $(OBJCOPY) -O $(64bit-bfd) $(OBJCOPYFLAGS) $< $@
+       $(call cmd,64)
 
 all:   $(all-y)
 
@@ -302,10 +306,16 @@ $(boot-y): $(vmlinux-32) FORCE
        $(Q)$(MAKE) $(build)=arch/mips/boot VMLINUX=$(vmlinux-32) \
                $(bootvars-y) arch/mips/boot/$@
 
+ifdef CONFIG_SYS_SUPPORTS_ZBOOT
 # boot/compressed
 $(bootz-y): $(vmlinux-32) FORCE
        $(Q)$(MAKE) $(build)=arch/mips/boot/compressed \
                $(bootvars-y) 32bit-bfd=$(32bit-bfd) $@
+else
+vmlinuz: FORCE
+       @echo '   CONFIG_SYS_SUPPORTS_ZBOOT is not enabled'
+       /bin/false
+endif
 
 
 CLEAN_FILES += vmlinux.32 vmlinux.64
index c76a90f7866427724d74a4df340ab3de9fdf6d9d..bac19dc43d1ddf6c101ffafaa18c590ccde8d523 100644 (file)
@@ -59,7 +59,7 @@ void __init board_setup(void)
                ret = -ENODEV;
        }
        if (ret)
-               panic("cannot initialize board support\n");
+               panic("cannot initialize board support");
 }
 
 int __init db1235_arch_init(void)
index c3b04c929f29b552014013f253d18e8500faae5b..516225d207eeed9cd934a00c8665cc936061fc1d 100644 (file)
@@ -20,7 +20,6 @@
 
 #include <asm/mach-ath79/ath79.h>
 #include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/ar933x_uart_platform.h>
 #include "common.h"
 #include "dev-common.h"
 
@@ -68,15 +67,11 @@ static struct resource ar933x_uart_resources[] = {
        },
 };
 
-static struct ar933x_uart_platform_data ar933x_uart_data;
 static struct platform_device ar933x_uart_device = {
        .name           = "ar933x-uart",
        .id             = -1,
        .resource       = ar933x_uart_resources,
        .num_resources  = ARRAY_SIZE(ar933x_uart_resources),
-       .dev = {
-               .platform_data  = &ar933x_uart_data,
-       },
 };
 
 void __init ath79_register_uart(void)
@@ -93,7 +88,6 @@ void __init ath79_register_uart(void)
                ath79_uart_data[0].uartclk = uart_clk_rate;
                platform_device_register(&ath79_uart_device);
        } else if (soc_is_ar933x()) {
-               ar933x_uart_data.uartclk = uart_clk_rate;
                platform_device_register(&ar933x_uart_device);
        } else {
                BUG();
index f3bf6d5bfb9d9430023d001e98c7468fa342b1eb..c52daf9b05c638afbe4062ea204bb6522c15ccd1 100644 (file)
@@ -4,4 +4,5 @@
 #
 
 obj-y                          += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
+obj-y                          += board.o
 obj-$(CONFIG_BCM47XX_SSB)      += wgt634u.o
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
new file mode 100644 (file)
index 0000000..f3f6bfe
--- /dev/null
@@ -0,0 +1,309 @@
+#include <linux/export.h>
+#include <linux/string.h>
+#include <bcm47xx_board.h>
+#include <bcm47xx_nvram.h>
+
+struct bcm47xx_board_type {
+       const enum bcm47xx_board board;
+       const char *name;
+};
+
+struct bcm47xx_board_type_list1 {
+       struct bcm47xx_board_type board;
+       const char *value1;
+};
+
+struct bcm47xx_board_type_list2 {
+       struct bcm47xx_board_type board;
+       const char *value1;
+       const char *value2;
+};
+
+struct bcm47xx_board_type_list3 {
+       struct bcm47xx_board_type board;
+       const char *value1;
+       const char *value2;
+       const char *value3;
+};
+
+struct bcm47xx_board_store {
+       enum bcm47xx_board board;
+       char name[BCM47XX_BOARD_MAX_NAME];
+};
+
+/* model_name */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_model_name[] __initconst = {
+       {{BCM47XX_BOARD_DLINK_DIR130, "D-Link DIR-130"}, "DIR-130"},
+       {{BCM47XX_BOARD_DLINK_DIR330, "D-Link DIR-330"}, "DIR-330"},
+       { {0}, 0},
+};
+
+/* model_no */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_model_no[] __initconst = {
+       {{BCM47XX_BOARD_ASUS_WL700GE, "Asus WL700"}, "WL700"},
+       { {0}, 0},
+};
+
+/* machine_name */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_machine_name[] __initconst = {
+       {{BCM47XX_BOARD_LINKSYS_WRTSL54GS, "Linksys WRTSL54GS"}, "WRTSL54GS"},
+       { {0}, 0},
+};
+
+/* hardware_version */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_hardware_version[] __initconst = {
+       {{BCM47XX_BOARD_ASUS_RTN16, "Asus RT-N16"}, "RT-N16-"},
+       {{BCM47XX_BOARD_ASUS_WL320GE, "Asus WL320GE"}, "WL320G-"},
+       {{BCM47XX_BOARD_ASUS_WL330GE, "Asus WL330GE"}, "WL330GE-"},
+       {{BCM47XX_BOARD_ASUS_WL500GD, "Asus WL500GD"}, "WL500gd-"},
+       {{BCM47XX_BOARD_ASUS_WL500GPV1, "Asus WL500GP V1"}, "WL500gp-"},
+       {{BCM47XX_BOARD_ASUS_WL500GPV2, "Asus WL500GP V2"}, "WL500GPV2-"},
+       {{BCM47XX_BOARD_ASUS_WL500W, "Asus WL500W"}, "WL500gW-"},
+       {{BCM47XX_BOARD_ASUS_WL520GC, "Asus WL520GC"}, "WL520GC-"},
+       {{BCM47XX_BOARD_ASUS_WL520GU, "Asus WL520GU"}, "WL520GU-"},
+       {{BCM47XX_BOARD_BELKIN_F7D4301, "Belkin F7D4301"}, "F7D4301"},
+       { {0}, 0},
+};
+
+/* productid */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_productid[] __initconst = {
+       {{BCM47XX_BOARD_ASUS_RTAC66U, "Asus RT-AC66U"}, "RT-AC66U"},
+       {{BCM47XX_BOARD_ASUS_RTN10, "Asus RT-N10"}, "RT-N10"},
+       {{BCM47XX_BOARD_ASUS_RTN10D, "Asus RT-N10D"}, "RT-N10D"},
+       {{BCM47XX_BOARD_ASUS_RTN10U, "Asus RT-N10U"}, "RT-N10U"},
+       {{BCM47XX_BOARD_ASUS_RTN12, "Asus RT-N12"}, "RT-N12"},
+       {{BCM47XX_BOARD_ASUS_RTN12B1, "Asus RT-N12B1"}, "RT-N12B1"},
+       {{BCM47XX_BOARD_ASUS_RTN12C1, "Asus RT-N12C1"}, "RT-N12C1"},
+       {{BCM47XX_BOARD_ASUS_RTN12D1, "Asus RT-N12D1"}, "RT-N12D1"},
+       {{BCM47XX_BOARD_ASUS_RTN12HP, "Asus RT-N12HP"}, "RT-N12HP"},
+       {{BCM47XX_BOARD_ASUS_RTN15U, "Asus RT-N15U"}, "RT-N15U"},
+       {{BCM47XX_BOARD_ASUS_RTN16, "Asus RT-N16"}, "RT-N16"},
+       {{BCM47XX_BOARD_ASUS_RTN53, "Asus RT-N53"}, "RT-N53"},
+       {{BCM47XX_BOARD_ASUS_RTN66U, "Asus RT-N66U"}, "RT-N66U"},
+       {{BCM47XX_BOARD_ASUS_WL300G, "Asus WL300G"}, "WL300g"},
+       {{BCM47XX_BOARD_ASUS_WLHDD, "Asus WLHDD"}, "WLHDD"},
+       { {0}, 0},
+};
+
+/* ModelId */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_ModelId[] __initconst = {
+       {{BCM47XX_BOARD_DELL_TM2300, "Dell WX-5565"}, "WX-5565"},
+       {{BCM47XX_BOARD_MOTOROLA_WE800G, "Motorola WE800G"}, "WE800G"},
+       {{BCM47XX_BOARD_MOTOROLA_WR850GP, "Motorola WR850GP"}, "WR850GP"},
+       {{BCM47XX_BOARD_MOTOROLA_WR850GV2V3, "Motorola WR850G"}, "WR850G"},
+       { {0}, 0},
+};
+
+/* melco_id or buffalo_id */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_melco_id[] __initconst = {
+       {{BCM47XX_BOARD_BUFFALO_WBR2_G54, "Buffalo WBR2-G54"}, "29bb0332"},
+       {{BCM47XX_BOARD_BUFFALO_WHR2_A54G54, "Buffalo WHR2-A54G54"}, "290441dd"},
+       {{BCM47XX_BOARD_BUFFALO_WHR_G125, "Buffalo WHR-G125"}, "32093"},
+       {{BCM47XX_BOARD_BUFFALO_WHR_G54S, "Buffalo WHR-G54S"}, "30182"},
+       {{BCM47XX_BOARD_BUFFALO_WHR_HP_G54, "Buffalo WHR-HP-G54"}, "30189"},
+       {{BCM47XX_BOARD_BUFFALO_WLA2_G54L, "Buffalo WLA2-G54L"}, "29129"},
+       {{BCM47XX_BOARD_BUFFALO_WZR_G300N, "Buffalo WZR-G300N"}, "31120"},
+       {{BCM47XX_BOARD_BUFFALO_WZR_RS_G54, "Buffalo WZR-RS-G54"}, "30083"},
+       {{BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP, "Buffalo WZR-RS-G54HP"}, "30103"},
+       { {0}, 0},
+};
+
+/* boot_hw_model, boot_hw_ver */
+static const
+struct bcm47xx_board_type_list2 bcm47xx_board_list_boot_hw[] __initconst = {
+       /* like WRT160N v3.0 */
+       {{BCM47XX_BOARD_CISCO_M10V1, "Cisco M10"}, "M10", "1.0"},
+       /* like WRT310N v2.0 */
+       {{BCM47XX_BOARD_CISCO_M20V1, "Cisco M20"}, "M20", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_E900V1, "Linksys E900 V1"}, "E900", "1.0"},
+       /* like WRT160N v3.0 */
+       {{BCM47XX_BOARD_LINKSYS_E1000V1, "Linksys E1000 V1"}, "E100", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_E1000V2, "Linksys E1000 V2"}, "E1000", "2.0"},
+       {{BCM47XX_BOARD_LINKSYS_E1000V21, "Linksys E1000 V2.1"}, "E1000", "2.1"},
+       {{BCM47XX_BOARD_LINKSYS_E1200V2, "Linksys E1200 V2"}, "E1200", "2.0"},
+       {{BCM47XX_BOARD_LINKSYS_E2000V1, "Linksys E2000 V1"}, "Linksys E2000", "1.0"},
+       /* like WRT610N v2.0 */
+       {{BCM47XX_BOARD_LINKSYS_E3000V1, "Linksys E3000 V1"}, "E300", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_E3200V1, "Linksys E3200 V1"}, "E3200", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_E4200V1, "Linksys E4200 V1"}, "E4200", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT150NV11, "Linksys WRT150N V1.1"}, "WRT150N", "1.1"},
+       {{BCM47XX_BOARD_LINKSYS_WRT150NV1, "Linksys WRT150N V1"}, "WRT150N", "1"},
+       {{BCM47XX_BOARD_LINKSYS_WRT160NV1, "Linksys WRT160N V1"}, "WRT160N", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT160NV3, "Linksys WRT160N V3"}, "WRT160N", "3.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT300NV11, "Linksys WRT300N V1.1"}, "WRT300N", "1.1"},
+       {{BCM47XX_BOARD_LINKSYS_WRT310NV1, "Linksys WRT310N V1"}, "WRT310N", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT310NV2, "Linksys WRT310N V2"}, "WRT310N", "2.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT54G3GV2, "Linksys WRT54G3GV2-VF"}, "WRT54G3GV2-VF", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT610NV1, "Linksys WRT610N V1"}, "WRT610N", "1.0"},
+       {{BCM47XX_BOARD_LINKSYS_WRT610NV2, "Linksys WRT610N V2"}, "WRT610N", "2.0"},
+       { {0}, 0},
+};
+
+/* board_id */
+static const
+struct bcm47xx_board_type_list1 bcm47xx_board_list_board_id[] __initconst = {
+       {{BCM47XX_BOARD_NETGEAR_WGR614V8, "Netgear WGR614 V8"}, "U12H072T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WGR614V9, "Netgear WGR614 V9"}, "U12H094T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR3300, "Netgear WNDR3300"}, "U12H093T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR3400V1, "Netgear WNDR3400 V1"}, "U12H155T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR3400V2, "Netgear WNDR3400 V2"}, "U12H187T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR3400VCNA, "Netgear WNDR3400 Vcna"}, "U12H155T01_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR3700V3, "Netgear WNDR3700 V3"}, "U12H194T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR4000, "Netgear WNDR4000"}, "U12H181T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR4500V1, "Netgear WNDR4500 V1"}, "U12H189T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNDR4500V2, "Netgear WNDR4500 V2"}, "U12H224T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR2000, "Netgear WNR2000"}, "U12H114T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "U12H136T99_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR3500U, "Netgear WNR3500U"}, "U12H136T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR3500V2, "Netgear WNR3500 V2"}, "U12H127T00_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR3500V2VC, "Netgear WNR3500 V2vc"}, "U12H127T70_NETGEAR"},
+       {{BCM47XX_BOARD_NETGEAR_WNR834BV2, "Netgear WNR834B V2"}, "U12H081T00_NETGEAR"},
+       { {0}, 0},
+};
+
+/* boardtype, boardnum, boardrev */
+static const
+struct bcm47xx_board_type_list3 bcm47xx_board_list_board[] __initconst = {
+       {{BCM47XX_BOARD_HUAWEI_E970, "Huawei E970"}, "0x048e", "0x5347", "0x11"},
+       {{BCM47XX_BOARD_PHICOMM_M1, "Phicomm M1"}, "0x0590", "80", "0x1104"},
+       {{BCM47XX_BOARD_ZTE_H218N, "ZTE H218N"}, "0x053d", "1234", "0x1305"},
+       { {0}, 0},
+};
+
+static const
+struct bcm47xx_board_type bcm47xx_board_unknown[] __initconst = {
+       {BCM47XX_BOARD_UNKNOWN, "Unknown Board"},
+};
+
+static struct bcm47xx_board_store bcm47xx_board = {BCM47XX_BOARD_NO, "Unknown Board"};
+
+static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
+{
+       char buf1[30];
+       char buf2[30];
+       char buf3[30];
+       const struct bcm47xx_board_type_list1 *e1;
+       const struct bcm47xx_board_type_list2 *e2;
+       const struct bcm47xx_board_type_list3 *e3;
+
+       if (bcm47xx_nvram_getenv("model_name", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_model_name; e1->value1; e1++) {
+                       if (!strcmp(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("model_no", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_model_no; e1->value1; e1++) {
+                       if (strstarts(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("machine_name", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_machine_name; e1->value1; e1++) {
+                       if (strstarts(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_hardware_version; e1->value1; e1++) {
+                       if (strstarts(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("productid", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_productid; e1->value1; e1++) {
+                       if (!strcmp(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("ModelId", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_ModelId; e1->value1; e1++) {
+                       if (!strcmp(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("melco_id", buf1, sizeof(buf1)) >= 0 ||
+           bcm47xx_nvram_getenv("buffalo_id", buf1, sizeof(buf1)) >= 0) {
+               /* buffalo hardware, check id for specific hardware matches */
+               for (e1 = bcm47xx_board_list_melco_id; e1->value1; e1++) {
+                       if (!strcmp(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("boot_hw_model", buf1, sizeof(buf1)) >= 0 &&
+           bcm47xx_nvram_getenv("boot_hw_ver", buf2, sizeof(buf2)) >= 0) {
+               for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
+                       if (!strcmp(buf1, e2->value1) &&
+                           !strcmp(buf2, e2->value2))
+                               return &e2->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("board_id", buf1, sizeof(buf1)) >= 0) {
+               for (e1 = bcm47xx_board_list_board_id; e1->value1; e1++) {
+                       if (!strcmp(buf1, e1->value1))
+                               return &e1->board;
+               }
+       }
+
+       if (bcm47xx_nvram_getenv("boardtype", buf1, sizeof(buf1)) >= 0 &&
+           bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0 &&
+           bcm47xx_nvram_getenv("boardrev", buf3, sizeof(buf3)) >= 0) {
+               for (e3 = bcm47xx_board_list_board; e3->value1; e3++) {
+                       if (!strcmp(buf1, e3->value1) &&
+                           !strcmp(buf2, e3->value2) &&
+                           !strcmp(buf3, e3->value3))
+                               return &e3->board;
+               }
+       }
+       return bcm47xx_board_unknown;
+}
+
+void __init bcm47xx_board_detect(void)
+{
+       int err;
+       char buf[10];
+       const struct bcm47xx_board_type *board_detected;
+
+       if (bcm47xx_board.board != BCM47XX_BOARD_NO)
+               return;
+
+       /* check if the nvram is available */
+       err = bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf));
+
+       /* init of nvram failed, probably too early now */
+       if (err == -ENXIO) {
+               return;
+       }
+
+       board_detected = bcm47xx_board_get_nvram();
+       bcm47xx_board.board = board_detected->board;
+       strlcpy(bcm47xx_board.name, board_detected->name,
+               BCM47XX_BOARD_MAX_NAME);
+}
+
+enum bcm47xx_board bcm47xx_board_get(void)
+{
+       return bcm47xx_board.board;
+}
+EXPORT_SYMBOL(bcm47xx_board_get);
+
+const char *bcm47xx_board_get_name(void)
+{
+       return bcm47xx_board.name;
+}
+EXPORT_SYMBOL(bcm47xx_board_get_name);
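
The lookup tables above resolve vendor NVRAM variables to a single board identifier; other platform code then keys off that identifier. A minimal sketch of a consumer, using only the bcm47xx_board_get()/bcm47xx_board_get_name() accessors exported here (the board check and the quirk are illustrative, not part of this patch):

#include <linux/init.h>
#include <linux/printk.h>
#include <bcm47xx_board.h>

static void __init my_board_quirks(void)	/* hypothetical helper */
{
	enum bcm47xx_board board = bcm47xx_board_get();

	pr_info("Detected board: %s\n", bcm47xx_board_get_name());

	/* Apply a board-specific workaround once the board is known. */
	if (board == BCM47XX_BOARD_ASUS_WL500GPV2)
		pr_info("applying WL500GP V2 quirks\n");
}
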
index cc40b74940f5f6531e4e74cfcf4c665a7b13a9fd..b4c585b1c62eb6279504daea7e1f3a2e7d0b777d 100644 (file)
@@ -190,3 +190,23 @@ int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len)
        return -ENOENT;
 }
 EXPORT_SYMBOL(bcm47xx_nvram_getenv);
+
+int bcm47xx_nvram_gpio_pin(const char *name)
+{
+       int i, err;
+       char nvram_var[10];
+       char buf[30];
+
+       for (i = 0; i < 16; i++) {
+               err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
+               if (err <= 0)
+                       continue;
+               err = bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf));
+               if (err <= 0)
+                       continue;
+               if (!strcmp(name, buf))
+                       return i;
+       }
+       return -ENOENT;
+}
+EXPORT_SYMBOL(bcm47xx_nvram_gpio_pin);
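
bcm47xx_nvram_gpio_pin() maps a firmware-assigned function name (NVRAM entries of the form gpioN=<name>) back to the pin number N. A sketch of the intended use, with a hypothetical "wps_button" entry as the example:

#include <linux/init.h>
#include <linux/printk.h>
#include <bcm47xx_nvram.h>

static int __init my_find_wps_button(void)	/* hypothetical helper */
{
	int pin = bcm47xx_nvram_gpio_pin("wps_button");

	if (pin < 0)	/* -ENOENT when no gpioN variable carries that name */
		return pin;

	pr_info("WPS button on GPIO %d\n", pin);
	/* ... request the GPIO and register an input device here ... */
	return 0;
}
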
index 8c155afb1299e7eda1a413cf4fe4fff3e3d2f093..5cba318bc1cd8e1ce32a8535ecbf79acf1000bf5 100644 (file)
 #include <asm/bootinfo.h>
 #include <asm/fw/cfe/cfe_api.h>
 #include <asm/fw/cfe/cfe_error.h>
+#include <bcm47xx.h>
+#include <bcm47xx_board.h>
 
 static int cfe_cons_handle;
 
+static u16 get_chip_id(void)
+{
+       switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+       case BCM47XX_BUS_TYPE_SSB:
+               return bcm47xx_bus.ssb.chip_id;
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+       case BCM47XX_BUS_TYPE_BCMA:
+               return bcm47xx_bus.bcma.bus.chipinfo.id;
+#endif
+       }
+       return 0;
+}
+
 const char *get_system_type(void)
 {
-       return "Broadcom BCM47XX";
+       static char buf[50];
+       u16 chip_id = get_chip_id();
+
+       snprintf(buf, sizeof(buf),
+                (chip_id > 0x9999) ? "Broadcom BCM%d (%s)" :
+                                     "Broadcom BCM%04X (%s)",
+                chip_id, bcm47xx_board_get_name());
+
+       return buf;
 }
 
 void prom_putchar(char c)
index b2246cd9ca1283eeccddbc8309c49eca28364c9a..1f30571968e78194ad6338b4f0ac3cb3b64332bc 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/time.h>
 #include <bcm47xx.h>
 #include <bcm47xx_nvram.h>
+#include <bcm47xx_board.h>
 
 union bcm47xx_bus bcm47xx_bus;
 EXPORT_SYMBOL(bcm47xx_bus);
@@ -221,6 +222,7 @@ void __init plat_mem_setup(void)
        _machine_restart = bcm47xx_machine_restart;
        _machine_halt = bcm47xx_machine_halt;
        pm_power_off = bcm47xx_machine_halt;
+       bcm47xx_board_detect();
 }
 
 static int __init bcm47xx_register_bus_complete(void)
index 536374dcba78995981b2a0bc04ab7ae62941aa00..2c85d9254b5e91d017e6a36351294e0c7b931d74 100644 (file)
 #include <linux/ssb/ssb.h>
 #include <asm/time.h>
 #include <bcm47xx.h>
+#include <bcm47xx_nvram.h>
+#include <bcm47xx_board.h>
 
 void __init plat_time_init(void)
 {
        unsigned long hz = 0;
+       u16 chip_id = 0;
+       char buf[10];
+       int len;
+       enum bcm47xx_board board = bcm47xx_board_get();
 
        /*
         * Use deterministic values for initial counter interrupt
@@ -43,15 +49,32 @@ void __init plat_time_init(void)
 #ifdef CONFIG_BCM47XX_SSB
        case BCM47XX_BUS_TYPE_SSB:
                hz = ssb_cpu_clock(&bcm47xx_bus.ssb.mipscore) / 2;
+               chip_id = bcm47xx_bus.ssb.chip_id;
                break;
 #endif
 #ifdef CONFIG_BCM47XX_BCMA
        case BCM47XX_BUS_TYPE_BCMA:
                hz = bcma_cpu_clock(&bcm47xx_bus.bcma.bus.drv_mips) / 2;
+               chip_id = bcm47xx_bus.bcma.bus.chipinfo.id;
                break;
 #endif
        }
 
+       if (chip_id == 0x5354) {
+               len = bcm47xx_nvram_getenv("clkfreq", buf, sizeof(buf));
+               if (len >= 0 && !strncmp(buf, "200", 4))
+                       hz = 100000000;
+       }
+
+       switch (board) {
+       case BCM47XX_BOARD_ASUS_WL520GC:
+       case BCM47XX_BOARD_ASUS_WL520GU:
+               hz = 100000000;
+               break;
+       default:
+               break;
+       }
+
        if (!hz)
                hz = 100000000;
 
index 0048c08978965428a32a03bf211c3f86909c12fb..ca0c343c9ea5ed024b87a5e314b4118d9ecff04e 100644 (file)
@@ -37,6 +37,10 @@ vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
 vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY)                += $(obj)/uart-alchemy.o
 endif
 
+ifdef CONFIG_KERNEL_XZ
+vmlinuzobjs-y += $(obj)/../../lib/ashldi3.o
+endif
+
 targets += vmlinux.bin
 OBJCOPYFLAGS_vmlinux.bin := $(OBJCOPYFLAGS) -O binary -R .comment -S
 $(obj)/vmlinux.bin: $(KBUILD_IMAGE) FORCE
@@ -44,8 +48,10 @@ $(obj)/vmlinux.bin: $(KBUILD_IMAGE) FORCE
 
 tool_$(CONFIG_KERNEL_GZIP)    = gzip
 tool_$(CONFIG_KERNEL_BZIP2)   = bzip2
+tool_$(CONFIG_KERNEL_LZ4)     = lz4
 tool_$(CONFIG_KERNEL_LZMA)    = lzma
 tool_$(CONFIG_KERNEL_LZO)     = lzo
+tool_$(CONFIG_KERNEL_XZ)      = xzkern
 
 targets += vmlinux.bin.z
 $(obj)/vmlinux.bin.z: $(obj)/vmlinux.bin FORCE
index 2c9573098c0dab7889895de68c2dae5bb9ad9ba8..a8c6fd6a440667cf58c35d4655862783de7d271f 100644 (file)
@@ -43,7 +43,8 @@ void error(char *x)
 /* activate the code for pre-boot environment */
 #define STATIC static
 
-#ifdef CONFIG_KERNEL_GZIP
+#if defined(CONFIG_KERNEL_GZIP) || defined(CONFIG_KERNEL_XZ) || \
+       defined(CONFIG_KERNEL_LZ4)
 void *memcpy(void *dest, const void *src, size_t n)
 {
        int i;
@@ -54,6 +55,8 @@ void *memcpy(void *dest, const void *src, size_t n)
                d[i] = s[i];
        return dest;
 }
+#endif
+#ifdef CONFIG_KERNEL_GZIP
 #include "../../../../lib/decompress_inflate.c"
 #endif
 
@@ -70,6 +73,10 @@ void *memset(void *s, int c, size_t n)
 #include "../../../../lib/decompress_bunzip2.c"
 #endif
 
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
 #ifdef CONFIG_KERNEL_LZMA
 #include "../../../../lib/decompress_unlzma.c"
 #endif
@@ -78,6 +85,10 @@ void *memset(void *s, int c, size_t n)
 #include "../../../../lib/decompress_unlzo.c"
 #endif
 
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
 void decompress_kernel(unsigned long boot_heap_start)
 {
        unsigned long zimage_start, zimage_size;
index 8e6b07ca2f5e6924b68b8ce177aa7c9e7119e80e..5a33409c7f63b09bf7599ed77a4dd762c99f8ead 100644 (file)
@@ -8,6 +8,9 @@
 
 OUTPUT_ARCH(mips)
 ENTRY(start)
+PHDRS {
+       text PT_LOAD FLAGS(7); /* RWX */
+}
 SECTIONS
 {
        /* Text and read-only data */
@@ -15,7 +18,7 @@ SECTIONS
        .text : {
                *(.text)
                *(.rodata)
-       }
+       }: text
        /* End of text section */
 
        /* Writable data */
index b212ae12e5ac7dc8ca35dfb24324ba638353f85e..331b837cec57d284d704282aec779056c7f623be 100644 (file)
@@ -999,7 +999,7 @@ void __init plat_mem_setup(void)
 
        if (total == 0)
                panic("Unable to allocate memory from "
-                     "cvmx_bootmem_phy_alloc\n");
+                     "cvmx_bootmem_phy_alloc");
 }
 
 /*
@@ -1081,7 +1081,7 @@ void __init device_tree_init(void)
        /* Copy the default tree from init memory. */
        initial_boot_params = early_init_dt_alloc_memory_arch(dt_size, 8);
        if (initial_boot_params == NULL)
-               panic("Could not allocate initial_boot_params\n");
+               panic("Could not allocate initial_boot_params");
        memcpy(initial_boot_params, fdt, dt_size);
 
        if (do_prune) {
index 61a334ac43ac7c52565713e4a45ed882da57132d..558e94977942033dc8247bcc510ebb705aa9698a 100644 (file)
@@ -5,5 +5,4 @@
 obj-y := buttons.o irq.o lcd.o led.o reset.o rtc.o serial.o setup.o time.o
 
 obj-$(CONFIG_PCI)              += pci.o
-obj-$(CONFIG_EARLY_PRINTK)     += console.o
 obj-$(CONFIG_MTD_PHYSMAP)      += mtd.o
diff --git a/arch/mips/cobalt/console.c b/arch/mips/cobalt/console.c
deleted file mode 100644 (file)
index d1ba701..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * (C) P. Horton 2006
- */
-#include <linux/io.h>
-#include <linux/serial_reg.h>
-
-#include <cobalt.h>
-
-#define UART_BASE      ((void __iomem *)CKSEG1ADDR(0x1c800000))
-
-void prom_putchar(char c)
-{
-       if (cobalt_board_id <= COBALT_BRD_ID_QUBE1)
-               return;
-
-       while (!(readb(UART_BASE + UART_LSR) & UART_LSR_THRE))
-               ;
-
-       writeb(c, UART_BASE + UART_TX);
-}
index ec3b2c417f7cc42b1d3390355cd1067547664022..9a8c2fe8d3345a95a644787668e118bbfa51fedb 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <asm/bootinfo.h>
 #include <asm/reboot.h>
+#include <asm/setup.h>
 #include <asm/gt64120.h>
 
 #include <cobalt.h>
@@ -112,6 +113,8 @@ void __init prom_init(void)
        }
 
        add_memory_region(0x0, memsz, BOOT_MEM_RAM);
+
+       setup_8250_early_printk_port(CKSEG1ADDR(0x1c800000), 0, 0);
 }
 
 void __init prom_free_prom_memory(void)
diff --git a/arch/mips/configs/powertv_defconfig b/arch/mips/configs/powertv_defconfig
deleted file mode 100644 (file)
index 7fda0ce..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-CONFIG_POWERTV=y
-CONFIG_BOOTLOADER_FAMILY="R2"
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_1000=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
-CONFIG_CROSS_COMPILE=""
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_RD_GZIP is not set
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_KALLSYMS_ALL=y
-# CONFIG_PCSPKR_PLATFORM is not set
-# CONFIG_EPOLL is not set
-# CONFIG_SIGNALFD is not set
-# CONFIG_EVENTFD is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_SLUB_DEBUG is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PCI=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_PNP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET6_XFRM_MODE_BEET is not set
-# CONFIG_IPV6_SIT is not set
-CONFIG_IPV6_TUNNEL=y
-CONFIG_NETFILTER=y
-# CONFIG_BRIDGE_NETFILTER is not set
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_ARPTABLES=y
-CONFIG_IP_NF_ARPFILTER=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_BRIDGE=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_TBF=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=32768
-# CONFIG_MISC_DEVICES is not set
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_ATA=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_WLAN is not set
-CONFIG_USB_RTL8150=y
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-# CONFIG_MFD_SUPPORT is not set
-# CONFIG_VGA_ARB is not set
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_EHCI_HCD=y
-# CONFIG_USB_EHCI_TT_NEWSCHED is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_SERIAL_CONSOLE=y
-CONFIG_USB_SERIAL_CP210X=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
-# CONFIG_DNOTIFY is not set
-CONFIG_FUSE_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_PREEMPT is not set
-CONFIG_DEBUG_INFO=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-# CONFIG_EARLY_PRINTK is not set
-CONFIG_CMDLINE_BOOL=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
index 22afed16ccde293a741e88c9f2fd9f56b733ccd1..41a2fa1fa12e549f75f094505ca044a19b56088d 100644 (file)
  *            7        FPU/R4k timer
  *
  * We handle the IRQ according to _our_ priority (see setup.c),
- * then we just return.         If multiple IRQs are pending then we will
+ * then we just return.  If multiple IRQs are pending then we will
  * just take another exception, big deal.
  */
                .align  5
                /*
                 * Find irq with highest priority
                 */
-                PTR_LA t1,cpu_mask_nr_tbl
+                PTR_LA t1,cpu_mask_nr_tbl
 1:             lw      t2,(t1)
                nop
                and     t2,t0
                /*
                 * Find irq with highest priority
                 */
-                PTR_LA t1,asic_mask_nr_tbl
+                PTR_LA t1,asic_mask_nr_tbl
 2:             lw      t2,(t1)
                nop
                and     t2,t0
                FEXPORT(cpu_all_int)            # HALT, timers, software junk
                li      a0,DEC_CPU_IRQ_BASE
                srl     t0,CAUSEB_IP
-               li      t1,CAUSEF_IP>>CAUSEB_IP # mask
+               li      t1,CAUSEF_IP>>CAUSEB_IP # mask
                b       1f
                 li     t2,4                    # nr of bits / 2
 
index 4b3e3a4375a6756b2af743c3f163d50055fb2c45..e04d973ce5aa24cd832771b417b0b5ce23f14ac4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     DEC I/O ASIC interrupts.
  *
- *     Copyright (c) 2002, 2003  Maciej W. Rozycki
+ *     Copyright (c) 2002, 2003, 2013  Maciej W. Rozycki
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License
@@ -51,22 +51,51 @@ static struct irq_chip ioasic_irq_type = {
        .irq_unmask = unmask_ioasic_irq,
 };
 
-void clear_ioasic_dma_irq(unsigned int irq)
+static void clear_ioasic_dma_irq(struct irq_data *d)
 {
        u32 sir;
 
-       sir = ~(1 << (irq - ioasic_irq_base));
+       sir = ~(1 << (d->irq - ioasic_irq_base));
        ioasic_write(IO_REG_SIR, sir);
+       fast_iob();
 }
 
 static struct irq_chip ioasic_dma_irq_type = {
        .name = "IO-ASIC-DMA",
-       .irq_ack = ack_ioasic_irq,
+       .irq_ack = clear_ioasic_dma_irq,
        .irq_mask = mask_ioasic_irq,
-       .irq_mask_ack = ack_ioasic_irq,
        .irq_unmask = unmask_ioasic_irq,
+       .irq_eoi = clear_ioasic_dma_irq,
 };
 
+/*
+ * The I/O ASIC implements two kinds of DMA interrupts, informational and
+ * error interrupts.
+ *
+ * The former do not stop DMA and should be cleared as soon as possible
+ * so that if they retrigger before the handler has completed, usually as
+ * a side effect of actions taken by the handler, then they are reissued.
+ * These use the `handle_edge_irq' handler that clears the request right
+ * away.
+ *
+ * The latter stop DMA and do not resume it until the interrupt has been
+ * cleared.  This cannot be done until after a corrective action has been
+ * taken and this also means they will not retrigger.  Therefore they use
+ * the `handle_fasteoi_irq' handler that only clears the request on the
+ * way out.  Because MIPS processor interrupt inputs, one of which the I/O
+ * ASIC is cascaded to, are level-triggered, it is recommended that error
+ * DMA interrupt action handlers be registered with the IRQF_ONESHOT flag
+ * set so that they are run with the interrupt line masked.
+ *
+ * This mask has `1' bits in the positions of informational interrupts.
+ */
+#define IO_IRQ_DMA_INFO                                                        \
+       (IO_IRQ_MASK(IO_INR_SCC0A_RXDMA) |                              \
+        IO_IRQ_MASK(IO_INR_SCC1A_RXDMA) |                              \
+        IO_IRQ_MASK(IO_INR_ISDN_TXDMA) |                               \
+        IO_IRQ_MASK(IO_INR_ISDN_RXDMA) |                               \
+        IO_IRQ_MASK(IO_INR_ASC_DMA))
+
 void __init init_ioasic_irqs(int base)
 {
        int i;
@@ -79,7 +108,9 @@ void __init init_ioasic_irqs(int base)
                irq_set_chip_and_handler(i, &ioasic_irq_type,
                                         handle_level_irq);
        for (; i < base + IO_IRQ_LINES; i++)
-               irq_set_chip(i, &ioasic_dma_irq_type);
+               irq_set_chip_and_handler(i, &ioasic_dma_irq_type,
+                                        1 << (i - base) & IO_IRQ_DMA_INFO ?
+                                        handle_edge_irq : handle_fasteoi_irq);
 
        ioasic_irq_base = base;
 }
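
As the comment recommends for the error (fasteoi) DMA interrupts, their action handlers should run with the line masked. A hedged sketch of such a registration, using the generic threaded-IRQ API (the IRQ number, handler and names below are placeholders, not part of this patch):

#include <linux/interrupt.h>

static irqreturn_t my_scsi_dma_err_handler(int irq, void *dev_id)
{
	/* Corrective action runs while the interrupt line is still masked. */
	return IRQ_HANDLED;
}

static int my_request_scsi_dma_err(int irq, void *dev)	/* hypothetical */
{
	/*
	 * A NULL primary handler plus IRQF_ONESHOT keeps the level-triggered
	 * error interrupt masked until the threaded handler has finished.
	 */
	return request_threaded_irq(irq, NULL, my_scsi_dma_err_handler,
				    IRQF_ONESHOT, "ioasic-scsi-dma-err", dev);
}
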
index c0d1522d448f91591e06131454598232f931198a..8c8498159e434fa039f7ecae3e290ee0c31d6677 100644 (file)
@@ -14,7 +14,7 @@
 
 /* Maximum number of arguments supported.  Must be even!  */
 #define O32_ARGC       32
-/* Number of static registers we save. */
+/* Number of static registers we save.  */
 #define O32_STATC      11
 /* Frame size for both of the above.  */
 #define O32_FRAMESZ    (4 * O32_ARGC + SZREG * O32_STATC)
index 468f665de7bb731e5d8cf71dcef5c1461c146e48..4e1761e0a09af73a8e9477e09e0ce34e7a981a0e 100644 (file)
@@ -104,7 +104,7 @@ void __init prom_init(void)
        if (prom_is_rex(magic))
                rex_clear_cache();
 
-       /* Register the early console.  */
+       /* Register the early console.  */
        register_prom_console();
 
        /* Were we compiled with the right CPU option? */
index 0aadac74290031dffd4d49a3d80de9054118e93a..8c62316f22f438b4b9cc18b23585f3e0ad8b7258 100644 (file)
@@ -22,7 +22,7 @@ volatile unsigned long mem_err;               /* So we know an error occurred */
 
 /*
  * Probe memory in 4MB chunks, waiting for an error to tell us we've fallen
- * off the end of real memory. Only suitable for the 2100/3100's (PMAX).
+ * off the end of real memory.  Only suitable for the 2100/3100's (PMAX).
  */
 
 #define CHUNK_SIZE 0x400000
index 741cb4235bde2106e2e152bcd1caa2ca63c020b9..56e6e2c23683b328c22f51831908414750cf710a 100644 (file)
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(ioasic_base);
 /*
 * IRQ routing and priority tables.  Priorities are set as follows:
  *
- *             KN01    KN230   KN02    KN02-BA KN02-CA KN03
+ *             KN01    KN230   KN02    KN02-BA KN02-CA KN03
  *
  * MEMORY      CPU     CPU     CPU     ASIC    CPU     CPU
  * RTC         CPU     CPU     CPU     ASIC    CPU     CPU
@@ -413,7 +413,7 @@ static void __init dec_init_kn02(void)
 
 /*
  * Machine-specific initialisation for KN02-BA, aka DS5000/1xx
- * (xx = 20, 25, 33), aka 3min.         Also applies to KN04(-BA), aka
+ * (xx = 20, 25, 33), aka 3min.  Also applies to KN04(-BA), aka
  * DS5000/150, aka 4min.
  */
 static int kn02ba_interrupt[DEC_NR_INTS] __initdata = {
index 68f37e3eccc7f5032e98a7aa854eeceeb1783ace..c75025f27c201f02a9b85f65c2fdbbc529768325 100644 (file)
 /*
  * Cache Operations available on all MIPS processors with R4000-style caches
  */
-#define Index_Invalidate_I     0x00
-#define Index_Writeback_Inv_D  0x01
-#define Index_Load_Tag_I       0x04
-#define Index_Load_Tag_D       0x05
-#define Index_Store_Tag_I      0x08
-#define Index_Store_Tag_D      0x09
-#if defined(CONFIG_CPU_LOONGSON2)
-#define Hit_Invalidate_I       0x00
-#else
-#define Hit_Invalidate_I       0x10
-#endif
-#define Hit_Invalidate_D       0x11
-#define Hit_Writeback_Inv_D    0x15
+#define Index_Invalidate_I             0x00
+#define Index_Writeback_Inv_D          0x01
+#define Index_Load_Tag_I               0x04
+#define Index_Load_Tag_D               0x05
+#define Index_Store_Tag_I              0x08
+#define Index_Store_Tag_D              0x09
+#define Hit_Invalidate_I               0x10
+#define Hit_Invalidate_D               0x11
+#define Hit_Writeback_Inv_D            0x15
 
 /*
  * R4000-specific cacheops
  */
-#define Create_Dirty_Excl_D    0x0d
-#define Fill                   0x14
-#define Hit_Writeback_I                0x18
-#define Hit_Writeback_D                0x19
+#define Create_Dirty_Excl_D            0x0d
+#define Fill                           0x14
+#define Hit_Writeback_I                        0x18
+#define Hit_Writeback_D                        0x19
 
 /*
  * R4000SC and R4400SC-specific cacheops
  */
-#define Index_Invalidate_SI    0x02
-#define Index_Writeback_Inv_SD 0x03
-#define Index_Load_Tag_SI      0x06
-#define Index_Load_Tag_SD      0x07
-#define Index_Store_Tag_SI     0x0A
-#define Index_Store_Tag_SD     0x0B
-#define Create_Dirty_Excl_SD   0x0f
-#define Hit_Invalidate_SI      0x12
-#define Hit_Invalidate_SD      0x13
-#define Hit_Writeback_Inv_SD   0x17
-#define Hit_Writeback_SD       0x1b
-#define Hit_Set_Virtual_SI     0x1e
-#define Hit_Set_Virtual_SD     0x1f
+#define Index_Invalidate_SI            0x02
+#define Index_Writeback_Inv_SD         0x03
+#define Index_Load_Tag_SI              0x06
+#define Index_Load_Tag_SD              0x07
+#define Index_Store_Tag_SI             0x0A
+#define Index_Store_Tag_SD             0x0B
+#define Create_Dirty_Excl_SD           0x0f
+#define Hit_Invalidate_SI              0x12
+#define Hit_Invalidate_SD              0x13
+#define Hit_Writeback_Inv_SD           0x17
+#define Hit_Writeback_SD               0x1b
+#define Hit_Set_Virtual_SI             0x1e
+#define Hit_Set_Virtual_SD             0x1f
 
 /*
  * R5000-specific cacheops
  */
-#define R5K_Page_Invalidate_S  0x17
+#define R5K_Page_Invalidate_S          0x17
 
 /*
  * RM7000-specific cacheops
  */
-#define Page_Invalidate_T      0x16
-#define Index_Store_Tag_T      0x0a
-#define Index_Load_Tag_T       0x06
+#define Page_Invalidate_T              0x16
+#define Index_Store_Tag_T              0x0a
+#define Index_Load_Tag_T               0x06
 
 /*
  * R10000-specific cacheops
  * Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.
  * Most of the _S cacheops are identical to the R4000SC _SD cacheops.
  */
-#define Index_Writeback_Inv_S  0x03
-#define Index_Load_Tag_S       0x07
-#define Index_Store_Tag_S      0x0B
-#define Hit_Invalidate_S       0x13
-#define Cache_Barrier          0x14
-#define Hit_Writeback_Inv_S    0x17
-#define Index_Load_Data_I      0x18
-#define Index_Load_Data_D      0x19
-#define Index_Load_Data_S      0x1b
-#define Index_Store_Data_I     0x1c
-#define Index_Store_Data_D     0x1d
-#define Index_Store_Data_S     0x1f
+#define Index_Writeback_Inv_S          0x03
+#define Index_Load_Tag_S               0x07
+#define Index_Store_Tag_S              0x0B
+#define Hit_Invalidate_S               0x13
+#define Cache_Barrier                  0x14
+#define Hit_Writeback_Inv_S            0x17
+#define Index_Load_Data_I              0x18
+#define Index_Load_Data_D              0x19
+#define Index_Load_Data_S              0x1b
+#define Index_Store_Data_I             0x1c
+#define Index_Store_Data_D             0x1d
+#define Index_Store_Data_S             0x1f
+
+/*
+ * Loongson2-specific cacheops
+ */
+#define Hit_Invalidate_I_Loongson23    0x00
 
 #endif /* __ASM_CACHEOPS_H */
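
The new opcode is meant to be issued through the same cache_op() helper as the other definitions in this header. A sketch, assuming cache_op() from <asm/r4kcache.h> (the wrapper name is illustrative):

#include <asm/cacheops.h>
#include <asm/r4kcache.h>

/*
 * Invalidate one I-cache line on Loongson-2/3 cores, which encode
 * Hit_Invalidate_I as 0x00 instead of the usual 0x10.
 */
static inline void loongson23_hit_invalidate_icache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_I_Loongson23, addr);
}
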
index a6e505a0e44b4b119a6967fd5f8938ffb211a73e..be4d62a5a10ee203c8d2c9b16dc4ffda9551a493 100644 (file)
@@ -31,8 +31,6 @@ static inline u32 ioasic_read(unsigned int reg)
        return ioasic_base[reg / 4];
 }
 
-extern void clear_ioasic_dma_irq(unsigned int irq);
-
 extern void init_ioasic_irqs(int base);
 
 extern int dec_ioasic_clocksource_init(void);
index a8665a7611c2af3141313c8f21ad32eeae128dc2..8bd95971fe2dc98030565c6d41766ae1eb2b966a 100644 (file)
@@ -40,7 +40,7 @@
 #define IOASIC_FLOPPY  (11*IOASIC_SLOT_SIZE)   /* FDC (maxine) */
 #define IOASIC_SCSI    (12*IOASIC_SLOT_SIZE)   /* ASC SCSI */
 #define IOASIC_FDC_DMA (13*IOASIC_SLOT_SIZE)   /* FDC DMA (maxine) */
-#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE)  /* ??? */
+#define IOASIC_SCSI_DMA        (14*IOASIC_SLOT_SIZE)   /* ??? */
 #define IOASIC_RES_15  (15*IOASIC_SLOT_SIZE)   /* unused? */
 
 
index 0eb3241de7060883d6b72e2878da258d9b41caf2..88d9ffd742588b41c99975943c7769fb75f18d25 100644 (file)
 /*
  * System Control & Status Register bits.
  */
-#define KN01_CSR_MNFMOD                (1<<15) /* MNFMOD manufacturing jumper */
-#define KN01_CSR_STATUS                (1<<14) /* self-test result status output */
-#define KN01_CSR_PARDIS                (1<<13) /* parity error disable */
-#define KN01_CSR_CRSRTST       (1<<12) /* PCC test output */
-#define KN01_CSR_MONO          (1<<11) /* mono/color fb SIMM installed */
-#define KN01_CSR_MEMERR                (1<<10) /* write timeout error status & ack*/
+#define KN01_CSR_MNFMOD                (1<<15) /* MNFMOD manufacturing jumper */
+#define KN01_CSR_STATUS                (1<<14) /* self-test result status output */
+#define KN01_CSR_PARDIS                (1<<13) /* parity error disable */
+#define KN01_CSR_CRSRTST       (1<<12) /* PCC test output */
+#define KN01_CSR_MONO          (1<<11) /* mono/color fb SIMM installed */
+#define KN01_CSR_MEMERR                (1<<10) /* write timeout error status & ack*/
 #define KN01_CSR_VINT          (1<<9)  /* PCC area detect #2 status & ack */
 #define KN01_CSR_TXDIS         (1<<8)  /* DZ11 transmit disable */
 #define KN01_CSR_VBGTRG                (1<<2)  /* blue DAC voltage over green (r/o) */
index 69dc2a9a2d0f5602d91c4b83306f6f0952572a52..92c0fe2560997e901908eac961e8b27d34097f46 100644 (file)
@@ -68,7 +68,7 @@
 #define KN03CA_IO_SSR_ISDN_RST (1<<12)         /* ~ISDN (Am79C30A) reset */
 
 #define KN03CA_IO_SSR_FLOPPY_RST (1<<7)                /* ~FDC (82077) reset */
-#define KN03CA_IO_SSR_VIDEO_RST (1<<6)         /* ~framebuffer reset */
+#define KN03CA_IO_SSR_VIDEO_RST        (1<<6)          /* ~framebuffer reset */
 #define KN03CA_IO_SSR_AB_RST   (1<<5)          /* ACCESS.bus reset */
 #define KN03CA_IO_SSR_RES_4    (1<<4)          /* unused */
 #define KN03CA_IO_SSR_RES_3    (1<<4)          /* unused */
index 446577712bee23d45be3f9375a6d8ee8f2806a2e..c0ead63138453c04d3b20918fbcfb1e1c02c5c3c 100644 (file)
@@ -49,7 +49,7 @@
 
 #ifdef CONFIG_64BIT
 
-#define prom_is_rex(magic)     1       /* KN04 and KN05 are REX PROMs.  */
+#define prom_is_rex(magic)     1       /* KN04 and KN05 are REX PROMs.  */
 
 #else /* !CONFIG_64BIT */
 
index cf3ae2480b1d25f5dfd77e8e328e5745b413e2be..a66359ef4ece770e3ed1a6518e96b79e733b4afa 100644 (file)
@@ -331,6 +331,7 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
 #define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs)                  \
        dump_task_fpu(tsk, elf_fpregs)
 
+#define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE      PAGE_SIZE
 
 /* This yields a mask that user programs can use to figure out what
index 4d6d77ed9b9d679cd955e2fafe2dbc16527db65c..e194f957ca8c42a0af82f0621c425308a25e4d53 100644 (file)
@@ -22,7 +22,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm goto("1:\tnop\n\t"
+       asm_volatile_goto("1:\tnop\n\t"
                "nop\n\t"
                ".pushsection __jump_table,  \"aw\"\n\t"
                WORD_INSN " 1b, %l[l_yes], %0\n\t"
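
The asm goto to asm_volatile_goto change only affects how arch_static_branch() is emitted; the consumer-facing API is unchanged. For context, a hedged sketch of that API as it stood in this era (all names below are illustrative):

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Starts false, so the branch compiles down to the patched nop emitted above. */
static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

static void hot_path(void)
{
	if (static_key_false(&my_feature_key))
		pr_debug("rarely enabled feature path\n");
}

static void enable_my_feature(void)
{
	/* Rewrites the nop into a jump to the l_yes label at run time. */
	static_key_slow_inc(&my_feature_key);
}
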
diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h b/arch/mips/include/asm/mach-ath79/ar933x_uart_platform.h
deleted file mode 100644 (file)
index 6cb30f2..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- *  Platform data definition for Atheros AR933X UART
- *
- *  Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- */
-
-#ifndef _AR933X_UART_PLATFORM_H
-#define _AR933X_UART_PLATFORM_H
-
-struct ar933x_uart_platform_data {
-       unsigned        uartclk;
-};
-
-#endif /* _AR933X_UART_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
new file mode 100644 (file)
index 0000000..00867dd
--- /dev/null
@@ -0,0 +1,110 @@
+#ifndef __BCM47XX_BOARD_H
+#define __BCM47XX_BOARD_H
+
+enum bcm47xx_board {
+       BCM47XX_BOARD_ASUS_RTAC66U,
+       BCM47XX_BOARD_ASUS_RTN10,
+       BCM47XX_BOARD_ASUS_RTN10D,
+       BCM47XX_BOARD_ASUS_RTN10U,
+       BCM47XX_BOARD_ASUS_RTN12,
+       BCM47XX_BOARD_ASUS_RTN12B1,
+       BCM47XX_BOARD_ASUS_RTN12C1,
+       BCM47XX_BOARD_ASUS_RTN12D1,
+       BCM47XX_BOARD_ASUS_RTN12HP,
+       BCM47XX_BOARD_ASUS_RTN15U,
+       BCM47XX_BOARD_ASUS_RTN16,
+       BCM47XX_BOARD_ASUS_RTN53,
+       BCM47XX_BOARD_ASUS_RTN66U,
+       BCM47XX_BOARD_ASUS_WL300G,
+       BCM47XX_BOARD_ASUS_WL320GE,
+       BCM47XX_BOARD_ASUS_WL330GE,
+       BCM47XX_BOARD_ASUS_WL500GD,
+       BCM47XX_BOARD_ASUS_WL500GPV1,
+       BCM47XX_BOARD_ASUS_WL500GPV2,
+       BCM47XX_BOARD_ASUS_WL500W,
+       BCM47XX_BOARD_ASUS_WL520GC,
+       BCM47XX_BOARD_ASUS_WL520GU,
+       BCM47XX_BOARD_ASUS_WL700GE,
+       BCM47XX_BOARD_ASUS_WLHDD,
+
+       BCM47XX_BOARD_BELKIN_F7D4301,
+
+       BCM47XX_BOARD_BUFFALO_WBR2_G54,
+       BCM47XX_BOARD_BUFFALO_WHR2_A54G54,
+       BCM47XX_BOARD_BUFFALO_WHR_G125,
+       BCM47XX_BOARD_BUFFALO_WHR_G54S,
+       BCM47XX_BOARD_BUFFALO_WHR_HP_G54,
+       BCM47XX_BOARD_BUFFALO_WLA2_G54L,
+       BCM47XX_BOARD_BUFFALO_WZR_G300N,
+       BCM47XX_BOARD_BUFFALO_WZR_RS_G54,
+       BCM47XX_BOARD_BUFFALO_WZR_RS_G54HP,
+
+       BCM47XX_BOARD_CISCO_M10V1,
+       BCM47XX_BOARD_CISCO_M20V1,
+
+       BCM47XX_BOARD_DELL_TM2300,
+
+       BCM47XX_BOARD_DLINK_DIR130,
+       BCM47XX_BOARD_DLINK_DIR330,
+
+       BCM47XX_BOARD_HUAWEI_E970,
+
+       BCM47XX_BOARD_LINKSYS_E900V1,
+       BCM47XX_BOARD_LINKSYS_E1000V1,
+       BCM47XX_BOARD_LINKSYS_E1000V2,
+       BCM47XX_BOARD_LINKSYS_E1000V21,
+       BCM47XX_BOARD_LINKSYS_E1200V2,
+       BCM47XX_BOARD_LINKSYS_E2000V1,
+       BCM47XX_BOARD_LINKSYS_E3000V1,
+       BCM47XX_BOARD_LINKSYS_E3200V1,
+       BCM47XX_BOARD_LINKSYS_E4200V1,
+       BCM47XX_BOARD_LINKSYS_WRT150NV1,
+       BCM47XX_BOARD_LINKSYS_WRT150NV11,
+       BCM47XX_BOARD_LINKSYS_WRT160NV1,
+       BCM47XX_BOARD_LINKSYS_WRT160NV3,
+       BCM47XX_BOARD_LINKSYS_WRT300NV11,
+       BCM47XX_BOARD_LINKSYS_WRT310NV1,
+       BCM47XX_BOARD_LINKSYS_WRT310NV2,
+       BCM47XX_BOARD_LINKSYS_WRT54G3GV2,
+       BCM47XX_BOARD_LINKSYS_WRT610NV1,
+       BCM47XX_BOARD_LINKSYS_WRT610NV2,
+       BCM47XX_BOARD_LINKSYS_WRTSL54GS,
+
+       BCM47XX_BOARD_MOTOROLA_WE800G,
+       BCM47XX_BOARD_MOTOROLA_WR850GP,
+       BCM47XX_BOARD_MOTOROLA_WR850GV2V3,
+
+       BCM47XX_BOARD_NETGEAR_WGR614V8,
+       BCM47XX_BOARD_NETGEAR_WGR614V9,
+       BCM47XX_BOARD_NETGEAR_WNDR3300,
+       BCM47XX_BOARD_NETGEAR_WNDR3400V1,
+       BCM47XX_BOARD_NETGEAR_WNDR3400V2,
+       BCM47XX_BOARD_NETGEAR_WNDR3400VCNA,
+       BCM47XX_BOARD_NETGEAR_WNDR3700V3,
+       BCM47XX_BOARD_NETGEAR_WNDR4000,
+       BCM47XX_BOARD_NETGEAR_WNDR4500V1,
+       BCM47XX_BOARD_NETGEAR_WNDR4500V2,
+       BCM47XX_BOARD_NETGEAR_WNR2000,
+       BCM47XX_BOARD_NETGEAR_WNR3500L,
+       BCM47XX_BOARD_NETGEAR_WNR3500U,
+       BCM47XX_BOARD_NETGEAR_WNR3500V2,
+       BCM47XX_BOARD_NETGEAR_WNR3500V2VC,
+       BCM47XX_BOARD_NETGEAR_WNR834BV2,
+
+       BCM47XX_BOARD_PHICOMM_M1,
+
+       BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE,
+
+       BCM47XX_BOARD_ZTE_H218N,
+
+       BCM47XX_BOARD_UNKNOWN,
+       BCM47XX_BOARD_NO,
+};
+
+#define BCM47XX_BOARD_MAX_NAME 30
+
+void bcm47xx_board_detect(void);
+enum bcm47xx_board bcm47xx_board_get(void);
+const char *bcm47xx_board_get_name(void);
+
+#endif /* __BCM47XX_BOARD_H */
index b8e7be8f34dd472076b7ffd10d7de2d14c11b638..36a3fc1aa3ae326def39379e03db8258c24d82d3 100644 (file)
@@ -48,4 +48,6 @@ static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
                printk(KERN_WARNING "Can not parse mac address: %s\n", buf);
 }
 
+int bcm47xx_nvram_gpio_pin(const char *name);
+
 #endif /* __BCM47XX_NVRAM_H */
index 47fb247f9663b501796a7c255b32525c225cffed..f9f448650505bc95dc2b5e426c3acf4869ffbfc5 100644 (file)
@@ -52,23 +52,11 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 0;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-       BUG();
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
        return 1;
 }
 
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       BUG();
-       return 0;
-}
-
 dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
 
diff --git a/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h b/arch/mips/include/asm/mach-dec/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..acce27f
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ *     CPU feature overrides for DECstation systems.  Two variations
+ *     are generally applicable.
+ *
+ *     Copyright (C) 2013  Maciej W. Rozycki
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+#ifndef __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H
+
+/* Generic ones first.  */
+#define cpu_has_tlb                    1
+#define cpu_has_tx39_cache             0
+#define cpu_has_fpu                    1
+#define cpu_has_divec                  0
+#define cpu_has_prefetch               0
+#define cpu_has_mcheck                 0
+#define cpu_has_ejtag                  0
+#define cpu_has_mips16                 0
+#define cpu_has_mdmx                   0
+#define cpu_has_mips3d                 0
+#define cpu_has_smartmips              0
+#define cpu_has_rixi                   0
+#define cpu_has_vtag_icache            0
+#define cpu_has_ic_fills_f_dc          0
+#define cpu_has_pindexed_dcache                0
+#define cpu_has_local_ebase            0
+#define cpu_icache_snoops_remote_store 1
+#define cpu_has_mips_4                 0
+#define cpu_has_mips_5                 0
+#define cpu_has_mips32r1               0
+#define cpu_has_mips32r2               0
+#define cpu_has_mips64r1               0
+#define cpu_has_mips64r2               0
+#define cpu_has_dsp                    0
+#define cpu_has_mipsmt                 0
+#define cpu_has_userlocal              0
+
+/* R3k-specific ones.  */
+#ifdef CONFIG_CPU_R3000
+#define cpu_has_4kex                   0
+#define cpu_has_3k_cache               1
+#define cpu_has_4k_cache               0
+#define cpu_has_32fpr                  0
+#define cpu_has_counter                        0
+#define cpu_has_watch                  0
+#define cpu_has_vce                    0
+#define cpu_has_cache_cdex_p           0
+#define cpu_has_cache_cdex_s           0
+#define cpu_has_llsc                   0
+#define cpu_has_dc_aliases             0
+#define cpu_has_mips_2                 0
+#define cpu_has_mips_3                 0
+#define cpu_has_nofpuex                        1
+#define cpu_has_inclusive_pcaches      0
+#define cpu_dcache_line_size()         4
+#define cpu_icache_line_size()         4
+#define cpu_scache_line_size()         0
+#endif /* CONFIG_CPU_R3000 */
+
+/* R4k-specific ones.  */
+#ifdef CONFIG_CPU_R4X00
+#define cpu_has_4kex                   1
+#define cpu_has_3k_cache               0
+#define cpu_has_4k_cache               1
+#define cpu_has_32fpr                  1
+#define cpu_has_counter                        1
+#define cpu_has_watch                  1
+#define cpu_has_vce                    1
+#define cpu_has_cache_cdex_p           1
+#define cpu_has_cache_cdex_s           1
+#define cpu_has_llsc                   1
+#define cpu_has_dc_aliases             (PAGE_SIZE < 0x4000)
+#define cpu_has_mips_2                 1
+#define cpu_has_mips_3                 1
+#define cpu_has_nofpuex                        0
+#define cpu_has_inclusive_pcaches      1
+#define cpu_dcache_line_size()         16
+#define cpu_icache_line_size()         16
+#define cpu_scache_line_size()         32
+#endif /* CONFIG_CPU_R4X00 */
+
+#endif /* __ASM_MACH_DEC_CPU_FEATURE_OVERRIDES_H */
index 74cb99257d5b9b25dda5aec6fa1eaa1553612086..a9e8f6b62b0b9c9fa2a073f1aeb5c08f1d5a6894 100644 (file)
@@ -47,16 +47,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
 #ifdef CONFIG_DMA_COHERENT
index 06c441968e6ed4fcb31f6c71f506063d198bfb28..4ffddfdb50623bbbcbc2f5eec3ad9aabb7f78bcc 100644 (file)
@@ -58,16 +58,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
        return 1;               /* IP27 non-coherent mode is unsupported */
index 073f0c4760ba8cc3cc7cf59380dfbc58c6a409bd..104cfbc3ed63291ec51a6e13152257516cd1d85c 100644 (file)
@@ -80,17 +80,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-       return;
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
        return 0;               /* IP32 is non-coherent */
index 9fc1e9ad7038879840d73fc142d7a88b4ebde218..949003ef97b37d0d4023f3da08694c4409139a17 100644 (file)
@@ -48,16 +48,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
        return 0;
index e1433055fe98b5f6e876313bbdb98b23a2021e75..aeb2c05d61456de8b0143984fe1c9626e8acae6d 100644 (file)
@@ -53,16 +53,6 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
        return 1;
 }
 
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
 static inline int plat_device_is_coherent(struct device *dev)
 {
        return 0;
diff --git a/arch/mips/include/asm/mach-powertv/asic.h b/arch/mips/include/asm/mach-powertv/asic.h
deleted file mode 100644 (file)
index b341108..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef _ASM_MACH_POWERTV_ASIC_H
-#define _ASM_MACH_POWERTV_ASIC_H
-
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <asm/mach-powertv/asic_regs.h>
-
-#define DVR_CAPABLE    (1<<0)
-#define PCIE_CAPABLE   (1<<1)
-#define FFS_CAPABLE    (1<<2)
-#define DISPLAY_CAPABLE (1<<3)
-
-/* Platform Family types
- * For compatibility, new values must be added at the end */
-enum family_type {
-       FAMILY_8500,
-       FAMILY_8500RNG,
-       FAMILY_4500,
-       FAMILY_1500,
-       FAMILY_8600,
-       FAMILY_4600,
-       FAMILY_4600VZA,
-       FAMILY_8600VZB,
-       FAMILY_1500VZE,
-       FAMILY_1500VZF,
-       FAMILY_8700,
-       FAMILIES
-};
-
-/* Register maps for each ASIC */
-extern const struct register_map calliope_register_map;
-extern const struct register_map cronus_register_map;
-extern const struct register_map gaia_register_map;
-extern const struct register_map zeus_register_map;
-
-extern struct resource dvr_cronus_resources[];
-extern struct resource dvr_gaia_resources[];
-extern struct resource dvr_zeus_resources[];
-extern struct resource non_dvr_calliope_resources[];
-extern struct resource non_dvr_cronus_resources[];
-extern struct resource non_dvr_cronuslite_resources[];
-extern struct resource non_dvr_gaia_resources[];
-extern struct resource non_dvr_vz_calliope_resources[];
-extern struct resource non_dvr_vze_calliope_resources[];
-extern struct resource non_dvr_vzf_calliope_resources[];
-extern struct resource non_dvr_zeus_resources[];
-
-extern void powertv_platform_init(void);
-extern void platform_alloc_bootmem(void);
-extern enum asic_type platform_get_asic(void);
-extern enum family_type platform_get_family(void);
-extern int platform_supports_dvr(void);
-extern int platform_supports_ffs(void);
-extern int platform_supports_pcie(void);
-extern int platform_supports_display(void);
-extern void configure_platform(void);
-
-/* Platform Resources */
-#define ASIC_RESOURCE_GET_EXISTS 1
-extern struct resource *asic_resource_get(const char *name);
-extern void platform_release_memory(void *baddr, int size);
-
-/* USB configuration */
-struct usb_hcd;                        /* Forward reference */
-extern void platform_configure_usb_ehci(void);
-extern void platform_unconfigure_usb_ehci(void);
-extern void platform_configure_usb_ohci(void);
-extern void platform_unconfigure_usb_ohci(void);
-
-/* Resource for ASIC registers */
-extern struct resource asic_resource;
-extern int platform_usb_devices_init(struct platform_device **echi_dev,
-       struct platform_device **ohci_dev);
-
-/* Reboot Cause */
-extern void set_reboot_cause(char code, unsigned int data, unsigned int data2);
-extern void set_locked_reboot_cause(char code, unsigned int data,
-       unsigned int data2);
-
-enum sys_reboot_type {
-       sys_unknown_reboot = 0x00,      /* Unknown reboot cause */
-       sys_davic_change = 0x01,        /* Reboot due to change in DAVIC
-                                        * mode */
-       sys_user_reboot = 0x02,         /* Reboot initiated by user */
-       sys_system_reboot = 0x03,       /* Reboot initiated by OS */
-       sys_trap_reboot = 0x04,         /* Reboot due to a CPU trap */
-       sys_silent_reboot = 0x05,       /* Silent reboot */
-       sys_boot_ldr_reboot = 0x06,     /* Bootloader reboot */
-       sys_power_up_reboot = 0x07,     /* Power on bootup.  Older
-                                        * drivers may report as
-                                        * userReboot. */
-       sys_code_change = 0x08,         /* Reboot to take code change.
-                                        * Older drivers may report as
-                                        * userReboot. */
-       sys_hardware_reset = 0x09,      /* HW watchdog or front-panel
-                                        * reset button reset.  Older
-                                        * drivers may report as
-                                        * userReboot. */
-       sys_watchdogInterrupt = 0x0A    /* Pre-watchdog interrupt */
-};
-
-#endif /* _ASM_MACH_POWERTV_ASIC_H */
diff --git a/arch/mips/include/asm/mach-powertv/asic_reg_map.h b/arch/mips/include/asm/mach-powertv/asic_reg_map.h
deleted file mode 100644 (file)
index 20348e8..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- *                             asic_reg_map.h
- *
- * A macro-enclosed list of the elements for the register_map structure for
- * use in defining and manipulating the structure.
- *
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-REGISTER_MAP_ELEMENT(eic_slow0_strt_add)
-REGISTER_MAP_ELEMENT(eic_cfg_bits)
-REGISTER_MAP_ELEMENT(eic_ready_status)
-REGISTER_MAP_ELEMENT(chipver3)
-REGISTER_MAP_ELEMENT(chipver2)
-REGISTER_MAP_ELEMENT(chipver1)
-REGISTER_MAP_ELEMENT(chipver0)
-REGISTER_MAP_ELEMENT(uart1_intstat)
-REGISTER_MAP_ELEMENT(uart1_inten)
-REGISTER_MAP_ELEMENT(uart1_config1)
-REGISTER_MAP_ELEMENT(uart1_config2)
-REGISTER_MAP_ELEMENT(uart1_divisorhi)
-REGISTER_MAP_ELEMENT(uart1_divisorlo)
-REGISTER_MAP_ELEMENT(uart1_data)
-REGISTER_MAP_ELEMENT(uart1_status)
-REGISTER_MAP_ELEMENT(int_stat_3)
-REGISTER_MAP_ELEMENT(int_stat_2)
-REGISTER_MAP_ELEMENT(int_stat_1)
-REGISTER_MAP_ELEMENT(int_stat_0)
-REGISTER_MAP_ELEMENT(int_config)
-REGISTER_MAP_ELEMENT(int_int_scan)
-REGISTER_MAP_ELEMENT(ien_int_3)
-REGISTER_MAP_ELEMENT(ien_int_2)
-REGISTER_MAP_ELEMENT(ien_int_1)
-REGISTER_MAP_ELEMENT(ien_int_0)
-REGISTER_MAP_ELEMENT(int_level_3_3)
-REGISTER_MAP_ELEMENT(int_level_3_2)
-REGISTER_MAP_ELEMENT(int_level_3_1)
-REGISTER_MAP_ELEMENT(int_level_3_0)
-REGISTER_MAP_ELEMENT(int_level_2_3)
-REGISTER_MAP_ELEMENT(int_level_2_2)
-REGISTER_MAP_ELEMENT(int_level_2_1)
-REGISTER_MAP_ELEMENT(int_level_2_0)
-REGISTER_MAP_ELEMENT(int_level_1_3)
-REGISTER_MAP_ELEMENT(int_level_1_2)
-REGISTER_MAP_ELEMENT(int_level_1_1)
-REGISTER_MAP_ELEMENT(int_level_1_0)
-REGISTER_MAP_ELEMENT(int_level_0_3)
-REGISTER_MAP_ELEMENT(int_level_0_2)
-REGISTER_MAP_ELEMENT(int_level_0_1)
-REGISTER_MAP_ELEMENT(int_level_0_0)
-REGISTER_MAP_ELEMENT(int_docsis_en)
-REGISTER_MAP_ELEMENT(mips_pll_setup)
-REGISTER_MAP_ELEMENT(fs432x4b4_usb_ctl)
-REGISTER_MAP_ELEMENT(test_bus)
-REGISTER_MAP_ELEMENT(crt_spare)
-REGISTER_MAP_ELEMENT(usb2_ohci_int_mask)
-REGISTER_MAP_ELEMENT(usb2_strap)
-REGISTER_MAP_ELEMENT(ehci_hcapbase)
-REGISTER_MAP_ELEMENT(ohci_hc_revision)
-REGISTER_MAP_ELEMENT(bcm1_bs_lmi_steer)
-REGISTER_MAP_ELEMENT(usb2_control)
-REGISTER_MAP_ELEMENT(usb2_stbus_obc)
-REGISTER_MAP_ELEMENT(usb2_stbus_mess_size)
-REGISTER_MAP_ELEMENT(usb2_stbus_chunk_size)
-REGISTER_MAP_ELEMENT(pcie_regs)
-REGISTER_MAP_ELEMENT(tim_ch)
-REGISTER_MAP_ELEMENT(tim_cl)
-REGISTER_MAP_ELEMENT(gpio_dout)
-REGISTER_MAP_ELEMENT(gpio_din)
-REGISTER_MAP_ELEMENT(gpio_dir)
-REGISTER_MAP_ELEMENT(watchdog)
-REGISTER_MAP_ELEMENT(front_panel)
-REGISTER_MAP_ELEMENT(misc_clk_ctl1)
-REGISTER_MAP_ELEMENT(misc_clk_ctl2)
-REGISTER_MAP_ELEMENT(crt_ext_ctl)
-REGISTER_MAP_ELEMENT(register_maps)
diff --git a/arch/mips/include/asm/mach-powertv/asic_regs.h b/arch/mips/include/asm/mach-powertv/asic_regs.h
deleted file mode 100644 (file)
index 06712ab..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef __ASM_MACH_POWERTV_ASIC_H_
-#define __ASM_MACH_POWERTV_ASIC_H_
-#include <linux/io.h>
-
-/* ASIC types */
-enum asic_type {
-       ASIC_UNKNOWN,
-       ASIC_ZEUS,
-       ASIC_CALLIOPE,
-       ASIC_CRONUS,
-       ASIC_CRONUSLITE,
-       ASIC_GAIA,
-       ASICS                   /* Number of supported ASICs */
-};
-
-/* hardcoded values read from Chip Version registers */
-#define CRONUS_10      0x0B4C1C20
-#define CRONUS_11      0x0B4C1C21
-#define CRONUSLITE_10  0x0B4C1C40
-
-#define NAND_FLASH_BASE                0x03000000
-#define CALLIOPE_IO_BASE       0x08000000
-#define GAIA_IO_BASE           0x09000000
-#define CRONUS_IO_BASE         0x09000000
-#define ZEUS_IO_BASE           0x09000000
-
-#define ASIC_IO_SIZE           0x01000000
-
-/* Definitions for backward compatibility */
-#define UART1_INTSTAT  uart1_intstat
-#define UART1_INTEN    uart1_inten
-#define UART1_CONFIG1  uart1_config1
-#define UART1_CONFIG2  uart1_config2
-#define UART1_DIVISORHI uart1_divisorhi
-#define UART1_DIVISORLO uart1_divisorlo
-#define UART1_DATA     uart1_data
-#define UART1_STATUS   uart1_status
-
-/* ASIC register enumeration */
-union register_map_entry {
-       unsigned long phys;
-       u32 *virt;
-};
-
-#define REGISTER_MAP_ELEMENT(x) union register_map_entry x;
-struct register_map {
-#include <asm/mach-powertv/asic_reg_map.h>
-};
-#undef REGISTER_MAP_ELEMENT
-
-/**
- * register_map_offset_phys - add an offset to the physical address
- * @map:       Pointer to the &struct register_map
- * @offset:    Value to add
- *
- * Only adds the base to non-zero physical addresses
- */
-static inline void register_map_offset_phys(struct register_map *map,
-       unsigned long offset)
-{
-#define REGISTER_MAP_ELEMENT(x)                do {                            \
-               if (map->x.phys != 0)                                   \
-                       map->x.phys += offset;                          \
-       } while (false);
-
-#include <asm/mach-powertv/asic_reg_map.h>
-#undef REGISTER_MAP_ELEMENT
-}
-
-/**
- * register_map_virtualize - Convert &register_map to virtual addresses
- * @map:       Pointer to &register_map to virtualize
- */
-static inline void register_map_virtualize(struct register_map *map)
-{
-#define REGISTER_MAP_ELEMENT(x)                do {                            \
-               map->x.virt = (!map->x.phys) ? NULL :                   \
-                       UNCAC_ADDR(phys_to_virt(map->x.phys));          \
-       } while (false);
-
-#include <asm/mach-powertv/asic_reg_map.h>
-#undef REGISTER_MAP_ELEMENT
-}
-
-extern struct register_map _asic_register_map;
-extern unsigned long asic_phy_base;
-
-/*
- * Macros to interface to registers through their ioremapped address
- * asic_reg_phys_addr  Returns the physical address of the given register
- * asic_reg_addr       Returns the iomapped virtual address of the given
- *                     register.
- */
-#define asic_reg_addr(x)       (_asic_register_map.x.virt)
-#define asic_reg_phys_addr(x)  (virt_to_phys((void *) CAC_ADDR(        \
-                                       (unsigned long) asic_reg_addr(x))))
-
-/*
- * The asic_reg macro is gone. It should be replaced by either asic_read or
- * asic_write, as appropriate.
- */
-
-#define asic_read(x)           readl(asic_reg_addr(x))
-#define asic_write(v, x)       writel(v, asic_reg_addr(x))
-
-extern void asic_irq_init(void);
-#endif
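A note on the register-map construction in the asic_regs.h header removed above: struct register_map is built with an x-macro, where REGISTER_MAP_ELEMENT is defined, asic_reg_map.h is included to expand the element list, and the macro is undefined again, once per expansion. The standalone sketch below shows the same pattern with a made-up three-entry list kept in a single list macro instead of a separate header; none of the names are the kernel's, and the offset logic only mimics register_map_offset_phys().

#include <stdio.h>

/* One list of fields, expanded differently each time it is used. */
#define REGISTER_LIST(X) \
        X(chip_id)       \
        X(irq_status)    \
        X(uart_data)

/* Expansion 1: declare the struct members. */
#define DECLARE_MEMBER(name) unsigned long name;
struct register_map {
        REGISTER_LIST(DECLARE_MEMBER)
};
#undef DECLARE_MEMBER

/* Expansion 2: add an offset to every non-zero member, as the removed
 * register_map_offset_phys() does for physical addresses. */
#define ADD_OFFSET(name) do { if (map->name) map->name += offset; } while (0);
static void map_add_offset(struct register_map *map, unsigned long offset)
{
        REGISTER_LIST(ADD_OFFSET)
}
#undef ADD_OFFSET

int main(void)
{
        struct register_map map = { .chip_id = 0x100, .irq_status = 0 };

        map_add_offset(&map, 0x09000000);
        printf("chip_id=%#lx irq_status=%#lx\n", map.chip_id, map.irq_status);
        return 0;
}

Zero entries stay zero, non-zero entries get the base added, which is why the kernel variant can use the same element list both to declare the structure and to walk it.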
diff --git a/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h b/arch/mips/include/asm/mach-powertv/cpu-feature-overrides.h
deleted file mode 100644 (file)
index 58c76ec..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2010  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_
-#define _ASM_MACH_POWERTV_CPU_FEATURE_OVERRIDES_H_
-#define cpu_has_tlb                    1
-#define cpu_has_4kex                   1
-#define cpu_has_3k_cache               0
-#define cpu_has_4k_cache               1
-#define cpu_has_tx39_cache             0
-#define cpu_has_fpu                    0
-#define cpu_has_counter                        1
-#define cpu_has_watch                  1
-#define cpu_has_divec                  1
-#define cpu_has_vce                    0
-#define cpu_has_cache_cdex_p           0
-#define cpu_has_cache_cdex_s           0
-#define cpu_has_mcheck                 1
-#define cpu_has_ejtag                  1
-#define cpu_has_llsc                   1
-#define cpu_has_mips16                 0
-#define cpu_has_mdmx                   0
-#define cpu_has_mips3d                 0
-#define cpu_has_smartmips              0
-#define cpu_has_vtag_icache            0
-#define cpu_has_dc_aliases             0
-#define cpu_has_ic_fills_f_dc          0
-#define cpu_has_mips32r1               0
-#define cpu_has_mips32r2               1
-#define cpu_has_mips64r1               0
-#define cpu_has_mips64r2               0
-#define cpu_has_dsp                    0
-#define cpu_has_dsp2                   0
-#define cpu_has_mipsmt                 0
-#define cpu_has_userlocal              0
-#define cpu_has_nofpuex                        0
-#define cpu_has_64bits                 0
-#define cpu_has_64bit_zero_reg         0
-#define cpu_has_vint                   1
-#define cpu_has_veic                   1
-#define cpu_has_inclusive_pcaches      0
-
-#define cpu_dcache_line_size()         32
-#define cpu_icache_line_size()         32
-#endif
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
deleted file mode 100644 (file)
index f831672..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Version from mach-generic modified to support PowerTV port
- * Portions Copyright (C) 2009 Cisco Systems, Inc.
- * Copyright (C) 2006  Ralf Baechle <ralf@linux-mips.org>
- *
- */
-
-#ifndef __ASM_MACH_POWERTV_DMA_COHERENCE_H
-#define __ASM_MACH_POWERTV_DMA_COHERENCE_H
-
-#include <linux/sched.h>
-#include <linux/device.h>
-#include <asm/mach-powertv/asic.h>
-
-static inline bool is_kseg2(void *addr)
-{
-       return (unsigned long)addr >= KSEG2;
-}
-
-static inline unsigned long virt_to_phys_from_pte(void *addr)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ptep, pte;
-
-       unsigned long virt_addr = (unsigned long)addr;
-       unsigned long phys_addr = 0UL;
-
-       /* get the page global directory. */
-       pgd = pgd_offset_k(virt_addr);
-
-       if (!pgd_none(*pgd)) {
-               /* get the page upper directory */
-               pud = pud_offset(pgd, virt_addr);
-               if (!pud_none(*pud)) {
-                       /* get the page middle directory */
-                       pmd = pmd_offset(pud, virt_addr);
-                       if (!pmd_none(*pmd)) {
-                               /* get a pointer to the page table entry */
-                               ptep = pte_offset(pmd, virt_addr);
-                               pte = *ptep;
-                               /* check for a valid page */
-                               if (pte_present(pte)) {
-                                       /* get the physical address the page is
-                                        * referring to */
-                                       phys_addr = (unsigned long)
-                                               page_to_phys(pte_page(pte));
-                                       /* add the offset within the page */
-                                       phys_addr |= (virt_addr & ~PAGE_MASK);
-                               }
-                       }
-               }
-       }
-
-       return phys_addr;
-}
-
-static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
-       size_t size)
-{
-       if (is_kseg2(addr))
-               return phys_to_dma(virt_to_phys_from_pte(addr));
-       else
-               return phys_to_dma(virt_to_phys(addr));
-}
-
-static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
-       struct page *page)
-{
-       return phys_to_dma(page_to_phys(page));
-}
-
-static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
-       dma_addr_t dma_addr)
-{
-       return dma_to_phys(dma_addr);
-}
-
-static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
-       size_t size, enum dma_data_direction direction)
-{
-}
-
-static inline int plat_dma_supported(struct device *dev, u64 mask)
-{
-       /*
-        * we fall back to GFP_DMA when the mask isn't all 1s,
-        * so we can't guarantee allocations that must be
-        * within a tighter range than GFP_DMA..
-        */
-       if (mask < DMA_BIT_MASK(24))
-               return 0;
-
-       return 1;
-}
-
-static inline void plat_extra_sync_for_device(struct device *dev)
-{
-}
-
-static inline int plat_dma_mapping_error(struct device *dev,
-                                        dma_addr_t dma_addr)
-{
-       return 0;
-}
-
-static inline int plat_device_is_coherent(struct device *dev)
-{
-       return 0;
-}
-
-#endif /* __ASM_MACH_POWERTV_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-powertv/interrupts.h b/arch/mips/include/asm/mach-powertv/interrupts.h
deleted file mode 100644 (file)
index 6c463be..0000000
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef _ASM_MACH_POWERTV_INTERRUPTS_H_
-#define _ASM_MACH_POWERTV_INTERRUPTS_H_
-
-/*
- * Defines for all of the interrupt lines
- */
-
-/* Definitions for backward compatibility */
-#define kIrq_Uart1             irq_uart1
-
-#define ibase 0
-
-/*------------- Register: int_stat_3 */
-/* 126 unused (bit 31) */
-#define irq_asc2video          (ibase+126)     /* ASC 2 Video Interrupt */
-#define irq_asc1video          (ibase+125)     /* ASC 1 Video Interrupt */
-#define irq_comms_block_wd     (ibase+124)     /* Comms Block Watchdog Interrupt */
-#define irq_fdma_mailbox       (ibase+123)     /* FDMA Mailbox Output */
-#define irq_fdma_gp            (ibase+122)     /* FDMA GP Output */
-#define irq_mips_pic           (ibase+121)     /* MIPS Performance Counter
-                                                * Interrupt */
-#define irq_mips_timer         (ibase+120)     /* MIPS Timer Interrupt */
-#define irq_memory_protect     (ibase+119)     /* Memory Protection Interrupt
-                                                * -- Ored by glue logic inside
-                                                *  SPARC ILC (see
-                                                *  INT_MEM_PROT_STAT, below,
-                                                *  for individual interrupts)
-                                                */
-/* 118 unused (bit 22) */
-#define irq_sbag               (ibase+117)     /* SBAG Interrupt -- Ored by
-                                                * glue logic inside SPARC ILC
-                                                * (see INT_SBAG_STAT, below,
-                                                * for individual interrupts) */
-#define irq_qam_b_fec          (ibase+116)     /* QAM  B FEC Interrupt */
-#define irq_qam_a_fec          (ibase+115)     /* QAM A FEC Interrupt */
-/* 114 unused  (bit 18) */
-#define irq_mailbox            (ibase+113)     /* Mailbox Debug Interrupt  --
-                                                * Ored by glue logic inside
-                                                * SPARC ILC (see
-                                                * INT_MAILBOX_STAT, below, for
-                                                * individual interrupts) */
-#define irq_fuse_stat1         (ibase+112)     /* Fuse Status 1 */
-#define irq_fuse_stat2         (ibase+111)     /* Fuse Status 2 */
-#define irq_fuse_stat3         (ibase+110)     /* Blitter Interrupt / Fuse
-                                                * Status 3 */
-#define irq_blitter            (ibase+110)     /* Blitter Interrupt / Fuse
-                                                * Status 3 */
-#define irq_avc1_pp0           (ibase+109)     /* AVC Decoder #1 PP0
-                                                * Interrupt */
-#define irq_avc1_pp1           (ibase+108)     /* AVC Decoder #1 PP1
-                                                * Interrupt */
-#define irq_avc1_mbe           (ibase+107)     /* AVC Decoder #1 MBE
-                                                * Interrupt */
-#define irq_avc2_pp0           (ibase+106)     /* AVC Decoder #2 PP0
-                                                * Interrupt */
-#define irq_avc2_pp1           (ibase+105)     /* AVC Decoder #2 PP1
-                                                * Interrupt */
-#define irq_avc2_mbe           (ibase+104)     /* AVC Decoder #2 MBE
-                                                * Interrupt */
-#define irq_zbug_spi           (ibase+103)     /* Zbug SPI Slave Interrupt */
-#define irq_qam_mod2           (ibase+102)     /* QAM Modulator 2 DMA
-                                                * Interrupt */
-#define irq_ir_rx              (ibase+101)     /* IR RX 2 Interrupt */
-#define irq_aud_dsp2           (ibase+100)     /* Audio DSP #2 Interrupt */
-#define irq_aud_dsp1           (ibase+99)      /* Audio DSP #1 Interrupt */
-#define irq_docsis             (ibase+98)      /* DOCSIS Debug Interrupt */
-#define irq_sd_dvp1            (ibase+97)      /* SD DVP #1 Interrupt */
-#define irq_sd_dvp2            (ibase+96)      /* SD DVP #2 Interrupt */
-/*------------- Register: int_stat_2 */
-#define irq_hd_dvp             (ibase+95)      /* HD DVP Interrupt */
-#define kIrq_Prewatchdog       (ibase+94)      /* watchdog Pre-Interrupt */
-#define irq_timer2             (ibase+93)      /* Programmable Timer
-                                                * Interrupt 2 */
-#define irq_1394               (ibase+92)      /* 1394 Firewire Interrupt */
-#define irq_usbohci            (ibase+91)      /* USB 2.0 OHCI Interrupt */
-#define irq_usbehci            (ibase+90)      /* USB 2.0 EHCI Interrupt */
-#define irq_pciexp             (ibase+89)      /* PCI Express 0 Interrupt */
-#define irq_pciexp0            (ibase+89)      /* PCI Express 0 Interrupt */
-#define irq_afe1               (ibase+88)      /* AFE 1 Interrupt */
-#define irq_sata               (ibase+87)      /* SATA 1 Interrupt */
-#define irq_sata1              (ibase+87)      /* SATA 1 Interrupt */
-#define irq_dtcp               (ibase+86)      /* DTCP Interrupt */
-#define irq_pciexp1            (ibase+85)      /* PCI Express 1 Interrupt */
-/* 84 unused   (bit 20) */
-/* 83 unused   (bit 19) */
-/* 82 unused   (bit 18) */
-#define irq_sata2              (ibase+81)      /* SATA2 Interrupt */
-#define irq_uart2              (ibase+80)      /* UART2 Interrupt */
-#define irq_legacy_usb         (ibase+79)      /* Legacy USB Host ISR (1.1
-                                                * Host module) */
-#define irq_pod                        (ibase+78)      /* POD Interrupt */
-#define irq_slave_usb          (ibase+77)      /* Slave USB */
-#define irq_denc1              (ibase+76)      /* DENC #1 VTG Interrupt */
-#define irq_vbi_vtg            (ibase+75)      /* VBI VTG Interrupt */
-#define irq_afe2               (ibase+74)      /* AFE 2 Interrupt */
-#define irq_denc2              (ibase+73)      /* DENC #2 VTG Interrupt */
-#define irq_asc2               (ibase+72)      /* ASC #2 Interrupt */
-#define irq_asc1               (ibase+71)      /* ASC #1 Interrupt */
-#define irq_mod_dma            (ibase+70)      /* Modulator DMA Interrupt */
-#define irq_byte_eng1          (ibase+69)      /* Byte Engine Interrupt [1] */
-#define irq_byte_eng0          (ibase+68)      /* Byte Engine Interrupt [0] */
-/* 67 unused   (bit 03) */
-/* 66 unused   (bit 02) */
-/* 65 unused   (bit 01) */
-/* 64 unused   (bit 00) */
-/*------------- Register: int_stat_1 */
-/* 63 unused   (bit 31) */
-/* 62 unused   (bit 30) */
-/* 61 unused   (bit 29) */
-/* 60 unused   (bit 28) */
-/* 59 unused   (bit 27) */
-/* 58 unused   (bit 26) */
-/* 57 unused   (bit 25) */
-/* 56 unused   (bit 24) */
-#define irq_buf_dma_mem2mem    (ibase+55)      /* BufDMA Memory to Memory
-                                                * Interrupt */
-#define irq_buf_dma_usbtransmit (ibase+54)     /* BufDMA USB Transmit
-                                                * Interrupt */
-#define irq_buf_dma_qpskpodtransmit (ibase+53) /* BufDMA QPSK/POD Transmit
-                                                * Interrupt */
-#define irq_buf_dma_transmit_error (ibase+52)  /* BufDMA Transmit Error
-                                                * Interrupt */
-#define irq_buf_dma_usbrecv    (ibase+51)      /* BufDMA USB Receive
-                                                * Interrupt */
-#define irq_buf_dma_qpskpodrecv (ibase+50)     /* BufDMA QPSK/POD Receive
-                                                * Interrupt */
-#define irq_buf_dma_recv_error (ibase+49)      /* BufDMA Receive Error
-                                                * Interrupt */
-#define irq_qamdma_transmit_play (ibase+48)    /* QAMDMA Transmit/Play
-                                                * Interrupt */
-#define irq_qamdma_transmit_error (ibase+47)   /* QAMDMA Transmit Error
-                                                * Interrupt */
-#define irq_qamdma_recv2high   (ibase+46)      /* QAMDMA Receive 2 High
-                                                * (Chans 63-32) */
-#define irq_qamdma_recv2low    (ibase+45)      /* QAMDMA Receive 2 Low
-                                                * (Chans 31-0) */
-#define irq_qamdma_recv1high   (ibase+44)      /* QAMDMA Receive 1 High
-                                                * (Chans 63-32) */
-#define irq_qamdma_recv1low    (ibase+43)      /* QAMDMA Receive 1 Low
-                                                * (Chans 31-0) */
-#define irq_qamdma_recv_error  (ibase+42)      /* QAMDMA Receive Error
-                                                * Interrupt */
-#define irq_mpegsplice         (ibase+41)      /* MPEG Splice Interrupt */
-#define irq_deinterlace_rdy    (ibase+40)      /* Deinterlacer Frame Ready
-                                                * Interrupt */
-#define irq_ext_in0            (ibase+39)      /* External Interrupt irq_in0 */
-#define irq_gpio3              (ibase+38)      /* GP I/O IRQ 3 - From GP I/O
-                                                * Module */
-#define irq_gpio2              (ibase+37)      /* GP I/O IRQ 2 - From GP I/O
-                                                * Module (ABE_intN) */
-#define irq_pcrcmplt1          (ibase+36)      /* PCR Capture Complete  or
-                                                * Discontinuity 1 */
-#define irq_pcrcmplt2          (ibase+35)      /* PCR Capture Complete or
-                                                * Discontinuity 2 */
-#define irq_parse_peierr       (ibase+34)      /* PID Parser Error Detect
-                                                * (PEI) */
-#define irq_parse_cont_err     (ibase+33)      /* PID Parser continuity error
-                                                * detect */
-#define irq_ds1framer          (ibase+32)      /* DS1 Framer Interrupt */
-/*------------- Register: int_stat_0 */
-#define irq_gpio1              (ibase+31)      /* GP I/O IRQ 1 - From GP I/O
-                                                * Module */
-#define irq_gpio0              (ibase+30)      /* GP I/O IRQ 0 - From GP I/O
-                                                * Module */
-#define irq_qpsk_out_aloha     (ibase+29)      /* QPSK Output Slotted Aloha
-                                                * (chan 3) Transmission
-                                                * Completed OK */
-#define irq_qpsk_out_tdma      (ibase+28)      /* QPSK Output TDMA (chan 2)
-                                                * Transmission Completed OK */
-#define irq_qpsk_out_reserve   (ibase+27)      /* QPSK Output Reservation
-                                                * (chan 1) Transmission
-                                                * Completed OK */
-#define irq_qpsk_out_aloha_err (ibase+26)      /* QPSK Output Slotted Aloha
-                                                * (chan 3)Transmission
-                                                * completed with Errors. */
-#define irq_qpsk_out_tdma_err  (ibase+25)      /* QPSK Output TDMA (chan 2)
-                                                * Transmission completed with
-                                                * Errors. */
-#define irq_qpsk_out_rsrv_err  (ibase+24)      /* QPSK Output Reservation
-                                                * (chan 1) Transmission
-                                                * completed with Errors */
-#define irq_aloha_fail         (ibase+23)      /* Unsuccessful Resend of Aloha
-                                                * for N times. Aloha retry
-                                                * timeout for channel 3. */
-#define irq_timer1             (ibase+22)      /* Programmable Timer
-                                                * Interrupt */
-#define irq_keyboard           (ibase+21)      /* Keyboard Module Interrupt */
-#define irq_i2c                        (ibase+20)      /* I2C Module Interrupt */
-#define irq_spi                        (ibase+19)      /* SPI Module Interrupt */
-#define irq_irblaster          (ibase+18)      /* IR Blaster Interrupt */
-#define irq_splice_detect      (ibase+17)      /* PID Key Change Interrupt or
-                                                * Splice Detect Interrupt */
-#define irq_se_micro           (ibase+16)      /* Secure Micro I/F Module
-                                                * Interrupt */
-#define irq_uart1              (ibase+15)      /* UART Interrupt */
-#define irq_irrecv             (ibase+14)      /* IR Receiver Interrupt */
-#define irq_host_int1          (ibase+13)      /* Host-to-Host Interrupt 1 */
-#define irq_host_int0          (ibase+12)      /* Host-to-Host Interrupt 0 */
-#define irq_qpsk_hecerr                (ibase+11)      /* QPSK HEC Error Interrupt */
-#define irq_qpsk_crcerr                (ibase+10)      /* QPSK AAL-5 CRC Error
-                                                * Interrupt */
-/* 9 unused    (bit 09) */
-/* 8 unused    (bit 08) */
-#define irq_psicrcerr          (ibase+7)       /* QAM PSI CRC Error
-                                                * Interrupt */
-#define irq_psilength_err      (ibase+6)       /* QAM PSI Length Error
-                                                * Interrupt */
-#define irq_esfforward         (ibase+5)       /* ESF Interrupt Mark From
-                                                * Forward Path Reference -
-                                                * every 3ms when forward Mbits
-                                                * and forward slot control
-                                                * bytes are updated. */
-#define irq_esfreverse         (ibase+4)       /* ESF Interrupt Mark from
-                                                * Reverse Path Reference -
-                                                * delayed from forward mark by
-                                                * the ranging delay plus a
-                                                * fixed amount. When reverse
-                                                * Mbits and reverse slot
-                                                * control bytes are updated.
-                                                * Occurs every 3ms for 3.0M and
-                                                * 1.554 M upstream rates and
-                                                * every 6 ms for 256K upstream
-                                                * rate. */
-#define irq_aloha_timeout      (ibase+3)       /* Slotted-Aloha timeout on
-                                                * Channel 1. */
-#define irq_reservation                (ibase+2)       /* Partial (or Incremental)
-                                                * Reservation Message Completed
-                                                * or Slotted aloha verify for
-                                                * channel 1. */
-#define irq_aloha3             (ibase+1)       /* Slotted-Aloha Message Verify
-                                                * Interrupt or Reservation
-                                                * increment completed for
-                                                * channel 3. */
-#define irq_mpeg_d             (ibase+0)       /* MPEG Decoder Interrupt */
-#endif /* _ASM_MACH_POWERTV_INTERRUPTS_H_ */
diff --git a/arch/mips/include/asm/mach-powertv/ioremap.h b/arch/mips/include/asm/mach-powertv/ioremap.h
deleted file mode 100644 (file)
index c86ef09..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- *
- * Portions Copyright (C)  Cisco Systems, Inc.
- */
-#ifndef __ASM_MACH_POWERTV_IOREMAP_H
-#define __ASM_MACH_POWERTV_IOREMAP_H
-
-#include <linux/types.h>
-#include <linux/log2.h>
-#include <linux/compiler.h>
-
-#include <asm/pgtable-bits.h>
-#include <asm/addrspace.h>
-
-/* We're going to mess with bits, so get sizes */
-#define IOR_BPC                        8                       /* Bits per char */
-#define IOR_PHYS_BITS          (IOR_BPC * sizeof(phys_addr_t))
-#define IOR_DMA_BITS           (IOR_BPC * sizeof(dma_addr_t))
-
-/*
- * Define the granularity of physical/DMA mapping in terms of the number
- * of bits that defines the offset within a grain. These will be the
- * least significant bits of the address. The rest of a physical or DMA
- * address will be used to index into an appropriate table to find the
- * offset to add to the address to yield the corresponding DMA or physical
- * address, respectively.
- */
-#define IOR_LSBITS             22                      /* Bits in a grain */
-
-/*
- * Compute the number of most significant address bits after removing those
- * used for the offset within a grain and then compute the number of table
- * entries for the conversion.
- */
-#define IOR_PHYS_MSBITS                (IOR_PHYS_BITS - IOR_LSBITS)
-#define IOR_NUM_PHYS_TO_DMA    ((phys_addr_t) 1 << IOR_PHYS_MSBITS)
-
-#define IOR_DMA_MSBITS         (IOR_DMA_BITS - IOR_LSBITS)
-#define IOR_NUM_DMA_TO_PHYS    ((dma_addr_t) 1 << IOR_DMA_MSBITS)
-
-/*
- * Define data structures used as elements in the arrays for the conversion
- * between physical and DMA addresses. We do some slightly fancy math to
- * compute the width of the offset element of the conversion tables so
- * that we can have the smallest conversion tables. Next, round up the
- * sizes to the next higher power of two, i.e. the offset element will have
- * 8, 16, 32, 64, etc. bits. This eliminates the need to mask off any
- * bits.  Finally, we compute a shift value that puts the most significant
- * bits of the offset into the most significant bits of the offset element.
- * This makes it more efficient on processors without barrel shifters and
- * easier to see the values if the conversion table is dumped in binary.
- */
-#define _IOR_OFFSET_WIDTH(n)   (1 << order_base_2(n))
-#define IOR_OFFSET_WIDTH(n) \
-       (_IOR_OFFSET_WIDTH(n) < 8 ? 8 : _IOR_OFFSET_WIDTH(n))
-
-#define IOR_PHYS_OFFSET_BITS   IOR_OFFSET_WIDTH(IOR_PHYS_MSBITS)
-#define IOR_PHYS_SHIFT         (IOR_PHYS_BITS - IOR_PHYS_OFFSET_BITS)
-
-#define IOR_DMA_OFFSET_BITS    IOR_OFFSET_WIDTH(IOR_DMA_MSBITS)
-#define IOR_DMA_SHIFT          (IOR_DMA_BITS - IOR_DMA_OFFSET_BITS)
-
-struct ior_phys_to_dma {
-       dma_addr_t offset:IOR_DMA_OFFSET_BITS __packed
-               __aligned((IOR_DMA_OFFSET_BITS / IOR_BPC));
-};
-
-struct ior_dma_to_phys {
-       dma_addr_t offset:IOR_PHYS_OFFSET_BITS __packed
-               __aligned((IOR_PHYS_OFFSET_BITS / IOR_BPC));
-};
-
-extern struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
-extern struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
-
-static inline dma_addr_t _phys_to_dma_offset_raw(phys_addr_t phys)
-{
-       return (dma_addr_t)_ior_phys_to_dma[phys >> IOR_LSBITS].offset;
-}
-
-static inline dma_addr_t _dma_to_phys_offset_raw(dma_addr_t dma)
-{
-       return (dma_addr_t)_ior_dma_to_phys[dma >> IOR_LSBITS].offset;
-}
-
-/* These are not portable and should not be used in drivers. Drivers should
- * be using ioremap() and friends to map physical addresses to virtual
- * addresses and dma_map*() and friends to map virtual addresses into DMA
- * addresses and back.
- */
-static inline dma_addr_t phys_to_dma(phys_addr_t phys)
-{
-       return phys + (_phys_to_dma_offset_raw(phys) << IOR_PHYS_SHIFT);
-}
-
-static inline phys_addr_t dma_to_phys(dma_addr_t dma)
-{
-       return dma + (_dma_to_phys_offset_raw(dma) << IOR_DMA_SHIFT);
-}
-
-extern void ioremap_add_map(dma_addr_t phys, phys_addr_t alias,
-       dma_addr_t size);
-
-/*
- * Allow physical addresses to be fixed up to help peripherals located
- * outside the low 32-bit range -- generic pass-through version.
- */
-static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
-{
-       return phys_addr;
-}
-
-/*
- * Handle the special case of addresses in the area aliased into the first
- * 512 MiB of the processor's physical address space. These turn into either
- * kseg0 or kseg1 addresses, depending on flags.
- */
-static inline void __iomem *plat_ioremap(phys_t start, unsigned long size,
-       unsigned long flags)
-{
-       phys_addr_t start_offset;
-       void __iomem *result = NULL;
-
-       /* Start by checking to see whether this is an aliased address */
-       start_offset = _dma_to_phys_offset_raw(start);
-
-       /*
-        * If:
-        * o    the memory is aliased into the first 512 MiB, and
-        * o    the start and end are in the same RAM bank, and
-        * o    we don't have a zero size or wrap around, and
-        * o    we are supposed to create an uncached mapping,
-        *      handle this as a kseg0 or kseg1 address
-        */
-       if (start_offset != 0) {
-               phys_addr_t last;
-               dma_addr_t dma_to_phys_offset;
-
-               last = start + size - 1;
-               dma_to_phys_offset =
-                       _dma_to_phys_offset_raw(last) << IOR_DMA_SHIFT;
-
-               if (dma_to_phys_offset == start_offset &&
-                       size != 0 && start <= last) {
-                       phys_t adjusted_start;
-                       adjusted_start = start + start_offset;
-                       if (flags == _CACHE_UNCACHED)
-                               result = (void __iomem *) (unsigned long)
-                                       CKSEG1ADDR(adjusted_start);
-                       else
-                               result = (void __iomem *) (unsigned long)
-                                       CKSEG0ADDR(adjusted_start);
-               }
-       }
-
-       return result;
-}
-
-static inline int plat_iounmap(const volatile void __iomem *addr)
-{
-       return 0;
-}
-#endif /* __ASM_MACH_POWERTV_IOREMAP_H */
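The deleted ioremap.h translates between physical and DMA addresses by using the address bits above a 2^22-byte (4 MiB) grain to index a per-grain offset table. The standalone sketch below illustrates only that lookup idea, assuming 32-bit addresses and storing a full 32-bit offset per grain; the kernel version additionally packs the offset into a narrower bit-field and shifts it back out. The table contents here are invented.

#include <stdint.h>
#include <stdio.h>

#define GRAIN_BITS   22                          /* 4 MiB per grain */
#define NUM_GRAINS   (1u << (32 - GRAIN_BITS))   /* 1024 table entries */

/* Offset to add for each 4 MiB grain of physical address space. */
static uint32_t phys_to_dma_offset[NUM_GRAINS];

static uint32_t phys_to_dma(uint32_t phys)
{
        /* The high bits select the grain; the stored offset is added to
         * the whole address, like phys + (offset << shift) above. */
        return phys + phys_to_dma_offset[phys >> GRAIN_BITS];
}

int main(void)
{
        /* Pretend the grain containing 0x48000000 is aliased 1 GiB lower
         * in DMA space (unsigned wrap-around implements the subtraction). */
        phys_to_dma_offset[0x48000000u >> GRAIN_BITS] = (uint32_t)-0x40000000;

        printf("%#x -> %#x\n", 0x48001234u, phys_to_dma(0x48001234u));
        return 0;
}

The reverse direction works the same way with a dma_to_phys table, which is what plat_ioremap() above consults to detect addresses aliased into the first 512 MiB.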
diff --git a/arch/mips/include/asm/mach-powertv/irq.h b/arch/mips/include/asm/mach-powertv/irq.h
deleted file mode 100644 (file)
index 4bd5d0c..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#ifndef _ASM_MACH_POWERTV_IRQ_H
-#define _ASM_MACH_POWERTV_IRQ_H
-#include <asm/mach-powertv/interrupts.h>
-
-#define MIPS_CPU_IRQ_BASE      ibase
-#define NR_IRQS                        127
-#endif
diff --git a/arch/mips/include/asm/mach-powertv/powertv-clock.h b/arch/mips/include/asm/mach-powertv/powertv-clock.h
deleted file mode 100644 (file)
index 6f3e9a0..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-/*
- * Local definitions for the powertv PCI code
- */
-
-#ifndef _POWERTV_PCI_POWERTV_PCI_H_
-#define _POWERTV_PCI_POWERTV_PCI_H_
-extern int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-extern int asic_pcie_init(void);
-extern int asic_pcie_init(void);
-
-extern int log_level;
-#endif
diff --git a/arch/mips/include/asm/mach-powertv/war.h b/arch/mips/include/asm/mach-powertv/war.h
deleted file mode 100644 (file)
index c5651c8..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * This version for the PowerTV platform copied from the Malta version.
- *
- * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- */
-#ifndef __ASM_MACH_POWERTV_WAR_H
-#define __ASM_MACH_POWERTV_WAR_H
-
-#define R4600_V1_INDEX_ICACHEOP_WAR    0
-#define R4600_V1_HIT_CACHEOP_WAR       0
-#define R4600_V2_HIT_CACHEOP_WAR       0
-#define R5432_CP0_INTERRUPT_WAR                0
-#define BCM1250_M3_WAR                 0
-#define SIBYTE_1956_WAR                        0
-#define MIPS4K_ICACHE_REFILL_WAR       1
-#define MIPS_CACHE_SYNC_WAR            1
-#define TX49XX_ICACHE_INDEX_INV_WAR    0
-#define ICACHE_REFILLS_WORKAROUND_WAR  1
-#define R10000_LLSC_WAR                        0
-#define MIPS34K_MISSED_ITLB_WAR                0
-
-#endif /* __ASM_MACH_POWERTV_WAR_H */
index a02596cf1abd418a80653b5ef36bdda6cbbb4bbd..e33227998713e644a53e21c2e11c3e5fa174c970 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
  *
  *  This program is free software; you can distribute it and/or modify it
  *  under the terms of the GNU General Public License (Version 2) as
 #ifndef __ASM_MIPS_BOARDS_PIIX4_H
 #define __ASM_MIPS_BOARDS_PIIX4_H
 
-/************************************************************************
- *  IO register offsets
- ************************************************************************/
-#define PIIX4_ICTLR1_ICW1      0x20
-#define PIIX4_ICTLR1_ICW2      0x21
-#define PIIX4_ICTLR1_ICW3      0x21
-#define PIIX4_ICTLR1_ICW4      0x21
-#define PIIX4_ICTLR2_ICW1      0xa0
-#define PIIX4_ICTLR2_ICW2      0xa1
-#define PIIX4_ICTLR2_ICW3      0xa1
-#define PIIX4_ICTLR2_ICW4      0xa1
-#define PIIX4_ICTLR1_OCW1      0x21
-#define PIIX4_ICTLR1_OCW2      0x20
-#define PIIX4_ICTLR1_OCW3      0x20
-#define PIIX4_ICTLR1_OCW4      0x20
-#define PIIX4_ICTLR2_OCW1      0xa1
-#define PIIX4_ICTLR2_OCW2      0xa0
-#define PIIX4_ICTLR2_OCW3      0xa0
-#define PIIX4_ICTLR2_OCW4      0xa0
-
-
-/************************************************************************
- *  Register encodings.
- ************************************************************************/
-#define PIIX4_OCW2_NSEOI       (0x1 << 5)
-#define PIIX4_OCW2_SEOI                (0x3 << 5)
-#define PIIX4_OCW2_RNSEOI      (0x5 << 5)
-#define PIIX4_OCW2_RAEOIS      (0x4 << 5)
-#define PIIX4_OCW2_RAEOIC      (0x0 << 5)
-#define PIIX4_OCW2_RSEOI       (0x7 << 5)
-#define PIIX4_OCW2_SP          (0x6 << 5)
-#define PIIX4_OCW2_NOP         (0x2 << 5)
-
-#define PIIX4_OCW2_SEL         (0x0 << 3)
-
-#define PIIX4_OCW2_ILS_0       0
-#define PIIX4_OCW2_ILS_1       1
-#define PIIX4_OCW2_ILS_2       2
-#define PIIX4_OCW2_ILS_3       3
-#define PIIX4_OCW2_ILS_4       4
-#define PIIX4_OCW2_ILS_5       5
-#define PIIX4_OCW2_ILS_6       6
-#define PIIX4_OCW2_ILS_7       7
-#define PIIX4_OCW2_ILS_8       0
-#define PIIX4_OCW2_ILS_9       1
-#define PIIX4_OCW2_ILS_10      2
-#define PIIX4_OCW2_ILS_11      3
-#define PIIX4_OCW2_ILS_12      4
-#define PIIX4_OCW2_ILS_13      5
-#define PIIX4_OCW2_ILS_14      6
-#define PIIX4_OCW2_ILS_15      7
-
-#define PIIX4_OCW3_SEL         (0x1 << 3)
-
-#define PIIX4_OCW3_IRR         0x2
-#define PIIX4_OCW3_ISR         0x3
+/* PIRQX Route Control */
+#define PIIX4_FUNC0_PIRQRC                     0x60
+#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE       (1 << 7)
+#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK          0xf
+#define   PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX           16
+/* Top Of Memory */
+#define PIIX4_FUNC0_TOM                                0x69
+#define   PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK           0xf0
+/* Deterministic Latency Control */
+#define PIIX4_FUNC0_DLC                                0x82
+#define   PIIX4_FUNC0_DLC_USBPR_EN                     (1 << 2)
+#define   PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN           (1 << 1)
+#define   PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN       (1 << 0)
+
+/* IDE Timing */
+#define PIIX4_FUNC1_IDETIM_PRIMARY_LO          0x40
+#define PIIX4_FUNC1_IDETIM_PRIMARY_HI          0x41
+#define   PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN  (1 << 7)
+#define PIIX4_FUNC1_IDETIM_SECONDARY_LO                0x42
+#define PIIX4_FUNC1_IDETIM_SECONDARY_HI                0x43
+#define   PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN        (1 << 7)
 
 #endif /* __ASM_MIPS_BOARDS_PIIX4_H */
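For context on the new PIRQX Route Control definitions: per the PIIX4 datasheet, function 0 carries one route-control byte per PIRQ line starting at config offset 0x60, where the low four bits select the ISA IRQ the line is steered to and bit 7 disables routing. The fragment below is an illustrative, standalone sketch of composing such a byte; it is not code from this patch, and in the kernel the value would be written through the PCI config accessors rather than printed.

#include <stdint.h>
#include <stdio.h>

#define PIIX4_FUNC0_PIRQRC                              0x60
#define PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE          (1 << 7)
#define PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK             0xf

/* Compose the route-control byte for one PIRQ line. */
static uint8_t pirq_route_value(unsigned int isa_irq, int enable)
{
        if (!enable)
                return PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE;
        return isa_irq & PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK;
}

int main(void)
{
        unsigned int pirq = 0;  /* PIRQA; PIRQB..D would be 1..3 */

        /* Steer PIRQA to ISA IRQ 10. */
        printf("config offset %#x <- %#x\n",
               PIIX4_FUNC0_PIRQRC + pirq,
               (unsigned int)pirq_route_value(10, 1));
        return 0;
}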
index 3b29079b5424b4fe0f78588ab1b8821aef0a1237..e277bbad28713d3a510cf5e5dec337316f0a745a 100644 (file)
 #endif /* SMTC */
 #include <asm-generic/mm_hooks.h>
 
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-
 #define TLBMISS_HANDLER_SETUP_PGD(pgd)                                 \
 do {                                                                   \
        extern void tlbmiss_handler_setup_pgd(unsigned long);           \
        tlbmiss_handler_setup_pgd((unsigned long)(pgd));                \
 } while (0)
 
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 #define TLBMISS_HANDLER_SETUP()                                                \
        do {                                                            \
                TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);              \
-               write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
+               write_c0_xcontext((unsigned long) smp_processor_id() << \
+                                               SMP_CPUID_REGSHIFT);    \
        } while (0)
 
-#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using  pgd_current*/
+#else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using  pgd_current*/
 
 /*
  * For the fast tlb miss handlers, we keep a per cpu array of pointers
@@ -47,21 +47,11 @@ do {                                                                        \
  */
 extern unsigned long pgd_current[];
 
-#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
-       pgd_current[smp_processor_id()] = (unsigned long)(pgd)
-
-#ifdef CONFIG_32BIT
 #define TLBMISS_HANDLER_SETUP()                                                \
-       write_c0_context((unsigned long) smp_processor_id() << 25);     \
+       write_c0_context((unsigned long) smp_processor_id() <<          \
+                                               SMP_CPUID_REGSHIFT);    \
        back_to_back_c0_hazard();                                       \
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
-#endif
-#ifdef CONFIG_64BIT
-#define TLBMISS_HANDLER_SETUP()                                                \
-       write_c0_context((unsigned long) smp_processor_id() << 26);     \
-       back_to_back_c0_hazard();                                       \
-       TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
-#endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
index 5e6cd0947393295ea87cba5191c5a6fbf728c527..7bba9da110afab3f9c92eb02a52161ac23642d29 100644 (file)
@@ -81,7 +81,6 @@ static inline long regs_return_value(struct pt_regs *regs)
 
 #define instruction_pointer(regs) ((regs)->cp0_epc)
 #define profile_pc(regs) instruction_pointer(regs)
-#define user_stack_pointer(r) ((r)->regs[29])
 
 extern asmlinkage void syscall_trace_enter(struct pt_regs *regs);
 extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
@@ -100,4 +99,17 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs)
        (struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1;      \
 })
 
+/* Helpers for working with the user stack pointer */
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+       return regs->regs[29];
+}
+
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+       unsigned long val)
+{
+       regs->regs[29] = val;
+}
+
 #endif /* _ASM_PTRACE_H */
index a0b2650516ac9a2837a362f4c5768ce609767216..34d1a19171257ff8d8e602988615b8b8e2878ff9 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/asm.h>
 #include <asm/cacheops.h>
 #include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
 
 /*
@@ -162,7 +163,15 @@ static inline void flush_scache_line_indexed(unsigned long addr)
 static inline void flush_icache_line(unsigned long addr)
 {
        __iflush_prologue
-       cache_op(Hit_Invalidate_I, addr);
+       switch (boot_cpu_type()) {
+       case CPU_LOONGSON2:
+               cache_op(Hit_Invalidate_I_Loongson23, addr);
+               break;
+
+       default:
+               cache_op(Hit_Invalidate_I, addr);
+               break;
+       }
        __iflush_epilogue
 }
 
@@ -208,7 +217,15 @@ static inline void flush_scache_line(unsigned long addr)
  */
 static inline void protected_flush_icache_line(unsigned long addr)
 {
-       protected_cache_op(Hit_Invalidate_I, addr);
+       switch (boot_cpu_type()) {
+       case CPU_LOONGSON2:
+               protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
+               break;
+
+       default:
+               protected_cache_op(Hit_Invalidate_I, addr);
+               break;
+       }
 }
 
 /*
@@ -412,8 +429,8 @@ __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
 
 /* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
-static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)       \
+static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
                                                    unsigned long end)  \
 {                                                                      \
        unsigned long lsize = cpu_##desc##_line_size();                 \
@@ -432,13 +449,15 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
        __##pfx##flush_epilogue                                         \
 }
 
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
+       protected_, loongson23_)
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
 /* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
 
 #endif /* _ASM_R4KCACHE_H */
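The r4kcache.h hunk above adds an extra name segment to __BUILD_BLAST_CACHE_RANGE so that a Loongson-2/3-specific variant can be stamped out next to the generic one. The standalone sketch below shows the same token-pasting technique with invented names and a printf standing in for the cache operation; it is not the kernel macro itself.

#include <stdio.h>

#define BUILD_RANGE_OP(prot, extra, op)                                  \
static void prot##extra##flush_range(unsigned long start,               \
                                     unsigned long end)                 \
{                                                                        \
        unsigned long addr;                                              \
                                                                         \
        /* Walk the range one (assumed 32-byte) cache line at a time. */ \
        for (addr = start & ~31UL; addr < end; addr += 32)              \
                printf(#op " line %#lx\n", addr);                        \
}

BUILD_RANGE_OP(protected_, , Hit_Invalidate_I)
BUILD_RANGE_OP(protected_, loongson23_, Hit_Invalidate_I_Loongson23)

int main(void)
{
        protected_flush_range(0x1000, 0x1040);
        protected_loongson23_flush_range(0x1000, 0x1040);
        return 0;
}

An empty argument pastes away to nothing, which is why the generic invocations above can pass a blank extra segment and keep their original function names.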
index e26589ef36eee12a9d5a957edf0df6d59ae7217e..d7bfdeba9e845acfa83270f01bb280a687d785ad 100644 (file)
@@ -5,6 +5,14 @@
 
 extern void setup_early_printk(void);
 
+#ifdef CONFIG_EARLY_PRINTK_8250
+extern void setup_8250_early_printk_port(unsigned long base,
+       unsigned int reg_shift, unsigned int timeout);
+#else
+static inline void setup_8250_early_printk_port(unsigned long base,
+       unsigned int reg_shift, unsigned int timeout) {}
+#endif
+
 extern void set_handler(unsigned long offset, void *addr, unsigned long len);
 extern void set_uncached_handler(unsigned long offset, void *addr, unsigned long len);
 
index 23fc95e65673bcb5d907a77f83714c424092add6..4857e2c8df5ae2eae1ac4bcca27b7dca586842aa 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/asmmacro.h>
 #include <asm/mipsregs.h>
 #include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
 
 /*
  * For SMTC kernel, global IE should be left set, and interrupts
                .endm
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
-#define PTEBASE_SHIFT  19      /* TCBIND */
-#define CPU_ID_REG CP0_TCBIND
-#define CPU_ID_MFC0 mfc0
-#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
-#define PTEBASE_SHIFT  48      /* XCONTEXT */
-#define CPU_ID_REG CP0_XCONTEXT
-#define CPU_ID_MFC0 MFC0
-#else
-#define PTEBASE_SHIFT  23      /* CONTEXT */
-#define CPU_ID_REG CP0_CONTEXT
-#define CPU_ID_MFC0 MFC0
-#endif
                .macro  get_saved_sp    /* SMP variation */
-               CPU_ID_MFC0     k0, CPU_ID_REG
+               ASM_CPUID_MFC0  k0, ASM_SMP_CPUID_REG
 #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
                lui     k1, %hi(kernelsp)
 #else
                daddiu  k1, %hi(kernelsp)
                dsll    k1, 16
 #endif
-               LONG_SRL        k0, PTEBASE_SHIFT
+               LONG_SRL        k0, SMP_CPUID_PTRSHIFT
                LONG_ADDU       k1, k0
                LONG_L  k1, %lo(kernelsp)(k1)
                .endm
 
                .macro  set_saved_sp stackp temp temp2
-               CPU_ID_MFC0     \temp, CPU_ID_REG
-               LONG_SRL        \temp, PTEBASE_SHIFT
+               ASM_CPUID_MFC0  \temp, ASM_SMP_CPUID_REG
+               LONG_SRL        \temp, SMP_CPUID_PTRSHIFT
                LONG_S  \stackp, kernelsp(\temp)
                .endm
-#else
+#else /* !CONFIG_SMP */
                .macro  get_saved_sp    /* Uniprocessor variation */
 #ifdef CONFIG_CPU_JUMP_WORKAROUNDS
                /*
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
new file mode 100644 (file)
index 0000000..81c8913
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ *
+ * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
+ */
+
+#ifndef __ASM_MIPS_SYSCALL_H
+#define __ASM_MIPS_SYSCALL_H
+
+#include <linux/audit.h>
+#include <linux/elf-em.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+                                 struct pt_regs *regs)
+{
+       return regs->regs[2];
+}
+
+static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
+       struct task_struct *task, struct pt_regs *regs, unsigned int n)
+{
+       unsigned long usp = regs->regs[29];
+
+       switch (n) {
+       case 0: case 1: case 2: case 3:
+               *arg = regs->regs[4 + n];
+
+               return 0;
+
+#ifdef CONFIG_32BIT
+       case 4: case 5: case 6: case 7:
+               return get_user(*arg, (int *)usp + n);
+#endif
+
+#ifdef CONFIG_64BIT
+       case 4: case 5: case 6: case 7:
+#ifdef CONFIG_MIPS32_O32
+               if (test_thread_flag(TIF_32BIT_REGS))
+                       return get_user(*arg, (int *)usp + n);
+               else
+#endif
+                       *arg = regs->regs[4 + n];
+
+               return 0;
+#endif
+
+       default:
+               BUG();
+       }
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+                                           struct pt_regs *regs)
+{
+       return regs->regs[2];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+                                           struct pt_regs *regs,
+                                           int error, long val)
+{
+       if (error) {
+               regs->regs[2] = -error;
+               regs->regs[7] = -1;
+       } else {
+               regs->regs[2] = val;
+               regs->regs[7] = 0;
+       }
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+                                        struct pt_regs *regs,
+                                        unsigned int i, unsigned int n,
+                                        unsigned long *args)
+{
+       int ret = 0;
+
+       /* store each requested argument directly into the caller's array */
+       while (n--)
+               ret |= mips_get_syscall_arg(args++, task, regs, i++);
+
+       /*
+        * No way to communicate an error because this is a void function.
+        */
+#if 0
+       return ret;
+#endif
+}
+
+extern const unsigned long sys_call_table[];
+extern const unsigned long sys32_call_table[];
+extern const unsigned long sysn32_call_table[];
+
+static inline int __syscall_get_arch(void)
+{
+       int arch = EM_MIPS;
+#ifdef CONFIG_64BIT
+       arch |=  __AUDIT_ARCH_64BIT;
+#endif
+#if defined(__LITTLE_ENDIAN)
+       arch |=  __AUDIT_ARCH_LE;
+#endif
+       return arch;
+}
+
+#endif /* __ASM_MIPS_SYSCALL_H */
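For orientation only (not part of the patch), generic code layered on asm-generic/syscall.h is expected to consume these helpers roughly as below; report_current_syscall() is a hypothetical function used purely for illustration.

#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/syscall.h>

/* Hypothetical consumer: log the syscall number and its first four
 * arguments for the current task, e.g. from a syscall-entry hook. */
static void report_current_syscall(struct pt_regs *regs)
{
        unsigned long args[4];
        long nr = syscall_get_nr(current, regs);

        syscall_get_arguments(current, regs, 0, 4, args);
        pr_debug("syscall %ld(%#lx, %#lx, %#lx, %#lx)\n",
                 nr, args[0], args[1], args[2], args[3]);
}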
index 61215a34acc68ff87da07665c65c8dc0f6dbfcc1..f9b24bfbdbae96f8cc31d58d7cb0318130ceab9f 100644 (file)
@@ -116,6 +116,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_32BIT_ADDR         23      /* 32-bit address space (o32/n32) */
 #define TIF_FPUBOUND           24      /* thread bound to FPU-full CPU set */
 #define TIF_LOAD_WATCH         25      /* If set, load watch registers */
+#define TIF_SYSCALL_TRACEPOINT 26      /* syscall tracepoint instrumentation */
 #define TIF_SYSCALL_TRACE      31      /* syscall trace active */
 
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -132,21 +133,54 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_32BIT_ADDR                (1<<TIF_32BIT_ADDR)
 #define _TIF_FPUBOUND          (1<<TIF_FPUBOUND)
 #define _TIF_LOAD_WATCH                (1<<TIF_LOAD_WATCH)
+#define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
 
 #define _TIF_WORK_SYSCALL_ENTRY        (_TIF_NOHZ | _TIF_SYSCALL_TRACE |       \
-                                _TIF_SYSCALL_AUDIT)
+                                _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
 
 /* work to do in syscall_trace_leave() */
 #define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE |       \
-                                _TIF_SYSCALL_AUDIT)
+                                _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
 
 /* work to do on interrupt/exception return */
 #define _TIF_WORK_MASK         \
        (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
 /* work to do on any return to u-space */
 #define _TIF_ALLWORK_MASK      (_TIF_NOHZ | _TIF_WORK_MASK |           \
-                                _TIF_WORK_SYSCALL_EXIT)
+                                _TIF_WORK_SYSCALL_EXIT |               \
+                                _TIF_SYSCALL_TRACEPOINT)
 
-#endif /* __KERNEL__ */
+/*
+ * We stash processor id into a COP0 register to retrieve it fast
+ * at kernel exception entry.
+ */
+#if defined(CONFIG_MIPS_MT_SMTC)
+#define SMP_CPUID_REG          2, 2    /* TCBIND */
+#define ASM_SMP_CPUID_REG      $2, 2
+#define SMP_CPUID_PTRSHIFT     19
+#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
+#define SMP_CPUID_REG          20, 0   /* XCONTEXT */
+#define ASM_SMP_CPUID_REG      $20
+#define SMP_CPUID_PTRSHIFT     48
+#else
+#define SMP_CPUID_REG          4, 0    /* CONTEXT */
+#define ASM_SMP_CPUID_REG      $4
+#define SMP_CPUID_PTRSHIFT     23
+#endif
 
+#ifdef CONFIG_64BIT
+#define SMP_CPUID_REGSHIFT     (SMP_CPUID_PTRSHIFT + 3)
+#else
+#define SMP_CPUID_REGSHIFT     (SMP_CPUID_PTRSHIFT + 2)
+#endif
+
+#ifdef CONFIG_MIPS_MT_SMTC
+#define ASM_CPUID_MFC0         mfc0
+#define UASM_i_CPUID_MFC0      uasm_i_mfc0
+#else
+#define ASM_CPUID_MFC0         MFC0
+#define UASM_i_CPUID_MFC0      UASM_i_MFC0
+#endif
+
+#endif /* __KERNEL__ */
 #endif /* _ASM_THREAD_INFO_H */
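A small sketch (not part of the patch) of the arithmetic the new macros encode: the CPU number sits in the chosen COP0 register shifted up by SMP_CPUID_REGSHIFT bits, so a single right shift by SMP_CPUID_PTRSHIFT yields cpu * sizeof(long), i.e. a ready-made byte offset into a per-CPU array of longs such as kernelsp[]. The helper names below are invented for illustration.

#include <asm/thread_info.h>

/* Recover the plain CPU number from the stashed COP0 value. */
static inline unsigned long cpuid_reg_to_cpu(unsigned long cp0_value)
{
        return cp0_value >> SMP_CPUID_REGSHIFT;
}

/* Recover the byte offset into a per-CPU array of longs, which is what
 * the get_saved_sp/set_saved_sp assembler macros add to kernelsp. */
static inline unsigned long cpuid_reg_to_long_offset(unsigned long cp0_value)
{
        return cp0_value >> SMP_CPUID_PTRSHIFT;   /* == cpu * sizeof(long) */
}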
index 2d7b9df4542dd478d53f53d8151d3381d1c58aee..24f534a7fbc3bd84c7bd3b3bda2238e3ce183943 100644 (file)
@@ -75,7 +75,7 @@ extern int init_r4k_clocksource(void);
 
 static inline int init_mips_clocksource(void)
 {
-#if defined(CONFIG_CSRC_R4K) && !defined(CONFIG_CSRC_GIC)
+#ifdef CONFIG_CSRC_R4K
        return init_r4k_clocksource();
 #else
        return 0;
index 63c9c886173a68c6a89857b7f7d28046cb712e56..4d3b92886665799d2ca2b746d8efc310cc0c1168 100644 (file)
 
 #include <uapi/asm/unistd.h>
 
+#ifdef CONFIG_MIPS32_N32
+#define NR_syscalls  (__NR_N32_Linux + __NR_N32_Linux_syscalls)
+#elif defined(CONFIG_64BIT)
+#define NR_syscalls  (__NR_64_Linux + __NR_64_Linux_syscalls)
+#else
+#define NR_syscalls  (__NR_O32_Linux + __NR_O32_Linux_syscalls)
+#endif
 
 #ifndef __ASSEMBLY__
 
index 88e292b7719e99963f74692f0ffbd36e60e79c83..e81174432bab0eb7c1e41093e6bdd5ed833fc851 100644 (file)
@@ -33,6 +33,8 @@ struct siginfo;
 #error _MIPS_SZLONG neither 32 nor 64
 #endif
 
+#define __ARCH_SIGSYS
+
 #include <asm-generic/siginfo.h>
 
 typedef struct siginfo {
@@ -97,6 +99,13 @@ typedef struct siginfo {
                        __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
                        int _fd;
                } _sigpoll;
+
+               /* SIGSYS */
+               struct {
+                       void __user *_call_addr; /* calling user insn */
+                       int _syscall;   /* triggering system call number */
+                       unsigned int _arch;     /* AUDIT_ARCH_* of syscall */
+               } _sigsys;
        } _sifields;
 } siginfo_t;
 
index 61c01f054d1b160f753582b60888e59592b95b21..0df9787cd84d9e4a93e04513f1654b0327b3df0f 100644 (file)
@@ -94,4 +94,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _UAPI_ASM_SOCKET_H */
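For context, the new option is set from user space roughly as below (illustrative sketch; requires kernel headers that define SO_MAX_PACING_RATE). The value is an upper bound on the socket's transmit rate in bytes per second, enforced by pacing-aware queueing such as the fq qdisc; cap_pacing_rate() is an invented helper name.

#include <stdio.h>
#include <sys/socket.h>

static int cap_pacing_rate(int fd, unsigned int bytes_per_sec)
{
        if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                       &bytes_per_sec, sizeof(bytes_per_sec)) < 0) {
                perror("setsockopt(SO_MAX_PACING_RATE)");
                return -1;
        }
        return 0;
}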
index 423d871a946ba15ae5b5ea70338530949fa8d166..1c1b71752c84cbc26dc2c0f81cc778e0a749a495 100644 (file)
@@ -26,7 +26,6 @@ obj-$(CONFIG_CEVT_TXX9)               += cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)     += csrc-bcm1480.o
 obj-$(CONFIG_CSRC_GIC)         += csrc-gic.o
 obj-$(CONFIG_CSRC_IOASIC)      += csrc-ioasic.o
-obj-$(CONFIG_CSRC_POWERTV)     += csrc-powertv.o
 obj-$(CONFIG_CSRC_R4K)         += csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)      += csrc-sb1250.o
 obj-$(CONFIG_SYNC_R4K)         += sync-r4k.o
@@ -35,6 +34,7 @@ obj-$(CONFIG_STACKTRACE)      += stacktrace.o
 obj-$(CONFIG_MODULES)          += mips_ksyms.o module.o
 obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
 
+obj-$(CONFIG_FTRACE_SYSCALLS)  += ftrace.o
 obj-$(CONFIG_FUNCTION_TRACER)  += mcount.o ftrace.o
 
 obj-$(CONFIG_CPU_R4K_FPU)      += r4k_fpu.o r4k_switch.o
@@ -84,6 +84,7 @@ obj-$(CONFIG_GPIO_TXX9)               += gpio_txx9.o
 obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump.o
 obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
+obj-$(CONFIG_EARLY_PRINTK_8250)        += early_printk_8250.o
 obj-$(CONFIG_SPINLOCK_TEST)    += spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)     += mips_machine.o
 
index 5465dc183e5ac4d26e36d300b1df36fb4cf4d2b5..c814287bdf5d10d8a44a9e6a843b3acd5ab30ff8 100644 (file)
@@ -376,13 +376,33 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
                                __cpu_name[cpu] = "R4000PC";
                        }
                } else {
+                       int cca = read_c0_config() & CONF_CM_CMASK;
+                       int mc;
+
+                       /*
+                        * SC and MC versions can't be reliably told apart,
+                        * but only the latter support coherent caching
+                        * modes so assume the firmware has set the KSEG0
+                        * coherency attribute reasonably (if uncached, we
+                        * assume SC).
+                        */
+                       switch (cca) {
+                       case CONF_CM_CACHABLE_CE:
+                       case CONF_CM_CACHABLE_COW:
+                       case CONF_CM_CACHABLE_CUW:
+                               mc = 1;
+                               break;
+                       default:
+                               mc = 0;
+                               break;
+                       }
                        if ((c->processor_id & PRID_REV_MASK) >=
                            PRID_REV_R4400) {
-                               c->cputype = CPU_R4400SC;
-                               __cpu_name[cpu] = "R4400SC";
+                               c->cputype = mc ? CPU_R4400MC : CPU_R4400SC;
+                               __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
                        } else {
-                               c->cputype = CPU_R4000SC;
-                               __cpu_name[cpu] = "R4000SC";
+                               c->cputype = mc ? CPU_R4000MC : CPU_R4000SC;
+                               __cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
                        }
                }
 
@@ -1079,8 +1099,8 @@ void cpu_report(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
 
-       printk(KERN_INFO "CPU revision is: %08x (%s)\n",
-              c->processor_id, cpu_name_string());
+       pr_info("CPU%d revision is: %08x (%s)\n",
+               smp_processor_id(), c->processor_id, cpu_name_string());
        if (c->options & MIPS_CPU_FPU)
                printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
 }
diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c
deleted file mode 100644 (file)
index abd99ea..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2008 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-/*
- * The file comes from kernel/csrc-r4k.c
- */
-#include <linux/clocksource.h>
-#include <linux/init.h>
-
-#include <asm/time.h>                  /* Not included in linux/time.h */
-
-#include <asm/mach-powertv/asic_regs.h>
-#include "powertv-clock.h"
-
-/* MIPS PLL Register Definitions */
-#define PLL_GET_M(x)           (((x) >> 8) & 0x000000FF)
-#define PLL_GET_N(x)           (((x) >> 16) & 0x000000FF)
-#define PLL_GET_P(x)           (((x) >> 24) & 0x00000007)
-
-/*
- * returns:  Clock frequency in kHz
- */
-unsigned int __init mips_get_pll_freq(void)
-{
-       unsigned int pll_reg, m, n, p;
-       unsigned int fin = 54000; /* Base frequency in kHz */
-       unsigned int fout;
-
-       /* Read PLL register setting */
-       pll_reg = asic_read(mips_pll_setup);
-       m = PLL_GET_M(pll_reg);
-       n = PLL_GET_N(pll_reg);
-       p = PLL_GET_P(pll_reg);
-       pr_info("MIPS PLL Register:0x%x  M=%d  N=%d  P=%d\n", pll_reg, m, n, p);
-
-       /* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */
-       fout = ((2 * n * fin) / (m * (0x01 << p)));
-
-       pr_info("MIPS Clock Freq=%d kHz\n", fout);
-
-       return fout;
-}
-
-static cycle_t c0_hpt_read(struct clocksource *cs)
-{
-       return read_c0_count();
-}
-
-static struct clocksource clocksource_mips = {
-       .name           = "powertv-counter",
-       .read           = c0_hpt_read,
-       .mask           = CLOCKSOURCE_MASK(32),
-       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static void __init powertv_c0_hpt_clocksource_init(void)
-{
-       unsigned int pll_freq = mips_get_pll_freq();
-
-       pr_info("CPU frequency %d.%02d MHz\n", pll_freq / 1000,
-               (pll_freq % 1000) * 100 / 1000);
-
-       mips_hpt_frequency = pll_freq / 2 * 1000;
-
-       clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
-
-       clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
-}
-
-/**
- * struct tim_c - free running counter
- * @hi: High 16 bits of the counter
- * @lo: Low 32 bits of the counter
- *
- * Lays out the structure of the free running counter in memory. This counter
- * increments at a rate of 27 MHz/8 on all platforms.
- */
-struct tim_c {
-       unsigned int hi;
-       unsigned int lo;
-};
-
-static struct tim_c *tim_c;
-
-static cycle_t tim_c_read(struct clocksource *cs)
-{
-       unsigned int hi;
-       unsigned int next_hi;
-       unsigned int lo;
-
-       hi = readl(&tim_c->hi);
-
-       for (;;) {
-               lo = readl(&tim_c->lo);
-               next_hi = readl(&tim_c->hi);
-               if (next_hi == hi)
-                       break;
-               hi = next_hi;
-       }
-
-pr_crit("%s: read %llx\n", __func__, ((u64) hi << 32) | lo);
-       return ((u64) hi << 32) | lo;
-}
-
-#define TIM_C_SIZE             48              /* # bits in the timer */
-
-static struct clocksource clocksource_tim_c = {
-       .name           = "powertv-tim_c",
-       .read           = tim_c_read,
-       .mask           = CLOCKSOURCE_MASK(TIM_C_SIZE),
-       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-/**
- * powertv_tim_c_clocksource_init - set up a clock source for the TIM_C clock
- *
- * We know that TIM_C counts at 27 MHz/8, so each cycle corresponds to
- * 1 / (27,000,000/8) seconds.
- */
-static void __init powertv_tim_c_clocksource_init(void)
-{
-       const unsigned long     counts_per_second = 27000000 / 8;
-
-       clocksource_tim_c.rating = 200;
-
-       clocksource_register_hz(&clocksource_tim_c, counts_per_second);
-       tim_c = (struct tim_c *) asic_reg_addr(tim_ch);
-}
-
-/**
- powertv_clocksource_init - initialize all clocksources
- */
-void __init powertv_clocksource_init(void)
-{
-       powertv_c0_hpt_clocksource_init();
-       powertv_tim_c_clocksource_init();
-}
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c
new file mode 100644 (file)
index 0000000..83cea37
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ *  8250/16550-type serial ports prom_putchar()
+ *
+ *  Copyright (C) 2010  Yoichi Yuasa <yuasa@linux-mips.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+
+static void __iomem *serial8250_base;
+static unsigned int serial8250_reg_shift;
+static unsigned int serial8250_tx_timeout;
+
+void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift,
+                                 unsigned int timeout)
+{
+       serial8250_base = (void __iomem *)base;
+       serial8250_reg_shift = reg_shift;
+       serial8250_tx_timeout = timeout;
+}
+
+static inline u8 serial_in(int offset)
+{
+       return readb(serial8250_base + (offset << serial8250_reg_shift));
+}
+
+static inline void serial_out(int offset, char value)
+{
+       writeb(value, serial8250_base + (offset << serial8250_reg_shift));
+}
+
+void prom_putchar(char c)
+{
+       unsigned int timeout;
+       int status, bits;
+
+       if (!serial8250_base)
+               return;
+
+       timeout = serial8250_tx_timeout;
+       bits = UART_LSR_TEMT | UART_LSR_THRE;
+
+       do {
+               status = serial_in(UART_LSR);
+
+               if (--timeout == 0)
+                       break;
+       } while ((status & bits) != bits);
+
+       if (timeout)
+               serial_out(UART_TX, c);
+}
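A hypothetical board-setup sketch (not from the patch; the base address, register shift and timeout are invented) showing how a platform would hand its 16550-compatible UART to this helper before the real console is available. The prototype is assumed to come from the header hunk above (likely asm/setup.h).

#include <linux/init.h>
#include <asm/setup.h>          /* assumed home of setup_8250_early_printk_port() */

static void __init example_board_early_console(void)
{
        /* uncached (KSEG1) virtual base of the UART, registers 4 bytes apart
         * (reg_shift = 2), give up on a character after ~10000 LSR polls */
        setup_8250_early_printk_port(0xbfd003f8UL, 2, 10000);
}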
index dba90ec0dc385ffcad5cc09eda51031d4e9a0fcc..185ba258361b979ee9531bd18d38b0242e1de979 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/init.h>
 #include <linux/ftrace.h>
+#include <linux/syscalls.h>
 
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cacheflush.h>
+#include <asm/syscall.h>
 #include <asm/uasm.h>
+#include <asm/unistd.h>
 
 #include <asm-generic/sections.h>
 
@@ -364,3 +367,33 @@ out:
        WARN_ON(1);
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+#ifdef CONFIG_32BIT
+unsigned long __init arch_syscall_addr(int nr)
+{
+       return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
+}
+#endif
+
+#ifdef CONFIG_64BIT
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+#ifdef CONFIG_MIPS32_N32
+       if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
+               return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
+#endif
+       if (nr >= __NR_64_Linux  && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
+               return (unsigned long)sys_call_table[nr - __NR_64_Linux];
+#ifdef CONFIG_MIPS32_O32
+       if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
+               return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
+#endif
+
+       return (unsigned long) &sys_ni_syscall;
+}
+#endif
+
+#endif /* CONFIG_FTRACE_SYSCALLS */
index 31fa856829cbf2620521317e5247d42b9e3fb087..72853aa26b77393c95e4c147f8631d63702782e4 100644 (file)
@@ -374,12 +374,20 @@ NESTED(except_vec_nmi, 0, sp)
 NESTED(nmi_handler, PT_SIZE, sp)
        .set    push
        .set    noat
+       /*
+        * Clear ERL - restore segment mapping
+        * Clear BEV - required for page fault exception handler to work
+        */
+       mfc0    k0, CP0_STATUS
+       ori     k0, k0, ST0_EXL
+       li      k1, ~(ST0_BEV | ST0_ERL)
+       and     k0, k0, k1
+       mtc0    k0, CP0_STATUS
+       ehb
        SAVE_ALL
        move    a0, sp
        jal     nmi_exception_handler
-       RESTORE_ALL
-       .set    mips3
-       eret
+       /* nmi_exception_handler never returns */
        .set    pop
        END(nmi_handler)
 
index 72ef2d25cbf21ab5bebb1a93b0c0bf6e4e4fdff7..e498f2b3646a167e73f36e0f74857a5e6da6308e 100644 (file)
@@ -150,7 +150,7 @@ int __init mips_cpu_intc_init(struct device_node *of_node,
        domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
                                       &mips_cpu_intc_irq_domain_ops, NULL);
        if (!domain)
-               panic("Failed to add irqdomain for MIPS CPU\n");
+               panic("Failed to add irqdomain for MIPS CPU");
 
        return 0;
 }
index 977a623d9253ca3580084652ee264b3c4f59a6c2..2a52568dbcd6786cf0ca3c7100f056abba8ed08b 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/moduleloader.h>
 #include <linux/elf.h>
 #include <linux/mm.h>
+#include <linux/numa.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
@@ -46,7 +47,7 @@ static DEFINE_SPINLOCK(dbe_lock);
 void *module_alloc(unsigned long size)
 {
        return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
-                               GFP_KERNEL, PAGE_KERNEL, -1,
+                               GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
                                __builtin_return_address(0));
 }
 #endif
index 4204d76af854209659de87b35e143ec85fb802da..029e002a4ea0083d626140961d3cd465b188fabc 100644 (file)
@@ -73,7 +73,7 @@
 3:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-       PTR_L   t8, __stack_chk_guard
+       PTR_LA  t8, __stack_chk_guard
        LONG_L  t9, TASK_STACK_CANARY(a1)
        LONG_S  t9, 0(t8)
 #endif
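In C terms the corrected sequence does the following on a context switch (illustration only, assuming CONFIG_CC_STACKPROTECTOR on a UP kernel): PTR_LA loads the address of __stack_chk_guard, so the following store publishes the incoming task's canary into the global guard, whereas PTR_L loaded the guard's current value and the store then went to an arbitrary address.

#include <linux/sched.h>

extern unsigned long __stack_chk_guard;

/* Adopt the next task's canary as the single global guard value. */
static inline void switch_stack_canary(struct task_struct *next)
{
        __stack_chk_guard = next->stack_canary;
}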
index 8ae1ebef8b71e67fd5e205646bd10adcdb1b0299..b52e1d2b33e03836002b495328124225ebba29db 100644 (file)
  */
 #include <linux/compiler.h>
 #include <linux/context_tracking.h>
+#include <linux/elf.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
+#include <linux/regset.h>
 #include <linux/smp.h>
 #include <linux/user.h>
 #include <linux/security.h>
+#include <linux/tracehook.h>
 #include <linux/audit.h>
 #include <linux/seccomp.h>
+#include <linux/ftrace.h>
 
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
 #include <asm/mipsmtregs.h>
 #include <asm/pgtable.h>
 #include <asm/page.h>
+#include <asm/syscall.h>
 #include <asm/uaccess.h>
 #include <asm/bootinfo.h>
 #include <asm/reg.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
 /*
  * Called by kernel/ptrace.c when detaching..
  *
@@ -255,6 +263,133 @@ int ptrace_set_watch_regs(struct task_struct *child,
        return 0;
 }
 
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  void *kbuf, void __user *ubuf)
+{
+       struct pt_regs *regs = task_pt_regs(target);
+
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                  regs, 0, sizeof(*regs));
+}
+
+static int gpr_set(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  const void *kbuf, const void __user *ubuf)
+{
+       struct pt_regs newregs;
+       int ret;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &newregs,
+                                0, sizeof(newregs));
+       if (ret)
+               return ret;
+
+       *task_pt_regs(target) = newregs;
+
+       return 0;
+}
+
+static int fpr_get(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  void *kbuf, void __user *ubuf)
+{
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                  &target->thread.fpu,
+                                  0, sizeof(elf_fpregset_t));
+       /* XXX fcr31  */
+}
+
+static int fpr_set(struct task_struct *target,
+                  const struct user_regset *regset,
+                  unsigned int pos, unsigned int count,
+                  const void *kbuf, const void __user *ubuf)
+{
+       return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                 &target->thread.fpu,
+                                 0, sizeof(elf_fpregset_t));
+       /* XXX fcr31  */
+}
+
+enum mips_regset {
+       REGSET_GPR,
+       REGSET_FPR,
+};
+
+static const struct user_regset mips_regsets[] = {
+       [REGSET_GPR] = {
+               .core_note_type = NT_PRSTATUS,
+               .n              = ELF_NGREG,
+               .size           = sizeof(unsigned int),
+               .align          = sizeof(unsigned int),
+               .get            = gpr_get,
+               .set            = gpr_set,
+       },
+       [REGSET_FPR] = {
+               .core_note_type = NT_PRFPREG,
+               .n              = ELF_NFPREG,
+               .size           = sizeof(elf_fpreg_t),
+               .align          = sizeof(elf_fpreg_t),
+               .get            = fpr_get,
+               .set            = fpr_set,
+       },
+};
+
+static const struct user_regset_view user_mips_view = {
+       .name           = "mips",
+       .e_machine      = ELF_ARCH,
+       .ei_osabi       = ELF_OSABI,
+       .regsets        = mips_regsets,
+       .n              = ARRAY_SIZE(mips_regsets),
+};
+
+static const struct user_regset mips64_regsets[] = {
+       [REGSET_GPR] = {
+               .core_note_type = NT_PRSTATUS,
+               .n              = ELF_NGREG,
+               .size           = sizeof(unsigned long),
+               .align          = sizeof(unsigned long),
+               .get            = gpr_get,
+               .set            = gpr_set,
+       },
+       [REGSET_FPR] = {
+               .core_note_type = NT_PRFPREG,
+               .n              = ELF_NFPREG,
+               .size           = sizeof(elf_fpreg_t),
+               .align          = sizeof(elf_fpreg_t),
+               .get            = fpr_get,
+               .set            = fpr_set,
+       },
+};
+
+static const struct user_regset_view user_mips64_view = {
+       .name           = "mips",
+       .e_machine      = ELF_ARCH,
+       .ei_osabi       = ELF_OSABI,
+       .regsets        = mips64_regsets,
+       .n              = ARRAY_SIZE(mips64_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_32BIT
+       return &user_mips_view;
+#endif
+
+#ifdef CONFIG_MIPS32_O32
+               if (test_thread_flag(TIF_32BIT_REGS))
+       if (test_thread_flag(TIF_32BIT_REGS))
+               return &user_mips_view;
+
+       return &user_mips64_view;
+}
+
 long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
 {
@@ -517,52 +652,27 @@ long arch_ptrace(struct task_struct *child, long request,
        return ret;
 }
 
-static inline int audit_arch(void)
-{
-       int arch = EM_MIPS;
-#ifdef CONFIG_64BIT
-       arch |=  __AUDIT_ARCH_64BIT;
-#endif
-#if defined(__LITTLE_ENDIAN)
-       arch |=  __AUDIT_ARCH_LE;
-#endif
-       return arch;
-}
-
 /*
  * Notification of system call entry/exit
  * - triggered by current->work.syscall_trace
  */
 asmlinkage void syscall_trace_enter(struct pt_regs *regs)
 {
+       long ret = 0;
        user_exit();
 
        /* do the secure computing check first */
        secure_computing_strict(regs->regs[2]);
 
-       if (!(current->ptrace & PT_PTRACED))
-               goto out;
-
-       if (!test_thread_flag(TIF_SYSCALL_TRACE))
-               goto out;
+       if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+           tracehook_report_syscall_entry(regs))
+               ret = -1;
 
-       /* The 0x80 provides a way for the tracing parent to distinguish
-          between a syscall stop and SIGTRAP delivery */
-       ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
-                                0x80 : 0));
-
-       /*
-        * this isn't the same as continuing with a signal, but it will do
-        * for normal use.  strace only continues with a signal if the
-        * stopping signal is not SIGTRAP.  -brl
-        */
-       if (current->exit_code) {
-               send_sig(current->exit_code, current, 1);
-               current->exit_code = 0;
-       }
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+               trace_sys_enter(regs, regs->regs[2]);
 
-out:
-       audit_syscall_entry(audit_arch(), regs->regs[2],
+       audit_syscall_entry(__syscall_get_arch(),
+                           regs->regs[2],
                            regs->regs[4], regs->regs[5],
                            regs->regs[6], regs->regs[7]);
 }
@@ -582,26 +692,11 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 
        audit_syscall_exit(regs);
 
-       if (!(current->ptrace & PT_PTRACED))
-               return;
-
-       if (!test_thread_flag(TIF_SYSCALL_TRACE))
-               return;
-
-       /* The 0x80 provides a way for the tracing parent to distinguish
-          between a syscall stop and SIGTRAP delivery */
-       ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
-                                0x80 : 0));
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+               trace_sys_exit(regs, regs->regs[2]);
 
-       /*
-        * this isn't the same as continuing with a signal, but it will do
-        * for normal use.  strace only continues with a signal if the
-        * stopping signal is not SIGTRAP.  -brl
-        */
-       if (current->exit_code) {
-               send_sig(current->exit_code, current, 1);
-               current->exit_code = 0;
-       }
+       if (test_thread_flag(TIF_SYSCALL_TRACE))
+               tracehook_report_syscall_exit(regs, 0);
 
        user_enter();
 }
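With the two regset views registered, a debugger can use the generic PTRACE_GETREGSET interface instead of MIPS-specific peek requests. A rough user-space sketch (dump_gpr_note() is an invented name and the buffer is deliberately oversized rather than exactly ELF_NGREG):

#include <elf.h>                /* NT_PRSTATUS */
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static int dump_gpr_note(pid_t pid)
{
        unsigned long regs[64];         /* larger than MIPS ELF_NGREG */
        struct iovec iov = { .iov_base = regs, .iov_len = sizeof(regs) };

        if (ptrace(PTRACE_GETREGSET, pid, (void *)(long)NT_PRSTATUS, &iov) < 0) {
                perror("PTRACE_GETREGSET");
                return -1;
        }
        printf("kernel returned %zu bytes of GPR state\n", iov.iov_len);
        return 0;
}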
index 38af83f84c4af846ce2a6fb55994804b4d781bdc..20b7b040e76f1c4e8d9a019edae6014f3ee3fea5 100644 (file)
@@ -67,7 +67,7 @@ LEAF(resume)
 1:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-       PTR_L   t8, __stack_chk_guard
+       PTR_LA  t8, __stack_chk_guard
        LONG_L  t9, TASK_STACK_CANARY(a1)
        LONG_S  t9, 0(t8)
 #endif
index 921238a6bd260d238cd2a01e177556ed105c770b..078de5eaca8fd96d8bcb720491fe3f56901dc03c 100644 (file)
@@ -69,7 +69,7 @@
 1:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-       PTR_L   t8, __stack_chk_guard
+       PTR_LA  t8, __stack_chk_guard
        LONG_L  t9, TASK_STACK_CANARY(a1)
        LONG_S  t9, 0(t8)
 #endif
index e774bb1088b5f16970feaea1bddf6eaead240a01..e8e541b40d86be37a2e1fe9c693bbfe912581bfb 100644 (file)
@@ -40,17 +40,58 @@ NESTED(handle_sys, PT_SIZE, sp)
        sw      t1, PT_EPC(sp)
        beqz    t0, illegal_syscall
 
-       sll     t0, v0, 3
+       sll     t0, v0, 2
        la      t1, sys_call_table
        addu    t1, t0
        lw      t2, (t1)                # syscall routine
-       lw      t3, 4(t1)               # >= 0 if we need stack arguments
        beqz    t2, illegal_syscall
 
        sw      a3, PT_R26(sp)          # save a3 for syscall restarting
-       bgez    t3, stackargs
 
-stack_done:
+       /*
+        * More than four arguments.  Try to deal with it by copying the
+        * stack arguments from the user stack to the kernel stack.
+        * This Sucks (TM).
+        */
+       lw      t0, PT_R29(sp)          # get old user stack pointer
+
+       /*
+        * We intentionally keep the kernel stack a little below the top of
+        * userspace so we don't have to do a slower byte accurate check here.
+        */
+       lw      t5, TI_ADDR_LIMIT($28)
+       addu    t4, t0, 32
+       and     t5, t4
+       bltz    t5, bad_stack           # -> sp is bad
+
+       /*
+        * Ok, copy the args from the luser stack to the kernel stack.
+        * Arguments 5 to 8 live at 16(usp)..28(usp) in the o32 ABI and are
+        * copied to the same offsets on the kernel stack.
+        */
+
+       .set    push
+       .set    noreorder
+       .set    nomacro
+
+1:     lw      t5, 16(t0)              # argument #5 from usp
+4:     lw      t6, 20(t0)              # argument #6 from usp
+3:     lw      t7, 24(t0)              # argument #7 from usp
+2:     lw      t8, 28(t0)              # argument #8 from usp
+
+       sw      t5, 16(sp)              # argument #5 to ksp
+       sw      t6, 20(sp)              # argument #6 to ksp
+       sw      t7, 24(sp)              # argument #7 to ksp
+       sw      t8, 28(sp)              # argument #8 to ksp
+       .set    pop
+
+       .section __ex_table,"a"
+       PTR     1b,bad_stack
+       PTR     2b,bad_stack
+       PTR     3b,bad_stack
+       PTR     4b,bad_stack
+       .previous
+
        lw      t0, TI_FLAGS($28)       # syscall tracing enabled?
        li      t1, _TIF_WORK_SYSCALL_ENTRY
        and     t0, t1
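The index scaling changes from << 3 to << 2 because each table entry shrinks from eight bytes to a bare o32 pointer. The sketch below (not from the patch; the struct and field names are invented) shows the before/after layout the two shifts correspond to.

/* Old layout: handler plus a per-syscall word describing how to fetch
 * stack arguments, 8 bytes per entry, hence index = nr << 3. */
struct old_o32_entry {
        void    *handler;
        int     stack_arg_info;
};

/* New layout: just the handler pointer, 4 bytes per entry on o32,
 * hence index = nr << 2. */
typedef void *new_o32_entry;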
@@ -101,66 +142,6 @@ syscall_trace_entry:
 
 /* ------------------------------------------------------------------------ */
 
-       /*
-        * More than four arguments.  Try to deal with it by copying the
-        * stack arguments from the user stack to the kernel stack.
-        * This Sucks (TM).
-        */
-stackargs:
-       lw      t0, PT_R29(sp)          # get old user stack pointer
-
-       /*
-        * We intentionally keep the kernel stack a little below the top of
-        * userspace so we don't have to do a slower byte accurate check here.
-        */
-       lw      t5, TI_ADDR_LIMIT($28)
-       addu    t4, t0, 32
-       and     t5, t4
-       bltz    t5, bad_stack           # -> sp is bad
-
-       /* Ok, copy the args from the luser stack to the kernel stack.
-        * t3 is the precomputed number of instruction bytes needed to
-        * load or store arguments 6-8.
-        */
-
-       la      t1, 5f                  # load up to 3 arguments
-       subu    t1, t3
-1:     lw      t5, 16(t0)              # argument #5 from usp
-       .set    push
-       .set    noreorder
-       .set    nomacro
-       jr      t1
-        addiu  t1, 6f - 5f
-
-2:     lw      t8, 28(t0)              # argument #8 from usp
-3:     lw      t7, 24(t0)              # argument #7 from usp
-4:     lw      t6, 20(t0)              # argument #6 from usp
-5:     jr      t1
-        sw     t5, 16(sp)              # argument #5 to ksp
-
-#ifdef CONFIG_CPU_MICROMIPS
-       sw      t8, 28(sp)              # argument #8 to ksp
-       nop
-       sw      t7, 24(sp)              # argument #7 to ksp
-       nop
-       sw      t6, 20(sp)              # argument #6 to ksp
-       nop
-#else
-       sw      t8, 28(sp)              # argument #8 to ksp
-       sw      t7, 24(sp)              # argument #7 to ksp
-       sw      t6, 20(sp)              # argument #6 to ksp
-#endif
-6:     j       stack_done              # go back
-        nop
-       .set    pop
-
-       .section __ex_table,"a"
-       PTR     1b,bad_stack
-       PTR     2b,bad_stack
-       PTR     3b,bad_stack
-       PTR     4b,bad_stack
-       .previous
-
        /*
         * The stackpointer for a call with more than 4 arguments is bad.
         * We probably should handle this case a bit more drastic.
@@ -187,7 +168,7 @@ illegal_syscall:
        subu    t0, a0, __NR_O32_Linux  # check syscall number
        sltiu   v0, t0, __NR_O32_Linux_syscalls + 1
        beqz    t0, einval              # do not recurse
-       sll     t1, t0, 3
+       sll     t1, t0, 2
        beqz    v0, einval
        lw      t2, sys_call_table(t1)          # syscall routine
 
@@ -218,260 +199,248 @@ einval: li      v0, -ENOSYS
        jr      ra
        END(sys_syscall)
 
-       .macro  fifty ptr, nargs, from=1, to=50
-       sys     \ptr            \nargs
-       .if     \to-\from
-       fifty   \ptr,\nargs,"(\from+1)",\to
-       .endif
-       .endm
-
-       .macro  mille ptr, nargs, from=1, to=20
-       fifty   \ptr,\nargs
-       .if     \to-\from
-       mille   \ptr,\nargs,"(\from+1)",\to
-       .endif
-       .endm
-
-       .macro  syscalltable
-       sys     sys_syscall             8       /* 4000 */
-       sys     sys_exit                1
-       sys     __sys_fork              0
-       sys     sys_read                3
-       sys     sys_write               3
-       sys     sys_open                3       /* 4005 */
-       sys     sys_close               1
-       sys     sys_waitpid             3
-       sys     sys_creat               2
-       sys     sys_link                2
-       sys     sys_unlink              1       /* 4010 */
-       sys     sys_execve              0
-       sys     sys_chdir               1
-       sys     sys_time                1
-       sys     sys_mknod               3
-       sys     sys_chmod               2       /* 4015 */
-       sys     sys_lchown              3
-       sys     sys_ni_syscall          0
-       sys     sys_ni_syscall          0       /* was sys_stat */
-       sys     sys_lseek               3
-       sys     sys_getpid              0       /* 4020 */
-       sys     sys_mount               5
-       sys     sys_oldumount           1
-       sys     sys_setuid              1
-       sys     sys_getuid              0
-       sys     sys_stime               1       /* 4025 */
-       sys     sys_ptrace              4
-       sys     sys_alarm               1
-       sys     sys_ni_syscall          0       /* was sys_fstat */
-       sys     sys_pause               0
-       sys     sys_utime               2       /* 4030 */
-       sys     sys_ni_syscall          0
-       sys     sys_ni_syscall          0
-       sys     sys_access              2
-       sys     sys_nice                1
-       sys     sys_ni_syscall          0       /* 4035 */
-       sys     sys_sync                0
-       sys     sys_kill                2
-       sys     sys_rename              2
-       sys     sys_mkdir               2
-       sys     sys_rmdir               1       /* 4040 */
-       sys     sys_dup                 1
-       sys     sysm_pipe               0
-       sys     sys_times               1
-       sys     sys_ni_syscall          0
-       sys     sys_brk                 1       /* 4045 */
-       sys     sys_setgid              1
-       sys     sys_getgid              0
-       sys     sys_ni_syscall          0       /* was signal(2) */
-       sys     sys_geteuid             0
-       sys     sys_getegid             0       /* 4050 */
-       sys     sys_acct                1
-       sys     sys_umount              2
-       sys     sys_ni_syscall          0
-       sys     sys_ioctl               3
-       sys     sys_fcntl               3       /* 4055 */
-       sys     sys_ni_syscall          2
-       sys     sys_setpgid             2
-       sys     sys_ni_syscall          0
-       sys     sys_olduname            1
-       sys     sys_umask               1       /* 4060 */
-       sys     sys_chroot              1
-       sys     sys_ustat               2
-       sys     sys_dup2                2
-       sys     sys_getppid             0
-       sys     sys_getpgrp             0       /* 4065 */
-       sys     sys_setsid              0
-       sys     sys_sigaction           3
-       sys     sys_sgetmask            0
-       sys     sys_ssetmask            1
-       sys     sys_setreuid            2       /* 4070 */
-       sys     sys_setregid            2
-       sys     sys_sigsuspend          0
-       sys     sys_sigpending          1
-       sys     sys_sethostname         2
-       sys     sys_setrlimit           2       /* 4075 */
-       sys     sys_getrlimit           2
-       sys     sys_getrusage           2
-       sys     sys_gettimeofday        2
-       sys     sys_settimeofday        2
-       sys     sys_getgroups           2       /* 4080 */
-       sys     sys_setgroups           2
-       sys     sys_ni_syscall          0       /* old_select */
-       sys     sys_symlink             2
-       sys     sys_ni_syscall          0       /* was sys_lstat */
-       sys     sys_readlink            3       /* 4085 */
-       sys     sys_uselib              1
-       sys     sys_swapon              2
-       sys     sys_reboot              3
-       sys     sys_old_readdir         3
-       sys     sys_mips_mmap           6       /* 4090 */
-       sys     sys_munmap              2
-       sys     sys_truncate            2
-       sys     sys_ftruncate           2
-       sys     sys_fchmod              2
-       sys     sys_fchown              3       /* 4095 */
-       sys     sys_getpriority         2
-       sys     sys_setpriority         3
-       sys     sys_ni_syscall          0
-       sys     sys_statfs              2
-       sys     sys_fstatfs             2       /* 4100 */
-       sys     sys_ni_syscall          0       /* was ioperm(2) */
-       sys     sys_socketcall          2
-       sys     sys_syslog              3
-       sys     sys_setitimer           3
-       sys     sys_getitimer           2       /* 4105 */
-       sys     sys_newstat             2
-       sys     sys_newlstat            2
-       sys     sys_newfstat            2
-       sys     sys_uname               1
-       sys     sys_ni_syscall          0       /* 4110 was iopl(2) */
-       sys     sys_vhangup             0
-       sys     sys_ni_syscall          0       /* was sys_idle() */
-       sys     sys_ni_syscall          0       /* was sys_vm86 */
-       sys     sys_wait4               4
-       sys     sys_swapoff             1       /* 4115 */
-       sys     sys_sysinfo             1
-       sys     sys_ipc                 6
-       sys     sys_fsync               1
-       sys     sys_sigreturn           0
-       sys     __sys_clone             6       /* 4120 */
-       sys     sys_setdomainname       2
-       sys     sys_newuname            1
-       sys     sys_ni_syscall          0       /* sys_modify_ldt */
-       sys     sys_adjtimex            1
-       sys     sys_mprotect            3       /* 4125 */
-       sys     sys_sigprocmask         3
-       sys     sys_ni_syscall          0       /* was create_module */
-       sys     sys_init_module         5
-       sys     sys_delete_module       1
-       sys     sys_ni_syscall          0       /* 4130 was get_kernel_syms */
-       sys     sys_quotactl            4
-       sys     sys_getpgid             1
-       sys     sys_fchdir              1
-       sys     sys_bdflush             2
-       sys     sys_sysfs               3       /* 4135 */
-       sys     sys_personality         1
-       sys     sys_ni_syscall          0       /* for afs_syscall */
-       sys     sys_setfsuid            1
-       sys     sys_setfsgid            1
-       sys     sys_llseek              5       /* 4140 */
-       sys     sys_getdents            3
-       sys     sys_select              5
-       sys     sys_flock               2
-       sys     sys_msync               3
-       sys     sys_readv               3       /* 4145 */
-       sys     sys_writev              3
-       sys     sys_cacheflush          3
-       sys     sys_cachectl            3
-       sys     sys_sysmips             4
-       sys     sys_ni_syscall          0       /* 4150 */
-       sys     sys_getsid              1
-       sys     sys_fdatasync           1
-       sys     sys_sysctl              1
-       sys     sys_mlock               2
-       sys     sys_munlock             2       /* 4155 */
-       sys     sys_mlockall            1
-       sys     sys_munlockall          0
-       sys     sys_sched_setparam      2
-       sys     sys_sched_getparam      2
-       sys     sys_sched_setscheduler  3       /* 4160 */
-       sys     sys_sched_getscheduler  1
-       sys     sys_sched_yield         0
-       sys     sys_sched_get_priority_max 1
-       sys     sys_sched_get_priority_min 1
-       sys     sys_sched_rr_get_interval 2     /* 4165 */
-       sys     sys_nanosleep,          2
-       sys     sys_mremap,             5
-       sys     sys_accept              3
-       sys     sys_bind                3
-       sys     sys_connect             3       /* 4170 */
-       sys     sys_getpeername         3
-       sys     sys_getsockname         3
-       sys     sys_getsockopt          5
-       sys     sys_listen              2
-       sys     sys_recv                4       /* 4175 */
-       sys     sys_recvfrom            6
-       sys     sys_recvmsg             3
-       sys     sys_send                4
-       sys     sys_sendmsg             3
-       sys     sys_sendto              6       /* 4180 */
-       sys     sys_setsockopt          5
-       sys     sys_shutdown            2
-       sys     sys_socket              3
-       sys     sys_socketpair          4
-       sys     sys_setresuid           3       /* 4185 */
-       sys     sys_getresuid           3
-       sys     sys_ni_syscall          0       /* was sys_query_module */
-       sys     sys_poll                3
-       sys     sys_ni_syscall          0       /* was nfsservctl */
-       sys     sys_setresgid           3       /* 4190 */
-       sys     sys_getresgid           3
-       sys     sys_prctl               5
-       sys     sys_rt_sigreturn        0
-       sys     sys_rt_sigaction        4
-       sys     sys_rt_sigprocmask      4       /* 4195 */
-       sys     sys_rt_sigpending       2
-       sys     sys_rt_sigtimedwait     4
-       sys     sys_rt_sigqueueinfo     3
-       sys     sys_rt_sigsuspend       0
-       sys     sys_pread64             6       /* 4200 */
-       sys     sys_pwrite64            6
-       sys     sys_chown               3
-       sys     sys_getcwd              2
-       sys     sys_capget              2
-       sys     sys_capset              2       /* 4205 */
-       sys     sys_sigaltstack         0
-       sys     sys_sendfile            4
-       sys     sys_ni_syscall          0
-       sys     sys_ni_syscall          0
-       sys     sys_mips_mmap2          6       /* 4210 */
-       sys     sys_truncate64          4
-       sys     sys_ftruncate64         4
-       sys     sys_stat64              2
-       sys     sys_lstat64             2
-       sys     sys_fstat64             2       /* 4215 */
-       sys     sys_pivot_root          2
-       sys     sys_mincore             3
-       sys     sys_madvise             3
-       sys     sys_getdents64          3
-       sys     sys_fcntl64             3       /* 4220 */
-       sys     sys_ni_syscall          0
-       sys     sys_gettid              0
-       sys     sys_readahead           5
-       sys     sys_setxattr            5
-       sys     sys_lsetxattr           5       /* 4225 */
-       sys     sys_fsetxattr           5
-       sys     sys_getxattr            4
-       sys     sys_lgetxattr           4
-       sys     sys_fgetxattr           4
-       sys     sys_listxattr           3       /* 4230 */
-       sys     sys_llistxattr          3
-       sys     sys_flistxattr          3
-       sys     sys_removexattr         2
-       sys     sys_lremovexattr        2
-       sys     sys_fremovexattr        2       /* 4235 */
-       sys     sys_tkill               2
-       sys     sys_sendfile64          5
-       sys     sys_futex               6
+       .align  2
+       .type   sys_call_table, @object
+EXPORT(sys_call_table)
+       PTR     sys_syscall                     /* 4000 */
+       PTR     sys_exit
+       PTR     __sys_fork
+       PTR     sys_read
+       PTR     sys_write
+       PTR     sys_open                        /* 4005 */
+       PTR     sys_close
+       PTR     sys_waitpid
+       PTR     sys_creat
+       PTR     sys_link
+       PTR     sys_unlink                      /* 4010 */
+       PTR     sys_execve
+       PTR     sys_chdir
+       PTR     sys_time
+       PTR     sys_mknod
+       PTR     sys_chmod                       /* 4015 */
+       PTR     sys_lchown
+       PTR     sys_ni_syscall
+       PTR     sys_ni_syscall                  /* was sys_stat */
+       PTR     sys_lseek
+       PTR     sys_getpid                      /* 4020 */
+       PTR     sys_mount
+       PTR     sys_oldumount
+       PTR     sys_setuid
+       PTR     sys_getuid
+       PTR     sys_stime                       /* 4025 */
+       PTR     sys_ptrace
+       PTR     sys_alarm
+       PTR     sys_ni_syscall                  /* was sys_fstat */
+       PTR     sys_pause
+       PTR     sys_utime                       /* 4030 */
+       PTR     sys_ni_syscall
+       PTR     sys_ni_syscall
+       PTR     sys_access
+       PTR     sys_nice
+       PTR     sys_ni_syscall                  /* 4035 */
+       PTR     sys_sync
+       PTR     sys_kill
+       PTR     sys_rename
+       PTR     sys_mkdir
+       PTR     sys_rmdir                       /* 4040 */
+       PTR     sys_dup
+       PTR     sysm_pipe
+       PTR     sys_times
+       PTR     sys_ni_syscall
+       PTR     sys_brk                         /* 4045 */
+       PTR     sys_setgid
+       PTR     sys_getgid
+       PTR     sys_ni_syscall                  /* was signal(2) */
+       PTR     sys_geteuid
+       PTR     sys_getegid                     /* 4050 */
+       PTR     sys_acct
+       PTR     sys_umount
+       PTR     sys_ni_syscall
+       PTR     sys_ioctl
+       PTR     sys_fcntl                       /* 4055 */
+       PTR     sys_ni_syscall
+       PTR     sys_setpgid
+       PTR     sys_ni_syscall
+       PTR     sys_olduname
+       PTR     sys_umask                       /* 4060 */
+       PTR     sys_chroot
+       PTR     sys_ustat
+       PTR     sys_dup2
+       PTR     sys_getppid
+       PTR     sys_getpgrp                     /* 4065 */
+       PTR     sys_setsid
+       PTR     sys_sigaction
+       PTR     sys_sgetmask
+       PTR     sys_ssetmask
+       PTR     sys_setreuid                    /* 4070 */
+       PTR     sys_setregid
+       PTR     sys_sigsuspend
+       PTR     sys_sigpending
+       PTR     sys_sethostname
+       PTR     sys_setrlimit                   /* 4075 */
+       PTR     sys_getrlimit
+       PTR     sys_getrusage
+       PTR     sys_gettimeofday
+       PTR     sys_settimeofday
+       PTR     sys_getgroups                   /* 4080 */
+       PTR     sys_setgroups
+       PTR     sys_ni_syscall                  /* old_select */
+       PTR     sys_symlink
+       PTR     sys_ni_syscall                  /* was sys_lstat */
+       PTR     sys_readlink                    /* 4085 */
+       PTR     sys_uselib
+       PTR     sys_swapon
+       PTR     sys_reboot
+       PTR     sys_old_readdir
+       PTR     sys_mips_mmap                   /* 4090 */
+       PTR     sys_munmap
+       PTR     sys_truncate
+       PTR     sys_ftruncate
+       PTR     sys_fchmod
+       PTR     sys_fchown                      /* 4095 */
+       PTR     sys_getpriority
+       PTR     sys_setpriority
+       PTR     sys_ni_syscall
+       PTR     sys_statfs
+       PTR     sys_fstatfs                     /* 4100 */
+       PTR     sys_ni_syscall                  /* was ioperm(2) */
+       PTR     sys_socketcall
+       PTR     sys_syslog
+       PTR     sys_setitimer
+       PTR     sys_getitimer                   /* 4105 */
+       PTR     sys_newstat
+       PTR     sys_newlstat
+       PTR     sys_newfstat
+       PTR     sys_uname
+       PTR     sys_ni_syscall                  /* 4110 was iopl(2) */
+       PTR     sys_vhangup
+       PTR     sys_ni_syscall                  /* was sys_idle() */
+       PTR     sys_ni_syscall                  /* was sys_vm86 */
+       PTR     sys_wait4
+       PTR     sys_swapoff                     /* 4115 */
+       PTR     sys_sysinfo
+       PTR     sys_ipc
+       PTR     sys_fsync
+       PTR     sys_sigreturn
+       PTR     __sys_clone                     /* 4120 */
+       PTR     sys_setdomainname
+       PTR     sys_newuname
+       PTR     sys_ni_syscall                  /* sys_modify_ldt */
+       PTR     sys_adjtimex
+       PTR     sys_mprotect                    /* 4125 */
+       PTR     sys_sigprocmask
+       PTR     sys_ni_syscall                  /* was create_module */
+       PTR     sys_init_module
+       PTR     sys_delete_module
+       PTR     sys_ni_syscall                  /* 4130 was get_kernel_syms */
+       PTR     sys_quotactl
+       PTR     sys_getpgid
+       PTR     sys_fchdir
+       PTR     sys_bdflush
+       PTR     sys_sysfs                       /* 4135 */
+       PTR     sys_personality
+       PTR     sys_ni_syscall                  /* for afs_syscall */
+       PTR     sys_setfsuid
+       PTR     sys_setfsgid
+       PTR     sys_llseek                      /* 4140 */
+       PTR     sys_getdents
+       PTR     sys_select
+       PTR     sys_flock
+       PTR     sys_msync
+       PTR     sys_readv                       /* 4145 */
+       PTR     sys_writev
+       PTR     sys_cacheflush
+       PTR     sys_cachectl
+       PTR     sys_sysmips
+       PTR     sys_ni_syscall                  /* 4150 */
+       PTR     sys_getsid
+       PTR     sys_fdatasync
+       PTR     sys_sysctl
+       PTR     sys_mlock
+       PTR     sys_munlock                     /* 4155 */
+       PTR     sys_mlockall
+       PTR     sys_munlockall
+       PTR     sys_sched_setparam
+       PTR     sys_sched_getparam
+       PTR     sys_sched_setscheduler          /* 4160 */
+       PTR     sys_sched_getscheduler
+       PTR     sys_sched_yield
+       PTR     sys_sched_get_priority_max
+       PTR     sys_sched_get_priority_min
+       PTR     sys_sched_rr_get_interval       /* 4165 */
+       PTR     sys_nanosleep
+       PTR     sys_mremap
+       PTR     sys_accept
+       PTR     sys_bind
+       PTR     sys_connect                     /* 4170 */
+       PTR     sys_getpeername
+       PTR     sys_getsockname
+       PTR     sys_getsockopt
+       PTR     sys_listen
+       PTR     sys_recv                        /* 4175 */
+       PTR     sys_recvfrom
+       PTR     sys_recvmsg
+       PTR     sys_send
+       PTR     sys_sendmsg
+       PTR     sys_sendto                      /* 4180 */
+       PTR     sys_setsockopt
+       PTR     sys_shutdown
+       PTR     sys_socket
+       PTR     sys_socketpair
+       PTR     sys_setresuid                   /* 4185 */
+       PTR     sys_getresuid
+       PTR     sys_ni_syscall                  /* was sys_query_module */
+       PTR     sys_poll
+       PTR     sys_ni_syscall                  /* was nfsservctl */
+       PTR     sys_setresgid                   /* 4190 */
+       PTR     sys_getresgid
+       PTR     sys_prctl
+       PTR     sys_rt_sigreturn
+       PTR     sys_rt_sigaction
+       PTR     sys_rt_sigprocmask              /* 4195 */
+       PTR     sys_rt_sigpending
+       PTR     sys_rt_sigtimedwait
+       PTR     sys_rt_sigqueueinfo
+       PTR     sys_rt_sigsuspend
+       PTR     sys_pread64                     /* 4200 */
+       PTR     sys_pwrite64
+       PTR     sys_chown
+       PTR     sys_getcwd
+       PTR     sys_capget
+       PTR     sys_capset                      /* 4205 */
+       PTR     sys_sigaltstack
+       PTR     sys_sendfile
+       PTR     sys_ni_syscall
+       PTR     sys_ni_syscall
+       PTR     sys_mips_mmap2                  /* 4210 */
+       PTR     sys_truncate64
+       PTR     sys_ftruncate64
+       PTR     sys_stat64
+       PTR     sys_lstat64
+       PTR     sys_fstat64                     /* 4215 */
+       PTR     sys_pivot_root
+       PTR     sys_mincore
+       PTR     sys_madvise
+       PTR     sys_getdents64
+       PTR     sys_fcntl64                     /* 4220 */
+       PTR     sys_ni_syscall
+       PTR     sys_gettid
+       PTR     sys_readahead
+       PTR     sys_setxattr
+       PTR     sys_lsetxattr                   /* 4225 */
+       PTR     sys_fsetxattr
+       PTR     sys_getxattr
+       PTR     sys_lgetxattr
+       PTR     sys_fgetxattr
+       PTR     sys_listxattr                   /* 4230 */
+       PTR     sys_llistxattr
+       PTR     sys_flistxattr
+       PTR     sys_removexattr
+       PTR     sys_lremovexattr
+       PTR     sys_fremovexattr                /* 4235 */
+       PTR     sys_tkill
+       PTR     sys_sendfile64
+       PTR     sys_futex
 #ifdef CONFIG_MIPS_MT_FPAFF
        /*
         * For FPU affinity scheduling on MIPS MT processors, we need to
@@ -480,132 +449,117 @@ einval: li      v0, -ENOSYS
         * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
         * atm.
         */
-       sys     mipsmt_sys_sched_setaffinity    3
-       sys     mipsmt_sys_sched_getaffinity    3
+       PTR     mipsmt_sys_sched_setaffinity
+       PTR     mipsmt_sys_sched_getaffinity
 #else
-       sys     sys_sched_setaffinity   3
-       sys     sys_sched_getaffinity   3       /* 4240 */
+       PTR     sys_sched_setaffinity
+       PTR     sys_sched_getaffinity           /* 4240 */
 #endif /* CONFIG_MIPS_MT_FPAFF */
-       sys     sys_io_setup            2
-       sys     sys_io_destroy          1
-       sys     sys_io_getevents        5
-       sys     sys_io_submit           3
-       sys     sys_io_cancel           3       /* 4245 */
-       sys     sys_exit_group          1
-       sys     sys_lookup_dcookie      4
-       sys     sys_epoll_create        1
-       sys     sys_epoll_ctl           4
-       sys     sys_epoll_wait          4       /* 4250 */
-       sys     sys_remap_file_pages    5
-       sys     sys_set_tid_address     1
-       sys     sys_restart_syscall     0
-       sys     sys_fadvise64_64        7
-       sys     sys_statfs64            3       /* 4255 */
-       sys     sys_fstatfs64           2
-       sys     sys_timer_create        3
-       sys     sys_timer_settime       4
-       sys     sys_timer_gettime       2
-       sys     sys_timer_getoverrun    1       /* 4260 */
-       sys     sys_timer_delete        1
-       sys     sys_clock_settime       2
-       sys     sys_clock_gettime       2
-       sys     sys_clock_getres        2
-       sys     sys_clock_nanosleep     4       /* 4265 */
-       sys     sys_tgkill              3
-       sys     sys_utimes              2
-       sys     sys_mbind               4
-       sys     sys_ni_syscall          0       /* sys_get_mempolicy */
-       sys     sys_ni_syscall          0       /* 4270 sys_set_mempolicy */
-       sys     sys_mq_open             4
-       sys     sys_mq_unlink           1
-       sys     sys_mq_timedsend        5
-       sys     sys_mq_timedreceive     5
-       sys     sys_mq_notify           2       /* 4275 */
-       sys     sys_mq_getsetattr       3
-       sys     sys_ni_syscall          0       /* sys_vserver */
-       sys     sys_waitid              5
-       sys     sys_ni_syscall          0       /* available, was setaltroot */
-       sys     sys_add_key             5       /* 4280 */
-       sys     sys_request_key         4
-       sys     sys_keyctl              5
-       sys     sys_set_thread_area     1
-       sys     sys_inotify_init        0
-       sys     sys_inotify_add_watch   3       /* 4285 */
-       sys     sys_inotify_rm_watch    2
-       sys     sys_migrate_pages       4
-       sys     sys_openat              4
-       sys     sys_mkdirat             3
-       sys     sys_mknodat             4       /* 4290 */
-       sys     sys_fchownat            5
-       sys     sys_futimesat           3
-       sys     sys_fstatat64           4
-       sys     sys_unlinkat            3
-       sys     sys_renameat            4       /* 4295 */
-       sys     sys_linkat              5
-       sys     sys_symlinkat           3
-       sys     sys_readlinkat          4
-       sys     sys_fchmodat            3
-       sys     sys_faccessat           3       /* 4300 */
-       sys     sys_pselect6            6
-       sys     sys_ppoll               5
-       sys     sys_unshare             1
-       sys     sys_splice              6
-       sys     sys_sync_file_range     7       /* 4305 */
-       sys     sys_tee                 4
-       sys     sys_vmsplice            4
-       sys     sys_move_pages          6
-       sys     sys_set_robust_list     2
-       sys     sys_get_robust_list     3       /* 4310 */
-       sys     sys_kexec_load          4
-       sys     sys_getcpu              3
-       sys     sys_epoll_pwait         6
-       sys     sys_ioprio_set          3
-       sys     sys_ioprio_get          2       /* 4315 */
-       sys     sys_utimensat           4
-       sys     sys_signalfd            3
-       sys     sys_ni_syscall          0       /* was timerfd */
-       sys     sys_eventfd             1
-       sys     sys_fallocate           6       /* 4320 */
-       sys     sys_timerfd_create      2
-       sys     sys_timerfd_gettime     2
-       sys     sys_timerfd_settime     4
-       sys     sys_signalfd4           4
-       sys     sys_eventfd2            2       /* 4325 */
-       sys     sys_epoll_create1       1
-       sys     sys_dup3                3
-       sys     sys_pipe2               2
-       sys     sys_inotify_init1       1
-       sys     sys_preadv              6       /* 4330 */
-       sys     sys_pwritev             6
-       sys     sys_rt_tgsigqueueinfo   4
-       sys     sys_perf_event_open     5
-       sys     sys_accept4             4
-       sys     sys_recvmmsg            5       /* 4335 */
-       sys     sys_fanotify_init       2
-       sys     sys_fanotify_mark       6
-       sys     sys_prlimit64           4
-       sys     sys_name_to_handle_at   5
-       sys     sys_open_by_handle_at   3       /* 4340 */
-       sys     sys_clock_adjtime       2
-       sys     sys_syncfs              1
-       sys     sys_sendmmsg            4
-       sys     sys_setns               2
-       sys     sys_process_vm_readv    6       /* 4345 */
-       sys     sys_process_vm_writev   6
-       sys     sys_kcmp                5
-       sys     sys_finit_module        3
-       .endm
-
-       /* We pre-compute the number of _instruction_ bytes needed to
-          load or store the arguments 6-8. Negative values are ignored. */
-
-       .macro  sys function, nargs
-       PTR     \function
-       LONG    (\nargs << 2) - (5 << 2)
-       .endm
-
-       .align  3
-       .type   sys_call_table,@object
-EXPORT(sys_call_table)
-       syscalltable
-       .size   sys_call_table, . - sys_call_table
+       PTR     sys_io_setup
+       PTR     sys_io_destroy
+       PTR     sys_io_getevents
+       PTR     sys_io_submit
+       PTR     sys_io_cancel                   /* 4245 */
+       PTR     sys_exit_group
+       PTR     sys_lookup_dcookie
+       PTR     sys_epoll_create
+       PTR     sys_epoll_ctl
+       PTR     sys_epoll_wait                  /* 4250 */
+       PTR     sys_remap_file_pages
+       PTR     sys_set_tid_address
+       PTR     sys_restart_syscall
+       PTR     sys_fadvise64_64
+       PTR     sys_statfs64                    /* 4255 */
+       PTR     sys_fstatfs64
+       PTR     sys_timer_create
+       PTR     sys_timer_settime
+       PTR     sys_timer_gettime
+       PTR     sys_timer_getoverrun            /* 4260 */
+       PTR     sys_timer_delete
+       PTR     sys_clock_settime
+       PTR     sys_clock_gettime
+       PTR     sys_clock_getres
+       PTR     sys_clock_nanosleep             /* 4265 */
+       PTR     sys_tgkill
+       PTR     sys_utimes
+       PTR     sys_mbind
+       PTR     sys_ni_syscall                  /* sys_get_mempolicy */
+       PTR     sys_ni_syscall                  /* 4270 sys_set_mempolicy */
+       PTR     sys_mq_open
+       PTR     sys_mq_unlink
+       PTR     sys_mq_timedsend
+       PTR     sys_mq_timedreceive
+       PTR     sys_mq_notify                   /* 4275 */
+       PTR     sys_mq_getsetattr
+       PTR     sys_ni_syscall                  /* sys_vserver */
+       PTR     sys_waitid
+       PTR     sys_ni_syscall                  /* available, was setaltroot */
+       PTR     sys_add_key                     /* 4280 */
+       PTR     sys_request_key
+       PTR     sys_keyctl
+       PTR     sys_set_thread_area
+       PTR     sys_inotify_init
+       PTR     sys_inotify_add_watch           /* 4285 */
+       PTR     sys_inotify_rm_watch
+       PTR     sys_migrate_pages
+       PTR     sys_openat
+       PTR     sys_mkdirat
+       PTR     sys_mknodat                     /* 4290 */
+       PTR     sys_fchownat
+       PTR     sys_futimesat
+       PTR     sys_fstatat64
+       PTR     sys_unlinkat
+       PTR     sys_renameat                    /* 4295 */
+       PTR     sys_linkat
+       PTR     sys_symlinkat
+       PTR     sys_readlinkat
+       PTR     sys_fchmodat
+       PTR     sys_faccessat                   /* 4300 */
+       PTR     sys_pselect6
+       PTR     sys_ppoll
+       PTR     sys_unshare
+       PTR     sys_splice
+       PTR     sys_sync_file_range             /* 4305 */
+       PTR     sys_tee
+       PTR     sys_vmsplice
+       PTR     sys_move_pages
+       PTR     sys_set_robust_list
+       PTR     sys_get_robust_list             /* 4310 */
+       PTR     sys_kexec_load
+       PTR     sys_getcpu
+       PTR     sys_epoll_pwait
+       PTR     sys_ioprio_set
+       PTR     sys_ioprio_get                  /* 4315 */
+       PTR     sys_utimensat
+       PTR     sys_signalfd
+       PTR     sys_ni_syscall                  /* was timerfd */
+       PTR     sys_eventfd
+       PTR     sys_fallocate                   /* 4320 */
+       PTR     sys_timerfd_create
+       PTR     sys_timerfd_gettime
+       PTR     sys_timerfd_settime
+       PTR     sys_signalfd4
+       PTR     sys_eventfd2                    /* 4325 */
+       PTR     sys_epoll_create1
+       PTR     sys_dup3
+       PTR     sys_pipe2
+       PTR     sys_inotify_init1
+       PTR     sys_preadv                      /* 4330 */
+       PTR     sys_pwritev
+       PTR     sys_rt_tgsigqueueinfo
+       PTR     sys_perf_event_open
+       PTR     sys_accept4
+       PTR     sys_recvmmsg                    /* 4335 */
+       PTR     sys_fanotify_init
+       PTR     sys_fanotify_mark
+       PTR     sys_prlimit64
+       PTR     sys_name_to_handle_at
+       PTR     sys_open_by_handle_at           /* 4340 */
+       PTR     sys_clock_adjtime
+       PTR     sys_syncfs
+       PTR     sys_sendmmsg
+       PTR     sys_setns
+       PTR     sys_process_vm_readv            /* 4345 */
+       PTR     sys_process_vm_writev
+       PTR     sys_kcmp
+       PTR     sys_finit_module
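
The hunk above replaces the old "sys function, nargs" macro entries (each of which also emitted a word derived from the argument count) with a flat table of code pointers, one PTR per o32 call number starting at 4000, so the dispatcher only needs an indexed load and an indirect jump. A minimal userspace sketch of that layout, assuming hypothetical demo_* handler names and using the o32 numbers 4003/4004 for read/write as shown elsewhere in this diff:

    /*
     * Illustrative sketch only: a flat table of handler pointers indexed by
     * call number, mirroring how the PTR entries above lay out the table.
     * The handler names and types are invented; the 4000 base and the
     * read/write slot numbers come from the diff.
     */
    #include <stddef.h>
    #include <stdio.h>

    typedef long (*syscall_fn)(long a0, long a1, long a2);

    static long demo_read(long fd, long buf, long len)  { return 0; }
    static long demo_write(long fd, long buf, long len) { return len; }
    static long demo_ni(long a0, long a1, long a2)      { return -38; /* ENOSYS */ }

    /* One pointer per slot, like the PTR directives; unimplemented slots
     * point at the "not implemented" stub instead of being left empty. */
    static const syscall_fn call_table[] = {
        demo_ni,    /* 4000 */
        demo_ni,    /* 4001 */
        demo_ni,    /* 4002 */
        demo_read,  /* 4003 */
        demo_write, /* 4004 */
    };

    #define TABLE_BASE 4000

    static long dispatch(long nr, long a0, long a1, long a2)
    {
        size_t idx = nr - TABLE_BASE;

        if (idx >= sizeof(call_table) / sizeof(call_table[0]))
            return -38;
        return call_table[idx](a0, a1, a2); /* indexed load + indirect call */
    }

    int main(void)
    {
        printf("write(4004) -> %ld\n", dispatch(4004, 1, 0, 5));
        printf("bogus(4999) -> %ld\n", dispatch(4999, 0, 0, 0));
        return 0;
    }
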
index be6627ead619e72b35bea9f8d031af3b21a2b06a..57e3742fec59a19083eb3cb5f802b9e43a90b66b 100644 (file)
@@ -114,7 +114,8 @@ illegal_syscall:
        END(handle_sys64)
 
        .align  3
-sys_call_table:
+       .type   sys_call_table, @object
+EXPORT(sys_call_table)
        PTR     sys_read                        /* 5000 */
        PTR     sys_write
        PTR     sys_open
index cab150789c8d8412409506c99143a45f04717e80..2f48f5934399e3b48a14cd88f2bf84fba0d46894 100644 (file)
@@ -103,6 +103,7 @@ not_n32_scall:
 
        END(handle_sysn32)
 
+       .type   sysn32_call_table, @object
 EXPORT(sysn32_call_table)
        PTR     sys_read                        /* 6000 */
        PTR     sys_write
index 37605dc8eef7a9c72d5743ad3ad5d1f50088fdfb..f1acdb429f4fa1d89ee8db664f5fd25005c978a1 100644 (file)
@@ -53,7 +53,7 @@ NESTED(handle_sys, PT_SIZE, sp)
        sll     a3, a3, 0
 
        dsll    t0, v0, 3               # offset into table
-       ld      t2, (sys_call_table - (__NR_O32_Linux * 8))(t0)
+       ld      t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
 
        sd      a3, PT_R26(sp)          # save a3 for syscall restarting
 
@@ -168,7 +168,7 @@ LEAF(sys32_syscall)
        beqz    t0, einval              # do not recurse
        dsll    t1, t0, 3
        beqz    v0, einval
-       ld      t2, sys_call_table(t1)          # syscall routine
+       ld      t2, sys32_call_table(t1)                # syscall routine
 
        move    a0, a1                  # shift argument registers
        move    a1, a2
@@ -190,8 +190,8 @@ einval: li  v0, -ENOSYS
        END(sys32_syscall)
 
        .align  3
-       .type   sys_call_table,@object
-sys_call_table:
+       .type   sys32_call_table,@object
+EXPORT(sys32_call_table)
        PTR     sys32_syscall                   /* 4000 */
        PTR     sys_exit
        PTR     __sys_fork
@@ -541,4 +541,4 @@ sys_call_table:
        PTR     compat_sys_process_vm_writev
        PTR     sys_kcmp
        PTR     sys_finit_module
-       .size   sys_call_table,.-sys_call_table
+       .size   sys32_call_table,.-sys32_call_table
index c538d6e01b7b744cb4af330a5de15b78963cfa7f..a842154d57dc466eaba000039b3fd5e436c6cb69 100644 (file)
@@ -300,12 +300,13 @@ static void __init bootmem_init(void)
        int i;
 
        /*
-        * Init any data related to initrd. It's a nop if INITRD is
-        * not selected. Once that done we can determine the low bound
-        * of usable memory.
+        * Sanity check any INITRD first. We don't take it into account
+        * for bootmem setup initially, rely on the end-of-kernel-code
+        * as our memory range starting point. Once bootmem is inited we
+        * will reserve the area used for the initrd.
         */
-       reserved_end = max(init_initrd(),
-                          (unsigned long) PFN_UP(__pa_symbol(&_end)));
+       init_initrd();
+       reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
 
        /*
         * max_low_pfn is not a number of pages. The number of pages
@@ -362,6 +363,14 @@ static void __init bootmem_init(void)
                max_low_pfn = PFN_DOWN(HIGHMEM_START);
        }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+       /*
+        * mapstart should be after initrd_end
+        */
+       if (initrd_end)
+               mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
        /*
         * Initialize the boot-time allocator with low memory only.
         */
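
The bootmem hunks above take the end of the kernel image as the initial lower bound and, when an initrd is present, push mapstart up to the first page frame past initrd_end. A small standalone sketch of that page-frame rounding, with PFN_UP() reimplemented locally and made-up addresses and page size:

    /*
     * Sketch of the page-frame arithmetic used above. PFN_UP(), the page
     * size and the addresses are local stand-ins; only the rounding rule
     * (round an end address up to a whole page frame) mirrors the diff.
     */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    static unsigned long max_ul(unsigned long a, unsigned long b)
    {
        return a > b ? a : b;
    }

    int main(void)
    {
        unsigned long kernel_end = 0x0081234cUL; /* pretend __pa_symbol(&_end) */
        unsigned long initrd_end = 0x00a00010UL; /* pretend __pa(initrd_end)   */
        unsigned long mapstart;

        /* Low bound of usable memory: first full page after the kernel image. */
        mapstart = PFN_UP(kernel_end);

        /* With an initrd present, push mapstart past it as well. */
        mapstart = max_ul(mapstart, PFN_UP(initrd_end));

        printf("mapstart = pfn %#lx (addr %#lx)\n",
               mapstart, mapstart << PAGE_SHIFT);
        return 0;
    }
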
index 126da74d4c5559faf40962b80bddef9a39721757..2362665ba4965f2b3ff272a34bb5aca6360f8897 100644 (file)
@@ -136,10 +136,10 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
 {
        if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
                        "smp_ipi0", NULL))
-               panic("Can't request IPI0 interrupt\n");
+               panic("Can't request IPI0 interrupt");
        if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
                        "smp_ipi1", NULL))
-               panic("Can't request IPI1 interrupt\n");
+               panic("Can't request IPI1 interrupt");
 }
 
 /*
index 5c208ed8f8561b461e3b2a8b1f0e49a8a458f8d2..0a022ee33b2a15c5c9722e2e9aa5504358ba05f4 100644 (file)
@@ -150,7 +150,6 @@ asmlinkage void start_secondary(void)
 void __irq_entry smp_call_function_interrupt(void)
 {
        irq_enter();
-       generic_smp_call_function_single_interrupt();
        generic_smp_call_function_interrupt();
        irq_exit();
 }
index 524841f0280370600966d20f38b41d0eefdb4744..f9c8746be8d66d78b3ad1fc500d72674d486cdb9 100644 (file)
@@ -330,6 +330,7 @@ void show_regs(struct pt_regs *regs)
 void show_registers(struct pt_regs *regs)
 {
        const int field = 2 * sizeof(unsigned long);
+       mm_segment_t old_fs = get_fs();
 
        __show_regs(regs);
        print_modules();
@@ -344,9 +345,13 @@ void show_registers(struct pt_regs *regs)
                        printk("*HwTLS: %0*lx\n", field, tls);
        }
 
+       if (!user_mode(regs))
+               /* Necessary for getting the correct stack content */
+               set_fs(KERNEL_DS);
        show_stacktrace(current, regs);
        show_code((unsigned int __user *) regs->cp0_epc);
        printk("\n");
+       set_fs(old_fs);
 }
 
 static int regs_to_trapnr(struct pt_regs *regs)
@@ -366,7 +371,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
 
        oops_enter();
 
-       if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
+       if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
+                      SIGSEGV) == NOTIFY_STOP)
                sig = 0;
 
        console_verbose();
@@ -457,8 +463,8 @@ asmlinkage void do_be(struct pt_regs *regs)
        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
               data ? "Data" : "Instruction",
               field, regs->cp0_epc, field, regs->regs[31]);
-       if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
-           == NOTIFY_STOP)
+       if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
+                      SIGBUS) == NOTIFY_STOP)
                goto out;
 
        die_if_kernel("Oops", regs);
@@ -727,8 +733,8 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
        siginfo_t info = {0};
 
        prev_state = exception_enter();
-       if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
-           == NOTIFY_STOP)
+       if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
+                      SIGFPE) == NOTIFY_STOP)
                goto out;
        die_if_kernel("FP exception in kernel code", regs);
 
@@ -798,7 +804,8 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
                return;
 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 
-       if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+       if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
+                      SIGTRAP) == NOTIFY_STOP)
                return;
 
        /*
@@ -892,12 +899,14 @@ asmlinkage void do_bp(struct pt_regs *regs)
         */
        switch (bcode) {
        case BRK_KPROBE_BP:
-               if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+               if (notify_die(DIE_BREAK, "debug", regs, bcode,
+                              regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
                        goto out;
                else
                        break;
        case BRK_KPROBE_SSTEPBP:
-               if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+               if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
+                              regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
                        goto out;
                else
                        break;
@@ -961,8 +970,8 @@ asmlinkage void do_ri(struct pt_regs *regs)
        int status = -1;
 
        prev_state = exception_enter();
-       if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
-           == NOTIFY_STOP)
+       if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
+                      SIGILL) == NOTIFY_STOP)
                goto out;
 
        die_if_kernel("Reserved instruction in kernel code", regs);
@@ -1488,10 +1497,14 @@ int register_nmi_notifier(struct notifier_block *nb)
 
 void __noreturn nmi_exception_handler(struct pt_regs *regs)
 {
+       char str[100];
+
        raw_notifier_call_chain(&nmi_chain, 0, regs);
        bust_spinlocks(1);
-       printk("NMI taken!!!!\n");
-       die("NMI", regs);
+       snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
+                smp_processor_id(), regs->cp0_epc);
+       regs->cp0_epc = read_c0_errorepc();
+       die(str, regs);
 }
 
 #define VECTORSPACING 0x100    /* for EI/VI mode */
@@ -1554,7 +1567,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
        unsigned char *b;
 
        BUG_ON(!cpu_has_veic && !cpu_has_vint);
-       BUG_ON((n < 0) && (n > 9));
 
        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
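
In the show_registers() hunk above, the address limit is widened with set_fs(KERNEL_DS) only for kernel-mode traps, and the previous limit is restored unconditionally afterwards. A simplified stand-in for that save/override/restore pattern (the seg_t type and the helpers below are invented for the demo, not the kernel's API):

    /*
     * Sketch: remember the caller's limit, widen it only while dumping a
     * kernel-mode stack, always put the old value back on the way out.
     */
    #include <stdio.h>

    typedef enum { USER_LIMIT, KERNEL_LIMIT } seg_t;

    static seg_t current_limit = USER_LIMIT;

    static seg_t get_limit(void)    { return current_limit; }
    static void  set_limit(seg_t s) { current_limit = s; }

    static void dump_stack_demo(void)
    {
        printf("dumping with %s limit\n",
               current_limit == KERNEL_LIMIT ? "kernel" : "user");
    }

    static void show_registers_demo(int from_user_mode)
    {
        seg_t old = get_limit();         /* remember the caller's limit     */

        if (!from_user_mode)
            set_limit(KERNEL_LIMIT);     /* widen only for kernel-mode oops */
        dump_stack_demo();
        set_limit(old);                  /* always restore afterwards       */
    }

    int main(void)
    {
        show_registers_demo(0);
        show_registers_demo(1);
        return 0;
    }
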
index eb3e186596304f480987400ae731f30726923564..85685e1cdb89479ac57de4e3614e69bf6b7cbb2c 100644 (file)
@@ -390,7 +390,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
                ret = of_irq_to_resource_table(eiu_node,
                                                ltq_eiu_irq, exin_avail);
                if (ret != exin_avail)
-                       panic("failed to load external irq resources\n");
+                       panic("failed to load external irq resources");
 
                if (request_mem_region(res.start, resource_size(&res),
                                                        res.name) < 0)
index c24924fe087da2cf05f7f1828d183c79e8f810ad..51804b10a0360317392ebdbc902e59b8e7f4343d 100644 (file)
@@ -128,7 +128,7 @@ static int pmu_enable(struct clk *clk)
        do {} while (--retry && (pmu_r32(PWDSR(clk->module)) & clk->bits));
 
        if (!retry)
-               panic("activating PMU module failed!\n");
+               panic("activating PMU module failed!");
 
        return 0;
 }
index bc6f96fcb529d53d16a04b09754a9e4395fd457f..62ffd20ea86909be81906691dd1531a35733c410 100644 (file)
@@ -346,14 +346,8 @@ static void r4k_blast_scache_setup(void)
 
 static inline void local_r4k___flush_cache_all(void * args)
 {
-#if defined(CONFIG_CPU_LOONGSON2)
-       r4k_blast_scache();
-       return;
-#endif
-       r4k_blast_dcache();
-       r4k_blast_icache();
-
        switch (current_cpu_type()) {
+       case CPU_LOONGSON2:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
@@ -361,7 +355,18 @@ static inline void local_r4k___flush_cache_all(void * args)
        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
+               /*
+                * These caches are inclusive caches, that is, if something
+                * is not cached in the S-cache, we know it also won't be
+                * in one of the primary caches.
+                */
                r4k_blast_scache();
+               break;
+
+       default:
+               r4k_blast_dcache();
+               r4k_blast_icache();
+               break;
        }
 }
 
@@ -572,8 +577,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
 
        if (end - start > icache_size)
                r4k_blast_icache();
-       else
-               protected_blast_icache_range(start, end);
+       else {
+               switch (boot_cpu_type()) {
+               case CPU_LOONGSON2:
+               case CPU_LOONGSON2:
+                       protected_loongson23_blast_icache_range(start, end);
+                       break;
+
+               default:
+                       protected_blast_icache_range(start, end);
+               }
+       }
 }
 
 static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -1109,15 +1123,14 @@ static void probe_pcache(void)
        case CPU_ALCHEMY:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;
-       }
 
-#ifdef CONFIG_CPU_LOONGSON2
-       /*
-        * LOONGSON2 has 4 way icache, but when using indexed cache op,
-        * one op will act on all 4 ways
-        */
-       c->icache.ways = 1;
-#endif
+       case CPU_LOONGSON2:
+               /*
+                * LOONGSON2 has 4 way icache, but when using indexed cache op,
+                * one op will act on all 4 ways
+                */
+               c->icache.ways = 1;
+       }
 
        printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
               icache_size >> 10,
@@ -1193,7 +1206,6 @@ static int probe_scache(void)
        return 1;
 }
 
-#if defined(CONFIG_CPU_LOONGSON2)
 static void __init loongson2_sc_init(void)
 {
        struct cpuinfo_mips *c = &current_cpu_data;
@@ -1209,7 +1221,6 @@ static void __init loongson2_sc_init(void)
 
        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
 }
-#endif
 
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
@@ -1259,11 +1270,10 @@ static void setup_scache(void)
 #endif
                return;
 
-#if defined(CONFIG_CPU_LOONGSON2)
        case CPU_LOONGSON2:
                loongson2_sc_init();
                return;
-#endif
+
        case CPU_XLP:
                /* don't need to worry about L2, fully coherent */
                return;
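
The c-r4k.c changes above trade CONFIG_CPU_LOONGSON2 #ifdef blocks for runtime switches on the boot CPU type, so a single binary can pick the inclusive-secondary-cache path or the primary-cache path. An illustrative sketch of that selection, with invented enum values and stub flush routines:

    /*
     * Sketch of the #ifdef-to-runtime-switch conversion: instead of
     * compiling in one cache-flush path, pick it per boot-time CPU type.
     * The enum values and flush stubs are stand-ins, not the kernel's.
     */
    #include <stdio.h>

    enum cpu_type { CPU_GENERIC_R4K, CPU_LOONGSON2_DEMO, CPU_R10000_DEMO };

    static void blast_scache(void) { puts("blast S-cache (inclusive: covers I+D)"); }
    static void blast_dcache(void) { puts("blast D-cache"); }
    static void blast_icache(void) { puts("blast I-cache"); }

    static void flush_cache_all_demo(enum cpu_type cpu)
    {
        switch (cpu) {
        case CPU_LOONGSON2_DEMO:
        case CPU_R10000_DEMO:
            /* Inclusive secondary cache: flushing it alone is enough. */
            blast_scache();
            break;
        default:
            /* No inclusive S-cache: hit both primary caches. */
            blast_dcache();
            blast_icache();
            break;
        }
    }

    int main(void)
    {
        flush_cache_all_demo(CPU_GENERIC_R4K);
        flush_cache_all_demo(CPU_LOONGSON2_DEMO);
        return 0;
    }
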
index 5f8b955125801935f33370559476ca93f640c4df..2e9418562258754dacb511341b499965022760aa 100644 (file)
@@ -297,7 +297,6 @@ static void mips_dma_sync_single_for_cpu(struct device *dev,
 static void mips_dma_sync_single_for_device(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-       plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
@@ -327,7 +326,7 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
 
 int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-       return plat_dma_mapping_error(dev, dma_addr);
+       return 0;
 }
 
 int mips_dma_supported(struct device *dev, u64 mask)
@@ -340,7 +339,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
        BUG_ON(direction == DMA_NONE);
 
-       plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev))
                __dma_sync_virtual(vaddr, size, direction);
 }
index 79bca3130bd15f51bccae23012fa9a821cadb653..30a494db99c2a0eb4d51aa64ca410de956801837 100644 (file)
 
 #define FASTPATH_SIZE  128
 
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 LEAF(tlbmiss_handler_setup_pgd)
        .space          16 * 4
 END(tlbmiss_handler_setup_pgd)
 EXPORT(tlbmiss_handler_setup_pgd_end)
-#endif
 
 LEAF(handle_tlbm)
        .space          FASTPATH_SIZE * 4
index bb3a5f643e974a27a7d81246d4d7bb6b5f13a390..da3b0b9c9eae0a9800dfbbd20f6717709e72bfed 100644 (file)
@@ -52,21 +52,26 @@ extern void build_tlb_refill_handler(void);
 
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-#if defined(CONFIG_CPU_LOONGSON2)
 /*
  * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
  * unfortunately, itlb is not totally transparent to software.
  */
-#define FLUSH_ITLB write_c0_diag(4);
-
-#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }
-
-#else
-
-#define FLUSH_ITLB
-#define FLUSH_ITLB_VM(vma)
+static inline void flush_itlb(void)
+{
+       switch (current_cpu_type()) {
+       case CPU_LOONGSON2:
+               write_c0_diag(4);
+               break;
+       default:
+               break;
+       }
+}
 
-#endif
+static inline void flush_itlb_vm(struct vm_area_struct *vma)
+{
+       if (vma->vm_flags & VM_EXEC)
+               flush_itlb();
+}
 
 void local_flush_tlb_all(void)
 {
@@ -93,7 +98,7 @@ void local_flush_tlb_all(void)
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
-       FLUSH_ITLB;
+       flush_itlb();
        EXIT_CRITICAL(flags);
 }
 EXPORT_SYMBOL(local_flush_tlb_all);
@@ -155,7 +160,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                } else {
                        drop_mmu_context(mm, cpu);
                }
-               FLUSH_ITLB;
+               flush_itlb();
                EXIT_CRITICAL(flags);
        }
 }
@@ -197,7 +202,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
        } else {
                local_flush_tlb_all();
        }
-       FLUSH_ITLB;
+       flush_itlb();
        EXIT_CRITICAL(flags);
 }
 
@@ -230,7 +235,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 
        finish:
                write_c0_entryhi(oldpid);
-               FLUSH_ITLB_VM(vma);
+               flush_itlb_vm(vma);
                EXIT_CRITICAL(flags);
        }
 }
@@ -262,7 +267,7 @@ void local_flush_tlb_one(unsigned long page)
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
-       FLUSH_ITLB;
+       flush_itlb();
        EXIT_CRITICAL(flags);
 }
 
@@ -335,7 +340,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
-       FLUSH_ITLB_VM(vma);
+       flush_itlb_vm(vma);
        EXIT_CRITICAL(flags);
 }
 
index 9bb3a9363b0618df3e19a43fafc68eb91dff18ba..183f2b583e4dbc7798411c4fd8c6c3927bff607f 100644 (file)
@@ -340,10 +340,6 @@ static struct work_registers build_get_work_registers(u32 **p)
 {
        struct work_registers r;
 
-       int smp_processor_id_reg;
-       int smp_processor_id_sel;
-       int smp_processor_id_shift;
-
        if (scratch_reg >= 0) {
                /* Save in CPU local C0_KScratch? */
                UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
@@ -354,25 +350,9 @@ static struct work_registers build_get_work_registers(u32 **p)
        }
 
        if (num_possible_cpus() > 1) {
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-               smp_processor_id_shift = 51;
-               smp_processor_id_reg = 20; /* XContext */
-               smp_processor_id_sel = 0;
-#else
-# ifdef CONFIG_32BIT
-               smp_processor_id_shift = 25;
-               smp_processor_id_reg = 4; /* Context */
-               smp_processor_id_sel = 0;
-# endif
-# ifdef CONFIG_64BIT
-               smp_processor_id_shift = 26;
-               smp_processor_id_reg = 4; /* Context */
-               smp_processor_id_sel = 0;
-# endif
-#endif
                /* Get smp_processor_id */
-               UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
-               UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
+               UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
+               UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);
 
                /* handler_reg_save index in K0 */
                UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
@@ -819,11 +799,11 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
        }
        /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */
 
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
        if (pgd_reg != -1) {
                /* pgd is in pgd_reg */
                UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
        } else {
+#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
                /*
                 * &pgd << 11 stored in CONTEXT [23..63].
                 */
@@ -835,30 +815,18 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                /* 1 0  1 0 1  << 6  xkphys cached */
                uasm_i_ori(p, ptr, ptr, 0x540);
                uasm_i_drotr(p, ptr, ptr, 11);
-       }
 #elif defined(CONFIG_SMP)
-# ifdef         CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC uses TCBind value as "CPU" index
-        */
-       uasm_i_mfc0(p, ptr, C0_TCBIND);
-       uasm_i_dsrl_safe(p, ptr, ptr, 19);
-# else
-       /*
-        * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
-        * stored in CONTEXT.
-        */
-       uasm_i_dmfc0(p, ptr, C0_CONTEXT);
-       uasm_i_dsrl_safe(p, ptr, ptr, 23);
-# endif
-       UASM_i_LA_mostly(p, tmp, pgdc);
-       uasm_i_daddu(p, ptr, ptr, tmp);
-       uasm_i_dmfc0(p, tmp, C0_BADVADDR);
-       uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
+               UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
+               uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
+               UASM_i_LA_mostly(p, tmp, pgdc);
+               uasm_i_daddu(p, ptr, ptr, tmp);
+               uasm_i_dmfc0(p, tmp, C0_BADVADDR);
+               uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 #else
-       UASM_i_LA_mostly(p, ptr, pgdc);
-       uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
+               UASM_i_LA_mostly(p, ptr, pgdc);
+               uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
 #endif
+       }
 
        uasm_l_vmalloc_done(l, *p);
 
@@ -953,31 +921,25 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 static void __maybe_unused
 build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
 {
-       long pgdc = (long)pgd_current;
+       if (pgd_reg != -1) {
+               /* pgd is in pgd_reg */
+               uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
+               uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+       } else {
+               long pgdc = (long)pgd_current;
 
-       /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
+               /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 #ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * SMTC uses TCBind value as "CPU" index
-        */
-       uasm_i_mfc0(p, ptr, C0_TCBIND);
-       UASM_i_LA_mostly(p, tmp, pgdc);
-       uasm_i_srl(p, ptr, ptr, 19);
-#else
-       /*
-        * smp_processor_id() << 2 is stored in CONTEXT.
-        */
-       uasm_i_mfc0(p, ptr, C0_CONTEXT);
-       UASM_i_LA_mostly(p, tmp, pgdc);
-       uasm_i_srl(p, ptr, ptr, 23);
-#endif
-       uasm_i_addu(p, ptr, tmp, ptr);
+               uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
+               UASM_i_LA_mostly(p, tmp, pgdc);
+               uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
+               uasm_i_addu(p, ptr, tmp, ptr);
 #else
-       UASM_i_LA_mostly(p, ptr, pgdc);
+               UASM_i_LA_mostly(p, ptr, pgdc);
 #endif
-       uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
-       uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+               uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
+               uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+       }
        uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
        uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
        uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
@@ -1349,95 +1311,100 @@ static void build_r4000_tlb_refill_handler(void)
         * need three, with the second nop'ed and the third being
         * unused.
         */
-       /* Loongson2 ebase is different than r4k, we have more space */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
-       if ((p - tlb_handler) > 64)
-               panic("TLB refill handler space exceeded");
-#else
-       if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
-           || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
-               && uasm_insn_has_bdelay(relocs,
-                                       tlb_handler + MIPS64_REFILL_INSNS - 3)))
-               panic("TLB refill handler space exceeded");
-#endif
-
-       /*
-        * Now fold the handler in the TLB refill handler space.
-        */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
-       f = final_handler;
-       /* Simplest case, just copy the handler. */
-       uasm_copy_handler(relocs, labels, tlb_handler, p, f);
-       final_len = p - tlb_handler;
-#else /* CONFIG_64BIT */
-       f = final_handler + MIPS64_REFILL_INSNS;
-       if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
-               /* Just copy the handler. */
-               uasm_copy_handler(relocs, labels, tlb_handler, p, f);
-               final_len = p - tlb_handler;
-       } else {
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-               const enum label_id ls = label_tlb_huge_update;
-#else
-               const enum label_id ls = label_vmalloc;
-#endif
-               u32 *split;
-               int ov = 0;
-               int i;
-
-               for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
-                       ;
-               BUG_ON(i == ARRAY_SIZE(labels));
-               split = labels[i].addr;
-
-               /*
-                * See if we have overflown one way or the other.
-                */
-               if (split > tlb_handler + MIPS64_REFILL_INSNS ||
-                   split < p - MIPS64_REFILL_INSNS)
-                       ov = 1;
-
-               if (ov) {
+       switch (boot_cpu_type()) {
+       default:
+               if (sizeof(long) == 4) {
+       case CPU_LOONGSON2:
+               /* Loongson2 ebase is different than r4k, we have more space */
+                       if ((p - tlb_handler) > 64)
+                               panic("TLB refill handler space exceeded");
                        /*
-                        * Split two instructions before the end.  One
-                        * for the branch and one for the instruction
-                        * in the delay slot.
+                        * Now fold the handler in the TLB refill handler space.
                         */
-                       split = tlb_handler + MIPS64_REFILL_INSNS - 2;
-
+                       f = final_handler;
+                       /* Simplest case, just copy the handler. */
+                       uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+                       final_len = p - tlb_handler;
+                       break;
+               } else {
+                       if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
+                           || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
+                               && uasm_insn_has_bdelay(relocs,
+                                                       tlb_handler + MIPS64_REFILL_INSNS - 3)))
+                               panic("TLB refill handler space exceeded");
                        /*
-                        * If the branch would fall in a delay slot,
-                        * we must back up an additional instruction
-                        * so that it is no longer in a delay slot.
+                        * Now fold the handler in the TLB refill handler space.
                         */
-                       if (uasm_insn_has_bdelay(relocs, split - 1))
-                               split--;
-               }
-               /* Copy first part of the handler. */
-               uasm_copy_handler(relocs, labels, tlb_handler, split, f);
-               f += split - tlb_handler;
-
-               if (ov) {
-                       /* Insert branch. */
-                       uasm_l_split(&l, final_handler);
-                       uasm_il_b(&f, &r, label_split);
-                       if (uasm_insn_has_bdelay(relocs, split))
-                               uasm_i_nop(&f);
-                       else {
-                               uasm_copy_handler(relocs, labels,
-                                                 split, split + 1, f);
-                               uasm_move_labels(labels, f, f + 1, -1);
-                               f++;
-                               split++;
+                       f = final_handler + MIPS64_REFILL_INSNS;
+                       if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
+                               /* Just copy the handler. */
+                               uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+                               final_len = p - tlb_handler;
+                       } else {
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+                               const enum label_id ls = label_tlb_huge_update;
+#else
+                               const enum label_id ls = label_vmalloc;
+#endif
+                               u32 *split;
+                               int ov = 0;
+                               int i;
+
+                               for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
+                                       ;
+                               BUG_ON(i == ARRAY_SIZE(labels));
+                               split = labels[i].addr;
+
+                               /*
+                                * See if we have overflown one way or the other.
+                                */
+                               if (split > tlb_handler + MIPS64_REFILL_INSNS ||
+                                   split < p - MIPS64_REFILL_INSNS)
+                                       ov = 1;
+
+                               if (ov) {
+                                       /*
+                                        * Split two instructions before the end.  One
+                                        * for the branch and one for the instruction
+                                        * in the delay slot.
+                                        */
+                                       split = tlb_handler + MIPS64_REFILL_INSNS - 2;
+
+                                       /*
+                                        * If the branch would fall in a delay slot,
+                                        * we must back up an additional instruction
+                                        * so that it is no longer in a delay slot.
+                                        */
+                                       if (uasm_insn_has_bdelay(relocs, split - 1))
+                                               split--;
+                               }
+                               /* Copy first part of the handler. */
+                               uasm_copy_handler(relocs, labels, tlb_handler, split, f);
+                               f += split - tlb_handler;
+
+                               if (ov) {
+                                       /* Insert branch. */
+                                       uasm_l_split(&l, final_handler);
+                                       uasm_il_b(&f, &r, label_split);
+                                       if (uasm_insn_has_bdelay(relocs, split))
+                                               uasm_i_nop(&f);
+                                       else {
+                                               uasm_copy_handler(relocs, labels,
+                                                                 split, split + 1, f);
+                                               uasm_move_labels(labels, f, f + 1, -1);
+                                               f++;
+                                               split++;
+                                       }
+                               }
+
+                               /* Copy the rest of the handler. */
+                               uasm_copy_handler(relocs, labels, split, p, final_handler);
+                               final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
+                                           (p - split);
                        }
                }
-
-               /* Copy the rest of the handler. */
-               uasm_copy_handler(relocs, labels, split, p, final_handler);
-               final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
-                           (p - split);
+               break;
        }
-#endif /* CONFIG_64BIT */
 
        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB refill handler (%u instructions).\n",
@@ -1451,28 +1418,30 @@ static void build_r4000_tlb_refill_handler(void)
 extern u32 handle_tlbl[], handle_tlbl_end[];
 extern u32 handle_tlbs[], handle_tlbs_end[];
 extern u32 handle_tlbm[], handle_tlbm_end[];
-
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
 
-static void build_r4000_setup_pgd(void)
+static void build_setup_pgd(void)
 {
        const int a0 = 4;
-       const int a1 = 5;
+       const int __maybe_unused a1 = 5;
+       const int __maybe_unused a2 = 6;
        u32 *p = tlbmiss_handler_setup_pgd;
        const int tlbmiss_handler_setup_pgd_size =
                tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
-       struct uasm_label *l = labels;
-       struct uasm_reloc *r = relocs;
+#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
+       long pgdc = (long)pgd_current;
+#endif
 
        memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
                                        sizeof(tlbmiss_handler_setup_pgd[0]));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
-
        pgd_reg = allocate_kscratch();
-
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
        if (pgd_reg == -1) {
+               struct uasm_label *l = labels;
+               struct uasm_reloc *r = relocs;
+
                /* PGD << 11 in c0_Context */
                /*
                 * If it is a ckseg0 address, convert to a physical
@@ -1494,6 +1463,26 @@ static void build_r4000_setup_pgd(void)
                uasm_i_jr(&p, 31);
                UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
        }
+#else
+#ifdef CONFIG_SMP
+       /* Save PGD to pgd_current[smp_processor_id()] */
+       UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
+       UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
+       UASM_i_LA_mostly(&p, a2, pgdc);
+       UASM_i_ADDU(&p, a2, a2, a1);
+       UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
+#else
+       UASM_i_LA_mostly(&p, a2, pgdc);
+       UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
+#endif /* SMP */
+       uasm_i_jr(&p, 31);
+
+       /* if pgd_reg is allocated, save PGD also to scratch register */
+       if (pgd_reg != -1)
+               UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+       else
+               uasm_i_nop(&p);
+#endif
        if (p >= tlbmiss_handler_setup_pgd_end)
                panic("tlbmiss_handler_setup_pgd space exceeded");
 
@@ -1504,7 +1493,6 @@ static void build_r4000_setup_pgd(void)
        dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
                                        tlbmiss_handler_setup_pgd_size);
 }
-#endif
 
 static void
 iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
@@ -2197,10 +2185,8 @@ static void flush_tlb_handlers(void)
                           (unsigned long)handle_tlbs_end);
        local_flush_icache_range((unsigned long)handle_tlbm,
                           (unsigned long)handle_tlbm_end);
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
        local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
                           (unsigned long)tlbmiss_handler_setup_pgd_end);
-#endif
 }
 
 void build_tlb_refill_handler(void)
@@ -2232,6 +2218,7 @@ void build_tlb_refill_handler(void)
                if (!run_once) {
                        if (!cpu_has_local_ebase)
                                build_r3000_tlb_refill_handler();
+                       build_setup_pgd();
                        build_r3000_tlb_load_handler();
                        build_r3000_tlb_store_handler();
                        build_r3000_tlb_modify_handler();
@@ -2255,9 +2242,7 @@ void build_tlb_refill_handler(void)
        default:
                if (!run_once) {
                        scratch_reg = allocate_kscratch();
-#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
-                       build_r4000_setup_pgd();
-#endif
+                       build_setup_pgd();
                        build_r4000_tlb_load_handler();
                        build_r4000_tlb_store_handler();
                        build_r4000_tlb_modify_handler();
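
The tlbex.c changes above collapse several per-configuration #ifdef ladders (XContext with shift 51, Context with shift 25 or 26) into the single SMP_CPUID_REG / SMP_CPUID_REGSHIFT pair consumed by UASM_i_CPUID_MFC0. A sketch of that consolidation; the shift values are taken from the removed lines, while the demo config switch and helper are assumptions:

    /*
     * Sketch of the SMP_CPUID_* consolidation: the per-configuration choice
     * of "which register holds the CPU id and how far to shift it" is made
     * once, and every user just names the constants. The *_DEMO macros and
     * the pretend config flag are stand-ins.
     */
    #include <stdio.h>

    /* Define DEMO_PGD_C0_CONTEXT to model the XContext configuration. */
    #ifdef DEMO_PGD_C0_CONTEXT
    # define SMP_CPUID_REG_DEMO      "XContext"
    # define SMP_CPUID_REGSHIFT_DEMO 51
    #else
    # define SMP_CPUID_REG_DEMO      "Context"
    # define SMP_CPUID_REGSHIFT_DEMO 25
    #endif

    /* One helper instead of repeating the #ifdef ladder at every call site. */
    static unsigned int demo_cpu_id(unsigned long long reg_value)
    {
        return (unsigned int)(reg_value >> SMP_CPUID_REGSHIFT_DEMO);
    }

    int main(void)
    {
        unsigned long long reg = 3ULL << SMP_CPUID_REGSHIFT_DEMO; /* CPU 3 */

        printf("cpu id from %s = %u\n", SMP_CPUID_REG_DEMO, demo_cpu_id(reg));
        return 0;
    }
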
index c69da37346995e94a5e5d4a33dafce0585b84ed9..be4a1092fd534f23827d8f4c9503a0970141cb3e 100644 (file)
@@ -37,7 +37,6 @@
 #include <asm/irq_regs.h>
 #include <asm/mips-boards/malta.h>
 #include <asm/mips-boards/maltaint.h>
-#include <asm/mips-boards/piix4.h>
 #include <asm/gt64120.h>
 #include <asm/mips-boards/generic.h>
 #include <asm/mips-boards/msc01_pci.h>
index 6f8feb9efcff9e64174534233133b6d097d957fc..c0eded01fde96ef23676eabc28e72ceb286467ad 100644 (file)
@@ -245,7 +245,7 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
        return threadmode;
 
 unsupp:
-       panic("Unsupported CPU mask %lx\n",
+       panic("Unsupported CPU mask %lx",
                (unsigned long)cpumask_bits(wakeup_mask)[0]);
        return 0;
 }
index 07ada7f8441ead44606e732f179a9d2dc4d0e00e..df36e2327c54572cca76848547cbbbf030c554ac 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <asm/mips-boards/piix4.h>
 
 /* PCI interrupt pins */
 #define PCIA           1
@@ -53,7 +54,8 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
 static void malta_piix_func0_fixup(struct pci_dev *pdev)
 {
        unsigned char reg_val;
-       static int piixirqmap[16] = {  /* PIIX PIRQC[A:D] irq mappings */
+       /* PIIX PIRQC[A:D] irq mappings */
+       static int piixirqmap[PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MAX] = {
                0,  0,  0,  3,
                4,  5,  6,  7,
                0,  9, 10, 11,
@@ -63,11 +65,12 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
 
        /* Interrogate PIIX4 to get PCI IRQ mapping */
        for (i = 0; i <= 3; i++) {
-               pci_read_config_byte(pdev, 0x60+i, &reg_val);
-               if (reg_val & 0x80)
+               pci_read_config_byte(pdev, PIIX4_FUNC0_PIRQRC+i, &reg_val);
+               if (reg_val & PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_DISABLE)
                        pci_irq[PCIA+i] = 0;    /* Disabled */
                else
-                       pci_irq[PCIA+i] = piixirqmap[reg_val & 15];
+                       pci_irq[PCIA+i] = piixirqmap[reg_val &
+                               PIIX4_FUNC0_PIRQRC_IRQ_ROUTING_MASK];
        }
 
        /* Done by YAMON 2.00 onwards */
@@ -76,8 +79,9 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
                 * Set top of main memory accessible by ISA or DMA
                 * devices to 16 Mb.
                 */
-               pci_read_config_byte(pdev, 0x69, &reg_val);
-               pci_write_config_byte(pdev, 0x69, reg_val | 0xf0);
+               pci_read_config_byte(pdev, PIIX4_FUNC0_TOM, &reg_val);
+               pci_write_config_byte(pdev, PIIX4_FUNC0_TOM, reg_val |
+                               PIIX4_FUNC0_TOM_TOP_OF_MEMORY_MASK);
        }
 }
 
@@ -93,10 +97,14 @@ static void malta_piix_func1_fixup(struct pci_dev *pdev)
                /*
                 * IDE Decode enable.
                 */
-               pci_read_config_byte(pdev, 0x41, &reg_val);
-               pci_write_config_byte(pdev, 0x41, reg_val|0x80);
-               pci_read_config_byte(pdev, 0x43, &reg_val);
-               pci_write_config_byte(pdev, 0x43, reg_val|0x80);
+               pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI,
+                       &reg_val);
+               pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_PRIMARY_HI,
+                       reg_val|PIIX4_FUNC1_IDETIM_PRIMARY_HI_IDE_DECODE_EN);
+               pci_read_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI,
+                       &reg_val);
+               pci_write_config_byte(pdev, PIIX4_FUNC1_IDETIM_SECONDARY_HI,
+                       reg_val|PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN);
        }
 }
 
@@ -108,10 +116,12 @@ static void quirk_dlcsetup(struct pci_dev *dev)
 {
        u8 odlc, ndlc;
 
-       (void) pci_read_config_byte(dev, 0x82, &odlc);
+       (void) pci_read_config_byte(dev, PIIX4_FUNC0_DLC, &odlc);
        /* Enable passive releases and delayed transaction */
-       ndlc = odlc | 7;
-       (void) pci_write_config_byte(dev, 0x82, ndlc);
+       ndlc = odlc | PIIX4_FUNC0_DLC_USBPR_EN |
+                     PIIX4_FUNC0_DLC_PASSIVE_RELEASE_EN |
+                     PIIX4_FUNC0_DLC_DELAYED_TRANSACTION_EN;
+       (void) pci_write_config_byte(dev, PIIX4_FUNC0_DLC, ndlc);
 }
 
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
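
The Malta PIIX4 hunks above swap raw config-space offsets and bit masks (0x60+i, 0x80, 15 and friends) for named PIIX4_FUNC0_* constants. A self-contained sketch of the PIRQ route-control decode in the same shape; the DEMO_* constants and sample register values are stand-ins, while the IRQ remap table is copied from the diff:

    /*
     * Sketch: decode a PIRQ route-control byte with named constants instead
     * of bare 0x80 / 15 literals, then remap through the routing table.
     */
    #include <stdio.h>

    #define DEMO_PIRQRC_IRQ_ROUTING_DISABLE 0x80 /* bit 7: route disabled      */
    #define DEMO_PIRQRC_IRQ_ROUTING_MASK    0x0f /* bits 3:0: selected ISA IRQ */

    static const int demo_irqmap[16] = { /* PIRQ[A:D] -> ISA IRQ, 0 = invalid */
        0,  0,  0,  3,
        4,  5,  6,  7,
        0,  9, 10, 11,
        12, 0, 14, 15
    };

    static int decode_pirq(unsigned char reg_val)
    {
        if (reg_val & DEMO_PIRQRC_IRQ_ROUTING_DISABLE)
            return 0; /* routing disabled */
        return demo_irqmap[reg_val & DEMO_PIRQRC_IRQ_ROUTING_MASK];
    }

    int main(void)
    {
        unsigned char samples[] = { 0x80, 0x0a, 0x05 };

        for (unsigned i = 0; i < sizeof(samples); i++)
            printf("reg 0x%02x -> irq %d\n", samples[i], decode_pirq(samples[i]));
        return 0;
    }
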
index 18517dd0f7090987fe182df3d2a4dbe4e62842cf..d471a26dd5f891664fbed0bcc6d330f73027cffd 100644 (file)
@@ -363,9 +363,6 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
        spin_lock_init(&apc->lock);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
-       if (!res)
-               return -EINVAL;
-
        apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(apc->cfg_base))
                return PTR_ERR(apc->cfg_base);
index 65ec032fa0b442367c93b70cf8599fb5bd03fdd1..785b2659b519bce10085ee6bd42831937c616d4b 100644 (file)
@@ -362,25 +362,16 @@ static int ar724x_pci_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl_base");
-       if (!res)
-               return -EINVAL;
-
        apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(apc->ctrl_base))
                return PTR_ERR(apc->ctrl_base);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
-       if (!res)
-               return -EINVAL;
-
        apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(apc->devcfg_base))
                return PTR_ERR(apc->devcfg_base);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
-       if (!res)
-               return -EINVAL;
-
        apc->crp_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(apc->crp_base))
                return PTR_ERR(apc->crp_base);
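
The ath79 PCI hunks above drop the explicit "if (!res) return -EINVAL;" checks because devm_ioremap_resource() validates the resource itself and reports failure through an error-encoded pointer, which the existing IS_ERR()/PTR_ERR() checks already catch. A simplified userspace stand-in for that error-pointer convention; the demo_* helpers only mimic the idea and are not the kernel's implementation:

    /*
     * Sketch: fold an error code into the returned pointer so the caller
     * needs only one check after the call.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_MAX_ERRNO 4095UL

    static void *demo_err_ptr(long err)      { return (void *)(intptr_t)err; }
    static long  demo_ptr_err(const void *p) { return (long)(intptr_t)p; }
    static int   demo_is_err(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-DEMO_MAX_ERRNO;
    }

    /* Stand-in for devm_ioremap_resource(): validates its input itself. */
    static void *demo_ioremap_resource(const void *res)
    {
        static char mapped_region[64];

        if (!res)
            return demo_err_ptr(-22); /* -EINVAL, as the removed check did */
        return mapped_region;
    }

    int main(void)
    {
        void *base = demo_ioremap_resource(NULL);

        if (demo_is_err(base))
            printf("probe fails with %ld\n", demo_ptr_err(base));
        return 0;
    }
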
index 33e7aa52d9c4451ca352221921e27c134246a52d..1bf60b12737746d19cfec9c8b0512e6c419a2a5c 100644 (file)
@@ -120,51 +120,37 @@ static void pcibios_scanbus(struct pci_controller *hose)
 #ifdef CONFIG_OF
 void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
 {
-       const __be32 *ranges;
-       int rlen;
-       int pna = of_n_addr_cells(node);
-       int np = pna + 5;
+       struct of_pci_range range;
+       struct of_pci_range_parser parser;
 
        pr_info("PCI host bridge %s ranges:\n", node->full_name);
-       ranges = of_get_property(node, "ranges", &rlen);
-       if (ranges == NULL)
-               return;
        hose->of_node = node;
 
-       while ((rlen -= np * 4) >= 0) {
-               u32 pci_space;
+       if (of_pci_range_parser_init(&parser, node))
+               return;
+
+       for_each_of_pci_range(&parser, &range) {
                struct resource *res = NULL;
-               u64 addr, size;
-
-               pci_space = be32_to_cpup(&ranges[0]);
-               addr = of_translate_address(node, ranges + 3);
-               size = of_read_number(ranges + pna + 3, 2);
-               ranges += np;
-               switch ((pci_space >> 24) & 0x3) {
-               case 1:         /* PCI IO space */
+
+               switch (range.flags & IORESOURCE_TYPE_BITS) {
+               case IORESOURCE_IO:
                        pr_info("  IO 0x%016llx..0x%016llx\n",
-                                       addr, addr + size - 1);
+                               range.cpu_addr,
+                               range.cpu_addr + range.size - 1);
                        hose->io_map_base =
-                               (unsigned long)ioremap(addr, size);
+                               (unsigned long)ioremap(range.cpu_addr,
+                                                      range.size);
                        res = hose->io_resource;
-                       res->flags = IORESOURCE_IO;
                        break;
-               case 2:         /* PCI Memory space */
-               case 3:         /* PCI 64 bits Memory space */
+               case IORESOURCE_MEM:
                        pr_info(" MEM 0x%016llx..0x%016llx\n",
-                                       addr, addr + size - 1);
+                               range.cpu_addr,
+                               range.cpu_addr + range.size - 1);
                        res = hose->mem_resource;
-                       res->flags = IORESOURCE_MEM;
                        break;
                }
-               if (res != NULL) {
-                       res->start = addr;
-                       res->name = node->full_name;
-                       res->end = res->start + size - 1;
-                       res->parent = NULL;
-                       res->sibling = NULL;
-                       res->child = NULL;
-               }
+               if (res != NULL)
+                       of_pci_range_to_resource(&range, node, res);
        }
 }
 
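Note (context, not part of the diff): the replacement code above walks the host bridge "ranges" property with the generic OF PCI range parser instead of decoding the address cells by hand. A small sketch of that helper API in isolation, under the assumption of the same era's of_address.h interfaces; example_dump_ranges() is a hypothetical name used only for illustration.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>

static void example_dump_ranges(struct device_node *node)
{
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;

	/* Fails (non-zero) if the node has no usable "ranges" property. */
	if (of_pci_range_parser_init(&parser, node))
		return;

	for_each_of_pci_range(&parser, &range) {
		/* Converts one parsed entry into start/end/flags/name. */
		of_pci_range_to_resource(&range, node, &res);
		pr_info("%pR\n", &res);
	}
}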
diff --git a/arch/mips/powertv/Kconfig b/arch/mips/powertv/Kconfig
deleted file mode 100644 (file)
index dd91fba..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-config BOOTLOADER_FAMILY
-       string "POWERTV Bootloader Family string"
-       default "85"
-       depends on POWERTV
-       help
-         This value should be specified when the bootloader driver is disabled
-         and must be exactly two characters long. Families supported are:
-           R1 - RNG-100  R2 - RNG-200
-           A1 - Class A  B1 - Class B
-           E1 - Class E  F1 - Class F
-           44 - 45xx     46 - 46xx
-           85 - 85xx     86 - 86xx
diff --git a/arch/mips/powertv/Makefile b/arch/mips/powertv/Makefile
deleted file mode 100644 (file)
index 39ca9f8..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Carsten Langgaard, carstenl@mips.com
-# Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
-#
-# Carsten Langgaard, carstenl@mips.com
-# Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
-# Portions copyright (C)  2009 Cisco Systems, Inc.
-#
-# This program is free software; you can distribute it and/or modify it
-# under the terms of the GNU General Public License (Version 2) as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
-#
-# Makefile for the Cisco PowerTV-specific kernel interface routines
-# under Linux.
-#
-
-obj-y += init.o ioremap.o memory.o powertv_setup.o reset.o time.o \
-       asic/ pci/
-
-obj-$(CONFIG_USB) += powertv-usb.o
diff --git a/arch/mips/powertv/Platform b/arch/mips/powertv/Platform
deleted file mode 100644 (file)
index 4eb5af1..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Cisco PowerTV Platform
-#
-platform-$(CONFIG_POWERTV)     += powertv/
-cflags-$(CONFIG_POWERTV)       +=                                      \
-               -I$(srctree)/arch/mips/include/asm/mach-powertv
-load-$(CONFIG_POWERTV)         += 0xffffffff90800000
diff --git a/arch/mips/powertv/asic/Makefile b/arch/mips/powertv/asic/Makefile
deleted file mode 100644 (file)
index 35dcc53..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Copyright (C) 2009  Scientific-Atlanta, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-#
-
-obj-y += asic-calliope.o asic-cronus.o asic-gaia.o asic-zeus.o \
-       asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \
-       prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o
diff --git a/arch/mips/powertv/asic/asic-calliope.c b/arch/mips/powertv/asic/asic-calliope.c
deleted file mode 100644 (file)
index 2f539b4..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Locations of devices in the Calliope ASIC.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- *
- * Description:         Defines the platform resources for the SA settop.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-#define CALLIOPE_ADDR(x)       (CALLIOPE_IO_BASE + (x))
-
-const struct register_map calliope_register_map __initconst = {
-       .eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)},
-       .eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)},
-       .eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)},
-
-       .chipver3 = {.phys = CALLIOPE_ADDR(0xA00800)},
-       .chipver2 = {.phys = CALLIOPE_ADDR(0xA00804)},
-       .chipver1 = {.phys = CALLIOPE_ADDR(0xA00808)},
-       .chipver0 = {.phys = CALLIOPE_ADDR(0xA0080c)},
-
-       /* The registers of IRBlaster */
-       .uart1_intstat = {.phys = CALLIOPE_ADDR(0xA01800)},
-       .uart1_inten = {.phys = CALLIOPE_ADDR(0xA01804)},
-       .uart1_config1 = {.phys = CALLIOPE_ADDR(0xA01808)},
-       .uart1_config2 = {.phys = CALLIOPE_ADDR(0xA0180C)},
-       .uart1_divisorhi = {.phys = CALLIOPE_ADDR(0xA01810)},
-       .uart1_divisorlo = {.phys = CALLIOPE_ADDR(0xA01814)},
-       .uart1_data = {.phys = CALLIOPE_ADDR(0xA01818)},
-       .uart1_status = {.phys = CALLIOPE_ADDR(0xA0181C)},
-
-       .int_stat_3 = {.phys = CALLIOPE_ADDR(0xA02800)},
-       .int_stat_2 = {.phys = CALLIOPE_ADDR(0xA02804)},
-       .int_stat_1 = {.phys = CALLIOPE_ADDR(0xA02808)},
-       .int_stat_0 = {.phys = CALLIOPE_ADDR(0xA0280c)},
-       .int_config = {.phys = CALLIOPE_ADDR(0xA02810)},
-       .int_int_scan = {.phys = CALLIOPE_ADDR(0xA02818)},
-       .ien_int_3 = {.phys = CALLIOPE_ADDR(0xA02830)},
-       .ien_int_2 = {.phys = CALLIOPE_ADDR(0xA02834)},
-       .ien_int_1 = {.phys = CALLIOPE_ADDR(0xA02838)},
-       .ien_int_0 = {.phys = CALLIOPE_ADDR(0xA0283c)},
-       .int_level_3_3 = {.phys = CALLIOPE_ADDR(0xA02880)},
-       .int_level_3_2 = {.phys = CALLIOPE_ADDR(0xA02884)},
-       .int_level_3_1 = {.phys = CALLIOPE_ADDR(0xA02888)},
-       .int_level_3_0 = {.phys = CALLIOPE_ADDR(0xA0288c)},
-       .int_level_2_3 = {.phys = CALLIOPE_ADDR(0xA02890)},
-       .int_level_2_2 = {.phys = CALLIOPE_ADDR(0xA02894)},
-       .int_level_2_1 = {.phys = CALLIOPE_ADDR(0xA02898)},
-       .int_level_2_0 = {.phys = CALLIOPE_ADDR(0xA0289c)},
-       .int_level_1_3 = {.phys = CALLIOPE_ADDR(0xA028a0)},
-       .int_level_1_2 = {.phys = CALLIOPE_ADDR(0xA028a4)},
-       .int_level_1_1 = {.phys = CALLIOPE_ADDR(0xA028a8)},
-       .int_level_1_0 = {.phys = CALLIOPE_ADDR(0xA028ac)},
-       .int_level_0_3 = {.phys = CALLIOPE_ADDR(0xA028b0)},
-       .int_level_0_2 = {.phys = CALLIOPE_ADDR(0xA028b4)},
-       .int_level_0_1 = {.phys = CALLIOPE_ADDR(0xA028b8)},
-       .int_level_0_0 = {.phys = CALLIOPE_ADDR(0xA028bc)},
-       .int_docsis_en = {.phys = CALLIOPE_ADDR(0xA028F4)},
-
-       .mips_pll_setup = {.phys = CALLIOPE_ADDR(0x980000)},
-       .fs432x4b4_usb_ctl = {.phys = CALLIOPE_ADDR(0x980030)},
-       .test_bus = {.phys = CALLIOPE_ADDR(0x9800CC)},
-       .crt_spare = {.phys = CALLIOPE_ADDR(0x9800d4)},
-       .usb2_ohci_int_mask = {.phys = CALLIOPE_ADDR(0x9A000c)},
-       .usb2_strap = {.phys = CALLIOPE_ADDR(0x9A0014)},
-       .ehci_hcapbase = {.phys = CALLIOPE_ADDR(0x9BFE00)},
-       .ohci_hc_revision = {.phys = CALLIOPE_ADDR(0x9BFC00)},
-       .bcm1_bs_lmi_steer = {.phys = CALLIOPE_ADDR(0x9E0004)},
-       .usb2_control = {.phys = CALLIOPE_ADDR(0x9E0054)},
-       .usb2_stbus_obc = {.phys = CALLIOPE_ADDR(0x9BFF00)},
-       .usb2_stbus_mess_size = {.phys = CALLIOPE_ADDR(0x9BFF04)},
-       .usb2_stbus_chunk_size = {.phys = CALLIOPE_ADDR(0x9BFF08)},
-
-       .pcie_regs = {.phys = 0x000000},        /* -doesn't exist- */
-       .tim_ch = {.phys = CALLIOPE_ADDR(0xA02C10)},
-       .tim_cl = {.phys = CALLIOPE_ADDR(0xA02C14)},
-       .gpio_dout = {.phys = CALLIOPE_ADDR(0xA02c20)},
-       .gpio_din = {.phys = CALLIOPE_ADDR(0xA02c24)},
-       .gpio_dir = {.phys = CALLIOPE_ADDR(0xA02c2C)},
-       .watchdog = {.phys = CALLIOPE_ADDR(0xA02c30)},
-       .front_panel = {.phys = 0x000000},      /* -not used- */
-};
diff --git a/arch/mips/powertv/asic/asic-cronus.c b/arch/mips/powertv/asic/asic-cronus.c
deleted file mode 100644 (file)
index 7f8f342..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Locations of devices in the Cronus ASIC
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- *
- * Description:         Defines the platform resources for the SA settop.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-#define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x))
-
-const struct register_map cronus_register_map __initconst = {
-       .eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)},
-       .eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)},
-       .eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)},
-
-       .chipver3 = {.phys = CRONUS_ADDR(0x2A0800)},
-       .chipver2 = {.phys = CRONUS_ADDR(0x2A0804)},
-       .chipver1 = {.phys = CRONUS_ADDR(0x2A0808)},
-       .chipver0 = {.phys = CRONUS_ADDR(0x2A080C)},
-
-       /* The registers of IRBlaster */
-       .uart1_intstat = {.phys = CRONUS_ADDR(0x2A1800)},
-       .uart1_inten = {.phys = CRONUS_ADDR(0x2A1804)},
-       .uart1_config1 = {.phys = CRONUS_ADDR(0x2A1808)},
-       .uart1_config2 = {.phys = CRONUS_ADDR(0x2A180C)},
-       .uart1_divisorhi = {.phys = CRONUS_ADDR(0x2A1810)},
-       .uart1_divisorlo = {.phys = CRONUS_ADDR(0x2A1814)},
-       .uart1_data = {.phys = CRONUS_ADDR(0x2A1818)},
-       .uart1_status = {.phys = CRONUS_ADDR(0x2A181C)},
-
-       .int_stat_3 = {.phys = CRONUS_ADDR(0x2A2800)},
-       .int_stat_2 = {.phys = CRONUS_ADDR(0x2A2804)},
-       .int_stat_1 = {.phys = CRONUS_ADDR(0x2A2808)},
-       .int_stat_0 = {.phys = CRONUS_ADDR(0x2A280C)},
-       .int_config = {.phys = CRONUS_ADDR(0x2A2810)},
-       .int_int_scan = {.phys = CRONUS_ADDR(0x2A2818)},
-       .ien_int_3 = {.phys = CRONUS_ADDR(0x2A2830)},
-       .ien_int_2 = {.phys = CRONUS_ADDR(0x2A2834)},
-       .ien_int_1 = {.phys = CRONUS_ADDR(0x2A2838)},
-       .ien_int_0 = {.phys = CRONUS_ADDR(0x2A283C)},
-       .int_level_3_3 = {.phys = CRONUS_ADDR(0x2A2880)},
-       .int_level_3_2 = {.phys = CRONUS_ADDR(0x2A2884)},
-       .int_level_3_1 = {.phys = CRONUS_ADDR(0x2A2888)},
-       .int_level_3_0 = {.phys = CRONUS_ADDR(0x2A288C)},
-       .int_level_2_3 = {.phys = CRONUS_ADDR(0x2A2890)},
-       .int_level_2_2 = {.phys = CRONUS_ADDR(0x2A2894)},
-       .int_level_2_1 = {.phys = CRONUS_ADDR(0x2A2898)},
-       .int_level_2_0 = {.phys = CRONUS_ADDR(0x2A289C)},
-       .int_level_1_3 = {.phys = CRONUS_ADDR(0x2A28A0)},
-       .int_level_1_2 = {.phys = CRONUS_ADDR(0x2A28A4)},
-       .int_level_1_1 = {.phys = CRONUS_ADDR(0x2A28A8)},
-       .int_level_1_0 = {.phys = CRONUS_ADDR(0x2A28AC)},
-       .int_level_0_3 = {.phys = CRONUS_ADDR(0x2A28B0)},
-       .int_level_0_2 = {.phys = CRONUS_ADDR(0x2A28B4)},
-       .int_level_0_1 = {.phys = CRONUS_ADDR(0x2A28B8)},
-       .int_level_0_0 = {.phys = CRONUS_ADDR(0x2A28BC)},
-       .int_docsis_en = {.phys = CRONUS_ADDR(0x2A28F4)},
-
-       .mips_pll_setup = {.phys = CRONUS_ADDR(0x1C0000)},
-       .fs432x4b4_usb_ctl = {.phys = CRONUS_ADDR(0x1C0028)},
-       .test_bus = {.phys = CRONUS_ADDR(0x1C00CC)},
-       .crt_spare = {.phys = CRONUS_ADDR(0x1c00d4)},
-       .usb2_ohci_int_mask = {.phys = CRONUS_ADDR(0x20000C)},
-       .usb2_strap = {.phys = CRONUS_ADDR(0x200014)},
-       .ehci_hcapbase = {.phys = CRONUS_ADDR(0x21FE00)},
-       .ohci_hc_revision = {.phys = CRONUS_ADDR(0x21fc00)},
-       .bcm1_bs_lmi_steer = {.phys = CRONUS_ADDR(0x2E0008)},
-       .usb2_control = {.phys = CRONUS_ADDR(0x2E004C)},
-       .usb2_stbus_obc = {.phys = CRONUS_ADDR(0x21FF00)},
-       .usb2_stbus_mess_size = {.phys = CRONUS_ADDR(0x21FF04)},
-       .usb2_stbus_chunk_size = {.phys = CRONUS_ADDR(0x21FF08)},
-
-       .pcie_regs = {.phys = CRONUS_ADDR(0x220000)},
-       .tim_ch = {.phys = CRONUS_ADDR(0x2A2C10)},
-       .tim_cl = {.phys = CRONUS_ADDR(0x2A2C14)},
-       .gpio_dout = {.phys = CRONUS_ADDR(0x2A2C20)},
-       .gpio_din = {.phys = CRONUS_ADDR(0x2A2C24)},
-       .gpio_dir = {.phys = CRONUS_ADDR(0x2A2C2C)},
-       .watchdog = {.phys = CRONUS_ADDR(0x2A2C30)},
-       .front_panel = {.phys = CRONUS_ADDR(0x2A3800)},
-};
diff --git a/arch/mips/powertv/asic/asic-gaia.c b/arch/mips/powertv/asic/asic-gaia.c
deleted file mode 100644 (file)
index 1265b49..0000000
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Locations of devices in the Gaia ASIC
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      David VomLehn
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-const struct register_map gaia_register_map __initconst = {
-       .eic_slow0_strt_add = {.phys = GAIA_IO_BASE + 0x000000},
-       .eic_cfg_bits = {.phys = GAIA_IO_BASE + 0x000038},
-       .eic_ready_status = {.phys = GAIA_IO_BASE + 0x00004C},
-
-       .chipver3 = {.phys = GAIA_IO_BASE + 0x2A0800},
-       .chipver2 = {.phys = GAIA_IO_BASE + 0x2A0804},
-       .chipver1 = {.phys = GAIA_IO_BASE + 0x2A0808},
-       .chipver0 = {.phys = GAIA_IO_BASE + 0x2A080C},
-
-       /* The registers of IRBlaster */
-       .uart1_intstat = {.phys = GAIA_IO_BASE + 0x2A1800},
-       .uart1_inten = {.phys = GAIA_IO_BASE + 0x2A1804},
-       .uart1_config1 = {.phys = GAIA_IO_BASE + 0x2A1808},
-       .uart1_config2 = {.phys = GAIA_IO_BASE + 0x2A180C},
-       .uart1_divisorhi = {.phys = GAIA_IO_BASE + 0x2A1810},
-       .uart1_divisorlo = {.phys = GAIA_IO_BASE + 0x2A1814},
-       .uart1_data = {.phys = GAIA_IO_BASE + 0x2A1818},
-       .uart1_status = {.phys = GAIA_IO_BASE + 0x2A181C},
-
-       .int_stat_3 = {.phys = GAIA_IO_BASE + 0x2A2800},
-       .int_stat_2 = {.phys = GAIA_IO_BASE + 0x2A2804},
-       .int_stat_1 = {.phys = GAIA_IO_BASE + 0x2A2808},
-       .int_stat_0 = {.phys = GAIA_IO_BASE + 0x2A280C},
-       .int_config = {.phys = GAIA_IO_BASE + 0x2A2810},
-       .int_int_scan = {.phys = GAIA_IO_BASE + 0x2A2818},
-       .ien_int_3 = {.phys = GAIA_IO_BASE + 0x2A2830},
-       .ien_int_2 = {.phys = GAIA_IO_BASE + 0x2A2834},
-       .ien_int_1 = {.phys = GAIA_IO_BASE + 0x2A2838},
-       .ien_int_0 = {.phys = GAIA_IO_BASE + 0x2A283C},
-       .int_level_3_3 = {.phys = GAIA_IO_BASE + 0x2A2880},
-       .int_level_3_2 = {.phys = GAIA_IO_BASE + 0x2A2884},
-       .int_level_3_1 = {.phys = GAIA_IO_BASE + 0x2A2888},
-       .int_level_3_0 = {.phys = GAIA_IO_BASE + 0x2A288C},
-       .int_level_2_3 = {.phys = GAIA_IO_BASE + 0x2A2890},
-       .int_level_2_2 = {.phys = GAIA_IO_BASE + 0x2A2894},
-       .int_level_2_1 = {.phys = GAIA_IO_BASE + 0x2A2898},
-       .int_level_2_0 = {.phys = GAIA_IO_BASE + 0x2A289C},
-       .int_level_1_3 = {.phys = GAIA_IO_BASE + 0x2A28A0},
-       .int_level_1_2 = {.phys = GAIA_IO_BASE + 0x2A28A4},
-       .int_level_1_1 = {.phys = GAIA_IO_BASE + 0x2A28A8},
-       .int_level_1_0 = {.phys = GAIA_IO_BASE + 0x2A28AC},
-       .int_level_0_3 = {.phys = GAIA_IO_BASE + 0x2A28B0},
-       .int_level_0_2 = {.phys = GAIA_IO_BASE + 0x2A28B4},
-       .int_level_0_1 = {.phys = GAIA_IO_BASE + 0x2A28B8},
-       .int_level_0_0 = {.phys = GAIA_IO_BASE + 0x2A28BC},
-       .int_docsis_en = {.phys = GAIA_IO_BASE + 0x2A28F4},
-
-       .mips_pll_setup = {.phys = GAIA_IO_BASE + 0x1C0000},
-       .fs432x4b4_usb_ctl = {.phys = GAIA_IO_BASE + 0x1C0024},
-       .test_bus = {.phys = GAIA_IO_BASE + 0x1C00CC},
-       .crt_spare = {.phys = GAIA_IO_BASE + 0x1c0108},
-       .usb2_ohci_int_mask = {.phys = GAIA_IO_BASE + 0x20000C},
-       .usb2_strap = {.phys = GAIA_IO_BASE + 0x200014},
-       .ehci_hcapbase = {.phys = GAIA_IO_BASE + 0x21FE00},
-       .ohci_hc_revision = {.phys = GAIA_IO_BASE + 0x21fc00},
-       .bcm1_bs_lmi_steer = {.phys = GAIA_IO_BASE + 0x2E0004},
-       .usb2_control = {.phys = GAIA_IO_BASE + 0x2E004C},
-       .usb2_stbus_obc = {.phys = GAIA_IO_BASE + 0x21FF00},
-       .usb2_stbus_mess_size = {.phys = GAIA_IO_BASE + 0x21FF04},
-       .usb2_stbus_chunk_size = {.phys = GAIA_IO_BASE + 0x21FF08},
-
-       .pcie_regs = {.phys = GAIA_IO_BASE + 0x220000},
-       .tim_ch = {.phys = GAIA_IO_BASE + 0x2A2C10},
-       .tim_cl = {.phys = GAIA_IO_BASE + 0x2A2C14},
-       .gpio_dout = {.phys = GAIA_IO_BASE + 0x2A2C20},
-       .gpio_din = {.phys = GAIA_IO_BASE + 0x2A2C24},
-       .gpio_dir = {.phys = GAIA_IO_BASE + 0x2A2C2C},
-       .watchdog = {.phys = GAIA_IO_BASE + 0x2A2C30},
-       .front_panel = {.phys = GAIA_IO_BASE + 0x2A3800},
-};
diff --git a/arch/mips/powertv/asic/asic-zeus.c b/arch/mips/powertv/asic/asic-zeus.c
deleted file mode 100644 (file)
index 14e7de1..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Locations of devices in the Zeus ASIC
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- *
- * Description:         Defines the platform resources for the SA settop.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-#define ZEUS_ADDR(x)   (ZEUS_IO_BASE + (x))
-
-const struct register_map zeus_register_map __initconst = {
-       .eic_slow0_strt_add = {.phys = ZEUS_ADDR(0x000000)},
-       .eic_cfg_bits = {.phys = ZEUS_ADDR(0x000038)},
-       .eic_ready_status = {.phys = ZEUS_ADDR(0x00004c)},
-
-       .chipver3 = {.phys = ZEUS_ADDR(0x280800)},
-       .chipver2 = {.phys = ZEUS_ADDR(0x280804)},
-       .chipver1 = {.phys = ZEUS_ADDR(0x280808)},
-       .chipver0 = {.phys = ZEUS_ADDR(0x28080c)},
-
-       /* The registers of IRBlaster */
-       .uart1_intstat = {.phys = ZEUS_ADDR(0x281800)},
-       .uart1_inten = {.phys = ZEUS_ADDR(0x281804)},
-       .uart1_config1 = {.phys = ZEUS_ADDR(0x281808)},
-       .uart1_config2 = {.phys = ZEUS_ADDR(0x28180C)},
-       .uart1_divisorhi = {.phys = ZEUS_ADDR(0x281810)},
-       .uart1_divisorlo = {.phys = ZEUS_ADDR(0x281814)},
-       .uart1_data = {.phys = ZEUS_ADDR(0x281818)},
-       .uart1_status = {.phys = ZEUS_ADDR(0x28181C)},
-
-       .int_stat_3 = {.phys = ZEUS_ADDR(0x282800)},
-       .int_stat_2 = {.phys = ZEUS_ADDR(0x282804)},
-       .int_stat_1 = {.phys = ZEUS_ADDR(0x282808)},
-       .int_stat_0 = {.phys = ZEUS_ADDR(0x28280c)},
-       .int_config = {.phys = ZEUS_ADDR(0x282810)},
-       .int_int_scan = {.phys = ZEUS_ADDR(0x282818)},
-       .ien_int_3 = {.phys = ZEUS_ADDR(0x282830)},
-       .ien_int_2 = {.phys = ZEUS_ADDR(0x282834)},
-       .ien_int_1 = {.phys = ZEUS_ADDR(0x282838)},
-       .ien_int_0 = {.phys = ZEUS_ADDR(0x28283c)},
-       .int_level_3_3 = {.phys = ZEUS_ADDR(0x282880)},
-       .int_level_3_2 = {.phys = ZEUS_ADDR(0x282884)},
-       .int_level_3_1 = {.phys = ZEUS_ADDR(0x282888)},
-       .int_level_3_0 = {.phys = ZEUS_ADDR(0x28288c)},
-       .int_level_2_3 = {.phys = ZEUS_ADDR(0x282890)},
-       .int_level_2_2 = {.phys = ZEUS_ADDR(0x282894)},
-       .int_level_2_1 = {.phys = ZEUS_ADDR(0x282898)},
-       .int_level_2_0 = {.phys = ZEUS_ADDR(0x28289c)},
-       .int_level_1_3 = {.phys = ZEUS_ADDR(0x2828a0)},
-       .int_level_1_2 = {.phys = ZEUS_ADDR(0x2828a4)},
-       .int_level_1_1 = {.phys = ZEUS_ADDR(0x2828a8)},
-       .int_level_1_0 = {.phys = ZEUS_ADDR(0x2828ac)},
-       .int_level_0_3 = {.phys = ZEUS_ADDR(0x2828b0)},
-       .int_level_0_2 = {.phys = ZEUS_ADDR(0x2828b4)},
-       .int_level_0_1 = {.phys = ZEUS_ADDR(0x2828b8)},
-       .int_level_0_0 = {.phys = ZEUS_ADDR(0x2828bc)},
-       .int_docsis_en = {.phys = ZEUS_ADDR(0x2828F4)},
-
-       .mips_pll_setup = {.phys = ZEUS_ADDR(0x1a0000)},
-       .fs432x4b4_usb_ctl = {.phys = ZEUS_ADDR(0x1a0018)},
-       .test_bus = {.phys = ZEUS_ADDR(0x1a0238)},
-       .crt_spare = {.phys = ZEUS_ADDR(0x1a0090)},
-       .usb2_ohci_int_mask = {.phys = ZEUS_ADDR(0x1e000c)},
-       .usb2_strap = {.phys = ZEUS_ADDR(0x1e0014)},
-       .ehci_hcapbase = {.phys = ZEUS_ADDR(0x1FFE00)},
-       .ohci_hc_revision = {.phys = ZEUS_ADDR(0x1FFC00)},
-       .bcm1_bs_lmi_steer = {.phys = ZEUS_ADDR(0x2C0008)},
-       .usb2_control = {.phys = ZEUS_ADDR(0x2c01a0)},
-       .usb2_stbus_obc = {.phys = ZEUS_ADDR(0x1FFF00)},
-       .usb2_stbus_mess_size = {.phys = ZEUS_ADDR(0x1FFF04)},
-       .usb2_stbus_chunk_size = {.phys = ZEUS_ADDR(0x1FFF08)},
-
-       .pcie_regs = {.phys = ZEUS_ADDR(0x200000)},
-       .tim_ch = {.phys = ZEUS_ADDR(0x282C10)},
-       .tim_cl = {.phys = ZEUS_ADDR(0x282C14)},
-       .gpio_dout = {.phys = ZEUS_ADDR(0x282c20)},
-       .gpio_din = {.phys = ZEUS_ADDR(0x282c24)},
-       .gpio_dir = {.phys = ZEUS_ADDR(0x282c2C)},
-       .watchdog = {.phys = ZEUS_ADDR(0x282c30)},
-       .front_panel = {.phys = ZEUS_ADDR(0x283800)},
-};
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
deleted file mode 100644 (file)
index 8380605..0000000
+++ /dev/null
@@ -1,549 +0,0 @@
-/*
- *
- * Description:         Defines the platform resources for Gaia-based settops.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * NOTE: The bootloader allocates persistent memory at an address which is
- * 16 MiB below the end of the highest address in KSEG0. All fixed
- * address memory reservations must avoid this region.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/resource.h>
-#include <linux/serial_reg.h>
-#include <linux/io.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <asm/page.h>
-#include <linux/swap.h>
-#include <linux/highmem.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/mach-powertv/asic.h>
-#include <asm/mach-powertv/asic_regs.h>
-#include <asm/mach-powertv/interrupts.h>
-
-#ifdef CONFIG_BOOTLOADER_DRIVER
-#include <asm/mach-powertv/kbldr.h>
-#endif
-#include <asm/bootinfo.h>
-
-#define BOOTLDRFAMILY(byte1, byte0) (((byte1) << 8) | (byte0))
-
-/*
- * Forward Prototypes
- */
-static void pmem_setup_resource(void);
-
-/*
- * Global Variables
- */
-enum asic_type asic;
-
-unsigned int platform_features;
-unsigned int platform_family;
-struct register_map _asic_register_map;
-EXPORT_SYMBOL(_asic_register_map);             /* Exported for testing */
-unsigned long asic_phy_base;
-unsigned long asic_base;
-EXPORT_SYMBOL(asic_base);                      /* Exported for testing */
-struct resource *gp_resources;
-
-/*
- * Not intended for direct use; it is normally used internally by the kernel.
- * Portable code should use interfaces such as ioremap, dma_map_single, etc.
- */
-unsigned long phys_to_dma_offset;
-EXPORT_SYMBOL(phys_to_dma_offset);
-
-/*
- *
- * IO Resource Definition
- *
- */
-
-struct resource asic_resource = {
-       .name  = "ASIC Resource",
-       .start = 0,
-       .end   = ASIC_IO_SIZE,
-       .flags = IORESOURCE_MEM,
-};
-
-/*
- * Allow override of the bootloader-specified model via the "forcefamily"
- * command-line parameter, which is parsed from this saved command line.
- */
-static char __initdata cmdline[COMMAND_LINE_SIZE];
-
-#define FORCEFAMILY_PARAM      "forcefamily"
-
-/*
- * check_forcefamily - check for, and parse, forcefamily command line parameter
- * @forced_family:     Pointer to two-character array in which to store the
- *                     value of the forcedfamily parameter, if any.
- */
-static __init int check_forcefamily(unsigned char forced_family[2])
-{
-       const char *p;
-
-       forced_family[0] = '\0';
-       forced_family[1] = '\0';
-
-       /* Check the command line for a forcefamily directive */
-       strncpy(cmdline, arcs_cmdline, COMMAND_LINE_SIZE - 1);
-       p = strstr(cmdline, FORCEFAMILY_PARAM);
-       if (p && (p != cmdline) && (*(p - 1) != ' '))
-               p = strstr(p, " " FORCEFAMILY_PARAM "=");
-
-       if (p) {
-               p += strlen(FORCEFAMILY_PARAM "=");
-
-               if (*p == '\0' || *(p + 1) == '\0' ||
-                       (*(p + 2) != '\0' && *(p + 2) != ' '))
-                       pr_err(FORCEFAMILY_PARAM " must be exactly two "
-                               "characters long, ignoring value\n");
-
-               else {
-                       forced_family[0] = *p;
-                       forced_family[1] = *(p + 1);
-               }
-       }
-
-       return 0;
-}
-
-/*
- * platform_set_family - determine major platform family type.
- *
- * Returns family type; -1 if none
- *
- */
-static __init noinline void platform_set_family(void)
-{
-       unsigned char forced_family[2];
-       unsigned short bootldr_family;
-
-       if (check_forcefamily(forced_family) == 0)
-               bootldr_family = BOOTLDRFAMILY(forced_family[0],
-                       forced_family[1]);
-       else
-               bootldr_family = (unsigned short) BOOTLDRFAMILY(
-                       CONFIG_BOOTLOADER_FAMILY[0],
-                       CONFIG_BOOTLOADER_FAMILY[1]);
-
-       pr_info("Bootloader Family = 0x%04X\n", bootldr_family);
-
-       switch (bootldr_family) {
-       case BOOTLDRFAMILY('R', '1'):
-               platform_family = FAMILY_1500;
-               break;
-       case BOOTLDRFAMILY('4', '4'):
-               platform_family = FAMILY_4500;
-               break;
-       case BOOTLDRFAMILY('4', '6'):
-               platform_family = FAMILY_4600;
-               break;
-       case BOOTLDRFAMILY('A', '1'):
-               platform_family = FAMILY_4600VZA;
-               break;
-       case BOOTLDRFAMILY('8', '5'):
-               platform_family = FAMILY_8500;
-               break;
-       case BOOTLDRFAMILY('R', '2'):
-               platform_family = FAMILY_8500RNG;
-               break;
-       case BOOTLDRFAMILY('8', '6'):
-               platform_family = FAMILY_8600;
-               break;
-       case BOOTLDRFAMILY('B', '1'):
-               platform_family = FAMILY_8600VZB;
-               break;
-       case BOOTLDRFAMILY('E', '1'):
-               platform_family = FAMILY_1500VZE;
-               break;
-       case BOOTLDRFAMILY('F', '1'):
-               platform_family = FAMILY_1500VZF;
-               break;
-       case BOOTLDRFAMILY('8', '7'):
-               platform_family = FAMILY_8700;
-               break;
-       default:
-               platform_family = -1;
-       }
-}
-
-unsigned int platform_get_family(void)
-{
-       return platform_family;
-}
-EXPORT_SYMBOL(platform_get_family);
-
-/*
- * platform_get_asic - determine the ASIC type.
- *
- * Returns the ASIC type, or ASIC_UNKNOWN if unknown
- *
- */
-enum asic_type platform_get_asic(void)
-{
-       return asic;
-}
-EXPORT_SYMBOL(platform_get_asic);
-
-/*
- * set_register_map - set ASIC register configuration
- * @phys_base: Physical address of the base of the ASIC registers
- * @map:       Description of key ASIC registers
- */
-static void __init set_register_map(unsigned long phys_base,
-       const struct register_map *map)
-{
-       asic_phy_base = phys_base;
-       _asic_register_map = *map;
-       register_map_virtualize(&_asic_register_map);
-       asic_base = (unsigned long)ioremap_nocache(phys_base, ASIC_IO_SIZE);
-}
-
-/**
- * configure_platform - configuration based on platform type.
- */
-void __init configure_platform(void)
-{
-       platform_set_family();
-
-       switch (platform_family) {
-       case FAMILY_1500:
-       case FAMILY_1500VZE:
-       case FAMILY_1500VZF:
-               platform_features = FFS_CAPABLE;
-               asic = ASIC_CALLIOPE;
-               set_register_map(CALLIOPE_IO_BASE, &calliope_register_map);
-
-               if (platform_family == FAMILY_1500VZE) {
-                       gp_resources = non_dvr_vze_calliope_resources;
-                       pr_info("Platform: 1500/Vz Class E - "
-                               "CALLIOPE, NON_DVR_CAPABLE\n");
-               } else if (platform_family == FAMILY_1500VZF) {
-                       gp_resources = non_dvr_vzf_calliope_resources;
-                       pr_info("Platform: 1500/Vz Class F - "
-                               "CALLIOPE, NON_DVR_CAPABLE\n");
-               } else {
-                       gp_resources = non_dvr_calliope_resources;
-                       pr_info("Platform: 1500/RNG100 - CALLIOPE, "
-                               "NON_DVR_CAPABLE\n");
-               }
-               break;
-
-       case FAMILY_4500:
-               platform_features = FFS_CAPABLE | PCIE_CAPABLE |
-                       DISPLAY_CAPABLE;
-               asic = ASIC_ZEUS;
-               set_register_map(ZEUS_IO_BASE, &zeus_register_map);
-               gp_resources = non_dvr_zeus_resources;
-
-               pr_info("Platform: 4500 - ZEUS, NON_DVR_CAPABLE\n");
-               break;
-
-       case FAMILY_4600:
-       {
-               unsigned int chipversion = 0;
-
-               /* The settop has PCIE but it isn't used, so don't advertise
-                * it*/
-               platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
-
-               /* Cronus and Cronus Lite have the same register map */
-               set_register_map(CRONUS_IO_BASE, &cronus_register_map);
-
-               /* ASIC version will determine if this is a real CronusLite or
-                * Castrati(Cronus) */
-               chipversion  = asic_read(chipver3) << 24;
-               chipversion |= asic_read(chipver2) << 16;
-               chipversion |= asic_read(chipver1) << 8;
-               chipversion |= asic_read(chipver0);
-
-               if ((chipversion == CRONUS_10) || (chipversion == CRONUS_11))
-                       asic = ASIC_CRONUS;
-               else
-                       asic = ASIC_CRONUSLITE;
-
-               gp_resources = non_dvr_cronuslite_resources;
-               pr_info("Platform: 4600 - %s, NON_DVR_CAPABLE, "
-                       "chipversion=0x%08X\n",
-                       (asic == ASIC_CRONUS) ? "CRONUS" : "CRONUS LITE",
-                       chipversion);
-               break;
-       }
-       case FAMILY_4600VZA:
-               platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
-               asic = ASIC_CRONUS;
-               set_register_map(CRONUS_IO_BASE, &cronus_register_map);
-               gp_resources = non_dvr_cronus_resources;
-
-               pr_info("Platform: Vz Class A - CRONUS, NON_DVR_CAPABLE\n");
-               break;
-
-       case FAMILY_8500:
-       case FAMILY_8500RNG:
-               platform_features = DVR_CAPABLE | PCIE_CAPABLE |
-                       DISPLAY_CAPABLE;
-               asic = ASIC_ZEUS;
-               set_register_map(ZEUS_IO_BASE, &zeus_register_map);
-               gp_resources = dvr_zeus_resources;
-
-               pr_info("Platform: 8500/RNG200 - ZEUS, DVR_CAPABLE\n");
-               break;
-
-       case FAMILY_8600:
-       case FAMILY_8600VZB:
-               platform_features = DVR_CAPABLE | PCIE_CAPABLE |
-                       DISPLAY_CAPABLE;
-               asic = ASIC_CRONUS;
-               set_register_map(CRONUS_IO_BASE, &cronus_register_map);
-               gp_resources = dvr_cronus_resources;
-
-               pr_info("Platform: 8600/Vz Class B - CRONUS, "
-                       "DVR_CAPABLE\n");
-               break;
-
-       case FAMILY_8700:
-               platform_features = FFS_CAPABLE | PCIE_CAPABLE;
-               asic = ASIC_GAIA;
-               set_register_map(GAIA_IO_BASE, &gaia_register_map);
-               gp_resources = dvr_gaia_resources;
-
-               pr_info("Platform: 8700 - GAIA, DVR_CAPABLE\n");
-               break;
-
-       default:
-               pr_crit("Platform:  UNKNOWN PLATFORM\n");
-               break;
-       }
-
-       switch (asic) {
-       case ASIC_ZEUS:
-               phys_to_dma_offset = 0x30000000;
-               break;
-       case ASIC_CALLIOPE:
-               phys_to_dma_offset = 0x10000000;
-               break;
-       case ASIC_CRONUSLITE:
-               /* Fall through */
-       case ASIC_CRONUS:
-               /*
-                * TODO: We suppose 0x10000000 aliases into 0x20000000-
-                * 0x2XXXXXXX. If 0x10000000 aliases into 0x60000000-
-                * 0x6XXXXXXX, the offset should be 0x50000000, not 0x10000000.
-                */
-               phys_to_dma_offset = 0x10000000;
-               break;
-       default:
-               phys_to_dma_offset = 0x00000000;
-               break;
-       }
-}
-
-/*
- * RESOURCE ALLOCATION
- *
- */
-/*
- * Allocates/reserves the Platform memory resources early in the boot process.
- * This ignores any resources that are designated IORESOURCE_IO
- */
-void __init platform_alloc_bootmem(void)
-{
-       int i;
-       int total = 0;
-
-	/* Get persistent memory data from the command line before allocating
-	 * resources. This needs to happen before normal command-line parsing
-	 * has been done. */
-       pmem_setup_resource();
-
-       /* Loop through looking for resources that want a particular address */
-       for (i = 0; gp_resources[i].flags != 0; i++) {
-               int size = resource_size(&gp_resources[i]);
-               if ((gp_resources[i].start != 0) &&
-                       ((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
-                       reserve_bootmem(dma_to_phys(gp_resources[i].start),
-                               size, 0);
-                       total += resource_size(&gp_resources[i]);
-                       pr_info("reserve resource %s at %08x (%u bytes)\n",
-                               gp_resources[i].name, gp_resources[i].start,
-                               resource_size(&gp_resources[i]));
-               }
-       }
-
-       /* Loop through assigning addresses for those that are left */
-       for (i = 0; gp_resources[i].flags != 0; i++) {
-               int size = resource_size(&gp_resources[i]);
-               if ((gp_resources[i].start == 0) &&
-                       ((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
-                       void *mem = alloc_bootmem_pages(size);
-
-                       if (mem == NULL)
-                               pr_err("Unable to allocate bootmem pages "
-                                       "for %s\n", gp_resources[i].name);
-
-                       else {
-                               gp_resources[i].start =
-                                       phys_to_dma(virt_to_phys(mem));
-                               gp_resources[i].end =
-                                       gp_resources[i].start + size - 1;
-                               total += size;
-                               pr_info("allocate resource %s at %08x "
-                                               "(%u bytes)\n",
-                                       gp_resources[i].name,
-                                       gp_resources[i].start, size);
-                       }
-               }
-       }
-
-       pr_info("Total Platform driver memory allocation: 0x%08x\n", total);
-
-       /* indicate resources that are platform I/O related */
-       for (i = 0; gp_resources[i].flags != 0; i++) {
-               if ((gp_resources[i].start != 0) &&
-                       ((gp_resources[i].flags & IORESOURCE_IO) != 0)) {
-                       pr_info("reserved platform resource %s at %08x\n",
-                               gp_resources[i].name, gp_resources[i].start);
-               }
-       }
-}
-
-/*
- *
- * PERSISTENT MEMORY (PMEM) CONFIGURATION
- *
- */
-static unsigned long pmemaddr __initdata;
-
-static int __init early_param_pmemaddr(char *p)
-{
-       pmemaddr = (unsigned long)simple_strtoul(p, NULL, 0);
-       return 0;
-}
-early_param("pmemaddr", early_param_pmemaddr);
-
-static long pmemlen __initdata;
-
-static int __init early_param_pmemlen(char *p)
-{
-/* TODO: we can use this code when and if the bootloader ever changes this */
-#if 0
-       pmemlen = (unsigned long)simple_strtoul(p, NULL, 0);
-#else
-       pmemlen = 0x20000;
-#endif
-       return 0;
-}
-early_param("pmemlen", early_param_pmemlen);
-
-/*
- * Set up persistent memory. If we were given values, we patch the array of
- * resources. Otherwise, persistent memory may be allocated anywhere at all.
- */
-static void __init pmem_setup_resource(void)
-{
-       struct resource *resource;
-       resource = asic_resource_get("DiagPersistentMemory");
-
-       if (resource && pmemaddr && pmemlen) {
-               /* The address provided by bootloader is in kseg0. Convert to
-                * a bus address. */
-               resource->start = phys_to_dma(pmemaddr - 0x80000000);
-               resource->end = resource->start + pmemlen - 1;
-
-               pr_info("persistent memory: start=0x%x  end=0x%x\n",
-                       resource->start, resource->end);
-       }
-}
-
-/*
- *
- * RESOURCE ACCESS FUNCTIONS
- *
- */
-
-/**
- * asic_resource_get - retrieves parameters for a platform resource.
- * @name:      string to match resource
- *
- * Returns a pointer to a struct resource corresponding to the given name.
- *
- * CANNOT BE NAMED platform_resource_get, which would be the obvious choice,
- * as this function name is already declared
- */
-struct resource *asic_resource_get(const char *name)
-{
-       int i;
-
-       for (i = 0; gp_resources[i].flags != 0; i++) {
-               if (strcmp(gp_resources[i].name, name) == 0)
-                       return &gp_resources[i];
-       }
-
-       return NULL;
-}
-EXPORT_SYMBOL(asic_resource_get);
-
-/**
- * platform_release_memory - release pre-allocated memory
- * @ptr:       pointer to memory to release
- * @size:      size of resource
- *
- * This must only be called for memory allocated or reserved via the boot
- * memory allocator.
- */
-void platform_release_memory(void *ptr, int size)
-{
-       free_reserved_area(ptr, ptr + size, -1, NULL);
-}
-EXPORT_SYMBOL(platform_release_memory);
-
-/*
- *
- * FEATURE AVAILABILITY FUNCTIONS
- *
- */
-int platform_supports_dvr(void)
-{
-       return (platform_features & DVR_CAPABLE) != 0;
-}
-
-int platform_supports_ffs(void)
-{
-       return (platform_features & FFS_CAPABLE) != 0;
-}
-
-int platform_supports_pcie(void)
-{
-       return (platform_features & PCIE_CAPABLE) != 0;
-}
-
-int platform_supports_display(void)
-{
-       return (platform_features & DISPLAY_CAPABLE) != 0;
-}
diff --git a/arch/mips/powertv/asic/asic_int.c b/arch/mips/powertv/asic/asic_int.c
deleted file mode 100644 (file)
index f44cd92..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
- * Copyright (C) 2001 Ralf Baechle
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Routines for generic manipulation of the interrupts found on the PowerTV
- * platform.
- *
- * The interrupt controller is located in the South Bridge, a PIIX4 device
- * with two internal 82C95 interrupt controllers.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/kernel.h>
-#include <linux/random.h>
-
-#include <asm/irq_cpu.h>
-#include <linux/io.h>
-#include <asm/irq_regs.h>
-#include <asm/setup.h>
-#include <asm/mips-boards/generic.h>
-
-#include <asm/mach-powertv/asic_regs.h>
-
-static DEFINE_RAW_SPINLOCK(asic_irq_lock);
-
-static inline int get_int(void)
-{
-       unsigned long flags;
-       int irq;
-
-       raw_spin_lock_irqsave(&asic_irq_lock, flags);
-
-       irq = (asic_read(int_int_scan) >> 4) - 1;
-
-       if (irq == 0 || irq >= NR_IRQS)
-               irq = -1;
-
-       raw_spin_unlock_irqrestore(&asic_irq_lock, flags);
-
-       return irq;
-}
-
-static void asic_irqdispatch(void)
-{
-       int irq;
-
-       irq = get_int();
-       if (irq < 0)
-               return;  /* interrupt has already been cleared */
-
-       do_IRQ(irq);
-}
-
-static inline int clz(unsigned long x)
-{
-       __asm__(
-       "       .set    push                                    \n"
-       "       .set    mips32                                  \n"
-       "       clz     %0, %1                                  \n"
-       "       .set    pop                                     \n"
-       : "=r" (x)
-       : "r" (x));
-
-       return x;
-}
-
-/*
- * Version of ffs that only looks at bits 12..15.
- */
-static inline unsigned int irq_ffs(unsigned int pending)
-{
-       return fls(pending) - 1 + CAUSEB_IP;
-}
-
-/*
- * TODO: check how it works under EIC mode.
- */
-asmlinkage void plat_irq_dispatch(void)
-{
-       unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
-       int irq;
-
-       irq = irq_ffs(pending);
-
-       if (irq == CAUSEF_IP3)
-               asic_irqdispatch();
-       else if (irq >= 0)
-               do_IRQ(irq);
-       else
-               spurious_interrupt();
-}
-
-void __init arch_init_irq(void)
-{
-       int i;
-
-       asic_irq_init();
-
-       /*
-        * Initialize interrupt exception vectors.
-        */
-       if (cpu_has_veic || cpu_has_vint) {
-               int nvec = cpu_has_veic ? 64 : 8;
-               for (i = 0; i < nvec; i++)
-                       set_vi_handler(i, asic_irqdispatch);
-       }
-}
diff --git a/arch/mips/powertv/asic/irq_asic.c b/arch/mips/powertv/asic/irq_asic.c
deleted file mode 100644 (file)
index 9344902..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Portions copyright (C) 2005-2009 Scientific Atlanta
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- * Modified from arch/mips/kernel/irq-rm7000.c:
- * Copyright (C) 2003 Ralf Baechle
- *
- * This program is free software; you can redistribute it and/or modify it
- * under  the terms of the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/irq.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-
-#include <asm/mach-powertv/asic_regs.h>
-
-static inline void unmask_asic_irq(struct irq_data *d)
-{
-       unsigned long enable_bit;
-       unsigned int irq = d->irq;
-
-       enable_bit = (1 << (irq & 0x1f));
-
-       switch (irq >> 5) {
-       case 0:
-               asic_write(asic_read(ien_int_0) | enable_bit, ien_int_0);
-               break;
-       case 1:
-               asic_write(asic_read(ien_int_1) | enable_bit, ien_int_1);
-               break;
-       case 2:
-               asic_write(asic_read(ien_int_2) | enable_bit, ien_int_2);
-               break;
-       case 3:
-               asic_write(asic_read(ien_int_3) | enable_bit, ien_int_3);
-               break;
-       default:
-               BUG();
-       }
-}
-
-static inline void mask_asic_irq(struct irq_data *d)
-{
-       unsigned long disable_mask;
-       unsigned int irq = d->irq;
-
-       disable_mask = ~(1 << (irq & 0x1f));
-
-       switch (irq >> 5) {
-       case 0:
-               asic_write(asic_read(ien_int_0) & disable_mask, ien_int_0);
-               break;
-       case 1:
-               asic_write(asic_read(ien_int_1) & disable_mask, ien_int_1);
-               break;
-       case 2:
-               asic_write(asic_read(ien_int_2) & disable_mask, ien_int_2);
-               break;
-       case 3:
-               asic_write(asic_read(ien_int_3) & disable_mask, ien_int_3);
-               break;
-       default:
-               BUG();
-       }
-}
-
-static struct irq_chip asic_irq_chip = {
-       .name = "ASIC Level",
-       .irq_mask = mask_asic_irq,
-       .irq_unmask = unmask_asic_irq,
-};
-
-void __init asic_irq_init(void)
-{
-       int i;
-
-       /* set priority to 0 */
-       write_c0_status(read_c0_status() & ~(0x0000fc00));
-
-       asic_write(0, ien_int_0);
-       asic_write(0, ien_int_1);
-       asic_write(0, ien_int_2);
-       asic_write(0, ien_int_3);
-
-       asic_write(0x0fffffff, int_level_3_3);
-       asic_write(0xffffffff, int_level_3_2);
-       asic_write(0xffffffff, int_level_3_1);
-       asic_write(0xffffffff, int_level_3_0);
-       asic_write(0xffffffff, int_level_2_3);
-       asic_write(0xffffffff, int_level_2_2);
-       asic_write(0xffffffff, int_level_2_1);
-       asic_write(0xffffffff, int_level_2_0);
-       asic_write(0xffffffff, int_level_1_3);
-       asic_write(0xffffffff, int_level_1_2);
-       asic_write(0xffffffff, int_level_1_1);
-       asic_write(0xffffffff, int_level_1_0);
-       asic_write(0xffffffff, int_level_0_3);
-       asic_write(0xffffffff, int_level_0_2);
-       asic_write(0xffffffff, int_level_0_1);
-       asic_write(0xffffffff, int_level_0_0);
-
-       asic_write(0xf, int_int_scan);
-
-       /*
-        * Initialize interrupt handlers.
-        */
-       for (i = 0; i < NR_IRQS; i++)
-               irq_set_chip_and_handler(i, &asic_irq_chip, handle_level_irq);
-}
diff --git a/arch/mips/powertv/asic/prealloc-calliope.c b/arch/mips/powertv/asic/prealloc-calliope.c
deleted file mode 100644 (file)
index 98dc516..0000000
+++ /dev/null
@@ -1,385 +0,0 @@
-/*
- * Memory pre-allocations for Calliope boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * NON_DVR_CAPABLE CALLIOPE RESOURCES
- */
-struct resource non_dvr_calliope_resources[] __initdata =
-{
-       /*
-        * VIDEO / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~36.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26700000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        */
-       /* 6MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x27500000, 0x27c00000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x26700000, 0x26700000+(14*1048576)-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer (don't need recording buffers)
-        */
-       /* 680KiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * AVFS: player HAL memory
-        */
-       /* 945K * 3 for playback */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Synopsys GMAC Memory Region
-        */
-       /* 64KiB */
-       PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * TFTPBuffer
-        *
-        *  This buffer is used in some minimal configurations (e.g. two-way
-        *  loader) for storing software images
-        */
-       PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
-
-
-struct resource non_dvr_vze_calliope_resources[] __initdata =
-{
-       /*
-        * VIDEO / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x22000000, 0x22200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x22200000, 0x22202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (10.12MiB) */
-       PREALLOC_NORMAL("MediaMemory1", 0x22202000, 0x22C20B85-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        */
-       /* 3.125MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x20396000, 0x206B6000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (2.59MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x20100000, 0x20396000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x206B6000, 0x206D6000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer (don't need recording buffers)
-        */
-       /* 680KiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Synopsys GMAC Memory Region
-        */
-       /* 64KiB */
-       PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
-
-struct resource non_dvr_vzf_calliope_resources[] __initdata =
-{
-       /*
-        * VIDEO / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~19.4MiB (21.5MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x25580000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        */
-       /* 4.5MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00480000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x25600000, 0x25600000+(14*1048576)-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x23700000, 0x23720000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer (don't need recording buffers)
-        */
-       /* 680KiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit1
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * AVFS: player HAL memory
-        */
-       /* 945K * 3 for playback */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Synopsys GMAC Memory Region
-        */
-       /* 64KiB */
-       PREALLOC_NORMAL("GMAC", 0x00000000, 0x00010000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
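
The size comments in these tables can be checked directly against the (start, end) pairs: each region spans end - start + 1 bytes, and the entries that start at 0x00000000 appear to be the "arbitrary based" buffers that the platform code places at boot rather than at fixed addresses. A minimal stand-alone sketch of that arithmetic, using values copied from non_dvr_vzf_calliope_resources above (illustrative only, not part of the removed code):

#include <stdio.h>

/* Stand-alone check of a few size comments from the tables above.
 * A resource spanning [start, end] is end - start + 1 bytes long.
 */
int main(void)
{
	/* AVMEMPartition0: 0x00000000 .. 0x00480000-1, commented "4.5MiB" */
	printf("AVMEMPartition0: %.2f MiB\n", 0x00480000 / 1048576.0);

	/* BMM_Buffer: 0x00000000 .. 0x000AA000-1, commented "680KiB" */
	printf("BMM_Buffer:      %ld KiB\n", 0x000AA000L / 1024);

	/* AvfsDmaMem: 0x00000000 .. 0x002c4c00-1, commented "945K * 3" */
	printf("AvfsDmaMem:      %ld KiB x 3\n", 0x002c4c00L / 1024 / 3);

	return 0;
}
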
diff --git a/arch/mips/powertv/asic/prealloc-cronus.c b/arch/mips/powertv/asic/prealloc-cronus.c
deleted file mode 100644 (file)
index 7c6ce75..0000000
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Memory pre-allocations for Cronus boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * DVR_CAPABLE CRONUS RESOURCES
- */
-struct resource dvr_cronus_resources[] __initdata =
-{
-       /*
-        * VIDEO1 / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * VIDEO2 / LX2
-        */
-       /* Delta-Mu 2 image (2MiB) */
-       PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        *
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        */
-       /* 12MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer
-        */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x002EA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit1
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * ITFS
-        */
-       /* 815,104 bytes each for 2 ITFS partitions. */
-       PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1, IORESOURCE_MEM)
-
-       /*
-        * AVFS
-        */
-       /* (945K * 8) + (128K * 3): 5 playbacks / 3 server */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
-               IORESOURCE_MEM)
-
-       /* 4KiB */
-       PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               IORESOURCE_MEM)
-
-       /*
-        * KAVNET
-        */
-       /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
-       PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
-               IORESOURCE_MEM)
-       /* NP Image - must be video bank 1 (320KiB) */
-       PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
-       /* NP IPC - must be video bank 2 (512KiB) */
-       PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
-
-       /*
-        * TFTPBuffer
-        *
-        *  This buffer is used in some minimal configurations (e.g. two-way
-        *  loader) for storing software images
-        */
-       PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
-
-/*
- * NON_DVR_CAPABLE CRONUS RESOURCES
- */
-struct resource non_dvr_cronus_resources[] __initdata =
-{
-       /*
-        * VIDEO1 / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x24000000, 0x24200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x24200000, 0x24202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x24202000, 0x26000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * VIDEO2 / LX2
-        */
-       /* Delta-Mu 2 image (2MiB) */
-       PREALLOC_NORMAL("ST231bImage", 0x60000000, 0x60200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231bMonitor", 0x60200000, 0x60202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory2", 0x60202000, 0x62000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        *
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        */
-       /* 12MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer (don't need recording buffers)
-        */
-       /* 680KiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit1
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * AVFS: player HAL memory
-        */
-       /* 945K * 3 for playback */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1, IORESOURCE_MEM)
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
-
-       /*
-        * KAVNET
-        */
-       /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
-       PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
-               IORESOURCE_MEM)
-       /* NP Image - must be video bank 1 (320KiB) */
-       PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
-       /* NP IPC - must be video bank 2 (512KiB) */
-       PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
diff --git a/arch/mips/powertv/asic/prealloc-cronuslite.c b/arch/mips/powertv/asic/prealloc-cronuslite.c
deleted file mode 100644 (file)
index a7937ba..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Memory pre-allocations for Cronus Lite boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * NON_DVR_CAPABLE CRONUSLITE RESOURCES
- */
-struct resource non_dvr_cronuslite_resources[] __initdata =
-{
-       /*
-        * VIDEO2 / LX2
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x60000000, 0x60200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x60200000, 0x60202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x60202000, 0x62000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (128KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00020000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        *
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        */
-       /* 6MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x67500000, 0x67c00000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x62700000, 0x63500000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x26000000, 0x26020000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer (don't need recording buffers)
-        */
-       /* 680KiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x000AA000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit1
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * AVFS: player HAL memory
-        */
-       /* 945K * 3 for playback */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1, IORESOURCE_MEM)
-
-       /*
-        * KAVNET
-        */
-       /* NP Reset Vector - must be of the form xxCxxxxx (4KiB) */
-       PREALLOC_NORMAL("NP_Reset_Vector", 0x27c00000, 0x27c01000-1,
-               IORESOURCE_MEM)
-       /* NP Image - must be video bank 1 (320KiB) */
-       PREALLOC_NORMAL("NP_Image", 0x27020000, 0x27070000-1, IORESOURCE_MEM)
-       /* NP IPC - must be video bank 2 (512KiB) */
-       PREALLOC_NORMAL("NP_IPC", 0x63500000, 0x63580000-1, IORESOURCE_MEM)
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * TFTPBuffer
-        *
-        *  This buffer is used in some minimal configurations (e.g. two-way
-        *  loader) for storing software images
-        */
-       PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
diff --git a/arch/mips/powertv/asic/prealloc-gaia.c b/arch/mips/powertv/asic/prealloc-gaia.c
deleted file mode 100644 (file)
index 2303bbf..0000000
+++ /dev/null
@@ -1,589 +0,0 @@
-/*
- * Memory pre-allocations for Gaia boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      David VomLehn
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/asic.h>
-
-/*
- * DVR_CAPABLE GAIA RESOURCES
- */
-struct resource dvr_gaia_resources[] __initdata = {
-       /*
-        *
-        * VIDEO1 / LX1
-        *
-        */
-       {
-               .name   = "ST231aImage",        /* Delta-Mu 1 image and ram */
-               .start  = 0x24000000,
-               .end    = 0x241FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ST231aMonitor",      /* 8KiB block ST231a monitor */
-               .start  = 0x24200000,
-               .end    = 0x24201FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "MediaMemory1",
-               .start  = 0x24202000,
-               .end    = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * VIDEO2 / LX2
-        *
-        */
-       {
-               .name   = "ST231bImage",        /* Delta-Mu 2 image and ram */
-               .start  = 0x60000000,
-               .end    = 0x601FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_IO,
-       },
-       {
-               .name   = "ST231bMonitor",      /* 8KiB block ST231b monitor */
-               .start  = 0x60200000,
-               .end    = 0x60201FFF,
-               .flags  = IORESOURCE_IO,
-       },
-       {
-               .name   = "MediaMemory2",
-               .start  = 0x60202000,
-               .end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * Sysaudio Driver
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  DSP_Image_Buff - DSP code and data images (1MB)
-        *  ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
-        *  ADSC_AUX_Buff - ADSC AUX buffer (16KB)
-        *  ADSC_Main_Buff - ADSC Main buffer (16KB)
-        *
-        */
-       {
-               .name   = "DSP_Image_Buff",
-               .start  = 0x00000000,
-               .end    = 0x000FFFFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_CPU_PCM_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00009FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_AUX_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_Main_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * STAVEM driver/STAPI
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        *
-        */
-       {
-               .name   = "AVMEMPartition0",
-               .start  = 0x63580000,
-               .end    = 0x64180000 - 1,  /* 12 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * DOCSIS Subsystem
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Docsis -
-        *
-        */
-       {
-               .name   = "Docsis",
-               .start  = 0x62000000,
-               .end    = 0x62700000 - 1,       /* 7 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * GHW HAL Driver
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  GraphicsHeap - PowerTV Graphics Heap
-        *
-        */
-       {
-               .name   = "GraphicsHeap",
-               .start  = 0x62700000,
-               .end    = 0x63500000 - 1,       /* 14 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * multi com buffer area
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Docsis -
-        *
-        */
-       {
-               .name   = "MulticomSHM",
-               .start  = 0x26000000,
-               .end    = 0x26020000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * DMA Ring buffer
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Docsis -
-        *
-        */
-       {
-               .name   = "BMM_Buffer",
-               .start  = 0x00000000,
-               .end    = 0x00280000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * Display bins buffer for unit0
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Display Bins for unit0
-        *
-        */
-       {
-               .name   = "DisplayBins0",
-               .start  = 0x00000000,
-               .end    = 0x00000FFF,           /* 4 KB total */
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * Display bins buffer
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Display Bins for unit1
-        *
-        */
-       {
-               .name   = "DisplayBins1",
-               .start  = 0x64AD4000,
-               .end    = 0x64AD5000 - 1,  /* 4 KB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * ITFS
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Docsis -
-        *
-        */
-       {
-               .name   = "ITFS",
-               .start  = 0x64180000,
-               /* 815,104 bytes each for 2 ITFS partitions. */
-               .end    = 0x6430DFFF,
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * AVFS
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Docsis -
-        *
-        */
-       {
-               .name   = "AvfsDmaMem",
-               .start  = 0x6430E000,
-               /* (945K * 8) + (128K * 3): 5 playbacks / 3 server */
-               .end    = 0x64AD0000 - 1,
-               .flags  = IORESOURCE_IO,
-       },
-       {
-               .name   = "AvfsFileSys",
-               .start  = 0x64AD0000,
-               .end    = 0x64AD1000 - 1,  /* 4K */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * Smartcard
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Read and write buffers for Internal/External cards
-        *
-        */
-       {
-               .name   = "SmartCardInfo",
-               .start  = 0x64AD1000,
-               .end    = 0x64AD3800 - 1,
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * KAVNET
-        *    NP Reset Vector - must be of the form xxCxxxxx
-        *         NP Image - must be video bank 1
-        *         NP IPC - must be video bank 2
-        */
-       {
-               .name   = "NP_Reset_Vector",
-               .start  = 0x27c00000,
-               .end    = 0x27c01000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "NP_Image",
-               .start  = 0x27020000,
-               .end    = 0x27060000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "NP_IPC",
-               .start  = 0x63500000,
-               .end    = 0x63580000 - 1,
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        * Add other resources here
-        */
-       { },
-};
-
-/*
- * NON_DVR_CAPABLE GAIA RESOURCES
- */
-struct resource non_dvr_gaia_resources[] __initdata = {
-       /*
-        *
-        * VIDEO1 / LX1
-        *
-        */
-       {
-               .name   = "ST231aImage",        /* Delta-Mu 1 image and ram */
-               .start  = 0x24000000,
-               .end    = 0x241FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ST231aMonitor",      /* 8KiB block ST231a monitor */
-               .start  = 0x24200000,
-               .end    = 0x24201FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "MediaMemory1",
-               .start  = 0x24202000,
-               .end    = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * VIDEO2 / LX2
-        *
-        */
-       {
-               .name   = "ST231bImage",        /* Delta-Mu 2 image and ram */
-               .start  = 0x60000000,
-               .end    = 0x601FFFFF,           /* 2MiB */
-               .flags  = IORESOURCE_IO,
-       },
-       {
-               .name   = "ST231bMonitor",      /* 8KiB block ST231b monitor */
-               .start  = 0x60200000,
-               .end    = 0x60201FFF,
-               .flags  = IORESOURCE_IO,
-       },
-       {
-               .name   = "MediaMemory2",
-               .start  = 0x60202000,
-               .end    = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * Sysaudio Driver
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  DSP_Image_Buff - DSP code and data images (1MB)
-        *  ADSC_CPU_PCM_Buff - ADSC CPU PCM buffer (40KB)
-        *  ADSC_AUX_Buff - ADSC AUX buffer (16KB)
-        *  ADSC_Main_Buff - ADSC Main buffer (16KB)
-        *
-        */
-       {
-               .name   = "DSP_Image_Buff",
-               .start  = 0x00000000,
-               .end    = 0x000FFFFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_CPU_PCM_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00009FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_AUX_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "ADSC_Main_Buff",
-               .start  = 0x00000000,
-               .end    = 0x00003FFF,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * STAVEM driver/STAPI
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        *
-        */
-       {
-               .name   = "AVMEMPartition0",
-               .start  = 0x63580000,
-               .end    = 0x64180000 - 1,  /* 12 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * DOCSIS Subsystem
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Docsis -
-        *
-        */
-       {
-               .name   = "Docsis",
-               .start  = 0x62000000,
-               .end    = 0x62700000 - 1,       /* 7 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * GHW HAL Driver
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  GraphicsHeap - PowerTV Graphics Heap
-        *
-        */
-       {
-               .name   = "GraphicsHeap",
-               .start  = 0x62700000,
-               .end    = 0x63500000 - 1,       /* 14 MB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * multi com buffer area
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Docsis -
-        *
-        */
-       {
-               .name   = "MulticomSHM",
-               .start  = 0x26000000,
-               .end    = 0x26020000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * DMA Ring buffer
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Docsis -
-        *
-        */
-       {
-               .name   = "BMM_Buffer",
-               .start  = 0x00000000,
-               .end    = 0x000AA000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * Display bins buffer for unit0
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Display Bins for unit0
-        *
-        */
-       {
-               .name   = "DisplayBins0",
-               .start  = 0x00000000,
-               .end    = 0x00000FFF,           /* 4 KB total */
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * Display bins buffer
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Display Bins for unit1
-        *
-        */
-       {
-               .name   = "DisplayBins1",
-               .start  = 0x64AD4000,
-               .end    = 0x64AD5000 - 1,  /* 4 KB total */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * AVFS: player HAL memory
-        *
-        *
-        */
-       {
-               .name   = "AvfsDmaMem",
-               .start  = 0x6430E000,
-               .end    = 0x645D2C00 - 1,  /* 945K * 3 for playback */
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * PMEM
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Persistent memory for diagnostics.
-        *
-        */
-       {
-               .name   = "DiagPersistentMemory",
-               .start  = 0x00000000,
-               .end    = 0x10000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       /*
-        *
-        * Smartcard
-        *
-        * This driver requires:
-        *
-        * Arbitrary Based Buffers:
-        *  Read and write buffers for Internal/External cards
-        *
-        */
-       {
-               .name   = "SmartCardInfo",
-               .start  = 0x64AD1000,
-               .end    = 0x64AD3800 - 1,
-               .flags  = IORESOURCE_IO,
-       },
-       /*
-        *
-        * KAVNET
-        *    NP Reset Vector - must be of the form xxCxxxxx
-        *         NP Image - must be video bank 1
-        *         NP IPC - must be video bank 2
-        */
-       {
-               .name   = "NP_Reset_Vector",
-               .start  = 0x27c00000,
-               .end    = 0x27c01000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "NP_Image",
-               .start  = 0x27020000,
-               .end    = 0x27060000 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .name   = "NP_IPC",
-               .start  = 0x63500000,
-               .end    = 0x63580000 - 1,
-               .flags  = IORESOURCE_IO,
-       },
-       { },
-};
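
Unlike the Calliope, Cronus and Zeus tables, the Gaia tables above spell out every struct resource initializer by hand instead of using the PREALLOC_*() helpers from prealloc.h (removed further down in this diff), which expand to the same field-by-field initializer. A self-contained sketch of that equivalence, with local stand-ins (demo_resource, DEMO_IORESOURCE_MEM, DEMO_PREALLOC) in place of the real kernel types and flags:

#include <stdio.h>

/* Local stand-ins so the sketch compiles on its own; the real array
 * elements are struct resource from <linux/ioport.h>.
 */
struct demo_resource {
	const char *name;
	unsigned long start, end, flags;
};

#define DEMO_IORESOURCE_MEM	0x1	/* stand-in flag value */

/* Same shape as the PREALLOC() helper in prealloc.h below. */
#define DEMO_PREALLOC(NAME, START, END, FLAGS) {	\
		.name = (NAME),				\
		.start = (START),			\
		.end = (END),				\
		.flags = (FLAGS)			\
	},

static struct demo_resource demo[] = {
	/* Hand-written form, as in dvr_gaia_resources above: */
	{
		.name	= "ST231aImage",	/* Delta-Mu 1 image and ram */
		.start	= 0x24000000,
		.end	= 0x241FFFFF,		/* 2MiB */
		.flags	= DEMO_IORESOURCE_MEM,
	},
	/* Macro form used by the other board files; this entry and the
	 * one above describe the identical 2MiB region.
	 */
	DEMO_PREALLOC("ST231aImage", 0x24000000, 0x24200000-1,
		DEMO_IORESOURCE_MEM)
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(demo) / sizeof(demo[0]); i++)
		printf("%s: 0x%08lx-0x%08lx (%lu bytes)\n", demo[i].name,
		       demo[i].start, demo[i].end,
		       demo[i].end - demo[i].start + 1);
	return 0;
}
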
diff --git a/arch/mips/powertv/asic/prealloc-zeus.c b/arch/mips/powertv/asic/prealloc-zeus.c
deleted file mode 100644 (file)
index 6e76f09..0000000
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Memory pre-allocations for Zeus boxes.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- */
-
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <asm/mach-powertv/asic.h>
-#include "prealloc.h"
-
-/*
- * DVR_CAPABLE RESOURCES
- */
-struct resource dvr_zeus_resources[] __initdata =
-{
-       /*
-        * VIDEO1 / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * VIDEO2 / LX2
-        */
-       /* Delta-Mu 2 image (2MiB) */
-       PREALLOC_NORMAL("ST231bImage", 0x30000000, 0x30200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231bMonitor", 0x30200000, 0x30202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 2 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory2", 0x30202000, 0x32000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        *
-        *  This memory area is used for allocating buffers for Video decoding
-        *  purposes.  Allocation/De-allocation within this buffer is managed
-        *  by the STAVMEM driver of the STAPI.  They could be Decimated
-        *  Picture Buffers, Intermediate Buffers, as deemed necessary for
-        *  video decoding purposes, for any video decoders on Zeus.
-        */
-       /* 12MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00c00000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer
-        */
-       /* 2.5MiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit1
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins1", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * ITFS
-        */
-       /* 815,104 bytes each for 2 ITFS partitions. */
-       PREALLOC_NORMAL("ITFS", 0x00000000, 0x0018E000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * AVFS
-        */
-       /* (945K * 8) + (128K * 3): 5 playbacks / 3 server */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x007c2000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* 4KiB */
-       PREALLOC_NORMAL("AvfsFileSys", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * TFTPBuffer
-        *
-        *  This buffer is used in some minimal configurations (e.g. two-way
-        *  loader) for storing software images
-        */
-       PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
-
-/*
- * NON_DVR_CAPABLE ZEUS RESOURCES
- */
-struct resource non_dvr_zeus_resources[] __initdata =
-{
-       /*
-        * VIDEO1 / LX1
-        */
-       /* Delta-Mu 1 image (2MiB) */
-       PREALLOC_NORMAL("ST231aImage", 0x20000000, 0x20200000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 monitor (8KiB) */
-       PREALLOC_NORMAL("ST231aMonitor", 0x20200000, 0x20202000-1,
-               IORESOURCE_MEM)
-       /* Delta-Mu 1 RAM (~29.9MiB (32MiB - (2MiB + 8KiB))) */
-       PREALLOC_NORMAL("MediaMemory1", 0x20202000, 0x22000000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * Sysaudio Driver
-        */
-       /* DSP code and data images (1MiB) */
-       PREALLOC_NORMAL("DSP_Image_Buff", 0x00000000, 0x00100000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC CPU PCM buffer (40KiB) */
-       PREALLOC_NORMAL("ADSC_CPU_PCM_Buff", 0x00000000, 0x0000A000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC AUX buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_AUX_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-       /* ADSC Main buffer (16KiB) */
-       PREALLOC_NORMAL("ADSC_Main_Buff", 0x00000000, 0x00004000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * STAVEM driver/STAPI
-        */
-       /* 6MiB */
-       PREALLOC_NORMAL("AVMEMPartition0", 0x00000000, 0x00600000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * DOCSIS Subsystem
-        */
-       /* 7MiB */
-       PREALLOC_DOCSIS("Docsis", 0x40100000, 0x40800000-1, IORESOURCE_MEM)
-
-       /*
-        * GHW HAL Driver
-        */
-       /* PowerTV Graphics Heap (14MiB) */
-       PREALLOC_NORMAL("GraphicsHeap", 0x46900000, 0x47700000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * multi com buffer area
-        */
-       /* 128KiB */
-       PREALLOC_NORMAL("MulticomSHM", 0x47900000, 0x47920000-1,
-               IORESOURCE_MEM)
-
-       /*
-        * DMA Ring buffer
-        */
-       /* 2.5MiB */
-       PREALLOC_NORMAL("BMM_Buffer", 0x00000000, 0x00280000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Display bins buffer for unit0
-        */
-       /* 4KiB */
-       PREALLOC_NORMAL("DisplayBins0", 0x00000000, 0x00001000-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * AVFS: player HAL memory
-        */
-       /* 945K * 3 for playback */
-       PREALLOC_NORMAL("AvfsDmaMem", 0x00000000, 0x002c4c00-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * PMEM
-        */
-       /* Persistent memory for diagnostics (64KiB) */
-       PREALLOC_PMEM("DiagPersistentMemory", 0x00000000, 0x10000-1,
-            (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Smartcard
-        */
-       /* Read and write buffers for Internal/External cards (10KiB) */
-       PREALLOC_NORMAL("SmartCardInfo", 0x00000000, 0x2800-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * NAND Flash
-        */
-       /* 10KiB */
-       PREALLOC_NORMAL("NandFlash", NAND_FLASH_BASE, NAND_FLASH_BASE+0x400-1,
-               IORESOURCE_MEM)
-
-       /*
-        * TFTPBuffer
-        *
-        *  This buffer is used in some minimal configurations (e.g. two-way
-        *  loader) for storing software images
-        */
-       PREALLOC_TFTP("TFTPBuffer", 0x00000000, MEBIBYTE(80)-1,
-               (IORESOURCE_MEM|IORESOURCE_PTV_RES_LOEXT))
-
-       /*
-        * Add other resources here
-        */
-
-       /*
-        * End of Resource marker
-        */
-       {
-               .flags  = 0,
-       },
-};
diff --git a/arch/mips/powertv/asic/prealloc.h b/arch/mips/powertv/asic/prealloc.h
deleted file mode 100644 (file)
index 8e682df..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Definitions for memory preallocations
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
-#define _ARCH_MIPS_POWERTV_ASIC_PREALLOC_H
-
-#define KIBIBYTE(n) ((n) * 1024)    /* Number of kibibytes */
-#define MEBIBYTE(n) ((n) * KIBIBYTE(1024)) /* Number of mebibytes */
-
-/* "struct resource" array element definition */
-#define PREALLOC(NAME, START, END, FLAGS) {    \
-               .name = (NAME),                 \
-               .start = (START),               \
-               .end = (END),                   \
-               .flags = (FLAGS)                \
-       },
-
-/* Individual resources in the preallocated resource arrays are defined using
- *  macros.  These macros are conditionally defined based on their
- *  corresponding kernel configuration flag:
- *    - CONFIG_PREALLOC_NORMAL: preallocate resources for a normal settop box
- *    - CONFIG_PREALLOC_TFTP: preallocate the TFTP download resource
- *    - CONFIG_PREALLOC_DOCSIS: preallocate the DOCSIS resource
- *    - CONFIG_PREALLOC_PMEM: reserve space for persistent memory
- */
-#ifdef CONFIG_PREALLOC_NORMAL
-#define PREALLOC_NORMAL(name, start, end, flags) \
-   PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_NORMAL(name, start, end, flags)
-#endif
-
-#ifdef CONFIG_PREALLOC_TFTP
-#define PREALLOC_TFTP(name, start, end, flags) \
-   PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_TFTP(name, start, end, flags)
-#endif
-
-#ifdef CONFIG_PREALLOC_DOCSIS
-#define PREALLOC_DOCSIS(name, start, end, flags) \
-   PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_DOCSIS(name, start, end, flags)
-#endif
-
-#ifdef CONFIG_PREALLOC_PMEM
-#define PREALLOC_PMEM(name, start, end, flags) \
-   PREALLOC(name, start, end, flags)
-#else
-#define PREALLOC_PMEM(name, start, end, flags)
-#endif
-#endif
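
Because each PREALLOC_*() wrapper above expands to nothing when its CONFIG_PREALLOC_* option is disabled, a disabled entry, trailing comma and all (the comma lives inside PREALLOC()), simply drops out of the resource array at compile time, leaving only the end-of-array marker behind it. A self-contained sketch of the same pattern with made-up names (DEMO_*), not the real kernel symbols:

#include <stdio.h>

#define DEMO_CONFIG_PREALLOC_TFTP 0	/* flip to 1 to keep the TFTP entry */

/* The trailing comma sits inside the entry macro, so a disabled
 * wrapper leaves nothing behind, exactly as in prealloc.h above.
 */
#define DEMO_ENTRY(NAME, SIZE) { (NAME), (SIZE) },

#if DEMO_CONFIG_PREALLOC_TFTP
#define DEMO_TFTP(NAME, SIZE) DEMO_ENTRY(NAME, SIZE)
#else
#define DEMO_TFTP(NAME, SIZE)
#endif

static const struct {
	const char *name;
	unsigned long size;
} demo[] = {
	DEMO_ENTRY("GMAC", 0x00010000UL)
	DEMO_TFTP("TFTPBuffer", 80UL * 1024 * 1024)
	{ NULL, 0 },			/* end-of-array marker */
};

int main(void)
{
	for (int i = 0; demo[i].name != NULL; i++)
		printf("%s: %lu bytes\n", demo[i].name, demo[i].size);
	return 0;
}
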
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
deleted file mode 100644 (file)
index 4989263..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 1999, 2000, 2004, 2005         MIPS Technologies, Inc.
- *     All rights reserved.
- *     Authors: Carsten Langgaard <carstenl@mips.com>
- *              Maciej W. Rozycki <macro@mips.com>
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * PROM library initialisation code.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-#include <asm/bootinfo.h>
-#include <linux/io.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-
-#include <asm/mips-boards/generic.h>
-#include <asm/mach-powertv/asic.h>
-
-#include "init.h"
-
-static int *_prom_envp;
-unsigned long _prom_memsize;
-
-/*
- * YAMON (32-bit PROM) passes arguments and the environment as 32-bit pointers.
- * This macro takes care of sign extension when running in 64-bit mode.
- */
-#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
-
-char *prom_getenv(char *envname)
-{
-       char *result = NULL;
-
-       if (_prom_envp != NULL) {
-               /*
-                * Return a pointer to the given environment variable.
-                * When running in 64-bit mode the kernel uses 64-bit pointers,
-                * but all pointers in the PROM structures are only 32 bits wide,
-                * so the prom_envp() macro above handles the conversion.
-                */
-               int i, index = 0;
-
-               i = strlen(envname);
-
-               while (prom_envp(index)) {
-                       if (strncmp(envname, prom_envp(index), i) == 0) {
-                               result = prom_envp(index + 1);
-                               break;
-                       }
-                       index += 2;
-               }
-       }
-
-       return result;
-}
-
-void __init prom_init(void)
-{
-       int prom_argc;
-       char *prom_argv;
-
-       prom_argc = fw_arg0;
-       prom_argv = (char *) fw_arg1;
-       _prom_envp = (int *) fw_arg2;
-       _prom_memsize = (unsigned long) fw_arg3;
-
-       if (prom_argc == 1) {
-               strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
-               strlcat(arcs_cmdline, prom_argv, COMMAND_LINE_SIZE);
-       }
-
-       configure_platform();
-       prom_meminit();
-}
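
prom_getenv() above treats the YAMON environment as a flat array of 32-bit pointers laid out as name/value pairs and terminated by a NULL name, which is why the lookup advances the index by two and returns the element one past the matching name. A self-contained sketch of that layout and lookup (the environment strings here are invented for illustration):

#include <stdio.h>
#include <string.h>

/* Fake YAMON-style environment: name/value pairs, NULL-terminated.
 * The real code stores 32-bit pointers and sign-extends them through
 * the prom_envp() macro; plain pointers keep this sketch portable.
 */
static const char *demo_envp[] = {
	"memsize", "0x08000000",
	"modetty0", "38400n8r",
	NULL,
};

static const char *demo_getenv(const char *name)
{
	size_t len = strlen(name);
	int i;

	for (i = 0; demo_envp[i] != NULL; i += 2)
		/* Prefix match, mirroring the strncmp() in the original. */
		if (strncmp(name, demo_envp[i], len) == 0)
			return demo_envp[i + 1];
	return NULL;
}

int main(void)
{
	printf("memsize = %s\n", demo_getenv("memsize"));
	return 0;
}
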
diff --git a/arch/mips/powertv/init.h b/arch/mips/powertv/init.h
deleted file mode 100644 (file)
index c1a8bd0..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Definitions from powertv init.c file
- *
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Author: David VomLehn
- */
-
-#ifndef _POWERTV_INIT_H
-#define _POWERTV_INIT_H
-extern unsigned long _prom_memsize;
-extern void prom_meminit(void);
-extern char *prom_getenv(char *name);
-#endif
diff --git a/arch/mips/powertv/ioremap.c b/arch/mips/powertv/ioremap.c
deleted file mode 100644 (file)
index d060478..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- *                     ioremap.c
- *
- * Support for mapping between dma_addr_t values and phys_addr_t values.
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      David VomLehn <dvomlehn@cisco.com>
- *
- * Description:         Defines the platform resources for the SA settop.
- *
- * NOTE: The bootloader allocates persistent memory at an address which is
- * 16 MiB below the end of the highest address in KSEG0. All fixed
- * address memory reservations must avoid this region.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/mach-powertv/ioremap.h>
-
-/*
- * Define the sizes of and masks for grains in physical and DMA space. The
- * values are the same but the types are not.
- */
-#define IOR_PHYS_GRAIN         ((phys_addr_t) 1 << IOR_LSBITS)
-#define IOR_PHYS_GRAIN_MASK    (IOR_PHYS_GRAIN - 1)
-
-#define IOR_DMA_GRAIN          ((dma_addr_t) 1 << IOR_LSBITS)
-#define IOR_DMA_GRAIN_MASK     (IOR_DMA_GRAIN - 1)
-
-/*
- * Values that, when accessed by an index derived from a phys_addr_t and
- * added to that phys_addr_t value, yield a DMA address
- */
-struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
-EXPORT_SYMBOL(_ior_phys_to_dma);
-
-/*
- * Values that, when accessed by an index derived from a dma_addr_t and
- * added to that dma_addr_t value, yield a physical address
- */
-struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
-EXPORT_SYMBOL(_ior_dma_to_phys);
-
-/**
- * setup_dma_to_phys - set up conversion from DMA to physical addresses
- * @dma:       DMA address of the start of the section of memory
- * @delta:     Value that, when added to the DMA address, will yield the
- *             physical address
- * @s:         Number of bytes in the section of memory with the given delta
- *             between DMA and physical addresses.
- */
-static void setup_dma_to_phys(dma_addr_t dma, phys_addr_t delta, dma_addr_t s)
-{
-       int dma_idx, first_idx, last_idx;
-       phys_addr_t first, last;
-
-       /*
-        * Calculate the first and last indices, rounding the first up and
-        * the second down.
-        */
-       first = dma & ~IOR_DMA_GRAIN_MASK;
-       last = (dma + s - 1) & ~IOR_DMA_GRAIN_MASK;
-       first_idx = first >> IOR_LSBITS;                /* Convert to indices */
-       last_idx = last >> IOR_LSBITS;
-
-       for (dma_idx = first_idx; dma_idx <= last_idx; dma_idx++)
-               _ior_dma_to_phys[dma_idx].offset = delta >> IOR_DMA_SHIFT;
-}
-
-/**
- * setup_phys_to_dma - set up conversion from physical to DMA addresses
- * @phys:      Physical address of the start of the section of memory
- * @delta:     Value that, when added to the physical address, will yield the
- *             DMA address
- * @s:         Number of bytes in the section of memory with the given delta
- *             between DMA and physical addresses.
- */
-static void setup_phys_to_dma(phys_addr_t phys, dma_addr_t delta, phys_addr_t s)
-{
-       int phys_idx, first_idx, last_idx;
-       phys_addr_t first, last;
-
-       /*
-        * Calculate the first and last indices, rounding the first up and
-        * the second down.
-        */
-       first = phys & ~IOR_PHYS_GRAIN_MASK;
-       last = (phys + s - 1) & ~IOR_PHYS_GRAIN_MASK;
-       first_idx = first >> IOR_LSBITS;                /* Convert to indices */
-       last_idx = last >> IOR_LSBITS;
-
-       for (phys_idx = first_idx; phys_idx <= last_idx; phys_idx++)
-               _ior_phys_to_dma[phys_idx].offset = delta >> IOR_PHYS_SHIFT;
-}
-
-/**
- * ioremap_add_map - add to the physical and DMA address conversion arrays
- * @phys:      Process's view of the address of the start of the memory chunk
- * @dma:       DMA address of the start of the memory chunk
- * @size:      Size, in bytes, of the chunk of memory
- *
- * NOTE: It might be obvious, but the assumption is that all @size bytes have
- * the same offset between the physical address and the DMA address.
- */
-void ioremap_add_map(phys_addr_t phys, phys_addr_t dma, phys_addr_t size)
-{
-       if (size == 0)
-               return;
-
-       if ((dma & IOR_DMA_GRAIN_MASK) != 0 ||
-               (phys & IOR_PHYS_GRAIN_MASK) != 0 ||
-               (size & IOR_PHYS_GRAIN_MASK) != 0)
-               pr_crit("Memory allocation must be in chunks of 0x%x bytes\n",
-                       IOR_PHYS_GRAIN);
-
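-       /* Record the delta in both directions so either address form
-        * can be converted to the other */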
-       setup_dma_to_phys(dma, phys - dma, size);
-       setup_phys_to_dma(phys, dma - phys, size);
-}
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
deleted file mode 100644 (file)
index bc2f3ca..0000000
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Apparently originally from arch/mips/malta-memory.c. Modified to work
- * with the PowerTV bootloader.
- */
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/pfn.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-#include <asm/page.h>
-#include <asm/sections.h>
-
-#include <asm/mach-powertv/asic.h>
-#include <asm/mach-powertv/ioremap.h>
-
-#include "init.h"
-
-/* Memory constants */
-#define KIBIBYTE(n)            ((n) * 1024)    /* Number of kibibytes */
-#define MEBIBYTE(n)            ((n) * KIBIBYTE(1024)) /* Number of mebibytes */
-#define DEFAULT_MEMSIZE                MEBIBYTE(128)   /* If no memsize provided */
-
-#define BLDR_SIZE      KIBIBYTE(256)           /* Memory reserved for bldr */
-#define RV_SIZE                MEBIBYTE(4)             /* Size of reset vector */
-
-#define LOW_MEM_END    0x20000000              /* Highest low memory address */
-#define BLDR_ALIAS     0x10000000              /* Bootloader address */
-#define RV_PHYS                0x1fc00000              /* Reset vector address */
-#define LOW_RAM_END    RV_PHYS                 /* End of real RAM in low mem */
-
-/*
- * Very low-level conversion from processor physical address to device
- * DMA address for the first bank of memory.
- */
-#define PHYS_TO_DMA(paddr)     ((paddr) + (CONFIG_LOW_RAM_DMA - LOW_RAM_ALIAS))
-
-unsigned long ptv_memsize;
-
-/*
- * struct low_mem_reserved - Items in low memory that are reserved
- * @start:     Physical address of item
- * @size:      Size, in bytes, of this item
- * @is_aliased: True if this is RAM aliased from another location. If false,
- *             it is something other than aliased RAM and the RAM in the
- *             unaliased address is still visible outside of low memory.
- */
-struct low_mem_reserved {
-       phys_addr_t     start;
-       phys_addr_t     size;
-       bool            is_aliased;
-};
-
-/*
- * Must be in ascending address order
- */
-struct low_mem_reserved low_mem_reserved[] = {
-       {BLDR_ALIAS, BLDR_SIZE, true},  /* Bootloader RAM */
-       {RV_PHYS, RV_SIZE, false},      /* Reset vector */
-};
-
-/*
- * struct mem_layout - layout of a piece of the system RAM
- * @phys:      Physical address of the start of this piece of RAM. This is the
- *             address at which both the processor and I/O devices see the
- *             RAM.
- * @alias:     Alias of this piece of memory in order to make it appear in
- *             the low memory part of the processor's address space. I/O
- *             devices don't see anything here.
- * @size:      Size, in bytes, of this piece of RAM
- */
-struct mem_layout {
-       phys_addr_t     phys;
-       phys_addr_t     alias;
-       phys_addr_t     size;
-};
-
-/*
- * struct mem_layout_list - list descriptor for layouts of system RAM pieces
- * @family:    Specifies the family being described
- * @n:         Number of &struct mem_layout elements
- * @layout:    Pointer to the list of &mem_layout structures
- */
-struct mem_layout_list {
-       enum family_type        family;
-       size_t                  n;
-       struct mem_layout       *layout;
-};
-
-static struct mem_layout f1500_layout[] = {
-       {0x20000000, 0x10000000, MEBIBYTE(256)},
-};
-
-static struct mem_layout f4500_layout[] = {
-       {0x40000000, 0x10000000, MEBIBYTE(256)},
-       {0x20000000, 0x20000000, MEBIBYTE(32)},
-};
-
-static struct mem_layout f8500_layout[] = {
-       {0x40000000, 0x10000000, MEBIBYTE(256)},
-       {0x20000000, 0x20000000, MEBIBYTE(32)},
-       {0x30000000, 0x30000000, MEBIBYTE(32)},
-};
-
-static struct mem_layout fx600_layout[] = {
-       {0x20000000, 0x10000000, MEBIBYTE(256)},
-       {0x60000000, 0x60000000, MEBIBYTE(128)},
-};
-
-static struct mem_layout_list layout_list[] = {
-       {FAMILY_1500, ARRAY_SIZE(f1500_layout), f1500_layout},
-       {FAMILY_1500VZE, ARRAY_SIZE(f1500_layout), f1500_layout},
-       {FAMILY_1500VZF, ARRAY_SIZE(f1500_layout), f1500_layout},
-       {FAMILY_4500, ARRAY_SIZE(f4500_layout), f4500_layout},
-       {FAMILY_8500, ARRAY_SIZE(f8500_layout), f8500_layout},
-       {FAMILY_8500RNG, ARRAY_SIZE(f8500_layout), f8500_layout},
-       {FAMILY_4600, ARRAY_SIZE(fx600_layout), fx600_layout},
-       {FAMILY_4600VZA, ARRAY_SIZE(fx600_layout), fx600_layout},
-       {FAMILY_8600, ARRAY_SIZE(fx600_layout), fx600_layout},
-       {FAMILY_8600VZB, ARRAY_SIZE(fx600_layout), fx600_layout},
-};
-
-/* If we can't determine the layout, use this */
-static struct mem_layout default_layout[] = {
-       {0x20000000, 0x10000000, MEBIBYTE(128)},
-};
-
-/**
- * register_non_ram - register low memory not available for RAM usage
- */
-static __init void register_non_ram(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(low_mem_reserved); i++)
-               add_memory_region(low_mem_reserved[i].start,
-                       low_mem_reserved[i].size, BOOT_MEM_RESERVED);
-}
-
-/**
- * get_memsize - get the size of memory as a single bank
- */
-static phys_addr_t get_memsize(void)
-{
-       static char cmdline[COMMAND_LINE_SIZE] __initdata;
-       phys_addr_t memsize = 0;
-       char *memsize_str;
-       char *ptr;
-
-       /* Check the command line first for a memsize directive */
-       strcpy(cmdline, arcs_cmdline);
-       ptr = strstr(cmdline, "memsize=");
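-       /* If the first match is embedded in another option, look for a
-        * standalone " memsize=" instead */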
-       if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
-               ptr = strstr(ptr, " memsize=");
-
-       if (ptr) {
-               memsize = memparse(ptr + 8, &ptr);
-       } else {
-               /* otherwise look in the environment */
-               memsize_str = prom_getenv("memsize");
-
-               if (memsize_str != NULL) {
-                       pr_info("prom memsize = %s\n", memsize_str);
-                       memsize = simple_strtol(memsize_str, NULL, 0);
-               }
-
-               if (memsize == 0) {
-                       if (_prom_memsize != 0) {
-                               memsize = _prom_memsize;
-                               pr_info("_prom_memsize = 0x%x\n", memsize);
-                               /* add in memory that the bootloader doesn't
-                                * report */
-                               memsize += BLDR_SIZE;
-                       } else {
-                               memsize = DEFAULT_MEMSIZE;
-                               pr_info("Memsize not passed by bootloader, "
-                                       "defaulting to 0x%x\n", memsize);
-                       }
-               }
-       }
-
-       return memsize;
-}
-
-/**
- * register_low_ram - register an aliased section of RAM
- * @p:         Alias address of memory
- * @n:         Number of bytes in this section of memory
- *
- * Returns the number of bytes registered
- *
- */
-static __init phys_addr_t register_low_ram(phys_addr_t p, phys_addr_t n)
-{
-       phys_addr_t s;
-       int i;
-       phys_addr_t orig_n;
-
-       orig_n = n;
-
-       BUG_ON(p + n > RV_PHYS);
-
-       for (i = 0; n != 0 && i < ARRAY_SIZE(low_mem_reserved); i++) {
-               phys_addr_t start;
-               phys_addr_t size;
-
-               start = low_mem_reserved[i].start;
-               size = low_mem_reserved[i].size;
-
-               /* Handle memory before this low memory section */
-               if (p < start) {
-                       phys_addr_t s;
-                       s = min(n, start - p);
-                       add_memory_region(p, s, BOOT_MEM_RAM);
-                       p += s;
-                       n -= s;
-               }
-
-               /* Handle the low memory section itself. If it's aliased,
-                * we reduce the number of bytes left, but if not, the RAM
-                * is available elsewhere and we don't reduce the number of
-                * bytes remaining. */
-               if (p == start) {
-                       if (low_mem_reserved[i].is_aliased) {
-                               s = min(n, size);
-                               n -= s;
-                               p += s;
-                       } else
-                               p += n;
-               }
-       }
-
-       return orig_n - n;
-}
-
-/*
- * register_ram - register real RAM
- * @p: Address of memory as seen by devices
- * @alias:     If the memory is seen at an additional address by the processor,
- *             this will be the address, otherwise it is the same as @p.
- * @n:         Number of bytes in this section of memory
- */
-static __init void register_ram(phys_addr_t p, phys_addr_t alias,
-       phys_addr_t n)
-{
-       /*
-        * If some or all of this memory has an alias, break it into the
-        * aliased and non-aliased portion.
-        */
-       if (p != alias) {
-               phys_addr_t alias_size;
-               phys_addr_t registered;
-
-               alias_size = min(n, LOW_RAM_END - alias);
-               registered = register_low_ram(alias, alias_size);
-               ioremap_add_map(alias, p, n);
-               n -= registered;
-               p += registered;
-       }
-
-#ifdef CONFIG_HIGHMEM
-       if (n != 0) {
-               add_memory_region(p, n, BOOT_MEM_RAM);
-               ioremap_add_map(p, p, n);
-       }
-#endif
-}
-
-/**
- * register_address_space - register things in the address space
- * @memsize:   Number of bytes of RAM installed
- *
- * Takes the given number of bytes of RAM and registers as many of the regions,
- * or partial regions, as it can. So, the default configuration might have
- * two regions with 256 MiB each. If the memsize passed in on the command line
- * is 384 MiB, it will register the first region with 256 MiB and the second
- * with 128 MiB.
- */
-static __init void register_address_space(phys_addr_t memsize)
-{
-       int i;
-       phys_addr_t size;
-       size_t n;
-       struct mem_layout *layout;
-       enum family_type family;
-
-       /*
-        * Register all of the things that aren't available to the kernel as
-        * memory.
-        */
-       register_non_ram();
-
-       /* Find the appropriate memory description */
-       family = platform_get_family();
-
-       for (i = 0; i < ARRAY_SIZE(layout_list); i++) {
-               if (layout_list[i].family == family)
-                       break;
-       }
-
-       if (i == ARRAY_SIZE(layout_list)) {
-               n = ARRAY_SIZE(default_layout);
-               layout = default_layout;
-       } else {
-               n = layout_list[i].n;
-               layout = layout_list[i].layout;
-       }
-
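-       /* Register regions in order until memsize is exhausted */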
-       for (i = 0; memsize != 0 && i < n; i++) {
-               size = min(memsize, layout[i].size);
-               register_ram(layout[i].phys, layout[i].alias, size);
-               memsize -= size;
-       }
-}
-
-void __init prom_meminit(void)
-{
-       ptv_memsize = get_memsize();
-       register_address_space(ptv_memsize);
-}
-
-void __init prom_free_prom_memory(void)
-{
-       unsigned long addr;
-       int i;
-
-       for (i = 0; i < boot_mem_map.nr_map; i++) {
-               if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
-                       continue;
-
-               addr = boot_mem_map.map[i].addr;
-               free_init_pages("prom memory",
-                               addr, addr + boot_mem_map.map[i].size);
-       }
-}
diff --git a/arch/mips/powertv/pci/Makefile b/arch/mips/powertv/pci/Makefile
deleted file mode 100644 (file)
index 2610a6a..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-#
-# Copyright (C) 2009  Scientific-Atlanta, Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-#
-
-obj-$(CONFIG_PCI)      += fixup-powertv.o
diff --git a/arch/mips/powertv/pci/fixup-powertv.c b/arch/mips/powertv/pci/fixup-powertv.c
deleted file mode 100644 (file)
index d7ecbae..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/pci.h>
-#include <asm/mach-powertv/interrupts.h>
-#include "powertv-pci.h"
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-       return asic_pcie_map_irq(dev, slot, pin);
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
-       return 0;
-}
-
-/*
- * asic_pcie_map_irq
- *
- * Parameters:
- * *dev - pointer to a pci_dev structure  (not used)
- * slot - slot number  (not used)
- * pin - pin number  (not used)
- *
- * Return Value:
- * Returns: IRQ number (always the PCI Express IRQ number)
- *
- * Description:
- * asic_pcie_map_irq will return the IRQ number of the PCI Express interrupt.
- *
- */
-int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
-       return irq_pciexp;
-}
-EXPORT_SYMBOL(asic_pcie_map_irq);
diff --git a/arch/mips/powertv/pci/powertv-pci.h b/arch/mips/powertv/pci/powertv-pci.h
deleted file mode 100644 (file)
index 1b5886b..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- *                             powertv-pci.h
- *
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-/*
- * Local definitions for the powertv PCI code
- */
-
-#ifndef _POWERTV_PCI_POWERTV_PCI_H_
-#define _POWERTV_PCI_POWERTV_PCI_H_
-extern int asic_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-extern int asic_pcie_init(void);
-
-extern int log_level;
-#endif
diff --git a/arch/mips/powertv/powertv-clock.h b/arch/mips/powertv/powertv-clock.h
deleted file mode 100644 (file)
index d94c543..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Author: David VomLehn
- */
-
-#ifndef _POWERTV_POWERTV_CLOCK_H
-#define _POWERTV_POWERTV_CLOCK_H
-extern int powertv_clockevent_init(void);
-extern void powertv_clocksource_init(void);
-extern unsigned int mips_get_pll_freq(void);
-#endif
diff --git a/arch/mips/powertv/powertv-usb.c b/arch/mips/powertv/powertv-usb.c
deleted file mode 100644 (file)
index d845eac..0000000
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- *                             powertv-usb.c
- *
- * Description:         ASIC-specific USB device setup and shutdown
- *
- * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
- * Copyright (C) 2009 Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:      Ken Eppinett
- *              David Schleef <ds@schleef.org>
- *
- * NOTE: The bootloader allocates persistent memory at an address which is
- * 16 MiB below the end of the highest address in KSEG0. All fixed
- * address memory reservations must avoid this region.
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <asm/mach-powertv/asic.h>
-#include <asm/mach-powertv/interrupts.h>
-
-/* misc_clk_ctl1 values */
-#define MCC1_30MHZ_POWERUP_SELECT      (1 << 14)
-#define MCC1_DIV9                      (1 << 13)
-#define MCC1_ETHMIPS_POWERUP_SELECT    (1 << 11)
-#define MCC1_USB_POWERUP_SELECT                (1 << 1)
-#define MCC1_CLOCK108_POWERUP_SELECT   (1 << 0)
-
-/* Possible values for clock select */
-#define MCC1_USB_CLOCK_HIGH_Z          (0 << 4)
-#define MCC1_USB_CLOCK_48MHZ           (1 << 4)
-#define MCC1_USB_CLOCK_24MHZ           (2 << 4)
-#define MCC1_USB_CLOCK_6MHZ            (3 << 4)
-
-#define MCC1_CONFIG    (MCC1_30MHZ_POWERUP_SELECT |            \
-                        MCC1_DIV9 |                            \
-                        MCC1_ETHMIPS_POWERUP_SELECT |          \
-                        MCC1_USB_POWERUP_SELECT |              \
-                        MCC1_CLOCK108_POWERUP_SELECT)
-
-/* misc_clk_ctl2 values */
-#define MCC2_GMII_GCLK_TO_PAD          (1 << 31)
-#define MCC2_ETHER125_0_CLOCK_SELECT   (1 << 29)
-#define MCC2_RMII_0_CLOCK_SELECT       (1 << 28)
-#define MCC2_GMII_TX0_CLOCK_SELECT     (1 << 27)
-#define MCC2_GMII_RX0_CLOCK_SELECT     (1 << 26)
-#define MCC2_ETHER125_1_CLOCK_SELECT   (1 << 24)
-#define MCC2_RMII_1_CLOCK_SELECT       (1 << 23)
-#define MCC2_GMII_TX1_CLOCK_SELECT     (1 << 22)
-#define MCC2_GMII_RX1_CLOCK_SELECT     (1 << 21)
-#define MCC2_ETHER125_2_CLOCK_SELECT   (1 << 19)
-#define MCC2_RMII_2_CLOCK_SELECT       (1 << 18)
-#define MCC2_GMII_TX2_CLOCK_SELECT     (1 << 17)
-#define MCC2_GMII_RX2_CLOCK_SELECT     (1 << 16)
-
-#define ETHER_CLK_CONFIG       (MCC2_GMII_GCLK_TO_PAD |        \
-                                MCC2_ETHER125_0_CLOCK_SELECT | \
-                                MCC2_RMII_0_CLOCK_SELECT |     \
-                                MCC2_GMII_TX0_CLOCK_SELECT |   \
-                                MCC2_GMII_RX0_CLOCK_SELECT |   \
-                                MCC2_ETHER125_1_CLOCK_SELECT | \
-                                MCC2_RMII_1_CLOCK_SELECT |     \
-                                MCC2_GMII_TX1_CLOCK_SELECT |   \
-                                MCC2_GMII_RX1_CLOCK_SELECT |   \
-                                MCC2_ETHER125_2_CLOCK_SELECT | \
-                                MCC2_RMII_2_CLOCK_SELECT |     \
-                                MCC2_GMII_TX2_CLOCK_SELECT |   \
-                                MCC2_GMII_RX2_CLOCK_SELECT)
-
-/* misc_clk_ctl2 definitions for Gaia */
-#define FSX4A_REF_SELECT               (1 << 16)
-#define FSX4B_REF_SELECT               (1 << 17)
-#define FSX4C_REF_SELECT               (1 << 18)
-#define DDR_PLL_REF_SELECT             (1 << 19)
-#define MIPS_PLL_REF_SELECT            (1 << 20)
-
-/* Definitions for the QAM frequency select register FS432X4A4_QAM_CTL */
-#define QAM_FS_SDIV_SHIFT              29
-#define QAM_FS_MD_SHIFT                        24
-#define QAM_FS_MD_MASK                 0x1f    /* Cut down to 5 bits */
-#define QAM_FS_PE_SHIFT                        8
-
-#define QAM_FS_DISABLE_DIVIDE_BY_3             (1 << 5)
-#define QAM_FS_ENABLE_PROGRAM                  (1 << 4)
-#define QAM_FS_ENABLE_OUTPUT                   (1 << 3)
-#define QAM_FS_SELECT_TEST_BYPASS              (1 << 2)
-#define QAM_FS_DISABLE_DIGITAL_STANDBY         (1 << 1)
-#define QAM_FS_CHOOSE_FS                       (1 << 0)
-
-/* Definitions for fs432x4a_ctl register */
-#define QAM_FS_NSDIV_54MHZ                     (1 << 2)
-
-/* Definitions for bcm1_usb2_ctl register */
-#define BCM1_USB2_CTL_BISTOK                           (1 << 11)
-#define BCM1_USB2_CTL_PORT2_SHIFT_JK                   (1 << 7)
-#define BCM1_USB2_CTL_PORT1_SHIFT_JK                   (1 << 6)
-#define BCM1_USB2_CTL_PORT2_FAST_EDGE                  (1 << 5)
-#define BCM1_USB2_CTL_PORT1_FAST_EDGE                  (1 << 4)
-#define BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH         (1 << 1)
-#define BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH    (1 << 0)
-
-/* Definitions for crt_spare register */
-#define CRT_SPARE_PORT2_SHIFT_JK                       (1 << 21)
-#define CRT_SPARE_PORT1_SHIFT_JK                       (1 << 20)
-#define CRT_SPARE_PORT2_FAST_EDGE                      (1 << 19)
-#define CRT_SPARE_PORT1_FAST_EDGE                      (1 << 18)
-#define CRT_SPARE_DIVIDE_BY_9_FROM_432                 (1 << 17)
-#define CRT_SPARE_USB_DIVIDE_BY_9                      (1 << 16)
-
-/* Definitions for usb2_stbus_obc register */
-#define USB_STBUS_OBC_STORE32_LOAD32                   0x3
-
-/* Definitions for usb2_stbus_mess_size register */
-#define USB2_STBUS_MESS_SIZE_2                         0x1     /* 2 packets */
-
-/* Definitions for usb2_stbus_chunk_size register */
-#define USB2_STBUS_CHUNK_SIZE_2                                0x1     /* 2 packets */
-
-/* Definitions for usb2_strap register */
-#define USB2_STRAP_HFREQ_SELECT                                0x1
-
-/*
- * USB Host Resource Definition
- */
-
-static struct resource ehci_resources[] = {
-       {
-               .parent = &asic_resource,
-               .start  = 0,
-               .end    = 0xff,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .start  = irq_usbehci,
-               .end    = irq_usbehci,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static u64 ehci_dmamask = 0xffffffffULL;
-
-static struct platform_device ehci_device = {
-       .name = "powertv-ehci",
-       .id = 0,
-       .num_resources = 2,
-       .resource = ehci_resources,
-       .dev = {
-               .dma_mask = &ehci_dmamask,
-               .coherent_dma_mask = 0xffffffff,
-       },
-};
-
-static struct resource ohci_resources[] = {
-       {
-               .parent = &asic_resource,
-               .start  = 0,
-               .end    = 0xff,
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .start  = irq_usbohci,
-               .end    = irq_usbohci,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static u64 ohci_dmamask = 0xffffffffULL;
-
-static struct platform_device ohci_device = {
-       .name = "powertv-ohci",
-       .id = 0,
-       .num_resources = 2,
-       .resource = ohci_resources,
-       .dev = {
-               .dma_mask = &ohci_dmamask,
-               .coherent_dma_mask = 0xffffffff,
-       },
-};
-
-static unsigned usb_users;
-static DEFINE_SPINLOCK(usb_regs_lock);
-
-/*
- * fs_update - set the frequency synthesizer for USB
- * @pe_bits:           Phase tap setting
- * @md_bits:           Coarse selector bus for algorithm of phase tap
- * @sdiv_bits:         Output divider setting
- * @disable_div_by_3:  Either QAM_FS_DISABLE_DIVIDE_BY_3 or zero
- * @standby:           Either QAM_FS_DISABLE_DIGITAL_STANDBY or zero
- *
- * QAM frequency selection code, which affects the frequency at which USB
- * runs. The frequency is calculated as:
- *                            2^15 * ndiv * Fin
- * Fout = ------------------------------------------------------------
- *       (sdiv * (ipe * (1 + md/32) - (ipe - 2^15)*(1 + (md + 1)/32)))
- * where:
- * Fin         54 MHz
- * ndiv                QAM_FS_NSDIV_54MHZ ? 8 : 16
- * sdiv                1 << (sdiv_bits + 1)
- * ipe         Same as pe_bits
- * md          A five-bit, two's-complement integer (range [-16, 15]), which
- *             is the lower 5 bits of md_bits.
- */
-static void fs_update(u32 pe_bits, int md_bits, u32 sdiv_bits,
-       u32 disable_div_by_3, u32 standby)
-{
-       u32 val;
-
-       val = ((sdiv_bits << QAM_FS_SDIV_SHIFT) |
-               ((md_bits & QAM_FS_MD_MASK) << QAM_FS_MD_SHIFT) |
-               (pe_bits << QAM_FS_PE_SHIFT) |
-               QAM_FS_ENABLE_OUTPUT |
-               standby |
-               disable_div_by_3);
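-       /* Program the synthesizer in three writes: load the value, latch it
-        * with ENABLE_PROGRAM, then switch the output to it with CHOOSE_FS */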
-       asic_write(val, fs432x4b4_usb_ctl);
-       asic_write(val | QAM_FS_ENABLE_PROGRAM, fs432x4b4_usb_ctl);
-       asic_write(val | QAM_FS_ENABLE_PROGRAM | QAM_FS_CHOOSE_FS,
-               fs432x4b4_usb_ctl);
-}
-
-/*
- * usb_eye_configure - optimize the shape of the USB eye waveform
- * @set:       Bits to set in the register; each bit with a one will be
- *             set in the register, zero bits will not be modified
- * @clear:     Bits to clear in the register; each bit with a one will be
- *             cleared in the register, zero bits will not be modified
- */
-static void usb_eye_configure(u32 set, u32 clear)
-{
-       u32 old;
-
-       old = asic_read(crt_spare);
-       old |= set;
-       old &= ~clear;
-       asic_write(old, crt_spare);
-}
-
-/*
- * platform_configure_usb - usb configuration based on platform type.
- */
-static void platform_configure_usb(void)
-{
-       u32 bcm1_usb2_ctl_value;
-       enum asic_type asic_type;
-       unsigned long flags;
-
-       spin_lock_irqsave(&usb_regs_lock, flags);
-       usb_users++;
-
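-       /* Only the first user programs the clocks and control registers */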
-       if (usb_users != 1) {
-               spin_unlock_irqrestore(&usb_regs_lock, flags);
-               return;
-       }
-
-       asic_type = platform_get_asic();
-
-       switch (asic_type) {
-       case ASIC_ZEUS:
-               fs_update(0x0000, -15, 0x02, 0, 0);
-               bcm1_usb2_ctl_value = BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
-                       BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
-               break;
-
-       case ASIC_CRONUS:
-       case ASIC_CRONUSLITE:
-               usb_eye_configure(0, CRT_SPARE_USB_DIVIDE_BY_9);
-               fs_update(0x8000, -14, 0x03, QAM_FS_DISABLE_DIVIDE_BY_3,
-                       QAM_FS_DISABLE_DIGITAL_STANDBY);
-               bcm1_usb2_ctl_value = BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
-                       BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
-               break;
-
-       case ASIC_CALLIOPE:
-               fs_update(0x0000, -15, 0x02, QAM_FS_DISABLE_DIVIDE_BY_3,
-                       QAM_FS_DISABLE_DIGITAL_STANDBY);
-
-               switch (platform_get_family()) {
-               case FAMILY_1500VZE:
-                       break;
-
-               case FAMILY_1500VZF:
-                       usb_eye_configure(CRT_SPARE_PORT2_SHIFT_JK |
-                               CRT_SPARE_PORT1_SHIFT_JK |
-                               CRT_SPARE_PORT2_FAST_EDGE |
-                               CRT_SPARE_PORT1_FAST_EDGE, 0);
-                       break;
-
-               default:
-                       usb_eye_configure(CRT_SPARE_PORT2_SHIFT_JK |
-                               CRT_SPARE_PORT1_SHIFT_JK, 0);
-                       break;
-               }
-
-               bcm1_usb2_ctl_value = BCM1_USB2_CTL_BISTOK |
-                       BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
-                       BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
-               break;
-
-       case ASIC_GAIA:
-               fs_update(0x8000, -14, 0x03, QAM_FS_DISABLE_DIVIDE_BY_3,
-                       QAM_FS_DISABLE_DIGITAL_STANDBY);
-               bcm1_usb2_ctl_value = BCM1_USB2_CTL_BISTOK |
-                       BCM1_USB2_CTL_EHCI_PRT_PWR_ACTIVE_HIGH |
-                       BCM1_USB2_CTL_APP_PRT_OVRCUR_IN_ACTIVE_HIGH;
-               break;
-
-       default:
-               pr_err("Unknown ASIC type: %d\n", asic_type);
-               bcm1_usb2_ctl_value = 0;
-               break;
-       }
-
-       /* turn on USB power */
-       asic_write(0, usb2_strap);
-       /* Enable all OHCI interrupts */
-       asic_write(bcm1_usb2_ctl_value, usb2_control);
-       /* usb2_stbus_obc store32/load32 */
-       asic_write(USB_STBUS_OBC_STORE32_LOAD32, usb2_stbus_obc);
-       /* usb2_stbus_mess_size 2 packets */
-       asic_write(USB2_STBUS_MESS_SIZE_2, usb2_stbus_mess_size);
-       /* usb2_stbus_chunk_size 2 packets */
-       asic_write(USB2_STBUS_CHUNK_SIZE_2, usb2_stbus_chunk_size);
-       spin_unlock_irqrestore(&usb_regs_lock, flags);
-}
-
-static void platform_unconfigure_usb(void)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&usb_regs_lock, flags);
-       usb_users--;
-       if (usb_users == 0)
-               asic_write(USB2_STRAP_HFREQ_SELECT, usb2_strap);
-       spin_unlock_irqrestore(&usb_regs_lock, flags);
-}
-
-/*
- * Set up the USB EHCI interface
- */
-void platform_configure_usb_ehci(void)
-{
-       platform_configure_usb();
-}
-EXPORT_SYMBOL(platform_configure_usb_ehci);
-
-/*
- * Set up the USB OHCI interface
- */
-void platform_configure_usb_ohci(void)
-{
-       platform_configure_usb();
-}
-EXPORT_SYMBOL(platform_configure_usb_ohci);
-
-/*
- * Shut the USB EHCI interface down
- */
-void platform_unconfigure_usb_ehci(void)
-{
-       platform_unconfigure_usb();
-}
-EXPORT_SYMBOL(platform_unconfigure_usb_ehci);
-
-/*
- * Shut the USB OHCI interface down
- */
-void platform_unconfigure_usb_ohci(void)
-{
-       platform_unconfigure_usb();
-}
-EXPORT_SYMBOL(platform_unconfigure_usb_ohci);
-
-/**
- * platform_usb_devices_init - set up the USB device resources
- */
-int __init platform_usb_devices_init(struct platform_device **ehci_dev,
-       struct platform_device **ohci_dev)
-{
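-       /* The MEM resources were declared with a zero start and a
-        * relative end; rebase them on the ASIC register addresses */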
-       *ehci_dev = &ehci_device;
-       ehci_resources[0].start = asic_reg_phys_addr(ehci_hcapbase);
-       ehci_resources[0].end += ehci_resources[0].start;
-
-       *ohci_dev = &ohci_device;
-       ohci_resources[0].start = asic_reg_phys_addr(ohci_hc_revision);
-       ohci_resources[0].end += ohci_resources[0].start;
-
-       return 0;
-}
diff --git a/arch/mips/powertv/powertv_setup.c b/arch/mips/powertv/powertv_setup.c
deleted file mode 100644 (file)
index 24689bf..0000000
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/screen_info.h>
-#include <linux/notifier.h>
-#include <linux/etherdevice.h>
-#include <linux/if_ether.h>
-#include <linux/ctype.h>
-#include <linux/cpu.h>
-#include <linux/time.h>
-
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mips-boards/generic.h>
-#include <asm/dma.h>
-#include <asm/asm.h>
-#include <asm/traps.h>
-#include <asm/asm-offsets.h>
-#include "reset.h"
-
-#define VAL(n)         STR(n)
-
-/*
- * Macros for loading addresses and storing registers:
- * LONG_L_     Stringified version of LONG_L for use in asm() statement
- * LONG_S_     Stringified version of LONG_S for use in asm() statement
- * PTR_LA_     Stringified version of PTR_LA for use in asm() statement
- * REG_SIZE    Number of 8-bit bytes in a full width register
- */
-#define LONG_L_                VAL(LONG_L) " "
-#define LONG_S_                VAL(LONG_S) " "
-#define PTR_LA_                VAL(PTR_LA) " "
-
-#ifdef CONFIG_64BIT
-#warning TODO: 64-bit code needs to be verified
-#define REG_SIZE       "8"             /* In bytes */
-#endif
-
-#ifdef CONFIG_32BIT
-#define REG_SIZE       "4"             /* In bytes */
-#endif
-
-static void register_panic_notifier(void);
-static int panic_handler(struct notifier_block *notifier_block,
-       unsigned long event, void *cause_string);
-
-const char *get_system_type(void)
-{
-       return "PowerTV";
-}
-
-void __init plat_mem_setup(void)
-{
-       panic_on_oops = 1;
-       register_panic_notifier();
-
-#if 0
-       mips_pcibios_init();
-#endif
-       mips_reboot_setup();
-}
-
-/*
- * Install a panic notifier for platform-specific diagnostics
- */
-static void register_panic_notifier(void)
-{
-       static struct notifier_block panic_notifier = {
-               .notifier_call = panic_handler,
-               .next = NULL,
-               .priority       = INT_MAX
-       };
-       atomic_notifier_chain_register(&panic_notifier_list, &panic_notifier);
-}
-
-static int panic_handler(struct notifier_block *notifier_block,
-       unsigned long event, void *cause_string)
-{
-       struct pt_regs  my_regs;
-
-       /* Save all of the registers */
-       {
-               unsigned long   at, v0, v1; /* Must be on the stack */
-
-               /* Start by saving $at and v0 on the stack. We use $at
-                * ourselves, but it looks like the compiler may use v0 or v1
-                * to load the address of the pt_regs structure. We'll come
-                * back later to store the registers in the pt_regs
-                * structure. */
-               __asm__ __volatile__ (
-                       ".set   noat\n"
-                       LONG_S_         "$at, %[at]\n"
-                       LONG_S_         "$2, %[v0]\n"
-                       LONG_S_         "$3, %[v1]\n"
-               :
-                       [at] "=m" (at),
-                       [v0] "=m" (v0),
-                       [v1] "=m" (v1)
-               :
-               :       "at"
-               );
-
-               __asm__ __volatile__ (
-                       ".set   noat\n"
-                       "move           $at, %[pt_regs]\n"
-
-                       /* Argument registers */
-                       LONG_S_         "$4, " VAL(PT_R4) "($at)\n"
-                       LONG_S_         "$5, " VAL(PT_R5) "($at)\n"
-                       LONG_S_         "$6, " VAL(PT_R6) "($at)\n"
-                       LONG_S_         "$7, " VAL(PT_R7) "($at)\n"
-
-                       /* Temporary regs */
-                       LONG_S_         "$8, " VAL(PT_R8) "($at)\n"
-                       LONG_S_         "$9, " VAL(PT_R9) "($at)\n"
-                       LONG_S_         "$10, " VAL(PT_R10) "($at)\n"
-                       LONG_S_         "$11, " VAL(PT_R11) "($at)\n"
-                       LONG_S_         "$12, " VAL(PT_R12) "($at)\n"
-                       LONG_S_         "$13, " VAL(PT_R13) "($at)\n"
-                       LONG_S_         "$14, " VAL(PT_R14) "($at)\n"
-                       LONG_S_         "$15, " VAL(PT_R15) "($at)\n"
-
-                       /* "Saved" registers */
-                       LONG_S_         "$16, " VAL(PT_R16) "($at)\n"
-                       LONG_S_         "$17, " VAL(PT_R17) "($at)\n"
-                       LONG_S_         "$18, " VAL(PT_R18) "($at)\n"
-                       LONG_S_         "$19, " VAL(PT_R19) "($at)\n"
-                       LONG_S_         "$20, " VAL(PT_R20) "($at)\n"
-                       LONG_S_         "$21, " VAL(PT_R21) "($at)\n"
-                       LONG_S_         "$22, " VAL(PT_R22) "($at)\n"
-                       LONG_S_         "$23, " VAL(PT_R23) "($at)\n"
-
-                       /* Add'l temp regs */
-                       LONG_S_         "$24, " VAL(PT_R24) "($at)\n"
-                       LONG_S_         "$25, " VAL(PT_R25) "($at)\n"
-
-                       /* Kernel temp regs */
-                       LONG_S_         "$26, " VAL(PT_R26) "($at)\n"
-                       LONG_S_         "$27, " VAL(PT_R27) "($at)\n"
-
-                       /* Global pointer, stack pointer, frame pointer and
-                        * return address */
-                       LONG_S_         "$gp, " VAL(PT_R28) "($at)\n"
-                       LONG_S_         "$sp, " VAL(PT_R29) "($at)\n"
-                       LONG_S_         "$fp, " VAL(PT_R30) "($at)\n"
-                       LONG_S_         "$ra, " VAL(PT_R31) "($at)\n"
-
-                       /* Now we can get the $at and v0 registers back and
-                        * store them */
-                       LONG_L_         "$8, %[at]\n"
-                       LONG_S_         "$8, " VAL(PT_R1) "($at)\n"
-                       LONG_L_         "$8, %[v0]\n"
-                       LONG_S_         "$8, " VAL(PT_R2) "($at)\n"
-                       LONG_L_         "$8, %[v1]\n"
-                       LONG_S_         "$8, " VAL(PT_R3) "($at)\n"
-               :
-               :
-                       [at] "m" (at),
-                       [v0] "m" (v0),
-                       [v1] "m" (v1),
-                       [pt_regs] "r" (&my_regs)
-               :       "at", "t0"
-               );
-
-               /* Set the current EPC value to be the current location in this
-                * function */
-               __asm__ __volatile__ (
-                       ".set   noat\n"
-               "1:\n"
-                       PTR_LA_         "$at, 1b\n"
-                       LONG_S_         "$at, %[cp0_epc]\n"
-               :
-                       [cp0_epc] "=m" (my_regs.cp0_epc)
-               :
-               :       "at"
-               );
-
-               my_regs.cp0_cause = read_c0_cause();
-               my_regs.cp0_status = read_c0_status();
-       }
-
-       pr_crit("I'm feeling a bit sleepy. hmmmmm... perhaps a nap would... "
-               "zzzz... \n");
-
-       return NOTIFY_DONE;
-}
-
-/* Information about the RF MAC address, if one was supplied on the
- * command line. */
-static bool have_rfmac;
-static u8 rfmac[ETH_ALEN];
-
-static int rfmac_param(char *p)
-{
-       u8      *q;
-       bool    is_high_nibble;
-       int     c;
-
-       /* Skip a leading "0x", if present */
-       if (*p == '0' && *(p+1) == 'x')
-               p += 2;
-
-       q = rfmac;
-       is_high_nibble = true;
-
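-       /* Consume hex digits two per byte, most-significant nibble first */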
-       for (c = (unsigned char) *p++;
-               isxdigit(c) && q - rfmac < ETH_ALEN;
-               c = (unsigned char) *p++) {
-               int     nibble;
-
-               nibble = (isdigit(c) ? (c - '0') :
-                       (isupper(c) ? c - 'A' + 10 : c - 'a' + 10));
-
-               if (is_high_nibble)
-                       *q = nibble << 4;
-               else
-                       *q++ |= nibble;
-
-               is_high_nibble = !is_high_nibble;
-       }
-
-       /* If we parsed all the way to the end of the parameter value and
-        * parsed all ETH_ALEN bytes, we have a usable RF MAC address */
-       have_rfmac = (c == '\0' && q - rfmac == ETH_ALEN);
-
-       return 0;
-}
-
-early_param("rfmac", rfmac_param);
-
-/*
- * Generate an Ethernet MAC address that has a good chance of being unique.
- * @addr:      Pointer to six-byte array containing the Ethernet address
- * Generates an Ethernet MAC address that is highly likely to be unique for
- * this particular system on a network with other systems of the same type.
- *
- * The problem we are solving is that, when eth_random_addr() is used to
- * generate MAC addresses at startup, there isn't much entropy for the random
- * number generator to use and the addresses it produces are fairly likely to
- * be the same as those of other identical systems on the same local network.
- * This is true even for relatively small numbers of systems; for the reason
- * why, see the Wikipedia entry for "Birthday problem" at:
- *     http://en.wikipedia.org/wiki/Birthday_problem
- *
- * The good news is that we already have a MAC address known to be unique, the
- * RF MAC address. The bad news is that this address is already in use on the
- * RF interface. Worse, the obvious trick, taking the RF MAC address and
- * turning on the locally managed bit, has already been used for other devices.
- * Still, this does give us something to work with.
- *
- * The approach we take is:
- * 1.  If we can't get the RF MAC Address, just call eth_random_addr.
- * 2.  Use the 24-bit NIC-specific bits of the RF MAC address as the last 24
- *     bits of the new address. This is very likely to be unique, except for
- *     the current box.
- * 3.  To avoid using addresses already on the current box, we set the top
- *     six bits of the address with a value different from any currently
- *     registered Scientific Atlanta organizationally unique identifier
- *     (OUI). This avoids duplication with any addresses on the system that
- *     were generated from a valid Scientific Atlanta-registered address by
- *     simply flipping the locally managed bit.
- * 4.  We aren't generating a multicast address, so we leave the multicast
- *     bit off. Since we aren't using a registered address, we have to set
- *     the locally managed bit.
- * 5.  We then randomly generate the remaining 16 bits. This does two
- *     things:
- *     a.      It allows us to call this function for more than one device
- *             in this system
- *     b.      It ensures that things will probably still work even if
- *             some device on the device network has a locally managed
- *             address that matches the top six bits from step 2.
- */
-void platform_random_ether_addr(u8 addr[ETH_ALEN])
-{
-       const int num_random_bytes = 2;
-       const unsigned char non_sciatl_oui_bits = 0xc0u;
-       const unsigned char mac_addr_locally_managed = (1 << 1);
-
-       if (!have_rfmac) {
-               pr_warning("rfmac not available on command line; "
-                       "generating random MAC address\n");
-               eth_random_addr(addr);
-       }
-
-       else {
-               int     i;
-
-               /* Set the first byte to something that won't match a Scientific
-                * Atlanta OUI, is locally managed, and isn't a multicast
-                * address */
-               addr[0] = non_sciatl_oui_bits | mac_addr_locally_managed;
-
-               /* Get some bytes of random address information */
-               get_random_bytes(&addr[1], num_random_bytes);
-
-               /* Copy over the NIC-specific bits of the RF MAC address */
-               for (i = 1 + num_random_bytes; i < ETH_ALEN; i++)
-                       addr[i] = rfmac[i];
-       }
-}
diff --git a/arch/mips/powertv/reset.c b/arch/mips/powertv/reset.c
deleted file mode 100644 (file)
index 11c32fb..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/pm.h>
-
-#include <linux/io.h>
-#include <asm/reboot.h>                        /* Not included by linux/reboot.h */
-
-#include <asm/mach-powertv/asic_regs.h>
-#include "reset.h"
-
-static void mips_machine_restart(char *command)
-{
-       writel(0x1, asic_reg_addr(watchdog));
-}
-
-void mips_reboot_setup(void)
-{
-       _machine_restart = mips_machine_restart;
-}
diff --git a/arch/mips/powertv/reset.h b/arch/mips/powertv/reset.h
deleted file mode 100644 (file)
index 888fd09..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Definitions from powertv reset.c file
- *
- * Copyright (C) 2009  Cisco Systems, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- * Author: David VomLehn
- */
-
-#ifndef _POWERTV_POWERTV_RESET_H
-#define _POWERTV_POWERTV_RESET_H
-extern void mips_reboot_setup(void);
-#endif
diff --git a/arch/mips/powertv/time.c b/arch/mips/powertv/time.c
deleted file mode 100644 (file)
index f38b0d4..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- * Portions copyright (C) 2009 Cisco Systems, Inc.
- *
- *  This program is free software; you can distribute it and/or modify it
- *  under the terms of the GNU General Public License (Version 2) as
- *  published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Setting up the clock on the MIPS boards.
- */
-
-#include <linux/init.h>
-#include <asm/mach-powertv/interrupts.h>
-#include <asm/time.h>
-
-#include "powertv-clock.h"
-
-unsigned int get_c0_compare_int(void)
-{
-       return irq_mips_timer;
-}
-
-void __init plat_time_init(void)
-{
-       powertv_clocksource_init();
-}
index bba0cdfd83bcd37b2278dce3fdec6f8980bfbb19..5d0983d47161c183033b9943eada1edb93b797a3 100644 (file)
@@ -26,7 +26,7 @@ void ralink_clk_add(const char *dev, unsigned long rate)
        struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
 
        if (!clk)
-               panic("failed to add clock\n");
+               panic("failed to add clock");
 
        clk->cl.dev_id = dev;
        clk->cl.clk = clk;
index d217509e530093f0154783c32f8b016ff419f8a4..a3ad56c2372d0fcd261a1ff9dfea333a528f601f 100644 (file)
@@ -350,7 +350,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
                name = "MT7620A";
                soc_info->compatible = "ralink,mt7620a-soc";
        } else {
-               panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+               panic("mt7620: unknown SoC, n0:%08x n1:%08x", n0, n1);
        }
 
        rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
index ce38d11f9da5bdb3baaec1821dec2a6a25ae4836..15f21ea96121023b60d005045a3115bac48c5622 100644 (file)
@@ -108,7 +108,7 @@ static int __init plat_of_setup(void)
        strncpy(of_ids[1].compatible, "palmbus", len);
 
        if (of_platform_populate(NULL, of_ids, NULL, NULL))
-               panic("failed to populate DT\n");
+               panic("failed to populate DT");
 
        /* make sure that the reset controller is set up early */
        ralink_rst_init();
index ca7ee3a33790fc074ed4788f8fb1e1412babe187..bb82a82da9e70736160bb911a432477b3a81fdfe 100644 (file)
@@ -276,7 +276,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
                name = "RT5350";
                soc_info->compatible = "ralink,rt5350-soc";
        } else {
-               panic("rt305x: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+               panic("rt305x: unknown SoC, n0:%08x n1:%08x", n0, n1);
        }
 
        id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
index e2a2b203eb005b5403f2b68f92a841d87137b75c..71dedcae55a69c4ea93c5c71c7edfd93583864a9 100644 (file)
@@ -76,4 +76,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
index ad2ce8dab9963c4dd6867ee83064460576084089..56c9cb7c8bcf9eb16188e2d60b9bfa1a386e1dcd 100644 (file)
@@ -1,6 +1,7 @@
 config PARISC
        def_bool y
        select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_IDE
        select HAVE_OPROFILE
        select HAVE_FUNCTION_TRACER if 64BIT
@@ -287,6 +288,10 @@ config SYSVIPC_COMPAT
        def_bool y
        depends on COMPAT && SYSVIPC
 
+config AUDIT_ARCH
+       def_bool y
+       depends on COMPAT
+
 config HPUX
        bool "Support for HP-UX binaries"
        depends on !64BIT
index e02f665f804a53ae73761fa3546880150eff766f..7187664034c3499b92c62210d0d347b22ae24adc 100644 (file)
@@ -94,7 +94,7 @@ PALOCONF := $(shell if [ -f $(src)/palo.conf ]; then echo $(src)/palo.conf; \
        else echo $(obj)/palo.conf; \
        fi)
 
-palo: vmlinuz
+palo lifimage: vmlinuz
        @if test ! -x "$(PALO)"; then \
                echo 'ERROR: Please install palo first (apt-get install palo)';\
                echo 'or build it from source and install it somewhere in your $$PATH';\
@@ -109,16 +109,23 @@ palo: vmlinuz
        fi
        $(PALO) -f $(PALOCONF)
 
-# Shorthands for known targets not supported by parisc, use vmlinux/vmlinuz as default
+BOOT_TARGETS    = zImage Image palo lifimage
+INSTALL_TARGETS = zinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+bzImage zImage: vmlinuz
 Image: vmlinux
-zImage bzImage: vmlinuz
 
 vmlinuz: vmlinux
        @gzip -cf -9 $< > $@
 
-install: vmlinuz
-       sh $(src)/arch/parisc/install.sh \
-                       $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
+install:
+       $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
+                       $(KERNELRELEASE) vmlinux System.map "$(INSTALL_PATH)"
+zinstall:
+       $(CONFIG_SHELL) $(src)/arch/parisc/install.sh \
+                       $(KERNELRELEASE) vmlinuz System.map "$(INSTALL_PATH)"
 
 CLEAN_FILES    += lifimage
 MRPROPER_FILES += palo.conf
@@ -127,10 +134,11 @@ define archhelp
        @echo  '* vmlinux       - Uncompressed kernel image (./vmlinux)'
        @echo  '  vmlinuz       - Compressed kernel image (./vmlinuz)'
        @echo  '  palo          - Bootable image (./lifimage)'
-       @echo  '  install       - Install kernel using'
+       @echo  '  install       - Install uncompressed vmlinux kernel using'
        @echo  '                  (your) ~/bin/$(INSTALLKERNEL) or'
        @echo  '                  (distribution) /sbin/$(INSTALLKERNEL) or'
        @echo  '                  copy to $$(INSTALL_PATH)'
+       @echo  '  zinstall      - Install compressed vmlinuz kernel'
 endef
 
 # we require gcc 3.3 or above to compile the kernel
index 0f90569b9d8546f7fa710a5b15f7883266e1ba95..9387cc2693f6a33a70459fcb46da058db395e2c4 100644 (file)
@@ -40,6 +40,8 @@ CONFIG_IP_NF_QUEUE=m
 CONFIG_LLC2=m
 CONFIG_NET_PKTGEN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_PARPORT=y
index b647b182dacc17d6cf95fa79cee892fd8e0427bf..90025322b75ecd3b8a5672b76d18ce534b92ee2f 100644 (file)
@@ -79,6 +79,8 @@ CONFIG_IP_DCCP=m
 CONFIG_LLC2=m
 CONFIG_NET_PKTGEN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_BLK_DEV_UMEM=m
index e289f5bf31488f0bb9eff29bb51bcbf25abd3bd1..f1a0c25bef8dc3a6667c608a092f96a39d5994aa 100644 (file)
@@ -4,6 +4,7 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODVERSIONS=y
@@ -27,6 +28,8 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_INET_LRO is not set
 CONFIG_IPV6=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_PARPORT=y
 CONFIG_PARPORT_PC=y
index 311ca367b62237b87bb78023f7687ef1f7d98c85..ec1b014952b6601458f4c3b2901d8e86670b96fc 100644 (file)
@@ -5,6 +5,7 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=16
 CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
 # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
 CONFIG_EXPERT=y
 CONFIG_KALLSYMS_ALL=y
@@ -39,6 +40,8 @@ CONFIG_NETFILTER_DEBUG=y
 CONFIG_IP_NF_QUEUE=m
 CONFIG_NET_PKTGEN=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_BLK_DEV_UMEM=m
index f11006361297eb152cf5f78471f6c69de41e5cfc..e1c8d2015c8938ac0a3440d38af427b4ac8eec7a 100644 (file)
@@ -62,6 +62,8 @@ CONFIG_TIPC=m
 CONFIG_LLC2=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 CONFIG_PARPORT=y
 CONFIG_PARPORT_PC=y
index dfe88f6c95c4d7c924b9e03d8c7abd236f100609..ba61495e1fa4b8d9ee576aff9a7559e619f229b6 100644 (file)
@@ -49,6 +49,8 @@ CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_LLC2=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 CONFIG_PARPORT=y
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
new file mode 100644 (file)
index 0000000..33b148f
--- /dev/null
@@ -0,0 +1,328 @@
+CONFIG_LOCALVERSION="-32bit"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_PERF_EVENTS=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA7100LC=y
+CONFIG_SMP=y
+CONFIG_HZ_100=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_EISA=y
+CONFIG_PCI=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_PCCARD=m
+CONFIG_YENTA=m
+# CONFIG_PDC_CHASSIS is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=m
+CONFIG_LLC2=m
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_1284=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_DH=y
+CONFIG_ATA=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+CONFIG_LASI_82596=y
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPPOE=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_POLLDEV=y
+CONFIG_KEYBOARD_HIL_OLD=m
+CONFIG_KEYBOARD_HIL=m
+CONFIG_MOUSE_SERIAL=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+CONFIG_LEGACY_PTY_COUNT=64
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_POWER_SUPPLY=y
+# CONFIG_HWMON is not set
+CONFIG_AGP=y
+CONFIG_VIDEO_OUTPUT_CONTROL=y
+CONFIG_FB=y
+CONFIG_FB_FOREIGN_ENDIAN=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_MATROX=m
+CONFIG_FB_MATROX_G=y
+CONFIG_FB_VOODOO1=m
+CONFIG_DUMMY_CONSOLE_COLUMNS=128
+CONFIG_DUMMY_CONSOLE_ROWS=48
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_AD1889=m
+CONFIG_SND_HARMONY=m
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KYE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_DMADEVICES=y
+CONFIG_AUXDISPLAY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_RT=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_NFS_FS=m
+# CONFIG_NFS_V2 is not set
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_LATENCYTOP=y
+CONFIG_LKDTM=m
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_CCITT=m
+CONFIG_CRC_T10DIF=y
+CONFIG_FONTS=y
diff --git a/arch/parisc/configs/generic-64bit_defconfig b/arch/parisc/configs/generic-64bit_defconfig
new file mode 100644 (file)
index 0000000..d7f5126
--- /dev/null
@@ -0,0 +1,345 @@
+CONFIG_LOCALVERSION="-64bit"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_PA8X00=y
+CONFIG_MLONGCALLS=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+# CONFIG_COMPACTION is not set
+CONFIG_HPPB=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_PCI=y
+CONFIG_PCI_STUB=m
+CONFIG_PCI_IOV=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_LRO=m
+CONFIG_INET_DIAG=m
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_ADVANCED is not set
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_DCB=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_IDE=y
+CONFIG_IDE_GD=m
+CONFIG_IDE_GD_ATAPI=y
+CONFIG_BLK_DEV_IDECD=m
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_BLK_DEV_SIIMAGE=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ISCSI_BOOT_SYSFS=y
+CONFIG_SCSI_MPT2SAS=y
+CONFIG_SCSI_LASI700=m
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_SCSI_DH=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_RAID=m
+CONFIG_DM_UEVENT=y
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_SAS=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+CONFIG_TUN=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+CONFIG_HP100=m
+CONFIG_E1000=y
+CONFIG_LASI_82596=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+CONFIG_QLA3XXX=m
+CONFIG_QLCNIC=m
+CONFIG_QLGE=m
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PHYLIB=y
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_STE10XP=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_HIL_OLD is not set
+# CONFIG_KEYBOARD_HIL is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+CONFIG_HP_SDC_RTC=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_HP_SDC=m
+CONFIG_HIL_MLC=m
+CONFIG_SERIO_RAW=m
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_NOZOMI=m
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_JSM=m
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_TCG_TPM=m
+CONFIG_TCG_ATMEL=m
+CONFIG_PTP_1588_CLOCK=m
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+CONFIG_SENSORS_W83627EHF=m
+CONFIG_WATCHDOG=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_HTC_PASIC3=m
+CONFIG_LPC_SCH=m
+CONFIG_MFD_SM501=m
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=m
+CONFIG_REGULATOR_USERSPACE_CONSUMER=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_DRM_RADEON_UMS=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+CONFIG_HID=m
+CONFIG_HIDRAW=y
+CONFIG_HID_DRAGONRISE=m
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_KYE=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_HID_NTRIG=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SONY=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_THRUSTMASTER_FF=y
+CONFIG_HID_ZEROPLUS=m
+CONFIG_ZEROPLUS_FF=y
+CONFIG_USB_HID=m
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_MON=m
+CONFIG_USB_WUSB_CBAF=m
+CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_R8A66597_HCD=m
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+CONFIG_UIO=y
+CONFIG_UIO_PDRV_GENIRQ=m
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+CONFIG_STAGING=y
+# CONFIG_NET_VENDOR_SILICOM is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_ISO9660_FS=y
+CONFIG_UDF_FS=y
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_SYSV_FS=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V4=m
+CONFIG_NFS_V4_1=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V4=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_UTF8=m
+CONFIG_PRINTK_TIME=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_ECB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_CRC_CCITT=m
+CONFIG_LIBCRC32C=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
index 0da848232344fc41d9583cc10f8510a54a7d6a16..b3069fd83468c5972f98d19f8f17dfa9bfb5e0cf 100644 (file)
        nop     /* 7 */
        .endm
 
+       /*
+        * ASM_EXCEPTIONTABLE_ENTRY
+        *
+        * Creates an exception table entry.
+        * Do not convert to an assembler macro. This won't work.
+        */
+#define ASM_EXCEPTIONTABLE_ENTRY(fault_addr, except_addr)      \
+       .section __ex_table,"aw"                        !       \
+       ASM_ULONG_INSN  fault_addr, except_addr         !       \
+       .previous
+
+
 #endif /* __ASSEMBLY__ */
 #endif
index a2db278a5def69c4a6ac4745c3685e45600df156..3c3cb004b7e225344c9850805cce04192655e7d2 100644 (file)
@@ -19,5 +19,9 @@
 #define user_stack_pointer(regs)       ((regs)->gr[30])
 unsigned long profile_pc(struct pt_regs *);
 
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+       return regs->gr[20];
+}
 
 #endif
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
new file mode 100644 (file)
index 0000000..8d806d8
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _ASM_SOCKET_H
+#define _ASM_SOCKET_H
+
+#include <uapi/asm/socket.h>
+
+/* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
+ * have to define SOCK_NONBLOCK to a different value here.
+ */
+#define SOCK_NONBLOCK  0x40000000
+#endif /* _ASM_SOCKET_H */
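
For context: the SOCK_NONBLOCK value defined above matters because user space
ORs it into the type argument of socket(2), so its bit must stay clear of the
socket type values, and on parisc it cannot reuse O_NONBLOCK either. A minimal
user-space sketch (not part of this patch) showing that usage:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	/* SOCK_NONBLOCK is OR-ed into the socket type; the kernel masks it
	 * off again, which only works if its bit lies outside the type range. */
	int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	close(fd);
	return 0;
}
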
index 540c88fa8f863d44adcc254fe9a3917cf015aa4d..bc7cf120106b30e477264850e373f346eeb9bf82 100644 (file)
@@ -59,6 +59,7 @@ struct thread_info {
 #define TIF_32BIT               4       /* 32 bit binary */
 #define TIF_MEMDIE             5       /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK    6       /* restore saved signal mask */
+#define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SINGLESTEP         9       /* single stepping? */
 #define TIF_BLOCKSTEP          10      /* branch stepping? */
@@ -68,6 +69,7 @@ struct thread_info {
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_32BIT             (1 << TIF_32BIT)
+#define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
 #define _TIF_BLOCKSTEP         (1 << TIF_BLOCKSTEP)
@@ -75,7 +77,7 @@ struct thread_info {
 #define _TIF_USER_WORK_MASK     (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
                                  _TIF_NEED_RESCHED)
 #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP |        \
-                                _TIF_BLOCKSTEP)
+                                _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT)
 
 #endif /* __KERNEL__ */
 
index 1945f995f2dfec1212ed8e1769168085076d9bb1..4736020ba5eabeb05841e5a9dc4834026f61a92d 100644 (file)
@@ -6,7 +6,7 @@ struct pt_regs;
 
 /* traps.c */
 void parisc_terminate(char *msg, struct pt_regs *regs,
-               int code, unsigned long offset);
+               int code, unsigned long offset) __noreturn __cold;
 
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *regs, unsigned long code,
index e0a82358517e032677cd563ae7aad33e37fef11f..4006964d8e12646761d954b9f73ff0b503e736b6 100644 (file)
@@ -59,12 +59,13 @@ static inline long access_ok(int type, const void __user * addr,
 /*
  * The exception table contains two values: the first is an address
  * for an instruction that is allowed to fault, and the second is
- * the address to the fixup routine. 
+ * the address to the fixup routine. Even on a 64bit kernel we could
+ * use a 32bit (unsigned int) address here.
  */
 
 struct exception_table_entry {
-       unsigned long insn;  /* address of insn that is allowed to fault.   */
-       long fixup;          /* fixup routine */
+       unsigned long insn;     /* address of insn that is allowed to fault. */
+       unsigned long fixup;    /* fixup routine */
 };
 
 #define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
index 71700e636a8e7b25172cda7f4815b1e46216b3d3..f33113a6141e7540da2195cc72469152edfbecf2 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_SOCKET_H
-#define _ASM_SOCKET_H
+#ifndef _UAPI_ASM_SOCKET_H
+#define _UAPI_ASM_SOCKET_H
 
 #include <asm/sockios.h>
 
@@ -75,9 +75,6 @@
 
 #define SO_BUSY_POLL           0x4027
 
-/* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
- * have to define SOCK_NONBLOCK to a different value here.
- */
-#define SOCK_NONBLOCK   0x40000000
+#define SO_MAX_PACING_RATE     0x4048
 
-#endif /* _ASM_SOCKET_H */
+#endif /* _UAPI_ASM_SOCKET_H */
index 4da682b466d06fd0806b4b3fd515f02fbacd4009..6f68784fea25f9141b195db9ec7b532f3f01da9c 100644 (file)
 #   $4 - default install path (blank if root directory)
 #
 
+verify () {
+       if [ ! -f "$1" ]; then
+               echo ""                                                   1>&2
+               echo " *** Missing file: $1"                              1>&2
+               echo ' *** You need to run "make" before "make install".' 1>&2
+               echo ""                                                   1>&2
+               exit 1
+       fi
+}
+
+# Make sure the files actually exist
+
+verify "$2"
+verify "$3"
+
 # User may have a custom install script
 
-if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
-if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+if [ -n "${INSTALLKERNEL}" ]; then
+  if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+fi
 
 # Default install
 
-if [ -f $4/vmlinuz ]; then
-       mv $4/vmlinuz $4/vmlinuz.old
+if [ "$(basename $2)" = "zImage" ]; then
+# Compressed install
+  echo "Installing compressed kernel"
+  base=vmlinuz
+else
+# Normal install
+  echo "Installing normal kernel"
+  base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+  mv $4/$base-$1 $4/$base-$1.old
 fi
+cat $2 > $4/$base-$1
 
-if [ -f $4/System.map ]; then
-       mv $4/System.map $4/System.old
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+  mv $4/System.map-$1 $4/System.map-$1.old
 fi
+cp $3 $4/System.map-$1
 
-cat $2 > $4/vmlinuz
-cp $3 $4/System.map
index 66ee3f12df5880db00852baad257c9ad895d11a4..ad1e3a68208cce20fed402b2388986e863b8d83e 100644 (file)
@@ -31,5 +31,6 @@ obj-$(CONFIG_64BIT)   += binfmt_elf32.o sys_parisc32.o signal32.o
 obj-$(CONFIG_STACKTRACE)+= stacktrace.o
 # only supported for PCX-W/U in 64-bit mode at the moment
 obj-$(CONFIG_64BIT)    += perf.o perf_asm.o
+obj-$(CONFIG_AUDIT_ARCH) += audit.o compat_audit.o
 obj-$(CONFIG_FUNCTION_TRACER)          += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
diff --git a/arch/parisc/kernel/audit.c b/arch/parisc/kernel/audit.c
new file mode 100644 (file)
index 0000000..eb64a61
--- /dev/null
@@ -0,0 +1,81 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+static unsigned write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static unsigned signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_arch(int arch)
+{
+#ifdef CONFIG_COMPAT
+       if (arch == AUDIT_ARCH_PARISC)
+               return 1;
+#endif
+       return 0;
+}
+
+int audit_classify_syscall(int abi, unsigned syscall)
+{
+#ifdef CONFIG_COMPAT
+       extern int parisc32_classify_syscall(unsigned);
+       if (abi == AUDIT_ARCH_PARISC)
+               return parisc32_classify_syscall(syscall);
+#endif
+       switch (syscall) {
+       case __NR_open:
+               return 2;
+       case __NR_openat:
+               return 3;
+       case __NR_execve:
+               return 5;
+       default:
+               return 0;
+       }
+}
+
+static int __init audit_classes_init(void)
+{
+#ifdef CONFIG_COMPAT
+       extern __u32 parisc32_dir_class[];
+       extern __u32 parisc32_write_class[];
+       extern __u32 parisc32_read_class[];
+       extern __u32 parisc32_chattr_class[];
+       extern __u32 parisc32_signal_class[];
+       audit_register_class(AUDIT_CLASS_WRITE_32, parisc32_write_class);
+       audit_register_class(AUDIT_CLASS_READ_32, parisc32_read_class);
+       audit_register_class(AUDIT_CLASS_DIR_WRITE_32, parisc32_dir_class);
+       audit_register_class(AUDIT_CLASS_CHATTR_32, parisc32_chattr_class);
+       audit_register_class(AUDIT_CLASS_SIGNAL_32, parisc32_signal_class);
+#endif
+       audit_register_class(AUDIT_CLASS_WRITE, write_class);
+       audit_register_class(AUDIT_CLASS_READ, read_class);
+       audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+       audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+       audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
+       return 0;
+}
+
+__initcall(audit_classes_init);
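
For context: each class table above is a list of syscall numbers terminated by
~0U, handed to audit_register_class() at init time. A hedged sketch (the helper
below is illustrative only, not kernel code) of how such a terminated table can
be scanned:

/* Illustrative only: linear membership test over a ~0U-terminated table. */
static int syscall_in_class(const unsigned int *tbl, unsigned int nr)
{
	for (; *tbl != ~0U; tbl++)
		if (*tbl == nr)
			return 1;
	return 0;
}
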
diff --git a/arch/parisc/kernel/compat_audit.c b/arch/parisc/kernel/compat_audit.c
new file mode 100644 (file)
index 0000000..c74478f
--- /dev/null
@@ -0,0 +1,40 @@
+#include <asm/unistd.h>
+
+unsigned int parisc32_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned int parisc32_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned int parisc32_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned int parisc32_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned int parisc32_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int parisc32_classify_syscall(unsigned syscall)
+{
+       switch (syscall) {
+       case __NR_open:
+               return 2;
+       case __NR_openat:
+               return 3;
+       case __NR_execve:
+               return 5;
+       default:
+               return 1;
+       }
+}
index 534abd4936e1ecf96a8f872d9ba90f930c1aff9e..e842ee233db44ef902899280fbadd456175a4de3 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/security.h>
 #include <linux/compat.h>
 #include <linux/signal.h>
+#include <linux/audit.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -267,11 +268,28 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 
 long do_syscall_trace_enter(struct pt_regs *regs)
 {
+       long ret = 0;
+
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
            tracehook_report_syscall_entry(regs))
-               return -1L;
-
-       return regs->gr[20];
+               ret = -1L;
+
+#ifdef CONFIG_64BIT
+       if (!is_compat_task())
+               audit_syscall_entry(AUDIT_ARCH_PARISC64,
+                       regs->gr[20],
+                       regs->gr[26], regs->gr[25],
+                       regs->gr[24], regs->gr[23]);
+       else
+#endif
+               audit_syscall_entry(AUDIT_ARCH_PARISC,
+                       regs->gr[20] & 0xffffffff,
+                       regs->gr[26] & 0xffffffff,
+                       regs->gr[25] & 0xffffffff,
+                       regs->gr[24] & 0xffffffff,
+                       regs->gr[23] & 0xffffffff);
+
+       return ret ? : regs->gr[20];
 }
 
 void do_syscall_trace_exit(struct pt_regs *regs)
@@ -279,6 +297,8 @@ void do_syscall_trace_exit(struct pt_regs *regs)
        int stepping = test_thread_flag(TIF_SINGLESTEP) ||
                test_thread_flag(TIF_BLOCKSTEP);
 
+       audit_syscall_exit(regs);
+
        if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, stepping);
 }
index 8a252f2d6c087aad6248e98178c79eef818c14da..2b96602e812ff9648f0ce9ec66b52e103ddb8a3e 100644 (file)
@@ -72,7 +72,6 @@ enum ipi_message_type {
        IPI_NOP=0,
        IPI_RESCHEDULE=1,
        IPI_CALL_FUNC,
-       IPI_CALL_FUNC_SINGLE,
        IPI_CPU_START,
        IPI_CPU_STOP,
        IPI_CPU_TEST
@@ -164,11 +163,6 @@ ipi_interrupt(int irq, void *dev_id)
                                generic_smp_call_function_interrupt();
                                break;
 
-                       case IPI_CALL_FUNC_SINGLE:
-                               smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
-                               generic_smp_call_function_single_interrupt();
-                               break;
-
                        case IPI_CPU_START:
                                smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
                                break;
@@ -260,7 +254,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-       send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+       send_IPI_single(cpu, IPI_CALL_FUNC);
 }
 
 /*
index e767ab733e321e5619b919a9b8684ac70c35ba9b..a63bb179f79a1fcd56a7bcf1adbe759f46587b71 100644 (file)
@@ -649,10 +649,8 @@ cas_action:
        /* Two exception table entries, one for the load,
           the other for the store. Either return -EFAULT.
           Each of the entries must be relocated. */
-       .section __ex_table,"aw"
-       ASM_ULONG_INSN (1b - linux_gateway_page), (3b - linux_gateway_page)
-       ASM_ULONG_INSN (2b - linux_gateway_page), (3b - linux_gateway_page)
-       .previous
+       ASM_EXCEPTIONTABLE_ENTRY(1b-linux_gateway_page, 3b-linux_gateway_page)
+       ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page)
 
 
        /* Make sure nothing else is placed on this page */
index 04e47c6a45626347aa261d3725005cdafb9385ad..1cd1d0c83b6d7bd7a21d0a22c57e18f2ac27f65a 100644 (file)
@@ -291,11 +291,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
        do_exit(SIGSEGV);
 }
 
-int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
-{
-       return syscall(regs);
-}
-
 /* gdb uses break 4,8 */
 #define GDB_BREAK_INSN 0x10004
 static void handle_gdb_break(struct pt_regs *regs, int wot)
@@ -805,14 +800,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
        else {
 
            /*
-            * The kernel should never fault on its own address space.
+            * The kernel should never fault on its own address space,
+            * unless pagefault_disable() was called before.
             */
 
-           if (fault_space == 0
+           if (fault_space == 0 && !in_atomic())
            {
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                parisc_terminate("Kernel Fault", regs, code, fault_address);
-       
            }
        }
 
index 6f2d9355efe25af6ab90d4205a216c1c649c39a9..a512f07d4feba9bc2dd36f2f1d2f96ee26734348 100644 (file)
@@ -88,9 +88,7 @@ ENDPROC(lclear_user)
        ldo        1(%r25),%r25
        .previous
 
-       .section __ex_table,"aw"
-       ASM_ULONG_INSN 1b,2b
-       .previous
+       ASM_EXCEPTIONTABLE_ENTRY(1b,2b)
 
        .procend
 
@@ -129,10 +127,8 @@ ENDPROC(lstrnlen_user)
        copy        %r24,%r26    /* reset r26 so 0 is returned on fault */
        .previous
 
-       .section __ex_table,"aw"
-       ASM_ULONG_INSN 1b,3b
-       ASM_ULONG_INSN 2b,3b
-       .previous
+       ASM_EXCEPTIONTABLE_ENTRY(1b,3b)
+       ASM_EXCEPTIONTABLE_ENTRY(2b,3b)
 
        .procend
 
index ac4370b1ca4019f5eaf85f910097bf1cc69c0611..b5507ec06b846f09ed4d38c5841b4eecaffb156e 100644 (file)
@@ -56,7 +56,7 @@
 #ifdef __KERNEL__
 #include <linux/module.h>
 #include <linux/compiler.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #define s_space "%%sr1"
 #define d_space "%%sr2"
 #else
@@ -524,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(copy_from_user);
 EXPORT_SYMBOL(copy_in_user);
 EXPORT_SYMBOL(memcpy);
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+       unsigned long addr = (unsigned long)src;
+
+       if (size < 0 || addr < PAGE_SIZE)
+               return -EFAULT;
+
+       /* check for I/O space F_EXTEND(0xfff00000) access as well? */
+
+       return __probe_kernel_read(dst, src, size);
+}
+
 #endif
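
For context: the probe_kernel_read() override above rejects page-zero source
addresses before falling through to the generic __probe_kernel_read(). A hedged
caller sketch (the helper name is illustrative, not from the patch):

#include <linux/uaccess.h>

/* Illustrative only: copy one word from a possibly invalid kernel address;
 * returns 0 on success or -EFAULT instead of faulting. */
static long peek_kernel_word(unsigned long addr, unsigned long *val)
{
	return probe_kernel_read(val, (const void *)addr, sizeof(*val));
}
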
index 00c0ed333a3d53421a161369cf1fa621c8a02d7f..df0d32971cdf0ec232d6ba5b5dd40394a4158529 100644 (file)
@@ -142,6 +142,12 @@ int fixup_exception(struct pt_regs *regs)
 {
        const struct exception_table_entry *fix;
 
+       /* If we only stored 32bit addresses in the exception table we can drop
+        * out if we faulted on a 64bit address. */
+       if ((sizeof(regs->iaoq[0]) > sizeof(fix->insn))
+               && (regs->iaoq[0] >> 32))
+                       return 0;
+
        fix = search_exception_tables(regs->iaoq[0]);
        if (fix) {
                struct exception_data *d;
@@ -171,20 +177,25 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
                              unsigned long address)
 {
        struct vm_area_struct *vma, *prev_vma;
-       struct task_struct *tsk = current;
-       struct mm_struct *mm = tsk->mm;
+       struct task_struct *tsk;
+       struct mm_struct *mm;
        unsigned long acc_type;
        int fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+       unsigned int flags;
+
+       if (in_atomic())
+               goto no_context;
 
-       if (in_atomic() || !mm)
+       tsk = current;
+       mm = tsk->mm;
+       if (!mm)
                goto no_context;
 
+       flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
 
        acc_type = parisc_acctyp(code, regs->iir);
-
        if (acc_type & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;
 retry:
index 38f3b7e47ec5efd190018de0d9bba1d2ef5b4011..e2e03a6d060f4543e76c8bf4c34b6a5886822fe3 100644 (file)
@@ -85,6 +85,7 @@ config GENERIC_HWEIGHT
 config PPC
        bool
        default y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select BINFMT_ELF
        select OF
        select OF_EARLY_FLATTREE
@@ -97,7 +98,7 @@ config PPC
        select VIRT_TO_BUS if !PPC64
        select HAVE_IDE
        select HAVE_IOREMAP_PROT
-       select HAVE_EFFICIENT_UNALIGNED_ACCESS
+       select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_LITTLE_ENDIAN
        select HAVE_KPROBES
        select HAVE_ARCH_KGDB
        select HAVE_KRETPROBES
@@ -139,6 +140,9 @@ config PPC
        select OLD_SIGACTION if PPC32
        select HAVE_DEBUG_STACKOVERFLOW
 
+config GENERIC_CSUM
+       def_bool CPU_LITTLE_ENDIAN
+
 config EARLY_PRINTK
        bool
        default y
@@ -1009,6 +1013,9 @@ config PHYSICAL_START
        default "0x00000000"
 endif
 
+config ARCH_RANDOM
+       def_bool n
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
index 51cfb78d4061d30781b916fe7a21a3fd81daec8c..607acf54a425b2b50913ea6b4f48024a5d21aadc 100644 (file)
@@ -36,17 +36,26 @@ KBUILD_DEFCONFIG := ppc64_defconfig
 endif
 
 ifeq ($(CONFIG_PPC64),y)
-OLDARCH        := ppc64
-
 new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
 
 ifeq ($(new_nm),y)
 NM             := $(NM) --synthetic
 endif
+endif
 
+ifeq ($(CONFIG_PPC64),y)
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+OLDARCH        := ppc64le
+else
+OLDARCH        := ppc64
+endif
+else
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+OLDARCH        := ppcle
 else
 OLDARCH        := ppc
 endif
+endif
 
 # It seems there are times we use this Makefile without
 # including the config file, but this replicates the old behaviour
@@ -56,11 +65,29 @@ endif
 
 UTS_MACHINE := $(OLDARCH)
 
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
+override CC    += -mlittle-endian -mno-strict-align
+override AS    += -mlittle-endian
+override LD    += -EL
+override CROSS32CC += -mlittle-endian
+override CROSS32AS += -mlittle-endian
+LDEMULATION    := lppc
+GNUTARGET      := powerpcle
+MULTIPLEWORD   := -mno-multiple
+else
+override CC    += -mbig-endian
+override AS    += -mbig-endian
+override LD    += -EB
+LDEMULATION    := ppc
+GNUTARGET      := powerpc
+MULTIPLEWORD   := -mmultiple
+endif
+
 ifeq ($(HAS_BIARCH),y)
 override AS    += -a$(CONFIG_WORD_SIZE)
-override LD    += -m elf$(CONFIG_WORD_SIZE)ppc
+override LD    += -m elf$(CONFIG_WORD_SIZE)$(LDEMULATION)
 override CC    += -m$(CONFIG_WORD_SIZE)
-override AR    := GNUTARGET=elf$(CONFIG_WORD_SIZE)-powerpc $(AR)
+override AR    := GNUTARGET=elf$(CONFIG_WORD_SIZE)-$(GNUTARGET) $(AR)
 endif
 
 LDFLAGS_vmlinux-y := -Bstatic
@@ -86,7 +113,7 @@ endif
 CFLAGS-$(CONFIG_PPC64) := -mtraceback=no -mcall-aixdesc
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc)
 CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
-CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
+CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD)
 
 ifeq ($(CONFIG_PPC_BOOK3S_64),y)
 CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
index 15ca2255f43853945789c1fdc58f4a5bc5a82dbf..ca7f08cc4afd770b528931bbab92edc55ad5e8db 100644 (file)
@@ -22,7 +22,8 @@ all: $(obj)/zImage
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                 -fno-strict-aliasing -Os -msoft-float -pipe \
                 -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
-                -isystem $(shell $(CROSS32CC) -print-file-name=include)
+                -isystem $(shell $(CROSS32CC) -print-file-name=include) \
+                -mbig-endian
 BOOTAFLAGS     := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
 
 ifdef CONFIG_DEBUG_INFO
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
new file mode 100644 (file)
index 0000000..d853d16
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef _ASM_POWERPC_ARCHRANDOM_H
+#define _ASM_POWERPC_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <asm/machdep.h>
+
+static inline int arch_get_random_long(unsigned long *v)
+{
+       if (ppc_md.get_random_long)
+               return ppc_md.get_random_long(v);
+
+       return 0;
+}
+
+static inline int arch_get_random_int(unsigned int *v)
+{
+       unsigned long val;
+       int rc;
+
+       rc = arch_get_random_long(&val);
+       if (rc)
+               *v = val;
+
+       return rc;
+}
+
+int powernv_get_random_long(unsigned long *v);
+
+#endif /* CONFIG_ARCH_RANDOM */
+
+#endif /* _ASM_POWERPC_ARCHRANDOM_H */
index ce0c28495f9a0416450a33a4ceee4b3a33be45c2..8251a3ba870f8cfd27a5b1c84c794ac719e34ace 100644 (file)
@@ -14,6 +14,9 @@
  * which always checksum on 4 octet boundaries.  ihl is the number
  * of 32-bit words and is always >= 5.
  */
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
@@ -123,5 +126,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
        return sum;
 #endif
 }
+
+#endif
 #endif /* __KERNEL__ */
 #endif
index b8a4b9bc50b326b07efda9e6f82e59e16be3e318..f49ddb1b2273b0d46e1cbf07ca434287a42e3cca 100644 (file)
@@ -93,6 +93,7 @@
 #define CSOR_NAND_PGS_512              0x00000000
 #define CSOR_NAND_PGS_2K               0x00080000
 #define CSOR_NAND_PGS_4K               0x00100000
+#define CSOR_NAND_PGS_8K               0x00180000
 /* Spare region Size */
 #define CSOR_NAND_SPRZ_MASK            0x0000E000
 #define CSOR_NAND_SPRZ_SHIFT           13
 #define CSOR_NAND_SPRZ_210             0x00006000
 #define CSOR_NAND_SPRZ_218             0x00008000
 #define CSOR_NAND_SPRZ_224             0x0000A000
+#define CSOR_NAND_SPRZ_CSOR_EXT                0x0000C000
 /* Pages Per Block */
 #define CSOR_NAND_PB_MASK              0x00000700
 #define CSOR_NAND_PB_SHIFT             8
index d3f64f361814cf4210ad9e968ccb747fe8c87048..d4a5315718ca454a6a4d01ceab9d4a0e767ac267 100644 (file)
@@ -25,7 +25,7 @@
 struct hvsi_header {
        uint8_t  type;
        uint8_t  len;
-       uint16_t seqno;
+       __be16 seqno;
 } __attribute__((packed));
 
 struct hvsi_data {
@@ -35,24 +35,24 @@ struct hvsi_data {
 
 struct hvsi_control {
        struct hvsi_header hdr;
-       uint16_t verb;
+       __be16 verb;
        /* optional depending on verb: */
-       uint32_t word;
-       uint32_t mask;
+       __be32 word;
+       __be32 mask;
 } __attribute__((packed));
 
 struct hvsi_query {
        struct hvsi_header hdr;
-       uint16_t verb;
+       __be16 verb;
 } __attribute__((packed));
 
 struct hvsi_query_response {
        struct hvsi_header hdr;
-       uint16_t verb;
-       uint16_t query_seqno;
+       __be16 verb;
+       __be16 query_seqno;
        union {
                uint8_t  version;
-               uint32_t mctrl_word;
+               __be32 mctrl_word;
        } u;
 } __attribute__((packed));
 
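
For context: with the hvsi fields retyped as __be16/__be32, endianness
conversions become explicit (and sparse-checkable) at each access. A hedged
in-kernel sketch (the helper name is illustrative, not part of the patch):

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/hvsi.h>

/* Illustrative only: return the packet sequence number in CPU byte order. */
static inline u16 hvsi_hdr_seqno(const struct hvsi_header *hdr)
{
	return be16_to_cpu(hdr->seqno);
}
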
index 5a64757dc0d1eb0206f2499a7e26e39fec7cda21..575fbf81fad02dcd8e946cecfa9c5d5c35e11810 100644 (file)
@@ -21,7 +21,7 @@ extern struct pci_dev *isa_bridge_pcidev;
 /*
  * has legacy ISA devices ?
  */
-#define arch_has_dev_port()    (isa_bridge_pcidev != NULL)
+#define arch_has_dev_port()    (isa_bridge_pcidev != NULL || isa_io_special)
 #endif
 
 #include <linux/device.h>
@@ -113,7 +113,7 @@ extern bool isa_io_special;
 
 /* gcc 4.0 and older doesn't have 'Z' constraint */
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)
-#define DEF_MMIO_IN_LE(name, size, insn)                               \
+#define DEF_MMIO_IN_X(name, size, insn)                                \
 static inline u##size name(const volatile u##size __iomem *addr)       \
 {                                                                      \
        u##size ret;                                                    \
@@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr)    \
        return ret;                                                     \
 }
 
-#define DEF_MMIO_OUT_LE(name, size, insn)                              \
+#define DEF_MMIO_OUT_X(name, size, insn)                               \
 static inline void name(volatile u##size __iomem *addr, u##size val)   \
 {                                                                      \
        __asm__ __volatile__("sync;"#insn" %1,0,%2"                     \
@@ -130,7 +130,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)        \
        IO_SET_SYNC_FLAG();                                             \
 }
 #else /* newer gcc */
-#define DEF_MMIO_IN_LE(name, size, insn)                               \
+#define DEF_MMIO_IN_X(name, size, insn)                                \
 static inline u##size name(const volatile u##size __iomem *addr)       \
 {                                                                      \
        u##size ret;                                                    \
@@ -139,7 +139,7 @@ static inline u##size name(const volatile u##size __iomem *addr)    \
        return ret;                                                     \
 }
 
-#define DEF_MMIO_OUT_LE(name, size, insn)                              \
+#define DEF_MMIO_OUT_X(name, size, insn)                               \
 static inline void name(volatile u##size __iomem *addr, u##size val)   \
 {                                                                      \
        __asm__ __volatile__("sync;"#insn" %1,%y0"                      \
@@ -148,7 +148,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)        \
 }
 #endif
 
-#define DEF_MMIO_IN_BE(name, size, insn)                               \
+#define DEF_MMIO_IN_D(name, size, insn)                                \
 static inline u##size name(const volatile u##size __iomem *addr)       \
 {                                                                      \
        u##size ret;                                                    \
@@ -157,7 +157,7 @@ static inline u##size name(const volatile u##size __iomem *addr)    \
        return ret;                                                     \
 }
 
-#define DEF_MMIO_OUT_BE(name, size, insn)                              \
+#define DEF_MMIO_OUT_D(name, size, insn)                               \
 static inline void name(volatile u##size __iomem *addr, u##size val)   \
 {                                                                      \
        __asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0"                 \
@@ -165,22 +165,37 @@ static inline void name(volatile u##size __iomem *addr, u##size val)      \
        IO_SET_SYNC_FLAG();                                             \
 }
 
+DEF_MMIO_IN_D(in_8,     8, lbz);
+DEF_MMIO_OUT_D(out_8,   8, stb);
 
-DEF_MMIO_IN_BE(in_8,     8, lbz);
-DEF_MMIO_IN_BE(in_be16, 16, lhz);
-DEF_MMIO_IN_BE(in_be32, 32, lwz);
-DEF_MMIO_IN_LE(in_le16, 16, lhbrx);
-DEF_MMIO_IN_LE(in_le32, 32, lwbrx);
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_IN_D(in_be16, 16, lhz);
+DEF_MMIO_IN_D(in_be32, 32, lwz);
+DEF_MMIO_IN_X(in_le16, 16, lhbrx);
+DEF_MMIO_IN_X(in_le32, 32, lwbrx);
 
-DEF_MMIO_OUT_BE(out_8,     8, stb);
-DEF_MMIO_OUT_BE(out_be16, 16, sth);
-DEF_MMIO_OUT_BE(out_be32, 32, stw);
-DEF_MMIO_OUT_LE(out_le16, 16, sthbrx);
-DEF_MMIO_OUT_LE(out_le32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_be16, 16, sth);
+DEF_MMIO_OUT_D(out_be32, 32, stw);
+DEF_MMIO_OUT_X(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_le32, 32, stwbrx);
+#else
+DEF_MMIO_IN_X(in_be16, 16, lhbrx);
+DEF_MMIO_IN_X(in_be32, 32, lwbrx);
+DEF_MMIO_IN_D(in_le16, 16, lhz);
+DEF_MMIO_IN_D(in_le32, 32, lwz);
+
+DEF_MMIO_OUT_X(out_be16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_be32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_le16, 16, sth);
+DEF_MMIO_OUT_D(out_le32, 32, stw);
+
+#endif /* __BIG_ENDIAN */
 
 #ifdef __powerpc64__
-DEF_MMIO_OUT_BE(out_be64, 64, std);
-DEF_MMIO_IN_BE(in_be64, 64, ld);
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_OUT_D(out_be64, 64, std);
+DEF_MMIO_IN_D(in_be64, 64, ld);
 
 /* There are no asm instructions for 64-bit reverse loads and stores */
 static inline u64 in_le64(const volatile u64 __iomem *addr)
@@ -192,6 +207,22 @@ static inline void out_le64(volatile u64 __iomem *addr, u64 val)
 {
        out_be64(addr, swab64(val));
 }
+#else
+DEF_MMIO_OUT_D(out_le64, 64, std);
+DEF_MMIO_IN_D(in_le64, 64, ld);
+
+/* There are no asm instructions for 64-bit reverse loads and stores */
+static inline u64 in_be64(const volatile u64 __iomem *addr)
+{
+       return swab64(in_le64(addr));
+}
+
+static inline void out_be64(volatile u64 __iomem *addr, u64 val)
+{
+       out_le64(addr, swab64(val));
+}
+
+#endif
 #endif /* __powerpc64__ */
 
 /*
index ae098c438f009eb0e312fc8b78a6ce07f4311cc6..f016bb699b5f6200268d3b8e5bf4e2bd003dbbf7 100644 (file)
@@ -19,7 +19,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm goto("1:\n\t"
+       asm_volatile_goto("1:\n\t"
                 "nop\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
index 8b480901165a5222634c60c81408651e3da76a38..ad3025d0880b1c709d073e9d42f55a71a9a8159a 100644 (file)
@@ -78,6 +78,18 @@ struct machdep_calls {
                                    long index);
        void            (*tce_flush)(struct iommu_table *tbl);
 
+       /* _rm versions are for real mode use only */
+       int             (*tce_build_rm)(struct iommu_table *tbl,
+                                    long index,
+                                    long npages,
+                                    unsigned long uaddr,
+                                    enum dma_data_direction direction,
+                                    struct dma_attrs *attrs);
+       void            (*tce_free_rm)(struct iommu_table *tbl,
+                                   long index,
+                                   long npages);
+       void            (*tce_flush_rm)(struct iommu_table *tbl);
+
        void __iomem *  (*ioremap)(phys_addr_t addr, unsigned long size,
                                   unsigned long flags, void *caller);
        void            (*iounmap)(volatile void __iomem *token);
@@ -263,6 +275,10 @@ struct machdep_calls {
        ssize_t (*cpu_probe)(const char *, size_t);
        ssize_t (*cpu_release)(const char *, size_t);
 #endif
+
+#ifdef CONFIG_ARCH_RANDOM
+       int (*get_random_long)(unsigned long *v);
+#endif
 };
 
 extern void e500_idle(void);
index c4cf01197273f8a88203d35c34b1516e356040a4..807014dde821058429b41a5d825759d50d4d9ed7 100644 (file)
@@ -135,8 +135,8 @@ extern char initial_stab[];
 #ifndef __ASSEMBLY__
 
 struct hash_pte {
-       unsigned long v;
-       unsigned long r;
+       __be64 v;
+       __be64 r;
 };
 
 extern struct hash_pte *htab_address;
index c5cd72833d6e7f29daba00655296750b0c560b16..4cc33ba1edd3ed292cacd1d70bd3030023bbc3e5 100644 (file)
@@ -460,10 +460,12 @@ enum {
 
 enum {
        OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
+       OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
 };
 
 enum {
        OPAL_P7IOC_NUM_PEST_REGS = 128,
+       OPAL_PHB3_NUM_PEST_REGS = 256
 };
 
 struct OpalIoPhbErrorCommon {
@@ -531,28 +533,91 @@ struct OpalIoP7IOCPhbErrorData {
        uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
 };
 
+struct OpalIoPhb3ErrorData {
+       struct OpalIoPhbErrorCommon common;
+
+       uint32_t brdgCtl;
+
+       /* PHB3 UTL regs */
+       uint32_t portStatusReg;
+       uint32_t rootCmplxStatus;
+       uint32_t busAgentStatus;
+
+       /* PHB3 cfg regs */
+       uint32_t deviceStatus;
+       uint32_t slotStatus;
+       uint32_t linkStatus;
+       uint32_t devCmdStatus;
+       uint32_t devSecStatus;
+
+       /* cfg AER regs */
+       uint32_t rootErrorStatus;
+       uint32_t uncorrErrorStatus;
+       uint32_t corrErrorStatus;
+       uint32_t tlpHdr1;
+       uint32_t tlpHdr2;
+       uint32_t tlpHdr3;
+       uint32_t tlpHdr4;
+       uint32_t sourceId;
+
+       uint32_t rsv3;
+
+       /* Record data about the call to allocate a buffer */
+       uint64_t errorClass;
+       uint64_t correlator;
+
+       uint64_t nFir;                  /* 000 */
+       uint64_t nFirMask;              /* 003 */
+       uint64_t nFirWOF;               /* 008 */
+
+       /* PHB3 MMIO Error Regs */
+       uint64_t phbPlssr;              /* 120 */
+       uint64_t phbCsr;                /* 110 */
+       uint64_t lemFir;                /* C00 */
+       uint64_t lemErrorMask;          /* C18 */
+       uint64_t lemWOF;                /* C40 */
+       uint64_t phbErrorStatus;        /* C80 */
+       uint64_t phbFirstErrorStatus;   /* C88 */
+       uint64_t phbErrorLog0;          /* CC0 */
+       uint64_t phbErrorLog1;          /* CC8 */
+       uint64_t mmioErrorStatus;       /* D00 */
+       uint64_t mmioFirstErrorStatus;  /* D08 */
+       uint64_t mmioErrorLog0;         /* D40 */
+       uint64_t mmioErrorLog1;         /* D48 */
+       uint64_t dma0ErrorStatus;       /* D80 */
+       uint64_t dma0FirstErrorStatus;  /* D88 */
+       uint64_t dma0ErrorLog0;         /* DC0 */
+       uint64_t dma0ErrorLog1;         /* DC8 */
+       uint64_t dma1ErrorStatus;       /* E00 */
+       uint64_t dma1FirstErrorStatus;  /* E08 */
+       uint64_t dma1ErrorLog0;         /* E40 */
+       uint64_t dma1ErrorLog1;         /* E48 */
+       uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS];
+       uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS];
+};
+
 typedef struct oppanel_line {
        const char *    line;
        uint64_t        line_len;
 } oppanel_line_t;
 
 /* API functions */
-int64_t opal_console_write(int64_t term_number, int64_t *length,
+int64_t opal_console_write(int64_t term_number, __be64 *length,
                           const uint8_t *buffer);
-int64_t opal_console_read(int64_t term_number, int64_t *length,
+int64_t opal_console_read(int64_t term_number, __be64 *length,
                          uint8_t *buffer);
 int64_t opal_console_write_buffer_space(int64_t term_number,
-                                       int64_t *length);
-int64_t opal_rtc_read(uint32_t *year_month_day,
-                     uint64_t *hour_minute_second_millisecond);
+                                       __be64 *length);
+int64_t opal_rtc_read(__be32 *year_month_day,
+                     __be64 *hour_minute_second_millisecond);
 int64_t opal_rtc_write(uint32_t year_month_day,
                       uint64_t hour_minute_second_millisecond);
 int64_t opal_cec_power_down(uint64_t request);
 int64_t opal_cec_reboot(void);
 int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
-int64_t opal_handle_interrupt(uint64_t isn, uint64_t *outstanding_event_mask);
-int64_t opal_poll_events(uint64_t *outstanding_event_mask);
+int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
+int64_t opal_poll_events(__be64 *outstanding_event_mask);
 int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr,
                                    uint64_t tce_mem_size);
 int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
@@ -560,9 +625,9 @@ int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
 int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func,
                                  uint64_t offset, uint8_t *data);
 int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func,
-                                      uint64_t offset, uint16_t *data);
+                                      uint64_t offset, __be16 *data);
 int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func,
-                                 uint64_t offset, uint32_t *data);
+                                 uint64_t offset, __be32 *data);
 int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func,
                                   uint64_t offset, uint8_t data);
 int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
@@ -570,14 +635,14 @@ int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
 int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
                                   uint64_t offset, uint32_t data);
 int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
-int64_t opal_get_xive(uint32_t isn, uint16_t *server, uint8_t *priority);
+int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
 int64_t opal_register_exception_handler(uint64_t opal_exception,
                                        uint64_t handler_address,
                                        uint64_t glue_cache_line);
 int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
                                   uint8_t *freeze_state,
-                                  uint16_t *pci_error_type,
-                                  uint64_t *phb_status);
+                                  __be16 *pci_error_type,
+                                  __be64 *phb_status);
 int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
                                  uint64_t eeh_action_token);
 int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
@@ -614,13 +679,13 @@ int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
 int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
                             uint32_t xive_num);
 int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
-                            int32_t *interrupt_source_number);
+                            __be32 *interrupt_source_number);
 int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num,
-                       uint8_t msi_range, uint32_t *msi_address,
-                       uint32_t *message_data);
+                       uint8_t msi_range, __be32 *msi_address,
+                       __be32 *message_data);
 int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
                        uint32_t xive_num, uint8_t msi_range,
-                       uint64_t *msi_address, uint32_t *message_data);
+                       __be64 *msi_address, __be32 *message_data);
 int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address);
 int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status);
 int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines);
@@ -642,7 +707,7 @@ int64_t opal_pci_fence_phb(uint64_t phb_id);
 int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
 int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
-int64_t opal_get_epow_status(uint64_t *status);
+int64_t opal_get_epow_status(__be64 *status);
 int64_t opal_set_system_attention_led(uint8_t led_action);
 int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
                            uint16_t *pci_error_type, uint16_t *severity);
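[Annotation] The prototype changes above make it explicit that OPAL fills out-parameters with big-endian data, so callers on a little-endian kernel must convert. A minimal hedged sketch of a caller, assuming OPAL_SUCCESS (0) from earlier in this header and the usual kernel headers; the function name and pr_debug message are illustrative only, not part of the patch:

static void example_poll_opal_events(void)
{
        __be64 events_be = 0;
        u64 events;

        if (opal_poll_events(&events_be) != OPAL_SUCCESS)
                return;

        /* Firmware data is big-endian; convert before use. */
        events = be64_to_cpu(events_be);
        pr_debug("OPAL events pending: 0x%llx\n", (unsigned long long)events);
}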
index 46db09414a1063415fccc9ef74b9f4b664de02f3..4a191c47286748c65cf2e7eb8753239af4c3b5d5 100644 (file)
@@ -394,6 +394,8 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
        hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
 }
 
+struct page *realmode_pfn_to_page(unsigned long pfn);
+
 static inline char *get_hpte_slot_array(pmd_t *pmdp)
 {
        /*
index d7fe9f5b46d457cf0895c7dee542bc2e40fb3e49..ad5fcf51b25225c7827f45347ce94d8f7c2584b6 100644 (file)
 #define PPC_INST_TLBIVAX               0x7c000624
 #define PPC_INST_TLBSRX_DOT            0x7c0006a5
 #define PPC_INST_XXLOR                 0xf0000510
+#define PPC_INST_XXSWAPD               0xf0000250
 #define PPC_INST_XVCPSGNDP             0xf0000780
 #define PPC_INST_TRECHKPT              0x7c0007dd
 #define PPC_INST_TRECLAIM              0x7c00075d
                                               VSX_XX1((s), a, b))
 #define XXLOR(t, a, b)         stringify_in_c(.long PPC_INST_XXLOR | \
                                               VSX_XX3((t), a, b))
+#define XXSWAPD(t, a)          stringify_in_c(.long PPC_INST_XXSWAPD | \
+                                              VSX_XX3((t), a, a))
 #define XVCPSGNDP(t, a, b)     stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
                                               VSX_XX3((t), (a), (b))))
 
index 599545738af3e2354b137221cf1c07cf68e25440..8deaaad3b32fcfa0c2bf0bd14434dcdfd411fe7a 100644 (file)
@@ -98,123 +98,51 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 #define REST_8GPRS(n, base)    REST_4GPRS(n, base); REST_4GPRS(n+4, base)
 #define REST_10GPRS(n, base)   REST_8GPRS(n, base); REST_2GPRS(n+8, base)
 
-#define SAVE_FPR(n, base)      stfd    n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define SAVE_FPR(n, base)      stfd    n,8*TS_FPRWIDTH*(n)(base)
 #define SAVE_2FPRS(n, base)    SAVE_FPR(n, base); SAVE_FPR(n+1, base)
 #define SAVE_4FPRS(n, base)    SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
 #define SAVE_8FPRS(n, base)    SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
 #define SAVE_16FPRS(n, base)   SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
 #define SAVE_32FPRS(n, base)   SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
-#define REST_FPR(n, base)      lfd     n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define REST_FPR(n, base)      lfd     n,8*TS_FPRWIDTH*(n)(base)
 #define REST_2FPRS(n, base)    REST_FPR(n, base); REST_FPR(n+1, base)
 #define REST_4FPRS(n, base)    REST_2FPRS(n, base); REST_2FPRS(n+2, base)
 #define REST_8FPRS(n, base)    REST_4FPRS(n, base); REST_4FPRS(n+4, base)
 #define REST_16FPRS(n, base)   REST_8FPRS(n, base); REST_8FPRS(n+8, base)
 #define REST_32FPRS(n, base)   REST_16FPRS(n, base); REST_16FPRS(n+16, base)
 
-#define SAVE_VR(n,b,base)      li b,THREAD_VR0+(16*(n));  stvx n,base,b
+#define SAVE_VR(n,b,base)      li b,16*(n);  stvx n,base,b
 #define SAVE_2VRS(n,b,base)    SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
 #define SAVE_4VRS(n,b,base)    SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
 #define SAVE_8VRS(n,b,base)    SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
 #define SAVE_16VRS(n,b,base)   SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
 #define SAVE_32VRS(n,b,base)   SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base)      li b,THREAD_VR0+(16*(n)); lvx n,base,b
+#define REST_VR(n,b,base)      li b,16*(n); lvx n,base,b
 #define REST_2VRS(n,b,base)    REST_VR(n,b,base); REST_VR(n+1,b,base)
 #define REST_4VRS(n,b,base)    REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
 #define REST_8VRS(n,b,base)    REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
 #define REST_16VRS(n,b,base)   REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
 #define REST_32VRS(n,b,base)   REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 
-/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in
- * thread_struct:
- */
-#define SAVE_FPR_TRANSACT(n, base)     stfd n,THREAD_TRANSACT_FPR0+    \
-                                       8*TS_FPRWIDTH*(n)(base)
-#define SAVE_2FPRS_TRANSACT(n, base)   SAVE_FPR_TRANSACT(n, base);     \
-                                       SAVE_FPR_TRANSACT(n+1, base)
-#define SAVE_4FPRS_TRANSACT(n, base)   SAVE_2FPRS_TRANSACT(n, base);   \
-                                       SAVE_2FPRS_TRANSACT(n+2, base)
-#define SAVE_8FPRS_TRANSACT(n, base)   SAVE_4FPRS_TRANSACT(n, base);   \
-                                       SAVE_4FPRS_TRANSACT(n+4, base)
-#define SAVE_16FPRS_TRANSACT(n, base)  SAVE_8FPRS_TRANSACT(n, base);   \
-                                       SAVE_8FPRS_TRANSACT(n+8, base)
-#define SAVE_32FPRS_TRANSACT(n, base)  SAVE_16FPRS_TRANSACT(n, base);  \
-                                       SAVE_16FPRS_TRANSACT(n+16, base)
-
-#define REST_FPR_TRANSACT(n, base)     lfd     n,THREAD_TRANSACT_FPR0+ \
-                                       8*TS_FPRWIDTH*(n)(base)
-#define REST_2FPRS_TRANSACT(n, base)   REST_FPR_TRANSACT(n, base);     \
-                                       REST_FPR_TRANSACT(n+1, base)
-#define REST_4FPRS_TRANSACT(n, base)   REST_2FPRS_TRANSACT(n, base);   \
-                                       REST_2FPRS_TRANSACT(n+2, base)
-#define REST_8FPRS_TRANSACT(n, base)   REST_4FPRS_TRANSACT(n, base);   \
-                                       REST_4FPRS_TRANSACT(n+4, base)
-#define REST_16FPRS_TRANSACT(n, base)  REST_8FPRS_TRANSACT(n, base);   \
-                                       REST_8FPRS_TRANSACT(n+8, base)
-#define REST_32FPRS_TRANSACT(n, base)  REST_16FPRS_TRANSACT(n, base);  \
-                                       REST_16FPRS_TRANSACT(n+16, base)
-
-
-#define SAVE_VR_TRANSACT(n,b,base)     li b,THREAD_TRANSACT_VR0+(16*(n)); \
-                                       stvx n,b,base
-#define SAVE_2VRS_TRANSACT(n,b,base)   SAVE_VR_TRANSACT(n,b,base);     \
-                                       SAVE_VR_TRANSACT(n+1,b,base)
-#define SAVE_4VRS_TRANSACT(n,b,base)   SAVE_2VRS_TRANSACT(n,b,base);   \
-                                       SAVE_2VRS_TRANSACT(n+2,b,base)
-#define SAVE_8VRS_TRANSACT(n,b,base)   SAVE_4VRS_TRANSACT(n,b,base);   \
-                                       SAVE_4VRS_TRANSACT(n+4,b,base)
-#define SAVE_16VRS_TRANSACT(n,b,base)  SAVE_8VRS_TRANSACT(n,b,base);   \
-                                       SAVE_8VRS_TRANSACT(n+8,b,base)
-#define SAVE_32VRS_TRANSACT(n,b,base)  SAVE_16VRS_TRANSACT(n,b,base);  \
-                                       SAVE_16VRS_TRANSACT(n+16,b,base)
-
-#define REST_VR_TRANSACT(n,b,base)     li b,THREAD_TRANSACT_VR0+(16*(n)); \
-                                       lvx n,b,base
-#define REST_2VRS_TRANSACT(n,b,base)   REST_VR_TRANSACT(n,b,base);     \
-                                       REST_VR_TRANSACT(n+1,b,base)
-#define REST_4VRS_TRANSACT(n,b,base)   REST_2VRS_TRANSACT(n,b,base);   \
-                                       REST_2VRS_TRANSACT(n+2,b,base)
-#define REST_8VRS_TRANSACT(n,b,base)   REST_4VRS_TRANSACT(n,b,base);   \
-                                       REST_4VRS_TRANSACT(n+4,b,base)
-#define REST_16VRS_TRANSACT(n,b,base)  REST_8VRS_TRANSACT(n,b,base);   \
-                                       REST_8VRS_TRANSACT(n+8,b,base)
-#define REST_32VRS_TRANSACT(n,b,base)  REST_16VRS_TRANSACT(n,b,base);  \
-                                       REST_16VRS_TRANSACT(n+16,b,base)
-
-
-#define SAVE_VSR_TRANSACT(n,b,base)    li b,THREAD_TRANSACT_VSR0+(16*(n)); \
-                                       STXVD2X(n,R##base,R##b)
-#define SAVE_2VSRS_TRANSACT(n,b,base)  SAVE_VSR_TRANSACT(n,b,base);    \
-                                       SAVE_VSR_TRANSACT(n+1,b,base)
-#define SAVE_4VSRS_TRANSACT(n,b,base)  SAVE_2VSRS_TRANSACT(n,b,base);  \
-                                       SAVE_2VSRS_TRANSACT(n+2,b,base)
-#define SAVE_8VSRS_TRANSACT(n,b,base)  SAVE_4VSRS_TRANSACT(n,b,base);  \
-                                       SAVE_4VSRS_TRANSACT(n+4,b,base)
-#define SAVE_16VSRS_TRANSACT(n,b,base) SAVE_8VSRS_TRANSACT(n,b,base);  \
-                                       SAVE_8VSRS_TRANSACT(n+8,b,base)
-#define SAVE_32VSRS_TRANSACT(n,b,base) SAVE_16VSRS_TRANSACT(n,b,base); \
-                                       SAVE_16VSRS_TRANSACT(n+16,b,base)
-
-#define REST_VSR_TRANSACT(n,b,base)    li b,THREAD_TRANSACT_VSR0+(16*(n)); \
-                                       LXVD2X(n,R##base,R##b)
-#define REST_2VSRS_TRANSACT(n,b,base)  REST_VSR_TRANSACT(n,b,base);    \
-                                       REST_VSR_TRANSACT(n+1,b,base)
-#define REST_4VSRS_TRANSACT(n,b,base)  REST_2VSRS_TRANSACT(n,b,base);  \
-                                       REST_2VSRS_TRANSACT(n+2,b,base)
-#define REST_8VSRS_TRANSACT(n,b,base)  REST_4VSRS_TRANSACT(n,b,base);  \
-                                       REST_4VSRS_TRANSACT(n+4,b,base)
-#define REST_16VSRS_TRANSACT(n,b,base) REST_8VSRS_TRANSACT(n,b,base);  \
-                                       REST_8VSRS_TRANSACT(n+8,b,base)
-#define REST_32VSRS_TRANSACT(n,b,base) REST_16VSRS_TRANSACT(n,b,base); \
-                                       REST_16VSRS_TRANSACT(n+16,b,base)
+#ifdef __BIG_ENDIAN__
+#define STXVD2X_ROT(n,b,base)          STXVD2X(n,b,base)
+#define LXVD2X_ROT(n,b,base)           LXVD2X(n,b,base)
+#else
+#define STXVD2X_ROT(n,b,base)          XXSWAPD(n,n);           \
+                                       STXVD2X(n,b,base);      \
+                                       XXSWAPD(n,n)
 
+#define LXVD2X_ROT(n,b,base)           LXVD2X(n,b,base);       \
+                                       XXSWAPD(n,n)
+#endif
 /* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base)     li b,THREAD_VSR0+(16*(n));  STXVD2X(n,R##base,R##b)
+#define SAVE_VSR(n,b,base)     li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)
 #define SAVE_2VSRS(n,b,base)   SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
 #define SAVE_4VSRS(n,b,base)   SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
 #define SAVE_8VSRS(n,b,base)   SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
 #define SAVE_16VSRS(n,b,base)  SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
 #define SAVE_32VSRS(n,b,base)  SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base)     li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b)
+#define REST_VSR(n,b,base)     li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
 #define REST_2VSRS(n,b,base)   REST_VSR(n,b,base); REST_VSR(n+1,b,base)
 #define REST_4VSRS(n,b,base)   REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
 #define REST_8VSRS(n,b,base)   REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
@@ -832,6 +760,35 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
 #define N_SLINE        68
 #define N_SO   100
 
-#endif /*  __ASSEMBLY__ */
+/*
+ * Create an endian fixup trampoline
+ *
+ * This starts with a "tdi 0,0,0x48" instruction which is
+ * essentially a "trap never", and thus akin to a nop.
+ *
+ * Read with the wrong endianness, however, the opcode of this
+ * instruction decodes as "b . + 8", a branch over the next
+ * instruction.
+ *
+ * We use that trick to execute the trampoline below in "reverse
+ * endian" whenever the MSR_LE bit is set the "wrong" way for the
+ * endianness the kernel is built for.
+ */
 
+#ifdef CONFIG_PPC_BOOK3E
+#define FIXUP_ENDIAN
+#else
+#define FIXUP_ENDIAN                                              \
+       tdi   0,0,0x48;   /* Reverse endian of b . + 8          */ \
+       b     $+36;       /* Skip trampoline if endian is good  */ \
+       .long 0x05009f42; /* bcl 20,31,$+4                      */ \
+       .long 0xa602487d; /* mflr r10                           */ \
+       .long 0x1c004a39; /* addi r10,r10,28                    */ \
+       .long 0xa600607d; /* mfmsr r11                          */ \
+       .long 0x01006b69; /* xori r11,r11,1                     */ \
+       .long 0xa6035a7d; /* mtsrr0 r10                         */ \
+       .long 0xa6037b7d; /* mtsrr1 r11                         */ \
+       .long 0x2400004c  /* rfid                               */
+#endif /* !CONFIG_PPC_BOOK3E */
+#endif /*  __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_PPC_ASM_H */
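[Annotation] The FIXUP_ENDIAN trampoline above hinges on two encodings being byte-swapped images of each other. A small hedged check (plain userspace C, not part of the patch) that makes the trick concrete:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t tdi = 0x08000048;      /* "tdi 0,0,0x48": primary opcode 2, TO=0, rA=0, SI=0x48 */
        uint32_t rev = __builtin_bswap32(tdi);

        /* 0x48000008 is "b . + 8" (opcode 18, LI=8), so a wrong-endian
         * fetch of the tdi word simply branches over the next instruction. */
        printf("byte-swapped tdi: 0x%08x\n", rev);
        return 0;
}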
index ce4de5aed7b5c302b292bd38c69f21bd2f059038..c1583070937de03a311784c72b5c16ab7e85bff4 100644 (file)
 
 #ifdef CONFIG_VSX
 #define TS_FPRWIDTH 2
+
+#ifdef __BIG_ENDIAN__
+#define TS_FPROFFSET 0
+#define TS_VSRLOWOFFSET 1
+#else
+#define TS_FPROFFSET 1
+#define TS_VSRLOWOFFSET 0
+#endif
+
 #else
 #define TS_FPRWIDTH 1
+#define TS_FPROFFSET 0
 #endif
 
 #ifdef CONFIG_PPC64
@@ -142,10 +152,20 @@ typedef struct {
        unsigned long seg;
 } mm_segment_t;
 
-#define TS_FPROFFSET 0
-#define TS_VSRLOWOFFSET 1
-#define TS_FPR(i) fpr[i][TS_FPROFFSET]
-#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET]
+#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
+#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET]
+
+/* FP and VSX 0-31 register set */
+struct thread_fp_state {
+       u64     fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
+       u64     fpscr;          /* Floating point status */
+};
+
+/* Complete AltiVec register set including VSCR */
+struct thread_vr_state {
+       vector128       vr[32] __attribute__((aligned(16)));
+       vector128       vscr __attribute__((aligned(16)));
+};
 
 struct thread_struct {
        unsigned long   ksp;            /* Kernel stack pointer */
@@ -198,13 +218,8 @@ struct thread_struct {
        unsigned long   dvc2;
 #endif
 #endif
-       /* FP and VSX 0-31 register set */
-       double          fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
-       struct {
-
-               unsigned int pad;
-               unsigned int val;       /* Floating point status */
-       } fpscr;
+       struct thread_fp_state  fp_state;
+       struct thread_fp_state  *fp_save_area;
        int             fpexc_mode;     /* floating-point exception mode */
        unsigned int    align_ctl;      /* alignment handling control */
 #ifdef CONFIG_PPC64
@@ -222,10 +237,8 @@ struct thread_struct {
        struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
        unsigned long   trap_nr;        /* last trap # on this thread */
 #ifdef CONFIG_ALTIVEC
-       /* Complete AltiVec register set */
-       vector128       vr[32] __attribute__((aligned(16)));
-       /* AltiVec status */
-       vector128       vscr __attribute__((aligned(16)));
+       struct thread_vr_state vr_state;
+       struct thread_vr_state *vr_save_area;
        unsigned long   vrsave;
        int             used_vr;        /* set if process has used altivec */
 #endif /* CONFIG_ALTIVEC */
@@ -262,13 +275,8 @@ struct thread_struct {
         * transact_fpr[] is the new set of transactional values.
         * VRs work the same way.
         */
-       double          transact_fpr[32][TS_FPRWIDTH];
-       struct {
-               unsigned int pad;
-               unsigned int val;       /* Floating point status */
-       } transact_fpscr;
-       vector128       transact_vr[32] __attribute__((aligned(16)));
-       vector128       transact_vscr __attribute__((aligned(16)));
+       struct thread_fp_state transact_fp;
+       struct thread_vr_state transact_vr;
        unsigned long   transact_vrsave;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -322,8 +330,6 @@ struct thread_struct {
        .ksp = INIT_SP, \
        .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
        .fs = KERNEL_DS, \
-       .fpr = {{0}}, \
-       .fpscr = { .val = 0, }, \
        .fpexc_mode = 0, \
        .ppr = INIT_PPR, \
 }
@@ -361,6 +367,11 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
 extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
 extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
 
+extern void load_fp_state(struct thread_fp_state *fp);
+extern void store_fp_state(struct thread_fp_state *fp);
+extern void load_vr_state(struct thread_vr_state *vr);
+extern void store_vr_state(struct thread_vr_state *vr);
+
 static inline unsigned int __unpack_fe01(unsigned long msr_bits)
 {
        return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
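[Annotation] The processor.h rework above gathers the FP/VSX and AltiVec state into struct thread_fp_state and struct thread_vr_state, adds the optional *_save_area pointers, and makes TS_FPROFFSET endian-aware; the new load_fp_state()/store_fp_state() and load_vr_state()/store_vr_state() helpers operate on those structures directly. A hedged sketch of touching the new layout; the function name and pr_debug message are illustrative only, and the usual kernel headers (<linux/sched.h>, <linux/printk.h>) are assumed:

static void example_dump_fp(struct task_struct *tsk)
{
        /* TS_FPR(i) now expands to fp_state.fpr[i][TS_FPROFFSET], picking the
         * correct half of the VSX doubleword pair on either endianness. */
        u64 fpr0  = tsk->thread.TS_FPR(0);
        u64 fpscr = tsk->thread.fp_state.fpscr;

        pr_debug("fpr0=%016llx fpscr=%016llx\n",
                 (unsigned long long)fpr0, (unsigned long long)fpscr);
}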
index 10d1ef016bf1b73f199387b9640cf9ffb8429b6f..126f6e98f84de687d1d26d2ab83ff4b1d2a6fc86 100644 (file)
 #define MSR_64BIT      MSR_SF
 
 /* Server variant */
-#define MSR_           (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#define __MSR          (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#ifdef __BIG_ENDIAN__
+#define MSR_           __MSR
+#else
+#define MSR_           (__MSR | MSR_LE)
+#endif
 #define MSR_KERNEL     (MSR_ | MSR_64BIT)
 #define MSR_USER32     (MSR_ | MSR_PR | MSR_EE)
 #define MSR_USER64     (MSR_USER32 | MSR_64BIT)
index 0cabfd7bc2d1e5603d1cfa64e49c3cbd4557224e..07dcdcfdaefc4b4b4ed5fd01de00ffc8ebefa712 100644 (file)
@@ -54,8 +54,8 @@ struct scom_controller {
        scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count);
        void (*unmap)(scom_map_t map);
 
-       u64 (*read)(scom_map_t map, u32 reg);
-       void (*write)(scom_map_t map, u32 reg, u64 value);
+       int (*read)(scom_map_t map, u32 reg, u64 *value);
+       int (*write)(scom_map_t map, u32 reg, u64 value);
 };
 
 extern const struct scom_controller *scom_controller;
@@ -133,10 +133,18 @@ static inline void scom_unmap(scom_map_t map)
  * scom_read - Read a SCOM register
  * @map: Result of scom_map
  * @reg: Register index within that map
+ * @value: Updated with the value read
+ *
+ * Returns 0 (success) or a negative error code
  */
-static inline u64 scom_read(scom_map_t map, u32 reg)
+static inline int scom_read(scom_map_t map, u32 reg, u64 *value)
 {
-       return scom_controller->read(map, reg);
+       int rc;
+
+       rc = scom_controller->read(map, reg, value);
+       if (rc)
+               *value = 0xfffffffffffffffful;
+       return rc;
 }
 
 /**
@@ -144,12 +152,15 @@ static inline u64 scom_read(scom_map_t map, u32 reg)
  * @map: Result of scom_map
  * @reg: Register index within that map
  * @value: Value to write
+ *
+ * Returns 0 (success) or a negative error code
  */
-static inline void scom_write(scom_map_t map, u32 reg, u64 value)
+static inline int scom_write(scom_map_t map, u32 reg, u64 value)
 {
-       scom_controller->write(map, reg, value);
+       return scom_controller->write(map, reg, value);
 }
 
+
 #endif /* CONFIG_PPC_SCOM */
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
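[Annotation] With the hunks above, scom_read() fills a caller-supplied u64 and both accessors return 0 or a negative error code instead of a raw value / void. A hedged caller sketch; the function name, register handling and pr_info message are invented for illustration:

static int example_scom_dump(scom_map_t map, u32 reg)
{
        u64 val;
        int rc;

        rc = scom_read(map, reg, &val);         /* 0 on success, negative on error */
        if (rc)
                return rc;                      /* on failure, val was forced to all ones */

        pr_info("SCOM reg %u = 0x%016llx\n", reg, (unsigned long long)val);

        /* scom_write() now reports errors as well. */
        return scom_write(map, reg, val);
}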
index 3a7a67a0d006cfe24d0b2bd626e7361d771dde1b..d89beaba26ff95d2ab0ed48cdaf1ba7fc8f3bd73 100644 (file)
 #define FP_EX_DIVZERO         (1 << (31 - 5))
 #define FP_EX_INEXACT         (1 << (31 - 6))
 
-#define __FPU_FPSCR    (current->thread.fpscr.val)
+#define __FPU_FPSCR    (current->thread.fp_state.fpscr)
 
 /* We only actually write to the destination register
  * if exceptions signalled (if any) will not trap.
index e40010abcaf134f53bbcf639bf6999b856a42a2a..0dffad6bcc846725a273daff18593c7b03305060 100644 (file)
@@ -10,7 +10,9 @@
 #define __HAVE_ARCH_STRNCMP
 #define __HAVE_ARCH_STRCAT
 #define __HAVE_ARCH_MEMSET
+#ifdef __BIG_ENDIAN__
 #define __HAVE_ARCH_MEMCPY
+#endif
 #define __HAVE_ARCH_MEMMOVE
 #define __HAVE_ARCH_MEMCMP
 #define __HAVE_ARCH_MEMCHR
@@ -22,7 +24,9 @@ extern int strcmp(const char *,const char *);
 extern int strncmp(const char *, const char *, __kernel_size_t);
 extern char * strcat(char *, const char *);
 extern void * memset(void *,int,__kernel_size_t);
+#ifdef __BIG_ENDIAN__
 extern void * memcpy(void *,const void *,__kernel_size_t);
+#endif
 extern void * memmove(void *,const void *,__kernel_size_t);
 extern int memcmp(const void *,const void *,__kernel_size_t);
 extern void * memchr(const void *,int,__kernel_size_t);
index d0b6d4ac6dda58c99d7fc6c884f0bbc11d4561e0..213a5f2b0717da8240f59e1b01c4df31ddebac17 100644 (file)
@@ -8,6 +8,8 @@
 #include <linux/kernel.h>
 #include <asm/asm-compat.h>
 
+#ifdef __BIG_ENDIAN__
+
 struct word_at_a_time {
        const unsigned long high_bits, low_bits;
 };
@@ -38,4 +40,73 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
        return (val + c->high_bits) & ~rhs;
 }
 
+#else
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+       const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+       return mask*0x0001020304050608ul >> 56;
+}
+
+#else  /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+       /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+       long a = (0x0ff0001+mask) >> 23;
+       /* Fix the 1 for 00 case */
+       return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+       unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+       *bits = mask;
+       return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+       return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+       bits = (bits - 1) & ~bits;
+       return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+       return count_masked_bytes(mask);
+}
+#endif
+
 #endif /* _ASM_WORD_AT_A_TIME_H */
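[Annotation] The little-endian block added above provides the same word-at-a-time interface the generic string routines expect. A hedged illustration of the calling pattern; the helper name is invented, and the real callers are the generic strlen/strnlen-style routines:

/* Index of the first NUL byte in 'word', or sizeof(word) if none. */
static inline unsigned long example_first_zero_byte(unsigned long word)
{
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        unsigned long bits, mask;

        if (!has_zero(word, &bits, &constants))
                return sizeof(word);

        bits = prep_zero_mask(word, bits, &constants);
        mask = create_zero_mask(bits);
        return find_zero(mask);
}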
index aa6cc4fac9651326b69a7648b13009f30dd93774..ca931d0740003e71f786fcb15db16cdf53aaadb4 100644 (file)
@@ -7,6 +7,10 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
+#ifdef __LITTLE_ENDIAN__
+#include <linux/byteorder/little_endian.h>
+#else
 #include <linux/byteorder/big_endian.h>
+#endif
 
 #endif /* _ASM_POWERPC_BYTEORDER_H */
index a6d74467c9edb476ad1175e34670848f946528c8..fa698324a1fd13e50ec0d5e01d526b5a00a2b0b8 100644 (file)
@@ -83,4 +83,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_POWERPC_SOCKET_H */
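[Annotation] SO_MAX_PACING_RATE, mirrored into the powerpc socket.h here, caps the pacing rate (in bytes per second) that the fq scheduler / TCP applies to a socket. A hedged userspace sketch with an arbitrary 1 MB/s cap; the function name is illustrative and error handling is left to the caller:

#include <sys/socket.h>

static int example_cap_pacing(int fd)
{
        unsigned int rate = 1024 * 1024;        /* bytes per second */

        return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                          &rate, sizeof(rate));
}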
index a27ccd5dc6b9a5bb18435af2a862d7b5ddb619be..6e3f9772aaba7f27592708dd82fb238f93602e9e 100644 (file)
@@ -54,8 +54,6 @@ struct aligninfo {
 /* DSISR bits reported for a DCBZ instruction: */
 #define DCBZ   0x5f    /* 8xx/82xx dcbz faults when cache not enabled */
 
-#define SWAP(a, b)     (t = (a), (a) = (b), (b) = t)
-
 /*
  * The PowerPC stores certain bits of the instruction that caused the
  * alignment exception in the DSISR register.  This array maps those
@@ -264,6 +262,7 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
 
 #define SWIZ_PTR(p)            ((unsigned char __user *)((p) ^ swiz))
 
+#ifdef __BIG_ENDIAN__
 static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
                            unsigned int reg, unsigned int nb,
                            unsigned int flags, unsigned int instr,
@@ -392,6 +391,7 @@ static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
                return -EFAULT;
        return 1;       /* exception handled and fixed up */
 }
+#endif
 
 #ifdef CONFIG_SPE
 
@@ -458,7 +458,7 @@ static struct aligninfo spe_aligninfo[32] = {
 static int emulate_spe(struct pt_regs *regs, unsigned int reg,
                       unsigned int instr)
 {
-       int t, ret;
+       int ret;
        union {
                u64 ll;
                u32 w[2];
@@ -581,24 +581,18 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
        if (flags & SW) {
                switch (flags & 0xf0) {
                case E8:
-                       SWAP(data.v[0], data.v[7]);
-                       SWAP(data.v[1], data.v[6]);
-                       SWAP(data.v[2], data.v[5]);
-                       SWAP(data.v[3], data.v[4]);
+                       data.ll = swab64(data.ll);
                        break;
                case E4:
-
-                       SWAP(data.v[0], data.v[3]);
-                       SWAP(data.v[1], data.v[2]);
-                       SWAP(data.v[4], data.v[7]);
-                       SWAP(data.v[5], data.v[6]);
+                       data.w[0] = swab32(data.w[0]);
+                       data.w[1] = swab32(data.w[1]);
                        break;
                /* Its half word endian */
                default:
-                       SWAP(data.v[0], data.v[1]);
-                       SWAP(data.v[2], data.v[3]);
-                       SWAP(data.v[4], data.v[5]);
-                       SWAP(data.v[6], data.v[7]);
+                       data.h[0] = swab16(data.h[0]);
+                       data.h[1] = swab16(data.h[1]);
+                       data.h[2] = swab16(data.h[2]);
+                       data.h[3] = swab16(data.h[3]);
                        break;
                }
        }
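[Annotation] The open-coded SWAP() shuffles above are replaced with the standard byte-swap helpers from <linux/swab.h>. A tiny hedged illustration of their effect; values are arbitrary and the function name is invented:

static void example_swabs(void)
{
        u64 a = swab64(0x0102030405060708ULL);  /* -> 0x0807060504030201 */
        u32 b = swab32(0x11223344);             /* -> 0x44332211 */
        u16 c = swab16(0xaabb);                 /* -> 0xbbaa */

        pr_debug("%016llx %08x %04x\n", (unsigned long long)a, b, c);
}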
@@ -658,14 +652,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
        flush_vsx_to_thread(current);
 
        if (reg < 32)
-               ptr = (char *) &current->thread.TS_FPR(reg);
+               ptr = (char *) &current->thread.fp_state.fpr[reg][0];
        else
-               ptr = (char *) &current->thread.vr[reg - 32];
+               ptr = (char *) &current->thread.vr_state.vr[reg - 32];
 
        lptr = (unsigned long *) ptr;
 
+#ifdef __LITTLE_ENDIAN__
+       if (flags & SW) {
+               elsize = length;
+               sw = length-1;
+       } else {
+               /*
+                * The elements are BE ordered, even in LE mode, so process
+                * them in reverse order.
+                */
+               addr += length - elsize;
+
+               /* 8 byte memory accesses go in the top 8 bytes of the VR */
+               if (length == 8)
+                       ptr += 8;
+       }
+#else
        if (flags & SW)
                sw = elsize-1;
+#endif
 
        for (j = 0; j < length; j += elsize) {
                for (i = 0; i < elsize; ++i) {
@@ -675,19 +686,31 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
                                ret |= __get_user(ptr[i^sw], addr + i);
                }
                ptr  += elsize;
+#ifdef __LITTLE_ENDIAN__
+               addr -= elsize;
+#else
                addr += elsize;
+#endif
        }
 
+#ifdef __BIG_ENDIAN__
+#define VSX_HI 0
+#define VSX_LO 1
+#else
+#define VSX_HI 1
+#define VSX_LO 0
+#endif
+
        if (!ret) {
                if (flags & U)
                        regs->gpr[areg] = regs->dar;
 
                /* Splat load copies the same data to top and bottom 8 bytes */
                if (flags & SPLT)
-                       lptr[1] = lptr[0];
-               /* For 8 byte loads, zero the top 8 bytes */
+                       lptr[VSX_LO] = lptr[VSX_HI];
+               /* For 8 byte loads, zero the low 8 bytes */
                else if (!(flags & ST) && (8 == length))
-                       lptr[1] = 0;
+                       lptr[VSX_LO] = 0;
        } else
                return -EFAULT;
 
@@ -710,18 +733,28 @@ int fix_alignment(struct pt_regs *regs)
        unsigned int dsisr;
        unsigned char __user *addr;
        unsigned long p, swiz;
-       int ret, t;
-       union {
+       int ret, i;
+       union data {
                u64 ll;
                double dd;
                unsigned char v[8];
                struct {
+#ifdef __LITTLE_ENDIAN__
+                       int      low32;
+                       unsigned hi32;
+#else
                        unsigned hi32;
                        int      low32;
+#endif
                } x32;
                struct {
+#ifdef __LITTLE_ENDIAN__
+                       short         low16;
+                       unsigned char hi48[6];
+#else
                        unsigned char hi48[6];
                        short         low16;
+#endif
                } x16;
        } data;
 
@@ -780,8 +813,9 @@ int fix_alignment(struct pt_regs *regs)
 
        /* Byteswap little endian loads and stores */
        swiz = 0;
-       if (regs->msr & MSR_LE) {
+       if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
                flags ^= SW;
+#ifdef __BIG_ENDIAN__
                /*
                 * So-called "PowerPC little endian" mode works by
                 * swizzling addresses rather than by actually doing
@@ -794,6 +828,7 @@ int fix_alignment(struct pt_regs *regs)
                 */
                if (cpu_has_feature(CPU_FTR_PPC_LE))
                        swiz = 7;
+#endif
        }
 
        /* DAR has the operand effective address */
@@ -818,7 +853,7 @@ int fix_alignment(struct pt_regs *regs)
                        elsize = 8;
 
                flags = 0;
-               if (regs->msr & MSR_LE)
+               if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE))
                        flags |= SW;
                if (instruction & 0x100)
                        flags |= ST;
@@ -847,9 +882,13 @@ int fix_alignment(struct pt_regs *regs)
         * function
         */
        if (flags & M) {
+#ifdef __BIG_ENDIAN__
                PPC_WARN_ALIGNMENT(multiple, regs);
                return emulate_multiple(regs, addr, reg, nb,
                                        flags, instr, swiz);
+#else
+               return -EFAULT;
+#endif
        }
 
        /* Verify the address of the operand */
@@ -868,8 +907,12 @@ int fix_alignment(struct pt_regs *regs)
 
        /* Special case for 16-byte FP loads and stores */
        if (nb == 16) {
+#ifdef __BIG_ENDIAN__
                PPC_WARN_ALIGNMENT(fp_pair, regs);
                return emulate_fp_pair(addr, reg, flags);
+#else
+               return -EFAULT;
+#endif
        }
 
        PPC_WARN_ALIGNMENT(unaligned, regs);
@@ -878,32 +921,36 @@ int fix_alignment(struct pt_regs *regs)
         * get it from register values
         */
        if (!(flags & ST)) {
-               data.ll = 0;
-               ret = 0;
-               p = (unsigned long) addr;
+               unsigned int start = 0;
+
                switch (nb) {
-               case 8:
-                       ret |= __get_user_inatomic(data.v[0], SWIZ_PTR(p++));
-                       ret |= __get_user_inatomic(data.v[1], SWIZ_PTR(p++));
-                       ret |= __get_user_inatomic(data.v[2], SWIZ_PTR(p++));
-                       ret |= __get_user_inatomic(data.v[3], SWIZ_PTR(p++));
                case 4:
-                       ret |= __get_user_inatomic(data.v[4], SWIZ_PTR(p++));
-                       ret |= __get_user_inatomic(data.v[5], SWIZ_PTR(p++));
+                       start = offsetof(union data, x32.low32);
+                       break;
                case 2:
-                       ret |= __get_user_inatomic(data.v[6], SWIZ_PTR(p++));
-                       ret |= __get_user_inatomic(data.v[7], SWIZ_PTR(p++));
-                       if (unlikely(ret))
-                               return -EFAULT;
+                       start = offsetof(union data, x16.low16);
+                       break;
                }
+
+               data.ll = 0;
+               ret = 0;
+               p = (unsigned long)addr;
+
+               for (i = 0; i < nb; i++)
+                       ret |= __get_user_inatomic(data.v[start + i],
+                                                  SWIZ_PTR(p++));
+
+               if (unlikely(ret))
+                       return -EFAULT;
+
        } else if (flags & F) {
-               data.dd = current->thread.TS_FPR(reg);
+               data.ll = current->thread.TS_FPR(reg);
                if (flags & S) {
                        /* Single-precision FP store requires conversion... */
 #ifdef CONFIG_PPC_FPU
                        preempt_disable();
                        enable_kernel_fp();
-                       cvt_df(&data.dd, (float *)&data.v[4]);
+                       cvt_df(&data.dd, (float *)&data.x32.low32);
                        preempt_enable();
 #else
                        return 0;
@@ -915,17 +962,13 @@ int fix_alignment(struct pt_regs *regs)
        if (flags & SW) {
                switch (nb) {
                case 8:
-                       SWAP(data.v[0], data.v[7]);
-                       SWAP(data.v[1], data.v[6]);
-                       SWAP(data.v[2], data.v[5]);
-                       SWAP(data.v[3], data.v[4]);
+                       data.ll = swab64(data.ll);
                        break;
                case 4:
-                       SWAP(data.v[4], data.v[7]);
-                       SWAP(data.v[5], data.v[6]);
+                       data.x32.low32 = swab32(data.x32.low32);
                        break;
                case 2:
-                       SWAP(data.v[6], data.v[7]);
+                       data.x16.low16 = swab16(data.x16.low16);
                        break;
                }
        }
@@ -947,7 +990,7 @@ int fix_alignment(struct pt_regs *regs)
 #ifdef CONFIG_PPC_FPU
                preempt_disable();
                enable_kernel_fp();
-               cvt_fd((float *)&data.v[4], &data.dd);
+               cvt_fd((float *)&data.x32.low32, &data.dd);
                preempt_enable();
 #else
                return 0;
@@ -957,25 +1000,28 @@ int fix_alignment(struct pt_regs *regs)
 
        /* Store result to memory or update registers */
        if (flags & ST) {
-               ret = 0;
-               p = (unsigned long) addr;
+               unsigned int start = 0;
+
                switch (nb) {
-               case 8:
-                       ret |= __put_user_inatomic(data.v[0], SWIZ_PTR(p++));
-                       ret |= __put_user_inatomic(data.v[1], SWIZ_PTR(p++));
-                       ret |= __put_user_inatomic(data.v[2], SWIZ_PTR(p++));
-                       ret |= __put_user_inatomic(data.v[3], SWIZ_PTR(p++));
                case 4:
-                       ret |= __put_user_inatomic(data.v[4], SWIZ_PTR(p++));
-                       ret |= __put_user_inatomic(data.v[5], SWIZ_PTR(p++));
+                       start = offsetof(union data, x32.low32);
+                       break;
                case 2:
-                       ret |= __put_user_inatomic(data.v[6], SWIZ_PTR(p++));
-                       ret |= __put_user_inatomic(data.v[7], SWIZ_PTR(p++));
+                       start = offsetof(union data, x16.low16);
+                       break;
                }
+
+               ret = 0;
+               p = (unsigned long)addr;
+
+               for (i = 0; i < nb; i++)
+                       ret |= __put_user_inatomic(data.v[start + i],
+                                                  SWIZ_PTR(p++));
+
                if (unlikely(ret))
                        return -EFAULT;
        } else if (flags & F)
-               current->thread.TS_FPR(reg) = data.dd;
+               current->thread.TS_FPR(reg) = data.ll;
        else
                regs->gpr[reg] = data.ll;
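[Annotation] The fix_alignment() rework above drops the per-size SWAP() unrolling in favour of placing the operand at an offsetof()-chosen position inside the union, so data.ll, data.x32.low32 and data.x16.low16 line up on both endiannesses. A stand-alone hedged sketch of the idea, userspace-style and simplified; names are illustrative, and the endian test also accepts the generic __BYTE_ORDER__ macro for portability outside the kernel build:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#if defined(__LITTLE_ENDIAN__) || \
    (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define EXAMPLE_LE 1
#else
#define EXAMPLE_LE 0
#endif

union example_data {
        uint64_t ll;
        unsigned char v[8];
        struct {
#if EXAMPLE_LE
                int32_t  low32;
                uint32_t hi32;
#else
                uint32_t hi32;
                int32_t  low32;
#endif
        } x32;
        struct {
#if EXAMPLE_LE
                int16_t       low16;
                unsigned char hi48[6];
#else
                unsigned char hi48[6];
                int16_t       low16;
#endif
        } x16;
};

/* Copy an nb-byte operand (2, 4 or 8 bytes) so it lands in the field
 * matching its size, whatever the host endianness. */
static uint64_t example_load(const unsigned char *addr, unsigned int nb)
{
        union example_data d = { .ll = 0 };
        size_t start = 0;

        if (nb == 4)
                start = offsetof(union example_data, x32.low32);
        else if (nb == 2)
                start = offsetof(union example_data, x16.low16);

        memcpy(&d.v[start], addr, nb);
        return d.ll;    /* for nb < 8 the value sits in the low-order field */
}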
 
index 502c7a4e73f70dc1008754b7f55e5ae164f25a76..6278edddc3f8fd7deac04c8af6162bc8f22beda3 100644 (file)
@@ -90,16 +90,17 @@ int main(void)
        DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
 #endif
        DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
-       DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
-       DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
+       DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state));
+       DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area));
+       DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr));
 #ifdef CONFIG_ALTIVEC
-       DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
+       DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state));
+       DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area));
        DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
-       DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
        DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
+       DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr));
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
-       DEFINE(THREAD_VSR0, offsetof(struct thread_struct, fpr));
        DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
 #endif /* CONFIG_VSX */
 #ifdef CONFIG_PPC64
@@ -143,20 +144,12 @@ int main(void)
        DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
        DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
        DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
-       DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
-                                        transact_vr[0]));
-       DEFINE(THREAD_TRANSACT_VSCR, offsetof(struct thread_struct,
-                                         transact_vscr));
+       DEFINE(THREAD_TRANSACT_VRSTATE, offsetof(struct thread_struct,
+                                                transact_vr));
        DEFINE(THREAD_TRANSACT_VRSAVE, offsetof(struct thread_struct,
                                            transact_vrsave));
-       DEFINE(THREAD_TRANSACT_FPR0, offsetof(struct thread_struct,
-                                         transact_fpr[0]));
-       DEFINE(THREAD_TRANSACT_FPSCR, offsetof(struct thread_struct,
-                                          transact_fpscr));
-#ifdef CONFIG_VSX
-       DEFINE(THREAD_TRANSACT_VSR0, offsetof(struct thread_struct,
-                                         transact_fpr[0]));
-#endif
+       DEFINE(THREAD_TRANSACT_FPSTATE, offsetof(struct thread_struct,
+                                                transact_fp));
        /* Local pt_regs on stack for Transactional Memory funcs. */
        DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
               sizeof(struct pt_regs) + 16);
index 55593ee2d5aacfee6ef9e86c5adfd4057c5cab10..58906d7f4c4950b5830f9706ba01cc1c3c8985f0 100644 (file)
@@ -189,8 +189,7 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
        }
 
        /* If PCI-E capable, dump PCI-E cap 10, and the AER */
-       cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
-       if (cap) {
+       if (pci_is_pcie(dev)) {
                n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
                printk(KERN_WARNING
                       "EEH: PCI-E capabilities and status follow:\n");
@@ -327,11 +326,11 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
        /* Isolate the PHB and send event */
        eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
        eeh_serialize_unlock(flags);
-       eeh_send_failure_event(phb_pe);
 
        pr_err("EEH: PHB#%x failure detected\n",
                phb_pe->phb->global_number);
        dump_stack();
+       eeh_send_failure_event(phb_pe);
 
        return 1;
 out:
@@ -454,8 +453,6 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
        eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
        eeh_serialize_unlock(flags);
 
-       eeh_send_failure_event(pe);
-
        /* Most EEH events are due to device driver bugs.  Having
         * a stack trace will help the device-driver authors figure
         * out what happened.  So print that out.
@@ -464,6 +461,8 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
                pe->addr, pe->phb->global_number);
        dump_stack();
 
+       eeh_send_failure_event(pe);
+
        return 1;
 
 dn_unlock:
index c04cdf70d487536614899da24fe88beac50a0f54..12679cd43e0c4600c52d87695145760501a78530 100644 (file)
@@ -673,9 +673,7 @@ _GLOBAL(ret_from_except_lite)
 
 resume_kernel:
        /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
-       CURRENT_THREAD_INFO(r9, r1)
-       ld      r8,TI_FLAGS(r9)
-       andis.  r8,r8,_TIF_EMULATE_STACK_STORE@h
+       andis.  r8,r4,_TIF_EMULATE_STACK_STORE@h
        beq+    1f
 
        addi    r8,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */
@@ -1017,7 +1015,7 @@ _GLOBAL(enter_rtas)
        
         li      r9,1
         rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
-       ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
+       ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
        andc    r6,r0,r9
        sync                            /* disable interrupts so SRR0/1 */
        mtmsrd  r0                      /* don't get trashed */
@@ -1032,6 +1030,8 @@ _GLOBAL(enter_rtas)
        b       .       /* prevent speculative execution */
 
 _STATIC(rtas_return_loc)
+       FIXUP_ENDIAN
+
        /* relocation is off at this point */
        GET_PACA(r4)
        clrldi  r4,r4,2                 /* convert to realmode address */
@@ -1103,28 +1103,30 @@ _GLOBAL(enter_prom)
        std     r10,_CCR(r1)
        std     r11,_MSR(r1)
 
-       /* Get the PROM entrypoint */
-       mtlr    r4
+       /* Put PROM address in SRR0 */
+       mtsrr0  r4
 
-       /* Switch MSR to 32 bits mode
+       /* Setup our trampoline return addr in LR */
+       bcl     20,31,$+4
+0:     mflr    r4
+       addi    r4,r4,(1f - 0b)
+       mtlr    r4
+
+       /* Prepare a 32-bit mode big endian MSR
         */
 #ifdef CONFIG_PPC_BOOK3E
        rlwinm  r11,r11,0,1,31
-       mtmsr   r11
+       mtsrr1  r11
+       rfi
 #else /* CONFIG_PPC_BOOK3E */
-        mfmsr   r11
-        li      r12,1
-        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
-        andc    r11,r11,r12
-        li      r12,1
-        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
-        andc    r11,r11,r12
-        mtmsrd  r11
+       LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+       andc    r11,r11,r12
+       mtsrr1  r11
+       rfid
 #endif /* CONFIG_PPC_BOOK3E */
-        isync
 
-       /* Enter PROM here... */
-       blrl
+1:     /* Return from OF */
+       FIXUP_ENDIAN
 
        /* Just make sure that r1 top 32 bits didn't get
         * corrupt by OF
index 2d067049db27f1e56ca421c0a223e958125f60ad..68d74b45232d99a74c7485bc6228ebf2225703a7 100644 (file)
@@ -607,6 +607,7 @@ kernel_dbg_exc:
        NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
                                PROLOG_ADDITION_NONE)
        EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
+       CHECK_NAPPING()
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .performance_monitor_exception
        b       .ret_from_except_lite
index caeaabf11a2fbb3cd7d63555a19600f4e13e2618..4dca05e91e953e85b495722ca1a44633ca917988 100644 (file)
@@ -35,15 +35,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);                                  \
 2:     REST_32VSRS(n,c,base);                                          \
 3:
 
-#define __REST_32FPVSRS_TRANSACT(n,c,base)                             \
-BEGIN_FTR_SECTION                                                      \
-       b       2f;                                                     \
-END_FTR_SECTION_IFSET(CPU_FTR_VSX);                                    \
-       REST_32FPRS_TRANSACT(n,base);                                   \
-       b       3f;                                                     \
-2:     REST_32VSRS_TRANSACT(n,c,base);                                 \
-3:
-
 #define __SAVE_32FPVSRS(n,c,base)                                      \
 BEGIN_FTR_SECTION                                                      \
        b       2f;                                                     \
@@ -54,40 +45,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);                                 \
 3:
 #else
 #define __REST_32FPVSRS(n,b,base)      REST_32FPRS(n, base)
-#define __REST_32FPVSRS_TRANSACT(n,b,base)     REST_32FPRS(n, base)
 #define __SAVE_32FPVSRS(n,b,base)      SAVE_32FPRS(n, base)
 #endif
 #define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
-#define REST_32FPVSRS_TRANSACT(n,c,base) \
-       __REST_32FPVSRS_TRANSACT(n,__REG_##c,__REG_##base)
 #define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_fpu from C.
- * void do_load_up_fpu(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_fpu)
-       mflr    r0
-       std     r0, 16(r1)
-       stdu    r1, -112(r1)
-
-       subi    r6, r3, STACK_FRAME_OVERHEAD
-       /* load_up_fpu expects r12=MSR, r13=PACA, and returns
-        * with r12 = new MSR.
-        */
-       ld      r12,_MSR(r6)
-       GET_PACA(r13)
-
-       bl      load_up_fpu
-       std     r12,_MSR(r6)
-
-       ld      r0, 112+16(r1)
-       addi    r1, r1, 112
-       mtlr    r0
-       blr
-
-
 /* void do_load_up_transact_fpu(struct thread_struct *thread)
  *
  * This is similar to load_up_fpu but for the transactional version of the FP
@@ -105,9 +68,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        SYNC
        MTMSRD(r5)
 
-       lfd     fr0,THREAD_TRANSACT_FPSCR(r3)
+       addi    r7,r3,THREAD_TRANSACT_FPSTATE
+       lfd     fr0,FPSTATE_FPSCR(r7)
        MTFSF_L(fr0)
-       REST_32FPVSRS_TRANSACT(0, R4, R3)
+       REST_32FPVSRS(0, R4, R7)
 
        /* FP/VSX off again */
        MTMSRD(r6)
@@ -116,6 +80,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        blr
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
+/*
+ * Load state from memory into FP registers including FPSCR.
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(load_fp_state)
+       lfd     fr0,FPSTATE_FPSCR(r3)
+       MTFSF_L(fr0)
+       REST_32FPVSRS(0, R4, R3)
+       blr
+
+/*
+ * Store FP state into memory, including FPSCR
+ * Assumes the caller has enabled FP in the MSR.
+ */
+_GLOBAL(store_fp_state)
+       SAVE_32FPVSRS(0, R4, R3)
+       mffs    fr0
+       stfd    fr0,FPSTATE_FPSCR(r3)
+       blr
+
 /*
  * This task wants to use the FPU now.
  * On UP, disable FP for the task which had the FPU previously,
@@ -147,9 +131,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        beq     1f
        toreal(r4)
        addi    r4,r4,THREAD            /* want last_task_used_math->thread */
-       SAVE_32FPVSRS(0, R5, R4)
+       addi    r8,r4,THREAD_FPSTATE
+       SAVE_32FPVSRS(0, R5, R8)
        mffs    fr0
-       stfd    fr0,THREAD_FPSCR(r4)
+       stfd    fr0,FPSTATE_FPSCR(r8)
        PPC_LL  r5,PT_REGS(r4)
        toreal(r5)
        PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
@@ -160,7 +145,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif /* CONFIG_SMP */
        /* enable use of FP after return */
 #ifdef CONFIG_PPC32
-       mfspr   r5,SPRN_SPRG_THREAD             /* current task's THREAD (phys) */
+       mfspr   r5,SPRN_SPRG_THREAD     /* current task's THREAD (phys) */
        lwz     r4,THREAD_FPEXC_MODE(r5)
        ori     r9,r9,MSR_FP            /* enable FP for current */
        or      r9,r9,r4
@@ -172,9 +157,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        or      r12,r12,r4
        std     r12,_MSR(r1)
 #endif
-       lfd     fr0,THREAD_FPSCR(r5)
+       addi    r7,r5,THREAD_FPSTATE
+       lfd     fr0,FPSTATE_FPSCR(r7)
        MTFSF_L(fr0)
-       REST_32FPVSRS(0, R4, R5)
+       REST_32FPVSRS(0, R4, R7)
 #ifndef CONFIG_SMP
        subi    r4,r5,THREAD
        fromreal(r4)
@@ -206,11 +192,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        PPC_LCMPI       0,r3,0
        beqlr-                          /* if no previous owner, done */
        addi    r3,r3,THREAD            /* want THREAD of task */
+       PPC_LL  r6,THREAD_FPSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
-       PPC_LCMPI       0,r5,0
-       SAVE_32FPVSRS(0, R4 ,R3)
+       PPC_LCMPI       0,r6,0
+       bne     2f
+       addi    r6,r3,THREAD_FPSTATE
+2:     PPC_LCMPI       0,r5,0
+       SAVE_32FPVSRS(0, R4, R6)
        mffs    fr0
-       stfd    fr0,THREAD_FPSCR(r3)
+       stfd    fr0,FPSTATE_FPSCR(r6)
        beq     1f
        PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
        li      r3,MSR_FP|MSR_FE0|MSR_FE1
index 1fb78561096accfff7e60062a4fbc0bddfcfec55..9b27b293a9226903c81529a4f01aee64a7f07d23 100644 (file)
@@ -174,7 +174,11 @@ __ftrace_make_nop(struct module *mod,
 
        pr_devel(" %08x %08x\n", jmp[0], jmp[1]);
 
+#ifdef __LITTLE_ENDIAN__
+       ptr = ((unsigned long)jmp[1] << 32) + jmp[0];
+#else
        ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
+#endif
 
        /* This should match what was called */
        if (ptr != ppc_function_entry((void *)addr)) {
index 3d11d8038deec122ad9735202e98f4567998a070..2ae41aba40530f7facf916f6ce101ad3196a7363 100644 (file)
@@ -68,6 +68,7 @@ _stext:
 _GLOBAL(__start)
        /* NOP this out unconditionally */
 BEGIN_FTR_SECTION
+       FIXUP_ENDIAN
        b       .__start_initialization_multiplatform
 END_FTR_SECTION(0, 1)
 
@@ -115,6 +116,7 @@ __run_at_load:
  */
        .globl  __secondary_hold
 __secondary_hold:
+       FIXUP_ENDIAN
 #ifndef CONFIG_PPC_BOOK3E
        mfmsr   r24
        ori     r24,r24,MSR_RI
@@ -205,6 +207,7 @@ _GLOBAL(generic_secondary_thread_init)
  * as SCOM before entry).
  */
 _GLOBAL(generic_secondary_smp_init)
+       FIXUP_ENDIAN
        mr      r24,r3
        mr      r25,r4
 
index 57d286a78f86f6ff1231c695b9a10a591af30710..c7cb8c232d2f4fdedf9129ec691d8471736e2fc7 100644 (file)
@@ -495,14 +495,15 @@ void __do_irq(struct pt_regs *regs)
 void do_IRQ(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
-       struct thread_info *curtp, *irqtp;
+       struct thread_info *curtp, *irqtp, *sirqtp;
 
        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[raw_smp_processor_id()];
+       sirqtp = softirq_ctx[raw_smp_processor_id()];
 
        /* Already there ? */
-       if (unlikely(curtp == irqtp)) {
+       if (unlikely(curtp == irqtp || curtp == sirqtp)) {
                __do_irq(regs);
                set_irq_regs(old_regs);
                return;
index 22e88dd2f34ad7b43c27fe1361f8ed9213f9f960..40bd7bd4e19a88ee9b573c37deea8bc8694375b9 100644 (file)
@@ -35,7 +35,7 @@ static struct legacy_serial_info {
        phys_addr_t                     taddr;
 } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
 
-static struct __initdata of_device_id legacy_serial_parents[] = {
+static struct of_device_id legacy_serial_parents[] __initdata = {
        {.type = "soc",},
        {.type = "tsi-bridge",},
        {.type = "opb", },
index 2b0ad984536333d7a15a3b39e6c9234c11b4b669..e47d268727a4a8ed87948a4d3ab5f87ad00793b1 100644 (file)
@@ -658,6 +658,20 @@ _GLOBAL(__lshrdi3)
        or      r4,r4,r7        # LSW |= t2
        blr
 
+/*
+ * 64-bit comparison: __cmpdi2(s64 a, s64 b)
+ * Returns 0 if a < b, 1 if a == b, 2 if a > b.
+ */
+_GLOBAL(__cmpdi2)
+       cmpw    r3,r5
+       li      r3,1
+       bne     1f
+       cmplw   r4,r6
+       beqlr
+1:     li      r3,0
+       bltlr
+       li      r3,2
+       blr
 /*
  * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
  * Returns 0 if a < b, 1 if a == b, 2 if a > b.
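[Annotation] The __cmpdi2 added in this hunk is the signed companion to __ucmpdi2 described directly above, following the libgcc convention of returning 0, 1 or 2. A hedged C reference of the semantics only; the kernel exports the assembly version, and this function name is invented:

int example_cmpdi2(long long a, long long b)
{
        if (a < b)
                return 0;
        if (a == b)
                return 1;
        return 2;
}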
index 6ee59a0eb268b8614ef13b6745c3555bfdd7aebd..a102f4412392cfc7b76cfdc12e1de8f9fa22a880 100644 (file)
@@ -62,6 +62,16 @@ struct ppc64_stub_entry
    r2) into the stub. */
 static struct ppc64_stub_entry ppc64_stub =
 { .jump = {
+#ifdef __LITTLE_ENDIAN__
+       0x00, 0x00, 0x82, 0x3d, /* addis   r12,r2, <high> */
+       0x00, 0x00, 0x8c, 0x39, /* addi    r12,r12, <low> */
+       /* Save current r2 value in magic place on the stack. */
+       0x28, 0x00, 0x41, 0xf8, /* std     r2,40(r1) */
+       0x20, 0x00, 0x6c, 0xe9, /* ld      r11,32(r12) */
+       0x28, 0x00, 0x4c, 0xe8, /* ld      r2,40(r12) */
+       0xa6, 0x03, 0x69, 0x7d, /* mtctr   r11 */
+       0x20, 0x04, 0x80, 0x4e  /* bctr */
+#else
        0x3d, 0x82, 0x00, 0x00, /* addis   r12,r2, <high> */
        0x39, 0x8c, 0x00, 0x00, /* addi    r12,r12, <low> */
        /* Save current r2 value in magic place on the stack. */
@@ -70,6 +80,7 @@ static struct ppc64_stub_entry ppc64_stub =
        0xe8, 0x4c, 0x00, 0x28, /* ld      r2,40(r12) */
        0x7d, 0x69, 0x03, 0xa6, /* mtctr   r11 */
        0x4e, 0x80, 0x04, 0x20  /* bctr */
+#endif
 } };
 
 /* Count how many different 24-bit relocations (different symbol,
@@ -269,8 +280,13 @@ static inline int create_stub(Elf64_Shdr *sechdrs,
 
        *entry = ppc64_stub;
 
+#ifdef __LITTLE_ENDIAN__
+       loc1 = (Elf64_Half *)&entry->jump[0];
+       loc2 = (Elf64_Half *)&entry->jump[4];
+#else
        loc1 = (Elf64_Half *)&entry->jump[2];
        loc2 = (Elf64_Half *)&entry->jump[6];
+#endif
 
        /* Stub uses address relative to r2. */
        reladdr = (unsigned long)entry - my_r2(sechdrs, me);
index 3fc16e3beb9f8dca2ea5f260bb59d2dc20bef3f3..0620eaaaad45b419c70475485a9d052c0ddbe246 100644 (file)
@@ -46,7 +46,7 @@ struct lppaca lppaca[] = {
 static struct lppaca *extra_lppacas;
 static long __initdata lppaca_size;
 
-static void allocate_lppacas(int nr_cpus, unsigned long limit)
+static void __init allocate_lppacas(int nr_cpus, unsigned long limit)
 {
        if (nr_cpus <= NR_LPPACAS)
                return;
@@ -57,7 +57,7 @@ static void allocate_lppacas(int nr_cpus, unsigned long limit)
                                                 PAGE_SIZE, limit));
 }
 
-static struct lppaca *new_lppaca(int cpu)
+static struct lppaca * __init new_lppaca(int cpu)
 {
        struct lppaca *lp;
 
@@ -70,7 +70,7 @@ static struct lppaca *new_lppaca(int cpu)
        return lp;
 }
 
-static void free_lppacas(void)
+static void __init free_lppacas(void)
 {
        long new_size = 0, nr;
 
index 21646dbe1bb3c7a48df59ba14dea18431abe12be..3bd77edd7610ce20267a880069972624eafed62e 100644 (file)
@@ -79,10 +79,12 @@ EXPORT_SYMBOL(strlen);
 EXPORT_SYMBOL(strcmp);
 EXPORT_SYMBOL(strncmp);
 
+#ifndef CONFIG_GENERIC_CSUM
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
 EXPORT_SYMBOL(ip_fast_csum);
 EXPORT_SYMBOL(csum_tcpudp_magic);
+#endif
 
 EXPORT_SYMBOL(__copy_tofrom_user);
 EXPORT_SYMBOL(__clear_user);
@@ -98,9 +100,13 @@ EXPORT_SYMBOL(start_thread);
 
 #ifdef CONFIG_PPC_FPU
 EXPORT_SYMBOL(giveup_fpu);
+EXPORT_SYMBOL(load_fp_state);
+EXPORT_SYMBOL(store_fp_state);
 #endif
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL(giveup_altivec);
+EXPORT_SYMBOL(load_vr_state);
+EXPORT_SYMBOL(store_vr_state);
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_VSX
 EXPORT_SYMBOL(giveup_vsx);
@@ -143,10 +149,14 @@ EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__lshrdi3);
 int __ucmpdi2(unsigned long long, unsigned long long);
 EXPORT_SYMBOL(__ucmpdi2);
+int __cmpdi2(long long, long long);
+EXPORT_SYMBOL(__cmpdi2);
 #endif
 long long __bswapdi2(long long);
 EXPORT_SYMBOL(__bswapdi2);
+#ifdef __BIG_ENDIAN__
 EXPORT_SYMBOL(memcpy);
+#endif
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memcmp);
index 96d2fdf3aa9ebe3bba547fd567c5a232be20ec9a..8649a3d629e1b9f2b4bce242e3873f0b28f881cc 100644 (file)
@@ -1008,6 +1008,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
        p->thread.ptrace_bps[0] = NULL;
 #endif
 
+       p->thread.fp_save_area = NULL;
+#ifdef CONFIG_ALTIVEC
+       p->thread.vr_save_area = NULL;
+#endif
+
 #ifdef CONFIG_PPC_STD_MMU_64
        if (mmu_has_feature(MMU_FTR_SLB)) {
                unsigned long sp_vsid;
@@ -1113,12 +1118,12 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
 #endif
-       memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-       current->thread.fpscr.val = 0;
+       memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
+       current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
-       memset(current->thread.vr, 0, sizeof(current->thread.vr));
-       memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
-       current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
+       memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
+       current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
+       current->thread.vr_save_area = NULL;
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
 #endif /* CONFIG_ALTIVEC */
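
Most of the fp_state/vr_state churn in this and the following hunks comes from a single layout change: the loose thread_struct fields fpr/fpscr and vr/vscr become two container structs, reached as thread.fp_state and thread.vr_state, plus fp_save_area/vr_save_area pointers (initialised to NULL in copy_thread() above) that let the save path target a different buffer, as the giveup_altivec hunk further down shows. The BUILD_BUG_ON()s in the ptrace hunks below pin the shape: fpscr immediately follows the 32 FPR slots, and vscr immediately follows the 32 VRs. A self-contained sketch of that shape — the typedefs and the TS_FPRWIDTH value are stand-ins, not copied from the real headers:

#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;
typedef struct { uint32_t u[4]; } vector128;    /* stand-in for the kernel type */
#define TS_FPRWIDTH 1   /* 2 when CONFIG_VSX doubles each slot */

struct thread_fp_state {
        u64 fpr[32][TS_FPRWIDTH];       /* FPRs (and VSR low halves with VSX) */
        u64 fpscr;
};

struct thread_vr_state {
        vector128 vr[32];
        vector128 vscr;
};

/* Mirrors the BUILD_BUG_ON()s used in the ptrace hunks. */
_Static_assert(offsetof(struct thread_fp_state, fpscr) ==
               offsetof(struct thread_fp_state, fpr[32][0]),
               "fpscr must directly follow the FPR array");
_Static_assert(offsetof(struct thread_vr_state, vscr) ==
               offsetof(struct thread_vr_state, vr[32]),
               "vscr must directly follow the VR array");

int main(void) { return 0; }

vrsave is not part of the new container, which is why the signal and AltiVec hunks keep accessing current->thread.vrsave directly.
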
index 5fe2842e8bab7cc4013c987c74791c00005c51ee..cb64a6e1dc5186918b4d2052e52e961cfb14eedb 100644 (file)
@@ -858,7 +858,8 @@ static void __init prom_send_capabilities(void)
 {
        ihandle root;
        prom_arg_t ret;
-       __be32 *cores;
+       u32 cores;
+       unsigned char *ptcores;
 
        root = call_prom("open", 1, 1, ADDR("/"));
        if (root != 0) {
@@ -868,15 +869,30 @@ static void __init prom_send_capabilities(void)
                 * (we assume this is the same for all cores) and use it to
                 * divide NR_CPUS.
                 */
-               cores = (__be32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
-               if (be32_to_cpup(cores) != NR_CPUS) {
+
+               /* The core count value may start at an unaligned address. If
+                * such a word access lands on a cache line boundary, it raises
+                * an exception that may not be handled at this point, so force
+                * a byte-by-byte access to avoid it.
+                */
+               ptcores = &ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
+               cores = 0;
+               cores |= ptcores[0] << 24;
+               cores |= ptcores[1] << 16;
+               cores |= ptcores[2] << 8;
+               cores |= ptcores[3];
+               if (cores != NR_CPUS) {
                        prom_printf("WARNING ! "
                                    "ibm_architecture_vec structure inconsistent: %lu!\n",
-                                   be32_to_cpup(cores));
+                                   cores);
                } else {
-                       *cores = cpu_to_be32(DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()));
+                       cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
                        prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
-                                   be32_to_cpup(cores), NR_CPUS);
+                                   cores, NR_CPUS);
+                       ptcores[0] = (cores >> 24) & 0xff;
+                       ptcores[1] = (cores >> 16) & 0xff;
+                       ptcores[2] = (cores >> 8) & 0xff;
+                       ptcores[3] = cores & 0xff;
                }
 
                /* try calling the ibm,client-architecture-support method */
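
The replacement above avoids a potentially unaligned 32-bit access into ibm_architecture_vec by assembling and scattering the value one byte at a time, keeping the in-memory representation big-endian regardless of how the kernel itself runs. The same pattern in a standalone sketch (the kernel's get_unaligned_be32()/put_unaligned_be32() helpers do an equivalent job, but are not used in this early-boot path):

#include <assert.h>
#include <stdint.h>

/* Read a 32-bit big-endian value one byte at a time, so the access is
 * safe even when p is not 4-byte aligned. */
static uint32_t read_be32_bytewise(const unsigned char *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Write it back the same way. */
static void write_be32_bytewise(unsigned char *p, uint32_t v)
{
        p[0] = (v >> 24) & 0xff;
        p[1] = (v >> 16) & 0xff;
        p[2] = (v >> 8) & 0xff;
        p[3] = v & 0xff;
}

int main(void)
{
        unsigned char buf[7] = { 0 };
        unsigned char *unaligned = buf + 1;     /* deliberately misaligned */

        write_be32_bytewise(unaligned, 2048);   /* e.g. a core count */
        assert(unaligned[0] == 0x00 && unaligned[1] == 0x00 &&
               unaligned[2] == 0x08 && unaligned[3] == 0x00);
        assert(read_be32_bytewise(unaligned) == 2048);
        return 0;
}
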
index 9a0d24c390a3535e16c934f80ec19695da04d095..1ca589c9ec6d5a85433dac34ea590ab379e7144c 100644 (file)
@@ -362,7 +362,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
                   void *kbuf, void __user *ubuf)
 {
 #ifdef CONFIG_VSX
-       double buf[33];
+       u64 buf[33];
        int i;
 #endif
        flush_fp_to_thread(target);
@@ -371,15 +371,15 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
        /* copy to local buffer then write that out */
        for (i = 0; i < 32 ; i++)
                buf[i] = target->thread.TS_FPR(i);
-       memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
+       buf[32] = target->thread.fp_state.fpscr;
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
 
 #else
-       BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
-                    offsetof(struct thread_struct, TS_FPR(32)));
+       BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+                    offsetof(struct thread_fp_state, fpr[32][0]));
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                  &target->thread.fpr, 0, -1);
+                                  &target->thread.fp_state, 0, -1);
 #endif
 }
 
@@ -388,7 +388,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
 #ifdef CONFIG_VSX
-       double buf[33];
+       u64 buf[33];
        int i;
 #endif
        flush_fp_to_thread(target);
@@ -400,14 +400,14 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                return i;
        for (i = 0; i < 32 ; i++)
                target->thread.TS_FPR(i) = buf[i];
-       memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
+       target->thread.fp_state.fpscr = buf[32];
        return 0;
 #else
-       BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
-                    offsetof(struct thread_struct, TS_FPR(32)));
+       BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+                    offsetof(struct thread_fp_state, fpr[32][0]));
 
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                 &target->thread.fpr, 0, -1);
+                                 &target->thread.fp_state, 0, -1);
 #endif
 }
 
@@ -440,11 +440,11 @@ static int vr_get(struct task_struct *target, const struct user_regset *regset,
 
        flush_altivec_to_thread(target);
 
-       BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
-                    offsetof(struct thread_struct, vr[32]));
+       BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+                    offsetof(struct thread_vr_state, vr[32]));
 
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                 &target->thread.vr, 0,
+                                 &target->thread.vr_state, 0,
                                  33 * sizeof(vector128));
        if (!ret) {
                /*
@@ -471,11 +471,12 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset,
 
        flush_altivec_to_thread(target);
 
-       BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
-                    offsetof(struct thread_struct, vr[32]));
+       BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+                    offsetof(struct thread_vr_state, vr[32]));
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                &target->thread.vr, 0, 33 * sizeof(vector128));
+                                &target->thread.vr_state, 0,
+                                33 * sizeof(vector128));
        if (!ret && count > 0) {
                /*
                 * We use only the first word of vrsave.
@@ -514,13 +515,13 @@ static int vsr_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
 {
-       double buf[32];
+       u64 buf[32];
        int ret, i;
 
        flush_vsx_to_thread(target);
 
        for (i = 0; i < 32 ; i++)
-               buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
+               buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                  buf, 0, 32 * sizeof(double));
 
@@ -531,7 +532,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
 {
-       double buf[32];
+       u64 buf[32];
        int ret,i;
 
        flush_vsx_to_thread(target);
@@ -539,7 +540,7 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        for (i = 0; i < 32 ; i++)
-               target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+               target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 
 
        return ret;
@@ -1554,10 +1555,10 @@ long arch_ptrace(struct task_struct *child, long request,
 
                        flush_fp_to_thread(child);
                        if (fpidx < (PT_FPSCR - PT_FPR0))
-                               tmp = ((unsigned long *)child->thread.fpr)
-                                       [fpidx * TS_FPRWIDTH];
+                               memcpy(&tmp, &child->thread.fp_state.fpr,
+                                      sizeof(long));
                        else
-                               tmp = child->thread.fpscr.val;
+                               tmp = child->thread.fp_state.fpscr;
                }
                ret = put_user(tmp, datalp);
                break;
@@ -1587,10 +1588,10 @@ long arch_ptrace(struct task_struct *child, long request,
 
                        flush_fp_to_thread(child);
                        if (fpidx < (PT_FPSCR - PT_FPR0))
-                               ((unsigned long *)child->thread.fpr)
-                                       [fpidx * TS_FPRWIDTH] = data;
+                               memcpy(&child->thread.fp_state.fpr, &data,
+                                      sizeof(long));
                        else
-                               child->thread.fpscr.val = data;
+                               child->thread.fp_state.fpscr = data;
                        ret = 0;
                }
                break;
index f51599e941c7661b281a5130570b2f5e9d701ec3..097f8dc426a017accf9de0297dc892a40c6c8272 100644 (file)
@@ -43,7 +43,6 @@
 #define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
 #define FPRHALF(i) (((i) - PT_FPR0) & 1)
 #define FPRINDEX(i) TS_FPRWIDTH * FPRNUMBER(i) * 2 + FPRHALF(i)
-#define FPRINDEX_3264(i) (TS_FPRWIDTH * ((i) - PT_FPR0))
 
 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
@@ -105,7 +104,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                         * to be an array of unsigned int (32 bits) - the
                         * index passed in is based on this assumption.
                         */
-                       tmp = ((unsigned int *)child->thread.fpr)
+                       tmp = ((unsigned int *)child->thread.fp_state.fpr)
                                [FPRINDEX(index)];
                }
                ret = put_user((unsigned int)tmp, (u32 __user *)data);
@@ -147,8 +146,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                if (numReg >= PT_FPR0) {
                        flush_fp_to_thread(child);
                        /* get 64 bit FPR */
-                       tmp = ((u64 *)child->thread.fpr)
-                               [FPRINDEX_3264(numReg)];
+                       tmp = child->thread.fp_state.fpr[numReg - PT_FPR0][0];
                } else { /* register within PT_REGS struct */
                        unsigned long tmp2;
                        ret = ptrace_get_reg(child, numReg, &tmp2);
@@ -207,7 +205,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                         * to be an array of unsigned int (32 bits) - the
                         * index passed in is based on this assumption.
                         */
-                       ((unsigned int *)child->thread.fpr)
+                       ((unsigned int *)child->thread.fp_state.fpr)
                                [FPRINDEX(index)] = data;
                        ret = 0;
                }
@@ -251,8 +249,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        u64 *tmp;
                        flush_fp_to_thread(child);
                        /* get 64 bit FPR ... */
-                       tmp = &(((u64 *)child->thread.fpr)
-                               [FPRINDEX_3264(numReg)]);
+                       tmp = &child->thread.fp_state.fpr[numReg - PT_FPR0][0];
                        /* ... write the 32 bit part we want */
                        ((u32 *)tmp)[index % 2] = data;
                        ret = 0;
index 6e7b7cdeec6541135dad27df9f23bf64ac16395b..7d4c7172f38ed43d85873c23f5b1e63e733ea093 100644 (file)
@@ -223,7 +223,7 @@ unsigned long get_phb_buid(struct device_node *phb)
 static int phb_set_bus_ranges(struct device_node *dev,
                              struct pci_controller *phb)
 {
-       const int *bus_range;
+       const __be32 *bus_range;
        unsigned int len;
 
        bus_range = of_get_property(dev, "bus-range", &len);
@@ -231,8 +231,8 @@ static int phb_set_bus_ranges(struct device_node *dev,
                return 1;
        }
 
-       phb->first_busno =  bus_range[0];
-       phb->last_busno  =  bus_range[1];
+       phb->first_busno = be32_to_cpu(bus_range[0]);
+       phb->last_busno  = be32_to_cpu(bus_range[1]);
 
        return 0;
 }
index bebdf1a1a5403df741ea389102f1b69b80daf60e..c094e28b3f102acfb21e6b4458b3ccdb26eb15bc 100644 (file)
@@ -265,27 +265,27 @@ struct rt_sigframe {
 unsigned long copy_fpr_to_user(void __user *to,
                               struct task_struct *task)
 {
-       double buf[ELF_NFPREG];
+       u64 buf[ELF_NFPREG];
        int i;
 
        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                buf[i] = task->thread.TS_FPR(i);
-       memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
+       buf[i] = task->thread.fp_state.fpscr;
        return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 }
 
 unsigned long copy_fpr_from_user(struct task_struct *task,
                                 void __user *from)
 {
-       double buf[ELF_NFPREG];
+       u64 buf[ELF_NFPREG];
        int i;
 
        if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
                return 1;
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                task->thread.TS_FPR(i) = buf[i];
-       memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
+       task->thread.fp_state.fpscr = buf[i];
 
        return 0;
 }
@@ -293,25 +293,25 @@ unsigned long copy_fpr_from_user(struct task_struct *task,
 unsigned long copy_vsx_to_user(void __user *to,
                               struct task_struct *task)
 {
-       double buf[ELF_NVSRHALFREG];
+       u64 buf[ELF_NVSRHALFREG];
        int i;
 
        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < ELF_NVSRHALFREG; i++)
-               buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
+               buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
        return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
 }
 
 unsigned long copy_vsx_from_user(struct task_struct *task,
                                 void __user *from)
 {
-       double buf[ELF_NVSRHALFREG];
+       u64 buf[ELF_NVSRHALFREG];
        int i;
 
        if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
                return 1;
        for (i = 0; i < ELF_NVSRHALFREG ; i++)
-               task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+               task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
        return 0;
 }
 
@@ -319,27 +319,27 @@ unsigned long copy_vsx_from_user(struct task_struct *task,
 unsigned long copy_transact_fpr_to_user(void __user *to,
                                  struct task_struct *task)
 {
-       double buf[ELF_NFPREG];
+       u64 buf[ELF_NFPREG];
        int i;
 
        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                buf[i] = task->thread.TS_TRANS_FPR(i);
-       memcpy(&buf[i], &task->thread.transact_fpscr, sizeof(double));
+       buf[i] = task->thread.transact_fp.fpscr;
        return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 }
 
 unsigned long copy_transact_fpr_from_user(struct task_struct *task,
                                          void __user *from)
 {
-       double buf[ELF_NFPREG];
+       u64 buf[ELF_NFPREG];
        int i;
 
        if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
                return 1;
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                task->thread.TS_TRANS_FPR(i) = buf[i];
-       memcpy(&task->thread.transact_fpscr, &buf[i], sizeof(double));
+       task->thread.transact_fp.fpscr = buf[i];
 
        return 0;
 }
@@ -347,25 +347,25 @@ unsigned long copy_transact_fpr_from_user(struct task_struct *task,
 unsigned long copy_transact_vsx_to_user(void __user *to,
                                  struct task_struct *task)
 {
-       double buf[ELF_NVSRHALFREG];
+       u64 buf[ELF_NVSRHALFREG];
        int i;
 
        /* save FPR copy to local buffer then write to the thread_struct */
        for (i = 0; i < ELF_NVSRHALFREG; i++)
-               buf[i] = task->thread.transact_fpr[i][TS_VSRLOWOFFSET];
+               buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET];
        return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
 }
 
 unsigned long copy_transact_vsx_from_user(struct task_struct *task,
                                          void __user *from)
 {
-       double buf[ELF_NVSRHALFREG];
+       u64 buf[ELF_NVSRHALFREG];
        int i;
 
        if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
                return 1;
        for (i = 0; i < ELF_NVSRHALFREG ; i++)
-               task->thread.transact_fpr[i][TS_VSRLOWOFFSET] = buf[i];
+               task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i];
        return 0;
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -373,14 +373,14 @@ unsigned long copy_transact_vsx_from_user(struct task_struct *task,
 inline unsigned long copy_fpr_to_user(void __user *to,
                                      struct task_struct *task)
 {
-       return __copy_to_user(to, task->thread.fpr,
+       return __copy_to_user(to, task->thread.fp_state.fpr,
                              ELF_NFPREG * sizeof(double));
 }
 
 inline unsigned long copy_fpr_from_user(struct task_struct *task,
                                        void __user *from)
 {
-       return __copy_from_user(task->thread.fpr, from,
+       return __copy_from_user(task->thread.fp_state.fpr, from,
                              ELF_NFPREG * sizeof(double));
 }
 
@@ -388,14 +388,14 @@ inline unsigned long copy_fpr_from_user(struct task_struct *task,
 inline unsigned long copy_transact_fpr_to_user(void __user *to,
                                         struct task_struct *task)
 {
-       return __copy_to_user(to, task->thread.transact_fpr,
+       return __copy_to_user(to, task->thread.transact_fp.fpr,
                              ELF_NFPREG * sizeof(double));
 }
 
 inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
                                                 void __user *from)
 {
-       return __copy_from_user(task->thread.transact_fpr, from,
+       return __copy_from_user(task->thread.transact_fp.fpr, from,
                                ELF_NFPREG * sizeof(double));
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
@@ -423,7 +423,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
        /* save altivec registers */
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
-               if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+               if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
                                   ELF_NVRREG * sizeof(vector128)))
                        return 1;
                /* set MSR_VEC in the saved MSR value to indicate that
@@ -534,17 +534,17 @@ static int save_tm_user_regs(struct pt_regs *regs,
        /* save altivec registers */
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
-               if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
+               if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
                                   ELF_NVRREG * sizeof(vector128)))
                        return 1;
                if (msr & MSR_VEC) {
                        if (__copy_to_user(&tm_frame->mc_vregs,
-                                          current->thread.transact_vr,
+                                          &current->thread.transact_vr,
                                           ELF_NVRREG * sizeof(vector128)))
                                return 1;
                } else {
                        if (__copy_to_user(&tm_frame->mc_vregs,
-                                          current->thread.vr,
+                                          &current->thread.vr_state,
                                           ELF_NVRREG * sizeof(vector128)))
                                return 1;
                }
@@ -692,11 +692,12 @@ static long restore_user_regs(struct pt_regs *regs,
        regs->msr &= ~MSR_VEC;
        if (msr & MSR_VEC) {
                /* restore altivec registers from the stack */
-               if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+               if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
                                     sizeof(sr->mc_vregs)))
                        return 1;
        } else if (current->thread.used_vr)
-               memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
+               memset(&current->thread.vr_state, 0,
+                      ELF_NVRREG * sizeof(vector128));
 
        /* Always get VRSAVE back */
        if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
@@ -722,7 +723,7 @@ static long restore_user_regs(struct pt_regs *regs,
                        return 1;
        } else if (current->thread.used_vsr)
                for (i = 0; i < 32 ; i++)
-                       current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
 #endif /* CONFIG_VSX */
        /*
         * force the process to reload the FP registers from
@@ -798,15 +799,16 @@ static long restore_tm_user_regs(struct pt_regs *regs,
        regs->msr &= ~MSR_VEC;
        if (msr & MSR_VEC) {
                /* restore altivec registers from the stack */
-               if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
+               if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
                                     sizeof(sr->mc_vregs)) ||
-                   __copy_from_user(current->thread.transact_vr,
+                   __copy_from_user(&current->thread.transact_vr,
                                     &tm_sr->mc_vregs,
                                     sizeof(sr->mc_vregs)))
                        return 1;
        } else if (current->thread.used_vr) {
-               memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
-               memset(current->thread.transact_vr, 0,
+               memset(&current->thread.vr_state, 0,
+                      ELF_NVRREG * sizeof(vector128));
+               memset(&current->thread.transact_vr, 0,
                       ELF_NVRREG * sizeof(vector128));
        }
 
@@ -838,8 +840,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
                        return 1;
        } else if (current->thread.used_vsr)
                for (i = 0; i < 32 ; i++) {
-                       current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
-                       current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
                }
 #endif /* CONFIG_VSX */
 
@@ -1030,7 +1032,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
                if (__put_user(0, &rt_sf->uc.uc_link))
                        goto badframe;
 
-       current->thread.fpscr.val = 0;  /* turn off all fp exceptions */
+       current->thread.fp_state.fpscr = 0;     /* turn off all fp exceptions */
 
        /* create a stack frame for the caller of the handler */
        newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
@@ -1045,8 +1047,9 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
        regs->gpr[5] = (unsigned long) &rt_sf->uc;
        regs->gpr[6] = (unsigned long) rt_sf;
        regs->nip = (unsigned long) ka->sa.sa_handler;
-       /* enter the signal handler in big-endian mode */
+       /* enter the signal handler in native-endian mode */
        regs->msr &= ~MSR_LE;
+       regs->msr |= (MSR_KERNEL & MSR_LE);
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /* Remove TM bits from thread's MSR.  The MSR in the sigcontext
         * just indicates to userland that we were doing a transaction, but we
@@ -1462,7 +1465,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 
        regs->link = tramp;
 
-       current->thread.fpscr.val = 0;  /* turn off all fp exceptions */
+       current->thread.fp_state.fpscr = 0;     /* turn off all fp exceptions */
 
        /* create a stack frame for the caller of the handler */
        newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
index f93ec2835a13f01294a9b3d5c225686a04666702..b3c615764c9b97bcb510d017bd9c8ff33e6d69ec 100644 (file)
@@ -103,7 +103,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
                /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
-               err |= __copy_to_user(v_regs, current->thread.vr, 33 * sizeof(vector128));
+               err |= __copy_to_user(v_regs, &current->thread.vr_state,
+                                     33 * sizeof(vector128));
                /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
                 * contains valid data.
                 */
@@ -195,18 +196,18 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
                /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
-               err |= __copy_to_user(v_regs, current->thread.vr,
+               err |= __copy_to_user(v_regs, &current->thread.vr_state,
                                      33 * sizeof(vector128));
                /* If VEC was enabled there are transactional VRs valid too,
                 * else they're a copy of the checkpointed VRs.
                 */
                if (msr & MSR_VEC)
                        err |= __copy_to_user(tm_v_regs,
-                                             current->thread.transact_vr,
+                                             &current->thread.transact_vr,
                                              33 * sizeof(vector128));
                else
                        err |= __copy_to_user(tm_v_regs,
-                                             current->thread.vr,
+                                             &current->thread.vr_state,
                                              33 * sizeof(vector128));
 
                /* set MSR_VEC in the MSR value in the frame to indicate
@@ -349,10 +350,10 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
                return -EFAULT;
        /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
        if (v_regs != NULL && (msr & MSR_VEC) != 0)
-               err |= __copy_from_user(current->thread.vr, v_regs,
+               err |= __copy_from_user(&current->thread.vr_state, v_regs,
                                        33 * sizeof(vector128));
        else if (current->thread.used_vr)
-               memset(current->thread.vr, 0, 33 * sizeof(vector128));
+               memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
        /* Always get VRSAVE back */
        if (v_regs != NULL)
                err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
@@ -374,7 +375,7 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
                err |= copy_vsx_from_user(current, v_regs);
        else
                for (i = 0; i < 32 ; i++)
-                       current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
 #endif
        return err;
 }
@@ -468,14 +469,14 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
                return -EFAULT;
        /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
        if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
-               err |= __copy_from_user(current->thread.vr, v_regs,
+               err |= __copy_from_user(&current->thread.vr_state, v_regs,
                                        33 * sizeof(vector128));
-               err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
+               err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
                                        33 * sizeof(vector128));
        }
        else if (current->thread.used_vr) {
-               memset(current->thread.vr, 0, 33 * sizeof(vector128));
-               memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
+               memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
+               memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
        }
        /* Always get VRSAVE back */
        if (v_regs != NULL && tm_v_regs != NULL) {
@@ -507,8 +508,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
                err |= copy_transact_vsx_from_user(current, tm_v_regs);
        } else {
                for (i = 0; i < 32 ; i++) {
-                       current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
-                       current->thread.transact_fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
+                       current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
                }
        }
 #endif
@@ -747,7 +748,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
                goto badframe;
 
        /* Make sure signal handler doesn't get spurious FP exceptions */
-       current->thread.fpscr.val = 0;
+       current->thread.fp_state.fpscr = 0;
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /* Remove TM bits from thread's MSR.  The MSR in the sigcontext
         * just indicates to userland that we were doing a transaction, but we
@@ -773,8 +774,9 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 
        /* Set up "regs" so we "return" to the signal handler. */
        err |= get_user(regs->nip, &funct_desc_ptr->entry);
-       /* enter the signal handler in big-endian mode */
+       /* enter the signal handler in native-endian mode */
        regs->msr &= ~MSR_LE;
+       regs->msr |= (MSR_KERNEL & MSR_LE);
        regs->gpr[1] = newsp;
        err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
        regs->gpr[3] = signr;
index 8e59abc237d7f17c96effa20583206e75b2e4b7a..930cd8af35035441031e1abecbdde3472ab48808 100644 (file)
@@ -844,18 +844,6 @@ void __cpu_die(unsigned int cpu)
                smp_ops->cpu_die(cpu);
 }
 
-static DEFINE_MUTEX(powerpc_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock()
-{
-       mutex_lock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock()
-{
-       mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
-}
-
 void cpu_die(void)
 {
        if (ppc_md.cpu_die)
index cd809eaa8b5c4bec7d00497e0edaa614501f13e0..761af4f0a632bab2ec41754768cdd72496fc3c20 100644 (file)
 #include <asm/reg.h>
 
 #ifdef CONFIG_VSX
-/* See fpu.S, this is very similar but to save/restore checkpointed FPRs/VSRs */
-#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base)  \
+/* See fpu.S, this is borrowed from there */
+#define __SAVE_32FPRS_VSRS(n,c,base)           \
 BEGIN_FTR_SECTION                              \
        b       2f;                             \
 END_FTR_SECTION_IFSET(CPU_FTR_VSX);            \
-       SAVE_32FPRS_TRANSACT(n,base);           \
+       SAVE_32FPRS(n,base);                    \
        b       3f;                             \
-2:     SAVE_32VSRS_TRANSACT(n,c,base);         \
+2:     SAVE_32VSRS(n,c,base);                  \
 3:
-/* ...and this is just plain borrowed from there. */
 #define __REST_32FPRS_VSRS(n,c,base)           \
 BEGIN_FTR_SECTION                              \
        b       2f;                             \
@@ -31,11 +30,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);         \
 2:     REST_32VSRS(n,c,base);                  \
 3:
 #else
-#define __SAVE_32FPRS_VSRS_TRANSACT(n,c,base) SAVE_32FPRS_TRANSACT(n, base)
-#define __REST_32FPRS_VSRS(n,c,base)         REST_32FPRS(n, base)
+#define __SAVE_32FPRS_VSRS(n,c,base)   SAVE_32FPRS(n, base)
+#define __REST_32FPRS_VSRS(n,c,base)   REST_32FPRS(n, base)
 #endif
-#define SAVE_32FPRS_VSRS_TRANSACT(n,c,base) \
-       __SAVE_32FPRS_VSRS_TRANSACT(n,__REG_##c,__REG_##base)
+#define SAVE_32FPRS_VSRS(n,c,base) \
+       __SAVE_32FPRS_VSRS(n,__REG_##c,__REG_##base)
 #define REST_32FPRS_VSRS(n,c,base) \
        __REST_32FPRS_VSRS(n,__REG_##c,__REG_##base)
 
@@ -157,10 +156,11 @@ _GLOBAL(tm_reclaim)
        andis.          r0, r4, MSR_VEC@h
        beq     dont_backup_vec
 
-       SAVE_32VRS_TRANSACT(0, r6, r3)  /* r6 scratch, r3 thread */
+       addi    r7, r3, THREAD_TRANSACT_VRSTATE
+       SAVE_32VRS(0, r6, r7)   /* r6 scratch, r7 transact vr state */
        mfvscr  vr0
-       li      r6, THREAD_TRANSACT_VSCR
-       stvx    vr0, r3, r6
+       li      r6, VRSTATE_VSCR
+       stvx    vr0, r7, r6
 dont_backup_vec:
        mfspr   r0, SPRN_VRSAVE
        std     r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -168,10 +168,11 @@ dont_backup_vec:
        andi.   r0, r4, MSR_FP
        beq     dont_backup_fp
 
-       SAVE_32FPRS_VSRS_TRANSACT(0, R6, R3)    /* r6 scratch, r3 thread */
+       addi    r7, r3, THREAD_TRANSACT_FPSTATE
+       SAVE_32FPRS_VSRS(0, R6, R7)     /* r6 scratch, r7 transact fp state */
 
        mffs    fr0
-       stfd    fr0,THREAD_TRANSACT_FPSCR(r3)
+       stfd    fr0,FPSTATE_FPSCR(r7)
 
 dont_backup_fp:
        /* The moment we treclaim, ALL of our GPRs will switch
@@ -358,10 +359,11 @@ _GLOBAL(tm_recheckpoint)
        andis.  r0, r4, MSR_VEC@h
        beq     dont_restore_vec
 
-       li      r5, THREAD_VSCR
-       lvx     vr0, r3, r5
+       addi    r8, r3, THREAD_VRSTATE
+       li      r5, VRSTATE_VSCR
+       lvx     vr0, r8, r5
        mtvscr  vr0
-       REST_32VRS(0, r5, r3)                   /* r5 scratch, r3 THREAD ptr */
+       REST_32VRS(0, r5, r8)                   /* r5 scratch, r8 ptr */
 dont_restore_vec:
        ld      r5, THREAD_VRSAVE(r3)
        mtspr   SPRN_VRSAVE, r5
@@ -370,9 +372,10 @@ dont_restore_vec:
        andi.   r0, r4, MSR_FP
        beq     dont_restore_fp
 
-       lfd     fr0, THREAD_FPSCR(r3)
+       addi    r8, r3, THREAD_FPSTATE
+       lfd     fr0, FPSTATE_FPSCR(r8)
        MTFSF_L(fr0)
-       REST_32FPRS_VSRS(0, R4, R3)
+       REST_32FPRS_VSRS(0, R4, R8)
 
 dont_restore_fp:
        mtmsr   r6                              /* FP/Vec off again! */
index f783c932faeb3717eca6136cab5ab350f01e9a6e..f0a6814007a521649be57a7b08de14f19493ceb0 100644 (file)
@@ -816,7 +816,7 @@ static void parse_fpe(struct pt_regs *regs)
 
        flush_fp_to_thread(current);
 
-       code = __parse_fpscr(current->thread.fpscr.val);
+       code = __parse_fpscr(current->thread.fp_state.fpscr);
 
        _exception(SIGFPE, regs, code, regs->nip);
 }
@@ -1069,7 +1069,7 @@ static int emulate_math(struct pt_regs *regs)
                return 0;
        case 1: {
                        int code = 0;
-                       code = __parse_fpscr(current->thread.fpscr.val);
+                       code = __parse_fpscr(current->thread.fp_state.fpscr);
                        _exception(SIGFPE, regs, code, regs->nip);
                        return 0;
                }
@@ -1371,8 +1371,6 @@ void facility_unavailable_exception(struct pt_regs *regs)
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 
-extern void do_load_up_fpu(struct pt_regs *regs);
-
 void fp_unavailable_tm(struct pt_regs *regs)
 {
        /* Note:  This does not handle any kind of FP laziness. */
@@ -1403,8 +1401,6 @@ void fp_unavailable_tm(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_ALTIVEC
-extern void do_load_up_altivec(struct pt_regs *regs);
-
 void altivec_unavailable_tm(struct pt_regs *regs)
 {
        /* See the comments in fp_unavailable_tm().  This function operates
@@ -1634,7 +1630,7 @@ void altivec_assist_exception(struct pt_regs *regs)
                /* XXX quick hack for now: set the non-Java bit in the VSCR */
                printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
                                   "in %s at %lx\n", current->comm, regs->nip);
-               current->thread.vscr.u[3] |= 0x10000;
+               current->thread.vr_state.vscr.u[3] |= 0x10000;
        }
 }
 #endif /* CONFIG_ALTIVEC */
index f223409629b9c768891f54a4f117ed964a25a0e2..e58ee10fa5c0a41efbacaa9f10bc03665d18b8fb 100644 (file)
@@ -4,7 +4,11 @@
  */
 #include <asm/vdso.h>
 
+#ifdef __LITTLE_ENDIAN__
+OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle")
+#else
 OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc")
+#endif
 OUTPUT_ARCH(powerpc:common)
 ENTRY(_start)
 
index e4863819663b35d0ca8658a5f1fd374c4b0da5c7..64fb183a47c2fff07bc385489db33bdc5c7799e3 100644 (file)
@@ -4,7 +4,11 @@
  */
 #include <asm/vdso.h>
 
+#ifdef __LITTLE_ENDIAN__
+OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
+#else
 OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc")
+#endif
 OUTPUT_ARCH(powerpc:common64)
 ENTRY(_start)
 
index 604d0947cb20cd87dcad9f7e512dd5a63ddd05d4..c4bfadb2606bcc6a5cb92b456ae0903e1ea5b86c 100644 (file)
@@ -271,7 +271,7 @@ int emulate_altivec(struct pt_regs *regs)
        vb = (instr >> 11) & 0x1f;
        vc = (instr >> 6) & 0x1f;
 
-       vrs = current->thread.vr;
+       vrs = current->thread.vr_state.vr;
        switch (instr & 0x3f) {
        case 10:
                switch (vc) {
@@ -320,12 +320,12 @@ int emulate_altivec(struct pt_regs *regs)
                case 14:        /* vctuxs */
                        for (i = 0; i < 4; ++i)
                                vrs[vd].u[i] = ctuxs(vrs[vb].u[i], va,
-                                               &current->thread.vscr.u[3]);
+                                       &current->thread.vr_state.vscr.u[3]);
                        break;
                case 15:        /* vctsxs */
                        for (i = 0; i < 4; ++i)
                                vrs[vd].u[i] = ctsxs(vrs[vb].u[i], va,
-                                               &current->thread.vscr.u[3]);
+                                       &current->thread.vr_state.vscr.u[3]);
                        break;
                default:
                        return -EINVAL;
index 9e20999aaef289169dd79feb42871647d4c6dd5c..eacda4eea2d70af507771df52dfa37bd20c73f55 100644 (file)
@@ -8,29 +8,6 @@
 #include <asm/ptrace.h>
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Wrapper to call load_up_altivec from C.
- * void do_load_up_altivec(struct pt_regs *regs);
- */
-_GLOBAL(do_load_up_altivec)
-       mflr    r0
-       std     r0, 16(r1)
-       stdu    r1, -112(r1)
-
-       subi    r6, r3, STACK_FRAME_OVERHEAD
-       /* load_up_altivec expects r12=MSR, r13=PACA, and returns
-        * with r12 = new MSR.
-        */
-       ld      r12,_MSR(r6)
-       GET_PACA(r13)
-       bl      load_up_altivec
-       std     r12,_MSR(r6)
-
-       ld      r0, 112+16(r1)
-       addi    r1, r1, 112
-       mtlr    r0
-       blr
-
 /* void do_load_up_transact_altivec(struct thread_struct *thread)
  *
  * This is similar to load_up_altivec but for the transactional version of the
@@ -46,10 +23,11 @@ _GLOBAL(do_load_up_transact_altivec)
        li      r4,1
        stw     r4,THREAD_USED_VR(r3)
 
-       li      r10,THREAD_TRANSACT_VSCR
+       li      r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
        lvx     vr0,r10,r3
        mtvscr  vr0
-       REST_32VRS_TRANSACT(0,r4,r3)
+       addi    r10,r3,THREAD_TRANSACT_VRSTATE
+       REST_32VRS(0,r4,r10)
 
        /* Disable VEC again. */
        MTMSRD(r6)
@@ -59,7 +37,28 @@ _GLOBAL(do_load_up_transact_altivec)
 #endif
 
 /*
- * load_up_altivec(unused, unused, tsk)
+ * Load state from memory into VMX registers including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(load_vr_state)
+       li      r4,VRSTATE_VSCR
+       lvx     vr0,r4,r3
+       mtvscr  vr0
+       REST_32VRS(0,r4,r3)
+       blr
+
+/*
+ * Store VMX state into memory, including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(store_vr_state)
+       SAVE_32VRS(0, r4, r3)
+       mfvscr  vr0
+       li      r4, VRSTATE_VSCR
+       stvx    vr0, r4, r3
+       blr
+
+/*
  * Disable VMX for the task which had it previously,
  * and save its vector registers in its thread_struct.
  * Enables the VMX for use in the kernel on return.
@@ -90,10 +89,11 @@ _GLOBAL(load_up_altivec)
        /* Save VMX state to last_task_used_altivec's THREAD struct */
        toreal(r4)
        addi    r4,r4,THREAD
-       SAVE_32VRS(0,r5,r4)
+       addi    r7,r4,THREAD_VRSTATE
+       SAVE_32VRS(0,r5,r7)
        mfvscr  vr0
-       li      r10,THREAD_VSCR
-       stvx    vr0,r10,r4
+       li      r10,VRSTATE_VSCR
+       stvx    vr0,r10,r7
        /* Disable VMX for last_task_used_altivec */
        PPC_LL  r5,PT_REGS(r4)
        toreal(r5)
@@ -125,12 +125,13 @@ _GLOBAL(load_up_altivec)
        oris    r12,r12,MSR_VEC@h
        std     r12,_MSR(r1)
 #endif
+       addi    r7,r5,THREAD_VRSTATE
        li      r4,1
-       li      r10,THREAD_VSCR
+       li      r10,VRSTATE_VSCR
        stw     r4,THREAD_USED_VR(r5)
-       lvx     vr0,r10,r5
+       lvx     vr0,r10,r7
        mtvscr  vr0
-       REST_32VRS(0,r4,r5)
+       REST_32VRS(0,r4,r7)
 #ifndef CONFIG_SMP
        /* Update last_task_used_altivec to 'current' */
        subi    r4,r5,THREAD            /* Back to 'current' */
@@ -165,12 +166,16 @@ _GLOBAL(giveup_altivec)
        PPC_LCMPI       0,r3,0
        beqlr                           /* if no previous owner, done */
        addi    r3,r3,THREAD            /* want THREAD of task */
+       PPC_LL  r7,THREAD_VRSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
-       PPC_LCMPI       0,r5,0
-       SAVE_32VRS(0,r4,r3)
+       PPC_LCMPI       0,r7,0
+       bne     2f
+       addi    r7,r3,THREAD_VRSTATE
+2:     PPC_LCMPI       0,r5,0
+       SAVE_32VRS(0,r4,r7)
        mfvscr  vr0
-       li      r4,THREAD_VSCR
-       stvx    vr0,r4,r3
+       li      r4,VRSTATE_VSCR
+       stvx    vr0,r4,r7
        beq     1f
        PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 #ifdef CONFIG_VSX
index d38cc08b16c7f23831e3af558bfe35908e14af47..089de12b9ab0946e324c4521e11936ae685b15da 100644 (file)
@@ -1413,8 +1413,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
 
                /* needed to ensure proper operation of coherent allocations
                 * later, in case driver doesn't set it explicitly */
-               dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
-               dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
+               dma_set_mask_and_coherent(&viodev->dev, DMA_BIT_MASK(64));
        }
 
        /* register with generic device framework */
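
The vio change above is a conversion to the combined helper: dma_set_mask_and_coherent() sets the streaming and the coherent DMA mask in one call and fails if the mask cannot be applied, replacing the back-to-back dma_set_mask()/dma_set_coherent_mask() pair. A generic sketch of the usual probe-time pattern follows; example_setup_dma() and the 32-bit fallback are illustrative and not taken from vio.c:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative probe-time helper, not code from this driver. */
static int example_setup_dma(struct device *dev)
{
        int rc;

        /* One call now covers what used to be dma_set_mask() followed by
         * dma_set_coherent_mask(). */
        rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (rc)
                /* Optionally fall back to 32-bit addressing on platforms
                 * that cannot do 64-bit DMA. */
                rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

        return rc;
}
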
index ffaef2cb101a4ef50c77f1dc58c3cdac1f2da105..e593ff257bd300461d8a6e487e0fb448e5d86936 100644 (file)
@@ -6,6 +6,7 @@ source "virt/kvm/Kconfig"
 
 menuconfig VIRTUALIZATION
        bool "Virtualization"
+       depends on !CPU_LITTLE_ENDIAN
        ---help---
          Say Y here to get to see options for using your Linux host to run
          other operating systems inside virtual machines (guests).
index 294b7af28cdd3c58b2551c53b9210daa429f836d..c71103b8a748350947ad3c2f6b7256c6c2841405 100644 (file)
@@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 BEGIN_FTR_SECTION
        mfspr   r8, SPRN_DSCR
        ld      r7, HSTATE_DSCR(r13)
-       std     r8, VCPU_DSCR(r7)
+       std     r8, VCPU_DSCR(r9)
        mtspr   SPRN_DSCR, r7
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
index 27db1e66595987a99e2f387819345af30baad739..c0b48f96a91c9817b17e80b6b32f3f4e6aac0167 100644 (file)
@@ -444,7 +444,7 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
 #ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
 #endif
-       u64 *thread_fpr = (u64*)t->fpr;
+       u64 *thread_fpr = &t->fp_state.fpr[0][0];
        int i;
 
        /*
@@ -466,14 +466,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
-                * registers into thread.fpr[].
+                * registers into thread.fp_state.fpr[].
                 */
                if (current->thread.regs->msr & MSR_FP)
                        giveup_fpu(current);
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
 
-               vcpu->arch.fpscr = t->fpscr.val;
+               vcpu->arch.fpscr = t->fp_state.fpscr;
 
 #ifdef CONFIG_VSX
                if (cpu_has_feature(CPU_FTR_VSX))
@@ -486,8 +486,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
-               memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
-               vcpu->arch.vscr = t->vscr;
+               memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
+               vcpu->arch.vscr = t->vr_state.vscr;
        }
 #endif
 
@@ -539,7 +539,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 #ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
 #endif
-       u64 *thread_fpr = (u64*)t->fpr;
+       u64 *thread_fpr = &t->fp_state.fpr[0][0];
        int i;
 
        /* When we have paired singles, we emulate in software */
@@ -584,15 +584,15 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
 #endif
-               t->fpscr.val = vcpu->arch.fpscr;
+               t->fp_state.fpscr = vcpu->arch.fpscr;
                t->fpexc_mode = 0;
                kvmppc_load_up_fpu();
        }
 
        if (msr & MSR_VEC) {
 #ifdef CONFIG_ALTIVEC
-               memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
-               t->vscr = vcpu->arch.vscr;
+               memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+               t->vr_state.vscr = vcpu->arch.vscr;
                t->vrsave = -1;
                kvmppc_load_up_altivec();
 #endif
@@ -1116,12 +1116,10 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        int ret;
-       double fpr[32][TS_FPRWIDTH];
-       unsigned int fpscr;
+       struct thread_fp_state fp;
        int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-       vector128 vr[32];
-       vector128 vscr;
+       struct thread_vr_state vr;
        unsigned long uninitialized_var(vrsave);
        int used_vr;
 #endif
@@ -1153,8 +1151,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        /* Save FPU state in stack */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
-       memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
-       fpscr = current->thread.fpscr.val;
+       fp = current->thread.fp_state;
        fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
@@ -1163,8 +1160,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        if (used_vr) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
-               memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
-               vscr = current->thread.vscr;
+               vr = current->thread.vr_state;
                vrsave = current->thread.vrsave;
        }
 #endif
@@ -1196,15 +1192,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        current->thread.regs->msr = ext_msr;
 
        /* Restore FPU/VSX state from stack */
-       memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
-       current->thread.fpscr.val = fpscr;
+       current->thread.fp_state = fp;
        current->thread.fpexc_mode = fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
        /* Restore Altivec state from stack */
        if (used_vr && current->thread.used_vr) {
-               memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
-               current->thread.vscr = vscr;
+               current->thread.vr_state = vr;
                current->thread.vrsave = vrsave;
        }
        current->thread.used_vr = used_vr;
index 17722d82f1d1f500bd5579f544cf29b18e6cbd80..5133199f6cb7b6cab1feca199f434bfd69a89e1a 100644 (file)
@@ -656,9 +656,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        int ret, s;
 #ifdef CONFIG_PPC_FPU
-       unsigned int fpscr;
+       struct thread_fp_state fp;
        int fpexc_mode;
-       u64 fpr[32];
 #endif
 
        if (!vcpu->arch.sane) {
@@ -677,13 +676,13 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
        enable_kernel_fp();
-       memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
-       fpscr = current->thread.fpscr.val;
+       fp = current->thread.fp_state;
        fpexc_mode = current->thread.fpexc_mode;
 
        /* Restore guest FPU state to thread */
-       memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
-       current->thread.fpscr.val = vcpu->arch.fpscr;
+       memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr,
+              sizeof(vcpu->arch.fpr));
+       current->thread.fp_state.fpscr = vcpu->arch.fpscr;
 
        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
@@ -709,12 +708,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        vcpu->fpu_active = 0;
 
        /* Save guest FPU state from thread */
-       memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
-       vcpu->arch.fpscr = current->thread.fpscr.val;
+       memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr,
+              sizeof(vcpu->arch.fpr));
+       vcpu->arch.fpscr = current->thread.fp_state.fpscr;
 
        /* Restore userspace FPU state from stack */
-       memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
-       current->thread.fpscr.val = fpscr;
+       current->thread.fp_state = fp;
        current->thread.fpexc_mode = fpexc_mode;
 #endif
 
index 1c6a9d729df4a26ca3ad6c9f1c94008d681f7d42..c65593abae8eb879f71915cbf639f0dfec4468dc 100644 (file)
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        unsigned long hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;
+       int ret = 0;
+       unsigned long mmu_seq;
+       struct kvm *kvm = vcpu_e500->vcpu.kvm;
+
+       /* used to check for invalidations in progress */
+       mmu_seq = kvm->mmu_notifier_seq;
+       smp_rmb();
 
        /*
         * Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }
 
+       spin_lock(&kvm->mmu_lock);
+       if (mmu_notifier_retry(kvm, mmu_seq)) {
+               ret = -EAGAIN;
+               goto out;
+       }
+
        kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        /* Clear i-cache for new pages */
        kvmppc_mmu_flush_icache(pfn);
 
+out:
+       spin_unlock(&kvm->mmu_lock);
+
        /* Drop refcount on page, so that mmu notifiers can clear it */
        kvm_release_pfn_clean(pfn);
 
-       return 0;
+       return ret;
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
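
The e500 shadow-TLB change above applies the standard KVM ordering for racing against MMU notifier invalidations: snapshot kvm->mmu_notifier_seq (with smp_rmb()) before the translation work that may sleep, then re-check it with mmu_notifier_retry() under kvm->mmu_lock immediately before installing the mapping, and return -EAGAIN so the caller retries if an invalidation ran in between. The shape of that pattern in isolation — example_map_guest_page() and the elided steps are placeholders; only the kvm fields and mmu_notifier_retry() are real kernel interfaces:

#include <linux/kvm_host.h>

static int example_map_guest_page(struct kvm *kvm, gfn_t gfn)
{
        unsigned long mmu_seq;
        int ret = 0;

        /* 1. Snapshot the notifier sequence before any work that can
         *    sleep; the barrier pairs with the notifier write side. */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* ... translate gfn to a host pfn, fix up page sizes, etc. ... */

        /* 2. Re-check under mmu_lock right before touching the shadow
         *    TLB; bail out and let the caller retry if an invalidation
         *    slipped in between. */
        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                ret = -EAGAIN;
                goto out;
        }

        /* ... install the translation for gfn ... */

out:
        spin_unlock(&kvm->mmu_lock);
        return ret;
}
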
index 4504332766990147fdbfef302baac7919ec179fa..5310132856c11d11e31964294d9345fca55aa1a9 100644 (file)
@@ -10,15 +10,23 @@ CFLAGS_REMOVE_code-patching.o = -pg
 CFLAGS_REMOVE_feature-fixups.o = -pg
 
 obj-y                  := string.o alloc.o \
-                          checksum_$(CONFIG_WORD_SIZE).o crtsavres.o
+                          crtsavres.o
 obj-$(CONFIG_PPC32)    += div64.o copy_32.o
 obj-$(CONFIG_HAS_IOMEM)        += devres.o
 
 obj-$(CONFIG_PPC64)    += copypage_64.o copyuser_64.o \
-                          memcpy_64.o usercopy_64.o mem_64.o string.o \
-                          checksum_wrappers_64.o hweight_64.o \
-                          copyuser_power7.o string_64.o copypage_power7.o \
-                          memcpy_power7.o
+                          usercopy_64.o mem_64.o string.o \
+                          hweight_64.o \
+                          copyuser_power7.o string_64.o copypage_power7.o
+ifeq ($(CONFIG_GENERIC_CSUM),)
+obj-y                  += checksum_$(CONFIG_WORD_SIZE).o
+obj-$(CONFIG_PPC64)    += checksum_wrappers_64.o
+endif
+
+ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),)
+obj-$(CONFIG_PPC64)    += memcpy_power7.o memcpy_64.o
+endif
+
 obj-$(CONFIG_PPC_EMULATE_SSTEP)        += sstep.o ldstfp.o
 
 ifeq ($(CONFIG_PPC64),y)
index d1f11795a7ad64bd6bd05beb522e07e44eb46498..e8e9c36dc7844455c4b24356cdff5f9ed9e70aff 100644 (file)
  */
 #include <asm/ppc_asm.h>
 
+#ifdef __BIG_ENDIAN__
+#define LVS(VRT,RA,RB)         lvsl    VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm   VRT,VRA,VRB,VRC
+#else
+#define LVS(VRT,RA,RB)         lvsr    VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm   VRT,VRB,VRA,VRC
+#endif
+
        .macro err1
 100:
        .section __ex_table,"a"
@@ -552,13 +560,13 @@ err3;     stw     r7,4(r3)
        li      r10,32
        li      r11,48
 
-       lvsl    vr16,0,r4       /* Setup permute control vector */
+       LVS(vr16,0,r4)          /* Setup permute control vector */
 err3;  lvx     vr0,0,r4
        addi    r4,r4,16
 
        bf      cr7*4+3,5f
 err3;  lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        addi    r4,r4,16
 err3;  stvx    vr8,r0,r3
        addi    r3,r3,16
@@ -566,9 +574,9 @@ err3;       stvx    vr8,r0,r3
 
 5:     bf      cr7*4+2,6f
 err3;  lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
 err3;  lvx     vr0,r4,r9
-       vperm   vr9,vr1,vr0,vr16
+       VPERM(vr9,vr1,vr0,vr16)
        addi    r4,r4,32
 err3;  stvx    vr8,r0,r3
 err3;  stvx    vr9,r3,r9
@@ -576,13 +584,13 @@ err3;     stvx    vr9,r3,r9
 
 6:     bf      cr7*4+1,7f
 err3;  lvx     vr3,r0,r4
-       vperm   vr8,vr0,vr3,vr16
+       VPERM(vr8,vr0,vr3,vr16)
 err3;  lvx     vr2,r4,r9
-       vperm   vr9,vr3,vr2,vr16
+       VPERM(vr9,vr3,vr2,vr16)
 err3;  lvx     vr1,r4,r10
-       vperm   vr10,vr2,vr1,vr16
+       VPERM(vr10,vr2,vr1,vr16)
 err3;  lvx     vr0,r4,r11
-       vperm   vr11,vr1,vr0,vr16
+       VPERM(vr11,vr1,vr0,vr16)
        addi    r4,r4,64
 err3;  stvx    vr8,r0,r3
 err3;  stvx    vr9,r3,r9
@@ -611,21 +619,21 @@ err3;     stvx    vr11,r3,r11
        .align  5
 8:
 err4;  lvx     vr7,r0,r4
-       vperm   vr8,vr0,vr7,vr16
+       VPERM(vr8,vr0,vr7,vr16)
 err4;  lvx     vr6,r4,r9
-       vperm   vr9,vr7,vr6,vr16
+       VPERM(vr9,vr7,vr6,vr16)
 err4;  lvx     vr5,r4,r10
-       vperm   vr10,vr6,vr5,vr16
+       VPERM(vr10,vr6,vr5,vr16)
 err4;  lvx     vr4,r4,r11
-       vperm   vr11,vr5,vr4,vr16
+       VPERM(vr11,vr5,vr4,vr16)
 err4;  lvx     vr3,r4,r12
-       vperm   vr12,vr4,vr3,vr16
+       VPERM(vr12,vr4,vr3,vr16)
 err4;  lvx     vr2,r4,r14
-       vperm   vr13,vr3,vr2,vr16
+       VPERM(vr13,vr3,vr2,vr16)
 err4;  lvx     vr1,r4,r15
-       vperm   vr14,vr2,vr1,vr16
+       VPERM(vr14,vr2,vr1,vr16)
 err4;  lvx     vr0,r4,r16
-       vperm   vr15,vr1,vr0,vr16
+       VPERM(vr15,vr1,vr0,vr16)
        addi    r4,r4,128
 err4;  stvx    vr8,r0,r3
 err4;  stvx    vr9,r3,r9
@@ -649,13 +657,13 @@ err4;     stvx    vr15,r3,r16
 
        bf      cr7*4+1,9f
 err3;  lvx     vr3,r0,r4
-       vperm   vr8,vr0,vr3,vr16
+       VPERM(vr8,vr0,vr3,vr16)
 err3;  lvx     vr2,r4,r9
-       vperm   vr9,vr3,vr2,vr16
+       VPERM(vr9,vr3,vr2,vr16)
 err3;  lvx     vr1,r4,r10
-       vperm   vr10,vr2,vr1,vr16
+       VPERM(vr10,vr2,vr1,vr16)
 err3;  lvx     vr0,r4,r11
-       vperm   vr11,vr1,vr0,vr16
+       VPERM(vr11,vr1,vr0,vr16)
        addi    r4,r4,64
 err3;  stvx    vr8,r0,r3
 err3;  stvx    vr9,r3,r9
@@ -665,9 +673,9 @@ err3;       stvx    vr11,r3,r11
 
 9:     bf      cr7*4+2,10f
 err3;  lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
 err3;  lvx     vr0,r4,r9
-       vperm   vr9,vr1,vr0,vr16
+       VPERM(vr9,vr1,vr0,vr16)
        addi    r4,r4,32
 err3;  stvx    vr8,r0,r3
 err3;  stvx    vr9,r3,r9
@@ -675,7 +683,7 @@ err3;       stvx    vr9,r3,r9
 
 10:    bf      cr7*4+3,11f
 err3;  lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        addi    r4,r4,16
 err3;  stvx    vr8,r0,r3
        addi    r3,r3,16
index 0663630baf3b46373905d60d96e989fee6637aa8..e4177dbea6bd6a9e59e1cfc548195b1223b8eb0d 100644 (file)
 #include <asm/ppc_asm.h>
 
 _GLOBAL(memcpy_power7)
+
+#ifdef __BIG_ENDIAN__
+#define LVS(VRT,RA,RB)         lvsl    VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm   VRT,VRA,VRB,VRC
+#else
+#define LVS(VRT,RA,RB)         lvsr    VRT,RA,RB
+#define VPERM(VRT,VRA,VRB,VRC) vperm   VRT,VRB,VRA,VRC
+#endif
+
 #ifdef CONFIG_ALTIVEC
        cmpldi  r5,16
        cmpldi  cr1,r5,4096
@@ -485,13 +494,13 @@ _GLOBAL(memcpy_power7)
        li      r10,32
        li      r11,48
 
-       lvsl    vr16,0,r4       /* Setup permute control vector */
+       LVS(vr16,0,r4)          /* Setup permute control vector */
        lvx     vr0,0,r4
        addi    r4,r4,16
 
        bf      cr7*4+3,5f
        lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        addi    r4,r4,16
        stvx    vr8,r0,r3
        addi    r3,r3,16
@@ -499,9 +508,9 @@ _GLOBAL(memcpy_power7)
 
 5:     bf      cr7*4+2,6f
        lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        lvx     vr0,r4,r9
-       vperm   vr9,vr1,vr0,vr16
+       VPERM(vr9,vr1,vr0,vr16)
        addi    r4,r4,32
        stvx    vr8,r0,r3
        stvx    vr9,r3,r9
@@ -509,13 +518,13 @@ _GLOBAL(memcpy_power7)
 
 6:     bf      cr7*4+1,7f
        lvx     vr3,r0,r4
-       vperm   vr8,vr0,vr3,vr16
+       VPERM(vr8,vr0,vr3,vr16)
        lvx     vr2,r4,r9
-       vperm   vr9,vr3,vr2,vr16
+       VPERM(vr9,vr3,vr2,vr16)
        lvx     vr1,r4,r10
-       vperm   vr10,vr2,vr1,vr16
+       VPERM(vr10,vr2,vr1,vr16)
        lvx     vr0,r4,r11
-       vperm   vr11,vr1,vr0,vr16
+       VPERM(vr11,vr1,vr0,vr16)
        addi    r4,r4,64
        stvx    vr8,r0,r3
        stvx    vr9,r3,r9
@@ -544,21 +553,21 @@ _GLOBAL(memcpy_power7)
        .align  5
 8:
        lvx     vr7,r0,r4
-       vperm   vr8,vr0,vr7,vr16
+       VPERM(vr8,vr0,vr7,vr16)
        lvx     vr6,r4,r9
-       vperm   vr9,vr7,vr6,vr16
+       VPERM(vr9,vr7,vr6,vr16)
        lvx     vr5,r4,r10
-       vperm   vr10,vr6,vr5,vr16
+       VPERM(vr10,vr6,vr5,vr16)
        lvx     vr4,r4,r11
-       vperm   vr11,vr5,vr4,vr16
+       VPERM(vr11,vr5,vr4,vr16)
        lvx     vr3,r4,r12
-       vperm   vr12,vr4,vr3,vr16
+       VPERM(vr12,vr4,vr3,vr16)
        lvx     vr2,r4,r14
-       vperm   vr13,vr3,vr2,vr16
+       VPERM(vr13,vr3,vr2,vr16)
        lvx     vr1,r4,r15
-       vperm   vr14,vr2,vr1,vr16
+       VPERM(vr14,vr2,vr1,vr16)
        lvx     vr0,r4,r16
-       vperm   vr15,vr1,vr0,vr16
+       VPERM(vr15,vr1,vr0,vr16)
        addi    r4,r4,128
        stvx    vr8,r0,r3
        stvx    vr9,r3,r9
@@ -582,13 +591,13 @@ _GLOBAL(memcpy_power7)
 
        bf      cr7*4+1,9f
        lvx     vr3,r0,r4
-       vperm   vr8,vr0,vr3,vr16
+       VPERM(vr8,vr0,vr3,vr16)
        lvx     vr2,r4,r9
-       vperm   vr9,vr3,vr2,vr16
+       VPERM(vr9,vr3,vr2,vr16)
        lvx     vr1,r4,r10
-       vperm   vr10,vr2,vr1,vr16
+       VPERM(vr10,vr2,vr1,vr16)
        lvx     vr0,r4,r11
-       vperm   vr11,vr1,vr0,vr16
+       VPERM(vr11,vr1,vr0,vr16)
        addi    r4,r4,64
        stvx    vr8,r0,r3
        stvx    vr9,r3,r9
@@ -598,9 +607,9 @@ _GLOBAL(memcpy_power7)
 
 9:     bf      cr7*4+2,10f
        lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        lvx     vr0,r4,r9
-       vperm   vr9,vr1,vr0,vr16
+       VPERM(vr9,vr1,vr0,vr16)
        addi    r4,r4,32
        stvx    vr8,r0,r3
        stvx    vr9,r3,r9
@@ -608,7 +617,7 @@ _GLOBAL(memcpy_power7)
 
 10:    bf      cr7*4+3,11f
        lvx     vr1,r0,r4
-       vperm   vr8,vr0,vr1,vr16
+       VPERM(vr8,vr0,vr1,vr16)
        addi    r4,r4,16
        stvx    vr8,r0,r3
        addi    r3,r3,16
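------------------------------------------------------------------------
Both copyuser_power7.S and memcpy_power7.S now hide their endian-dependent pieces behind two macros: LVS expands to lvsl on big-endian builds and lvsr on little-endian ones, and VPERM swaps the order of its source operands so the permute yields bytes in the correct order either way, leaving the copy loops themselves untouched. The same single-macro technique, reduced to plain C with a made-up load_be32() helper, looks like this:

#include <linux/kernel.h>
#include <asm/byteorder.h>

/* Illustration only: one macro name, two endian-specific expansions */
#ifdef __BIG_ENDIAN__
#define load_be32(p)    (*(const u32 *)(p))             /* already big-endian */
#else
#define load_be32(p)    swab32(*(const u32 *)(p))       /* byte-swap on LE */
#endif

static u32 sum_be32(const u32 *buf, int n)
{
        u32 sum = 0;
        int i;

        /* The loop body is written once and stays endian-agnostic */
        for (i = 0; i < n; i++)
                sum += load_be32(buf + i);
        return sum;
}
------------------------------------------------------------------------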
index c33d939120c970f3f800b42b5dfa3739e28ac16f..3ea26c25590be1dabe4a057882f35b77f5dfe7c1 100644 (file)
 #define DBG_LOW(fmt...)
 #endif
 
+#ifdef __BIG_ENDIAN__
 #define HPTE_LOCK_BIT 3
+#else
+#define HPTE_LOCK_BIT (56+3)
+#endif
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
@@ -172,7 +176,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
 
 static inline void native_lock_hpte(struct hash_pte *hptep)
 {
-       unsigned long *word = &hptep->v;
+       unsigned long *word = (unsigned long *)&hptep->v;
 
        while (1) {
                if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
@@ -184,7 +188,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
 
 static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
-       unsigned long *word = &hptep->v;
+       unsigned long *word = (unsigned long *)&hptep->v;
 
        clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
@@ -204,10 +208,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
        }
 
        for (i = 0; i < HPTES_PER_GROUP; i++) {
-               if (! (hptep->v & HPTE_V_VALID)) {
+               if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
-                       if (! (hptep->v & HPTE_V_VALID))
+                       if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
                                break;
                        native_unlock_hpte(hptep);
                }
@@ -226,14 +230,14 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
                        i, hpte_v, hpte_r);
        }
 
-       hptep->r = hpte_r;
+       hptep->r = cpu_to_be64(hpte_r);
        /* Guarantee the second dword is visible before the valid bit */
        eieio();
        /*
         * Now set the first dword including the valid bit
         * NOTE: this also unlocks the hpte
         */
-       hptep->v = hpte_v;
+       hptep->v = cpu_to_be64(hpte_v);
 
        __asm__ __volatile__ ("ptesync" : : : "memory");
 
@@ -254,12 +258,12 @@ static long native_hpte_remove(unsigned long hpte_group)
 
        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + hpte_group + slot_offset;
-               hpte_v = hptep->v;
+               hpte_v = be64_to_cpu(hptep->v);
 
                if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
                        /* retry with lock held */
                        native_lock_hpte(hptep);
-                       hpte_v = hptep->v;
+                       hpte_v = be64_to_cpu(hptep->v);
                        if ((hpte_v & HPTE_V_VALID)
                            && !(hpte_v & HPTE_V_BOLTED))
                                break;
@@ -294,7 +298,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 
        native_lock_hpte(hptep);
 
-       hpte_v = hptep->v;
+       hpte_v = be64_to_cpu(hptep->v);
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
         * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -308,8 +312,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
        } else {
                DBG_LOW(" -> hit\n");
                /* Update the HPTE */
-               hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
-                       (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
+               hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
+                       (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
        }
        native_unlock_hpte(hptep);
 
@@ -334,7 +338,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
        slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
        for (i = 0; i < HPTES_PER_GROUP; i++) {
                hptep = htab_address + slot;
-               hpte_v = hptep->v;
+               hpte_v = be64_to_cpu(hptep->v);
 
                if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
                        /* HPTE matches */
@@ -369,8 +373,9 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
        hptep = htab_address + slot;
 
        /* Update the HPTE */
-       hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
-               (newpp & (HPTE_R_PP | HPTE_R_N));
+       hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
+                       ~(HPTE_R_PP | HPTE_R_N)) |
+               (newpp & (HPTE_R_PP | HPTE_R_N)));
        /*
         * Ensure it is out of the tlb too. Bolted entries base and
         * actual page size will be same.
@@ -392,7 +397,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 
        want_v = hpte_encode_avpn(vpn, bpsize, ssize);
        native_lock_hpte(hptep);
-       hpte_v = hptep->v;
+       hpte_v = be64_to_cpu(hptep->v);
 
        /*
         * We need to invalidate the TLB always because hpte_remove doesn't do
@@ -458,7 +463,7 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
                hptep = htab_address + slot;
                want_v = hpte_encode_avpn(vpn, psize, ssize);
                native_lock_hpte(hptep);
-               hpte_v = hptep->v;
+               hpte_v = be64_to_cpu(hptep->v);
 
                /* Even if we miss, we need to invalidate the TLB */
                if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -519,11 +524,12 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                        int *psize, int *apsize, int *ssize, unsigned long *vpn)
 {
        unsigned long avpn, pteg, vpi;
-       unsigned long hpte_v = hpte->v;
+       unsigned long hpte_v = be64_to_cpu(hpte->v);
+       unsigned long hpte_r = be64_to_cpu(hpte->r);
        unsigned long vsid, seg_off;
        int size, a_size, shift;
        /* Look at the 8 bit LP value */
-       unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+       unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
 
        if (!(hpte_v & HPTE_V_LARGE)) {
                size   = MMU_PAGE_4K;
@@ -612,7 +618,7 @@ static void native_hpte_clear(void)
                 * running,  right?  and for crash dump, we probably
                 * don't want to wait for a maybe bad cpu.
                 */
-               hpte_v = hptep->v;
+               hpte_v = be64_to_cpu(hptep->v);
 
                /*
                 * Call __tlbie() here rather than tlbie() since we
@@ -664,7 +670,7 @@ static void native_flush_hash_range(unsigned long number, int local)
                        hptep = htab_address + slot;
                        want_v = hpte_encode_avpn(vpn, psize, ssize);
                        native_lock_hpte(hptep);
-                       hpte_v = hptep->v;
+                       hpte_v = be64_to_cpu(hptep->v);
                        if (!HPTE_V_COMPARE(hpte_v, want_v) ||
                            !(hpte_v & HPTE_V_VALID))
                                native_unlock_hpte(hptep);
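------------------------------------------------------------------------
The hash_native_64.c conversion follows one rule throughout: HPTE words live in the hash table in big-endian format, so every load goes through be64_to_cpu(), every store through cpu_to_be64(), and all flag tests (HPTE_V_VALID, HPTE_V_BOLTED, ...) run on the CPU-endian copy. A condensed sketch of that read-modify-write discipline, with a stand-in structure mirroring the kernel's struct hash_pte:

#include <linux/kernel.h>
#include <asm/byteorder.h>

/* Stand-in for struct hash_pte: both words are big-endian in memory */
struct hash_pte_sketch {
        __be64 v;
        __be64 r;
};

static void update_pp_bits(struct hash_pte_sketch *hptep,
                           unsigned long newpp, unsigned long pp_mask)
{
        unsigned long r = be64_to_cpu(hptep->r);        /* BE -> CPU endian */

        r = (r & ~pp_mask) | (newpp & pp_mask);         /* operate natively */
        hptep->r = cpu_to_be64(r);                      /* CPU -> BE again */
}
------------------------------------------------------------------------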
index bde8b55897551a60b15ad6017c7910a6cd3278b8..6176b3cdf57991590df2b26f42f27eed593096bb 100644 (file)
@@ -251,19 +251,18 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
                                         void *data)
 {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       u32 *prop;
+       __be32 *prop;
        unsigned long size = 0;
 
        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;
 
-       prop = (u32 *)of_get_flat_dt_prop(node, "ibm,processor-segment-sizes",
-                                         &size);
+       prop = of_get_flat_dt_prop(node, "ibm,processor-segment-sizes", &size);
        if (prop == NULL)
                return 0;
        for (; size >= 4; size -= 4, ++prop) {
-               if (prop[0] == 40) {
+               if (be32_to_cpu(prop[0]) == 40) {
                        DBG("1T segment support detected\n");
                        cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
                        return 1;
@@ -307,23 +306,22 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
                                          void *data)
 {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       u32 *prop;
+       __be32 *prop;
        unsigned long size = 0;
 
        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;
 
-       prop = (u32 *)of_get_flat_dt_prop(node,
-                                         "ibm,segment-page-sizes", &size);
+       prop = of_get_flat_dt_prop(node, "ibm,segment-page-sizes", &size);
        if (prop != NULL) {
                pr_info("Page sizes from device-tree:\n");
                size /= 4;
                cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
                while(size > 0) {
-                       unsigned int base_shift = prop[0];
-                       unsigned int slbenc = prop[1];
-                       unsigned int lpnum = prop[2];
+                       unsigned int base_shift = be32_to_cpu(prop[0]);
+                       unsigned int slbenc = be32_to_cpu(prop[1]);
+                       unsigned int lpnum = be32_to_cpu(prop[2]);
                        struct mmu_psize_def *def;
                        int idx, base_idx;
 
@@ -356,8 +354,8 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
                                def->tlbiel = 0;
 
                        while (size > 0 && lpnum) {
-                               unsigned int shift = prop[0];
-                               int penc  = prop[1];
+                               unsigned int shift = be32_to_cpu(prop[0]);
+                               int penc  = be32_to_cpu(prop[1]);
 
                                prop += 2; size -= 2;
                                lpnum--;
@@ -390,8 +388,8 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
                                        const char *uname, int depth,
                                        void *data) {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       unsigned long *addr_prop;
-       u32 *page_count_prop;
+       __be64 *addr_prop;
+       __be32 *page_count_prop;
        unsigned int expected_pages;
        long unsigned int phys_addr;
        long unsigned int block_size;
@@ -405,12 +403,12 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
        page_count_prop = of_get_flat_dt_prop(node, "ibm,expected#pages", NULL);
        if (page_count_prop == NULL)
                return 0;
-       expected_pages = (1 << page_count_prop[0]);
+       expected_pages = (1 << be32_to_cpu(page_count_prop[0]));
        addr_prop = of_get_flat_dt_prop(node, "reg", NULL);
        if (addr_prop == NULL)
                return 0;
-       phys_addr = addr_prop[0];
-       block_size = addr_prop[1];
+       phys_addr = be64_to_cpu(addr_prop[0]);
+       block_size = be64_to_cpu(addr_prop[1]);
        if (block_size != (16 * GB))
                return 0;
        printk(KERN_INFO "Huge page(16GB) memory: "
@@ -534,16 +532,16 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
                                       void *data)
 {
        char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-       u32 *prop;
+       __be32 *prop;
 
        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;
 
-       prop = (u32 *)of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
+       prop = of_get_flat_dt_prop(node, "ibm,pft-size", NULL);
        if (prop != NULL) {
                /* pft_size[0] is the NUMA CEC cookie */
-               ppc64_pft_size = prop[1];
+               ppc64_pft_size = be32_to_cpu(prop[1]);
                return 1;
        }
        return 0;
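------------------------------------------------------------------------
All of these scanners read the flattened device tree, which is big-endian regardless of the kernel's own byte order, so the properties are now typed __be32/__be64 and each cell is converted explicitly. A short sketch of reading a two-cell property this way; the property name is invented for the example.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_fdt.h>
#include <asm/byteorder.h>

static int __init scan_example_cells(unsigned long node)
{
        const __be32 *prop;
        unsigned long size = 0;

        /* "example,two-cells" is a placeholder property name */
        prop = of_get_flat_dt_prop(node, "example,two-cells", &size);
        if (prop == NULL || size < 8)
                return 0;

        pr_info("cells: %u %u\n", be32_to_cpu(prop[0]), be32_to_cpu(prop[1]));
        return 1;
}
------------------------------------------------------------------------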
index 8ed035d2edb5a51540d580e4b6a6a5a106b3a5f9..e3734edffa697ac42cf9d4563fb754b8602d3bcd 100644 (file)
@@ -304,5 +304,54 @@ void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
 {
 }
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
+/*
+ * We do not have access to the sparsemem vmemmap, so we fall back to
+ * walking the list of sparsemem blocks which we already maintain for
+ * the sake of crashdump. In the long run, we might want to maintain
+ * a tree if performance of that linear walk becomes a problem.
+ *
+ * realmode_pfn_to_page functions can fail due to:
+ * 1) As real sparsemem blocks do not lie in RAM contiguously (they
+ * are in virtual address space which is not available in the real mode),
+ * the requested page struct can be split between blocks so get_page/put_page
+ * may fail.
+ * 2) When huge pages are used, the get_page/put_page API will fail
+ * in real mode as the linked addresses in the page struct are virtual
+ * too.
+ */
+struct page *realmode_pfn_to_page(unsigned long pfn)
+{
+       struct vmemmap_backing *vmem_back;
+       struct page *page;
+       unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
+       unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
+
+       for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
+               if (pg_va < vmem_back->virt_addr)
+                       continue;
+
+               /* Check that page struct is not split between real pages */
+               if ((pg_va + sizeof(struct page)) >
+                               (vmem_back->virt_addr + page_size))
+                       return NULL;
+
+               page = (struct page *) (vmem_back->phys + pg_va -
+                               vmem_back->virt_addr);
+               return page;
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
+
+#elif defined(CONFIG_FLATMEM)
+
+struct page *realmode_pfn_to_page(unsigned long pfn)
+{
+       struct page *page = pfn_to_page(pfn);
+       return page;
+}
+EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
+
+#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */
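------------------------------------------------------------------------
realmode_pfn_to_page() is intended for code running in hypervisor real mode, where the result of pfn_to_page() may live in vmemmap pages that cannot be dereferenced safely; it returns NULL whenever the struct page cannot be reached reliably. A hedged sketch of how a real-mode caller might use it — the surrounding policy is assumed for illustration, not taken from this patch.

#include <linux/mm.h>

/* Assumed prototype, declared in the arch headers elsewhere in this series */
extern struct page *realmode_pfn_to_page(unsigned long pfn);

/* Sketch: try to pin a page from real mode, bailing out on anything risky */
static int realmode_try_get_page(unsigned long pfn)
{
        struct page *page = realmode_pfn_to_page(pfn);

        if (!page)                      /* struct page straddles vmemmap blocks */
                return -EAGAIN;

        if (PageCompound(page))         /* huge pages can't be handled here */
                return -EAGAIN;

        if (!get_page_unless_zero(page))
                return -EAGAIN;

        return 0;
}
------------------------------------------------------------------------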
index bf56e33f8257f68717b241c98abefb2f925f2858..2345bdb4d91784bb2bbdd8ce05d9c3566e48f309 100644 (file)
@@ -691,4 +691,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter)
                module_free(NULL, fp->bpf_func);
+       kfree(fp);
 }
index a82a41b4fd917895631104627f6b05b333dff8fe..1a7b1d0f41df9bf4ec344b77bd76223526141777 100644 (file)
@@ -303,6 +303,9 @@ void __init mpc512x_setup_diu(void)
        diu_ops.release_bootmem         = mpc512x_release_bootmem;
 }
 
+#else
+void __init mpc512x_setup_diu(void) { /* EMPTY */ }
+void __init mpc512x_init_diu(void) { /* EMPTY */ }
 #endif
 
 void __init mpc512x_init_IRQ(void)
index b69221ba07fd21868dc7c1892384969fb6c03d53..2898b737deb79e5015c7658568a696254d6f23b5 100644 (file)
@@ -340,7 +340,7 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
 {
        int l1irq;
        int l2irq;
-       struct irq_chip *irqchip;
+       struct irq_chip *uninitialized_var(irqchip);
        void *hndlr;
        int type;
        u32 reg;
@@ -373,9 +373,8 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
        case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
        case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
        case MPC52xx_IRQ_L1_CRIT:
-       default:
                pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
-                       __func__, l1irq);
+                       __func__, l2irq);
                irq_set_chip(virq, &no_irq_chip);
                return 0;
        }
index 8d21ab70e06c4aec26f55f67fc4d0f1574811970..ef0778a0ca8f996ef0b8d6c86a032c6eb05899f7 100644 (file)
@@ -48,7 +48,7 @@ struct cpm_pin {
        int port, pin, flags;
 };
 
-static struct __initdata cpm_pin tqm8xx_pins[] = {
+static struct cpm_pin tqm8xx_pins[] __initdata = {
        /* SMC1 */
        {CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */
        {CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */
@@ -63,7 +63,7 @@ static struct __initdata cpm_pin tqm8xx_pins[] = {
        {CPM_PORTC, 11, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_GPIO},
 };
 
-static struct __initdata cpm_pin tqm8xx_fec_pins[] = {
+static struct cpm_pin tqm8xx_fec_pins[] __initdata = {
        /* MII */
        {CPM_PORTD, 3, CPM_PIN_OUTPUT},
        {CPM_PORTD, 4, CPM_PIN_OUTPUT},
index 6fae5eb99ea6febff2fdd465fcf19c64d9114643..9fced3f6d2dcfadbf0f26c71d57693d79b1c78ba 100644 (file)
@@ -9,6 +9,8 @@ config PPC_POWERNV
        select EPAPR_BOOT
        select PPC_INDIRECT_PIO
        select PPC_UDBG_16550
+       select PPC_SCOM
+       select ARCH_RANDOM
        default y
 
 config POWERNV_MSI
index 300c437d713cf1a6b4c73d2b2bb58830483219fe..050d57e0c78811e2c1276ec90ead89a827d1fc8c 100644 (file)
@@ -1,6 +1,7 @@
 obj-y                  += setup.o opal-takeover.o opal-wrappers.o opal.o
-obj-y                  += opal-rtc.o opal-nvram.o opal-lpc.o
+obj-y                  += opal-rtc.o opal-nvram.o opal-lpc.o rng.o
 
 obj-$(CONFIG_SMP)      += smp.o
 obj-$(CONFIG_PCI)      += pci.o pci-p5ioc2.o pci-ioda.o
 obj-$(CONFIG_EEH)      += eeh-ioda.o eeh-powernv.o
+obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
index cf42e74514fa192e3a8d64ed0d41ada445a974e3..02245cee78183852d52f3a907023dcd9a63529bf 100644 (file)
@@ -59,26 +59,60 @@ static struct notifier_block ioda_eeh_nb = {
 };
 
 #ifdef CONFIG_DEBUG_FS
-static int ioda_eeh_dbgfs_set(void *data, u64 val)
+static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
 {
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;
 
-       out_be64(phb->regs + 0xD10, val);
+       out_be64(phb->regs + offset, val);
        return 0;
 }
 
-static int ioda_eeh_dbgfs_get(void *data, u64 *val)
+static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
 {
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;
 
-       *val = in_be64(phb->regs + 0xD10);
+       *val = in_be64(phb->regs + offset);
        return 0;
 }
 
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_dbgfs_ops, ioda_eeh_dbgfs_get,
-                       ioda_eeh_dbgfs_set, "0x%llx\n");
+static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
+{
+       return ioda_eeh_dbgfs_set(data, 0xD10, val);
+}
+
+static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
+{
+       return ioda_eeh_dbgfs_get(data, 0xD10, val);
+}
+
+static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
+{
+       return ioda_eeh_dbgfs_set(data, 0xD90, val);
+}
+
+static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
+{
+       return ioda_eeh_dbgfs_get(data, 0xD90, val);
+}
+
+static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
+{
+       return ioda_eeh_dbgfs_set(data, 0xE10, val);
+}
+
+static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
+{
+       return ioda_eeh_dbgfs_get(data, 0xE10, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
+                       ioda_eeh_outb_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
+                       ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
+                       ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
 #endif /* CONFIG_DEBUG_FS */
 
 /**
@@ -106,27 +140,30 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
                ioda_eeh_nb_init = 1;
        }
 
-       /* FIXME: Enable it for PHB3 later */
-       if (phb->type == PNV_PHB_IODA1) {
+       /* HUB diag-data is not needed on PHB3 */
+       if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
+               hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                if (!hub_diag) {
-                       hub_diag = (char *)__get_free_page(GFP_KERNEL |
-                                                          __GFP_ZERO);
-                       if (!hub_diag) {
-                               pr_err("%s: Out of memory !\n",
-                                      __func__);
-                               return -ENOMEM;
-                       }
+                       pr_err("%s: Out of memory !\n", __func__);
+                       return -ENOMEM;
                }
+       }
 
 #ifdef CONFIG_DEBUG_FS
-               if (phb->dbgfs)
-                       debugfs_create_file("err_injct", 0600,
-                                           phb->dbgfs, hose,
-                                           &ioda_eeh_dbgfs_ops);
+       if (phb->dbgfs) {
+               debugfs_create_file("err_injct_outbound", 0600,
+                                   phb->dbgfs, hose,
+                                   &ioda_eeh_outb_dbgfs_ops);
+               debugfs_create_file("err_injct_inboundA", 0600,
+                                   phb->dbgfs, hose,
+                                   &ioda_eeh_inbA_dbgfs_ops);
+               debugfs_create_file("err_injct_inboundB", 0600,
+                                   phb->dbgfs, hose,
+                                   &ioda_eeh_inbB_dbgfs_ops);
+       }
 #endif
 
-               phb->eeh_state |= PNV_EEH_STATE_ENABLED;
-       }
+       phb->eeh_state |= PNV_EEH_STATE_ENABLED;
 
        return 0;
 }
@@ -546,8 +583,8 @@ static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
                        phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
        if (ret) {
                spin_unlock_irqrestore(&phb->lock, flags);
-               pr_warning("%s: Failed to get log for PHB#%x-PE#%x\n",
-                          __func__, hose->global_number, pe->addr);
+               pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
+                          __func__, hose->global_number, pe->addr, ret);
                return -EIO;
        }
 
@@ -710,6 +747,73 @@ static void ioda_eeh_p7ioc_phb_diag(struct pci_controller *hose,
        }
 }
 
+static void ioda_eeh_phb3_phb_diag(struct pci_controller *hose,
+                                   struct OpalIoPhbErrorCommon *common)
+{
+       struct OpalIoPhb3ErrorData *data;
+       int i;
+
+       data = (struct OpalIoPhb3ErrorData*)common;
+       pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n\n",
+               hose->global_number, common->version);
+
+       pr_info("  brdgCtl:              %08x\n", data->brdgCtl);
+
+       pr_info("  portStatusReg:        %08x\n", data->portStatusReg);
+       pr_info("  rootCmplxStatus:      %08x\n", data->rootCmplxStatus);
+       pr_info("  busAgentStatus:       %08x\n", data->busAgentStatus);
+
+       pr_info("  deviceStatus:         %08x\n", data->deviceStatus);
+       pr_info("  slotStatus:           %08x\n", data->slotStatus);
+       pr_info("  linkStatus:           %08x\n", data->linkStatus);
+       pr_info("  devCmdStatus:         %08x\n", data->devCmdStatus);
+       pr_info("  devSecStatus:         %08x\n", data->devSecStatus);
+
+       pr_info("  rootErrorStatus:      %08x\n", data->rootErrorStatus);
+       pr_info("  uncorrErrorStatus:    %08x\n", data->uncorrErrorStatus);
+       pr_info("  corrErrorStatus:      %08x\n", data->corrErrorStatus);
+       pr_info("  tlpHdr1:              %08x\n", data->tlpHdr1);
+       pr_info("  tlpHdr2:              %08x\n", data->tlpHdr2);
+       pr_info("  tlpHdr3:              %08x\n", data->tlpHdr3);
+       pr_info("  tlpHdr4:              %08x\n", data->tlpHdr4);
+       pr_info("  sourceId:             %08x\n", data->sourceId);
+       pr_info("  errorClass:           %016llx\n", data->errorClass);
+       pr_info("  correlator:           %016llx\n", data->correlator);
+       pr_info("  nFir:                 %016llx\n", data->nFir);
+       pr_info("  nFirMask:             %016llx\n", data->nFirMask);
+       pr_info("  nFirWOF:              %016llx\n", data->nFirWOF);
+       pr_info("  PhbPlssr:             %016llx\n", data->phbPlssr);
+       pr_info("  PhbCsr:               %016llx\n", data->phbCsr);
+       pr_info("  lemFir:               %016llx\n", data->lemFir);
+       pr_info("  lemErrorMask:         %016llx\n", data->lemErrorMask);
+       pr_info("  lemWOF:               %016llx\n", data->lemWOF);
+       pr_info("  phbErrorStatus:       %016llx\n", data->phbErrorStatus);
+       pr_info("  phbFirstErrorStatus:  %016llx\n", data->phbFirstErrorStatus);
+       pr_info("  phbErrorLog0:         %016llx\n", data->phbErrorLog0);
+       pr_info("  phbErrorLog1:         %016llx\n", data->phbErrorLog1);
+       pr_info("  mmioErrorStatus:      %016llx\n", data->mmioErrorStatus);
+       pr_info("  mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
+       pr_info("  mmioErrorLog0:        %016llx\n", data->mmioErrorLog0);
+       pr_info("  mmioErrorLog1:        %016llx\n", data->mmioErrorLog1);
+       pr_info("  dma0ErrorStatus:      %016llx\n", data->dma0ErrorStatus);
+       pr_info("  dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
+       pr_info("  dma0ErrorLog0:        %016llx\n", data->dma0ErrorLog0);
+       pr_info("  dma0ErrorLog1:        %016llx\n", data->dma0ErrorLog1);
+       pr_info("  dma1ErrorStatus:      %016llx\n", data->dma1ErrorStatus);
+       pr_info("  dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
+       pr_info("  dma1ErrorLog0:        %016llx\n", data->dma1ErrorLog0);
+       pr_info("  dma1ErrorLog1:        %016llx\n", data->dma1ErrorLog1);
+
+       for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
+               if ((data->pestA[i] >> 63) == 0 &&
+                   (data->pestB[i] >> 63) == 0)
+                       continue;
+
+               pr_info("  PE[%3d] PESTA:        %016llx\n", i, data->pestA[i]);
+               pr_info("          PESTB:        %016llx\n", data->pestB[i]);
+       }
+}
+
 static void ioda_eeh_phb_diag(struct pci_controller *hose)
 {
        struct pnv_phb *phb = hose->private_data;
@@ -728,6 +832,9 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
        case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
                ioda_eeh_p7ioc_phb_diag(hose, common);
                break;
+       case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
+               ioda_eeh_phb3_phb_diag(hose, common);
+               break;
        default:
                pr_warning("%s: Unrecognized I/O chip %d\n",
                           __func__, common->ioType);
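------------------------------------------------------------------------
The three error-injection entries above are built from the kernel's simple-attribute pattern: a get/set pair per register offset, wrapped by DEFINE_SIMPLE_ATTRIBUTE() and registered with debugfs_create_file(). A minimal stand-alone sketch of that pattern; the register, file name, and parent dentry are invented for illustration.

#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 example_reg;                 /* stands in for a real MMIO register */

static int example_dbgfs_get(void *data, u64 *val)
{
        *val = example_reg;
        return 0;
}

static int example_dbgfs_set(void *data, u64 val)
{
        example_reg = val;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_dbgfs_ops, example_dbgfs_get,
                        example_dbgfs_set, "0x%llx\n");

static void example_dbgfs_init(struct dentry *parent)
{
        debugfs_create_file("err_injct_example", 0600, parent, NULL,
                            &example_dbgfs_ops);
}
------------------------------------------------------------------------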
index 79663d26e6eaa1de8749c6994316737f991106ab..73b981438cc583e0ba4345129046d56b72a8b9c1 100644 (file)
@@ -144,11 +144,8 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
        /*
         * Enable EEH explicitly so that we will do EEH check
         * while accessing I/O stuff
-        *
-        * FIXME: Enable that for PHB3 later
         */
-       if (phb->type == PNV_PHB_IODA1)
-               eeh_subsystem_enabled = 1;
+       eeh_subsystem_enabled = 1;
 
        /* Save memory bars */
        eeh_save_bars(edev);
index 3f83e1ae26acb591fbf3614bd9afbb6ae4ffa3d3..acd9f7e96678256e2620b24494fded8e90b08ebb 100644 (file)
@@ -65,7 +65,7 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
 void __init opal_nvram_init(void)
 {
        struct device_node *np;
-       const u32 *nbytes_p;
+       const __be32 *nbytes_p;
 
        np = of_find_compatible_node(NULL, NULL, "ibm,opal-nvram");
        if (np == NULL)
@@ -76,7 +76,7 @@ void __init opal_nvram_init(void)
                of_node_put(np);
                return;
        }
-       nvram_size = *nbytes_p;
+       nvram_size = be32_to_cpup(nbytes_p);
 
        printk(KERN_INFO "OPAL nvram setup, %u bytes\n", nvram_size);
        of_node_put(np);
index 2aa7641aac9bd8080e2e42b3c0ed98ba7920c0ea..7d07c7e80ec09e9232b3cafd9c41a104992b5331 100644 (file)
@@ -37,10 +37,12 @@ unsigned long __init opal_get_boot_time(void)
        struct rtc_time tm;
        u32 y_m_d;
        u64 h_m_s_ms;
+       __be32 __y_m_d;
+       __be64 __h_m_s_ms;
        long rc = OPAL_BUSY;
 
        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
-               rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
+               rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
                if (rc == OPAL_BUSY_EVENT)
                        opal_poll_events(NULL);
                else
@@ -48,6 +50,8 @@ unsigned long __init opal_get_boot_time(void)
        }
        if (rc != OPAL_SUCCESS)
                return 0;
+       y_m_d = be32_to_cpu(__y_m_d);
+       h_m_s_ms = be64_to_cpu(__h_m_s_ms);
        opal_to_tm(y_m_d, h_m_s_ms, &tm);
        return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                      tm.tm_hour, tm.tm_min, tm.tm_sec);
@@ -58,9 +62,11 @@ void opal_get_rtc_time(struct rtc_time *tm)
        long rc = OPAL_BUSY;
        u32 y_m_d;
        u64 h_m_s_ms;
+       __be32 __y_m_d;
+       __be64 __h_m_s_ms;
 
        while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
-               rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
+               rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
                if (rc == OPAL_BUSY_EVENT)
                        opal_poll_events(NULL);
                else
@@ -68,6 +74,8 @@ void opal_get_rtc_time(struct rtc_time *tm)
        }
        if (rc != OPAL_SUCCESS)
                return;
+       y_m_d = be32_to_cpu(__y_m_d);
+       h_m_s_ms = be64_to_cpu(__h_m_s_ms);
        opal_to_tm(y_m_d, h_m_s_ms, tm);
 }
 
index 8f3844535fbb2e4167ced361831e6c2aeab9a190..2a03e1e63c7a910c204486f32dc9957c4c6cd47e 100644 (file)
@@ -34,7 +34,7 @@
        mtmsrd  r12,1;                  \
        LOAD_REG_ADDR(r0,.opal_return); \
        mtlr    r0;                     \
-       li      r0,MSR_DR|MSR_IR;       \
+       li      r0,MSR_DR|MSR_IR|MSR_LE;\
        andc    r12,r12,r0;             \
        li      r0,token;               \
        mtspr   SPRN_HSRR1,r12;         \
        hrfid
 
 _STATIC(opal_return)
+       /*
+        * Fixup endian on OPAL return... we should be able to simplify
+        * this by instead converting the below trampoline to a set of
+        * bytes (always BE) since MSR:LE will end up fixed up as a side
+        * effect of the rfid.
+        */
+       FIXUP_ENDIAN
        ld      r2,PACATOC(r13);
        ld      r4,8(r1);
        ld      r5,16(r1);
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
new file mode 100644 (file)
index 0000000..3ed5c64
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * PowerNV SCOM bus handling.
+ *
+ * Copyright 2013 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/bug.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/opal.h>
+#include <asm/scom.h>
+
+/*
+ * We could probably fit that inside the scom_map_t
+ * which is a void* after all but it's really too ugly
+ * so let's kmalloc it for now
+ */
+struct opal_scom_map {
+       uint32_t chip;
+       uint32_t addr;
+};
+
+static scom_map_t opal_scom_map(struct device_node *dev, u64 reg, u64 count)
+{
+       struct opal_scom_map *m;
+       const __be32 *gcid;
+
+       if (!of_get_property(dev, "scom-controller", NULL)) {
+               pr_err("%s: device %s is not a SCOM controller\n",
+                       __func__, dev->full_name);
+               return SCOM_MAP_INVALID;
+       }
+       gcid = of_get_property(dev, "ibm,chip-id", NULL);
+       if (!gcid) {
+               pr_err("%s: device %s has no ibm,chip-id\n",
+                       __func__, dev->full_name);
+               return SCOM_MAP_INVALID;
+       }
+       m = kmalloc(sizeof(struct opal_scom_map), GFP_KERNEL);
+       if (!m)
+               return NULL;
+       m->chip = be32_to_cpup(gcid);
+       m->addr = reg;
+
+       return (scom_map_t)m;
+}
+
+static void opal_scom_unmap(scom_map_t map)
+{
+       kfree(map);
+}
+
+static int opal_xscom_err_xlate(int64_t rc)
+{
+       switch(rc) {
+       case 0:
+               return 0;
+       /* Add more translations if necessary */
+       default:
+               return -EIO;
+       }
+}
+
+static int opal_scom_read(scom_map_t map, u32 reg, u64 *value)
+{
+       struct opal_scom_map *m = map;
+       int64_t rc;
+
+       rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+       return opal_xscom_err_xlate(rc);
+}
+
+static int opal_scom_write(scom_map_t map, u32 reg, u64 value)
+{
+       struct opal_scom_map *m = map;
+       int64_t rc;
+
+       rc = opal_xscom_write(m->chip, m->addr + reg, value);
+       return opal_xscom_err_xlate(rc);
+}
+
+static const struct scom_controller opal_scom_controller = {
+       .map    = opal_scom_map,
+       .unmap  = opal_scom_unmap,
+       .read   = opal_scom_read,
+       .write  = opal_scom_write
+};
+
+static int opal_xscom_init(void)
+{
+       if (firmware_has_feature(FW_FEATURE_OPALv3))
+               scom_init(&opal_scom_controller);
+       return 0;
+}
+arch_initcall(opal_xscom_init);
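------------------------------------------------------------------------
The backend above only fills in a scom_controller and hands it to scom_init(); consumers keep using the generic SCOM wrappers. A hedged sketch of such a consumer, assuming the scom_map()/scom_read()/scom_unmap() wrappers from asm/scom.h with the error-returning signatures this series moves them to, and a device node carrying "scom-controller" and "ibm,chip-id"; the register offsets are placeholders.

#include <linux/of.h>
#include <asm/scom.h>

static int example_xscom_read(struct device_node *xscom_node, u64 *val)
{
        scom_map_t map;
        int rc;

        map = scom_map(xscom_node, 0 /* base */, 1 /* count */);
        if (!scom_map_ok(map))
                return -ENODEV;

        rc = scom_read(map, 0 /* register */, val);
        scom_unmap(map);
        return rc;
}
------------------------------------------------------------------------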
index 2911abe550f1d9182ce793f0ec4777b0c85295b7..09336f0c54c56e12954b454f6d562aac64508c50 100644 (file)
@@ -77,6 +77,7 @@ int __init early_init_dt_scan_opal(unsigned long node,
 
 static int __init opal_register_exception_handlers(void)
 {
+#ifdef __BIG_ENDIAN__
        u64 glue;
 
        if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
@@ -94,6 +95,7 @@ static int __init opal_register_exception_handlers(void)
                                        0, glue);
        glue += 128;
        opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
+#endif
 
        return 0;
 }
@@ -164,27 +166,28 @@ void opal_notifier_disable(void)
 
 int opal_get_chars(uint32_t vtermno, char *buf, int count)
 {
-       s64 len, rc;
-       u64 evt;
+       s64 rc;
+       __be64 evt, len;
 
        if (!opal.entry)
                return -ENODEV;
        opal_poll_events(&evt);
-       if ((evt & OPAL_EVENT_CONSOLE_INPUT) == 0)
+       if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
                return 0;
-       len = count;
-       rc = opal_console_read(vtermno, &len, buf);
+       len = cpu_to_be64(count);
+       rc = opal_console_read(vtermno, &len, buf);
        if (rc == OPAL_SUCCESS)
-               return len;
+               return be64_to_cpu(len);
        return 0;
 }
 
 int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
 {
        int written = 0;
+       __be64 olen;
        s64 len, rc;
        unsigned long flags;
-       u64 evt;
+       __be64 evt;
 
        if (!opal.entry)
                return -ENODEV;
@@ -199,13 +202,14 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
         */
        spin_lock_irqsave(&opal_write_lock, flags);
        if (firmware_has_feature(FW_FEATURE_OPALv2)) {
-               rc = opal_console_write_buffer_space(vtermno, &len);
+               rc = opal_console_write_buffer_space(vtermno, &olen);
+               len = be64_to_cpu(olen);
                if (rc || len < total_len) {
                        spin_unlock_irqrestore(&opal_write_lock, flags);
                        /* Closed -> drop characters */
                        if (rc)
                                return total_len;
-                       opal_poll_events(&evt);
+                       opal_poll_events(NULL);
                        return -EAGAIN;
                }
        }
@@ -216,8 +220,9 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
        rc = OPAL_BUSY;
        while(total_len > 0 && (rc == OPAL_BUSY ||
                                rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
-               len = total_len;
-               rc = opal_console_write(vtermno, &len, data);
+               olen = cpu_to_be64(total_len);
+               rc = opal_console_write(vtermno, &olen, data);
+               len = be64_to_cpu(olen);
 
                /* Closed or other error drop */
                if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
@@ -237,7 +242,8 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
                 */
                do
                        opal_poll_events(&evt);
-               while(rc == OPAL_SUCCESS && (evt & OPAL_EVENT_CONSOLE_OUTPUT));
+               while(rc == OPAL_SUCCESS &&
+                       (be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
        }
        spin_unlock_irqrestore(&opal_write_lock, flags);
        return written;
@@ -360,7 +366,7 @@ int opal_machine_check(struct pt_regs *regs)
 
 static irqreturn_t opal_interrupt(int irq, void *data)
 {
-       uint64_t events;
+       __be64 events;
 
        opal_handle_interrupt(virq_to_hw(irq), &events);
 
@@ -372,7 +378,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
 static int __init opal_init(void)
 {
        struct device_node *np, *consoles;
-       const u32 *irqs;
+       const __be32 *irqs;
        int rc, i, irqlen;
 
        opal_node = of_find_node_by_path("/ibm,opal");
index 74a5a5773b1fbce0c31f567ef6536e75038fff98..c639af7d4826b595f8b28ded90497f677c5ff5fa 100644 (file)
@@ -70,6 +70,16 @@ define_pe_printk_level(pe_err, KERN_ERR);
 define_pe_printk_level(pe_warn, KERN_WARNING);
 define_pe_printk_level(pe_info, KERN_INFO);
 
+/*
+ * stdcix is only supposed to be used in hypervisor real mode as per
+ * the architecture spec
+ */
+static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
+{
+       __asm__ __volatile__("stdcix %0,0,%1"
+               : : "r" (val), "r" (paddr) : "memory");
+}
+
 static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
 {
        unsigned long pe;
@@ -454,10 +464,13 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
        }
 }
 
-static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
-                                        u64 *startp, u64 *endp)
+static void pnv_pci_ioda1_tce_invalidate(struct pnv_ioda_pe *pe,
+                                        struct iommu_table *tbl,
+                                        __be64 *startp, __be64 *endp, bool rm)
 {
-       u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+       __be64 __iomem *invalidate = rm ?
+               (__be64 __iomem *)pe->tce_inval_reg_phys :
+               (__be64 __iomem *)tbl->it_index;
        unsigned long start, end, inc;
 
        start = __pa(startp);
@@ -484,7 +497,10 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
 
         mb(); /* Ensure above stores are visible */
         while (start <= end) {
-                __raw_writeq(start, invalidate);
+               if (rm)
+                       __raw_rm_writeq(cpu_to_be64(start), invalidate);
+               else
+                       __raw_writeq(cpu_to_be64(start), invalidate);
                 start += inc;
         }
 
@@ -496,10 +512,12 @@ static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
 
 static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
                                         struct iommu_table *tbl,
-                                        u64 *startp, u64 *endp)
+                                        __be64 *startp, __be64 *endp, bool rm)
 {
        unsigned long start, end, inc;
-       u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+       __be64 __iomem *invalidate = rm ?
+               (__be64 __iomem *)pe->tce_inval_reg_phys :
+               (__be64 __iomem *)tbl->it_index;
 
        /* We'll invalidate DMA address in PE scope */
        start = 0x2ul << 60;
@@ -515,22 +533,25 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
        mb();
 
        while (start <= end) {
-               __raw_writeq(start, invalidate);
+               if (rm)
+                       __raw_rm_writeq(cpu_to_be64(start), invalidate);
+               else
+                       __raw_writeq(cpu_to_be64(start), invalidate);
                start += inc;
        }
 }
 
 void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
-                                u64 *startp, u64 *endp)
+                                __be64 *startp, __be64 *endp, bool rm)
 {
        struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
                                              tce32_table);
        struct pnv_phb *phb = pe->phb;
 
        if (phb->type == PNV_PHB_IODA1)
-               pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
+               pnv_pci_ioda1_tce_invalidate(pe, tbl, startp, endp, rm);
        else
-               pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
+               pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp, rm);
 }
 
 static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
@@ -603,7 +624,9 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
                 * bus number, print that out instead.
                 */
                tbl->it_busno = 0;
-               tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
+               pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
+               tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
+                               8);
                tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
                               TCE_PCI_SWINV_PAIR;
        }
@@ -681,7 +704,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
                 * bus number, print that out instead.
                 */
                tbl->it_busno = 0;
-               tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
+               pe->tce_inval_reg_phys = be64_to_cpup(swinvp);
+               tbl->it_index = (unsigned long)ioremap(pe->tce_inval_reg_phys,
+                               8);
                tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
        }
        iommu_init_table(tbl, phb->hose->node);
@@ -786,8 +811,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
        struct irq_data *idata;
        struct irq_chip *ichip;
        unsigned int xive_num = hwirq - phb->msi_base;
-       uint64_t addr64;
-       uint32_t addr32, data;
+       __be32 data;
        int rc;
 
        /* No PE assigned ? bail out ... no MSI for you ! */
@@ -811,6 +835,8 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
        }
 
        if (is_64) {
+               __be64 addr64;
+
                rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
                                     &addr64, &data);
                if (rc) {
@@ -818,9 +844,11 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                                pci_name(dev), rc);
                        return -EIO;
                }
-               msg->address_hi = addr64 >> 32;
-               msg->address_lo = addr64 & 0xfffffffful;
+               msg->address_hi = be64_to_cpu(addr64) >> 32;
+               msg->address_lo = be64_to_cpu(addr64) & 0xfffffffful;
        } else {
+               __be32 addr32;
+
                rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
                                     &addr32, &data);
                if (rc) {
@@ -829,9 +857,9 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
                        return -EIO;
                }
                msg->address_hi = 0;
-               msg->address_lo = addr32;
+               msg->address_lo = be32_to_cpu(addr32);
        }
-       msg->data = data;
+       msg->data = be32_to_cpu(data);
 
        /*
         * Change the IRQ chip for the MSI interrupts on PHB3.
@@ -1106,8 +1134,8 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
        struct pci_controller *hose;
        struct pnv_phb *phb;
        unsigned long size, m32map_off, iomap_off, pemap_off;
-       const u64 *prop64;
-       const u32 *prop32;
+       const __be64 *prop64;
+       const __be32 *prop32;
        int len;
        u64 phb_id;
        void *aux;
@@ -1142,8 +1170,8 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
        spin_lock_init(&phb->lock);
        prop32 = of_get_property(np, "bus-range", &len);
        if (prop32 && len == 8) {
-               hose->first_busno = prop32[0];
-               hose->last_busno = prop32[1];
+               hose->first_busno = be32_to_cpu(prop32[0]);
+               hose->last_busno = be32_to_cpu(prop32[1]);
        } else {
                pr_warn("  Broken <bus-range> on %s\n", np->full_name);
                hose->first_busno = 0;
@@ -1175,7 +1203,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
        if (!prop32)
                phb->ioda.total_pe = 1;
        else
-               phb->ioda.total_pe = *prop32;
+               phb->ioda.total_pe = be32_to_cpup(prop32);
 
        phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
        /* FW Has already off top 64k of M32 space (MSI space) */
@@ -1285,7 +1313,7 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np)
 void __init pnv_pci_init_ioda_hub(struct device_node *np)
 {
        struct device_node *phbn;
-       const u64 *prop64;
+       const __be64 *prop64;
        u64 hub_id;
 
        pr_info("Probing IODA IO-Hub %s\n", np->full_name);
index b68db6325c1b2a222f6045d5d7d5813c1c329a1a..f8b4bd8afb2e5da39cd8632c7a6a8eae54448aeb 100644 (file)
@@ -99,7 +99,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
                                           void *tce_mem, u64 tce_size)
 {
        struct pnv_phb *phb;
-       const u64 *prop64;
+       const __be64 *prop64;
        u64 phb_id;
        int64_t rc;
        static int primary = 1;
@@ -178,7 +178,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
 void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
 {
        struct device_node *phbn;
-       const u64 *prop64;
+       const __be64 *prop64;
        u64 hub_id;
        void *tce_mem;
        uint64_t tce_per_phb;
index a28d3b5e6393fa8b9d9d11bc81210ab9db13f38b..921ae673baf3e4ae86412fb3f3a932de030062e7 100644 (file)
@@ -236,7 +236,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
 {
        s64     rc;
        u8      fstate;
-       u16     pcierr;
+       __be16  pcierr;
        u32     pe_no;
 
        /*
@@ -283,16 +283,16 @@ int pnv_pci_cfg_read(struct device_node *dn,
                break;
        }
        case 2: {
-               u16 v16;
+               __be16 v16;
                rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
                                                   &v16);
-               *val = (rc == OPAL_SUCCESS) ? v16 : 0xffff;
+               *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
                break;
        }
        case 4: {
-               u32 v32;
+               __be32 v32;
                rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
-               *val = (rc == OPAL_SUCCESS) ? v32 : 0xffffffff;
+               *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
                break;
        }
        default:
@@ -401,10 +401,10 @@ struct pci_ops pnv_pci_ops = {
 
 static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
                         unsigned long uaddr, enum dma_data_direction direction,
-                        struct dma_attrs *attrs)
+                        struct dma_attrs *attrs, bool rm)
 {
        u64 proto_tce;
-       u64 *tcep, *tces;
+       __be64 *tcep, *tces;
        u64 rpn;
 
        proto_tce = TCE_PCI_READ; // Read allowed
@@ -412,33 +412,48 @@ static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
        if (direction != DMA_TO_DEVICE)
                proto_tce |= TCE_PCI_WRITE;
 
-       tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
+       tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
        rpn = __pa(uaddr) >> TCE_SHIFT;
 
        while (npages--)
-               *(tcep++) = proto_tce | (rpn++ << TCE_RPN_SHIFT);
+               *(tcep++) = cpu_to_be64(proto_tce | (rpn++ << TCE_RPN_SHIFT));
 
        /* Some implementations won't cache invalid TCEs and thus may not
         * need that flush. We'll probably turn it_type into a bit mask
         * of flags if that becomes the case
         */
        if (tbl->it_type & TCE_PCI_SWINV_CREATE)
-               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
+               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
 
        return 0;
 }
 
-static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
+static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages,
+                           unsigned long uaddr,
+                           enum dma_data_direction direction,
+                           struct dma_attrs *attrs)
 {
-       u64 *tcep, *tces;
+       return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs,
+                       false);
+}
+
+static void pnv_tce_free(struct iommu_table *tbl, long index, long npages,
+               bool rm)
+{
+       __be64 *tcep, *tces;
 
-       tces = tcep = ((u64 *)tbl->it_base) + index - tbl->it_offset;
+       tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
 
        while (npages--)
-               *(tcep++) = 0;
+               *(tcep++) = cpu_to_be64(0);
 
        if (tbl->it_type & TCE_PCI_SWINV_FREE)
-               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
+               pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
+}
+
+static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages)
+{
+       pnv_tce_free(tbl, index, npages, false);
 }
 
 static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
@@ -446,6 +461,19 @@ static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
        return ((u64 *)tbl->it_base)[index - tbl->it_offset];
 }
 
+static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages,
+                           unsigned long uaddr,
+                           enum dma_data_direction direction,
+                           struct dma_attrs *attrs)
+{
+       return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true);
+}
+
+static void pnv_tce_free_rm(struct iommu_table *tbl, long index, long npages)
+{
+       pnv_tce_free(tbl, index, npages, true);
+}
+
 void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
                               void *tce_mem, u64 tce_size,
                               u64 dma_offset)
@@ -484,8 +512,8 @@ static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose)
        swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
                                 NULL);
        if (swinvp) {
-               tbl->it_busno = swinvp[1];
-               tbl->it_index = (unsigned long)ioremap(swinvp[0], 8);
+               tbl->it_busno = be64_to_cpu(swinvp[1]);
+               tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
                tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
        }
        return tbl;
@@ -610,8 +638,10 @@ void __init pnv_pci_init(void)
 
        /* Configure IOMMU DMA hooks */
        ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
-       ppc_md.tce_build = pnv_tce_build;
-       ppc_md.tce_free = pnv_tce_free;
+       ppc_md.tce_build = pnv_tce_build_vm;
+       ppc_md.tce_free = pnv_tce_free_vm;
+       ppc_md.tce_build_rm = pnv_tce_build_rm;
+       ppc_md.tce_free_rm = pnv_tce_free_rm;
        ppc_md.tce_get = pnv_tce_get;
        ppc_md.pci_probe_mode = pnv_pci_probe_mode;
        set_pci_dma_ops(&dma_iommu_ops);
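The pnv_pci.c hunks above change the in-memory TCE entries from native u64 to explicit __be64 and wrap every store in cpu_to_be64(), so the table keeps its hardware (big-endian) layout even when the kernel itself runs little-endian. Below is a minimal user-space sketch of that store pattern only; the flag values, the shift and the table size are made up for illustration, and glibc's htobe64() stands in for the kernel's cpu_to_be64().

#include <endian.h>     /* htobe64(): glibc/musl; stands in for cpu_to_be64() */
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag bits and shift, not the kernel's definitions. */
#define TCE_READ        0x1ULL
#define TCE_WRITE       0x2ULL
#define TCE_RPN_SHIFT   12

/*
 * Same pattern as the patched pnv_tce_build(): compute each entry in CPU
 * byte order, then write it through an explicit CPU-to-big-endian
 * conversion so the table layout is identical on LE and BE hosts.
 */
static void tce_build(uint64_t *tbl_be, long index, long npages,
                      uint64_t rpn, int writable)
{
        uint64_t proto = TCE_READ | (writable ? TCE_WRITE : 0);

        while (npages--)
                tbl_be[index++] = htobe64(proto | (rpn++ << TCE_RPN_SHIFT));
}

int main(void)
{
        uint64_t table[4] = { 0 };

        tce_build(table, 0, 4, 0x1000, 1);
        printf("entry 0: %016llx\n", (unsigned long long)table[0]);
        return 0;
}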
index d633c64e05a1ef9204924b48a132bb505cd5e0a8..64d3b12e5b6d2661d4729a61e26dfc3bbfd98513 100644 (file)
@@ -17,7 +17,7 @@ enum pnv_phb_model {
        PNV_PHB_MODEL_PHB3,
 };
 
-#define PNV_PCI_DIAG_BUF_SIZE  4096
+#define PNV_PCI_DIAG_BUF_SIZE  8192
 #define PNV_IODA_PE_DEV                (1 << 0)        /* PE has single PCI device     */
 #define PNV_IODA_PE_BUS                (1 << 1)        /* PE has primary PCI bus       */
 #define PNV_IODA_PE_BUS_ALL    (1 << 2)        /* PE has subordinate buses     */
@@ -52,6 +52,7 @@ struct pnv_ioda_pe {
        int                     tce32_seg;
        int                     tce32_segcount;
        struct iommu_table      tce32_table;
+       phys_addr_t             tce_inval_reg_phys;
 
        /* XXX TODO: Add support for additional 64-bit iommus */
 
@@ -193,6 +194,6 @@ extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
 extern void pnv_pci_init_ioda_hub(struct device_node *np);
 extern void pnv_pci_init_ioda2_phb(struct device_node *np);
 extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
-                                       u64 *startp, u64 *endp);
+                                       __be64 *startp, __be64 *endp, bool rm);
 
 #endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
new file mode 100644 (file)
index 0000000..02db7d7
--- /dev/null
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt)    "powernv-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/archrandom.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+
+
+struct powernv_rng {
+       void __iomem *regs;
+       unsigned long mask;
+};
+
+static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng);
+
+
+static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
+{
+       unsigned long parity;
+
+       /* Calculate the parity of the value */
+       asm ("popcntd %0,%1" : "=r" (parity) : "r" (val));
+
+       /* xor our value with the previous mask */
+       val ^= rng->mask;
+
+       /* update the mask based on the parity of this value */
+       rng->mask = (rng->mask << 1) | (parity & 1);
+
+       return val;
+}
+
+int powernv_get_random_long(unsigned long *v)
+{
+       struct powernv_rng *rng;
+
+       rng = get_cpu_var(powernv_rng);
+
+       *v = rng_whiten(rng, in_be64(rng->regs));
+
+       put_cpu_var(rng);
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(powernv_get_random_long);
+
+static __init void rng_init_per_cpu(struct powernv_rng *rng,
+                                   struct device_node *dn)
+{
+       int chip_id, cpu;
+
+       chip_id = of_get_ibm_chip_id(dn);
+       if (chip_id == -1)
+               pr_warn("No ibm,chip-id found for %s.\n", dn->full_name);
+
+       for_each_possible_cpu(cpu) {
+               if (per_cpu(powernv_rng, cpu) == NULL ||
+                   cpu_to_chip_id(cpu) == chip_id) {
+                       per_cpu(powernv_rng, cpu) = rng;
+               }
+       }
+}
+
+static __init int rng_create(struct device_node *dn)
+{
+       struct powernv_rng *rng;
+       unsigned long val;
+
+       rng = kzalloc(sizeof(*rng), GFP_KERNEL);
+       if (!rng)
+               return -ENOMEM;
+
+       rng->regs = of_iomap(dn, 0);
+       if (!rng->regs) {
+               kfree(rng);
+               return -ENXIO;
+       }
+
+       val = in_be64(rng->regs);
+       rng->mask = val;
+
+       rng_init_per_cpu(rng, dn);
+
+       pr_info_once("Registering arch random hook.\n");
+
+       ppc_md.get_random_long = powernv_get_random_long;
+
+       return 0;
+}
+
+static __init int rng_init(void)
+{
+       struct device_node *dn;
+       int rc;
+
+       for_each_compatible_node(dn, NULL, "ibm,power-rng") {
+               rc = rng_create(dn);
+               if (rc) {
+                       pr_err("Failed creating rng for %s (%d).\n",
+                               dn->full_name, rc);
+                       continue;
+               }
+
+               /* Create devices for hwrng driver */
+               of_platform_device_create(dn, NULL, NULL);
+       }
+
+       return 0;
+}
+subsys_initcall(rng_init);
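rng_whiten() above mixes each raw hardware sample by XORing it with a running mask and then shifting the sample's parity bit into that mask for the next call. The sketch below mirrors that logic with the GCC/Clang __builtin_popcountll() intrinsic in place of the popcntd inline assembly; the seed and sample values are invented for illustration.

#include <stdint.h>
#include <stdio.h>

struct rng_state {
        uint64_t mask;  /* seeded from the first raw read, as rng_create() does */
};

/* Parity of the raw sample (taken before the XOR) feeds the next mask. */
static uint64_t whiten(struct rng_state *st, uint64_t raw)
{
        uint64_t parity = (uint64_t)__builtin_popcountll(raw) & 1;
        uint64_t out = raw ^ st->mask;

        st->mask = (st->mask << 1) | parity;
        return out;
}

int main(void)
{
        struct rng_state st = { .mask = 0xdeadbeefcafef00dULL }; /* illustrative seed   */
        uint64_t raw = 0x0123456789abcdefULL;                    /* would be in_be64()  */

        printf("whitened: %016llx\n", (unsigned long long)whiten(&st, raw));
        printf("whitened: %016llx\n", (unsigned long long)whiten(&st, raw ^ 0xff));
        return 0;
}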
index 6c61ec5ee914aa559bff1834692ab8a59b12e87d..fbccac9cd2dc393c2828ccacfd70e8018efcfb10 100644 (file)
@@ -3,7 +3,7 @@ ccflags-$(CONFIG_PPC_PSERIES_DEBUG)     += -DDEBUG
 
 obj-y                  := lpar.o hvCall.o nvram.o reconfig.o \
                           setup.o iommu.o event_sources.o ras.o \
-                          firmware.o power.o dlpar.o mobility.o
+                          firmware.o power.o dlpar.o mobility.o rng.o
 obj-$(CONFIG_SMP)      += smp.o
 obj-$(CONFIG_SCANLOG)  += scanlog.o
 obj-$(CONFIG_EEH)      += eeh_pseries.o
index 7cfdaae1721a925c1d7370515917e7d9af5cc553..a8fe5aa3d34fd545f87a31995153effd8a63521b 100644 (file)
@@ -404,46 +404,38 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
        unsigned long drc_index;
        int rc;
 
-       cpu_hotplug_driver_lock();
        rc = strict_strtoul(buf, 0, &drc_index);
-       if (rc) {
-               rc = -EINVAL;
-               goto out;
-       }
+       if (rc)
+               return -EINVAL;
 
        parent = of_find_node_by_path("/cpus");
-       if (!parent) {
-               rc = -ENODEV;
-               goto out;
-       }
+       if (!parent)
+               return -ENODEV;
 
        dn = dlpar_configure_connector(drc_index, parent);
-       if (!dn) {
-               rc = -EINVAL;
-               goto out;
-       }
+       if (!dn)
+               return -EINVAL;
 
        of_node_put(parent);
 
        rc = dlpar_acquire_drc(drc_index);
        if (rc) {
                dlpar_free_cc_nodes(dn);
-               rc = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        rc = dlpar_attach_node(dn);
        if (rc) {
                dlpar_release_drc(drc_index);
                dlpar_free_cc_nodes(dn);
-               goto out;
+               return rc;
        }
 
        rc = dlpar_online_cpu(dn);
-out:
-       cpu_hotplug_driver_unlock();
+       if (rc)
+               return rc;
 
-       return rc ? rc : count;
+       return count;
 }
 
 static int dlpar_offline_cpu(struct device_node *dn)
@@ -516,30 +508,27 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
                return -EINVAL;
        }
 
-       cpu_hotplug_driver_lock();
        rc = dlpar_offline_cpu(dn);
        if (rc) {
                of_node_put(dn);
-               rc = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        rc = dlpar_release_drc(*drc_index);
        if (rc) {
                of_node_put(dn);
-               goto out;
+               return rc;
        }
 
        rc = dlpar_detach_node(dn);
        if (rc) {
                dlpar_acquire_drc(*drc_index);
-               goto out;
+               return rc;
        }
 
        of_node_put(dn);
-out:
-       cpu_hotplug_driver_unlock();
-       return rc ? rc : count;
+
+       return count;
 }
 
 static int __init pseries_dlpar_init(void)
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
new file mode 100644 (file)
index 0000000..a702f1c
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt)    "pseries-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <asm/archrandom.h>
+#include <asm/machdep.h>
+
+
+static int pseries_get_random_long(unsigned long *v)
+{
+       unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+       if (plpar_hcall(H_RANDOM, retbuf) == H_SUCCESS) {
+               *v = retbuf[0];
+               return 1;
+       }
+
+       return 0;
+}
+
+static __init int rng_init(void)
+{
+       struct device_node *dn;
+
+       dn = of_find_compatible_node(NULL, NULL, "ibm,random");
+       if (!dn)
+               return -ENODEV;
+
+       pr_info("Registering arch random hook.\n");
+
+       ppc_md.get_random_long = pseries_get_random_long;
+
+       return 0;
+}
+subsys_initcall(rng_init);
index b56b70aeb4971c6f68a9c0815bfcadce906947ac..268bc899c1f7168ca5827920010af0b53595c228 100644 (file)
@@ -116,7 +116,14 @@ static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
 
        scom_write(scom, SCOM_RAMIC, cmd);
 
-       while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) {
+       for (;;) {
+               if (scom_read(scom, SCOM_RAMC, &val) != 0) {
+                       pr_err("SCOM error on instruction 0x%08x, thread %d\n",
+                              insn, thread);
+                       return -1;
+               }
+               if (val & mask)
+                       break;
                pr_devel("Waiting on RAMC = 0x%llx\n", val);
                if (++n == 3) {
                        pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
@@ -151,9 +158,7 @@ static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
        if (rc)
                return rc;
 
-       *out_gpr = scom_read(scom, SCOM_RAMD);
-
-       return 0;
+       return scom_read(scom, SCOM_RAMD, out_gpr);
 }
 
 static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
@@ -353,7 +358,10 @@ int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np)
 
        pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
 
-       pccr0 = scom_read(scom, SCOM_PCCR0);
+       if (scom_read(scom, SCOM_PCCR0, &pccr0) != 0) {
+               printk(KERN_ERR "XSCOM failure reading PCCR0 on CPU%d\n", lcpu);
+               return -1;
+       }
        scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
                                     SCOM_PCCR0_ENABLE_RAM);
 
index 4052e2259f3016d904a707ff3e2524e1f2f58ac6..54172c4a8a641ea4fd3f577fc581f09cfb7e5c2c 100644 (file)
@@ -50,18 +50,22 @@ static void wsp_scom_unmap(scom_map_t map)
        iounmap((void *)map);
 }
 
-static u64 wsp_scom_read(scom_map_t map, u32 reg)
+static int wsp_scom_read(scom_map_t map, u32 reg, u64 *value)
 {
        u64 __iomem *addr = (u64 __iomem *)map;
 
-       return in_be64(addr + reg);
+       *value = in_be64(addr + reg);
+
+       return 0;
 }
 
-static void wsp_scom_write(scom_map_t map, u32 reg, u64 value)
+static int wsp_scom_write(scom_map_t map, u32 reg, u64 value)
 {
        u64 __iomem *addr = (u64 __iomem *)map;
 
-       return out_be64(addr + reg, value);
+       out_be64(addr + reg, value);
+
+       return 0;
 }
 
 static const struct scom_controller wsp_scom_controller = {
index d25cc96c21b82fda4dcb44872446ec9b206968c7..ddb6efe889144dd78e15e80150e7b8a75bbb2d89 100644 (file)
@@ -89,6 +89,7 @@ void wsp_halt(void)
        struct device_node *dn;
        struct device_node *mine;
        struct device_node *me;
+       int rc;
 
        me = of_get_cpu_node(smp_processor_id(), NULL);
        mine = scom_find_parent(me);
@@ -101,15 +102,15 @@ void wsp_halt(void)
 
                /* read-modify-write it so the HW probe does not get
                 * confused */
-               val = scom_read(m, 0);
-               val |= 1;
-               scom_write(m, 0, val);
+               rc = scom_read(m, 0, &val);
+               if (rc == 0)
+                       scom_write(m, 0, val | 1);
                scom_unmap(m);
        }
        m = scom_map(mine, 0, 1);
-       val = scom_read(m, 0);
-       val |= 1;
-       scom_write(m, 0, val);
+       rc = scom_read(m, 0, &val);
+       if (rc == 0)
+               scom_write(m, 0, val | 1);
        /* should never return */
        scom_unmap(m);
 }
index ab4cb54764721870b06e3b08ee42121fc7fb7ea4..13ec968be4c7a25ca79fc480a7aab26e514b3bb8 100644 (file)
@@ -28,7 +28,7 @@ config PPC_SCOM
 
 config SCOM_DEBUGFS
        bool "Expose SCOM controllers via debugfs"
-       depends on PPC_SCOM
+       depends on PPC_SCOM && DEBUG_FS
        default n
 
 config GE_FPGA
index ccfb50ddfe38f7242b071c6519aaa1eb43daf6a2..92e7258478d8eafcc7f3c7655446601ce4fa61d1 100644 (file)
@@ -45,7 +45,7 @@ static void quirk_fsl_pcie_header(struct pci_dev *dev)
        u8 hdr_type;
 
        /* if we aren't a PCIe don't bother */
-       if (!pci_find_capability(dev, PCI_CAP_ID_EXP))
+       if (!pci_is_pcie(dev))
                return;
 
        /* if we aren't in host mode don't bother */
index 1be54faf60dd8be75f9f149101733c4c1c92f299..bdcb8588e4926c42130935a0ef33d2d19cab4b99 100644 (file)
@@ -1088,8 +1088,14 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq,
         * is done here.
         */
        if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
+               int cpu;
+
+               preempt_disable();
+               cpu = mpic_processor_id(mpic);
+               preempt_enable();
+
                mpic_set_vector(virq, hw);
-               mpic_set_destination(virq, mpic_processor_id(mpic));
+               mpic_set_destination(virq, cpu);
                mpic_irq_set_priority(virq, 8);
        }
 
index 9193e12df6951ed570d0558d8d361f99af117153..3963d995648a551e0dbe355833ff3b204d6980e7 100644 (file)
@@ -53,7 +53,7 @@ scom_map_t scom_map_device(struct device_node *dev, int index)
 {
        struct device_node *parent;
        unsigned int cells, size;
-       const u32 *prop;
+       const __be32 *prop, *sprop;
        u64 reg, cnt;
        scom_map_t ret;
 
@@ -62,12 +62,24 @@ scom_map_t scom_map_device(struct device_node *dev, int index)
        if (parent == NULL)
                return 0;
 
-       prop = of_get_property(parent, "#scom-cells", NULL);
-       cells = prop ? *prop : 1;
-
+       /*
+        * We support "scom-reg" properties for adding scom registers
+        * to a random device-tree node with an explicit scom-parent
+        * to an arbitrary device-tree node with an explicit scom-parent
+        * We also support the simple "reg" property if the device is
+        * a direct child of a scom controller.
+        *
+        * In case both exist, "scom-reg" takes precedence.
+        */
        prop = of_get_property(dev, "scom-reg", &size);
+       sprop = of_get_property(parent, "#scom-cells", NULL);
+       if (!prop && parent == dev->parent) {
+               prop = of_get_property(dev, "reg", &size);
+               sprop = of_get_property(parent, "#address-cells", NULL);
+       }
        if (!prop)
-               return 0;
+               return NULL;
+       cells = sprop ? be32_to_cpup(sprop) : 1;
        size >>= 2;
 
        if (index >= (size / (2*cells)))
@@ -137,8 +149,7 @@ static int scom_val_get(void *data, u64 *val)
        if (!scom_map_ok(ent->map))
                return -EFAULT;
 
-       *val = scom_read(ent->map, 0);
-       return 0;
+       return scom_read(ent->map, 0, val);
 }
 DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set,
                        "0x%llx\n");
@@ -169,7 +180,7 @@ static int scom_debug_init_one(struct dentry *root, struct device_node *dn,
 
        debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops);
        debugfs_create_file("value", 0600, dir, ent, &scom_val_fops);
-       debugfs_create_blob("path", 0400, dir, &ent->blob);
+       debugfs_create_blob("devspec", 0400, dir, &ent->blob);
 
        return 0;
 }
@@ -185,8 +196,13 @@ static int scom_debug_init(void)
                return -1;
 
        i = rc = 0;
-       for_each_node_with_property(dn, "scom-controller")
-               rc |= scom_debug_init_one(root, dn, i++);
+       for_each_node_with_property(dn, "scom-controller") {
+               int id = of_get_ibm_chip_id(dn);
+               if (id == -1)
+                       id = i;
+               rc |= scom_debug_init_one(root, dn, id);
+               i++;
+       }
 
        return rc;
 }
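The comment added to scom_map_device() describes a two-level lookup: an explicit "scom-reg" property always wins, and a plain "reg" property is honoured only when the node is a direct child of the SCOM controller. The sketch below reproduces just that precedence rule against a toy node/property structure; prop_get(), the node layout and the property values are all invented for illustration and stand in for of_get_property() on struct device_node.

#include <stdio.h>
#include <string.h>

struct prop { const char *name; const char *val; };

struct node {
        const char *name;
        const struct node *parent;
        const struct prop *props;
        int nprops;
};

static const char *prop_get(const struct node *n, const char *name)
{
        for (int i = 0; i < n->nprops; i++)
                if (!strcmp(n->props[i].name, name))
                        return n->props[i].val;
        return NULL;
}

/* Prefer "scom-reg"; fall back to "reg" only for direct children of the controller. */
static const char *scom_lookup(const struct node *dev, const struct node *scom_parent)
{
        const char *reg = prop_get(dev, "scom-reg");

        if (!reg && scom_parent == dev->parent)
                reg = prop_get(dev, "reg");
        return reg;
}

int main(void)
{
        static const struct prop cpu_props[] = { { "reg", "0x20" } };
        static const struct node chip = { "xscom", NULL, NULL, 0 };
        static const struct node cpu = { "cpu@20", &chip, cpu_props, 1 };

        printf("resolved reg: %s\n", scom_lookup(&cpu, &chip));
        return 0;
}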
index 39d72212655e3706ef5ca09dacd689156bc34399..3c6ee1b64e5d1eec2bf5d03679d537f1bc5c5d53 100644 (file)
@@ -112,6 +112,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
                                 bool force)
 {
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+       __be16 oserver;
        int16_t server;
        int8_t priority;
        int64_t rc;
@@ -120,13 +121,13 @@ static int ics_opal_set_affinity(struct irq_data *d,
        if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
                return -1;
 
-       rc = opal_get_xive(hw_irq, &server, &priority);
+       rc = opal_get_xive(hw_irq, &oserver, &priority);
        if (rc != OPAL_SUCCESS) {
-               pr_err("%s: opal_set_xive(irq=%d [hw 0x%x] server=%x)"
-                      " error %lld\n",
-                      __func__, d->irq, hw_irq, server, rc);
+               pr_err("%s: opal_get_xive(irq=%d [hw 0x%x]) error %lld\n",
+                      __func__, d->irq, hw_irq, rc);
                return -1;
        }
+       server = be16_to_cpu(oserver);
 
        wanted_server = xics_get_irq_server(d->irq, cpumask, 1);
        if (wanted_server < 0) {
@@ -181,7 +182,7 @@ static int ics_opal_map(struct ics *ics, unsigned int virq)
 {
        unsigned int hw_irq = (unsigned int)virq_to_hw(virq);
        int64_t rc;
-       int16_t server;
+       __be16 server;
        int8_t priority;
 
        if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS))
@@ -201,7 +202,7 @@ static int ics_opal_map(struct ics *ics, unsigned int virq)
 static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec)
 {
        int64_t rc;
-       int16_t server;
+       __be16 server;
        int8_t priority;
 
        /* Check if HAL knows about this interrupt */
@@ -215,14 +216,14 @@ static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec)
 static long ics_opal_get_server(struct ics *ics, unsigned long vec)
 {
        int64_t rc;
-       int16_t server;
+       __be16 server;
        int8_t priority;
 
        /* Check if HAL knows about this interrupt */
        rc = opal_get_xive(vec, &server, &priority);
        if (rc != OPAL_SUCCESS)
                return -1;
-       return ics_opal_unmangle_server(server);
+       return ics_opal_unmangle_server(be16_to_cpu(server));
 }
 
 int __init ics_opal_init(void)
index 7143793859fadf0cbc66c1b2119c5cbaee6368c8..a02177fb5ec1d25f9daaf54b108c7854ff870209 100644 (file)
@@ -99,6 +99,7 @@ config S390
        select CLONE_BACKWARDS2
        select GENERIC_CLOCKEVENTS
        select GENERIC_CPU_DEVICES if !SMP
+       select GENERIC_FIND_FIRST_BIT
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_TIME_VSYSCALL_OLD
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
index a7d68a467ce884d77a9c615db5fcba0cdb251e71..ecc9d4f73cc6a3479254e9e55955795772eef593 100644 (file)
@@ -35,13 +35,13 @@ endif
 
 export LD_BFD
 
-cflags-$(CONFIG_MARCH_G5)   += $(call cc-option,-march=g5)
-cflags-$(CONFIG_MARCH_Z900) += $(call cc-option,-march=z900)
-cflags-$(CONFIG_MARCH_Z990) += $(call cc-option,-march=z990)
-cflags-$(CONFIG_MARCH_Z9_109) += $(call cc-option,-march=z9-109)
-cflags-$(CONFIG_MARCH_Z10) += $(call cc-option,-march=z10)
-cflags-$(CONFIG_MARCH_Z196) += $(call cc-option,-march=z196)
-cflags-$(CONFIG_MARCH_ZEC12) += $(call cc-option,-march=zEC12)
+cflags-$(CONFIG_MARCH_G5)     += -march=g5
+cflags-$(CONFIG_MARCH_Z900)   += -march=z900
+cflags-$(CONFIG_MARCH_Z990)   += -march=z990
+cflags-$(CONFIG_MARCH_Z9_109) += -march=z9-109
+cflags-$(CONFIG_MARCH_Z10)    += -march=z10
+cflags-$(CONFIG_MARCH_Z196)   += -march=z196
+cflags-$(CONFIG_MARCH_ZEC12)  += -march=zEC12
 
 #KBUILD_IMAGE is necessary for make rpm
 KBUILD_IMAGE   :=arch/s390/boot/image
index 87a22092b68f8b152a7df862d4c47604e10edee5..603d2003cd9fed7563a9f61cde7dd28823080d2e 100644 (file)
@@ -204,7 +204,7 @@ static int
 appldata_timer_handler(ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int len;
+       unsigned int len;
        char buf[2];
 
        if (!*lenp || *ppos) {
@@ -246,7 +246,8 @@ static int
 appldata_interval_handler(ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-       int len, interval;
+       unsigned int len;
+       int interval;
        char buf[16];
 
        if (!*lenp || *ppos) {
@@ -290,7 +291,8 @@ appldata_generic_handler(ctl_table *ctl, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        struct appldata_ops *ops = NULL, *tmp_ops;
-       int rc, len, found;
+       unsigned int len;
+       int rc, found;
        char buf[2];
        struct list_head *lh;
 
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
new file mode 100644 (file)
index 0000000..e0af2ee
--- /dev/null
@@ -0,0 +1,655 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_RDS_DEBUG=y
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_XFS_DEBUG=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_READABLE_ASM=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_SLUB_STATS=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_RB=y
+CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
+CONFIG_DEBUG_WRITECOUNT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_PROVE_RCU=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=300
+CONFIG_NOTIFIER_ERROR_INJECTION=m
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAILSLAB=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAIL_MAKE_REQUEST=y
+CONFIG_FAIL_IO_TIMEOUT=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_RBTREE_TEST=m
+CONFIG_INTERVAL_TREE_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_DMA_API_DEBUG=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
new file mode 100644 (file)
index 0000000..b9f6b4c
--- /dev/null
@@ -0,0 +1,618 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_GCOV_KERNEL=y
+CONFIG_GCOV_PROFILE_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_TIMER_STATS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_NOTIFIER_ERROR_INJECTION=m
+CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
+CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_LATENCYTOP=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_RBTREE_TEST=m
+CONFIG_INTERVAL_TREE_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
new file mode 100644 (file)
index 0000000..91087b4
--- /dev/null
@@ -0,0 +1,610 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+CONFIG_HZ_100=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_CHSC_SCH=y
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_HIBERNATION=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=m
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_TRANSPORT=m
+CONFIG_INET6_XFRM_MODE_TUNNEL=m
+CONFIG_INET6_XFRM_MODE_BEET=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_SET=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_SET=m
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_PE_SIP=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NET_SCTPPROBE=m
+CONFIG_RDS=m
+CONFIG_RDS_RDMA=m
+CONFIG_RDS_TCP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_BPF_JIT=y
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_TCPPROBE=m
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_OSD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_XIP=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=y
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SRP_ATTRS=m
+CONFIG_SCSI_SRP_TGT_ATTRS=y
+CONFIG_ISCSI_TCP=m
+CONFIG_LIBFCOE=m
+CONFIG_SCSI_DEBUG=m
+CONFIG_ZFCP=y
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_DH=m
+CONFIG_SCSI_DH_RDAC=m
+CONFIG_SCSI_DH_HP_SW=m
+CONFIG_SCSI_DH_EMC=m
+CONFIG_SCSI_DH_ALUA=m
+CONFIG_SCSI_OSD_INITIATOR=m
+CONFIG_SCSI_OSD_ULD=m
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_RAID=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_DELAY=m
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+CONFIG_DM_SWITCH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_VHOST_NET=m
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_MLX4_EN=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_RAW_DRIVER=m
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_TN3270_FS=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_ZVM_WATCHDOG=m
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_VIRTIO_BALLOON=m
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_JBD_DEBUG=y
+CONFIG_JBD2_DEBUG=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_JFS_STATISTICS=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_XFS_RT=y
+CONFIG_GFS2_FS=m
+CONFIG_OCFS2_FS=m
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_NILFS2_FS=m
+CONFIG_FANOTIFY=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_FSCACHE=m
+CONFIG_CACHEFILES=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_NTFS_FS=m
+CONFIG_NTFS_RW=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+CONFIG_ROMFS_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFS_SWAP=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_CIFS=m
+CONFIG_CIFS_STATS=y
+CONFIG_CIFS_STATS2=y
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+# CONFIG_CIFS_DEBUG is not set
+CONFIG_CIFS_DFS_UPCALL=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_DLM=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_TIMER_STATS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_KPROBE_EVENT is not set
+CONFIG_LKDTM=m
+CONFIG_ATOMIC64_SELFTEST=y
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_S390_PTDUMP=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_IMA=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_ZLIB=y
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_ZCRYPT=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
+CONFIG_PUBLIC_KEY_ALGO_RSA=m
+CONFIG_X509_CERTIFICATE_PARSER=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_CORDIC=m
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_KVM_S390_UCONTROL=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
new file mode 100644 (file)
index 0000000..d725c4d
--- /dev/null
@@ -0,0 +1,86 @@
+# CONFIG_SWAP is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z9_109=y
+# CONFIG_COMPAT is not set
+CONFIG_NR_CPUS=2
+# CONFIG_HOTPLUG_CPU is not set
+CONFIG_HZ_100=y
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+# CONFIG_CHECK_STACK is not set
+# CONFIG_CHSC_SCH is not set
+# CONFIG_SCM_BUS is not set
+CONFIG_CRASH_DUMP=y
+CONFIG_ZFCPDUMP=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_IUCV is not set
+CONFIG_ATM=y
+CONFIG_ATM_LANE=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV_XPRAM is not set
+# CONFIG_DCSSBLK is not set
+# CONFIG_DASD is not set
+CONFIG_ENCLOSURE_SERVICES=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_ENCLOSURE=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SRP_ATTRS=y
+CONFIG_ZFCP=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_HVC_IUCV is not set
+CONFIG_RAW_DRIVER=y
+# CONFIG_SCLP_ASYNC is not set
+# CONFIG_HMC_DRV is not set
+# CONFIG_S390_TAPE is not set
+# CONFIG_VMCP is not set
+# CONFIG_MONWRITER is not set
+# CONFIG_S390_VMUR is not set
+# CONFIG_HID is not set
+CONFIG_MEMSTICK=y
+CONFIG_MEMSTICK_DEBUG=y
+CONFIG_MEMSTICK_UNSAFE_RESUME=y
+CONFIG_MSPRO_BLOCK=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_INOTIFY_USER is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+# CONFIG_PFAULT is not set
+# CONFIG_S390_HYPFS_FS is not set
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_S390_GUEST is not set
index b4dbade8ca247c52106ecd72036983c850369431..46cae138ece2efa3617447d1fccf82fa504d4e7d 100644 (file)
@@ -725,6 +725,8 @@ static struct crypto_alg xts_aes_alg = {
        }
 };
 
+static int xts_aes_alg_reg;
+
 static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
 {
@@ -846,6 +848,8 @@ static struct crypto_alg ctr_aes_alg = {
        }
 };
 
+static int ctr_aes_alg_reg;
+
 static int __init aes_s390_init(void)
 {
        int ret;
@@ -884,6 +888,7 @@ static int __init aes_s390_init(void)
                ret = crypto_register_alg(&xts_aes_alg);
                if (ret)
                        goto xts_aes_err;
+               xts_aes_alg_reg = 1;
        }
 
        if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
@@ -902,6 +907,7 @@ static int __init aes_s390_init(void)
                        free_page((unsigned long) ctrblk);
                        goto ctr_aes_err;
                }
+               ctr_aes_alg_reg = 1;
        }
 
 out:
@@ -921,9 +927,12 @@ aes_err:
 
 static void __exit aes_s390_fini(void)
 {
-       crypto_unregister_alg(&ctr_aes_alg);
-       free_page((unsigned long) ctrblk);
-       crypto_unregister_alg(&xts_aes_alg);
+       if (ctr_aes_alg_reg) {
+               crypto_unregister_alg(&ctr_aes_alg);
+               free_page((unsigned long) ctrblk);
+       }
+       if (xts_aes_alg_reg)
+               crypto_unregister_alg(&xts_aes_alg);
        crypto_unregister_alg(&cbc_aes_alg);
        crypto_unregister_alg(&ecb_aes_alg);
        crypto_unregister_alg(&aes_alg);
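
The two new flags above (xts_aes_alg_reg, ctr_aes_alg_reg) make teardown symmetric with init: the XTS and CTR algorithms are only registered when crypt_s390_func_available() reports the corresponding CPU crypto functions, so aes_s390_fini() must only unregister what was actually registered. A minimal sketch of that pattern, with hypothetical names and stub calls standing in for crypto_register_alg()/crypto_unregister_alg():

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real registration API. */
static int  register_thing(const char *name)   { printf("register %s\n", name);   return 0; }
static void unregister_thing(const char *name) { printf("unregister %s\n", name); }

/* Hypothetical feature probe, analogous to crypt_s390_func_available(). */
static bool feature_available(void) { return false; }

static bool optional_registered;        /* mirrors xts_aes_alg_reg */

static int demo_init(void)
{
        int ret;

        ret = register_thing("base");
        if (ret)
                return ret;

        if (feature_available()) {
                ret = register_thing("optional");
                if (ret) {
                        unregister_thing("base");
                        return ret;
                }
                optional_registered = true;     /* remember for teardown */
        }
        return 0;
}

static void demo_exit(void)
{
        /* Only undo what init actually did. */
        if (optional_registered)
                unregister_thing("optional");
        unregister_thing("base");
}

int main(void)
{
        if (!demo_init())
                demo_exit();
        return 0;
}
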
index d204c65bf722cf7ad35d12d3643830cee5880d8c..33f57514f4245a3835438801c99e11c749ac33a3 100644 (file)
@@ -38,13 +38,14 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
-# CONFIG_EFI_PARTITION is not set
 CONFIG_DEFAULT_DEADLINE=y
+CONFIG_MARCH_Z196=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
@@ -152,6 +153,7 @@ CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
 CONFIG_CRYPTO_MD4=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
index c797832daa5f596bac9da8f5075d1bd2e006559c..12c5ec156502a87306e093396b7d195b7b4fa50d 100644 (file)
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __CS_LOOP(ptr, op_val, op_string) ({                           \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OR    "lao"
+#define __ATOMIC_AND   "lan"
+#define __ATOMIC_ADD   "laa"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string)                          \
+({                                                                     \
+       int old_val;                                                    \
+                                                                       \
+       typecheck(atomic_t *, ptr);                                     \
+       asm volatile(                                                   \
+               op_string "     %0,%2,%1\n"                             \
+               : "=d" (old_val), "+Q" ((ptr)->counter)                 \
+               : "d" (op_val)                                          \
+               : "cc", "memory");                                      \
+       old_val;                                                        \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OR    "or"
+#define __ATOMIC_AND   "nr"
+#define __ATOMIC_ADD   "ar"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string)                          \
+({                                                                     \
        int old_val, new_val;                                           \
+                                                                       \
+       typecheck(atomic_t *, ptr);                                     \
        asm volatile(                                                   \
                "       l       %0,%2\n"                                \
                "0:     lr      %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       cs      %0,%1,%2\n"                             \
                "       jl      0b"                                     \
-               : "=&d" (old_val), "=&d" (new_val),                     \
-                 "=Q" (((atomic_t *)(ptr))->counter)                   \
-               : "d" (op_val),  "Q" (((atomic_t *)(ptr))->counter)     \
+               : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+               : "d" (op_val)                                          \
                : "cc", "memory");                                      \
-       new_val;                                                        \
+       old_val;                                                        \
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline int atomic_read(const atomic_t *v)
 {
        int c;
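
The rewritten loop macro above hands back the old counter value instead of the new one. With the z196 interlocked-access instructions (laa, lan, lao) that old value comes straight out of a single fetch-and-op instruction; on older machines the compare-and-swap loop is kept but now also returns old_val. A rough portable equivalent of the new contract, written with the GCC __atomic builtins rather than s390 assembly (my_atomic_add_return() is an illustrative name, not kernel API):

#include <stdio.h>

/* Sketch only: __atomic_fetch_add() returns the value the counter held
 * before the addition, the same "old_val" contract __ATOMIC_LOOP() now
 * provides, so add_return is simply the old value plus the operand. */
static inline int my_atomic_add_return(int i, int *counter)
{
        return __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST) + i;
}

int main(void)
{
        int v = 40;

        printf("add_return(2, &v) = %d, v = %d\n",
               my_atomic_add_return(2, &v), v);  /* 42, 42 */
        return 0;
}
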
@@ -53,32 +82,45 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-       return __CS_LOOP(v, i, "ar");
+       return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
 }
-#define atomic_add(_i, _v)             atomic_add_return(_i, _v)
-#define atomic_add_negative(_i, _v)    (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v)                 atomic_add_return(1, _v)
-#define atomic_inc_return(_v)          atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v)                (atomic_add_return(1, _v) == 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-       return __CS_LOOP(v, i, "sr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+       if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+               asm volatile(
+                       "asi    %0,%1\n"
+                       : "+Q" (v->counter)
+                       : "i" (i)
+                       : "cc", "memory");
+       } else {
+               atomic_add_return(i, v);
+       }
+#else
+       atomic_add_return(i, v);
+#endif
 }
-#define atomic_sub(_i, _v)             atomic_sub_return(_i, _v)
+
+#define atomic_add_negative(_i, _v)    (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)                 atomic_add(1, _v)
+#define atomic_inc_return(_v)          atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)                (atomic_add_return(1, _v) == 0)
+#define atomic_sub(_i, _v)             atomic_add(-(int)_i, _v)
+#define atomic_sub_return(_i, _v)      atomic_add_return(-(int)(_i), _v)
 #define atomic_sub_and_test(_i, _v)    (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v)                 atomic_sub_return(1, _v)
+#define atomic_dec(_v)                 atomic_sub(1, _v)
 #define atomic_dec_return(_v)          atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)                (atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-       __CS_LOOP(v, ~mask, "nr");
+       __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
 }
 
-static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-       __CS_LOOP(v, mask, "or");
+       __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
 }
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
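
Two things fall out of the hunk above: atomic_add() gains a fast path for small compile-time constants (the add-immediate instruction asi takes a signed 8-bit operand, hence the -129 < i < 128 check), and the subtraction and decrement helpers are no longer separate assembler loops, they are defined as additions of the negated value. The sketch below shows only the constant-operand gate and the derived subtraction; add_small() and add_any() are illustrative stand-ins and are not actually atomic.

#include <stdio.h>

/* Placeholders for the two code paths selected by atomic_add() above. */
static void add_small(int i, int *v) { *v += i; }       /* "asi"-style immediate path */
static void add_any(int i, int *v)   { *v += i; }       /* generic path */

#define ADD(i, v)                                                       \
do {                                                                    \
        if (__builtin_constant_p(i) && (i) > -129 && (i) < 128)         \
                add_small((i), (v));                                    \
        else                                                            \
                add_any((i), (v));                                      \
} while (0)

/* Subtraction derived from addition, as in the macro table above. */
#define SUB(i, v)       ADD(-(int)(i), (v))

int main(void)
{
        int x = 0, n = 5;

        ADD(1, &x);     /* constant and in range: immediate form */
        ADD(n, &x);     /* not a compile-time constant: generic form */
        SUB(2, &x);
        printf("x = %d\n", x);  /* 4 */
        return 0;
}
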
@@ -87,8 +129,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        asm volatile(
                "       cs      %0,%2,%1"
-               : "+d" (old), "=Q" (v->counter)
-               : "d" (new), "Q" (v->counter)
+               : "+d" (old), "+Q" (v->counter)
+               : "d" (new)
                : "cc", "memory");
        return old;
 }
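
A recurring cleanup in these hunks: where the old inline assembly named the same counter twice, once as an output ("=Q") and once as a separate input ("Q"), the new code declares a single read-write operand ("+Q"). The fragment below illustrates the two GCC spellings of a read-write operand with the generic register constraint so it stays target-independent; the empty instruction string is deliberate, only the operand syntax is the point here.

/* Older idiom: tie an input to output operand 0 with the matching
 * constraint "0", so the value is both read and written. */
static inline unsigned long asm_rw_matching(unsigned long x)
{
        asm volatile("" : "=r" (x) : "0" (x));
        return x;
}

/* Preferred idiom, as used in the patch: one operand marked "+". */
static inline unsigned long asm_rw_plus(unsigned long x)
{
        asm volatile("" : "+r" (x));
        return x;
}
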
@@ -109,27 +151,56 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
 
-#undef __CS_LOOP
+#undef __ATOMIC_LOOP
 
 #define ATOMIC64_INIT(i)  { (i) }
 
 #ifdef CONFIG_64BIT
 
-#define __CSG_LOOP(ptr, op_val, op_string) ({                          \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC64_OR  "laog"
+#define __ATOMIC64_AND "lang"
+#define __ATOMIC64_ADD "laag"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string)                                \
+({                                                                     \
+       long long old_val;                                              \
+                                                                       \
+       typecheck(atomic64_t *, ptr);                                   \
+       asm volatile(                                                   \
+               op_string "     %0,%2,%1\n"                             \
+               : "=d" (old_val), "+Q" ((ptr)->counter)                 \
+               : "d" (op_val)                                          \
+               : "cc", "memory");                                      \
+       old_val;                                                        \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC64_OR  "ogr"
+#define __ATOMIC64_AND "ngr"
+#define __ATOMIC64_ADD "agr"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string)                                \
+({                                                                     \
        long long old_val, new_val;                                     \
+                                                                       \
+       typecheck(atomic64_t *, ptr);                                   \
        asm volatile(                                                   \
                "       lg      %0,%2\n"                                \
                "0:     lgr     %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       csg     %0,%1,%2\n"                             \
                "       jl      0b"                                     \
-               : "=&d" (old_val), "=&d" (new_val),                     \
-                 "=Q" (((atomic_t *)(ptr))->counter)                   \
-               : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)      \
+               : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+               : "d" (op_val)                                          \
                : "cc", "memory");                                      \
-       new_val;                                                        \
+       old_val;                                                        \
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline long long atomic64_read(const atomic64_t *v)
 {
        long long c;
@@ -149,22 +220,17 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-       return __CSG_LOOP(v, i, "agr");
-}
-
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-       return __CSG_LOOP(v, i, "sgr");
+       return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
 }
 
 static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
-       __CSG_LOOP(v, ~mask, "ngr");
+       __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
 }
 
 static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
-       __CSG_LOOP(v, mask, "ogr");
+       __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -174,13 +240,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 {
        asm volatile(
                "       csg     %0,%2,%1"
-               : "+d" (old), "=Q" (v->counter)
-               : "d" (new), "Q" (v->counter)
+               : "+d" (old), "+Q" (v->counter)
+               : "d" (new)
                : "cc", "memory");
        return old;
 }
 
-#undef __CSG_LOOP
+#undef __ATOMIC64_LOOP
 
 #else /* CONFIG_64BIT */
 
@@ -216,8 +282,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
                "       lm      %0,%N0,%1\n"
                "0:     cds     %0,%2,%1\n"
                "       jl      0b\n"
-               : "=&d" (rp_old), "=Q" (v->counter)
-               : "d" (rp_new), "Q" (v->counter)
+               : "=&d" (rp_old), "+Q" (v->counter)
+               : "d" (rp_new)
                : "cc");
        return rp_old.pair;
 }
@@ -230,8 +296,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 
        asm volatile(
                "       cds     %0,%2,%1"
-               : "+&d" (rp_old), "=Q" (v->counter)
-               : "d" (rp_new), "Q" (v->counter)
+               : "+&d" (rp_old), "+Q" (v->counter)
+               : "d" (rp_new)
                : "cc");
        return rp_old.pair;
 }
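
On 31-bit kernels there is no 64-bit interlocked arithmetic, so every 64-bit atomic is built on the doubleword compare-and-swap (cds) shown in atomic64_xchg()/atomic64_cmpxchg() above; the next hunk can drop atomic64_sub_return() entirely because it becomes add_return of a negated value. The generic shape of such a cmpxchg retry loop, sketched with GCC __atomic builtins instead of the register-pair assembly (cas_add_return() is an illustrative name):

/* Sketch: a 64-bit add_return built purely from compare-and-swap, the
 * same retry-loop shape the 31-bit code wraps around cds. */
static inline long long cas_add_return(long long i, long long *v)
{
        long long old = __atomic_load_n(v, __ATOMIC_RELAXED);

        while (!__atomic_compare_exchange_n(v, &old, old + i, 0,
                                            __ATOMIC_SEQ_CST,
                                            __ATOMIC_RELAXED))
                ;       /* 'old' is refreshed on failure; retry */
        return old + i;
}
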
@@ -248,17 +314,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
        return new;
 }
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-       long long old, new;
-
-       do {
-               old = atomic64_read(v);
-               new = old - i;
-       } while (atomic64_cmpxchg(v, old, new) != old);
-       return new;
-}
-
 static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
 {
        long long old, new;
@@ -281,7 +336,24 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
 
 #endif /* CONFIG_64BIT */
 
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+       if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+               asm volatile(
+                       "agsi   %0,%1\n"
+                       : "+Q" (v->counter)
+                       : "i" (i)
+                       : "cc", "memory");
+       } else {
+               atomic64_add_return(i, v);
+       }
+#else
+       atomic64_add_return(i, v);
+#endif
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 {
        long long c, old;
 
@@ -289,7 +361,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
        for (;;) {
                if (unlikely(c == u))
                        break;
-               old = atomic64_cmpxchg(v, c, c + a);
+               old = atomic64_cmpxchg(v, c, c + i);
                if (likely(old == c))
                        break;
                c = old;
@@ -314,14 +386,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
        return dec;
 }
 
-#define atomic64_add(_i, _v)           atomic64_add_return(_i, _v)
 #define atomic64_add_negative(_i, _v)  (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v)               atomic64_add_return(1, _v)
+#define atomic64_inc(_v)               atomic64_add(1, _v)
 #define atomic64_inc_return(_v)                atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)      (atomic64_add_return(1, _v) == 0)
-#define atomic64_sub(_i, _v)           atomic64_sub_return(_i, _v)
+#define atomic64_sub_return(_i, _v)    atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v)           atomic64_add(-(long long)_i, _v)
 #define atomic64_sub_and_test(_i, _v)  (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v)               atomic64_sub_return(1, _v)
+#define atomic64_dec(_v)               atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)                atomic64_sub_return(1, _v)
 #define atomic64_dec_and_test(_v)      (atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
index 10135a38673c04894c69e36ee244a756ec28d30e..6e6ad06808293b7e88949351f647e516af8f16b2 100644 (file)
@@ -1,10 +1,40 @@
 /*
- *  S390 version
- *    Copyright IBM Corp. 1999
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *    Copyright IBM Corp. 1999,2013
  *
- *  Derived from "include/asm-i386/bitops.h"
- *    Copyright (C) 1992, Linus Torvalds
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *
+ * The description below was taken in large parts from the powerpc
+ * bitops header file:
+ * Within a word, bits are numbered LSB first.  Lots of places make
+ * this assumption by directly testing bits with (val & (1<<nr)).
+ * This can cause confusion for large (> 1 word) bitmaps on a
+ * big-endian system because, unlike little endian, the number of each
+ * bit depends on the word size.
+ *
+ * The bitop functions are defined to work on unsigned longs, so for an
+ * s390x system the bits end up numbered:
+ *   |63..............0|127............64|191...........128|255...........192|
+ * and on s390:
+ *   |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
+ *
+ * There are a few little-endian macros used mostly for filesystem
+ * bitmaps, these work on similar bit array layouts, but
+ * byte-oriented:
+ *   |7...0|15...8|23...16|31...24|39...32|47...40|55...48|63...56|
+ *
+ * The main difference is that bits 3-5 (64b) or 3-4 (32b) in the bit
+ * number field need to be reversed compared to the big-endian bit
+ * fields. This can be achieved by XOR with 0x38 (64b) or 0x18 (32b).
+ *
+ * We also have special functions which work with an MSB0 encoding:
+ * on an s390x system the bits are numbered:
+ *   |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The main difference is that bits 0-63 (64b) or 0-31 (32b) in the bit
+ * number field need to be reversed compared to the LSB0 encoded bit
+ * fields. This can be achieved by XOR with 0x3f (64b) or 0x1f (32b).
  *
  */
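
The address arithmetic behind this description appears a few lines further down in __bitops_word()/__bitops_byte() and in the *_inv() wrappers: on big-endian s390 the byte containing LSB0 bit nr of an unsigned long array is found by XOR-ing the bit number with BITS_PER_LONG - 8 before shifting, and the MSB0 helpers simply invert the bit number with BITS_PER_LONG - 1. A small stand-alone sketch of that arithmetic, assuming a 64-bit long:

#include <stdio.h>

#define BITS_PER_LONG   64      /* assumed for the example */

/* Byte offset of LSB0 bit 'nr' within a big-endian unsigned long array,
 * as computed by __bitops_byte() in this patch. */
static unsigned long lsb0_byte(unsigned long nr)
{
        return (nr ^ (BITS_PER_LONG - 8)) >> 3;
}

/* MSB0 bit number -> LSB0 bit number, as used by the *_inv() helpers. */
static unsigned long msb0_to_lsb0(unsigned long nr)
{
        return nr ^ (BITS_PER_LONG - 1);
}

int main(void)
{
        /* LSB0 bit 0 lives in the last byte of the first long (offset 7),
         * bit 63 in its first byte (offset 0). */
        printf("bit 0  -> byte %lu\n", lsb0_byte(0));   /* 7 */
        printf("bit 63 -> byte %lu\n", lsb0_byte(63));  /* 0 */
        /* MSB0 bit 0 is LSB0 bit 63 of the same word. */
        printf("msb0 0 -> lsb0 %lu\n", msb0_to_lsb0(0)); /* 63 */
        return 0;
}
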
 
 #error only <linux/bitops.h> can be included directly
 #endif
 
+#include <linux/typecheck.h>
 #include <linux/compiler.h>
 
-/*
- * 32 bit bitops format:
- * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
- * bit 32 is the LSB of *(addr+4). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
- *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- *
- * 64 bit bitops format:
- * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
- * bit 64 is the LSB of *(addr+8). That combined with the
- * big endian byte order on S390 give the following bit
- * order in memory:
- *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
- *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
- *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
- *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
- * after that follows the next long with bit numbers
- *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
- *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
- *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
- *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
- * The reason for this bit ordering is the fact that
- * in the architecture independent code bits operations
- * of the form "flags |= (1 << bitnr)" are used INTERMIXED
- * with operation of the form "set_bit(bitnr, flags)".
- */
-
-/* bitmap tables from arch/s390/kernel/bitmap.c */
-extern const char _oi_bitmap[];
-extern const char _ni_bitmap[];
-extern const char _zb_findmap[];
-extern const char _sb_findmap[];
-
 #ifndef CONFIG_64BIT
 
 #define __BITOPS_OR            "or"
 #define __BITOPS_AND           "nr"
 #define __BITOPS_XOR           "xr"
 
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)        \
+#define __BITOPS_LOOP(__addr, __val, __op_string)              \
+({                                                             \
+       unsigned long __old, __new;                             \
+                                                               \
+       typecheck(unsigned long *, (__addr));                   \
        asm volatile(                                           \
                "       l       %0,%2\n"                        \
                "0:     lr      %1,%0\n"                        \
                __op_string "   %1,%3\n"                        \
                "       cs      %0,%1,%2\n"                     \
                "       jl      0b"                             \
-               : "=&d" (__old), "=&d" (__new),                 \
-                 "=Q" (*(unsigned long *) __addr)              \
-               : "d" (__val), "Q" (*(unsigned long *) __addr)  \
-               : "cc");
+               : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+               : "d" (__val)                                   \
+               : "cc");                                        \
+       __old;                                                  \
+})
 
 #else /* CONFIG_64BIT */
 
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __BITOPS_OR            "laog"
+#define __BITOPS_AND           "lang"
+#define __BITOPS_XOR           "laxg"
+
+#define __BITOPS_LOOP(__addr, __val, __op_string)              \
+({                                                             \
+       unsigned long __old;                                    \
+                                                               \
+       typecheck(unsigned long *, (__addr));                   \
+       asm volatile(                                           \
+               __op_string "   %0,%2,%1\n"                     \
+               : "=d" (__old), "+Q" (*(__addr))                \
+               : "d" (__val)                                   \
+               : "cc");                                        \
+       __old;                                                  \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 #define __BITOPS_OR            "ogr"
 #define __BITOPS_AND           "ngr"
 #define __BITOPS_XOR           "xgr"
 
-#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)        \
+#define __BITOPS_LOOP(__addr, __val, __op_string)              \
+({                                                             \
+       unsigned long __old, __new;                             \
+                                                               \
+       typecheck(unsigned long *, (__addr));                   \
        asm volatile(                                           \
                "       lg      %0,%2\n"                        \
                "0:     lgr     %1,%0\n"                        \
                __op_string "   %1,%3\n"                        \
                "       csg     %0,%1,%2\n"                     \
                "       jl      0b"                             \
-               : "=&d" (__old), "=&d" (__new),                 \
-                 "=Q" (*(unsigned long *) __addr)              \
-               : "d" (__val), "Q" (*(unsigned long *) __addr)  \
-               : "cc");
+               : "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
+               : "d" (__val)                                   \
+               : "cc");                                        \
+       __old;                                                  \
+})
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 #endif /* CONFIG_64BIT */
 
 #define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
 
-#ifdef CONFIG_SMP
-/*
- * SMP safe set_bit routine based on compare and swap (CS)
- */
-static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline unsigned long *
+__bitops_word(unsigned long nr, volatile unsigned long *ptr)
+{
+       unsigned long addr;
+
+       addr = (unsigned long)ptr + ((nr ^ (nr & (BITS_PER_LONG - 1))) >> 3);
+       return (unsigned long *)addr;
+}
+
+static inline unsigned char *
+__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+}
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
+{
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long mask;
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make OR mask */
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+       if (__builtin_constant_p(nr)) {
+               unsigned char *caddr = __bitops_byte(nr, ptr);
+
+               asm volatile(
+                       "oi     %0,%b1\n"
+                       : "+Q" (*caddr)
+                       : "i" (1 << (nr & 7))
+                       : "cc");
+               return;
+       }
+#endif
        mask = 1UL << (nr & (BITS_PER_LONG - 1));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+       __BITOPS_LOOP(addr, mask, __BITOPS_OR);
 }
 
-/*
- * SMP safe clear_bit routine based on compare and swap (CS)
- */
-static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+       if (__builtin_constant_p(nr)) {
+               unsigned char *caddr = __bitops_byte(nr, ptr);
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make AND mask */
+               asm volatile(
+                       "ni     %0,%b1\n"
+                       : "+Q" (*caddr)
+                       : "i" (~(1 << (nr & 7)))
+                       : "cc");
+               return;
+       }
+#endif
        mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+       __BITOPS_LOOP(addr, mask, __BITOPS_AND);
 }
 
-/*
- * SMP safe change_bit routine based on compare and swap (CS)
- */
-static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long mask;
+
+#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
+       if (__builtin_constant_p(nr)) {
+               unsigned char *caddr = __bitops_byte(nr, ptr);
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make XOR mask */
+               asm volatile(
+                       "xi     %0,%b1\n"
+                       : "+Q" (*caddr)
+                       : "i" (1 << (nr & 7))
+                       : "cc");
+               return;
+       }
+#endif
        mask = 1UL << (nr & (BITS_PER_LONG - 1));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+       __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
 }
 
-/*
- * SMP safe test_and_set_bit routine based on compare and swap (CS)
- */
 static inline int
-test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long old, mask;
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make OR/test mask */
        mask = 1UL << (nr & (BITS_PER_LONG - 1));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
+       old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
        barrier();
        return (old & mask) != 0;
 }
 
-/*
- * SMP safe test_and_clear_bit routine based on compare and swap (CS)
- */
 static inline int
-test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long old, mask;
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make AND/test mask */
        mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
+       old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
        barrier();
-       return (old ^ new) != 0;
+       return (old & ~mask) != 0;
 }
 
-/*
- * SMP safe test_and_change_bit routine based on compare and swap (CS) 
- */
 static inline int
-test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
+test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-        unsigned long addr, old, new, mask;
+       unsigned long *addr = __bitops_word(nr, ptr);
+       unsigned long old, mask;
 
-       addr = (unsigned long) ptr;
-       /* calculate address for CS */
-       addr += (nr ^ (nr & (BITS_PER_LONG - 1))) >> 3;
-       /* make XOR/test mask */
        mask = 1UL << (nr & (BITS_PER_LONG - 1));
-       /* Do the atomic update. */
-       __BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
+       old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
        barrier();
        return (old & mask) != 0;
 }
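
Note the changed return calculation in test_and_clear_bit(): the result is now derived from the pre-update word and the mask (old & ~mask) instead of comparing old and new values, which is what the old-value-returning __BITOPS_LOOP() naturally supports. The same shape written with portable fetch-style builtins; the my_* names are illustrative, and this sketch covers only the single-word case, whereas the real code first locates the containing long with __bitops_word().

#define MY_BITS_PER_LONG        (8 * sizeof(unsigned long))

static inline int my_test_and_set_bit(unsigned long nr, unsigned long *word)
{
        unsigned long mask = 1UL << (nr & (MY_BITS_PER_LONG - 1));
        unsigned long old = __atomic_fetch_or(word, mask, __ATOMIC_SEQ_CST);

        return (old & mask) != 0;
}

static inline int my_test_and_clear_bit(unsigned long nr, unsigned long *word)
{
        unsigned long mask = ~(1UL << (nr & (MY_BITS_PER_LONG - 1)));
        unsigned long old = __atomic_fetch_and(word, mask, __ATOMIC_SEQ_CST);

        return (old & ~mask) != 0;
}
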
-#endif /* CONFIG_SMP */
 
-/*
- * fast, non-SMP set_bit routine
- */
 static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
-
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       asm volatile(
-               "       oc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void 
-__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
 
-       addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       *(unsigned char *) addr |= 1 << (nr & 7);
+       *addr |= 1 << (nr & 7);
 }
 
-#define set_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_set_bit((nr),(addr)) : \
- __set_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP clear_bit routine
- */
 static inline void 
 __clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
-
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       asm volatile(
-               "       nc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
-}
-
-static inline void 
-__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
-{
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
 
-       addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       *(unsigned char *) addr &= ~(1 << (nr & 7));
+       *addr &= ~(1 << (nr & 7));
 }
 
-#define clear_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_clear_bit((nr),(addr)) : \
- __clear_bit((nr),(addr)) )
-
-/* 
- * fast, non-SMP change_bit routine 
- */
 static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
-
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       asm volatile(
-               "       xc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
-}
-
-static inline void 
-__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr) 
-{
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
 
-       addr = ((unsigned long) ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       *(unsigned char *) addr ^= 1 << (nr & 7);
+       *addr ^= 1 << (nr & 7);
 }
 
-#define change_bit_simple(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_change_bit((nr),(addr)) : \
- __change_bit((nr),(addr)) )
-
-/*
- * fast, non-SMP test_and_set_bit routine
- */
 static inline int
-test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
        unsigned char ch;
 
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       ch = *(unsigned char *) addr;
-       asm volatile(
-               "       oc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
-               : "cc", "memory");
+       ch = *addr;
+       *addr |= 1 << (nr & 7);
        return (ch >> (nr & 7)) & 1;
 }
-#define __test_and_set_bit(X,Y)                test_and_set_bit_simple(X,Y)
 
-/*
- * fast, non-SMP test_and_clear_bit routine
- */
 static inline int
-test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
        unsigned char ch;
 
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       ch = *(unsigned char *) addr;
-       asm volatile(
-               "       nc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
-               : "cc", "memory");
+       ch = *addr;
+       *addr &= ~(1 << (nr & 7));
        return (ch >> (nr & 7)) & 1;
 }
-#define __test_and_clear_bit(X,Y)      test_and_clear_bit_simple(X,Y)
 
-/*
- * fast, non-SMP test_and_change_bit routine
- */
 static inline int
-test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
+__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
 {
-       unsigned long addr;
+       unsigned char *addr = __bitops_byte(nr, ptr);
        unsigned char ch;
 
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       ch = *(unsigned char *) addr;
-       asm volatile(
-               "       xc      %O0(1,%R0),%1"
-               : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
-               : "cc", "memory");
+       ch = *addr;
+       *addr ^= 1 << (nr & 7);
        return (ch >> (nr & 7)) & 1;
 }
-#define __test_and_change_bit(X,Y)     test_and_change_bit_simple(X,Y)
-
-#ifdef CONFIG_SMP
-#define set_bit             set_bit_cs
-#define clear_bit           clear_bit_cs
-#define change_bit          change_bit_cs
-#define test_and_set_bit    test_and_set_bit_cs
-#define test_and_clear_bit  test_and_clear_bit_cs
-#define test_and_change_bit test_and_change_bit_cs
-#else
-#define set_bit             set_bit_simple
-#define clear_bit           clear_bit_simple
-#define change_bit          change_bit_simple
-#define test_and_set_bit    test_and_set_bit_simple
-#define test_and_clear_bit  test_and_clear_bit_simple
-#define test_and_change_bit test_and_change_bit_simple
-#endif
-
-
-/*
- * This routine doesn't need to be atomic.
- */
 
-static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
+static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
 {
-       unsigned long addr;
-       unsigned char ch;
-
-       addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
-       ch = *(volatile unsigned char *) addr;
-       return (ch >> (nr & 7)) & 1;
-}
+       const volatile unsigned char *addr;
 
-static inline int 
-__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
-    return (((volatile char *) addr)
-           [(nr^(BITS_PER_LONG-8))>>3] & (1<<(nr&7))) != 0;
+       addr = ((const volatile unsigned char *)ptr);
+       addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
+       return (*addr >> (nr & 7)) & 1;
 }
 
-#define test_bit(nr,addr) \
-(__builtin_constant_p((nr)) ? \
- __constant_test_bit((nr),(addr)) : \
- __test_bit((nr),(addr)) )
-
 /*
- * Optimized find bit helper functions.
- */
-
-/**
- * __ffz_word_loop - find byte offset of first long != -1UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
+ * Functions which use MSB0 bit numbering.
+ * On an s390x system the bits are numbered:
+ *   |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
  */
-static inline unsigned long __ffz_word_loop(const unsigned long *addr,
-                                           unsigned long size)
-{
-       typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
-       unsigned long bytes = 0;
-
-       asm volatile(
-#ifndef CONFIG_64BIT
-               "       ahi     %1,-1\n"
-               "       sra     %1,5\n"
-               "       jz      1f\n"
-               "0:     c       %2,0(%0,%3)\n"
-               "       jne     1f\n"
-               "       la      %0,4(%0)\n"
-               "       brct    %1,0b\n"
-               "1:\n"
-#else
-               "       aghi    %1,-1\n"
-               "       srag    %1,%1,6\n"
-               "       jz      1f\n"
-               "0:     cg      %2,0(%0,%3)\n"
-               "       jne     1f\n"
-               "       la      %0,8(%0)\n"
-               "       brct    %1,0b\n"
-               "1:\n"
-#endif
-               : "+&a" (bytes), "+&d" (size)
-               : "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
-               : "cc" );
-       return bytes;
-}
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size);
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+                               unsigned long offset);
 
-/**
- * __ffs_word_loop - find byte offset of first long != 0UL
- * @addr: pointer to array of unsigned long
- * @size: size of the array in bits
- */
-static inline unsigned long __ffs_word_loop(const unsigned long *addr,
-                                           unsigned long size)
+static inline void set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-       typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
-       unsigned long bytes = 0;
-
-       asm volatile(
-#ifndef CONFIG_64BIT
-               "       ahi     %1,-1\n"
-               "       sra     %1,5\n"
-               "       jz      1f\n"
-               "0:     c       %2,0(%0,%3)\n"
-               "       jne     1f\n"
-               "       la      %0,4(%0)\n"
-               "       brct    %1,0b\n"
-               "1:\n"
-#else
-               "       aghi    %1,-1\n"
-               "       srag    %1,%1,6\n"
-               "       jz      1f\n"
-               "0:     cg      %2,0(%0,%3)\n"
-               "       jne     1f\n"
-               "       la      %0,8(%0)\n"
-               "       brct    %1,0b\n"
-               "1:\n"
-#endif
-               : "+&a" (bytes), "+&a" (size)
-               : "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
-               : "cc" );
-       return bytes;
+       return set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-/**
- * __ffz_word - add number of the first unset bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for unset bits
- */
-static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
+static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-#ifdef CONFIG_64BIT
-       if ((word & 0xffffffff) == 0xffffffff) {
-               word >>= 32;
-               nr += 32;
-       }
-#endif
-       if ((word & 0xffff) == 0xffff) {
-               word >>= 16;
-               nr += 16;
-       }
-       if ((word & 0xff) == 0xff) {
-               word >>= 8;
-               nr += 8;
-       }
-       return nr + _zb_findmap[(unsigned char) word];
+       return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-/**
- * __ffs_word - add number of the first set bit
- * @nr: base value the bit number is added to
- * @word: the word that is searched for set bits
- */
-static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
+static inline void __set_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-#ifdef CONFIG_64BIT
-       if ((word & 0xffffffff) == 0) {
-               word >>= 32;
-               nr += 32;
-       }
-#endif
-       if ((word & 0xffff) == 0) {
-               word >>= 16;
-               nr += 16;
-       }
-       if ((word & 0xff) == 0) {
-               word >>= 8;
-               nr += 8;
-       }
-       return nr + _sb_findmap[(unsigned char) word];
+       return __set_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-
-/**
- * __load_ulong_be - load big endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_be(const unsigned long *p,
-                                           unsigned long offset)
+static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
 {
-       p = (unsigned long *)((unsigned long) p + offset);
-       return *p;
+       return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
 
-/**
- * __load_ulong_le - load little endian unsigned long
- * @p: pointer to array of unsigned long
- * @offset: byte offset of source value in the array
- */
-static inline unsigned long __load_ulong_le(const unsigned long *p,
-                                           unsigned long offset)
+static inline int test_bit_inv(unsigned long nr,
+                              const volatile unsigned long *ptr)
 {
-       unsigned long word;
-
-       p = (unsigned long *)((unsigned long) p + offset);
-#ifndef CONFIG_64BIT
-       asm volatile(
-               "       ic      %0,%O1(%R1)\n"
-               "       icm     %0,2,%O1+1(%R1)\n"
-               "       icm     %0,4,%O1+2(%R1)\n"
-               "       icm     %0,8,%O1+3(%R1)"
-               : "=&d" (word) : "Q" (*p) : "cc");
-#else
-       asm volatile(
-               "       lrvg    %0,%1"
-               : "=d" (word) : "m" (*p) );
-#endif
-       return word;
+       return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
 }
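
The *_inv helpers above implement the MSB0 numbering described in the comment by XOR-ing the bit number with BITS_PER_LONG - 1 and reusing the normal LSB0 primitives. A small userspace sketch of just that index transformation (64-bit longs assumed, names illustrative):

#include <assert.h>

#define MODEL_BITS_PER_LONG 64

static unsigned long msb0_to_lsb0(unsigned long nr)
{
        /* Same transformation as the *_inv wrappers above. */
        return nr ^ (MODEL_BITS_PER_LONG - 1);
}

int main(void)
{
        assert(msb0_to_lsb0(0) == 63);          /* MSB of the first word */
        assert(msb0_to_lsb0(63) == 0);          /* LSB of the first word */
        assert(msb0_to_lsb0(64) == 127);        /* MSB of the second word */
        return 0;
}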
 
-/*
- * The various find bit functions.
- */
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
 
-/*
- * ffz - find first zero in word.
- * @word: The word to search
+/**
+ * __flogr - find leftmost one
+ * @word: the word to search
  *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
-       return __ffz_word(0, word);
+ * Returns the bit number of the most significant bit set,
+ * where the most significant bit has bit number 0.
+ * If no bit is set this function returns 64.
+ */
+static inline unsigned char __flogr(unsigned long word)
+{
+       if (__builtin_constant_p(word)) {
+               unsigned long bit = 0;
+
+               if (!word)
+                       return 64;
+               if (!(word & 0xffffffff00000000UL)) {
+                       word <<= 32;
+                       bit += 32;
+               }
+               if (!(word & 0xffff000000000000UL)) {
+                       word <<= 16;
+                       bit += 16;
+               }
+               if (!(word & 0xff00000000000000UL)) {
+                       word <<= 8;
+                       bit += 8;
+               }
+               if (!(word & 0xf000000000000000UL)) {
+                       word <<= 4;
+                       bit += 4;
+               }
+               if (!(word & 0xc000000000000000UL)) {
+                       word <<= 2;
+                       bit += 2;
+               }
+               if (!(word & 0x8000000000000000UL)) {
+                       word <<= 1;
+                       bit += 1;
+               }
+               return bit;
+       } else {
+               register unsigned long bit asm("4") = word;
+               register unsigned long out asm("5");
+
+               asm volatile(
+                       "       flogr   %[bit],%[bit]\n"
+                       : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
+               return bit;
+       }
 }
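
__flogr() counts bit positions from the most significant bit and returns 64 for an all-zero word; the __ffs()/__fls() definitions below recover conventional LSB0 positions by XOR-ing the result with 63. Here is a userspace model of that relationship built on a compiler builtin instead of the FLOGR instruction; a sketch only, assuming 64-bit longs.

#include <assert.h>

static unsigned int model_flogr(unsigned long word)
{
        /* FLOGR numbers bits from the MSB (bit 0) and yields 64 for 0. */
        return word ? (unsigned int)__builtin_clzll(word) : 64;
}

int main(void)
{
        assert(model_flogr(0) == 64);
        assert(model_flogr(1UL << 63) == 0);
        assert(model_flogr(1) == 63);
        /* __ffs()/__fls() below fall out of this by XOR-ing with 63: */
        assert((model_flogr(0x50UL & -0x50UL) ^ 63) == 4);      /* __ffs(0x50) */
        assert((model_flogr(0x50UL) ^ 63) == 6);                /* __fls(0x50) */
        return 0;
}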
 
 /**
@@ -573,337 +395,83 @@ static inline unsigned long ffz(unsigned long word)
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs (unsigned long word)
+static inline unsigned long __ffs(unsigned long word)
 {
-       return __ffs_word(0, word);
+       return __flogr(-word & word) ^ (BITS_PER_LONG - 1);
 }
 
 /**
  * ffs - find first bit set
- * @x: the word to search
+ * @word: the word to search
  *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
+ * This is defined the same way as the libc and
+ * compiler builtin ffs routines (man ffs).
  */
-static inline int ffs(int x)
+static inline int ffs(int word)
 {
-       if (!x)
-               return 0;
-       return __ffs_word(1, x);
+       unsigned long mask = 2 * BITS_PER_LONG - 1;
+       unsigned int val = (unsigned int)word;
+
+       return (1 + (__flogr(-val & val) ^ (BITS_PER_LONG - 1))) & mask;
 }
 
 /**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
  *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
+ * Undefined if no set bit exists, so code should check against 0 first.
  */
-static inline unsigned long find_first_zero_bit(const unsigned long *addr,
-                                               unsigned long size)
+static inline unsigned long __fls(unsigned long word)
 {
-       unsigned long bytes, bits;
-
-        if (!size)
-                return 0;
-       bytes = __ffz_word_loop(addr, size);
-       bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
-       return (bits < size) ? bits : size;
+       return __flogr(word) ^ (BITS_PER_LONG - 1);
 }
-#define find_first_zero_bit find_first_zero_bit
 
 /**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
+ * fls64 - find last set bit in a 64-bit word
+ * @word: the word to search
  *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned long find_first_bit(const unsigned long * addr,
-                                          unsigned long size)
-{
-       unsigned long bytes, bits;
-
-        if (!size)
-                return 0;
-       bytes = __ffs_word_loop(addr, size);
-       bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
-       return (bits < size) ? bits : size;
-}
-#define find_first_bit find_first_bit
-
-/*
- * Big endian variant whichs starts bit counting from left using
- * the flogr (find leftmost one) instruction.
- */
-static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
-{
-       register unsigned long bit asm("2") = val;
-       register unsigned long out asm("3");
-
-       asm volatile (
-               "       .insn   rre,0xb9830000,%[bit],%[bit]\n"
-               : [bit] "+d" (bit), [out] "=d" (out) : : "cc");
-       return nr + bit;
-}
-
-/*
- * 64 bit special left bitops format:
- * order in memory:
- *    00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
- *    10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
- *    20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
- *    30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
- * after that follows the next long with bit numbers
- *    40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
- *    50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
- *    60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
- *    70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
- * The reason for this bit ordering is the fact that
- * the hardware sets bits in a bitmap starting at bit 0
- * and we don't want to scan the bitmap from the 'wrong
- * end'.
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
  */
-static inline unsigned long find_first_bit_left(const unsigned long *addr,
-                                               unsigned long size)
-{
-       unsigned long bytes, bits;
-
-       if (!size)
-               return 0;
-       bytes = __ffs_word_loop(addr, size);
-       bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
-       return (bits < size) ? bits : size;
-}
-
-static inline int find_next_bit_left(const unsigned long *addr,
-                                    unsigned long size,
-                                    unsigned long offset)
+static inline int fls64(unsigned long word)
 {
-       const unsigned long *p;
-       unsigned long bit, set;
-
-       if (offset >= size)
-               return size;
-       bit = offset & (BITS_PER_LONG - 1);
-       offset -= bit;
-       size -= offset;
-       p = addr + offset / BITS_PER_LONG;
-       if (bit) {
-               set = __flo_word(0, *p & (~0UL >> bit));
-               if (set >= size)
-                       return size + offset;
-               if (set < BITS_PER_LONG)
-                       return set + offset;
-               offset += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-               p++;
-       }
-       return offset + find_first_bit_left(p, size);
-}
-
-#define for_each_set_bit_left(bit, addr, size)                         \
-       for ((bit) = find_first_bit_left((addr), (size));               \
-            (bit) < (size);                                            \
-            (bit) = find_next_bit_left((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_left_cont(bit, addr, size)                    \
-       for ((bit) = find_next_bit_left((addr), (size), (bit));         \
-            (bit) < (size);                                            \
-            (bit) = find_next_bit_left((addr), (size), (bit) + 1))
+       unsigned long mask = 2 * BITS_PER_LONG - 1;
 
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-static inline int find_next_zero_bit (const unsigned long * addr,
-                                     unsigned long size,
-                                     unsigned long offset)
-{
-        const unsigned long *p;
-       unsigned long bit, set;
-
-       if (offset >= size)
-               return size;
-       bit = offset & (BITS_PER_LONG - 1);
-       offset -= bit;
-       size -= offset;
-       p = addr + offset / BITS_PER_LONG;
-       if (bit) {
-               /*
-                * __ffz_word returns BITS_PER_LONG
-                * if no zero bit is present in the word.
-                */
-               set = __ffz_word(bit, *p >> bit);
-               if (set >= size)
-                       return size + offset;
-               if (set < BITS_PER_LONG)
-                       return set + offset;
-               offset += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-               p++;
-       }
-       return offset + find_first_zero_bit(p, size);
+       return (1 + (__flogr(word) ^ (BITS_PER_LONG - 1))) & mask;
 }
-#define find_next_zero_bit find_next_zero_bit
 
 /**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+ * fls - find last (most-significant) bit set
+ * @word: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline int find_next_bit (const unsigned long * addr,
-                                unsigned long size,
-                                unsigned long offset)
+static inline int fls(int word)
 {
-        const unsigned long *p;
-       unsigned long bit, set;
-
-       if (offset >= size)
-               return size;
-       bit = offset & (BITS_PER_LONG - 1);
-       offset -= bit;
-       size -= offset;
-       p = addr + offset / BITS_PER_LONG;
-       if (bit) {
-               /*
-                * __ffs_word returns BITS_PER_LONG
-                * if no one bit is present in the word.
-                */
-               set = __ffs_word(0, *p & (~0UL << bit));
-               if (set >= size)
-                       return size + offset;
-               if (set < BITS_PER_LONG)
-                       return set + offset;
-               offset += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-               p++;
-       }
-       return offset + find_first_bit(p, size);
+       return fls64((unsigned int)word);
 }
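
The mask of 2 * BITS_PER_LONG - 1 in ffs() and fls64() above is what makes the zero-input case come out as 0: __flogr(0) = 64, 64 ^ 63 = 127, and (1 + 127) & 127 = 0. A self-contained userspace check of the documented edge values, again modelling FLOGR with a builtin; a sketch, with 64-bit longs assumed.

#include <assert.h>

#define MODEL_BPL 64    /* BITS_PER_LONG assumed for this sketch */

static unsigned int model_flogr(unsigned long word)
{
        return word ? (unsigned int)__builtin_clzll(word) : 64;
}

static int model_ffs(int word)
{
        unsigned long mask = 2 * MODEL_BPL - 1;
        unsigned int val = (unsigned int)word;

        return (1 + (model_flogr(-val & val) ^ (MODEL_BPL - 1))) & mask;
}

static int model_fls64(unsigned long word)
{
        unsigned long mask = 2 * MODEL_BPL - 1;

        return (1 + (model_flogr(word) ^ (MODEL_BPL - 1))) & mask;
}

int main(void)
{
        assert(model_ffs(0) == 0);
        assert(model_ffs(1) == 1);
        assert(model_ffs((int)0x80000000u) == 32);
        assert(model_fls64(0) == 0);
        assert(model_fls64(1) == 1);
        assert(model_fls64(1UL << 63) == 64);
        return 0;
}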
-#define find_next_bit find_next_bit
 
-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-       return find_first_bit(b, 140);
-}
+#else /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
 
-#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffs.h>
 #include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/fls64.h>
 
+#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
+
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
-
-/*
- * ATTENTION: intel byte ordering convention for ext2 and minix !!
- * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
- * bit 32 is the LSB of (addr+4).
- * That combined with the little endian byte order of Intel gives the
- * following bit order in memory:
- *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
- *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
- */
-
-static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
-{
-       unsigned long bytes, bits;
-
-        if (!size)
-                return 0;
-       bytes = __ffz_word_loop(vaddr, size);
-       bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
-       return (bits < size) ? bits : size;
-}
-#define find_first_zero_bit_le find_first_zero_bit_le
-
-static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
-                                         unsigned long offset)
-{
-        unsigned long *addr = vaddr, *p;
-       unsigned long bit, set;
-
-        if (offset >= size)
-                return size;
-       bit = offset & (BITS_PER_LONG - 1);
-       offset -= bit;
-       size -= offset;
-       p = addr + offset / BITS_PER_LONG;
-        if (bit) {
-               /*
-                * s390 version of ffz returns BITS_PER_LONG
-                * if no zero bit is present in the word.
-                */
-               set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
-               if (set >= size)
-                       return size + offset;
-               if (set < BITS_PER_LONG)
-                       return set + offset;
-               offset += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-               p++;
-        }
-       return offset + find_first_zero_bit_le(p, size);
-}
-#define find_next_zero_bit_le find_next_zero_bit_le
-
-static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
-{
-       unsigned long bytes, bits;
-
-       if (!size)
-               return 0;
-       bytes = __ffs_word_loop(vaddr, size);
-       bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
-       return (bits < size) ? bits : size;
-}
-#define find_first_bit_le find_first_bit_le
-
-static inline int find_next_bit_le(void *vaddr, unsigned long size,
-                                    unsigned long offset)
-{
-       unsigned long *addr = vaddr, *p;
-       unsigned long bit, set;
-
-       if (offset >= size)
-               return size;
-       bit = offset & (BITS_PER_LONG - 1);
-       offset -= bit;
-       size -= offset;
-       p = addr + offset / BITS_PER_LONG;
-       if (bit) {
-               /*
-                * s390 version of ffz returns BITS_PER_LONG
-                * if no zero bit is present in the word.
-                */
-               set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
-               if (set >= size)
-                       return size + offset;
-               if (set < BITS_PER_LONG)
-                       return set + offset;
-               offset += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-               p++;
-       }
-       return offset + find_first_bit_le(p, size);
-}
-#define find_next_bit_le find_next_bit_le
-
+#include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/le.h>
-
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
 
 #endif /* _S390_BITOPS_H */
index c1e7c646727cd4c71c56a5040a477a6d8618d565..4bf9da03591e7abda64ab4632f511c16b7734d16 100644 (file)
@@ -22,6 +22,7 @@
 #define PSW32_MASK_ASC         0x0000C000UL
 #define PSW32_MASK_CC          0x00003000UL
 #define PSW32_MASK_PM          0x00000f00UL
+#define PSW32_MASK_RI          0x00000080UL
 
 #define PSW32_MASK_USER                0x0000FF00UL
 
@@ -35,7 +36,9 @@
 #define PSW32_ASC_SECONDARY    0x00008000UL
 #define PSW32_ASC_HOME         0x0000C000UL
 
-extern u32 psw32_user_bits;
+#define PSW32_USER_BITS (PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT | \
+                        PSW32_DEFAULT_KEY | PSW32_MASK_BASE | \
+                        PSW32_MASK_MCHECK | PSW32_MASK_PSTATE | PSW32_ASC_HOME)
 
 #define COMPAT_USER_HZ         100
 #define COMPAT_UTS_MACHINE     "s390\0\0\0\0"
index debfda33d1f86d88a8b3bce89e6b23962c643489..9b69c0befdcaafcb46565056e10c36e884ab8065 100644 (file)
@@ -8,69 +8,59 @@
 #define __ASM_CTL_REG_H
 
 #ifdef CONFIG_64BIT
-
-#define __ctl_load(array, low, high) ({                                \
-       typedef struct { char _[sizeof(array)]; } addrtype;     \
-       asm volatile(                                           \
-               "       lctlg   %1,%2,%0\n"                     \
-               : : "Q" (*(addrtype *)(&array)),                \
-                   "i" (low), "i" (high));                     \
-       })
-
-#define __ctl_store(array, low, high) ({                       \
-       typedef struct { char _[sizeof(array)]; } addrtype;     \
-       asm volatile(                                           \
-               "       stctg   %1,%2,%0\n"                     \
-               : "=Q" (*(addrtype *)(&array))                  \
-               : "i" (low), "i" (high));                       \
-       })
-
-#else /* CONFIG_64BIT */
-
-#define __ctl_load(array, low, high) ({                                \
-       typedef struct { char _[sizeof(array)]; } addrtype;     \
-       asm volatile(                                           \
-               "       lctl    %1,%2,%0\n"                     \
-               : : "Q" (*(addrtype *)(&array)),                \
-                   "i" (low), "i" (high));                     \
-})
-
-#define __ctl_store(array, low, high) ({                       \
-       typedef struct { char _[sizeof(array)]; } addrtype;     \
-       asm volatile(                                           \
-               "       stctl   %1,%2,%0\n"                     \
-               : "=Q" (*(addrtype *)(&array))                  \
-               : "i" (low), "i" (high));                       \
-       })
-
-#endif /* CONFIG_64BIT */
-
-#define __ctl_set_bit(cr, bit) ({      \
-       unsigned long __dummy;          \
-       __ctl_store(__dummy, cr, cr);   \
-       __dummy |= 1UL << (bit);        \
-       __ctl_load(__dummy, cr, cr);    \
-})
-
-#define __ctl_clear_bit(cr, bit) ({    \
-       unsigned long __dummy;          \
-       __ctl_store(__dummy, cr, cr);   \
-       __dummy &= ~(1UL << (bit));     \
-       __ctl_load(__dummy, cr, cr);    \
-})
+# define __CTL_LOAD    "lctlg"
+# define __CTL_STORE   "stctg"
+#else
+# define __CTL_LOAD    "lctl"
+# define __CTL_STORE   "stctl"
+#endif
+
+#define __ctl_load(array, low, high) {                                 \
+       typedef struct { char _[sizeof(array)]; } addrtype;             \
+                                                                       \
+       BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+       asm volatile(                                                   \
+               __CTL_LOAD " %1,%2,%0\n"                                \
+               : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+}
+
+#define __ctl_store(array, low, high) {                                        \
+       typedef struct { char _[sizeof(array)]; } addrtype;             \
+                                                                       \
+       BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+       asm volatile(                                                   \
+               __CTL_STORE " %1,%2,%0\n"                               \
+               : "=Q" (*(addrtype *)(&array))                          \
+               : "i" (low), "i" (high));                               \
+}
+
+static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+{
+       unsigned long reg;
+
+       __ctl_store(reg, cr, cr);
+       reg |= 1UL << bit;
+       __ctl_load(reg, cr, cr);
+}
+
+static inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+{
+       unsigned long reg;
+
+       __ctl_store(reg, cr, cr);
+       reg &= ~(1UL << bit);
+       __ctl_load(reg, cr, cr);
+}
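
The BUILD_BUG_ON() added to __ctl_load()/__ctl_store() rejects callers whose array does not cover exactly high - low + 1 control registers. A userspace sketch of that size check in isolation; MODEL_CTL_RANGE_OK is an illustrative stand-in, not a kernel macro.

#include <assert.h>

#define MODEL_CTL_RANGE_OK(array, low, high) \
        (sizeof(array) == ((high) - (low) + 1) * sizeof(long))

int main(void)
{
        unsigned long single;                    /* e.g. __ctl_store(single, 0, 0) */
        unsigned long pair[2];                   /* e.g. __ctl_store(pair, 14, 15) */

        assert(MODEL_CTL_RANGE_OK(single, 0, 0));
        assert(MODEL_CTL_RANGE_OK(pair, 14, 15));
        assert(!MODEL_CTL_RANGE_OK(pair, 0, 0)); /* would now trip BUILD_BUG_ON() */
        (void)single;
        (void)pair;
        return 0;
}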
+
+void smp_ctl_set_bit(int cr, int bit);
+void smp_ctl_clear_bit(int cr, int bit);
 
 #ifdef CONFIG_SMP
-
-extern void smp_ctl_set_bit(int cr, int bit);
-extern void smp_ctl_clear_bit(int cr, int bit);
-#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
-
+# define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
 #else
-
-#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
-#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
-
-#endif /* CONFIG_SMP */
+# define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
+# define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
+#endif
 
 #endif /* __ASM_CTL_REG_H */
index 188c5052a20ab663f878afc2c23889ec53384d8e..530c15eb01e99ad5970b556234c65d34a8bdf141 100644 (file)
@@ -107,6 +107,11 @@ void debug_set_level(debug_info_t* id, int new_level);
 void debug_set_critical(void);
 void debug_stop_all(void);
 
+static inline bool debug_level_enabled(debug_info_t* id, int level)
+{
+       return level <= id->level;
+}
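
debug_level_enabled() simply compares the event level against the debug area's current level, so callers can skip building expensive event data when it would be filtered anyway. A trivial userspace model of the check; the struct and names are illustrative only, the real debug_info_t carries far more state.

#include <assert.h>
#include <stdbool.h>

struct model_debug_info {
        int level;                      /* current level of the debug area */
};

static bool model_debug_level_enabled(const struct model_debug_info *id, int level)
{
        /* An event is recorded iff its level does not exceed the area level. */
        return level <= id->level;
}

int main(void)
{
        struct model_debug_info id = { .level = 3 };

        assert(model_debug_level_enabled(&id, 1));      /* important: recorded */
        assert(!model_debug_level_enabled(&id, 5));     /* verbose: filtered   */
        return 0;
}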
+
 static inline debug_entry_t*
 debug_event(debug_info_t* id, int level, void* data, int length)
 {
diff --git a/arch/s390/include/asm/dis.h b/arch/s390/include/asm/dis.h
new file mode 100644 (file)
index 0000000..04a83f5
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Disassemble s390 instructions.
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#ifndef __ASM_S390_DIS_H__
+#define __ASM_S390_DIS_H__
+
+/* Type of operand */
+#define OPERAND_GPR    0x1     /* Operand printed as %rx */
+#define OPERAND_FPR    0x2     /* Operand printed as %fx */
+#define OPERAND_AR     0x4     /* Operand printed as %ax */
+#define OPERAND_CR     0x8     /* Operand printed as %cx */
+#define OPERAND_DISP   0x10    /* Operand printed as displacement */
+#define OPERAND_BASE   0x20    /* Operand printed as base register */
+#define OPERAND_INDEX  0x40    /* Operand printed as index register */
+#define OPERAND_PCREL  0x80    /* Operand printed as pc-relative symbol */
+#define OPERAND_SIGNED 0x100   /* Operand printed as signed value */
+#define OPERAND_LENGTH 0x200   /* Operand printed as length (+1) */
+
+
+struct s390_operand {
+       int bits;               /* The number of bits in the operand. */
+       int shift;              /* The number of bits to shift. */
+       int flags;              /* One bit syntax flags. */
+};
+
+struct s390_insn {
+       const char name[5];
+       unsigned char opfrag;
+       unsigned char format;
+};
+
+
+static inline int insn_length(unsigned char code)
+{
+       return ((((int) code + 64) >> 7) + 1) << 1;
+}
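
insn_length() derives the instruction size from the two most significant bits of the first opcode byte: 00 selects 2 bytes, 01 and 10 select 4 bytes, 11 selects 6 bytes. A userspace check of that mapping (the cited opcodes are examples, not an exhaustive table):

#include <assert.h>

static int model_insn_length(unsigned char code)
{
        /* Same expression as insn_length() above. */
        return ((((int) code + 64) >> 7) + 1) << 1;
}

int main(void)
{
        assert(model_insn_length(0x07) == 2);   /* 00xxxxxx -> 2 bytes (e.g. BCR)  */
        assert(model_insn_length(0x47) == 4);   /* 01xxxxxx -> 4 bytes (e.g. BC)   */
        assert(model_insn_length(0xb2) == 4);   /* 10xxxxxx -> 4 bytes             */
        assert(model_insn_length(0xc0) == 6);   /* 11xxxxxx -> 6 bytes (e.g. LARL) */
        return 0;
}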
+
+void show_code(struct pt_regs *regs);
+void print_fn_code(unsigned char *code, unsigned long len);
+int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len);
+struct s390_insn *find_insn(unsigned char *code);
+
+static inline int is_known_insn(unsigned char *code)
+{
+       return !!find_insn(code);
+}
+
+#endif /* __ASM_S390_DIS_H__ */
index ef61709950765bbdd72879c473b49d509c7adf0a..7ecb92b469b67bc45a099d9fea8bb72e375fc704 100644 (file)
@@ -12,9 +12,9 @@
 
 #define TCW_FORMAT_DEFAULT             0
 #define TCW_TIDAW_FORMAT_DEFAULT       0
-#define TCW_FLAGS_INPUT_TIDA           1 << (23 - 5)
-#define TCW_FLAGS_TCCB_TIDA            1 << (23 - 6)
-#define TCW_FLAGS_OUTPUT_TIDA          1 << (23 - 7)
+#define TCW_FLAGS_INPUT_TIDA           (1 << (23 - 5))
+#define TCW_FLAGS_TCCB_TIDA            (1 << (23 - 6))
+#define TCW_FLAGS_OUTPUT_TIDA          (1 << (23 - 7))
 #define TCW_FLAGS_TIDAW_FORMAT(x)      ((x) & 3) << (23 - 9)
 #define TCW_FLAGS_GET_TIDAW_FORMAT(x)  (((x) >> (23 - 9)) & 3)
 
@@ -54,11 +54,11 @@ struct tcw {
        u32 intrg;
 } __attribute__ ((packed, aligned(64)));
 
-#define TIDAW_FLAGS_LAST               1 << (7 - 0)
-#define TIDAW_FLAGS_SKIP               1 << (7 - 1)
-#define TIDAW_FLAGS_DATA_INT           1 << (7 - 2)
-#define TIDAW_FLAGS_TTIC               1 << (7 - 3)
-#define TIDAW_FLAGS_INSERT_CBC         1 << (7 - 4)
+#define TIDAW_FLAGS_LAST               (1 << (7 - 0))
+#define TIDAW_FLAGS_SKIP               (1 << (7 - 1))
+#define TIDAW_FLAGS_DATA_INT           (1 << (7 - 2))
+#define TIDAW_FLAGS_TTIC               (1 << (7 - 3))
+#define TIDAW_FLAGS_INSERT_CBC         (1 << (7 - 4))
 
 /**
  * struct tidaw - Transport-Indirect-Addressing Word (TIDAW)
@@ -106,9 +106,9 @@ struct tsa_ddpc {
        u8 sense[32];
 } __attribute__ ((packed));
 
-#define TSA_INTRG_FLAGS_CU_STATE_VALID         1 << (7 - 0)
-#define TSA_INTRG_FLAGS_DEV_STATE_VALID                1 << (7 - 1)
-#define TSA_INTRG_FLAGS_OP_STATE_VALID         1 << (7 - 2)
+#define TSA_INTRG_FLAGS_CU_STATE_VALID         (1 << (7 - 0))
+#define TSA_INTRG_FLAGS_DEV_STATE_VALID                (1 << (7 - 1))
+#define TSA_INTRG_FLAGS_OP_STATE_VALID         (1 << (7 - 2))
 
 /**
  * struct tsa_intrg - Interrogate Transport-Status Area (Intrg. TSA)
@@ -140,10 +140,10 @@ struct tsa_intrg {
 #define TSB_FORMAT_DDPC                2
 #define TSB_FORMAT_INTRG       3
 
-#define TSB_FLAGS_DCW_OFFSET_VALID     1 << (7 - 0)
-#define TSB_FLAGS_COUNT_VALID          1 << (7 - 1)
-#define TSB_FLAGS_CACHE_MISS           1 << (7 - 2)
-#define TSB_FLAGS_TIME_VALID           1 << (7 - 3)
+#define TSB_FLAGS_DCW_OFFSET_VALID     (1 << (7 - 0))
+#define TSB_FLAGS_COUNT_VALID          (1 << (7 - 1))
+#define TSB_FLAGS_CACHE_MISS           (1 << (7 - 2))
+#define TSB_FLAGS_TIME_VALID           (1 << (7 - 3))
 #define TSB_FLAGS_FORMAT(x)            ((x) & 7)
 #define TSB_FORMAT(t)                  ((t)->flags & 7)
 
@@ -179,9 +179,9 @@ struct tsb {
 #define DCW_INTRG_RCQ_PRIMARY          1
 #define DCW_INTRG_RCQ_SECONDARY                2
 
-#define DCW_INTRG_FLAGS_MPM            1 < (7 - 0)
-#define DCW_INTRG_FLAGS_PPR            1 < (7 - 1)
-#define DCW_INTRG_FLAGS_CRIT           1 < (7 - 2)
+#define DCW_INTRG_FLAGS_MPM            (1 << (7 - 0))
+#define DCW_INTRG_FLAGS_PPR            (1 << (7 - 1))
+#define DCW_INTRG_FLAGS_CRIT           (1 << (7 - 2))
 
 /**
  * struct dcw_intrg_data - Interrogate DCW data
@@ -216,7 +216,7 @@ struct dcw_intrg_data {
        u8  prog_data[0];
 } __attribute__ ((packed));
 
-#define DCW_FLAGS_CC           1 << (7 - 1)
+#define DCW_FLAGS_CC           (1 << (7 - 1))
 
 #define DCW_CMD_WRITE          0x01
 #define DCW_CMD_READ           0x02
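
The DCW_INTRG_FLAGS_* hunk above is more than a parenthesization cleanup: the old definitions used '<' where '<<' was intended, so each macro evaluated to the comparison result 1 instead of a bit mask. A small stand-alone demonstration of the difference:

#include <assert.h>

int main(void)
{
        int broken_mpm  = 1 <  (7 - 0);   /* old definition: always 1       */
        int fixed_mpm   = 1 << (7 - 0);   /* new definition: 0x80           */
        int broken_crit = 1 <  (7 - 2);   /* also 1 ...                     */
        int fixed_crit  = 1 << (7 - 2);   /* ... instead of 0x20            */

        assert(broken_mpm == 1 && fixed_mpm == 0x80);
        assert(broken_crit == 1 && fixed_crit == 0x20);
        return 0;
}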
index 2bd6cb897b90e86f129a6ae2b4eeca6bd495de95..2fcccc0c997cc3102dcb67e5ea2f60fc4f4200b2 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef _ASM_S390_IPL_H
 #define _ASM_S390_IPL_H
 
+#include <asm/lowcore.h>
 #include <asm/types.h>
 #include <asm/cio.h>
 #include <asm/setup.h>
@@ -86,7 +87,14 @@ struct ipl_parameter_block {
  */
 extern u32 ipl_flags;
 extern u32 dump_prefix_page;
-extern unsigned int zfcpdump_prefix_array[];
+
+struct dump_save_areas {
+       struct save_area **areas;
+       int count;
+};
+
+extern struct dump_save_areas dump_save_areas;
+struct save_area *dump_save_area_create(int cpu);
 
 extern void do_reipl(void);
 extern void do_halt(void);
index 6c32190dc73e880255175049fe36965e647101eb..346b1c85ffb40d890078550dc72c2dedcb275a2e 100644 (file)
@@ -15,7 +15,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm goto("0:    brcl 0,0\n"
+       asm_volatile_goto("0:   brcl 0,0\n"
                ".pushsection __jump_table, \"aw\"\n"
                ASM_ALIGN "\n"
                ASM_PTR " 0b, %l[label], %0\n"
index 9f973d8de90ea91fbde12f11cff70c8470bf8a81..5d1f950704dc6272ec368279b2a01f82274fe220 100644 (file)
@@ -40,14 +40,8 @@ static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
        pgd_t *pgd = mm->pgd;
 
        S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-       if (s390_user_mode != HOME_SPACE_MODE) {
-               /* Load primary space page table origin. */
-               asm volatile(LCTL_OPCODE" 1,1,%0\n"
-                            : : "m" (S390_lowcore.user_asce) );
-       } else
-               /* Load home space page table origin. */
-               asm volatile(LCTL_OPCODE" 13,13,%0"
-                            : : "m" (S390_lowcore.user_asce) );
+       /* Load primary space page table origin. */
+       asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
        set_fs(current->thread.mm_segment);
 }
 
index 1e51f2915b2eea6a1396c7dfb2d63ffc2f671ee9..316c8503a3b4fda3824ca071065f145038b34a0c 100644 (file)
 #include <asm/setup.h>
 #ifndef __ASSEMBLY__
 
-void storage_key_init_range(unsigned long start, unsigned long end);
+static inline void storage_key_init_range(unsigned long start, unsigned long end)
+{
+#if PAGE_DEFAULT_KEY
+       __storage_key_init_range(start, end);
+#endif
+}
 
 static inline void clear_page(void *page)
 {
index 1ca5d1047c71742f5e3c5681c89a4b98e4e87530..ac24b26fc065be9bad9443fc7a86cd1e24ab3abe 100644 (file)
@@ -6,14 +6,9 @@
 extern debug_info_t *pci_debug_msg_id;
 extern debug_info_t *pci_debug_err_id;
 
-#ifdef CONFIG_PCI_DEBUG
 #define zpci_dbg(imp, fmt, args...)                            \
        debug_sprintf_event(pci_debug_msg_id, imp, fmt, ##args)
 
-#else /* !CONFIG_PCI_DEBUG */
-#define zpci_dbg(imp, fmt, args...) do { } while (0)
-#endif
-
 #define zpci_err(text...)                                                      \
        do {                                                                    \
                char debug_buffer[16];                                          \
index df6eac9f0cb4e324069e3bae65246a7a99f02888..649eb62c52b3784ec0214e47482b87442d3081dd 100644 (file)
 struct zpci_fib {
        u32 fmt         :  8;   /* format */
        u32             : 24;
-       u32 reserved1;
+       u32             : 32;
        u8 fc;                  /* function controls */
-       u8 reserved2;
-       u16 reserved3;
-       u32 reserved4;
+       u64             : 56;
        u64 pba;                /* PCI base address */
        u64 pal;                /* PCI address limit */
        u64 iota;               /* I/O Translation Anchor */
@@ -70,14 +68,13 @@ struct zpci_fib {
        u32 sum         :  1;   /* Adapter int summary bit enabled */
        u32             :  1;
        u32 aisbo       :  6;   /* Adapter int summary bit offset */
-       u32 reserved5;
+       u32             : 32;
        u64 aibv;               /* Adapter int bit vector address */
        u64 aisb;               /* Adapter int summary bit address */
        u64 fmb_addr;           /* Function measurement block address and key */
-       u64 reserved6;
-       u64 reserved7;
-} __packed;
-
+       u32             : 32;
+       u32 gd;
+} __packed __aligned(8);
 
 int zpci_mod_fc(u64 req, struct zpci_fib *fib);
 int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
index 86fe0ee2cee5945beacca37f563d6d118fae0a12..061ab45faf7015aa7e7ba0d8b7bbfa7b613a11c9 100644 (file)
  */
 #define __my_cpu_offset S390_lowcore.percpu_offset
 
+#ifdef CONFIG_64BIT
+
 /*
  * For 64 bit module code, the module may be more than 4G above the
  * per cpu area, use weak definitions to force the compiler to
  * generate external references.
  */
-#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
+#if defined(CONFIG_SMP) && defined(MODULE)
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
-#define arch_this_cpu_to_op(pcp, val, op)                              \
+/*
+ * We use a compare-and-swap loop since that uses less cpu cycles than
+ * disabling and enabling interrupts like the generic variant would do.
+ */
+#define arch_this_cpu_to_op_simple(pcp, val, op)                       \
 ({                                                                     \
        typedef typeof(pcp) pcp_op_T__;                                 \
        pcp_op_T__ old__, new__, prev__;                                \
        do {                                                            \
                old__ = prev__;                                         \
                new__ = old__ op (val);                                 \
-               switch (sizeof(*ptr__)) {                               \
-               case 8:                                                 \
-                       prev__ = cmpxchg64(ptr__, old__, new__);        \
-                       break;                                          \
-               default:                                                \
-                       prev__ = cmpxchg(ptr__, old__, new__);          \
-               }                                                       \
+               prev__ = cmpxchg(ptr__, old__, new__);                  \
        } while (prev__ != old__);                                      \
        preempt_enable();                                               \
        new__;                                                          \
 })
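
arch_this_cpu_to_op_simple() retries a cmpxchg() until the value it read is still current, which, as the comment notes, is cheaper than masking interrupts around a plain read-modify-write. Below is a userspace model of the same retry loop using GCC atomic builtins rather than the kernel's cmpxchg()/preempt_disable(); a sketch only.

#include <assert.h>

static long model_percpu_add(long *ptr, long val)
{
        long old, new;

        do {
                old = __atomic_load_n(ptr, __ATOMIC_RELAXED);
                new = old + val;
                /* retry if another update slipped in between load and swap */
        } while (!__atomic_compare_exchange_n(ptr, &old, new, 0,
                                              __ATOMIC_RELAXED, __ATOMIC_RELAXED));
        return new;
}

int main(void)
{
        long counter = 40;

        assert(model_percpu_add(&counter, 2) == 42);
        assert(counter == 42);
        return 0;
}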
 
-#define this_cpu_add_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_1(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_2(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_1(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_2(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_1(pcp, val)                arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_2(pcp, val)                arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_xor_1(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, ^)
+#define this_cpu_xor_2(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, ^)
+
+#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define this_cpu_add_4(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_8(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
+#define this_cpu_and_4(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_and_8(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, &)
+#define this_cpu_or_4(pcp, val)                arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_or_8(pcp, val)                arch_this_cpu_to_op_simple(pcp, val, |)
+#define this_cpu_xor_4(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, ^)
+#define this_cpu_xor_8(pcp, val)       arch_this_cpu_to_op_simple(pcp, val, ^)
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define arch_this_cpu_add(pcp, val, op1, op2, szcast)                  \
+{                                                                      \
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ val__ = (val);                                       \
+       pcp_op_T__ old__, *ptr__;                                       \
+       preempt_disable();                                              \
+       ptr__ = __this_cpu_ptr(&(pcp));                                 \
+       if (__builtin_constant_p(val__) &&                              \
+           ((szcast)val__ > -129) && ((szcast)val__ < 128)) {          \
+               asm volatile(                                           \
+                       op2 "   %[ptr__],%[val__]\n"                    \
+                       : [ptr__] "+Q" (*ptr__)                         \
+                       : [val__] "i" ((szcast)val__)                   \
+                       : "cc");                                        \
+       } else {                                                        \
+               asm volatile(                                           \
+                       op1 "   %[old__],%[val__],%[ptr__]\n"           \
+                       : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)   \
+                       : [val__] "d" (val__)                           \
+                       : "cc");                                        \
+       }                                                               \
+       preempt_enable();                                               \
+}
 
-#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op(pcp, val, +)
-#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op(pcp, val, +)
+#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
+#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)
 
-#define this_cpu_and_1(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_2(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_4(pcp, val) arch_this_cpu_to_op(pcp, val, &)
-#define this_cpu_and_8(pcp, val) arch_this_cpu_to_op(pcp, val, &)
+#define arch_this_cpu_add_return(pcp, val, op)                         \
+({                                                                     \
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ val__ = (val);                                       \
+       pcp_op_T__ old__, *ptr__;                                       \
+       preempt_disable();                                              \
+       ptr__ = __this_cpu_ptr(&(pcp));                                 \
+       asm volatile(                                                   \
+               op "    %[old__],%[val__],%[ptr__]\n"                   \
+               : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)           \
+               : [val__] "d" (val__)                                   \
+               : "cc");                                                \
+       preempt_enable();                                               \
+       old__ + val__;                                                  \
+})
 
-#define this_cpu_or_1(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_2(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_4(pcp, val) arch_this_cpu_to_op(pcp, val, |)
-#define this_cpu_or_8(pcp, val) arch_this_cpu_to_op(pcp, val, |)
+#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
+#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
 
-#define this_cpu_xor_1(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_2(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_4(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
-#define this_cpu_xor_8(pcp, val) arch_this_cpu_to_op(pcp, val, ^)
+#define arch_this_cpu_to_op(pcp, val, op)                              \
+{                                                                      \
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ val__ = (val);                                       \
+       pcp_op_T__ old__, *ptr__;                                       \
+       preempt_disable();                                              \
+       ptr__ = __this_cpu_ptr(&(pcp));                                 \
+       asm volatile(                                                   \
+               op "    %[old__],%[val__],%[ptr__]\n"                   \
+               : [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)           \
+               : [val__] "d" (val__)                                   \
+               : "cc");                                                \
+       preempt_enable();                                               \
+}
+
+#define this_cpu_and_4(pcp, val)       arch_this_cpu_to_op(pcp, val, "lan")
+#define this_cpu_and_8(pcp, val)       arch_this_cpu_to_op(pcp, val, "lang")
+#define this_cpu_or_4(pcp, val)                arch_this_cpu_to_op(pcp, val, "lao")
+#define this_cpu_or_8(pcp, val)                arch_this_cpu_to_op(pcp, val, "laog")
+#define this_cpu_xor_4(pcp, val)       arch_this_cpu_to_op(pcp, val, "lax")
+#define this_cpu_xor_8(pcp, val)       arch_this_cpu_to_op(pcp, val, "laxg")
+
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 #define arch_this_cpu_cmpxchg(pcp, oval, nval)                         \
 ({                                                                     \
        pcp_op_T__ *ptr__;                                              \
        preempt_disable();                                              \
        ptr__ = __this_cpu_ptr(&(pcp));                                 \
-       switch (sizeof(*ptr__)) {                                       \
-       case 8:                                                         \
-               ret__ = cmpxchg64(ptr__, oval, nval);                   \
-               break;                                                  \
-       default:                                                        \
-               ret__ = cmpxchg(ptr__, oval, nval);                     \
-       }                                                               \
+       ret__ = cmpxchg(ptr__, oval, nval);                             \
        preempt_enable();                                               \
        ret__;                                                          \
 })
 #define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#ifdef CONFIG_64BIT
 #define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)
-#endif
 
 #define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2)       \
 ({                                                                     \
 })
 
 #define this_cpu_cmpxchg_double_4 arch_this_cpu_cmpxchg_double
-#ifdef CONFIG_64BIT
 #define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
-#endif
+
+#endif /* CONFIG_64BIT */
 
 #include <asm-generic/percpu.h>
 
index 9b60a36c348d5422dc325463bcb26efeee64161d..2204400d0bd58d4a1e45c82394ff5cbd100aa2cc 100644 (file)
@@ -748,7 +748,9 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 
 static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
 {
-       if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
+       if (!MACHINE_HAS_ESOP &&
+           (pte_val(entry) & _PAGE_PRESENT) &&
+           (pte_val(entry) & _PAGE_WRITE)) {
                /*
                 * Without enhanced suppression-on-protection force
                 * the dirty bit on for all writable ptes.
index ca7821f07260301f26c2ff9314432b193ad8bebf..a56e63483e0f73bc36b8493c1a34b5cd1d3f5579 100644 (file)
@@ -134,14 +134,14 @@ struct stack_frame {
  * Do necessary setup to start up a new thread.
  */
 #define start_thread(regs, new_psw, new_stackp) do {                   \
-       regs->psw.mask  = psw_user_bits | PSW_MASK_EA | PSW_MASK_BA;    \
+       regs->psw.mask  = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;    \
        regs->psw.addr  = new_psw | PSW_ADDR_AMODE;                     \
        regs->gprs[15]  = new_stackp;                                   \
        execve_tail();                                                  \
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do {                 \
-       regs->psw.mask  = psw_user_bits | PSW_MASK_BA;                  \
+       regs->psw.mask  = PSW_USER_BITS | PSW_MASK_BA;                  \
        regs->psw.addr  = new_psw | PSW_ADDR_AMODE;                     \
        regs->gprs[15]  = new_stackp;                                   \
        __tlb_flush_mm(current->mm);                                    \
@@ -169,17 +169,15 @@ extern void release_thread(struct task_struct *);
  */
 extern unsigned long thread_saved_pc(struct task_struct *t);
 
-extern void show_code(struct pt_regs *regs);
-extern void print_fn_code(unsigned char *code, unsigned long len);
-extern int insn_to_mnemonic(unsigned char *instruction, char *buf,
-                           unsigned int len);
-
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
         (task_stack_page(tsk) + THREAD_SIZE) - 1)
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->psw.addr)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->gprs[15])
 
+/* Does the task have runtime instrumentation enabled? */
+#define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
+
 static inline unsigned short stap(void)
 {
        unsigned short cpu_address;
@@ -348,9 +346,9 @@ __set_psw_mask(unsigned long mask)
 }
 
 #define local_mcck_enable() \
-       __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
+       __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK)
 #define local_mcck_disable() \
-       __set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)
+       __set_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT)
 
 /*
  * Basic Machine Check/Program Check Handler.
index 52b56533c57cda08f4d8283253ed682a13536f5c..9c82cebddabd78938b1d66baef004c74f3bd97ec 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-extern long psw_kernel_bits;
-extern long psw_user_bits;
+#define PSW_KERNEL_BITS        (PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_HOME | \
+                        PSW_MASK_EA | PSW_MASK_BA)
+#define PSW_USER_BITS  (PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
+                        PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK | \
+                        PSW_MASK_PSTATE | PSW_ASC_PRIMARY)
 
 /*
  * The pt_regs struct defines the way the registers are stored on
index 59880dbaf360a6f39563990506e3c6a5af648aae..df802ee14af6f76591cebdc54030488b42c3d0cc 100644 (file)
@@ -48,13 +48,6 @@ void detect_memory_layout(struct mem_chunk chunk[], unsigned long maxsize);
 void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
                     unsigned long size);
 
-#define PRIMARY_SPACE_MODE     0
-#define ACCESS_REGISTER_MODE   1
-#define SECONDARY_SPACE_MODE   2
-#define HOME_SPACE_MODE                3
-
-extern unsigned int s390_user_mode;
-
 /*
  * Machine features detected in head.S
  */
index b64f15c3b4cc739a13e76ec3c1287328315ecb74..ac9bed8e103fa741f85b3aecc64fcc9a18511c82 100644 (file)
@@ -14,7 +14,6 @@
 #define raw_smp_processor_id() (S390_lowcore.cpu_nr)
 
 extern struct mutex smp_cpu_state_mutex;
-extern struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
 
 extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
 
index 6dbd559763c9996c099f14e4ce6926e15930b7f8..29c81f82705e139dc53a9af3f72b0db3d9e14695 100644 (file)
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_cr_regs(struct task_struct *task);
 
-static inline void save_fp_regs(s390_fp_regs *fpregs)
+static inline int test_fp_ctl(u32 fpc)
 {
+       u32 orig_fpc;
+       int rc;
+
+       if (!MACHINE_HAS_IEEE)
+               return 0;
+
        asm volatile(
-               "       std     0,%O0+8(%R0)\n"
-               "       std     2,%O0+24(%R0)\n"
-               "       std     4,%O0+40(%R0)\n"
-               "       std     6,%O0+56(%R0)"
-               : "=Q" (*fpregs) : "Q" (*fpregs));
+               "       efpc    %1\n"
+               "       sfpc    %2\n"
+               "0:     sfpc    %1\n"
+               "       la      %0,0\n"
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "=d" (rc), "=d" (orig_fpc)
+               : "d" (fpc), "0" (-EINVAL));
+       return rc;
+}
+
+static inline void save_fp_ctl(u32 *fpc)
+{
        if (!MACHINE_HAS_IEEE)
                return;
+
        asm volatile(
-               "       stfpc   %0\n"
-               "       std     1,%O0+16(%R0)\n"
-               "       std     3,%O0+32(%R0)\n"
-               "       std     5,%O0+48(%R0)\n"
-               "       std     7,%O0+64(%R0)\n"
-               "       std     8,%O0+72(%R0)\n"
-               "       std     9,%O0+80(%R0)\n"
-               "       std     10,%O0+88(%R0)\n"
-               "       std     11,%O0+96(%R0)\n"
-               "       std     12,%O0+104(%R0)\n"
-               "       std     13,%O0+112(%R0)\n"
-               "       std     14,%O0+120(%R0)\n"
-               "       std     15,%O0+128(%R0)\n"
-               : "=Q" (*fpregs) : "Q" (*fpregs));
+               "       stfpc   %0\n"
+               : "+Q" (*fpc));
 }
 
-static inline void restore_fp_regs(s390_fp_regs *fpregs)
+static inline int restore_fp_ctl(u32 *fpc)
 {
+       int rc;
+
+       if (!MACHINE_HAS_IEEE)
+               return 0;
+
        asm volatile(
-               "       ld      0,%O0+8(%R0)\n"
-               "       ld      2,%O0+24(%R0)\n"
-               "       ld      4,%O0+40(%R0)\n"
-               "       ld      6,%O0+56(%R0)"
-               : : "Q" (*fpregs));
+               "0:     lfpc    %1\n"
+               "       la      %0,0\n"
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
+       return rc;
+}
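
test_fp_ctl() and restore_fp_ctl() exist so that a user-supplied floating-point-control word can be rejected with -EINVAL instead of faulting when lfpc loads it; the EX_TABLE entries turn the exception into the error return. The userspace sketch below shows only that shape; model_lfpc() and its acceptance rule are invented for illustration and are not the hardware definition.

#include <assert.h>
#include <errno.h>
#include <stdint.h>

static uint32_t model_fpc_register;

/* Hypothetical acceptance rule: the model rejects any value with bits
 * set outside the low byte.  This is NOT the hardware definition. */
static int model_lfpc(uint32_t fpc)
{
        if (fpc & ~0xffu)
                return -1;
        model_fpc_register = fpc;
        return 0;
}

static int model_restore_fp_ctl(uint32_t fpc)
{
        /* Mirrors restore_fp_ctl(): a rejected load yields -EINVAL. */
        return model_lfpc(fpc) ? -EINVAL : 0;
}

int main(void)
{
        assert(model_restore_fp_ctl(0x00) == 0);
        assert(model_restore_fp_ctl(0x1234) == -EINVAL);
        assert(model_fpc_register == 0x00);
        return 0;
}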
+
+static inline void save_fp_regs(freg_t *fprs)
+{
+       asm volatile("std 0,%0" : "=Q" (fprs[0]));
+       asm volatile("std 2,%0" : "=Q" (fprs[2]));
+       asm volatile("std 4,%0" : "=Q" (fprs[4]));
+       asm volatile("std 6,%0" : "=Q" (fprs[6]));
        if (!MACHINE_HAS_IEEE)
                return;
-       asm volatile(
-               "       lfpc    %0\n"
-               "       ld      1,%O0+16(%R0)\n"
-               "       ld      3,%O0+32(%R0)\n"
-               "       ld      5,%O0+48(%R0)\n"
-               "       ld      7,%O0+64(%R0)\n"
-               "       ld      8,%O0+72(%R0)\n"
-               "       ld      9,%O0+80(%R0)\n"
-               "       ld      10,%O0+88(%R0)\n"
-               "       ld      11,%O0+96(%R0)\n"
-               "       ld      12,%O0+104(%R0)\n"
-               "       ld      13,%O0+112(%R0)\n"
-               "       ld      14,%O0+120(%R0)\n"
-               "       ld      15,%O0+128(%R0)\n"
-               : : "Q" (*fpregs));
+       asm volatile("std 1,%0" : "=Q" (fprs[1]));
+       asm volatile("std 3,%0" : "=Q" (fprs[3]));
+       asm volatile("std 5,%0" : "=Q" (fprs[5]));
+       asm volatile("std 7,%0" : "=Q" (fprs[7]));
+       asm volatile("std 8,%0" : "=Q" (fprs[8]));
+       asm volatile("std 9,%0" : "=Q" (fprs[9]));
+       asm volatile("std 10,%0" : "=Q" (fprs[10]));
+       asm volatile("std 11,%0" : "=Q" (fprs[11]));
+       asm volatile("std 12,%0" : "=Q" (fprs[12]));
+       asm volatile("std 13,%0" : "=Q" (fprs[13]));
+       asm volatile("std 14,%0" : "=Q" (fprs[14]));
+       asm volatile("std 15,%0" : "=Q" (fprs[15]));
+}
+
+static inline void restore_fp_regs(freg_t *fprs)
+{
+       asm volatile("ld 0,%0" : : "Q" (fprs[0]));
+       asm volatile("ld 2,%0" : : "Q" (fprs[2]));
+       asm volatile("ld 4,%0" : : "Q" (fprs[4]));
+       asm volatile("ld 6,%0" : : "Q" (fprs[6]));
+       if (!MACHINE_HAS_IEEE)
+               return;
+       asm volatile("ld 1,%0" : : "Q" (fprs[1]));
+       asm volatile("ld 3,%0" : : "Q" (fprs[3]));
+       asm volatile("ld 5,%0" : : "Q" (fprs[5]));
+       asm volatile("ld 7,%0" : : "Q" (fprs[7]));
+       asm volatile("ld 8,%0" : : "Q" (fprs[8]));
+       asm volatile("ld 9,%0" : : "Q" (fprs[9]));
+       asm volatile("ld 10,%0" : : "Q" (fprs[10]));
+       asm volatile("ld 11,%0" : : "Q" (fprs[11]));
+       asm volatile("ld 12,%0" : : "Q" (fprs[12]));
+       asm volatile("ld 13,%0" : : "Q" (fprs[13]));
+       asm volatile("ld 14,%0" : : "Q" (fprs[14]));
+       asm volatile("ld 15,%0" : : "Q" (fprs[15]));
 }
 
 static inline void save_access_regs(unsigned int *acrs)
@@ -83,12 +119,14 @@ static inline void restore_access_regs(unsigned int *acrs)
 
 #define switch_to(prev,next,last) do {                                 \
        if (prev->mm) {                                                 \
-               save_fp_regs(&prev->thread.fp_regs);                    \
+               save_fp_ctl(&prev->thread.fp_regs.fpc);                 \
+               save_fp_regs(prev->thread.fp_regs.fprs);                \
                save_access_regs(&prev->thread.acrs[0]);                \
                save_ri_cb(prev->thread.ri_cb);                         \
        }                                                               \
        if (next->mm) {                                                 \
-               restore_fp_regs(&next->thread.fp_regs);                 \
+               restore_fp_ctl(&next->thread.fp_regs.fpc);              \
+               restore_fp_regs(next->thread.fp_regs.fprs);             \
                restore_access_regs(&next->thread.acrs[0]);             \
                restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb);  \
                update_cr_regs(next);                                   \
index 8ad8af915032a32573e06548e90d89ea7563349c..819b94d2272054d318fdd12c31b8718a9e4bb0bd 100644 (file)
@@ -71,30 +71,30 @@ static inline void local_tick_enable(unsigned long long comp)
 
 typedef unsigned long long cycles_t;
 
-static inline unsigned long long get_tod_clock(void)
-{
-       unsigned long long clk;
-
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-       asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
-#else
-       asm volatile("stck %0" : "=Q" (clk) : : "cc");
-#endif
-       return clk;
-}
-
 static inline void get_tod_clock_ext(char *clk)
 {
        asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
-static inline unsigned long long get_tod_clock_xt(void)
+static inline unsigned long long get_tod_clock(void)
 {
        unsigned char clk[16];
        get_tod_clock_ext(clk);
        return *((unsigned long long *)&clk[1]);
 }
 
+static inline unsigned long long get_tod_clock_fast(void)
+{
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+       unsigned long long clk;
+
+       asm volatile("stckf %0" : "=Q" (clk) : : "cc");
+       return clk;
+#else
+       return get_tod_clock();
+#endif
+}
+
 static inline cycles_t get_cycles(void)
 {
        return (cycles_t) get_tod_clock() >> 2;
@@ -125,7 +125,7 @@ extern u64 sched_clock_base_cc;
  */
 static inline unsigned long long get_tod_clock_monotonic(void)
 {
-       return get_tod_clock_xt() - sched_clock_base_cc;
+       return get_tod_clock() - sched_clock_base_cc;
 }
 
 /**
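
With get_tod_clock_xt() folded into get_tod_clock() above, the remaining choice is between the default STCKE-based reader and the new get_tod_clock_fast(), which uses the cheaper STCKF instruction when CONFIG_HAVE_MARCH_Z9_109_FEATURES is set and otherwise falls back to get_tod_clock(). An illustrative sketch of the kind of caller the fast variant is meant for, cheap timestamps such as the debug_finish_entry() change later in this diff (sketch_* is a made-up name, and the result is in TOD clock units, not nanoseconds):

    static inline unsigned long long sketch_elapsed_tod(void (*work)(void))
    {
            unsigned long long start = get_tod_clock_fast();

            work();
            return get_tod_clock_fast() - start;
    }
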
index 9c33ed4e666f5cd920933f6ad9fa5463d83dd1c1..79330af9a5f85442745110001defbaa2a1964bb8 100644 (file)
@@ -94,9 +94,7 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
 
 struct uaccess_ops {
        size_t (*copy_from_user)(size_t, const void __user *, void *);
-       size_t (*copy_from_user_small)(size_t, const void __user *, void *);
        size_t (*copy_to_user)(size_t, void __user *, const void *);
-       size_t (*copy_to_user_small)(size_t, void __user *, const void *);
        size_t (*copy_in_user)(size_t, void __user *, const void __user *);
        size_t (*clear_user)(size_t, void __user *);
        size_t (*strnlen_user)(size_t, const char __user *);
@@ -106,22 +104,20 @@ struct uaccess_ops {
 };
 
 extern struct uaccess_ops uaccess;
-extern struct uaccess_ops uaccess_std;
 extern struct uaccess_ops uaccess_mvcos;
-extern struct uaccess_ops uaccess_mvcos_switch;
 extern struct uaccess_ops uaccess_pt;
 
 extern int __handle_fault(unsigned long, unsigned long, int);
 
 static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
 {
-       size = uaccess.copy_to_user_small(size, ptr, x);
+       size = uaccess.copy_to_user(size, ptr, x);
        return size ? -EFAULT : size;
 }
 
 static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
 {
-       size = uaccess.copy_from_user_small(size, ptr, x);
+       size = uaccess.copy_from_user(size, ptr, x);
        return size ? -EFAULT : size;
 }
 
@@ -226,10 +222,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       if (__builtin_constant_p(n) && (n <= 256))
-               return uaccess.copy_to_user_small(n, to, from);
-       else
-               return uaccess.copy_to_user(n, to, from);
+       return uaccess.copy_to_user(n, to, from);
 }
 
 #define __copy_to_user_inatomic __copy_to_user
@@ -275,10 +268,7 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 static inline unsigned long __must_check
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       if (__builtin_constant_p(n) && (n <= 256))
-               return uaccess.copy_from_user_small(n, from, to);
-       else
-               return uaccess.copy_from_user(n, from, to);
+       return uaccess.copy_from_user(n, from, to);
 }
 
 extern void copy_from_user_overflow(void)
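
The ops table above loses its copy_*_user_small entries together with the extern declarations for the uaccess_std and uaccess_mvcos_switch backends, so small constant-size copies no longer take a special path. A rough sketch of the resulting dispatch, with the backend picked once at boot as in the setup_arch() hunk further down (MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt):

    static inline unsigned long sketch_copy_to_user(void __user *to,
                                                    const void *from,
                                                    unsigned long n)
    {
            /* same backend for every size; returns the number of
             * bytes that could not be copied */
            return uaccess.copy_to_user(n, to, from);
    }
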
index 7a84619e315e804af6d7c6a535c4df7981e082e3..7e0b498a2c2ba95c8ca56537e673b18c4a0065d3 100644 (file)
@@ -199,6 +199,7 @@ typedef union
 typedef struct
 {
        __u32   fpc;
+       __u32   pad;
        freg_t  fprs[NUM_FPRS];              
 } s390_fp_regs;
 
@@ -206,7 +207,6 @@ typedef struct
 #define FPC_FLAGS_MASK          0x00F80000
 #define FPC_DXC_MASK            0x0000FF00
 #define FPC_RM_MASK             0x00000003
-#define FPC_VALID_MASK          0xF8F8FF03
 
 /* this typedef defines what a Program Status Word looks like */
 typedef struct 
@@ -263,7 +263,7 @@ typedef struct
 #define PSW_MASK_EA            0x0000000100000000UL
 #define PSW_MASK_BA            0x0000000080000000UL
 
-#define PSW_MASK_USER          0x0000FF8180000000UL
+#define PSW_MASK_USER          0x0000FF0180000000UL
 
 #define PSW_ADDR_AMODE         0x0000000000000000UL
 #define PSW_ADDR_INSN          0xFFFFFFFFFFFFFFFFUL
index 584787f6ce44d88569b964bf8543a5c74475e030..b30de9c01bbedad00c5e15ff52ffa8d19b06c824 100644 (file)
@@ -49,6 +49,7 @@ typedef struct
 typedef struct
 {
        unsigned int fpc;
+       unsigned int pad;
        double   fprs[__NUM_FPRS];
 } _s390_fp_regs;
 
index 92494494692eca965ee1d315d42a800e08c47605..c286c2e868f03f9683481b82689af19f1a222e2d 100644 (file)
@@ -82,4 +82,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _ASM_SOCKET_H */
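
This uapi header change mirrors a new generic socket option; the hunk itself only assigns the value 47. A hypothetical user-space sketch of setting it on an existing socket (the bytes-per-second interpretation comes from the core networking side of this merge, so treat it as an assumption here):

    #include <stdint.h>
    #include <sys/socket.h>

    #ifndef SO_MAX_PACING_RATE
    #define SO_MAX_PACING_RATE 47
    #endif

    /* cap pacing on fd to roughly 1 MB/s */
    static int sketch_cap_pacing(int fd)
    {
            uint32_t rate = 1000000;        /* bytes per second, assumed */

            return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                              &rate, sizeof(rate));
    }
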
index 4bb2a46561631ab361c027fdcfc8a1dfd0504cb5..2403303cfed708d3ae73a0b0091cab67c21f6f9f 100644 (file)
@@ -28,7 +28,7 @@ CFLAGS_ptrace.o               += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 
-obj-y  := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o
+obj-y  := traps.o time.o process.o base.o early.o setup.o vtime.o
 obj-y  += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
 obj-y  += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
 obj-y  += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c
deleted file mode 100644 (file)
index 102da5e..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- *    Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
- *    See include/asm/{bitops.h|posix_types.h} for details
- *
- *    Copyright IBM Corp. 1999, 2009
- *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
- */
-
-#include <linux/bitops.h>
-#include <linux/module.h>
-
-const char _oi_bitmap[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
-EXPORT_SYMBOL(_oi_bitmap);
-
-const char _ni_bitmap[] = { 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f };
-EXPORT_SYMBOL(_ni_bitmap);
-
-const char _zb_findmap[] = {
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4,
-       0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8 };
-EXPORT_SYMBOL(_zb_findmap);
-
-const char _sb_findmap[] = {
-       8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
-       4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0 };
-EXPORT_SYMBOL(_sb_findmap);
index dd62071624be349ca1913ed4404e826ecd6b673e..3a414c0f93edcd08d69d3a1aa2af645c098327c9 100644 (file)
@@ -146,15 +146,14 @@ static void __init cache_build_info(void)
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        for (level = 0; level < CACHE_MAX_LEVEL; level++) {
                switch (ct.ci[level].scope) {
-               case CACHE_SCOPE_NOTEXISTS:
-               case CACHE_SCOPE_RESERVED:
-                       return;
                case CACHE_SCOPE_SHARED:
                        private = 0;
                        break;
                case CACHE_SCOPE_PRIVATE:
                        private = 1;
                        break;
+               default:
+                       return;
                }
                if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
                        rc  = cache_add(level, private, CACHE_TYPE_DATA);
index 1f1b8c70ab97ce9e5b9445d5dc020f8a75bfac77..e030d2bdec1b6aa2b9f29288b28c6600710ecfd1 100644 (file)
 
 #include "compat_linux.h"
 
-u32 psw32_user_bits = PSW32_MASK_DAT | PSW32_MASK_IO | PSW32_MASK_EXT |
-                     PSW32_DEFAULT_KEY | PSW32_MASK_BASE | PSW32_MASK_MCHECK |
-                     PSW32_MASK_PSTATE | PSW32_ASC_HOME;
 /* For this source file, we want overflow handling. */
 
 #undef high2lowuid
index 976518c0592aa96a29565bac78d58fa8fcfdd846..1bfda3eca37909988c26564b11398b6b75302c50 100644 (file)
@@ -27,6 +27,7 @@ typedef union
 typedef struct
 {
        unsigned int    fpc;
+       unsigned int    pad;
        freg_t32        fprs[__NUM_FPRS];              
 } _s390_fp_regs32;
 
index 1389b637dae55018e3eab8b18dd3f44e9440e078..5a3ab5c191fdaf7f370a14cc69d52eb9d3052647 100644 (file)
@@ -99,7 +99,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
                        break;
                }
        }
-       return err;
+       return err ? -EFAULT : 0;
 }
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
@@ -148,62 +148,71 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
                        break;
                }
        }
-       return err;
+       return err ? -EFAULT : 0;
 }
 
 static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
 {
-       _s390_regs_common32 regs32;
-       int err, i;
+       _sigregs32 user_sregs;
+       int i;
 
-       regs32.psw.mask = psw32_user_bits |
-               ((__u32)(regs->psw.mask >> 32) & PSW32_MASK_USER);
-       regs32.psw.addr = (__u32) regs->psw.addr |
+       user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
+       user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
+       user_sregs.regs.psw.mask |= PSW32_USER_BITS;
+       user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
                (__u32)(regs->psw.mask & PSW_MASK_BA);
        for (i = 0; i < NUM_GPRS; i++)
-               regs32.gprs[i] = (__u32) regs->gprs[i];
+               user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
        save_access_regs(current->thread.acrs);
-       memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs));
-       err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32));
-       if (err)
-               return err;
-       save_fp_regs(&current->thread.fp_regs);
-       /* s390_fp_regs and _s390_fp_regs32 are the same ! */
-       return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
-                             sizeof(_s390_fp_regs32));
+       memcpy(&user_sregs.regs.acrs, current->thread.acrs,
+              sizeof(user_sregs.regs.acrs));
+       save_fp_ctl(&current->thread.fp_regs.fpc);
+       save_fp_regs(current->thread.fp_regs.fprs);
+       memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
+              sizeof(user_sregs.fpregs));
+       if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
+               return -EFAULT;
+       return 0;
 }
 
 static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
 {
-       _s390_regs_common32 regs32;
-       int err, i;
+       _sigregs32 user_sregs;
+       int i;
 
         /* Always make any pending restarted system call return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-       err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
-       if (err)
-               return err;
+       if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
+               return -EFAULT;
+
+       if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
+               return -EINVAL;
+
+       /* Loading the floating-point-control word can fail. Do that first. */
+       if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+               return -EINVAL;
+
+       /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
-               (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 |
-               (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE);
+               (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
+               (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
+               (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
        /* Check for invalid user address space control. */
-       if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
-               regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+       if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
+               regs->psw.mask = PSW_ASC_PRIMARY |
                        (regs->psw.mask & ~PSW_MASK_ASC);
-       regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
+       regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
        for (i = 0; i < NUM_GPRS; i++)
-               regs->gprs[i] = (__u64) regs32.gprs[i];
-       memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs));
+               regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
+       memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
+              sizeof(current->thread.acrs));
        restore_access_regs(current->thread.acrs);
 
-       err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
-                              sizeof(_s390_fp_regs32));
-       current->thread.fp_regs.fpc &= FPC_VALID_MASK;
-       if (err)
-               return err;
+       memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
+              sizeof(current->thread.fp_regs));
 
-       restore_fp_regs(&current->thread.fp_regs);
+       restore_fp_regs(current->thread.fp_regs.fprs);
        clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
        return 0;
 }
@@ -215,18 +224,18 @@ static int save_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
 
        for (i = 0; i < NUM_GPRS; i++)
                gprs_high[i] = regs->gprs[i] >> 32;
-
-       return __copy_to_user(uregs, &gprs_high, sizeof(gprs_high));
+       if (__copy_to_user(uregs, &gprs_high, sizeof(gprs_high)))
+               return -EFAULT;
+       return 0;
 }
 
 static int restore_sigregs_gprs_high(struct pt_regs *regs, __u32 __user *uregs)
 {
        __u32 gprs_high[NUM_GPRS];
-       int err, i;
+       int i;
 
-       err = __copy_from_user(&gprs_high, uregs, sizeof(gprs_high));
-       if (err)
-               return err;
+       if (__copy_from_user(&gprs_high, uregs, sizeof(gprs_high)))
+               return -EFAULT;
        for (i = 0; i < NUM_GPRS; i++)
                *(__u32 *)&regs->gprs[i] = gprs_high[i];
        return 0;
@@ -348,7 +357,7 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
        regs->gprs[15] = (__force __u64) frame;
        /* Force 31 bit amode and default user address space control. */
        regs->psw.mask = PSW_MASK_BA |
-               (psw_user_bits & PSW_MASK_ASC) |
+               (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (__force __u64) ka->sa.sa_handler;
 
@@ -415,7 +424,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->gprs[15] = (__force __u64) frame;
        /* Force 31 bit amode and default user address space control. */
        regs->psw.mask = PSW_MASK_BA |
-               (psw_user_bits & PSW_MASK_ASC) |
+               (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (__u64 __force) ka->sa.sa_handler;
 
index c84f33d51f7b45d92d66b870f9b35c23d5054568..f45b2ab0cb81ae425ce2d72c2de9b6fa4fa241a3 100644 (file)
 #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
 #define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
 
+struct dump_save_areas dump_save_areas;
+
+/*
+ * Allocate and add a save area for a CPU
+ */
+struct save_area *dump_save_area_create(int cpu)
+{
+       struct save_area **save_areas, *save_area;
+
+       save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
+       if (!save_area)
+               return NULL;
+       if (cpu + 1 > dump_save_areas.count) {
+               dump_save_areas.count = cpu + 1;
+               save_areas = krealloc(dump_save_areas.areas,
+                                     dump_save_areas.count * sizeof(void *),
+                                     GFP_KERNEL | __GFP_ZERO);
+               if (!save_areas) {
+                       kfree(save_area);
+                       return NULL;
+               }
+               dump_save_areas.areas = save_areas;
+       }
+       dump_save_areas.areas[cpu] = save_area;
+       return save_area;
+}
 
 /*
  * Return physical address for virtual address
@@ -40,28 +66,25 @@ static inline void *load_real_addr(void *addr)
 }
 
 /*
- * Copy up to one page to vmalloc or real memory
+ * Copy from real memory to virtual or real memory
  */
-static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+static int copy_from_realmem(void *dest, void *src, size_t count)
 {
-       size_t size;
+       unsigned long size;
 
-       if (is_vmalloc_addr(buf)) {
-               BUG_ON(csize >= PAGE_SIZE);
-               /* If buf is not page aligned, copy first part */
-               size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
-               if (size) {
-                       if (memcpy_real(load_real_addr(buf), src, size))
-                               return -EFAULT;
-                       buf += size;
-                       src += size;
-               }
-               /* Copy second part */
-               size = csize - size;
-               return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
-       } else {
-               return memcpy_real(buf, src, csize);
-       }
+       if (!count)
+               return 0;
+       if (!is_vmalloc_or_module_addr(dest))
+               return memcpy_real(dest, src, count);
+       do {
+               size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
+               if (memcpy_real(load_real_addr(dest), src, size))
+                       return -EFAULT;
+               count -= size;
+               dest += size;
+               src += size;
+       } while (count);
+       return 0;
 }
 
 /*
@@ -114,7 +137,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
                rc = copy_to_user_real((void __force __user *) buf,
                                       (void *) src, csize);
        else
-               rc = copy_page_real(buf, (void *) src, csize);
+               rc = copy_from_realmem(buf, (void *) src, csize);
        return (rc == 0) ? rc : csize;
 }
 
@@ -210,7 +233,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
        if (OLDMEM_BASE) {
                if ((unsigned long) src < OLDMEM_SIZE) {
                        copied = min(count, OLDMEM_SIZE - (unsigned long) src);
-                       rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+                       rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
                        if (rc)
                                return rc;
                }
@@ -223,7 +246,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
                                return rc;
                }
        }
-       return memcpy_real(dest + copied, src + copied, count - copied);
+       return copy_from_realmem(dest + copied, src + copied, count - copied);
 }
 
 /*
@@ -453,8 +476,8 @@ static int get_cpu_cnt(void)
 {
        int i, cpus = 0;
 
-       for (i = 0; zfcpdump_save_areas[i]; i++) {
-               if (zfcpdump_save_areas[i]->pref_reg == 0)
+       for (i = 0; i < dump_save_areas.count; i++) {
+               if (dump_save_areas.areas[i]->pref_reg == 0)
                        continue;
                cpus++;
        }
@@ -525,8 +548,8 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
 
        ptr = nt_prpsinfo(ptr);
 
-       for (i = 0; zfcpdump_save_areas[i]; i++) {
-               sa = zfcpdump_save_areas[i];
+       for (i = 0; i < dump_save_areas.count; i++) {
+               sa = dump_save_areas.areas[i];
                if (sa->pref_reg == 0)
                        continue;
                ptr = fill_cpu_elf_notes(ptr, sa);
index f1279dc2e1bcecae2a12ea246096f1458b2de77f..17d62fe5d7b70554c36b8f85b34da23d86c90407 100644 (file)
@@ -867,7 +867,7 @@ static inline void
 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
                        int exception)
 {
-       active->id.stck = get_tod_clock();
+       active->id.stck = get_tod_clock_fast();
        active->id.fields.cpuid = smp_processor_id();
        active->caller = __builtin_return_address(0);
        active->id.fields.exception = exception;
index be87d3e05a5be69265a6100f87afe2fa60d51137..993efe6a887c2c31d4bcd90b02bccbb3c0234ae9 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/kdebug.h>
 
 #include <asm/uaccess.h>
+#include <asm/dis.h>
 #include <asm/io.h>
 #include <linux/atomic.h>
 #include <asm/mathemu.h>
 #define ONELONG "%016lx: "
 #endif /* CONFIG_64BIT */
 
-#define OPERAND_GPR    0x1     /* Operand printed as %rx */
-#define OPERAND_FPR    0x2     /* Operand printed as %fx */
-#define OPERAND_AR     0x4     /* Operand printed as %ax */
-#define OPERAND_CR     0x8     /* Operand printed as %cx */
-#define OPERAND_DISP   0x10    /* Operand printed as displacement */
-#define OPERAND_BASE   0x20    /* Operand printed as base register */
-#define OPERAND_INDEX  0x40    /* Operand printed as index register */
-#define OPERAND_PCREL  0x80    /* Operand printed as pc-relative symbol */
-#define OPERAND_SIGNED 0x100   /* Operand printed as signed value */
-#define OPERAND_LENGTH 0x200   /* Operand printed as length (+1) */
-
 enum {
        UNUSED, /* Indicates the end of the operand list */
        R_8,    /* GPR starting at position 8 */
@@ -155,19 +145,7 @@ enum {
        INSTR_S_00, INSTR_S_RD,
 };
 
-struct operand {
-       int bits;               /* The number of bits in the operand. */
-       int shift;              /* The number of bits to shift. */
-       int flags;              /* One bit syntax flags. */
-};
-
-struct insn {
-       const char name[5];
-       unsigned char opfrag;
-       unsigned char format;
-};
-
-static const struct operand operands[] =
+static const struct s390_operand operands[] =
 {
        [UNUSED]  = { 0, 0, 0 },
        [R_8]    = {  4,  8, OPERAND_GPR },
@@ -479,7 +457,7 @@ static char *long_insn_name[] = {
        [LONG_INSN_PCISTB] = "pcistb",
 };
 
-static struct insn opcode[] = {
+static struct s390_insn opcode[] = {
 #ifdef CONFIG_64BIT
        { "bprp", 0xc5, INSTR_MII_UPI },
        { "bpp", 0xc7, INSTR_SMI_U0RDP },
@@ -668,7 +646,7 @@ static struct insn opcode[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_01[] = {
+static struct s390_insn opcode_01[] = {
 #ifdef CONFIG_64BIT
        { "ptff", 0x04, INSTR_E },
        { "pfpo", 0x0a, INSTR_E },
@@ -684,7 +662,7 @@ static struct insn opcode_01[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_a5[] = {
+static struct s390_insn opcode_a5[] = {
 #ifdef CONFIG_64BIT
        { "iihh", 0x00, INSTR_RI_RU },
        { "iihl", 0x01, INSTR_RI_RU },
@@ -706,7 +684,7 @@ static struct insn opcode_a5[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_a7[] = {
+static struct s390_insn opcode_a7[] = {
 #ifdef CONFIG_64BIT
        { "tmhh", 0x02, INSTR_RI_RU },
        { "tmhl", 0x03, INSTR_RI_RU },
@@ -728,7 +706,7 @@ static struct insn opcode_a7[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_aa[] = {
+static struct s390_insn opcode_aa[] = {
 #ifdef CONFIG_64BIT
        { { 0, LONG_INSN_RINEXT }, 0x00, INSTR_RI_RI },
        { "rion", 0x01, INSTR_RI_RI },
@@ -739,7 +717,7 @@ static struct insn opcode_aa[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_b2[] = {
+static struct s390_insn opcode_b2[] = {
 #ifdef CONFIG_64BIT
        { "stckf", 0x7c, INSTR_S_RD },
        { "lpp", 0x80, INSTR_S_RD },
@@ -851,7 +829,7 @@ static struct insn opcode_b2[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_b3[] = {
+static struct s390_insn opcode_b3[] = {
 #ifdef CONFIG_64BIT
        { "maylr", 0x38, INSTR_RRF_F0FF },
        { "mylr", 0x39, INSTR_RRF_F0FF },
@@ -1034,7 +1012,7 @@ static struct insn opcode_b3[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_b9[] = {
+static struct s390_insn opcode_b9[] = {
 #ifdef CONFIG_64BIT
        { "lpgr", 0x00, INSTR_RRE_RR },
        { "lngr", 0x01, INSTR_RRE_RR },
@@ -1167,7 +1145,7 @@ static struct insn opcode_b9[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_c0[] = {
+static struct s390_insn opcode_c0[] = {
 #ifdef CONFIG_64BIT
        { "lgfi", 0x01, INSTR_RIL_RI },
        { "xihf", 0x06, INSTR_RIL_RU },
@@ -1187,7 +1165,7 @@ static struct insn opcode_c0[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_c2[] = {
+static struct s390_insn opcode_c2[] = {
 #ifdef CONFIG_64BIT
        { "msgfi", 0x00, INSTR_RIL_RI },
        { "msfi", 0x01, INSTR_RIL_RI },
@@ -1205,7 +1183,7 @@ static struct insn opcode_c2[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_c4[] = {
+static struct s390_insn opcode_c4[] = {
 #ifdef CONFIG_64BIT
        { "llhrl", 0x02, INSTR_RIL_RP },
        { "lghrl", 0x04, INSTR_RIL_RP },
@@ -1222,7 +1200,7 @@ static struct insn opcode_c4[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_c6[] = {
+static struct s390_insn opcode_c6[] = {
 #ifdef CONFIG_64BIT
        { "exrl", 0x00, INSTR_RIL_RP },
        { "pfdrl", 0x02, INSTR_RIL_UP },
@@ -1240,7 +1218,7 @@ static struct insn opcode_c6[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_c8[] = {
+static struct s390_insn opcode_c8[] = {
 #ifdef CONFIG_64BIT
        { "mvcos", 0x00, INSTR_SSF_RRDRD },
        { "ectg", 0x01, INSTR_SSF_RRDRD },
@@ -1251,7 +1229,7 @@ static struct insn opcode_c8[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_cc[] = {
+static struct s390_insn opcode_cc[] = {
 #ifdef CONFIG_64BIT
        { "brcth", 0x06, INSTR_RIL_RP },
        { "aih", 0x08, INSTR_RIL_RI },
@@ -1263,7 +1241,7 @@ static struct insn opcode_cc[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_e3[] = {
+static struct s390_insn opcode_e3[] = {
 #ifdef CONFIG_64BIT
        { "ltg", 0x02, INSTR_RXY_RRRD },
        { "lrag", 0x03, INSTR_RXY_RRRD },
@@ -1369,7 +1347,7 @@ static struct insn opcode_e3[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_e5[] = {
+static struct s390_insn opcode_e5[] = {
 #ifdef CONFIG_64BIT
        { "strag", 0x02, INSTR_SSE_RDRD },
        { "mvhhi", 0x44, INSTR_SIL_RDI },
@@ -1391,7 +1369,7 @@ static struct insn opcode_e5[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_eb[] = {
+static struct s390_insn opcode_eb[] = {
 #ifdef CONFIG_64BIT
        { "lmg", 0x04, INSTR_RSY_RRRD },
        { "srag", 0x0a, INSTR_RSY_RRRD },
@@ -1465,7 +1443,7 @@ static struct insn opcode_eb[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_ec[] = {
+static struct s390_insn opcode_ec[] = {
 #ifdef CONFIG_64BIT
        { "brxhg", 0x44, INSTR_RIE_RRP },
        { "brxlg", 0x45, INSTR_RIE_RRP },
@@ -1504,7 +1482,7 @@ static struct insn opcode_ec[] = {
        { "", 0, INSTR_INVALID }
 };
 
-static struct insn opcode_ed[] = {
+static struct s390_insn opcode_ed[] = {
 #ifdef CONFIG_64BIT
        { "mayl", 0x38, INSTR_RXF_FRRDF },
        { "myl", 0x39, INSTR_RXF_FRRDF },
@@ -1572,7 +1550,7 @@ static struct insn opcode_ed[] = {
 
 /* Extracts an operand value from an instruction.  */
 static unsigned int extract_operand(unsigned char *code,
-                                   const struct operand *operand)
+                                   const struct s390_operand *operand)
 {
        unsigned int val;
        int bits;
@@ -1608,16 +1586,11 @@ static unsigned int extract_operand(unsigned char *code,
        return val;
 }
 
-static inline int insn_length(unsigned char code)
-{
-       return ((((int) code + 64) >> 7) + 1) << 1;
-}
-
-static struct insn *find_insn(unsigned char *code)
+struct s390_insn *find_insn(unsigned char *code)
 {
        unsigned char opfrag = code[1];
        unsigned char opmask;
-       struct insn *table;
+       struct s390_insn *table;
 
        switch (code[0]) {
        case 0x01:
@@ -1706,7 +1679,7 @@ static struct insn *find_insn(unsigned char *code)
  */
 int insn_to_mnemonic(unsigned char *instruction, char *buf, unsigned int len)
 {
-       struct insn *insn;
+       struct s390_insn *insn;
 
        insn = find_insn(instruction);
        if (!insn)
@@ -1722,9 +1695,9 @@ EXPORT_SYMBOL_GPL(insn_to_mnemonic);
 
 static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
 {
-       struct insn *insn;
+       struct s390_insn *insn;
        const unsigned char *ops;
-       const struct operand *operand;
+       const struct s390_operand *operand;
        unsigned int value;
        char separator;
        char *ptr;
index 99e7f6035895e0cceccbc0ae6bf123a871a5d6a3..e6af9406987c9982689e5f500612f14d49c1b0b0 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <asm/processor.h>
 #include <asm/debug.h>
+#include <asm/dis.h>
 #include <asm/ipl.h>
 
 #ifndef CONFIG_64BIT
index dc8770d7173c83aec5b3475d748eee99d99e60b7..96543ac400a7820ede40f22bf9636753920456b7 100644 (file)
@@ -206,6 +206,7 @@ static noinline __init void clear_bss_section(void)
  */
 static noinline __init void init_kernel_storage_key(void)
 {
+#if PAGE_DEFAULT_KEY
        unsigned long end_pfn, init_pfn;
 
        end_pfn = PFN_UP(__pa(&_end));
@@ -213,6 +214,7 @@ static noinline __init void init_kernel_storage_key(void)
        for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
                page_set_storage_key(init_pfn << PAGE_SHIFT,
                                     PAGE_DEFAULT_KEY, 0);
+#endif
 }
 
 static __initdata char sysinfo_page[PAGE_SIZE] __aligned(PAGE_SIZE);
index cc30d1fb000c25c8f74a8045b105762ccf32c9b2..0dc2b6d0a1ec8557f7450d5fbd255d2758bf8512 100644 (file)
@@ -266,6 +266,7 @@ sysc_sigpending:
        tm      __TI_flags+3(%r12),_TIF_SYSCALL
        jno     sysc_return
        lm      %r2,%r7,__PT_R2(%r11)   # load svc arguments
+       l       %r10,__TI_sysc_table(%r12)      # 31 bit system call table
        xr      %r8,%r8                 # svc 0 returns -ENOSYS
        clc     __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
        jnl     sysc_nr_ok              # invalid svc number -> do svc 0
index 2b2188b97c6aff464e467b7250823257924e31ab..e5b43c97a8340a807671ace329a4fa87a573709a 100644 (file)
@@ -297,6 +297,7 @@ sysc_sigpending:
        tm      __TI_flags+7(%r12),_TIF_SYSCALL
        jno     sysc_return
        lmg     %r2,%r7,__PT_R2(%r11)   # load svc arguments
+       lg      %r10,__TI_sysc_table(%r12)      # address of system call table
        lghi    %r8,0                   # svc 0 returns -ENOSYS
        llgh    %r1,__PT_INT_CODE+2(%r11)       # load new svc number
        cghi    %r1,NR_syscalls
index 1014ad5f7693eda79c3caa4ad8b7e8b5edb5f3fc..224db03e95182adc3976aecb3b20f0de5d2b0b13 100644 (file)
@@ -151,14 +151,13 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
-       if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
-               goto out;
        trace.func = ip;
+       trace.depth = current->curr_ret_stack + 1;
        /* Only trace if the calling function expects to. */
-       if (!ftrace_graph_entry(&trace)) {
-               current->curr_ret_stack--;
+       if (!ftrace_graph_entry(&trace))
+               goto out;
+       if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
-       }
        parent = (unsigned long) return_to_handler;
 out:
        return parent;
index fd8db63dfc942a211d9dfe95b8c2bff88b90ed8d..429afcc480cb2e86c7e684e81c6f1f1160ee3b05 100644 (file)
@@ -437,7 +437,7 @@ ENTRY(startup_kdump)
 
 #if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_ZEC12)
-       .long 3, 0xc100efe3, 0xf46ce000, 0x00400000
+       .long 3, 0xc100efe3, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
        .long 2, 0xc100efe3, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
index feb719d3c85160b586dd6af4d936f363936c4419..633ca7504536c10a517667b5f582653f763390e3 100644 (file)
@@ -2051,12 +2051,12 @@ void s390_reset_system(void (*func)(void *), void *data)
        __ctl_clear_bit(0,28);
 
        /* Set new machine check handler */
-       S390_lowcore.mcck_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
+       S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
        S390_lowcore.mcck_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
 
        /* Set new program check handler */
-       S390_lowcore.program_new_psw.mask = psw_kernel_bits | PSW_MASK_DAT;
+       S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
        S390_lowcore.program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
 
index 0ce9fb245034d67bf8f44dd7c9234ef954679a51..59a9c35c4598ae3265c60f69f8f87ca0a1538c2d 100644 (file)
 #include <linux/stop_machine.h>
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
-#include <asm/cacheflush.h>
-#include <asm/sections.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/hardirq.h>
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
+#include <asm/dis.h>
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe);
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -59,6 +60,8 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
 
 static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
 {
+       if (!is_known_insn((unsigned char *)insn))
+               return -EINVAL;
        switch (insn[0] >> 8) {
        case 0x0c:      /* bassm */
        case 0x0b:      /* bsm   */
@@ -67,6 +70,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
        case 0xac:      /* stnsm */
        case 0xad:      /* stosm */
                return -EINVAL;
+       case 0xc6:
+               switch (insn[0] & 0x0f) {
+               case 0x00: /* exrl   */
+                       return -EINVAL;
+               }
        }
        switch (insn[0]) {
        case 0x0101:    /* pr    */
@@ -180,7 +188,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
                break;
        case 0xc6:
                switch (insn[0] & 0x0f) {
-               case 0x00: /* exrl   */
                case 0x02: /* pfdrl  */
                case 0x04: /* cghrl  */
                case 0x05: /* chrl   */
@@ -204,7 +211,7 @@ static void __kprobes copy_instruction(struct kprobe *p)
        s64 disp, new_disp;
        u64 addr, new_addr;
 
-       memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+       memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8));
        if (!is_insn_relative_long(p->ainsn.insn))
                return;
        /*
@@ -248,7 +255,7 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p)
        p->ainsn.insn = NULL;
        if (is_kernel_addr(p->addr))
                p->ainsn.insn = get_dmainsn_slot();
-       if (is_module_addr(p->addr))
+       else if (is_module_addr(p->addr))
                p->ainsn.insn = get_insn_slot();
        return p->ainsn.insn ? 0 : -ENOMEM;
 }
@@ -604,7 +611,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
                ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
 
        if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
-               int ilen = ((p->ainsn.insn[0] >> 14) + 3) & -2;
+               int ilen = insn_length(p->ainsn.insn[0] >> 8);
                if (ip - (unsigned long) p->ainsn.insn == ilen)
                        ip = (unsigned long) p->addr + ilen;
        }
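
Both kprobes hunks above replace the open-coded "((opcode >> 14) + 3) & -2" length computation with insn_length(), the helper shown as removed from dis.c earlier in this diff (presumably it moves into asm/dis.h, which kprobes.c now includes). A self-contained check of that formula against the s390 convention that the two most significant bits of the first opcode byte encode the instruction length:

    #include <assert.h>

    /* copy of the helper dropped from dis.c, for illustration only */
    static int sketch_insn_length(unsigned char code)
    {
            return ((((int) code + 64) >> 7) + 1) << 1;
    }

    int main(void)
    {
            assert(sketch_insn_length(0x05) == 2); /* 00xxxxxx -> 2 bytes */
            assert(sketch_insn_length(0x58) == 4); /* 01xxxxxx -> 4 bytes */
            assert(sketch_insn_length(0xa7) == 4); /* 10xxxxxx -> 4 bytes */
            assert(sketch_insn_length(0xc0) == 6); /* 11xxxxxx -> 6 bytes */
            return 0;
    }
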
index c5dbb335716d5e2cdc864fc6b189a46af74ba5d3..7ed0d4e2a435452733173767b56af52163334980 100644 (file)
@@ -139,7 +139,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(&frame->childregs, 0, sizeof(struct pt_regs));
-               frame->childregs.psw.mask = psw_kernel_bits | PSW_MASK_DAT |
+               frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
                                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
                frame->childregs.psw.addr = PSW_ADDR_AMODE |
                                (unsigned long) kernel_thread_starter;
@@ -165,7 +165,8 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
         * save fprs to current->thread.fp_regs to merge them with
         * the emulated registers and then copy the result to the child.
         */
-       save_fp_regs(&current->thread.fp_regs);
+       save_fp_ctl(&current->thread.fp_regs.fpc);
+       save_fp_regs(current->thread.fp_regs.fprs);
        memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
               sizeof(s390_fp_regs));
        /* Set a new TLS ?  */
@@ -173,7 +174,9 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
                p->thread.acrs[0] = frame->childregs.gprs[6];
 #else /* CONFIG_64BIT */
        /* Save the fpu registers to new thread structure. */
-       save_fp_regs(&p->thread.fp_regs);
+       save_fp_ctl(&p->thread.fp_regs.fpc);
+       save_fp_regs(p->thread.fp_regs.fprs);
+       p->thread.fp_regs.pad = 0;
        /* Set a new TLS ?  */
        if (clone_flags & CLONE_SETTLS) {
                unsigned long tls = frame->childregs.gprs[6];
@@ -205,10 +208,12 @@ int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
         * save fprs to current->thread.fp_regs to merge them with
         * the emulated registers and then copy the result to the dump.
         */
-       save_fp_regs(&current->thread.fp_regs);
+       save_fp_ctl(&current->thread.fp_regs.fpc);
+       save_fp_regs(current->thread.fp_regs.fprs);
        memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
 #else /* CONFIG_64BIT */
-       save_fp_regs(fpregs);
+       save_fp_ctl(&fpregs->fpc);
+       save_fp_regs(fpregs->fprs);
 #endif /* CONFIG_64BIT */
        return 1;
 }
index 9556905bd3ce42c046052a54aa32ed5b07d9d559..e65c91c591e8b99e4189f31b403a35ad06837f4f 100644 (file)
@@ -198,9 +198,11 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
                 * psw and gprs are stored on the stack
                 */
                tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
-               if (addr == (addr_t) &dummy->regs.psw.mask)
+               if (addr == (addr_t) &dummy->regs.psw.mask) {
                        /* Return a clean psw mask. */
-                       tmp = psw_user_bits | (tmp & PSW_MASK_USER);
+                       tmp &= PSW_MASK_USER | PSW_MASK_RI;
+                       tmp |= PSW_USER_BITS;
+               }
 
        } else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
                /*
@@ -239,8 +241,7 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
                offset = addr - (addr_t) &dummy->regs.fp_regs;
                tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
                if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
-                       tmp &= (unsigned long) FPC_VALID_MASK
-                               << (BITS_PER_LONG - 32);
+                       tmp <<= BITS_PER_LONG - 32;
 
        } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
                /*
@@ -321,11 +322,15 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                /*
                 * psw and gprs are stored on the stack
                 */
-               if (addr == (addr_t) &dummy->regs.psw.mask &&
-                   ((data & ~PSW_MASK_USER) != psw_user_bits ||
-                    ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
-                       /* Invalid psw mask. */
-                       return -EINVAL;
+               if (addr == (addr_t) &dummy->regs.psw.mask) {
+                       unsigned long mask = PSW_MASK_USER;
+
+                       mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
+                       if ((data & ~mask) != PSW_USER_BITS)
+                               return -EINVAL;
+                       if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+                               return -EINVAL;
+               }
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
 
        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -363,10 +368,10 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                /*
                 * floating point regs. are stored in the thread structure
                 */
-               if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
-                   (data & ~((unsigned long) FPC_VALID_MASK
-                             << (BITS_PER_LONG - 32))) != 0)
-                       return -EINVAL;
+               if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
+                       if ((unsigned int) data != 0 ||
+                           test_fp_ctl(data >> (BITS_PER_LONG - 32)))
+                               return -EINVAL;
                offset = addr - (addr_t) &dummy->regs.fp_regs;
                *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;
 
@@ -557,7 +562,8 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        /* Fake a 31 bit psw mask. */
                        tmp = (__u32)(regs->psw.mask >> 32);
-                       tmp = psw32_user_bits | (tmp & PSW32_MASK_USER);
+                       tmp &= PSW32_MASK_USER | PSW32_MASK_RI;
+                       tmp |= PSW32_USER_BITS;
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Fake a 31 bit psw address. */
                        tmp = (__u32) regs->psw.addr |
@@ -654,13 +660,16 @@ static int __poke_user_compat(struct task_struct *child,
                 * psw, gprs, acrs and orig_gpr2 are stored on the stack
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
+                       __u32 mask = PSW32_MASK_USER;
+
+                       mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
                        /* Build a 64 bit psw mask from 31 bit mask. */
-                       if ((tmp & ~PSW32_MASK_USER) != psw32_user_bits)
+                       if ((tmp & ~mask) != PSW32_USER_BITS)
                                /* Invalid psw mask. */
                                return -EINVAL;
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                                (regs->psw.mask & PSW_MASK_BA) |
-                               (__u64)(tmp & PSW32_MASK_USER) << 32;
+                               (__u64)(tmp & mask) << 32;
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Build a 64 bit psw address from 31 bit address. */
                        regs->psw.addr = (__u64) tmp & PSW32_ADDR_INSN;
@@ -696,8 +705,7 @@ static int __poke_user_compat(struct task_struct *child,
                 * floating point regs. are stored in the thread structure 
                 */
                if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
-                   (tmp & ~FPC_VALID_MASK) != 0)
-                       /* Invalid floating point control. */
+                   test_fp_ctl(tmp))
                        return -EINVAL;
                offset = addr - (addr_t) &dummy32->regs.fp_regs;
                *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;
@@ -895,8 +903,10 @@ static int s390_fpregs_get(struct task_struct *target,
                           const struct user_regset *regset, unsigned int pos,
                           unsigned int count, void *kbuf, void __user *ubuf)
 {
-       if (target == current)
-               save_fp_regs(&target->thread.fp_regs);
+       if (target == current) {
+               save_fp_ctl(&target->thread.fp_regs.fpc);
+               save_fp_regs(target->thread.fp_regs.fprs);
+       }
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fp_regs, 0, -1);
@@ -909,19 +919,21 @@ static int s390_fpregs_set(struct task_struct *target,
 {
        int rc = 0;
 
-       if (target == current)
-               save_fp_regs(&target->thread.fp_regs);
+       if (target == current) {
+               save_fp_ctl(&target->thread.fp_regs.fpc);
+               save_fp_regs(target->thread.fp_regs.fprs);
+       }
 
        /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
-               u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
-               rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
+               u32 ufpc[2] = { target->thread.fp_regs.fpc, 0 };
+               rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ufpc,
                                        0, offsetof(s390_fp_regs, fprs));
                if (rc)
                        return rc;
-               if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
+               if (ufpc[1] != 0 || test_fp_ctl(ufpc[0]))
                        return -EINVAL;
-               target->thread.fp_regs.fpc = fpc[0];
+               target->thread.fp_regs.fpc = ufpc[0];
        }
 
        if (rc == 0 && count > 0)
@@ -929,8 +941,10 @@ static int s390_fpregs_set(struct task_struct *target,
                                        target->thread.fp_regs.fprs,
                                        offsetof(s390_fp_regs, fprs), -1);
 
-       if (rc == 0 && target == current)
-               restore_fp_regs(&target->thread.fp_regs);
+       if (rc == 0 && target == current) {
+               restore_fp_ctl(&target->thread.fp_regs.fpc);
+               restore_fp_regs(target->thread.fp_regs.fprs);
+       }
 
        return rc;
 }
index e1c9d1c292fa2ce4afa9a7d777b2d1db08024044..d817cce7e72de862081f57a2ef6b0936c64fcc60 100644 (file)
@@ -40,8 +40,6 @@ static void disable_runtime_instr(void)
 static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
 {
        cb->buf_limit = 0xfff;
-       if (s390_user_mode == HOME_SPACE_MODE)
-               cb->home_space = 1;
        cb->int_requested = 1;
        cb->pstate = 1;
        cb->pstate_set_buf = 1;
index aeed8a61fa0d4f1b4862a98cab44e29beda63699..ffe1c53264a708352622d0159f437f1e176d2a75 100644 (file)
 #include <asm/sclp.h>
 #include "entry.h"
 
-long psw_kernel_bits   = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
-                         PSW_MASK_EA | PSW_MASK_BA;
-long psw_user_bits     = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
-                         PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
-                         PSW_MASK_PSTATE | PSW_ASC_HOME;
-
 /*
  * User copy operations.
  */
@@ -300,43 +294,14 @@ static int __init parse_vmalloc(char *arg)
 }
 early_param("vmalloc", parse_vmalloc);
 
-unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
-EXPORT_SYMBOL_GPL(s390_user_mode);
-
-static void __init set_user_mode_primary(void)
-{
-       psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
-       psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
-#ifdef CONFIG_COMPAT
-       psw32_user_bits =
-               (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
-#endif
-       uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
-}
-
 static int __init early_parse_user_mode(char *p)
 {
-       if (p && strcmp(p, "primary") == 0)
-               s390_user_mode = PRIMARY_SPACE_MODE;
-       else if (!p || strcmp(p, "home") == 0)
-               s390_user_mode = HOME_SPACE_MODE;
-       else
-               return 1;
-       return 0;
+       if (!p || strcmp(p, "primary") == 0)
+               return 0;
+       return 1;
 }
 early_param("user_mode", early_parse_user_mode);
 
-static void __init setup_addressing_mode(void)
-{
-       if (s390_user_mode != PRIMARY_SPACE_MODE)
-               return;
-       set_user_mode_primary();
-       if (MACHINE_HAS_MVCOS)
-               pr_info("Address spaces switched, mvcos available\n");
-       else
-               pr_info("Address spaces switched, mvcos not available\n");
-}
-
 void *restart_stack __attribute__((__section__(".data")));
 
 static void __init setup_lowcore(void)
@@ -348,24 +313,24 @@ static void __init setup_lowcore(void)
         */
        BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
        lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
-       lc->restart_psw.mask = psw_kernel_bits;
+       lc->restart_psw.mask = PSW_KERNEL_BITS;
        lc->restart_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
-       lc->external_new_psw.mask = psw_kernel_bits |
+       lc->external_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->external_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
-       lc->svc_new_psw.mask = psw_kernel_bits |
+       lc->svc_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
        lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
-       lc->program_new_psw.mask = psw_kernel_bits |
+       lc->program_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
-       lc->mcck_new_psw.mask = psw_kernel_bits;
+       lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
        lc->mcck_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
-       lc->io_new_psw.mask = psw_kernel_bits |
+       lc->io_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
        lc->clock_comparator = -1ULL;
@@ -1043,10 +1008,7 @@ void __init setup_arch(char **cmdline_p)
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;
 
-       if (MACHINE_HAS_MVCOS)
-               memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
-       else
-               memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
+       uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt;
 
        parse_early_param();
        detect_memory_layout(memory_chunk, memory_end);
@@ -1054,7 +1016,6 @@ void __init setup_arch(char **cmdline_p)
        setup_ipl();
        reserve_oldmem();
        setup_memory_end();
-       setup_addressing_mode();
        reserve_crashkernel();
        setup_memory();
        setup_resources();
index c45becf82e0179e17fcc9b1372b1ba25ef7c2131..fb535874a2464853168c9a9182620acf10e37c74 100644 (file)
@@ -57,40 +57,48 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 
        /* Copy a 'clean' PSW mask to the user to avoid leaking
           information about whether PER is currently on.  */
-       user_sregs.regs.psw.mask = psw_user_bits |
-               (regs->psw.mask & PSW_MASK_USER);
+       user_sregs.regs.psw.mask = PSW_USER_BITS |
+               (regs->psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
        user_sregs.regs.psw.addr = regs->psw.addr;
        memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
        memcpy(&user_sregs.regs.acrs, current->thread.acrs,
-              sizeof(sregs->regs.acrs));
+              sizeof(user_sregs.regs.acrs));
        /* 
         * We have to store the fp registers to current->thread.fp_regs
         * to merge them with the emulated registers.
         */
-       save_fp_regs(&current->thread.fp_regs);
+       save_fp_ctl(&current->thread.fp_regs.fpc);
+       save_fp_regs(current->thread.fp_regs.fprs);
        memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
-              sizeof(s390_fp_regs));
-       return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs));
+              sizeof(user_sregs.fpregs));
+       if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
+               return -EFAULT;
+       return 0;
 }
 
-/* Returns positive number on error */
 static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 {
-       int err;
        _sigregs user_sregs;
 
         /* Always make any pending restarted system call return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-       err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
-       if (err)
-               return err;
-       /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. */
+       if (__copy_from_user(&user_sregs, sregs, sizeof(user_sregs)))
+               return -EFAULT;
+
+       if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW_MASK_RI))
+               return -EINVAL;
+
+       /* Loading the floating-point-control word can fail. Do that first. */
+       if (restore_fp_ctl(&user_sregs.fpregs.fpc))
+               return -EINVAL;
+
+       /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
-               (user_sregs.regs.psw.mask & PSW_MASK_USER);
+               (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI));
        /* Check for invalid user address space control. */
-       if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC))
-               regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) |
+       if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
+               regs->psw.mask = PSW_ASC_PRIMARY |
                        (regs->psw.mask & ~PSW_MASK_ASC);
        /* Check for invalid amode */
        if (regs->psw.mask & PSW_MASK_EA)
@@ -98,14 +106,13 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
        regs->psw.addr = user_sregs.regs.psw.addr;
        memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
        memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
-              sizeof(sregs->regs.acrs));
+              sizeof(current->thread.acrs));
        restore_access_regs(current->thread.acrs);
 
        memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
-              sizeof(s390_fp_regs));
-       current->thread.fp_regs.fpc &= FPC_VALID_MASK;
+              sizeof(current->thread.fp_regs));
 
-       restore_fp_regs(&current->thread.fp_regs);
+       restore_fp_regs(current->thread.fp_regs.fprs);
        clear_thread_flag(TIF_SYSCALL); /* No longer in a system call */
        return 0;
 }
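
The rewritten restore_sigregs() above pulls its only fallible step, restore_fp_ctl(), in front of any modification of task state, so a bad sigframe is rejected before anything is touched. A condensed sketch of just the floating-point part, using only names that appear in the hunk (the PSW/gpr/acr handling is omitted):

    static int sketch_restore_fpu_from_frame(_sigregs *user_sregs)
    {
            /* validate and load the control word first: it is the only
             * part of the frame whose restore can fail */
            if (restore_fp_ctl(&user_sregs->fpregs.fpc))
                    return -EINVAL;
            memcpy(&current->thread.fp_regs, &user_sregs->fpregs,
                   sizeof(current->thread.fp_regs));
            restore_fp_regs(current->thread.fp_regs.fprs);
            return 0;
    }
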
@@ -224,7 +231,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
        regs->gprs[15] = (unsigned long) frame;
        /* Force default amode and default user address space control. */
        regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
-               (psw_user_bits & PSW_MASK_ASC) |
+               (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
 
@@ -295,7 +302,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->gprs[15] = (unsigned long) frame;
        /* Force default amode and default user address space control. */
        regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
-               (psw_user_bits & PSW_MASK_ASC) |
+               (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
        regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
 
index 1a4313a1b60f76e20ac50ff31b5c0d2e0bf95492..739313db71e5e2431f32c59472cdaa89ccbae2ef 100644 (file)
@@ -283,7 +283,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
        struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
        unsigned long source_cpu = stap();
 
-       __load_psw_mask(psw_kernel_bits);
+       __load_psw_mask(PSW_KERNEL_BITS);
        if (pcpu->address == source_cpu)
                func(data);     /* should not return */
        /* Stop target cpu (if func returns this stops the current cpu). */
@@ -395,7 +395,7 @@ void smp_send_stop(void)
        int cpu;
 
        /* Disable all interrupts/machine checks */
-       __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
+       __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        trace_hardirqs_off();
 
        debug_set_critical();
@@ -533,9 +533,6 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
 
 #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)
 
-struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
-EXPORT_SYMBOL_GPL(zfcpdump_save_areas);
-
 static void __init smp_get_save_area(int cpu, u16 address)
 {
        void *lc = pcpu_devices[0].lowcore;
@@ -546,15 +543,9 @@ static void __init smp_get_save_area(int cpu, u16 address)
        if (!OLDMEM_BASE && (address == boot_cpu_address ||
                             ipl_info.type != IPL_TYPE_FCP_DUMP))
                return;
-       if (cpu >= NR_CPUS) {
-               pr_warning("CPU %i exceeds the maximum %i and is excluded "
-                          "from the dump\n", cpu, NR_CPUS - 1);
-               return;
-       }
-       save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
+       save_area = dump_save_area_create(cpu);
        if (!save_area)
                panic("could not allocate memory for save area\n");
-       zfcpdump_save_areas[cpu] = save_area;
 #ifdef CONFIG_CRASH_DUMP
        if (address == boot_cpu_address) {
                /* Copy the registers of the boot cpu. */
@@ -693,7 +684,7 @@ static void smp_start_secondary(void *cpuvoid)
        S390_lowcore.restart_source = -1UL;
        restore_access_regs(S390_lowcore.access_regs_save_area);
        __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
-       __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
+       __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        cpu_init();
        preempt_disable();
        init_cpu_timer();
index 05d75c413137879a30fded476638b0b9c4a001f5..a84476f2a9bb3ae488eb7a936021a744a822dd31 100644 (file)
@@ -84,8 +84,7 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
  */
 static void vdso_init_data(struct vdso_data *vd)
 {
-       vd->ectg_available =
-               s390_user_mode != HOME_SPACE_MODE && test_facility(31);
+       vd->ectg_available = test_facility(31);
 }
 
 #ifdef CONFIG_64BIT
@@ -102,7 +101,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
 
        lowcore->vdso_per_cpu_data = __LC_PASTE;
 
-       if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+       if (!vdso_enabled)
                return 0;
 
        segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
@@ -147,7 +146,7 @@ void vdso_free_per_cpu(struct _lowcore *lowcore)
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;
 
-       if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+       if (!vdso_enabled)
                return;
 
        psal = (u32 *)(addr_t) lowcore->paste[4];
@@ -165,7 +164,7 @@ static void vdso_init_cr5(void)
 {
        unsigned long cr5;
 
-       if (s390_user_mode == HOME_SPACE_MODE || !vdso_enabled)
+       if (!vdso_enabled)
                return;
        cr5 = offsetof(struct _lowcore, paste);
        __ctl_load(cr5, 5, 5);
index abcfab55f99b37e5d4f79903be716e67896d5c79..e312c48a1c405741a4e1675002aa9bf16003f821 100644 (file)
@@ -161,7 +161,7 @@ void __kprobes vtime_stop_cpu(void)
        trace_hardirqs_on();
 
        /* Wait for external, I/O or machine check interrupt. */
-       psw_mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_DAT |
+       psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
        idle->nohz_delay = 0;
 
index 7f35cb33e5102008244b4fd1376954400f2d0009..7f1f7ac5cf7f8a2c3f3966d4fe96fa23af90ea04 100644 (file)
@@ -385,7 +385,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        }
 
        if ((!rc) && (vcpu->arch.sie_block->ckc <
-               get_tod_clock() + vcpu->arch.sie_block->epoch)) {
+               get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
                if ((!psw_extint_disabled(vcpu)) &&
                        (vcpu->arch.sie_block->gcr[0] & 0x800ul))
                        rc = 1;
@@ -425,7 +425,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
                goto no_timer;
        }
 
-       now = get_tod_clock() + vcpu->arch.sie_block->epoch;
+       now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
        if (vcpu->arch.sie_block->ckc < now) {
                __unset_cpu_idle(vcpu);
                return 0;
@@ -515,7 +515,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
        }
 
        if ((vcpu->arch.sie_block->ckc <
-               get_tod_clock() + vcpu->arch.sie_block->epoch))
+               get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
                __try_deliver_ckc_interrupt(vcpu);
 
        if (atomic_read(&fi->active)) {
index 776dafe918db30b8c3f4823b8bf11c461bed6f5f..ed8064cb5c4921424d5981b890e6fd9b07f9ed02 100644 (file)
@@ -343,10 +343,11 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
-       save_fp_regs(&vcpu->arch.host_fpregs);
+       save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+       save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
-       vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
-       restore_fp_regs(&vcpu->arch.guest_fpregs);
+       restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+       restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
@@ -356,9 +357,11 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
-       save_fp_regs(&vcpu->arch.guest_fpregs);
+       save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+       save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
-       restore_fp_regs(&vcpu->arch.host_fpregs);
+       restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
+       restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
 }
 
@@ -618,9 +621,12 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
+       if (test_fp_ctl(fpu->fpc))
+               return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-       vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
-       restore_fp_regs(&vcpu->arch.guest_fpregs);
+       vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+       restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+       restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
 }
 
@@ -876,7 +882,8 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         * copying in vcpu load/put. Let's update our copies before we save
         * it into the save area
         */
-       save_fp_regs(&vcpu->arch.guest_fpregs);
+       save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
+       save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
 
        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
index c2f582bb1cb262e421072a9b1500c961f4e7426a..0c991c6748ab3a4d860c890b7c1b1526808d4ed6 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/tracepoint.h>
 #include <asm/sigp.h>
 #include <asm/debug.h>
+#include <asm/dis.h>
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
index 20b0e97a7df2e204f9a680be4d95c86e0944652d..b068729e50ace9711774adab984f03a8a41e9338 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for s390-specific library files..
 #
 
-lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
+lib-y += delay.o string.o uaccess_pt.o find.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
 obj-$(CONFIG_64BIT) += mem64.o
 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
index 57c87d7d7ede01add784c291f31a82f502ca877e..a9f3d0042d58ba849b8c624f23915d88d03d6f70 100644 (file)
@@ -44,7 +44,7 @@ static void __udelay_disabled(unsigned long long usecs)
        do {
                set_clock_comparator(end);
                vtime_stop_cpu();
-       } while (get_tod_clock() < end);
+       } while (get_tod_clock_fast() < end);
        lockdep_on();
        __ctl_load(cr0, 0, 0);
        __ctl_load(cr6, 6, 6);
@@ -55,7 +55,7 @@ static void __udelay_enabled(unsigned long long usecs)
 {
        u64 clock_saved, end;
 
-       end = get_tod_clock() + (usecs << 12);
+       end = get_tod_clock_fast() + (usecs << 12);
        do {
                clock_saved = 0;
                if (end < S390_lowcore.clock_comparator) {
@@ -65,7 +65,7 @@ static void __udelay_enabled(unsigned long long usecs)
                vtime_stop_cpu();
                if (clock_saved)
                        local_tick_enable(clock_saved);
-       } while (get_tod_clock() < end);
+       } while (get_tod_clock_fast() < end);
 }
 
 /*
@@ -109,8 +109,8 @@ void udelay_simple(unsigned long long usecs)
 {
        u64 end;
 
-       end = get_tod_clock() + (usecs << 12);
-       while (get_tod_clock() < end)
+       end = get_tod_clock_fast() + (usecs << 12);
+       while (get_tod_clock_fast() < end)
                cpu_relax();
 }
 
@@ -120,10 +120,10 @@ void __ndelay(unsigned long long nsecs)
 
        nsecs <<= 9;
        do_div(nsecs, 125);
-       end = get_tod_clock() + nsecs;
+       end = get_tod_clock_fast() + nsecs;
        if (nsecs & ~0xfffUL)
                __udelay(nsecs >> 12);
-       while (get_tod_clock() < end)
+       while (get_tod_clock_fast() < end)
                barrier();
 }
 EXPORT_SYMBOL(__ndelay);
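
The shifts above encode the TOD clock format: bit 51 of the z/Architecture TOD clock ticks once per microsecond, so one microsecond corresponds to 2^12 = 4096 units of the 64-bit value, and one nanosecond to 4096/1000 = 512/125 units. A small standalone sketch of that arithmetic (illustration only, not part of the patch):

#include <stdio.h>

/* Microseconds to TOD clock units, as in __udelay(): usecs << 12. */
static unsigned long long usecs_to_tod(unsigned long long usecs)
{
	return usecs << 12;
}

/* Nanoseconds to TOD clock units, as in __ndelay():
 * ns * 4096 / 1000 == ns * 512 / 125, computed as (ns << 9) / 125.
 */
static unsigned long long nsecs_to_tod(unsigned long long nsecs)
{
	return (nsecs << 9) / 125;
}

int main(void)
{
	printf("1 us   -> %llu TOD units\n", usecs_to_tod(1));   /* 4096 */
	printf("250 ns -> %llu TOD units\n", nsecs_to_tod(250)); /* 1024 */
	return 0;
}
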
diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c
new file mode 100644 (file)
index 0000000..620d34d
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * MSB0 numbered special bitops handling.
+ *
+ * On s390x the bits are numbered:
+ *   |0..............63|64............127|128...........191|192...........255|
+ * and on s390:
+ *   |0.....31|32....63|64....95|96...127|128..159|160..191|192..223|224..255|
+ *
+ * The reason for this bit numbering is the fact that the hardware sets bits
+ * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap
+ * from the 'wrong end'.
+ */
+
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/export.h>
+
+unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size)
+{
+       const unsigned long *p = addr;
+       unsigned long result = 0;
+       unsigned long tmp;
+
+       while (size & ~(BITS_PER_LONG - 1)) {
+               if ((tmp = *(p++)))
+                       goto found;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = (*p) & (~0UL << (BITS_PER_LONG - size));
+       if (!tmp)               /* Are any bits set? */
+               return result + size;   /* Nope. */
+found:
+       return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_first_bit_inv);
+
+unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size,
+                               unsigned long offset)
+{
+       const unsigned long *p = addr + (offset / BITS_PER_LONG);
+       unsigned long result = offset & ~(BITS_PER_LONG - 1);
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset %= BITS_PER_LONG;
+       if (offset) {
+               tmp = *(p++);
+               tmp &= (~0UL >> offset);
+               if (size < BITS_PER_LONG)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= BITS_PER_LONG;
+               result += BITS_PER_LONG;
+       }
+       while (size & ~(BITS_PER_LONG-1)) {
+               if ((tmp = *(p++)))
+                       goto found_middle;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+found_first:
+       tmp &= (~0UL << (BITS_PER_LONG - size));
+       if (!tmp)               /* Are any bits set? */
+               return result + size;   /* Nope. */
+found_middle:
+       return result + (__fls(tmp) ^ (BITS_PER_LONG - 1));
+}
+EXPORT_SYMBOL(find_next_bit_inv);
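
The __fls(tmp) ^ (BITS_PER_LONG - 1) step converts the usual LSB0 bit index into the MSB0 numbering described in the header comment; since __fls() is always below BITS_PER_LONG, the xor is simply (BITS_PER_LONG - 1) - __fls(tmp), i.e. the number of leading zero bits. A minimal userspace sketch of that mapping (illustration only, not part of the patch; my_fls stands in for the kernel's __fls):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Highest set bit, LSB0 numbering; x must be non-zero (same contract
 * as the kernel's __fls()).
 */
static unsigned long my_fls(unsigned long x)
{
	return BITS_PER_LONG - 1 - __builtin_clzl(x);
}

/* MSB0 ("inverted") index of the highest set bit in one word. */
static unsigned long first_bit_inv(unsigned long x)
{
	return my_fls(x) ^ (BITS_PER_LONG - 1);
}

int main(void)
{
	/* Only the MSB set: inverted index 0. */
	assert(first_bit_inv(1UL << (BITS_PER_LONG - 1)) == 0);
	/* Only the LSB set: inverted index BITS_PER_LONG - 1. */
	assert(first_bit_inv(1UL) == BITS_PER_LONG - 1);
	printf("inverted index of highest bit in 0xf00000: %lu\n",
	       first_bit_inv(0xf00000UL));	/* 40 on a 64-bit build */
	return 0;
}
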
index 1829742bf4793fae0060dbb4bd31b4e0bfbacc4f..4b7993bf69b96bd42f503e8772f29caedf5dd4b0 100644 (file)
@@ -65,13 +65,6 @@ static size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
        return size;
 }
 
-static size_t copy_from_user_mvcos_check(size_t size, const void __user *ptr, void *x)
-{
-       if (size <= 256)
-               return copy_from_user_std(size, ptr, x);
-       return copy_from_user_mvcos(size, ptr, x);
-}
-
 static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
 {
        register unsigned long reg0 asm("0") = 0x810000UL;
@@ -101,14 +94,6 @@ static size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
        return size;
 }
 
-static size_t copy_to_user_mvcos_check(size_t size, void __user *ptr,
-                                      const void *x)
-{
-       if (size <= 256)
-               return copy_to_user_std(size, ptr, x);
-       return copy_to_user_mvcos(size, ptr, x);
-}
-
 static size_t copy_in_user_mvcos(size_t size, void __user *to,
                                 const void __user *from)
 {
@@ -201,23 +186,8 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
 }
 
 struct uaccess_ops uaccess_mvcos = {
-       .copy_from_user = copy_from_user_mvcos_check,
-       .copy_from_user_small = copy_from_user_std,
-       .copy_to_user = copy_to_user_mvcos_check,
-       .copy_to_user_small = copy_to_user_std,
-       .copy_in_user = copy_in_user_mvcos,
-       .clear_user = clear_user_mvcos,
-       .strnlen_user = strnlen_user_std,
-       .strncpy_from_user = strncpy_from_user_std,
-       .futex_atomic_op = futex_atomic_op_std,
-       .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
-};
-
-struct uaccess_ops uaccess_mvcos_switch = {
        .copy_from_user = copy_from_user_mvcos,
-       .copy_from_user_small = copy_from_user_mvcos,
        .copy_to_user = copy_to_user_mvcos,
-       .copy_to_user_small = copy_to_user_mvcos,
        .copy_in_user = copy_in_user_mvcos,
        .clear_user = clear_user_mvcos,
        .strnlen_user = strnlen_user_mvcos,
index 1694d738b17527aad71850c8fc772e755d26ca54..97e03caf782598a20857ab276ff03df701c4c3f9 100644 (file)
@@ -461,9 +461,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
 
 struct uaccess_ops uaccess_pt = {
        .copy_from_user         = copy_from_user_pt,
-       .copy_from_user_small   = copy_from_user_pt,
        .copy_to_user           = copy_to_user_pt,
-       .copy_to_user_small     = copy_to_user_pt,
        .copy_in_user           = copy_in_user_pt,
        .clear_user             = clear_user_pt,
        .strnlen_user           = strnlen_user_pt,
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
deleted file mode 100644 (file)
index 4a75d47..0000000
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- *  Standard user space access functions based on mvcp/mvcs and doing
- *  interesting things in the secondary space mode.
- *
- *    Copyright IBM Corp. 2006
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *              Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI    "ahi"
-#define ALR    "alr"
-#define CLR    "clr"
-#define LHI    "lhi"
-#define SLR    "slr"
-#else
-#define AHI    "aghi"
-#define ALR    "algr"
-#define CLR    "clgr"
-#define LHI    "lghi"
-#define SLR    "slgr"
-#endif
-
-size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
-{
-       unsigned long tmp1, tmp2;
-
-       tmp1 = -256UL;
-       asm volatile(
-               "0: mvcp  0(%0,%2),0(%1),%3\n"
-               "10:jz    8f\n"
-               "1:"ALR"  %0,%3\n"
-               "   la    %1,256(%1)\n"
-               "   la    %2,256(%2)\n"
-               "2: mvcp  0(%0,%2),0(%1),%3\n"
-               "11:jnz   1b\n"
-               "   j     8f\n"
-               "3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
-               "  "LHI"  %3,-4096\n"
-               "   nr    %4,%3\n"      /* %4 = (ptr + 255) & -4096 */
-               "  "SLR"  %4,%1\n"
-               "  "CLR"  %0,%4\n"      /* copy crosses next page boundary? */
-               "   jnh   5f\n"
-               "4: mvcp  0(%4,%2),0(%1),%3\n"
-               "12:"SLR"  %0,%4\n"
-               "  "ALR"  %2,%4\n"
-               "5:"LHI"  %4,-1\n"
-               "  "ALR"  %4,%0\n"      /* copy remaining size, subtract 1 */
-               "   bras  %3,7f\n"      /* memset loop */
-               "   xc    0(1,%2),0(%2)\n"
-               "6: xc    0(256,%2),0(%2)\n"
-               "   la    %2,256(%2)\n"
-               "7:"AHI"  %4,-256\n"
-               "   jnm   6b\n"
-               "   ex    %4,0(%3)\n"
-               "   j     9f\n"
-               "8:"SLR"  %0,%0\n"
-               "9: \n"
-               EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
-               EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
-               : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-               : : "cc", "memory");
-       return size;
-}
-
-static size_t copy_from_user_std_check(size_t size, const void __user *ptr,
-                                      void *x)
-{
-       if (size <= 1024)
-               return copy_from_user_std(size, ptr, x);
-       return copy_from_user_pt(size, ptr, x);
-}
-
-size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
-{
-       unsigned long tmp1, tmp2;
-
-       tmp1 = -256UL;
-       asm volatile(
-               "0: mvcs  0(%0,%1),0(%2),%3\n"
-               "7: jz    5f\n"
-               "1:"ALR"  %0,%3\n"
-               "   la    %1,256(%1)\n"
-               "   la    %2,256(%2)\n"
-               "2: mvcs  0(%0,%1),0(%2),%3\n"
-               "8: jnz   1b\n"
-               "   j     5f\n"
-               "3: la    %4,255(%1)\n" /* %4 = ptr + 255 */
-               "  "LHI"  %3,-4096\n"
-               "   nr    %4,%3\n"      /* %4 = (ptr + 255) & -4096 */
-               "  "SLR"  %4,%1\n"
-               "  "CLR"  %0,%4\n"      /* copy crosses next page boundary? */
-               "   jnh   6f\n"
-               "4: mvcs  0(%4,%1),0(%2),%3\n"
-               "9:"SLR"  %0,%4\n"
-               "   j     6f\n"
-               "5:"SLR"  %0,%0\n"
-               "6: \n"
-               EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
-               EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
-               : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
-               : : "cc", "memory");
-       return size;
-}
-
-static size_t copy_to_user_std_check(size_t size, void __user *ptr,
-                                    const void *x)
-{
-       if (size <= 1024)
-               return copy_to_user_std(size, ptr, x);
-       return copy_to_user_pt(size, ptr, x);
-}
-
-static size_t copy_in_user_std(size_t size, void __user *to,
-                              const void __user *from)
-{
-       unsigned long tmp1;
-
-       asm volatile(
-               "   sacf  256\n"
-               "  "AHI"  %0,-1\n"
-               "   jo    5f\n"
-               "   bras  %3,3f\n"
-               "0:"AHI"  %0,257\n"
-               "1: mvc   0(1,%1),0(%2)\n"
-               "   la    %1,1(%1)\n"
-               "   la    %2,1(%2)\n"
-               "  "AHI"  %0,-1\n"
-               "   jnz   1b\n"
-               "   j     5f\n"
-               "2: mvc   0(256,%1),0(%2)\n"
-               "   la    %1,256(%1)\n"
-               "   la    %2,256(%2)\n"
-               "3:"AHI"  %0,-256\n"
-               "   jnm   2b\n"
-               "4: ex    %0,1b-0b(%3)\n"
-               "5: "SLR"  %0,%0\n"
-               "6: sacf  0\n"
-               EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
-               : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
-               : : "cc", "memory");
-       return size;
-}
-
-static size_t clear_user_std(size_t size, void __user *to)
-{
-       unsigned long tmp1, tmp2;
-
-       asm volatile(
-               "   sacf  256\n"
-               "  "AHI"  %0,-1\n"
-               "   jo    5f\n"
-               "   bras  %3,3f\n"
-               "   xc    0(1,%1),0(%1)\n"
-               "0:"AHI"  %0,257\n"
-               "   la    %2,255(%1)\n" /* %2 = ptr + 255 */
-               "   srl   %2,12\n"
-               "   sll   %2,12\n"      /* %2 = (ptr + 255) & -4096 */
-               "  "SLR"  %2,%1\n"
-               "  "CLR"  %0,%2\n"      /* clear crosses next page boundary? */
-               "   jnh   5f\n"
-               "  "AHI"  %2,-1\n"
-               "1: ex    %2,0(%3)\n"
-               "  "AHI"  %2,1\n"
-               "  "SLR"  %0,%2\n"
-               "   j     5f\n"
-               "2: xc    0(256,%1),0(%1)\n"
-               "   la    %1,256(%1)\n"
-               "3:"AHI"  %0,-256\n"
-               "   jnm   2b\n"
-               "4: ex    %0,0(%3)\n"
-               "5: "SLR"  %0,%0\n"
-               "6: sacf  0\n"
-               EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
-               : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
-               : : "cc", "memory");
-       return size;
-}
-
-size_t strnlen_user_std(size_t size, const char __user *src)
-{
-       register unsigned long reg0 asm("0") = 0UL;
-       unsigned long tmp1, tmp2;
-
-       if (unlikely(!size))
-               return 0;
-       asm volatile(
-               "   la    %2,0(%1)\n"
-               "   la    %3,0(%0,%1)\n"
-               "  "SLR"  %0,%0\n"
-               "   sacf  256\n"
-               "0: srst  %3,%2\n"
-               "   jo    0b\n"
-               "   la    %0,1(%3)\n"   /* strnlen_user results includes \0 */
-               "  "SLR"  %0,%1\n"
-               "1: sacf  0\n"
-               EX_TABLE(0b,1b)
-               : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
-               : "d" (reg0) : "cc", "memory");
-       return size;
-}
-
-size_t strncpy_from_user_std(size_t count, const char __user *src, char *dst)
-{
-       size_t done, len, offset, len_str;
-
-       if (unlikely(!count))
-               return 0;
-       done = 0;
-       do {
-               offset = (size_t)src & ~PAGE_MASK;
-               len = min(count - done, PAGE_SIZE - offset);
-               if (copy_from_user_std(len, src, dst))
-                       return -EFAULT;
-               len_str = strnlen(dst, len);
-               done += len_str;
-               src += len_str;
-               dst += len_str;
-       } while ((len_str == len) && (done < count));
-       return done;
-}
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)     \
-       asm volatile(                                                   \
-               "   sacf  256\n"                                        \
-               "0: l     %1,0(%6)\n"                                   \
-               "1:"insn                                                \
-               "2: cs    %1,%2,0(%6)\n"                                \
-               "3: jl    1b\n"                                         \
-               "   lhi   %0,0\n"                                       \
-               "4: sacf  0\n"                                          \
-               EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)         \
-               : "=d" (ret), "=&d" (oldval), "=&d" (newval),           \
-                 "=m" (*uaddr)                                         \
-               : "0" (-EFAULT), "d" (oparg), "a" (uaddr),              \
-                 "m" (*uaddr) : "cc");
-
-int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
-{
-       int oldval = 0, newval, ret;
-
-       switch (op) {
-       case FUTEX_OP_SET:
-               __futex_atomic_op("lr %2,%5\n",
-                                 ret, oldval, newval, uaddr, oparg);
-               break;
-       case FUTEX_OP_ADD:
-               __futex_atomic_op("lr %2,%1\nar %2,%5\n",
-                                 ret, oldval, newval, uaddr, oparg);
-               break;
-       case FUTEX_OP_OR:
-               __futex_atomic_op("lr %2,%1\nor %2,%5\n",
-                                 ret, oldval, newval, uaddr, oparg);
-               break;
-       case FUTEX_OP_ANDN:
-               __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
-                                 ret, oldval, newval, uaddr, oparg);
-               break;
-       case FUTEX_OP_XOR:
-               __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
-                                 ret, oldval, newval, uaddr, oparg);
-               break;
-       default:
-               ret = -ENOSYS;
-       }
-       *old = oldval;
-       return ret;
-}
-
-int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
-                            u32 oldval, u32 newval)
-{
-       int ret;
-
-       asm volatile(
-               "   sacf 256\n"
-               "0: cs   %1,%4,0(%5)\n"
-               "1: la   %0,0\n"
-               "2: sacf 0\n"
-               EX_TABLE(0b,2b) EX_TABLE(1b,2b)
-               : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
-               : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
-               : "cc", "memory" );
-       *uval = oldval;
-       return ret;
-}
-
-struct uaccess_ops uaccess_std = {
-       .copy_from_user = copy_from_user_std_check,
-       .copy_from_user_small = copy_from_user_std,
-       .copy_to_user = copy_to_user_std_check,
-       .copy_to_user_small = copy_to_user_std,
-       .copy_in_user = copy_in_user_std,
-       .clear_user = clear_user_std,
-       .strnlen_user = strnlen_user_std,
-       .strncpy_from_user = strncpy_from_user_std,
-       .futex_atomic_op = futex_atomic_op_std,
-       .futex_atomic_cmpxchg = futex_atomic_cmpxchg_std,
-};
index 58bff541fde9c95edcd0db4f49da3aa8629371b9..a6ba0d7243356649e6882b5dc16a85c445ed120e 100644 (file)
@@ -19,6 +19,8 @@
 #include <math-emu/double.h>
 #include <math-emu/quad.h>
 
+#define FPC_VALID_MASK         0xF8F8FF03
+
 /*
  * I miss a macro to round a floating point number to the
  * nearest integer in the same floating point format.
index 9d84a1feefef0e57005f6a31ae50079ebec3a15b..76741306af2a9d5193c4a12c2210d520c26c129f 100644 (file)
@@ -257,8 +257,8 @@ static int cmm_pages_handler(ctl_table *ctl, int write, void __user *buffer,
                             size_t *lenp, loff_t *ppos)
 {
        char buf[16], *p;
+       unsigned int len;
        long nr;
-       int len;
 
        if (!*lenp || (*ppos && !write)) {
                *lenp = 0;
@@ -298,7 +298,7 @@ static int cmm_timeout_handler(ctl_table *ctl, int write,  void __user *buffer,
 {
        char buf[64], *p;
        long nr, seconds;
-       int len;
+       unsigned int len;
 
        if (!*lenp || (*ppos && !write)) {
                *lenp = 0;
index fc6679210d83249e9b34c2f44070ab1021852e91..8f29762671cf171dc6a79d64f718e4b02d247cb9 100644 (file)
@@ -115,13 +115,8 @@ static inline int user_space_fault(unsigned long trans_exc_code)
        if (trans_exc_code == 2)
                /* Access via secondary space, set_fs setting decides */
                return current->thread.mm_segment.ar4;
-       if (s390_user_mode == HOME_SPACE_MODE)
-               /* User space if the access has been done via home space. */
-               return trans_exc_code == 3;
        /*
-        * If the user space is not the home space the kernel runs in home
-        * space. Access via secondary space has already been covered,
-        * access via primary space or access register is from user space
+        * Access via primary space or access register is from user space
         * and access via home space is from the kernel.
         */
        return trans_exc_code != 3;
@@ -471,7 +466,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
        int access, fault;
 
        /* Emulate a uaccess fault from kernel mode. */
-       regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
+       regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
        if (!irqs_disabled())
                regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
        regs.psw.addr = (unsigned long) __builtin_return_address(0);
index 5d758db27bdced58d929d736363bcafc09c199ab..639fce464008854cf3d23c22f6dcbe4a970add68 100644 (file)
@@ -180,9 +180,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;
-       if ((end < start) || (end > TASK_SIZE))
+       if ((end <= start) || (end > TASK_SIZE))
                return 0;
-
+       /*
+        * local_irq_save() doesn't prevent pagetable teardown, but does
+        * prevent the pagetables from being freed on s390.
+        *
+        * So long as we atomically load page table pointers versus teardown,
+        * we can follow the address down to the page and take a ref on it.
+        */
        local_irq_save(flags);
        pgdp = pgd_offset(mm, addr);
        do {
@@ -219,63 +225,22 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
 {
        struct mm_struct *mm = current->mm;
-       unsigned long addr, len, end;
-       unsigned long next;
-       pgd_t *pgdp, pgd;
-       int nr = 0;
+       int nr, ret;
 
        start &= PAGE_MASK;
-       addr = start;
-       len = (unsigned long) nr_pages << PAGE_SHIFT;
-       end = start + len;
-       if ((end < start) || (end > TASK_SIZE))
-               goto slow_irqon;
-
-       /*
-        * local_irq_disable() doesn't prevent pagetable teardown, but does
-        * prevent the pagetables from being freed on s390.
-        *
-        * So long as we atomically load page table pointers versus teardown,
-        * we can follow the address down to the the page and take a ref on it.
-        */
-       local_irq_disable();
-       pgdp = pgd_offset(mm, addr);
-       do {
-               pgd = *pgdp;
-               barrier();
-               next = pgd_addr_end(addr, end);
-               if (pgd_none(pgd))
-                       goto slow;
-               if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
-                       goto slow;
-       } while (pgdp++, addr = next, addr != end);
-       local_irq_enable();
-
-       VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
-       return nr;
-
-       {
-               int ret;
-slow:
-               local_irq_enable();
-slow_irqon:
-               /* Try to get the remaining pages with get_user_pages */
-               start += nr << PAGE_SHIFT;
-               pages += nr;
-
-               down_read(&mm->mmap_sem);
-               ret = get_user_pages(current, mm, start,
-                       (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
-               up_read(&mm->mmap_sem);
-
-               /* Have to be a bit careful with return values */
-               if (nr > 0) {
-                       if (ret < 0)
-                               ret = nr;
-                       else
-                               ret += nr;
-               }
-
-               return ret;
-       }
+       nr = __get_user_pages_fast(start, nr_pages, write, pages);
+       if (nr == nr_pages)
+               return nr;
+
+       /* Try to get the remaining pages with get_user_pages */
+       start += nr << PAGE_SHIFT;
+       pages += nr;
+       down_read(&mm->mmap_sem);
+       ret = get_user_pages(current, mm, start,
+                            nr_pages - nr, write, 0, pages, NULL);
+       up_read(&mm->mmap_sem);
+       /* Have to be a bit careful with return values */
+       if (nr > 0)
+               ret = (ret < 0) ? nr : ret + nr;
+       return ret;
 }
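
The "careful with return values" logic above preserves the get_user_pages_fast() contract: the call may legitimately pin fewer pages than requested, and a caller that cannot use a partial result has to drop the pages it did get. A hedged sketch of such a caller (hypothetical helper for a kernel-module context, not part of the patch):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page ***pagesp)
{
	struct page **pages;
	int i, pinned;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned == nr_pages) {
		*pagesp = pages;
		return 0;
	}

	/* Partial pin or error: release what we got and report failure. */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	kfree(pages);
	return pinned < 0 ? pinned : -EFAULT;
}
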
index 990397420e6bcf8262b92806b5f5b57bff273373..8400f494623f4591a5de3e5ea6442a08fe767ed4 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/pgtable.h>
 #include <asm/page.h>
 
+#if PAGE_DEFAULT_KEY
 static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
 {
        asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
@@ -16,7 +17,7 @@ static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
        return addr;
 }
 
-void storage_key_init_range(unsigned long start, unsigned long end)
+void __storage_key_init_range(unsigned long start, unsigned long end)
 {
        unsigned long boundary, size;
 
@@ -36,6 +37,7 @@ void storage_key_init_range(unsigned long start, unsigned long end)
                start += PAGE_SIZE;
        }
 }
+#endif
 
 static pte_t *walk_page_table(unsigned long addr)
 {
index de8cbc30dcd1be13cb34dd0fc0e7ad5a8a68372e..94f37a9fb1e543d2abc1efc318aa80212ae6db13 100644 (file)
@@ -1157,10 +1157,6 @@ int s390_enable_sie(void)
        struct mm_struct *mm = tsk->mm;
        struct mmu_gather tlb;
 
-       /* Do we have switched amode? If no, we cannot do sie */
-       if (s390_user_mode == HOME_SPACE_MODE)
-               return -EINVAL;
-
        /* Do we have pgstes? if yes, we are done */
        if (mm_has_pgste(tsk->mm))
                return 0;
index 709239285869caa29fde3db90ea31da213cb5cb2..16871da3737164d986412ff40cc8fb95c2c7119b 100644 (file)
@@ -12,8 +12,8 @@
 #include <linux/random.h>
 #include <linux/init.h>
 #include <asm/cacheflush.h>
-#include <asm/processor.h>
 #include <asm/facility.h>
+#include <asm/dis.h>
 
 /*
  * Conventions:
@@ -156,8 +156,8 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
                EMIT6(0xeb8ff058, 0x0024);
                /* lgr %r14,%r15 */
                EMIT4(0xb90400ef);
-               /* ahi %r15,<offset> */
-               EMIT4_IMM(0xa7fa0000, (jit->seen & SEEN_MEM) ? -112 : -80);
+               /* aghi %r15,<offset> */
+               EMIT4_IMM(0xa7fb0000, (jit->seen & SEEN_MEM) ? -112 : -80);
                /* stg %r14,152(%r15) */
                EMIT6(0xe3e0f098, 0x0024);
        } else if ((jit->seen & SEEN_XREG) && (jit->seen & SEEN_LITERAL))
@@ -881,7 +881,9 @@ void bpf_jit_free(struct sk_filter *fp)
        struct bpf_binary_header *header = (void *)addr;
 
        if (fp->bpf_func == sk_run_filter)
-               return;
+               goto free_filter;
        set_memory_rw(addr, header->pages);
        module_free(NULL, header);
+free_filter:
+       kfree(fp);
 }
index f17a8343e3609d7d644b1d5896fd49bf4fca9e88..0c9a17780e4b9be96288a75a356444761f0085d5 100644 (file)
@@ -120,26 +120,17 @@ EXPORT_SYMBOL_GPL(pci_proc_domain);
 static int zpci_set_airq(struct zpci_dev *zdev)
 {
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
-       struct zpci_fib *fib;
-       int rc;
-
-       fib = (void *) get_zeroed_page(GFP_KERNEL);
-       if (!fib)
-               return -ENOMEM;
+       struct zpci_fib fib = {0};
 
-       fib->isc = PCI_ISC;
-       fib->sum = 1;           /* enable summary notifications */
-       fib->noi = airq_iv_end(zdev->aibv);
-       fib->aibv = (unsigned long) zdev->aibv->vector;
-       fib->aibvo = 0;         /* each zdev has its own interrupt vector */
-       fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
-       fib->aisbo = zdev->aisb & 63;
+       fib.isc = PCI_ISC;
+       fib.sum = 1;            /* enable summary notifications */
+       fib.noi = airq_iv_end(zdev->aibv);
+       fib.aibv = (unsigned long) zdev->aibv->vector;
+       fib.aibvo = 0;          /* each zdev has its own interrupt vector */
+       fib.aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
+       fib.aisbo = zdev->aisb & 63;
 
-       rc = zpci_mod_fc(req, fib);
-       pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
-
-       free_page((unsigned long) fib);
-       return rc;
+       return zpci_mod_fc(req, &fib);
 }
 
 struct mod_pci_args {
@@ -152,22 +143,14 @@ struct mod_pci_args {
 static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
 {
        u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
-       struct zpci_fib *fib;
-       int rc;
-
-       /* The FIB must be available even if it's not used */
-       fib = (void *) get_zeroed_page(GFP_KERNEL);
-       if (!fib)
-               return -ENOMEM;
+       struct zpci_fib fib = {0};
 
-       fib->pba = args->base;
-       fib->pal = args->limit;
-       fib->iota = args->iota;
-       fib->fmb_addr = args->fmb_addr;
+       fib.pba = args->base;
+       fib.pal = args->limit;
+       fib.iota = args->iota;
+       fib.fmb_addr = args->fmb_addr;
 
-       rc = zpci_mod_fc(req, fib);
-       free_page((unsigned long) fib);
-       return rc;
+       return zpci_mod_fc(req, &fib);
 }
 
 /* Modify PCI: Register I/O address translation parameters */
@@ -424,7 +407,6 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
        struct msi_msg msg;
        int rc;
 
-       pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
        if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
                return -EINVAL;
        msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
@@ -489,7 +471,6 @@ out_msi:
 out_si:
        airq_iv_free_bit(zpci_aisb_iv, aisb);
 out:
-       dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
        return rc;
 }
 
@@ -499,14 +480,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
        struct msi_desc *msi;
        int rc;
 
-       pr_info("%s: on pdev: %p\n", __func__, pdev);
-
        /* Disable adapter interrupts */
        rc = zpci_clear_airq(zdev);
-       if (rc) {
-               dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
+       if (rc)
                return;
-       }
 
        /* Release MSI interrupts */
        list_for_each_entry(msi, &pdev->msi_list, list) {
@@ -625,8 +602,11 @@ static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned lo
        r->name = name;
 
        rc = request_resource(&iomem_resource, r);
-       if (rc)
-               pr_debug("request resource %pR failed\n", r);
+       if (rc) {
+               kfree(r->name);
+               kfree(r);
+               return ERR_PTR(-ENOMEM);
+       }
        return r;
 }
 
@@ -708,6 +688,47 @@ void pcibios_disable_device(struct pci_dev *pdev)
        zdev->pdev = NULL;
 }
 
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+static int zpci_restore(struct device *dev)
+{
+       struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+       int ret = 0;
+
+       if (zdev->state != ZPCI_FN_STATE_ONLINE)
+               goto out;
+
+       ret = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
+       if (ret)
+               goto out;
+
+       zpci_map_resources(zdev);
+       zpci_register_ioat(zdev, 0, zdev->start_dma + PAGE_OFFSET,
+                          zdev->start_dma + zdev->iommu_size - 1,
+                          (u64) zdev->dma_table);
+
+out:
+       return ret;
+}
+
+static int zpci_freeze(struct device *dev)
+{
+       struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
+
+       if (zdev->state != ZPCI_FN_STATE_ONLINE)
+               return 0;
+
+       zpci_unregister_ioat(zdev, 0);
+       return clp_disable_fh(zdev);
+}
+
+struct dev_pm_ops pcibios_pm_ops = {
+       .thaw_noirq = zpci_restore,
+       .freeze_noirq = zpci_freeze,
+       .restore_noirq = zpci_restore,
+       .poweroff_noirq = zpci_freeze,
+};
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+
 static int zpci_scan_bus(struct zpci_dev *zdev)
 {
        struct resource *res;
@@ -781,7 +802,6 @@ int zpci_enable_device(struct zpci_dev *zdev)
        rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
        if (rc)
                goto out;
-       pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);
 
        rc = zpci_dma_init_device(zdev);
        if (rc)
@@ -901,10 +921,6 @@ static int __init pci_base_init(void)
            || !test_facility(71) || !test_facility(72))
                return 0;
 
-       pr_info("Probing PCI hardware: PCI:%d  SID:%d  AEN:%d\n",
-               test_facility(69), test_facility(70),
-               test_facility(71));
-
        rc = zpci_debug_init();
        if (rc)
                goto out;
index 475563c3d1e40d401417c6503946f1933457980d..84147984224a996fca36b72d3aff574df229f1c8 100644 (file)
 #include <asm/pci_debug.h>
 #include <asm/pci_clp.h>
 
+static inline void zpci_err_clp(unsigned int rsp, int rc)
+{
+       struct {
+               unsigned int rsp;
+               int rc;
+       } __packed data = {rsp, rc};
+
+       zpci_err_hex(&data, sizeof(data));
+}
+
 /*
  * Call Logical Processor
  * Retry logic is handled by the caller.
@@ -54,7 +64,6 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
        zdev->msi_addr = response->msia;
        zdev->fmb_update = response->mui;
 
-       pr_debug("Supported number of MSI vectors: %u\n", response->noi);
        switch (response->version) {
        case 1:
                zdev->max_bus_speed = PCIE_SPEED_5_0GT;
@@ -84,8 +93,8 @@ static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                clp_store_query_pci_fngrp(zdev, &rrb->response);
        else {
-               pr_err("Query PCI FNGRP failed with response: %x  cc: %d\n",
-                       rrb->response.hdr.rsp, rc);
+               zpci_err("Q PCI FGRP:\n");
+               zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
@@ -131,8 +140,8 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
                if (rrb->response.pfgid)
                        rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
        } else {
-               pr_err("Query PCI failed with response: %x  cc: %d\n",
-                        rrb->response.hdr.rsp, rc);
+               zpci_err("Q PCI FN:\n");
+               zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
 out:
@@ -206,8 +215,8 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
        if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
                *fh = rrb->response.fh;
        else {
-               zpci_dbg(0, "SPF fh:%x, cc:%d, resp:%x\n", *fh, rc,
-                        rrb->response.hdr.rsp);
+               zpci_err("Set PCI FN:\n");
+               zpci_err_clp(rrb->response.hdr.rsp, rc);
                rc = -EIO;
        }
        clp_free_block(rrb);
@@ -262,8 +271,8 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
                /* Get PCI function handle list */
                rc = clp_instr(rrb);
                if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
-                       pr_err("List PCI failed with response: 0x%x  cc: %d\n",
-                               rrb->response.hdr.rsp, rc);
+                       zpci_err("List PCI FN:\n");
+                       zpci_err_clp(rrb->response.hdr.rsp, rc);
                        rc = -EIO;
                        goto out;
                }
@@ -273,17 +282,11 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
 
                entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
                        rrb->response.entry_size;
-               pr_info("Detected number of PCI functions: %u\n", entries);
 
-               /* Store the returned resume token as input for the next call */
                resume_token = rrb->response.resume_token;
-
                for (i = 0; i < entries; i++)
                        cb(&rrb->response.fh_list[i]);
        } while (resume_token);
-
-       pr_debug("Maximum number of supported PCI functions: %u\n",
-               rrb->response.max_fn);
 out:
        return rc;
 }
index 7e5573acb06375791ef82582d400ede99f8fe069..9b83d080902dfb6d406c1fb026c95414d7494a53 100644 (file)
@@ -145,10 +145,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
                return -EINVAL;
 
        spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
-       if (!zdev->dma_table) {
-               dev_err(&zdev->pdev->dev, "Missing DMA table\n");
+       if (!zdev->dma_table)
                goto no_refresh;
-       }
 
        for (i = 0; i < nr_pages; i++) {
                dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
@@ -280,11 +278,8 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
        size = nr_pages * PAGE_SIZE;
 
        dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
-       if (dma_addr + size > zdev->end_dma) {
-               dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
-                        dma_addr, size, zdev->end_dma);
+       if (dma_addr + size > zdev->end_dma)
                goto out_free;
-       }
 
        if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
                flags |= ZPCI_TABLE_PROTECTED;
@@ -297,7 +292,8 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 out_free:
        dma_free_iommu(zdev, iommu_page_index, nr_pages);
 out_err:
-       dev_err(dev, "Failed to map addr: %lx\n", pa);
+       zpci_err("map error:\n");
+       zpci_err_hex(&pa, sizeof(pa));
        return DMA_ERROR_CODE;
 }
 
@@ -312,8 +308,10 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr = dma_addr & PAGE_MASK;
        if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
-                            ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
-               dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);
+                            ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
+               zpci_err("unmap error:\n");
+               zpci_err_hex(&dma_addr, sizeof(dma_addr));
+       }
 
        atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
        iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
index 0aecaf9548458e4a88dd450edd65f78d3b1d0449..278e671ec9ac22b977c0598aaa31552d3e32b567 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <asm/pci_debug.h>
 
 /* Content Code Description for PCI Function Error */
 struct zpci_ccdf_err {
@@ -41,25 +42,15 @@ struct zpci_ccdf_avail {
        u16 pec;                        /* PCI event code */
 } __packed;
 
-static void zpci_event_log_err(struct zpci_ccdf_err *ccdf)
-{
-       struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
-
-       zpci_err("SEI error CCD:\n");
-       zpci_err_hex(ccdf, sizeof(*ccdf));
-       dev_err(&zdev->pdev->dev, "event code: 0x%x\n", ccdf->pec);
-}
-
 static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
 {
        struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+       struct pci_dev *pdev = zdev ? zdev->pdev : NULL;
 
-       pr_err("%s%s: availability event: fh: 0x%x  fid: 0x%x  event code: 0x%x  reason:",
-               (zdev) ? dev_driver_string(&zdev->pdev->dev) : "?",
-               (zdev) ? dev_name(&zdev->pdev->dev) : "?",
-               ccdf->fh, ccdf->fid, ccdf->pec);
-       print_hex_dump(KERN_CONT, "ccdf", DUMP_PREFIX_OFFSET,
-                      16, 1, ccdf, sizeof(*ccdf), false);
+       pr_info("%s: Event 0x%x reconfigured PCI function 0x%x\n",
+               pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
+       zpci_err("avail CCDF:\n");
+       zpci_err_hex(ccdf, sizeof(*ccdf));
 
        switch (ccdf->pec) {
        case 0x0301:
@@ -79,14 +70,16 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
 void zpci_event_error(void *data)
 {
        struct zpci_ccdf_err *ccdf = data;
-       struct zpci_dev *zdev;
+       struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+
+       zpci_err("error CCDF:\n");
+       zpci_err_hex(ccdf, sizeof(*ccdf));
 
-       zpci_event_log_err(ccdf);
-       zdev = get_zdev_by_fid(ccdf->fid);
-       if (!zdev) {
-               pr_err("Error event for unknown fid: %x", ccdf->fid);
+       if (!zdev)
                return;
-       }
+
+       pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
+              pci_name(zdev->pdev), ccdf->pec, ccdf->fid);
 }
 
 void zpci_event_availability(void *data)
index 224f4bc9925ece7f38c85a145cf615aa78082569..f56d7f8b6f64b77a314ebb00c1e79dfe3a6f572c 100644 (file)
@@ -1,5 +1,6 @@
 config SUPERH
        def_bool y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select EXPERT
        select CLKDEV_LOOKUP
        select HAVE_IDE if HAS_IOPORT
index ec9ad593c3da743bacbd5875a577709a94e3333b..01a38696137e9952c9e07f91669685b3b349b50d 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/kdebug.h>
 #include <linux/types.h>
+#include <cpu/ubc.h>
 
 struct arch_hw_breakpoint {
        char            *name; /* Contains name of the symbol to set bkpt */
@@ -15,17 +16,6 @@ struct arch_hw_breakpoint {
        u16             type;
 };
 
-enum {
-       SH_BREAKPOINT_READ      = (1 << 1),
-       SH_BREAKPOINT_WRITE     = (1 << 2),
-       SH_BREAKPOINT_RW        = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
-
-       SH_BREAKPOINT_LEN_1     = (1 << 12),
-       SH_BREAKPOINT_LEN_2     = (1 << 13),
-       SH_BREAKPOINT_LEN_4     = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
-       SH_BREAKPOINT_LEN_8     = (1 << 14),
-};
-
 struct sh_ubc {
        const char      *name;
        unsigned int    num_events;
diff --git a/arch/sh/include/cpu-common/cpu/ubc.h b/arch/sh/include/cpu-common/cpu/ubc.h
new file mode 100644 (file)
index 0000000..b604619
--- /dev/null
@@ -0,0 +1,17 @@
+#ifndef __ARCH_SH_CPU_UBC_H__
+#define __ARCH_SH_CPU_UBC_H__
+
+enum {
+       SH_BREAKPOINT_READ      = (1 << 1),
+       SH_BREAKPOINT_WRITE     = (1 << 2),
+       SH_BREAKPOINT_RW        = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+       SH_BREAKPOINT_LEN_1     = (1 << 12),
+       SH_BREAKPOINT_LEN_2     = (1 << 13),
+       SH_BREAKPOINT_LEN_4     = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+       SH_BREAKPOINT_LEN_8     = (1 << 14),
+};
+
+#define UBC_64BIT      1
+
+#endif /* __ARCH_SH_CPU_UBC_H__ */
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h
new file mode 100644 (file)
index 0000000..3371f90
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __ARCH_SH_CPU_UBC_H__
+#define __ARCH_SH_CPU_UBC_H__
+
+enum {
+       SH_BREAKPOINT_READ      = (1 << 2),
+       SH_BREAKPOINT_WRITE     = (1 << 3),
+       SH_BREAKPOINT_RW        = SH_BREAKPOINT_READ | SH_BREAKPOINT_WRITE,
+
+       SH_BREAKPOINT_LEN_1     = (1 << 0),
+       SH_BREAKPOINT_LEN_2     = (1 << 1),
+       SH_BREAKPOINT_LEN_4     = SH_BREAKPOINT_LEN_1 | SH_BREAKPOINT_LEN_2,
+};
+
+#endif /* __ARCH_SH_CPU_UBC_H__ */
index 990195d9845607bfcca4a8b3cfcd05c41df582bc..92f0da4c86a7533e18269e6f5d439130e1a71815 100644 (file)
@@ -22,3 +22,4 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7264)   := pinmux-sh7264.o
 pinmux-$(CONFIG_CPU_SUBTYPE_SH7269)    := pinmux-sh7269.o
 
 obj-$(CONFIG_GPIOLIB)                  += $(pinmux-y)
+obj-$(CONFIG_HAVE_HW_BREAKPOINT)       += ubc.o
diff --git a/arch/sh/kernel/cpu/sh2a/ubc.c b/arch/sh/kernel/cpu/sh2a/ubc.c
new file mode 100644 (file)
index 0000000..ef95a9b
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * arch/sh/kernel/cpu/sh2a/ubc.c
+ *
+ * On-chip UBC support for SH-2A CPUs.
+ *
+ * Copyright (C) 2009 - 2010  Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/hw_breakpoint.h>
+
+#define UBC_BAR(idx)   (0xfffc0400 + (0x10 * idx))
+#define UBC_BAMR(idx)  (0xfffc0404 + (0x10 * idx))
+#define UBC_BBR(idx)   (0xfffc04A0 + (0x10 * idx))
+#define UBC_BDR(idx)   (0xfffc0408 + (0x10 * idx))
+#define UBC_BDMR(idx)  (0xfffc040C + (0x10 * idx))
+
+#define UBC_BRCR       0xfffc04C0
+
+/* BBR */
+#define UBC_BBR_UBID   (1 << 13)     /* User Break Interrupt Disable */
+#define UBC_BBR_DBE    (1 << 12)     /* Data Break Enable */
+#define UBC_BBR_CD_C   (1 << 6)      /* C Bus Cycle */
+#define UBC_BBR_CD_I   (2 << 6)      /* I Bus Cycle */
+#define UBC_BBR_ID_I   (1 << 4)      /* Break Condition is instruction fetch cycle */
+#define UBC_BBR_ID_D   (2 << 4)      /* Break Condition is data access cycle */
+#define UBC_BBR_ID_ID  (3 << 4)      /* Break Condition is instruction fetch or data access cycle */
+
+#define UBC_CRR_BIE    (1 << 0)
+
+/* CBR */
+#define UBC_CBR_CE     (1 << 0)
+
+static struct sh_ubc sh2a_ubc;
+
+static void sh2a_ubc_enable(struct arch_hw_breakpoint *info, int idx)
+{
+       __raw_writel(UBC_BBR_DBE | UBC_BBR_CD_C | UBC_BBR_ID_ID |
+                    info->len | info->type, UBC_BBR(idx));
+       __raw_writel(info->address, UBC_BAR(idx));
+}
+
+static void sh2a_ubc_disable(struct arch_hw_breakpoint *info, int idx)
+{
+       __raw_writel(UBC_BBR_UBID, UBC_BBR(idx));
+       __raw_writel(0, UBC_BAR(idx));
+}
+
+static void sh2a_ubc_enable_all(unsigned long mask)
+{
+       int i;
+
+       for (i = 0; i < sh2a_ubc.num_events; i++)
+               if (mask & (1 << i))
+                       __raw_writel(__raw_readl(UBC_BBR(i)) & ~UBC_BBR_UBID,
+                                    UBC_BBR(i));
+}
+
+static void sh2a_ubc_disable_all(void)
+{
+       int i;
+       
+       for (i = 0; i < sh2a_ubc.num_events; i++)
+               __raw_writel(__raw_readl(UBC_BBR(i)) | UBC_BBR_UBID,
+                            UBC_BBR(i));
+}
+
+static unsigned long sh2a_ubc_active_mask(void)
+{
+       unsigned long active = 0;
+       int i;
+
+       for (i = 0; i < sh2a_ubc.num_events; i++)
+               if (!(__raw_readl(UBC_BBR(i)) & UBC_BBR_UBID))
+                       active |= (1 << i);
+
+       return active;
+}
+
+static unsigned long sh2a_ubc_triggered_mask(void)
+{
+       unsigned int ret, mask;
+       
+       mask = 0;
+       ret = __raw_readl(UBC_BRCR);
+       if ((ret & (1 << 15)) || (ret & (1 << 13))) {
+               mask |= (1 << 0); /* Match condition for channel 0 */
+       } else 
+               mask &= ~(1 << 0);
+       
+       if ((ret & (1 << 14)) || (ret & (1 << 12))) {
+               mask |= (1 << 1); /* Match condition for channel 1 */
+       } else 
+               mask &= ~(1 << 1);
+
+       return mask;
+}
+
+static void sh2a_ubc_clear_triggered_mask(unsigned long mask)
+{
+       if (mask & (1 << 0)) /* Channel 0 satisfied break condition */
+               __raw_writel(__raw_readl(UBC_BRCR) &
+                            ~((1 << 15) | (1 << 13)), UBC_BRCR);
+       
+       if (mask & (1 << 1)) /* Channel 1 satisfied break condition */
+               __raw_writel(__raw_readl(UBC_BRCR) &
+                            ~((1 << 14) | (1 << 12)), UBC_BRCR);
+}
+
+static struct sh_ubc sh2a_ubc = {
+       .name                   = "SH-2A",
+       .num_events             = 2,
+       .trap_nr                = 0x1e0,
+       .enable                 = sh2a_ubc_enable,
+       .disable                = sh2a_ubc_disable,
+       .enable_all             = sh2a_ubc_enable_all,
+       .disable_all            = sh2a_ubc_disable_all,
+       .active_mask            = sh2a_ubc_active_mask,
+       .triggered_mask         = sh2a_ubc_triggered_mask,
+       .clear_triggered_mask   = sh2a_ubc_clear_triggered_mask,
+};
+
+static int __init sh2a_ubc_init(void)
+{
+       struct clk *ubc_iclk = clk_get(NULL, "ubc0");
+       int i;
+
+       /*
+        * The UBC MSTP bit is optional, as not all platforms will have
+        * it. Just ignore it if we can't find it.
+        */
+       if (IS_ERR(ubc_iclk))
+               ubc_iclk = NULL;
+
+       clk_enable(ubc_iclk);
+
+       for (i = 0; i < sh2a_ubc.num_events; i++) {
+               __raw_writel(0, UBC_BAMR(i));
+               __raw_writel(0, UBC_BBR(i));
+       }
+
+       clk_disable(ubc_iclk);
+
+       sh2a_ubc.clk = ubc_iclk;
+
+       return register_sh_ubc(&sh2a_ubc);
+}
+arch_initcall(sh2a_ubc_init);
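The file above only provides the low-level UBC backend; for context, a minimal sketch of how such a backend is normally exercised through the generic hw_breakpoint/perf layer. The watched variable, handler, and function names are hypothetical and not part of this merge.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static int sample_watched;                      /* hypothetical data to watch */
static struct perf_event * __percpu *sample_hbp;

static void sample_hbp_handler(struct perf_event *bp,
                               struct perf_sample_data *data,
                               struct pt_regs *regs)
{
        pr_info("hw breakpoint hit at 0x%llx\n", bp->attr.bp_addr);
}

static int __init sample_hbp_init(void)
{
        struct perf_event_attr attr;

        hw_breakpoint_init(&attr);
        attr.bp_addr = (unsigned long)&sample_watched;
        attr.bp_len  = HW_BREAKPOINT_LEN_4;
        attr.bp_type = HW_BREAKPOINT_W;

        sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler, NULL);
        if (IS_ERR((void __force *)sample_hbp))
                return PTR_ERR((void __force *)sample_hbp);
        return 0;
}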
index f9173766ec4be2393e4207efe7092741f35fc271..ac4922ad3c148d3fc0883afe9ccb8a50d078fc76 100644 (file)
@@ -113,9 +113,11 @@ static int get_hbp_len(u16 hbp_len)
        case SH_BREAKPOINT_LEN_4:
                len_in_bytes = 4;
                break;
+#ifdef UBC_64BIT
        case SH_BREAKPOINT_LEN_8:
                len_in_bytes = 8;
                break;
+#endif
        }
        return len_in_bytes;
 }
@@ -149,9 +151,11 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
        case SH_BREAKPOINT_LEN_4:
                *gen_len = HW_BREAKPOINT_LEN_4;
                break;
+#ifdef UBC_64BIT
        case SH_BREAKPOINT_LEN_8:
                *gen_len = HW_BREAKPOINT_LEN_8;
                break;
+#endif
        default:
                return -EINVAL;
        }
@@ -190,9 +194,11 @@ static int arch_build_bp_info(struct perf_event *bp)
        case HW_BREAKPOINT_LEN_4:
                info->len = SH_BREAKPOINT_LEN_4;
                break;
+#ifdef UBC_64BIT
        case HW_BREAKPOINT_LEN_8:
                info->len = SH_BREAKPOINT_LEN_8;
                break;
+#endif
        default:
                return -EINVAL;
        }
@@ -240,9 +246,11 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        case SH_BREAKPOINT_LEN_4:
                align = 3;
                break;
+#ifdef UBC_64BIT
        case SH_BREAKPOINT_LEN_8:
                align = 7;
                break;
+#endif
        default:
                return ret;
        }
index 78c4fdb91bc57a425e9fd5978260088ef5072d34..258464973bcb02a040e533b9fd8a1e78212684ca 100644 (file)
@@ -12,6 +12,7 @@ config 64BIT
 config SPARC
        bool
        default y
+       select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
        select OF
        select OF_PROMTREE
        select HAVE_IDE
index 5080d16a832ffec0c813b30cf546340e5047a7fd..ec2e2e2aba7d8f15419aa12bc6f790993f8c54d4 100644 (file)
@@ -9,7 +9,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-               asm goto("1:\n\t"
+               asm_volatile_goto("1:\n\t"
                         "nop\n\t"
                         "nop\n\t"
                         ".pushsection __jump_table,  \"aw\"\n\t"
index 4e1d66c3ce71b0b89bc2acb86643b3cebd445a06..0f21e9a5ca18879f5d919aa0fa05532204244eba 100644 (file)
@@ -72,6 +72,8 @@
 
 #define SO_BUSY_POLL           0x0030
 
+#define SO_MAX_PACING_RATE     0x0031
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION             0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT       0x5002
index 9c7be59e6f5ad3ad360facd10c6a2c35fd3bde75..218b6b23c378f888ef6ab0accf7ee4882e2911e2 100644 (file)
@@ -808,4 +808,5 @@ void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter)
                module_free(NULL, fp->bpf_func);
+       kfree(fp);
 }
index d385eaadece7a68fe603eeb625b3caedb3c4c345..70979846076332bf7771d8517afdcc3415518b17 100644 (file)
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  *
  * Atomically sets @v to @i and returns old @v
  */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
 {
        return xchg64(&v->counter, n);
 }
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
  * Atomically checks if @v holds @o and replaces it with @n if so.
  * Returns the old value at @v.
  */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+                                       long long n)
 {
        return cmpxchg64(&v->counter, o, n);
 }
index 0d0395b1b1529d454f772ebb61e240d7fb41f52a..1ad4a1f7d42b8aa47eadbbb4cdcf2e50dc630fd0 100644 (file)
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 /* A 64bit atomic type */
 
 typedef struct {
-       u64 __aligned(8) counter;
+       long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
        /*
         * Requires an atomic op to read both 32-bit parts consistently.
         * Casting away const is safe since the atomic support routines
         * do not write to memory if the value has not been modified.
         */
-       return _atomic64_xchg_add((u64 *)&v->counter, 0);
+       return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
        _atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
        smp_mb();  /* barrier for proper semantics */
        return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+                                       long long u)
 {
        smp_mb();  /* barrier for proper semantics */
        return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
  * atomic64_set() can't be just a raw store, since it would be lost if it
  * fell between the load and store of one of the other atomic ops.
  */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
        _atomic64_xchg(&v->counter, n);
 }
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-                                     int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+                                       long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+                                       long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+                                       int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
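A brief usage sketch of the converted 32-bit tile atomic64 interface above, assuming nothing beyond the accessors it declares: callers now pass and receive plain long long values instead of u64.

#include <linux/atomic.h>

static atomic64_t sample_total = ATOMIC64_INIT(0);

/* Hypothetical caller: accumulate a byte count and read it back atomically. */
static long long sample_account(long long nbytes)
{
        atomic64_add(nbytes, &sample_total);
        return atomic64_read(&sample_total);
}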
index 4001d5eab4bb7f1fb59e6e62c924bd71b4b10e2f..0ccda3c425be0d3b19a27c13b68ad6b6eaa37525 100644 (file)
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n)                                                   \
        ({                                                              \
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
                if (sizeof(*(ptr)) != 4)                                \
                        __cmpxchg_called_with_bad_pointer();            \
                smp_mb();                                               \
-               (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+               (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o,     \
+                                               (int)n);                \
        })
 
 #define xchg64(ptr, n)                                                 \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
                if (sizeof(*(ptr)) != 8)                                \
                        __xchg_called_with_bad_pointer();               \
                smp_mb();                                               \
-               (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
+               (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr),      \
+                                               (long long)(n));        \
        })
 
 #define cmpxchg64(ptr, o, n)                                           \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
                if (sizeof(*(ptr)) != 8)                                \
                        __cmpxchg_called_with_bad_pointer();            \
                smp_mb();                                               \
-               (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
+               (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr,     \
+                                       (long long)o, (long long)n);    \
        })
 
 #else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
                switch (sizeof(*(ptr))) {                               \
                case 4:                                                 \
                        __x = (typeof(__x))(unsigned long)              \
-                               __insn_exch4((ptr), (u32)(unsigned long)(n)); \
+                               __insn_exch4((ptr),                     \
+                                       (u32)(unsigned long)(n));       \
                        break;                                          \
                case 8:                                                 \
-                       __x = (typeof(__x))                     \
+                       __x = (typeof(__x))                             \
                                __insn_exch((ptr), (unsigned long)(n)); \
                        break;                                          \
                default:                                                \
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
                switch (sizeof(*(ptr))) {                               \
                case 4:                                                 \
                        __x = (typeof(__x))(unsigned long)              \
-                               __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+                               __insn_cmpexch4((ptr),                  \
+                                       (u32)(unsigned long)(n));       \
                        break;                                          \
                case 8:                                                 \
-                       __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
+                       __x = (typeof(__x))__insn_cmpexch((ptr),        \
+                                               (long long)(n));        \
                        break;                                          \
                default:                                                \
                        __cmpxchg_called_with_bad_pointer();            \
index 63294f5a8efbca7820c891232c7f79303213e653..4f7ae39fa2022c537770bd10e1cbb0e231249b53 100644 (file)
 #ifndef _ASM_TILE_PERCPU_H
 #define _ASM_TILE_PERCPU_H
 
-register unsigned long __my_cpu_offset __asm__("tp");
-#define __my_cpu_offset __my_cpu_offset
-#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
+register unsigned long my_cpu_offset_reg asm("tp");
+
+#ifdef CONFIG_PREEMPT
+/*
+ * For full preemption, we can't just use the register variable
+ * directly, since we need barrier() to hazard against it, causing the
+ * compiler to reload anything computed from a previous "tp" value.
+ * But we also don't want to use volatile asm, since we'd like the
+ * compiler to be able to cache the value across multiple percpu reads.
+ * So we use a fake stack read as a hazard against barrier().
+ * The 'U' constraint is like 'm' but disallows postincrement.
+ */
+static inline unsigned long __my_cpu_offset(void)
+{
+       unsigned long tp;
+       register unsigned long *sp asm("sp");
+       asm("move %0, tp" : "=r" (tp) : "U" (*sp));
+       return tp;
+}
+#define __my_cpu_offset __my_cpu_offset()
+#else
+/*
+ * We don't need to hazard against barrier() since "tp" doesn't ever
+ * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
+ * changes at function call points, at which we are already re-reading
+ * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
+ */
+#define __my_cpu_offset my_cpu_offset_reg
+#endif
+
+#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
 
 #include <asm-generic/percpu.h>
 
index df27a1fd94a310a759612a5c6706b1011a94f787..531f4c365351119eeb249904cc01fd60307931b4 100644 (file)
@@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
                0,
                "udn",
                LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
-               __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
+               __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
                NULL
        },
 #ifndef __tilepro__
@@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
                1,  /* disabled pending hypervisor support */
                "idn",
                LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
-               __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
+               __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
                NULL
        },
        {  /* access to user-space IPI */
@@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
                0,
                "ipi",
                LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
-               __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
+               __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
                NULL
        },
 #endif
index 088d5c141e681084ce030165f303d15f4b69876f..2cbe6d5dd6b04db3ea071fb12c3dbb1866dc818e 100644 (file)
@@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
        }
        bzt     r28, 1f
        bnz     r29, 1f
+       /* Disable interrupts explicitly for preemption. */
+       IRQ_DISABLE(r20,r21)
+       TRACE_IRQS_OFF
        jal     preempt_schedule_irq
        FEEDBACK_REENTER(interrupt_return)
 1:
index ec755d3f373467ebe271dd1743cc6f56b6f9d7da..b8fc497f24370c0d7c17536664cb19c40923e5bf 100644 (file)
@@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
        }
        beqzt   r28, 1f
        bnez    r29, 1f
+       /* Disable interrupts explicitly for preemption. */
+       IRQ_DISABLE(r20,r21)
+       TRACE_IRQS_OFF
        jal     preempt_schedule_irq
        FEEDBACK_REENTER(interrupt_return)
 1:
index 362284af3afd31ab39081447b6f1295a866c0ab9..c93977a62116dfecd12a74376e307c89c7875b91 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mmzone.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
+#include <linux/string.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
 #include <asm/ucontext.h>
@@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
        }
 
        if (vma->vm_file) {
-               char *s;
                p = d_path(&vma->vm_file->f_path, buf, bufsize);
                if (IS_ERR(p))
                        p = "?";
-               s = strrchr(p, '/');
-               if (s)
-                       p = s+1;
+               name = kbasename(p);
        } else {
-               p = "anon";
+               name = "anon";
        }
 
        /* Generate a string description of the vma info. */
-       namelen = strlen(p);
+       namelen = strlen(name);
        remaining = (bufsize - 1) - namelen;
-       memmove(buf, p, namelen);
+       memmove(buf, name, namelen);
        snprintf(buf + namelen, remaining, "[%lx+%lx] ",
                 vma->vm_start, vma->vm_end - vma->vm_start);
 }
index 759efa337be88ebaf72cd1047151a2489a7745cb..c89b211fd9e7c093ed26e932432b329974003a04 100644 (file)
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
 EXPORT_SYMBOL(_atomic_xor);
 
 
-u64 _atomic64_xchg(u64 *v, u64 n)
+long long _atomic64_xchg(long long *v, long long n)
 {
        return __atomic64_xchg(v, __atomic_setup(v), n);
 }
 EXPORT_SYMBOL(_atomic64_xchg);
 
-u64 _atomic64_xchg_add(u64 *v, u64 i)
+long long _atomic64_xchg_add(long long *v, long long i)
 {
        return __atomic64_xchg_add(v, __atomic_setup(v), i);
 }
 EXPORT_SYMBOL(_atomic64_xchg_add);
 
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
 {
        /*
         * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
 }
 EXPORT_SYMBOL(_atomic64_xchg_add_unless);
 
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
+long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 {
        return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
 }
index 82cdd8906f3d6ff70e3317a78ae00c08a0bed39e..a7ba27b2752be0923644eba19035ba3a97a6c792 100644 (file)
@@ -1,5 +1,6 @@
 config UNICORE32
        def_bool y
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_MEMBLOCK
        select HAVE_GENERIC_DMA_COHERENT
        select HAVE_DMA_ATTRS
index ee2fb9d37745887eb16255cbd30adacbdcc1ecae..e0836de76f3c90dcb57ec0e7bbfb293cb7d0ca99 100644 (file)
@@ -22,6 +22,7 @@ config X86_64
 config X86
        def_bool y
        select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+       select ARCH_MIGHT_HAVE_PC_PARPORT
        select HAVE_AOUT if X86_32
        select HAVE_UNSTABLE_SCHED_CLOCK
        select ARCH_SUPPORTS_NUMA_BALANCING
@@ -254,10 +255,6 @@ config ARCH_HWEIGHT_CFLAGS
        default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
        default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
 
-config ARCH_CPU_PROBE_RELEASE
-       def_bool y
-       depends on HOTPLUG_CPU
-
 config ARCH_SUPPORTS_UPROBES
        def_bool y
 
@@ -860,7 +857,7 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
        bool "Local APIC support on uniprocessors"
-       depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+       depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
        ---help---
          A local APIC (Advanced Programmable Interrupt Controller) is an
          integrated interrupt controller in the CPU. If you have a single-CPU
@@ -885,11 +882,11 @@ config X86_UP_IOAPIC
 
 config X86_LOCAL_APIC
        def_bool y
-       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
+       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
 
 config X86_IO_APIC
        def_bool y
-       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
+       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
 
 config X86_VISWS_APIC
        def_bool y
@@ -1033,6 +1030,7 @@ config X86_REBOOTFIXUPS
 
 config MICROCODE
        tristate "CPU microcode loading support"
+       depends on CPU_SUP_AMD || CPU_SUP_INTEL
        select FW_LOADER
        ---help---
 
index 7d6ba9db1be99696784aeba62343eeebff9c32dd..e0fc24db234ac1263c96fdc18d0bf4f553647198 100644 (file)
@@ -3,8 +3,9 @@
 #
 
 avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
+avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
+                               $(comma)4)$(comma)%ymm2,yes,no)
 
-obj-$(CONFIG_CRYPTO_ABLK_HELPER_X86) += ablk_helper.o
 obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
 
 obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
index f80e668785c0bf5f36383e5ab49495c24f93f843..835488b745eed5ce9484a40dfe56bf839f51d33d 100644 (file)
@@ -34,7 +34,7 @@
 #include <asm/cpu_device_id.h>
 #include <asm/i387.h>
 #include <asm/crypto/aes.h>
-#include <asm/crypto/ablk_helper.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
 #include <linux/workqueue.h>
index 414fe5d7946be077c25ba19160496c4bf910b1ab..4209a76fcdaad4225fb9c15fb954dfb5ad495c74 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
@@ -21,7 +22,6 @@
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/crypto/camellia.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>
 
 #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
index 37fd0c0a81ea8861f30a649b01cee8a6c11db4e5..87a041a10f4ac1fe7cceccc82eb24fcb03f92fa4 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
@@ -21,7 +22,6 @@
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/crypto/camellia.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>
 
 #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
index c6631813dc115c609e186044790aa5461cb6f0c7..e6a3700489b94119c514177f4a2a0b52e5423c13 100644 (file)
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/cast5.h>
 #include <crypto/cryptd.h>
 #include <crypto/ctr.h>
 #include <asm/xcr.h>
 #include <asm/xsave.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>
 
 #define CAST5_PARALLEL_BLOCKS 16
index 8d0dfb86a5593554e0d536a48c28572ecc92c91d..09f3677393e4b888895c83bae05d114df0d5184c 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/cast6.h>
 #include <crypto/cryptd.h>
@@ -37,7 +38,6 @@
 #include <crypto/xts.h>
 #include <asm/xcr.h>
 #include <asm/xsave.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>
 
 #define CAST6_PARALLEL_BLOCKS 8
index 23aabc6c20a5376fa81cf49ff9893ec76b6cdf05..2fae489b15246525991e6e606b8e01923e298263 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
@@ -22,7 +23,6 @@
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/crypto/serpent-avx.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>
 
 #define SERPENT_AVX2_PARALLEL_BLOCKS 16
index 9ae83cf8d21e987e2e3bf9656a51ecaf61644427..ff487087097254f8368d035db08d02472dc9b76e 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/serpent.h>
 #include <crypto/cryptd.h>
@@ -38,7 +39,6 @@
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/crypto/serpent-avx.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>
 
 /* 8-way parallel cipher functions */
index 97a356ece24d2b74d18090760e988c45d2bc914a..8c95f86373061680f4d8f4418d4da77f9123c111 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/serpent.h>
 #include <crypto/cryptd.h>
@@ -42,7 +43,6 @@
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
 #include <asm/crypto/serpent-sse2.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>
 
 static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
index 50226c4b86ed338da70cb285a53e1e86d210d5f4..f248546da1caa956014dfd9648814ad37f2ca8d3 100644 (file)
@@ -281,7 +281,7 @@ static int __init sha256_ssse3_mod_init(void)
        /* allow AVX to override SSSE3, it's a little faster */
        if (avx_usable()) {
 #ifdef CONFIG_AS_AVX2
-               if (boot_cpu_has(X86_FEATURE_AVX2))
+               if (boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_BMI2))
                        sha256_transform_asm = sha256_transform_rorx;
                else
 #endif
@@ -319,4 +319,4 @@ MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
 
 MODULE_ALIAS("sha256");
-MODULE_ALIAS("sha384");
+MODULE_ALIAS("sha224");
index a62ba541884ef1a15da1082d9d2ca48296c563ec..4e3c665be1296f16cedde6f402249316b75bc4ed 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/crypto.h>
 #include <linux/err.h>
+#include <crypto/ablk_helper.h>
 #include <crypto/algapi.h>
 #include <crypto/twofish.h>
 #include <crypto/cryptd.h>
@@ -39,7 +40,6 @@
 #include <asm/xcr.h>
 #include <asm/xsave.h>
 #include <asm/crypto/twofish.h>
-#include <asm/crypto/ablk_helper.h>
 #include <asm/crypto/glue_helper.h>
 #include <crypto/scatterwalk.h>
 #include <linux/workqueue.h>
index b1977bad5435e5342af7e548700f08c1a04ff580..c8c1e700c26ed3426f0086c594a645bcc49261d9 100644 (file)
@@ -26,6 +26,7 @@
 #include <acpi/pdc_intel.h>
 
 #include <asm/numa.h>
+#include <asm/fixmap.h>
 #include <asm/processor.h>
 #include <asm/mmu.h>
 #include <asm/mpspec.h>
index d3f5c63078d812e2f6220cfa5ad322aae920827c..89270b4318db8b97ac467717b38863fa549435ad 100644 (file)
@@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
                 * Catch too early usage of this before alternatives
                 * have run.
                 */
-               asm goto("1: jmp %l[t_warn]\n"
+               asm_volatile_goto("1: jmp %l[t_warn]\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
                         " .long 1b - .\n"
@@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
 
 #endif
 
-               asm goto("1: jmp %l[t_no]\n"
+               asm_volatile_goto("1: jmp %l[t_no]\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
                         " .long 1b - .\n"
@@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
  * have. Thus, we force the jump to the widest, 4-byte, signed relative
  * offset even though the last would often fit in less bytes.
  */
-               asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
+               asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
                         "2:\n"
                         ".section .altinstructions,\"a\"\n"
                         " .long 1b - .\n"              /* src offset */
index 64507f35800ce23e9c867e1c80b5dc3d0a835ec1..6a2cefb4395a4228cce550ef8b231f3e7158d9d1 100644 (file)
@@ -18,7 +18,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-       asm goto("1:"
+       asm_volatile_goto("1:"
                ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
                ".pushsection __jump_table,  \"aw\" \n\t"
                _ASM_ALIGN "\n\t"
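As a reminder of what arch_static_branch() above compiles into, a sketch of typical static_key usage (the key and slow path are hypothetical); the same static_key_slow_inc() call appears later in this merge in the KVM paravirtual spinlock code.

#include <linux/jump_label.h>

extern void do_rare_thing(void);                /* hypothetical slow path */

static struct static_key sample_key = STATIC_KEY_INIT_FALSE;

static inline void hot_path(void)
{
        /* Emits a NOP here until the key is enabled, then a direct jump. */
        if (static_key_false(&sample_key))
                do_rare_thing();
}

static void enable_rare_thing(void)
{
        static_key_slow_inc(&sample_key);       /* patches the NOP into a jump */
}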
index 626cf70082d7c8229b14a529625652f5646fcf09..3142a94c7b4bf90314838fa8beb67c4e7ed0b363 100644 (file)
@@ -94,7 +94,7 @@ static inline void early_reserve_e820_mpc_new(void) { }
 #define default_get_smp_config x86_init_uint_noop
 #endif
 
-void generic_processor_info(int apicid, int version);
+int generic_processor_info(int apicid, int version);
 #ifdef CONFIG_ACPI
 extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
 extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
index cb7502852acb0921e4e0c7132962c681f43b51ec..e139b13f2a33a3572d91e4f7297952e1bf8a1ffc 100644 (file)
@@ -218,10 +218,14 @@ void msrs_free(struct msr *msrs);
 #ifdef CONFIG_SMP
 int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
 void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
 int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
 #else  /*  CONFIG_SMP  */
@@ -235,6 +239,16 @@ static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
        wrmsr(msr_no, l, h);
        return 0;
 }
+static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+       rdmsrl(msr_no, *q);
+       return 0;
+}
+static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+       wrmsrl(msr_no, q);
+       return 0;
+}
 static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
                                struct msr *msrs)
 {
@@ -254,6 +268,14 @@ static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
        return wrmsr_safe(msr_no, l, h);
 }
+static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+       return rdmsrl_safe(msr_no, q);
+}
+static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+       return wrmsrl_safe(msr_no, q);
+}
 static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
 {
        return rdmsr_safe_regs(regs);
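A usage sketch of the new 64-bit per-CPU MSR helpers declared above, reading MSR_CORE_C1_RES (also added elsewhere in this merge) on CPU 0; the wrapper function name is invented for illustration.

#include <linux/kernel.h>
#include <asm/msr.h>

static int sample_read_c1_residency(void)
{
        u64 c1_res;
        int err;

        /* The _safe variant reports an error instead of faulting on unknown MSRs. */
        err = rdmsrl_safe_on_cpu(0, MSR_CORE_C1_RES, &c1_res);
        if (!err)
                pr_info("CPU0 C1 residency: %llu\n", c1_res);
        return err;
}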
index e7e6751648edf775ad54a42a5c05b3e6d3c1cff5..07537a44216ec9b2eed302183af7c57d8949a5a0 100644 (file)
@@ -20,7 +20,7 @@
 static inline void __mutex_fastpath_lock(atomic_t *v,
                                         void (*fail_fn)(atomic_t *))
 {
-       asm volatile goto(LOCK_PREFIX "   decl %0\n"
+       asm_volatile_goto(LOCK_PREFIX "   decl %0\n"
                          "   jns %l[exit]\n"
                          : : "m" (v->counter)
                          : "memory", "cc"
@@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
 static inline void __mutex_fastpath_unlock(atomic_t *v,
                                           void (*fail_fn)(atomic_t *))
 {
-       asm volatile goto(LOCK_PREFIX "   incl %0\n"
+       asm_volatile_goto(LOCK_PREFIX "   incl %0\n"
                          "   jg %l[exit]\n"
                          : : "m" (v->counter)
                          : "memory", "cc"
diff --git a/arch/x86/include/asm/simd.h b/arch/x86/include/asm/simd.h
new file mode 100644 (file)
index 0000000..ee80b92
--- /dev/null
@@ -0,0 +1,11 @@
+
+#include <asm/i387.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ *                instructions or access the SIMD register file
+ */
+static __must_check inline bool may_use_simd(void)
+{
+       return irq_fpu_usable();
+}
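A sketch of the intended call pattern, on the assumption that callers pair may_use_simd() with kernel_fpu_begin()/kernel_fpu_end(); the XOR routine and its scalar stand-in are purely illustrative.

#include <linux/types.h>
#include <asm/simd.h>
#include <asm/i387.h>

static void sample_xor_bytes(u8 *dst, const u8 *src, size_t len)
{
        if (may_use_simd()) {
                kernel_fpu_begin();
                /* A vectorized XOR would go here; scalar code stands in. */
                while (len--)
                        *dst++ ^= *src++;
                kernel_fpu_end();
        } else {
                /* Fallback when FPU/SIMD state must not be touched. */
                while (len--)
                        *dst++ ^= *src++;
        }
}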
index bb0465090ae53eb3246b5022d187dd4517bd39d0..940ed3fd889a743732891945b191f34cddc8e217 100644 (file)
 #define MSR_PP1_ENERGY_STATUS          0x00000641
 #define MSR_PP1_POLICY                 0x00000642
 
+#define MSR_CORE_C1_RES                        0x00000660
+
 #define MSR_AMD64_MC0_MASK             0xc0010044
 
 #define MSR_IA32_MCx_CTL(x)            (MSR_IA32_MC0_CTL + 4*(x))
index 40c76604199f76e9263f94c10c249f2eee8e40dc..6c0b43bd024bb5890230ca45c61b3de082b611ef 100644 (file)
@@ -189,24 +189,31 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
        return 0;
 }
 
-static void acpi_register_lapic(int id, u8 enabled)
+/**
+ * acpi_register_lapic - register a local apic and generate a logical cpu number
+ * @id: local apic id to register
+ * @enabled: whether this cpu is enabled
+ *
+ * Returns the logical cpu number which maps to the local apic
+ */
+static int acpi_register_lapic(int id, u8 enabled)
 {
        unsigned int ver = 0;
 
        if (id >= MAX_LOCAL_APIC) {
                printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
-               return;
+               return -EINVAL;
        }
 
        if (!enabled) {
                ++disabled_cpus;
-               return;
+               return -EINVAL;
        }
 
        if (boot_cpu_physical_apicid != -1U)
                ver = apic_version[boot_cpu_physical_apicid];
 
-       generic_processor_info(id, ver);
+       return generic_processor_info(id, ver);
 }
 
 static int __init
@@ -614,84 +621,27 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 #endif
 }
 
-static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *obj;
-       struct acpi_madt_local_apic *lapic;
-       cpumask_var_t tmp_map, new_map;
-       u8 physid;
        int cpu;
-       int retval = -ENOMEM;
-
-       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
-               return -EINVAL;
-
-       if (!buffer.length || !buffer.pointer)
-               return -EINVAL;
-
-       obj = buffer.pointer;
-       if (obj->type != ACPI_TYPE_BUFFER ||
-           obj->buffer.length < sizeof(*lapic)) {
-               kfree(buffer.pointer);
-               return -EINVAL;
-       }
 
-       lapic = (struct acpi_madt_local_apic *)obj->buffer.pointer;
-
-       if (lapic->header.type != ACPI_MADT_TYPE_LOCAL_APIC ||
-           !(lapic->lapic_flags & ACPI_MADT_ENABLED)) {
-               kfree(buffer.pointer);
-               return -EINVAL;
-       }
-
-       physid = lapic->id;
-
-       kfree(buffer.pointer);
-       buffer.length = ACPI_ALLOCATE_BUFFER;
-       buffer.pointer = NULL;
-       lapic = NULL;
-
-       if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
-               goto out;
-
-       if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
-               goto free_tmp_map;
-
-       cpumask_copy(tmp_map, cpu_present_mask);
-       acpi_register_lapic(physid, ACPI_MADT_ENABLED);
-
-       /*
-        * If acpi_register_lapic successfully generates a new logical cpu
-        * number, then the following will get us exactly what was mapped
-        */
-       cpumask_andnot(new_map, cpu_present_mask, tmp_map);
-       if (cpumask_empty(new_map)) {
-               printk ("Unable to map lapic to logical cpu number\n");
-               retval = -EINVAL;
-               goto free_new_map;
+       cpu = acpi_register_lapic(physid, ACPI_MADT_ENABLED);
+       if (cpu < 0) {
+               pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+               return cpu;
        }
 
        acpi_processor_set_pdc(handle);
-
-       cpu = cpumask_first(new_map);
        acpi_map_cpu2node(handle, cpu, physid);
 
        *pcpu = cpu;
-       retval = 0;
-
-free_new_map:
-       free_cpumask_var(new_map);
-free_tmp_map:
-       free_cpumask_var(tmp_map);
-out:
-       return retval;
+       return 0;
 }
 
 /* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int *pcpu)
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
 {
-       return _acpi_map_lsapic(handle, pcpu);
+       return _acpi_map_lsapic(handle, physid, pcpu);
 }
 EXPORT_SYMBOL(acpi_map_lsapic);
 
@@ -745,7 +695,7 @@ static int __init acpi_parse_sbf(struct acpi_table_header *table)
 #ifdef CONFIG_HPET_TIMER
 #include <asm/hpet.h>
 
-static struct __initdata resource *hpet_res;
+static struct resource *hpet_res __initdata;
 
 static int __init acpi_parse_hpet(struct acpi_table_header *table)
 {
index a7eb82d9b0120d02147d8eb1620a7da1d708c282..ed165d657380062387d08da7bf2f607fff192490 100644 (file)
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
        apic_write(APIC_LVT1, value);
 }
 
-void generic_processor_info(int apicid, int version)
+int generic_processor_info(int apicid, int version)
 {
        int cpu, max = nr_cpu_ids;
        bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2127,7 +2127,7 @@ void generic_processor_info(int apicid, int version)
                        "  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
 
                disabled_cpus++;
-               return;
+               return -ENODEV;
        }
 
        if (num_processors >= nr_cpu_ids) {
@@ -2138,7 +2138,7 @@ void generic_processor_info(int apicid, int version)
                        "  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
 
                disabled_cpus++;
-               return;
+               return -EINVAL;
        }
 
        num_processors++;
@@ -2183,6 +2183,8 @@ void generic_processor_info(int apicid, int version)
 #endif
        set_cpu_possible(cpu, true);
        set_cpu_present(cpu, true);
+
+       return cpu;
 }
 
 int hard_smp_processor_id(void)
index 1191ac1c9d2598e64d13a1419b4a7a383933ce03..a419814cea575f9e2cf183772a9e99c59e097a11 100644 (file)
@@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
                break;
        case UV3_HUB_PART_NUMBER:
        case UV3_HUB_PART_NUMBER_X:
-               uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
+               uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
                break;
        }
 
index 897783b3302a9cd0d0af3efad49c94e45f9f7e9e..9d8449158cf989af3009c6606b7a878c827f5d32 100644 (file)
@@ -1888,10 +1888,7 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
        userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
        userpg->pmc_width = x86_pmu.cntval_bits;
 
-       if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
-               return;
-
-       if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+       if (!sched_clock_stable)
                return;
 
        userpg->cap_user_time = 1;
@@ -1899,10 +1896,8 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
        userpg->time_shift = CYC2NS_SCALE_FACTOR;
        userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
 
-       if (sched_clock_stable && !check_tsc_disabled()) {
-               userpg->cap_user_time_zero = 1;
-               userpg->time_zero = this_cpu_read(cyc2ns_offset);
-       }
+       userpg->cap_user_time_zero = 1;
+       userpg->time_zero = this_cpu_read(cyc2ns_offset);
 }
 
 /*
index ee11b7dfbfbb6676eb94a28360bce88b286ceac1..26d5a55a273610b8d49da975a006f156adb4e482 100644 (file)
@@ -42,15 +42,27 @@ static void __jump_label_transform(struct jump_entry *entry,
                                   int init)
 {
        union jump_code_union code;
+       const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
        const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
 
        if (type == JUMP_LABEL_ENABLE) {
-               /*
-                * We are enabling this jump label. If it is not a nop
-                * then something must have gone wrong.
-                */
-               if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) != 0))
-                       bug_at((void *)entry->code, __LINE__);
+               if (init) {
+                       /*
+                        * Jump label is enabled for the first time.
+                        * So we expect a default_nop...
+                        */
+                       if (unlikely(memcmp((void *)entry->code, default_nop, 5)
+                                    != 0))
+                               bug_at((void *)entry->code, __LINE__);
+               } else {
+                       /*
+                        * ...otherwise we expect an ideal_nop; anything
+                        * else means something went horribly wrong.
+                        */
+                       if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
+                                    != 0))
+                               bug_at((void *)entry->code, __LINE__);
+               }
 
                code.jump = 0xe9;
                code.offset = entry->target -
@@ -63,7 +75,6 @@ static void __jump_label_transform(struct jump_entry *entry,
                 * are converting the default nop to the ideal nop.
                 */
                if (init) {
-                       const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
                        if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
                                bug_at((void *)entry->code, __LINE__);
                } else {
index 697b93af02ddbc0fb4461b5d853810fa250cd0a1..a0e2a8a80c94129bb3d3d47bf975ef68f98f500e 100644 (file)
@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;
 
-       printk(KERN_INFO "KVM setup paravirtual spinlock\n");
+       pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
+       pv_lock_ops.unlock_kick = kvm_unlock_kick;
+}
+
+static __init int kvm_spinlock_init_jump(void)
+{
+       if (!kvm_para_available())
+               return 0;
+       if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
+               return 0;
 
        static_key_slow_inc(&paravirt_ticketlocks_enabled);
+       printk(KERN_INFO "KVM setup paravirtual spinlock\n");
 
-       pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
-       pv_lock_ops.unlock_kick = kvm_unlock_kick;
+       return 0;
 }
+early_initcall(kvm_spinlock_init_jump);
+
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
index e643e744e4d8bf855ce1889bdae58b51001aab45..7e920bff99a34b260e1ea338c3fc7dee44ba0e67 100644 (file)
@@ -326,6 +326,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
                },
        },
+       {       /* Handle problems with rebooting on the Latitude E5410. */
+               .callback = set_pci_reboot,
+               .ident = "Dell Latitude E5410",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
+               },
+       },
        {       /* Handle problems with rebooting on the Latitude E5420. */
                .callback = set_pci_reboot,
                .ident = "Dell Latitude E5420",
index 6cacab671f9b76a35aaf49d41431b12e788e5bbe..e73b3f53310c7663b2c37a3cffd57c0cad974a3c 100644 (file)
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * We need this for trampoline_base protection from concurrent accesses when
- * off- and onlining cores wildly.
- */
-static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
-
-void cpu_hotplug_driver_lock(void)
-{
-       mutex_lock(&x86_cpu_hotplug_driver_mutex);
-}
-
-void cpu_hotplug_driver_unlock(void)
-{
-       mutex_unlock(&x86_cpu_hotplug_driver_mutex);
-}
-
-ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
-ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
-#endif
-
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
 EXPORT_SYMBOL(smp_num_siblings);
index 6e60b5fe224481d01ca8662c6edd01432da08518..649b010da00ba5a0c191a4144f2458c08f0500ab 100644 (file)
@@ -65,29 +65,32 @@ int __ref _debug_hotplug_cpu(int cpu, int action)
        if (!cpu_is_hotpluggable(cpu))
                return -EINVAL;
 
-       cpu_hotplug_driver_lock();
+       lock_device_hotplug();
 
        switch (action) {
        case 0:
                ret = cpu_down(cpu);
                if (!ret) {
                        pr_info("CPU %u is now offline\n", cpu);
+                       dev->offline = true;
                        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
                } else
                        pr_debug("Can't offline CPU%d.\n", cpu);
                break;
        case 1:
                ret = cpu_up(cpu);
-               if (!ret)
+               if (!ret) {
+                       dev->offline = false;
                        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
-               else
+               } else {
                        pr_debug("Can't online CPU%d.\n", cpu);
+               }
                break;
        default:
                ret = -EINVAL;
        }
 
-       cpu_hotplug_driver_unlock();
+       unlock_device_hotplug();
 
        return ret;
 }
index 3b8e7459dd4db84a4c6f72816cd2c0629d866ee4..2b2fce1b200900b1af42865f946d5faa25fdc56a 100644 (file)
@@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
 {
+       struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
        if (!test_bit(VCPU_EXREG_PDPTR,
                      (unsigned long *)&vcpu->arch.regs_dirty))
                return;
 
        if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-               vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
-               vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
-               vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
-               vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
+               vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
+               vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
+               vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
+               vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
        }
 }
 
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 {
+       struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
        if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
-               vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
-               vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
-               vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
-               vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+               mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+               mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+               mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+               mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
        }
 
        __set_bit(VCPU_EXREG_PDPTR,
@@ -7777,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
                vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
                vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
-               __clear_bit(VCPU_EXREG_PDPTR,
-                               (unsigned long *)&vcpu->arch.regs_avail);
-               __clear_bit(VCPU_EXREG_PDPTR,
-                               (unsigned long *)&vcpu->arch.regs_dirty);
        }
 
        kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
index a6b1b86d225358f05e58a649fdadedc4eafdb87e..518532e6a3faa213eb0833c6781c9ceba34dea86 100644 (file)
@@ -47,6 +47,21 @@ int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 }
 EXPORT_SYMBOL(rdmsr_on_cpu);
 
+int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+       int err;
+       struct msr_info rv;
+
+       memset(&rv, 0, sizeof(rv));
+
+       rv.msr_no = msr_no;
+       err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+       *q = rv.reg.q;
+
+       return err;
+}
+EXPORT_SYMBOL(rdmsrl_on_cpu);
+
 int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
        int err;
@@ -63,6 +78,22 @@ int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 }
 EXPORT_SYMBOL(wrmsr_on_cpu);
 
+int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+       int err;
+       struct msr_info rv;
+
+       memset(&rv, 0, sizeof(rv));
+
+       rv.msr_no = msr_no;
+       rv.reg.q = q;
+
+       err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+
+       return err;
+}
+EXPORT_SYMBOL(wrmsrl_on_cpu);
+
 static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
                            struct msr *msrs,
                            void (*msr_func) (void *info))
@@ -159,6 +190,37 @@ int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 }
 EXPORT_SYMBOL(wrmsr_safe_on_cpu);
 
+int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
+{
+       int err;
+       struct msr_info rv;
+
+       memset(&rv, 0, sizeof(rv));
+
+       rv.msr_no = msr_no;
+       rv.reg.q = q;
+
+       err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
+
+       return err ? err : rv.err;
+}
+EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
+
+int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
+{
+       int err;
+       struct msr_info rv;
+
+       memset(&rv, 0, sizeof(rv));
+
+       rv.msr_no = msr_no;
+       err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
+       *q = rv.reg.q;
+
+       return err ? err : rv.err;
+}
+EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
+
 /*
  * These variants are significantly slower, but allows control over
  * the entire 32-bit GPR set.
index 79c216aa0e2baaac3a65a43972161922489c4978..516593e1ce33b92175195c8de2983258b723493c 100644 (file)
@@ -772,13 +772,21 @@ out:
        return;
 }
 
+static void bpf_jit_free_deferred(struct work_struct *work)
+{
+       struct sk_filter *fp = container_of(work, struct sk_filter, work);
+       unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
+       struct bpf_binary_header *header = (void *)addr;
+
+       set_memory_rw(addr, header->pages);
+       module_free(NULL, header);
+       kfree(fp);
+}
+
 void bpf_jit_free(struct sk_filter *fp)
 {
        if (fp->bpf_func != sk_run_filter) {
-               unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-               struct bpf_binary_header *header = (void *)addr;
-
-               set_memory_rw(addr, header->pages);
-               module_free(NULL, header);
+               INIT_WORK(&fp->work, bpf_jit_free_deferred);
+               schedule_work(&fp->work);
        }
 }
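The hunk above moves the actual freeing into process context via a workqueue; in isolation, and with hypothetical names, the pattern looks like this:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct sample_obj {
        struct work_struct work;
        void *payload;
};

static void sample_free_deferred(struct work_struct *work)
{
        struct sample_obj *obj = container_of(work, struct sample_obj, work);

        /* Runs later in process context, where freeing may sleep. */
        kfree(obj->payload);
        kfree(obj);
}

static void sample_free(struct sample_obj *obj)
{
        INIT_WORK(&obj->work, sample_free_deferred);
        schedule_work(&obj->work);
}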
index f5809fa2753e69246f8d361d844c212e03d139e9..b046e070e08868f72e62b5ce804ffedd1689cc03 100644 (file)
@@ -231,7 +231,7 @@ static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int wh
        offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];
 
        if ((offset) && (where == offset))
-               value = value & 0xfffffffc;
+               value = value & ~PCI_EXP_LNKCTL_ASPMC;
 
        return raw_pci_write(pci_domain_nr(bus), bus->number,
                                                devfn, where, size, value);
@@ -252,7 +252,7 @@ static struct pci_ops quirk_pcie_aspm_ops = {
  */
 static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
 {
-       int cap_base, i;
+       int i;
        struct pci_bus  *pbus;
        struct pci_dev *dev;
 
@@ -278,7 +278,7 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
                for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
                        quirk_aspm_offset[i] = 0;
 
-               pbus->ops = pbus->parent->ops;
+               pci_bus_set_ops(pbus, pbus->parent->ops);
        } else {
                /*
                 * If devices are attached to the root port at power-up or
@@ -286,13 +286,15 @@ static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
                 * each root port to save the register offsets and replace the
                 * bus ops.
                 */
-               list_for_each_entry(dev, &pbus->devices, bus_list) {
+               list_for_each_entry(dev, &pbus->devices, bus_list)
                        /* There are 0 to 8 devices attached to this bus */
-                       cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP);
-                       quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] = cap_base + 0x10;
-               }
-               pbus->ops = &quirk_pcie_aspm_ops;
+                       quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
+                               dev->pcie_cap + PCI_EXP_LNKCTL;
+
+               pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
+               dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
        }
+
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_MCH_PA,     pcie_rootport_aspm_quirk);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,   PCI_DEVICE_ID_INTEL_MCH_PA1,    pcie_rootport_aspm_quirk);
index fef7d0ba7e3a93d8e44ed09cec92db708759cbcf..649a12befba92f399895b947cc779e0269ac750d 100644 (file)
@@ -40,16 +40,9 @@ static bool                          lid_wake_on_close;
  */
 static int set_lid_wake_behavior(bool wake_on_close)
 {
-       struct acpi_object_list arg_list;
-       union acpi_object arg;
        acpi_status status;
 
-       arg_list.count          = 1;
-       arg_list.pointer        = &arg;
-       arg.type                = ACPI_TYPE_INTEGER;
-       arg.integer.value       = wake_on_close;
-
-       status = acpi_evaluate_object(NULL, "\\_SB.PCI0.LID.LIDW", &arg_list, NULL);
+       status = acpi_execute_simple_method(NULL, "\\_SB.PCI0.LID.LIDW", wake_on_close);
        if (ACPI_FAILURE(status)) {
                pr_warning(PFX "failed to set lid behavior\n");
                return 1;
index d1e4777b4e75352fea33ca6a3e3d43a679e19c6d..31d04758b76f6a2da41c309e368768d4881066d0 100644 (file)
@@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
                   old memory can be recycled */
                make_lowmem_page_readwrite(xen_initial_gdt);
 
+#ifdef CONFIG_X86_32
+               /*
+                * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+                * expects __USER_DS
+                */
+               loadsegment(ds, __USER_DS);
+               loadsegment(es, __USER_DS);
+#endif
+
                xen_filter_cpu_maps();
                xen_setup_vcpu_info_placement();
        }
index c114483010c13b70caf62b3edea8b46947ca17e8..7db5c22faa68a1803cad4708d9047ad8396b637e 100644 (file)
@@ -87,4 +87,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* _XTENSA_SOCKET_H */
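From userspace, SO_MAX_PACING_RATE is applied roughly like this (the value is in bytes per second; the rate and fallback definition below are illustrative assumptions):

#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47           /* generic value, as defined above */
#endif

static int cap_pacing(int fd)
{
        unsigned int rate = 1000000;    /* ~1 MB/s, arbitrary example */

        return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
                          &rate, sizeof(rate));
}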
index c50ecf0ea3b17c652db8c134905de38e56713851..026c1517505f2aaab4780a15735850104abbf7eb 100644 (file)
@@ -195,17 +195,17 @@ EXPORT_SYMBOL(blk_queue_make_request);
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
  * @q: the request queue for the device
- * @dma_mask: the maximum address the device can handle
+ * @max_addr: the maximum address the device can handle
  *
  * Description:
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @dma_mask.
+ *    buffers for doing I/O to pages residing above @max_addr.
  **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
+void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 {
-       unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
+       unsigned long b_pfn = max_addr >> PAGE_SHIFT;
        int dma = 0;
 
        q->bounce_gfp = GFP_NOIO;
index 1eb09ee5311b414e448b28eb26a72f7ff6bbaa1c..a8287b49d0621d1778295ad0516c8ccbf22ed0fa 100644 (file)
@@ -222,11 +222,16 @@ check_hybrid:
         * the disk size.
         *
         * Hybrid MBRs do not necessarily comply with this.
+        *
+        * Treat a bad value here as a warning only, so that an image
+        * dd'd from a smaller disk onto a larger disk still works.
         */
        if (ret == GPT_MBR_PROTECTIVE) {
                sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
                if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
-                       ret = 0;
+                       pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n",
+                                sz, min_t(uint32_t,
+                                          total_sectors - 1, 0xFFFFFFFF));
        }
 done:
        return ret;
index 69ce573f1224560b4f5c7532e21053b27c651da4..05cdf4a2b94ee72324eb967932b31fa23b684207 100644 (file)
@@ -174,9 +174,8 @@ config CRYPTO_TEST
        help
          Quick & dirty crypto test module.
 
-config CRYPTO_ABLK_HELPER_X86
+config CRYPTO_ABLK_HELPER
        tristate
-       depends on X86
        select CRYPTO_CRYPTD
 
 config CRYPTO_GLUE_HELPER_X86
@@ -695,7 +694,7 @@ config CRYPTO_AES_NI_INTEL
        select CRYPTO_AES_X86_64 if 64BIT
        select CRYPTO_AES_586 if !64BIT
        select CRYPTO_CRYPTD
-       select CRYPTO_ABLK_HELPER_X86
+       select CRYPTO_ABLK_HELPER
        select CRYPTO_ALGAPI
        select CRYPTO_GLUE_HELPER_X86 if 64BIT
        select CRYPTO_LRW
@@ -776,6 +775,22 @@ config CRYPTO_AES_ARM
 
          See <http://csrc.nist.gov/encryption/aes/> for more information.
 
+config CRYPTO_AES_ARM_BS
+       tristate "Bit sliced AES using NEON instructions"
+       depends on ARM && KERNEL_MODE_NEON
+       select CRYPTO_ALGAPI
+       select CRYPTO_AES_ARM
+       select CRYPTO_ABLK_HELPER
+       help
+         Use a faster and more secure NEON based implementation of AES in CBC,
+         CTR and XTS modes
+
+         Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
+         and for XTS mode encryption, CBC and XTS mode decryption speedup is
+         around 25%. (CBC encryption speed is not affected by this driver.)
+         This implementation does not rely on any lookup tables so it is
+         believed to be invulnerable to cache timing attacks.
+
 config CRYPTO_ANUBIS
        tristate "Anubis cipher algorithm"
        select CRYPTO_ALGAPI
@@ -879,7 +894,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX_X86_64
        depends on CRYPTO
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
-       select CRYPTO_ABLK_HELPER_X86
+       select CRYPTO_ABLK_HELPER
        select CRYPTO_GLUE_HELPER_X86
        select CRYPTO_CAMELLIA_X86_64
        select CRYPTO_LRW
@@ -901,7 +916,7 @@ config CRYPTO_CAMELLIA_AESNI_AVX2_X86_64
        depends on CRYPTO
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
-       select CRYPTO_ABLK_HELPER_X86
+       select CRYPTO_ABLK_HELPER
        select CRYPTO_GLUE_HELPER_X86
        select CRYPTO_CAMELLIA_X86_64
        select CRYPTO_CAMELLIA_AESNI_AVX_X86_64
@@ -953,7 +968,7 @@ config CRYPTO_CAST5_AVX_X86_64
        depends on X86 && 64BIT
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
-       select CRYPTO_ABLK_HELPER_X86
+       select CRYPTO_ABLK_HELPER
        select CRYPTO_CAST_COMMON
        select CRYPTO_CAST5
        help
@@ -976,7 +991,7 @@ config CRYPTO_CAST6_AVX_X86_64
        depends on X86 && 64BIT
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
-       select CRYPTO_ABLK_HELPER_X86
+       select CRYPTO_ABLK_HELPER
        select CRYPTO_GLUE_HELPER_X86
        select CRYPTO_CAST_COMMON
        select CRYPTO_CAST6
@@ -1094,7 +1109,7 @@ config CRYPTO_SERPENT_SSE2_X86_64
        depends on X86 && 64BIT
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
-       select CRYPTO_ABLK_HELPER_X86
+       select CRYPTO_ABLK_HELPER
        select CRYPTO_GLUE_HELPER_X86
        select CRYPTO_SERPENT
        select CRYPTO_LRW
@@ -1116,7 +1131,7 @@ config CRYPTO_SERPENT_SSE2_586
        depends on X86 && !64BIT
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
-       select CRYPTO_ABLK_HELPER_X86
+       select CRYPTO_ABLK_HELPER
        select CRYPTO_GLUE_HELPER_X86
        select CRYPTO_SERPENT
        select CRYPTO_LRW
@@ -1138,7 +1153,7 @@ config CRYPTO_SERPENT_AVX_X86_64
        depends on X86 && 64BIT
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
-       select CRYPTO_ABLK_HELPER_X86
+       select CRYPTO_ABLK_HELPER
        select CRYPTO_GLUE_HELPER_X86
        select CRYPTO_SERPENT
        select CRYPTO_LRW
@@ -1160,7 +1175,7 @@ config CRYPTO_SERPENT_AVX2_X86_64
        depends on X86 && 64BIT
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
-       select CRYPTO_ABLK_HELPER_X86
+       select CRYPTO_ABLK_HELPER
        select CRYPTO_GLUE_HELPER_X86
        select CRYPTO_SERPENT
        select CRYPTO_SERPENT_AVX_X86_64
@@ -1276,7 +1291,7 @@ config CRYPTO_TWOFISH_AVX_X86_64
        depends on X86 && 64BIT
        select CRYPTO_ALGAPI
        select CRYPTO_CRYPTD
-       select CRYPTO_ABLK_HELPER_X86
+       select CRYPTO_ABLK_HELPER
        select CRYPTO_GLUE_HELPER_X86
        select CRYPTO_TWOFISH_COMMON
        select CRYPTO_TWOFISH_X86_64
index 80019ba8da3a2113ce8a48bf924bba9ca7d96e50..e0b7cf4e18b689a8c8a61a985c1fca06738e7412 100644 (file)
@@ -2,8 +2,13 @@
 # Cryptographic API
 #
 
+# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
+# that will defeat memneq's actual purpose to prevent timing attacks.
+CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
+CFLAGS_memneq.o := -Os
+
 obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-y := api.o cipher.o compress.o
+crypto-y := api.o cipher.o compress.o memneq.o
 
 obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
 
@@ -104,3 +109,4 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
 obj-$(CONFIG_XOR_BLOCKS) += xor.o
 obj-$(CONFIG_ASYNC_CORE) += async_tx/
 obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
+obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
similarity index 95%
rename from arch/x86/crypto/ablk_helper.c
rename to crypto/ablk_helper.c
index 43282fe04a8b726e57048d8e67fd96d209c76041..ffe7278d4bd83bd9b8ee5e7392af7bd417e037ae 100644 (file)
 #include <linux/crypto.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/hardirq.h>
 #include <crypto/algapi.h>
 #include <crypto/cryptd.h>
-#include <asm/i387.h>
-#include <asm/crypto/ablk_helper.h>
+#include <crypto/ablk_helper.h>
+#include <asm/simd.h>
 
 int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
                 unsigned int key_len)
@@ -70,11 +71,11 @@ int ablk_encrypt(struct ablkcipher_request *req)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-       if (!irq_fpu_usable()) {
+       if (!may_use_simd()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
 
-               memcpy(cryptd_req, req, sizeof(*req));
+               *cryptd_req = *req;
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 
                return crypto_ablkcipher_encrypt(cryptd_req);
@@ -89,11 +90,11 @@ int ablk_decrypt(struct ablkcipher_request *req)
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-       if (!irq_fpu_usable()) {
+       if (!may_use_simd()) {
                struct ablkcipher_request *cryptd_req =
                        ablkcipher_request_ctx(req);
 
-               memcpy(cryptd_req, req, sizeof(*req));
+               *cryptd_req = *req;
                ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
 
                return crypto_ablkcipher_decrypt(cryptd_req);
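
The switch from irq_fpu_usable() to may_use_simd() is what lets non-x86 SIMD code
share this helper. A hedged sketch of the synchronous-path pattern on ARM, where
kernel_neon_begin()/kernel_neon_end() bracket NEON use; the foo_* routines are
hypothetical stand-ins for a driver's scalar and NEON cores:

    #include <linux/types.h>
    #include <asm/simd.h>
    #include <asm/neon.h>

    void foo_process_scalar(u8 *dst, const u8 *src, unsigned int len);  /* assumed C fallback */
    void foo_process_neon(u8 *dst, const u8 *src, unsigned int len);    /* assumed NEON core */

    static void foo_process(u8 *dst, const u8 *src, unsigned int len)
    {
            /* In contexts where SIMD registers may not be touched (e.g. some
             * interrupt contexts), fall back to the scalar implementation. */
            if (!may_use_simd()) {
                    foo_process_scalar(dst, src, len);
                    return;
            }
            kernel_neon_begin();
            foo_process_neon(dst, src, len);
            kernel_neon_end();
    }
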
index c0bb3778f1ae06976fbaf07c7e1b075fc0c581e7..666f1962a160f5d547579b918b6229de0232607b 100644 (file)
@@ -230,11 +230,11 @@ remainder:
         */
        if (byte_count < DEFAULT_BLK_SZ) {
 empty_rbuf:
-               for (; ctx->rand_data_valid < DEFAULT_BLK_SZ;
-                       ctx->rand_data_valid++) {
+               while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
                        *ptr = ctx->rand_data[ctx->rand_data_valid];
                        ptr++;
                        byte_count--;
+                       ctx->rand_data_valid++;
                        if (byte_count == 0)
                                goto done;
                }
index 4a6a0696f8a3b165618fe7c62516a7446e354c24..1912b9be504356e5880eeb764216f2beff53e296 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <crypto/algapi.h>
 #include "public_key.h"
 
 MODULE_LICENSE("GPL");
@@ -189,12 +190,12 @@ static int RSA_verify(const u8 *H, const u8 *EM, size_t k, size_t hash_size,
                }
        }
 
-       if (memcmp(asn1_template, EM + T_offset, asn1_size) != 0) {
+       if (crypto_memneq(asn1_template, EM + T_offset, asn1_size) != 0) {
                kleave(" = -EBADMSG [EM[T] ASN.1 mismatch]");
                return -EBADMSG;
        }
 
-       if (memcmp(H, EM + T_offset + asn1_size, hash_size) != 0) {
+       if (crypto_memneq(H, EM + T_offset + asn1_size, hash_size) != 0) {
                kleave(" = -EKEYREJECTED [EM[T] hash mismatch]");
                return -EKEYREJECTED;
        }
index 7be34248b450896cfc056d1709513df4f172f79a..39ea4791a3c977ad7eda47498b9f7d7741b438e2 100644 (file)
@@ -128,7 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
                }
                device->device_issue_pending(chan);
        } else {
-               if (dma_wait_for_async_tx(depend_tx) != DMA_SUCCESS)
+               if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
                        panic("%s: DMA error waiting for depend_tx\n",
                              __func__);
                tx->tx_submit(tx);
@@ -280,7 +280,7 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
                 * we are referring to the correct operation
                 */
                BUG_ON(async_tx_test_ack(*tx));
-               if (dma_wait_for_async_tx(*tx) != DMA_SUCCESS)
+               if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE)
                        panic("%s: DMA error waiting for transaction\n",
                              __func__);
                async_tx_ack(*tx);
index ffce19de05cf958853dc0e13f7e3cf1efa30cd4f..1875e7026e8f7fad661e2051cdc139f733d6c19a 100644 (file)
@@ -52,40 +52,52 @@ static void authenc_request_complete(struct aead_request *req, int err)
                aead_request_complete(req, err);
 }
 
-static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
-                                unsigned int keylen)
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+                              unsigned int keylen)
 {
-       unsigned int authkeylen;
-       unsigned int enckeylen;
-       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
-       struct crypto_ahash *auth = ctx->auth;
-       struct crypto_ablkcipher *enc = ctx->enc;
-       struct rtattr *rta = (void *)key;
+       struct rtattr *rta = (struct rtattr *)key;
        struct crypto_authenc_key_param *param;
-       int err = -EINVAL;
 
        if (!RTA_OK(rta, keylen))
-               goto badkey;
+               return -EINVAL;
        if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-               goto badkey;
+               return -EINVAL;
        if (RTA_PAYLOAD(rta) < sizeof(*param))
-               goto badkey;
+               return -EINVAL;
 
        param = RTA_DATA(rta);
-       enckeylen = be32_to_cpu(param->enckeylen);
+       keys->enckeylen = be32_to_cpu(param->enckeylen);
 
        key += RTA_ALIGN(rta->rta_len);
        keylen -= RTA_ALIGN(rta->rta_len);
 
-       if (keylen < enckeylen)
-               goto badkey;
+       if (keylen < keys->enckeylen)
+               return -EINVAL;
 
-       authkeylen = keylen - enckeylen;
+       keys->authkeylen = keylen - keys->enckeylen;
+       keys->authkey = key;
+       keys->enckey = key + keys->authkeylen;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_authenc_extractkeys);
+
+static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
+                                unsigned int keylen)
+{
+       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+       struct crypto_ahash *auth = ctx->auth;
+       struct crypto_ablkcipher *enc = ctx->enc;
+       struct crypto_authenc_keys keys;
+       int err = -EINVAL;
+
+       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+               goto badkey;
 
        crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
        crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc) &
                                    CRYPTO_TFM_REQ_MASK);
-       err = crypto_ahash_setkey(auth, key, authkeylen);
+       err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
        crypto_aead_set_flags(authenc, crypto_ahash_get_flags(auth) &
                                       CRYPTO_TFM_RES_MASK);
 
@@ -95,7 +107,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
        crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
        crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc) &
                                         CRYPTO_TFM_REQ_MASK);
-       err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+       err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
        crypto_aead_set_flags(authenc, crypto_ablkcipher_get_flags(enc) &
                                       CRYPTO_TFM_RES_MASK);
 
@@ -188,7 +200,7 @@ static void authenc_verify_ahash_update_done(struct crypto_async_request *areq,
        scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
                                 authsize, 0);
 
-       err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+       err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
        if (err)
                goto out;
 
@@ -227,7 +239,7 @@ static void authenc_verify_ahash_done(struct crypto_async_request *areq,
        scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
                                 authsize, 0);
 
-       err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+       err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
        if (err)
                goto out;
 
@@ -462,7 +474,7 @@ static int crypto_authenc_verify(struct aead_request *req,
        ihash = ohash + authsize;
        scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
                                 authsize, 0);
-       return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+       return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
 }
 
 static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
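
The blob that crypto_authenc_extractkeys() parses is the long-standing authenc
key format: an rtattr carrying CRYPTO_AUTHENC_KEYA_PARAM with a big-endian
enckeylen, then the authentication key, then the encryption key. A hedged sketch
of a caller packing such a blob (foo_pack_authenc_key() is illustrative only;
the structures come from include/crypto/authenc.h and linux/rtnetlink.h):

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/rtnetlink.h>
    #include <crypto/authenc.h>

    static int foo_pack_authenc_key(u8 *buf, unsigned int buflen,
                                    const u8 *authkey, unsigned int authkeylen,
                                    const u8 *enckey, unsigned int enckeylen)
    {
            struct rtattr *rta = (struct rtattr *)buf;
            struct crypto_authenc_key_param *param;
            unsigned int len = RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;

            if (buflen < len)
                    return -EINVAL;

            /* rtattr header announces the enckeylen parameter ... */
            rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
            rta->rta_len = RTA_LENGTH(sizeof(*param));
            param = RTA_DATA(rta);
            param->enckeylen = cpu_to_be32(enckeylen);

            /* ... followed by authkey, then enckey. */
            memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
            memcpy(buf + RTA_SPACE(sizeof(*param)) + authkeylen,
                   enckey, enckeylen);
            return len;
    }
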
index ab53762fc309c5db55ccaab948f1862bcc7a237a..4be0dd4373a9a2e8a15b62adbf8dbdd6e374eacd 100644 (file)
@@ -59,37 +59,19 @@ static void authenc_esn_request_complete(struct aead_request *req, int err)
 static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *key,
                                     unsigned int keylen)
 {
-       unsigned int authkeylen;
-       unsigned int enckeylen;
        struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
        struct crypto_ahash *auth = ctx->auth;
        struct crypto_ablkcipher *enc = ctx->enc;
-       struct rtattr *rta = (void *)key;
-       struct crypto_authenc_key_param *param;
+       struct crypto_authenc_keys keys;
        int err = -EINVAL;
 
-       if (!RTA_OK(rta, keylen))
+       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;
-       if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-               goto badkey;
-       if (RTA_PAYLOAD(rta) < sizeof(*param))
-               goto badkey;
-
-       param = RTA_DATA(rta);
-       enckeylen = be32_to_cpu(param->enckeylen);
-
-       key += RTA_ALIGN(rta->rta_len);
-       keylen -= RTA_ALIGN(rta->rta_len);
-
-       if (keylen < enckeylen)
-               goto badkey;
-
-       authkeylen = keylen - enckeylen;
 
        crypto_ahash_clear_flags(auth, CRYPTO_TFM_REQ_MASK);
        crypto_ahash_set_flags(auth, crypto_aead_get_flags(authenc_esn) &
                                     CRYPTO_TFM_REQ_MASK);
-       err = crypto_ahash_setkey(auth, key, authkeylen);
+       err = crypto_ahash_setkey(auth, keys.authkey, keys.authkeylen);
        crypto_aead_set_flags(authenc_esn, crypto_ahash_get_flags(auth) &
                                           CRYPTO_TFM_RES_MASK);
 
@@ -99,7 +81,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
        crypto_ablkcipher_clear_flags(enc, CRYPTO_TFM_REQ_MASK);
        crypto_ablkcipher_set_flags(enc, crypto_aead_get_flags(authenc_esn) &
                                         CRYPTO_TFM_REQ_MASK);
-       err = crypto_ablkcipher_setkey(enc, key + authkeylen, enckeylen);
+       err = crypto_ablkcipher_setkey(enc, keys.enckey, keys.enckeylen);
        crypto_aead_set_flags(authenc_esn, crypto_ablkcipher_get_flags(enc) &
                                           CRYPTO_TFM_RES_MASK);
 
@@ -247,7 +229,7 @@ static void authenc_esn_verify_ahash_update_done(struct crypto_async_request *ar
        scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
                                 authsize, 0);
 
-       err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+       err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
        if (err)
                goto out;
 
@@ -296,7 +278,7 @@ static void authenc_esn_verify_ahash_update_done2(struct crypto_async_request *a
        scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
                                 authsize, 0);
 
-       err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+       err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
        if (err)
                goto out;
 
@@ -336,7 +318,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
        scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
                                 authsize, 0);
 
-       err = memcmp(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
+       err = crypto_memneq(ihash, ahreq->result, authsize) ? -EBADMSG : 0;
        if (err)
                goto out;
 
@@ -568,7 +550,7 @@ static int crypto_authenc_esn_verify(struct aead_request *req)
        ihash = ohash + authsize;
        scatterwalk_map_and_copy(ihash, areq_ctx->sg, areq_ctx->cryptlen,
                                 authsize, 0);
-       return memcmp(ihash, ohash, authsize) ? -EBADMSG : 0;
+       return crypto_memneq(ihash, ohash, authsize) ? -EBADMSG : 0;
 }
 
 static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv,
index 499c91717d937bfaba9364a5bcde94988626195c..3e05499d183aa8e49d41b83be448e7f52d76b10c 100644 (file)
@@ -363,7 +363,7 @@ static void crypto_ccm_decrypt_done(struct crypto_async_request *areq,
 
        if (!err) {
                err = crypto_ccm_auth(req, req->dst, cryptlen);
-               if (!err && memcmp(pctx->auth_tag, pctx->odata, authsize))
+               if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
                        err = -EBADMSG;
        }
        aead_request_complete(req, err);
@@ -422,7 +422,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
                return err;
 
        /* verify */
-       if (memcmp(authtag, odata, authsize))
+       if (crypto_memneq(authtag, odata, authsize))
                return -EBADMSG;
 
        return err;
index 43e1fb05ea54878cbe136231a1a92c847b6d3119..b4f01793900409a0398faeb958b74416a5d1637d 100644 (file)
@@ -582,7 +582,7 @@ static int crypto_gcm_verify(struct aead_request *req,
 
        crypto_xor(auth_tag, iauth_tag, 16);
        scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0);
-       return memcmp(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
+       return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
 }
 
 static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
diff --git a/crypto/memneq.c b/crypto/memneq.c
new file mode 100644 (file)
index 0000000..cd01622
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Constant-time equality testing of memory regions.
+ *
+ * Authors:
+ *
+ *   James Yonan <james@openvpn.net>
+ *   Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 OpenVPN Technologies, Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of OpenVPN Technologies nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <crypto/algapi.h>
+
+#ifndef __HAVE_ARCH_CRYPTO_MEMNEQ
+
+/* Generic path for arbitrary size */
+static inline unsigned long
+__crypto_memneq_generic(const void *a, const void *b, size_t size)
+{
+       unsigned long neq = 0;
+
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+       while (size >= sizeof(unsigned long)) {
+               neq |= *(unsigned long *)a ^ *(unsigned long *)b;
+               a += sizeof(unsigned long);
+               b += sizeof(unsigned long);
+               size -= sizeof(unsigned long);
+       }
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+       while (size > 0) {
+               neq |= *(unsigned char *)a ^ *(unsigned char *)b;
+               a += 1;
+               b += 1;
+               size -= 1;
+       }
+       return neq;
+}
+
+/* Loop-free fast-path for frequently used 16-byte size */
+static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
+{
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (sizeof(unsigned long) == 8)
+               return ((*(unsigned long *)(a)   ^ *(unsigned long *)(b))
+                     | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
+       else if (sizeof(unsigned int) == 4)
+               return ((*(unsigned int *)(a)    ^ *(unsigned int *)(b))
+                      | (*(unsigned int *)(a+4)  ^ *(unsigned int *)(b+4))
+                     | (*(unsigned int *)(a+8)  ^ *(unsigned int *)(b+8))
+                     | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
+       else
+#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+               return ((*(unsigned char *)(a)    ^ *(unsigned char *)(b))
+                     | (*(unsigned char *)(a+1)  ^ *(unsigned char *)(b+1))
+                     | (*(unsigned char *)(a+2)  ^ *(unsigned char *)(b+2))
+                     | (*(unsigned char *)(a+3)  ^ *(unsigned char *)(b+3))
+                     | (*(unsigned char *)(a+4)  ^ *(unsigned char *)(b+4))
+                     | (*(unsigned char *)(a+5)  ^ *(unsigned char *)(b+5))
+                     | (*(unsigned char *)(a+6)  ^ *(unsigned char *)(b+6))
+                     | (*(unsigned char *)(a+7)  ^ *(unsigned char *)(b+7))
+                     | (*(unsigned char *)(a+8)  ^ *(unsigned char *)(b+8))
+                     | (*(unsigned char *)(a+9)  ^ *(unsigned char *)(b+9))
+                     | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
+                     | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
+                     | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
+                     | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
+                     | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
+                     | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
+}
+
+/* Compare two areas of memory without leaking timing information,
+ * and with special optimizations for common sizes.  Users should
+ * not call this function directly, but should instead use
+ * crypto_memneq defined in crypto/algapi.h.
+ */
+noinline unsigned long __crypto_memneq(const void *a, const void *b,
+                                      size_t size)
+{
+       switch (size) {
+       case 16:
+               return __crypto_memneq_16(a, b);
+       default:
+               return __crypto_memneq_generic(a, b, size);
+       }
+}
+EXPORT_SYMBOL(__crypto_memneq);
+
+#endif /* __HAVE_ARCH_CRYPTO_MEMNEQ */
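
Callers are not expected to use __crypto_memneq() directly; the public entry
point is crypto_memneq() from <crypto/algapi.h>, which returns zero only when
the two regions are equal. A minimal hedged example of the tag-check idiom the
rest of this merge converts to (foo_check_tag() is illustrative only):

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <crypto/algapi.h>

    /* Constant-time authentication tag comparison: any difference yields
     * -EBADMSG without revealing how far the comparison got. */
    static int foo_check_tag(const u8 *computed, const u8 *received,
                             unsigned int authsize)
    {
            return crypto_memneq(computed, received, authsize) ? -EBADMSG : 0;
    }
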
index aa43b911ccef582c78e70a914106ba454de2c05b..969e9871785ca5ae47e0153ef96c15c460daca18 100644 (file)
@@ -166,4 +166,6 @@ source "drivers/reset/Kconfig"
 
 source "drivers/fmc/Kconfig"
 
+source "drivers/powercap/Kconfig"
+
 endmenu
index ab93de8297f1338fc2440967ce8934a7ff6bc043..34c1d554f69b489fc523a811e9f1fc76b5a5cd4e 100644 (file)
@@ -152,3 +152,4 @@ obj-$(CONFIG_VME_BUS)               += vme/
 obj-$(CONFIG_IPACK_BUS)                += ipack/
 obj-$(CONFIG_NTB)              += ntb/
 obj-$(CONFIG_FMC)              += fmc/
+obj-$(CONFIG_POWERCAP)         += powercap/
index 22327e6a7236fc367313ec5acf45016d354863d4..589da059ce3939c9a2139907daefbf00a5a9b0e2 100644 (file)
@@ -24,7 +24,7 @@ menuconfig ACPI
          are configured, ACPI is used.
 
          The project home page for the Linux ACPI subsystem is here:
-         <http://www.lesswatts.org/projects/acpi/>
+         <https://01.org/linux-acpi>
 
          Linux support for ACPI is based on Intel Corporation's ACPI
          Component Architecture (ACPI CA).  For more information on the
@@ -56,23 +56,6 @@ config ACPI_PROCFS
 
          Say N to delete /proc/acpi/ files that have moved to /sys/
 
-config ACPI_PROCFS_POWER
-       bool "Deprecated power /proc/acpi directories"
-       depends on PROC_FS
-       help
-         For backwards compatibility, this option allows
-          deprecated power /proc/acpi/ directories to exist, even when
-          they have been replaced by functions in /sys.
-          The deprecated directories (and their replacements) include:
-         /proc/acpi/battery/* (/sys/class/power_supply/*)
-         /proc/acpi/ac_adapter/* (sys/class/power_supply/*)
-         This option has no effect on /proc/acpi/ directories
-         and functions, which do not yet exist in /sys
-         This option, together with the proc directories, will be
-         deleted in 2.6.39.
-
-         Say N to delete power /proc/acpi/ directories that have moved to /sys/
-
 config ACPI_EC_DEBUGFS
        tristate "EC read/write access through /sys/kernel/debug/ec"
        default n
@@ -123,9 +106,9 @@ config ACPI_BUTTON
        default y
        help
          This driver handles events on the power, sleep, and lid buttons.
-         A daemon reads /proc/acpi/event and perform user-defined actions
-         such as shutting down the system.  This is necessary for
-         software-controlled poweroff.
+         A daemon reads events from input devices or via netlink and
+         performs user-defined actions such as shutting down the system.
+         This is necessary for software-controlled poweroff.
 
          To compile this driver as a module, choose M here:
          the module will be called button.
@@ -175,9 +158,10 @@ config ACPI_PROCESSOR
 
          To compile this driver as a module, choose M here:
          the module will be called processor.
+
 config ACPI_IPMI
        tristate "IPMI"
-       depends on IPMI_SI && IPMI_HANDLER
+       depends on IPMI_SI
        default n
        help
          This driver enables the ACPI to access the BMC controller. And it
index cdaf68b58b006fb833f104f2d5e28baa4ecaf991..a55fc06db4ae705b47aa8ca9f2b33d456057c536 100644 (file)
@@ -47,7 +47,6 @@ acpi-y                                += sysfs.o
 acpi-$(CONFIG_X86)             += acpi_cmos_rtc.o
 acpi-$(CONFIG_DEBUG_FS)                += debugfs.o
 acpi-$(CONFIG_ACPI_NUMA)       += numa.o
-acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 ifdef CONFIG_ACPI_VIDEO
 acpi-y                         += video_detect.o
 endif
index f37beaa32750106dffdf9fd7aeac764f1f0094f2..b9f0d5f4bba51cecb6ffe8fda126762850818263 100644 (file)
 #include <linux/types.h>
 #include <linux/dmi.h>
 #include <linux/delay.h>
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#endif
+#include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
@@ -55,75 +52,30 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-static int acpi_ac_open_fs(struct inode *inode, struct file *file);
-#endif
-
-static int acpi_ac_add(struct acpi_device *device);
-static int acpi_ac_remove(struct acpi_device *device);
-static void acpi_ac_notify(struct acpi_device *device, u32 event);
-
-static const struct acpi_device_id ac_device_ids[] = {
-       {"ACPI0003", 0},
-       {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ac_device_ids);
-
-#ifdef CONFIG_PM_SLEEP
-static int acpi_ac_resume(struct device *dev);
-#endif
-static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
-
 static int ac_sleep_before_get_state_ms;
 
-static struct acpi_driver acpi_ac_driver = {
-       .name = "ac",
-       .class = ACPI_AC_CLASS,
-       .ids = ac_device_ids,
-       .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
-       .ops = {
-               .add = acpi_ac_add,
-               .remove = acpi_ac_remove,
-               .notify = acpi_ac_notify,
-               },
-       .drv.pm = &acpi_ac_pm,
-};
-
 struct acpi_ac {
        struct power_supply charger;
-       struct acpi_device * device;
+       struct acpi_device *adev;
+       struct platform_device *pdev;
        unsigned long long state;
 };
 
 #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static const struct file_operations acpi_ac_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_ac_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-#endif
-
 /* --------------------------------------------------------------------------
                                AC Adapter Management
    -------------------------------------------------------------------------- */
 
 static int acpi_ac_get_state(struct acpi_ac *ac)
 {
-       acpi_status status = AE_OK;
-
-
-       if (!ac)
-               return -EINVAL;
+       acpi_status status;
 
-       status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state);
+       status = acpi_evaluate_integer(ac->adev->handle, "_PSR", NULL,
+                                      &ac->state);
        if (ACPI_FAILURE(status)) {
-               ACPI_EXCEPTION((AE_INFO, status, "Error reading AC Adapter state"));
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "Error reading AC Adapter state"));
                ac->state = ACPI_AC_STATUS_UNKNOWN;
                return -ENODEV;
        }
@@ -160,91 +112,13 @@ static enum power_supply_property ac_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
 };
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* --------------------------------------------------------------------------
-                              FS Interface (/proc)
-   -------------------------------------------------------------------------- */
-
-static struct proc_dir_entry *acpi_ac_dir;
-
-static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
-{
-       struct acpi_ac *ac = seq->private;
-
-
-       if (!ac)
-               return 0;
-
-       if (acpi_ac_get_state(ac)) {
-               seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
-               return 0;
-       }
-
-       seq_puts(seq, "state:                   ");
-       switch (ac->state) {
-       case ACPI_AC_STATUS_OFFLINE:
-               seq_puts(seq, "off-line\n");
-               break;
-       case ACPI_AC_STATUS_ONLINE:
-               seq_puts(seq, "on-line\n");
-               break;
-       default:
-               seq_puts(seq, "unknown\n");
-               break;
-       }
-
-       return 0;
-}
-
-static int acpi_ac_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
-}
-
-static int acpi_ac_add_fs(struct acpi_device *device)
-{
-       struct proc_dir_entry *entry = NULL;
-
-       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
-                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
-       if (!acpi_device_dir(device)) {
-               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-                                                    acpi_ac_dir);
-               if (!acpi_device_dir(device))
-                       return -ENODEV;
-       }
-
-       /* 'state' [R] */
-       entry = proc_create_data(ACPI_AC_FILE_STATE,
-                                S_IRUGO, acpi_device_dir(device),
-                                &acpi_ac_fops, acpi_driver_data(device));
-       if (!entry)
-               return -ENODEV;
-       return 0;
-}
-
-static int acpi_ac_remove_fs(struct acpi_device *device)
-{
-
-       if (acpi_device_dir(device)) {
-               remove_proc_entry(ACPI_AC_FILE_STATE, acpi_device_dir(device));
-
-               remove_proc_entry(acpi_device_bid(device), acpi_ac_dir);
-               acpi_device_dir(device) = NULL;
-       }
-
-       return 0;
-}
-#endif
-
 /* --------------------------------------------------------------------------
                                    Driver Model
    -------------------------------------------------------------------------- */
 
-static void acpi_ac_notify(struct acpi_device *device, u32 event)
+static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data)
 {
-       struct acpi_ac *ac = acpi_driver_data(device);
-
+       struct acpi_ac *ac = data;
 
        if (!ac)
                return;
@@ -267,10 +141,10 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
                        msleep(ac_sleep_before_get_state_ms);
 
                acpi_ac_get_state(ac);
-               acpi_bus_generate_netlink_event(device->pnp.device_class,
-                                                 dev_name(&device->dev), event,
-                                                 (u32) ac->state);
-               acpi_notifier_call_chain(device, event, (u32) ac->state);
+               acpi_bus_generate_netlink_event(ac->adev->pnp.device_class,
+                                               dev_name(&ac->pdev->dev),
+                                               event, (u32) ac->state);
+               acpi_notifier_call_chain(ac->adev, event, (u32) ac->state);
                kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
        }
 
@@ -295,53 +169,55 @@ static struct dmi_system_id ac_dmi_table[] = {
        {},
 };
 
-static int acpi_ac_add(struct acpi_device *device)
+static int acpi_ac_probe(struct platform_device *pdev)
 {
        int result = 0;
        struct acpi_ac *ac = NULL;
+       struct acpi_device *adev;
 
-
-       if (!device)
+       if (!pdev)
                return -EINVAL;
 
+       result = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+       if (result)
+               return -ENODEV;
+
        ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL);
        if (!ac)
                return -ENOMEM;
 
-       ac->device = device;
-       strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME);
-       strcpy(acpi_device_class(device), ACPI_AC_CLASS);
-       device->driver_data = ac;
+       strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME);
+       strcpy(acpi_device_class(adev), ACPI_AC_CLASS);
+       ac->adev = adev;
+       ac->pdev = pdev;
+       platform_set_drvdata(pdev, ac);
 
        result = acpi_ac_get_state(ac);
        if (result)
                goto end;
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       result = acpi_ac_add_fs(device);
-#endif
-       if (result)
-               goto end;
-       ac->charger.name = acpi_device_bid(device);
+       ac->charger.name = acpi_device_bid(adev);
        ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
        ac->charger.properties = ac_props;
        ac->charger.num_properties = ARRAY_SIZE(ac_props);
        ac->charger.get_property = get_ac_property;
-       result = power_supply_register(&ac->device->dev, &ac->charger);
+       result = power_supply_register(&pdev->dev, &ac->charger);
        if (result)
                goto end;
 
+       result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev),
+                       ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler, ac);
+       if (result) {
+               power_supply_unregister(&ac->charger);
+               goto end;
+       }
        printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
-              acpi_device_name(device), acpi_device_bid(device),
+              acpi_device_name(adev), acpi_device_bid(adev),
               ac->state ? "on-line" : "off-line");
 
-      end:
-       if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-               acpi_ac_remove_fs(device);
-#endif
+end:
+       if (result)
                kfree(ac);
-       }
 
        dmi_check_system(ac_dmi_table);
        return result;
@@ -356,7 +232,7 @@ static int acpi_ac_resume(struct device *dev)
        if (!dev)
                return -EINVAL;
 
-       ac = acpi_driver_data(to_acpi_device(dev));
+       ac = platform_get_drvdata(to_platform_device(dev));
        if (!ac)
                return -EINVAL;
 
@@ -368,28 +244,44 @@ static int acpi_ac_resume(struct device *dev)
        return 0;
 }
 #endif
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
 
-static int acpi_ac_remove(struct acpi_device *device)
+static int acpi_ac_remove(struct platform_device *pdev)
 {
-       struct acpi_ac *ac = NULL;
-
+       struct acpi_ac *ac;
 
-       if (!device || !acpi_driver_data(device))
+       if (!pdev)
                return -EINVAL;
 
-       ac = acpi_driver_data(device);
+       acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev),
+                       ACPI_DEVICE_NOTIFY, acpi_ac_notify_handler);
 
+       ac = platform_get_drvdata(pdev);
        if (ac->charger.dev)
                power_supply_unregister(&ac->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_ac_remove_fs(device);
-#endif
 
        kfree(ac);
 
        return 0;
 }
 
+static const struct acpi_device_id acpi_ac_match[] = {
+       { "ACPI0003", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, acpi_ac_match);
+
+static struct platform_driver acpi_ac_driver = {
+       .probe          = acpi_ac_probe,
+       .remove         = acpi_ac_remove,
+       .driver         = {
+               .name   = "acpi-ac",
+               .owner  = THIS_MODULE,
+               .pm     = &acpi_ac_pm_ops,
+               .acpi_match_table = ACPI_PTR(acpi_ac_match),
+       },
+};
+
 static int __init acpi_ac_init(void)
 {
        int result;
@@ -397,34 +289,16 @@ static int __init acpi_ac_init(void)
        if (acpi_disabled)
                return -ENODEV;
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_ac_dir = acpi_lock_ac_dir();
-       if (!acpi_ac_dir)
+       result = platform_driver_register(&acpi_ac_driver);
+       if (result < 0)
                return -ENODEV;
-#endif
-
-       result = acpi_bus_register_driver(&acpi_ac_driver);
-       if (result < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-               acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
-               return -ENODEV;
-       }
 
        return 0;
 }
 
 static void __exit acpi_ac_exit(void)
 {
-
-       acpi_bus_unregister_driver(&acpi_ac_driver);
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_unlock_ac_dir(acpi_ac_dir);
-#endif
-
-       return;
+       platform_driver_unregister(&acpi_ac_driver);
 }
-
 module_init(acpi_ac_init);
 module_exit(acpi_ac_exit);
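
The AC driver above now binds through the platform bus with an ACPI match table
instead of registering a struct acpi_driver. A hedged skeleton of that
registration pattern (the "XXXX0001" _HID and foo_* names are placeholders, not
from this commit):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/acpi.h>

    static const struct acpi_device_id foo_acpi_match[] = {
            { "XXXX0001", 0 },      /* placeholder _HID */
            { }
    };
    MODULE_DEVICE_TABLE(acpi, foo_acpi_match);

    static int foo_probe(struct platform_device *pdev)
    {
            /* The ACPI core enumerates a platform device for the matching
             * _HID; the companion acpi_device can be looked up from
             * ACPI_HANDLE(&pdev->dev), as acpi_ac_probe() does above. */
            return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver foo_driver = {
            .probe  = foo_probe,
            .remove = foo_remove,
            .driver = {
                    .name  = "foo-acpi",
                    .owner = THIS_MODULE,
                    .acpi_match_table = ACPI_PTR(foo_acpi_match),
            },
    };
    module_platform_driver(foo_driver);

    MODULE_LICENSE("GPL");
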
index a6977e12d5745ab5f2015e2f2879174bddd64bc0..ac0f52f6df2b862052a3de423ac039a6bd2eefe8 100644 (file)
@@ -1,8 +1,9 @@
 /*
  *  acpi_ipmi.c - ACPI IPMI opregion
  *
- *  Copyright (C) 2010 Intel Corporation
- *  Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com>
+ *  Copyright (C) 2010, 2013 Intel Corporation
+ *    Author: Zhao Yakui <yakui.zhao@intel.com>
+ *            Lv Zheng <lv.zheng@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
-#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
 #include <linux/ipmi.h>
-#include <linux/device.h>
-#include <linux/pnp.h>
 #include <linux/spinlock.h>
 
 MODULE_AUTHOR("Zhao Yakui");
 MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
 MODULE_LICENSE("GPL");
 
-#define IPMI_FLAGS_HANDLER_INSTALL     0
-
 #define ACPI_IPMI_OK                   0
 #define ACPI_IPMI_TIMEOUT              0x10
 #define ACPI_IPMI_UNKNOWN              0x07
 /* the IPMI timeout is 5s */
-#define IPMI_TIMEOUT                   (5 * HZ)
+#define IPMI_TIMEOUT                   (5000)
+#define ACPI_IPMI_MAX_MSG_LENGTH       64
 
 struct acpi_ipmi_device {
        /* the device list attached to driver_data.ipmi_devices */
        struct list_head head;
+
        /* the IPMI request message list */
        struct list_head tx_msg_list;
-       spinlock_t      tx_msg_lock;
+
+       spinlock_t tx_msg_lock;
        acpi_handle handle;
-       struct pnp_dev *pnp_dev;
-       ipmi_user_t     user_interface;
+       struct device *dev;
+       ipmi_user_t user_interface;
        int ipmi_ifnum; /* IPMI interface number */
        long curr_msgid;
-       unsigned long flags;
-       struct ipmi_smi_info smi_data;
+       bool dead;
+       struct kref kref;
 };
 
 struct ipmi_driver_data {
-       struct list_head        ipmi_devices;
-       struct ipmi_smi_watcher bmc_events;
-       struct ipmi_user_hndl   ipmi_hndlrs;
-       struct mutex            ipmi_lock;
+       struct list_head ipmi_devices;
+       struct ipmi_smi_watcher bmc_events;
+       struct ipmi_user_hndl ipmi_hndlrs;
+       struct mutex ipmi_lock;
+
+       /*
+        * NOTE: IPMI System Interface Selection
+        * There is no system interface specified by the IPMI operation
+        * region access.  We try to select one system interface with ACPI
+        * handle set.  IPMI messages passed from the ACPI codes are sent
+        * to this selected global IPMI system interface.
+        */
+       struct acpi_ipmi_device *selected_smi;
 };
 
 struct acpi_ipmi_msg {
        struct list_head head;
+
        /*
         * General speaking the addr type should be SI_ADDR_TYPE. And
         * the addr channel should be BMC.
@@ -86,30 +85,31 @@ struct acpi_ipmi_msg {
         */
        struct ipmi_addr addr;
        long tx_msgid;
+
        /* it is used to track whether the IPMI message is finished */
        struct completion tx_complete;
+
        struct kernel_ipmi_msg tx_message;
-       int     msg_done;
-       /* tx data . And copy it from ACPI object buffer */
-       u8      tx_data[64];
-       int     tx_len;
-       u8      rx_data[64];
-       int     rx_len;
+       int msg_done;
+
+       /* tx/rx data . And copy it from/to ACPI object buffer */
+       u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
+       u8 rx_len;
+
        struct acpi_ipmi_device *device;
+       struct kref kref;
 };
 
 /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
 struct acpi_ipmi_buffer {
        u8 status;
        u8 length;
-       u8 data[64];
+       u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
 };
 
 static void ipmi_register_bmc(int iface, struct device *dev);
 static void ipmi_bmc_gone(int iface);
 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device);
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device);
 
 static struct ipmi_driver_data driver_data = {
        .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
@@ -121,29 +121,142 @@ static struct ipmi_driver_data driver_data = {
        .ipmi_hndlrs = {
                .ipmi_recv_hndl = ipmi_msg_handler,
        },
+       .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
 };
 
-static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi)
+static struct acpi_ipmi_device *
+ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
+{
+       struct acpi_ipmi_device *ipmi_device;
+       int err;
+       ipmi_user_t user;
+
+       ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
+       if (!ipmi_device)
+               return NULL;
+
+       kref_init(&ipmi_device->kref);
+       INIT_LIST_HEAD(&ipmi_device->head);
+       INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
+       spin_lock_init(&ipmi_device->tx_msg_lock);
+       ipmi_device->handle = handle;
+       ipmi_device->dev = get_device(dev);
+       ipmi_device->ipmi_ifnum = iface;
+
+       err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
+                              ipmi_device, &user);
+       if (err) {
+               put_device(dev);
+               kfree(ipmi_device);
+               return NULL;
+       }
+       ipmi_device->user_interface = user;
+
+       return ipmi_device;
+}
+
+static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
+{
+       ipmi_destroy_user(ipmi_device->user_interface);
+       put_device(ipmi_device->dev);
+       kfree(ipmi_device);
+}
+
+static void ipmi_dev_release_kref(struct kref *kref)
+{
+       struct acpi_ipmi_device *ipmi =
+               container_of(kref, struct acpi_ipmi_device, kref);
+
+       ipmi_dev_release(ipmi);
+}
+
+static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
+{
+       list_del(&ipmi_device->head);
+       if (driver_data.selected_smi == ipmi_device)
+               driver_data.selected_smi = NULL;
+
+       /*
+        * Always setting dead flag after deleting from the list or
+        * list_for_each_entry() codes must get changed.
+        */
+       ipmi_device->dead = true;
+}
+
+static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
+{
+       struct acpi_ipmi_device *ipmi_device = NULL;
+
+       mutex_lock(&driver_data.ipmi_lock);
+       if (driver_data.selected_smi) {
+               ipmi_device = driver_data.selected_smi;
+               kref_get(&ipmi_device->kref);
+       }
+       mutex_unlock(&driver_data.ipmi_lock);
+
+       return ipmi_device;
+}
+
+static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
+{
+       kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
+}
+
+static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
 {
+       struct acpi_ipmi_device *ipmi;
        struct acpi_ipmi_msg *ipmi_msg;
-       struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+
+       ipmi = acpi_ipmi_dev_get();
+       if (!ipmi)
+               return NULL;
 
        ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
-       if (!ipmi_msg)  {
-               dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n");
+       if (!ipmi_msg) {
+               acpi_ipmi_dev_put(ipmi);
                return NULL;
        }
+
+       kref_init(&ipmi_msg->kref);
        init_completion(&ipmi_msg->tx_complete);
        INIT_LIST_HEAD(&ipmi_msg->head);
        ipmi_msg->device = ipmi;
+       ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
+
        return ipmi_msg;
 }
 
-#define                IPMI_OP_RGN_NETFN(offset)       ((offset >> 8) & 0xff)
-#define                IPMI_OP_RGN_CMD(offset)         (offset & 0xff)
-static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
-                               acpi_physical_address address,
-                               acpi_integer *value)
+static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
+{
+       acpi_ipmi_dev_put(tx_msg->device);
+       kfree(tx_msg);
+}
+
+static void ipmi_msg_release_kref(struct kref *kref)
+{
+       struct acpi_ipmi_msg *tx_msg =
+               container_of(kref, struct acpi_ipmi_msg, kref);
+
+       ipmi_msg_release(tx_msg);
+}
+
+static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
+{
+       kref_get(&tx_msg->kref);
+
+       return tx_msg;
+}
+
+static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
+{
+       kref_put(&tx_msg->kref, ipmi_msg_release_kref);
+}
+
+#define IPMI_OP_RGN_NETFN(offset)      ((offset >> 8) & 0xff)
+#define IPMI_OP_RGN_CMD(offset)                (offset & 0xff)
+static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
+                                   acpi_physical_address address,
+                                   acpi_integer *value)
 {
        struct kernel_ipmi_msg *msg;
        struct acpi_ipmi_buffer *buffer;
@@ -151,21 +264,31 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
        unsigned long flags;
 
        msg = &tx_msg->tx_message;
+
        /*
         * IPMI network function and command are encoded in the address
         * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
         */
        msg->netfn = IPMI_OP_RGN_NETFN(address);
        msg->cmd = IPMI_OP_RGN_CMD(address);
-       msg->data = tx_msg->tx_data;
+       msg->data = tx_msg->data;
+
        /*
         * value is the parameter passed by the IPMI opregion space handler.
         * It points to the IPMI request message buffer
         */
        buffer = (struct acpi_ipmi_buffer *)value;
+
        /* copy the tx message data */
+       if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
+               dev_WARN_ONCE(tx_msg->device->dev, true,
+                             "Unexpected request (msg len %d).\n",
+                             buffer->length);
+               return -EINVAL;
+       }
        msg->data_len = buffer->length;
-       memcpy(tx_msg->tx_data, buffer->data, msg->data_len);
+       memcpy(tx_msg->data, buffer->data, msg->data_len);
+
        /*
         * now the default type is SYSTEM_INTERFACE and channel type is BMC.
         * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
@@ -179,14 +302,17 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
 
        /* Get the msgid */
        device = tx_msg->device;
+
        spin_lock_irqsave(&device->tx_msg_lock, flags);
        device->curr_msgid++;
        tx_msg->tx_msgid = device->curr_msgid;
        spin_unlock_irqrestore(&device->tx_msg_lock, flags);
+
+       return 0;
 }
 
 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
-               acpi_integer *value, int rem_time)
+                                     acpi_integer *value)
 {
        struct acpi_ipmi_buffer *buffer;
 
@@ -195,110 +321,158 @@ static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
         * IPMI message returned by IPMI command.
         */
        buffer = (struct acpi_ipmi_buffer *)value;
-       if (!rem_time && !msg->msg_done) {
-               buffer->status = ACPI_IPMI_TIMEOUT;
-               return;
-       }
+
        /*
-        * If the flag of msg_done is not set or the recv length is zero, it
-        * means that the IPMI command is not executed correctly.
-        * The status code will be ACPI_IPMI_UNKNOWN.
+        * If the flag of msg_done is not set, it means that the IPMI command is
+        * not executed correctly.
         */
-       if (!msg->msg_done || !msg->rx_len) {
-               buffer->status = ACPI_IPMI_UNKNOWN;
+       buffer->status = msg->msg_done;
+       if (msg->msg_done != ACPI_IPMI_OK)
                return;
-       }
+
        /*
         * If the IPMI response message is obtained correctly, the status code
         * will be ACPI_IPMI_OK
         */
-       buffer->status = ACPI_IPMI_OK;
        buffer->length = msg->rx_len;
-       memcpy(buffer->data, msg->rx_data, msg->rx_len);
+       memcpy(buffer->data, msg->data, msg->rx_len);
 }
 
 static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
 {
-       struct acpi_ipmi_msg *tx_msg, *temp;
-       int count = HZ / 10;
-       struct pnp_dev *pnp_dev = ipmi->pnp_dev;
+       struct acpi_ipmi_msg *tx_msg;
+       unsigned long flags;
+
+       /*
+        * NOTE: On-going ipmi_recv_msg
+        * ipmi_msg_handler() may still be invoked by ipmi_si after
+        * flushing.  But it is safe to do a fast flushing on module_exit()
+        * without waiting for all ipmi_recv_msg(s) to complete from
+        * ipmi_msg_handler() as it is ensured by ipmi_si that all
+        * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
+        */
+       spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+       while (!list_empty(&ipmi->tx_msg_list)) {
+               tx_msg = list_first_entry(&ipmi->tx_msg_list,
+                                         struct acpi_ipmi_msg,
+                                         head);
+               list_del(&tx_msg->head);
+               spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
 
-       list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
                /* wake up the sleep thread on the Tx msg */
                complete(&tx_msg->tx_complete);
+               acpi_ipmi_msg_put(tx_msg);
+               spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
        }
+       spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+}
+
+static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
+                              struct acpi_ipmi_msg *msg)
+{
+       struct acpi_ipmi_msg *tx_msg, *temp;
+       bool msg_found = false;
+       unsigned long flags;
 
-       /* wait for about 100ms to flush the tx message list */
-       while (count--) {
-               if (list_empty(&ipmi->tx_msg_list))
+       spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
+       list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
+               if (msg == tx_msg) {
+                       msg_found = true;
+                       list_del(&tx_msg->head);
                        break;
-               schedule_timeout(1);
+               }
        }
-       if (!list_empty(&ipmi->tx_msg_list))
-               dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n");
+       spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
+
+       if (msg_found)
+               acpi_ipmi_msg_put(tx_msg);
 }
 
 static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
 {
        struct acpi_ipmi_device *ipmi_device = user_msg_data;
-       int msg_found = 0;
-       struct acpi_ipmi_msg *tx_msg;
-       struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
+       bool msg_found = false;
+       struct acpi_ipmi_msg *tx_msg, *temp;
+       struct device *dev = ipmi_device->dev;
        unsigned long flags;
 
        if (msg->user != ipmi_device->user_interface) {
-               dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
-                       "returned user %p, expected user %p\n",
-                       msg->user, ipmi_device->user_interface);
-               ipmi_free_recv_msg(msg);
-               return;
+               dev_warn(dev,
+                        "Unexpected response is returned. returned user %p, expected user %p\n",
+                        msg->user, ipmi_device->user_interface);
+               goto out_msg;
        }
+
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
-       list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
+       list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
                if (msg->msgid == tx_msg->tx_msgid) {
-                       msg_found = 1;
+                       msg_found = true;
+                       list_del(&tx_msg->head);
                        break;
                }
        }
-
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+
        if (!msg_found) {
-               dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
-                       "returned.\n", msg->msgid);
-               ipmi_free_recv_msg(msg);
-               return;
+               dev_warn(dev,
+                        "Unexpected response (msg id %ld) is returned.\n",
+                        msg->msgid);
+               goto out_msg;
        }
 
-       if (msg->msg.data_len) {
-               /* copy the response data to Rx_data buffer */
-               memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len);
-               tx_msg->rx_len = msg->msg.data_len;
-               tx_msg->msg_done = 1;
+       /* copy the response data to Rx_data buffer */
+       if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
+               dev_WARN_ONCE(dev, true,
+                             "Unexpected response (msg len %d).\n",
+                             msg->msg.data_len);
+               goto out_comp;
        }
+
+       /* response msg is an error msg */
+       msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+       if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
+           msg->msg.data_len == 1) {
+               if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
+                       dev_WARN_ONCE(dev, true,
+                                     "Unexpected response (timeout).\n");
+                       tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
+               }
+               goto out_comp;
+       }
+
+       tx_msg->rx_len = msg->msg.data_len;
+       memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
+       tx_msg->msg_done = ACPI_IPMI_OK;
+
+out_comp:
        complete(&tx_msg->tx_complete);
+       acpi_ipmi_msg_put(tx_msg);
+out_msg:
        ipmi_free_recv_msg(msg);
-};
+}
 
 static void ipmi_register_bmc(int iface, struct device *dev)
 {
        struct acpi_ipmi_device *ipmi_device, *temp;
-       struct pnp_dev *pnp_dev;
-       ipmi_user_t             user;
        int err;
        struct ipmi_smi_info smi_data;
        acpi_handle handle;
 
        err = ipmi_get_smi_info(iface, &smi_data);
-
        if (err)
                return;
 
-       if (smi_data.addr_src != SI_ACPI) {
-               put_device(smi_data.dev);
-               return;
-       }
-
+       if (smi_data.addr_src != SI_ACPI)
+               goto err_ref;
        handle = smi_data.addr_info.acpi_info.acpi_handle;
+       if (!handle)
+               goto err_ref;
+
+       ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
+       if (!ipmi_device) {
+               dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
+               goto err_ref;
+       }
 
        mutex_lock(&driver_data.ipmi_lock);
        list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
@@ -307,34 +481,20 @@ static void ipmi_register_bmc(int iface, struct device *dev)
                 * to the device list, don't add it again.
                 */
                if (temp->handle == handle)
-                       goto out;
+                       goto err_lock;
        }
-
-       ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
-
-       if (!ipmi_device)
-               goto out;
-
-       pnp_dev = to_pnp_dev(smi_data.dev);
-       ipmi_device->handle = handle;
-       ipmi_device->pnp_dev = pnp_dev;
-
-       err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
-                                       ipmi_device, &user);
-       if (err) {
-               dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n");
-               kfree(ipmi_device);
-               goto out;
-       }
-       acpi_add_ipmi_device(ipmi_device);
-       ipmi_device->user_interface = user;
-       ipmi_device->ipmi_ifnum = iface;
+       if (!driver_data.selected_smi)
+               driver_data.selected_smi = ipmi_device;
+       list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
        mutex_unlock(&driver_data.ipmi_lock);
-       memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info));
+
+       put_device(smi_data.dev);
        return;
 
-out:
+err_lock:
        mutex_unlock(&driver_data.ipmi_lock);
+       ipmi_dev_release(ipmi_device);
+err_ref:
        put_device(smi_data.dev);
        return;
 }
@@ -342,23 +502,29 @@ out:
 static void ipmi_bmc_gone(int iface)
 {
        struct acpi_ipmi_device *ipmi_device, *temp;
+       bool dev_found = false;
 
        mutex_lock(&driver_data.ipmi_lock);
        list_for_each_entry_safe(ipmi_device, temp,
-                               &driver_data.ipmi_devices, head) {
-               if (ipmi_device->ipmi_ifnum != iface)
-                       continue;
-
-               acpi_remove_ipmi_device(ipmi_device);
-               put_device(ipmi_device->smi_data.dev);
-               kfree(ipmi_device);
-               break;
+                                &driver_data.ipmi_devices, head) {
+               if (ipmi_device->ipmi_ifnum != iface) {
+                       dev_found = true;
+                       __ipmi_dev_kill(ipmi_device);
+                       break;
+               }
        }
+       if (!driver_data.selected_smi)
+               driver_data.selected_smi = list_first_entry_or_null(
+                                       &driver_data.ipmi_devices,
+                                       struct acpi_ipmi_device, head);
        mutex_unlock(&driver_data.ipmi_lock);
+
+       if (dev_found) {
+               ipmi_flush_tx_msg(ipmi_device);
+               acpi_ipmi_dev_put(ipmi_device);
+       }
 }
-/* --------------------------------------------------------------------------
- *                     Address Space Management
- * -------------------------------------------------------------------------- */
+
 /*
  * This is the IPMI opregion space handler.
  * @function: indicates the read/write. In fact as the IPMI message is driven
@@ -371,17 +537,17 @@ static void ipmi_bmc_gone(int iface)
  *          the response IPMI message returned by IPMI command.
  * @handler_context: IPMI device context.
  */
-
 static acpi_status
 acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
-                     u32 bits, acpi_integer *value,
-                     void *handler_context, void *region_context)
+                       u32 bits, acpi_integer *value,
+                       void *handler_context, void *region_context)
 {
        struct acpi_ipmi_msg *tx_msg;
-       struct acpi_ipmi_device *ipmi_device = handler_context;
-       int err, rem_time;
+       struct acpi_ipmi_device *ipmi_device;
+       int err;
        acpi_status status;
        unsigned long flags;
+
        /*
         * IPMI opregion message.
         * IPMI message is firstly written to the BMC and system software
@@ -391,118 +557,75 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
        if ((function & ACPI_IO_MASK) == ACPI_READ)
                return AE_TYPE;
 
-       if (!ipmi_device->user_interface)
+       tx_msg = ipmi_msg_alloc();
+       if (!tx_msg)
                return AE_NOT_EXIST;
+       ipmi_device = tx_msg->device;
 
-       tx_msg = acpi_alloc_ipmi_msg(ipmi_device);
-       if (!tx_msg)
-               return AE_NO_MEMORY;
+       if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
+               ipmi_msg_release(tx_msg);
+               return AE_TYPE;
+       }
 
-       acpi_format_ipmi_msg(tx_msg, address, value);
+       acpi_ipmi_msg_get(tx_msg);
+       mutex_lock(&driver_data.ipmi_lock);
+       /* Do not add a tx_msg that can not be flushed. */
+       if (ipmi_device->dead) {
+               mutex_unlock(&driver_data.ipmi_lock);
+               ipmi_msg_release(tx_msg);
+               return AE_NOT_EXIST;
+       }
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
+       mutex_unlock(&driver_data.ipmi_lock);
+
        err = ipmi_request_settime(ipmi_device->user_interface,
-                                       &tx_msg->addr,
-                                       tx_msg->tx_msgid,
-                                       &tx_msg->tx_message,
-                                       NULL, 0, 0, 0);
+                                  &tx_msg->addr,
+                                  tx_msg->tx_msgid,
+                                  &tx_msg->tx_message,
+                                  NULL, 0, 0, IPMI_TIMEOUT);
        if (err) {
                status = AE_ERROR;
-               goto end_label;
+               goto out_msg;
        }
-       rem_time = wait_for_completion_timeout(&tx_msg->tx_complete,
-                                       IPMI_TIMEOUT);
-       acpi_format_ipmi_response(tx_msg, value, rem_time);
+       wait_for_completion(&tx_msg->tx_complete);
+
+       acpi_format_ipmi_response(tx_msg, value);
        status = AE_OK;
 
-end_label:
-       spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
-       list_del(&tx_msg->head);
-       spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
-       kfree(tx_msg);
+out_msg:
+       ipmi_cancel_tx_msg(ipmi_device, tx_msg);
+       acpi_ipmi_msg_put(tx_msg);
        return status;
 }
 
-static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi)
-{
-       if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
-               return;
-
-       acpi_remove_address_space_handler(ipmi->handle,
-                               ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler);
-
-       clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
-}
-
-static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi)
+static int __init acpi_ipmi_init(void)
 {
+       int result;
        acpi_status status;
 
-       if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags))
+       if (acpi_disabled)
                return 0;
 
-       status = acpi_install_address_space_handler(ipmi->handle,
+       status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
                                                    ACPI_ADR_SPACE_IPMI,
                                                    &acpi_ipmi_space_handler,
-                                                   NULL, ipmi);
+                                                   NULL, NULL);
        if (ACPI_FAILURE(status)) {
-               struct pnp_dev *pnp_dev = ipmi->pnp_dev;
-               dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space "
-                       "handle\n");
+               pr_warn("Can't register IPMI opregion space handle\n");
                return -EINVAL;
        }
-       set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags);
-       return 0;
-}
-
-static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
-
-       INIT_LIST_HEAD(&ipmi_device->head);
-
-       spin_lock_init(&ipmi_device->tx_msg_lock);
-       INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
-       ipmi_install_space_handler(ipmi_device);
-
-       list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
-}
-
-static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device)
-{
-       /*
-        * If the IPMI user interface is created, it should be
-        * destroyed.
-        */
-       if (ipmi_device->user_interface) {
-               ipmi_destroy_user(ipmi_device->user_interface);
-               ipmi_device->user_interface = NULL;
-       }
-       /* flush the Tx_msg list */
-       if (!list_empty(&ipmi_device->tx_msg_list))
-               ipmi_flush_tx_msg(ipmi_device);
-
-       list_del(&ipmi_device->head);
-       ipmi_remove_space_handler(ipmi_device);
-}
-
-static int __init acpi_ipmi_init(void)
-{
-       int result = 0;
-
-       if (acpi_disabled)
-               return result;
-
-       mutex_init(&driver_data.ipmi_lock);
-
        result = ipmi_smi_watcher_register(&driver_data.bmc_events);
+       if (result)
+               pr_err("Can't register IPMI system interface watcher\n");
 
        return result;
 }
 
 static void __exit acpi_ipmi_exit(void)
 {
-       struct acpi_ipmi_device *ipmi_device, *temp;
+       struct acpi_ipmi_device *ipmi_device;
 
        if (acpi_disabled)
                return;
@@ -516,13 +639,22 @@ static void __exit acpi_ipmi_exit(void)
         * handler and free it.
         */
        mutex_lock(&driver_data.ipmi_lock);
-       list_for_each_entry_safe(ipmi_device, temp,
-                               &driver_data.ipmi_devices, head) {
-               acpi_remove_ipmi_device(ipmi_device);
-               put_device(ipmi_device->smi_data.dev);
-               kfree(ipmi_device);
+       while (!list_empty(&driver_data.ipmi_devices)) {
+               ipmi_device = list_first_entry(&driver_data.ipmi_devices,
+                                              struct acpi_ipmi_device,
+                                              head);
+               __ipmi_dev_kill(ipmi_device);
+               mutex_unlock(&driver_data.ipmi_lock);
+
+               ipmi_flush_tx_msg(ipmi_device);
+               acpi_ipmi_dev_put(ipmi_device);
+
+               mutex_lock(&driver_data.ipmi_lock);
        }
        mutex_unlock(&driver_data.ipmi_lock);
+       acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
+                                         ACPI_ADR_SPACE_IPMI,
+                                         &acpi_ipmi_space_handler);
 }
 
 module_init(acpi_ipmi_init);
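The reworked acpi_ipmi.c above replaces the old 100 ms polling flush with reference-counted tx messages: entries are unhooked from tx_msg_list under tx_msg_lock, then completed and released outside the lock. A minimal sketch of that general pattern using generic kernel primitives (the names tx_item, tx_list and tx_item_release are illustrative, not the driver's):

	#include <linux/completion.h>
	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct tx_item {
		struct list_head head;
		struct completion done;
		struct kref kref;
	};

	static LIST_HEAD(tx_list);
	static DEFINE_SPINLOCK(tx_lock);

	static void tx_item_release(struct kref *kref)
	{
		kfree(container_of(kref, struct tx_item, kref));
	}

	/* Pop each entry under the lock, but wake its waiter outside the lock. */
	static void flush_tx_list(void)
	{
		struct tx_item *item;
		unsigned long flags;

		spin_lock_irqsave(&tx_lock, flags);
		while (!list_empty(&tx_list)) {
			item = list_first_entry(&tx_list, struct tx_item, head);
			list_del(&item->head);
			spin_unlock_irqrestore(&tx_lock, flags);

			complete(&item->done);			/* wake the sender */
			kref_put(&item->kref, tx_item_release);	/* drop the list's reference */

			spin_lock_irqsave(&tx_lock, flags);
		}
		spin_unlock_irqrestore(&tx_lock, flags);
	}

This is the same unlink-under-lock, complete-outside-lock discipline the driver follows in both ipmi_flush_tx_msg() and ipmi_msg_handler().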
index fb78bb9ad8f65888817fc6f7a3ecb2563b975134..d3961014aad7ff9d77bcb296ec2f851f96d64b5b 100644 (file)
@@ -30,6 +30,7 @@ ACPI_MODULE_NAME("acpi_lpss");
 /* Offsets relative to LPSS_PRIVATE_OFFSET */
 #define LPSS_GENERAL                   0x08
 #define LPSS_GENERAL_LTR_MODE_SW       BIT(2)
+#define LPSS_GENERAL_UART_RTS_OVRD     BIT(3)
 #define LPSS_SW_LTR                    0x10
 #define LPSS_AUTO_LTR                  0x14
 #define LPSS_TX_INT                    0x20
@@ -68,11 +69,16 @@ struct lpss_private_data {
 
 static void lpss_uart_setup(struct lpss_private_data *pdata)
 {
-       unsigned int tx_int_offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+       unsigned int offset;
        u32 reg;
 
-       reg = readl(pdata->mmio_base + tx_int_offset);
-       writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + tx_int_offset);
+       offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
+       reg = readl(pdata->mmio_base + offset);
+       writel(reg | LPSS_TX_INT_MASK, pdata->mmio_base + offset);
+
+       offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
+       reg = readl(pdata->mmio_base + offset);
+       writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
 }
 
 static struct lpss_device_desc lpt_dev_desc = {
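lpss_uart_setup() above now applies the same read-modify-write sequence to two private registers: it masks the TX interrupt and sets the new UART RTS override bit. A hedged sketch of that MMIO idiom (base and offset are placeholders, not the LPSS register layout):

	#include <linux/io.h>

	/* Illustrative helper: OR bits into a memory-mapped 32-bit register. */
	static void mmio_set_bits(void __iomem *base, unsigned int offset, u32 bits)
	{
		u32 reg = readl(base + offset);

		writel(reg | bits, base + offset);
	}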
index 999adb5499c7270626466a7d74870531c3e3ae28..551dad712ffec451364a08c01487a1987c10d7db 100644 (file)
@@ -152,8 +152,9 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
        unsigned long long current_status;
 
        /* Get device present/absent information from the _STA */
-       if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle, "_STA",
-                                              NULL, &current_status)))
+       if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
+                                              METHOD_NAME__STA, NULL,
+                                              &current_status)))
                return -ENODEV;
        /*
         * Check for device status. Device should be
@@ -281,7 +282,7 @@ static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
                if (!info->enabled)
                        continue;
 
-               if (nid < 0)
+               if (nid == NUMA_NO_NODE)
                        nid = memory_add_physaddr_to_nid(info->start_addr);
 
                acpi_unbind_memory_blocks(info, handle);
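The memhotplug change above only swaps the literal "_STA" string for the METHOD_NAME__STA constant; the evaluation itself is unchanged. As a reminder of how that check works, a small sketch (not the driver's code) that evaluates _STA and tests the present/enabled bits:

	#include <linux/acpi.h>

	/* Sketch: report whether an ACPI device is both present and enabled. */
	static bool example_sta_present(acpi_handle handle)
	{
		unsigned long long sta;
		acpi_status status;

		status = acpi_evaluate_integer(handle, METHOD_NAME__STA, NULL, &sta);
		if (ACPI_FAILURE(status))
			return false;

		return (sta & ACPI_STA_DEVICE_PRESENT) &&
		       (sta & ACPI_STA_DEVICE_ENABLED);
	}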
index 1bde12708f9e112c708bb71ae8d6162af5e367a9..8a4cfc7e71f0f83cc7a8644e75eab627c8e3c52d 100644 (file)
@@ -29,6 +29,13 @@ ACPI_MODULE_NAME("platform");
 static const struct acpi_device_id acpi_platform_device_ids[] = {
 
        { "PNP0D40" },
+       { "ACPI0003" },
+       { "VPC2004" },
+       { "BCM4752" },
+
+       /* Intel Smart Sound Technology */
+       { "INT33C8" },
+       { "80860F28" },
 
        { }
 };
index f29e06efa47976eba16e3b4e449a8b5a9235909f..3c1d6b0c09a4b7d1eeeb9347a7c743d1c7487988 100644 (file)
@@ -140,15 +140,11 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
        return 0;
 }
 
-static int acpi_processor_errata(struct acpi_processor *pr)
+static int acpi_processor_errata(void)
 {
        int result = 0;
        struct pci_dev *dev = NULL;
 
-
-       if (!pr)
-               return -EINVAL;
-
        /*
         * PIIX4
         */
@@ -181,7 +177,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        cpu_maps_update_begin();
        cpu_hotplug_begin();
 
-       ret = acpi_map_lsapic(pr->handle, &pr->id);
+       ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
        if (ret)
                goto out;
 
@@ -219,11 +215,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
        int cpu_index, device_declaration = 0;
        acpi_status status = AE_OK;
        static int cpu0_initialized;
+       unsigned long long value;
 
-       if (num_online_cpus() > 1)
-               errata.smp = TRUE;
-
-       acpi_processor_errata(pr);
+       acpi_processor_errata();
 
        /*
         * Check to see if we have bus mastering arbitration control.  This
@@ -247,18 +241,12 @@ static int acpi_processor_get_info(struct acpi_device *device)
                        return -ENODEV;
                }
 
-               /*
-                * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP.
-                *      >>> 'acpi_get_processor_id(acpi_id, &id)' in
-                *      arch/xxx/acpi.c
-                */
                pr->acpi_id = object.processor.proc_id;
        } else {
                /*
                 * Declared with "Device" statement; match _UID.
                 * Note that we don't handle string _UIDs yet.
                 */
-               unsigned long long value;
                status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID,
                                                NULL, &value);
                if (ACPI_FAILURE(status)) {
@@ -270,7 +258,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
                device_declaration = 1;
                pr->acpi_id = value;
        }
-       cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id);
+       pr->apic_id = acpi_get_apicid(pr->handle, device_declaration,
+                                       pr->acpi_id);
+       cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
 
        /* Handle UP system running SMP kernel, with no LAPIC in MADT */
        if (!cpu0_initialized && (cpu_index == -1) &&
@@ -332,9 +322,9 @@ static int acpi_processor_get_info(struct acpi_device *device)
         * ensure we get the right value in the "physical id" field
         * of /proc/cpuinfo
         */
-       status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
+       status = acpi_evaluate_integer(pr->handle, "_SUN", NULL, &value);
        if (ACPI_SUCCESS(status))
-               arch_fix_phys_package_id(pr->id, object.integer.value);
+               arch_fix_phys_package_id(pr->id, value);
 
        return 0;
 }
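The _SUN lookup above is converted from acpi_evaluate_object() plus manual unpacking to the acpi_evaluate_integer() helper. For comparison, a hedged sketch of both forms; the function names are made up for illustration:

	#include <linux/acpi.h>
	#include <linux/errno.h>
	#include <linux/slab.h>

	/* Long form: evaluate into an allocated buffer, then unpack the integer. */
	static int read_sun_long_form(acpi_handle handle, unsigned long long *out)
	{
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		union acpi_object *obj;
		acpi_status status;

		status = acpi_evaluate_object(handle, "_SUN", NULL, &buffer);
		if (ACPI_FAILURE(status))
			return -ENODEV;

		obj = buffer.pointer;
		if (!obj || obj->type != ACPI_TYPE_INTEGER) {
			kfree(buffer.pointer);
			return -EINVAL;
		}
		*out = obj->integer.value;
		kfree(buffer.pointer);
		return 0;
	}

	/* Short form: the helper does the type check and unpacking. */
	static int read_sun_short_form(acpi_handle handle, unsigned long long *out)
	{
		return ACPI_FAILURE(acpi_evaluate_integer(handle, "_SUN", NULL, out)) ?
		       -ENODEV : 0;
	}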
index 9feba08c29fe08b772e976fdefc7531300eb4d2d..27c36a5251b56dd560b04333a773ddd875360fd3 100644 (file)
@@ -113,11 +113,12 @@ void acpi_db_display_handlers(void);
 ACPI_HW_DEPENDENT_RETURN_VOID(void
                              acpi_db_generate_gpe(char *gpe_arg,
                                                   char *block_arg))
+ ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_db_generate_sci(void))
 
 /*
  * dbconvert - miscellaneous conversion routines
  */
- acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
+acpi_status acpi_db_hex_char_to_value(int hex_char, u8 *return_value);
 
 acpi_status acpi_db_convert_to_package(char *string, union acpi_object *object);
 
@@ -154,6 +155,8 @@ void acpi_db_set_scope(char *name);
 
 void acpi_db_dump_namespace(char *start_arg, char *depth_arg);
 
+void acpi_db_dump_namespace_paths(void);
+
 void acpi_db_dump_namespace_by_owner(char *owner_arg, char *depth_arg);
 
 acpi_status acpi_db_find_name_in_namespace(char *name_arg);
index ab0e9771038127c83365fc3b1d72de841612fb99..3ae5fd02ae649f46677b362af9aa4cd39c4e6e3b 100644 (file)
@@ -242,11 +242,11 @@ acpi_ev_initialize_region(union acpi_operand_object *region_obj,
  */
 u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context);
 
-u32 acpi_ev_install_sci_handler(void);
+u32 acpi_ev_sci_dispatch(void);
 
-acpi_status acpi_ev_remove_sci_handler(void);
+u32 acpi_ev_install_sci_handler(void);
 
-u32 acpi_ev_initialize_SCI(u32 program_SCI);
+acpi_status acpi_ev_remove_all_sci_handlers(void);
 
 ACPI_HW_DEPENDENT_RETURN_VOID(void acpi_ev_terminate(void))
 #endif                         /* __ACEVENTS_H__  */
index 90e846f985fa30caa109c8de336ac8e677a29bee..0fba431f4fcb90fd1bad8ec85b870486a33b70d6 100644 (file)
@@ -269,6 +269,7 @@ ACPI_EXTERN acpi_table_handler acpi_gbl_table_handler;
 ACPI_EXTERN void *acpi_gbl_table_handler_context;
 ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
 ACPI_EXTERN acpi_interface_handler acpi_gbl_interface_handler;
+ACPI_EXTERN struct acpi_sci_handler_info *acpi_gbl_sci_handler_list;
 
 /* Owner ID support */
 
@@ -445,13 +446,6 @@ ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
 ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
 ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
 ACPI_EXTERN u8 acpi_gbl_db_opt_no_region_support;
-
-ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
-ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
-ACPI_EXTERN char acpi_gbl_db_scope_buf[80];
-ACPI_EXTERN char acpi_gbl_db_debug_filename[80];
 ACPI_EXTERN u8 acpi_gbl_db_output_to_file;
 ACPI_EXTERN char *acpi_gbl_db_buffer;
 ACPI_EXTERN char *acpi_gbl_db_filename;
@@ -459,6 +453,16 @@ ACPI_EXTERN u32 acpi_gbl_db_debug_level;
 ACPI_EXTERN u32 acpi_gbl_db_console_debug_level;
 ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_db_scope_node;
 
+ACPI_EXTERN char *acpi_gbl_db_args[ACPI_DEBUGGER_MAX_ARGS];
+ACPI_EXTERN acpi_object_type acpi_gbl_db_arg_types[ACPI_DEBUGGER_MAX_ARGS];
+
+/* These buffers should all be the same size */
+
+ACPI_EXTERN char acpi_gbl_db_line_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_parsed_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_scope_buf[ACPI_DB_LINE_BUFFER_SIZE];
+ACPI_EXTERN char acpi_gbl_db_debug_filename[ACPI_DB_LINE_BUFFER_SIZE];
+
 /*
  * Statistic globals
  */
index 0ed00669cd217a0754087ed61bc89465e31804f3..be9e30ee60488b7de09a61ea1dd198fe84ebf43b 100644 (file)
@@ -398,6 +398,14 @@ struct acpi_simple_repair_info {
  *
  ****************************************************************************/
 
+/* Dispatch info for each host-installed SCI handler */
+
+struct acpi_sci_handler_info {
+       struct acpi_sci_handler_info *next;
+       acpi_sci_handler address;       /* Address of handler */
+       void *context;          /* Context to be passed to handler */
+};
+
 /* Dispatch info for each GPE -- either a method or handler, cannot be both */
 
 struct acpi_gpe_handler_info {
index 40b04bd5579e36abec92165270c44f576e8fddc4..e6138ac4a16054038275fc81f2f616cd70c26d5d 100644 (file)
@@ -213,6 +213,12 @@ acpi_ns_dump_objects(acpi_object_type type,
                     u8 display_type,
                     u32 max_depth,
                     acpi_owner_id owner_id, acpi_handle start_handle);
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+                         u8 display_type,
+                         u32 max_depth,
+                         acpi_owner_id owner_id, acpi_handle start_handle);
 #endif                         /* ACPI_FUTURE_USAGE */
 
 /*
index b24dbb80fab8f681dc3089cd8a29c700f5cb57c7..d52339090b604ba1569f599a03a45e6712e54ee9 100644 (file)
@@ -196,7 +196,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
  *
  * FUNCTION:    acpi_ev_get_gpe_xrupt_block
  *
- * PARAMETERS:  interrupt_number     - Interrupt for a GPE block
+ * PARAMETERS:  interrupt_number            - Interrupt for a GPE block
  *
  * RETURN:      A GPE interrupt block
  *
index 1b111ef74903771f8f9a8f690aef85b752f65c61..a5687540e9a66e5ba20bfeb9772ad5351b20bc61 100644 (file)
@@ -264,13 +264,6 @@ void acpi_ev_terminate(void)
 
                status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL);
 
-               /* Remove SCI handler */
-
-               status = acpi_ev_remove_sci_handler();
-               if (ACPI_FAILURE(status)) {
-                       ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
-               }
-
                status = acpi_ev_remove_global_lock_handler();
                if (ACPI_FAILURE(status)) {
                        ACPI_ERROR((AE_INFO,
@@ -280,6 +273,13 @@ void acpi_ev_terminate(void)
                acpi_gbl_events_initialized = FALSE;
        }
 
+       /* Remove SCI handlers */
+
+       status = acpi_ev_remove_all_sci_handlers();
+       if (ACPI_FAILURE(status)) {
+               ACPI_ERROR((AE_INFO, "Could not remove SCI handler"));
+       }
+
        /* Deallocate all handler objects installed within GPE info structs */
 
        status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL);
index cea14d6fc76c976db580144f93d28d890ff3938f..6293d6bb6fe1e2a0348f90505ebbaa36f2f3f3b2 100644 (file)
@@ -217,16 +217,11 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE)) {
                        region_obj->region.flags |= AOPOBJ_SETUP_COMPLETE;
 
-                       if (region_obj2->extra.region_context) {
-
-                               /* The handler for this region was already installed */
-
-                               ACPI_FREE(region_context);
-                       } else {
-                               /*
-                                * Save the returned context for use in all accesses to
-                                * this particular region
-                                */
+                       /*
+                        * Save the returned context for use in all accesses to
+                        * the handler for this particular region
+                        */
+                       if (!(region_obj2->extra.region_context)) {
                                region_obj2->extra.region_context =
                                    region_context;
                        }
@@ -402,6 +397,14 @@ acpi_ev_detach_region(union acpi_operand_object *region_obj,
                                                 handler_obj->address_space.
                                                 context, region_context);
 
+                               /*
+                                * region_context should have been released by the deactivate
+                                * operation. We don't need access to it anymore here.
+                                */
+                               if (region_context) {
+                                       *region_context = NULL;
+                               }
+
                                /* Init routine may fail, Just ignore errors */
 
                                if (ACPI_FAILURE(status)) {
index b905acf7aacdc1d90f9189b9660df28eb6777f08..94d9ebddf5755031a34b654664b0be6471330662 100644 (file)
@@ -52,6 +52,50 @@ ACPI_MODULE_NAME("evsci")
 /* Local prototypes */
 static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context);
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ev_sci_dispatch
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Status code indicates whether interrupt was handled.
+ *
+ * DESCRIPTION: Dispatch the SCI to all host-installed SCI handlers.
+ *
+ ******************************************************************************/
+
+u32 acpi_ev_sci_dispatch(void)
+{
+       struct acpi_sci_handler_info *sci_handler;
+       acpi_cpu_flags flags;
+       u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
+
+       ACPI_FUNCTION_NAME(ev_sci_dispatch);
+
+       /* Are there any host-installed SCI handlers? */
+
+       if (!acpi_gbl_sci_handler_list) {
+               return (int_status);
+       }
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Invoke all host-installed SCI handlers */
+
+       sci_handler = acpi_gbl_sci_handler_list;
+       while (sci_handler) {
+
+               /* Invoke the installed handler (at interrupt level) */
+
+               int_status |= sci_handler->address(sci_handler->context);
+
+               sci_handler = sci_handler->next;
+       }
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       return (int_status);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ev_sci_xrupt_handler
@@ -89,6 +133,10 @@ static u32 ACPI_SYSTEM_XFACE acpi_ev_sci_xrupt_handler(void *context)
         */
        interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
 
+       /* Invoke all host-installed SCI handlers */
+
+       interrupt_handled |= acpi_ev_sci_dispatch();
+
        return_UINT32(interrupt_handled);
 }
 
@@ -112,14 +160,13 @@ u32 ACPI_SYSTEM_XFACE acpi_ev_gpe_xrupt_handler(void *context)
        ACPI_FUNCTION_TRACE(ev_gpe_xrupt_handler);
 
        /*
-        * We are guaranteed by the ACPI CA initialization/shutdown code that
+        * We are guaranteed by the ACPICA initialization/shutdown code that
         * if this interrupt handler is installed, ACPI is enabled.
         */
 
        /* GPEs: Check for and dispatch any GPEs that have occurred */
 
        interrupt_handled |= acpi_ev_gpe_detect(gpe_xrupt_list);
-
        return_UINT32(interrupt_handled);
 }
 
@@ -150,15 +197,15 @@ u32 acpi_ev_install_sci_handler(void)
 
 /******************************************************************************
  *
- * FUNCTION:    acpi_ev_remove_sci_handler
+ * FUNCTION:    acpi_ev_remove_all_sci_handlers
  *
  * PARAMETERS:  none
  *
- * RETURN:      E_OK if handler uninstalled OK, E_ERROR if handler was not
+ * RETURN:      AE_OK if handler uninstalled, AE_ERROR if handler was not
  *              installed to begin with
  *
  * DESCRIPTION: Remove the SCI interrupt handler. No further SCIs will be
- *              taken.
+ *              taken. Remove all host-installed SCI handlers.
  *
  * Note:  It doesn't seem important to disable all events or set the event
  *        enable registers to their original values. The OS should disable
@@ -167,11 +214,13 @@ u32 acpi_ev_install_sci_handler(void)
  *
  ******************************************************************************/
 
-acpi_status acpi_ev_remove_sci_handler(void)
+acpi_status acpi_ev_remove_all_sci_handlers(void)
 {
+       struct acpi_sci_handler_info *sci_handler;
+       acpi_cpu_flags flags;
        acpi_status status;
 
-       ACPI_FUNCTION_TRACE(ev_remove_sci_handler);
+       ACPI_FUNCTION_TRACE(ev_remove_all_sci_handlers);
 
        /* Just let the OS remove the handler and disable the level */
 
@@ -179,6 +228,21 @@ acpi_status acpi_ev_remove_sci_handler(void)
            acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
                                             acpi_ev_sci_xrupt_handler);
 
+       if (!acpi_gbl_sci_handler_list) {
+               return (status);
+       }
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       /* Free all host-installed SCI handlers */
+
+       while (acpi_gbl_sci_handler_list) {
+               sci_handler = acpi_gbl_sci_handler_list;
+               acpi_gbl_sci_handler_list = sci_handler->next;
+               ACPI_FREE(sci_handler);
+       }
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
        return_ACPI_STATUS(status);
 }
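acpi_ev_sci_dispatch() above walks the new acpi_gbl_sci_handler_list under the GPE lock and ORs every handler's return value into the interrupt-handled status. Stripped of the ACPICA locking, the walk reduces to the following sketch (plain C, illustrative names):

	#include <stdint.h>

	typedef uint32_t (*sci_handler_fn)(void *context);

	struct sci_handler_node {
		struct sci_handler_node *next;
		sci_handler_fn handler;
		void *context;
	};

	/* Invoke every installed handler; non-zero means someone claimed the SCI. */
	static uint32_t dispatch_all(const struct sci_handler_node *list)
	{
		uint32_t handled = 0;	/* i.e. ACPI_INTERRUPT_NOT_HANDLED */

		for (; list; list = list->next)
			handled |= list->handler(list->context);

		return handled;
	}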
 
index ca5fba99c33bc82a302f8eb246ca41224a2cbada..6f56146a6f88a55a7d0ebebaa4be0bbd84126bd0 100644 (file)
@@ -383,6 +383,144 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
 #endif                         /*  ACPI_FUTURE_USAGE  */
 
 #if (!ACPI_REDUCED_HARDWARE)
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_sci_handler
+ *
+ * PARAMETERS:  address             - Address of the handler
+ *              context             - Value passed to the handler on each SCI
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+acpi_status acpi_install_sci_handler(acpi_sci_handler address, void *context)
+{
+       struct acpi_sci_handler_info *new_sci_handler;
+       struct acpi_sci_handler_info *sci_handler;
+       acpi_cpu_flags flags;
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_install_sci_handler);
+
+       if (!address) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       /* Allocate and init a handler object */
+
+       new_sci_handler = ACPI_ALLOCATE(sizeof(struct acpi_sci_handler_info));
+       if (!new_sci_handler) {
+               return_ACPI_STATUS(AE_NO_MEMORY);
+       }
+
+       new_sci_handler->address = address;
+       new_sci_handler->context = context;
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               goto exit;
+       }
+
+       /* Lock list during installation */
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+       sci_handler = acpi_gbl_sci_handler_list;
+
+       /* Ensure handler does not already exist */
+
+       while (sci_handler) {
+               if (address == sci_handler->address) {
+                       status = AE_ALREADY_EXISTS;
+                       goto unlock_and_exit;
+               }
+
+               sci_handler = sci_handler->next;
+       }
+
+       /* Install the new handler into the global list (at head) */
+
+       new_sci_handler->next = acpi_gbl_sci_handler_list;
+       acpi_gbl_sci_handler_list = new_sci_handler;
+
+      unlock_and_exit:
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+      exit:
+       if (ACPI_FAILURE(status)) {
+               ACPI_FREE(new_sci_handler);
+       }
+       return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_sci_handler
+ *
+ * PARAMETERS:  address             - Address of the handler
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove a handler for a System Control Interrupt.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_remove_sci_handler(acpi_sci_handler address)
+{
+       struct acpi_sci_handler_info *prev_sci_handler;
+       struct acpi_sci_handler_info *next_sci_handler;
+       acpi_cpu_flags flags;
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_remove_sci_handler);
+
+       if (!address) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Remove the SCI handler with lock */
+
+       flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
+
+       prev_sci_handler = NULL;
+       next_sci_handler = acpi_gbl_sci_handler_list;
+       while (next_sci_handler) {
+               if (next_sci_handler->address == address) {
+
+                       /* Unlink and free the SCI handler info block */
+
+                       if (prev_sci_handler) {
+                               prev_sci_handler->next = next_sci_handler->next;
+                       } else {
+                               acpi_gbl_sci_handler_list =
+                                   next_sci_handler->next;
+                       }
+
+                       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+                       ACPI_FREE(next_sci_handler);
+                       goto unlock_and_exit;
+               }
+
+               prev_sci_handler = next_sci_handler;
+               next_sci_handler = next_sci_handler->next;
+       }
+
+       acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
+       status = AE_NOT_EXIST;
+
+      unlock_and_exit:
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+       return_ACPI_STATUS(status);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_global_event_handler
@@ -398,6 +536,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
  *              Can be used to update event counters, etc.
  *
  ******************************************************************************/
+
 acpi_status
 acpi_install_global_event_handler(acpi_gbl_event_handler handler, void *context)
 {
index 5ee7a814cd9207db34d42b8945867e415ab733bb..f81fb068d20ed34613615d76217841e2c7297ad4 100644 (file)
@@ -119,7 +119,8 @@ ACPI_EXPORT_SYMBOL(acpi_reset)
  ******************************************************************************/
 acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
 {
-       u32 value;
+       u32 value_lo;
+       u32 value_hi;
        u32 width;
        u64 address;
        acpi_status status;
@@ -137,13 +138,8 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
                return (status);
        }
 
-       /* Initialize entire 64-bit return value to zero */
-
-       *return_value = 0;
-       value = 0;
-
        /*
-        * Two address spaces supported: Memory or IO. PCI_Config is
+        * Two address spaces supported: Memory or I/O. PCI_Config is
         * not supported here because the GAS structure is insufficient
         */
        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
@@ -155,29 +151,35 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
                }
        } else {                /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
 
+               value_lo = 0;
+               value_hi = 0;
+
                width = reg->bit_width;
                if (width == 64) {
                        width = 32;     /* Break into two 32-bit transfers */
                }
 
                status = acpi_hw_read_port((acpi_io_address)
-                                          address, &value, width);
+                                          address, &value_lo, width);
                if (ACPI_FAILURE(status)) {
                        return (status);
                }
-               *return_value = value;
 
                if (reg->bit_width == 64) {
 
                        /* Read the top 32 bits */
 
                        status = acpi_hw_read_port((acpi_io_address)
-                                                  (address + 4), &value, 32);
+                                                  (address + 4), &value_hi,
+                                                  32);
                        if (ACPI_FAILURE(status)) {
                                return (status);
                        }
-                       *return_value |= ((u64)value << 32);
                }
+
+               /* Set the return value only if status is AE_OK */
+
+               *return_value = (value_lo | ((u64)value_hi << 32));
        }
 
        ACPI_DEBUG_PRINT((ACPI_DB_IO,
@@ -186,7 +188,7 @@ acpi_status acpi_read(u64 *return_value, struct acpi_generic_address *reg)
                          ACPI_FORMAT_UINT64(address),
                          acpi_ut_get_region_name(reg->space_id)));
 
-       return (status);
+       return (AE_OK);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_read)
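The acpi_read() change above keeps the low and high halves of a 64-bit port read in separate locals and only writes *return_value once both 32-bit reads have succeeded. The same guarded recombination in isolation (illustrative callback type, not the ACPICA API):

	#include <stdint.h>

	typedef int (*port_read_fn)(uint64_t address, uint32_t *value, unsigned int width);

	/* Read a 64-bit quantity as two 32-bit port reads; leave *out untouched on error. */
	static int read_port64(port_read_fn read_port, uint64_t address, uint64_t *out)
	{
		uint32_t lo = 0, hi = 0;
		int err;

		err = read_port(address, &lo, 32);
		if (err)
			return err;

		err = read_port(address + 4, &hi, 32);
		if (err)
			return err;

		*out = (uint64_t)lo | ((uint64_t)hi << 32);
		return 0;
	}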
index c5316e5bd4abf2efce5aad7d4968708d3bccdb3d..aff79c7392ff3ff20e19e0a8f43a82709b0053ef 100644 (file)
@@ -424,8 +424,9 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
                                        /* Current scope has no parent scope */
 
                                        ACPI_ERROR((AE_INFO,
-                                                   "ACPI path has too many parent prefixes (^) "
-                                                   "- reached beyond root node"));
+                                                   "%s: Path has too many parent prefixes (^) "
+                                                   "- reached beyond root node",
+                                                   pathname));
                                        return_ACPI_STATUS(AE_NOT_FOUND);
                                }
                        }
index 7418c77fde8c70db88d0d0af9c0c6a8fcad6cad5..80633851cb2fd00e7886bdc4deacf0929726818a 100644 (file)
@@ -59,6 +59,17 @@ acpi_ns_dump_one_device(acpi_handle obj_handle,
 #endif
 
 #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
+
+#ifdef ACPI_FUTURE_USAGE
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+                            u32 level, void *context, void **return_value);
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+                     u32 level, void *context, void **return_value);
+#endif                         /* ACPI_FUTURE_USAGE */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_print_pathname
@@ -671,6 +682,129 @@ acpi_ns_dump_objects(acpi_object_type type,
 }
 #endif                         /* ACPI_FUTURE_USAGE */
 
+#ifdef ACPI_FUTURE_USAGE
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_one_object_path, acpi_ns_get_max_depth
+ *
+ * PARAMETERS:  obj_handle          - Node to be dumped
+ *              level               - Nesting level of the handle
+ *              context             - Passed into walk_namespace
+ *              return_value        - Not used
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Dump the full pathname to a namespace object. acp_ns_get_max_depth
+ *              computes the maximum nesting depth in the namespace tree, in
+ *              order to simplify formatting in acpi_ns_dump_one_object_path.
+ *              These procedures are user_functions called by acpi_ns_walk_namespace.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ns_dump_one_object_path(acpi_handle obj_handle,
+                            u32 level, void *context, void **return_value)
+{
+       u32 max_level = *((u32 *)context);
+       char *pathname;
+       struct acpi_namespace_node *node;
+       int path_indent;
+
+       if (!obj_handle) {
+               return (AE_OK);
+       }
+
+       node = acpi_ns_validate_handle(obj_handle);
+       pathname = acpi_ns_get_external_pathname(node);
+
+       path_indent = 1;
+       if (level <= max_level) {
+               path_indent = max_level - level + 1;
+       }
+
+       acpi_os_printf("%2d%*s%-12s%*s",
+                      level, level, " ", acpi_ut_get_type_name(node->type),
+                      path_indent, " ");
+
+       acpi_os_printf("%s\n", &pathname[1]);
+       ACPI_FREE(pathname);
+       return (AE_OK);
+}
+
+static acpi_status
+acpi_ns_get_max_depth(acpi_handle obj_handle,
+                     u32 level, void *context, void **return_value)
+{
+       u32 *max_level = (u32 *)context;
+
+       if (level > *max_level) {
+               *max_level = level;
+       }
+       return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_dump_object_paths
+ *
+ * PARAMETERS:  type                - Object type to be dumped
+ *              display_type        - 0 or ACPI_DISPLAY_SUMMARY
+ *              max_depth           - Maximum depth of dump. Use ACPI_UINT32_MAX
+ *                                    for an effectively unlimited depth.
+ *              owner_id            - Dump only objects owned by this ID. Use
+ *                                    ACPI_UINT32_MAX to match all owners.
+ *              start_handle        - Where in namespace to start/end search
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Dump full object pathnames within the loaded namespace. Uses
+ *              acpi_ns_walk_namespace in conjunction with acpi_ns_dump_one_object_path.
+ *
+ ******************************************************************************/
+
+void
+acpi_ns_dump_object_paths(acpi_object_type type,
+                         u8 display_type,
+                         u32 max_depth,
+                         acpi_owner_id owner_id, acpi_handle start_handle)
+{
+       acpi_status status;
+       u32 max_level = 0;
+
+       ACPI_FUNCTION_ENTRY();
+
+       /*
+        * Just lock the entire namespace for the duration of the dump.
+        * We don't want any changes to the namespace during this time,
+        * especially the temporary nodes since we are going to display
+        * them also.
+        */
+       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+       if (ACPI_FAILURE(status)) {
+               acpi_os_printf("Could not acquire namespace mutex\n");
+               return;
+       }
+
+       /* Get the max depth of the namespace tree, for formatting later */
+
+       (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+                                    ACPI_NS_WALK_NO_UNLOCK |
+                                    ACPI_NS_WALK_TEMP_NODES,
+                                    acpi_ns_get_max_depth, NULL,
+                                    (void *)&max_level, NULL);
+
+       /* Now dump the entire namespace */
+
+       (void)acpi_ns_walk_namespace(type, start_handle, max_depth,
+                                    ACPI_NS_WALK_NO_UNLOCK |
+                                    ACPI_NS_WALK_TEMP_NODES,
+                                    acpi_ns_dump_one_object_path, NULL,
+                                    (void *)&max_level, NULL);
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+}
+#endif                         /* ACPI_FUTURE_USAGE */
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ns_dump_entry
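acpi_ns_dump_object_paths() above makes two passes over the namespace: the first callback (acpi_ns_get_max_depth) records the deepest nesting level, and the second (acpi_ns_dump_one_object_path) uses that level to pad each pathname into a column. The two-pass idea on a generic tree, as a hedged standalone sketch:

	#include <stdio.h>

	struct node {
		const char *name;
		struct node *child;
		struct node *sibling;
	};

	/* Pass 1: find the deepest level so pass 2 knows how far to indent. */
	static void get_max_depth(const struct node *n, unsigned int level,
				  unsigned int *max_level)
	{
		for (; n; n = n->sibling) {
			if (level > *max_level)
				*max_level = level;
			get_max_depth(n->child, level + 1, max_level);
		}
	}

	/* Pass 2: print each name, padded so deeper entries line up. */
	static void dump_paths(const struct node *n, unsigned int level,
			       unsigned int max_level)
	{
		for (; n; n = n->sibling) {
			int pad = (level <= max_level) ? (int)(max_level - level + 1) : 1;

			printf("%2u%*s%s\n", level, pad, " ", n->name);
			dump_paths(n->child, level + 1, max_level);
		}
	}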
index b38b4b07f86e31d8920a7b1467ba4fb6604fc6eb..481a6b4a9b2ffbeb64418abd6b458697faa74eb5 100644 (file)
@@ -605,11 +605,19 @@ acpi_walk_namespace(acpi_object_type type,
                goto unlock_and_exit;
        }
 
+       /* Now we can validate the starting node */
+
+       if (!acpi_ns_validate_handle(start_object)) {
+               status = AE_BAD_PARAMETER;
+               goto unlock_and_exit2;
+       }
+
        status = acpi_ns_walk_namespace(type, start_object, max_depth,
                                        ACPI_NS_WALK_UNLOCK,
                                        descending_callback, ascending_callback,
                                        context, return_value);
 
+      unlock_and_exit2:
        (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
 
       unlock_and_exit:
index 42a13c0d7015360ab44c684ab9645500fc63f1d8..9e6788f9ba0ff5865bd2caeac1d05918c407fa11 100644 (file)
@@ -80,16 +80,10 @@ acpi_status acpi_tb_verify_table(struct acpi_table_desc *table_desc)
                }
        }
 
-       /* FACS is the odd table, has no standard ACPI header and no checksum */
+       /* Always calculate checksum, ignore bad checksum if requested */
 
-       if (!ACPI_COMPARE_NAME(&table_desc->signature, ACPI_SIG_FACS)) {
-
-               /* Always calculate checksum, ignore bad checksum if requested */
-
-               status =
-                   acpi_tb_verify_checksum(table_desc->pointer,
-                                           table_desc->length);
-       }
+       status =
+           acpi_tb_verify_checksum(table_desc->pointer, table_desc->length);
 
        return_ACPI_STATUS(status);
 }
index dc963f823d2c094bb204b7baf6734fb6d340c007..9a47715af1f37893e2985b370b7b63e9f57a01bf 100644 (file)
@@ -138,7 +138,7 @@ acpi_tb_print_table_header(acpi_physical_address address,
                ACPI_INFO((AE_INFO, "%4.4s %p %05X",
                           header->signature, ACPI_CAST_PTR(void, address),
                           header->length));
-       } else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
+       } else if (ACPI_VALIDATE_RSDP_SIG(header->signature)) {
 
                /* RSDP has no common fields */
 
@@ -190,6 +190,16 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
 {
        u8 checksum;
 
+       /*
+        * FACS/S3PT:
+        * They are the odd tables, have no standard ACPI header and no checksum
+        */
+
+       if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_S3PT) ||
+           ACPI_COMPARE_NAME(table->signature, ACPI_SIG_FACS)) {
+               return (AE_OK);
+       }
+
        /* Compute the checksum on the table */
 
        checksum = acpi_tb_checksum(ACPI_CAST_PTR(u8, table), length);
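Both table-manager hunks above revolve around the same rule: an ACPI table's bytes, including the checksum field itself, must sum to zero modulo 256, and FACS/S3PT are exempt because they carry no standard header. The arithmetic behind acpi_tb_checksum(), as a standalone sketch:

	#include <stddef.h>
	#include <stdint.h>

	/* Sum every byte of the table; a result of 0 means the checksum verifies. */
	static uint8_t table_checksum(const uint8_t *table, size_t length)
	{
		uint8_t sum = 0;

		while (length--)
			sum += *table++;

		return sum;
	}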
index 948c95e80d44765f41e82bb62c92bb4c3405bb01..1c95fabbe6a42171b868ea85e6825a3363dc31d5 100644 (file)
@@ -68,8 +68,7 @@ acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
         * Note: Sometimes there exists more than one RSDP in memory; the valid
         * RSDP has a valid checksum, all others have an invalid checksum.
         */
-       if (ACPI_STRNCMP((char *)rsdp->signature, ACPI_SIG_RSDP,
-                        sizeof(ACPI_SIG_RSDP) - 1) != 0) {
+       if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) {
 
                /* Nope, BAD Signature */
 
index 5796e11a0671d32fad4329262e05b51d038cd38e..ffd0db509d347d00ba6da16f6c8c03e047d323f2 100644 (file)
@@ -190,7 +190,7 @@ acpi_debug_print(u32 requested_debug_level,
         * Display the module name, current line number, thread ID (if requested),
         * current procedure nesting level, and the current procedure name
         */
-       acpi_os_printf("%8s-%04ld ", module_name, line_number);
+       acpi_os_printf("%9s-%04ld ", module_name, line_number);
 
        if (ACPI_LV_THREADS & acpi_dbg_level) {
                acpi_os_printf("[%u] ", (u32)thread_id);
index d6f26bf8a0626d0d7497baeaf1ce931d3a76c9ba..046d5b059c07f3ce56b6f35d2c2aac3ea9d59f0d 100644 (file)
@@ -291,7 +291,7 @@ acpi_status acpi_ut_init_globals(void)
 
 #if (!ACPI_REDUCED_HARDWARE)
 
-       /* GPE support */
+       /* GPE/SCI support */
 
        acpi_gbl_all_gpes_initialized = FALSE;
        acpi_gbl_gpe_xrupt_list_head = NULL;
@@ -300,6 +300,7 @@ acpi_status acpi_ut_init_globals(void)
        acpi_current_gpe_count = 0;
 
        acpi_gbl_global_event_handler = NULL;
+       acpi_gbl_sci_handler_list = NULL;
 
 #endif                         /* !ACPI_REDUCED_HARDWARE */
 
index 46f80e2c92f7da893ff28f5993d89fa01efc323a..6d2c49b86b7fa82434bdb0b2ce1906c95dce00ca 100644 (file)
@@ -758,9 +758,9 @@ int apei_osc_setup(void)
                .cap.pointer    = capbuf,
        };
 
-       capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-       capbuf[OSC_SUPPORT_TYPE] = 1;
-       capbuf[OSC_CONTROL_TYPE] = 0;
+       capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+       capbuf[OSC_SUPPORT_DWORD] = 1;
+       capbuf[OSC_CONTROL_DWORD] = 0;
 
        if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
            || ACPI_FAILURE(acpi_run_osc(handle, &context)))
index 2c9958cd7a4350ae675b76ad338904999ae149c1..fbf1aceda8b8ab915a7d2476d78db2b6e2b94e68 100644 (file)
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 #include <linux/power_supply.h>
@@ -72,19 +66,6 @@ static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
-enum acpi_battery_files {
-       info_tag = 0,
-       state_tag,
-       alarm_tag,
-       ACPI_BATTERY_NUMFILES,
-};
-
-#endif
-
 static const struct acpi_device_id battery_device_ids[] = {
        {"PNP0C0A", 0},
        {"", 0},
@@ -320,14 +301,6 @@ static enum power_supply_property energy_battery_props[] = {
        POWER_SUPPLY_PROP_SERIAL_NUMBER,
 };
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-inline char *acpi_battery_units(struct acpi_battery *battery)
-{
-       return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ?
-               "mA" : "mW";
-}
-#endif
-
 /* --------------------------------------------------------------------------
                                Battery Management
    -------------------------------------------------------------------------- */
@@ -740,279 +713,6 @@ static void acpi_battery_refresh(struct acpi_battery *battery)
        sysfs_add_battery(battery);
 }
 
-/* --------------------------------------------------------------------------
-                              FS Interface (/proc)
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-static struct proc_dir_entry *acpi_battery_dir;
-
-static int acpi_battery_print_info(struct seq_file *seq, int result)
-{
-       struct acpi_battery *battery = seq->private;
-
-       if (result)
-               goto end;
-
-       seq_printf(seq, "present:                 %s\n",
-                  acpi_battery_present(battery) ? "yes" : "no");
-       if (!acpi_battery_present(battery))
-               goto end;
-       if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "design capacity:         unknown\n");
-       else
-               seq_printf(seq, "design capacity:         %d %sh\n",
-                          battery->design_capacity,
-                          acpi_battery_units(battery));
-
-       if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "last full capacity:      unknown\n");
-       else
-               seq_printf(seq, "last full capacity:      %d %sh\n",
-                          battery->full_charge_capacity,
-                          acpi_battery_units(battery));
-
-       seq_printf(seq, "battery technology:      %srechargeable\n",
-                  (!battery->technology)?"non-":"");
-
-       if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "design voltage:          unknown\n");
-       else
-               seq_printf(seq, "design voltage:          %d mV\n",
-                          battery->design_voltage);
-       seq_printf(seq, "design capacity warning: %d %sh\n",
-                  battery->design_capacity_warning,
-                  acpi_battery_units(battery));
-       seq_printf(seq, "design capacity low:     %d %sh\n",
-                  battery->design_capacity_low,
-                  acpi_battery_units(battery));
-       seq_printf(seq, "cycle count:             %i\n", battery->cycle_count);
-       seq_printf(seq, "capacity granularity 1:  %d %sh\n",
-                  battery->capacity_granularity_1,
-                  acpi_battery_units(battery));
-       seq_printf(seq, "capacity granularity 2:  %d %sh\n",
-                  battery->capacity_granularity_2,
-                  acpi_battery_units(battery));
-       seq_printf(seq, "model number:            %s\n", battery->model_number);
-       seq_printf(seq, "serial number:           %s\n", battery->serial_number);
-       seq_printf(seq, "battery type:            %s\n", battery->type);
-       seq_printf(seq, "OEM info:                %s\n", battery->oem_info);
-      end:
-       if (result)
-               seq_printf(seq, "ERROR: Unable to read battery info\n");
-       return result;
-}
-
-static int acpi_battery_print_state(struct seq_file *seq, int result)
-{
-       struct acpi_battery *battery = seq->private;
-
-       if (result)
-               goto end;
-
-       seq_printf(seq, "present:                 %s\n",
-                  acpi_battery_present(battery) ? "yes" : "no");
-       if (!acpi_battery_present(battery))
-               goto end;
-
-       seq_printf(seq, "capacity state:          %s\n",
-                       (battery->state & 0x04) ? "critical" : "ok");
-       if ((battery->state & 0x01) && (battery->state & 0x02))
-               seq_printf(seq,
-                          "charging state:          charging/discharging\n");
-       else if (battery->state & 0x01)
-               seq_printf(seq, "charging state:          discharging\n");
-       else if (battery->state & 0x02)
-               seq_printf(seq, "charging state:          charging\n");
-       else
-               seq_printf(seq, "charging state:          charged\n");
-
-       if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "present rate:            unknown\n");
-       else
-               seq_printf(seq, "present rate:            %d %s\n",
-                          battery->rate_now, acpi_battery_units(battery));
-
-       if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "remaining capacity:      unknown\n");
-       else
-               seq_printf(seq, "remaining capacity:      %d %sh\n",
-                          battery->capacity_now, acpi_battery_units(battery));
-       if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN)
-               seq_printf(seq, "present voltage:         unknown\n");
-       else
-               seq_printf(seq, "present voltage:         %d mV\n",
-                          battery->voltage_now);
-      end:
-       if (result)
-               seq_printf(seq, "ERROR: Unable to read battery state\n");
-
-       return result;
-}
-
-static int acpi_battery_print_alarm(struct seq_file *seq, int result)
-{
-       struct acpi_battery *battery = seq->private;
-
-       if (result)
-               goto end;
-
-       if (!acpi_battery_present(battery)) {
-               seq_printf(seq, "present:                 no\n");
-               goto end;
-       }
-       seq_printf(seq, "alarm:                   ");
-       if (!battery->alarm)
-               seq_printf(seq, "unsupported\n");
-       else
-               seq_printf(seq, "%u %sh\n", battery->alarm,
-                               acpi_battery_units(battery));
-      end:
-       if (result)
-               seq_printf(seq, "ERROR: Unable to read battery alarm\n");
-       return result;
-}
-
-static ssize_t acpi_battery_write_alarm(struct file *file,
-                                       const char __user * buffer,
-                                       size_t count, loff_t * ppos)
-{
-       int result = 0;
-       char alarm_string[12] = { '\0' };
-       struct seq_file *m = file->private_data;
-       struct acpi_battery *battery = m->private;
-
-       if (!battery || (count > sizeof(alarm_string) - 1))
-               return -EINVAL;
-       if (!acpi_battery_present(battery)) {
-               result = -ENODEV;
-               goto end;
-       }
-       if (copy_from_user(alarm_string, buffer, count)) {
-               result = -EFAULT;
-               goto end;
-       }
-       alarm_string[count] = '\0';
-       battery->alarm = simple_strtol(alarm_string, NULL, 0);
-       result = acpi_battery_set_alarm(battery);
-      end:
-       if (!result)
-               return count;
-       return result;
-}
-
-typedef int(*print_func)(struct seq_file *seq, int result);
-
-static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = {
-       acpi_battery_print_info,
-       acpi_battery_print_state,
-       acpi_battery_print_alarm,
-};
-
-static int acpi_battery_read(int fid, struct seq_file *seq)
-{
-       struct acpi_battery *battery = seq->private;
-       int result = acpi_battery_update(battery);
-       return acpi_print_funcs[fid](seq, result);
-}
-
-#define DECLARE_FILE_FUNCTIONS(_name) \
-static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \
-{ \
-       return acpi_battery_read(_name##_tag, seq); \
-} \
-static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \
-{ \
-       return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \
-}
-
-DECLARE_FILE_FUNCTIONS(info);
-DECLARE_FILE_FUNCTIONS(state);
-DECLARE_FILE_FUNCTIONS(alarm);
-
-#undef DECLARE_FILE_FUNCTIONS
-
-#define FILE_DESCRIPTION_RO(_name) \
-       { \
-       .name = __stringify(_name), \
-       .mode = S_IRUGO, \
-       .ops = { \
-               .open = acpi_battery_##_name##_open_fs, \
-               .read = seq_read, \
-               .llseek = seq_lseek, \
-               .release = single_release, \
-               .owner = THIS_MODULE, \
-               }, \
-       }
-
-#define FILE_DESCRIPTION_RW(_name) \
-       { \
-       .name = __stringify(_name), \
-       .mode = S_IFREG | S_IRUGO | S_IWUSR, \
-       .ops = { \
-               .open = acpi_battery_##_name##_open_fs, \
-               .read = seq_read, \
-               .llseek = seq_lseek, \
-               .write = acpi_battery_write_##_name, \
-               .release = single_release, \
-               .owner = THIS_MODULE, \
-               }, \
-       }
-
-static const struct battery_file {
-       struct file_operations ops;
-       umode_t mode;
-       const char *name;
-} acpi_battery_file[] = {
-       FILE_DESCRIPTION_RO(info),
-       FILE_DESCRIPTION_RO(state),
-       FILE_DESCRIPTION_RW(alarm),
-};
-
-#undef FILE_DESCRIPTION_RO
-#undef FILE_DESCRIPTION_RW
-
-static int acpi_battery_add_fs(struct acpi_device *device)
-{
-       struct proc_dir_entry *entry = NULL;
-       int i;
-
-       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded,"
-                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
-       if (!acpi_device_dir(device)) {
-               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-                                                    acpi_battery_dir);
-               if (!acpi_device_dir(device))
-                       return -ENODEV;
-       }
-
-       for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
-               entry = proc_create_data(acpi_battery_file[i].name,
-                                        acpi_battery_file[i].mode,
-                                        acpi_device_dir(device),
-                                        &acpi_battery_file[i].ops,
-                                        acpi_driver_data(device));
-               if (!entry)
-                       return -ENODEV;
-       }
-       return 0;
-}
-
-static void acpi_battery_remove_fs(struct acpi_device *device)
-{
-       int i;
-       if (!acpi_device_dir(device))
-               return;
-       for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i)
-               remove_proc_entry(acpi_battery_file[i].name,
-                                 acpi_device_dir(device));
-
-       remove_proc_entry(acpi_device_bid(device), acpi_battery_dir);
-       acpi_device_dir(device) = NULL;
-}
-
-#endif
-
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -1075,15 +775,6 @@ static int acpi_battery_add(struct acpi_device *device)
        result = acpi_battery_update(battery);
        if (result)
                goto fail;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       result = acpi_battery_add_fs(device);
-#endif
-       if (result) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-               acpi_battery_remove_fs(device);
-#endif
-               goto fail;
-       }
 
        printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n",
                ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device),
@@ -1110,9 +801,6 @@ static int acpi_battery_remove(struct acpi_device *device)
                return -EINVAL;
        battery = acpi_driver_data(device);
        unregister_pm_notifier(&battery->pm_nb);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_battery_remove_fs(device);
-#endif
        sysfs_remove_battery(battery);
        mutex_destroy(&battery->lock);
        mutex_destroy(&battery->sysfs_lock);
@@ -1158,18 +846,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
 {
        if (acpi_disabled)
                return;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_battery_dir = acpi_lock_battery_dir();
-       if (!acpi_battery_dir)
-               return;
-#endif
-       if (acpi_bus_register_driver(&acpi_battery_driver) < 0) {
-#ifdef CONFIG_ACPI_PROCFS_POWER
-               acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
-               return;
-       }
-       return;
+       acpi_bus_register_driver(&acpi_battery_driver);
 }
 
 static int __init acpi_battery_init(void)
@@ -1181,9 +858,6 @@ static int __init acpi_battery_init(void)
 static void __exit acpi_battery_exit(void)
 {
        acpi_bus_unregister_driver(&acpi_battery_driver);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_unlock_battery_dir(acpi_battery_dir);
-#endif
 }
 
 module_init(acpi_battery_init);
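
The battery.c hunks above drop the deprecated /proc/acpi/battery files; the same readings stay available through the power_supply class in sysfs. As a rough illustration only (the BAT0 name and the exact attribute set vary by platform and are assumptions here), a minimal userspace reader could look like:

    #include <stdio.h>

    /* Read a single power_supply attribute; the sysfs path is an assumption. */
    static int read_attr(const char *attr, char *buf, size_t len)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/class/power_supply/BAT0/%s", attr);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (!fgets(buf, (int)len, f))
                    buf[0] = '\0';
            fclose(f);
            return 0;
    }

    int main(void)
    {
            char buf[64];

            if (!read_attr("status", buf, sizeof(buf)))
                    printf("status:   %s", buf);    /* sysfs values end in '\n' */
            if (!read_attr("capacity", buf, sizeof(buf)))
                    printf("capacity: %s", buf);
            return 0;
    }
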
index 9515f18898b2b578053c58309185daa7934cfdda..aa4d874a96fdda83dd548ba30563133e2019f04b 100644 (file)
@@ -273,6 +273,11 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
                },
        },
+
+       /*
+        * The following machines have broken backlight support when reporting
+        * the Windows 2012 OSI, so disable it until their support is fixed.
+        */
        {
        .callback = dmi_disable_osi_win8,
        .ident = "ASUS Zenbook Prime UX31A",
@@ -297,6 +302,54 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
                },
        },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad Edge E530",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "3259CTO"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad Edge E530",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "3259HJG"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Acer Aspire V5-573G",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "V5-573G/Dazzle_HW"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "Acer Aspire V5-572G",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Acer Aspire"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "V5-572G/Dazzle_CX"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad T431s",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "20AACTO1WW"),
+               },
+       },
+       {
+       .callback = dmi_disable_osi_win8,
+       .ident = "ThinkPad T430",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
+               },
+       },
 
        /*
         * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
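
The new entries above simply grow the dmi_disable_osi_win8 quirk list. For context, such tables are walked with dmi_check_system(), which invokes the callback of every matching entry; a minimal hedged sketch (table, callback and match strings are illustrative, not taken from this driver):

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/dmi.h>

    static int example_quirk(const struct dmi_system_id *id)
    {
            pr_info("applying quirk for %s\n", id->ident);
            return 0;       /* non-zero would stop the scan at this entry */
    }

    static const struct dmi_system_id example_quirks[] __initconst = {
            {
                    .callback = example_quirk,
                    .ident = "Example Notebook",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                            DMI_MATCH(DMI_PRODUCT_VERSION, "1234ABC"),
                    },
            },
            {}      /* terminating empty entry */
    };

    static int __init example_dmi_init(void)
    {
            dmi_check_system(example_quirks);   /* returns the number of matches */
            return 0;
    }
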
index b587ec8257b2190758eca2b8b3306a933d4ae16d..fbcfaa682c157dc6a6edcc5ab61b59c55a0cc269 100644 (file)
@@ -255,7 +255,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
                        acpi_print_osc_error(handle, context,
                                "_OSC invalid revision");
                if (errors & OSC_CAPABILITIES_MASK_ERROR) {
-                       if (((u32 *)context->cap.pointer)[OSC_QUERY_TYPE]
+                       if (((u32 *)context->cap.pointer)[OSC_QUERY_DWORD]
                            & OSC_QUERY_ENABLE)
                                goto out_success;
                        status = AE_SUPPORT;
@@ -295,30 +295,30 @@ static void acpi_bus_osc_support(void)
        };
        acpi_handle handle;
 
-       capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-       capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
+       capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+       capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
 #if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
                        defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
-       capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
+       capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
 #endif
 
 #if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
-       capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
+       capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
 #endif
 
 #ifdef ACPI_HOTPLUG_OST
-       capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_HOTPLUG_OST_SUPPORT;
+       capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
 #endif
 
        if (!ghes_disable)
-               capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_APEI_SUPPORT;
+               capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_APEI_SUPPORT;
        if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
                return;
        if (ACPI_SUCCESS(acpi_run_osc(handle, &context))) {
                u32 *capbuf_ret = context.ret.pointer;
-               if (context.ret.length > OSC_SUPPORT_TYPE)
+               if (context.ret.length > OSC_SUPPORT_DWORD)
                        osc_sb_apei_support_acked =
-                               capbuf_ret[OSC_SUPPORT_TYPE] & OSC_SB_APEI_SUPPORT;
+                               capbuf_ret[OSC_SUPPORT_DWORD] & OSC_SB_APEI_SUPPORT;
                kfree(context.ret.pointer);
        }
        /* do we need to check other returned cap? Sounds no */
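
The OSC_*_TYPE to OSC_*_DWORD renames above are purely cosmetic: the _OSC Capabilities Buffer stays an array of DWORDs in which element 0 carries the query flag, element 1 the support bits and element 2 the control bits. A hedged sketch of filling such a buffer with the new index names (the support/control values are placeholders):

    #include <linux/acpi.h>

    /* Build the three-DWORD capabilities buffer handed to acpi_run_osc();
     * the values passed in are illustrative only. */
    static void example_fill_capbuf(u32 capbuf[3], u32 support, u32 control)
    {
            capbuf[OSC_QUERY_DWORD]   = OSC_QUERY_ENABLE;   /* DWORD 1: query, don't commit */
            capbuf[OSC_SUPPORT_DWORD] = support;            /* DWORD 2: _OSC Support Field */
            capbuf[OSC_CONTROL_DWORD] = control;            /* DWORD 3: _OSC Control Field */
    }
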
index a55773801c5f1fb739df9597dbbde86d272b092c..c971929d75c20090b69afda2ab65b073bc95b140 100644 (file)
@@ -383,18 +383,15 @@ static int acpi_button_add(struct acpi_device *device)
 
        switch (button->type) {
        case ACPI_BUTTON_TYPE_POWER:
-               input->evbit[0] = BIT_MASK(EV_KEY);
-               set_bit(KEY_POWER, input->keybit);
+               input_set_capability(input, EV_KEY, KEY_POWER);
                break;
 
        case ACPI_BUTTON_TYPE_SLEEP:
-               input->evbit[0] = BIT_MASK(EV_KEY);
-               set_bit(KEY_SLEEP, input->keybit);
+               input_set_capability(input, EV_KEY, KEY_SLEEP);
                break;
 
        case ACPI_BUTTON_TYPE_LID:
-               input->evbit[0] = BIT_MASK(EV_SW);
-               set_bit(SW_LID, input->swbit);
+               input_set_capability(input, EV_SW, SW_LID);
                break;
        }
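
The button.c hunk above replaces the open-coded evbit/keybit updates with input_set_capability(), which sets the event-type bit and the per-code bit together. A hedged sketch of the same call in a generic input driver (names are placeholders):

    #include <linux/input.h>

    static struct input_dev *example_setup_buttons(void)
    {
            struct input_dev *input = input_allocate_device();

            if (!input)
                    return NULL;

            input->name = "example-buttons";

            /* One call declares EV_KEY capability and marks KEY_POWER reportable. */
            input_set_capability(input, EV_KEY, KEY_POWER);
            /* Works the same for switch events, e.g. a lid switch. */
            input_set_capability(input, EV_SW, SW_LID);

            if (input_register_device(input)) {
                    input_free_device(input);
                    return NULL;
            }
            return input;
    }
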
 
diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c
deleted file mode 100644 (file)
index 6c9ee68..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or (at
- *  your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
-
-#define PREFIX "ACPI: "
-
-ACPI_MODULE_NAME("cm_sbs");
-#define ACPI_AC_CLASS          "ac_adapter"
-#define ACPI_BATTERY_CLASS     "battery"
-#define _COMPONENT             ACPI_SBS_COMPONENT
-static struct proc_dir_entry *acpi_ac_dir;
-static struct proc_dir_entry *acpi_battery_dir;
-
-static DEFINE_MUTEX(cm_sbs_mutex);
-
-static int lock_ac_dir_cnt;
-static int lock_battery_dir_cnt;
-
-struct proc_dir_entry *acpi_lock_ac_dir(void)
-{
-       mutex_lock(&cm_sbs_mutex);
-       if (!acpi_ac_dir)
-               acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir);
-       if (acpi_ac_dir) {
-               lock_ac_dir_cnt++;
-       } else {
-               printk(KERN_ERR PREFIX
-                                 "Cannot create %s\n", ACPI_AC_CLASS);
-       }
-       mutex_unlock(&cm_sbs_mutex);
-       return acpi_ac_dir;
-}
-EXPORT_SYMBOL(acpi_lock_ac_dir);
-
-void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param)
-{
-       mutex_lock(&cm_sbs_mutex);
-       if (acpi_ac_dir_param)
-               lock_ac_dir_cnt--;
-       if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) {
-               remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir);
-               acpi_ac_dir = NULL;
-       }
-       mutex_unlock(&cm_sbs_mutex);
-}
-EXPORT_SYMBOL(acpi_unlock_ac_dir);
-
-struct proc_dir_entry *acpi_lock_battery_dir(void)
-{
-       mutex_lock(&cm_sbs_mutex);
-       if (!acpi_battery_dir) {
-               acpi_battery_dir =
-                   proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir);
-       }
-       if (acpi_battery_dir) {
-               lock_battery_dir_cnt++;
-       } else {
-               printk(KERN_ERR PREFIX
-                                 "Cannot create %s\n", ACPI_BATTERY_CLASS);
-       }
-       mutex_unlock(&cm_sbs_mutex);
-       return acpi_battery_dir;
-}
-EXPORT_SYMBOL(acpi_lock_battery_dir);
-
-void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param)
-{
-       mutex_lock(&cm_sbs_mutex);
-       if (acpi_battery_dir_param)
-               lock_battery_dir_cnt--;
-       if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param
-           && acpi_battery_dir) {
-               remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir);
-               acpi_battery_dir = NULL;
-       }
-       mutex_unlock(&cm_sbs_mutex);
-       return;
-}
-EXPORT_SYMBOL(acpi_unlock_battery_dir);
index 59d3202f6b36fc197afe259b343764cb2a9621aa..d42b2fb5a7e94131ce2633ef95f4d7631729325f 100644 (file)
@@ -118,9 +118,10 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
        /*
         * If we were unsure about the device parent's power state up to this
         * point, the fact that the device is in D0 implies that the parent has
-        * to be in D0 too.
+        * to be in D0 too, except if ignore_parent is set.
         */
-       if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN
+       if (!device->power.flags.ignore_parent && device->parent
+           && device->parent->power.state == ACPI_STATE_UNKNOWN
            && result == ACPI_STATE_D0)
                device->parent->power.state = ACPI_STATE_D0;
 
@@ -177,7 +178,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
                         acpi_power_state_string(state));
                return -ENODEV;
        }
-       if (device->parent && (state < device->parent->power.state)) {
+       if (!device->power.flags.ignore_parent &&
+           device->parent && (state < device->parent->power.state)) {
                dev_warn(&device->dev,
                         "Cannot transition to power state %s for parent in %s\n",
                         acpi_power_state_string(state),
@@ -1025,60 +1027,4 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
        }
 }
 EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
-
-/**
- * acpi_dev_pm_add_dependent - Add physical device depending for PM.
- * @handle: Handle of ACPI device node.
- * @depdev: Device depending on that node for PM.
- */
-void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
-{
-       struct acpi_device_physical_node *dep;
-       struct acpi_device *adev;
-
-       if (!depdev || acpi_bus_get_device(handle, &adev))
-               return;
-
-       mutex_lock(&adev->physical_node_lock);
-
-       list_for_each_entry(dep, &adev->power_dependent, node)
-               if (dep->dev == depdev)
-                       goto out;
-
-       dep = kzalloc(sizeof(*dep), GFP_KERNEL);
-       if (dep) {
-               dep->dev = depdev;
-               list_add_tail(&dep->node, &adev->power_dependent);
-       }
-
- out:
-       mutex_unlock(&adev->physical_node_lock);
-}
-EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
-
-/**
- * acpi_dev_pm_remove_dependent - Remove physical device depending for PM.
- * @handle: Handle of ACPI device node.
- * @depdev: Device depending on that node for PM.
- */
-void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
-{
-       struct acpi_device_physical_node *dep;
-       struct acpi_device *adev;
-
-       if (!depdev || acpi_bus_get_device(handle, &adev))
-               return;
-
-       mutex_lock(&adev->physical_node_lock);
-
-       list_for_each_entry(dep, &adev->power_dependent, node)
-               if (dep->dev == depdev) {
-                       list_del(&dep->node);
-                       kfree(dep);
-                       break;
-               }
-
-       mutex_unlock(&adev->physical_node_lock);
-}
-EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
 #endif /* CONFIG_PM */
index 05ea4be01a832ccb3d8b38e4318c66fee0f83a89..ca86c1ce7c8a1b5d06713e6a5264e25bd05e99e5 100644 (file)
@@ -441,7 +441,7 @@ static void handle_dock(struct dock_station *ds, int dock)
        acpi_status status;
        struct acpi_object_list arg_list;
        union acpi_object arg;
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       unsigned long long value;
 
        acpi_handle_info(ds->handle, "%s\n", dock ? "docking" : "undocking");
 
@@ -450,12 +450,10 @@ static void handle_dock(struct dock_station *ds, int dock)
        arg_list.pointer = &arg;
        arg.type = ACPI_TYPE_INTEGER;
        arg.integer.value = dock;
-       status = acpi_evaluate_object(ds->handle, "_DCK", &arg_list, &buffer);
+       status = acpi_evaluate_integer(ds->handle, "_DCK", &arg_list, &value);
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
                acpi_handle_err(ds->handle, "Failed to execute _DCK (0x%x)\n",
                                status);
-
-       kfree(buffer.pointer);
 }
 
 static inline void dock(struct dock_station *ds)
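
The dock.c change above trades acpi_evaluate_object() plus a throwaway ACPI_ALLOCATE_BUFFER for acpi_evaluate_integer(), which hands back only the integer result and leaves nothing to kfree(). A hedged sketch of the call pattern (handle and argument are placeholders; _DCK is the method used above):

    #include <linux/acpi.h>

    /* Evaluate a control method that takes one integer argument and yields an
     * integer result; sketch only, error handling trimmed. */
    static acpi_status example_eval_dck(acpi_handle handle, u64 dock,
                                        unsigned long long *out)
    {
            union acpi_object arg = {
                    .integer = { .type = ACPI_TYPE_INTEGER, .value = dock },
            };
            struct acpi_object_list args = { .count = 1, .pointer = &arg };

            return acpi_evaluate_integer(handle, "_DCK", &args, out);
    }
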
index a06d9837470585aaf0298b3f3f96f7e94bd64724..d5309fd494589b4d1596cb1c03576ed1e7ef252e 100644 (file)
@@ -28,6 +28,7 @@
 
 /* Uncomment next line to get verbose printout */
 /* #define DEBUG */
+#define pr_fmt(fmt) "ACPI : EC: " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -49,9 +50,6 @@
 #define ACPI_EC_DEVICE_NAME            "Embedded Controller"
 #define ACPI_EC_FILE_INFO              "info"
 
-#undef PREFIX
-#define PREFIX                         "ACPI: EC: "
-
 /* EC status register */
 #define ACPI_EC_FLAG_OBF       0x01    /* Output buffer full */
 #define ACPI_EC_FLAG_IBF       0x02    /* Input buffer full */
@@ -131,26 +129,26 @@ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
 {
        u8 x = inb(ec->command_addr);
-       pr_debug(PREFIX "---> status = 0x%2.2x\n", x);
+       pr_debug("---> status = 0x%2.2x\n", x);
        return x;
 }
 
 static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
 {
        u8 x = inb(ec->data_addr);
-       pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
+       pr_debug("---> data = 0x%2.2x\n", x);
        return x;
 }
 
 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
 {
-       pr_debug(PREFIX "<--- command = 0x%2.2x\n", command);
+       pr_debug("<--- command = 0x%2.2x\n", command);
        outb(command, ec->command_addr);
 }
 
 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 {
-       pr_debug(PREFIX "<--- data = 0x%2.2x\n", data);
+       pr_debug("<--- data = 0x%2.2x\n", data);
        outb(data, ec->data_addr);
 }
 
@@ -241,7 +239,7 @@ static int ec_poll(struct acpi_ec *ec)
                        }
                        advance_transaction(ec, acpi_ec_read_status(ec));
                } while (time_before(jiffies, delay));
-               pr_debug(PREFIX "controller reset, restart transaction\n");
+               pr_debug("controller reset, restart transaction\n");
                spin_lock_irqsave(&ec->lock, flags);
                start_transaction(ec);
                spin_unlock_irqrestore(&ec->lock, flags);
@@ -309,12 +307,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                }
        }
        if (ec_wait_ibf0(ec)) {
-               pr_err(PREFIX "input buffer is not empty, "
+               pr_err("input buffer is not empty, "
                                "aborting transaction\n");
                status = -ETIME;
                goto end;
        }
-       pr_debug(PREFIX "transaction start (cmd=0x%02x, addr=0x%02x)\n",
+       pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
                        t->command, t->wdata ? t->wdata[0] : 0);
        /* disable GPE during transaction if storm is detected */
        if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
@@ -331,12 +329,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                /* It is safe to enable the GPE outside of the transaction. */
                acpi_enable_gpe(NULL, ec->gpe);
        } else if (t->irq_count > ec_storm_threshold) {
-               pr_info(PREFIX "GPE storm detected(%d GPEs), "
+               pr_info("GPE storm detected(%d GPEs), "
                        "transactions will use polling mode\n",
                        t->irq_count);
                set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
        }
-       pr_debug(PREFIX "transaction end\n");
+       pr_debug("transaction end\n");
 end:
        if (ec->global_lock)
                acpi_release_global_lock(glk);
@@ -570,12 +568,12 @@ static void acpi_ec_run(void *cxt)
        struct acpi_ec_query_handler *handler = cxt;
        if (!handler)
                return;
-       pr_debug(PREFIX "start query execution\n");
+       pr_debug("start query execution\n");
        if (handler->func)
                handler->func(handler->data);
        else if (handler->handle)
                acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
-       pr_debug(PREFIX "stop query execution\n");
+       pr_debug("stop query execution\n");
        kfree(handler);
 }
 
@@ -593,7 +591,8 @@ static int acpi_ec_sync_query(struct acpi_ec *ec)
                        if (!copy)
                                return -ENOMEM;
                        memcpy(copy, handler, sizeof(*copy));
-                       pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
+                       pr_debug("push query execution (0x%2x) on queue\n",
+                               value);
                        return acpi_os_execute((copy->func) ?
                                OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
                                acpi_ec_run, copy);
@@ -616,7 +615,7 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
 {
        if (state & ACPI_EC_FLAG_SCI) {
                if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
-                       pr_debug(PREFIX "push gpe query to the queue\n");
+                       pr_debug("push gpe query to the queue\n");
                        return acpi_os_execute(OSL_NOTIFY_HANDLER,
                                acpi_ec_gpe_query, ec);
                }
@@ -630,7 +629,7 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
        struct acpi_ec *ec = data;
        u8 status = acpi_ec_read_status(ec);
 
-       pr_debug(PREFIX "~~~> interrupt, status:0x%02x\n", status);
+       pr_debug("~~~> interrupt, status:0x%02x\n", status);
 
        advance_transaction(ec, status);
        if (ec_transaction_done(ec) &&
@@ -776,7 +775,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
                         * The AE_NOT_FOUND error will be ignored and OS
                         * continue to initialize EC.
                         */
-                       printk(KERN_ERR "Fail in evaluating the _REG object"
+                       pr_err("Fail in evaluating the _REG object"
                                " of EC device. Broken bios is suspected.\n");
                } else {
                        acpi_remove_gpe_handler(NULL, ec->gpe,
@@ -795,10 +794,10 @@ static void ec_remove_handlers(struct acpi_ec *ec)
        acpi_disable_gpe(NULL, ec->gpe);
        if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
                                ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
-               pr_err(PREFIX "failed to remove space handler\n");
+               pr_err("failed to remove space handler\n");
        if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
                                &acpi_ec_gpe_handler)))
-               pr_err(PREFIX "failed to remove gpe handler\n");
+               pr_err("failed to remove gpe handler\n");
        clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
 }
 
@@ -840,7 +839,7 @@ static int acpi_ec_add(struct acpi_device *device)
        ret = !!request_region(ec->command_addr, 1, "EC cmd");
        WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
 
-       pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
+       pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
                          ec->gpe, ec->command_addr, ec->data_addr);
 
        ret = ec_install_handlers(ec);
@@ -931,7 +930,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
 /* MSI EC needs special treatment, enable it */
 static int ec_flag_msi(const struct dmi_system_id *id)
 {
-       printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
+       pr_debug("Detected MSI hardware, enabling workarounds.\n");
        EC_FLAGS_MSI = 1;
        EC_FLAGS_VALIDATE_ECDT = 1;
        return 0;
@@ -1010,7 +1009,7 @@ int __init acpi_ec_ecdt_probe(void)
        status = acpi_get_table(ACPI_SIG_ECDT, 1,
                                (struct acpi_table_header **)&ecdt_ptr);
        if (ACPI_SUCCESS(status)) {
-               pr_info(PREFIX "EC description table is found, configuring boot EC\n");
+               pr_info("EC description table is found, configuring boot EC\n");
                boot_ec->command_addr = ecdt_ptr->control.address;
                boot_ec->data_addr = ecdt_ptr->data.address;
                boot_ec->gpe = ecdt_ptr->gpe;
@@ -1030,7 +1029,7 @@ int __init acpi_ec_ecdt_probe(void)
 
        /* This workaround is needed only on some broken machines,
         * which require early EC, but fail to provide ECDT */
-       printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
+       pr_debug("Look up EC in DSDT\n");
        status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
                                        boot_ec, NULL);
        /* Check that acpi_get_devices actually find something */
@@ -1042,7 +1041,7 @@ int __init acpi_ec_ecdt_probe(void)
                    saved_ec->data_addr != boot_ec->data_addr ||
                    saved_ec->gpe != boot_ec->gpe ||
                    saved_ec->handle != boot_ec->handle)
-                       pr_info(PREFIX "ASUSTek keeps feeding us with broken "
+                       pr_info("ASUSTek keeps feeding us with broken "
                        "ECDT tables, which are very hard to workaround. "
                        "Trying to use DSDT EC info instead. Please send "
                        "output of acpidump to linux-acpi@vger.kernel.org\n");
index 41ade6570bc07c22c3c40445d696aea26e4e4c41..ba3da88cee45a20479df62023ee560aa422dff22 100644 (file)
@@ -168,7 +168,7 @@ static int acpi_fan_add(struct acpi_device *device)
               acpi_device_name(device), acpi_device_bid(device),
               !device->power.state ? "on" : "off");
 
-      end:
+end:
        return result;
 }
 
index 20f423337e1fb73c66379a3771954741860c6d7d..e9304dc7ebfa0dc40c37dcc3be270cc299b94170 100644 (file)
@@ -169,9 +169,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
                                        Video
   -------------------------------------------------------------------------- */
 #if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
-bool acpi_video_backlight_quirks(void);
-#else
-static inline bool acpi_video_backlight_quirks(void) { return false; }
+bool acpi_osi_is_win8(void);
 #endif
 
 #endif /* _ACPI_INTERNAL_H_ */
index 2e82e5d7693016676ad6afdf69ce3b553e968d44..a2343a1d9e0b1a0e77f3b7a7caf6676499c5f989 100644 (file)
@@ -73,7 +73,7 @@ int acpi_map_pxm_to_node(int pxm)
 {
        int node = pxm_to_node_map[pxm];
 
-       if (node < 0) {
+       if (node == NUMA_NO_NODE) {
                if (nodes_weight(nodes_found_map) >= MAX_NUMNODES)
                        return NUMA_NO_NODE;
                node = first_unset_node(nodes_found_map);
@@ -334,7 +334,7 @@ int acpi_get_pxm(acpi_handle h)
 
 int acpi_get_node(acpi_handle *handle)
 {
-       int pxm, node = -1;
+       int pxm, node = NUMA_NO_NODE;
 
        pxm = acpi_get_pxm(handle);
        if (pxm >= 0 && pxm < MAX_PXM_DOMAINS)
index e5f416c7f66e9e92e1ed988c3a709bc2820d56f4..a0c09adf7e7d2043587dad0384ba1374353989c7 100644 (file)
@@ -569,8 +569,10 @@ static const char * const table_sigs[] = {
 
 #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
 
-/* Must not increase 10 or needs code modification below */
-#define ACPI_OVERRIDE_TABLES 10
+#define ACPI_OVERRIDE_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+
+#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
 
 void __init acpi_initrd_override(void *data, size_t size)
 {
@@ -579,8 +581,6 @@ void __init acpi_initrd_override(void *data, size_t size)
        struct acpi_table_header *table;
        char cpio_path[32] = "kernel/firmware/acpi/";
        struct cpio_data file;
-       struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
-       char *p;
 
        if (data == NULL || size == 0)
                return;
@@ -625,8 +625,8 @@ void __init acpi_initrd_override(void *data, size_t size)
                        table->signature, cpio_path, file.name, table->length);
 
                all_tables_size += table->length;
-               early_initrd_files[table_nr].data = file.data;
-               early_initrd_files[table_nr].size = file.size;
+               acpi_initrd_files[table_nr].data = file.data;
+               acpi_initrd_files[table_nr].size = file.size;
                table_nr++;
        }
        if (table_nr == 0)
@@ -652,14 +652,34 @@ void __init acpi_initrd_override(void *data, size_t size)
        memblock_reserve(acpi_tables_addr, all_tables_size);
        arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
 
-       p = early_ioremap(acpi_tables_addr, all_tables_size);
-
+       /*
+        * early_ioremap only can remap 256k one time. If we map all
+        * tables one time, we will hit the limit. Need to map chunks
+        * one by one during copying the same as that in relocate_initrd().
+        */
        for (no = 0; no < table_nr; no++) {
-               memcpy(p + total_offset, early_initrd_files[no].data,
-                      early_initrd_files[no].size);
-               total_offset += early_initrd_files[no].size;
+               unsigned char *src_p = acpi_initrd_files[no].data;
+               phys_addr_t size = acpi_initrd_files[no].size;
+               phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+               phys_addr_t slop, clen;
+               char *dest_p;
+
+               total_offset += size;
+
+               while (size) {
+                       slop = dest_addr & ~PAGE_MASK;
+                       clen = size;
+                       if (clen > MAP_CHUNK_SIZE - slop)
+                               clen = MAP_CHUNK_SIZE - slop;
+                       dest_p = early_ioremap(dest_addr & PAGE_MASK,
+                                                clen + slop);
+                       memcpy(dest_p + slop, src_p, clen);
+                       early_iounmap(dest_p, clen + slop);
+                       src_p += clen;
+                       dest_addr += clen;
+                       size -= clen;
+               }
        }
-       early_iounmap(p, all_tables_size);
 }
 #endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
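
The chunked copy above exists because early_ioremap() can only map NR_FIX_BTMAPS pages at once, which is what the new MAP_CHUNK_SIZE expresses; on x86 with 4 KiB pages that works out to the 256k mentioned in the comment. A trivial standalone check of that arithmetic (the constants are the assumed x86 defaults):

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_shift = 12;          /* 4 KiB pages, assumed x86 default */
            unsigned long nr_fix_btmaps = 64;       /* assumed early_ioremap slot count */
            unsigned long map_chunk = nr_fix_btmaps << page_shift;

            /* Prints: MAP_CHUNK_SIZE = 262144 bytes (256 KiB) */
            printf("MAP_CHUNK_SIZE = %lu bytes (%lu KiB)\n", map_chunk, map_chunk >> 10);
            return 0;
    }
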
 
@@ -820,7 +840,7 @@ acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
 
 void acpi_os_sleep(u64 ms)
 {
-       schedule_timeout_interruptible(msecs_to_jiffies(ms));
+       msleep(ms);
 }
 
 void acpi_os_stall(u32 us)
@@ -1335,7 +1355,7 @@ static int __init acpi_os_name_setup(char *str)
        if (!str || !*str)
                return 0;
 
-       for (; count-- && str && *str; str++) {
+       for (; count-- && *str; str++) {
                if (isalnum(*str) || *str == ' ' || *str == ':')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
index d3874f4256534d06aeb0fdb9f364e5f5851963f3..924ad92852c1a7fa69ea26f0ac8c2aaeb9d254fd 100644 (file)
@@ -49,10 +49,10 @@ static int acpi_pci_root_add(struct acpi_device *device,
                             const struct acpi_device_id *not_used);
 static void acpi_pci_root_remove(struct acpi_device *device);
 
-#define ACPI_PCIE_REQ_SUPPORT (OSC_EXT_PCI_CONFIG_SUPPORT \
-                               | OSC_ACTIVE_STATE_PWR_SUPPORT \
-                               | OSC_CLOCK_PWR_CAPABILITY_SUPPORT \
-                               | OSC_MSI_SUPPORT)
+#define ACPI_PCIE_REQ_SUPPORT (OSC_PCI_EXT_CONFIG_SUPPORT \
+                               | OSC_PCI_ASPM_SUPPORT \
+                               | OSC_PCI_CLOCK_PM_SUPPORT \
+                               | OSC_PCI_MSI_SUPPORT)
 
 static const struct acpi_device_id root_device_ids[] = {
        {"PNP0A03", 0},
@@ -127,6 +127,55 @@ static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
        return AE_OK;
 }
 
+struct pci_osc_bit_struct {
+       u32 bit;
+       char *desc;
+};
+
+static struct pci_osc_bit_struct pci_osc_support_bit[] = {
+       { OSC_PCI_EXT_CONFIG_SUPPORT, "ExtendedConfig" },
+       { OSC_PCI_ASPM_SUPPORT, "ASPM" },
+       { OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" },
+       { OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" },
+       { OSC_PCI_MSI_SUPPORT, "MSI" },
+};
+
+static struct pci_osc_bit_struct pci_osc_control_bit[] = {
+       { OSC_PCI_EXPRESS_NATIVE_HP_CONTROL, "PCIeHotplug" },
+       { OSC_PCI_SHPC_NATIVE_HP_CONTROL, "SHPCHotplug" },
+       { OSC_PCI_EXPRESS_PME_CONTROL, "PME" },
+       { OSC_PCI_EXPRESS_AER_CONTROL, "AER" },
+       { OSC_PCI_EXPRESS_CAPABILITY_CONTROL, "PCIeCapability" },
+};
+
+static void decode_osc_bits(struct acpi_pci_root *root, char *msg, u32 word,
+                           struct pci_osc_bit_struct *table, int size)
+{
+       char buf[80];
+       int i, len = 0;
+       struct pci_osc_bit_struct *entry;
+
+       buf[0] = '\0';
+       for (i = 0, entry = table; i < size; i++, entry++)
+               if (word & entry->bit)
+                       len += snprintf(buf + len, sizeof(buf) - len, "%s%s",
+                                       len ? " " : "", entry->desc);
+
+       dev_info(&root->device->dev, "_OSC: %s [%s]\n", msg, buf);
+}
+
+static void decode_osc_support(struct acpi_pci_root *root, char *msg, u32 word)
+{
+       decode_osc_bits(root, msg, word, pci_osc_support_bit,
+                       ARRAY_SIZE(pci_osc_support_bit));
+}
+
+static void decode_osc_control(struct acpi_pci_root *root, char *msg, u32 word)
+{
+       decode_osc_bits(root, msg, word, pci_osc_control_bit,
+                       ARRAY_SIZE(pci_osc_control_bit));
+}
+
 static u8 pci_osc_uuid_str[] = "33DB4D5B-1FF7-401C-9657-7441C03DD766";
 
 static acpi_status acpi_pci_run_osc(acpi_handle handle,
@@ -158,14 +207,14 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
        support &= OSC_PCI_SUPPORT_MASKS;
        support |= root->osc_support_set;
 
-       capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
-       capbuf[OSC_SUPPORT_TYPE] = support;
+       capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
+       capbuf[OSC_SUPPORT_DWORD] = support;
        if (control) {
                *control &= OSC_PCI_CONTROL_MASKS;
-               capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set;
+               capbuf[OSC_CONTROL_DWORD] = *control | root->osc_control_set;
        } else {
                /* Run _OSC query only with existing controls. */
-               capbuf[OSC_CONTROL_TYPE] = root->osc_control_set;
+               capbuf[OSC_CONTROL_DWORD] = root->osc_control_set;
        }
 
        status = acpi_pci_run_osc(root->device->handle, capbuf, &result);
@@ -180,11 +229,7 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root,
 static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
 {
        acpi_status status;
-       acpi_handle tmp;
 
-       status = acpi_get_handle(root->device->handle, "_OSC", &tmp);
-       if (ACPI_FAILURE(status))
-               return status;
        mutex_lock(&osc_lock);
        status = acpi_pci_query_osc(root, flags, NULL);
        mutex_unlock(&osc_lock);
@@ -316,9 +361,8 @@ EXPORT_SYMBOL_GPL(acpi_get_pci_dev);
 acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
 {
        struct acpi_pci_root *root;
-       acpi_status status;
+       acpi_status status = AE_OK;
        u32 ctrl, capbuf[3];
-       acpi_handle tmp;
 
        if (!mask)
                return AE_BAD_PARAMETER;
@@ -331,10 +375,6 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
        if (!root)
                return AE_NOT_EXIST;
 
-       status = acpi_get_handle(handle, "_OSC", &tmp);
-       if (ACPI_FAILURE(status))
-               return status;
-
        mutex_lock(&osc_lock);
 
        *mask = ctrl | root->osc_control_set;
@@ -349,17 +389,21 @@ acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 *mask, u32 req)
                        goto out;
                if (ctrl == *mask)
                        break;
+               decode_osc_control(root, "platform does not support",
+                                  ctrl & ~(*mask));
                ctrl = *mask;
        }
 
        if ((ctrl & req) != req) {
+               decode_osc_control(root, "not requesting control; platform does not support",
+                                  req & ~(ctrl));
                status = AE_SUPPORT;
                goto out;
        }
 
-       capbuf[OSC_QUERY_TYPE] = 0;
-       capbuf[OSC_SUPPORT_TYPE] = root->osc_support_set;
-       capbuf[OSC_CONTROL_TYPE] = ctrl;
+       capbuf[OSC_QUERY_DWORD] = 0;
+       capbuf[OSC_SUPPORT_DWORD] = root->osc_support_set;
+       capbuf[OSC_CONTROL_DWORD] = ctrl;
        status = acpi_pci_run_osc(handle, capbuf, mask);
        if (ACPI_SUCCESS(status))
                root->osc_control_set = *mask;
@@ -369,6 +413,87 @@ out:
 }
 EXPORT_SYMBOL(acpi_pci_osc_control_set);
 
+static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
+                                int *clear_aspm)
+{
+       u32 support, control, requested;
+       acpi_status status;
+       struct acpi_device *device = root->device;
+       acpi_handle handle = device->handle;
+
+       /*
+        * All supported architectures that use ACPI have support for
+        * PCI domains, so we indicate this in _OSC support capabilities.
+        */
+       support = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
+       if (pci_ext_cfg_avail())
+               support |= OSC_PCI_EXT_CONFIG_SUPPORT;
+       if (pcie_aspm_support_enabled())
+               support |= OSC_PCI_ASPM_SUPPORT | OSC_PCI_CLOCK_PM_SUPPORT;
+       if (pci_msi_enabled())
+               support |= OSC_PCI_MSI_SUPPORT;
+
+       decode_osc_support(root, "OS supports", support);
+       status = acpi_pci_osc_support(root, support);
+       if (ACPI_FAILURE(status)) {
+               dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+                        acpi_format_exception(status));
+               *no_aspm = 1;
+               return;
+       }
+
+       if (pcie_ports_disabled) {
+               dev_info(&device->dev, "PCIe port services disabled; not requesting _OSC control\n");
+               return;
+       }
+
+       if ((support & ACPI_PCIE_REQ_SUPPORT) != ACPI_PCIE_REQ_SUPPORT) {
+               decode_osc_support(root, "not requesting OS control; OS requires",
+                                  ACPI_PCIE_REQ_SUPPORT);
+               return;
+       }
+
+       control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
+               | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
+               | OSC_PCI_EXPRESS_PME_CONTROL;
+
+       if (pci_aer_available()) {
+               if (aer_acpi_firmware_first())
+                       dev_info(&device->dev,
+                                "PCIe AER handled by firmware\n");
+               else
+                       control |= OSC_PCI_EXPRESS_AER_CONTROL;
+       }
+
+       requested = control;
+       status = acpi_pci_osc_control_set(handle, &control,
+                                         OSC_PCI_EXPRESS_CAPABILITY_CONTROL);
+       if (ACPI_SUCCESS(status)) {
+               decode_osc_control(root, "OS now controls", control);
+               if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+                       /*
+                        * We have ASPM control, but the FADT indicates
+                        * that it's unsupported. Clear it.
+                        */
+                       *clear_aspm = 1;
+               }
+       } else {
+               decode_osc_control(root, "OS requested", requested);
+               decode_osc_control(root, "platform willing to grant", control);
+               dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
+                       acpi_format_exception(status));
+
+               /*
+                * We want to disable ASPM here, but aspm_disabled
+                * needs to remain in its state from boot so that we
+                * properly handle PCIe 1.1 devices.  So we set this
+                * flag here, to defer the action until after the ACPI
+                * root scan.
+                */
+               *no_aspm = 1;
+       }
+}
+
 static int acpi_pci_root_add(struct acpi_device *device,
                             const struct acpi_device_id *not_used)
 {
@@ -376,9 +501,8 @@ static int acpi_pci_root_add(struct acpi_device *device,
        acpi_status status;
        int result;
        struct acpi_pci_root *root;
-       u32 flags, base_flags;
        acpi_handle handle = device->handle;
-       bool no_aspm = false, clear_aspm = false;
+       int no_aspm = 0, clear_aspm = 0;
 
        root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
        if (!root)
@@ -431,81 +555,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 
        root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
 
-       /*
-        * All supported architectures that use ACPI have support for
-        * PCI domains, so we indicate this in _OSC support capabilities.
-        */
-       flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
-       acpi_pci_osc_support(root, flags);
-
-       if (pci_ext_cfg_avail())
-               flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
-       if (pcie_aspm_support_enabled()) {
-               flags |= OSC_ACTIVE_STATE_PWR_SUPPORT |
-               OSC_CLOCK_PWR_CAPABILITY_SUPPORT;
-       }
-       if (pci_msi_enabled())
-               flags |= OSC_MSI_SUPPORT;
-       if (flags != base_flags) {
-               status = acpi_pci_osc_support(root, flags);
-               if (ACPI_FAILURE(status)) {
-                       dev_info(&device->dev, "ACPI _OSC support "
-                               "notification failed, disabling PCIe ASPM\n");
-                       no_aspm = true;
-                       flags = base_flags;
-               }
-       }
-
-       if (!pcie_ports_disabled
-           && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
-               flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
-                       | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
-                       | OSC_PCI_EXPRESS_PME_CONTROL;
-
-               if (pci_aer_available()) {
-                       if (aer_acpi_firmware_first())
-                               dev_dbg(&device->dev,
-                                       "PCIe errors handled by BIOS.\n");
-                       else
-                               flags |= OSC_PCI_EXPRESS_AER_CONTROL;
-               }
-
-               dev_info(&device->dev,
-                       "Requesting ACPI _OSC control (0x%02x)\n", flags);
-
-               status = acpi_pci_osc_control_set(handle, &flags,
-                                      OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
-               if (ACPI_SUCCESS(status)) {
-                       dev_info(&device->dev,
-                               "ACPI _OSC control (0x%02x) granted\n", flags);
-                       if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
-                               /*
-                                * We have ASPM control, but the FADT indicates
-                                * that it's unsupported. Clear it.
-                                */
-                               clear_aspm = true;
-                       }
-               } else {
-                       dev_info(&device->dev,
-                               "ACPI _OSC request failed (%s), "
-                               "returned control mask: 0x%02x\n",
-                               acpi_format_exception(status), flags);
-                       dev_info(&device->dev,
-                                "ACPI _OSC control for PCIe not granted, disabling ASPM\n");
-                       /*
-                        * We want to disable ASPM here, but aspm_disabled
-                        * needs to remain in its state from boot so that we
-                        * properly handle PCIe 1.1 devices.  So we set this
-                        * flag here, to defer the action until after the ACPI
-                        * root scan.
-                        */
-                       no_aspm = true;
-               }
-       } else {
-               dev_info(&device->dev,
-                        "Unable to request _OSC control "
-                        "(_OSC support mask: 0x%02x)\n", flags);
-       }
+       negotiate_os_control(root, &no_aspm, &clear_aspm);
 
        /*
         * TBD: Need PCI interface for enumeration/configuration of roots.
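
The pci_root.c rework above folds the _OSC negotiation into negotiate_os_control() and switches to the renamed OSC_PCI_* constants; the exported entry point for other drivers remains acpi_pci_osc_control_set(). A hedged sketch of how a caller might request native PCIe hotplug control with the new names (error handling trimmed; handle is a root bridge's ACPI handle):

    #include <linux/acpi.h>
    #include <linux/pci-acpi.h>

    static int example_request_native_hotplug(acpi_handle handle)
    {
            u32 control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
                        | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
            acpi_status status;

            status = acpi_pci_osc_control_set(handle, &control,
                                              OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
            /* On success, "control" reports every feature the OS now controls. */
            return ACPI_SUCCESS(status) ? 0 : -ENODEV;
    }
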
index 0dbe5cdf3396e5f53b23c84bb30041329544774d..c2ad391d8041ecb6a354e2df07b9d813d817a744 100644 (file)
@@ -59,16 +59,9 @@ ACPI_MODULE_NAME("power");
 #define ACPI_POWER_RESOURCE_STATE_ON   0x01
 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
 
-struct acpi_power_dependent_device {
-       struct list_head node;
-       struct acpi_device *adev;
-       struct work_struct work;
-};
-
 struct acpi_power_resource {
        struct acpi_device device;
        struct list_head list_node;
-       struct list_head dependent;
        char *name;
        u32 system_level;
        u32 order;
@@ -233,32 +226,6 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
        return 0;
 }
 
-static void acpi_power_resume_dependent(struct work_struct *work)
-{
-       struct acpi_power_dependent_device *dep;
-       struct acpi_device_physical_node *pn;
-       struct acpi_device *adev;
-       int state;
-
-       dep = container_of(work, struct acpi_power_dependent_device, work);
-       adev = dep->adev;
-       if (acpi_power_get_inferred_state(adev, &state))
-               return;
-
-       if (state > ACPI_STATE_D0)
-               return;
-
-       mutex_lock(&adev->physical_node_lock);
-
-       list_for_each_entry(pn, &adev->physical_node_list, node)
-               pm_request_resume(pn->dev);
-
-       list_for_each_entry(pn, &adev->power_dependent, node)
-               pm_request_resume(pn->dev);
-
-       mutex_unlock(&adev->physical_node_lock);
-}
-
 static int __acpi_power_on(struct acpi_power_resource *resource)
 {
        acpi_status status = AE_OK;
@@ -283,14 +250,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
                                  resource->name));
        } else {
                result = __acpi_power_on(resource);
-               if (result) {
+               if (result)
                        resource->ref_count--;
-               } else {
-                       struct acpi_power_dependent_device *dep;
-
-                       list_for_each_entry(dep, &resource->dependent, node)
-                               schedule_work(&dep->work);
-               }
        }
        return result;
 }
@@ -390,52 +351,6 @@ static int acpi_power_on_list(struct list_head *list)
        return result;
 }
 
-static void acpi_power_add_dependent(struct acpi_power_resource *resource,
-                                    struct acpi_device *adev)
-{
-       struct acpi_power_dependent_device *dep;
-
-       mutex_lock(&resource->resource_lock);
-
-       list_for_each_entry(dep, &resource->dependent, node)
-               if (dep->adev == adev)
-                       goto out;
-
-       dep = kzalloc(sizeof(*dep), GFP_KERNEL);
-       if (!dep)
-               goto out;
-
-       dep->adev = adev;
-       INIT_WORK(&dep->work, acpi_power_resume_dependent);
-       list_add_tail(&dep->node, &resource->dependent);
-
- out:
-       mutex_unlock(&resource->resource_lock);
-}
-
-static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
-                                       struct acpi_device *adev)
-{
-       struct acpi_power_dependent_device *dep;
-       struct work_struct *work = NULL;
-
-       mutex_lock(&resource->resource_lock);
-
-       list_for_each_entry(dep, &resource->dependent, node)
-               if (dep->adev == adev) {
-                       list_del(&dep->node);
-                       work = &dep->work;
-                       break;
-               }
-
-       mutex_unlock(&resource->resource_lock);
-
-       if (work) {
-               cancel_work_sync(work);
-               kfree(dep);
-       }
-}
-
 static struct attribute *attrs[] = {
        NULL,
 };
@@ -524,8 +439,6 @@ static void acpi_power_expose_hide(struct acpi_device *adev,
 
 void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
 {
-       struct acpi_device_power_state *ps;
-       struct acpi_power_resource_entry *entry;
        int state;
 
        if (adev->wakeup.flags.valid)
@@ -535,16 +448,6 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
        if (!adev->power.flags.power_resources)
                return;
 
-       ps = &adev->power.states[ACPI_STATE_D0];
-       list_for_each_entry(entry, &ps->resources, node) {
-               struct acpi_power_resource *resource = entry->resource;
-
-               if (add)
-                       acpi_power_add_dependent(resource, adev);
-               else
-                       acpi_power_remove_dependent(resource, adev);
-       }
-
        for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
                acpi_power_expose_hide(adev,
                                       &adev->power.states[state].resources,
@@ -882,7 +785,6 @@ int acpi_add_power_resource(acpi_handle handle)
        acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
                                ACPI_STA_DEFAULT);
        mutex_init(&resource->resource_lock);
-       INIT_LIST_HEAD(&resource->dependent);
        INIT_LIST_HEAD(&resource->list_node);
        resource->name = device->pnp.bus_id;
        strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
@@ -936,8 +838,10 @@ void acpi_resume_power_resources(void)
                mutex_lock(&resource->resource_lock);
 
                result = acpi_power_get_state(resource->device.handle, &state);
-               if (result)
+               if (result) {
+                       mutex_unlock(&resource->resource_lock);
                        continue;
+               }
 
                if (state == ACPI_POWER_RESOURCE_STATE_OFF
                    && resource->ref_count) {
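
The last hunk above fixes a lock leak: when acpi_power_get_state() fails, the loop used to hit "continue" with resource->resource_lock still held. A minimal standalone sketch of the same unlock-before-continue pattern, using POSIX mutexes instead of the kernel API; the struct fields, get_state() and the data are illustrative, not taken from the driver:

#include <pthread.h>
#include <stdio.h>

struct resource {
        pthread_mutex_t lock;
        int broken;          /* stand-in for a failing state query */
        int ref_count;
};

static int get_state(struct resource *r, int *state)
{
        if (r->broken)
                return -1;   /* the failure path that used to leak the lock */
        *state = 0;
        return 0;
}

static void resume_all(struct resource *res, int n)
{
        for (int i = 0; i < n; i++) {
                struct resource *r = &res[i];
                int state;

                pthread_mutex_lock(&r->lock);

                if (get_state(r, &state)) {
                        /* drop the lock before skipping, as in the fix above */
                        pthread_mutex_unlock(&r->lock);
                        continue;
                }

                if (state == 0 && r->ref_count)
                        printf("resource %d: turning on\n", i);

                pthread_mutex_unlock(&r->lock);
        }
}

int main(void)
{
        struct resource res[2] = {
                { .broken = 1, .ref_count = 1 },
                { .broken = 0, .ref_count = 1 },
        };

        for (int i = 0; i < 2; i++)
                pthread_mutex_init(&res[i].lock, NULL);

        resume_all(res, 2);
        return 0;
}
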
index 04a13784dd20a4a7e42b15b98eedcff90cb67299..6a5b152ad4d070511d08d74439155fa17a83ed8a 100644 (file)
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
-#ifdef CONFIG_X86
-#include <linux/mc146818rtc.h>
-#endif
-
 #include "sleep.h"
 
 #define _COMPONENT             ACPI_SYSTEM_COMPONENT
 
 /*
  * this file provides support for:
- * /proc/acpi/alarm
  * /proc/acpi/wakeup
  */
 
 ACPI_MODULE_NAME("sleep")
 
-#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
-/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
-#else
-#define        HAVE_ACPI_LEGACY_ALARM
-#endif
-
-#ifdef HAVE_ACPI_LEGACY_ALARM
-
-static u32 cmos_bcd_read(int offset, int rtc_control);
-
-static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
-{
-       u32 sec, min, hr;
-       u32 day, mo, yr, cent = 0;
-       u32 today = 0;
-       unsigned char rtc_control = 0;
-       unsigned long flags;
-
-       spin_lock_irqsave(&rtc_lock, flags);
-
-       rtc_control = CMOS_READ(RTC_CONTROL);
-       sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control);
-       min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control);
-       hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control);
-
-       /* If we ever get an FACP with proper values... */
-       if (acpi_gbl_FADT.day_alarm) {
-               /* ACPI spec: only low 6 its should be cared */
-               day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
-               if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-                       day = bcd2bin(day);
-       } else
-               day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
-       if (acpi_gbl_FADT.month_alarm)
-               mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control);
-       else {
-               mo = cmos_bcd_read(RTC_MONTH, rtc_control);
-               today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
-       }
-       if (acpi_gbl_FADT.century)
-               cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control);
-
-       yr = cmos_bcd_read(RTC_YEAR, rtc_control);
-
-       spin_unlock_irqrestore(&rtc_lock, flags);
-
-       /* we're trusting the FADT (see above) */
-       if (!acpi_gbl_FADT.century)
-               /* If we're not trusting the FADT, we should at least make it
-                * right for _this_ century... ehm, what is _this_ century?
-                *
-                * TBD:
-                *  ASAP: find piece of code in the kernel, e.g. star tracker driver,
-                *        which we can trust to determine the century correctly. Atom
-                *        watch driver would be nice, too...
-                *
-                *  if that has not happened, change for first release in 2050:
-                *        if (yr<50)
-                *                yr += 2100;
-                *        else
-                *                yr += 2000;   // current line of code
-                *
-                *  if that has not happened either, please do on 2099/12/31:23:59:59
-                *        s/2000/2100
-                *
-                */
-               yr += 2000;
-       else
-               yr += cent * 100;
-
-       /*
-        * Show correct dates for alarms up to a month into the future.
-        * This solves issues for nearly all situations with the common
-        * 30-day alarm clocks in PC hardware.
-        */
-       if (day < today) {
-               if (mo < 12) {
-                       mo += 1;
-               } else {
-                       mo = 1;
-                       yr += 1;
-               }
-       }
-
-       seq_printf(seq, "%4.4u-", yr);
-       (mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
-       (day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
-       (hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr);
-       (min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min);
-       (sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec);
-
-       return 0;
-}
-
-static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_system_alarm_seq_show, PDE_DATA(inode));
-}
-
-static int get_date_field(char **p, u32 * value)
-{
-       char *next = NULL;
-       char *string_end = NULL;
-       int result = -EINVAL;
-
-       /*
-        * Try to find delimeter, only to insert null.  The end of the
-        * string won't have one, but is still valid.
-        */
-       if (*p == NULL)
-               return result;
-
-       next = strpbrk(*p, "- :");
-       if (next)
-               *next++ = '\0';
-
-       *value = simple_strtoul(*p, &string_end, 10);
-
-       /* Signal success if we got a good digit */
-       if (string_end != *p)
-               result = 0;
-
-       if (next)
-               *p = next;
-       else
-               *p = NULL;
-
-       return result;
-}
-
-/* Read a possibly BCD register, always return binary */
-static u32 cmos_bcd_read(int offset, int rtc_control)
-{
-       u32 val = CMOS_READ(offset);
-       if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-               val = bcd2bin(val);
-       return val;
-}
-
-/* Write binary value into possibly BCD register */
-static void cmos_bcd_write(u32 val, int offset, int rtc_control)
-{
-       if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
-               val = bin2bcd(val);
-       CMOS_WRITE(val, offset);
-}
-
-static ssize_t
-acpi_system_write_alarm(struct file *file,
-                       const char __user * buffer, size_t count, loff_t * ppos)
-{
-       int result = 0;
-       char alarm_string[30] = { '\0' };
-       char *p = alarm_string;
-       u32 sec, min, hr, day, mo, yr;
-       int adjust = 0;
-       unsigned char rtc_control = 0;
-
-       if (count > sizeof(alarm_string) - 1)
-               return -EINVAL;
-
-       if (copy_from_user(alarm_string, buffer, count))
-               return -EFAULT;
-
-       alarm_string[count] = '\0';
-
-       /* check for time adjustment */
-       if (alarm_string[0] == '+') {
-               p++;
-               adjust = 1;
-       }
-
-       if ((result = get_date_field(&p, &yr)))
-               goto end;
-       if ((result = get_date_field(&p, &mo)))
-               goto end;
-       if ((result = get_date_field(&p, &day)))
-               goto end;
-       if ((result = get_date_field(&p, &hr)))
-               goto end;
-       if ((result = get_date_field(&p, &min)))
-               goto end;
-       if ((result = get_date_field(&p, &sec)))
-               goto end;
-
-       spin_lock_irq(&rtc_lock);
-
-       rtc_control = CMOS_READ(RTC_CONTROL);
-
-       if (adjust) {
-               yr += cmos_bcd_read(RTC_YEAR, rtc_control);
-               mo += cmos_bcd_read(RTC_MONTH, rtc_control);
-               day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
-               hr += cmos_bcd_read(RTC_HOURS, rtc_control);
-               min += cmos_bcd_read(RTC_MINUTES, rtc_control);
-               sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
-       }
-
-       spin_unlock_irq(&rtc_lock);
-
-       if (sec > 59) {
-               min += sec/60;
-               sec = sec%60;
-       }
-       if (min > 59) {
-               hr += min/60;
-               min = min%60;
-       }
-       if (hr > 23) {
-               day += hr/24;
-               hr = hr%24;
-       }
-       if (day > 31) {
-               mo += day/32;
-               day = day%32;
-       }
-       if (mo > 12) {
-               yr += mo/13;
-               mo = mo%13;
-       }
-
-       spin_lock_irq(&rtc_lock);
-       /*
-        * Disable alarm interrupt before setting alarm timer or else
-        * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs
-        */
-       rtc_control &= ~RTC_AIE;
-       CMOS_WRITE(rtc_control, RTC_CONTROL);
-       CMOS_READ(RTC_INTR_FLAGS);
-
-       /* write the fields the rtc knows about */
-       cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
-       cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
-       cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);
-
-       /*
-        * If the system supports an enhanced alarm it will have non-zero
-        * offsets into the CMOS RAM here -- which for some reason are pointing
-        * to the RTC area of memory.
-        */
-       if (acpi_gbl_FADT.day_alarm)
-               cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
-       if (acpi_gbl_FADT.month_alarm)
-               cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
-       if (acpi_gbl_FADT.century) {
-               if (adjust)
-                       yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100;
-               cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
-       }
-       /* enable the rtc alarm interrupt */
-       rtc_control |= RTC_AIE;
-       CMOS_WRITE(rtc_control, RTC_CONTROL);
-       CMOS_READ(RTC_INTR_FLAGS);
-
-       spin_unlock_irq(&rtc_lock);
-
-       acpi_clear_event(ACPI_EVENT_RTC);
-       acpi_enable_event(ACPI_EVENT_RTC, 0);
-
-       *ppos += count;
-
-       result = 0;
-      end:
-       return result ? result : count;
-}
-#endif                         /* HAVE_ACPI_LEGACY_ALARM */
-
 static int
 acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 {
@@ -417,41 +145,8 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
        .release = single_release,
 };
 
-#ifdef HAVE_ACPI_LEGACY_ALARM
-static const struct file_operations acpi_system_alarm_fops = {
-       .owner = THIS_MODULE,
-       .open = acpi_system_alarm_open_fs,
-       .read = seq_read,
-       .write = acpi_system_write_alarm,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static u32 rtc_handler(void *context)
-{
-       acpi_clear_event(ACPI_EVENT_RTC);
-       acpi_disable_event(ACPI_EVENT_RTC, 0);
-
-       return ACPI_INTERRUPT_HANDLED;
-}
-#endif                         /* HAVE_ACPI_LEGACY_ALARM */
-
 int __init acpi_sleep_proc_init(void)
 {
-#ifdef HAVE_ACPI_LEGACY_ALARM
-       /* 'alarm' [R/W] */
-       proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
-                   acpi_root_dir, &acpi_system_alarm_fops);
-
-       acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
-       /*
-        * Disable the RTC event after installing RTC handler.
-        * Only when RTC alarm is set will it be enabled.
-        */
-       acpi_clear_event(ACPI_EVENT_RTC);
-       acpi_disable_event(ACPI_EVENT_RTC, 0);
-#endif                         /* HAVE_ACPI_LEGACY_ALARM */
-
        /* 'wakeup device' [R/W] */
        proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
                    acpi_root_dir, &acpi_system_wakeup_device_fops);
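
The /proc/acpi/alarm code removed above (superseded by /sys/class/rtc/rtcX/wakealarm) read and wrote CMOS RTC alarm registers that may be BCD-encoded, converting with bcd2bin()/bin2bcd() depending on RTC_DM_BINARY. A self-contained userspace sketch of just that conversion, with made-up register values and no CMOS access:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the kernel's bcd2bin()/bin2bcd() helpers. */
static uint8_t bcd2bin(uint8_t val)
{
        return (val & 0x0f) + (val >> 4) * 10;
}

static uint8_t bin2bcd(uint8_t val)
{
        return ((val / 10) << 4) + val % 10;
}

int main(void)
{
        /* e.g. a minutes-alarm register holding BCD 0x59, i.e. 59 minutes */
        uint8_t reg = 0x59;

        printf("BCD 0x%02x -> binary %u\n", reg, bcd2bin(reg));
        printf("binary 42 -> BCD 0x%02x\n", bin2bcd(42));
        return 0;
}
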
index cf34d903f4fb4a45b7975fc5fce60d141de2b7a9..b3171f30b319c1244007e686a0b516833443191d 100644 (file)
@@ -162,16 +162,23 @@ exit:
        return apic_id;
 }
 
-int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
 {
-#ifdef CONFIG_SMP
-       int i;
-#endif
-       int apic_id = -1;
+       int apic_id;
 
        apic_id = map_mat_entry(handle, type, acpi_id);
        if (apic_id == -1)
                apic_id = map_madt_entry(type, acpi_id);
+
+       return apic_id;
+}
+
+int acpi_map_cpuid(int apic_id, u32 acpi_id)
+{
+#ifdef CONFIG_SMP
+       int i;
+#endif
+
        if (apic_id == -1) {
                /*
                 * On UP processor, there is no _MAT or MADT table.
@@ -211,6 +218,15 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 #endif
        return -1;
 }
+
+int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
+{
+       int apic_id;
+
+       apic_id = acpi_get_apicid(handle, type, acpi_id);
+
+       return acpi_map_cpuid(apic_id, acpi_id);
+}
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
 
 static bool __init processor_physically_present(acpi_handle handle)
index e534ba66d5b80861849ae705bf941ac570720607..40fc773ab6e078a0f9d06e7d9b3faf497ec49169 100644 (file)
@@ -153,8 +153,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata acpi_cpu_notifier =
-{
+static struct notifier_block __refdata acpi_cpu_notifier = {
            .notifier_call = acpi_cpu_soft_notify,
 };
 
index f98dd00b51a94b2d7e2d7dc9cd15f3e941e81bff..35c8f2bbcc40b45510ea7d1b67ee294efc33471b 100644 (file)
@@ -272,9 +272,6 @@ static void tsc_check_state(int state) { return; }
 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 {
 
-       if (!pr)
-               return -EINVAL;
-
        if (!pr->pblk)
                return -ENODEV;
 
@@ -1076,12 +1073,8 @@ int acpi_processor_hotplug(struct acpi_processor *pr)
        if (disabled_by_idle_boot_param())
                return 0;
 
-       if (!pr)
-               return -EINVAL;
-
-       if (nocst) {
+       if (nocst)
                return -ENODEV;
-       }
 
        if (!pr->flags.power_setup_done)
                return -ENODEV;
@@ -1108,9 +1101,6 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
        if (disabled_by_idle_boot_param())
                return 0;
 
-       if (!pr)
-               return -EINVAL;
-
        if (nocst)
                return -ENODEV;
 
@@ -1183,9 +1173,6 @@ int acpi_processor_power_init(struct acpi_processor *pr)
                first_run++;
        }
 
-       if (!pr)
-               return -EINVAL;
-
        if (acpi_gbl_FADT.cst_control && !nocst) {
                status =
                    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
index aef7e1cd1e5d62f95512484935e4fea9b11af961..d465ae6cdd004b9813cc333529c54ef29abcb16f 100644 (file)
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 
-#ifdef CONFIG_ACPI_PROCFS_POWER
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <asm/uaccess.h>
-#endif
-
 #include <linux/acpi.h>
 #include <linux/timer.h>
 #include <linux/jiffies.h>
@@ -67,11 +61,6 @@ static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
 
-extern struct proc_dir_entry *acpi_lock_ac_dir(void);
-extern struct proc_dir_entry *acpi_lock_battery_dir(void);
-extern void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
-extern void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir);
-
 #define MAX_SBS_BAT                    4
 #define ACPI_SBS_BLOCK_MAX             32
 
@@ -84,9 +73,6 @@ MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
 struct acpi_battery {
        struct power_supply bat;
        struct acpi_sbs *sbs;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       struct proc_dir_entry *proc_entry;
-#endif
        unsigned long update_time;
        char name[8];
        char manufacturer_name[ACPI_SBS_BLOCK_MAX];
@@ -119,9 +105,6 @@ struct acpi_sbs {
        struct acpi_device *device;
        struct acpi_smb_hc *hc;
        struct mutex lock;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       struct proc_dir_entry *charger_entry;
-#endif
        struct acpi_battery battery[MAX_SBS_BAT];
        u8 batteries_supported:4;
        u8 manager_present:1;
@@ -481,261 +464,6 @@ static struct device_attribute alarm_attr = {
        .store = acpi_battery_alarm_store,
 };
 
-/* --------------------------------------------------------------------------
-                              FS Interface (/proc/acpi)
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROCFS_POWER
-/* Generic Routines */
-static int
-acpi_sbs_add_fs(struct proc_dir_entry **dir,
-               struct proc_dir_entry *parent_dir,
-               char *dir_name,
-               const struct file_operations *info_fops,
-               const struct file_operations *state_fops,
-               const struct file_operations *alarm_fops, void *data)
-{
-       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded,"
-                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
-       if (!*dir) {
-               *dir = proc_mkdir(dir_name, parent_dir);
-               if (!*dir) {
-                       return -ENODEV;
-               }
-       }
-
-       /* 'info' [R] */
-       if (info_fops)
-               proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
-                                info_fops, data);
-
-       /* 'state' [R] */
-       if (state_fops)
-               proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
-                                state_fops, data);
-
-       /* 'alarm' [R/W] */
-       if (alarm_fops)
-               proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
-                                alarm_fops, data);
-       return 0;
-}
-
-/* Smart Battery Interface */
-static struct proc_dir_entry *acpi_battery_dir = NULL;
-
-static inline char *acpi_battery_units(struct acpi_battery *battery)
-{
-       return acpi_battery_mode(battery) ? " mW" : " mA";
-}
-
-
-static int acpi_battery_read_info(struct seq_file *seq, void *offset)
-{
-       struct acpi_battery *battery = seq->private;
-       struct acpi_sbs *sbs = battery->sbs;
-       int result = 0;
-
-       mutex_lock(&sbs->lock);
-
-       seq_printf(seq, "present:                 %s\n",
-                  (battery->present) ? "yes" : "no");
-       if (!battery->present)
-               goto end;
-
-       seq_printf(seq, "design capacity:         %i%sh\n",
-                  battery->design_capacity * acpi_battery_scale(battery),
-                  acpi_battery_units(battery));
-       seq_printf(seq, "last full capacity:      %i%sh\n",
-                  battery->full_charge_capacity * acpi_battery_scale(battery),
-                  acpi_battery_units(battery));
-       seq_printf(seq, "battery technology:      rechargeable\n");
-       seq_printf(seq, "design voltage:          %i mV\n",
-                  battery->design_voltage * acpi_battery_vscale(battery));
-       seq_printf(seq, "design capacity warning: unknown\n");
-       seq_printf(seq, "design capacity low:     unknown\n");
-       seq_printf(seq, "cycle count:             %i\n", battery->cycle_count);
-       seq_printf(seq, "capacity granularity 1:  unknown\n");
-       seq_printf(seq, "capacity granularity 2:  unknown\n");
-       seq_printf(seq, "model number:            %s\n", battery->device_name);
-       seq_printf(seq, "serial number:           %i\n",
-                  battery->serial_number);
-       seq_printf(seq, "battery type:            %s\n",
-                  battery->device_chemistry);
-       seq_printf(seq, "OEM info:                %s\n",
-                  battery->manufacturer_name);
-      end:
-       mutex_unlock(&sbs->lock);
-       return result;
-}
-
-static int acpi_battery_info_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_battery_read_info, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_state(struct seq_file *seq, void *offset)
-{
-       struct acpi_battery *battery = seq->private;
-       struct acpi_sbs *sbs = battery->sbs;
-       int rate;
-
-       mutex_lock(&sbs->lock);
-       seq_printf(seq, "present:                 %s\n",
-                  (battery->present) ? "yes" : "no");
-       if (!battery->present)
-               goto end;
-
-       acpi_battery_get_state(battery);
-       seq_printf(seq, "capacity state:          %s\n",
-                  (battery->state & 0x0010) ? "critical" : "ok");
-       seq_printf(seq, "charging state:          %s\n",
-                  (battery->rate_now < 0) ? "discharging" :
-                  ((battery->rate_now > 0) ? "charging" : "charged"));
-       rate = abs(battery->rate_now) * acpi_battery_ipscale(battery);
-       rate *= (acpi_battery_mode(battery))?(battery->voltage_now *
-                       acpi_battery_vscale(battery)/1000):1;
-       seq_printf(seq, "present rate:            %d%s\n", rate,
-                  acpi_battery_units(battery));
-       seq_printf(seq, "remaining capacity:      %i%sh\n",
-                  battery->capacity_now * acpi_battery_scale(battery),
-                  acpi_battery_units(battery));
-       seq_printf(seq, "present voltage:         %i mV\n",
-                  battery->voltage_now * acpi_battery_vscale(battery));
-
-      end:
-       mutex_unlock(&sbs->lock);
-       return 0;
-}
-
-static int acpi_battery_state_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_battery_read_state, PDE_DATA(inode));
-}
-
-static int acpi_battery_read_alarm(struct seq_file *seq, void *offset)
-{
-       struct acpi_battery *battery = seq->private;
-       struct acpi_sbs *sbs = battery->sbs;
-       int result = 0;
-
-       mutex_lock(&sbs->lock);
-
-       if (!battery->present) {
-               seq_printf(seq, "present:                 no\n");
-               goto end;
-       }
-
-       acpi_battery_get_alarm(battery);
-       seq_printf(seq, "alarm:                   ");
-       if (battery->alarm_capacity)
-               seq_printf(seq, "%i%sh\n",
-                          battery->alarm_capacity *
-                          acpi_battery_scale(battery),
-                          acpi_battery_units(battery));
-       else
-               seq_printf(seq, "disabled\n");
-      end:
-       mutex_unlock(&sbs->lock);
-       return result;
-}
-
-static ssize_t
-acpi_battery_write_alarm(struct file *file, const char __user * buffer,
-                        size_t count, loff_t * ppos)
-{
-       struct seq_file *seq = file->private_data;
-       struct acpi_battery *battery = seq->private;
-       struct acpi_sbs *sbs = battery->sbs;
-       char alarm_string[12] = { '\0' };
-       int result = 0;
-       mutex_lock(&sbs->lock);
-       if (!battery->present) {
-               result = -ENODEV;
-               goto end;
-       }
-       if (count > sizeof(alarm_string) - 1) {
-               result = -EINVAL;
-               goto end;
-       }
-       if (copy_from_user(alarm_string, buffer, count)) {
-               result = -EFAULT;
-               goto end;
-       }
-       alarm_string[count] = 0;
-       battery->alarm_capacity = simple_strtoul(alarm_string, NULL, 0) /
-                                       acpi_battery_scale(battery);
-       acpi_battery_set_alarm(battery);
-      end:
-       mutex_unlock(&sbs->lock);
-       if (result)
-               return result;
-       return count;
-}
-
-static int acpi_battery_alarm_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_battery_read_alarm, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_battery_info_fops = {
-       .open = acpi_battery_info_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_state_fops = {
-       .open = acpi_battery_state_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-static const struct file_operations acpi_battery_alarm_fops = {
-       .open = acpi_battery_alarm_open_fs,
-       .read = seq_read,
-       .write = acpi_battery_write_alarm,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-/* Legacy AC Adapter Interface */
-
-static struct proc_dir_entry *acpi_ac_dir = NULL;
-
-static int acpi_ac_read_state(struct seq_file *seq, void *offset)
-{
-
-       struct acpi_sbs *sbs = seq->private;
-
-       mutex_lock(&sbs->lock);
-
-       seq_printf(seq, "state:                   %s\n",
-                  sbs->charger_present ? "on-line" : "off-line");
-
-       mutex_unlock(&sbs->lock);
-       return 0;
-}
-
-static int acpi_ac_state_open_fs(struct inode *inode, struct file *file)
-{
-       return single_open(file, acpi_ac_read_state, PDE_DATA(inode));
-}
-
-static const struct file_operations acpi_ac_state_fops = {
-       .open = acpi_ac_state_open_fs,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-       .owner = THIS_MODULE,
-};
-
-#endif
-
 /* --------------------------------------------------------------------------
                                  Driver Interface
    -------------------------------------------------------------------------- */
@@ -781,12 +509,6 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
                return result;
 
        sprintf(battery->name, ACPI_BATTERY_DIR_NAME, id);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_sbs_add_fs(&battery->proc_entry, acpi_battery_dir,
-                       battery->name, &acpi_battery_info_fops,
-                       &acpi_battery_state_fops, &acpi_battery_alarm_fops,
-                       battery);
-#endif
        battery->bat.name = battery->name;
        battery->bat.type = POWER_SUPPLY_TYPE_BATTERY;
        if (!acpi_battery_mode(battery)) {
@@ -822,10 +544,6 @@ static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
                        device_remove_file(battery->bat.dev, &alarm_attr);
                power_supply_unregister(&battery->bat);
        }
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       proc_remove(battery->proc_entry);
-       battery->proc_entry = NULL;
-#endif
 }
 
 static int acpi_charger_add(struct acpi_sbs *sbs)
@@ -835,13 +553,7 @@ static int acpi_charger_add(struct acpi_sbs *sbs)
        result = acpi_ac_get_present(sbs);
        if (result)
                goto end;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       result = acpi_sbs_add_fs(&sbs->charger_entry, acpi_ac_dir,
-                                ACPI_AC_DIR_NAME, NULL,
-                                &acpi_ac_state_fops, NULL, sbs);
-       if (result)
-               goto end;
-#endif
+
        sbs->charger.name = "sbs-charger";
        sbs->charger.type = POWER_SUPPLY_TYPE_MAINS;
        sbs->charger.properties = sbs_ac_props;
@@ -859,10 +571,6 @@ static void acpi_charger_remove(struct acpi_sbs *sbs)
 {
        if (sbs->charger.dev)
                power_supply_unregister(&sbs->charger);
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       proc_remove(sbs->charger_entry);
-       sbs->charger_entry = NULL;
-#endif
 }
 
 static void acpi_sbs_callback(void *context)
@@ -950,20 +658,6 @@ static int acpi_sbs_remove(struct acpi_device *device)
        return 0;
 }
 
-static void acpi_sbs_rmdirs(void)
-{
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       if (acpi_ac_dir) {
-               acpi_unlock_ac_dir(acpi_ac_dir);
-               acpi_ac_dir = NULL;
-       }
-       if (acpi_battery_dir) {
-               acpi_unlock_battery_dir(acpi_battery_dir);
-               acpi_battery_dir = NULL;
-       }
-#endif
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int acpi_sbs_resume(struct device *dev)
 {
@@ -995,28 +689,17 @@ static int __init acpi_sbs_init(void)
 
        if (acpi_disabled)
                return -ENODEV;
-#ifdef CONFIG_ACPI_PROCFS_POWER
-       acpi_ac_dir = acpi_lock_ac_dir();
-       if (!acpi_ac_dir)
-               return -ENODEV;
-       acpi_battery_dir = acpi_lock_battery_dir();
-       if (!acpi_battery_dir) {
-               acpi_sbs_rmdirs();
-               return -ENODEV;
-       }
-#endif
+
        result = acpi_bus_register_driver(&acpi_sbs_driver);
-       if (result < 0) {
-               acpi_sbs_rmdirs();
+       if (result < 0)
                return -ENODEV;
-       }
+
        return 0;
 }
 
 static void __exit acpi_sbs_exit(void)
 {
        acpi_bus_unregister_driver(&acpi_sbs_driver);
-       acpi_sbs_rmdirs();
        return;
 }
 
index 407ad13cac2f27945c3b812f791a4f615c2db492..fee8a297c7d95310caa64492b75e6c2fae0c280e 100644 (file)
@@ -999,7 +999,6 @@ int acpi_device_add(struct acpi_device *device,
        INIT_LIST_HEAD(&device->wakeup_list);
        INIT_LIST_HEAD(&device->physical_node_list);
        mutex_init(&device->physical_node_lock);
-       INIT_LIST_HEAD(&device->power_dependent);
 
        new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
        if (!new_bus_id) {
index 05306a59aedc18b45dd89f636d907def41d67d11..db5293650f622108e80a09124097f469c727547f 100644 (file)
@@ -564,6 +564,7 @@ static ssize_t counter_set(struct kobject *kobj,
        acpi_event_status status;
        acpi_handle handle;
        int result = 0;
+       unsigned long tmp;
 
        if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
                int i;
@@ -596,8 +597,10 @@ static ssize_t counter_set(struct kobject *kobj,
                else if (!strcmp(buf, "clear\n") &&
                         (status & ACPI_EVENT_FLAG_SET))
                        result = acpi_clear_gpe(handle, index);
+               else if (!kstrtoul(buf, 0, &tmp))
+                       all_counters[index].count = tmp;
                else
-                       all_counters[index].count = strtoul(buf, NULL, 0);
+                       result = -EINVAL;
        } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
                int event = index - num_gpes;
                if (!strcmp(buf, "disable\n") &&
@@ -609,8 +612,10 @@ static ssize_t counter_set(struct kobject *kobj,
                else if (!strcmp(buf, "clear\n") &&
                         (status & ACPI_EVENT_FLAG_SET))
                        result = acpi_clear_event(event);
+               else if (!kstrtoul(buf, 0, &tmp))
+                       all_counters[index].count = tmp;
                else
-                       all_counters[index].count = strtoul(buf, NULL, 0);
+                       result = -EINVAL;
        } else
                all_counters[index].count = strtoul(buf, NULL, 0);
 
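
The two hunks above replace strtoul(buf, NULL, 0), which silently turns unrecognized input into 0, with kstrtoul(), so a write that is neither a keyword nor a number now fails with -EINVAL instead of clobbering the counter. A runnable userspace approximation of that strict parse-or-reject behaviour; my_kstrtoul() is an illustrative stand-in, not the kernel helper:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Roughly what kstrtoul() enforces: the whole string must be a number,
 * optionally followed by a single trailing newline. */
static int my_kstrtoul(const char *s, int base, unsigned long *res)
{
        char *end;

        errno = 0;
        *res = strtoul(s, &end, base);
        if (errno || end == s)
                return -EINVAL;
        if (*end == '\n')
                end++;
        if (*end != '\0')
                return -EINVAL;
        return 0;
}

int main(void)
{
        const char *inputs[] = { "123", "42\n", "clear", "" };
        unsigned long val;

        for (int i = 0; i < 4; i++) {
                if (my_kstrtoul(inputs[i], 0, &val))
                        printf("input %d: -EINVAL\n", i);
                else
                        printf("input %d: %lu\n", i, val);
        }
        return 0;
}
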
@@ -762,13 +767,8 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
        if (!hotplug_kobj)
                goto err_out;
 
-       kobject_init(&hotplug->kobj, &acpi_hotplug_profile_ktype);
-       error = kobject_set_name(&hotplug->kobj, "%s", name);
-       if (error)
-               goto err_out;
-
-       hotplug->kobj.parent = hotplug_kobj;
-       error = kobject_add(&hotplug->kobj, hotplug_kobj, NULL);
+       error = kobject_init_and_add(&hotplug->kobj,
+               &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
        if (error)
                goto err_out;
 
index 6a0329340b42ad188add760db66ebcc17ba9ab22..0d9f46b5ae6d100a64ca3c3410ab23ac2af361fe 100644 (file)
@@ -299,8 +299,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "No critical threshold\n"));
                } else if (tmp <= 2732) {
-                       printk(KERN_WARNING FW_BUG "Invalid critical threshold "
-                              "(%llu)\n", tmp);
+                       pr_warn(FW_BUG "Invalid critical threshold (%llu)\n",
+                               tmp);
                        tz->trips.critical.flags.valid = 0;
                } else {
                        tz->trips.critical.flags.valid = 1;
@@ -317,8 +317,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
                                 * Allow override critical threshold
                                 */
                                if (crt_k > tz->trips.critical.temperature)
-                                       printk(KERN_WARNING PREFIX
-                                               "Critical threshold %d C\n", crt);
+                                       pr_warn(PREFIX "Critical threshold %d C\n",
+                                               crt);
                                tz->trips.critical.temperature = crt_k;
                        }
                }
@@ -390,8 +390,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
                status = acpi_evaluate_reference(tz->device->handle, "_PSL",
                                                        NULL, &devices);
                if (ACPI_FAILURE(status)) {
-                       printk(KERN_WARNING PREFIX
-                               "Invalid passive threshold\n");
+                       pr_warn(PREFIX "Invalid passive threshold\n");
                        tz->trips.passive.flags.valid = 0;
                }
                else
@@ -453,8 +452,8 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
                        status = acpi_evaluate_reference(tz->device->handle,
                                                name, NULL, &devices);
                        if (ACPI_FAILURE(status)) {
-                               printk(KERN_WARNING PREFIX
-                                       "Invalid active%d threshold\n", i);
+                               pr_warn(PREFIX "Invalid active%d threshold\n",
+                                       i);
                                tz->trips.active[i].flags.valid = 0;
                        }
                        else
@@ -505,7 +504,7 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
                valid |= tz->trips.active[i].flags.valid;
 
        if (!valid) {
-               printk(KERN_WARNING FW_BUG "No valid trip found\n");
+               pr_warn(FW_BUG "No valid trip found\n");
                return -ENODEV;
        }
        return 0;
@@ -515,10 +514,9 @@ static void acpi_thermal_check(void *data)
 {
        struct acpi_thermal *tz = data;
 
-       if (!tz->tz_enabled) {
-               pr_warn("thermal zone is disabled \n");
+       if (!tz->tz_enabled)
                return;
-       }
+
        thermal_zone_device_update(tz->thermal_zone);
 }
 
@@ -570,9 +568,10 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
         */
        if (mode == THERMAL_DEVICE_ENABLED)
                enable = 1;
-       else if (mode == THERMAL_DEVICE_DISABLED)
+       else if (mode == THERMAL_DEVICE_DISABLED) {
                enable = 0;
-       else
+               pr_warn("thermal zone will be disabled\n");
+       } else
                return -EINVAL;
 
        if (enable != tz->tz_enabled) {
@@ -923,8 +922,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
                                  acpi_bus_private_data_handler,
                                  tz->thermal_zone);
        if (ACPI_FAILURE(status)) {
-               printk(KERN_ERR PREFIX
-                               "Error attaching device data\n");
+               pr_err(PREFIX "Error attaching device data\n");
                return -ENODEV;
        }
 
@@ -1094,9 +1092,8 @@ static int acpi_thermal_add(struct acpi_device *device)
        if (result)
                goto free_memory;
 
-       printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n",
-              acpi_device_name(device), acpi_device_bid(device),
-              KELVIN_TO_CELSIUS(tz->temperature));
+       pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
+               acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature));
        goto end;
 
 free_memory:
@@ -1159,24 +1156,24 @@ static int acpi_thermal_resume(struct device *dev)
 static int thermal_act(const struct dmi_system_id *d) {
 
        if (act == 0) {
-               printk(KERN_NOTICE "ACPI: %s detected: "
-                       "disabling all active thermal trip points\n", d->ident);
+               pr_notice(PREFIX "%s detected: "
+                         "disabling all active thermal trip points\n", d->ident);
                act = -1;
        }
        return 0;
 }
 static int thermal_nocrt(const struct dmi_system_id *d) {
 
-       printk(KERN_NOTICE "ACPI: %s detected: "
-               "disabling all critical thermal trip point actions.\n", d->ident);
+       pr_notice(PREFIX "%s detected: "
+                 "disabling all critical thermal trip point actions.\n", d->ident);
        nocrt = 1;
        return 0;
 }
 static int thermal_tzp(const struct dmi_system_id *d) {
 
        if (tzp == 0) {
-               printk(KERN_NOTICE "ACPI: %s detected: "
-                       "enabling thermal zone polling\n", d->ident);
+               pr_notice(PREFIX "%s detected: "
+                         "enabling thermal zone polling\n", d->ident);
                tzp = 300;      /* 300 dS = 30 Seconds */
        }
        return 0;
@@ -1184,8 +1181,8 @@ static int thermal_tzp(const struct dmi_system_id *d) {
 static int thermal_psv(const struct dmi_system_id *d) {
 
        if (psv == 0) {
-               printk(KERN_NOTICE "ACPI: %s detected: "
-                       "disabling all passive thermal trip points\n", d->ident);
+               pr_notice(PREFIX "%s detected: "
+                         "disabling all passive thermal trip points\n", d->ident);
                psv = -1;
        }
        return 0;
@@ -1238,7 +1235,7 @@ static int __init acpi_thermal_init(void)
        dmi_check_system(thermal_dmi_table);
 
        if (off) {
-               printk(KERN_NOTICE "ACPI: thermal control disabled\n");
+               pr_notice(PREFIX "thermal control disabled\n");
                return -ENODEV;
        }
 
index 552248b0005b01a241ab511e52eef08a1c15d244..fc2cd328408053dfaa37eceb227de974b2796bf6 100644 (file)
@@ -169,11 +169,20 @@ acpi_extract_package(union acpi_object *package,
        /*
         * Validate output buffer.
         */
-       if (buffer->length < size_required) {
+       if (buffer->length == ACPI_ALLOCATE_BUFFER) {
+               buffer->pointer = ACPI_ALLOCATE(size_required);
+               if (!buffer->pointer)
+                       return AE_NO_MEMORY;
                buffer->length = size_required;
-               return AE_BUFFER_OVERFLOW;
-       } else if (buffer->length != size_required || !buffer->pointer) {
-               return AE_BAD_PARAMETER;
+               memset(buffer->pointer, 0, size_required);
+       } else {
+               if (buffer->length < size_required) {
+                       buffer->length = size_required;
+                       return AE_BUFFER_OVERFLOW;
+               } else if (buffer->length != size_required ||
+                          !buffer->pointer) {
+                       return AE_BAD_PARAMETER;
+               }
        }
 
        head = buffer->pointer;
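
The hunk above lets acpi_extract_package() follow the usual ACPICA output-buffer convention: a caller passing length == ACPI_ALLOCATE_BUFFER gets a freshly allocated, zeroed buffer sized to fit, while a caller-supplied buffer is still validated as before. A standalone sketch of that allocate-or-validate convention; the struct, constant and return codes below are simplified stand-ins rather than the ACPICA definitions:

#include <stdio.h>
#include <stdlib.h>

#define ALLOCATE_BUFFER ((size_t)-1)   /* stand-in for ACPI_ALLOCATE_BUFFER */

struct buffer {
        size_t length;
        void *pointer;
};

/* 0 = ok, -1 = too small (required size written back), -2 = bad args/no mem */
static int prepare_buffer(struct buffer *buf, size_t size_required)
{
        if (buf->length == ALLOCATE_BUFFER) {
                buf->pointer = calloc(1, size_required); /* zeroed, as in the hunk */
                if (!buf->pointer)
                        return -2;
                buf->length = size_required;
                return 0;
        }

        if (buf->length < size_required) {
                buf->length = size_required;             /* tell caller how much */
                return -1;
        }
        if (buf->length != size_required || !buf->pointer)
                return -2;
        return 0;
}

int main(void)
{
        struct buffer b = { .length = ALLOCATE_BUFFER, .pointer = NULL };

        if (prepare_buffer(&b, 64) == 0)
                printf("allocated %zu bytes\n", b.length);

        free(b.pointer);
        return 0;
}
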
index aebcf6355df467e3c394c00fa5c48731e42cc18d..d020df5a732ab1109d9fd3ddcaa9c7ff871b192b 100644 (file)
@@ -88,7 +88,16 @@ module_param(allow_duplicates, bool, 0644);
 static bool use_bios_initial_backlight = 1;
 module_param(use_bios_initial_backlight, bool, 0644);
 
+/*
+ * For Windows 8 systems: if set to true and the GPU driver has
+ * registered a backlight interface, skip registering ACPI video's.
+ */
+static bool use_native_backlight = false;
+module_param(use_native_backlight, bool, 0644);
+
 static int register_count;
+static struct mutex video_list_lock;
+static struct list_head video_bus_head;
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device);
 static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
@@ -157,6 +166,7 @@ struct acpi_video_bus {
        struct acpi_video_bus_flags flags;
        struct list_head video_device_list;
        struct mutex device_list_lock;  /* protects video_device_list */
+       struct list_head entry;
        struct input_dev *input;
        char phys[32];  /* for input device */
        struct notifier_block pm_nb;
@@ -229,6 +239,14 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
 static int acpi_video_switch_brightness(struct acpi_video_device *device,
                                         int event);
 
+static bool acpi_video_verify_backlight_support(void)
+{
+       if (acpi_osi_is_win8() && use_native_backlight &&
+           backlight_device_registered(BACKLIGHT_RAW))
+               return false;
+       return acpi_video_backlight_support();
+}
+
 /* backlight device sysfs support */
 static int acpi_video_get_brightness(struct backlight_device *bd)
 {
@@ -884,79 +902,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 
        if (acpi_has_method(device->dev->handle, "_DDC"))
                device->cap._DDC = 1;
-
-       if (acpi_video_backlight_support()) {
-               struct backlight_properties props;
-               struct pci_dev *pdev;
-               acpi_handle acpi_parent;
-               struct device *parent = NULL;
-               int result;
-               static int count;
-               char *name;
-
-               result = acpi_video_init_brightness(device);
-               if (result)
-                       return;
-               name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
-               if (!name)
-                       return;
-               count++;
-
-               acpi_get_parent(device->dev->handle, &acpi_parent);
-
-               pdev = acpi_get_pci_dev(acpi_parent);
-               if (pdev) {
-                       parent = &pdev->dev;
-                       pci_dev_put(pdev);
-               }
-
-               memset(&props, 0, sizeof(struct backlight_properties));
-               props.type = BACKLIGHT_FIRMWARE;
-               props.max_brightness = device->brightness->count - 3;
-               device->backlight = backlight_device_register(name,
-                                                             parent,
-                                                             device,
-                                                             &acpi_backlight_ops,
-                                                             &props);
-               kfree(name);
-               if (IS_ERR(device->backlight))
-                       return;
-
-               /*
-                * Save current brightness level in case we have to restore it
-                * before acpi_video_device_lcd_set_level() is called next time.
-                */
-               device->backlight->props.brightness =
-                               acpi_video_get_brightness(device->backlight);
-
-               device->cooling_dev = thermal_cooling_device_register("LCD",
-                                       device->dev, &video_cooling_ops);
-               if (IS_ERR(device->cooling_dev)) {
-                       /*
-                        * Set cooling_dev to NULL so we don't crash trying to
-                        * free it.
-                        * Also, why the hell we are returning early and
-                        * not attempt to register video output if cooling
-                        * device registration failed?
-                        * -- dtor
-                        */
-                       device->cooling_dev = NULL;
-                       return;
-               }
-
-               dev_info(&device->dev->dev, "registered as cooling_device%d\n",
-                        device->cooling_dev->id);
-               result = sysfs_create_link(&device->dev->dev.kobj,
-                               &device->cooling_dev->device.kobj,
-                               "thermal_cooling");
-               if (result)
-                       printk(KERN_ERR PREFIX "Create sysfs link\n");
-               result = sysfs_create_link(&device->cooling_dev->device.kobj,
-                               &device->dev->dev.kobj, "device");
-               if (result)
-                       printk(KERN_ERR PREFIX "Create sysfs link\n");
-
-       }
 }
 
 /*
@@ -1143,13 +1088,6 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
        acpi_video_device_bind(video, data);
        acpi_video_device_find_cap(data);
 
-       status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
-                                            acpi_video_device_notify, data);
-       if (ACPI_FAILURE(status))
-               dev_err(&device->dev, "Error installing notify handler\n");
-       else
-               data->flags.notify = 1;
-
        mutex_lock(&video->device_list_lock);
        list_add_tail(&data->entry, &video->video_device_list);
        mutex_unlock(&video->device_list_lock);
@@ -1333,8 +1271,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
        unsigned long long level_current, level_next;
        int result = -EINVAL;
 
-       /* no warning message if acpi_backlight=vendor is used */
-       if (!acpi_video_backlight_support())
+       /* no warning message if acpi_backlight=vendor or a quirk is used */
+       if (!acpi_video_verify_backlight_support())
                return 0;
 
        if (!device->brightness)
@@ -1454,64 +1392,6 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video,
        return status;
 }
 
-static int acpi_video_bus_put_one_device(struct acpi_video_device *device)
-{
-       acpi_status status;
-
-       if (!device || !device->video)
-               return -ENOENT;
-
-       if (device->flags.notify) {
-               status = acpi_remove_notify_handler(device->dev->handle,
-                               ACPI_DEVICE_NOTIFY, acpi_video_device_notify);
-               if (ACPI_FAILURE(status))
-                       dev_err(&device->dev->dev,
-                                       "Can't remove video notify handler\n");
-       }
-
-       if (device->backlight) {
-               backlight_device_unregister(device->backlight);
-               device->backlight = NULL;
-       }
-       if (device->cooling_dev) {
-               sysfs_remove_link(&device->dev->dev.kobj,
-                                 "thermal_cooling");
-               sysfs_remove_link(&device->cooling_dev->device.kobj,
-                                 "device");
-               thermal_cooling_device_unregister(device->cooling_dev);
-               device->cooling_dev = NULL;
-       }
-
-       return 0;
-}
-
-static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
-{
-       int status;
-       struct acpi_video_device *dev, *next;
-
-       mutex_lock(&video->device_list_lock);
-
-       list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
-
-               status = acpi_video_bus_put_one_device(dev);
-               if (ACPI_FAILURE(status))
-                       printk(KERN_WARNING PREFIX
-                              "hhuuhhuu bug in acpi video driver.\n");
-
-               if (dev->brightness) {
-                       kfree(dev->brightness->levels);
-                       kfree(dev->brightness);
-               }
-               list_del(&dev->entry);
-               kfree(dev);
-       }
-
-       mutex_unlock(&video->device_list_lock);
-
-       return 0;
-}
-
 /* acpi_video interface */
 
 /*
@@ -1521,13 +1401,13 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
 static int acpi_video_bus_start_devices(struct acpi_video_bus *video)
 {
        return acpi_video_bus_DOS(video, 0,
-                                 acpi_video_backlight_quirks() ? 1 : 0);
+                                 acpi_osi_is_win8() ? 1 : 0);
 }
 
 static int acpi_video_bus_stop_devices(struct acpi_video_bus *video)
 {
        return acpi_video_bus_DOS(video, 0,
-                                 acpi_video_backlight_quirks() ? 0 : 1);
+                                 acpi_osi_is_win8() ? 0 : 1);
 }
 
 static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
@@ -1536,7 +1416,7 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
        struct input_dev *input;
        int keycode = 0;
 
-       if (!video)
+       if (!video || !video->input)
                return;
 
        input = video->input;
@@ -1691,12 +1571,236 @@ acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
        return AE_OK;
 }
 
+static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
+{
+       if (acpi_video_verify_backlight_support()) {
+               struct backlight_properties props;
+               struct pci_dev *pdev;
+               acpi_handle acpi_parent;
+               struct device *parent = NULL;
+               int result;
+               static int count;
+               char *name;
+
+               result = acpi_video_init_brightness(device);
+               if (result)
+                       return;
+               name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
+               if (!name)
+                       return;
+               count++;
+
+               acpi_get_parent(device->dev->handle, &acpi_parent);
+
+               pdev = acpi_get_pci_dev(acpi_parent);
+               if (pdev) {
+                       parent = &pdev->dev;
+                       pci_dev_put(pdev);
+               }
+
+               memset(&props, 0, sizeof(struct backlight_properties));
+               props.type = BACKLIGHT_FIRMWARE;
+               props.max_brightness = device->brightness->count - 3;
+               device->backlight = backlight_device_register(name,
+                                                             parent,
+                                                             device,
+                                                             &acpi_backlight_ops,
+                                                             &props);
+               kfree(name);
+               if (IS_ERR(device->backlight))
+                       return;
+
+               /*
+                * Save current brightness level in case we have to restore it
+                * before acpi_video_device_lcd_set_level() is called next time.
+                */
+               device->backlight->props.brightness =
+                               acpi_video_get_brightness(device->backlight);
+
+               device->cooling_dev = thermal_cooling_device_register("LCD",
+                                       device->dev, &video_cooling_ops);
+               if (IS_ERR(device->cooling_dev)) {
+                       /*
+                        * Set cooling_dev to NULL so we don't crash trying to
+                        * free it.
+                        * Also, why the hell we are returning early and
+                        * not attempt to register video output if cooling
+                        * device registration failed?
+                        * -- dtor
+                        */
+                       device->cooling_dev = NULL;
+                       return;
+               }
+
+               dev_info(&device->dev->dev, "registered as cooling_device%d\n",
+                        device->cooling_dev->id);
+               result = sysfs_create_link(&device->dev->dev.kobj,
+                               &device->cooling_dev->device.kobj,
+                               "thermal_cooling");
+               if (result)
+                       printk(KERN_ERR PREFIX "Create sysfs link\n");
+               result = sysfs_create_link(&device->cooling_dev->device.kobj,
+                               &device->dev->dev.kobj, "device");
+               if (result)
+                       printk(KERN_ERR PREFIX "Create sysfs link\n");
+       }
+}
+
+static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
+{
+       struct acpi_video_device *dev;
+
+       mutex_lock(&video->device_list_lock);
+       list_for_each_entry(dev, &video->video_device_list, entry)
+               acpi_video_dev_register_backlight(dev);
+       mutex_unlock(&video->device_list_lock);
+
+       video->pm_nb.notifier_call = acpi_video_resume;
+       video->pm_nb.priority = 0;
+       return register_pm_notifier(&video->pm_nb);
+}
+
+static void acpi_video_dev_unregister_backlight(struct acpi_video_device *device)
+{
+       if (device->backlight) {
+               backlight_device_unregister(device->backlight);
+               device->backlight = NULL;
+       }
+       if (device->brightness) {
+               kfree(device->brightness->levels);
+               kfree(device->brightness);
+               device->brightness = NULL;
+       }
+       if (device->cooling_dev) {
+               sysfs_remove_link(&device->dev->dev.kobj, "thermal_cooling");
+               sysfs_remove_link(&device->cooling_dev->device.kobj, "device");
+               thermal_cooling_device_unregister(device->cooling_dev);
+               device->cooling_dev = NULL;
+       }
+}
+
+static int acpi_video_bus_unregister_backlight(struct acpi_video_bus *video)
+{
+       struct acpi_video_device *dev;
+       int error = unregister_pm_notifier(&video->pm_nb);
+
+       mutex_lock(&video->device_list_lock);
+       list_for_each_entry(dev, &video->video_device_list, entry)
+               acpi_video_dev_unregister_backlight(dev);
+       mutex_unlock(&video->device_list_lock);
+
+       return error;
+}
+
+static void acpi_video_dev_add_notify_handler(struct acpi_video_device *device)
+{
+       acpi_status status;
+       struct acpi_device *adev = device->dev;
+
+       status = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+                                            acpi_video_device_notify, device);
+       if (ACPI_FAILURE(status))
+               dev_err(&adev->dev, "Error installing notify handler\n");
+       else
+               device->flags.notify = 1;
+}
+
+static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video)
+{
+       struct input_dev *input;
+       struct acpi_video_device *dev;
+       int error;
+
+       video->input = input = input_allocate_device();
+       if (!input) {
+               error = -ENOMEM;
+               goto out;
+       }
+
+       error = acpi_video_bus_start_devices(video);
+       if (error)
+               goto err_free_input;
+
+       snprintf(video->phys, sizeof(video->phys),
+                       "%s/video/input0", acpi_device_hid(video->device));
+
+       input->name = acpi_device_name(video->device);
+       input->phys = video->phys;
+       input->id.bustype = BUS_HOST;
+       input->id.product = 0x06;
+       input->dev.parent = &video->device->dev;
+       input->evbit[0] = BIT(EV_KEY);
+       set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
+       set_bit(KEY_VIDEO_NEXT, input->keybit);
+       set_bit(KEY_VIDEO_PREV, input->keybit);
+       set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
+       set_bit(KEY_BRIGHTNESSUP, input->keybit);
+       set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
+       set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
+       set_bit(KEY_DISPLAY_OFF, input->keybit);
+
+       error = input_register_device(input);
+       if (error)
+               goto err_stop_dev;
+
+       mutex_lock(&video->device_list_lock);
+       list_for_each_entry(dev, &video->video_device_list, entry)
+               acpi_video_dev_add_notify_handler(dev);
+       mutex_unlock(&video->device_list_lock);
+
+       return 0;
+
+err_stop_dev:
+       acpi_video_bus_stop_devices(video);
+err_free_input:
+       input_free_device(input);
+       video->input = NULL;
+out:
+       return error;
+}
+
+static void acpi_video_dev_remove_notify_handler(struct acpi_video_device *dev)
+{
+       if (dev->flags.notify) {
+               acpi_remove_notify_handler(dev->dev->handle, ACPI_DEVICE_NOTIFY,
+                                          acpi_video_device_notify);
+               dev->flags.notify = 0;
+       }
+}
+
+static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
+{
+       struct acpi_video_device *dev;
+
+       mutex_lock(&video->device_list_lock);
+       list_for_each_entry(dev, &video->video_device_list, entry)
+               acpi_video_dev_remove_notify_handler(dev);
+       mutex_unlock(&video->device_list_lock);
+
+       acpi_video_bus_stop_devices(video);
+       input_unregister_device(video->input);
+       video->input = NULL;
+}
+
+static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
+{
+       struct acpi_video_device *dev, *next;
+
+       mutex_lock(&video->device_list_lock);
+       list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
+               list_del(&dev->entry);
+               kfree(dev);
+       }
+       mutex_unlock(&video->device_list_lock);
+
+       return 0;
+}
+
 static int instance;
 
 static int acpi_video_bus_add(struct acpi_device *device)
 {
        struct acpi_video_bus *video;
-       struct input_dev *input;
        int error;
        acpi_status status;
 
@@ -1748,62 +1852,24 @@ static int acpi_video_bus_add(struct acpi_device *device)
        if (error)
                goto err_put_video;
 
-       video->input = input = input_allocate_device();
-       if (!input) {
-               error = -ENOMEM;
-               goto err_put_video;
-       }
-
-       error = acpi_video_bus_start_devices(video);
-       if (error)
-               goto err_free_input_dev;
-
-       snprintf(video->phys, sizeof(video->phys),
-               "%s/video/input0", acpi_device_hid(video->device));
-
-       input->name = acpi_device_name(video->device);
-       input->phys = video->phys;
-       input->id.bustype = BUS_HOST;
-       input->id.product = 0x06;
-       input->dev.parent = &device->dev;
-       input->evbit[0] = BIT(EV_KEY);
-       set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
-       set_bit(KEY_VIDEO_NEXT, input->keybit);
-       set_bit(KEY_VIDEO_PREV, input->keybit);
-       set_bit(KEY_BRIGHTNESS_CYCLE, input->keybit);
-       set_bit(KEY_BRIGHTNESSUP, input->keybit);
-       set_bit(KEY_BRIGHTNESSDOWN, input->keybit);
-       set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
-       set_bit(KEY_DISPLAY_OFF, input->keybit);
-
        printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s  rom: %s  post: %s)\n",
               ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
               video->flags.multihead ? "yes" : "no",
               video->flags.rom ? "yes" : "no",
               video->flags.post ? "yes" : "no");
+       mutex_lock(&video_list_lock);
+       list_add_tail(&video->entry, &video_bus_head);
+       mutex_unlock(&video_list_lock);
 
-       video->pm_nb.notifier_call = acpi_video_resume;
-       video->pm_nb.priority = 0;
-       error = register_pm_notifier(&video->pm_nb);
-       if (error)
-               goto err_stop_video;
-
-       error = input_register_device(input);
-       if (error)
-               goto err_unregister_pm_notifier;
+       acpi_video_bus_register_backlight(video);
+       acpi_video_bus_add_notify_handler(video);
 
        return 0;
 
- err_unregister_pm_notifier:
-       unregister_pm_notifier(&video->pm_nb);
- err_stop_video:
-       acpi_video_bus_stop_devices(video);
- err_free_input_dev:
-       input_free_device(input);
- err_put_video:
+err_put_video:
        acpi_video_bus_put_devices(video);
        kfree(video->attached_array);
- err_free_video:
+err_free_video:
        kfree(video);
        device->driver_data = NULL;
 
@@ -1820,12 +1886,14 @@ static int acpi_video_bus_remove(struct acpi_device *device)
 
        video = acpi_driver_data(device);
 
-       unregister_pm_notifier(&video->pm_nb);
-
-       acpi_video_bus_stop_devices(video);
+       acpi_video_bus_remove_notify_handler(video);
+       acpi_video_bus_unregister_backlight(video);
        acpi_video_bus_put_devices(video);
 
-       input_unregister_device(video->input);
+       mutex_lock(&video_list_lock);
+       list_del(&video->entry);
+       mutex_unlock(&video_list_lock);
+
        kfree(video->attached_array);
        kfree(video);
 
@@ -1874,6 +1942,9 @@ int acpi_video_register(void)
                return 0;
        }
 
+       mutex_init(&video_list_lock);
+       INIT_LIST_HEAD(&video_bus_head);
+
        result = acpi_bus_register_driver(&acpi_video_bus);
        if (result < 0)
                return -ENODEV;
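
Not part of the patch: the PM-notifier that acpi_video_bus_unregister_backlight() tears down above was previously registered in acpi_video_bus_add() (see the lines deleted in the earlier hunk) and presumably now lives in acpi_video_bus_register_backlight(), which is outside this diff. A minimal sketch of that assumed registration helper, reusing the driver's existing acpi_video_resume() callback; the helper name and its placement are assumptions:

#include <linux/suspend.h>

/* Hypothetical helper mirroring the removed registration code. */
static int acpi_video_bus_register_pm_notifier(struct acpi_video_bus *video)
{
        video->pm_nb.notifier_call = acpi_video_resume;
        video->pm_nb.priority = 0;
        /* paired with unregister_pm_notifier() in acpi_video_bus_unregister_backlight() */
        return register_pm_notifier(&video->pm_nb);
}
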
index 940edbf2fe8f4460b27fceb1e7fe5fdb7cbacfc3..84875fd4c74f9fbd100106ea3615ffcde2daf6cf 100644 (file)
@@ -168,6 +168,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
                },
        },
+       {
+       .callback = video_detect_force_vendor,
+       .ident = "Lenovo Yoga 13",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
+               },
+       },
        { },
 };
 
@@ -233,11 +241,11 @@ static void acpi_video_caps_check(void)
                acpi_video_get_capabilities(NULL);
 }
 
-bool acpi_video_backlight_quirks(void)
+bool acpi_osi_is_win8(void)
 {
        return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
 }
-EXPORT_SYMBOL(acpi_video_backlight_quirks);
+EXPORT_SYMBOL(acpi_osi_is_win8);
 
 /* Promote the vendor interface instead of the generic video module.
  * This function allows DMI blacklists to be implemented by externals
index c6707278a6bb496d200caf9f0bd4a57233b9b1a1..c4876ac9151a56bc95a05df647e0850cea3182c4 100644 (file)
@@ -552,7 +552,6 @@ amba_aphb_device_add(struct device *parent, const char *name,
        if (!dev)
                return ERR_PTR(-ENOMEM);
 
-       dev->dma_mask = dma_mask;
        dev->dev.coherent_dma_mask = dma_mask;
        dev->irq[0] = irq1;
        dev->irq[1] = irq2;
@@ -619,7 +618,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
                dev_set_name(&dev->dev, "%s", name);
        dev->dev.release = amba_device_release;
        dev->dev.bus = &amba_bustype;
-       dev->dev.dma_mask = &dev->dma_mask;
+       dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
        dev->res.name = dev_name(&dev->dev);
 }
 
@@ -663,9 +662,6 @@ int amba_device_register(struct amba_device *dev, struct resource *parent)
        amba_device_initialize(dev, dev->dev.init_name);
        dev->dev.init_name = NULL;
 
-       if (!dev->dev.coherent_dma_mask && dev->dma_mask)
-               dev_warn(&dev->dev, "coherent dma mask is unset\n");
-
        return amba_device_add(dev, parent);
 }
 
index 9d715ae5ff6b73b6bd09690b95e9686f196e9cdb..8e28f923cf7f3a221104eb625e4d3a89b5ab79ca 100644 (file)
@@ -1343,7 +1343,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
                host->flags |= ATA_HOST_PARALLEL_SCAN;
        else
-               printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+               dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
 
        if (pi.flags & ATA_FLAG_EM)
                ahci_reset_em(host);
index 11456371f29b04fdeee0aaf8247f893b3ac52a13..2289efdf82030e388ce977957eeb5b26f26f19d2 100644 (file)
@@ -339,6 +339,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
        .sdev_attrs             = ahci_sdev_attrs
 
 extern struct ata_port_operations ahci_ops;
+extern struct ata_port_operations ahci_platform_ops;
 extern struct ata_port_operations ahci_pmp_retry_srst_ops;
 
 unsigned int ahci_dev_classify(struct ata_port *ap);
@@ -368,6 +369,7 @@ irqreturn_t ahci_hw_interrupt(int irq, void *dev_instance);
 irqreturn_t ahci_thread_fn(int irq, void *dev_instance);
 void ahci_print_info(struct ata_host *host, const char *scc_s);
 int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis);
+void ahci_error_handler(struct ata_port *ap);
 
 static inline void __iomem *__ahci_port_base(struct ata_host *host,
                                             unsigned int port_no)
index 58debb0acc3a73743ea247f0c6b260368cd67f4b..ae2d73fe321e2f2c62d8e5709a788905edfd5a89 100644 (file)
@@ -1,6 +1,6 @@
 /*
+ * Copyright (c) 2013 Freescale Semiconductor, Inc.
  * Freescale IMX AHCI SATA platform driver
- * Copyright 2013 Freescale Semiconductor, Inc.
  *
  * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
  *
 #include <linux/of_device.h>
 #include <linux/mfd/syscon.h>
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/libata.h>
 #include "ahci.h"
 
 enum {
-       HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
+       PORT_PHY_CTL = 0x178,                   /* Port0 PHY Control */
+       PORT_PHY_CTL_PDDQ_LOC = 0x100000,       /* PORT_PHY_CTL bits */
+       HOST_TIMER1MS = 0xe0,                   /* Timer 1-ms */
 };
 
 struct imx_ahci_priv {
@@ -36,6 +39,56 @@ struct imx_ahci_priv {
        struct clk *sata_ref_clk;
        struct clk *ahb_clk;
        struct regmap *gpr;
+       bool no_device;
+       bool first_time;
+};
+
+static int ahci_imx_hotplug;
+module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
+MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support)");
+
+static void ahci_imx_error_handler(struct ata_port *ap)
+{
+       u32 reg_val;
+       struct ata_device *dev;
+       struct ata_host *host = dev_get_drvdata(ap->dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       void __iomem *mmio = hpriv->mmio;
+       struct imx_ahci_priv *imxpriv = dev_get_drvdata(ap->dev->parent);
+
+       ahci_error_handler(ap);
+
+       if (!(imxpriv->first_time) || ahci_imx_hotplug)
+               return;
+
+       imxpriv->first_time = false;
+
+       ata_for_each_dev(dev, &ap->link, ENABLED)
+               return;
+       /*
+        * Disable the link to save power.  An i.MX AHCI port can't be
+        * recovered without a full reset once PDDQ mode is enabled,
+        * making it impossible to use as part of libata LPM.
+        */
+       reg_val = readl(mmio + PORT_PHY_CTL);
+       writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL);
+       regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+                       IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+                       !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+       clk_disable_unprepare(imxpriv->sata_ref_clk);
+       imxpriv->no_device = true;
+}
+
+static struct ata_port_operations ahci_imx_ops = {
+       .inherits       = &ahci_platform_ops,
+       .error_handler  = ahci_imx_error_handler,
+};
+
+static const struct ata_port_info ahci_imx_port_info = {
+       .flags          = AHCI_FLAG_COMMON,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_imx_ops,
 };
 
 static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
@@ -117,9 +170,51 @@ static void imx6q_sata_exit(struct device *dev)
        clk_disable_unprepare(imxpriv->sata_ref_clk);
 }
 
+static int imx_ahci_suspend(struct device *dev)
+{
+       struct imx_ahci_priv *imxpriv =  dev_get_drvdata(dev->parent);
+
+       /*
+        * If no_device is set, the clocks have already been gated off
+        * during initialization, so don't do it again here.
+        */
+       if (!imxpriv->no_device) {
+               regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+                               IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+                               !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+               clk_disable_unprepare(imxpriv->sata_ref_clk);
+       }
+
+       return 0;
+}
+
+static int imx_ahci_resume(struct device *dev)
+{
+       struct imx_ahci_priv *imxpriv =  dev_get_drvdata(dev->parent);
+       int ret;
+
+       if (!imxpriv->no_device) {
+               ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+               if (ret < 0) {
+                       dev_err(dev, "pre-enable sata_ref clock err:%d\n", ret);
+                       return ret;
+               }
+
+               regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
+                               IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+                               IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+               usleep_range(1000, 2000);
+       }
+
+       return 0;
+}
+
 static struct ahci_platform_data imx6q_sata_pdata = {
        .init = imx6q_sata_init,
        .exit = imx6q_sata_exit,
+       .ata_port_info = &ahci_imx_port_info,
+       .suspend = imx_ahci_suspend,
+       .resume = imx_ahci_resume,
 };
 
 static const struct of_device_id imx_ahci_of_match[] = {
@@ -152,6 +247,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
        ahci_dev = &ahci_pdev->dev;
        ahci_dev->parent = dev;
 
+       imxpriv->no_device = false;
+       imxpriv->first_time = true;
        imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
        if (IS_ERR(imxpriv->ahb_clk)) {
                dev_err(dev, "can't get ahb clock.\n");
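
The GPR13 accesses in this driver go through regmap_update_bits(), which performs a read-modify-write of (old & ~mask) | (val & mask). Assuming IMX6Q_GPR13_SATA_MPLL_CLK_EN is a single nonzero bit, passing the plain macro as the value sets the bit while passing !IMX6Q_GPR13_SATA_MPLL_CLK_EN (i.e. 0) clears it, which is what the error-handler and suspend paths above rely on. A minimal sketch of the two cases under that assumption:

#include <linux/regmap.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

static void sata_mpll_enable(struct regmap *gpr)
{
        /* mask and value are the same bit: sets SATA_MPLL_CLK_EN */
        regmap_update_bits(gpr, IOMUXC_GPR13,
                           IMX6Q_GPR13_SATA_MPLL_CLK_EN,
                           IMX6Q_GPR13_SATA_MPLL_CLK_EN);
}

static void sata_mpll_disable(struct regmap *gpr)
{
        /* same mask, value 0 (what !IMX6Q_GPR13_SATA_MPLL_CLK_EN evaluates to) */
        regmap_update_bits(gpr, IOMUXC_GPR13,
                           IMX6Q_GPR13_SATA_MPLL_CLK_EN, 0);
}
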
index 2daaee05cab12d629502c913ef7102a793cdf27f..f9554318504f8a401c97b2221831b1cdbdcdb67f 100644 (file)
@@ -49,10 +49,11 @@ static struct platform_device_id ahci_devtype[] = {
 };
 MODULE_DEVICE_TABLE(platform, ahci_devtype);
 
-static struct ata_port_operations ahci_platform_ops = {
+struct ata_port_operations ahci_platform_ops = {
        .inherits       = &ahci_ops,
        .host_stop      = ahci_host_stop,
 };
+EXPORT_SYMBOL_GPL(ahci_platform_ops);
 
 static struct ata_port_operations ahci_platform_retry_srst_ops = {
        .inherits       = &ahci_pmp_retry_srst_ops,
@@ -184,7 +185,7 @@ static int ahci_probe(struct platform_device *pdev)
        if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
                host->flags |= ATA_HOST_PARALLEL_SCAN;
        else
-               printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+               dev_info(dev, "SSS flag set, parallel bus scan disabled\n");
 
        if (pi.flags & ATA_FLAG_EM)
                ahci_reset_em(host);
index 513ad7ed0c997e19c3b3c4190b09221199c3d16c..6334c8d7c3f1e6a437b90af78e585e50698f5fc8 100644 (file)
 
 enum {
        PIIX_IOCFG              = 0x54, /* IDE I/O configuration register */
-       ICH5_PMR                = 0x90, /* port mapping register */
+       ICH5_PMR                = 0x90, /* address map register */
        ICH5_PCS                = 0x92, /* port control and status */
        PIIX_SIDPR_BAR          = 5,
        PIIX_SIDPR_LEN          = 16,
@@ -233,7 +233,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
          PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich6m_sata },
        /* 82801GB/GR/GH (ICH7, identical to ICH6) */
        { 0x8086, 0x27c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
-       /* 2801GBM/GHM (ICH7M, identical to ICH6M) */
+       /* 82801GBM/GHM (ICH7M, identical to ICH6M)  */
        { 0x8086, 0x27c4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6m_sata },
        /* Enterprise Southbridge 2 (631xESB/632xESB) */
        { 0x8086, 0x2680, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich6_sata },
@@ -517,7 +517,7 @@ static int ich_pata_cable_detect(struct ata_port *ap)
        const struct ich_laptop *lap = &ich_laptop[0];
        u8 mask;
 
-       /* Check for specials - Acer Aspire 5602WLMi */
+       /* Check for specials */
        while (lap->device) {
                if (lap->device == pdev->device &&
                    lap->subvendor == pdev->subsystem_vendor &&
@@ -1366,38 +1366,39 @@ static const int *piix_init_sata_map(struct pci_dev *pdev,
        const int *map;
        int i, invalid_map = 0;
        u8 map_value;
+       char buf[32];
+       char *p = buf, *end = buf + sizeof(buf);
 
        pci_read_config_byte(pdev, ICH5_PMR, &map_value);
 
        map = map_db->map[map_value & map_db->mask];
 
-       dev_info(&pdev->dev, "MAP [");
        for (i = 0; i < 4; i++) {
                switch (map[i]) {
                case RV:
                        invalid_map = 1;
-                       pr_cont(" XX");
+                       p += scnprintf(p, end - p, " XX");
                        break;
 
                case NA:
-                       pr_cont(" --");
+                       p += scnprintf(p, end - p, " --");
                        break;
 
                case IDE:
                        WARN_ON((i & 1) || map[i + 1] != IDE);
                        pinfo[i / 2] = piix_port_info[ich_pata_100];
                        i++;
-                       pr_cont(" IDE IDE");
+                       p += scnprintf(p, end - p, " IDE IDE");
                        break;
 
                default:
-                       pr_cont(" P%d", map[i]);
+                       p += scnprintf(p, end - p, " P%d", map[i]);
                        if (i & 1)
                                pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
                        break;
                }
        }
-       pr_cont(" ]\n");
+       dev_info(&pdev->dev, "MAP [%s ]\n", buf);
 
        if (invalid_map)
                dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);
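
The conversion above builds the whole MAP line in a local buffer and emits it with a single dev_info() instead of a pr_cont() sequence. scnprintf() returns the number of characters actually written (never more than the size passed), so the p/end bookkeeping cannot overrun the buffer. A stand-alone sketch of the same idiom, with hypothetical names:

#include <linux/kernel.h>

static void build_map_string(const int *map, char *out, size_t len)
{
        char *p = out, *end = out + len;
        int i;

        for (i = 0; i < 4; i++)
                p += scnprintf(p, end - p, " P%d", map[i]);
        /* out now holds e.g. " P0 P1 P2 P3" and is always NUL-terminated */
}
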
index acfd0f711069e9da304c95179b14127e20ae1c9c..080edd13dbc413ecd0ef4b14b3660f0efb36e9dd 100644 (file)
@@ -89,7 +89,6 @@ static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
                          unsigned long deadline);
 static void ahci_postreset(struct ata_link *link, unsigned int *class);
-static void ahci_error_handler(struct ata_port *ap);
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
 static void ahci_dev_config(struct ata_device *dev);
 #ifdef CONFIG_PM
@@ -189,14 +188,15 @@ struct ata_port_operations ahci_pmp_retry_srst_ops = {
 };
 EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
 
-int ahci_em_messages = 1;
+static bool ahci_em_messages __read_mostly = true;
 EXPORT_SYMBOL_GPL(ahci_em_messages);
-module_param(ahci_em_messages, int, 0444);
+module_param(ahci_em_messages, bool, 0444);
 /* add other LED protocol types when they become supported */
 MODULE_PARM_DESC(ahci_em_messages,
        "AHCI Enclosure Management Message control (0 = off, 1 = on)");
 
-int devslp_idle_timeout = 1000;        /* device sleep idle timeout in ms */
+/* device sleep idle timeout in ms */
+static int devslp_idle_timeout __read_mostly = 1000;
 module_param(devslp_idle_timeout, int, 0644);
 MODULE_PARM_DESC(devslp_idle_timeout, "device sleep idle timeout");
 
@@ -778,8 +778,16 @@ static void ahci_start_port(struct ata_port *ap)
                                rc = ap->ops->transmit_led_message(ap,
                                                               emp->led_state,
                                                               4);
+                               /*
+                                * If busy, give a breather but do not
+                                * release EH ownership by using msleep()
+                                * instead of ata_msleep().  EM Transmit
+                                * bit is busy for the whole host and
+                                * releasing ownership will cause other
+                                * ports to fail the same way.
+                                */
                                if (rc == -EBUSY)
-                                       ata_msleep(ap, 1);
+                                       msleep(1);
                                else
                                        break;
                        }
@@ -1981,7 +1989,7 @@ static void ahci_thaw(struct ata_port *ap)
        writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
 }
 
-static void ahci_error_handler(struct ata_port *ap)
+void ahci_error_handler(struct ata_port *ap)
 {
        if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
                /* restart engine */
@@ -1994,6 +2002,7 @@ static void ahci_error_handler(struct ata_port *ap)
        if (!ata_dev_enabled(ap->link.device))
                ahci_stop_engine(ap);
 }
+EXPORT_SYMBOL_GPL(ahci_error_handler);
 
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
 {
index 4ba8b04055728d49a0ac147a608d1ad27391d62a..ab714d2ad978644752ca3c9bdff74ae1c9f63934 100644 (file)
@@ -1035,17 +1035,3 @@ void ata_acpi_on_disable(struct ata_device *dev)
 {
        ata_acpi_clear_gtf(dev);
 }
-
-void ata_scsi_acpi_bind(struct ata_device *dev)
-{
-       acpi_handle handle = ata_dev_acpi_handle(dev);
-       if (handle)
-               acpi_dev_pm_add_dependent(handle, &dev->sdev->sdev_gendev);
-}
-
-void ata_scsi_acpi_unbind(struct ata_device *dev)
-{
-       acpi_handle handle = ata_dev_acpi_handle(dev);
-       if (handle)
-               acpi_dev_pm_remove_dependent(handle, &dev->sdev->sdev_gendev);
-}
index c69fcce505c03d06c7b20ae333ff9708a228f869..370462fa8e01addd3387befee8f4ea78486b2cc0 100644 (file)
@@ -1322,14 +1322,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
  *     should be retried.  To be used from EH.
  *
  *     SCSI midlayer limits the number of retries to scmd->allowed.
- *     scmd->retries is decremented for commands which get retried
+ *     scmd->allowed is incremented for commands which get retried
  *     due to unrelated failures (qc->err_mask is zero).
  */
 void ata_eh_qc_retry(struct ata_queued_cmd *qc)
 {
        struct scsi_cmnd *scmd = qc->scsicmd;
-       if (!qc->err_mask && scmd->retries)
-               scmd->retries--;
+       if (!qc->err_mask)
+               scmd->allowed++;
        __ata_eh_qc_complete(qc);
 }
 
index 97a0cef1295916223202058b83e80a20de74c7de..db6dfcfa3e2ee932190069290814f2a71bc8f3f6 100644 (file)
@@ -3679,7 +3679,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
                        if (!IS_ERR(sdev)) {
                                dev->sdev = sdev;
                                scsi_device_put(sdev);
-                               ata_scsi_acpi_bind(dev);
                        } else {
                                dev->sdev = NULL;
                        }
@@ -3767,8 +3766,6 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
        struct scsi_device *sdev;
        unsigned long flags;
 
-       ata_scsi_acpi_unbind(dev);
-
        /* Alas, we need to grab scan_mutex to ensure SCSI device
         * state doesn't change underneath us and thus
         * scsi_device_get() always succeeds.  The mutex locking can
index eeeb77845d48574e3f43b4b6e456302e5f3926d9..45b5ab3a95d51158c9ef0e618d6c8ae0fdeb373b 100644 (file)
@@ -121,8 +121,6 @@ extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
 extern void ata_acpi_bind_port(struct ata_port *ap);
 extern void ata_acpi_bind_dev(struct ata_device *dev);
 extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
-extern void ata_scsi_acpi_bind(struct ata_device *dev);
-extern void ata_scsi_acpi_unbind(struct ata_device *dev);
 #else
 static inline void ata_acpi_dissociate(struct ata_host *host) { }
 static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -133,8 +131,6 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
                                      pm_message_t state) { }
 static inline void ata_acpi_bind_port(struct ata_port *ap) {}
 static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
-static inline void ata_scsi_acpi_bind(struct ata_device *dev) {}
-static inline void ata_scsi_acpi_unbind(struct ata_device *dev) {}
 #endif
 
 /* libata-scsi.c */
index 4bceb8803a10f50baee31c9c39233282ebbc6e91..b33d1f99b3a44bb40e76a529a8eddbc2268bdc20 100644 (file)
@@ -78,7 +78,7 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
 
        ap->ioaddr.cmd_addr = cmd_addr;
 
-       if (pnp_port_valid(idev, 1) == 0) {
+       if (pnp_port_valid(idev, 1)) {
                ctl_addr = devm_ioport_map(&idev->dev,
                                           pnp_port_start(idev, 1), 1);
                ap->ioaddr.altstatus_addr = ctl_addr;
index 1ec53f8ca96fa682fc1492e61816c6b043876f7d..ddf470c2341d7f1f153b7d7776fa873b9c7906f3 100644 (file)
@@ -144,6 +144,7 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
        struct ata_host *host;
        struct ata_port *ap;
        struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
+       int ret;
 
        cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -157,7 +158,9 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        /* acquire resources and fill host */
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        data->cs0 = devm_ioremap(&pdev->dev, cs0->start, 0x1000);
        data->cs1 = devm_ioremap(&pdev->dev, cs1->start, 0x1000);
index c51bbb9ea8e8a826e3d2fa68e693d1cf6a3a2a94..6231d4394f45d021c8dc7cf8a15a38c14e29b2c7 100644 (file)
@@ -1014,8 +1014,9 @@ static int octeon_cf_probe(struct platform_device *pdev)
        }
        cf_port->c0 = ap->ioaddr.ctl_addr;
 
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
-       pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       rv = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (rv)
+               return rv;
 
        ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
 
index 49e783e35ee94cf57e24c2e06c7ad8a7b26a29ff..364eded31881fbbf4d83adb0909d614b29692ed9 100644 (file)
@@ -420,7 +420,6 @@ struct fs_transmit_config {
 #define RC_FLAGS_BFPS_BFP27 (0xd << 17)
 #define RC_FLAGS_BFPS_BFP47 (0xe << 17)
 
-#define RC_FLAGS_BFPS       (0x1 << 17)
 #define RC_FLAGS_BFPP       (0x1 << 21)
 #define RC_FLAGS_TEVC       (0x1 << 22)
 #define RC_FLAGS_TEP        (0x1 << 23)
index 848ebbd25717269fa0ef34e1632e502eb29f4aeb..f48370dfc908e19a73a86cf817ad05128c0ed034 100644 (file)
@@ -44,13 +44,11 @@ static int __ref cpu_subsys_online(struct device *dev)
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = dev->id;
        int from_nid, to_nid;
-       int ret = -ENODEV;
-
-       cpu_hotplug_driver_lock();
+       int ret;
 
        from_nid = cpu_to_node(cpuid);
        if (from_nid == NUMA_NO_NODE)
-               goto out;
+               return -ENODEV;
 
        ret = cpu_up(cpuid);
        /*
@@ -61,19 +59,12 @@ static int __ref cpu_subsys_online(struct device *dev)
        if (from_nid != to_nid)
                change_cpu_under_node(cpu, from_nid, to_nid);
 
- out:
-       cpu_hotplug_driver_unlock();
        return ret;
 }
 
 static int cpu_subsys_offline(struct device *dev)
 {
-       int ret;
-
-       cpu_hotplug_driver_lock();
-       ret = cpu_down(dev->id);
-       cpu_hotplug_driver_unlock();
-       return ret;
+       return cpu_down(dev->id);
 }
 
 void unregister_cpu(struct cpu *cpu)
@@ -93,7 +84,17 @@ static ssize_t cpu_probe_store(struct device *dev,
                               const char *buf,
                               size_t count)
 {
-       return arch_cpu_probe(buf, count);
+       ssize_t cnt;
+       int ret;
+
+       ret = lock_device_hotplug_sysfs();
+       if (ret)
+               return ret;
+
+       cnt = arch_cpu_probe(buf, count);
+
+       unlock_device_hotplug();
+       return cnt;
 }
 
 static ssize_t cpu_release_store(struct device *dev,
@@ -101,7 +102,17 @@ static ssize_t cpu_release_store(struct device *dev,
                                 const char *buf,
                                 size_t count)
 {
-       return arch_cpu_release(buf, count);
+       ssize_t cnt;
+       int ret;
+
+       ret = lock_device_hotplug_sysfs();
+       if (ret)
+               return ret;
+
+       cnt = arch_cpu_release(buf, count);
+
+       unlock_device_hotplug();
+       return cnt;
 }
 
 static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
index 9e59f6535c442bbb26d6ad055b93a4b0cf5bef5d..bece691cb5d99d79f761b9400eebf1e9a78c55a2 100644 (file)
@@ -333,8 +333,10 @@ store_mem_state(struct device *dev,
                online_type = ONLINE_KEEP;
        else if (!strncmp(buf, "offline", min_t(int, count, 7)))
                online_type = -1;
-       else
-               return -EINVAL;
+       else {
+               ret = -EINVAL;
+               goto err;
+       }
 
        switch (online_type) {
        case ONLINE_KERNEL:
@@ -357,6 +359,7 @@ store_mem_state(struct device *dev,
                ret = -EINVAL; /* should never happen */
        }
 
+err:
        unlock_device_hotplug();
 
        if (ret)
index 9f098a82cf04b061edb203fff718ef0ca70e6328..ee039afe90786510eb19e0a536beb30413b27a38 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/suspend.h>
 #include <trace/events/power.h>
 #include <linux/cpuidle.h>
+#include <linux/timer.h>
+
 #include "../base.h"
 #include "power.h"
 
@@ -390,6 +392,71 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
        return error;
 }
 
+#ifdef CONFIG_DPM_WATCHDOG
+struct dpm_watchdog {
+       struct device           *dev;
+       struct task_struct      *tsk;
+       struct timer_list       timer;
+};
+
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
+       struct dpm_watchdog wd
+
+/**
+ * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
+ * @data: Watchdog object address.
+ *
+ * Called when a driver has timed out suspending or resuming.
+ * There's not much we can do here to recover, so panic() to
+ * capture a crash dump in pstore.
+ */
+static void dpm_watchdog_handler(unsigned long data)
+{
+       struct dpm_watchdog *wd = (void *)data;
+
+       dev_emerg(wd->dev, "**** DPM device timeout ****\n");
+       show_stack(wd->tsk, NULL);
+       panic("%s %s: unrecoverable failure\n",
+               dev_driver_string(wd->dev), dev_name(wd->dev));
+}
+
+/**
+ * dpm_watchdog_set - Enable pm watchdog for given device.
+ * @wd: Watchdog. Must be allocated on the stack.
+ * @dev: Device to handle.
+ */
+static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
+{
+       struct timer_list *timer = &wd->timer;
+
+       wd->dev = dev;
+       wd->tsk = current;
+
+       init_timer_on_stack(timer);
+       /* use same timeout value for both suspend and resume */
+       timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
+       timer->function = dpm_watchdog_handler;
+       timer->data = (unsigned long)wd;
+       add_timer(timer);
+}
+
+/**
+ * dpm_watchdog_clear - Disable suspend/resume watchdog.
+ * @wd: Watchdog to disable.
+ */
+static void dpm_watchdog_clear(struct dpm_watchdog *wd)
+{
+       struct timer_list *timer = &wd->timer;
+
+       del_timer_sync(timer);
+       destroy_timer_on_stack(timer);
+}
+#else
+#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
+#define dpm_watchdog_set(x, y)
+#define dpm_watchdog_clear(x)
+#endif
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
@@ -576,6 +643,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
+       DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
        TRACE_DEVICE(dev);
        TRACE_RESUME(0);
@@ -584,6 +652,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
                goto Complete;
 
        dpm_wait(dev->parent, async);
+       dpm_watchdog_set(&wd, dev);
        device_lock(dev);
 
        /*
@@ -642,6 +711,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 
  Unlock:
        device_unlock(dev);
+       dpm_watchdog_clear(&wd);
 
  Complete:
        complete_all(&dev->power.completion);
@@ -1060,6 +1130,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
+       DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
        dpm_wait_for_children(dev, async);
 
@@ -1083,6 +1154,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        if (dev->power.syscore)
                goto Complete;
 
+       dpm_watchdog_set(&wd, dev);
        device_lock(dev);
 
        if (dev->pm_domain) {
@@ -1139,6 +1211,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
        }
 
        device_unlock(dev);
+       dpm_watchdog_clear(&wd);
 
  Complete:
        complete_all(&dev->power.completion);
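
The watchdog added above brackets each suspend/resume callback with an on-stack timer: if the callback does not return within CONFIG_DPM_WATCHDOG_TIMEOUT seconds, dpm_watchdog_handler() dumps the stuck task's stack and panics. A minimal sketch of the same on-stack timer bracket outside the PM core, using the timer_list function/data API this patch targets; the names and the 12-second timeout are hypothetical:

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static void op_timeout(unsigned long data)
{
        pr_emerg("operation %s timed out\n", (const char *)data);
}

static void guarded_operation(const char *name, void (*op)(void))
{
        struct timer_list timer;

        init_timer_on_stack(&timer);
        timer.expires = jiffies + 12 * HZ;      /* analogous to the DPM timeout */
        timer.function = op_timeout;
        timer.data = (unsigned long)name;
        add_timer(&timer);

        op();                                   /* the possibly-stalling callback */

        del_timer_sync(&timer);
        destroy_timer_on_stack(&timer);
}
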
index ef89897c6043eec57ed2d8d1f4c6db464cfd92d1..fa41874184401cd46c155526102fb72aed99e329 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/list.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/of.h>
 #include <linux/export.h>
 
@@ -42,7 +42,7 @@
  */
 
 /**
- * struct opp - Generic OPP description structure
+ * struct dev_pm_opp - Generic OPP description structure
  * @node:      opp list node. The nodes are maintained throughout the lifetime
  *             of boot. It is expected only an optimal set of OPPs are
  *             added to the library by the SoC framework.
@@ -59,7 +59,7 @@
  *
  * This structure stores the OPP information for a given device.
  */
-struct opp {
+struct dev_pm_opp {
        struct list_head node;
 
        bool available;
@@ -136,7 +136,7 @@ static struct device_opp *find_device_opp(struct device *dev)
 }
 
 /**
- * opp_get_voltage() - Gets the voltage corresponding to an available opp
+ * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
  * @opp:       opp for which voltage has to be returned for
  *
  * Return voltage in micro volt corresponding to the opp, else
@@ -150,9 +150,9 @@ static struct device_opp *find_device_opp(struct device *dev)
  * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  * pointer.
  */
-unsigned long opp_get_voltage(struct opp *opp)
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
-       struct opp *tmp_opp;
+       struct dev_pm_opp *tmp_opp;
        unsigned long v = 0;
 
        tmp_opp = rcu_dereference(opp);
@@ -163,10 +163,10 @@ unsigned long opp_get_voltage(struct opp *opp)
 
        return v;
 }
-EXPORT_SYMBOL_GPL(opp_get_voltage);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
 
 /**
- * opp_get_freq() - Gets the frequency corresponding to an available opp
+ * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  * @opp:       opp for which frequency has to be returned for
  *
  * Return frequency in hertz corresponding to the opp, else
@@ -180,9 +180,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage);
  * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  * pointer.
  */
-unsigned long opp_get_freq(struct opp *opp)
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
 {
-       struct opp *tmp_opp;
+       struct dev_pm_opp *tmp_opp;
        unsigned long f = 0;
 
        tmp_opp = rcu_dereference(opp);
@@ -193,10 +193,10 @@ unsigned long opp_get_freq(struct opp *opp)
 
        return f;
 }
-EXPORT_SYMBOL_GPL(opp_get_freq);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
 
 /**
- * opp_get_opp_count() - Get number of opps available in the opp list
+ * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  * @dev:       device for which we do this operation
  *
  * This function returns the number of available opps if there are any,
@@ -206,10 +206,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq);
  * internally references two RCU protected structures: device_opp and opp which
  * are safe as long as we are under a common RCU locked section.
  */
-int opp_get_opp_count(struct device *dev)
+int dev_pm_opp_get_opp_count(struct device *dev)
 {
        struct device_opp *dev_opp;
-       struct opp *temp_opp;
+       struct dev_pm_opp *temp_opp;
        int count = 0;
 
        dev_opp = find_device_opp(dev);
@@ -226,10 +226,10 @@ int opp_get_opp_count(struct device *dev)
 
        return count;
 }
-EXPORT_SYMBOL_GPL(opp_get_opp_count);
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
 
 /**
- * opp_find_freq_exact() - search for an exact frequency
+ * dev_pm_opp_find_freq_exact() - search for an exact frequency
  * @dev:               device for which we do this operation
  * @freq:              frequency to search for
  * @available:         true/false - match for available opp
@@ -254,11 +254,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count);
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
-                               bool available)
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+                                             unsigned long freq,
+                                             bool available)
 {
        struct device_opp *dev_opp;
-       struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
@@ -277,10 +278,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
 
        return opp;
 }
-EXPORT_SYMBOL_GPL(opp_find_freq_exact);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
 
 /**
- * opp_find_freq_ceil() - Search for an rounded ceil freq
+ * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  * @dev:       device for which we do this operation
  * @freq:      Start frequency
  *
@@ -300,10 +301,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact);
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+                                            unsigned long *freq)
 {
        struct device_opp *dev_opp;
-       struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
        if (!dev || !freq) {
                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -324,10 +326,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
 
        return opp;
 }
-EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
 
 /**
- * opp_find_freq_floor() - Search for a rounded floor freq
+ * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  * @dev:       device for which we do this operation
  * @freq:      Start frequency
  *
@@ -347,10 +349,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+                                             unsigned long *freq)
 {
        struct device_opp *dev_opp;
-       struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+       struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
        if (!dev || !freq) {
                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -375,17 +378,17 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
 
        return opp;
 }
-EXPORT_SYMBOL_GPL(opp_find_freq_floor);
+EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 
 /**
- * opp_add()  - Add an OPP table from a table definitions
+ * dev_pm_opp_add()  - Add an OPP table from a table definitions
  * @dev:       device for which we do this operation
  * @freq:      Frequency in Hz for this OPP
  * @u_volt:    Voltage in uVolts for this OPP
  *
  * This function adds an opp definition to the opp list and returns status.
  * The opp is made available by default and it can be controlled using
- * opp_enable/disable functions.
+ * dev_pm_opp_enable/disable functions.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
@@ -393,14 +396,14 @@ EXPORT_SYMBOL_GPL(opp_find_freq_floor);
  * that this function is *NOT* called under RCU protection or in contexts where
  * mutex cannot be locked.
  */
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 {
        struct device_opp *dev_opp = NULL;
-       struct opp *opp, *new_opp;
+       struct dev_pm_opp *opp, *new_opp;
        struct list_head *head;
 
        /* allocate new OPP node */
-       new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
+       new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
        if (!new_opp) {
                dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
                return -ENOMEM;
@@ -460,7 +463,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
        srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
        return 0;
 }
-EXPORT_SYMBOL_GPL(opp_add);
+EXPORT_SYMBOL_GPL(dev_pm_opp_add);
 
 /**
  * opp_set_availability() - helper to set the availability of an opp
@@ -485,11 +488,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
                bool availability_req)
 {
        struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
-       struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+       struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
        int r = 0;
 
        /* keep the node allocated */
-       new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
+       new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
        if (!new_opp) {
                dev_warn(dev, "%s: Unable to create OPP\n", __func__);
                return -ENOMEM;
@@ -552,13 +555,13 @@ unlock:
 }
 
 /**
- * opp_enable() - Enable a specific OPP
+ * dev_pm_opp_enable() - Enable a specific OPP
  * @dev:       device for which we do this operation
  * @freq:      OPP frequency to enable
  *
  * Enables a provided opp. If the operation is valid, this returns 0, else the
  * corresponding error value. It is meant to be used by users to make an OPP available again
- * after being temporarily made unavailable with opp_disable.
+ * after being temporarily made unavailable with dev_pm_opp_disable.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU and mutex locks to keep the
@@ -566,21 +569,21 @@ unlock:
  * this function is *NOT* called under RCU protection or in contexts where
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  */
-int opp_enable(struct device *dev, unsigned long freq)
+int dev_pm_opp_enable(struct device *dev, unsigned long freq)
 {
        return opp_set_availability(dev, freq, true);
 }
-EXPORT_SYMBOL_GPL(opp_enable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
 
 /**
- * opp_disable() - Disable a specific OPP
+ * dev_pm_opp_disable() - Disable a specific OPP
  * @dev:       device for which we do this operation
  * @freq:      OPP frequency to disable
  *
  * Disables a provided opp. If the operation is valid, this returns
  * 0, else the corresponding error value. It is meant to be a temporary
  * control by users to make this OPP not available until the circumstances are
- * right to make it available again (with a call to opp_enable).
+ * right to make it available again (with a call to dev_pm_opp_enable).
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
  * Hence this function indirectly uses RCU and mutex locks to keep the
@@ -588,15 +591,15 @@ EXPORT_SYMBOL_GPL(opp_enable);
  * this function is *NOT* called under RCU protection or in contexts where
  * mutex locking or synchronize_rcu() blocking calls cannot be used.
  */
-int opp_disable(struct device *dev, unsigned long freq)
+int dev_pm_opp_disable(struct device *dev, unsigned long freq)
 {
        return opp_set_availability(dev, freq, false);
 }
-EXPORT_SYMBOL_GPL(opp_disable);
+EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
 
 #ifdef CONFIG_CPU_FREQ
 /**
- * opp_init_cpufreq_table() - create a cpufreq table for a device
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
  * @dev:       device for which we do this operation
  * @table:     Cpufreq table returned back to caller
  *
@@ -619,11 +622,11 @@ EXPORT_SYMBOL_GPL(opp_disable);
  * Callers should ensure that this function is *NOT* called under RCU protection
  * or in contexts where mutex locking cannot be used.
  */
-int opp_init_cpufreq_table(struct device *dev,
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
                            struct cpufreq_frequency_table **table)
 {
        struct device_opp *dev_opp;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        struct cpufreq_frequency_table *freq_table;
        int i = 0;
 
@@ -639,7 +642,7 @@ int opp_init_cpufreq_table(struct device *dev,
        }
 
        freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
-                            (opp_get_opp_count(dev) + 1), GFP_KERNEL);
+                            (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
        if (!freq_table) {
                mutex_unlock(&dev_opp_list_lock);
                dev_warn(dev, "%s: Unable to allocate frequency table\n",
@@ -663,16 +666,16 @@ int opp_init_cpufreq_table(struct device *dev,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
 
 /**
- * opp_free_cpufreq_table() - free the cpufreq table
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
  * @dev:       device for which we do this operation
  * @table:     table to free
  *
- * Free up the table allocated by opp_init_cpufreq_table
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
  */
-void opp_free_cpufreq_table(struct device *dev,
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
                                struct cpufreq_frequency_table **table)
 {
        if (!table)
@@ -681,14 +684,14 @@ void opp_free_cpufreq_table(struct device *dev,
        kfree(*table);
        *table = NULL;
 }
-EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
 #endif         /* CONFIG_CPU_FREQ */
 
 /**
- * opp_get_notifier() - find notifier_head of the device with opp
+ * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  * @dev:       device pointer used to lookup device OPPs.
  */
-struct srcu_notifier_head *opp_get_notifier(struct device *dev)
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
 {
        struct device_opp *dev_opp = find_device_opp(dev);
 
@@ -732,7 +735,7 @@ int of_init_opp_table(struct device *dev)
                unsigned long freq = be32_to_cpup(val++) * 1000;
                unsigned long volt = be32_to_cpup(val++);
 
-               if (opp_add(dev, freq, volt)) {
+               if (dev_pm_opp_add(dev, freq, volt)) {
                        dev_warn(dev, "%s: Failed to add OPP %ld\n",
                                 __func__, freq);
                        continue;
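
As the kerneldoc above stresses, the renamed lookup helpers return RCU-protected pointers that are only valid inside an rcu_read_lock() section. A minimal consumer sketch under that constraint (hypothetical driver code, not part of the patch):

#include <linux/pm_opp.h>
#include <linux/rcupdate.h>
#include <linux/err.h>

static int pick_opp(struct device *dev, unsigned long *freq, unsigned long *volt)
{
        struct dev_pm_opp *opp;

        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, freq);     /* rounds *freq up in place */
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
        *volt = dev_pm_opp_get_voltage(opp);            /* must be read under RCU */
        rcu_read_unlock();

        return 0;
}
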
index 268a35097578d94f78f4baef4d9cc8b3fa1c31cd..72e00e66ecc5d3f84c6d2cce5f6d387c3d4ee260 100644 (file)
@@ -258,7 +258,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
  * Check if the device's runtime PM status allows it to be suspended.  If
  * another idle notification has been started earlier, return immediately.  If
  * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
- * run the ->runtime_idle() callback directly.
+ * run the ->runtime_idle() callback directly. If the ->runtime_idle() callback
+ * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
  *
  * This function must be called under dev->power.lock with interrupts disabled.
  */
@@ -331,7 +332,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
 
  out:
        trace_rpm_return_int(dev, _THIS_IP_, retval);
-       return retval ? retval : rpm_suspend(dev, rpmflags);
+       return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
 }
 
 /**
index 57f777835d97d852ef9067970c8bec276ea6e8ae..33414b1de2013b8e3fc68f31a70f8a56f2900cbe 100644 (file)
@@ -44,7 +44,6 @@ struct regmap_format {
 
 struct regmap_async {
        struct list_head list;
-       struct work_struct cleanup;
        struct regmap *map;
        void *work_buf;
 };
@@ -64,9 +63,11 @@ struct regmap {
        void *bus_context;
        const char *name;
 
+       bool async;
        spinlock_t async_lock;
        wait_queue_head_t async_waitq;
        struct list_head async_list;
+       struct list_head async_free;
        int async_ret;
 
 #ifdef CONFIG_DEBUG_FS
@@ -179,6 +180,9 @@ struct regmap_field {
        /* lsb */
        unsigned int shift;
        unsigned int reg;
+
+       unsigned int id_size;
+       unsigned int id_offset;
 };
 
 #ifdef CONFIG_DEBUG_FS
@@ -218,7 +222,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-                     const void *val, size_t val_len, bool async);
+                     const void *val, size_t val_len);
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
 
index d6c2d691b6e862e9ffc29b468dbdc617bc3d4fc0..a36112af494ce34464d989e8037fa96e1a2a2c98 100644 (file)
@@ -631,8 +631,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
 
        map->cache_bypass = 1;
 
-       ret = _regmap_raw_write(map, base, *data, count * val_bytes,
-                               false);
+       ret = _regmap_raw_write(map, base, *data, count * val_bytes);
 
        map->cache_bypass = 0;
 
index 7d689a15c500bb34ddc3331f33b57512890341aa..ccdac61ac5e2b2b6de72819d8911fd01ad1ecf60 100644 (file)
@@ -42,15 +42,6 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 static int _regmap_bus_raw_write(void *context, unsigned int reg,
                                 unsigned int val);
 
-static void async_cleanup(struct work_struct *work)
-{
-       struct regmap_async *async = container_of(work, struct regmap_async,
-                                                 cleanup);
-
-       kfree(async->work_buf);
-       kfree(async);
-}
-
 bool regmap_reg_in_ranges(unsigned int reg,
                          const struct regmap_range *ranges,
                          unsigned int nranges)
@@ -465,6 +456,7 @@ struct regmap *regmap_init(struct device *dev,
 
        spin_lock_init(&map->async_lock);
        INIT_LIST_HEAD(&map->async_list);
+       INIT_LIST_HEAD(&map->async_free);
        init_waitqueue_head(&map->async_waitq);
 
        if (config->read_flag_mask || config->write_flag_mask) {
@@ -821,6 +813,8 @@ static void regmap_field_init(struct regmap_field *rm_field,
        rm_field->reg = reg_field.reg;
        rm_field->shift = reg_field.lsb;
        rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
+       rm_field->id_size = reg_field.id_size;
+       rm_field->id_offset = reg_field.id_offset;
 }
 
 /**
@@ -942,12 +936,22 @@ EXPORT_SYMBOL_GPL(regmap_reinit_cache);
  */
 void regmap_exit(struct regmap *map)
 {
+       struct regmap_async *async;
+
        regcache_exit(map);
        regmap_debugfs_exit(map);
        regmap_range_exit(map);
        if (map->bus && map->bus->free_context)
                map->bus->free_context(map->bus_context);
        kfree(map->work_buf);
+       while (!list_empty(&map->async_free)) {
+               async = list_first_entry_or_null(&map->async_free,
+                                                struct regmap_async,
+                                                list);
+               list_del(&async->list);
+               kfree(async->work_buf);
+               kfree(async);
+       }
        kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1039,7 +1043,7 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
 }
 
 int _regmap_raw_write(struct regmap *map, unsigned int reg,
-                     const void *val, size_t val_len, bool async)
+                     const void *val, size_t val_len)
 {
        struct regmap_range_node *range;
        unsigned long flags;
@@ -1091,7 +1095,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                        dev_dbg(map->dev, "Writing window %d/%zu\n",
                                win_residue, val_len / map->format.val_bytes);
                        ret = _regmap_raw_write(map, reg, val, win_residue *
-                                               map->format.val_bytes, async);
+                                               map->format.val_bytes);
                        if (ret != 0)
                                return ret;
 
@@ -1114,21 +1118,42 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
        u8[0] |= map->write_flag_mask;
 
-       if (async && map->bus->async_write) {
-               struct regmap_async *async = map->bus->async_alloc();
-               if (!async)
-                       return -ENOMEM;
+       /*
+        * Essentially all I/O mechanisms will be faster with a single
+        * buffer to write.  Since register syncs often generate raw
+        * writes of single registers, optimise that case.
+        */
+       if (val != work_val && val_len == map->format.val_bytes) {
+               memcpy(work_val, val, map->format.val_bytes);
+               val = work_val;
+       }
+
+       if (map->async && map->bus->async_write) {
+               struct regmap_async *async;
 
                trace_regmap_async_write_start(map->dev, reg, val_len);
 
-               async->work_buf = kzalloc(map->format.buf_size,
-                                         GFP_KERNEL | GFP_DMA);
-               if (!async->work_buf) {
-                       kfree(async);
-                       return -ENOMEM;
+               spin_lock_irqsave(&map->async_lock, flags);
+               async = list_first_entry_or_null(&map->async_free,
+                                                struct regmap_async,
+                                                list);
+               if (async)
+                       list_del(&async->list);
+               spin_unlock_irqrestore(&map->async_lock, flags);
+
+               if (!async) {
+                       async = map->bus->async_alloc();
+                       if (!async)
+                               return -ENOMEM;
+
+                       async->work_buf = kzalloc(map->format.buf_size,
+                                                 GFP_KERNEL | GFP_DMA);
+                       if (!async->work_buf) {
+                               kfree(async);
+                               return -ENOMEM;
+                       }
                }
 
-               INIT_WORK(&async->cleanup, async_cleanup);
                async->map = map;
 
                /* If the caller supplied the value we can use it safely. */
@@ -1152,11 +1177,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
                                ret);
 
                        spin_lock_irqsave(&map->async_lock, flags);
-                       list_del(&async->list);
+                       list_move(&async->list, &map->async_free);
                        spin_unlock_irqrestore(&map->async_lock, flags);
-
-                       kfree(async->work_buf);
-                       kfree(async);
                }
 
                return ret;
@@ -1253,7 +1275,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
                                 map->work_buf +
                                 map->format.reg_bytes +
                                 map->format.pad_bytes,
-                                map->format.val_bytes, false);
+                                map->format.val_bytes);
 }
 
 static inline void *_regmap_map_get_context(struct regmap *map)
@@ -1317,6 +1339,37 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
 }
 EXPORT_SYMBOL_GPL(regmap_write);
 
+/**
+ * regmap_write_async(): Write a value to a single register asynchronously
+ *
+ * @map: Register map to write to
+ * @reg: Register to write to
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
+{
+       int ret;
+
+       if (reg % map->reg_stride)
+               return -EINVAL;
+
+       map->lock(map->lock_arg);
+
+       map->async = true;
+
+       ret = _regmap_write(map, reg, val);
+
+       map->async = false;
+
+       map->unlock(map->lock_arg);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_write_async);
+
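
A caller of regmap_write_async() gets the write queued on the bus's asynchronous path when one exists (it falls back to a synchronous write otherwise), so completion is not guaranteed when the call returns. A minimal usage sketch under that assumption, with hypothetical register addresses, flushing with regmap_async_complete() before depending on the result:

#include <linux/regmap.h>

static int program_coefficients(struct regmap *map)
{
        int ret;

        ret = regmap_write_async(map, 0x10, 0x1234);    /* hypothetical registers */
        if (ret)
                return ret;
        ret = regmap_write_async(map, 0x11, 0x5678);
        if (ret)
                return ret;

        /* block until all outstanding async writes for this map have completed */
        return regmap_async_complete(map);
}
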
 /**
  * regmap_raw_write(): Write raw values to one or more registers
  *
@@ -1345,7 +1398,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 
        map->lock(map->lock_arg);
 
-       ret = _regmap_raw_write(map, reg, val, val_len, false);
+       ret = _regmap_raw_write(map, reg, val, val_len);
 
        map->unlock(map->lock_arg);
 
@@ -1369,6 +1422,74 @@ int regmap_field_write(struct regmap_field *field, unsigned int val)
 }
 EXPORT_SYMBOL_GPL(regmap_field_write);
 
+/**
+ * regmap_field_update_bits(): Perform a read/modify/write cycle
+ *                              on the register field
+ *
+ * @field: Register field to write to
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
+{
+       mask = (mask << field->shift) & field->mask;
+
+       return regmap_update_bits(field->regmap, field->reg,
+                                 mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_field_update_bits);
+
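A short sketch of the new field helper, assuming a regmap_field allocated with devm_regmap_field_alloc(); the register address, bit range and chip_* names are invented for illustration. The mask and value are relative to the field, i.e. the core shifts them into place.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

/* Hypothetical field: bits [3:1] of register 0x04. */
static const struct reg_field chip_clk_div_field = REG_FIELD(0x04, 1, 3);

static int chip_halve_clock(struct device *dev, struct regmap *map)
{
        struct regmap_field *field;

        field = devm_regmap_field_alloc(dev, map, chip_clk_div_field);
        if (IS_ERR(field))
                return PTR_ERR(field);

        /* Set bit 0 of the field without touching its other bits. */
        return regmap_field_update_bits(field, 0x1, 0x1);
}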
+/**
+ * regmap_fields_write(): Write a value to a single register field with port ID
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_write(struct regmap_field *field, unsigned int id,
+                       unsigned int val)
+{
+       if (id >= field->id_size)
+               return -EINVAL;
+
+       return regmap_update_bits(field->regmap,
+                                 field->reg + (field->id_offset * id),
+                                 field->mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_write);

+
+/**
+ * regmap_fields_update_bits():        Perform a read/modify/write cycle
+ *                              on the register field
+ *
+ * @field: Register field to write to
+ * @id: port ID
+ * @mask: Bitmask to change
+ * @val: Value to be written
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
+                             unsigned int mask, unsigned int val)
+{
+       if (id >= field->id_size)
+               return -EINVAL;
+
+       mask = (mask << field->shift) & field->mask;
+
+       return regmap_update_bits(field->regmap,
+                                 field->reg + (field->id_offset * id),
+                                 mask, val << field->shift);
+}
+EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
+
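The regmap_fields_*() variants address one of several identical copies ("ports") of a field, spaced id_offset registers apart; a sketch with an invented four-port layout follows, reusing devm_regmap_field_alloc() as above.

/*
 * Hypothetical layout: four ports, each with a copy of the mode field
 * in bits [7:4] at 0x10, 0x14, 0x18 and 0x1c.
 */
static const struct reg_field chip_port_mode_field = {
        .reg       = 0x10,
        .lsb       = 4,
        .msb       = 7,
        .id_size   = 4,         /* number of ports */
        .id_offset = 4,         /* register stride between copies */
};

static int chip_port_setup(struct device *dev, struct regmap *map)
{
        struct regmap_field *field;
        int ret;

        field = devm_regmap_field_alloc(dev, map, chip_port_mode_field);
        if (IS_ERR(field))
                return PTR_ERR(field);

        /* Write the whole mode field of port 2. */
        ret = regmap_fields_write(field, 2, 0x5);
        if (ret)
                return ret;

        /* Flip only the low bit of the same field on port 3. */
        return regmap_fields_update_bits(field, 3, 0x1, 0x1);
}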
 /*
  * regmap_bulk_write(): Write multiple registers to the device
  *
@@ -1426,8 +1547,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
                                return ret;
                }
        } else {
-               ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
-                                       false);
+               ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
        }
 
        if (val_bytes != 1)
@@ -1473,7 +1593,11 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
 
        map->lock(map->lock_arg);
 
-       ret = _regmap_raw_write(map, reg, val, val_len, true);
+       map->async = true;
+
+       ret = _regmap_raw_write(map, reg, val, val_len);
+
+       map->async = false;
 
        map->unlock(map->lock_arg);
 
@@ -1676,6 +1800,39 @@ int regmap_field_read(struct regmap_field *field, unsigned int *val)
 }
 EXPORT_SYMBOL_GPL(regmap_field_read);
 
+/**
+ * regmap_fields_read(): Read a value from a single register field with port ID
+ *
+ * @field: Register field to read from
+ * @id: port ID
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+                      unsigned int *val)
+{
+       int ret;
+       unsigned int reg_val;
+
+       if (id >= field->id_size)
+               return -EINVAL;
+
+       ret = regmap_read(field->regmap,
+                         field->reg + (field->id_offset * id),
+                         &reg_val);
+       if (ret != 0)
+               return ret;
+
+       reg_val &= field->mask;
+       reg_val >>= field->shift;
+       *val = reg_val;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_fields_read);
+
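And the matching read side, continuing the hypothetical port field from the sketch above; the value comes back already masked and shifted down.

static int chip_port_get_mode(struct regmap_field *field, unsigned int id,
                              unsigned int *mode)
{
        /* Returns 0 on success with *mode holding the field value. */
        return regmap_fields_read(field, id, mode);
}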
 /**
  * regmap_bulk_read(): Read multiple registers from the device
  *
@@ -1787,6 +1944,41 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(regmap_update_bits);
 
+/**
+ * regmap_update_bits_async: Perform a read/modify/write cycle on the register
+ *                           map asynchronously
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * With most buses the read must be done synchronously, so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+                            unsigned int mask, unsigned int val)
+{
+       bool change;
+       int ret;
+
+       map->lock(map->lock_arg);
+
+       map->async = true;
+
+       ret = _regmap_update_bits(map, reg, mask, val, &change);
+
+       map->async = false;
+
+       map->unlock(map->lock_arg);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_async);
+
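A sketch of the asynchronous read/modify/write helper; as the comment above notes, the read normally has to be satisfied from a register cache, so the map is assumed to have been registered with one. The register and bit names are hypothetical.

#define CHIP_REG_CTRL           0x00
#define CHIP_CTRL_ENABLE        0x01

static int chip_enable_async(struct regmap *map)
{
        int ret;

        ret = regmap_update_bits_async(map, CHIP_REG_CTRL,
                                       CHIP_CTRL_ENABLE, CHIP_CTRL_ENABLE);
        if (ret)
                return ret;

        /* Wait for the queued write before relying on the new state. */
        return regmap_async_complete(map);
}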
 /**
  * regmap_update_bits_check: Perform a read/modify/write cycle on the
  *                           register map and report if updated
@@ -1812,6 +2004,43 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
 }
 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
 
+/**
+ * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
+ *                                 register map asynchronously and report if
+ *                                 updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * With most buses the read must be done synchronously, so this is most
+ * useful for devices with a cache which do not need to interact with
+ * the hardware to determine the current register value.
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+                                  unsigned int mask, unsigned int val,
+                                  bool *change)
+{
+       int ret;
+
+       map->lock(map->lock_arg);
+
+       map->async = true;
+
+       ret = _regmap_update_bits(map, reg, mask, val, change);
+
+       map->async = false;
+
+       map->unlock(map->lock_arg);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
+
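The _check variant only adds the change flag, which reports whether a write was actually issued; a brief sketch using the hypothetical control register from above.

static int chip_maybe_enable(struct regmap *map, bool *did_write)
{
        /* *did_write is set only if the register value actually changed. */
        return regmap_update_bits_check_async(map, CHIP_REG_CTRL,
                                              CHIP_CTRL_ENABLE,
                                              CHIP_CTRL_ENABLE, did_write);
}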
 void regmap_async_complete_cb(struct regmap_async *async, int ret)
 {
        struct regmap *map = async->map;
@@ -1820,8 +2049,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
        trace_regmap_async_io_complete(map->dev);
 
        spin_lock(&map->async_lock);
-
-       list_del(&async->list);
+       list_move(&async->list, &map->async_free);
        wake = list_empty(&map->async_list);
 
        if (ret != 0)
@@ -1829,8 +2057,6 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 
        spin_unlock(&map->async_lock);
 
-       schedule_work(&async->cleanup);
-
        if (wake)
                wake_up(&map->async_waitq);
 }
index a355e63a3838cb7ff7e7ef66194888adc65d6232..6fb98b53533f9d708dfc2a530e1033f3f09008b9 100644 (file)
@@ -188,8 +188,11 @@ static int bcma_host_pci_probe(struct pci_dev *dev,
                pci_write_config_dword(dev, 0x40, val & 0xffff00ff);
 
        /* SSB needed additional powering up, do we have any AMBA PCI cards? */
-       if (!pci_is_pcie(dev))
-               bcma_err(bus, "PCI card detected, report problems.\n");
+       if (!pci_is_pcie(dev)) {
+               bcma_err(bus, "PCI card detected; PCI cards are not supported.\n");
+               err = -ENXIO;
+               goto err_pci_release_regions;
+       }
 
        /* Map MMIO */
        err = -ENOMEM;
@@ -269,6 +272,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
 
 static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
+       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
index 40e715531aa65f0e63babd13757ad6dfb104d22a..e5647690a751ef1f1ea6bcd09ee24dc758a1e292 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/sysfs.h>
 #include <linux/miscdevice.h>
 #include <linux/falloc.h>
+#include <linux/aio.h>
 #include "loop.h"
 
 #include <asm/uaccess.h>
@@ -218,6 +219,48 @@ lo_do_transfer(struct loop_device *lo, int cmd,
        return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
 }
 
+#ifdef CONFIG_AIO
+static void lo_rw_aio_complete(u64 data, long res)
+{
+       struct bio *bio = (struct bio *)(uintptr_t)data;
+
+       if (res > 0)
+               res = 0;
+       else if (res < 0)
+               res = -EIO;
+
+       bio_endio(bio, res);
+}
+
+static int lo_rw_aio(struct loop_device *lo, struct bio *bio)
+{
+       struct file *file = lo->lo_backing_file;
+       struct kiocb *iocb;
+       unsigned int op;
+       struct iov_iter iter;
+       struct bio_vec *bvec;
+       size_t nr_segs;
+       loff_t pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+
+       iocb = aio_kernel_alloc(GFP_NOIO);
+       if (!iocb)
+               return -ENOMEM;
+
+       if (bio_rw(bio) & WRITE)
+               op = IOCB_CMD_WRITE_ITER;
+       else
+               op = IOCB_CMD_READ_ITER;
+
+       bvec = bio_iovec_idx(bio, bio->bi_idx);
+       nr_segs = bio_segments(bio);
+       iov_iter_init_bvec(&iter, bvec, nr_segs, bvec_length(bvec, nr_segs), 0);
+       aio_kernel_init_rw(iocb, file, iov_iter_count(&iter), pos);
+       aio_kernel_init_callback(iocb, lo_rw_aio_complete, (u64)(uintptr_t)bio);
+
+       return aio_kernel_submit(iocb, op, &iter);
+}
+#endif /* CONFIG_AIO */
+
 /**
  * __do_lo_send_write - helper for writing data to a loop device
  *
@@ -418,50 +461,33 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
-               struct file *file = lo->lo_backing_file;
-
-               if (bio->bi_rw & REQ_FLUSH) {
-                       ret = vfs_fsync(file, 0);
-                       if (unlikely(ret && ret != -EINVAL)) {
-                               ret = -EIO;
-                               goto out;
-                       }
-               }
+               ret = lo_send(lo, bio, pos);
+       } else
+               ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
 
-               /*
-                * We use punch hole to reclaim the free space used by the
-                * image a.k.a. discard. However we do not support discard if
-                * encryption is enabled, because it may give an attacker
-                * useful information.
-                */
-               if (bio->bi_rw & REQ_DISCARD) {
-                       struct file *file = lo->lo_backing_file;
-                       int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+       return ret;
+}
 
-                       if ((!file->f_op->fallocate) ||
-                           lo->lo_encrypt_key_size) {
-                               ret = -EOPNOTSUPP;
-                               goto out;
-                       }
-                       ret = file->f_op->fallocate(file, mode, pos,
-                                                   bio->bi_size);
-                       if (unlikely(ret && ret != -EINVAL &&
-                                    ret != -EOPNOTSUPP))
-                               ret = -EIO;
-                       goto out;
-               }
+static int lo_discard(struct loop_device *lo, struct bio *bio)
+{
+       struct file *file = lo->lo_backing_file;
+       int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
+       loff_t pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+       int ret;
 
-               ret = lo_send(lo, bio, pos);
+       /*
+        * We use punch hole to reclaim the free space used by the
+        * image a.k.a. discard. However we do not support discard if
+        * encryption is enabled, because it may give an attacker
+        * useful information.
+        */
 
-               if ((bio->bi_rw & REQ_FUA) && !ret) {
-                       ret = vfs_fsync(file, 0);
-                       if (unlikely(ret && ret != -EINVAL))
-                               ret = -EIO;
-               }
-       } else
-               ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
+       if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size)
+               return -EOPNOTSUPP;
 
-out:
+       ret = file->f_op->fallocate(file, mode, pos, bio->bi_size);
+       if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
+               ret = -EIO;
        return ret;
 }
 
@@ -525,7 +551,35 @@ static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
                do_loop_switch(lo, bio->bi_private);
                bio_put(bio);
        } else {
-               int ret = do_bio_filebacked(lo, bio);
+               int ret;
+
+               if (bio_rw(bio) == WRITE) {
+                       if (bio->bi_rw & REQ_FLUSH) {
+                               ret = vfs_fsync(lo->lo_backing_file, 1);
+                               if (unlikely(ret && ret != -EINVAL))
+                                       goto out;
+                       }
+                       if (bio->bi_rw & REQ_DISCARD) {
+                               ret = lo_discard(lo, bio);
+                               goto out;
+                       }
+               }
+#ifdef CONFIG_AIO
+               if (lo->lo_flags & LO_FLAGS_USE_AIO &&
+                   lo->transfer == transfer_none) {
+                       ret = lo_rw_aio(lo, bio);
+                       if (ret == 0)
+                               return;
+               } else
+#endif
+                       ret = do_bio_filebacked(lo, bio);
+
+               if ((bio_rw(bio) == WRITE) && bio->bi_rw & REQ_FUA && !ret) {
+                       ret = vfs_fsync(lo->lo_backing_file, 0);
+                       if (unlikely(ret && ret != -EINVAL))
+                               ret = -EIO;
+               }
+out:
                bio_endio(bio, ret);
        }
 }
@@ -547,6 +601,12 @@ static int loop_thread(void *data)
        struct loop_device *lo = data;
        struct bio *bio;
 
+       /*
+        * In cases where the underlying filesystem calls balance_dirty_pages(),
+        * we want less throttling to avoid lockups trying to write dirty
+        * pages through the loop device
+        */
+       current->flags |= PF_LESS_THROTTLE;
        set_user_nice(current, -20);
 
        while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) {
@@ -869,6 +929,14 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
            !file->f_op->write)
                lo_flags |= LO_FLAGS_READ_ONLY;
 
+#ifdef CONFIG_AIO
+       if (file->f_op->write_iter && file->f_op->read_iter &&
+           mapping->a_ops->direct_IO) {
+               file->f_flags |= O_DIRECT;
+               lo_flags |= LO_FLAGS_USE_AIO;
+       }
+#endif
+
        lo_blocksize = S_ISBLK(inode->i_mode) ?
                inode->i_bdev->bd_block_size : PAGE_SIZE;
 
@@ -912,6 +980,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
        set_blocksize(bdev, lo_blocksize);
 
+#ifdef CONFIG_AIO
+       /*
+        * We must not send too-small direct-io requests, so we inherit
+        * the logical block size from the underlying device
+        */
+       if ((lo_flags & LO_FLAGS_USE_AIO) && inode->i_sb->s_bdev)
+               blk_queue_logical_block_size(lo->lo_queue,
+                               bdev_logical_block_size(inode->i_sb->s_bdev));
+#endif
+
        lo->lo_thread = kthread_create(loop_thread, lo, "loop%d",
                                                lo->lo_number);
        if (IS_ERR(lo->lo_thread)) {
index da52092980e2312987b6a1040df5c0ba444852c3..26d03fa0bf26696d9e004b3983a580d409d3d006 100644 (file)
@@ -1949,12 +1949,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
        if (pci_request_selected_regions(pdev, bars, "nvme"))
                goto disable_pci;
 
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-       else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-       else
-               goto disable_pci;
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+               goto disable;
 
        pci_set_drvdata(pdev, dev);
        dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
@@ -2168,6 +2165,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        INIT_LIST_HEAD(&dev->namespaces);
        dev->pci_dev = pdev;
+
        result = nvme_set_instance(dev);
        if (result)
                goto free;
index 5cdf88b7ad9e72a36bc9aa25755b03485c987418..6b66252fc4e680a03213a2c927e04e4df7faa885 100644 (file)
@@ -456,18 +456,15 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
 {
        struct virtio_blk *vblk = bd->bd_disk->private_data;
-       struct virtio_blk_geometry vgeo;
-       int err;
 
        /* see if the host passed in geometry config */
-       err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
-                               offsetof(struct virtio_blk_config, geometry),
-                               &vgeo);
-
-       if (!err) {
-               geo->heads = vgeo.heads;
-               geo->sectors = vgeo.sectors;
-               geo->cylinders = vgeo.cylinders;
+       if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
+               virtio_cread(vblk->vdev, struct virtio_blk_config,
+                            geometry.cylinders, &geo->cylinders);
+               virtio_cread(vblk->vdev, struct virtio_blk_config,
+                            geometry.heads, &geo->heads);
+               virtio_cread(vblk->vdev, struct virtio_blk_config,
+                            geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
@@ -529,8 +526,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
                goto done;
 
        /* Host must always specify the capacity. */
-       vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
-                         &capacity, sizeof(capacity));
+       virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
 
        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
@@ -608,9 +604,9 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
        u8 writeback;
        int err;
 
-       err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE,
-                               offsetof(struct virtio_blk_config, wce),
-                               &writeback);
+       err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
+                                  struct virtio_blk_config, wce,
+                                  &writeback);
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
 
@@ -642,7 +638,6 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;
-       u8 writeback;
 
        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
@@ -652,11 +647,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
        if (i < 0)
                return -EINVAL;
 
-       writeback = i;
-       vdev->config->set(vdev,
-                         offsetof(struct virtio_blk_config, wce),
-                         &writeback, sizeof(writeback));
-
+       virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
 }
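The conversions in this file replace open-coded offsetof()-based config accesses with the typed virtio_cread helpers; a reduced sketch of the feature-gated pattern, with the fallback value chosen arbitrarily for illustration.

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_blk.h>

static u32 example_read_seg_max(struct virtio_device *vdev)
{
        u32 seg_max;
        int err;

        /* Read seg_max only if VIRTIO_BLK_F_SEG_MAX was negotiated. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &seg_max);
        if (err)
                seg_max = 1;    /* feature absent, fall back to a minimum */

        return seg_max;
}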
@@ -699,9 +690,9 @@ static int virtblk_probe(struct virtio_device *vdev)
        index = err;
 
        /* We need to know how many segments before we allocate. */
-       err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
-                               offsetof(struct virtio_blk_config, seg_max),
-                               &sg_elems);
+       err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
+                                  struct virtio_blk_config, seg_max,
+                                  &sg_elems);
 
        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
@@ -772,8 +763,7 @@ static int virtblk_probe(struct virtio_device *vdev)
                set_disk_ro(vblk->disk, 1);
 
        /* Host must always specify the capacity. */
-       vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
-                         &cap, sizeof(cap));
+       virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);
 
        /* If capacity is too big, truncate with warning. */
        if ((sector_t)cap != cap) {
@@ -794,46 +784,45 @@ static int virtblk_probe(struct virtio_device *vdev)
 
        /* Host can optionally specify maximum segment size and number of
         * segments. */
-       err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
-                               offsetof(struct virtio_blk_config, size_max),
-                               &v);
+       err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
+                                  struct virtio_blk_config, size_max, &v);
        if (!err)
                blk_queue_max_segment_size(q, v);
        else
                blk_queue_max_segment_size(q, -1U);
 
        /* Host can optionally specify the block size of the device */
-       err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
-                               offsetof(struct virtio_blk_config, blk_size),
-                               &blk_size);
+       err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
+                                  struct virtio_blk_config, blk_size,
+                                  &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);
 
        /* Use topology information if available */
-       err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-                       offsetof(struct virtio_blk_config, physical_block_exp),
-                       &physical_block_exp);
+       err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+                                  struct virtio_blk_config, physical_block_exp,
+                                  &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));
 
-       err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-                       offsetof(struct virtio_blk_config, alignment_offset),
-                       &alignment_offset);
+       err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+                                  struct virtio_blk_config, alignment_offset,
+                                  &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);
 
-       err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-                       offsetof(struct virtio_blk_config, min_io_size),
-                       &min_io_size);
+       err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+                                  struct virtio_blk_config, min_io_size,
+                                  &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);
 
-       err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
-                       offsetof(struct virtio_blk_config, opt_io_size),
-                       &opt_io_size);
+       err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
+                                  struct virtio_blk_config, opt_io_size,
+                                  &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);
 
@@ -899,7 +888,7 @@ static void virtblk_remove(struct virtio_device *vdev)
                ida_simple_remove(&vd_index_ida, index);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtblk_freeze(struct virtio_device *vdev)
 {
        struct virtio_blk *vblk = vdev->priv;
@@ -959,7 +948,7 @@ static struct virtio_driver virtio_blk = {
        .probe                  = virtblk_probe,
        .remove                 = virtblk_remove,
        .config_changed         = virtblk_config_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
        .freeze                 = virtblk_freeze,
        .restore                = virtblk_restore,
 #endif
index 4afae20df512cb6d88ce61b6d11c489d25eb88b2..9fe8a875a8277b51c069ce682d6ece31c922d426 100644 (file)
@@ -30,3 +30,5 @@ hci_uart-$(CONFIG_BT_HCIUART_LL)      += hci_ll.o
 hci_uart-$(CONFIG_BT_HCIUART_ATH3K)    += hci_ath.o
 hci_uart-$(CONFIG_BT_HCIUART_3WIRE)    += hci_h5.o
 hci_uart-objs                          := $(hci_uart-y)
+
+ccflags-y += -D__CHECK_ENDIAN__
index 0a327f4154a2b2039ad4ae0e50b7195dfa2cb5f0..6bfc1bb318f6399397ca8f169cc07fd98b46256d 100644 (file)
@@ -57,7 +57,7 @@ struct ath3k_version {
        unsigned char   reserved[0x07];
 };
 
-static struct usb_device_id ath3k_table[] = {
+static const struct usb_device_id ath3k_table[] = {
        /* Atheros AR3011 */
        { USB_DEVICE(0x0CF3, 0x3000) },
 
@@ -112,7 +112,7 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
 #define BTUSB_ATH3012          0x80
 /* This table is to load patch and sysconfig files
  * for AR3012 */
-static struct usb_device_id ath3k_blist_tbl[] = {
+static const struct usb_device_id ath3k_blist_tbl[] = {
 
        /* Atheros AR3012 with sflash firmware*/
        { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
index 995aee9cba22a8d871289004a4cef5ec60cdeaa2..31386998c9a7b4159f04194f2125ee10d73cc920 100644 (file)
@@ -42,7 +42,7 @@
 
 static struct usb_driver bfusb_driver;
 
-static struct usb_device_id bfusb_table[] = {
+static const struct usb_device_id bfusb_table[] = {
        /* AVM BlueFRITZ! USB */
        { USB_DEVICE(0x057c, 0x2200) },
 
@@ -318,7 +318,6 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
                        return -ENOMEM;
                }
 
-               skb->dev = (void *) data->hdev;
                bt_cb(skb)->pkt_type = pkt_type;
 
                data->reassembly = skb;
@@ -333,7 +332,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
                memcpy(skb_put(data->reassembly, len), buf, len);
 
        if (hdr & 0x08) {
-               hci_recv_frame(data->reassembly);
+               hci_recv_frame(data->hdev, data->reassembly);
                data->reassembly = NULL;
        }
 
@@ -465,26 +464,18 @@ static int bfusb_close(struct hci_dev *hdev)
        return 0;
 }
 
-static int bfusb_send_frame(struct sk_buff *skb)
+static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hci_dev *hdev = (struct hci_dev *) skb->dev;
-       struct bfusb_data *data;
+       struct bfusb_data *data = hci_get_drvdata(hdev);
        struct sk_buff *nskb;
        unsigned char buf[3];
        int sent = 0, size, count;
 
        BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);
 
-       if (!hdev) {
-               BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-               return -ENODEV;
-       }
-
        if (!test_bit(HCI_RUNNING, &hdev->flags))
                return -EBUSY;
 
-       data = hci_get_drvdata(hdev);
-
        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                hdev->stat.cmd_tx++;
@@ -544,11 +535,6 @@ static int bfusb_send_frame(struct sk_buff *skb)
        return 0;
 }
 
-static int bfusb_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
-       return -ENOIOCTLCMD;
-}
-
 static int bfusb_load_firmware(struct bfusb_data *data,
                               const unsigned char *firmware, int count)
 {
@@ -699,11 +685,10 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
        hci_set_drvdata(hdev, data);
        SET_HCIDEV_DEV(hdev, &intf->dev);
 
-       hdev->open     = bfusb_open;
-       hdev->close    = bfusb_close;
-       hdev->flush    = bfusb_flush;
-       hdev->send     = bfusb_send_frame;
-       hdev->ioctl    = bfusb_ioctl;
+       hdev->open  = bfusb_open;
+       hdev->close = bfusb_close;
+       hdev->flush = bfusb_flush;
+       hdev->send  = bfusb_send_frame;
 
        if (hci_register_dev(hdev) < 0) {
                BT_ERR("Can't register HCI device");
index 6c3e3d43c718a1d756c9308a8a135bd9e64a2e32..57427de864a657530ec92f1a54fe9e0d5c52938e 100644 (file)
@@ -399,7 +399,6 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
 
                if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
 
-                       info->rx_skb->dev = (void *) info->hdev;
                        bt_cb(info->rx_skb)->pkt_type = buf[i];
 
                        switch (bt_cb(info->rx_skb)->pkt_type) {
@@ -477,7 +476,7 @@ static void bluecard_receive(bluecard_info_t *info, unsigned int offset)
                                        break;
 
                                case RECV_WAIT_DATA:
-                                       hci_recv_frame(info->rx_skb);
+                                       hci_recv_frame(info->hdev, info->rx_skb);
                                        info->rx_skb = NULL;
                                        break;
 
@@ -659,17 +658,9 @@ static int bluecard_hci_close(struct hci_dev *hdev)
 }
 
 
-static int bluecard_hci_send_frame(struct sk_buff *skb)
+static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       bluecard_info_t *info;
-       struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
-
-       if (!hdev) {
-               BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-               return -ENODEV;
-       }
-
-       info = hci_get_drvdata(hdev);
+       bluecard_info_t *info = hci_get_drvdata(hdev);
 
        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
@@ -693,12 +684,6 @@ static int bluecard_hci_send_frame(struct sk_buff *skb)
 }
 
 
-static int bluecard_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
-       return -ENOIOCTLCMD;
-}
-
-
 
 /* ======================== Card services HCI interaction ======================== */
 
@@ -734,11 +719,10 @@ static int bluecard_open(bluecard_info_t *info)
        hci_set_drvdata(hdev, info);
        SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
-       hdev->open     = bluecard_hci_open;
-       hdev->close    = bluecard_hci_close;
-       hdev->flush    = bluecard_hci_flush;
-       hdev->send     = bluecard_hci_send_frame;
-       hdev->ioctl    = bluecard_hci_ioctl;
+       hdev->open  = bluecard_hci_open;
+       hdev->close = bluecard_hci_close;
+       hdev->flush = bluecard_hci_flush;
+       hdev->send  = bluecard_hci_send_frame;
 
        id = inb(iobase + 0x30);
 
index 2fe4a8031348f0c8b05074eb9889a1d2e02a7a91..8a319913c9a96fd60c6d021c5edd055b83ef0aad 100644 (file)
@@ -37,7 +37,7 @@
 
 #define VERSION "0.10"
 
-static struct usb_device_id bpa10x_table[] = {
+static const struct usb_device_id bpa10x_table[] = {
        /* Tektronix BPA 100/105 (Digianswer) */
        { USB_DEVICE(0x08fd, 0x0002) },
 
@@ -129,8 +129,6 @@ static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
                                return -ENOMEM;
                        }
 
-                       skb->dev = (void *) hdev;
-
                        data->rx_skb[queue] = skb;
 
                        scb = (void *) skb->cb;
@@ -155,7 +153,7 @@ static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count)
                        data->rx_skb[queue] = NULL;
 
                        bt_cb(skb)->pkt_type = scb->type;
-                       hci_recv_frame(skb);
+                       hci_recv_frame(hdev, skb);
                }
 
                count -= len; buf += len;
@@ -352,9 +350,8 @@ static int bpa10x_flush(struct hci_dev *hdev)
        return 0;
 }
 
-static int bpa10x_send_frame(struct sk_buff *skb)
+static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        struct bpa10x_data *data = hci_get_drvdata(hdev);
        struct usb_ctrlrequest *dr;
        struct urb *urb;
@@ -366,6 +363,8 @@ static int bpa10x_send_frame(struct sk_buff *skb)
        if (!test_bit(HCI_RUNNING, &hdev->flags))
                return -EBUSY;
 
+       skb->dev = (void *) hdev;
+
        urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb)
                return -ENOMEM;
index a1aaa3ba2a4bafed0e2c4573b1614e24ddb84306..73d87994d028ad3e64397a61e05a2e8c15f78321 100644 (file)
@@ -247,7 +247,6 @@ static void bt3c_receive(bt3c_info_t *info)
 
                if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
 
-                       info->rx_skb->dev = (void *) info->hdev;
                        bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
                        inb(iobase + DATA_H);
                        //printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
@@ -318,7 +317,7 @@ static void bt3c_receive(bt3c_info_t *info)
                                        break;
 
                                case RECV_WAIT_DATA:
-                                       hci_recv_frame(info->rx_skb);
+                                       hci_recv_frame(info->hdev, info->rx_skb);
                                        info->rx_skb = NULL;
                                        break;
 
@@ -416,19 +415,11 @@ static int bt3c_hci_close(struct hci_dev *hdev)
 }
 
 
-static int bt3c_hci_send_frame(struct sk_buff *skb)
+static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       bt3c_info_t *info;
-       struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
+       bt3c_info_t *info = hci_get_drvdata(hdev);
        unsigned long flags;
 
-       if (!hdev) {
-               BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-               return -ENODEV;
-       }
-
-       info = hci_get_drvdata(hdev);
-
        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                hdev->stat.cmd_tx++;
@@ -455,12 +446,6 @@ static int bt3c_hci_send_frame(struct sk_buff *skb)
 }
 
 
-static int bt3c_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
-       return -ENOIOCTLCMD;
-}
-
-
 
 /* ======================== Card services HCI interaction ======================== */
 
@@ -577,11 +562,10 @@ static int bt3c_open(bt3c_info_t *info)
        hci_set_drvdata(hdev, info);
        SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
-       hdev->open     = bt3c_hci_open;
-       hdev->close    = bt3c_hci_close;
-       hdev->flush    = bt3c_hci_flush;
-       hdev->send     = bt3c_hci_send_frame;
-       hdev->ioctl    = bt3c_hci_ioctl;
+       hdev->open  = bt3c_hci_open;
+       hdev->close = bt3c_hci_close;
+       hdev->flush = bt3c_hci_flush;
+       hdev->send  = bt3c_hci_send_frame;
 
        /* Load firmware */
        err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
index 27068d1493808ec3f2acda115fd51773cbc4cfc5..f9d183387f4585b37e0642aef9e269e17c6be1a4 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <net/bluetooth/bluetooth.h>
+#include <linux/ctype.h>
+#include <linux/firmware.h>
 
 #define BTM_HEADER_LEN                 4
 #define BTM_UPLD_SIZE                  2312
@@ -41,6 +43,8 @@ struct btmrvl_thread {
 struct btmrvl_device {
        void *card;
        struct hci_dev *hcidev;
+       struct device *dev;
+       const char *cal_data;
 
        u8 dev_type;
 
@@ -91,6 +95,7 @@ struct btmrvl_private {
 #define BT_CMD_HOST_SLEEP_CONFIG       0x59
 #define BT_CMD_HOST_SLEEP_ENABLE       0x5A
 #define BT_CMD_MODULE_CFG_REQ          0x5B
+#define BT_CMD_LOAD_CONFIG_DATA                0x61
 
 /* Sub-commands: Module Bringup/Shutdown Request/Response */
 #define MODULE_BRINGUP_REQ             0xF1
@@ -116,11 +121,8 @@ struct btmrvl_private {
 #define PS_SLEEP                       0x01
 #define PS_AWAKE                       0x00
 
-struct btmrvl_cmd {
-       __le16 ocf_ogf;
-       u8 length;
-       u8 data[4];
-} __packed;
+#define BT_CMD_DATA_SIZE               32
+#define BT_CAL_DATA_SIZE               28
 
 struct btmrvl_event {
        u8 ec;          /* event counter */
index 9a9f51875df5ef7826cea976b6afefa0889e160b..5cf31c4fe6d1cf552c9846c8fe7a79857851c9b6 100644 (file)
@@ -57,8 +57,7 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
                ocf = hci_opcode_ocf(opcode);
                ogf = hci_opcode_ogf(opcode);
 
-               if (ocf == BT_CMD_MODULE_CFG_REQ &&
-                                       priv->btmrvl_dev.sendcmdflag) {
+               if (priv->btmrvl_dev.sendcmdflag) {
                        priv->btmrvl_dev.sendcmdflag = false;
                        priv->adapter->cmd_complete = true;
                        wake_up_interruptible(&priv->adapter->cmd_wait_q);
@@ -116,7 +115,6 @@ int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb)
                        adapter->hs_state = HS_ACTIVATED;
                        if (adapter->psmode)
                                adapter->ps_state = PS_SLEEP;
-                       wake_up_interruptible(&adapter->cmd_wait_q);
                        BT_DBG("HS ACTIVATED!");
                } else {
                        BT_DBG("HS Enable failed");
@@ -168,45 +166,50 @@ exit:
 }
 EXPORT_SYMBOL_GPL(btmrvl_process_event);
 
-int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 cmd_no,
+                               const void *param, u8 len)
 {
        struct sk_buff *skb;
-       struct btmrvl_cmd *cmd;
-       int ret = 0;
+       struct hci_command_hdr *hdr;
 
-       skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
+       skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
        if (skb == NULL) {
                BT_ERR("No free skb");
                return -ENOMEM;
        }
 
-       cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-       cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_MODULE_CFG_REQ));
-       cmd->length = 1;
-       cmd->data[0] = subcmd;
+       hdr = (struct hci_command_hdr *)skb_put(skb, HCI_COMMAND_HDR_SIZE);
+       hdr->opcode = cpu_to_le16(hci_opcode_pack(OGF, cmd_no));
+       hdr->plen = len;
+
+       if (len)
+               memcpy(skb_put(skb, len), param, len);
 
        bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
 
-       skb->dev = (void *) priv->btmrvl_dev.hcidev;
        skb_queue_head(&priv->adapter->tx_queue, skb);
 
        priv->btmrvl_dev.sendcmdflag = true;
 
        priv->adapter->cmd_complete = false;
 
-       BT_DBG("Queue module cfg Command");
-
        wake_up_interruptible(&priv->main_thread.wait_q);
 
        if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
                                priv->adapter->cmd_complete,
-                               msecs_to_jiffies(WAIT_UNTIL_CMD_RESP))) {
-               ret = -ETIMEDOUT;
-               BT_ERR("module_cfg_cmd(%x): timeout: %d",
-                                       subcmd, priv->btmrvl_dev.sendcmdflag);
-       }
+                               msecs_to_jiffies(WAIT_UNTIL_CMD_RESP)))
+               return -ETIMEDOUT;
+
+       return 0;
+}
 
-       BT_DBG("module cfg Command done");
+int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd)
+{
+       int ret;
+
+       ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1);
+       if (ret)
+               BT_ERR("module_cfg_cmd(%x) failed\n", subcmd);
 
        return ret;
 }
@@ -214,61 +217,36 @@ EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd);
 
 int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv)
 {
-       struct sk_buff *skb;
-       struct btmrvl_cmd *cmd;
-
-       skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
-       if (!skb) {
-               BT_ERR("No free skb");
-               return -ENOMEM;
-       }
-
-       cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-       cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
-                                                  BT_CMD_HOST_SLEEP_CONFIG));
-       cmd->length = 2;
-       cmd->data[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
-       cmd->data[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
+       int ret;
+       u8 param[2];
 
-       bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
+       param[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8;
+       param[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff);
 
-       skb->dev = (void *) priv->btmrvl_dev.hcidev;
-       skb_queue_head(&priv->adapter->tx_queue, skb);
+       BT_DBG("Sending HSCFG Command, gpio=0x%x, gap=0x%x",
+              param[0], param[1]);
 
-       BT_DBG("Queue HSCFG Command, gpio=0x%x, gap=0x%x", cmd->data[0],
-              cmd->data[1]);
+       ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2);
+       if (ret)
+               BT_ERR("HSCFG command failed\n");
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd);
 
 int btmrvl_enable_ps(struct btmrvl_private *priv)
 {
-       struct sk_buff *skb;
-       struct btmrvl_cmd *cmd;
-
-       skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
-       if (skb == NULL) {
-               BT_ERR("No free skb");
-               return -ENOMEM;
-       }
-
-       cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-       cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF,
-                                       BT_CMD_AUTO_SLEEP_MODE));
-       cmd->length = 1;
+       int ret;
+       u8 param;
 
        if (priv->btmrvl_dev.psmode)
-               cmd->data[0] = BT_PS_ENABLE;
+               param = BT_PS_ENABLE;
        else
-               cmd->data[0] = BT_PS_DISABLE;
+               param = BT_PS_DISABLE;
 
-       bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
-       skb->dev = (void *) priv->btmrvl_dev.hcidev;
-       skb_queue_head(&priv->adapter->tx_queue, skb);
-
-       BT_DBG("Queue PSMODE Command:%d", cmd->data[0]);
+       ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, &param, 1);
+       if (ret)
+               BT_ERR("PSMODE command failed\n");
 
        return 0;
 }
@@ -276,37 +254,11 @@ EXPORT_SYMBOL_GPL(btmrvl_enable_ps);
 
 int btmrvl_enable_hs(struct btmrvl_private *priv)
 {
-       struct sk_buff *skb;
-       struct btmrvl_cmd *cmd;
-       int ret = 0;
-
-       skb = bt_skb_alloc(sizeof(*cmd), GFP_ATOMIC);
-       if (skb == NULL) {
-               BT_ERR("No free skb");
-               return -ENOMEM;
-       }
-
-       cmd = (struct btmrvl_cmd *) skb_put(skb, sizeof(*cmd));
-       cmd->ocf_ogf = cpu_to_le16(hci_opcode_pack(OGF, BT_CMD_HOST_SLEEP_ENABLE));
-       cmd->length = 0;
-
-       bt_cb(skb)->pkt_type = MRVL_VENDOR_PKT;
-
-       skb->dev = (void *) priv->btmrvl_dev.hcidev;
-       skb_queue_head(&priv->adapter->tx_queue, skb);
-
-       BT_DBG("Queue hs enable Command");
-
-       wake_up_interruptible(&priv->main_thread.wait_q);
+       int ret;
 
-       if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q,
-                       priv->adapter->hs_state,
-                       msecs_to_jiffies(WAIT_UNTIL_HS_STATE_CHANGED))) {
-               ret = -ETIMEDOUT;
-               BT_ERR("timeout: %d, %d,%d", priv->adapter->hs_state,
-                                               priv->adapter->ps_state,
-                                               priv->adapter->wakeup_tries);
-       }
+       ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0);
+       if (ret)
+               BT_ERR("Host sleep enable command failed\n");
 
        return ret;
 }
@@ -403,26 +355,12 @@ static void btmrvl_free_adapter(struct btmrvl_private *priv)
        priv->adapter = NULL;
 }
 
-static int btmrvl_ioctl(struct hci_dev *hdev,
-                               unsigned int cmd, unsigned long arg)
+static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       return -ENOIOCTLCMD;
-}
-
-static int btmrvl_send_frame(struct sk_buff *skb)
-{
-       struct hci_dev *hdev = (struct hci_dev *) skb->dev;
-       struct btmrvl_private *priv = NULL;
+       struct btmrvl_private *priv = hci_get_drvdata(hdev);
 
        BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len);
 
-       if (!hdev) {
-               BT_ERR("Frame for unknown HCI device");
-               return -ENODEV;
-       }
-
-       priv = hci_get_drvdata(hdev);
-
        if (!test_bit(HCI_RUNNING, &hdev->flags)) {
                BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags);
                print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET,
@@ -479,6 +417,137 @@ static int btmrvl_open(struct hci_dev *hdev)
        return 0;
 }
 
+/*
+ * This function parses provided calibration data input. It should contain
+ * hex bytes separated by space or new line character. Here is an example.
+ * 00 1C 01 37 FF FF FF FF 02 04 7F 01
+ * CE BA 00 00 00 2D C6 C0 00 00 00 00
+ * 00 F0 00 00
+ */
+static int btmrvl_parse_cal_cfg(const u8 *src, u32 len, u8 *dst, u32 dst_size)
+{
+       const u8 *s = src;
+       u8 *d = dst;
+       int ret;
+       u8 tmp[3];
+
+       tmp[2] = '\0';
+       while ((s - src) <= len - 2) {
+               if (isspace(*s)) {
+                       s++;
+                       continue;
+               }
+
+               if (isxdigit(*s)) {
+                       if ((d - dst) >= dst_size) {
+                               BT_ERR("calibration data file too big!!!");
+                               return -EINVAL;
+                       }
+
+                       memcpy(tmp, s, 2);
+
+                       ret = kstrtou8(tmp, 16, d++);
+                       if (ret < 0)
+                               return ret;
+
+                       s += 2;
+               } else {
+                       return -EINVAL;
+               }
+       }
+       if (d == dst)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int btmrvl_load_cal_data(struct btmrvl_private *priv,
+                               u8 *config_data)
+{
+       int i, ret;
+       u8 data[BT_CMD_DATA_SIZE];
+
+       data[0] = 0x00;
+       data[1] = 0x00;
+       data[2] = 0x00;
+       data[3] = BT_CMD_DATA_SIZE - 4;
+
+       /* Swap cal-data bytes. Each four bytes are swapped. Considering 4
+        * byte SDIO header offset, mapping of input and output bytes will be
+        * {3, 2, 1, 0} -> {0+4, 1+4, 2+4, 3+4},
+        * {7, 6, 5, 4} -> {4+4, 5+4, 6+4, 7+4} */
+       for (i = 4; i < BT_CMD_DATA_SIZE; i++)
+               data[i] = config_data[(i / 4) * 8 - 1 - i];
+
+       print_hex_dump_bytes("Calibration data: ",
+                            DUMP_PREFIX_OFFSET, data, BT_CMD_DATA_SIZE);
+
+       ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data,
+                                  BT_CMD_DATA_SIZE);
+       if (ret)
+               BT_ERR("Failed to download caibration data\n");
+
+       return 0;
+}
+
+static int
+btmrvl_process_cal_cfg(struct btmrvl_private *priv, u8 *data, u32 size)
+{
+       u8 cal_data[BT_CAL_DATA_SIZE];
+       int ret;
+
+       ret = btmrvl_parse_cal_cfg(data, size, cal_data, sizeof(cal_data));
+       if (ret)
+               return ret;
+
+       ret = btmrvl_load_cal_data(priv, cal_data);
+       if (ret) {
+               BT_ERR("Fail to load calibrate data");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int btmrvl_cal_data_config(struct btmrvl_private *priv)
+{
+       const struct firmware *cfg;
+       int ret;
+       const char *cal_data = priv->btmrvl_dev.cal_data;
+
+       if (!cal_data)
+               return 0;
+
+       ret = request_firmware(&cfg, cal_data, priv->btmrvl_dev.dev);
+       if (ret < 0) {
+               BT_DBG("Failed to get %s file, skipping cal data download",
+                      cal_data);
+               return 0;
+       }
+
+       ret = btmrvl_process_cal_cfg(priv, (u8 *)cfg->data, cfg->size);
+       release_firmware(cfg);
+       return ret;
+}
+
+static int btmrvl_setup(struct hci_dev *hdev)
+{
+       struct btmrvl_private *priv = hci_get_drvdata(hdev);
+
+       btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+
+       if (btmrvl_cal_data_config(priv))
+               BT_ERR("Set cal data failed");
+
+       priv->btmrvl_dev.psmode = 1;
+       btmrvl_enable_ps(priv);
+
+       priv->btmrvl_dev.gpio_gap = 0xffff;
+       btmrvl_send_hscfg_cmd(priv);
+
+       return 0;
+}
+
 /*
  * This function handles the event generated by firmware, rx data
  * received from firmware, and tx data sent from kernel.
@@ -566,14 +635,12 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
        priv->btmrvl_dev.hcidev = hdev;
        hci_set_drvdata(hdev, priv);
 
-       hdev->bus = HCI_SDIO;
-       hdev->open = btmrvl_open;
+       hdev->bus   = HCI_SDIO;
+       hdev->open  = btmrvl_open;
        hdev->close = btmrvl_close;
        hdev->flush = btmrvl_flush;
-       hdev->send = btmrvl_send_frame;
-       hdev->ioctl = btmrvl_ioctl;
-
-       btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ);
+       hdev->send  = btmrvl_send_frame;
+       hdev->setup = btmrvl_setup;
 
        hdev->dev_type = priv->btmrvl_dev.dev_type;
 
index 00da6df9f71edfcd6f92acbe70129af5ec62e0bf..fabcf5bb48afbb1c62cf5145d5a12f60eb3a05a8 100644 (file)
@@ -18,7 +18,6 @@
  * this warranty disclaimer.
  **/
 
-#include <linux/firmware.h>
 #include <linux/slab.h>
 
 #include <linux/mmc/sdio_ids.h>
@@ -102,6 +101,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_88xx = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
        .helper         = "mrvl/sd8688_helper.bin",
        .firmware       = "mrvl/sd8688.bin",
+       .cal_data       = NULL,
        .reg            = &btmrvl_reg_8688,
        .sd_blksz_fw_dl = 64,
 };
@@ -109,6 +109,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
        .helper         = NULL,
        .firmware       = "mrvl/sd8787_uapsta.bin",
+       .cal_data       = NULL,
        .reg            = &btmrvl_reg_87xx,
        .sd_blksz_fw_dl = 256,
 };
@@ -116,6 +117,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
        .helper         = NULL,
        .firmware       = "mrvl/sd8797_uapsta.bin",
+       .cal_data       = "mrvl/sd8797_caldata.conf",
        .reg            = &btmrvl_reg_87xx,
        .sd_blksz_fw_dl = 256,
 };
@@ -123,6 +125,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
        .helper         = NULL,
        .firmware       = "mrvl/sd8897_uapsta.bin",
+       .cal_data       = NULL,
        .reg            = &btmrvl_reg_88xx,
        .sd_blksz_fw_dl = 256,
 };
@@ -597,15 +600,14 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
        case HCI_SCODATA_PKT:
        case HCI_EVENT_PKT:
                bt_cb(skb)->pkt_type = type;
-               skb->dev = (void *)hdev;
                skb_put(skb, buf_len);
                skb_pull(skb, SDIO_HEADER_LEN);
 
                if (type == HCI_EVENT_PKT) {
                        if (btmrvl_check_evtpkt(priv, skb))
-                               hci_recv_frame(skb);
+                               hci_recv_frame(hdev, skb);
                } else {
-                       hci_recv_frame(skb);
+                       hci_recv_frame(hdev, skb);
                }
 
                hdev->stat.byte_rx += buf_len;
@@ -613,12 +615,11 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
 
        case MRVL_VENDOR_PKT:
                bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
-               skb->dev = (void *)hdev;
                skb_put(skb, buf_len);
                skb_pull(skb, SDIO_HEADER_LEN);
 
                if (btmrvl_process_event(priv, skb))
-                       hci_recv_frame(skb);
+                       hci_recv_frame(hdev, skb);
 
                hdev->stat.byte_rx += buf_len;
                break;
@@ -1006,6 +1007,7 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
                struct btmrvl_sdio_device *data = (void *) id->driver_data;
                card->helper = data->helper;
                card->firmware = data->firmware;
+               card->cal_data = data->cal_data;
                card->reg = data->reg;
                card->sd_blksz_fw_dl = data->sd_blksz_fw_dl;
        }
@@ -1034,6 +1036,8 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
        }
 
        card->priv = priv;
+       priv->btmrvl_dev.dev = &card->func->dev;
+       priv->btmrvl_dev.cal_data = card->cal_data;
 
        /* Initialize the interface specific function pointers */
        priv->hw_host_to_card = btmrvl_sdio_host_to_card;
@@ -1046,12 +1050,6 @@ static int btmrvl_sdio_probe(struct sdio_func *func,
                goto disable_host_int;
        }
 
-       priv->btmrvl_dev.psmode = 1;
-       btmrvl_enable_ps(priv);
-
-       priv->btmrvl_dev.gpio_gap = 0xffff;
-       btmrvl_send_hscfg_cmd(priv);
-
        return 0;
 
 disable_host_int:
@@ -1222,4 +1220,5 @@ MODULE_FIRMWARE("mrvl/sd8688_helper.bin");
 MODULE_FIRMWARE("mrvl/sd8688.bin");
 MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
 MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_caldata.conf");
 MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
index 43d35a609ca9a94795afb731d230fa88ca109bef..6872d9ecac074ba04792c22776b3baee3971c9de 100644 (file)
@@ -85,6 +85,7 @@ struct btmrvl_sdio_card {
        u32 ioport;
        const char *helper;
        const char *firmware;
+       const char *cal_data;
        const struct btmrvl_sdio_card_reg *reg;
        u16 sd_blksz_fw_dl;
        u8 rx_unit;
@@ -94,6 +95,7 @@ struct btmrvl_sdio_card {
 struct btmrvl_sdio_device {
        const char *helper;
        const char *firmware;
+       const char *cal_data;
        const struct btmrvl_sdio_card_reg *reg;
        u16 sd_blksz_fw_dl;
 };
index 4a9909713874dd03eb52240b148ed83b9a22b9d8..b61440aaee658210143f435a872ed51e649485e6 100644 (file)
@@ -157,10 +157,9 @@ static int btsdio_rx_packet(struct btsdio_data *data)
 
        data->hdev->stat.byte_rx += len;
 
-       skb->dev = (void *) data->hdev;
        bt_cb(skb)->pkt_type = hdr[3];
 
-       err = hci_recv_frame(skb);
+       err = hci_recv_frame(data->hdev, skb);
        if (err < 0)
                return err;
 
@@ -255,9 +254,8 @@ static int btsdio_flush(struct hci_dev *hdev)
        return 0;
 }
 
-static int btsdio_send_frame(struct sk_buff *skb)
+static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        struct btsdio_data *data = hci_get_drvdata(hdev);
 
        BT_DBG("%s", hdev->name);
index beb262f2dc4d087432f6e18ab0862888683d0c6d..a03ecc22a561caf4c3d1bdc53489a5b9612745d8 100644 (file)
@@ -198,7 +198,6 @@ static void btuart_receive(btuart_info_t *info)
 
                if (info->rx_state == RECV_WAIT_PACKET_TYPE) {
 
-                       info->rx_skb->dev = (void *) info->hdev;
                        bt_cb(info->rx_skb)->pkt_type = inb(iobase + UART_RX);
 
                        switch (bt_cb(info->rx_skb)->pkt_type) {
@@ -265,7 +264,7 @@ static void btuart_receive(btuart_info_t *info)
                                        break;
 
                                case RECV_WAIT_DATA:
-                                       hci_recv_frame(info->rx_skb);
+                                       hci_recv_frame(info->hdev, info->rx_skb);
                                        info->rx_skb = NULL;
                                        break;
 
@@ -424,17 +423,9 @@ static int btuart_hci_close(struct hci_dev *hdev)
 }
 
 
-static int btuart_hci_send_frame(struct sk_buff *skb)
+static int btuart_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       btuart_info_t *info;
-       struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
-
-       if (!hdev) {
-               BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-               return -ENODEV;
-       }
-
-       info = hci_get_drvdata(hdev);
+       btuart_info_t *info = hci_get_drvdata(hdev);
 
        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
@@ -458,12 +449,6 @@ static int btuart_hci_send_frame(struct sk_buff *skb)
 }
 
 
-static int btuart_hci_ioctl(struct hci_dev *hdev, unsigned int cmd, unsigned long arg)
-{
-       return -ENOIOCTLCMD;
-}
-
-
 
 /* ======================== Card services HCI interaction ======================== */
 
@@ -495,11 +480,10 @@ static int btuart_open(btuart_info_t *info)
        hci_set_drvdata(hdev, info);
        SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
-       hdev->open     = btuart_hci_open;
-       hdev->close    = btuart_hci_close;
-       hdev->flush    = btuart_hci_flush;
-       hdev->send     = btuart_hci_send_frame;
-       hdev->ioctl    = btuart_hci_ioctl;
+       hdev->open  = btuart_hci_open;
+       hdev->close = btuart_hci_close;
+       hdev->flush = btuart_hci_flush;
+       hdev->send  = btuart_hci_send_frame;
 
        spin_lock_irqsave(&(info->lock), flags);
 
index f3dfc0a88fdcb95e25647caa6cfc14a1d9a5a484..30868fa870f6086d614184a389feef3267619eef 100644 (file)
@@ -50,7 +50,7 @@ static struct usb_driver btusb_driver;
 #define BTUSB_ATH3012          0x80
 #define BTUSB_INTEL            0x100
 
-static struct usb_device_id btusb_table[] = {
+static const struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
        { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
 
@@ -121,7 +121,7 @@ static struct usb_device_id btusb_table[] = {
 
 MODULE_DEVICE_TABLE(usb, btusb_table);
 
-static struct usb_device_id blacklist_table[] = {
+static const struct usb_device_id blacklist_table[] = {
        /* CSR BlueCore devices */
        { USB_DEVICE(0x0a12, 0x0001), .driver_info = BTUSB_CSR },
 
@@ -716,9 +716,8 @@ static int btusb_flush(struct hci_dev *hdev)
        return 0;
 }
 
-static int btusb_send_frame(struct sk_buff *skb)
+static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        struct btusb_data *data = hci_get_drvdata(hdev);
        struct usb_ctrlrequest *dr;
        struct urb *urb;
@@ -730,6 +729,8 @@ static int btusb_send_frame(struct sk_buff *skb)
        if (!test_bit(HCI_RUNNING, &hdev->flags))
                return -EBUSY;
 
+       skb->dev = (void *) hdev;
+
        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -774,7 +775,7 @@ static int btusb_send_frame(struct sk_buff *skb)
                break;
 
        case HCI_SCODATA_PKT:
-               if (!data->isoc_tx_ep || hdev->conn_hash.sco_num < 1)
+               if (!data->isoc_tx_ep || hci_conn_num(hdev, SCO_LINK) < 1)
                        return -ENODEV;
 
                urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_ATOMIC);
@@ -833,8 +834,8 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt)
 
        BT_DBG("%s evt %d", hdev->name, evt);
 
-       if (hdev->conn_hash.sco_num != data->sco_num) {
-               data->sco_num = hdev->conn_hash.sco_num;
+       if (hci_conn_num(hdev, SCO_LINK) != data->sco_num) {
+               data->sco_num = hci_conn_num(hdev, SCO_LINK);
                schedule_work(&data->work);
        }
 }
@@ -889,7 +890,7 @@ static void btusb_work(struct work_struct *work)
        int new_alts;
        int err;
 
-       if (hdev->conn_hash.sco_num > 0) {
+       if (data->sco_num > 0) {
                if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
                        err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
                        if (err < 0) {
@@ -903,9 +904,9 @@ static void btusb_work(struct work_struct *work)
 
                if (hdev->voice_setting & 0x0020) {
                        static const int alts[3] = { 2, 4, 5 };
-                       new_alts = alts[hdev->conn_hash.sco_num - 1];
+                       new_alts = alts[data->sco_num - 1];
                } else {
-                       new_alts = hdev->conn_hash.sco_num;
+                       new_alts = data->sco_num;
                }
 
                if (data->isoc_altsetting != new_alts) {
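btusb also stops reading hdev->conn_hash directly: the SCO link count comes from the hci_conn_num(hdev, SCO_LINK) accessor, is cached in the driver's sco_num, and the deferred work then consults only that cached copy. A short sketch of the pattern with a hypothetical private structure; only the hci_conn_num() helper is taken from the hunk above.

/* Sketch only: cache the SCO connection count through the accessor. */
#include <linux/workqueue.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

struct example_usb_data {
        struct work_struct work;        /* reconfigures the isoc altsetting */
        unsigned int sco_num;           /* last count seen by the notifier */
};

static void example_notify(struct hci_dev *hdev, struct example_usb_data *data)
{
        unsigned int sco_num = hci_conn_num(hdev, SCO_LINK);

        if (sco_num != data->sco_num) {
                data->sco_num = sco_num;
                schedule_work(&data->work);
        }
}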
index 60abf596f60ea21f9354ae1b3d3608bb04e4142e..f038dba19e36b77973da26ee9688ed7791126d2f 100644 (file)
@@ -108,10 +108,8 @@ static long st_receive(void *priv_data, struct sk_buff *skb)
                return -EFAULT;
        }
 
-       skb->dev = (void *) lhst->hdev;
-
        /* Forward skb to HCI core layer */
-       err = hci_recv_frame(skb);
+       err = hci_recv_frame(lhst->hdev, skb);
        if (err < 0) {
                BT_ERR("Unable to push skb to HCI core(%d)", err);
                return err;
@@ -253,14 +251,11 @@ static int ti_st_close(struct hci_dev *hdev)
        return err;
 }
 
-static int ti_st_send_frame(struct sk_buff *skb)
+static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hci_dev *hdev;
        struct ti_st *hst;
        long len;
 
-       hdev = (struct hci_dev *)skb->dev;
-
        if (!test_bit(HCI_RUNNING, &hdev->flags))
                return -EBUSY;
 
index 33f3a6950c0e85e852766d41bc80db4cd986d98f..52eed1f3565dbf5868c094d246cf7dee4dcc86a4 100644 (file)
@@ -256,9 +256,8 @@ static void dtl1_receive(dtl1_info_t *info)
                                case 0x83:
                                case 0x84:
                                        /* send frame to the HCI layer */
-                                       info->rx_skb->dev = (void *) info->hdev;
                                        bt_cb(info->rx_skb)->pkt_type &= 0x0f;
-                                       hci_recv_frame(info->rx_skb);
+                                       hci_recv_frame(info->hdev, info->rx_skb);
                                        break;
                                default:
                                        /* unknown packet */
@@ -383,20 +382,12 @@ static int dtl1_hci_close(struct hci_dev *hdev)
 }
 
 
-static int dtl1_hci_send_frame(struct sk_buff *skb)
+static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       dtl1_info_t *info;
-       struct hci_dev *hdev = (struct hci_dev *)(skb->dev);
+       dtl1_info_t *info = hci_get_drvdata(hdev);
        struct sk_buff *s;
        nsh_t nsh;
 
-       if (!hdev) {
-               BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-               return -ENODEV;
-       }
-
-       info = hci_get_drvdata(hdev);
-
        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                hdev->stat.cmd_tx++;
@@ -438,12 +429,6 @@ static int dtl1_hci_send_frame(struct sk_buff *skb)
 }
 
 
-static int dtl1_hci_ioctl(struct hci_dev *hdev, unsigned int cmd,  unsigned long arg)
-{
-       return -ENOIOCTLCMD;
-}
-
-
 
 /* ======================== Card services HCI interaction ======================== */
 
@@ -477,11 +462,10 @@ static int dtl1_open(dtl1_info_t *info)
        hci_set_drvdata(hdev, info);
        SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
 
-       hdev->open     = dtl1_hci_open;
-       hdev->close    = dtl1_hci_close;
-       hdev->flush    = dtl1_hci_flush;
-       hdev->send     = dtl1_hci_send_frame;
-       hdev->ioctl    = dtl1_hci_ioctl;
+       hdev->open  = dtl1_hci_open;
+       hdev->close = dtl1_hci_close;
+       hdev->flush = dtl1_hci_flush;
+       hdev->send  = dtl1_hci_send_frame;
 
        spin_lock_irqsave(&(info->lock), flags);
 
index 57e502e0608058ca9943b7cf5c14613339562729..0bc87f7abd958a262028cdd1f0cb7cb0b624c45e 100644 (file)
@@ -522,7 +522,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
                                memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE);
                                bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
 
-                               hci_recv_frame(bcsp->rx_skb);
+                               hci_recv_frame(hu->hdev, bcsp->rx_skb);
                        } else {
                                BT_ERR ("Packet for unknown channel (%u %s)",
                                        bcsp->rx_skb->data[1] & 0x0f,
@@ -536,7 +536,7 @@ static void bcsp_complete_rx_pkt(struct hci_uart *hu)
                /* Pull out BCSP hdr */
                skb_pull(bcsp->rx_skb, 4);
 
-               hci_recv_frame(bcsp->rx_skb);
+               hci_recv_frame(hu->hdev, bcsp->rx_skb);
        }
 
        bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
@@ -655,7 +655,6 @@ static int bcsp_recv(struct hci_uart *hu, void *data, int count)
                                        bcsp->rx_count = 0;
                                        return 0;
                                }
-                               bcsp->rx_skb->dev = (void *) hu->hdev;
                                break;
                        }
                        break;
index 8ae9f1ea2bb5e59dd40f5c905c3057b5f0e35417..7048a583fe51a695a044ad541a0f894e3bf7c510 100644 (file)
@@ -124,30 +124,6 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
        return 0;
 }
 
-static inline int h4_check_data_len(struct h4_struct *h4, int len)
-{
-       int room = skb_tailroom(h4->rx_skb);
-
-       BT_DBG("len %d room %d", len, room);
-
-       if (!len) {
-               hci_recv_frame(h4->rx_skb);
-       } else if (len > room) {
-               BT_ERR("Data length is too large");
-               kfree_skb(h4->rx_skb);
-       } else {
-               h4->rx_state = H4_W4_DATA;
-               h4->rx_count = len;
-               return len;
-       }
-
-       h4->rx_state = H4_W4_PACKET_TYPE;
-       h4->rx_skb   = NULL;
-       h4->rx_count = 0;
-
-       return 0;
-}
-
 /* Recv data */
 static int h4_recv(struct hci_uart *hu, void *data, int count)
 {
index b6154d5a07a51cf954b1e34d869cb6d3feb75f53..f6f4974505600a2884f56c01be22bd6a79492f04 100644 (file)
@@ -340,7 +340,7 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
                /* Remove Three-wire header */
                skb_pull(h5->rx_skb, 4);
 
-               hci_recv_frame(h5->rx_skb);
+               hci_recv_frame(hu->hdev, h5->rx_skb);
                h5->rx_skb = NULL;
 
                break;
index bc68a440d432cd21b2ee10885e7106cfe2852b79..6e06f6f6915296ad08d7da394175da19250364db 100644 (file)
@@ -234,21 +234,13 @@ static int hci_uart_close(struct hci_dev *hdev)
 }
 
 /* Send frames from HCI layer */
-static int hci_uart_send_frame(struct sk_buff *skb)
+static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hci_dev* hdev = (struct hci_dev *) skb->dev;
-       struct hci_uart *hu;
-
-       if (!hdev) {
-               BT_ERR("Frame for unknown device (hdev=NULL)");
-               return -ENODEV;
-       }
+       struct hci_uart *hu = hci_get_drvdata(hdev);
 
        if (!test_bit(HCI_RUNNING, &hdev->flags))
                return -EBUSY;
 
-       hu = hci_get_drvdata(hdev);
-
        BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
 
        hu->proto->enqueue(hu, skb);
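On the transmit side the series turns hdev->send into a two-argument hook, int (*send)(struct hci_dev *, struct sk_buff *), so drivers recover their state with hci_get_drvdata() instead of casting skb->dev, and the -ENOIOCTLCMD stubs behind hdev->ioctl (btuart, dtl1) go away entirely. A hedged sketch of a send callback written against that prototype; the example_info structure and its txq are hypothetical.

/* Sketch only: hypothetical send callback for the two-argument hdev->send. */
#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

struct example_info {
        struct sk_buff_head txq;        /* hypothetical driver tx queue */
};

static int example_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct example_info *info = hci_get_drvdata(hdev);

        if (!test_bit(HCI_RUNNING, &hdev->flags))
                return -EBUSY;

        switch (bt_cb(skb)->pkt_type) {
        case HCI_COMMAND_PKT:
                hdev->stat.cmd_tx++;
                break;
        case HCI_ACLDATA_PKT:
                hdev->stat.acl_tx++;
                break;
        case HCI_SCODATA_PKT:
                hdev->stat.sco_tx++;
                break;
        }

        skb_queue_tail(&info->txq, skb);
        return 0;
}

Registration then assigns only hdev->open, ->close, ->flush and ->send, exactly as the btuart and dtl1 hunks now do.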
index cfc7679385890b6e0feaadee1462a1e85330cc59..69a90b1b5ff56131aa7a2e51b7d18aabba28b560 100644 (file)
@@ -110,7 +110,6 @@ static int send_hcill_cmd(u8 cmd, struct hci_uart *hu)
        /* prepare packet */
        hcill_packet = (struct hcill_cmd *) skb_put(skb, 1);
        hcill_packet->cmd = cmd;
-       skb->dev = (void *) hu->hdev;
 
        /* send packet */
        skb_queue_tail(&ll->txq, skb);
@@ -346,14 +345,14 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
        return 0;
 }
 
-static inline int ll_check_data_len(struct ll_struct *ll, int len)
+static inline int ll_check_data_len(struct hci_dev *hdev, struct ll_struct *ll, int len)
 {
        int room = skb_tailroom(ll->rx_skb);
 
        BT_DBG("len %d room %d", len, room);
 
        if (!len) {
-               hci_recv_frame(ll->rx_skb);
+               hci_recv_frame(hdev, ll->rx_skb);
        } else if (len > room) {
                BT_ERR("Data length is too large");
                kfree_skb(ll->rx_skb);
@@ -395,7 +394,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
                        switch (ll->rx_state) {
                        case HCILL_W4_DATA:
                                BT_DBG("Complete data");
-                               hci_recv_frame(ll->rx_skb);
+                               hci_recv_frame(hu->hdev, ll->rx_skb);
 
                                ll->rx_state = HCILL_W4_PACKET_TYPE;
                                ll->rx_skb = NULL;
@@ -406,7 +405,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
 
                                BT_DBG("Event header: evt 0x%2.2x plen %d", eh->evt, eh->plen);
 
-                               ll_check_data_len(ll, eh->plen);
+                               ll_check_data_len(hu->hdev, ll, eh->plen);
                                continue;
 
                        case HCILL_W4_ACL_HDR:
@@ -415,7 +414,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
 
                                BT_DBG("ACL header: dlen %d", dlen);
 
-                               ll_check_data_len(ll, dlen);
+                               ll_check_data_len(hu->hdev, ll, dlen);
                                continue;
 
                        case HCILL_W4_SCO_HDR:
@@ -423,7 +422,7 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
 
                                BT_DBG("SCO header: dlen %d", sh->dlen);
 
-                               ll_check_data_len(ll, sh->dlen);
+                               ll_check_data_len(hu->hdev, ll, sh->dlen);
                                continue;
                        }
                }
@@ -494,7 +493,6 @@ static int ll_recv(struct hci_uart *hu, void *data, int count)
                        return -ENOMEM;
                }
 
-               ll->rx_skb->dev = (void *) hu->hdev;
                bt_cb(ll->rx_skb)->pkt_type = type;
        }
 
index d8b7aed6e4a96f6d6997ab9fac2a1ac84fbc4700..7b167385a1c4e8bdf7d0974707f0b0b43ea21f43 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/module.h>
+#include <asm/unaligned.h>
 
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
-#define VERSION "1.3"
+#define VERSION "1.4"
 
 static bool amp;
 
 struct vhci_data {
        struct hci_dev *hdev;
 
-       unsigned long flags;
-
        wait_queue_head_t read_wait;
        struct sk_buff_head readq;
+
+       struct delayed_work open_timeout;
 };
 
 static int vhci_open_dev(struct hci_dev *hdev)
@@ -80,35 +81,73 @@ static int vhci_flush(struct hci_dev *hdev)
        return 0;
 }
 
-static int vhci_send_frame(struct sk_buff *skb)
+static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hci_dev* hdev = (struct hci_dev *) skb->dev;
-       struct vhci_data *data;
+       struct vhci_data *data = hci_get_drvdata(hdev);
+
+       if (!test_bit(HCI_RUNNING, &hdev->flags))
+               return -EBUSY;
+
+       memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+       skb_queue_tail(&data->readq, skb);
+
+       wake_up_interruptible(&data->read_wait);
+       return 0;
+}
 
+static int vhci_create_device(struct vhci_data *data, __u8 dev_type)
+{
+       struct hci_dev *hdev;
+       struct sk_buff *skb;
+
+       skb = bt_skb_alloc(4, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       hdev = hci_alloc_dev();
        if (!hdev) {
-               BT_ERR("Frame for unknown HCI device (hdev=NULL)");
-               return -ENODEV;
+               kfree_skb(skb);
+               return -ENOMEM;
        }
 
-       if (!test_bit(HCI_RUNNING, &hdev->flags))
+       data->hdev = hdev;
+
+       hdev->bus = HCI_VIRTUAL;
+       hdev->dev_type = dev_type;
+       hci_set_drvdata(hdev, data);
+
+       hdev->open  = vhci_open_dev;
+       hdev->close = vhci_close_dev;
+       hdev->flush = vhci_flush;
+       hdev->send  = vhci_send_frame;
+
+       if (hci_register_dev(hdev) < 0) {
+               BT_ERR("Can't register HCI device");
+               hci_free_dev(hdev);
+               data->hdev = NULL;
+               kfree_skb(skb);
                return -EBUSY;
+       }
 
-       data = hci_get_drvdata(hdev);
+       bt_cb(skb)->pkt_type = HCI_VENDOR_PKT;
 
-       memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+       *skb_put(skb, 1) = 0xff;
+       *skb_put(skb, 1) = dev_type;
+       put_unaligned_le16(hdev->id, skb_put(skb, 2));
        skb_queue_tail(&data->readq, skb);
 
        wake_up_interruptible(&data->read_wait);
-
        return 0;
 }
 
 static inline ssize_t vhci_get_user(struct vhci_data *data,
-                                       const char __user *buf, size_t count)
+                                   const char __user *buf, size_t count)
 {
        struct sk_buff *skb;
+       __u8 pkt_type, dev_type;
+       int ret;
 
-       if (count > HCI_MAX_FRAME_SIZE)
+       if (count < 2 || count > HCI_MAX_FRAME_SIZE)
                return -EINVAL;
 
        skb = bt_skb_alloc(count, GFP_KERNEL);
@@ -120,27 +159,69 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
                return -EFAULT;
        }
 
-       skb->dev = (void *) data->hdev;
-       bt_cb(skb)->pkt_type = *((__u8 *) skb->data);
+       pkt_type = *((__u8 *) skb->data);
        skb_pull(skb, 1);
 
-       hci_recv_frame(skb);
+       switch (pkt_type) {
+       case HCI_EVENT_PKT:
+       case HCI_ACLDATA_PKT:
+       case HCI_SCODATA_PKT:
+               if (!data->hdev) {
+                       kfree_skb(skb);
+                       return -ENODEV;
+               }
+
+               bt_cb(skb)->pkt_type = pkt_type;
+
+               ret = hci_recv_frame(data->hdev, skb);
+               break;
 
-       return count;
+       case HCI_VENDOR_PKT:
+               if (data->hdev) {
+                       kfree_skb(skb);
+                       return -EBADFD;
+               }
+
+               cancel_delayed_work_sync(&data->open_timeout);
+
+               dev_type = *((__u8 *) skb->data);
+               skb_pull(skb, 1);
+
+               if (skb->len > 0) {
+                       kfree_skb(skb);
+                       return -EINVAL;
+               }
+
+               kfree_skb(skb);
+
+               if (dev_type != HCI_BREDR && dev_type != HCI_AMP)
+                       return -EINVAL;
+
+               ret = vhci_create_device(data, dev_type);
+               break;
+
+       default:
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       return (ret < 0) ? ret : count;
 }
 
 static inline ssize_t vhci_put_user(struct vhci_data *data,
-                       struct sk_buff *skb, char __user *buf, int count)
+                                   struct sk_buff *skb,
+                                   char __user *buf, int count)
 {
        char __user *ptr = buf;
-       int len, total = 0;
+       int len;
 
        len = min_t(unsigned int, skb->len, count);
 
        if (copy_to_user(ptr, skb->data, len))
                return -EFAULT;
 
-       total += len;
+       if (!data->hdev)
+               return len;
 
        data->hdev->stat.byte_tx += len;
 
@@ -148,21 +229,19 @@ static inline ssize_t vhci_put_user(struct vhci_data *data,
        case HCI_COMMAND_PKT:
                data->hdev->stat.cmd_tx++;
                break;
-
        case HCI_ACLDATA_PKT:
                data->hdev->stat.acl_tx++;
                break;
-
        case HCI_SCODATA_PKT:
                data->hdev->stat.sco_tx++;
                break;
        }
 
-       return total;
+       return len;
 }
 
 static ssize_t vhci_read(struct file *file,
-                               char __user *buf, size_t count, loff_t *pos)
+                        char __user *buf, size_t count, loff_t *pos)
 {
        struct vhci_data *data = file->private_data;
        struct sk_buff *skb;
@@ -185,7 +264,7 @@ static ssize_t vhci_read(struct file *file,
                }
 
                ret = wait_event_interruptible(data->read_wait,
-                                       !skb_queue_empty(&data->readq));
+                                              !skb_queue_empty(&data->readq));
                if (ret < 0)
                        break;
        }
@@ -194,7 +273,7 @@ static ssize_t vhci_read(struct file *file,
 }
 
 static ssize_t vhci_write(struct file *file,
-                       const char __user *buf, size_t count, loff_t *pos)
+                         const char __user *buf, size_t count, loff_t *pos)
 {
        struct vhci_data *data = file->private_data;
 
@@ -213,10 +292,17 @@ static unsigned int vhci_poll(struct file *file, poll_table *wait)
        return POLLOUT | POLLWRNORM;
 }
 
+static void vhci_open_timeout(struct work_struct *work)
+{
+       struct vhci_data *data = container_of(work, struct vhci_data,
+                                             open_timeout.work);
+
+       vhci_create_device(data, amp ? HCI_AMP : HCI_BREDR);
+}
+
 static int vhci_open(struct inode *inode, struct file *file)
 {
        struct vhci_data *data;
-       struct hci_dev *hdev;
 
        data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL);
        if (!data)
@@ -225,35 +311,13 @@ static int vhci_open(struct inode *inode, struct file *file)
        skb_queue_head_init(&data->readq);
        init_waitqueue_head(&data->read_wait);
 
-       hdev = hci_alloc_dev();
-       if (!hdev) {
-               kfree(data);
-               return -ENOMEM;
-       }
-
-       data->hdev = hdev;
-
-       hdev->bus = HCI_VIRTUAL;
-       hci_set_drvdata(hdev, data);
-
-       if (amp)
-               hdev->dev_type = HCI_AMP;
-
-       hdev->open     = vhci_open_dev;
-       hdev->close    = vhci_close_dev;
-       hdev->flush    = vhci_flush;
-       hdev->send     = vhci_send_frame;
-
-       if (hci_register_dev(hdev) < 0) {
-               BT_ERR("Can't register HCI device");
-               kfree(data);
-               hci_free_dev(hdev);
-               return -EBUSY;
-       }
+       INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
 
        file->private_data = data;
        nonseekable_open(inode, file);
 
+       schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000));
+
        return 0;
 }
 
@@ -262,8 +326,12 @@ static int vhci_release(struct inode *inode, struct file *file)
        struct vhci_data *data = file->private_data;
        struct hci_dev *hdev = data->hdev;
 
-       hci_unregister_dev(hdev);
-       hci_free_dev(hdev);
+       cancel_delayed_work_sync(&data->open_timeout);
+
+       if (hdev) {
+               hci_unregister_dev(hdev);
+               hci_free_dev(hdev);
+       }
 
        file->private_data = NULL;
        kfree(data);
@@ -309,3 +377,4 @@ MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
 MODULE_VERSION(VERSION);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("devname:vhci");
index 200926699778e2a0ec2605429362f92282494a56..bb5b90e8e7687a0b71d33aae92f7050f741a6fa9 100644 (file)
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
 #include <asm/smp_plat.h>
 
+#define DRIVER_NAME            "CCI-400"
+#define DRIVER_NAME_PMU                DRIVER_NAME " PMU"
+#define PMU_NAME               "CCI_400"
+
 #define CCI_PORT_CTRL          0x0
 #define CCI_CTRL_STATUS                0xc
 
@@ -54,6 +64,568 @@ static unsigned int nb_cci_ports;
 static void __iomem *cci_ctrl_base;
 static unsigned long cci_ctrl_phys;
 
+#ifdef CONFIG_HW_PERF_EVENTS
+
+#define CCI_PMCR               0x0100
+#define CCI_PID2               0x0fe8
+
+#define CCI_PMCR_CEN           0x00000001
+#define CCI_PMCR_NCNT_MASK     0x0000f800
+#define CCI_PMCR_NCNT_SHIFT    11
+
+#define CCI_PID2_REV_MASK      0xf0
+#define CCI_PID2_REV_SHIFT     4
+
+/* Port ids */
+#define CCI_PORT_S0    0
+#define CCI_PORT_S1    1
+#define CCI_PORT_S2    2
+#define CCI_PORT_S3    3
+#define CCI_PORT_S4    4
+#define CCI_PORT_M0    5
+#define CCI_PORT_M1    6
+#define CCI_PORT_M2    7
+
+#define CCI_REV_R0             0
+#define CCI_REV_R1             1
+#define CCI_REV_R0_P4          4
+#define CCI_REV_R1_P2          6
+
+#define CCI_PMU_EVT_SEL                0x000
+#define CCI_PMU_CNTR           0x004
+#define CCI_PMU_CNTR_CTRL      0x008
+#define CCI_PMU_OVRFLW         0x00c
+
+#define CCI_PMU_OVRFLW_FLAG    1
+
+#define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K)
+
+/*
+ * Instead of an event id to monitor CCI cycles, a dedicated counter is
+ * provided. Use 0xff to represent CCI cycles and hope that no future revisions
+ * make use of this event in hardware.
+ */
+enum cci400_perf_events {
+       CCI_PMU_CYCLES = 0xff
+};
+
+#define CCI_PMU_EVENT_MASK             0xff
+#define CCI_PMU_EVENT_SOURCE(event)    ((event >> 5) & 0x7)
+#define CCI_PMU_EVENT_CODE(event)      (event & 0x1f)
+
+#define CCI_PMU_MAX_HW_EVENTS 5   /* CCI PMU has 4 counters + 1 cycle counter */
+
+#define CCI_PMU_CYCLE_CNTR_IDX         0
+#define CCI_PMU_CNTR0_IDX              1
+#define CCI_PMU_CNTR_LAST(cci_pmu)     (CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
+
+/*
+ * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
+ * ports and bits 4:0 are event codes. There are different event codes
+ * associated with each port type.
+ *
+ * Additionally, the range of events associated with the port types changed
+ * between Rev0 and Rev1.
+ *
+ * The constants below define the range of valid codes for each port type for
+ * the different revisions and are used to validate the event to be monitored.
+ */
+
+#define CCI_REV_R0_SLAVE_PORT_MIN_EV   0x00
+#define CCI_REV_R0_SLAVE_PORT_MAX_EV   0x13
+#define CCI_REV_R0_MASTER_PORT_MIN_EV  0x14
+#define CCI_REV_R0_MASTER_PORT_MAX_EV  0x1a
+
+#define CCI_REV_R1_SLAVE_PORT_MIN_EV   0x00
+#define CCI_REV_R1_SLAVE_PORT_MAX_EV   0x14
+#define CCI_REV_R1_MASTER_PORT_MIN_EV  0x00
+#define CCI_REV_R1_MASTER_PORT_MAX_EV  0x11
+
+struct pmu_port_event_ranges {
+       u8 slave_min;
+       u8 slave_max;
+       u8 master_min;
+       u8 master_max;
+};
+
+static struct pmu_port_event_ranges port_event_range[] = {
+       [CCI_REV_R0] = {
+               .slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
+               .slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
+               .master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
+               .master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
+       },
+       [CCI_REV_R1] = {
+               .slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
+               .slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
+               .master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
+               .master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
+       },
+};
+
+struct cci_pmu_drv_data {
+       void __iomem *base;
+       struct arm_pmu *cci_pmu;
+       int nr_irqs;
+       int irqs[CCI_PMU_MAX_HW_EVENTS];
+       unsigned long active_irqs;
+       struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
+       unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
+       struct pmu_port_event_ranges *port_ranges;
+       struct pmu_hw_events hw_events;
+};
+static struct cci_pmu_drv_data *pmu;
+
+static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
+{
+       int i;
+
+       for (i = 0; i < nr_irqs; i++)
+               if (irq == irqs[i])
+                       return true;
+
+       return false;
+}
+
+static int probe_cci_revision(void)
+{
+       int rev;
+       rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
+       rev >>= CCI_PID2_REV_SHIFT;
+
+       if (rev <= CCI_REV_R0_P4)
+               return CCI_REV_R0;
+       else if (rev <= CCI_REV_R1_P2)
+               return CCI_REV_R1;
+
+       return -ENOENT;
+}
+
+static struct pmu_port_event_ranges *port_range_by_rev(void)
+{
+       int rev = probe_cci_revision();
+
+       if (rev < 0)
+               return NULL;
+
+       return &port_event_range[rev];
+}
+
+static int pmu_is_valid_slave_event(u8 ev_code)
+{
+       return pmu->port_ranges->slave_min <= ev_code &&
+               ev_code <= pmu->port_ranges->slave_max;
+}
+
+static int pmu_is_valid_master_event(u8 ev_code)
+{
+       return pmu->port_ranges->master_min <= ev_code &&
+               ev_code <= pmu->port_ranges->master_max;
+}
+
+static int pmu_validate_hw_event(u8 hw_event)
+{
+       u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
+       u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
+
+       switch (ev_source) {
+       case CCI_PORT_S0:
+       case CCI_PORT_S1:
+       case CCI_PORT_S2:
+       case CCI_PORT_S3:
+       case CCI_PORT_S4:
+               /* Slave Interface */
+               if (pmu_is_valid_slave_event(ev_code))
+                       return hw_event;
+               break;
+       case CCI_PORT_M0:
+       case CCI_PORT_M1:
+       case CCI_PORT_M2:
+               /* Master Interface */
+               if (pmu_is_valid_master_event(ev_code))
+                       return hw_event;
+               break;
+       }
+
+       return -ENOENT;
+}
+
+static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx)
+{
+       return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
+               idx <= CCI_PMU_CNTR_LAST(cci_pmu);
+}
+
+static u32 pmu_read_register(int idx, unsigned int offset)
+{
+       return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_write_register(u32 value, int idx, unsigned int offset)
+{
+       return writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_disable_counter(int idx)
+{
+       pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_enable_counter(int idx)
+{
+       pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_set_event(int idx, unsigned long event)
+{
+       event &= CCI_PMU_EVENT_MASK;
+       pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
+}
+
+static u32 pmu_get_max_counters(void)
+{
+       u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
+                     CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
+
+       /* add 1 for cycle counter */
+       return n_cnts + 1;
+}
+
+static struct pmu_hw_events *pmu_get_hw_events(void)
+{
+       return &pmu->hw_events;
+}
+
+static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
+{
+       struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hw_event = &event->hw;
+       unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
+       int idx;
+
+       if (cci_event == CCI_PMU_CYCLES) {
+               if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
+                       return -EAGAIN;
+
+               return CCI_PMU_CYCLE_CNTR_IDX;
+       }
+
+       for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
+               if (!test_and_set_bit(idx, hw->used_mask))
+                       return idx;
+
+       /* No counters available */
+       return -EAGAIN;
+}
+
+static int pmu_map_event(struct perf_event *event)
+{
+       int mapping;
+       u8 config = event->attr.config & CCI_PMU_EVENT_MASK;
+
+       if (event->attr.type < PERF_TYPE_MAX)
+               return -ENOENT;
+
+       if (config == CCI_PMU_CYCLES)
+               mapping = config;
+       else
+               mapping = pmu_validate_hw_event(config);
+
+       return mapping;
+}
+
+static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
+{
+       int i;
+       struct platform_device *pmu_device = cci_pmu->plat_device;
+
+       if (unlikely(!pmu_device))
+               return -ENODEV;
+
+       if (pmu->nr_irqs < 1) {
+               dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
+               return -ENODEV;
+       }
+
+       /*
+        * Register all available CCI PMU interrupts. In the interrupt handler
+        * we iterate over the counters checking for interrupt source (the
+        * overflowing counter) and clear it.
+        *
+        * This should allow handling of non-unique interrupt for the counters.
+        */
+       for (i = 0; i < pmu->nr_irqs; i++) {
+               int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
+                               "arm-cci-pmu", cci_pmu);
+               if (err) {
+                       dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+                               pmu->irqs[i]);
+                       return err;
+               }
+
+               set_bit(i, &pmu->active_irqs);
+       }
+
+       return 0;
+}
+
+static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
+{
+       unsigned long flags;
+       struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
+       struct pmu_hw_events *events = cci_pmu->get_hw_events();
+       struct perf_sample_data data;
+       struct pt_regs *regs;
+       int idx, handled = IRQ_NONE;
+
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+       regs = get_irq_regs();
+       /*
+        * Iterate over counters and update the corresponding perf events.
+        * This should work regardless of whether we have per-counter overflow
+        * interrupt or a combined overflow interrupt.
+        */
+       for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
+               struct perf_event *event = events->events[idx];
+               struct hw_perf_event *hw_counter;
+
+               if (!event)
+                       continue;
+
+               hw_counter = &event->hw;
+
+               /* Did this counter overflow? */
+               if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG))
+                       continue;
+
+               pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
+
+               handled = IRQ_HANDLED;
+
+               armpmu_event_update(event);
+               perf_sample_data_init(&data, 0, hw_counter->last_period);
+               if (!armpmu_event_set_period(event))
+                       continue;
+
+               if (perf_event_overflow(event, &data, regs))
+                       cci_pmu->disable(event);
+       }
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+
+       return IRQ_RETVAL(handled);
+}
+
+static void pmu_free_irq(struct arm_pmu *cci_pmu)
+{
+       int i;
+
+       for (i = 0; i < pmu->nr_irqs; i++) {
+               if (!test_and_clear_bit(i, &pmu->active_irqs))
+                       continue;
+
+               free_irq(pmu->irqs[i], cci_pmu);
+       }
+}
+
+static void pmu_enable_event(struct perf_event *event)
+{
+       unsigned long flags;
+       struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+       struct pmu_hw_events *events = cci_pmu->get_hw_events();
+       struct hw_perf_event *hw_counter = &event->hw;
+       int idx = hw_counter->idx;
+
+       if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+               dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+               return;
+       }
+
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+       /* Configure the event to count, unless you are counting cycles */
+       if (idx != CCI_PMU_CYCLE_CNTR_IDX)
+               pmu_set_event(idx, hw_counter->config_base);
+
+       pmu_enable_counter(idx);
+
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_disable_event(struct perf_event *event)
+{
+       struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hw_counter = &event->hw;
+       int idx = hw_counter->idx;
+
+       if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+               dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+               return;
+       }
+
+       pmu_disable_counter(idx);
+}
+
+static void pmu_start(struct arm_pmu *cci_pmu)
+{
+       u32 val;
+       unsigned long flags;
+       struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+       /* Enable all the PMU counters. */
+       val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+       writel(val, cci_ctrl_base + CCI_PMCR);
+
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_stop(struct arm_pmu *cci_pmu)
+{
+       u32 val;
+       unsigned long flags;
+       struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+       raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+       /* Disable all the PMU counters. */
+       val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+       writel(val, cci_ctrl_base + CCI_PMCR);
+
+       raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static u32 pmu_read_counter(struct perf_event *event)
+{
+       struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hw_counter = &event->hw;
+       int idx = hw_counter->idx;
+       u32 value;
+
+       if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+               dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+               return 0;
+       }
+       value = pmu_read_register(idx, CCI_PMU_CNTR);
+
+       return value;
+}
+
+static void pmu_write_counter(struct perf_event *event, u32 value)
+{
+       struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hw_counter = &event->hw;
+       int idx = hw_counter->idx;
+
+       if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
+               dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+       else
+               pmu_write_register(value, idx, CCI_PMU_CNTR);
+}
+
+static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev)
+{
+       *cci_pmu = (struct arm_pmu){
+               .name             = PMU_NAME,
+               .max_period       = (1LLU << 32) - 1,
+               .get_hw_events    = pmu_get_hw_events,
+               .get_event_idx    = pmu_get_event_idx,
+               .map_event        = pmu_map_event,
+               .request_irq      = pmu_request_irq,
+               .handle_irq       = pmu_handle_irq,
+               .free_irq         = pmu_free_irq,
+               .enable           = pmu_enable_event,
+               .disable          = pmu_disable_event,
+               .start            = pmu_start,
+               .stop             = pmu_stop,
+               .read_counter     = pmu_read_counter,
+               .write_counter    = pmu_write_counter,
+       };
+
+       cci_pmu->plat_device = pdev;
+       cci_pmu->num_events = pmu_get_max_counters();
+
+       return armpmu_register(cci_pmu, -1);
+}
+
+static const struct of_device_id arm_cci_pmu_matches[] = {
+       {
+               .compatible = "arm,cci-400-pmu",
+       },
+       {},
+};
+
+static int cci_pmu_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       int i, ret, irq;
+
+       pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+       if (!pmu)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       pmu->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(pmu->base))
+               return -ENOMEM;
+
+       /*
+        * CCI PMU has 5 overflow signals - one per counter; but some may be tied
+        * together to a common interrupt.
+        */
+       pmu->nr_irqs = 0;
+       for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
+               irq = platform_get_irq(pdev, i);
+               if (irq < 0)
+                       break;
+
+               if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))
+                       continue;
+
+               pmu->irqs[pmu->nr_irqs++] = irq;
+       }
+
+       /*
+        * Ensure that the device tree has as many interrupts as the number
+        * of counters.
+        */
+       if (i < CCI_PMU_MAX_HW_EVENTS) {
+               dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
+                       i, CCI_PMU_MAX_HW_EVENTS);
+               return -EINVAL;
+       }
+
+       pmu->port_ranges = port_range_by_rev();
+       if (!pmu->port_ranges) {
+               dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+               return -EINVAL;
+       }
+
+       pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL);
+       if (!pmu->cci_pmu)
+               return -ENOMEM;
+
+       pmu->hw_events.events = pmu->events;
+       pmu->hw_events.used_mask = pmu->used_mask;
+       raw_spin_lock_init(&pmu->hw_events.pmu_lock);
+
+       ret = cci_pmu_init(pmu->cci_pmu, pdev);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int cci_platform_probe(struct platform_device *pdev)
+{
+       if (!cci_probed())
+               return -ENODEV;
+
+       return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
 struct cpu_port {
        u64 mpidr;
        u32 port;
@@ -120,7 +692,7 @@ int cci_ace_get_port(struct device_node *dn)
 }
 EXPORT_SYMBOL_GPL(cci_ace_get_port);
 
-static void __init cci_ace_init_ports(void)
+static void cci_ace_init_ports(void)
 {
        int port, cpu;
        struct device_node *cpun;
@@ -386,7 +958,7 @@ static const struct of_device_id arm_cci_ctrl_if_matches[] = {
        {},
 };
 
-static int __init cci_probe(void)
+static int cci_probe(void)
 {
        struct cci_nb_ports const *cci_config;
        int ret, i, nb_ace = 0, nb_ace_lite = 0;
@@ -490,7 +1062,7 @@ memalloc_err:
 static int cci_init_status = -EAGAIN;
 static DEFINE_MUTEX(cci_probing);
 
-static int __init cci_init(void)
+static int cci_init(void)
 {
        if (cci_init_status != -EAGAIN)
                return cci_init_status;
@@ -502,18 +1074,55 @@ static int __init cci_init(void)
        return cci_init_status;
 }
 
+#ifdef CONFIG_HW_PERF_EVENTS
+static struct platform_driver cci_pmu_driver = {
+       .driver = {
+                  .name = DRIVER_NAME_PMU,
+                  .of_match_table = arm_cci_pmu_matches,
+                 },
+       .probe = cci_pmu_probe,
+};
+
+static struct platform_driver cci_platform_driver = {
+       .driver = {
+                  .name = DRIVER_NAME,
+                  .of_match_table = arm_cci_matches,
+                 },
+       .probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&cci_pmu_driver);
+       if (ret)
+               return ret;
+
+       return platform_driver_register(&cci_platform_driver);
+}
+
+#else
+
+static int __init cci_platform_init(void)
+{
+       return 0;
+}
+
+#endif
 /*
  * To sort out early init call ordering, a helper function is provided to
  * check whether the CCI driver has been initialized; if it has not, the
  * helper calls the init function that probes the driver and updates the
  * return value.
  */
-bool __init cci_probed(void)
+bool cci_probed(void)
 {
        return cci_init() == 0;
 }
 EXPORT_SYMBOL_GPL(cci_probed);
 
 early_initcall(cci_init);
+core_initcall(cci_platform_init);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("ARM CCI support");
index 0aa9d91daef500486e999c8ce4ccb4cb90d2755d..d4cfddd456e0900c17cdfc42fffb1da158e82c7e 100644 (file)
@@ -165,6 +165,19 @@ config HW_RANDOM_OMAP
 
          If unsure, say Y.
 
+config HW_RANDOM_OMAP3_ROM
+       tristate "OMAP3 ROM Random Number Generator support"
+       depends on HW_RANDOM && ARCH_OMAP3
+       default HW_RANDOM
+       ---help---
+         This driver provides kernel-side support for the Random Number
+         Generator hardware found on OMAP34xx processors.
+
+         To compile this driver as a module, choose M here: the
+         module will be called omap3-rom-rng.
+
+         If unsure, say Y.
+
 config HW_RANDOM_OCTEON
        tristate "Octeon Random Number Generator support"
        depends on HW_RANDOM && CAVIUM_OCTEON_SOC
@@ -290,6 +303,19 @@ config HW_RANDOM_PSERIES
 
          If unsure, say Y.
 
+config HW_RANDOM_POWERNV
+       tristate "PowerNV Random Number Generator support"
+       depends on HW_RANDOM && PPC_POWERNV
+       default HW_RANDOM
+       ---help---
+         This is the driver for the Random Number Generator hardware found
+         in POWER7+ and later machines on the PowerNV platform.
+
+         To compile this driver as a module, choose M here: the
+         module will be called powernv-rng.
+
+         If unsure, say Y.
+
 config HW_RANDOM_EXYNOS
        tristate "EXYNOS HW random number generator support"
        depends on HW_RANDOM && HAS_IOMEM && HAVE_CLK
index bed467c9300e6ec99240a262585d6bf9409a0b94..0a9af90489f97a2e6b594ccbc641bcf47625be9a 100644 (file)
@@ -15,6 +15,7 @@ n2-rng-y := n2-drv.o n2-asm.o
 obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
 obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
 obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
+obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
 obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
 obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
 obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
@@ -24,6 +25,7 @@ obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
 obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
 obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
 obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
+obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o
 obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-rng.o
 obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
new file mode 100644 (file)
index 0000000..c853e9e
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * omap3-rom-rng.c - RNG driver for TI OMAP3 CPU family
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Juha Yrjola <juha.yrjola@solidboot.com>
+ *
+ * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+#include <linux/timer.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
+#define RNG_RESET                      0x01
+#define RNG_GEN_PRNG_HW_INIT           0x02
+#define RNG_GEN_HW                     0x08
+
+/* param1: ptr, param2: count, param3: flag */
+static u32 (*omap3_rom_rng_call)(u32, u32, u32);
+
+static struct timer_list idle_timer;
+static int rng_idle;
+static struct clk *rng_clk;
+
+static void omap3_rom_rng_idle(unsigned long data)
+{
+       int r;
+
+       r = omap3_rom_rng_call(0, 0, RNG_RESET);
+       if (r != 0) {
+               pr_err("reset failed: %d\n", r);
+               return;
+       }
+       clk_disable_unprepare(rng_clk);
+       rng_idle = 1;
+}
+
+static int omap3_rom_rng_get_random(void *buf, unsigned int count)
+{
+       u32 r;
+       u32 ptr;
+
+       del_timer_sync(&idle_timer);
+       if (rng_idle) {
+               clk_prepare_enable(rng_clk);
+               r = omap3_rom_rng_call(0, 0, RNG_GEN_PRNG_HW_INIT);
+               if (r != 0) {
+                       clk_disable_unprepare(rng_clk);
+                       pr_err("HW init failed: %d\n", r);
+                       return -EIO;
+               }
+               rng_idle = 0;
+       }
+
+       ptr = virt_to_phys(buf);
+       r = omap3_rom_rng_call(ptr, count, RNG_GEN_HW);
+       mod_timer(&idle_timer, jiffies + msecs_to_jiffies(500));
+       if (r != 0)
+               return -EINVAL;
+       return 0;
+}
+
+static int omap3_rom_rng_data_present(struct hwrng *rng, int wait)
+{
+       return 1;
+}
+
+static int omap3_rom_rng_data_read(struct hwrng *rng, u32 *data)
+{
+       int r;
+
+       r = omap3_rom_rng_get_random(data, 4);
+       if (r < 0)
+               return r;
+       return 4;
+}
+
+static struct hwrng omap3_rom_rng_ops = {
+       .name           = "omap3-rom",
+       .data_present   = omap3_rom_rng_data_present,
+       .data_read      = omap3_rom_rng_data_read,
+};
+
+static int omap3_rom_rng_probe(struct platform_device *pdev)
+{
+       pr_info("initializing\n");
+
+       omap3_rom_rng_call = pdev->dev.platform_data;
+       if (!omap3_rom_rng_call) {
+               pr_err("omap3_rom_rng_call is NULL\n");
+               return -EINVAL;
+       }
+
+       setup_timer(&idle_timer, omap3_rom_rng_idle, 0);
+       rng_clk = clk_get(&pdev->dev, "ick");
+       if (IS_ERR(rng_clk)) {
+               pr_err("unable to get RNG clock\n");
+               return PTR_ERR(rng_clk);
+       }
+
+       /* Leave the RNG in reset state. */
+       clk_prepare_enable(rng_clk);
+       omap3_rom_rng_idle(0);
+
+       return hwrng_register(&omap3_rom_rng_ops);
+}
+
+static int omap3_rom_rng_remove(struct platform_device *pdev)
+{
+       hwrng_unregister(&omap3_rom_rng_ops);
+       clk_disable_unprepare(rng_clk);
+       clk_put(rng_clk);
+       return 0;
+}
+
+static struct platform_driver omap3_rom_rng_driver = {
+       .driver = {
+               .name           = "omap3-rom-rng",
+               .owner          = THIS_MODULE,
+       },
+       .probe          = omap3_rom_rng_probe,
+       .remove         = omap3_rom_rng_remove,
+};
+
+module_platform_driver(omap3_rom_rng_driver);
+
+MODULE_ALIAS("platform:omap3-rom-rng");
+MODULE_AUTHOR("Juha Yrjola");
+MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/powernv-rng.c b/drivers/char/hw_random/powernv-rng.c
new file mode 100644 (file)
index 0000000..3f4f632
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2013 Michael Ellerman, Guo Chao, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <linux/hw_random.h>
+
+static int powernv_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+       unsigned long *buf;
+       int i, len;
+
+       /* We rely on rng_buffer_size() being >= sizeof(unsigned long) */
+       len = max / sizeof(unsigned long);
+
+       buf = (unsigned long *)data;
+
+       for (i = 0; i < len; i++)
+               powernv_get_random_long(buf++);
+
+       return len * sizeof(unsigned long);
+}
+
+static struct hwrng powernv_hwrng = {
+       .name = "powernv-rng",
+       .read = powernv_rng_read,
+};
+
+static int powernv_rng_remove(struct platform_device *pdev)
+{
+       hwrng_unregister(&powernv_hwrng);
+
+       return 0;
+}
+
+static int powernv_rng_probe(struct platform_device *pdev)
+{
+       int rc;
+
+       rc = hwrng_register(&powernv_hwrng);
+       if (rc) {
+               /* We only register one device, ignore any others */
+               if (rc == -EEXIST)
+                       rc = -ENODEV;
+
+               return rc;
+       }
+
+       pr_info("Registered powernv hwrng.\n");
+
+       return 0;
+}
+
+static struct of_device_id powernv_rng_match[] = {
+       { .compatible   = "ibm,power-rng",},
+       {},
+};
+MODULE_DEVICE_TABLE(of, powernv_rng_match);
+
+static struct platform_driver powernv_rng_driver = {
+       .driver = {
+               .name = "powernv_rng",
+               .of_match_table = powernv_rng_match,
+       },
+       .probe  = powernv_rng_probe,
+       .remove = powernv_rng_remove,
+};
+module_platform_driver(powernv_rng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bare metal HWRNG driver for POWER7+ and above");
index 5f1197929f0ceebaa2ca34632ca16be9046150dc..ab7ffdec0ec3545a7ec6f940b32cff396d153183 100644 (file)
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/hw_random.h>
 #include <asm/vio.h>
 
-#define MODULE_NAME "pseries-rng"
 
 static int pseries_rng_data_read(struct hwrng *rng, u32 *data)
 {
-       if (plpar_hcall(H_RANDOM, (unsigned long *)data) != H_SUCCESS) {
-               printk(KERN_ERR "pseries rng hcall error\n");
-               return 0;
+       int rc;
+
+       rc = plpar_hcall(H_RANDOM, (unsigned long *)data);
+       if (rc != H_SUCCESS) {
+               pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
+               return -EIO;
        }
+
+       /* The hypervisor interface returns 64 bits */
        return 8;
 }
 
@@ -47,7 +54,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
 };
 
 static struct hwrng pseries_rng = {
-       .name           = MODULE_NAME,
+       .name           = KBUILD_MODNAME,
        .data_read      = pseries_rng_data_read,
 };
 
@@ -70,7 +77,7 @@ static struct vio_device_id pseries_rng_driver_ids[] = {
 MODULE_DEVICE_TABLE(vio, pseries_rng_driver_ids);
 
 static struct vio_driver pseries_rng_driver = {
-       .name = MODULE_NAME,
+       .name = KBUILD_MODNAME,
        .probe = pseries_rng_probe,
        .remove = pseries_rng_remove,
        .get_desired_dma = pseries_rng_get_desired_dma,
index e737772ad69a8103312e596cd0e24ce00d759e22..de5a6dcfb3e242ec4aa5b6742c73c7700e32d551 100644 (file)
@@ -221,7 +221,7 @@ static void __exit mod_exit(void)
 module_init(mod_init);
 module_exit(mod_exit);
 
-static struct x86_cpu_id via_rng_cpu_id[] = {
+static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
        {}
 };
index ef46a9cfd832a5ce48a3f720c62845a2171b9839..c12398d1517c5cb35711b9c1770d6a4b1eb2ca51 100644 (file)
@@ -133,7 +133,7 @@ static void virtrng_remove(struct virtio_device *vdev)
        remove_common(vdev);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtrng_freeze(struct virtio_device *vdev)
 {
        remove_common(vdev);
@@ -157,7 +157,7 @@ static struct virtio_driver virtio_rng_driver = {
        .id_table =     id_table,
        .probe =        virtrng_probe,
        .remove =       virtrng_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
        .freeze =       virtrng_freeze,
        .restore =      virtrng_restore,
 #endif
index 7737b5bd26af816e4361a906ad9d0cd6966b7287..7a744d39175638a381835a8cadce7039f58ee3bf 100644 (file)
@@ -640,7 +640,7 @@ struct timer_rand_state {
  */
 void add_device_randomness(const void *buf, unsigned int size)
 {
-       unsigned long time = get_cycles() ^ jiffies;
+       unsigned long time = random_get_entropy() ^ jiffies;
 
        mix_pool_bytes(&input_pool, buf, size, NULL);
        mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
@@ -677,7 +677,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
                goto out;
 
        sample.jiffies = jiffies;
-       sample.cycles = get_cycles();
+       sample.cycles = random_get_entropy();
        sample.num = num;
        mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
 
@@ -744,7 +744,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
        struct fast_pool        *fast_pool = &__get_cpu_var(irq_randomness);
        struct pt_regs          *regs = get_irq_regs();
        unsigned long           now = jiffies;
-       __u32                   input[4], cycles = get_cycles();
+       __u32                   input[4], cycles = random_get_entropy();
 
        input[0] = cycles ^ jiffies;
        input[1] = irq;
@@ -1459,12 +1459,11 @@ struct ctl_table random_table[] = {
 
 static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
 
-static int __init random_int_secret_init(void)
+int random_int_secret_init(void)
 {
        get_random_bytes(random_int_secret, sizeof(random_int_secret));
        return 0;
 }
-late_initcall(random_int_secret_init);
 
 /*
  * Get a random word for internal kernel use only. Similar to urandom but
@@ -1483,7 +1482,7 @@ unsigned int get_random_int(void)
 
        hash = get_cpu_var(get_random_int_hash);
 
-       hash[0] += current->pid + jiffies + get_cycles();
+       hash[0] += current->pid + jiffies + random_get_entropy();
        md5_transform(hash, random_int_secret);
        ret = hash[0];
        put_cpu_var(get_random_int_hash);
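
random_get_entropy() is an architecture hook for the best available timing source; on several architectures get_cycles() is a stub that returns 0, and the hook lets them substitute something better without touching this file. The generic fallback is roughly:

/* include/linux/timex.h, approximately */
#ifndef random_get_entropy
#define random_get_entropy()    get_cycles()
#endif
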
index f3223aac4df11c41959a744bee67af2d2df232e5..db5fa4e9b9e50f3a88bce0993b01420b53e8735f 100644 (file)
@@ -285,9 +285,9 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
 
 static const struct file_operations raw_fops = {
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = blkdev_aio_write,
+       .write_iter     = blkdev_write_iter,
        .fsync          = blkdev_fsync,
        .open           = raw_open,
        .release        = raw_release,
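
The raw driver moves from the aio_read/aio_write methods to the newer iter-based interface, where both directions take an iov_iter and the generic/blkdev helpers do the work. A hedged sketch of a file_operations wired the same way as raw_fops above; whether these exact helpers apply elsewhere depends on the file being block-device-backed:

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/module.h>

/* Sketch mirroring the hunk above: .read/.write keep the synchronous entry
 * points, the *_iter methods carry the vectored I/O path. */
static const struct file_operations demo_fops = {
        .owner          = THIS_MODULE,
        .read           = do_sync_read,
        .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
        .write_iter     = blkdev_write_iter,
        .fsync          = blkdev_fsync,
};
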
index 06189e55b4e5a0479d2eaab0a31610ef91bf472f..94c280d36e8b3bfaea00ee36c4fe3215818b3c86 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
+#include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/interface/io/tpmif.h>
 #include <xen/grant_table.h>
index b79cf3e1b793dca8f652067718d4ece3c0b3335d..2a8d9a7a18340f4192ff183d0bda3424724477d5 100644 (file)
@@ -1837,12 +1837,8 @@ static void config_intr(struct virtio_device *vdev)
                struct port *port;
                u16 rows, cols;
 
-               vdev->config->get(vdev,
-                                 offsetof(struct virtio_console_config, cols),
-                                 &cols, sizeof(u16));
-               vdev->config->get(vdev,
-                                 offsetof(struct virtio_console_config, rows),
-                                 &rows, sizeof(u16));
+               virtio_cread(vdev, struct virtio_console_config, cols, &cols);
+               virtio_cread(vdev, struct virtio_console_config, rows, &rows);
 
                port = find_port_by_id(portdev, 0);
                set_console_size(port, rows, cols);
@@ -2014,10 +2010,9 @@ static int virtcons_probe(struct virtio_device *vdev)
 
        /* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
        if (!is_rproc_serial(vdev) &&
-           virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
-                                 offsetof(struct virtio_console_config,
-                                          max_nr_ports),
-                                 &portdev->config.max_nr_ports) == 0) {
+           virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+                                struct virtio_console_config, max_nr_ports,
+                                &portdev->config.max_nr_ports) == 0) {
                multiport = true;
        }
 
@@ -2142,7 +2137,7 @@ static struct virtio_device_id rproc_serial_id_table[] = {
 static unsigned int rproc_serial_features[] = {
 };
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtcons_freeze(struct virtio_device *vdev)
 {
        struct ports_device *portdev;
@@ -2220,7 +2215,7 @@ static struct virtio_driver virtio_console = {
        .probe =        virtcons_probe,
        .remove =       virtcons_remove,
        .config_changed = config_intr,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
        .freeze =       virtcons_freeze,
        .restore =      virtcons_restore,
 #endif
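
virtio_cread() and virtio_cread_feature() replace the open-coded vdev->config->get() calls with offsetof(): the macros take the config structure type and field name and derive offset and size themselves, and the _feature variant only reads when the given feature bit was negotiated. A small usage sketch (the demo_* function is hypothetical; the config layout is the virtio_console one used in the hunks above):

#include <linux/kernel.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_console.h>

static void demo_read_console_size(struct virtio_device *vdev)
{
        u16 rows, cols;

        /* offset and size are derived from the named struct member */
        virtio_cread(vdev, struct virtio_console_config, cols, &cols);
        virtio_cread(vdev, struct virtio_console_config, rows, &rows);

        pr_info("console size %ux%u\n", cols, rows);
}
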
index 08ae128cce9be2e930454088c1041478bce8d8f5..c73fc2b74de2a1dd0665bde5693b1fce03f6d6c5 100644 (file)
@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        /*  If cn_netlink_send() failed, the data is not sent */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        ev->what = which_id;
        ev->event_data.id.process_pid = task->pid;
        ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        get_seq(&msg->seq, &ev->cpu);
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = 0; /* not used */
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
@@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
 
        msg = (struct cn_msg *)buffer;
        ev = (struct proc_event *)msg->data;
+       memset(&ev->event_data, 0, sizeof(ev->event_data));
        msg->seq = rcvd_seq;
        ktime_get_ts(&ts); /* get high res monotonic timestamp */
        put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
        memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
        msg->ack = rcvd_ack + 1;
        msg->len = sizeof(*ev);
+       msg->flags = 0; /* not used */
        cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
 
index 6ecfa758942c50a4b33ebd399cd831ac1420277d..a36749f1e44a869418e1bcab6481334948618391 100644 (file)
@@ -109,7 +109,7 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
 
        data = nlmsg_data(nlh);
 
-       memcpy(data, msg, sizeof(*data) + msg->len);
+       memcpy(data, msg, size);
 
        NETLINK_CB(skb).dst_group = group;
 
@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
 static void cn_rx_skb(struct sk_buff *__skb)
 {
        struct nlmsghdr *nlh;
-       int err;
        struct sk_buff *skb;
+       int len, err;
 
        skb = skb_get(__skb);
 
        if (skb->len >= NLMSG_HDRLEN) {
                nlh = nlmsg_hdr(skb);
+               len = nlmsg_len(nlh);
 
-               if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
+               if (len < (int)sizeof(struct cn_msg) ||
                    skb->len < nlh->nlmsg_len ||
-                   nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
+                   len > CONNECTOR_MAX_MSG_SIZE) {
                        kfree_skb(skb);
                        return;
                }
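
nlh->nlmsg_len is the total message length including the netlink header, while nlmsg_len(nlh) is the payload length with the header already subtracted, so the bounds check above now compares like with like against sizeof(struct cn_msg) and CONNECTOR_MAX_MSG_SIZE. The helper is roughly:

/* include/net/netlink.h, approximately */
static inline int nlmsg_len(const struct nlmsghdr *nlh)
{
        return nlh->nlmsg_len - NLMSG_HDRLEN;   /* payload only, header excluded */
}
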
index 534fcb8251538a31d2695313b1d565990f8d51d4..38093e272377b2729939400c581aeb9706424011 100644 (file)
@@ -17,15 +17,11 @@ config CPU_FREQ
 
 if CPU_FREQ
 
-config CPU_FREQ_TABLE
-       tristate
-
 config CPU_FREQ_GOV_COMMON
        bool
 
 config CPU_FREQ_STAT
        tristate "CPU frequency translation statistics"
-       select CPU_FREQ_TABLE
        default y
        help
          This driver exports CPU frequency statistics information through sysfs
@@ -143,7 +139,6 @@ config CPU_FREQ_GOV_USERSPACE
 
 config CPU_FREQ_GOV_ONDEMAND
        tristate "'ondemand' cpufreq policy governor"
-       select CPU_FREQ_TABLE
        select CPU_FREQ_GOV_COMMON
        help
          'ondemand' - This driver adds a dynamic cpufreq policy governor.
@@ -187,7 +182,6 @@ config CPU_FREQ_GOV_CONSERVATIVE
 config GENERIC_CPUFREQ_CPU0
        tristate "Generic CPU0 cpufreq driver"
        depends on HAVE_CLK && REGULATOR && PM_OPP && OF
-       select CPU_FREQ_TABLE
        help
          This adds a generic cpufreq driver for CPU0 frequency management.
          It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
@@ -223,7 +217,6 @@ depends on IA64
 
 config IA64_ACPI_CPUFREQ
        tristate "ACPI Processor P-States driver"
-       select CPU_FREQ_TABLE
        depends on ACPI_PROCESSOR
        help
        This driver adds a CPUFreq driver which utilizes the ACPI
@@ -240,7 +233,6 @@ depends on MIPS
 
 config LOONGSON2_CPUFREQ
        tristate "Loongson2 CPUFreq Driver"
-       select CPU_FREQ_TABLE
        help
          This option adds a CPUFreq driver for loongson processors which
          support software configurable cpu frequency.
@@ -262,7 +254,6 @@ menu "SPARC CPU frequency scaling drivers"
 depends on SPARC64
 config SPARC_US3_CPUFREQ
        tristate "UltraSPARC-III CPU Frequency driver"
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for UltraSPARC-III processors.
 
@@ -272,7 +263,6 @@ config SPARC_US3_CPUFREQ
 
 config SPARC_US2E_CPUFREQ
        tristate "UltraSPARC-IIe CPU Frequency driver"
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for UltraSPARC-IIe processors.
 
@@ -285,7 +275,6 @@ menu "SH CPU Frequency scaling"
 depends on SUPERH
 config SH_CPU_FREQ
        tristate "SuperH CPU Frequency driver"
-       select CPU_FREQ_TABLE
        help
          This adds the cpufreq driver for SuperH. Any CPU that supports
          clock rate rounding through the clock framework can use this
index 0fa204b244bd2df52974a50fc4290e9fa77d287c..701ec95ce95487cb29dbb9a1ba45012984085299 100644 (file)
@@ -5,7 +5,6 @@
 config ARM_BIG_LITTLE_CPUFREQ
        tristate "Generic ARM big LITTLE CPUfreq driver"
        depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
-       select CPU_FREQ_TABLE
        help
          This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
@@ -18,7 +17,6 @@ config ARM_DT_BL_CPUFREQ
 
 config ARM_EXYNOS_CPUFREQ
        bool
-       select CPU_FREQ_TABLE
 
 config ARM_EXYNOS4210_CPUFREQ
        bool "SAMSUNG EXYNOS4210"
@@ -58,7 +56,6 @@ config ARM_EXYNOS5440_CPUFREQ
        depends on SOC_EXYNOS5440
        depends on HAVE_CLK && PM_OPP && OF
        default y
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for Samsung EXYNOS5440
          SoC. The nature of exynos5440 clock controller is
@@ -85,7 +82,6 @@ config ARM_IMX6Q_CPUFREQ
        tristate "Freescale i.MX6Q cpufreq support"
        depends on SOC_IMX6Q
        depends on REGULATOR_ANATOP
-       select CPU_FREQ_TABLE
        help
          This adds cpufreq driver support for Freescale i.MX6Q SOC.
 
@@ -101,7 +97,6 @@ config ARM_INTEGRATOR
 
 config ARM_KIRKWOOD_CPUFREQ
        def_bool ARCH_KIRKWOOD && OF
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for Marvell Kirkwood
          SoCs.
@@ -110,7 +105,6 @@ config ARM_OMAP2PLUS_CPUFREQ
        bool "TI OMAP2+"
        depends on ARCH_OMAP2PLUS
        default ARCH_OMAP2PLUS
-       select CPU_FREQ_TABLE
 
 config ARM_S3C_CPUFREQ
        bool
@@ -165,7 +159,6 @@ config ARM_S3C2412_CPUFREQ
 config ARM_S3C2416_CPUFREQ
        bool "S3C2416 CPU Frequency scaling support"
        depends on CPU_S3C2416
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for the Samsung S3C2416 and
          S3C2450 SoC. The S3C2416 supports changing the rate of the
@@ -196,7 +189,6 @@ config ARM_S3C2440_CPUFREQ
 config ARM_S3C64XX_CPUFREQ
        bool "Samsung S3C64XX"
        depends on CPU_S3C6410
-       select CPU_FREQ_TABLE
        default y
        help
          This adds the CPUFreq driver for Samsung S3C6410 SoC.
@@ -206,7 +198,6 @@ config ARM_S3C64XX_CPUFREQ
 config ARM_S5PV210_CPUFREQ
        bool "Samsung S5PV210 and S5PC110"
        depends on CPU_S5PV210
-       select CPU_FREQ_TABLE
        default y
        help
          This adds the CPUFreq driver for Samsung S5PV210 and
@@ -223,7 +214,6 @@ config ARM_SA1110_CPUFREQ
 config ARM_SPEAR_CPUFREQ
        bool "SPEAr CPUFreq support"
        depends on PLAT_SPEAR
-       select CPU_FREQ_TABLE
        default y
        help
          This adds the CPUFreq driver support for SPEAr SOCs.
@@ -231,7 +221,6 @@ config ARM_SPEAR_CPUFREQ
 config ARM_TEGRA_CPUFREQ
        bool "TEGRA CPUFreq support"
        depends on ARCH_TEGRA
-       select CPU_FREQ_TABLE
        default y
        help
          This adds the CPUFreq driver support for TEGRA SOCs.
index 25ca9db62e090d6b49e20d6744c3cd4027a16bfb..ca0021a96e19738abefa9538bcb590e3e2c3faa7 100644 (file)
@@ -1,7 +1,6 @@
 config CPU_FREQ_CBE
        tristate "CBE frequency scaling"
        depends on CBE_RAS && PPC_CELL
-       select CPU_FREQ_TABLE
        default m
        help
          This adds the cpufreq driver for Cell BE processors.
@@ -20,7 +19,6 @@ config CPU_FREQ_CBE_PMI
 config CPU_FREQ_MAPLE
        bool "Support for Maple 970FX Evaluation Board"
        depends on PPC_MAPLE
-       select CPU_FREQ_TABLE
        help
          This adds support for frequency switching on Maple 970FX
          Evaluation Board and compatible boards (IBM JS2x blades).
@@ -28,7 +26,6 @@ config CPU_FREQ_MAPLE
 config PPC_CORENET_CPUFREQ
        tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
        depends on PPC_E500MC && OF && COMMON_CLK
-       select CPU_FREQ_TABLE
        select CLK_PPC_CORENET
        help
          This adds the CPUFreq driver support for Freescale e500mc,
@@ -38,7 +35,6 @@ config PPC_CORENET_CPUFREQ
 config CPU_FREQ_PMAC
        bool "Support for Apple PowerBooks"
        depends on ADB_PMU && PPC32
-       select CPU_FREQ_TABLE
        help
          This adds support for frequency switching on Apple PowerBooks,
          this currently includes some models of iBook & Titanium
@@ -47,7 +43,6 @@ config CPU_FREQ_PMAC
 config CPU_FREQ_PMAC64
        bool "Support for some Apple G5s"
        depends on PPC_PMAC && PPC64
-       select CPU_FREQ_TABLE
        help
          This adds support for frequency switching on Apple iMac G5,
          and some of the more recent desktop G5 machines as well.
@@ -55,7 +50,6 @@ config CPU_FREQ_PMAC64
 config PPC_PASEMI_CPUFREQ
        bool "Support for PA Semi PWRficient"
        depends on PPC_PASEMI
-       select CPU_FREQ_TABLE
        default y
        help
          This adds the support for frequency switching on PA Semi
index e2b6eabef2218d4e1e392ed523b3451553e196cb..6897ad85b0467a8200c3a529e5dee14344c2afbf 100644 (file)
@@ -31,7 +31,6 @@ config X86_PCC_CPUFREQ
 
 config X86_ACPI_CPUFREQ
        tristate "ACPI Processor P-States driver"
-       select CPU_FREQ_TABLE
        depends on ACPI_PROCESSOR
        help
          This driver adds a CPUFreq driver which utilizes the ACPI
@@ -60,7 +59,6 @@ config X86_ACPI_CPUFREQ_CPB
 
 config ELAN_CPUFREQ
        tristate "AMD Elan SC400 and SC410"
-       select CPU_FREQ_TABLE
        depends on MELAN
        ---help---
          This adds the CPUFreq driver for AMD Elan SC400 and SC410
@@ -76,7 +74,6 @@ config ELAN_CPUFREQ
 
 config SC520_CPUFREQ
        tristate "AMD Elan SC520"
-       select CPU_FREQ_TABLE
        depends on MELAN
        ---help---
          This adds the CPUFreq driver for AMD Elan SC520 processor.
@@ -88,7 +85,6 @@ config SC520_CPUFREQ
 
 config X86_POWERNOW_K6
        tristate "AMD Mobile K6-2/K6-3 PowerNow!"
-       select CPU_FREQ_TABLE
        depends on X86_32
        help
          This adds the CPUFreq driver for mobile AMD K6-2+ and mobile
@@ -100,7 +96,6 @@ config X86_POWERNOW_K6
 
 config X86_POWERNOW_K7
        tristate "AMD Mobile Athlon/Duron PowerNow!"
-       select CPU_FREQ_TABLE
        depends on X86_32
        help
          This adds the CPUFreq driver for mobile AMD K7 mobile processors.
@@ -118,7 +113,6 @@ config X86_POWERNOW_K7_ACPI
 
 config X86_POWERNOW_K8
        tristate "AMD Opteron/Athlon64 PowerNow!"
-       select CPU_FREQ_TABLE
        depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
        help
          This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
@@ -132,7 +126,6 @@ config X86_POWERNOW_K8
 config X86_AMD_FREQ_SENSITIVITY
        tristate "AMD frequency sensitivity feedback powersave bias"
        depends on CPU_FREQ_GOV_ONDEMAND && X86_ACPI_CPUFREQ && CPU_SUP_AMD
-       select CPU_FREQ_TABLE
        help
          This adds AMD-specific powersave bias function to the ondemand
          governor, which allows it to make more power-conscious frequency
@@ -160,7 +153,6 @@ config X86_GX_SUSPMOD
 
 config X86_SPEEDSTEP_CENTRINO
        tristate "Intel Enhanced SpeedStep (deprecated)"
-       select CPU_FREQ_TABLE
        select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32
        depends on X86_32 || (X86_64 && ACPI_PROCESSOR)
        help
@@ -190,7 +182,6 @@ config X86_SPEEDSTEP_CENTRINO_TABLE
 
 config X86_SPEEDSTEP_ICH
        tristate "Intel Speedstep on ICH-M chipsets (ioport interface)"
-       select CPU_FREQ_TABLE
        depends on X86_32
        help
          This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -204,7 +195,6 @@ config X86_SPEEDSTEP_ICH
 
 config X86_SPEEDSTEP_SMI
        tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)"
-       select CPU_FREQ_TABLE
        depends on X86_32
        help
          This adds the CPUFreq driver for certain mobile Intel Pentium III
@@ -217,7 +207,6 @@ config X86_SPEEDSTEP_SMI
 
 config X86_P4_CLOCKMOD
        tristate "Intel Pentium 4 clock modulation"
-       select CPU_FREQ_TABLE
        help
          This adds the CPUFreq driver for Intel Pentium 4 / XEON
          processors.  When enabled it will lower CPU temperature by skipping
@@ -259,7 +248,6 @@ config X86_LONGRUN
 
 config X86_LONGHAUL
        tristate "VIA Cyrix III Longhaul"
-       select CPU_FREQ_TABLE
        depends on X86_32 && ACPI_PROCESSOR
        help
          This adds the CPUFreq driver for VIA Samuel/CyrixIII,
@@ -272,7 +260,6 @@ config X86_LONGHAUL
 
 config X86_E_POWERSAVER
        tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
-       select CPU_FREQ_TABLE
        depends on X86_32 && ACPI_PROCESSOR
        help
          This adds the CPUFreq driver for VIA C7 processors.  However, this driver
index ad5866c2ada0a5bd219305954682dbc7aae4f5a2..b7948bbbbf1fe7346590e79e3e6cbd9655272000 100644 (file)
@@ -1,5 +1,5 @@
 # CPUfreq core
-obj-$(CONFIG_CPU_FREQ)                 += cpufreq.o
+obj-$(CONFIG_CPU_FREQ)                 += cpufreq.o freq_table.o
 # CPUfreq stats
 obj-$(CONFIG_CPU_FREQ_STAT)             += cpufreq_stats.o
 
@@ -11,9 +11,6 @@ obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)   += cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)        += cpufreq_conservative.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)              += cpufreq_governor.o
 
-# CPUfreq cross-arch helpers
-obj-$(CONFIG_CPU_FREQ_TABLE)           += freq_table.o
-
 obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)     += cpufreq-cpu0.o
 
 ##################################################################################
index d2c3253e015ee23f107d2d34333c6afa533ab7cf..a1717d7367c1ad0bd29a3dd54de644791d648cc7 100644 (file)
@@ -516,15 +516,6 @@ out:
        return result;
 }
 
-static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
-
-       pr_debug("acpi_cpufreq_verify\n");
-
-       return cpufreq_frequency_table_verify(policy, data->freq_table);
-}
-
 static unsigned long
 acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
@@ -837,7 +828,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
        perf->state = 0;
 
-       result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+       result = cpufreq_table_validate_and_show(policy, data->freq_table);
        if (result)
                goto err_freqfree;
 
@@ -846,12 +837,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
        switch (perf->control_register.space_id) {
        case ACPI_ADR_SPACE_SYSTEM_IO:
-               /* Current speed is unknown and not detectable by IO port */
+               /*
+                * The core will not set policy->cur, because
+                * cpufreq_driver->get is NULL, so we need to set it here.
+                * However, we have to guess it, because the current speed is
+                * unknown and not detectable via IO ports.
+                */
                policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
                break;
        case ACPI_ADR_SPACE_FIXED_HARDWARE:
                acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
-               policy->cur = get_cur_freq_on_cpu(cpu);
                break;
        default:
                break;
@@ -868,8 +863,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                        (u32) perf->states[i].power,
                        (u32) perf->states[i].transition_latency);
 
-       cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
        /*
         * the first call to ->target() should result in us actually
         * writing something to the appropriate registers.
@@ -929,7 +922,7 @@ static struct freq_attr *acpi_cpufreq_attr[] = {
 };
 
 static struct cpufreq_driver acpi_cpufreq_driver = {
-       .verify         = acpi_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = acpi_cpufreq_target,
        .bios_limit     = acpi_processor_get_bios_limit,
        .init           = acpi_cpufreq_cpu_init,
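
Several per-driver ->verify() callbacks in this series were one-line wrappers around cpufreq_frequency_table_verify(); they are replaced by the core-provided cpufreq_generic_frequency_table_verify(), while cpufreq_frequency_table_cpuinfo() plus cpufreq_frequency_table_get_attr() collapse into cpufreq_table_validate_and_show(). A sketch of the resulting driver boilerplate; all demo_* names and frequencies are placeholders:

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table demo_freq_table[] = {
        { .frequency = 200000 },                /* kHz */
        { .frequency = 400000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int demo_target(struct cpufreq_policy *policy,
                       unsigned int target_freq, unsigned int relation)
{
        return 0;       /* a real driver programs the hardware here */
}

static unsigned int demo_get(unsigned int cpu)
{
        return demo_freq_table[0].frequency;
}

static int demo_cpu_init(struct cpufreq_policy *policy)
{
        /* validates the table, fills cpuinfo.min/max_freq and, together with
         * cpufreq_generic_attr below, exports scaling_available_frequencies */
        return cpufreq_table_validate_and_show(policy, demo_freq_table);
}

static struct cpufreq_driver demo_driver = {
        .name   = "demo",
        .verify = cpufreq_generic_frequency_table_verify,
        .target = demo_target,
        .get    = demo_get,
        .init   = demo_cpu_init,
        .exit   = cpufreq_generic_exit,
        .attr   = cpufreq_generic_attr,
};
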
index 3549f0784af176d07ee17dd60d02ec85859d6bd1..086f7c17ff5816001ab9be359741b9292539cf47 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/cpumask.h>
 #include <linux/export.h>
 #include <linux/of_platform.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/slab.h>
 #include <linux/topology.h>
 #include <linux/types.h>
@@ -47,14 +47,6 @@ static unsigned int bL_cpufreq_get(unsigned int cpu)
        return clk_get_rate(clk[cur_cluster]) / 1000;
 }
 
-/* Validate policy frequency range */
-static int bL_cpufreq_verify_policy(struct cpufreq_policy *policy)
-{
-       u32 cur_cluster = cpu_to_cluster(policy->cpu);
-
-       return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
-}
-
 /* Set clock frequency */
 static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
                unsigned int target_freq, unsigned int relation)
@@ -98,7 +90,7 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
 
        if (!atomic_dec_return(&cluster_usage[cluster])) {
                clk_put(clk[cluster]);
-               opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+               dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
                dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
        }
 }
@@ -119,7 +111,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
                goto atomic_dec;
        }
 
-       ret = opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
+       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
        if (ret) {
                dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
                                __func__, cpu_dev->id, ret);
@@ -127,7 +119,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
        }
 
        name[12] = cluster + '0';
-       clk[cluster] = clk_get_sys(name, NULL);
+       clk[cluster] = clk_get(cpu_dev, name);
        if (!IS_ERR(clk[cluster])) {
                dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
                                __func__, clk[cluster], freq_table[cluster],
@@ -138,7 +130,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
        dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
                        __func__, cpu_dev->id, cluster);
        ret = PTR_ERR(clk[cluster]);
-       opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
 
 atomic_dec:
        atomic_dec(&cluster_usage[cluster]);
@@ -165,7 +157,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
        if (ret)
                return ret;
 
-       ret = cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+       ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
        if (ret) {
                dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
                                policy->cpu, cur_cluster);
@@ -173,16 +165,12 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
                return ret;
        }
 
-       cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
-
        if (arm_bL_ops->get_transition_latency)
                policy->cpuinfo.transition_latency =
                        arm_bL_ops->get_transition_latency(cpu_dev);
        else
                policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 
-       policy->cur = bL_cpufreq_get(policy->cpu);
-
        cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
 
        dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
@@ -200,28 +188,23 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
                return -ENODEV;
        }
 
+       cpufreq_frequency_table_put_attr(policy->cpu);
        put_cluster_clk_and_freq_table(cpu_dev);
        dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
 
        return 0;
 }
 
-/* Export freq_table to sysfs */
-static struct freq_attr *bL_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver bL_cpufreq_driver = {
        .name                   = "arm-big-little",
-       .flags                  = CPUFREQ_STICKY,
-       .verify                 = bL_cpufreq_verify_policy,
+       .flags                  = CPUFREQ_STICKY |
+                                       CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+       .verify                 = cpufreq_generic_frequency_table_verify,
        .target                 = bL_cpufreq_set_target,
        .get                    = bL_cpufreq_get,
        .init                   = bL_cpufreq_init,
        .exit                   = bL_cpufreq_exit,
-       .have_governor_per_policy = true,
-       .attr                   = bL_cpufreq_attr,
+       .attr                   = cpufreq_generic_attr,
 };
 
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
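
The per-driver have_governor_per_policy field becomes a driver flag here, matching the core change in cpufreq.c further down where have_governor_per_policy() now tests cpufreq_driver->flags. On the driver side the declaration reduces to something like the sketch below; the names are placeholders and the remaining callbacks are omitted:

static struct cpufreq_driver demo_per_policy_driver = {
        .name   = "demo-bl",
        .flags  = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
        .verify = cpufreq_generic_frequency_table_verify,
        .attr   = cpufreq_generic_attr,
        /* .init, .exit, .target and .get omitted from this sketch */
};
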
index 480c0bd0468d7c0fbbed9e0872cd9adb4fb77244..8d9d5910890689e58d5994d6aeebfccf03215c59 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
index e0c38d9389979b2f1b9a503d36c8e12364ecc188..7439deddd5cf72794f8580712f11ca7b6388e6f6 100644 (file)
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/slab.h>
 
 static struct clk *cpuclk;
-
-static int at32_verify_speed(struct cpufreq_policy *policy)
-{
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                       policy->cpuinfo.max_freq);
-       return 0;
-}
+static struct cpufreq_frequency_table *freq_table;
 
 static unsigned int at32_get_speed(unsigned int cpu)
 {
@@ -85,31 +77,68 @@ static int at32_set_target(struct cpufreq_policy *policy,
 
 static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
+       unsigned int frequency, rate, min_freq;
+       int retval, steps, i;
+
        if (policy->cpu != 0)
                return -EINVAL;
 
        cpuclk = clk_get(NULL, "cpu");
        if (IS_ERR(cpuclk)) {
                pr_debug("cpufreq: could not get CPU clk\n");
-               return PTR_ERR(cpuclk);
+               retval = PTR_ERR(cpuclk);
+               goto out_err;
        }
 
-       policy->cpuinfo.min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
-       policy->cpuinfo.max_freq = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
+       min_freq = (clk_round_rate(cpuclk, 1) + 500) / 1000;
+       frequency = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
        policy->cpuinfo.transition_latency = 0;
-       policy->cur = at32_get_speed(0);
-       policy->min = policy->cpuinfo.min_freq;
-       policy->max = policy->cpuinfo.max_freq;
 
-       printk("cpufreq: AT32AP CPU frequency driver\n");
+       /*
+        * The AVR32 CPU frequency scales in powers of two between the maximum
+        * and minimum; size the table accordingly, plus the end marker.
+        *
+        * Further validate that each frequency is usable, and append it to
+        * the frequency table.
+        */
+       steps = fls(frequency / min_freq) + 1;
+       freq_table = kzalloc(steps * sizeof(struct cpufreq_frequency_table),
+                       GFP_KERNEL);
+       if (!freq_table) {
+               retval = -ENOMEM;
+               goto out_err_put_clk;
+       }
+
+       for (i = 0; i < (steps - 1); i++) {
+               rate = clk_round_rate(cpuclk, frequency * 1000) / 1000;
 
-       return 0;
+               if (rate != frequency)
+                       freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+               else
+                       freq_table[i].frequency = frequency;
+
+               frequency /= 2;
+       }
+
+       freq_table[steps - 1].frequency = CPUFREQ_TABLE_END;
+
+       retval = cpufreq_table_validate_and_show(policy, freq_table);
+       if (!retval) {
+               printk("cpufreq: AT32AP CPU frequency driver\n");
+               return 0;
+       }
+
+       kfree(freq_table);
+out_err_put_clk:
+       clk_put(cpuclk);
+out_err:
+       return retval;
 }
 
 static struct cpufreq_driver at32_driver = {
        .name           = "at32ap",
        .init           = at32_cpufreq_driver_init,
-       .verify         = at32_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = at32_set_target,
        .get            = at32_get_speed,
        .flags          = CPUFREQ_STICKY,
index ef05978a723702de29da757eda20edec7d21c3d6..0bc9e8c2c79bb1404a8539f6ff8760c5ab966727 100644 (file)
@@ -191,11 +191,6 @@ static int bfin_target(struct cpufreq_policy *policy,
        return ret;
 }
 
-static int bfin_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, bfin_freq_table);
-}
-
 static int __bfin_cpu_init(struct cpufreq_policy *policy)
 {
 
@@ -209,23 +204,17 @@ static int __bfin_cpu_init(struct cpufreq_policy *policy)
 
        policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
 
-       policy->cur = cclk;
-       cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
-       return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
+       return cpufreq_table_validate_and_show(policy, bfin_freq_table);
 }
 
-static struct freq_attr *bfin_freq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver bfin_driver = {
-       .verify = bfin_verify_speed,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = bfin_target,
        .get = bfin_getfreq_khz,
        .init = __bfin_cpu_init,
+       .exit = cpufreq_generic_exit,
        .name = "bfin cpufreq",
-       .attr = bfin_freq_attr,
+       .attr = cpufreq_generic_attr,
 };
 
 static int __init bfin_cpu_init(void)
index c522a95c0e168ae30e4a347f189f89c27173c427..33ab6504c4478ae344395a746cddde0341ca2c52 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
@@ -30,11 +30,6 @@ static struct clk *cpu_clk;
 static struct regulator *cpu_reg;
 static struct cpufreq_frequency_table *freq_table;
 
-static int cpu0_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int cpu0_get_speed(unsigned int cpu)
 {
        return clk_get_rate(cpu_clk) / 1000;
@@ -44,7 +39,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
                           unsigned int target_freq, unsigned int relation)
 {
        struct cpufreq_freqs freqs;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long volt = 0, volt_old = 0, tol = 0;
        long freq_Hz, freq_exact;
        unsigned int index;
@@ -72,7 +67,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
 
        if (!IS_ERR(cpu_reg)) {
                rcu_read_lock();
-               opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
+               opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        pr_err("failed to find OPP for %ld\n", freq_Hz);
@@ -80,7 +75,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
                        ret = PTR_ERR(opp);
                        goto post_notify;
                }
-               volt = opp_get_voltage(opp);
+               volt = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
                tol = volt * voltage_tolerance / 100;
                volt_old = regulator_get_voltage(cpu_reg);
@@ -127,50 +122,18 @@ post_notify:
 
 static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
 {
-       int ret;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       if (ret) {
-               pr_err("invalid frequency table: %d\n", ret);
-               return ret;
-       }
-
-       policy->cpuinfo.transition_latency = transition_latency;
-       policy->cur = clk_get_rate(cpu_clk) / 1000;
-
-       /*
-        * The driver only supports the SMP configuartion where all processors
-        * share the clock and voltage and clock.  Use cpufreq affected_cpus
-        * interface to have all CPUs scaled together.
-        */
-       cpumask_setall(policy->cpus);
-
-       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
-       return 0;
-}
-
-static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-
-       return 0;
+       return cpufreq_generic_init(policy, freq_table, transition_latency);
 }
 
-static struct freq_attr *cpu0_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver cpu0_cpufreq_driver = {
        .flags = CPUFREQ_STICKY,
-       .verify = cpu0_verify_speed,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = cpu0_set_target,
        .get = cpu0_get_speed,
        .init = cpu0_cpufreq_init,
-       .exit = cpu0_cpufreq_exit,
+       .exit = cpufreq_generic_exit,
        .name = "generic_cpu0",
-       .attr = cpu0_cpufreq_attr,
+       .attr = cpufreq_generic_attr,
 };
 
 static int cpu0_cpufreq_probe(struct platform_device *pdev)
@@ -218,7 +181,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                goto out_put_node;
        }
 
-       ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                pr_err("failed to init cpufreq table: %d\n", ret);
                goto out_put_node;
@@ -230,7 +193,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                transition_latency = CPUFREQ_ETERNAL;
 
        if (!IS_ERR(cpu_reg)) {
-               struct opp *opp;
+               struct dev_pm_opp *opp;
                unsigned long min_uV, max_uV;
                int i;
 
@@ -242,12 +205,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
                        ;
                rcu_read_lock();
-               opp = opp_find_freq_exact(cpu_dev,
+               opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                freq_table[0].frequency * 1000, true);
-               min_uV = opp_get_voltage(opp);
-               opp = opp_find_freq_exact(cpu_dev,
+               min_uV = dev_pm_opp_get_voltage(opp);
+               opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                freq_table[i-1].frequency * 1000, true);
-               max_uV = opp_get_voltage(opp);
+               max_uV = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
                ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
                if (ret > 0)
@@ -264,7 +227,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
        return 0;
 
 out_free_table:
-       opp_free_cpufreq_table(cpu_dev, &freq_table);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_put_node:
        of_node_put(np);
        return ret;
@@ -273,7 +236,7 @@ out_put_node:
 static int cpu0_cpufreq_remove(struct platform_device *pdev)
 {
        cpufreq_unregister_driver(&cpu0_cpufreq_driver);
-       opp_free_cpufreq_table(cpu_dev, &freq_table);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 
        return 0;
 }
index b83d45f6857495083163e13336027ef4449b5da2..a05b876f375e5672c00b05412782c23ead83a773 100644 (file)
@@ -303,9 +303,7 @@ static int nforce2_verify(struct cpufreq_policy *policy)
        if (policy->min < (fsb_pol_max * fid * 100))
                policy->max = (fsb_pol_max + 1) * fid * 100;
 
-       cpufreq_verify_within_limits(policy,
-                                    policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
        return 0;
 }
 
@@ -362,7 +360,6 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
        policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
        policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       policy->cur = nforce2_get(policy->cpu);
 
        return 0;
 }
index 04548f7023af68dee6b36fc590ec59f02c0216f7..ec391d7f010b531a2fe378df96a88780b5c5faf2 100644 (file)
@@ -67,13 +67,11 @@ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu)                                   \
-static int lock_policy_rwsem_##mode(int cpu)                           \
+static void lock_policy_rwsem_##mode(int cpu)                          \
 {                                                                      \
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
        BUG_ON(!policy);                                                \
        down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu));           \
-                                                                       \
-       return 0;                                                       \
 }
 
 lock_policy_rwsem(read, cpu);
@@ -135,7 +133,7 @@ static DEFINE_MUTEX(cpufreq_governor_mutex);
 
 bool have_governor_per_policy(void)
 {
-       return cpufreq_driver->have_governor_per_policy;
+       return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
 }
 EXPORT_SYMBOL_GPL(have_governor_per_policy);
 
@@ -183,6 +181,37 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 }
 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
 
+/*
+ * This is a generic cpufreq init() routine which can be used by cpufreq
+ * drivers of SMP systems. It will do the following:
+ * - validate and show the frequency table passed in
+ * - set the policy's transition latency
+ * - fill policy->cpus with all possible CPUs
+ */
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+               struct cpufreq_frequency_table *table,
+               unsigned int transition_latency)
+{
+       int ret;
+
+       ret = cpufreq_table_validate_and_show(policy, table);
+       if (ret) {
+               pr_err("%s: invalid frequency table: %d\n", __func__, ret);
+               return ret;
+       }
+
+       policy->cpuinfo.transition_latency = transition_latency;
+
+       /*
+        * The driver only supports the SMP configuration where all processors
+        * share the same clock and voltage.
+        */
+       cpumask_setall(policy->cpus);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_init);
+
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
        struct cpufreq_policy *policy = NULL;
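
cpufreq_generic_init(), added in the hunk above, factors out the ->init boilerplate that the cpu0 and cris drivers in this series delete: table validation, transition latency and the shared-clock cpumask. For a driver whose CPUs all share one clock, ->init reduces to a single call; a sketch reusing the hypothetical demo_freq_table from the sketch after the acpi-cpufreq hunks:

static int demo_generic_init(struct cpufreq_policy *policy)
{
        /* validate + expose the table, set the latency, mark all CPUs related */
        return cpufreq_generic_init(policy, demo_freq_table, 1000000); /* 1 ms */
}
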
@@ -414,7 +443,7 @@ show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 show_one(scaling_cur_freq, cur);
 
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy);
 
 /**
@@ -435,7 +464,7 @@ static ssize_t store_##file_name                                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
-       ret = __cpufreq_set_policy(policy, &new_policy);                \
+       ret = cpufreq_set_policy(policy, &new_policy);          \
        policy->user_policy.object = policy->object;                    \
                                                                        \
        return ret ? ret : count;                                       \
@@ -493,11 +522,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                                &new_policy.governor))
                return -EINVAL;
 
-       /*
-        * Do not use cpufreq_set_policy here or the user_policy.max
-        * will be wrongly overridden
-        */
-       ret = __cpufreq_set_policy(policy, &new_policy);
+       ret = cpufreq_set_policy(policy, &new_policy);
 
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;
@@ -653,13 +678,12 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
-       ssize_t ret = -EINVAL;
+       ssize_t ret;
 
        if (!down_read_trylock(&cpufreq_rwsem))
-               goto exit;
+               return -EINVAL;
 
-       if (lock_policy_rwsem_read(policy->cpu) < 0)
-               goto up_read;
+       lock_policy_rwsem_read(policy->cpu);
 
        if (fattr->show)
                ret = fattr->show(policy, buf);
@@ -667,10 +691,8 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
                ret = -EIO;
 
        unlock_policy_rwsem_read(policy->cpu);
-
-up_read:
        up_read(&cpufreq_rwsem);
-exit:
+
        return ret;
 }
 
@@ -689,8 +711,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
        if (!down_read_trylock(&cpufreq_rwsem))
                goto unlock;
 
-       if (lock_policy_rwsem_write(policy->cpu) < 0)
-               goto up_read;
+       lock_policy_rwsem_write(policy->cpu);
 
        if (fattr->store)
                ret = fattr->store(policy, buf, count);
@@ -699,7 +720,6 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 
        unlock_policy_rwsem_write(policy->cpu);
 
-up_read:
        up_read(&cpufreq_rwsem);
 unlock:
        put_online_cpus();
@@ -844,11 +864,11 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
        int ret = 0;
 
        memcpy(&new_policy, policy, sizeof(*policy));
-       /* assure that the starting sequence is run in __cpufreq_set_policy */
+       /* assure that the starting sequence is run in cpufreq_set_policy */
        policy->governor = NULL;
 
        /* set default policy */
-       ret = __cpufreq_set_policy(policy, &new_policy);
+       ret = cpufreq_set_policy(policy, &new_policy);
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;
 
@@ -949,7 +969,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 
 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 {
-       if (cpu == policy->cpu)
+       if (WARN_ON(cpu == policy->cpu))
                return;
 
        /*
@@ -966,9 +986,7 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 
        up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
 
-#ifdef CONFIG_CPU_FREQ_TABLE
        cpufreq_frequency_table_update_policy_cpu(policy);
-#endif
        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                        CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
@@ -1053,6 +1071,14 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
                goto err_set_policy_cpu;
        }
 
+       if (cpufreq_driver->get) {
+               policy->cur = cpufreq_driver->get(policy->cpu);
+               if (!policy->cur) {
+                       pr_err("%s: ->get() failed\n", __func__);
+                       goto err_get_freq;
+               }
+       }
+
        /* related cpus should atleast have policy->cpus */
        cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
 
@@ -1107,6 +1133,9 @@ err_out_unregister:
                per_cpu(cpufreq_cpu_data, j) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+err_get_freq:
+       if (cpufreq_driver->exit)
+               cpufreq_driver->exit(policy);
 err_set_policy_cpu:
        cpufreq_policy_free(policy);
 nomem_out:
@@ -1147,7 +1176,7 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
        if (ret) {
                pr_err("%s: Failed to move kobj: %d", __func__, ret);
 
-               WARN_ON(lock_policy_rwsem_write(old_cpu));
+               lock_policy_rwsem_write(old_cpu);
                cpumask_set_cpu(old_cpu, policy->cpus);
                unlock_policy_rwsem_write(old_cpu);
 
@@ -1208,14 +1237,13 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
                if (!frozen)
                        sysfs_remove_link(&dev->kobj, "cpufreq");
        } else if (cpus > 1) {
-
                new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
                if (new_cpu >= 0) {
                        update_policy_cpu(policy, new_cpu);
 
                        if (!frozen) {
-                               pr_debug("%s: policy Kobject moved to cpu: %d "
-                                        "from: %d\n",__func__, new_cpu, cpu);
+                               pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+                                               __func__, new_cpu, cpu);
                        }
                }
        }
@@ -1243,7 +1271,7 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
                return -EINVAL;
        }
 
-       WARN_ON(lock_policy_rwsem_write(cpu));
+       lock_policy_rwsem_write(cpu);
        cpus = cpumask_weight(policy->cpus);
 
        if (cpus > 1)
@@ -1310,36 +1338,24 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
 }
 
 /**
- * __cpufreq_remove_dev - remove a CPU device
+ * cpufreq_remove_dev - remove a CPU device
  *
  * Removes the cpufreq interface for a CPU device.
- * Caller should already have policy_rwsem in write mode for this CPU.
- * This routine frees the rwsem before returning.
  */
-static inline int __cpufreq_remove_dev(struct device *dev,
-                                      struct subsys_interface *sif,
-                                      bool frozen)
-{
-       int ret;
-
-       ret = __cpufreq_remove_dev_prepare(dev, sif, frozen);
-
-       if (!ret)
-               ret = __cpufreq_remove_dev_finish(dev, sif, frozen);
-
-       return ret;
-}
-
 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
        unsigned int cpu = dev->id;
-       int retval;
+       int ret;
 
        if (cpu_is_offline(cpu))
                return 0;
 
-       retval = __cpufreq_remove_dev(dev, sif, false);
-       return retval;
+       ret = __cpufreq_remove_dev_prepare(dev, sif, false);
+
+       if (!ret)
+               ret = __cpufreq_remove_dev_finish(dev, sif, false);
+
+       return ret;
 }
 
 static void handle_update(struct work_struct *work)
@@ -1466,14 +1482,11 @@ unsigned int cpufreq_get(unsigned int cpu)
        if (!down_read_trylock(&cpufreq_rwsem))
                return 0;
 
-       if (unlikely(lock_policy_rwsem_read(cpu)))
-               goto out_policy;
+       lock_policy_rwsem_read(cpu);
 
        ret_freq = __cpufreq_get(cpu);
 
        unlock_policy_rwsem_read(cpu);
-
-out_policy:
        up_read(&cpufreq_rwsem);
 
        return ret_freq;
@@ -1697,14 +1710,12 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 {
        int ret = -EINVAL;
 
-       if (unlikely(lock_policy_rwsem_write(policy->cpu)))
-               goto fail;
+       lock_policy_rwsem_write(policy->cpu);
 
        ret = __cpufreq_driver_target(policy, target_freq, relation);
 
        unlock_policy_rwsem_write(policy->cpu);
 
-fail:
        return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
@@ -1871,10 +1882,10 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_get_policy);
 
 /*
- * data   : current policy.
- * policy : policy to be set.
+ * policy : current policy.
+ * new_policy: policy to be set.
  */
-static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+static int cpufreq_set_policy(struct cpufreq_policy *policy,
                                struct cpufreq_policy *new_policy)
 {
        int ret = 0, failed = 1;
@@ -1995,10 +2006,7 @@ int cpufreq_update_policy(unsigned int cpu)
                goto no_policy;
        }
 
-       if (unlikely(lock_policy_rwsem_write(cpu))) {
-               ret = -EINVAL;
-               goto fail;
-       }
+       lock_policy_rwsem_write(cpu);
 
        pr_debug("updating policy for CPU %u\n", cpu);
        memcpy(&new_policy, policy, sizeof(*policy));
@@ -2023,11 +2031,10 @@ int cpufreq_update_policy(unsigned int cpu)
                }
        }
 
-       ret = __cpufreq_set_policy(policy, &new_policy);
+       ret = cpufreq_set_policy(policy, &new_policy);
 
        unlock_policy_rwsem_write(cpu);
 
-fail:
        cpufreq_cpu_put(policy);
 no_policy:
        return ret;
index 88cd39f7b0e9573b4fc7e5ca13ac0126cf6ede9d..b5f2b8618949dc75d55202d7b76e56e1e0c52a5f 100644 (file)
@@ -191,7 +191,10 @@ struct common_dbs_data {
        struct attribute_group *attr_group_gov_sys; /* one governor - system */
        struct attribute_group *attr_group_gov_pol; /* one governor - policy */
 
-       /* Common data for platforms that don't set have_governor_per_policy */
+       /*
+        * Common data for platforms that don't set
+        * CPUFREQ_HAVE_GOVERNOR_PER_POLICY
+        */
        struct dbs_data *gdbs_data;
 
        struct cpu_dbs_common_info *(*get_cpu_cdbs)(int cpu);
index cb8276dd19caee0a5e2756883148a2d70a7132f2..05fdc7e40257f96a83940d3fbd601d45f4e8e77a 100644 (file)
@@ -54,11 +54,6 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
 static int cris_freq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
@@ -76,42 +71,17 @@ static int cris_freq_target(struct cpufreq_policy *policy,
 
 static int cris_freq_cpu_init(struct cpufreq_policy *policy)
 {
-       int result;
-
-       /* cpuinfo and default policy values */
-       policy->cpuinfo.transition_latency = 1000000; /* 1ms */
-       policy->cur = cris_freq_get_cpu_frequency(0);
-
-       result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
-       if (result)
-               return (result);
-
-       cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
-       return 0;
-}
-
-
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_generic_init(policy, cris_freq_table, 1000000);
 }
 
-
-static struct freq_attr *cris_freq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver cris_freq_driver = {
        .get    = cris_freq_get_cpu_frequency,
-       .verify = cris_freq_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = cris_freq_target,
        .init   = cris_freq_cpu_init,
-       .exit   = cris_freq_cpu_exit,
+       .exit   = cpufreq_generic_exit,
        .name   = "cris_freq",
-       .attr   = cris_freq_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static int __init cris_freq_init(void)
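
Note on the pattern introduced here and repeated in most of the driver files below: the per-driver ->init()/->exit() boilerplate, the custom verify callbacks and the per-driver freq_attr arrays are replaced by cpufreq_generic_init(), cpufreq_generic_exit(), cpufreq_generic_frequency_table_verify() and cpufreq_generic_attr (the table helpers are added in the freq_table.c hunks further down). Based on the code removed in these hunks, the init helper is roughly equivalent to the sketch below; the table name and values are placeholders, not taken from any driver:

    #include <linux/cpufreq.h>
    #include <linux/cpumask.h>

    /* Placeholder table, for illustration only (frequencies in kHz). */
    static struct cpufreq_frequency_table example_freq_table[] = {
            { .frequency = 200000 },
            { .frequency = 400000 },
            { .frequency = CPUFREQ_TABLE_END },
    };

    /* What each converted driver used to open-code in ->init(); the new
     * one-liner return cpufreq_generic_init(policy, table, latency_ns)
     * presumably wraps the same steps. */
    static int example_cpufreq_init(struct cpufreq_policy *policy)
    {
            int ret;

            ret = cpufreq_frequency_table_cpuinfo(policy, example_freq_table);
            if (ret)
                    return ret;

            cpufreq_frequency_table_get_attr(example_freq_table, policy->cpu);
            policy->cpuinfo.transition_latency = 1000000;   /* 1 ms */
            cpumask_setall(policy->cpus);   /* CPUs share one clock/policy */

            return 0;
    }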
index 72328f77dc53bdef993173a1832d3159ac3fd03c..fac2b26932dd7fc4ff121e6bc6edd98ed65b0c47 100644 (file)
@@ -54,11 +54,6 @@ static void cris_freq_set_cpu_state(struct cpufreq_policy *policy,
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
-static int cris_freq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &cris_freq_table[0]);
-}
-
 static int cris_freq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq, unsigned int relation)
 {
@@ -75,40 +70,17 @@ static int cris_freq_target(struct cpufreq_policy *policy,
 
 static int cris_freq_cpu_init(struct cpufreq_policy *policy)
 {
-       int result;
-
-       /* cpuinfo and default policy values */
-       policy->cpuinfo.transition_latency = 1000000;   /* 1ms */
-       policy->cur = cris_freq_get_cpu_frequency(0);
-
-       result = cpufreq_frequency_table_cpuinfo(policy, cris_freq_table);
-       if (result)
-               return (result);
-
-       cpufreq_frequency_table_get_attr(cris_freq_table, policy->cpu);
-
-       return 0;
-}
-
-static int cris_freq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_generic_init(policy, cris_freq_table, 1000000);
 }
 
-static struct freq_attr *cris_freq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver cris_freq_driver = {
        .get = cris_freq_get_cpu_frequency,
-       .verify = cris_freq_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = cris_freq_target,
        .init = cris_freq_cpu_init,
-       .exit = cris_freq_cpu_exit,
+       .exit = cpufreq_generic_exit,
        .name = "cris_freq",
-       .attr = cris_freq_attr,
+       .attr = cpufreq_generic_attr,
 };
 
 static int __init cris_freq_init(void)
index 551dd655c6f2ac1d49fb1238ee20f38c213e3f36..972583baf9e8d605b14b830b40b2c459ae1414b5 100644 (file)
@@ -50,9 +50,7 @@ static int davinci_verify_speed(struct cpufreq_policy *policy)
        if (policy->cpu)
                return -EINVAL;
 
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
-
+       cpufreq_verify_within_cpu_limits(policy);
        policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
        policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
@@ -138,47 +136,24 @@ static int davinci_cpu_init(struct cpufreq_policy *policy)
                        return result;
        }
 
-       policy->cur = davinci_getspeed(0);
-
-       result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       if (result) {
-               pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
-                               __func__);
-               return result;
-       }
-
-       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
        /*
         * Time measurement across the target() function yields ~1500-1800us
         * time taken with no drivers on notification list.
         * Setting the latency to 2000 us to accommodate addition of drivers
         * to pre/post change notification list.
         */
-       policy->cpuinfo.transition_latency = 2000 * 1000;
-       return 0;
+       return cpufreq_generic_init(policy, freq_table, 2000 * 1000);
 }
 
-static int davinci_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
-static struct freq_attr *davinci_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver davinci_driver = {
        .flags          = CPUFREQ_STICKY,
        .verify         = davinci_verify_speed,
        .target         = davinci_target,
        .get            = davinci_getspeed,
        .init           = davinci_cpu_init,
-       .exit           = davinci_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .name           = "davinci",
-       .attr           = davinci_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init davinci_cpufreq_probe(struct platform_device *pdev)
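
Note: the hunk above (and several drivers below, e.g. integrator, longrun, intel_pstate) swaps the open-coded cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq) call for cpufreq_verify_within_cpu_limits(policy). The helper's definition is not part of this section; judging only from its call sites it is presumably a thin inline wrapper in include/linux/cpufreq.h along these lines:

    /* Assumed shape of the helper, inferred from its call sites here. */
    static inline void cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
    {
            cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                         policy->cpuinfo.max_freq);
    }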
index 26321cdc19464f1cd3b23f7bf6575d5d97fa6ffc..a60f7693c18e1b93809d53c5835c89583a8af379 100644 (file)
 static struct cpufreq_frequency_table *freq_table;
 static struct clk *armss_clk;
 
-static struct freq_attr *dbx500_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
-static int dbx500_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
                                unsigned int target_freq,
                                unsigned int relation)
@@ -84,43 +74,17 @@ static unsigned int dbx500_cpufreq_getspeed(unsigned int cpu)
 
 static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
 {
-       int res;
-
-       /* get policy fields based on the table */
-       res = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       if (!res)
-               cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-       else {
-               pr_err("dbx500-cpufreq: Failed to read policy table\n");
-               return res;
-       }
-
-       policy->min = policy->cpuinfo.min_freq;
-       policy->max = policy->cpuinfo.max_freq;
-       policy->cur = dbx500_cpufreq_getspeed(policy->cpu);
-       policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
-       /*
-        * FIXME : Need to take time measurement across the target()
-        *         function with no/some/all drivers in the notification
-        *         list.
-        */
-       policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
-
-       /* policy sharing between dual CPUs */
-       cpumask_setall(policy->cpus);
-
-       return 0;
+       return cpufreq_generic_init(policy, freq_table, 20 * 1000);
 }
 
 static struct cpufreq_driver dbx500_cpufreq_driver = {
        .flags  = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
-       .verify = dbx500_cpufreq_verify_speed,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = dbx500_cpufreq_target,
        .get    = dbx500_cpufreq_getspeed,
        .init   = dbx500_cpufreq_init,
        .name   = "DBX500",
-       .attr   = dbx500_cpufreq_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static int dbx500_cpufreq_probe(struct platform_device *pdev)
index 09f64cc830197fc1f80f605e41d73be6b0441435..2c11ce3c67bde69a9e199135349becdd065fa283 100644 (file)
@@ -198,12 +198,6 @@ static int eps_target(struct cpufreq_policy *policy,
        return ret;
 }
 
-static int eps_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                       &eps_cpu[policy->cpu]->freq_table[0]);
-}
-
 static int eps_cpu_init(struct cpufreq_policy *policy)
 {
        unsigned int i;
@@ -401,15 +395,13 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
        }
 
        policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
-       policy->cur = fsb * current_multiplier;
 
-       ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
+       ret = cpufreq_table_validate_and_show(policy, &centaur->freq_table[0]);
        if (ret) {
                kfree(centaur);
                return ret;
        }
 
-       cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
        return 0;
 }
 
@@ -424,19 +416,14 @@ static int eps_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static struct freq_attr *eps_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver eps_driver = {
-       .verify         = eps_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = eps_target,
        .init           = eps_cpu_init,
        .exit           = eps_cpu_exit,
        .get            = eps_get,
        .name           = "e_powersaver",
-       .attr           = eps_attr,
+       .attr           = cpufreq_generic_attr,
 };
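
Note: this driver keeps a non-trivial ->init() (its frequency table is built per CPU at runtime), so instead of cpufreq_generic_init() it calls the new cpufreq_table_validate_and_show(), which validates the table and registers the sysfs attribute in one step; its definition appears in the freq_table.c hunk further down. A hedged usage sketch, with all example_* names hypothetical:

    #include <linux/cpufreq.h>

    /* In a real driver this table would be built per CPU; a static
     * placeholder keeps the sketch self-contained. */
    static struct cpufreq_frequency_table example_table[] = {
            { .frequency = 800000 },
            { .frequency = CPUFREQ_TABLE_END },
    };

    static int example_cpu_init(struct cpufreq_policy *policy)
    {
            policy->cpuinfo.transition_latency = 140000;    /* ns, example */

            /* Replaces cpufreq_frequency_table_cpuinfo() followed by
             * cpufreq_frequency_table_get_attr(). */
            return cpufreq_table_validate_and_show(policy, example_table);
    }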
 
 
index 823a400d98fd6bc5f8c627db8151df572ffc68b9..d91a645a27aea800062dde1358fa02f5155137d0 100644 (file)
@@ -165,19 +165,6 @@ static void elanfreq_set_cpu_state(struct cpufreq_policy *policy,
 };
 
 
-/**
- *     elanfreq_validatespeed: test if frequency range is valid
- *     @policy: the policy to validate
- *
- *     This function checks if a given frequency range in kHz is valid
- *     for the hardware supported by the driver.
- */
-
-static int elanfreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
-}
-
 static int elanfreq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
@@ -202,7 +189,6 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 {
        struct cpuinfo_x86 *c = &cpu_data(0);
        unsigned int i;
-       int result;
 
        /* capability check */
        if ((c->x86_vendor != X86_VENDOR_AMD) ||
@@ -221,21 +207,8 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       policy->cur = elanfreq_get_cpu_frequency(0);
 
-       result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
-       return 0;
-}
-
-
-static int elanfreq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_table_validate_and_show(policy, elanfreq_table);
 }
 
 
@@ -261,20 +234,14 @@ __setup("elanfreq=", elanfreq_setup);
 #endif
 
 
-static struct freq_attr *elanfreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
-
 static struct cpufreq_driver elanfreq_driver = {
        .get            = elanfreq_get_cpu_frequency,
-       .verify         = elanfreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = elanfreq_target,
        .init           = elanfreq_cpu_init,
-       .exit           = elanfreq_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .name           = "elanfreq",
-       .attr           = elanfreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id elan_id[] = {
index 0fac34439e3171dbc97b8f717a935b691657cba9..3e4af676f43d79fba9c704049858e5c7026f5dac 100644 (file)
@@ -31,12 +31,6 @@ static unsigned int locking_frequency;
 static bool frequency_locked;
 static DEFINE_MUTEX(cpufreq_lock);
 
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                                             exynos_info->freq_table);
-}
-
 static unsigned int exynos_getspeed(unsigned int cpu)
 {
        return clk_get_rate(exynos_info->cpu_clk) / 1000;
@@ -141,7 +135,7 @@ post_notify:
        if ((freqs.new < freqs.old) ||
           ((freqs.new > freqs.old) && safe_arm_volt)) {
                /* down the voltage after frequency change */
-               regulator_set_voltage(arm_regulator, arm_volt,
+               ret = regulator_set_voltage(arm_regulator, arm_volt,
                                arm_volt);
                if (ret) {
                        pr_err("%s: failed to set cpu voltage to %d\n",
@@ -247,38 +241,18 @@ static struct notifier_block exynos_cpufreq_nb = {
 
 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
-
-       cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
-
-       /* set the transition latency value */
-       policy->cpuinfo.transition_latency = 100000;
-
-       cpumask_setall(policy->cpus);
-
-       return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
+       return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
 }
 
-static int exynos_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
-static struct freq_attr *exynos_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver exynos_driver = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = exynos_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = exynos_target,
        .get            = exynos_getspeed,
        .init           = exynos_cpufreq_cpu_init,
-       .exit           = exynos_cpufreq_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .name           = "exynos_cpufreq",
-       .attr           = exynos_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 #ifdef CONFIG_PM
        .suspend        = exynos_cpufreq_suspend,
        .resume         = exynos_cpufreq_resume,
index add7fbec4fc9af5ca3060e925cfe662a25d21eb4..f2c75065ce198694428d631baef388ad8e9aa6c6 100644 (file)
@@ -81,9 +81,9 @@ static void exynos4210_set_clkdiv(unsigned int div_index)
 
 static void exynos4210_set_apll(unsigned int index)
 {
-       unsigned int tmp;
+       unsigned int tmp, freq = apll_freq_4210[index].freq;
 
-       /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+       /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
        clk_set_parent(moutcore, mout_mpll);
 
        do {
@@ -92,21 +92,9 @@ static void exynos4210_set_apll(unsigned int index)
                tmp &= 0x7;
        } while (tmp != 0x2);
 
-       /* 2. Set APLL Lock time */
-       __raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK);
-
-       /* 3. Change PLL PMS values */
-       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-       tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
-       tmp |= apll_freq_4210[index].mps;
-       __raw_writel(tmp, EXYNOS4_APLL_CON0);
+       clk_set_rate(mout_apll, freq * 1000);
 
-       /* 4. wait_lock_time */
-       do {
-               tmp = __raw_readl(EXYNOS4_APLL_CON0);
-       } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
-       /* 5. MUX_CORE_SEL = APLL */
+       /* MUX_CORE_SEL = APLL */
        clk_set_parent(moutcore, mout_apll);
 
        do {
@@ -115,53 +103,15 @@ static void exynos4210_set_apll(unsigned int index)
        } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
 }
 
-static bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
-{
-       unsigned int old_pm = apll_freq_4210[old_index].mps >> 8;
-       unsigned int new_pm = apll_freq_4210[new_index].mps >> 8;
-
-       return (old_pm == new_pm) ? 0 : 1;
-}
-
 static void exynos4210_set_frequency(unsigned int old_index,
                                     unsigned int new_index)
 {
-       unsigned int tmp;
-
        if (old_index > new_index) {
-               if (!exynos4210_pms_change(old_index, new_index)) {
-                       /* 1. Change the system clock divider values */
-                       exynos4210_set_clkdiv(new_index);
-
-                       /* 2. Change just s value in apll m,p,s value */
-                       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-                       tmp &= ~(0x7 << 0);
-                       tmp |= apll_freq_4210[new_index].mps & 0x7;
-                       __raw_writel(tmp, EXYNOS4_APLL_CON0);
-               } else {
-                       /* Clock Configuration Procedure */
-                       /* 1. Change the system clock divider values */
-                       exynos4210_set_clkdiv(new_index);
-                       /* 2. Change the apll m,p,s value */
-                       exynos4210_set_apll(new_index);
-               }
+               exynos4210_set_clkdiv(new_index);
+               exynos4210_set_apll(new_index);
        } else if (old_index < new_index) {
-               if (!exynos4210_pms_change(old_index, new_index)) {
-                       /* 1. Change just s value in apll m,p,s value */
-                       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-                       tmp &= ~(0x7 << 0);
-                       tmp |= apll_freq_4210[new_index].mps & 0x7;
-                       __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
-                       /* 2. Change the system clock divider values */
-                       exynos4210_set_clkdiv(new_index);
-               } else {
-                       /* Clock Configuration Procedure */
-                       /* 1. Change the apll m,p,s value */
-                       exynos4210_set_apll(new_index);
-                       /* 2. Change the system clock divider values */
-                       exynos4210_set_clkdiv(new_index);
-               }
+               exynos4210_set_apll(new_index);
+               exynos4210_set_clkdiv(new_index);
        }
 }
 
@@ -194,7 +144,6 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
        info->volt_table = exynos4210_volt_table;
        info->freq_table = exynos4210_freq_table;
        info->set_freq = exynos4210_set_frequency;
-       info->need_apll_change = exynos4210_pms_change;
 
        return 0;
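
Note on the hunks above (the sibling 4x12 file below gets the same treatment): the manual APLL M/P/S register writes, the lock-time programming and the pms_change() special case are dropped in favour of the common clock framework. The switching sequence reduces to roughly the sketch below; table frequencies are in kHz, hence the * 1000 for clk_set_rate(). The helper name, the omitted error handling and the omitted mux-status polling loops are simplifications, not the driver's code:

    #include <linux/clk.h>

    /* Illustrative only: run the core from MPLL while the APLL relocks
     * at the new rate, then switch back to the APLL output. */
    static void example_set_apll(struct clk *moutcore, struct clk *mout_mpll,
                                 struct clk *mout_apll, unsigned int freq_khz)
    {
            clk_set_parent(moutcore, mout_mpll);
            clk_set_rate(mout_apll, freq_khz * 1000UL);     /* kHz -> Hz */
            clk_set_parent(moutcore, mout_apll);
    }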
 
index 08b7477b0aa23ea139faa00aef4d6eca601d173c..8683304ce62cc4dba0c947e2be6049e1b94ffcaf 100644 (file)
@@ -128,9 +128,9 @@ static void exynos4x12_set_clkdiv(unsigned int div_index)
 
 static void exynos4x12_set_apll(unsigned int index)
 {
-       unsigned int tmp, pdiv;
+       unsigned int tmp, freq = apll_freq_4x12[index].freq;
 
-       /* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
+       /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
        clk_set_parent(moutcore, mout_mpll);
 
        do {
@@ -140,24 +140,9 @@ static void exynos4x12_set_apll(unsigned int index)
                tmp &= 0x7;
        } while (tmp != 0x2);
 
-       /* 2. Set APLL Lock time */
-       pdiv = ((apll_freq_4x12[index].mps >> 8) & 0x3f);
+       clk_set_rate(mout_apll, freq * 1000);
 
-       __raw_writel((pdiv * 250), EXYNOS4_APLL_LOCK);
-
-       /* 3. Change PLL PMS values */
-       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-       tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
-       tmp |= apll_freq_4x12[index].mps;
-       __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
-       /* 4. wait_lock_time */
-       do {
-               cpu_relax();
-               tmp = __raw_readl(EXYNOS4_APLL_CON0);
-       } while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));
-
-       /* 5. MUX_CORE_SEL = APLL */
+       /* MUX_CORE_SEL = APLL */
        clk_set_parent(moutcore, mout_apll);
 
        do {
@@ -167,52 +152,15 @@ static void exynos4x12_set_apll(unsigned int index)
        } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
 }
 
-static bool exynos4x12_pms_change(unsigned int old_index, unsigned int new_index)
-{
-       unsigned int old_pm = apll_freq_4x12[old_index].mps >> 8;
-       unsigned int new_pm = apll_freq_4x12[new_index].mps >> 8;
-
-       return (old_pm == new_pm) ? 0 : 1;
-}
-
 static void exynos4x12_set_frequency(unsigned int old_index,
                                  unsigned int new_index)
 {
-       unsigned int tmp;
-
        if (old_index > new_index) {
-               if (!exynos4x12_pms_change(old_index, new_index)) {
-                       /* 1. Change the system clock divider values */
-                       exynos4x12_set_clkdiv(new_index);
-                       /* 2. Change just s value in apll m,p,s value */
-                       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-                       tmp &= ~(0x7 << 0);
-                       tmp |= apll_freq_4x12[new_index].mps & 0x7;
-                       __raw_writel(tmp, EXYNOS4_APLL_CON0);
-
-               } else {
-                       /* Clock Configuration Procedure */
-                       /* 1. Change the system clock divider values */
-                       exynos4x12_set_clkdiv(new_index);
-                       /* 2. Change the apll m,p,s value */
-                       exynos4x12_set_apll(new_index);
-               }
+               exynos4x12_set_clkdiv(new_index);
+               exynos4x12_set_apll(new_index);
        } else if (old_index < new_index) {
-               if (!exynos4x12_pms_change(old_index, new_index)) {
-                       /* 1. Change just s value in apll m,p,s value */
-                       tmp = __raw_readl(EXYNOS4_APLL_CON0);
-                       tmp &= ~(0x7 << 0);
-                       tmp |= apll_freq_4x12[new_index].mps & 0x7;
-                       __raw_writel(tmp, EXYNOS4_APLL_CON0);
-                       /* 2. Change the system clock divider values */
-                       exynos4x12_set_clkdiv(new_index);
-               } else {
-                       /* Clock Configuration Procedure */
-                       /* 1. Change the apll m,p,s value */
-                       exynos4x12_set_apll(new_index);
-                       /* 2. Change the system clock divider values */
-                       exynos4x12_set_clkdiv(new_index);
-               }
+               exynos4x12_set_apll(new_index);
+               exynos4x12_set_clkdiv(new_index);
        }
 }
 
@@ -250,7 +198,6 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
        info->volt_table = exynos4x12_volt_table;
        info->freq_table = exynos4x12_freq_table;
        info->set_freq = exynos4x12_set_frequency;
-       info->need_apll_change = exynos4x12_pms_change;
 
        return 0;
 
index be5380ecdcd43f95c4aa88a62389c184597f3971..8ae5e2925bf1e2e446bdb952a68f925d46da732e 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
@@ -118,12 +118,12 @@ static int init_div_table(void)
        struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
        unsigned int tmp, clk_div, ema_div, freq, volt_id;
        int i = 0;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
 
        rcu_read_lock();
        for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
 
-               opp = opp_find_freq_exact(dvfs_info->dev,
+               opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
                                        freq_tbl[i].frequency * 1000, true);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
@@ -142,7 +142,7 @@ static int init_div_table(void)
                                        << P0_7_CSCLKDEV_SHIFT;
 
                /* Calculate EMA */
-               volt_id = opp_get_voltage(opp);
+               volt_id = dev_pm_opp_get_voltage(opp);
                volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
                if (volt_id < PMIC_HIGH_VOLT) {
                        ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
@@ -209,12 +209,6 @@ static void exynos_enable_dvfs(void)
                                dvfs_info->base + XMU_DVFS_CTRL);
 }
 
-static int exynos_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                                             dvfs_info->freq_table);
-}
-
 static unsigned int exynos_getspeed(unsigned int cpu)
 {
        return dvfs_info->cur_frequency;
@@ -324,30 +318,19 @@ static void exynos_sort_descend_freq_table(void)
 
 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       int ret;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, dvfs_info->freq_table);
-       if (ret) {
-               dev_err(dvfs_info->dev, "Invalid frequency table: %d\n", ret);
-               return ret;
-       }
-
-       policy->cur = dvfs_info->cur_frequency;
-       policy->cpuinfo.transition_latency = dvfs_info->latency;
-       cpumask_setall(policy->cpus);
-
-       cpufreq_frequency_table_get_attr(dvfs_info->freq_table, policy->cpu);
-
-       return 0;
+       return cpufreq_generic_init(policy, dvfs_info->freq_table,
+                       dvfs_info->latency);
 }
 
 static struct cpufreq_driver exynos_driver = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = exynos_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = exynos_target,
        .get            = exynos_getspeed,
        .init           = exynos_cpufreq_cpu_init,
+       .exit           = cpufreq_generic_exit,
        .name           = CPUFREQ_NAME,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct of_device_id exynos_cpufreq_match[] = {
@@ -399,13 +382,14 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
                goto err_put_node;
        }
 
-       ret = opp_init_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+       ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
+                                           &dvfs_info->freq_table);
        if (ret) {
                dev_err(dvfs_info->dev,
                        "failed to init cpufreq table: %d\n", ret);
                goto err_put_node;
        }
-       dvfs_info->freq_count = opp_get_opp_count(dvfs_info->dev);
+       dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
        exynos_sort_descend_freq_table();
 
        if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
@@ -454,7 +438,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
        return 0;
 
 err_free_table:
-       opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+       dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
 err_put_node:
        of_node_put(np);
        dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
@@ -464,7 +448,7 @@ err_put_node:
 static int exynos_cpufreq_remove(struct platform_device *pdev)
 {
        cpufreq_unregister_driver(&exynos_driver);
-       opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
+       dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
        return 0;
 }
 
index f111454a7aeace94454e3c8eff6e7e45360380d7..3458d27f63b409e03b866e6b20e541a7de8cc769 100644 (file)
@@ -54,31 +54,30 @@ EXPORT_SYMBOL_GPL(cpufreq_frequency_table_cpuinfo);
 int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table)
 {
-       unsigned int next_larger = ~0;
-       unsigned int i;
-       unsigned int count = 0;
+       unsigned int next_larger = ~0, freq, i = 0;
+       bool found = false;
 
        pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
                                        policy->min, policy->max, policy->cpu);
 
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
 
-       for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
-               unsigned int freq = table[i].frequency;
+       for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) {
                if (freq == CPUFREQ_ENTRY_INVALID)
                        continue;
-               if ((freq >= policy->min) && (freq <= policy->max))
-                       count++;
-               else if ((next_larger > freq) && (freq > policy->max))
+               if ((freq >= policy->min) && (freq <= policy->max)) {
+                       found = true;
+                       break;
+               }
+
+               if ((next_larger > freq) && (freq > policy->max))
                        next_larger = freq;
        }
 
-       if (!count)
+       if (!found) {
                policy->max = next_larger;
-
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
+               cpufreq_verify_within_cpu_limits(policy);
+       }
 
        pr_debug("verification lead to (%u - %u kHz) for cpu %u\n",
                                policy->min, policy->max, policy->cpu);
@@ -87,6 +86,20 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_verify);
 
+/*
+ * Generic routine to verify policy & frequency table, requires driver to call
+ * cpufreq_frequency_table_get_attr() prior to it.
+ */
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
+{
+       struct cpufreq_frequency_table *table =
+               cpufreq_frequency_get_table(policy->cpu);
+       if (!table)
+               return -ENODEV;
+
+       return cpufreq_frequency_table_verify(policy, table);
+}
+EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
 
 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table,
@@ -200,6 +213,12 @@ struct freq_attr cpufreq_freq_attr_scaling_available_freqs = {
 };
 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
 
+struct freq_attr *cpufreq_generic_attr[] = {
+       &cpufreq_freq_attr_scaling_available_freqs,
+       NULL,
+};
+EXPORT_SYMBOL_GPL(cpufreq_generic_attr);
+
 /*
  * if you use these, you must assure that the frequency table is valid
  * all the time between get_attr and put_attr!
@@ -219,6 +238,18 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
 }
 EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
 
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+                                     struct cpufreq_frequency_table *table)
+{
+       int ret = cpufreq_frequency_table_cpuinfo(policy, table);
+
+       if (!ret)
+               cpufreq_frequency_table_get_attr(table, policy->cpu);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(cpufreq_table_validate_and_show);
+
 void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
 {
        pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
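
Note: the hunks above add the shared pieces the converted drivers rely on: cpufreq_generic_frequency_table_verify(), the cpufreq_generic_attr[] array and cpufreq_table_validate_and_show(). cpufreq_generic_init() and cpufreq_generic_exit() are defined elsewhere in the merge; judging from the call sites, the exit helper presumably just calls cpufreq_frequency_table_put_attr() (loongson2 below keeps a custom ->exit() that does exactly that before clk_put()). A fully "generic" table-based driver then reduces to roughly this skeleton, with every example_* name hypothetical:

    #include <linux/cpufreq.h>

    static struct cpufreq_frequency_table example_table[] = {
            { .frequency = 200000 },
            { .frequency = 400000 },
            { .frequency = CPUFREQ_TABLE_END },
    };

    static unsigned int example_get(unsigned int cpu)
    {
            return 200000;          /* placeholder: read the real rate here */
    }

    static int example_target(struct cpufreq_policy *policy,
                              unsigned int target_freq, unsigned int relation)
    {
            return 0;               /* placeholder: program the hardware here */
    }

    static int example_init(struct cpufreq_policy *policy)
    {
            return cpufreq_generic_init(policy, example_table, 100000);
    }

    static struct cpufreq_driver example_driver = {
            .verify = cpufreq_generic_frequency_table_verify,
            .target = example_target,
            .get    = example_get,
            .init   = example_init,
            .exit   = cpufreq_generic_exit,
            .name   = "example",
            .attr   = cpufreq_generic_attr,
    };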
index 70442c7b5e71aed7e57ce61b20893e42dc36a215..d83e8266a58e2eddf824696c80240ced024bab57 100644 (file)
@@ -401,7 +401,7 @@ static int cpufreq_gx_target(struct cpufreq_policy *policy,
 
 static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
 {
-       unsigned int maxfreq, curfreq;
+       unsigned int maxfreq;
 
        if (!policy || policy->cpu != 0)
                return -ENODEV;
@@ -415,10 +415,8 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
                maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f];
 
        stock_freq = maxfreq;
-       curfreq = gx_get_cpuspeed(0);
 
        pr_debug("cpu max frequency is %d.\n", maxfreq);
-       pr_debug("cpu current frequency is %dkHz.\n", curfreq);
 
        /* setup basic struct for cpufreq API */
        policy->cpu = 0;
@@ -428,7 +426,6 @@ static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
        else
                policy->min = maxfreq / POLICY_MIN_DIV;
        policy->max = maxfreq;
-       policy->cur = curfreq;
        policy->cpuinfo.min_freq = maxfreq / max_duration;
        policy->cpuinfo.max_freq = maxfreq;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
index 794123fcf3e3ebce87cdbcbf0a66b60f4b70db2c..bf8902a0866dd4d767cdab4ba45d84ca397033fe 100644 (file)
@@ -66,7 +66,8 @@ static int hb_cpufreq_driver_init(void)
        struct device_node *np;
        int ret;
 
-       if (!of_machine_is_compatible("calxeda,highbank"))
+       if ((!of_machine_is_compatible("calxeda,highbank")) &&
+               (!of_machine_is_compatible("calxeda,ecx-2000")))
                return -ENODEV;
 
        cpu_dev = get_cpu_device(0);
index 3e14f03171759bcdbd8a0501bc1ff38fee86d4c0..90c6598415fd39687aa874a85d52730452f26c3d 100644 (file)
@@ -247,22 +247,6 @@ acpi_cpufreq_target (
 }
 
 
-static int
-acpi_cpufreq_verify (
-       struct cpufreq_policy   *policy)
-{
-       unsigned int result = 0;
-       struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
-       pr_debug("acpi_cpufreq_verify\n");
-
-       result = cpufreq_frequency_table_verify(policy,
-                       data->freq_table);
-
-       return (result);
-}
-
-
 static int
 acpi_cpufreq_cpu_init (
        struct cpufreq_policy   *policy)
@@ -321,7 +305,6 @@ acpi_cpufreq_cpu_init (
                            data->acpi_data.states[i].transition_latency * 1000;
                }
        }
-       policy->cur = processor_get_freq(data, policy->cpu);
 
        /* table init */
        for (i = 0; i <= data->acpi_data.state_count; i++)
@@ -335,7 +318,7 @@ acpi_cpufreq_cpu_init (
                }
        }
 
-       result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
+       result = cpufreq_table_validate_and_show(policy, data->freq_table);
        if (result) {
                goto err_freqfree;
        }
@@ -356,8 +339,6 @@ acpi_cpufreq_cpu_init (
                        (u32) data->acpi_data.states[i].status,
                        (u32) data->acpi_data.states[i].control);
 
-       cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
-
        /* the first call to ->target() should result in us actually
         * writing something to the appropriate registers. */
        data->resume = 1;
@@ -396,20 +377,14 @@ acpi_cpufreq_cpu_exit (
 }
 
 
-static struct freq_attr* acpi_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
-
 static struct cpufreq_driver acpi_cpufreq_driver = {
-       .verify         = acpi_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = acpi_cpufreq_target,
        .get            = acpi_cpufreq_get,
        .init           = acpi_cpufreq_cpu_init,
        .exit           = acpi_cpufreq_cpu_exit,
        .name           = "acpi-cpufreq",
-       .attr           = acpi_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 
index c3fd2a101ca02852c677d021c567b27ee74b075a..be23892282e3462947e7df176113070a2b1d8ca7 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 
@@ -35,11 +35,6 @@ static struct device *cpu_dev;
 static struct cpufreq_frequency_table *freq_table;
 static unsigned int transition_latency;
 
-static int imx6q_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int imx6q_get_speed(unsigned int cpu)
 {
        return clk_get_rate(arm_clk) / 1000;
@@ -49,7 +44,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
                            unsigned int target_freq, unsigned int relation)
 {
        struct cpufreq_freqs freqs;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long freq_hz, volt, volt_old;
        unsigned int index;
        int ret;
@@ -70,14 +65,14 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
                return 0;
 
        rcu_read_lock();
-       opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
+       opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
                return PTR_ERR(opp);
        }
 
-       volt = opp_get_voltage(opp);
+       volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        volt_old = regulator_get_voltage(arm_reg);
 
@@ -159,47 +154,23 @@ post_notify:
 
 static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
 {
-       int ret;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       if (ret) {
-               dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
-               return ret;
-       }
-
-       policy->cpuinfo.transition_latency = transition_latency;
-       policy->cur = clk_get_rate(arm_clk) / 1000;
-       cpumask_setall(policy->cpus);
-       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
-       return 0;
-}
-
-static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_generic_init(policy, freq_table, transition_latency);
 }
 
-static struct freq_attr *imx6q_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver imx6q_cpufreq_driver = {
-       .verify = imx6q_verify_speed,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = imx6q_set_target,
        .get = imx6q_get_speed,
        .init = imx6q_cpufreq_init,
-       .exit = imx6q_cpufreq_exit,
+       .exit = cpufreq_generic_exit,
        .name = "imx6q-cpufreq",
-       .attr = imx6q_cpufreq_attr,
+       .attr = cpufreq_generic_attr,
 };
 
 static int imx6q_cpufreq_probe(struct platform_device *pdev)
 {
        struct device_node *np;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long min_volt, max_volt;
        int num, ret;
 
@@ -237,14 +208,14 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
        }
 
        /* We expect an OPP table supplied by platform */
-       num = opp_get_opp_count(cpu_dev);
+       num = dev_pm_opp_get_opp_count(cpu_dev);
        if (num < 0) {
                ret = num;
                dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
                goto put_node;
        }
 
-       ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+       ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto put_node;
@@ -259,12 +230,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
         * same order.
         */
        rcu_read_lock();
-       opp = opp_find_freq_exact(cpu_dev,
+       opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                  freq_table[0].frequency * 1000, true);
-       min_volt = opp_get_voltage(opp);
-       opp = opp_find_freq_exact(cpu_dev,
+       min_volt = dev_pm_opp_get_voltage(opp);
+       opp = dev_pm_opp_find_freq_exact(cpu_dev,
                                  freq_table[--num].frequency * 1000, true);
-       max_volt = opp_get_voltage(opp);
+       max_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
        if (ret > 0)
@@ -292,7 +263,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
        return 0;
 
 free_freq_table:
-       opp_free_cpufreq_table(cpu_dev, &freq_table);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 put_node:
        of_node_put(np);
        return ret;
@@ -301,7 +272,7 @@ put_node:
 static int imx6q_cpufreq_remove(struct platform_device *pdev)
 {
        cpufreq_unregister_driver(&imx6q_cpufreq_driver);
-       opp_free_cpufreq_table(cpu_dev, &freq_table);
+       dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 
        return 0;
 }
index f7c99df0880b4d396fee72860b904b5764f943f3..babf3e40e9fa5030be4420f7038681954ebc5bb8 100644 (file)
@@ -59,9 +59,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
 {
        struct icst_vco vco;
 
-       cpufreq_verify_within_limits(policy, 
-                                    policy->cpuinfo.min_freq, 
-                                    policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
 
        vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
        policy->max = icst_hz(&cclk_params, vco) / 1000;
@@ -69,10 +67,7 @@ static int integrator_verify_policy(struct cpufreq_policy *policy)
        vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
        policy->min = icst_hz(&cclk_params, vco) / 1000;
 
-       cpufreq_verify_within_limits(policy, 
-                                    policy->cpuinfo.min_freq, 
-                                    policy->cpuinfo.max_freq);
-
+       cpufreq_verify_within_cpu_limits(policy);
        return 0;
 }
 
@@ -186,10 +181,9 @@ static int integrator_cpufreq_init(struct cpufreq_policy *policy)
 {
 
        /* set default policy and cpuinfo */
-       policy->cpuinfo.max_freq = 160000;
-       policy->cpuinfo.min_freq = 12000;
+       policy->max = policy->cpuinfo.max_freq = 160000;
+       policy->min = policy->cpuinfo.min_freq = 12000;
        policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
-       policy->cur = policy->min = policy->max = integrator_get(policy->cpu);
 
        return 0;
 }
index 32b3479a24051d09a0c1c53647b74848fcb3c967..67a87e01c1d96f6709de25dff2b9cbb590d70b36 100644 (file)
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-       int core_pct_busy;
+       int32_t core_pct_busy;
        u64 aperf;
        u64 mperf;
        int freq;
@@ -68,7 +68,7 @@ struct _pid {
        int32_t i_gain;
        int32_t d_gain;
        int deadband;
-       int last_err;
+       int32_t last_err;
 };
 
 struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
        pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 }
 
-static signed int pid_calc(struct _pid *pid, int busy)
+static signed int pid_calc(struct _pid *pid, int32_t busy)
 {
-       signed int err, result;
+       signed int result;
        int32_t pterm, dterm, fp_error;
        int32_t integral_limit;
 
-       err = pid->setpoint - busy;
-       fp_error = int_tofp(err);
+       fp_error = int_tofp(pid->setpoint) - busy;
 
-       if (abs(err) <= pid->deadband)
+       if (abs(fp_error) <= int_tofp(pid->deadband))
                return 0;
 
        pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
        if (pid->integral < -integral_limit)
                pid->integral = -integral_limit;
 
-       dterm = mul_fp(pid->d_gain, (err - pid->last_err));
-       pid->last_err = err;
+       dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
+       pid->last_err = fp_error;
 
        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
 
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
        int max_perf = cpu->pstate.turbo_pstate;
+       int max_perf_adj;
        int min_perf;
        if (limits.no_turbo)
                max_perf = cpu->pstate.max_pstate;
 
-       max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
-       *max = clamp_t(int, max_perf,
+       max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+       *max = clamp_t(int, max_perf_adj,
                        cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
        min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
@@ -383,6 +383,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 {
        int max_perf, min_perf;
+       u64 val;
 
        intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
 
@@ -394,11 +395,11 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
        trace_cpu_frequency(pstate * 100000, cpu->cpu);
 
        cpu->pstate.current_pstate = pstate;
+       val = pstate << 8;
        if (limits.no_turbo)
-               wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8));
-       else
-               wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
+               val |= (u64)1 << 32;
 
+       wrmsrl(MSR_IA32_PERF_CTL, val);
 }
 
 static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
@@ -435,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                        struct sample *sample)
 {
        u64 core_pct;
-       core_pct = div64_u64(sample->aperf * 100, sample->mperf);
-       sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
+       core_pct = div64_u64(int_tofp(sample->aperf * 100),
+                            sample->mperf);
+       sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
 
        sample->core_pct_busy = core_pct;
 }
@@ -468,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
        mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
-       int32_t busy_scaled;
        int32_t core_busy, max_pstate, current_pstate;
 
-       core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+       core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
        max_pstate = int_tofp(cpu->pstate.max_pstate);
        current_pstate = int_tofp(cpu->pstate.current_pstate);
-       busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-
-       return fp_toint(busy_scaled);
+       return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 }
 
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
-       int busy_scaled;
+       int32_t busy_scaled;
        struct _pid *pid;
        signed int ctl = 0;
        int steps;
@@ -614,9 +613,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 
 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
-       cpufreq_verify_within_limits(policy,
-                               policy->cpuinfo.min_freq,
-                               policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
 
        if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
                (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
@@ -637,8 +634,8 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
 
 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
-       int rc, min_pstate, max_pstate;
        struct cpudata *cpu;
+       int rc;
 
        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
@@ -652,9 +649,8 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
-       intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
-       policy->min = min_pstate * 100000;
-       policy->max = max_pstate * 100000;
+       policy->min = cpu->pstate.min_pstate * 100000;
+       policy->max = cpu->pstate.turbo_pstate * 100000;
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
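
Note on the hunks above (the symbols suggest this is intel_pstate): busy tracking moves from plain int percentages to the driver's 32-bit fixed-point format so the fractional part survives into the PID controller, and the PERF_CTL value is built in a u64 so the no-turbo bit (bit 32) is set correctly. A small standalone worked example of the fixed-point arithmetic, assuming the driver's 8 fractional bits (treat FRAC_BITS and the sample numbers as assumptions for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 8                             /* 24.8 fixed point */
    #define int_tofp(X)  ((int32_t)(X) << FRAC_BITS)
    #define fp_toint(X)  ((X) >> FRAC_BITS)

    static int32_t mul_fp(int32_t x, int32_t y)
    {
            return (int32_t)(((int64_t)x * (int64_t)y) >> FRAC_BITS);
    }

    static int32_t div_fp(int32_t x, int32_t y)
    {
            return (int32_t)(((int64_t)x << FRAC_BITS) / y);
    }

    int main(void)
    {
            /* Example: core 93.5% busy, max_pstate 24, current_pstate 16. */
            int32_t core_busy = int_tofp(935) / 10;         /* 93.5 in 24.8 */
            int32_t scaled = mul_fp(core_busy,
                                    div_fp(int_tofp(24), int_tofp(16)));

            /* Keeping the value in fixed point preserves the 0.25 that
             * fp_toint() would have discarded before this change. */
            printf("scaled busy = %d.%02d%%\n",
                   fp_toint(scaled), (scaled & 0xff) * 100 / 256);
            return 0;
    }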
index ba10658a93949fe9adca0863c7c8926a11fe1676..fff8653c8e9b7087a71a0a885853d7c4cdfd30c0 100644 (file)
@@ -102,11 +102,6 @@ static void kirkwood_cpufreq_set_cpu_state(struct cpufreq_policy *policy,
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
-static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
-}
-
 static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
@@ -125,40 +120,17 @@ static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
 /* Module init and exit code */
 static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       int result;
-
-       /* cpuinfo and default policy values */
-       policy->cpuinfo.transition_latency = 5000; /* 5uS */
-       policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
-
-       result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
-
-       return 0;
-}
-
-static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_generic_init(policy, kirkwood_freq_table, 5000);
 }
 
-static struct freq_attr *kirkwood_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver kirkwood_cpufreq_driver = {
        .get    = kirkwood_cpufreq_get_cpu_frequency,
-       .verify = kirkwood_cpufreq_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = kirkwood_cpufreq_target,
        .init   = kirkwood_cpufreq_cpu_init,
-       .exit   = kirkwood_cpufreq_cpu_exit,
+       .exit   = cpufreq_generic_exit,
        .name   = "kirkwood-cpufreq",
-       .attr   = kirkwood_cpufreq_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static int kirkwood_cpufreq_probe(struct platform_device *pdev)
index 4ada1cccb0523632e72c016e9a511dd38b08d33d..14df4974fb458a165b3dacaaeb15e843d1af6c82 100644 (file)
@@ -625,12 +625,6 @@ static void longhaul_setup_voltagescaling(void)
 }
 
 
-static int longhaul_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, longhaul_table);
-}
-
-
 static int longhaul_target(struct cpufreq_policy *policy,
                            unsigned int target_freq, unsigned int relation)
 {
@@ -919,36 +913,18 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
                longhaul_setup_voltagescaling();
 
        policy->cpuinfo.transition_latency = 200000;    /* nsec */
-       policy->cur = calc_speed(longhaul_get_cpu_mult());
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
-       if (ret)
-               return ret;
-
-       cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
 
-       return 0;
+       return cpufreq_table_validate_and_show(policy, longhaul_table);
 }
 
-static int longhaul_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
-static struct freq_attr *longhaul_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver longhaul_driver = {
-       .verify = longhaul_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = longhaul_target,
        .get    = longhaul_get,
        .init   = longhaul_cpu_init,
-       .exit   = longhaul_cpu_exit,
+       .exit   = cpufreq_generic_exit,
        .name   = "longhaul",
-       .attr   = longhaul_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id longhaul_id[] = {
index 5aa031612d5393ff08d196f83b8701ac8c8f5e12..074971b12635af04b7214f510c70e7e8c37ebcbf 100644 (file)
@@ -129,9 +129,7 @@ static int longrun_verify_policy(struct cpufreq_policy *policy)
                return -EINVAL;
 
        policy->cpu = 0;
-       cpufreq_verify_within_limits(policy,
-               policy->cpuinfo.min_freq,
-               policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
 
        if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
            (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
index 7bc3c44d34e2f0ec5493ad9fdf0c8467ccd8a037..2c8ec8e064490be7d56b4e591960b2e11a4bf813 100644 (file)
@@ -131,40 +131,24 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
                return ret;
        }
 
-       policy->cur = loongson2_cpufreq_get(policy->cpu);
-
-       cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
-                                        policy->cpu);
-
-       return cpufreq_frequency_table_cpuinfo(policy,
-                                           &loongson2_clockmod_table[0]);
-}
-
-static int loongson2_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                                             &loongson2_clockmod_table[0]);
+       return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0);
 }
 
 static int loongson2_cpufreq_exit(struct cpufreq_policy *policy)
 {
+       cpufreq_frequency_table_put_attr(policy->cpu);
        clk_put(cpuclk);
        return 0;
 }
 
-static struct freq_attr *loongson2_table_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver loongson2_cpufreq_driver = {
        .name = "loongson2",
        .init = loongson2_cpufreq_cpu_init,
-       .verify = loongson2_cpufreq_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = loongson2_cpufreq_target,
        .get = loongson2_cpufreq_get,
        .exit = loongson2_cpufreq_exit,
-       .attr = loongson2_table_attr,
+       .attr = cpufreq_generic_attr,
 };
 
 static struct platform_device_id platform_device_ids[] = {
index 6168d77b296d78b7d1a452bae6f3ae34c7e036c3..eb1e1766baede24438b948a6f9974d5f6e0877be 100644 (file)
@@ -64,11 +64,6 @@ static struct cpufreq_frequency_table maple_cpu_freqs[] = {
        {0,                     CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr *maple_cpu_freqs_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 /* Power mode data is an array of the 32 bits PCR values to use for
  * the various frequencies, retrieved from the device-tree
  */
@@ -135,11 +130,6 @@ static int maple_scom_query_freq(void)
  * Common interface to the cpufreq core
  */
 
-static int maple_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, maple_cpu_freqs);
-}
-
 static int maple_cpufreq_target(struct cpufreq_policy *policy,
        unsigned int target_freq, unsigned int relation)
 {
@@ -175,27 +165,17 @@ static unsigned int maple_cpufreq_get_speed(unsigned int cpu)
 
 static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       policy->cpuinfo.transition_latency = 12000;
-       policy->cur = maple_cpu_freqs[maple_scom_query_freq()].frequency;
-       /* secondary CPUs are tied to the primary one by the
-        * cpufreq core if in the secondary policy we tell it that
-        * it actually must be one policy together with all others. */
-       cpumask_setall(policy->cpus);
-       cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
-
-       return cpufreq_frequency_table_cpuinfo(policy,
-               maple_cpu_freqs);
+       return cpufreq_generic_init(policy, maple_cpu_freqs, 12000);
 }
 
-
 static struct cpufreq_driver maple_cpufreq_driver = {
        .name           = "maple",
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = maple_cpufreq_cpu_init,
-       .verify         = maple_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = maple_cpufreq_target,
        .get            = maple_cpufreq_get_speed,
-       .attr           = maple_cpu_freqs_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init maple_cpufreq_init(void)
index f31fcfcad514330ca856bcfd015aba3927de8241..ac552d090463d729d4ef0b98aebe893c554ae02c 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -40,13 +40,6 @@ static struct clk *mpu_clk;
 static struct device *mpu_dev;
 static struct regulator *mpu_reg;
 
-static int omap_verify_speed(struct cpufreq_policy *policy)
-{
-       if (!freq_table)
-               return -EINVAL;
-       return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int omap_getspeed(unsigned int cpu)
 {
        unsigned long rate;
@@ -65,7 +58,7 @@ static int omap_target(struct cpufreq_policy *policy,
        unsigned int i;
        int r, ret = 0;
        struct cpufreq_freqs freqs;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long freq, volt = 0, volt_old = 0, tol = 0;
 
        if (!freq_table) {
@@ -105,14 +98,14 @@ static int omap_target(struct cpufreq_policy *policy,
 
        if (mpu_reg) {
                rcu_read_lock();
-               opp = opp_find_freq_ceil(mpu_dev, &freq);
+               opp = dev_pm_opp_find_freq_ceil(mpu_dev, &freq);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        dev_err(mpu_dev, "%s: unable to find MPU OPP for %d\n",
                                __func__, freqs.new);
                        return -EINVAL;
                }
-               volt = opp_get_voltage(opp);
+               volt = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
                tol = volt * OPP_TOLERANCE / 100;
                volt_old = regulator_get_voltage(mpu_reg);
@@ -162,86 +155,57 @@ done:
 static inline void freq_table_free(void)
 {
        if (atomic_dec_and_test(&freq_table_users))
-               opp_free_cpufreq_table(mpu_dev, &freq_table);
+               dev_pm_opp_free_cpufreq_table(mpu_dev, &freq_table);
 }
 
 static int omap_cpu_init(struct cpufreq_policy *policy)
 {
-       int result = 0;
+       int result;
 
        mpu_clk = clk_get(NULL, "cpufreq_ck");
        if (IS_ERR(mpu_clk))
                return PTR_ERR(mpu_clk);
 
-       if (policy->cpu >= NR_CPUS) {
-               result = -EINVAL;
-               goto fail_ck;
-       }
-
-       policy->cur = omap_getspeed(policy->cpu);
-
-       if (!freq_table)
-               result = opp_init_cpufreq_table(mpu_dev, &freq_table);
-
-       if (result) {
-               dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+       if (!freq_table) {
+               result = dev_pm_opp_init_cpufreq_table(mpu_dev, &freq_table);
+               if (result) {
+                       dev_err(mpu_dev,
+                               "%s: cpu%d: failed creating freq table[%d]\n",
                                __func__, policy->cpu, result);
-               goto fail_ck;
+                       goto fail;
+               }
        }
 
        atomic_inc_return(&freq_table_users);
 
-       result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       if (result)
-               goto fail_table;
-
-       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-
-       policy->cur = omap_getspeed(policy->cpu);
-
-       /*
-        * On OMAP SMP configuartion, both processors share the voltage
-        * and clock. So both CPUs needs to be scaled together and hence
-        * needs software co-ordination. Use cpufreq affected_cpus
-        * interface to handle this scenario. Additional is_smp() check
-        * is to keep SMP_ON_UP build working.
-        */
-       if (is_smp())
-               cpumask_setall(policy->cpus);
-
        /* FIXME: what's the actual transition time? */
-       policy->cpuinfo.transition_latency = 300 * 1000;
-
-       return 0;
+       result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+       if (!result)
+               return 0;
 
-fail_table:
        freq_table_free();
-fail_ck:
+fail:
        clk_put(mpu_clk);
        return result;
 }
 
 static int omap_cpu_exit(struct cpufreq_policy *policy)
 {
+       cpufreq_frequency_table_put_attr(policy->cpu);
        freq_table_free();
        clk_put(mpu_clk);
        return 0;
 }
 
-static struct freq_attr *omap_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver omap_driver = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = omap_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = omap_target,
        .get            = omap_getspeed,
        .init           = omap_cpu_init,
        .exit           = omap_cpu_exit,
        .name           = "omap",
-       .attr           = omap_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int omap_cpufreq_probe(struct platform_device *pdev)
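
The OMAP hunks above also track the OPP library rename from the old opp_*() calls and struct opp to dev_pm_opp_*() and struct dev_pm_opp; the calling convention (RCU-protected lookup, then read the voltage) is unchanged. A minimal sketch of that lookup, assuming a device pointer for the scaled MPU and a target frequency in Hz:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/pm_opp.h>	/* was <linux/opp.h> before the rename */
	#include <linux/rcupdate.h>

	/* 'dev' is the device being scaled (the MPU here); 'freq' is in Hz. */
	static int example_opp_voltage(struct device *dev, unsigned long freq,
				       unsigned long *volt)
	{
		struct dev_pm_opp *opp;		/* was 'struct opp' */

		rcu_read_lock();		/* OPP lookups are RCU-protected here */
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			return PTR_ERR(opp);
		}
		*volt = dev_pm_opp_get_voltage(opp);
		rcu_read_unlock();

		return 0;
	}
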
index 2f0a2a65c37f67eeae9789ed12afa20627fd3503..6164c1cca504881d80ecb64d3bfb10e756a17349 100644 (file)
@@ -140,12 +140,6 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 }
 
 
-static int cpufreq_p4_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
-}
-
-
 static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
 {
        if (c->x86 == 0x06) {
@@ -230,25 +224,17 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
                else
                        p4clockmod_table[i].frequency = (stock_freq * i)/8;
        }
-       cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
 
        /* cpuinfo and default policy values */
 
        /* the transition latency is set to be 1 higher than the maximum
         * transition latency of the ondemand governor */
        policy->cpuinfo.transition_latency = 10000001;
-       policy->cur = stock_freq;
 
-       return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
+       return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]);
 }
 
 
-static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
 static unsigned int cpufreq_p4_get(unsigned int cpu)
 {
        u32 l, h;
@@ -267,19 +253,14 @@ static unsigned int cpufreq_p4_get(unsigned int cpu)
        return stock_freq;
 }
 
-static struct freq_attr *p4clockmod_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver p4clockmod_driver = {
-       .verify         = cpufreq_p4_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = cpufreq_p4_target,
        .init           = cpufreq_p4_cpu_init,
-       .exit           = cpufreq_p4_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .get            = cpufreq_p4_get,
        .name           = "p4-clockmod",
-       .attr           = p4clockmod_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id cpufreq_p4_id[] = {
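
Where a driver still has real work to do in ->init (p4-clockmod above builds its table from the measured stock frequency), the cpufreq_frequency_table_cpuinfo() plus cpufreq_frequency_table_get_attr() pair becomes a single cpufreq_table_validate_and_show() call, and an ->exit that only dropped the table attribute becomes the stock cpufreq_generic_exit(). A sketch with bar_* as placeholder names:

	#include <linux/cpufreq.h>

	/* Table built at runtime by the driver's own init code (placeholder). */
	static struct cpufreq_frequency_table *bar_freq_table;

	static int bar_cpu_init(struct cpufreq_policy *policy)
	{
		/* ... driver-specific setup that fills bar_freq_table ... */
		policy->cpuinfo.transition_latency = 10000001;

		/* Validate the table against the policy and expose it via sysfs. */
		return cpufreq_table_validate_and_show(policy, bar_freq_table);
	}

	static struct cpufreq_driver bar_driver = {
		.name	= "bar",
		.init	= bar_cpu_init,
		.exit	= cpufreq_generic_exit,	/* only put_attr() was left to do */
		.verify	= cpufreq_generic_frequency_table_verify,
		.attr	= cpufreq_generic_attr,
	};
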
index 534e43a60d1f9a80b2a952abe254fb01e06c0f12..1cca332728c3c36e2a40a5bc248f1cb39651c741 100644 (file)
@@ -69,11 +69,6 @@ static struct cpufreq_frequency_table pas_freqs[] = {
        {0,     CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr *pas_cpu_freqs_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 /*
  * hardware specific functions
  */
@@ -209,22 +204,13 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
                pr_debug("%d: %d\n", i, pas_freqs[i].frequency);
        }
 
-       policy->cpuinfo.transition_latency = get_gizmo_latency();
-
        cur_astate = get_cur_astate(policy->cpu);
        pr_debug("current astate is at %d\n",cur_astate);
 
        policy->cur = pas_freqs[cur_astate].frequency;
-       cpumask_copy(policy->cpus, cpu_online_mask);
-
        ppc_proc_freq = policy->cur * 1000ul;
 
-       cpufreq_frequency_table_get_attr(pas_freqs, policy->cpu);
-
-       /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max
-        * are set correctly
-        */
-       return cpufreq_frequency_table_cpuinfo(policy, pas_freqs);
+       return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
 
 out_unmap_sdcpwr:
        iounmap(sdcpwr_mapbase);
@@ -253,11 +239,6 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static int pas_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, pas_freqs);
-}
-
 static int pas_cpufreq_target(struct cpufreq_policy *policy,
                              unsigned int target_freq,
                              unsigned int relation)
@@ -300,9 +281,9 @@ static struct cpufreq_driver pas_cpufreq_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = pas_cpufreq_cpu_init,
        .exit           = pas_cpufreq_cpu_exit,
-       .verify         = pas_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = pas_cpufreq_target,
-       .attr           = pas_cpu_freqs_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 /*
index d81c4e5ea0ada8c49afac790f1107bc5ce8f11e6..e2b4f40ff69acaaff51f6461caafd4c8a23bdd16 100644 (file)
@@ -111,8 +111,7 @@ static struct pcc_cpu __percpu *pcc_cpu_info;
 
 static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
 {
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
        return 0;
 }
 
@@ -396,15 +395,14 @@ static int __init pcc_cpufreq_probe(void)
        struct pcc_memory_resource *mem_resource;
        struct pcc_register_resource *reg_resource;
        union acpi_object *out_obj, *member;
-       acpi_handle handle, osc_handle, pcch_handle;
+       acpi_handle handle, osc_handle;
        int ret = 0;
 
        status = acpi_get_handle(NULL, "\\_SB", &handle);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
-       status = acpi_get_handle(handle, "PCCH", &pcch_handle);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(handle, "PCCH"))
                return -ENODEV;
 
        status = acpi_get_handle(handle, "_OSC", &osc_handle);
@@ -560,13 +558,6 @@ static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
                ioread32(&pcch_hdr->nominal) * 1000;
        policy->min = policy->cpuinfo.min_freq =
                ioread32(&pcch_hdr->minimum_frequency) * 1000;
-       policy->cur = pcc_get_freq(cpu);
-
-       if (!policy->cur) {
-               pr_debug("init: Unable to get current CPU frequency\n");
-               result = -EINVAL;
-               goto out;
-       }
 
        pr_debug("init: policy->max is %d, policy->min is %d\n",
                policy->max, policy->min);
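
pcc-cpufreq has no frequency table, so its verify() survives but shrinks to the cpufreq_verify_within_cpu_limits() shorthand, and the probe's "does the PCCH method exist?" handle lookup becomes acpi_has_method(). Roughly, under the assumption that <linux/acpi.h> supplies the ACPI declarations used here and with baz_* as placeholder names:

	#include <linux/acpi.h>		/* assumed: declares acpi_has_method() */
	#include <linux/cpufreq.h>
	#include <linux/types.h>

	static int baz_cpufreq_verify(struct cpufreq_policy *policy)
	{
		/* Clamp policy->min/max to cpuinfo.min_freq..cpuinfo.max_freq. */
		cpufreq_verify_within_cpu_limits(policy);
		return 0;
	}

	static bool baz_has_pcch(acpi_handle handle)
	{
		/* Replaces acpi_get_handle(handle, "PCCH", &dummy) + ACPI_FAILURE(). */
		return acpi_has_method(handle, "PCCH");
	}
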
index a096cd3fa23d1aaef19b3ae27ecd87037f01ba6f..6eac1e2300785ed4c3a9dc1d00ea362014061c50 100644 (file)
@@ -86,11 +86,6 @@ static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
        {0,                     CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr* pmac_cpu_freqs_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static inline void local_delay(unsigned long ms)
 {
        if (no_schedule)
@@ -378,11 +373,6 @@ static unsigned int pmac_cpufreq_get_speed(unsigned int cpu)
        return cur_freq;
 }
 
-static int pmac_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, pmac_cpu_freqs);
-}
-
 static int pmac_cpufreq_target(        struct cpufreq_policy *policy,
                                        unsigned int target_freq,
                                        unsigned int relation)
@@ -402,14 +392,7 @@ static int pmac_cpufreq_target(    struct cpufreq_policy *policy,
 
 static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       if (policy->cpu != 0)
-               return -ENODEV;
-
-       policy->cpuinfo.transition_latency      = transition_latency;
-       policy->cur = cur_freq;
-
-       cpufreq_frequency_table_get_attr(pmac_cpu_freqs, policy->cpu);
-       return cpufreq_frequency_table_cpuinfo(policy, pmac_cpu_freqs);
+       return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency);
 }
 
 static u32 read_gpio(struct device_node *np)
@@ -469,14 +452,14 @@ static int pmac_cpufreq_resume(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver pmac_cpufreq_driver = {
-       .verify         = pmac_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = pmac_cpufreq_target,
        .get            = pmac_cpufreq_get_speed,
        .init           = pmac_cpufreq_cpu_init,
        .suspend        = pmac_cpufreq_suspend,
        .resume         = pmac_cpufreq_resume,
        .flags          = CPUFREQ_PM_NO_WARN,
-       .attr           = pmac_cpu_freqs_attr,
+       .attr           = cpufreq_generic_attr,
        .name           = "powermac",
 };
 
index 3a51ad7e47c8c67c95da5f3691d7f89e397c34a6..5261b92d768bb31fa89d60bb2692768d412882d8 100644 (file)
@@ -70,11 +70,6 @@ static struct cpufreq_frequency_table g5_cpu_freqs[] = {
        {0,                     CPUFREQ_TABLE_END},
 };
 
-static struct freq_attr* g5_cpu_freqs_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 /* Power mode data is an array of the 32 bits PCR values to use for
  * the various frequencies, retrieved from the device-tree
  */
@@ -142,7 +137,7 @@ static void g5_vdnap_switch_volt(int speed_mode)
                pmf_call_one(pfunc_vdnap0_complete, &args);
                if (done)
                        break;
-               msleep(1);
+               usleep_range(1000, 1000);
        }
        if (done == 0)
                printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -241,7 +236,7 @@ static void g5_pfunc_switch_volt(int speed_mode)
                if (pfunc_cpu1_volt_low)
                        pmf_call_one(pfunc_cpu1_volt_low, NULL);
        }
-       msleep(10); /* should be faster , to fix */
+       usleep_range(10000, 10000); /* should be faster , to fix */
 }
 
 /*
@@ -286,7 +281,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
                pmf_call_one(pfunc_slewing_done, &args);
                if (done)
                        break;
-               msleep(1);
+               usleep_range(500, 500);
        }
        if (done == 0)
                printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
@@ -317,11 +312,6 @@ static int g5_pfunc_query_freq(void)
  * Common interface to the cpufreq core
  */
 
-static int g5_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, g5_cpu_freqs);
-}
-
 static int g5_cpufreq_target(struct cpufreq_policy *policy,
        unsigned int target_freq, unsigned int relation)
 {
@@ -357,27 +347,17 @@ static unsigned int g5_cpufreq_get_speed(unsigned int cpu)
 
 static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       policy->cpuinfo.transition_latency = transition_latency;
-       policy->cur = g5_cpu_freqs[g5_query_freq()].frequency;
-       /* secondary CPUs are tied to the primary one by the
-        * cpufreq core if in the secondary policy we tell it that
-        * it actually must be one policy together with all others. */
-       cpumask_copy(policy->cpus, cpu_online_mask);
-       cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
-
-       return cpufreq_frequency_table_cpuinfo(policy,
-               g5_cpu_freqs);
+       return cpufreq_generic_init(policy, g5_cpu_freqs, transition_latency);
 }
 
-
 static struct cpufreq_driver g5_cpufreq_driver = {
        .name           = "powermac",
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = g5_cpufreq_cpu_init,
-       .verify         = g5_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = g5_cpufreq_target,
        .get            = g5_cpufreq_get_speed,
-       .attr           = g5_cpu_freqs_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 
@@ -397,7 +377,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
        /* Check supported platforms */
        if (of_machine_is_compatible("PowerMac8,1") ||
            of_machine_is_compatible("PowerMac8,2") ||
-           of_machine_is_compatible("PowerMac9,1"))
+           of_machine_is_compatible("PowerMac9,1") ||
+           of_machine_is_compatible("PowerMac12,1"))
                use_volts_smu = 1;
        else if (of_machine_is_compatible("PowerMac11,2"))
                use_volts_vdnap = 1;
@@ -647,8 +628,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
        g5_cpu_freqs[0].frequency = max_freq;
        g5_cpu_freqs[1].frequency = min_freq;
 
+       /* Based on a measurement on Xserve G5, rounded up. */
+       transition_latency = 10 * NSEC_PER_MSEC;
+
        /* Set callbacks */
-       transition_latency = CPUFREQ_ETERNAL;
        g5_switch_volt = g5_pfunc_switch_volt;
        g5_switch_freq = g5_pfunc_switch_freq;
        g5_query_freq = g5_pfunc_query_freq;
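
Besides the driver conversion, the PowerMac G5 hunks above replace msleep(1)/msleep(10) in short polling loops with usleep_range(): msleep() is jiffies-based and can oversleep small delays considerably, while usleep_range() uses hrtimers. A sketch of such a polling loop; wait_for_slew() and its callback are purely illustrative:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Poll a hypothetical completion hook, sleeping roughly 1 ms per attempt. */
	static int wait_for_slew(bool (*done)(void))
	{
		int i;

		for (i = 0; i < 100; i++) {
			if (done())
				return 0;
			/* hrtimer-based; a wider range would allow timer coalescing */
			usleep_range(1000, 1000);
		}

		return -ETIMEDOUT;
	}
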
index 85f1c8c25ddc5d20a361e219d023548b356b835b..eda17024a34adb0e11cd9cb4553d4d0ca7d18164 100644 (file)
@@ -104,19 +104,6 @@ static void powernow_k6_set_state(struct cpufreq_policy *policy,
 }
 
 
-/**
- * powernow_k6_verify - verifies a new CPUfreq policy
- * @policy: new policy
- *
- * Policy must be within lowest and highest possible CPU Frequency,
- * and at least one possible state must be within min and max.
- */
-static int powernow_k6_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &clock_ratio[0]);
-}
-
-
 /**
  * powernow_k6_setpolicy - sets a new CPUFreq policy
  * @policy: new policy
@@ -145,7 +132,6 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
 static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 {
        unsigned int i, f;
-       int result;
 
        if (policy->cpu != 0)
                return -ENODEV;
@@ -165,15 +151,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.transition_latency = 200000;
-       policy->cur = busfreq * max_multiplier;
-
-       result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
 
-       return 0;
+       return cpufreq_table_validate_and_show(policy, clock_ratio);
 }
 
 
@@ -195,19 +174,14 @@ static unsigned int powernow_k6_get(unsigned int cpu)
        return ret;
 }
 
-static struct freq_attr *powernow_k6_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver powernow_k6_driver = {
-       .verify         = powernow_k6_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = powernow_k6_target,
        .init           = powernow_k6_cpu_init,
        .exit           = powernow_k6_cpu_exit,
        .get            = powernow_k6_get,
        .name           = "powernow-k6",
-       .attr           = powernow_k6_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id powernow_k6_ids[] = {
index 14ce480be8ab2503515cb1d61b8f30b5892d85d2..44d345bad6fb1d9b93345ea5b80e24c7cfc0b281 100644 (file)
@@ -549,11 +549,6 @@ static int powernow_target(struct cpufreq_policy *policy,
 }
 
 
-static int powernow_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, powernow_table);
-}
-
 /*
  * We use the fact that the bus frequency is somehow
  * a multiple of 100000/3 khz, then we compute sgtc according
@@ -678,11 +673,7 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency =
                cpufreq_scale(2000000UL, fsb, latency);
 
-       policy->cur = powernow_get(0);
-
-       cpufreq_frequency_table_get_attr(powernow_table, policy->cpu);
-
-       return cpufreq_frequency_table_cpuinfo(policy, powernow_table);
+       return cpufreq_table_validate_and_show(policy, powernow_table);
 }
 
 static int powernow_cpu_exit(struct cpufreq_policy *policy)
@@ -701,13 +692,8 @@ static int powernow_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static struct freq_attr *powernow_table_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver powernow_driver = {
-       .verify         = powernow_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = powernow_target,
        .get            = powernow_get,
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
@@ -716,7 +702,7 @@ static struct cpufreq_driver powernow_driver = {
        .init           = powernow_cpu_init,
        .exit           = powernow_cpu_exit,
        .name           = "powernow-k7",
-       .attr           = powernow_table_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init powernow_init(void)
index 2344a9ed17f3c38017701e075c945df8ae0f4bdf..298beb742ebb1e62ef743c7e3392bd0f980bc3ba 100644 (file)
@@ -1053,17 +1053,6 @@ static int powernowk8_target(struct cpufreq_policy *pol,
        return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta);
 }
 
-/* Driver entry point to verify the policy and range of frequencies */
-static int powernowk8_verify(struct cpufreq_policy *pol)
-{
-       struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
-
-       if (!data)
-               return -EINVAL;
-
-       return cpufreq_frequency_table_verify(pol, data->powernow_table);
-}
-
 struct init_on_cpu {
        struct powernow_k8_data *data;
        int rc;
@@ -1152,11 +1141,8 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
        cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
        data->available_cores = pol->cpus;
 
-       pol->cur = find_khz_freq_from_fid(data->currfid);
-       pr_debug("policy current frequency %d kHz\n", pol->cur);
-
        /* min/max the cpu is capable of */
-       if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) {
+       if (cpufreq_table_validate_and_show(pol, data->powernow_table)) {
                printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n");
                powernow_k8_cpu_exit_acpi(data);
                kfree(data->powernow_table);
@@ -1164,8 +1150,6 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
                return -EINVAL;
        }
 
-       cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
-
        pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
                 data->currfid, data->currvid);
 
@@ -1227,20 +1211,15 @@ out:
        return khz;
 }
 
-static struct freq_attr *powernow_k8_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver cpufreq_amd64_driver = {
-       .verify         = powernowk8_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = powernowk8_target,
        .bios_limit     = acpi_processor_get_bios_limit,
        .init           = powernowk8_cpu_init,
        .exit           = powernowk8_cpu_exit,
        .get            = powernowk8_get,
        .name           = "powernow-k8",
-       .attr           = powernow_k8_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static void __request_acpi_cpufreq(void)
index 60e81d524ea8231666210ecca2c97407c0a5a279..a0f562ca292dd6124226329a1202ba00435c5a6a 100644 (file)
@@ -202,7 +202,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
        table[i].frequency = CPUFREQ_TABLE_END;
 
        /* set the min and max frequency properly */
-       ret = cpufreq_frequency_table_cpuinfo(policy, table);
+       ret = cpufreq_table_validate_and_show(policy, table);
        if (ret) {
                pr_err("invalid frequency table: %d\n", ret);
                goto err_nomem1;
@@ -217,9 +217,6 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
                per_cpu(cpu_data, i) = data;
 
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       policy->cur = corenet_cpufreq_get_speed(policy->cpu);
-
-       cpufreq_frequency_table_get_attr(table, cpu);
        of_node_put(np);
 
        return 0;
@@ -253,14 +250,6 @@ static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static int corenet_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       struct cpufreq_frequency_table *table =
-               per_cpu(cpu_data, policy->cpu)->table;
-
-       return cpufreq_frequency_table_verify(policy, table);
-}
-
 static int corenet_cpufreq_target(struct cpufreq_policy *policy,
                unsigned int target_freq, unsigned int relation)
 {
@@ -293,20 +282,15 @@ static int corenet_cpufreq_target(struct cpufreq_policy *policy,
        return ret;
 }
 
-static struct freq_attr *corenet_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
        .name           = "ppc_cpufreq",
        .flags          = CPUFREQ_CONST_LOOPS,
        .init           = corenet_cpufreq_cpu_init,
        .exit           = __exit_p(corenet_cpufreq_cpu_exit),
-       .verify         = corenet_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = corenet_cpufreq_target,
        .get            = corenet_cpufreq_get_speed,
-       .attr           = corenet_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct of_device_id node_matches[] __initdata = {
index 2e448f0bbdc583465672e7ec40804f1d01467224..38540d1f59390b1306b286bcb36fb34a3980bbcb 100644 (file)
@@ -123,22 +123,9 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
        cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
-       cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
-
        /* this ensures that policy->cpuinfo_min
         * and policy->cpuinfo_max are set correctly */
-       return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs);
-}
-
-static int cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
-}
-
-static int cbe_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, cbe_freqs);
+       return cpufreq_table_validate_and_show(policy, cbe_freqs);
 }
 
 static int cbe_cpufreq_target(struct cpufreq_policy *policy,
@@ -176,10 +163,10 @@ static int cbe_cpufreq_target(struct cpufreq_policy *policy,
 }
 
 static struct cpufreq_driver cbe_cpufreq_driver = {
-       .verify         = cbe_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = cbe_cpufreq_target,
        .init           = cbe_cpufreq_cpu_init,
-       .exit           = cbe_cpufreq_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .name           = "cbe-cpufreq",
        .flags          = CPUFREQ_CONST_LOOPS,
 };
index 8749eaf1879338ad331f587c9a716a501ab0c701..29aca574317bd02bb3932bb890da9726ccd8268d 100644 (file)
@@ -262,23 +262,6 @@ static u32 mdrefr_dri(unsigned int freq)
        return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32;
 }
 
-/* find a valid frequency point */
-static int pxa_verify_policy(struct cpufreq_policy *policy)
-{
-       struct cpufreq_frequency_table *pxa_freqs_table;
-       pxa_freqs_t *pxa_freqs;
-       int ret;
-
-       find_freq_tables(&pxa_freqs_table, &pxa_freqs);
-       ret = cpufreq_frequency_table_verify(policy, pxa_freqs_table);
-
-       if (freq_debug)
-               pr_debug("Verified CPU policy: %dKhz min to %dKhz max\n",
-                        policy->min, policy->max);
-
-       return ret;
-}
-
 static unsigned int pxa_cpufreq_get(unsigned int cpu)
 {
        return get_clk_frequency_khz(0);
@@ -414,8 +397,6 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 
        /* set default policy and cpuinfo */
        policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
-       policy->cur = get_clk_frequency_khz(0);    /* current freq */
-       policy->min = policy->max = policy->cur;
 
        /* Generate pxa25x the run cpufreq_frequency_table struct */
        for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) {
@@ -453,10 +434,12 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
                find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
                pr_info("PXA255 cpufreq using %s frequency table\n",
                        pxa255_turbo_table ? "turbo" : "run");
-               cpufreq_frequency_table_cpuinfo(policy, pxa255_freq_table);
+
+               cpufreq_table_validate_and_show(policy, pxa255_freq_table);
+       }
+       else if (cpu_is_pxa27x()) {
+               cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
        }
-       else if (cpu_is_pxa27x())
-               cpufreq_frequency_table_cpuinfo(policy, pxa27x_freq_table);
 
        printk(KERN_INFO "PXA CPU frequency change support initialized\n");
 
@@ -464,9 +447,10 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver pxa_cpufreq_driver = {
-       .verify = pxa_verify_policy,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = pxa_set_target,
        .init   = pxa_cpufreq_init,
+       .exit   = cpufreq_generic_exit,
        .get    = pxa_cpufreq_get,
        .name   = "PXA2xx",
 };
index d26306fb00d2e8a749ed044c816533f101defd11..47fbee49d6e594f2dfd6ca1e55ddebbe0b3a2f90 100644 (file)
@@ -108,7 +108,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
        pxa3xx_freqs_num = num;
        pxa3xx_freqs_table = table;
 
-       return cpufreq_frequency_table_cpuinfo(policy, table);
+       return cpufreq_table_validate_and_show(policy, table);
 }
 
 static void __update_core_freq(struct pxa3xx_freq_info *info)
@@ -150,11 +150,6 @@ static void __update_bus_freq(struct pxa3xx_freq_info *info)
                cpu_relax();
 }
 
-static int pxa3xx_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, pxa3xx_freqs_table);
-}
-
 static unsigned int pxa3xx_cpufreq_get(unsigned int cpu)
 {
        return pxa3xx_get_clk_frequency_khz(0);
@@ -206,11 +201,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
        int ret = -EINVAL;
 
        /* set default policy and cpuinfo */
-       policy->cpuinfo.min_freq = 104000;
-       policy->cpuinfo.max_freq = (cpu_is_pxa320()) ? 806000 : 624000;
+       policy->min = policy->cpuinfo.min_freq = 104000;
+       policy->max = policy->cpuinfo.max_freq =
+               (cpu_is_pxa320()) ? 806000 : 624000;
        policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */
-       policy->max = pxa3xx_get_clk_frequency_khz(0);
-       policy->cur = policy->min = policy->max;
 
        if (cpu_is_pxa300() || cpu_is_pxa310())
                ret = setup_freqs_table(policy, pxa300_freqs,
@@ -230,9 +224,10 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver pxa3xx_cpufreq_driver = {
-       .verify         = pxa3xx_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = pxa3xx_cpufreq_set,
        .init           = pxa3xx_cpufreq_init,
+       .exit           = cpufreq_generic_exit,
        .get            = pxa3xx_cpufreq_get,
        .name           = "pxa3xx-cpufreq",
 };
index 22dcb81ef9d0e9069b8fc4d49249713d59d41dd9..26a35d1371574f2e4abef7dd05beea6becb8f2a2 100644 (file)
@@ -87,16 +87,6 @@ static struct cpufreq_frequency_table s3c2450_freq_table[] = {
        { 0, CPUFREQ_TABLE_END },
 };
 
-static int s3c2416_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
-       struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
-
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       return cpufreq_frequency_table_verify(policy, s3c_freq->freq_table);
-}
-
 static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
 {
        struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
@@ -486,20 +476,14 @@ static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
                freq++;
        }
 
-       policy->cur = clk_get_rate(s3c_freq->armclk) / 1000;
-
        /* Datasheet says PLL stabalisation time must be at least 300us,
         * so but add some fudge. (reference in LOCKCON0 register description)
         */
-       policy->cpuinfo.transition_latency = (500 * 1000) +
-                                            s3c_freq->regulator_latency;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, s3c_freq->freq_table);
+       ret = cpufreq_generic_init(policy, s3c_freq->freq_table,
+                       (500 * 1000) + s3c_freq->regulator_latency);
        if (ret)
                goto err_freq_table;
 
-       cpufreq_frequency_table_get_attr(s3c_freq->freq_table, 0);
-
        register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
 
        return 0;
@@ -518,19 +502,14 @@ err_hclk:
        return ret;
 }
 
-static struct freq_attr *s3c2416_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver s3c2416_cpufreq_driver = {
        .flags          = 0,
-       .verify         = s3c2416_cpufreq_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = s3c2416_cpufreq_set_target,
        .get            = s3c2416_cpufreq_get_speed,
        .init           = s3c2416_cpufreq_driver_init,
        .name           = "s3c2416",
-       .attr           = s3c2416_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init s3c2416_cpufreq_init(void)
index b0f343fcb7eefdfe40b942b202398372a618df44..485088253358d1a5712c9f73cfea9036f65aa156 100644 (file)
@@ -373,23 +373,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
 
 static int s3c_cpufreq_init(struct cpufreq_policy *policy)
 {
-       printk(KERN_INFO "%s: initialising policy %p\n", __func__, policy);
-
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       policy->cur = s3c_cpufreq_get(0);
-       policy->min = policy->cpuinfo.min_freq = 0;
-       policy->max = policy->cpuinfo.max_freq = cpu_cur.info->max.fclk / 1000;
-       policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-
-       /* feed the latency information from the cpu driver */
-       policy->cpuinfo.transition_latency = cpu_cur.info->latency;
-
-       if (ftab)
-               cpufreq_frequency_table_cpuinfo(policy, ftab);
-
-       return 0;
+       return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
 }
 
 static int __init s3c_cpufreq_initclks(void)
@@ -416,14 +400,6 @@ static int __init s3c_cpufreq_initclks(void)
        return 0;
 }
 
-static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       return 0;
-}
-
 #ifdef CONFIG_PM
 static struct cpufreq_frequency_table suspend_pll;
 static unsigned int suspend_freq;
@@ -473,7 +449,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver s3c24xx_driver = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = s3c_cpufreq_verify,
        .target         = s3c_cpufreq_target,
        .get            = s3c_cpufreq_get,
        .init           = s3c_cpufreq_init,
index 8a72b0c555f846da21d25d02d9f94d427a7796b5..461617332033de80fb7c1ff774bfd15106670830 100644 (file)
@@ -54,14 +54,6 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
 };
 #endif
 
-static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
-{
-       if (policy->cpu != 0)
-               return -EINVAL;
-
-       return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
-}
-
 static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
 {
        if (cpu != 0)
@@ -166,7 +158,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
                if (freq->frequency == CPUFREQ_ENTRY_INVALID)
                        continue;
 
-               dvfs = &s3c64xx_dvfs_table[freq->index];
+               dvfs = &s3c64xx_dvfs_table[freq->driver_data];
                found = 0;
 
                for (i = 0; i < count; i++) {
@@ -243,15 +235,12 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
                freq++;
        }
 
-       policy->cur = clk_get_rate(armclk) / 1000;
-
        /* Datasheet says PLL stabalisation time (if we were to use
         * the PLLs, which we don't currently) is ~300us worst case,
         * but add some fudge.
         */
-       policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
+       ret = cpufreq_generic_init(policy, s3c64xx_freq_table,
+                       (500 * 1000) + regulator_latency);
        if (ret != 0) {
                pr_err("Failed to configure frequency table: %d\n",
                       ret);
@@ -264,7 +253,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver s3c64xx_cpufreq_driver = {
        .flags          = 0,
-       .verify         = s3c64xx_cpufreq_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = s3c64xx_cpufreq_set_target,
        .get            = s3c64xx_cpufreq_get_speed,
        .init           = s3c64xx_cpufreq_driver_init,
index 5c77570737937c298577303904baa60823fdce60..600b4f472e28432ad9ac6b0bab2b1a3d5091104f 100644 (file)
@@ -174,14 +174,6 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
        __raw_writel(tmp1, reg);
 }
 
-static int s5pv210_verify_speed(struct cpufreq_policy *policy)
-{
-       if (policy->cpu)
-               return -EINVAL;
-
-       return cpufreq_frequency_table_verify(policy, s5pv210_freq_table);
-}
-
 static unsigned int s5pv210_getspeed(unsigned int cpu)
 {
        if (cpu)
@@ -551,13 +543,7 @@ static int __init s5pv210_cpu_init(struct cpufreq_policy *policy)
        s5pv210_dram_conf[1].refresh = (__raw_readl(S5P_VA_DMC1 + 0x30) * 1000);
        s5pv210_dram_conf[1].freq = clk_get_rate(dmc1_clk);
 
-       policy->cur = policy->min = policy->max = s5pv210_getspeed(0);
-
-       cpufreq_frequency_table_get_attr(s5pv210_freq_table, policy->cpu);
-
-       policy->cpuinfo.transition_latency = 40000;
-
-       return cpufreq_frequency_table_cpuinfo(policy, s5pv210_freq_table);
+       return cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
 
 out_dmc1:
        clk_put(dmc0_clk);
@@ -605,7 +591,7 @@ static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
 
 static struct cpufreq_driver s5pv210_driver = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = s5pv210_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = s5pv210_target,
        .get            = s5pv210_getspeed,
        .init           = s5pv210_cpu_init,
index cff18e87ca58721cc9ae13e432816d712d7d0c2d..b282cea47e628d67551c2366b0d494dda781cff7 100644 (file)
@@ -218,18 +218,12 @@ static int sa1100_target(struct cpufreq_policy *policy,
 
 static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
 {
-       if (policy->cpu != 0)
-               return -EINVAL;
-       policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
-       policy->cpuinfo.min_freq = 59000;
-       policy->cpuinfo.max_freq = 287000;
-       policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       return 0;
+       return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
 }
 
 static struct cpufreq_driver sa1100_driver __refdata = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = sa11x0_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = sa1100_target,
        .get            = sa11x0_getspeed,
        .init           = sa1100_cpu_init,
index 39c90b6f42865eb8f8950ce0f7d7ce84d463a9ee..bca04c0b4a734369ef5281a6acebb575a2940da1 100644 (file)
@@ -332,20 +332,14 @@ static int sa1110_target(struct cpufreq_policy *policy,
 
 static int __init sa1110_cpu_init(struct cpufreq_policy *policy)
 {
-       if (policy->cpu != 0)
-               return -EINVAL;
-       policy->cur = policy->min = policy->max = sa11x0_getspeed(0);
-       policy->cpuinfo.min_freq = 59000;
-       policy->cpuinfo.max_freq = 287000;
-       policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       return 0;
+       return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL);
 }
 
 /* sa1110_driver needs __refdata because it must remain after init registers
  * it with cpufreq_register_driver() */
 static struct cpufreq_driver sa1110_driver __refdata = {
        .flags          = CPUFREQ_STICKY,
-       .verify         = sa11x0_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = sa1110_target,
        .get            = sa11x0_getspeed,
        .init           = sa1110_cpu_init,
index d6f6c6f4efa76ac6f82a97ddeff3292a06cfd18c..9047ab1ca014b9e4b735abc60f244a54d1a74733 100644 (file)
@@ -78,11 +78,6 @@ static void sc520_freq_set_cpu_state(struct cpufreq_policy *policy,
        cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 };
 
-static int sc520_freq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]);
-}
-
 static int sc520_freq_target(struct cpufreq_policy *policy,
                            unsigned int target_freq,
                            unsigned int relation)
@@ -106,7 +101,6 @@ static int sc520_freq_target(struct cpufreq_policy *policy,
 static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
 {
        struct cpuinfo_x86 *c = &cpu_data(0);
-       int result;
 
        /* capability check */
        if (c->x86_vendor != X86_VENDOR_AMD ||
@@ -115,39 +109,19 @@ static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
 
        /* cpuinfo and default policy values */
        policy->cpuinfo.transition_latency = 1000000; /* 1ms */
-       policy->cur = sc520_freq_get_cpu_frequency(0);
-
-       result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu);
-
-       return 0;
-}
-
 
-static int sc520_freq_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_table_validate_and_show(policy, sc520_freq_table);
 }
 
 
-static struct freq_attr *sc520_freq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
-
 static struct cpufreq_driver sc520_freq_driver = {
        .get    = sc520_freq_get_cpu_frequency,
-       .verify = sc520_freq_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = sc520_freq_target,
        .init   = sc520_freq_cpu_init,
-       .exit   = sc520_freq_cpu_exit,
+       .exit   = cpufreq_generic_exit,
        .name   = "sc520_freq",
-       .attr   = sc520_freq_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id sc520_ids[] = {
index ffc6d24b0cfbed764db6b6c8e2015330e6fb9fbd..387af12503a64e43f15d8fd0fa1d6e108a741f62 100644 (file)
@@ -87,15 +87,12 @@ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
        if (freq_table)
                return cpufreq_frequency_table_verify(policy, freq_table);
 
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
+       cpufreq_verify_within_cpu_limits(policy);
 
        policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
        policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
 
-       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-                                    policy->cpuinfo.max_freq);
-
+       cpufreq_verify_within_cpu_limits(policy);
        return 0;
 }
 
@@ -114,15 +111,13 @@ static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
                return PTR_ERR(cpuclk);
        }
 
-       policy->cur = sh_cpufreq_get(cpu);
-
        freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
        if (freq_table) {
                int result;
 
-               result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-               if (!result)
-                       cpufreq_frequency_table_get_attr(freq_table, cpu);
+               result = cpufreq_table_validate_and_show(policy, freq_table);
+               if (result)
+                       return result;
        } else {
                dev_notice(dev, "no frequency table found, falling back "
                           "to rate rounding.\n");
@@ -154,11 +149,6 @@ static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-static struct freq_attr *sh_freq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver sh_cpufreq_driver = {
        .name           = "sh",
        .get            = sh_cpufreq_get,
@@ -166,7 +156,7 @@ static struct cpufreq_driver sh_cpufreq_driver = {
        .verify         = sh_cpufreq_verify,
        .init           = sh_cpufreq_cpu_init,
        .exit           = sh_cpufreq_cpu_exit,
-       .attr           = sh_freq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init sh_cpufreq_module_init(void)
index cf5bc2ca16fa11f4155d757c6d5219b6691a6b81..291688c1da9acb5883fe3c2339ebbdf216a81696 100644 (file)
@@ -295,12 +295,6 @@ static int us2e_freq_target(struct cpufreq_policy *policy,
        return 0;
 }
 
-static int us2e_freq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                                             &us2e_freq_table[policy->cpu].table[0]);
-}
-
 static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
 {
        unsigned int cpu = policy->cpu;
@@ -324,13 +318,15 @@ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency = 0;
        policy->cur = clock_tick;
 
-       return cpufreq_frequency_table_cpuinfo(policy, table);
+       return cpufreq_table_validate_and_show(policy, table);
 }
 
 static int us2e_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-       if (cpufreq_us2e_driver)
+       if (cpufreq_us2e_driver) {
+               cpufreq_frequency_table_put_attr(policy->cpu);
                us2e_set_cpu_divider_index(policy, 0);
+       }
 
        return 0;
 }
@@ -361,7 +357,7 @@ static int __init us2e_freq_init(void)
                        goto err_out;
 
                driver->init = us2e_freq_cpu_init;
-               driver->verify = us2e_freq_verify;
+               driver->verify = cpufreq_generic_frequency_table_verify;
                driver->target = us2e_freq_target;
                driver->get = us2e_freq_get;
                driver->exit = us2e_freq_cpu_exit;
index ac76b489979d48cac1278525ab861f46b69f8c86..9b3dbd31362efaa3f5b3a1dcffe5a34c16f42f49 100644 (file)
@@ -156,12 +156,6 @@ static int us3_freq_target(struct cpufreq_policy *policy,
        return 0;
 }
 
-static int us3_freq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                                             &us3_freq_table[policy->cpu].table[0]);
-}
-
 static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
 {
        unsigned int cpu = policy->cpu;
@@ -181,13 +175,15 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency = 0;
        policy->cur = clock_tick;
 
-       return cpufreq_frequency_table_cpuinfo(policy, table);
+       return cpufreq_table_validate_and_show(policy, table);
 }
 
 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
 {
-       if (cpufreq_us3_driver)
+       if (cpufreq_us3_driver) {
+               cpufreq_frequency_table_put_attr(policy->cpu);
                us3_set_cpu_divider_index(policy, 0);
+       }
 
        return 0;
 }
@@ -222,7 +218,7 @@ static int __init us3_freq_init(void)
                        goto err_out;
 
                driver->init = us3_freq_cpu_init;
-               driver->verify = us3_freq_verify;
+               driver->verify = cpufreq_generic_frequency_table_verify;
                driver->target = us3_freq_target;
                driver->get = us3_freq_get;
                driver->exit = us3_freq_cpu_exit;
index 3f418166ce02b8fc57a664f3d870bfb8508f3018..8841366a2068e2c41898daf982997820a10ed725 100644 (file)
@@ -30,11 +30,6 @@ static struct {
        u32 cnt;
 } spear_cpufreq;
 
-static int spear_cpufreq_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
-}
-
 static unsigned int spear_cpufreq_get(unsigned int cpu)
 {
        return clk_get_rate(spear_cpufreq.clk) / 1000;
@@ -176,43 +171,19 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
 
 static int spear_cpufreq_init(struct cpufreq_policy *policy)
 {
-       int ret;
-
-       ret = cpufreq_frequency_table_cpuinfo(policy, spear_cpufreq.freq_tbl);
-       if (ret) {
-               pr_err("cpufreq_frequency_table_cpuinfo() failed");
-               return ret;
-       }
-
-       cpufreq_frequency_table_get_attr(spear_cpufreq.freq_tbl, policy->cpu);
-       policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
-       policy->cur = spear_cpufreq_get(0);
-
-       cpumask_setall(policy->cpus);
-
-       return 0;
-}
-
-static int spear_cpufreq_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl,
+                       spear_cpufreq.transition_latency);
 }
 
-static struct freq_attr *spear_cpufreq_attr[] = {
-        &cpufreq_freq_attr_scaling_available_freqs,
-        NULL,
-};
-
 static struct cpufreq_driver spear_cpufreq_driver = {
        .name           = "cpufreq-spear",
        .flags          = CPUFREQ_STICKY,
-       .verify         = spear_cpufreq_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = spear_cpufreq_target,
        .get            = spear_cpufreq_get,
        .init           = spear_cpufreq_init,
-       .exit           = spear_cpufreq_exit,
-       .attr           = spear_cpufreq_attr,
+       .exit           = cpufreq_generic_exit,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int spear_cpufreq_driver_init(void)
index f897d510584285bea376474ca6a3198eeef7d74f..25e45f89acac1b550c227c969e9fe9ebf659b077 100644 (file)
@@ -343,9 +343,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
 static int centrino_cpu_init(struct cpufreq_policy *policy)
 {
        struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
-       unsigned freq;
        unsigned l, h;
-       int ret;
        int i;
 
        /* Only Intel makes Enhanced Speedstep-capable CPUs */
@@ -373,9 +371,8 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
                return -ENODEV;
        }
 
-       if (centrino_cpu_init_table(policy)) {
+       if (centrino_cpu_init_table(policy))
                return -ENODEV;
-       }
 
        /* Check to see if Enhanced SpeedStep is enabled, and try to
           enable it if not. */
@@ -395,22 +392,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
                }
        }
 
-       freq = get_cur_freq(policy->cpu);
        policy->cpuinfo.transition_latency = 10000;
                                                /* 10uS transition latency */
-       policy->cur = freq;
-
-       pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);
 
-       ret = cpufreq_frequency_table_cpuinfo(policy,
+       return cpufreq_table_validate_and_show(policy,
                per_cpu(centrino_model, policy->cpu)->op_points);
-       if (ret)
-               return (ret);
-
-       cpufreq_frequency_table_get_attr(
-               per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
-
-       return 0;
 }
 
 static int centrino_cpu_exit(struct cpufreq_policy *policy)
@@ -427,19 +413,6 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
        return 0;
 }
 
-/**
- * centrino_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within this model's frequency range at least one
- * border included.
- */
-static int centrino_verify (struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy,
-                       per_cpu(centrino_model, policy->cpu)->op_points);
-}
-
 /**
  * centrino_setpolicy - set a new CPUFreq policy
  * @policy: new policy
@@ -561,20 +534,15 @@ out:
        return retval;
 }
 
-static struct freq_attr* centrino_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver centrino_driver = {
        .name           = "centrino", /* should be speedstep-centrino,
                                         but there's a 16 char limit */
        .init           = centrino_cpu_init,
        .exit           = centrino_cpu_exit,
-       .verify         = centrino_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = centrino_target,
        .get            = get_cur_freq,
-       .attr           = centrino_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 /*
index 5355abb69afc972d88c9f509924d3e779fa0cdac..1a8b01bd0feca15998092d539a274cce543216ad 100644 (file)
@@ -289,18 +289,6 @@ static int speedstep_target(struct cpufreq_policy *policy,
 }
 
 
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
 struct get_freqs {
        struct cpufreq_policy *policy;
        int ret;
@@ -320,8 +308,7 @@ static void get_freqs_on_cpu(void *_get_freqs)
 
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
-       int result;
-       unsigned int policy_cpu, speed;
+       unsigned int policy_cpu;
        struct get_freqs gf;
 
        /* only run on CPU to be set, or on its sibling */
@@ -336,49 +323,18 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
        if (gf.ret)
                return gf.ret;
 
-       /* get current speed setting */
-       speed = speedstep_get(policy_cpu);
-       if (!speed)
-               return -EIO;
-
-       pr_debug("currently at %s speed setting - %i MHz\n",
-               (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
-               ? "low" : "high",
-               (speed / 1000));
-
-       /* cpuinfo and default policy values */
-       policy->cur = speed;
-
-       result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
-       return 0;
-}
-
-
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_table_validate_and_show(policy, speedstep_freqs);
 }
 
-static struct freq_attr *speedstep_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 
 static struct cpufreq_driver speedstep_driver = {
        .name   = "speedstep-ich",
-       .verify = speedstep_verify,
+       .verify = cpufreq_generic_frequency_table_verify,
        .target = speedstep_target,
        .init   = speedstep_cpu_init,
-       .exit   = speedstep_cpu_exit,
+       .exit   = cpufreq_generic_exit,
        .get    = speedstep_get,
-       .attr   = speedstep_attr,
+       .attr   = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id ss_smi_ids[] = {
index abfba4f731ebdc4212696732a765b56083bc8252..a02b649c9647ef04432a811abb0ade749eb1a823 100644 (file)
@@ -264,23 +264,9 @@ static int speedstep_target(struct cpufreq_policy *policy,
 }
 
 
-/**
- * speedstep_verify - verifies a new CPUFreq policy
- * @policy: new policy
- *
- * Limit must be within speedstep_low_freq and speedstep_high_freq, with
- * at least one border included.
- */
-static int speedstep_verify(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
-}
-
-
 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
        int result;
-       unsigned int speed, state;
        unsigned int *low, *high;
 
        /* capability check */
@@ -316,32 +302,8 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
                        pr_debug("workaround worked.\n");
        }
 
-       /* get current speed setting */
-       state = speedstep_get_state();
-       speed = speedstep_freqs[state].frequency;
-
-       pr_debug("currently at %s speed setting - %i MHz\n",
-               (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency)
-               ? "low" : "high",
-               (speed / 1000));
-
-       /* cpuinfo and default policy values */
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
-       policy->cur = speed;
-
-       result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs);
-       if (result)
-               return result;
-
-       cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu);
-
-       return 0;
-}
-
-static int speedstep_cpu_exit(struct cpufreq_policy *policy)
-{
-       cpufreq_frequency_table_put_attr(policy->cpu);
-       return 0;
+       return cpufreq_table_validate_and_show(policy, speedstep_freqs);
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
@@ -362,20 +324,15 @@ static int speedstep_resume(struct cpufreq_policy *policy)
        return result;
 }
 
-static struct freq_attr *speedstep_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver speedstep_driver = {
        .name           = "speedstep-smi",
-       .verify         = speedstep_verify,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = speedstep_target,
        .init           = speedstep_cpu_init,
-       .exit           = speedstep_cpu_exit,
+       .exit           = cpufreq_generic_exit,
        .get            = speedstep_get,
        .resume         = speedstep_resume,
-       .attr           = speedstep_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static const struct x86_cpu_id ss_smi_ids[] = {
index a7b876fdc1d8a5b672346233d7f7b7ce0a7c962f..32483ef63d5305c064a97ff5aa1280ed1203ff87 100644 (file)
@@ -51,11 +51,6 @@ static unsigned long target_cpu_speed[NUM_CPUS];
 static DEFINE_MUTEX(tegra_cpu_lock);
 static bool is_suspended;
 
-static int tegra_verify_speed(struct cpufreq_policy *policy)
-{
-       return cpufreq_frequency_table_verify(policy, freq_table);
-}
-
 static unsigned int tegra_getspeed(unsigned int cpu)
 {
        unsigned long rate;
@@ -209,21 +204,23 @@ static struct notifier_block tegra_cpu_pm_notifier = {
 
 static int tegra_cpu_init(struct cpufreq_policy *policy)
 {
+       int ret;
+
        if (policy->cpu >= NUM_CPUS)
                return -EINVAL;
 
        clk_prepare_enable(emc_clk);
        clk_prepare_enable(cpu_clk);
 
-       cpufreq_frequency_table_cpuinfo(policy, freq_table);
-       cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
-       policy->cur = tegra_getspeed(policy->cpu);
-       target_cpu_speed[policy->cpu] = policy->cur;
+       target_cpu_speed[policy->cpu] = tegra_getspeed(policy->cpu);
 
        /* FIXME: what's the actual transition time? */
-       policy->cpuinfo.transition_latency = 300 * 1000;
-
-       cpumask_copy(policy->cpus, cpu_possible_mask);
+       ret = cpufreq_generic_init(policy, freq_table, 300 * 1000);
+       if (ret) {
+               clk_disable_unprepare(cpu_clk);
+               clk_disable_unprepare(emc_clk);
+               return ret;
+       }
 
        if (policy->cpu == 0)
                register_pm_notifier(&tegra_cpu_pm_notifier);
@@ -233,24 +230,20 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
 
 static int tegra_cpu_exit(struct cpufreq_policy *policy)
 {
-       cpufreq_frequency_table_cpuinfo(policy, freq_table);
+       cpufreq_frequency_table_put_attr(policy->cpu);
+       clk_disable_unprepare(cpu_clk);
        clk_disable_unprepare(emc_clk);
        return 0;
 }
 
-static struct freq_attr *tegra_cpufreq_attr[] = {
-       &cpufreq_freq_attr_scaling_available_freqs,
-       NULL,
-};
-
 static struct cpufreq_driver tegra_cpufreq_driver = {
-       .verify         = tegra_verify_speed,
+       .verify         = cpufreq_generic_frequency_table_verify,
        .target         = tegra_target,
        .get            = tegra_getspeed,
        .init           = tegra_cpu_init,
        .exit           = tegra_cpu_exit,
        .name           = "tegra",
-       .attr           = tegra_cpufreq_attr,
+       .attr           = cpufreq_generic_attr,
 };
 
 static int __init tegra_cpufreq_init(void)
index b225f04d8ae5c55bcfee26f644812d3d1100cc04..653ae2955b555ad63607d84a4ebeb27be205e644 100644 (file)
@@ -29,9 +29,7 @@ static int ucv2_verify_speed(struct cpufreq_policy *policy)
        if (policy->cpu)
                return -EINVAL;
 
-       cpufreq_verify_within_limits(policy,
-                       policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
-
+       cpufreq_verify_within_cpu_limits(policy);
        return 0;
 }
 
@@ -68,7 +66,6 @@ static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
 {
        if (policy->cpu != 0)
                return -EINVAL;
-       policy->cur = ucv2_getspeed(0);
        policy->min = policy->cpuinfo.min_freq = 250000;
        policy->max = policy->cpuinfo.max_freq = 1000000;
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
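
The open-coded clamp removed above is folded into cpufreq_verify_within_cpu_limits(). A plausible shape for that helper, inferred from the call it replaces rather than quoted from the cpufreq header, is sketched below (the foo_ name is illustrative):

#include <linux/cpufreq.h>

/* Sketch only: mirrors the removed open-coded call. */
static inline void foo_verify_within_cpu_limits(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                     policy->cpuinfo.max_freq);
}
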
index 8e366032230893dc35c0d03f55138a20e0a33f07..f23bd75426cdfcac5da246ef6f23adef0a15b0da 100644 (file)
@@ -2,6 +2,17 @@
 # ARM CPU Idle drivers
 #
 
+config ARM_BIG_LITTLE_CPUIDLE
+       bool "Support for ARM big.LITTLE processors"
+       depends on ARCH_VEXPRESS_TC2_PM
+       select ARM_CPU_SUSPEND
+       select CPU_IDLE_MULTIPLE_DRIVERS
+       help
+         Select this option to enable CPU idle driver for big.LITTLE based
+         ARM systems. The driver manages CPU coordination through MCPM and
+         defines different C-states for little and big cores through the
+         multiple CPU idle drivers infrastructure.
+
 config ARM_HIGHBANK_CPUIDLE
        bool "CPU Idle Driver for Calxeda processors"
        depends on ARCH_HIGHBANK
@@ -27,13 +38,9 @@ config ARM_U8500_CPUIDLE
        help
          Select this to enable cpuidle for ST-E u8500 processors
 
-config CPU_IDLE_BIG_LITTLE
-       bool "Support for ARM big.LITTLE processors"
-       depends on ARCH_VEXPRESS_TC2_PM
-       select ARM_CPU_SUSPEND
-       select CPU_IDLE_MULTIPLE_DRIVERS
+config ARM_AT91_CPUIDLE
+       bool "CPU Idle Driver for AT91 processors"
+       default y
+       depends on ARCH_AT91
        help
-         Select this option to enable CPU idle driver for big.LITTLE based
-         ARM systems. Driver manages CPUs coordination through MCPM and
-         define different C-states for little and big cores through the
-         multiple CPU idle drivers infrastructure.
+         Select this to enable cpuidle for AT91 processors
index cea5ef58876d8202521545008afb9026565fbe7e..527be28e5c1e4e785032a83027899fdf47103b21 100644 (file)
@@ -7,8 +7,9 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 
 ##################################################################################
 # ARM SoC drivers
+obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE)   += cpuidle-big_little.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE)     += cpuidle-calxeda.o
 obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE)     += cpuidle-kirkwood.o
 obj-$(CONFIG_ARM_ZYNQ_CPUIDLE)         += cpuidle-zynq.o
 obj-$(CONFIG_ARM_U8500_CPUIDLE)         += cpuidle-ux500.o
-obj-$(CONFIG_CPU_IDLE_BIG_LITTLE)      += cpuidle-big_little.o
+obj-$(CONFIG_ARM_AT91_CPUIDLE)          += cpuidle-at91.o
similarity index 79%
rename from arch/arm/mach-at91/cpuidle.c
rename to drivers/cpuidle/cpuidle-at91.c
index 4ec6a6d9b9be7e7e0e853ad17a57f2ae2d7b986b..a0774370c6bc41acd38c410beedd55e4592558d0 100644 (file)
 #include <linux/export.h>
 #include <asm/proc-fns.h>
 #include <asm/cpuidle.h>
-#include <mach/cpu.h>
-
-#include "pm.h"
 
 #define AT91_MAX_STATES        2
 
+static void (*at91_standby)(void);
+
 /* Actual code that puts the SoC in different idle states */
 static int at91_enter_idle(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                               int index)
 {
-       if (cpu_is_at91rm9200())
-               at91rm9200_standby();
-       else if (cpu_is_at91sam9g45())
-               at91sam9g45_standby();
-       else if (cpu_is_at91sam9263())
-               at91sam9263_standby();
-       else
-               at91sam9_standby();
-
+       at91_standby();
        return index;
 }
 
@@ -60,9 +51,19 @@ static struct cpuidle_driver at91_idle_driver = {
 };
 
 /* Initialize CPU idle by registering the idle states */
-static int __init at91_init_cpuidle(void)
+static int at91_cpuidle_probe(struct platform_device *dev)
 {
+       at91_standby = (void *)(dev->dev.platform_data);
+       
        return cpuidle_register(&at91_idle_driver, NULL);
 }
 
-device_initcall(at91_init_cpuidle);
+static struct platform_driver at91_cpuidle_driver = {
+       .driver = {
+               .name = "cpuidle-at91",
+               .owner = THIS_MODULE,
+       },
+       .probe = at91_cpuidle_probe,
+};
+
+module_platform_driver(at91_cpuidle_driver);
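
With the cpu_is_at91*() checks gone, the driver expects the SoC-specific standby routine to arrive as the platform_data of a "cpuidle-at91" platform device. A hedged sketch of the board/mach-at91 side this implies; the at91_cpuidle_device_init() name and the standby argument are illustrative, not part of this diff:

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device at91_cpuidle_device = {
        .name = "cpuidle-at91",
        .id   = -1,
};

/* Called from SoC init code with e.g. the rm9200 or sam9 standby routine;
 * the driver casts platform_data back to void (*)(void) in its probe. */
void __init at91_cpuidle_device_init(void (*standby)(void))
{
        at91_cpuidle_device.dev.platform_data = standby;
        platform_device_register(&at91_cpuidle_device);
}
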
index e0564652af35114f2d73227b68bf781758975629..5e35804b1a952393dd84b20aacaf96442eac4d62 100644 (file)
@@ -111,7 +111,7 @@ static struct cpuidle_driver ux500_idle_driver = {
        .state_count = 2,
 };
 
-static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
+static int dbx500_cpuidle_probe(struct platform_device *pdev)
 {
        /* Configure wake up reasons */
        prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
index 38e03a18359156077e17d8f5b35671de47bf6d8e..aded759280282b08e7fb3e9025f130240b9f6074 100644 (file)
@@ -28,7 +28,7 @@
 #include <linux/init.h>
 #include <linux/cpu_pm.h>
 #include <linux/cpuidle.h>
-#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <asm/proc-fns.h>
 #include <asm/cpuidle.h>
 
@@ -70,14 +70,19 @@ static struct cpuidle_driver zynq_idle_driver = {
 };
 
 /* Initialize CPU idle by registering the idle states */
-static int __init zynq_cpuidle_init(void)
+static int zynq_cpuidle_probe(struct platform_device *pdev)
 {
-       if (!of_machine_is_compatible("xlnx,zynq-7000"))
-               return -ENODEV;
-
        pr_info("Xilinx Zynq CpuIdle Driver started\n");
 
        return cpuidle_register(&zynq_idle_driver, NULL);
 }
 
-device_initcall(zynq_cpuidle_init);
+static struct platform_driver zynq_cpuidle_driver = {
+       .driver = {
+               .name = "cpuidle-zynq",
+               .owner = THIS_MODULE,
+       },
+       .probe = zynq_cpuidle_probe,
+};
+
+module_platform_driver(zynq_cpuidle_driver);
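
Dropping the of_machine_is_compatible() check from the driver moves that decision to the platform code, which is now expected to register a "cpuidle-zynq" device only on matching SoCs. A hedged sketch of that arch-side registration (the initcall name is illustrative, not from this diff):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int __init zynq_cpuidle_device_init(void)
{
        struct platform_device *pdev;

        if (!of_machine_is_compatible("xlnx,zynq-7000"))
                return 0;

        pdev = platform_device_register_simple("cpuidle-zynq", -1, NULL, 0);
        return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
device_initcall(zynq_cpuidle_device_init);
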
index 7c63b72ecd750f381fef66e8baccc7955351b1b3..86a0d415b9a777d8bdf636a89cbfe0e37e27794f 100644 (file)
@@ -2209,6 +2209,13 @@ static int __init caam_algapi_init(void)
        priv = dev_get_drvdata(ctrldev);
        of_node_put(dev_node);
 
+       /*
+        * If priv is NULL, it's probably because the caam driver wasn't
+        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+        */
+       if (!priv)
+               return -ENODEV;
+
        INIT_LIST_HEAD(&priv->alg_list);
 
        atomic_set(&priv->tfm_count, -1);
index e732bd962e98cc715db6463c7587dbb2511707e3..ca6218eee460f6b71e99bda6475fdde3e8af9e19 100644 (file)
@@ -1833,6 +1833,13 @@ static int __init caam_algapi_hash_init(void)
        priv = dev_get_drvdata(ctrldev);
        of_node_put(dev_node);
 
+       /*
+        * If priv is NULL, it's probably because the caam driver wasn't
+        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+        */
+       if (!priv)
+               return -ENODEV;
+
        INIT_LIST_HEAD(&priv->hash_list);
 
        atomic_set(&priv->tfm_count, -1);
index d1939a9539c06a4204a26b65d3d743d46c2346d3..588ad2288f82a0b328a6addc27b12d2984147a93 100644 (file)
@@ -298,6 +298,13 @@ static int __init caam_rng_init(void)
        priv = dev_get_drvdata(ctrldev);
        of_node_put(dev_node);
 
+       /*
+        * If priv is NULL, it's probably because the caam driver wasn't
+        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+        */
+       if (!priv)
+               return -ENODEV;
+
        caam_init_rng(&rng_ctx, priv->jrdev[0]);
 
        dev_info(priv->jrdev[0], "registering rng-caam\n");
index b010d42a18035fa48a8797dc802267f70db531d2..26438cd126859e75638c893ad6e959c3baf9e9ef 100644 (file)
 #include "error.h"
 #include "ctrl.h"
 
-static int caam_remove(struct platform_device *pdev)
-{
-       struct device *ctrldev;
-       struct caam_drv_private *ctrlpriv;
-       struct caam_drv_private_jr *jrpriv;
-       struct caam_full __iomem *topregs;
-       int ring, ret = 0;
-
-       ctrldev = &pdev->dev;
-       ctrlpriv = dev_get_drvdata(ctrldev);
-       topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
-
-       /* shut down JobRs */
-       for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
-               ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
-               jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
-               irq_dispose_mapping(jrpriv->irq);
-       }
-
-       /* Shut down debug views */
-#ifdef CONFIG_DEBUG_FS
-       debugfs_remove_recursive(ctrlpriv->dfs_root);
-#endif
-
-       /* Unmap controller region */
-       iounmap(&topregs->ctrl);
-
-       kfree(ctrlpriv->jrdev);
-       kfree(ctrlpriv);
-
-       return ret;
-}
-
 /*
  * Descriptor to instantiate RNG State Handle 0 in normal mode and
  * load the JDKEK, TDKEK and TDSK registers
  */
-static void build_instantiation_desc(u32 *desc)
+static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
 {
-       u32 *jump_cmd;
+       u32 *jump_cmd, op_flags;
 
        init_job_desc(desc, 0);
 
+       op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+                       (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT;
+
        /* INIT RNG in non-test mode */
-       append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
-                        OP_ALG_AS_INIT);
+       append_operation(desc, op_flags);
+
+       if (!handle && do_sk) {
+               /*
+                * For SH0, Secure Keys must be generated as well
+                */
+
+               /* wait for done */
+               jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
+               set_jump_tgt_here(desc, jump_cmd);
+
+               /*
+                * load 1 to clear written reg:
+                * resets the done interrupt and returns the RNG to idle.
+                */
+               append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+
+               /* Initialize State Handle  */
+               append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
+                                OP_ALG_AAI_RNG4_SK);
+       }
 
-       /* wait for done */
-       jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
-       set_jump_tgt_here(desc, jump_cmd);
+       append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
+}
 
-       /*
-        * load 1 to clear written reg:
-        * resets the done interrupt and returns the RNG to idle.
-        */
-       append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
+/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
+static void build_deinstantiation_desc(u32 *desc, int handle)
+{
+       init_job_desc(desc, 0);
 
-       /* generate secure keys (non-test) */
+       /* Uninstantiate State Handle 0 */
        append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
-                        OP_ALG_RNG4_SK);
+                        (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);
+
+       append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
 }
 
-static int instantiate_rng(struct device *ctrldev)
+/*
+ * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
+ *                       the software (no JR/QI used).
+ * @ctrldev - pointer to device
+ * @status - descriptor status, after being run
+ *
+ * Return: - 0 if no error occurred
+ *        - -ENODEV if the DECO couldn't be acquired
+ *        - -EAGAIN if an error occurred while executing the descriptor
+ */
+static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
+                                       u32 *status)
 {
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
        struct caam_full __iomem *topregs;
        unsigned int timeout = 100000;
-       u32 *desc;
-       int i, ret = 0;
-
-       desc = kmalloc(CAAM_CMD_SZ * 6, GFP_KERNEL | GFP_DMA);
-       if (!desc) {
-               dev_err(ctrldev, "can't allocate RNG init descriptor memory\n");
-               return -ENOMEM;
-       }
-       build_instantiation_desc(desc);
+       u32 deco_dbg_reg, flags;
+       int i;
 
        /* Set the bit to request direct access to DECO0 */
        topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
@@ -100,36 +93,221 @@ static int instantiate_rng(struct device *ctrldev)
 
        if (!timeout) {
                dev_err(ctrldev, "failed to acquire DECO 0\n");
-               ret = -EIO;
-               goto out;
+               clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
+               return -ENODEV;
        }
 
        for (i = 0; i < desc_len(desc); i++)
-               topregs->deco.descbuf[i] = *(desc + i);
+               wr_reg32(&topregs->deco.descbuf[i], *(desc + i));
 
-       wr_reg32(&topregs->deco.jr_ctl_hi, DECO_JQCR_WHL | DECO_JQCR_FOUR);
+       flags = DECO_JQCR_WHL;
+       /*
+        * If the descriptor is four or more words long, the FOUR bit in
+        * the JRCTRL register must be set.
+        */
+       if (desc_len(desc) >= 4)
+               flags |= DECO_JQCR_FOUR;
+
+       /* Instruct the DECO to execute it */
+       wr_reg32(&topregs->deco.jr_ctl_hi, flags);
 
        timeout = 10000000;
-       while ((rd_reg32(&topregs->deco.desc_dbg) & DECO_DBG_VALID) &&
-                                                                --timeout)
+       do {
+               deco_dbg_reg = rd_reg32(&topregs->deco.desc_dbg);
+               /*
+                * If an error occurred in the descriptor, then
+                * the DECO status field will be set to 0x0D
+                */
+               if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
+                   DESC_DBG_DECO_STAT_HOST_ERR)
+                       break;
                cpu_relax();
+       } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
 
-       if (!timeout) {
-               dev_err(ctrldev, "failed to instantiate RNG\n");
-               ret = -EIO;
-       }
+       *status = rd_reg32(&topregs->deco.op_status_hi) &
+                 DECO_OP_STATUS_HI_ERR_MASK;
 
+       /* Mark the DECO as free */
        clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
-out:
+
+       if (!timeout)
+               return -EAGAIN;
+
+       return 0;
+}
+
+/*
+ * instantiate_rng - builds and executes a descriptor on DECO0,
+ *                  which initializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ *                     for the RNG4 state handles which exist in
+ *                     the RNG4 block: 1 if it's been instantiated
+ *                     by an external entity, 0 otherwise.
+ * @gen_sk  - generate data to be loaded into the JDKEK, TDKEK and TDSK;
+ *           Caution: this can be done only once; if the keys need to be
+ *           regenerated, a POR is required
+ *
+ * Return: - 0 if no error occurred
+ *        - -ENOMEM if there isn't enough memory to allocate the descriptor
+ *        - -ENODEV if DECO0 couldn't be acquired
+ *        - -EAGAIN if an error occurred when executing the descriptor
+ *           e.g. there was an RNG hardware error because the acquired
+ *           entropy was not "good enough".
+ */
+static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
+                          int gen_sk)
+{
+       struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
+       struct caam_full __iomem *topregs;
+       struct rng4tst __iomem *r4tst;
+       u32 *desc, status, rdsta_val;
+       int ret = 0, sh_idx;
+
+       topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+       r4tst = &topregs->ctrl.r4tst[0];
+
+       desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+
+       for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+               /*
+                * If the corresponding bit is set, this state handle
+                * was initialized by somebody else, so it's left alone.
+                */
+               if ((1 << sh_idx) & state_handle_mask)
+                       continue;
+
+               /* Create the descriptor for instantiating RNG State Handle */
+               build_instantiation_desc(desc, sh_idx, gen_sk);
+
+               /* Try to run it through DECO0 */
+               ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+               /*
+                * If ret is not 0, or descriptor status is not 0, then
+                * something went wrong. No need to try the next state
+                * handle (if available), bail out here.
+                * Also, if for some reason, the State Handle didn't get
+                * instantiated although the descriptor has finished
+                * without any error (HW optimizations for later
+                * CAAM eras), then try again.
+                */
+               rdsta_val =
+                       rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IFMASK;
+               if (status || !(rdsta_val & (1 << sh_idx)))
+                       ret = -EAGAIN;
+               if (ret)
+                       break;
+
+               dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
+               /* Clear the contents before recreating the descriptor */
+               memset(desc, 0x00, CAAM_CMD_SZ * 7);
+       }
+
        kfree(desc);
+
        return ret;
 }
 
 /*
- * By default, the TRNG runs for 200 clocks per sample;
- * 1600 clocks per sample generates better entropy.
+ * deinstantiate_rng - builds and executes a descriptor on DECO0,
+ *                    which deinitializes the RNG block.
+ * @ctrldev - pointer to device
+ * @state_handle_mask - bitmask containing the instantiation status
+ *                     for the RNG4 state handles which exist in
+ *                     the RNG4 block: 1 if it's been instantiated
+ *
+ * Return: - 0 if no error occurred
+ *        - -ENOMEM if there isn't enough memory to allocate the descriptor
+ *        - -ENODEV if DECO0 couldn't be acquired
+ *        - -EAGAIN if an error occurred when executing the descriptor
  */
-static void kick_trng(struct platform_device *pdev)
+static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
+{
+       u32 *desc, status;
+       int sh_idx, ret = 0;
+
+       desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+
+       for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
+               /*
+                * If the corresponding bit is set, then it means the state
+                * handle was initialized by us, and thus it needs to be
+                * deinitialized as well
+                */
+               if ((1 << sh_idx) & state_handle_mask) {
+                       /*
+                        * Create the descriptor for deinstantiating this state
+                        * handle
+                        */
+                       build_deinstantiation_desc(desc, sh_idx);
+
+                       /* Try to run it through DECO0 */
+                       ret = run_descriptor_deco0(ctrldev, desc, &status);
+
+                       if (ret || status) {
+                               dev_err(ctrldev,
+                                       "Failed to deinstantiate RNG4 SH%d\n",
+                                       sh_idx);
+                               break;
+                       }
+                       dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
+               }
+       }
+
+       kfree(desc);
+
+       return ret;
+}
+
+static int caam_remove(struct platform_device *pdev)
+{
+       struct device *ctrldev;
+       struct caam_drv_private *ctrlpriv;
+       struct caam_drv_private_jr *jrpriv;
+       struct caam_full __iomem *topregs;
+       int ring, ret = 0;
+
+       ctrldev = &pdev->dev;
+       ctrlpriv = dev_get_drvdata(ctrldev);
+       topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+       /* shut down JobRs */
+       for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
+               ret |= caam_jr_shutdown(ctrlpriv->jrdev[ring]);
+               jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
+               irq_dispose_mapping(jrpriv->irq);
+       }
+
+       /* De-initialize RNG state handles initialized by this driver. */
+       if (ctrlpriv->rng4_sh_init)
+               deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
+
+       /* Shut down debug views */
+#ifdef CONFIG_DEBUG_FS
+       debugfs_remove_recursive(ctrlpriv->dfs_root);
+#endif
+
+       /* Unmap controller region */
+       iounmap(&topregs->ctrl);
+
+       kfree(ctrlpriv->jrdev);
+       kfree(ctrlpriv);
+
+       return ret;
+}
+
+/*
+ * kick_trng - sets the various parameters for enabling the initialization
+ *            of the RNG4 block in CAAM
+ * @pdev - pointer to the platform device
+ * @ent_delay - Defines the length (in system clocks) of each entropy sample.
+ */
+static void kick_trng(struct platform_device *pdev, int ent_delay)
 {
        struct device *ctrldev = &pdev->dev;
        struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
@@ -142,14 +320,31 @@ static void kick_trng(struct platform_device *pdev)
 
        /* put RNG4 into program mode */
        setbits32(&r4tst->rtmctl, RTMCTL_PRGM);
-       /* 1600 clocks per sample */
+
+       /*
+        * Performance-wise, it does not make sense to
+        * set the delay to a value that is lower
+        * than the last one that worked (i.e. the state handles
+        * were instantiated properly). Thus, instead of wasting
+        * time trying to set the values controlling the sample
+        * frequency, the function simply returns.
+        */
+       val = (rd_reg32(&r4tst->rtsdctl) & RTSDCTL_ENT_DLY_MASK)
+             >> RTSDCTL_ENT_DLY_SHIFT;
+       if (ent_delay <= val) {
+               /* put RNG4 into run mode */
+               clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
+               return;
+       }
+
        val = rd_reg32(&r4tst->rtsdctl);
-       val = (val & ~RTSDCTL_ENT_DLY_MASK) | (1600 << RTSDCTL_ENT_DLY_SHIFT);
+       val = (val & ~RTSDCTL_ENT_DLY_MASK) |
+             (ent_delay << RTSDCTL_ENT_DLY_SHIFT);
        wr_reg32(&r4tst->rtsdctl, val);
-       /* min. freq. count */
-       wr_reg32(&r4tst->rtfrqmin, 400);
-       /* max. freq. count */
-       wr_reg32(&r4tst->rtfrqmax, 6400);
+       /* min. freq. count, equal to 1/4 of the entropy sample length */
+       wr_reg32(&r4tst->rtfrqmin, ent_delay >> 2);
+       /* max. freq. count, equal to 8 times the entropy sample length */
+       wr_reg32(&r4tst->rtfrqmax, ent_delay << 3);
        /* put RNG4 into run mode */
        clrbits32(&r4tst->rtmctl, RTMCTL_PRGM);
 }
@@ -190,7 +385,7 @@ EXPORT_SYMBOL(caam_get_era);
 /* Probe routine for CAAM top (controller) level */
 static int caam_probe(struct platform_device *pdev)
 {
-       int ret, ring, rspec;
+       int ret, ring, rspec, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
        u64 caam_id;
        struct device *dev;
        struct device_node *nprop, *np;
@@ -296,16 +491,55 @@ static int caam_probe(struct platform_device *pdev)
 
        /*
         * If SEC has RNG version >= 4 and RNG state handle has not been
-        * already instantiated ,do RNG instantiation
+        * already instantiated, do RNG instantiation
         */
-       if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4 &&
-           !(rd_reg32(&topregs->ctrl.r4tst[0].rdsta) & RDSTA_IF0)) {
-               kick_trng(pdev);
-               ret = instantiate_rng(dev);
+       if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+               ctrlpriv->rng4_sh_init =
+                       rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
+               /*
+                * If the secure keys (TDKEK, JDKEK, TDSK), were already
+                * generated, signal this to the function that is instantiating
+                * the state handles. An error would occur if RNG4 attempts
+                * to regenerate these keys before the next POR.
+                */
+               gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
+               ctrlpriv->rng4_sh_init &= RDSTA_IFMASK;
+               do {
+                       int inst_handles =
+                               rd_reg32(&topregs->ctrl.r4tst[0].rdsta) &
+                                                               RDSTA_IFMASK;
+                       /*
+                        * If either SH were instantiated by somebody else
+                        * (e.g. u-boot) then it is assumed that the entropy
+                        * parameters are properly set and thus the function
+                        * setting these (kick_trng(...)) is skipped.
+                        * Also, if a handle was instantiated, do not change
+                        * the TRNG parameters.
+                        */
+                       if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+                               kick_trng(pdev, ent_delay);
+                               ent_delay += 400;
+                       }
+                       /*
+                        * if instantiate_rng(...) fails, the loop will rerun
+                        * and the kick_trng(...) function will modify the
+                        * upper and lower limits of the entropy sampling
+                        * interval, leading to a successful initialization of
+                        * the RNG.
+                        */
+                       ret = instantiate_rng(dev, inst_handles,
+                                             gen_sk);
+               } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
                if (ret) {
+                       dev_err(dev, "failed to instantiate RNG");
                        caam_remove(pdev);
                        return ret;
                }
+               /*
+                * Set handles init'ed by this module as the complement of the
+                * already initialized ones
+                */
+               ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_IFMASK;
 
                /* Enable RDB bit so that RNG works faster */
                setbits32(&topregs->ctrl.scfgr, SCFGR_RDBENABLE);
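
The probe path above keeps retrying RNG instantiation with a progressively longer entropy-sampling delay. A standalone sketch of that backoff policy, simplified to ignore the externally-instantiated-handle case; the helper name and the try_init callback are illustrative, and the constants mirror RTSDCTL_ENT_DLY_MIN/MAX from the register header further down:

#include <linux/errno.h>

/* try_init() stands in for kick_trng() + instantiate_rng(); it returns
 * -EAGAIN while the sampled entropy is not "good enough". */
static int instantiate_rng_with_backoff(int (*try_init)(int ent_delay))
{
        int ent_delay = 1200;           /* RTSDCTL_ENT_DLY_MIN */
        int ret;

        do {
                ret = try_init(ent_delay);
                ent_delay += 400;       /* widen the sampling window and retry */
        } while (ret == -EAGAIN && ent_delay < 12800 /* RTSDCTL_ENT_DLY_MAX */);

        return ret;
}
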
index 53b296f78b0d2a588fe1cc3bb4bde067c05a0eda..7e4500f18df6f06de602a3d44ff6d88c4ca70ec5 100644 (file)
@@ -1155,8 +1155,15 @@ struct sec4_sg_entry {
 
 /* randomizer AAI set */
 #define OP_ALG_AAI_RNG         (0x00 << OP_ALG_AAI_SHIFT)
-#define OP_ALG_AAI_RNG_NOZERO  (0x10 << OP_ALG_AAI_SHIFT)
-#define OP_ALG_AAI_RNG_ODD     (0x20 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_NZB     (0x10 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG_OBP     (0x20 << OP_ALG_AAI_SHIFT)
+
+/* RNG4 AAI set */
+#define OP_ALG_AAI_RNG4_SH_0   (0x00 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SH_1   (0x01 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_PS     (0x40 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_AI     (0x80 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_RNG4_SK     (0x100 << OP_ALG_AAI_SHIFT)
 
 /* hmac/smac AAI set */
 #define OP_ALG_AAI_HASH                (0x00 << OP_ALG_AAI_SHIFT)
@@ -1178,12 +1185,6 @@ struct sec4_sg_entry {
 #define OP_ALG_AAI_GSM         (0x10 << OP_ALG_AAI_SHIFT)
 #define OP_ALG_AAI_EDGE                (0x20 << OP_ALG_AAI_SHIFT)
 
-/* RNG4 set */
-#define OP_ALG_RNG4_SHIFT      4
-#define OP_ALG_RNG4_MASK       (0x1f3 << OP_ALG_RNG4_SHIFT)
-
-#define OP_ALG_RNG4_SK         (0x100 << OP_ALG_RNG4_SHIFT)
-
 #define OP_ALG_AS_SHIFT                2
 #define OP_ALG_AS_MASK         (0x3 << OP_ALG_AS_SHIFT)
 #define OP_ALG_AS_UPDATE       (0 << OP_ALG_AS_SHIFT)
index 34c4b9f7fbfae414a1578e37da245fd9119ac8fd..bbc1ac9ec72032f0bea5ae6bf0117ee38b9a320d 100644 (file)
@@ -87,6 +87,12 @@ struct caam_drv_private {
        /* list of registered hash algorithms (mk generic context handle?) */
        struct list_head hash_list;
 
+#define        RNG4_MAX_HANDLES 2
+       /* RNG4 block */
+       u32 rng4_sh_init;       /* This bitmap shows which of the State
+                                  Handles of the RNG4 block are initialized
+                                  by this driver */
+
        /*
         * debugfs entries for developer view into driver/device
         * variables at runtime.
index 4455396918de84320380fcca2eca01d694971114..d50174f45b21c8e0c145d7077ec86f86cb66ff37 100644 (file)
@@ -245,7 +245,7 @@ struct rngtst {
 
 /* RNG4 TRNG test registers */
 struct rng4tst {
-#define RTMCTL_PRGM 0x00010000 /* 1 -> program mode, 0 -> run mode */
+#define RTMCTL_PRGM    0x00010000      /* 1 -> program mode, 0 -> run mode */
        u32 rtmctl;             /* misc. control register */
        u32 rtscmisc;           /* statistical check misc. register */
        u32 rtpkrrng;           /* poker range register */
@@ -255,6 +255,8 @@ struct rng4tst {
        };
 #define RTSDCTL_ENT_DLY_SHIFT 16
 #define RTSDCTL_ENT_DLY_MASK (0xffff << RTSDCTL_ENT_DLY_SHIFT)
+#define RTSDCTL_ENT_DLY_MIN 1200
+#define RTSDCTL_ENT_DLY_MAX 12800
        u32 rtsdctl;            /* seed control register */
        union {
                u32 rtsblim;    /* PRGM=1: sparse bit limit register */
@@ -266,7 +268,11 @@ struct rng4tst {
                u32 rtfrqcnt;   /* PRGM=0: freq. count register */
        };
        u32 rsvd1[40];
+#define RDSTA_SKVT 0x80000000
+#define RDSTA_SKVN 0x40000000
 #define RDSTA_IF0 0x00000001
+#define RDSTA_IF1 0x00000002
+#define RDSTA_IFMASK (RDSTA_IF1 | RDSTA_IF0)
        u32 rdsta;
        u32 rsvd2[15];
 };
@@ -692,6 +698,7 @@ struct caam_deco {
        u32 jr_ctl_hi;  /* CxJRR - JobR Control Register      @800 */
        u32 jr_ctl_lo;
        u64 jr_descaddr;        /* CxDADR - JobR Descriptor Address */
+#define DECO_OP_STATUS_HI_ERR_MASK 0xF00000FF
        u32 op_status_hi;       /* DxOPSTA - DECO Operation Status */
        u32 op_status_lo;
        u32 rsvd24[2];
@@ -706,12 +713,13 @@ struct caam_deco {
        u32 rsvd29[48];
        u32 descbuf[64];        /* DxDESB - Descriptor buffer */
        u32 rscvd30[193];
+#define DESC_DBG_DECO_STAT_HOST_ERR    0x00D00000
+#define DESC_DBG_DECO_STAT_VALID       0x80000000
+#define DESC_DBG_DECO_STAT_MASK                0x00F00000
        u32 desc_dbg;           /* DxDDR - DECO Debug Register */
        u32 rsvd31[126];
 };
 
-/* DECO DBG Register Valid Bit*/
-#define DECO_DBG_VALID         0x80000000
 #define DECO_JQCR_WHL          0x20000000
 #define DECO_JQCR_FOUR         0x10000000
 
index e0037c8ee24386e941e5eed6189e9c2cd9747a3a..b12ff85f4241ece439e43310b115d16a08c5e2eb 100644 (file)
@@ -117,6 +117,21 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
        return nents;
 }
 
+/* Map SG page in kernel virtual address space and copy */
+static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
+                              int len, int offset)
+{
+       u8 *mapped_addr;
+
+       /*
+        * The page here may be a user-space page pinned with get_user_pages();
+        * it must be kmapped before use and kunmapped afterwards.
+        */
+       mapped_addr = kmap_atomic(sg_page(sg));
+       memcpy(dest, mapped_addr + offset, len);
+       kunmap_atomic(mapped_addr);
+}
+
 /* Copy from len bytes of sg to dest, starting from beginning */
 static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
 {
@@ -124,15 +139,15 @@ static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
        int cpy_index = 0, next_cpy_index = current_sg->length;
 
        while (next_cpy_index < len) {
-               memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
-                      current_sg->length);
+               sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
+                           current_sg->offset);
                current_sg = scatterwalk_sg_next(current_sg);
                cpy_index = next_cpy_index;
                next_cpy_index += current_sg->length;
        }
        if (cpy_index < len)
-               memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg),
-                      len - cpy_index);
+               sg_map_copy(dest + cpy_index, current_sg, len-cpy_index,
+                           current_sg->offset);
 }
 
 /* Copy sg data, from to_skip to end, to dest */
@@ -140,7 +155,7 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
                                      int to_skip, unsigned int end)
 {
        struct scatterlist *current_sg = sg;
-       int sg_index, cpy_index;
+       int sg_index, cpy_index, offset;
 
        sg_index = current_sg->length;
        while (sg_index <= to_skip) {
@@ -148,9 +163,10 @@ static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
                sg_index += current_sg->length;
        }
        cpy_index = sg_index - to_skip;
-       memcpy(dest, (u8 *) sg_virt(current_sg) +
-              current_sg->length - cpy_index, cpy_index);
-       current_sg = scatterwalk_sg_next(current_sg);
-       if (end - sg_index)
+       offset = current_sg->offset + current_sg->length - cpy_index;
+       sg_map_copy(dest, current_sg, cpy_index, offset);
+       if (end - sg_index) {
+               current_sg = scatterwalk_sg_next(current_sg);
                sg_copy(dest + cpy_index, current_sg, end - sg_index);
+       }
 }
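
The switch from sg_virt() to kmap_atomic() matters because these scatterlists can reference user pages pinned with get_user_pages(), which may live in highmem and have no permanent kernel mapping. A minimal sketch of the copy pattern in isolation; the helper name is illustrative:

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/types.h>

static void copy_from_sg_page(u8 *dest, struct scatterlist *sg,
                              unsigned int len, unsigned int offset)
{
        /* Temporarily map the page, copy, unmap; sg_virt() would only be
         * valid for lowmem pages that already have a kernel address. */
        u8 *vaddr = kmap_atomic(sg_page(sg));

        memcpy(dest, vaddr + offset, len);
        kunmap_atomic(vaddr);
}
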
index a8a7dd4b0d25c6741d47e40b53b9d3cfd7f101ad..247ab8048f5bea3f09e5537f103626fe84e3bce5 100644 (file)
@@ -733,12 +733,9 @@ static int dcp_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, dev);
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "failed to get IORESOURCE_MEM\n");
-               return -ENXIO;
-       }
-       dev->dcp_regs_base = devm_ioremap(&pdev->dev, r->start,
-                                         resource_size(r));
+       dev->dcp_regs_base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(dev->dcp_regs_base))
+               return PTR_ERR(dev->dcp_regs_base);
 
        dcp_set(dev, DCP_CTRL_SFRST, DCP_REG_CTRL);
        udelay(10);
@@ -762,7 +759,8 @@ static int dcp_probe(struct platform_device *pdev)
                return -EIO;
        }
        dev->dcp_vmi_irq = r->start;
-       ret = request_irq(dev->dcp_vmi_irq, dcp_vmi_irq, 0, "dcp", dev);
+       ret = devm_request_irq(&pdev->dev, dev->dcp_vmi_irq, dcp_vmi_irq, 0,
+                              "dcp", dev);
        if (ret != 0) {
                dev_err(&pdev->dev, "can't request_irq (0)\n");
                return -EIO;
@@ -771,15 +769,14 @@ static int dcp_probe(struct platform_device *pdev)
        r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
        if (!r) {
                dev_err(&pdev->dev, "can't get IRQ resource (1)\n");
-               ret = -EIO;
-               goto err_free_irq0;
+               return -EIO;
        }
        dev->dcp_irq = r->start;
-       ret = request_irq(dev->dcp_irq, dcp_irq, 0, "dcp", dev);
+       ret = devm_request_irq(&pdev->dev, dev->dcp_irq, dcp_irq, 0, "dcp",
+                              dev);
        if (ret != 0) {
                dev_err(&pdev->dev, "can't request_irq (1)\n");
-               ret = -EIO;
-               goto err_free_irq0;
+               return -EIO;
        }
 
        dev->hw_pkg[0] = dma_alloc_coherent(&pdev->dev,
@@ -788,8 +785,7 @@ static int dcp_probe(struct platform_device *pdev)
                        GFP_KERNEL);
        if (!dev->hw_pkg[0]) {
                dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
-               ret = -ENOMEM;
-               goto err_free_irq1;
+               return -ENOMEM;
        }
 
        for (i = 1; i < DCP_MAX_PKG; i++) {
@@ -848,16 +844,14 @@ err_unregister:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&algs[j]);
 err_free_key_iv:
+       tasklet_kill(&dev->done_task);
+       tasklet_kill(&dev->queue_task);
        dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
                        dev->payload_base_dma);
 err_free_hw_packet:
        dma_free_coherent(&pdev->dev, DCP_MAX_PKG *
                sizeof(struct dcp_hw_packet), dev->hw_pkg[0],
                dev->hw_phys_pkg);
-err_free_irq1:
-       free_irq(dev->dcp_irq, dev);
-err_free_irq0:
-       free_irq(dev->dcp_vmi_irq, dev);
 
        return ret;
 }
@@ -868,23 +862,20 @@ static int dcp_remove(struct platform_device *pdev)
        int j;
        dev = platform_get_drvdata(pdev);
 
-       dma_free_coherent(&pdev->dev,
-                       DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
-                       dev->hw_pkg[0], dev->hw_phys_pkg);
-
-       dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
-                       dev->payload_base_dma);
+       misc_deregister(&dev->dcp_bootstream_misc);
 
-       free_irq(dev->dcp_irq, dev);
-       free_irq(dev->dcp_vmi_irq, dev);
+       for (j = 0; j < ARRAY_SIZE(algs); j++)
+               crypto_unregister_alg(&algs[j]);
 
        tasklet_kill(&dev->done_task);
        tasklet_kill(&dev->queue_task);
 
-       for (j = 0; j < ARRAY_SIZE(algs); j++)
-               crypto_unregister_alg(&algs[j]);
+       dma_free_coherent(&pdev->dev, 2 * AES_KEYSIZE_128, dev->payload_base,
+                       dev->payload_base_dma);
 
-       misc_deregister(&dev->dcp_bootstream_misc);
+       dma_free_coherent(&pdev->dev,
+                       DCP_MAX_PKG * sizeof(struct dcp_hw_packet),
+                       dev->hw_pkg[0], dev->hw_phys_pkg);
 
        return 0;
 }
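
dcp_probe() now relies on managed (devm_*) resources, which is why the err_free_irq0/err_free_irq1 labels disappear: anything obtained through devm_* is released automatically when probe fails or the device is unbound. A hedged sketch of that pattern in a stripped-down probe; the foo_* names are illustrative:

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq(int irq, void *priv)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        struct resource *r;
        void __iomem *base;
        int irq, ret;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, r);    /* also rejects r == NULL */
        if (IS_ERR(base))
                return PTR_ERR(base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0,
                               dev_name(&pdev->dev), NULL);
        if (ret)
                return ret;     /* nothing to unwind by hand */

        return 0;
}
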
index 21180d6cad6e27f2f316a04b1e98fb2e61ac90d3..9dd6e01eac33050b8304c5f8758440e7286606f2 100644 (file)
@@ -218,23 +218,9 @@ static dma_addr_t crypt_phys;
 
 static int support_aes = 1;
 
-static void dev_release(struct device *dev)
-{
-       return;
-}
-
 #define DRIVER_NAME "ixp4xx_crypto"
-static struct platform_device pseudo_dev = {
-       .name = DRIVER_NAME,
-       .id   = 0,
-       .num_resources = 0,
-       .dev  = {
-               .coherent_dma_mask = DMA_BIT_MASK(32),
-               .release = dev_release,
-       }
-};
 
-static struct device *dev = &pseudo_dev.dev;
+static struct platform_device *pdev;
 
 static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
 {
@@ -263,6 +249,7 @@ static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
 
 static int setup_crypt_desc(void)
 {
+       struct device *dev = &pdev->dev;
        BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
        crypt_virt = dma_alloc_coherent(dev,
                        NPE_QLEN * sizeof(struct crypt_ctl),
@@ -363,6 +350,7 @@ static void finish_scattered_hmac(struct crypt_ctl *crypt)
 
 static void one_packet(dma_addr_t phys)
 {
+       struct device *dev = &pdev->dev;
        struct crypt_ctl *crypt;
        struct ixp_ctx *ctx;
        int failed;
@@ -432,7 +420,7 @@ static void crypto_done_action(unsigned long arg)
        tasklet_schedule(&crypto_done_tasklet);
 }
 
-static int init_ixp_crypto(void)
+static int init_ixp_crypto(struct device *dev)
 {
        int ret = -ENODEV;
        u32 msg[2] = { 0, 0 };
@@ -519,7 +507,7 @@ err:
        return ret;
 }
 
-static void release_ixp_crypto(void)
+static void release_ixp_crypto(struct device *dev)
 {
        qmgr_disable_irq(RECV_QID);
        tasklet_kill(&crypto_done_tasklet);
@@ -886,6 +874,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
        enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
        struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
        struct buffer_desc src_hook;
+       struct device *dev = &pdev->dev;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                                GFP_KERNEL : GFP_ATOMIC;
 
@@ -1010,6 +999,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
        unsigned int cryptlen;
        struct buffer_desc *buf, src_hook;
        struct aead_ctx *req_ctx = aead_request_ctx(req);
+       struct device *dev = &pdev->dev;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                                GFP_KERNEL : GFP_ATOMIC;
 
@@ -1159,32 +1149,24 @@ static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
                        unsigned int keylen)
 {
        struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-       struct rtattr *rta = (struct rtattr *)key;
-       struct crypto_authenc_key_param *param;
+       struct crypto_authenc_keys keys;
 
-       if (!RTA_OK(rta, keylen))
-               goto badkey;
-       if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
-               goto badkey;
-       if (RTA_PAYLOAD(rta) < sizeof(*param))
+       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;
 
-       param = RTA_DATA(rta);
-       ctx->enckey_len = be32_to_cpu(param->enckeylen);
-
-       key += RTA_ALIGN(rta->rta_len);
-       keylen -= RTA_ALIGN(rta->rta_len);
+       if (keys.authkeylen > sizeof(ctx->authkey))
+               goto badkey;
 
-       if (keylen < ctx->enckey_len)
+       if (keys.enckeylen > sizeof(ctx->enckey))
                goto badkey;
 
-       ctx->authkey_len = keylen - ctx->enckey_len;
-       memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
-       memcpy(ctx->authkey, key, ctx->authkey_len);
+       memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+       memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+       ctx->authkey_len = keys.authkeylen;
+       ctx->enckey_len = keys.enckeylen;
 
        return aead_setup(tfm, crypto_aead_authsize(tfm));
 badkey:
-       ctx->enckey_len = 0;
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
 }
@@ -1418,20 +1400,30 @@ static struct ixp_alg ixp4xx_algos[] = {
 } };
 
 #define IXP_POSTFIX "-ixp4xx"
+
+static const struct platform_device_info ixp_dev_info __initdata = {
+       .name           = DRIVER_NAME,
+       .id             = 0,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
+
 static int __init ixp_module_init(void)
 {
        int num = ARRAY_SIZE(ixp4xx_algos);
-       int i,err ;
+       int i, err ;
 
-       if (platform_device_register(&pseudo_dev))
-               return -ENODEV;
+       pdev = platform_device_register_full(&ixp_dev_info);
+       if (IS_ERR(pdev))
+               return PTR_ERR(pdev);
+
+       dev = &pdev->dev;
 
        spin_lock_init(&desc_lock);
        spin_lock_init(&emerg_lock);
 
-       err = init_ixp_crypto();
+       err = init_ixp_crypto(&pdev->dev);
        if (err) {
-               platform_device_unregister(&pseudo_dev);
+               platform_device_unregister(pdev);
                return err;
        }
        for (i=0; i< num; i++) {
@@ -1495,8 +1487,8 @@ static void __exit ixp_module_exit(void)
                if (ixp4xx_algos[i].registered)
                        crypto_unregister_alg(&ixp4xx_algos[i].crypto);
        }
-       release_ixp_crypto();
-       platform_device_unregister(&pseudo_dev);
+       release_ixp_crypto(&pdev->dev);
+       platform_device_unregister(pdev);
 }
 
 module_init(ixp_module_init);
index 3374a3ebe4c75f49ecacbec24740ead4e05011b8..8d1e6f8e9e9cf613519b14fa5e5126135d37e986 100644 (file)
@@ -907,7 +907,7 @@ static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
        return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
 }
 
-irqreturn_t crypto_int(int irq, void *priv)
+static irqreturn_t crypto_int(int irq, void *priv)
 {
        u32 val;
 
@@ -928,7 +928,7 @@ irqreturn_t crypto_int(int irq, void *priv)
        return IRQ_HANDLED;
 }
 
-struct crypto_alg mv_aes_alg_ecb = {
+static struct crypto_alg mv_aes_alg_ecb = {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "mv-ecb-aes",
        .cra_priority   = 300,
@@ -951,7 +951,7 @@ struct crypto_alg mv_aes_alg_ecb = {
        },
 };
 
-struct crypto_alg mv_aes_alg_cbc = {
+static struct crypto_alg mv_aes_alg_cbc = {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "mv-cbc-aes",
        .cra_priority   = 300,
@@ -975,7 +975,7 @@ struct crypto_alg mv_aes_alg_cbc = {
        },
 };
 
-struct ahash_alg mv_sha1_alg = {
+static struct ahash_alg mv_sha1_alg = {
        .init = mv_hash_init,
        .update = mv_hash_update,
        .final = mv_hash_final,
@@ -999,7 +999,7 @@ struct ahash_alg mv_sha1_alg = {
                 }
 };
 
-struct ahash_alg mv_hmac_sha1_alg = {
+static struct ahash_alg mv_hmac_sha1_alg = {
        .init = mv_hash_init,
        .update = mv_hash_update,
        .final = mv_hash_final,
@@ -1084,7 +1084,7 @@ static int mv_probe(struct platform_device *pdev)
                goto err_unmap_sram;
        }
 
-       ret = request_irq(irq, crypto_int, IRQF_DISABLED, dev_name(&pdev->dev),
+       ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
                        cp);
        if (ret)
                goto err_thread;
@@ -1187,7 +1187,7 @@ static struct platform_driver marvell_crypto = {
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "mv_crypto",
-               .of_match_table = of_match_ptr(mv_cesa_of_match_table),
+               .of_match_table = mv_cesa_of_match_table,
        },
 };
 MODULE_ALIAS("platform:mv_crypto");
index ce791c2f81f79e4ffda5d7d44e6a31a8a46bcb34..e1e58d0ed5504c5999267d6b317f6637d5397aee 100644 (file)
@@ -554,7 +554,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
        return err;
 }
 
-int omap_aes_check_aligned(struct scatterlist *sg)
+static int omap_aes_check_aligned(struct scatterlist *sg)
 {
        while (sg) {
                if (!IS_ALIGNED(sg->offset, 4))
@@ -566,7 +566,7 @@ int omap_aes_check_aligned(struct scatterlist *sg)
        return 0;
 }
 
-int omap_aes_copy_sgs(struct omap_aes_dev *dd)
+static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
 {
        void *buf_in, *buf_out;
        int pages;
index 888f7f4a6d3fa29a36c26a1ee1119428164d9df9..a6175ba6d2389f96ea118470123fd3e7818fb279 100644 (file)
@@ -495,45 +495,29 @@ static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 {
        struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct spacc_alg *alg = to_spacc_alg(tfm->base.__crt_alg);
-       struct rtattr *rta = (void *)key;
-       struct crypto_authenc_key_param *param;
-       unsigned int authkeylen, enckeylen;
+       struct crypto_authenc_keys keys;
        int err = -EINVAL;
 
-       if (!RTA_OK(rta, keylen))
+       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;
 
-       if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+       if (keys.enckeylen > AES_MAX_KEY_SIZE)
                goto badkey;
 
-       if (RTA_PAYLOAD(rta) < sizeof(*param))
-               goto badkey;
-
-       param = RTA_DATA(rta);
-       enckeylen = be32_to_cpu(param->enckeylen);
-
-       key += RTA_ALIGN(rta->rta_len);
-       keylen -= RTA_ALIGN(rta->rta_len);
-
-       if (keylen < enckeylen)
-               goto badkey;
-
-       authkeylen = keylen - enckeylen;
-
-       if (enckeylen > AES_MAX_KEY_SIZE)
+       if (keys.authkeylen > sizeof(ctx->hash_ctx))
                goto badkey;
 
        if ((alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
            SPA_CTRL_CIPH_ALG_AES)
-               err = spacc_aead_aes_setkey(tfm, key + authkeylen, enckeylen);
+               err = spacc_aead_aes_setkey(tfm, keys.enckey, keys.enckeylen);
        else
-               err = spacc_aead_des_setkey(tfm, key + authkeylen, enckeylen);
+               err = spacc_aead_des_setkey(tfm, keys.enckey, keys.enckeylen);
 
        if (err)
                goto badkey;
 
-       memcpy(ctx->hash_ctx, key, authkeylen);
-       ctx->hash_key_len = authkeylen;
+       memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
+       ctx->hash_key_len = keys.authkeylen;
 
        return 0;
 
index d7bb8bac36e973944334409760dc56c37eb02be1..785a9ded7bdf3bda2840bc36daa88fe7f1532a9b 100644 (file)
@@ -1058,7 +1058,7 @@ static struct platform_driver sahara_driver = {
        .driver         = {
                .name   = SAHARA_NAME,
                .owner  = THIS_MODULE,
-               .of_match_table = of_match_ptr(sahara_dt_ids),
+               .of_match_table = sahara_dt_ids,
        },
        .id_table = sahara_platform_ids,
 };
index 661dc3eb1d66481c102ee4164850d6ad67cd5f22..f6f7c681073e8b9132afa81c70a7af58cf3f8360 100644 (file)
@@ -671,39 +671,20 @@ static int aead_setkey(struct crypto_aead *authenc,
                       const u8 *key, unsigned int keylen)
 {
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
-       struct rtattr *rta = (void *)key;
-       struct crypto_authenc_key_param *param;
-       unsigned int authkeylen;
-       unsigned int enckeylen;
-
-       if (!RTA_OK(rta, keylen))
-               goto badkey;
+       struct crypto_authenc_keys keys;
 
-       if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;
 
-       if (RTA_PAYLOAD(rta) < sizeof(*param))
+       if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
                goto badkey;
 
-       param = RTA_DATA(rta);
-       enckeylen = be32_to_cpu(param->enckeylen);
-
-       key += RTA_ALIGN(rta->rta_len);
-       keylen -= RTA_ALIGN(rta->rta_len);
-
-       if (keylen < enckeylen)
-               goto badkey;
+       memcpy(ctx->key, keys.authkey, keys.authkeylen);
+       memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
 
-       authkeylen = keylen - enckeylen;
-
-       if (keylen > TALITOS_MAX_KEY_SIZE)
-               goto badkey;
-
-       memcpy(&ctx->key, key, keylen);
-
-       ctx->keylen = keylen;
-       ctx->enckeylen = enckeylen;
-       ctx->authkeylen = authkeylen;
+       ctx->keylen = keys.authkeylen + keys.enckeylen;
+       ctx->enckeylen = keys.enckeylen;
+       ctx->authkeylen = keys.authkeylen;
 
        return 0;
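
ixp4xx, picoxcell, and talitos all replace the same open-coded rtattr walk with crypto_authenc_extractkeys(), which splits an authenc() key blob into its authentication and encryption halves and validates the embedded length parameter. A hedged sketch of the resulting setkey shape; struct foo_ctx and its field sizes are illustrative:

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/string.h>

struct foo_ctx {                        /* illustrative context layout */
        u8 authkey[64];
        u8 enckey[32];
        unsigned int authkeylen;
        unsigned int enckeylen;
};

static int foo_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct foo_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_authenc_keys keys;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        /* Bound each half against the space the context actually has. */
        if (keys.authkeylen > sizeof(ctx->authkey) ||
            keys.enckeylen > sizeof(ctx->enckey))
                goto badkey;

        memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
        memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
        ctx->authkeylen = keys.authkeylen;
        ctx->enckeylen = keys.enckeylen;
        return 0;

badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
}
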
 
index 2d58da972ae279f44b91f05dd472f6287c13f081..d8c7a132fea467e9d38bd842f40b5cde55c6ed8b 100644 (file)
@@ -27,6 +27,8 @@
  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/errno.h>
@@ -199,8 +201,6 @@ static void aes_workqueue_handler(struct work_struct *work);
 static DECLARE_WORK(aes_work, aes_workqueue_handler);
 static struct workqueue_struct *aes_wq;
 
-extern unsigned long long tegra_chip_uid(void);
-
 static inline u32 aes_readl(struct tegra_aes_dev *dd, u32 offset)
 {
        return readl(dd->io_base + offset);
@@ -713,13 +713,12 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
        struct tegra_aes_dev *dd = aes_dev;
        struct tegra_aes_ctx *ctx = &rng_ctx;
        struct tegra_aes_slot *key_slot;
-       struct timespec ts;
        int ret = 0;
-       u64 nsec, tmp[2];
+       u8 tmp[16]; /* 16 bytes = 128 bits of entropy */
        u8 *dt;
 
        if (!ctx || !dd) {
-               dev_err(dd->dev, "ctx=0x%x, dd=0x%x\n",
+               pr_err("ctx=0x%x, dd=0x%x\n",
                        (unsigned int)ctx, (unsigned int)dd);
                return -EINVAL;
        }
@@ -778,14 +777,8 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
        if (dd->ivlen >= (2 * DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128)) {
                dt = dd->iv + DEFAULT_RNG_BLK_SZ + AES_KEYSIZE_128;
        } else {
-               getnstimeofday(&ts);
-               nsec = timespec_to_ns(&ts);
-               do_div(nsec, 1000);
-               nsec ^= dd->ctr << 56;
-               dd->ctr++;
-               tmp[0] = nsec;
-               tmp[1] = tegra_chip_uid();
-               dt = (u8 *)tmp;
+               get_random_bytes(tmp, sizeof(tmp));
+               dt = tmp;
        }
        memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
 
@@ -804,7 +797,7 @@ static int tegra_aes_cra_init(struct crypto_tfm *tfm)
        return 0;
 }
 
-void tegra_aes_cra_exit(struct crypto_tfm *tfm)
+static void tegra_aes_cra_exit(struct crypto_tfm *tfm)
 {
        struct tegra_aes_ctx *ctx =
                crypto_ablkcipher_ctx((struct crypto_ablkcipher *)tfm);
@@ -924,7 +917,7 @@ static int tegra_aes_probe(struct platform_device *pdev)
        }
 
        /* Initialize the vde clock */
-       dd->aes_clk = clk_get(dev, "vde");
+       dd->aes_clk = devm_clk_get(dev, "vde");
        if (IS_ERR(dd->aes_clk)) {
                dev_err(dev, "iclock initialization failed.\n");
                err = -ENODEV;
@@ -1033,8 +1026,6 @@ out:
        if (dd->buf_out)
                dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
                        dd->buf_out, dd->dma_buf_out);
-       if (!IS_ERR(dd->aes_clk))
-               clk_put(dd->aes_clk);
        if (aes_wq)
                destroy_workqueue(aes_wq);
        spin_lock(&list_lock);
@@ -1068,7 +1059,6 @@ static int tegra_aes_remove(struct platform_device *pdev)
                          dd->buf_in, dd->dma_buf_in);
        dma_free_coherent(dev, AES_HW_DMA_BUFFER_SIZE_BYTES,
                          dd->buf_out, dd->dma_buf_out);
-       clk_put(dd->aes_clk);
        aes_dev = NULL;
 
        return 0;
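
Three independent cleanups land in tegra-aes.c above: a pr_fmt() definition so the bare pr_err() calls are prefixed with the module name, get_random_bytes() replacing the timestamp-XOR-chip-UID construction for the RNG reseed value (which also lets the tegra_chip_uid() extern go away), and devm_clk_get() replacing clk_get(), which is why the clk_put() calls vanish from the error path and from remove(). A minimal sketch of the managed-clock pattern, with a placeholder probe function:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk;

            /* devm_clk_get() ties the reference to the device lifetime:
             * it is released automatically on probe failure and on
             * unbind, so no clk_put() is ever needed. */
            clk = devm_clk_get(&pdev->dev, "vde");
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            /* ... prepare/enable the clock when the hardware is used ... */
            return 0;
    }
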
index c99c00d35d34f73d7ceca6cbf1e68b163fcc502a..2e23b12c350b8759c71ae1d5a438c9aaf2089166 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/stat.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/devfreq.h>
 #include <linux/workqueue.h>
 #include <linux/platform_device.h>
@@ -902,13 +902,13 @@ static ssize_t available_frequencies_show(struct device *d,
 {
        struct devfreq *df = to_devfreq(d);
        struct device *dev = df->dev.parent;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        ssize_t count = 0;
        unsigned long freq = 0;
 
        rcu_read_lock();
        do {
-               opp = opp_find_freq_ceil(dev, &freq);
+               opp = dev_pm_opp_find_freq_ceil(dev, &freq);
                if (IS_ERR(opp))
                        break;
 
@@ -1029,25 +1029,26 @@ module_exit(devfreq_exit);
  * under the locked area. The pointer returned must be used prior to unlocking
  * with rcu_read_unlock() to maintain the integrity of the pointer.
  */
-struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
-                                   u32 flags)
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+                                          unsigned long *freq,
+                                          u32 flags)
 {
-       struct opp *opp;
+       struct dev_pm_opp *opp;
 
        if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
                /* The freq is an upper bound. opp should be lower */
-               opp = opp_find_freq_floor(dev, freq);
+               opp = dev_pm_opp_find_freq_floor(dev, freq);
 
                /* If not available, use the closest opp */
                if (opp == ERR_PTR(-ERANGE))
-                       opp = opp_find_freq_ceil(dev, freq);
+                       opp = dev_pm_opp_find_freq_ceil(dev, freq);
        } else {
                /* The freq is a lower bound. opp should be higher */
-               opp = opp_find_freq_ceil(dev, freq);
+               opp = dev_pm_opp_find_freq_ceil(dev, freq);
 
                /* If not available, use the closest opp */
                if (opp == ERR_PTR(-ERANGE))
-                       opp = opp_find_freq_floor(dev, freq);
+                       opp = dev_pm_opp_find_freq_floor(dev, freq);
        }
 
        return opp;
@@ -1066,7 +1067,7 @@ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
        int ret = 0;
 
        rcu_read_lock();
-       nh = opp_get_notifier(dev);
+       nh = dev_pm_opp_get_notifier(dev);
        if (IS_ERR(nh))
                ret = PTR_ERR(nh);
        rcu_read_unlock();
@@ -1092,7 +1093,7 @@ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
        int ret = 0;
 
        rcu_read_lock();
-       nh = opp_get_notifier(dev);
+       nh = dev_pm_opp_get_notifier(dev);
        if (IS_ERR(nh))
                ret = PTR_ERR(nh);
        rcu_read_unlock();
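
The devfreq core above is converted from the old opp_*() names to the dev_pm_opp_*() API and from struct opp to struct dev_pm_opp; the semantics are unchanged, in particular lookups must still happen under rcu_read_lock() and the returned pointer is only valid inside that read-side section. A condensed sketch of the lookup pattern these hunks use (the function name is a placeholder):

    #include <linux/err.h>
    #include <linux/pm_opp.h>
    #include <linux/rcupdate.h>

    static int example_pick_opp(struct device *dev, unsigned long *freq,
                                unsigned long *volt)
    {
            struct dev_pm_opp *opp;

            rcu_read_lock();
            /* Lowest OPP at or above *freq; *freq is updated to the
             * chosen rate on success. */
            opp = dev_pm_opp_find_freq_ceil(dev, freq);
            if (IS_ERR(opp)) {
                    rcu_read_unlock();
                    return PTR_ERR(opp);
            }
            *volt = dev_pm_opp_get_voltage(opp);
            rcu_read_unlock();

            return 0;
    }
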
index c5f86d8caca34c6a7a3ea2341da5c242ddf41c83..cede6f71cd63feb5afebde508199b9be9a507778 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/suspend.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 #include <linux/devfreq.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
@@ -639,7 +639,7 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
        struct platform_device *pdev = container_of(dev, struct platform_device,
                                                    dev);
        struct busfreq_data *data = platform_get_drvdata(pdev);
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long freq;
        unsigned long old_freq = data->curr_oppinfo.rate;
        struct busfreq_opp_info new_oppinfo;
@@ -650,8 +650,8 @@ static int exynos4_bus_target(struct device *dev, unsigned long *_freq,
                rcu_read_unlock();
                return PTR_ERR(opp);
        }
-       new_oppinfo.rate = opp_get_freq(opp);
-       new_oppinfo.volt = opp_get_voltage(opp);
+       new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+       new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        freq = new_oppinfo.rate;
 
@@ -873,7 +873,7 @@ static int exynos4210_init_tables(struct busfreq_data *data)
                exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
 
        for (i = LV_0; i < EX4210_LV_NUM; i++) {
-               err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+               err = dev_pm_opp_add(data->dev, exynos4210_busclk_table[i].clk,
                              exynos4210_busclk_table[i].volt);
                if (err) {
                        dev_err(data->dev, "Cannot add opp entries.\n");
@@ -940,7 +940,7 @@ static int exynos4x12_init_tables(struct busfreq_data *data)
        }
 
        for (i = 0; i < EX4x12_LV_NUM; i++) {
-               ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+               ret = dev_pm_opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
                              exynos4x12_mifclk_table[i].volt);
                if (ret) {
                        dev_err(data->dev, "Fail to add opp entries.\n");
@@ -956,7 +956,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
 {
        struct busfreq_data *data = container_of(this, struct busfreq_data,
                                                 pm_notifier);
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        struct busfreq_opp_info new_oppinfo;
        unsigned long maxfreq = ULONG_MAX;
        int err = 0;
@@ -969,7 +969,7 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
                data->disabled = true;
 
                rcu_read_lock();
-               opp = opp_find_freq_floor(data->dev, &maxfreq);
+               opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        dev_err(data->dev, "%s: unable to find a min freq\n",
@@ -977,8 +977,8 @@ static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
                        mutex_unlock(&data->lock);
                        return PTR_ERR(opp);
                }
-               new_oppinfo.rate = opp_get_freq(opp);
-               new_oppinfo.volt = opp_get_voltage(opp);
+               new_oppinfo.rate = dev_pm_opp_get_freq(opp);
+               new_oppinfo.volt = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
 
                err = exynos4_bus_setvolt(data, &new_oppinfo,
@@ -1020,7 +1020,7 @@ unlock:
 static int exynos4_busfreq_probe(struct platform_device *pdev)
 {
        struct busfreq_data *data;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        struct device *dev = &pdev->dev;
        int err = 0;
 
@@ -1065,15 +1065,16 @@ static int exynos4_busfreq_probe(struct platform_device *pdev)
        }
 
        rcu_read_lock();
-       opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+       opp = dev_pm_opp_find_freq_floor(dev,
+                                        &exynos4_devfreq_profile.initial_freq);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
                dev_err(dev, "Invalid initial frequency %lu kHz.\n",
                        exynos4_devfreq_profile.initial_freq);
                return PTR_ERR(opp);
        }
-       data->curr_oppinfo.rate = opp_get_freq(opp);
-       data->curr_oppinfo.volt = opp_get_voltage(opp);
+       data->curr_oppinfo.rate = dev_pm_opp_get_freq(opp);
+       data->curr_oppinfo.volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
 
        platform_set_drvdata(pdev, data);
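
The exynos4 bus driver above gets the same treatment, including dev_pm_opp_add() for registering its static frequency/voltage table; the API takes the frequency in Hz and the voltage in microvolts. A small sketch with made-up values (the example_* names are placeholders):

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/pm_opp.h>

    struct example_opp {
            unsigned long rate_hz;
            unsigned long microvolt;
    };

    static const struct example_opp example_table[] = {
            { 400000000, 1100000 },
            { 266000000, 1000000 },
            { 133000000,  950000 },
    };

    static int example_register_opps(struct device *dev)
    {
            int i, err;

            for (i = 0; i < ARRAY_SIZE(example_table); i++) {
                    err = dev_pm_opp_add(dev, example_table[i].rate_hz,
                                         example_table[i].microvolt);
                    if (err)
                            return err;
            }
            return 0;
    }
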
index 574b16b59be5df13352ebee4f952e592b9a6f52f..9e3752dac99e5115bbe2df05694c06e8ca7fe2a8 100644 (file)
 #include <linux/module.h>
 #include <linux/devfreq.h>
 #include <linux/io.h>
-#include <linux/opp.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
-#include <linux/opp.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
+#include <linux/pm_opp.h>
 #include <linux/pm_qos.h>
 #include <linux/regulator/consumer.h>
 #include <linux/of_address.h>
@@ -132,7 +131,7 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
        struct platform_device *pdev = container_of(dev, struct platform_device,
                                                    dev);
        struct busfreq_data_int *data = platform_get_drvdata(pdev);
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long old_freq, freq;
        unsigned long volt;
 
@@ -144,8 +143,8 @@ static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
                return PTR_ERR(opp);
        }
 
-       freq = opp_get_freq(opp);
-       volt = opp_get_voltage(opp);
+       freq = dev_pm_opp_get_freq(opp);
+       volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
 
        old_freq = data->curr_freq;
@@ -246,7 +245,7 @@ static int exynos5250_init_int_tables(struct busfreq_data_int *data)
        int i, err = 0;
 
        for (i = LV_0; i < _LV_END; i++) {
-               err = opp_add(data->dev, exynos5_int_opp_table[i].clk,
+               err = dev_pm_opp_add(data->dev, exynos5_int_opp_table[i].clk,
                                exynos5_int_opp_table[i].volt);
                if (err) {
                        dev_err(data->dev, "Cannot add opp entries.\n");
@@ -262,7 +261,7 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
 {
        struct busfreq_data_int *data = container_of(this,
                                        struct busfreq_data_int, pm_notifier);
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        unsigned long maxfreq = ULONG_MAX;
        unsigned long freq;
        unsigned long volt;
@@ -276,14 +275,14 @@ static int exynos5_busfreq_int_pm_notifier_event(struct notifier_block *this,
                data->disabled = true;
 
                rcu_read_lock();
-               opp = opp_find_freq_floor(data->dev, &maxfreq);
+               opp = dev_pm_opp_find_freq_floor(data->dev, &maxfreq);
                if (IS_ERR(opp)) {
                        rcu_read_unlock();
                        err = PTR_ERR(opp);
                        goto unlock;
                }
-               freq = opp_get_freq(opp);
-               volt = opp_get_voltage(opp);
+               freq = dev_pm_opp_get_freq(opp);
+               volt = dev_pm_opp_get_voltage(opp);
                rcu_read_unlock();
 
                err = exynos5_int_setvolt(data, volt);
@@ -316,7 +315,7 @@ unlock:
 static int exynos5_busfreq_int_probe(struct platform_device *pdev)
 {
        struct busfreq_data_int *data;
-       struct opp *opp;
+       struct dev_pm_opp *opp;
        struct device *dev = &pdev->dev;
        struct device_node *np;
        unsigned long initial_freq;
@@ -368,7 +367,7 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev)
        }
 
        rcu_read_lock();
-       opp = opp_find_freq_floor(dev,
+       opp = dev_pm_opp_find_freq_floor(dev,
                        &exynos5_devfreq_int_profile.initial_freq);
        if (IS_ERR(opp)) {
                rcu_read_unlock();
@@ -377,8 +376,8 @@ static int exynos5_busfreq_int_probe(struct platform_device *pdev)
                err = PTR_ERR(opp);
                goto err_opp_add;
        }
-       initial_freq = opp_get_freq(opp);
-       initial_volt = opp_get_voltage(opp);
+       initial_freq = dev_pm_opp_get_freq(opp);
+       initial_volt = dev_pm_opp_get_voltage(opp);
        rcu_read_unlock();
        data->curr_freq = initial_freq;
 
index fce46c5bf1c74e3d76accde7570ffa2d423eb9f1..8c56d7856cb20fb28b6b3461b71397e78ca4edd8 100644 (file)
@@ -1252,7 +1252,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
        size_t bytes = 0;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        /*
@@ -1267,7 +1267,7 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
 
        spin_lock_irqsave(&plchan->vc.lock, flags);
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_SUCCESS) {
+       if (ret != DMA_COMPLETE) {
                vd = vchan_find_desc(&plchan->vc, cookie);
                if (vd) {
                        /* On the issued list, so hasn't been processed yet */
@@ -2055,6 +2055,11 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
        if (ret)
                return ret;
 
+       /* Ensure that we can do DMA */
+       ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto out_no_pl08x;
+
        /* Create the driver state holder */
        pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
        if (!pl08x) {
@@ -2133,8 +2138,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
        writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
        writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);
 
-       ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
-                         DRIVER_NAME, pl08x);
+       ret = request_irq(adev->irq[0], pl08x_irq, 0, DRIVER_NAME, pl08x);
        if (ret) {
                dev_err(&adev->dev, "%s failed to request interrupt %d\n",
                        __func__, adev->irq[0]);
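
From here on, the dmaengine drivers switch from DMA_SUCCESS to DMA_COMPLETE; only the name of the terminal cookie state changes, the logic stays the same. (The pl08x probe above also gains a dma_set_mask_and_coherent() call so a platform that cannot satisfy a 32-bit DMA mask fails probe early.) The recurring tx_status pattern looks roughly like the sketch below; dma_cookie_status() and dma_set_residue() live in the driver-internal drivers/dma/dmaengine.h, so this only builds in-tree:

    #include <linux/dmaengine.h>
    #include "dmaengine.h"          /* driver-internal cookie helpers */

    static enum dma_status example_tx_status(struct dma_chan *chan,
                                             dma_cookie_t cookie,
                                             struct dma_tx_state *txstate)
    {
            enum dma_status ret;

            ret = dma_cookie_status(chan, cookie, txstate);
            if (ret == DMA_COMPLETE)        /* formerly DMA_SUCCESS */
                    return ret;             /* descriptor fully retired */

            /* Still queued or in flight: a real driver computes and
             * reports the residue here, e.g.
             * dma_set_residue(txstate, bytes_left); */
            return ret;
    }
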
index c787f38a186a008a6cf8fa4af1dc9d19cab8f836..1ef74579447d5c0cc201cab4425748be75368eef 100644 (file)
@@ -1102,7 +1102,7 @@ atc_tx_status(struct dma_chan *chan,
        int bytes = 0;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
        /*
         * There's no point calculating the residue if there's
index 31011d2a26fcfa0510b64c1114c1f87c05762ec3..3c6716e0b78eee2592a8c977550ce036ab4318c5 100644 (file)
@@ -2369,7 +2369,7 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        enum dma_status ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        dma_set_residue(txstate, coh901318_get_bytes_left(chan));
@@ -2694,7 +2694,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
        if (irq < 0)
                return irq;
 
-       err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
+       err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, 0,
                               "coh901318", base);
        if (err)
                return err;
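
Several of these hunks also drop IRQF_DISABLED. The flag has been a no-op for years (genirq runs every handler with local interrupts disabled regardless), so passing 0 requests exactly the same behaviour; a minimal sketch with placeholder names:

    #include <linux/interrupt.h>

    static irqreturn_t example_irq(int irq, void *data)
    {
            /* acknowledge and handle the interrupt */
            return IRQ_HANDLED;
    }

    static int example_request(struct device *dev, int irq, void *priv)
    {
            /* flags = 0: no IRQF_DISABLED needed, handlers run with
             * interrupts off either way. */
            return devm_request_irq(dev, irq, example_irq, 0,
                                    "example", priv);
    }
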
index 7c82b92f9b16f18bf1ce4e6bedb988f5d5b70e22..278b3058919a9f9de2fbc113d221d0f835c249a2 100644 (file)
@@ -353,7 +353,7 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
 
        /* lock */
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (txstate && ret == DMA_SUCCESS)
+       if (txstate && ret == DMA_COMPLETE)
                txstate->residue = c->residue;
        /* unlock */
 
@@ -674,14 +674,14 @@ static void cleanup_chans(struct cppi41_dd *cdd)
        }
 }
 
-static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
 {
        struct cppi41_channel *cchan;
        int i;
        int ret;
        u32 n_chans;
 
-       ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
+       ret = of_property_read_u32(dev->of_node, "#dma-channels",
                        &n_chans);
        if (ret)
                return ret;
@@ -719,7 +719,7 @@ err:
        return -ENOMEM;
 }
 
-static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
 {
        unsigned int mem_decs;
        int i;
@@ -731,7 +731,7 @@ static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
                cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
                cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
 
-               dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
+               dma_free_coherent(dev, mem_decs, cdd->cd,
                                cdd->descs_phys);
        }
 }
@@ -741,19 +741,19 @@ static void disable_sched(struct cppi41_dd *cdd)
        cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
 }
 
-static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
 {
        disable_sched(cdd);
 
-       purge_descs(pdev, cdd);
+       purge_descs(dev, cdd);
 
        cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
        cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
-       dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+       dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
                        cdd->scratch_phys);
 }
 
-static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_descs(struct device *dev, struct cppi41_dd *cdd)
 {
        unsigned int desc_size;
        unsigned int mem_decs;
@@ -777,7 +777,7 @@ static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
                reg |= ilog2(ALLOC_DECS_NUM) - 5;
 
                BUILD_BUG_ON(DESCS_AREAS != 1);
-               cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
+               cdd->cd = dma_alloc_coherent(dev, mem_decs,
                                &cdd->descs_phys, GFP_KERNEL);
                if (!cdd->cd)
                        return -ENOMEM;
@@ -813,12 +813,12 @@ static void init_sched(struct cppi41_dd *cdd)
        cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
 }
 
-static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
+static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
 {
        int ret;
 
        BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
-       cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
+       cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
                        &cdd->scratch_phys, GFP_KERNEL);
        if (!cdd->qmgr_scratch)
                return -ENOMEM;
@@ -827,7 +827,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
        cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
        cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
 
-       ret = init_descs(pdev, cdd);
+       ret = init_descs(dev, cdd);
        if (ret)
                goto err_td;
 
@@ -835,7 +835,7 @@ static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
        init_sched(cdd);
        return 0;
 err_td:
-       deinit_cpii41(pdev, cdd);
+       deinit_cppi41(dev, cdd);
        return ret;
 }
 
@@ -914,11 +914,11 @@ static const struct of_device_id cppi41_dma_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
 
-static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
+static const struct cppi_glue_infos *get_glue_info(struct device *dev)
 {
        const struct of_device_id *of_id;
 
-       of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
+       of_id = of_match_node(cppi41_dma_ids, dev->of_node);
        if (!of_id)
                return NULL;
        return of_id->data;
@@ -927,11 +927,12 @@ static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
 static int cppi41_dma_probe(struct platform_device *pdev)
 {
        struct cppi41_dd *cdd;
+       struct device *dev = &pdev->dev;
        const struct cppi_glue_infos *glue_info;
        int irq;
        int ret;
 
-       glue_info = get_glue_info(pdev);
+       glue_info = get_glue_info(dev);
        if (!glue_info)
                return -EINVAL;
 
@@ -946,14 +947,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
        cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
        cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
        cdd->ddev.device_control = cppi41_dma_control;
-       cdd->ddev.dev = &pdev->dev;
+       cdd->ddev.dev = dev;
        INIT_LIST_HEAD(&cdd->ddev.channels);
        cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
 
-       cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
-       cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
-       cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
-       cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
+       cdd->usbss_mem = of_iomap(dev->of_node, 0);
+       cdd->ctrl_mem = of_iomap(dev->of_node, 1);
+       cdd->sched_mem = of_iomap(dev->of_node, 2);
+       cdd->qmgr_mem = of_iomap(dev->of_node, 3);
 
        if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
                        !cdd->qmgr_mem) {
@@ -961,8 +962,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
                goto err_remap;
        }
 
-       pm_runtime_enable(&pdev->dev);
-       ret = pm_runtime_get_sync(&pdev->dev);
+       pm_runtime_enable(dev);
+       ret = pm_runtime_get_sync(dev);
        if (ret)
                goto err_get_sync;
 
@@ -970,22 +971,22 @@ static int cppi41_dma_probe(struct platform_device *pdev)
        cdd->queues_tx = glue_info->queues_tx;
        cdd->td_queue = glue_info->td_queue;
 
-       ret = init_cppi41(pdev, cdd);
+       ret = init_cppi41(dev, cdd);
        if (ret)
                goto err_init_cppi;
 
-       ret = cppi41_add_chans(pdev, cdd);
+       ret = cppi41_add_chans(dev, cdd);
        if (ret)
                goto err_chans;
 
-       irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+       irq = irq_of_parse_and_map(dev->of_node, 0);
        if (!irq)
                goto err_irq;
 
        cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
 
        ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
-                       dev_name(&pdev->dev), cdd);
+                       dev_name(dev), cdd);
        if (ret)
                goto err_irq;
        cdd->irq = irq;
@@ -994,7 +995,7 @@ static int cppi41_dma_probe(struct platform_device *pdev)
        if (ret)
                goto err_dma_reg;
 
-       ret = of_dma_controller_register(pdev->dev.of_node,
+       ret = of_dma_controller_register(dev->of_node,
                        cppi41_dma_xlate, &cpp41_dma_info);
        if (ret)
                goto err_of;
@@ -1009,11 +1010,11 @@ err_irq:
        cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
        cleanup_chans(cdd);
 err_chans:
-       deinit_cpii41(pdev, cdd);
+       deinit_cppi41(dev, cdd);
 err_init_cppi:
-       pm_runtime_put(&pdev->dev);
+       pm_runtime_put(dev);
 err_get_sync:
-       pm_runtime_disable(&pdev->dev);
+       pm_runtime_disable(dev);
        iounmap(cdd->usbss_mem);
        iounmap(cdd->ctrl_mem);
        iounmap(cdd->sched_mem);
@@ -1033,7 +1034,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
        cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
        free_irq(cdd->irq, cdd);
        cleanup_chans(cdd);
-       deinit_cpii41(pdev, cdd);
+       deinit_cppi41(&pdev->dev, cdd);
        iounmap(cdd->usbss_mem);
        iounmap(cdd->ctrl_mem);
        iounmap(cdd->sched_mem);
@@ -1044,12 +1045,41 @@ static int cppi41_dma_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int cppi41_suspend(struct device *dev)
+{
+       struct cppi41_dd *cdd = dev_get_drvdata(dev);
+
+       cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+       disable_sched(cdd);
+
+       return 0;
+}
+
+static int cppi41_resume(struct device *dev)
+{
+       struct cppi41_dd *cdd = dev_get_drvdata(dev);
+       int i;
+
+       for (i = 0; i < DESCS_AREAS; i++)
+               cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+
+       init_sched(cdd);
+       cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cppi41_pm_ops, cppi41_suspend, cppi41_resume);
+
 static struct platform_driver cpp41_dma_driver = {
        .probe  = cppi41_dma_probe,
        .remove = cppi41_dma_remove,
        .driver = {
                .name = "cppi41-dma-engine",
                .owner = THIS_MODULE,
+               .pm = &cppi41_pm_ops,
                .of_match_table = of_match_ptr(cppi41_dma_ids),
        },
 };
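
cppi41 gains system sleep support above. SIMPLE_DEV_PM_OPS() only wires up the suspend/resume callbacks when CONFIG_PM_SLEEP is enabled, which is why the callbacks sit under #ifdef CONFIG_PM_SLEEP while the ops structure itself does not need one. A stripped-down sketch of the same shape, with placeholder callbacks:

    #include <linux/platform_device.h>
    #include <linux/pm.h>

    #ifdef CONFIG_PM_SLEEP
    static int example_suspend(struct device *dev)
    {
            /* mask interrupts and stop the DMA scheduler */
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            /* restore queue/descriptor state and re-enable interrupts */
            return 0;
    }
    #endif

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

    static struct platform_driver example_driver = {
            .driver = {
                    .name   = "example",
                    .pm     = &example_pm_ops,
            },
    };
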
index b0c0c8268d42bb023ac94ca8f420e22a5a197c37..94c380f0753860c4c4002c19f4217332fa2460f0 100644 (file)
@@ -491,7 +491,7 @@ static enum dma_status jz4740_dma_tx_status(struct dma_chan *c,
        unsigned long flags;
 
        status = dma_cookie_status(c, cookie, state);
-       if (status == DMA_SUCCESS || !state)
+       if (status == DMA_COMPLETE || !state)
                return status;
 
        spin_lock_irqsave(&chan->vchan.lock, flags);
index 9162ac80c18f303ac9a509eb97298eba33d4753b..81d876528c70d60152628b04bb41b08d860ec565 100644 (file)
@@ -1062,7 +1062,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 
        if (!tx)
-               return DMA_SUCCESS;
+               return DMA_COMPLETE;
 
        while (tx->cookie == -EBUSY) {
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
index 92f796cdc6ab1dc12c6b895fb2d6e16e5dd92264..59e287f56dfca28bb0ff40fc79b6c0e612af58ac 100644 (file)
@@ -740,7 +740,7 @@ static int dmatest_func(void *data)
                                          len, 0);
                        failed_tests++;
                        continue;
-               } else if (status != DMA_SUCCESS) {
+               } else if (status != DMA_COMPLETE) {
                        enum dmatest_error_type type = (status == DMA_ERROR) ?
                                DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS;
                        thread_result_add(info, result, type,
index 89eb89f222846e0ff5d20cfc5e14619fc05d6600..2c29331571e4a142f2f2b965a15ebbc26657bb2a 100644 (file)
@@ -1098,13 +1098,13 @@ dwc_tx_status(struct dma_chan *chan,
        enum dma_status         ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_SUCCESS)
+       if (ret != DMA_COMPLETE)
                dma_set_residue(txstate, dwc_get_residue(dwc));
 
        if (dwc->paused && ret == DMA_IN_PROGRESS)
index e35d97590311329fe1f7bd93be5cc4b845f3a7c2..453822cc4f9d3a9a9c7b62626c7362c13ee8b931 100644 (file)
@@ -191,11 +191,9 @@ static int dw_probe(struct platform_device *pdev)
        if (IS_ERR(chip->regs))
                return PTR_ERR(chip->regs);
 
-       /* Apply default dma_mask if needed */
-       if (!dev->dma_mask) {
-               dev->dma_mask = &dev->coherent_dma_mask;
-               dev->coherent_dma_mask = DMA_BIT_MASK(32);
-       }
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        pdata = dev_get_platdata(dev);
        if (!pdata)
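
The dw_dmac platform glue above replaces the open-coded default DMA mask assignment with dma_coerce_mask_and_coherent(). Simplified, the helper performs the same two steps and then validates the result; a rough sketch of what it boils down to:

    #include <linux/dma-mapping.h>

    /* Approximation of dma_coerce_mask_and_coherent(dev, mask). */
    static int example_coerce_mask(struct device *dev, u64 mask)
    {
            /* Force the streaming mask to alias the coherent mask ... */
            dev->dma_mask = &dev->coherent_dma_mask;
            /* ... then set and validate both masks in one go. */
            return dma_set_mask_and_coherent(dev, mask);
    }
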
index 098a8da450f0cababa616ef83f1e65fa82578ea1..57c3f3e5321257a11e26b9e2277213b3c65f64a2 100644 (file)
 #define EDMA_CHANS     64
 #endif /* CONFIG_ARCH_DAVINCI_DA8XX */
 
-/* Max of 16 segments per channel to conserve PaRAM slots */
-#define MAX_NR_SG              16
+/*
+ * Max of 20 segments per channel to conserve PaRAM slots
+ * Also note that MAX_NR_SG should be at least the number of periods
+ * required for ASoC, otherwise DMA prep calls will fail. Today
+ * davinci-pcm is the only user of this driver and requires at least
+ * 17 slots, so we set the default to 20.
+ */
+#define MAX_NR_SG              20
 #define EDMA_MAX_SLOTS         MAX_NR_SG
 #define EDMA_DESCRIPTORS       16
 
@@ -250,6 +256,117 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
        return ret;
 }
 
+/*
+ * A PaRAM set configuration abstraction used by other modes
+ * @chan: Channel who's PaRAM set we're configuring
+ * @pset: PaRAM set to initialize and setup.
+ * @src_addr: Source address of the DMA
+ * @dst_addr: Destination address of the DMA
+ * @burst: In units of dev_width, how much to send
+ * @dev_width: Width of each element (the slave bus width)
+ * @dma_length: Total length of the DMA transfer
+ * @direction: Direction of the transfer
+ */
+static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset,
+       dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
+       enum dma_slave_buswidth dev_width, unsigned int dma_length,
+       enum dma_transfer_direction direction)
+{
+       struct edma_chan *echan = to_edma_chan(chan);
+       struct device *dev = chan->device->dev;
+       int acnt, bcnt, ccnt, cidx;
+       int src_bidx, dst_bidx, src_cidx, dst_cidx;
+       int absync;
+
+       acnt = dev_width;
+       /*
+        * If the maxburst is equal to the fifo width, use
+        * A-synced transfers. This allows for large contiguous
+        * buffer transfers using only one PaRAM set.
+        */
+       if (burst == 1) {
+               /*
+                * For the A-sync case, bcnt and ccnt are the remainder
+                * and quotient respectively of dividing
+                * (dma_length / acnt) by (SZ_64K - 1). This is so
+                * that in case bcnt overflows, we have ccnt to use.
+                * Note: bcntrld is used only for A-sync transfers, and it
+                * only matters when sg_dma_len(sg) >= SZ_64K.
+                * In that case, the approach taken is: bcnt for the
+                * first frame is the remainder computed below, and for
+                * every successive frame bcnt is SZ_64K - 1. This is
+                * ensured by setting bcntrld = 0xffff at the end of
+                * the function.
+                */
+               absync = false;
+               ccnt = dma_length / acnt / (SZ_64K - 1);
+               bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
+               /*
+                * If bcnt is non-zero, we have a remainder and hence an
+                * extra frame to transfer, so increment ccnt.
+                */
+               if (bcnt)
+                       ccnt++;
+               else
+                       bcnt = SZ_64K - 1;
+               cidx = acnt;
+       } else {
+               /*
+                * If maxburst is greater than the fifo address_width,
+                * use AB-synced transfers where A count is the fifo
+                * address_width and B count is the maxburst. In this
+                * case, we are limited to transfers of C count frames
+                * of (address_width * maxburst) where C count is limited
+                * to SZ_64K-1. This places an upper bound on the length
+                * of an SG segment that can be handled.
+                */
+               absync = true;
+               bcnt = burst;
+               ccnt = dma_length / (acnt * bcnt);
+               if (ccnt > (SZ_64K - 1)) {
+                       dev_err(dev, "Exceeded max SG segment size\n");
+                       return -EINVAL;
+               }
+               cidx = acnt * bcnt;
+       }
+
+       if (direction == DMA_MEM_TO_DEV) {
+               src_bidx = acnt;
+               src_cidx = cidx;
+               dst_bidx = 0;
+               dst_cidx = 0;
+       } else if (direction == DMA_DEV_TO_MEM)  {
+               src_bidx = 0;
+               src_cidx = 0;
+               dst_bidx = acnt;
+               dst_cidx = cidx;
+       } else {
+               dev_err(dev, "%s: direction not implemented yet\n", __func__);
+               return -EINVAL;
+       }
+
+       pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
+       /* Configure A or AB synchronized transfers */
+       if (absync)
+               pset->opt |= SYNCDIM;
+
+       pset->src = src_addr;
+       pset->dst = dst_addr;
+
+       pset->src_dst_bidx = (dst_bidx << 16) | src_bidx;
+       pset->src_dst_cidx = (dst_cidx << 16) | src_cidx;
+
+       pset->a_b_cnt = bcnt << 16 | acnt;
+       pset->ccnt = ccnt;
+       /*
+        * The only time the (bcntrld) auto-reload is required is the
+        * A-sync case, and the only reload value needed there is
+        * SZ_64K - 1. 'link' is initially set to NULL and is populated
+        * later by edma_execute.
+        */
+       pset->link_bcntrld = 0xffffffff;
+       return absync;
+}
+
 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
@@ -258,23 +375,21 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
        struct edma_chan *echan = to_edma_chan(chan);
        struct device *dev = chan->device->dev;
        struct edma_desc *edesc;
-       dma_addr_t dev_addr;
+       dma_addr_t src_addr = 0, dst_addr = 0;
        enum dma_slave_buswidth dev_width;
        u32 burst;
        struct scatterlist *sg;
-       int acnt, bcnt, ccnt, src, dst, cidx;
-       int src_bidx, dst_bidx, src_cidx, dst_cidx;
-       int i, nslots;
+       int i, nslots, ret;
 
        if (unlikely(!echan || !sgl || !sg_len))
                return NULL;
 
        if (direction == DMA_DEV_TO_MEM) {
-               dev_addr = echan->cfg.src_addr;
+               src_addr = echan->cfg.src_addr;
                dev_width = echan->cfg.src_addr_width;
                burst = echan->cfg.src_maxburst;
        } else if (direction == DMA_MEM_TO_DEV) {
-               dev_addr = echan->cfg.dst_addr;
+               dst_addr = echan->cfg.dst_addr;
                dev_width = echan->cfg.dst_addr_width;
                burst = echan->cfg.dst_maxburst;
        } else {
@@ -306,6 +421,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                                                EDMA_SLOT_ANY);
                        if (echan->slot[i] < 0) {
                                dev_err(dev, "Failed to allocate slot\n");
+                               kfree(edesc);
                                return NULL;
                        }
                }
@@ -313,63 +429,19 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
 
        /* Configure PaRAM sets for each SG */
        for_each_sg(sgl, sg, sg_len, i) {
+               /* Get address for each SG */
+               if (direction == DMA_DEV_TO_MEM)
+                       dst_addr = sg_dma_address(sg);
+               else
+                       src_addr = sg_dma_address(sg);
 
-               acnt = dev_width;
+               ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
+                                      dst_addr, burst, dev_width,
+                                      sg_dma_len(sg), direction);
+               if (ret < 0)
+                       return NULL;
 
-               /*
-                * If the maxburst is equal to the fifo width, use
-                * A-synced transfers. This allows for large contiguous
-                * buffer transfers using only one PaRAM set.
-                */
-               if (burst == 1) {
-                       edesc->absync = false;
-                       ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
-                       bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
-                       if (bcnt)
-                               ccnt++;
-                       else
-                               bcnt = SZ_64K - 1;
-                       cidx = acnt;
-               /*
-                * If maxburst is greater than the fifo address_width,
-                * use AB-synced transfers where A count is the fifo
-                * address_width and B count is the maxburst. In this
-                * case, we are limited to transfers of C count frames
-                * of (address_width * maxburst) where C count is limited
-                * to SZ_64K-1. This places an upper bound on the length
-                * of an SG segment that can be handled.
-                */
-               } else {
-                       edesc->absync = true;
-                       bcnt = burst;
-                       ccnt = sg_dma_len(sg) / (acnt * bcnt);
-                       if (ccnt > (SZ_64K - 1)) {
-                               dev_err(dev, "Exceeded max SG segment size\n");
-                               return NULL;
-                       }
-                       cidx = acnt * bcnt;
-               }
-
-               if (direction == DMA_MEM_TO_DEV) {
-                       src = sg_dma_address(sg);
-                       dst = dev_addr;
-                       src_bidx = acnt;
-                       src_cidx = cidx;
-                       dst_bidx = 0;
-                       dst_cidx = 0;
-               } else {
-                       src = dev_addr;
-                       dst = sg_dma_address(sg);
-                       src_bidx = 0;
-                       src_cidx = 0;
-                       dst_bidx = acnt;
-                       dst_cidx = cidx;
-               }
-
-               edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
-               /* Configure A or AB synchronized transfers */
-               if (edesc->absync)
-                       edesc->pset[i].opt |= SYNCDIM;
+               edesc->absync = ret;
 
                /* If this is the last in a current SG set of transactions,
                   enable interrupts so that next set is processed */
@@ -379,17 +451,6 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
                /* If this is the last set, enable completion interrupt flag */
                if (i == sg_len - 1)
                        edesc->pset[i].opt |= TCINTEN;
-
-               edesc->pset[i].src = src;
-               edesc->pset[i].dst = dst;
-
-               edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
-               edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;
-
-               edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
-               edesc->pset[i].ccnt = ccnt;
-               edesc->pset[i].link_bcntrld = 0xffffffff;
-
        }
 
        return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
@@ -576,7 +637,7 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
        unsigned long flags;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS || !txstate)
+       if (ret == DMA_COMPLETE || !txstate)
                return ret;
 
        spin_lock_irqsave(&echan->vchan.lock, flags);
@@ -631,6 +692,10 @@ static int edma_probe(struct platform_device *pdev)
        struct edma_cc *ecc;
        int ret;
 
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
        ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
        if (!ecc) {
                dev_err(&pdev->dev, "Can't allocate controller\n");
@@ -702,11 +767,13 @@ static struct platform_device *pdev0, *pdev1;
 static const struct platform_device_info edma_dev_info0 = {
        .name = "edma-dma-engine",
        .id = 0,
+       .dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct platform_device_info edma_dev_info1 = {
        .name = "edma-dma-engine",
        .id = 1,
+       .dma_mask = DMA_BIT_MASK(32),
 };
 
 static int edma_init(void)
@@ -720,8 +787,6 @@ static int edma_init(void)
                        ret = PTR_ERR(pdev0);
                        goto out;
                }
-               pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
-               pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        }
 
        if (EDMA_CTLRS == 2) {
@@ -731,8 +796,6 @@ static int edma_init(void)
                        platform_device_unregister(pdev0);
                        ret = PTR_ERR(pdev1);
                }
-               pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
-               pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        }
 
 out:
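
The A-sync bookkeeping documented in edma_config_pset() above is easy to sanity-check with plain arithmetic: the element count is covered by a first frame of bcnt elements plus, via the bcntrld reload, further frames of SZ_64K - 1 elements, for ccnt frames in total. A standalone user-space check with arbitrary example values:

    #include <stdio.h>

    #define SZ_64K 0x10000

    int main(void)
    {
            unsigned int dev_width  = 4;        /* acnt: bytes per element */
            unsigned int dma_length = 1000000;  /* total transfer in bytes */
            unsigned int acnt = dev_width;
            unsigned int ccnt = dma_length / acnt / (SZ_64K - 1);
            unsigned int bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);

            if (bcnt)
                    ccnt++;                     /* remainder -> extra frame */
            else
                    bcnt = SZ_64K - 1;

            /* 250000 elements = one frame of 53395 (bcnt) plus three
             * reloaded frames of 65535 (bcntrld), so ccnt ends up 4. */
            printf("acnt=%u bcnt=%u ccnt=%u\n", acnt, bcnt, ccnt);
            return 0;
    }
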
index 55852c02679143f453286f189e68fd777ea1afaa..2af4028cc23e7abb1c5e39a41b54012b588c9490 100644 (file)
@@ -771,7 +771,7 @@ static int imxdma_alloc_chan_resources(struct dma_chan *chan)
                desc->desc.tx_submit = imxdma_tx_submit;
                /* txd.flags will be overwritten in prep funcs */
                desc->desc.flags = DMA_CTRL_ACK;
-               desc->status = DMA_SUCCESS;
+               desc->status = DMA_COMPLETE;
 
                list_add_tail(&desc->node, &imxdmac->ld_free);
                imxdmac->descs_allocated++;
index fc43603cf0bbeca883aa260d880c86898b49e793..e43c040dfe0bd6d8bba59bfe3e8e3aa608d3a95e 100644 (file)
@@ -638,7 +638,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
        if (error)
                sdmac->status = DMA_ERROR;
        else
-               sdmac->status = DMA_SUCCESS;
+               sdmac->status = DMA_COMPLETE;
 
        dma_cookie_complete(&sdmac->desc);
        if (sdmac->desc.callback)
@@ -1432,6 +1432,10 @@ static int __init sdma_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
        sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
        if (!sdma)
                return -ENOMEM;
index a975ebebea8aaf9b8950497eefdcaf3793d930d2..1aab8130efa1c75ae51906938447c91d12a66ddc 100644 (file)
@@ -309,7 +309,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
                callback_txd(param_txd);
        }
        if (midc->raw_tfr) {
-               desc->status = DMA_SUCCESS;
+               desc->status = DMA_COMPLETE;
                if (desc->lli != NULL) {
                        pci_pool_free(desc->lli_pool, desc->lli,
                                                desc->lli_phys);
@@ -481,7 +481,7 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
        enum dma_status ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_SUCCESS) {
+       if (ret != DMA_COMPLETE) {
                spin_lock_bh(&midc->lock);
                midc_scan_descriptors(to_middma_device(chan->device), midc);
                spin_unlock_bh(&midc->lock);
index 5ff6fc1819dc6a2e90c035956b23e23c56f9bb5d..a0f0fce5a84e1aefc27dadeea28cbca528cb8f85 100644 (file)
@@ -733,7 +733,7 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
        enum dma_status ret;
 
        ret = dma_cookie_status(c, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        device->cleanup_fn((unsigned long) c);
@@ -859,7 +859,7 @@ int ioat_dma_self_test(struct ioatdma_device *device)
 
        if (tmo == 0 ||
            dma->device_tx_status(dma_chan, cookie, NULL)
-                                       != DMA_SUCCESS) {
+                                       != DMA_COMPLETE) {
                dev_err(dev, "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto unmap_dma;
index d8ececaf1b57082cc5709aca68714542657b5566..806b4ce5e38c79d84a5d759b108798dbd8b3b22a 100644 (file)
@@ -807,7 +807,7 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
        enum dma_status ret;
 
        ret = dma_cookie_status(c, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        ioat3_cleanup(ioat);
@@ -1468,7 +1468,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test xor timed out\n");
                err = -ENODEV;
                goto dma_unmap;
@@ -1530,7 +1530,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
@@ -1577,7 +1577,7 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
 
-       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+       if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dev, "Self-test 2nd validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
index dd8b44a56e5d0f7090b8dd65bce87a73a60c90ef..408fe6be15f4f394a1a3467e7822d92031af4ec3 100644 (file)
@@ -864,7 +864,7 @@ static enum dma_status iop_adma_status(struct dma_chan *chan,
        int ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        iop_adma_slot_cleanup(iop_chan);
@@ -983,7 +983,7 @@ static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
        msleep(1);
 
        if (iop_adma_status(dma_chan, cookie, NULL) !=
-                       DMA_SUCCESS) {
+                       DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
@@ -1083,7 +1083,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
        msleep(8);
 
        if (iop_adma_status(dma_chan, cookie, NULL) !=
-               DMA_SUCCESS) {
+               DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test xor timed out, disabling\n");
                err = -ENODEV;
@@ -1129,7 +1129,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
        iop_adma_issue_pending(dma_chan);
        msleep(8);
 
-       if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+       if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test zero sum timed out, disabling\n");
                err = -ENODEV;
@@ -1158,7 +1158,7 @@ iop_adma_xor_val_self_test(struct iop_adma_device *device)
        iop_adma_issue_pending(dma_chan);
        msleep(8);
 
-       if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+       if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test non-zero sum timed out, disabling\n");
                err = -ENODEV;
@@ -1254,7 +1254,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
        msleep(8);
 
        if (iop_adma_status(dma_chan, cookie, NULL) !=
-               DMA_SUCCESS) {
+               DMA_COMPLETE) {
                dev_err(dev, "Self-test pq timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
@@ -1291,7 +1291,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
        msleep(8);
 
        if (iop_adma_status(dma_chan, cookie, NULL) !=
-               DMA_SUCCESS) {
+               DMA_COMPLETE) {
                dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
@@ -1323,7 +1323,7 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
        msleep(8);
 
        if (iop_adma_status(dma_chan, cookie, NULL) !=
-               DMA_SUCCESS) {
+               DMA_COMPLETE) {
                dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
index a2c330f5f9521302ed04998dfb38eecb60b2f894..e26075408e9b95a365dfd188cad786593604412f 100644 (file)
@@ -344,7 +344,7 @@ static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
        size_t bytes = 0;
 
        ret = dma_cookie_status(&c->vc.chan, cookie, state);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        spin_lock_irqsave(&c->vc.lock, flags);
@@ -693,7 +693,7 @@ static int k3_dma_probe(struct platform_device *op)
 
        irq = platform_get_irq(op, 0);
        ret = devm_request_irq(&op->dev, irq,
-                       k3_dma_int_handler, IRQF_DISABLED, DRIVER_NAME, d);
+                       k3_dma_int_handler, 0, DRIVER_NAME, d);
        if (ret)
                return ret;
 
index ff8d7827f8cbe80e2c66d78db1f50ad6fdd91b5a..dcb1e05149a7664c6e65a214d783080d540aaafd 100644 (file)
@@ -798,8 +798,7 @@ static void dma_do_tasklet(unsigned long data)
                 * move the descriptors to a temporary list so we can drop
                 * the lock during the entire cleanup operation
                 */
-               list_del(&desc->node);
-               list_add(&desc->node, &chain_cleanup);
+               list_move(&desc->node, &chain_cleanup);
 
                /*
                 * Look for the first list entry which has the ENDIRQEN flag
@@ -863,7 +862,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
 
        if (irq) {
                ret = devm_request_irq(pdev->dev, irq,
-                       mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+                       mmp_pdma_chan_handler, 0, "pdma", phy);
                if (ret) {
                        dev_err(pdev->dev, "channel request irq fail!\n");
                        return ret;
@@ -970,7 +969,7 @@ static int mmp_pdma_probe(struct platform_device *op)
                /* all chan share one irq, demux inside */
                irq = platform_get_irq(op, 0);
                ret = devm_request_irq(pdev->dev, irq,
-                       mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+                       mmp_pdma_int_handler, 0, "pdma", pdev);
                if (ret)
                        return ret;
        }
index 38cb517fb2ebd82034b02e2ab7d5cabfc9992cbd..2b4026d1f31d52d524d202e7eb7c1752dcc0b04b 100644 (file)
 #define TDCR_BURSTSZ_16B       (0x3 << 6)
 #define TDCR_BURSTSZ_32B       (0x6 << 6)
 #define TDCR_BURSTSZ_64B       (0x7 << 6)
+#define TDCR_BURSTSZ_SQU_1B            (0x5 << 6)
+#define TDCR_BURSTSZ_SQU_2B            (0x6 << 6)
+#define TDCR_BURSTSZ_SQU_4B            (0x0 << 6)
+#define TDCR_BURSTSZ_SQU_8B            (0x1 << 6)
+#define TDCR_BURSTSZ_SQU_16B   (0x3 << 6)
 #define TDCR_BURSTSZ_SQU_32B   (0x7 << 6)
 #define TDCR_BURSTSZ_128B      (0x5 << 6)
 #define TDCR_DSTDIR_MSK                (0x3 << 4)      /* Dst Direction */
@@ -158,7 +163,7 @@ static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
        /* disable irq */
        writel(0, tdmac->reg_base + TDIMR);
 
-       tdmac->status = DMA_SUCCESS;
+       tdmac->status = DMA_COMPLETE;
 }
 
 static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
@@ -228,8 +233,31 @@ static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
                        return -EINVAL;
                }
        } else if (tdmac->type == PXA910_SQU) {
-               tdcr |= TDCR_BURSTSZ_SQU_32B;
                tdcr |= TDCR_SSPMOD;
+
+               switch (tdmac->burst_sz) {
+               case 1:
+                       tdcr |= TDCR_BURSTSZ_SQU_1B;
+                       break;
+               case 2:
+                       tdcr |= TDCR_BURSTSZ_SQU_2B;
+                       break;
+               case 4:
+                       tdcr |= TDCR_BURSTSZ_SQU_4B;
+                       break;
+               case 8:
+                       tdcr |= TDCR_BURSTSZ_SQU_8B;
+                       break;
+               case 16:
+                       tdcr |= TDCR_BURSTSZ_SQU_16B;
+                       break;
+               case 32:
+                       tdcr |= TDCR_BURSTSZ_SQU_32B;
+                       break;
+               default:
+                       dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+                       return -EINVAL;
+               }
        }
 
        writel(tdcr, tdmac->reg_base + TDCR);
@@ -324,7 +352,7 @@ static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
 
        if (tdmac->irq) {
                ret = devm_request_irq(tdmac->dev, tdmac->irq,
-                       mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
+                       mmp_tdma_chan_handler, 0, "tdma", tdmac);
                if (ret)
                        return ret;
        }
@@ -370,7 +398,7 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
        int num_periods = buf_len / period_len;
        int i = 0, buf = 0;
 
-       if (tdmac->status != DMA_SUCCESS)
+       if (tdmac->status != DMA_COMPLETE)
                return NULL;
 
        if (period_len > TDMA_MAX_XFER_BYTES) {
@@ -504,7 +532,7 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
        tdmac->idx         = idx;
        tdmac->type        = type;
        tdmac->reg_base    = (unsigned long)tdev->base + idx * 4;
-       tdmac->status = DMA_SUCCESS;
+       tdmac->status = DMA_COMPLETE;
        tdev->tdmac[tdmac->idx] = tdmac;
        tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
 
@@ -559,7 +587,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
        if (irq_num != chan_num) {
                irq = platform_get_irq(pdev, 0);
                ret = devm_request_irq(&pdev->dev, irq,
-                       mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
+                       mmp_tdma_int_handler, 0, "tdma", tdev);
                if (ret)
                        return ret;
        }
index 536dcb8ba5fdfe69ed5f726fc6b5897f00266698..8d5bce9e867e4fc40225fe642019aa9cb47fb6c0 100644 (file)
@@ -749,7 +749,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
        enum dma_status ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS) {
+       if (ret == DMA_COMPLETE) {
                mv_xor_clean_completed_slots(mv_chan);
                return ret;
        }
@@ -874,7 +874,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
        msleep(1);
 
        if (mv_xor_status(dma_chan, cookie, NULL) !=
-           DMA_SUCCESS) {
+           DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
@@ -968,7 +968,7 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
        msleep(8);
 
        if (mv_xor_status(dma_chan, cookie, NULL) !=
-           DMA_SUCCESS) {
+           DMA_COMPLETE) {
                dev_err(dma_chan->device->dev,
                        "Self-test xor timed out, disabling\n");
                err = -ENODEV;
index ccd13df841db790ff9eabc9cbc9df79f5f8bb9af..7ab7cecc48a4abb14a21f59834f577b1f95e913a 100644 (file)
@@ -224,7 +224,7 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-       mxs_chan->status = DMA_SUCCESS;
+       mxs_chan->status = DMA_COMPLETE;
 }
 
 static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
@@ -312,12 +312,12 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
                        if (mxs_chan->flags & MXS_DMA_SG_LOOP)
                                mxs_chan->status = DMA_IN_PROGRESS;
                        else
-                               mxs_chan->status = DMA_SUCCESS;
+                               mxs_chan->status = DMA_COMPLETE;
                }
 
                stat1 &= ~(1 << channel);
 
-               if (mxs_chan->status == DMA_SUCCESS)
+               if (mxs_chan->status == DMA_COMPLETE)
                        dma_cookie_complete(&mxs_chan->desc);
 
                /* schedule tasklet on this channel */
index ec3fc4fd9160e8aeddf16054cd405f35b43bfef7..2f66cf4e54fe367754378c3c8be213fe20bb8f64 100644 (file)
@@ -248,7 +248,7 @@ static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
        unsigned long flags;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS || !txstate)
+       if (ret == DMA_COMPLETE || !txstate)
                return ret;
 
        spin_lock_irqsave(&c->vc.lock, flags);
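
The DMA_SUCCESS -> DMA_COMPLETE changes running through these dmaengine drivers are a tree-wide rename of the completion status value; the logic is unchanged. The tx_status shape that the rename keeps touching looks roughly like the sketch below (foo_tx_status is a made-up name; dma_cookie_status() comes from the private drivers/dma/dmaengine.h helper header):

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* private helper header inside drivers/dma/ */

static enum dma_status foo_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret = dma_cookie_status(chan, cookie, txstate);

	/* Transfer fully finished: nothing more to report. */
	if (ret == DMA_COMPLETE)
		return ret;

	/* Otherwise a real driver would compute the remaining byte count
	 * and report it, e.g. via dma_set_residue(txstate, remaining). */
	return ret;
}
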
index a562d24d20bf55179436d16086ca90f63d1b1894..a4568297341b33e558dc568578079788091ac785 100644 (file)
@@ -2903,6 +2903,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
        pdat = dev_get_platdata(&adev->dev);
 
+       ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
        /* Allocate a new DMAC and its Channels */
        pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
        if (!pdmac) {
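
dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call; pl330 uses it above to declare 32-bit addressing before allocating anything. A hedged sketch of the usual probe-time pattern, with a made-up my_probe():

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int my_probe(struct device *dev)
{
	int ret;

	/* Both dev->dma_mask and dev->coherent_dma_mask end up at 32 bits. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;	/* the platform cannot satisfy this mask */

	/* ... continue with resource setup, dma_alloc_coherent(), etc. ... */
	return 0;
}
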
@@ -2922,16 +2926,23 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
        amba_set_drvdata(adev, pdmac);
 
-       irq = adev->irq[0];
-       ret = request_irq(irq, pl330_irq_handler, 0,
-                       dev_name(&adev->dev), pi);
-       if (ret)
-               return ret;
+       for (i = 0; i < AMBA_NR_IRQS; i++) {
+               irq = adev->irq[i];
+               if (irq) {
+                       ret = devm_request_irq(&adev->dev, irq,
+                                              pl330_irq_handler, 0,
+                                              dev_name(&adev->dev), pi);
+                       if (ret)
+                               return ret;
+               } else {
+                       break;
+               }
+       }
 
        pi->pcfg.periph_id = adev->periphid;
        ret = pl330_add(pi);
        if (ret)
-               goto probe_err1;
+               return ret;
 
        INIT_LIST_HEAD(&pdmac->desc_pool);
        spin_lock_init(&pdmac->pool_lock);
@@ -3044,8 +3055,6 @@ probe_err3:
        }
 probe_err2:
        pl330_del(pi);
-probe_err1:
-       free_irq(irq, pi);
 
        return ret;
 }
@@ -3055,7 +3064,6 @@ static int pl330_remove(struct amba_device *adev)
        struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
        struct dma_pl330_chan *pch, *_p;
        struct pl330_info *pi;
-       int irq;
 
        if (!pdmac)
                return 0;
@@ -3082,9 +3090,6 @@ static int pl330_remove(struct amba_device *adev)
 
        pl330_del(pi);
 
-       irq = adev->irq[0];
-       free_irq(irq, pi);
-
        return 0;
 }
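
Switching to devm_request_irq() is also why the probe_err1 label and both free_irq() calls disappear above: the devres core releases the IRQ automatically when probe fails or the device is unbound, so error paths collapse to plain returns. A sketch under that assumption, with illustrative names:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>

static irqreturn_t my_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_probe(struct device *dev, int irq, void *ctx)
{
	void __iomem *base;
	int ret;

	base = devm_ioremap(dev, 0x10000000, 0x1000);	/* illustrative address */
	if (!base)
		return -ENOMEM;

	ret = devm_request_irq(dev, irq, my_handler, 0, dev_name(dev), ctx);
	if (ret)
		return ret;	/* devm_ioremap() is undone automatically */

	return 0;
}
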
 
index 370ff8265630cf05cdb64a571e9acb7d9064e08e..60e02ae38b04a46f4327ea76c0590b427808196d 100644 (file)
@@ -3891,7 +3891,7 @@ static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan,
 
        ppc440spe_chan = to_ppc440spe_adma_chan(chan);
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        ppc440spe_adma_slot_cleanup(ppc440spe_chan);
index 461a91ab70bb4feca82cd27c1582f81c2905bcde..ab26d46bbe1598434625979abeb488d5199992d9 100644 (file)
@@ -436,7 +436,7 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
        enum dma_status ret;
 
        ret = dma_cookie_status(&c->vc.chan, cookie, state);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        if (!state)
index 45a520281ce10c7e1c8bf2ef6d18e6f253f72310..ebad84591a6e22bd1f28866c3f0b1946eba3dff0 100644 (file)
@@ -93,6 +93,7 @@ struct hpb_dmae_chan {
        void __iomem *base;
        const struct hpb_dmae_slave_config *cfg;
        char dev_id[16];                /* unique name per DMAC of channel */
+       dma_addr_t slave_addr;
 };
 
 struct hpb_dmae_device {
@@ -432,7 +433,6 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
                hpb_chan->xfer_mode = XFER_DOUBLE;
        } else {
                dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
-               shdma_free_irq(&hpb_chan->shdma_chan);
                return -EINVAL;
        }
 
@@ -446,7 +446,8 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
        return 0;
 }
 
-static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
+static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
+                             dma_addr_t slave_addr, bool try)
 {
        struct hpb_dmae_chan *chan = to_chan(schan);
        const struct hpb_dmae_slave_config *sc =
@@ -457,6 +458,7 @@ static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
        if (try)
                return 0;
        chan->cfg = sc;
+       chan->slave_addr = slave_addr ? : sc->addr;
        return hpb_dmae_alloc_chan_resources(chan, sc);
 }
 
@@ -468,7 +470,7 @@ static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
 {
        struct hpb_dmae_chan *chan = to_chan(schan);
 
-       return chan->cfg->addr;
+       return chan->slave_addr;
 }
 
 static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
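
The new slave_addr field lets the dmaengine core pass an address that overrides the one in the static slave config; the assignment "slave_addr ? : sc->addr" uses the GNU C "?:" extension, where "a ?: b" yields a when a is non-zero and b otherwise. A two-line illustration (pick_addr is a made-up helper):

#include <linux/types.h>

/* GNU extension: "x ? : y" is shorthand for "x ? x : y", with x
 * evaluated only once. */
static dma_addr_t pick_addr(dma_addr_t requested, dma_addr_t fallback)
{
	return requested ? : fallback;
}
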
@@ -614,7 +616,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
        shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
                BUG_ON(!schan);
 
-               shdma_free_irq(schan);
                shdma_chan_remove(schan);
        }
        dma_dev->chancnt = 0;
index d94ab592cc1bb21b92c851debe381609b599fa48..2e7b394def8058e4d1216ad8a07c785db4d87048 100644 (file)
@@ -724,7 +724,7 @@ static enum dma_status shdma_tx_status(struct dma_chan *chan,
         * If we don't find cookie on the queue, it has been aborted and we have
         * to report error
         */
-       if (status != DMA_SUCCESS) {
+       if (status != DMA_COMPLETE) {
                struct shdma_desc *sdesc;
                status = DMA_ERROR;
                list_for_each_entry(sdesc, &schan->ld_queue, node)
index 1069e8869f20762928ecbbe509b2ed294f82ae35..0d765c0e21ec9de8cb4e25ff66d5fd53ee78d95d 100644 (file)
@@ -685,7 +685,7 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 static int sh_dmae_probe(struct platform_device *pdev)
 {
        const struct sh_dmae_pdata *pdata;
-       unsigned long irqflags = IRQF_DISABLED,
+       unsigned long irqflags = 0,
                chan_flag[SH_DMAE_MAX_CHANNELS] = {};
        int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
        int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
@@ -838,7 +838,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
                                    IORESOURCE_IRQ_SHAREABLE)
                                        chan_flag[irq_cnt] = IRQF_SHARED;
                                else
-                                       chan_flag[irq_cnt] = IRQF_DISABLED;
+                                       chan_flag[irq_cnt] = 0;
                                dev_dbg(&pdev->dev,
                                        "Found IRQ %d for channel %d\n",
                                        i, irq_cnt);
index 82d2b97ad942f96f2064c0ac58b11141fb85b54c..b8c031b7de4e045d22cfa0e7d495380bc94ed75f 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/log2.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
@@ -2626,7 +2627,7 @@ static enum dma_status d40_tx_status(struct dma_chan *chan,
        }
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret != DMA_SUCCESS)
+       if (ret != DMA_COMPLETE)
                dma_set_residue(txstate, stedma40_residue(chan));
 
        if (d40_is_paused(d40c))
@@ -2796,8 +2797,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
            src_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
            dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
            dst_addr_width >  DMA_SLAVE_BUSWIDTH_8_BYTES   ||
-           ((src_addr_width > 1) && (src_addr_width & 1)) ||
-           ((dst_addr_width > 1) && (dst_addr_width & 1)))
+           !is_power_of_2(src_addr_width) ||
+           !is_power_of_2(dst_addr_width))
                return -EINVAL;
 
        cfg->src_info.data_width = src_addr_width;
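
The rewritten validity check (and the new <linux/log2.h> include at the top of this file) uses is_power_of_2() instead of the open-coded "(width > 1) && (width & 1)" test, which only rejected odd widths and would have let an even non-power-of-two value through. A small sketch of the stricter check; bus_width_ok is an illustrative name:

#include <linux/log2.h>
#include <linux/types.h>

/* Accept only the bus widths the hardware can do: 1, 2, 4 or 8 bytes. */
static bool bus_width_ok(unsigned int width)
{
	return width >= 1 && width <= 8 && is_power_of_2(width);
}
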
index 5d4986e5f5fa6b21423084b688bd0a8afbba0c2e..73654e33f13b98c66ebce532646056ecdce79c61 100644 (file)
@@ -570,7 +570,7 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
 
        list_del(&sgreq->node);
        if (sgreq->last_sg) {
-               dma_desc->dma_status = DMA_SUCCESS;
+               dma_desc->dma_status = DMA_COMPLETE;
                dma_cookie_complete(&dma_desc->txd);
                if (!dma_desc->cb_count)
                        list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
@@ -768,7 +768,7 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
        unsigned int residual;
 
        ret = dma_cookie_status(dc, cookie, txstate);
-       if (ret == DMA_SUCCESS)
+       if (ret == DMA_COMPLETE)
                return ret;
 
        spin_lock_irqsave(&tdc->lock, flags);
@@ -1018,7 +1018,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
        return &dma_desc->txd;
 }
 
-struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
        struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags, void *context)
index 71e8e775189e0df5568d474ea00157a2675f9260..c2829b481bf2c6b2d576e16aab730c06df375173 100644 (file)
@@ -962,8 +962,8 @@ txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        enum dma_status ret;
 
        ret = dma_cookie_status(chan, cookie, txstate);
-       if (ret == DMA_SUCCESS)
-               return DMA_SUCCESS;
+       if (ret == DMA_COMPLETE)
+               return DMA_COMPLETE;
 
        spin_lock_bh(&dc->lock);
        txx9dmac_scan_descriptors(dc);
index ff080ee201973bd3c76513d11d36273d70971e59..1b5e8e46226d5f3d6bebdb770d8b29de9f04fc43 100644 (file)
@@ -545,12 +545,15 @@ static int dcdbas_probe(struct platform_device *dev)
        host_control_action = HC_ACTION_NONE;
        host_control_smi_type = HC_SMITYPE_NONE;
 
+       dcdbas_pdev = dev;
+
        /*
         * BIOS SMI calls require buffer addresses be in 32-bit address space.
         * This is done by setting the DMA mask below.
         */
-       dcdbas_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       dcdbas_pdev->dev.dma_mask = &dcdbas_pdev->dev.coherent_dma_mask;
+       error = dma_set_coherent_mask(&dcdbas_pdev->dev, DMA_BIT_MASK(32));
+       if (error)
+               return error;
 
        error = sysfs_create_group(&dev->dev.kobj, &dcdbas_attr_group);
        if (error)
@@ -581,6 +584,14 @@ static struct platform_driver dcdbas_driver = {
        .remove         = dcdbas_remove,
 };
 
+static const struct platform_device_info dcdbas_dev_info __initdata = {
+       .name           = DRIVER_NAME,
+       .id             = -1,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
+
+static struct platform_device *dcdbas_pdev_reg;
+
 /**
  * dcdbas_init: initialize driver
  */
@@ -592,20 +603,14 @@ static int __init dcdbas_init(void)
        if (error)
                return error;
 
-       dcdbas_pdev = platform_device_alloc(DRIVER_NAME, -1);
-       if (!dcdbas_pdev) {
-               error = -ENOMEM;
+       dcdbas_pdev_reg = platform_device_register_full(&dcdbas_dev_info);
+       if (IS_ERR(dcdbas_pdev_reg)) {
+               error = PTR_ERR(dcdbas_pdev_reg);
                goto err_unregister_driver;
        }
 
-       error = platform_device_add(dcdbas_pdev);
-       if (error)
-               goto err_free_device;
-
        return 0;
 
- err_free_device:
-       platform_device_put(dcdbas_pdev);
  err_unregister_driver:
        platform_driver_unregister(&dcdbas_driver);
        return error;
@@ -628,8 +633,9 @@ static void __exit dcdbas_exit(void)
         * all sysfs attributes belonging to this module have been
         * released.
         */
-       smi_data_buf_free();
-       platform_device_unregister(dcdbas_pdev);
+       if (dcdbas_pdev)
+               smi_data_buf_free();
+       platform_device_unregister(dcdbas_pdev_reg);
        platform_driver_unregister(&dcdbas_driver);
 }
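
dcdbas (and gsmi just below) now describe their platform device with a platform_device_info, so the DMA mask is applied by platform_device_register_full() at creation time rather than being written into struct device afterwards. A rough sketch of that registration pattern, with illustrative names:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>

static const struct platform_device_info my_dev_info = {
	.name		= "my-smi-dev",		/* illustrative */
	.id		= -1,
	.dma_mask	= DMA_BIT_MASK(32),	/* firmware wants 32-bit buffers */
};

static struct platform_device *my_pdev;

static int __init my_register(void)
{
	my_pdev = platform_device_register_full(&my_dev_info);
	if (IS_ERR(my_pdev))
		return PTR_ERR(my_pdev);
	return 0;
}
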
 
index 6eb535ffeddc2ea7787f648d1d35734f24c305d5..e5a67b24587ac0efe1848dc08a6b465ef744f553 100644 (file)
@@ -764,6 +764,13 @@ static __init int gsmi_system_valid(void)
 static struct kobject *gsmi_kobj;
 static struct efivars efivars;
 
+static const struct platform_device_info gsmi_dev_info = {
+       .name           = "gsmi",
+       .id             = -1,
+       /* SMI callbacks require 32bit addresses */
+       .dma_mask       = DMA_BIT_MASK(32),
+};
+
 static __init int gsmi_init(void)
 {
        unsigned long flags;
@@ -776,7 +783,7 @@ static __init int gsmi_init(void)
        gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
 
        /* register device */
-       gsmi_dev.pdev = platform_device_register_simple("gsmi", -1, NULL, 0);
+       gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
        if (IS_ERR(gsmi_dev.pdev)) {
                printk(KERN_ERR "gsmi: unable to register platform device\n");
                return PTR_ERR(gsmi_dev.pdev);
@@ -785,10 +792,6 @@ static __init int gsmi_init(void)
        /* SMI access needs to be serialized */
        spin_lock_init(&gsmi_dev.lock);
 
-       /* SMI callbacks require 32bit addresses */
-       gsmi_dev.pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       gsmi_dev.pdev->dev.dma_mask =
-               &gsmi_dev.pdev->dev.coherent_dma_mask;
        ret = -ENOMEM;
        gsmi_dev.dma_pool = dma_pool_create("gsmi", &gsmi_dev.pdev->dev,
                                             GSMI_BUF_SIZE, GSMI_BUF_ALIGN, 0);
index 2d9ca6055e5e0bde239247fabe41c960d4dd3020..41b5913ddabe6e0a8b1f417004d86628234b77c5 100644 (file)
@@ -248,14 +248,15 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
        struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
        struct irq_chip *chip = irq_data_get_irq_chip(data);
        u32 base, pin, mask;
-       unsigned long reg, pending;
+       unsigned long reg, ena, pending;
        unsigned virq;
 
        /* check from GPIO controller which pin triggered the interrupt */
        for (base = 0; base < lg->chip.ngpio; base += 32) {
                reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+               ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
 
-               while ((pending = inl(reg))) {
+               while ((pending = (inl(reg) & inl(ena)))) {
                        pin = __ffs(pending);
                        mask = BIT(pin);
                        /* Clear before handling so we don't lose an edge */
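
By AND-ing the status register with the enable register, the handler above skips pins whose status bit latched while their interrupt was disabled. Reduced to a sketch (stat_reg/ena_reg stand in for the LP_INT_STAT and LP_INT_ENABLE port addresses; per-pin dispatch is elided; port I/O as on x86 is assumed):

#include <linux/io.h>
#include <linux/bitops.h>

static void service_bank(unsigned long stat_reg, unsigned long ena_reg)
{
	unsigned long pending;
	unsigned int pin;

	/* Only bits that are both pending and enabled get serviced. */
	while ((pending = inl(stat_reg) & inl(ena_reg))) {
		pin = __ffs(pending);
		outl(BIT(pin), stat_reg);	/* clear first, as the driver does */
		/* ... call the per-pin handler here ... */
	}
}
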
index 8ea3b33d4b40bb4b96ad7edd83f6dac7e219aa03..a90be34e4d5c24569dc64a320e59d0a9474e0908 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/module.h>
-
+#include <linux/io.h>
 #include <mach/hardware.h>
 #include <mach/irqs.h>
 
index 5c1ef2b3ef188253e0031b0ceec2e65239220df5..f2beb728ed8f32cf8eeb1b6135fcf652796ab294 100644 (file)
@@ -73,15 +73,8 @@ static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 static irqreturn_t acpi_gpio_irq_handler_evt(int irq, void *data)
 {
        struct acpi_gpio_evt_pin *evt_pin = data;
-       struct acpi_object_list args;
-       union acpi_object arg;
 
-       arg.type = ACPI_TYPE_INTEGER;
-       arg.integer.value = evt_pin->pin;
-       args.count = 1;
-       args.pointer = &arg;
-
-       acpi_evaluate_object(evt_pin->evt_handle, NULL, &args, NULL);
+       acpi_execute_simple_method(evt_pin->evt_handle, NULL, evt_pin->pin);
 
        return IRQ_HANDLED;
 }
index 86ef3461ec0647b42f1e1ff94a9f47225151f7f6..0dee0e0c247ae5fa2f121df234d09979dc00df74 100644 (file)
@@ -136,7 +136,7 @@ static struct gpio_desc *gpio_to_desc(unsigned gpio)
  */
 static int desc_to_gpio(const struct gpio_desc *desc)
 {
-       return desc->chip->base + gpio_chip_hwgpio(desc);
+       return desc - &gpio_desc[0];
 }
 
 
@@ -1398,7 +1398,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
        int                     status = -EPROBE_DEFER;
        unsigned long           flags;
 
-       if (!desc || !desc->chip) {
+       if (!desc) {
                pr_warn("%s: invalid GPIO\n", __func__);
                return -EINVAL;
        }
@@ -1406,6 +1406,8 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
        spin_lock_irqsave(&gpio_lock, flags);
 
        chip = desc->chip;
+       if (chip == NULL)
+               goto done;
 
        if (!try_module_get(chip->owner))
                goto done;
index 955555d6ec881faa6e821903b91f5d8c6b8c150a..e36892b72da5479270e4bc2b662d928c8723ec4f 100644 (file)
@@ -29,11 +29,17 @@ config DRM_USB
 config DRM_KMS_HELPER
        tristate
        depends on DRM
+       help
+         CRTC helpers for KMS drivers.
+
+config DRM_KMS_FB_HELPER
+       bool
+       depends on DRM_KMS_HELPER
        select FB
        select FRAMEBUFFER_CONSOLE if !EXPERT
        select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
        help
-         FB and CRTC helpers for KMS drivers.
+         FBDEV helpers for KMS drivers.
 
 config DRM_LOAD_EDID_FIRMWARE
        bool "Allow to specify an EDID data set instead of probing for it"
@@ -64,6 +70,7 @@ config DRM_GEM_CMA_HELPER
 config DRM_KMS_CMA_HELPER
        bool
        select DRM_GEM_CMA_HELPER
+       select DRM_KMS_FB_HELPER
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
@@ -96,6 +103,7 @@ config DRM_RADEON
        select FB_CFB_IMAGEBLIT
        select FW_LOADER
         select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
         select DRM_TTM
        select POWER_SUPPLY
        select HWMON
@@ -120,64 +128,7 @@ config DRM_I810
          selected, the module will be called i810.  AGP support is required
          for this driver to work.
 
-config DRM_I915
-       tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
-       depends on DRM
-       depends on AGP
-       depends on AGP_INTEL
-       # we need shmfs for the swappable backing store, and in particular
-       # the shmem_readpage() which depends upon tmpfs
-       select SHMEM
-       select TMPFS
-       select DRM_KMS_HELPER
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
-       # i915 depends on ACPI_VIDEO when ACPI is enabled
-       # but for select to work, need to select ACPI_VIDEO's dependencies, ick
-       select BACKLIGHT_LCD_SUPPORT if ACPI
-       select BACKLIGHT_CLASS_DEVICE if ACPI
-       select VIDEO_OUTPUT_CONTROL if ACPI
-       select INPUT if ACPI
-       select THERMAL if ACPI
-       select ACPI_VIDEO if ACPI
-       select ACPI_BUTTON if ACPI
-       help
-         Choose this option if you have a system that has "Intel Graphics
-         Media Accelerator" or "HD Graphics" integrated graphics,
-         including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
-         G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
-         Core i5, Core i7 as well as Atom CPUs with integrated graphics.
-         If M is selected, the module will be called i915.  AGP support
-         is required for this driver to work. This driver is used by
-         the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
-         replaces the older i830 module that supported a subset of the
-         hardware in older X.org releases.
-
-         Note that the older i810/i815 chipsets require the use of the
-         i810 driver instead, and the Atom z5xx series has an entirely
-         different implementation.
-
-config DRM_I915_KMS
-       bool "Enable modesetting on intel by default"
-       depends on DRM_I915
-       help
-         Choose this option if you want kernel modesetting enabled by default,
-         and you have a new enough userspace to support this. Running old
-         userspaces with this enabled will cause pain.  Note that this causes
-         the driver to bind to PCI devices, which precludes loading things
-         like intelfb.
-
-config DRM_I915_PRELIMINARY_HW_SUPPORT
-       bool "Enable preliminary support for prerelease Intel hardware by default"
-       depends on DRM_I915
-       help
-         Choose this option if you have prerelease Intel hardware and want the
-         i915 driver to support it by default.  You can enable such support at
-         runtime with the module option i915.preliminary_hw_support=1; this
-         option changes the default for that module option.
-
-         If in doubt, say "N".
+source "drivers/gpu/drm/i915/Kconfig"
 
 config DRM_MGA
        tristate "Matrox g200/g400"
@@ -225,6 +176,8 @@ source "drivers/gpu/drm/mgag200/Kconfig"
 
 source "drivers/gpu/drm/cirrus/Kconfig"
 
+source "drivers/gpu/drm/armada/Kconfig"
+
 source "drivers/gpu/drm/rcar-du/Kconfig"
 
 source "drivers/gpu/drm/shmobile/Kconfig"
@@ -236,3 +189,7 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
 source "drivers/gpu/drm/qxl/Kconfig"
 
 source "drivers/gpu/drm/msm/Kconfig"
+
+source "drivers/gpu/drm/tegra/Kconfig"
+
+source "drivers/gpu/drm/panel/Kconfig"
index f089adfe70eed3daa21f08b81fcb6d9911ea45c6..c802919356451d0c62ff237ad6a923ab31ecac46 100644 (file)
@@ -18,11 +18,13 @@ drm-y       :=      drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
 drm-$(CONFIG_PCI) += ati_pcigart.o
+drm-$(CONFIG_DRM_PANEL) += drm_panel.o
 
 drm-usb-y   := drm_usb.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o
 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
+drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
 drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -49,10 +51,13 @@ obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-$(CONFIG_DRM_GMA500) += gma500/
 obj-$(CONFIG_DRM_UDL) += udl/
 obj-$(CONFIG_DRM_AST) += ast/
+obj-$(CONFIG_DRM_ARMADA) += armada/
 obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
 obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP) += omapdrm/
 obj-$(CONFIG_DRM_TILCDC)       += tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
 obj-$(CONFIG_DRM_MSM) += msm/
+obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-y                  += i2c/
+obj-y                  += panel/
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
new file mode 100644 (file)
index 0000000..40d3715
--- /dev/null
@@ -0,0 +1,24 @@
+config DRM_ARMADA
+       tristate "DRM support for Marvell Armada SoCs"
+       depends on DRM && HAVE_CLK && ARM
+       select FB_CFB_FILLRECT
+       select FB_CFB_COPYAREA
+       select FB_CFB_IMAGEBLIT
+       select DRM_KMS_HELPER
+       help
+         Support the "LCD" controllers found on the Marvell Armada 510
+         devices.  There are two controllers on the device, each of which
+         supports graphics and video overlays.
+
+         This driver provides no built-in acceleration; acceleration is
+         performed by other IP found on the SoC.  This driver provides
+         kernel mode setting and buffer management to userspace.
+
+config DRM_ARMADA_TDA1998X
+       bool "Support TDA1998X HDMI output"
+       depends on DRM_ARMADA != n
+       depends on I2C && DRM_I2C_NXP_TDA998X = y
+       default y
+       help
+         Support the TDA1998x HDMI output device found on the Solid-Run
+         CuBox.
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
new file mode 100644 (file)
index 0000000..d6f43e0
--- /dev/null
@@ -0,0 +1,7 @@
+armada-y       := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
+                  armada_gem.o armada_output.o armada_overlay.o \
+                  armada_slave.o
+armada-y       += armada_510.o
+armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
+
+obj-$(CONFIG_DRM_ARMADA) := armada.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
new file mode 100644 (file)
index 0000000..59948ef
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Armada 510 (aka Dove) variant support
+ */
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_hw.h"
+
+static int armada510_init(struct armada_private *priv, struct device *dev)
+{
+       priv->extclk[0] = devm_clk_get(dev, "ext_ref_clk_1");
+
+       if (IS_ERR(priv->extclk[0]) && PTR_ERR(priv->extclk[0]) == -ENOENT)
+               priv->extclk[0] = ERR_PTR(-EPROBE_DEFER);
+
+       return PTR_RET(priv->extclk[0]);
+}
+
+static int armada510_crtc_init(struct armada_crtc *dcrtc)
+{
+       /* Lower the watermark so as to eliminate jitter at higher bandwidths */
+       armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+       return 0;
+}
+
+/*
+ * Armada510 specific SCLK register selection.
+ * This gets called with sclk = NULL to test whether the mode is
+ * supportable, and again with sclk != NULL to set the clocks up for
+ * that.  The former can return an error, but the latter is expected
+ * not to.
+ *
+ * We currently are pretty rudimentary here, always selecting
+ * EXT_REF_CLK_1 for LCD0 and erroring out on LCD1.  This needs improvement!
+ */
+static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
+       const struct drm_display_mode *mode, uint32_t *sclk)
+{
+       struct armada_private *priv = dcrtc->crtc.dev->dev_private;
+       struct clk *clk = priv->extclk[0];
+       int ret;
+
+       if (dcrtc->num == 1)
+               return -EINVAL;
+
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       if (dcrtc->clk != clk) {
+               ret = clk_prepare_enable(clk);
+               if (ret)
+                       return ret;
+               dcrtc->clk = clk;
+       }
+
+       if (sclk) {
+               uint32_t rate, ref, div;
+
+               rate = mode->clock * 1000;
+               ref = clk_round_rate(clk, rate);
+               div = DIV_ROUND_UP(ref, rate);
+               if (div < 1)
+                       div = 1;
+
+               clk_set_rate(clk, ref);
+               *sclk = div | SCLK_510_EXTCLK1;
+       }
+
+       return 0;
+}
+
+const struct armada_variant armada510_ops = {
+       .has_spu_adv_reg = true,
+       .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+       .init = armada510_init,
+       .crtc_init = armada510_crtc_init,
+       .crtc_compute_clock = armada510_crtc_compute_clock,
+};
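
For reference, the divider computed in armada510_crtc_compute_clock() above is just the rounded reference rate divided (rounding up) by the requested pixel clock: with a 148.5 MHz mode clock and clk_round_rate() returning 148500000 the result is 1, while a 297 MHz reference would give 2 (numbers are illustrative, not from a datasheet). The arithmetic, isolated:

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative only: mirrors the divider math in armada510_crtc_compute_clock(). */
static u32 example_sclk_div(unsigned long ref_hz, unsigned long pixclk_hz)
{
	u32 div = DIV_ROUND_UP(ref_hz, pixclk_hz);

	return div < 1 ? 1 : div;	/* the driver clamps the divider to >= 1 */
}
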
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
new file mode 100644 (file)
index 0000000..d8e3982
--- /dev/null
@@ -0,0 +1,1098 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+struct armada_frame_work {
+       struct drm_pending_vblank_event *event;
+       struct armada_regs regs[4];
+       struct drm_framebuffer *old_fb;
+};
+
+enum csc_mode {
+       CSC_AUTO = 0,
+       CSC_YUV_CCIR601 = 1,
+       CSC_YUV_CCIR709 = 2,
+       CSC_RGB_COMPUTER = 1,
+       CSC_RGB_STUDIO = 2,
+};
+
+/*
+ * A note about interlacing.  Let's consider HDMI 1920x1080i.
+ * The timing parameters we have from X are:
+ *  Hact HsyA HsyI Htot  Vact VsyA VsyI Vtot
+ *  1920 2448 2492 2640  1080 1084 1094 1125
+ * Which get translated to:
+ *  Hact HsyA HsyI Htot  Vact VsyA VsyI Vtot
+ *  1920 2448 2492 2640   540  542  547  562
+ *
+ * This is how it is defined by CEA-861-D - line and pixel numbers are
+ * referenced to the rising edge of VSYNC and HSYNC.  Total clocks per
+ * line: 2640.  The odd frame, the first active line is at line 21, and
+ * the even frame, the first active line is 584.
+ *
+ * LN:    560     561     562     563             567     568    569
+ * DE:    ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: _________________________|~~~~~~//~~~~~~~~~~~~~~~|__________
+ *  22 blanking lines.  VSYNC at 1320 (referenced to the HSYNC rising edge).
+ *
+ * LN:    1123   1124    1125      1               5       6      7
+ * DE:    ~~~|____________________________//__________________________
+ * HSYNC: ____|~|_____|~|_____|~|_____|~|_//__|~|_____|~|_____|~|_____
+ * VSYNC: ____________________|~~~~~~~~~~~//~~~~~~~~~~|_______________
+ *  23 blanking lines
+ *
+ * The Armada LCD Controller line and pixel numbers are, like X timings,
+ * referenced to the top left of the active frame.
+ *
+ * So, translating these to our LCD controller:
+ *  Odd frame, 563 total lines, VSYNC at line 543-548, pixel 1128.
+ *  Even frame, 562 total lines, VSYNC at line 542-547, pixel 2448.
+ * Note: Vsync front porch remains constant!
+ *
+ * if (odd_frame) {
+ *   vtotal = mode->crtc_vtotal + 1;
+ *   vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay + 1;
+ *   vhorizpos = mode->crtc_hsync_start - mode->crtc_htotal / 2
+ * } else {
+ *   vtotal = mode->crtc_vtotal;
+ *   vbackporch = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ *   vhorizpos = mode->crtc_hsync_start;
+ * }
+ * vfrontporch = mode->crtc_vtotal - mode->crtc_vsync_end;
+ *
+ * So, we need to reprogram these registers on each vsync event:
+ *  LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *
+ * Note: we do not use the frame done interrupts because these appear
+ * to happen too early, and lead to jitter on the display (presumably
+ * they occur at the end of the last active line, before the vsync back
+ * porch, which we're reprogramming.)
+ */
+
+void
+armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
+{
+       while (regs->offset != ~0) {
+               void __iomem *reg = dcrtc->base + regs->offset;
+               uint32_t val;
+
+               val = regs->mask;
+               if (val != 0)
+                       val &= readl_relaxed(reg);
+               writel_relaxed(val | regs->val, reg);
+               ++regs;
+       }
+}
+
+#define dpms_blanked(dpms)     ((dpms) != DRM_MODE_DPMS_ON)
+
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+{
+       uint32_t dumb_ctrl;
+
+       dumb_ctrl = dcrtc->cfg_dumb_ctrl;
+
+       if (!dpms_blanked(dcrtc->dpms))
+               dumb_ctrl |= CFG_DUMB_ENA;
+
+       /*
+        * When the dumb interface isn't in DUMB24_RGB888_0 mode, it might
+        * be using SPI or GPIO.  If we set this to DUMB_BLANK, we will
+        * force LCD_D[23:0] to output blank color, overriding the GPIO or
+        * SPI usage.  So leave it as-is unless in DUMB24_RGB888_0 mode.
+        */
+       if (dpms_blanked(dcrtc->dpms) &&
+           (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+               dumb_ctrl &= ~DUMB_MASK;
+               dumb_ctrl |= DUMB_BLANK;
+       }
+
+       /*
+        * The documentation doesn't indicate what the normal state of
+        * the sync signals are.  Sebastian Hesselbart kindly probed
+        * the sync signals is.  Sebastian Hesselbarth kindly probed
+        *
+        * The non-inverted state of the sync signals is active high.
+        * Setting these bits makes the appropriate signal active low.
+        */
+       if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
+               dumb_ctrl |= CFG_INV_CSYNC;
+       if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
+               dumb_ctrl |= CFG_INV_HSYNC;
+       if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
+               dumb_ctrl |= CFG_INV_VSYNC;
+
+       if (dcrtc->dumb_ctrl != dumb_ctrl) {
+               dcrtc->dumb_ctrl = dumb_ctrl;
+               writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
+       }
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
+       int x, int y, struct armada_regs *regs, bool interlaced)
+{
+       struct armada_gem_object *obj = drm_fb_obj(fb);
+       unsigned pitch = fb->pitches[0];
+       unsigned offset = y * pitch + x * fb->bits_per_pixel / 8;
+       uint32_t addr_odd, addr_even;
+       unsigned i = 0;
+
+       DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
+               pitch, x, y, fb->bits_per_pixel);
+
+       addr_odd = addr_even = obj->dev_addr + offset;
+
+       if (interlaced) {
+               addr_even += pitch;
+               pitch *= 2;
+       }
+
+       /* write offset, base, and pitch */
+       armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
+       armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
+       armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
+
+       return i;
+}
+
+static int armada_drm_crtc_queue_frame_work(struct armada_crtc *dcrtc,
+       struct armada_frame_work *work)
+{
+       struct drm_device *dev = dcrtc->crtc.dev;
+       unsigned long flags;
+       int ret;
+
+       ret = drm_vblank_get(dev, dcrtc->num);
+       if (ret) {
+               DRM_ERROR("failed to acquire vblank counter\n");
+               return ret;
+       }
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       if (!dcrtc->frame_work)
+               dcrtc->frame_work = work;
+       else
+               ret = -EBUSY;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       if (ret)
+               drm_vblank_put(dev, dcrtc->num);
+
+       return ret;
+}
+
+static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc)
+{
+       struct drm_device *dev = dcrtc->crtc.dev;
+       struct armada_frame_work *work = dcrtc->frame_work;
+
+       dcrtc->frame_work = NULL;
+
+       armada_drm_crtc_update_regs(dcrtc, work->regs);
+
+       if (work->event)
+               drm_send_vblank_event(dev, dcrtc->num, work->event);
+
+       drm_vblank_put(dev, dcrtc->num);
+
+       /* Finally, queue the process-half of the cleanup. */
+       __armada_drm_queue_unref_work(dcrtc->crtc.dev, work->old_fb);
+       kfree(work);
+}
+
+static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
+       struct drm_framebuffer *fb, bool force)
+{
+       struct armada_frame_work *work;
+
+       if (!fb)
+               return;
+
+       if (force) {
+               /* Display is disabled, so just drop the old fb */
+               drm_framebuffer_unreference(fb);
+               return;
+       }
+
+       work = kmalloc(sizeof(*work), GFP_KERNEL);
+       if (work) {
+               int i = 0;
+               work->event = NULL;
+               work->old_fb = fb;
+               armada_reg_queue_end(work->regs, i);
+
+               if (armada_drm_crtc_queue_frame_work(dcrtc, work) == 0)
+                       return;
+
+               kfree(work);
+       }
+
+       /*
+        * Oops - just drop the reference immediately and hope for
+        * the best.  The worst that will happen is the buffer gets
+        * reused before it has finished being displayed.
+        */
+       drm_framebuffer_unreference(fb);
+}
+
+static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
+{
+       struct drm_device *dev = dcrtc->crtc.dev;
+
+       /*
+        * Tell the DRM core that vblank IRQs aren't going to happen for
+        * a while.  This cleans up any pending vblank events for us.
+        */
+       drm_vblank_off(dev, dcrtc->num);
+
+       /* Handle any pending flip event. */
+       spin_lock_irq(&dev->event_lock);
+       if (dcrtc->frame_work)
+               armada_drm_crtc_complete_frame_work(dcrtc);
+       spin_unlock_irq(&dev->event_lock);
+}
+
+void armada_drm_crtc_gamma_set(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+       int idx)
+{
+}
+
+void armada_drm_crtc_gamma_get(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+       int idx)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+       if (dcrtc->dpms != dpms) {
+               dcrtc->dpms = dpms;
+               armada_drm_crtc_update(dcrtc);
+               if (dpms_blanked(dpms))
+                       armada_drm_vblank_off(dcrtc);
+       }
+}
+
+/*
+ * Prepare for a mode set.  Turn off overlay to ensure that we don't end
+ * up with the overlay size being bigger than the active screen size.
+ * We rely upon X refreshing this state after the mode set has completed.
+ *
+ * The mode_config.mutex will be held for this call
+ */
+static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       struct drm_plane *plane;
+
+       /*
+        * If we have an overlay plane associated with this CRTC, disable
+        * it before the modeset to avoid its coordinates being outside
+        * the new mode parameters.  DRM doesn't provide help with this.
+        */
+       plane = dcrtc->plane;
+       if (plane) {
+               struct drm_framebuffer *fb = plane->fb;
+
+               plane->funcs->disable_plane(plane);
+               plane->fb = NULL;
+               plane->crtc = NULL;
+               drm_framebuffer_unreference(fb);
+       }
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+{
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+       if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
+               dcrtc->dpms = DRM_MODE_DPMS_ON;
+               armada_drm_crtc_update(dcrtc);
+       }
+}
+
+/* The mode_config.mutex will be held for this call */
+static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+       const struct drm_display_mode *mode, struct drm_display_mode *adj)
+{
+       struct armada_private *priv = crtc->dev->dev_private;
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       int ret;
+
+       /* We can't do interlaced modes if we don't have the SPU_ADV_REG */
+       if (!priv->variant->has_spu_adv_reg &&
+           adj->flags & DRM_MODE_FLAG_INTERLACE)
+               return false;
+
+       /* Check whether the display mode is possible */
+       ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+       if (ret)
+               return false;
+
+       return true;
+}
+
+void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
+{
+       struct armada_vbl_event *e, *n;
+       void __iomem *base = dcrtc->base;
+
+       if (stat & DMA_FF_UNDERFLOW)
+               DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
+       if (stat & GRA_FF_UNDERFLOW)
+               DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
+
+       if (stat & VSYNC_IRQ)
+               drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num);
+
+       spin_lock(&dcrtc->irq_lock);
+
+       list_for_each_entry_safe(e, n, &dcrtc->vbl_list, node) {
+               list_del_init(&e->node);
+               drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+               e->fn(dcrtc, e->data);
+       }
+
+       if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
+               int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
+               uint32_t val;
+
+               writel_relaxed(dcrtc->v[i].spu_v_porch, base + LCD_SPU_V_PORCH);
+               writel_relaxed(dcrtc->v[i].spu_v_h_total,
+                              base + LCD_SPUT_V_H_TOTAL);
+
+               val = readl_relaxed(base + LCD_SPU_ADV_REG);
+               val &= ~(ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF | ADV_VSYNCOFFEN);
+               val |= dcrtc->v[i].spu_adv_reg;
+               writel_relaxed(val, base + LCD_SPU_ADV_REG);
+       }
+
+       if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
+               writel_relaxed(dcrtc->cursor_hw_pos,
+                              base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+               writel_relaxed(dcrtc->cursor_hw_sz,
+                              base + LCD_SPU_HWC_HPXL_VLN);
+               armada_updatel(CFG_HWC_ENA,
+                              CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
+                              base + LCD_SPU_DMA_CTRL0);
+               dcrtc->cursor_update = false;
+               armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+       }
+
+       spin_unlock(&dcrtc->irq_lock);
+
+       if (stat & GRA_FRAME_IRQ) {
+               struct drm_device *dev = dcrtc->crtc.dev;
+
+               spin_lock(&dev->event_lock);
+               if (dcrtc->frame_work)
+                       armada_drm_crtc_complete_frame_work(dcrtc);
+               spin_unlock(&dev->event_lock);
+
+               wake_up(&dcrtc->frame_wait);
+       }
+}
+
+/* These are locked by dev->vbl_lock */
+void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+       if (dcrtc->irq_ena & mask) {
+               dcrtc->irq_ena &= ~mask;
+               writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+       }
+}
+
+void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
+{
+       if ((dcrtc->irq_ena & mask) != mask) {
+               dcrtc->irq_ena |= mask;
+               writel(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+               if (readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR) & mask)
+                       writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+       }
+}
+
+static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
+{
+       struct drm_display_mode *adj = &dcrtc->crtc.mode;
+       uint32_t val = 0;
+
+       if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
+               val |= CFG_CSC_YUV_CCIR709;
+       if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
+               val |= CFG_CSC_RGB_STUDIO;
+
+       /*
+        * In auto mode, set the colorimetry, based upon the HDMI spec.
+        * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
+        * ITU601.  It may be more appropriate to set this depending on
+        * the source - but what if the graphic frame is YUV and the
+        * video frame is RGB?
+        */
+       if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
+            !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
+           (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
+               if (dcrtc->csc_yuv_mode == CSC_AUTO)
+                       val |= CFG_CSC_YUV_CCIR709;
+       }
+
+       /*
+        * We assume we're connected to a TV-like device, so the YUV->RGB
+        * conversion should produce a limited range.  We should set this
+        * depending on the connectors attached to this CRTC, and what
+        * kind of device they report being connected.
+        */
+       if (dcrtc->csc_rgb_mode == CSC_AUTO)
+               val |= CFG_CSC_RGB_STUDIO;
+
+       return val;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
+       struct drm_display_mode *mode, struct drm_display_mode *adj,
+       int x, int y, struct drm_framebuffer *old_fb)
+{
+       struct armada_private *priv = crtc->dev->dev_private;
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       struct armada_regs regs[17];
+       uint32_t lm, rm, tm, bm, val, sclk;
+       unsigned long flags;
+       unsigned i;
+       bool interlaced;
+
+       drm_framebuffer_reference(crtc->fb);
+
+       interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
+
+       i = armada_drm_crtc_calc_fb(dcrtc->crtc.fb, x, y, regs, interlaced);
+
+       rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
+       lm = adj->crtc_htotal - adj->crtc_hsync_end;
+       bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
+       tm = adj->crtc_vtotal - adj->crtc_vsync_end;
+
+       DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
+               adj->crtc_hdisplay,
+               adj->crtc_hsync_start,
+               adj->crtc_hsync_end,
+               adj->crtc_htotal, lm, rm);
+       DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
+               adj->crtc_vdisplay,
+               adj->crtc_vsync_start,
+               adj->crtc_vsync_end,
+               adj->crtc_vtotal, tm, bm);
+
+       /* Wait for pending flips to complete */
+       wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+       drm_vblank_pre_modeset(crtc->dev, dcrtc->num);
+
+       crtc->mode = *adj;
+
+       val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
+       if (val != dcrtc->dumb_ctrl) {
+               dcrtc->dumb_ctrl = val;
+               writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
+       }
+
+       /* Now compute the divider for real */
+       priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+
+       /* Ensure graphic fifo is enabled */
+       armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
+       armada_reg_queue_set(regs, i, sclk, LCD_CFG_SCLK_DIV);
+
+       if (interlaced ^ dcrtc->interlaced) {
+               if (adj->flags & DRM_MODE_FLAG_INTERLACE)
+                       drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+               else
+                       drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+               dcrtc->interlaced = interlaced;
+       }
+
+       spin_lock_irqsave(&dcrtc->irq_lock, flags);
+
+       /* Even interlaced/progressive frame */
+       dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
+                                   adj->crtc_htotal;
+       dcrtc->v[1].spu_v_porch = tm << 16 | bm;
+       val = adj->crtc_hsync_start;
+       dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+               priv->variant->spu_adv_reg;
+
+       if (interlaced) {
+               /* Odd interlaced frame */
+               dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
+                                               (1 << 16);
+               dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
+               val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
+               dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
+                       priv->variant->spu_adv_reg;
+       } else {
+               dcrtc->v[0] = dcrtc->v[1];
+       }
+
+       val = adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
+
+       armada_reg_queue_set(regs, i, val, LCD_SPU_V_H_ACTIVE);
+       armada_reg_queue_set(regs, i, val, LCD_SPU_GRA_HPXL_VLN);
+       armada_reg_queue_set(regs, i, val, LCD_SPU_GZM_HPXL_VLN);
+       armada_reg_queue_set(regs, i, (lm << 16) | rm, LCD_SPU_H_PORCH);
+       armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_porch, LCD_SPU_V_PORCH);
+       armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
+                          LCD_SPUT_V_H_TOTAL);
+
+       if (priv->variant->has_spu_adv_reg) {
+               armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
+                                    ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
+                                    ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
+       }
+
+       val = CFG_GRA_ENA | CFG_GRA_HSMOOTH;
+       val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt);
+       val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.fb)->mod);
+
+       if (drm_fb_to_armada_fb(dcrtc->crtc.fb)->fmt > CFG_420)
+               val |= CFG_PALETTE_ENA;
+
+       if (interlaced)
+               val |= CFG_GRA_FTOGGLE;
+
+       armada_reg_queue_mod(regs, i, val, CFG_GRAFORMAT |
+                            CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+                                        CFG_SWAPYU | CFG_YUV2RGB) |
+                            CFG_PALETTE_ENA | CFG_GRA_FTOGGLE,
+                            LCD_SPU_DMA_CTRL0);
+
+       val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
+       armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
+
+       val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
+       armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+       armada_reg_queue_end(regs, i);
+
+       armada_drm_crtc_update_regs(dcrtc, regs);
+       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+
+       armada_drm_crtc_update(dcrtc);
+
+       drm_vblank_post_modeset(crtc->dev, dcrtc->num);
+       armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+       return 0;
+}
+
+/* The mode_config.mutex will be held for this call */
+static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+       struct drm_framebuffer *old_fb)
+{
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       struct armada_regs regs[4];
+       unsigned i;
+
+       i = armada_drm_crtc_calc_fb(crtc->fb, crtc->x, crtc->y, regs,
+                                   dcrtc->interlaced);
+       armada_reg_queue_end(regs, i);
+
+       /* Wait for pending flips to complete */
+       wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
+
+       /* Take a reference to the new fb as we're using it */
+       drm_framebuffer_reference(crtc->fb);
+
+       /* Update the base in the CRTC */
+       armada_drm_crtc_update_regs(dcrtc, regs);
+
+       /* Drop our previously held reference */
+       armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+
+       return 0;
+}
+
+static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+/* The mode_config.mutex will be held for this call */
+static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+{
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+       armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+       armada_drm_crtc_finish_fb(dcrtc, crtc->fb, true);
+
+       /* Power down most RAMs and FIFOs */
+       writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+                      CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+                      CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+}
+
+static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
+       .dpms           = armada_drm_crtc_dpms,
+       .prepare        = armada_drm_crtc_prepare,
+       .commit         = armada_drm_crtc_commit,
+       .mode_fixup     = armada_drm_crtc_mode_fixup,
+       .mode_set       = armada_drm_crtc_mode_set,
+       .mode_set_base  = armada_drm_crtc_mode_set_base,
+       .load_lut       = armada_drm_crtc_load_lut,
+       .disable        = armada_drm_crtc_disable,
+};
+
+static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
+       unsigned stride, unsigned width, unsigned height)
+{
+       uint32_t addr;
+       unsigned y;
+
+       addr = SRAM_HWC32_RAM1;
+       for (y = 0; y < height; y++) {
+               uint32_t *p = &pix[y * stride];
+               unsigned x;
+
+               for (x = 0; x < width; x++, p++) {
+                       uint32_t val = *p;
+
+                       val = (val & 0xff00ff00) |
+                             (val & 0x000000ff) << 16 |
+                             (val & 0x00ff0000) >> 16;
+
+                       writel_relaxed(val,
+                                      base + LCD_SPU_SRAM_WRDAT);
+                       writel_relaxed(addr | SRAM_WRITE,
+                                      base + LCD_SPU_SRAM_CTRL);
+                       addr += 1;
+                       if ((addr & 0x00ff) == 0)
+                               addr += 0xf00;
+                       if ((addr & 0x30ff) == 0)
+                               addr = SRAM_HWC32_RAM2;
+               }
+       }
+}
+
+static void armada_drm_crtc_cursor_tran(void __iomem *base)
+{
+       unsigned addr;
+
+       for (addr = 0; addr < 256; addr++) {
+               /* write the default value */
+               writel_relaxed(0x55555555, base + LCD_SPU_SRAM_WRDAT);
+               writel_relaxed(addr | SRAM_WRITE | SRAM_HWC32_TRAN,
+                              base + LCD_SPU_SRAM_CTRL);
+       }
+}
+
+static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
+{
+       uint32_t xoff, xscr, w = dcrtc->cursor_w, s;
+       uint32_t yoff, yscr, h = dcrtc->cursor_h;
+       uint32_t para1;
+
+       /*
+        * Calculate the visible width and height of the cursor,
+        * screen position, and the position in the cursor bitmap.
+        */
+       if (dcrtc->cursor_x < 0) {
+               xoff = -dcrtc->cursor_x;
+               xscr = 0;
+               w -= min(xoff, w);
+       } else if (dcrtc->cursor_x + w > dcrtc->crtc.mode.hdisplay) {
+               xoff = 0;
+               xscr = dcrtc->cursor_x;
+               w = max_t(int, dcrtc->crtc.mode.hdisplay - dcrtc->cursor_x, 0);
+       } else {
+               xoff = 0;
+               xscr = dcrtc->cursor_x;
+       }
+
+       if (dcrtc->cursor_y < 0) {
+               yoff = -dcrtc->cursor_y;
+               yscr = 0;
+               h -= min(yoff, h);
+       } else if (dcrtc->cursor_y + h > dcrtc->crtc.mode.vdisplay) {
+               yoff = 0;
+               yscr = dcrtc->cursor_y;
+               h = max_t(int, dcrtc->crtc.mode.vdisplay - dcrtc->cursor_y, 0);
+       } else {
+               yoff = 0;
+               yscr = dcrtc->cursor_y;
+       }
+
+       /* On interlaced modes, the vertical cursor size must be halved */
+       s = dcrtc->cursor_w;
+       if (dcrtc->interlaced) {
+               s *= 2;
+               yscr /= 2;
+               h /= 2;
+       }
+
+       if (!dcrtc->cursor_obj || !h || !w) {
+               spin_lock_irq(&dcrtc->irq_lock);
+               armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+               dcrtc->cursor_update = false;
+               armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+               spin_unlock_irq(&dcrtc->irq_lock);
+               return 0;
+       }
+
+       para1 = readl_relaxed(dcrtc->base + LCD_SPU_SRAM_PARA1);
+       armada_updatel(CFG_CSB_256x32, CFG_CSB_256x32 | CFG_PDWN256x32,
+                      dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+       /*
+        * Initialize the transparency if the SRAM was powered down.
+        * We must also reload the cursor data as well.
+        */
+       if (!(para1 & CFG_CSB_256x32)) {
+               armada_drm_crtc_cursor_tran(dcrtc->base);
+               reload = true;
+       }
+
+       if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
+               spin_lock_irq(&dcrtc->irq_lock);
+               armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+               dcrtc->cursor_update = false;
+               armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+               spin_unlock_irq(&dcrtc->irq_lock);
+               reload = true;
+       }
+       if (reload) {
+               struct armada_gem_object *obj = dcrtc->cursor_obj;
+               uint32_t *pix;
+               /* Set the top-left corner of the cursor image */
+               pix = obj->addr;
+               pix += yoff * s + xoff;
+               armada_load_cursor_argb(dcrtc->base, pix, s, w, h);
+       }
+
+       /* Reload the cursor position, size and enable in the IRQ handler */
+       spin_lock_irq(&dcrtc->irq_lock);
+       dcrtc->cursor_hw_pos = yscr << 16 | xscr;
+       dcrtc->cursor_hw_sz = h << 16 | w;
+       dcrtc->cursor_update = true;
+       armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+       spin_unlock_irq(&dcrtc->irq_lock);
+
+       return 0;
+}
+
+static void cursor_update(void *data)
+{
+       armada_drm_crtc_cursor_update(data, true);
+}
+
+static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
+       struct drm_file *file, uint32_t handle, uint32_t w, uint32_t h)
+{
+       struct drm_device *dev = crtc->dev;
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       struct armada_private *priv = crtc->dev->dev_private;
+       struct armada_gem_object *obj = NULL;
+       int ret;
+
+       /* If no cursor support, replicate drm's return value */
+       if (!priv->variant->has_spu_adv_reg)
+               return -ENXIO;
+
+       if (handle && w > 0 && h > 0) {
+               /* maximum size is 64x32 or 32x64 */
+               if (w > 64 || h > 64 || (w > 32 && h > 32))
+                       return -ENOMEM;
+
+               obj = armada_gem_object_lookup(dev, file, handle);
+               if (!obj)
+                       return -ENOENT;
+
+               /* Must be a kernel-mapped object */
+               if (!obj->addr) {
+                       drm_gem_object_unreference_unlocked(&obj->obj);
+                       return -EINVAL;
+               }
+
+               if (obj->obj.size < w * h * 4) {
+                       DRM_ERROR("buffer is too small\n");
+                       drm_gem_object_unreference_unlocked(&obj->obj);
+                       return -ENOMEM;
+               }
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       if (dcrtc->cursor_obj) {
+               dcrtc->cursor_obj->update = NULL;
+               dcrtc->cursor_obj->update_data = NULL;
+               drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+       }
+       dcrtc->cursor_obj = obj;
+       dcrtc->cursor_w = w;
+       dcrtc->cursor_h = h;
+       ret = armada_drm_crtc_cursor_update(dcrtc, true);
+       if (obj) {
+               obj->update_data = dcrtc;
+               obj->update = cursor_update;
+       }
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+       struct drm_device *dev = crtc->dev;
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       struct armada_private *priv = crtc->dev->dev_private;
+       int ret;
+
+       /* If no cursor support, replicate drm's return value */
+       if (!priv->variant->has_spu_adv_reg)
+               return -EFAULT;
+
+       mutex_lock(&dev->struct_mutex);
+       dcrtc->cursor_x = x;
+       dcrtc->cursor_y = y;
+       ret = armada_drm_crtc_cursor_update(dcrtc, false);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       struct armada_private *priv = crtc->dev->dev_private;
+
+       if (dcrtc->cursor_obj)
+               drm_gem_object_unreference(&dcrtc->cursor_obj->obj);
+
+       priv->dcrtc[dcrtc->num] = NULL;
+       drm_crtc_cleanup(&dcrtc->crtc);
+
+       if (!IS_ERR(dcrtc->clk))
+               clk_disable_unprepare(dcrtc->clk);
+
+       kfree(dcrtc);
+}
+
+/*
+ * The mode_config lock is held here, to prevent races between this
+ * and a mode_set.
+ */
+static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
+       struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
+{
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       struct armada_frame_work *work;
+       struct drm_device *dev = crtc->dev;
+       unsigned long flags;
+       unsigned i;
+       int ret;
+
+       /* We don't support changing the pixel format */
+       if (fb->pixel_format != crtc->fb->pixel_format)
+               return -EINVAL;
+
+       work = kmalloc(sizeof(*work), GFP_KERNEL);
+       if (!work)
+               return -ENOMEM;
+
+       work->event = event;
+       work->old_fb = dcrtc->crtc.fb;
+
+       i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
+                                   dcrtc->interlaced);
+       armada_reg_queue_end(work->regs, i);
+
+       /*
+        * Hold the old framebuffer for the work - DRM appears to drop our
+        * reference to the old framebuffer in drm_mode_page_flip_ioctl().
+        */
+       drm_framebuffer_reference(work->old_fb);
+
+       ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
+       if (ret) {
+               /*
+                * Undo our reference above; DRM does not drop the reference
+                * to this object on error, so that's okay.
+                */
+               drm_framebuffer_unreference(work->old_fb);
+               kfree(work);
+               return ret;
+       }
+
+       /*
+        * Don't take a reference on the new framebuffer;
+        * drm_mode_page_flip_ioctl() has already grabbed a reference and
+        * will _not_ drop that reference on successful return from this
+        * function.  Simply mark this new framebuffer as the current one.
+        */
+       dcrtc->crtc.fb = fb;
+
+       /*
+        * Finally, if the display is blanked, we won't receive an
+        * interrupt, so complete it now.
+        */
+       if (dpms_blanked(dcrtc->dpms)) {
+               spin_lock_irqsave(&dev->event_lock, flags);
+               if (dcrtc->frame_work)
+                       armada_drm_crtc_complete_frame_work(dcrtc);
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+       }
+
+       return 0;
+}
+
+static int
+armada_drm_crtc_set_property(struct drm_crtc *crtc,
+       struct drm_property *property, uint64_t val)
+{
+       struct armada_private *priv = crtc->dev->dev_private;
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       bool update_csc = false;
+
+       if (property == priv->csc_yuv_prop) {
+               dcrtc->csc_yuv_mode = val;
+               update_csc = true;
+       } else if (property == priv->csc_rgb_prop) {
+               dcrtc->csc_rgb_mode = val;
+               update_csc = true;
+       }
+
+       if (update_csc) {
+               uint32_t val;
+
+               val = dcrtc->spu_iopad_ctrl |
+                     armada_drm_crtc_calculate_csc(dcrtc);
+               writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+       }
+
+       return 0;
+}
+
+static struct drm_crtc_funcs armada_crtc_funcs = {
+       .cursor_set     = armada_drm_crtc_cursor_set,
+       .cursor_move    = armada_drm_crtc_cursor_move,
+       .destroy        = armada_drm_crtc_destroy,
+       .set_config     = drm_crtc_helper_set_config,
+       .page_flip      = armada_drm_crtc_page_flip,
+       .set_property   = armada_drm_crtc_set_property,
+};
+
+static struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
+       { CSC_AUTO,        "Auto" },
+       { CSC_YUV_CCIR601, "CCIR601" },
+       { CSC_YUV_CCIR709, "CCIR709" },
+};
+
+static struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
+       { CSC_AUTO,         "Auto" },
+       { CSC_RGB_COMPUTER, "Computer system" },
+       { CSC_RGB_STUDIO,   "Studio" },
+};
+
+static int armada_drm_crtc_create_properties(struct drm_device *dev)
+{
+       struct armada_private *priv = dev->dev_private;
+
+       if (priv->csc_yuv_prop)
+               return 0;
+
+       priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
+                               "CSC_YUV", armada_drm_csc_yuv_enum_list,
+                               ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
+       priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
+                               "CSC_RGB", armada_drm_csc_rgb_enum_list,
+                               ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
+
+       if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
+               return -ENOMEM;
+
+       return 0;
+}
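+
+/*
+ * A minimal userspace sketch (illustrative, not part of this driver): the
+ * "CSC_YUV" and "CSC_RGB" enum properties created above can be changed via
+ * the generic DRM property interface, e.g. with libdrm, assuming the CRTC
+ * id, property id and enum value have been looked up beforehand with
+ * drmModeObjectGetProperties()/drmModeGetProperty():
+ *
+ *     drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
+ *                              csc_yuv_prop_id, ccir709_enum_value);
+ */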
+
+int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
+       struct resource *res)
+{
+       struct armada_private *priv = dev->dev_private;
+       struct armada_crtc *dcrtc;
+       void __iomem *base;
+       int ret;
+
+       ret = armada_drm_crtc_create_properties(dev);
+       if (ret)
+               return ret;
+
+       base = devm_request_and_ioremap(dev->dev, res);
+       if (!base) {
+               DRM_ERROR("failed to ioremap register\n");
+               return -ENOMEM;
+       }
+
+       dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
+       if (!dcrtc) {
+               DRM_ERROR("failed to allocate Armada crtc\n");
+               return -ENOMEM;
+       }
+
+       dcrtc->base = base;
+       dcrtc->num = num;
+       dcrtc->clk = ERR_PTR(-EINVAL);
+       dcrtc->csc_yuv_mode = CSC_AUTO;
+       dcrtc->csc_rgb_mode = CSC_AUTO;
+       dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
+       dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
+       spin_lock_init(&dcrtc->irq_lock);
+       dcrtc->irq_ena = CLEAN_SPU_IRQ_ISR;
+       INIT_LIST_HEAD(&dcrtc->vbl_list);
+       init_waitqueue_head(&dcrtc->frame_wait);
+
+       /* Initialize some registers which we don't otherwise set */
+       writel_relaxed(0x00000001, dcrtc->base + LCD_CFG_SCLK_DIV);
+       writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_BLANKCOLOR);
+       writel_relaxed(dcrtc->spu_iopad_ctrl,
+                      dcrtc->base + LCD_SPU_IOPAD_CONTROL);
+       writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_SRAM_PARA0);
+       writel_relaxed(CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
+                      CFG_PDWN32x32 | CFG_PDWN16x66 | CFG_PDWN32x66 |
+                      CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+       writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
+       writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+
+       if (priv->variant->crtc_init) {
+               ret = priv->variant->crtc_init(dcrtc);
+               if (ret) {
+                       kfree(dcrtc);
+                       return ret;
+               }
+       }
+
+       /* Ensure AXI pipeline is enabled */
+       armada_updatel(CFG_ARBFAST_ENA, 0, dcrtc->base + LCD_SPU_DMA_CTRL0);
+
+       priv->dcrtc[dcrtc->num] = dcrtc;
+
+       drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+       drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
+
+       drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
+                                  dcrtc->csc_yuv_mode);
+       drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
+                                  dcrtc->csc_rgb_mode);
+
+       return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+}
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
new file mode 100644 (file)
index 0000000..9c10a07
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CRTC_H
+#define ARMADA_CRTC_H
+
+struct armada_gem_object;
+
+struct armada_regs {
+       uint32_t offset;
+       uint32_t mask;
+       uint32_t val;
+};
+
+#define armada_reg_queue_mod(_r, _i, _v, _m, _o)       \
+       do {                                    \
+               struct armada_regs *__reg = _r; \
+               __reg[_i].offset = _o;          \
+               __reg[_i].mask = ~(_m);         \
+               __reg[_i].val = _v;             \
+               _i++;                           \
+       } while (0)
+
+#define armada_reg_queue_set(_r, _i, _v, _o)   \
+       armada_reg_queue_mod(_r, _i, _v, ~0, _o)
+
+#define armada_reg_queue_end(_r, _i)           \
+       armada_reg_queue_mod(_r, _i, 0, 0, ~0)
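+
+/*
+ * A minimal usage sketch (illustrative; register names and values are only
+ * examples): callers fill an array of armada_regs with full or masked
+ * writes and terminate the list before handing it to
+ * armada_drm_crtc_update_regs():
+ *
+ *     struct armada_regs regs[4];
+ *     unsigned i = 0;
+ *
+ *     armada_reg_queue_set(regs, i, ctrl1_val, LCD_SPU_DMA_CTRL1);
+ *     armada_reg_queue_mod(regs, i, CFG_HWC_ENA, CFG_HWC_ENA,
+ *                          LCD_SPU_DMA_CTRL0);
+ *     armada_reg_queue_end(regs, i);
+ *
+ * armada_reg_queue_end() stores an offset of ~0, which marks the end of
+ * the list.
+ */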
+
+struct armada_frame_work;
+
+struct armada_crtc {
+       struct drm_crtc         crtc;
+       unsigned                num;
+       void __iomem            *base;
+       struct clk              *clk;
+       struct {
+               uint32_t        spu_v_h_total;
+               uint32_t        spu_v_porch;
+               uint32_t        spu_adv_reg;
+       } v[2];
+       bool                    interlaced;
+       bool                    cursor_update;
+       uint8_t                 csc_yuv_mode;
+       uint8_t                 csc_rgb_mode;
+
+       struct drm_plane        *plane;
+
+       struct armada_gem_object        *cursor_obj;
+       int                     cursor_x;
+       int                     cursor_y;
+       uint32_t                cursor_hw_pos;
+       uint32_t                cursor_hw_sz;
+       uint32_t                cursor_w;
+       uint32_t                cursor_h;
+
+       int                     dpms;
+       uint32_t                cfg_dumb_ctrl;
+       uint32_t                dumb_ctrl;
+       uint32_t                spu_iopad_ctrl;
+
+       wait_queue_head_t       frame_wait;
+       struct armada_frame_work *frame_work;
+
+       spinlock_t              irq_lock;
+       uint32_t                irq_ena;
+       struct list_head        vbl_list;
+};
+#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
+
+int armada_drm_crtc_create(struct drm_device *, unsigned, struct resource *);
+void armada_drm_crtc_gamma_set(struct drm_crtc *, u16, u16, u16, int);
+void armada_drm_crtc_gamma_get(struct drm_crtc *, u16 *, u16 *, u16 *, int);
+void armada_drm_crtc_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_disable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_enable_irq(struct armada_crtc *, u32);
+void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
new file mode 100644 (file)
index 0000000..612f375
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+
+static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
+{
+       struct drm_info_node *node = m->private;
+       struct drm_device *dev = node->minor->dev;
+       struct armada_private *priv = dev->dev_private;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_mm_dump_table(m, &priv->linear);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+static int armada_debugfs_reg_show(struct seq_file *m, void *data)
+{
+       struct drm_device *dev = m->private;
+       struct armada_private *priv = dev->dev_private;
+       int n, i;
+
+       if (priv) {
+               for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+                       struct armada_crtc *dcrtc = priv->dcrtc[n];
+                       if (!dcrtc)
+                               continue;
+
+                       for (i = 0x84; i <= 0x1c4; i += 4) {
+                               uint32_t v = readl_relaxed(dcrtc->base + i);
+                               seq_printf(m, "%u: 0x%04x: 0x%08x\n", n, i, v);
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int armada_debugfs_reg_r_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, armada_debugfs_reg_show, inode->i_private);
+}
+
+static const struct file_operations fops_reg_r = {
+       .owner = THIS_MODULE,
+       .open = armada_debugfs_reg_r_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static ssize_t armada_debugfs_write(struct file *file, const char __user *ptr,
+       size_t len, loff_t *off)
+{
+       struct drm_device *dev = file->private_data;
+       struct armada_private *priv = dev->dev_private;
+       struct armada_crtc *dcrtc = priv->dcrtc[0];
+       char buf[32], *p;
+       uint32_t reg, val;
+       int ret;
+
+       if (*off != 0)
+               return 0;
+
+       if (len > sizeof(buf) - 1)
+               len = sizeof(buf) - 1;
+
+       ret = strncpy_from_user(buf, ptr, len);
+       if (ret < 0)
+               return ret;
+       buf[len] = '\0';
+
+       reg = simple_strtoul(buf, &p, 16);
+       if (!isspace(*p))
+               return -EINVAL;
+       val = simple_strtoul(p + 1, NULL, 16);
+
+       if (reg >= 0x84 && reg <= 0x1c4)
+               writel(val, dcrtc->base + reg);
+
+       return len;
+}
+
+static int armada_debugfs_reg_w_open(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+static const struct file_operations fops_reg_w = {
+       .owner = THIS_MODULE,
+       .open = armada_debugfs_reg_w_open,
+       .write = armada_debugfs_write,
+       .llseek = noop_llseek,
+};
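+
+/*
+ * Usage sketch (illustrative; the path below assumes the standard DRM
+ * debugfs location): "reg" dumps registers 0x84-0x1c4 of each CRTC, and
+ * "reg_wr" takes an "<offset> <value>" pair in hex which is written to
+ * CRTC 0 when the offset lies in the same range, e.g.:
+ *
+ *     cat /sys/kernel/debug/dri/0/reg
+ *     echo "84 1" > /sys/kernel/debug/dri/0/reg_wr
+ */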
+
+static struct drm_info_list armada_debugfs_list[] = {
+       { "gem_linear", armada_debugfs_gem_linear_show, 0 },
+};
+#define ARMADA_DEBUGFS_ENTRIES ARRAY_SIZE(armada_debugfs_list)
+
+static int drm_add_fake_info_node(struct drm_minor *minor, struct dentry *ent,
+       const void *key)
+{
+       struct drm_info_node *node;
+
+       node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+       if (node == NULL) {
+               debugfs_remove(ent);
+               return -ENOMEM;
+       }
+
+       node->minor = minor;
+       node->dent = ent;
+       node->info_ent = (void *) key;
+
+       mutex_lock(&minor->debugfs_lock);
+       list_add(&node->list, &minor->debugfs_list);
+       mutex_unlock(&minor->debugfs_lock);
+
+       return 0;
+}
+
+static int armada_debugfs_create(struct dentry *root, struct drm_minor *minor,
+       const char *name, umode_t mode, const struct file_operations *fops)
+{
+       struct dentry *de;
+
+       de = debugfs_create_file(name, mode, root, minor->dev, fops);
+
+       return drm_add_fake_info_node(minor, de, fops);
+}
+
+int armada_drm_debugfs_init(struct drm_minor *minor)
+{
+       int ret;
+
+       ret = drm_debugfs_create_files(armada_debugfs_list,
+                                      ARMADA_DEBUGFS_ENTRIES,
+                                      minor->debugfs_root, minor);
+       if (ret)
+               return ret;
+
+       ret = armada_debugfs_create(minor->debugfs_root, minor,
+                                  "reg", S_IFREG | S_IRUSR, &fops_reg_r);
+       if (ret)
+               goto err_1;
+
+       ret = armada_debugfs_create(minor->debugfs_root, minor,
+                               "reg_wr", S_IFREG | S_IWUSR, &fops_reg_w);
+       if (ret)
+               goto err_2;
+       return ret;
+
+ err_2:
+       drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+ err_1:
+       drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+                                minor);
+       return ret;
+}
+
+void armada_drm_debugfs_cleanup(struct drm_minor *minor)
+{
+       drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_w, 1, minor);
+       drm_debugfs_remove_files((struct drm_info_list *)&fops_reg_r, 1, minor);
+       drm_debugfs_remove_files(armada_debugfs_list, ARMADA_DEBUGFS_ENTRIES,
+                                minor);
+}
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
new file mode 100644 (file)
index 0000000..eef09ec
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_DRM_H
+#define ARMADA_DRM_H
+
+#include <linux/kfifo.h>
+#include <linux/io.h>
+#include <linux/workqueue.h>
+#include <drm/drmP.h>
+
+struct armada_crtc;
+struct armada_gem_object;
+struct clk;
+struct drm_fb_helper;
+
+static inline void
+armada_updatel(uint32_t val, uint32_t mask, void __iomem *ptr)
+{
+       uint32_t ov, v;
+
+       ov = v = readl_relaxed(ptr);
+       v = (v & ~mask) | val;
+       if (ov != v)
+               writel_relaxed(v, ptr);
+}
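+
+/*
+ * armada_updatel() is a read-modify-write helper: bits set in 'mask' are
+ * cleared, bits in 'val' are ORed in, and the register is written back
+ * only if the value actually changed.  Illustrative uses (taken from the
+ * CRTC code):
+ *
+ *     armada_updatel(CFG_ARBFAST_ENA, 0, base + LCD_SPU_DMA_CTRL0);
+ *     armada_updatel(0, CFG_HWC_ENA, base + LCD_SPU_DMA_CTRL0);
+ *
+ * The first ORs CFG_ARBFAST_ENA in, the second clears CFG_HWC_ENA.
+ */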
+
+static inline uint32_t armada_pitch(uint32_t width, uint32_t bpp)
+{
+       uint32_t pitch = bpp != 4 ? width * ((bpp + 7) / 8) : width / 2;
+
+       /* 88AP510 spec recommends pitch be a multiple of 128 */
+       return ALIGN(pitch, 128);
+}
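+
+/*
+ * Worked example (illustrative): a 1366 pixel wide, 32bpp framebuffer has
+ * a raw pitch of 1366 * 4 = 5464 bytes, which ALIGN() rounds up to 5504.
+ * The bpp == 4 special case gives width / 2 bytes before alignment.
+ */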
+
+struct armada_vbl_event {
+       struct list_head        node;
+       void                    *data;
+       void                    (*fn)(struct armada_crtc *, void *);
+};
+void armada_drm_vbl_event_add(struct armada_crtc *,
+       struct armada_vbl_event *);
+void armada_drm_vbl_event_remove(struct armada_crtc *,
+       struct armada_vbl_event *);
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *,
+       struct armada_vbl_event *);
+#define armada_drm_vbl_event_init(_e, _f, _d) do {     \
+       struct armada_vbl_event *__e = _e;              \
+       INIT_LIST_HEAD(&__e->node);                     \
+       __e->data = _d;                                 \
+       __e->fn = _f;                                   \
+} while (0)
+
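+/*
+ * A minimal usage sketch (illustrative; names are placeholders): a vblank
+ * event is initialised once with a callback and added to a CRTC whenever a
+ * vblank-synchronised action is wanted.  Adding it takes a vblank
+ * reference, which is dropped again when the event is removed.
+ *
+ *     static void my_vbl_handler(struct armada_crtc *dcrtc, void *data);
+ *
+ *     struct armada_vbl_event evt;
+ *
+ *     armada_drm_vbl_event_init(&evt, my_vbl_handler, my_data);
+ *     armada_drm_vbl_event_add(dcrtc, &evt);
+ */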
+
+struct armada_private;
+
+struct armada_variant {
+       bool    has_spu_adv_reg;
+       uint32_t spu_adv_reg;
+       int (*init)(struct armada_private *, struct device *);
+       int (*crtc_init)(struct armada_crtc *);
+       int (*crtc_compute_clock)(struct armada_crtc *,
+                                 const struct drm_display_mode *,
+                                 uint32_t *);
+};
+
+/* Variant ops */
+extern const struct armada_variant armada510_ops;
+
+struct armada_private {
+       const struct armada_variant *variant;
+       struct work_struct      fb_unref_work;
+       DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
+       struct drm_fb_helper    *fbdev;
+       struct armada_crtc      *dcrtc[2];
+       struct drm_mm           linear;
+       struct clk              *extclk[2];
+       struct drm_property     *csc_yuv_prop;
+       struct drm_property     *csc_rgb_prop;
+       struct drm_property     *colorkey_prop;
+       struct drm_property     *colorkey_min_prop;
+       struct drm_property     *colorkey_max_prop;
+       struct drm_property     *colorkey_val_prop;
+       struct drm_property     *colorkey_alpha_prop;
+       struct drm_property     *colorkey_mode_prop;
+       struct drm_property     *brightness_prop;
+       struct drm_property     *contrast_prop;
+       struct drm_property     *saturation_prop;
+#ifdef CONFIG_DEBUG_FS
+       struct dentry           *de;
+#endif
+};
+
+void __armada_drm_queue_unref_work(struct drm_device *,
+       struct drm_framebuffer *);
+void armada_drm_queue_unref_work(struct drm_device *,
+       struct drm_framebuffer *);
+
+extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
+
+int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_fini(struct drm_device *);
+
+int armada_overlay_plane_create(struct drm_device *, unsigned long);
+
+int armada_drm_debugfs_init(struct drm_minor *);
+void armada_drm_debugfs_cleanup(struct drm_minor *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
new file mode 100644 (file)
index 0000000..4f2b283
--- /dev/null
@@ -0,0 +1,421 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+#include <drm/i2c/tda998x.h>
+#include "armada_slave.h"
+
+static struct tda998x_encoder_params params = {
+       /* With 0x24, there is no translation between vp_out and int_vp
+       FB      LCD out Pins    VIP     Int Vp
+       R:23:16 R:7:0   VPC7:0  7:0     7:0[R]
+       G:15:8  G:15:8  VPB7:0  23:16   23:16[G]
+       B:7:0   B:23:16 VPA7:0  15:8    15:8[B]
+       */
+       .swap_a = 2,
+       .swap_b = 3,
+       .swap_c = 4,
+       .swap_d = 5,
+       .swap_e = 0,
+       .swap_f = 1,
+       .audio_cfg = BIT(2),
+       .audio_frame[1] = 1,
+       .audio_format = AFMT_SPDIF,
+       .audio_sample_rate = 44100,
+};
+
+static const struct armada_drm_slave_config tda19988_config = {
+       .i2c_adapter_id = 0,
+       .crtcs = 1 << 0, /* Only LCD0 at the moment */
+       .polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT,
+       .interlace_allowed = true,
+       .info = {
+               .type = "tda998x",
+               .addr = 0x70,
+               .platform_data = &params,
+       },
+};
+#endif
+
+static void armada_drm_unref_work(struct work_struct *work)
+{
+       struct armada_private *priv =
+               container_of(work, struct armada_private, fb_unref_work);
+       struct drm_framebuffer *fb;
+
+       while (kfifo_get(&priv->fb_unref, &fb))
+               drm_framebuffer_unreference(fb);
+}
+
+/* Must be called with dev->event_lock held */
+void __armada_drm_queue_unref_work(struct drm_device *dev,
+       struct drm_framebuffer *fb)
+{
+       struct armada_private *priv = dev->dev_private;
+
+       /*
+        * Yes, we really must jump through these hoops just to store a
+        * _pointer_ to something into the kfifo.  This is utterly insane
+        * and idiotic, because kfifo requires the _data_ pointed to by the
+        * pointer to be const, not the pointer itself.  Not only that, but
+        * you have to pass a pointer _to_ the pointer you want stored.
+        */
+       const struct drm_framebuffer *silly_api_alert = fb;
+       WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+       schedule_work(&priv->fb_unref_work);
+}
+
+void armada_drm_queue_unref_work(struct drm_device *dev,
+       struct drm_framebuffer *fb)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       __armada_drm_queue_unref_work(dev, fb);
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int armada_drm_load(struct drm_device *dev, unsigned long flags)
+{
+       const struct platform_device_id *id;
+       struct armada_private *priv;
+       struct resource *res[ARRAY_SIZE(priv->dcrtc)];
+       struct resource *mem = NULL;
+       int ret, n, i;
+
+       memset(res, 0, sizeof(res));
+
+       for (n = i = 0; ; n++) {
+               struct resource *r = platform_get_resource(dev->platformdev,
+                                                          IORESOURCE_MEM, n);
+               if (!r)
+                       break;
+
+               /* Resources above 64K are graphics memory */
+               if (resource_size(r) > SZ_64K)
+                       mem = r;
+               else if (i < ARRAY_SIZE(priv->dcrtc))
+                       res[i++] = r;
+               else
+                       return -EINVAL;
+       }
+
+       if (!res[0] || !mem)
+               return -ENXIO;
+
+       if (!devm_request_mem_region(dev->dev, mem->start,
+                       resource_size(mem), "armada-drm"))
+               return -EBUSY;
+
+       priv = devm_kzalloc(dev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               DRM_ERROR("failed to allocate private\n");
+               return -ENOMEM;
+       }
+
+       dev->dev_private = priv;
+
+       /* Get the implementation specific driver data. */
+       id = platform_get_device_id(dev->platformdev);
+       if (!id)
+               return -ENXIO;
+
+       priv->variant = (struct armada_variant *)id->driver_data;
+
+       ret = priv->variant->init(priv, dev->dev);
+       if (ret)
+               return ret;
+
+       INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
+       INIT_KFIFO(priv->fb_unref);
+
+       /* Mode setting support */
+       drm_mode_config_init(dev);
+       dev->mode_config.min_width = 320;
+       dev->mode_config.min_height = 200;
+
+       /*
+        * With vscale enabled, the maximum width is 1920 due to the
+        * 1920 x 3 line RAM
+        */
+       dev->mode_config.max_width = 1920;
+       dev->mode_config.max_height = 2048;
+
+       dev->mode_config.preferred_depth = 24;
+       dev->mode_config.funcs = &armada_drm_mode_config_funcs;
+       drm_mm_init(&priv->linear, mem->start, resource_size(mem));
+
+       /* Create all LCD controllers */
+       for (n = 0; n < ARRAY_SIZE(priv->dcrtc); n++) {
+               if (!res[n])
+                       break;
+
+               ret = armada_drm_crtc_create(dev, n, res[n]);
+               if (ret)
+                       goto err_kms;
+       }
+
+#ifdef CONFIG_DRM_ARMADA_TDA1998X
+       ret = armada_drm_connector_slave_create(dev, &tda19988_config);
+       if (ret)
+               goto err_kms;
+#endif
+
+       ret = drm_vblank_init(dev, n);
+       if (ret)
+               goto err_kms;
+
+       ret = drm_irq_install(dev);
+       if (ret)
+               goto err_kms;
+
+       dev->vblank_disable_allowed = 1;
+
+       ret = armada_fbdev_init(dev);
+       if (ret)
+               goto err_irq;
+
+       drm_kms_helper_poll_init(dev);
+
+       return 0;
+
+ err_irq:
+       drm_irq_uninstall(dev);
+ err_kms:
+       drm_mode_config_cleanup(dev);
+       drm_mm_takedown(&priv->linear);
+       flush_work(&priv->fb_unref_work);
+
+       return ret;
+}
+
+static int armada_drm_unload(struct drm_device *dev)
+{
+       struct armada_private *priv = dev->dev_private;
+
+       drm_kms_helper_poll_fini(dev);
+       armada_fbdev_fini(dev);
+       drm_irq_uninstall(dev);
+       drm_mode_config_cleanup(dev);
+       drm_mm_takedown(&priv->linear);
+       flush_work(&priv->fb_unref_work);
+       dev->dev_private = NULL;
+
+       return 0;
+}
+
+void armada_drm_vbl_event_add(struct armada_crtc *dcrtc,
+       struct armada_vbl_event *evt)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dcrtc->irq_lock, flags);
+       if (list_empty(&evt->node)) {
+               list_add_tail(&evt->node, &dcrtc->vbl_list);
+
+               drm_vblank_get(dcrtc->crtc.dev, dcrtc->num);
+       }
+       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+void armada_drm_vbl_event_remove(struct armada_crtc *dcrtc,
+       struct armada_vbl_event *evt)
+{
+       if (!list_empty(&evt->node)) {
+               list_del_init(&evt->node);
+               drm_vblank_put(dcrtc->crtc.dev, dcrtc->num);
+       }
+}
+
+void armada_drm_vbl_event_remove_unlocked(struct armada_crtc *dcrtc,
+       struct armada_vbl_event *evt)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&dcrtc->irq_lock, flags);
+       armada_drm_vbl_event_remove(dcrtc, evt);
+       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
+
+/* These are called under the vbl_lock. */
+static int armada_drm_enable_vblank(struct drm_device *dev, int crtc)
+{
+       struct armada_private *priv = dev->dev_private;
+       armada_drm_crtc_enable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+       return 0;
+}
+
+static void armada_drm_disable_vblank(struct drm_device *dev, int crtc)
+{
+       struct armada_private *priv = dev->dev_private;
+       armada_drm_crtc_disable_irq(priv->dcrtc[crtc], VSYNC_IRQ_ENA);
+}
+
+static irqreturn_t armada_drm_irq_handler(int irq, void *arg)
+{
+       struct drm_device *dev = arg;
+       struct armada_private *priv = dev->dev_private;
+       struct armada_crtc *dcrtc = priv->dcrtc[0];
+       uint32_t v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+       irqreturn_t handled = IRQ_NONE;
+
+       /*
+        * This is ridiculous - rather than writing bits to clear, we
+        * have to set the actual status register value.  This is racy.
+        */
+       writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+       /* Mask out those interrupts we haven't enabled */
+       v = stat & dcrtc->irq_ena;
+
+       if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+               armada_drm_crtc_irq(dcrtc, stat);
+               handled = IRQ_HANDLED;
+       }
+
+       return handled;
+}
+
+static int armada_drm_irq_postinstall(struct drm_device *dev)
+{
+       struct armada_private *priv = dev->dev_private;
+       struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+       spin_lock_irq(&dev->vbl_lock);
+       writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+       writel(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+       spin_unlock_irq(&dev->vbl_lock);
+
+       return 0;
+}
+
+static void armada_drm_irq_uninstall(struct drm_device *dev)
+{
+       struct armada_private *priv = dev->dev_private;
+       struct armada_crtc *dcrtc = priv->dcrtc[0];
+
+       writel(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+}
+
+static struct drm_ioctl_desc armada_ioctls[] = {
+       DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,
+               DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl,
+               DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(ARMADA_GEM_PWRITE, armada_gem_pwrite_ioctl,
+               DRM_UNLOCKED),
+};
+
+static const struct file_operations armada_drm_fops = {
+       .owner                  = THIS_MODULE,
+       .llseek                 = no_llseek,
+       .read                   = drm_read,
+       .poll                   = drm_poll,
+       .unlocked_ioctl         = drm_ioctl,
+       .mmap                   = drm_gem_mmap,
+       .open                   = drm_open,
+       .release                = drm_release,
+};
+
+static struct drm_driver armada_drm_driver = {
+       .load                   = armada_drm_load,
+       .open                   = NULL,
+       .preclose               = NULL,
+       .postclose              = NULL,
+       .lastclose              = NULL,
+       .unload                 = armada_drm_unload,
+       .get_vblank_counter     = drm_vblank_count,
+       .enable_vblank          = armada_drm_enable_vblank,
+       .disable_vblank         = armada_drm_disable_vblank,
+       .irq_handler            = armada_drm_irq_handler,
+       .irq_postinstall        = armada_drm_irq_postinstall,
+       .irq_uninstall          = armada_drm_irq_uninstall,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_init           = armada_drm_debugfs_init,
+       .debugfs_cleanup        = armada_drm_debugfs_cleanup,
+#endif
+       .gem_free_object        = armada_gem_free_object,
+       .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
+       .gem_prime_export       = armada_gem_prime_export,
+       .gem_prime_import       = armada_gem_prime_import,
+       .dumb_create            = armada_gem_dumb_create,
+       .dumb_map_offset        = armada_gem_dumb_map_offset,
+       .dumb_destroy           = armada_gem_dumb_destroy,
+       .gem_vm_ops             = &armada_gem_vm_ops,
+       .major                  = 1,
+       .minor                  = 0,
+       .name                   = "armada-drm",
+       .desc                   = "Armada SoC DRM",
+       .date                   = "20120730",
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET |
+                                 DRIVER_HAVE_IRQ | DRIVER_PRIME,
+       .ioctls                 = armada_ioctls,
+       .fops                   = &armada_drm_fops,
+};
+
+static int armada_drm_probe(struct platform_device *pdev)
+{
+       return drm_platform_init(&armada_drm_driver, pdev);
+}
+
+static int armada_drm_remove(struct platform_device *pdev)
+{
+       drm_platform_exit(&armada_drm_driver, pdev);
+       return 0;
+}
+
+static const struct platform_device_id armada_drm_platform_ids[] = {
+       {
+               .name           = "armada-drm",
+               .driver_data    = (unsigned long)&armada510_ops,
+       }, {
+               .name           = "armada-510-drm",
+               .driver_data    = (unsigned long)&armada510_ops,
+       },
+       { },
+};
+MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
+
+static struct platform_driver armada_drm_platform_driver = {
+       .probe  = armada_drm_probe,
+       .remove = armada_drm_remove,
+       .driver = {
+               .name   = "armada-drm",
+               .owner  = THIS_MODULE,
+       },
+       .id_table = armada_drm_platform_ids,
+};
+
+static int __init armada_drm_init(void)
+{
+       armada_drm_driver.num_ioctls = DRM_ARRAY_SIZE(armada_ioctls);
+       return platform_driver_register(&armada_drm_platform_driver);
+}
+module_init(armada_drm_init);
+
+static void __exit armada_drm_exit(void)
+{
+       platform_driver_unregister(&armada_drm_platform_driver);
+}
+module_exit(armada_drm_exit);
+
+MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_DESCRIPTION("Armada DRM Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:armada-drm");
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
new file mode 100644 (file)
index 0000000..1c90969
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+
+static void armada_fb_destroy(struct drm_framebuffer *fb)
+{
+       struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+
+       drm_framebuffer_cleanup(&dfb->fb);
+       drm_gem_object_unreference_unlocked(&dfb->obj->obj);
+       kfree(dfb);
+}
+
+static int armada_fb_create_handle(struct drm_framebuffer *fb,
+       struct drm_file *dfile, unsigned int *handle)
+{
+       struct armada_framebuffer *dfb = drm_fb_to_armada_fb(fb);
+       return drm_gem_handle_create(dfile, &dfb->obj->obj, handle);
+}
+
+static const struct drm_framebuffer_funcs armada_fb_funcs = {
+       .destroy        = armada_fb_destroy,
+       .create_handle  = armada_fb_create_handle,
+};
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
+       struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj)
+{
+       struct armada_framebuffer *dfb;
+       uint8_t format, config;
+       int ret;
+
+       switch (mode->pixel_format) {
+#define FMT(drm, fmt, mod)             \
+       case DRM_FORMAT_##drm:          \
+               format = CFG_##fmt;     \
+               config = mod;           \
+               break
+       FMT(RGB565,     565,            CFG_SWAPRB);
+       FMT(BGR565,     565,            0);
+       FMT(ARGB1555,   1555,           CFG_SWAPRB);
+       FMT(ABGR1555,   1555,           0);
+       FMT(RGB888,     888PACK,        CFG_SWAPRB);
+       FMT(BGR888,     888PACK,        0);
+       FMT(XRGB8888,   X888,           CFG_SWAPRB);
+       FMT(XBGR8888,   X888,           0);
+       FMT(ARGB8888,   8888,           CFG_SWAPRB);
+       FMT(ABGR8888,   8888,           0);
+       FMT(YUYV,       422PACK,        CFG_YUV2RGB | CFG_SWAPYU | CFG_SWAPUV);
+       FMT(UYVY,       422PACK,        CFG_YUV2RGB);
+       FMT(VYUY,       422PACK,        CFG_YUV2RGB | CFG_SWAPUV);
+       FMT(YVYU,       422PACK,        CFG_YUV2RGB | CFG_SWAPYU);
+       FMT(YUV422,     422,            CFG_YUV2RGB);
+       FMT(YVU422,     422,            CFG_YUV2RGB | CFG_SWAPUV);
+       FMT(YUV420,     420,            CFG_YUV2RGB);
+       FMT(YVU420,     420,            CFG_YUV2RGB | CFG_SWAPUV);
+       FMT(C8,         PSEUDO8,        0);
+#undef FMT
+       default:
+               return ERR_PTR(-EINVAL);
+       }
+
+       dfb = kzalloc(sizeof(*dfb), GFP_KERNEL);
+       if (!dfb) {
+               DRM_ERROR("failed to allocate Armada fb object\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       dfb->fmt = format;
+       dfb->mod = config;
+       dfb->obj = obj;
+
+       drm_helper_mode_fill_fb_struct(&dfb->fb, mode);
+
+       ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs);
+       if (ret) {
+               kfree(dfb);
+               return ERR_PTR(ret);
+       }
+
+       /*
+        * Take a reference on our object as we're successful - the
+        * caller already holds a reference, which keeps us safe for
+        * the above call, but the caller will drop their reference
+        * to it.  Hence we need to take our own reference.
+        */
+       drm_gem_object_reference(&obj->obj);
+
+       return dfb;
+}
+
+static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+       struct drm_file *dfile, struct drm_mode_fb_cmd2 *mode)
+{
+       struct armada_gem_object *obj;
+       struct armada_framebuffer *dfb;
+       int ret;
+
+       DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n",
+               mode->width, mode->height, mode->pixel_format,
+               mode->flags, mode->pitches[0], mode->pitches[1],
+               mode->pitches[2]);
+
+       /* We can only handle a single plane at the moment */
+       if (drm_format_num_planes(mode->pixel_format) > 1 &&
+           (mode->handles[0] != mode->handles[1] ||
+            mode->handles[0] != mode->handles[2])) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       obj = armada_gem_object_lookup(dev, dfile, mode->handles[0]);
+       if (!obj) {
+               ret = -ENOENT;
+               goto err;
+       }
+
+       if (obj->obj.import_attach && !obj->sgt) {
+               ret = armada_gem_map_import(obj);
+               if (ret)
+                       goto err_unref;
+       }
+
+       /* Framebuffer objects must have a valid device address for scanout */
+       if (obj->dev_addr == DMA_ERROR_CODE) {
+               ret = -EINVAL;
+               goto err_unref;
+       }
+
+       dfb = armada_framebuffer_create(dev, mode, obj);
+       if (IS_ERR(dfb)) {
+               ret = PTR_ERR(dfb);
+               goto err;
+       }
+
+       drm_gem_object_unreference_unlocked(&obj->obj);
+
+       return &dfb->fb;
+
+ err_unref:
+       drm_gem_object_unreference_unlocked(&obj->obj);
+ err:
+       DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
+       return ERR_PTR(ret);
+}
+
+static void armada_output_poll_changed(struct drm_device *dev)
+{
+       struct armada_private *priv = dev->dev_private;
+       struct drm_fb_helper *fbh = priv->fbdev;
+
+       if (fbh)
+               drm_fb_helper_hotplug_event(fbh);
+}
+
+const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+       .fb_create              = armada_fb_create,
+       .output_poll_changed    = armada_output_poll_changed,
+};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
new file mode 100644 (file)
index 0000000..ce3f12e
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_FB_H
+#define ARMADA_FB_H
+
+struct armada_framebuffer {
+       struct drm_framebuffer  fb;
+       struct armada_gem_object *obj;
+       uint8_t                 fmt;
+       uint8_t                 mod;
+};
+#define drm_fb_to_armada_fb(dfb) \
+       container_of(dfb, struct armada_framebuffer, fb)
+#define drm_fb_obj(fb) drm_fb_to_armada_fb(fb)->obj
+
+struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
+       struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
new file mode 100644 (file)
index 0000000..dd5ea77
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Written from the i915 driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+
+static /*const*/ struct fb_ops armada_fb_ops = {
+       .owner          = THIS_MODULE,
+       .fb_check_var   = drm_fb_helper_check_var,
+       .fb_set_par     = drm_fb_helper_set_par,
+       .fb_fillrect    = cfb_fillrect,
+       .fb_copyarea    = cfb_copyarea,
+       .fb_imageblit   = cfb_imageblit,
+       .fb_pan_display = drm_fb_helper_pan_display,
+       .fb_blank       = drm_fb_helper_blank,
+       .fb_setcmap     = drm_fb_helper_setcmap,
+       .fb_debug_enter = drm_fb_helper_debug_enter,
+       .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int armada_fb_create(struct drm_fb_helper *fbh,
+       struct drm_fb_helper_surface_size *sizes)
+{
+       struct drm_device *dev = fbh->dev;
+       struct drm_mode_fb_cmd2 mode;
+       struct armada_framebuffer *dfb;
+       struct armada_gem_object *obj;
+       struct fb_info *info;
+       int size, ret;
+       void *ptr;
+
+       memset(&mode, 0, sizeof(mode));
+       mode.width = sizes->surface_width;
+       mode.height = sizes->surface_height;
+       mode.pitches[0] = armada_pitch(mode.width, sizes->surface_bpp);
+       mode.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+                                       sizes->surface_depth);
+
+       size = mode.pitches[0] * mode.height;
+       obj = armada_gem_alloc_private_object(dev, size);
+       if (!obj) {
+               DRM_ERROR("failed to allocate fb memory\n");
+               return -ENOMEM;
+       }
+
+       ret = armada_gem_linear_back(dev, obj);
+       if (ret) {
+               drm_gem_object_unreference_unlocked(&obj->obj);
+               return ret;
+       }
+
+       ptr = armada_gem_map_object(dev, obj);
+       if (!ptr) {
+               drm_gem_object_unreference_unlocked(&obj->obj);
+               return -ENOMEM;
+       }
+
+       dfb = armada_framebuffer_create(dev, &mode, obj);
+
+       /*
+        * A reference is now held by the framebuffer object if
+        * successful, otherwise this drops the ref for the error path.
+        */
+       drm_gem_object_unreference_unlocked(&obj->obj);
+
+       if (IS_ERR(dfb))
+               return PTR_ERR(dfb);
+
+       info = framebuffer_alloc(0, dev->dev);
+       if (!info) {
+               ret = -ENOMEM;
+               goto err_fballoc;
+       }
+
+       ret = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (ret) {
+               ret = -ENOMEM;
+               goto err_fbcmap;
+       }
+
+       strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
+       info->par = fbh;
+       info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
+       info->fbops = &armada_fb_ops;
+       info->fix.smem_start = obj->phys_addr;
+       info->fix.smem_len = obj->obj.size;
+       info->screen_size = obj->obj.size;
+       info->screen_base = ptr;
+       fbh->fb = &dfb->fb;
+       fbh->fbdev = info;
+       drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
+       drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
+
+       DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
+               dfb->fb.width, dfb->fb.height,
+               dfb->fb.bits_per_pixel, obj->phys_addr);
+
+       return 0;
+
+ err_fbcmap:
+       framebuffer_release(info);
+ err_fballoc:
+       dfb->fb.funcs->destroy(&dfb->fb);
+       return ret;
+}
+
+static int armada_fb_probe(struct drm_fb_helper *fbh,
+       struct drm_fb_helper_surface_size *sizes)
+{
+       int ret = 0;
+
+       if (!fbh->fb) {
+               ret = armada_fb_create(fbh, sizes);
+               if (ret == 0)
+                       ret = 1;
+       }
+       return ret;
+}
+
+static struct drm_fb_helper_funcs armada_fb_helper_funcs = {
+       .gamma_set      = armada_drm_crtc_gamma_set,
+       .gamma_get      = armada_drm_crtc_gamma_get,
+       .fb_probe       = armada_fb_probe,
+};
+
+int armada_fbdev_init(struct drm_device *dev)
+{
+       struct armada_private *priv = dev->dev_private;
+       struct drm_fb_helper *fbh;
+       int ret;
+
+       fbh = devm_kzalloc(dev->dev, sizeof(*fbh), GFP_KERNEL);
+       if (!fbh)
+               return -ENOMEM;
+
+       priv->fbdev = fbh;
+
+       fbh->funcs = &armada_fb_helper_funcs;
+
+       ret = drm_fb_helper_init(dev, fbh, 1, 1);
+       if (ret) {
+               DRM_ERROR("failed to initialize drm fb helper\n");
+               goto err_fb_helper;
+       }
+
+       ret = drm_fb_helper_single_add_all_connectors(fbh);
+       if (ret) {
+               DRM_ERROR("failed to add fb connectors\n");
+               goto err_fb_setup;
+       }
+
+       ret = drm_fb_helper_initial_config(fbh, 32);
+       if (ret) {
+               DRM_ERROR("failed to set initial config\n");
+               goto err_fb_setup;
+       }
+
+       return 0;
+ err_fb_setup:
+       drm_fb_helper_fini(fbh);
+ err_fb_helper:
+       priv->fbdev = NULL;
+       return ret;
+}
+
+void armada_fbdev_fini(struct drm_device *dev)
+{
+       struct armada_private *priv = dev->dev_private;
+       struct drm_fb_helper *fbh = priv->fbdev;
+
+       if (fbh) {
+               struct fb_info *info = fbh->fbdev;
+
+               if (info) {
+                       unregister_framebuffer(info);
+                       if (info->cmap.len)
+                               fb_dealloc_cmap(&info->cmap);
+                       framebuffer_release(info);
+               }
+
+               if (fbh->fb)
+                       fbh->fb->funcs->destroy(fbh->fb);
+
+               drm_fb_helper_fini(fbh);
+
+               priv->fbdev = NULL;
+       }
+}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
new file mode 100644 (file)
index 0000000..9f2356b
--- /dev/null
@@ -0,0 +1,611 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/shmem_fs.h>
+#include <drm/drmP.h>
+#include "armada_drm.h"
+#include "armada_gem.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
+       unsigned long addr = (unsigned long)vmf->virtual_address;
+       unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
+       int ret;
+
+       pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
+       ret = vm_insert_pfn(vma, addr, pfn);
+
+       switch (ret) {
+       case 0:
+       case -EBUSY:
+               return VM_FAULT_NOPAGE;
+       case -ENOMEM:
+               return VM_FAULT_OOM;
+       default:
+               return VM_FAULT_SIGBUS;
+       }
+}
+
+const struct vm_operations_struct armada_gem_vm_ops = {
+       .fault  = armada_gem_vm_fault,
+       .open   = drm_gem_vm_open,
+       .close  = drm_gem_vm_close,
+};
+
+static size_t roundup_gem_size(size_t size)
+{
+       return roundup(size, PAGE_SIZE);
+}
+
+/* dev->struct_mutex is held here */
+void armada_gem_free_object(struct drm_gem_object *obj)
+{
+       struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+
+       DRM_DEBUG_DRIVER("release obj %p\n", dobj);
+
+       drm_gem_free_mmap_offset(&dobj->obj);
+
+       if (dobj->page) {
+               /* page backed memory */
+               unsigned int order = get_order(dobj->obj.size);
+               __free_pages(dobj->page, order);
+       } else if (dobj->linear) {
+               /* linear backed memory */
+               drm_mm_remove_node(dobj->linear);
+               kfree(dobj->linear);
+               if (dobj->addr)
+                       iounmap(dobj->addr);
+       }
+
+       if (dobj->obj.import_attach) {
+               /* We only ever display imported data */
+               dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
+                                        DMA_TO_DEVICE);
+               drm_prime_gem_destroy(&dobj->obj, NULL);
+       }
+
+       drm_gem_object_release(&dobj->obj);
+
+       kfree(dobj);
+}
+
+int
+armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
+{
+       struct armada_private *priv = dev->dev_private;
+       size_t size = obj->obj.size;
+
+       if (obj->page || obj->linear)
+               return 0;
+
+       /*
+        * If it is a small allocation (typically cursor, which will
+        * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
+        * Framebuffers will never be this small (our minimum size for
+        * framebuffers is larger than this anyway.)  Such objects are
+        * only accessed by the CPU, so we don't need any special handling
+        * here.
+        */
+       if (size <= 8192) {
+               unsigned int order = get_order(size);
+               struct page *p = alloc_pages(GFP_KERNEL, order);
+
+               if (p) {
+                       obj->addr = page_address(p);
+                       obj->phys_addr = page_to_phys(p);
+                       obj->page = p;
+
+                       memset(obj->addr, 0, PAGE_ALIGN(size));
+               }
+       }
+
+       /*
+        * We could grab something from CMA if it's enabled, but that
+        * involves building in a problem:
+        *
+        * CMA's interface uses dma_alloc_coherent(), which provides us
+        * with a CPU virtual address and a device address.
+        *
+        * The CPU virtual address may be either an address in the kernel
+        * direct mapped region (for example, as it would be on x86) or
+        * it may be remapped into another part of kernel memory space
+        * (e.g., as it would be on ARM.)  This means virt_to_phys() on the
+        * returned virtual address may be invalid, depending on the
+        * architecture implementation.
+        *
+        * The device address may also not be a physical address; it may
+        * be that there is some kind of remapping between the device and
+        * system RAM, which also makes the device address unsafe to re-use
+        * as a physical address.
+        *
+        * This makes DRM usage of dma_alloc_coherent() in a generic way
+        * at best very questionable and unsafe.
+        */
+
+       /* Otherwise, grab it from our linear allocation */
+       if (!obj->page) {
+               struct drm_mm_node *node;
+               unsigned align = min_t(unsigned, size, SZ_2M);
+               void __iomem *ptr;
+               int ret;
+
+               node = kzalloc(sizeof(*node), GFP_KERNEL);
+               if (!node)
+                       return -ENOSPC;
+
+               mutex_lock(&dev->struct_mutex);
+               ret = drm_mm_insert_node(&priv->linear, node, size, align,
+                                        DRM_MM_SEARCH_DEFAULT);
+               mutex_unlock(&dev->struct_mutex);
+               if (ret) {
+                       kfree(node);
+                       return ret;
+               }
+
+               obj->linear = node;
+
+               /* Ensure that the memory we're returning is cleared. */
+               ptr = ioremap_wc(obj->linear->start, size);
+               if (!ptr) {
+                       mutex_lock(&dev->struct_mutex);
+                       drm_mm_remove_node(obj->linear);
+                       mutex_unlock(&dev->struct_mutex);
+                       kfree(obj->linear);
+                       obj->linear = NULL;
+                       return -ENOMEM;
+               }
+
+               memset_io(ptr, 0, size);
+               iounmap(ptr);
+
+               obj->phys_addr = obj->linear->start;
+               obj->dev_addr = obj->linear->start;
+       }
+
+       DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
+                        obj, obj->phys_addr, obj->dev_addr);
+
+       return 0;
+}
+
+void *
+armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
+{
+       /* only linear objects need to be ioremap'd */
+       if (!dobj->addr && dobj->linear)
+               dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
+       return dobj->addr;
+}
+
+struct armada_gem_object *
+armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
+{
+       struct armada_gem_object *obj;
+
+       size = roundup_gem_size(size);
+
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (!obj)
+               return NULL;
+
+       drm_gem_private_object_init(dev, &obj->obj, size);
+       obj->dev_addr = DMA_ERROR_CODE;
+
+       DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
+
+       return obj;
+}
+
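+/* Allocate a shmem-backed GEM object, as used by the GEM_CREATE ioctl. */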
+struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
+       size_t size)
+{
+       struct armada_gem_object *obj;
+       struct address_space *mapping;
+
+       size = roundup_gem_size(size);
+
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (!obj)
+               return NULL;
+
+       if (drm_gem_object_init(dev, &obj->obj, size)) {
+               kfree(obj);
+               return NULL;
+       }
+
+       obj->dev_addr = DMA_ERROR_CODE;
+
+       mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
+       mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
+       DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
+
+       return obj;
+}
+
+/* Dumb alloc support */
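+/*
+ * Create a "dumb" buffer for unaccelerated scanout: compute the pitch and
+ * size, allocate a private GEM object, back it with memory the display
+ * controller can scan out, and hand a handle back to userspace.
+ */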
+int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+       struct drm_mode_create_dumb *args)
+{
+       struct armada_gem_object *dobj;
+       u32 handle;
+       size_t size;
+       int ret;
+
+       args->pitch = armada_pitch(args->width, args->bpp);
+       args->size = size = args->pitch * args->height;
+
+       dobj = armada_gem_alloc_private_object(dev, size);
+       if (dobj == NULL)
+               return -ENOMEM;
+
+       ret = armada_gem_linear_back(dev, dobj);
+       if (ret)
+               goto err;
+
+       ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+       if (ret)
+               goto err;
+
+       args->handle = handle;
+
+       /* drop reference from allocate - handle holds it now */
+       DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+       drm_gem_object_unreference_unlocked(&dobj->obj);
+       return ret;
+}
+
+int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+       uint32_t handle, uint64_t *offset)
+{
+       struct armada_gem_object *obj;
+       int ret = 0;
+
+       mutex_lock(&dev->struct_mutex);
+       obj = armada_gem_object_lookup(dev, file, handle);
+       if (!obj) {
+               DRM_ERROR("failed to lookup gem object\n");
+               ret = -EINVAL;
+               goto err_unlock;
+       }
+
+       /* Don't allow imported objects to be mapped */
+       if (obj->obj.import_attach) {
+               ret = -EINVAL;
+               goto err_unlock;
+       }
+
+       ret = drm_gem_create_mmap_offset(&obj->obj);
+       if (ret == 0) {
+               *offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
+               DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
+       }
+
+       drm_gem_object_unreference(&obj->obj);
+ err_unlock:
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
+}
+
+int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+       uint32_t handle)
+{
+       return drm_gem_handle_delete(file, handle);
+}
+
+/* Private driver gem ioctls */
+int armada_gem_create_ioctl(struct drm_device *dev, void *data,
+       struct drm_file *file)
+{
+       struct drm_armada_gem_create *args = data;
+       struct armada_gem_object *dobj;
+       size_t size;
+       u32 handle;
+       int ret;
+
+       if (args->size == 0)
+               return -ENOMEM;
+
+       size = args->size;
+
+       dobj = armada_gem_alloc_object(dev, size);
+       if (dobj == NULL)
+               return -ENOMEM;
+
+       ret = drm_gem_handle_create(file, &dobj->obj, &handle);
+       if (ret)
+               goto err;
+
+       args->handle = handle;
+
+       /* drop reference from allocate - handle holds it now */
+       DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
+ err:
+       drm_gem_object_unreference_unlocked(&dobj->obj);
+       return ret;
+}
+
+/* Map a shmem-backed object into process memory space */
+int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
+       struct drm_file *file)
+{
+       struct drm_armada_gem_mmap *args = data;
+       struct armada_gem_object *dobj;
+       unsigned long addr;
+
+       dobj = armada_gem_object_lookup(dev, file, args->handle);
+       if (dobj == NULL)
+               return -ENOENT;
+
+       if (!dobj->obj.filp) {
+               drm_gem_object_unreference(&dobj->obj);
+               return -EINVAL;
+       }
+
+       addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
+                      MAP_SHARED, args->offset);
+       drm_gem_object_unreference(&dobj->obj);
+       if (IS_ERR_VALUE(addr))
+               return addr;
+
+       args->addr = addr;
+
+       return 0;
+}
+
+int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+       struct drm_file *file)
+{
+       struct drm_armada_gem_pwrite *args = data;
+       struct armada_gem_object *dobj;
+       char __user *ptr;
+       int ret;
+
+       DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
+               args->handle, args->offset, args->size, args->ptr);
+
+       if (args->size == 0)
+               return 0;
+
+       ptr = (char __user *)(uintptr_t)args->ptr;
+
+       if (!access_ok(VERIFY_READ, ptr, args->size))
+               return -EFAULT;
+
+       ret = fault_in_multipages_readable(ptr, args->size);
+       if (ret)
+               return ret;
+
+       dobj = armada_gem_object_lookup(dev, file, args->handle);
+       if (dobj == NULL)
+               return -ENOENT;
+
+       /* Must be a kernel-mapped object */
+       if (!dobj->addr) {
+               ret = -EINVAL;
+               goto unref;
+       }
+
+       if (args->offset > dobj->obj.size ||
+           args->size > dobj->obj.size - args->offset) {
+               DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
+               ret = -EINVAL;
+               goto unref;
+       }
+
+       if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
+               ret = -EFAULT;
+       } else if (dobj->update) {
+               dobj->update(dobj->update_data);
+               ret = 0;
+       }
+
+ unref:
+       drm_gem_object_unreference_unlocked(&dobj->obj);
+       return ret;
+}
+
+/* Prime support */
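+/*
+ * Build a scatterlist for a dma-buf importer.  Three backings are handled:
+ * shmem objects (collect the page cache pages and DMA-map them), objects
+ * backed by a single page, and linear-pool objects (no struct page, so
+ * only the DMA address and length are filled in).
+ */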
+struct sg_table *
+armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
+       enum dma_data_direction dir)
+{
+       struct drm_gem_object *obj = attach->dmabuf->priv;
+       struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+       struct scatterlist *sg;
+       struct sg_table *sgt;
+       int i, num;
+
+       sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+       if (!sgt)
+               return NULL;
+
+       if (dobj->obj.filp) {
+               struct address_space *mapping;
+               gfp_t gfp;
+               int count;
+
+               count = dobj->obj.size / PAGE_SIZE;
+               if (sg_alloc_table(sgt, count, GFP_KERNEL))
+                       goto free_sgt;
+
+               mapping = file_inode(dobj->obj.filp)->i_mapping;
+               gfp = mapping_gfp_mask(mapping);
+
+               for_each_sg(sgt->sgl, sg, count, i) {
+                       struct page *page;
+
+                       page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+                       if (IS_ERR(page)) {
+                               num = i;
+                               goto release;
+                       }
+
+                       sg_set_page(sg, page, PAGE_SIZE, 0);
+               }
+
+               if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
+                       num = sgt->nents;
+                       goto release;
+               }
+       } else if (dobj->page) {
+               /* Single contiguous page */
+               if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+                       goto free_sgt;
+
+               sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
+
+               if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+                       goto free_table;
+       } else if (dobj->linear) {
+               /* Single contiguous physical region - no struct page */
+               if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+                       goto free_sgt;
+               sg_dma_address(sgt->sgl) = dobj->dev_addr;
+               sg_dma_len(sgt->sgl) = dobj->obj.size;
+       } else {
+               goto free_sgt;
+       }
+       return sgt;
+
+ release:
+       for_each_sg(sgt->sgl, sg, num, i)
+               page_cache_release(sg_page(sg));
+ free_table:
+       sg_free_table(sgt);
+ free_sgt:
+       kfree(sgt);
+       return NULL;
+}
+
+static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+       struct sg_table *sgt, enum dma_data_direction dir)
+{
+       struct drm_gem_object *obj = attach->dmabuf->priv;
+       struct armada_gem_object *dobj = drm_to_armada_gem(obj);
+       int i;
+
+       if (!dobj->linear)
+               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
+       if (dobj->obj.filp) {
+               struct scatterlist *sg;
+               for_each_sg(sgt->sgl, sg, sgt->nents, i)
+                       page_cache_release(sg_page(sg));
+       }
+
+       sg_free_table(sgt);
+       kfree(sgt);
+}
+
+static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
+{
+       return NULL;
+}
+
+static void
+armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
+{
+}
+
+static int
+armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+       return -EINVAL;
+}
+
+static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
+       .map_dma_buf    = armada_gem_prime_map_dma_buf,
+       .unmap_dma_buf  = armada_gem_prime_unmap_dma_buf,
+       .release        = drm_gem_dmabuf_release,
+       .kmap_atomic    = armada_gem_dmabuf_no_kmap,
+       .kunmap_atomic  = armada_gem_dmabuf_no_kunmap,
+       .kmap           = armada_gem_dmabuf_no_kmap,
+       .kunmap         = armada_gem_dmabuf_no_kunmap,
+       .mmap           = armada_gem_dmabuf_mmap,
+};
+
+struct dma_buf *
+armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
+       int flags)
+{
+       return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
+                             O_RDWR);
+}
+
+struct drm_gem_object *
+armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
+{
+       struct dma_buf_attachment *attach;
+       struct armada_gem_object *dobj;
+
+       if (buf->ops == &armada_gem_prime_dmabuf_ops) {
+               struct drm_gem_object *obj = buf->priv;
+               if (obj->dev == dev) {
+                       /*
+                        * Importing our own dmabuf(s) increases the
+                        * refcount on the gem object itself.
+                        */
+                       drm_gem_object_reference(obj);
+                       dma_buf_put(buf);
+                       return obj;
+               }
+       }
+
+       attach = dma_buf_attach(buf, dev->dev);
+       if (IS_ERR(attach))
+               return ERR_CAST(attach);
+
+       dobj = armada_gem_alloc_private_object(dev, buf->size);
+       if (!dobj) {
+               dma_buf_detach(buf, attach);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       dobj->obj.import_attach = attach;
+
+       /*
+        * Don't call dma_buf_map_attachment() here - it maps the
+        * scatterlist immediately for DMA, and this is not always
+        * an appropriate thing to do.
+        */
+       return &dobj->obj;
+}
+
+int armada_gem_map_import(struct armada_gem_object *dobj)
+{
+       int ret;
+
+       dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
+                                         DMA_TO_DEVICE);
+       if (!dobj->sgt) {
+               DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
+               return -EINVAL;
+       }
+       if (IS_ERR(dobj->sgt)) {
+               ret = PTR_ERR(dobj->sgt);
+               dobj->sgt = NULL;
+               DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
+               return ret;
+       }
+       if (dobj->sgt->nents > 1) {
+               DRM_ERROR("dma_buf_map_attachment() returned a scatterlist with more than one entry (unsupported)\n");
+               return -EINVAL;
+       }
+       if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
+               DRM_ERROR("dma_buf_map_attachment() returned a buffer smaller than the object\n");
+               return -EINVAL;
+       }
+       dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
+       return 0;
+}
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
new file mode 100644 (file)
index 0000000..00b6cd4
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_GEM_H
+#define ARMADA_GEM_H
+
+/* GEM */
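+/*
+ * addr      - kernel virtual mapping of the buffer, NULL if not mapped
+ * phys_addr - CPU physical address for page/linear backed objects
+ * dev_addr  - address as seen by the display controller; DMA_ERROR_CODE
+ *             until the object has been backed or imported
+ *
+ * At most one of page, linear and sgt is set, depending on how the
+ * object is backed; plain shmem objects set none of them.
+ */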
+struct armada_gem_object {
+       struct drm_gem_object   obj;
+       void                    *addr;
+       phys_addr_t             phys_addr;
+       resource_size_t         dev_addr;
+       struct drm_mm_node      *linear;        /* for linear backed */
+       struct page             *page;          /* for page backed */
+       struct sg_table         *sgt;           /* for imported */
+       void                    (*update)(void *);
+       void                    *update_data;
+};
+
+extern const struct vm_operations_struct armada_gem_vm_ops;
+
+#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
+
+void armada_gem_free_object(struct drm_gem_object *);
+int armada_gem_linear_back(struct drm_device *, struct armada_gem_object *);
+void *armada_gem_map_object(struct drm_device *, struct armada_gem_object *);
+struct armada_gem_object *armada_gem_alloc_private_object(struct drm_device *,
+       size_t);
+int armada_gem_dumb_create(struct drm_file *, struct drm_device *,
+       struct drm_mode_create_dumb *);
+int armada_gem_dumb_map_offset(struct drm_file *, struct drm_device *,
+       uint32_t, uint64_t *);
+int armada_gem_dumb_destroy(struct drm_file *, struct drm_device *,
+       uint32_t);
+struct dma_buf *armada_gem_prime_export(struct drm_device *dev,
+       struct drm_gem_object *obj, int flags);
+struct drm_gem_object *armada_gem_prime_import(struct drm_device *,
+       struct dma_buf *);
+int armada_gem_map_import(struct armada_gem_object *);
+
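+/*
+ * Look up a GEM handle and return our object type.  On success this takes
+ * a reference on the underlying GEM object, which the caller must drop
+ * when finished with it.
+ */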
+static inline struct armada_gem_object *armada_gem_object_lookup(
+       struct drm_device *dev, struct drm_file *dfile, unsigned handle)
+{
+       struct drm_gem_object *obj = drm_gem_object_lookup(dev, dfile, handle);
+
+       return obj ? drm_to_armada_gem(obj) : NULL;
+}
+#endif
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
new file mode 100644 (file)
index 0000000..27319a8
--- /dev/null
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_HW_H
+#define ARMADA_HW_H
+
+/*
+ * Note: the following registers are written from IRQ context:
+ *  LCD_SPU_V_PORCH, LCD_SPU_ADV_REG, LCD_SPUT_V_H_TOTAL
+ *  LCD_SPU_DMA_START_ADDR_[YUV][01], LCD_SPU_DMA_PITCH_YC,
+ *  LCD_SPU_DMA_PITCH_UV, LCD_SPU_DMA_OVSA_HPXL_VLN,
+ *  LCD_SPU_DMA_HPXL_VLN, LCD_SPU_DZM_HPXL_VLN, LCD_SPU_DMA_CTRL0
+ */
+enum {
+       LCD_SPU_ADV_REG                 = 0x0084,       /* Armada 510 */
+       LCD_SPU_DMA_START_ADDR_Y0       = 0x00c0,
+       LCD_SPU_DMA_START_ADDR_U0       = 0x00c4,
+       LCD_SPU_DMA_START_ADDR_V0       = 0x00c8,
+       LCD_CFG_DMA_START_ADDR_0        = 0x00cc,
+       LCD_SPU_DMA_START_ADDR_Y1       = 0x00d0,
+       LCD_SPU_DMA_START_ADDR_U1       = 0x00d4,
+       LCD_SPU_DMA_START_ADDR_V1       = 0x00d8,
+       LCD_CFG_DMA_START_ADDR_1        = 0x00dc,
+       LCD_SPU_DMA_PITCH_YC            = 0x00e0,
+       LCD_SPU_DMA_PITCH_UV            = 0x00e4,
+       LCD_SPU_DMA_OVSA_HPXL_VLN       = 0x00e8,
+       LCD_SPU_DMA_HPXL_VLN            = 0x00ec,
+       LCD_SPU_DZM_HPXL_VLN            = 0x00f0,
+       LCD_CFG_GRA_START_ADDR0         = 0x00f4,
+       LCD_CFG_GRA_START_ADDR1         = 0x00f8,
+       LCD_CFG_GRA_PITCH               = 0x00fc,
+       LCD_SPU_GRA_OVSA_HPXL_VLN       = 0x0100,
+       LCD_SPU_GRA_HPXL_VLN            = 0x0104,
+       LCD_SPU_GZM_HPXL_VLN            = 0x0108,
+       LCD_SPU_HWC_OVSA_HPXL_VLN       = 0x010c,
+       LCD_SPU_HWC_HPXL_VLN            = 0x0110,
+       LCD_SPUT_V_H_TOTAL              = 0x0114,
+       LCD_SPU_V_H_ACTIVE              = 0x0118,
+       LCD_SPU_H_PORCH                 = 0x011c,
+       LCD_SPU_V_PORCH                 = 0x0120,
+       LCD_SPU_BLANKCOLOR              = 0x0124,
+       LCD_SPU_ALPHA_COLOR1            = 0x0128,
+       LCD_SPU_ALPHA_COLOR2            = 0x012c,
+       LCD_SPU_COLORKEY_Y              = 0x0130,
+       LCD_SPU_COLORKEY_U              = 0x0134,
+       LCD_SPU_COLORKEY_V              = 0x0138,
+       LCD_CFG_RDREG4F                 = 0x013c,       /* Armada 510 */
+       LCD_SPU_SPI_RXDATA              = 0x0140,
+       LCD_SPU_ISA_RXDATA              = 0x0144,
+       LCD_SPU_HWC_RDDAT               = 0x0158,
+       LCD_SPU_GAMMA_RDDAT             = 0x015c,
+       LCD_SPU_PALETTE_RDDAT           = 0x0160,
+       LCD_SPU_IOPAD_IN                = 0x0178,
+       LCD_CFG_RDREG5F                 = 0x017c,
+       LCD_SPU_SPI_CTRL                = 0x0180,
+       LCD_SPU_SPI_TXDATA              = 0x0184,
+       LCD_SPU_SMPN_CTRL               = 0x0188,
+       LCD_SPU_DMA_CTRL0               = 0x0190,
+       LCD_SPU_DMA_CTRL1               = 0x0194,
+       LCD_SPU_SRAM_CTRL               = 0x0198,
+       LCD_SPU_SRAM_WRDAT              = 0x019c,
+       LCD_SPU_SRAM_PARA0              = 0x01a0,       /* Armada 510 */
+       LCD_SPU_SRAM_PARA1              = 0x01a4,
+       LCD_CFG_SCLK_DIV                = 0x01a8,
+       LCD_SPU_CONTRAST                = 0x01ac,
+       LCD_SPU_SATURATION              = 0x01b0,
+       LCD_SPU_CBSH_HUE                = 0x01b4,
+       LCD_SPU_DUMB_CTRL               = 0x01b8,
+       LCD_SPU_IOPAD_CONTROL           = 0x01bc,
+       LCD_SPU_IRQ_ENA                 = 0x01c0,
+       LCD_SPU_IRQ_ISR                 = 0x01c4,
+};
+
+/* For LCD_SPU_ADV_REG */
+enum {
+       ADV_VSYNC_L_OFF = 0xfff << 20,
+       ADV_GRACOLORKEY = 1 << 19,
+       ADV_VIDCOLORKEY = 1 << 18,
+       ADV_HWC32BLEND  = 1 << 15,
+       ADV_HWC32ARGB   = 1 << 14,
+       ADV_HWC32ENABLE = 1 << 13,
+       ADV_VSYNCOFFEN  = 1 << 12,
+       ADV_VSYNC_H_OFF = 0xfff << 0,
+};
+
+enum {
+       CFG_565         = 0,
+       CFG_1555        = 1,
+       CFG_888PACK     = 2,
+       CFG_X888        = 3,
+       CFG_8888        = 4,
+       CFG_422PACK     = 5,
+       CFG_422         = 6,
+       CFG_420         = 7,
+       CFG_PSEUDO4     = 9,
+       CFG_PSEUDO8     = 10,
+       CFG_SWAPRB      = 1 << 4,
+       CFG_SWAPUV      = 1 << 3,
+       CFG_SWAPYU      = 1 << 2,
+       CFG_YUV2RGB     = 1 << 1,
+};
+
+/* For LCD_SPU_DMA_CTRL0 */
+enum {
+       CFG_NOBLENDING  = 1 << 31,
+       CFG_GAMMA_ENA   = 1 << 30,
+       CFG_CBSH_ENA    = 1 << 29,
+       CFG_PALETTE_ENA = 1 << 28,
+       CFG_ARBFAST_ENA = 1 << 27,
+       CFG_HWC_1BITMOD = 1 << 26,
+       CFG_HWC_1BITENA = 1 << 25,
+       CFG_HWC_ENA     = 1 << 24,
+       CFG_DMAFORMAT   = 0xf << 20,
+#define        CFG_DMA_FMT(x)  ((x) << 20)
+       CFG_GRAFORMAT   = 0xf << 16,
+#define        CFG_GRA_FMT(x)  ((x) << 16)
+#define CFG_GRA_MOD(x) ((x) << 8)
+       CFG_GRA_FTOGGLE = 1 << 15,
+       CFG_GRA_HSMOOTH = 1 << 14,
+       CFG_GRA_TSTMODE = 1 << 13,
+       CFG_GRA_ENA     = 1 << 8,
+#define CFG_DMA_MOD(x) ((x) << 0)
+       CFG_DMA_FTOGGLE = 1 << 7,
+       CFG_DMA_HSMOOTH = 1 << 6,
+       CFG_DMA_TSTMODE = 1 << 5,
+       CFG_DMA_ENA     = 1 << 0,
+};
+
+enum {
+       CKMODE_DISABLE  = 0,
+       CKMODE_Y        = 1,
+       CKMODE_U        = 2,
+       CKMODE_RGB      = 3,
+       CKMODE_V        = 4,
+       CKMODE_R        = 5,
+       CKMODE_G        = 6,
+       CKMODE_B        = 7,
+};
+
+/* For LCD_SPU_DMA_CTRL1 */
+enum {
+       CFG_FRAME_TRIG          = 1 << 31,
+       CFG_VSYNC_INV           = 1 << 27,
+       CFG_CKMODE_MASK         = 0x7 << 24,
+#define CFG_CKMODE(x)          ((x) << 24)
+       CFG_CARRY               = 1 << 23,
+       CFG_GATED_CLK           = 1 << 21,
+       CFG_PWRDN_ENA           = 1 << 20,
+       CFG_DSCALE_MASK         = 0x3 << 18,
+       CFG_DSCALE_NONE         = 0x0 << 18,
+       CFG_DSCALE_HALF         = 0x1 << 18,
+       CFG_DSCALE_QUAR         = 0x2 << 18,
+       CFG_ALPHAM_MASK         = 0x3 << 16,
+       CFG_ALPHAM_VIDEO        = 0x0 << 16,
+       CFG_ALPHAM_GRA          = 0x1 << 16,
+       CFG_ALPHAM_CFG          = 0x2 << 16,
+       CFG_ALPHA_MASK          = 0xff << 8,
+       CFG_PIXCMD_MASK         = 0xff,
+};
+
+/* For LCD_SPU_SRAM_CTRL */
+enum {
+       SRAM_READ       = 0 << 14,
+       SRAM_WRITE      = 2 << 14,
+       SRAM_INIT       = 3 << 14,
+       SRAM_HWC32_RAM1 = 0xc << 8,
+       SRAM_HWC32_RAM2 = 0xd << 8,
+       SRAM_HWC32_RAMR = SRAM_HWC32_RAM1,
+       SRAM_HWC32_RAMG = SRAM_HWC32_RAM2,
+       SRAM_HWC32_RAMB = 0xe << 8,
+       SRAM_HWC32_TRAN = 0xf << 8,
+       SRAM_HWC        = 0xf << 8,
+};
+
+/* For LCD_SPU_SRAM_PARA1 */
+enum {
+       CFG_CSB_256x32  = 1 << 15,      /* cursor */
+       CFG_CSB_256x24  = 1 << 14,      /* palette */
+       CFG_CSB_256x8   = 1 << 13,      /* gamma */
+       CFG_PDWN1920x32 = 1 << 8,       /* Armada 510: power down vscale ram */
+       CFG_PDWN256x32  = 1 << 7,       /* power down cursor */
+       CFG_PDWN256x24  = 1 << 6,       /* power down palette */
+       CFG_PDWN256x8   = 1 << 5,       /* power down gamma */
+       CFG_PDWNHWC     = 1 << 4,       /* Armada 510: power down all hwc ram */
+       CFG_PDWN32x32   = 1 << 3,       /* power down slave->smart ram */
+       CFG_PDWN16x66   = 1 << 2,       /* power down UV fifo */
+       CFG_PDWN32x66   = 1 << 1,       /* power down Y fifo */
+       CFG_PDWN64x66   = 1 << 0,       /* power down graphic fifo */
+};
+
+/* For LCD_CFG_SCLK_DIV */
+enum {
+       /* Armada 510 */
+       SCLK_510_AXI            = 0x0 << 30,
+       SCLK_510_EXTCLK0        = 0x1 << 30,
+       SCLK_510_PLL            = 0x2 << 30,
+       SCLK_510_EXTCLK1        = 0x3 << 30,
+       SCLK_510_DIV_CHANGE     = 1 << 29,
+       SCLK_510_FRAC_DIV_MASK  = 0xfff << 16,
+       SCLK_510_INT_DIV_MASK   = 0xffff << 0,
+
+       /* Armada 16x */
+       SCLK_16X_AHB            = 0x0 << 28,
+       SCLK_16X_PCLK           = 0x1 << 28,
+       SCLK_16X_AXI            = 0x4 << 28,
+       SCLK_16X_PLL            = 0x8 << 28,
+       SCLK_16X_FRAC_DIV_MASK  = 0xfff << 16,
+       SCLK_16X_INT_DIV_MASK   = 0xffff << 0,
+};
+
+/* For LCD_SPU_DUMB_CTRL */
+enum {
+       DUMB16_RGB565_0 = 0x0 << 28,
+       DUMB16_RGB565_1 = 0x1 << 28,
+       DUMB18_RGB666_0 = 0x2 << 28,
+       DUMB18_RGB666_1 = 0x3 << 28,
+       DUMB12_RGB444_0 = 0x4 << 28,
+       DUMB12_RGB444_1 = 0x5 << 28,
+       DUMB24_RGB888_0 = 0x6 << 28,
+       DUMB_BLANK      = 0x7 << 28,
+       DUMB_MASK       = 0xf << 28,
+       CFG_BIAS_OUT    = 1 << 8,
+       CFG_REV_RGB     = 1 << 7,
+       CFG_INV_CBLANK  = 1 << 6,
+       CFG_INV_CSYNC   = 1 << 5,       /* Normally active high */
+       CFG_INV_HENA    = 1 << 4,
+       CFG_INV_VSYNC   = 1 << 3,       /* Normally active high */
+       CFG_INV_HSYNC   = 1 << 2,       /* Normally active high */
+       CFG_INV_PCLK    = 1 << 1,
+       CFG_DUMB_ENA    = 1 << 0,
+};
+
+/* For LCD_SPU_IOPAD_CONTROL */
+enum {
+       CFG_VSCALE_LN_EN        = 3 << 18,
+       CFG_GRA_VM_ENA          = 1 << 15,
+       CFG_DMA_VM_ENA          = 1 << 13,
+       CFG_CMD_VM_ENA          = 1 << 11,
+       CFG_CSC_MASK            = 3 << 8,
+       CFG_CSC_YUV_CCIR709     = 1 << 9,
+       CFG_CSC_YUV_CCIR601     = 0 << 9,
+       CFG_CSC_RGB_STUDIO      = 1 << 8,
+       CFG_CSC_RGB_COMPUTER    = 0 << 8,
+       CFG_IOPAD_MASK          = 0xf << 0,
+       CFG_IOPAD_DUMB24        = 0x0 << 0,
+       CFG_IOPAD_DUMB18SPI     = 0x1 << 0,
+       CFG_IOPAD_DUMB18GPIO    = 0x2 << 0,
+       CFG_IOPAD_DUMB16SPI     = 0x3 << 0,
+       CFG_IOPAD_DUMB16GPIO    = 0x4 << 0,
+       CFG_IOPAD_DUMB12GPIO    = 0x5 << 0,
+       CFG_IOPAD_SMART18       = 0x6 << 0,
+       CFG_IOPAD_SMART16       = 0x7 << 0,
+       CFG_IOPAD_SMART8        = 0x8 << 0,
+};
+
+#define IOPAD_DUMB24                0x0
+
+/* For LCD_SPU_IRQ_ENA */
+enum {
+       DMA_FRAME_IRQ0_ENA      = 1 << 31,
+       DMA_FRAME_IRQ1_ENA      = 1 << 30,
+       DMA_FRAME_IRQ_ENA       = DMA_FRAME_IRQ0_ENA | DMA_FRAME_IRQ1_ENA,
+       DMA_FF_UNDERFLOW_ENA    = 1 << 29,
+       GRA_FRAME_IRQ0_ENA      = 1 << 27,
+       GRA_FRAME_IRQ1_ENA      = 1 << 26,
+       GRA_FRAME_IRQ_ENA       = GRA_FRAME_IRQ0_ENA | GRA_FRAME_IRQ1_ENA,
+       GRA_FF_UNDERFLOW_ENA    = 1 << 25,
+       VSYNC_IRQ_ENA           = 1 << 23,
+       DUMB_FRAMEDONE_ENA      = 1 << 22,
+       TWC_FRAMEDONE_ENA       = 1 << 21,
+       HWC_FRAMEDONE_ENA       = 1 << 20,
+       SLV_IRQ_ENA             = 1 << 19,
+       SPI_IRQ_ENA             = 1 << 18,
+       PWRDN_IRQ_ENA           = 1 << 17,
+       ERR_IRQ_ENA             = 1 << 16,
+       CLEAN_SPU_IRQ_ISR       = 0xffff,
+};
+
+/* For LCD_SPU_IRQ_ISR */
+enum {
+       DMA_FRAME_IRQ0          = 1 << 31,
+       DMA_FRAME_IRQ1          = 1 << 30,
+       DMA_FRAME_IRQ           = DMA_FRAME_IRQ0 | DMA_FRAME_IRQ1,
+       DMA_FF_UNDERFLOW        = 1 << 29,
+       GRA_FRAME_IRQ0          = 1 << 27,
+       GRA_FRAME_IRQ1          = 1 << 26,
+       GRA_FRAME_IRQ           = GRA_FRAME_IRQ0 | GRA_FRAME_IRQ1,
+       GRA_FF_UNDERFLOW        = 1 << 25,
+       VSYNC_IRQ               = 1 << 23,
+       DUMB_FRAMEDONE          = 1 << 22,
+       TWC_FRAMEDONE           = 1 << 21,
+       HWC_FRAMEDONE           = 1 << 20,
+       SLV_IRQ                 = 1 << 19,
+       SPI_IRQ                 = 1 << 18,
+       PWRDN_IRQ               = 1 << 17,
+       ERR_IRQ                 = 1 << 16,
+       DMA_FRAME_IRQ0_LEVEL    = 1 << 15,
+       DMA_FRAME_IRQ1_LEVEL    = 1 << 14,
+       DMA_FRAME_CNT_ISR       = 3 << 12,
+       GRA_FRAME_IRQ0_LEVEL    = 1 << 11,
+       GRA_FRAME_IRQ1_LEVEL    = 1 << 10,
+       GRA_FRAME_CNT_ISR       = 3 << 8,
+       VSYNC_IRQ_LEVEL         = 1 << 7,
+       DUMB_FRAMEDONE_LEVEL    = 1 << 6,
+       TWC_FRAMEDONE_LEVEL     = 1 << 5,
+       HWC_FRAMEDONE_LEVEL     = 1 << 4,
+       SLV_FF_EMPTY            = 1 << 3,
+       DMA_FF_ALLEMPTY         = 1 << 2,
+       GRA_FF_ALLEMPTY         = 1 << 1,
+       PWRDN_IRQ_LEVEL         = 1 << 0,
+};
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_ioctlP.h b/drivers/gpu/drm/armada/armada_ioctlP.h
new file mode 100644 (file)
index 0000000..bd8c456
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_IOCTLP_H
+#define ARMADA_IOCTLP_H
+
+#define ARMADA_IOCTL_PROTO(name)\
+extern int armada_##name##_ioctl(struct drm_device *, void *, struct drm_file *)
+
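+/*
+ * For example, ARMADA_IOCTL_PROTO(gem_create) expands to the prototype of
+ * armada_gem_create_ioctl(), matching the handlers defined in armada_gem.c.
+ */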
+ARMADA_IOCTL_PROTO(gem_create);
+ARMADA_IOCTL_PROTO(gem_mmap);
+ARMADA_IOCTL_PROTO(gem_pwrite);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_output.c b/drivers/gpu/drm/armada/armada_output.c
new file mode 100644 (file)
index 0000000..d685a54
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_output.h"
+#include "armada_drm.h"
+
+struct armada_connector {
+       struct drm_connector conn;
+       const struct armada_output_type *type;
+};
+
+#define drm_to_armada_conn(c) container_of(c, struct armada_connector, conn)
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn)
+{
+       struct drm_encoder *enc = conn->encoder;
+
+       return enc ? enc : drm_encoder_find(conn->dev, conn->encoder_ids[0]);
+}
+
+static enum drm_connector_status armada_drm_connector_detect(
+       struct drm_connector *conn, bool force)
+{
+       struct armada_connector *dconn = drm_to_armada_conn(conn);
+       enum drm_connector_status status = connector_status_disconnected;
+
+       if (dconn->type->detect) {
+               status = dconn->type->detect(conn, force);
+       } else {
+               struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+
+               if (enc)
+                       status = encoder_helper_funcs(enc)->detect(enc, conn);
+       }
+
+       return status;
+}
+
+static void armada_drm_connector_destroy(struct drm_connector *conn)
+{
+       struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+       drm_sysfs_connector_remove(conn);
+       drm_connector_cleanup(conn);
+       kfree(dconn);
+}
+
+static int armada_drm_connector_set_property(struct drm_connector *conn,
+       struct drm_property *property, uint64_t value)
+{
+       struct armada_connector *dconn = drm_to_armada_conn(conn);
+
+       if (!dconn->type->set_property)
+               return -EINVAL;
+
+       return dconn->type->set_property(conn, property, value);
+}
+
+static const struct drm_connector_funcs armada_drm_conn_funcs = {
+       .dpms           = drm_helper_connector_dpms,
+       .fill_modes     = drm_helper_probe_single_connector_modes,
+       .detect         = armada_drm_connector_detect,
+       .destroy        = armada_drm_connector_destroy,
+       .set_property   = armada_drm_connector_set_property,
+};
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+       encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void armada_drm_encoder_commit(struct drm_encoder *encoder)
+{
+       encoder_helper_funcs(encoder)->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+       const struct drm_display_mode *mode, struct drm_display_mode *adjusted)
+{
+       return true;
+}
+
+/* Shouldn't this be a generic helper function? */
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+       struct drm_display_mode *mode)
+{
+       struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+       int valid = MODE_BAD;
+
+       if (encoder) {
+               struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+               valid = slave->slave_funcs->mode_valid(encoder, mode);
+       }
+       return valid;
+}
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+       struct drm_property *property, uint64_t value)
+{
+       struct drm_encoder *encoder = armada_drm_connector_encoder(conn);
+       int rc = -EINVAL;
+
+       if (encoder) {
+               struct drm_encoder_slave *slave = to_encoder_slave(encoder);
+
+               rc = slave->slave_funcs->set_property(encoder, conn, property,
+                                                     value);
+       }
+       return rc;
+}
+
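+/*
+ * Create a connector for the given output type: initialise the DRM
+ * connector, let the type's create hook set up and attach its encoder,
+ * then register the connector with sysfs.  On failure, anything already
+ * created is torn down again.
+ */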
+int armada_output_create(struct drm_device *dev,
+       const struct armada_output_type *type, const void *data)
+{
+       struct armada_connector *dconn;
+       int ret;
+
+       dconn = kzalloc(sizeof(*dconn), GFP_KERNEL);
+       if (!dconn)
+               return -ENOMEM;
+
+       dconn->type = type;
+
+       ret = drm_connector_init(dev, &dconn->conn, &armada_drm_conn_funcs,
+                                type->connector_type);
+       if (ret) {
+               DRM_ERROR("unable to init connector\n");
+               goto err_destroy_dconn;
+       }
+
+       ret = type->create(&dconn->conn, data);
+       if (ret)
+               goto err_conn;
+
+       ret = drm_sysfs_connector_add(&dconn->conn);
+       if (ret)
+               goto err_sysfs;
+
+       return 0;
+
+ err_sysfs:
+       if (dconn->conn.encoder)
+               dconn->conn.encoder->funcs->destroy(dconn->conn.encoder);
+ err_conn:
+       drm_connector_cleanup(&dconn->conn);
+ err_destroy_dconn:
+       kfree(dconn);
+       return ret;
+}
diff --git a/drivers/gpu/drm/armada/armada_output.h b/drivers/gpu/drm/armada/armada_output.h
new file mode 100644 (file)
index 0000000..4126d43
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_CONNECTOR_H
+#define ARMADA_CONNECTOR_H
+
+#define encoder_helper_funcs(encoder) \
+       ((struct drm_encoder_helper_funcs *)encoder->helper_private)
+
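+/*
+ * Per-output-type hooks: create must initialise and attach the encoder
+ * for the connector and is mandatory; detect and set_property are
+ * optional.  connector_type is one of the DRM_MODE_CONNECTOR_* values.
+ */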
+struct armada_output_type {
+       int connector_type;
+       enum drm_connector_status (*detect)(struct drm_connector *, bool);
+       int (*create)(struct drm_connector *, const void *);
+       int (*set_property)(struct drm_connector *, struct drm_property *,
+                           uint64_t);
+};
+
+struct drm_encoder *armada_drm_connector_encoder(struct drm_connector *conn);
+
+void armada_drm_encoder_prepare(struct drm_encoder *encoder);
+void armada_drm_encoder_commit(struct drm_encoder *encoder);
+
+bool armada_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+       const struct drm_display_mode *mode, struct drm_display_mode *adj);
+
+int armada_drm_slave_encoder_mode_valid(struct drm_connector *conn,
+       struct drm_display_mode *mode);
+
+int armada_drm_slave_encoder_set_property(struct drm_connector *conn,
+       struct drm_property *property, uint64_t value);
+
+int armada_output_create(struct drm_device *dev,
+       const struct armada_output_type *type, const void *data);
+
+#endif
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
new file mode 100644 (file)
index 0000000..c5b06fd
--- /dev/null
@@ -0,0 +1,477 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include <drm/armada_drm.h>
+#include "armada_ioctlP.h"
+
+struct armada_plane_properties {
+       uint32_t colorkey_yr;
+       uint32_t colorkey_ug;
+       uint32_t colorkey_vb;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
+       int16_t  brightness;
+       uint16_t contrast;
+       uint16_t saturation;
+       uint32_t colorkey_mode;
+};
+
+struct armada_plane {
+       struct drm_plane base;
+       spinlock_t lock;
+       struct drm_framebuffer *old_fb;
+       uint32_t src_hw;
+       uint32_t dst_hw;
+       uint32_t dst_yx;
+       uint32_t ctrl0;
+       struct {
+               struct armada_vbl_event update;
+               struct armada_regs regs[13];
+               wait_queue_head_t wait;
+       } vbl;
+       struct armada_plane_properties prop;
+};
+#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
+
+
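+/*
+ * Program this plane's colorkey, brightness, contrast, saturation and
+ * colorkey mode into the CRTC registers.
+ */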
+static void
+armada_ovl_update_attr(struct armada_plane_properties *prop,
+       struct armada_crtc *dcrtc)
+{
+       writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
+       writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
+       writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+
+       writel_relaxed(prop->brightness << 16 | prop->contrast,
+                      dcrtc->base + LCD_SPU_CONTRAST);
+       /* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
+       writel_relaxed(prop->saturation << 16,
+                      dcrtc->base + LCD_SPU_SATURATION);
+       writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
+
+       spin_lock_irq(&dcrtc->irq_lock);
+       armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
+                    CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+                    dcrtc->base + LCD_SPU_DMA_CTRL1);
+
+       armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+       spin_unlock_irq(&dcrtc->irq_lock);
+}
+
+/* === Plane support === */
+static void armada_plane_vbl(struct armada_crtc *dcrtc, void *data)
+{
+       struct armada_plane *dplane = data;
+       struct drm_framebuffer *fb;
+
+       armada_drm_crtc_update_regs(dcrtc, dplane->vbl.regs);
+
+       spin_lock(&dplane->lock);
+       fb = dplane->old_fb;
+       dplane->old_fb = NULL;
+       spin_unlock(&dplane->lock);
+
+       if (fb)
+               armada_drm_queue_unref_work(dcrtc->crtc.dev, fb);
+}
+
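+/*
+ * Clamp a start/size pair to the visible range [0, max); returns the
+ * number of visible pixels or lines.
+ */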
+static unsigned armada_limit(int start, unsigned size, unsigned max)
+{
+       int end = start + size;
+       if (end < 0)
+               return 0;
+       if (start < 0)
+               start = 0;
+       return (unsigned)end > max ? max - start : end - start;
+}
+
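+/*
+ * Update the overlay plane.  The destination rectangle is clamped to the
+ * CRTC mode; pure position/size updates are written to the hardware
+ * immediately, while framebuffer or format changes are queued as a
+ * register set to be applied from the next vertical blank interrupt.
+ */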
+static int
+armada_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+       struct drm_framebuffer *fb,
+       int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
+       uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h)
+{
+       struct armada_plane *dplane = drm_to_armada_plane(plane);
+       struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       uint32_t val, ctrl0;
+       unsigned idx = 0;
+       int ret;
+
+       crtc_w = armada_limit(crtc_x, crtc_w, dcrtc->crtc.mode.hdisplay);
+       crtc_h = armada_limit(crtc_y, crtc_h, dcrtc->crtc.mode.vdisplay);
+       ctrl0 = CFG_DMA_FMT(drm_fb_to_armada_fb(fb)->fmt) |
+               CFG_DMA_MOD(drm_fb_to_armada_fb(fb)->mod) |
+               CFG_CBSH_ENA | CFG_DMA_HSMOOTH | CFG_DMA_ENA;
+
+       /* Does the position/size result in nothing to display? */
+       if (crtc_w == 0 || crtc_h == 0) {
+               ctrl0 &= ~CFG_DMA_ENA;
+       }
+
+       /*
+        * FIXME: if the starting point is off screen, we need to
+        * adjust src_x, src_y, src_w, src_h appropriately, and
+        * according to the scale.
+        */
+
+       if (!dcrtc->plane) {
+               dcrtc->plane = plane;
+               armada_ovl_update_attr(&dplane->prop, dcrtc);
+       }
+
+       /* FIXME: overlay on an interlaced display */
+       /* Just updating the position/size? */
+       if (plane->fb == fb && dplane->ctrl0 == ctrl0) {
+               val = (src_h & 0xffff0000) | src_w >> 16;
+               dplane->src_hw = val;
+               writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_HPXL_VLN);
+               val = crtc_h << 16 | crtc_w;
+               dplane->dst_hw = val;
+               writel_relaxed(val, dcrtc->base + LCD_SPU_DZM_HPXL_VLN);
+               val = crtc_y << 16 | crtc_x;
+               dplane->dst_yx = val;
+               writel_relaxed(val, dcrtc->base + LCD_SPU_DMA_OVSA_HPXL_VLN);
+               return 0;
+       } else if (~dplane->ctrl0 & ctrl0 & CFG_DMA_ENA) {
+               /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+               armada_updatel(0, CFG_PDWN16x66 | CFG_PDWN32x66,
+                              dcrtc->base + LCD_SPU_SRAM_PARA1);
+       }
+
+       ret = wait_event_timeout(dplane->vbl.wait,
+                                list_empty(&dplane->vbl.update.node),
+                                HZ/25);
+       if (ret < 0)
+               return ret;
+
+       if (plane->fb != fb) {
+               struct armada_gem_object *obj = drm_fb_obj(fb);
+               uint32_t sy, su, sv;
+
+               /*
+                * Take a reference on the new framebuffer - we want to
+                * hold on to it while the hardware is displaying it.
+                */
+               drm_framebuffer_reference(fb);
+
+               if (plane->fb) {
+                       struct drm_framebuffer *older_fb;
+
+                       spin_lock_irq(&dplane->lock);
+                       older_fb = dplane->old_fb;
+                       dplane->old_fb = plane->fb;
+                       spin_unlock_irq(&dplane->lock);
+                       if (older_fb)
+                               armada_drm_queue_unref_work(dcrtc->crtc.dev,
+                                                           older_fb);
+               }
+
+               src_y >>= 16;
+               src_x >>= 16;
+               sy = obj->dev_addr + fb->offsets[0] + src_y * fb->pitches[0] +
+                       src_x * fb->bits_per_pixel / 8;
+               su = obj->dev_addr + fb->offsets[1] + src_y * fb->pitches[1] +
+                       src_x;
+               sv = obj->dev_addr + fb->offsets[2] + src_y * fb->pitches[2] +
+                       src_x;
+
+               armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+                                    LCD_SPU_DMA_START_ADDR_Y0);
+               armada_reg_queue_set(dplane->vbl.regs, idx, su,
+                                    LCD_SPU_DMA_START_ADDR_U0);
+               armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+                                    LCD_SPU_DMA_START_ADDR_V0);
+               armada_reg_queue_set(dplane->vbl.regs, idx, sy,
+                                    LCD_SPU_DMA_START_ADDR_Y1);
+               armada_reg_queue_set(dplane->vbl.regs, idx, su,
+                                    LCD_SPU_DMA_START_ADDR_U1);
+               armada_reg_queue_set(dplane->vbl.regs, idx, sv,
+                                    LCD_SPU_DMA_START_ADDR_V1);
+
+               val = fb->pitches[0] << 16 | fb->pitches[0];
+               armada_reg_queue_set(dplane->vbl.regs, idx, val,
+                                    LCD_SPU_DMA_PITCH_YC);
+               val = fb->pitches[1] << 16 | fb->pitches[2];
+               armada_reg_queue_set(dplane->vbl.regs, idx, val,
+                                    LCD_SPU_DMA_PITCH_UV);
+       }
+
+       val = (src_h & 0xffff0000) | src_w >> 16;
+       if (dplane->src_hw != val) {
+               dplane->src_hw = val;
+               armada_reg_queue_set(dplane->vbl.regs, idx, val,
+                                    LCD_SPU_DMA_HPXL_VLN);
+       }
+       val = crtc_h << 16 | crtc_w;
+       if (dplane->dst_hw != val) {
+               dplane->dst_hw = val;
+               armada_reg_queue_set(dplane->vbl.regs, idx, val,
+                                    LCD_SPU_DZM_HPXL_VLN);
+       }
+       val = crtc_y << 16 | crtc_x;
+       if (dplane->dst_yx != val) {
+               dplane->dst_yx = val;
+               armada_reg_queue_set(dplane->vbl.regs, idx, val,
+                                    LCD_SPU_DMA_OVSA_HPXL_VLN);
+       }
+       if (dplane->ctrl0 != ctrl0) {
+               dplane->ctrl0 = ctrl0;
+               armada_reg_queue_mod(dplane->vbl.regs, idx, ctrl0,
+                       CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
+                       CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
+                       CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
+                       CFG_YUV2RGB) | CFG_DMA_ENA,
+                       LCD_SPU_DMA_CTRL0);
+       }
+       if (idx) {
+               armada_reg_queue_end(dplane->vbl.regs, idx);
+               armada_drm_vbl_event_add(dcrtc, &dplane->vbl.update);
+       }
+       return 0;
+}
+
+static int armada_plane_disable(struct drm_plane *plane)
+{
+       struct armada_plane *dplane = drm_to_armada_plane(plane);
+       struct drm_framebuffer *fb;
+       struct armada_crtc *dcrtc;
+
+       if (!dplane->base.crtc)
+               return 0;
+
+       dcrtc = drm_to_armada_crtc(dplane->base.crtc);
+       dcrtc->plane = NULL;
+
+       spin_lock_irq(&dcrtc->irq_lock);
+       armada_drm_vbl_event_remove(dcrtc, &dplane->vbl.update);
+       armada_updatel(0, CFG_DMA_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
+       dplane->ctrl0 = 0;
+       spin_unlock_irq(&dcrtc->irq_lock);
+
+       /* Power down the Y/U/V FIFOs */
+       armada_updatel(CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+                      dcrtc->base + LCD_SPU_SRAM_PARA1);
+
+       if (plane->fb)
+               drm_framebuffer_unreference(plane->fb);
+
+       spin_lock_irq(&dplane->lock);
+       fb = dplane->old_fb;
+       dplane->old_fb = NULL;
+       spin_unlock_irq(&dplane->lock);
+       if (fb)
+               drm_framebuffer_unreference(fb);
+
+       return 0;
+}
+
+static void armada_plane_destroy(struct drm_plane *plane)
+{
+       kfree(plane);
+}
+
+static int armada_plane_set_property(struct drm_plane *plane,
+       struct drm_property *property, uint64_t val)
+{
+       struct armada_private *priv = plane->dev->dev_private;
+       struct armada_plane *dplane = drm_to_armada_plane(plane);
+       bool update_attr = false;
+
+       if (property == priv->colorkey_prop) {
+#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
+               dplane->prop.colorkey_yr = CCC(K2R(val));
+               dplane->prop.colorkey_ug = CCC(K2G(val));
+               dplane->prop.colorkey_vb = CCC(K2B(val));
+#undef CCC
+               update_attr = true;
+       } else if (property == priv->colorkey_min_prop) {
+               dplane->prop.colorkey_yr &= ~0x00ff0000;
+               dplane->prop.colorkey_yr |= K2R(val) << 16;
+               dplane->prop.colorkey_ug &= ~0x00ff0000;
+               dplane->prop.colorkey_ug |= K2G(val) << 16;
+               dplane->prop.colorkey_vb &= ~0x00ff0000;
+               dplane->prop.colorkey_vb |= K2B(val) << 16;
+               update_attr = true;
+       } else if (property == priv->colorkey_max_prop) {
+               dplane->prop.colorkey_yr &= ~0xff000000;
+               dplane->prop.colorkey_yr |= K2R(val) << 24;
+               dplane->prop.colorkey_ug &= ~0xff000000;
+               dplane->prop.colorkey_ug |= K2G(val) << 24;
+               dplane->prop.colorkey_vb &= ~0xff000000;
+               dplane->prop.colorkey_vb |= K2B(val) << 24;
+               update_attr = true;
+       } else if (property == priv->colorkey_val_prop) {
+               dplane->prop.colorkey_yr &= ~0x0000ff00;
+               dplane->prop.colorkey_yr |= K2R(val) << 8;
+               dplane->prop.colorkey_ug &= ~0x0000ff00;
+               dplane->prop.colorkey_ug |= K2G(val) << 8;
+               dplane->prop.colorkey_vb &= ~0x0000ff00;
+               dplane->prop.colorkey_vb |= K2B(val) << 8;
+               update_attr = true;
+       } else if (property == priv->colorkey_alpha_prop) {
+               dplane->prop.colorkey_yr &= ~0x000000ff;
+               dplane->prop.colorkey_yr |= K2R(val);
+               dplane->prop.colorkey_ug &= ~0x000000ff;
+               dplane->prop.colorkey_ug |= K2G(val);
+               dplane->prop.colorkey_vb &= ~0x000000ff;
+               dplane->prop.colorkey_vb |= K2B(val);
+               update_attr = true;
+       } else if (property == priv->colorkey_mode_prop) {
+               dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
+               dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+               update_attr = true;
+       } else if (property == priv->brightness_prop) {
+               dplane->prop.brightness = val - 256;
+               update_attr = true;
+       } else if (property == priv->contrast_prop) {
+               dplane->prop.contrast = val;
+               update_attr = true;
+       } else if (property == priv->saturation_prop) {
+               dplane->prop.saturation = val;
+               update_attr = true;
+       }
+
+       if (update_attr && dplane->base.crtc)
+               armada_ovl_update_attr(&dplane->prop,
+                                      drm_to_armada_crtc(dplane->base.crtc));
+
+       return 0;
+}
+
+static const struct drm_plane_funcs armada_plane_funcs = {
+       .update_plane   = armada_plane_update,
+       .disable_plane  = armada_plane_disable,
+       .destroy        = armada_plane_destroy,
+       .set_property   = armada_plane_set_property,
+};
+
+static const uint32_t armada_formats[] = {
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_YUYV,
+       DRM_FORMAT_YUV420,
+       DRM_FORMAT_YVU420,
+       DRM_FORMAT_YUV422,
+       DRM_FORMAT_YVU422,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_ARGB8888,
+       DRM_FORMAT_ABGR8888,
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_XBGR8888,
+       DRM_FORMAT_RGB888,
+       DRM_FORMAT_BGR888,
+       DRM_FORMAT_ARGB1555,
+       DRM_FORMAT_ABGR1555,
+       DRM_FORMAT_RGB565,
+       DRM_FORMAT_BGR565,
+};
+
+static struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
+       { CKMODE_DISABLE, "disabled" },
+       { CKMODE_Y,       "Y component" },
+       { CKMODE_U,       "U component" },
+       { CKMODE_V,       "V component" },
+       { CKMODE_RGB,     "RGB" },
+       { CKMODE_R,       "R component" },
+       { CKMODE_G,       "G component" },
+       { CKMODE_B,       "B component" },
+};
+
+static int armada_overlay_create_properties(struct drm_device *dev)
+{
+       struct armada_private *priv = dev->dev_private;
+
+       if (priv->colorkey_prop)
+               return 0;
+
+       priv->colorkey_prop = drm_property_create_range(dev, 0,
+                               "colorkey", 0, 0xffffff);
+       priv->colorkey_min_prop = drm_property_create_range(dev, 0,
+                               "colorkey_min", 0, 0xffffff);
+       priv->colorkey_max_prop = drm_property_create_range(dev, 0,
+                               "colorkey_max", 0, 0xffffff);
+       priv->colorkey_val_prop = drm_property_create_range(dev, 0,
+                               "colorkey_val", 0, 0xffffff);
+       priv->colorkey_alpha_prop = drm_property_create_range(dev, 0,
+                               "colorkey_alpha", 0, 0xffffff);
+       priv->colorkey_mode_prop = drm_property_create_enum(dev, 0,
+                               "colorkey_mode",
+                               armada_drm_colorkey_enum_list,
+                               ARRAY_SIZE(armada_drm_colorkey_enum_list));
+       priv->brightness_prop = drm_property_create_range(dev, 0,
+                               "brightness", 0, 256 + 255);
+       priv->contrast_prop = drm_property_create_range(dev, 0,
+                               "contrast", 0, 0x7fff);
+       priv->saturation_prop = drm_property_create_range(dev, 0,
+                               "saturation", 0, 0x7fff);
+
+       if (!priv->colorkey_prop)
+               return -ENOMEM;
+
+       return 0;
+}
+
+int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
+{
+       struct armada_private *priv = dev->dev_private;
+       struct drm_mode_object *mobj;
+       struct armada_plane *dplane;
+       int ret;
+
+       ret = armada_overlay_create_properties(dev);
+       if (ret)
+               return ret;
+
+       dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
+       if (!dplane)
+               return -ENOMEM;
+
+       spin_lock_init(&dplane->lock);
+       init_waitqueue_head(&dplane->vbl.wait);
+       armada_drm_vbl_event_init(&dplane->vbl.update, armada_plane_vbl,
+                                 dplane);
+
+       drm_plane_init(dev, &dplane->base, crtcs, &armada_plane_funcs,
+                      armada_formats, ARRAY_SIZE(armada_formats), false);
+
+       dplane->prop.colorkey_yr = 0xfefefe00;
+       dplane->prop.colorkey_ug = 0x01010100;
+       dplane->prop.colorkey_vb = 0x01010100;
+       dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+       dplane->prop.brightness = 0;
+       dplane->prop.contrast = 0x4000;
+       dplane->prop.saturation = 0x4000;
+
+       mobj = &dplane->base.base;
+       drm_object_attach_property(mobj, priv->colorkey_prop,
+                                  0x0101fe);
+       drm_object_attach_property(mobj, priv->colorkey_min_prop,
+                                  0x0101fe);
+       drm_object_attach_property(mobj, priv->colorkey_max_prop,
+                                  0x0101fe);
+       drm_object_attach_property(mobj, priv->colorkey_val_prop,
+                                  0x0101fe);
+       drm_object_attach_property(mobj, priv->colorkey_alpha_prop,
+                                  0x000000);
+       drm_object_attach_property(mobj, priv->colorkey_mode_prop,
+                                  CKMODE_RGB);
+       drm_object_attach_property(mobj, priv->brightness_prop, 256);
+       drm_object_attach_property(mobj, priv->contrast_prop,
+                                  dplane->prop.contrast);
+       drm_object_attach_property(mobj, priv->saturation_prop,
+                                  dplane->prop.saturation);
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/armada/armada_slave.c b/drivers/gpu/drm/armada/armada_slave.c
new file mode 100644 (file)
index 0000000..00d0fac
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_encoder_slave.h>
+#include "armada_drm.h"
+#include "armada_output.h"
+#include "armada_slave.h"
+
+static int armada_drm_slave_get_modes(struct drm_connector *conn)
+{
+       struct drm_encoder *enc = armada_drm_connector_encoder(conn);
+       int count = 0;
+
+       if (enc) {
+               struct drm_encoder_slave *slave = to_encoder_slave(enc);
+
+               count = slave->slave_funcs->get_modes(enc, conn);
+       }
+
+       return count;
+}
+
+static void armada_drm_slave_destroy(struct drm_encoder *enc)
+{
+       struct drm_encoder_slave *slave = to_encoder_slave(enc);
+       struct i2c_client *client = drm_i2c_encoder_get_client(enc);
+
+       if (slave->slave_funcs)
+               slave->slave_funcs->destroy(enc);
+       if (client)
+               i2c_put_adapter(client->adapter);
+
+       drm_encoder_cleanup(&slave->base);
+       kfree(slave);
+}
+
+static const struct drm_encoder_funcs armada_drm_slave_encoder_funcs = {
+       .destroy        = armada_drm_slave_destroy,
+};
+
+static const struct drm_connector_helper_funcs armada_drm_slave_helper_funcs = {
+       .get_modes      = armada_drm_slave_get_modes,
+       .mode_valid     = armada_drm_slave_encoder_mode_valid,
+       .best_encoder   = armada_drm_connector_encoder,
+};
+
+static const struct drm_encoder_helper_funcs drm_slave_encoder_helpers = {
+       .dpms = drm_i2c_encoder_dpms,
+       .save = drm_i2c_encoder_save,
+       .restore = drm_i2c_encoder_restore,
+       .mode_fixup = drm_i2c_encoder_mode_fixup,
+       .prepare = drm_i2c_encoder_prepare,
+       .commit = drm_i2c_encoder_commit,
+       .mode_set = drm_i2c_encoder_mode_set,
+       .detect = drm_i2c_encoder_detect,
+};
+
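+/*
+ * Bind an i2c slave encoder (e.g. an external HDMI transmitter) to this
+ * connector: take the i2c adapter, initialise the DRM encoder, probe the
+ * i2c encoder driver and attach the result to the connector.  Returns
+ * -EPROBE_DEFER if the i2c adapter is not yet available.
+ */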
+static int
+armada_drm_conn_slave_create(struct drm_connector *conn, const void *data)
+{
+       const struct armada_drm_slave_config *config = data;
+       struct drm_encoder_slave *slave;
+       struct i2c_adapter *adap;
+       int ret;
+
+       conn->interlace_allowed = config->interlace_allowed;
+       conn->doublescan_allowed = config->doublescan_allowed;
+       conn->polled = config->polled;
+
+       drm_connector_helper_add(conn, &armada_drm_slave_helper_funcs);
+
+       slave = kzalloc(sizeof(*slave), GFP_KERNEL);
+       if (!slave)
+               return -ENOMEM;
+
+       slave->base.possible_crtcs = config->crtcs;
+
+       adap = i2c_get_adapter(config->i2c_adapter_id);
+       if (!adap) {
+               kfree(slave);
+               return -EPROBE_DEFER;
+       }
+
+       ret = drm_encoder_init(conn->dev, &slave->base,
+                              &armada_drm_slave_encoder_funcs,
+                              DRM_MODE_ENCODER_TMDS);
+       if (ret) {
+               DRM_ERROR("unable to init encoder\n");
+               i2c_put_adapter(adap);
+               kfree(slave);
+               return ret;
+       }
+
+       ret = drm_i2c_encoder_init(conn->dev, slave, adap, &config->info);
+       i2c_put_adapter(adap);
+       if (ret) {
+               DRM_ERROR("unable to init encoder slave\n");
+               armada_drm_slave_destroy(&slave->base);
+               return ret;
+       }
+
+       drm_encoder_helper_add(&slave->base, &drm_slave_encoder_helpers);
+
+       ret = slave->slave_funcs->create_resources(&slave->base, conn);
+       if (ret) {
+               armada_drm_slave_destroy(&slave->base);
+               return ret;
+       }
+
+       ret = drm_mode_connector_attach_encoder(conn, &slave->base);
+       if (ret) {
+               armada_drm_slave_destroy(&slave->base);
+               return ret;
+       }
+
+       conn->encoder = &slave->base;
+
+       return ret;
+}
+
+static const struct armada_output_type armada_drm_conn_slave = {
+       .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+       .create         = armada_drm_conn_slave_create,
+       .set_property   = armada_drm_slave_encoder_set_property,
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+       const struct armada_drm_slave_config *config)
+{
+       return armada_output_create(dev, &armada_drm_conn_slave, config);
+}
diff --git a/drivers/gpu/drm/armada/armada_slave.h b/drivers/gpu/drm/armada/armada_slave.h
new file mode 100644 (file)
index 0000000..bf2374c
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef ARMADA_SLAVE_H
+#define ARMADA_SLAVE_H
+
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+
+struct armada_drm_slave_config {
+       int i2c_adapter_id;
+       uint32_t crtcs;
+       uint8_t polled;
+       bool interlace_allowed;
+       bool doublescan_allowed;
+       struct i2c_board_info info;
+};
+
+int armada_drm_connector_slave_create(struct drm_device *dev,
+       const struct armada_drm_slave_config *);
+
+#endif
index da4a51eae824d874a2756d6dab17cdf40d78146e..8a784c460c89d8963effbb25b4509878d2003bc5 100644 (file)
@@ -6,6 +6,7 @@ config DRM_AST
        select FB_SYS_FILLRECT
        select FB_SYS_IMAGEBLIT
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select DRM_TTM
        help
         Say yes for experimental AST GPU driver. Do not enable
index 32e270dc714eb036a325b1d4999e18e6d7c1e11b..5137f15dba19e71a220ac38efb7e1fee2ebbbd4a 100644 (file)
@@ -211,7 +211,6 @@ static struct drm_driver driver = {
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
 
-       .gem_init_object = ast_gem_init_object,
        .gem_free_object = ast_gem_free_object,
        .dumb_create = ast_dumb_create,
        .dumb_map_offset = ast_dumb_mmap_offset,
index 8492b68e873c174cda6e99e70cf976a90e5ddaff..9833a1b1acc140f36f0d248a026e43fee2b1ae33 100644 (file)
@@ -323,7 +323,6 @@ extern int ast_dumb_create(struct drm_file *file,
                           struct drm_device *dev,
                           struct drm_mode_create_dumb *args);
 
-extern int ast_gem_init_object(struct drm_gem_object *obj);
 extern void ast_gem_free_object(struct drm_gem_object *obj);
 extern int ast_dumb_mmap_offset(struct drm_file *file,
                                struct drm_device *dev,
index 7f6152d374cace41ae991b3080df769e385824b8..af0b868a9dfd6c5b3849f147f01fbf2cc29c4471 100644 (file)
@@ -449,12 +449,6 @@ int ast_dumb_create(struct drm_file *file,
        return 0;
 }
 
-int ast_gem_init_object(struct drm_gem_object *obj)
-{
-       BUG();
-       return 0;
-}
-
 void ast_bo_unref(struct ast_bo **bo)
 {
        struct ttm_buffer_object *tbo;
index bf67b22723f93e1e5ab6f9b760ba8cb649229724..9864559e5fb994ec47e9b5ceb13246c9682491fe 100644 (file)
@@ -5,6 +5,7 @@ config DRM_CIRRUS_QEMU
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select DRM_TTM
        help
         This is a KMS driver for emulated cirrus device in qemu.
index 138364d917824f8ad4f7f771ad7f68a6cbb35aba..953fc8aea69c141cf076dcccbc1c4ed2ec775321 100644 (file)
@@ -97,7 +97,6 @@ static struct drm_driver driver = {
        .major = DRIVER_MAJOR,
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
-       .gem_init_object = cirrus_gem_init_object,
        .gem_free_object = cirrus_gem_free_object,
        .dumb_create = cirrus_dumb_create,
        .dumb_map_offset = cirrus_dumb_mmap_offset,
index 9b0bb9184afdbfa2f1eb3ebbcad2feda23aa67a4..b6aded73838bca0fb11c5a936db40a8db1e1e180 100644 (file)
@@ -191,7 +191,6 @@ int cirrus_device_init(struct cirrus_device *cdev,
                      struct pci_dev *pdev,
                      uint32_t flags);
 void cirrus_device_fini(struct cirrus_device *cdev);
-int cirrus_gem_init_object(struct drm_gem_object *obj);
 void cirrus_gem_free_object(struct drm_gem_object *obj);
 int cirrus_dumb_mmap_offset(struct drm_file *file,
                            struct drm_device *dev,
index f130a533a51257dd13fcbda93462dded728182d0..78e76f24343d17bad9bc81e5474f900fbd090b33 100644 (file)
@@ -255,12 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
        return 0;
 }
 
-int cirrus_gem_init_object(struct drm_gem_object *obj)
-{
-       BUG();
-       return 0;
-}
-
 void cirrus_bo_unref(struct cirrus_bo **bo)
 {
        struct ttm_buffer_object *tbo;
index 224ff965bcf7de624c3b62b6db153bda090ea4a9..a4b017b6849efd79686693e0e5cad7b5edf9ae09 100644 (file)
@@ -334,7 +334,6 @@ int drm_addctx(struct drm_device *dev, void *data,
 
        mutex_lock(&dev->ctxlist_mutex);
        list_add(&ctx_entry->head, &dev->ctxlist);
-       ++dev->ctx_count;
        mutex_unlock(&dev->ctxlist_mutex);
 
        return 0;
@@ -432,7 +431,6 @@ int drm_rmctx(struct drm_device *dev, void *data,
                        if (pos->handle == ctx->handle) {
                                list_del(&pos->head);
                                kfree(pos);
-                               --dev->ctx_count;
                        }
                }
        }
index bff2fa941f6004dca49ce176b928171d075584a8..d7a8370e3cdc4a5881e3c1b28ba74d089f18a8fc 100644 (file)
@@ -202,6 +202,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
        { DRM_MODE_CONNECTOR_TV, "TV" },
        { DRM_MODE_CONNECTOR_eDP, "eDP" },
        { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
+       { DRM_MODE_CONNECTOR_DSI, "DSI" },
 };
 
 static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -211,6 +212,7 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
        { DRM_MODE_ENCODER_LVDS, "LVDS" },
        { DRM_MODE_ENCODER_TVDAC, "TV" },
        { DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
+       { DRM_MODE_ENCODER_DSI, "DSI" },
 };
 
 void drm_connector_ida_init(void)
@@ -1317,6 +1319,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
        if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
                return -ERANGE;
 
+       if ((in->flags & DRM_MODE_FLAG_3D_MASK) > DRM_MODE_FLAG_3D_MAX)
+               return -EINVAL;
+
        out->clock = in->clock;
        out->hdisplay = in->hdisplay;
        out->hsync_start = in->hsync_start;
@@ -1579,6 +1584,19 @@ out:
        return ret;
 }
 
+static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
+                                        const struct drm_file *file_priv)
+{
+       /*
+        * If user-space hasn't configured the driver to expose the stereo 3D
+        * modes, don't expose them.
+        */
+       if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode))
+               return false;
+
+       return true;
+}
+
 /**
  * drm_mode_getconnector - get connector configuration
  * @dev: drm device for the ioctl
@@ -1644,7 +1662,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 
        /* delayed so we get modes regardless of pre-fill_modes state */
        list_for_each_entry(mode, &connector->modes, head)
-               mode_count++;
+               if (drm_mode_expose_to_userspace(mode, file_priv))
+                       mode_count++;
 
        out_resp->connector_id = connector->base.id;
        out_resp->connector_type = connector->connector_type;
@@ -1666,6 +1685,9 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
                copied = 0;
                mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
                list_for_each_entry(mode, &connector->modes, head) {
+                       if (!drm_mode_expose_to_userspace(mode, file_priv))
+                               continue;
+
                        drm_crtc_convert_to_umode(&u_mode, mode);
                        if (copy_to_user(mode_ptr + copied,
                                         &u_mode, sizeof(u_mode))) {
@@ -2040,6 +2062,45 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
 }
 EXPORT_SYMBOL(drm_mode_set_config_internal);
 
+/*
+ * Checks that the framebuffer is big enough for the CRTC viewport
+ * (x, y, hdisplay, vdisplay)
+ */
+static int drm_crtc_check_viewport(const struct drm_crtc *crtc,
+                                  int x, int y,
+                                  const struct drm_display_mode *mode,
+                                  const struct drm_framebuffer *fb)
+
+{
+       int hdisplay, vdisplay;
+
+       hdisplay = mode->hdisplay;
+       vdisplay = mode->vdisplay;
+
+       if (drm_mode_is_stereo(mode)) {
+               struct drm_display_mode adjusted = *mode;
+
+               drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE);
+               hdisplay = adjusted.crtc_hdisplay;
+               vdisplay = adjusted.crtc_vdisplay;
+       }
+
+       if (crtc->invert_dimensions)
+               swap(hdisplay, vdisplay);
+
+       if (hdisplay > fb->width ||
+           vdisplay > fb->height ||
+           x > fb->width - hdisplay ||
+           y > fb->height - vdisplay) {
+               DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
+                             fb->width, fb->height, hdisplay, vdisplay, x, y,
+                             crtc->invert_dimensions ? " (inverted)" : "");
+               return -ENOSPC;
+       }
+
+       return 0;
+}
+
 /**
  * drm_mode_setcrtc - set CRTC configuration
  * @dev: drm device for the ioctl
@@ -2087,7 +2148,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
        DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
        if (crtc_req->mode_valid) {
-               int hdisplay, vdisplay;
                /* If we have a mode we need a framebuffer. */
                /* If we pass -1, set the mode with the currently bound fb */
                if (crtc_req->fb_id == -1) {
@@ -2123,23 +2183,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 
                drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
-               hdisplay = mode->hdisplay;
-               vdisplay = mode->vdisplay;
-
-               if (crtc->invert_dimensions)
-                       swap(hdisplay, vdisplay);
-
-               if (hdisplay > fb->width ||
-                   vdisplay > fb->height ||
-                   crtc_req->x > fb->width - hdisplay ||
-                   crtc_req->y > fb->height - vdisplay) {
-                       DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-                                     fb->width, fb->height,
-                                     hdisplay, vdisplay, crtc_req->x, crtc_req->y,
-                                     crtc->invert_dimensions ? " (inverted)" : "");
-                       ret = -ENOSPC;
+               ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
+                                             mode, fb);
+               if (ret)
                        goto out;
-               }
+
        }
 
        if (crtc_req->count_connectors == 0 && mode) {
@@ -3556,7 +3604,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
        struct drm_framebuffer *fb = NULL, *old_fb = NULL;
        struct drm_pending_vblank_event *e = NULL;
        unsigned long flags;
-       int hdisplay, vdisplay;
        int ret = -EINVAL;
 
        if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -3588,22 +3635,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
        if (!fb)
                goto out;
 
-       hdisplay = crtc->mode.hdisplay;
-       vdisplay = crtc->mode.vdisplay;
-
-       if (crtc->invert_dimensions)
-               swap(hdisplay, vdisplay);
-
-       if (hdisplay > fb->width ||
-           vdisplay > fb->height ||
-           crtc->x > fb->width - hdisplay ||
-           crtc->y > fb->height - vdisplay) {
-               DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
-                             fb->width, fb->height, hdisplay, vdisplay, crtc->x, crtc->y,
-                             crtc->invert_dimensions ? " (inverted)" : "");
-               ret = -ENOSPC;
+       ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
+       if (ret)
                goto out;
-       }
 
        if (crtc->fb->pixel_format != fb->pixel_format) {
                DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
index c722c3b5404d3ad06836c522b9e974ab27a3ec60..80578d807cdc5c877312570c9d1a9edac75bdcfb 100644 (file)
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_edid.h>
 
+MODULE_AUTHOR("David Airlie, Jesse Barnes");
+MODULE_DESCRIPTION("DRM KMS helper");
+MODULE_LICENSE("GPL and additional rights");
+
 /**
  * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
  *                                             connector list
@@ -76,7 +80,8 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
 {
        struct drm_display_mode *mode;
 
-       if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
+       if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
+                     DRM_MODE_FLAG_3D_MASK))
                return;
 
        list_for_each_entry(mode, &connector->modes, head) {
@@ -86,6 +91,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
                if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
                                !(flags & DRM_MODE_FLAG_DBLSCAN))
                        mode->status = MODE_NO_DBLESCAN;
+               if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
+                               !(flags & DRM_MODE_FLAG_3D_MASK))
+                       mode->status = MODE_NO_STEREO;
        }
 
        return;
@@ -105,9 +113,9 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
  * then culled (based on validity and the @maxX, @maxY parameters) and put into
  * the normal modes list.
  *
- * Intended to be use as a generic implementation of the ->probe() @connector
- * callback for drivers that use the crtc helpers for output mode filtering and
- * detection.
+ * Intended to be used as a generic implementation of the ->fill_modes()
+ * @connector vfunc for drivers that use the crtc helpers for output mode
+ * filtering and detection.
  *
  * RETURNS:
  * Number of modes found on @connector.
@@ -175,6 +183,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
                mode_flags |= DRM_MODE_FLAG_INTERLACE;
        if (connector->doublescan_allowed)
                mode_flags |= DRM_MODE_FLAG_DBLSCAN;
+       if (connector->stereo_allowed)
+               mode_flags |= DRM_MODE_FLAG_3D_MASK;
        drm_mode_validate_flag(connector, mode_flags);
 
        list_for_each_entry(mode, &connector->modes, head) {
@@ -557,6 +567,14 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
                                continue;
 
                        connector->encoder = NULL;
+
+                       /*
+                        * drm_helper_disable_unused_functions() ought to be
+                        * doing this, but since we've decoupled the encoder
+                        * from the connector above, the required connection
+                        * between them is henceforth no longer available.
+                        */
+                       connector->dpms = DRM_MODE_DPMS_OFF;
                }
        }
 
index 89e1966271602a03e777231153d628fbe93b533e..9e978aae8972b18da9f012bef98e8ec1d408a1aa 100644 (file)
@@ -228,12 +228,12 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
 EXPORT_SYMBOL(i2c_dp_aux_add_bus);
 
 /* Helpers for DP link training */
-static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
 {
        return link_status[r - DP_LANE0_1_STATUS];
 }
 
-static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+static u8 dp_get_lane_status(const u8 link_status[DP_LINK_STATUS_SIZE],
                             int lane)
 {
        int i = DP_LANE0_1_STATUS + (lane >> 1);
@@ -242,7 +242,7 @@ static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
        return (l >> s) & 0xf;
 }
 
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane_count)
 {
        u8 lane_align;
@@ -262,7 +262,7 @@ bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_channel_eq_ok);
 
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                              int lane_count)
 {
        int lane;
@@ -277,7 +277,7 @@ bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
 
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
                                     int lane)
 {
        int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -290,7 +290,7 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
 
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
                                          int lane)
 {
        int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
@@ -303,7 +303,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
 }
 EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
        if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
                udelay(100);
        else
@@ -311,7 +311,7 @@ void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
 }
 EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
        if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
                udelay(400);
        else
index e572dd20bdee037fed5cdce356191ea84fd41e09..5b7054714475ef2b4ca45e42aa8b723de13b9cc3 100644 (file)
@@ -69,6 +69,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
 
        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -170,76 +171,6 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
 #define DRM_CORE_IOCTL_COUNT   ARRAY_SIZE( drm_ioctls )
 
-/**
- * drm_legacy_dev_reinit
- *
- * Reinitializes a legacy/ums drm device in it's lastclose function.
- */
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
-       int i;
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               return;
-
-       atomic_set(&dev->ioctl_count, 0);
-       atomic_set(&dev->vma_count, 0);
-
-       for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
-               atomic_set(&dev->counts[i], 0);
-
-       dev->sigdata.lock = NULL;
-
-       dev->context_flag = 0;
-       dev->last_context = 0;
-       dev->if_version = 0;
-}
-
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
-       struct drm_vma_entry *vma, *vma_temp;
-
-       DRM_DEBUG("\n");
-
-       if (dev->driver->lastclose)
-               dev->driver->lastclose(dev);
-       DRM_DEBUG("driver lastclose completed\n");
-
-       if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_irq_uninstall(dev);
-
-       mutex_lock(&dev->struct_mutex);
-
-       drm_agp_clear(dev);
-
-       drm_legacy_sg_cleanup(dev);
-
-       /* Clear vma list (only built for debugging) */
-       list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
-               list_del(&vma->head);
-               kfree(vma);
-       }
-
-       drm_legacy_dma_takedown(dev);
-
-       dev->dev_mapping = NULL;
-       mutex_unlock(&dev->struct_mutex);
-
-       drm_legacy_dev_reinit(dev);
-
-       DRM_DEBUG("lastclose completed\n");
-       return 0;
-}
-
 /** File operations structure */
 static const struct file_operations drm_stub_fops = {
        .owner = THIS_MODULE,
@@ -385,7 +316,6 @@ long drm_ioctl(struct file *filp,
                return -ENODEV;
 
        atomic_inc(&dev->ioctl_count);
-       atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
        ++file_priv->ioctl_count;
 
        if ((nr >= DRM_CORE_IOCTL_COUNT) &&
@@ -402,9 +332,16 @@ long drm_ioctl(struct file *filp,
                cmd = ioctl->cmd_drv;
        }
        else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+               u32 drv_size;
+
                ioctl = &drm_ioctls[nr];
-               cmd = ioctl->cmd;
+
+               drv_size = _IOC_SIZE(ioctl->cmd);
                usize = asize = _IOC_SIZE(cmd);
+               if (drv_size > asize)
+                       asize = drv_size;
+
+               cmd = ioctl->cmd;
        } else
                goto err_i1;
 
@@ -466,7 +403,7 @@ long drm_ioctl(struct file *filp,
 
       err_i1:
        if (!ioctl)
-               DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+               DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
                          task_pid_nr(current),
                          (long)old_encode_dev(file_priv->minor->device),
                          file_priv->authenticated, cmd, nr);
index 1688ff500513142d6d5072efb8061f1ae231bb3d..f1764ec5818b5cf66fd7a35f8e6703776238fbca 100644 (file)
@@ -1264,6 +1264,18 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_get_edid);
 
+/**
+ * drm_edid_duplicate - duplicate an EDID and the extensions
+ * @edid: EDID to duplicate
+ *
+ * Return duplicate edid or NULL on allocation failure.
+ */
+struct edid *drm_edid_duplicate(const struct edid *edid)
+{
+       return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+}
+EXPORT_SYMBOL(drm_edid_duplicate);
+
 /*** EDID parsing ***/
 
 /**
@@ -2404,7 +2416,7 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 
                if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
                     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-                   drm_mode_equal_no_clocks(to_match, cea_mode))
+                   drm_mode_equal_no_clocks_no_stereo(to_match, cea_mode))
                        return mode + 1;
        }
        return 0;
@@ -2453,7 +2465,7 @@ static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
 
                if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
                     KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
-                   drm_mode_equal_no_clocks(to_match, hdmi_mode))
+                   drm_mode_equal_no_clocks_no_stereo(to_match, hdmi_mode))
                        return mode + 1;
        }
        return 0;
@@ -2507,6 +2519,9 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
                if (!newmode)
                        continue;
 
+               /* Carry over the stereo flags */
+               newmode->flags |= mode->flags & DRM_MODE_FLAG_3D_MASK;
+
                /*
                 * The current mode could be either variant. Make
                 * sure to pick the "other" clock for the new mode.
@@ -2553,20 +2568,151 @@ do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
        return modes;
 }
 
+struct stereo_mandatory_mode {
+       int width, height, vrefresh;
+       unsigned int flags;
+};
+
+static const struct stereo_mandatory_mode stereo_mandatory_modes[] = {
+       { 1920, 1080, 24, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+       { 1920, 1080, 24, DRM_MODE_FLAG_3D_FRAME_PACKING },
+       { 1920, 1080, 50,
+         DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+       { 1920, 1080, 60,
+         DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF },
+       { 1280, 720,  50, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+       { 1280, 720,  50, DRM_MODE_FLAG_3D_FRAME_PACKING },
+       { 1280, 720,  60, DRM_MODE_FLAG_3D_TOP_AND_BOTTOM },
+       { 1280, 720,  60, DRM_MODE_FLAG_3D_FRAME_PACKING }
+};
+
+static bool
+stereo_match_mandatory(const struct drm_display_mode *mode,
+                      const struct stereo_mandatory_mode *stereo_mode)
+{
+       unsigned int interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+       return mode->hdisplay == stereo_mode->width &&
+              mode->vdisplay == stereo_mode->height &&
+              interlaced == (stereo_mode->flags & DRM_MODE_FLAG_INTERLACE) &&
+              drm_mode_vrefresh(mode) == stereo_mode->vrefresh;
+}
+
+static int add_hdmi_mandatory_stereo_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       const struct drm_display_mode *mode;
+       struct list_head stereo_modes;
+       int modes = 0, i;
+
+       INIT_LIST_HEAD(&stereo_modes);
+
+       list_for_each_entry(mode, &connector->probed_modes, head) {
+               for (i = 0; i < ARRAY_SIZE(stereo_mandatory_modes); i++) {
+                       const struct stereo_mandatory_mode *mandatory;
+                       struct drm_display_mode *new_mode;
+
+                       if (!stereo_match_mandatory(mode,
+                                                   &stereo_mandatory_modes[i]))
+                               continue;
+
+                       mandatory = &stereo_mandatory_modes[i];
+                       new_mode = drm_mode_duplicate(dev, mode);
+                       if (!new_mode)
+                               continue;
+
+                       new_mode->flags |= mandatory->flags;
+                       list_add_tail(&new_mode->head, &stereo_modes);
+                       modes++;
+               }
+       }
+
+       list_splice_tail(&stereo_modes, &connector->probed_modes);
+
+       return modes;
+}
+
+static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *newmode;
+
+       vic--; /* VICs start at 1 */
+       if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+               DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+               return 0;
+       }
+
+       newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+       if (!newmode)
+               return 0;
+
+       drm_mode_probed_add(connector, newmode);
+
+       return 1;
+}
+
+static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
+                              const u8 *video_db, u8 video_len, u8 video_index)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *newmode;
+       int modes = 0;
+       u8 cea_mode;
+
+       if (video_db == NULL || video_index > video_len)
+               return 0;
+
+       /* CEA modes are numbered 1..127 */
+       cea_mode = (video_db[video_index] & 127) - 1;
+       if (cea_mode >= ARRAY_SIZE(edid_cea_modes))
+               return 0;
+
+       if (structure & (1 << 0)) {
+               newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+               if (newmode) {
+                       newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
+                       drm_mode_probed_add(connector, newmode);
+                       modes++;
+               }
+       }
+       if (structure & (1 << 6)) {
+               newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+               if (newmode) {
+                       newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
+                       drm_mode_probed_add(connector, newmode);
+                       modes++;
+               }
+       }
+       if (structure & (1 << 8)) {
+               newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);
+               if (newmode) {
+                       newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
+                       drm_mode_probed_add(connector, newmode);
+                       modes++;
+               }
+       }
+
+       return modes;
+}
+
 /*
  * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
  * @connector: connector corresponding to the HDMI sink
  * @db: start of the CEA vendor specific block
  * @len: length of the CEA block payload, ie. one can access up to db[len]
  *
- * Parses the HDMI VSDB looking for modes to add to @connector.
+ * Parses the HDMI VSDB looking for modes to add to @connector. This function
+ * also adds the stereo 3d modes when applicable.
  */
 static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
+                  const u8 *video_db, u8 video_len)
 {
-       struct drm_device *dev = connector->dev;
-       int modes = 0, offset = 0, i;
-       u8 vic_len;
+       int modes = 0, offset = 0, i, multi_present = 0;
+       u8 vic_len, hdmi_3d_len = 0;
+       u16 mask;
+       u16 structure_all;
 
        if (len < 8)
                goto out;
@@ -2585,30 +2731,56 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
 
        /* the declared length is not long enough for the 2 first bytes
         * of additional video format capabilities */
-       offset += 2;
-       if (len < (8 + offset))
+       if (len < (8 + offset + 2))
                goto out;
 
+       /* 3D_Present */
+       offset++;
+       if (db[8 + offset] & (1 << 7)) {
+               modes += add_hdmi_mandatory_stereo_modes(connector);
+
+               /* 3D_Multi_present */
+               multi_present = (db[8 + offset] & 0x60) >> 5;
+       }
+
+       offset++;
        vic_len = db[8 + offset] >> 5;
+       hdmi_3d_len = db[8 + offset] & 0x1f;
 
        for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
-               struct drm_display_mode *newmode;
                u8 vic;
 
                vic = db[9 + offset + i];
+               modes += add_hdmi_mode(connector, vic);
+       }
+       offset += 1 + vic_len;
 
-               vic--; /* VICs start at 1 */
-               if (vic >= ARRAY_SIZE(edid_4k_modes)) {
-                       DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
-                       continue;
-               }
+       if (!(multi_present == 1 || multi_present == 2))
+               goto out;
 
-               newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
-               if (!newmode)
-                       continue;
+       if ((multi_present == 1 && len < (9 + offset)) ||
+           (multi_present == 2 && len < (11 + offset)))
+               goto out;
 
-               drm_mode_probed_add(connector, newmode);
-               modes++;
+       if ((multi_present == 1 && hdmi_3d_len < 2) ||
+           (multi_present == 2 && hdmi_3d_len < 4))
+               goto out;
+
+       /* 3D_Structure_ALL */
+       structure_all = (db[8 + offset] << 8) | db[9 + offset];
+
+       /* check if 3D_MASK is present */
+       if (multi_present == 2)
+               mask = (db[10 + offset] << 8) | db[11 + offset];
+       else
+               mask = 0xffff;
+
+       for (i = 0; i < 16; i++) {
+               if (mask & (1 << i))
+                       modes += add_3d_struct_modes(connector,
+                                                    structure_all,
+                                                    video_db,
+                                                    video_len, i);
        }
 
 out:
@@ -2668,8 +2840,8 @@ static int
 add_cea_modes(struct drm_connector *connector, struct edid *edid)
 {
        const u8 *cea = drm_find_cea_extension(edid);
-       const u8 *db;
-       u8 dbl;
+       const u8 *db, *hdmi = NULL, *video = NULL;
+       u8 dbl, hdmi_len, video_len = 0;
        int modes = 0;
 
        if (cea && cea_revision(cea) >= 3) {
@@ -2682,13 +2854,26 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
                        db = &cea[i];
                        dbl = cea_db_payload_len(db);
 
-                       if (cea_db_tag(db) == VIDEO_BLOCK)
-                               modes += do_cea_modes(connector, db + 1, dbl);
-                       else if (cea_db_is_hdmi_vsdb(db))
-                               modes += do_hdmi_vsdb_modes(connector, db, dbl);
+                       if (cea_db_tag(db) == VIDEO_BLOCK) {
+                               video = db + 1;
+                               video_len = dbl;
+                               modes += do_cea_modes(connector, video, dbl);
+                       }
+                       else if (cea_db_is_hdmi_vsdb(db)) {
+                               hdmi = db;
+                               hdmi_len = dbl;
+                       }
                }
        }
 
+       /*
+        * We parse the HDMI VSDB after having added the cea modes as we will
+        * be patching their flags when the sink supports stereo 3D.
+        */
+       if (hdmi)
+               modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len, video,
+                                           video_len);
+
        return modes;
 }
 
@@ -2925,6 +3110,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
                        /* Speaker Allocation Data Block */
                        if (dbl == 3) {
                                *sadb = kmalloc(dbl, GFP_KERNEL);
+                               if (!*sadb)
+                                       return -ENOMEM;
                                memcpy(*sadb, &db[1], dbl);
                                count = dbl;
                                break;
@@ -3319,6 +3506,33 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
 }
 EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
 
+static enum hdmi_3d_structure
+s3d_structure_from_display_mode(const struct drm_display_mode *mode)
+{
+       u32 layout = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+       switch (layout) {
+       case DRM_MODE_FLAG_3D_FRAME_PACKING:
+               return HDMI_3D_STRUCTURE_FRAME_PACKING;
+       case DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE:
+               return HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE;
+       case DRM_MODE_FLAG_3D_LINE_ALTERNATIVE:
+               return HDMI_3D_STRUCTURE_LINE_ALTERNATIVE;
+       case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL:
+               return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL;
+       case DRM_MODE_FLAG_3D_L_DEPTH:
+               return HDMI_3D_STRUCTURE_L_DEPTH;
+       case DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH:
+               return HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH;
+       case DRM_MODE_FLAG_3D_TOP_AND_BOTTOM:
+               return HDMI_3D_STRUCTURE_TOP_AND_BOTTOM;
+       case DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF:
+               return HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF;
+       default:
+               return HDMI_3D_STRUCTURE_INVALID;
+       }
+}
+
 /**
  * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
  * data from a DRM display mode
@@ -3336,20 +3550,29 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
                                            const struct drm_display_mode *mode)
 {
        int err;
+       u32 s3d_flags;
        u8 vic;
 
        if (!frame || !mode)
                return -EINVAL;
 
        vic = drm_match_hdmi_mode(mode);
-       if (!vic)
+       s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK;
+
+       if (!vic && !s3d_flags)
+               return -EINVAL;
+
+       if (vic && s3d_flags)
                return -EINVAL;
 
        err = hdmi_vendor_infoframe_init(frame);
        if (err < 0)
                return err;
 
-       frame->vic = vic;
+       if (vic)
+               frame->vic = vic;
+       else
+               frame->s3d_struct = s3d_structure_from_display_mode(mode);
 
        return 0;
 }
index 271b42bbfb72152dc3d1e66662a981cee4a5fb6e..9081172ef0573a0eb1b17ac8e3681b0c559486cc 100644 (file)
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(edid_firmware, "Do not probe monitor, use specified EDID blob "
        "from built-in data or /lib/firmware instead. ");
 
 #define GENERIC_EDIDS 5
-static char *generic_edid_name[GENERIC_EDIDS] = {
+static const char *generic_edid_name[GENERIC_EDIDS] = {
        "edid/1024x768.bin",
        "edid/1280x1024.bin",
        "edid/1600x1200.bin",
@@ -40,7 +40,7 @@ static char *generic_edid_name[GENERIC_EDIDS] = {
        "edid/1920x1080.bin",
 };
 
-static u8 generic_edid[GENERIC_EDIDS][128] = {
+static const u8 generic_edid[GENERIC_EDIDS][128] = {
        {
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
        0x31, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -133,63 +133,68 @@ static u8 generic_edid[GENERIC_EDIDS][128] = {
        },
 };
 
+static int edid_size(const u8 *edid, int data_size)
+{
+       if (data_size < EDID_LENGTH)
+               return 0;
+
+       return (edid[0x7e] + 1) * EDID_LENGTH;
+}
+
 static u8 *edid_load(struct drm_connector *connector, const char *name,
                        const char *connector_name)
 {
-       const struct firmware *fw;
-       struct platform_device *pdev;
-       u8 *fwdata = NULL, *edid, *new_edid;
-       int fwsize, expected;
-       int builtin = 0, err = 0;
+       const struct firmware *fw = NULL;
+       const u8 *fwdata;
+       u8 *edid;
+       int fwsize, builtin;
        int i, valid_extensions = 0;
        bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
-       pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
-       if (IS_ERR(pdev)) {
-               DRM_ERROR("Failed to register EDID firmware platform device "
-                   "for connector \"%s\"\n", connector_name);
-               err = -EINVAL;
-               goto out;
-       }
-
-       err = request_firmware(&fw, name, &pdev->dev);
-       platform_device_unregister(pdev);
-
-       if (err) {
-               i = 0;
-               while (i < GENERIC_EDIDS && strcmp(name, generic_edid_name[i]))
-                       i++;
-               if (i < GENERIC_EDIDS) {
-                       err = 0;
-                       builtin = 1;
+       builtin = 0;
+       for (i = 0; i < GENERIC_EDIDS; i++) {
+               if (strcmp(name, generic_edid_name[i]) == 0) {
                        fwdata = generic_edid[i];
                        fwsize = sizeof(generic_edid[i]);
+                       builtin = 1;
+                       break;
                }
        }
+       if (!builtin) {
+               struct platform_device *pdev;
+               int err;
 
-       if (err) {
-               DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
-                   name, err);
-               goto out;
-       }
+               pdev = platform_device_register_simple(connector_name, -1, NULL, 0);
+               if (IS_ERR(pdev)) {
+                       DRM_ERROR("Failed to register EDID firmware platform device "
+                                 "for connector \"%s\"\n", connector_name);
+                       return ERR_CAST(pdev);
+               }
+
+               err = request_firmware(&fw, name, &pdev->dev);
+               platform_device_unregister(pdev);
+               if (err) {
+                       DRM_ERROR("Requesting EDID firmware \"%s\" failed (err=%d)\n",
+                                 name, err);
+                       return ERR_PTR(err);
+               }
 
-       if (fwdata == NULL) {
-               fwdata = (u8 *) fw->data;
+               fwdata = fw->data;
                fwsize = fw->size;
        }
 
-       expected = (fwdata[0x7e] + 1) * EDID_LENGTH;
-       if (expected != fwsize) {
+       if (edid_size(fwdata, fwsize) != fwsize) {
                DRM_ERROR("Size of EDID firmware \"%s\" is invalid "
-                   "(expected %d, got %d)\n", name, expected, (int) fwsize);
-               err = -EINVAL;
-               goto relfw_out;
+                         "(expected %d, got %d)\n", name,
+                         edid_size(fwdata, fwsize), (int)fwsize);
+               edid = ERR_PTR(-EINVAL);
+               goto out;
        }
 
        edid = kmemdup(fwdata, fwsize, GFP_KERNEL);
        if (edid == NULL) {
-               err = -ENOMEM;
-               goto relfw_out;
+               edid = ERR_PTR(-ENOMEM);
+               goto out;
        }
 
        if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
@@ -197,8 +202,8 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
                DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
                    name);
                kfree(edid);
-               err = -EINVAL;
-               goto relfw_out;
+               edid = ERR_PTR(-EINVAL);
+               goto out;
        }
 
        for (i = 1; i <= edid[0x7e]; i++) {
@@ -210,19 +215,18 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
        }
 
        if (valid_extensions != edid[0x7e]) {
+               u8 *new_edid;
+
                edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
                DRM_INFO("Found %d valid extensions instead of %d in EDID data "
                    "\"%s\" for connector \"%s\"\n", valid_extensions,
                    edid[0x7e], name, connector_name);
                edid[0x7e] = valid_extensions;
+
                new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH,
-                   GFP_KERNEL);
-               if (new_edid == NULL) {
-                       err = -ENOMEM;
-                       kfree(edid);
-                       goto relfw_out;
-               }
-               edid = new_edid;
+                                   GFP_KERNEL);
+               if (new_edid)
+                       edid = new_edid;
        }
 
        DRM_INFO("Got %s EDID base block and %d extension%s from "
@@ -230,13 +234,9 @@ static u8 *edid_load(struct drm_connector *connector, const char *name,
            "external", valid_extensions, valid_extensions == 1 ? "" : "s",
            name, connector_name);
 
-relfw_out:
-       release_firmware(fw);
-
 out:
-       if (err)
-               return ERR_PTR(err);
-
+       if (fw)
+               release_firmware(fw);
        return edid;
 }
 
index 0cfb60f5476655edc097ca71c648d11544f3357e..d18b88b755c34f876dc2d4b9f0a202709dc87f5d 100644 (file)
@@ -67,12 +67,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
                goto fail;
        }
 
-       if (!client->driver) {
+       if (!client->dev.driver) {
                err = -ENODEV;
                goto fail_unregister;
        }
 
-       module = client->driver->driver.owner;
+       module = client->dev.driver->owner;
        if (!try_module_get(module)) {
                err = -ENODEV;
                goto fail_unregister;
@@ -80,7 +80,7 @@ int drm_i2c_encoder_init(struct drm_device *dev,
 
        encoder->bus_priv = client;
 
-       encoder_drv = to_drm_i2c_encoder_driver(client->driver);
+       encoder_drv = to_drm_i2c_encoder_driver(to_i2c_driver(client->dev.driver));
 
        err = encoder_drv->encoder_init(client, dev, encoder);
        if (err)
@@ -111,7 +111,7 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
 {
        struct drm_encoder_slave *encoder = to_encoder_slave(drm_encoder);
        struct i2c_client *client = drm_i2c_encoder_get_client(drm_encoder);
-       struct module *module = client->driver->driver.owner;
+       struct module *module = client->dev.driver->owner;
 
        i2c_unregister_device(client);
        encoder->bus_priv = NULL;
index f6f6cc7fc133292e9fe3375502466b0abd1b0fa9..0a19401aff803bcf2a1ff4dddcbd1fab42a0a187 100644 (file)
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_crtc_helper.h>
 
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
-MODULE_LICENSE("GPL and additional rights");
-
 static LIST_HEAD(kernel_fb_helper_list);
 
 /**
@@ -407,14 +403,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
        struct drm_connector *connector;
        int i, j;
 
-       /*
-        * fbdev->blank can be called from irq context in case of a panic.
-        * Since we already have our own special panic handler which will
-        * restore the fbdev console mode completely, just bail out early.
-        */
-       if (oops_in_progress)
-               return;
-
        /*
         * fbdev->blank can be called from irq context in case of a panic.
         * Since we already have our own special panic handler which will
@@ -852,7 +840,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
        struct drm_fb_helper *fb_helper = info->par;
        struct drm_device *dev = fb_helper->dev;
        struct drm_mode_set *modeset;
-       struct drm_crtc *crtc;
        int ret = 0;
        int i;
 
@@ -863,8 +850,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
        }
 
        for (i = 0; i < fb_helper->crtc_count; i++) {
-               crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
                modeset = &fb_helper->crtc_info[i].mode_set;
 
                modeset->x = var->xoffset;
@@ -1360,7 +1345,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
        struct drm_connector *connector;
        struct drm_connector_helper_funcs *connector_funcs;
        struct drm_encoder *encoder;
-       struct drm_fb_helper_crtc *best_crtc;
        int my_score, best_score, score;
        struct drm_fb_helper_crtc **crtcs, *crtc;
        struct drm_fb_helper_connector *fb_helper_conn;
@@ -1372,7 +1356,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
        connector = fb_helper_conn->connector;
 
        best_crtcs[n] = NULL;
-       best_crtc = NULL;
        best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
        if (modes[n] == NULL)
                return best_score;
@@ -1421,7 +1404,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
                score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
                                                  width, height);
                if (score > best_score) {
-                       best_crtc = crtc;
                        best_score = score;
                        memcpy(best_crtcs, crtcs,
                               dev->mode_config.num_connector *
@@ -1588,8 +1570,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 {
        struct drm_device *dev = fb_helper->dev;
-       int count = 0;
-       u32 max_width, max_height, bpp_sel;
+       u32 max_width, max_height;
 
        if (!fb_helper->fb)
                return 0;
@@ -1604,10 +1585,8 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 
        max_width = fb_helper->fb->width;
        max_height = fb_helper->fb->height;
-       bpp_sel = fb_helper->fb->bits_per_pixel;
 
-       count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
-                                                   max_height);
+       drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height);
        mutex_unlock(&fb_helper->dev->mode_config.mutex);
 
        drm_modeset_lock_all(dev);
index 3f84277d7036b3f843120fb4145dc081010d3d0d..d0e27667a4eb7200700b500804c574c7a9b40d1e 100644 (file)
@@ -113,7 +113,6 @@ int drm_open(struct inode *inode, struct file *filp)
        retcode = drm_open_helper(inode, filp, dev);
        if (retcode)
                goto err_undo;
-       atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
        if (need_setup) {
                retcode = drm_setup(dev);
                if (retcode)
@@ -385,6 +384,71 @@ static void drm_events_release(struct drm_file *file_priv)
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+/**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in its lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return;
+
+       atomic_set(&dev->ioctl_count, 0);
+       atomic_set(&dev->vma_count, 0);
+
+       dev->sigdata.lock = NULL;
+
+       dev->context_flag = 0;
+       dev->last_context = 0;
+       dev->if_version = 0;
+}
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+       struct drm_vma_entry *vma, *vma_temp;
+
+       DRM_DEBUG("\n");
+
+       if (dev->driver->lastclose)
+               dev->driver->lastclose(dev);
+       DRM_DEBUG("driver lastclose completed\n");
+
+       if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_irq_uninstall(dev);
+
+       mutex_lock(&dev->struct_mutex);
+
+       drm_agp_clear(dev);
+
+       drm_legacy_sg_cleanup(dev);
+
+       /* Clear vma list (only built for debugging) */
+       list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+               list_del(&vma->head);
+               kfree(vma);
+       }
+
+       drm_legacy_dma_takedown(dev);
+
+       dev->dev_mapping = NULL;
+       mutex_unlock(&dev->struct_mutex);
+
+       drm_legacy_dev_reinit(dev);
+
+       DRM_DEBUG("lastclose completed\n");
+       return 0;
+}
+
 /**
  * Release file.
  *
@@ -454,7 +518,6 @@ int drm_release(struct inode *inode, struct file *filp)
 
                                list_del(&pos->head);
                                kfree(pos);
-                               --dev->ctx_count;
                        }
                }
        }
@@ -516,7 +579,6 @@ int drm_release(struct inode *inode, struct file *filp)
         * End inline drm_release
         */
 
-       atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        if (!--dev->open_count) {
                if (atomic_read(&dev->ioctl_count)) {
                        DRM_ERROR("Device busy: %d\n",
index 49293bdc972a0d76143b0bd3b205280d4fffbd9e..4761adedad2abe5f03ae586354436f3bf1b32cbb 100644 (file)
@@ -160,35 +160,6 @@ void drm_gem_private_object_init(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
-/**
- * Allocate a GEM object of the specified size with shmfs backing store
- */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
-{
-       struct drm_gem_object *obj;
-
-       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-       if (!obj)
-               goto free;
-
-       if (drm_gem_object_init(dev, obj, size) != 0)
-               goto free;
-
-       if (dev->driver->gem_init_object != NULL &&
-           dev->driver->gem_init_object(obj) != 0) {
-               goto fput;
-       }
-       return obj;
-fput:
-       /* Object_init mangles the global counters - readjust them. */
-       fput(obj->filp);
-free:
-       kfree(obj);
-       return NULL;
-}
-EXPORT_SYMBOL(drm_gem_object_alloc);
-
 static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
index f7311162a61dc0e91cbca80f190f75c7e685d179..3d2e91c4d78e1c1003ac9521250dc4ca41c9b6ae 100644 (file)
@@ -67,7 +67,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
 {
        int ret;
        struct drm_global_item *item = &glob[ref->global_type];
-       void *object;
 
        mutex_lock(&item->mutex);
        if (item->refcount == 0) {
@@ -85,7 +84,6 @@ int drm_global_item_ref(struct drm_global_reference *ref)
        }
        ++item->refcount;
        ref->object = item->object;
-       object = item->object;
        mutex_unlock(&item->mutex);
        return 0;
 out_err:
index 53298320080b86d7694a24a81082a2c402d2e553..7d5a152eeb0288f05e5770a9699b92e253f689e4 100644 (file)
@@ -163,13 +163,13 @@ int drm_vblank_info(struct seq_file *m, void *data)
        mutex_lock(&dev->struct_mutex);
        for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
                seq_printf(m, "CRTC %d enable:     %d\n",
-                          crtc, atomic_read(&dev->vblank_refcount[crtc]));
+                          crtc, atomic_read(&dev->vblank[crtc].refcount));
                seq_printf(m, "CRTC %d counter:    %d\n",
                           crtc, drm_vblank_count(dev, crtc));
                seq_printf(m, "CRTC %d last wait:  %d\n",
-                          crtc, dev->last_vblank_wait[crtc]);
+                          crtc, dev->vblank[crtc].last_wait);
                seq_printf(m, "CRTC %d in modeset: %d\n",
-                          crtc, dev->vblank_inmodeset[crtc]);
+                          crtc, dev->vblank[crtc].inmodeset);
        }
        mutex_unlock(&dev->struct_mutex);
        return 0;
index 07247e2855a23f7d9b530f6383a68f0c64663a0c..dffc836144cc96266a616902aa457b46c0ef1b33 100644 (file)
@@ -302,6 +302,27 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
        return 0;
 }
 
+/**
+ * Set device/driver capabilities
+ */
+int
+drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+       struct drm_set_client_cap *req = data;
+
+       switch (req->capability) {
+       case DRM_CLIENT_CAP_STEREO_3D:
+               if (req->value > 1)
+                       return -EINVAL;
+               file_priv->stereo_allowed = req->value;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 /**
  * Setversion ioctl.
  *
index f92da0a32f0d30d02fe8afeb09f5272a16b8a060..f9af048828ea16136752a29b982ae28add4ff226 100644 (file)
@@ -43,9 +43,8 @@
 #include <linux/export.h>
 
 /* Access macro for slots in vblank timestamp ringbuffer. */
-#define vblanktimestamp(dev, crtc, count) ( \
-       (dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
-       ((count) % DRM_VBLANKTIME_RBSIZE)])
+#define vblanktimestamp(dev, crtc, count) \
+       ((dev)->vblank[crtc].time[(count) % DRM_VBLANKTIME_RBSIZE])
 
 /* Retry timestamp calculation up to 3 times to satisfy
  * drm_timestamp_precision before giving up.
@@ -89,8 +88,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
  */
 static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
 {
-       memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
-               DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+       memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
 }
 
 /*
@@ -115,7 +113,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
        spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
        dev->driver->disable_vblank(dev, crtc);
-       dev->vblank_enabled[crtc] = 0;
+       dev->vblank[crtc].enabled = false;
 
        /* No further vblank irq's will be processed after
         * this point. Get current hardware vblank count and
@@ -130,9 +128,9 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
         * delayed gpu counter increment.
         */
        do {
-               dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+               dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
                vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
-       } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+       } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
 
        if (!count)
                vblrc = 0;
@@ -140,7 +138,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
        /* Compute time difference to stored timestamp of last vblank
         * as updated by last invocation of drm_handle_vblank() in vblank irq.
         */
-       vblcount = atomic_read(&dev->_vblank_count[crtc]);
+       vblcount = atomic_read(&dev->vblank[crtc].count);
        diff_ns = timeval_to_ns(&tvblank) -
                  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
 
@@ -157,7 +155,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
         * hope for the best.
         */
        if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
-               atomic_inc(&dev->_vblank_count[crtc]);
+               atomic_inc(&dev->vblank[crtc].count);
                smp_mb__after_atomic_inc();
        }
 
@@ -178,8 +176,8 @@ static void vblank_disable_fn(unsigned long arg)
 
        for (i = 0; i < dev->num_crtcs; i++) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
-               if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
-                   dev->vblank_enabled[i]) {
+               if (atomic_read(&dev->vblank[i].refcount) == 0 &&
+                   dev->vblank[i].enabled) {
                        DRM_DEBUG("disabling vblank on crtc %d\n", i);
                        vblank_disable_and_save(dev, i);
                }
@@ -197,14 +195,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
 
        vblank_disable_fn((unsigned long)dev);
 
-       kfree(dev->vbl_queue);
-       kfree(dev->_vblank_count);
-       kfree(dev->vblank_refcount);
-       kfree(dev->vblank_enabled);
-       kfree(dev->last_vblank);
-       kfree(dev->last_vblank_wait);
-       kfree(dev->vblank_inmodeset);
-       kfree(dev->_vblank_time);
+       kfree(dev->vblank);
 
        dev->num_crtcs = 0;
 }
@@ -221,40 +212,12 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 
        dev->num_crtcs = num_crtcs;
 
-       dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
-                                GFP_KERNEL);
-       if (!dev->vbl_queue)
+       dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+       if (!dev->vblank)
                goto err;
 
-       dev->_vblank_count = kmalloc(sizeof(atomic_t) * num_crtcs, GFP_KERNEL);
-       if (!dev->_vblank_count)
-               goto err;
-
-       dev->vblank_refcount = kmalloc(sizeof(atomic_t) * num_crtcs,
-                                      GFP_KERNEL);
-       if (!dev->vblank_refcount)
-               goto err;
-
-       dev->vblank_enabled = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
-       if (!dev->vblank_enabled)
-               goto err;
-
-       dev->last_vblank = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
-       if (!dev->last_vblank)
-               goto err;
-
-       dev->last_vblank_wait = kcalloc(num_crtcs, sizeof(u32), GFP_KERNEL);
-       if (!dev->last_vblank_wait)
-               goto err;
-
-       dev->vblank_inmodeset = kcalloc(num_crtcs, sizeof(int), GFP_KERNEL);
-       if (!dev->vblank_inmodeset)
-               goto err;
-
-       dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
-                                   sizeof(struct timeval), GFP_KERNEL);
-       if (!dev->_vblank_time)
-               goto err;
+       for (i = 0; i < num_crtcs; i++)
+               init_waitqueue_head(&dev->vblank[i].queue);
 
        DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
 
@@ -264,14 +227,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
        else
                DRM_INFO("No driver support for vblank timestamp query.\n");
 
-       /* Zero per-crtc vblank stuff */
-       for (i = 0; i < num_crtcs; i++) {
-               init_waitqueue_head(&dev->vbl_queue[i]);
-               atomic_set(&dev->_vblank_count[i], 0);
-               atomic_set(&dev->vblank_refcount[i], 0);
-       }
+       dev->vblank_disable_allowed = false;
 
-       dev->vblank_disable_allowed = 0;
        return 0;
 
 err:
@@ -336,7 +293,7 @@ int drm_irq_install(struct drm_device *dev)
                mutex_unlock(&dev->struct_mutex);
                return -EBUSY;
        }
-       dev->irq_enabled = 1;
+       dev->irq_enabled = true;
        mutex_unlock(&dev->struct_mutex);
 
        DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
@@ -359,7 +316,7 @@ int drm_irq_install(struct drm_device *dev)
 
        if (ret < 0) {
                mutex_lock(&dev->struct_mutex);
-               dev->irq_enabled = 0;
+               dev->irq_enabled = false;
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
@@ -373,7 +330,7 @@ int drm_irq_install(struct drm_device *dev)
 
        if (ret < 0) {
                mutex_lock(&dev->struct_mutex);
-               dev->irq_enabled = 0;
+               dev->irq_enabled = false;
                mutex_unlock(&dev->struct_mutex);
                if (!drm_core_check_feature(dev, DRIVER_MODESET))
                        vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -394,14 +351,15 @@ EXPORT_SYMBOL(drm_irq_install);
 int drm_irq_uninstall(struct drm_device *dev)
 {
        unsigned long irqflags;
-       int irq_enabled, i;
+       bool irq_enabled;
+       int i;
 
        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EINVAL;
 
        mutex_lock(&dev->struct_mutex);
        irq_enabled = dev->irq_enabled;
-       dev->irq_enabled = 0;
+       dev->irq_enabled = false;
        mutex_unlock(&dev->struct_mutex);
 
        /*
@@ -410,9 +368,9 @@ int drm_irq_uninstall(struct drm_device *dev)
        if (dev->num_crtcs) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
                for (i = 0; i < dev->num_crtcs; i++) {
-                       DRM_WAKEUP(&dev->vbl_queue[i]);
-                       dev->vblank_enabled[i] = 0;
-                       dev->last_vblank[i] =
+                       DRM_WAKEUP(&dev->vblank[i].queue);
+                       dev->vblank[i].enabled = false;
+                       dev->vblank[i].last =
                                dev->driver->get_vblank_counter(dev, i);
                }
                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -795,7 +753,7 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
  */
 u32 drm_vblank_count(struct drm_device *dev, int crtc)
 {
-       return atomic_read(&dev->_vblank_count[crtc]);
+       return atomic_read(&dev->vblank[crtc].count);
 }
 EXPORT_SYMBOL(drm_vblank_count);
 
@@ -824,10 +782,10 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
         * a seqlock.
         */
        do {
-               cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+               cur_vblank = atomic_read(&dev->vblank[crtc].count);
                *vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
                smp_rmb();
-       } while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+       } while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
 
        return cur_vblank;
 }
@@ -914,12 +872,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
        } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
 
        /* Deal with counter wrap */
-       diff = cur_vblank - dev->last_vblank[crtc];
-       if (cur_vblank < dev->last_vblank[crtc]) {
+       diff = cur_vblank - dev->vblank[crtc].last;
+       if (cur_vblank < dev->vblank[crtc].last) {
                diff += dev->max_vblank_count;
 
                DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
-                         crtc, dev->last_vblank[crtc], cur_vblank, diff);
+                         crtc, dev->vblank[crtc].last, cur_vblank, diff);
        }
 
        DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
@@ -930,12 +888,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
         * reinitialize delayed at next vblank interrupt in that case.
         */
        if (rc) {
-               tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+               tslot = atomic_read(&dev->vblank[crtc].count) + diff;
                vblanktimestamp(dev, crtc, tslot) = t_vblank;
        }
 
        smp_mb__before_atomic_inc();
-       atomic_add(diff, &dev->_vblank_count[crtc]);
+       atomic_add(diff, &dev->vblank[crtc].count);
        smp_mb__after_atomic_inc();
 }
 
@@ -957,9 +915,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        /* Going from 0->1 means we have to enable interrupts again */
-       if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+       if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
                spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
-               if (!dev->vblank_enabled[crtc]) {
+               if (!dev->vblank[crtc].enabled) {
                        /* Enable vblank irqs under vblank_time_lock protection.
                         * All vblank count & timestamp updates are held off
                         * until we are done reinitializing master counter and
@@ -970,16 +928,16 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
                        DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
                                  crtc, ret);
                        if (ret)
-                               atomic_dec(&dev->vblank_refcount[crtc]);
+                               atomic_dec(&dev->vblank[crtc].refcount);
                        else {
-                               dev->vblank_enabled[crtc] = 1;
+                               dev->vblank[crtc].enabled = true;
                                drm_update_vblank_count(dev, crtc);
                        }
                }
                spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
        } else {
-               if (!dev->vblank_enabled[crtc]) {
-                       atomic_dec(&dev->vblank_refcount[crtc]);
+               if (!dev->vblank[crtc].enabled) {
+                       atomic_dec(&dev->vblank[crtc].refcount);
                        ret = -EINVAL;
                }
        }
@@ -999,10 +957,10 @@ EXPORT_SYMBOL(drm_vblank_get);
  */
 void drm_vblank_put(struct drm_device *dev, int crtc)
 {
-       BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
+       BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
 
        /* Last user schedules interrupt disable */
-       if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+       if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
            (drm_vblank_offdelay > 0))
                mod_timer(&dev->vblank_disable_timer,
                          jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
@@ -1025,7 +983,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
 
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        vblank_disable_and_save(dev, crtc);
-       DRM_WAKEUP(&dev->vbl_queue[crtc]);
+       DRM_WAKEUP(&dev->vblank[crtc].queue);
 
        /* Send any queued vblank events, lest the natives grow disquiet */
        seq = drm_vblank_count_and_time(dev, crtc, &now);
@@ -1067,10 +1025,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
         * to avoid corrupting the count if multiple, mismatch calls occur),
         * so that interrupts remain enabled in the interim.
         */
-       if (!dev->vblank_inmodeset[crtc]) {
-               dev->vblank_inmodeset[crtc] = 0x1;
+       if (!dev->vblank[crtc].inmodeset) {
+               dev->vblank[crtc].inmodeset = 0x1;
                if (drm_vblank_get(dev, crtc) == 0)
-                       dev->vblank_inmodeset[crtc] |= 0x2;
+                       dev->vblank[crtc].inmodeset |= 0x2;
        }
 }
 EXPORT_SYMBOL(drm_vblank_pre_modeset);
@@ -1083,15 +1041,15 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
        if (!dev->num_crtcs)
                return;
 
-       if (dev->vblank_inmodeset[crtc]) {
+       if (dev->vblank[crtc].inmodeset) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
-               dev->vblank_disable_allowed = 1;
+               dev->vblank_disable_allowed = true;
                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
-               if (dev->vblank_inmodeset[crtc] & 0x2)
+               if (dev->vblank[crtc].inmodeset & 0x2)
                        drm_vblank_put(dev, crtc);
 
-               dev->vblank_inmodeset[crtc] = 0;
+               dev->vblank[crtc].inmodeset = 0;
        }
 }
 EXPORT_SYMBOL(drm_vblank_post_modeset);
@@ -1288,8 +1246,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 
        DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
                  vblwait->request.sequence, crtc);
-       dev->last_vblank_wait[crtc] = vblwait->request.sequence;
-       DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+       dev->vblank[crtc].last_wait = vblwait->request.sequence;
+       DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * DRM_HZ,
                    (((drm_vblank_count(dev, crtc) -
                       vblwait->request.sequence) <= (1 << 23)) ||
                     !dev->irq_enabled));
@@ -1367,7 +1325,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
        spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
        /* Vblank irq handling disabled. Nothing to do. */
-       if (!dev->vblank_enabled[crtc]) {
+       if (!dev->vblank[crtc].enabled) {
                spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
                return false;
        }
@@ -1377,7 +1335,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
         */
 
        /* Get current timestamp and count. */
-       vblcount = atomic_read(&dev->_vblank_count[crtc]);
+       vblcount = atomic_read(&dev->vblank[crtc].count);
        drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
 
        /* Compute time difference to timestamp of last vblank */
@@ -1401,14 +1359,14 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
                 * the timestamp computed above.
                 */
                smp_mb__before_atomic_inc();
-               atomic_inc(&dev->_vblank_count[crtc]);
+               atomic_inc(&dev->vblank[crtc].count);
                smp_mb__after_atomic_inc();
        } else {
                DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
                          crtc, (int) diff_ns);
        }
 
-       DRM_WAKEUP(&dev->vbl_queue[crtc]);
+       DRM_WAKEUP(&dev->vblank[crtc].queue);
        drm_handle_vblank_events(dev, crtc);
 
        spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
index d752c96d609092f4de747bddef73bb19d040b7c3..f6452682141b5fd25773153233fb97983dafbc40 100644 (file)
@@ -86,7 +86,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
                if (drm_lock_take(&master->lock, lock->context)) {
                        master->lock.file_priv = file_priv;
                        master->lock.lock_time = jiffies;
-                       atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                        break;  /* Got lock */
                }
 
@@ -157,8 +156,6 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
                return -EINVAL;
        }
 
-       atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
-
        if (drm_lock_free(&master->lock, lock->context)) {
                /* FIXME: Should really bail out here. */
        }
index fc2adb62b7574dc9b3e1d7de810bb3074320311f..b0733153dfd294f178ff1818618fadbe63a1d144 100644 (file)
@@ -707,18 +707,25 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
 /**
  * drm_mode_set_crtcinfo - set CRTC modesetting parameters
  * @p: mode
- * @adjust_flags: unused? (FIXME)
+ * @adjust_flags: a combination of adjustment flags
  *
  * LOCKING:
  * None.
  *
  * Setup the CRTC modesetting parameters for @p, adjusting if necessary.
+ *
+ * - The CRTC_INTERLACE_HALVE_V flag can be used to halve vertical timings of
+ *   interlaced modes.
+ * - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
+ *   buffers containing two eyes (only adjust the timings when needed, e.g. for
+ *   "frame packing" or "side by side full").
  */
 void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
 {
        if ((p == NULL) || ((p->type & DRM_MODE_TYPE_CRTC_C) == DRM_MODE_TYPE_BUILTIN))
                return;
 
+       p->crtc_clock = p->clock;
        p->crtc_hdisplay = p->hdisplay;
        p->crtc_hsync_start = p->hsync_start;
        p->crtc_hsync_end = p->hsync_end;
@@ -752,6 +759,20 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
                p->crtc_vtotal *= p->vscan;
        }
 
+       if (adjust_flags & CRTC_STEREO_DOUBLE) {
+               unsigned int layout = p->flags & DRM_MODE_FLAG_3D_MASK;
+
+               switch (layout) {
+               case DRM_MODE_FLAG_3D_FRAME_PACKING:
+                       p->crtc_clock *= 2;
+                       p->crtc_vdisplay += p->crtc_vtotal;
+                       p->crtc_vsync_start += p->crtc_vtotal;
+                       p->crtc_vsync_end += p->crtc_vtotal;
+                       p->crtc_vtotal += p->crtc_vtotal;
+                       break;
+               }
+       }
+
        p->crtc_vblank_start = min(p->crtc_vsync_start, p->crtc_vdisplay);
        p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
        p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
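
A concrete example of the frame-packing arithmetic above: starting from the CEA 1920x1080p24 timings (clock 74.25 MHz, vdisplay 1080, vsync_start 1084, vsync_end 1089, vtotal 1125), CRTC_STEREO_DOUBLE produces crtc_clock 148.5 MHz, crtc_vdisplay 2205 (both eyes plus the 45-line vertical blank between them), crtc_vsync_start 2209, crtc_vsync_end 2214 and crtc_vtotal 2250, which is the doubled-rate frame that HDMI frame packing expects.
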
@@ -830,12 +851,16 @@ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_displ
        } else if (mode1->clock != mode2->clock)
                return false;
 
-       return drm_mode_equal_no_clocks(mode1, mode2);
+       if ((mode1->flags & DRM_MODE_FLAG_3D_MASK) !=
+           (mode2->flags & DRM_MODE_FLAG_3D_MASK))
+               return false;
+
+       return drm_mode_equal_no_clocks_no_stereo(mode1, mode2);
 }
 EXPORT_SYMBOL(drm_mode_equal);
 
 /**
- * drm_mode_equal_no_clocks - test modes for equality
+ * drm_mode_equal_no_clocks_no_stereo - test modes for equality
  * @mode1: first mode
  * @mode2: second mode
  *
@@ -843,12 +868,13 @@ EXPORT_SYMBOL(drm_mode_equal);
  * None.
  *
  * Check to see if @mode1 and @mode2 are equivalent, but
- * don't check the pixel clocks.
+ * don't check the pixel clocks nor the stereo layout.
  *
  * RETURNS:
  * True if the modes are equal, false otherwise.
  */
-bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
+bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
+                                       const struct drm_display_mode *mode2)
 {
        if (mode1->hdisplay == mode2->hdisplay &&
            mode1->hsync_start == mode2->hsync_start &&
@@ -860,12 +886,13 @@ bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct
            mode1->vsync_end == mode2->vsync_end &&
            mode1->vtotal == mode2->vtotal &&
            mode1->vscan == mode2->vscan &&
-           mode1->flags == mode2->flags)
+           (mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
+            (mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
                return true;
 
        return false;
 }
-EXPORT_SYMBOL(drm_mode_equal_no_clocks);
+EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
 
 /**
  * drm_mode_validate_size - make sure modes adhere to size constraints
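
The practical effect of this split: two modes that are identical except for a DRM_MODE_FLAG_3D_* layout now compare as different through drm_mode_equal(), while code that wants to match a stereo variant against its 2D base timings can call drm_mode_equal_no_clocks_no_stereo() to ignore both the clock and the layout bits.
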
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
new file mode 100644 (file)
index 0000000..ff6e459
--- /dev/null
+++ b/drivers/gpu/drm/drm_panel.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+static DEFINE_MUTEX(panel_lock);
+static LIST_HEAD(panel_list);
+
+void drm_panel_init(struct drm_panel *panel)
+{
+       INIT_LIST_HEAD(&panel->list);
+}
+EXPORT_SYMBOL(drm_panel_init);
+
+int drm_panel_add(struct drm_panel *panel)
+{
+       mutex_lock(&panel_lock);
+       list_add_tail(&panel->list, &panel_list);
+       mutex_unlock(&panel_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_panel_add);
+
+void drm_panel_remove(struct drm_panel *panel)
+{
+       mutex_lock(&panel_lock);
+       list_del_init(&panel->list);
+       mutex_unlock(&panel_lock);
+}
+EXPORT_SYMBOL(drm_panel_remove);
+
+int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
+{
+       if (panel->connector)
+               return -EBUSY;
+
+       panel->connector = connector;
+       panel->drm = connector->dev;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_panel_attach);
+
+int drm_panel_detach(struct drm_panel *panel)
+{
+       panel->connector = NULL;
+       panel->drm = NULL;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_panel_detach);
+
+#ifdef CONFIG_OF
+struct drm_panel *of_drm_find_panel(struct device_node *np)
+{
+       struct drm_panel *panel;
+
+       mutex_lock(&panel_lock);
+
+       list_for_each_entry(panel, &panel_list, list) {
+               if (panel->dev->of_node == np) {
+                       mutex_unlock(&panel_lock);
+                       return panel;
+               }
+       }
+
+       mutex_unlock(&panel_lock);
+       return NULL;
+}
+EXPORT_SYMBOL(of_drm_find_panel);
+#endif
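
A rough usage sketch for the new panel registry, using hypothetical driver names; it assumes struct drm_panel exposes the dev and funcs members referenced by of_drm_find_panel() above and defined in the header added elsewhere in this series:

    /* Hypothetical panel driver: register the panel so a display driver can
     * look it up by device tree node. */
    struct my_panel {
            struct drm_panel base;
    };

    static int my_panel_probe(struct platform_device *pdev)
    {
            struct my_panel *p;

            p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
            if (!p)
                    return -ENOMEM;

            drm_panel_init(&p->base);
            p->base.dev = &pdev->dev;        /* matched against np in of_drm_find_panel() */
            p->base.funcs = &my_panel_funcs; /* hypothetical drm_panel_funcs */

            return drm_panel_add(&p->base);
    }

    /* Hypothetical display driver side, given a phandle to the panel node: */
    panel = of_drm_find_panel(panel_node);
    if (panel)
            drm_panel_attach(panel, connector);
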
index 1f96cee6eee8efb3ff183de2d5a93f5fbe895af8..f00d7a9671eadf5761c70103c5df67cd5aadabcf 100644 (file)
@@ -322,83 +322,36 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
 
        DRM_DEBUG("\n");
 
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       dev = drm_dev_alloc(driver, &pdev->dev);
        if (!dev)
                return -ENOMEM;
 
        ret = pci_enable_device(pdev);
        if (ret)
-               goto err_g1;
+               goto err_free;
 
        dev->pdev = pdev;
-       dev->dev = &pdev->dev;
-
-       dev->pci_device = pdev->device;
-       dev->pci_vendor = pdev->vendor;
-
 #ifdef __alpha__
        dev->hose = pdev->sysdata;
 #endif
 
-       mutex_lock(&drm_global_mutex);
-
-       if ((ret = drm_fill_in_dev(dev, ent, driver))) {
-               printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-               goto err_g2;
-       }
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
                pci_set_drvdata(pdev, dev);
-               ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
-               if (ret)
-                       goto err_g2;
-       }
-
-       if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-               ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-               if (ret)
-                       goto err_g21;
-       }
-
-       if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
-               goto err_g3;
-
-       if (dev->driver->load) {
-               ret = dev->driver->load(dev, ent->driver_data);
-               if (ret)
-                       goto err_g4;
-       }
 
-       /* setup the grouping for the legacy output */
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = drm_mode_group_init_legacy_group(dev,
-                                               &dev->primary->mode_group);
-               if (ret)
-                       goto err_g4;
-       }
-
-       list_add_tail(&dev->driver_item, &driver->device_list);
+       ret = drm_dev_register(dev, ent->driver_data);
+       if (ret)
+               goto err_pci;
 
        DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
                 driver->name, driver->major, driver->minor, driver->patchlevel,
                 driver->date, pci_name(pdev), dev->primary->index);
 
-       mutex_unlock(&drm_global_mutex);
        return 0;
 
-err_g4:
-       drm_put_minor(&dev->primary);
-err_g3:
-       if (dev->render)
-               drm_put_minor(&dev->render);
-err_g21:
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_put_minor(&dev->control);
-err_g2:
+err_pci:
        pci_disable_device(pdev);
-err_g1:
-       kfree(dev);
-       mutex_unlock(&drm_global_mutex);
+err_free:
+       drm_dev_free(dev);
        return ret;
 }
 EXPORT_SYMBOL(drm_get_pci_dev);
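
The same conversion is repeated for the platform and USB buses further down. The common pattern, sketched here with a hypothetical bus, is that each piece of bus glue now only allocates the device, fills in its bus-specific fields and registers it, with drm_dev_free() as the single error and teardown path:

    /* Sketch of the bus-glue pattern these conversions follow; "my_bus" is
     * hypothetical, the drm_dev_*() helpers are the ones introduced here. */
    static int my_bus_probe(struct my_bus_device *bdev, struct drm_driver *driver)
    {
            struct drm_device *dev;
            int ret;

            dev = drm_dev_alloc(driver, &bdev->dev);
            if (!dev)
                    return -ENOMEM;

            /* fill in bus-specific fields, e.g. dev->platformdev or dev->usbdev */

            ret = drm_dev_register(dev, 0);
            if (ret) {
                    drm_dev_free(dev);
                    return ret;
            }

            return 0;
    }

    static void my_bus_remove(struct drm_device *dev)
    {
            /* drm_put_dev(dev) is shorthand for these two calls */
            drm_dev_unregister(dev);
            drm_dev_free(dev);
    }
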
index f7a18c6ba4c42607a9ffb4ae2b8dd53ceeb3c3a7..fc24fee8ec833b6bdb2c26ada2b45e947622b941 100644 (file)
@@ -47,55 +47,15 @@ static int drm_get_platform_dev(struct platform_device *platdev,
 
        DRM_DEBUG("\n");
 
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       dev = drm_dev_alloc(driver, &platdev->dev);
        if (!dev)
                return -ENOMEM;
 
        dev->platformdev = platdev;
-       dev->dev = &platdev->dev;
 
-       mutex_lock(&drm_global_mutex);
-
-       ret = drm_fill_in_dev(dev, NULL, driver);
-
-       if (ret) {
-               printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-               goto err_g1;
-       }
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
-               if (ret)
-                       goto err_g1;
-       }
-
-       if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-               ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-               if (ret)
-                       goto err_g11;
-       }
-
-       ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+       ret = drm_dev_register(dev, 0);
        if (ret)
-               goto err_g2;
-
-       if (dev->driver->load) {
-               ret = dev->driver->load(dev, 0);
-               if (ret)
-                       goto err_g3;
-       }
-
-       /* setup the grouping for the legacy output */
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = drm_mode_group_init_legacy_group(dev,
-                               &dev->primary->mode_group);
-               if (ret)
-                       goto err_g3;
-       }
-
-       list_add_tail(&dev->driver_item, &driver->device_list);
-
-       mutex_unlock(&drm_global_mutex);
+               goto err_free;
 
        DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
                 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -103,17 +63,8 @@ static int drm_get_platform_dev(struct platform_device *platdev,
 
        return 0;
 
-err_g3:
-       drm_put_minor(&dev->primary);
-err_g2:
-       if (dev->render)
-               drm_put_minor(&dev->render);
-err_g11:
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_put_minor(&dev->control);
-err_g1:
-       kfree(dev);
-       mutex_unlock(&drm_global_mutex);
+err_free:
+       drm_dev_free(dev);
        return ret;
 }
 
index 276d470f7b3efa970c00db47bfd8eabef7c6725d..56805c39c906dfa64326dd8003e6445889f7840e 100644 (file)
@@ -637,14 +637,13 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
        unsigned count;
        struct scatterlist *sg;
        struct page *page;
-       u32 len, offset;
+       u32 len;
        int pg_index;
        dma_addr_t addr;
 
        pg_index = 0;
        for_each_sg(sgt->sgl, sg, sgt->nents, count) {
                len = sg->length;
-               offset = sg->offset;
                page = sg_page(sg);
                addr = sg_dma_address(sg);
 
index 39d864576be4a4d5f5957cdbf1e3112b1322a018..26055abf94ee03dd161dd8c1feffef25796aea2c 100644 (file)
@@ -254,70 +254,6 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-int drm_fill_in_dev(struct drm_device *dev,
-                          const struct pci_device_id *ent,
-                          struct drm_driver *driver)
-{
-       int retcode;
-
-       INIT_LIST_HEAD(&dev->filelist);
-       INIT_LIST_HEAD(&dev->ctxlist);
-       INIT_LIST_HEAD(&dev->vmalist);
-       INIT_LIST_HEAD(&dev->maplist);
-       INIT_LIST_HEAD(&dev->vblank_event_list);
-
-       spin_lock_init(&dev->count_lock);
-       spin_lock_init(&dev->event_lock);
-       mutex_init(&dev->struct_mutex);
-       mutex_init(&dev->ctxlist_mutex);
-
-       if (drm_ht_create(&dev->map_hash, 12)) {
-               return -ENOMEM;
-       }
-
-       /* the DRM has 6 basic counters */
-       dev->counters = 6;
-       dev->types[0] = _DRM_STAT_LOCK;
-       dev->types[1] = _DRM_STAT_OPENS;
-       dev->types[2] = _DRM_STAT_CLOSES;
-       dev->types[3] = _DRM_STAT_IOCTLS;
-       dev->types[4] = _DRM_STAT_LOCKS;
-       dev->types[5] = _DRM_STAT_UNLOCKS;
-
-       dev->driver = driver;
-
-       if (dev->driver->bus->agp_init) {
-               retcode = dev->driver->bus->agp_init(dev);
-               if (retcode)
-                       goto error_out_unreg;
-       }
-
-
-
-       retcode = drm_ctxbitmap_init(dev);
-       if (retcode) {
-               DRM_ERROR("Cannot allocate memory for context bitmap.\n");
-               goto error_out_unreg;
-       }
-
-       if (driver->driver_features & DRIVER_GEM) {
-               retcode = drm_gem_init(dev);
-               if (retcode) {
-                       DRM_ERROR("Cannot initialize graphics execution "
-                                 "manager (GEM)\n");
-                       goto error_out_unreg;
-               }
-       }
-
-       return 0;
-
-      error_out_unreg:
-       drm_lastclose(dev);
-       return retcode;
-}
-EXPORT_SYMBOL(drm_fill_in_dev);
-
-
 /**
  * Get a secondary minor number.
  *
@@ -427,66 +363,237 @@ static void drm_unplug_minor(struct drm_minor *minor)
  */
 void drm_put_dev(struct drm_device *dev)
 {
-       struct drm_driver *driver;
-       struct drm_map_list *r_list, *list_temp;
-
        DRM_DEBUG("\n");
 
        if (!dev) {
                DRM_ERROR("cleanup called no dev\n");
                return;
        }
-       driver = dev->driver;
 
-       drm_lastclose(dev);
+       drm_dev_unregister(dev);
+       drm_dev_free(dev);
+}
+EXPORT_SYMBOL(drm_put_dev);
 
-       if (dev->driver->unload)
-               dev->driver->unload(dev);
+void drm_unplug_dev(struct drm_device *dev)
+{
+       /* for a USB device */
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_unplug_minor(dev->control);
+       if (dev->render)
+               drm_unplug_minor(dev->render);
+       drm_unplug_minor(dev->primary);
 
-       if (dev->driver->bus->agp_destroy)
-               dev->driver->bus->agp_destroy(dev);
+       mutex_lock(&drm_global_mutex);
 
-       drm_vblank_cleanup(dev);
+       drm_device_set_unplugged(dev);
 
-       list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
-               drm_rmmap(dev, r_list->map);
-       drm_ht_remove(&dev->map_hash);
+       if (dev->open_count == 0) {
+               drm_put_dev(dev);
+       }
+       mutex_unlock(&drm_global_mutex);
+}
+EXPORT_SYMBOL(drm_unplug_dev);
 
-       drm_ctxbitmap_cleanup(dev);
+/**
+ * drm_dev_alloc - Allocate new drm device
+ * @driver: DRM driver to allocate device for
+ * @parent: Parent device object
+ *
+ * Allocate and initialize a new DRM device. No device registration is done.
+ * Call drm_dev_register() to advertise the device to user space and register it
+ * with other core subsystems.
+ *
+ * RETURNS:
+ * Pointer to new DRM device, or NULL if out of memory.
+ */
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+                                struct device *parent)
+{
+       struct drm_device *dev;
+       int ret;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_put_minor(&dev->control);
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return NULL;
 
-       if (dev->render)
-               drm_put_minor(&dev->render);
+       dev->dev = parent;
+       dev->driver = driver;
+
+       INIT_LIST_HEAD(&dev->filelist);
+       INIT_LIST_HEAD(&dev->ctxlist);
+       INIT_LIST_HEAD(&dev->vmalist);
+       INIT_LIST_HEAD(&dev->maplist);
+       INIT_LIST_HEAD(&dev->vblank_event_list);
+
+       spin_lock_init(&dev->count_lock);
+       spin_lock_init(&dev->event_lock);
+       mutex_init(&dev->struct_mutex);
+       mutex_init(&dev->ctxlist_mutex);
+
+       if (drm_ht_create(&dev->map_hash, 12))
+               goto err_free;
 
-       if (driver->driver_features & DRIVER_GEM)
+       ret = drm_ctxbitmap_init(dev);
+       if (ret) {
+               DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+               goto err_ht;
+       }
+
+       if (driver->driver_features & DRIVER_GEM) {
+               ret = drm_gem_init(dev);
+               if (ret) {
+                       DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
+                       goto err_ctxbitmap;
+               }
+       }
+
+       return dev;
+
+err_ctxbitmap:
+       drm_ctxbitmap_cleanup(dev);
+err_ht:
+       drm_ht_remove(&dev->map_hash);
+err_free:
+       kfree(dev);
+       return NULL;
+}
+EXPORT_SYMBOL(drm_dev_alloc);
+
+/**
+ * drm_dev_free - Free DRM device
+ * @dev: DRM device to free
+ *
+ * Free a DRM device that has previously been allocated via drm_dev_alloc().
+ * You must not use kfree() instead or you will leak memory.
+ *
+ * This must not be called once the device has been registered. Use drm_put_dev()
+ * instead, which then calls drm_dev_free().
+ */
+void drm_dev_free(struct drm_device *dev)
+{
+       if (dev->driver->driver_features & DRIVER_GEM)
                drm_gem_destroy(dev);
 
-       drm_put_minor(&dev->primary);
+       drm_ctxbitmap_cleanup(dev);
+       drm_ht_remove(&dev->map_hash);
 
-       list_del(&dev->driver_item);
        kfree(dev->devname);
        kfree(dev);
 }
-EXPORT_SYMBOL(drm_put_dev);
+EXPORT_SYMBOL(drm_dev_free);
 
-void drm_unplug_dev(struct drm_device *dev)
+/**
+ * drm_dev_register - Register DRM device
+ * @dev: Device to register
+ *
+ * Register the DRM device @dev with the system, advertise the device to user-space
+ * and start normal device operation. @dev must be allocated via drm_dev_alloc()
+ * previously.
+ *
+ * Never call this twice on any device!
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_dev_register(struct drm_device *dev, unsigned long flags)
 {
-       /* for a USB device */
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               drm_unplug_minor(dev->control);
-       if (dev->render)
-               drm_unplug_minor(dev->render);
-       drm_unplug_minor(dev->primary);
+       int ret;
 
        mutex_lock(&drm_global_mutex);
 
-       drm_device_set_unplugged(dev);
+       if (dev->driver->bus->agp_init) {
+               ret = dev->driver->bus->agp_init(dev);
+               if (ret)
+                       goto out_unlock;
+       }
 
-       if (dev->open_count == 0) {
-               drm_put_dev(dev);
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+               if (ret)
+                       goto err_agp;
        }
+
+       if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+               ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+               if (ret)
+                       goto err_control_node;
+       }
+
+       ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+       if (ret)
+               goto err_render_node;
+
+       if (dev->driver->load) {
+               ret = dev->driver->load(dev, flags);
+               if (ret)
+                       goto err_primary_node;
+       }
+
+       /* setup grouping for legacy outputs */
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = drm_mode_group_init_legacy_group(dev,
+                               &dev->primary->mode_group);
+               if (ret)
+                       goto err_unload;
+       }
+
+       list_add_tail(&dev->driver_item, &dev->driver->device_list);
+
+       ret = 0;
+       goto out_unlock;
+
+err_unload:
+       if (dev->driver->unload)
+               dev->driver->unload(dev);
+err_primary_node:
+       drm_put_minor(&dev->primary);
+err_render_node:
+       if (dev->render)
+               drm_put_minor(&dev->render);
+err_control_node:
+       if (dev->control)
+               drm_put_minor(&dev->control);
+err_agp:
+       if (dev->driver->bus->agp_destroy)
+               dev->driver->bus->agp_destroy(dev);
+out_unlock:
        mutex_unlock(&drm_global_mutex);
+       return ret;
 }
-EXPORT_SYMBOL(drm_unplug_dev);
+EXPORT_SYMBOL(drm_dev_register);
+
+/**
+ * drm_dev_unregister - Unregister DRM device
+ * @dev: Device to unregister
+ *
+ * Unregister the DRM device from the system. This does the reverse of
+ * drm_dev_register() but does not deallocate the device. The caller must call
+ * drm_dev_free() to free all resources.
+ */
+void drm_dev_unregister(struct drm_device *dev)
+{
+       struct drm_map_list *r_list, *list_temp;
+
+       drm_lastclose(dev);
+
+       if (dev->driver->unload)
+               dev->driver->unload(dev);
+
+       if (dev->driver->bus->agp_destroy)
+               dev->driver->bus->agp_destroy(dev);
+
+       drm_vblank_cleanup(dev);
+
+       list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+               drm_rmmap(dev, r_list->map);
+
+       if (dev->control)
+               drm_put_minor(&dev->control);
+       if (dev->render)
+               drm_put_minor(&dev->render);
+       drm_put_minor(&dev->primary);
+
+       list_del(&dev->driver_item);
+}
+EXPORT_SYMBOL(drm_dev_unregister);
index 2290b3b7383247a6dde176d71ee1e6fe18d7b4c7..dae42c79154f3de596a06c95d1e996750fe6e35c 100644 (file)
@@ -22,8 +22,8 @@
 #include <drm/drm_core.h>
 #include <drm/drmP.h>
 
-#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
-#define to_drm_connector(d) container_of(d, struct drm_connector, kdev)
+#define to_drm_minor(d) dev_get_drvdata(d)
+#define to_drm_connector(d) dev_get_drvdata(d)
 
 static struct device_type drm_sysfs_device_minor = {
        .name = "drm_minor"
@@ -162,20 +162,6 @@ void drm_sysfs_destroy(void)
        drm_class = NULL;
 }
 
-/**
- * drm_sysfs_device_release - do nothing
- * @dev: Linux device
- *
- * Normally, this would free the DRM device associated with @dev, along
- * with cleaning up any other stuff.  But we do that in the DRM core, so
- * this function can just return and hope that the core does its job.
- */
-static void drm_sysfs_device_release(struct device *dev)
-{
-       memset(dev, 0, sizeof(struct device));
-       return;
-}
-
 /*
  * Connector properties
  */
@@ -394,29 +380,26 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
        int i;
        int ret;
 
-       /* We shouldn't get called more than once for the same connector */
-       BUG_ON(device_is_registered(&connector->kdev));
-
-       connector->kdev.parent = &dev->primary->kdev;
-       connector->kdev.class = drm_class;
-       connector->kdev.release = drm_sysfs_device_release;
+       if (connector->kdev)
+               return 0;
 
+       /* We shouldn't get called more than once for the same connector */
+       connector->kdev = device_create(drm_class, dev->primary->kdev,
+                                       0, connector, "card%d-%s",
+                                       dev->primary->index, drm_get_connector_name(connector));
        DRM_DEBUG("adding \"%s\" to sysfs\n",
                  drm_get_connector_name(connector));
 
-       dev_set_name(&connector->kdev, "card%d-%s",
-                    dev->primary->index, drm_get_connector_name(connector));
-       ret = device_register(&connector->kdev);
-
-       if (ret) {
-               DRM_ERROR("failed to register connector device: %d\n", ret);
+       if (IS_ERR(connector->kdev)) {
+               DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
+               ret = PTR_ERR(connector->kdev);
                goto out;
        }
 
        /* Standard attributes */
 
        for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
-               ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
+               ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
                if (ret)
                        goto err_out_files;
        }
@@ -433,7 +416,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
                case DRM_MODE_CONNECTOR_Component:
                case DRM_MODE_CONNECTOR_TV:
                        for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
-                               ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
+                               ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
                                if (ret)
                                        goto err_out_files;
                        }
@@ -442,7 +425,7 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
                        break;
        }
 
-       ret = sysfs_create_bin_file(&connector->kdev.kobj, &edid_attr);
+       ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
        if (ret)
                goto err_out_files;
 
@@ -453,10 +436,11 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
 
 err_out_files:
        for (i = 0; i < opt_cnt; i++)
-               device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+               device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
        for (i = 0; i < attr_cnt; i++)
-               device_remove_file(&connector->kdev, &connector_attrs[i]);
-       device_unregister(&connector->kdev);
+               device_remove_file(connector->kdev, &connector_attrs[i]);
+       put_device(connector->kdev);
+       device_unregister(connector->kdev);
 
 out:
        return ret;
@@ -480,16 +464,17 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
 {
        int i;
 
-       if (!connector->kdev.parent)
+       if (!connector->kdev)
                return;
        DRM_DEBUG("removing \"%s\" from sysfs\n",
                  drm_get_connector_name(connector));
 
        for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
-               device_remove_file(&connector->kdev, &connector_attrs[i]);
-       sysfs_remove_bin_file(&connector->kdev.kobj, &edid_attr);
-       device_unregister(&connector->kdev);
-       connector->kdev.parent = NULL;
+               device_remove_file(connector->kdev, &connector_attrs[i]);
+       sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
+       put_device(connector->kdev);
+       device_unregister(connector->kdev);
+       connector->kdev = NULL;
 }
 EXPORT_SYMBOL(drm_sysfs_connector_remove);
 
@@ -508,7 +493,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev)
 
        DRM_DEBUG("generating hotplug event\n");
 
-       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
+       kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
 }
 EXPORT_SYMBOL(drm_sysfs_hotplug_event);
 
@@ -523,15 +508,8 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
  */
 int drm_sysfs_device_add(struct drm_minor *minor)
 {
-       int err;
        char *minor_str;
 
-       minor->kdev.parent = minor->dev->dev;
-
-       minor->kdev.class = drm_class;
-       minor->kdev.release = drm_sysfs_device_release;
-       minor->kdev.devt = minor->device;
-       minor->kdev.type = &drm_sysfs_device_minor;
        if (minor->type == DRM_MINOR_CONTROL)
                minor_str = "controlD%d";
         else if (minor->type == DRM_MINOR_RENDER)
@@ -539,18 +517,14 @@ int drm_sysfs_device_add(struct drm_minor *minor)
         else
                 minor_str = "card%d";
 
-       dev_set_name(&minor->kdev, minor_str, minor->index);
-
-       err = device_register(&minor->kdev);
-       if (err) {
-               DRM_ERROR("device add failed: %d\n", err);
-               goto err_out;
+       minor->kdev = device_create(drm_class, minor->dev->dev,
+                                   MKDEV(DRM_MAJOR, minor->index),
+                                   minor, minor_str, minor->index);
+       if (IS_ERR(minor->kdev)) {
+               DRM_ERROR("device create failed %ld\n", PTR_ERR(minor->kdev));
+               return PTR_ERR(minor->kdev);
        }
-
        return 0;
-
-err_out:
-       return err;
 }
 
 /**
@@ -562,9 +536,9 @@ err_out:
  */
 void drm_sysfs_device_remove(struct drm_minor *minor)
 {
-       if (minor->kdev.parent)
-               device_unregister(&minor->kdev);
-       minor->kdev.parent = NULL;
+       if (minor->kdev)
+               device_destroy(drm_class, MKDEV(DRM_MAJOR, minor->index));
+       minor->kdev = NULL;
 }
 
 
index 87664723b9ceada4019289a8f6508e0b0facc4f9..b179b70e7853b4bb4eb69c9a97a57295ae26a084 100644 (file)
@@ -7,57 +7,20 @@ int drm_get_usb_dev(struct usb_interface *interface,
                    struct drm_driver *driver)
 {
        struct drm_device *dev;
-       struct usb_device *usbdev;
        int ret;
 
        DRM_DEBUG("\n");
 
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       dev = drm_dev_alloc(driver, &interface->dev);
        if (!dev)
                return -ENOMEM;
 
-       usbdev = interface_to_usbdev(interface);
-       dev->usbdev = usbdev;
-       dev->dev = &interface->dev;
-
-       mutex_lock(&drm_global_mutex);
-
-       ret = drm_fill_in_dev(dev, NULL, driver);
-       if (ret) {
-               printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
-               goto err_g1;
-       }
-
+       dev->usbdev = interface_to_usbdev(interface);
        usb_set_intfdata(interface, dev);
-       ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
-       if (ret)
-               goto err_g1;
-
-       if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
-               ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
-               if (ret)
-                       goto err_g11;
-       }
 
-       ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+       ret = drm_dev_register(dev, 0);
        if (ret)
-               goto err_g2;
-
-       if (dev->driver->load) {
-               ret = dev->driver->load(dev, 0);
-               if (ret)
-                       goto err_g3;
-       }
-
-       /* setup the grouping for the legacy output */
-       ret = drm_mode_group_init_legacy_group(dev,
-                                              &dev->primary->mode_group);
-       if (ret)
-               goto err_g3;
-
-       list_add_tail(&dev->driver_item, &driver->device_list);
-
-       mutex_unlock(&drm_global_mutex);
+               goto err_free;
 
        DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
                 driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -65,16 +28,8 @@ int drm_get_usb_dev(struct usb_interface *interface,
 
        return 0;
 
-err_g3:
-       drm_put_minor(&dev->primary);
-err_g2:
-       if (dev->render)
-               drm_put_minor(&dev->render);
-err_g11:
-       drm_put_minor(&dev->control);
-err_g1:
-       kfree(dev);
-       mutex_unlock(&drm_global_mutex);
+err_free:
+       drm_dev_free(dev);
        return ret;
 
 }
index 45b6ef595965b7cb7391125d8b3a4caf2703b2b5..f227f544aa36f2104df33bbeacb5a8dc1e18bbd2 100644 (file)
@@ -2,6 +2,7 @@ config DRM_EXYNOS
        tristate "DRM Support for Samsung SoC EXYNOS Series"
        depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index bb82ef78ca851101458a76178656a6f5646137b1..b676006a95a0118951f8e12d04f8a6fff0c49fb0 100644 (file)
@@ -264,7 +264,6 @@ static struct drm_driver exynos_drm_driver = {
        .get_vblank_counter     = drm_vblank_count,
        .enable_vblank          = exynos_drm_crtc_enable_vblank,
        .disable_vblank         = exynos_drm_crtc_disable_vblank,
-       .gem_init_object        = exynos_drm_gem_init_object,
        .gem_free_object        = exynos_drm_gem_free_object,
        .gem_vm_ops             = &exynos_drm_gem_vm_ops,
        .dumb_create            = exynos_drm_gem_dumb_create,
@@ -286,7 +285,11 @@ static struct drm_driver exynos_drm_driver = {
 
 static int exynos_drm_platform_probe(struct platform_device *pdev)
 {
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       int ret;
+
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        return drm_platform_init(&exynos_drm_driver, pdev);
 }
index 868a14d529956545204ac34d29083c1fa60dbd10..23da72b5eae98b0bae1e400ea28d74ed5ee7e472 100644 (file)
@@ -716,20 +716,20 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 {
        /*
         * enable drm irq mode.
-        * - with irq_enabled = 1, we can use the vblank feature.
+        * - with irq_enabled = true, we can use the vblank feature.
         *
         * P.S. note that we wouldn't use drm irq handler but
         *      just specific driver own one instead because
         *      drm framework supports only one irq handler.
         */
-       drm_dev->irq_enabled = 1;
+       drm_dev->irq_enabled = true;
 
        /*
-        * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+        * with vblank_disable_allowed = true, vblank interrupt will be disabled
         * by drm timer once a current process gives up ownership of
         * vblank event.(after drm_vblank_put function is called)
         */
-       drm_dev->vblank_disable_allowed = 1;
+       drm_dev->vblank_disable_allowed = true;
 
        /* attach this sub driver to iommu mapping if supported. */
        if (is_drm_iommu_supported(drm_dev))
index 49f9cd2327575327d0b86183c1fea4e829292374..1ade191d84f4d6514fd16871cb54cbdc3208d925 100644 (file)
@@ -630,11 +630,6 @@ void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
 }
 
-int exynos_drm_gem_init_object(struct drm_gem_object *obj)
-{
-       return 0;
-}
-
 void exynos_drm_gem_free_object(struct drm_gem_object *obj)
 {
        struct exynos_drm_gem_obj *exynos_gem_obj;
index 09555afdfe9c6bf94fd798d2e899d4e46a1e8042..702ec3abe85cc88eb74aa2e7dc99b5224c175dcf 100644 (file)
@@ -135,9 +135,6 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                                unsigned int gem_handle,
                                                struct drm_file *file_priv);
 
-/* initialize gem object. */
-int exynos_drm_gem_init_object(struct drm_gem_object *obj);
-
 /* free gem object. */
 void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
 
index 4400330e4449f57268efc7b961c4025795178ef7..ddaaedde173d0814b5823e628bc1f61e07eae8d8 100644 (file)
@@ -101,7 +101,6 @@ static struct edid *vidi_get_edid(struct device *dev,
 {
        struct vidi_context *ctx = get_vidi_context(dev);
        struct edid *edid;
-       int edid_len;
 
        /*
         * the edid data comes from user side and it would be set
@@ -112,8 +111,7 @@ static struct edid *vidi_get_edid(struct device *dev,
                return ERR_PTR(-EFAULT);
        }
 
-       edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
-       edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
+       edid = drm_edid_duplicate(ctx->raw_edid);
        if (!edid) {
                DRM_DEBUG_KMS("failed to allocate edid\n");
                return ERR_PTR(-ENOMEM);
@@ -385,20 +383,20 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
 {
        /*
         * enable drm irq mode.
-        * - with irq_enabled = 1, we can use the vblank feature.
+        * - with irq_enabled = true, we can use the vblank feature.
         *
         * P.S. note that we wouldn't use drm irq handler but
         *      just specific driver own one instead because
         *      drm framework supports only one irq handler.
         */
-       drm_dev->irq_enabled = 1;
+       drm_dev->irq_enabled = true;
 
        /*
-        * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+        * with vblank_disable_allowed = true, vblank interrupt will be disabled
         * by drm timer once a current process gives up ownership of
         * vblank event.(after drm_vblank_put function is called)
         */
-       drm_dev->vblank_disable_allowed = 1;
+       drm_dev->vblank_disable_allowed = true;
 
        return 0;
 }
@@ -485,7 +483,6 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
        struct exynos_drm_manager *manager;
        struct exynos_drm_display_ops *display_ops;
        struct drm_exynos_vidi_connection *vidi = data;
-       int edid_len;
 
        if (!vidi) {
                DRM_DEBUG_KMS("user data for vidi is null.\n");
@@ -524,8 +521,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
                        DRM_DEBUG_KMS("edid data is invalid.\n");
                        return -EINVAL;
                }
-               edid_len = (1 + raw_edid->extensions) * EDID_LENGTH;
-               ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL);
+               ctx->raw_edid = drm_edid_duplicate(raw_edid);
                if (!ctx->raw_edid) {
                        DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
                        return -ENOMEM;
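
drm_edid_duplicate() is introduced elsewhere in this series and is not part of this excerpt; presumably it just centralizes the open-coded size calculation removed above, along the lines of:

    /* Assumed shape of the helper replacing the kmemdup() calls above. */
    struct edid *drm_edid_duplicate(const struct edid *edid)
    {
            return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH,
                           GFP_KERNEL);
    }
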
index 1f6e2dfaaeaef7f557f4ad74ed45530dd28cd2c6..508cf99a292df18c569cc9ffd3466e8b598629e0 100644 (file)
@@ -5,6 +5,7 @@ config DRM_GMA500
        select FB_CFB_FILLRECT
        select FB_CFB_IMAGEBLIT
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select DRM_TTM
        # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
        select ACPI_VIDEO if ACPI
index f4eb43573cada7a7a5d2af753a8dcc4a29c898e8..f88a1815d87c4e795492ead6ef1c4c07a4d31b89 100644 (file)
@@ -666,7 +666,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
        strncpy (intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
        intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
        intel_dp->adapter.algo_data = &intel_dp->algo;
-       intel_dp->adapter.dev.parent = &connector->base.kdev;
+       intel_dp->adapter.dev.parent = connector->base.kdev;
 
        if (is_edp(encoder))
                cdv_intel_edp_panel_vdd_on(encoder);
index 10ae8c52d06f820b5a1ea5bfb9257ca4ee2d40b7..e2db48a81ed0147738b27baaa76d663b29cc0f1a 100644 (file)
 #include <drm/drm_vma_manager.h>
 #include "psb_drv.h"
 
-int psb_gem_init_object(struct drm_gem_object *obj)
-{
-       return -EINVAL;
-}
-
 void psb_gem_free_object(struct drm_gem_object *obj)
 {
        struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
index 92babac362ec0b85b6f1e52f05a6402411b43d1e..2db731f00930d7b28d85e124744423c8a37dd8b4 100644 (file)
@@ -204,6 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
+       gt->npage = gt->gem.size / PAGE_SIZE;
        gt->pages = pages;
 
        return 0;
index fcb4e9ff1f20ac52215c8eed147a78a843586cb4..dd607f820a26f42475e7f84e6e1656e1c5693d8c 100644 (file)
@@ -359,7 +359,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
 
        drm_irq_install(dev);
 
-       dev->vblank_disable_allowed = 1;
+       dev->vblank_disable_allowed = true;
 
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 
@@ -646,7 +646,6 @@ static struct drm_driver driver = {
        .preclose = psb_driver_preclose,
        .postclose = psb_driver_close,
 
-       .gem_init_object = psb_gem_init_object,
        .gem_free_object = psb_gem_free_object,
        .gem_vm_ops = &psb_gem_vm_ops,
        .dumb_create = psb_gem_dumb_create,
index 4535ac7708f8a4050ed439572901c696a1db37b8..0bab46bd73d234d6757237eea9a912a603f8ec4a 100644 (file)
@@ -44,10 +44,10 @@ enum {
        CHIP_MFLD_0130 = 3,             /* Medfield */
 };
 
-#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
-#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
-#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
-#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
+#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108)
+#define IS_MRST(dev) (((dev)->pdev->device & 0xfffc) == 0x4100)
+#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130)
+#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0)
 
 /*
  * Driver definitions
@@ -837,7 +837,6 @@ extern const struct drm_connector_helper_funcs
 extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
 
 /* gem.c */
-extern int psb_gem_init_object(struct drm_gem_object *obj);
 extern void psb_gem_free_object(struct drm_gem_object *obj);
 extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
                        struct drm_file *file);
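
The IS_PSB()/IS_MRST()/IS_MFLD()/IS_CDV() macros above track the move away from the drm_device::pci_device shadow copy: the PCI device ID is now read from the underlying struct pci_dev. A short sketch of the access pattern, with a purely hypothetical helper name:

        /* gma_is_psb() is illustrative only; the driver keeps using the macro */
        static inline bool gma_is_psb(struct drm_device *dev)
        {
                return (dev->pdev->device & 0xfffe) == 0x8108;
        }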
index 029eccf3013784ccb588c0bc139c917533417d9e..ba4830342d3450593bbc1f49a5a6fded4419c510 100644 (file)
@@ -271,15 +271,15 @@ void psb_irq_preinstall(struct drm_device *dev)
 
        if (gma_power_is_on(dev))
                PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
-       if (dev->vblank_enabled[0])
+       if (dev->vblank[0].enabled)
                dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
-       if (dev->vblank_enabled[1])
+       if (dev->vblank[1].enabled)
                dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
 
        /* FIXME: Handle Medfield irq mask
-       if (dev->vblank_enabled[1])
+       if (dev->vblank[1].enabled)
                dev_priv->vdc_irq_mask |= _MDFLD_PIPEB_EVENT_FLAG;
-       if (dev->vblank_enabled[2])
+       if (dev->vblank[2].enabled)
                dev_priv->vdc_irq_mask |= _MDFLD_PIPEC_EVENT_FLAG;
        */
 
@@ -305,17 +305,17 @@ int psb_irq_postinstall(struct drm_device *dev)
        PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
        PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-       if (dev->vblank_enabled[0])
+       if (dev->vblank[0].enabled)
                psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
        else
                psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-       if (dev->vblank_enabled[1])
+       if (dev->vblank[1].enabled)
                psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
        else
                psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-       if (dev->vblank_enabled[2])
+       if (dev->vblank[2].enabled)
                psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
        else
                psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
@@ -339,13 +339,13 @@ void psb_irq_uninstall(struct drm_device *dev)
 
        PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-       if (dev->vblank_enabled[0])
+       if (dev->vblank[0].enabled)
                psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-       if (dev->vblank_enabled[1])
+       if (dev->vblank[1].enabled)
                psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
 
-       if (dev->vblank_enabled[2])
+       if (dev->vblank[2].enabled)
                psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
 
        dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
@@ -456,7 +456,7 @@ static int psb_vblank_do_wait(struct drm_device *dev,
 {
        unsigned int cur_vblank;
        int ret = 0;
-       DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+       DRM_WAIT_ON(ret, dev->vblank.queue, 3 * DRM_HZ,
                    (((cur_vblank = atomic_read(counter))
                      - *sequence) <= (1 << 23)));
        *sequence = cur_vblank;
index 60e84043aa348fe3ec4293507edeb6b9477e3dc2..400b0c4a10fba3138bbb2fc4cc260058be62cffc 100644 (file)
@@ -17,6 +17,7 @@
 
 
 
+#include <linux/hdmi.h>
 #include <linux/module.h>
 
 #include <drm/drmP.h>
@@ -549,6 +550,8 @@ tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
        buf[HB(0)] = 0x82;
        buf[HB(1)] = 0x02;
        buf[HB(2)] = 13;
+       buf[PB(1)] = HDMI_SCAN_MODE_UNDERSCAN;
+       buf[PB(3)] = HDMI_QUANTIZATION_RANGE_FULL << 2;
        buf[PB(4)] = drm_match_cea_mode(mode);
 
        tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
index ab1892eb10740fa2b3cc46fe7cba976940b6d186..249fdff305c63b50fd72c2f62a499eb2a0d3e576 100644 (file)
@@ -944,8 +944,6 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
                                 dma->buflist[vertex->idx],
                                 vertex->discard, vertex->used);
 
-       atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
-       atomic_inc(&dev->counts[_DRM_STAT_DMA]);
        sarea_priv->last_enqueue = dev_priv->counter - 1;
        sarea_priv->last_dispatch = (int)hw_status[5];
 
@@ -1105,8 +1103,6 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
        i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
                             mc->last_render);
 
-       atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
-       atomic_inc(&dev->counts[_DRM_STAT_DMA]);
        sarea_priv->last_enqueue = dev_priv->counter - 1;
        sarea_priv->last_dispatch = (int)hw_status[5];
 
@@ -1197,13 +1193,6 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
 
 int i810_driver_load(struct drm_device *dev, unsigned long flags)
 {
-       /* i810 has 4 more counters */
-       dev->counters += 4;
-       dev->types[6] = _DRM_STAT_IRQ;
-       dev->types[7] = _DRM_STAT_PRIMARY;
-       dev->types[8] = _DRM_STAT_SECONDARY;
-       dev->types[9] = _DRM_STAT_DMA;
-
        pci_set_master(dev->pdev);
 
        return 0;
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
new file mode 100644 (file)
index 0000000..6199d0b
--- /dev/null
@@ -0,0 +1,67 @@
+config DRM_I915
+       tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
+       depends on DRM
+       depends on AGP
+       depends on AGP_INTEL
+       # we need shmfs for the swappable backing store, and in particular
+       # the shmem_readpage() which depends upon tmpfs
+       select SHMEM
+       select TMPFS
+       select DRM_KMS_HELPER
+       # i915 depends on ACPI_VIDEO when ACPI is enabled
+       # but for select to work, need to select ACPI_VIDEO's dependencies, ick
+       select BACKLIGHT_LCD_SUPPORT if ACPI
+       select BACKLIGHT_CLASS_DEVICE if ACPI
+       select VIDEO_OUTPUT_CONTROL if ACPI
+       select INPUT if ACPI
+       select ACPI_VIDEO if ACPI
+       select ACPI_BUTTON if ACPI
+       help
+         Choose this option if you have a system that has "Intel Graphics
+         Media Accelerator" or "HD Graphics" integrated graphics,
+         including 830M, 845G, 852GM, 855GM, 865G, 915G, 945G, 965G,
+         G35, G41, G43, G45 chipsets and Celeron, Pentium, Core i3,
+         Core i5, Core i7 as well as Atom CPUs with integrated graphics.
+         If M is selected, the module will be called i915.  AGP support
+         is required for this driver to work. This driver is used by
+         the Intel driver in X.org 6.8 and XFree86 4.4 and above. It
+         replaces the older i830 module that supported a subset of the
+         hardware in older X.org releases.
+
+         Note that the older i810/i815 chipsets require the use of the
+         i810 driver instead, and the Atom z5xx series has an entirely
+         different implementation.
+
+config DRM_I915_KMS
+       bool "Enable modesetting on intel by default"
+       depends on DRM_I915
+       help
+         Choose this option if you want kernel modesetting enabled by default,
+         and you have a new enough userspace to support this. Running old
+         userspaces with this enabled will cause pain.  Note that this causes
+         the driver to bind to PCI devices, which precludes loading things
+         like intelfb.
+
+config DRM_I915_FBDEV
+       bool "Enable legacy fbdev support for the modesettting intel driver"
+       depends on DRM_I915
+       select DRM_KMS_FB_HELPER
+       select FB_CFB_FILLRECT
+       select FB_CFB_COPYAREA
+       select FB_CFB_IMAGEBLIT
+       default y
+       help
+         Choose this option if you have a need for the legacy fbdev
+         support. Note that this support also provide the linux console
+         support on top of the intel modesetting driver.
+
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+       bool "Enable preliminary support for prerelease Intel hardware by default"
+       depends on DRM_I915
+       help
+         Choose this option if you have prerelease Intel hardware and want the
+         i915 driver to support it by default.  You can enable such support at
+         runtime with the module option i915.preliminary_hw_support=1; this
+         option changes the default for that module option.
+
+         If in doubt, say "N".
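
The new Kconfig file above splits legacy fbdev/console support (DRM_I915_FBDEV) out of the core DRM_I915 option and adds DRM_I915_PRELIMINARY_HW_SUPPORT as a build-time default for the existing i915.preliminary_hw_support module parameter. Purely as an illustration, built only from the option names in this fragment, a .config selection for i915 as a module with kernel modesetting on by default and fbdev emulation enabled could look like:

        CONFIG_DRM_I915=m
        CONFIG_DRM_I915_KMS=y
        CONFIG_DRM_I915_FBDEV=y
        # CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT is not set

The depends/select lines shown above (DRM, AGP, AGP_INTEL, DRM_KMS_HELPER and friends) still have to be satisfied for these options to be selectable.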
index b8449a84a0dcab83295696322d718c49924d3f4a..41838eaa799c5afcb7510bae31118099d4d848a3 100644 (file)
@@ -21,6 +21,9 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
          intel_display.o \
          intel_crt.o \
          intel_lvds.o \
+         intel_dsi.o \
+         intel_dsi_cmd.o \
+         intel_dsi_pll.o \
          intel_bios.o \
          intel_ddi.o \
          intel_dp.o \
@@ -30,7 +33,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
          intel_panel.o \
          intel_pm.o \
          intel_i2c.o \
-         intel_fb.o \
          intel_tv.o \
          intel_dvo.o \
          intel_ringbuffer.o \
@@ -51,6 +53,8 @@ i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 
 i915-$(CONFIG_ACPI)    += intel_acpi.o
 
+i915-$(CONFIG_DRM_I915_FBDEV) += intel_fbdev.o
+
 obj-$(CONFIG_DRM_I915)  += i915.o
 
 CFLAGS_i915_trace_points.o := -I$(src)
index 33a62ad80100f8912e7e56f46a06c147e5203736..312163379db9e1ecdb064d6d3cc0e8a885b726c6 100644 (file)
@@ -76,17 +76,6 @@ struct intel_dvo_dev_ops {
        int (*mode_valid)(struct intel_dvo_device *dvo,
                          struct drm_display_mode *mode);
 
-       /*
-        * Callback to adjust the mode to be set in the CRTC.
-        *
-        * This allows an output to adjust the clock or even the entire set of
-        * timings, which is used for panels with fixed timings or for
-        * buses with clock limitations.
-        */
-       bool (*mode_fixup)(struct intel_dvo_device *dvo,
-                          const struct drm_display_mode *mode,
-                          struct drm_display_mode *adjusted_mode);
-
        /*
         * Callback for preparing mode changes on an output
         */
index a6f4cb5af18529308096b827bd50b68b94181f79..5c45e9e598deb0ef88b376e378ea2628ea4a6b0a 100644 (file)
@@ -27,6 +27,8 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/circ_buf.h>
+#include <linux/ctype.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/export.h>
@@ -38,9 +40,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#define DRM_I915_RING_DEBUG 1
-
-
 #if defined(CONFIG_DEBUG_FS)
 
 enum {
@@ -54,6 +53,32 @@ static const char *yesno(int v)
        return v ? "yes" : "no";
 }
 
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+                      struct dentry *ent,
+                      const void *key)
+{
+       struct drm_info_node *node;
+
+       node = kmalloc(sizeof(*node), GFP_KERNEL);
+       if (node == NULL) {
+               debugfs_remove(ent);
+               return -ENOMEM;
+       }
+
+       node->minor = minor;
+       node->dent = ent;
+       node->info_ent = (void *) key;
+
+       mutex_lock(&minor->debugfs_lock);
+       list_add(&node->list, &minor->debugfs_list);
+       mutex_unlock(&minor->debugfs_lock);
+
+       return 0;
+}
+
 static int i915_capabilities(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -145,6 +170,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                seq_printf(m, " (%s)", obj->ring->name);
 }
 
+static void describe_ctx(struct seq_file *m, struct i915_hw_context *ctx)
+{
+       seq_putc(m, ctx->is_initialized ? 'I' : 'i');
+       seq_putc(m, ctx->remap_slice ? 'R' : 'r');
+       seq_putc(m, ' ');
+}
+
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -843,6 +875,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        if (IS_GEN5(dev)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1321,6 +1355,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                return 0;
        }
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
@@ -1395,12 +1431,12 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 {
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
-       drm_i915_private_t *dev_priv = dev->dev_private;
-       struct intel_fbdev *ifbdev;
+       struct intel_fbdev *ifbdev = NULL;
        struct intel_framebuffer *fb;
-       int ret;
 
-       ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+#ifdef CONFIG_DRM_I915_FBDEV
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (ret)
                return ret;
 
@@ -1416,10 +1452,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
        describe_obj(m, fb->obj);
        seq_putc(m, '\n');
        mutex_unlock(&dev->mode_config.mutex);
+#endif
 
        mutex_lock(&dev->mode_config.fb_lock);
        list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
-               if (&fb->base == ifbdev->helper.fb)
+               if (ifbdev && &fb->base == ifbdev->helper.fb)
                        continue;
 
                seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
@@ -1442,6 +1479,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
+       struct i915_hw_context *ctx;
        int ret, i;
 
        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
@@ -1460,12 +1498,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
                seq_putc(m, '\n');
        }
 
-       for_each_ring(ring, dev_priv, i) {
-               if (ring->default_context) {
-                       seq_printf(m, "HW default context %s ring ", ring->name);
-                       describe_obj(m, ring->default_context->obj);
-                       seq_putc(m, '\n');
-               }
+       list_for_each_entry(ctx, &dev_priv->context_list, link) {
+               seq_puts(m, "HW context ");
+               describe_ctx(m, ctx);
+               for_each_ring(ring, dev_priv, i)
+                       if (ring->default_context == ctx)
+                               seq_printf(m, "(default context %s) ", ring->name);
+
+               describe_obj(m, ctx->obj);
+               seq_putc(m, '\n');
        }
 
        mutex_unlock(&dev->mode_config.mutex);
@@ -1610,27 +1651,27 @@ static int i915_dpio_info(struct seq_file *m, void *data)
        seq_printf(m, "DPIO_CTL: 0x%08x\n", I915_READ(DPIO_CTL));
 
        seq_printf(m, "DPIO_DIV_A: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, _DPIO_DIV_A));
+                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_A));
        seq_printf(m, "DPIO_DIV_B: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, _DPIO_DIV_B));
+                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_DIV_B));
 
        seq_printf(m, "DPIO_REFSFR_A: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, _DPIO_REFSFR_A));
+                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_A));
        seq_printf(m, "DPIO_REFSFR_B: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, _DPIO_REFSFR_B));
+                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_REFSFR_B));
 
        seq_printf(m, "DPIO_CORE_CLK_A: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_A));
+                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_A));
        seq_printf(m, "DPIO_CORE_CLK_B: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, _DPIO_CORE_CLK_B));
+                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_CORE_CLK_B));
 
        seq_printf(m, "DPIO_LPF_COEFF_A: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_A));
+                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_A));
        seq_printf(m, "DPIO_LPF_COEFF_B: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, _DPIO_LPF_COEFF_B));
+                  vlv_dpio_read(dev_priv, PIPE_A, _DPIO_LPF_COEFF_B));
 
        seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
-                  vlv_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
+                  vlv_dpio_read(dev_priv, PIPE_A, DPIO_FASTCLK_DISABLE));
 
        mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1655,126 +1696,20 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
        struct drm_info_node *node = m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 psrstat, psrperf;
+       u32 psrperf = 0;
+       bool enabled = false;
 
-       if (!IS_HASWELL(dev)) {
-               seq_puts(m, "PSR not supported on this platform\n");
-       } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
-               seq_puts(m, "PSR enabled\n");
-       } else {
-               seq_puts(m, "PSR disabled: ");
-               switch (dev_priv->no_psr_reason) {
-               case PSR_NO_SOURCE:
-                       seq_puts(m, "not supported on this platform");
-                       break;
-               case PSR_NO_SINK:
-                       seq_puts(m, "not supported by panel");
-                       break;
-               case PSR_MODULE_PARAM:
-                       seq_puts(m, "disabled by flag");
-                       break;
-               case PSR_CRTC_NOT_ACTIVE:
-                       seq_puts(m, "crtc not active");
-                       break;
-               case PSR_PWR_WELL_ENABLED:
-                       seq_puts(m, "power well enabled");
-                       break;
-               case PSR_NOT_TILED:
-                       seq_puts(m, "not tiled");
-                       break;
-               case PSR_SPRITE_ENABLED:
-                       seq_puts(m, "sprite enabled");
-                       break;
-               case PSR_S3D_ENABLED:
-                       seq_puts(m, "stereo 3d enabled");
-                       break;
-               case PSR_INTERLACED_ENABLED:
-                       seq_puts(m, "interlaced enabled");
-                       break;
-               case PSR_HSW_NOT_DDIA:
-                       seq_puts(m, "HSW ties PSR to DDI A (eDP)");
-                       break;
-               default:
-                       seq_puts(m, "unknown reason");
-               }
-               seq_puts(m, "\n");
-               return 0;
-       }
-
-       psrstat = I915_READ(EDP_PSR_STATUS_CTL);
-
-       seq_puts(m, "PSR Current State: ");
-       switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
-       case EDP_PSR_STATUS_STATE_IDLE:
-               seq_puts(m, "Reset state\n");
-               break;
-       case EDP_PSR_STATUS_STATE_SRDONACK:
-               seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
-               break;
-       case EDP_PSR_STATUS_STATE_SRDENT:
-               seq_puts(m, "SRD entry\n");
-               break;
-       case EDP_PSR_STATUS_STATE_BUFOFF:
-               seq_puts(m, "Wait for buffer turn off\n");
-               break;
-       case EDP_PSR_STATUS_STATE_BUFON:
-               seq_puts(m, "Wait for buffer turn on\n");
-               break;
-       case EDP_PSR_STATUS_STATE_AUXACK:
-               seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
-               break;
-       case EDP_PSR_STATUS_STATE_SRDOFFACK:
-               seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
-               break;
-       default:
-               seq_puts(m, "Unknown\n");
-               break;
-       }
-
-       seq_puts(m, "Link Status: ");
-       switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
-       case EDP_PSR_STATUS_LINK_FULL_OFF:
-               seq_puts(m, "Link is fully off\n");
-               break;
-       case EDP_PSR_STATUS_LINK_FULL_ON:
-               seq_puts(m, "Link is fully on\n");
-               break;
-       case EDP_PSR_STATUS_LINK_STANDBY:
-               seq_puts(m, "Link is in standby\n");
-               break;
-       default:
-               seq_puts(m, "Unknown\n");
-               break;
-       }
-
-       seq_printf(m, "PSR Entry Count: %u\n",
-                  psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
-                  EDP_PSR_STATUS_COUNT_MASK);
-
-       seq_printf(m, "Max Sleep Timer Counter: %u\n",
-                  psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
-                  EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
-
-       seq_printf(m, "Had AUX error: %s\n",
-                  yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
+       seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
+       seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
 
-       seq_printf(m, "Sending AUX: %s\n",
-                  yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
+       enabled = HAS_PSR(dev) &&
+               I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
+       seq_printf(m, "Enabled: %s\n", yesno(enabled));
 
-       seq_printf(m, "Sending Idle: %s\n",
-                  yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
-
-       seq_printf(m, "Sending TP2 TP3: %s\n",
-                  yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
-
-       seq_printf(m, "Sending TP1: %s\n",
-                  yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
-
-       seq_printf(m, "Idle Count: %u\n",
-                  psrstat & EDP_PSR_STATUS_IDLE_MASK);
-
-       psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
-       seq_printf(m, "Performance Counter: %u\n", psrperf);
+       if (HAS_PSR(dev))
+               psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) &
+                       EDP_PSR_PERF_CNT_MASK;
+       seq_printf(m, "Performance_Counter: %u\n", psrperf);
 
        return 0;
 }
@@ -1825,6 +1760,577 @@ static int i915_pc8_status(struct seq_file *m, void *unused)
        return 0;
 }
 
+struct pipe_crc_info {
+       const char *name;
+       struct drm_device *dev;
+       enum pipe pipe;
+};
+
+static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
+{
+       struct pipe_crc_info *info = inode->i_private;
+       struct drm_i915_private *dev_priv = info->dev->dev_private;
+       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+       spin_lock_irq(&pipe_crc->lock);
+
+       if (pipe_crc->opened) {
+               spin_unlock_irq(&pipe_crc->lock);
+               return -EBUSY; /* already open */
+       }
+
+       pipe_crc->opened = true;
+       filep->private_data = inode->i_private;
+
+       spin_unlock_irq(&pipe_crc->lock);
+
+       return 0;
+}
+
+static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
+{
+       struct pipe_crc_info *info = inode->i_private;
+       struct drm_i915_private *dev_priv = info->dev->dev_private;
+       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+
+       spin_lock_irq(&pipe_crc->lock);
+       pipe_crc->opened = false;
+       spin_unlock_irq(&pipe_crc->lock);
+
+       return 0;
+}
+
+/* (6 fields, 8 chars each, space separated (5) + '\n') */
+#define PIPE_CRC_LINE_LEN      (6 * 8 + 5 + 1)
+/* account for the trailing '\0' */
+#define PIPE_CRC_BUFFER_LEN    (PIPE_CRC_LINE_LEN + 1)
+
+static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
+{
+       assert_spin_locked(&pipe_crc->lock);
+       return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
+                       INTEL_PIPE_CRC_ENTRIES_NR);
+}
+
+static ssize_t
+i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
+                  loff_t *pos)
+{
+       struct pipe_crc_info *info = filep->private_data;
+       struct drm_device *dev = info->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
+       char buf[PIPE_CRC_BUFFER_LEN];
+       int head, tail, n_entries, n;
+       ssize_t bytes_read;
+
+       /*
+        * Don't allow user space to provide buffers not big enough to hold
+        * a line of data.
+        */
+       if (count < PIPE_CRC_LINE_LEN)
+               return -EINVAL;
+
+       if (pipe_crc->source == INTEL_PIPE_CRC_SOURCE_NONE)
+               return 0;
+
+       /* nothing to read */
+       spin_lock_irq(&pipe_crc->lock);
+       while (pipe_crc_data_count(pipe_crc) == 0) {
+               int ret;
+
+               if (filep->f_flags & O_NONBLOCK) {
+                       spin_unlock_irq(&pipe_crc->lock);
+                       return -EAGAIN;
+               }
+
+               ret = wait_event_interruptible_lock_irq(pipe_crc->wq,
+                               pipe_crc_data_count(pipe_crc), pipe_crc->lock);
+               if (ret) {
+                       spin_unlock_irq(&pipe_crc->lock);
+                       return ret;
+               }
+       }
+
+       /* We now have one or more entries to read */
+       head = pipe_crc->head;
+       tail = pipe_crc->tail;
+       n_entries = min((size_t)CIRC_CNT(head, tail, INTEL_PIPE_CRC_ENTRIES_NR),
+                       count / PIPE_CRC_LINE_LEN);
+       spin_unlock_irq(&pipe_crc->lock);
+
+       bytes_read = 0;
+       n = 0;
+       do {
+               struct intel_pipe_crc_entry *entry = &pipe_crc->entries[tail];
+               int ret;
+
+               bytes_read += snprintf(buf, PIPE_CRC_BUFFER_LEN,
+                                      "%8u %8x %8x %8x %8x %8x\n",
+                                      entry->frame, entry->crc[0],
+                                      entry->crc[1], entry->crc[2],
+                                      entry->crc[3], entry->crc[4]);
+
+               ret = copy_to_user(user_buf + n * PIPE_CRC_LINE_LEN,
+                                  buf, PIPE_CRC_LINE_LEN);
+               if (ret == PIPE_CRC_LINE_LEN)
+                       return -EFAULT;
+
+               BUILD_BUG_ON_NOT_POWER_OF_2(INTEL_PIPE_CRC_ENTRIES_NR);
+               tail = (tail + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+               n++;
+       } while (--n_entries);
+
+       spin_lock_irq(&pipe_crc->lock);
+       pipe_crc->tail = tail;
+       spin_unlock_irq(&pipe_crc->lock);
+
+       return bytes_read;
+}
+
+static const struct file_operations i915_pipe_crc_fops = {
+       .owner = THIS_MODULE,
+       .open = i915_pipe_crc_open,
+       .read = i915_pipe_crc_read,
+       .release = i915_pipe_crc_release,
+};
+
+static struct pipe_crc_info i915_pipe_crc_data[I915_MAX_PIPES] = {
+       {
+               .name = "i915_pipe_A_crc",
+               .pipe = PIPE_A,
+       },
+       {
+               .name = "i915_pipe_B_crc",
+               .pipe = PIPE_B,
+       },
+       {
+               .name = "i915_pipe_C_crc",
+               .pipe = PIPE_C,
+       },
+};
+
+static int i915_pipe_crc_create(struct dentry *root, struct drm_minor *minor,
+                               enum pipe pipe)
+{
+       struct drm_device *dev = minor->dev;
+       struct dentry *ent;
+       struct pipe_crc_info *info = &i915_pipe_crc_data[pipe];
+
+       info->dev = dev;
+       ent = debugfs_create_file(info->name, S_IRUGO, root, info,
+                                 &i915_pipe_crc_fops);
+       if (IS_ERR(ent))
+               return PTR_ERR(ent);
+
+       return drm_add_fake_info_node(minor, ent, info);
+}
+
+static const char * const pipe_crc_sources[] = {
+       "none",
+       "plane1",
+       "plane2",
+       "pf",
+       "pipe",
+       "TV",
+       "DP-B",
+       "DP-C",
+       "DP-D",
+};
+
+static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
+{
+       BUILD_BUG_ON(ARRAY_SIZE(pipe_crc_sources) != INTEL_PIPE_CRC_SOURCE_MAX);
+       return pipe_crc_sources[source];
+}
+
+static int display_crc_ctl_show(struct seq_file *m, void *data)
+{
+       struct drm_device *dev = m->private;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       for (i = 0; i < I915_MAX_PIPES; i++)
+               seq_printf(m, "%c %s\n", pipe_name(i),
+                          pipe_crc_source_name(dev_priv->pipe_crc[i].source));
+
+       return 0;
+}
+
+static int display_crc_ctl_open(struct inode *inode, struct file *file)
+{
+       struct drm_device *dev = inode->i_private;
+
+       return single_open(file, display_crc_ctl_show, dev);
+}
+
+static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source source,
+                                uint32_t *val)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_INCLUDE_BORDER_I8XX;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int vlv_pipe_crc_ctl_reg(enum intel_pipe_crc_source source,
+                               uint32_t *val)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_VLV;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_DP_B:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_VLV;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_DP_C:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_VLV;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
+                                enum intel_pipe_crc_source source,
+                                uint32_t *val)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_I9XX;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_TV:
+               if (!SUPPORTS_TV(dev))
+                       return -EINVAL;
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_DP_B:
+               if (!IS_G4X(dev))
+                       return -EINVAL;
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_DP_C:
+               if (!IS_G4X(dev))
+                       return -EINVAL;
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_DP_D:
+               if (!IS_G4X(dev))
+                       return -EINVAL;
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source source,
+                               uint32_t *val)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PLANE1:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_ILK;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PLANE2:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_ILK;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PIPE:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PIPE_ILK;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int ivb_pipe_crc_ctl_reg(enum intel_pipe_crc_source source,
+                               uint32_t *val)
+{
+       switch (source) {
+       case INTEL_PIPE_CRC_SOURCE_PLANE1:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PRIMARY_IVB;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PLANE2:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_PF:
+               *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
+               break;
+       case INTEL_PIPE_CRC_SOURCE_NONE:
+               *val = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
+                              enum intel_pipe_crc_source source)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+       u32 val;
+       int ret;
+
+       if (pipe_crc->source == source)
+               return 0;
+
+       /* forbid changing the source without going back to 'none' */
+       if (pipe_crc->source && source)
+               return -EINVAL;
+
+       if (IS_GEN2(dev))
+               ret = i8xx_pipe_crc_ctl_reg(source, &val);
+       else if (INTEL_INFO(dev)->gen < 5)
+               ret = i9xx_pipe_crc_ctl_reg(dev, source, &val);
+       else if (IS_VALLEYVIEW(dev))
+               ret = vlv_pipe_crc_ctl_reg(source, &val);
+       else if (IS_GEN5(dev) || IS_GEN6(dev))
+               ret = ilk_pipe_crc_ctl_reg(source, &val);
+       else
+               ret = ivb_pipe_crc_ctl_reg(source, &val);
+
+       if (ret != 0)
+               return ret;
+
+       /* none -> real source transition */
+       if (source) {
+               DRM_DEBUG_DRIVER("collecting CRCs for pipe %c, %s\n",
+                                pipe_name(pipe), pipe_crc_source_name(source));
+
+               pipe_crc->entries = kzalloc(sizeof(*pipe_crc->entries) *
+                                           INTEL_PIPE_CRC_ENTRIES_NR,
+                                           GFP_KERNEL);
+               if (!pipe_crc->entries)
+                       return -ENOMEM;
+
+               spin_lock_irq(&pipe_crc->lock);
+               pipe_crc->head = 0;
+               pipe_crc->tail = 0;
+               spin_unlock_irq(&pipe_crc->lock);
+       }
+
+       pipe_crc->source = source;
+
+       I915_WRITE(PIPE_CRC_CTL(pipe), val);
+       POSTING_READ(PIPE_CRC_CTL(pipe));
+
+       /* real source -> none transition */
+       if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
+               struct intel_pipe_crc_entry *entries;
+
+               DRM_DEBUG_DRIVER("stopping CRCs for pipe %c\n",
+                                pipe_name(pipe));
+
+               intel_wait_for_vblank(dev, pipe);
+
+               spin_lock_irq(&pipe_crc->lock);
+               entries = pipe_crc->entries;
+               pipe_crc->entries = NULL;
+               spin_unlock_irq(&pipe_crc->lock);
+
+               kfree(entries);
+       }
+
+       return 0;
+}
+
+/*
+ * Parse pipe CRC command strings:
+ *   command: wsp* object wsp+ name wsp+ source wsp*
+ *   object: 'pipe'
+ *   name: (A | B | C)
+ *   source: (none | plane1 | plane2 | pf)
+ *   wsp: (#0x20 | #0x9 | #0xA)+
+ *
+ * eg.:
+ *  "pipe A plane1"  ->  Start CRC computations on plane1 of pipe A
+ *  "pipe A none"    ->  Stop CRC
+ */
+static int display_crc_ctl_tokenize(char *buf, char *words[], int max_words)
+{
+       int n_words = 0;
+
+       while (*buf) {
+               char *end;
+
+               /* skip leading white space */
+               buf = skip_spaces(buf);
+               if (!*buf)
+                       break;  /* end of buffer */
+
+               /* find end of word */
+               for (end = buf; *end && !isspace(*end); end++)
+                       ;
+
+               if (n_words == max_words) {
+                       DRM_DEBUG_DRIVER("too many words, allowed <= %d\n",
+                                        max_words);
+                       return -EINVAL; /* ran out of words[] before bytes */
+               }
+
+               if (*end)
+                       *end++ = '\0';
+               words[n_words++] = buf;
+               buf = end;
+       }
+
+       return n_words;
+}
+
+enum intel_pipe_crc_object {
+       PIPE_CRC_OBJECT_PIPE,
+};
+
+static const char * const pipe_crc_objects[] = {
+       "pipe",
+};
+
+static int
+display_crc_ctl_parse_object(const char *buf, enum intel_pipe_crc_object *o)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(pipe_crc_objects); i++)
+               if (!strcmp(buf, pipe_crc_objects[i])) {
+                       *o = i;
+                       return 0;
+                   }
+
+       return -EINVAL;
+}
+
+static int display_crc_ctl_parse_pipe(const char *buf, enum pipe *pipe)
+{
+       const char name = buf[0];
+
+       if (name < 'A' || name >= pipe_name(I915_MAX_PIPES))
+               return -EINVAL;
+
+       *pipe = name - 'A';
+
+       return 0;
+}
+
+static int
+display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
+               if (!strcmp(buf, pipe_crc_sources[i])) {
+                       *s = i;
+                       return 0;
+                   }
+
+       return -EINVAL;
+}
+
+static int display_crc_ctl_parse(struct drm_device *dev, char *buf, size_t len)
+{
+#define N_WORDS 3
+       int n_words;
+       char *words[N_WORDS];
+       enum pipe pipe;
+       enum intel_pipe_crc_object object;
+       enum intel_pipe_crc_source source;
+
+       n_words = display_crc_ctl_tokenize(buf, words, N_WORDS);
+       if (n_words != N_WORDS) {
+               DRM_DEBUG_DRIVER("tokenize failed, a command is %d words\n",
+                                N_WORDS);
+               return -EINVAL;
+       }
+
+       if (display_crc_ctl_parse_object(words[0], &object) < 0) {
+               DRM_DEBUG_DRIVER("unknown object %s\n", words[0]);
+               return -EINVAL;
+       }
+
+       if (display_crc_ctl_parse_pipe(words[1], &pipe) < 0) {
+               DRM_DEBUG_DRIVER("unknown pipe %s\n", words[1]);
+               return -EINVAL;
+       }
+
+       if (display_crc_ctl_parse_source(words[2], &source) < 0) {
+               DRM_DEBUG_DRIVER("unknown source %s\n", words[2]);
+               return -EINVAL;
+       }
+
+       return pipe_crc_set_source(dev, pipe, source);
+}
+
+static ssize_t display_crc_ctl_write(struct file *file, const char __user *ubuf,
+                                    size_t len, loff_t *offp)
+{
+       struct seq_file *m = file->private_data;
+       struct drm_device *dev = m->private;
+       char *tmpbuf;
+       int ret;
+
+       if (len == 0)
+               return 0;
+
+       if (len > PAGE_SIZE - 1) {
+               DRM_DEBUG_DRIVER("expected <%lu bytes into pipe crc control\n",
+                                PAGE_SIZE);
+               return -E2BIG;
+       }
+
+       tmpbuf = kmalloc(len + 1, GFP_KERNEL);
+       if (!tmpbuf)
+               return -ENOMEM;
+
+       if (copy_from_user(tmpbuf, ubuf, len)) {
+               ret = -EFAULT;
+               goto out;
+       }
+       tmpbuf[len] = '\0';
+
+       ret = display_crc_ctl_parse(dev, tmpbuf, len);
+
+out:
+       kfree(tmpbuf);
+       if (ret < 0)
+               return ret;
+
+       *offp += len;
+       return len;
+}
+
+static const struct file_operations i915_display_crc_ctl_fops = {
+       .owner = THIS_MODULE,
+       .open = display_crc_ctl_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = display_crc_ctl_write
+};
+
 static int
 i915_wedged_get(void *data, u64 *val)
 {
@@ -1885,6 +2391,72 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
                        i915_ring_stop_get, i915_ring_stop_set,
                        "0x%08llx\n");
 
+static int
+i915_ring_missed_irq_get(void *data, u64 *val)
+{
+       struct drm_device *dev = data;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       *val = dev_priv->gpu_error.missed_irq_rings;
+       return 0;
+}
+
+static int
+i915_ring_missed_irq_set(void *data, u64 val)
+{
+       struct drm_device *dev = data;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       /* Lock against concurrent debugfs callers */
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       dev_priv->gpu_error.missed_irq_rings = val;
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
+                       i915_ring_missed_irq_get, i915_ring_missed_irq_set,
+                       "0x%08llx\n");
+
+static int
+i915_ring_test_irq_get(void *data, u64 *val)
+{
+       struct drm_device *dev = data;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       *val = dev_priv->gpu_error.test_irq_rings;
+
+       return 0;
+}
+
+static int
+i915_ring_test_irq_set(void *data, u64 val)
+{
+       struct drm_device *dev = data;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
+
+       /* Lock against concurrent debugfs callers */
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+
+       dev_priv->gpu_error.test_irq_rings = val;
+       mutex_unlock(&dev->struct_mutex);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
+                       i915_ring_test_irq_get, i915_ring_test_irq_set,
+                       "0x%08llx\n");
+
 #define DROP_UNBOUND 0x1
 #define DROP_BOUND 0x2
 #define DROP_RETIRE 0x4
@@ -1972,6 +2544,8 @@ i915_max_freq_get(void *data, u64 *val)
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
@@ -1996,6 +2570,8 @@ i915_max_freq_set(void *data, u64 val)
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2034,6 +2610,8 @@ i915_min_freq_get(void *data, u64 *val)
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
@@ -2058,6 +2636,8 @@ i915_min_freq_set(void *data, u64 val)
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2136,32 +2716,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
                        i915_cache_sharing_get, i915_cache_sharing_set,
                        "%llu\n");
 
-/* As the drm_debugfs_init() routines are called before dev->dev_private is
- * allocated we need to hook into the minor for release. */
-static int
-drm_add_fake_info_node(struct drm_minor *minor,
-                      struct dentry *ent,
-                      const void *key)
-{
-       struct drm_info_node *node;
-
-       node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
-       if (node == NULL) {
-               debugfs_remove(ent);
-               return -ENOMEM;
-       }
-
-       node->minor = minor;
-       node->dent = ent;
-       node->info_ent = (void *) key;
-
-       mutex_lock(&minor->debugfs_lock);
-       list_add(&node->list, &minor->debugfs_list);
-       mutex_unlock(&minor->debugfs_lock);
-
-       return 0;
-}
-
 static int i915_forcewake_open(struct inode *inode, struct file *file)
 {
        struct drm_device *dev = inode->i_private;
@@ -2278,11 +2832,28 @@ static struct i915_debugfs_files {
        {"i915_min_freq", &i915_min_freq_fops},
        {"i915_cache_sharing", &i915_cache_sharing_fops},
        {"i915_ring_stop", &i915_ring_stop_fops},
+       {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
+       {"i915_ring_test_irq", &i915_ring_test_irq_fops},
        {"i915_gem_drop_caches", &i915_drop_caches_fops},
        {"i915_error_state", &i915_error_state_fops},
        {"i915_next_seqno", &i915_next_seqno_fops},
+       {"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
 };
 
+void intel_display_crc_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+               struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[i];
+
+               pipe_crc->opened = false;
+               spin_lock_init(&pipe_crc->lock);
+               init_waitqueue_head(&pipe_crc->wq);
+       }
+}
+
 int i915_debugfs_init(struct drm_minor *minor)
 {
        int ret, i;
@@ -2291,6 +2862,12 @@ int i915_debugfs_init(struct drm_minor *minor)
        if (ret)
                return ret;
 
+       for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+               ret = i915_pipe_crc_create(minor->debugfs_root, minor, i);
+               if (ret)
+                       return ret;
+       }
+
        for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
                ret = i915_debugfs_create(minor->debugfs_root, minor,
                                          i915_debugfs_files[i].name,
@@ -2310,8 +2887,17 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 
        drm_debugfs_remove_files(i915_debugfs_list,
                                 I915_DEBUGFS_ENTRIES, minor);
+
        drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
                                 1, minor);
+
+       for (i = 0; i < ARRAY_SIZE(i915_pipe_crc_data); i++) {
+               struct drm_info_list *info_list =
+                       (struct drm_info_list *)&i915_pipe_crc_data[i];
+
+               drm_debugfs_remove_files(info_list, 1, minor);
+       }
+
        for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
                struct drm_info_list *info_list =
                        (struct drm_info_list *) i915_debugfs_files[i].fops;
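
Taken together, the additions above expose a small debugfs interface: writing a command of the form "pipe <A|B|C> <source>" to i915_display_crc_ctl (see the parser comment earlier in this file) starts CRC collection, and each line read from the matching i915_pipe_<X>_crc file carries a frame counter followed by five CRC words. A hedged userspace sketch, assuming the conventional /sys/kernel/debug/dri/<minor>/ layout and minor 0:

        #include <stdio.h>

        int main(void)
        {
                const char *ctl = "/sys/kernel/debug/dri/0/i915_display_crc_ctl";
                const char *crc = "/sys/kernel/debug/dri/0/i915_pipe_A_crc";
                char line[64];
                FILE *f;
                int i;

                /* arm CRC collection on pipe A, primary plane */
                f = fopen(ctl, "w");
                if (!f)
                        return 1;
                fputs("pipe A plane1\n", f);
                fclose(f);

                /* print a few "frame crc0 crc1 crc2 crc3 crc4" lines;
                 * reads block until entries are available */
                f = fopen(crc, "r");
                if (!f)
                        return 1;
                for (i = 0; i < 5 && fgets(line, sizeof(line), f); i++)
                        fputs(line, stdout);
                fclose(f);

                /* "none" stops collection and frees the entry buffer */
                f = fopen(ctl, "w");
                if (f) {
                        fputs("pipe A none\n", f);
                        fclose(f);
                }
                return 0;
        }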
index c27a21034a5e56e6fca41d6ff710c9abef59bf72..efa9fe6853996ed90f02a019d099189d0ee20120 100644 (file)
@@ -52,7 +52,7 @@
        intel_ring_emit(LP_RING(dev_priv), x)
 
 #define ADVANCE_LP_RING() \
-       intel_ring_advance(LP_RING(dev_priv))
+       __intel_ring_advance(LP_RING(dev_priv))
 
 /**
  * Lock test for when it's just for synchronization of ring access.
@@ -641,7 +641,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 
        if (batch->num_cliprects) {
                cliprects = kcalloc(batch->num_cliprects,
-                                   sizeof(struct drm_clip_rect),
+                                   sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL)
                        return -ENOMEM;
@@ -703,7 +703,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 
        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
-                                   sizeof(struct drm_clip_rect), GFP_KERNEL);
+                                   sizeof(*cliprects), GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto fail_batch_free;
@@ -931,7 +931,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
-               value = dev->pci_device;
+               value = dev->pdev->device;
                break;
        case I915_PARAM_HAS_GEM:
                value = 1;
@@ -1290,12 +1290,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
         * then we do not take part in VGA arbitration and the
         * vga_client_register() fails with -ENODEV.
         */
-       if (!HAS_PCH_SPLIT(dev)) {
-               ret = vga_client_register(dev->pdev, dev, NULL,
-                                         i915_vga_set_decode);
-               if (ret && ret != -ENODEV)
-                       goto out;
-       }
+       ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+       if (ret && ret != -ENODEV)
+               goto out;
 
        intel_register_dsm_handler();
 
@@ -1314,13 +1311,18 @@ static int i915_load_modeset_init(struct drm_device *dev)
        if (ret)
                goto cleanup_gem_stolen;
 
+       intel_init_power_well(dev);
+
+       /* Keep VGA alive until i915_disable_vga_mem() */
+       intel_display_power_get(dev, POWER_DOMAIN_VGA);
+
        /* Important: The output setup functions called by modeset_init need
         * working irqs for e.g. gmbus and dp aux transfers. */
        intel_modeset_init(dev);
 
        ret = i915_gem_init(dev);
        if (ret)
-               goto cleanup_irq;
+               goto cleanup_power;
 
        INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
 
@@ -1328,9 +1330,11 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
-       dev->vblank_disable_allowed = 1;
-       if (INTEL_INFO(dev)->num_pipes == 0)
+       dev->vblank_disable_allowed = true;
+       if (INTEL_INFO(dev)->num_pipes == 0) {
+               intel_display_power_put(dev, POWER_DOMAIN_VGA);
                return 0;
+       }
 
        ret = intel_fbdev_init(dev);
        if (ret)
@@ -1351,11 +1355,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
         */
        intel_fbdev_initial_config(dev);
 
-       /*
-        * Must do this after fbcon init so that
-        * vgacon_save_screen() works during the handover.
-        */
-       i915_disable_vga_mem(dev);
+       intel_display_power_put(dev, POWER_DOMAIN_VGA);
 
        /* Only enable hotplug handling once the fbdev is fully set up. */
        dev_priv->enable_hotplug_processing = true;
@@ -1371,7 +1371,8 @@ cleanup_gem:
        mutex_unlock(&dev->struct_mutex);
        i915_gem_cleanup_aliasing_ppgtt(dev);
        drm_mm_takedown(&dev_priv->gtt.base.mm);
-cleanup_irq:
+cleanup_power:
+       intel_display_power_put(dev, POWER_DOMAIN_VGA);
        drm_irq_uninstall(dev);
 cleanup_gem_stolen:
        i915_gem_cleanup_stolen(dev);
@@ -1407,6 +1408,7 @@ void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
        master->driver_priv = NULL;
 }
 
+#ifdef CONFIG_DRM_I915_FBDEV
 static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
        struct apertures_struct *ap;
@@ -1427,6 +1429,11 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 
        kfree(ap);
 }
+#else
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+}
+#endif
 
 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 {
@@ -1468,17 +1475,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        info = (struct intel_device_info *) flags;
 
        /* Refuse to load on gen6+ without kms enabled. */
-       if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
+       if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
+               DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
+               DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
                return -ENODEV;
+       }
 
-       /* i915 has 4 more counters */
-       dev->counters += 4;
-       dev->types[6] = _DRM_STAT_IRQ;
-       dev->types[7] = _DRM_STAT_PRIMARY;
-       dev->types[8] = _DRM_STAT_SECONDARY;
-       dev->types[9] = _DRM_STAT_DMA;
-
-       dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
+       dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;
 
@@ -1503,6 +1506,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
        INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
 
+       intel_display_crc_init(dev);
+
        i915_dump_device_info(dev_priv);
 
        /* Not all pre-production machines fall into this category, only the
@@ -1540,19 +1545,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        intel_uncore_early_sanitize(dev);
 
-       if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
-               /* The docs do not explain exactly how the calculation can be
-                * made. It is somewhat guessable, but for now, it's always
-                * 128MB.
-                * NB: We can't write IDICR yet because we do not have gt funcs
-                * set up */
-               dev_priv->ellc_size = 128;
-               DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
-       }
+       /* This must be called before any calls to HAS_PCH_* */
+       intel_detect_pch(dev);
+
+       intel_uncore_init(dev);
 
        ret = i915_gem_gtt_init(dev);
        if (ret)
-               goto put_bridge;
+               goto out_regs;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kick_out_firmware_fb(dev_priv);
@@ -1581,7 +1581,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                                     aperture_size);
        if (dev_priv->gtt.mappable == NULL) {
                ret = -EIO;
-               goto out_rmmap;
+               goto out_gtt;
        }
 
        dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
@@ -1607,13 +1607,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto out_mtrrfree;
        }
 
-       /* This must be called before any calls to HAS_PCH_* */
-       intel_detect_pch(dev);
-
        intel_irq_init(dev);
        intel_pm_init(dev);
        intel_uncore_sanitize(dev);
-       intel_uncore_init(dev);
 
        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
@@ -1655,7 +1651,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                ret = i915_load_modeset_init(dev);
                if (ret < 0) {
                        DRM_ERROR("failed to init modeset\n");
-                       goto out_gem_unload;
+                       goto out_power_well;
                }
        } else {
                /* Start out suspended in ums mode. */
@@ -1675,6 +1671,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        return 0;
 
+out_power_well:
+       if (HAS_POWER_WELL(dev))
+               i915_remove_power_well(dev);
+       drm_vblank_cleanup(dev);
 out_gem_unload:
        if (dev_priv->mm.inactive_shrinker.scan_objects)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
@@ -1688,12 +1688,18 @@ out_gem_unload:
 out_mtrrfree:
        arch_phys_wc_del(dev_priv->gtt.mtrr);
        io_mapping_free(dev_priv->gtt.mappable);
+out_gtt:
+       list_del(&dev_priv->gtt.base.global_link);
+       drm_mm_takedown(&dev_priv->gtt.base.mm);
        dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
-out_rmmap:
+out_regs:
+       intel_uncore_fini(dev);
        pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
 free_priv:
+       if (dev_priv->slab)
+               kmem_cache_destroy(dev_priv->slab);
        kfree(dev_priv);
        return ret;
 }
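
The relabelled unwind path above (out_power_well, out_gem_unload, out_mtrrfree, out_gtt, out_regs, put_bridge, free_priv) keeps the usual kernel goto-unwind idiom: every failure jumps to the label that releases exactly what had been acquired up to that point, in reverse order. A minimal, self-contained userspace sketch of the same idiom follows; the resources and names here are invented for illustration and are not part of the driver.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative stand-ins for the driver's resources (bridge, register
 * mapping, GTT, ...).  Failures unwind in reverse acquisition order via
 * goto labels, mirroring the out_* labels in the hunk above.
 */
static int demo_load(void)
{
        char *priv, *regs, *gtt;
        int ret;

        priv = malloc(64);
        if (!priv)
                return -ENOMEM;

        regs = malloc(64);
        if (!regs) {
                ret = -ENOMEM;
                goto free_priv;
        }

        gtt = malloc(64);
        if (!gtt) {
                ret = -ENOMEM;
                goto out_regs;
        }

        printf("loaded\n");
        free(gtt);
        free(regs);
        free(priv);
        return 0;

out_regs:
        free(regs);
free_priv:
        free(priv);
        return ret;
}

int main(void)
{
        return demo_load() ? EXIT_FAILURE : EXIT_SUCCESS;
}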
@@ -1718,15 +1724,9 @@ int i915_driver_unload(struct drm_device *dev)
        if (dev_priv->mm.inactive_shrinker.scan_objects)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gpu_idle(dev);
+       ret = i915_gem_suspend(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
-       i915_gem_retire_requests(dev);
-       mutex_unlock(&dev->struct_mutex);
-
-       /* Cancel the retire work handler, which should be idle now. */
-       cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
        io_mapping_free(dev_priv->gtt.mappable);
        arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1783,8 +1783,8 @@ int i915_driver_unload(struct drm_device *dev)
        list_del(&dev_priv->gtt.base.global_link);
        WARN_ON(!list_empty(&dev_priv->vm_list));
        drm_mm_takedown(&dev_priv->gtt.base.mm);
-       if (dev_priv->regs != NULL)
-               pci_iounmap(dev->pdev, dev_priv->regs);
+
+       drm_vblank_cleanup(dev);
 
        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
@@ -1794,6 +1794,10 @@ int i915_driver_unload(struct drm_device *dev)
 
        dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 
+       intel_uncore_fini(dev);
+       if (dev_priv->regs != NULL)
+               pci_iounmap(dev->pdev, dev_priv->regs);
+
        if (dev_priv->slab)
                kmem_cache_destroy(dev_priv->slab);
 
@@ -1805,19 +1809,11 @@ int i915_driver_unload(struct drm_device *dev)
 
 int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_file_private *file_priv;
-
-       DRM_DEBUG_DRIVER("\n");
-       file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
-       if (!file_priv)
-               return -ENOMEM;
-
-       file->driver_priv = file_priv;
-
-       spin_lock_init(&file_priv->mm.lock);
-       INIT_LIST_HEAD(&file_priv->mm.request_list);
+       int ret;
 
-       idr_init(&file_priv->context_idr);
+       ret = i915_gem_open(dev, file);
+       if (ret)
+               return ret;
 
        return 0;
 }
@@ -1845,7 +1841,7 @@ void i915_driver_lastclose(struct drm_device * dev)
                return;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               intel_fb_restore_mode(dev);
+               intel_fbdev_restore_mode(dev);
                vga_switcheroo_process_delayed_switch();
                return;
        }
index 69d8ed5416c31b2e80538fabec553e333fd741ea..77e195e0a422b79d1ebaec48e93cb9b5ee4765cf 100644 (file)
@@ -160,49 +160,58 @@ extern int intel_agp_enabled;
 static const struct intel_device_info intel_i830_info = {
        .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_845g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i85x_info = {
        .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i865g_info = {
        .gen = 2, .num_pipes = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i915g_info = {
        .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i915gm_info = {
        .gen = 3, .is_mobile = 1, .num_pipes = 2,
        .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
+       .ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945g_info = {
        .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
        .has_overlay = 1, .overlay_needs_physical = 1,
+       .ring_mask = RENDER_RING,
 };
 static const struct intel_device_info intel_i945gm_info = {
        .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
        .has_hotplug = 1, .cursor_needs_physical = 1,
        .has_overlay = 1, .overlay_needs_physical = 1,
        .supports_tv = 1,
+       .ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i965g_info = {
        .gen = 4, .is_broadwater = 1, .num_pipes = 2,
        .has_hotplug = 1,
        .has_overlay = 1,
+       .ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_i965gm_info = {
@@ -210,18 +219,20 @@ static const struct intel_device_info intel_i965gm_info = {
        .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
        .has_overlay = 1,
        .supports_tv = 1,
+       .ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_g33_info = {
        .gen = 3, .is_g33 = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_overlay = 1,
+       .ring_mask = RENDER_RING,
 };
 
 static const struct intel_device_info intel_g45_info = {
        .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
-       .has_bsd_ring = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_gm45_info = {
@@ -229,7 +240,7 @@ static const struct intel_device_info intel_gm45_info = {
        .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
        .has_pipe_cxsr = 1, .has_hotplug = 1,
        .supports_tv = 1,
-       .has_bsd_ring = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_pineview_info = {
@@ -241,42 +252,36 @@ static const struct intel_device_info intel_pineview_info = {
 static const struct intel_device_info intel_ironlake_d_info = {
        .gen = 5, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_bsd_ring = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_ironlake_m_info = {
        .gen = 5, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
-       .has_bsd_ring = 1,
+       .ring_mask = RENDER_RING | BSD_RING,
 };
 
 static const struct intel_device_info intel_sandybridge_d_info = {
        .gen = 6, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
-       .has_bsd_ring = 1,
-       .has_blt_ring = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
-       .has_force_wake = 1,
 };
 
 static const struct intel_device_info intel_sandybridge_m_info = {
        .gen = 6, .is_mobile = 1, .num_pipes = 2,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .has_fbc = 1,
-       .has_bsd_ring = 1,
-       .has_blt_ring = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
        .has_llc = 1,
-       .has_force_wake = 1,
 };
 
 #define GEN7_FEATURES  \
        .gen = 7, .num_pipes = 3, \
        .need_gfx_hws = 1, .has_hotplug = 1, \
-       .has_bsd_ring = 1, \
-       .has_blt_ring = 1, \
-       .has_llc = 1, \
-       .has_force_wake = 1
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+       .has_llc = 1
 
 static const struct intel_device_info intel_ivybridge_d_info = {
        GEN7_FEATURES,
@@ -318,7 +323,7 @@ static const struct intel_device_info intel_haswell_d_info = {
        .is_haswell = 1,
        .has_ddi = 1,
        .has_fpga_dbg = 1,
-       .has_vebox_ring = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 };
 
 static const struct intel_device_info intel_haswell_m_info = {
@@ -328,7 +333,7 @@ static const struct intel_device_info intel_haswell_m_info = {
        .has_ddi = 1,
        .has_fpga_dbg = 1,
        .has_fbc = 1,
-       .has_vebox_ring = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
 };
 
 /*
@@ -416,7 +421,7 @@ void intel_detect_pch(struct drm_device *dev)
                        } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
                                /* PantherPoint is CPT compatible */
                                dev_priv->pch_type = PCH_CPT;
-                               DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+                               DRM_DEBUG_KMS("Found PantherPoint PCH\n");
                                WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
@@ -482,9 +487,7 @@ static int i915_drm_freeze(struct drm_device *dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                int error;
 
-               mutex_lock(&dev->struct_mutex);
-               error = i915_gem_idle(dev);
-               mutex_unlock(&dev->struct_mutex);
+               error = i915_gem_suspend(dev);
                if (error) {
                        dev_err(&dev->pdev->dev,
                                "GEM idle failed, resume might fail\n");
@@ -505,6 +508,8 @@ static int i915_drm_freeze(struct drm_device *dev)
                intel_modeset_suspend_hw(dev);
        }
 
+       i915_gem_suspend_gtt_mappings(dev);
+
        i915_save_state(dev);
 
        intel_opregion_fini(dev);
@@ -576,11 +581,25 @@ static void intel_resume_hotplug(struct drm_device *dev)
        drm_helper_hpd_irq_event(dev);
 }
 
-static int __i915_drm_thaw(struct drm_device *dev)
+static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int error = 0;
 
+       intel_uncore_early_sanitize(dev);
+
+       intel_uncore_sanitize(dev);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+           restore_gtt_mappings) {
+               mutex_lock(&dev->struct_mutex);
+               i915_gem_restore_gtt_mappings(dev);
+               mutex_unlock(&dev->struct_mutex);
+       } else if (drm_core_check_feature(dev, DRIVER_MODESET))
+               i915_check_and_clear_faults(dev);
+
+       intel_init_power_well(dev);
+
        i915_restore_state(dev);
        intel_opregion_setup(dev);
 
@@ -640,19 +659,7 @@ static int __i915_drm_thaw(struct drm_device *dev)
 
 static int i915_drm_thaw(struct drm_device *dev)
 {
-       int error = 0;
-
-       intel_uncore_sanitize(dev);
-
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               mutex_lock(&dev->struct_mutex);
-               i915_gem_restore_gtt_mappings(dev);
-               mutex_unlock(&dev->struct_mutex);
-       }
-
-       __i915_drm_thaw(dev);
-
-       return error;
+       return __i915_drm_thaw(dev, true);
 }
 
 int i915_resume(struct drm_device *dev)
@@ -668,20 +675,12 @@ int i915_resume(struct drm_device *dev)
 
        pci_set_master(dev->pdev);
 
-       intel_uncore_sanitize(dev);
-
        /*
         * Platforms with opregion should have sane BIOS, older ones (gen3 and
-        * earlier) need this since the BIOS might clear all our scratch PTEs.
+        * earlier) need to restore the GTT mappings since the BIOS might clear
+        * all our scratch PTEs.
         */
-       if (drm_core_check_feature(dev, DRIVER_MODESET) &&
-           !dev_priv->opregion.header) {
-               mutex_lock(&dev->struct_mutex);
-               i915_gem_restore_gtt_mappings(dev);
-               mutex_unlock(&dev->struct_mutex);
-       }
-
-       ret = __i915_drm_thaw(dev);
+       ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
        if (ret)
                return ret;
 
@@ -719,24 +718,19 @@ int i915_reset(struct drm_device *dev)
 
        simulated = dev_priv->gpu_error.stop_rings != 0;
 
-       if (!simulated && get_seconds() - dev_priv->gpu_error.last_reset < 5) {
-               DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
-               ret = -ENODEV;
-       } else {
-               ret = intel_gpu_reset(dev);
-
-               /* Also reset the gpu hangman. */
-               if (simulated) {
-                       DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
-                       dev_priv->gpu_error.stop_rings = 0;
-                       if (ret == -ENODEV) {
-                               DRM_ERROR("Reset not implemented, but ignoring "
-                                         "error for simulated gpu hangs\n");
-                               ret = 0;
-                       }
-               } else
-                       dev_priv->gpu_error.last_reset = get_seconds();
+       ret = intel_gpu_reset(dev);
+
+       /* Also reset the gpu hangman. */
+       if (simulated) {
+               DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
+               dev_priv->gpu_error.stop_rings = 0;
+               if (ret == -ENODEV) {
+                       DRM_ERROR("Reset not implemented, but ignoring "
+                                 "error for simulated gpu hangs\n");
+                       ret = 0;
+               }
        }
+
        if (ret) {
                DRM_ERROR("Failed to reset chip.\n");
                mutex_unlock(&dev->struct_mutex);
@@ -759,30 +753,17 @@ int i915_reset(struct drm_device *dev)
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET) ||
                        !dev_priv->ums.mm_suspended) {
-               struct intel_ring_buffer *ring;
-               int i;
-
+               bool hw_contexts_disabled = dev_priv->hw_contexts_disabled;
                dev_priv->ums.mm_suspended = 0;
 
-               i915_gem_init_swizzling(dev);
-
-               for_each_ring(ring, dev_priv, i)
-                       ring->init(ring);
-
-               i915_gem_context_init(dev);
-               if (dev_priv->mm.aliasing_ppgtt) {
-                       ret = dev_priv->mm.aliasing_ppgtt->enable(dev);
-                       if (ret)
-                               i915_gem_cleanup_aliasing_ppgtt(dev);
-               }
-
-               /*
-                * It would make sense to re-init all the other hw state, at
-                * least the rps/rc6/emon init done within modeset_init_hw. For
-                * some unknown reason, this blows up my ilk, so don't.
-                */
-
+               ret = i915_gem_init_hw(dev);
+               if (!hw_contexts_disabled && dev_priv->hw_contexts_disabled)
+                       DRM_ERROR("HW contexts didn't survive reset\n");
                mutex_unlock(&dev->struct_mutex);
+               if (ret) {
+                       DRM_ERROR("Failed hw init on reset %d\n", ret);
+                       return ret;
+               }
 
                drm_irq_uninstall(dev);
                drm_irq_install(dev);
@@ -799,6 +780,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct intel_device_info *intel_info =
                (struct intel_device_info *) ent->driver_data;
 
+       if (IS_PRELIMINARY_HW(intel_info) && !i915_preliminary_hw_support) {
+               DRM_INFO("This hardware requires preliminary hardware support.\n"
+                        "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
+               return -ENODEV;
+       }
+
        /* Only bind to function 0 of the device. Early generations
         * used function 1 as a placeholder for multi-head. This causes
         * us confusion instead, especially on the systems where both
@@ -946,7 +933,6 @@ static struct drm_driver driver = {
        .debugfs_init = i915_debugfs_init,
        .debugfs_cleanup = i915_debugfs_cleanup,
 #endif
-       .gem_init_object = i915_gem_init_object,
        .gem_free_object = i915_gem_free_object,
        .gem_vm_ops = &i915_gem_vm_ops,
 
index 35874b3a86dcc917c9bb68e1cb4879d81a0fc76b..80957ca0254326df56bb0dbe3d9e685dc0a87dcc 100644 (file)
@@ -98,13 +98,24 @@ enum intel_display_power_domain {
        POWER_DOMAIN_TRANSCODER_A,
        POWER_DOMAIN_TRANSCODER_B,
        POWER_DOMAIN_TRANSCODER_C,
-       POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
+       POWER_DOMAIN_TRANSCODER_EDP,
+       POWER_DOMAIN_VGA,
+
+       POWER_DOMAIN_NUM,
 };
 
+#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+
 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
                ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+       ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+        (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+#define HSW_ALWAYS_ON_POWER_DOMAINS (          \
+       BIT(POWER_DOMAIN_PIPE_A) |              \
+       BIT(POWER_DOMAIN_TRANSCODER_EDP))
 
 enum hpd_pin {
        HPD_NONE = 0,
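
With POWER_DOMAIN_NUM as a dense sentinel, the sparse +0xF placement of the eDP transcoder goes away, and a mask covering every domain is simply (1 << POWER_DOMAIN_NUM) - 1, as the new POWER_DOMAIN_MASK and HSW_ALWAYS_ON_POWER_DOMAINS show. A compact sketch of that encoding, using an invented domain list rather than the driver's:

#include <stdio.h>

#define BIT(n) (1UL << (n))

enum demo_power_domain {
        DEMO_DOMAIN_PIPE_A,
        DEMO_DOMAIN_PIPE_B,
        DEMO_DOMAIN_TRANSCODER_A,
        DEMO_DOMAIN_TRANSCODER_EDP,
        DEMO_DOMAIN_VGA,

        DEMO_DOMAIN_NUM,        /* sentinel: number of domains, not a domain */
};

/* Mask with one bit set per valid domain. */
#define DEMO_DOMAIN_MASK (BIT(DEMO_DOMAIN_NUM) - 1)

int main(void)
{
        unsigned long always_on = BIT(DEMO_DOMAIN_PIPE_A) |
                                  BIT(DEMO_DOMAIN_TRANSCODER_EDP);

        printf("domains: %d, mask: %#lx, always-on: %#lx\n",
               DEMO_DOMAIN_NUM, DEMO_DOMAIN_MASK, always_on);
        return 0;
}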
@@ -225,6 +236,8 @@ struct intel_opregion {
        struct opregion_header __iomem *header;
        struct opregion_acpi __iomem *acpi;
        struct opregion_swsci __iomem *swsci;
+       u32 swsci_gbda_sub_functions;
+       u32 swsci_sbcb_sub_functions;
        struct opregion_asle __iomem *asle;
        void __iomem *vbt;
        u32 __iomem *lid_state;
@@ -321,11 +334,13 @@ struct drm_i915_error_state {
                u32 dirty:1;
                u32 purgeable:1;
                s32 ring:4;
-               u32 cache_level:2;
+               u32 cache_level:3;
        } **active_bo, **pinned_bo;
        u32 *active_bo_count, *pinned_bo_count;
        struct intel_overlay_error_state *overlay;
        struct intel_display_error_state *display;
+       int hangcheck_score[I915_NUM_RINGS];
+       enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
 };
 
 struct intel_crtc_config;
@@ -357,7 +372,7 @@ struct drm_i915_display_funcs {
                          int target, int refclk,
                          struct dpll *match_clock,
                          struct dpll *best_clock);
-       void (*update_wm)(struct drm_device *dev);
+       void (*update_wm)(struct drm_crtc *crtc);
        void (*update_sprite_wm)(struct drm_plane *plane,
                                 struct drm_crtc *crtc,
                                 uint32_t sprite_width, int pixel_size,
@@ -367,7 +382,6 @@ struct drm_i915_display_funcs {
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                struct intel_crtc_config *);
-       void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
                             int x, int y,
                             struct drm_framebuffer *old_fb);
@@ -375,7 +389,8 @@ struct drm_i915_display_funcs {
        void (*crtc_disable)(struct drm_crtc *crtc);
        void (*off)(struct drm_crtc *crtc);
        void (*write_eld)(struct drm_connector *connector,
-                         struct drm_crtc *crtc);
+                         struct drm_crtc *crtc,
+                         struct drm_display_mode *mode);
        void (*fdi_link_train)(struct drm_crtc *crtc);
        void (*init_clock_gating)(struct drm_device *dev);
        int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -395,6 +410,20 @@ struct drm_i915_display_funcs {
 struct intel_uncore_funcs {
        void (*force_wake_get)(struct drm_i915_private *dev_priv);
        void (*force_wake_put)(struct drm_i915_private *dev_priv);
+
+       uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+       uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+       uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+       uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
+
+       void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
+                               uint8_t val, bool trace);
+       void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
+                               uint16_t val, bool trace);
+       void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
+                               uint32_t val, bool trace);
+       void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
+                               uint64_t val, bool trace);
 };
 
 struct intel_uncore {
@@ -404,6 +433,8 @@ struct intel_uncore {
 
        unsigned fifo_count;
        unsigned forcewake_count;
+
+       struct delayed_work force_wake_work;
 };
 
 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -420,7 +451,7 @@ struct intel_uncore {
        func(is_ivybridge) sep \
        func(is_valleyview) sep \
        func(is_haswell) sep \
-       func(has_force_wake) sep \
+       func(is_preliminary) sep \
        func(has_fbc) sep \
        func(has_pipe_cxsr) sep \
        func(has_hotplug) sep \
@@ -428,9 +459,6 @@ struct intel_uncore {
        func(has_overlay) sep \
        func(overlay_needs_physical) sep \
        func(supports_tv) sep \
-       func(has_bsd_ring) sep \
-       func(has_blt_ring) sep \
-       func(has_vebox_ring) sep \
        func(has_llc) sep \
        func(has_ddi) sep \
        func(has_fpga_dbg)
@@ -442,6 +470,7 @@ struct intel_device_info {
        u32 display_mmio_offset;
        u8 num_pipes:3;
        u8 gen;
+       u8 ring_mask; /* Rings supported by the HW */
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
 };
 
@@ -497,10 +526,12 @@ struct i915_address_space {
 
        /* FIXME: Need a more generic return type */
        gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
-                                    enum i915_cache_level level);
+                                    enum i915_cache_level level,
+                                    bool valid); /* Create a valid PTE */
        void (*clear_range)(struct i915_address_space *vm,
                            unsigned int first_entry,
-                           unsigned int num_entries);
+                           unsigned int num_entries,
+                           bool use_scratch);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct sg_table *st,
                               unsigned int first_entry,
@@ -568,6 +599,13 @@ struct i915_vma {
        /** This vma's place in the batchbuffer or on the eviction list */
        struct list_head exec_list;
 
+       /**
+        * Used for performing relocations during execbuffer insertion.
+        */
+       struct hlist_node exec_node;
+       unsigned long exec_handle;
+       struct drm_i915_gem_exec_object2 *exec_entry;
+
 };
 
 struct i915_ctx_hang_stats {
@@ -576,6 +614,12 @@ struct i915_ctx_hang_stats {
 
        /* This context had batch active when hang was declared */
        unsigned batch_active;
+
+       /* Time when this context was last blamed for a GPU reset */
+       unsigned long guilty_ts;
+
+       /* This context is banned from submitting more work */
+       bool banned;
 };
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
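
The new guilty_ts and banned fields let the driver remember when a context was last blamed for a reset, so a second hang inside the ban window can mark the context banned; the window itself is DRM_I915_CTX_BAN_PERIOD, defined further down as DIV_ROUND_UP(8 * DRM_I915_HANGCHECK_PERIOD, 1000), which works out to 12 with the 1500 ms hangcheck period. The sketch below is a simplified guess at how those fields interact (it treats the window as seconds) and is not the driver's exact policy:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define DEMO_CTX_BAN_PERIOD 12  /* seconds; mirrors the 8 * 1500 ms window */

struct demo_hang_stats {
        unsigned batch_active;
        unsigned long guilty_ts;        /* last time this context was blamed */
        bool banned;
};

/* Blame @hs for a hang at @now; ban it if the previous hang was recent. */
static void demo_note_hang(struct demo_hang_stats *hs, unsigned long now)
{
        if (hs->guilty_ts && now - hs->guilty_ts < DEMO_CTX_BAN_PERIOD)
                hs->banned = true;
        hs->guilty_ts = now;
        hs->batch_active++;
}

int main(void)
{
        struct demo_hang_stats hs = { 0 };
        unsigned long now = (unsigned long)time(NULL);

        demo_note_hang(&hs, now);       /* first hang: not banned yet */
        demo_note_hang(&hs, now + 2);   /* second hang within window: banned */
        printf("banned: %s\n", hs.banned ? "yes" : "no");
        return 0;
}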
@@ -584,10 +628,13 @@ struct i915_hw_context {
        struct kref ref;
        int id;
        bool is_initialized;
+       uint8_t remap_slice;
        struct drm_i915_file_private *file_priv;
        struct intel_ring_buffer *ring;
        struct drm_i915_gem_object *obj;
        struct i915_ctx_hang_stats hang_stats;
+
+       struct list_head link;
 };
 
 struct i915_fbc {
@@ -621,17 +668,9 @@ struct i915_fbc {
        } no_fbc_reason;
 };
 
-enum no_psr_reason {
-       PSR_NO_SOURCE, /* Not supported on platform */
-       PSR_NO_SINK, /* Not supported by panel */
-       PSR_MODULE_PARAM,
-       PSR_CRTC_NOT_ACTIVE,
-       PSR_PWR_WELL_ENABLED,
-       PSR_NOT_TILED,
-       PSR_SPRITE_ENABLED,
-       PSR_S3D_ENABLED,
-       PSR_INTERLACED_ENABLED,
-       PSR_HSW_NOT_DDIA,
+struct i915_psr {
+       bool sink_support;
+       bool source_ok;
 };
 
 enum intel_pch {
@@ -821,17 +860,20 @@ struct intel_gen6_power_mgmt {
        struct work_struct work;
        u32 pm_iir;
 
-       /* On vlv we need to manually drop to Vmin with a delayed work. */
-       struct delayed_work vlv_work;
-
        /* The below variables and all the rps hw state are protected by
         * dev->struct_mutex. */
        u8 cur_delay;
        u8 min_delay;
        u8 max_delay;
        u8 rpe_delay;
+       u8 rp1_delay;
+       u8 rp0_delay;
        u8 hw_max;
 
+       int last_adj;
+       enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
+
+       bool enabled;
        struct delayed_work delayed_resume_work;
 
        /*
@@ -869,7 +911,7 @@ struct intel_ilk_power_mgmt {
 /* Power well structure for haswell */
 struct i915_power_well {
        struct drm_device *device;
-       spinlock_t lock;
+       struct mutex lock;
        /* power well enable/disable usage count */
        int count;
        int i915_request;
@@ -900,9 +942,11 @@ struct i915_ums_state {
        int mm_suspended;
 };
 
+#define MAX_L3_SLICES 2
 struct intel_l3_parity {
-       u32 *remap_info;
+       u32 *remap_info[MAX_L3_SLICES];
        struct work_struct error_work;
+       int which_slice;
 };
 
 struct i915_gem_mm {
@@ -939,6 +983,15 @@ struct i915_gem_mm {
         */
        struct delayed_work retire_work;
 
+       /**
+        * When we detect an idle GPU, we want to turn on
+        * powersaving features. So once we see that there
+        * are no more requests outstanding and no more
+        * arrive within a small period of time, we fire
+        * off the idle_work.
+        */
+       struct delayed_work idle_work;
+
        /**
         * Are we in a non-interruptible section of code like
         * modesetting?
@@ -977,6 +1030,9 @@ struct i915_gpu_error {
        /* For hangcheck timer */
 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+       /* Hang gpu twice in this window and your context gets banned */
+#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
+
        struct timer_list hangcheck_timer;
 
        /* For reset and error_state handling. */
@@ -985,7 +1041,8 @@ struct i915_gpu_error {
        struct drm_i915_error_state *first_error;
        struct work_struct work;
 
-       unsigned long last_reset;
+
+       unsigned long missed_irq_rings;
 
        /**
         * State variable and reset counter controlling the reset flow
@@ -1025,6 +1082,9 @@ struct i915_gpu_error {
 
        /* For gpu hang simulation. */
        unsigned int stop_rings;
+
+       /* For missed irq/seqno simulation. */
+       unsigned int test_irq_rings;
 };
 
 enum modeset_restore {
@@ -1033,6 +1093,14 @@ enum modeset_restore {
        MODESET_SUSPENDED,
 };
 
+struct ddi_vbt_port_info {
+       uint8_t hdmi_level_shift;
+
+       uint8_t supports_dvi:1;
+       uint8_t supports_hdmi:1;
+       uint8_t supports_dp:1;
+};
+
 struct intel_vbt_data {
        struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
        struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -1058,10 +1126,17 @@ struct intel_vbt_data {
        int edp_bpp;
        struct edp_power_seq edp_pps;
 
+       /* MIPI DSI */
+       struct {
+               u16 panel_id;
+       } dsi;
+
        int crt_ddc_pin;
 
        int child_dev_num;
-       struct child_device_config *child_dev;
+       union child_device_config *child_dev;
+
+       struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
 };
 
 enum intel_ddb_partitioning {
@@ -1077,6 +1152,15 @@ struct intel_wm_level {
        uint32_t fbc_val;
 };
 
+struct hsw_wm_values {
+       uint32_t wm_pipe[3];
+       uint32_t wm_lp[3];
+       uint32_t wm_lp_spr[3];
+       uint32_t wm_linetime[3];
+       bool enable_fbc_wm;
+       enum intel_ddb_partitioning partitioning;
+};
+
 /*
  * This struct tracks the state needed for the Package C8+ feature.
  *
@@ -1146,6 +1230,35 @@ struct i915_package_c8 {
        } regsave;
 };
 
+enum intel_pipe_crc_source {
+       INTEL_PIPE_CRC_SOURCE_NONE,
+       INTEL_PIPE_CRC_SOURCE_PLANE1,
+       INTEL_PIPE_CRC_SOURCE_PLANE2,
+       INTEL_PIPE_CRC_SOURCE_PF,
+       INTEL_PIPE_CRC_SOURCE_PIPE,
+       /* TV/DP on pre-gen5/vlv can't use the pipe source. */
+       INTEL_PIPE_CRC_SOURCE_TV,
+       INTEL_PIPE_CRC_SOURCE_DP_B,
+       INTEL_PIPE_CRC_SOURCE_DP_C,
+       INTEL_PIPE_CRC_SOURCE_DP_D,
+       INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+struct intel_pipe_crc_entry {
+       uint32_t frame;
+       uint32_t crc[5];
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR      128
+struct intel_pipe_crc {
+       spinlock_t lock;
+       bool opened;            /* exclusive access to the result file */
+       struct intel_pipe_crc_entry *entries;
+       enum intel_pipe_crc_source source;
+       int head, tail;
+       wait_queue_head_t wq;
+};
+
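+
+/* (illustrative aside, not part of the diff)
+ * intel_pipe_crc collects one intel_pipe_crc_entry per frame into a fixed
+ * array indexed by head and tail, with a spinlock and a wait queue for the
+ * debugfs reader.  A bare-bones, single-threaded sketch of such a head/tail
+ * ring is shown below; the sizes and names are made up, and the locking and
+ * wait queue are deliberately left out. */

#include <stdio.h>

#define DEMO_CRC_ENTRIES 8      /* stand-in for INTEL_PIPE_CRC_ENTRIES_NR */

struct demo_crc_entry {
        unsigned frame;
        unsigned crc[5];
};

struct demo_crc_ring {
        struct demo_crc_entry entries[DEMO_CRC_ENTRIES];
        int head, tail;         /* head: next write, tail: next read */
};

static int demo_crc_push(struct demo_crc_ring *r, unsigned frame, unsigned crc0)
{
        int next = (r->head + 1) % DEMO_CRC_ENTRIES;

        if (next == r->tail)
                return -1;      /* full: drop the sample */
        r->entries[r->head].frame = frame;
        r->entries[r->head].crc[0] = crc0;
        r->head = next;
        return 0;
}

static int demo_crc_pop(struct demo_crc_ring *r, struct demo_crc_entry *out)
{
        if (r->tail == r->head)
                return -1;      /* empty */
        *out = r->entries[r->tail];
        r->tail = (r->tail + 1) % DEMO_CRC_ENTRIES;
        return 0;
}

int main(void)
{
        struct demo_crc_ring ring = { .head = 0, .tail = 0 };
        struct demo_crc_entry e;

        demo_crc_push(&ring, 100, 0xdeadbeef);
        while (!demo_crc_pop(&ring, &e))
                printf("frame %u crc0 %#x\n", e.frame, e.crc[0]);
        return 0;
}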
 typedef struct drm_i915_private {
        struct drm_device *dev;
        struct kmem_cache *slab;
@@ -1270,6 +1383,10 @@ typedef struct drm_i915_private {
        struct drm_crtc *pipe_to_crtc_mapping[3];
        wait_queue_head_t pending_flip_queue;
 
+#ifdef CONFIG_DEBUG_FS
+       struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
+#endif
+
        int num_shared_dpll;
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        struct intel_ddi_plls ddi_plls;
@@ -1298,14 +1415,16 @@ typedef struct drm_i915_private {
        /* Haswell power well */
        struct i915_power_well power_well;
 
-       enum no_psr_reason no_psr_reason;
+       struct i915_psr psr;
 
        struct i915_gpu_error gpu_error;
 
        struct drm_i915_gem_object *vlv_pctx;
 
+#ifdef CONFIG_DRM_I915_FBDEV
        /* list of fbdev register on this device */
        struct intel_fbdev *fbdev;
+#endif
 
        /*
         * The console may be contended at resume, but we don't
@@ -1318,6 +1437,7 @@ typedef struct drm_i915_private {
 
        bool hw_contexts_disabled;
        uint32_t hw_context_size;
+       struct list_head context_list;
 
        u32 fdi_rx_config;
 
@@ -1335,6 +1455,9 @@ typedef struct drm_i915_private {
                uint16_t spr_latency[5];
                /* cursor */
                uint16_t cur_latency[5];
+
+               /* current hardware state */
+               struct hsw_wm_values hw;
        } wm;
 
        struct i915_package_c8 pc8;
@@ -1398,8 +1521,6 @@ struct drm_i915_gem_object {
        struct list_head ring_list;
        /** Used in execbuf to temporarily hold a ref */
        struct list_head obj_exec_link;
-       /** This object's place in the batchbuffer or on the eviction list */
-       struct list_head exec_list;
 
        /**
         * This is set if the object is on the active lists (has pending
@@ -1485,13 +1606,6 @@ struct drm_i915_gem_object {
        void *dma_buf_vmapping;
        int vmapping_count;
 
-       /**
-        * Used for performing relocations during execbuffer insertion.
-        */
-       struct hlist_node exec_node;
-       unsigned long exec_handle;
-       struct drm_i915_gem_exec_object2 *exec_entry;
-
        struct intel_ring_buffer *ring;
 
        /** Breadcrumb of last rendering to the buffer. */
@@ -1503,11 +1617,14 @@ struct drm_i915_gem_object {
        /** Current tiling stride for the object, if it's tiled. */
        uint32_t stride;
 
+       /** References from framebuffers, locks out tiling changes. */
+       unsigned long framebuffer_references;
+
        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;
 
        /** User space pin count and filp owning the pin */
-       uint32_t user_pin_count;
+       unsigned long user_pin_count;
        struct drm_file *pin_filp;
 
        /** for phy allocated objects */
@@ -1558,48 +1675,55 @@ struct drm_i915_gem_request {
 };
 
 struct drm_i915_file_private {
+       struct drm_i915_private *dev_priv;
+
        struct {
                spinlock_t lock;
                struct list_head request_list;
+               struct delayed_work idle_work;
        } mm;
        struct idr context_idr;
 
        struct i915_ctx_hang_stats hang_stats;
+       atomic_t rps_wait_boost;
 };
 
 #define INTEL_INFO(dev)        (to_i915(dev)->info)
 
-#define IS_I830(dev)           ((dev)->pci_device == 0x3577)
-#define IS_845G(dev)           ((dev)->pci_device == 0x2562)
+#define IS_I830(dev)           ((dev)->pdev->device == 0x3577)
+#define IS_845G(dev)           ((dev)->pdev->device == 0x2562)
 #define IS_I85X(dev)           (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)          ((dev)->pci_device == 0x2572)
+#define IS_I865G(dev)          ((dev)->pdev->device == 0x2572)
 #define IS_I915G(dev)          (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)         ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev)          ((dev)->pci_device == 0x2772)
+#define IS_I915GM(dev)         ((dev)->pdev->device == 0x2592)
+#define IS_I945G(dev)          ((dev)->pdev->device == 0x2772)
 #define IS_I945GM(dev)         (INTEL_INFO(dev)->is_i945gm)
 #define IS_BROADWATER(dev)     (INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)      (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)           ((dev)->pci_device == 0x2A42)
+#define IS_GM45(dev)           ((dev)->pdev->device == 0x2A42)
 #define IS_G4X(dev)            (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)     ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev)     ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW_G(dev)     ((dev)->pdev->device == 0xa001)
+#define IS_PINEVIEW_M(dev)     ((dev)->pdev->device == 0xa011)
 #define IS_PINEVIEW(dev)       (INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev)            (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev)     ((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE_M(dev)     ((dev)->pdev->device == 0x0046)
 #define IS_IVYBRIDGE(dev)      (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev)                ((dev)->pci_device == 0x0156 || \
-                                (dev)->pci_device == 0x0152 || \
-                                (dev)->pci_device == 0x015a)
-#define IS_SNB_GT1(dev)                ((dev)->pci_device == 0x0102 || \
-                                (dev)->pci_device == 0x0106 || \
-                                (dev)->pci_device == 0x010A)
+#define IS_IVB_GT1(dev)                ((dev)->pdev->device == 0x0156 || \
+                                (dev)->pdev->device == 0x0152 || \
+                                (dev)->pdev->device == 0x015a)
+#define IS_SNB_GT1(dev)                ((dev)->pdev->device == 0x0102 || \
+                                (dev)->pdev->device == 0x0106 || \
+                                (dev)->pdev->device == 0x010A)
 #define IS_VALLEYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)        (INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)  (IS_HASWELL(dev) && \
-                                ((dev)->pci_device & 0xFF00) == 0x0C00)
+                                ((dev)->pdev->device & 0xFF00) == 0x0C00)
 #define IS_ULT(dev)            (IS_HASWELL(dev) && \
-                                ((dev)->pci_device & 0xFF00) == 0x0A00)
+                                ((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_HSW_GT3(dev)                (IS_HASWELL(dev) && \
+                                ((dev)->pdev->device & 0x00F0) == 0x0020)
+#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -1614,9 +1738,13 @@ struct drm_i915_file_private {
 #define IS_GEN6(dev)   (INTEL_INFO(dev)->gen == 6)
 #define IS_GEN7(dev)   (INTEL_INFO(dev)->gen == 7)
 
-#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
-#define HAS_VEBOX(dev)          (INTEL_INFO(dev)->has_vebox_ring)
+#define RENDER_RING            (1<<RCS)
+#define BSD_RING               (1<<VCS)
+#define BLT_RING               (1<<BCS)
+#define VEBOX_RING             (1<<VECS)
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->ring_mask & BSD_RING)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->ring_mask & BLT_RING)
+#define HAS_VEBOX(dev)            (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
 #define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc)
 #define HAS_WT(dev)            (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
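
Folding has_bsd_ring, has_blt_ring and has_vebox_ring into a single ring_mask means each device-info entry lists its engines once and HAS_BSD()/HAS_BLT()/HAS_VEBOX() become plain bit tests. A standalone sketch of the same scheme, reusing the RCS/VCS/BCS/VECS bit layout from the macros above (the struct and helper names here are invented):

#include <stdio.h>

enum demo_ring_id { RCS, VCS, BCS, VECS };

#define RENDER_RING     (1 << RCS)
#define BSD_RING        (1 << VCS)
#define BLT_RING        (1 << BCS)
#define VEBOX_RING      (1 << VECS)

struct demo_device_info {
        int gen;
        unsigned char ring_mask;        /* rings supported by the hardware */
};

#define DEMO_HAS_BSD(info)      ((info)->ring_mask & BSD_RING)
#define DEMO_HAS_VEBOX(info)    ((info)->ring_mask & VEBOX_RING)

int main(void)
{
        /* Roughly how a Haswell-style entry now reads. */
        const struct demo_device_info hsw = {
                .gen = 7,
                .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
        };

        printf("bsd: %d vebox: %d\n",
               !!DEMO_HAS_BSD(&hsw), !!DEMO_HAS_VEBOX(&hsw));
        return 0;
}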
@@ -1638,7 +1766,6 @@ struct drm_i915_file_private {
 #define SUPPORTS_DIGITAL_OUTPUTS(dev)  (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
 #define SUPPORTS_INTEGRATED_HDMI(dev)  (IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_INTEGRATED_DP(dev)    (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev)              (IS_IRONLAKE_M(dev))
 #define SUPPORTS_TV(dev)               (INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)           (INTEL_INFO(dev)->has_hotplug)
 
@@ -1651,6 +1778,7 @@ struct drm_i915_file_private {
 #define HAS_DDI(dev)           (INTEL_INFO(dev)->has_ddi)
 #define HAS_POWER_WELL(dev)    (IS_HASWELL(dev))
 #define HAS_FPGA_DBG_UNCLAIMED(dev)    (INTEL_INFO(dev)->has_fpga_dbg)
+#define HAS_PSR(dev)           (IS_HASWELL(dev))
 
 #define INTEL_PCH_DEVICE_ID_MASK               0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE           0x3b00
@@ -1666,35 +1794,14 @@ struct drm_i915_file_private {
 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
-#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
-
-#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+/* DPF == dynamic parity feature */
+#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
 #define GT_FREQUENCY_MULTIPLIER 50
 
 #include "i915_trace.h"
 
-/**
- * RC6 is a special power stage which allows the GPU to enter an very
- * low-voltage mode when idle, using down to 0V while at this stage.  This
- * stage is entered automatically when the GPU is idle when RC6 support is
- * enabled, and as soon as new workload arises GPU wakes up automatically as well.
- *
- * There are different RC6 modes available in Intel GPU, which differentiate
- * among each other with the latency required to enter and leave RC6 and
- * voltage consumed by the GPU in different states.
- *
- * The combination of the following flags define which states GPU is allowed
- * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
- * RC6pp is deepest RC6. Their support by hardware varies according to the
- * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
- * which brings the most power savings; deeper states save more power, but
- * require higher latency to switch to and wake up.
- */
-#define INTEL_RC6_ENABLE                       (1<<0)
-#define INTEL_RC6p_ENABLE                      (1<<1)
-#define INTEL_RC6pp_ENABLE                     (1<<2)
-
 extern const struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
@@ -1765,6 +1872,7 @@ extern void intel_uncore_early_sanitize(struct drm_device *dev);
 extern void intel_uncore_init(struct drm_device *dev);
 extern void intel_uncore_clear_errors(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1822,14 +1930,11 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 void i915_gem_load(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
-int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
-                                    struct i915_address_space *vm);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
@@ -1868,9 +1973,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-                                   struct intel_ring_buffer *ring);
-
+void i915_vma_move_to_active(struct i915_vma *vma,
+                            struct intel_ring_buffer *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
@@ -1911,7 +2015,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
        }
 }
 
-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
@@ -1931,11 +2035,11 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_l3_remap(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
-int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_gem_suspend(struct drm_device *dev);
 int __i915_add_request(struct intel_ring_buffer *ring,
                       struct drm_file *file,
                       struct drm_i915_gem_object *batch_obj,
@@ -1962,6 +2066,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
@@ -1993,6 +2098,9 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
                                  struct i915_address_space *vm);
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
        (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2029,7 +2137,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
        return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
                                   map_and_fenceable, nonblocking);
 }
-#undef obj_to_ggtt
 
 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
@@ -2065,6 +2172,8 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
 void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj);
 
+void i915_check_and_clear_faults(struct drm_device *dev);
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
@@ -2090,6 +2199,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
                                          unsigned cache_level,
                                          bool mappable,
                                          bool nonblock);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
@@ -2129,6 +2239,11 @@ int i915_verify_lists(struct drm_device *dev);
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif
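
The CONFIG_DEBUG_FS guard above follows the usual stub pattern: when the feature is configured out, callers still see a function, but it is an empty static inline that the compiler discards. A tiny sketch of that pattern with a made-up config symbol:

#include <stdio.h>

/* DEMO_DEBUG_FS stands in for CONFIG_DEBUG_FS; build with -DDEMO_DEBUG_FS
 * to get the real body, otherwise the call compiles away to nothing. */
#ifdef DEMO_DEBUG_FS
void demo_display_crc_init(void)
{
        printf("crc debugfs initialised\n");
}
#else
static inline void demo_display_crc_init(void) {}
#endif

int main(void)
{
        demo_display_crc_init();
        printf("probe done\n");
        return 0;
}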
 
 /* i915_gpu_error.c */
 __printf(2, 3)
@@ -2182,15 +2297,30 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 extern void intel_i2c_reset(struct drm_device *dev);
 
 /* intel_opregion.c */
+struct intel_encoder;
 extern int intel_opregion_setup(struct drm_device *dev);
 #ifdef CONFIG_ACPI
 extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
 extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+                                        bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+                                        pci_power_t state);
 #else
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+       return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+       return 0;
+}
 #endif
 
 /* intel_acpi.c */
@@ -2252,8 +2382,16 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
                   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
@@ -2262,37 +2400,21 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 int vlv_gpu_freq(int ddr_freq, int val);
 int vlv_freq_opcode(int ddr_freq, int val);
 
-#define __i915_read(x) \
-       u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
-
-#define __i915_write(x) \
-       void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
-
-#define I915_READ8(reg)                i915_read8(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val)  i915_write8(dev_priv, (reg), (val), true)
-
-#define I915_READ16(reg)       i915_read16(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg)       i915_read16(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
-
-#define I915_READ(reg)         i915_read32(dev_priv, (reg), true)
-#define I915_WRITE(reg, val)   i915_write32(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg)         i915_read32(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val)   i915_write32(dev_priv, (reg), (val), false)
-
-#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
-#define I915_READ64(reg)       i915_read64(dev_priv, (reg), true)
+#define I915_READ8(reg)                dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val)  dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+
+#define I915_READ16(reg)       dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg)       dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+
+#define I915_READ(reg)         dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val)   dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg)         dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val)   dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+
+#define I915_WRITE64(reg, val) dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg)       dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
 
 #define POSTING_READ(reg)      (void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)    (void)I915_READ16_NOTRACE(reg)
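
I915_READ/I915_WRITE no longer call fixed i915_read*/i915_write* helpers; they dispatch through the mmio_read*/mmio_write* function pointers added to struct intel_uncore_funcs earlier in this header, so the access path (forcewake handling, tracing) can be selected per platform at init time. A minimal userspace analogue of that indirection, with an in-memory array standing in for the register BAR and invented names throughout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_priv;

/* Per-device register accessors, selected once at init time. */
struct demo_uncore_funcs {
        uint32_t (*mmio_readl)(struct demo_priv *priv, unsigned off, bool trace);
        void (*mmio_writel)(struct demo_priv *priv, unsigned off,
                            uint32_t val, bool trace);
};

struct demo_priv {
        struct demo_uncore_funcs funcs;
        uint32_t regs[16];      /* fake register file instead of an MMIO BAR */
};

static uint32_t plain_readl(struct demo_priv *priv, unsigned off, bool trace)
{
        uint32_t val = priv->regs[off];

        if (trace)
                printf("read  [%u] = %#x\n", off, val);
        return val;
}

static void plain_writel(struct demo_priv *priv, unsigned off,
                         uint32_t val, bool trace)
{
        if (trace)
                printf("write [%u] = %#x\n", off, val);
        priv->regs[off] = val;
}

#define DEMO_READ(p, reg)       ((p)->funcs.mmio_readl((p), (reg), true))
#define DEMO_WRITE(p, reg, v)   ((p)->funcs.mmio_writel((p), (reg), (v), true))

int main(void)
{
        struct demo_priv priv = {
                .funcs = { .mmio_readl = plain_readl, .mmio_writel = plain_writel },
        };

        DEMO_WRITE(&priv, 3, 0x1234);
        return DEMO_READ(&priv, 3) == 0x1234 ? 0 : 1;
}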
index cdfb9da0e4ce944529a329ce29f92d391e19973a..e7b39d731db6158a085bd7e786c94a13f239af9a 100644 (file)
@@ -41,6 +41,9 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
                                                   bool force);
 static __must_check int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+                              bool readonly);
+static __must_check int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           struct i915_address_space *vm,
                           unsigned alignment,
@@ -61,8 +64,8 @@ static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
                                             struct shrink_control *sc);
 static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
                                            struct shrink_control *sc);
-static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -258,7 +261,7 @@ i915_gem_dumb_create(struct drm_file *file,
                     struct drm_mode_create_dumb *args)
 {
        /* have to work out size/pitch and return them */
-       args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
+       args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
                               args->size, &args->handle);
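
The dumb-buffer pitch now spells out the bytes-per-pixel rounding with DIV_ROUND_UP instead of the hand-rolled (bpp + 7) / 8, then aligns to 64 bytes as before. Worked through for a hypothetical 100-pixel-wide, 24 bpp buffer: DIV_ROUND_UP(24, 8) = 3 bytes per pixel, 100 * 3 = 300, ALIGN(300, 64) = 320. A tiny sketch with local copies of the two helpers:

#include <stdio.h>

/* Userspace copies of the kernel helpers used above. */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define ALIGN(x, a)             (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned width = 100, bpp = 24, height = 4;
        unsigned pitch = ALIGN(width * DIV_ROUND_UP(bpp, 8), 64);

        printf("pitch = %u, size = %u\n", pitch, pitch * height);
        return 0;
}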
@@ -432,11 +435,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
                 * optimizes for the case when the gpu will dirty the data
                 * anyway again before the next pread happens. */
                needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
-               if (i915_gem_obj_bound_any(obj)) {
-                       ret = i915_gem_object_set_to_gtt_domain(obj, false);
-                       if (ret)
-                               return ret;
-               }
+               ret = i915_gem_object_wait_rendering(obj, true);
+               if (ret)
+                       return ret;
        }
 
        ret = i915_gem_object_get_pages(obj);
@@ -748,11 +749,9 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
                 * optimizes for the case when the gpu will use the data
                 * right away and we therefore have to clflush anyway. */
                needs_clflush_after = cpu_write_needs_clflush(obj);
-               if (i915_gem_obj_bound_any(obj)) {
-                       ret = i915_gem_object_set_to_gtt_domain(obj, true);
-                       if (ret)
-                               return ret;
-               }
+               ret = i915_gem_object_wait_rendering(obj, false);
+               if (ret)
+                       return ret;
        }
        /* Same trick applies to invalidate partially written cachelines read
         * before writing. */
@@ -966,12 +965,31 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
        BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
        ret = 0;
-       if (seqno == ring->outstanding_lazy_request)
+       if (seqno == ring->outstanding_lazy_seqno)
                ret = i915_add_request(ring, NULL);
 
        return ret;
 }
 
+static void fake_irq(unsigned long data)
+{
+       wake_up_process((struct task_struct *)data);
+}
+
+static bool missed_irq(struct drm_i915_private *dev_priv,
+                      struct intel_ring_buffer *ring)
+{
+       return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
+}
+
+static bool can_wait_boost(struct drm_i915_file_private *file_priv)
+{
+       if (file_priv == NULL)
+               return true;
+
+       return !atomic_xchg(&file_priv->rps_wait_boost, true);
+}
+
 /**
  * __wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
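
can_wait_boost() above uses atomic_xchg() as a test-and-set so that, per file, only the first waiter since the flag was last cleared may trigger an RPS boost (the flag is presumably cleared again by the per-file mm.idle_work added elsewhere in this series). A small C11 sketch of the same test-and-set gate, with the boost and threading stubbed out:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_file_priv {
        atomic_bool rps_wait_boost;
};

/* Returns true only for the first caller since the flag was last cleared. */
static bool demo_can_wait_boost(struct demo_file_priv *fp)
{
        if (!fp)
                return true;    /* kernel-internal waiters always may boost */
        return !atomic_exchange(&fp->rps_wait_boost, true);
}

int main(void)
{
        struct demo_file_priv fp = { 0 };

        printf("first:  %d\n", demo_can_wait_boost(&fp));      /* 1 */
        printf("second: %d\n", demo_can_wait_boost(&fp));      /* 0 */
        return 0;
}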
@@ -992,13 +1010,14 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
  */
 static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        unsigned reset_counter,
-                       bool interruptible, struct timespec *timeout)
+                       bool interruptible,
+                       struct timespec *timeout,
+                       struct drm_i915_file_private *file_priv)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
-       struct timespec before, now, wait_time={1,0};
-       unsigned long timeout_jiffies;
-       long end;
-       bool wait_forever = true;
+       struct timespec before, now;
+       DEFINE_WAIT(wait);
+       long timeout_jiffies;
        int ret;
 
        WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
@@ -1006,51 +1025,79 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                return 0;
 
-       trace_i915_gem_request_wait_begin(ring, seqno);
+       timeout_jiffies = timeout ? timespec_to_jiffies_timeout(timeout) : 1;
 
-       if (timeout != NULL) {
-               wait_time = *timeout;
-               wait_forever = false;
+       if (dev_priv->info->gen >= 6 && can_wait_boost(file_priv)) {
+               gen6_rps_boost(dev_priv);
+               if (file_priv)
+                       mod_delayed_work(dev_priv->wq,
+                                        &file_priv->mm.idle_work,
+                                        msecs_to_jiffies(100));
        }
 
-       timeout_jiffies = timespec_to_jiffies_timeout(&wait_time);
-
-       if (WARN_ON(!ring->irq_get(ring)))
+       if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)) &&
+           WARN_ON(!ring->irq_get(ring)))
                return -ENODEV;
 
-       /* Record current time in case interrupted by signal, or wedged * */
+       /* Record current time in case interrupted by signal, or wedged */
+       trace_i915_gem_request_wait_begin(ring, seqno);
        getrawmonotonic(&before);
+       for (;;) {
+               struct timer_list timer;
+               unsigned long expire;
 
-#define EXIT_COND \
-       (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
-        i915_reset_in_progress(&dev_priv->gpu_error) || \
-        reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-       do {
-               if (interruptible)
-                       end = wait_event_interruptible_timeout(ring->irq_queue,
-                                                              EXIT_COND,
-                                                              timeout_jiffies);
-               else
-                       end = wait_event_timeout(ring->irq_queue, EXIT_COND,
-                                                timeout_jiffies);
+               prepare_to_wait(&ring->irq_queue, &wait,
+                               interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
 
                /* We need to check whether any gpu reset happened in between
                 * the caller grabbing the seqno and now ... */
-               if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
-                       end = -EAGAIN;
+               if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
+                       /* ... but upgrade the -EAGAIN to an -EIO if the gpu
+                        * is truly gone. */
+                       ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
+                       if (ret == 0)
+                               ret = -EAGAIN;
+                       break;
+               }
 
-               /* ... but upgrade the -EGAIN to an -EIO if the gpu is truely
-                * gone. */
-               ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
-               if (ret)
-                       end = ret;
-       } while (end == 0 && wait_forever);
+               if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+                       ret = 0;
+                       break;
+               }
 
+               if (interruptible && signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+
+               if (timeout_jiffies <= 0) {
+                       ret = -ETIME;
+                       break;
+               }
+
+               timer.function = NULL;
+               if (timeout || missed_irq(dev_priv, ring)) {
+                       setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
+                       expire = jiffies + (missed_irq(dev_priv, ring) ? 1 : timeout_jiffies);
+                       mod_timer(&timer, expire);
+               }
+
+               io_schedule();
+
+               if (timeout)
+                       timeout_jiffies = expire - jiffies;
+
+               if (timer.function) {
+                       del_singleshot_timer_sync(&timer);
+                       destroy_timer_on_stack(&timer);
+               }
+       }
        getrawmonotonic(&now);
+       trace_i915_gem_request_wait_end(ring, seqno);
 
        ring->irq_put(ring);
-       trace_i915_gem_request_wait_end(ring, seqno);
-#undef EXIT_COND
+
+       finish_wait(&ring->irq_queue, &wait);
 
        if (timeout) {
                struct timespec sleep_time = timespec_sub(now, before);
@@ -1059,17 +1106,7 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
                        set_normalized_timespec(timeout, 0, 0);
        }
 
-       switch (end) {
-       case -EIO:
-       case -EAGAIN: /* Wedged */
-       case -ERESTARTSYS: /* Signal */
-               return (int)end;
-       case 0: /* Timeout */
-               return -ETIME;
-       default: /* Completed */
-               WARN_ON(end < 0); /* We're not aware of other errors */
-               return 0;
-       }
+       return ret;
 }
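
The tail of the rewritten __wait_seqno() above samples a raw monotonic clock before and after the wait, subtracts the time actually slept from the caller-supplied timeout, and clamps the result at zero. A small userspace sketch of that bookkeeping, using clock_gettime(CLOCK_MONOTONIC_RAW) as a stand-in for getrawmonotonic(); the helper names are assumptions for the example.

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static struct timespec timespec_sub(struct timespec a, struct timespec b)
{
        struct timespec r = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };
        if (r.tv_nsec < 0) {
                r.tv_sec--;
                r.tv_nsec += 1000000000L;
        }
        return r;
}

int main(void)
{
        struct timespec before, after, timeout = { 0, 250 * 1000 * 1000 };

        clock_gettime(CLOCK_MONOTONIC_RAW, &before);
        usleep(100 * 1000);                     /* stand-in for the wait */
        clock_gettime(CLOCK_MONOTONIC_RAW, &after);

        struct timespec slept = timespec_sub(after, before);
        timeout = timespec_sub(timeout, slept);
        if (timeout.tv_sec < 0) {               /* slept longer than the timeout */
                timeout.tv_sec = 0;
                timeout.tv_nsec = 0;
        }

        printf("remaining: %ld.%09ld s\n", (long)timeout.tv_sec, timeout.tv_nsec);
        return 0;
}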
 
 /**
@@ -1097,7 +1134,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
 
        return __wait_seqno(ring, seqno,
                            atomic_read(&dev_priv->gpu_error.reset_counter),
-                           interruptible, NULL);
+                           interruptible, NULL, NULL);
 }
 
 static int
@@ -1147,6 +1184,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
  */
 static __must_check int
 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+                                           struct drm_file *file,
                                            bool readonly)
 {
        struct drm_device *dev = obj->base.dev;
@@ -1173,7 +1211,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file->driver_priv);
        mutex_lock(&dev->struct_mutex);
        if (ret)
                return ret;
@@ -1222,7 +1260,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
         * We will repeat the flush holding the lock in the normal manner
         * to catch cases where we are gazumped.
         */
-       ret = i915_gem_object_wait_rendering__nonblocking(obj, !write_domain);
+       ret = i915_gem_object_wait_rendering__nonblocking(obj, file, !write_domain);
        if (ret)
                goto unref;
 
@@ -1690,13 +1728,13 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-static long
+static unsigned long
 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
                  bool purgeable_only)
 {
        struct list_head still_bound_list;
        struct drm_i915_gem_object *obj, *next;
-       long count = 0;
+       unsigned long count = 0;
 
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.unbound_list,
@@ -1762,13 +1800,13 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
        return count;
 }
 
-static long
+static unsigned long
 i915_gem_purge(struct drm_i915_private *dev_priv, long target)
 {
        return __i915_gem_shrink(dev_priv, target, true);
 }
 
-static long
+static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
        struct drm_i915_gem_object *obj, *next;
@@ -1778,9 +1816,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 
        list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
                                 global_list) {
-               if (obj->pages_pin_count == 0)
+               if (i915_gem_object_put_pages(obj) == 0)
                        freed += obj->base.size >> PAGE_SHIFT;
-               i915_gem_object_put_pages(obj);
        }
        return freed;
 }
@@ -1865,6 +1902,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                        sg->length += PAGE_SIZE;
                }
                last_pfn = page_to_pfn(page);
+
+               /* Check that the i965g/gm workaround works. */
+               WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
        }
 #ifdef CONFIG_SWIOTLB
        if (!swiotlb_nr_tbl())
@@ -1918,7 +1958,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-void
+static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *ring)
 {
@@ -1957,6 +1997,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        }
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+                            struct intel_ring_buffer *ring)
+{
+       list_move_tail(&vma->mm_list, &vma->vm->active_list);
+       return i915_gem_object_move_to_active(vma->obj, ring);
+}
+
 static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
@@ -2078,11 +2125,10 @@ int __i915_add_request(struct intel_ring_buffer *ring,
        if (ret)
                return ret;
 
-       request = kmalloc(sizeof(*request), GFP_KERNEL);
-       if (request == NULL)
+       request = ring->preallocated_lazy_request;
+       if (WARN_ON(request == NULL))
                return -ENOMEM;
 
-
        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
@@ -2091,17 +2137,13 @@ int __i915_add_request(struct intel_ring_buffer *ring,
        request_ring_position = intel_ring_get_tail(ring);
 
        ret = ring->add_request(ring);
-       if (ret) {
-               kfree(request);
+       if (ret)
                return ret;
-       }
 
        request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
        request->head = request_start;
        request->tail = request_ring_position;
-       request->ctx = ring->last_context;
-       request->batch_obj = obj;
 
        /* Whilst this request exists, batch_obj will be on the
         * active_list, and so will hold the active reference. Only when this
@@ -2109,7 +2151,12 @@ int __i915_add_request(struct intel_ring_buffer *ring,
         * inactive_list and lose its active reference. Hence we do not need
         * to explicitly hold another reference here.
         */
+       request->batch_obj = obj;
 
+       /* Hold a reference to the current context so that we can inspect
+        * it later in case a hangcheck error event fires.
+        */
+       request->ctx = ring->last_context;
        if (request->ctx)
                i915_gem_context_reference(request->ctx);
 
@@ -2129,12 +2176,14 @@ int __i915_add_request(struct intel_ring_buffer *ring,
        }
 
        trace_i915_gem_request_add(ring, request->seqno);
-       ring->outstanding_lazy_request = 0;
+       ring->outstanding_lazy_seqno = 0;
+       ring->preallocated_lazy_request = NULL;
 
        if (!dev_priv->ums.mm_suspended) {
                i915_queue_hangcheck(ring->dev);
 
                if (was_empty) {
+                       cancel_delayed_work_sync(&dev_priv->mm.idle_work);
                        queue_delayed_work(dev_priv->wq,
                                           &dev_priv->mm.retire_work,
                                           round_jiffies_up_relative(HZ));
@@ -2156,10 +2205,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
                return;
 
        spin_lock(&file_priv->mm.lock);
-       if (request->file_priv) {
-               list_del(&request->client_list);
-               request->file_priv = NULL;
-       }
+       list_del(&request->client_list);
+       request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
 }
 
@@ -2224,6 +2271,21 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
        return false;
 }
 
+static bool i915_context_is_banned(const struct i915_ctx_hang_stats *hs)
+{
+       const unsigned long elapsed = get_seconds() - hs->guilty_ts;
+
+       if (hs->banned)
+               return true;
+
+       if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
+               DRM_ERROR("context hanging too fast, declaring banned!\n");
+               return true;
+       }
+
+       return false;
+}
+
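
i915_context_is_banned() above treats a context as banned either if it was banned before or if its most recent guilty hang happened within the ban period. A minimal userspace sketch of the same heuristic; the ban period constant below is an assumption for the example, not the driver's DRM_I915_CTX_BAN_PERIOD value.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define CTX_BAN_PERIOD_SECONDS 60   /* illustrative value only */

struct hang_stats {
        bool banned;
        time_t guilty_ts;           /* time of the last guilty hang */
};

static bool context_is_banned(const struct hang_stats *hs, time_t now)
{
        if (hs->banned)
                return true;

        /* hanging again too soon after the previous guilty hang */
        return (now - hs->guilty_ts) <= CTX_BAN_PERIOD_SECONDS;
}

int main(void)
{
        time_t now = time(NULL);
        struct hang_stats hs = { .banned = false, .guilty_ts = now - 10 };

        printf("banned: %d\n", context_is_banned(&hs, now));   /* 1 */
        hs.guilty_ts = now - 3600;
        printf("banned: %d\n", context_is_banned(&hs, now));   /* 0 */
        return 0;
}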
 static void i915_set_reset_status(struct intel_ring_buffer *ring,
                                  struct drm_i915_gem_request *request,
                                  u32 acthd)
@@ -2260,10 +2322,13 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
                hs = &request->file_priv->hang_stats;
 
        if (hs) {
-               if (guilty)
+               if (guilty) {
+                       hs->banned = i915_context_is_banned(hs);
                        hs->batch_active++;
-               else
+                       hs->guilty_ts = get_seconds();
+               } else {
                        hs->batch_pending++;
+               }
        }
 }
 
@@ -2341,6 +2406,8 @@ void i915_gem_reset(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i)
                i915_gem_reset_ring_lists(dev_priv, ring);
 
+       i915_gem_cleanup_ringbuffer(dev);
+
        i915_gem_restore_fences(dev);
 }
 
@@ -2405,57 +2472,53 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
        WARN_ON(i915_verify_lists(ring->dev));
 }
 
-void
+bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
+       bool idle = true;
        int i;
 
-       for_each_ring(ring, dev_priv, i)
+       for_each_ring(ring, dev_priv, i) {
                i915_gem_retire_requests_ring(ring);
+               idle &= list_empty(&ring->request_list);
+       }
+
+       if (idle)
+               mod_delayed_work(dev_priv->wq,
+                                  &dev_priv->mm.idle_work,
+                                  msecs_to_jiffies(100));
+
+       return idle;
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
-       drm_i915_private_t *dev_priv;
-       struct drm_device *dev;
-       struct intel_ring_buffer *ring;
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), mm.retire_work.work);
+       struct drm_device *dev = dev_priv->dev;
        bool idle;
-       int i;
-
-       dev_priv = container_of(work, drm_i915_private_t,
-                               mm.retire_work.work);
-       dev = dev_priv->dev;
 
        /* Come back later if the device is busy... */
-       if (!mutex_trylock(&dev->struct_mutex)) {
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
-                                  round_jiffies_up_relative(HZ));
-               return;
-       }
-
-       i915_gem_retire_requests(dev);
-
-       /* Send a periodic flush down the ring so we don't hold onto GEM
-        * objects indefinitely.
-        */
-       idle = true;
-       for_each_ring(ring, dev_priv, i) {
-               if (ring->gpu_caches_dirty)
-                       i915_add_request(ring, NULL);
-
-               idle &= list_empty(&ring->request_list);
+       idle = false;
+       if (mutex_trylock(&dev->struct_mutex)) {
+               idle = i915_gem_retire_requests(dev);
+               mutex_unlock(&dev->struct_mutex);
        }
-
-       if (!dev_priv->ums.mm_suspended && !idle)
+       if (!idle)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
                                   round_jiffies_up_relative(HZ));
-       if (idle)
-               intel_mark_idle(dev);
+}
 
-       mutex_unlock(&dev->struct_mutex);
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), mm.idle_work.work);
+
+       intel_mark_idle(dev_priv->dev);
 }
 
 /**
@@ -2553,7 +2616,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
 
-       ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
+       ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
        if (timeout)
                args->timeout_ns = timespec_to_ns(timeout);
        return ret;
@@ -2600,6 +2663,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
+       trace_i915_gem_ring_sync_to(from, to, seqno);
        ret = to->sync_to(to, from, seqno);
        if (!ret)
                /* We use last_read_seqno because sync_to()
@@ -2641,11 +2705,17 @@ int i915_vma_unbind(struct i915_vma *vma)
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
        int ret;
 
+       /* For now we only ever use 1 vma per object */
+       WARN_ON(!list_is_singular(&obj->vma_list));
+
        if (list_empty(&vma->vma_link))
                return 0;
 
-       if (!drm_mm_node_allocated(&vma->node))
-               goto destroy;
+       if (!drm_mm_node_allocated(&vma->node)) {
+               i915_gem_vma_destroy(vma);
+
+               return 0;
+       }
 
        if (obj->pin_count)
                return -EBUSY;
@@ -2685,13 +2755,10 @@ int i915_vma_unbind(struct i915_vma *vma)
 
        drm_mm_remove_node(&vma->node);
 
-destroy:
        i915_gem_vma_destroy(vma);
 
        /* Since the unbound list is global, only move to that list if
-        * no more VMAs exist.
-        * NB: Until we have real VMAs there will only ever be one */
-       WARN_ON(!list_empty(&obj->vma_list));
+        * no more VMAs exist. */
        if (list_empty(&obj->vma_list))
                list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
 
@@ -3389,8 +3456,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
        /* And bump the LRU for this access */
        if (i915_gem_object_is_inactive(obj)) {
-               struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-                                                          &dev_priv->gtt.base);
+               struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
                if (vma)
                        list_move_tail(&vma->mm_list,
                                       &dev_priv->gtt.base.inactive_list);
@@ -3761,7 +3827,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
        if (seqno == 0)
                return 0;
 
-       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
+       ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -3865,6 +3931,11 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
+       if (obj->user_pin_count == ULONG_MAX) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        if (obj->user_pin_count == 0) {
                ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
                if (ret)
@@ -4015,7 +4086,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
        INIT_LIST_HEAD(&obj->global_list);
        INIT_LIST_HEAD(&obj->ring_list);
-       INIT_LIST_HEAD(&obj->exec_list);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
 
@@ -4087,13 +4157,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        return obj;
 }
 
-int i915_gem_init_object(struct drm_gem_object *obj)
-{
-       BUG();
-
-       return 0;
-}
-
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
@@ -4147,8 +4210,19 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        i915_gem_object_free(obj);
 }
 
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm)
+{
+       struct i915_vma *vma;
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               if (vma->vm == vm)
+                       return vma;
+
+       return NULL;
+}
+
+static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
+                                             struct i915_address_space *vm)
 {
        struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
        if (vma == NULL)
@@ -4169,76 +4243,103 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
        return vma;
 }
 
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+                                 struct i915_address_space *vm)
+{
+       struct i915_vma *vma;
+
+       vma = i915_gem_obj_to_vma(obj, vm);
+       if (!vma)
+               vma = __i915_gem_vma_create(obj, vm);
+
+       return vma;
+}
+
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
        WARN_ON(vma->node.allocated);
+
+       /* Keep the vma as a placeholder in the execbuffer reservation lists */
+       if (!list_empty(&vma->exec_list))
+               return;
+
        list_del(&vma->vma_link);
+
        kfree(vma);
 }
 
 int
-i915_gem_idle(struct drm_device *dev)
+i915_gem_suspend(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret;
+       int ret = 0;
 
-       if (dev_priv->ums.mm_suspended) {
-               mutex_unlock(&dev->struct_mutex);
-               return 0;
-       }
+       mutex_lock(&dev->struct_mutex);
+       if (dev_priv->ums.mm_suspended)
+               goto err;
 
        ret = i915_gpu_idle(dev);
-       if (ret) {
-               mutex_unlock(&dev->struct_mutex);
-               return ret;
-       }
+       if (ret)
+               goto err;
+
        i915_gem_retire_requests(dev);
 
        /* Under UMS, be paranoid and evict. */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_gem_evict_everything(dev);
 
-       del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
-
        i915_kernel_lost_context(dev);
        i915_gem_cleanup_ringbuffer(dev);
 
-       /* Cancel the retire work handler, which should be idle now. */
+       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
+        * We need to replace this with a semaphore, or something.
+        * And not confound ums.mm_suspended!
+        */
+       dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
+                                                            DRIVER_MODESET);
+       mutex_unlock(&dev->struct_mutex);
+
+       del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+       cancel_delayed_work_sync(&dev_priv->mm.idle_work);
 
        return 0;
+
+err:
+       mutex_unlock(&dev->struct_mutex);
+       return ret;
 }
 
-void i915_gem_l3_remap(struct drm_device *dev)
+int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice)
 {
+       struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       u32 misccpctl;
-       int i;
-
-       if (!HAS_L3_GPU_CACHE(dev))
-               return;
+       u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
+       u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
+       int i, ret;
 
-       if (!dev_priv->l3_parity.remap_info)
-               return;
+       if (!HAS_L3_DPF(dev) || !remap_info)
+               return 0;
 
-       misccpctl = I915_READ(GEN7_MISCCPCTL);
-       I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-       POSTING_READ(GEN7_MISCCPCTL);
+       ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+       if (ret)
+               return ret;
 
+       /*
+        * Note: We do not worry about the concurrent register cacheline hang
+        * here because no other code should access these registers other than
+        * at initialization time.
+        */
        for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
-               u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
-               if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
-                       DRM_DEBUG("0x%x was already programmed to %x\n",
-                                 GEN7_L3LOG_BASE + i, remap);
-               if (remap && !dev_priv->l3_parity.remap_info[i/4])
-                       DRM_DEBUG_DRIVER("Clearing remapped register\n");
-               I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
+               intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+               intel_ring_emit(ring, reg_base + i);
+               intel_ring_emit(ring, remap_info[i/4]);
        }
 
-       /* Make sure all the writes land before disabling dop clock gating */
-       POSTING_READ(GEN7_L3LOG_BASE);
+       intel_ring_advance(ring);
 
-       I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+       return ret;
 }
 
 void i915_gem_init_swizzling(struct drm_device *dev)
@@ -4330,7 +4431,7 @@ int
 i915_gem_init_hw(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       int ret;
+       int ret, i;
 
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;
@@ -4338,20 +4439,26 @@ i915_gem_init_hw(struct drm_device *dev)
        if (dev_priv->ellc_size)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
 
+       if (IS_HSW_GT3(dev))
+               I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_ENABLED);
+       else
+               I915_WRITE(MI_PREDICATE_RESULT_2, LOWER_SLICE_DISABLED);
+
        if (HAS_PCH_NOP(dev)) {
                u32 temp = I915_READ(GEN7_MSG_CTL);
                temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
                I915_WRITE(GEN7_MSG_CTL, temp);
        }
 
-       i915_gem_l3_remap(dev);
-
        i915_gem_init_swizzling(dev);
 
        ret = i915_gem_init_rings(dev);
        if (ret)
                return ret;
 
+       for (i = 0; i < NUM_L3_SLICES(dev); i++)
+               i915_gem_l3_remap(&dev_priv->ring[RCS], i);
+
        /*
         * XXX: There was some w/a described somewhere suggesting loading
         * contexts before PPGTT.
@@ -4454,26 +4561,12 @@ int
 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return 0;
 
        drm_irq_uninstall(dev);
 
-       mutex_lock(&dev->struct_mutex);
-       ret =  i915_gem_idle(dev);
-
-       /* Hack!  Don't let anybody do execbuf while we don't control the chip.
-        * We need to replace this with a semaphore, or something.
-        * And not confound ums.mm_suspended!
-        */
-       if (ret != 0)
-               dev_priv->ums.mm_suspended = 1;
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
+       return i915_gem_suspend(dev);
 }
 
 void
@@ -4484,11 +4577,9 @@ i915_gem_lastclose(struct drm_device *dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;
 
-       mutex_lock(&dev->struct_mutex);
-       ret = i915_gem_idle(dev);
+       ret = i915_gem_suspend(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
-       mutex_unlock(&dev->struct_mutex);
 }
 
 static void
@@ -4523,6 +4614,7 @@ i915_gem_load(struct drm_device *dev)
        INIT_LIST_HEAD(&dev_priv->vm_list);
        i915_init_vm(dev_priv, &dev_priv->gtt.base);
 
+       INIT_LIST_HEAD(&dev_priv->context_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4532,6 +4624,8 @@ i915_gem_load(struct drm_device *dev)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
                          i915_gem_retire_work_handler);
+       INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+                         i915_gem_idle_work_handler);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
        /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
@@ -4582,7 +4676,7 @@ static int i915_gem_init_phys_object(struct drm_device *dev,
        if (dev_priv->mm.phys_objs[id - 1] || !size)
                return 0;
 
-       phys_obj = kzalloc(sizeof(struct drm_i915_gem_phys_object), GFP_KERNEL);
+       phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL);
        if (!phys_obj)
                return -ENOMEM;
 
@@ -4756,6 +4850,8 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
+       cancel_delayed_work_sync(&file_priv->mm.idle_work);
+
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
@@ -4773,6 +4869,38 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
        spin_unlock(&file_priv->mm.lock);
 }
 
+static void
+i915_gem_file_idle_work_handler(struct work_struct *work)
+{
+       struct drm_i915_file_private *file_priv =
+               container_of(work, typeof(*file_priv), mm.idle_work.work);
+
+       atomic_set(&file_priv->rps_wait_boost, false);
+}
+
+int i915_gem_open(struct drm_device *dev, struct drm_file *file)
+{
+       struct drm_i915_file_private *file_priv;
+
+       DRM_DEBUG_DRIVER("\n");
+
+       file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
+               return -ENOMEM;
+
+       file->driver_priv = file_priv;
+       file_priv->dev_priv = dev->dev_private;
+
+       spin_lock_init(&file_priv->mm.lock);
+       INIT_LIST_HEAD(&file_priv->mm.request_list);
+       INIT_DELAYED_WORK(&file_priv->mm.idle_work,
+                         i915_gem_file_idle_work_handler);
+
+       idr_init(&file_priv->context_idr);
+
+       return 0;
+}
+
 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 {
        if (!mutex_is_locked(mutex))
@@ -4823,6 +4951,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
+
        return count;
 }
 
@@ -4859,11 +4988,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
-       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
-       struct i915_address_space *vm;
+       struct i915_vma *vma;
 
-       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
-               if (i915_gem_obj_bound(o, vm))
+       list_for_each_entry(vma, &o->vma_list, vma_link)
+               if (drm_mm_node_allocated(&vma->node))
                        return true;
 
        return false;
@@ -4895,7 +5023,6 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
                             struct drm_i915_private,
                             mm.inactive_shrinker);
        struct drm_device *dev = dev_priv->dev;
-       int nr_to_scan = sc->nr_to_scan;
        unsigned long freed;
        bool unlock = true;
 
@@ -4909,38 +5036,30 @@ i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
                unlock = false;
        }
 
-       freed = i915_gem_purge(dev_priv, nr_to_scan);
-       if (freed < nr_to_scan)
-               freed += __i915_gem_shrink(dev_priv, nr_to_scan,
-                                                       false);
-       if (freed < nr_to_scan)
+       freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
+       if (freed < sc->nr_to_scan)
+               freed += __i915_gem_shrink(dev_priv,
+                                          sc->nr_to_scan - freed,
+                                          false);
+       if (freed < sc->nr_to_scan)
                freed += i915_gem_shrink_all(dev_priv);
 
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
+
        return freed;
 }
 
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-                                    struct i915_address_space *vm)
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
-       list_for_each_entry(vma, &obj->vma_list, vma_link)
-               if (vma->vm == vm)
-                       return vma;
 
-       return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                                 struct i915_address_space *vm)
-{
-       struct i915_vma *vma;
+       if (WARN_ON(list_empty(&obj->vma_list)))
+               return NULL;
 
-       vma = i915_gem_obj_to_vma(obj, vm);
-       if (!vma)
-               vma = i915_gem_vma_create(obj, vm);
+       vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
+       if (WARN_ON(vma->vm != obj_to_ggtt(obj)))
+               return NULL;
 
        return vma;
 }
index 403309c2a7d6e984c20404c38c72074f846712f7..cc619c138777165b8e05550b4d09cf43e2281135 100644 (file)
@@ -73,7 +73,7 @@
  *
  * There are two confusing terms used above:
  *  The "current context" means the context which is currently running on the
- *  GPU. The GPU has loaded it's state already and has stored away the gtt
+ *  GPU. The GPU has loaded its state already and has stored away the gtt
  *  offset of the BO. The GPU is not actively referencing the data at this
  *  offset, but it will on the next context switch. The only way to avoid this
  *  is to do a GPU reset.
@@ -129,6 +129,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
        struct i915_hw_context *ctx = container_of(ctx_ref,
                                                   typeof(*ctx), ref);
 
+       list_del(&ctx->link);
        drm_gem_object_unreference(&ctx->obj->base);
        kfree(ctx);
 }
@@ -147,6 +148,7 @@ create_hw_context(struct drm_device *dev,
 
        kref_init(&ctx->ref);
        ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
+       INIT_LIST_HEAD(&ctx->link);
        if (ctx->obj == NULL) {
                kfree(ctx);
                DRM_DEBUG_DRIVER("Context object allocation failed\n");
@@ -166,6 +168,7 @@ create_hw_context(struct drm_device *dev,
         * assertion in the context switch code.
         */
        ctx->ring = &dev_priv->ring[RCS];
+       list_add_tail(&ctx->link, &dev_priv->context_list);
 
        /* Default context will never have a file_priv */
        if (file_priv == NULL)
@@ -178,6 +181,10 @@ create_hw_context(struct drm_device *dev,
 
        ctx->file_priv = file_priv;
        ctx->id = ret;
+       /* NB: Mark all slices as needing a remap so that when the context first
+        * loads it will restore whatever remap state already exists. If there
+        * is no remap info, it will be a NOP. */
+       ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1;
 
        return ctx;
 
@@ -213,7 +220,6 @@ static int create_default_context(struct drm_i915_private *dev_priv)
         * may not be available. To avoid this we always pin the
         * default context.
         */
-       dev_priv->ring[RCS].default_context = ctx;
        ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
        if (ret) {
                DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
@@ -226,6 +232,8 @@ static int create_default_context(struct drm_i915_private *dev_priv)
                goto err_unpin;
        }
 
+       dev_priv->ring[RCS].default_context = ctx;
+
        DRM_DEBUG_DRIVER("Default HW context loaded\n");
        return 0;
 
@@ -281,16 +289,24 @@ void i915_gem_context_fini(struct drm_device *dev)
         * other code, leading to spurious errors. */
        intel_gpu_reset(dev);
 
-       i915_gem_object_unpin(dctx->obj);
-
        /* When default context is created and switched to, base object refcount
         * will be 2 (+1 from object creation and +1 from do_switch()).
         * i915_gem_context_fini() will be called after gpu_idle() has switched
         * to default context. So we need to unreference the base object once
         * to offset the do_switch part, so that i915_gem_context_unreference()
         * can then free the base object correctly. */
-       drm_gem_object_unreference(&dctx->obj->base);
+       WARN_ON(!dev_priv->ring[RCS].last_context);
+       if (dev_priv->ring[RCS].last_context == dctx) {
+               /* Fake switch to NULL context */
+               WARN_ON(dctx->obj->active);
+               i915_gem_object_unpin(dctx->obj);
+               i915_gem_context_unreference(dctx);
+       }
+
+       i915_gem_object_unpin(dctx->obj);
        i915_gem_context_unreference(dctx);
+       dev_priv->ring[RCS].default_context = NULL;
+       dev_priv->ring[RCS].last_context = NULL;
 }
 
 static int context_idr_cleanup(int id, void *p, void *data)
@@ -393,11 +409,11 @@ static int do_switch(struct i915_hw_context *to)
        struct intel_ring_buffer *ring = to->ring;
        struct i915_hw_context *from = ring->last_context;
        u32 hw_flags = 0;
-       int ret;
+       int ret, i;
 
        BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
 
-       if (from == to)
+       if (from == to && !to->remap_slice)
                return 0;
 
        ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
@@ -420,8 +436,6 @@ static int do_switch(struct i915_hw_context *to)
 
        if (!to->is_initialized || is_default_context(to))
                hw_flags |= MI_RESTORE_INHIBIT;
-       else if (WARN_ON_ONCE(from == to)) /* not yet expected */
-               hw_flags |= MI_FORCE_RESTORE;
 
        ret = mi_set_context(ring, to, hw_flags);
        if (ret) {
@@ -429,6 +443,18 @@ static int do_switch(struct i915_hw_context *to)
                return ret;
        }
 
+       for (i = 0; i < MAX_L3_SLICES; i++) {
+               if (!(to->remap_slice & (1<<i)))
+                       continue;
+
+               ret = i915_gem_l3_remap(ring, i);
+               /* If it failed, try again next round */
+               if (ret)
+                       DRM_DEBUG_DRIVER("L3 remapping failed\n");
+               else
+                       to->remap_slice &= ~(1<<i);
+       }
+
        /* The backing object for the context is done after switching to the
         * *next* context. Therefore we cannot retire the previous context until
         * the next context has already started running. In fact, the below code
@@ -436,11 +462,8 @@ static int do_switch(struct i915_hw_context *to)
         * MI_SET_CONTEXT instead of when the next seqno has completed.
         */
        if (from != NULL) {
-               struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
-               struct i915_address_space *ggtt = &dev_priv->gtt.base;
                from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-               list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
-               i915_gem_object_move_to_active(from->obj, ring);
+               i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->obj), ring);
                /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
                 * whole damn pipeline, we don't need to explicitly mark the
                 * object dirty. The only exception is that the context must be
@@ -451,17 +474,7 @@ static int do_switch(struct i915_hw_context *to)
                from->obj->dirty = 1;
                BUG_ON(from->obj->ring != ring);
 
-               ret = i915_add_request(ring, NULL);
-               if (ret) {
-                       /* Too late, we've already scheduled a context switch.
-                        * Try to undo the change so that the hw state is
-                        * consistent with out tracking. In case of emergency,
-                        * scream.
-                        */
-                       WARN_ON(mi_set_context(ring, from, MI_RESTORE_INHIBIT));
-                       return ret;
-               }
-
+               /* obj is kept alive until the next request by its active ref */
                i915_gem_object_unpin(from->obj);
                i915_gem_context_unreference(from);
        }
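
The do_switch() loop above walks the remap_slice bitmask that create_hw_context() initializes to "all slices pending": a successful i915_gem_l3_remap() clears the slice's bit, while a failure leaves it set so that slice is retried on a later context switch. A small userspace sketch of that retry-bitmask pattern; the fake remap_slice() failure is an assumption for illustration.

#include <stdio.h>

#define MAX_SLICES 2

/* stand-in for the remap call; pretend slice 1 fails on the first attempt */
static int remap_slice(int slice, int attempt)
{
        return (slice == 1 && attempt == 0) ? -1 : 0;
}

int main(void)
{
        unsigned int remap = (1u << MAX_SLICES) - 1;    /* all slices pending */

        for (int attempt = 0; remap && attempt < 2; attempt++) {
                for (int i = 0; i < MAX_SLICES; i++) {
                        if (!(remap & (1u << i)))
                                continue;               /* already remapped */

                        if (remap_slice(i, attempt) == 0)
                                remap &= ~(1u << i);    /* success: clear bit */
                        /* on failure the bit stays set and is retried later */
                }
                printf("after attempt %d: pending mask = 0x%x\n", attempt, remap);
        }
        return 0;
}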
index 91b70015585003540a35028d288c4c67f657f374..b7376533633d2cd74eeae8442a49588cf1af1cdc 100644 (file)
@@ -37,6 +37,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
        if (vma->obj->pin_count)
                return false;
 
+       if (WARN_ON(!list_empty(&vma->exec_list)))
+               return false;
+
        list_add(&vma->exec_list, unwind);
        return drm_mm_scan_add_block(&vma->node);
 }
@@ -113,7 +116,7 @@ none:
        }
 
        /* We expect the caller to unpin, evict all and try again, or give up.
-        * So calling i915_gem_evict_everything() is unnecessary.
+        * So calling i915_gem_evict_vm() is unnecessary.
         */
        return -ENOSPC;
 
@@ -152,12 +155,48 @@ found:
        return ret;
 }
 
+/**
+ * i915_gem_evict_vm - Try to free up VM space
+ *
+ * @vm: Address space to evict from
+ * @do_idle: Boolean directing whether to idle first.
+ *
+ * VM eviction is about freeing up virtual address space. If one wants fine
+ * grained eviction, see i915_gem_evict_something() instead. In terms
+ * of freeing up actual system memory, this function may not accomplish the
+ * desired result. An object may be shared across multiple address spaces,
+ * and this function does not guarantee that such objects will be freed.
+ *
+ * Using do_idle results in a more complete eviction because it retires and
+ * inactivates the current BOs.
+ */
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
+{
+       struct i915_vma *vma, *next;
+       int ret;
+
+       trace_i915_gem_evict_vm(vm);
+
+       if (do_idle) {
+               ret = i915_gpu_idle(vm->dev);
+               if (ret)
+                       return ret;
+
+               i915_gem_retire_requests(vm->dev);
+       }
+
+       list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+               if (vma->obj->pin_count == 0)
+                       WARN_ON(i915_vma_unbind(vma));
+
+       return 0;
+}
+
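
i915_gem_evict_vm() above iterates with list_for_each_entry_safe() because unbinding a VMA removes it from the inactive list being walked; the next element must be captured before the current one can go away, and pinned VMAs are skipped. A plain userspace sketch of that safe-iteration-with-removal pattern, using a minimal singly linked list and hypothetical names.

#include <stdio.h>
#include <stdlib.h>

struct vma {
        int id;
        int pin_count;
        struct vma *next;
};

static void evict_unpinned(struct vma **head)
{
        struct vma **link = head;
        struct vma *cur, *next;

        for (cur = *head; cur != NULL; cur = next) {
                next = cur->next;               /* grab next before freeing */
                if (cur->pin_count == 0) {
                        *link = next;           /* "unbind": unlink and free */
                        printf("evicting vma %d\n", cur->id);
                        free(cur);
                } else {
                        link = &cur->next;      /* pinned: keep it */
                }
        }
}

int main(void)
{
        struct vma *head = NULL;
        for (int i = 2; i >= 0; i--) {
                struct vma *v = malloc(sizeof(*v));
                v->id = i;
                v->pin_count = (i == 1);        /* pin vma 1 only */
                v->next = head;
                head = v;
        }
        evict_unpinned(&head);
        for (struct vma *v = head; v; v = v->next)
                printf("kept vma %d (pin_count=%d)\n", v->id, v->pin_count);
        return 0;
}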
 int
 i915_gem_evict_everything(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_address_space *vm;
-       struct i915_vma *vma, *next;
        bool lists_empty = true;
        int ret;
 
@@ -184,11 +223,8 @@ i915_gem_evict_everything(struct drm_device *dev)
        i915_gem_retire_requests(dev);
 
        /* Having flushed everything, unbind() should never raise an error */
-       list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
-               list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
-                       if (vma->obj->pin_count == 0)
-                               WARN_ON(i915_vma_unbind(vma));
-       }
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+               WARN_ON(i915_gem_evict_vm(vm, false));
 
        return 0;
 }
index bf345777ae9f76ec41e76c5ccf6ed3624cf4ef13..0ce0d47e4b0ffbafda92966e1916b732f5b84968 100644 (file)
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
-struct eb_objects {
-       struct list_head objects;
+struct eb_vmas {
+       struct list_head vmas;
        int and;
        union {
-               struct drm_i915_gem_object *lut[0];
+               struct i915_vma *lut[0];
                struct hlist_head buckets[0];
        };
 };
 
-static struct eb_objects *
-eb_create(struct drm_i915_gem_execbuffer2 *args)
+static struct eb_vmas *
+eb_create(struct drm_i915_gem_execbuffer2 *args, struct i915_address_space *vm)
 {
-       struct eb_objects *eb = NULL;
+       struct eb_vmas *eb = NULL;
 
        if (args->flags & I915_EXEC_HANDLE_LUT) {
-               int size = args->buffer_count;
-               size *= sizeof(struct drm_i915_gem_object *);
-               size += sizeof(struct eb_objects);
+               unsigned size = args->buffer_count;
+               size *= sizeof(struct i915_vma *);
+               size += sizeof(struct eb_vmas);
                eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        }
 
        if (eb == NULL) {
-               int size = args->buffer_count;
-               int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+               unsigned size = args->buffer_count;
+               unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
                BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
                while (count > 2*size)
                        count >>= 1;
                eb = kzalloc(count*sizeof(struct hlist_head) +
-                            sizeof(struct eb_objects),
+                            sizeof(struct eb_vmas),
                             GFP_TEMPORARY);
                if (eb == NULL)
                        return eb;
@@ -70,64 +70,102 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
        } else
                eb->and = -args->buffer_count;
 
-       INIT_LIST_HEAD(&eb->objects);
+       INIT_LIST_HEAD(&eb->vmas);
        return eb;
 }
 
 static void
-eb_reset(struct eb_objects *eb)
+eb_reset(struct eb_vmas *eb)
 {
        if (eb->and >= 0)
                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
 static int
-eb_lookup_objects(struct eb_objects *eb,
-                 struct drm_i915_gem_exec_object2 *exec,
-                 const struct drm_i915_gem_execbuffer2 *args,
-                 struct drm_file *file)
+eb_lookup_vmas(struct eb_vmas *eb,
+              struct drm_i915_gem_exec_object2 *exec,
+              const struct drm_i915_gem_execbuffer2 *args,
+              struct i915_address_space *vm,
+              struct drm_file *file)
 {
-       int i;
+       struct drm_i915_gem_object *obj;
+       struct list_head objects;
+       int i, ret = 0;
 
+       INIT_LIST_HEAD(&objects);
        spin_lock(&file->table_lock);
+       /* Grab a reference to the object and release the lock so we can lookup
+        * or create the VMA without using GFP_ATOMIC */
        for (i = 0; i < args->buffer_count; i++) {
-               struct drm_i915_gem_object *obj;
-
                obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
                if (obj == NULL) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
-                       return -ENOENT;
+                       ret = -ENOENT;
+                       goto out;
                }
 
-               if (!list_empty(&obj->exec_list)) {
+               if (!list_empty(&obj->obj_exec_link)) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                                   obj, exec[i].handle, i);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto out;
                }
 
                drm_gem_object_reference(&obj->base);
-               list_add_tail(&obj->exec_list, &eb->objects);
+               list_add_tail(&obj->obj_exec_link, &objects);
+       }
+       spin_unlock(&file->table_lock);
+
+       i = 0;
+       list_for_each_entry(obj, &objects, obj_exec_link) {
+               struct i915_vma *vma;
 
-               obj->exec_entry = &exec[i];
+               /*
+                * NOTE: We can leak any vmas created here when something fails
+                * later on. But that's no issue since vma_unbind can deal with
+                * vmas which are not actually bound. And since only
+                * lookup_or_create exists as an interface to get at the vma
+                * from the (obj, vm) we don't run the risk of creating
+                * duplicated vmas for the same vm.
+                */
+               vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+               if (IS_ERR(vma)) {
+                       DRM_DEBUG("Failed to lookup VMA\n");
+                       ret = PTR_ERR(vma);
+                       goto out;
+               }
+
+               list_add_tail(&vma->exec_list, &eb->vmas);
+
+               vma->exec_entry = &exec[i];
                if (eb->and < 0) {
-                       eb->lut[i] = obj;
+                       eb->lut[i] = vma;
                } else {
                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
-                       obj->exec_handle = handle;
-                       hlist_add_head(&obj->exec_node,
+                       vma->exec_handle = handle;
+                       hlist_add_head(&vma->exec_node,
                                       &eb->buckets[handle & eb->and]);
                }
+               ++i;
        }
-       spin_unlock(&file->table_lock);
 
-       return 0;
+
+out:
+       while (!list_empty(&objects)) {
+               obj = list_first_entry(&objects,
+                                      struct drm_i915_gem_object,
+                                      obj_exec_link);
+               list_del_init(&obj->obj_exec_link);
+               if (ret)
+                       drm_gem_object_unreference(&obj->base);
+       }
+       return ret;
 }
 
-static struct drm_i915_gem_object *
-eb_get_object(struct eb_objects *eb, unsigned long handle)
+static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 {
        if (eb->and < 0) {
                if (handle >= -eb->and)
@@ -139,34 +177,33 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
 
                head = &eb->buckets[handle & eb->and];
                hlist_for_each(node, head) {
-                       struct drm_i915_gem_object *obj;
+                       struct i915_vma *vma;
 
-                       obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
-                       if (obj->exec_handle == handle)
-                               return obj;
+                       vma = hlist_entry(node, struct i915_vma, exec_node);
+                       if (vma->exec_handle == handle)
+                               return vma;
                }
                return NULL;
        }
 }
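
eb_create()/eb_get_vma() above keep two lookup modes: a direct index table when I915_EXEC_HANDLE_LUT provides small dense indices, and power-of-two hash buckets indexed by handle & eb->and otherwise. A hedged userspace sketch of the masked-bucket side; the types, bucket count, and singly linked chaining here are assumptions for the example.

#include <stdio.h>

struct entry {
        unsigned long handle;
        struct entry *next;
};

#define NBUCKETS 8                      /* must be a power of two */

static struct entry *buckets[NBUCKETS];

static void add(struct entry *e)
{
        unsigned int idx = e->handle & (NBUCKETS - 1);
        e->next = buckets[idx];
        buckets[idx] = e;
}

static struct entry *lookup(unsigned long handle)
{
        unsigned int idx = handle & (NBUCKETS - 1);
        for (struct entry *e = buckets[idx]; e; e = e->next)
                if (e->handle == handle)
                        return e;
        return NULL;
}

int main(void)
{
        struct entry a = { .handle = 3 }, b = { .handle = 11 };  /* same bucket */

        add(&a);
        add(&b);
        printf("found 11: %p\n", (void *)lookup(11));
        printf("found 7:  %p\n", (void *)lookup(7));   /* NULL, never inserted */
        return 0;
}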
 
-static void
-eb_destroy(struct eb_objects *eb)
-{
-       while (!list_empty(&eb->objects)) {
-               struct drm_i915_gem_object *obj;
+static void eb_destroy(struct eb_vmas *eb)
+{
+       while (!list_empty(&eb->vmas)) {
+               struct i915_vma *vma;
 
-               obj = list_first_entry(&eb->objects,
-                                      struct drm_i915_gem_object,
+               vma = list_first_entry(&eb->vmas,
+                                      struct i915_vma,
                                       exec_list);
-               list_del_init(&obj->exec_list);
-               drm_gem_object_unreference(&obj->base);
+               list_del_init(&vma->exec_list);
+               drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
 }
 
 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
-       return (obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
+       return (HAS_LLC(obj->base.dev) ||
+               obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
                !obj->map_and_fenceable ||
                obj->cache_level != I915_CACHE_NONE);
 }
@@ -179,7 +216,7 @@ relocate_entry_cpu(struct drm_i915_gem_object *obj,
        char *vaddr;
        int ret = -EINVAL;
 
-       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+       ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret)
                return ret;
 
@@ -223,22 +260,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-                                  struct eb_objects *eb,
+                                  struct eb_vmas *eb,
                                   struct drm_i915_gem_relocation_entry *reloc,
                                   struct i915_address_space *vm)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
        struct drm_i915_gem_object *target_i915_obj;
+       struct i915_vma *target_vma;
        uint32_t target_offset;
        int ret = -EINVAL;
 
        /* we've already hold a reference to all valid objects */
-       target_obj = &eb_get_object(eb, reloc->target_handle)->base;
-       if (unlikely(target_obj == NULL))
+       target_vma = eb_get_vma(eb, reloc->target_handle);
+       if (unlikely(target_vma == NULL))
                return -ENOENT;
+       target_i915_obj = target_vma->obj;
+       target_obj = &target_vma->obj->base;
 
-       target_i915_obj = to_intel_bo(target_obj);
        target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
 
        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
@@ -320,14 +359,13 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-                                   struct eb_objects *eb,
-                                   struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
+                                struct eb_vmas *eb)
 {
 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
        struct drm_i915_gem_relocation_entry __user *user_relocs;
-       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int remain, ret;
 
        user_relocs = to_user_ptr(entry->relocs_ptr);
@@ -346,8 +384,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
                do {
                        u64 offset = r->presumed_offset;
 
-                       ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
-                                                                vm);
+                       ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r,
+                                                                vma->vm);
                        if (ret)
                                return ret;
 
@@ -368,17 +406,16 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-                                        struct eb_objects *eb,
-                                        struct drm_i915_gem_relocation_entry *relocs,
-                                        struct i915_address_space *vm)
+i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
+                                     struct eb_vmas *eb,
+                                     struct drm_i915_gem_relocation_entry *relocs)
 {
-       const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+       const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int i, ret;
 
        for (i = 0; i < entry->relocation_count; i++) {
-               ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
-                                                        vm);
+               ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i],
+                                                        vma->vm);
                if (ret)
                        return ret;
        }
@@ -387,10 +424,10 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 }
 
 static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb,
+i915_gem_execbuffer_relocate(struct eb_vmas *eb,
                             struct i915_address_space *vm)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        int ret = 0;
 
        /* This is the fast path and we cannot handle a pagefault whilst
@@ -401,8 +438,8 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
         * lockdep complains vehemently.
         */
        pagefault_disable();
-       list_for_each_entry(obj, &eb->objects, exec_list) {
-               ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
+       list_for_each_entry(vma, &eb->vmas, exec_list) {
+               ret = i915_gem_execbuffer_relocate_vma(vma, eb);
                if (ret)
                        break;
        }
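
The comment above explains why the fast path runs with page faults disabled and simply gives up when the relocations are not resident; the caller later redoes the work through the slow path. A minimal standalone sketch of that two-tier control flow, with hypothetical helpers rather than kernel API:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int relocate_fast(bool source_resident)
{
	/* Stands in for the pagefault_disable()d copy: it can only succeed
	 * when the data is already resident, otherwise it reports -EFAULT. */
	return source_resident ? 0 : -EFAULT;
}

static int relocate_slow(void)
{
	/* Stands in for the slow path, run after dropping the lock, where
	 * taking a page fault is allowed. */
	return 0;
}

static int relocate(bool source_resident)
{
	int ret = relocate_fast(source_resident);

	if (ret == -EFAULT)
		ret = relocate_slow();

	return ret;
}

int main(void)
{
	/* Resident data succeeds on the fast path; non-resident data still
	 * succeeds, but only via the fallback. */
	printf("%d %d\n", relocate(true), relocate(false));
	return 0;
}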
@@ -415,31 +452,32 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb,
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
-need_reloc_mappable(struct drm_i915_gem_object *obj)
+need_reloc_mappable(struct i915_vma *vma)
 {
-       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-       return entry->relocation_count && !use_cpu_reloc(obj);
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+       return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
+               i915_is_ggtt(vma->vm);
 }
 
 static int
-i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
-                                  struct intel_ring_buffer *ring,
-                                  struct i915_address_space *vm,
-                                  bool *need_reloc)
+i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
+                               struct intel_ring_buffer *ring,
+                               bool *need_reloc)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        bool need_fence, need_mappable;
+       struct drm_i915_gem_object *obj = vma->obj;
        int ret;
 
        need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
-       need_mappable = need_fence || need_reloc_mappable(obj);
+       need_mappable = need_fence || need_reloc_mappable(vma);
 
-       ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+       ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
                                  false);
        if (ret)
                return ret;
@@ -467,8 +505,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
                obj->has_aliasing_ppgtt_mapping = 1;
        }
 
-       if (entry->offset != i915_gem_obj_offset(obj, vm)) {
-               entry->offset = i915_gem_obj_offset(obj, vm);
+       if (entry->offset != vma->node.start) {
+               entry->offset = vma->node.start;
                *need_reloc = true;
        }
 
@@ -485,14 +523,15 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
 }
 
 static void
-i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry;
+       struct drm_i915_gem_object *obj = vma->obj;
 
-       if (!i915_gem_obj_bound_any(obj))
+       if (!drm_mm_node_allocated(&vma->node))
                return;
 
-       entry = obj->exec_entry;
+       entry = vma->exec_entry;
 
        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
                i915_gem_object_unpin_fence(obj);
@@ -505,41 +544,46 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
 
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
-                           struct list_head *objects,
-                           struct i915_address_space *vm,
+                           struct list_head *vmas,
                            bool *need_relocs)
 {
        struct drm_i915_gem_object *obj;
-       struct list_head ordered_objects;
+       struct i915_vma *vma;
+       struct i915_address_space *vm;
+       struct list_head ordered_vmas;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        int retry;
 
-       INIT_LIST_HEAD(&ordered_objects);
-       while (!list_empty(objects)) {
+       if (list_empty(vmas))
+               return 0;
+
+       vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
+
+       INIT_LIST_HEAD(&ordered_vmas);
+       while (!list_empty(vmas)) {
                struct drm_i915_gem_exec_object2 *entry;
                bool need_fence, need_mappable;
 
-               obj = list_first_entry(objects,
-                                      struct drm_i915_gem_object,
-                                      exec_list);
-               entry = obj->exec_entry;
+               vma = list_first_entry(vmas, struct i915_vma, exec_list);
+               obj = vma->obj;
+               entry = vma->exec_entry;
 
                need_fence =
                        has_fenced_gpu_access &&
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                        obj->tiling_mode != I915_TILING_NONE;
-               need_mappable = need_fence || need_reloc_mappable(obj);
+               need_mappable = need_fence || need_reloc_mappable(vma);
 
                if (need_mappable)
-                       list_move(&obj->exec_list, &ordered_objects);
+                       list_move(&vma->exec_list, &ordered_vmas);
                else
-                       list_move_tail(&obj->exec_list, &ordered_objects);
+                       list_move_tail(&vma->exec_list, &ordered_vmas);
 
                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
                obj->base.pending_write_domain = 0;
                obj->pending_fenced_gpu_access = false;
        }
-       list_splice(&ordered_objects, objects);
+       list_splice(&ordered_vmas, vmas);
 
        /* Attempt to pin all of the buffers into the GTT.
         * This is done in 3 phases:
@@ -558,52 +602,52 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                int ret = 0;
 
                /* Unbind any ill-fitting objects or pin. */
-               list_for_each_entry(obj, objects, exec_list) {
-                       struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+               list_for_each_entry(vma, vmas, exec_list) {
+                       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
                        bool need_fence, need_mappable;
-                       u32 obj_offset;
 
-                       if (!i915_gem_obj_bound(obj, vm))
+                       obj = vma->obj;
+
+                       if (!drm_mm_node_allocated(&vma->node))
                                continue;
 
-                       obj_offset = i915_gem_obj_offset(obj, vm);
                        need_fence =
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
-                       need_mappable = need_fence || need_reloc_mappable(obj);
+                       need_mappable = need_fence || need_reloc_mappable(vma);
 
                        WARN_ON((need_mappable || need_fence) &&
-                               !i915_is_ggtt(vm));
+                              !i915_is_ggtt(vma->vm));
 
                        if ((entry->alignment &&
-                            obj_offset & (entry->alignment - 1)) ||
+                            vma->node.start & (entry->alignment - 1)) ||
                            (need_mappable && !obj->map_and_fenceable))
-                               ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
+                               ret = i915_vma_unbind(vma);
                        else
-                               ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+                               ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }
 
                /* Bind fresh objects */
-               list_for_each_entry(obj, objects, exec_list) {
-                       if (i915_gem_obj_bound(obj, vm))
+               list_for_each_entry(vma, vmas, exec_list) {
+                       if (drm_mm_node_allocated(&vma->node))
                                continue;
 
-                       ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
+                       ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }
 
 err:           /* Decrement pin count for bound objects */
-               list_for_each_entry(obj, objects, exec_list)
-                       i915_gem_execbuffer_unreserve_object(obj);
+               list_for_each_entry(vma, vmas, exec_list)
+                       i915_gem_execbuffer_unreserve_vma(vma);
 
                if (ret != -ENOSPC || retry++)
                        return ret;
 
-               ret = i915_gem_evict_everything(ring->dev);
+               ret = i915_gem_evict_vm(vm, true);
                if (ret)
                        return ret;
        } while (1);
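
The reservation loop above tries to pin every buffer and, on the first -ENOSPC, evicts the whole address space before retrying exactly once. A standalone sketch of that control flow with stubbed helpers:

#include <errno.h>
#include <stdio.h>

static int try_reserve_all(int attempt)
{
	/* Pretend the first attempt runs out of space. */
	return attempt == 0 ? -ENOSPC : 0;
}

static int evict_everything(void)
{
	return 0;
}

static int reserve_with_retry(void)
{
	int retry = 0;

	do {
		int ret = try_reserve_all(retry);

		/* Any error other than -ENOSPC, or a second -ENOSPC,
		 * is returned to the caller. */
		if (ret != -ENOSPC || retry++)
			return ret;

		ret = evict_everything();
		if (ret)
			return ret;
	} while (1);
}

int main(void)
{
	printf("reserve: %d\n", reserve_with_retry());
	return 0;
}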
@@ -614,24 +658,27 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_i915_gem_execbuffer2 *args,
                                  struct drm_file *file,
                                  struct intel_ring_buffer *ring,
-                                 struct eb_objects *eb,
-                                 struct drm_i915_gem_exec_object2 *exec,
-                                 struct i915_address_space *vm)
+                                 struct eb_vmas *eb,
+                                 struct drm_i915_gem_exec_object2 *exec)
 {
        struct drm_i915_gem_relocation_entry *reloc;
-       struct drm_i915_gem_object *obj;
+       struct i915_address_space *vm;
+       struct i915_vma *vma;
        bool need_relocs;
        int *reloc_offset;
        int i, total, ret;
-       int count = args->buffer_count;
+       unsigned count = args->buffer_count;
+
+       if (WARN_ON(list_empty(&eb->vmas)))
+               return 0;
+
+       vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 
        /* We may process another execbuffer during the unlock... */
-       while (!list_empty(&eb->objects)) {
-               obj = list_first_entry(&eb->objects,
-                                      struct drm_i915_gem_object,
-                                      exec_list);
-               list_del_init(&obj->exec_list);
-               drm_gem_object_unreference(&obj->base);
+       while (!list_empty(&eb->vmas)) {
+               vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
+               list_del_init(&vma->exec_list);
+               drm_gem_object_unreference(&vma->obj->base);
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -695,20 +742,19 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
        /* reacquire the objects */
        eb_reset(eb);
-       ret = eb_lookup_objects(eb, exec, args, file);
+       ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;
 
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;
 
-       list_for_each_entry(obj, &eb->objects, exec_list) {
-               int offset = obj->exec_entry - exec;
-               ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
-                                                              reloc + reloc_offset[offset],
-                                                              vm);
+       list_for_each_entry(vma, &eb->vmas, exec_list) {
+               int offset = vma->exec_entry - exec;
+               ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
+                                                           reloc + reloc_offset[offset]);
                if (ret)
                        goto err;
        }
@@ -727,14 +773,15 @@ err:
 
 static int
 i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
-                               struct list_head *objects)
+                               struct list_head *vmas)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
        int ret;
 
-       list_for_each_entry(obj, objects, exec_list) {
+       list_for_each_entry(vma, vmas, exec_list) {
+               struct drm_i915_gem_object *obj = vma->obj;
                ret = i915_gem_object_sync(obj, ring);
                if (ret)
                        return ret;
@@ -771,8 +818,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                   int count)
 {
        int i;
-       int relocs_total = 0;
-       int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+       unsigned relocs_total = 0;
+       unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 
        for (i = 0; i < count; i++) {
                char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
@@ -809,13 +856,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 }
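
Switching relocs_total/relocs_max to unsigned above supports an overflow-proof running total: each per-buffer relocation count is compared against the remaining headroom rather than summed first. A standalone sketch of that kind of check, with a hypothetical entry type:

#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reloc_entry { uint64_t dummy[4]; };

static int validate_counts(const unsigned int *counts, size_t n)
{
	unsigned int total = 0;
	const unsigned int max = UINT_MAX / sizeof(struct reloc_entry);

	for (size_t i = 0; i < n; i++) {
		/* "counts[i] > max - total" cannot overflow, unlike
		 * "total + counts[i] > max". */
		if (counts[i] > max - total)
			return -1;
		total += counts[i];
	}
	return 0;
}

int main(void)
{
	unsigned int ok[] = { 10, 20, 30 };
	unsigned int bad[] = { UINT_MAX / 32, UINT_MAX / 32 };

	printf("%d %d\n", validate_counts(ok, 3), validate_counts(bad, 2));
	return 0;
}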
 
 static void
-i915_gem_execbuffer_move_to_active(struct list_head *objects,
-                                  struct i915_address_space *vm,
+i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_ring_buffer *ring)
 {
-       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
 
-       list_for_each_entry(obj, objects, exec_list) {
+       list_for_each_entry(vma, vmas, exec_list) {
+               struct drm_i915_gem_object *obj = vma->obj;
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;
 
@@ -825,9 +872,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
-               /* FIXME: This lookup gets fixed later <-- danvet */
-               list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
-               i915_gem_object_move_to_active(obj, ring);
+               i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -885,10 +930,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct i915_address_space *vm)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct eb_objects *eb;
+       struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
+       struct i915_ctx_hang_stats *hs;
        u32 ctx_id = i915_execbuffer2_get_context_id(*args);
        u32 exec_start, exec_len;
        u32 mask, flags;
@@ -1000,7 +1046,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                        return -EINVAL;
                }
 
-               cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+               cliprects = kcalloc(args->num_cliprects,
+                                   sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
@@ -1025,7 +1072,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                goto pre_mutex_err;
        }
 
-       eb = eb_create(args);
+       eb = eb_create(args, vm);
        if (eb == NULL) {
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
@@ -1033,18 +1080,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
 
        /* Look up object handles */
-       ret = eb_lookup_objects(eb, exec, args, file);
+       ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;
 
        /* take note of the batch buffer before we might reorder the lists */
-       batch_obj = list_entry(eb->objects.prev,
-                              struct drm_i915_gem_object,
-                              exec_list);
+       batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;
 
        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-       ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
+       ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;
 
@@ -1054,7 +1099,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
-                                                               eb, exec, vm);
+                                                               eb, exec);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
@@ -1076,10 +1121,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
                i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
-       ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
+       ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
        if (ret)
                goto err;
 
+       hs = i915_gem_context_get_hang_stats(dev, file, ctx_id);
+       if (IS_ERR(hs)) {
+               ret = PTR_ERR(hs);
+               goto err;
+       }
+
+       if (hs->banned) {
+               ret = -EIO;
+               goto err;
+       }
+
        ret = i915_switch_context(ring, file, ctx_id);
        if (ret)
                goto err;
@@ -1131,7 +1187,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
-       i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
+       i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
 
 err:
index 212f6d8c35ec6593cc54957ecd24445196d26657..c4c42e7cbd7bbf9a6641d1ca688e9b6be50d9f75 100644 (file)
 #define HSW_WT_ELLC_LLC_AGE0           HSW_CACHEABILITY_CONTROL(0x6)
 
 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -79,9 +80,10 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -105,9 +107,10 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES  (1 << 2)
 
 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        /* Mark the page as writeable.  Other platforms don't have a
@@ -122,9 +125,10 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level)
+                                    enum i915_cache_level level,
+                                    bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
 
        if (level != I915_CACHE_NONE)
@@ -134,9 +138,10 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
 }
 
 static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
-                                     enum i915_cache_level level)
+                                     enum i915_cache_level level,
+                                     bool valid)
 {
-       gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -236,7 +241,8 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
 /* PPGTT support for Sandybridge/Gen6 and later */
 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
                                   unsigned first_entry,
-                                  unsigned num_entries)
+                                  unsigned num_entries,
+                                  bool use_scratch)
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
@@ -245,7 +251,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);
 
        while (num_entries) {
                last_pte = first_pte + num_entries;
@@ -282,7 +288,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
                dma_addr_t page_addr;
 
                page_addr = sg_page_iter_dma_address(&sg_iter);
-               pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
+               pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
                if (++act_pte == I915_PPGTT_PT_ENTRIES) {
                        kunmap_atomic(pt_vaddr);
                        act_pt++;
@@ -336,7 +342,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
        ppgtt->base.cleanup = gen6_ppgtt_cleanup;
        ppgtt->base.scratch = dev_priv->gtt.base.scratch;
-       ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
+       ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
                return -ENOMEM;
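
Several kmalloc(n * size, ...) call sites in this series become kcalloc(n, size, ...); the two-argument form lets the allocator reject a multiplication overflow instead of silently under-allocating. A userspace analogue of that guard:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
	/* The same overflow check kcalloc-style allocators apply internally. */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return calloc(n, size);
}

int main(void)
{
	void *ok = alloc_array(1024, sizeof(long));
	void *overflow = alloc_array(SIZE_MAX / 2, 4);

	printf("ok=%p overflow=%p\n", ok, overflow);
	free(ok);
	return 0;
}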
@@ -347,7 +353,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
                        goto err_pt_alloc;
        }
 
-       ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
+       ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
                                     GFP_KERNEL);
        if (!ppgtt->pt_dma_addr)
                goto err_pt_alloc;
@@ -367,7 +373,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
        }
 
        ppgtt->base.clear_range(&ppgtt->base, 0,
-                               ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
+                               ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);
 
        ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
 
@@ -444,7 +450,8 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
 {
        ppgtt->base.clear_range(&ppgtt->base,
                                i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
-                               obj->base.size >> PAGE_SHIFT);
+                               obj->base.size >> PAGE_SHIFT,
+                               true);
 }
 
 extern int intel_iommu_gfx_mapped;
@@ -485,15 +492,65 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
                dev_priv->mm.interruptible = interruptible;
 }
 
+void i915_check_and_clear_faults(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_ring_buffer *ring;
+       int i;
+
+       if (INTEL_INFO(dev)->gen < 6)
+               return;
+
+       for_each_ring(ring, dev_priv, i) {
+               u32 fault_reg;
+               fault_reg = I915_READ(RING_FAULT_REG(ring));
+               if (fault_reg & RING_FAULT_VALID) {
+                       DRM_DEBUG_DRIVER("Unexpected fault\n"
+                                        "\tAddr: 0x%08lx\n"
+                                        "\tAddress space: %s\n"
+                                        "\tSource ID: %d\n"
+                                        "\tType: %d\n",
+                                        fault_reg & PAGE_MASK,
+                                        fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
+                                        RING_FAULT_SRCID(fault_reg),
+                                        RING_FAULT_FAULT_TYPE(fault_reg));
+                       I915_WRITE(RING_FAULT_REG(ring),
+                                  fault_reg & ~RING_FAULT_VALID);
+               }
+       }
+       POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
+}
+
+void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* Don't bother messing with faults pre GEN6 as we have little
+        * documentation supporting that it's a good idea.
+        */
+       if (INTEL_INFO(dev)->gen < 6)
+               return;
+
+       i915_check_and_clear_faults(dev);
+
+       dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+                                      dev_priv->gtt.base.start / PAGE_SIZE,
+                                      dev_priv->gtt.base.total / PAGE_SIZE,
+                                      false);
+}
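
The clear_range()/pte_encode() signature changes threaded through this file pass a validity flag down: suspend scrubs the GTT with use_scratch == false so the written PTEs end up without the valid bit, while the restore path keeps writing valid scratch-page PTEs. A simplified, hypothetical encoding that shows the shape of the change:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	(1u << 0)
#define PTE_ADDR(addr)	((uint32_t)((addr) & ~0xfffu))

static uint32_t pte_encode(uint64_t addr, bool valid)
{
	uint32_t pte = valid ? PTE_VALID : 0;

	pte |= PTE_ADDR(addr);
	return pte;
}

int main(void)
{
	uint64_t scratch = 0x1000;

	printf("restore: 0x%08x\n", pte_encode(scratch, true));  /* valid scratch */
	printf("suspend: 0x%08x\n", pte_encode(scratch, false)); /* valid bit clear */
	return 0;
}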
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 
+       i915_check_and_clear_faults(dev);
+
        /* First fill our portion of the GTT with scratch pages */
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                       dev_priv->gtt.base.start / PAGE_SIZE,
-                                      dev_priv->gtt.base.total / PAGE_SIZE);
+                                      dev_priv->gtt.base.total / PAGE_SIZE,
+                                      true);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                i915_gem_clflush_object(obj, obj->pin_display);
@@ -536,7 +593,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 
        for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
                addr = sg_page_iter_dma_address(&sg_iter);
-               iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
+               iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
                i++;
        }
 
@@ -548,7 +605,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
         */
        if (i != 0)
                WARN_ON(readl(&gtt_entries[i-1]) !=
-                       vm->pte_encode(addr, level));
+                       vm->pte_encode(addr, level, true));
 
        /* This next bit makes the above posting read even more important. We
         * want to flush the TLBs only after we're certain all the PTE updates
@@ -560,7 +617,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
-                                 unsigned int num_entries)
+                                 unsigned int num_entries,
+                                 bool use_scratch)
 {
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
        gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
@@ -573,7 +631,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                 first_entry, num_entries, max_entries))
                num_entries = max_entries;
 
-       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
+       scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);
+
        for (i = 0; i < num_entries; i++)
                iowrite32(scratch_pte, &gtt_base[i]);
        readl(gtt_base);
@@ -594,7 +653,8 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
                                  unsigned int first_entry,
-                                 unsigned int num_entries)
+                                 unsigned int num_entries,
+                                 bool unused)
 {
        intel_gtt_clear_range(first_entry, num_entries);
 }
@@ -622,7 +682,8 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 
        dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
                                       entry,
-                                      obj->base.size >> PAGE_SHIFT);
+                                      obj->base.size >> PAGE_SHIFT,
+                                      true);
 
        obj->has_global_gtt_mapping = 0;
 }
@@ -709,11 +770,11 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
                const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
+               ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
        }
 
        /* And finally clear the reserved guard page */
-       ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
+       ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
 }
 
 static bool
index e15a1d90037d7709b2f4c489074ef5779ac6c1a8..d284d892ed9491e8f2a618e22576673e06887f74 100644 (file)
@@ -395,7 +395,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        if (gtt_offset == I915_GTT_OFFSET_NONE)
                return obj;
 
-       vma = i915_gem_vma_create(obj, ggtt);
+       vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_out;
index 032e9ef9c89679228a03b2d668a5bf7f68f1587f..b1390534804888e46d4db065fcb0837814743e4c 100644 (file)
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       if (obj->pin_count) {
+       if (obj->pin_count || obj->framebuffer_references) {
                drm_gem_object_unreference_unlocked(&obj->base);
                return -EBUSY;
        }
@@ -393,7 +393,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
        /* Try to preallocate memory required to save swizzling on put-pages */
        if (i915_gem_object_needs_bit17_swizzle(obj)) {
                if (obj->bit_17 == NULL) {
-                       obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
+                       obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
                                              sizeof(long), GFP_KERNEL);
                }
        } else {
@@ -504,8 +504,8 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
        int i;
 
        if (obj->bit_17 == NULL) {
-               obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
-                                          sizeof(long), GFP_KERNEL);
+               obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+                                     sizeof(long), GFP_KERNEL);
                if (obj->bit_17 == NULL) {
                        DRM_ERROR("Failed to allocate memory for bit 17 "
                                  "record\n");
index dae364f0028cc94d58f87613449b41d7620d7f29..5dde8102647122cbea30fb27e681bbe9a6317e29 100644 (file)
@@ -215,6 +215,24 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
        }
 }
 
+static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+{
+       switch (a) {
+       case HANGCHECK_IDLE:
+               return "idle";
+       case HANGCHECK_WAIT:
+               return "wait";
+       case HANGCHECK_ACTIVE:
+               return "active";
+       case HANGCHECK_KICK:
+               return "kick";
+       case HANGCHECK_HUNG:
+               return "hung";
+       }
+
+       return "unknown";
+}
+
 static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
                                  struct drm_device *dev,
                                  struct drm_i915_error_state *error,
@@ -255,6 +273,9 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
        err_printf(m, "  waiting: %s\n", yesno(error->waiting[ring]));
        err_printf(m, "  ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
        err_printf(m, "  ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+       err_printf(m, "  hangcheck: %s [%d]\n",
+                  hangcheck_action_to_str(error->hangcheck_action[ring]),
+                  error->hangcheck_score[ring]);
 }
 
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -283,13 +304,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
                   error->time.tv_usec);
        err_printf(m, "Kernel: " UTS_RELEASE "\n");
-       err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+       err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
        err_printf(m, "EIR: 0x%08x\n", error->eir);
        err_printf(m, "IER: 0x%08x\n", error->ier);
        err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
        err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
        err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
        err_printf(m, "CCID: 0x%08x\n", error->ccid);
+       err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
 
        for (i = 0; i < dev_priv->num_fence_regs; i++)
                err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
@@ -720,6 +742,9 @@ static void i915_record_ring_state(struct drm_device *dev,
 
        error->cpu_ring_head[ring->id] = ring->head;
        error->cpu_ring_tail[ring->id] = ring->tail;
+
+       error->hangcheck_score[ring->id] = ring->hangcheck.score;
+       error->hangcheck_action[ring->id] = ring->hangcheck.action;
 }
 
 
@@ -769,7 +794,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 
                error->ring[i].num_requests = count;
                error->ring[i].requests =
-                       kmalloc(count*sizeof(struct drm_i915_error_request),
+                       kcalloc(count, sizeof(*error->ring[i].requests),
                                GFP_ATOMIC);
                if (error->ring[i].requests == NULL) {
                        error->ring[i].num_requests = 0;
@@ -811,7 +836,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
        error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
 
        if (i) {
-               active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+               active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
                if (active_bo)
                        pinned_bo = active_bo + error->active_bo_count[ndx];
        }
@@ -885,8 +910,12 @@ void i915_capture_error_state(struct drm_device *dev)
                return;
        }
 
-       DRM_INFO("capturing error event; look for more information in "
-                "/sys/class/drm/card%d/error\n", dev->primary->index);
+       DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
+                dev->primary->index);
+       DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+       DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+       DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+       DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
 
        kref_init(&error->ref);
        error->eir = I915_READ(EIR);
@@ -988,6 +1017,7 @@ const char *i915_cache_level_str(int type)
        case I915_CACHE_NONE: return " uncached";
        case I915_CACHE_LLC: return " snooped or LLC";
        case I915_CACHE_L3_LLC: return " L3+LLC";
+       case I915_CACHE_WT: return " WT";
        default: return "";
        }
 }
index 4b91228fd9bd8e50e1319816a9fe53b6f166ec68..a9f0cb6d0c12f16a89c1cc0132eb4d16b0eee974 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <linux/sysrq.h>
 #include <linux/slab.h>
+#include <linux/circ_buf.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
@@ -518,6 +519,12 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
        }
 }
 
+static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
+{
+       /* Gen2 doesn't have a hardware frame counter */
+       return 0;
+}
+
 /* Called from drm generic code, passed a 'crtc', which
  * we use as a pipe index
  */
@@ -526,7 +533,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long high_frame;
        unsigned long low_frame;
-       u32 high1, high2, low;
+       u32 high1, high2, low, pixel, vbl_start;
 
        if (!i915_pipe_enabled(dev, pipe)) {
                DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -534,6 +541,24 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
                return 0;
        }
 
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               struct intel_crtc *intel_crtc =
+                       to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+               const struct drm_display_mode *mode =
+                       &intel_crtc->config.adjusted_mode;
+
+               vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
+       } else {
+               enum transcoder cpu_transcoder =
+                       intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+               u32 htotal;
+
+               htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
+               vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
+
+               vbl_start *= htotal;
+       }
+
        high_frame = PIPEFRAME(pipe);
        low_frame = PIPEFRAMEPIXEL(pipe);
 
@@ -544,13 +569,20 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
         */
        do {
                high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
-               low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+               low   = I915_READ(low_frame);
                high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
        } while (high1 != high2);
 
        high1 >>= PIPE_FRAME_HIGH_SHIFT;
+       pixel = low & PIPE_PIXEL_MASK;
        low >>= PIPE_FRAME_LOW_SHIFT;
-       return (high1 << 8) | low;
+
+       /*
+        * The frame counter increments at beginning of active.
+        * Cook up a vblank counter by also checking the pixel
+        * counter against vblank start.
+        */
+       return ((high1 << 8) | low) + (pixel >= vbl_start);
 }
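
The comment above notes that the hardware frame counter ticks at the start of active video, so the returned value is bumped by one once the pixel counter has passed vblank start. The arithmetic in isolation, with names and widths simplified:

#include <stdint.h>
#include <stdio.h>

static uint32_t cook_vblank_count(uint32_t high1, uint32_t low,
				  uint32_t pixel, uint32_t vbl_start)
{
	/* high1 carries bits 8+, low the bottom 8 bits of the frame counter. */
	return ((high1 << 8) | low) + (pixel >= vbl_start);
}

int main(void)
{
	/* Just before vblank start: still the current frame. */
	printf("%u\n", cook_vblank_count(1, 0x2a, 999, 1000));
	/* Pixel counter at/after vblank start: report the next frame already. */
	printf("%u\n", cook_vblank_count(1, 0x2a, 1000, 1000));
	return 0;
}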
 
 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -567,37 +599,98 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
        return I915_READ(reg);
 }
 
+static bool intel_pipe_in_vblank(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t status;
+
+       if (IS_VALLEYVIEW(dev)) {
+               status = pipe == PIPE_A ?
+                       I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+                       I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+               return I915_READ(VLV_ISR) & status;
+       } else if (IS_GEN2(dev)) {
+               status = pipe == PIPE_A ?
+                       I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+                       I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+               return I915_READ16(ISR) & status;
+       } else if (INTEL_INFO(dev)->gen < 5) {
+               status = pipe == PIPE_A ?
+                       I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT :
+                       I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+               return I915_READ(ISR) & status;
+       } else if (INTEL_INFO(dev)->gen < 7) {
+               status = pipe == PIPE_A ?
+                       DE_PIPEA_VBLANK :
+                       DE_PIPEB_VBLANK;
+
+               return I915_READ(DEISR) & status;
+       } else {
+               switch (pipe) {
+               default:
+               case PIPE_A:
+                       status = DE_PIPEA_VBLANK_IVB;
+                       break;
+               case PIPE_B:
+                       status = DE_PIPEB_VBLANK_IVB;
+                       break;
+               case PIPE_C:
+                       status = DE_PIPEC_VBLANK_IVB;
+                       break;
+               }
+
+               return I915_READ(DEISR) & status;
+       }
+}
+
 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                             int *vpos, int *hpos)
 {
-       drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-       u32 vbl = 0, position = 0;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+       int position;
        int vbl_start, vbl_end, htotal, vtotal;
        bool in_vbl = true;
        int ret = 0;
-       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-                                                                     pipe);
 
-       if (!i915_pipe_enabled(dev, pipe)) {
+       if (!intel_crtc->active) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
                return 0;
        }
 
-       /* Get vtotal. */
-       vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
+       htotal = mode->crtc_htotal;
+       vtotal = mode->crtc_vtotal;
+       vbl_start = mode->crtc_vblank_start;
+       vbl_end = mode->crtc_vblank_end;
 
-       if (INTEL_INFO(dev)->gen >= 4) {
+       ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+       if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                /* No obvious pixelcount register. Only query vertical
                 * scanout position from Display scan line register.
                 */
-               position = I915_READ(PIPEDSL(pipe));
+               if (IS_GEN2(dev))
+                       position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+               else
+                       position = I915_READ(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
 
-               /* Decode into vertical scanout position. Don't have
-                * horizontal scanout position.
+               /*
+                * The scanline counter increments at the leading edge
+                * of hsync, ie. it completely misses the active portion
+                * of the line. Fix up the counter at both edges of vblank
+                * to get a more accurate picture whether we're in vblank
+                * or not.
                 */
-               *vpos = position & 0x1fff;
-               *hpos = 0;
+               in_vbl = intel_pipe_in_vblank(dev, pipe);
+               if ((in_vbl && position == vbl_start - 1) ||
+                   (!in_vbl && position == vbl_end - 1))
+                       position = (position + 1) % vtotal;
        } else {
                /* Have access to pixelcount since start of frame.
                 * We can split this into vertical and horizontal
@@ -605,28 +698,32 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
                 */
                position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
 
-               htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
-               *vpos = position / htotal;
-               *hpos = position - (*vpos * htotal);
+               /* convert to pixel counts */
+               vbl_start *= htotal;
+               vbl_end *= htotal;
+               vtotal *= htotal;
        }
 
-       /* Query vblank area. */
-       vbl = I915_READ(VBLANK(cpu_transcoder));
-
-       /* Test position against vblank region. */
-       vbl_start = vbl & 0x1fff;
-       vbl_end = (vbl >> 16) & 0x1fff;
+       in_vbl = position >= vbl_start && position < vbl_end;
 
-       if ((*vpos < vbl_start) || (*vpos > vbl_end))
-               in_vbl = false;
-
-       /* Inside "upper part" of vblank area? Apply corrective offset: */
-       if (in_vbl && (*vpos >= vbl_start))
-               *vpos = *vpos - vtotal;
+       /*
+        * While in vblank, position will be negative
+        * counting up towards 0 at vbl_end. And outside
+        * vblank, position will be positive counting
+        * up since vbl_end.
+        */
+       if (position >= vbl_start)
+               position -= vbl_end;
+       else
+               position += vtotal - vbl_end;
 
-       /* Readouts valid? */
-       if (vbl > 0)
-               ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+       if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+               *vpos = position;
+               *hpos = 0;
+       } else {
+               *vpos = position / htotal;
+               *hpos = position - (*vpos * htotal);
+       }
 
        /* In vblank? */
        if (in_vbl)
@@ -665,7 +762,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
                                                     crtc);
 }
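
The scanout-position rework above reports positions relative to vblank: negative while inside vblank (counting up towards 0 at vbl_end) and positive afterwards. A standalone sketch of that rebasing, in line units:

#include <stdio.h>

static int rebase_position(int position, int vbl_start, int vbl_end, int vtotal)
{
	if (position >= vbl_start)
		return position - vbl_end;		/* inside vblank: negative */
	else
		return position + vtotal - vbl_end;	/* active scanout: positive */
}

int main(void)
{
	int vbl_start = 768, vbl_end = 800, vtotal = 800;

	printf("%d\n", rebase_position(768, vbl_start, vbl_end, vtotal)); /* -32 */
	printf("%d\n", rebase_position(799, vbl_start, vbl_end, vtotal)); /* -1  */
	printf("%d\n", rebase_position(0, vbl_start, vbl_end, vtotal));   /* 0   */
	printf("%d\n", rebase_position(100, vbl_start, vbl_end, vtotal)); /* 100 */
	return 0;
}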
 
-static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
+static bool intel_hpd_irq_event(struct drm_device *dev,
+                               struct drm_connector *connector)
 {
        enum drm_connector_status old_status;
 
@@ -673,11 +771,16 @@ static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *con
        old_status = connector->status;
 
        connector->status = connector->funcs->detect(connector, false);
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
+       if (old_status == connector->status)
+               return false;
+
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
                      connector->base.id,
                      drm_get_connector_name(connector),
-                     old_status, connector->status);
-       return (old_status != connector->status);
+                     drm_get_connector_status_name(old_status),
+                     drm_get_connector_status_name(connector->status));
+
+       return true;
 }
 
 /*
@@ -801,7 +904,7 @@ static void notify_ring(struct drm_device *dev,
        if (ring->obj == NULL)
                return;
 
-       trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
+       trace_i915_gem_request_complete(ring);
 
        wake_up_all(&ring->irq_queue);
        i915_queue_hangcheck(dev);
@@ -812,7 +915,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    rps.work);
        u32 pm_iir;
-       u8 new_delay;
+       int new_delay, adj;
 
        spin_lock_irq(&dev_priv->irq_lock);
        pm_iir = dev_priv->rps.pm_iir;
@@ -829,40 +932,49 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
        mutex_lock(&dev_priv->rps.hw_lock);
 
+       adj = dev_priv->rps.last_adj;
        if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-               new_delay = dev_priv->rps.cur_delay + 1;
+               if (adj > 0)
+                       adj *= 2;
+               else
+                       adj = 1;
+               new_delay = dev_priv->rps.cur_delay + adj;
 
                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
                 */
-               if (IS_VALLEYVIEW(dev_priv->dev) &&
-                   dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
+               if (new_delay < dev_priv->rps.rpe_delay)
+                       new_delay = dev_priv->rps.rpe_delay;
+       } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
+               if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
                        new_delay = dev_priv->rps.rpe_delay;
-       } else
-               new_delay = dev_priv->rps.cur_delay - 1;
+               else
+                       new_delay = dev_priv->rps.min_delay;
+               adj = 0;
+       } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
+               if (adj < 0)
+                       adj *= 2;
+               else
+                       adj = -1;
+               new_delay = dev_priv->rps.cur_delay + adj;
+       } else { /* unknown event */
+               new_delay = dev_priv->rps.cur_delay;
+       }
 
        /* sysfs frequency interfaces may have snuck in while servicing the
         * interrupt
         */
-       if (new_delay >= dev_priv->rps.min_delay &&
-           new_delay <= dev_priv->rps.max_delay) {
-               if (IS_VALLEYVIEW(dev_priv->dev))
-                       valleyview_set_rps(dev_priv->dev, new_delay);
-               else
-                       gen6_set_rps(dev_priv->dev, new_delay);
-       }
-
-       if (IS_VALLEYVIEW(dev_priv->dev)) {
-               /*
-                * On VLV, when we enter RC6 we may not be at the minimum
-                * voltage level, so arm a timer to check.  It should only
-                * fire when there's activity or once after we've entered
-                * RC6, and then won't be re-armed until the next RPS interrupt.
-                */
-               mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
-                                msecs_to_jiffies(100));
-       }
+       if (new_delay < (int)dev_priv->rps.min_delay)
+               new_delay = dev_priv->rps.min_delay;
+       if (new_delay > (int)dev_priv->rps.max_delay)
+               new_delay = dev_priv->rps.max_delay;
+       dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;
+
+       if (IS_VALLEYVIEW(dev_priv->dev))
+               valleyview_set_rps(dev_priv->dev, new_delay);
+       else
+               gen6_set_rps(dev_priv->dev, new_delay);
 
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
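
The reworked gen6_pm_rps_work() above replaces the fixed ±1 frequency step with an adaptive one: repeated up/down events double the previous adjustment, a down-timeout snaps to the efficient (RPe) or minimum level, and the result is clamped to [min, max] before being applied. A standalone sketch of that policy, using a hypothetical structure and ignoring the hardware plumbing:

#include <stdio.h>

enum rps_event { RPS_UP, RPS_DOWN, RPS_DOWN_TIMEOUT };

struct rps {
	int cur, min, max, rpe;	/* current, floor, ceiling, efficient level */
	int last_adj;
};

static int rps_next_level(struct rps *rps, enum rps_event ev)
{
	int adj = rps->last_adj;
	int new_level = rps->cur;

	switch (ev) {
	case RPS_UP:
		adj = adj > 0 ? adj * 2 : 1;
		new_level = rps->cur + adj;
		if (new_level < rps->rpe)	/* jump straight to RPe */
			new_level = rps->rpe;
		break;
	case RPS_DOWN_TIMEOUT:
		new_level = rps->cur > rps->rpe ? rps->rpe : rps->min;
		break;
	case RPS_DOWN:
		adj = adj < 0 ? adj * 2 : -1;
		new_level = rps->cur + adj;
		break;
	}

	/* Clamp, then remember the step actually taken for next time. */
	if (new_level < rps->min)
		new_level = rps->min;
	if (new_level > rps->max)
		new_level = rps->max;

	rps->last_adj = new_level - rps->cur;
	rps->cur = new_level;
	return new_level;
}

int main(void)
{
	struct rps rps = { .cur = 6, .min = 3, .max = 12, .rpe = 8, .last_adj = 0 };

	printf("%d\n", rps_next_level(&rps, RPS_UP));		/* 8: snapped up to RPe */
	printf("%d\n", rps_next_level(&rps, RPS_UP));		/* 12: doubled step, clamped to max */
	printf("%d\n", rps_next_level(&rps, RPS_DOWN_TIMEOUT));	/* 8: back down to RPe */
	return 0;
}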
@@ -882,9 +994,10 @@ static void ivybridge_parity_work(struct work_struct *work)
        drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
                                                    l3_parity.error_work);
        u32 error_status, row, bank, subbank;
-       char *parity_event[5];
+       char *parity_event[6];
        uint32_t misccpctl;
        unsigned long flags;
+       uint8_t slice = 0;
 
        /* We must turn off DOP level clock gating to access the L3 registers.
         * In order to prevent a get/put style interface, acquire struct mutex
@@ -892,55 +1005,81 @@ static void ivybridge_parity_work(struct work_struct *work)
         */
        mutex_lock(&dev_priv->dev->struct_mutex);
 
+       /* If we've screwed up tracking, just let the interrupt fire again */
+       if (WARN_ON(!dev_priv->l3_parity.which_slice))
+               goto out;
+
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        POSTING_READ(GEN7_MISCCPCTL);
 
-       error_status = I915_READ(GEN7_L3CDERRST1);
-       row = GEN7_PARITY_ERROR_ROW(error_status);
-       bank = GEN7_PARITY_ERROR_BANK(error_status);
-       subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+       while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
+               u32 reg;
 
-       I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
-                                   GEN7_L3CDERRST1_ENABLE);
-       POSTING_READ(GEN7_L3CDERRST1);
+               slice--;
+               if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
+                       break;
 
-       I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+               dev_priv->l3_parity.which_slice &= ~(1<<slice);
 
-       spin_lock_irqsave(&dev_priv->irq_lock, flags);
-       ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+               reg = GEN7_L3CDERRST1 + (slice * 0x200);
 
-       mutex_unlock(&dev_priv->dev->struct_mutex);
+               error_status = I915_READ(reg);
+               row = GEN7_PARITY_ERROR_ROW(error_status);
+               bank = GEN7_PARITY_ERROR_BANK(error_status);
+               subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
+
+               I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
+               POSTING_READ(reg);
 
-       parity_event[0] = I915_L3_PARITY_UEVENT "=1";
-       parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
-       parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
-       parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
-       parity_event[4] = NULL;
+               parity_event[0] = I915_L3_PARITY_UEVENT "=1";
+               parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
+               parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
+               parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
+               parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
+               parity_event[5] = NULL;
 
-       kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
-                          KOBJ_CHANGE, parity_event);
+               kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
+                                  KOBJ_CHANGE, parity_event);
 
-       DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
-                 row, bank, subbank);
+               DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
+                         slice, row, bank, subbank);
+
+               kfree(parity_event[4]);
+               kfree(parity_event[3]);
+               kfree(parity_event[2]);
+               kfree(parity_event[1]);
+       }
 
-       kfree(parity_event[3]);
-       kfree(parity_event[2]);
-       kfree(parity_event[1]);
+       I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+
+out:
+       WARN_ON(dev_priv->l3_parity.which_slice);
+       spin_lock_irqsave(&dev_priv->irq_lock, flags);
+       ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+       spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+
+       mutex_unlock(&dev_priv->dev->struct_mutex);
 }
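
The parity worker above now drains a bitmask of pending L3 slices: it takes the lowest set bit with ffs(), clears it, and handles that slice before looking again, so a new interrupt can re-arm the bit. A standalone analogue of that bookkeeping:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static void handle_slice(int slice)
{
	printf("handling L3 parity error on slice %d\n", slice);
}

static void drain_slices(unsigned int *pending)
{
	int slice;

	while ((slice = ffs(*pending)) != 0) {
		slice--;			/* ffs() is 1-based */
		*pending &= ~(1u << slice);	/* clear before handling */
		handle_slice(slice);
	}
}

int main(void)
{
	unsigned int pending = (1u << 0) | (1u << 1);

	drain_slices(&pending);
	return 0;
}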
 
-static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-       if (!HAS_L3_GPU_CACHE(dev))
+       if (!HAS_L3_DPF(dev))
                return;
 
        spin_lock(&dev_priv->irq_lock);
-       ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+       ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
        spin_unlock(&dev_priv->irq_lock);
 
+       iir &= GT_PARITY_ERROR(dev);
+       if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
+               dev_priv->l3_parity.which_slice |= 1 << 1;
+
+       if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
+               dev_priv->l3_parity.which_slice |= 1 << 0;
+
        queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
 }
 
@@ -975,8 +1114,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
                i915_handle_error(dev, false);
        }
 
-       if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-               ivybridge_parity_error_irq_handler(dev);
+       if (gt_iir & GT_PARITY_ERROR(dev))
+               ivybridge_parity_error_irq_handler(dev, gt_iir);
 }
 
 #define HPD_STORM_DETECT_PERIOD 1000
@@ -1050,6 +1189,102 @@ static void dp_aux_irq_handler(struct drm_device *dev)
        wake_up_all(&dev_priv->gmbus_wait_queue);
 }
 
+#if defined(CONFIG_DEBUG_FS)
+static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+                                        uint32_t crc0, uint32_t crc1,
+                                        uint32_t crc2, uint32_t crc3,
+                                        uint32_t crc4)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
+       struct intel_pipe_crc_entry *entry;
+       int head, tail;
+
+       spin_lock(&pipe_crc->lock);
+
+       if (!pipe_crc->entries) {
+               spin_unlock(&pipe_crc->lock);
+               DRM_ERROR("spurious interrupt\n");
+               return;
+       }
+
+       head = pipe_crc->head;
+       tail = pipe_crc->tail;
+
+       if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+               spin_unlock(&pipe_crc->lock);
+               DRM_ERROR("CRC buffer overflowing\n");
+               return;
+       }
+
+       entry = &pipe_crc->entries[head];
+
+       entry->frame = dev->driver->get_vblank_counter(dev, pipe);
+       entry->crc[0] = crc0;
+       entry->crc[1] = crc1;
+       entry->crc[2] = crc2;
+       entry->crc[3] = crc3;
+       entry->crc[4] = crc4;
+
+       head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+       pipe_crc->head = head;
+
+       spin_unlock(&pipe_crc->lock);
+
+       wake_up_interruptible(&pipe_crc->wq);
+}
+#else
+static inline void
+display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
+                            uint32_t crc0, uint32_t crc1,
+                            uint32_t crc2, uint32_t crc3,
+                            uint32_t crc4) {}
+#endif
+
+
+static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       display_pipe_crc_irq_handler(dev, pipe,
+                                    I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+                                    0, 0, 0, 0);
+}
+
+static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       display_pipe_crc_irq_handler(dev, pipe,
+                                    I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
+                                    I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
+                                    I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
+                                    I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
+                                    I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
+}
+
+static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       uint32_t res1, res2;
+
+       if (INTEL_INFO(dev)->gen >= 3)
+               res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
+       else
+               res1 = 0;
+
+       if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+               res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
+       else
+               res2 = 0;
+
+       display_pipe_crc_irq_handler(dev, pipe,
+                                    I915_READ(PIPE_CRC_RES_RED(pipe)),
+                                    I915_READ(PIPE_CRC_RES_GREEN(pipe)),
+                                    I915_READ(PIPE_CRC_RES_BLUE(pipe)),
+                                    res1, res2);
+}
+
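
The handlers above only produce entries: each CRC result is pushed into a per-pipe ring buffer protected by pipe_crc->lock, with head/tail arithmetic that assumes INTEL_PIPE_CRC_ENTRIES_NR is a power of two. A minimal consumer sketch, illustrative only (the real consumer is presumably the debugfs reader this series adds, given the CONFIG_DEBUG_FS guard above; drain_pipe_crc and out are made-up names):

/* Requires <linux/circ_buf.h> for CIRC_CNT(). */
static int drain_pipe_crc(struct intel_pipe_crc *pipe_crc,
			  struct intel_pipe_crc_entry *out)
{
	int n = 0;

	spin_lock_irq(&pipe_crc->lock);
	while (CIRC_CNT(pipe_crc->head, pipe_crc->tail,
			INTEL_PIPE_CRC_ENTRIES_NR) >= 1) {
		/* Copy the oldest entry, then advance tail with the same
		 * power-of-two mask the interrupt handler uses for head. */
		out[n++] = pipe_crc->entries[pipe_crc->tail];
		pipe_crc->tail = (pipe_crc->tail + 1) &
				 (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	}
	spin_unlock_irq(&pipe_crc->lock);

	return n;	/* number of entries copied */
}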
 /* The RPS events need forcewake, so we add them to a work queue and mask their
  * IMR bits until the work is done. Other interrupts can be processed without
  * the work queue. */
@@ -1124,6 +1359,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                                intel_prepare_page_flip(dev, pipe);
                                intel_finish_page_flip(dev, pipe);
                        }
+
+                       if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+                               i9xx_pipe_crc_irq_handler(dev, pipe);
                }
 
                /* Consume port.  Then clear IIR or we'll miss events */
@@ -1212,21 +1450,26 @@ static void ivb_err_int_handler(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 err_int = I915_READ(GEN7_ERR_INT);
+       enum pipe pipe;
 
        if (err_int & ERR_INT_POISON)
                DRM_ERROR("Poison interrupt\n");
 
-       if (err_int & ERR_INT_FIFO_UNDERRUN_A)
-               if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
-                       DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
-
-       if (err_int & ERR_INT_FIFO_UNDERRUN_B)
-               if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
-                       DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+       for_each_pipe(pipe) {
+               if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
+                       if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
+                                                                 false))
+                               DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
+                                                pipe_name(pipe));
+               }
 
-       if (err_int & ERR_INT_FIFO_UNDERRUN_C)
-               if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
-                       DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
+               if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
+                       if (IS_IVYBRIDGE(dev))
+                               ivb_pipe_crc_irq_handler(dev, pipe);
+                       else
+                               hsw_pipe_crc_irq_handler(dev, pipe);
+               }
+       }
 
        I915_WRITE(GEN7_ERR_INT, err_int);
 }
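
The for_each_pipe() loop above works because the GEN7_ERR_INT bits are laid out in per-pipe groups of three; a worked expansion of the two helpers it uses, with values taken from the ERR_INT_* definitions later in i915_reg.h:

/* ERR_INT_FIFO_UNDERRUN(pipe) = 1 << (pipe * 3)     -> pipe A: bit 0, B: bit 3, C: bit 6
 * ERR_INT_PIPE_CRC_DONE(pipe) = 1 << (2 + pipe * 3) -> pipe A: bit 2, B: bit 5, C: bit 8
 * matching the individual ERR_INT_FIFO_UNDERRUN_{A,B,C} and
 * ERR_INT_PIPE_CRC_DONE_{A,B,C} definitions. */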
@@ -1321,6 +1564,12 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
                if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
                        DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
 
+       if (de_iir & DE_PIPEA_CRC_DONE)
+               i9xx_pipe_crc_irq_handler(dev, PIPE_A);
+
+       if (de_iir & DE_PIPEB_CRC_DONE)
+               i9xx_pipe_crc_irq_handler(dev, PIPE_B);
+
        if (de_iir & DE_PLANEA_FLIP_DONE) {
                intel_prepare_page_flip(dev, 0);
                intel_finish_page_flip_plane(dev, 0);
@@ -1388,7 +1637,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 de_iir, gt_iir, de_ier, sde_ier = 0;
        irqreturn_t ret = IRQ_NONE;
-       bool err_int_reenable = false;
 
        atomic_inc(&dev_priv->irq_received);
 
@@ -1412,17 +1660,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
                POSTING_READ(SDEIER);
        }
 
-       /* On Haswell, also mask ERR_INT because we don't want to risk
-        * generating "unclaimed register" interrupts from inside the interrupt
-        * handler. */
-       if (IS_HASWELL(dev)) {
-               spin_lock(&dev_priv->irq_lock);
-               err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
-               if (err_int_reenable)
-                       ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
-               spin_unlock(&dev_priv->irq_lock);
-       }
-
        gt_iir = I915_READ(GTIIR);
        if (gt_iir) {
                if (INTEL_INFO(dev)->gen >= 6)
@@ -1452,13 +1689,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
                }
        }
 
-       if (err_int_reenable) {
-               spin_lock(&dev_priv->irq_lock);
-               if (ivb_can_enable_err_int(dev))
-                       ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
-               spin_unlock(&dev_priv->irq_lock);
-       }
-
        I915_WRITE(DEIER, de_ier);
        POSTING_READ(DEIER);
        if (!HAS_PCH_NOP(dev)) {
@@ -1516,7 +1746,7 @@ static void i915_error_work_func(struct work_struct *work)
        char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
        int ret;
 
-       kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
+       kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
 
        /*
         * Note that there's only one work item which does gpu resets, so we
@@ -1530,7 +1760,7 @@ static void i915_error_work_func(struct work_struct *work)
         */
        if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
                DRM_DEBUG_DRIVER("resetting chip\n");
-               kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
+               kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
                                   reset_event);
 
                /*
@@ -1557,7 +1787,7 @@ static void i915_error_work_func(struct work_struct *work)
                        smp_mb__before_atomic_inc();
                        atomic_inc(&dev_priv->gpu_error.reset_counter);
 
-                       kobject_uevent_env(&dev->primary->kdev.kobj,
+                       kobject_uevent_env(&dev->primary->kdev->kobj,
                                           KOBJ_CHANGE, reset_done_event);
                } else {
                        atomic_set(&error->reset_counter, I915_WEDGED);
@@ -1965,6 +2195,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
        if (tmp & RING_WAIT) {
                DRM_ERROR("Kicking stuck wait on %s\n",
                          ring->name);
+               i915_handle_error(dev, false);
                I915_WRITE_CTL(ring, tmp);
                return HANGCHECK_KICK;
        }
@@ -1976,6 +2207,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
                case 1:
                        DRM_ERROR("Kicking stuck semaphore on %s\n",
                                  ring->name);
+                       i915_handle_error(dev, false);
                        I915_WRITE_CTL(ring, tmp);
                        return HANGCHECK_KICK;
                case 0:
@@ -2021,12 +2253,17 @@ static void i915_hangcheck_elapsed(unsigned long data)
 
                if (ring->hangcheck.seqno == seqno) {
                        if (ring_idle(ring, seqno)) {
+                               ring->hangcheck.action = HANGCHECK_IDLE;
+
                                if (waitqueue_active(&ring->irq_queue)) {
                                        /* Issue a wake-up to catch stuck h/w. */
-                                       DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
-                                                 ring->name);
-                                       wake_up_all(&ring->irq_queue);
-                                       ring->hangcheck.score += HUNG;
+                                       if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
+                                               DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
+                                                         ring->name);
+                                               wake_up_all(&ring->irq_queue);
+                                       }
+                                       /* Safeguard against driver failure */
+                                       ring->hangcheck.score += BUSY;
                                } else
                                        busy = false;
                        } else {
@@ -2049,6 +2286,7 @@ static void i915_hangcheck_elapsed(unsigned long data)
                                                                    acthd);
 
                                switch (ring->hangcheck.action) {
+                               case HANGCHECK_IDLE:
                                case HANGCHECK_WAIT:
                                        break;
                                case HANGCHECK_ACTIVE:
@@ -2064,6 +2302,8 @@ static void i915_hangcheck_elapsed(unsigned long data)
                                }
                        }
                } else {
+                       ring->hangcheck.action = HANGCHECK_ACTIVE;
+
                        /* Gradually reduce the count so that we catch DoS
                         * attempts across multiple batches.
                         */
@@ -2254,10 +2494,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
        pm_irqs = gt_irqs = 0;
 
        dev_priv->gt_irq_mask = ~0;
-       if (HAS_L3_GPU_CACHE(dev)) {
+       if (HAS_L3_DPF(dev)) {
                /* L3 parity interrupt is always unmasked. */
-               dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-               gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+               dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
+               gt_irqs |= GT_PARITY_ERROR(dev);
        }
 
        gt_irqs |= GT_RENDER_USER_INTERRUPT;
@@ -2306,8 +2546,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
        } else {
                display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                                DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
-                               DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
-                               DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+                               DE_AUX_CHANNEL_A |
+                               DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
+                               DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
+                               DE_POISON);
                extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
        }
 
@@ -2341,7 +2583,8 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;
-       u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+       u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV |
+               PIPE_CRC_DONE_ENABLE;
        unsigned long irqflags;
 
        enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -2464,6 +2707,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
 static int i8xx_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+       unsigned long irqflags;
 
        I915_WRITE16(EMR,
                     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -2484,6 +2728,13 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
                     I915_USER_INTERRUPT);
        POSTING_READ16(IER);
 
+       /* Interrupt setup is already guaranteed to be single-threaded, this is
+        * just to make the assert_spin_locked check happy. */
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       i915_enable_pipestat(dev_priv, 0, PIPE_CRC_DONE_ENABLE);
+       i915_enable_pipestat(dev_priv, 1, PIPE_CRC_DONE_ENABLE);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
        return 0;
 }
 
@@ -2570,13 +2821,14 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                if (iir & I915_USER_INTERRUPT)
                        notify_ring(dev, &dev_priv->ring[RCS]);
 
-               if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
-                   i8xx_handle_vblank(dev, 0, iir))
-                       flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
+               for_each_pipe(pipe) {
+                       if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
+                           i8xx_handle_vblank(dev, pipe, iir))
+                               flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
 
-               if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
-                   i8xx_handle_vblank(dev, 1, iir))
-                       flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
+                       if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+                               i9xx_pipe_crc_irq_handler(dev, pipe);
+               }
 
                iir = new_iir;
        }
@@ -2623,6 +2875,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        u32 enable_mask;
+       unsigned long irqflags;
 
        I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
@@ -2658,6 +2911,13 @@ static int i915_irq_postinstall(struct drm_device *dev)
 
        i915_enable_asle_pipestat(dev);
 
+       /* Interrupt setup is already guaranteed to be single-threaded, this is
+        * just to make the assert_spin_locked check happy. */
+       spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       i915_enable_pipestat(dev_priv, 0, PIPE_CRC_DONE_ENABLE);
+       i915_enable_pipestat(dev_priv, 1, PIPE_CRC_DONE_ENABLE);
+       spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
        return 0;
 }
 
@@ -2769,6 +3029,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 
                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
+
+                       if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+                               i9xx_pipe_crc_irq_handler(dev, pipe);
                }
 
                if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -2868,6 +3131,8 @@ static int i965_irq_postinstall(struct drm_device *dev)
         * just to make the assert_spin_locked check happy. */
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+       i915_enable_pipestat(dev_priv, 0, PIPE_CRC_DONE_ENABLE);
+       i915_enable_pipestat(dev_priv, 1, PIPE_CRC_DONE_ENABLE);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
        /*
@@ -3013,6 +3278,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 
                        if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
                                blc_event = true;
+
+                       if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
+                               i9xx_pipe_crc_irq_handler(dev, pipe);
                }
 
 
@@ -3122,18 +3390,21 @@ void intel_irq_init(struct drm_device *dev)
 
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
-       dev->driver->get_vblank_counter = i915_get_vblank_counter;
-       dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+       if (IS_GEN2(dev)) {
+               dev->max_vblank_count = 0;
+               dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
+       } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+       } else {
+               dev->driver->get_vblank_counter = i915_get_vblank_counter;
+               dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        }
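
A short note on the gen2 branch above, as a descriptive comment (editorial sketch, not patch text):

/* Gen2 has no usable hardware frame counter (the _PIPEAFRAMEHIGH /
 * _PIPEAFRAMEPIXEL registers read by i915_get_vblank_counter exist on
 * later generations), so i8xx_get_vblank_counter is installed and
 * max_vblank_count is set to 0, leaving the DRM core to count vblanks
 * from the interrupt path rather than from a hardware register. */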
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
-       else
-               dev->driver->get_vblank_timestamp = NULL;
-       dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+               dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+       }
 
        if (IS_VALLEYVIEW(dev)) {
                dev->driver->irq_handler = valleyview_irq_handler;
index c159e1a6810fbd8f04e60520dfc5fdb884d8dbff..2b4f7b18b9ba87e4547e5062763fe6ccdfe63af4 100644
@@ -26,6 +26,7 @@
 #define _I915_REG_H_
 
 #define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+#define _PIPE_INC(pipe, base, inc) ((base) + (pipe)*(inc))
 #define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
 #define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
 #define  MI_SEMAPHORE_SYNC_VVE     (1<<16) /* VECS wait for VCS  (VEVSYNC) */
 #define  MI_SEMAPHORE_SYNC_RVE     (2<<16) /* VECS wait for RCS  (VERSYNC) */
 #define  MI_SEMAPHORE_SYNC_INVALID  (3<<16)
+
+#define MI_PREDICATE_RESULT_2  (0x2214)
+#define  LOWER_SLICE_ENABLED   (1<<0)
+#define  LOWER_SLICE_DISABLED  (0<<0)
+
 /*
  * 3D instructions used by the kernel
  */
 #define   IOSF_PORT_PUNIT                      0x4
 #define   IOSF_PORT_NC                         0x11
 #define   IOSF_PORT_DPIO                       0x12
+#define   IOSF_PORT_GPIO_NC                    0x13
+#define   IOSF_PORT_CCK                                0x14
+#define   IOSF_PORT_CCU                                0xA9
+#define   IOSF_PORT_GPS_CORE                   0x48
 #define VLV_IOSF_DATA                          (VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR                          (VLV_DISPLAY_BASE + 0x2108)
 
 #define PUNIT_OPCODE_REG_READ                  6
 #define PUNIT_OPCODE_REG_WRITE                 7
 
+#define PUNIT_REG_PWRGT_CTRL                   0x60
+#define PUNIT_REG_PWRGT_STATUS                 0x61
+#define          PUNIT_CLK_GATE                        1
+#define          PUNIT_PWR_RESET                       2
+#define          PUNIT_PWR_GATE                        3
+#define          RENDER_PWRGT                          (PUNIT_PWR_GATE << 0)
+#define          MEDIA_PWRGT                           (PUNIT_PWR_GATE << 2)
+#define          DISP2D_PWRGT                          (PUNIT_PWR_GATE << 6)
+
 #define PUNIT_REG_GPU_LFM                      0xd3
 #define PUNIT_REG_GPU_FREQ_REQ                 0xd4
 #define PUNIT_REG_GPU_FREQ_STS                 0xd8
 #define   FB_FMAX_VMIN_FREQ_LO_SHIFT           27
 #define   FB_FMAX_VMIN_FREQ_LO_MASK            0xf8000000
 
+/* vlv2 north clock has */
+#define CCK_FUSE_REG                           0x8
+#define  CCK_FUSE_HPLL_FREQ_MASK               0x3
+#define CCK_REG_DSI_PLL_FUSE                   0x44
+#define CCK_REG_DSI_PLL_CONTROL                        0x48
+#define  DSI_PLL_VCO_EN                                (1 << 31)
+#define  DSI_PLL_LDO_GATE                      (1 << 30)
+#define  DSI_PLL_P1_POST_DIV_SHIFT             17
+#define  DSI_PLL_P1_POST_DIV_MASK              (0x1ff << 17)
+#define  DSI_PLL_P2_MUX_DSI0_DIV2              (1 << 13)
+#define  DSI_PLL_P3_MUX_DSI1_DIV2              (1 << 12)
+#define  DSI_PLL_MUX_MASK                      (3 << 9)
+#define  DSI_PLL_MUX_DSI0_DSIPLL               (0 << 10)
+#define  DSI_PLL_MUX_DSI0_CCK                  (1 << 10)
+#define  DSI_PLL_MUX_DSI1_DSIPLL               (0 << 9)
+#define  DSI_PLL_MUX_DSI1_CCK                  (1 << 9)
+#define  DSI_PLL_CLK_GATE_MASK                 (0xf << 5)
+#define  DSI_PLL_CLK_GATE_DSI0_DSIPLL          (1 << 8)
+#define  DSI_PLL_CLK_GATE_DSI1_DSIPLL          (1 << 7)
+#define  DSI_PLL_CLK_GATE_DSI0_CCK             (1 << 6)
+#define  DSI_PLL_CLK_GATE_DSI1_CCK             (1 << 5)
+#define  DSI_PLL_LOCK                          (1 << 0)
+#define CCK_REG_DSI_PLL_DIVIDER                        0x4c
+#define  DSI_PLL_LFSR                          (1 << 31)
+#define  DSI_PLL_FRACTION_EN                   (1 << 30)
+#define  DSI_PLL_FRAC_COUNTER_SHIFT            27
+#define  DSI_PLL_FRAC_COUNTER_MASK             (7 << 27)
+#define  DSI_PLL_USYNC_CNT_SHIFT               18
+#define  DSI_PLL_USYNC_CNT_MASK                        (0x1ff << 18)
+#define  DSI_PLL_N1_DIV_SHIFT                  16
+#define  DSI_PLL_N1_DIV_MASK                   (3 << 16)
+#define  DSI_PLL_M1_DIV_SHIFT                  0
+#define  DSI_PLL_M1_DIV_MASK                   (0x1ff << 0)
+
 /*
  * DPIO - a special bus for various display related registers to hide behind
  *
 #define  DPIO_MODSEL1                  (1<<3) /* if ref clk b == 27 */
 #define  DPIO_MODSEL0                  (1<<2) /* if ref clk a == 27 */
 #define  DPIO_SFR_BYPASS               (1<<1)
-#define  DPIO_RESET                    (1<<0)
+#define  DPIO_CMNRST                   (1<<0)
 
 #define _DPIO_TX3_SWING_CTL4_A         0x690
 #define _DPIO_TX3_SWING_CTL4_B         0x2a90
 #define   ARB_MODE_SWIZZLE_IVB (1<<5)
 #define RENDER_HWS_PGA_GEN7    (0x04080)
 #define RING_FAULT_REG(ring)   (0x4094 + 0x100*(ring)->id)
+#define   RING_FAULT_GTTSEL_MASK (1<<11)
+#define   RING_FAULT_SRCID(x)  ((x >> 3) & 0xff)
+#define   RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
+#define   RING_FAULT_VALID     (1<<0)
 #define DONE_REG               0x40b0
 #define BSD_HWS_PGA_GEN7       (0x04180)
 #define BLT_HWS_PGA_GEN7       (0x04280)
 #define GEN7_ERR_INT   0x44040
 #define   ERR_INT_POISON               (1<<31)
 #define   ERR_INT_MMIO_UNCLAIMED       (1<<13)
+#define   ERR_INT_PIPE_CRC_DONE_C      (1<<8)
 #define   ERR_INT_FIFO_UNDERRUN_C      (1<<6)
+#define   ERR_INT_PIPE_CRC_DONE_B      (1<<5)
 #define   ERR_INT_FIFO_UNDERRUN_B      (1<<3)
+#define   ERR_INT_PIPE_CRC_DONE_A      (1<<2)
+#define   ERR_INT_PIPE_CRC_DONE(pipe)  (1<<(2 + pipe*3))
 #define   ERR_INT_FIFO_UNDERRUN_A      (1<<0)
 #define   ERR_INT_FIFO_UNDERRUN(pipe)  (1<<(pipe*3))
 
 #define GT_BLT_USER_INTERRUPT                  (1 << 22)
 #define GT_BSD_CS_ERROR_INTERRUPT              (1 << 15)
 #define GT_BSD_USER_INTERRUPT                  (1 << 12)
+#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 (1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT    (1 <<  5) /* !snb */
 #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT     (1 <<  4)
 #define GT_RENDER_CS_MASTER_ERROR_INTERRUPT    (1 <<  3)
 #define PM_VEBOX_CS_ERROR_INTERRUPT            (1 << 12) /* hsw+ */
 #define PM_VEBOX_USER_INTERRUPT                        (1 << 10) /* hsw+ */
 
+#define GT_PARITY_ERROR(dev) \
+       (GT_RENDER_L3_PARITY_ERROR_INTERRUPT | \
+        (IS_HASWELL(dev) ? GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1 : 0))
+
 /* These are all the "old" interrupts */
 #define ILK_BSD_USER_INTERRUPT                         (1<<5)
 #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT             (1<<18)
 
 #define MI_ARB_VLV             (VLV_DISPLAY_BASE + 0x6504)
 
+#define CZCLK_CDCLK_FREQ_RATIO (VLV_DISPLAY_BASE + 0x6508)
+#define   CDCLK_FREQ_SHIFT     4
+#define   CDCLK_FREQ_MASK      (0x1f << CDCLK_FREQ_SHIFT)
+#define   CZCLK_FREQ_MASK      0xf
+#define GMBUSFREQ_VLV          (VLV_DISPLAY_BASE + 0x6510)
+
 /*
  * Palette regs
  */
  * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
  * every way.  It is not accessible from the CP register read instructions.
  *
+ * Starting from Haswell, you can't write registers using the MCHBAR mirror,
+ * just read.
  */
 #define MCHBAR_MIRROR_BASE     0x10000
 
  */
 #define HSW_CXT_TOTAL_SIZE             (17 * PAGE_SIZE)
 
+#define VLV_CLK_CTL2                   0x101104
+#define   CLK_CTL2_CZCOUNT_30NS_SHIFT  28
+
 /*
  * Overlay regs
  */
  * Display engine regs
  */
 
+/* Pipe A CRC regs */
+#define _PIPE_CRC_CTL_A                (dev_priv->info->display_mmio_offset + 0x60050)
+#define   PIPE_CRC_ENABLE              (1 << 31)
+/* ivb+ source selection */
+#define   PIPE_CRC_SOURCE_PRIMARY_IVB  (0 << 29)
+#define   PIPE_CRC_SOURCE_SPRITE_IVB   (1 << 29)
+#define   PIPE_CRC_SOURCE_PF_IVB       (2 << 29)
+/* ilk+ source selection */
+#define   PIPE_CRC_SOURCE_PRIMARY_ILK  (0 << 28)
+#define   PIPE_CRC_SOURCE_SPRITE_ILK   (1 << 28)
+#define   PIPE_CRC_SOURCE_PIPE_ILK     (2 << 28)
+/* embedded DP port on the north display block, reserved on ivb */
+#define   PIPE_CRC_SOURCE_PORT_A_ILK   (4 << 28)
+#define   PIPE_CRC_SOURCE_FDI_ILK      (5 << 28) /* reserved on ivb */
+/* vlv source selection */
+#define   PIPE_CRC_SOURCE_PIPE_VLV     (0 << 27)
+#define   PIPE_CRC_SOURCE_HDMIB_VLV    (1 << 27)
+#define   PIPE_CRC_SOURCE_HDMIC_VLV    (2 << 27)
+/* with DP port the pipe source is invalid */
+#define   PIPE_CRC_SOURCE_DP_D_VLV     (3 << 27)
+#define   PIPE_CRC_SOURCE_DP_B_VLV     (6 << 27)
+#define   PIPE_CRC_SOURCE_DP_C_VLV     (7 << 27)
+/* gen3+ source selection */
+#define   PIPE_CRC_SOURCE_PIPE_I9XX    (0 << 28)
+#define   PIPE_CRC_SOURCE_SDVOB_I9XX   (1 << 28)
+#define   PIPE_CRC_SOURCE_SDVOC_I9XX   (2 << 28)
+/* with DP/TV port the pipe source is invalid */
+#define   PIPE_CRC_SOURCE_DP_D_G4X     (3 << 28)
+#define   PIPE_CRC_SOURCE_TV_PRE       (4 << 28)
+#define   PIPE_CRC_SOURCE_TV_POST      (5 << 28)
+#define   PIPE_CRC_SOURCE_DP_B_G4X     (6 << 28)
+#define   PIPE_CRC_SOURCE_DP_C_G4X     (7 << 28)
+/* gen2 doesn't have source selection bits */
+#define   PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
+
+#define _PIPE_CRC_RES_1_A_IVB          0x60064
+#define _PIPE_CRC_RES_2_A_IVB          0x60068
+#define _PIPE_CRC_RES_3_A_IVB          0x6006c
+#define _PIPE_CRC_RES_4_A_IVB          0x60070
+#define _PIPE_CRC_RES_5_A_IVB          0x60074
+
+#define _PIPE_CRC_RES_RED_A            (dev_priv->info->display_mmio_offset + 0x60060)
+#define _PIPE_CRC_RES_GREEN_A          (dev_priv->info->display_mmio_offset + 0x60064)
+#define _PIPE_CRC_RES_BLUE_A           (dev_priv->info->display_mmio_offset + 0x60068)
+#define _PIPE_CRC_RES_RES1_A_I915      (dev_priv->info->display_mmio_offset + 0x6006c)
+#define _PIPE_CRC_RES_RES2_A_G4X       (dev_priv->info->display_mmio_offset + 0x60080)
+
+/* Pipe B CRC regs */
+#define _PIPE_CRC_RES_1_B_IVB          0x61064
+#define _PIPE_CRC_RES_2_B_IVB          0x61068
+#define _PIPE_CRC_RES_3_B_IVB          0x6106c
+#define _PIPE_CRC_RES_4_B_IVB          0x61070
+#define _PIPE_CRC_RES_5_B_IVB          0x61074
+
+#define PIPE_CRC_CTL(pipe)     _PIPE_INC(pipe, _PIPE_CRC_CTL_A, 0x01000)
+#define PIPE_CRC_RES_1_IVB(pipe)       \
+       _PIPE(pipe, _PIPE_CRC_RES_1_A_IVB, _PIPE_CRC_RES_1_B_IVB)
+#define PIPE_CRC_RES_2_IVB(pipe)       \
+       _PIPE(pipe, _PIPE_CRC_RES_2_A_IVB, _PIPE_CRC_RES_2_B_IVB)
+#define PIPE_CRC_RES_3_IVB(pipe)       \
+       _PIPE(pipe, _PIPE_CRC_RES_3_A_IVB, _PIPE_CRC_RES_3_B_IVB)
+#define PIPE_CRC_RES_4_IVB(pipe)       \
+       _PIPE(pipe, _PIPE_CRC_RES_4_A_IVB, _PIPE_CRC_RES_4_B_IVB)
+#define PIPE_CRC_RES_5_IVB(pipe)       \
+       _PIPE(pipe, _PIPE_CRC_RES_5_A_IVB, _PIPE_CRC_RES_5_B_IVB)
+
+#define PIPE_CRC_RES_RED(pipe) \
+       _PIPE_INC(pipe, _PIPE_CRC_RES_RED_A, 0x01000)
+#define PIPE_CRC_RES_GREEN(pipe) \
+       _PIPE_INC(pipe, _PIPE_CRC_RES_GREEN_A, 0x01000)
+#define PIPE_CRC_RES_BLUE(pipe) \
+       _PIPE_INC(pipe, _PIPE_CRC_RES_BLUE_A, 0x01000)
+#define PIPE_CRC_RES_RES1_I915(pipe) \
+       _PIPE_INC(pipe, _PIPE_CRC_RES_RES1_A_I915, 0x01000)
+#define PIPE_CRC_RES_RES2_G4X(pipe) \
+       _PIPE_INC(pipe, _PIPE_CRC_RES_RES2_A_G4X, 0x01000)
+
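+
For reference, a worked expansion of the new per-pipe CRC register helpers above (display_mmio_offset omitted; values follow directly from the definitions):

/* PIPE_CRC_CTL(pipe)          = 0x60050 + pipe * 0x1000 -> A: 0x60050, B: 0x61050
 * PIPE_CRC_RES_RED(pipe)      = 0x60060 + pipe * 0x1000 -> A: 0x60060, B: 0x61060
 * PIPE_CRC_RES_RES2_G4X(pipe) = 0x60080 + pipe * 0x1000 -> A: 0x60080, B: 0x61080
 * The IVB/HSW result registers use _PIPE() with explicit A/B addresses,
 * which follow the same 0x1000 stride (0x60064 vs 0x61064, and so on). */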
 /* Pipe A timing regs */
 #define _HTOTAL_A      (dev_priv->info->display_mmio_offset + 0x60000)
 #define _HBLANK_A      (dev_priv->info->display_mmio_offset + 0x60004)
 #define _BCLRPAT_B     (dev_priv->info->display_mmio_offset + 0x61020)
 #define _VSYNCSHIFT_B  (dev_priv->info->display_mmio_offset + 0x61028)
 
-
 #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
 #define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
 #define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
 #define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
 /* HSW eDP PSR registers */
-#define EDP_PSR_CTL                            0x64800
+#define EDP_PSR_BASE(dev)                      0x64800
+#define EDP_PSR_CTL(dev)                       (EDP_PSR_BASE(dev) + 0)
 #define   EDP_PSR_ENABLE                       (1<<31)
 #define   EDP_PSR_LINK_DISABLE                 (0<<27)
 #define   EDP_PSR_LINK_STANDBY                 (1<<27)
 #define   EDP_PSR_TP1_TIME_0us                 (3<<4)
 #define   EDP_PSR_IDLE_FRAME_SHIFT             0
 
-#define EDP_PSR_AUX_CTL                        0x64810
-#define EDP_PSR_AUX_DATA1              0x64814
+#define EDP_PSR_AUX_CTL(dev)                   (EDP_PSR_BASE(dev) + 0x10)
+#define EDP_PSR_AUX_DATA1(dev)                 (EDP_PSR_BASE(dev) + 0x14)
 #define   EDP_PSR_DPCD_COMMAND         0x80060000
-#define EDP_PSR_AUX_DATA2              0x64818
+#define EDP_PSR_AUX_DATA2(dev)                 (EDP_PSR_BASE(dev) + 0x18)
 #define   EDP_PSR_DPCD_NORMAL_OPERATION        (1<<24)
-#define EDP_PSR_AUX_DATA3              0x6481c
-#define EDP_PSR_AUX_DATA4              0x64820
-#define EDP_PSR_AUX_DATA5              0x64824
+#define EDP_PSR_AUX_DATA3(dev)                 (EDP_PSR_BASE(dev) + 0x1c)
+#define EDP_PSR_AUX_DATA4(dev)                 (EDP_PSR_BASE(dev) + 0x20)
+#define EDP_PSR_AUX_DATA5(dev)                 (EDP_PSR_BASE(dev) + 0x24)
 
-#define EDP_PSR_STATUS_CTL                     0x64840
+#define EDP_PSR_STATUS_CTL(dev)                        (EDP_PSR_BASE(dev) + 0x40)
 #define   EDP_PSR_STATUS_STATE_MASK            (7<<29)
 #define   EDP_PSR_STATUS_STATE_IDLE            (0<<29)
 #define   EDP_PSR_STATUS_STATE_SRDONACK                (1<<29)
 #define   EDP_PSR_STATUS_SENDING_TP1           (1<<4)
 #define   EDP_PSR_STATUS_IDLE_MASK             0xf
 
-#define EDP_PSR_PERF_CNT               0x64844
+#define EDP_PSR_PERF_CNT(dev)          (EDP_PSR_BASE(dev) + 0x44)
 #define   EDP_PSR_PERF_CNT_MASK                0xffffff
 
-#define EDP_PSR_DEBUG_CTL              0x64860
+#define EDP_PSR_DEBUG_CTL(dev)         (EDP_PSR_BASE(dev) + 0x60)
 #define   EDP_PSR_DEBUG_MASK_LPSP      (1<<27)
 #define   EDP_PSR_DEBUG_MASK_MEMUP     (1<<26)
 #define   EDP_PSR_DEBUG_MASK_HPD       (1<<25)
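
The PSR registers above are now expressed relative to EDP_PSR_BASE(dev) (0x64800 here), so the offsets reproduce the old absolute addresses exactly; taking dev as a parameter suggests the base may become platform-dependent. A worked check against the removed defines:

/* EDP_PSR_CTL(dev)        = 0x64800 + 0x00 = 0x64800
 * EDP_PSR_AUX_CTL(dev)    = 0x64800 + 0x10 = 0x64810
 * EDP_PSR_STATUS_CTL(dev) = 0x64800 + 0x40 = 0x64840
 * EDP_PSR_PERF_CNT(dev)   = 0x64800 + 0x44 = 0x64844
 * EDP_PSR_DEBUG_CTL(dev)  = 0x64800 + 0x60 = 0x64860 */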
 
 /* Gen 4 SDVO/HDMI bits: */
 #define   SDVO_COLOR_FORMAT_8bpc               (0 << 26)
+#define   SDVO_COLOR_FORMAT_MASK               (7 << 26)
 #define   SDVO_ENCODING_SDVO                   (0 << 10)
 #define   SDVO_ENCODING_HDMI                   (2 << 10)
 #define   HDMI_MODE_SELECT_HDMI                        (1 << 9) /* HDMI only */
 #define   PIPECONF_DISABLE     0
 #define   PIPECONF_DOUBLE_WIDE (1<<30)
 #define   I965_PIPECONF_ACTIVE (1<<30)
+#define   PIPECONF_DSI_PLL_LOCKED      (1<<29) /* vlv & pipe A only */
 #define   PIPECONF_FRAME_START_DELAY_MASK (3<<27)
 #define   PIPECONF_SINGLE_WIDE 0
 #define   PIPECONF_PIPE_UNLOCKED 0
 
 /* define the Watermark register on Ironlake */
 #define WM0_PIPEA_ILK          0x45100
-#define  WM0_PIPE_PLANE_MASK   (0x7f<<16)
+#define  WM0_PIPE_PLANE_MASK   (0xffff<<16)
 #define  WM0_PIPE_PLANE_SHIFT  16
-#define  WM0_PIPE_SPRITE_MASK  (0x3f<<8)
+#define  WM0_PIPE_SPRITE_MASK  (0xff<<8)
 #define  WM0_PIPE_SPRITE_SHIFT 8
-#define  WM0_PIPE_CURSOR_MASK  (0x1f)
+#define  WM0_PIPE_CURSOR_MASK  (0xff)
 
 #define WM0_PIPEB_ILK          0x45104
 #define WM0_PIPEC_IVB          0x45200
 #define  WM1_LP_LATENCY_MASK   (0x7f<<24)
 #define  WM1_LP_FBC_MASK       (0xf<<20)
 #define  WM1_LP_FBC_SHIFT      20
-#define  WM1_LP_SR_MASK                (0x1ff<<8)
+#define  WM1_LP_SR_MASK                (0x7ff<<8)
 #define  WM1_LP_SR_SHIFT       8
-#define  WM1_LP_CURSOR_MASK    (0x3f)
+#define  WM1_LP_CURSOR_MASK    (0xff)
 #define WM2_LP_ILK             0x4510c
 #define  WM2_LP_EN             (1<<31)
 #define WM3_LP_ILK             0x45110
  *  } while (high1 != high2);
  *  frame = (high1 << 8) | low1;
  */
-#define _PIPEAFRAMEHIGH          (dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEAFRAMEHIGH          0x70040
 #define   PIPE_FRAME_HIGH_MASK    0x0000ffff
 #define   PIPE_FRAME_HIGH_SHIFT   0
-#define _PIPEAFRAMEPIXEL         (dev_priv->info->display_mmio_offset + 0x70044)
+#define _PIPEAFRAMEPIXEL         0x70044
 #define   PIPE_FRAME_LOW_MASK     0xff000000
 #define   PIPE_FRAME_LOW_SHIFT    24
 #define   PIPE_PIXEL_MASK         0x00ffffff
 #define   PIPE_PIXEL_SHIFT        0
 /* GM45+ just has to be different */
-#define _PIPEA_FRMCOUNT_GM45   0x70040
-#define _PIPEA_FLIPCOUNT_GM45  0x70044
+#define _PIPEA_FRMCOUNT_GM45   (dev_priv->info->display_mmio_offset + 0x70040)
+#define _PIPEA_FLIPCOUNT_GM45  (dev_priv->info->display_mmio_offset + 0x70044)
 #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
 
 /* Cursor A & B regs */
 #define _PIPEBDSL              (dev_priv->info->display_mmio_offset + 0x71000)
 #define _PIPEBCONF             (dev_priv->info->display_mmio_offset + 0x71008)
 #define _PIPEBSTAT             (dev_priv->info->display_mmio_offset + 0x71024)
-#define _PIPEBFRAMEHIGH                (dev_priv->info->display_mmio_offset + 0x71040)
-#define _PIPEBFRAMEPIXEL       (dev_priv->info->display_mmio_offset + 0x71044)
-#define _PIPEB_FRMCOUNT_GM45   0x71040
-#define _PIPEB_FLIPCOUNT_GM45  0x71044
+#define _PIPEBFRAMEHIGH                0x71040
+#define _PIPEBFRAMEPIXEL       0x71044
+#define _PIPEB_FRMCOUNT_GM45   (dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEB_FLIPCOUNT_GM45  (dev_priv->info->display_mmio_offset + 0x71044)
 
 
 /* Display B control */
 #define DE_PIPEB_ODD_FIELD      (1 << 13)
 #define DE_PIPEB_LINE_COMPARE   (1 << 12)
 #define DE_PIPEB_VSYNC          (1 << 11)
+#define DE_PIPEB_CRC_DONE      (1 << 10)
 #define DE_PIPEB_FIFO_UNDERRUN  (1 << 8)
 #define DE_PIPEA_VBLANK         (1 << 7)
 #define DE_PIPEA_EVEN_FIELD     (1 << 6)
 #define DE_PIPEA_ODD_FIELD      (1 << 5)
 #define DE_PIPEA_LINE_COMPARE   (1 << 4)
 #define DE_PIPEA_VSYNC          (1 << 3)
+#define DE_PIPEA_CRC_DONE      (1 << 2)
 #define DE_PIPEA_FIFO_UNDERRUN  (1 << 0)
 
 /* More Ivybridge lolz */
 #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG         0x9030
 #define  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB      (1<<11)
 
+#define HSW_SCRATCH1                           0xb038
+#define  HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE  (1<<27)
+
 #define HSW_FUSE_STRAP         0x42014
 #define  HSW_CDCLK_LIMIT       (1 << 24)
 
 #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)
 
 #define SOUTH_DSPCLK_GATE_D    0xc2020
+#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
 #define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
+#define  PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)
 #define  PCH_LP_PARTITION_LEVEL_DISABLE  (1<<12)
 
 /* CPU: FDI_TX */
 #define PIPEA_PP_STATUS         (VLV_DISPLAY_BASE + 0x61200)
 #define PIPEA_PP_CONTROL        (VLV_DISPLAY_BASE + 0x61204)
 #define PIPEA_PP_ON_DELAYS      (VLV_DISPLAY_BASE + 0x61208)
+#define  PANEL_PORT_SELECT_DPB_VLV     (1 << 30)
+#define  PANEL_PORT_SELECT_DPC_VLV     (2 << 30)
 #define PIPEA_PP_OFF_DELAYS     (VLV_DISPLAY_BASE + 0x6120c)
 #define PIPEA_PP_DIVISOR        (VLV_DISPLAY_BASE + 0x61210)
 
 #define  PANEL_PORT_SELECT_MASK        (3 << 30)
 #define  PANEL_PORT_SELECT_LVDS        (0 << 30)
 #define  PANEL_PORT_SELECT_DPA (1 << 30)
-#define  EDP_PANEL             (1 << 30)
 #define  PANEL_PORT_SELECT_DPC (2 << 30)
 #define  PANEL_PORT_SELECT_DPD (3 << 30)
 #define  PANEL_POWER_UP_DELAY_MASK     (0x1fff0000)
 #define  PANEL_LIGHT_ON_DELAY_SHIFT    0
 
 #define PCH_PP_OFF_DELAYS      0xc720c
-#define  PANEL_POWER_PORT_SELECT_MASK  (0x3 << 30)
-#define  PANEL_POWER_PORT_LVDS         (0 << 30)
-#define  PANEL_POWER_PORT_DP_A         (1 << 30)
-#define  PANEL_POWER_PORT_DP_C         (2 << 30)
-#define  PANEL_POWER_PORT_DP_D         (3 << 30)
 #define  PANEL_POWER_DOWN_DELAY_MASK   (0x1fff0000)
 #define  PANEL_POWER_DOWN_DELAY_SHIFT  16
 #define  PANEL_LIGHT_OFF_DELAY_MASK    (0x1fff)
 #define   GEN6_RP_UP_IDLE_MIN                  (0x1<<3)
 #define   GEN6_RP_UP_BUSY_AVG                  (0x2<<3)
 #define   GEN6_RP_UP_BUSY_CONT                 (0x4<<3)
-#define   GEN7_RP_DOWN_IDLE_AVG                        (0x2<<0)
+#define   GEN6_RP_DOWN_IDLE_AVG                        (0x2<<0)
 #define   GEN6_RP_DOWN_IDLE_CONT               (0x1<<0)
 #define GEN6_RP_UP_THRESHOLD                   0xA02C
 #define GEN6_RP_DOWN_THRESHOLD                 0xA030
                                                 GEN6_PM_RP_DOWN_TIMEOUT)
 
 #define GEN6_GT_GFX_RC6_LOCKED                 0x138104
+#define VLV_COUNTER_CONTROL                    0x138104
+#define   VLV_COUNT_RANGE_HIGH                 (1<<15)
+#define   VLV_MEDIA_RC6_COUNT_EN               (1<<1)
+#define   VLV_RENDER_RC6_COUNT_EN              (1<<0)
 #define GEN6_GT_GFX_RC6                                0x138108
 #define GEN6_GT_GFX_RC6p                       0x13810C
 #define GEN6_GT_GFX_RC6pp                      0x138110
 #define   GEN6_PCODE_READ_MIN_FREQ_TABLE       0x9
 #define          GEN6_PCODE_WRITE_RC6VIDS              0x4
 #define          GEN6_PCODE_READ_RC6VIDS               0x5
+#define   GEN6_PCODE_READ_D_COMP               0x10
+#define   GEN6_PCODE_WRITE_D_COMP              0x11
 #define   GEN6_ENCODE_RC6_VID(mv)              (((mv) - 245) / 5)
 #define   GEN6_DECODE_RC6_VID(vids)            (((vids) * 5) + 245)
 #define GEN6_PCODE_DATA                                0x138128
 
 /* IVYBRIDGE DPF */
 #define GEN7_L3CDERRST1                        0xB008 /* L3CD Error Status 1 */
+#define HSW_L3CDERRST11                        0xB208 /* L3CD Error Status register 1 slice 1 */
 #define   GEN7_L3CDERRST1_ROW_MASK     (0x7ff<<14)
 #define   GEN7_PARITY_ERROR_VALID      (1<<13)
 #define   GEN7_L3CDERRST1_BANK_MASK    (3<<11)
 #define   GEN7_L3CDERRST1_ENABLE       (1<<7)
 
 #define GEN7_L3LOG_BASE                        0xB070
+#define HSW_L3LOG_BASE_SLICE1          0xB270
 #define GEN7_L3LOG_SIZE                        0x80
 
 #define GEN7_HALF_SLICE_CHICKEN1       0xe100 /* IVB GT1 + VLV */
 #define GEN7_ROW_CHICKEN2_GT2          0xf4f4
 #define   DOP_CLOCK_GATING_DISABLE     (1<<0)
 
+#define HSW_ROW_CHICKEN3               0xe49c
+#define  HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE    (1 << 6)
+
 #define G4X_AUD_VID_DID                        (dev_priv->info->display_mmio_offset + 0x62020)
 #define INTEL_AUDIO_DEVCL              0x808629FB
 #define INTEL_AUDIO_DEVBLC             0x80862801
 #define   AUD_CONFIG_LOWER_N_SHIFT             4
 #define   AUD_CONFIG_LOWER_N_VALUE             (0xfff << 4)
 #define   AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT    16
-#define   AUD_CONFIG_PIXEL_CLOCK_HDMI          (0xf << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK     (0xf << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25175    (0 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_25200    (1 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27000    (2 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_27027    (3 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54000    (4 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_54054    (5 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74176    (6 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_74250    (7 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148352   (8 << 16)
+#define   AUD_CONFIG_PIXEL_CLOCK_HDMI_148500   (9 << 16)
 #define   AUD_CONFIG_DISABLE_NCTS              (1 << 3)
 
 /* HSW Audio */
 #define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
 #define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
 
+/* VLV MIPI registers */
+
+#define _MIPIA_PORT_CTRL                       (VLV_DISPLAY_BASE + 0x61190)
+#define _MIPIB_PORT_CTRL                       (VLV_DISPLAY_BASE + 0x61700)
+#define MIPI_PORT_CTRL(pipe)           _PIPE(pipe, _MIPIA_PORT_CTRL, _MIPIB_PORT_CTRL)
+#define  DPI_ENABLE                                    (1 << 31) /* A + B */
+#define  MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT             27
+#define  MIPIA_MIPI4DPHY_DELAY_COUNT_MASK              (0xf << 27)
+#define  DUAL_LINK_MODE_MASK                           (1 << 26)
+#define  DUAL_LINK_MODE_FRONT_BACK                     (0 << 26)
+#define  DUAL_LINK_MODE_PIXEL_ALTERNATIVE              (1 << 26)
+#define  DITHERING_ENABLE                              (1 << 25) /* A + B */
+#define  FLOPPED_HSTX                                  (1 << 23)
+#define  DE_INVERT                                     (1 << 19) /* XXX */
+#define  MIPIA_FLISDSI_DELAY_COUNT_SHIFT               18
+#define  MIPIA_FLISDSI_DELAY_COUNT_MASK                        (0xf << 18)
+#define  AFE_LATCHOUT                                  (1 << 17)
+#define  LP_OUTPUT_HOLD                                        (1 << 16)
+#define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_SHIFT          15
+#define  MIPIB_FLISDSI_DELAY_COUNT_HIGH_MASK           (1 << 15)
+#define  MIPIB_MIPI4DPHY_DELAY_COUNT_SHIFT             11
+#define  MIPIB_MIPI4DPHY_DELAY_COUNT_MASK              (0xf << 11)
+#define  CSB_SHIFT                                     9
+#define  CSB_MASK                                      (3 << 9)
+#define  CSB_20MHZ                                     (0 << 9)
+#define  CSB_10MHZ                                     (1 << 9)
+#define  CSB_40MHZ                                     (2 << 9)
+#define  BANDGAP_MASK                                  (1 << 8)
+#define  BANDGAP_PNW_CIRCUIT                           (0 << 8)
+#define  BANDGAP_LNC_CIRCUIT                           (1 << 8)
+#define  MIPIB_FLISDSI_DELAY_COUNT_LOW_SHIFT           5
+#define  MIPIB_FLISDSI_DELAY_COUNT_LOW_MASK            (7 << 5)
+#define  TEARING_EFFECT_DELAY                          (1 << 4) /* A + B */
+#define  TEARING_EFFECT_SHIFT                          2 /* A + B */
+#define  TEARING_EFFECT_MASK                           (3 << 2)
+#define  TEARING_EFFECT_OFF                            (0 << 2)
+#define  TEARING_EFFECT_DSI                            (1 << 2)
+#define  TEARING_EFFECT_GPIO                           (2 << 2)
+#define  LANE_CONFIGURATION_SHIFT                      0
+#define  LANE_CONFIGURATION_MASK                       (3 << 0)
+#define  LANE_CONFIGURATION_4LANE                      (0 << 0)
+#define  LANE_CONFIGURATION_DUAL_LINK_A                        (1 << 0)
+#define  LANE_CONFIGURATION_DUAL_LINK_B                        (2 << 0)
+
+#define _MIPIA_TEARING_CTRL                    (VLV_DISPLAY_BASE + 0x61194)
+#define _MIPIB_TEARING_CTRL                    (VLV_DISPLAY_BASE + 0x61704)
+#define MIPI_TEARING_CTRL(pipe)                _PIPE(pipe, _MIPIA_TEARING_CTRL, _MIPIB_TEARING_CTRL)
+#define  TEARING_EFFECT_DELAY_SHIFT                    0
+#define  TEARING_EFFECT_DELAY_MASK                     (0xffff << 0)
+
+/* XXX: all bits reserved */
+#define _MIPIA_AUTOPWG                         (VLV_DISPLAY_BASE + 0x611a0)
+
+/* MIPI DSI Controller and D-PHY registers */
+
+#define _MIPIA_DEVICE_READY                    (VLV_DISPLAY_BASE + 0xb000)
+#define _MIPIB_DEVICE_READY                    (VLV_DISPLAY_BASE + 0xb800)
+#define MIPI_DEVICE_READY(pipe)                _PIPE(pipe, _MIPIA_DEVICE_READY, _MIPIB_DEVICE_READY)
+#define  BUS_POSSESSION                                        (1 << 3) /* set to give bus to receiver */
+#define  ULPS_STATE_MASK                               (3 << 1)
+#define  ULPS_STATE_ENTER                              (2 << 1)
+#define  ULPS_STATE_EXIT                               (1 << 1)
+#define  ULPS_STATE_NORMAL_OPERATION                   (0 << 1)
+#define  DEVICE_READY                                  (1 << 0)
+
+#define _MIPIA_INTR_STAT                       (VLV_DISPLAY_BASE + 0xb004)
+#define _MIPIB_INTR_STAT                       (VLV_DISPLAY_BASE + 0xb804)
+#define MIPI_INTR_STAT(pipe)           _PIPE(pipe, _MIPIA_INTR_STAT, _MIPIB_INTR_STAT)
+#define _MIPIA_INTR_EN                         (VLV_DISPLAY_BASE + 0xb008)
+#define _MIPIB_INTR_EN                         (VLV_DISPLAY_BASE + 0xb808)
+#define MIPI_INTR_EN(pipe)             _PIPE(pipe, _MIPIA_INTR_EN, _MIPIB_INTR_EN)
+#define  TEARING_EFFECT                                        (1 << 31)
+#define  SPL_PKT_SENT_INTERRUPT                                (1 << 30)
+#define  GEN_READ_DATA_AVAIL                           (1 << 29)
+#define  LP_GENERIC_WR_FIFO_FULL                       (1 << 28)
+#define  HS_GENERIC_WR_FIFO_FULL                       (1 << 27)
+#define  RX_PROT_VIOLATION                             (1 << 26)
+#define  RX_INVALID_TX_LENGTH                          (1 << 25)
+#define  ACK_WITH_NO_ERROR                             (1 << 24)
+#define  TURN_AROUND_ACK_TIMEOUT                       (1 << 23)
+#define  LP_RX_TIMEOUT                                 (1 << 22)
+#define  HS_TX_TIMEOUT                                 (1 << 21)
+#define  DPI_FIFO_UNDERRUN                             (1 << 20)
+#define  LOW_CONTENTION                                        (1 << 19)
+#define  HIGH_CONTENTION                               (1 << 18)
+#define  TXDSI_VC_ID_INVALID                           (1 << 17)
+#define  TXDSI_DATA_TYPE_NOT_RECOGNISED                        (1 << 16)
+#define  TXCHECKSUM_ERROR                              (1 << 15)
+#define  TXECC_MULTIBIT_ERROR                          (1 << 14)
+#define  TXECC_SINGLE_BIT_ERROR                                (1 << 13)
+#define  TXFALSE_CONTROL_ERROR                         (1 << 12)
+#define  RXDSI_VC_ID_INVALID                           (1 << 11)
+#define  RXDSI_DATA_TYPE_NOT_REGOGNISED                        (1 << 10)
+#define  RXCHECKSUM_ERROR                              (1 << 9)
+#define  RXECC_MULTIBIT_ERROR                          (1 << 8)
+#define  RXECC_SINGLE_BIT_ERROR                                (1 << 7)
+#define  RXFALSE_CONTROL_ERROR                         (1 << 6)
+#define  RXHS_RECEIVE_TIMEOUT_ERROR                    (1 << 5)
+#define  RX_LP_TX_SYNC_ERROR                           (1 << 4)
+#define  RXEXCAPE_MODE_ENTRY_ERROR                     (1 << 3)
+#define  RXEOT_SYNC_ERROR                              (1 << 2)
+#define  RXSOT_SYNC_ERROR                              (1 << 1)
+#define  RXSOT_ERROR                                   (1 << 0)
+
+#define _MIPIA_DSI_FUNC_PRG                    (VLV_DISPLAY_BASE + 0xb00c)
+#define _MIPIB_DSI_FUNC_PRG                    (VLV_DISPLAY_BASE + 0xb80c)
+#define MIPI_DSI_FUNC_PRG(pipe)                _PIPE(pipe, _MIPIA_DSI_FUNC_PRG, _MIPIB_DSI_FUNC_PRG)
+#define  CMD_MODE_DATA_WIDTH_MASK                      (7 << 13)
+#define  CMD_MODE_NOT_SUPPORTED                                (0 << 13)
+#define  CMD_MODE_DATA_WIDTH_16_BIT                    (1 << 13)
+#define  CMD_MODE_DATA_WIDTH_9_BIT                     (2 << 13)
+#define  CMD_MODE_DATA_WIDTH_8_BIT                     (3 << 13)
+#define  CMD_MODE_DATA_WIDTH_OPTION1                   (4 << 13)
+#define  CMD_MODE_DATA_WIDTH_OPTION2                   (5 << 13)
+#define  VID_MODE_FORMAT_MASK                          (0xf << 7)
+#define  VID_MODE_NOT_SUPPORTED                                (0 << 7)
+#define  VID_MODE_FORMAT_RGB565                                (1 << 7)
+#define  VID_MODE_FORMAT_RGB666                                (2 << 7)
+#define  VID_MODE_FORMAT_RGB666_LOOSE                  (3 << 7)
+#define  VID_MODE_FORMAT_RGB888                                (4 << 7)
+#define  CMD_MODE_CHANNEL_NUMBER_SHIFT                 5
+#define  CMD_MODE_CHANNEL_NUMBER_MASK                  (3 << 5)
+#define  VID_MODE_CHANNEL_NUMBER_SHIFT                 3
+#define  VID_MODE_CHANNEL_NUMBER_MASK                  (3 << 3)
+#define  DATA_LANES_PRG_REG_SHIFT                      0
+#define  DATA_LANES_PRG_REG_MASK                       (7 << 0)
+
+#define _MIPIA_HS_TX_TIMEOUT                   (VLV_DISPLAY_BASE + 0xb010)
+#define _MIPIB_HS_TX_TIMEOUT                   (VLV_DISPLAY_BASE + 0xb810)
+#define MIPI_HS_TX_TIMEOUT(pipe)       _PIPE(pipe, _MIPIA_HS_TX_TIMEOUT, _MIPIB_HS_TX_TIMEOUT)
+#define  HIGH_SPEED_TX_TIMEOUT_COUNTER_MASK            0xffffff
+
+#define _MIPIA_LP_RX_TIMEOUT                   (VLV_DISPLAY_BASE + 0xb014)
+#define _MIPIB_LP_RX_TIMEOUT                   (VLV_DISPLAY_BASE + 0xb814)
+#define MIPI_LP_RX_TIMEOUT(pipe)       _PIPE(pipe, _MIPIA_LP_RX_TIMEOUT, _MIPIB_LP_RX_TIMEOUT)
+#define  LOW_POWER_RX_TIMEOUT_COUNTER_MASK             0xffffff
+
+#define _MIPIA_TURN_AROUND_TIMEOUT             (VLV_DISPLAY_BASE + 0xb018)
+#define _MIPIB_TURN_AROUND_TIMEOUT             (VLV_DISPLAY_BASE + 0xb818)
+#define MIPI_TURN_AROUND_TIMEOUT(pipe) _PIPE(pipe, _MIPIA_TURN_AROUND_TIMEOUT, _MIPIB_TURN_AROUND_TIMEOUT)
+#define  TURN_AROUND_TIMEOUT_MASK                      0x3f
+
+#define _MIPIA_DEVICE_RESET_TIMER              (VLV_DISPLAY_BASE + 0xb01c)
+#define _MIPIB_DEVICE_RESET_TIMER              (VLV_DISPLAY_BASE + 0xb81c)
+#define MIPI_DEVICE_RESET_TIMER(pipe)  _PIPE(pipe, _MIPIA_DEVICE_RESET_TIMER, _MIPIB_DEVICE_RESET_TIMER)
+#define  DEVICE_RESET_TIMER_MASK                       0xffff
+
+#define _MIPIA_DPI_RESOLUTION                  (VLV_DISPLAY_BASE + 0xb020)
+#define _MIPIB_DPI_RESOLUTION                  (VLV_DISPLAY_BASE + 0xb820)
+#define MIPI_DPI_RESOLUTION(pipe)      _PIPE(pipe, _MIPIA_DPI_RESOLUTION, _MIPIB_DPI_RESOLUTION)
+#define  VERTICAL_ADDRESS_SHIFT                                16
+#define  VERTICAL_ADDRESS_MASK                         (0xffff << 16)
+#define  HORIZONTAL_ADDRESS_SHIFT                      0
+#define  HORIZONTAL_ADDRESS_MASK                       0xffff
+
+#define _MIPIA_DBI_FIFO_THROTTLE               (VLV_DISPLAY_BASE + 0xb024)
+#define _MIPIB_DBI_FIFO_THROTTLE               (VLV_DISPLAY_BASE + 0xb824)
+#define MIPI_DBI_FIFO_THROTTLE(pipe)   _PIPE(pipe, _MIPIA_DBI_FIFO_THROTTLE, _MIPIB_DBI_FIFO_THROTTLE)
+#define  DBI_FIFO_EMPTY_HALF                           (0 << 0)
+#define  DBI_FIFO_EMPTY_QUARTER                                (1 << 0)
+#define  DBI_FIFO_EMPTY_7_LOCATIONS                    (2 << 0)
+
+/* regs below are bits 15:0 */
+#define _MIPIA_HSYNC_PADDING_COUNT             (VLV_DISPLAY_BASE + 0xb028)
+#define _MIPIB_HSYNC_PADDING_COUNT             (VLV_DISPLAY_BASE + 0xb828)
+#define MIPI_HSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_HSYNC_PADDING_COUNT, _MIPIB_HSYNC_PADDING_COUNT)
+
+#define _MIPIA_HBP_COUNT                       (VLV_DISPLAY_BASE + 0xb02c)
+#define _MIPIB_HBP_COUNT                       (VLV_DISPLAY_BASE + 0xb82c)
+#define MIPI_HBP_COUNT(pipe)           _PIPE(pipe, _MIPIA_HBP_COUNT, _MIPIB_HBP_COUNT)
+
+#define _MIPIA_HFP_COUNT                       (VLV_DISPLAY_BASE + 0xb030)
+#define _MIPIB_HFP_COUNT                       (VLV_DISPLAY_BASE + 0xb830)
+#define MIPI_HFP_COUNT(pipe)           _PIPE(pipe, _MIPIA_HFP_COUNT, _MIPIB_HFP_COUNT)
+
+#define _MIPIA_HACTIVE_AREA_COUNT              (VLV_DISPLAY_BASE + 0xb034)
+#define _MIPIB_HACTIVE_AREA_COUNT              (VLV_DISPLAY_BASE + 0xb834)
+#define MIPI_HACTIVE_AREA_COUNT(pipe)  _PIPE(pipe, _MIPIA_HACTIVE_AREA_COUNT, _MIPIB_HACTIVE_AREA_COUNT)
+
+#define _MIPIA_VSYNC_PADDING_COUNT             (VLV_DISPLAY_BASE + 0xb038)
+#define _MIPIB_VSYNC_PADDING_COUNT             (VLV_DISPLAY_BASE + 0xb838)
+#define MIPI_VSYNC_PADDING_COUNT(pipe) _PIPE(pipe, _MIPIA_VSYNC_PADDING_COUNT, _MIPIB_VSYNC_PADDING_COUNT)
+
+#define _MIPIA_VBP_COUNT                       (VLV_DISPLAY_BASE + 0xb03c)
+#define _MIPIB_VBP_COUNT                       (VLV_DISPLAY_BASE + 0xb83c)
+#define MIPI_VBP_COUNT(pipe)           _PIPE(pipe, _MIPIA_VBP_COUNT, _MIPIB_VBP_COUNT)
+
+#define _MIPIA_VFP_COUNT                       (VLV_DISPLAY_BASE + 0xb040)
+#define _MIPIB_VFP_COUNT                       (VLV_DISPLAY_BASE + 0xb840)
+#define MIPI_VFP_COUNT(pipe)           _PIPE(pipe, _MIPIA_VFP_COUNT, _MIPIB_VFP_COUNT)
+
+#define _MIPIA_HIGH_LOW_SWITCH_COUNT           (VLV_DISPLAY_BASE + 0xb044)
+#define _MIPIB_HIGH_LOW_SWITCH_COUNT           (VLV_DISPLAY_BASE + 0xb844)
+#define MIPI_HIGH_LOW_SWITCH_COUNT(pipe)       _PIPE(pipe, _MIPIA_HIGH_LOW_SWITCH_COUNT, _MIPIB_HIGH_LOW_SWITCH_COUNT)
+/* regs above are bits 15:0 */
+
+#define _MIPIA_DPI_CONTROL                     (VLV_DISPLAY_BASE + 0xb048)
+#define _MIPIB_DPI_CONTROL                     (VLV_DISPLAY_BASE + 0xb848)
+#define MIPI_DPI_CONTROL(pipe)         _PIPE(pipe, _MIPIA_DPI_CONTROL, _MIPIB_DPI_CONTROL)
+#define  DPI_LP_MODE                                   (1 << 6)
+#define  BACKLIGHT_OFF                                 (1 << 5)
+#define  BACKLIGHT_ON                                  (1 << 4)
+#define  COLOR_MODE_OFF                                        (1 << 3)
+#define  COLOR_MODE_ON                                 (1 << 2)
+#define  TURN_ON                                       (1 << 1)
+#define  SHUTDOWN                                      (1 << 0)
+
+#define _MIPIA_DPI_DATA                                (VLV_DISPLAY_BASE + 0xb04c)
+#define _MIPIB_DPI_DATA                                (VLV_DISPLAY_BASE + 0xb84c)
+#define MIPI_DPI_DATA(pipe)            _PIPE(pipe, _MIPIA_DPI_DATA, _MIPIB_DPI_DATA)
+#define  COMMAND_BYTE_SHIFT                            0
+#define  COMMAND_BYTE_MASK                             (0x3f << 0)
+
+#define _MIPIA_INIT_COUNT                      (VLV_DISPLAY_BASE + 0xb050)
+#define _MIPIB_INIT_COUNT                      (VLV_DISPLAY_BASE + 0xb850)
+#define MIPI_INIT_COUNT(pipe)          _PIPE(pipe, _MIPIA_INIT_COUNT, _MIPIB_INIT_COUNT)
+#define  MASTER_INIT_TIMER_SHIFT                       0
+#define  MASTER_INIT_TIMER_MASK                                (0xffff << 0)
+
+#define _MIPIA_MAX_RETURN_PKT_SIZE             (VLV_DISPLAY_BASE + 0xb054)
+#define _MIPIB_MAX_RETURN_PKT_SIZE             (VLV_DISPLAY_BASE + 0xb854)
+#define MIPI_MAX_RETURN_PKT_SIZE(pipe) _PIPE(pipe, _MIPIA_MAX_RETURN_PKT_SIZE, _MIPIB_MAX_RETURN_PKT_SIZE)
+#define  MAX_RETURN_PKT_SIZE_SHIFT                     0
+#define  MAX_RETURN_PKT_SIZE_MASK                      (0x3ff << 0)
+
+#define _MIPIA_VIDEO_MODE_FORMAT               (VLV_DISPLAY_BASE + 0xb058)
+#define _MIPIB_VIDEO_MODE_FORMAT               (VLV_DISPLAY_BASE + 0xb858)
+#define MIPI_VIDEO_MODE_FORMAT(pipe)   _PIPE(pipe, _MIPIA_VIDEO_MODE_FORMAT, _MIPIB_VIDEO_MODE_FORMAT)
+#define  RANDOM_DPI_DISPLAY_RESOLUTION                 (1 << 4)
+#define  DISABLE_VIDEO_BTA                             (1 << 3)
+#define  IP_TG_CONFIG                                  (1 << 2)
+#define  VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE          (1 << 0)
+#define  VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS         (2 << 0)
+#define  VIDEO_MODE_BURST                              (3 << 0)
+
+#define _MIPIA_EOT_DISABLE                     (VLV_DISPLAY_BASE + 0xb05c)
+#define _MIPIB_EOT_DISABLE                     (VLV_DISPLAY_BASE + 0xb85c)
+#define MIPI_EOT_DISABLE(pipe)         _PIPE(pipe, _MIPIA_EOT_DISABLE, _MIPIB_EOT_DISABLE)
+#define  LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE          (1 << 7)
+#define  HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE          (1 << 6)
+#define  LOW_CONTENTION_RECOVERY_DISABLE               (1 << 5)
+#define  HIGH_CONTENTION_RECOVERY_DISABLE              (1 << 4)
+#define  TXDSI_TYPE_NOT_RECOGNISED_ERROR_RECOVERY_DISABLE (1 << 3)
+#define  TXECC_MULTIBIT_ERROR_RECOVERY_DISABLE         (1 << 2)
+#define  CLOCKSTOP                                     (1 << 1)
+#define  EOT_DISABLE                                   (1 << 0)
+
+#define _MIPIA_LP_BYTECLK                      (VLV_DISPLAY_BASE + 0xb060)
+#define _MIPIB_LP_BYTECLK                      (VLV_DISPLAY_BASE + 0xb860)
+#define MIPI_LP_BYTECLK(pipe)          _PIPE(pipe, _MIPIA_LP_BYTECLK, _MIPIB_LP_BYTECLK)
+#define  LP_BYTECLK_SHIFT                              0
+#define  LP_BYTECLK_MASK                               (0xffff << 0)
+
+/* bits 31:0 */
+#define _MIPIA_LP_GEN_DATA                     (VLV_DISPLAY_BASE + 0xb064)
+#define _MIPIB_LP_GEN_DATA                     (VLV_DISPLAY_BASE + 0xb864)
+#define MIPI_LP_GEN_DATA(pipe)         _PIPE(pipe, _MIPIA_LP_GEN_DATA, _MIPIB_LP_GEN_DATA)
+
+/* bits 31:0 */
+#define _MIPIA_HS_GEN_DATA                     (VLV_DISPLAY_BASE + 0xb068)
+#define _MIPIB_HS_GEN_DATA                     (VLV_DISPLAY_BASE + 0xb868)
+#define MIPI_HS_GEN_DATA(pipe)         _PIPE(pipe, _MIPIA_HS_GEN_DATA, _MIPIB_HS_GEN_DATA)
+
+#define _MIPIA_LP_GEN_CTRL                     (VLV_DISPLAY_BASE + 0xb06c)
+#define _MIPIB_LP_GEN_CTRL                     (VLV_DISPLAY_BASE + 0xb86c)
+#define MIPI_LP_GEN_CTRL(pipe)         _PIPE(pipe, _MIPIA_LP_GEN_CTRL, _MIPIB_LP_GEN_CTRL)
+#define _MIPIA_HS_GEN_CTRL                     (VLV_DISPLAY_BASE + 0xb070)
+#define _MIPIB_HS_GEN_CTRL                     (VLV_DISPLAY_BASE + 0xb870)
+#define MIPI_HS_GEN_CTRL(pipe)         _PIPE(pipe, _MIPIA_HS_GEN_CTRL, _MIPIB_HS_GEN_CTRL)
+#define  LONG_PACKET_WORD_COUNT_SHIFT                  8
+#define  LONG_PACKET_WORD_COUNT_MASK                   (0xffff << 8)
+#define  SHORT_PACKET_PARAM_SHIFT                      8
+#define  SHORT_PACKET_PARAM_MASK                       (0xffff << 8)
+#define  VIRTUAL_CHANNEL_SHIFT                         6
+#define  VIRTUAL_CHANNEL_MASK                          (3 << 6)
+#define  DATA_TYPE_SHIFT                               0
+#define  DATA_TYPE_MASK                                        (0x3f << 0)
+/* data type values, see include/video/mipi_display.h */
+
+#define _MIPIA_GEN_FIFO_STAT                   (VLV_DISPLAY_BASE + 0xb074)
+#define _MIPIB_GEN_FIFO_STAT                   (VLV_DISPLAY_BASE + 0xb874)
+#define MIPI_GEN_FIFO_STAT(pipe)       _PIPE(pipe, _MIPIA_GEN_FIFO_STAT, _MIPIB_GEN_FIFO_STAT)
+#define  DPI_FIFO_EMPTY                                        (1 << 28)
+#define  DBI_FIFO_EMPTY                                        (1 << 27)
+#define  LP_CTRL_FIFO_EMPTY                            (1 << 26)
+#define  LP_CTRL_FIFO_HALF_EMPTY                       (1 << 25)
+#define  LP_CTRL_FIFO_FULL                             (1 << 24)
+#define  HS_CTRL_FIFO_EMPTY                            (1 << 18)
+#define  HS_CTRL_FIFO_HALF_EMPTY                       (1 << 17)
+#define  HS_CTRL_FIFO_FULL                             (1 << 16)
+#define  LP_DATA_FIFO_EMPTY                            (1 << 10)
+#define  LP_DATA_FIFO_HALF_EMPTY                       (1 << 9)
+#define  LP_DATA_FIFO_FULL                             (1 << 8)
+#define  HS_DATA_FIFO_EMPTY                            (1 << 2)
+#define  HS_DATA_FIFO_HALF_EMPTY                       (1 << 1)
+#define  HS_DATA_FIFO_FULL                             (1 << 0)
+
+#define _MIPIA_HS_LS_DBI_ENABLE                        (VLV_DISPLAY_BASE + 0xb078)
+#define _MIPIB_HS_LS_DBI_ENABLE                        (VLV_DISPLAY_BASE + 0xb878)
+#define MIPI_HS_LP_DBI_ENABLE(pipe)    _PIPE(pipe, _MIPIA_HS_LS_DBI_ENABLE, _MIPIB_HS_LS_DBI_ENABLE)
+#define  DBI_HS_LP_MODE_MASK                           (1 << 0)
+#define  DBI_LP_MODE                                   (1 << 0)
+#define  DBI_HS_MODE                                   (0 << 0)
+
+#define _MIPIA_DPHY_PARAM                      (VLV_DISPLAY_BASE + 0xb080)
+#define _MIPIB_DPHY_PARAM                      (VLV_DISPLAY_BASE + 0xb880)
+#define MIPI_DPHY_PARAM(pipe)          _PIPE(pipe, _MIPIA_DPHY_PARAM, _MIPIB_DPHY_PARAM)
+#define  EXIT_ZERO_COUNT_SHIFT                         24
+#define  EXIT_ZERO_COUNT_MASK                          (0x3f << 24)
+#define  TRAIL_COUNT_SHIFT                             16
+#define  TRAIL_COUNT_MASK                              (0x1f << 16)
+#define  CLK_ZERO_COUNT_SHIFT                          8
+#define  CLK_ZERO_COUNT_MASK                           (0xff << 8)
+#define  PREPARE_COUNT_SHIFT                           0
+#define  PREPARE_COUNT_MASK                            (0x3f << 0)
+
+/* bits 31:0 */
+#define _MIPIA_DBI_BW_CTRL                     (VLV_DISPLAY_BASE + 0xb084)
+#define _MIPIB_DBI_BW_CTRL                     (VLV_DISPLAY_BASE + 0xb884)
+#define MIPI_DBI_BW_CTRL(pipe)         _PIPE(pipe, _MIPIA_DBI_BW_CTRL, _MIPIB_DBI_BW_CTRL)
+
+#define _MIPIA_CLK_LANE_SWITCH_TIME_CNT                (VLV_DISPLAY_BASE + 0xb088)
+#define _MIPIB_CLK_LANE_SWITCH_TIME_CNT                (VLV_DISPLAY_BASE + 0xb888)
+#define MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe)    _PIPE(pipe, _MIPIA_CLK_LANE_SWITCH_TIME_CNT, _MIPIB_CLK_LANE_SWITCH_TIME_CNT)
+#define  LP_HS_SSW_CNT_SHIFT                           16
+#define  LP_HS_SSW_CNT_MASK                            (0xffff << 16)
+#define  HS_LP_PWR_SW_CNT_SHIFT                                0
+#define  HS_LP_PWR_SW_CNT_MASK                         (0xffff << 0)
+
+#define _MIPIA_STOP_STATE_STALL                        (VLV_DISPLAY_BASE + 0xb08c)
+#define _MIPIB_STOP_STATE_STALL                        (VLV_DISPLAY_BASE + 0xb88c)
+#define MIPI_STOP_STATE_STALL(pipe)    _PIPE(pipe, _MIPIA_STOP_STATE_STALL, _MIPIB_STOP_STATE_STALL)
+#define  STOP_STATE_STALL_COUNTER_SHIFT                        0
+#define  STOP_STATE_STALL_COUNTER_MASK                 (0xff << 0)
+
+#define _MIPIA_INTR_STAT_REG_1                 (VLV_DISPLAY_BASE + 0xb090)
+#define _MIPIB_INTR_STAT_REG_1                 (VLV_DISPLAY_BASE + 0xb890)
+#define MIPI_INTR_STAT_REG_1(pipe)     _PIPE(pipe, _MIPIA_INTR_STAT_REG_1, _MIPIB_INTR_STAT_REG_1)
+#define _MIPIA_INTR_EN_REG_1                   (VLV_DISPLAY_BASE + 0xb094)
+#define _MIPIB_INTR_EN_REG_1                   (VLV_DISPLAY_BASE + 0xb894)
+#define MIPI_INTR_EN_REG_1(pipe)       _PIPE(pipe, _MIPIA_INTR_EN_REG_1, _MIPIB_INTR_EN_REG_1)
+#define  RX_CONTENTION_DETECTED                                (1 << 0)
+
+/* XXX: only pipe A ?!? */
+#define MIPIA_DBI_TYPEC_CTRL                   (VLV_DISPLAY_BASE + 0xb100)
+#define  DBI_TYPEC_ENABLE                              (1 << 31)
+#define  DBI_TYPEC_WIP                                 (1 << 30)
+#define  DBI_TYPEC_OPTION_SHIFT                                28
+#define  DBI_TYPEC_OPTION_MASK                         (3 << 28)
+#define  DBI_TYPEC_FREQ_SHIFT                          24
+#define  DBI_TYPEC_FREQ_MASK                           (0xf << 24)
+#define  DBI_TYPEC_OVERRIDE                            (1 << 8)
+#define  DBI_TYPEC_OVERRIDE_COUNTER_SHIFT              0
+#define  DBI_TYPEC_OVERRIDE_COUNTER_MASK               (0xff << 0)
+
+
+/* MIPI adapter registers */
+
+#define _MIPIA_CTRL                            (VLV_DISPLAY_BASE + 0xb104)
+#define _MIPIB_CTRL                            (VLV_DISPLAY_BASE + 0xb904)
+#define MIPI_CTRL(pipe)                        _PIPE(pipe, _MIPIA_CTRL, _MIPIB_CTRL)
+#define  ESCAPE_CLOCK_DIVIDER_SHIFT                    5 /* A only */
+#define  ESCAPE_CLOCK_DIVIDER_MASK                     (3 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_1                                (0 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_2                                (1 << 5)
+#define  ESCAPE_CLOCK_DIVIDER_4                                (2 << 5)
+#define  READ_REQUEST_PRIORITY_SHIFT                   3
+#define  READ_REQUEST_PRIORITY_MASK                    (3 << 3)
+#define  READ_REQUEST_PRIORITY_LOW                     (0 << 3)
+#define  READ_REQUEST_PRIORITY_HIGH                    (3 << 3)
+#define  RGB_FLIP_TO_BGR                               (1 << 2)
+
+#define _MIPIA_DATA_ADDRESS                    (VLV_DISPLAY_BASE + 0xb108)
+#define _MIPIB_DATA_ADDRESS                    (VLV_DISPLAY_BASE + 0xb908)
+#define MIPI_DATA_ADDRESS(pipe)                _PIPE(pipe, _MIPIA_DATA_ADDRESS, _MIPIB_DATA_ADDRESS)
+#define  DATA_MEM_ADDRESS_SHIFT                                5
+#define  DATA_MEM_ADDRESS_MASK                         (0x7ffffff << 5)
+#define  DATA_VALID                                    (1 << 0)
+
+#define _MIPIA_DATA_LENGTH                     (VLV_DISPLAY_BASE + 0xb10c)
+#define _MIPIB_DATA_LENGTH                     (VLV_DISPLAY_BASE + 0xb90c)
+#define MIPI_DATA_LENGTH(pipe)         _PIPE(pipe, _MIPIA_DATA_LENGTH, _MIPIB_DATA_LENGTH)
+#define  DATA_LENGTH_SHIFT                             0
+#define  DATA_LENGTH_MASK                              (0xfffff << 0)
+
+#define _MIPIA_COMMAND_ADDRESS                 (VLV_DISPLAY_BASE + 0xb110)
+#define _MIPIB_COMMAND_ADDRESS                 (VLV_DISPLAY_BASE + 0xb910)
+#define MIPI_COMMAND_ADDRESS(pipe)     _PIPE(pipe, _MIPIA_COMMAND_ADDRESS, _MIPIB_COMMAND_ADDRESS)
+#define  COMMAND_MEM_ADDRESS_SHIFT                     5
+#define  COMMAND_MEM_ADDRESS_MASK                      (0x7ffffff << 5)
+#define  AUTO_PWG_ENABLE                               (1 << 2)
+#define  MEMORY_WRITE_DATA_FROM_PIPE_RENDERING         (1 << 1)
+#define  COMMAND_VALID                                 (1 << 0)
+
+#define _MIPIA_COMMAND_LENGTH                  (VLV_DISPLAY_BASE + 0xb114)
+#define _MIPIB_COMMAND_LENGTH                  (VLV_DISPLAY_BASE + 0xb914)
+#define MIPI_COMMAND_LENGTH(pipe)      _PIPE(pipe, _MIPIA_COMMAND_LENGTH, _MIPIB_COMMAND_LENGTH)
+#define  COMMAND_LENGTH_SHIFT(n)                       (8 * (n)) /* n: 0...3 */
+#define  COMMAND_LENGTH_MASK(n)                                (0xff << (8 * (n)))
+
+#define _MIPIA_READ_DATA_RETURN0               (VLV_DISPLAY_BASE + 0xb118)
+#define _MIPIB_READ_DATA_RETURN0               (VLV_DISPLAY_BASE + 0xb918)
+#define MIPI_READ_DATA_RETURN(pipe, n) \
+       (_PIPE(pipe, _MIPIA_READ_DATA_RETURN0, _MIPIB_READ_DATA_RETURN0) + 4 * (n)) /* n: 0...7 */
+
+#define _MIPIA_READ_DATA_VALID                 (VLV_DISPLAY_BASE + 0xb138)
+#define _MIPIB_READ_DATA_VALID                 (VLV_DISPLAY_BASE + 0xb938)
+#define MIPI_READ_DATA_VALID(pipe)     _PIPE(pipe, _MIPIA_READ_DATA_VALID, _MIPIB_READ_DATA_VALID)
+#define  READ_DATA_VALID(n)                            (1 << (n))
+
 #endif /* _I915_REG_H_ */
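The MIPI block added above defines each register twice, once per DSI port (the A instances in the 0xb0xx range above the display MMIO base, the B instances 0x800 higher), then folds every pair into one pipe-indexed macro. Below is a minimal standalone sketch of that addressing pattern; the pipe_reg() helper is an assumption modelled on the driver's _PIPE() idiom (its definition is not part of this hunk), and 0x180000 is used only as an illustrative display base.

/* Standalone sketch (not part of the patch): how a MIPI A/B register pair
 * collapses into one pipe-indexed address.  pipe_reg() mimics the _PIPE()
 * idiom; SKETCH_DISPLAY_BASE is an illustrative value. */
#include <stdio.h>
#include <stdint.h>

#define SKETCH_DISPLAY_BASE     0x180000u
#define SKETCH_MIPIA_DPI_CTL    (SKETCH_DISPLAY_BASE + 0xb048)
#define SKETCH_MIPIB_DPI_CTL    (SKETCH_DISPLAY_BASE + 0xb848)

static uint32_t pipe_reg(int pipe, uint32_t a, uint32_t b)
{
        /* instances are evenly spaced, so interpolate from the pipe 0 offset */
        return a + (uint32_t)pipe * (b - a);
}

int main(void)
{
        printf("DPI_CONTROL pipe 0: 0x%08x\n",
               (unsigned)pipe_reg(0, SKETCH_MIPIA_DPI_CTL, SKETCH_MIPIB_DPI_CTL));
        printf("DPI_CONTROL pipe 1: 0x%08x\n",
               (unsigned)pipe_reg(1, SKETCH_MIPIA_DPI_CTL, SKETCH_MIPIB_DPI_CTL));
        return 0;
}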
index 70db618989c42a3bcd3a73df44593a368e2d66d0..a088f1f46bdba4bed261a0cea8ee3891f2b3816e 100644 (file)
@@ -340,7 +340,9 @@ int i915_save_state(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
+       if (INTEL_INFO(dev)->gen <= 4)
+               pci_read_config_byte(dev->pdev, LBB,
+                                    &dev_priv->regfile.saveLBB);
 
        mutex_lock(&dev->struct_mutex);
 
@@ -367,7 +369,8 @@ int i915_save_state(struct drm_device *dev)
        intel_disable_gt_powersave(dev);
 
        /* Cache mode state */
-       dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+       if (INTEL_INFO(dev)->gen < 7)
+               dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
        /* Memory Arbitration state */
        dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
@@ -390,7 +393,9 @@ int i915_restore_state(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
+       if (INTEL_INFO(dev)->gen <= 4)
+               pci_write_config_byte(dev->pdev, LBB,
+                                     dev_priv->regfile.saveLBB);
 
        mutex_lock(&dev->struct_mutex);
 
@@ -414,7 +419,9 @@ int i915_restore_state(struct drm_device *dev)
        }
 
        /* Cache mode state */
-       I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
+       if (INTEL_INFO(dev)->gen < 7)
+               I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 |
+                          0xffff0000);
 
        /* Memory arbitration state */
        I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
index c8c4112de1108e9293066305eca079fdac8651d2..cef38fd320a7c5c53c687ca5b67b0d1d3192d615 100644 (file)
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+#define dev_to_drm_minor(d) dev_get_drvdata((d))
+
 #ifdef CONFIG_PM
 static u32 calc_residency(struct drm_device *dev, const u32 reg)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 raw_time; /* 32b value may overflow during fixed point math */
+       u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
 
        if (!intel_enable_rc6(dev))
                return 0;
 
-       raw_time = I915_READ(reg) * 128ULL;
-       return DIV_ROUND_UP_ULL(raw_time, 100000);
+       /* On VLV, residency time is in CZ units rather than 1.28us */
+       if (IS_VALLEYVIEW(dev)) {
+               u32 clkctl2;
+
+               clkctl2 = I915_READ(VLV_CLK_CTL2) >>
+                       CLK_CTL2_CZCOUNT_30NS_SHIFT;
+               if (!clkctl2) {
+                       WARN(!clkctl2, "bogus CZ count value");
+                       return 0;
+               }
+               units = DIV_ROUND_UP_ULL(30ULL * bias, (u64)clkctl2);
+               if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+                       units <<= 8;
+
+               div = 1000000ULL * bias;
+       }
+
+       raw_time = I915_READ(reg) * units;
+       return DIV_ROUND_UP_ULL(raw_time, div);
 }
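The Valleyview branch added to calc_residency() above rescales the counter from CZ clock ticks to milliseconds: units becomes DIV_ROUND_UP(30 * bias, czcount_30ns), shifted left by 8 when the counter runs in its high range, and the product with the raw register value is divided by 1000000 * bias. Below is a standalone sketch of that arithmetic, with invented register values, for reference only.

/* Standalone sketch of the calc_residency() scaling above.  The CZ count,
 * range bit and raw counter value are invented purely for illustration. */
#include <stdio.h>
#include <stdint.h>

static uint64_t div_round_up(uint64_t n, uint64_t d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        const uint64_t bias = 100, div = 1000000ULL * bias;
        uint32_t czcount_30ns = 40;   /* pretend CZ count field from CLK_CTL2 */
        int high_range = 1;           /* pretend VLV_COUNT_RANGE_HIGH was set */
        uint64_t raw = 123456789ULL;  /* pretend residency register value */
        uint64_t units = div_round_up(30ULL * bias, czcount_30ns);

        if (high_range)
                units <<= 8;

        printf("residency: %llu ms\n",
               (unsigned long long)div_round_up(raw * units, div));
        return 0;
}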
 
 static ssize_t
 show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *dminor = dev_to_drm_minor(kdev);
        return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
 }
 
 static ssize_t
 show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *dminor = dev_get_drvdata(kdev);
        u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
 }
@@ -63,16 +83,20 @@ show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 static ssize_t
 show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+       if (IS_VALLEYVIEW(dminor->dev))
+               rc6p_residency = 0;
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
 }
 
 static ssize_t
 show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *dminor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+       if (IS_VALLEYVIEW(dminor->dev))
+               rc6pp_residency = 0;
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
 }
 
@@ -97,7 +121,7 @@ static struct attribute_group rc6_attr_group = {
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
 {
-       if (!HAS_L3_GPU_CACHE(dev))
+       if (!HAS_L3_DPF(dev))
                return -EPERM;
 
        if (offset % 4 != 0)
@@ -115,31 +139,34 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
             loff_t offset, size_t count)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
-       struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+       struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
-       uint32_t misccpctl;
-       int i, ret;
+       int slice = (int)(uintptr_t)attr->private;
+       int ret;
+
+       count = round_down(count, 4);
 
        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;
 
+       count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
+
        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;
 
-       misccpctl = I915_READ(GEN7_MISCCPCTL);
-       I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
-
-       for (i = offset; count >= 4 && i < GEN7_L3LOG_SIZE; i += 4, count -= 4)
-               *((uint32_t *)(&buf[i])) = I915_READ(GEN7_L3LOG_BASE + i);
-
-       I915_WRITE(GEN7_MISCCPCTL, misccpctl);
+       if (dev_priv->l3_parity.remap_info[slice])
+               memcpy(buf,
+                      dev_priv->l3_parity.remap_info[slice] + (offset/4),
+                      count);
+       else
+               memset(buf, 0, count);
 
        mutex_unlock(&drm_dev->struct_mutex);
 
-       return i - offset;
+       return count;
 }
 
 static ssize_t
@@ -148,21 +175,26 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
              loff_t offset, size_t count)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
-       struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+       struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
+       struct i915_hw_context *ctx;
        u32 *temp = NULL; /* Just here to make handling failures easy */
+       int slice = (int)(uintptr_t)attr->private;
        int ret;
 
        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;
 
+       if (dev_priv->hw_contexts_disabled)
+               return -ENXIO;
+
        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;
 
-       if (!dev_priv->l3_parity.remap_info) {
+       if (!dev_priv->l3_parity.remap_info[slice]) {
                temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
                if (!temp) {
                        mutex_unlock(&drm_dev->struct_mutex);
@@ -182,13 +214,13 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
         * at this point it is left as a TODO.
        */
        if (temp)
-               dev_priv->l3_parity.remap_info = temp;
+               dev_priv->l3_parity.remap_info[slice] = temp;
 
-       memcpy(dev_priv->l3_parity.remap_info + (offset/4),
-              buf + (offset/4),
-              count);
+       memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
 
-       i915_gem_l3_remap(drm_dev);
+       /* NB: We defer the remapping until we switch to the context */
+       list_for_each_entry(ctx, &dev_priv->context_list, link)
+               ctx->remap_slice |= (1<<slice);
 
        mutex_unlock(&drm_dev->struct_mutex);
 
@@ -200,17 +232,29 @@ static struct bin_attribute dpf_attrs = {
        .size = GEN7_L3LOG_SIZE,
        .read = i915_l3_read,
        .write = i915_l3_write,
-       .mmap = NULL
+       .mmap = NULL,
+       .private = (void *)0
+};
+
+static struct bin_attribute dpf_attrs_1 = {
+       .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
+       .size = GEN7_L3LOG_SIZE,
+       .read = i915_l3_read,
+       .write = i915_l3_write,
+       .mmap = NULL,
+       .private = (void *)1
 };
 
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev)) {
                u32 freq;
@@ -227,7 +271,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
                                     struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -238,11 +282,13 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
                ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -257,7 +303,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
 {
-       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, rp_state_cap, hw_max, hw_min, non_oc_max;
@@ -267,6 +313,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
        if (ret)
                return ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        mutex_lock(&dev_priv->rps.hw_lock);
 
        if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -310,11 +358,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
                ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -329,7 +379,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
 {
-       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, rp_state_cap, hw_max, hw_min;
@@ -339,6 +389,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
        if (ret)
                return ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        mutex_lock(&dev_priv->rps.hw_lock);
 
        if (IS_VALLEYVIEW(dev)) {
@@ -388,7 +440,7 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 /* For now we have a static number of RP states */
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val, rp_state_cap;
@@ -436,7 +488,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
 {
 
        struct device *kdev = container_of(kobj, struct device, kobj);
-       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct i915_error_state_file_priv error_priv;
        struct drm_i915_error_state_buf error_str;
@@ -471,7 +523,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
                                 loff_t off, size_t count)
 {
        struct device *kdev = container_of(kobj, struct device, kobj);
-       struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+       struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        int ret;
 
@@ -501,27 +553,34 @@ void i915_setup_sysfs(struct drm_device *dev)
 
 #ifdef CONFIG_PM
        if (INTEL_INFO(dev)->gen >= 6) {
-               ret = sysfs_merge_group(&dev->primary->kdev.kobj,
+               ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &rc6_attr_group);
                if (ret)
                        DRM_ERROR("RC6 residency sysfs setup failed\n");
        }
 #endif
-       if (HAS_L3_GPU_CACHE(dev)) {
-               ret = device_create_bin_file(&dev->primary->kdev, &dpf_attrs);
+       if (HAS_L3_DPF(dev)) {
+               ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
                if (ret)
                        DRM_ERROR("l3 parity sysfs setup failed\n");
+
+               if (NUM_L3_SLICES(dev) > 1) {
+                       ret = device_create_bin_file(dev->primary->kdev,
+                                                    &dpf_attrs_1);
+                       if (ret)
+                               DRM_ERROR("l3 parity slice 1 setup failed\n");
+               }
        }
 
        ret = 0;
        if (IS_VALLEYVIEW(dev))
-               ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs);
+               ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
        else if (INTEL_INFO(dev)->gen >= 6)
-               ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs);
+               ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
        if (ret)
                DRM_ERROR("RPS sysfs setup failed\n");
 
-       ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+       ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
                                    &error_state_attr);
        if (ret)
                DRM_ERROR("error_state sysfs setup failed\n");
@@ -529,13 +588,14 @@ void i915_setup_sysfs(struct drm_device *dev)
 
 void i915_teardown_sysfs(struct drm_device *dev)
 {
-       sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
+       sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
        if (IS_VALLEYVIEW(dev))
-               sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs);
+               sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
        else
-               sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
-       device_remove_bin_file(&dev->primary->kdev,  &dpf_attrs);
+               sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
+       device_remove_bin_file(dev->primary->kdev,  &dpf_attrs_1);
+       device_remove_bin_file(dev->primary->kdev,  &dpf_attrs);
 #ifdef CONFIG_PM
-       sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+       sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
 #endif
 }
index e2c5ee6f6194eb662c4234cfdf9304c387330d9e..6e580c98dede727175cf0542b0a1024ad0e2a5da 100644 (file)
@@ -233,6 +233,47 @@ TRACE_EVENT(i915_gem_evict_everything,
            TP_printk("dev=%d", __entry->dev)
 );
 
+TRACE_EVENT(i915_gem_evict_vm,
+           TP_PROTO(struct i915_address_space *vm),
+           TP_ARGS(vm),
+
+           TP_STRUCT__entry(
+                            __field(struct i915_address_space *, vm)
+                           ),
+
+           TP_fast_assign(
+                          __entry->vm = vm;
+                         ),
+
+           TP_printk("dev=%d, vm=%p", __entry->vm->dev->primary->index, __entry->vm)
+);
+
+TRACE_EVENT(i915_gem_ring_sync_to,
+           TP_PROTO(struct intel_ring_buffer *from,
+                    struct intel_ring_buffer *to,
+                    u32 seqno),
+           TP_ARGS(from, to, seqno),
+
+           TP_STRUCT__entry(
+                            __field(u32, dev)
+                            __field(u32, sync_from)
+                            __field(u32, sync_to)
+                            __field(u32, seqno)
+                            ),
+
+           TP_fast_assign(
+                          __entry->dev = from->dev->primary->index;
+                          __entry->sync_from = from->id;
+                          __entry->sync_to = to->id;
+                          __entry->seqno = seqno;
+                          ),
+
+           TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
+                     __entry->dev,
+                     __entry->sync_from, __entry->sync_to,
+                     __entry->seqno)
+);
+
 TRACE_EVENT(i915_gem_ring_dispatch,
            TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
            TP_ARGS(ring, seqno, flags),
@@ -304,9 +345,24 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
            TP_ARGS(ring, seqno)
 );
 
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
-           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
-           TP_ARGS(ring, seqno)
+TRACE_EVENT(i915_gem_request_complete,
+           TP_PROTO(struct intel_ring_buffer *ring),
+           TP_ARGS(ring),
+
+           TP_STRUCT__entry(
+                            __field(u32, dev)
+                            __field(u32, ring)
+                            __field(u32, seqno)
+                            ),
+
+           TP_fast_assign(
+                          __entry->dev = ring->dev->primary->index;
+                          __entry->ring = ring->id;
+                          __entry->seqno = ring->get_seqno(ring, false);
+                          ),
+
+           TP_printk("dev=%u, ring=%u, seqno=%u",
+                     __entry->dev, __entry->ring, __entry->seqno)
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
index 57fe1ae32a0d69d767eb7d698cfeab2ead4231e2..43959edd4291193a538449c1d4f75bba8d30ea8a 100644 (file)
@@ -193,16 +193,14 @@ out:
 
 static bool intel_dsm_pci_probe(struct pci_dev *pdev)
 {
-       acpi_handle dhandle, intel_handle;
-       acpi_status status;
+       acpi_handle dhandle;
        int ret;
 
        dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
                return false;
 
-       status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
-       if (ACPI_FAILURE(status)) {
+       if (!acpi_has_method(dhandle, "_DSM")) {
                DRM_DEBUG_KMS("no _DSM method for intel device\n");
                return false;
        }
index 53f2bed8bc5fabf812c16bb74a030e105bb386aa..e29bcae1ef81355d1e97fd0536cac3238cf99b87 100644 (file)
@@ -389,7 +389,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 {
        struct sdvo_device_mapping *p_mapping;
        struct bdb_general_definitions *p_defs;
-       struct child_device_config *p_child;
+       union child_device_config *p_child;
        int i, child_device_num, count;
        u16     block_size;
 
@@ -416,36 +416,36 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
        count = 0;
        for (i = 0; i < child_device_num; i++) {
                p_child = &(p_defs->devices[i]);
-               if (!p_child->device_type) {
+               if (!p_child->old.device_type) {
                        /* skip the device block if device type is invalid */
                        continue;
                }
-               if (p_child->slave_addr != SLAVE_ADDR1 &&
-                       p_child->slave_addr != SLAVE_ADDR2) {
+               if (p_child->old.slave_addr != SLAVE_ADDR1 &&
+                       p_child->old.slave_addr != SLAVE_ADDR2) {
                        /*
                         * If the slave address is neither 0x70 nor 0x72,
                         * it is not a SDVO device. Skip it.
                         */
                        continue;
                }
-               if (p_child->dvo_port != DEVICE_PORT_DVOB &&
-                       p_child->dvo_port != DEVICE_PORT_DVOC) {
+               if (p_child->old.dvo_port != DEVICE_PORT_DVOB &&
+                       p_child->old.dvo_port != DEVICE_PORT_DVOC) {
                        /* skip the incorrect SDVO port */
                        DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
                        continue;
                }
                DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
                                " %s port\n",
-                               p_child->slave_addr,
-                               (p_child->dvo_port == DEVICE_PORT_DVOB) ?
+                               p_child->old.slave_addr,
+                               (p_child->old.dvo_port == DEVICE_PORT_DVOB) ?
                                        "SDVOB" : "SDVOC");
-               p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
+               p_mapping = &(dev_priv->sdvo_mappings[p_child->old.dvo_port - 1]);
                if (!p_mapping->initialized) {
-                       p_mapping->dvo_port = p_child->dvo_port;
-                       p_mapping->slave_addr = p_child->slave_addr;
-                       p_mapping->dvo_wiring = p_child->dvo_wiring;
-                       p_mapping->ddc_pin = p_child->ddc_pin;
-                       p_mapping->i2c_pin = p_child->i2c_pin;
+                       p_mapping->dvo_port = p_child->old.dvo_port;
+                       p_mapping->slave_addr = p_child->old.slave_addr;
+                       p_mapping->dvo_wiring = p_child->old.dvo_wiring;
+                       p_mapping->ddc_pin = p_child->old.ddc_pin;
+                       p_mapping->i2c_pin = p_child->old.i2c_pin;
                        p_mapping->initialized = 1;
                        DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
                                      p_mapping->dvo_port,
@@ -457,7 +457,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
                        DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
                                         "two SDVO device.\n");
                }
-               if (p_child->slave2_addr) {
+               if (p_child->old.slave2_addr) {
                        /* Maybe this is a SDVO device with multiple inputs */
                        /* And the mapping info is not added */
                        DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
@@ -477,15 +477,13 @@ static void
 parse_driver_features(struct drm_i915_private *dev_priv,
                       struct bdb_header *bdb)
 {
-       struct drm_device *dev = dev_priv->dev;
        struct bdb_driver_features *driver;
 
        driver = find_section(bdb, BDB_DRIVER_FEATURES);
        if (!driver)
                return;
 
-       if (SUPPORTS_EDP(dev) &&
-           driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+       if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
                dev_priv->vbt.edp_support = 1;
 
        if (driver->dual_frequency)
@@ -501,7 +499,7 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 
        edp = find_section(bdb, BDB_EDP);
        if (!edp) {
-               if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->vbt.edp_support)
+               if (dev_priv->vbt.edp_support)
                        DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
                return;
        }
@@ -568,12 +566,150 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
        }
 }
 
+static void
+parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+{
+       struct bdb_mipi *mipi;
+
+       mipi = find_section(bdb, BDB_MIPI);
+       if (!mipi) {
+               DRM_DEBUG_KMS("No MIPI BDB found");
+               return;
+       }
+
+       /* XXX: add more info */
+       dev_priv->vbt.dsi.panel_id = mipi->panel_id;
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+                          struct bdb_header *bdb)
+{
+       union child_device_config *it, *child = NULL;
+       struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
+       uint8_t hdmi_level_shift;
+       int i, j;
+       bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+       uint8_t aux_channel;
+       /* Each DDI port can have more than one value on the "DVO Port" field,
+        * so look for all the possible values for each port and abort if more
+        * than one is found. */
+       int dvo_ports[][2] = {
+               {DVO_PORT_HDMIA, DVO_PORT_DPA},
+               {DVO_PORT_HDMIB, DVO_PORT_DPB},
+               {DVO_PORT_HDMIC, DVO_PORT_DPC},
+               {DVO_PORT_HDMID, DVO_PORT_DPD},
+               {DVO_PORT_CRT, -1 /* Port E can only be DVO_PORT_CRT */ },
+       };
+
+       /* Find the child device to use, abort if more than one found. */
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               it = dev_priv->vbt.child_dev + i;
+
+               for (j = 0; j < 2; j++) {
+                       if (dvo_ports[port][j] == -1)
+                               break;
+
+                       if (it->common.dvo_port == dvo_ports[port][j]) {
+                               if (child) {
+                                       DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+                                                     port_name(port));
+                                       return;
+                               }
+                               child = it;
+                       }
+               }
+       }
+       if (!child)
+               return;
+
+       aux_channel = child->raw[25];
+
+       is_dvi = child->common.device_type & (1 << 4);
+       is_dp = child->common.device_type & (1 << 2);
+       is_crt = child->common.device_type & (1 << 0);
+       is_hdmi = is_dvi && (child->common.device_type & (1 << 11)) == 0;
+       is_edp = is_dp && (child->common.device_type & (1 << 12));
+
+       info->supports_dvi = is_dvi;
+       info->supports_hdmi = is_hdmi;
+       info->supports_dp = is_dp;
+
+       DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
+                     port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+
+       if (is_edp && is_dvi)
+               DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
+                             port_name(port));
+       if (is_crt && port != PORT_E)
+               DRM_DEBUG_KMS("Port %c is analog\n", port_name(port));
+       if (is_crt && (is_dvi || is_dp))
+               DRM_DEBUG_KMS("Analog port %c is also DP or TMDS compatible\n",
+                             port_name(port));
+       if (is_dvi && (port == PORT_A || port == PORT_E))
+               DRM_DEBUG_KMS("Port %c is TMDS compabile\n", port_name(port));
+       if (!is_dvi && !is_dp && !is_crt)
+               DRM_DEBUG_KMS("Port %c is not DP/TMDS/CRT compatible\n",
+                             port_name(port));
+       if (is_edp && (port == PORT_B || port == PORT_C || port == PORT_E))
+               DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
+
+       if (is_dvi) {
+               if (child->common.ddc_pin == 0x05 && port != PORT_B)
+                       DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
+               if (child->common.ddc_pin == 0x04 && port != PORT_C)
+                       DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
+               if (child->common.ddc_pin == 0x06 && port != PORT_D)
+                       DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
+       }
+
+       if (is_dp) {
+               if (aux_channel == 0x40 && port != PORT_A)
+                       DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
+               if (aux_channel == 0x10 && port != PORT_B)
+                       DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
+               if (aux_channel == 0x20 && port != PORT_C)
+                       DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
+               if (aux_channel == 0x30 && port != PORT_D)
+                       DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
+       }
+
+       if (bdb->version >= 158) {
+               /* The VBT HDMI level shift values match the table we have. */
+               hdmi_level_shift = child->raw[7] & 0xF;
+               if (hdmi_level_shift < 0xC) {
+                       DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
+                                     port_name(port),
+                                     hdmi_level_shift);
+                       info->hdmi_level_shift = hdmi_level_shift;
+               }
+       }
+}
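parse_ddi_port() above classifies each VBT child device from individual bits of common.device_type: bit 4 marks DVI/TMDS, bit 2 DisplayPort, bit 0 analog, a clear bit 11 (together with DVI) means HDMI-capable, and bit 12 (together with DP) means eDP. The sketch below simply replays that decoding on an invented value so the derived is_hdmi/is_edp terms are easier to follow.

/* Standalone sketch of the device_type decoding used in parse_ddi_port();
 * the device_type value below is invented for illustration. */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t device_type = (1 << 4) | (1 << 2) | (1 << 12); /* invented */
        bool is_dvi  = device_type & (1 << 4);
        bool is_dp   = device_type & (1 << 2);
        bool is_crt  = device_type & (1 << 0);
        bool is_hdmi = is_dvi && (device_type & (1 << 11)) == 0;
        bool is_edp  = is_dp && (device_type & (1 << 12));

        printf("DVI:%d HDMI:%d DP:%d eDP:%d CRT:%d\n",
               is_dvi, is_hdmi, is_dp, is_edp, is_crt);
        return 0;
}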
+
+static void parse_ddi_ports(struct drm_i915_private *dev_priv,
+                           struct bdb_header *bdb)
+{
+       struct drm_device *dev = dev_priv->dev;
+       enum port port;
+
+       if (!HAS_DDI(dev))
+               return;
+
+       if (!dev_priv->vbt.child_dev_num)
+               return;
+
+       if (bdb->version < 155)
+               return;
+
+       for (port = PORT_A; port < I915_MAX_PORTS; port++)
+               parse_ddi_port(dev_priv, port, bdb);
+}
+
 static void
 parse_device_mapping(struct drm_i915_private *dev_priv,
                       struct bdb_header *bdb)
 {
        struct bdb_general_definitions *p_defs;
-       struct child_device_config *p_child, *child_dev_ptr;
+       union child_device_config *p_child, *child_dev_ptr;
        int i, child_device_num, count;
        u16     block_size;
 
@@ -601,7 +737,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
        /* get the number of child device that is present */
        for (i = 0; i < child_device_num; i++) {
                p_child = &(p_defs->devices[i]);
-               if (!p_child->device_type) {
+               if (!p_child->common.device_type) {
                        /* skip the device block if device type is invalid */
                        continue;
                }
@@ -621,7 +757,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
        count = 0;
        for (i = 0; i < child_device_num; i++) {
                p_child = &(p_defs->devices[i]);
-               if (!p_child->device_type) {
+               if (!p_child->common.device_type) {
                        /* skip the device block if device type is invalid */
                        continue;
                }
@@ -637,6 +773,7 @@ static void
 init_vbt_defaults(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
+       enum port port;
 
        dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
 
@@ -655,6 +792,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
        dev_priv->vbt.lvds_use_ssc = 1;
        dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
        DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->vbt.lvds_ssc_freq);
+
+       for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+               struct ddi_vbt_port_info *info =
+                       &dev_priv->vbt.ddi_port_info[port];
+
+               /* Recommended BSpec default: 800mV 0dB. */
+               info->hdmi_level_shift = 6;
+
+               info->supports_dvi = (port != PORT_A && port != PORT_E);
+               info->supports_hdmi = info->supports_dvi;
+               info->supports_dp = (port != PORT_E);
+       }
 }
 
 static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -745,6 +894,8 @@ intel_parse_bios(struct drm_device *dev)
        parse_device_mapping(dev_priv, bdb);
        parse_driver_features(dev_priv, bdb);
        parse_edp(dev_priv, bdb);
+       parse_mipi(dev_priv, bdb);
+       parse_ddi_ports(dev_priv, bdb);
 
        if (bios)
                pci_unmap_rom(pdev, bios);
index e088d6f0956a87a239cf94ab31983359d0993f70..287cc5a21c2ef636dee0b4198f8c59665bd1fd5e 100644 (file)
@@ -104,6 +104,7 @@ struct vbios_data {
 #define BDB_LVDS_LFP_DATA       42
 #define BDB_LVDS_BACKLIGHT      43
 #define BDB_LVDS_POWER          44
+#define BDB_MIPI                50
 #define BDB_SKIP               254 /* VBIOS private block, ignore */
 
 struct bdb_general_features {
@@ -201,7 +202,10 @@ struct bdb_general_features {
 #define DEVICE_PORT_DVOB       0x01
 #define DEVICE_PORT_DVOC       0x02
 
-struct child_device_config {
+/* We used to keep this struct but without any version control. We should avoid
+ * using it in the future, but it should be safe to keep using it in the old
+ * code. */
+struct old_child_dev_config {
        u16 handle;
        u16 device_type;
        u8  device_id[10]; /* ascii string */
@@ -223,6 +227,32 @@ struct child_device_config {
        u8  dvo_function;
 } __attribute__((packed));
 
+/* This one contains field offsets that are known to be common for all BDB
+ * versions. Notice that the meaning of the contents may still change,
+ * but at least the offsets are consistent. */
+struct common_child_dev_config {
+       u16 handle;
+       u16 device_type;
+       u8 not_common1[12];
+       u8 dvo_port;
+       u8 not_common2[2];
+       u8 ddc_pin;
+       u16 edid_ptr;
+} __attribute__((packed));
+
+/* This field changes depending on the BDB version, so the most reliable way to
+ * read it is by checking the BDB version and reading the raw pointer. */
+union child_device_config {
+       /* This one is safe to be used anywhere, but the code should still check
+        * the BDB version. */
+       u8 raw[33];
+       /* This one should only be kept for legacy code. */
+       struct old_child_dev_config old;
+       /* This one should also be safe to use anywhere, even without version
+        * checks. */
+       struct common_child_dev_config common;
+};
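The union added above offers three views of the same child device block: raw[] for fields whose offsets depend on the BDB version, old for the legacy layout, and common for the few fields whose offsets never move. Below is a standalone sketch of that access pattern using trimmed stand-in structures, not the full definitions above.

/* Standalone sketch of the union child_device_config access pattern; the
 * structures are trimmed stand-ins for illustration only. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct sketch_common {
        uint16_t handle;
        uint16_t device_type;
        uint8_t  not_common1[12];
        uint8_t  dvo_port;
} __attribute__((packed));

union sketch_child {
        uint8_t raw[33];                /* version-gated byte access */
        struct sketch_common common;    /* offsets stable across versions */
};

int main(void)
{
        union sketch_child child;

        memset(&child, 0, sizeof(child));
        child.common.device_type = (1 << 2);    /* pretend: DisplayPort bit */
        child.raw[25] = 0x40;                   /* pretend: AUX channel byte */

        printf("device_type=0x%04x aux=0x%02x dvo_port=%u\n",
               (unsigned)child.common.device_type,
               (unsigned)child.raw[25],
               (unsigned)child.common.dvo_port);
        return 0;
}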
+
 struct bdb_general_definitions {
        /* DDC GPIO */
        u8 crt_ddc_gmbus_pin;
@@ -248,7 +278,7 @@ struct bdb_general_definitions {
         * number = (block_size - sizeof(bdb_general_definitions))/
         *           sizeof(child_device_config);
         */
-       struct child_device_config devices[0];
+       union child_device_config devices[0];
 } __attribute__((packed));
 
 struct bdb_lvds_options {
@@ -618,4 +648,57 @@ int intel_parse_bios(struct drm_device *dev);
 #define                PORT_IDPC       8
 #define                PORT_IDPD       9
 
+/* Possible values for the "DVO Port" field for versions >= 155: */
+#define DVO_PORT_HDMIA 0
+#define DVO_PORT_HDMIB 1
+#define DVO_PORT_HDMIC 2
+#define DVO_PORT_HDMID 3
+#define DVO_PORT_LVDS  4
+#define DVO_PORT_TV    5
+#define DVO_PORT_CRT   6
+#define DVO_PORT_DPB   7
+#define DVO_PORT_DPC   8
+#define DVO_PORT_DPD   9
+#define DVO_PORT_DPA   10
+
+/* MIPI DSI panel info */
+struct bdb_mipi {
+       u16 panel_id;
+       u16 bridge_revision;
+
+       /* General params */
+       u32 dithering:1;
+       u32 bpp_pixel_format:1;
+       u32 rsvd1:1;
+       u32 dphy_valid:1;
+       u32 resvd2:28;
+
+       u16 port_info;
+       u16 rsvd3:2;
+       u16 num_lanes:2;
+       u16 rsvd4:12;
+
+       /* DSI config */
+       u16 virt_ch_num:2;
+       u16 vtm:2;
+       u16 rsvd5:12;
+
+       u32 dsi_clock;
+       u32 bridge_ref_clk;
+       u16 rsvd_pwr;
+
+       /* Dphy Params */
+       u32 prepare_cnt:5;
+       u32 rsvd6:3;
+       u32 clk_zero_cnt:8;
+       u32 trail_cnt:5;
+       u32 rsvd7:3;
+       u32 exit_zero_cnt:6;
+       u32 rsvd8:2;
+
+       u32 hl_switch_cnt;
+       u32 lp_byte_clk;
+       u32 clk_lane_switch_cnt;
+} __attribute__((packed));
+
 #endif /* _I830_BIOS_H_ */
index ea9022ef15d5bdffc40e5c0bf9a941c7f0b38677..2e01bd3a5d8c4e9c588959b6e5ddd548dcc02a02 100644 (file)
@@ -83,8 +83,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
        return true;
 }
 
-static void intel_crt_get_config(struct intel_encoder *encoder,
-                                struct intel_crtc_config *pipe_config)
+static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -102,7 +101,35 @@ static void intel_crt_get_config(struct intel_encoder *encoder,
        else
                flags |= DRM_MODE_FLAG_NVSYNC;
 
-       pipe_config->adjusted_mode.flags |= flags;
+       return flags;
+}
+
+static void intel_crt_get_config(struct intel_encoder *encoder,
+                                struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = encoder->base.dev;
+       int dotclock;
+
+       pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
+
+       dotclock = pipe_config->port_clock;
+
+       if (HAS_PCH_SPLIT(dev))
+               ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+       pipe_config->adjusted_mode.crtc_clock = dotclock;
+}
+
+static void hsw_crt_get_config(struct intel_encoder *encoder,
+                              struct intel_crtc_config *pipe_config)
+{
+       intel_ddi_get_config(encoder, pipe_config);
+
+       pipe_config->adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC |
+                                             DRM_MODE_FLAG_NHSYNC |
+                                             DRM_MODE_FLAG_PVSYNC |
+                                             DRM_MODE_FLAG_NVSYNC);
+       pipe_config->adjusted_mode.flags |= intel_crt_get_flags(encoder);
 }
 
 /* Note: The caller is required to filter out dpms modes not supported by the
@@ -247,7 +274,7 @@ static void intel_crt_mode_set(struct intel_encoder *encoder)
        struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
        u32 adpa;
 
-       if (HAS_PCH_SPLIT(dev))
+       if (INTEL_INFO(dev)->gen >= 5)
                adpa = ADPA_HOTPLUG_BITS;
        else
                adpa = 0;
@@ -349,9 +376,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 
        DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
-       /* FIXME: debug force function and remove */
-       ret = true;
-
        return ret;
 }
 
@@ -653,7 +677,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 
 static void intel_crt_destroy(struct drm_connector *connector)
 {
-       drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
 }
@@ -759,7 +782,7 @@ void intel_crt_init(struct drm_device *dev)
        if (!crt)
                return;
 
-       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
        if (!intel_connector) {
                kfree(crt);
                return;
@@ -799,7 +822,10 @@ void intel_crt_init(struct drm_device *dev)
        crt->base.mode_set = intel_crt_mode_set;
        crt->base.disable = intel_disable_crt;
        crt->base.enable = intel_enable_crt;
-       crt->base.get_config = intel_crt_get_config;
+       if (IS_HASWELL(dev))
+               crt->base.get_config = hsw_crt_get_config;
+       else
+               crt->base.get_config = intel_crt_get_config;
        if (I915_HAS_HOTPLUG(dev))
                crt->base.hpd_pin = HPD_CRT;
        if (HAS_DDI(dev))
index 63de2701b97403a82ffd221424d5b5b9acda5843..31f4fe271388f746760e723914fcbf7d93ba4935 100644 (file)
@@ -42,7 +42,6 @@ static const u32 hsw_ddi_translations_dp[] = {
        0x80C30FFF, 0x000B0000,
        0x00FFFFFF, 0x00040006,
        0x80D75FFF, 0x000B0000,
-       0x00FFFFFF, 0x00040006          /* HDMI parameters */
 };
 
 static const u32 hsw_ddi_translations_fdi[] = {
@@ -55,10 +54,25 @@ static const u32 hsw_ddi_translations_fdi[] = {
        0x00C30FFF, 0x001E0000,
        0x00FFFFFF, 0x00060006,
        0x00D75FFF, 0x001E0000,
-       0x00FFFFFF, 0x00040006          /* HDMI parameters */
 };
 
-static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
+static const u32 hsw_ddi_translations_hdmi[] = {
+                               /* Idx  NT mV diff      T mV diff       db  */
+       0x00FFFFFF, 0x0006000E, /* 0:   400             400             0   */
+       0x00E79FFF, 0x000E000C, /* 1:   400             500             2   */
+       0x00D75FFF, 0x0005000A, /* 2:   400             600             3.5 */
+       0x00FFFFFF, 0x0005000A, /* 3:   600             600             0   */
+       0x00E79FFF, 0x001D0007, /* 4:   600             750             2   */
+       0x00D75FFF, 0x000C0004, /* 5:   600             900             3.5 */
+       0x00FFFFFF, 0x00040006, /* 6:   800             800             0   */
+       0x80E79FFF, 0x00030002, /* 7:   800             1000            2   */
+       0x00FFFFFF, 0x00140005, /* 8:   850             850             0   */
+       0x00FFFFFF, 0x000C0004, /* 9:   900             900             0   */
+       0x00FFFFFF, 0x001C0003, /* 10:  950             950             0   */
+       0x80FFFFFF, 0x00030002, /* 11:  1000            1000            0   */
+};
+
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
        int type = intel_encoder->type;
@@ -92,12 +106,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
        const u32 *ddi_translations = (port == PORT_E) ?
                hsw_ddi_translations_fdi :
                hsw_ddi_translations_dp;
+       int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
 
        for (i = 0, reg = DDI_BUF_TRANS(port);
             i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
                I915_WRITE(reg, ddi_translations[i]);
                reg += 4;
        }
+       /* Entry 9 is for HDMI: */
+       for (i = 0; i < 2; i++) {
+               I915_WRITE(reg, hsw_ddi_translations_hdmi[hdmi_level * 2 + i]);
+               reg += 4;
+       }
 }
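intel_prepare_ddi_buffers() above now writes the DP/FDI translations first and then overwrites the final entry with one of the voltage-swing/de-emphasis pairs from hsw_ddi_translations_hdmi, selected as two consecutive dwords at index hdmi_level * 2. Below is a standalone sketch of that indexing with placeholder table values.

/* Standalone sketch of the HDMI translation indexing above: "hdmi_level"
 * selects a pair of consecutive dwords.  Table contents are placeholders,
 * not the real hsw_ddi_translations_hdmi values. */
#include <stdio.h>
#include <stdint.h>

static const uint32_t sketch_hdmi_translations[] = {
        0x00000000, 0x00000001,   /* level 0 (placeholder) */
        0x00000010, 0x00000011,   /* level 1 (placeholder) */
        0x00000020, 0x00000021,   /* level 2 (placeholder) */
};

int main(void)
{
        /* in the driver this would come from the VBT-derived
         * ddi_port_info[port].hdmi_level_shift; 1 is used here because the
         * placeholder table only has three levels */
        int hdmi_level = 1;

        for (int i = 0; i < 2; i++)
                printf("write dword %d: 0x%08x\n", i,
                       (unsigned)sketch_hdmi_translations[hdmi_level * 2 + i]);
        return 0;
}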
 
 /* Program DDI buffers translations for DP. By default, program ports A-D in DP
@@ -296,9 +316,6 @@ static void intel_ddi_mode_set(struct intel_encoder *encoder)
                        DRM_DEBUG_DRIVER("DP audio: write eld information\n");
                        intel_write_eld(&encoder->base, adjusted_mode);
                }
-
-               intel_dp_init_link_config(intel_dp);
-
        } else if (type == INTEL_OUTPUT_HDMI) {
                struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
 
@@ -767,9 +784,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
                BUG();
        }
 
-       if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+       if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
                temp |= TRANS_DDI_PVSYNC;
-       if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+       if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
                temp |= TRANS_DDI_PHSYNC;
 
        if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1202,7 +1219,7 @@ void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
 
        val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
              DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
-       if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+       if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
        I915_WRITE(DP_TP_CTL(port), val);
        POSTING_READ(DP_TP_CTL(port));
@@ -1249,8 +1266,8 @@ static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
                intel_dp_check_link_status(intel_dp);
 }
 
-static void intel_ddi_get_config(struct intel_encoder *encoder,
-                                struct intel_crtc_config *pipe_config)
+void intel_ddi_get_config(struct intel_encoder *encoder,
+                         struct intel_crtc_config *pipe_config)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -1268,6 +1285,37 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
                flags |= DRM_MODE_FLAG_NVSYNC;
 
        pipe_config->adjusted_mode.flags |= flags;
+
+       switch (temp & TRANS_DDI_BPC_MASK) {
+       case TRANS_DDI_BPC_6:
+               pipe_config->pipe_bpp = 18;
+               break;
+       case TRANS_DDI_BPC_8:
+               pipe_config->pipe_bpp = 24;
+               break;
+       case TRANS_DDI_BPC_10:
+               pipe_config->pipe_bpp = 30;
+               break;
+       case TRANS_DDI_BPC_12:
+               pipe_config->pipe_bpp = 36;
+               break;
+       default:
+               break;
+       }
+
+       switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
+       case TRANS_DDI_MODE_SELECT_HDMI:
+       case TRANS_DDI_MODE_SELECT_DVI:
+       case TRANS_DDI_MODE_SELECT_FDI:
+               break;
+       case TRANS_DDI_MODE_SELECT_DP_SST:
+       case TRANS_DDI_MODE_SELECT_DP_MST:
+               pipe_config->has_dp_encoder = true;
+               intel_dp_get_m_n(intel_crtc, pipe_config);
+               break;
+       default:
+               break;
+       }
 }
 
 static void intel_ddi_destroy(struct drm_encoder *encoder)
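The BPC decode added to intel_ddi_get_config() above is simply bits-per-pixel = 3 x bits-per-color, so 6/8/10/12 bpc become 18/24/30/36 bpp. A minimal illustration of that relationship; the enum values here are stand-ins, not the real TRANS_DDI register encoding.

#include <stdio.h>

/* Stand-in values: the real TRANS_DDI_BPC_* field encoding is not reproduced here. */
enum bpc { BPC_6 = 6, BPC_8 = 8, BPC_10 = 10, BPC_12 = 12 };

static int pipe_bpp_from_bpc(enum bpc bpc)
{
	return 3 * (int)bpc;	/* three color components per pixel */
}

int main(void)
{
	enum bpc all[] = { BPC_6, BPC_8, BPC_10, BPC_12 };
	unsigned int i;

	for (i = 0; i < sizeof(all) / sizeof(all[0]); i++)
		printf("%2d bpc -> %2d bpp\n", all[i], pipe_bpp_from_bpc(all[i]));
	return 0;
}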
@@ -1297,6 +1345,41 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
        .destroy = intel_ddi_destroy,
 };
 
+static struct intel_connector *
+intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port)
+{
+       struct intel_connector *connector;
+       enum port port = intel_dig_port->port;
+
+       connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+       if (!connector)
+               return NULL;
+
+       intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
+       if (!intel_dp_init_connector(intel_dig_port, connector)) {
+               kfree(connector);
+               return NULL;
+       }
+
+       return connector;
+}
+
+static struct intel_connector *
+intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
+{
+       struct intel_connector *connector;
+       enum port port = intel_dig_port->port;
+
+       connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+       if (!connector)
+               return NULL;
+
+       intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+       intel_hdmi_init_connector(intel_dig_port, connector);
+
+       return connector;
+}
+
 void intel_ddi_init(struct drm_device *dev, enum port port)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1305,17 +1388,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        struct drm_encoder *encoder;
        struct intel_connector *hdmi_connector = NULL;
        struct intel_connector *dp_connector = NULL;
+       bool init_hdmi, init_dp;
+
+       init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
+                    dev_priv->vbt.ddi_port_info[port].supports_hdmi);
+       init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+       if (!init_dp && !init_hdmi) {
+               DRM_DEBUG_KMS("VBT says port %c is not DVI/HDMI/DP compatible\n",
+                             port_name(port));
+               init_hdmi = true;
+               init_dp = true;
+       }
 
-       intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+       intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
                return;
 
-       dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-       if (!dp_connector) {
-               kfree(intel_dig_port);
-               return;
-       }
-
        intel_encoder = &intel_dig_port->base;
        encoder = &intel_encoder->base;
 
@@ -1335,28 +1423,22 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
                                          (DDI_BUF_PORT_REVERSAL |
                                           DDI_A_4_LANES);
-       intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
        intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
        intel_encoder->crtc_mask =  (1 << 0) | (1 << 1) | (1 << 2);
        intel_encoder->cloneable = false;
        intel_encoder->hot_plug = intel_ddi_hot_plug;
 
-       if (!intel_dp_init_connector(intel_dig_port, dp_connector)) {
-               drm_encoder_cleanup(encoder);
-               kfree(intel_dig_port);
-               kfree(dp_connector);
-               return;
-       }
+       if (init_dp)
+               dp_connector = intel_ddi_init_dp_connector(intel_dig_port);
 
-       if (intel_encoder->type != INTEL_OUTPUT_EDP) {
-               hdmi_connector = kzalloc(sizeof(struct intel_connector),
-                                        GFP_KERNEL);
-               if (!hdmi_connector) {
-                       return;
-               }
+       /* In theory we don't need the encoder->type check, but leave it just in
+        * case we have some really bad VBTs... */
+       if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)
+               hdmi_connector = intel_ddi_init_hdmi_connector(intel_dig_port);
 
-               intel_dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
-               intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
+       if (!dp_connector && !hdmi_connector) {
+               drm_encoder_cleanup(encoder);
+               kfree(intel_dig_port);
        }
 }
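The connector setup above is now gated on the VBT: HDMI is initialized when the port claims DVI or HDMI support, DP when it claims DP, and a VBT that claims none of them is distrusted and both are tried. A small standalone sketch of just that decision; the struct below stands in for the relevant ddi_port_info fields and is not the driver's type.

#include <stdio.h>
#include <stdbool.h>

struct port_caps { bool supports_dvi, supports_hdmi, supports_dp; };

static void decide_outputs(const struct port_caps *caps, bool *init_hdmi, bool *init_dp)
{
	*init_hdmi = caps->supports_dvi || caps->supports_hdmi;
	*init_dp = caps->supports_dp;
	if (!*init_dp && !*init_hdmi) {
		/* VBT claims nothing: assume it is wrong and probe both. */
		*init_hdmi = true;
		*init_dp = true;
	}
}

int main(void)
{
	struct port_caps bogus = { false, false, false };
	struct port_caps hdmi_only = { false, true, false };
	bool hdmi, dp;

	decide_outputs(&bogus, &hdmi, &dp);
	printf("bogus VBT: hdmi=%d dp=%d\n", hdmi, dp);
	decide_outputs(&hdmi_only, &hdmi, &dp);
	printf("HDMI only: hdmi=%d dp=%d\n", hdmi, dp);
	return 0;
}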
index e5822e79f912d9d447901f7dbf71fe911b5ade30..3e79a2a0669d478646c9ac3aef1a727af8774160 100644 (file)
 #include <drm/drm_crtc_helper.h>
 #include <linux/dma_remapping.h>
 
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
 static void intel_increase_pllclock(struct drm_crtc *crtc);
 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_config *pipe_config);
-static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
-                                   struct intel_crtc_config *pipe_config);
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+                                  struct intel_crtc_config *pipe_config);
 
 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
                          int x, int y, struct drm_framebuffer *old_fb);
@@ -69,9 +68,6 @@ struct intel_limit {
        intel_p2_t          p2;
 };
 
-/* FDI */
-#define IRONLAKE_FDI_FREQ              2700000 /* in kHz for mode->clock */
-
 int
 intel_pch_rawclk(struct drm_device *dev)
 {
@@ -313,44 +309,44 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
                .p2_slow = 7, .p2_fast = 7 },
 };
 
-static const intel_limit_t intel_limits_vlv_dac = {
-       .dot = { .min = 25000, .max = 270000 },
-       .vco = { .min = 4000000, .max = 6000000 },
-       .n = { .min = 1, .max = 7 },
-       .m = { .min = 22, .max = 450 }, /* guess */
-       .m1 = { .min = 2, .max = 3 },
-       .m2 = { .min = 11, .max = 156 },
-       .p = { .min = 10, .max = 30 },
-       .p1 = { .min = 1, .max = 3 },
-       .p2 = { .dot_limit = 270000,
-               .p2_slow = 2, .p2_fast = 20 },
-};
-
-static const intel_limit_t intel_limits_vlv_hdmi = {
-       .dot = { .min = 25000, .max = 270000 },
+static const intel_limit_t intel_limits_vlv = {
+        /*
+         * These are the data rate limits (measured in fast clocks)
+         * since those are the strictest limits we have. The fast
+         * clock and actual rate limits are more relaxed, so checking
+         * them would make no difference.
+         */
+       .dot = { .min = 25000 * 5, .max = 270000 * 5 },
        .vco = { .min = 4000000, .max = 6000000 },
        .n = { .min = 1, .max = 7 },
-       .m = { .min = 60, .max = 300 }, /* guess */
        .m1 = { .min = 2, .max = 3 },
        .m2 = { .min = 11, .max = 156 },
-       .p = { .min = 10, .max = 30 },
        .p1 = { .min = 2, .max = 3 },
-       .p2 = { .dot_limit = 270000,
-               .p2_slow = 2, .p2_fast = 20 },
+       .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
 };
 
-static const intel_limit_t intel_limits_vlv_dp = {
-       .dot = { .min = 25000, .max = 270000 },
-       .vco = { .min = 4000000, .max = 6000000 },
-       .n = { .min = 1, .max = 7 },
-       .m = { .min = 22, .max = 450 },
-       .m1 = { .min = 2, .max = 3 },
-       .m2 = { .min = 11, .max = 156 },
-       .p = { .min = 10, .max = 30 },
-       .p1 = { .min = 1, .max = 3 },
-       .p2 = { .dot_limit = 270000,
-               .p2_slow = 2, .p2_fast = 20 },
-};
+static void vlv_clock(int refclk, intel_clock_t *clock)
+{
+       clock->m = clock->m1 * clock->m2;
+       clock->p = clock->p1 * clock->p2;
+       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
+}
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+       struct drm_device *dev = crtc->dev;
+       struct intel_encoder *encoder;
+
+       for_each_encoder_on_crtc(dev, crtc, encoder)
+               if (encoder->type == type)
+                       return true;
+
+       return false;
+}
 
 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
                                                int refclk)
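The consolidated intel_limits_vlv expresses the dot limits in fast-clock units (5x the pixel rate, matching the later target *= 5 in vlv_find_best_dpll), and the new vlv_clock() helper derives vco and dot with round-to-nearest division. A standalone sketch of the same arithmetic; DIV_ROUND_CLOSEST is re-implemented locally as a userspace stand-in for the kernel macro, and the divider values are made-up examples that merely fall inside the limits above.

#include <stdio.h>

/* Userspace stand-in for the kernel's DIV_ROUND_CLOSEST() for positive operands. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

struct clk { int n, m1, m2, p1, p2, m, p, vco, dot; };

/* Same arithmetic as the vlv_clock() helper in the hunk above. */
static void vlv_clock(int refclk, struct clk *c)
{
	c->m = c->m1 * c->m2;
	c->p = c->p1 * c->p2;
	c->vco = DIV_ROUND_CLOSEST(refclk * c->m, c->n);
	c->dot = DIV_ROUND_CLOSEST(c->vco, c->p);
}

int main(void)
{
	struct clk c = { .n = 3, .m1 = 2, .m2 = 80, .p1 = 3, .p2 = 4 };
	int refclk = 100000;	/* kHz; the single refclk the driver now uses on VLV */

	vlv_clock(refclk, &c);
	printf("vco = %d kHz, fast clock (dot) = %d kHz, pixel clock ~ %d kHz\n",
	       c.vco, c.dot, c.dot / 5);
	return 0;
}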
@@ -412,12 +408,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
                else
                        limit = &intel_limits_pineview_sdvo;
        } else if (IS_VALLEYVIEW(dev)) {
-               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
-                       limit = &intel_limits_vlv_dac;
-               else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
-                       limit = &intel_limits_vlv_hdmi;
-               else
-                       limit = &intel_limits_vlv_dp;
+               limit = &intel_limits_vlv;
        } else if (!IS_GEN2(dev)) {
                if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
                        limit = &intel_limits_i9xx_lvds;
@@ -439,8 +430,8 @@ static void pineview_clock(int refclk, intel_clock_t *clock)
 {
        clock->m = clock->m2 + 2;
        clock->p = clock->p1 * clock->p2;
-       clock->vco = refclk * clock->m / clock->n;
-       clock->dot = clock->vco / clock->p;
+       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
+       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
 
 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
@@ -452,23 +443,8 @@ static void i9xx_clock(int refclk, intel_clock_t *clock)
 {
        clock->m = i9xx_dpll_compute_m(clock);
        clock->p = clock->p1 * clock->p2;
-       clock->vco = refclk * clock->m / (clock->n + 2);
-       clock->dot = clock->vco / clock->p;
-}
-
-/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
-{
-       struct drm_device *dev = crtc->dev;
-       struct intel_encoder *encoder;
-
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               if (encoder->type == type)
-                       return true;
-
-       return false;
+       clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
+       clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
 }
 
 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
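The pineview_clock()/i9xx_clock() hunks above switch the vco and dot computations from truncating division to DIV_ROUND_CLOSEST(); with truncation the computed clock can land one kHz below the value the hardware actually produces, which matters wherever computed and read-back clocks are compared. A small demonstration of that off-by-one, using made-up divider values and a local stand-in for the kernel macro.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int refclk = 96000;		/* kHz, example only */
	int m = 100, n = 7, p = 10;	/* made-up dividers */

	int vco_trunc = refclk * m / n;
	int vco_round = DIV_ROUND_CLOSEST(refclk * m, n);

	printf("vco: truncated = %d kHz, rounded = %d kHz\n", vco_trunc, vco_round);
	printf("dot: truncated = %d kHz, rounded = %d kHz\n",
	       vco_trunc / p, DIV_ROUND_CLOSEST(vco_round, p));
	return 0;
}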
@@ -481,20 +457,26 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
                               const intel_limit_t *limit,
                               const intel_clock_t *clock)
 {
+       if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
+               INTELPllInvalid("n out of range\n");
        if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
                INTELPllInvalid("p1 out of range\n");
-       if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
-               INTELPllInvalid("p out of range\n");
        if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
                INTELPllInvalid("m2 out of range\n");
        if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
                INTELPllInvalid("m1 out of range\n");
-       if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
-               INTELPllInvalid("m1 <= m2\n");
-       if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
-               INTELPllInvalid("m out of range\n");
-       if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
-               INTELPllInvalid("n out of range\n");
+
+       if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
+               if (clock->m1 <= clock->m2)
+                       INTELPllInvalid("m1 <= m2\n");
+
+       if (!IS_VALLEYVIEW(dev)) {
+               if (clock->p < limit->p.min || limit->p.max < clock->p)
+                       INTELPllInvalid("p out of range\n");
+               if (clock->m < limit->m.min || limit->m.max < clock->m)
+                       INTELPllInvalid("m out of range\n");
+       }
+
        if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
                INTELPllInvalid("vco out of range\n");
        /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
@@ -688,67 +670,73 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
                   int target, int refclk, intel_clock_t *match_clock,
                   intel_clock_t *best_clock)
 {
-       u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
-       u32 m, n, fastclk;
-       u32 updrate, minupdate, p;
-       unsigned long bestppm, ppm, absppm;
-       int dotclk, flag;
-
-       flag = 0;
-       dotclk = target * 1000;
-       bestppm = 1000000;
-       ppm = absppm = 0;
-       fastclk = dotclk / (2*100);
-       updrate = 0;
-       minupdate = 19200;
-       n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
-       bestm1 = bestm2 = bestp1 = bestp2 = 0;
+       struct drm_device *dev = crtc->dev;
+       intel_clock_t clock;
+       unsigned int bestppm = 1000000;
+       /* min update 19.2 MHz */
+       int max_n = min(limit->n.max, refclk / 19200);
+       bool found = false;
+
+       target *= 5; /* fast clock */
+
+       memset(best_clock, 0, sizeof(*best_clock));
 
        /* based on hardware requirement, prefer smaller n to precision */
-       for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
-               updrate = refclk / n;
-               for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
-                       for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
-                               if (p2 > 10)
-                                       p2 = p2 - 1;
-                               p = p1 * p2;
+       for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+               for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
+                       for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
+                            clock.p2 -= clock.p2 > 10 ? 2 : 1) {
+                               clock.p = clock.p1 * clock.p2;
                                /* based on hardware requirement, prefer bigger m1,m2 values */
-                               for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
-                                       m2 = (((2*(fastclk * p * n / m1 )) +
-                                              refclk) / (2*refclk));
-                                       m = m1 * m2;
-                                       vco = updrate * m;
-                                       if (vco >= limit->vco.min && vco < limit->vco.max) {
-                                               ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
-                                               absppm = (ppm > 0) ? ppm : (-ppm);
-                                               if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
-                                                       bestppm = 0;
-                                                       flag = 1;
-                                               }
-                                               if (absppm < bestppm - 10) {
-                                                       bestppm = absppm;
-                                                       flag = 1;
-                                               }
-                                               if (flag) {
-                                                       bestn = n;
-                                                       bestm1 = m1;
-                                                       bestm2 = m2;
-                                                       bestp1 = p1;
-                                                       bestp2 = p2;
-                                                       flag = 0;
-                                               }
+                               for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+                                       unsigned int ppm, diff;
+
+                                       clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
+                                                                    refclk * clock.m1);
+
+                                       vlv_clock(refclk, &clock);
+
+                                       if (!intel_PLL_is_valid(dev, limit,
+                                                               &clock))
+                                               continue;
+
+                                       diff = abs(clock.dot - target);
+                                       ppm = div_u64(1000000ULL * diff, target);
+
+                                       if (ppm < 100 && clock.p > best_clock->p) {
+                                               bestppm = 0;
+                                               *best_clock = clock;
+                                               found = true;
+                                       }
+
+                                       if (bestppm >= 10 && ppm < bestppm - 10) {
+                                               bestppm = ppm;
+                                               *best_clock = clock;
+                                               found = true;
                                        }
                                }
                        }
                }
        }
-       best_clock->n = bestn;
-       best_clock->m1 = bestm1;
-       best_clock->m2 = bestm2;
-       best_clock->p1 = bestp1;
-       best_clock->p2 = bestp2;
 
-       return true;
+       return found;
+}
+
+bool intel_crtc_active(struct drm_crtc *crtc)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       /* Be paranoid as we can arrive here with only partial
+        * state retrieved from the hardware during setup.
+        *
+        * We can ditch the adjusted_mode.crtc_clock check as soon
+        * as Haswell has gained clock readout/fastboot support.
+        *
+        * We can ditch the crtc->fb check as soon as we can
+        * properly reconstruct framebuffers.
+        */
+       return intel_crtc->active && crtc->fb &&
+               intel_crtc->config.adjusted_mode.crtc_clock;
 }
 
 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
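The rewritten vlv_find_best_dpll() above scores each candidate by its deviation from the target fast clock in parts per million: anything under 100 ppm wins if it also has a larger post divider, otherwise a candidate is only accepted when it improves the best ppm seen so far by more than 10. A standalone sketch of just that scoring rule; the candidate dot clocks and dividers below are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct cand { int dot; int p; };	/* computed fast clock (kHz) and post divider */

int main(void)
{
	const int target = 1350000;	/* kHz, already multiplied by 5 (fast clock) */
	const struct cand cands[] = {
		{ 1350200, 10 }, { 1349990, 12 }, { 1350060, 14 },
	};
	unsigned int bestppm = 1000000;
	struct cand best = { 0, 0 };
	unsigned int i;

	for (i = 0; i < sizeof(cands) / sizeof(cands[0]); i++) {
		unsigned int diff = abs(cands[i].dot - target);
		unsigned int ppm = (unsigned int)(1000000ULL * diff / target);

		/* Same two acceptance rules as the loop body in the hunk above. */
		if (ppm < 100 && cands[i].p > best.p) {
			bestppm = 0;
			best = cands[i];
		}
		if (bestppm >= 10 && ppm < bestppm - 10) {
			bestppm = ppm;
			best = cands[i];
		}
		printf("candidate dot=%d p=%d -> %u ppm\n", cands[i].dot, cands[i].p, ppm);
	}
	printf("chosen: dot=%d p=%d\n", best.dot, best.p);
	return 0;
}

Note how the third candidate is chosen over the second despite a slightly larger ppm, because once under 100 ppm a bigger post divider is preferred.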
@@ -812,6 +800,25 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
                DRM_DEBUG_KMS("vblank wait timed out\n");
 }
 
+static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 reg = PIPEDSL(pipe);
+       u32 line1, line2;
+       u32 line_mask;
+
+       if (IS_GEN2(dev))
+               line_mask = DSL_LINEMASK_GEN2;
+       else
+               line_mask = DSL_LINEMASK_GEN3;
+
+       line1 = I915_READ(reg) & line_mask;
+       mdelay(5);
+       line2 = I915_READ(reg) & line_mask;
+
+       return line1 == line2;
+}
+
 /*
  * intel_wait_for_pipe_off - wait for pipe to turn off
  * @dev: drm device
@@ -843,22 +850,8 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
                             100))
                        WARN(1, "pipe_off wait timed out\n");
        } else {
-               u32 last_line, line_mask;
-               int reg = PIPEDSL(pipe);
-               unsigned long timeout = jiffies + msecs_to_jiffies(100);
-
-               if (IS_GEN2(dev))
-                       line_mask = DSL_LINEMASK_GEN2;
-               else
-                       line_mask = DSL_LINEMASK_GEN3;
-
                /* Wait for the display line to settle */
-               do {
-                       last_line = I915_READ(reg) & line_mask;
-                       mdelay(5);
-               } while (((I915_READ(reg) & line_mask) != last_line) &&
-                        time_after(timeout, jiffies));
-               if (time_after(jiffies, timeout))
+               if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
                        WARN(1, "pipe_off wait timed out\n");
        }
 }
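The new pipe_dsl_stopped() helper above decides the pipe has stopped by sampling the display scanline register twice, 5 ms apart, and treating an unchanged value as stopped, which lets intel_wait_for_pipe_off() reuse the generic wait_for() poll with a 100 ms timeout instead of the open-coded loop it replaces. A userspace sketch of the same sample-twice idea; read_scanline() and its fake counter are invented stand-ins for the PIPEDSL read.

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

static bool pipe_running = true;	/* fake hardware state */
static unsigned int fake_scanline;

/* Invented stand-in for reading PIPEDSL: advances while running, freezes when stopped. */
static unsigned int read_scanline(void)
{
	if (pipe_running)
		fake_scanline = (fake_scanline + 37) % 1080;
	return fake_scanline;
}

/* Same shape as pipe_dsl_stopped(): two samples 5 ms apart must match. */
static bool dsl_stopped(void)
{
	unsigned int line1 = read_scanline();

	usleep(5000);
	return read_scanline() == line1;
}

int main(void)
{
	printf("while running: stopped=%d\n", dsl_stopped());
	pipe_running = false;
	printf("after stop:    stopped=%d\n", dsl_stopped());
	return 0;
}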
@@ -929,6 +922,24 @@ void assert_pll(struct drm_i915_private *dev_priv,
             state_string(state), state_string(cur_state));
 }
 
+/* XXX: the dsi pll is shared between MIPI DSI ports */
+static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
+{
+       u32 val;
+       bool cur_state;
+
+       mutex_lock(&dev_priv->dpio_lock);
+       val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+       mutex_unlock(&dev_priv->dpio_lock);
+
+       cur_state = val & DSI_PLL_VCO_EN;
+       WARN(cur_state != state,
+            "DSI PLL state assertion failure (expected %s, current %s)\n",
+            state_string(state), state_string(cur_state));
+}
+#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
+#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
+
 struct intel_shared_dpll *
 intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
 {
@@ -1069,6 +1080,26 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
             pipe_name(pipe));
 }
 
+static void assert_cursor(struct drm_i915_private *dev_priv,
+                         enum pipe pipe, bool state)
+{
+       struct drm_device *dev = dev_priv->dev;
+       bool cur_state;
+
+       if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+               cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
+       else if (IS_845G(dev) || IS_I865G(dev))
+               cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
+       else
+               cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+
+       WARN(cur_state != state,
+            "cursor on pipe %c assertion failure (expected %s, current %s)\n",
+            pipe_name(pipe), state_string(state), state_string(cur_state));
+}
+#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
+#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
+
 void assert_pipe(struct drm_i915_private *dev_priv,
                 enum pipe pipe, bool state)
 {
@@ -1323,6 +1354,26 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
        assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
 }
 
+static void intel_init_dpio(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!IS_VALLEYVIEW(dev))
+               return;
+
+       /*
+        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
+        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
+        *   b. The other bits such as sfr settings / modesel may all be set
+        *      to 0.
+        *
+        * This should only be done on init and resume from S3 with both
+        * PLLs disabled, or we risk losing DPIO and PLL synchronization.
+        */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+}
+
 static void vlv_enable_pll(struct intel_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
@@ -1429,6 +1480,20 @@ static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
        POSTING_READ(DPLL(pipe));
 }
 
+static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+       u32 val = 0;
+
+       /* Make sure the pipe isn't still relying on us */
+       assert_pipe_disabled(dev_priv, pipe);
+
+       /* Leave integrated clock source enabled */
+       if (pipe == PIPE_B)
+               val = DPLL_INTEGRATED_CRI_CLK_VLV;
+       I915_WRITE(DPLL(pipe), val);
+       POSTING_READ(DPLL(pipe));
+}
+
 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
 {
        u32 port_mask;
@@ -1661,7 +1726,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
  * returning.
  */
 static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
-                             bool pch_port)
+                             bool pch_port, bool dsi)
 {
        enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
                                                                      pipe);
@@ -1670,6 +1735,7 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
        u32 val;
 
        assert_planes_disabled(dev_priv, pipe);
+       assert_cursor_disabled(dev_priv, pipe);
        assert_sprites_disabled(dev_priv, pipe);
 
        if (HAS_PCH_LPT(dev_priv->dev))
@@ -1683,7 +1749,10 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
         * need the check.
         */
        if (!HAS_PCH_SPLIT(dev_priv->dev))
-               assert_pll_enabled(dev_priv, pipe);
+               if (dsi)
+                       assert_dsi_pll_enabled(dev_priv);
+               else
+                       assert_pll_enabled(dev_priv, pipe);
        else {
                if (pch_port) {
                        /* if driving the PCH, we need FDI enabled */
@@ -1728,6 +1797,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
         * or we might hang the display.
         */
        assert_planes_disabled(dev_priv, pipe);
+       assert_cursor_disabled(dev_priv, pipe);
        assert_sprites_disabled(dev_priv, pipe);
 
        /* Don't disable pipe A or pipe A PLLs if needed */
@@ -1747,63 +1817,75 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
  * Plane regs are double buffered, going from enabled->disabled needs a
  * trigger in order to latch.  The display address reg provides this.
  */
-void intel_flush_display_plane(struct drm_i915_private *dev_priv,
-                                     enum plane plane)
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+                              enum plane plane)
 {
-       if (dev_priv->info->gen >= 4)
-               I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
-       else
-               I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
+       u32 reg = dev_priv->info->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);
+
+       I915_WRITE(reg, I915_READ(reg));
+       POSTING_READ(reg);
 }
 
 /**
- * intel_enable_plane - enable a display plane on a given pipe
+ * intel_enable_primary_plane - enable the primary plane on a given pipe
  * @dev_priv: i915 private structure
  * @plane: plane to enable
  * @pipe: pipe being fed
  *
  * Enable @plane on @pipe, making sure that @pipe is running first.
  */
-static void intel_enable_plane(struct drm_i915_private *dev_priv,
-                              enum plane plane, enum pipe pipe)
+static void intel_enable_primary_plane(struct drm_i915_private *dev_priv,
+                                      enum plane plane, enum pipe pipe)
 {
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
        int reg;
        u32 val;
 
        /* If the pipe isn't enabled, we can't pump pixels and may hang */
        assert_pipe_enabled(dev_priv, pipe);
 
+       WARN(intel_crtc->primary_enabled, "Primary plane already enabled\n");
+
+       intel_crtc->primary_enabled = true;
+
        reg = DSPCNTR(plane);
        val = I915_READ(reg);
        if (val & DISPLAY_PLANE_ENABLE)
                return;
 
        I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
-       intel_flush_display_plane(dev_priv, plane);
+       intel_flush_primary_plane(dev_priv, plane);
        intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
 /**
- * intel_disable_plane - disable a display plane
+ * intel_disable_primary_plane - disable the primary plane
  * @dev_priv: i915 private structure
  * @plane: plane to disable
  * @pipe: pipe consuming the data
  *
  * Disable @plane; should be an independent operation.
  */
-static void intel_disable_plane(struct drm_i915_private *dev_priv,
-                               enum plane plane, enum pipe pipe)
+static void intel_disable_primary_plane(struct drm_i915_private *dev_priv,
+                                       enum plane plane, enum pipe pipe)
 {
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
        int reg;
        u32 val;
 
+       WARN(!intel_crtc->primary_enabled, "Primary plane already disabled\n");
+
+       intel_crtc->primary_enabled = false;
+
        reg = DSPCNTR(plane);
        val = I915_READ(reg);
        if ((val & DISPLAY_PLANE_ENABLE) == 0)
                return;
 
        I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
-       intel_flush_display_plane(dev_priv, plane);
+       intel_flush_primary_plane(dev_priv, plane);
        intel_wait_for_vblank(dev_priv->dev, pipe);
 }
 
@@ -1839,10 +1921,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
                alignment = 0;
                break;
        case I915_TILING_Y:
-               /* Despite that we check this in framebuffer_init userspace can
-                * screw us over and change the tiling after the fact. Only
-                * pinned buffers can't change their tiling. */
-               DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
+               WARN(1, "Y tiled bo slipped through, driver bug!\n");
                return -EINVAL;
        default:
                BUG();
@@ -2244,11 +2323,26 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
                return ret;
        }
 
-       /* Update pipe size and adjust fitter if needed */
+       /*
+        * Update pipe size and adjust fitter if needed: the reason for this is
+        * that in compute_mode_changes we check the native mode (not the pfit
+        * mode) to see if we can flip rather than do a full mode set. In the
+        * fastboot case, we'll flip, but if we don't update the pipesrc and
+        * pfit state, we'll end up with a big fb scanned out into the wrong
+        * sized surface.
+        *
+        * To fix this properly, we need to hoist the checks up into
+        * compute_mode_changes (or above), check the actual pfit state and
+        * whether the platform allows pfit disable with pipe active, and only
+        * then update the pipesrc and pfit state, even on the flip path.
+        */
        if (i915_fastboot) {
+               const struct drm_display_mode *adjusted_mode =
+                       &intel_crtc->config.adjusted_mode;
+
                I915_WRITE(PIPESRC(intel_crtc->pipe),
-                          ((crtc->mode.hdisplay - 1) << 16) |
-                          (crtc->mode.vdisplay - 1));
+                          ((adjusted_mode->crtc_hdisplay - 1) << 16) |
+                          (adjusted_mode->crtc_vdisplay - 1));
                if (!intel_crtc->config.pch_pfit.enabled &&
                    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
                     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
@@ -2872,6 +2966,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
        u32 divsel, phaseinc, auxdiv, phasedir = 0;
        u32 temp;
 
@@ -2889,14 +2984,14 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
                        SBI_ICLK);
 
        /* 20MHz is a corner case which is out of range for the 7-bit divisor */
-       if (crtc->mode.clock == 20000) {
+       if (clock == 20000) {
                auxdiv = 1;
                divsel = 0x41;
                phaseinc = 0x20;
        } else {
                /* The iCLK virtual clock root frequency is in MHz,
-                * but the crtc->mode.clock in in KHz. To get the divisors,
-                * it is necessary to divide one by another, so we
+                * but the adjusted_mode->crtc_clock is in KHz. To get the
+                * divisors, it is necessary to divide one by another, so we
                 * convert the virtual clock precision to KHz here for higher
                 * precision.
                 */
@@ -2904,7 +2999,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
                u32 iclk_pi_range = 64;
                u32 desired_divisor, msb_divisor_value, pi_value;
 
-               desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
+               desired_divisor = (iclk_virtual_root_freq / clock);
                msb_divisor_value = desired_divisor / iclk_pi_range;
                pi_value = desired_divisor % iclk_pi_range;
 
@@ -2920,7 +3015,7 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
                ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
 
        DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
-                       crtc->mode.clock,
+                       clock,
                        auxdiv,
                        divsel,
                        phasedir,
@@ -3240,6 +3335,92 @@ static void intel_disable_planes(struct drm_crtc *crtc)
                        intel_plane_disable(&intel_plane->base);
 }
 
+void hsw_enable_ips(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+
+       if (!crtc->config.ips_enabled)
+               return;
+
+       /* We can only enable IPS after we enable a plane and wait for a vblank.
+        * We guarantee that the plane is enabled by calling intel_enable_ips
+        * only after intel_enable_plane. And intel_enable_plane already waits
+        * for a vblank, so all we need to do here is to enable the IPS bit. */
+       assert_plane_enabled(dev_priv, crtc->plane);
+       I915_WRITE(IPS_CTL, IPS_ENABLE);
+
+       /* The bit only becomes 1 in the next vblank, so this wait here is
+        * essentially intel_wait_for_vblank. If we don't have this and don't
+        * wait for vblanks until the end of crtc_enable, then the HW state
+        * readout code will complain that the expected IPS_CTL value is not the
+        * one we read. */
+       if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
+               DRM_ERROR("Timed out waiting for IPS enable\n");
+}
+
+void hsw_disable_ips(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (!crtc->config.ips_enabled)
+               return;
+
+       assert_plane_enabled(dev_priv, crtc->plane);
+       I915_WRITE(IPS_CTL, 0);
+       POSTING_READ(IPS_CTL);
+
+       /* We need to wait for a vblank before we can disable the plane. */
+       intel_wait_for_vblank(dev, crtc->pipe);
+}
+
+/** Loads the palette/gamma unit for the CRTC with the prepared values */
+static void intel_crtc_load_lut(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       int palreg = PALETTE(pipe);
+       int i;
+       bool reenable_ips = false;
+
+       /* The clocks have to be on to load the palette. */
+       if (!crtc->enabled || !intel_crtc->active)
+               return;
+
+       if (!HAS_PCH_SPLIT(dev_priv->dev)) {
+               if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+                       assert_dsi_pll_enabled(dev_priv);
+               else
+                       assert_pll_enabled(dev_priv, pipe);
+       }
+
+       /* use legacy palette for Ironlake */
+       if (HAS_PCH_SPLIT(dev))
+               palreg = LGC_PALETTE(pipe);
+
+       /* Workaround : Do not read or write the pipe palette/gamma data while
+        * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
+        */
+       if (intel_crtc->config.ips_enabled &&
+           ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
+            GAMMA_MODE_MODE_SPLIT)) {
+               hsw_disable_ips(intel_crtc);
+               reenable_ips = true;
+       }
+
+       for (i = 0; i < 256; i++) {
+               I915_WRITE(palreg + 4 * i,
+                          (intel_crtc->lut_r[i] << 16) |
+                          (intel_crtc->lut_g[i] << 8) |
+                          intel_crtc->lut_b[i]);
+       }
+
+       if (reenable_ips)
+               hsw_enable_ips(intel_crtc);
+}
+
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
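The intel_crtc_load_lut() implementation above writes 256 palette entries with the 8-bit red, green and blue values packed into one dword as (r << 16) | (g << 8) | b. A minimal sketch of that packing for a linear (identity) gamma ramp; the array and helper names are local to the example.

#include <stdio.h>
#include <stdint.h>

/* Pack one legacy palette entry the same way intel_crtc_load_lut() does. */
static uint32_t pack_lut_entry(uint8_t r, uint8_t g, uint8_t b)
{
	return ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
}

int main(void)
{
	uint32_t lut[256];
	int i;

	/* Identity ramp: entry i maps every channel to i. */
	for (i = 0; i < 256; i++)
		lut[i] = pack_lut_entry(i, i, i);

	printf("entry   0 = 0x%08X\n", (unsigned int)lut[0]);
	printf("entry 128 = 0x%08X\n", (unsigned int)lut[128]);
	printf("entry 255 = 0x%08X\n", (unsigned int)lut[255]);
	return 0;
}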
@@ -3259,8 +3440,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
-       intel_update_watermarks(dev);
-
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
                        encoder->pre_enable(encoder);
@@ -3283,9 +3462,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
         */
        intel_crtc_load_lut(crtc);
 
+       intel_update_watermarks(crtc);
        intel_enable_pipe(dev_priv, pipe,
-                         intel_crtc->config.has_pch_encoder);
-       intel_enable_plane(dev_priv, plane, pipe);
+                         intel_crtc->config.has_pch_encoder, false);
+       intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
 
@@ -3319,34 +3499,74 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
        return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
 }
 
-static void hsw_enable_ips(struct intel_crtc *crtc)
+static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int plane = intel_crtc->plane;
 
-       if (!crtc->config.ips_enabled)
-               return;
+       intel_enable_primary_plane(dev_priv, plane, pipe);
+       intel_enable_planes(crtc);
+       intel_crtc_update_cursor(crtc, true);
 
-       /* We can only enable IPS after we enable a plane and wait for a vblank.
-        * We guarantee that the plane is enabled by calling intel_enable_ips
-        * only after intel_enable_plane. And intel_enable_plane already waits
-        * for a vblank, so all we need to do here is to enable the IPS bit. */
-       assert_plane_enabled(dev_priv, crtc->plane);
-       I915_WRITE(IPS_CTL, IPS_ENABLE);
+       hsw_enable_ips(intel_crtc);
+
+       mutex_lock(&dev->struct_mutex);
+       intel_update_fbc(dev);
+       mutex_unlock(&dev->struct_mutex);
 }
 
-static void hsw_disable_ips(struct intel_crtc *crtc)
+static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
 {
-       struct drm_device *dev = crtc->base.dev;
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       int pipe = intel_crtc->pipe;
+       int plane = intel_crtc->plane;
 
-       if (!crtc->config.ips_enabled)
-               return;
+       intel_crtc_wait_for_pending_flips(crtc);
+       drm_vblank_off(dev, pipe);
 
-       assert_plane_enabled(dev_priv, crtc->plane);
-       I915_WRITE(IPS_CTL, 0);
+       /* FBC must be disabled before disabling the plane on HSW. */
+       if (dev_priv->fbc.plane == plane)
+               intel_disable_fbc(dev);
 
-       /* We need to wait for a vblank before we can disable the plane. */
-       intel_wait_for_vblank(dev, crtc->pipe);
+       hsw_disable_ips(intel_crtc);
+
+       intel_crtc_update_cursor(crtc, false);
+       intel_disable_planes(crtc);
+       intel_disable_primary_plane(dev_priv, plane, pipe);
+}
+
+/*
+ * This implements the workaround described in the "notes" section of the mode
+ * set sequence documentation. When going from no pipes or single pipe to
+ * multiple pipes, and planes are enabled after the pipe, we need to wait at
+ * least 2 vblanks on the first pipe before enabling planes on the second pipe.
+ */
+static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct intel_crtc *crtc_it, *other_active_crtc = NULL;
+
+       /* We want to get the other_active_crtc only if there's only 1 other
+        * active crtc. */
+       list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
+               if (!crtc_it->active || crtc_it == crtc)
+                       continue;
+
+               if (other_active_crtc)
+                       return;
+
+               other_active_crtc = crtc_it;
+       }
+       if (!other_active_crtc)
+               return;
+
+       intel_wait_for_vblank(dev, other_active_crtc->pipe);
+       intel_wait_for_vblank(dev, other_active_crtc->pipe);
 }
 
 static void haswell_crtc_enable(struct drm_crtc *crtc)
@@ -3356,7 +3576,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
-       int plane = intel_crtc->plane;
 
        WARN_ON(!crtc->enabled);
 
@@ -3369,8 +3588,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        if (intel_crtc->config.has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
 
-       intel_update_watermarks(dev);
-
        if (intel_crtc->config.has_pch_encoder)
                dev_priv->display.fdi_link_train(crtc);
 
@@ -3391,23 +3608,22 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
        intel_ddi_set_pipe_settings(crtc);
        intel_ddi_enable_transcoder_func(crtc);
 
+       intel_update_watermarks(crtc);
        intel_enable_pipe(dev_priv, pipe,
-                         intel_crtc->config.has_pch_encoder);
-       intel_enable_plane(dev_priv, plane, pipe);
-       intel_enable_planes(crtc);
-       intel_crtc_update_cursor(crtc, true);
-
-       hsw_enable_ips(intel_crtc);
+                         intel_crtc->config.has_pch_encoder, false);
 
        if (intel_crtc->config.has_pch_encoder)
                lpt_pch_enable(crtc);
 
-       mutex_lock(&dev->struct_mutex);
-       intel_update_fbc(dev);
-       mutex_unlock(&dev->struct_mutex);
-
-       for_each_encoder_on_crtc(dev, crtc, encoder)
+       for_each_encoder_on_crtc(dev, crtc, encoder) {
                encoder->enable(encoder);
+               intel_opregion_notify_encoder(encoder, true);
+       }
+
+       /* If we change the relative order between pipe/planes enabling, we need
+        * to change the workaround. */
+       haswell_mode_set_planes_workaround(intel_crtc);
+       haswell_crtc_enable_planes(crtc);
 
        /*
         * There seems to be a race in PCH platform hw (at least on some
@@ -3460,7 +3676,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 
        intel_crtc_update_cursor(crtc, false);
        intel_disable_planes(crtc);
-       intel_disable_plane(dev_priv, plane, pipe);
+       intel_disable_primary_plane(dev_priv, plane, pipe);
 
        if (intel_crtc->config.has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
@@ -3501,7 +3717,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        }
 
        intel_crtc->active = false;
-       intel_update_watermarks(dev);
+       intel_update_watermarks(crtc);
 
        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
@@ -3515,27 +3731,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
-       int plane = intel_crtc->plane;
        enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
 
        if (!intel_crtc->active)
                return;
 
-       for_each_encoder_on_crtc(dev, crtc, encoder)
-               encoder->disable(encoder);
+       haswell_crtc_disable_planes(crtc);
 
-       intel_crtc_wait_for_pending_flips(crtc);
-       drm_vblank_off(dev, pipe);
-
-       /* FBC must be disabled before disabling the plane on HSW. */
-       if (dev_priv->fbc.plane == plane)
-               intel_disable_fbc(dev);
-
-       hsw_disable_ips(intel_crtc);
-
-       intel_crtc_update_cursor(crtc, false);
-       intel_disable_planes(crtc);
-       intel_disable_plane(dev_priv, plane, pipe);
+       for_each_encoder_on_crtc(dev, crtc, encoder) {
+               intel_opregion_notify_encoder(encoder, false);
+               encoder->disable(encoder);
+       }
 
        if (intel_crtc->config.has_pch_encoder)
                intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
@@ -3558,7 +3764,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
        }
 
        intel_crtc->active = false;
-       intel_update_watermarks(dev);
+       intel_update_watermarks(crtc);
 
        mutex_lock(&dev->struct_mutex);
        intel_update_fbc(dev);
@@ -3650,6 +3856,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        struct intel_encoder *encoder;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
+       bool is_dsi;
 
        WARN_ON(!crtc->enabled);
 
@@ -3657,13 +3864,15 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
                return;
 
        intel_crtc->active = true;
-       intel_update_watermarks(dev);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_pll_enable)
                        encoder->pre_pll_enable(encoder);
 
-       vlv_enable_pll(intel_crtc);
+       is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+       if (!is_dsi)
+               vlv_enable_pll(intel_crtc);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
@@ -3673,8 +3882,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
 
        intel_crtc_load_lut(crtc);
 
-       intel_enable_pipe(dev_priv, pipe, false);
-       intel_enable_plane(dev_priv, plane, pipe);
+       intel_update_watermarks(crtc);
+       intel_enable_pipe(dev_priv, pipe, false, is_dsi);
+       intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        intel_crtc_update_cursor(crtc, true);
 
@@ -3699,7 +3909,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
                return;
 
        intel_crtc->active = true;
-       intel_update_watermarks(dev);
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                if (encoder->pre_enable)
@@ -3711,8 +3920,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
        intel_crtc_load_lut(crtc);
 
-       intel_enable_pipe(dev_priv, pipe, false);
-       intel_enable_plane(dev_priv, plane, pipe);
+       intel_update_watermarks(crtc);
+       intel_enable_pipe(dev_priv, pipe, false, false);
+       intel_enable_primary_plane(dev_priv, plane, pipe);
        intel_enable_planes(crtc);
        /* The fixup needs to happen before cursor is enabled */
        if (IS_G4X(dev))
@@ -3768,7 +3978,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
        intel_crtc_dpms_overlay(intel_crtc, false);
        intel_crtc_update_cursor(crtc, false);
        intel_disable_planes(crtc);
-       intel_disable_plane(dev_priv, plane, pipe);
+       intel_disable_primary_plane(dev_priv, plane, pipe);
 
        intel_disable_pipe(dev_priv, pipe);
 
@@ -3778,11 +3988,15 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
                if (encoder->post_disable)
                        encoder->post_disable(encoder);
 
-       i9xx_disable_pll(dev_priv, pipe);
+       if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
+               vlv_disable_pll(dev_priv, pipe);
+       else if (!IS_VALLEYVIEW(dev))
+               i9xx_disable_pll(dev_priv, pipe);
 
        intel_crtc->active = false;
+       intel_update_watermarks(crtc);
+
        intel_update_fbc(dev);
-       intel_update_watermarks(dev);
 }
 
 static void i9xx_crtc_off(struct drm_crtc *crtc)
@@ -3856,6 +4070,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
        dev_priv->display.off(crtc);
 
        assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
+       assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
        assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
 
        if (crtc->fb) {
@@ -3941,8 +4156,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
  * consider. */
 void intel_connector_dpms(struct drm_connector *connector, int mode)
 {
-       struct intel_encoder *encoder = intel_attached_encoder(connector);
-
        /* All the simple cases only support two dpms states. */
        if (mode != DRM_MODE_DPMS_ON)
                mode = DRM_MODE_DPMS_OFF;
@@ -3953,10 +4166,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
        connector->dpms = mode;
 
        /* Only need to change hw state when actually enabled */
-       if (encoder->base.crtc)
-               intel_encoder_dpms(encoder, mode);
-       else
-               WARN_ON(encoder->connectors_active != false);
+       if (connector->encoder)
+               intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
 
        intel_modeset_check_state(connector->dev);
 }
@@ -4049,8 +4260,7 @@ retry:
         */
        link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
 
-       fdi_dotclock = adjusted_mode->clock;
-       fdi_dotclock /= pipe_config->pixel_multiplier;
+       fdi_dotclock = adjusted_mode->crtc_clock;
 
        lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
                                           pipe_config->pipe_bpp);
@@ -4092,13 +4302,39 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
        struct drm_device *dev = crtc->base.dev;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 
-       if (HAS_PCH_SPLIT(dev)) {
-               /* FDI link clock is fixed at 2.7G */
-               if (pipe_config->requested_mode.clock * 3
-                   > IRONLAKE_FDI_FREQ * 4)
+       /* FIXME should check pixel clock limits on all platforms */
+       if (INTEL_INFO(dev)->gen < 4) {
+               struct drm_i915_private *dev_priv = dev->dev_private;
+               int clock_limit =
+                       dev_priv->display.get_display_clock_speed(dev);
+
+               /*
+                * Enable pixel doubling when the dot clock
+                * is > 90% of the (display) core speed.
+                *
+                * GDG double wide on either pipe,
+                * otherwise pipe A only.
+                */
+               if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
+                   adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
+                       clock_limit *= 2;
+                       pipe_config->double_wide = true;
+               }
+
+               if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
                        return -EINVAL;
        }
 
+       /*
+        * Pipe horizontal size must be even in:
+        * - DVO ganged mode
+        * - LVDS dual channel mode
+        * - Double wide pipe
+        */
+       if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
+            intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
+               pipe_config->pipe_src_w &= ~1;
+
        /* Cantiga+ cannot handle modes with a hsync front porch of 0.
         * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
         */
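The new gen < 4 path in intel_crtc_compute_config() enables double-wide mode when the adjusted dot clock exceeds 90% of the core display clock and rejects the mode if it still exceeds 90% of the doubled limit; dual-link LVDS and double-wide pipes additionally force an even horizontal source size by clearing bit 0 of pipe_src_w. A standalone sketch of the clock decision with assumed clock numbers; the pipe A / I915G (GDG) qualification from the hunk is omitted for brevity.

#include <stdio.h>
#include <stdbool.h>

/* Returns 0 if the mode fits, -1 (standing in for -EINVAL) if it does not. */
static int check_pixel_clock(int dotclock, int cdclk, bool *double_wide)
{
	int clock_limit = cdclk;

	*double_wide = false;
	if (dotclock > clock_limit * 9 / 10) {
		clock_limit *= 2;
		*double_wide = true;
	}
	if (dotclock > clock_limit * 9 / 10)
		return -1;
	return 0;
}

int main(void)
{
	int cdclk = 200000;	/* assumed core display clock, kHz */
	int clocks[] = { 150000, 250000, 450000 };
	unsigned int i;

	for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
		bool dw;
		int ret = check_pixel_clock(clocks[i], cdclk, &dw);

		printf("dotclock %6d kHz: %s, double_wide=%d\n",
		       clocks[i], ret ? "rejected" : "ok", dw);
	}
	return 0;
}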
@@ -4262,28 +4498,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
                && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
 }
 
-static int vlv_get_refclk(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int refclk = 27000; /* for DP & HDMI */
-
-       return 100000; /* only one validated so far */
-
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
-               refclk = 96000;
-       } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-               if (intel_panel_use_ssc(dev_priv))
-                       refclk = 100000;
-               else
-                       refclk = 96000;
-       } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
-               refclk = 100000;
-       }
-
-       return refclk;
-}
-
 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
 {
        struct drm_device *dev = crtc->dev;
@@ -4291,7 +4505,7 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
        int refclk;
 
        if (IS_VALLEYVIEW(dev)) {
-               refclk = vlv_get_refclk(crtc);
+               refclk = 100000;
        } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
            intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
                refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
@@ -4349,7 +4563,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
        }
 }
 
-static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
+static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
+               pipe)
 {
        u32 reg_val;
 
@@ -4357,24 +4572,24 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv)
         * PLLB opamp always calibrates to max value of 0x3f, force enable it
         * and set it to a reasonable value instead.
         */
-       reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
        reg_val &= 0xffffff00;
        reg_val |= 0x00000030;
-       vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+       vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
 
-       reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
        reg_val &= 0x8cffffff;
        reg_val = 0x8c000000;
-       vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+       vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
 
-       reg_val = vlv_dpio_read(dev_priv, DPIO_IREF(1));
+       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
        reg_val &= 0xffffff00;
-       vlv_dpio_write(dev_priv, DPIO_IREF(1), reg_val);
+       vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
 
-       reg_val = vlv_dpio_read(dev_priv, DPIO_CALIBRATION);
+       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
        reg_val &= 0x00ffffff;
        reg_val |= 0xb0000000;
-       vlv_dpio_write(dev_priv, DPIO_CALIBRATION, reg_val);
+       vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
 }
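The hunk above only threads an extra pipe argument through the DPIO accessors; the register updates themselves remain plain read-modify-write sequences. A tiny standalone illustration of that pattern (the mask and value mirror the IREF update above; the helper names are hypothetical):

typedef unsigned int u32;

/* Keep the bits selected by keep_mask, then OR in set_bits. */
static u32 dpio_rmw(u32 old, u32 keep_mask, u32 set_bits)
{
        return (old & keep_mask) | set_bits;
}

/* The IREF update above preserves bits 31:8 and programs 0x30. */
static u32 iref_update_example(u32 old)
{
        return dpio_rmw(old, 0xffffff00, 0x00000030);
}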
 
 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
@@ -4440,18 +4655,18 @@ static void vlv_update_pll(struct intel_crtc *crtc)
 
        /* PLL B needs special handling */
        if (pipe)
-               vlv_pllb_recal_opamp(dev_priv);
+               vlv_pllb_recal_opamp(dev_priv, pipe);
 
        /* Set up Tx target for periodic Rcomp update */
-       vlv_dpio_write(dev_priv, DPIO_IREF_BCAST, 0x0100000f);
+       vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
 
        /* Disable target IRef on PLL */
-       reg_val = vlv_dpio_read(dev_priv, DPIO_IREF_CTL(pipe));
+       reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
        reg_val &= 0x00ffffff;
-       vlv_dpio_write(dev_priv, DPIO_IREF_CTL(pipe), reg_val);
+       vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
 
        /* Disable fast lock */
-       vlv_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x610);
+       vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
 
        /* Set idtafcrecal before PLL is enabled */
        mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -4465,55 +4680,55 @@ static void vlv_update_pll(struct intel_crtc *crtc)
         * Note: don't use the DAC post divider as it seems unstable.
         */
        mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
-       vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+       vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
 
        mdiv |= DPIO_ENABLE_CALIBRATION;
-       vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
+       vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
 
        /* Set HBR and RBR LPF coefficients */
        if (crtc->config.port_clock == 162000 ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
-               vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+               vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
                                 0x009f0003);
        else
-               vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
+               vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
                                 0x00d0000f);
 
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
                /* Use SSC source */
                if (!pipe)
-                       vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
                                         0x0df40000);
                else
-                       vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
                                         0x0df70000);
        } else { /* HDMI or VGA */
                /* Use bend source */
                if (!pipe)
-                       vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
                                         0x0df70000);
                else
-                       vlv_dpio_write(dev_priv, DPIO_REFSFR(pipe),
+                       vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
                                         0x0df40000);
        }
 
-       coreclk = vlv_dpio_read(dev_priv, DPIO_CORE_CLK(pipe));
+       coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
        coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
        if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
            intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
                coreclk |= 0x01000000;
-       vlv_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), coreclk);
+       vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
 
-       vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
+       vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
 
        /* Enable DPIO clock input */
        dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
                DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
-       if (pipe)
+       /* We should never disable this, set it here for state tracking */
+       if (pipe == PIPE_B)
                dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
        dpll |= DPLL_VCO_ENABLE;
        crtc->config.dpll_hw_state.dpll = dpll;
 
@@ -4651,7 +4866,6 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
        enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
        struct drm_display_mode *adjusted_mode =
                &intel_crtc->config.adjusted_mode;
-       struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
        uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
 
        /* We need to be careful not to change the adjusted mode, for otherwise
@@ -4704,7 +4918,8 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
         * always be the user's requested size.
         */
        I915_WRITE(PIPESRC(pipe),
-                  ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+                  ((intel_crtc->config.pipe_src_w - 1) << 16) |
+                  (intel_crtc->config.pipe_src_h - 1));
 }
 
 static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -4742,8 +4957,11 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
        }
 
        tmp = I915_READ(PIPESRC(crtc->pipe));
-       pipe_config->requested_mode.vdisplay = (tmp & 0xffff) + 1;
-       pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
+       pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
+       pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
+
+       pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
+       pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
 }
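Both hunks above move PIPESRC handling onto pipe_src_w/pipe_src_h. The register packs (width - 1) into bits 31:16 and (height - 1) into bits 15:0, so programming and readout are a simple pack/unpack pair (standalone sketch):

typedef unsigned int u32;

static u32 pipesrc_pack(int pipe_src_w, int pipe_src_h)
{
        return ((u32)(pipe_src_w - 1) << 16) | (u32)(pipe_src_h - 1);
}

static void pipesrc_unpack(u32 tmp, int *pipe_src_w, int *pipe_src_h)
{
        *pipe_src_h = (tmp & 0xffff) + 1;
        *pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
}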
 
 static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
@@ -4763,7 +4981,7 @@ static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
 
        crtc->mode.flags = pipe_config->adjusted_mode.flags;
 
-       crtc->mode.clock = pipe_config->adjusted_mode.clock;
+       crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
        crtc->mode.flags |= pipe_config->adjusted_mode.flags;
 }
 
@@ -4779,17 +4997,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
            I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
                pipeconf |= PIPECONF_ENABLE;
 
-       if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) {
-               /* Enable pixel doubling when the dot clock is > 90% of the (display)
-                * core speed.
-                *
-                * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
-                * pipe == 0 check?
-                */
-               if (intel_crtc->config.requested_mode.clock >
-                   dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
-                       pipeconf |= PIPECONF_DOUBLE_WIDE;
-       }
+       if (intel_crtc->config.double_wide)
+               pipeconf |= PIPECONF_DOUBLE_WIDE;
 
        /* only g4x and later have fancy bpc/dither controls */
        if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
@@ -4843,14 +5052,13 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
        int pipe = intel_crtc->pipe;
        int plane = intel_crtc->plane;
        int refclk, num_connectors = 0;
        intel_clock_t clock, reduced_clock;
        u32 dspcntr;
        bool ok, has_reduced_clock = false;
-       bool is_lvds = false;
+       bool is_lvds = false, is_dsi = false;
        struct intel_encoder *encoder;
        const intel_limit_t *limit;
        int ret;
@@ -4860,42 +5068,49 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                case INTEL_OUTPUT_LVDS:
                        is_lvds = true;
                        break;
+               case INTEL_OUTPUT_DSI:
+                       is_dsi = true;
+                       break;
                }
 
                num_connectors++;
        }
 
-       refclk = i9xx_get_refclk(crtc, num_connectors);
+       if (is_dsi)
+               goto skip_dpll;
 
-       /*
-        * Returns a set of divisors for the desired target clock with the given
-        * refclk, or FALSE.  The returned values represent the clock equation:
-        * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
-        */
-       limit = intel_limit(crtc, refclk);
-       ok = dev_priv->display.find_dpll(limit, crtc,
-                                        intel_crtc->config.port_clock,
-                                        refclk, NULL, &clock);
-       if (!ok && !intel_crtc->config.clock_set) {
-               DRM_ERROR("Couldn't find PLL settings for mode!\n");
-               return -EINVAL;
-       }
+       if (!intel_crtc->config.clock_set) {
+               refclk = i9xx_get_refclk(crtc, num_connectors);
 
-       if (is_lvds && dev_priv->lvds_downclock_avail) {
                /*
-                * Ensure we match the reduced clock's P to the target clock.
-                * If the clocks don't match, we can't switch the display clock
-                * by using the FP0/FP1. In such case we will disable the LVDS
-                * downclock feature.
-               */
-               has_reduced_clock =
-                       dev_priv->display.find_dpll(limit, crtc,
-                                                   dev_priv->lvds_downclock,
-                                                   refclk, &clock,
-                                                   &reduced_clock);
-       }
-       /* Compat-code for transition, will disappear. */
-       if (!intel_crtc->config.clock_set) {
+                * Returns a set of divisors for the desired target clock with
+                * the given refclk, or FALSE.  The returned values represent
+                * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
+                * 2) / p1 / p2.
+                */
+               limit = intel_limit(crtc, refclk);
+               ok = dev_priv->display.find_dpll(limit, crtc,
+                                                intel_crtc->config.port_clock,
+                                                refclk, NULL, &clock);
+               if (!ok) {
+                       DRM_ERROR("Couldn't find PLL settings for mode!\n");
+                       return -EINVAL;
+               }
+
+               if (is_lvds && dev_priv->lvds_downclock_avail) {
+                       /*
+                        * Ensure we match the reduced clock's P to the target
+                        * clock.  If the clocks don't match, we can't switch
+                        * the display clock by using the FP0/FP1. In such case
+                        * we will disable the LVDS downclock feature.
+                        */
+                       has_reduced_clock =
+                               dev_priv->display.find_dpll(limit, crtc,
+                                                           dev_priv->lvds_downclock,
+                                                           refclk, &clock,
+                                                           &reduced_clock);
+               }
+               /* Compat-code for transition, will disappear. */
                intel_crtc->config.dpll.n = clock.n;
                intel_crtc->config.dpll.m1 = clock.m1;
                intel_crtc->config.dpll.m2 = clock.m2;
@@ -4903,17 +5118,19 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                intel_crtc->config.dpll.p2 = clock.p2;
        }
 
-       if (IS_GEN2(dev))
+       if (IS_GEN2(dev)) {
                i8xx_update_pll(intel_crtc,
                                has_reduced_clock ? &reduced_clock : NULL,
                                num_connectors);
-       else if (IS_VALLEYVIEW(dev))
+       } else if (IS_VALLEYVIEW(dev)) {
                vlv_update_pll(intel_crtc);
-       else
+       } else {
                i9xx_update_pll(intel_crtc,
                                has_reduced_clock ? &reduced_clock : NULL,
                                 num_connectors);
+       }
 
+skip_dpll:
        /* Set up the display plane register */
        dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -4930,8 +5147,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
         * which should always be the user's requested size.
         */
        I915_WRITE(DSPSIZE(plane),
-                  ((mode->vdisplay - 1) << 16) |
-                  (mode->hdisplay - 1));
+                  ((intel_crtc->config.pipe_src_h - 1) << 16) |
+                  (intel_crtc->config.pipe_src_w - 1));
        I915_WRITE(DSPPOS(plane), 0);
 
        i9xx_set_pipeconf(intel_crtc);
@@ -4941,8 +5158,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 
        ret = intel_pipe_set_base(crtc, x, y, fb);
 
-       intel_update_watermarks(dev);
-
        return ret;
 }
 
@@ -4973,6 +5188,32 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
                        I915_READ(LVDS) & LVDS_BORDER_ENABLE;
 }
 
+static void vlv_crtc_clock_get(struct intel_crtc *crtc,
+                              struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe = pipe_config->cpu_transcoder;
+       intel_clock_t clock;
+       u32 mdiv;
+       int refclk = 100000;
+
+       mutex_lock(&dev_priv->dpio_lock);
+       mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
+       mutex_unlock(&dev_priv->dpio_lock);
+
+       clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+       clock.m2 = mdiv & DPIO_M2DIV_MASK;
+       clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+       clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+       clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+       vlv_clock(refclk, &clock);
+
+       /* clock.dot is the fast clock */
+       pipe_config->port_clock = clock.dot / 5;
+}
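vlv_clock() itself is not part of this diff, so the formula below is an assumption based on the conventional m1/m2/n/p1/p2 divider chain; only the final divide by five is taken from the comment above (clock.dot is the 5x fast clock). A hedged standalone sketch:

/* All clocks in kHz; divider values as decoded from mdiv. */
static int vlv_port_clock_sketch(int refclk, int m1, int m2, int n,
                                 int p1, int p2)
{
        long long vco = (long long)refclk * m1 * m2 / n;   /* assumed VCO formula */
        long long fast_clock = vco / (p1 * p2);

        return (int)(fast_clock / 5);   /* port_clock = fast clock / 5 */
}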
+
 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                 struct intel_crtc_config *pipe_config)
 {
@@ -4987,6 +5228,25 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
        if (!(tmp & PIPECONF_ENABLE))
                return false;
 
+       if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
+               switch (tmp & PIPECONF_BPC_MASK) {
+               case PIPECONF_6BPC:
+                       pipe_config->pipe_bpp = 18;
+                       break;
+               case PIPECONF_8BPC:
+                       pipe_config->pipe_bpp = 24;
+                       break;
+               case PIPECONF_10BPC:
+                       pipe_config->pipe_bpp = 30;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       if (INTEL_INFO(dev)->gen < 4)
+               pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
+
        intel_get_pipe_timings(crtc, pipe_config);
 
        i9xx_get_pfit_config(crtc, pipe_config);
@@ -5019,6 +5279,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
                                                     DPLL_PORTB_READY_MASK);
        }
 
+       if (IS_VALLEYVIEW(dev))
+               vlv_crtc_clock_get(crtc, pipe_config);
+       else
+               i9xx_crtc_clock_get(crtc, pipe_config);
+
        return true;
 }
 
@@ -5803,11 +6068,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        else
                intel_crtc->lowfreq_avail = false;
 
-       if (intel_crtc->config.has_pch_encoder) {
-               pll = intel_crtc_to_shared_dpll(intel_crtc);
-
-       }
-
        intel_set_pipe_timings(intel_crtc);
 
        if (intel_crtc->config.has_pch_encoder) {
@@ -5826,25 +6086,67 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
        ret = intel_pipe_set_base(crtc, x, y, fb);
 
-       intel_update_watermarks(dev);
+       return ret;
+}
+
+static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
+                                        struct intel_link_m_n *m_n)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe = crtc->pipe;
+
+       m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
+       m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
+       m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+               & ~TU_SIZE_MASK;
+       m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
+       m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+                   & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+}
+
+static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
+                                        enum transcoder transcoder,
+                                        struct intel_link_m_n *m_n)
+{
+       struct drm_device *dev = crtc->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum pipe pipe = crtc->pipe;
+
+       if (INTEL_INFO(dev)->gen >= 5) {
+               m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
+               m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
+               m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+                       & ~TU_SIZE_MASK;
+               m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
+               m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+                           & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+       } else {
+               m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
+               m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
+               m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+                       & ~TU_SIZE_MASK;
+               m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
+               m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+                           & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+       }
+}
 
-       return ret;
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+                     struct intel_crtc_config *pipe_config)
+{
+       if (crtc->config.has_pch_encoder)
+               intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
+       else
+               intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+                                            &pipe_config->dp_m_n);
 }
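The M/N pairs read back here are fixed-point ratios: per the comment kept in intel_dotclock_calculate() further down, link M/N encodes pixel_clock : link_clock, while the gmch (data) pair additionally folds in bpp and lane count. The real computation lives elsewhere in the driver and is not shown in this diff; a hedged sketch of producing such a ratio with an arbitrary power-of-two denominator:

#include <stdint.h>

static void link_m_n_sketch(int pixel_khz, int link_khz,
                            uint32_t *link_m, uint32_t *link_n)
{
        *link_n = 1u << 20;     /* arbitrary fixed-point denominator */
        *link_m = (uint32_t)((uint64_t)pixel_khz * *link_n / link_khz);
}

Feeding such a pair into intel_dotclock_calculate() recovers pixel_khz up to rounding.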
 
 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
                                        struct intel_crtc_config *pipe_config)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum transcoder transcoder = pipe_config->cpu_transcoder;
-
-       pipe_config->fdi_m_n.link_m = I915_READ(PIPE_LINK_M1(transcoder));
-       pipe_config->fdi_m_n.link_n = I915_READ(PIPE_LINK_N1(transcoder));
-       pipe_config->fdi_m_n.gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
-                                       & ~TU_SIZE_MASK;
-       pipe_config->fdi_m_n.gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
-       pipe_config->fdi_m_n.tu = ((I915_READ(PIPE_DATA_M1(transcoder))
-                                  & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+       intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
+                                    &pipe_config->fdi_m_n);
 }
 
 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
@@ -5885,6 +6187,23 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
        if (!(tmp & PIPECONF_ENABLE))
                return false;
 
+       switch (tmp & PIPECONF_BPC_MASK) {
+       case PIPECONF_6BPC:
+               pipe_config->pipe_bpp = 18;
+               break;
+       case PIPECONF_8BPC:
+               pipe_config->pipe_bpp = 24;
+               break;
+       case PIPECONF_10BPC:
+               pipe_config->pipe_bpp = 30;
+               break;
+       case PIPECONF_12BPC:
+               pipe_config->pipe_bpp = 36;
+               break;
+       default:
+               break;
+       }
+
        if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
                struct intel_shared_dpll *pll;
 
@@ -5916,6 +6235,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
                pipe_config->pixel_multiplier =
                        ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
                         >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+
+               ironlake_pch_clock_get(crtc, pipe_config);
        } else {
                pipe_config->pixel_multiplier = 1;
        }
@@ -5972,8 +6293,8 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
  * register. Callers should take care of disabling all the display engine
  * functions, doing the mode unset, fixing interrupts, etc.
  */
-void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
-                      bool switch_to_fclk, bool allow_power_down)
+static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+                             bool switch_to_fclk, bool allow_power_down)
 {
        uint32_t val;
 
@@ -6001,7 +6322,10 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 
        val = I915_READ(D_COMP);
        val |= D_COMP_COMP_DISABLE;
-       I915_WRITE(D_COMP, val);
+       mutex_lock(&dev_priv->rps.hw_lock);
+       if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+               DRM_ERROR("Failed to disable D_COMP\n");
+       mutex_unlock(&dev_priv->rps.hw_lock);
        POSTING_READ(D_COMP);
        ndelay(100);
 
@@ -6020,7 +6344,7 @@ void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
  * source.
  */
-void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 {
        uint32_t val;
 
@@ -6043,7 +6367,10 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
        val = I915_READ(D_COMP);
        val |= D_COMP_COMP_FORCE;
        val &= ~D_COMP_COMP_DISABLE;
-       I915_WRITE(D_COMP, val);
+       mutex_lock(&dev_priv->rps.hw_lock);
+       if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
+               DRM_ERROR("Failed to enable D_COMP\n");
+       mutex_unlock(&dev_priv->rps.hw_lock);
        POSTING_READ(D_COMP);
 
        val = I915_READ(LCPLL_CTL);
@@ -6226,22 +6553,62 @@ static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
        }
 }
 
-static void haswell_modeset_global_resources(struct drm_device *dev)
+#define for_each_power_domain(domain, mask)                            \
+       for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
+               if ((1 << (domain)) & (mask))
+
+static unsigned long get_pipe_power_domains(struct drm_device *dev,
+                                           enum pipe pipe, bool pfit_enabled)
 {
-       bool enable = false;
+       unsigned long mask;
+       enum transcoder transcoder;
+
+       transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);
+
+       mask = BIT(POWER_DOMAIN_PIPE(pipe));
+       mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
+       if (pfit_enabled)
+               mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+
+       return mask;
+}
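for_each_power_domain() is simply a loop over the set bits of a domain mask, as built by get_pipe_power_domains(). A standalone illustration with made-up domain numbers:

#include <stdio.h>

enum { DOM_PIPE_A, DOM_TRANSCODER_A, DOM_PANEL_FITTER_A, DOM_NUM };

int main(void)
{
        unsigned long mask = (1UL << DOM_PIPE_A) | (1UL << DOM_PANEL_FITTER_A);
        int domain;

        for (domain = 0; domain < DOM_NUM; domain++)
                if ((1UL << domain) & mask)
                        printf("grab power domain %d\n", domain);
        return 0;
}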
+
+static void modeset_update_power_wells(struct drm_device *dev)
+{
+       unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
        struct intel_crtc *crtc;
 
+       /*
+        * First get all needed power domains, then put all unneeded, to avoid
+        * any unnecessary toggling of the power wells.
+        */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+               enum intel_display_power_domain domain;
+
                if (!crtc->base.enabled)
                        continue;
 
-               if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
-                   crtc->config.cpu_transcoder != TRANSCODER_EDP)
-                       enable = true;
+               pipe_domains[crtc->pipe] = get_pipe_power_domains(dev,
+                                               crtc->pipe,
+                                               crtc->config.pch_pfit.enabled);
+
+               for_each_power_domain(domain, pipe_domains[crtc->pipe])
+                       intel_display_power_get(dev, domain);
        }
 
-       intel_set_power_well(dev, enable);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
+               enum intel_display_power_domain domain;
+
+               for_each_power_domain(domain, crtc->enabled_power_domains)
+                       intel_display_power_put(dev, domain);
+
+               crtc->enabled_power_domains = pipe_domains[crtc->pipe];
+       }
+}
 
+static void haswell_modeset_global_resources(struct drm_device *dev)
+{
+       modeset_update_power_wells(dev);
        hsw_update_package_c8(dev);
 }
 
@@ -6280,8 +6647,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
 
        ret = intel_pipe_set_base(crtc, x, y, fb);
 
-       intel_update_watermarks(dev);
-
        return ret;
 }
 
@@ -6389,6 +6754,44 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
        return 0;
 }
 
+static struct {
+       int clock;
+       u32 config;
+} hdmi_audio_clock[] = {
+       { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+       { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+       { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+       { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+       { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+       { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+       { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+       { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+       { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+       { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+               if (mode->clock == hdmi_audio_clock[i].clock)
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+               DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+               i = 1;
+       }
+
+       DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+                     hdmi_audio_clock[i].clock,
+                     hdmi_audio_clock[i].config);
+
+       return hdmi_audio_clock[i].config;
+}
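The helper above is a linear search of the clock/config table, falling back to entry 1 (the 25.2 MHz bspec default) when no exact match exists. A standalone sketch of the same pattern with a two-entry stand-in table:

struct clk_cfg { int clock; unsigned int config; };

static const struct clk_cfg tbl[] = {
        { 25175, 0x0 },
        { 25200, 0x1 },         /* stands in for the bspec default entry */
};

static unsigned int lookup_audio_config(int clock)
{
        unsigned int i;

        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                if (tbl[i].clock == clock)
                        return tbl[i].config;
        return tbl[1].config;   /* no exact match: fall back to the default */
}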
+
 static bool intel_eld_uptodate(struct drm_connector *connector,
                               int reg_eldv, uint32_t bits_eldv,
                               int reg_elda, uint32_t bits_elda,
@@ -6419,7 +6822,8 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
 }
 
 static void g4x_write_eld(struct drm_connector *connector,
-                         struct drm_crtc *crtc)
+                         struct drm_crtc *crtc,
+                         struct drm_display_mode *mode)
 {
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        uint8_t *eld = connector->eld;
@@ -6459,7 +6863,8 @@ static void g4x_write_eld(struct drm_connector *connector,
 }
 
 static void haswell_write_eld(struct drm_connector *connector,
-                                    struct drm_crtc *crtc)
+                             struct drm_crtc *crtc,
+                             struct drm_display_mode *mode)
 {
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        uint8_t *eld = connector->eld;
@@ -6512,8 +6917,9 @@ static void haswell_write_eld(struct drm_connector *connector,
                DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
                eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
                I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
-       } else
-               I915_WRITE(aud_config, 0);
+       } else {
+               I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+       }
 
        if (intel_eld_uptodate(connector,
                               aud_cntrl_st2, eldv,
@@ -6546,7 +6952,8 @@ static void haswell_write_eld(struct drm_connector *connector,
 }
 
 static void ironlake_write_eld(struct drm_connector *connector,
-                                    struct drm_crtc *crtc)
+                              struct drm_crtc *crtc,
+                              struct drm_display_mode *mode)
 {
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        uint8_t *eld = connector->eld;
@@ -6590,8 +6997,9 @@ static void ironlake_write_eld(struct drm_connector *connector,
                DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
                eld[5] |= (1 << 2);     /* Conn_Type, 0x1 = DisplayPort */
                I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
-       } else
-               I915_WRITE(aud_config, 0);
+       } else {
+               I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
+       }
 
        if (intel_eld_uptodate(connector,
                               aud_cntrl_st2, eldv,
@@ -6641,50 +7049,7 @@ void intel_write_eld(struct drm_encoder *encoder,
        connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
 
        if (dev_priv->display.write_eld)
-               dev_priv->display.write_eld(connector, crtc);
-}
-
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-void intel_crtc_load_lut(struct drm_crtc *crtc)
-{
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
-       int palreg = PALETTE(pipe);
-       int i;
-       bool reenable_ips = false;
-
-       /* The clocks have to be on to load the palette. */
-       if (!crtc->enabled || !intel_crtc->active)
-               return;
-
-       if (!HAS_PCH_SPLIT(dev_priv->dev))
-               assert_pll_enabled(dev_priv, pipe);
-
-       /* use legacy palette for Ironlake */
-       if (HAS_PCH_SPLIT(dev))
-               palreg = LGC_PALETTE(pipe);
-
-       /* Workaround : Do not read or write the pipe palette/gamma data while
-        * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
-        */
-       if (intel_crtc->config.ips_enabled &&
-           ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
-            GAMMA_MODE_MODE_SPLIT)) {
-               hsw_disable_ips(intel_crtc);
-               reenable_ips = true;
-       }
-
-       for (i = 0; i < 256; i++) {
-               I915_WRITE(palreg + 4 * i,
-                          (intel_crtc->lut_r[i] << 16) |
-                          (intel_crtc->lut_g[i] << 8) |
-                          intel_crtc->lut_b[i]);
-       }
-
-       if (reenable_ips)
-               hsw_enable_ips(intel_crtc);
+               dev_priv->display.write_eld(connector, crtc, mode);
 }
 
 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
@@ -6782,23 +7147,20 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
        int pipe = intel_crtc->pipe;
        int x = intel_crtc->cursor_x;
        int y = intel_crtc->cursor_y;
-       u32 base, pos;
+       u32 base = 0, pos = 0;
        bool visible;
 
-       pos = 0;
-
-       if (on && crtc->enabled && crtc->fb) {
+       if (on)
                base = intel_crtc->cursor_addr;
-               if (x > (int) crtc->fb->width)
-                       base = 0;
 
-               if (y > (int) crtc->fb->height)
-                       base = 0;
-       } else
+       if (x >= intel_crtc->config.pipe_src_w)
+               base = 0;
+
+       if (y >= intel_crtc->config.pipe_src_h)
                base = 0;
 
        if (x < 0) {
-               if (x + intel_crtc->cursor_width < 0)
+               if (x + intel_crtc->cursor_width <= 0)
                        base = 0;
 
                pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
@@ -6807,7 +7169,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
        pos |= x << CURSOR_X_SHIFT;
 
        if (y < 0) {
-               if (y + intel_crtc->cursor_height < 0)
+               if (y + intel_crtc->cursor_height <= 0)
                        base = 0;
 
                pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
@@ -6959,27 +7321,6 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
        return 0;
 }
 
-/** Sets the color ramps on behalf of RandR */
-void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-                                u16 blue, int regno)
-{
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       intel_crtc->lut_r[regno] = red >> 8;
-       intel_crtc->lut_g[regno] = green >> 8;
-       intel_crtc->lut_b[regno] = blue >> 8;
-}
-
-void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-                            u16 *blue, int regno)
-{
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
-       *red = intel_crtc->lut_r[regno] << 8;
-       *green = intel_crtc->lut_g[regno] << 8;
-       *blue = intel_crtc->lut_b[regno] << 8;
-}
-
 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
                                 u16 *blue, uint32_t start, uint32_t size)
 {
@@ -7015,14 +7356,21 @@ intel_framebuffer_create(struct drm_device *dev,
                return ERR_PTR(-ENOMEM);
        }
 
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto err;
+
        ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
-       if (ret) {
-               drm_gem_object_unreference_unlocked(&obj->base);
-               kfree(intel_fb);
-               return ERR_PTR(ret);
-       }
+       mutex_unlock(&dev->struct_mutex);
+       if (ret)
+               goto err;
 
        return &intel_fb->base;
+err:
+       drm_gem_object_unreference_unlocked(&obj->base);
+       kfree(intel_fb);
+
+       return ERR_PTR(ret);
 }
 
 static u32
@@ -7065,6 +7413,7 @@ static struct drm_framebuffer *
 mode_fits_in_fbdev(struct drm_device *dev,
                   struct drm_display_mode *mode)
 {
+#ifdef CONFIG_DRM_I915_FBDEV
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct drm_framebuffer *fb;
@@ -7085,6 +7434,9 @@ mode_fits_in_fbdev(struct drm_device *dev,
                return NULL;
 
        return fb;
+#else
+       return NULL;
+#endif
 }
 
 bool intel_get_load_detect_pipe(struct drm_connector *connector,
@@ -7228,6 +7580,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
        mutex_unlock(&crtc->mutex);
 }
 
+static int i9xx_pll_refclk(struct drm_device *dev,
+                          const struct intel_crtc_config *pipe_config)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+       if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+               return dev_priv->vbt.lvds_ssc_freq * 1000;
+       else if (HAS_PCH_SPLIT(dev))
+               return 120000;
+       else if (!IS_GEN2(dev))
+               return 96000;
+       else
+               return 48000;
+}
+
 /* Returns the clock of the currently programmed mode of the given pipe. */
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_config *pipe_config)
@@ -7235,14 +7603,15 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = pipe_config->cpu_transcoder;
-       u32 dpll = I915_READ(DPLL(pipe));
+       u32 dpll = pipe_config->dpll_hw_state.dpll;
        u32 fp;
        intel_clock_t clock;
+       int refclk = i9xx_pll_refclk(dev, pipe_config);
 
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-               fp = I915_READ(FP0(pipe));
+               fp = pipe_config->dpll_hw_state.fp0;
        else
-               fp = I915_READ(FP1(pipe));
+               fp = pipe_config->dpll_hw_state.fp1;
 
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev)) {
@@ -7273,14 +7642,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                default:
                        DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
                                  "mode\n", (int)(dpll & DPLL_MODE_MASK));
-                       pipe_config->adjusted_mode.clock = 0;
                        return;
                }
 
                if (IS_PINEVIEW(dev))
-                       pineview_clock(96000, &clock);
+                       pineview_clock(refclk, &clock);
                else
-                       i9xx_clock(96000, &clock);
+                       i9xx_clock(refclk, &clock);
        } else {
                bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
 
@@ -7288,13 +7656,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                        clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
                                       DPLL_FPA01_P1_POST_DIV_SHIFT);
                        clock.p2 = 14;
-
-                       if ((dpll & PLL_REF_INPUT_MASK) ==
-                           PLLB_REF_INPUT_SPREADSPECTRUMIN) {
-                               /* XXX: might not be 66MHz */
-                               i9xx_clock(66000, &clock);
-                       } else
-                               i9xx_clock(48000, &clock);
                } else {
                        if (dpll & PLL_P1_DIVIDE_BY_TWO)
                                clock.p1 = 2;
@@ -7306,59 +7667,55 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                clock.p2 = 4;
                        else
                                clock.p2 = 2;
-
-                       i9xx_clock(48000, &clock);
                }
+
+               i9xx_clock(refclk, &clock);
        }
 
-       pipe_config->adjusted_mode.clock = clock.dot;
+       /*
+        * This value includes pixel_multiplier. We will use
+        * port_clock to compute adjusted_mode.crtc_clock in the
+        * encoder's get_config() function.
+        */
+       pipe_config->port_clock = clock.dot;
 }
 
-static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
-                                   struct intel_crtc_config *pipe_config)
+int intel_dotclock_calculate(int link_freq,
+                            const struct intel_link_m_n *m_n)
 {
-       struct drm_device *dev = crtc->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
-       int link_freq, repeat;
-       u64 clock;
-       u32 link_m, link_n;
-
-       repeat = pipe_config->pixel_multiplier;
-
        /*
         * The calculation for the data clock is:
-        * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+        * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
         * But we want to avoid losing precision if possible, so:
-        * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+        * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
         *
         * and the link clock is simpler:
-        * link_clock = (m * link_clock * repeat) / n
+        * link_clock = (m * link_clock) / n
         */
 
-       /*
-        * We need to get the FDI or DP link clock here to derive
-        * the M/N dividers.
-        *
-        * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
-        * For DP, it's either 1.62GHz or 2.7GHz.
-        * We do our calculations in 10*MHz since we don't need much precison.
-        */
-       if (pipe_config->has_pch_encoder)
-               link_freq = intel_fdi_link_freq(dev) * 10000;
-       else
-               link_freq = pipe_config->port_clock;
+       if (!m_n->link_n)
+               return 0;
 
-       link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
-       link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
+       return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
+}
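A worked example of intel_dotclock_calculate(): with a 2.7 GHz link and hypothetical link M/N values, the dot clock is simply link_m * link_freq / link_n.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t link_m = 148500, link_n = 2700000;     /* hypothetical readout */
        uint64_t link_freq = 2700000;                   /* 2.7 GHz in kHz */

        if (link_n)
                printf("dotclock: %llu kHz\n",
                       (unsigned long long)(link_m * link_freq / link_n));
        return 0;
}

With these values the program prints 148500 kHz, i.e. a 148.5 MHz dot clock.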
 
-       if (!link_m || !link_n)
-               return;
+static void ironlake_pch_clock_get(struct intel_crtc *crtc,
+                                  struct intel_crtc_config *pipe_config)
+{
+       struct drm_device *dev = crtc->base.dev;
 
-       clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
-       do_div(clock, link_n);
+       /* read out port_clock from the DPLL */
+       i9xx_crtc_clock_get(crtc, pipe_config);
 
-       pipe_config->adjusted_mode.clock = clock;
+       /*
+        * This value does not include pixel_multiplier.
+        * We will check that port_clock and adjusted_mode.crtc_clock
+        * agree once we know their relationship in the encoder's
+        * get_config() function.
+        */
+       pipe_config->adjusted_mode.crtc_clock =
+               intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
+                                        &pipe_config->fdi_m_n);
 }
 
 /** Returns the currently programmed mode of the given pipe. */
@@ -7374,6 +7731,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        int hsync = I915_READ(HSYNC(cpu_transcoder));
        int vtot = I915_READ(VTOTAL(cpu_transcoder));
        int vsync = I915_READ(VSYNC(cpu_transcoder));
+       enum pipe pipe = intel_crtc->pipe;
 
        mode = kzalloc(sizeof(*mode), GFP_KERNEL);
        if (!mode)
@@ -7386,11 +7744,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
         * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
         * to use a real value here instead.
         */
-       pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+       pipe_config.cpu_transcoder = (enum transcoder) pipe;
        pipe_config.pixel_multiplier = 1;
+       pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+       pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+       pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
        i9xx_crtc_clock_get(intel_crtc, &pipe_config);
 
-       mode->clock = pipe_config.adjusted_mode.clock;
+       mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
        mode->hdisplay = (htot & 0xffff) + 1;
        mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
        mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7496,6 +7857,9 @@ void intel_mark_idle(struct drm_device *dev)
 
                intel_decrease_pllclock(crtc);
        }
+
+       if (dev_priv->info->gen >= 6)
+               gen6_rps_idle(dev->dev_private);
 }
 
 void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
@@ -7684,7 +8048,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, 0); /* aux display base address, unused */
 
        intel_mark_page_flip_active(intel_crtc);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
        return 0;
 
 err_unpin:
@@ -7726,7 +8090,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, MI_NOOP);
 
        intel_mark_page_flip_active(intel_crtc);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
        return 0;
 
 err_unpin:
@@ -7775,7 +8139,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, pf | pipesrc);
 
        intel_mark_page_flip_active(intel_crtc);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
        return 0;
 
 err_unpin:
@@ -7820,7 +8184,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, pf | pipesrc);
 
        intel_mark_page_flip_active(intel_crtc);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
        return 0;
 
 err_unpin:
@@ -7899,7 +8263,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
        intel_ring_emit(ring, (MI_NOOP));
 
        intel_mark_page_flip_active(intel_crtc);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
        return 0;
 
 err_unpin:
@@ -7944,7 +8308,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
             fb->pitches[0] != crtc->fb->pitches[0]))
                return -EINVAL;
 
-       work = kzalloc(sizeof *work, GFP_KERNEL);
+       work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL)
                return -ENOMEM;
 
@@ -8179,6 +8543,17 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
        return bpp;
 }
 
+static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
+{
+       DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
+                       "type: 0x%x flags: 0x%x\n",
+               mode->crtc_clock,
+               mode->crtc_hdisplay, mode->crtc_hsync_start,
+               mode->crtc_hsync_end, mode->crtc_htotal,
+               mode->crtc_vdisplay, mode->crtc_vsync_start,
+               mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
+}
+
 static void intel_dump_pipe_config(struct intel_crtc *crtc,
                                   struct intel_crtc_config *pipe_config,
                                   const char *context)
@@ -8195,10 +8570,19 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
                      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
                      pipe_config->fdi_m_n.tu);
+       DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+                     pipe_config->has_dp_encoder,
+                     pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
+                     pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
+                     pipe_config->dp_m_n.tu);
        DRM_DEBUG_KMS("requested mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->requested_mode);
        DRM_DEBUG_KMS("adjusted mode:\n");
        drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
+       intel_dump_crtc_timings(&pipe_config->adjusted_mode);
+       DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
+       DRM_DEBUG_KMS("pipe src size: %dx%d\n",
+                     pipe_config->pipe_src_w, pipe_config->pipe_src_h);
        DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
                      pipe_config->gmch_pfit.control,
                      pipe_config->gmch_pfit.pgm_ratios,
@@ -8208,6 +8592,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
                      pipe_config->pch_pfit.size,
                      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
        DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
+       DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
 }
 
 static bool check_encoder_cloning(struct drm_crtc *crtc)
@@ -8251,6 +8636,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 
        drm_mode_copy(&pipe_config->adjusted_mode, mode);
        drm_mode_copy(&pipe_config->requested_mode, mode);
+
        pipe_config->cpu_transcoder =
                (enum transcoder) to_intel_crtc(crtc)->pipe;
        pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -8277,13 +8663,25 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
        if (plane_bpp < 0)
                goto fail;
 
+       /*
+        * Determine the real pipe dimensions. Note that stereo modes can
+        * increase the actual pipe size due to the frame doubling and
+        * insertion of additional space for blanks between the frame. This
+        * is stored in the crtc timings. We use the requested mode to do this
+        * insertion of additional space for blanks between the frames. This
+        * can be changed by the connectors in the below retry loop.
+        */
+       drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
+       pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
+       pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
+
 encoder_retry:
        /* Ensure the port clock defaults are reset when retrying. */
        pipe_config->port_clock = 0;
        pipe_config->pixel_multiplier = 1;
 
        /* Fill in default crtc timings, allow encoders to overwrite them. */
-       drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
+       drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
 
        /* Pass our mode to the connectors and the CRTC to give them a chance to
         * adjust it according to limitations or connector properties, and also
@@ -8304,7 +8702,8 @@ encoder_retry:
        /* Set default port clock if not overwritten by the encoder. Needs to be
         * done afterwards in case the encoder adjusts the mode. */
        if (!pipe_config->port_clock)
-               pipe_config->port_clock = pipe_config->adjusted_mode.clock;
+               pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
+                       * pipe_config->pixel_multiplier;
 
        ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
        if (ret < 0) {
@@ -8491,13 +8890,9 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
 
 }
 
-static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
-                                   struct intel_crtc_config *new)
+static bool intel_fuzzy_clock_check(int clock1, int clock2)
 {
-       int clock1, clock2, diff;
-
-       clock1 = cur->adjusted_mode.clock;
-       clock2 = new->adjusted_mode.clock;
+       int diff;
 
        if (clock1 == clock2)
                return true;
@@ -8551,6 +8946,15 @@ intel_pipe_config_compare(struct drm_device *dev,
                return false; \
        }
 
+#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
+       if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
+               DRM_ERROR("mismatch in " #name " " \
+                         "(expected %i, found %i)\n", \
+                         current_config->name, \
+                         pipe_config->name); \
+               return false; \
+       }
+
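The exact tolerance used by intel_fuzzy_clock_check() lies outside the hunks shown here, so the 5% figure in this standalone sketch is only an assumption for illustration:

#include <stdbool.h>
#include <stdlib.h>

static bool clocks_roughly_equal(int clock1, int clock2)
{
        int diff = abs(clock1 - clock2);

        if (clock1 == clock2)
                return true;
        /* accept up to roughly 5% relative difference (assumed threshold) */
        return diff * 100 < (clock1 + clock2) / 2 * 5;
}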
 #define PIPE_CONF_QUIRK(quirk) \
        ((current_config->quirks | pipe_config->quirks) & (quirk))
 
@@ -8564,6 +8968,13 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_I(fdi_m_n.link_n);
        PIPE_CONF_CHECK_I(fdi_m_n.tu);
 
+       PIPE_CONF_CHECK_I(has_dp_encoder);
+       PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
+       PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
+       PIPE_CONF_CHECK_I(dp_m_n.link_m);
+       PIPE_CONF_CHECK_I(dp_m_n.link_n);
+       PIPE_CONF_CHECK_I(dp_m_n.tu);
+
        PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
        PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
        PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
@@ -8594,8 +9005,8 @@ intel_pipe_config_compare(struct drm_device *dev,
                                      DRM_MODE_FLAG_NVSYNC);
        }
 
-       PIPE_CONF_CHECK_I(requested_mode.hdisplay);
-       PIPE_CONF_CHECK_I(requested_mode.vdisplay);
+       PIPE_CONF_CHECK_I(pipe_src_w);
+       PIPE_CONF_CHECK_I(pipe_src_h);
 
        PIPE_CONF_CHECK_I(gmch_pfit.control);
        /* pfit ratios are autocomputed by the hw on gen4+ */
@@ -8610,26 +9021,28 @@ intel_pipe_config_compare(struct drm_device *dev,
 
        PIPE_CONF_CHECK_I(ips_enabled);
 
+       PIPE_CONF_CHECK_I(double_wide);
+
        PIPE_CONF_CHECK_I(shared_dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
        PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
        PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 
+       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
+               PIPE_CONF_CHECK_I(pipe_bpp);
+
+       if (!IS_HASWELL(dev)) {
+               PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
+               PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+       }
+
 #undef PIPE_CONF_CHECK_X
 #undef PIPE_CONF_CHECK_I
 #undef PIPE_CONF_CHECK_FLAGS
+#undef PIPE_CONF_CHECK_CLOCK_FUZZY
 #undef PIPE_CONF_QUIRK
 
-       if (!IS_HASWELL(dev)) {
-               if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
-                       DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
-                                 current_config->adjusted_mode.clock,
-                                 pipe_config->adjusted_mode.clock);
-                       return false;
-               }
-       }
-
        return true;
 }
 
@@ -8761,9 +9174,6 @@ check_crtc_state(struct drm_device *dev)
                                encoder->get_config(encoder, &pipe_config);
                }
 
-               if (dev_priv->display.get_clock)
-                       dev_priv->display.get_clock(crtc, &pipe_config);
-
                WARN(crtc->active != active,
                     "crtc active state doesn't match with hw state "
                     "(expected %i, found %i)\n", crtc->active, active);
@@ -8838,6 +9248,18 @@ intel_modeset_check_state(struct drm_device *dev)
        check_shared_dpll_state(dev);
 }
 
+void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+                                    int dotclock)
+{
+       /*
+        * FDI already provided one idea for the dotclock.
+        * Yell if the encoder disagrees.
+        */
+       WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
+            "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+            pipe_config->adjusted_mode.crtc_clock, dotclock);
+}
+
 static int __intel_set_mode(struct drm_crtc *crtc,
                            struct drm_display_mode *mode,
                            int x, int y, struct drm_framebuffer *fb)
@@ -8850,7 +9272,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
        unsigned disable_pipes, prepare_pipes, modeset_pipes;
        int ret = 0;
 
-       saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
+       saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
        if (!saved_mode)
                return -ENOMEM;
        saved_hwmode = saved_mode + 1;
@@ -9389,7 +9811,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
        struct intel_crtc *intel_crtc;
        int i;
 
-       intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
+       intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
        if (intel_crtc == NULL)
                return;
 
@@ -9540,7 +9962,13 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev)) {
-               /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+               if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
+                       intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
+                                       PORT_B);
+                       if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
+                               intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
+               }
+
                if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
                        intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
                                        PORT_C);
@@ -9549,12 +9977,7 @@ static void intel_setup_outputs(struct drm_device *dev)
                                              PORT_C);
                }
 
-               if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
-                       intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
-                                       PORT_B);
-                       if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
-                               intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
-               }
+               intel_dsi_init(dev);
        } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
                bool found = false;
 
@@ -9610,6 +10033,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 void intel_framebuffer_fini(struct intel_framebuffer *fb)
 {
        drm_framebuffer_cleanup(&fb->base);
+       WARN_ON(!fb->obj->framebuffer_references--);
        drm_gem_object_unreference_unlocked(&fb->obj->base);
 }
 
@@ -9641,9 +10065,12 @@ int intel_framebuffer_init(struct drm_device *dev,
                           struct drm_mode_fb_cmd2 *mode_cmd,
                           struct drm_i915_gem_object *obj)
 {
+       int aligned_height, tile_height;
        int pitch_limit;
        int ret;
 
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
        if (obj->tiling_mode == I915_TILING_Y) {
                DRM_DEBUG("hardware does not support tiling Y\n");
                return -EINVAL;
@@ -9732,8 +10159,16 @@ int intel_framebuffer_init(struct drm_device *dev,
        if (mode_cmd->offsets[0] != 0)
                return -EINVAL;
 
+       tile_height = IS_GEN2(dev) ? 16 : 8;
+       aligned_height = ALIGN(mode_cmd->height,
+                              obj->tiling_mode ? tile_height : 1);
+       /* FIXME drm helper for size checks (especially planar formats)? */
+       if (obj->base.size < aligned_height * mode_cmd->pitches[0])
+               return -EINVAL;
+
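Aside (not part of the patch): a stand-alone sketch of the size check added above: round the framebuffer height up to the tile height, then require the backing object to cover aligned_height * pitch bytes. The 16/8 tile heights mirror the hunk; the object size and mode are made up for the example.

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	int height = 1080, pitch = 1920 * 4;	/* example mode, 32bpp */
	int tiled = 1, tile_height = 8;		/* 16 on gen2, 8 otherwise */
	long obj_size = 8 * 1024 * 1024;	/* hypothetical GEM object size */

	int aligned_height = ALIGN(height, tiled ? tile_height : 1);

	if (obj_size < (long)aligned_height * pitch)
		printf("object too small: need %ld, have %ld\n",
		       (long)aligned_height * pitch, obj_size);
	else
		printf("object covers %d aligned lines\n", aligned_height);
	return 0;
}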
        drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
        intel_fb->obj = obj;
+       intel_fb->obj->framebuffer_references++;
 
        ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        if (ret) {
@@ -9759,9 +10194,15 @@ intel_user_framebuffer_create(struct drm_device *dev,
        return intel_framebuffer_create(dev, mode_cmd, obj);
 }
 
+#ifndef CONFIG_DRM_I915_FBDEV
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+#endif
+
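Aside (not part of the patch): the hunk above uses a common kernel pattern: when CONFIG_DRM_I915_FBDEV is not built, a static inline no-op stands in for the real hook so callers need no #ifdefs. A generic sketch of the same idea; the names here are illustrative, not the driver's.

#include <stdio.h>

/* #define HAVE_FBDEV 1 */	/* flip this on to compile the real hook in */

#ifdef HAVE_FBDEV
static void fbdev_output_poll_changed(void)
{
	printf("fbdev: rescanning outputs\n");
}
#else
static inline void fbdev_output_poll_changed(void)
{
	/* feature compiled out: deliberately do nothing */
}
#endif

int main(void)
{
	fbdev_output_poll_changed();	/* caller stays the same either way */
	return 0;
}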
 static const struct drm_mode_config_funcs intel_mode_funcs = {
        .fb_create = intel_user_framebuffer_create,
-       .output_poll_changed = intel_fb_output_poll_changed,
+       .output_poll_changed = intel_fbdev_output_poll_changed,
 };
 
 /* Set up chip specific display functions */
@@ -9787,7 +10228,6 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
-               dev_priv->display.get_clock = ironlake_crtc_clock_get;
                dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9795,7 +10235,6 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = ironlake_update_plane;
        } else if (IS_VALLEYVIEW(dev)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-               dev_priv->display.get_clock = i9xx_crtc_clock_get;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9803,7 +10242,6 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.update_plane = i9xx_update_plane;
        } else {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
-               dev_priv->display.get_clock = i9xx_crtc_clock_get;
                dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9979,8 +10417,7 @@ static struct intel_quirk intel_quirks[] = {
        /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
        { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
 
-       /* 830/845 need to leave pipe A & dpll A up */
-       { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+       /* 830 needs to leave pipe A & dpll A up */
        { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 
        /* Lenovo U160 cannot use SSC on LVDS */
@@ -9989,20 +10426,11 @@ static struct intel_quirk intel_quirks[] = {
        /* Sony Vaio Y cannot use SSC on LVDS */
        { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
 
-       /* Acer Aspire 5734Z must invert backlight brightness */
-       { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
-       /* Acer/eMachines G725 */
-       { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
-       /* Acer/eMachines e725 */
-       { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
-       /* Acer/Packard Bell NCL20 */
-       { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
-       /* Acer Aspire 4736Z */
-       { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+       /*
+        * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
+        * seem to use inverted backlight PWM.
+        */
+       { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
 
        /* Dell XPS13 HD Sandy Bridge */
        { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
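Aside (not part of the patch): the GM45 Acer entry above collapses five per-model quirks into one wildcard match. A toy sketch of that table lookup, with PCI_ANY_ID acting as a match-anything subsystem ID; the struct layout and sample IDs are illustrative, not the driver's.

#include <stdio.h>
#include <stdint.h>

#define PCI_ANY_ID	0xffffU

struct quirk {
	uint16_t device, subsys_vendor, subsys_device;
	const char *action;
};

static const struct quirk quirks[] = {
	/* all GM45 (0x2a42) Acer (0x1025) variants: invert brightness */
	{ 0x2a42, 0x1025, PCI_ANY_ID, "invert_brightness" },
};

int main(void)
{
	uint16_t dev = 0x2a42, sv = 0x1025, sd = 0x0459; /* e.g. an Aspire 5734Z */

	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->device == dev &&
		    (q->subsys_vendor == PCI_ANY_ID || q->subsys_vendor == sv) &&
		    (q->subsys_device == PCI_ANY_ID || q->subsys_device == sd))
			printf("applying quirk: %s\n", q->action);
	}
	return 0;
}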
@@ -10049,41 +10477,21 @@ static void i915_disable_vga(struct drm_device *dev)
        POSTING_READ(vga_reg);
 }
 
-static void i915_enable_vga_mem(struct drm_device *dev)
-{
-       /* Enable VGA memory on Intel HD */
-       if (HAS_PCH_SPLIT(dev)) {
-               vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
-               outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
-               vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
-                                                  VGA_RSRC_LEGACY_MEM |
-                                                  VGA_RSRC_NORMAL_IO |
-                                                  VGA_RSRC_NORMAL_MEM);
-               vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-       }
-}
-
-void i915_disable_vga_mem(struct drm_device *dev)
-{
-       /* Disable VGA memory on Intel HD */
-       if (HAS_PCH_SPLIT(dev)) {
-               vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
-               outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
-               vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
-                                                  VGA_RSRC_NORMAL_IO |
-                                                  VGA_RSRC_NORMAL_MEM);
-               vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
-       }
-}
-
 void intel_modeset_init_hw(struct drm_device *dev)
 {
-       intel_init_power_well(dev);
+       struct drm_i915_private *dev_priv = dev->dev_private;
 
        intel_prepare_ddi(dev);
 
        intel_init_clock_gating(dev);
 
+       /* Enable the CRI clock source so we can get at the display */
+       if (IS_VALLEYVIEW(dev))
+               I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                          DPLL_INTEGRATED_CRI_CLK_VLV);
+
+       intel_init_dpio(dev);
+
        mutex_lock(&dev->struct_mutex);
        intel_enable_gt_powersave(dev);
        mutex_unlock(&dev->struct_mutex);
@@ -10351,10 +10759,9 @@ void i915_redisable_vga(struct drm_device *dev)
            (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
                return;
 
-       if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
+       if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
                DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
                i915_disable_vga(dev);
-               i915_disable_vga_mem(dev);
        }
 }
 
@@ -10375,6 +10782,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                                                                 &crtc->config);
 
                crtc->base.enabled = crtc->active;
+               crtc->primary_enabled = crtc->active;
 
                DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
                              crtc->base.base.id,
@@ -10415,20 +10823,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                }
 
                encoder->connectors_active = false;
-               DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
+               DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
                              encoder->base.base.id,
                              drm_get_encoder_name(&encoder->base),
                              encoder->base.crtc ? "enabled" : "disabled",
-                             pipe);
-       }
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list,
-                           base.head) {
-               if (!crtc->active)
-                       continue;
-               if (dev_priv->display.get_clock)
-                       dev_priv->display.get_clock(crtc,
-                                                   &crtc->config);
+                             pipe_name(pipe));
        }
 
        list_for_each_entry(connector, &dev->mode_config.connector_list,
@@ -10455,7 +10854,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum pipe pipe;
-       struct drm_plane *plane;
        struct intel_crtc *crtc;
        struct intel_encoder *encoder;
        int i;
@@ -10502,7 +10900,12 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                pll->on = false;
        }
 
+       if (IS_HASWELL(dev))
+               ilk_wm_get_hw_state(dev);
+
        if (force_restore) {
+               i915_redisable_vga(dev);
+
                /*
                 * We need to use raw interfaces for restoring state to avoid
                 * checking (bogus) intermediate states.
@@ -10514,10 +10917,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
                        __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
                                         crtc->fb);
                }
-               list_for_each_entry(plane, &dev->mode_config.plane_list, head)
-                       intel_plane_restore(plane);
-
-               i915_redisable_vga(dev);
        } else {
                intel_modeset_update_staged_output_state(dev);
        }
@@ -10540,6 +10939,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
+       struct drm_connector *connector;
 
        /*
         * Interrupts and polling as the first thing to avoid creating havoc.
@@ -10568,8 +10968,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        intel_disable_fbc(dev);
 
-       i915_enable_vga_mem(dev);
-
        intel_disable_gt_powersave(dev);
 
        ironlake_teardown_rc6(dev);
@@ -10582,6 +10980,10 @@ void intel_modeset_cleanup(struct drm_device *dev)
        /* destroy backlight, if any, before the connectors */
        intel_panel_destroy_backlight(dev);
 
+       /* destroy the sysfs files before encoders/connectors */
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+               drm_sysfs_connector_remove(connector);
+
        drm_mode_config_cleanup(dev);
 
        intel_cleanup_overlay(dev);
@@ -10779,7 +11181,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
        }
 
        for (i = 0; i < error->num_transcoders; i++) {
-               err_printf(m, "  CPU transcoder: %c\n",
+               err_printf(m, "CPU transcoder: %c\n",
                           transcoder_name(error->transcoder[i].cpu_transcoder));
                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
index 79c14e298ba657d32f0a57493ec9500b5216ba2d..d40a5f71ddcaac93899cbec08e7297902006669b 100644 (file)
 
 #define DP_LINK_CHECK_TIMEOUT  (10 * 1000)
 
+struct dp_link_dpll {
+       int link_bw;
+       struct dpll dpll;
+};
+
+static const struct dp_link_dpll gen4_dpll[] = {
+       { DP_LINK_BW_1_62,
+               { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
+       { DP_LINK_BW_2_7,
+               { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
+};
+
+static const struct dp_link_dpll pch_dpll[] = {
+       { DP_LINK_BW_1_62,
+               { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
+       { DP_LINK_BW_2_7,
+               { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
+};
+
+static const struct dp_link_dpll vlv_dpll[] = {
+       { DP_LINK_BW_1_62,
+               { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
+       { DP_LINK_BW_2_7,
+               { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
+};
+
 /**
  * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
  * @intel_dp: DP struct
@@ -211,24 +237,77 @@ intel_hrawclk(struct drm_device *dev)
        }
 }
 
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+                                   struct intel_dp *intel_dp,
+                                   struct edp_power_seq *out);
+static void
+intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
+                                             struct intel_dp *intel_dp,
+                                             struct edp_power_seq *out);
+
+static enum pipe
+vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum port port = intel_dig_port->port;
+       enum pipe pipe;
+
+       /* modeset should have pipe */
+       if (crtc)
+               return to_intel_crtc(crtc)->pipe;
+
+       /* init time, try to find a pipe with this port selected */
+       for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
+               u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
+                       PANEL_PORT_SELECT_MASK;
+               if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
+                       return pipe;
+               if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
+                       return pipe;
+       }
+
+       /* shrug */
+       return PIPE_A;
+}
+
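Aside (not part of the patch): vlv_power_sequencer_pipe() recovers which pipe drives the panel when no CRTC is attached yet by reading back the port-select field of each pipe's panel-power register. A stand-alone sketch of that scan; the register values are faked and the select encodings are placeholders, not the hardware bits.

#include <stdio.h>
#include <stdint.h>

enum pipe { PIPE_A, PIPE_B, NUM_PIPES };
enum port { PORT_B, PORT_C };

#define PORT_SELECT_MASK	0xc0000000u
#define PORT_SELECT_DPB		0x40000000u	/* placeholder encodings */
#define PORT_SELECT_DPC		0x80000000u

/* stand-in for reading VLV_PIPE_PP_ON_DELAYS(pipe) */
static uint32_t read_pp_on_delays(enum pipe p)
{
	static const uint32_t regs[NUM_PIPES] = { 0, PORT_SELECT_DPB };
	return regs[p];
}

static enum pipe power_sequencer_pipe(enum port port)
{
	for (enum pipe p = PIPE_A; p < NUM_PIPES; p++) {
		uint32_t sel = read_pp_on_delays(p) & PORT_SELECT_MASK;

		if (sel == PORT_SELECT_DPB && port == PORT_B)
			return p;
		if (sel == PORT_SELECT_DPC && port == PORT_C)
			return p;
	}
	return PIPE_A;	/* nothing claimed the port yet; pick a default */
}

int main(void)
{
	printf("port B is sequenced by pipe %c\n",
	       'A' + power_sequencer_pipe(PORT_B));
	return 0;
}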
+static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+       if (HAS_PCH_SPLIT(dev))
+               return PCH_PP_CONTROL;
+       else
+               return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
+}
+
+static u32 _pp_stat_reg(struct intel_dp *intel_dp)
+{
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+       if (HAS_PCH_SPLIT(dev))
+               return PCH_PP_STATUS;
+       else
+               return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
+}
+
 static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pp_stat_reg;
 
-       pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-       return (I915_READ(pp_stat_reg) & PP_ON) != 0;
+       return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
 }
 
 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pp_ctrl_reg;
 
-       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-       return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
+       return (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
 }
 
 static void
@@ -236,19 +315,15 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 pp_stat_reg, pp_ctrl_reg;
 
        if (!is_edp(intel_dp))
                return;
 
-       pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
        if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
-                               I915_READ(pp_stat_reg),
-                               I915_READ(pp_ctrl_reg));
+                             I915_READ(_pp_stat_reg(intel_dp)),
+                             I915_READ(_pp_ctrl_reg(intel_dp)));
        }
 }
 
@@ -361,6 +436,12 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                goto out;
        }
 
+       /* Only 5 data registers! */
+       if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+               ret = -E2BIG;
+               goto out;
+       }
+
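Aside (not part of the patch): the new bound comes from the hardware layout called out in the comment: the AUX channel has five 32-bit data registers, so one transaction carries at most 5 * 4 = 20 bytes of payload. A tiny check mirroring that arithmetic; the names are illustrative.

#include <stdio.h>

#define AUX_DATA_REGS		5
#define AUX_MAX_PAYLOAD		(AUX_DATA_REGS * 4)	/* 20 bytes */

static int aux_xfer_ok(int send_bytes, int recv_size)
{
	return send_bytes <= AUX_MAX_PAYLOAD && recv_size <= AUX_MAX_PAYLOAD;
}

int main(void)
{
	printf("20-byte write: %s\n", aux_xfer_ok(20, 0) ? "ok" : "too big");
	printf("24-byte write: %s\n", aux_xfer_ok(24, 0) ? "ok" : "too big");
	return 0;
}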
        while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
                /* Must try at least 3 times according to DP spec */
                for (try = 0; try < 5; try++) {
@@ -451,9 +532,10 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
        int msg_bytes;
        uint8_t ack;
 
+       if (WARN_ON(send_bytes > 16))
+               return -E2BIG;
+
        intel_dp_check_edp(intel_dp);
-       if (send_bytes > 16)
-               return -1;
        msg[0] = AUX_NATIVE_WRITE << 4;
        msg[1] = address >> 8;
        msg[2] = address & 0xff;
@@ -494,6 +576,9 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
        uint8_t ack;
        int ret;
 
+       if (WARN_ON(recv_bytes > 19))
+               return -E2BIG;
+
        intel_dp_check_edp(intel_dp);
        msg[0] = AUX_NATIVE_READ << 4;
        msg[1] = address >> 8;
@@ -569,7 +654,12 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                break;
        }
 
-       for (retry = 0; retry < 5; retry++) {
+       /*
+        * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
+        * required to retry at least seven times upon receiving AUX_DEFER
+        * before giving up the AUX transaction.
+        */
+       for (retry = 0; retry < 7; retry++) {
                ret = intel_dp_aux_ch(intel_dp,
                                      msg, msg_bytes,
                                      reply, reply_bytes);
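Aside (not part of the patch): the bumped loop bound follows the DP 1.2 rule cited in the comment: a source must retry a deferred AUX transaction at least seven times before giving up. A generic retry skeleton under that assumption; the transfer function is a stub that pretends the sink defers a few times.

#include <stdio.h>

#define AUX_DEFER	(-1)
#define AUX_OK		0

/* stub transfer: the sink defers the first three attempts */
static int aux_transfer(int attempt)
{
	return attempt < 3 ? AUX_DEFER : AUX_OK;
}

int main(void)
{
	int ret = AUX_DEFER;

	/* DP 1.2, 2.7.7.1.5.6.1: retry at least 7 times on AUX_DEFER */
	for (int retry = 0; retry < 7; retry++) {
		ret = aux_transfer(retry);
		if (ret != AUX_DEFER)
			break;
	}
	printf("transaction %s\n", ret == AUX_OK ? "completed" : "gave up");
	return 0;
}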
@@ -647,7 +737,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
        strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
        intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
        intel_dp->adapter.algo_data = &intel_dp->algo;
-       intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
+       intel_dp->adapter.dev.parent = intel_connector->base.kdev;
 
        ironlake_edp_panel_vdd_on(intel_dp);
        ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
@@ -660,41 +750,30 @@ intel_dp_set_clock(struct intel_encoder *encoder,
                   struct intel_crtc_config *pipe_config, int link_bw)
 {
        struct drm_device *dev = encoder->base.dev;
+       const struct dp_link_dpll *divisor = NULL;
+       int i, count = 0;
 
        if (IS_G4X(dev)) {
-               if (link_bw == DP_LINK_BW_1_62) {
-                       pipe_config->dpll.p1 = 2;
-                       pipe_config->dpll.p2 = 10;
-                       pipe_config->dpll.n = 2;
-                       pipe_config->dpll.m1 = 23;
-                       pipe_config->dpll.m2 = 8;
-               } else {
-                       pipe_config->dpll.p1 = 1;
-                       pipe_config->dpll.p2 = 10;
-                       pipe_config->dpll.n = 1;
-                       pipe_config->dpll.m1 = 14;
-                       pipe_config->dpll.m2 = 2;
-               }
-               pipe_config->clock_set = true;
+               divisor = gen4_dpll;
+               count = ARRAY_SIZE(gen4_dpll);
        } else if (IS_HASWELL(dev)) {
                /* Haswell has special-purpose DP DDI clocks. */
        } else if (HAS_PCH_SPLIT(dev)) {
-               if (link_bw == DP_LINK_BW_1_62) {
-                       pipe_config->dpll.n = 1;
-                       pipe_config->dpll.p1 = 2;
-                       pipe_config->dpll.p2 = 10;
-                       pipe_config->dpll.m1 = 12;
-                       pipe_config->dpll.m2 = 9;
-               } else {
-                       pipe_config->dpll.n = 2;
-                       pipe_config->dpll.p1 = 1;
-                       pipe_config->dpll.p2 = 10;
-                       pipe_config->dpll.m1 = 14;
-                       pipe_config->dpll.m2 = 8;
-               }
-               pipe_config->clock_set = true;
+               divisor = pch_dpll;
+               count = ARRAY_SIZE(pch_dpll);
        } else if (IS_VALLEYVIEW(dev)) {
-               /* FIXME: Need to figure out optimized DP clocks for vlv. */
+               divisor = vlv_dpll;
+               count = ARRAY_SIZE(vlv_dpll);
+       }
+
+       if (divisor && count) {
+               for (i = 0; i < count; i++) {
+                       if (link_bw == divisor[i].link_bw) {
+                               pipe_config->dpll = divisor[i].dpll;
+                               pipe_config->clock_set = true;
+                               break;
+                       }
+               }
        }
 }
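Aside (not part of the patch): the refactor above replaces per-platform if/else ladders with the dp_link_dpll tables declared near the top of this file, followed by a single lookup by link rate. A compact user-space sketch of that table-driven selection; the divider values are copied from the gen4 table above, and the struct names are simplified stand-ins.

#include <stdio.h>

struct dpll { int p1, p2, n, m1, m2; };

struct link_dpll {
	int link_bw;		/* 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps */
	struct dpll dpll;
};

static const struct link_dpll gen4_table[] = {
	{ 0x06, { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 0x0a, { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } },
};

int main(void)
{
	int link_bw = 0x0a;	/* requested 2.7 Gbps link */

	for (size_t i = 0; i < sizeof(gen4_table) / sizeof(gen4_table[0]); i++) {
		if (gen4_table[i].link_bw == link_bw) {
			const struct dpll *d = &gen4_table[i].dpll;

			printf("dpll: p1=%d p2=%d n=%d m1=%d m2=%d\n",
			       d->p1, d->p2, d->n, d->m1, d->m2);
			break;
		}
	}
	return 0;
}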
 
@@ -737,19 +816,22 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %02x pixel clock %iKHz\n",
-                     max_lane_count, bws[max_clock], adjusted_mode->clock);
+                     max_lane_count, bws[max_clock],
+                     adjusted_mode->crtc_clock);
 
        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = pipe_config->pipe_bpp;
-       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+           dev_priv->vbt.edp_bpp < bpp) {
                DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
                              dev_priv->vbt.edp_bpp);
-               bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+               bpp = dev_priv->vbt.edp_bpp;
        }
 
        for (; bpp >= 6*3; bpp -= 2*3) {
-               mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
+               mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+                                                  bpp);
 
                for (clock = 0; clock <= max_clock; clock++) {
                        for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
@@ -794,7 +876,8 @@ found:
                      mode_rate, link_avail);
 
        intel_link_compute_m_n(bpp, lane_count,
-                              adjusted_mode->clock, pipe_config->port_clock,
+                              adjusted_mode->crtc_clock,
+                              pipe_config->port_clock,
                               &pipe_config->dp_m_n);
 
        intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
@@ -802,21 +885,6 @@ found:
        return true;
 }
 
-void intel_dp_init_link_config(struct intel_dp *intel_dp)
-{
-       memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
-       intel_dp->link_configuration[0] = intel_dp->link_bw;
-       intel_dp->link_configuration[1] = intel_dp->lane_count;
-       intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-       /*
-        * Check for DPCD version > 1.1 and enhanced framing support
-        */
-       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-           (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
-               intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-       }
-}
-
 static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -889,8 +957,6 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
                intel_write_eld(&encoder->base, adjusted_mode);
        }
 
-       intel_dp_init_link_config(intel_dp);
-
        /* Split out the IBX/CPU vs CPT settings */
 
        if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
@@ -900,7 +966,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 
-               if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+               if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;
 
                intel_dp->DP |= crtc->pipe << 29;
@@ -914,7 +980,7 @@ static void intel_dp_mode_set(struct intel_encoder *encoder)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;
 
-               if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+               if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
                        intel_dp->DP |= DP_ENHANCED_FRAMING;
 
                if (crtc->pipe == 1)
@@ -944,8 +1010,8 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_stat_reg, pp_ctrl_reg;
 
-       pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+       pp_stat_reg = _pp_stat_reg(intel_dp);
+       pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                        mask, value,
@@ -987,11 +1053,8 @@ static  u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 control;
-       u32 pp_ctrl_reg;
-
-       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-       control = I915_READ(pp_ctrl_reg);
 
+       control = I915_READ(_pp_ctrl_reg(intel_dp));
        control &= ~PANEL_UNLOCK_MASK;
        control |= PANEL_UNLOCK_REGS;
        return control;
@@ -1024,8 +1087,8 @@ void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;
 
-       pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+       pp_stat_reg = _pp_stat_reg(intel_dp);
+       pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
@@ -1053,8 +1116,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
                pp = ironlake_get_pp_control(intel_dp);
                pp &= ~EDP_FORCE_VDD;
 
-               pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
-               pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+               pp_stat_reg = _pp_stat_reg(intel_dp);
+               pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);
@@ -1119,20 +1182,19 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 
        ironlake_wait_panel_power_cycle(intel_dp);
 
+       pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
-               I915_WRITE(PCH_PP_CONTROL, pp);
-               POSTING_READ(PCH_PP_CONTROL);
+               I915_WRITE(pp_ctrl_reg, pp);
+               POSTING_READ(pp_ctrl_reg);
        }
 
        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;
 
-       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
-
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
 
@@ -1140,8 +1202,8 @@ void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 
        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
-               I915_WRITE(PCH_PP_CONTROL, pp);
-               POSTING_READ(PCH_PP_CONTROL);
+               I915_WRITE(pp_ctrl_reg, pp);
+               POSTING_READ(pp_ctrl_reg);
        }
 }
 
@@ -1164,7 +1226,7 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
         * panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
 
-       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+       pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
@@ -1197,7 +1259,7 @@ void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;
 
-       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+       pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
@@ -1221,7 +1283,7 @@ void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;
 
-       pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
+       pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
 
        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
@@ -1368,6 +1430,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+       int dotclock;
 
        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                tmp = I915_READ(intel_dp->output_reg);
@@ -1395,28 +1458,61 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
 
        pipe_config->adjusted_mode.flags |= flags;
 
-       if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+       pipe_config->has_dp_encoder = true;
+
+       intel_dp_get_m_n(crtc, pipe_config);
+
+       if (port == PORT_A) {
                if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
                        pipe_config->port_clock = 162000;
                else
                        pipe_config->port_clock = 270000;
        }
+
+       if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
+           pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
+               /*
+                * This is a big fat ugly hack.
+                *
+                * Some machines in UEFI boot mode provide us a VBT that has 18
+                * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
+                * unknown we fail to light up. Yet the same BIOS boots up with
+                * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
+                * max, not what it tells us to use.
+                *
+                * Note: This will still be broken if the eDP panel is not lit
+                * up by the BIOS, and thus we can't get the mode at module
+                * load.
+                */
+               DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+                             pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
+               dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
+       }
+
+       dotclock = intel_dotclock_calculate(pipe_config->port_clock,
+                                           &pipe_config->dp_m_n);
+
+       if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
+               ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+       pipe_config->adjusted_mode.crtc_clock = dotclock;
 }
 
-static bool is_edp_psr(struct intel_dp *intel_dp)
+static bool is_edp_psr(struct drm_device *dev)
 {
-       return is_edp(intel_dp) &&
-               intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       return dev_priv->psr.sink_support;
 }
 
 static bool intel_edp_is_psr_enabled(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!IS_HASWELL(dev))
+       if (!HAS_PSR(dev))
                return false;
 
-       return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+       return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
 }
 
 static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
@@ -1466,8 +1562,8 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
        intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
 
        /* Avoid continuous PSR exit by masking memup and hpd */
-       I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
-                  EDP_PSR_DEBUG_MASK_HPD);
+       I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
+                  EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
 
        intel_dp->psr_setup_done = true;
 }
@@ -1491,9 +1587,9 @@ static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
                                            DP_PSR_MAIN_LINK_ACTIVE);
 
        /* Setup AUX registers */
-       I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
-       I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
-       I915_WRITE(EDP_PSR_AUX_CTL,
+       I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
+       I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
+       I915_WRITE(EDP_PSR_AUX_CTL(dev),
                   DP_AUX_CH_CTL_TIME_OUT_400us |
                   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
                   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -1516,7 +1612,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
        } else
                val |= EDP_PSR_LINK_DISABLE;
 
-       I915_WRITE(EDP_PSR_CTL, val |
+       I915_WRITE(EDP_PSR_CTL(dev), val |
                   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
                   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
                   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -1533,42 +1629,33 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
        struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
        struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
-       if (!IS_HASWELL(dev)) {
+       dev_priv->psr.source_ok = false;
+
+       if (!HAS_PSR(dev)) {
                DRM_DEBUG_KMS("PSR not supported on this platform\n");
-               dev_priv->no_psr_reason = PSR_NO_SOURCE;
                return false;
        }
 
        if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
            (dig_port->port != PORT_A)) {
                DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
-               dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
-               return false;
-       }
-
-       if (!is_edp_psr(intel_dp)) {
-               DRM_DEBUG_KMS("PSR not supported by this panel\n");
-               dev_priv->no_psr_reason = PSR_NO_SINK;
                return false;
        }
 
        if (!i915_enable_psr) {
                DRM_DEBUG_KMS("PSR disable by flag\n");
-               dev_priv->no_psr_reason = PSR_MODULE_PARAM;
                return false;
        }
 
        crtc = dig_port->base.base.crtc;
        if (crtc == NULL) {
                DRM_DEBUG_KMS("crtc not active for PSR\n");
-               dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
                return false;
        }
 
        intel_crtc = to_intel_crtc(crtc);
-       if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+       if (!intel_crtc_active(crtc)) {
                DRM_DEBUG_KMS("crtc not active for PSR\n");
-               dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
                return false;
        }
 
@@ -1576,29 +1663,26 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
        if (obj->tiling_mode != I915_TILING_X ||
            obj->fence_reg == I915_FENCE_REG_NONE) {
                DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
-               dev_priv->no_psr_reason = PSR_NOT_TILED;
                return false;
        }
 
        if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
                DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
-               dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
                return false;
        }
 
        if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
            S3D_ENABLE) {
                DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
-               dev_priv->no_psr_reason = PSR_S3D_ENABLED;
                return false;
        }
 
-       if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+       if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
                DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
-               dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
                return false;
        }
 
+       dev_priv->psr.source_ok = true;
        return true;
 }
 
@@ -1637,10 +1721,11 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
        if (!intel_edp_is_psr_enabled(dev))
                return;
 
-       I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+       I915_WRITE(EDP_PSR_CTL(dev),
+                  I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
 
        /* Wait till PSR is idle */
-       if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+       if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
                       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
                DRM_ERROR("Timed out waiting for PSR Idle State\n");
 }
@@ -1654,7 +1739,7 @@ void intel_edp_psr_update(struct drm_device *dev)
                if (encoder->type == INTEL_OUTPUT_EDP) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
 
-                       if (!is_edp_psr(intel_dp))
+                       if (!is_edp_psr(dev))
                                return;
 
                        if (!intel_edp_psr_match_conditions(intel_dp))
@@ -1713,14 +1798,24 @@ static void intel_enable_dp(struct intel_encoder *encoder)
        ironlake_edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
+}
+
+static void g4x_enable_dp(struct intel_encoder *encoder)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+       intel_enable_dp(encoder);
        ironlake_edp_backlight_on(intel_dp);
 }
 
 static void vlv_enable_dp(struct intel_encoder *encoder)
 {
+       struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+       ironlake_edp_backlight_on(intel_dp);
 }
 
-static void intel_pre_enable_dp(struct intel_encoder *encoder)
+static void g4x_pre_enable_dp(struct intel_encoder *encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
@@ -1738,53 +1833,59 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
        struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
        int port = vlv_dport_to_channel(dport);
        int pipe = intel_crtc->pipe;
+       struct edp_power_seq power_seq;
        u32 val;
 
        mutex_lock(&dev_priv->dpio_lock);
 
-       val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+       val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
-       vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
-       vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
-       vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
+       vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
 
        mutex_unlock(&dev_priv->dpio_lock);
 
+       /* init power sequencer on this pipe and port */
+       intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+       intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+                                                     &power_seq);
+
        intel_enable_dp(encoder);
 
        vlv_wait_port_ready(dev_priv, port);
 }
 
-static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(encoder->base.crtc);
        int port = vlv_dport_to_channel(dport);
-
-       if (!IS_VALLEYVIEW(dev))
-               return;
+       int pipe = intel_crtc->pipe;
 
        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                                 DPIO_PCS_CLK_SOFT_RESET);
 
        /* Fix up inter-pair skew failure */
-       vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
-       vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
-       vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
@@ -1919,10 +2020,13 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(dport->base.base.crtc);
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
        int port = vlv_dport_to_channel(dport);
+       int pipe = intel_crtc->pipe;
 
        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPHASIS_0:
@@ -1998,21 +2102,22 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
        }
 
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
-       vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
-       vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x00000000);
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port), demph_reg_value);
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
                         uniqtranscale_reg_value);
-       vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
-       vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
-       vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
-       vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port), 0x0C782040);
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0x80000000);
        mutex_unlock(&dev_priv->dpio_lock);
 
        return 0;
 }
 
 static void
-intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_get_adjust_train(struct intel_dp *intel_dp,
+                      const uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
        uint8_t v = 0;
        uint8_t p = 0;
@@ -2207,14 +2312,15 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
 
 static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
-                       uint32_t dp_reg_value,
+                       uint32_t *DP,
                        uint8_t dp_train_pat)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        enum port port = intel_dig_port->port;
-       int ret;
+       uint8_t buf[sizeof(intel_dp->train_set) + 1];
+       int ret, len;
 
        if (HAS_DDI(dev)) {
                uint32_t temp = I915_READ(DP_TP_CTL(port));
@@ -2243,62 +2349,93 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
                I915_WRITE(DP_TP_CTL(port), temp);
 
        } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
-               dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
+               *DP &= ~DP_LINK_TRAIN_MASK_CPT;
 
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
-                       dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
+                       *DP |= DP_LINK_TRAIN_OFF_CPT;
                        break;
                case DP_TRAINING_PATTERN_1:
-                       dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
+                       *DP |= DP_LINK_TRAIN_PAT_1_CPT;
                        break;
                case DP_TRAINING_PATTERN_2:
-                       dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+                       *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                case DP_TRAINING_PATTERN_3:
                        DRM_ERROR("DP training pattern 3 not supported\n");
-                       dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
+                       *DP |= DP_LINK_TRAIN_PAT_2_CPT;
                        break;
                }
 
        } else {
-               dp_reg_value &= ~DP_LINK_TRAIN_MASK;
+               *DP &= ~DP_LINK_TRAIN_MASK;
 
                switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
                case DP_TRAINING_PATTERN_DISABLE:
-                       dp_reg_value |= DP_LINK_TRAIN_OFF;
+                       *DP |= DP_LINK_TRAIN_OFF;
                        break;
                case DP_TRAINING_PATTERN_1:
-                       dp_reg_value |= DP_LINK_TRAIN_PAT_1;
+                       *DP |= DP_LINK_TRAIN_PAT_1;
                        break;
                case DP_TRAINING_PATTERN_2:
-                       dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+                       *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                case DP_TRAINING_PATTERN_3:
                        DRM_ERROR("DP training pattern 3 not supported\n");
-                       dp_reg_value |= DP_LINK_TRAIN_PAT_2;
+                       *DP |= DP_LINK_TRAIN_PAT_2;
                        break;
                }
        }
 
-       I915_WRITE(intel_dp->output_reg, dp_reg_value);
+       I915_WRITE(intel_dp->output_reg, *DP);
        POSTING_READ(intel_dp->output_reg);
 
-       intel_dp_aux_native_write_1(intel_dp,
-                                   DP_TRAINING_PATTERN_SET,
-                                   dp_train_pat);
-
-       if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
+       buf[0] = dp_train_pat;
+       if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
            DP_TRAINING_PATTERN_DISABLE) {
-               ret = intel_dp_aux_native_write(intel_dp,
-                                               DP_TRAINING_LANE0_SET,
-                                               intel_dp->train_set,
-                                               intel_dp->lane_count);
-               if (ret != intel_dp->lane_count)
-                       return false;
+               /* don't write DP_TRAINING_LANEx_SET on disable */
+               len = 1;
+       } else {
+               /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
+               memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
+               len = intel_dp->lane_count + 1;
        }
 
-       return true;
+       ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET,
+                                       buf, len);
+
+       return ret == len;
+}
+
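Aside (not part of the patch): the rework above packs DP_TRAINING_PATTERN_SET and the per-lane DP_TRAINING_LANEx_SET values into one buffer so they go out in a single AUX write, since the two address ranges are adjacent in the DPCD. A small sketch of building that buffer; the 4-lane limit and byte values are illustrative.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define MAX_LANES	4

int main(void)
{
	uint8_t train_set[MAX_LANES] = { 0x11, 0x11, 0x11, 0x11 };
	uint8_t buf[1 + MAX_LANES];
	uint8_t pattern = 0x21;		/* pattern 1, scrambling disabled */
	int lane_count = 2, len;

	buf[0] = pattern;		/* DP_TRAINING_PATTERN_SET */
	if ((pattern & 0x03) == 0) {
		len = 1;		/* disabling: skip the lane bytes */
	} else {
		memcpy(buf + 1, train_set, lane_count);
		len = 1 + lane_count;	/* lane bytes follow the pattern byte */
	}

	printf("one AUX write of %d byte(s):", len);
	for (int i = 0; i < len; i++)
		printf(" %02x", buf[i]);
	printf("\n");
	return 0;
}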
+static bool
+intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+                       uint8_t dp_train_pat)
+{
+       memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+       intel_dp_set_signal_levels(intel_dp, DP);
+       return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
+}
+
+static bool
+intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
+                          const uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       intel_get_adjust_train(intel_dp, link_status);
+       intel_dp_set_signal_levels(intel_dp, DP);
+
+       I915_WRITE(intel_dp->output_reg, *DP);
+       POSTING_READ(intel_dp->output_reg);
+
+       ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET,
+                                       intel_dp->train_set,
+                                       intel_dp->lane_count);
+
+       return ret == intel_dp->lane_count;
 }
 
 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
@@ -2342,32 +2479,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
        uint8_t voltage;
        int voltage_tries, loop_tries;
        uint32_t DP = intel_dp->DP;
+       uint8_t link_config[2];
 
        if (HAS_DDI(dev))
                intel_ddi_prepare_link_retrain(encoder);
 
        /* Write the link configuration data */
-       intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
-                                 intel_dp->link_configuration,
-                                 DP_LINK_CONFIGURATION_SIZE);
+       link_config[0] = intel_dp->link_bw;
+       link_config[1] = intel_dp->lane_count;
+       if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
+               link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+       intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);
+
+       link_config[0] = 0;
+       link_config[1] = DP_SET_ANSI_8B10B;
+       intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);
 
        DP |= DP_PORT_EN;
 
-       memset(intel_dp->train_set, 0, 4);
+       /* clock recovery */
+       if (!intel_dp_reset_link_train(intel_dp, &DP,
+                                      DP_TRAINING_PATTERN_1 |
+                                      DP_LINK_SCRAMBLING_DISABLE)) {
+               DRM_ERROR("failed to enable link training\n");
+               return;
+       }
+
        voltage = 0xff;
        voltage_tries = 0;
        loop_tries = 0;
        for (;;) {
-               /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
-               uint8_t     link_status[DP_LINK_STATUS_SIZE];
-
-               intel_dp_set_signal_levels(intel_dp, &DP);
-
-               /* Set training pattern 1 */
-               if (!intel_dp_set_link_train(intel_dp, DP,
-                                            DP_TRAINING_PATTERN_1 |
-                                            DP_LINK_SCRAMBLING_DISABLE))
-                       break;
+               uint8_t link_status[DP_LINK_STATUS_SIZE];
 
                drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
                if (!intel_dp_get_link_status(intel_dp, link_status)) {
@@ -2387,10 +2529,12 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                if (i == intel_dp->lane_count) {
                        ++loop_tries;
                        if (loop_tries == 5) {
-                               DRM_DEBUG_KMS("too many full retries, give up\n");
+                               DRM_ERROR("too many full retries, give up\n");
                                break;
                        }
-                       memset(intel_dp->train_set, 0, 4);
+                       intel_dp_reset_link_train(intel_dp, &DP,
+                                                 DP_TRAINING_PATTERN_1 |
+                                                 DP_LINK_SCRAMBLING_DISABLE);
                        voltage_tries = 0;
                        continue;
                }
@@ -2399,15 +2543,18 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
                        ++voltage_tries;
                        if (voltage_tries == 5) {
-                               DRM_DEBUG_KMS("too many voltage retries, give up\n");
+                               DRM_ERROR("too many voltage retries, give up\n");
                                break;
                        }
                } else
                        voltage_tries = 0;
                voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
-               /* Compute new intel_dp->train_set as requested by target */
-               intel_get_adjust_train(intel_dp, link_status);
+               /* Update training set as requested by target */
+               if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+                       DRM_ERROR("failed to update link training\n");
+                       break;
+               }
        }
 
        intel_dp->DP = DP;
@@ -2421,11 +2568,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
        uint32_t DP = intel_dp->DP;
 
        /* channel equalization */
+       if (!intel_dp_set_link_train(intel_dp, &DP,
+                                    DP_TRAINING_PATTERN_2 |
+                                    DP_LINK_SCRAMBLING_DISABLE)) {
+               DRM_ERROR("failed to start channel equalization\n");
+               return;
+       }
+
        tries = 0;
        cr_tries = 0;
        channel_eq = false;
        for (;;) {
-               uint8_t     link_status[DP_LINK_STATUS_SIZE];
+               uint8_t link_status[DP_LINK_STATUS_SIZE];
 
                if (cr_tries > 5) {
                        DRM_ERROR("failed to train DP, aborting\n");
@@ -2433,21 +2587,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        break;
                }
 
-               intel_dp_set_signal_levels(intel_dp, &DP);
-
-               /* channel eq pattern */
-               if (!intel_dp_set_link_train(intel_dp, DP,
-                                            DP_TRAINING_PATTERN_2 |
-                                            DP_LINK_SCRAMBLING_DISABLE))
-                       break;
-
                drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
-               if (!intel_dp_get_link_status(intel_dp, link_status))
+               if (!intel_dp_get_link_status(intel_dp, link_status)) {
+                       DRM_ERROR("failed to get link status\n");
                        break;
+               }
 
                /* Make sure clock is still ok */
                if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
                        intel_dp_start_link_train(intel_dp);
+                       intel_dp_set_link_train(intel_dp, &DP,
+                                               DP_TRAINING_PATTERN_2 |
+                                               DP_LINK_SCRAMBLING_DISABLE);
                        cr_tries++;
                        continue;
                }
@@ -2461,13 +2612,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                if (tries > 5) {
                        intel_dp_link_down(intel_dp);
                        intel_dp_start_link_train(intel_dp);
+                       intel_dp_set_link_train(intel_dp, &DP,
+                                               DP_TRAINING_PATTERN_2 |
+                                               DP_LINK_SCRAMBLING_DISABLE);
                        tries = 0;
                        cr_tries++;
                        continue;
                }
 
-               /* Compute new intel_dp->train_set as requested by target */
-               intel_get_adjust_train(intel_dp, link_status);
+               /* Update training set as requested by target */
+               if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
+                       DRM_ERROR("failed to update link training\n");
+                       break;
+               }
                ++tries;
        }
 
@@ -2482,7 +2639,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
 {
-       intel_dp_set_link_train(intel_dp, intel_dp->DP,
+       intel_dp_set_link_train(intel_dp, &intel_dp->DP,
                                DP_TRAINING_PATTERN_DISABLE);
 }
 
@@ -2569,6 +2726,10 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 static bool
 intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
        char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
        if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
@@ -2584,11 +2745,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 
        /* Check if the panel supports PSR */
        memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
-       intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
-                                      intel_dp->psr_dpcd,
-                                      sizeof(intel_dp->psr_dpcd));
-       if (is_edp_psr(intel_dp))
-               DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+       if (is_edp(intel_dp)) {
+               intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+                                              intel_dp->psr_dpcd,
+                                              sizeof(intel_dp->psr_dpcd));
+               if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
+                       dev_priv->psr.sink_support = true;
+                       DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
+               }
+       }
+
        if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
              DP_DWN_STRM_PORT_PRESENT))
                return true; /* native DP sink */
@@ -2708,7 +2874,6 @@ static enum drm_connector_status
 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 {
        uint8_t *dpcd = intel_dp->dpcd;
-       bool hpd;
        uint8_t type;
 
        if (!intel_dp_get_dpcd(intel_dp))
@@ -2719,8 +2884,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
                return connector_status_connected;
 
        /* If we're HPD-aware, SINK_COUNT changes dynamically */
-       hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
-       if (hpd) {
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+           intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
                uint8_t reg;
                if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
                                                    &reg, 1))
@@ -2734,9 +2899,18 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
                return connector_status_connected;
 
        /* Well we tried, say unknown for unreliable port types */
-       type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
-       if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
-               return connector_status_unknown;
+       if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+               type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
+               if (type == DP_DS_PORT_TYPE_VGA ||
+                   type == DP_DS_PORT_TYPE_NON_EDID)
+                       return connector_status_unknown;
+       } else {
+               type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+                       DP_DWN_STRM_PORT_TYPE_MASK;
+               if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
+                   type == DP_DWN_STRM_PORT_TYPE_OTHER)
+                       return connector_status_unknown;
+       }
 
        /* Anything else is out of spec, warn and ignore */
        DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
@@ -2810,19 +2984,11 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 
        /* use cached edid if we have one */
        if (intel_connector->edid) {
-               struct edid *edid;
-               int size;
-
                /* invalid edid */
                if (IS_ERR(intel_connector->edid))
                        return NULL;
 
-               size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
-               edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
-               if (!edid)
-                       return NULL;
-
-               return edid;
+               return drm_edid_duplicate(intel_connector->edid);
        }
 
        return drm_get_edid(connector, adapter);
@@ -3030,7 +3196,6 @@ intel_dp_connector_destroy(struct drm_connector *connector)
        if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
                intel_panel_fini(&intel_connector->panel);
 
-       drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
 }
@@ -3101,7 +3266,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
 bool intel_dpd_is_edp(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct child_device_config *p_child;
+       union child_device_config *p_child;
        int i;
 
        if (!dev_priv->vbt.child_dev_num)
@@ -3110,8 +3275,8 @@ bool intel_dpd_is_edp(struct drm_device *dev)
        for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
                p_child = dev_priv->vbt.child_dev + i;
 
-               if (p_child->dvo_port == PORT_IDPD &&
-                   p_child->device_type == DEVICE_TYPE_eDP)
+               if (p_child->common.dvo_port == PORT_IDPD &&
+                   p_child->common.device_type == DEVICE_TYPE_eDP)
                        return true;
        }
        return false;
@@ -3144,24 +3309,26 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct edp_power_seq cur, vbt, spec, final;
        u32 pp_on, pp_off, pp_div, pp;
-       int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
+       int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
 
        if (HAS_PCH_SPLIT(dev)) {
-               pp_control_reg = PCH_PP_CONTROL;
+               pp_ctrl_reg = PCH_PP_CONTROL;
                pp_on_reg = PCH_PP_ON_DELAYS;
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
-               pp_control_reg = PIPEA_PP_CONTROL;
-               pp_on_reg = PIPEA_PP_ON_DELAYS;
-               pp_off_reg = PIPEA_PP_OFF_DELAYS;
-               pp_div_reg = PIPEA_PP_DIVISOR;
+               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+               pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+               pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+               pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+               pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }
 
        /* Workaround: Need to write PP_CONTROL with the unlock key as
         * the very first thing. */
        pp = ironlake_get_pp_control(intel_dp);
-       I915_WRITE(pp_control_reg, pp);
+       I915_WRITE(pp_ctrl_reg, pp);
 
        pp_on = I915_READ(pp_on_reg);
        pp_off = I915_READ(pp_off_reg);
@@ -3249,9 +3416,11 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
                pp_off_reg = PCH_PP_OFF_DELAYS;
                pp_div_reg = PCH_PP_DIVISOR;
        } else {
-               pp_on_reg = PIPEA_PP_ON_DELAYS;
-               pp_off_reg = PIPEA_PP_OFF_DELAYS;
-               pp_div_reg = PIPEA_PP_DIVISOR;
+               enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+               pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
+               pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
+               pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
        }
 
        /* And finally store the new values in the power sequencer. */
@@ -3268,12 +3437,15 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
        /* Haswell doesn't have any port selection bits for the panel
         * power sequencer any more. */
        if (IS_VALLEYVIEW(dev)) {
-               port_sel = I915_READ(pp_on_reg) & 0xc0000000;
+               if (dp_to_dig_port(intel_dp)->port == PORT_B)
+                       port_sel = PANEL_PORT_SELECT_DPB_VLV;
+               else
+                       port_sel = PANEL_PORT_SELECT_DPC_VLV;
        } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
                if (dp_to_dig_port(intel_dp)->port == PORT_A)
-                       port_sel = PANEL_POWER_PORT_DP_A;
+                       port_sel = PANEL_PORT_SELECT_DPA;
                else
-                       port_sel = PANEL_POWER_PORT_DP_D;
+                       port_sel = PANEL_PORT_SELECT_DPD;
        }
 
        pp_on |= port_sel;
@@ -3516,11 +3688,11 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;
 
-       intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+       intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
                return;
 
-       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
        if (!intel_connector) {
                kfree(intel_dig_port);
                return;
@@ -3539,12 +3711,12 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
        intel_encoder->get_hw_state = intel_dp_get_hw_state;
        intel_encoder->get_config = intel_dp_get_config;
        if (IS_VALLEYVIEW(dev)) {
-               intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
+               intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
                intel_encoder->pre_enable = vlv_pre_enable_dp;
                intel_encoder->enable = vlv_enable_dp;
        } else {
-               intel_encoder->pre_enable = intel_pre_enable_dp;
-               intel_encoder->enable = intel_enable_dp;
+               intel_encoder->pre_enable = g4x_pre_enable_dp;
+               intel_encoder->enable = g4x_enable_dp;
        }
 
        intel_dig_port->port = port;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 28cae80495e2b1e9d1f4fe0d089bbd63fd7a11b7..af1553ca0f4e8eb8c7fe86ddc0d4e3e380850184 100644
@@ -77,7 +77,6 @@
 /* the i915, i945 have a single sDVO i2c bus - which is different */
 #define MAX_OUTPUTS 6
 /* maximum connectors per crtc in the mode set */
-#define INTELFB_CONN_LIMIT 4
 
 #define INTEL_I2C_BUS_DVO 1
 #define INTEL_I2C_BUS_SDVO 2
 #define INTEL_OUTPUT_HDMI 6
 #define INTEL_OUTPUT_DISPLAYPORT 7
 #define INTEL_OUTPUT_EDP 8
-#define INTEL_OUTPUT_UNKNOWN 9
+#define INTEL_OUTPUT_DSI 9
+#define INTEL_OUTPUT_UNKNOWN 10
 
 #define INTEL_DVO_CHIP_NONE 0
 #define INTEL_DVO_CHIP_LVDS 1
 #define INTEL_DVO_CHIP_TMDS 2
 #define INTEL_DVO_CHIP_TVOUT 4
 
+#define INTEL_DSI_COMMAND_MODE 0
+#define INTEL_DSI_VIDEO_MODE   1
+
 struct intel_framebuffer {
        struct drm_framebuffer base;
        struct drm_i915_gem_object *obj;
@@ -207,8 +210,21 @@ struct intel_crtc_config {
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
        unsigned long quirks;
 
+       /* User requested mode, only valid as a starting point to
+        * compute adjusted_mode, except in the case of (S)DVO where
+        * it's also for the output timings of the (S)DVO chip.
+        * adjusted_mode will then correspond to the (S)DVO chip's
+        * preferred input timings. */
        struct drm_display_mode requested_mode;
+       /* Actual pipe timings, i.e. what we program into the pipe timing
+        * registers. adjusted_mode.crtc_clock is the pipe pixel clock. */
        struct drm_display_mode adjusted_mode;
+
+       /* Pipe source size (i.e. panel fitter input size)
+        * All planes will be positioned inside this space,
+        * and get clipped at the edges. */
+       int pipe_src_w, pipe_src_h;
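+       /*
+        * Illustrative example (hypothetical values): with a 1920x1080
+        * fixed-mode panel and a user requested 1280x720 mode plus panel
+        * fitting, requested_mode and pipe_src_w/h stay at 1280x720 while
+        * adjusted_mode carries the panel's native 1920x1080 timings that
+        * are programmed into the pipe.
+        */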
+
        /* Whether to set up the PCH/FDI. Note that we never allow sharing
         * between pch encoders and cpu encoders. */
        bool has_pch_encoder;
@@ -262,7 +278,8 @@ struct intel_crtc_config {
 
        /*
         * Frequency at which the dpll for the port should run. Differs from the
-        * adjusted dotclock e.g. for DP or 12bpc hdmi mode.
+        * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+        * already multiplied by pixel_multiplier.
         */
        int port_clock;
 
@@ -288,6 +305,14 @@ struct intel_crtc_config {
        struct intel_link_m_n fdi_m_n;
 
        bool ips_enabled;
+
+       bool double_wide;
+};
+
+struct intel_pipe_wm {
+       struct intel_wm_level wm[5];
+       uint32_t linetime;
+       bool fbc_wm_enabled;
 };
 
 struct intel_crtc {
@@ -301,8 +326,9 @@ struct intel_crtc {
         * some outputs connected to this crtc.
         */
        bool active;
+       unsigned long enabled_power_domains;
        bool eld_vld;
-       bool primary_disabled; /* is the crtc obscured by a plane? */
+       bool primary_enabled; /* is the primary plane (partially) visible? */
        bool lowfreq_avail;
        struct intel_overlay *overlay;
        struct intel_unpin_work *unpin_work;
@@ -330,6 +356,12 @@ struct intel_crtc {
        /* Access to these should be protected by dev_priv->irq_lock. */
        bool cpu_fifo_underrun_disabled;
        bool pch_fifo_underrun_disabled;
+
+       /* per-pipe watermark state */
+       struct {
+               /* watermarks currently being used  */
+               struct intel_pipe_wm active;
+       } wm;
 };
 
 struct intel_plane_wm_parameters {
@@ -417,13 +449,11 @@ struct intel_hdmi {
 };
 
 #define DP_MAX_DOWNSTREAM_PORTS                0x10
-#define DP_LINK_CONFIGURATION_SIZE     9
 
 struct intel_dp {
        uint32_t output_reg;
        uint32_t aux_ch_ctl_reg;
        uint32_t DP;
-       uint8_t  link_configuration[DP_LINK_CONFIGURATION_SIZE];
        bool has_audio;
        enum hdmi_force_audio force_audio;
        uint32_t color_range;
@@ -495,80 +525,6 @@ struct intel_unpin_work {
        bool enable_stall_check;
 };
 
-int intel_pch_rawclk(struct drm_device *dev);
-
-int intel_connector_update_modes(struct drm_connector *connector,
-                               struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-
-extern void intel_attach_force_audio_property(struct drm_connector *connector);
-extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-
-extern bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
-extern void intel_crt_init(struct drm_device *dev);
-extern void intel_hdmi_init(struct drm_device *dev,
-                           int hdmi_reg, enum port port);
-extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
-                                     struct intel_connector *intel_connector);
-extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
-                                     struct intel_crtc_config *pipe_config);
-extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
-                           bool is_sdvob);
-extern void intel_dvo_init(struct drm_device *dev);
-extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev);
-extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
-                              struct intel_ring_buffer *ring);
-extern void intel_mark_idle(struct drm_device *dev);
-extern void intel_lvds_init(struct drm_device *dev);
-extern bool intel_is_dual_link_lvds(struct drm_device *dev);
-extern void intel_dp_init(struct drm_device *dev, int output_reg,
-                         enum port port);
-extern bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
-                                   struct intel_connector *intel_connector);
-extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
-extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
-extern bool intel_dp_compute_config(struct intel_encoder *encoder,
-                                   struct intel_crtc_config *pipe_config);
-extern bool intel_dpd_is_edp(struct drm_device *dev);
-extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-extern int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
-extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
-                                     enum plane plane);
-
-/* intel_panel.c */
-extern int intel_panel_init(struct intel_panel *panel,
-                           struct drm_display_mode *fixed_mode);
-extern void intel_panel_fini(struct intel_panel *panel);
-
-extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
-                                  struct drm_display_mode *adjusted_mode);
-extern void intel_pch_panel_fitting(struct intel_crtc *crtc,
-                                   struct intel_crtc_config *pipe_config,
-                                   int fitting_mode);
-extern void intel_gmch_panel_fitting(struct intel_crtc *crtc,
-                                    struct intel_crtc_config *pipe_config,
-                                    int fitting_mode);
-extern void intel_panel_set_backlight(struct drm_device *dev,
-                                     u32 level, u32 max);
-extern int intel_panel_setup_backlight(struct drm_connector *connector);
-extern void intel_panel_enable_backlight(struct drm_device *dev,
-                                        enum pipe pipe);
-extern void intel_panel_disable_backlight(struct drm_device *dev);
-extern void intel_panel_destroy_backlight(struct drm_device *dev);
-extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
-
 struct intel_set_config {
        struct drm_encoder **save_connector_encoders;
        struct drm_crtc **save_encoder_crtcs;
@@ -577,18 +533,14 @@ struct intel_set_config {
        bool mode_changed;
 };
 
-extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
-extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
-extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_connector_dpms(struct drm_connector *, int mode);
-extern bool intel_connector_get_hw_state(struct intel_connector *connector);
-extern void intel_modeset_check_state(struct drm_device *dev);
-extern void intel_plane_restore(struct drm_plane *plane);
-extern void intel_plane_disable(struct drm_plane *plane);
-
+struct intel_load_detect_pipe {
+       struct drm_framebuffer *release_fb;
+       bool load_detect_temp;
+       int dpms_mode;
+};
 
-static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+static inline struct intel_encoder *
+intel_attached_encoder(struct drm_connector *connector)
 {
        return to_intel_connector(connector)->encoder;
 }
@@ -616,73 +568,94 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
        return container_of(intel_hdmi, struct intel_digital_port, hdmi);
 }
 
+
+/* i915_irq.c */
+bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
+                                          enum pipe pipe, bool enable);
+bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
+                                          enum transcoder pch_transcoder,
+                                          bool enable);
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void hsw_pc8_disable_interrupts(struct drm_device *dev);
+void hsw_pc8_restore_interrupts(struct drm_device *dev);
+
+
+/* intel_crt.c */
+void intel_crt_init(struct drm_device *dev);
+
+
+/* intel_ddi.c */
+void intel_prepare_ddi(struct drm_device *dev);
+void hsw_fdi_link_train(struct drm_crtc *crtc);
+void intel_ddi_init(struct drm_device *dev, enum port port);
+enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
+void intel_ddi_pll_init(struct drm_device *dev);
+void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
+                                      enum transcoder cpu_transcoder);
+void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
+bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
+void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
+void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_fdi_disable(struct drm_crtc *crtc);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+                         struct intel_crtc_config *pipe_config);
+
+
+/* intel_display.c */
+int intel_pch_rawclk(struct drm_device *dev);
+void intel_mark_busy(struct drm_device *dev);
+void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
+                       struct intel_ring_buffer *ring);
+void intel_mark_idle(struct drm_device *dev);
+void intel_crtc_restore_mode(struct drm_crtc *crtc);
+void intel_crtc_update_dpms(struct drm_crtc *crtc);
+void intel_encoder_destroy(struct drm_encoder *encoder);
+void intel_connector_dpms(struct drm_connector *, int mode);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+void intel_modeset_check_state(struct drm_device *dev);
 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
                                struct intel_digital_port *port);
-
-extern void intel_connector_attach_encoder(struct intel_connector *connector,
-                                          struct intel_encoder *encoder);
-extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
-
-extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
-                                                   struct drm_crtc *crtc);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+                                   struct intel_encoder *encoder);
+struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
+                                            struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
-extern enum transcoder
-intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
-                            enum pipe pipe);
-extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
-extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-extern void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
-
-struct intel_load_detect_pipe {
-       struct drm_framebuffer *release_fb;
-       bool load_detect_temp;
-       int dpms_mode;
-};
-extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
-                                      struct drm_display_mode *mode,
-                                      struct intel_load_detect_pipe *old);
-extern void intel_release_load_detect_pipe(struct drm_connector *connector,
-                                          struct intel_load_detect_pipe *old);
-
-extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
-                                   u16 blue, int regno);
-extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
-                                   u16 *blue, int regno);
-
-extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-                                     struct drm_i915_gem_object *obj,
-                                     struct intel_ring_buffer *pipelined);
-extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
-
-extern int intel_framebuffer_init(struct drm_device *dev,
-                                 struct intel_framebuffer *ifb,
-                                 struct drm_mode_fb_cmd2 *mode_cmd,
-                                 struct drm_i915_gem_object *obj);
-extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
-extern int intel_fbdev_init(struct drm_device *dev);
-extern void intel_fbdev_initial_config(struct drm_device *dev);
-extern void intel_fbdev_fini(struct drm_device *dev);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
-extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
-extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
-extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
-
-extern void intel_setup_overlay(struct drm_device *dev);
-extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_put_image(struct drm_device *dev, void *data,
-                                  struct drm_file *file_priv);
-extern int intel_overlay_attrs(struct drm_device *dev, void *data,
-                              struct drm_file *file_priv);
-
-extern void intel_fb_output_poll_changed(struct drm_device *dev);
-extern void intel_fb_restore_mode(struct drm_device *dev);
-
-struct intel_shared_dpll *
-intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
-
+enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
+                                            enum pipe pipe);
+void intel_wait_for_vblank(struct drm_device *dev, int pipe);
+void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
+void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port);
+bool intel_get_load_detect_pipe(struct drm_connector *connector,
+                               struct drm_display_mode *mode,
+                               struct intel_load_detect_pipe *old);
+void intel_release_load_detect_pipe(struct drm_connector *connector,
+                                   struct intel_load_detect_pipe *old);
+int intel_pin_and_fence_fb_obj(struct drm_device *dev,
+                              struct drm_i915_gem_object *obj,
+                              struct intel_ring_buffer *pipelined);
+void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
+int intel_framebuffer_init(struct drm_device *dev,
+                          struct intel_framebuffer *ifb,
+                          struct drm_mode_fb_cmd2 *mode_cmd,
+                          struct drm_i915_gem_object *obj);
+void intel_framebuffer_fini(struct intel_framebuffer *fb);
+void intel_prepare_page_flip(struct drm_device *dev, int plane);
+void intel_finish_page_flip(struct drm_device *dev, int pipe);
+void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
+struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
                        struct intel_shared_dpll *pll,
                        bool state);
@@ -696,103 +669,197 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
                       enum pipe pipe, bool state);
 #define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
 #define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
-                       bool state);
+void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+void intel_write_eld(struct drm_encoder *encoder,
+                    struct drm_display_mode *mode);
+unsigned long intel_gen4_compute_page_offset(int *x, int *y,
+                                            unsigned int tiling_mode,
+                                            unsigned int bpp,
+                                            unsigned int pitch);
+void intel_display_handle_reset(struct drm_device *dev);
+void hsw_enable_pc8_work(struct work_struct *__work);
+void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
+void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
+void intel_dp_get_m_n(struct intel_crtc *crtc,
+                     struct intel_crtc_config *pipe_config);
+int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
+void
+ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
+                               int dotclock);
+bool intel_crtc_active(struct drm_crtc *crtc);
+void i915_disable_vga_mem(struct drm_device *dev);
+void hsw_enable_ips(struct intel_crtc *crtc);
+void hsw_disable_ips(struct intel_crtc *crtc);
+
+
+/* intel_dp.c */
+void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
+bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+                            struct intel_connector *intel_connector);
+void intel_dp_start_link_train(struct intel_dp *intel_dp);
+void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+void intel_dp_check_link_status(struct intel_dp *intel_dp);
+bool intel_dp_compute_config(struct intel_encoder *encoder,
+                            struct intel_crtc_config *pipe_config);
+bool intel_dpd_is_edp(struct drm_device *dev);
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
+void ironlake_edp_panel_on(struct intel_dp *intel_dp);
+void ironlake_edp_panel_off(struct intel_dp *intel_dp);
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
+void intel_edp_psr_enable(struct intel_dp *intel_dp);
+void intel_edp_psr_disable(struct intel_dp *intel_dp);
+void intel_edp_psr_update(struct drm_device *dev);
+
+
+/* intel_dsi.c */
+bool intel_dsi_init(struct drm_device *dev);
+
+
+/* intel_dvo.c */
+void intel_dvo_init(struct drm_device *dev);
+
+
+/* legacy fbdev emulation in intel_fbdev.c */
+#ifdef CONFIG_DRM_I915_FBDEV
+extern int intel_fbdev_init(struct drm_device *dev);
+extern void intel_fbdev_initial_config(struct drm_device *dev);
+extern void intel_fbdev_fini(struct drm_device *dev);
+extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
+extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
+extern void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+       return 0;
+}
 
-extern void intel_init_clock_gating(struct drm_device *dev);
-extern void intel_suspend_hw(struct drm_device *dev);
-extern void intel_write_eld(struct drm_encoder *encoder,
-                           struct drm_display_mode *mode);
-extern void intel_prepare_ddi(struct drm_device *dev);
-extern void hsw_fdi_link_train(struct drm_crtc *crtc);
-extern void intel_ddi_init(struct drm_device *dev, enum port port);
-
-/* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void intel_update_watermarks(struct drm_device *dev);
-extern void intel_update_sprite_watermarks(struct drm_plane *plane,
-                                          struct drm_crtc *crtc,
-                                          uint32_t sprite_width, int pixel_size,
-                                          bool enabled, bool scaled);
-
-extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
-                                                   unsigned int tiling_mode,
-                                                   unsigned int bpp,
-                                                   unsigned int pitch);
-
-extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
-                                    struct drm_file *file_priv);
-extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
-                                    struct drm_file *file_priv);
-
-/* Power-related functions, located in intel_pm.c */
-extern void intel_init_pm(struct drm_device *dev);
-/* FBC */
-extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_update_fbc(struct drm_device *dev);
-/* IPS */
-extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-extern void intel_gpu_ips_teardown(void);
-
-/* Power well */
-extern int i915_init_power_well(struct drm_device *dev);
-extern void i915_remove_power_well(struct drm_device *dev);
-
-extern bool intel_display_power_enabled(struct drm_device *dev,
-                                       enum intel_display_power_domain domain);
-extern void intel_init_power_well(struct drm_device *dev);
-extern void intel_set_power_well(struct drm_device *dev, bool enable);
-extern void intel_enable_gt_powersave(struct drm_device *dev);
-extern void intel_disable_gt_powersave(struct drm_device *dev);
-extern void ironlake_teardown_rc6(struct drm_device *dev);
+static inline void intel_fbdev_initial_config(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
+
+/* intel_hdmi.c */
+void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+                              struct intel_connector *intel_connector);
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+bool intel_hdmi_compute_config(struct intel_encoder *encoder,
+                              struct intel_crtc_config *pipe_config);
+
+
+/* intel_lvds.c */
+void intel_lvds_init(struct drm_device *dev);
+bool intel_is_dual_link_lvds(struct drm_device *dev);
+
+
+/* intel_modes.c */
+int intel_connector_update_modes(struct drm_connector *connector,
+                                struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+
+
+/* intel_overlay.c */
+void intel_setup_overlay(struct drm_device *dev);
+void intel_cleanup_overlay(struct drm_device *dev);
+int intel_overlay_switch_off(struct intel_overlay *overlay);
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+
+
+/* intel_panel.c */
+int intel_panel_init(struct intel_panel *panel,
+                    struct drm_display_mode *fixed_mode);
+void intel_panel_fini(struct intel_panel *panel);
+void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+                           struct drm_display_mode *adjusted_mode);
+void intel_pch_panel_fitting(struct intel_crtc *crtc,
+                            struct intel_crtc_config *pipe_config,
+                            int fitting_mode);
+void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+                             struct intel_crtc_config *pipe_config,
+                             int fitting_mode);
+void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max);
+int intel_panel_setup_backlight(struct drm_connector *connector);
+void intel_panel_enable_backlight(struct drm_device *dev, enum pipe pipe);
+void intel_panel_disable_backlight(struct drm_device *dev);
+void intel_panel_destroy_backlight(struct drm_device *dev);
+enum drm_connector_status intel_panel_detect(struct drm_device *dev);
+
+
+/* intel_pm.c */
+void intel_init_clock_gating(struct drm_device *dev);
+void intel_suspend_hw(struct drm_device *dev);
+void intel_update_watermarks(struct drm_crtc *crtc);
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+                                   struct drm_crtc *crtc,
+                                   uint32_t sprite_width, int pixel_size,
+                                   bool enabled, bool scaled);
+void intel_init_pm(struct drm_device *dev);
+bool intel_fbc_enabled(struct drm_device *dev);
+void intel_update_fbc(struct drm_device *dev);
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_teardown(void);
+int i915_init_power_well(struct drm_device *dev);
+void i915_remove_power_well(struct drm_device *dev);
+bool intel_display_power_enabled(struct drm_device *dev,
+                                enum intel_display_power_domain domain);
+void intel_display_power_get(struct drm_device *dev,
+                            enum intel_display_power_domain domain);
+void intel_display_power_put(struct drm_device *dev,
+                            enum intel_display_power_domain domain);
+void intel_init_power_well(struct drm_device *dev);
+void intel_set_power_well(struct drm_device *dev, bool enable);
+void intel_enable_gt_powersave(struct drm_device *dev);
+void intel_disable_gt_powersave(struct drm_device *dev);
+void ironlake_teardown_rc6(struct drm_device *dev);
 void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_device *dev);
+
+
+/* intel_sdvo.c */
+bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
+
+
+/* intel_sprite.c */
+int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
+void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
+                              enum plane plane);
+void intel_plane_restore(struct drm_plane *plane);
+void intel_plane_disable(struct drm_plane *plane);
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+
 
-extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
-                                  enum pipe *pipe);
-extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
-extern void intel_ddi_pll_init(struct drm_device *dev);
-extern void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
-extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-                                             enum transcoder cpu_transcoder);
-extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
-extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
-extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc);
-extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
-extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
-extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
-extern bool
-intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
-
-extern void intel_display_handle_reset(struct drm_device *dev);
-extern bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
-                                                 enum pipe pipe,
-                                                 bool enable);
-extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
-                                                enum transcoder pch_transcoder,
-                                                bool enable);
-
-extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
-extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
-extern void intel_edp_psr_update(struct drm_device *dev);
-extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
-                             bool switch_to_fclk, bool allow_power_down);
-extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
-extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
-                              uint32_t mask);
-extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
-                              uint32_t mask);
-extern void hsw_enable_pc8_work(struct work_struct *__work);
-extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
-extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
-extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
-extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
-extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
-extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
-extern void i915_disable_vga_mem(struct drm_device *dev);
+/* intel_tv.c */
+void intel_tv_init(struct drm_device *dev);
 
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 0000000..d257b09
--- /dev/null
@@ -0,0 +1,620 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include <drm/i915_drm.h>
+#include <linux/slab.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/* the sub-encoders aka panel drivers */
+static const struct intel_dsi_device intel_dsi_devices[] = {
+};
+
+
+static void vlv_cck_modify(struct drm_i915_private *dev_priv, u32 reg, u32 val,
+                          u32 mask)
+{
+       u32 tmp = vlv_cck_read(dev_priv, reg);
+       tmp &= ~mask;
+       tmp |= val;
+       vlv_cck_write(dev_priv, reg, tmp);
+}
+
+static void band_gap_wa(struct drm_i915_private *dev_priv)
+{
+       mutex_lock(&dev_priv->dpio_lock);
+
+       /* Enable bandgap fix in GOP driver */
+       vlv_cck_modify(dev_priv, 0x6D, 0x00010000, 0x00030000);
+       msleep(20);
+       vlv_cck_modify(dev_priv, 0x6E, 0x00010000, 0x00030000);
+       msleep(20);
+       vlv_cck_modify(dev_priv, 0x6F, 0x00010000, 0x00030000);
+       msleep(20);
+       vlv_cck_modify(dev_priv, 0x00, 0x00008000, 0x00008000);
+       msleep(20);
+       vlv_cck_modify(dev_priv, 0x00, 0x00000000, 0x00008000);
+       msleep(20);
+
+       /* Turn Display Trunk on */
+       vlv_cck_modify(dev_priv, 0x6B, 0x00020000, 0x00030000);
+       msleep(20);
+
+       vlv_cck_modify(dev_priv, 0x6C, 0x00020000, 0x00030000);
+       msleep(20);
+
+       vlv_cck_modify(dev_priv, 0x6D, 0x00020000, 0x00030000);
+       msleep(20);
+       vlv_cck_modify(dev_priv, 0x6E, 0x00020000, 0x00030000);
+       msleep(20);
+       vlv_cck_modify(dev_priv, 0x6F, 0x00020000, 0x00030000);
+
+       mutex_unlock(&dev_priv->dpio_lock);
+
+       /* Need huge delay, otherwise clock is not stable */
+       msleep(100);
+}
+
+static struct intel_dsi *intel_attached_dsi(struct drm_connector *connector)
+{
+       return container_of(intel_attached_encoder(connector),
+                           struct intel_dsi, base);
+}
+
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+       return intel_dsi->dev.type == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+       return intel_dsi->dev.type == INTEL_DSI_COMMAND_MODE;
+}
+
+static void intel_dsi_hot_plug(struct intel_encoder *encoder)
+{
+       DRM_DEBUG_KMS("\n");
+}
+
+static bool intel_dsi_compute_config(struct intel_encoder *encoder,
+                                    struct intel_crtc_config *config)
+{
+       struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+                                                  base);
+       struct intel_connector *intel_connector = intel_dsi->attached_connector;
+       struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+       struct drm_display_mode *adjusted_mode = &config->adjusted_mode;
+       struct drm_display_mode *mode = &config->requested_mode;
+
+       DRM_DEBUG_KMS("\n");
+
+       if (fixed_mode)
+               intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
+       if (intel_dsi->dev.dev_ops->mode_fixup)
+               return intel_dsi->dev.dev_ops->mode_fixup(&intel_dsi->dev,
+                                                         mode, adjusted_mode);
+
+       return true;
+}
+
+static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
+{
+       DRM_DEBUG_KMS("\n");
+
+       vlv_enable_dsi_pll(encoder);
+}
+
+static void intel_dsi_pre_enable(struct intel_encoder *encoder)
+{
+       DRM_DEBUG_KMS("\n");
+}
+
+static void intel_dsi_enable(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       int pipe = intel_crtc->pipe;
+       u32 temp;
+
+       DRM_DEBUG_KMS("\n");
+
+       temp = I915_READ(MIPI_DEVICE_READY(pipe));
+       if ((temp & DEVICE_READY) == 0) {
+               temp &= ~ULPS_STATE_MASK;
+               I915_WRITE(MIPI_DEVICE_READY(pipe), temp | DEVICE_READY);
+       } else if (temp & ULPS_STATE_MASK) {
+               temp &= ~ULPS_STATE_MASK;
+               I915_WRITE(MIPI_DEVICE_READY(pipe), temp | ULPS_STATE_EXIT);
+               /*
+                * We need to ensure that there is a minimum of 1 ms time
+                * available before clearing the ULPS exit state.
+                */
+               msleep(2);
+               I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+       }
+
+       if (is_cmd_mode(intel_dsi))
+               I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(pipe), 8 * 4);
+
+       if (is_vid_mode(intel_dsi)) {
+               msleep(20); /* XXX */
+               dpi_send_cmd(intel_dsi, TURN_ON);
+               msleep(100);
+
+               /* assert ip_tg_enable signal */
+               temp = I915_READ(MIPI_PORT_CTRL(pipe));
+               I915_WRITE(MIPI_PORT_CTRL(pipe), temp | DPI_ENABLE);
+               POSTING_READ(MIPI_PORT_CTRL(pipe));
+       }
+
+       intel_dsi->dev.dev_ops->enable(&intel_dsi->dev);
+}
+
+static void intel_dsi_disable(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       int pipe = intel_crtc->pipe;
+       u32 temp;
+
+       DRM_DEBUG_KMS("\n");
+
+       intel_dsi->dev.dev_ops->disable(&intel_dsi->dev);
+
+       if (is_vid_mode(intel_dsi)) {
+               dpi_send_cmd(intel_dsi, SHUTDOWN);
+               msleep(10);
+
+               /* de-assert ip_tg_enable signal */
+               temp = I915_READ(MIPI_PORT_CTRL(pipe));
+               I915_WRITE(MIPI_PORT_CTRL(pipe), temp & ~DPI_ENABLE);
+               POSTING_READ(MIPI_PORT_CTRL(pipe));
+
+               msleep(2);
+       }
+
+       temp = I915_READ(MIPI_DEVICE_READY(pipe));
+       if (temp & DEVICE_READY) {
+               temp &= ~DEVICE_READY;
+               temp &= ~ULPS_STATE_MASK;
+               I915_WRITE(MIPI_DEVICE_READY(pipe), temp);
+       }
+}
+
+static void intel_dsi_post_disable(struct intel_encoder *encoder)
+{
+       DRM_DEBUG_KMS("\n");
+
+       vlv_disable_dsi_pll(encoder);
+}
+
+static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
+                                  enum pipe *pipe)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       u32 port, func;
+       enum pipe p;
+
+       DRM_DEBUG_KMS("\n");
+
+       /* XXX: this only works for one DSI output */
+       for (p = PIPE_A; p <= PIPE_B; p++) {
+               port = I915_READ(MIPI_PORT_CTRL(p));
+               func = I915_READ(MIPI_DSI_FUNC_PRG(p));
+
+               if ((port & DPI_ENABLE) || (func & CMD_MODE_DATA_WIDTH_MASK)) {
+                       if (I915_READ(MIPI_DEVICE_READY(p)) & DEVICE_READY) {
+                               *pipe = p;
+                               return true;
+                       }
+               }
+       }
+
+       return false;
+}
+
+static void intel_dsi_get_config(struct intel_encoder *encoder,
+                                struct intel_crtc_config *pipe_config)
+{
+       DRM_DEBUG_KMS("\n");
+
+       /* XXX: read flags, set to adjusted_mode */
+}
+
+static int intel_dsi_mode_valid(struct drm_connector *connector,
+                               struct drm_display_mode *mode)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+       struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+
+       DRM_DEBUG_KMS("\n");
+
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
+               DRM_DEBUG_KMS("MODE_NO_DBLESCAN\n");
+               return MODE_NO_DBLESCAN;
+       }
+
+       if (fixed_mode) {
+               if (mode->hdisplay > fixed_mode->hdisplay)
+                       return MODE_PANEL;
+               if (mode->vdisplay > fixed_mode->vdisplay)
+                       return MODE_PANEL;
+       }
+
+       return intel_dsi->dev.dev_ops->mode_valid(&intel_dsi->dev, mode);
+}
+
+/* return txclkesc cycles in terms of divider and duration in us */
+static u16 txclkesc(u32 divider, unsigned int us)
+{
+       switch (divider) {
+       case ESCAPE_CLOCK_DIVIDER_1:
+       default:
+               return 20 * us;
+       case ESCAPE_CLOCK_DIVIDER_2:
+               return 10 * us;
+       case ESCAPE_CLOCK_DIVIDER_4:
+               return 5 * us;
+       }
+}
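+
+/*
+ * Illustrative example: with ESCAPE_CLOCK_DIVIDER_1 the escape clock runs
+ * at 20 MHz, i.e. 20 cycles per microsecond, so the
+ * txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100) used for MIPI_INIT_COUNT below
+ * evaluates to 2000 cycles; ESCAPE_CLOCK_DIVIDER_4 would give 500.
+ */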
+
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count)
+{
+       return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp, 8), lane_count);
+}
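+
+/*
+ * Illustrative example: 1920 pixels at 24 bpp over 4 lanes is
+ * DIV_ROUND_UP(DIV_ROUND_UP(1920 * 24, 8), 4) = DIV_ROUND_UP(5760, 4) =
+ * 1440 high speed byte clock ticks.
+ */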
+
+static void set_dsi_timings(struct drm_encoder *encoder,
+                           const struct drm_display_mode *mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+       int pipe = intel_crtc->pipe;
+       unsigned int bpp = intel_crtc->config.pipe_bpp;
+       unsigned int lane_count = intel_dsi->lane_count;
+
+       u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+
+       hactive = mode->hdisplay;
+       hfp = mode->hsync_start - mode->hdisplay;
+       hsync = mode->hsync_end - mode->hsync_start;
+       hbp = mode->htotal - mode->hsync_end;
+
+       vfp = mode->vsync_start - mode->vdisplay;
+       vsync = mode->vsync_end - mode->vsync_start;
+       vbp = mode->vtotal - mode->vsync_end;
+
+       /* horizontal values are in terms of high speed byte clock */
+       hactive = txbyteclkhs(hactive, bpp, lane_count);
+       hfp = txbyteclkhs(hfp, bpp, lane_count);
+       hsync = txbyteclkhs(hsync, bpp, lane_count);
+       hbp = txbyteclkhs(hbp, bpp, lane_count);
+
+       I915_WRITE(MIPI_HACTIVE_AREA_COUNT(pipe), hactive);
+       I915_WRITE(MIPI_HFP_COUNT(pipe), hfp);
+
+       /* meaningful only for video mode with non-burst sync pulses; can be
+        * zero for non-burst sync events and burst modes */
+       I915_WRITE(MIPI_HSYNC_PADDING_COUNT(pipe), hsync);
+       I915_WRITE(MIPI_HBP_COUNT(pipe), hbp);
+
+       /* vertical values are in terms of lines */
+       I915_WRITE(MIPI_VFP_COUNT(pipe), vfp);
+       I915_WRITE(MIPI_VSYNC_PADDING_COUNT(pipe), vsync);
+       I915_WRITE(MIPI_VBP_COUNT(pipe), vbp);
+}
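+
+/*
+ * Illustrative example, assuming a hypothetical 1920x1080 panel timing
+ * (hsync_start 2008, hsync_end 2052, htotal 2200) at 24 bpp over 4 lanes:
+ * hfp = 88, hsync = 44 and hbp = 148 pixels, which the txbyteclkhs()
+ * conversion above turns into 66, 33 and 111 byte clock ticks before the
+ * MIPI_*_COUNT registers are written.
+ */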
+
+static void intel_dsi_mode_set(struct intel_encoder *intel_encoder)
+{
+       struct drm_encoder *encoder = &intel_encoder->base;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+       struct drm_display_mode *adjusted_mode =
+               &intel_crtc->config.adjusted_mode;
+       int pipe = intel_crtc->pipe;
+       unsigned int bpp = intel_crtc->config.pipe_bpp;
+       u32 val, tmp;
+
+       DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
+       /* Update the DSI PLL */
+       vlv_enable_dsi_pll(intel_encoder);
+
+       /* XXX: Location of the call */
+       band_gap_wa(dev_priv);
+
+       /* escape clock divider, 20MHz, shared for A and C. device ready must be
+        * off when doing this! txclkesc? */
+       tmp = I915_READ(MIPI_CTRL(0));
+       tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
+       I915_WRITE(MIPI_CTRL(0), tmp | ESCAPE_CLOCK_DIVIDER_1);
+
+       /* read request priority is per pipe */
+       tmp = I915_READ(MIPI_CTRL(pipe));
+       tmp &= ~READ_REQUEST_PRIORITY_MASK;
+       I915_WRITE(MIPI_CTRL(pipe), tmp | READ_REQUEST_PRIORITY_HIGH);
+
+       /* XXX: why here, why like this? handling in irq handler?! */
+       I915_WRITE(MIPI_INTR_STAT(pipe), 0xffffffff);
+       I915_WRITE(MIPI_INTR_EN(pipe), 0xffffffff);
+
+       I915_WRITE(MIPI_DPHY_PARAM(pipe),
+                  0x3c << EXIT_ZERO_COUNT_SHIFT |
+                  0x1f << TRAIL_COUNT_SHIFT |
+                  0xc5 << CLK_ZERO_COUNT_SHIFT |
+                  0x1f << PREPARE_COUNT_SHIFT);
+
+       I915_WRITE(MIPI_DPI_RESOLUTION(pipe),
+                  adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
+                  adjusted_mode->hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+
+       set_dsi_timings(encoder, adjusted_mode);
+
+       val = intel_dsi->lane_count << DATA_LANES_PRG_REG_SHIFT;
+       if (is_cmd_mode(intel_dsi)) {
+               val |= intel_dsi->channel << CMD_MODE_CHANNEL_NUMBER_SHIFT;
+               val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */
+       } else {
+               val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT;
+
+               /* XXX: cross-check bpp vs. pixel format? */
+               val |= intel_dsi->pixel_format;
+       }
+       I915_WRITE(MIPI_DSI_FUNC_PRG(pipe), val);
+
+       /* Timeouts for recovery, one frame IIUC. If the counter expires, EOT
+        * and stop state. */
+
+       /*
+        * In burst mode, the value must be greater than one DPI line time in
+        * byte clocks (txbyteclkhs); programming 1 more than that value is
+        * recommended so the timer can actually expire.
+        *
+        * In non-burst mode, the value must be greater than one DPI frame time
+        * in byte clocks (txbyteclkhs); again, 1 more than that value is
+        * recommended.
+        *
+        * In DBI-only mode, the value must be greater than one DBI frame time
+        * in byte clocks (txbyteclkhs); again, 1 more than that value is
+        * recommended.
+        */
+
+       if (is_vid_mode(intel_dsi) &&
+           intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+               I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+                          txbyteclkhs(adjusted_mode->htotal, bpp,
+                                      intel_dsi->lane_count) + 1);
+       } else {
+               I915_WRITE(MIPI_HS_TX_TIMEOUT(pipe),
+                          txbyteclkhs(adjusted_mode->vtotal *
+                                      adjusted_mode->htotal,
+                                      bpp, intel_dsi->lane_count) + 1);
+       }
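+       /* Illustration (editor's note): in burst mode with htotal = 2200,
+        * 24 bpp and 4 lanes the value programmed above is
+        * txbyteclkhs(2200, 24, 4) + 1 = 1651; the non-burst case uses the
+        * whole frame (htotal * vtotal) instead of a single line. */
+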
+       I915_WRITE(MIPI_LP_RX_TIMEOUT(pipe), 8309); /* max */
+       I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(pipe), 0x14); /* max */
+       I915_WRITE(MIPI_DEVICE_RESET_TIMER(pipe), 0xffff); /* max */
+
+       /* dphy stuff */
+
+       /* in terms of low power clock */
+       I915_WRITE(MIPI_INIT_COUNT(pipe), txclkesc(ESCAPE_CLOCK_DIVIDER_1, 100));
+
+       /* recovery disables */
+       I915_WRITE(MIPI_EOT_DISABLE(pipe), intel_dsi->eot_disable);
+
+       /* in terms of txbyteclkhs. actual high to low switch +
+        * MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
+        *
+        * XXX: write MIPI_STOP_STATE_STALL?
+        */
+       I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(pipe), 0x46);
+
+       /* XXX: low power clock equivalence in terms of byte clock. the number
+        * of byte clocks occupied in one low power clock. based on txbyteclkhs
+        * and txclkesc. txclkesc time / txbyteclk time * (105 +
+        * MIPI_STOP_STATE_STALL) / 105.???
+        */
+       I915_WRITE(MIPI_LP_BYTECLK(pipe), 4);
+
+       /* The bandwidth needed to transmit 16 long packets of 252 bytes each
+        * (meant for the DCS write memory command) is programmed in this
+        * register in byte clocks. The time taken to transmit those 16 long
+        * packets in a DSI stream varies with the DSI transfer rate and the
+        * number of configured lanes. */
+       I915_WRITE(MIPI_DBI_BW_CTRL(pipe), 0x820);
+
+       I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(pipe),
+                  0xa << LP_HS_SSW_CNT_SHIFT |
+                  0x14 << HS_LP_PWR_SW_CNT_SHIFT);
+
+       if (is_vid_mode(intel_dsi))
+               I915_WRITE(MIPI_VIDEO_MODE_FORMAT(pipe),
+                          intel_dsi->video_mode_format);
+}
+
+static enum drm_connector_status
+intel_dsi_detect(struct drm_connector *connector, bool force)
+{
+       struct intel_dsi *intel_dsi = intel_attached_dsi(connector);
+       DRM_DEBUG_KMS("\n");
+       return intel_dsi->dev.dev_ops->detect(&intel_dsi->dev);
+}
+
+static int intel_dsi_get_modes(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_display_mode *mode;
+
+       DRM_DEBUG_KMS("\n");
+
+       if (!intel_connector->panel.fixed_mode) {
+               DRM_DEBUG_KMS("no fixed mode\n");
+               return 0;
+       }
+
+       mode = drm_mode_duplicate(connector->dev,
+                                 intel_connector->panel.fixed_mode);
+       if (!mode) {
+               DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+               return 0;
+       }
+
+       drm_mode_probed_add(connector, mode);
+       return 1;
+}
+
+static void intel_dsi_destroy(struct drm_connector *connector)
+{
+       struct intel_connector *intel_connector = to_intel_connector(connector);
+
+       DRM_DEBUG_KMS("\n");
+       intel_panel_fini(&intel_connector->panel);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+}
+
+static const struct drm_encoder_funcs intel_dsi_funcs = {
+       .destroy = intel_encoder_destroy,
+};
+
+static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
+       .get_modes = intel_dsi_get_modes,
+       .mode_valid = intel_dsi_mode_valid,
+       .best_encoder = intel_best_encoder,
+};
+
+static const struct drm_connector_funcs intel_dsi_connector_funcs = {
+       .dpms = intel_connector_dpms,
+       .detect = intel_dsi_detect,
+       .destroy = intel_dsi_destroy,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+};
+
+bool intel_dsi_init(struct drm_device *dev)
+{
+       struct intel_dsi *intel_dsi;
+       struct intel_encoder *intel_encoder;
+       struct drm_encoder *encoder;
+       struct intel_connector *intel_connector;
+       struct drm_connector *connector;
+       struct drm_display_mode *fixed_mode = NULL;
+       const struct intel_dsi_device *dsi;
+       unsigned int i;
+
+       DRM_DEBUG_KMS("\n");
+
+       intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+       if (!intel_dsi)
+               return false;
+
+       intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+       if (!intel_connector) {
+               kfree(intel_dsi);
+               return false;
+       }
+
+       intel_encoder = &intel_dsi->base;
+       encoder = &intel_encoder->base;
+       intel_dsi->attached_connector = intel_connector;
+
+       connector = &intel_connector->base;
+
+       drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI);
+
+       /* XXX: very likely not all of these are needed */
+       intel_encoder->hot_plug = intel_dsi_hot_plug;
+       intel_encoder->compute_config = intel_dsi_compute_config;
+       intel_encoder->pre_pll_enable = intel_dsi_pre_pll_enable;
+       intel_encoder->pre_enable = intel_dsi_pre_enable;
+       intel_encoder->enable = intel_dsi_enable;
+       intel_encoder->mode_set = intel_dsi_mode_set;
+       intel_encoder->disable = intel_dsi_disable;
+       intel_encoder->post_disable = intel_dsi_post_disable;
+       intel_encoder->get_hw_state = intel_dsi_get_hw_state;
+       intel_encoder->get_config = intel_dsi_get_config;
+
+       intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+       for (i = 0; i < ARRAY_SIZE(intel_dsi_devices); i++) {
+               dsi = &intel_dsi_devices[i];
+               intel_dsi->dev = *dsi;
+
+               if (dsi->dev_ops->init(&intel_dsi->dev))
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(intel_dsi_devices)) {
+               DRM_DEBUG_KMS("no device found\n");
+               goto err;
+       }
+
+       intel_encoder->type = INTEL_OUTPUT_DSI;
+       intel_encoder->crtc_mask = (1 << 0); /* XXX */
+
+       intel_encoder->cloneable = false;
+       drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
+                          DRM_MODE_CONNECTOR_DSI);
+
+       drm_connector_helper_add(connector, &intel_dsi_connector_helper_funcs);
+
+       connector->display_info.subpixel_order = SubPixelHorizontalRGB; /*XXX*/
+       connector->interlace_allowed = false;
+       connector->doublescan_allowed = false;
+
+       intel_connector_attach_encoder(intel_connector, intel_encoder);
+
+       drm_sysfs_connector_add(connector);
+
+       fixed_mode = dsi->dev_ops->get_modes(&intel_dsi->dev);
+       if (!fixed_mode) {
+               DRM_DEBUG_KMS("no fixed mode\n");
+               goto err;
+       }
+
+       fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+       intel_panel_init(&intel_connector->panel, fixed_mode);
+
+       return true;
+
+err:
+       drm_encoder_cleanup(&intel_encoder->base);
+       kfree(intel_dsi);
+       kfree(intel_connector);
+
+       return false;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
new file mode 100644 (file)
index 0000000..c7765f3
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _INTEL_DSI_H
+#define _INTEL_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "intel_drv.h"
+
+struct intel_dsi_device {
+       unsigned int panel_id;
+       const char *name;
+       int type;
+       const struct intel_dsi_dev_ops *dev_ops;
+       void *dev_priv;
+};
+
+struct intel_dsi_dev_ops {
+       bool (*init)(struct intel_dsi_device *dsi);
+
+       /* This callback must be able to assume DSI commands can be sent */
+       void (*enable)(struct intel_dsi_device *dsi);
+
+       /* This callback must be able to assume DSI commands can be sent */
+       void (*disable)(struct intel_dsi_device *dsi);
+
+       int (*mode_valid)(struct intel_dsi_device *dsi,
+                         struct drm_display_mode *mode);
+
+       bool (*mode_fixup)(struct intel_dsi_device *dsi,
+                          const struct drm_display_mode *mode,
+                          struct drm_display_mode *adjusted_mode);
+
+       void (*mode_set)(struct intel_dsi_device *dsi,
+                        struct drm_display_mode *mode,
+                        struct drm_display_mode *adjusted_mode);
+
+       enum drm_connector_status (*detect)(struct intel_dsi_device *dsi);
+
+       bool (*get_hw_state)(struct intel_dsi_device *dev);
+
+       struct drm_display_mode *(*get_modes)(struct intel_dsi_device *dsi);
+
+       void (*destroy) (struct intel_dsi_device *dsi);
+};
+
+struct intel_dsi {
+       struct intel_encoder base;
+
+       struct intel_dsi_device dev;
+
+       struct intel_connector *attached_connector;
+
+       /* if true, use HS mode, otherwise LP */
+       bool hs;
+
+       /* virtual channel */
+       int channel;
+
+       /* number of DSI lanes */
+       unsigned int lane_count;
+
+       /* video mode pixel format for MIPI_DSI_FUNC_PRG register */
+       u32 pixel_format;
+
+       /* video mode format for MIPI_VIDEO_MODE_FORMAT register */
+       u32 video_mode_format;
+
+       /* eot for MIPI_EOT_DISABLE register */
+       u32 eot_disable;
+};
+
+static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
+{
+       return container_of(encoder, struct intel_dsi, base.base);
+}
+
+extern void vlv_enable_dsi_pll(struct intel_encoder *encoder);
+extern void vlv_disable_dsi_pll(struct intel_encoder *encoder);
+
+#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
new file mode 100644 (file)
index 0000000..7c40f98
--- /dev/null
@@ -0,0 +1,427 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_dsi_cmd.h"
+
+/*
+ * XXX: MIPI_DATA_ADDRESS, MIPI_DATA_LENGTH, MIPI_COMMAND_LENGTH, and
+ * MIPI_COMMAND_ADDRESS registers.
+ *
+ * Apparently these registers provide a MIPI adapter level way to send (lots of)
+ * commands and data to the receiver, without having to write the commands and
+ * data to MIPI_{HS,LP}_GEN_{CTRL,DATA} registers word by word.
+ *
+ * Presumably for anything other than MIPI_DCS_WRITE_MEMORY_START and
+ * MIPI_DCS_WRITE_MEMORY_CONTINUE (which are used to update the external
+ * framebuffer in command mode displays) these are just an optimization that can
+ * come later.
+ *
+ * For memory writes, these should probably be used for performance.
+ */
+
+static void print_stat(struct intel_dsi *intel_dsi)
+{
+       struct drm_encoder *encoder = &intel_dsi->base.base;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 val;
+
+       val = I915_READ(MIPI_INTR_STAT(pipe));
+
+#define STAT_BIT(val, bit) ((val) & (bit) ? " " #bit : "")
+       DRM_DEBUG_KMS("MIPI_INTR_STAT(%d) = %08x"
+                     "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+                     "\n", pipe, val,
+                     STAT_BIT(val, TEARING_EFFECT),
+                     STAT_BIT(val, SPL_PKT_SENT_INTERRUPT),
+                     STAT_BIT(val, GEN_READ_DATA_AVAIL),
+                     STAT_BIT(val, LP_GENERIC_WR_FIFO_FULL),
+                     STAT_BIT(val, HS_GENERIC_WR_FIFO_FULL),
+                     STAT_BIT(val, RX_PROT_VIOLATION),
+                     STAT_BIT(val, RX_INVALID_TX_LENGTH),
+                     STAT_BIT(val, ACK_WITH_NO_ERROR),
+                     STAT_BIT(val, TURN_AROUND_ACK_TIMEOUT),
+                     STAT_BIT(val, LP_RX_TIMEOUT),
+                     STAT_BIT(val, HS_TX_TIMEOUT),
+                     STAT_BIT(val, DPI_FIFO_UNDERRUN),
+                     STAT_BIT(val, LOW_CONTENTION),
+                     STAT_BIT(val, HIGH_CONTENTION),
+                     STAT_BIT(val, TXDSI_VC_ID_INVALID),
+                     STAT_BIT(val, TXDSI_DATA_TYPE_NOT_RECOGNISED),
+                     STAT_BIT(val, TXCHECKSUM_ERROR),
+                     STAT_BIT(val, TXECC_MULTIBIT_ERROR),
+                     STAT_BIT(val, TXECC_SINGLE_BIT_ERROR),
+                     STAT_BIT(val, TXFALSE_CONTROL_ERROR),
+                     STAT_BIT(val, RXDSI_VC_ID_INVALID),
+                     STAT_BIT(val, RXDSI_DATA_TYPE_NOT_REGOGNISED),
+                     STAT_BIT(val, RXCHECKSUM_ERROR),
+                     STAT_BIT(val, RXECC_MULTIBIT_ERROR),
+                     STAT_BIT(val, RXECC_SINGLE_BIT_ERROR),
+                     STAT_BIT(val, RXFALSE_CONTROL_ERROR),
+                     STAT_BIT(val, RXHS_RECEIVE_TIMEOUT_ERROR),
+                     STAT_BIT(val, RX_LP_TX_SYNC_ERROR),
+                     STAT_BIT(val, RXEXCAPE_MODE_ENTRY_ERROR),
+                     STAT_BIT(val, RXEOT_SYNC_ERROR),
+                     STAT_BIT(val, RXSOT_SYNC_ERROR),
+                     STAT_BIT(val, RXSOT_ERROR));
+#undef STAT_BIT
+}
+
+enum dsi_type {
+       DSI_DCS,
+       DSI_GENERIC,
+};
+
+/* enable or disable command mode hs transmissions */
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable)
+{
+       struct drm_encoder *encoder = &intel_dsi->base.base;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 temp;
+       u32 mask = DBI_FIFO_EMPTY;
+
+       if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+               DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
+
+       temp = I915_READ(MIPI_HS_LP_DBI_ENABLE(pipe));
+       temp &= DBI_HS_LP_MODE_MASK;
+       I915_WRITE(MIPI_HS_LP_DBI_ENABLE(pipe), enable ? DBI_HS_MODE : DBI_LP_MODE);
+
+       intel_dsi->hs = enable;
+}
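+
+/*
+ * Editor's note: the write helpers below pick the HS or LP generic FIFOs
+ * based on the intel_dsi->hs flag set here, so callers toggle HS/LP once
+ * rather than per command.
+ */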
+
+static int dsi_vc_send_short(struct intel_dsi *intel_dsi, int channel,
+                            u8 data_type, u16 data)
+{
+       struct drm_encoder *encoder = &intel_dsi->base.base;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 ctrl_reg;
+       u32 ctrl;
+       u32 mask;
+
+       DRM_DEBUG_KMS("channel %d, data_type %d, data %04x\n",
+                     channel, data_type, data);
+
+       if (intel_dsi->hs) {
+               ctrl_reg = MIPI_HS_GEN_CTRL(pipe);
+               mask = HS_CTRL_FIFO_FULL;
+       } else {
+               ctrl_reg = MIPI_LP_GEN_CTRL(pipe);
+               mask = LP_CTRL_FIFO_FULL;
+       }
+
+       if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50)) {
+               DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+               print_stat(intel_dsi);
+       }
+
+       /*
+        * Note: This function is also used for long packets, with length passed
+        * as data, since SHORT_PACKET_PARAM_SHIFT ==
+        * LONG_PACKET_WORD_COUNT_SHIFT.
+        */
+       ctrl = data << SHORT_PACKET_PARAM_SHIFT |
+               channel << VIRTUAL_CHANNEL_SHIFT |
+               data_type << DATA_TYPE_SHIFT;
+
+       I915_WRITE(ctrl_reg, ctrl);
+
+       return 0;
+}
+
+static int dsi_vc_send_long(struct intel_dsi *intel_dsi, int channel,
+                           u8 data_type, const u8 *data, int len)
+{
+       struct drm_encoder *encoder = &intel_dsi->base.base;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 data_reg;
+       int i, j, n;
+       u32 mask;
+
+       DRM_DEBUG_KMS("channel %d, data_type %d, len %04x\n",
+                     channel, data_type, len);
+
+       if (intel_dsi->hs) {
+               data_reg = MIPI_HS_GEN_DATA(pipe);
+               mask = HS_DATA_FIFO_FULL;
+       } else {
+               data_reg = MIPI_LP_GEN_DATA(pipe);
+               mask = LP_DATA_FIFO_FULL;
+       }
+
+       if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == 0, 50))
+               DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+
+       for (i = 0; i < len; i += n) {
+               u32 val = 0;
+               n = min_t(int, len - i, 4);
+
+               for (j = 0; j < n; j++)
+                       val |= *data++ << 8 * j;
+
+               I915_WRITE(data_reg, val);
+               /* XXX: check for data fifo full, once that is set, write 4
+                * dwords, then wait for not set, then continue. */
+       }
+
+       return dsi_vc_send_short(intel_dsi, channel, data_type, len);
+}
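+
+/*
+ * Illustration (editor's note): a 5 byte payload { 0x01, 0x02, 0x03, 0x04,
+ * 0x05 } is packed LSB-first into the data FIFO as 0x04030201 followed by
+ * 0x00000005, and the word count (5) then goes out via dsi_vc_send_short()
+ * as the long packet header.
+ */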
+
+static int dsi_vc_write_common(struct intel_dsi *intel_dsi,
+                              int channel, const u8 *data, int len,
+                              enum dsi_type type)
+{
+       int ret;
+
+       if (len == 0) {
+               BUG_ON(type == DSI_GENERIC);
+               ret = dsi_vc_send_short(intel_dsi, channel,
+                                       MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM,
+                                       0);
+       } else if (len == 1) {
+               ret = dsi_vc_send_short(intel_dsi, channel,
+                                       type == DSI_GENERIC ?
+                                       MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
+                                       MIPI_DSI_DCS_SHORT_WRITE, data[0]);
+       } else if (len == 2) {
+               ret = dsi_vc_send_short(intel_dsi, channel,
+                                       type == DSI_GENERIC ?
+                                       MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
+                                       MIPI_DSI_DCS_SHORT_WRITE_PARAM,
+                                       (data[1] << 8) | data[0]);
+       } else {
+               ret = dsi_vc_send_long(intel_dsi, channel,
+                                      type == DSI_GENERIC ?
+                                      MIPI_DSI_GENERIC_LONG_WRITE :
+                                      MIPI_DSI_DCS_LONG_WRITE, data, len);
+       }
+
+       return ret;
+}
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+                    const u8 *data, int len)
+{
+       return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_DCS);
+}
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+                        const u8 *data, int len)
+{
+       return dsi_vc_write_common(intel_dsi, channel, data, len, DSI_GENERIC);
+}
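+
+/*
+ * Illustration (editor's note): a one byte DCS buffer such as
+ * { MIPI_DCS_SET_DISPLAY_ON } passed to dsi_vc_dcs_write() takes the len == 1
+ * branch above and is sent as a MIPI_DSI_DCS_SHORT_WRITE with the DCS command
+ * byte (0x29) as the short packet data.
+ */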
+
+static int dsi_vc_dcs_send_read_request(struct intel_dsi *intel_dsi,
+                                       int channel, u8 dcs_cmd)
+{
+       return dsi_vc_send_short(intel_dsi, channel, MIPI_DSI_DCS_READ,
+                                dcs_cmd);
+}
+
+static int dsi_vc_generic_send_read_request(struct intel_dsi *intel_dsi,
+                                           int channel, u8 *reqdata,
+                                           int reqlen)
+{
+       u16 data;
+       u8 data_type;
+
+       switch (reqlen) {
+       case 0:
+               data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
+               data = 0;
+               break;
+       case 1:
+               data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
+               data = reqdata[0];
+               break;
+       case 2:
+               data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
+               data = (reqdata[1] << 8) | reqdata[0];
+               break;
+       default:
+               BUG();
+       }
+
+       return dsi_vc_send_short(intel_dsi, channel, data_type, data);
+}
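+
+/*
+ * Illustration (editor's note): a two byte request { 0x05, 0x01 } is sent as
+ * a MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM with data 0x0105 (second byte in
+ * the upper half).
+ */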
+
+static int dsi_read_data_return(struct intel_dsi *intel_dsi,
+                               u8 *buf, int buflen)
+{
+       struct drm_encoder *encoder = &intel_dsi->base.base;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       int i, len = 0;
+       u32 data_reg, val;
+
+       if (intel_dsi->hs) {
+               data_reg = MIPI_HS_GEN_DATA(pipe);
+       } else {
+               data_reg = MIPI_LP_GEN_DATA(pipe);
+       }
+
+       while (len < buflen) {
+               val = I915_READ(data_reg);
+               for (i = 0; i < 4 && len < buflen; i++, len++)
+                       buf[len] = val >> 8 * i;
+       }
+
+       return len;
+}
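+
+/*
+ * Illustration (editor's note): for a 3 byte read where the FIFO returns
+ * 0x00332211, the buffer is filled LSB-first as { 0x11, 0x22, 0x33 }.
+ */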
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+                   u8 *buf, int buflen)
+{
+       struct drm_encoder *encoder = &intel_dsi->base.base;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 mask;
+       int ret;
+
+       /*
+        * XXX: should issue multiple read requests and reads if request is
+        * longer than MIPI_MAX_RETURN_PKT_SIZE
+        */
+
+       I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+       ret = dsi_vc_dcs_send_read_request(intel_dsi, channel, dcs_cmd);
+       if (ret)
+               return ret;
+
+       mask = GEN_READ_DATA_AVAIL;
+       if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+               DRM_ERROR("Timeout waiting for read data.\n");
+
+       ret = dsi_read_data_return(intel_dsi, buf, buflen);
+       if (ret < 0)
+               return ret;
+
+       if (ret != buflen)
+               return -EIO;
+
+       return 0;
+}
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+                       u8 *reqdata, int reqlen, u8 *buf, int buflen)
+{
+       struct drm_encoder *encoder = &intel_dsi->base.base;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 mask;
+       int ret;
+
+       /*
+        * XXX: should issue multiple read requests and reads if request is
+        * longer than MIPI_MAX_RETURN_PKT_SIZE
+        */
+
+       I915_WRITE(MIPI_INTR_STAT(pipe), GEN_READ_DATA_AVAIL);
+
+       ret = dsi_vc_generic_send_read_request(intel_dsi, channel, reqdata,
+                                              reqlen);
+       if (ret)
+               return ret;
+
+       mask = GEN_READ_DATA_AVAIL;
+       if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 50))
+               DRM_ERROR("Timeout waiting for read data.\n");
+
+       ret = dsi_read_data_return(intel_dsi, buf, buflen);
+       if (ret < 0)
+               return ret;
+
+       if (ret != buflen)
+               return -EIO;
+
+       return 0;
+}
+
+/*
+ * send a video mode command
+ *
+ * XXX: commands with data in MIPI_DPI_DATA?
+ */
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd)
+{
+       struct drm_encoder *encoder = &intel_dsi->base.base;
+       struct drm_device *dev = encoder->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
+       enum pipe pipe = intel_crtc->pipe;
+       u32 mask;
+
+       /* XXX: pipe, hs */
+       if (intel_dsi->hs)
+               cmd &= ~DPI_LP_MODE;
+       else
+               cmd |= DPI_LP_MODE;
+
+       /* DPI virtual channel?! */
+
+       mask = DPI_FIFO_EMPTY;
+       if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
+               DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
+
+       /* clear bit */
+       I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
+
+       /* XXX: old code skips write if control unchanged */
+       if (cmd == I915_READ(MIPI_DPI_CONTROL(pipe)))
+               DRM_ERROR("Same special packet %02x twice in a row.\n", cmd);
+
+       I915_WRITE(MIPI_DPI_CONTROL(pipe), cmd);
+
+       mask = SPL_PKT_SENT_INTERRUPT;
+       if (wait_for((I915_READ(MIPI_INTR_STAT(pipe)) & mask) == mask, 100))
+               DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+
+       return 0;
+}
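+
+/*
+ * Editor's note: callers in intel_dsi.c presumably use this helper to send
+ * the DPI special packets (e.g. TURN_ON on enable and SHUTDOWN on disable);
+ * the exact command bits live with the MIPI_DPI_CONTROL register definition
+ * and are an assumption here, not shown in this hunk.
+ */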
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.h b/drivers/gpu/drm/i915/intel_dsi_cmd.h
new file mode 100644 (file)
index 0000000..54c8a23
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Jani Nikula <jani.nikula@intel.com>
+ */
+
+#ifndef _INTEL_DSI_DSI_H
+#define _INTEL_DSI_DSI_H
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <video/mipi_display.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_dsi.h"
+
+void dsi_hs_mode_enable(struct intel_dsi *intel_dsi, bool enable);
+
+int dsi_vc_dcs_write(struct intel_dsi *intel_dsi, int channel,
+                    const u8 *data, int len);
+
+int dsi_vc_generic_write(struct intel_dsi *intel_dsi, int channel,
+                        const u8 *data, int len);
+
+int dsi_vc_dcs_read(struct intel_dsi *intel_dsi, int channel, u8 dcs_cmd,
+                   u8 *buf, int buflen);
+
+int dsi_vc_generic_read(struct intel_dsi *intel_dsi, int channel,
+                       u8 *reqdata, int reqlen, u8 *buf, int buflen);
+
+int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd);
+
+/* XXX: questionable write helpers */
+static inline int dsi_vc_dcs_write_0(struct intel_dsi *intel_dsi,
+                                    int channel, u8 dcs_cmd)
+{
+       return dsi_vc_dcs_write(intel_dsi, channel, &dcs_cmd, 1);
+}
+
+static inline int dsi_vc_dcs_write_1(struct intel_dsi *intel_dsi,
+                                    int channel, u8 dcs_cmd, u8 param)
+{
+       u8 buf[2] = { dcs_cmd, param };
+       return dsi_vc_dcs_write(intel_dsi, channel, buf, 2);
+}
+
+static inline int dsi_vc_generic_write_0(struct intel_dsi *intel_dsi,
+                                        int channel)
+{
+       return dsi_vc_generic_write(intel_dsi, channel, NULL, 0);
+}
+
+static inline int dsi_vc_generic_write_1(struct intel_dsi *intel_dsi,
+                                        int channel, u8 param)
+{
+       return dsi_vc_generic_write(intel_dsi, channel, &param, 1);
+}
+
+static inline int dsi_vc_generic_write_2(struct intel_dsi *intel_dsi,
+                                        int channel, u8 param1, u8 param2)
+{
+       u8 buf[2] = { param1, param2 };
+       return dsi_vc_generic_write(intel_dsi, channel, buf, 2);
+}
+
+/* XXX: questionable read helpers */
+static inline int dsi_vc_generic_read_0(struct intel_dsi *intel_dsi,
+                                       int channel, u8 *buf, int buflen)
+{
+       return dsi_vc_generic_read(intel_dsi, channel, NULL, 0, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_1(struct intel_dsi *intel_dsi,
+                                       int channel, u8 param, u8 *buf,
+                                       int buflen)
+{
+       return dsi_vc_generic_read(intel_dsi, channel, &param, 1, buf, buflen);
+}
+
+static inline int dsi_vc_generic_read_2(struct intel_dsi *intel_dsi,
+                                       int channel, u8 param1, u8 param2,
+                                       u8 *buf, int buflen)
+{
+       u8 req[2] = { param1, param2 };
+
+       return dsi_vc_generic_read(intel_dsi, channel, req, 2, buf, buflen);
+}
+
+
+#endif /* _INTEL_DSI_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
new file mode 100644 (file)
index 0000000..44279b2
--- /dev/null
@@ -0,0 +1,317 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *     Shobhit Kumar <shobhit.kumar@intel.com>
+ *     Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+#include "intel_dsi.h"
+
+#define DSI_HSS_PACKET_SIZE            4
+#define DSI_HSE_PACKET_SIZE            4
+#define DSI_HSA_PACKET_EXTRA_SIZE      6
+#define DSI_HBP_PACKET_EXTRA_SIZE      6
+#define DSI_HACTIVE_PACKET_EXTRA_SIZE  6
+#define DSI_HFP_PACKET_EXTRA_SIZE      6
+#define DSI_EOTP_PACKET_SIZE           4
+
+struct dsi_mnp {
+       u32 dsi_pll_ctrl;
+       u32 dsi_pll_div;
+};
+
+static const u32 lfsr_converts[] = {
+       426, 469, 234, 373, 442, 221, 110, 311, 411,            /* 62 - 70 */
+       461, 486, 243, 377, 188, 350, 175, 343, 427, 213,       /* 71 - 80 */
+       106, 53, 282, 397, 354, 227, 113, 56, 284, 142,         /* 81 - 90 */
+       71, 35                                                  /* 91 - 92 */
+};
+
+static u32 dsi_rr_formula(const struct drm_display_mode *mode,
+                         int pixel_format, int video_mode_format,
+                         int lane_count, bool eotp)
+{
+       u32 bpp;
+       u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
+       u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
+       u32 bytes_per_line, bytes_per_frame;
+       u32 num_frames;
+       u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
+       u32 dsi_bit_clock_hz;
+       u32 dsi_clk;
+
+       switch (pixel_format) {
+       default:
+       case VID_MODE_FORMAT_RGB888:
+       case VID_MODE_FORMAT_RGB666_LOOSE:
+               bpp = 24;
+               break;
+       case VID_MODE_FORMAT_RGB666:
+               bpp = 18;
+               break;
+       case VID_MODE_FORMAT_RGB565:
+               bpp = 16;
+               break;
+       }
+
+       hactive = mode->hdisplay;
+       vactive = mode->vdisplay;
+       hfp = mode->hsync_start - mode->hdisplay;
+       hsync = mode->hsync_end - mode->hsync_start;
+       hbp = mode->htotal - mode->hsync_end;
+
+       vfp = mode->vsync_start - mode->vdisplay;
+       vsync = mode->vsync_end - mode->vsync_start;
+       vbp = mode->vtotal - mode->vsync_end;
+
+       hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
+       hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
+       hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
+       hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
+
+       bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
+               DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
+               hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
+               hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
+               hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
+
+       /*
+        * XXX: Need to accurately calculate LP to HS transition timeout and add
+        * it to bytes_per_line/bytes_per_frame.
+        */
+
+       if (eotp && video_mode_format == VIDEO_MODE_BURST)
+               bytes_per_line += DSI_EOTP_PACKET_SIZE;
+
+       bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
+               vactive * bytes_per_line + vfp * bytes_per_line;
+
+       if (eotp &&
+           (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
+            video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
+               bytes_per_frame += DSI_EOTP_PACKET_SIZE;
+
+       num_frames = drm_mode_vrefresh(mode);
+       bytes_per_x_frames = num_frames * bytes_per_frame;
+
+       bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
+
+       /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
+       dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
+       dsi_clk = dsi_bit_clock_hz / (1000 * 1000);
+
+       if (eotp && video_mode_format == VIDEO_MODE_BURST)
+               dsi_clk *= 2;
+
+       return dsi_clk;
+}
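+
+/*
+ * Editor's summary: the value returned above is the required DSI link clock
+ * in MHz -- bytes per frame (pixel payload plus per-packet overhead) times
+ * the refresh rate, divided across the lanes, converted to bits and scaled
+ * down by 10^6 -- and is what dsi_calc_mnp() below expects.
+ */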
+
+#ifdef MNP_FROM_TABLE
+
+struct dsi_clock_table {
+       u32 freq;
+       u8 m;
+       u8 p;
+};
+
+static const struct dsi_clock_table dsi_clk_tbl[] = {
+       {300, 72, 6}, {313, 75, 6}, {323, 78, 6}, {333, 80, 6},
+       {343, 82, 6}, {353, 85, 6}, {363, 87, 6}, {373, 90, 6},
+       {383, 92, 6}, {390, 78, 5}, {393, 79, 5}, {400, 80, 5},
+       {401, 80, 5}, {402, 80, 5}, {403, 81, 5}, {404, 81, 5},
+       {405, 81, 5}, {406, 81, 5}, {407, 81, 5}, {408, 82, 5},
+       {409, 82, 5}, {410, 82, 5}, {411, 82, 5}, {412, 82, 5},
+       {413, 83, 5}, {414, 83, 5}, {415, 83, 5}, {416, 83, 5},
+       {417, 83, 5}, {418, 84, 5}, {419, 84, 5}, {420, 84, 5},
+       {430, 86, 5}, {440, 88, 5}, {450, 90, 5}, {460, 92, 5},
+       {470, 75, 4}, {480, 77, 4}, {490, 78, 4}, {500, 80, 4},
+       {510, 82, 4}, {520, 83, 4}, {530, 85, 4}, {540, 86, 4},
+       {550, 88, 4}, {560, 90, 4}, {570, 91, 4}, {580, 70, 3},
+       {590, 71, 3}, {600, 72, 3}, {610, 73, 3}, {620, 74, 3},
+       {630, 76, 3}, {640, 77, 3}, {650, 78, 3}, {660, 79, 3},
+       {670, 80, 3}, {680, 82, 3}, {690, 83, 3}, {700, 84, 3},
+       {710, 85, 3}, {720, 86, 3}, {730, 88, 3}, {740, 89, 3},
+       {750, 90, 3}, {760, 91, 3}, {770, 92, 3}, {780, 62, 2},
+       {790, 63, 2}, {800, 64, 2}, {880, 70, 2}, {900, 72, 2},
+       {1000, 80, 2},          /* dsi clock frequency in MHz */
+};
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+       unsigned int i;
+       u8 m;
+       u8 n;
+       u8 p;
+       u32 m_seed;
+
+       if (dsi_clk < 300 || dsi_clk > 1000)
+               return -ECHRNG;
+
+       for (i = 0; i < ARRAY_SIZE(dsi_clk_tbl); i++) {
+               if (dsi_clk_tbl[i].freq > dsi_clk)
+                       break;
+       }
+
+       m = dsi_clk_tbl[i].m;
+       p = dsi_clk_tbl[i].p;
+       m_seed = lfsr_converts[m - 62];
+       n = 1;
+       dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + p - 2);
+       dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+               m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+       return 0;
+}
+
+#else
+
+static int dsi_calc_mnp(u32 dsi_clk, struct dsi_mnp *dsi_mnp)
+{
+       u32 m, n, p;
+       u32 ref_clk;
+       u32 error;
+       u32 tmp_error;
+       u32 target_dsi_clk;
+       u32 calc_dsi_clk;
+       u32 calc_m;
+       u32 calc_p;
+       u32 m_seed;
+
+       if (dsi_clk < 300 || dsi_clk > 1150) {
+               DRM_ERROR("DSI CLK Out of Range\n");
+               return -ECHRNG;
+       }
+
+       ref_clk = 25000;
+       target_dsi_clk = dsi_clk * 1000;
+       error = 0xFFFFFFFF;
+       calc_m = 0;
+       calc_p = 0;
+
+       for (m = 62; m <= 92; m++) {
+               for (p = 2; p <= 6; p++) {
+
+                       calc_dsi_clk = (m * ref_clk) / p;
+                       if (calc_dsi_clk >= target_dsi_clk) {
+                               tmp_error = calc_dsi_clk - target_dsi_clk;
+                               if (tmp_error < error) {
+                                       error = tmp_error;
+                                       calc_m = m;
+                                       calc_p = p;
+                               }
+                       }
+               }
+       }
+
+       m_seed = lfsr_converts[calc_m - 62];
+       n = 1;
+       dsi_mnp->dsi_pll_ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
+       dsi_mnp->dsi_pll_div = (n - 1) << DSI_PLL_N1_DIV_SHIFT |
+               m_seed << DSI_PLL_M1_DIV_SHIFT;
+
+       return 0;
+}
+
+#endif
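+
+/*
+ * Worked example (editor's note): for dsi_clk = 500 (MHz) the exhaustive
+ * search settles on m = 80, p = 4, since 80 * 25 MHz / 4 = 500 MHz exactly
+ * (zero error); the M divider is then programmed as the LFSR seed
+ * lfsr_converts[80 - 62] = 213.  The table-based variant instead picks the
+ * first table frequency strictly above the target.
+ */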
+
+/*
+ * XXX: The muxing and gating are hard-coded for now. Need to add support for
+ * sharing PLLs with two DSI outputs.
+ */
+static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+       const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
+       struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+       int ret;
+       struct dsi_mnp dsi_mnp;
+       u32 dsi_clk;
+
+       dsi_clk = dsi_rr_formula(mode, intel_dsi->pixel_format,
+                                intel_dsi->video_mode_format,
+                                intel_dsi->lane_count, !intel_dsi->eot_disable);
+
+       ret = dsi_calc_mnp(dsi_clk, &dsi_mnp);
+       if (ret) {
+               DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+               return;
+       }
+
+       dsi_mnp.dsi_pll_ctrl |= DSI_PLL_CLK_GATE_DSI0_DSIPLL;
+
+       DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
+                     dsi_mnp.dsi_pll_div, dsi_mnp.dsi_pll_ctrl);
+
+       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, 0);
+       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_DIVIDER, dsi_mnp.dsi_pll_div);
+       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
+}
+
+void vlv_enable_dsi_pll(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       u32 tmp;
+
+       DRM_DEBUG_KMS("\n");
+
+       mutex_lock(&dev_priv->dpio_lock);
+
+       vlv_configure_dsi_pll(encoder);
+
+       /* wait at least 0.5 us after ungating before enabling VCO */
+       usleep_range(1, 10);
+
+       tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+       tmp |= DSI_PLL_VCO_EN;
+       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+       mutex_unlock(&dev_priv->dpio_lock);
+
+       if (wait_for(I915_READ(PIPECONF(PIPE_A)) & PIPECONF_DSI_PLL_LOCKED, 20)) {
+               DRM_ERROR("DSI PLL lock failed\n");
+               return;
+       }
+
+       DRM_DEBUG_KMS("DSI PLL locked\n");
+}
+
+void vlv_disable_dsi_pll(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       u32 tmp;
+
+       DRM_DEBUG_KMS("\n");
+
+       mutex_lock(&dev_priv->dpio_lock);
+
+       tmp = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
+       tmp &= ~DSI_PLL_VCO_EN;
+       tmp |= DSI_PLL_LDO_GATE;
+       vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, tmp);
+
+       mutex_unlock(&dev_priv->dpio_lock);
+}
index 7fa7df546c1ee6e36e119af34d6e79a9d7015e82..1b64145c669aab596aac1f1c6cf6b9bb875553d3 100644 (file)
@@ -153,6 +153,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
                flags |= DRM_MODE_FLAG_NVSYNC;
 
        pipe_config->adjusted_mode.flags |= flags;
+
+       pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
 }
 
 static void intel_disable_dvo(struct intel_encoder *encoder)
@@ -267,11 +269,6 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
                drm_mode_set_crtcinfo(adjusted_mode, 0);
        }
 
-       if (intel_dvo->dev.dev_ops->mode_fixup)
-               return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
-                                                         &pipe_config->requested_mode,
-                                                         adjusted_mode);
-
        return true;
 }
 
@@ -370,7 +367,6 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
 
 static void intel_dvo_destroy(struct drm_connector *connector)
 {
-       drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
 }
@@ -451,11 +447,11 @@ void intel_dvo_init(struct drm_device *dev)
        int i;
        int encoder_type = DRM_MODE_ENCODER_NONE;
 
-       intel_dvo = kzalloc(sizeof(struct intel_dvo), GFP_KERNEL);
+       intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
        if (!intel_dvo)
                return;
 
-       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
        if (!intel_connector) {
                kfree(intel_dvo);
                return;
similarity index 89%
rename from drivers/gpu/drm/i915/intel_fb.c
rename to drivers/gpu/drm/i915/intel_fbdev.c
index bc2100007b21039ff628469e36e4cc48883c833a..895fcb4fbd9446bfbfafc90f95948d9a4bea5c55 100644 (file)
@@ -78,8 +78,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;
 
-       mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
-                                                     8), 64);
+       mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+                                   DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
        mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
                                                          sizes->surface_depth);
 
@@ -184,6 +184,27 @@ out:
        return ret;
 }
 
+/** Sets the color ramps on behalf of RandR */
+static void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
+                                   u16 blue, int regno)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       intel_crtc->lut_r[regno] = red >> 8;
+       intel_crtc->lut_g[regno] = green >> 8;
+       intel_crtc->lut_b[regno] = blue >> 8;
+}
+
+static void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
+                                   u16 *blue, int regno)
+{
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       *red = intel_crtc->lut_r[regno] << 8;
+       *green = intel_crtc->lut_g[regno] << 8;
+       *blue = intel_crtc->lut_b[regno] << 8;
+}
+
 static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
        .gamma_set = intel_crtc_fb_gamma_set,
        .gamma_get = intel_crtc_fb_gamma_get,
@@ -216,7 +237,7 @@ int intel_fbdev_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
+       ifbdev = kzalloc(sizeof(*ifbdev), GFP_KERNEL);
        if (!ifbdev)
                return -ENOMEM;
 
@@ -225,7 +246,7 @@ int intel_fbdev_init(struct drm_device *dev)
 
        ret = drm_fb_helper_init(dev, &ifbdev->helper,
                                 INTEL_INFO(dev)->num_pipes,
-                                INTELFB_CONN_LIMIT);
+                                4);
        if (ret) {
                kfree(ifbdev);
                return ret;
@@ -278,13 +299,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state)
 
 MODULE_LICENSE("GPL and additional rights");
 
-void intel_fb_output_poll_changed(struct drm_device *dev)
+void intel_fbdev_output_poll_changed(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
 }
 
-void intel_fb_restore_mode(struct drm_device *dev)
+void intel_fbdev_restore_mode(struct drm_device *dev)
 {
        int ret;
        struct drm_i915_private *dev_priv = dev->dev_private;
index 4148cc85bf7fb303bde9ec151773480542b7f5f8..51a8336dec2e0d40452f178704b6c3d753ff1f6b 100644 (file)
@@ -713,6 +713,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        u32 tmp, flags = 0;
+       int dotclock;
 
        tmp = I915_READ(intel_hdmi->hdmi_reg);
 
@@ -727,6 +728,16 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
                flags |= DRM_MODE_FLAG_NVSYNC;
 
        pipe_config->adjusted_mode.flags |= flags;
+
+       if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
+               dotclock = pipe_config->port_clock * 2 / 3;
+       else
+               dotclock = pipe_config->port_clock;
+
+       if (HAS_PCH_SPLIT(dev_priv->dev))
+               ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+       pipe_config->adjusted_mode.crtc_clock = dotclock;
 }
 
 static void intel_enable_hdmi(struct intel_encoder *encoder)
@@ -862,7 +873,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
-       int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+       int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
        int portclock_limit = hdmi_portclock_limit(intel_hdmi);
        int desired_bpp;
 
@@ -904,7 +915,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
                pipe_config->pipe_bpp = desired_bpp;
        }
 
-       if (adjusted_mode->clock > portclock_limit) {
+       if (adjusted_mode->crtc_clock > portclock_limit) {
                DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
                return false;
        }
@@ -1063,7 +1074,7 @@ done:
        return 0;
 }
 
-static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
@@ -1079,35 +1090,35 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
 
        /* Enable clock channels for this port */
        mutex_lock(&dev_priv->dpio_lock);
-       val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+       val = vlv_dpio_read(dev_priv, pipe, DPIO_DATA_LANE_A(port));
        val = 0;
        if (pipe)
                val |= (1<<21);
        else
                val &= ~(1<<21);
        val |= 0x001000c4;
-       vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+       vlv_dpio_write(dev_priv, pipe, DPIO_DATA_CHANNEL(port), val);
 
        /* HDMI 1.0V-2dB */
-       vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0);
-       vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port), 0);
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL4(port),
                         0x2b245f5f);
-       vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL2(port),
                         0x5578b83a);
-       vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_SWING_CTL3(port),
                         0x0c782040);
-       vlv_dpio_write(dev_priv, DPIO_TX3_SWING_CTL4(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX3_SWING_CTL4(port),
                         0x2b247878);
-       vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
-       vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER0(port), 0x00030000);
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
                         0x00002000);
-       vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
                         DPIO_TX_OCALINIT_EN);
 
        /* Program lane clock */
-       vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF0(port),
                         0x00760018);
-       vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLOCKBUF8(port),
                         0x00400888);
        mutex_unlock(&dev_priv->dpio_lock);
 
@@ -1116,55 +1127,60 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
        vlv_wait_port_ready(dev_priv, port);
 }
 
-static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
+static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(encoder->base.crtc);
        int port = vlv_dport_to_channel(dport);
+       int pipe = intel_crtc->pipe;
 
        if (!IS_VALLEYVIEW(dev))
                return;
 
        /* Program Tx lane resets to default */
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port),
                         DPIO_PCS_TX_LANE2_RESET |
                         DPIO_PCS_TX_LANE1_RESET);
-       vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port),
                         DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                         DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                         (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                         DPIO_PCS_CLK_SOFT_RESET);
 
        /* Fix up inter-pair skew failure */
-       vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
-       vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
-       vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_STAGGER1(port), 0x00750f00);
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_CTL(port), 0x00001500);
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_LANE(port), 0x40400000);
 
-       vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CTL_OVER1(port),
                         0x00002000);
-       vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
+       vlv_dpio_write(dev_priv, pipe, DPIO_TX_OCALINIT(port),
                         DPIO_TX_OCALINIT_EN);
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
-static void intel_hdmi_post_disable(struct intel_encoder *encoder)
+static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
 {
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+       struct intel_crtc *intel_crtc =
+               to_intel_crtc(encoder->base.crtc);
        int port = vlv_dport_to_channel(dport);
+       int pipe = intel_crtc->pipe;
 
        /* Reset lanes to avoid HDMI flicker (VLV w/a) */
        mutex_lock(&dev_priv->dpio_lock);
-       vlv_dpio_write(dev_priv, DPIO_PCS_TX(port), 0x00000000);
-       vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port), 0x00e00060);
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_TX(port), 0x00000000);
+       vlv_dpio_write(dev_priv, pipe, DPIO_PCS_CLK(port), 0x00e00060);
        mutex_unlock(&dev_priv->dpio_lock);
 }
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 {
-       drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
 }
@@ -1211,6 +1227,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 
        connector->interlace_allowed = 1;
        connector->doublescan_allowed = 0;
+       connector->stereo_allowed = 1;
 
        switch (port) {
        case PORT_B:
@@ -1275,11 +1292,11 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
        struct intel_encoder *intel_encoder;
        struct intel_connector *intel_connector;
 
-       intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
+       intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
                return;
 
-       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
        if (!intel_connector) {
                kfree(intel_dig_port);
                return;
@@ -1296,10 +1313,10 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
        intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
        intel_encoder->get_config = intel_hdmi_get_config;
        if (IS_VALLEYVIEW(dev)) {
-               intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
-               intel_encoder->pre_enable = intel_hdmi_pre_enable;
+               intel_encoder->pre_pll_enable = vlv_hdmi_pre_pll_enable;
+               intel_encoder->pre_enable = vlv_hdmi_pre_enable;
                intel_encoder->enable = vlv_enable_hdmi;
-               intel_encoder->post_disable = intel_hdmi_post_disable;
+               intel_encoder->post_disable = vlv_hdmi_post_disable;
        } else {
                intel_encoder->enable = intel_enable_hdmi;
        }
index d1c1e0f7f2621a435ba83bd67bcca0eb878eaa43..2ca17b14b6c1c91710601dfcb34c661b45707347 100644 (file)
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
+enum disp_clk {
+       CDCLK,
+       CZCLK
+};
+
 struct gmbus_port {
        const char *name;
        int reg;
@@ -58,10 +63,69 @@ to_intel_gmbus(struct i2c_adapter *i2c)
        return container_of(i2c, struct intel_gmbus, adapter);
 }
 
+static int get_disp_clk_div(struct drm_i915_private *dev_priv,
+                           enum disp_clk clk)
+{
+       u32 reg_val;
+       int clk_ratio;
+
+       reg_val = I915_READ(CZCLK_CDCLK_FREQ_RATIO);
+
+       if (clk == CDCLK)
+               clk_ratio =
+                       ((reg_val & CDCLK_FREQ_MASK) >> CDCLK_FREQ_SHIFT) + 1;
+       else
+               clk_ratio = (reg_val & CZCLK_FREQ_MASK) + 1;
+
+       return clk_ratio;
+}
+
+static void gmbus_set_freq(struct drm_i915_private *dev_priv)
+{
+       int vco_freq[] = { 800, 1600, 2000, 2400 };
+       int gmbus_freq = 0, cdclk_div, hpll_freq;
+
+       BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
+
+       /* Skip setting the gmbus freq if BIOS has already programmed it */
+       if (I915_READ(GMBUSFREQ_VLV) != 0xA0)
+               return;
+
+       /* Obtain SKU information */
+       mutex_lock(&dev_priv->dpio_lock);
+       hpll_freq =
+               vlv_cck_read(dev_priv, CCK_FUSE_REG) & CCK_FUSE_HPLL_FREQ_MASK;
+       mutex_unlock(&dev_priv->dpio_lock);
+
+       /* Get the CDCLK divide ratio */
+       cdclk_div = get_disp_clk_div(dev_priv, CDCLK);
+
+       /*
+        * Program the gmbus_freq based on the cdclk frequency.
+        * BSpec erroneously claims we should aim for 4MHz, but
+        * in fact 1MHz is the correct frequency.
+        */
+       if (cdclk_div)
+               gmbus_freq = (vco_freq[hpll_freq] << 1) / cdclk_div;
+
+       if (WARN_ON(gmbus_freq == 0))
+               return;
+
+       I915_WRITE(GMBUSFREQ_VLV, gmbus_freq);
+}
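/*
 * Illustrative sketch (not part of this patch): the gmbus_freq arithmetic
 * from gmbus_set_freq() above, evaluated with assumed fuse/divider values.
 * The 1600 MHz VCO pick and the divider of 10 are examples only.
 */
static inline int example_vlv_gmbus_freq(void)
{
	const int vco_freq[] = { 800, 1600, 2000, 2400 };	/* as above */
	int hpll_freq = 1;	/* assume CCK_FUSE_REG selects vco_freq[1] */
	int cdclk_div = 10;	/* assume get_disp_clk_div() returns 10 */

	/* (1600 << 1) / 10 = 320, the value written to GMBUSFREQ_VLV */
	return (vco_freq[hpll_freq] << 1) / cdclk_div;
}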
+
 void
 intel_i2c_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /*
+        * In a BIOS-less system, program the correct gmbus frequency
+        * before reading the EDID.
+        */
+       if (IS_VALLEYVIEW(dev))
+               gmbus_set_freq(dev_priv);
+
        I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
        I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
 }
index 831a5c021c4bdefd2495ca0736d8fcb3f78ff57d..ae0c843dd2630cfad8b8538928de4fe65fd50509 100644 (file)
@@ -92,6 +92,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 lvds_reg, tmp, flags = 0;
+       int dotclock;
 
        if (HAS_PCH_SPLIT(dev))
                lvds_reg = PCH_LVDS;
@@ -116,6 +117,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
                pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
        }
+
+       dotclock = pipe_config->port_clock;
+
+       if (HAS_PCH_SPLIT(dev_priv->dev))
+               ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+       pipe_config->adjusted_mode.crtc_clock = dotclock;
 }
 
 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -466,7 +474,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
 
        intel_panel_fini(&lvds_connector->base.panel);
 
-       drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
 }
@@ -786,7 +793,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
                return true;
 
        for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
-               struct child_device_config *child = dev_priv->vbt.child_dev + i;
+               union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+               struct old_child_dev_config *child = &uchild->old;
 
                /* If the device type is not LFP, continue.
                 * We have to check both the new identifiers as well as the
@@ -940,11 +948,11 @@ void intel_lvds_init(struct drm_device *dev)
                }
        }
 
-       lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
+       lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
        if (!lvds_encoder)
                return;
 
-       lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
+       lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
        if (!lvds_connector) {
                kfree(lvds_encoder);
                return;
index 119771ff46ab5178047c018efc5f9f31a06f068d..b82050c96f3e76542cffa8ee0170dbcaadbbfc7b 100644 (file)
 #include "i915_drv.h"
 #include "intel_drv.h"
 
-#define PCI_ASLE 0xe4
-#define PCI_ASLS 0xfc
+#define PCI_ASLE               0xe4
+#define PCI_ASLS               0xfc
+#define PCI_SWSCI              0xe8
+#define PCI_SWSCI_SCISEL       (1 << 15)
+#define PCI_SWSCI_GSSCIE       (1 << 0)
 
 #define OPREGION_HEADER_OFFSET 0
 #define OPREGION_ACPI_OFFSET   0x100
@@ -107,25 +110,38 @@ struct opregion_asle {
        u32 epfm;       /* enabled panel fitting modes */
        u8 plut[74];    /* panel LUT and identifier */
        u32 pfmb;       /* PWM freq and min brightness */
-       u8 rsvd[102];
+       u32 cddv;       /* color correction default values */
+       u32 pcft;       /* power conservation features */
+       u32 srot;       /* supported rotation angles */
+       u32 iuer;       /* IUER events */
+       u8 rsvd[86];
 } __attribute__((packed));
 
 /* Driver readiness indicator */
 #define ASLE_ARDY_READY                (1 << 0)
 #define ASLE_ARDY_NOT_READY    (0 << 0)
 
-/* ASLE irq request bits */
-#define ASLE_SET_ALS_ILLUM     (1 << 0)
-#define ASLE_SET_BACKLIGHT     (1 << 1)
-#define ASLE_SET_PFIT          (1 << 2)
-#define ASLE_SET_PWM_FREQ      (1 << 3)
-#define ASLE_REQ_MSK           0xf
-
-/* response bits of ASLE irq request */
-#define ASLE_ALS_ILLUM_FAILED  (1<<10)
-#define ASLE_BACKLIGHT_FAILED  (1<<12)
-#define ASLE_PFIT_FAILED       (1<<14)
-#define ASLE_PWM_FREQ_FAILED   (1<<16)
+/* ASLE Interrupt Command (ASLC) bits */
+#define ASLC_SET_ALS_ILLUM             (1 << 0)
+#define ASLC_SET_BACKLIGHT             (1 << 1)
+#define ASLC_SET_PFIT                  (1 << 2)
+#define ASLC_SET_PWM_FREQ              (1 << 3)
+#define ASLC_SUPPORTED_ROTATION_ANGLES (1 << 4)
+#define ASLC_BUTTON_ARRAY              (1 << 5)
+#define ASLC_CONVERTIBLE_INDICATOR     (1 << 6)
+#define ASLC_DOCKING_INDICATOR         (1 << 7)
+#define ASLC_ISCT_STATE_CHANGE         (1 << 8)
+#define ASLC_REQ_MSK                   0x1ff
+/* response bits */
+#define ASLC_ALS_ILLUM_FAILED          (1 << 10)
+#define ASLC_BACKLIGHT_FAILED          (1 << 12)
+#define ASLC_PFIT_FAILED               (1 << 14)
+#define ASLC_PWM_FREQ_FAILED           (1 << 16)
+#define ASLC_ROTATION_ANGLES_FAILED    (1 << 18)
+#define ASLC_BUTTON_ARRAY_FAILED       (1 << 20)
+#define ASLC_CONVERTIBLE_FAILED                (1 << 22)
+#define ASLC_DOCKING_FAILED            (1 << 24)
+#define ASLC_ISCT_STATE_FAILED         (1 << 26)
 
 /* Technology enabled indicator */
 #define ASLE_TCHE_ALS_EN       (1 << 0)
@@ -151,6 +167,60 @@ struct opregion_asle {
 
 #define ASLE_CBLV_VALID         (1<<31)
 
+/* IUER */
+#define ASLE_IUER_DOCKING              (1 << 7)
+#define ASLE_IUER_CONVERTIBLE          (1 << 6)
+#define ASLE_IUER_ROTATION_LOCK_BTN    (1 << 4)
+#define ASLE_IUER_VOLUME_DOWN_BTN      (1 << 3)
+#define ASLE_IUER_VOLUME_UP_BTN                (1 << 2)
+#define ASLE_IUER_WINDOWS_BTN          (1 << 1)
+#define ASLE_IUER_POWER_BTN            (1 << 0)
+
+/* Software System Control Interrupt (SWSCI) */
+#define SWSCI_SCIC_INDICATOR           (1 << 0)
+#define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1
+#define SWSCI_SCIC_MAIN_FUNCTION_MASK  (0xf << 1)
+#define SWSCI_SCIC_SUB_FUNCTION_SHIFT  8
+#define SWSCI_SCIC_SUB_FUNCTION_MASK   (0xff << 8)
+#define SWSCI_SCIC_EXIT_PARAMETER_SHIFT        8
+#define SWSCI_SCIC_EXIT_PARAMETER_MASK (0xff << 8)
+#define SWSCI_SCIC_EXIT_STATUS_SHIFT   5
+#define SWSCI_SCIC_EXIT_STATUS_MASK    (7 << 5)
+#define SWSCI_SCIC_EXIT_STATUS_SUCCESS 1
+
+#define SWSCI_FUNCTION_CODE(main, sub) \
+       ((main) << SWSCI_SCIC_MAIN_FUNCTION_SHIFT | \
+        (sub) << SWSCI_SCIC_SUB_FUNCTION_SHIFT)
+
+/* SWSCI: Get BIOS Data (GBDA) */
+#define SWSCI_GBDA                     4
+#define SWSCI_GBDA_SUPPORTED_CALLS     SWSCI_FUNCTION_CODE(SWSCI_GBDA, 0)
+#define SWSCI_GBDA_REQUESTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_GBDA, 1)
+#define SWSCI_GBDA_BOOT_DISPLAY_PREF   SWSCI_FUNCTION_CODE(SWSCI_GBDA, 4)
+#define SWSCI_GBDA_PANEL_DETAILS       SWSCI_FUNCTION_CODE(SWSCI_GBDA, 5)
+#define SWSCI_GBDA_TV_STANDARD         SWSCI_FUNCTION_CODE(SWSCI_GBDA, 6)
+#define SWSCI_GBDA_INTERNAL_GRAPHICS   SWSCI_FUNCTION_CODE(SWSCI_GBDA, 7)
+#define SWSCI_GBDA_SPREAD_SPECTRUM     SWSCI_FUNCTION_CODE(SWSCI_GBDA, 10)
+
+/* SWSCI: System BIOS Callbacks (SBCB) */
+#define SWSCI_SBCB                     6
+#define SWSCI_SBCB_SUPPORTED_CALLBACKS SWSCI_FUNCTION_CODE(SWSCI_SBCB, 0)
+#define SWSCI_SBCB_INIT_COMPLETION     SWSCI_FUNCTION_CODE(SWSCI_SBCB, 1)
+#define SWSCI_SBCB_PRE_HIRES_SET_MODE  SWSCI_FUNCTION_CODE(SWSCI_SBCB, 3)
+#define SWSCI_SBCB_POST_HIRES_SET_MODE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 4)
+#define SWSCI_SBCB_DISPLAY_SWITCH      SWSCI_FUNCTION_CODE(SWSCI_SBCB, 5)
+#define SWSCI_SBCB_SET_TV_FORMAT       SWSCI_FUNCTION_CODE(SWSCI_SBCB, 6)
+#define SWSCI_SBCB_ADAPTER_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 7)
+#define SWSCI_SBCB_DISPLAY_POWER_STATE SWSCI_FUNCTION_CODE(SWSCI_SBCB, 8)
+#define SWSCI_SBCB_SET_BOOT_DISPLAY    SWSCI_FUNCTION_CODE(SWSCI_SBCB, 9)
+#define SWSCI_SBCB_SET_PANEL_DETAILS   SWSCI_FUNCTION_CODE(SWSCI_SBCB, 10)
+#define SWSCI_SBCB_SET_INTERNAL_GFX    SWSCI_FUNCTION_CODE(SWSCI_SBCB, 11)
+#define SWSCI_SBCB_POST_HIRES_TO_DOS_FS        SWSCI_FUNCTION_CODE(SWSCI_SBCB, 16)
+#define SWSCI_SBCB_SUSPEND_RESUME      SWSCI_FUNCTION_CODE(SWSCI_SBCB, 17)
+#define SWSCI_SBCB_SET_SPREAD_SPECTRUM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 18)
+#define SWSCI_SBCB_POST_VBE_PM         SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
+#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO        SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
+
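/*
 * Illustrative sketch (not part of this patch): how SWSCI_FUNCTION_CODE()
 * packs a main/sub function pair into the SCIC request word, spelled out
 * for two of the codes defined above.
 */
static inline void swsci_function_code_example(void)
{
	BUILD_BUG_ON(SWSCI_GBDA_PANEL_DETAILS != ((4 << 1) | (5 << 8)));	/* 0x0508 */
	BUILD_BUG_ON(SWSCI_SBCB_DISPLAY_POWER_STATE != ((6 << 1) | (8 << 8)));	/* 0x080c */
}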
 #define ACPI_OTHER_OUTPUT (0<<8)
 #define ACPI_VGA_OUTPUT (1<<8)
 #define ACPI_TV_OUTPUT (2<<8)
@@ -158,6 +228,171 @@ struct opregion_asle {
 #define ACPI_LVDS_OUTPUT (4<<8)
 
 #ifdef CONFIG_ACPI
+static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct opregion_swsci __iomem *swsci = dev_priv->opregion.swsci;
+       u32 main_function, sub_function, scic;
+       u16 pci_swsci;
+       u32 dslp;
+
+       if (!swsci)
+               return -ENODEV;
+
+       main_function = (function & SWSCI_SCIC_MAIN_FUNCTION_MASK) >>
+               SWSCI_SCIC_MAIN_FUNCTION_SHIFT;
+       sub_function = (function & SWSCI_SCIC_SUB_FUNCTION_MASK) >>
+               SWSCI_SCIC_SUB_FUNCTION_SHIFT;
+
+       /* Check if we can call the function. See swsci_setup for details. */
+       if (main_function == SWSCI_SBCB) {
+               if ((dev_priv->opregion.swsci_sbcb_sub_functions &
+                    (1 << sub_function)) == 0)
+                       return -EINVAL;
+       } else if (main_function == SWSCI_GBDA) {
+               if ((dev_priv->opregion.swsci_gbda_sub_functions &
+                    (1 << sub_function)) == 0)
+                       return -EINVAL;
+       }
+
+       /* Driver sleep timeout in ms. */
+       dslp = ioread32(&swsci->dslp);
+       if (!dslp) {
+               /* The spec says 2ms should be the default, but it's too small
+                * for some machines. */
+               dslp = 50;
+       } else if (dslp > 500) {
+               /* Hey bios, trust must be earned. */
+               WARN_ONCE(1, "excessive driver sleep timeout (DSLP) %u\n", dslp);
+               dslp = 500;
+       }
+
+       /* The spec tells us to do this, but we are the only user... */
+       scic = ioread32(&swsci->scic);
+       if (scic & SWSCI_SCIC_INDICATOR) {
+               DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+               return -EBUSY;
+       }
+
+       scic = function | SWSCI_SCIC_INDICATOR;
+
+       iowrite32(parm, &swsci->parm);
+       iowrite32(scic, &swsci->scic);
+
+       /* Ensure SCI event is selected and event trigger is cleared. */
+       pci_read_config_word(dev->pdev, PCI_SWSCI, &pci_swsci);
+       if (!(pci_swsci & PCI_SWSCI_SCISEL) || (pci_swsci & PCI_SWSCI_GSSCIE)) {
+               pci_swsci |= PCI_SWSCI_SCISEL;
+               pci_swsci &= ~PCI_SWSCI_GSSCIE;
+               pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+       }
+
+       /* Use event trigger to tell bios to check the mail. */
+       pci_swsci |= PCI_SWSCI_GSSCIE;
+       pci_write_config_word(dev->pdev, PCI_SWSCI, pci_swsci);
+
+       /* Poll for the result. */
+#define C (((scic = ioread32(&swsci->scic)) & SWSCI_SCIC_INDICATOR) == 0)
+       if (wait_for(C, dslp)) {
+               DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+               return -ETIMEDOUT;
+       }
+
+       scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
+               SWSCI_SCIC_EXIT_STATUS_SHIFT;
+
+       /* Note: scic == 0 is an error! */
+       if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
+               DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+               return -EIO;
+       }
+
+       if (parm_out)
+               *parm_out = ioread32(&swsci->parm);
+
+       return 0;
+
+#undef C
+}
+
+#define DISPLAY_TYPE_CRT                       0
+#define DISPLAY_TYPE_TV                                1
+#define DISPLAY_TYPE_EXTERNAL_FLAT_PANEL       2
+#define DISPLAY_TYPE_INTERNAL_FLAT_PANEL       3
+
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+                                 bool enable)
+{
+       struct drm_device *dev = intel_encoder->base.dev;
+       u32 parm = 0;
+       u32 type = 0;
+       u32 port;
+
+       /* don't care about old stuff for now */
+       if (!HAS_DDI(dev))
+               return 0;
+
+       port = intel_ddi_get_encoder_port(intel_encoder);
+       if (port == PORT_E) {
+               port = 0;
+       } else {
+               parm |= 1 << port;
+               port++;
+       }
+
+       if (!enable)
+               parm |= 4 << 8;
+
+       switch (intel_encoder->type) {
+       case INTEL_OUTPUT_ANALOG:
+               type = DISPLAY_TYPE_CRT;
+               break;
+       case INTEL_OUTPUT_UNKNOWN:
+       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_HDMI:
+               type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
+               break;
+       case INTEL_OUTPUT_EDP:
+               type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
+               break;
+       default:
+               WARN_ONCE(1, "unsupported intel_encoder type %d\n",
+                         intel_encoder->type);
+               return -EINVAL;
+       }
+
+       parm |= type << (16 + port * 3);
+
+       return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
+}
+
+static const struct {
+       pci_power_t pci_power_state;
+       u32 parm;
+} power_state_map[] = {
+       { PCI_D0,       0x00 },
+       { PCI_D1,       0x01 },
+       { PCI_D2,       0x02 },
+       { PCI_D3hot,    0x04 },
+       { PCI_D3cold,   0x04 },
+};
+
+int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+       int i;
+
+       if (!HAS_DDI(dev))
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
+               if (state == power_state_map[i].pci_power_state)
+                       return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE,
+                                    power_state_map[i].parm, NULL);
+       }
+
+       return -EINVAL;
+}
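/*
 * Illustrative sketch (not part of this patch): the parameter word
 * intel_opregion_notify_encoder() above would build when enabling an HDMI
 * encoder on DDI port B; the port and type values are assumptions for the
 * example only.  For comparison, intel_opregion_notify_adapter(dev, PCI_D3hot)
 * would send parameter 0x04 via power_state_map[].
 */
static inline u32 example_display_power_state_parm(void)
{
	u32 parm = 0;
	u32 port = 1;					/* PORT_B */
	u32 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;	/* HDMI */

	parm |= 1 << port;		/* not port E: flag the port bit... */
	port++;				/* ...and switch to the 1-based index */
	/* enable == true, so the "4 << 8" disable marker stays clear */
	parm |= type << (16 + port * 3);

	return parm;			/* 0x00800002 */
}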
+
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -166,12 +401,13 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
        DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
        if (!(bclp & ASLE_BCLP_VALID))
-               return ASLE_BACKLIGHT_FAILED;
+               return ASLC_BACKLIGHT_FAILED;
 
        bclp &= ASLE_BCLP_MSK;
        if (bclp > 255)
-               return ASLE_BACKLIGHT_FAILED;
+               return ASLC_BACKLIGHT_FAILED;
 
+       DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
        intel_panel_set_backlight(dev, bclp, 255);
        iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
 
@@ -183,13 +419,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
        /* alsi is the current ALS reading in lux. 0 indicates below sensor
           range, 0xffff indicates above sensor range. 1-0xfffe are valid */
        DRM_DEBUG_DRIVER("Illum is not supported\n");
-       return ASLE_ALS_ILLUM_FAILED;
+       return ASLC_ALS_ILLUM_FAILED;
 }
 
 static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
 {
        DRM_DEBUG_DRIVER("PWM freq is not supported\n");
-       return ASLE_PWM_FREQ_FAILED;
+       return ASLC_PWM_FREQ_FAILED;
 }
 
 static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
@@ -197,39 +433,106 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
        /* Panel fitting is currently controlled by the X code, so this is a
           noop until modesetting support works fully */
        DRM_DEBUG_DRIVER("Pfit is not supported\n");
-       return ASLE_PFIT_FAILED;
+       return ASLC_PFIT_FAILED;
+}
+
+static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot)
+{
+       DRM_DEBUG_DRIVER("SROT is not supported\n");
+       return ASLC_ROTATION_ANGLES_FAILED;
+}
+
+static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
+{
+       if (!iuer)
+               DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+       if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
+               DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+       if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
+               DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+       if (iuer & ASLE_IUER_VOLUME_UP_BTN)
+               DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+       if (iuer & ASLE_IUER_WINDOWS_BTN)
+               DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+       if (iuer & ASLE_IUER_POWER_BTN)
+               DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+
+       return ASLC_BUTTON_ARRAY_FAILED;
+}
+
+static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
+{
+       if (iuer & ASLE_IUER_CONVERTIBLE)
+               DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+       else
+               DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+
+       return ASLC_CONVERTIBLE_FAILED;
+}
+
+static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
+{
+       if (iuer & ASLE_IUER_DOCKING)
+               DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+       else
+               DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+
+       return ASLC_DOCKING_FAILED;
+}
+
+static u32 asle_isct_state(struct drm_device *dev)
+{
+       DRM_DEBUG_DRIVER("ISCT is not supported\n");
+       return ASLC_ISCT_STATE_FAILED;
 }
 
 void intel_opregion_asle_intr(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
-       u32 asle_stat = 0;
-       u32 asle_req;
+       u32 aslc_stat = 0;
+       u32 aslc_req;
 
        if (!asle)
                return;
 
-       asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK;
+       aslc_req = ioread32(&asle->aslc);
 
-       if (!asle_req) {
-               DRM_DEBUG_DRIVER("non asle set request??\n");
+       if (!(aslc_req & ASLC_REQ_MSK)) {
+               DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
+                                aslc_req);
                return;
        }
 
-       if (asle_req & ASLE_SET_ALS_ILLUM)
-               asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
+       if (aslc_req & ASLC_SET_ALS_ILLUM)
+               aslc_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi));
+
+       if (aslc_req & ASLC_SET_BACKLIGHT)
+               aslc_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+
+       if (aslc_req & ASLC_SET_PFIT)
+               aslc_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
+
+       if (aslc_req & ASLC_SET_PWM_FREQ)
+               aslc_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
 
-       if (asle_req & ASLE_SET_BACKLIGHT)
-               asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp));
+       if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
+               aslc_stat |= asle_set_supported_rotation_angles(dev,
+                                                       ioread32(&asle->srot));
 
-       if (asle_req & ASLE_SET_PFIT)
-               asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit));
+       if (aslc_req & ASLC_BUTTON_ARRAY)
+               aslc_stat |= asle_set_button_array(dev, ioread32(&asle->iuer));
 
-       if (asle_req & ASLE_SET_PWM_FREQ)
-               asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb));
+       if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
+               aslc_stat |= asle_set_convertible(dev, ioread32(&asle->iuer));
 
-       iowrite32(asle_stat, &asle->aslc);
+       if (aslc_req & ASLC_DOCKING_INDICATOR)
+               aslc_stat |= asle_set_docking(dev, ioread32(&asle->iuer));
+
+       if (aslc_req & ASLC_ISCT_STATE_CHANGE)
+               aslc_stat |= asle_isct_state(dev);
+
+       iowrite32(aslc_stat, &asle->aslc);
 }
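/*
 * Illustrative sketch (not part of this patch): one pass through the ASLC
 * handler above for an assumed backlight-only request.
 *
 *   aslc (read)  = ASLC_SET_BACKLIGHT, bclp = ASLE_BCLP_VALID | 128
 *   asle_set_backlight() sets the backlight to 128/255 and writes
 *   cblv = DIV_ROUND_UP(128 * 100, 255) | ASLE_CBLV_VALID, i.e. 51% | valid
 *   aslc (write) = 0, i.e. no ASLC_BACKLIGHT_FAILED bit reported back
 */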
 
 #define ACPI_EV_DISPLAY_SWITCH (1<<0)
@@ -446,8 +749,68 @@ void intel_opregion_fini(struct drm_device *dev)
        opregion->swsci = NULL;
        opregion->asle = NULL;
        opregion->vbt = NULL;
+       opregion->lid_state = NULL;
+}
+
+static void swsci_setup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_opregion *opregion = &dev_priv->opregion;
+       bool requested_callbacks = false;
+       u32 tmp;
+
+       /* Sub-function code 0 is okay, let's allow them. */
+       opregion->swsci_gbda_sub_functions = 1;
+       opregion->swsci_sbcb_sub_functions = 1;
+
+       /* We use GBDA to ask for supported GBDA calls. */
+       if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
+               /* make the bits match the sub-function codes */
+               tmp <<= 1;
+               opregion->swsci_gbda_sub_functions |= tmp;
+       }
+
+       /*
+        * We also use GBDA to ask for _requested_ SBCB callbacks. The driver
+        * must not call interfaces that are not specifically requested by the
+        * bios.
+        */
+       if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
+               /* here, the bits already match sub-function codes */
+               opregion->swsci_sbcb_sub_functions |= tmp;
+               requested_callbacks = true;
+       }
+
+       /*
+        * But we use SBCB to ask for _supported_ SBCB calls. Being supported
+        * does not mean a callback is _requested_, and we still must not call
+        * interfaces that have not been requested.
+        */
+       if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
+               /* make the bits match the sub-function codes */
+               u32 low = tmp & 0x7ff;
+               u32 high = tmp & ~0xfff; /* bit 11 is reserved */
+               tmp = (high << 4) | (low << 1) | 1;
+
+               /* best guess what to do with supported wrt requested */
+               if (requested_callbacks) {
+                       u32 req = opregion->swsci_sbcb_sub_functions;
+                       if ((req & tmp) != req)
+                               DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+                       /* XXX: for now, trust the requested callbacks */
+                       /* opregion->swsci_sbcb_sub_functions &= tmp; */
+               } else {
+                       opregion->swsci_sbcb_sub_functions |= tmp;
+               }
+       }
+
+       DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+                        opregion->swsci_gbda_sub_functions,
+                        opregion->swsci_sbcb_sub_functions);
 }
-#endif
+#else /* CONFIG_ACPI */
+static inline void swsci_setup(struct drm_device *dev) {}
+#endif  /* CONFIG_ACPI */
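/*
 * Illustrative sketch (not part of this patch): the SBCB bit remapping in
 * swsci_setup() above, applied to an assumed "supported callbacks" reply.
 *
 *   tmp  = 0x000e0003                      (hypothetical SBCB reply)
 *   low  = tmp & 0x7ff        = 0x00000003
 *   high = tmp & ~0xfff       = 0x000e0000 (bit 11 is reserved)
 *   tmp  = (high << 4) | (low << 1) | 1 = 0x00e00007
 *
 * so sub-functions 0, 1, 2, 21, 22 and 23 end up flagged as supported in
 * opregion->swsci_sbcb_sub_functions.
 */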
 
 int intel_opregion_setup(struct drm_device *dev)
 {
@@ -490,6 +853,7 @@ int intel_opregion_setup(struct drm_device *dev)
        if (mboxes & MBOX_SWSCI) {
                DRM_DEBUG_DRIVER("SWSCI supported\n");
                opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+               swsci_setup(dev);
        }
        if (mboxes & MBOX_ASLE) {
                DRM_DEBUG_DRIVER("ASLE supported\n");
index ddfd0aefe0c0906a587addfcc0d35bda414400b5..a98a990fbab3561368239ce7a7ac020c1d44f0c7 100644 (file)
@@ -821,14 +821,11 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
 static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
                                          struct intel_crtc *crtc)
 {
-       drm_i915_private_t *dev_priv = overlay->dev->dev_private;
-
        if (!crtc->active)
                return -EINVAL;
 
        /* can't use the overlay with double wide pipe */
-       if (INTEL_INFO(overlay->dev)->gen < 4 &&
-           (I915_READ(PIPECONF(crtc->pipe)) & (PIPECONF_DOUBLE_WIDE | PIPECONF_ENABLE)) != PIPECONF_ENABLE)
+       if (crtc->config.double_wide)
                return -EINVAL;
 
        return 0;
@@ -1056,7 +1053,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
                return ret;
        }
 
-       params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+       params = kmalloc(sizeof(*params), GFP_KERNEL);
        if (!params)
                return -ENOMEM;
 
@@ -1323,7 +1320,7 @@ void intel_setup_overlay(struct drm_device *dev)
        if (!HAS_OVERLAY(dev))
                return;
 
-       overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+       overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
        if (!overlay)
                return;
 
index 293564a2896a19038cc87ecba7648bce511388d2..09b2994c9b374b36e026bfc074d417701a5227d3 100644 (file)
@@ -50,23 +50,22 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
                        struct intel_crtc_config *pipe_config,
                        int fitting_mode)
 {
-       struct drm_display_mode *mode, *adjusted_mode;
+       struct drm_display_mode *adjusted_mode;
        int x, y, width, height;
 
-       mode = &pipe_config->requested_mode;
        adjusted_mode = &pipe_config->adjusted_mode;
 
        x = y = width = height = 0;
 
        /* Native modes don't need fitting */
-       if (adjusted_mode->hdisplay == mode->hdisplay &&
-           adjusted_mode->vdisplay == mode->vdisplay)
+       if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+           adjusted_mode->vdisplay == pipe_config->pipe_src_h)
                goto done;
 
        switch (fitting_mode) {
        case DRM_MODE_SCALE_CENTER:
-               width = mode->hdisplay;
-               height = mode->vdisplay;
+               width = pipe_config->pipe_src_w;
+               height = pipe_config->pipe_src_h;
                x = (adjusted_mode->hdisplay - width + 1)/2;
                y = (adjusted_mode->vdisplay - height + 1)/2;
                break;
@@ -74,17 +73,19 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
        case DRM_MODE_SCALE_ASPECT:
                /* Scale but preserve the aspect ratio */
                {
-                       u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
-                       u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+                       u32 scaled_width = adjusted_mode->hdisplay
+                               * pipe_config->pipe_src_h;
+                       u32 scaled_height = pipe_config->pipe_src_w
+                               * adjusted_mode->vdisplay;
                        if (scaled_width > scaled_height) { /* pillar */
-                               width = scaled_height / mode->vdisplay;
+                               width = scaled_height / pipe_config->pipe_src_h;
                                if (width & 1)
                                        width++;
                                x = (adjusted_mode->hdisplay - width + 1) / 2;
                                y = 0;
                                height = adjusted_mode->vdisplay;
                        } else if (scaled_width < scaled_height) { /* letter */
-                               height = scaled_width / mode->hdisplay;
+                               height = scaled_width / pipe_config->pipe_src_w;
                                if (height & 1)
                                    height++;
                                y = (adjusted_mode->vdisplay - height + 1) / 2;
@@ -171,20 +172,96 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
        return (FACTOR * ratio + FACTOR/2) / FACTOR;
 }
 
+static void i965_scale_aspect(struct intel_crtc_config *pipe_config,
+                             u32 *pfit_control)
+{
+       struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+       u32 scaled_width = adjusted_mode->hdisplay *
+               pipe_config->pipe_src_h;
+       u32 scaled_height = pipe_config->pipe_src_w *
+               adjusted_mode->vdisplay;
+
+       /* 965+ is easy, it does everything in hw */
+       if (scaled_width > scaled_height)
+               *pfit_control |= PFIT_ENABLE |
+                       PFIT_SCALING_PILLAR;
+       else if (scaled_width < scaled_height)
+               *pfit_control |= PFIT_ENABLE |
+                       PFIT_SCALING_LETTER;
+       else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
+               *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
+}
+
+static void i9xx_scale_aspect(struct intel_crtc_config *pipe_config,
+                             u32 *pfit_control, u32 *pfit_pgm_ratios,
+                             u32 *border)
+{
+       struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
+       u32 scaled_width = adjusted_mode->hdisplay *
+               pipe_config->pipe_src_h;
+       u32 scaled_height = pipe_config->pipe_src_w *
+               adjusted_mode->vdisplay;
+       u32 bits;
+
+       /*
+        * For earlier chips we have to calculate the scaling
+        * ratio by hand and program it into the
+        * PFIT_PGM_RATIO register
+        */
+       if (scaled_width > scaled_height) { /* pillar */
+               centre_horizontally(adjusted_mode,
+                                   scaled_height /
+                                   pipe_config->pipe_src_h);
+
+               *border = LVDS_BORDER_ENABLE;
+               if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
+                       bits = panel_fitter_scaling(pipe_config->pipe_src_h,
+                                                   adjusted_mode->vdisplay);
+
+                       *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+                                            bits << PFIT_VERT_SCALE_SHIFT);
+                       *pfit_control |= (PFIT_ENABLE |
+                                         VERT_INTERP_BILINEAR |
+                                         HORIZ_INTERP_BILINEAR);
+               }
+       } else if (scaled_width < scaled_height) { /* letter */
+               centre_vertically(adjusted_mode,
+                                 scaled_width /
+                                 pipe_config->pipe_src_w);
+
+               *border = LVDS_BORDER_ENABLE;
+               if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
+                       bits = panel_fitter_scaling(pipe_config->pipe_src_w,
+                                                   adjusted_mode->hdisplay);
+
+                       *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+                                            bits << PFIT_VERT_SCALE_SHIFT);
+                       *pfit_control |= (PFIT_ENABLE |
+                                         VERT_INTERP_BILINEAR |
+                                         HORIZ_INTERP_BILINEAR);
+               }
+       } else {
+               /* Aspects match, let hw scale both directions */
+               *pfit_control |= (PFIT_ENABLE |
+                                 VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
+                                 VERT_INTERP_BILINEAR |
+                                 HORIZ_INTERP_BILINEAR);
+       }
+}
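/*
 * Illustrative sketch (not part of this patch): i9xx_scale_aspect() above
 * with an assumed 1024x768 pipe source on a 1920x1080 panel timing.
 *
 *   scaled_width  = 1920 * 768  = 1474560
 *   scaled_height = 1024 * 1080 = 1105920  -> pillarbox branch
 *   centre_horizontally(adjusted_mode, 1105920 / 768 = 1440)
 *   pipe_src_h (768) != vdisplay (1080), so panel_fitter_scaling(768, 1080)
 *   is programmed into both ratio fields and PFIT_ENABLE is set with
 *   bilinear interpolation.
 */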
+
 void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
                              struct intel_crtc_config *pipe_config,
                              int fitting_mode)
 {
        struct drm_device *dev = intel_crtc->base.dev;
        u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
-       struct drm_display_mode *mode, *adjusted_mode;
+       struct drm_display_mode *adjusted_mode;
 
-       mode = &pipe_config->requested_mode;
        adjusted_mode = &pipe_config->adjusted_mode;
 
        /* Native modes don't need fitting */
-       if (adjusted_mode->hdisplay == mode->hdisplay &&
-           adjusted_mode->vdisplay == mode->vdisplay)
+       if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
+           adjusted_mode->vdisplay == pipe_config->pipe_src_h)
                goto out;
 
        switch (fitting_mode) {
@@ -193,81 +270,25 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
                 * For centered modes, we have to calculate border widths &
                 * heights and modify the values programmed into the CRTC.
                 */
-               centre_horizontally(adjusted_mode, mode->hdisplay);
-               centre_vertically(adjusted_mode, mode->vdisplay);
+               centre_horizontally(adjusted_mode, pipe_config->pipe_src_w);
+               centre_vertically(adjusted_mode, pipe_config->pipe_src_h);
                border = LVDS_BORDER_ENABLE;
                break;
        case DRM_MODE_SCALE_ASPECT:
                /* Scale but preserve the aspect ratio */
-               if (INTEL_INFO(dev)->gen >= 4) {
-                       u32 scaled_width = adjusted_mode->hdisplay *
-                               mode->vdisplay;
-                       u32 scaled_height = mode->hdisplay *
-                               adjusted_mode->vdisplay;
-
-                       /* 965+ is easy, it does everything in hw */
-                       if (scaled_width > scaled_height)
-                               pfit_control |= PFIT_ENABLE |
-                                       PFIT_SCALING_PILLAR;
-                       else if (scaled_width < scaled_height)
-                               pfit_control |= PFIT_ENABLE |
-                                       PFIT_SCALING_LETTER;
-                       else if (adjusted_mode->hdisplay != mode->hdisplay)
-                               pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
-               } else {
-                       u32 scaled_width = adjusted_mode->hdisplay *
-                               mode->vdisplay;
-                       u32 scaled_height = mode->hdisplay *
-                               adjusted_mode->vdisplay;
-                       /*
-                        * For earlier chips we have to calculate the scaling
-                        * ratio by hand and program it into the
-                        * PFIT_PGM_RATIO register
-                        */
-                       if (scaled_width > scaled_height) { /* pillar */
-                               centre_horizontally(adjusted_mode,
-                                                   scaled_height /
-                                                   mode->vdisplay);
-
-                               border = LVDS_BORDER_ENABLE;
-                               if (mode->vdisplay != adjusted_mode->vdisplay) {
-                                       u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
-                                       pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-                                                           bits << PFIT_VERT_SCALE_SHIFT);
-                                       pfit_control |= (PFIT_ENABLE |
-                                                        VERT_INTERP_BILINEAR |
-                                                        HORIZ_INTERP_BILINEAR);
-                               }
-                       } else if (scaled_width < scaled_height) { /* letter */
-                               centre_vertically(adjusted_mode,
-                                                 scaled_width /
-                                                 mode->hdisplay);
-
-                               border = LVDS_BORDER_ENABLE;
-                               if (mode->hdisplay != adjusted_mode->hdisplay) {
-                                       u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
-                                       pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
-                                                           bits << PFIT_VERT_SCALE_SHIFT);
-                                       pfit_control |= (PFIT_ENABLE |
-                                                        VERT_INTERP_BILINEAR |
-                                                        HORIZ_INTERP_BILINEAR);
-                               }
-                       } else {
-                               /* Aspects match, Let hw scale both directions */
-                               pfit_control |= (PFIT_ENABLE |
-                                                VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-                                                VERT_INTERP_BILINEAR |
-                                                HORIZ_INTERP_BILINEAR);
-                       }
-               }
+               if (INTEL_INFO(dev)->gen >= 4)
+                       i965_scale_aspect(pipe_config, &pfit_control);
+               else
+                       i9xx_scale_aspect(pipe_config, &pfit_control,
+                                         &pfit_pgm_ratios, &border);
                break;
        case DRM_MODE_SCALE_FULLSCREEN:
                /*
                 * Full scaling, even if it changes the aspect ratio.
                 * Fortunately this is all done for us in hw.
                 */
-               if (mode->vdisplay != adjusted_mode->vdisplay ||
-                   mode->hdisplay != adjusted_mode->hdisplay) {
+               if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
+                   pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
                        pfit_control |= PFIT_ENABLE;
                        if (INTEL_INFO(dev)->gen >= 4)
                                pfit_control |= PFIT_SCALING_AUTO;
@@ -308,7 +329,7 @@ static int is_backlight_combination_mode(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (INTEL_INFO(dev)->gen >= 4)
+       if (IS_GEN4(dev))
                return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
 
        if (IS_GEN2(dev))
@@ -351,6 +372,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
                                I915_WRITE(BLC_PWM_CTL2,
                                           dev_priv->regfile.saveBLC_PWM_CTL2);
                }
+
+               if (IS_VALLEYVIEW(dev) && !val)
+                       val = 0x0f42ffff;
        }
 
        return val;
@@ -441,7 +465,8 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
        I915_WRITE(BLC_PWM_CPU_CTL, val | level);
 }
 
-static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
+static void intel_panel_actually_set_backlight(struct drm_device *dev,
+                                              u32 level)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp;
@@ -549,6 +574,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
                intel_pipe_to_cpu_transcoder(dev_priv, pipe);
        unsigned long flags;
 
+       DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+
        spin_lock_irqsave(&dev_priv->backlight.lock, flags);
 
        if (dev_priv->backlight.level == 0) {
@@ -607,10 +634,24 @@ set_level:
        spin_unlock_irqrestore(&dev_priv->backlight.lock, flags);
 }
 
+/* FIXME: use VBT vals to init PWM_CTL and PWM_CTL2 correctly */
+static void intel_panel_init_backlight_regs(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_VALLEYVIEW(dev)) {
+               u32 cur_val = I915_READ(BLC_PWM_CTL) &
+                       BACKLIGHT_DUTY_CYCLE_MASK;
+               I915_WRITE(BLC_PWM_CTL, (0xf42 << 16) | cur_val);
+       }
+}
+
 static void intel_panel_init_backlight(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       intel_panel_init_backlight_regs(dev);
+
        dev_priv->backlight.level = intel_panel_get_backlight(dev);
        dev_priv->backlight.enabled = dev_priv->backlight.level != 0;
 }
@@ -637,10 +678,12 @@ intel_panel_detect(struct drm_device *dev)
        }
 }
 
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
 static int intel_panel_update_status(struct backlight_device *bd)
 {
        struct drm_device *dev = bl_get_data(bd);
+       DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
+                     bd->props.brightness, bd->props.max_brightness);
        intel_panel_set_backlight(dev, bd->props.brightness,
                                  bd->props.max_brightness);
        return 0;
@@ -683,7 +726,7 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
        }
        dev_priv->backlight.device =
                backlight_device_register("intel_backlight",
-                                         &connector->kdev, dev,
+                                         connector->kdev, dev,
                                          &intel_panel_bl_ops, &props);
 
        if (IS_ERR(dev_priv->backlight.device)) {
index dd176b7296c1c44904a163cfe82960f6e196249d..9b70c34fce78a13e4e26aea6c1082e2ab415c0b9 100644 (file)
 #include <linux/module.h>
 #include <drm/i915_powerwell.h>
 
+/**
+ * RC6 is a special power stage which allows the GPU to enter a very
+ * low-voltage mode when idle, going down to 0V while in this stage. This
+ * stage is entered automatically when the GPU is idle and RC6 support is
+ * enabled, and the GPU wakes up automatically as soon as a new workload
+ * arrives.
+ *
+ * There are different RC6 modes available on Intel GPUs, which differ in
+ * the latency required to enter and leave RC6 and in the voltage consumed
+ * by the GPU in the different states.
+ *
+ * The combination of the following flags defines which states the GPU is
+ * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
+ * RC6pp is the deepest RC6. Hardware support for them varies with the GPU,
+ * BIOS, chipset and platform. RC6 is usually the safest one and the one
+ * that brings the most power savings; deeper states save more power, but
+ * require higher latency to enter and wake up from.
+ */
+#define INTEL_RC6_ENABLE                       (1<<0)
+#define INTEL_RC6p_ENABLE                      (1<<1)
+#define INTEL_RC6pp_ENABLE                     (1<<2)
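/*
 * Illustrative sketch (not part of this patch): these are plain mask bits,
 * so allowing RC6 and deep RC6 but not RC6pp would be expressed as
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) == 0x3.
 */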
+
 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
  * framebuffer contents in-memory, aiming at reducing the required bandwidth
  * during in-memory transfers and, therefore, reduce power consumption.
  * FBC-related functionality can be enabled via the i915.i915_enable_fbc
  * module parameter.
  */
 
-static bool intel_crtc_active(struct drm_crtc *crtc)
-{
-       /* Be paranoid as we can arrive here with only partial
-        * state retrieved from the hardware during setup.
-        */
-       return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
-}
-
 static void i8xx_disable_fbc(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -378,7 +391,7 @@ static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
        intel_cancel_fbc_work(dev_priv);
 
-       work = kzalloc(sizeof *work, GFP_KERNEL);
+       work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (work == NULL) {
                DRM_ERROR("Failed to allocate FBC work structure\n");
                dev_priv->display.enable_fbc(crtc, interval);
@@ -458,7 +471,8 @@ void intel_update_fbc(struct drm_device *dev)
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
-       unsigned int max_hdisplay, max_vdisplay;
+       const struct drm_display_mode *adjusted_mode;
+       unsigned int max_width, max_height;
 
        if (!I915_HAS_FBC(dev)) {
                set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
@@ -482,7 +496,7 @@ void intel_update_fbc(struct drm_device *dev)
         */
        list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
                if (intel_crtc_active(tmp_crtc) &&
-                   !to_intel_crtc(tmp_crtc)->primary_disabled) {
+                   to_intel_crtc(tmp_crtc)->primary_enabled) {
                        if (crtc) {
                                if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
                                        DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
@@ -502,6 +516,7 @@ void intel_update_fbc(struct drm_device *dev)
        fb = crtc->fb;
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;
+       adjusted_mode = &intel_crtc->config.adjusted_mode;
 
        if (i915_enable_fbc < 0 &&
            INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
@@ -514,8 +529,8 @@ void intel_update_fbc(struct drm_device *dev)
                        DRM_DEBUG_KMS("fbc disabled per module param\n");
                goto out_disable;
        }
-       if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
-           (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
+       if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+           (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
                if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
                        DRM_DEBUG_KMS("mode incompatible with compression, "
                                      "disabling\n");
@@ -523,14 +538,14 @@ void intel_update_fbc(struct drm_device *dev)
        }
 
        if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
-               max_hdisplay = 4096;
-               max_vdisplay = 2048;
+               max_width = 4096;
+               max_height = 2048;
        } else {
-               max_hdisplay = 2048;
-               max_vdisplay = 1536;
+               max_width = 2048;
+               max_height = 1536;
        }
-       if ((crtc->mode.hdisplay > max_hdisplay) ||
-           (crtc->mode.vdisplay > max_vdisplay)) {
+       if (intel_crtc->config.pipe_src_w > max_width ||
+           intel_crtc->config.pipe_src_h > max_height) {
                if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
                        DRM_DEBUG_KMS("mode too large for compression, disabling\n");
                goto out_disable;
@@ -1087,8 +1102,9 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
        return enabled;
 }
 
-static void pineview_update_wm(struct drm_device *dev)
+static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
+       struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        const struct cxsr_latency *latency;
@@ -1105,8 +1121,12 @@ static void pineview_update_wm(struct drm_device *dev)
 
        crtc = single_enabled_crtc(dev);
        if (crtc) {
-               int clock = crtc->mode.clock;
+               const struct drm_display_mode *adjusted_mode;
                int pixel_size = crtc->fb->bits_per_pixel / 8;
+               int clock;
+
+               adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+               clock = adjusted_mode->crtc_clock;
 
                /* Display SR */
                wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1166,6 +1186,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
                            int *cursor_wm)
 {
        struct drm_crtc *crtc;
+       const struct drm_display_mode *adjusted_mode;
        int htotal, hdisplay, clock, pixel_size;
        int line_time_us, line_count;
        int entries, tlb_miss;
@@ -1177,9 +1198,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
                return false;
        }
 
-       htotal = crtc->mode.htotal;
-       hdisplay = crtc->mode.hdisplay;
-       clock = crtc->mode.clock;
+       adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+       clock = adjusted_mode->crtc_clock;
+       htotal = adjusted_mode->htotal;
+       hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
        pixel_size = crtc->fb->bits_per_pixel / 8;
 
        /* Use the small buffer method to calculate plane watermark */
@@ -1250,6 +1272,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
                             int *display_wm, int *cursor_wm)
 {
        struct drm_crtc *crtc;
+       const struct drm_display_mode *adjusted_mode;
        int hdisplay, htotal, pixel_size, clock;
        unsigned long line_time_us;
        int line_count, line_size;
@@ -1262,9 +1285,10 @@ static bool g4x_compute_srwm(struct drm_device *dev,
        }
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       hdisplay = crtc->mode.hdisplay;
-       htotal = crtc->mode.htotal;
-       clock = crtc->mode.clock;
+       adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+       clock = adjusted_mode->crtc_clock;
+       htotal = adjusted_mode->htotal;
+       hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
        pixel_size = crtc->fb->bits_per_pixel / 8;
 
        line_time_us = (htotal * 1000) / clock;
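/*
 * Illustrative sketch (not part of this patch): with an assumed 1080p
 * timing (htotal = 2200, crtc_clock = 148500 kHz), the line time above
 * evaluates to (2200 * 1000) / 148500 = 14 us (integer division).
 */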
@@ -1303,7 +1327,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
        if (!intel_crtc_active(crtc))
                return false;
 
-       clock = crtc->mode.clock;       /* VESA DOT Clock */
+       clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
        pixel_size = crtc->fb->bits_per_pixel / 8;      /* BPP */
 
        entries = (clock / 1000) * pixel_size;
@@ -1365,8 +1389,9 @@ static void vlv_update_drain_latency(struct drm_device *dev)
 
 #define single_plane_enabled(mask) is_power_of_2(mask)
 
-static void valleyview_update_wm(struct drm_device *dev)
+static void valleyview_update_wm(struct drm_crtc *crtc)
 {
+       struct drm_device *dev = crtc->dev;
        static const int sr_latency_ns = 12000;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1424,8 +1449,9 @@ static void valleyview_update_wm(struct drm_device *dev)
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void g4x_update_wm(struct drm_device *dev)
+static void g4x_update_wm(struct drm_crtc *crtc)
 {
+       struct drm_device *dev = crtc->dev;
        static const int sr_latency_ns = 12000;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -1476,8 +1502,9 @@ static void g4x_update_wm(struct drm_device *dev)
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i965_update_wm(struct drm_device *dev)
+static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
+       struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        int srwm = 1;
@@ -1488,9 +1515,11 @@ static void i965_update_wm(struct drm_device *dev)
        if (crtc) {
                /* self-refresh has much higher latency */
                static const int sr_latency_ns = 12000;
-               int clock = crtc->mode.clock;
-               int htotal = crtc->mode.htotal;
-               int hdisplay = crtc->mode.hdisplay;
+               const struct drm_display_mode *adjusted_mode =
+                       &to_intel_crtc(crtc)->config.adjusted_mode;
+               int clock = adjusted_mode->crtc_clock;
+               int htotal = adjusted_mode->htotal;
+               int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
                int pixel_size = crtc->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;
@@ -1541,8 +1570,9 @@ static void i965_update_wm(struct drm_device *dev)
        I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
 }
 
-static void i9xx_update_wm(struct drm_device *dev)
+static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
+       struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const struct intel_watermark_params *wm_info;
        uint32_t fwater_lo;
@@ -1562,11 +1592,13 @@ static void i9xx_update_wm(struct drm_device *dev)
        fifo_size = dev_priv->display.get_fifo_size(dev, 0);
        crtc = intel_get_crtc_for_plane(dev, 0);
        if (intel_crtc_active(crtc)) {
+               const struct drm_display_mode *adjusted_mode;
                int cpp = crtc->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;
 
-               planea_wm = intel_calculate_wm(crtc->mode.clock,
+               adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+               planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
                                               wm_info, fifo_size, cpp,
                                               latency_ns);
                enabled = crtc;
@@ -1576,11 +1608,13 @@ static void i9xx_update_wm(struct drm_device *dev)
        fifo_size = dev_priv->display.get_fifo_size(dev, 1);
        crtc = intel_get_crtc_for_plane(dev, 1);
        if (intel_crtc_active(crtc)) {
+               const struct drm_display_mode *adjusted_mode;
                int cpp = crtc->fb->bits_per_pixel / 8;
                if (IS_GEN2(dev))
                        cpp = 4;
 
-               planeb_wm = intel_calculate_wm(crtc->mode.clock,
+               adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+               planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
                                               wm_info, fifo_size, cpp,
                                               latency_ns);
                if (enabled == NULL)
@@ -1607,9 +1641,11 @@ static void i9xx_update_wm(struct drm_device *dev)
        if (HAS_FW_BLC(dev) && enabled) {
                /* self-refresh has much higher latency */
                static const int sr_latency_ns = 6000;
-               int clock = enabled->mode.clock;
-               int htotal = enabled->mode.htotal;
-               int hdisplay = enabled->mode.hdisplay;
+               const struct drm_display_mode *adjusted_mode =
+                       &to_intel_crtc(enabled)->config.adjusted_mode;
+               int clock = adjusted_mode->crtc_clock;
+               int htotal = adjusted_mode->htotal;
+               int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
                int pixel_size = enabled->fb->bits_per_pixel / 8;
                unsigned long line_time_us;
                int entries;
@@ -1658,10 +1694,12 @@ static void i9xx_update_wm(struct drm_device *dev)
        }
 }
 
-static void i830_update_wm(struct drm_device *dev)
+static void i830_update_wm(struct drm_crtc *unused_crtc)
 {
+       struct drm_device *dev = unused_crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
+       const struct drm_display_mode *adjusted_mode;
        uint32_t fwater_lo;
        int planea_wm;
 
@@ -1669,7 +1707,9 @@ static void i830_update_wm(struct drm_device *dev)
        if (crtc == NULL)
                return;
 
-       planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
+       adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+       planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
+                                      &i830_wm_info,
                                       dev_priv->display.get_fifo_size(dev, 0),
                                       4, latency_ns);
        fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -1741,6 +1781,7 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
                                  int *fbc_wm, int *display_wm, int *cursor_wm)
 {
        struct drm_crtc *crtc;
+       const struct drm_display_mode *adjusted_mode;
        unsigned long line_time_us;
        int hdisplay, htotal, pixel_size, clock;
        int line_count, line_size;
@@ -1753,9 +1794,10 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
        }
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       hdisplay = crtc->mode.hdisplay;
-       htotal = crtc->mode.htotal;
-       clock = crtc->mode.clock;
+       adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
+       clock = adjusted_mode->crtc_clock;
+       htotal = adjusted_mode->htotal;
+       hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
        pixel_size = crtc->fb->bits_per_pixel / 8;
 
        line_time_us = (htotal * 1000) / clock;
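The integer line-time expression above is easy to sanity-check in isolation; the following standalone sketch (illustrative 1080p-like values, not driver code) runs the same arithmetic:

#include <stdio.h>

int main(void)
{
        int htotal = 2200;      /* total horizontal pixels per line, illustrative */
        int clock = 148500;     /* pixel clock in kHz, illustrative */

        /* Same integer expression as above: microseconds spent per scanline. */
        unsigned long line_time_us = (htotal * 1000) / clock;

        printf("line_time_us = %lu\n", line_time_us);   /* prints 14 */
        return 0;
}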
@@ -1785,8 +1827,9 @@ static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
                                   display, cursor);
 }
 
-static void ironlake_update_wm(struct drm_device *dev)
+static void ironlake_update_wm(struct drm_crtc *crtc)
 {
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int fbc_wm, plane_wm, cursor_wm;
        unsigned int enabled;
@@ -1868,8 +1911,9 @@ static void ironlake_update_wm(struct drm_device *dev)
         */
 }
 
-static void sandybridge_update_wm(struct drm_device *dev)
+static void sandybridge_update_wm(struct drm_crtc *crtc)
 {
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = dev_priv->wm.pri_latency[0] * 100;        /* In unit 0.1us */
        u32 val;
@@ -1970,8 +2014,9 @@ static void sandybridge_update_wm(struct drm_device *dev)
                   cursor_wm);
 }
 
-static void ivybridge_update_wm(struct drm_device *dev)
+static void ivybridge_update_wm(struct drm_crtc *crtc)
 {
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int latency = dev_priv->wm.pri_latency[0] * 100;        /* In unit 0.1us */
        u32 val;
@@ -2098,7 +2143,7 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t pixel_rate;
 
-       pixel_rate = intel_crtc->config.adjusted_mode.clock;
+       pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;
 
        /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
         * adjust the pixel_rate here. */
@@ -2107,8 +2152,8 @@ static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
                uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
                uint32_t pfit_size = intel_crtc->config.pch_pfit.size;
 
-               pipe_w = intel_crtc->config.requested_mode.hdisplay;
-               pipe_h = intel_crtc->config.requested_mode.vdisplay;
+               pipe_w = intel_crtc->config.pipe_src_w;
+               pipe_h = intel_crtc->config.pipe_src_h;
                pfit_w = (pfit_size >> 16) & 0xFFFF;
                pfit_h = pfit_size & 0xFFFF;
                if (pipe_w < pfit_w)
@@ -2176,27 +2221,18 @@ struct hsw_wm_maximums {
        uint16_t fbc;
 };
 
-struct hsw_wm_values {
-       uint32_t wm_pipe[3];
-       uint32_t wm_lp[3];
-       uint32_t wm_lp_spr[3];
-       uint32_t wm_linetime[3];
-       bool enable_fbc_wm;
-};
-
 /* used in computing the new watermarks state */
 struct intel_wm_config {
        unsigned int num_pipes_active;
        bool sprites_enabled;
        bool sprites_scaled;
-       bool fbc_wm_enabled;
 };
 
 /*
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct hsw_pipe_wm_parameters *params,
                                   uint32_t mem_value,
                                   bool is_lp)
 {
@@ -2225,7 +2261,7 @@ static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct hsw_pipe_wm_parameters *params,
                                   uint32_t mem_value)
 {
        uint32_t method1, method2;
@@ -2248,7 +2284,7 @@ static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct hsw_pipe_wm_parameters *params,
                                   uint32_t mem_value)
 {
        if (!params->active || !params->cur.enabled)
@@ -2262,7 +2298,7 @@ static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct hsw_pipe_wm_parameters *params,
                                   uint32_t pri_val)
 {
        if (!params->active || !params->pri.enabled)
@@ -2356,11 +2392,11 @@ static unsigned int ilk_fbc_wm_max(void)
        return 15;
 }
 
-static void ilk_wm_max(struct drm_device *dev,
-                      int level,
-                      const struct intel_wm_config *config,
-                      enum intel_ddb_partitioning ddb_partitioning,
-                      struct hsw_wm_maximums *max)
+static void ilk_compute_wm_maximums(struct drm_device *dev,
+                                   int level,
+                                   const struct intel_wm_config *config,
+                                   enum intel_ddb_partitioning ddb_partitioning,
+                                   struct hsw_wm_maximums *max)
 {
        max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
        max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
@@ -2368,9 +2404,9 @@ static void ilk_wm_max(struct drm_device *dev,
        max->fbc = ilk_fbc_wm_max();
 }
 
-static bool ilk_check_wm(int level,
-                        const struct hsw_wm_maximums *max,
-                        struct intel_wm_level *result)
+static bool ilk_validate_wm_level(int level,
+                                 const struct hsw_wm_maximums *max,
+                                 struct intel_wm_level *result)
 {
        bool ret;
 
@@ -2406,14 +2442,12 @@ static bool ilk_check_wm(int level,
                result->enable = true;
        }
 
-       DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
-
        return ret;
 }
 
 static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
                                 int level,
-                                struct hsw_pipe_wm_parameters *p,
+                                const struct hsw_pipe_wm_parameters *p,
                                 struct intel_wm_level *result)
 {
        uint16_t pri_latency = dev_priv->wm.pri_latency[level];
@@ -2434,55 +2468,6 @@ static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
        result->enable = true;
 }
 
-static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
-                             int level, struct hsw_wm_maximums *max,
-                             struct hsw_pipe_wm_parameters *params,
-                             struct intel_wm_level *result)
-{
-       enum pipe pipe;
-       struct intel_wm_level res[3];
-
-       for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
-               ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
-
-       result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
-       result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
-       result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
-       result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
-       result->enable = true;
-
-       return ilk_check_wm(level, max, result);
-}
-
-static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
-                                   enum pipe pipe,
-                                   struct hsw_pipe_wm_parameters *params)
-{
-       uint32_t pri_val, cur_val, spr_val;
-       /* WM0 latency values stored in 0.1us units */
-       uint16_t pri_latency = dev_priv->wm.pri_latency[0];
-       uint16_t spr_latency = dev_priv->wm.spr_latency[0];
-       uint16_t cur_latency = dev_priv->wm.cur_latency[0];
-
-       pri_val = ilk_compute_pri_wm(params, pri_latency, false);
-       spr_val = ilk_compute_spr_wm(params, spr_latency);
-       cur_val = ilk_compute_cur_wm(params, cur_latency);
-
-       WARN(pri_val > 127,
-            "Primary WM error, mode not supported for pipe %c\n",
-            pipe_name(pipe));
-       WARN(spr_val > 127,
-            "Sprite WM error, mode not supported for pipe %c\n",
-            pipe_name(pipe));
-       WARN(cur_val > 63,
-            "Cursor WM error, mode not supported for pipe %c\n",
-            pipe_name(pipe));
-
-       return (pri_val << WM0_PIPE_PLANE_SHIFT) |
-              (spr_val << WM0_PIPE_SPRITE_SHIFT) |
-              cur_val;
-}
-
 static uint32_t
 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 {
@@ -2554,19 +2539,22 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
                wm[3] *= 2;
 }
 
-static void intel_print_wm_latency(struct drm_device *dev,
-                                  const char *name,
-                                  const uint16_t wm[5])
+static int ilk_wm_max_level(const struct drm_device *dev)
 {
-       int level, max_level;
-
        /* how many WM levels are we expecting */
        if (IS_HASWELL(dev))
-               max_level = 4;
+               return 4;
        else if (INTEL_INFO(dev)->gen >= 6)
-               max_level = 3;
+               return 3;
        else
-               max_level = 2;
+               return 2;
+}
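A minimal sketch of how the helper above is consumed by the loops that follow; the platform checks are replaced by plain parameters here, since this is only an illustration and not the driver's code:

#include <stdio.h>

/* Illustrative stand-in: the real helper keys off IS_HASWELL()/gen. */
static int wm_max_level(int is_haswell, int gen)
{
        if (is_haswell)
                return 4;       /* WM0..WM4 */
        else if (gen >= 6)
                return 3;       /* WM0..WM3 */
        else
                return 2;       /* WM0..WM2 */
}

int main(void)
{
        int level, max_level = wm_max_level(1, 7);      /* a Haswell-like part */

        for (level = 0; level <= max_level; level++)
                printf("would handle watermark level WM%d\n", level);
        return 0;
}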
+
+static void intel_print_wm_latency(struct drm_device *dev,
+                                  const char *name,
+                                  const uint16_t wm[5])
+{
+       int level, max_level = ilk_wm_max_level(dev);
 
        for (level = 0; level <= max_level; level++) {
                unsigned int latency = wm[level];
@@ -2606,101 +2594,154 @@ static void intel_setup_wm_latency(struct drm_device *dev)
        intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
 }
 
-static void hsw_compute_wm_parameters(struct drm_device *dev,
-                                     struct hsw_pipe_wm_parameters *params,
-                                     struct hsw_wm_maximums *lp_max_1_2,
-                                     struct hsw_wm_maximums *lp_max_5_6)
+static void hsw_compute_wm_parameters(struct drm_crtc *crtc,
+                                     struct hsw_pipe_wm_parameters *p,
+                                     struct intel_wm_config *config)
 {
-       struct drm_crtc *crtc;
+       struct drm_device *dev = crtc->dev;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum pipe pipe = intel_crtc->pipe;
        struct drm_plane *plane;
-       enum pipe pipe;
-       struct intel_wm_config config = {};
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-               struct hsw_pipe_wm_parameters *p;
-
-               pipe = intel_crtc->pipe;
-               p = &params[pipe];
-
-               p->active = intel_crtc_active(crtc);
-               if (!p->active)
-                       continue;
-
-               config.num_pipes_active++;
 
+       p->active = intel_crtc_active(crtc);
+       if (p->active) {
                p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
                p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
                p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
                p->cur.bytes_per_pixel = 4;
-               p->pri.horiz_pixels =
-                       intel_crtc->config.requested_mode.hdisplay;
+               p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
                p->cur.horiz_pixels = 64;
                /* TODO: for now, assume primary and cursor planes are always enabled. */
                p->pri.enabled = true;
                p->cur.enabled = true;
        }
 
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               config->num_pipes_active += intel_crtc_active(crtc);
+
        list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
                struct intel_plane *intel_plane = to_intel_plane(plane);
-               struct hsw_pipe_wm_parameters *p;
-
-               pipe = intel_plane->pipe;
-               p = &params[pipe];
 
-               p->spr = intel_plane->wm;
+               if (intel_plane->pipe == pipe)
+                       p->spr = intel_plane->wm;
 
-               config.sprites_enabled |= p->spr.enabled;
-               config.sprites_scaled |= p->spr.scaled;
+               config->sprites_enabled |= intel_plane->wm.enabled;
+               config->sprites_scaled |= intel_plane->wm.scaled;
        }
+}
+
+/* Compute new watermarks for the pipe */
+static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
+                                 const struct hsw_pipe_wm_parameters *params,
+                                 struct intel_pipe_wm *pipe_wm)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int level, max_level = ilk_wm_max_level(dev);
+       /* LP0 watermark maximums depend on this pipe alone */
+       struct intel_wm_config config = {
+               .num_pipes_active = 1,
+               .sprites_enabled = params->spr.enabled,
+               .sprites_scaled = params->spr.scaled,
+       };
+       struct hsw_wm_maximums max;
 
-       ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+       /* LP0 watermarks always use 1/2 DDB partitioning */
+       ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
 
-       /* 5/6 split only in single pipe config on IVB+ */
-       if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
-               ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
-       else
-               *lp_max_5_6 = *lp_max_1_2;
+       for (level = 0; level <= max_level; level++)
+               ilk_compute_wm_level(dev_priv, level, params,
+                                    &pipe_wm->wm[level]);
+
+       pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
+
+       /* At least LP0 must be valid */
+       return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
 }
 
-static void hsw_compute_wm_results(struct drm_device *dev,
-                                  struct hsw_pipe_wm_parameters *params,
-                                  struct hsw_wm_maximums *lp_maximums,
-                                  struct hsw_wm_values *results)
+/*
+ * Merge the watermarks from all active pipes for a specific level.
+ */
+static void ilk_merge_wm_level(struct drm_device *dev,
+                              int level,
+                              struct intel_wm_level *ret_wm)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc;
-       struct intel_wm_level lp_results[4] = {};
-       enum pipe pipe;
-       int level, max_level, wm_lp;
+       const struct intel_crtc *intel_crtc;
 
-       for (level = 1; level <= 4; level++)
-               if (!hsw_compute_lp_wm(dev_priv, level,
-                                      lp_maximums, params,
-                                      &lp_results[level - 1]))
-                       break;
-       max_level = level - 1;
+       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+               const struct intel_wm_level *wm =
+                       &intel_crtc->wm.active.wm[level];
+
+               if (!wm->enable)
+                       return;
+
+               ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
+               ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
+               ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
+               ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
+       }
 
-       memset(results, 0, sizeof(*results));
+       ret_wm->enable = true;
+}
 
-       /* The spec says it is preferred to disable FBC WMs instead of disabling
-        * a WM level. */
-       results->enable_fbc_wm = true;
+/*
+ * Merge all low power watermarks for all active pipes.
+ */
+static void ilk_wm_merge(struct drm_device *dev,
+                        const struct hsw_wm_maximums *max,
+                        struct intel_pipe_wm *merged)
+{
+       int level, max_level = ilk_wm_max_level(dev);
+
+       merged->fbc_wm_enabled = true;
+
+       /* merge each WM1+ level */
        for (level = 1; level <= max_level; level++) {
-               if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
-                       results->enable_fbc_wm = false;
-                       lp_results[level - 1].fbc_val = 0;
+               struct intel_wm_level *wm = &merged->wm[level];
+
+               ilk_merge_wm_level(dev, level, wm);
+
+               if (!ilk_validate_wm_level(level, max, wm))
+                       break;
+
+               /*
+                * The spec says it is preferred to disable
+                * FBC WMs instead of disabling a WM level.
+                */
+               if (wm->fbc_val > max->fbc) {
+                       merged->fbc_wm_enabled = false;
+                       wm->fbc_val = 0;
                }
        }
+}
 
+static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
+{
+       /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
+       return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
+}
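To make the LP-to-level mapping above concrete, here is a standalone sketch (hypothetical struct, not the driver's type) that prints both cases named in the comment:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in: only the one fact the mapping depends on. */
struct pipe_wm_sketch {
        bool wm4_enabled;
};

/* Same expression as above: LP1,LP2,LP3 -> 1,2,3 or 1,3,4. */
static int lp_to_level(int wm_lp, const struct pipe_wm_sketch *wm)
{
        return wm_lp + (wm_lp >= 2 && wm->wm4_enabled);
}

int main(void)
{
        struct pipe_wm_sketch without_wm4 = { .wm4_enabled = false };
        struct pipe_wm_sketch with_wm4 = { .wm4_enabled = true };
        int lp;

        for (lp = 1; lp <= 3; lp++)
                printf("LP%d -> level %d (no WM4) / level %d (WM4 enabled)\n",
                       lp, lp_to_level(lp, &without_wm4),
                       lp_to_level(lp, &with_wm4));
        /* Prints 1/1, 2/3, 3/4 -- i.e. "either 1,2,3 or 1,3,4". */
        return 0;
}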
+
+static void hsw_compute_wm_results(struct drm_device *dev,
+                                  const struct intel_pipe_wm *merged,
+                                  enum intel_ddb_partitioning partitioning,
+                                  struct hsw_wm_values *results)
+{
+       struct intel_crtc *intel_crtc;
+       int level, wm_lp;
+
+       results->enable_fbc_wm = merged->fbc_wm_enabled;
+       results->partitioning = partitioning;
+
+       /* LP1+ register values */
        for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
                const struct intel_wm_level *r;
 
-               level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
-               if (level > max_level)
+               level = ilk_wm_lp_to_level(wm_lp, merged);
+
+               r = &merged->wm[level];
+               if (!r->enable)
                        break;
 
-               r = &lp_results[level - 1];
                results->wm_lp[wm_lp - 1] = HSW_WM_LP_VAL(level * 2,
                                                          r->fbc_val,
                                                          r->pri_val,
@@ -2708,116 +2749,158 @@ static void hsw_compute_wm_results(struct drm_device *dev,
                results->wm_lp_spr[wm_lp - 1] = r->spr_val;
        }
 
-       for_each_pipe(pipe)
-               results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
-                                                            &params[pipe]);
+       /* LP0 register values */
+       list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
+               enum pipe pipe = intel_crtc->pipe;
+               const struct intel_wm_level *r =
+                       &intel_crtc->wm.active.wm[0];
 
-       for_each_pipe(pipe) {
-               crtc = dev_priv->pipe_to_crtc_mapping[pipe];
-               results->wm_linetime[pipe] = hsw_compute_linetime_wm(dev, crtc);
+               if (WARN_ON(!r->enable))
+                       continue;
+
+               results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
+
+               results->wm_pipe[pipe] =
+                       (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
+                       (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
+                       r->cur_val;
        }
 }
 
 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
  * case both are at the same level. Prefer r1 in case they're the same. */
-static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
-                                                 struct hsw_wm_values *r2)
+static struct intel_pipe_wm *hsw_find_best_result(struct drm_device *dev,
+                                                 struct intel_pipe_wm *r1,
+                                                 struct intel_pipe_wm *r2)
 {
-       int i, val_r1 = 0, val_r2 = 0;
+       int level, max_level = ilk_wm_max_level(dev);
+       int level1 = 0, level2 = 0;
 
-       for (i = 0; i < 3; i++) {
-               if (r1->wm_lp[i] & WM3_LP_EN)
-                       val_r1 = r1->wm_lp[i] & WM1_LP_LATENCY_MASK;
-               if (r2->wm_lp[i] & WM3_LP_EN)
-                       val_r2 = r2->wm_lp[i] & WM1_LP_LATENCY_MASK;
+       for (level = 1; level <= max_level; level++) {
+               if (r1->wm[level].enable)
+                       level1 = level;
+               if (r2->wm[level].enable)
+                       level2 = level;
        }
 
-       if (val_r1 == val_r2) {
-               if (r2->enable_fbc_wm && !r1->enable_fbc_wm)
+       if (level1 == level2) {
+               if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
                        return r2;
                else
                        return r1;
-       } else if (val_r1 > val_r2) {
+       } else if (level1 > level2) {
                return r1;
        } else {
                return r2;
        }
 }
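As a usage note for the selection above, a condensed standalone version of the same decision (highest enabled LP level wins, FBC enablement breaks ties, r1 preferred on a full tie) could look like this; the struct is a simplified stand-in, not the driver's type:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in: just the two facts the comparison cares about. */
struct merged_wm_sketch {
        int highest_enabled_level;
        bool fbc_wm_enabled;
};

static const struct merged_wm_sketch *
pick_best(const struct merged_wm_sketch *r1, const struct merged_wm_sketch *r2)
{
        if (r1->highest_enabled_level == r2->highest_enabled_level)
                return (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) ? r2 : r1;
        return r1->highest_enabled_level > r2->highest_enabled_level ? r1 : r2;
}

int main(void)
{
        struct merged_wm_sketch lp_1_2 = { .highest_enabled_level = 3, .fbc_wm_enabled = false };
        struct merged_wm_sketch lp_5_6 = { .highest_enabled_level = 3, .fbc_wm_enabled = true };

        /* Equal levels, only 5/6 keeps FBC watermarks -> 5/6 wins. */
        printf("best is %s\n",
               pick_best(&lp_1_2, &lp_5_6) == &lp_5_6 ? "5/6" : "1/2");
        return 0;
}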
 
+/* dirty bits used to track which watermarks need changes */
+#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
+#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
+#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
+#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
+#define WM_DIRTY_FBC (1 << 24)
+#define WM_DIRTY_DDB (1 << 25)
+
+static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
+                                        const struct hsw_wm_values *old,
+                                        const struct hsw_wm_values *new)
+{
+       unsigned int dirty = 0;
+       enum pipe pipe;
+       int wm_lp;
+
+       for_each_pipe(pipe) {
+               if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
+                       dirty |= WM_DIRTY_LINETIME(pipe);
+                       /* Must disable LP1+ watermarks too */
+                       dirty |= WM_DIRTY_LP_ALL;
+               }
+
+               if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
+                       dirty |= WM_DIRTY_PIPE(pipe);
+                       /* Must disable LP1+ watermarks too */
+                       dirty |= WM_DIRTY_LP_ALL;
+               }
+       }
+
+       if (old->enable_fbc_wm != new->enable_fbc_wm) {
+               dirty |= WM_DIRTY_FBC;
+               /* Must disable LP1+ watermarks too */
+               dirty |= WM_DIRTY_LP_ALL;
+       }
+
+       if (old->partitioning != new->partitioning) {
+               dirty |= WM_DIRTY_DDB;
+               /* Must disable LP1+ watermarks too */
+               dirty |= WM_DIRTY_LP_ALL;
+       }
+
+       /* LP1+ watermarks already deemed dirty, no need to continue */
+       if (dirty & WM_DIRTY_LP_ALL)
+               return dirty;
+
+       /* Find the lowest numbered LP1+ watermark in need of an update... */
+       for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
+               if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
+                   old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
+                       break;
+       }
+
+       /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
+       for (; wm_lp <= 3; wm_lp++)
+               dirty |= WM_DIRTY_LP(wm_lp);
+
+       return dirty;
+}
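A self-contained sketch of how a caller would consult the dirty mask computed above, assuming the same bit layout as the macros (pipes in bits 0-2, linetime in 8-10, LP1-LP3 in 16-18, FBC/DDB in 24/25); register writes are replaced by printf since this is illustration only:

#include <stdio.h>

#define WM_DIRTY_PIPE(pipe)     (1u << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1u << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp)      (1u << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL         (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC            (1u << 24)
#define WM_DIRTY_DDB            (1u << 25)

int main(void)
{
        /* Pretend pipe B's WM0 changed: its bit is set, plus all LP1+ bits. */
        unsigned int dirty = WM_DIRTY_PIPE(1) | WM_DIRTY_LP_ALL;

        if (dirty & WM_DIRTY_LP(3))
                printf("would rewrite the WM3 LP register\n");
        if (dirty & WM_DIRTY_PIPE(1))
                printf("would rewrite WM0 for pipe B\n");
        if (!(dirty & WM_DIRTY_DDB))
                printf("DDB partitioning untouched, WM_MISC left alone\n");
        return 0;
}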
+
 /*
  * The spec says we shouldn't write when we don't need to, because every write
  * causes WMs to be re-evaluated, expending some power.
  */
 static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
-                               struct hsw_wm_values *results,
-                               enum intel_ddb_partitioning partitioning)
+                               struct hsw_wm_values *results)
 {
-       struct hsw_wm_values previous;
+       struct hsw_wm_values *previous = &dev_priv->wm.hw;
+       unsigned int dirty;
        uint32_t val;
-       enum intel_ddb_partitioning prev_partitioning;
-       bool prev_enable_fbc_wm;
-
-       previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
-       previous.wm_pipe[1] = I915_READ(WM0_PIPEB_ILK);
-       previous.wm_pipe[2] = I915_READ(WM0_PIPEC_IVB);
-       previous.wm_lp[0] = I915_READ(WM1_LP_ILK);
-       previous.wm_lp[1] = I915_READ(WM2_LP_ILK);
-       previous.wm_lp[2] = I915_READ(WM3_LP_ILK);
-       previous.wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
-       previous.wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
-       previous.wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
-       previous.wm_linetime[0] = I915_READ(PIPE_WM_LINETIME(PIPE_A));
-       previous.wm_linetime[1] = I915_READ(PIPE_WM_LINETIME(PIPE_B));
-       previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
-
-       prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
-                               INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
-
-       prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
-
-       if (memcmp(results->wm_pipe, previous.wm_pipe,
-                  sizeof(results->wm_pipe)) == 0 &&
-           memcmp(results->wm_lp, previous.wm_lp,
-                  sizeof(results->wm_lp)) == 0 &&
-           memcmp(results->wm_lp_spr, previous.wm_lp_spr,
-                  sizeof(results->wm_lp_spr)) == 0 &&
-           memcmp(results->wm_linetime, previous.wm_linetime,
-                  sizeof(results->wm_linetime)) == 0 &&
-           partitioning == prev_partitioning &&
-           results->enable_fbc_wm == prev_enable_fbc_wm)
+
+       dirty = ilk_compute_wm_dirty(dev_priv->dev, previous, results);
+       if (!dirty)
                return;
 
-       if (previous.wm_lp[2] != 0)
+       if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 0)
                I915_WRITE(WM3_LP_ILK, 0);
-       if (previous.wm_lp[1] != 0)
+       if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != 0)
                I915_WRITE(WM2_LP_ILK, 0);
-       if (previous.wm_lp[0] != 0)
+       if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != 0)
                I915_WRITE(WM1_LP_ILK, 0);
 
-       if (previous.wm_pipe[0] != results->wm_pipe[0])
+       if (dirty & WM_DIRTY_PIPE(PIPE_A))
                I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
-       if (previous.wm_pipe[1] != results->wm_pipe[1])
+       if (dirty & WM_DIRTY_PIPE(PIPE_B))
                I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
-       if (previous.wm_pipe[2] != results->wm_pipe[2])
+       if (dirty & WM_DIRTY_PIPE(PIPE_C))
                I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
 
-       if (previous.wm_linetime[0] != results->wm_linetime[0])
+       if (dirty & WM_DIRTY_LINETIME(PIPE_A))
                I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
-       if (previous.wm_linetime[1] != results->wm_linetime[1])
+       if (dirty & WM_DIRTY_LINETIME(PIPE_B))
                I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
-       if (previous.wm_linetime[2] != results->wm_linetime[2])
+       if (dirty & WM_DIRTY_LINETIME(PIPE_C))
                I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
 
-       if (prev_partitioning != partitioning) {
+       if (dirty & WM_DIRTY_DDB) {
                val = I915_READ(WM_MISC);
-               if (partitioning == INTEL_DDB_PART_1_2)
+               if (results->partitioning == INTEL_DDB_PART_1_2)
                        val &= ~WM_MISC_DATA_PARTITION_5_6;
                else
                        val |= WM_MISC_DATA_PARTITION_5_6;
                I915_WRITE(WM_MISC, val);
        }
 
-       if (prev_enable_fbc_wm != results->enable_fbc_wm) {
+       if (dirty & WM_DIRTY_FBC) {
                val = I915_READ(DISP_ARB_CTL);
                if (results->enable_fbc_wm)
                        val &= ~DISP_FBC_WM_DIS;
@@ -2826,45 +2909,65 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
                I915_WRITE(DISP_ARB_CTL, val);
        }
 
-       if (previous.wm_lp_spr[0] != results->wm_lp_spr[0])
+       if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0])
                I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
-       if (previous.wm_lp_spr[1] != results->wm_lp_spr[1])
+       if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
                I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
-       if (previous.wm_lp_spr[2] != results->wm_lp_spr[2])
+       if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
                I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
 
-       if (results->wm_lp[0] != 0)
+       if (dirty & WM_DIRTY_LP(1) && results->wm_lp[0] != 0)
                I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
-       if (results->wm_lp[1] != 0)
+       if (dirty & WM_DIRTY_LP(2) && results->wm_lp[1] != 0)
                I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
-       if (results->wm_lp[2] != 0)
+       if (dirty & WM_DIRTY_LP(3) && results->wm_lp[2] != 0)
                I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
+
+       dev_priv->wm.hw = *results;
 }
 
-static void haswell_update_wm(struct drm_device *dev)
+static void haswell_update_wm(struct drm_crtc *crtc)
 {
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
-       struct hsw_pipe_wm_parameters params[3];
-       struct hsw_wm_values results_1_2, results_5_6, *best_results;
+       struct hsw_wm_maximums max;
+       struct hsw_pipe_wm_parameters params = {};
+       struct hsw_wm_values results = {};
        enum intel_ddb_partitioning partitioning;
+       struct intel_pipe_wm pipe_wm = {};
+       struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
+       struct intel_wm_config config = {};
+
+       hsw_compute_wm_parameters(crtc, &params, &config);
+
+       intel_compute_pipe_wm(crtc, &params, &pipe_wm);
+
+       if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
+               return;
+
+       intel_crtc->wm.active = pipe_wm;
 
-       hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
+       ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+       ilk_wm_merge(dev, &max, &lp_wm_1_2);
 
-       hsw_compute_wm_results(dev, params,
-                              &lp_max_1_2, &results_1_2);
-       if (lp_max_1_2.pri != lp_max_5_6.pri) {
-               hsw_compute_wm_results(dev, params,
-                                      &lp_max_5_6, &results_5_6);
-               best_results = hsw_find_best_result(&results_1_2, &results_5_6);
+       /* 5/6 split only in single pipe config on IVB+ */
+       if (INTEL_INFO(dev)->gen >= 7 &&
+           config.num_pipes_active == 1 && config.sprites_enabled) {
+               ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+               ilk_wm_merge(dev, &max, &lp_wm_5_6);
+
+               best_lp_wm = hsw_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
        } else {
-               best_results = &results_1_2;
+               best_lp_wm = &lp_wm_1_2;
        }
 
-       partitioning = (best_results == &results_1_2) ?
+       partitioning = (best_lp_wm == &lp_wm_1_2) ?
                       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
 
-       hsw_write_wm_values(dev_priv, best_results, partitioning);
+       hsw_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+
+       hsw_write_wm_values(dev_priv, &results);
 }
 
 static void haswell_update_sprite_wm(struct drm_plane *plane,
@@ -2879,7 +2982,7 @@ static void haswell_update_sprite_wm(struct drm_plane *plane,
        intel_plane->wm.horiz_pixels = sprite_width;
        intel_plane->wm.bytes_per_pixel = pixel_size;
 
-       haswell_update_wm(plane->dev);
+       haswell_update_wm(crtc);
 }
 
 static bool
@@ -2898,7 +3001,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
                return false;
        }
 
-       clock = crtc->mode.clock;
+       clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
 
        /* Use the small buffer method to calculate the sprite watermark */
        entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
@@ -2933,7 +3036,7 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
        }
 
        crtc = intel_get_crtc_for_plane(dev, plane);
-       clock = crtc->mode.clock;
+       clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
        if (!clock) {
                *sprite_wm = 0;
                return false;
@@ -3044,6 +3147,74 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
        I915_WRITE(WM3S_LP_IVB, sprite_wm);
 }
 
+static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct hsw_wm_values *hw = &dev_priv->wm.hw;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_pipe_wm *active = &intel_crtc->wm.active;
+       enum pipe pipe = intel_crtc->pipe;
+       static const unsigned int wm0_pipe_reg[] = {
+               [PIPE_A] = WM0_PIPEA_ILK,
+               [PIPE_B] = WM0_PIPEB_ILK,
+               [PIPE_C] = WM0_PIPEC_IVB,
+       };
+
+       hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
+       hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+       if (intel_crtc_active(crtc)) {
+               u32 tmp = hw->wm_pipe[pipe];
+
+               /*
+                * For active pipes LP0 watermark is marked as
+                * enabled, and LP1+ watermarks as disabled since
+                * we can't really reverse compute them in case
+                * multiple pipes are active.
+                */
+               active->wm[0].enable = true;
+               active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
+               active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
+               active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+               active->linetime = hw->wm_linetime[pipe];
+       } else {
+               int level, max_level = ilk_wm_max_level(dev);
+
+               /*
+                * For inactive pipes, all watermark levels
+                * should be marked as enabled but zeroed,
+                * which is what we would compute them to be.
+                */
+               for (level = 0; level <= max_level; level++)
+                       active->wm[level].enable = true;
+       }
+}
+
+void ilk_wm_get_hw_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct hsw_wm_values *hw = &dev_priv->wm.hw;
+       struct drm_crtc *crtc;
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               ilk_pipe_wm_get_hw_state(crtc);
+
+       hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
+       hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
+       hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
+
+       hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
+       hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
+       hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
+
+       hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
+               INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
+
+       hw->enable_fbc_wm =
+               !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
+}
+
 /**
  * intel_update_watermarks - update FIFO watermark values based on current modes
  *
@@ -3076,12 +3247,12 @@ static void sandybridge_update_sprite_wm(struct drm_plane *plane,
  * We don't use the sprite, so we can ignore that.  And on Crestline we have
  * to set the non-SR watermarks to 8.
  */
-void intel_update_watermarks(struct drm_device *dev)
+void intel_update_watermarks(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 
        if (dev_priv->display.update_wm)
-               dev_priv->display.update_wm(dev);
+               dev_priv->display.update_wm(crtc);
 }
 
 void intel_update_sprite_watermarks(struct drm_plane *plane,
@@ -3287,6 +3458,98 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
        return limits;
 }
 
+static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
+{
+       int new_power;
+
+       new_power = dev_priv->rps.power;
+       switch (dev_priv->rps.power) {
+       case LOW_POWER:
+               if (val > dev_priv->rps.rpe_delay + 1 && val > dev_priv->rps.cur_delay)
+                       new_power = BETWEEN;
+               break;
+
+       case BETWEEN:
+               if (val <= dev_priv->rps.rpe_delay && val < dev_priv->rps.cur_delay)
+                       new_power = LOW_POWER;
+               else if (val >= dev_priv->rps.rp0_delay && val > dev_priv->rps.cur_delay)
+                       new_power = HIGH_POWER;
+               break;
+
+       case HIGH_POWER:
+               if (val < (dev_priv->rps.rp1_delay + dev_priv->rps.rp0_delay) >> 1 && val < dev_priv->rps.cur_delay)
+                       new_power = BETWEEN;
+               break;
+       }
+       /* Max/min bins are special */
+       if (val == dev_priv->rps.min_delay)
+               new_power = LOW_POWER;
+       if (val == dev_priv->rps.max_delay)
+               new_power = HIGH_POWER;
+       if (new_power == dev_priv->rps.power)
+               return;
+
+       /* Note the units here are not exactly 1us, but 1280ns. */
+       switch (new_power) {
+       case LOW_POWER:
+               /* Upclock if more than 95% busy over 16ms */
+               I915_WRITE(GEN6_RP_UP_EI, 12500);
+               I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
+
+               /* Downclock if less than 85% busy over 32ms */
+               I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
+
+               I915_WRITE(GEN6_RP_CONTROL,
+                          GEN6_RP_MEDIA_TURBO |
+                          GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                          GEN6_RP_MEDIA_IS_GFX |
+                          GEN6_RP_ENABLE |
+                          GEN6_RP_UP_BUSY_AVG |
+                          GEN6_RP_DOWN_IDLE_AVG);
+               break;
+
+       case BETWEEN:
+               /* Upclock if more than 90% busy over 13ms */
+               I915_WRITE(GEN6_RP_UP_EI, 10250);
+               I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
+
+               /* Downclock if less than 75% busy over 32ms */
+               I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
+
+               I915_WRITE(GEN6_RP_CONTROL,
+                          GEN6_RP_MEDIA_TURBO |
+                          GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                          GEN6_RP_MEDIA_IS_GFX |
+                          GEN6_RP_ENABLE |
+                          GEN6_RP_UP_BUSY_AVG |
+                          GEN6_RP_DOWN_IDLE_AVG);
+               break;
+
+       case HIGH_POWER:
+               /* Upclock if more than 85% busy over 10ms */
+               I915_WRITE(GEN6_RP_UP_EI, 8000);
+               I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
+
+               /* Downclock if less than 60% busy over 32ms */
+               I915_WRITE(GEN6_RP_DOWN_EI, 25000);
+               I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
+
+               I915_WRITE(GEN6_RP_CONTROL,
+                          GEN6_RP_MEDIA_TURBO |
+                          GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                          GEN6_RP_MEDIA_IS_GFX |
+                          GEN6_RP_ENABLE |
+                          GEN6_RP_UP_BUSY_AVG |
+                          GEN6_RP_DOWN_IDLE_AVG);
+               break;
+       }
+
+       dev_priv->rps.power = new_power;
+       dev_priv->rps.last_adj = 0;
+}
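To make the "1280ns" comment above concrete, here is a standalone back-of-the-envelope check of the LOW_POWER numbers (illustrative only): 12500 evaluation-interval units come out to roughly 16 ms, and the 11800 threshold is about 94.4% of that interval, i.e. the "more than 95% busy over 16ms" comment to within rounding.

#include <stdio.h>

int main(void)
{
        /* GEN6_RP_UP_EI / GEN6_RP_UP_THRESHOLD values from the LOW_POWER case. */
        unsigned int up_ei = 12500, up_threshold = 11800;
        double unit_ns = 1280.0;                        /* one counter unit */

        double window_ms = up_ei * unit_ns / 1e6;       /* ~16 ms */
        double busy_pct = 100.0 * up_threshold / up_ei; /* ~94.4 % */

        printf("upclock window: %.1f ms, busy threshold: %.1f%%\n",
               window_ms, busy_pct);
        return 0;
}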
+
 void gen6_set_rps(struct drm_device *dev, u8 val)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3299,6 +3562,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
        if (val == dev_priv->rps.cur_delay)
                return;
 
+       gen6_set_rps_thresholds(dev_priv, val);
+
        if (IS_HASWELL(dev))
                I915_WRITE(GEN6_RPNSWREQ,
                           HSW_FREQUENCY(val));
@@ -3320,6 +3585,32 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
        trace_intel_gpu_freq_change(val * 50);
 }
 
+void gen6_rps_idle(struct drm_i915_private *dev_priv)
+{
+       mutex_lock(&dev_priv->rps.hw_lock);
+       if (dev_priv->rps.enabled) {
+               if (dev_priv->info->is_valleyview)
+                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+               else
+                       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+               dev_priv->rps.last_adj = 0;
+       }
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
+void gen6_rps_boost(struct drm_i915_private *dev_priv)
+{
+       mutex_lock(&dev_priv->rps.hw_lock);
+       if (dev_priv->rps.enabled) {
+               if (dev_priv->info->is_valleyview)
+                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+               else
+                       gen6_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
+               dev_priv->rps.last_adj = 0;
+       }
+       mutex_unlock(&dev_priv->rps.hw_lock);
+}
+
 /*
  * Wait until the previous freq change has completed,
  * or the timeout elapsed, and then update our notion
@@ -3415,6 +3706,20 @@ static void valleyview_disable_rps(struct drm_device *dev)
        }
 }
 
+static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
+{
+       if (IS_GEN6(dev))
+               DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+
+       if (IS_HASWELL(dev))
+               DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+
+       DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
+                       (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+                       (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+                       (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+}
+
 int intel_enable_rc6(const struct drm_device *dev)
 {
        /* No RC6 before Ironlake */
@@ -3429,18 +3734,13 @@ int intel_enable_rc6(const struct drm_device *dev)
        if (INTEL_INFO(dev)->gen == 5)
                return 0;
 
-       if (IS_HASWELL(dev)) {
-               DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
+       if (IS_HASWELL(dev))
                return INTEL_RC6_ENABLE;
-       }
 
        /* snb/ivb have more than one rc6 state. */
-       if (INTEL_INFO(dev)->gen == 6) {
-               DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
+       if (INTEL_INFO(dev)->gen == 6)
                return INTEL_RC6_ENABLE;
-       }
 
-       DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
        return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
 }
 
@@ -3501,7 +3801,10 @@ static void gen6_enable_rps(struct drm_device *dev)
 
        /* In units of 50MHz */
        dev_priv->rps.hw_max = dev_priv->rps.max_delay = rp_state_cap & 0xff;
-       dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+       dev_priv->rps.min_delay = (rp_state_cap >> 16) & 0xff;
+       dev_priv->rps.rp1_delay = (rp_state_cap >>  8) & 0xff;
+       dev_priv->rps.rp0_delay = (rp_state_cap >>  0) & 0xff;
+       dev_priv->rps.rpe_delay = dev_priv->rps.rp1_delay;
        dev_priv->rps.cur_delay = 0;
 
        /* disable the counters and set deterministic thresholds */
@@ -3539,48 +3842,16 @@ static void gen6_enable_rps(struct drm_device *dev)
                        rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
        }
 
-       DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-                       (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-                       (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-                       (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+       intel_print_rc6_info(dev, rc6_mask);
 
        I915_WRITE(GEN6_RC_CONTROL,
                   rc6_mask |
                   GEN6_RC_CTL_EI_MODE(1) |
                   GEN6_RC_CTL_HW_ENABLE);
 
-       if (IS_HASWELL(dev)) {
-               I915_WRITE(GEN6_RPNSWREQ,
-                          HSW_FREQUENCY(10));
-               I915_WRITE(GEN6_RC_VIDEO_FREQ,
-                          HSW_FREQUENCY(12));
-       } else {
-               I915_WRITE(GEN6_RPNSWREQ,
-                          GEN6_FREQUENCY(10) |
-                          GEN6_OFFSET(0) |
-                          GEN6_AGGRESSIVE_TURBO);
-               I915_WRITE(GEN6_RC_VIDEO_FREQ,
-                          GEN6_FREQUENCY(12));
-       }
-
-       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
-       I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-                  dev_priv->rps.max_delay << 24 |
-                  dev_priv->rps.min_delay << 16);
-
-       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
-       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
-       I915_WRITE(GEN6_RP_UP_EI, 66000);
-       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
-
+       /* Power down if completely idle for over 50ms */
+       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
-       I915_WRITE(GEN6_RP_CONTROL,
-                  GEN6_RP_MEDIA_TURBO |
-                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                  GEN6_RP_MEDIA_IS_GFX |
-                  GEN6_RP_ENABLE |
-                  GEN6_RP_UP_BUSY_AVG |
-                  (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
        ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
        if (!ret) {
@@ -3596,7 +3867,8 @@ static void gen6_enable_rps(struct drm_device *dev)
                DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
        }
 
-       gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
+       dev_priv->rps.power = HIGH_POWER; /* force a reset */
+       gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
 
        gen6_enable_rps_interrupts(dev);
 
@@ -3624,23 +3896,28 @@ void gen6_update_ring_freq(struct drm_device *dev)
        unsigned int gpu_freq;
        unsigned int max_ia_freq, min_ring_freq;
        int scaling_factor = 180;
+       struct cpufreq_policy *policy;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-       max_ia_freq = cpufreq_quick_get_max(0);
-       /*
-        * Default to measured freq if none found, PCU will ensure we don't go
-        * over
-        */
-       if (!max_ia_freq)
+       policy = cpufreq_cpu_get(0);
+       if (policy) {
+               max_ia_freq = policy->cpuinfo.max_freq;
+               cpufreq_cpu_put(policy);
+       } else {
+               /*
+                * Default to measured freq if none found, PCU will ensure we
+                * don't go over
+                */
                max_ia_freq = tsc_khz;
+       }
 
        /* Convert from kHz to MHz */
        max_ia_freq /= 1000;
 
-       min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK);
-       /* convert DDR frequency from units of 133.3MHz to bandwidth */
-       min_ring_freq = (2 * 4 * min_ring_freq + 2) / 3;
+       min_ring_freq = I915_READ(MCHBAR_MIRROR_BASE_SNB + DCLK) & 0xf;
+       /* convert DDR frequency from units of 266.6MHz to bandwidth */
+       min_ring_freq = mult_frac(min_ring_freq, 8, 3);
 
        /*
         * For each potential GPU frequency, load a ring frequency we'd like
@@ -3653,7 +3930,7 @@ void gen6_update_ring_freq(struct drm_device *dev)
                unsigned int ia_freq = 0, ring_freq = 0;
 
                if (IS_HASWELL(dev)) {
-                       ring_freq = (gpu_freq * 5 + 3) / 4;
+                       ring_freq = mult_frac(gpu_freq, 5, 4);
                        ring_freq = max(min_ring_freq, ring_freq);
                        /* leave ia_freq as the default, chosen by cpufreq */
                } else {
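The mult_frac() conversions in the two hunks above are easy to check by hand; this standalone sketch (not driver code) uses a local helper with the same quotient/remainder arithmetic as the kernel's mult_frac() and illustrative input values: a DCLK field of 3 and a GPU frequency of 20 (i.e. 1000 MHz in 50 MHz units).

#include <stdio.h>

/* Same quotient/remainder arithmetic as the kernel's mult_frac() macro. */
static unsigned int mult_frac(unsigned int x, unsigned int num, unsigned int den)
{
        return (x / den) * num + (x % den) * num / den;
}

int main(void)
{
        unsigned int dclk = 3;          /* DDR freq field, in 266.6 MHz units (illustrative) */
        unsigned int gpu_freq = 20;     /* GPU freq in 50 MHz units, i.e. 1000 MHz (illustrative) */

        unsigned int min_ring_freq = mult_frac(dclk, 8, 3);      /* -> 8 */
        unsigned int hsw_ring_freq = mult_frac(gpu_freq, 5, 4);  /* -> 25 */

        printf("min_ring_freq = %u, ring_freq = %u\n",
               min_ring_freq, hsw_ring_freq);
        return 0;
}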
@@ -3709,24 +3986,6 @@ int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
        return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
 }
 
-static void vlv_rps_timer_work(struct work_struct *work)
-{
-       drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-                                                   rps.vlv_work.work);
-
-       /*
-        * Timer fired, we must be idle.  Drop to min voltage state.
-        * Note: we use RPe here since it should match the
-        * Vmin we were shooting for.  That should give us better
-        * perf when we come back out of RC6 than if we used the
-        * min freq available.
-        */
-       mutex_lock(&dev_priv->rps.hw_lock);
-       if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
-               valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
-       mutex_unlock(&dev_priv->rps.hw_lock);
-}
-
 static void valleyview_setup_pctx(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3773,13 +4032,14 @@ static void valleyview_enable_rps(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring;
-       u32 gtfifodbg, val;
+       u32 gtfifodbg, val, rc6_mode = 0;
        int i;
 
        WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
        if ((gtfifodbg = I915_READ(GTFIFODBG))) {
-               DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
+               DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+                                gtfifodbg);
                I915_WRITE(GTFIFODBG, gtfifodbg);
        }
 
@@ -3812,9 +4072,16 @@ static void valleyview_enable_rps(struct drm_device *dev)
        I915_WRITE(GEN6_RC6_THRESHOLD, 0xc350);
 
        /* allows RC6 residency counter to work */
-       I915_WRITE(0x138104, _MASKED_BIT_ENABLE(0x3));
-       I915_WRITE(GEN6_RC_CONTROL,
-                  GEN7_RC_CTL_TO_MODE);
+       I915_WRITE(VLV_COUNTER_CONTROL,
+                  _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+                                     VLV_MEDIA_RC6_COUNT_EN |
+                                     VLV_RENDER_RC6_COUNT_EN));
+       if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+               rc6_mode = GEN7_RC_CTL_TO_MODE;
+
+       intel_print_rc6_info(dev, rc6_mode);
+
+       I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
 
        val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
        switch ((val >> 6) & 3) {
@@ -3864,8 +4131,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
                                      dev_priv->rps.rpe_delay),
                         dev_priv->rps.rpe_delay);
 
-       INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
-
        valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
 
        gen6_enable_rps_interrupts(dev);
@@ -3987,6 +4252,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 
        I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
        I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+
+       intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
 }
 
 static unsigned long intel_pxfreq(u32 vidfreq)
@@ -4605,13 +4872,12 @@ void intel_disable_gt_powersave(struct drm_device *dev)
        } else if (INTEL_INFO(dev)->gen >= 6) {
                cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
                cancel_work_sync(&dev_priv->rps.work);
-               if (IS_VALLEYVIEW(dev))
-                       cancel_delayed_work_sync(&dev_priv->rps.vlv_work);
                mutex_lock(&dev_priv->rps.hw_lock);
                if (IS_VALLEYVIEW(dev))
                        valleyview_disable_rps(dev);
                else
                        gen6_disable_rps(dev);
+               dev_priv->rps.enabled = false;
                mutex_unlock(&dev_priv->rps.hw_lock);
        }
 }
@@ -4631,6 +4897,7 @@ static void intel_gen6_powersave_work(struct work_struct *work)
                gen6_enable_rps(dev);
                gen6_update_ring_freq(dev);
        }
+       dev_priv->rps.enabled = true;
        mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
@@ -4674,7 +4941,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
                I915_WRITE(DSPCNTR(pipe),
                           I915_READ(DSPCNTR(pipe)) |
                           DISPPLANE_TRICKLE_FEED_DISABLE);
-               intel_flush_display_plane(dev_priv, pipe);
+               intel_flush_primary_plane(dev_priv, pipe);
        }
 }
 
@@ -4761,7 +5028,9 @@ static void cpt_init_clock_gating(struct drm_device *dev)
         * gating for the panel power sequencer or it will fail to
         * start up when no ports are active.
         */
-       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+       I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
+                  PCH_DPLUNIT_CLOCK_GATE_DISABLE |
+                  PCH_CPUNIT_CLOCK_GATE_DISABLE);
        I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
                   DPLS_EDP_PPS_FIX_DIS);
        /* The below fixes the weird display corruption, a few pixels shifted
@@ -4955,6 +5224,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
        I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
                        GEN7_WA_L3_CHICKEN_MODE);
 
+       /* L3 caching of data atomics doesn't work -- disable it. */
+       I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+       I915_WRITE(HSW_ROW_CHICKEN3,
+                  _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
+
        /* This is required by WaCatErrorRejectionIssue:hsw */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
                        I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -5250,6 +5524,23 @@ void intel_suspend_hw(struct drm_device *dev)
                lpt_suspend_hw(dev);
 }
 
+static bool is_always_on_power_domain(struct drm_device *dev,
+                                     enum intel_display_power_domain domain)
+{
+       unsigned long always_on_domains;
+
+       BUG_ON(BIT(domain) & ~POWER_DOMAIN_MASK);
+
+       if (IS_HASWELL(dev)) {
+               always_on_domains = HSW_ALWAYS_ON_POWER_DOMAINS;
+       } else {
+               WARN_ON(1);
+               return true;
+       }
+
+       return BIT(domain) & always_on_domains;
+}
+
 /**
  * We should only use the power well if we explicitly asked the hardware to
  * enable it, so check if it's enabled and also check if we've requested it to
@@ -5263,23 +5554,11 @@ bool intel_display_power_enabled(struct drm_device *dev,
        if (!HAS_POWER_WELL(dev))
                return true;
 
-       switch (domain) {
-       case POWER_DOMAIN_PIPE_A:
-       case POWER_DOMAIN_TRANSCODER_EDP:
+       if (is_always_on_power_domain(dev, domain))
                return true;
-       case POWER_DOMAIN_PIPE_B:
-       case POWER_DOMAIN_PIPE_C:
-       case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
-       case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
-       case POWER_DOMAIN_TRANSCODER_A:
-       case POWER_DOMAIN_TRANSCODER_B:
-       case POWER_DOMAIN_TRANSCODER_C:
-               return I915_READ(HSW_PWR_WELL_DRIVER) ==
+
+       return I915_READ(HSW_PWR_WELL_DRIVER) ==
                     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
-       default:
-               BUG();
-       }
 }
 
 static void __intel_set_power_well(struct drm_device *dev, bool enable)
@@ -5323,12 +5602,59 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
                        spin_lock_irqsave(&dev->vbl_lock, irqflags);
                        for_each_pipe(p)
                                if (p != PIPE_A)
-                                       dev->last_vblank[p] = 0;
+                                       dev->vblank[p].last = 0;
                        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
                }
        }
 }
 
+static void __intel_power_well_get(struct i915_power_well *power_well)
+{
+       if (!power_well->count++)
+               __intel_set_power_well(power_well->device, true);
+}
+
+static void __intel_power_well_put(struct i915_power_well *power_well)
+{
+       WARN_ON(!power_well->count);
+       if (!--power_well->count)
+               __intel_set_power_well(power_well->device, false);
+}
+
+void intel_display_power_get(struct drm_device *dev,
+                            enum intel_display_power_domain domain)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_power_well *power_well = &dev_priv->power_well;
+
+       if (!HAS_POWER_WELL(dev))
+               return;
+
+       if (is_always_on_power_domain(dev, domain))
+               return;
+
+       mutex_lock(&power_well->lock);
+       __intel_power_well_get(power_well);
+       mutex_unlock(&power_well->lock);
+}
+
+void intel_display_power_put(struct drm_device *dev,
+                            enum intel_display_power_domain domain)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_power_well *power_well = &dev_priv->power_well;
+
+       if (!HAS_POWER_WELL(dev))
+               return;
+
+       if (is_always_on_power_domain(dev, domain))
+               return;
+
+       mutex_lock(&power_well->lock);
+       __intel_power_well_put(power_well);
+       mutex_unlock(&power_well->lock);
+}
+
 static struct i915_power_well *hsw_pwr;
 
 /* Display audio driver power well request */
@@ -5337,11 +5663,9 @@ void i915_request_power_well(void)
        if (WARN_ON(!hsw_pwr))
                return;
 
-       spin_lock_irq(&hsw_pwr->lock);
-       if (!hsw_pwr->count++ &&
-                       !hsw_pwr->i915_request)
-               __intel_set_power_well(hsw_pwr->device, true);
-       spin_unlock_irq(&hsw_pwr->lock);
+       mutex_lock(&hsw_pwr->lock);
+       __intel_power_well_get(hsw_pwr);
+       mutex_unlock(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 
@@ -5351,12 +5675,9 @@ void i915_release_power_well(void)
        if (WARN_ON(!hsw_pwr))
                return;
 
-       spin_lock_irq(&hsw_pwr->lock);
-       WARN_ON(!hsw_pwr->count);
-       if (!--hsw_pwr->count &&
-                      !hsw_pwr->i915_request)
-               __intel_set_power_well(hsw_pwr->device, false);
-       spin_unlock_irq(&hsw_pwr->lock);
+       mutex_lock(&hsw_pwr->lock);
+       __intel_power_well_put(hsw_pwr);
+       mutex_unlock(&hsw_pwr->lock);
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
@@ -5367,7 +5688,7 @@ int i915_init_power_well(struct drm_device *dev)
        hsw_pwr = &dev_priv->power_well;
 
        hsw_pwr->device = dev;
-       spin_lock_init(&hsw_pwr->lock);
+       mutex_init(&hsw_pwr->lock);
        hsw_pwr->count = 0;
 
        return 0;
@@ -5389,17 +5710,39 @@ void intel_set_power_well(struct drm_device *dev, bool enable)
        if (!i915_disable_power_well && !enable)
                return;
 
-       spin_lock_irq(&power_well->lock);
+       mutex_lock(&power_well->lock);
+
+       /*
+       /*
+        * This function only ever contributes a single reference
+        * to the power well reference count; i915_request tracks
+        * whether that reference has already been added.
+        */
+       if (power_well->i915_request == enable)
+               goto out;
+
        power_well->i915_request = enable;
 
-       /* only reject "disable" power well request */
-       if (power_well->count && !enable) {
-               spin_unlock_irq(&power_well->lock);
+       if (enable)
+               __intel_power_well_get(power_well);
+       else
+               __intel_power_well_put(power_well);
+
+ out:
+       mutex_unlock(&power_well->lock);
+}
+
+static void intel_resume_power_well(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_power_well *power_well = &dev_priv->power_well;
+
+       if (!HAS_POWER_WELL(dev))
                return;
-       }
 
-       __intel_set_power_well(dev, enable);
-       spin_unlock_irq(&power_well->lock);
+       mutex_lock(&power_well->lock);
+       __intel_set_power_well(dev, power_well->count > 0);
+       mutex_unlock(&power_well->lock);
 }
 
 /*
@@ -5417,6 +5760,7 @@ void intel_init_power_well(struct drm_device *dev)
 
        /* For now, we need the power well to be always enabled. */
        intel_set_power_well(dev, true);
+       intel_resume_power_well(dev);
 
        /* We're taking over the BIOS, so clear any requests made by it since
         * the driver is in charge now. */
@@ -5681,5 +6025,7 @@ void intel_pm_init(struct drm_device *dev)
 
        INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
                          intel_gen6_powersave_work);
+
+       INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
 }
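The intel_pm.c hunks above replace the spinlock-plus-request-flag handling of the Haswell power well with a mutex-protected reference count: the first __intel_power_well_get() powers the well up, the last __intel_power_well_put() powers it down, and both the audio-driver path and the KMS path funnel through the same two helpers. Below is a minimal, self-contained user-space sketch of that pattern; it uses pthreads and a hypothetical set_hw_power() stand-in rather than the driver's real register writes.

/* Minimal sketch of the refcounted power-well pattern, assuming a
 * hypothetical set_hw_power() models the real register write. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct power_well {
        pthread_mutex_t lock;
        int count;              /* how many users currently need the well */
};

static void set_hw_power(bool enable)
{
        printf("power well %s\n", enable ? "on" : "off");
}

static void power_well_get(struct power_well *pw)
{
        pthread_mutex_lock(&pw->lock);
        if (pw->count++ == 0)           /* first user powers the well up */
                set_hw_power(true);
        pthread_mutex_unlock(&pw->lock);
}

static void power_well_put(struct power_well *pw)
{
        pthread_mutex_lock(&pw->lock);
        assert(pw->count > 0);
        if (--pw->count == 0)           /* last user powers it back down */
                set_hw_power(false);
        pthread_mutex_unlock(&pw->lock);
}

int main(void)
{
        struct power_well pw = { PTHREAD_MUTEX_INITIALIZER, 0 };

        power_well_get(&pw);    /* 0 -> 1: powers on */
        power_well_get(&pw);    /* 1 -> 2: no hardware access */
        power_well_put(&pw);    /* 2 -> 1: still on */
        power_well_put(&pw);    /* 1 -> 0: powers off */
        return 0;
}

With this shape, intel_set_power_well() reduces to contributing at most one reference, which is what the i915_request bookkeeping in the hunk above guards.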
 
index 460ee1026fcad63249e83af59da0353b40b04460..2dec134f75eb4d63116a97bfcd647cbba6973228 100644 (file)
@@ -41,6 +41,16 @@ static inline int ring_space(struct intel_ring_buffer *ring)
        return space;
 }
 
+void __intel_ring_advance(struct intel_ring_buffer *ring)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+       ring->tail &= ring->size - 1;
+       if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+               return;
+       ring->write_tail(ring, ring->tail);
+}
+
 static int
 gen2_render_ring_flush(struct intel_ring_buffer *ring,
                       u32      invalidate_domains,
@@ -385,8 +395,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
        int ret = 0;
        u32 head;
 
-       if (HAS_FORCE_WAKE(dev))
-               gen6_gt_force_wake_get(dev_priv);
+       gen6_gt_force_wake_get(dev_priv);
 
        if (I915_NEED_GFX_HWS(dev))
                intel_ring_setup_status_page(ring);
@@ -459,8 +468,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
 
 out:
-       if (HAS_FORCE_WAKE(dev))
-               gen6_gt_force_wake_put(dev_priv);
+       gen6_gt_force_wake_put(dev_priv);
 
        return ret;
 }
@@ -559,8 +567,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-       if (HAS_L3_GPU_CACHE(dev))
-               I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+       if (HAS_L3_DPF(dev))
+               I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
 
        return ret;
 }
@@ -593,7 +601,7 @@ update_mboxes(struct intel_ring_buffer *ring,
 #define MBOX_UPDATE_DWORDS 4
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, mmio_offset);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_NOOP);
 }
 
@@ -629,9 +637,9 @@ gen6_add_request(struct intel_ring_buffer *ring)
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
 
        return 0;
 }
@@ -723,7 +731,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128; /* write to separate cachelines */
@@ -742,9 +750,9 @@ pc_render_add_request(struct intel_ring_buffer *ring)
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
 
        return 0;
 }
@@ -963,9 +971,9 @@ i9xx_add_request(struct intel_ring_buffer *ring)
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
-       intel_ring_advance(ring);
+       __intel_ring_advance(ring);
 
        return 0;
 }
@@ -987,10 +995,10 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
-               if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
+               if (HAS_L3_DPF(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~(ring->irq_enable_mask |
-                                        GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+                                        GT_PARITY_ERROR(dev)));
                else
                        I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1009,9 +1017,8 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 
        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
-               if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
-                       I915_WRITE_IMR(ring,
-                                      ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+               if (HAS_L3_DPF(dev) && ring->id == RCS)
+                       I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
                else
                        I915_WRITE_IMR(ring, ~0);
                ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
@@ -1317,7 +1324,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
        /* Disable the ring buffer. The ring must be idle at this point */
        dev_priv = ring->dev->dev_private;
        ret = intel_ring_idle(ring);
-       if (ret)
+       if (ret && !i915_reset_in_progress(&dev_priv->gpu_error))
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);
 
@@ -1328,6 +1335,8 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
        i915_gem_object_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
        ring->obj = NULL;
+       ring->preallocated_lazy_request = NULL;
+       ring->outstanding_lazy_seqno = 0;
 
        if (ring->cleanup)
                ring->cleanup(ring);
@@ -1414,6 +1423,9 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
        if (ret != -ENOSPC)
                return ret;
 
+       /* force the tail write in case we have been skipping them */
+       __intel_ring_advance(ring);
+
        trace_i915_ring_wait_begin(ring);
        /* With GEM the hangcheck timer should kick us out of the loop,
         * leaving it early runs the risk of corrupting GEM state (due
@@ -1475,7 +1487,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
        int ret;
 
        /* We need to add any requests required to flush the objects and ring */
-       if (ring->outstanding_lazy_request) {
+       if (ring->outstanding_lazy_seqno) {
                ret = i915_add_request(ring, NULL);
                if (ret)
                        return ret;
@@ -1495,10 +1507,20 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
 static int
 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 {
-       if (ring->outstanding_lazy_request)
+       if (ring->outstanding_lazy_seqno)
                return 0;
 
-       return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+       if (ring->preallocated_lazy_request == NULL) {
+               struct drm_i915_gem_request *request;
+
+               request = kmalloc(sizeof(*request), GFP_KERNEL);
+               if (request == NULL)
+                       return -ENOMEM;
+
+               ring->preallocated_lazy_request = request;
+       }
+
+       return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
 static int __intel_ring_begin(struct intel_ring_buffer *ring,
@@ -1545,7 +1567,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
-       BUG_ON(ring->outstanding_lazy_request);
+       BUG_ON(ring->outstanding_lazy_seqno);
 
        if (INTEL_INFO(ring->dev)->gen >= 6) {
                I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
@@ -1558,17 +1580,6 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
        ring->hangcheck.seqno = seqno;
 }
 
-void intel_ring_advance(struct intel_ring_buffer *ring)
-{
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-       ring->tail &= ring->size - 1;
-       if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
-               return;
-       ring->write_tail(ring, ring->tail);
-}
-
-
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
 {
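In the ring-buffer hunks above, intel_ring_advance() is reduced to masking the software tail, while the new __intel_ring_advance() additionally publishes the tail to hardware unless the ring has been stopped (and ring_wait_for_space() later "forces the tail write" to catch up). The sketch below models that split in plain C, assuming a power-of-two ring size and a write_hw_tail() stand-in for the MMIO write.

/* Sketch of the tail-handling split, under the assumption that the ring
 * size is a power of two and write_hw_tail() models the MMIO tail write. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4096          /* must be a power of two for the mask trick */

struct ring {
        uint32_t tail;          /* software tail, in bytes */
        bool stopped;           /* e.g. frozen for error capture */
};

static void write_hw_tail(uint32_t tail)
{
        printf("hw tail <- %u\n", tail);        /* stand-in for the register write */
}

/* Software-only step: wrap the tail inside the buffer. */
static void ring_advance(struct ring *ring)
{
        ring->tail &= RING_SIZE - 1;
}

/* Full step: wrap, then publish to hardware unless the ring is stopped. */
static void __ring_advance(struct ring *ring)
{
        ring_advance(ring);
        if (ring->stopped)
                return;         /* skip the tail write; force it later */
        write_hw_tail(ring->tail);
}

int main(void)
{
        struct ring ring = { .tail = RING_SIZE + 64, .stopped = false };

        __ring_advance(&ring);  /* wraps to 64 and writes it out */

        ring.stopped = true;
        ring.tail += 32;
        __ring_advance(&ring);  /* tail advances in software only */

        ring.stopped = false;
        __ring_advance(&ring);  /* the "force the tail write" catch-up */
        return 0;
}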
index 68b1ca974d594dc483827fdc0dba724563a07f8d..71a73f4fe252fdb90b4c24d8947e936b37ad0f5b 100644 (file)
@@ -34,6 +34,7 @@ struct  intel_hw_status_page {
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
 enum intel_ring_hangcheck_action {
+       HANGCHECK_IDLE = 0,
        HANGCHECK_WAIT,
        HANGCHECK_ACTIVE,
        HANGCHECK_KICK,
@@ -140,7 +141,8 @@ struct  intel_ring_buffer {
        /**
         * Do we have some not yet emitted requests outstanding?
         */
-       u32 outstanding_lazy_request;
+       struct drm_i915_gem_request *preallocated_lazy_request;
+       u32 outstanding_lazy_seqno;
        bool gpu_caches_dirty;
        bool fbc_dirty;
 
@@ -237,7 +239,12 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
        iowrite32(data, ring->virtual_start + ring->tail);
        ring->tail += 4;
 }
-void intel_ring_advance(struct intel_ring_buffer *ring);
+static inline void intel_ring_advance(struct intel_ring_buffer *ring)
+{
+       ring->tail &= ring->size - 1;
+}
+void __intel_ring_advance(struct intel_ring_buffer *ring);
+
 int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
 void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
@@ -258,8 +265,8 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
 
 static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
 {
-       BUG_ON(ring->outstanding_lazy_request == 0);
-       return ring->outstanding_lazy_request;
+       BUG_ON(ring->outstanding_lazy_seqno == 0);
+       return ring->outstanding_lazy_seqno;
 }
 
 static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
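The header change above replaces the bare outstanding_lazy_request seqno with a preallocated request pointer plus outstanding_lazy_seqno, so the allocation happens early, where a failure can still be reported, and the later commit path cannot hit -ENOMEM. A rough user-space sketch of that idea, with illustrative names only:

/* Sketch of the lazy-seqno bookkeeping: allocate the request where failure is
 * still allowed, then consume the preallocation in a path that cannot fail. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
        uint32_t seqno;
};

struct ring {
        struct request *preallocated_lazy_request;
        uint32_t outstanding_lazy_seqno;
        uint32_t next_seqno;
};

static int ring_alloc_seqno(struct ring *ring)
{
        if (ring->outstanding_lazy_seqno)
                return 0;                       /* already reserved */

        if (!ring->preallocated_lazy_request) {
                ring->preallocated_lazy_request = malloc(sizeof(struct request));
                if (!ring->preallocated_lazy_request)
                        return -ENOMEM;         /* the only point that may fail */
        }

        ring->outstanding_lazy_seqno = ++ring->next_seqno;
        return 0;
}

static void ring_add_request(struct ring *ring)
{
        /* commit path: consumes the preallocation, cannot fail */
        struct request *req = ring->preallocated_lazy_request;

        req->seqno = ring->outstanding_lazy_seqno;
        printf("emitted request with seqno %u\n", req->seqno);

        ring->preallocated_lazy_request = NULL;
        ring->outstanding_lazy_seqno = 0;
        free(req);
}

int main(void)
{
        struct ring ring = { 0 };

        if (ring_alloc_seqno(&ring) == 0)
                ring_add_request(&ring);
        return 0;
}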
index 49482fd5b76c6cad80298d2d2f67c0050c73a93d..a583e8f718a7f0f4b87b4fe724336c73ad1a8198 100644 (file)
@@ -539,7 +539,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
                goto log_fail;
 
        while ((status == SDVO_CMD_STATUS_PENDING ||
-                       status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
+               status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
                if (retry < 10)
                        msleep(15);
                else
@@ -1068,7 +1068,7 @@ intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
 
 static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
 {
-       unsigned dotclock = pipe_config->adjusted_mode.clock;
+       unsigned dotclock = pipe_config->port_clock;
        struct dpll *clock = &pipe_config->dpll;
 
        /* SDVO TV has fixed PLL values depend on its clock range,
@@ -1133,7 +1133,6 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
         */
        pipe_config->pixel_multiplier =
                intel_sdvo_get_pixel_multiplier(adjusted_mode);
-       adjusted_mode->clock *= pipe_config->pixel_multiplier;
 
        if (intel_sdvo->color_range_auto) {
                /* See CEA-861-E - 5.1 Default Encoding Parameters */
@@ -1217,11 +1216,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
            !intel_sdvo_set_tv_format(intel_sdvo))
                return;
 
-       /* We have tried to get input timing in mode_fixup, and filled into
-        * adjusted_mode.
-        */
        intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
-       input_dtd.part1.clock /= crtc->config.pixel_multiplier;
 
        if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
                input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
@@ -1330,6 +1325,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_sdvo_dtd dtd;
        int encoder_pixel_multiplier = 0;
+       int dotclock;
        u32 flags = 0, sdvox;
        u8 val;
        bool ret;
@@ -1368,6 +1364,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
                         >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
        }
 
+       dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+
+       if (HAS_PCH_SPLIT(dev))
+               ironlake_check_encoder_dotclock(pipe_config, dotclock);
+
+       pipe_config->adjusted_mode.crtc_clock = dotclock;
+
        /* Cross check the port pixel multiplier with the sdvo encoder state. */
        if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
                                 &val, 1)) {
@@ -1770,6 +1773,9 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
        struct edid *edid;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        /* set the bus switch and get the modes */
        edid = intel_sdvo_get_edid(connector);
 
@@ -1865,6 +1871,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
        uint32_t reply = 0, format_map = 0;
        int i;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        /* Read the list of supported input resolutions for the selected TV
         * format.
         */
@@ -1899,6 +1908,9 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
        struct drm_i915_private *dev_priv = connector->dev->dev_private;
        struct drm_display_mode *newmode;
 
+       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+                     connector->base.id, drm_get_connector_name(connector));
+
        /*
         * Fetch modes from VBT. For SDVO prefer the VBT mode since some
         * SDVO->LVDS transcoders can't cope with the EDID mode.
@@ -1930,7 +1942,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
                        break;
                }
        }
-
 }
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
@@ -1998,7 +2009,6 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
                                     intel_sdvo_connector->tv_format);
 
        intel_sdvo_destroy_enhance_property(connector);
-       drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(intel_sdvo_connector);
 }
@@ -2394,7 +2404,9 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
 
-       intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+       DRM_DEBUG_KMS("initialising DVI device %d\n", device);
+
+       intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
        if (!intel_sdvo_connector)
                return false;
 
@@ -2442,7 +2454,9 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
 
-       intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+       DRM_DEBUG_KMS("initialising TV type %d\n", type);
+
+       intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
        if (!intel_sdvo_connector)
                return false;
 
@@ -2467,6 +2481,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
        return true;
 
 err:
+       drm_sysfs_connector_remove(connector);
        intel_sdvo_destroy(connector);
        return false;
 }
@@ -2479,7 +2494,9 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
 
-       intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+       DRM_DEBUG_KMS("initialising analog device %d\n", device);
+
+       intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
        if (!intel_sdvo_connector)
                return false;
 
@@ -2510,7 +2527,9 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
 
-       intel_sdvo_connector = kzalloc(sizeof(struct intel_sdvo_connector), GFP_KERNEL);
+       DRM_DEBUG_KMS("initialising LVDS device %d\n", device);
+
+       intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
        if (!intel_sdvo_connector)
                return false;
 
@@ -2534,6 +2553,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
        return true;
 
 err:
+       drm_sysfs_connector_remove(connector);
        intel_sdvo_destroy(connector);
        return false;
 }
@@ -2605,8 +2625,10 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
 
        list_for_each_entry_safe(connector, tmp,
                                 &dev->mode_config.connector_list, head) {
-               if (intel_attached_encoder(connector) == &intel_sdvo->base)
+               if (intel_attached_encoder(connector) == &intel_sdvo->base) {
+                       drm_sysfs_connector_remove(connector);
                        intel_sdvo_destroy(connector);
+               }
        }
 }
 
@@ -2876,7 +2898,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
        struct intel_encoder *intel_encoder;
        struct intel_sdvo *intel_sdvo;
        int i;
-       intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
+       intel_sdvo = kzalloc(sizeof(*intel_sdvo), GFP_KERNEL);
        if (!intel_sdvo)
                return false;
 
index 9a0e6c5ea540544f10aa719a9c1fea6cbab82eb0..9944d8135e87f88215d5d07d54a5fd87fcf4dc60 100644 (file)
 #include "i915_drv.h"
 #include "intel_drv.h"
 
-/* IOSF sideband */
+/*
+ * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
+ * VLV_VLV2_PUNIT_HAS_0.8.docx
+ */
 static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
                           u32 port, u32 opcode, u32 addr, u32 *val)
 {
@@ -101,19 +104,83 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
        return val;
 }
 
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg)
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
 {
        u32 val = 0;
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+                       PUNIT_OPCODE_REG_READ, reg, &val);
+       return val;
+}
 
-       vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
-                       DPIO_OPCODE_REG_READ, reg, &val);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+                       PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+       u32 val = 0;
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+                       PUNIT_OPCODE_REG_READ, reg, &val);
+       return val;
+}
+
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+                       PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+       u32 val = 0;
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+                       PUNIT_OPCODE_REG_READ, reg, &val);
+       return val;
+}
 
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+                       PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+       u32 val = 0;
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+                       PUNIT_OPCODE_REG_READ, reg, &val);
+       return val;
+}
+
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+{
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+                       PUNIT_OPCODE_REG_WRITE, reg, &val);
+}
+
+static u32 vlv_get_phy_port(enum pipe pipe)
+{
+       u32 port = IOSF_PORT_DPIO;
+
+       WARN_ON((pipe != PIPE_A) && (pipe != PIPE_B));
+
+       return port;
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
+{
+       u32 val = 0;
+
+       vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
+                       DPIO_OPCODE_REG_READ, reg, &val);
        return val;
 }
 
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val)
 {
-       vlv_sideband_rw(dev_priv, DPIO_DEVFN, IOSF_PORT_DPIO,
+       vlv_sideband_rw(dev_priv, DPIO_DEVFN, vlv_get_phy_port(pipe),
                        DPIO_OPCODE_REG_WRITE, reg, &val);
 }
 
index ad6ec4b39005e8c6bfe6a41f65be48eaf4c7ebc6..8afaad6bcc4821981a2843a843d2d69c33151dd4 100644 (file)
@@ -288,7 +288,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                dev_priv->sprite_scaling_enabled |= 1 << pipe;
 
                if (!scaling_was_enabled) {
-                       intel_update_watermarks(dev);
+                       intel_update_watermarks(crtc);
                        intel_wait_for_vblank(dev, pipe);
                }
                sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -323,7 +323,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 
        /* potentially re-enable LP watermarks */
        if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-               intel_update_watermarks(dev);
+               intel_update_watermarks(crtc);
 }
 
 static void
@@ -349,7 +349,7 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 
        /* potentially re-enable LP watermarks */
        if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
-               intel_update_watermarks(dev);
+               intel_update_watermarks(crtc);
 }
 
 static int
@@ -521,13 +521,28 @@ intel_enable_primary(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int reg = DSPCNTR(intel_crtc->plane);
 
-       if (!intel_crtc->primary_disabled)
+       if (intel_crtc->primary_enabled)
                return;
 
-       intel_crtc->primary_disabled = false;
-       intel_update_fbc(dev);
+       intel_crtc->primary_enabled = true;
 
        I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+       intel_flush_primary_plane(dev_priv, intel_crtc->plane);
+
+       /*
+        * FIXME IPS should be fine as long as one plane is
+        * enabled, but in practice it seems to have problems
+        * when going from primary only to sprite only and vice
+        * versa.
+        */
+       if (intel_crtc->config.ips_enabled) {
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
+               hsw_enable_ips(intel_crtc);
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       intel_update_fbc(dev);
+       mutex_unlock(&dev->struct_mutex);
 }
 
 static void
@@ -538,13 +553,26 @@ intel_disable_primary(struct drm_crtc *crtc)
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int reg = DSPCNTR(intel_crtc->plane);
 
-       if (intel_crtc->primary_disabled)
+       if (!intel_crtc->primary_enabled)
                return;
 
-       I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+       intel_crtc->primary_enabled = false;
 
-       intel_crtc->primary_disabled = true;
-       intel_update_fbc(dev);
+       mutex_lock(&dev->struct_mutex);
+       if (dev_priv->fbc.plane == intel_crtc->plane)
+               intel_disable_fbc(dev);
+       mutex_unlock(&dev->struct_mutex);
+
+       /*
+        * FIXME IPS should be fine as long as one plane is
+        * enabled, but in practice it seems to have problems
+        * when going from primary only to sprite only and vice
+        * versa.
+        */
+       hsw_disable_ips(intel_crtc);
+
+       I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+       intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 }
 
 static int
@@ -623,15 +651,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                   uint32_t src_w, uint32_t src_h)
 {
        struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_plane *intel_plane = to_intel_plane(plane);
-       struct intel_framebuffer *intel_fb;
-       struct drm_i915_gem_object *obj, *old_obj;
-       int pipe = intel_plane->pipe;
-       enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
-                                                                     pipe);
-       int ret = 0;
+       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *old_obj = intel_plane->obj;
+       int ret;
        bool disable_primary = false;
        bool visible;
        int hscale, vscale;
@@ -652,29 +677,23 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                .y2 = crtc_y + crtc_h,
        };
        const struct drm_rect clip = {
-               .x2 = crtc->mode.hdisplay,
-               .y2 = crtc->mode.vdisplay,
+               .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+               .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
+       };
+       const struct {
+               int crtc_x, crtc_y;
+               unsigned int crtc_w, crtc_h;
+               uint32_t src_x, src_y, src_w, src_h;
+       } orig = {
+               .crtc_x = crtc_x,
+               .crtc_y = crtc_y,
+               .crtc_w = crtc_w,
+               .crtc_h = crtc_h,
+               .src_x = src_x,
+               .src_y = src_y,
+               .src_w = src_w,
+               .src_h = src_h,
        };
-
-       intel_fb = to_intel_framebuffer(fb);
-       obj = intel_fb->obj;
-
-       old_obj = intel_plane->obj;
-
-       intel_plane->crtc_x = crtc_x;
-       intel_plane->crtc_y = crtc_y;
-       intel_plane->crtc_w = crtc_w;
-       intel_plane->crtc_h = crtc_h;
-       intel_plane->src_x = src_x;
-       intel_plane->src_y = src_y;
-       intel_plane->src_w = src_w;
-       intel_plane->src_h = src_h;
-
-       /* Pipe must be running... */
-       if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE)) {
-               DRM_DEBUG_KMS("Pipe disabled\n");
-               return -EINVAL;
-       }
 
        /* Don't modify another pipe's plane */
        if (intel_plane->pipe != intel_crtc->pipe) {
@@ -810,7 +829,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
         * we can disable the primary and save power.
         */
        disable_primary = drm_rect_equals(&dst, &clip);
-       WARN_ON(disable_primary && !visible);
+       WARN_ON(disable_primary && !visible && intel_crtc->active);
 
        mutex_lock(&dev->struct_mutex);
 
@@ -820,27 +839,40 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
         * the sprite planes only require 128KiB alignment and 32 PTE padding.
         */
        ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-       if (ret)
-               goto out_unlock;
 
-       intel_plane->obj = obj;
-
-       /*
-        * Be sure to re-enable the primary before the sprite is no longer
-        * covering it fully.
-        */
-       if (!disable_primary)
-               intel_enable_primary(crtc);
+       mutex_unlock(&dev->struct_mutex);
 
-       if (visible)
-               intel_plane->update_plane(plane, crtc, fb, obj,
-                                         crtc_x, crtc_y, crtc_w, crtc_h,
-                                         src_x, src_y, src_w, src_h);
-       else
-               intel_plane->disable_plane(plane, crtc);
+       if (ret)
+               return ret;
+
+       intel_plane->crtc_x = orig.crtc_x;
+       intel_plane->crtc_y = orig.crtc_y;
+       intel_plane->crtc_w = orig.crtc_w;
+       intel_plane->crtc_h = orig.crtc_h;
+       intel_plane->src_x = orig.src_x;
+       intel_plane->src_y = orig.src_y;
+       intel_plane->src_w = orig.src_w;
+       intel_plane->src_h = orig.src_h;
+       intel_plane->obj = obj;
 
-       if (disable_primary)
-               intel_disable_primary(crtc);
+       if (intel_crtc->active) {
+               /*
+                * Be sure to re-enable the primary before the sprite is no longer
+                * covering it fully.
+                */
+               if (!disable_primary)
+                       intel_enable_primary(crtc);
+
+               if (visible)
+                       intel_plane->update_plane(plane, crtc, fb, obj,
+                                                 crtc_x, crtc_y, crtc_w, crtc_h,
+                                                 src_x, src_y, src_w, src_h);
+               else
+                       intel_plane->disable_plane(plane, crtc);
+
+               if (disable_primary)
+                       intel_disable_primary(crtc);
+       }
 
        /* Unpin old obj after new one is active to avoid ugliness */
        if (old_obj) {
@@ -850,17 +882,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                 * wait for vblank to avoid ugliness, we only need to
                 * do the pin & ref bookkeeping.
                 */
-               if (old_obj != obj) {
-                       mutex_unlock(&dev->struct_mutex);
-                       intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
-                       mutex_lock(&dev->struct_mutex);
-               }
+               if (old_obj != obj && intel_crtc->active)
+                       intel_wait_for_vblank(dev, intel_crtc->pipe);
+
+               mutex_lock(&dev->struct_mutex);
                intel_unpin_fb_obj(old_obj);
+               mutex_unlock(&dev->struct_mutex);
        }
 
-out_unlock:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
+       return 0;
 }
 
 static int
@@ -868,7 +898,7 @@ intel_disable_plane(struct drm_plane *plane)
 {
        struct drm_device *dev = plane->dev;
        struct intel_plane *intel_plane = to_intel_plane(plane);
-       int ret = 0;
+       struct intel_crtc *intel_crtc;
 
        if (!plane->fb)
                return 0;
@@ -876,21 +906,25 @@ intel_disable_plane(struct drm_plane *plane)
        if (WARN_ON(!plane->crtc))
                return -EINVAL;
 
-       intel_enable_primary(plane->crtc);
-       intel_plane->disable_plane(plane, plane->crtc);
+       intel_crtc = to_intel_crtc(plane->crtc);
 
-       if (!intel_plane->obj)
-               goto out;
+       if (intel_crtc->active) {
+               intel_enable_primary(plane->crtc);
+               intel_plane->disable_plane(plane, plane->crtc);
+       }
 
-       intel_wait_for_vblank(dev, intel_plane->pipe);
+       if (intel_plane->obj) {
+               if (intel_crtc->active)
+                       intel_wait_for_vblank(dev, intel_plane->pipe);
 
-       mutex_lock(&dev->struct_mutex);
-       intel_unpin_fb_obj(intel_plane->obj);
-       intel_plane->obj = NULL;
-       mutex_unlock(&dev->struct_mutex);
-out:
+               mutex_lock(&dev->struct_mutex);
+               intel_unpin_fb_obj(intel_plane->obj);
+               mutex_unlock(&dev->struct_mutex);
 
-       return ret;
+               intel_plane->obj = NULL;
+       }
+
+       return 0;
 }
 
 static void intel_destroy_plane(struct drm_plane *plane)
@@ -1034,7 +1068,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
        if (INTEL_INFO(dev)->gen < 5)
                return -ENODEV;
 
-       intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+       intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
        if (!intel_plane)
                return -ENOMEM;
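The sprite-plane rework above defers committing the cached plane coordinates until after the framebuffer pin succeeds, and only touches hardware when the CRTC is active. A simplified sketch of that ordering, with illustrative names throughout:

/* Sketch: acquire resources first, commit cached state second,
 * program hardware only on an active pipe. */
#include <stdbool.h>
#include <stdio.h>

struct plane_state {
        int crtc_x, crtc_y, crtc_w, crtc_h;
};

struct plane {
        struct plane_state state;       /* last committed software state */
        bool crtc_active;
};

static int pin_framebuffer(int fb_id)
{
        return fb_id >= 0 ? 0 : -1;     /* stand-in for pin-and-fence */
}

static int update_plane(struct plane *plane, int fb_id,
                        const struct plane_state *req)
{
        if (pin_framebuffer(fb_id))
                return -1;              /* fail before touching cached state */

        plane->state = *req;            /* commit only after pinning succeeded */

        if (plane->crtc_active)         /* hardware writes only on a live pipe */
                printf("programming plane at %d,%d (%dx%d)\n",
                       req->crtc_x, req->crtc_y, req->crtc_w, req->crtc_h);
        return 0;
}

int main(void)
{
        struct plane plane = { .crtc_active = true };
        struct plane_state req = { 0, 0, 1920, 1080 };

        return update_plane(&plane, 1, &req);
}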
 
index dd6f84bf6c220e94c7f50e1d38ca7fea9f11cecb..18c406246a2d4acd352017a730eebe7842ba8eb8 100644 (file)
@@ -912,7 +912,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
        if (!tv_mode)
                return false;
 
-       pipe_config->adjusted_mode.clock = tv_mode->clock;
+       pipe_config->adjusted_mode.crtc_clock = tv_mode->clock;
        DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
        pipe_config->pipe_bpp = 8*3;
 
@@ -1044,7 +1044,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
                tv_mode->dda3_inc << TV_SCDDA3_INC_SHIFT;
 
        /* Enable two fixes for the chips that need them. */
-       if (dev->pci_device < 0x2772)
+       if (dev->pdev->device < 0x2772)
                tv_ctl |= TV_ENC_C0_FIX | TV_ENC_SDP_FIX;
 
        I915_WRITE(TV_H_CTL_1, hctl1);
@@ -1094,7 +1094,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
                unsigned int xsize, ysize;
                /* Pipe must be off here */
                I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
-               intel_flush_display_plane(dev_priv, intel_crtc->plane);
+               intel_flush_primary_plane(dev_priv, intel_crtc->plane);
 
                /* Wait for vblank for the disable to take effect */
                if (IS_GEN2(dev))
@@ -1123,7 +1123,7 @@ static void intel_tv_mode_set(struct intel_encoder *encoder)
 
                I915_WRITE(pipeconf_reg, pipeconf);
                I915_WRITE(dspcntr_reg, dspcntr);
-               intel_flush_display_plane(dev_priv, intel_crtc->plane);
+               intel_flush_primary_plane(dev_priv, intel_crtc->plane);
        }
 
        j = 0;
@@ -1433,7 +1433,6 @@ intel_tv_get_modes(struct drm_connector *connector)
 static void
 intel_tv_destroy(struct drm_connector *connector)
 {
-       drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
        kfree(connector);
 }
@@ -1518,7 +1517,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
 static int tv_is_present_in_vbt(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct child_device_config *p_child;
+       union child_device_config *p_child;
        int i, ret;
 
        if (!dev_priv->vbt.child_dev_num)
@@ -1530,13 +1529,13 @@ static int tv_is_present_in_vbt(struct drm_device *dev)
                /*
                 * If the device type is not TV, continue.
                 */
-               if (p_child->device_type != DEVICE_TYPE_INT_TV &&
-                       p_child->device_type != DEVICE_TYPE_TV)
+               if (p_child->old.device_type != DEVICE_TYPE_INT_TV &&
+                       p_child->old.device_type != DEVICE_TYPE_TV)
                        continue;
                /* Only when the addin_offset is non-zero, it is regarded
                 * as present.
                 */
-               if (p_child->addin_offset) {
+               if (p_child->old.addin_offset) {
                        ret = 1;
                        break;
                }
@@ -1590,12 +1589,12 @@ intel_tv_init(struct drm_device *dev)
            (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
                return;
 
-       intel_tv = kzalloc(sizeof(struct intel_tv), GFP_KERNEL);
+       intel_tv = kzalloc(sizeof(*intel_tv), GFP_KERNEL);
        if (!intel_tv) {
                return;
        }
 
-       intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
+       intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
        if (!intel_connector) {
                kfree(intel_tv);
                return;
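The TV hunks above move from struct child_device_config to union child_device_config, with the legacy fields reached through the .old view so the same VBT bytes can also be interpreted through a newer layout. A toy example of that union-of-layouts approach; the layouts, field names and values here are invented for illustration only.

/* Sketch of viewing one blob through two alternative layouts via a union. */
#include <stdint.h>
#include <stdio.h>

struct old_child_config {
        uint16_t device_type;
        uint16_t addin_offset;
};

struct newer_child_config {
        uint16_t device_type;
        uint8_t  dvo_port;
        uint8_t  i2c_pin;
};

union child_device_config {
        struct old_child_config old;
        struct newer_child_config newer;
};

int main(void)
{
        union child_device_config child = {
                .old = { .device_type = 0x1009, .addin_offset = 0x40 },
        };

        /* legacy view, as the TV-detection code uses it */
        if (child.old.addin_offset)
                printf("TV child device present, type 0x%04x\n",
                       (unsigned int)child.old.device_type);
        return 0;
}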
index 8649f1c36b007f89ea5b2bbf8bf3bb26090d9f52..f6fae35c568e53036faf150f4791e950c5bfaf3f 100644 (file)
@@ -204,60 +204,34 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
        gen6_gt_check_fifodbg(dev_priv);
 }
 
-void intel_uncore_early_sanitize(struct drm_device *dev)
+static void gen6_force_wake_work(struct work_struct *work)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
+       unsigned long irqflags;
 
-       if (HAS_FPGA_DBG_UNCLAIMED(dev))
-               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       if (--dev_priv->uncore.forcewake_count == 0)
+               dev_priv->uncore.funcs.force_wake_put(dev_priv);
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
-void intel_uncore_init(struct drm_device *dev)
+void intel_uncore_early_sanitize(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (IS_VALLEYVIEW(dev)) {
-               dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
-               dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
-       } else if (IS_HASWELL(dev)) {
-               dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
-               dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
-       } else if (IS_IVYBRIDGE(dev)) {
-               u32 ecobus;
-
-               /* IVB configs may use multi-threaded forcewake */
-
-               /* A small trick here - if the bios hasn't configured
-                * MT forcewake, and if the device is in RC6, then
-                * force_wake_mt_get will not wake the device and the
-                * ECOBUS read will return zero. Which will be
-                * (correctly) interpreted by the test below as MT
-                * forcewake being disabled.
-                */
-               mutex_lock(&dev->struct_mutex);
-               __gen6_gt_force_wake_mt_get(dev_priv);
-               ecobus = __raw_i915_read32(dev_priv, ECOBUS);
-               __gen6_gt_force_wake_mt_put(dev_priv);
-               mutex_unlock(&dev->struct_mutex);
+       if (HAS_FPGA_DBG_UNCLAIMED(dev))
+               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
 
-               if (ecobus & FORCEWAKE_MT_ENABLE) {
-                       dev_priv->uncore.funcs.force_wake_get =
-                               __gen6_gt_force_wake_mt_get;
-                       dev_priv->uncore.funcs.force_wake_put =
-                               __gen6_gt_force_wake_mt_put;
-               } else {
-                       DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
-                       DRM_INFO("when using vblank-synced partial screen updates.\n");
-                       dev_priv->uncore.funcs.force_wake_get =
-                               __gen6_gt_force_wake_get;
-                       dev_priv->uncore.funcs.force_wake_put =
-                               __gen6_gt_force_wake_put;
-               }
-       } else if (IS_GEN6(dev)) {
-               dev_priv->uncore.funcs.force_wake_get =
-                       __gen6_gt_force_wake_get;
-               dev_priv->uncore.funcs.force_wake_put =
-                       __gen6_gt_force_wake_put;
+       if (IS_HASWELL(dev) &&
+           (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
+               /* The docs do not explain exactly how the calculation can be
+                * made. It is somewhat guessable, but for now, it's always
+                * 128MB.
+                * NB: We can't write IDICR yet because we do not have gt funcs
+                * set up */
+               dev_priv->ellc_size = 128;
+               DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
        }
 }
 
@@ -276,10 +250,26 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev)
 
 void intel_uncore_sanitize(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 reg_val;
+
        intel_uncore_forcewake_reset(dev);
 
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);
+
+       /* Turn off power gating; required especially for BIOS-less systems */
+       if (IS_VALLEYVIEW(dev)) {
+
+               mutex_lock(&dev_priv->rps.hw_lock);
+               reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);
+
+               if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
+                       vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);
+
+               mutex_unlock(&dev_priv->rps.hw_lock);
+
+       }
 }
 
 /*
@@ -292,6 +282,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
        unsigned long irqflags;
 
+       if (!dev_priv->uncore.funcs.force_wake_get)
+               return;
+
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
        if (dev_priv->uncore.forcewake_count++ == 0)
                dev_priv->uncore.funcs.force_wake_get(dev_priv);
@@ -305,17 +298,22 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
        unsigned long irqflags;
 
+       if (!dev_priv->uncore.funcs.force_wake_put)
+               return;
+
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-       if (--dev_priv->uncore.forcewake_count == 0)
-               dev_priv->uncore.funcs.force_wake_put(dev_priv);
+       if (--dev_priv->uncore.forcewake_count == 0) {
+               dev_priv->uncore.forcewake_count++;
+               mod_delayed_work(dev_priv->wq,
+                                &dev_priv->uncore.force_wake_work,
+                                1);
+       }
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
 }
 
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
-       ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
-        ((reg) < 0x40000) &&            \
-        ((reg) != FORCEWAKE))
+        ((reg) < 0x40000 && (reg) != FORCEWAKE)
 
 static void
 ilk_dummy_write(struct drm_i915_private *dev_priv)
@@ -329,8 +327,7 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
 static void
 hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
 {
-       if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-           (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+       if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                DRM_ERROR("Unknown unclaimed register before writing to %x\n",
                          reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
@@ -340,20 +337,43 @@ hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
 static void
 hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
 {
-       if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
-           (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+       if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
                DRM_ERROR("Unclaimed write to %x\n", reg);
                __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
        }
 }
 
-#define __i915_read(x) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
+#define REG_READ_HEADER(x) \
        unsigned long irqflags; \
        u##x val = 0; \
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
-       if (dev_priv->info->gen == 5) \
-               ilk_dummy_write(dev_priv); \
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define REG_READ_FOOTER \
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+       trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+       return val
+
+#define __gen4_read(x) \
+static u##x \
+gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+       REG_READ_HEADER(x); \
+       val = __raw_i915_read##x(dev_priv, reg); \
+       REG_READ_FOOTER; \
+}
+
+#define __gen5_read(x) \
+static u##x \
+gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+       REG_READ_HEADER(x); \
+       ilk_dummy_write(dev_priv); \
+       val = __raw_i915_read##x(dev_priv, reg); \
+       REG_READ_FOOTER; \
+}
+
+#define __gen6_read(x) \
+static u##x \
+gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
+       REG_READ_HEADER(x); \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                if (dev_priv->uncore.forcewake_count == 0) \
                        dev_priv->uncore.funcs.force_wake_get(dev_priv); \
@@ -363,28 +383,73 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
        } else { \
                val = __raw_i915_read##x(dev_priv, reg); \
        } \
+       REG_READ_FOOTER; \
+}
+
+__gen6_read(8)
+__gen6_read(16)
+__gen6_read(32)
+__gen6_read(64)
+__gen5_read(8)
+__gen5_read(16)
+__gen5_read(32)
+__gen5_read(64)
+__gen4_read(8)
+__gen4_read(16)
+__gen4_read(32)
+__gen4_read(64)
+
+#undef __gen6_read
+#undef __gen5_read
+#undef __gen4_read
+#undef REG_READ_FOOTER
+#undef REG_READ_HEADER
+
+#define REG_WRITE_HEADER \
+       unsigned long irqflags; \
+       trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+
+#define __gen4_write(x) \
+static void \
+gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+       REG_WRITE_HEADER; \
+       __raw_i915_write##x(dev_priv, reg, val); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
-       trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
-       return val; \
 }
 
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
+#define __gen5_write(x) \
+static void \
+gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+       REG_WRITE_HEADER; \
+       ilk_dummy_write(dev_priv); \
+       __raw_i915_write##x(dev_priv, reg, val); \
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
 
-#define __i915_write(x) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
-       unsigned long irqflags; \
+#define __gen6_write(x) \
+static void \
+gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
        u32 __fifo_ret = 0; \
-       trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+       REG_WRITE_HEADER; \
+       if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+               __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+       } \
+       __raw_i915_write##x(dev_priv, reg, val); \
+       if (unlikely(__fifo_ret)) { \
+               gen6_gt_check_fifodbg(dev_priv); \
+       } \
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+
+#define __hsw_write(x) \
+static void \
+hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
+       u32 __fifo_ret = 0; \
+       REG_WRITE_HEADER; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
-       if (dev_priv->info->gen == 5) \
-               ilk_dummy_write(dev_priv); \
        hsw_unclaimed_reg_clear(dev_priv, reg); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
@@ -393,11 +458,134 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool tr
        hsw_unclaimed_reg_check(dev_priv, reg); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
 }
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
+
+__hsw_write(8)
+__hsw_write(16)
+__hsw_write(32)
+__hsw_write(64)
+__gen6_write(8)
+__gen6_write(16)
+__gen6_write(32)
+__gen6_write(64)
+__gen5_write(8)
+__gen5_write(16)
+__gen5_write(32)
+__gen5_write(64)
+__gen4_write(8)
+__gen4_write(16)
+__gen4_write(32)
+__gen4_write(64)
+
+#undef __hsw_write
+#undef __gen6_write
+#undef __gen5_write
+#undef __gen4_write
+#undef REG_WRITE_HEADER
+
+void intel_uncore_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
+                         gen6_force_wake_work);
+
+       if (IS_VALLEYVIEW(dev)) {
+               dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
+               dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+       } else if (IS_HASWELL(dev)) {
+               dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
+               dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+       } else if (IS_IVYBRIDGE(dev)) {
+               u32 ecobus;
+
+               /* IVB configs may use multi-threaded forcewake */
+
+               /* A small trick here - if the bios hasn't configured
+                * MT forcewake, and if the device is in RC6, then
+                * force_wake_mt_get will not wake the device and the
+                * ECOBUS read will return zero. Which will be
+                * (correctly) interpreted by the test below as MT
+                * forcewake being disabled.
+                */
+               mutex_lock(&dev->struct_mutex);
+               __gen6_gt_force_wake_mt_get(dev_priv);
+               ecobus = __raw_i915_read32(dev_priv, ECOBUS);
+               __gen6_gt_force_wake_mt_put(dev_priv);
+               mutex_unlock(&dev->struct_mutex);
+
+               if (ecobus & FORCEWAKE_MT_ENABLE) {
+                       dev_priv->uncore.funcs.force_wake_get =
+                               __gen6_gt_force_wake_mt_get;
+                       dev_priv->uncore.funcs.force_wake_put =
+                               __gen6_gt_force_wake_mt_put;
+               } else {
+                       DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+                       DRM_INFO("when using vblank-synced partial screen updates.\n");
+                       dev_priv->uncore.funcs.force_wake_get =
+                               __gen6_gt_force_wake_get;
+                       dev_priv->uncore.funcs.force_wake_put =
+                               __gen6_gt_force_wake_put;
+               }
+       } else if (IS_GEN6(dev)) {
+               dev_priv->uncore.funcs.force_wake_get =
+                       __gen6_gt_force_wake_get;
+               dev_priv->uncore.funcs.force_wake_put =
+                       __gen6_gt_force_wake_put;
+       }
+
+       switch (INTEL_INFO(dev)->gen) {
+       case 7:
+       case 6:
+               if (IS_HASWELL(dev)) {
+                       dev_priv->uncore.funcs.mmio_writeb  = hsw_write8;
+                       dev_priv->uncore.funcs.mmio_writew  = hsw_write16;
+                       dev_priv->uncore.funcs.mmio_writel  = hsw_write32;
+                       dev_priv->uncore.funcs.mmio_writeq  = hsw_write64;
+               } else {
+                       dev_priv->uncore.funcs.mmio_writeb  = gen6_write8;
+                       dev_priv->uncore.funcs.mmio_writew  = gen6_write16;
+                       dev_priv->uncore.funcs.mmio_writel  = gen6_write32;
+                       dev_priv->uncore.funcs.mmio_writeq  = gen6_write64;
+               }
+               dev_priv->uncore.funcs.mmio_readb  = gen6_read8;
+               dev_priv->uncore.funcs.mmio_readw  = gen6_read16;
+               dev_priv->uncore.funcs.mmio_readl  = gen6_read32;
+               dev_priv->uncore.funcs.mmio_readq  = gen6_read64;
+               break;
+       case 5:
+               dev_priv->uncore.funcs.mmio_writeb  = gen5_write8;
+               dev_priv->uncore.funcs.mmio_writew  = gen5_write16;
+               dev_priv->uncore.funcs.mmio_writel  = gen5_write32;
+               dev_priv->uncore.funcs.mmio_writeq  = gen5_write64;
+               dev_priv->uncore.funcs.mmio_readb  = gen5_read8;
+               dev_priv->uncore.funcs.mmio_readw  = gen5_read16;
+               dev_priv->uncore.funcs.mmio_readl  = gen5_read32;
+               dev_priv->uncore.funcs.mmio_readq  = gen5_read64;
+               break;
+       case 4:
+       case 3:
+       case 2:
+               dev_priv->uncore.funcs.mmio_writeb  = gen4_write8;
+               dev_priv->uncore.funcs.mmio_writew  = gen4_write16;
+               dev_priv->uncore.funcs.mmio_writel  = gen4_write32;
+               dev_priv->uncore.funcs.mmio_writeq  = gen4_write64;
+               dev_priv->uncore.funcs.mmio_readb  = gen4_read8;
+               dev_priv->uncore.funcs.mmio_readw  = gen4_read16;
+               dev_priv->uncore.funcs.mmio_readl  = gen4_read32;
+               dev_priv->uncore.funcs.mmio_readq  = gen4_read64;
+               break;
+       }
+}
+
+void intel_uncore_fini(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       flush_delayed_work(&dev_priv->uncore.force_wake_work);
+
+       /* Paranoia: make sure we have disabled everything before we exit. */
+       intel_uncore_sanitize(dev);
+}
 
 static const struct register_whitelist {
        uint64_t offset;
@@ -445,36 +633,6 @@ int i915_reg_read_ioctl(struct drm_device *dev,
        return 0;
 }
 
-static int i8xx_do_reset(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       if (IS_I85X(dev))
-               return -ENODEV;
-
-       I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
-       POSTING_READ(D_STATE);
-
-       if (IS_I830(dev) || IS_845G(dev)) {
-               I915_WRITE(DEBUG_RESET_I830,
-                          DEBUG_RESET_DISPLAY |
-                          DEBUG_RESET_RENDER |
-                          DEBUG_RESET_FULL);
-               POSTING_READ(DEBUG_RESET_I830);
-               msleep(1);
-
-               I915_WRITE(DEBUG_RESET_I830, 0);
-               POSTING_READ(DEBUG_RESET_I830);
-       }
-
-       msleep(1);
-
-       I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
-       POSTING_READ(D_STATE);
-
-       return 0;
-}
-
 static int i965_reset_complete(struct drm_device *dev)
 {
        u8 gdrst;
@@ -576,7 +734,6 @@ int intel_gpu_reset(struct drm_device *dev)
        case 6: return gen6_do_reset(dev);
        case 5: return ironlake_do_reset(dev);
        case 4: return i965_do_reset(dev);
-       case 2: return i8xx_do_reset(dev);
        default: return -ENODEV;
        }
 }
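
The per-generation read/write macros above stamp out one accessor per hardware generation, and intel_uncore_init() then fills a table of function pointers so the hot register paths dispatch through the table instead of branching on the generation each time. A minimal user-space sketch of that shape, assuming a fake register file and made-up generation hooks rather than the driver's MMIO helpers:

#include <stdint.h>
#include <stdio.h>

struct dev_priv {
	uint32_t regs[16];
	struct {
		uint32_t (*mmio_readl)(struct dev_priv *, unsigned int);
		void (*mmio_writel)(struct dev_priv *, unsigned int, uint32_t);
	} funcs;
};

/* Stamp out one read/write pair per "generation"; extra_write stands in
 * for per-generation quirks such as the FIFO wait in the gen6 write path. */
#define DEFINE_GEN_RW(g, extra_write) \
static uint32_t gen##g##_readl(struct dev_priv *d, unsigned int reg) \
{ \
	return d->regs[reg]; \
} \
static void gen##g##_writel(struct dev_priv *d, unsigned int reg, uint32_t val) \
{ \
	extra_write; \
	d->regs[reg] = val; \
}

DEFINE_GEN_RW(4, (void)0)
DEFINE_GEN_RW(6, printf("gen6: checking write FIFO before reg %u\n", reg))
#undef DEFINE_GEN_RW

/* Pick the accessors once, the way intel_uncore_init() fills uncore.funcs. */
static void uncore_init(struct dev_priv *d, int gen)
{
	if (gen >= 6) {
		d->funcs.mmio_readl  = gen6_readl;
		d->funcs.mmio_writel = gen6_writel;
	} else {
		d->funcs.mmio_readl  = gen4_readl;
		d->funcs.mmio_writel = gen4_writel;
	}
}

int main(void)
{
	struct dev_priv d = { .regs = { 0 } };

	uncore_init(&d, 6);
	d.funcs.mmio_writel(&d, 3, 0xdeadbeef);
	printf("reg 3 = 0x%08x\n", (unsigned int)d.funcs.mmio_readl(&d, 3));
	return 0;
}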
index cc3166dd445ac8f8371752a2ab3f1b13bf4811cd..087db33f6cff19f8cdaa32f390c6c4dd52a8c79d 100644 (file)
@@ -406,11 +406,6 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
        dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
 
-       dev->counters += 3;
-       dev->types[6] = _DRM_STAT_IRQ;
-       dev->types[7] = _DRM_STAT_PRIMARY;
-       dev->types[8] = _DRM_STAT_SECONDARY;
-
        ret = drm_vblank_init(dev, 1);
 
        if (ret) {
index 598c281def0a34ef1e50a04af7dc9f06b54b04c0..2b0ceb8dc11b8dce6894cf056998714603087a3d 100644 (file)
@@ -169,5 +169,5 @@ void mga_driver_irq_uninstall(struct drm_device *dev)
        /* Disable *all* interrupts */
        MGA_WRITE(MGA_IEN, 0);
 
-       dev->irq_enabled = 0;
+       dev->irq_enabled = false;
 }
index b487cdec5ee792b6d00fb2a086202426c6a94f99..3a1c5fbae54a5486252e91dd4ae1c0f9fdf95a59 100644 (file)
@@ -5,6 +5,7 @@ config DRM_MGAG200
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select DRM_TTM
        help
         This is a KMS driver for the MGA G200 server chips, it
index fcce7b2f80110d2c9a2907b02210edc141f1aab9..f15ea3c4a90af16db633d9082d57f2344fb4565a 100644 (file)
@@ -99,7 +99,6 @@ static struct drm_driver driver = {
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
 
-       .gem_init_object = mgag200_gem_init_object,
        .gem_free_object = mgag200_gem_free_object,
        .dumb_create = mgag200_dumb_create,
        .dumb_map_offset = mgag200_dumb_mmap_offset,
index baaae19332e2e9d98d3cda327d44b8f7c5d5ecb4..cf11ee68a6d92bf8e982e3063d86befcaf2ced6d 100644 (file)
@@ -260,7 +260,6 @@ int mgag200_driver_unload(struct drm_device *dev);
 int mgag200_gem_create(struct drm_device *dev,
                   u32 size, bool iskernel,
                       struct drm_gem_object **obj);
-int mgag200_gem_init_object(struct drm_gem_object *obj);
 int mgag200_dumb_create(struct drm_file *file,
                        struct drm_device *dev,
                        struct drm_mode_create_dumb *args);
index 0f8b861b10b3e6749704ea6a82cd57457602bfc6..b1120cb1db6d76b76fd38afd3279a3684e6b188b 100644 (file)
@@ -310,12 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
        return 0;
 }
 
-int mgag200_gem_init_object(struct drm_gem_object *obj)
-{
-       BUG();
-       return 0;
-}
-
 void mgag200_bo_unref(struct mgag200_bo **bo)
 {
        struct ttm_buffer_object *tbo;
index a06c19cc56f8988f7399641b9d9048308773f875..f39ab7554fc992175630831acf28bbf223cd9f50 100644 (file)
@@ -14,6 +14,7 @@ config DRM_MSM
 config DRM_MSM_FBDEV
        bool "Enable legacy fbdev support for MSM modesetting driver"
        depends on DRM_MSM
+       select DRM_KMS_FB_HELPER
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
index ff80f12480ea2223ec85583cd47b01f4064a2fd8..7cf787d697b111aade40bf0cb34a1db52a2c41a6 100644 (file)
@@ -3,6 +3,7 @@ config DRM_NOUVEAU
        depends on DRM && PCI
         select FW_LOADER
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select DRM_TTM
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
index 37712a6df92358e2583b87a53df4a04cd25ba290..e290cfa4acee09bd8f0a1a2b54462e38abf0bcec 100644 (file)
@@ -113,7 +113,7 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
                pmc->use_msi = false;
                break;
        default:
-               pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true);
+               pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false);
                if (pmc->use_msi) {
                        pmc->use_msi = pci_enable_msi(device->pdev) == 0;
                        if (pmc->use_msi) {
index 8b3adec5fbb19e8a34cdb53fa83420f7b57f21b2..eae939d3fc1a262b4d0a7277e6d2d242eed99a6b 100644 (file)
@@ -41,7 +41,8 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
        if (!client)
                return false;
 
-       if (!client->driver || client->driver->detect(client, info)) {
+       if (!client->dev.driver ||
+           to_i2c_driver(client->dev.driver)->detect(client, info)) {
                i2c_unregister_device(client);
                return false;
        }
index 2e70462883e89efe2be217c37f85d1754d5426a3..2a15b98b4d2b826e7fa45ddb7c4611d940206bd2 100644 (file)
@@ -210,8 +210,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
        sim_data.nvclk_khz = NVClk;
        sim_data.bpp = bpp;
        sim_data.two_heads = nv_two_heads(dev);
-       if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
-           (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
+       if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
+           (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
                uint32_t type;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
@@ -256,8 +256,8 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
 
        if (nv_device(drm->device)->card_type < NV_20)
                nv04_update_arb(dev, vclk, bpp, burst, lwm);
-       else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
-                (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
+       else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
+                (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
                *burst = 128;
                *lwm = 0x0480;
        } else
index d4fbf11360febee34d2774c08233944c6b546fc4..0e3270c3ffd2296caad7cdbcbaadf9fdc7044be8 100644 (file)
@@ -326,8 +326,6 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
                        regp->MiscOutReg = 0x23;        /* +hsync +vsync */
        }
 
-       regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
-
        /*
         * Time Sequencer
         */
index 93dd23ff00931c473af05dd8280aa6e2fb40369e..59d1c040b84f69c8d9513a20ebb9081267ee432e 100644 (file)
@@ -490,8 +490,8 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
        /* BIOS scripts usually take care of the backlight, thanks
         * Apple for your consistency.
         */
-       if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
-           dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
+       if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 ||
+           dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) {
                if (mode == DRM_MODE_DPMS_ON) {
                        nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
                        nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
index 9928187f0a7d0bebdf3c80f8aec00809b294546e..2cf65e0b517e881ca15d2ed0c5b307ca346f606f 100644 (file)
@@ -127,7 +127,7 @@ static inline bool
 nv_two_heads(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       const int impl = dev->pci_device & 0x0ff0;
+       const int impl = dev->pdev->device & 0x0ff0;
 
        if (nv_device(drm->device)->card_type >= NV_10 && impl != 0x0100 &&
            impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
@@ -139,14 +139,14 @@ nv_two_heads(struct drm_device *dev)
 static inline bool
 nv_gf4_disp_arch(struct drm_device *dev)
 {
-       return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
+       return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110;
 }
 
 static inline bool
 nv_two_reg_pll(struct drm_device *dev)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       const int impl = dev->pci_device & 0x0ff0;
+       const int impl = dev->pdev->device & 0x0ff0;
 
        if (impl == 0x0310 || impl == 0x0340 || nv_device(drm->device)->card_type >= NV_40)
                return true;
index 973056b86207d89f191e8e37c872d7f7d66ca9b2..f8dee834527fb621c107f9e01d6a7ec546535332 100644 (file)
@@ -220,7 +220,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
        int ret;
 
        if (plltype == PLL_MEMORY &&
-           (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+           (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) {
                uint32_t mpllP;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -230,7 +230,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
                return 400000 / mpllP;
        } else
        if (plltype == PLL_MEMORY &&
-           (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+           (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) {
                uint32_t clock;
 
                pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
index 8f467e7bfd196f70a17e892775642aaa7223238e..72055a35f8459aa2aee9e02218ad1c2f01722704 100644 (file)
@@ -130,7 +130,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
        if (chan->ntfy) {
                nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
                nouveau_bo_unpin(chan->ntfy);
-               drm_gem_object_unreference_unlocked(chan->ntfy->gem);
+               drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
        }
 
        if (chan->heap.block_size)
@@ -178,10 +178,10 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
                getparam->value = device->chipset;
                break;
        case NOUVEAU_GETPARAM_PCI_VENDOR:
-               getparam->value = dev->pci_vendor;
+               getparam->value = dev->pdev->vendor;
                break;
        case NOUVEAU_GETPARAM_PCI_DEVICE:
-               getparam->value = dev->pci_device;
+               getparam->value = dev->pdev->device;
                break;
        case NOUVEAU_GETPARAM_BUS_TYPE:
                if (drm_pci_device_is_agp(dev))
@@ -320,7 +320,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
                        goto done;
        }
 
-       ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
+       ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
                                    &init->notifier_handle);
        if (ret)
                goto done;
index dd7d2e18271940ea3e9f6d9a90b09140c767e73e..cfbeee607b3ace2eb0c602919033b570580f2625 100644 (file)
@@ -253,18 +253,15 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = {
 
 static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
 {
-       acpi_handle dhandle, nvidia_handle;
-       acpi_status status;
+       acpi_handle dhandle;
        int retval = 0;
 
        dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
        if (!dhandle)
                return false;
 
-       status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
-       if (ACPI_FAILURE(status)) {
+       if (!acpi_has_method(dhandle, "_DSM"))
                return false;
-       }
 
        if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
                retval |= NOUVEAU_DSM_HAS_MUX;
index 2ffad2176b7fc7a2e2f6a7ee90d653286e4151bc..630f6e84fc01989da5f3a8957bab9d450f888c92 100644 (file)
@@ -82,7 +82,7 @@ nv40_backlight_init(struct drm_connector *connector)
        memset(&props, 0, sizeof(struct backlight_properties));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = 31;
-       bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
+       bd = backlight_device_register("nv_backlight", connector->kdev, drm,
                                       &nv40_bl_ops, &props);
        if (IS_ERR(bd))
                return PTR_ERR(bd);
@@ -204,7 +204,7 @@ nv50_backlight_init(struct drm_connector *connector)
        memset(&props, 0, sizeof(struct backlight_properties));
        props.type = BACKLIGHT_RAW;
        props.max_brightness = 100;
-       bd = backlight_device_register("nv_backlight", &connector->kdev,
+       bd = backlight_device_register("nv_backlight", connector->kdev,
                                       nv_encoder, ops, &props);
        if (IS_ERR(bd))
                return PTR_ERR(bd);
index 3e7287675ecf627fb3e6db850548e363e7ad2772..4c3feaaa10375721627b4bb00c5f95f0e6eb8a9a 100644 (file)
@@ -127,8 +127,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp
 #ifdef __powerpc__
        /* Powerbook specific quirks */
        if (script == LVDS_RESET &&
-           (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 ||
-            dev->pci_device == 0x0329))
+           (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 ||
+            dev->pdev->device == 0x0329))
                nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
 #endif
 
index 755c38d0627171e985ad87abe703b3407081648f..4172854d4365da62542ff6696ae4b0c5ce1d7104 100644 (file)
@@ -146,7 +146,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
        struct drm_device *dev = drm->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-       if (unlikely(nvbo->gem))
+       if (unlikely(nvbo->gem.filp))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);
        WARN_ON(nvbo->pin_refcnt > 0);
        nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -1267,7 +1267,7 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
        struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-       return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
+       return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
 }
 
 static int
index 653dbbbd4fa1d48ca43092bc79ad0970f90c7665..ff17c1f432fc9e9312ace42491385bb76226b381 100644 (file)
@@ -27,7 +27,10 @@ struct nouveau_bo {
        u32 tile_flags;
        struct nouveau_drm_tile *tile;
 
-       struct drm_gem_object *gem;
+       /* Only valid if allocated via nouveau_gem_new() and iff you hold a
+        * gem reference to it! For debugging, use gem.filp != NULL to test
+        * whether it is valid. */
+       struct drm_gem_object gem;
 
        /* protect by the ttm reservation lock */
        int pin_refcnt;
index c5b36f9e9a10d7ce02d68197181f427b950732b6..2136d0038252fec86ef07d6e75121ddff7177a70 100644 (file)
@@ -215,8 +215,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
                connector->doublescan_allowed = true;
                if (nv_device(drm->device)->card_type == NV_20 ||
                   (nv_device(drm->device)->card_type == NV_10 &&
-                   (dev->pci_device & 0x0ff0) != 0x0100 &&
-                   (dev->pci_device & 0x0ff0) != 0x0150))
+                   (dev->pdev->device & 0x0ff0) != 0x0100 &&
+                   (dev->pdev->device & 0x0ff0) != 0x0150))
                        /* HW is broken */
                        connector->interlace_allowed = false;
                else
index 7848590f5568e4142457a0f121d9282ca6d1ae17..bdd5cf71a24cb81c4b295d15100ed0fb2e68fa6f 100644 (file)
@@ -50,7 +50,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
        struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
 
        if (fb->nvbo)
-               drm_gem_object_unreference_unlocked(fb->nvbo->gem);
+               drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
 
        drm_framebuffer_cleanup(drm_fb);
        kfree(fb);
@@ -63,7 +63,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
 {
        struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
 
-       return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+       return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
 }
 
 static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -674,8 +674,8 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
        if (ret)
                return ret;
 
-       ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
-       drm_gem_object_unreference_unlocked(bo->gem);
+       ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
+       drm_gem_object_unreference_unlocked(&bo->gem);
        return ret;
 }
 
@@ -688,7 +688,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
 
        gem = drm_gem_object_lookup(dev, file_priv, handle);
        if (gem) {
-               struct nouveau_bo *bo = gem->driver_private;
+               struct nouveau_bo *bo = nouveau_gem_object(gem);
                *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
                drm_gem_object_unreference_unlocked(gem);
                return 0;
index e893c53624024751930f51c28517546d7fc7cb9d..428d818be7757c0c2f8ca655fc194580080863f6 100644 (file)
@@ -834,7 +834,6 @@ driver = {
        .gem_prime_vmap = nouveau_gem_prime_vmap,
        .gem_prime_vunmap = nouveau_gem_prime_vunmap,
 
-       .gem_init_object = nouveau_gem_object_new,
        .gem_free_object = nouveau_gem_object_del,
        .gem_open_object = nouveau_gem_object_open,
        .gem_close_object = nouveau_gem_object_close,
index a86ecf65c1642b2372e9a0e0136a69c7b068e041..c80b519b513a964c5802d8f6e3281d39a0447dea 100644 (file)
@@ -420,7 +420,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
                nouveau_bo_unmap(nouveau_fb->nvbo);
                nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
                nouveau_bo_unpin(nouveau_fb->nvbo);
-               drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
+               drm_gem_object_unreference_unlocked(&nouveau_fb->nvbo->gem);
                nouveau_fb->nvbo = NULL;
        }
        drm_fb_helper_fini(&fbcon->helper);
index f32b71238c03c3426034004779481925c8dc00ec..418a6177a65330a5074a16565cce9a40784f3c01 100644 (file)
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
 
-int
-nouveau_gem_object_new(struct drm_gem_object *gem)
-{
-       return 0;
-}
-
 void
 nouveau_gem_object_del(struct drm_gem_object *gem)
 {
-       struct nouveau_bo *nvbo = gem->driver_private;
+       struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct ttm_buffer_object *bo = &nvbo->bo;
 
-       if (!nvbo)
-               return;
-       nvbo->gem = NULL;
-
        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);
 
-       ttm_bo_unref(&bo);
-
        drm_gem_object_release(gem);
-       kfree(gem);
+
+       /* reset filp so nouveau_bo_del_ttm() can test for it */
+       gem->filp = NULL;
+       ttm_bo_unref(&bo);
 }
 
 int
@@ -186,14 +177,15 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
        if (nv_device(drm->device)->card_type >= NV_50)
                nvbo->valid_domains &= domain;
 
-       nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-       if (!nvbo->gem) {
+       /* Initialize the embedded gem-object. We return a single gem-reference
+        * to the caller, instead of a normal nouveau_bo ttm reference. */
+       ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+       if (ret) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }
 
-       nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
-       nvbo->gem->driver_private = nvbo;
+       nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
        return 0;
 }
 
@@ -250,15 +242,15 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+       ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
        if (ret == 0) {
-               ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+               ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }
 
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(nvbo->gem);
+       drm_gem_object_unreference_unlocked(&nvbo->gem);
        return ret;
 }
 
@@ -266,7 +258,7 @@ static int
 nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
 {
-       struct nouveau_bo *nvbo = gem->driver_private;
+       struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
@@ -327,7 +319,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
-               drm_gem_object_unreference_unlocked(nvbo->gem);
+               drm_gem_object_unreference_unlocked(&nvbo->gem);
        }
 }
 
@@ -376,7 +368,7 @@ retry:
                        validate_fini(op, NULL);
                        return -ENOENT;
                }
-               nvbo = gem->driver_private;
+               nvbo = nouveau_gem_object(gem);
                if (nvbo == res_bo) {
                        res_bo = NULL;
                        drm_gem_object_unreference_unlocked(gem);
@@ -478,7 +470,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
                        return ret;
                }
 
-               ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+               ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
index 502e4290aa8fdee0ef200410f4fb0264ac82aaf8..7caca057bc382dbae3d6e849d26ad4cdf103e0b4 100644 (file)
 static inline struct nouveau_bo *
 nouveau_gem_object(struct drm_gem_object *gem)
 {
-       return gem ? gem->driver_private : NULL;
+       return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
 }
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, int size, int align,
                           uint32_t domain, uint32_t tile_mode,
                           uint32_t tile_flags, struct nouveau_bo **);
-extern int nouveau_gem_object_new(struct drm_gem_object *);
 extern void nouveau_gem_object_del(struct drm_gem_object *);
 extern int nouveau_gem_object_open(struct drm_gem_object *, struct drm_file *);
 extern void nouveau_gem_object_close(struct drm_gem_object *,
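
The nouveau changes above replace the separately allocated GEM object and its driver_private back-pointer with a drm_gem_object embedded directly in nouveau_bo, recovered via container_of() in nouveau_gem_object(). A small stand-alone sketch of that embedded-object pattern, with illustrative type names rather than the DRM structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gem_object {
	void *filp;		/* non-NULL once the object is initialized */
	size_t size;
};

struct bo {
	int pin_refcnt;
	struct gem_object gem;	/* embedded, no separate allocation */
};

/* Callbacks receive the generic object; recover the containing bo. */
static struct bo *to_bo(struct gem_object *gem)
{
	return gem ? container_of(gem, struct bo, gem) : NULL;
}

int main(void)
{
	struct bo b = { .pin_refcnt = 1 };

	b.gem.filp = &b;	/* stands in for drm_gem_object_init() */
	b.gem.size = 4096;

	struct gem_object *gem = &b.gem;
	struct bo *back = to_bo(gem);

	printf("recovered bo: pin_refcnt=%d, size=%zu\n",
	       back->pin_refcnt, back->gem.size);
	return 0;
}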
index e90468d5e5c0aed0c39a7fb9bd2465909f0ee554..51a2cb102b441f14f49bd0e9b663e15aaf078592 100644 (file)
@@ -71,14 +71,16 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
                return ERR_PTR(ret);
 
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
-       nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-       if (!nvbo->gem) {
+
+       /* Initialize the embedded gem-object. We return a single gem-reference
+        * to the caller, instead of a normal nouveau_bo ttm reference. */
+       ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+       if (ret) {
                nouveau_bo_ref(NULL, &nvbo);
                return ERR_PTR(-ENOMEM);
        }
 
-       nvbo->gem->driver_private = nvbo;
-       return nvbo->gem;
+       return &nvbo->gem;
 }
 
 int nouveau_gem_prime_pin(struct drm_gem_object *obj)
index 20c41e73d448f4e455cd1a85f42697d0b046f43c..6c220cd3497a26ada576caed6d51f5d001e63839 100644 (file)
@@ -5,6 +5,7 @@ config DRM_OMAP
        depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
        depends on OMAP2_DSS
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
index acf667859cb6231040914270b5bafbc923004ea8..701c4c10e08b5858a5e083d05329af23c076669e 100644 (file)
@@ -664,8 +664,9 @@ static int omap_dmm_probe(struct platform_device *dev)
        }
 
        /* set dma mask for device */
-       /* NOTE: this is a workaround for the hwmod not initializing properly */
-       dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto fail;
 
        omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
 
index 2603d909f49ce032f1933a5d8d17131de132c0d9..e7fa3cd9674389e1cac2de91816ce6b0013b2a1b 100644 (file)
@@ -620,7 +620,6 @@ static struct drm_driver omap_drm_driver = {
                .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
                .gem_prime_export = omap_gem_prime_export,
                .gem_prime_import = omap_gem_prime_import,
-               .gem_init_object = omap_gem_init_object,
                .gem_free_object = omap_gem_free_object,
                .gem_vm_ops = &omap_gem_vm_ops,
                .dumb_create = omap_gem_dumb_create,
index 30b95b736658b0c9154ca54c289d99e19361c16c..07847693cf494caababe5780bae717ef393c8af1 100644 (file)
@@ -220,7 +220,6 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
 void omap_gem_free_object(struct drm_gem_object *obj);
-int omap_gem_init_object(struct drm_gem_object *obj);
 void *omap_gem_vaddr(struct drm_gem_object *obj);
 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
                uint32_t handle, uint64_t *offset);
index 533f6ebec531ff67a1c1f7a96c2504f852d665f0..5aec3e81fe241e8ef1308d1b53feca75b3dfa34f 100644 (file)
@@ -1274,11 +1274,6 @@ unlock:
        return ret;
 }
 
-int omap_gem_init_object(struct drm_gem_object *obj)
-{
-       return -EINVAL;          /* unused */
-}
-
 /* don't call directly.. called from GEM core when it is time to actually
  * free the object..
  */
index 9263db117ff8ae937dbf743ac0e8a267264d95a4..cb858600185f8051c7f92997e5e7e8ad7d8a6173 100644 (file)
@@ -261,7 +261,7 @@ int omap_drm_irq_install(struct drm_device *dev)
                mutex_unlock(&dev->struct_mutex);
                return -EBUSY;
        }
-       dev->irq_enabled = 1;
+       dev->irq_enabled = true;
        mutex_unlock(&dev->struct_mutex);
 
        /* Before installing handler */
@@ -272,7 +272,7 @@ int omap_drm_irq_install(struct drm_device *dev)
 
        if (ret < 0) {
                mutex_lock(&dev->struct_mutex);
-               dev->irq_enabled = 0;
+               dev->irq_enabled = false;
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
@@ -283,7 +283,7 @@ int omap_drm_irq_install(struct drm_device *dev)
 
        if (ret < 0) {
                mutex_lock(&dev->struct_mutex);
-               dev->irq_enabled = 0;
+               dev->irq_enabled = false;
                mutex_unlock(&dev->struct_mutex);
                dispc_free_irq(dev);
        }
@@ -294,11 +294,12 @@ int omap_drm_irq_install(struct drm_device *dev)
 int omap_drm_irq_uninstall(struct drm_device *dev)
 {
        unsigned long irqflags;
-       int irq_enabled, i;
+       bool irq_enabled;
+       int i;
 
        mutex_lock(&dev->struct_mutex);
        irq_enabled = dev->irq_enabled;
-       dev->irq_enabled = 0;
+       dev->irq_enabled = false;
        mutex_unlock(&dev->struct_mutex);
 
        /*
@@ -307,9 +308,9 @@ int omap_drm_irq_uninstall(struct drm_device *dev)
        if (dev->num_crtcs) {
                spin_lock_irqsave(&dev->vbl_lock, irqflags);
                for (i = 0; i < dev->num_crtcs; i++) {
-                       DRM_WAKEUP(&dev->vbl_queue[i]);
-                       dev->vblank_enabled[i] = 0;
-                       dev->last_vblank[i] =
+                       DRM_WAKEUP(&dev->vblank[i].queue);
+                       dev->vblank[i].enabled = false;
+                       dev->vblank[i].last =
                                dev->driver->get_vblank_counter(dev, i);
                }
                spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
new file mode 100644 (file)
index 0000000..843087b
--- /dev/null
@@ -0,0 +1,17 @@
+menuconfig DRM_PANEL
+       bool "DRM panel support"
+       help
+         Panel registration and lookup framework.
+
+if DRM_PANEL
+
+config DRM_PANEL_SIMPLE
+       bool "support for simple panels"
+       depends on OF
+       help
+         DRM panel driver for dumb panels that need at most a regulator and
+         a GPIO to be powered up. Optionally a backlight can be attached so
+         that it can be automatically turned off when the panel goes into a
+         low power state.
+
+endif
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
new file mode 100644 (file)
index 0000000..af9dfa2
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
new file mode 100644 (file)
index 0000000..def3e75
--- /dev/null
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+struct panel_desc {
+       const struct drm_display_mode *modes;
+       unsigned int num_modes;
+
+       struct {
+               unsigned int width;
+               unsigned int height;
+       } size;
+};
+
+/* TODO: convert to gpiod_*() API once it's been merged */
+#define GPIO_ACTIVE_LOW        (1 << 0)
+
+struct panel_simple {
+       struct drm_panel base;
+       bool enabled;
+
+       const struct panel_desc *desc;
+
+       struct backlight_device *backlight;
+       struct regulator *supply;
+
+       unsigned long enable_gpio_flags;
+       int enable_gpio;
+};
+
+static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
+{
+       return container_of(panel, struct panel_simple, base);
+}
+
+static void panel_simple_disable(struct drm_panel *panel)
+{
+       struct panel_simple *p = to_panel_simple(panel);
+
+       if (!p->enabled)
+               return;
+
+       if (p->backlight) {
+               p->backlight->props.power = FB_BLANK_POWERDOWN;
+               backlight_update_status(p->backlight);
+       }
+
+       if (gpio_is_valid(p->enable_gpio)) {
+               if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
+                       gpio_set_value(p->enable_gpio, 1);
+               else
+                       gpio_set_value(p->enable_gpio, 0);
+       }
+
+       regulator_disable(p->supply);
+       p->enabled = false;
+}
+
+static void panel_simple_enable(struct drm_panel *panel)
+{
+       struct panel_simple *p = to_panel_simple(panel);
+       int err;
+
+       if (p->enabled)
+               return;
+
+       err = regulator_enable(p->supply);
+       if (err < 0)
+               dev_err(panel->dev, "failed to enable supply: %d\n", err);
+
+       if (gpio_is_valid(p->enable_gpio)) {
+               if (p->enable_gpio_flags & GPIO_ACTIVE_LOW)
+                       gpio_set_value(p->enable_gpio, 0);
+               else
+                       gpio_set_value(p->enable_gpio, 1);
+       }
+
+       if (p->backlight) {
+               p->backlight->props.power = FB_BLANK_UNBLANK;
+               backlight_update_status(p->backlight);
+       }
+
+       p->enabled = true;
+}
+
+static int panel_simple_get_modes(struct drm_panel *panel)
+{
+       struct panel_simple *p = to_panel_simple(panel);
+       struct drm_display_mode *mode;
+       unsigned int i;
+
+       for (i = 0; i < p->desc->num_modes; i++) {
+               mode = drm_mode_duplicate(panel->drm, &p->desc->modes[i]);
+               if (!mode)
+                       return -ENOMEM;
+
+               drm_mode_set_name(mode);
+
+               drm_mode_probed_add(panel->connector, mode);
+       }
+
+       return p->desc->num_modes;
+}
+
+static const struct drm_panel_funcs panel_simple_funcs = {
+       .disable = panel_simple_disable,
+       .enable = panel_simple_enable,
+       .get_modes = panel_simple_get_modes,
+};
+
+static const struct drm_display_mode auo_b101aw03_mode = {
+       .clock = 51450,
+       .hdisplay = 1024,
+       .hsync_start = 1024 + 156,
+       .hsync_end = 1024 + 156 + 8,
+       .htotal = 1024 + 156 + 8 + 156,
+       .vdisplay = 600,
+       .vsync_start = 600 + 16,
+       .vsync_end = 600 + 16 + 6,
+       .vtotal = 600 + 16 + 6 + 16,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc auo_b101aw03 = {
+       .modes = &auo_b101aw03_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 223,
+               .height = 125,
+       },
+};
+
+static const struct drm_display_mode chunghwa_claa101wb01_mode = {
+       .clock = 69300,
+       .hdisplay = 1366,
+       .hsync_start = 1366 + 48,
+       .hsync_end = 1366 + 48 + 32,
+       .htotal = 1366 + 48 + 32 + 20,
+       .vdisplay = 768,
+       .vsync_start = 768 + 16,
+       .vsync_end = 768 + 16 + 8,
+       .vtotal = 768 + 16 + 8 + 16,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc chunghwa_claa101wb01 = {
+       .modes = &chunghwa_claa101wb01_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 223,
+               .height = 125,
+       },
+};
+
+static const struct drm_display_mode panasonic_vvx10f004b00_mode = {
+       .clock = 154700,
+       .hdisplay = 1920,
+       .hsync_start = 1920 + 154,
+       .hsync_end = 1920 + 154 + 16,
+       .htotal = 1920 + 154 + 16 + 32,
+       .vdisplay = 1200,
+       .vsync_start = 1200 + 17,
+       .vsync_end = 1200 + 17 + 2,
+       .vtotal = 1200 + 17 + 2 + 16,
+       .vrefresh = 60,
+};
+
+static const struct panel_desc panasonic_vvx10f004b00 = {
+       .modes = &panasonic_vvx10f004b00_mode,
+       .num_modes = 1,
+       .size = {
+               .width = 217,
+               .height = 136,
+       },
+};
+
+static const struct of_device_id panel_simple_of_match[] = {
+       {
+               .compatible = "auo,b101aw03",
+               .data = &auo_b101aw03,
+       }, {
+               .compatible = "chunghwa,claa101wb01",
+               .data = &chunghwa_claa101wb01
+       }, {
+               .compatible = "panasonic,vvx10f004b00",
+               .data = &panasonic_vvx10f004b00
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(of, panel_simple_of_match);
+
+static int panel_simple_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *id;
+       struct device_node *backlight;
+       struct panel_simple *panel;
+       enum of_gpio_flags flags;
+       int err;
+
+       panel = devm_kzalloc(&pdev->dev, sizeof(*panel), GFP_KERNEL);
+       if (!panel)
+               return -ENOMEM;
+
+       id = of_match_node(panel_simple_of_match, pdev->dev.of_node);
+       if (!id)
+               return -ENODEV;
+
+       panel->desc = id->data;
+       panel->enabled = false;
+
+       panel->supply = devm_regulator_get(&pdev->dev, "power");
+       if (IS_ERR(panel->supply))
+               return PTR_ERR(panel->supply);
+
+       panel->enable_gpio = of_get_named_gpio_flags(pdev->dev.of_node,
+                                                    "enable-gpios", 0,
+                                                    &flags);
+       if (gpio_is_valid(panel->enable_gpio)) {
+               unsigned int value;
+
+               if (flags & OF_GPIO_ACTIVE_LOW)
+                       panel->enable_gpio_flags |= GPIO_ACTIVE_LOW;
+
+               err = gpio_request(panel->enable_gpio, "enable");
+               if (err < 0) {
+                       dev_err(&pdev->dev, "failed to request GPIO#%u: %d\n",
+                               panel->enable_gpio, err);
+                       return err;
+               }
+
+               value = (panel->enable_gpio_flags & GPIO_ACTIVE_LOW) != 0;
+
+               err = gpio_direction_output(panel->enable_gpio, value);
+               if (err < 0) {
+                       dev_err(&pdev->dev, "failed to setup GPIO%u: %d\n",
+                               panel->enable_gpio, err);
+                       goto free_gpio;
+               }
+       }
+
+       backlight = of_parse_phandle(pdev->dev.of_node, "backlight", 0);
+       if (backlight) {
+               panel->backlight = of_find_backlight_by_node(backlight);
+               if (!panel->backlight) {
+                       err = -EPROBE_DEFER;
+                       goto free_gpio;
+               }
+
+               of_node_put(backlight);
+       }
+
+       drm_panel_init(&panel->base);
+       panel->base.dev = &pdev->dev;
+       panel->base.funcs = &panel_simple_funcs;
+
+       err = drm_panel_add(&panel->base);
+       if (err < 0)
+               goto free_gpio;
+
+       platform_set_drvdata(pdev, panel);
+
+       return 0;
+
+free_gpio:
+       if (gpio_is_valid(panel->enable_gpio))
+               gpio_free(panel->enable_gpio);
+
+       return err;
+}
+
+static int panel_simple_remove(struct platform_device *pdev)
+{
+       struct panel_simple *panel = platform_get_drvdata(pdev);
+
+       drm_panel_detach(&panel->base);
+       drm_panel_remove(&panel->base);
+
+       panel_simple_disable(&panel->base);
+
+       if (panel->backlight)
+               put_device(&panel->backlight->dev);
+
+       if (gpio_is_valid(panel->enable_gpio))
+               gpio_free(panel->enable_gpio);
+
+       regulator_disable(panel->supply);
+
+       return 0;
+}
+
+static struct platform_driver panel_simple_driver = {
+       .driver = {
+               .name = "panel-simple",
+               .owner = THIS_MODULE,
+               .of_match_table = panel_simple_of_match,
+       },
+       .probe = panel_simple_probe,
+       .remove = panel_simple_remove,
+};
+module_platform_driver(panel_simple_driver);
+
+MODULE_DESCRIPTION("DRM Driver for Simple Panels");
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_LICENSE("GPL v2");
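
The panel-simple driver above drives its enable GPIO through a small inversion: the physical line level depends on both the requested panel state and whether the line is flagged active-low, and the probe path parks the line at the disabled level. A tiny sketch of that level calculation with a hypothetical helper name; the real driver calls gpio_set_value() directly:

#include <stdbool.h>
#include <stdio.h>

#define GPIO_ACTIVE_LOW	(1 << 0)

/* Physical level that puts the panel into the requested state. */
static int panel_gpio_level(bool enable, unsigned long flags)
{
	if (flags & GPIO_ACTIVE_LOW)
		return enable ? 0 : 1;
	return enable ? 1 : 0;
}

int main(void)
{
	printf("active-high, enable  -> %d\n", panel_gpio_level(true, 0));
	printf("active-low,  enable  -> %d\n",
	       panel_gpio_level(true, GPIO_ACTIVE_LOW));
	printf("active-low,  disable -> %d\n",	/* probe's initial value */
	       panel_gpio_level(false, GPIO_ACTIVE_LOW));
	return 0;
}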
index d6c12796023cd654e95196bc9d55ce18aabdb114..037d324bf58f6ccfda14ff471f3c98b4405981a0 100644 (file)
@@ -6,6 +6,7 @@ config DRM_QXL
        select FB_SYS_IMAGEBLIT
        select FB_DEFERRED_IO
         select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
         select DRM_TTM
        help
                QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
index 514118ae72d4671474b165c5c0b7385f77b04ae7..fee8748bdca52fda145888018b3d4a89c43de866 100644 (file)
@@ -225,7 +225,6 @@ static struct drm_driver qxl_driver = {
        .debugfs_init = qxl_debugfs_init,
        .debugfs_cleanup = qxl_debugfs_takedown,
 #endif
-       .gem_init_object = qxl_gem_object_init,
        .gem_free_object = qxl_gem_object_free,
        .gem_open_object = qxl_gem_object_open,
        .gem_close_object = qxl_gem_object_close,
index f7c9adde46a0c8ffb5586eb44461e2c3155e88cb..41d22ed260602acfb42184e4f73ea41f0a409885 100644 (file)
@@ -412,7 +412,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
                                      struct qxl_surface *surf,
                                      struct qxl_bo **qobj,
                                      uint32_t *handle);
-int qxl_gem_object_init(struct drm_gem_object *obj);
 void qxl_gem_object_free(struct drm_gem_object *gobj);
 int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void qxl_gem_object_close(struct drm_gem_object *obj,
index 1648e4125af7619f9923901306935b0f152ac0fa..b96f0c9d89b2d717b552d59c19e26d4828214f66 100644 (file)
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
-int qxl_gem_object_init(struct drm_gem_object *obj)
-{
-       /* we do nothings here */
-       return 0;
-}
-
 void qxl_gem_object_free(struct drm_gem_object *gobj)
 {
        struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
index 00885417ffffd2e8ad695a1a01059d40df68c011..fb3ae07a14692601a912b1cbea37ae7ff13bcc62 100644 (file)
@@ -690,8 +690,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
 
        /* set the lane count on the sink */
        tmp = dp_info->dp_lane_count;
-       if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
-           dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
+       if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
                tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
        radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
index 32923d2f60021105a5092826bcd7810ca054c960..2cb08f93236d9e68fc20a7ed28945983c512a019 100644 (file)
@@ -213,7 +213,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
        props.type = BACKLIGHT_RAW;
        snprintf(bl_name, sizeof(bl_name),
                 "radeon_bl%d", dev->primary->index);
-       bd = backlight_device_register(bl_name, &drm_connector->kdev,
+       bd = backlight_device_register(bl_name, drm_connector->kdev,
                                       pdata, &radeon_atom_backlight_ops, &props);
        if (IS_ERR(bd)) {
                DRM_ERROR("Backlight registration failed\n");
@@ -707,24 +707,37 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        switch (connector->connector_type) {
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-               if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
-                   (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                    (radeon_connector->audio == RADEON_AUDIO_AUTO)))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else if (radeon_connector->use_digital)
+               if (radeon_audio != 0) {
+                       if (radeon_connector->use_digital &&
+                           (radeon_connector->audio == RADEON_AUDIO_ENABLE))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                                (radeon_connector->audio == RADEON_AUDIO_AUTO))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (radeon_connector->use_digital)
+                               return ATOM_ENCODER_MODE_DVI;
+                       else
+                               return ATOM_ENCODER_MODE_CRT;
+               } else if (radeon_connector->use_digital) {
                        return ATOM_ENCODER_MODE_DVI;
-               else
+               } else {
                        return ATOM_ENCODER_MODE_CRT;
+               }
                break;
        case DRM_MODE_CONNECTOR_DVID:
        case DRM_MODE_CONNECTOR_HDMIA:
        default:
-               if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
-                   (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                    (radeon_connector->audio == RADEON_AUDIO_AUTO)))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else
+               if (radeon_audio != 0) {
+                       if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                                (radeon_connector->audio == RADEON_AUDIO_AUTO))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else
+                               return ATOM_ENCODER_MODE_DVI;
+               } else {
                        return ATOM_ENCODER_MODE_DVI;
+               }
                break;
        case DRM_MODE_CONNECTOR_LVDS:
                return ATOM_ENCODER_MODE_LVDS;
@@ -732,14 +745,19 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_DisplayPort:
                dig_connector = radeon_connector->con_priv;
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-                   (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+                   (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
                        return ATOM_ENCODER_MODE_DP;
-               else if ((radeon_connector->audio == RADEON_AUDIO_ENABLE) ||
-                        (drm_detect_hdmi_monitor(radeon_connector->edid) &&
-                         (radeon_connector->audio == RADEON_AUDIO_AUTO)))
-                       return ATOM_ENCODER_MODE_HDMI;
-               else
+               } else if (radeon_audio != 0) {
+                       if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+                                (radeon_connector->audio == RADEON_AUDIO_AUTO))
+                               return ATOM_ENCODER_MODE_HDMI;
+                       else
+                               return ATOM_ENCODER_MODE_DVI;
+               } else {
                        return ATOM_ENCODER_MODE_DVI;
+               }
                break;
        case DRM_MODE_CONNECTOR_eDP:
                return ATOM_ENCODER_MODE_DP;
@@ -1655,7 +1673,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
                         * does the same thing and more.
                         */
                        if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
-                           (rdev->family != CHIP_RS880))
+                           (rdev->family != CHIP_RS780) && (rdev->family != CHIP_RS880))
                                atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
                }
                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
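
The reworked DVI-I and HDMI-A branches of atombios_get_encoder_mode() above consult the radeon_audio module parameter first, then the per-connector audio setting, then whether the monitor advertises HDMI, before falling back to DVI or CRT. A stand-alone sketch of that decision order for the DVI-I case, with illustrative enum and parameter names:

#include <stdbool.h>
#include <stdio.h>

enum encoder_mode { MODE_CRT, MODE_DVI, MODE_HDMI };
enum audio_setting { AUDIO_DISABLE, AUDIO_ENABLE, AUDIO_AUTO };

/* Mirrors the patched DVI-I branch: module switch first, then the
 * per-connector setting, then the monitor's HDMI capability. */
static enum encoder_mode dvii_mode(int module_audio, bool use_digital,
				   enum audio_setting audio, bool hdmi_monitor)
{
	if (module_audio != 0) {
		if (use_digital && audio == AUDIO_ENABLE)
			return MODE_HDMI;
		if (hdmi_monitor && audio == AUDIO_AUTO)
			return MODE_HDMI;
	}
	return use_digital ? MODE_DVI : MODE_CRT;
}

int main(void)
{
	static const char *names[] = { "CRT", "DVI", "HDMI" };

	printf("%s\n", names[dvii_mode(1, true,  AUDIO_ENABLE, false)]); /* HDMI */
	printf("%s\n", names[dvii_mode(0, true,  AUDIO_ENABLE, false)]); /* DVI  */
	printf("%s\n", names[dvii_mode(1, false, AUDIO_AUTO,   true)]);  /* HDMI */
	return 0;
}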
index b162e98a2953ee392029dba8c1eae7d0e3678e97..9b6950d9b3c09cc193010a50bdd521939464d539 100644 (file)
@@ -1930,7 +1930,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
                        }
                        j++;
 
-                       if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+                       if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
 
                        tmp = RREG32(MC_PMG_CMD_MRS);
@@ -1945,7 +1945,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
                        }
                        j++;
 
-                       if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+                       if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
                        break;
                case MC_SEQ_RESERVE_M >> 2:
@@ -1959,7 +1959,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
                        }
                        j++;
 
-                       if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
+                       if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
                        break;
                default:
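
The btc_set_mc_special_registers() hunks above tighten three bounds checks from "j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE" to "j >=": j is a zero-based index into a table of that size, so the strict comparison lets one write land past the end. A small demonstration of the difference, with illustrative names:

#include <stdio.h>

#define REGISTER_ARRAY_SIZE 16

static int store(unsigned int j, unsigned int value, unsigned int *table)
{
	/* Correct guard: j == REGISTER_ARRAY_SIZE is already out of range.
	 * The old "j > REGISTER_ARRAY_SIZE" check would let it through and
	 * write one element past the end of the array. */
	if (j >= REGISTER_ARRAY_SIZE)
		return -1;

	table[j] = value;
	return 0;
}

int main(void)
{
	unsigned int table[REGISTER_ARRAY_SIZE] = { 0 };

	printf("store at 15: %d\n", store(15, 0xabcd, table)); /* accepted */
	printf("store at 16: %d\n", store(16, 0xabcd, table)); /* rejected */
	return 0;
}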
index d02fd1c045d567371a187c686e1af2b411f88aa1..9cd2bc989ac713d1604cec5ab1311ac3680066c5 100644 (file)
@@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
 static void cik_program_aspm(struct radeon_device *rdev);
 static void cik_init_pg(struct radeon_device *rdev);
 static void cik_init_cg(struct radeon_device *rdev);
+static void cik_fini_pg(struct radeon_device *rdev);
+static void cik_fini_cg(struct radeon_device *rdev);
 static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
                                          bool enable);
 
@@ -1692,6 +1694,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
                               fw_name);
                        release_firmware(rdev->smc_fw);
                        rdev->smc_fw = NULL;
+                       err = 0;
                } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "cik_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -3180,6 +3183,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
                DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+               radeon_scratch_free(rdev, scratch);
                return r;
        }
        ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -3196,6 +3200,8 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
        r = radeon_fence_wait(ib.fence, false);
        if (r) {
                DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+               radeon_scratch_free(rdev, scratch);
+               radeon_ib_free(rdev, &ib);
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
@@ -4185,6 +4191,10 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
        dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
 
+       /* disable CG/PG */
+       cik_fini_pg(rdev);
+       cik_fini_cg(rdev);
+
        /* stop the rlc */
        cik_rlc_stop(rdev);
 
index 85a69d2ea3d2c8d86c9c552902d260dd576ac927..9fcd338c0fcf6bb1e113c80c8b116bcf5c56abbc 100644 (file)
@@ -113,6 +113,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        u8 *sadb;
        int sad_count;
 
+       /* XXX: setting this register causes hangs on some asics */
+       return;
+
        if (!dig->afmt->pin)
                return;
 
index 555164e270a79347fb36b9cadab09d90754c4306..b5c67a99dda9b34707c3ba7eb20a682f9c7fe089 100644 (file)
@@ -3131,7 +3131,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
                rdev->config.evergreen.sx_max_export_size = 256;
                rdev->config.evergreen.sx_max_export_pos_size = 64;
                rdev->config.evergreen.sx_max_export_smx_size = 192;
-               rdev->config.evergreen.max_hw_contexts = 8;
+               rdev->config.evergreen.max_hw_contexts = 4;
                rdev->config.evergreen.sq_num_cf_insts = 2;
 
                rdev->config.evergreen.sc_prim_fifo_size = 0x40;
index f71ce390aebe581dd08998ce29d98e4b1058155c..fe1de855775ecca1491ced31f797f5cac6d05abc 100644 (file)
@@ -67,6 +67,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        u8 *sadb;
        int sad_count;
 
+       /* XXX: setting this register causes hangs on some asics */
+       return;
+
        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder)
                        radeon_connector = to_radeon_connector(connector);
@@ -288,8 +291,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
        /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
 
        WREG32(HDMI_ACR_PACKET_CONTROL + offset,
-              HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
-              HDMI_ACR_SOURCE); /* select SW CTS value */
+              HDMI_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
 
        evergreen_hdmi_update_ACR(encoder, mode->clock);
 
index 8768fd6a1e2707acf1006f9afff44eb1a173fd4b..4f6d2962767dced17ae7f5f3e44e79bce4f51bbf 100644 (file)
  * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
  */
 #              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
-                /* 0 - SRC_ADDR
+                /* 0 - DST_ADDR
                 * 1 - GDS
                 */
 #              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
 #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
 /* COMMAND */
 #              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
-#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
                 /* 0 - none
                 * 1 - 8 in 16
                 * 2 - 8 in 32
index 93c1f9ef5da9b5ee7c3474bfaaae5bbe7497cf91..cac2866d79da441dfbbbcef860f713c9c7ad5a58 100644 (file)
@@ -804,6 +804,7 @@ int ni_init_microcode(struct radeon_device *rdev)
                               fw_name);
                        release_firmware(rdev->smc_fw);
                        rdev->smc_fw = NULL;
+                       err = 0;
                } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "ni_mc: Bogus length %zu in firmware \"%s\"\n",
index 2a1b1876b4312eb332c017cb027aeb346bf2c850..f9be22062df1eb8048384cabec6219e8a5238fff 100644 (file)
@@ -2302,6 +2302,7 @@ int r600_init_microcode(struct radeon_device *rdev)
                               fw_name);
                        release_firmware(rdev->smc_fw);
                        rdev->smc_fw = NULL;
+                       err = 0;
                } else if (rdev->smc_fw->size != smc_req_size) {
                        printk(KERN_ERR
                               "smc: Bogus length %zu in firmware \"%s\"\n",
index b0fa6002af3e98da440cbebfa1ae183163e8b07d..06022e3b9c3bdc0a0660a60c448e25038659e41d 100644 (file)
@@ -57,15 +57,15 @@ enum r600_hdmi_iec_status_bits {
 static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
     /*      32kHz        44.1kHz       48kHz    */
     /* Clock      N     CTS      N     CTS      N     CTS */
-    {  25174,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
+    {  25175,  4576,  28125,  7007,  31250,  6864,  28125 }, /*  25,20/1.001 MHz */
     {  25200,  4096,  25200,  6272,  28000,  6144,  25200 }, /*  25.20       MHz */
     {  27000,  4096,  27000,  6272,  30000,  6144,  27000 }, /*  27.00       MHz */
     {  27027,  4096,  27027,  6272,  30030,  6144,  27027 }, /*  27.00*1.001 MHz */
     {  54000,  4096,  54000,  6272,  60000,  6144,  54000 }, /*  54.00       MHz */
     {  54054,  4096,  54054,  6272,  60060,  6144,  54054 }, /*  54.00*1.001 MHz */
-    {  74175, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
+    {  74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /*  74.25/1.001 MHz */
     {  74250,  4096,  74250,  6272,  82500,  6144,  74250 }, /*  74.25       MHz */
-    { 148351, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
+    { 148352, 11648, 421875,  8918, 234375,  5824, 140625 }, /* 148.50/1.001 MHz */
     { 148500,  4096, 148500,  6272, 165000,  6144, 148500 }, /* 148.50       MHz */
     {      0,  4096,      0,  6272,      0,  6144,      0 }  /* Other */
 };
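
The corrected entries are the '/1.001' pixel clocks rounded to the nearest kHz instead of truncated: 25200/1.001 ≈ 25174.83, 74250/1.001 ≈ 74175.82 and 148500/1.001 ≈ 148351.65, so the lookup keys become 25175, 74176 and 148352. A quick standalone check of that arithmetic:

    #include <stdio.h>

    /* round-to-nearest integer division, same idea as the kernel's DIV_ROUND_CLOSEST() */
    static unsigned int div_round_closest(unsigned long long n, unsigned int d)
    {
            return (unsigned int)((n + d / 2) / d);
    }

    int main(void)
    {
            unsigned int clocks[] = { 25200, 74250, 148500 };       /* kHz */
            unsigned int i;

            for (i = 0; i < 3; i++)
                    printf("%6u kHz / 1.001 -> %6u kHz\n", clocks[i],
                           div_round_closest((unsigned long long)clocks[i] * 1000, 1001));
            /* prints 25175, 74176, 148352 - the new table keys */
            return 0;
    }
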
@@ -75,8 +75,15 @@ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
  */
 static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
 {
-       if (*CTS == 0)
-               *CTS = clock * N / (128 * freq) * 1000;
+       u64 n;
+       u32 d;
+
+       if (*CTS == 0) {
+               n = (u64)clock * (u64)N * 1000ULL;
+               d = 128 * freq;
+               do_div(n, d);
+               *CTS = n;
+       }
        DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
                  N, *CTS, freq);
 }
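
The rewritten r600_hdmi_calc_cts() follows the HDMI ACR relation 128*fs = f_TMDS*N/CTS, i.e. CTS = clock(kHz)*N*1000 / (128*fs); for the 74.25 MHz / 48 kHz table entry above, 74250000*6144/74250 = 6144000 = 128*48000. The old one-liner divided before scaling by 1000, so a 148.5 MHz clock at 48 kHz yielded 148*1000 = 148000 rather than 148500, and reordering the multiply into 32 bits would overflow (148500*6144*1000 ≈ 9.1e11); widening to u64 and dividing with do_div(), the kernel's 64-by-32 division helper, avoids both problems. A userspace sketch of the same computation:

    #include <stdio.h>
    #include <stdint.h>

    /* CTS = f_TMDS * N / (128 * fs), with f_TMDS given in kHz as in the driver */
    static uint32_t calc_cts(uint32_t clock_khz, uint32_t n, uint32_t fs)
    {
            uint64_t num = (uint64_t)clock_khz * n * 1000ULL;   /* would overflow in 32 bits */

            return (uint32_t)(num / (128 * fs));                /* do_div() in the kernel */
    }

    int main(void)
    {
            /* the old 32-bit, divide-first version loses the fractional kHz */
            uint32_t old_cts = 148500u * 6144u / (128 * 48000) * 1000;

            printf("old: %u, new: %u\n", old_cts, calc_cts(148500, 6144, 48000));
            /* prints "old: 148000, new: 148500" */
            return 0;
    }
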
@@ -302,6 +309,9 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
        u8 *sadb;
        int sad_count;
 
+       /* XXX: setting this register causes hangs on some asics */
+       return;
+
        list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
                if (connector->encoder == encoder)
                        radeon_connector = to_radeon_connector(connector);
@@ -444,8 +454,8 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
        }
 
        WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
-              HDMI0_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
-              HDMI0_ACR_SOURCE); /* select SW CTS value */
+              HDMI0_ACR_SOURCE | /* select SW CTS value - XXX verify that hw CTS works on all families */
+              HDMI0_ACR_AUTO_SEND); /* allow hw to send ACR packets when required */
 
        WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
               HDMI0_NULL_SEND | /* send null packets when required */
index e673fe26ea84d00b292f77c456089c4312372ab7..7b3c7b5932c5a289be3fd58d05b7392791c6a4ff 100644 (file)
  */
 #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
 /* COMMAND */
-#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
                 /* 0 - none
                 * 1 - 8 in 16
                 * 2 - 8 in 32
index 061b227dae0c45f88f506bf497c75faa1195aca1..c155d6f3fa68cad15102af67bfd348677c74d6fa 100644 (file)
@@ -499,7 +499,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
        crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
        fp2_gen_cntl = 0;
 
-       if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+       if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
                fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
        }
 
@@ -536,7 +536,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
                (RADEON_CRTC_SYNC_TRISTAT |
                 RADEON_CRTC_DISPLAY_DIS)));
 
-       if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+       if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
                WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON));
        }
 
@@ -554,7 +554,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
                WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
        }
        WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
-       if (rdev->ddev->pci_device == PCI_DEVICE_ID_ATI_RADEON_QY) {
+       if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) {
                WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
        }
        return r;
index 79159b5da05bc778dc71b819a32c4139d28f967b..64565732cb98cf25af4e4d1f564e307c85cf4a8f 100644 (file)
@@ -1658,9 +1658,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                        drm_object_attach_property(&radeon_connector->base.base,
                                                      rdev->mode_info.underscan_vborder_property,
                                                      0);
-                       drm_object_attach_property(&radeon_connector->base.base,
-                                                  rdev->mode_info.audio_property,
-                                                  RADEON_AUDIO_DISABLE);
+                       if (radeon_audio != 0)
+                               drm_object_attach_property(&radeon_connector->base.base,
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        subpixel_order = SubPixelHorizontalRGB;
                        connector->interlace_allowed = true;
                        if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@@ -1754,10 +1757,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
-                       if (ASIC_IS_DCE2(rdev)) {
+                       if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
                                drm_object_attach_property(&radeon_connector->base.base,
-                                                             rdev->mode_info.audio_property,
-                                                             RADEON_AUDIO_DISABLE);
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        }
                        if (connector_type == DRM_MODE_CONNECTOR_DVII) {
                                radeon_connector->dac_load_detect = true;
@@ -1799,10 +1804,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
-                       if (ASIC_IS_DCE2(rdev)) {
+                       if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
                                drm_object_attach_property(&radeon_connector->base.base,
-                                                             rdev->mode_info.audio_property,
-                                                             RADEON_AUDIO_DISABLE);
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        }
                        subpixel_order = SubPixelHorizontalRGB;
                        connector->interlace_allowed = true;
@@ -1843,10 +1850,12 @@ radeon_add_atom_connector(struct drm_device *dev,
                                                              rdev->mode_info.underscan_vborder_property,
                                                              0);
                        }
-                       if (ASIC_IS_DCE2(rdev)) {
+                       if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
                                drm_object_attach_property(&radeon_connector->base.base,
-                                                             rdev->mode_info.audio_property,
-                                                             RADEON_AUDIO_DISABLE);
+                                                          rdev->mode_info.audio_property,
+                                                          (radeon_audio == 1) ?
+                                                          RADEON_AUDIO_AUTO :
+                                                          RADEON_AUDIO_DISABLE);
                        }
                        connector->interlace_allowed = true;
                        /* in theory with a DP to VGA converter... */
index 66c222836631a4bc8dcd12b2f81f6e935218c566..80285e35bc6513fda3d5f5c975399a60feaab1e3 100644 (file)
@@ -85,9 +85,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                   VRAM, also but everything into VRAM on AGP cards to avoid
                   image corruptions */
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-                   p->rdev->family < CHIP_PALM &&
                    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
-
+                       /* TODO: is this still needed for NI+ ? */
                        p->relocs[i].lobj.domain =
                                RADEON_GEM_DOMAIN_VRAM;
 
index cdd12dcd988b1ed3260076b6d984cbf388dc35a7..b01f231c2f1994db73ca01fe1f56fd22801fa111 100644 (file)
@@ -100,7 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
 irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
 int radeon_gem_object_open(struct drm_gem_object *obj,
                                struct drm_file *file_priv);
@@ -153,7 +152,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
-int radeon_audio = 1;
+int radeon_audio = -1;
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = -1;
@@ -196,7 +195,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
-MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
+MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
 module_param_named(audio, radeon_audio, int, 0444);
 
 MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
@@ -408,7 +407,6 @@ static struct drm_driver kms_driver = {
        .irq_uninstall = radeon_driver_irq_uninstall_kms,
        .irq_handler = radeon_driver_irq_handler_kms,
        .ioctls = radeon_ioctls_kms,
-       .gem_init_object = radeon_gem_object_init,
        .gem_free_object = radeon_gem_object_free,
        .gem_open_object = radeon_gem_object_open,
        .gem_close_object = radeon_gem_object_close,
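
radeon_audio now follows the tri-state convention used by several other radeon parameters (-1 = auto, 0 = disable, 1 = enable), and the radeon_connectors.c hunks above only attach the audio property when the parameter is non-zero, defaulting it to RADEON_AUDIO_AUTO in auto mode. A stripped-down kernel-module sketch of the same parameter pattern (module and variable names are illustrative, not part of the driver):

    #include <linux/kernel.h>
    #include <linux/module.h>

    static int example_audio = -1;
    MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
    module_param_named(audio, example_audio, int, 0444);

    static int __init example_init(void)
    {
            if (example_audio == 0)
                    pr_info("audio explicitly disabled\n");
            else if (example_audio > 0)
                    pr_info("audio explicitly enabled\n");
            else
                    pr_info("audio in auto mode, pick a per-connector default\n");
            return 0;
    }

    static void __exit example_exit(void)
    {
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
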
index dce99c8a583501e92b72f4012a250a917835a51f..805c5e566b9a1f29539a4cf1148183056dc90579 100644 (file)
 #include <drm/radeon_drm.h>
 #include "radeon.h"
 
-int radeon_gem_object_init(struct drm_gem_object *obj)
-{
-       BUG();
-
-       return 0;
-}
-
 void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);
index 61580ddc4eb2375f908394cc1a119d74d8612aa2..d6b36766e8c9229276536d54cba3ab63794b1246 100644 (file)
@@ -191,7 +191,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
        switch (info->request) {
        case RADEON_INFO_DEVICE_ID:
-               *value = dev->pci_device;
+               *value = dev->pdev->device;
                break;
        case RADEON_INFO_NUM_GB_PIPES:
                *value = rdev->num_gb_pipes;
index 62cd512f5c8d4161ff3708204fe32f560566402e..c89971d904c36696ad17e6ac4ac68f017a81b27c 100644 (file)
@@ -392,7 +392,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
        props.type = BACKLIGHT_RAW;
        snprintf(bl_name, sizeof(bl_name),
                 "radeon_bl%d", dev->primary->index);
-       bd = backlight_device_register(bl_name, &drm_connector->kdev,
+       bd = backlight_device_register(bl_name, drm_connector->kdev,
                                       pdata, &radeon_backlight_ops, &props);
        if (IS_ERR(bd)) {
                DRM_ERROR("Backlight registration failed\n");
index ac07ad1d4f8c903783acc26e3fc8f9f0e3449d44..4f6b7fc7ad3cad3a90017c6d7f75192c4c55452a 100644 (file)
@@ -945,6 +945,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
                if (enable) {
                        mutex_lock(&rdev->pm.mutex);
                        rdev->pm.dpm.uvd_active = true;
+                       /* disable this for now */
+#if 0
                        if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
                        else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -954,6 +956,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
                        else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
                        else
+#endif
                                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
                        rdev->pm.dpm.state = dpm_state;
                        mutex_unlock(&rdev->pm.mutex);
index f4d6bcee9006451ca377b94ece370806a8f89d2d..12e8099a0823e23a1218c1e5a3f07e78bbfbfcd0 100644 (file)
@@ -36,8 +36,8 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
        struct radeon_bo *vram_obj = NULL;
        struct radeon_bo **gtt_obj = NULL;
        uint64_t gtt_addr, vram_addr;
-       unsigned i, n, size;
-       int r, ring;
+       unsigned n, size;
+       int i, r, ring;
 
        switch (flag) {
        case RADEON_TEST_COPY_DMA:
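
Changing the loop counter i from unsigned to int looks tied to the error-handling paths further down in this test function, which count the index back down; with an unsigned counter a condition like i >= 0 can never become false and the decrement simply wraps. A tiny standalone illustration of that wraparound:

    #include <stdio.h>

    int main(void)
    {
            unsigned int u = 0;
            int s = 0;

            --u;    /* wraps: "for (--i; i >= 0; --i)" never terminates when i is unsigned */
            --s;    /* goes negative: the signed loop stops as intended */

            printf("unsigned: %u\n", u);    /* 4294967295 with a 32-bit unsigned int */
            printf("signed:   %d\n", s);    /* -1 */
            return 0;
    }
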
index a0f11856dddef7067871e630ed2bafcb9a2d4587..308eff5be1b420b74b18ccb1b1e0c89f4bd67f32 100644 (file)
@@ -476,7 +476,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                return -EINVAL;
        }
 
-       if (p->rdev->family < CHIP_PALM && (cmd == 0 || cmd == 0x3) &&
+       /* TODO: is this still necessary on NI+ ? */
+       if ((cmd == 0 || cmd == 0x3) &&
            (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
                DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
                          start, end);
@@ -798,7 +799,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
                    (rdev->pm.dpm.hd != hd)) {
                        rdev->pm.dpm.sd = sd;
                        rdev->pm.dpm.hd = hd;
-                       streams_changed = true;
+                       /* disable this for now */
+                       /*streams_changed = true;*/
                }
        }
 
index c354c1094967990a46caea46612caed123bc9b51..d96f7cbca0a115f58eaeca9df3a6e585d6b3445d 100644 (file)
@@ -85,6 +85,9 @@ extern void si_dma_vm_set_page(struct radeon_device *rdev,
                               uint32_t incr, uint32_t flags);
 static void si_enable_gui_idle_interrupt(struct radeon_device *rdev,
                                         bool enable);
+static void si_fini_pg(struct radeon_device *rdev);
+static void si_fini_cg(struct radeon_device *rdev);
+static void si_rlc_stop(struct radeon_device *rdev);
 
 static const u32 verde_rlc_save_restore_register_list[] =
 {
@@ -1678,6 +1681,7 @@ static int si_init_microcode(struct radeon_device *rdev)
                       fw_name);
                release_firmware(rdev->smc_fw);
                rdev->smc_fw = NULL;
+               err = 0;
        } else if (rdev->smc_fw->size != smc_req_size) {
                printk(KERN_ERR
                       "si_smc: Bogus length %zu in firmware \"%s\"\n",
@@ -3608,6 +3612,13 @@ static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
        dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
 
+       /* disable PG/CG */
+       si_fini_pg(rdev);
+       si_fini_cg(rdev);
+
+       /* stop the rlc */
+       si_rlc_stop(rdev);
+
        /* Disable CP parsing/prefetching */
        WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
 
index 9ace28702c761a196089c7e88442e45b08a3ed16..2332aa1bf93c7c40936710c7e8595c6d49f6514b 100644 (file)
@@ -5208,7 +5208,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
                                        table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
                        }
                        j++;
-                       if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
 
                        if (!pi->mem_gddr5) {
@@ -5218,7 +5218,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
                                        table->mc_reg_table_entry[k].mc_data[j] =
                                                (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
                                j++;
-                               if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                               if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
                                        return -EINVAL;
                        }
                        break;
@@ -5231,7 +5231,7 @@ static int si_set_mc_special_registers(struct radeon_device *rdev,
                                        (temp_reg & 0xffff0000) |
                                        (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
                        j++;
-                       if (j > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
+                       if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
                                return -EINVAL;
                        break;
                default:
index 52d2ab6b67a0b8876bdfa1710756f582b3927382..7e2e0ea66a008f491f5396bada706ca469b23f0b 100644 (file)
  * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
  */
 #              define PACKET3_CP_DMA_DST_SEL(x)    ((x) << 20)
-                /* 0 - SRC_ADDR
+                /* 0 - DST_ADDR
                 * 1 - GDS
                 */
 #              define PACKET3_CP_DMA_ENGINE(x)     ((x) << 27)
 #              define PACKET3_CP_DMA_CP_SYNC       (1 << 31)
 /* COMMAND */
 #              define PACKET3_CP_DMA_DIS_WC        (1 << 21)
-#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
+#              define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
                 /* 0 - none
                 * 1 - 8 in 16
                 * 2 - 8 in 32
index 7f998bf1cc9dd796b412dae45184dc5f1c76f184..9364129ba292e1b5412a0d10818ed158c95bb0fa 100644 (file)
@@ -1868,7 +1868,7 @@ int trinity_dpm_init(struct radeon_device *rdev)
        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;
 
-       pi->enable_bapm = true;
+       pi->enable_bapm = false;
        pi->enable_nbps_policy = true;
        pi->enable_sclk_ds = true;
        pi->enable_gfx_power_gating = true;
index 3100fa9cb52f4ff5f0b924af6d146f1e06ae6908..7266805d9786c6fe9bfd38501f7ab674f7d05a73 100644 (file)
@@ -212,8 +212,8 @@ int uvd_v1_0_start(struct radeon_device *rdev)
        /* enable VCPU clock */
        WREG32(UVD_VCPU_CNTL,  1 << 9);
 
-       /* enable UMC and NC0 */
-       WREG32_P(UVD_LMI_CTRL2, 1 << 13, ~((1 << 8) | (1 << 13)));
+       /* enable UMC */
+       WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
 
        /* boot up the VCPU */
        WREG32(UVD_SOFT_RESET, 0);
index c590cd9dca0bc5d707bf1c8a2fdce62fb3946d59..d8e835ac2c5eabf65d5cdcd26d27147aaf7dfc8d 100644 (file)
@@ -4,6 +4,7 @@ config DRM_RCAR_DU
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
+       select DRM_KMS_FB_HELPER
        help
          Choose this option if you have an R-Car chipset.
          If M is selected the module will be called rcar-du-drm.
index ca498d151a76597de53a8f96bfe19f2279ac29bb..d1372862d871b083726638a77b7b8d8419287fb4 100644 (file)
@@ -2,6 +2,7 @@ config DRM_SHMOBILE
        tristate "DRM Support for SH Mobile"
        depends on DRM && (ARM || SUPERH)
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
        help
similarity index 84%
rename from drivers/gpu/host1x/drm/Kconfig
rename to drivers/gpu/drm/tegra/Kconfig
index 69853a4de40aecd85774d234640778646a552b2c..da34dd1b9ddbb062996849a0b7f42b59755aaabc 100644 (file)
@@ -1,7 +1,11 @@
 config DRM_TEGRA
        bool "NVIDIA Tegra DRM"
+       depends on ARCH_TEGRA || ARCH_MULTIPLATFORM
        depends on DRM
+       select TEGRA_HOST1X
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
+       select DRM_PANEL
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
@@ -13,6 +17,11 @@ config DRM_TEGRA
 
 if DRM_TEGRA
 
+config DRM_TEGRA_DEBUG
+       bool "NVIDIA Tegra DRM debug support"
+       help
+         Say yes here to enable debugging support.
+
 config DRM_TEGRA_STAGING
        bool "Enable HOST1X interface"
        depends on STAGING
@@ -21,9 +30,4 @@ config DRM_TEGRA_STAGING
 
          If unsure, choose N.
 
-config DRM_TEGRA_DEBUG
-       bool "NVIDIA Tegra DRM debug support"
-       help
-         Say yes here to enable debugging support.
-
 endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644 (file)
index 0000000..eb5675c
--- /dev/null
@@ -0,0 +1,16 @@
+ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+
+tegra-drm-y := \
+       bus.o \
+       drm.o \
+       gem.o \
+       fb.o \
+       dc.o \
+       output.o \
+       rgb.o \
+       hdmi.o \
+       dsi.o \
+       gr2d.o \
+       gr3d.o
+
+obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/bus.c b/drivers/gpu/drm/tegra/bus.c
new file mode 100644 (file)
index 0000000..565f8f7
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "drm.h"
+
+static int drm_host1x_set_busid(struct drm_device *dev,
+                               struct drm_master *master)
+{
+       const char *device = dev_name(dev->dev);
+       const char *driver = dev->driver->name;
+       const char *bus = dev->dev->bus->name;
+       int length;
+
+       master->unique_len = strlen(bus) + 1 + strlen(device);
+       master->unique_size = master->unique_len;
+
+       master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
+       if (!master->unique)
+               return -ENOMEM;
+
+       snprintf(master->unique, master->unique_len + 1, "%s:%s", bus, device);
+
+       length = strlen(driver) + 1 + master->unique_len;
+
+       dev->devname = kmalloc(length + 1, GFP_KERNEL);
+       if (!dev->devname)
+               return -ENOMEM;
+
+       snprintf(dev->devname, length + 1, "%s@%s", driver, master->unique);
+
+       return 0;
+}
+
+static struct drm_bus drm_host1x_bus = {
+       .bus_type = DRIVER_BUS_HOST1X,
+       .set_busid = drm_host1x_set_busid,
+};
+
+int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device)
+{
+       struct drm_device *drm;
+       int ret;
+
+       INIT_LIST_HEAD(&driver->device_list);
+       driver->bus = &drm_host1x_bus;
+
+       drm = drm_dev_alloc(driver, &device->dev);
+       if (!drm)
+               return -ENOMEM;
+
+       ret = drm_dev_register(drm, 0);
+       if (ret)
+               goto err_free;
+
+       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name,
+                driver->major, driver->minor, driver->patchlevel,
+                driver->date, drm->primary->index);
+
+       return 0;
+
+err_free:
+       drm_dev_free(drm);
+       return ret;
+}
+
+void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device)
+{
+       struct tegra_drm *tegra = dev_get_drvdata(&device->dev);
+
+       drm_put_dev(tegra->drm);
+}
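
drm_host1x_set_busid() above composes the DRM "unique" string as "<bus>:<device>" and the devname as "<driver>@<unique>". With the driver name "tegra" from this file and a purely hypothetical bus/device name pair, the two snprintf() calls produce strings like the ones in this small userspace re-creation:

    #include <stdio.h>

    int main(void)
    {
            const char *bus = "host1x";     /* dev->dev->bus->name (assumed) */
            const char *device = "drm";     /* dev_name(dev->dev) - hypothetical */
            const char *driver = "tegra";   /* dev->driver->name, as defined in drm.c */
            char unique[64], devname[64];

            snprintf(unique, sizeof(unique), "%s:%s", bus, device);
            snprintf(devname, sizeof(devname), "%s@%s", driver, unique);

            printf("unique:  %s\n", unique);        /* host1x:drm */
            printf("devname: %s\n", devname);       /* tegra@host1x:drm */
            return 0;
    }
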
similarity index 93%
rename from drivers/gpu/host1x/drm/dc.c
rename to drivers/gpu/drm/tegra/dc.c
index b1a05ad901c3d4758f4d768efc5b276d2dc4ee8e..ae1cb31ead7e4256c32f6ea045395347780a25a8 100644 (file)
@@ -8,13 +8,9 @@
  */
 
 #include <linux/clk.h>
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
 #include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
 
-#include "host1x_client.h"
 #include "dc.h"
 #include "drm.h"
 #include "gem.h"
@@ -51,6 +47,8 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
        window.dst.h = crtc_h;
        window.format = tegra_dc_format(fb->pixel_format);
        window.bits_per_pixel = fb->bits_per_pixel;
+       window.bottom_up = tegra_fb_is_bottom_up(fb);
+       window.tiled = tegra_fb_is_tiled(fb);
 
        for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
                struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
@@ -97,8 +95,11 @@ static int tegra_plane_disable(struct drm_plane *plane)
 
 static void tegra_plane_destroy(struct drm_plane *plane)
 {
+       struct tegra_plane *p = to_tegra_plane(plane);
+
        tegra_plane_disable(plane);
        drm_plane_cleanup(plane);
+       kfree(p);
 }
 
 static const struct drm_plane_funcs tegra_plane_funcs = {
@@ -124,7 +125,7 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
        for (i = 0; i < 2; i++) {
                struct tegra_plane *plane;
 
-               plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+               plane = kzalloc(sizeof(*plane), GFP_KERNEL);
                if (!plane)
                        return -ENOMEM;
 
@@ -133,8 +134,10 @@ static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
                err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
                                     &tegra_plane_funcs, plane_formats,
                                     ARRAY_SIZE(plane_formats), false);
-               if (err < 0)
+               if (err < 0) {
+                       kfree(plane);
                        return err;
+               }
        }
 
        return 0;
@@ -145,6 +148,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 {
        unsigned int format = tegra_dc_format(fb->pixel_format);
        struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
+       unsigned int h_offset = 0, v_offset = 0;
        unsigned long value;
 
        tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -156,6 +160,32 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
        tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
        tegra_dc_writel(dc, format, DC_WIN_COLOR_DEPTH);
 
+       if (tegra_fb_is_tiled(fb)) {
+               value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+                       DC_WIN_BUFFER_ADDR_MODE_TILE;
+       } else {
+               value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+                       DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+       }
+
+       tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
+       /* make sure bottom-up buffers are properly displayed */
+       if (tegra_fb_is_bottom_up(fb)) {
+               value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+               value |= INVERT_V;
+               tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+
+               v_offset += fb->height - 1;
+       } else {
+               value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
+               value &= ~INVERT_V;
+               tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
+       }
+
+       tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
+       tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
+
        value = GENERAL_UPDATE | WIN_A_UPDATE;
        tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
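
The bottom-up handling added here points the window base at the last line of the buffer (v_offset = fb->height - 1) and sets INVERT_V, so scanout walks backwards through memory while the image appears upright; tiled versus linear layout is selected independently via DC_WIN_BUFFER_ADDR_MODE. As a rough model of what the vertical inversion does to line addressing (the helper below is illustrative, not driver code):

    #include <stdio.h>

    /* address of the line shown on display row 'row', for a bottom-up buffer */
    static unsigned long scanout_line_addr(unsigned long base, unsigned int pitch,
                                           unsigned int height, unsigned int row)
    {
            unsigned int v_offset = height - 1;     /* what the driver programs */

            return base + (unsigned long)(v_offset - row) * pitch;  /* INVERT_V: step backwards */
    }

    int main(void)
    {
            /* a hypothetical 4-line buffer at 0x1000 with a 256-byte pitch */
            unsigned int row;

            for (row = 0; row < 4; row++)
                    printf("display row %u <- 0x%lx\n", row,
                           scanout_line_addr(0x1000, 256, 4, row));
            return 0;
    }
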
 
@@ -255,14 +285,26 @@ static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        return 0;
 }
 
+static void drm_crtc_clear(struct drm_crtc *crtc)
+{
+       memset(crtc, 0, sizeof(*crtc));
+}
+
+static void tegra_dc_destroy(struct drm_crtc *crtc)
+{
+       drm_crtc_cleanup(crtc);
+       drm_crtc_clear(crtc);
+}
+
 static const struct drm_crtc_funcs tegra_crtc_funcs = {
        .page_flip = tegra_dc_page_flip,
        .set_config = drm_crtc_helper_set_config,
-       .destroy = drm_crtc_cleanup,
+       .destroy = tegra_dc_destroy,
 };
 
 static void tegra_crtc_disable(struct drm_crtc *crtc)
 {
+       struct tegra_dc *dc = to_tegra_dc(crtc);
        struct drm_device *drm = crtc->dev;
        struct drm_plane *plane;
 
@@ -277,6 +319,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
                        }
                }
        }
+
+       drm_vblank_off(drm, dc->pipe);
 }
 
 static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -491,9 +535,22 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
                tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
        }
 
+       if (window->bottom_up)
+               v_offset += window->src.h - 1;
+
        tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
        tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
 
+       if (window->tiled) {
+               value = DC_WIN_BUFFER_ADDR_MODE_TILE_UV |
+                       DC_WIN_BUFFER_ADDR_MODE_TILE;
+       } else {
+               value = DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV |
+                       DC_WIN_BUFFER_ADDR_MODE_LINEAR;
+       }
+
+       tegra_dc_writel(dc, value, DC_WIN_BUFFER_ADDR_MODE);
+
        value = WIN_ENABLE;
 
        if (yuv) {
@@ -512,6 +569,9 @@ int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
                value |= COLOR_EXPAND;
        }
 
+       if (window->bottom_up)
+               value |= INVERT_V;
+
        tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
 
        /*
@@ -1041,30 +1101,30 @@ static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
        return 0;
 }
 
-static int tegra_dc_drm_init(struct host1x_client *client,
-                            struct drm_device *drm)
+static int tegra_dc_init(struct host1x_client *client)
 {
+       struct tegra_drm *tegra = dev_get_drvdata(client->parent);
        struct tegra_dc *dc = host1x_client_to_dc(client);
        int err;
 
-       dc->pipe = drm->mode_config.num_crtc;
+       dc->pipe = tegra->drm->mode_config.num_crtc;
 
-       drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+       drm_crtc_init(tegra->drm, &dc->base, &tegra_crtc_funcs);
        drm_mode_crtc_set_gamma_size(&dc->base, 256);
        drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
 
-       err = tegra_dc_rgb_init(drm, dc);
+       err = tegra_dc_rgb_init(tegra->drm, dc);
        if (err < 0 && err != -ENODEV) {
                dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
                return err;
        }
 
-       err = tegra_dc_add_planes(drm, dc);
+       err = tegra_dc_add_planes(tegra->drm, dc);
        if (err < 0)
                return err;
 
        if (IS_ENABLED(CONFIG_DEBUG_FS)) {
-               err = tegra_dc_debugfs_init(dc, drm->primary);
+               err = tegra_dc_debugfs_init(dc, tegra->drm->primary);
                if (err < 0)
                        dev_err(dc->dev, "debugfs setup failed: %d\n", err);
        }
@@ -1080,7 +1140,7 @@ static int tegra_dc_drm_init(struct host1x_client *client,
        return 0;
 }
 
-static int tegra_dc_drm_exit(struct host1x_client *client)
+static int tegra_dc_exit(struct host1x_client *client)
 {
        struct tegra_dc *dc = host1x_client_to_dc(client);
        int err;
@@ -1103,13 +1163,12 @@ static int tegra_dc_drm_exit(struct host1x_client *client)
 }
 
 static const struct host1x_client_ops dc_client_ops = {
-       .drm_init = tegra_dc_drm_init,
-       .drm_exit = tegra_dc_drm_exit,
+       .init = tegra_dc_init,
+       .exit = tegra_dc_exit,
 };
 
 static int tegra_dc_probe(struct platform_device *pdev)
 {
-       struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
        struct resource *regs;
        struct tegra_dc *dc;
        int err;
@@ -1153,7 +1212,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
                return err;
        }
 
-       err = host1x_register_client(host1x, &dc->client);
+       err = host1x_client_register(&dc->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register host1x client: %d\n",
                        err);
@@ -1167,17 +1226,22 @@ static int tegra_dc_probe(struct platform_device *pdev)
 
 static int tegra_dc_remove(struct platform_device *pdev)
 {
-       struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
        struct tegra_dc *dc = platform_get_drvdata(pdev);
        int err;
 
-       err = host1x_unregister_client(host1x, &dc->client);
+       err = host1x_client_unregister(&dc->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
                        err);
                return err;
        }
 
+       err = tegra_dc_rgb_remove(dc);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to remove RGB output: %d\n", err);
+               return err;
+       }
+
        clk_disable_unprepare(dc->clk);
 
        return 0;
similarity index 98%
rename from drivers/gpu/host1x/drm/dc.h
rename to drivers/gpu/drm/tegra/dc.h
index 79eaec9aac7752a27bddd06a4c08cf7f2292eac8..f1b31206a480aa872301e50cd2ae036755ced147 100644 (file)
 
 #define DC_DISP_DISP_WIN_OPTIONS               0x402
 #define HDMI_ENABLE (1 << 30)
+#define DSI_ENABLE  (1 << 29)
 
 #define DC_DISP_DISP_MEM_HIGH_PRIORITY         0x403
 #define CURSOR_THRESHOLD(x)   (((x) & 0x03) << 24)
 #define DC_WIN_CSC_KVB                         0x618
 
 #define DC_WIN_WIN_OPTIONS                     0x700
+#define INVERT_V     (1 <<  2)
 #define COLOR_EXPAND (1 <<  6)
 #define CSC_ENABLE   (1 << 18)
 #define WIN_ENABLE   (1 << 30)
 #define DC_WIN_BUF_STRIDE                      0x70b
 #define DC_WIN_UV_BUF_STRIDE                   0x70c
 #define DC_WIN_BUFFER_ADDR_MODE                        0x70d
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR         (0 <<  0)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE           (1 <<  0)
+#define DC_WIN_BUFFER_ADDR_MODE_LINEAR_UV      (0 << 16)
+#define DC_WIN_BUFFER_ADDR_MODE_TILE_UV                (1 << 16)
 #define DC_WIN_DV_CONTROL                      0x70e
 
 #define DC_WIN_BLEND_NOKEY                     0x70f
similarity index 51%
rename from drivers/gpu/host1x/drm/drm.c
rename to drivers/gpu/drm/tegra/drm.c
index 8c61ceeaa12dda8b33cb5d9c52d987e46adb4573..36360923b424d7a70e628ba07074b95b2684c396 100644 (file)
@@ -7,21 +7,10 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/host1x.h>
 
-#include <linux/dma-mapping.h>
-#include <asm/dma-iommu.h>
-
-#include <drm/drm.h>
-#include <drm/drmP.h>
-
-#include "host1x_client.h"
-#include "dev.h"
 #include "drm.h"
 #include "gem.h"
-#include "syncpt.h"
 
 #define DRIVER_NAME "tegra"
 #define DRIVER_DESC "NVIDIA Tegra graphics"
 #define DRIVER_MINOR 0
 #define DRIVER_PATCHLEVEL 0
 
-struct host1x_drm_client {
-       struct host1x_client *client;
-       struct device_node *np;
-       struct list_head list;
+struct tegra_drm_file {
+       struct list_head contexts;
 };
 
-static int host1x_add_drm_client(struct host1x_drm *host1x,
-                                struct device_node *np)
+static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 {
-       struct host1x_drm_client *client;
+       struct host1x_device *device = to_host1x_device(drm->dev);
+       struct tegra_drm *tegra;
+       int err;
 
-       client = kzalloc(sizeof(*client), GFP_KERNEL);
-       if (!client)
+       tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
+       if (!tegra)
                return -ENOMEM;
 
-       INIT_LIST_HEAD(&client->list);
-       client->np = of_node_get(np);
+       dev_set_drvdata(drm->dev, tegra);
+       mutex_init(&tegra->clients_lock);
+       INIT_LIST_HEAD(&tegra->clients);
+       drm->dev_private = tegra;
+       tegra->drm = drm;
 
-       list_add_tail(&client->list, &host1x->drm_clients);
+       drm_mode_config_init(drm);
 
-       return 0;
-}
+       err = host1x_device_init(device);
+       if (err < 0)
+               return err;
 
-static int host1x_activate_drm_client(struct host1x_drm *host1x,
-                                     struct host1x_drm_client *drm,
-                                     struct host1x_client *client)
-{
-       mutex_lock(&host1x->drm_clients_lock);
-       list_del_init(&drm->list);
-       list_add_tail(&drm->list, &host1x->drm_active);
-       drm->client = client;
-       mutex_unlock(&host1x->drm_clients_lock);
+       /*
+        * We don't use the drm_irq_install() helpers provided by the DRM
+        * core, so we need to set this manually in order to allow the
+        * DRM_IOCTL_WAIT_VBLANK to operate correctly.
+        */
+       drm->irq_enabled = true;
 
-       return 0;
-}
+       err = drm_vblank_init(drm, drm->mode_config.num_crtc);
+       if (err < 0)
+               return err;
 
-static int host1x_remove_drm_client(struct host1x_drm *host1x,
-                                   struct host1x_drm_client *client)
-{
-       mutex_lock(&host1x->drm_clients_lock);
-       list_del_init(&client->list);
-       mutex_unlock(&host1x->drm_clients_lock);
+       err = tegra_drm_fb_init(drm);
+       if (err < 0)
+               return err;
 
-       of_node_put(client->np);
-       kfree(client);
+       drm_kms_helper_poll_init(drm);
 
        return 0;
 }
 
-static int host1x_parse_dt(struct host1x_drm *host1x)
+static int tegra_drm_unload(struct drm_device *drm)
 {
-       static const char * const compat[] = {
-               "nvidia,tegra20-dc",
-               "nvidia,tegra20-hdmi",
-               "nvidia,tegra20-gr2d",
-               "nvidia,tegra30-dc",
-               "nvidia,tegra30-hdmi",
-               "nvidia,tegra30-gr2d",
-       };
-       unsigned int i;
+       struct host1x_device *device = to_host1x_device(drm->dev);
        int err;
 
-       for (i = 0; i < ARRAY_SIZE(compat); i++) {
-               struct device_node *np;
+       drm_kms_helper_poll_fini(drm);
+       tegra_drm_fb_exit(drm);
+       drm_vblank_cleanup(drm);
+       drm_mode_config_cleanup(drm);
 
-               for_each_child_of_node(host1x->dev->of_node, np) {
-                       if (of_device_is_compatible(np, compat[i]) &&
-                           of_device_is_available(np)) {
-                               err = host1x_add_drm_client(host1x, np);
-                               if (err < 0)
-                                       return err;
-                       }
-               }
-       }
+       err = host1x_device_exit(device);
+       if (err < 0)
+               return err;
 
        return 0;
 }
 
-int host1x_drm_alloc(struct platform_device *pdev)
+static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
 {
-       struct host1x_drm *host1x;
-       int err;
+       struct tegra_drm_file *fpriv;
 
-       host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
-       if (!host1x)
+       fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+       if (!fpriv)
                return -ENOMEM;
 
-       mutex_init(&host1x->drm_clients_lock);
-       INIT_LIST_HEAD(&host1x->drm_clients);
-       INIT_LIST_HEAD(&host1x->drm_active);
-       mutex_init(&host1x->clients_lock);
-       INIT_LIST_HEAD(&host1x->clients);
-       host1x->dev = &pdev->dev;
-
-       err = host1x_parse_dt(host1x);
-       if (err < 0) {
-               dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
-               return err;
-       }
-
-       host1x_set_drm_data(&pdev->dev, host1x);
+       INIT_LIST_HEAD(&fpriv->contexts);
+       filp->driver_priv = fpriv;
 
        return 0;
 }
 
-int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm)
+static void tegra_drm_context_free(struct tegra_drm_context *context)
 {
-       struct host1x_client *client;
-
-       mutex_lock(&host1x->clients_lock);
-
-       list_for_each_entry(client, &host1x->clients, list) {
-               if (client->ops && client->ops->drm_init) {
-                       int err = client->ops->drm_init(client, drm);
-                       if (err < 0) {
-                               dev_err(host1x->dev,
-                                       "DRM setup failed for %s: %d\n",
-                                       dev_name(client->dev), err);
-                               mutex_unlock(&host1x->clients_lock);
-                               return err;
-                       }
-               }
-       }
-
-       mutex_unlock(&host1x->clients_lock);
-
-       return 0;
+       context->client->ops->close_channel(context);
+       kfree(context);
 }
 
-int host1x_drm_exit(struct host1x_drm *host1x)
+static void tegra_drm_lastclose(struct drm_device *drm)
 {
-       struct platform_device *pdev = to_platform_device(host1x->dev);
-       struct host1x_client *client;
-
-       if (!host1x->drm)
-               return 0;
+       struct tegra_drm *tegra = drm->dev_private;
 
-       mutex_lock(&host1x->clients_lock);
-
-       list_for_each_entry_reverse(client, &host1x->clients, list) {
-               if (client->ops && client->ops->drm_exit) {
-                       int err = client->ops->drm_exit(client);
-                       if (err < 0) {
-                               dev_err(host1x->dev,
-                                       "DRM cleanup failed for %s: %d\n",
-                                       dev_name(client->dev), err);
-                               mutex_unlock(&host1x->clients_lock);
-                               return err;
-                       }
-               }
-       }
+       tegra_fbdev_restore_mode(tegra->fbdev);
+}
 
-       mutex_unlock(&host1x->clients_lock);
+static struct host1x_bo *
+host1x_bo_lookup(struct drm_device *drm, struct drm_file *file, u32 handle)
+{
+       struct drm_gem_object *gem;
+       struct tegra_bo *bo;
 
-       drm_platform_exit(&tegra_drm_driver, pdev);
-       host1x->drm = NULL;
+       gem = drm_gem_object_lookup(drm, file, handle);
+       if (!gem)
+               return NULL;
 
-       return 0;
-}
+       mutex_lock(&drm->struct_mutex);
+       drm_gem_object_unreference(gem);
+       mutex_unlock(&drm->struct_mutex);
 
-int host1x_register_client(struct host1x_drm *host1x,
-                          struct host1x_client *client)
-{
-       struct host1x_drm_client *drm, *tmp;
+       bo = to_tegra_bo(gem);
+       return &bo->base;
+}
+
+int tegra_drm_submit(struct tegra_drm_context *context,
+                    struct drm_tegra_submit *args, struct drm_device *drm,
+                    struct drm_file *file)
+{
+       unsigned int num_cmdbufs = args->num_cmdbufs;
+       unsigned int num_relocs = args->num_relocs;
+       unsigned int num_waitchks = args->num_waitchks;
+       struct drm_tegra_cmdbuf __user *cmdbufs =
+               (void * __user)(uintptr_t)args->cmdbufs;
+       struct drm_tegra_reloc __user *relocs =
+               (void * __user)(uintptr_t)args->relocs;
+       struct drm_tegra_waitchk __user *waitchks =
+               (void * __user)(uintptr_t)args->waitchks;
+       struct drm_tegra_syncpt syncpt;
+       struct host1x_job *job;
        int err;
 
-       mutex_lock(&host1x->clients_lock);
-       list_add_tail(&client->list, &host1x->clients);
-       mutex_unlock(&host1x->clients_lock);
-
-       list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
-               if (drm->np == client->dev->of_node)
-                       host1x_activate_drm_client(host1x, drm, client);
+       /* We don't yet support more than one syncpt_incr struct per submit */
+       if (args->num_syncpts != 1)
+               return -EINVAL;
 
-       if (list_empty(&host1x->drm_clients)) {
-               struct platform_device *pdev = to_platform_device(host1x->dev);
+       job = host1x_job_alloc(context->channel, args->num_cmdbufs,
+                              args->num_relocs, args->num_waitchks);
+       if (!job)
+               return -ENOMEM;
 
-               err = drm_platform_init(&tegra_drm_driver, pdev);
-               if (err < 0) {
-                       dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
-                       return err;
-               }
-       }
+       job->num_relocs = args->num_relocs;
+       job->num_waitchk = args->num_waitchks;
+       job->client = (u32)args->context;
+       job->class = context->client->base.class;
+       job->serialize = true;
 
-       return 0;
-}
+       while (num_cmdbufs) {
+               struct drm_tegra_cmdbuf cmdbuf;
+               struct host1x_bo *bo;
 
-int host1x_unregister_client(struct host1x_drm *host1x,
-                            struct host1x_client *client)
-{
-       struct host1x_drm_client *drm, *tmp;
-       int err;
+               err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
+               if (err)
+                       goto fail;
 
-       list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
-               if (drm->client == client) {
-                       err = host1x_drm_exit(host1x);
-                       if (err < 0) {
-                               dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
-                                       err);
-                               return err;
-                       }
-
-                       host1x_remove_drm_client(host1x, drm);
-                       break;
+               bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
+               if (!bo) {
+                       err = -ENOENT;
+                       goto fail;
                }
+
+               host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
+               num_cmdbufs--;
+               cmdbufs++;
        }
 
-       mutex_lock(&host1x->clients_lock);
-       list_del_init(&client->list);
-       mutex_unlock(&host1x->clients_lock);
+       err = copy_from_user(job->relocarray, relocs,
+                            sizeof(*relocs) * num_relocs);
+       if (err)
+               goto fail;
 
-       return 0;
-}
+       while (num_relocs--) {
+               struct host1x_reloc *reloc = &job->relocarray[num_relocs];
+               struct host1x_bo *cmdbuf, *target;
 
-static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
-{
-       struct host1x_drm *host1x;
-       int err;
+               cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
+               target = host1x_bo_lookup(drm, file, (u32)reloc->target);
 
-       host1x = host1x_get_drm_data(drm->dev);
-       drm->dev_private = host1x;
-       host1x->drm = drm;
+               reloc->cmdbuf = cmdbuf;
+               reloc->target = target;
 
-       drm_mode_config_init(drm);
+               if (!reloc->target || !reloc->cmdbuf) {
+                       err = -ENOENT;
+                       goto fail;
+               }
+       }
 
-       err = host1x_drm_init(host1x, drm);
-       if (err < 0)
-               return err;
+       err = copy_from_user(job->waitchk, waitchks,
+                            sizeof(*waitchks) * num_waitchks);
+       if (err)
+               goto fail;
 
-       /*
-        * We don't use the drm_irq_install() helpers provided by the DRM
-        * core, so we need to set this manually in order to allow the
-        * DRM_IOCTL_WAIT_VBLANK to operate correctly.
-        */
-       drm->irq_enabled = 1;
+       err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
+                            sizeof(syncpt));
+       if (err)
+               goto fail;
 
-       err = drm_vblank_init(drm, drm->mode_config.num_crtc);
-       if (err < 0)
-               return err;
+       job->is_addr_reg = context->client->ops->is_addr_reg;
+       job->syncpt_incrs = syncpt.incrs;
+       job->syncpt_id = syncpt.id;
+       job->timeout = 10000;
 
-       err = tegra_drm_fb_init(drm);
-       if (err < 0)
-               return err;
+       if (args->timeout && args->timeout < 10000)
+               job->timeout = args->timeout;
 
-       drm_kms_helper_poll_init(drm);
+       err = host1x_job_pin(job, context->client->base.dev);
+       if (err)
+               goto fail;
 
-       return 0;
-}
+       err = host1x_job_submit(job);
+       if (err)
+               goto fail_submit;
 
-static int tegra_drm_unload(struct drm_device *drm)
-{
-       drm_kms_helper_poll_fini(drm);
-       tegra_drm_fb_exit(drm);
-
-       drm_mode_config_cleanup(drm);
+       args->fence = job->syncpt_end;
 
+       host1x_job_put(job);
        return 0;
-}
-
-static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
-{
-       struct host1x_drm_file *fpriv;
-
-       fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
-       if (!fpriv)
-               return -ENOMEM;
-
-       INIT_LIST_HEAD(&fpriv->contexts);
-       filp->driver_priv = fpriv;
 
-       return 0;
+fail_submit:
+       host1x_job_unpin(job);
+fail:
+       host1x_job_put(job);
+       return err;
 }
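
tegra_drm_submit() above uses the usual kernel unwind idiom: after the job is allocated, every failure jumps to a label that releases exactly what has been set up so far, with fail_submit unpinning before the final host1x_job_put() that both exit paths share. A generic standalone illustration of the idiom (all names are placeholders):

    #include <stdio.h>

    static int  step_alloc(void)  { return 0; }
    static int  step_pin(void)    { return 0; }
    static int  step_submit(void) { return -1; }    /* simulate a late failure */
    static void unpin(void)       { printf("unpinned\n"); }
    static void put(void)         { printf("job reference dropped\n"); }

    static int submit(void)
    {
            int err;

            err = step_alloc();
            if (err)
                    return err;

            err = step_pin();
            if (err)
                    goto fail;              /* nothing pinned yet: just drop the job */

            err = step_submit();
            if (err)
                    goto fail_unpin;        /* pinned: undo that before dropping the job */

            put();                          /* success path drops its reference too */
            return 0;

    fail_unpin:
            unpin();
    fail:
            put();
            return err;
    }

    int main(void)
    {
            printf("submit() = %d\n", submit());
            return 0;
    }
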
 
-static void host1x_drm_context_free(struct host1x_drm_context *context)
-{
-       context->client->ops->close_channel(context);
-       kfree(context);
-}
 
-static void tegra_drm_lastclose(struct drm_device *drm)
+#ifdef CONFIG_DRM_TEGRA_STAGING
+static struct tegra_drm_context *tegra_drm_get_context(__u64 context)
 {
-       struct host1x_drm *host1x = drm->dev_private;
-
-       tegra_fbdev_restore_mode(host1x->fbdev);
+       return (struct tegra_drm_context *)(uintptr_t)context;
 }
 
-#ifdef CONFIG_DRM_TEGRA_STAGING
-static bool host1x_drm_file_owns_context(struct host1x_drm_file *file,
-                                        struct host1x_drm_context *context)
+static bool tegra_drm_file_owns_context(struct tegra_drm_file *file,
+                                       struct tegra_drm_context *context)
 {
-       struct host1x_drm_context *ctx;
+       struct tegra_drm_context *ctx;
 
        list_for_each_entry(ctx, &file->contexts, list)
                if (ctx == context)
@@ -335,7 +262,7 @@ static int tegra_gem_create(struct drm_device *drm, void *data,
        struct drm_tegra_gem_create *args = data;
        struct tegra_bo *bo;
 
-       bo = tegra_bo_create_with_handle(file, drm, args->size,
+       bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
                                         &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
@@ -366,10 +293,11 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
 static int tegra_syncpt_read(struct drm_device *drm, void *data,
                             struct drm_file *file)
 {
+       struct host1x *host = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_read *args = data;
-       struct host1x *host = dev_get_drvdata(drm->dev);
-       struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+       struct host1x_syncpt *sp;
 
+       sp = host1x_syncpt_get(host, args->id);
        if (!sp)
                return -EINVAL;
 
@@ -380,10 +308,11 @@ static int tegra_syncpt_read(struct drm_device *drm, void *data,
 static int tegra_syncpt_incr(struct drm_device *drm, void *data,
                             struct drm_file *file)
 {
+       struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_incr *args = data;
-       struct host1x *host = dev_get_drvdata(drm->dev);
-       struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+       struct host1x_syncpt *sp;
 
+       sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;
 
@@ -393,10 +322,11 @@ static int tegra_syncpt_incr(struct drm_device *drm, void *data,
 static int tegra_syncpt_wait(struct drm_device *drm, void *data,
                             struct drm_file *file)
 {
+       struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
        struct drm_tegra_syncpt_wait *args = data;
-       struct host1x *host = dev_get_drvdata(drm->dev);
-       struct host1x_syncpt *sp = host1x_syncpt_get(host, args->id);
+       struct host1x_syncpt *sp;
 
+       sp = host1x_syncpt_get(host1x, args->id);
        if (!sp)
                return -EINVAL;
 
@@ -407,26 +337,26 @@ static int tegra_syncpt_wait(struct drm_device *drm, void *data,
 static int tegra_open_channel(struct drm_device *drm, void *data,
                              struct drm_file *file)
 {
+       struct tegra_drm_file *fpriv = file->driver_priv;
+       struct tegra_drm *tegra = drm->dev_private;
        struct drm_tegra_open_channel *args = data;
-       struct host1x_client *client;
-       struct host1x_drm_context *context;
-       struct host1x_drm_file *fpriv = file->driver_priv;
-       struct host1x_drm *host1x = drm->dev_private;
+       struct tegra_drm_context *context;
+       struct tegra_drm_client *client;
        int err = -ENODEV;
 
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return -ENOMEM;
 
-       list_for_each_entry(client, &host1x->clients, list)
-               if (client->class == args->client) {
+       list_for_each_entry(client, &tegra->clients, list)
+               if (client->base.class == args->client) {
                        err = client->ops->open_channel(client, context);
                        if (err)
                                break;
 
-                       context->client = client;
                        list_add(&context->list, &fpriv->contexts);
                        args->context = (uintptr_t)context;
+                       context->client = client;
                        return 0;
                }
 
@@ -437,16 +367,17 @@ static int tegra_open_channel(struct drm_device *drm, void *data,
 static int tegra_close_channel(struct drm_device *drm, void *data,
                               struct drm_file *file)
 {
+       struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_close_channel *args = data;
-       struct host1x_drm_file *fpriv = file->driver_priv;
-       struct host1x_drm_context *context =
-               (struct host1x_drm_context *)(uintptr_t)args->context;
+       struct tegra_drm_context *context;
+
+       context = tegra_drm_get_context(args->context);
 
-       if (!host1x_drm_file_owns_context(fpriv, context))
+       if (!tegra_drm_file_owns_context(fpriv, context))
                return -EINVAL;
 
        list_del(&context->list);
-       host1x_drm_context_free(context);
+       tegra_drm_context_free(context);
 
        return 0;
 }
@@ -454,19 +385,20 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
 static int tegra_get_syncpt(struct drm_device *drm, void *data,
                            struct drm_file *file)
 {
+       struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_get_syncpt *args = data;
-       struct host1x_drm_file *fpriv = file->driver_priv;
-       struct host1x_drm_context *context =
-               (struct host1x_drm_context *)(uintptr_t)args->context;
+       struct tegra_drm_context *context;
        struct host1x_syncpt *syncpt;
 
-       if (!host1x_drm_file_owns_context(fpriv, context))
+       context = tegra_drm_get_context(args->context);
+
+       if (!tegra_drm_file_owns_context(fpriv, context))
                return -ENODEV;
 
-       if (args->index >= context->client->num_syncpts)
+       if (args->index >= context->client->base.num_syncpts)
                return -EINVAL;
 
-       syncpt = context->client->syncpts[args->index];
+       syncpt = context->client->base.syncpts[args->index];
        args->id = host1x_syncpt_id(syncpt);
 
        return 0;
@@ -475,12 +407,13 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
 static int tegra_submit(struct drm_device *drm, void *data,
                        struct drm_file *file)
 {
+       struct tegra_drm_file *fpriv = file->driver_priv;
        struct drm_tegra_submit *args = data;
-       struct host1x_drm_file *fpriv = file->driver_priv;
-       struct host1x_drm_context *context =
-               (struct host1x_drm_context *)(uintptr_t)args->context;
+       struct tegra_drm_context *context;
+
+       context = tegra_drm_get_context(args->context);
 
-       if (!host1x_drm_file_owns_context(fpriv, context))
+       if (!tegra_drm_file_owns_context(fpriv, context))
                return -ENODEV;
 
        return context->client->ops->submit(context, args, drm, file);
@@ -559,15 +492,15 @@ static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
 
 static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
 {
-       struct host1x_drm_file *fpriv = file->driver_priv;
-       struct host1x_drm_context *context, *tmp;
+       struct tegra_drm_file *fpriv = file->driver_priv;
+       struct tegra_drm_context *context, *tmp;
        struct drm_crtc *crtc;
 
        list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
                tegra_dc_cancel_page_flip(crtc, file);
 
        list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
-               host1x_drm_context_free(context);
+               tegra_drm_context_free(context);
 
        kfree(fpriv);
 }
@@ -645,3 +578,116 @@ struct drm_driver tegra_drm_driver = {
        .minor = DRIVER_MINOR,
        .patchlevel = DRIVER_PATCHLEVEL,
 };
+
+int tegra_drm_register_client(struct tegra_drm *tegra,
+                             struct tegra_drm_client *client)
+{
+       mutex_lock(&tegra->clients_lock);
+       list_add_tail(&client->list, &tegra->clients);
+       mutex_unlock(&tegra->clients_lock);
+
+       return 0;
+}
+
+int tegra_drm_unregister_client(struct tegra_drm *tegra,
+                               struct tegra_drm_client *client)
+{
+       mutex_lock(&tegra->clients_lock);
+       list_del_init(&client->list);
+       mutex_unlock(&tegra->clients_lock);
+
+       return 0;
+}
+
+static int host1x_drm_probe(struct host1x_device *device)
+{
+       return drm_host1x_init(&tegra_drm_driver, device);
+}
+
+static int host1x_drm_remove(struct host1x_device *device)
+{
+       drm_host1x_exit(&tegra_drm_driver, device);
+
+       return 0;
+}
+
+static const struct of_device_id host1x_drm_subdevs[] = {
+       { .compatible = "nvidia,tegra20-dc", },
+       { .compatible = "nvidia,tegra20-hdmi", },
+       { .compatible = "nvidia,tegra20-gr2d", },
+       { .compatible = "nvidia,tegra20-gr3d", },
+       { .compatible = "nvidia,tegra30-dc", },
+       { .compatible = "nvidia,tegra30-hdmi", },
+       { .compatible = "nvidia,tegra30-gr2d", },
+       { .compatible = "nvidia,tegra30-gr3d", },
+       { .compatible = "nvidia,tegra114-dsi", },
+       { .compatible = "nvidia,tegra114-hdmi", },
+       { .compatible = "nvidia,tegra114-gr3d", },
+       { /* sentinel */ }
+};
+
+static struct host1x_driver host1x_drm_driver = {
+       .name = "drm",
+       .probe = host1x_drm_probe,
+       .remove = host1x_drm_remove,
+       .subdevs = host1x_drm_subdevs,
+};
+
+static int __init host1x_drm_init(void)
+{
+       int err;
+
+       err = host1x_driver_register(&host1x_drm_driver);
+       if (err < 0)
+               return err;
+
+       err = platform_driver_register(&tegra_dc_driver);
+       if (err < 0)
+               goto unregister_host1x;
+
+       err = platform_driver_register(&tegra_dsi_driver);
+       if (err < 0)
+               goto unregister_dc;
+
+       err = platform_driver_register(&tegra_hdmi_driver);
+       if (err < 0)
+               goto unregister_dsi;
+
+       err = platform_driver_register(&tegra_gr2d_driver);
+       if (err < 0)
+               goto unregister_hdmi;
+
+       err = platform_driver_register(&tegra_gr3d_driver);
+       if (err < 0)
+               goto unregister_gr2d;
+
+       return 0;
+
+unregister_gr2d:
+       platform_driver_unregister(&tegra_gr2d_driver);
+unregister_hdmi:
+       platform_driver_unregister(&tegra_hdmi_driver);
+unregister_dsi:
+       platform_driver_unregister(&tegra_dsi_driver);
+unregister_dc:
+       platform_driver_unregister(&tegra_dc_driver);
+unregister_host1x:
+       host1x_driver_unregister(&host1x_drm_driver);
+       return err;
+}
+module_init(host1x_drm_init);
+
+static void __exit host1x_drm_exit(void)
+{
+       platform_driver_unregister(&tegra_gr3d_driver);
+       platform_driver_unregister(&tegra_gr2d_driver);
+       platform_driver_unregister(&tegra_hdmi_driver);
+       platform_driver_unregister(&tegra_dsi_driver);
+       platform_driver_unregister(&tegra_dc_driver);
+       host1x_driver_unregister(&host1x_drm_driver);
+}
+module_exit(host1x_drm_exit);
+
+MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
+MODULE_LICENSE("GPL v2");
similarity index 71%
rename from drivers/gpu/host1x/drm/drm.h
rename to drivers/gpu/drm/tegra/drm.h
index 02ce020f25754633bca0f9489e6e29c85ec9eea2..7070ec44a797df391e37f4940b4adc6ecdeb6423 100644 (file)
 #ifndef HOST1X_DRM_H
 #define HOST1X_DRM_H 1
 
+#include <uapi/drm/tegra_drm.h>
+#include <linux/host1x.h>
+
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_fixed.h>
-#include <uapi/drm/tegra_drm.h>
-
-#include "host1x.h"
 
 struct tegra_fb {
        struct drm_framebuffer base;
@@ -30,17 +30,8 @@ struct tegra_fbdev {
        struct tegra_fb *fb;
 };
 
-struct host1x_drm {
+struct tegra_drm {
        struct drm_device *drm;
-       struct device *dev;
-       void __iomem *regs;
-       struct clk *clk;
-       int syncpt;
-       int irq;
-
-       struct mutex drm_clients_lock;
-       struct list_head drm_clients;
-       struct list_head drm_active;
 
        struct mutex clients_lock;
        struct list_head clients;
@@ -48,66 +39,60 @@ struct host1x_drm {
        struct tegra_fbdev *fbdev;
 };
 
-struct host1x_client;
+struct tegra_drm_client;
 
-struct host1x_drm_context {
-       struct host1x_client *client;
+struct tegra_drm_context {
+       struct tegra_drm_client *client;
        struct host1x_channel *channel;
        struct list_head list;
 };
 
-struct host1x_client_ops {
-       int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
-       int (*drm_exit)(struct host1x_client *client);
-       int (*open_channel)(struct host1x_client *client,
-                           struct host1x_drm_context *context);
-       void (*close_channel)(struct host1x_drm_context *context);
-       int (*submit)(struct host1x_drm_context *context,
+struct tegra_drm_client_ops {
+       int (*open_channel)(struct tegra_drm_client *client,
+                           struct tegra_drm_context *context);
+       void (*close_channel)(struct tegra_drm_context *context);
+       int (*is_addr_reg)(struct device *dev, u32 class, u32 offset);
+       int (*submit)(struct tegra_drm_context *context,
                      struct drm_tegra_submit *args, struct drm_device *drm,
                      struct drm_file *file);
 };
 
-struct host1x_drm_file {
-       struct list_head contexts;
-};
-
-struct host1x_client {
-       struct host1x_drm *host1x;
-       struct device *dev;
-
-       const struct host1x_client_ops *ops;
-
-       enum host1x_class class;
-       struct host1x_channel *channel;
-
-       struct host1x_syncpt **syncpts;
-       unsigned int num_syncpts;
+int tegra_drm_submit(struct tegra_drm_context *context,
+                    struct drm_tegra_submit *args, struct drm_device *drm,
+                    struct drm_file *file);
 
+struct tegra_drm_client {
+       struct host1x_client base;
        struct list_head list;
+
+       const struct tegra_drm_client_ops *ops;
 };
 
-extern int host1x_drm_init(struct host1x_drm *host1x, struct drm_device *drm);
-extern int host1x_drm_exit(struct host1x_drm *host1x);
+static inline struct tegra_drm_client *
+host1x_to_drm_client(struct host1x_client *client)
+{
+       return container_of(client, struct tegra_drm_client, base);
+}
+
+extern int tegra_drm_register_client(struct tegra_drm *tegra,
+                                    struct tegra_drm_client *client);
+extern int tegra_drm_unregister_client(struct tegra_drm *tegra,
+                                      struct tegra_drm_client *client);
 
-extern int host1x_register_client(struct host1x_drm *host1x,
-                                 struct host1x_client *client);
-extern int host1x_unregister_client(struct host1x_drm *host1x,
-                                   struct host1x_client *client);
+extern int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
+extern int tegra_drm_exit(struct tegra_drm *tegra);
 
 struct tegra_output;
 
 struct tegra_dc {
        struct host1x_client client;
-       spinlock_t lock;
-
-       struct host1x_drm *host1x;
        struct device *dev;
+       spinlock_t lock;
 
        struct drm_crtc base;
        int pipe;
 
        struct clk *clk;
-
        void __iomem *regs;
        int irq;
 
@@ -123,7 +108,8 @@ struct tegra_dc {
        struct drm_pending_vblank_event *event;
 };
 
-static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
+static inline struct tegra_dc *
+host1x_client_to_dc(struct host1x_client *client)
 {
        return container_of(client, struct tegra_dc, client);
 }
@@ -162,6 +148,8 @@ struct tegra_dc_window {
        unsigned int format;
        unsigned int stride[2];
        unsigned long base[3];
+       bool bottom_up;
+       bool tiled;
 };
 
 /* from dc.c */
@@ -186,6 +174,7 @@ struct tegra_output_ops {
 enum tegra_output_type {
        TEGRA_OUTPUT_RGB,
        TEGRA_OUTPUT_HDMI,
+       TEGRA_OUTPUT_DSI,
 };
 
 struct tegra_output {
@@ -195,6 +184,7 @@ struct tegra_output {
        const struct tegra_output_ops *ops;
        enum tegra_output_type type;
 
+       struct drm_panel *panel;
        struct i2c_adapter *ddc;
        const struct edid *edid;
        unsigned int hpd_irq;
@@ -249,23 +239,35 @@ static inline int tegra_output_check_mode(struct tegra_output *output,
        return output ? -ENOSYS : -EINVAL;
 }
 
+/* from bus.c */
+int drm_host1x_init(struct drm_driver *driver, struct host1x_device *device);
+void drm_host1x_exit(struct drm_driver *driver, struct host1x_device *device);
+
 /* from rgb.c */
 extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
+extern int tegra_dc_rgb_remove(struct tegra_dc *dc);
 extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
 extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
 
 /* from output.c */
-extern int tegra_output_parse_dt(struct tegra_output *output);
+extern int tegra_output_probe(struct tegra_output *output);
+extern int tegra_output_remove(struct tegra_output *output);
 extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
 extern int tegra_output_exit(struct tegra_output *output);
 
 /* from fb.c */
 struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
                                    unsigned int index);
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
+bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer);
 extern int tegra_drm_fb_init(struct drm_device *drm);
 extern void tegra_drm_fb_exit(struct drm_device *drm);
 extern void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev);
 
-extern struct drm_driver tegra_drm_driver;
+extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_dsi_driver;
+extern struct platform_driver tegra_hdmi_driver;
+extern struct platform_driver tegra_gr2d_driver;
+extern struct platform_driver tegra_gr3d_driver;
 
 #endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
new file mode 100644 (file)
index 0000000..1cfbace
--- /dev/null
@@ -0,0 +1,1027 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/tegra.h>
+#include <linux/debugfs.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "drm.h"
+#include "dc.h"
+
+#define DSI_INCR_SYNCPT                        0x00
+#define DSI_INCR_SYNCPT_CONTROL                0x01
+#define DSI_INCR_SYNCPT_ERROR          0x02
+#define DSI_CTXSW                      0x08
+#define DSI_RD_DATA                    0x09
+#define DSI_WR_DATA                    0x0a
+#define DSI_POWER_CONTROL              0x0b
+#define DSI_POWER_CONTROL_ENABLE       (1 << 0)
+#define DSI_INT_ENABLE                 0x0c
+#define DSI_INT_STATUS                 0x0d
+#define DSI_INT_MASK                   0x0e
+#define DSI_HOST_CONTROL               0x0f
+#define DSI_HOST_CONTROL_RAW           (1 << 6)
+#define DSI_HOST_CONTROL_HS            (1 << 5)
+#define DSI_HOST_CONTROL_CS            (1 << 1)
+#define DSI_HOST_CONTROL_ECC           (1 << 0)
+#define DSI_CONTROL                    0x10
+#define DSI_CONTROL_HS_CLK_CTRL                (1 << 20)
+#define DSI_CONTROL_CHANNEL(c)         (((c) & 0x3) << 16)
+#define DSI_CONTROL_FORMAT(f)          (((f) & 0x3) << 12)
+#define DSI_CONTROL_TX_TRIG(x)         (((x) & 0x3) <<  8)
+#define DSI_CONTROL_LANES(n)           (((n) & 0x3) <<  4)
+#define DSI_CONTROL_DCS_ENABLE         (1 << 3)
+#define DSI_CONTROL_SOURCE(s)          (((s) & 0x1) <<  2)
+#define DSI_CONTROL_VIDEO_ENABLE       (1 << 1)
+#define DSI_CONTROL_HOST_ENABLE                (1 << 0)
+#define DSI_SOL_DELAY                  0x11
+#define DSI_MAX_THRESHOLD              0x12
+#define DSI_TRIGGER                    0x13
+#define DSI_TX_CRC                     0x14
+#define DSI_STATUS                     0x15
+#define DSI_INIT_SEQ_CONTROL           0x1a
+#define DSI_INIT_SEQ_DATA_0            0x1b
+#define DSI_INIT_SEQ_DATA_1            0x1c
+#define DSI_INIT_SEQ_DATA_2            0x1d
+#define DSI_INIT_SEQ_DATA_3            0x1e
+#define DSI_INIT_SEQ_DATA_4            0x1f
+#define DSI_INIT_SEQ_DATA_5            0x20
+#define DSI_INIT_SEQ_DATA_6            0x21
+#define DSI_INIT_SEQ_DATA_7            0x22
+#define DSI_PKT_SEQ_0_LO               0x23
+#define DSI_PKT_SEQ_0_HI               0x24
+#define DSI_PKT_SEQ_1_LO               0x25
+#define DSI_PKT_SEQ_1_HI               0x26
+#define DSI_PKT_SEQ_2_LO               0x27
+#define DSI_PKT_SEQ_2_HI               0x28
+#define DSI_PKT_SEQ_3_LO               0x29
+#define DSI_PKT_SEQ_3_HI               0x2a
+#define DSI_PKT_SEQ_4_LO               0x2b
+#define DSI_PKT_SEQ_4_HI               0x2c
+#define DSI_PKT_SEQ_5_LO               0x2d
+#define DSI_PKT_SEQ_5_HI               0x2e
+#define DSI_DCS_CMDS                   0x33
+#define DSI_PKT_LEN_0_1                        0x34
+#define DSI_PKT_LEN_2_3                        0x35
+#define DSI_PKT_LEN_4_5                        0x36
+#define DSI_PKT_LEN_6_7                        0x37
+#define DSI_PHY_TIMING_0               0x3c
+#define DSI_PHY_TIMING_1               0x3d
+#define DSI_PHY_TIMING_2               0x3e
+#define DSI_BTA_TIMING                 0x3f
+
+#define DSI_TIMING_FIELD(value, period, hwinc) \
+       ((DIV_ROUND_CLOSEST(value, period) - (hwinc)) & 0xff)
+
+#define DSI_TIMEOUT_0                  0x44
+#define DSI_TIMEOUT_LRX(x)             (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_HTX(x)             (((x) & 0xffff) <<  0)
+#define DSI_TIMEOUT_1                  0x45
+#define DSI_TIMEOUT_PR(x)              (((x) & 0xffff) << 16)
+#define DSI_TIMEOUT_TA(x)              (((x) & 0xffff) <<  0)
+#define DSI_TO_TALLY                   0x46
+#define DSI_TALLY_TA(x)                        (((x) & 0xff) << 16)
+#define DSI_TALLY_LRX(x)               (((x) & 0xff) <<  8)
+#define DSI_TALLY_HTX(x)               (((x) & 0xff) <<  0)
+#define DSI_PAD_CONTROL_0              0x4b
+#define DSI_PAD_CONTROL_VS1_PDIO(x)    (((x) & 0xf) <<  0)
+#define DSI_PAD_CONTROL_VS1_PDIO_CLK   (1 <<  8)
+#define DSI_PAD_CONTROL_VS1_PULLDN(x)  (((x) & 0xf) << 16)
+#define DSI_PAD_CONTROL_VS1_PULLDN_CLK (1 << 24)
+#define DSI_PAD_CONTROL_CD             0x4c
+#define DSI_PAD_CD_STATUS              0x4d
+#define DSI_VIDEO_MODE_CONTROL         0x4e
+#define DSI_PAD_CONTROL_1              0x4f
+#define DSI_PAD_CONTROL_2              0x50
+#define DSI_PAD_OUT_CLK(x)             (((x) & 0x7) <<  0)
+#define DSI_PAD_LP_DN(x)               (((x) & 0x7) <<  4)
+#define DSI_PAD_LP_UP(x)               (((x) & 0x7) <<  8)
+#define DSI_PAD_SLEW_DN(x)             (((x) & 0x7) << 12)
+#define DSI_PAD_SLEW_UP(x)             (((x) & 0x7) << 16)
+#define DSI_PAD_CONTROL_3              0x51
+#define DSI_PAD_CONTROL_4              0x52
+#define DSI_GANGED_MODE_CONTROL                0x53
+#define DSI_GANGED_MODE_START          0x54
+#define DSI_GANGED_MODE_SIZE           0x55
+#define DSI_RAW_DATA_BYTE_COUNT                0x56
+#define DSI_ULTRA_LOW_POWER_CONTROL    0x57
+#define DSI_INIT_SEQ_DATA_8            0x58
+#define DSI_INIT_SEQ_DATA_9            0x59
+#define DSI_INIT_SEQ_DATA_10           0x5a
+#define DSI_INIT_SEQ_DATA_11           0x5b
+#define DSI_INIT_SEQ_DATA_12           0x5c
+#define DSI_INIT_SEQ_DATA_13           0x5d
+#define DSI_INIT_SEQ_DATA_14           0x5e
+#define DSI_INIT_SEQ_DATA_15           0x5f
+
+enum dsi_format {
+       DSI_FORMAT_16P,
+       DSI_FORMAT_18NP,
+       DSI_FORMAT_18P,
+       DSI_FORMAT_24P,
+};
+
+/*
+ * D-PHY timing parameters
+ *
+ * A detailed description of these parameters can be found in the MIPI
+ * Alliance Specification for D-PHY, Section 5.9 "Global Operation Timing
+ * Parameters".
+ *
+ * All parameters are specified in nanoseconds.
+ */
+struct dsi_phy_timing {
+       unsigned int clkmiss;
+       unsigned int clkpost;
+       unsigned int clkpre;
+       unsigned int clkprepare;
+       unsigned int clksettle;
+       unsigned int clktermen;
+       unsigned int clktrail;
+       unsigned int clkzero;
+       unsigned int dtermen;
+       unsigned int eot;
+       unsigned int hsexit;
+       unsigned int hsprepare;
+       unsigned int hszero;
+       unsigned int hssettle;
+       unsigned int hsskip;
+       unsigned int hstrail;
+       unsigned int init;
+       unsigned int lpx;
+       unsigned int taget;
+       unsigned int tago;
+       unsigned int tasure;
+       unsigned int wakeup;
+};
+
+/*
+ * Default D-PHY timings based on MIPI D-PHY specification. Derived from
+ * the valid ranges specified in Section 5.9 of the D-PHY specification
+ * with minor adjustments.
+ */
+static int dsi_phy_timing_get_default(struct dsi_phy_timing *timing,
+                                     unsigned long period)
+{
+       timing->clkmiss = 0;
+       timing->clkpost = 70 + 52 * period;
+       timing->clkpre = 8;
+       timing->clkprepare = 65;
+       timing->clksettle = 95;
+       timing->clktermen = 0;
+       timing->clktrail = 80;
+       timing->clkzero = 260;
+       timing->dtermen = 0;
+       timing->eot = 0;
+       timing->hsexit = 120;
+       timing->hsprepare = 65 + 5 * period;
+       timing->hszero = 145 + 5 * period;
+       timing->hssettle = 85 + 6 * period;
+       timing->hsskip = 40;
+       timing->hstrail = max(8 * period, 60 + 4 * period);
+       timing->init = 100000;
+       timing->lpx = 60;
+       timing->taget = 5 * timing->lpx;
+       timing->tago = 4 * timing->lpx;
+       timing->tasure = 2 * timing->lpx;
+       timing->wakeup = 1000000;
+
+       return 0;
+}
+
+/*
+ * Validate D-PHY timing according to MIPI Alliance Specification for D-PHY,
+ * Section 5.9 "Global Operation Timing Parameters".
+ */
+static int dsi_phy_timing_validate(struct dsi_phy_timing *timing,
+                                  unsigned long period)
+{
+       if (timing->clkmiss > 60)
+               return -EINVAL;
+
+       if (timing->clkpost < (60 + 52 * period))
+               return -EINVAL;
+
+       if (timing->clkpre < 8)
+               return -EINVAL;
+
+       if (timing->clkprepare < 38 || timing->clkprepare > 95)
+               return -EINVAL;
+
+       if (timing->clksettle < 95 || timing->clksettle > 300)
+               return -EINVAL;
+
+       if (timing->clktermen > 38)
+               return -EINVAL;
+
+       if (timing->clktrail < 60)
+               return -EINVAL;
+
+       if (timing->clkprepare + timing->clkzero < 300)
+               return -EINVAL;
+
+       if (timing->dtermen > 35 + 4 * period)
+               return -EINVAL;
+
+       if (timing->eot > 105 + 12 * period)
+               return -EINVAL;
+
+       if (timing->hsexit < 100)
+               return -EINVAL;
+
+       if (timing->hsprepare < 40 + 4 * period ||
+           timing->hsprepare > 85 + 6 * period)
+               return -EINVAL;
+
+       if (timing->hsprepare + timing->hszero < 145 + 10 * period)
+               return -EINVAL;
+
+       if ((timing->hssettle < 85 + 6 * period) ||
+           (timing->hssettle > 145 + 10 * period))
+               return -EINVAL;
+
+       if (timing->hsskip < 40 || timing->hsskip > 55 + 4 * period)
+               return -EINVAL;
+
+       if (timing->hstrail < max(8 * period, 60 + 4 * period))
+               return -EINVAL;
+
+       if (timing->init < 100000)
+               return -EINVAL;
+
+       if (timing->lpx < 50)
+               return -EINVAL;
+
+       if (timing->taget != 5 * timing->lpx)
+               return -EINVAL;
+
+       if (timing->tago != 4 * timing->lpx)
+               return -EINVAL;
+
+       if (timing->tasure < timing->lpx || timing->tasure > 2 * timing->lpx)
+               return -EINVAL;
+
+       if (timing->wakeup < 1000000)
+               return -EINVAL;
+
+       return 0;
+}
+
+struct tegra_dsi {
+       struct host1x_client client;
+       struct tegra_output output;
+       struct device *dev;
+
+       void __iomem *regs;
+
+       struct clk *clk_parent;
+       struct clk *clk;
+
+       struct drm_info_list *debugfs_files;
+       struct drm_minor *minor;
+       struct dentry *debugfs;
+
+       enum dsi_format format;
+       unsigned int lanes;
+};
+
+static inline struct tegra_dsi *
+host1x_client_to_dsi(struct host1x_client *client)
+{
+       return container_of(client, struct tegra_dsi, client);
+}
+
+static inline struct tegra_dsi *to_dsi(struct tegra_output *output)
+{
+       return container_of(output, struct tegra_dsi, output);
+}
+
+static inline unsigned long tegra_dsi_readl(struct tegra_dsi *dsi,
+                                           unsigned long reg)
+{
+       return readl(dsi->regs + (reg << 2));
+}
+
+static inline void tegra_dsi_writel(struct tegra_dsi *dsi, unsigned long value,
+                                   unsigned long reg)
+{
+       writel(value, dsi->regs + (reg << 2));
+}
+
+static int tegra_dsi_show_regs(struct seq_file *s, void *data)
+{
+       struct drm_info_node *node = s->private;
+       struct tegra_dsi *dsi = node->info_ent->data;
+
+#define DUMP_REG(name)                                         \
+       seq_printf(s, "%-32s %#05x %08lx\n", #name, name,       \
+                  tegra_dsi_readl(dsi, name))
+
+       DUMP_REG(DSI_INCR_SYNCPT);
+       DUMP_REG(DSI_INCR_SYNCPT_CONTROL);
+       DUMP_REG(DSI_INCR_SYNCPT_ERROR);
+       DUMP_REG(DSI_CTXSW);
+       DUMP_REG(DSI_RD_DATA);
+       DUMP_REG(DSI_WR_DATA);
+       DUMP_REG(DSI_POWER_CONTROL);
+       DUMP_REG(DSI_INT_ENABLE);
+       DUMP_REG(DSI_INT_STATUS);
+       DUMP_REG(DSI_INT_MASK);
+       DUMP_REG(DSI_HOST_CONTROL);
+       DUMP_REG(DSI_CONTROL);
+       DUMP_REG(DSI_SOL_DELAY);
+       DUMP_REG(DSI_MAX_THRESHOLD);
+       DUMP_REG(DSI_TRIGGER);
+       DUMP_REG(DSI_TX_CRC);
+       DUMP_REG(DSI_STATUS);
+
+       DUMP_REG(DSI_INIT_SEQ_CONTROL);
+       DUMP_REG(DSI_INIT_SEQ_DATA_0);
+       DUMP_REG(DSI_INIT_SEQ_DATA_1);
+       DUMP_REG(DSI_INIT_SEQ_DATA_2);
+       DUMP_REG(DSI_INIT_SEQ_DATA_3);
+       DUMP_REG(DSI_INIT_SEQ_DATA_4);
+       DUMP_REG(DSI_INIT_SEQ_DATA_5);
+       DUMP_REG(DSI_INIT_SEQ_DATA_6);
+       DUMP_REG(DSI_INIT_SEQ_DATA_7);
+
+       DUMP_REG(DSI_PKT_SEQ_0_LO);
+       DUMP_REG(DSI_PKT_SEQ_0_HI);
+       DUMP_REG(DSI_PKT_SEQ_1_LO);
+       DUMP_REG(DSI_PKT_SEQ_1_HI);
+       DUMP_REG(DSI_PKT_SEQ_2_LO);
+       DUMP_REG(DSI_PKT_SEQ_2_HI);
+       DUMP_REG(DSI_PKT_SEQ_3_LO);
+       DUMP_REG(DSI_PKT_SEQ_3_HI);
+       DUMP_REG(DSI_PKT_SEQ_4_LO);
+       DUMP_REG(DSI_PKT_SEQ_4_HI);
+       DUMP_REG(DSI_PKT_SEQ_5_LO);
+       DUMP_REG(DSI_PKT_SEQ_5_HI);
+
+       DUMP_REG(DSI_DCS_CMDS);
+
+       DUMP_REG(DSI_PKT_LEN_0_1);
+       DUMP_REG(DSI_PKT_LEN_2_3);
+       DUMP_REG(DSI_PKT_LEN_4_5);
+       DUMP_REG(DSI_PKT_LEN_6_7);
+
+       DUMP_REG(DSI_PHY_TIMING_0);
+       DUMP_REG(DSI_PHY_TIMING_1);
+       DUMP_REG(DSI_PHY_TIMING_2);
+       DUMP_REG(DSI_BTA_TIMING);
+
+       DUMP_REG(DSI_TIMEOUT_0);
+       DUMP_REG(DSI_TIMEOUT_1);
+       DUMP_REG(DSI_TO_TALLY);
+
+       DUMP_REG(DSI_PAD_CONTROL_0);
+       DUMP_REG(DSI_PAD_CONTROL_CD);
+       DUMP_REG(DSI_PAD_CD_STATUS);
+       DUMP_REG(DSI_VIDEO_MODE_CONTROL);
+       DUMP_REG(DSI_PAD_CONTROL_1);
+       DUMP_REG(DSI_PAD_CONTROL_2);
+       DUMP_REG(DSI_PAD_CONTROL_3);
+       DUMP_REG(DSI_PAD_CONTROL_4);
+
+       DUMP_REG(DSI_GANGED_MODE_CONTROL);
+       DUMP_REG(DSI_GANGED_MODE_START);
+       DUMP_REG(DSI_GANGED_MODE_SIZE);
+
+       DUMP_REG(DSI_RAW_DATA_BYTE_COUNT);
+       DUMP_REG(DSI_ULTRA_LOW_POWER_CONTROL);
+
+       DUMP_REG(DSI_INIT_SEQ_DATA_8);
+       DUMP_REG(DSI_INIT_SEQ_DATA_9);
+       DUMP_REG(DSI_INIT_SEQ_DATA_10);
+       DUMP_REG(DSI_INIT_SEQ_DATA_11);
+       DUMP_REG(DSI_INIT_SEQ_DATA_12);
+       DUMP_REG(DSI_INIT_SEQ_DATA_13);
+       DUMP_REG(DSI_INIT_SEQ_DATA_14);
+       DUMP_REG(DSI_INIT_SEQ_DATA_15);
+
+#undef DUMP_REG
+
+       return 0;
+}
+
+static struct drm_info_list debugfs_files[] = {
+       { "regs", tegra_dsi_show_regs, 0, NULL },
+};
+
+static int tegra_dsi_debugfs_init(struct tegra_dsi *dsi,
+                                 struct drm_minor *minor)
+{
+       const char *name = dev_name(dsi->dev);
+       unsigned int i;
+       int err;
+
+       dsi->debugfs = debugfs_create_dir(name, minor->debugfs_root);
+       if (!dsi->debugfs)
+               return -ENOMEM;
+
+       dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
+                                    GFP_KERNEL);
+       if (!dsi->debugfs_files) {
+               err = -ENOMEM;
+               goto remove;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
+               dsi->debugfs_files[i].data = dsi;
+
+       err = drm_debugfs_create_files(dsi->debugfs_files,
+                                      ARRAY_SIZE(debugfs_files),
+                                      dsi->debugfs, minor);
+       if (err < 0)
+               goto free;
+
+       dsi->minor = minor;
+
+       return 0;
+
+free:
+       kfree(dsi->debugfs_files);
+       dsi->debugfs_files = NULL;
+remove:
+       debugfs_remove(dsi->debugfs);
+       dsi->debugfs = NULL;
+
+       return err;
+}
+
+static int tegra_dsi_debugfs_exit(struct tegra_dsi *dsi)
+{
+       drm_debugfs_remove_files(dsi->debugfs_files, ARRAY_SIZE(debugfs_files),
+                                dsi->minor);
+       dsi->minor = NULL;
+
+       kfree(dsi->debugfs_files);
+       dsi->debugfs_files = NULL;
+
+       debugfs_remove(dsi->debugfs);
+       dsi->debugfs = NULL;
+
+       return 0;
+}
+
+enum {
+       CMD_VS = 0x01,
+       CMD_VE = 0x11,
+
+       CMD_HS = 0x21,
+       CMD_HE = 0x31,
+
+       CMD_EOT = 0x08,
+       CMD_NULL = 0x09,
+       CMD_SHORTW = 0x15,
+       CMD_BLNK = 0x19,
+       CMD_LONGW = 0x39,
+
+       CMD_RGB = 0x00,
+       CMD_RGB_16BPP = 0x0E,
+       CMD_RGB_18BPP = 0x1E,
+       CMD_RGB_18BPPNP = 0x2E,
+       CMD_RGB_24BPP = 0x3E,
+};
+
+#define PKT_ID0(id)    ((((id) & 0x3f) << 3) | (1 << 9))
+#define PKT_LEN0(len)  (((len) & 0x7) << 0)
+#define PKT_ID1(id)    ((((id) & 0x3f) << 13) | (1 << 19))
+#define PKT_LEN1(len)  (((len) & 0x7) << 10)
+#define PKT_ID2(id)    ((((id) & 0x3f) << 23) | (1 << 29))
+#define PKT_LEN2(len)  (((len) & 0x7) << 20)
+#define PKT_ID3(id)    ((((id) & 0x3f) << 3) | (1 << 9))
+#define PKT_LEN3(len)  (((len) & 0x7) << 0)
+#define PKT_ID4(id)    ((((id) & 0x3f) << 13) | (1 << 19))
+#define PKT_LEN4(len)  (((len) & 0x7) << 10)
+#define PKT_ID5(id)    ((((id) & 0x3f) << 23) | (1 << 29))
+#define PKT_LEN5(len)  (((len) & 0x7) << 20)
+#define PKT_LP         (1 << 30)
+#define NUM_PKT_SEQ    12
+
+/* non-burst mode with sync-end */
+static const u32 pkt_seq_vnb_syne[NUM_PKT_SEQ] = {
+       PKT_ID0(CMD_VS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+       PKT_ID2(CMD_HE) | PKT_LEN2(0) | PKT_LP,
+       0,
+       PKT_ID0(CMD_VE) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+       PKT_ID2(CMD_HE) | PKT_LEN2(0) | PKT_LP,
+       0,
+       PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+       PKT_ID2(CMD_HE) | PKT_LEN2(0) | PKT_LP,
+       0,
+       PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+       PKT_ID2(CMD_HE) | PKT_LEN2(0),
+       PKT_ID3(CMD_BLNK) | PKT_LEN3(2) | PKT_ID4(CMD_RGB_24BPP) | PKT_LEN4(3) |
+       PKT_ID5(CMD_BLNK) | PKT_LEN5(4),
+       PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+       PKT_ID2(CMD_HE) | PKT_LEN2(0) | PKT_LP,
+       0,
+       PKT_ID0(CMD_HS) | PKT_LEN0(0) | PKT_ID1(CMD_BLNK) | PKT_LEN1(1) |
+       PKT_ID2(CMD_HE) | PKT_LEN2(0),
+       PKT_ID3(CMD_BLNK) | PKT_LEN3(2) | PKT_ID4(CMD_RGB_24BPP) | PKT_LEN4(3) |
+       PKT_ID5(CMD_BLNK) | PKT_LEN5(4),
+};
+
+static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
+{
+       struct dsi_phy_timing timing;
+       unsigned long value, period;
+       long rate;
+       int err;
+
+       rate = clk_get_rate(dsi->clk);
+       if (rate < 0)
+               return rate;
+
+       period = DIV_ROUND_CLOSEST(1000000000UL, rate * 2);
+
+       err = dsi_phy_timing_get_default(&timing, period);
+       if (err < 0)
+               return err;
+
+       err = dsi_phy_timing_validate(&timing, period);
+       if (err < 0) {
+               dev_err(dsi->dev, "failed to validate D-PHY timing: %d\n", err);
+               return err;
+       }
+
+       /*
+        * The D-PHY timing fields below are expressed in byte-clock cycles,
+        * so multiply the period by 8.
+        */
+       period *= 8;
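+       /*
+        * For example, assuming a hypothetical 500 MHz DSI clock, the bit
+        * period computed above is 1 ns, so the byte-clock period used below
+        * is 8 ns and DSI_TIMING_FIELD(timing.hsexit, period, 1) evaluates
+        * to DIV_ROUND_CLOSEST(120, 8) - 1 = 14.
+        */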
+
+       value = DSI_TIMING_FIELD(timing.hsexit, period, 1) << 24 |
+               DSI_TIMING_FIELD(timing.hstrail, period, 0) << 16 |
+               DSI_TIMING_FIELD(timing.hszero, period, 3) << 8 |
+               DSI_TIMING_FIELD(timing.hsprepare, period, 1);
+       tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_0);
+
+       value = DSI_TIMING_FIELD(timing.clktrail, period, 1) << 24 |
+               DSI_TIMING_FIELD(timing.clkpost, period, 1) << 16 |
+               DSI_TIMING_FIELD(timing.clkzero, period, 1) << 8 |
+               DSI_TIMING_FIELD(timing.lpx, period, 1);
+       tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_1);
+
+       value = DSI_TIMING_FIELD(timing.clkprepare, period, 1) << 16 |
+               DSI_TIMING_FIELD(timing.clkpre, period, 1) << 8 |
+               DSI_TIMING_FIELD(0xff * period, period, 0) << 0;
+       tegra_dsi_writel(dsi, value, DSI_PHY_TIMING_2);
+
+       value = DSI_TIMING_FIELD(timing.taget, period, 1) << 16 |
+               DSI_TIMING_FIELD(timing.tasure, period, 1) << 8 |
+               DSI_TIMING_FIELD(timing.tago, period, 1);
+       tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
+
+       return 0;
+}
+
+static int tegra_dsi_get_muldiv(enum dsi_format format, unsigned int *mulp,
+                               unsigned int *divp)
+{
+       switch (format) {
+       case DSI_FORMAT_16P:
+               *mulp = 2;
+               *divp = 1;
+               break;
+
+       case DSI_FORMAT_18NP:
+               *mulp = 9;
+               *divp = 4;
+               break;
+
+       case DSI_FORMAT_18P:
+       case DSI_FORMAT_24P:
+               *mulp = 3;
+               *divp = 1;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int tegra_output_dsi_enable(struct tegra_output *output)
+{
+       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+       struct drm_display_mode *mode = &dc->base.mode;
+       unsigned int hact, hsw, hbp, hfp, i, mul, div;
+       struct tegra_dsi *dsi = to_dsi(output);
+       /* FIXME: don't hardcode this */
+       const u32 *pkt_seq = pkt_seq_vnb_syne;
+       unsigned long value;
+       int err;
+
+       err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+       if (err < 0)
+               return err;
+
+       err = clk_enable(dsi->clk);
+       if (err < 0)
+               return err;
+
+       tegra_periph_reset_deassert(dsi->clk);
+
+       value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(dsi->format) |
+               DSI_CONTROL_LANES(dsi->lanes - 1) | DSI_CONTROL_SOURCE(0);
+       tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+       tegra_dc_writel(dc, DSI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
+
+       value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
+               PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
+       tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
+
+       tegra_dc_writel(dc, DISP_CTRL_MODE_C_DISPLAY, DC_CMD_DISPLAY_COMMAND);
+       tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
+       tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+
+       tegra_dsi_writel(dsi, 0x000001e0, DSI_MAX_THRESHOLD);
+
+       value = DSI_HOST_CONTROL_HS | DSI_HOST_CONTROL_CS |
+               DSI_HOST_CONTROL_ECC;
+       tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+       value = tegra_dsi_readl(dsi, DSI_CONTROL);
+       value |= DSI_CONTROL_HS_CLK_CTRL;
+       value &= ~DSI_CONTROL_TX_TRIG(3);
+       value &= ~DSI_CONTROL_DCS_ENABLE;
+       value |= DSI_CONTROL_VIDEO_ENABLE;
+       value &= ~DSI_CONTROL_HOST_ENABLE;
+       tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+       err = tegra_dsi_set_phy_timing(dsi);
+       if (err < 0)
+               return err;
+
+       for (i = 0; i < NUM_PKT_SEQ; i++)
+               tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
+
+       /* horizontal active pixels */
+       hact = mode->hdisplay * mul / div;
+
+       /* horizontal sync width */
+       hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
+       hsw -= 10;
+
+       /* horizontal back porch */
+       hbp = (mode->htotal - mode->hsync_end) * mul / div;
+       hbp -= 14;
+
+       /* horizontal front porch */
+       hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+       hfp -= 8;
+
+       tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
+       tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
+       tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
+       tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
+
+       /* set SOL delay */
+       tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+
+       return 0;
+}
+
+static int tegra_output_dsi_disable(struct tegra_output *output)
+{
+       struct tegra_dsi *dsi = to_dsi(output);
+
+       clk_disable(dsi->clk);
+
+       return 0;
+}
+
+static int tegra_output_dsi_setup_clock(struct tegra_output *output,
+                                       struct clk *clk, unsigned long pclk)
+{
+       struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+       struct drm_display_mode *mode = &dc->base.mode;
+       struct tegra_dsi *dsi = to_dsi(output);
+       unsigned long bclk, plld, value;
+       unsigned int timeout, mul, div;
+       struct clk *base;
+       int err;
+
+       err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
+       if (err < 0)
+               return err;
+
+       pclk = mode->htotal * mode->vtotal * mode->vrefresh;
+       bclk = (pclk * mul) / (div * dsi->lanes);
+       plld = DIV_ROUND_UP(bclk * 8, 1000000);
+       pclk = (plld * 1000000) / 2;
+
+       err = clk_set_parent(clk, dsi->clk_parent);
+       if (err < 0) {
+               dev_err(dsi->dev, "failed to set parent clock: %d\n", err);
+               return err;
+       }
+
+       base = clk_get_parent(dsi->clk_parent);
+
+       /*
+        * This assumes that the parent clock is pll_d_out0 or pll_d2_out
+        * respectively, each of which divides the base pll_d by 2.
+        */
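+       /*
+        * As a worked example (assuming a hypothetical 154 MHz pixel clock,
+        * four lanes and 24-bit packed pixels): bclk = 154 MHz * 3 / 4 =
+        * 115.5 MHz, plld = DIV_ROUND_UP(115.5 MHz * 8, 1 MHz) = 924, so
+        * pll_d is programmed to 924 MHz and its /2 output provides the
+        * 462 MHz rate computed above.
+        */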
+       err = clk_set_rate(base, pclk * 2);
+       if (err < 0) {
+               dev_err(dsi->dev, "failed to set base clock rate to %lu Hz\n",
+                       pclk * 2);
+               return err;
+       }
+
+       /* one frame high-speed transmission timeout */
+       timeout = (bclk / mode->vrefresh) / 512;
+       value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+       tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+       /* 2 ms peripheral timeout for panel */
+       timeout = 2 * bclk / 512 * 1000;
+       value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+       tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+       value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+       tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+       return 0;
+}
+
+static int tegra_output_dsi_check_mode(struct tegra_output *output,
+                                      struct drm_display_mode *mode,
+                                      enum drm_mode_status *status)
+{
+       /*
+        * FIXME: For now, always assume that the mode is okay.
+        */
+
+       *status = MODE_OK;
+
+       return 0;
+}
+
+static const struct tegra_output_ops dsi_ops = {
+       .enable = tegra_output_dsi_enable,
+       .disable = tegra_output_dsi_disable,
+       .setup_clock = tegra_output_dsi_setup_clock,
+       .check_mode = tegra_output_dsi_check_mode,
+};
+
+static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
+{
+       unsigned long value;
+
+       value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
+       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
+
+       return 0;
+}
+
+static int tegra_dsi_init(struct host1x_client *client)
+{
+       struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+       struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+       unsigned long value, i;
+       int err;
+
+       dsi->output.type = TEGRA_OUTPUT_DSI;
+       dsi->output.dev = client->dev;
+       dsi->output.ops = &dsi_ops;
+
+       err = tegra_output_init(tegra->drm, &dsi->output);
+       if (err < 0) {
+               dev_err(client->dev, "output setup failed: %d\n", err);
+               return err;
+       }
+
+       if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+               err = tegra_dsi_debugfs_init(dsi, tegra->drm->primary);
+               if (err < 0)
+                       dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
+       }
+
+       /*
+        * enable high-speed mode, checksum generation, ECC generation and
+        * disable raw mode
+        */
+       value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
+       value |= DSI_HOST_CONTROL_ECC | DSI_HOST_CONTROL_CS |
+                DSI_HOST_CONTROL_HS;
+       value &= ~DSI_HOST_CONTROL_RAW;
+       tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+       tegra_dsi_writel(dsi, 0, DSI_SOL_DELAY);
+       tegra_dsi_writel(dsi, 0, DSI_MAX_THRESHOLD);
+
+       tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_CONTROL);
+
+       for (i = 0; i < 8; i++) {
+               tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_0 + i);
+               tegra_dsi_writel(dsi, 0, DSI_INIT_SEQ_DATA_8 + i);
+       }
+
+       for (i = 0; i < 12; i++)
+               tegra_dsi_writel(dsi, 0, DSI_PKT_SEQ_0_LO + i);
+
+       tegra_dsi_writel(dsi, 0, DSI_DCS_CMDS);
+
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
+       tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
+
+       /* start calibration */
+       tegra_dsi_pad_enable(dsi);
+
+       value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
+               DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
+               DSI_PAD_OUT_CLK(0x0);
+       tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
+
+       err = tegra_mipi_calibrate(dsi->dev);
+       if (err < 0) {
+               dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+               return err;
+       }
+
+       tegra_dsi_writel(dsi, DSI_POWER_CONTROL_ENABLE, DSI_POWER_CONTROL);
+       usleep_range(300, 1000);
+
+       return 0;
+}
+
+static int tegra_dsi_exit(struct host1x_client *client)
+{
+       struct tegra_dsi *dsi = host1x_client_to_dsi(client);
+       int err;
+
+       if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+               err = tegra_dsi_debugfs_exit(dsi);
+               if (err < 0)
+                       dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
+       }
+
+       err = tegra_output_disable(&dsi->output);
+       if (err < 0) {
+               dev_err(client->dev, "output failed to disable: %d\n", err);
+               return err;
+       }
+
+       err = tegra_output_exit(&dsi->output);
+       if (err < 0) {
+               dev_err(client->dev, "output cleanup failed: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static const struct host1x_client_ops dsi_client_ops = {
+       .init = tegra_dsi_init,
+       .exit = tegra_dsi_exit,
+};
+
+static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
+{
+       struct clk *parent;
+       int err;
+
+       parent = clk_get_parent(dsi->clk);
+       if (!parent)
+               return -EINVAL;
+
+       err = clk_set_parent(parent, dsi->clk_parent);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static int tegra_dsi_probe(struct platform_device *pdev)
+{
+       struct tegra_dsi *dsi;
+       struct resource *regs;
+       int err;
+
+       dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
+       if (!dsi)
+               return -ENOMEM;
+
+       dsi->output.dev = dsi->dev = &pdev->dev;
+
+       err = tegra_output_probe(&dsi->output);
+       if (err < 0)
+               return err;
+
+       /*
+        * FIXME: Don't hardcode these. Perhaps they should be queried from
+        *        the panel or from the DSI interface's DT node.
+        */
+       dsi->format = DSI_FORMAT_24P;
+       dsi->lanes = 4;
+
+       dsi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(dsi->clk))
+               return PTR_ERR(dsi->clk);
+
+       err = clk_prepare_enable(dsi->clk);
+       if (err < 0)
+               return err;
+
+       dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
+       if (IS_ERR(dsi->clk_parent))
+               return PTR_ERR(dsi->clk_parent);
+
+       err = clk_prepare_enable(dsi->clk_parent);
+       if (err < 0)
+               return err;
+
+       err = tegra_dsi_setup_clocks(dsi);
+       if (err < 0)
+               return err;
+
+       regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
+       if (IS_ERR(dsi->regs))
+               return PTR_ERR(dsi->regs);
+
+       INIT_LIST_HEAD(&dsi->client.list);
+       dsi->client.ops = &dsi_client_ops;
+       dsi->client.dev = &pdev->dev;
+
+       err = host1x_client_register(&dsi->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+                       err);
+               return err;
+       }
+
+       platform_set_drvdata(pdev, dsi);
+
+       return 0;
+}
+
+static int tegra_dsi_remove(struct platform_device *pdev)
+{
+       struct tegra_dsi *dsi = platform_get_drvdata(pdev);
+       int err;
+
+       err = host1x_client_unregister(&dsi->client);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+                       err);
+               return err;
+       }
+
+       clk_disable_unprepare(dsi->clk_parent);
+       clk_disable_unprepare(dsi->clk);
+
+       err = tegra_output_remove(&dsi->output);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to remove output: %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id tegra_dsi_of_match[] = {
+       { .compatible = "nvidia,tegra114-dsi", },
+       { },
+};
+
+struct platform_driver tegra_dsi_driver = {
+       .driver = {
+               .name = "tegra-dsi",
+               .of_match_table = tegra_dsi_of_match,
+       },
+       .probe = tegra_dsi_probe,
+       .remove = tegra_dsi_remove,
+};
similarity index 92%
rename from drivers/gpu/host1x/drm/fb.c
rename to drivers/gpu/drm/tegra/fb.c
index 979a3e32b78bbb0f5da3126b6b406eebae6ef497..490f7719e317ed80319f4961a69d3e349d1ad333 100644 (file)
@@ -10,8 +10,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
-
 #include "drm.h"
 #include "gem.h"
 
@@ -36,6 +34,26 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
        return fb->planes[index];
 }
 
+bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer)
+{
+       struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+       if (fb->planes[0]->flags & TEGRA_BO_BOTTOM_UP)
+               return true;
+
+       return false;
+}
+
+bool tegra_fb_is_tiled(struct drm_framebuffer *framebuffer)
+{
+       struct tegra_fb *fb = to_tegra_fb(framebuffer);
+
+       if (fb->planes[0]->flags & TEGRA_BO_TILED)
+               return true;
+
+       return false;
+}
+
 static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
 {
        struct tegra_fb *fb = to_tegra_fb(framebuffer);
@@ -190,7 +208,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
 
        size = cmd.pitches[0] * cmd.height;
 
-       bo = tegra_bo_create(drm, size);
+       bo = tegra_bo_create(drm, size, 0);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
@@ -323,10 +341,10 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
 
 static void tegra_fb_output_poll_changed(struct drm_device *drm)
 {
-       struct host1x_drm *host1x = drm->dev_private;
+       struct tegra_drm *tegra = drm->dev_private;
 
-       if (host1x->fbdev)
-               drm_fb_helper_hotplug_event(&host1x->fbdev->base);
+       if (tegra->fbdev)
+               drm_fb_helper_hotplug_event(&tegra->fbdev->base);
 }
 
 static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
@@ -336,7 +354,7 @@ static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
 
 int tegra_drm_fb_init(struct drm_device *drm)
 {
-       struct host1x_drm *host1x = drm->dev_private;
+       struct tegra_drm *tegra = drm->dev_private;
        struct tegra_fbdev *fbdev;
 
        drm->mode_config.min_width = 0;
@@ -352,16 +370,16 @@ int tegra_drm_fb_init(struct drm_device *drm)
        if (IS_ERR(fbdev))
                return PTR_ERR(fbdev);
 
-       host1x->fbdev = fbdev;
+       tegra->fbdev = fbdev;
 
        return 0;
 }
 
 void tegra_drm_fb_exit(struct drm_device *drm)
 {
-       struct host1x_drm *host1x = drm->dev_private;
+       struct tegra_drm *tegra = drm->dev_private;
 
-       tegra_fbdev_free(host1x->fbdev);
+       tegra_fbdev_free(tegra->fbdev);
 }
 
 void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
similarity index 86%
rename from drivers/gpu/host1x/drm/gem.c
rename to drivers/gpu/drm/tegra/gem.c
index 59623de4ee15f3bf9a186d80481f358397fbf99e..28a9cbc07ab95f3a5873fc9aac0f008180bffca5 100644 (file)
  * GNU General Public License for more details.
  */
 
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/export.h>
-#include <linux/dma-mapping.h>
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
+#include <drm/tegra_drm.h>
 
 #include "gem.h"
 
-static inline struct tegra_bo *host1x_to_drm_bo(struct host1x_bo *bo)
+static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
 {
        return container_of(bo, struct tegra_bo, base);
 }
 
 static void tegra_bo_put(struct host1x_bo *bo)
 {
-       struct tegra_bo *obj = host1x_to_drm_bo(bo);
+       struct tegra_bo *obj = host1x_to_tegra_bo(bo);
        struct drm_device *drm = obj->gem.dev;
 
        mutex_lock(&drm->struct_mutex);
@@ -46,7 +39,7 @@ static void tegra_bo_put(struct host1x_bo *bo)
 
 static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
 {
-       struct tegra_bo *obj = host1x_to_drm_bo(bo);
+       struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
        return obj->paddr;
 }
@@ -57,7 +50,7 @@ static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
 
 static void *tegra_bo_mmap(struct host1x_bo *bo)
 {
-       struct tegra_bo *obj = host1x_to_drm_bo(bo);
+       struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
        return obj->vaddr;
 }
@@ -68,7 +61,7 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
 
 static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
 {
-       struct tegra_bo *obj = host1x_to_drm_bo(bo);
+       struct tegra_bo *obj = host1x_to_tegra_bo(bo);
 
        return obj->vaddr + page * PAGE_SIZE;
 }
@@ -80,7 +73,7 @@ static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
 
 static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
 {
-       struct tegra_bo *obj = host1x_to_drm_bo(bo);
+       struct tegra_bo *obj = host1x_to_tegra_bo(bo);
        struct drm_device *drm = obj->gem.dev;
 
        mutex_lock(&drm->struct_mutex);
@@ -106,7 +99,8 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
        dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
 }
 
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+                                unsigned long flags)
 {
        struct tegra_bo *bo;
        int err;
@@ -135,6 +129,12 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
        if (err)
                goto err_mmap;
 
+       if (flags & DRM_TEGRA_GEM_CREATE_TILED)
+               bo->flags |= TEGRA_BO_TILED;
+
+       if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
+               bo->flags |= TEGRA_BO_BOTTOM_UP;
+
        return bo;
 
 err_mmap:
@@ -149,14 +149,15 @@ err_dma:
 }
 
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
-                                           struct drm_device *drm,
-                                           unsigned int size,
-                                           unsigned int *handle)
+                                            struct drm_device *drm,
+                                            unsigned int size,
+                                            unsigned long flags,
+                                            unsigned int *handle)
 {
        struct tegra_bo *bo;
        int ret;
 
-       bo = tegra_bo_create(drm, size);
+       bo = tegra_bo_create(drm, size, flags);
        if (IS_ERR(bo))
                return bo;
 
@@ -178,7 +179,6 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
        struct tegra_bo *bo = to_tegra_bo(gem);
 
        drm_gem_free_mmap_offset(gem);
-
        drm_gem_object_release(gem);
        tegra_bo_destroy(gem->dev, bo);
 
@@ -197,8 +197,8 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
        if (args->size < args->pitch * args->height)
                args->size = args->pitch * args->height;
 
-       bo = tegra_bo_create_with_handle(file, drm, args->size,
-                                           &args->handle);
+       bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
+                                        &args->handle);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
similarity index 84%
rename from drivers/gpu/host1x/drm/gem.h
rename to drivers/gpu/drm/tegra/gem.h
index 492533a2dacb1bba249cde2ff08b63e1c855c291..7674000bf47d6696ecec6db7507926144832c772 100644 (file)
 #ifndef __HOST1X_GEM_H
 #define __HOST1X_GEM_H
 
+#include <linux/host1x.h>
+
 #include <drm/drm.h>
 #include <drm/drmP.h>
 
-#include "host1x_bo.h"
+#define TEGRA_BO_TILED     (1 << 0)
+#define TEGRA_BO_BOTTOM_UP (1 << 1)
 
 struct tegra_bo {
        struct drm_gem_object gem;
        struct host1x_bo base;
+       unsigned long flags;
        dma_addr_t paddr;
        void *vaddr;
 };
@@ -38,11 +42,13 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
 
 extern const struct host1x_bo_ops tegra_bo_ops;
 
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size);
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
+                                unsigned long flags);
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
-                                           struct drm_device *drm,
-                                           unsigned int size,
-                                           unsigned int *handle);
+                                            struct drm_device *drm,
+                                            unsigned int size,
+                                            unsigned long flags,
+                                            unsigned int *handle);
 void tegra_bo_free_object(struct drm_gem_object *gem);
 int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
                         struct drm_mode_create_dumb *args);
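
The new flags argument exists so the GEM create ioctl can forward the DRM_TEGRA_GEM_CREATE_* bits from userspace into bo->flags. The ioctl handler itself lives in drm.c and is not part of the hunks shown here, so the following is only a sketch of how the parameter is expected to be wired up, assuming the struct drm_tegra_gem_create layout from <drm/tegra_drm.h>:

/* Sketch, not an actual hunk from this diff: forward the ioctl flags into
 * the allocator so TILED/BOTTOM_UP end up in bo->flags. */
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size,
					 args->flags, &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}
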
diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c
new file mode 100644 (file)
index 0000000..73b79ba
--- /dev/null
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2012-2013, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr2d.h"
+
+struct gr2d {
+       struct tegra_drm_client client;
+       struct host1x_channel *channel;
+       struct clk *clk;
+
+       DECLARE_BITMAP(addr_regs, GR2D_NUM_REGS);
+};
+
+static inline struct gr2d *to_gr2d(struct tegra_drm_client *client)
+{
+       return container_of(client, struct gr2d, client);
+}
+
+static int gr2d_init(struct host1x_client *client)
+{
+       struct tegra_drm_client *drm = host1x_to_drm_client(client);
+       struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+       struct gr2d *gr2d = to_gr2d(drm);
+
+       gr2d->channel = host1x_channel_request(client->dev);
+       if (!gr2d->channel)
+               return -ENOMEM;
+
+       client->syncpts[0] = host1x_syncpt_request(client->dev, false);
+       if (!client->syncpts[0]) {
+               host1x_channel_free(gr2d->channel);
+               return -ENOMEM;
+       }
+
+       return tegra_drm_register_client(tegra, drm);
+}
+
+static int gr2d_exit(struct host1x_client *client)
+{
+       struct tegra_drm_client *drm = host1x_to_drm_client(client);
+       struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+       struct gr2d *gr2d = to_gr2d(drm);
+       int err;
+
+       err = tegra_drm_unregister_client(tegra, drm);
+       if (err < 0)
+               return err;
+
+       host1x_syncpt_free(client->syncpts[0]);
+       host1x_channel_free(gr2d->channel);
+
+       return 0;
+}
+
+static const struct host1x_client_ops gr2d_client_ops = {
+       .init = gr2d_init,
+       .exit = gr2d_exit,
+};
+
+static int gr2d_open_channel(struct tegra_drm_client *client,
+                            struct tegra_drm_context *context)
+{
+       struct gr2d *gr2d = to_gr2d(client);
+
+       context->channel = host1x_channel_get(gr2d->channel);
+       if (!context->channel)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void gr2d_close_channel(struct tegra_drm_context *context)
+{
+       host1x_channel_put(context->channel);
+}
+
+static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+       struct gr2d *gr2d = dev_get_drvdata(dev);
+
+       switch (class) {
+       case HOST1X_CLASS_HOST1X:
+               if (offset == 0x2b)
+                       return 1;
+
+               break;
+
+       case HOST1X_CLASS_GR2D:
+       case HOST1X_CLASS_GR2D_SB:
+               if (offset >= GR2D_NUM_REGS)
+                       break;
+
+               if (test_bit(offset, gr2d->addr_regs))
+                       return 1;
+
+               break;
+       }
+
+       return 0;
+}
+
+static const struct tegra_drm_client_ops gr2d_ops = {
+       .open_channel = gr2d_open_channel,
+       .close_channel = gr2d_close_channel,
+       .is_addr_reg = gr2d_is_addr_reg,
+       .submit = tegra_drm_submit,
+};
+
+static const struct of_device_id gr2d_match[] = {
+       { .compatible = "nvidia,tegra30-gr2d" },
+       { .compatible = "nvidia,tegra20-gr2d" },
+       { },
+};
+
+static const u32 gr2d_addr_regs[] = {
+       GR2D_UA_BASE_ADDR,
+       GR2D_VA_BASE_ADDR,
+       GR2D_PAT_BASE_ADDR,
+       GR2D_DSTA_BASE_ADDR,
+       GR2D_DSTB_BASE_ADDR,
+       GR2D_DSTC_BASE_ADDR,
+       GR2D_SRCA_BASE_ADDR,
+       GR2D_SRCB_BASE_ADDR,
+       GR2D_SRC_BASE_ADDR_SB,
+       GR2D_DSTA_BASE_ADDR_SB,
+       GR2D_DSTB_BASE_ADDR_SB,
+       GR2D_UA_BASE_ADDR_SB,
+       GR2D_VA_BASE_ADDR_SB,
+};
+
+static int gr2d_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct host1x_syncpt **syncpts;
+       struct gr2d *gr2d;
+       unsigned int i;
+       int err;
+
+       gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
+       if (!gr2d)
+               return -ENOMEM;
+
+       syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
+       if (!syncpts)
+               return -ENOMEM;
+
+       gr2d->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(gr2d->clk)) {
+               dev_err(dev, "cannot get clock\n");
+               return PTR_ERR(gr2d->clk);
+       }
+
+       err = clk_prepare_enable(gr2d->clk);
+       if (err) {
+               dev_err(dev, "cannot turn on clock\n");
+               return err;
+       }
+
+       INIT_LIST_HEAD(&gr2d->client.base.list);
+       gr2d->client.base.ops = &gr2d_client_ops;
+       gr2d->client.base.dev = dev;
+       gr2d->client.base.class = HOST1X_CLASS_GR2D;
+       gr2d->client.base.syncpts = syncpts;
+       gr2d->client.base.num_syncpts = 1;
+
+       INIT_LIST_HEAD(&gr2d->client.list);
+       gr2d->client.ops = &gr2d_ops;
+
+       err = host1x_client_register(&gr2d->client.base);
+       if (err < 0) {
+               dev_err(dev, "failed to register host1x client: %d\n", err);
+               return err;
+       }
+
+       /* initialize address register map */
+       for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); i++)
+               set_bit(gr2d_addr_regs[i], gr2d->addr_regs);
+
+       platform_set_drvdata(pdev, gr2d);
+
+       return 0;
+}
+
+static int gr2d_remove(struct platform_device *pdev)
+{
+       struct gr2d *gr2d = platform_get_drvdata(pdev);
+       int err;
+
+       err = host1x_client_unregister(&gr2d->client.base);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+                       err);
+               return err;
+       }
+
+       clk_disable_unprepare(gr2d->clk);
+
+       return 0;
+}
+
+struct platform_driver tegra_gr2d_driver = {
+       .driver = {
+               .name = "tegra-gr2d",
+               .of_match_table = gr2d_match,
+       },
+       .probe = gr2d_probe,
+       .remove = gr2d_remove,
+};
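
The addr_regs bitmap filled at the end of gr2d_probe() backs the is_addr_reg() callback: with TEGRA_HOST1X_FIREWALL enabled (see the host1x Kconfig hunk near the end of this diff), command-stream validation asks this callback which register writes carry buffer addresses and therefore need relocation. A minimal illustration of the lookup; the wrapper below is invented for the example and is not part of the patch:

/* Illustration only: GR2D_DSTA_BASE_ADDR (0x2b) is listed in
 * gr2d_addr_regs[], so the callback reports it as an address register;
 * an unlisted offset such as 0x2a is reported as plain data. */
static bool gr2d_reg_takes_address(struct device *dev, u32 offset)
{
	return gr2d_is_addr_reg(dev, HOST1X_CLASS_GR2D, offset) != 0;
}
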
diff --git a/drivers/gpu/drm/tegra/gr2d.h b/drivers/gpu/drm/tegra/gr2d.h
new file mode 100644 (file)
index 0000000..4d7304f
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_GR2D_H
+#define TEGRA_GR2D_H
+
+#define GR2D_UA_BASE_ADDR              0x1a
+#define GR2D_VA_BASE_ADDR              0x1b
+#define GR2D_PAT_BASE_ADDR             0x26
+#define GR2D_DSTA_BASE_ADDR            0x2b
+#define GR2D_DSTB_BASE_ADDR            0x2c
+#define GR2D_DSTC_BASE_ADDR            0x2d
+#define GR2D_SRCA_BASE_ADDR            0x31
+#define GR2D_SRCB_BASE_ADDR            0x32
+#define GR2D_SRC_BASE_ADDR_SB          0x48
+#define GR2D_DSTA_BASE_ADDR_SB         0x49
+#define GR2D_DSTB_BASE_ADDR_SB         0x4a
+#define GR2D_UA_BASE_ADDR_SB           0x4b
+#define GR2D_VA_BASE_ADDR_SB           0x4c
+
+#define GR2D_NUM_REGS                  0x4d
+
+#endif
diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c
new file mode 100644 (file)
index 0000000..dc0f222
--- /dev/null
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2013 Avionic Design GmbH
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/host1x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/tegra-powergate.h>
+
+#include "drm.h"
+#include "gem.h"
+#include "gr3d.h"
+
+struct gr3d {
+       struct tegra_drm_client client;
+       struct host1x_channel *channel;
+       struct clk *clk_secondary;
+       struct clk *clk;
+
+       DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS);
+};
+
+static inline struct gr3d *to_gr3d(struct tegra_drm_client *client)
+{
+       return container_of(client, struct gr3d, client);
+}
+
+static int gr3d_init(struct host1x_client *client)
+{
+       struct tegra_drm_client *drm = host1x_to_drm_client(client);
+       struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+       struct gr3d *gr3d = to_gr3d(drm);
+
+       gr3d->channel = host1x_channel_request(client->dev);
+       if (!gr3d->channel)
+               return -ENOMEM;
+
+       client->syncpts[0] = host1x_syncpt_request(client->dev, 0);
+       if (!client->syncpts[0]) {
+               host1x_channel_free(gr3d->channel);
+               return -ENOMEM;
+       }
+
+       return tegra_drm_register_client(tegra, drm);
+}
+
+static int gr3d_exit(struct host1x_client *client)
+{
+       struct tegra_drm_client *drm = host1x_to_drm_client(client);
+       struct tegra_drm *tegra = dev_get_drvdata(client->parent);
+       struct gr3d *gr3d = to_gr3d(drm);
+       int err;
+
+       err = tegra_drm_unregister_client(tegra, drm);
+       if (err < 0)
+               return err;
+
+       host1x_syncpt_free(client->syncpts[0]);
+       host1x_channel_free(gr3d->channel);
+
+       return 0;
+}
+
+static const struct host1x_client_ops gr3d_client_ops = {
+       .init = gr3d_init,
+       .exit = gr3d_exit,
+};
+
+static int gr3d_open_channel(struct tegra_drm_client *client,
+                            struct tegra_drm_context *context)
+{
+       struct gr3d *gr3d = to_gr3d(client);
+
+       context->channel = host1x_channel_get(gr3d->channel);
+       if (!context->channel)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void gr3d_close_channel(struct tegra_drm_context *context)
+{
+       host1x_channel_put(context->channel);
+}
+
+static int gr3d_is_addr_reg(struct device *dev, u32 class, u32 offset)
+{
+       struct gr3d *gr3d = dev_get_drvdata(dev);
+
+       switch (class) {
+       case HOST1X_CLASS_HOST1X:
+               if (offset == 0x2b)
+                       return 1;
+
+               break;
+
+       case HOST1X_CLASS_GR3D:
+               if (offset >= GR3D_NUM_REGS)
+                       break;
+
+               if (test_bit(offset, gr3d->addr_regs))
+                       return 1;
+
+               break;
+       }
+
+       return 0;
+}
+
+static const struct tegra_drm_client_ops gr3d_ops = {
+       .open_channel = gr3d_open_channel,
+       .close_channel = gr3d_close_channel,
+       .is_addr_reg = gr3d_is_addr_reg,
+       .submit = tegra_drm_submit,
+};
+
+static const struct of_device_id tegra_gr3d_match[] = {
+       { .compatible = "nvidia,tegra114-gr3d" },
+       { .compatible = "nvidia,tegra30-gr3d" },
+       { .compatible = "nvidia,tegra20-gr3d" },
+       { }
+};
+
+static const u32 gr3d_addr_regs[] = {
+       GR3D_IDX_ATTRIBUTE( 0),
+       GR3D_IDX_ATTRIBUTE( 1),
+       GR3D_IDX_ATTRIBUTE( 2),
+       GR3D_IDX_ATTRIBUTE( 3),
+       GR3D_IDX_ATTRIBUTE( 4),
+       GR3D_IDX_ATTRIBUTE( 5),
+       GR3D_IDX_ATTRIBUTE( 6),
+       GR3D_IDX_ATTRIBUTE( 7),
+       GR3D_IDX_ATTRIBUTE( 8),
+       GR3D_IDX_ATTRIBUTE( 9),
+       GR3D_IDX_ATTRIBUTE(10),
+       GR3D_IDX_ATTRIBUTE(11),
+       GR3D_IDX_ATTRIBUTE(12),
+       GR3D_IDX_ATTRIBUTE(13),
+       GR3D_IDX_ATTRIBUTE(14),
+       GR3D_IDX_ATTRIBUTE(15),
+       GR3D_IDX_INDEX_BASE,
+       GR3D_QR_ZTAG_ADDR,
+       GR3D_QR_CTAG_ADDR,
+       GR3D_QR_CZ_ADDR,
+       GR3D_TEX_TEX_ADDR( 0),
+       GR3D_TEX_TEX_ADDR( 1),
+       GR3D_TEX_TEX_ADDR( 2),
+       GR3D_TEX_TEX_ADDR( 3),
+       GR3D_TEX_TEX_ADDR( 4),
+       GR3D_TEX_TEX_ADDR( 5),
+       GR3D_TEX_TEX_ADDR( 6),
+       GR3D_TEX_TEX_ADDR( 7),
+       GR3D_TEX_TEX_ADDR( 8),
+       GR3D_TEX_TEX_ADDR( 9),
+       GR3D_TEX_TEX_ADDR(10),
+       GR3D_TEX_TEX_ADDR(11),
+       GR3D_TEX_TEX_ADDR(12),
+       GR3D_TEX_TEX_ADDR(13),
+       GR3D_TEX_TEX_ADDR(14),
+       GR3D_TEX_TEX_ADDR(15),
+       GR3D_DW_MEMORY_OUTPUT_ADDRESS,
+       GR3D_GLOBAL_SURFADDR( 0),
+       GR3D_GLOBAL_SURFADDR( 1),
+       GR3D_GLOBAL_SURFADDR( 2),
+       GR3D_GLOBAL_SURFADDR( 3),
+       GR3D_GLOBAL_SURFADDR( 4),
+       GR3D_GLOBAL_SURFADDR( 5),
+       GR3D_GLOBAL_SURFADDR( 6),
+       GR3D_GLOBAL_SURFADDR( 7),
+       GR3D_GLOBAL_SURFADDR( 8),
+       GR3D_GLOBAL_SURFADDR( 9),
+       GR3D_GLOBAL_SURFADDR(10),
+       GR3D_GLOBAL_SURFADDR(11),
+       GR3D_GLOBAL_SURFADDR(12),
+       GR3D_GLOBAL_SURFADDR(13),
+       GR3D_GLOBAL_SURFADDR(14),
+       GR3D_GLOBAL_SURFADDR(15),
+       GR3D_GLOBAL_SPILLSURFADDR,
+       GR3D_GLOBAL_SURFOVERADDR( 0),
+       GR3D_GLOBAL_SURFOVERADDR( 1),
+       GR3D_GLOBAL_SURFOVERADDR( 2),
+       GR3D_GLOBAL_SURFOVERADDR( 3),
+       GR3D_GLOBAL_SURFOVERADDR( 4),
+       GR3D_GLOBAL_SURFOVERADDR( 5),
+       GR3D_GLOBAL_SURFOVERADDR( 6),
+       GR3D_GLOBAL_SURFOVERADDR( 7),
+       GR3D_GLOBAL_SURFOVERADDR( 8),
+       GR3D_GLOBAL_SURFOVERADDR( 9),
+       GR3D_GLOBAL_SURFOVERADDR(10),
+       GR3D_GLOBAL_SURFOVERADDR(11),
+       GR3D_GLOBAL_SURFOVERADDR(12),
+       GR3D_GLOBAL_SURFOVERADDR(13),
+       GR3D_GLOBAL_SURFOVERADDR(14),
+       GR3D_GLOBAL_SURFOVERADDR(15),
+       GR3D_GLOBAL_SAMP01SURFADDR( 0),
+       GR3D_GLOBAL_SAMP01SURFADDR( 1),
+       GR3D_GLOBAL_SAMP01SURFADDR( 2),
+       GR3D_GLOBAL_SAMP01SURFADDR( 3),
+       GR3D_GLOBAL_SAMP01SURFADDR( 4),
+       GR3D_GLOBAL_SAMP01SURFADDR( 5),
+       GR3D_GLOBAL_SAMP01SURFADDR( 6),
+       GR3D_GLOBAL_SAMP01SURFADDR( 7),
+       GR3D_GLOBAL_SAMP01SURFADDR( 8),
+       GR3D_GLOBAL_SAMP01SURFADDR( 9),
+       GR3D_GLOBAL_SAMP01SURFADDR(10),
+       GR3D_GLOBAL_SAMP01SURFADDR(11),
+       GR3D_GLOBAL_SAMP01SURFADDR(12),
+       GR3D_GLOBAL_SAMP01SURFADDR(13),
+       GR3D_GLOBAL_SAMP01SURFADDR(14),
+       GR3D_GLOBAL_SAMP01SURFADDR(15),
+       GR3D_GLOBAL_SAMP23SURFADDR( 0),
+       GR3D_GLOBAL_SAMP23SURFADDR( 1),
+       GR3D_GLOBAL_SAMP23SURFADDR( 2),
+       GR3D_GLOBAL_SAMP23SURFADDR( 3),
+       GR3D_GLOBAL_SAMP23SURFADDR( 4),
+       GR3D_GLOBAL_SAMP23SURFADDR( 5),
+       GR3D_GLOBAL_SAMP23SURFADDR( 6),
+       GR3D_GLOBAL_SAMP23SURFADDR( 7),
+       GR3D_GLOBAL_SAMP23SURFADDR( 8),
+       GR3D_GLOBAL_SAMP23SURFADDR( 9),
+       GR3D_GLOBAL_SAMP23SURFADDR(10),
+       GR3D_GLOBAL_SAMP23SURFADDR(11),
+       GR3D_GLOBAL_SAMP23SURFADDR(12),
+       GR3D_GLOBAL_SAMP23SURFADDR(13),
+       GR3D_GLOBAL_SAMP23SURFADDR(14),
+       GR3D_GLOBAL_SAMP23SURFADDR(15),
+};
+
+static int gr3d_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct host1x_syncpt **syncpts;
+       struct gr3d *gr3d;
+       unsigned int i;
+       int err;
+
+       gr3d = devm_kzalloc(&pdev->dev, sizeof(*gr3d), GFP_KERNEL);
+       if (!gr3d)
+               return -ENOMEM;
+
+       syncpts = devm_kzalloc(&pdev->dev, sizeof(*syncpts), GFP_KERNEL);
+       if (!syncpts)
+               return -ENOMEM;
+
+       gr3d->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(gr3d->clk)) {
+               dev_err(&pdev->dev, "cannot get clock\n");
+               return PTR_ERR(gr3d->clk);
+       }
+
+       if (of_device_is_compatible(np, "nvidia,tegra30-gr3d")) {
+               gr3d->clk_secondary = devm_clk_get(&pdev->dev, "3d2");
+               if (IS_ERR(gr3d->clk_secondary)) {
+                       dev_err(&pdev->dev, "cannot get secondary clock\n");
+                       return PTR_ERR(gr3d->clk_secondary);

+               }
+       }
+
+       err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D, gr3d->clk);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to power up 3D unit\n");
+               return err;
+       }
+
+       if (gr3d->clk_secondary) {
+               err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_3D1,
+                                                       gr3d->clk_secondary);
+               if (err < 0) {
+                       dev_err(&pdev->dev,
+                               "failed to power up secondary 3D unit\n");
+                       return err;
+               }
+       }
+
+       INIT_LIST_HEAD(&gr3d->client.base.list);
+       gr3d->client.base.ops = &gr3d_client_ops;
+       gr3d->client.base.dev = &pdev->dev;
+       gr3d->client.base.class = HOST1X_CLASS_GR3D;
+       gr3d->client.base.syncpts = syncpts;
+       gr3d->client.base.num_syncpts = 1;
+
+       INIT_LIST_HEAD(&gr3d->client.list);
+       gr3d->client.ops = &gr3d_ops;
+
+       err = host1x_client_register(&gr3d->client.base);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+                       err);
+               return err;
+       }
+
+       /* initialize address register map */
+       for (i = 0; i < ARRAY_SIZE(gr3d_addr_regs); i++)
+               set_bit(gr3d_addr_regs[i], gr3d->addr_regs);
+
+       platform_set_drvdata(pdev, gr3d);
+
+       return 0;
+}
+
+static int gr3d_remove(struct platform_device *pdev)
+{
+       struct gr3d *gr3d = platform_get_drvdata(pdev);
+       int err;
+
+       err = host1x_client_unregister(&gr3d->client.base);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
+                       err);
+               return err;
+       }
+
+       if (gr3d->clk_secondary) {
+               tegra_powergate_power_off(TEGRA_POWERGATE_3D1);
+               clk_disable_unprepare(gr3d->clk_secondary);
+       }
+
+       tegra_powergate_power_off(TEGRA_POWERGATE_3D);
+       clk_disable_unprepare(gr3d->clk);
+
+       return 0;
+}
+
+struct platform_driver tegra_gr3d_driver = {
+       .driver = {
+               .name = "tegra-gr3d",
+               .of_match_table = tegra_gr3d_match,
+       },
+       .probe = gr3d_probe,
+       .remove = gr3d_remove,
+};
diff --git a/drivers/gpu/drm/tegra/gr3d.h b/drivers/gpu/drm/tegra/gr3d.h
new file mode 100644 (file)
index 0000000..0c30a13
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef TEGRA_GR3D_H
+#define TEGRA_GR3D_H
+
+#define GR3D_IDX_ATTRIBUTE(x)          (0x100 + (x) * 2)
+#define GR3D_IDX_INDEX_BASE            0x121
+#define GR3D_QR_ZTAG_ADDR              0x415
+#define GR3D_QR_CTAG_ADDR              0x417
+#define GR3D_QR_CZ_ADDR                        0x419
+#define GR3D_TEX_TEX_ADDR(x)           (0x710 + (x))
+#define GR3D_DW_MEMORY_OUTPUT_ADDRESS  0x904
+#define GR3D_GLOBAL_SURFADDR(x)                (0xe00 + (x))
+#define GR3D_GLOBAL_SPILLSURFADDR      0xe2a
+#define GR3D_GLOBAL_SURFOVERADDR(x)    (0xe30 + (x))
+#define GR3D_GLOBAL_SAMP01SURFADDR(x)  (0xe50 + (x))
+#define GR3D_GLOBAL_SAMP23SURFADDR(x)  (0xe60 + (x))
+
+#define GR3D_NUM_REGS                  0xe88
+
+#endif
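
For reference, the parameterized offsets above expand to plain arithmetic, e.g. GR3D_IDX_ATTRIBUTE(15) = 0x100 + 15 * 2 = 0x11e, GR3D_TEX_TEX_ADDR(15) = 0x710 + 15 = 0x71f and GR3D_GLOBAL_SAMP23SURFADDR(15) = 0xe60 + 15 = 0xe6f. All of them stay below GR3D_NUM_REGS (0xe88), which is why DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS) in gr3d.c is large enough to hold every entry of gr3d_addr_regs[].
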
similarity index 83%
rename from drivers/gpu/host1x/drm/hdmi.c
rename to drivers/gpu/drm/tegra/hdmi.c
index 644d95c7d489997b9305f6eced5e62b638798a54..0cd9bc2056e8c6bfe12cd15c5722c6f18d087e78 100644 (file)
@@ -8,21 +8,33 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clk/tegra.h>
 #include <linux/debugfs.h>
-#include <linux/gpio.h>
 #include <linux/hdmi.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
-#include <linux/clk/tegra.h>
-
-#include <drm/drm_edid.h>
 
 #include "hdmi.h"
 #include "drm.h"
 #include "dc.h"
-#include "host1x_client.h"
+
+struct tmds_config {
+       unsigned int pclk;
+       u32 pll0;
+       u32 pll1;
+       u32 pe_current;
+       u32 drive_current;
+       u32 peak_current;
+};
+
+struct tegra_hdmi_config {
+       const struct tmds_config *tmds;
+       unsigned int num_tmds;
+
+       unsigned long fuse_override_offset;
+       unsigned long fuse_override_value;
+
+       bool has_sor_io_peak_current;
+};
 
 struct tegra_hdmi {
        struct host1x_client client;
@@ -38,6 +50,8 @@ struct tegra_hdmi {
        struct clk *clk_parent;
        struct clk *clk;
 
+       const struct tegra_hdmi_config *config;
+
        unsigned int audio_source;
        unsigned int audio_freq;
        bool stereo;
@@ -143,15 +157,7 @@ static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
        {         0,     0,      0,     0 },
 };
 
-struct tmds_config {
-       unsigned int pclk;
-       u32 pll0;
-       u32 pll1;
-       u32 pe_current;
-       u32 drive_current;
-};
-
-static const struct tmds_config tegra2_tmds_config[] = {
+static const struct tmds_config tegra20_tmds_config[] = {
        { /* slow pixel clock modes */
                .pclk = 27000000,
                .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -184,7 +190,7 @@ static const struct tmds_config tegra2_tmds_config[] = {
        },
 };
 
-static const struct tmds_config tegra3_tmds_config[] = {
+static const struct tmds_config tegra30_tmds_config[] = {
        { /* 480p modes */
                .pclk = 27000000,
                .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
@@ -230,6 +236,85 @@ static const struct tmds_config tegra3_tmds_config[] = {
        },
 };
 
+static const struct tmds_config tegra114_tmds_config[] = {
+       { /* 480p/576p / 25.2MHz/27MHz modes */
+               .pclk = 27000000,
+               .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+                       SOR_PLL_VCOCAP(0) | SOR_PLL_RESISTORSEL,
+               .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(0),
+               .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+                       PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+                       PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+                       PE_CURRENT3(PE_CURRENT_0_mA_T114),
+               .drive_current =
+                       DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+                       DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+                       DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+                       DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+               .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+                       PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+                       PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+                       PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+       }, { /* 720p / 74.25MHz modes */
+               .pclk = 74250000,
+               .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+                       SOR_PLL_VCOCAP(1) | SOR_PLL_RESISTORSEL,
+               .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+                       SOR_PLL_TMDS_TERMADJ(0),
+               .pe_current = PE_CURRENT0(PE_CURRENT_15_mA_T114) |
+                       PE_CURRENT1(PE_CURRENT_15_mA_T114) |
+                       PE_CURRENT2(PE_CURRENT_15_mA_T114) |
+                       PE_CURRENT3(PE_CURRENT_15_mA_T114),
+               .drive_current =
+                       DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_10_400_mA_T114) |
+                       DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_10_400_mA_T114) |
+                       DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_10_400_mA_T114) |
+                       DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_10_400_mA_T114),
+               .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+                       PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+                       PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+                       PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+       }, { /* 1080p / 148.5MHz modes */
+               .pclk = 148500000,
+               .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+                       SOR_PLL_VCOCAP(3) | SOR_PLL_RESISTORSEL,
+               .pll1 = SOR_PLL_PE_EN | SOR_PLL_LOADADJ(3) |
+                       SOR_PLL_TMDS_TERMADJ(0),
+               .pe_current = PE_CURRENT0(PE_CURRENT_10_mA_T114) |
+                       PE_CURRENT1(PE_CURRENT_10_mA_T114) |
+                       PE_CURRENT2(PE_CURRENT_10_mA_T114) |
+                       PE_CURRENT3(PE_CURRENT_10_mA_T114),
+               .drive_current =
+                       DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_12_400_mA_T114) |
+                       DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_12_400_mA_T114) |
+                       DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_12_400_mA_T114) |
+                       DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_12_400_mA_T114),
+               .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_0_000_mA) |
+                       PEAK_CURRENT_LANE1(PEAK_CURRENT_0_000_mA) |
+                       PEAK_CURRENT_LANE2(PEAK_CURRENT_0_000_mA) |
+                       PEAK_CURRENT_LANE3(PEAK_CURRENT_0_000_mA),
+       }, { /* 225/297MHz modes */
+               .pclk = UINT_MAX,
+               .pll0 = SOR_PLL_ICHPMP(1) | SOR_PLL_BG_V17_S(3) |
+                       SOR_PLL_VCOCAP(0xf) | SOR_PLL_RESISTORSEL,
+               .pll1 = SOR_PLL_LOADADJ(3) | SOR_PLL_TMDS_TERMADJ(7)
+                       | SOR_PLL_TMDS_TERM_ENABLE,
+               .pe_current = PE_CURRENT0(PE_CURRENT_0_mA_T114) |
+                       PE_CURRENT1(PE_CURRENT_0_mA_T114) |
+                       PE_CURRENT2(PE_CURRENT_0_mA_T114) |
+                       PE_CURRENT3(PE_CURRENT_0_mA_T114),
+               .drive_current =
+                       DRIVE_CURRENT_LANE0_T114(DRIVE_CURRENT_25_200_mA_T114) |
+                       DRIVE_CURRENT_LANE1_T114(DRIVE_CURRENT_25_200_mA_T114) |
+                       DRIVE_CURRENT_LANE2_T114(DRIVE_CURRENT_25_200_mA_T114) |
+                       DRIVE_CURRENT_LANE3_T114(DRIVE_CURRENT_19_200_mA_T114),
+               .peak_current = PEAK_CURRENT_LANE0(PEAK_CURRENT_3_000_mA) |
+                       PEAK_CURRENT_LANE1(PEAK_CURRENT_3_000_mA) |
+                       PEAK_CURRENT_LANE2(PEAK_CURRENT_3_000_mA) |
+                       PEAK_CURRENT_LANE3(PEAK_CURRENT_0_800_mA),
+       },
+};
+
 static const struct tegra_hdmi_audio_config *
 tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
 {
@@ -511,7 +596,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
 
        err = hdmi_audio_infoframe_init(&frame);
        if (err < 0) {
-               dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n",
+               dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
                        err);
                return;
        }
@@ -531,7 +616,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
         * contain 7 bytes. Including the 3 byte header only the first 10
         * bytes can be programmed.
         */
-       tegra_hdmi_write_infopack(hdmi, buffer, min(10, err));
+       tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
 
        tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
                          HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -577,8 +662,28 @@ static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
        tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
        tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
 
-       value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
-       tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+       tegra_hdmi_writel(hdmi, tmds->drive_current,
+                         HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
+
+       value = tegra_hdmi_readl(hdmi, hdmi->config->fuse_override_offset);
+       value |= hdmi->config->fuse_override_value;
+       tegra_hdmi_writel(hdmi, value, hdmi->config->fuse_override_offset);
+
+       if (hdmi->config->has_sor_io_peak_current)
+               tegra_hdmi_writel(hdmi, tmds->peak_current,
+                                 HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
+}
+
+static bool tegra_output_is_hdmi(struct tegra_output *output)
+{
+       struct edid *edid;
+
+       if (!output->connector.edid_blob_ptr)
+               return false;
+
+       edid = (struct edid *)output->connector.edid_blob_ptr->data;
+
+       return drm_detect_hdmi_monitor(edid);
 }
 
 static int tegra_output_hdmi_enable(struct tegra_output *output)
@@ -589,23 +694,17 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
        struct tegra_hdmi *hdmi = to_hdmi(output);
        struct device_node *node = hdmi->dev->of_node;
        unsigned int pulse_start, div82, pclk;
-       const struct tmds_config *tmds;
-       unsigned int num_tmds;
        unsigned long value;
        int retries = 1000;
        int err;
 
+       hdmi->dvi = !tegra_output_is_hdmi(output);
+
        pclk = mode->clock * 1000;
        h_sync_width = mode->hsync_end - mode->hsync_start;
        h_back_porch = mode->htotal - mode->hsync_end;
        h_front_porch = mode->hsync_start - mode->hdisplay;
 
-       err = regulator_enable(hdmi->vdd);
-       if (err < 0) {
-               dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
-               return err;
-       }
-
        err = regulator_enable(hdmi->pll);
        if (err < 0) {
                dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
@@ -710,17 +809,9 @@ static int tegra_output_hdmi_enable(struct tegra_output *output)
        tegra_hdmi_setup_stereo_infoframe(hdmi);
 
        /* TMDS CONFIG */
-       if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
-               num_tmds = ARRAY_SIZE(tegra3_tmds_config);
-               tmds = tegra3_tmds_config;
-       } else {
-               num_tmds = ARRAY_SIZE(tegra2_tmds_config);
-               tmds = tegra2_tmds_config;
-       }
-
-       for (i = 0; i < num_tmds; i++) {
-               if (pclk <= tmds[i].pclk) {
-                       tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
+       for (i = 0; i < hdmi->config->num_tmds; i++) {
+               if (pclk <= hdmi->config->tmds[i].pclk) {
+                       tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
                        break;
                }
        }
@@ -824,7 +915,6 @@ static int tegra_output_hdmi_disable(struct tegra_output *output)
        tegra_periph_reset_assert(hdmi->clk);
        clk_disable(hdmi->clk);
        regulator_disable(hdmi->pll);
-       regulator_disable(hdmi->vdd);
 
        return 0;
 }
@@ -1055,6 +1145,7 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
        DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
        DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
        DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
+       DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
 
 #undef DUMP_REG
 
@@ -1122,24 +1213,31 @@ static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
        return 0;
 }
 
-static int tegra_hdmi_drm_init(struct host1x_client *client,
-                              struct drm_device *drm)
+static int tegra_hdmi_init(struct host1x_client *client)
 {
+       struct tegra_drm *tegra = dev_get_drvdata(client->parent);
        struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
        int err;
 
+       err = regulator_enable(hdmi->vdd);
+       if (err < 0) {
+               dev_err(client->dev, "failed to enable VDD regulator: %d\n",
+                       err);
+               return err;
+       }
+
        hdmi->output.type = TEGRA_OUTPUT_HDMI;
        hdmi->output.dev = client->dev;
        hdmi->output.ops = &hdmi_ops;
 
-       err = tegra_output_init(drm, &hdmi->output);
+       err = tegra_output_init(tegra->drm, &hdmi->output);
        if (err < 0) {
                dev_err(client->dev, "output setup failed: %d\n", err);
                return err;
        }
 
        if (IS_ENABLED(CONFIG_DEBUG_FS)) {
-               err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
+               err = tegra_hdmi_debugfs_init(hdmi, tegra->drm->primary);
                if (err < 0)
                        dev_err(client->dev, "debugfs setup failed: %d\n", err);
        }
@@ -1147,7 +1245,7 @@ static int tegra_hdmi_drm_init(struct host1x_client *client,
        return 0;
 }
 
-static int tegra_hdmi_drm_exit(struct host1x_client *client)
+static int tegra_hdmi_exit(struct host1x_client *client)
 {
        struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
        int err;
@@ -1171,25 +1269,63 @@ static int tegra_hdmi_drm_exit(struct host1x_client *client)
                return err;
        }
 
+       regulator_disable(hdmi->vdd);
+
        return 0;
 }
 
 static const struct host1x_client_ops hdmi_client_ops = {
-       .drm_init = tegra_hdmi_drm_init,
-       .drm_exit = tegra_hdmi_drm_exit,
+       .init = tegra_hdmi_init,
+       .exit = tegra_hdmi_exit,
+};
+
+static const struct tegra_hdmi_config tegra20_hdmi_config = {
+       .tmds = tegra20_tmds_config,
+       .num_tmds = ARRAY_SIZE(tegra20_tmds_config),
+       .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+       .fuse_override_value = 1 << 31,
+       .has_sor_io_peak_current = false,
+};
+
+static const struct tegra_hdmi_config tegra30_hdmi_config = {
+       .tmds = tegra30_tmds_config,
+       .num_tmds = ARRAY_SIZE(tegra30_tmds_config),
+       .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
+       .fuse_override_value = 1 << 31,
+       .has_sor_io_peak_current = false,
+};
+
+static const struct tegra_hdmi_config tegra114_hdmi_config = {
+       .tmds = tegra114_tmds_config,
+       .num_tmds = ARRAY_SIZE(tegra114_tmds_config),
+       .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
+       .fuse_override_value = 1 << 31,
+       .has_sor_io_peak_current = true,
+};
+
+static const struct of_device_id tegra_hdmi_of_match[] = {
+       { .compatible = "nvidia,tegra114-hdmi", .data = &tegra114_hdmi_config },
+       { .compatible = "nvidia,tegra30-hdmi", .data = &tegra30_hdmi_config },
+       { .compatible = "nvidia,tegra20-hdmi", .data = &tegra20_hdmi_config },
+       { },
 };
 
 static int tegra_hdmi_probe(struct platform_device *pdev)
 {
-       struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
+       const struct of_device_id *match;
        struct tegra_hdmi *hdmi;
        struct resource *regs;
        int err;
 
+       match = of_match_node(tegra_hdmi_of_match, pdev->dev.of_node);
+       if (!match)
+               return -ENODEV;
+
        hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
        if (!hdmi)
                return -ENOMEM;
 
+       hdmi->config = match->data;
        hdmi->dev = &pdev->dev;
        hdmi->audio_source = AUTO;
        hdmi->audio_freq = 44100;
@@ -1234,7 +1370,7 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 
        hdmi->output.dev = &pdev->dev;
 
-       err = tegra_output_parse_dt(&hdmi->output);
+       err = tegra_output_probe(&hdmi->output);
        if (err < 0)
                return err;
 
@@ -1252,11 +1388,11 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 
        hdmi->irq = err;
 
-       hdmi->client.ops = &hdmi_client_ops;
        INIT_LIST_HEAD(&hdmi->client.list);
+       hdmi->client.ops = &hdmi_client_ops;
        hdmi->client.dev = &pdev->dev;
 
-       err = host1x_register_client(host1x, &hdmi->client);
+       err = host1x_client_register(&hdmi->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to register host1x client: %d\n",
                        err);
@@ -1270,29 +1406,28 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
 
 static int tegra_hdmi_remove(struct platform_device *pdev)
 {
-       struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
        struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
        int err;
 
-       err = host1x_unregister_client(host1x, &hdmi->client);
+       err = host1x_client_unregister(&hdmi->client);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
                        err);
                return err;
        }
 
+       err = tegra_output_remove(&hdmi->output);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to remove output: %d\n", err);
+               return err;
+       }
+
        clk_unprepare(hdmi->clk_parent);
        clk_unprepare(hdmi->clk);
 
        return 0;
 }
 
-static struct of_device_id tegra_hdmi_of_match[] = {
-       { .compatible = "nvidia,tegra30-hdmi", },
-       { .compatible = "nvidia,tegra20-hdmi", },
-       { },
-};
-
 struct platform_driver tegra_hdmi_driver = {
        .driver = {
                .name = "tegra-hdmi",
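
With this conversion the per-SoC HDMI parameters come from the matched of_device_id's .data pointer (hdmi->config = match->data) instead of of_device_is_compatible() checks, and tegra_output_hdmi_enable() programs the first hdmi->config->tmds[] entry whose .pclk is at least the mode's pixel clock. As a worked example against the Tegra114 table above: a 74.25 MHz 720p mode selects the second entry, a 148.5 MHz 1080p mode selects the third, and anything faster (the 225/297 MHz modes named in the table comment) falls through to the .pclk = UINT_MAX catch-all.
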
similarity index 72%
rename from drivers/gpu/host1x/drm/hdmi.h
rename to drivers/gpu/drm/tegra/hdmi.h
index 52ac36e08ccbef5ec14df106abeabcdd12efe6f2..0aebc485f7fa36be983a609f693bfbb06dcb0214 100644 (file)
 #define DRIVE_CURRENT_LANE1(x)      (((x) & 0x3f) <<  8)
 #define DRIVE_CURRENT_LANE2(x)      (((x) & 0x3f) << 16)
 #define DRIVE_CURRENT_LANE3(x)      (((x) & 0x3f) << 24)
-#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
+#define DRIVE_CURRENT_LANE0_T114(x) (((x) & 0x7f) <<  0)
+#define DRIVE_CURRENT_LANE1_T114(x) (((x) & 0x7f) <<  8)
+#define DRIVE_CURRENT_LANE2_T114(x) (((x) & 0x7f) << 16)
+#define DRIVE_CURRENT_LANE3_T114(x) (((x) & 0x7f) << 24)
 
 #define DRIVE_CURRENT_1_500_mA  0x00
 #define DRIVE_CURRENT_1_875_mA  0x01
 #define DRIVE_CURRENT_24_375_mA 0x3d
 #define DRIVE_CURRENT_24_750_mA 0x3e
 
+#define DRIVE_CURRENT_0_000_mA_T114 0x00
+#define DRIVE_CURRENT_0_400_mA_T114 0x01
+#define DRIVE_CURRENT_0_800_mA_T114 0x02
+#define DRIVE_CURRENT_1_200_mA_T114 0x03
+#define DRIVE_CURRENT_1_600_mA_T114 0x04
+#define DRIVE_CURRENT_2_000_mA_T114 0x05
+#define DRIVE_CURRENT_2_400_mA_T114 0x06
+#define DRIVE_CURRENT_2_800_mA_T114 0x07
+#define DRIVE_CURRENT_3_200_mA_T114 0x08
+#define DRIVE_CURRENT_3_600_mA_T114 0x09
+#define DRIVE_CURRENT_4_000_mA_T114 0x0a
+#define DRIVE_CURRENT_4_400_mA_T114 0x0b
+#define DRIVE_CURRENT_4_800_mA_T114 0x0c
+#define DRIVE_CURRENT_5_200_mA_T114 0x0d
+#define DRIVE_CURRENT_5_600_mA_T114 0x0e
+#define DRIVE_CURRENT_6_000_mA_T114 0x0f
+#define DRIVE_CURRENT_6_400_mA_T114 0x10
+#define DRIVE_CURRENT_6_800_mA_T114 0x11
+#define DRIVE_CURRENT_7_200_mA_T114 0x12
+#define DRIVE_CURRENT_7_600_mA_T114 0x13
+#define DRIVE_CURRENT_8_000_mA_T114 0x14
+#define DRIVE_CURRENT_8_400_mA_T114 0x15
+#define DRIVE_CURRENT_8_800_mA_T114 0x16
+#define DRIVE_CURRENT_9_200_mA_T114 0x17
+#define DRIVE_CURRENT_9_600_mA_T114 0x18
+#define DRIVE_CURRENT_10_000_mA_T114 0x19
+#define DRIVE_CURRENT_10_400_mA_T114 0x1a
+#define DRIVE_CURRENT_10_800_mA_T114 0x1b
+#define DRIVE_CURRENT_11_200_mA_T114 0x1c
+#define DRIVE_CURRENT_11_600_mA_T114 0x1d
+#define DRIVE_CURRENT_12_000_mA_T114 0x1e
+#define DRIVE_CURRENT_12_400_mA_T114 0x1f
+#define DRIVE_CURRENT_12_800_mA_T114 0x20
+#define DRIVE_CURRENT_13_200_mA_T114 0x21
+#define DRIVE_CURRENT_13_600_mA_T114 0x22
+#define DRIVE_CURRENT_14_000_mA_T114 0x23
+#define DRIVE_CURRENT_14_400_mA_T114 0x24
+#define DRIVE_CURRENT_14_800_mA_T114 0x25
+#define DRIVE_CURRENT_15_200_mA_T114 0x26
+#define DRIVE_CURRENT_15_600_mA_T114 0x27
+#define DRIVE_CURRENT_16_000_mA_T114 0x28
+#define DRIVE_CURRENT_16_400_mA_T114 0x29
+#define DRIVE_CURRENT_16_800_mA_T114 0x2a
+#define DRIVE_CURRENT_17_200_mA_T114 0x2b
+#define DRIVE_CURRENT_17_600_mA_T114 0x2c
+#define DRIVE_CURRENT_18_000_mA_T114 0x2d
+#define DRIVE_CURRENT_18_400_mA_T114 0x2e
+#define DRIVE_CURRENT_18_800_mA_T114 0x2f
+#define DRIVE_CURRENT_19_200_mA_T114 0x30
+#define DRIVE_CURRENT_19_600_mA_T114 0x31
+#define DRIVE_CURRENT_20_000_mA_T114 0x32
+#define DRIVE_CURRENT_20_400_mA_T114 0x33
+#define DRIVE_CURRENT_20_800_mA_T114 0x34
+#define DRIVE_CURRENT_21_200_mA_T114 0x35
+#define DRIVE_CURRENT_21_600_mA_T114 0x36
+#define DRIVE_CURRENT_22_000_mA_T114 0x37
+#define DRIVE_CURRENT_22_400_mA_T114 0x38
+#define DRIVE_CURRENT_22_800_mA_T114 0x39
+#define DRIVE_CURRENT_23_200_mA_T114 0x3a
+#define DRIVE_CURRENT_23_600_mA_T114 0x3b
+#define DRIVE_CURRENT_24_000_mA_T114 0x3c
+#define DRIVE_CURRENT_24_400_mA_T114 0x3d
+#define DRIVE_CURRENT_24_800_mA_T114 0x3e
+#define DRIVE_CURRENT_25_200_mA_T114 0x3f
+#define DRIVE_CURRENT_25_400_mA_T114 0x40
+#define DRIVE_CURRENT_25_800_mA_T114 0x41
+#define DRIVE_CURRENT_26_200_mA_T114 0x42
+#define DRIVE_CURRENT_26_600_mA_T114 0x43
+#define DRIVE_CURRENT_27_000_mA_T114 0x44
+#define DRIVE_CURRENT_27_400_mA_T114 0x45
+#define DRIVE_CURRENT_27_800_mA_T114 0x46
+#define DRIVE_CURRENT_28_200_mA_T114 0x47
+
 #define HDMI_NV_PDISP_AUDIO_DEBUG0                             0x7f
 #define HDMI_NV_PDISP_AUDIO_DEBUG1                             0x80
 #define HDMI_NV_PDISP_AUDIO_DEBUG2                             0x81
 #define PE_CURRENT_7_0_mA 0xe
 #define PE_CURRENT_7_5_mA 0xf
 
+#define PE_CURRENT_0_mA_T114 0x0
+#define PE_CURRENT_1_mA_T114 0x1
+#define PE_CURRENT_2_mA_T114 0x2
+#define PE_CURRENT_3_mA_T114 0x3
+#define PE_CURRENT_4_mA_T114 0x4
+#define PE_CURRENT_5_mA_T114 0x5
+#define PE_CURRENT_6_mA_T114 0x6
+#define PE_CURRENT_7_mA_T114 0x7
+#define PE_CURRENT_8_mA_T114 0x8
+#define PE_CURRENT_9_mA_T114 0x9
+#define PE_CURRENT_10_mA_T114 0xa
+#define PE_CURRENT_11_mA_T114 0xb
+#define PE_CURRENT_12_mA_T114 0xc
+#define PE_CURRENT_13_mA_T114 0xd
+#define PE_CURRENT_14_mA_T114 0xe
+#define PE_CURRENT_15_mA_T114 0xf
+
 #define HDMI_NV_PDISP_KEY_CTRL                                 0x9a
 #define HDMI_NV_PDISP_KEY_DEBUG0                               0x9b
 #define HDMI_NV_PDISP_KEY_DEBUG1                               0x9c
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920    0xc5
 #define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
 
+#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT              0xd1
+#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) <<  0)
+#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) <<  8)
+#define PEAK_CURRENT_LANE2(x) (((x) & 0x7f) << 16)
+#define PEAK_CURRENT_LANE3(x) (((x) & 0x7f) << 24)
+
+#define PEAK_CURRENT_0_000_mA 0x00
+#define PEAK_CURRENT_0_200_mA 0x01
+#define PEAK_CURRENT_0_400_mA 0x02
+#define PEAK_CURRENT_0_600_mA 0x03
+#define PEAK_CURRENT_0_800_mA 0x04
+#define PEAK_CURRENT_1_000_mA 0x05
+#define PEAK_CURRENT_1_200_mA 0x06
+#define PEAK_CURRENT_1_400_mA 0x07
+#define PEAK_CURRENT_1_600_mA 0x08
+#define PEAK_CURRENT_1_800_mA 0x09
+#define PEAK_CURRENT_2_000_mA 0x0a
+#define PEAK_CURRENT_2_200_mA 0x0b
+#define PEAK_CURRENT_2_400_mA 0x0c
+#define PEAK_CURRENT_2_600_mA 0x0d
+#define PEAK_CURRENT_2_800_mA 0x0e
+#define PEAK_CURRENT_3_000_mA 0x0f
+#define PEAK_CURRENT_3_200_mA 0x10
+#define PEAK_CURRENT_3_400_mA 0x11
+#define PEAK_CURRENT_3_600_mA 0x12
+#define PEAK_CURRENT_3_800_mA 0x13
+#define PEAK_CURRENT_4_000_mA 0x14
+#define PEAK_CURRENT_4_200_mA 0x15
+#define PEAK_CURRENT_4_400_mA 0x16
+#define PEAK_CURRENT_4_600_mA 0x17
+#define PEAK_CURRENT_4_800_mA 0x18
+#define PEAK_CURRENT_5_000_mA 0x19
+#define PEAK_CURRENT_5_200_mA 0x1a
+#define PEAK_CURRENT_5_400_mA 0x1b
+#define PEAK_CURRENT_5_600_mA 0x1c
+#define PEAK_CURRENT_5_800_mA 0x1d
+#define PEAK_CURRENT_6_000_mA 0x1e
+#define PEAK_CURRENT_6_200_mA 0x1f
+#define PEAK_CURRENT_6_400_mA 0x20
+#define PEAK_CURRENT_6_600_mA 0x21
+#define PEAK_CURRENT_6_800_mA 0x22
+#define PEAK_CURRENT_7_000_mA 0x23
+#define PEAK_CURRENT_7_200_mA 0x24
+#define PEAK_CURRENT_7_400_mA 0x25
+#define PEAK_CURRENT_7_600_mA 0x26
+#define PEAK_CURRENT_7_800_mA 0x27
+#define PEAK_CURRENT_8_000_mA 0x28
+#define PEAK_CURRENT_8_200_mA 0x29
+#define PEAK_CURRENT_8_400_mA 0x2a
+#define PEAK_CURRENT_8_600_mA 0x2b
+#define PEAK_CURRENT_8_800_mA 0x2c
+#define PEAK_CURRENT_9_000_mA 0x2d
+#define PEAK_CURRENT_9_200_mA 0x2e
+#define PEAK_CURRENT_9_400_mA 0x2f
+
+#define HDMI_NV_PDISP_SOR_PAD_CTLS0            0xd2
+
 #endif /* TEGRA_HDMI_H */
similarity index 81%
rename from drivers/gpu/host1x/drm/output.c
rename to drivers/gpu/drm/tegra/output.c
index 137ae81ab80eb164d139742b16996bca8b8922b5..b14f817214712e7db50eaf6442fa5d76720b85c9 100644 (file)
@@ -7,10 +7,9 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/module.h>
 #include <linux/of_gpio.h>
-#include <linux/i2c.h>
 
+#include <drm/drm_panel.h>
 #include "drm.h"
 
 static int tegra_connector_get_modes(struct drm_connector *connector)
@@ -19,6 +18,9 @@ static int tegra_connector_get_modes(struct drm_connector *connector)
        struct edid *edid = NULL;
        int err = 0;
 
+       if (output->panel)
+               return output->panel->funcs->get_modes(output->panel);
+
        if (output->edid)
                edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
        else if (output->ddc)
@@ -74,6 +76,9 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
                else
                        status = connector_status_connected;
        } else {
+               if (output->panel)
+                       status = connector_status_connected;
+
                if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
                        status = connector_status_connected;
        }
@@ -81,10 +86,16 @@ tegra_connector_detect(struct drm_connector *connector, bool force)
        return status;
 }
 
+static void drm_connector_clear(struct drm_connector *connector)
+{
+       memset(connector, 0, sizeof(*connector));
+}
+
 static void tegra_connector_destroy(struct drm_connector *connector)
 {
        drm_sysfs_connector_remove(connector);
        drm_connector_cleanup(connector);
+       drm_connector_clear(connector);
 }
 
 static const struct drm_connector_funcs connector_funcs = {
@@ -94,9 +105,15 @@ static const struct drm_connector_funcs connector_funcs = {
        .destroy = tegra_connector_destroy,
 };
 
+static void drm_encoder_clear(struct drm_encoder *encoder)
+{
+       memset(encoder, 0, sizeof(*encoder));
+}
+
 static void tegra_encoder_destroy(struct drm_encoder *encoder)
 {
        drm_encoder_cleanup(encoder);
+       drm_encoder_clear(encoder);
 }
 
 static const struct drm_encoder_funcs encoder_funcs = {
@@ -105,6 +122,15 @@ static const struct drm_encoder_funcs encoder_funcs = {
 
 static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
+       struct tegra_output *output = encoder_to_output(encoder);
+       struct drm_panel *panel = output->panel;
+
+       if (panel && panel->funcs) {
+               if (mode != DRM_MODE_DPMS_ON)
+                       drm_panel_disable(panel);
+               else
+                       drm_panel_enable(panel);
+       }
 }
 
 static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
@@ -151,16 +177,25 @@ static irqreturn_t hpd_irq(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-int tegra_output_parse_dt(struct tegra_output *output)
+int tegra_output_probe(struct tegra_output *output)
 {
+       struct device_node *ddc, *panel;
        enum of_gpio_flags flags;
-       struct device_node *ddc;
        size_t size;
        int err;
 
        if (!output->of_node)
                output->of_node = output->dev->of_node;
 
+       panel = of_parse_phandle(output->of_node, "nvidia,panel", 0);
+       if (panel) {
+               output->panel = of_drm_find_panel(panel);
+               if (!output->panel)
+                       return -EPROBE_DEFER;
+
+               of_node_put(panel);
+       }
+
        output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
 
        ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
@@ -175,20 +210,12 @@ int tegra_output_parse_dt(struct tegra_output *output)
                of_node_put(ddc);
        }
 
-       if (!output->edid && !output->ddc)
+       if (!output->panel && !output->edid && !output->ddc)
                return -ENODEV;
 
        output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
                                                   "nvidia,hpd-gpio", 0,
                                                   &flags);
-
-       return 0;
-}
-
-int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
-{
-       int connector, encoder, err;
-
        if (gpio_is_valid(output->hpd_gpio)) {
                unsigned long flags;
 
@@ -202,7 +229,8 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
                err = gpio_to_irq(output->hpd_gpio);
                if (err < 0) {
                        dev_err(output->dev, "gpio_to_irq(): %d\n", err);
-                       goto free_hpd;
+                       gpio_free(output->hpd_gpio);
+                       return err;
                }
 
                output->hpd_irq = err;
@@ -215,12 +243,33 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
                if (err < 0) {
                        dev_err(output->dev, "failed to request IRQ#%u: %d\n",
                                output->hpd_irq, err);
-                       goto free_hpd;
+                       gpio_free(output->hpd_gpio);
+                       return err;
                }
 
                output->connector.polled = DRM_CONNECTOR_POLL_HPD;
        }
 
+       return 0;
+}
+
+int tegra_output_remove(struct tegra_output *output)
+{
+       if (gpio_is_valid(output->hpd_gpio)) {
+               free_irq(output->hpd_irq, output);
+               gpio_free(output->hpd_gpio);
+       }
+
+       if (output->ddc)
+               put_device(&output->ddc->dev);
+
+       return 0;
+}
+
+int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
+{
+       int connector, encoder;
+
        switch (output->type) {
        case TEGRA_OUTPUT_RGB:
                connector = DRM_MODE_CONNECTOR_LVDS;
@@ -232,6 +281,11 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
                encoder = DRM_MODE_ENCODER_TMDS;
                break;
 
+       case TEGRA_OUTPUT_DSI:
+               connector = DRM_MODE_CONNECTOR_DSI;
+               encoder = DRM_MODE_ENCODER_DSI;
+               break;
+
        default:
                connector = DRM_MODE_CONNECTOR_Unknown;
                encoder = DRM_MODE_ENCODER_NONE;
@@ -241,6 +295,10 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
        drm_connector_init(drm, &output->connector, &connector_funcs,
                           connector);
        drm_connector_helper_add(&output->connector, &connector_helper_funcs);
+       output->connector.dpms = DRM_MODE_DPMS_OFF;
+
+       if (output->panel)
+               drm_panel_attach(output->panel, &output->connector);
 
        drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
        drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
@@ -251,22 +309,9 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
        output->encoder.possible_crtcs = 0x3;
 
        return 0;
-
-free_hpd:
-       gpio_free(output->hpd_gpio);
-
-       return err;
 }
 
 int tegra_output_exit(struct tegra_output *output)
 {
-       if (gpio_is_valid(output->hpd_gpio)) {
-               free_irq(output->hpd_irq, output);
-               gpio_free(output->hpd_gpio);
-       }
-
-       if (output->ddc)
-               put_device(&output->ddc->dev);
-
        return 0;
 }
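
Output handling is now split into a probe/remove pair (resource acquisition, including the optional nvidia,panel lookup that may return -EPROBE_DEFER) and an init/exit pair (DRM connector/encoder setup). The expected call order, as used by the hdmi.c and rgb.c hunks in this diff:

/*
 * tegra_output_probe()  - platform probe: parse DT, grab panel, DDC, HPD GPIO/IRQ
 * tegra_output_init()   - host1x client init: create connector and encoder
 * tegra_output_exit()   - host1x client exit
 * tegra_output_remove() - platform remove: free HPD IRQ/GPIO, drop DDC reference
 */
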
similarity index 96%
rename from drivers/gpu/host1x/drm/rgb.c
rename to drivers/gpu/drm/tegra/rgb.c
index 5aa66ef7a946f8b1c25a4241cf39f76dad9e91b7..ba47ca4fb880cc239a9b5c454b3b6c5b5adcddc8 100644 (file)
@@ -8,9 +8,6 @@
  */
 
 #include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
 
 #include "drm.h"
 #include "dc.h"
@@ -150,7 +147,7 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
        rgb->output.dev = dc->dev;
        rgb->output.of_node = np;
 
-       err = tegra_output_parse_dt(&rgb->output);
+       err = tegra_output_probe(&rgb->output);
        if (err < 0)
                return err;
 
@@ -177,6 +174,20 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
        return 0;
 }
 
+int tegra_dc_rgb_remove(struct tegra_dc *dc)
+{
+       int err;
+
+       if (!dc->rgb)
+               return 0;
+
+       err = tegra_output_remove(dc->rgb);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
 int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
 {
        struct tegra_rgb *rgb = to_rgb(dc->rgb);
index 7a4d10106906e92e91e75763e0d96b07557bfbff..7c3ef79fcb3736a80636c12fe49de1f8c8b3a735 100644 (file)
@@ -2,6 +2,7 @@ config DRM_TILCDC
        tristate "DRM Support for TI LCDC Display Controller"
        depends on DRM && OF && ARM
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select DRM_KMS_CMA_HELPER
        select DRM_GEM_CMA_HELPER
        select VIDEOMODE_HELPERS
index 6222af19f4566591f81f9e319b006c59b13a1f5b..f02528686cd524745e9ad058f12b7b8cc56a7620 100644 (file)
@@ -8,6 +8,7 @@ config DRM_UDL
        select FB_SYS_IMAGEBLIT
        select FB_DEFERRED_IO
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        help
          This is a KMS driver for the USB displaylink video adapters.
           Say M/Y to add support for these devices via drm/kms interfaces.
index 7650dc0d78cec2391755a04235cfe3bfc6c7a8d1..3ddd6cd98ac12d0d8cbf9124556f6688b677c9f8 100644 (file)
@@ -77,7 +77,6 @@ static struct drm_driver driver = {
        .unload = udl_driver_unload,
 
        /* gem hooks */
-       .gem_init_object = udl_gem_init_object,
        .gem_free_object = udl_gem_free_object,
        .gem_vm_ops = &udl_gem_vm_ops,
 
index 56aec9409fa317c37054705e0a54c724cb126068..1fbf7b357f16ba1c9b6a8baea1fd54292398e041 100644 (file)
@@ -115,7 +115,6 @@ int udl_dumb_create(struct drm_file *file_priv,
 int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
                 uint32_t handle, uint64_t *offset);
 
-int udl_gem_init_object(struct drm_gem_object *obj);
 void udl_gem_free_object(struct drm_gem_object *gem_obj);
 struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
                                            size_t size);
index 8bf646183bac837a24f335bb46237c5c4a5d5e43..24ffbe990736e3e0751609e78d113d08c952b69f 100644 (file)
@@ -107,13 +107,6 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        }
 }
 
-int udl_gem_init_object(struct drm_gem_object *obj)
-{
-       BUG();
-
-       return 0;
-}
-
 static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
 {
        struct page **pages;
index 7e3ad87c366c6761e41b38c4fe3226f38925d9e5..9278891054834992ba30c5b9ac2a4f5b09403438 100644 (file)
@@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context)
 
        /* Linux specific until context tracking code gets ported to BSD */
        /* Last context, perform cleanup */
-       if (dev->ctx_count == 1 && dev->dev_private) {
+       if (list_is_singular(&dev->ctxlist) && dev->dev_private) {
                DRM_DEBUG("Last Context\n");
                drm_irq_uninstall(dev);
                via_cleanup_futex(dev_priv);
index 1a90f0a2f7e5aa7b994558b18a3cffd2d1254ee8..0508f93b9795fbc2b4ccf9c8f34ea62383a7e138 100644 (file)
@@ -740,9 +740,17 @@ static void vmw_postclose(struct drm_device *dev,
        struct vmw_fpriv *vmw_fp;
 
        vmw_fp = vmw_fpriv(file_priv);
-       ttm_object_file_release(&vmw_fp->tfile);
-       if (vmw_fp->locked_master)
+
+       if (vmw_fp->locked_master) {
+               struct vmw_master *vmaster =
+                       vmw_master(vmw_fp->locked_master);
+
+               ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+               ttm_vt_unlock(&vmaster->lock);
                drm_master_put(&vmw_fp->locked_master);
+       }
+
+       ttm_object_file_release(&vmw_fp->tfile);
        kfree(vmw_fp);
 }
 
@@ -925,14 +933,13 @@ static void vmw_master_drop(struct drm_device *dev,
 
        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-       vmw_execbuf_release_pinned_bo(dev_priv);
-
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }
 
-       ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+       ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
+       vmw_execbuf_release_pinned_bo(dev_priv);
 
        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
index 0e67cf41065d801e6526d23dc5cdf375bdabe1c7..37fb4befec82634ccf3a504428af8ed60f4a0e56 100644 (file)
@@ -970,7 +970,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
        if (new_backup)
                res->backup_offset = new_backup_offset;
 
-       if (!res->func->may_evict)
+       if (!res->func->may_evict || res->id == -1)
                return;
 
        write_lock(&dev_priv->resource_lock);
index ccfd42b236060ffa17329d95a114029ef1faa89c..7d6bed2225422fa2413130a606d2b20fd084cfd4 100644 (file)
@@ -19,6 +19,4 @@ config TEGRA_HOST1X_FIREWALL
 
          If unsure, choose Y.
 
-source "drivers/gpu/host1x/drm/Kconfig"
-
 endif
index 3b037b6e0298454edbc44c30db4f818f3ca8cafc..de305c2d510ea0bab597891bb8c82bca9890301b 100644 (file)
@@ -1,6 +1,5 @@
-ccflags-y = -Idrivers/gpu/host1x
-
 host1x-y = \
+       bus.o \
        syncpt.o \
        dev.o \
        intr.o \
@@ -8,13 +7,8 @@ host1x-y = \
        channel.o \
        job.o \
        debug.o \
-       hw/host1x01.o
-
-ccflags-y += -Iinclude/drm
-ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
+       mipi.o \
+       hw/host1x01.o \
+       hw/host1x02.o
 
-host1x-$(CONFIG_DRM_TEGRA) += drm/drm.o drm/fb.o drm/dc.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/output.o drm/rgb.o drm/hdmi.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/gem.o
-host1x-$(CONFIG_DRM_TEGRA) += drm/gr2d.o
 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
new file mode 100644 (file)
index 0000000..509383f
--- /dev/null
@@ -0,0 +1,550 @@
+/*
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/host1x.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "dev.h"
+
+static DEFINE_MUTEX(clients_lock);
+static LIST_HEAD(clients);
+
+static DEFINE_MUTEX(drivers_lock);
+static LIST_HEAD(drivers);
+
+static DEFINE_MUTEX(devices_lock);
+static LIST_HEAD(devices);
+
+struct host1x_subdev {
+       struct host1x_client *client;
+       struct device_node *np;
+       struct list_head list;
+};
+
+/**
+ * host1x_subdev_add() - add a new subdevice with an associated device node
+ */
+static int host1x_subdev_add(struct host1x_device *device,
+                            struct device_node *np)
+{
+       struct host1x_subdev *subdev;
+
+       subdev = kzalloc(sizeof(*subdev), GFP_KERNEL);
+       if (!subdev)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&subdev->list);
+       subdev->np = of_node_get(np);
+
+       mutex_lock(&device->subdevs_lock);
+       list_add_tail(&subdev->list, &device->subdevs);
+       mutex_unlock(&device->subdevs_lock);
+
+       return 0;
+}
+
+/**
+ * host1x_subdev_del() - remove subdevice
+ */
+static void host1x_subdev_del(struct host1x_subdev *subdev)
+{
+       list_del(&subdev->list);
+       of_node_put(subdev->np);
+       kfree(subdev);
+}
+
+/**
+ * host1x_device_parse_dt() - scan device tree and add matching subdevices
+ */
+static int host1x_device_parse_dt(struct host1x_device *device)
+{
+       struct device_node *np;
+       int err;
+
+       for_each_child_of_node(device->dev.parent->of_node, np) {
+               if (of_match_node(device->driver->subdevs, np) &&
+                   of_device_is_available(np)) {
+                       err = host1x_subdev_add(device, np);
+                       if (err < 0)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
+static void host1x_subdev_register(struct host1x_device *device,
+                                  struct host1x_subdev *subdev,
+                                  struct host1x_client *client)
+{
+       int err;
+
+       /*
+        * Move the subdevice to the list of active (registered) subdevices
+        * and associate it with a client. At the same time, associate the
+        * client with its parent device.
+        */
+       mutex_lock(&device->subdevs_lock);
+       mutex_lock(&device->clients_lock);
+       list_move_tail(&client->list, &device->clients);
+       list_move_tail(&subdev->list, &device->active);
+       client->parent = &device->dev;
+       subdev->client = client;
+       mutex_unlock(&device->clients_lock);
+       mutex_unlock(&device->subdevs_lock);
+
+       /*
+        * When all subdevices have been registered, the composite device is
+        * ready to be probed.
+        */
+       if (list_empty(&device->subdevs)) {
+               err = device->driver->probe(device);
+               if (err < 0)
+                       dev_err(&device->dev, "probe failed: %d\n", err);
+       }
+}
+
+static void __host1x_subdev_unregister(struct host1x_device *device,
+                                      struct host1x_subdev *subdev)
+{
+       struct host1x_client *client = subdev->client;
+       int err;
+
+       /*
+        * If all subdevices have been activated, we're about to remove the
+        * first active subdevice, so unload the driver first.
+        */
+       if (list_empty(&device->subdevs)) {
+               err = device->driver->remove(device);
+               if (err < 0)
+                       dev_err(&device->dev, "remove failed: %d\n", err);
+       }
+
+       /*
+        * Move the subdevice back to the list of idle subdevices and remove
+        * it from list of clients.
+        */
+       mutex_lock(&device->clients_lock);
+       subdev->client = NULL;
+       client->parent = NULL;
+       list_move_tail(&subdev->list, &device->subdevs);
+       /*
+        * XXX: Perhaps don't do this here, but rather explicitly remove it
+        * when the device is about to be deleted.
+        *
+        * This is somewhat complicated by the fact that this function is
+        * used to remove the subdevice when a client is unregistered but
+        * also when the composite device is about to be removed.
+        */
+       list_del_init(&client->list);
+       mutex_unlock(&device->clients_lock);
+}
+
+static void host1x_subdev_unregister(struct host1x_device *device,
+                                    struct host1x_subdev *subdev)
+{
+       mutex_lock(&device->subdevs_lock);
+       __host1x_subdev_unregister(device, subdev);
+       mutex_unlock(&device->subdevs_lock);
+}
+
+int host1x_device_init(struct host1x_device *device)
+{
+       struct host1x_client *client;
+       int err;
+
+       mutex_lock(&device->clients_lock);
+
+       list_for_each_entry(client, &device->clients, list) {
+               if (client->ops && client->ops->init) {
+                       err = client->ops->init(client);
+                       if (err < 0) {
+                               dev_err(&device->dev,
+                                       "failed to initialize %s: %d\n",
+                                       dev_name(client->dev), err);
+                               mutex_unlock(&device->clients_lock);
+                               return err;
+                       }
+               }
+       }
+
+       mutex_unlock(&device->clients_lock);
+
+       return 0;
+}
+
+int host1x_device_exit(struct host1x_device *device)
+{
+       struct host1x_client *client;
+       int err;
+
+       mutex_lock(&device->clients_lock);
+
+       list_for_each_entry_reverse(client, &device->clients, list) {
+               if (client->ops && client->ops->exit) {
+                       err = client->ops->exit(client);
+                       if (err < 0) {
+                               dev_err(&device->dev,
+                                       "failed to cleanup %s: %d\n",
+                                       dev_name(client->dev), err);
+                               mutex_unlock(&device->clients_lock);
+                               return err;
+                       }
+               }
+       }
+
+       mutex_unlock(&device->clients_lock);
+
+       return 0;
+}
+
+static int host1x_register_client(struct host1x *host1x,
+                                 struct host1x_client *client)
+{
+       struct host1x_device *device;
+       struct host1x_subdev *subdev;
+
+       mutex_lock(&host1x->devices_lock);
+
+       list_for_each_entry(device, &host1x->devices, list) {
+               list_for_each_entry(subdev, &device->subdevs, list) {
+                       if (subdev->np == client->dev->of_node) {
+                               host1x_subdev_register(device, subdev, client);
+                               mutex_unlock(&host1x->devices_lock);
+                               return 0;
+                       }
+               }
+       }
+
+       mutex_unlock(&host1x->devices_lock);
+       return -ENODEV;
+}
+
+static int host1x_unregister_client(struct host1x *host1x,
+                                   struct host1x_client *client)
+{
+       struct host1x_device *device, *dt;
+       struct host1x_subdev *subdev;
+
+       mutex_lock(&host1x->devices_lock);
+
+       list_for_each_entry_safe(device, dt, &host1x->devices, list) {
+               list_for_each_entry(subdev, &device->active, list) {
+                       if (subdev->client == client) {
+                               host1x_subdev_unregister(device, subdev);
+                               mutex_unlock(&host1x->devices_lock);
+                               return 0;
+                       }
+               }
+       }
+
+       mutex_unlock(&host1x->devices_lock);
+       return -ENODEV;
+}
+
+struct bus_type host1x_bus_type = {
+       .name = "host1x",
+};
+
+int host1x_bus_init(void)
+{
+       return bus_register(&host1x_bus_type);
+}
+
+void host1x_bus_exit(void)
+{
+       bus_unregister(&host1x_bus_type);
+}
+
+static void host1x_device_release(struct device *dev)
+{
+       struct host1x_device *device = to_host1x_device(dev);
+
+       kfree(device);
+}
+
+static int host1x_device_add(struct host1x *host1x,
+                            struct host1x_driver *driver)
+{
+       struct host1x_client *client, *tmp;
+       struct host1x_subdev *subdev;
+       struct host1x_device *device;
+       int err;
+
+       device = kzalloc(sizeof(*device), GFP_KERNEL);
+       if (!device)
+               return -ENOMEM;
+
+       mutex_init(&device->subdevs_lock);
+       INIT_LIST_HEAD(&device->subdevs);
+       INIT_LIST_HEAD(&device->active);
+       mutex_init(&device->clients_lock);
+       INIT_LIST_HEAD(&device->clients);
+       INIT_LIST_HEAD(&device->list);
+       device->driver = driver;
+
+       device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
+       device->dev.dma_mask = &device->dev.coherent_dma_mask;
+       device->dev.release = host1x_device_release;
+       dev_set_name(&device->dev, driver->name);
+       device->dev.bus = &host1x_bus_type;
+       device->dev.parent = host1x->dev;
+
+       err = device_register(&device->dev);
+       if (err < 0)
+               return err;
+
+       err = host1x_device_parse_dt(device);
+       if (err < 0) {
+               device_unregister(&device->dev);
+               return err;
+       }
+
+       mutex_lock(&host1x->devices_lock);
+       list_add_tail(&device->list, &host1x->devices);
+       mutex_unlock(&host1x->devices_lock);
+
+       mutex_lock(&clients_lock);
+
+       list_for_each_entry_safe(client, tmp, &clients, list) {
+               list_for_each_entry(subdev, &device->subdevs, list) {
+                       if (subdev->np == client->dev->of_node) {
+                               host1x_subdev_register(device, subdev, client);
+                               break;
+                       }
+               }
+       }
+
+       mutex_unlock(&clients_lock);
+
+       return 0;
+}
+
+/*
+ * Removes a device by first unregistering any subdevices and then removing
+ * itself from the list of devices.
+ *
+ * This function must be called with the host1x->devices_lock held.
+ */
+static void host1x_device_del(struct host1x *host1x,
+                             struct host1x_device *device)
+{
+       struct host1x_subdev *subdev, *sd;
+       struct host1x_client *client, *cl;
+
+       mutex_lock(&device->subdevs_lock);
+
+       /* unregister subdevices */
+       list_for_each_entry_safe(subdev, sd, &device->active, list) {
+               /*
+                * host1x_subdev_unregister() will remove the client from
+                * any lists, so we'll need to manually add it back to the
+                * list of idle clients.
+                *
+                * XXX: Alternatively, perhaps don't remove the client from
+                * any lists in host1x_subdev_unregister() and instead do
+                * that explicitly from host1x_unregister_client()?
+                */
+               client = subdev->client;
+
+               __host1x_subdev_unregister(device, subdev);
+
+               /* add the client to the list of idle clients */
+               mutex_lock(&clients_lock);
+               list_add_tail(&client->list, &clients);
+               mutex_unlock(&clients_lock);
+       }
+
+       /* remove subdevices */
+       list_for_each_entry_safe(subdev, sd, &device->subdevs, list)
+               host1x_subdev_del(subdev);
+
+       mutex_unlock(&device->subdevs_lock);
+
+       /* move clients to idle list */
+       mutex_lock(&clients_lock);
+       mutex_lock(&device->clients_lock);
+
+       list_for_each_entry_safe(client, cl, &device->clients, list)
+               list_move_tail(&client->list, &clients);
+
+       mutex_unlock(&device->clients_lock);
+       mutex_unlock(&clients_lock);
+
+       /* finally remove the device */
+       list_del_init(&device->list);
+       device_unregister(&device->dev);
+}
+
+static void host1x_attach_driver(struct host1x *host1x,
+                                struct host1x_driver *driver)
+{
+       struct host1x_device *device;
+       int err;
+
+       mutex_lock(&host1x->devices_lock);
+
+       list_for_each_entry(device, &host1x->devices, list) {
+               if (device->driver == driver) {
+                       mutex_unlock(&host1x->devices_lock);
+                       return;
+               }
+       }
+
+       mutex_unlock(&host1x->devices_lock);
+
+       err = host1x_device_add(host1x, driver);
+       if (err < 0)
+               dev_err(host1x->dev, "failed to allocate device: %d\n", err);
+}
+
+static void host1x_detach_driver(struct host1x *host1x,
+                                struct host1x_driver *driver)
+{
+       struct host1x_device *device, *tmp;
+
+       mutex_lock(&host1x->devices_lock);
+
+       list_for_each_entry_safe(device, tmp, &host1x->devices, list)
+               if (device->driver == driver)
+                       host1x_device_del(host1x, device);
+
+       mutex_unlock(&host1x->devices_lock);
+}
+
+int host1x_register(struct host1x *host1x)
+{
+       struct host1x_driver *driver;
+
+       mutex_lock(&devices_lock);
+       list_add_tail(&host1x->list, &devices);
+       mutex_unlock(&devices_lock);
+
+       mutex_lock(&drivers_lock);
+
+       list_for_each_entry(driver, &drivers, list)
+               host1x_attach_driver(host1x, driver);
+
+       mutex_unlock(&drivers_lock);
+
+       return 0;
+}
+
+int host1x_unregister(struct host1x *host1x)
+{
+       struct host1x_driver *driver;
+
+       mutex_lock(&drivers_lock);
+
+       list_for_each_entry(driver, &drivers, list)
+               host1x_detach_driver(host1x, driver);
+
+       mutex_unlock(&drivers_lock);
+
+       mutex_lock(&devices_lock);
+       list_del_init(&host1x->list);
+       mutex_unlock(&devices_lock);
+
+       return 0;
+}
+
+int host1x_driver_register(struct host1x_driver *driver)
+{
+       struct host1x *host1x;
+
+       INIT_LIST_HEAD(&driver->list);
+
+       mutex_lock(&drivers_lock);
+       list_add_tail(&driver->list, &drivers);
+       mutex_unlock(&drivers_lock);
+
+       mutex_lock(&devices_lock);
+
+       list_for_each_entry(host1x, &devices, list)
+               host1x_attach_driver(host1x, driver);
+
+       mutex_unlock(&devices_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(host1x_driver_register);
+
+void host1x_driver_unregister(struct host1x_driver *driver)
+{
+       mutex_lock(&drivers_lock);
+       list_del_init(&driver->list);
+       mutex_unlock(&drivers_lock);
+}
+EXPORT_SYMBOL(host1x_driver_unregister);
+
+int host1x_client_register(struct host1x_client *client)
+{
+       struct host1x *host1x;
+       int err;
+
+       mutex_lock(&devices_lock);
+
+       list_for_each_entry(host1x, &devices, list) {
+               err = host1x_register_client(host1x, client);
+               if (!err) {
+                       mutex_unlock(&devices_lock);
+                       return 0;
+               }
+       }
+
+       mutex_unlock(&devices_lock);
+
+       mutex_lock(&clients_lock);
+       list_add_tail(&client->list, &clients);
+       mutex_unlock(&clients_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(host1x_client_register);
+
+int host1x_client_unregister(struct host1x_client *client)
+{
+       struct host1x_client *c;
+       struct host1x *host1x;
+       int err;
+
+       mutex_lock(&devices_lock);
+
+       list_for_each_entry(host1x, &devices, list) {
+               err = host1x_unregister_client(host1x, client);
+               if (!err) {
+                       mutex_unlock(&devices_lock);
+                       return 0;
+               }
+       }
+
+       mutex_unlock(&devices_lock);
+       mutex_lock(&clients_lock);
+
+       list_for_each_entry(c, &clients, list) {
+               if (c == client) {
+                       list_del_init(&c->list);
+                       break;
+               }
+       }
+
+       mutex_unlock(&clients_lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(host1x_client_unregister);
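
For orientation, a minimal sketch (not part of this commit) of how a client driver would attach to the new host1x bus implemented above. The ops/dev fields of struct host1x_client and the init/exit callbacks of struct host1x_client_ops are inferred from the calls visible in bus.c (client->ops->init(client), client->dev); the authoritative definitions live in the new <linux/host1x.h>, which is not shown in this excerpt.

#include <linux/host1x.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo {
	struct host1x_client client;
};

/* called once every subdevice of the composite host1x device has registered */
static int foo_init(struct host1x_client *client)
{
	return 0;
}

static int foo_exit(struct host1x_client *client)
{
	return 0;
}

static const struct host1x_client_ops foo_client_ops = {
	.init = foo_init,
	.exit = foo_exit,
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo *foo;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->client.ops = &foo_client_ops;
	foo->client.dev = &pdev->dev;
	platform_set_drvdata(pdev, foo);

	/*
	 * Either binds the client to a host1x device whose subdevice table
	 * matches this OF node, or parks it on the global idle list until
	 * such a device shows up.
	 */
	return host1x_client_register(&foo->client);
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo *foo = platform_get_drvdata(pdev);

	return host1x_client_unregister(&foo->client);
}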
similarity index 60%
rename from drivers/gpu/host1x/host1x_client.h
rename to drivers/gpu/host1x/bus.h
index 9b85f10f4a44fc9f70a45af7b40f33d95ba9942c..4099e99212c87354377d9a851d5409573792611b 100644 (file)
@@ -1,5 +1,6 @@
 /*
- * Copyright (c) 2013, NVIDIA Corporation.
+ * Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2012-2013, NVIDIA Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifndef HOST1X_CLIENT_H
-#define HOST1X_CLIENT_H
+#ifndef HOST1X_BUS_H
+#define HOST1X_BUS_H
 
-struct device;
-struct platform_device;
+struct host1x;
 
-#ifdef CONFIG_DRM_TEGRA
-int host1x_drm_alloc(struct platform_device *pdev);
-#else
-static inline int host1x_drm_alloc(struct platform_device *pdev)
-{
-       return 0;
-}
-#endif
+int host1x_bus_init(void);
+void host1x_bus_exit(void);
 
-void host1x_set_drm_data(struct device *dev, void *data);
-void *host1x_get_drm_data(struct device *dev);
+int host1x_register(struct host1x *host1x);
+int host1x_unregister(struct host1x *host1x);
 
 #endif
index de72172d3b5fc2ac87955f3ef8b711890454434c..3995255b16c753731420cb8283992c17c33ddf2e 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/host1x.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/kfifo.h>
@@ -30,7 +31,6 @@
 #include "channel.h"
 #include "dev.h"
 #include "debug.h"
-#include "host1x_bo.h"
 #include "job.h"
 
 /*
index 48723b8eea42da33e9854a0925bbeedfa391a37a..df767cf90d51e14c2647f42ef192a7ffcf19489b 100644 (file)
@@ -40,12 +40,6 @@ struct host1x_channel {
 /* channel list operations */
 int host1x_channel_list_init(struct host1x *host);
 
-struct host1x_channel *host1x_channel_request(struct device *dev);
-void host1x_channel_free(struct host1x_channel *channel);
-struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
-void host1x_channel_put(struct host1x_channel *channel);
-int host1x_job_submit(struct host1x_job *job);
-
 #define host1x_for_each_channel(host, channel)                         \
        list_for_each_entry(channel, &host->chlist.list, list)
 
index 471630299878a88bbf300a09a0600da2c1c062eb..12d63336b369c767072c39ca6378e5aa62527b05 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <trace/events/host1x.h>
 
+#include "bus.h"
 #include "dev.h"
 #include "intr.h"
 #include "channel.h"
 #include "debug.h"
 #include "hw/host1x01.h"
-#include "host1x_client.h"
-
-void host1x_set_drm_data(struct device *dev, void *data)
-{
-       struct host1x *host1x = dev_get_drvdata(dev);
-       host1x->drm_data = data;
-}
-
-void *host1x_get_drm_data(struct device *dev)
-{
-       struct host1x *host1x = dev_get_drvdata(dev);
-       return host1x ? host1x->drm_data : NULL;
-}
+#include "hw/host1x02.h"
 
 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
 {
@@ -79,7 +68,17 @@ static const struct host1x_info host1x01_info = {
        .sync_offset    = 0x3000,
 };
 
+static const struct host1x_info host1x02_info = {
+       .nb_channels = 9,
+       .nb_pts = 32,
+       .nb_mlocks = 16,
+       .nb_bases = 12,
+       .init = host1x02_init,
+       .sync_offset = 0x3000,
+};
+
 static struct of_device_id host1x_of_match[] = {
+       { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
        { .compatible = "nvidia,tegra30-host1x", .data = &host1x01_info, },
        { .compatible = "nvidia,tegra20-host1x", .data = &host1x01_info, },
        { },
@@ -114,6 +113,9 @@ static int host1x_probe(struct platform_device *pdev)
        if (!host)
                return -ENOMEM;
 
+       mutex_init(&host->devices_lock);
+       INIT_LIST_HEAD(&host->devices);
+       INIT_LIST_HEAD(&host->list);
        host->dev = &pdev->dev;
        host->info = id->data;
 
@@ -163,19 +165,24 @@ static int host1x_probe(struct platform_device *pdev)
 
        host1x_debug_init(host);
 
-       host1x_drm_alloc(pdev);
+       err = host1x_register(host);
+       if (err < 0)
+               goto fail_deinit_intr;
 
        return 0;
 
+fail_deinit_intr:
+       host1x_intr_deinit(host);
 fail_deinit_syncpt:
        host1x_syncpt_deinit(host);
        return err;
 }
 
-static int __exit host1x_remove(struct platform_device *pdev)
+static int host1x_remove(struct platform_device *pdev)
 {
        struct host1x *host = platform_get_drvdata(pdev);
 
+       host1x_unregister(host);
        host1x_intr_deinit(host);
        host1x_syncpt_deinit(host);
        clk_disable_unprepare(host->clk);
@@ -184,59 +191,45 @@ static int __exit host1x_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver tegra_host1x_driver = {
-       .probe = host1x_probe,
-       .remove = __exit_p(host1x_remove),
        .driver = {
-               .owner = THIS_MODULE,
                .name = "tegra-host1x",
                .of_match_table = host1x_of_match,
        },
+       .probe = host1x_probe,
+       .remove = host1x_remove,
 };
 
 static int __init tegra_host1x_init(void)
 {
        int err;
 
-       err = platform_driver_register(&tegra_host1x_driver);
+       err = host1x_bus_init();
        if (err < 0)
                return err;
 
-#ifdef CONFIG_DRM_TEGRA
-       err = platform_driver_register(&tegra_dc_driver);
-       if (err < 0)
-               goto unregister_host1x;
-
-       err = platform_driver_register(&tegra_hdmi_driver);
+       err = platform_driver_register(&tegra_host1x_driver);
        if (err < 0)
-               goto unregister_dc;
+               goto unregister_bus;
 
-       err = platform_driver_register(&tegra_gr2d_driver);
+       err = platform_driver_register(&tegra_mipi_driver);
        if (err < 0)
-               goto unregister_hdmi;
-#endif
+               goto unregister_host1x;
 
        return 0;
 
-#ifdef CONFIG_DRM_TEGRA
-unregister_hdmi:
-       platform_driver_unregister(&tegra_hdmi_driver);
-unregister_dc:
-       platform_driver_unregister(&tegra_dc_driver);
 unregister_host1x:
        platform_driver_unregister(&tegra_host1x_driver);
+unregister_bus:
+       host1x_bus_exit();
        return err;
-#endif
 }
 module_init(tegra_host1x_init);
 
 static void __exit tegra_host1x_exit(void)
 {
-#ifdef CONFIG_DRM_TEGRA
-       platform_driver_unregister(&tegra_gr2d_driver);
-       platform_driver_unregister(&tegra_hdmi_driver);
-       platform_driver_unregister(&tegra_dc_driver);
-#endif
+       platform_driver_unregister(&tegra_mipi_driver);
        platform_driver_unregister(&tegra_host1x_driver);
+       host1x_bus_exit();
 }
 module_exit(tegra_host1x_exit);
 
index bed90a8131be590fef951c533cbf3622708184da..65c80dc4f6cbefad0af16c10f22018752bb0805c 100644 (file)
@@ -125,7 +125,10 @@ struct host1x {
 
        struct dentry *debugfs;
 
-       void *drm_data;
+       struct mutex devices_lock;
+       struct list_head devices;
+
+       struct list_head list;
 };
 
 void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
@@ -301,8 +304,6 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
        host->debug_op->show_mlocks(host, o);
 }
 
-extern struct platform_driver tegra_dc_driver;
-extern struct platform_driver tegra_hdmi_driver;
-extern struct platform_driver tegra_gr2d_driver;
+extern struct platform_driver tegra_mipi_driver;
 
 #endif
diff --git a/drivers/gpu/host1x/drm/gr2d.c b/drivers/gpu/host1x/drm/gr2d.c
deleted file mode 100644 (file)
index 27ffcf1..0000000
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
- * drivers/video/tegra/host/gr2d/gr2d.c
- *
- * Tegra Graphics 2D
- *
- * Copyright (c) 2012-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/export.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-
-#include "channel.h"
-#include "drm.h"
-#include "gem.h"
-#include "job.h"
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "host1x_client.h"
-#include "syncpt.h"
-
-struct gr2d {
-       struct host1x_client client;
-       struct clk *clk;
-       struct host1x_channel *channel;
-       unsigned long *addr_regs;
-};
-
-static inline struct gr2d *to_gr2d(struct host1x_client *client)
-{
-       return container_of(client, struct gr2d, client);
-}
-
-static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg);
-
-static int gr2d_client_init(struct host1x_client *client,
-                           struct drm_device *drm)
-{
-       return 0;
-}
-
-static int gr2d_client_exit(struct host1x_client *client)
-{
-       return 0;
-}
-
-static int gr2d_open_channel(struct host1x_client *client,
-                            struct host1x_drm_context *context)
-{
-       struct gr2d *gr2d = to_gr2d(client);
-
-       context->channel = host1x_channel_get(gr2d->channel);
-
-       if (!context->channel)
-               return -ENOMEM;
-
-       return 0;
-}
-
-static void gr2d_close_channel(struct host1x_drm_context *context)
-{
-       host1x_channel_put(context->channel);
-}
-
-static struct host1x_bo *host1x_bo_lookup(struct drm_device *drm,
-                                         struct drm_file *file,
-                                         u32 handle)
-{
-       struct drm_gem_object *gem;
-       struct tegra_bo *bo;
-
-       gem = drm_gem_object_lookup(drm, file, handle);
-       if (!gem)
-               return NULL;
-
-       mutex_lock(&drm->struct_mutex);
-       drm_gem_object_unreference(gem);
-       mutex_unlock(&drm->struct_mutex);
-
-       bo = to_tegra_bo(gem);
-       return &bo->base;
-}
-
-static int gr2d_submit(struct host1x_drm_context *context,
-                      struct drm_tegra_submit *args, struct drm_device *drm,
-                      struct drm_file *file)
-{
-       struct host1x_job *job;
-       unsigned int num_cmdbufs = args->num_cmdbufs;
-       unsigned int num_relocs = args->num_relocs;
-       unsigned int num_waitchks = args->num_waitchks;
-       struct drm_tegra_cmdbuf __user *cmdbufs =
-               (void * __user)(uintptr_t)args->cmdbufs;
-       struct drm_tegra_reloc __user *relocs =
-               (void * __user)(uintptr_t)args->relocs;
-       struct drm_tegra_waitchk __user *waitchks =
-               (void * __user)(uintptr_t)args->waitchks;
-       struct drm_tegra_syncpt syncpt;
-       int err;
-
-       /* We don't yet support other than one syncpt_incr struct per submit */
-       if (args->num_syncpts != 1)
-               return -EINVAL;
-
-       job = host1x_job_alloc(context->channel, args->num_cmdbufs,
-                              args->num_relocs, args->num_waitchks);
-       if (!job)
-               return -ENOMEM;
-
-       job->num_relocs = args->num_relocs;
-       job->num_waitchk = args->num_waitchks;
-       job->client = (u32)args->context;
-       job->class = context->client->class;
-       job->serialize = true;
-
-       while (num_cmdbufs) {
-               struct drm_tegra_cmdbuf cmdbuf;
-               struct host1x_bo *bo;
-
-               err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
-               if (err)
-                       goto fail;
-
-               bo = host1x_bo_lookup(drm, file, cmdbuf.handle);
-               if (!bo) {
-                       err = -ENOENT;
-                       goto fail;
-               }
-
-               host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
-               num_cmdbufs--;
-               cmdbufs++;
-       }
-
-       err = copy_from_user(job->relocarray, relocs,
-                            sizeof(*relocs) * num_relocs);
-       if (err)
-               goto fail;
-
-       while (num_relocs--) {
-               struct host1x_reloc *reloc = &job->relocarray[num_relocs];
-               struct host1x_bo *cmdbuf, *target;
-
-               cmdbuf = host1x_bo_lookup(drm, file, (u32)reloc->cmdbuf);
-               target = host1x_bo_lookup(drm, file, (u32)reloc->target);
-
-               reloc->cmdbuf = cmdbuf;
-               reloc->target = target;
-
-               if (!reloc->target || !reloc->cmdbuf) {
-                       err = -ENOENT;
-                       goto fail;
-               }
-       }
-
-       err = copy_from_user(job->waitchk, waitchks,
-                            sizeof(*waitchks) * num_waitchks);
-       if (err)
-               goto fail;
-
-       err = copy_from_user(&syncpt, (void * __user)(uintptr_t)args->syncpts,
-                            sizeof(syncpt));
-       if (err)
-               goto fail;
-
-       job->syncpt_id = syncpt.id;
-       job->syncpt_incrs = syncpt.incrs;
-       job->timeout = 10000;
-       job->is_addr_reg = gr2d_is_addr_reg;
-
-       if (args->timeout && args->timeout < 10000)
-               job->timeout = args->timeout;
-
-       err = host1x_job_pin(job, context->client->dev);
-       if (err)
-               goto fail;
-
-       err = host1x_job_submit(job);
-       if (err)
-               goto fail_submit;
-
-       args->fence = job->syncpt_end;
-
-       host1x_job_put(job);
-       return 0;
-
-fail_submit:
-       host1x_job_unpin(job);
-fail:
-       host1x_job_put(job);
-       return err;
-}
-
-static struct host1x_client_ops gr2d_client_ops = {
-       .drm_init = gr2d_client_init,
-       .drm_exit = gr2d_client_exit,
-       .open_channel = gr2d_open_channel,
-       .close_channel = gr2d_close_channel,
-       .submit = gr2d_submit,
-};
-
-static void gr2d_init_addr_reg_map(struct device *dev, struct gr2d *gr2d)
-{
-       const u32 gr2d_addr_regs[] = {0x1a, 0x1b, 0x26, 0x2b, 0x2c, 0x2d, 0x31,
-                                     0x32, 0x48, 0x49, 0x4a, 0x4b, 0x4c};
-       unsigned long *bitmap;
-       int i;
-
-       bitmap = devm_kzalloc(dev, DIV_ROUND_UP(256, BITS_PER_BYTE),
-                             GFP_KERNEL);
-
-       for (i = 0; i < ARRAY_SIZE(gr2d_addr_regs); ++i) {
-               u32 reg = gr2d_addr_regs[i];
-               bitmap[BIT_WORD(reg)] |= BIT_MASK(reg);
-       }
-
-       gr2d->addr_regs = bitmap;
-}
-
-static int gr2d_is_addr_reg(struct device *dev, u32 class, u32 reg)
-{
-       struct gr2d *gr2d = dev_get_drvdata(dev);
-
-       switch (class) {
-       case HOST1X_CLASS_HOST1X:
-               return reg == 0x2b;
-       case HOST1X_CLASS_GR2D:
-       case HOST1X_CLASS_GR2D_SB:
-               reg &= 0xff;
-               if (gr2d->addr_regs[BIT_WORD(reg)] & BIT_MASK(reg))
-                       return 1;
-       default:
-               return 0;
-       }
-}
-
-static const struct of_device_id gr2d_match[] = {
-       { .compatible = "nvidia,tegra30-gr2d" },
-       { .compatible = "nvidia,tegra20-gr2d" },
-       { },
-};
-
-static int gr2d_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct host1x_drm *host1x = host1x_get_drm_data(dev->parent);
-       int err;
-       struct gr2d *gr2d = NULL;
-       struct host1x_syncpt **syncpts;
-
-       gr2d = devm_kzalloc(dev, sizeof(*gr2d), GFP_KERNEL);
-       if (!gr2d)
-               return -ENOMEM;
-
-       syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
-       if (!syncpts)
-               return -ENOMEM;
-
-       gr2d->clk = devm_clk_get(dev, NULL);
-       if (IS_ERR(gr2d->clk)) {
-               dev_err(dev, "cannot get clock\n");
-               return PTR_ERR(gr2d->clk);
-       }
-
-       err = clk_prepare_enable(gr2d->clk);
-       if (err) {
-               dev_err(dev, "cannot turn on clock\n");
-               return err;
-       }
-
-       gr2d->channel = host1x_channel_request(dev);
-       if (!gr2d->channel)
-               return -ENOMEM;
-
-       *syncpts = host1x_syncpt_request(dev, false);
-       if (!(*syncpts)) {
-               host1x_channel_free(gr2d->channel);
-               return -ENOMEM;
-       }
-
-       gr2d->client.ops = &gr2d_client_ops;
-       gr2d->client.dev = dev;
-       gr2d->client.class = HOST1X_CLASS_GR2D;
-       gr2d->client.syncpts = syncpts;
-       gr2d->client.num_syncpts = 1;
-
-       err = host1x_register_client(host1x, &gr2d->client);
-       if (err < 0) {
-               dev_err(dev, "failed to register host1x client: %d\n", err);
-               return err;
-       }
-
-       gr2d_init_addr_reg_map(dev, gr2d);
-
-       platform_set_drvdata(pdev, gr2d);
-
-       return 0;
-}
-
-static int __exit gr2d_remove(struct platform_device *pdev)
-{
-       struct host1x_drm *host1x = host1x_get_drm_data(pdev->dev.parent);
-       struct gr2d *gr2d = platform_get_drvdata(pdev);
-       unsigned int i;
-       int err;
-
-       err = host1x_unregister_client(host1x, &gr2d->client);
-       if (err < 0) {
-               dev_err(&pdev->dev, "failed to unregister client: %d\n", err);
-               return err;
-       }
-
-       for (i = 0; i < gr2d->client.num_syncpts; i++)
-               host1x_syncpt_free(gr2d->client.syncpts[i]);
-
-       host1x_channel_free(gr2d->channel);
-       clk_disable_unprepare(gr2d->clk);
-
-       return 0;
-}
-
-struct platform_driver tegra_gr2d_driver = {
-       .probe = gr2d_probe,
-       .remove = __exit_p(gr2d_remove),
-       .driver = {
-               .owner = THIS_MODULE,
-               .name = "gr2d",
-               .of_match_table = gr2d_match,
-       }
-};
diff --git a/drivers/gpu/host1x/host1x.h b/drivers/gpu/host1x/host1x.h
deleted file mode 100644 (file)
index a2bc1e6..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Tegra host1x driver
- *
- * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- */
-
-#ifndef __LINUX_HOST1X_H
-#define __LINUX_HOST1X_H
-
-enum host1x_class {
-       HOST1X_CLASS_HOST1X     = 0x1,
-       HOST1X_CLASS_GR2D       = 0x51,
-       HOST1X_CLASS_GR2D_SB    = 0x52
-};
-
-#endif
diff --git a/drivers/gpu/host1x/host1x_bo.h b/drivers/gpu/host1x/host1x_bo.h
deleted file mode 100644 (file)
index 4c1f10b..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Tegra host1x Memory Management Abstraction header
- *
- * Copyright (c) 2012-2013, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _HOST1X_BO_H
-#define _HOST1X_BO_H
-
-struct host1x_bo;
-
-struct host1x_bo_ops {
-       struct host1x_bo *(*get)(struct host1x_bo *bo);
-       void (*put)(struct host1x_bo *bo);
-       dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
-       void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
-       void *(*mmap)(struct host1x_bo *bo);
-       void (*munmap)(struct host1x_bo *bo, void *addr);
-       void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
-       void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
-};
-
-struct host1x_bo {
-       const struct host1x_bo_ops *ops;
-};
-
-static inline void host1x_bo_init(struct host1x_bo *bo,
-                                 const struct host1x_bo_ops *ops)
-{
-       bo->ops = ops;
-}
-
-static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
-{
-       return bo->ops->get(bo);
-}
-
-static inline void host1x_bo_put(struct host1x_bo *bo)
-{
-       bo->ops->put(bo);
-}
-
-static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
-                                      struct sg_table **sgt)
-{
-       return bo->ops->pin(bo, sgt);
-}
-
-static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
-{
-       bo->ops->unpin(bo, sgt);
-}
-
-static inline void *host1x_bo_mmap(struct host1x_bo *bo)
-{
-       return bo->ops->mmap(bo);
-}
-
-static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
-{
-       bo->ops->munmap(bo, addr);
-}
-
-static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
-{
-       return bo->ops->kmap(bo, pagenum);
-}
-
-static inline void host1x_bo_kunmap(struct host1x_bo *bo,
-                                   unsigned int pagenum, void *addr)
-{
-       bo->ops->kunmap(bo, pagenum, addr);
-}
-
-#endif
diff --git a/drivers/gpu/host1x/hw/Makefile b/drivers/gpu/host1x/hw/Makefile
deleted file mode 100644 (file)
index 9b50863..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-ccflags-y = -Idrivers/gpu/host1x
-
-host1x-hw-objs  = \
-       host1x01.o
-
-obj-$(CONFIG_TEGRA_HOST1X) += host1x-hw.o
index 2ee4ad55c4dba344cbb28d72d8febecd7f0181ee..37e2a63241a9d6150b1554896fde15ac0294c157 100644 (file)
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 
-#include "cdma.h"
-#include "channel.h"
-#include "dev.h"
-#include "debug.h"
+#include "../cdma.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../debug.h"
 
 /*
  * Put the restart at the end of pushbuffer memory
index ee199623e36570719ad35fbe3de0d41b00744843..3be0cd296d3a4147101d1deb51ec19becde980c9 100644 (file)
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/host1x.h>
 #include <linux/slab.h>
+
 #include <trace/events/host1x.h>
 
-#include "host1x.h"
-#include "host1x_bo.h"
-#include "channel.h"
-#include "dev.h"
-#include "intr.h"
-#include "job.h"
+#include "../channel.h"
+#include "../dev.h"
+#include "../intr.h"
+#include "../job.h"
 
 #define HOST1X_CHANNEL_SIZE 16384
 #define TRACE_MAX_LENGTH 128U
index 334c038052f582cac72c1818d30b3387aa3ae060..640c75ca5a8bbc465dcc9f1350d4f5efdd928d5d 100644 (file)
  *
  */
 
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-
-#include <linux/io.h>
-
-#include "dev.h"
-#include "debug.h"
-#include "cdma.h"
-#include "channel.h"
-#include "host1x_bo.h"
+#include "../dev.h"
+#include "../debug.h"
+#include "../cdma.h"
+#include "../channel.h"
 
 #define HOST1X_DEBUG_MAX_PAGE_OFFSET 102400
 
index a14e91cd1e58eee8e5f52db20c672076544a0439..859b73beb4d0c326fcaf24ae1086ff2bc876df09 100644 (file)
  */
 
 /* include hw specification */
-#include "hw/host1x01.h"
-#include "hw/host1x01_hardware.h"
+#include "host1x01.h"
+#include "host1x01_hardware.h"
 
 /* include code */
-#include "hw/cdma_hw.c"
-#include "hw/channel_hw.c"
-#include "hw/debug_hw.c"
-#include "hw/intr_hw.c"
-#include "hw/syncpt_hw.c"
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
 
-#include "dev.h"
+#include "../dev.h"
 
 int host1x01_init(struct host1x *host)
 {
diff --git a/drivers/gpu/host1x/hw/host1x02.c b/drivers/gpu/host1x/hw/host1x02.c
new file mode 100644 (file)
index 0000000..e98caca
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Host1x init for Tegra114 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x01.h"
+#include "host1x01_hardware.h"
+
+/* include code */
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x02_init(struct host1x *host)
+{
+       host->channel_op = &host1x_channel_ops;
+       host->cdma_op = &host1x_cdma_ops;
+       host->cdma_pb_op = &host1x_pushbuffer_ops;
+       host->syncpt_op = &host1x_syncpt_ops;
+       host->intr_op = &host1x_intr_ops;
+       host->debug_op = &host1x_debug_ops;
+
+       return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x02.h b/drivers/gpu/host1x/hw/host1x02.h
new file mode 100644 (file)
index 0000000..f748660
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra114 SoCs
+ *
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X02_H
+#define HOST1X_HOST1X02_H
+
+struct host1x;
+
+int host1x02_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_channel.h b/drivers/gpu/host1x/hw/hw_host1x02_channel.h
new file mode 100644 (file)
index 0000000..e490bcd
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X02_CHANNEL_H
+#define HOST1X_HW_HOST1X02_CHANNEL_H
+
+static inline u32 host1x_channel_fifostat_r(void)
+{
+       return 0x0;
+}
+#define HOST1X_CHANNEL_FIFOSTAT \
+       host1x_channel_fifostat_r()
+static inline u32 host1x_channel_fifostat_cfempty_v(u32 r)
+{
+       return (r >> 11) & 0x1;
+}
+#define HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(r) \
+       host1x_channel_fifostat_cfempty_v(r)
+static inline u32 host1x_channel_dmastart_r(void)
+{
+       return 0x14;
+}
+#define HOST1X_CHANNEL_DMASTART \
+       host1x_channel_dmastart_r()
+static inline u32 host1x_channel_dmaput_r(void)
+{
+       return 0x18;
+}
+#define HOST1X_CHANNEL_DMAPUT \
+       host1x_channel_dmaput_r()
+static inline u32 host1x_channel_dmaget_r(void)
+{
+       return 0x1c;
+}
+#define HOST1X_CHANNEL_DMAGET \
+       host1x_channel_dmaget_r()
+static inline u32 host1x_channel_dmaend_r(void)
+{
+       return 0x20;
+}
+#define HOST1X_CHANNEL_DMAEND \
+       host1x_channel_dmaend_r()
+static inline u32 host1x_channel_dmactrl_r(void)
+{
+       return 0x24;
+}
+#define HOST1X_CHANNEL_DMACTRL \
+       host1x_channel_dmactrl_r()
+static inline u32 host1x_channel_dmactrl_dmastop(void)
+{
+       return 1 << 0;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP \
+       host1x_channel_dmactrl_dmastop()
+static inline u32 host1x_channel_dmactrl_dmastop_v(u32 r)
+{
+       return (r >> 0) & 0x1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP_V(r) \
+       host1x_channel_dmactrl_dmastop_v(r)
+static inline u32 host1x_channel_dmactrl_dmagetrst(void)
+{
+       return 1 << 1;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST \
+       host1x_channel_dmactrl_dmagetrst()
+static inline u32 host1x_channel_dmactrl_dmainitget(void)
+{
+       return 1 << 2;
+}
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET \
+       host1x_channel_dmactrl_dmainitget()
+
+#endif
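
As a quick illustration of the accessor naming scheme documented at the top of this header, a small sketch (not from the commit) that composes and decodes channel register values; the example_ch_readl()/example_ch_writel() MMIO wrappers are hypothetical stand-ins for whatever helpers the driver actually uses:

#include <linux/io.h>
#include <linux/types.h>

#include "hw_host1x02_channel.h"

/* Hypothetical MMIO helpers for the example only. */
static inline u32 example_ch_readl(void __iomem *ch_regs, u32 offset)
{
	return readl(ch_regs + offset);
}

static inline void example_ch_writel(void __iomem *ch_regs, u32 value,
				     u32 offset)
{
	writel(value, ch_regs + offset);
}

/* Stop command DMA: <x>_r() gives the offset, <x>_<y>() the field bit mask. */
static void example_channel_stop_dma(void __iomem *ch_regs)
{
	u32 dmactrl = example_ch_readl(ch_regs, HOST1X_CHANNEL_DMACTRL);

	dmactrl |= HOST1X_CHANNEL_DMACTRL_DMASTOP;
	example_ch_writel(ch_regs, dmactrl, HOST1X_CHANNEL_DMACTRL);
}

/* Decode a read-back value: <x>_<y>_v(r) extracts field <y> shifted to bit 0. */
static bool example_channel_fifo_empty(void __iomem *ch_regs)
{
	u32 fifostat = example_ch_readl(ch_regs, HOST1X_CHANNEL_FIFOSTAT);

	return HOST1X_CHANNEL_FIFOSTAT_CFEMPTY_V(fifostat) != 0;
}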
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_sync.h b/drivers/gpu/host1x/hw/hw_host1x02_sync.h
new file mode 100644 (file)
index 0000000..4495401
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X02_SYNC_H
+#define HOST1X_HW_HOST1X02_SYNC_H
+
+#define REGISTER_STRIDE        4
+
+static inline u32 host1x_sync_syncpt_r(unsigned int id)
+{
+       return 0x400 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT(id) \
+       host1x_sync_syncpt_r(id)
+static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id)
+{
+       return 0x40 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \
+       host1x_sync_syncpt_thresh_cpu0_int_status_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id)
+{
+       return 0x60 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \
+       host1x_sync_syncpt_thresh_int_disable_r(id)
+static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id)
+{
+       return 0x68 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \
+       host1x_sync_syncpt_thresh_int_enable_cpu0_r(id)
+static inline u32 host1x_sync_cf_setup_r(unsigned int channel)
+{
+       return 0x80 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CF_SETUP(channel) \
+       host1x_sync_cf_setup_r(channel)
+static inline u32 host1x_sync_cf_setup_base_v(u32 r)
+{
+       return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_BASE_V(r) \
+       host1x_sync_cf_setup_base_v(r)
+static inline u32 host1x_sync_cf_setup_limit_v(u32 r)
+{
+       return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CF_SETUP_LIMIT_V(r) \
+       host1x_sync_cf_setup_limit_v(r)
+static inline u32 host1x_sync_cmdproc_stop_r(void)
+{
+       return 0xac;
+}
+#define HOST1X_SYNC_CMDPROC_STOP \
+       host1x_sync_cmdproc_stop_r()
+static inline u32 host1x_sync_ch_teardown_r(void)
+{
+       return 0xb0;
+}
+#define HOST1X_SYNC_CH_TEARDOWN \
+       host1x_sync_ch_teardown_r()
+static inline u32 host1x_sync_usec_clk_r(void)
+{
+       return 0x1a4;
+}
+#define HOST1X_SYNC_USEC_CLK \
+       host1x_sync_usec_clk_r()
+static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void)
+{
+       return 0x1a8;
+}
+#define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \
+       host1x_sync_ctxsw_timeout_cfg_r()
+static inline u32 host1x_sync_ip_busy_timeout_r(void)
+{
+       return 0x1bc;
+}
+#define HOST1X_SYNC_IP_BUSY_TIMEOUT \
+       host1x_sync_ip_busy_timeout_r()
+static inline u32 host1x_sync_mlock_owner_r(unsigned int id)
+{
+       return 0x340 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_MLOCK_OWNER(id) \
+       host1x_sync_mlock_owner_r(id)
+static inline u32 host1x_sync_mlock_owner_chid_f(u32 v)
+{
+       return (v & 0xf) << 8;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CHID_F(v) \
+       host1x_sync_mlock_owner_chid_f(v)
+static inline u32 host1x_sync_mlock_owner_cpu_owns_v(u32 r)
+{
+       return (r >> 1) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(r) \
+       host1x_sync_mlock_owner_cpu_owns_v(r)
+static inline u32 host1x_sync_mlock_owner_ch_owns_v(u32 r)
+{
+       return (r >> 0) & 0x1;
+}
+#define HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(r) \
+       host1x_sync_mlock_owner_ch_owns_v(r)
+static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id)
+{
+       return 0x500 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \
+       host1x_sync_syncpt_int_thresh_r(id)
+static inline u32 host1x_sync_syncpt_base_r(unsigned int id)
+{
+       return 0x600 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_BASE(id) \
+       host1x_sync_syncpt_base_r(id)
+static inline u32 host1x_sync_syncpt_cpu_incr_r(unsigned int id)
+{
+       return 0x700 + id * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(id) \
+       host1x_sync_syncpt_cpu_incr_r(id)
+static inline u32 host1x_sync_cbread_r(unsigned int channel)
+{
+       return 0x720 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBREAD(channel) \
+       host1x_sync_cbread_r(channel)
+static inline u32 host1x_sync_cfpeek_ctrl_r(void)
+{
+       return 0x74c;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL \
+       host1x_sync_cfpeek_ctrl_r()
+static inline u32 host1x_sync_cfpeek_ctrl_addr_f(u32 v)
+{
+       return (v & 0x3ff) << 0;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ADDR_F(v) \
+       host1x_sync_cfpeek_ctrl_addr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_channr_f(u32 v)
+{
+       return (v & 0xf) << 16;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_CHANNR_F(v) \
+       host1x_sync_cfpeek_ctrl_channr_f(v)
+static inline u32 host1x_sync_cfpeek_ctrl_ena_f(u32 v)
+{
+       return (v & 0x1) << 31;
+}
+#define HOST1X_SYNC_CFPEEK_CTRL_ENA_F(v) \
+       host1x_sync_cfpeek_ctrl_ena_f(v)
+static inline u32 host1x_sync_cfpeek_read_r(void)
+{
+       return 0x750;
+}
+#define HOST1X_SYNC_CFPEEK_READ \
+       host1x_sync_cfpeek_read_r()
+static inline u32 host1x_sync_cfpeek_ptrs_r(void)
+{
+       return 0x754;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS \
+       host1x_sync_cfpeek_ptrs_r()
+static inline u32 host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(u32 r)
+{
+       return (r >> 0) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_RD_PTR_V(r) \
+       host1x_sync_cfpeek_ptrs_cf_rd_ptr_v(r)
+static inline u32 host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(u32 r)
+{
+       return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CFPEEK_PTRS_CF_WR_PTR_V(r) \
+       host1x_sync_cfpeek_ptrs_cf_wr_ptr_v(r)
+static inline u32 host1x_sync_cbstat_r(unsigned int channel)
+{
+       return 0x758 + channel * REGISTER_STRIDE;
+}
+#define HOST1X_SYNC_CBSTAT(channel) \
+       host1x_sync_cbstat_r(channel)
+static inline u32 host1x_sync_cbstat_cboffset_v(u32 r)
+{
+       return (r >> 0) & 0xffff;
+}
+#define HOST1X_SYNC_CBSTAT_CBOFFSET_V(r) \
+       host1x_sync_cbstat_cboffset_v(r)
+static inline u32 host1x_sync_cbstat_cbclass_v(u32 r)
+{
+       return (r >> 16) & 0x3ff;
+}
+#define HOST1X_SYNC_CBSTAT_CBCLASS_V(r) \
+       host1x_sync_cbstat_cbclass_v(r)
+
+#endif
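
A small editorial sketch (not part of the patch) illustrating the accessor convention documented at the top of this header: the raw word would be read from offset HOST1X_SYNC_CBSTAT(channel), and the _V() extractors then pull the individual fields out of it.

/* Hypothetical helper, assuming the usual kernel printk headers. */
static void example_dump_cbstat(unsigned int channel, u32 cbstat)
{
        pr_info("ch%u: class 0x%03x, offset 0x%04x\n", channel,
                HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat),
                HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat));
}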
diff --git a/drivers/gpu/host1x/hw/hw_host1x02_uclass.h b/drivers/gpu/host1x/hw/hw_host1x02_uclass.h
new file mode 100644 (file)
index 0000000..a3b3c98
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2013 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+  * Function naming determines intended use:
+  *
+  *     <x>_r(void) : Returns the offset for register <x>.
+  *
+  *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+  *
+  *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+  *
+  *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+  *         and masked to place it at field <y> of register <x>.  This value
+  *         can be |'d with others to produce a full register value for
+  *         register <x>.
+  *
+  *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+  *         value can be ~'d and then &'d to clear the value of field <y> for
+  *         register <x>.
+  *
+  *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+  *         to place it at field <y> of register <x>.  This value can be |'d
+  *         with others to produce a full register value for <x>.
+  *
+  *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+  *         <x> value 'r' after being shifted to place its LSB at bit 0.
+  *         This value is suitable for direct comparison with other unshifted
+  *         values appropriate for use in field <y> of register <x>.
+  *
+  *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+  *         field <y> of register <x>.  This value is suitable for direct
+  *         comparison with unshifted values appropriate for use in field <y>
+  *         of register <x>.
+  */
+
+#ifndef HOST1X_HW_HOST1X02_UCLASS_H
+#define HOST1X_HW_HOST1X02_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+       return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+       host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+       return (v & 0xff) << 8;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+       host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+       return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+       host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+       return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+       host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+       host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+       host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+       return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+       host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+       host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+       return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+       host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+       host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+       return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+       host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+       return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+       host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+       return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+       host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+       return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+       host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+       return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+       host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+       return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+       host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+       return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+       host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+       return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+       host1x_uclass_indoff_rwn_read_v()
+
+#endif
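
A minimal editorial sketch (not part of the patch) of the _f() helpers above composing a full register value, exactly as the naming-convention comment describes: each helper returns a shifted-and-masked field, and the fields are OR'd together.

/* Hypothetical helper built only from accessors defined in this header. */
static inline u32 example_wait_syncpt_payload(unsigned int id, u32 thresh)
{
        return host1x_uclass_wait_syncpt_indx_f(id) |
               host1x_uclass_wait_syncpt_thresh_f(thresh);
}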
index b592eef1efcb9babfe0355d418f4d276682a0841..b26dcc83bc1b373400d72c46aad21d04fb97f52f 100644 (file)
@@ -22,8 +22,8 @@
 #include <linux/io.h>
 #include <asm/mach/irq.h>
 
-#include "intr.h"
-#include "dev.h"
+#include "../intr.h"
+#include "../dev.h"
 
 /*
  * Sync point threshold interrupt service function
index 0cf6095d33678f51da127f4feac313f9422bc9e8..56e85395ac24112faf17b4b06a1aa950e671df0b 100644 (file)
@@ -18,8 +18,8 @@
 
 #include <linux/io.h>
 
-#include "dev.h"
-#include "syncpt.h"
+#include "../dev.h"
+#include "../syncpt.h"
 
 /*
  * Write the current syncpoint value back to hw.
index c4e1050f2252679e448e58e1c2b3edffffd08073..de5ec333ce1adc1974001d01bad56d2544d472d3 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
+#include <linux/host1x.h>
 #include <linux/kref.h>
 #include <linux/module.h>
 #include <linux/scatterlist.h>
@@ -27,7 +28,6 @@
 
 #include "channel.h"
 #include "dev.h"
-#include "host1x_bo.h"
 #include "job.h"
 #include "syncpt.h"
 
@@ -264,7 +264,7 @@ static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
 }
 
 static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
-                      unsigned int offset)
+                       unsigned int offset)
 {
        offset *= sizeof(u32);
 
@@ -281,7 +281,7 @@ struct host1x_firewall {
        unsigned int num_relocs;
        struct host1x_reloc *reloc;
 
-       struct host1x_bo *cmdbuf_id;
+       struct host1x_bo *cmdbuf;
        unsigned int offset;
 
        u32 words;
@@ -291,25 +291,37 @@ struct host1x_firewall {
        u32 count;
 };
 
+static int check_register(struct host1x_firewall *fw, unsigned long offset)
+{
+       if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
+               if (!fw->num_relocs)
+                       return -EINVAL;
+
+               if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
+                       return -EINVAL;
+
+               fw->num_relocs--;
+               fw->reloc++;
+       }
+
+       return 0;
+}
+
 static int check_mask(struct host1x_firewall *fw)
 {
        u32 mask = fw->mask;
        u32 reg = fw->reg;
+       int ret;
 
        while (mask) {
                if (fw->words == 0)
                        return -EINVAL;
 
                if (mask & 1) {
-                       if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
-                               if (!fw->num_relocs)
-                                       return -EINVAL;
-                               if (!check_reloc(fw->reloc, fw->cmdbuf_id,
-                                                fw->offset))
-                                       return -EINVAL;
-                               fw->reloc++;
-                               fw->num_relocs--;
-                       }
+                       ret = check_register(fw, reg);
+                       if (ret < 0)
+                               return ret;
+
                        fw->words--;
                        fw->offset++;
                }
@@ -324,19 +336,16 @@ static int check_incr(struct host1x_firewall *fw)
 {
        u32 count = fw->count;
        u32 reg = fw->reg;
+       int ret;
 
        while (count) {
                if (fw->words == 0)
                        return -EINVAL;
 
-               if (fw->job->is_addr_reg(fw->dev, fw->class, reg)) {
-                       if (!fw->num_relocs)
-                               return -EINVAL;
-                       if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
-                               return -EINVAL;
-                       fw->reloc++;
-                       fw->num_relocs--;
-               }
+               ret = check_register(fw, reg);
+               if (ret < 0)
+                       return ret;
+
                reg++;
                fw->words--;
                fw->offset++;
@@ -348,21 +357,17 @@ static int check_incr(struct host1x_firewall *fw)
 
 static int check_nonincr(struct host1x_firewall *fw)
 {
-       int is_addr_reg = fw->job->is_addr_reg(fw->dev, fw->class, fw->reg);
        u32 count = fw->count;
+       int ret;
 
        while (count) {
                if (fw->words == 0)
                        return -EINVAL;
 
-               if (is_addr_reg) {
-                       if (!fw->num_relocs)
-                               return -EINVAL;
-                       if (!check_reloc(fw->reloc, fw->cmdbuf_id, fw->offset))
-                               return -EINVAL;
-                       fw->reloc++;
-                       fw->num_relocs--;
-               }
+               ret = check_register(fw, fw->reg);
+               if (ret < 0)
+                       return ret;
+
                fw->words--;
                fw->offset++;
                count--;
@@ -381,7 +386,7 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
                return 0;
 
        fw->words = g->words;
-       fw->cmdbuf_id = g->bo;
+       fw->cmdbuf = g->bo;
        fw->offset = 0;
 
        while (fw->words && !err) {
@@ -436,10 +441,6 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
                }
        }
 
-       /* No relocs should remain at this point */
-       if (fw->num_relocs)
-               err = -EINVAL;
-
 out:
        return err;
 }
@@ -493,6 +494,10 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
                offset += g->words * sizeof(u32);
        }
 
+       /* No relocs should remain at this point */
+       if (fw.num_relocs)
+               return -EINVAL;
+
        return 0;
 }
 
index fba45f20458e2d1a4b4e83a795c2d948eaa5ea08..33a697d6dcefea290c3bac2981e2399e27f99a14 100644 (file)
@@ -34,15 +34,6 @@ struct host1x_cmdbuf {
        u32 pad;
 };
 
-struct host1x_reloc {
-       struct host1x_bo *cmdbuf;
-       u32 cmdbuf_offset;
-       struct host1x_bo *target;
-       u32 target_offset;
-       u32 shift;
-       u32 pad;
-};
-
 struct host1x_waitchk {
        struct host1x_bo *bo;
        u32 offset;
@@ -55,105 +46,6 @@ struct host1x_job_unpin_data {
        struct sg_table *sgt;
 };
 
-/*
- * Each submit is tracked as a host1x_job.
- */
-struct host1x_job {
-       /* When refcount goes to zero, job can be freed */
-       struct kref ref;
-
-       /* List entry */
-       struct list_head list;
-
-       /* Channel where job is submitted to */
-       struct host1x_channel *channel;
-
-       u32 client;
-
-       /* Gathers and their memory */
-       struct host1x_job_gather *gathers;
-       unsigned int num_gathers;
-
-       /* Wait checks to be processed at submit time */
-       struct host1x_waitchk *waitchk;
-       unsigned int num_waitchk;
-       u32 waitchk_mask;
-
-       /* Array of handles to be pinned & unpinned */
-       struct host1x_reloc *relocarray;
-       unsigned int num_relocs;
-       struct host1x_job_unpin_data *unpins;
-       unsigned int num_unpins;
-
-       dma_addr_t *addr_phys;
-       dma_addr_t *gather_addr_phys;
-       dma_addr_t *reloc_addr_phys;
-
-       /* Sync point id, number of increments and end related to the submit */
-       u32 syncpt_id;
-       u32 syncpt_incrs;
-       u32 syncpt_end;
-
-       /* Maximum time to wait for this job */
-       unsigned int timeout;
-
-       /* Index and number of slots used in the push buffer */
-       unsigned int first_get;
-       unsigned int num_slots;
-
-       /* Copy of gathers */
-       size_t gather_copy_size;
-       dma_addr_t gather_copy;
-       u8 *gather_copy_mapped;
-
-       /* Check if register is marked as an address reg */
-       int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
-
-       /* Request a SETCLASS to this class */
-       u32 class;
-
-       /* Add a channel wait for previous ops to complete */
-       bool serialize;
-};
-/*
- * Allocate memory for a job. Just enough memory will be allocated to
- * accomodate the submit.
- */
-struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
-                                   u32 num_cmdbufs, u32 num_relocs,
-                                   u32 num_waitchks);
-
-/*
- * Add a gather to a job.
- */
-void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
-                          u32 words, u32 offset);
-
-/*
- * Increment reference going to host1x_job.
- */
-struct host1x_job *host1x_job_get(struct host1x_job *job);
-
-/*
- * Decrement reference job, free if goes to zero.
- */
-void host1x_job_put(struct host1x_job *job);
-
-/*
- * Pin memory related to job. This handles relocation of addresses to the
- * host1x address space. Handles both the gather memory and any other memory
- * referred to from the gather buffers.
- *
- * Handles also patching out host waits that would wait for an expired sync
- * point value.
- */
-int host1x_job_pin(struct host1x_job *job, struct device *dev);
-
-/*
- * Unpin memory related to job.
- */
-void host1x_job_unpin(struct host1x_job *job);
-
 /*
  * Dump contents of job to debug output.
  */
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
new file mode 100644 (file)
index 0000000..8a54613
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2013 NVIDIA Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define MIPI_CAL_CTRL                  0x00
+#define MIPI_CAL_CTRL_START            (1 << 0)
+
+#define MIPI_CAL_AUTOCAL_CTRL          0x01
+
+#define MIPI_CAL_STATUS                        0x02
+#define MIPI_CAL_STATUS_DONE           (1 << 16)
+#define MIPI_CAL_STATUS_ACTIVE         (1 <<  0)
+
+#define MIPI_CAL_CONFIG_CSIA           0x05
+#define MIPI_CAL_CONFIG_CSIB           0x06
+#define MIPI_CAL_CONFIG_CSIC           0x07
+#define MIPI_CAL_CONFIG_CSID           0x08
+#define MIPI_CAL_CONFIG_CSIE           0x09
+#define MIPI_CAL_CONFIG_DSIA           0x0e
+#define MIPI_CAL_CONFIG_DSIB           0x0f
+#define MIPI_CAL_CONFIG_DSIC           0x10
+#define MIPI_CAL_CONFIG_DSID           0x11
+
+#define MIPI_CAL_CONFIG_SELECT         (1 << 21)
+#define MIPI_CAL_CONFIG_HSPDOS(x)      (((x) & 0x1f) << 16)
+#define MIPI_CAL_CONFIG_HSPUOS(x)      (((x) & 0x1f) <<  8)
+#define MIPI_CAL_CONFIG_TERMOS(x)      (((x) & 0x1f) <<  0)
+
+#define MIPI_CAL_BIAS_PAD_CFG0         0x16
+#define MIPI_CAL_BIAS_PAD_PDVCLAMP     (1 << 1)
+#define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF (1 << 0)
+
+#define MIPI_CAL_BIAS_PAD_CFG1         0x17
+
+#define MIPI_CAL_BIAS_PAD_CFG2         0x18
+#define MIPI_CAL_BIAS_PAD_PDVREG       (1 << 1)
+
+static const struct module {
+       unsigned long reg;
+} modules[] = {
+       { .reg = MIPI_CAL_CONFIG_CSIA },
+       { .reg = MIPI_CAL_CONFIG_CSIB },
+       { .reg = MIPI_CAL_CONFIG_CSIC },
+       { .reg = MIPI_CAL_CONFIG_CSID },
+       { .reg = MIPI_CAL_CONFIG_CSIE },
+       { .reg = MIPI_CAL_CONFIG_DSIA },
+       { .reg = MIPI_CAL_CONFIG_DSIB },
+       { .reg = MIPI_CAL_CONFIG_DSIC },
+       { .reg = MIPI_CAL_CONFIG_DSID },
+};
+
+struct tegra_mipi {
+       void __iomem *regs;
+       struct mutex lock;
+       struct clk *clk;
+};
+
+static inline unsigned long tegra_mipi_readl(struct tegra_mipi *mipi,
+                                            unsigned long reg)
+{
+       return readl(mipi->regs + (reg << 2));
+}
+
+static inline void tegra_mipi_writel(struct tegra_mipi *mipi,
+                                    unsigned long value, unsigned long reg)
+{
+       writel(value, mipi->regs + (reg << 2));
+}
+
+int tegra_mipi_calibrate(struct device *device)
+{
+       struct platform_device *pdev;
+       unsigned int timeout = 20, i;
+       struct of_phandle_args args;
+       unsigned long value, pads;
+       struct tegra_mipi *mipi;
+       int err;
+
+       err = of_parse_phandle_with_args(device->of_node, "calibrate",
+                                        "#calibrate-cells", 0, &args);
+       if (err < 0)
+               return err;
+
+       pdev = of_find_device_by_node(args.np);
+       if (!pdev) {
+               of_node_put(args.np);
+               return -ENODEV;
+       }
+
+       of_node_put(args.np);
+       pads = args.args[0];
+
+       mipi = platform_get_drvdata(pdev);
+       if (!mipi) {
+               err = -ENODEV;
+               goto out;
+       }
+
+       err = clk_enable(mipi->clk);
+       if (err < 0)
+               goto out;
+
+       mutex_lock(&mipi->lock);
+
+       value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG0);
+       value &= ~MIPI_CAL_BIAS_PAD_PDVCLAMP;
+       value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
+       tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
+
+       value = tegra_mipi_readl(mipi, MIPI_CAL_BIAS_PAD_CFG2);
+       value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
+       tegra_mipi_writel(mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
+
+       for (i = 0; i < ARRAY_SIZE(modules); i++) {
+               if (pads & BIT(i))
+                       value = MIPI_CAL_CONFIG_SELECT |
+                               MIPI_CAL_CONFIG_HSPDOS(0) |
+                               MIPI_CAL_CONFIG_HSPUOS(4) |
+                               MIPI_CAL_CONFIG_TERMOS(5);
+               else
+                       value = 0;
+
+               tegra_mipi_writel(mipi, value, modules[i].reg);
+       }
+
+       tegra_mipi_writel(mipi, MIPI_CAL_CTRL_START, MIPI_CAL_CTRL);
+
+       while (timeout) {
+               value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
+               if ((value & MIPI_CAL_STATUS_ACTIVE) == 0 &&
+                   (value & MIPI_CAL_STATUS_DONE) != 0)
+                       break;
+
+               usleep_range(10, 100);
+               timeout--;
+       }
+
+       mutex_unlock(&mipi->lock);
+       clk_disable(mipi->clk);
+
+       if (timeout == 0)
+               err = -ETIMEDOUT;
+       else
+               err = 0;
+
+out:
+       platform_device_put(pdev);
+       return err;
+}
+EXPORT_SYMBOL_GPL(tegra_mipi_calibrate);
+
+static int tegra_mipi_probe(struct platform_device *pdev)
+{
+       struct tegra_mipi *mipi;
+       struct resource *res;
+       int err;
+
+       mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
+       if (!mipi)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mipi->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(mipi->regs))
+               return PTR_ERR(mipi->regs);
+
+       mutex_init(&mipi->lock);
+
+       mipi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(mipi->clk)) {
+               dev_err(&pdev->dev, "failed to get clock\n");
+               return PTR_ERR(mipi->clk);
+       }
+
+       err = clk_prepare(mipi->clk);
+       if (err < 0)
+               return err;
+
+       platform_set_drvdata(pdev, mipi);
+
+       return 0;
+}
+
+static int tegra_mipi_remove(struct platform_device *pdev)
+{
+       struct tegra_mipi *mipi = platform_get_drvdata(pdev);
+
+       clk_unprepare(mipi->clk);
+
+       return 0;
+}
+
+static struct of_device_id tegra_mipi_of_match[] = {
+       { .compatible = "nvidia,tegra114-mipi", },
+       { },
+};
+
+struct platform_driver tegra_mipi_driver = {
+       .driver = {
+               .name = "tegra-mipi",
+               .of_match_table = tegra_mipi_of_match,
+       },
+       .probe = tegra_mipi_probe,
+       .remove = tegra_mipi_remove,
+};
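
A minimal editorial sketch (not part of the patch) of how a consumer driver might call the calibration entry point above; it assumes the caller's device tree node carries the "calibrate" phandle and "#calibrate-cells" property that tegra_mipi_calibrate() parses.

/* Hypothetical consumer probe; platform_device and dev_err come from
 * <linux/platform_device.h> and <linux/device.h>. */
static int example_consumer_probe(struct platform_device *pdev)
{
        int err;

        err = tegra_mipi_calibrate(&pdev->dev);
        if (err < 0)
                dev_err(&pdev->dev, "MIPI pad calibration failed: %d\n", err);

        return err;
}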
index 409745b949dbfa7d9ecd897409147ae0225623e3..03cf2922e469bccacd132c9ec07e3190ebafcf4f 100644 (file)
@@ -354,6 +354,25 @@ void host1x_syncpt_deinit(struct host1x *host)
                kfree(sp->name);
 }
 
+/*
+ * Read the maximum value, which indicates how many operations are queued for
+ * this sync point, either in the channel or in a software thread.
+ */
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
+{
+       smp_rmb();
+       return (u32)atomic_read(&sp->max_val);
+}
+
+/*
+ * Read min, which is a shadow of the current sync point value in hardware.
+ */
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
+{
+       smp_rmb();
+       return (u32)atomic_read(&sp->min_val);
+}
+
 int host1x_syncpt_nb_pts(struct host1x *host)
 {
        return host->info->nb_pts;
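
For reference, a short editorial sketch (not part of the patch) relating the two shadow values exported above: increments that have been queued (max) but not yet reached in hardware (min) are their difference, and the sync point is idle when the two match, as host1x_syncpt_idle() checks.

/* Hypothetical helper using only the accessors added in this hunk. */
static u32 example_syncpt_outstanding(struct host1x_syncpt *sp)
{
        return host1x_syncpt_read_max(sp) - host1x_syncpt_read_min(sp);
}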
index 267c0b9d3647d24f2a3bf7dc81504eacaf957005..4eb933a497fd70022029f5e15c9fb4b06b7063ed 100644 (file)
@@ -20,6 +20,7 @@
 #define __HOST1X_SYNCPT_H
 
 #include <linux/atomic.h>
+#include <linux/host1x.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 
@@ -50,25 +51,6 @@ int host1x_syncpt_init(struct host1x *host);
 /*  Free sync point array */
 void host1x_syncpt_deinit(struct host1x *host);
 
-/*
- * Read max. It indicates how many operations there are in queue, either in
- * channel or in a software thread.
- * */
-static inline u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
-{
-       smp_rmb();
-       return (u32)atomic_read(&sp->max_val);
-}
-
-/*
- * Read min, which is a shadow of the current sync point value in hardware.
- */
-static inline u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
-{
-       smp_rmb();
-       return (u32)atomic_read(&sp->min_val);
-}
-
 /* Return number of sync point supported. */
 int host1x_syncpt_nb_pts(struct host1x *host);
 
@@ -112,9 +94,6 @@ static inline bool host1x_syncpt_idle(struct host1x_syncpt *sp)
        return (min == max);
 }
 
-/* Return pointer to struct denoting sync point id. */
-struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
-
 /* Load current value from hardware to the shadow register. */
 u32 host1x_syncpt_load(struct host1x_syncpt *sp);
 
@@ -130,16 +109,9 @@ void host1x_syncpt_restore(struct host1x *host);
 /* Read current wait base value into shadow register and return it. */
 u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp);
 
-/* Request incrementing a sync point. */
-int host1x_syncpt_incr(struct host1x_syncpt *sp);
-
 /* Indicate future operations by incrementing the sync point max. */
 u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
 
-/* Wait until sync point reaches a threshold value, or a timeout. */
-int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh,
-                       long timeout, u32 *value);
-
 /* Check if sync point id is valid. */
 static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
 {
@@ -149,14 +121,4 @@ static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)
 /* Patch a wait by replacing it with a wait for syncpt 0 value 0 */
 int host1x_syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr);
 
-/* Return id of the sync point */
-u32 host1x_syncpt_id(struct host1x_syncpt *sp);
-
-/* Allocate a sync point for a device. */
-struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
-                                           bool client_managed);
-
-/* Free a sync point. */
-void host1x_syncpt_free(struct host1x_syncpt *sp);
-
 #endif
index 71b70e3a7a7183068dbc18d3b224bf3894444497..92c6e273339b0cd44ff38c4ead4c32a37e20d6ab 100644 (file)
@@ -241,6 +241,7 @@ config HID_HOLTEK
          - Sharkoon Drakonia / Perixx MX-2000 gaming mice
          - Tracer Sniper TRM-503 / NOVA Gaming Slider X200 /
            Zalman ZM-GM1
+         - SHARKOON DarkGlider Gaming mouse
 
 config HOLTEK_FF
        bool "Holtek On Line Grip force feedback support"
@@ -322,7 +323,7 @@ config HID_LCPOWER
 
 config HID_LENOVO_TPKBD
        tristate "Lenovo ThinkPad USB Keyboard with TrackPoint"
-       depends on USB_HID
+       depends on HID
        select NEW_LEDS
        select LEDS_CLASS
        ---help---
@@ -361,19 +362,20 @@ config LOGITECH_FF
          - Logitech WingMan Force 3D
          - Logitech Formula Force EX
          - Logitech WingMan Formula Force GP
-         - Logitech MOMO Force wheel
 
          and if you want to enable force feedback for them.
          Note: if you say N here, this device will still be supported, but without
          force feedback.
 
 config LOGIRUMBLEPAD2_FF
-       bool "Logitech RumblePad/Rumblepad 2 force feedback support"
+       bool "Logitech force feedback support (variant 2)"
        depends on HID_LOGITECH
        select INPUT_FF_MEMLESS
        help
-         Say Y here if you want to enable force feedback support for Logitech
-         RumblePad and Rumblepad 2 devices.
+         Say Y here if you want to enable force feedback support for:
+         - Logitech RumblePad
+         - Logitech Rumblepad 2
+         - Logitech Formula Vibration Feedback Wheel
 
 config LOGIG940_FF
        bool "Logitech Flight System G940 force feedback support"
index 881cf7b4f9a433f8ae3e0f6721b3789aa66731f7..497558127bb3e63609af2b5e1f75265f9e7b1423 100644 (file)
@@ -46,6 +46,12 @@ module_param(iso_layout, uint, 0644);
 MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. "
                "(0 = disabled, [1] = enabled)");
 
+static unsigned int swap_opt_cmd;
+module_param(swap_opt_cmd, uint, 0644);
+MODULE_PARM_DESC(swap_opt_cmd, "Swap the Option (\"Alt\") and Command (\"Flag\") keys. "
+               "(For people who want to keep Windows PC keyboard muscle memory. "
+               "[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+
 struct apple_sc {
        unsigned long quirks;
        unsigned int fn_on;
@@ -150,6 +156,14 @@ static const struct apple_key_translation apple_iso_keyboard[] = {
        { }
 };
 
+static const struct apple_key_translation swapped_option_cmd_keys[] = {
+       { KEY_LEFTALT,  KEY_LEFTMETA },
+       { KEY_LEFTMETA, KEY_LEFTALT },
+       { KEY_RIGHTALT, KEY_RIGHTMETA },
+       { KEY_RIGHTMETA, KEY_RIGHTALT },
+       { }
+};
+
 static const struct apple_key_translation *apple_find_translation(
                const struct apple_key_translation *table, u16 from)
 {
@@ -242,6 +256,14 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
                }
        }
 
+       if (swap_opt_cmd) {
+               trans = apple_find_translation(swapped_option_cmd_keys, usage->code);
+               if (trans) {
+                       input_event(input, usage->type, trans->to, value);
+                       return 1;
+               }
+       }
+
        return 0;
 }
 
index b8470b1a10fe8b40bef83386a0476591e5a067d7..351b8f33a4ac01472b6b90b12c0dde9e5956b4ad 100644 (file)
@@ -319,7 +319,7 @@ static s32 item_sdata(struct hid_item *item)
 
 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 {
-       __u32 raw_value;
+       __s32 raw_value;
        switch (item->tag) {
        case HID_GLOBAL_ITEM_TAG_PUSH:
 
@@ -370,10 +370,11 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
                return 0;
 
        case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
-               /* Units exponent negative numbers are given through a
-                * two's complement.
-                * See "6.2.2.7 Global Items" for more information. */
-               raw_value = item_udata(item);
+               /* Many devices provide unit exponent as a two's complement
+                * nibble due to the common misunderstanding of HID
+                * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
+                * both this and the standard encoding. */
+               raw_value = item_sdata(item);
                if (!(raw_value & 0xfffffff0))
                        parser->global.unit_exponent = hid_snto32(raw_value, 4);
                else
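
A worked editorial example for the hunk above, assuming hid_snto32() sign-extends an n-bit quantity as its use here implies: a device that reports unit exponent 0x0E as a two's-complement nibble means a 10^-2 scale factor, which the nibble path recovers as -2.

/* Hypothetical helper mirroring the nibble decode in the hunk above. */
static s32 example_decode_exponent_nibble(u32 raw_nibble)
{
        /* 0x0E (binary 1110) sign-extends over 4 bits to -2 */
        return hid_snto32(raw_nibble & 0xf, 4);
}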
@@ -1715,6 +1716,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_580) },
        { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
@@ -1752,6 +1754,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
@@ -1869,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
        { }
 };
index f042a6cf8b18c897fce2586f108bd91daaf9d33e..4e49462870abdf2f265c1528ae6c07fa4f783ec8 100644 (file)
@@ -181,7 +181,40 @@ fail:
  */
 static bool elo_broken_firmware(struct usb_device *dev)
 {
-       return use_fw_quirk && le16_to_cpu(dev->descriptor.bcdDevice) == 0x10d;
+       struct usb_device *hub = dev->parent;
+       struct usb_device *child = NULL;
+       u16 fw_lvl = le16_to_cpu(dev->descriptor.bcdDevice);
+       u16 child_vid, child_pid;
+       int i;
+
+       if (!use_fw_quirk)
+               return false;
+       if (fw_lvl != 0x10d)
+               return false;
+
+       /* iterate sibling devices of the touch controller */
+       usb_hub_for_each_child(hub, i, child) {
+               child_vid = le16_to_cpu(child->descriptor.idVendor);
+               child_pid = le16_to_cpu(child->descriptor.idProduct);
+
+               /*
+                * If one of the devices below is attached as a sibling of the
+                * touch controller, then this is a newer IBM 4820 monitor that
+                * does not need the IBM-requested workaround when the firmware
+                * level is 0x010d, aka 'M'.
+                * No other hardware can have this combination.
+                */
+               if (child_vid == 0x04b3) {
+                       switch (child_pid) {
+                       case 0x4676: /* 4820 21x Video */
+                       case 0x4677: /* 4820 51x Video */
+                       case 0x4678: /* 4820 2Lx Video */
+                       case 0x4679: /* 4820 5Lx Video */
+                               return false;
+                       }
+               }
+       }
+       return true;
 }
 
 static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
index 7e6db3cf46f9eb39746fcc4ac4b4e29296caa569..e696566cde46420334d1f416b53c2675ec581a7b 100644 (file)
@@ -27,6 +27,7 @@
  * - USB ID 04d9:a067, sold as Sharkoon Drakonia and Perixx MX-2000
  * - USB ID 04d9:a04a, sold as Tracer Sniper TRM-503, NOVA Gaming Slider X200
  *   and Zalman ZM-GM1
+ * - USB ID 04d9:a081, sold as SHARKOON DarkGlider Gaming mouse
  */
 
 static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
@@ -46,6 +47,7 @@ static __u8 *holtek_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                        }
                        break;
                case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A:
+               case USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081:
                        if (*rsize >= 113 && rdesc[106] == 0xff && rdesc[107] == 0x7f
                                        && rdesc[111] == 0xff && rdesc[112] == 0x7f) {
                                hid_info(hdev, "Fixing up report descriptor\n");
@@ -63,6 +65,8 @@ static const struct hid_device_id holtek_mouse_devices[] = {
                        USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067) },
        { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
                        USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+                       USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, holtek_mouse_devices);
index e60e8d530697fcaf0a7c4a43f42e53b455de6f4d..c99facee7feb0b0e5fd6b7aaea120fb0c0d29364 100644 (file)
 #define USB_VENDOR_ID_GENERAL_TOUCH    0x0dfc
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
 #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101 0x0101
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102 0x0102
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106 0x0106
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A 0x010a
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100
 
 #define USB_VENDOR_ID_GLAB             0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30        0x0038
 #define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD      0xa055
 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A067    0xa067
 #define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A    0xa04a
+#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081    0xa081
 
 #define USB_VENDOR_ID_IMATION          0x0718
 #define USB_DEVICE_ID_DISC_STAKKA      0xd000
 #define USB_DEVICE_ID_DINOVO_EDGE      0xc714
 #define USB_DEVICE_ID_DINOVO_MINI      0xc71f
 #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2     0xca03
+#define USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL 0xca04
 
 #define USB_VENDOR_ID_LUMIO            0x202e
 #define USB_DEVICE_ID_CRYSTALTOUCH     0x0006
 #define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN   0x0003
 
 #define USB_VENDOR_ID_NINTENDO         0x057e
+#define USB_VENDOR_ID_NINTENDO2                0x054c
 #define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
 #define USB_DEVICE_ID_NINTENDO_WIIMOTE2        0x0330
 
 #define USB_DEVICE_ID_SYNAPTICS_COMP_TP        0x0009
 #define USB_DEVICE_ID_SYNAPTICS_WTP    0x0010
 #define USB_DEVICE_ID_SYNAPTICS_DPAD   0x0013
+#define USB_DEVICE_ID_SYNAPTICS_LTS1   0x0af8
+#define USB_DEVICE_ID_SYNAPTICS_LTS2   0x1d10
 
 #define USB_VENDOR_ID_THINGM           0x27b8
 #define USB_DEVICE_ID_BLINK1           0x01ed
 #define USB_VENDOR_ID_PRIMAX   0x0461
 #define USB_DEVICE_ID_PRIMAX_KEYBOARD  0x4e05
 
+#define USB_VENDOR_ID_SIS      0x0457
+#define USB_DEVICE_ID_SIS_TS   0x1013
+
 #endif
index 8741d953dcc80acb552bac187ffe6997ad95ca4e..d97f2323af573ecf229f9d76b536634c57c26e17 100644 (file)
@@ -192,6 +192,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
        return -EINVAL;
 }
 
+
 /**
  * hidinput_calc_abs_res - calculate an absolute axis resolution
  * @field: the HID report field to calculate resolution for
@@ -234,23 +235,17 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
        case ABS_MT_TOOL_Y:
        case ABS_MT_TOUCH_MAJOR:
        case ABS_MT_TOUCH_MINOR:
-               if (field->unit & 0xffffff00)           /* Not a length */
-                       return 0;
-               unit_exponent += hid_snto32(field->unit >> 4, 4) - 1;
-               switch (field->unit & 0xf) {
-               case 0x1:                               /* If centimeters */
+               if (field->unit == 0x11) {              /* If centimeters */
                        /* Convert to millimeters */
                        unit_exponent += 1;
-                       break;
-               case 0x3:                               /* If inches */
+               } else if (field->unit == 0x13) {       /* If inches */
                        /* Convert to millimeters */
                        prev = physical_extents;
                        physical_extents *= 254;
                        if (physical_extents < prev)
                                return 0;
                        unit_exponent -= 1;
-                       break;
-               default:
+               } else {
                        return 0;
                }
                break;
index 31cf29a6ba17551ff5244a0f243d48a4719e662c..2d25b6cbbc051910a8eeaaab0525605c77a88da1 100644 (file)
 #include <linux/module.h>
 #include <linux/sysfs.h>
 #include <linux/device.h>
-#include <linux/usb.h>
 #include <linux/hid.h>
 #include <linux/input.h>
 #include <linux/leds.h>
-#include "usbhid/usbhid.h"
 
 #include "hid-ids.h"
 
@@ -41,10 +39,9 @@ static int tpkbd_input_mapping(struct hid_device *hdev,
                struct hid_input *hi, struct hid_field *field,
                struct hid_usage *usage, unsigned long **bit, int *max)
 {
-       struct usbhid_device *uhdev;
-
-       uhdev = (struct usbhid_device *) hdev->driver_data;
-       if (uhdev->ifnum == 1 && usage->hid == (HID_UP_BUTTON | 0x0010)) {
+       if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
+               /* mark the device as a pointer device */
+               hid_set_drvdata(hdev, (void *)1);
                map_key_clear(KEY_MICMUTE);
                return 1;
        }
@@ -339,7 +336,7 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
        struct tpkbd_data_pointer *data_pointer;
        size_t name_sz = strlen(dev_name(dev)) + 16;
        char *name_mute, *name_micmute;
-       int i, ret;
+       int i;
 
        /* Validate required reports. */
        for (i = 0; i < 4; i++) {
@@ -354,7 +351,9 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
                hid_warn(hdev, "Could not create sysfs group\n");
        }
 
-       data_pointer = kzalloc(sizeof(struct tpkbd_data_pointer), GFP_KERNEL);
+       data_pointer = devm_kzalloc(&hdev->dev,
+                                   sizeof(struct tpkbd_data_pointer),
+                                   GFP_KERNEL);
        if (data_pointer == NULL) {
                hid_err(hdev, "Could not allocate memory for driver data\n");
                return -ENOMEM;
@@ -364,20 +363,13 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
        data_pointer->sensitivity = 0xa0;
        data_pointer->press_speed = 0x38;
 
-       name_mute = kzalloc(name_sz, GFP_KERNEL);
-       if (name_mute == NULL) {
+       name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+       name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL);
+       if (name_mute == NULL || name_micmute == NULL) {
                hid_err(hdev, "Could not allocate memory for led data\n");
-               ret = -ENOMEM;
-               goto err;
+               return -ENOMEM;
        }
        snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
-
-       name_micmute = kzalloc(name_sz, GFP_KERNEL);
-       if (name_micmute == NULL) {
-               hid_err(hdev, "Could not allocate memory for led data\n");
-               ret = -ENOMEM;
-               goto err2;
-       }
        snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
 
        hid_set_drvdata(hdev, data_pointer);
@@ -397,19 +389,12 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
        tpkbd_features_set(hdev);
 
        return 0;
-
-err2:
-       kfree(name_mute);
-err:
-       kfree(data_pointer);
-       return ret;
 }
 
 static int tpkbd_probe(struct hid_device *hdev,
                const struct hid_device_id *id)
 {
        int ret;
-       struct usbhid_device *uhdev;
 
        ret = hid_parse(hdev);
        if (ret) {
@@ -423,9 +408,8 @@ static int tpkbd_probe(struct hid_device *hdev,
                goto err;
        }
 
-       uhdev = (struct usbhid_device *) hdev->driver_data;
-
-       if (uhdev->ifnum == 1) {
+       if (hid_get_drvdata(hdev)) {
+               hid_set_drvdata(hdev, NULL);
                ret = tpkbd_probe_tp(hdev);
                if (ret)
                        goto err_hid;
@@ -449,17 +433,11 @@ static void tpkbd_remove_tp(struct hid_device *hdev)
        led_classdev_unregister(&data_pointer->led_mute);
 
        hid_set_drvdata(hdev, NULL);
-       kfree(data_pointer->led_micmute.name);
-       kfree(data_pointer->led_mute.name);
-       kfree(data_pointer);
 }
 
 static void tpkbd_remove(struct hid_device *hdev)
 {
-       struct usbhid_device *uhdev;
-
-       uhdev = (struct usbhid_device *) hdev->driver_data;
-       if (uhdev->ifnum == 1)
+       if (hid_get_drvdata(hdev))
                tpkbd_remove_tp(hdev);
 
        hid_hw_stop(hdev);
index 6f12ecd36c8834fa9c9cb77b79d883dd4c246dc8..06eb45fa6331fee64152754a5ab9bb2275e3a61c 100644 (file)
@@ -45,7 +45,9 @@
 /* Size of the original descriptors of the Driving Force (and Pro) wheels */
 #define DF_RDESC_ORIG_SIZE     130
 #define DFP_RDESC_ORIG_SIZE    97
+#define FV_RDESC_ORIG_SIZE     130
 #define MOMO_RDESC_ORIG_SIZE   87
+#define MOMO2_RDESC_ORIG_SIZE  87
 
 /* Fixed report descriptors for Logitech Driving Force (and Pro)
  * wheel controllers
@@ -170,6 +172,73 @@ static __u8 dfp_rdesc_fixed[] = {
 0xC0                /*  End Collection                          */
 };
 
+static __u8 fv_rdesc_fixed[] = {
+0x05, 0x01,         /*  Usage Page (Desktop),                   */
+0x09, 0x04,         /*  Usage (Joystik),                        */
+0xA1, 0x01,         /*  Collection (Application),               */
+0xA1, 0x02,         /*      Collection (Logical),               */
+0x95, 0x01,         /*          Report Count (1),               */
+0x75, 0x0A,         /*          Report Size (10),               */
+0x15, 0x00,         /*          Logical Minimum (0),            */
+0x26, 0xFF, 0x03,   /*          Logical Maximum (1023),         */
+0x35, 0x00,         /*          Physical Minimum (0),           */
+0x46, 0xFF, 0x03,   /*          Physical Maximum (1023),        */
+0x09, 0x30,         /*          Usage (X),                      */
+0x81, 0x02,         /*          Input (Variable),               */
+0x95, 0x0C,         /*          Report Count (12),              */
+0x75, 0x01,         /*          Report Size (1),                */
+0x25, 0x01,         /*          Logical Maximum (1),            */
+0x45, 0x01,         /*          Physical Maximum (1),           */
+0x05, 0x09,         /*          Usage Page (Button),            */
+0x19, 0x01,         /*          Usage Minimum (01h),            */
+0x29, 0x0C,         /*          Usage Maximum (0Ch),            */
+0x81, 0x02,         /*          Input (Variable),               */
+0x95, 0x02,         /*          Report Count (2),               */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),             */
+0x09, 0x01,         /*          Usage (01h),                    */
+0x81, 0x02,         /*          Input (Variable),               */
+0x09, 0x02,         /*          Usage (02h),                    */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),          */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),         */
+0x95, 0x01,         /*          Report Count (1),               */
+0x75, 0x08,         /*          Report Size (8),                */
+0x81, 0x02,         /*          Input (Variable),               */
+0x05, 0x01,         /*          Usage Page (Desktop),           */
+0x25, 0x07,         /*          Logical Maximum (7),            */
+0x46, 0x3B, 0x01,   /*          Physical Maximum (315),         */
+0x75, 0x04,         /*          Report Size (4),                */
+0x65, 0x14,         /*          Unit (Degrees),                 */
+0x09, 0x39,         /*          Usage (Hat Switch),             */
+0x81, 0x42,         /*          Input (Variable, Null State),   */
+0x75, 0x01,         /*          Report Size (1),                */
+0x95, 0x04,         /*          Report Count (4),               */
+0x65, 0x00,         /*          Unit,                           */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),             */
+0x09, 0x01,         /*          Usage (01h),                    */
+0x25, 0x01,         /*          Logical Maximum (1),            */
+0x45, 0x01,         /*          Physical Maximum (1),           */
+0x81, 0x02,         /*          Input (Variable),               */
+0x05, 0x01,         /*          Usage Page (Desktop),           */
+0x95, 0x01,         /*          Report Count (1),               */
+0x75, 0x08,         /*          Report Size (8),                */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),          */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),         */
+0x09, 0x31,         /*          Usage (Y),                      */
+0x81, 0x02,         /*          Input (Variable),               */
+0x09, 0x32,         /*          Usage (Z),                      */
+0x81, 0x02,         /*          Input (Variable),               */
+0xC0,               /*      End Collection,                     */
+0xA1, 0x02,         /*      Collection (Logical),               */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),          */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),         */
+0x95, 0x07,         /*          Report Count (7),               */
+0x75, 0x08,         /*          Report Size (8),                */
+0x09, 0x03,         /*          Usage (03h),                    */
+0x91, 0x02,         /*          Output (Variable),              */
+0xC0,               /*      End Collection,                     */
+0xC0                /*  End Collection                          */
+};
+
 static __u8 momo_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),               */
 0x09, 0x04,         /*  Usage (Joystik),                    */
@@ -216,6 +285,54 @@ static __u8 momo_rdesc_fixed[] = {
 0xC0                /*  End Collection                      */
 };
 
+static __u8 momo2_rdesc_fixed[] = {
+0x05, 0x01,         /*  Usage Page (Desktop),               */
+0x09, 0x04,         /*  Usage (Joystik),                    */
+0xA1, 0x01,         /*  Collection (Application),           */
+0xA1, 0x02,         /*      Collection (Logical),           */
+0x95, 0x01,         /*          Report Count (1),           */
+0x75, 0x0A,         /*          Report Size (10),           */
+0x15, 0x00,         /*          Logical Minimum (0),        */
+0x26, 0xFF, 0x03,   /*          Logical Maximum (1023),     */
+0x35, 0x00,         /*          Physical Minimum (0),       */
+0x46, 0xFF, 0x03,   /*          Physical Maximum (1023),    */
+0x09, 0x30,         /*          Usage (X),                  */
+0x81, 0x02,         /*          Input (Variable),           */
+0x95, 0x0A,         /*          Report Count (10),          */
+0x75, 0x01,         /*          Report Size (1),            */
+0x25, 0x01,         /*          Logical Maximum (1),        */
+0x45, 0x01,         /*          Physical Maximum (1),       */
+0x05, 0x09,         /*          Usage Page (Button),        */
+0x19, 0x01,         /*          Usage Minimum (01h),        */
+0x29, 0x0A,         /*          Usage Maximum (0Ah),        */
+0x81, 0x02,         /*          Input (Variable),           */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),         */
+0x09, 0x00,         /*          Usage (00h),                */
+0x95, 0x04,         /*          Report Count (4),           */
+0x81, 0x02,         /*          Input (Variable),           */
+0x95, 0x01,         /*          Report Count (1),           */
+0x75, 0x08,         /*          Report Size (8),            */
+0x26, 0xFF, 0x00,   /*          Logical Maximum (255),      */
+0x46, 0xFF, 0x00,   /*          Physical Maximum (255),     */
+0x09, 0x01,         /*          Usage (01h),                */
+0x81, 0x02,         /*          Input (Variable),           */
+0x05, 0x01,         /*          Usage Page (Desktop),       */
+0x09, 0x31,         /*          Usage (Y),                  */
+0x81, 0x02,         /*          Input (Variable),           */
+0x09, 0x32,         /*          Usage (Z),                  */
+0x81, 0x02,         /*          Input (Variable),           */
+0x06, 0x00, 0xFF,   /*          Usage Page (FF00h),         */
+0x09, 0x00,         /*          Usage (00h),                */
+0x81, 0x02,         /*          Input (Variable),           */
+0xC0,               /*      End Collection,                 */
+0xA1, 0x02,         /*      Collection (Logical),           */
+0x09, 0x02,         /*          Usage (02h),                */
+0x95, 0x07,         /*          Report Count (7),           */
+0x91, 0x02,         /*          Output (Variable),          */
+0xC0,               /*      End Collection,                 */
+0xC0                /*  End Collection                      */
+};
+
 /*
  * Certain Logitech keyboards send in report #3 keys which are far
  * above the logical maximum described in descriptor. This extends
@@ -275,6 +392,24 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                }
                break;
 
+       case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+               if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
+                       hid_info(hdev,
+                               "fixing up Logitech Momo Racing Force (Black) report descriptor\n");
+                       rdesc = momo2_rdesc_fixed;
+                       *rsize = sizeof(momo2_rdesc_fixed);
+               }
+               break;
+
+       case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
+               if (*rsize == FV_RDESC_ORIG_SIZE) {
+                       hid_info(hdev,
+                               "fixing up Logitech Formula Vibration report descriptor\n");
+                       rdesc = fv_rdesc_fixed;
+                       *rsize = sizeof(fv_rdesc_fixed);
+               }
+               break;
+
        case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
                if (*rsize == DFP_RDESC_ORIG_SIZE) {
                        hid_info(hdev,
@@ -492,6 +627,7 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi,
                case USB_DEVICE_ID_LOGITECH_G27_WHEEL:
                case USB_DEVICE_ID_LOGITECH_WII_WHEEL:
                case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
+               case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
                        field->application = HID_GD_MULTIAXIS;
                        break;
                default:
@@ -639,6 +775,8 @@ static const struct hid_device_id lg_devices[] = {
                .driver_data = LG_NOGET | LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2),
                .driver_data = LG_FF4 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL),
+               .driver_data = LG_FF2 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL),
                .driver_data = LG_FF4 },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL),
index 1a42eaa6ca0234a054631544732db69484c5eb82..0e3fb1a7e42174dd1dfb953e7209fc14ab50ed7a 100644 (file)
@@ -95,7 +95,7 @@ int lg2ff_init(struct hid_device *hid)
 
        hid_hw_request(hid, report, HID_REQ_SET_REPORT);
 
-       hid_info(hid, "Force feedback for Logitech RumblePad/Rumblepad 2 by Anssi Hannula <anssi.hannula@gmail.com>\n");
+       hid_info(hid, "Force feedback for Logitech variant 2 rumble devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
 
        return 0;
 }
index 5e5fe1b8eebb73e5d4533997a889eee87d637150..cb3250c5a397f9ba19b5974792e1cee374e9f9f0 100644 (file)
@@ -250,12 +250,12 @@ static struct mt_class mt_classes[] = {
        { .name = MT_CLS_GENERALTOUCH_TWOFINGERS,
                .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
                        MT_QUIRK_VALID_IS_INRANGE |
-                       MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+                       MT_QUIRK_SLOT_IS_CONTACTID,
                .maxcontacts = 2
        },
        { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
                .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
-                       MT_QUIRK_SLOT_IS_CONTACTNUMBER
+                       MT_QUIRK_SLOT_IS_CONTACTID
        },
 
        { .name = MT_CLS_FLATFROG,
@@ -1173,6 +1173,21 @@ static const struct hid_device_id mt_devices[] = {
        { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
                MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
                        USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) },
+       { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0101) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0102) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_0106) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_010A) },
+       { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
+               MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+                       USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100) },
 
        /* Gametel game controller */
        { .driver_data = MT_CLS_NSMU,
index 602c188e9d86cafedde93366bf3d114e69c3e552..6101816a7ddd8a96ae8ad0e9ecfa297c29fa637f 100644 (file)
@@ -382,7 +382,7 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp,
 }
 #define PROFILE_ATTR(number)                                   \
 static struct bin_attribute bin_attr_profile##number = {       \
-       .attr = { .name = "profile##number", .mode = 0660 },    \
+       .attr = { .name = "profile" #number, .mode = 0660 },    \
        .size = sizeof(struct kone_profile),                    \
        .read = kone_sysfs_read_profilex,                       \
        .write = kone_sysfs_write_profilex,                     \
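
The sysfs name fix above (repeated below for the koneplus, kovaplus and pyra drivers) hinges on a C preprocessor rule: neither macro parameters nor the ## paste operator are expanded inside a string literal, so the old code registered every profile attribute under the literal name "profile##number". The stringize operator # plus adjacent string-literal concatenation yields the intended "profile1", "profile2", and so on. A minimal standalone illustration, not part of the patch:

/* Demonstrates why "profile##number" stays literal while "profile" #number
 * concatenates: # stringizes the macro argument, and adjacent string
 * literals are merged by the compiler. */
#include <stdio.h>

#define NAME_BROKEN(number) "profile##number"
#define NAME_FIXED(number)  "profile" #number

int main(void)
{
	printf("%s\n", NAME_BROKEN(3));  /* prints: profile##number */
	printf("%s\n", NAME_FIXED(3));   /* prints: profile3 */
	return 0;
}
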
index 5ddf605b6b890b15c2b9f94783072d951908e5db..5e99fcdc71b9cf0631cb7adcc8b61a22c36ae728 100644 (file)
@@ -229,13 +229,13 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
 
 #define PROFILE_ATTR(number)                                           \
 static struct bin_attribute bin_attr_profile##number##_settings = {    \
-       .attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+       .attr = { .name = "profile" #number "_settings", .mode = 0440 },        \
        .size = KONEPLUS_SIZE_PROFILE_SETTINGS,                         \
        .read = koneplus_sysfs_read_profilex_settings,                  \
        .private = &profile_numbers[number-1],                          \
 };                                                                     \
 static struct bin_attribute bin_attr_profile##number##_buttons = {     \
-       .attr = { .name = "profile##number##_buttons", .mode = 0440 },  \
+       .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
        .size = KONEPLUS_SIZE_PROFILE_BUTTONS,                          \
        .read = koneplus_sysfs_read_profilex_buttons,                   \
        .private = &profile_numbers[number-1],                          \
index 515bc03136c0c6497b2ada738f6f8a10f7368311..0c8e1ef0b67d14fdb1c3bf2a6b575fdea94faf8e 100644 (file)
@@ -257,13 +257,13 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
 
 #define PROFILE_ATTR(number)                                           \
 static struct bin_attribute bin_attr_profile##number##_settings = {    \
-       .attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+       .attr = { .name = "profile" #number "_settings", .mode = 0440 },        \
        .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,                         \
        .read = kovaplus_sysfs_read_profilex_settings,                  \
        .private = &profile_numbers[number-1],                          \
 };                                                                     \
 static struct bin_attribute bin_attr_profile##number##_buttons = {     \
-       .attr = { .name = "profile##number##_buttons", .mode = 0440 },  \
+       .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
        .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,                          \
        .read = kovaplus_sysfs_read_profilex_buttons,                   \
        .private = &profile_numbers[number-1],                          \
index 5a6dbbeee790d05ae7abbfb8e045b758b6a6f5ba..1a07e07d99a06c8972a2d80b8fefa8aa4f4b3848 100644 (file)
@@ -225,13 +225,13 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
 
 #define PROFILE_ATTR(number)                                           \
 static struct bin_attribute bin_attr_profile##number##_settings = {    \
-       .attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+       .attr = { .name = "profile" #number "_settings", .mode = 0440 },        \
        .size = PYRA_SIZE_PROFILE_SETTINGS,                             \
        .read = pyra_sysfs_read_profilex_settings,                      \
        .private = &profile_numbers[number-1],                          \
 };                                                                     \
 static struct bin_attribute bin_attr_profile##number##_buttons = {     \
-       .attr = { .name = "profile##number##_buttons", .mode = 0440 },  \
+       .attr = { .name = "profile" #number "_buttons", .mode = 0440 }, \
        .size = PYRA_SIZE_PROFILE_BUTTONS,                              \
        .read = pyra_sysfs_read_profilex_buttons,                       \
        .private = &profile_numbers[number-1],                          \
index b18320db5f7d18cba708ecd306ee3f394abebddb..bc37a1800166067af660aa108d2f528c8628a260 100644 (file)
@@ -419,21 +419,14 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf,
  */
 static int sixaxis_set_operational_usb(struct hid_device *hdev)
 {
-       struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
-       struct usb_device *dev = interface_to_usbdev(intf);
-       __u16 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
        int ret;
        char *buf = kmalloc(18, GFP_KERNEL);
 
        if (!buf)
                return -ENOMEM;
 
-       ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
-                                HID_REQ_GET_REPORT,
-                                USB_DIR_IN | USB_TYPE_CLASS |
-                                USB_RECIP_INTERFACE,
-                                (3 << 8) | 0xf2, ifnum, buf, 17,
-                                USB_CTRL_GET_TIMEOUT);
+       ret = hdev->hid_get_raw_report(hdev, 0xf2, buf, 17, HID_FEATURE_REPORT);
+
        if (ret < 0)
                hid_err(hdev, "can't set operational mode\n");
 
index abb20db2b443ccdcc34159a97fcc83307db65c40..1446f526ee8bbade2290b615fc14b6aae35dc09f 100644 (file)
@@ -834,7 +834,8 @@ static void wiimote_init_set_type(struct wiimote_data *wdata,
                goto done;
        }
 
-       if (vendor == USB_VENDOR_ID_NINTENDO) {
+       if (vendor == USB_VENDOR_ID_NINTENDO ||
+           vendor == USB_VENDOR_ID_NINTENDO2) {
                if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) {
                        devtype = WIIMOTE_DEV_GEN10;
                        goto done;
@@ -1855,6 +1856,8 @@ static void wiimote_hid_remove(struct hid_device *hdev)
 static const struct hid_device_id wiimote_hid_devices[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
                                USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO2,
+                               USB_DEVICE_ID_NINTENDO_WIIMOTE) },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
                                USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
        { }
index 2e7d644dba18a6fc1169c7b83ee7eded30f6e09f..71adf9e60b13f4aafa22ba94183bb4dcb8a3a21d 100644 (file)
@@ -119,12 +119,22 @@ static const struct wiimod_ops wiimod_keys = {
  * the rumble motor, this flag shouldn't be set.
  */
 
+/* used by wiimod_rumble and wiipro_rumble */
+static void wiimod_rumble_worker(struct work_struct *work)
+{
+       struct wiimote_data *wdata = container_of(work, struct wiimote_data,
+                                                 rumble_worker);
+
+       spin_lock_irq(&wdata->state.lock);
+       wiiproto_req_rumble(wdata, wdata->state.cache_rumble);
+       spin_unlock_irq(&wdata->state.lock);
+}
+
 static int wiimod_rumble_play(struct input_dev *dev, void *data,
                              struct ff_effect *eff)
 {
        struct wiimote_data *wdata = input_get_drvdata(dev);
        __u8 value;
-       unsigned long flags;
 
        /*
         * The wiimote supports only a single rumble motor so if any magnitude
@@ -137,9 +147,10 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data,
        else
                value = 0;
 
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiiproto_req_rumble(wdata, value);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
+       /* Locking state.lock here might deadlock with input_event() calls.
+        * schedule_work acts as barrier. Merging multiple changes is fine. */
+       wdata->state.cache_rumble = value;
+       schedule_work(&wdata->rumble_worker);
 
        return 0;
 }
@@ -147,6 +158,8 @@ static int wiimod_rumble_play(struct input_dev *dev, void *data,
 static int wiimod_rumble_probe(const struct wiimod_ops *ops,
                               struct wiimote_data *wdata)
 {
+       INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
+
        set_bit(FF_RUMBLE, wdata->input->ffbit);
        if (input_ff_create_memless(wdata->input, NULL, wiimod_rumble_play))
                return -ENOMEM;
@@ -159,6 +172,8 @@ static void wiimod_rumble_remove(const struct wiimod_ops *ops,
 {
        unsigned long flags;
 
+       cancel_work_sync(&wdata->rumble_worker);
+
        spin_lock_irqsave(&wdata->state.lock, flags);
        wiiproto_req_rumble(wdata, 0);
        spin_unlock_irqrestore(&wdata->state.lock, flags);
@@ -1731,7 +1746,6 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
 {
        struct wiimote_data *wdata = input_get_drvdata(dev);
        __u8 value;
-       unsigned long flags;
 
        /*
         * The wiimote supports only a single rumble motor so if any magnitude
@@ -1744,9 +1758,10 @@ static int wiimod_pro_play(struct input_dev *dev, void *data,
        else
                value = 0;
 
-       spin_lock_irqsave(&wdata->state.lock, flags);
-       wiiproto_req_rumble(wdata, value);
-       spin_unlock_irqrestore(&wdata->state.lock, flags);
+       /* Locking state.lock here might deadlock with input_event() calls.
+        * schedule_work acts as barrier. Merging multiple changes is fine. */
+       wdata->state.cache_rumble = value;
+       schedule_work(&wdata->rumble_worker);
 
        return 0;
 }
@@ -1756,6 +1771,8 @@ static int wiimod_pro_probe(const struct wiimod_ops *ops,
 {
        int ret, i;
 
+       INIT_WORK(&wdata->rumble_worker, wiimod_rumble_worker);
+
        wdata->extension.input = input_allocate_device();
        if (!wdata->extension.input)
                return -ENOMEM;
@@ -1817,12 +1834,13 @@ static void wiimod_pro_remove(const struct wiimod_ops *ops,
        if (!wdata->extension.input)
                return;
 
+       input_unregister_device(wdata->extension.input);
+       wdata->extension.input = NULL;
+       cancel_work_sync(&wdata->rumble_worker);
+
        spin_lock_irqsave(&wdata->state.lock, flags);
        wiiproto_req_rumble(wdata, 0);
        spin_unlock_irqrestore(&wdata->state.lock, flags);
-
-       input_unregister_device(wdata->extension.input);
-       wdata->extension.input = NULL;
 }
 
 static const struct wiimod_ops wiimod_pro = {
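
The rumble rework above follows the reasoning in the new comments: the force-feedback play callback can be invoked from the input event path, where taking state.lock again may deadlock against input_event(), so the callback only caches the requested magnitude and defers the actual protocol request to a work item, and the remove paths cancel the work before sending the final rumble-off. A condensed sketch of that defer-to-workqueue pattern, using illustrative demo_* names rather than the driver's own:

/* Sketch only: cache the value in the fast path, let a worker talk to the
 * hardware. A real driver would issue its protocol request where pr_info()
 * is called here. */
#include <linux/input.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_dev {
	spinlock_t lock;
	u8 cache_rumble;			/* last requested on/off value */
	struct work_struct rumble_work;
};

static void demo_rumble_worker(struct work_struct *work)
{
	struct demo_dev *d = container_of(work, struct demo_dev, rumble_work);

	spin_lock_irq(&d->lock);
	pr_info("rumble -> %u\n", d->cache_rumble);
	spin_unlock_irq(&d->lock);
}

static int demo_play(struct input_dev *dev, void *data, struct ff_effect *eff)
{
	struct demo_dev *d = input_get_drvdata(dev);

	/* may run with input locks held: only cache and schedule */
	d->cache_rumble = (eff->u.rumble.strong_magnitude ||
			   eff->u.rumble.weak_magnitude) ? 1 : 0;
	schedule_work(&d->rumble_work);
	return 0;
}

/* probe:  INIT_WORK(&d->rumble_work, demo_rumble_worker);
 * remove: cancel_work_sync(&d->rumble_work) before the final rumble-off. */
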
index f1474f372c0bba1c56b3f7bd0c5eda82be29ef85..75db0c4000377f03bf262eb66a5492046aa012c8 100644 (file)
@@ -133,13 +133,15 @@ struct wiimote_state {
        __u8 *cmd_read_buf;
        __u8 cmd_read_size;
 
-       /* calibration data */
+       /* calibration/cache data */
        __u16 calib_bboard[4][3];
+       __u8 cache_rumble;
 };
 
 struct wiimote_data {
        struct hid_device *hdev;
        struct input_dev *input;
+       struct work_struct rumble_worker;
        struct led_classdev *leds[4];
        struct input_dev *accel;
        struct input_dev *ir;
index 8918dd12bb6915d08ab588e04b22b64a24d6e827..6a6dd5cd783343c0804f26311674458a19d6519b 100644 (file)
@@ -308,18 +308,25 @@ static int hidraw_fasync(int fd, struct file *file, int on)
 static void drop_ref(struct hidraw *hidraw, int exists_bit)
 {
        if (exists_bit) {
-               hid_hw_close(hidraw->hid);
                hidraw->exist = 0;
-               if (hidraw->open)
+               if (hidraw->open) {
+                       hid_hw_close(hidraw->hid);
                        wake_up_interruptible(&hidraw->wait);
+               }
        } else {
                --hidraw->open;
        }
-
-       if (!hidraw->open && !hidraw->exist) {
-               device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
-               hidraw_table[hidraw->minor] = NULL;
-               kfree(hidraw);
+       if (!hidraw->open) {
+               if (!hidraw->exist) {
+                       device_destroy(hidraw_class,
+                                       MKDEV(hidraw_major, hidraw->minor));
+                       hidraw_table[hidraw->minor] = NULL;
+                       kfree(hidraw);
+               } else {
+                       /* close device for last reader */
+                       hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
+                       hid_hw_close(hidraw->hid);
+               }
        }
 }
 
index c1336193b04ba3deb92c3edf2468551f3f6933a9..fd7ce374f812ead7960b72a5d00ac3b5908ab221 100644 (file)
@@ -854,10 +854,10 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
                0xF7, 0xF6, 0xDF, 0x3C, 0x67, 0x42, 0x55, 0x45,
                0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE,
        };
-       struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object params[4], *obj;
+       union acpi_object params[4];
        struct acpi_object_list input;
        struct acpi_device *adev;
+       unsigned long long value;
        acpi_handle handle;
 
        handle = ACPI_HANDLE(&client->dev);
@@ -878,22 +878,14 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        params[3].package.count = 0;
        params[3].package.elements = NULL;
 
-       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
+       if (ACPI_FAILURE(acpi_evaluate_integer(handle, "_DSM", &input,
+                                                               &value))) {
                dev_err(&client->dev, "device _DSM execution failed\n");
                return -ENODEV;
        }
 
-       obj = (union acpi_object *)buf.pointer;
-       if (obj->type != ACPI_TYPE_INTEGER) {
-               dev_err(&client->dev, "device _DSM returned invalid type: %d\n",
-                       obj->type);
-               kfree(buf.pointer);
-               return -EINVAL;
-       }
-
-       pdata->hid_descriptor_address = obj->integer.value;
+       pdata->hid_descriptor_address = value;
 
-       kfree(buf.pointer);
        return 0;
 }
 
index 5bf2fb785844919bbc27ad20160b98a998102aa5..93b00d76374cee2b82be0a9deab2a0f8d7c6755b 100644 (file)
@@ -615,7 +615,7 @@ static const struct file_operations uhid_fops = {
 
 static struct miscdevice uhid_misc = {
        .fops           = &uhid_fops,
-       .minor          = MISC_DYNAMIC_MINOR,
+       .minor          = UHID_MINOR,
        .name           = UHID_NAME,
 };
 
@@ -634,4 +634,5 @@ module_exit(uhid_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
 MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
+MODULE_ALIAS_MISCDEV(UHID_MINOR);
 MODULE_ALIAS("devname:" UHID_NAME);
index 07345521f4210f4988627cff4421beedc96d1c06..3fca3be08337d76fdd8ecaec09c3d1693b0ac4ef 100644 (file)
@@ -110,6 +110,9 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_SIS, USB_DEVICE_ID_SIS_TS, HID_QUIRK_NO_INIT_REPORTS },
 
        { 0, 0 }
 };
index 2ebd6ce46108ec4e44797cb695194672ad81bdac..9c8a6bab822866ccab860a04fd276979bd69a115 100644 (file)
@@ -164,7 +164,7 @@ static const u8 abituguru_bank2_max_threshold = 50;
 static const int abituguru_pwm_settings_multiplier[5] = { 0, 1, 1, 1000, 1000 };
 /*
  * Min / Max allowed values for pwm_settings. Note: pwm1 (CPU fan) is a
- * special case the minium allowed pwm% setting for this is 30% (77) on
+ * special case the minimum allowed pwm% setting for this is 30% (77) on
  * some MB's this special case is handled in the code!
  */
 static const u8 abituguru_pwm_min[5] = { 0, 170, 170, 25, 25 };
@@ -517,7 +517,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
 
        ABIT_UGURU_DEBUG(2, "testing bank1 sensor %d\n", (int)sensor_addr);
        /*
-        * Volt sensor test, enable volt low alarm, set min value ridicously
+        * Volt sensor test, enable volt low alarm, set min value ridiculously
         * high, or vica versa if the reading is very high. If its a volt
         * sensor this should always give us an alarm.
         */
@@ -564,7 +564,7 @@ abituguru_detect_bank1_sensor_type(struct abituguru_data *data,
 
        /*
         * Temp sensor test, enable sensor as a temp sensor, set beep value
-        * ridicously low (but not too low, otherwise uguru ignores it).
+        * ridiculously low (but not too low, otherwise uguru ignores it).
         * If its a temp sensor this should always give us an alarm.
         */
        buf[0] = ABIT_UGURU_TEMP_HIGH_ALARM_ENABLE;
index 0cac8c0b001af817b1df36d4d6a780cb673dbc19..4ae74aa8cdc1cf519ad83396cadcfc1a6fe4ca7b 100644 (file)
@@ -176,7 +176,7 @@ struct abituguru3_data {
 
        /*
         * The abituguru3 supports up to 48 sensors, and thus has registers
-        * sets for 48 sensors, for convienence reasons / simplicity of the
+        * sets for 48 sensors, for convenience reasons / simplicity of the
         * code we always read and store all registers for all 48 sensors
         */
 
index a9e3d0152c0b5d23ae6d651c72c8782d60519987..8d40da314a8e5bf5b424a5039e4279ba819433d1 100644 (file)
@@ -381,8 +381,10 @@ static ssize_t show_str(struct device *dev,
                val = resource->oem_info;
                break;
        default:
-               BUG();
+               WARN(1, "Implementation error: unexpected attribute index %d\n",
+                    attr->index);
                val = "";
+               break;
        }
 
        return sprintf(buf, "%s\n", val);
@@ -436,7 +438,9 @@ static ssize_t show_val(struct device *dev,
                val = resource->trip[attr->index - 7] * 1000;
                break;
        default:
-               BUG();
+               WARN(1, "Implementation error: unexpected attribute index %d\n",
+                    attr->index);
+               break;
        }
 
        return sprintf(buf, "%llu\n", val);
@@ -855,7 +859,8 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
                dev_info(&device->dev, "Capping in progress.\n");
                break;
        default:
-               BUG();
+               WARN(1, "Unexpected event %d\n", event);
+               break;
        }
        mutex_unlock(&resource->lock);
 
@@ -991,7 +996,7 @@ static int __init acpi_power_meter_init(void)
 
        result = acpi_bus_register_driver(&acpi_power_meter_driver);
        if (result < 0)
-               return -ENODEV;
+               return result;
 
        return 0;
 }
index 751b1f0264a4dae9a76f49de823c807bcac022fe..04c08c2f79b8a3e16e2b537cd7a6427cb745e36d 100644 (file)
@@ -203,7 +203,6 @@ out_err:
        for (i--; i >= 0; i--)
                device_remove_file(&spi->dev, &ad_input[i].dev_attr);
 
-       spi_set_drvdata(spi, NULL);
        mutex_unlock(&adc->lock);
        return status;
 }
@@ -218,7 +217,6 @@ static int adcxx_remove(struct spi_device *spi)
        for (i = 0; i < 3 + adc->channels; i++)
                device_remove_file(&spi->dev, &ad_input[i].dev_attr);
 
-       spi_set_drvdata(spi, NULL);
        mutex_unlock(&adc->lock);
 
        return 0;
index 3a6d9ef1c16caa30612b15ca4fd698c10ba0a983..b3498acb9ab4823556edea591b1d8d3489667e1e 100644 (file)
@@ -616,7 +616,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
                data->gpio = gpio;
 
                data->last_reading = jiffies;
-       }; /* last_reading */
+       }       /* last_reading */
 
        if (!data->valid ||
            time_after(jiffies, data->last_config + ADM1026_CONFIG_INTERVAL)) {
@@ -700,7 +700,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
                }
 
                data->last_config = jiffies;
-       }; /* last_config */
+       }       /* last_config */
 
        data->valid = 1;
        mutex_unlock(&data->update_lock);
@@ -1791,7 +1791,7 @@ static int adm1026_detect(struct i2c_client *client,
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
                /* We need to be able to do byte I/O */
                return -ENODEV;
-       };
+       }
 
        /* Now, we do the remaining detection. */
 
index addb5a4d5064572212ef44df3534f30c90a9a499..562cc3881d3345465f0605c5f19c36d8b0b1f127 100644 (file)
@@ -700,7 +700,7 @@ static int find_trange_value(int trange)
                if (trange_values[i] == trange)
                        return i;
 
-       return -ENODEV;
+       return -EINVAL;
 }
 
 static struct adt7462_data *adt7462_update_device(struct device *dev)
@@ -1294,9 +1294,8 @@ static ssize_t set_pwm_tmax(struct device *dev,
        /* trange = tmax - tmin */
        tmin = (data->pwm_tmin[attr->index] - 64) * 1000;
        trange_value = find_trange_value(trange - tmin);
-
        if (trange_value < 0)
-               return -EINVAL;
+               return trange_value;
 
        temp = trange_value << ADT7462_PWM_RANGE_SHIFT;
        temp |= data->pwm_trange[attr->index] & ADT7462_PWM_HYST_MASK;
index 98814d12a6040e00a9a56d80686d9392c78b3765..3288f13d2d871679b5eac9d6dd019675ede3c0d5 100644 (file)
@@ -230,6 +230,7 @@ static int send_argument(const char *key)
 
 static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
 {
+       u8 status, data = 0;
        int i;
 
        if (send_command(cmd) || send_argument(key)) {
@@ -237,6 +238,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
                return -EIO;
        }
 
+       /* This has no effect on newer (2012) SMCs */
        if (send_byte(len, APPLESMC_DATA_PORT)) {
                pr_warn("%.4s: read len fail\n", key);
                return -EIO;
@@ -250,6 +252,17 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
                buffer[i] = inb(APPLESMC_DATA_PORT);
        }
 
+       /* Read the data port until bit0 is cleared */
+       for (i = 0; i < 16; i++) {
+               udelay(APPLESMC_MIN_WAIT);
+               status = inb(APPLESMC_CMD_PORT);
+               if (!(status & 0x01))
+                       break;
+               data = inb(APPLESMC_DATA_PORT);
+       }
+       if (i)
+               pr_warn("flushed %d bytes, last value is: %d\n", i, data);
+
        return 0;
 }
 
index 3ad9d849add2d95f96b263b590f41d70f6f78b84..8d9f2a0e8efea262aaef5311133af527f66a7315 100644 (file)
@@ -138,7 +138,7 @@ static inline u8 read_byte(struct i2c_client *client, u8 reg)
                dev_err(&client->dev,
                        "Unable to read from register 0x%02x.\n", reg);
                return 0;
-       };
+       }
        return res & 0xff;
 }
 
@@ -149,7 +149,7 @@ static inline int write_byte(struct i2c_client *client, u8 reg, u8 data)
                dev_err(&client->dev,
                        "Unable to write value 0x%02x to register 0x%02x.\n",
                        data, reg);
-       };
+       }
        return res;
 }
 
@@ -1030,7 +1030,7 @@ static struct asc7621_data *asc7621_update_device(struct device *dev)
                        }
                }
                data->last_high_reading = jiffies;
-       };                      /* last_reading */
+       }                       /* last_reading */
 
        /* Read all the low priority registers. */
 
@@ -1044,7 +1044,7 @@ static struct asc7621_data *asc7621_update_device(struct device *dev)
                        }
                }
                data->last_low_reading = jiffies;
-       };                      /* last_reading */
+       }                       /* last_reading */
 
        data->valid = 1;
 
@@ -1084,11 +1084,11 @@ static void asc7621_init_client(struct i2c_client *client)
                dev_err(&client->dev,
                        "Client (%d,0x%02x) config is locked.\n",
                        i2c_adapter_id(client->adapter), client->addr);
-       };
+       }
        if (!(value & 0x04)) {
                dev_err(&client->dev, "Client (%d,0x%02x) is not ready.\n",
                        i2c_adapter_id(client->adapter), client->addr);
-       };
+       }
 
 /*
  * Start monitoring
index b25c64302cbc1911f5d272e50dc05f8477b326c9..1d7ff46812c3dc9d72abb977b02cbf4db04fbd2c 100644 (file)
@@ -119,7 +119,7 @@ struct atk_data {
        acpi_handle rtmp_handle;
        acpi_handle rvlt_handle;
        acpi_handle rfan_handle;
-       /* new inteface */
+       /* new interface */
        acpi_handle enumerate_handle;
        acpi_handle read_handle;
        acpi_handle write_handle;
index aecb9ea7beb5eee65b24013de3ad0deece59c167..ddff02e3e66f9ad464b13261eaf0a7d0b76b9218 100644 (file)
@@ -147,10 +147,9 @@ static ssize_t atxp1_storevcore(struct device *dev,
 
        /* Calculate VID */
        vid = vid_to_reg(vcore, data->vrm);
-
        if (vid < 0) {
                dev_err(dev, "VID calculation failed.\n");
-               return -1;
+               return vid;
        }
 
        /*
index a26ba7a17c2be824797c728bc415cc2e3fd49bdb..872d76744e30d1a7d603cc6ca332ab2aa76a965f 100644 (file)
@@ -120,7 +120,7 @@ static const u8 DS1621_REG_TEMP[3] = {
 
 /* Each client has this additional data */
 struct ds1621_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        struct mutex update_lock;
        char valid;                     /* !=0 if following fields are valid */
        unsigned long last_updated;     /* In jiffies */
@@ -151,10 +151,10 @@ static inline u16 DS1621_TEMP_TO_REG(long temp, u8 zbits)
        return temp;
 }
 
-static void ds1621_init_client(struct i2c_client *client)
+static void ds1621_init_client(struct ds1621_data *data,
+                              struct i2c_client *client)
 {
        u8 conf, new_conf, sreg, resol;
-       struct ds1621_data *data = i2c_get_clientdata(client);
 
        new_conf = conf = i2c_smbus_read_byte_data(client, DS1621_REG_CONF);
        /* switch to continuous conversion mode */
@@ -197,8 +197,8 @@ static void ds1621_init_client(struct i2c_client *client)
 
 static struct ds1621_data *ds1621_update_client(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds1621_data *data = i2c_get_clientdata(client);
+       struct ds1621_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        u8 new_conf;
 
        mutex_lock(&data->update_lock);
@@ -247,8 +247,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
                        const char *buf, size_t count)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds1621_data *data = i2c_get_clientdata(client);
+       struct ds1621_data *data = dev_get_drvdata(dev);
        long val;
        int err;
 
@@ -258,7 +257,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
 
        mutex_lock(&data->update_lock);
        data->temp[attr->index] = DS1621_TEMP_TO_REG(val, data->zbits);
-       i2c_smbus_write_word_swapped(client, DS1621_REG_TEMP[attr->index],
+       i2c_smbus_write_word_swapped(data->client, DS1621_REG_TEMP[attr->index],
                                     data->temp[attr->index]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -282,16 +281,15 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute *da,
 static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
                          char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds1621_data *data = i2c_get_clientdata(client);
+       struct ds1621_data *data = dev_get_drvdata(dev);
        return scnprintf(buf, PAGE_SIZE, "%hu\n", data->update_interval);
 }
 
 static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
                            const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds1621_data *data = i2c_get_clientdata(client);
+       struct ds1621_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned long convrate;
        s32 err;
        int resol = 0;
@@ -343,8 +341,7 @@ static umode_t ds1621_attribute_visible(struct kobject *kobj,
                                        struct attribute *attr, int index)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ds1621_data *data = i2c_get_clientdata(client);
+       struct ds1621_data *data = dev_get_drvdata(dev);
 
        if (attr == &dev_attr_update_interval.attr)
                if (data->kind == ds1621 || data->kind == ds1625)
@@ -357,52 +354,31 @@ static const struct attribute_group ds1621_group = {
        .attrs = ds1621_attributes,
        .is_visible = ds1621_attribute_visible
 };
+__ATTRIBUTE_GROUPS(ds1621);
 
 static int ds1621_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct ds1621_data *data;
-       int err;
+       struct device *hwmon_dev;
 
        data = devm_kzalloc(&client->dev, sizeof(struct ds1621_data),
                            GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
 
        data->kind = id->driver_data;
+       data->client = client;
 
        /* Initialize the DS1621 chip */
-       ds1621_init_client(client);
-
-       /* Register sysfs hooks */
-       err = sysfs_create_group(&client->dev.kobj, &ds1621_group);
-       if (err)
-               return err;
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove_files;
-       }
-
-       return 0;
-
- exit_remove_files:
-       sysfs_remove_group(&client->dev.kobj, &ds1621_group);
-       return err;
-}
-
-static int ds1621_remove(struct i2c_client *client)
-{
-       struct ds1621_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &ds1621_group);
+       ds1621_init_client(data, client);
 
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          client->name, data,
+                                                          ds1621_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static const struct i2c_device_id ds1621_id[] = {
@@ -422,7 +398,6 @@ static struct i2c_driver ds1621_driver = {
                .name   = "ds1621",
        },
        .probe          = ds1621_probe,
-       .remove         = ds1621_remove,
        .id_table       = ds1621_id,
 };
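
The ds1621 conversion keeps its hand-rolled ds1621_group because it needs the is_visible hook, and only uses __ATTRIBUTE_GROUPS(ds1621) to generate the NULL-terminated ds1621_groups[] array that devm_hwmon_device_register_with_groups() consumes; drivers without an is_visible callback (ina209 further down uses ATTRIBUTE_GROUPS) can have both the group and the array generated. For illustration, the two sysfs.h helpers expand to roughly the following (see include/linux/sysfs.h for the authoritative definitions):

/* Approximate expansion of __ATTRIBUTE_GROUPS(ds1621): */
static const struct attribute_group *ds1621_groups[] = {
	&ds1621_group,
	NULL,
};

/* ATTRIBUTE_GROUPS(foo) additionally generates the group itself from a
 * foo_attrs[] array, roughly: */
static const struct attribute_group foo_group = {
	.attrs = foo_attrs,
};
static const struct attribute_group *foo_groups[] = {
	&foo_group,
	NULL,
};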
 
index 142e1cb8dea7e6ed641243588d525d6405d2124d..90ec1173b8a125c629542e079cac66872ea60ec4 100644 (file)
@@ -21,7 +21,6 @@
  *
  * TODO
  *     -       cache alarm and critical limit registers
- *     -       add emc1404 support
  */
 
 #include <linux/module.h>
@@ -40,7 +39,8 @@
 #define THERMAL_REVISION_REG   0xff
 
 struct thermal_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
+       const struct attribute_group *groups[3];
        struct mutex mutex;
        /*
         * Cache the hyst value so we don't keep re-reading it. In theory
@@ -53,10 +53,11 @@ struct thermal_data {
 static ssize_t show_temp(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
-       int retval = i2c_smbus_read_byte_data(client, sda->index);
+       struct thermal_data *data = dev_get_drvdata(dev);
+       int retval;
 
+       retval = i2c_smbus_read_byte_data(data->client, sda->index);
        if (retval < 0)
                return retval;
        return sprintf(buf, "%d000\n", retval);
@@ -65,27 +66,27 @@ static ssize_t show_temp(struct device *dev,
 static ssize_t show_bit(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
-       int retval = i2c_smbus_read_byte_data(client, sda->nr);
+       struct thermal_data *data = dev_get_drvdata(dev);
+       int retval;
 
+       retval = i2c_smbus_read_byte_data(data->client, sda->nr);
        if (retval < 0)
                return retval;
-       retval &= sda->index;
-       return sprintf(buf, "%d\n", retval ? 1 : 0);
+       return sprintf(buf, "%d\n", !!(retval & sda->index));
 }
 
 static ssize_t store_temp(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
-       struct i2c_client *client = to_i2c_client(dev);
+       struct thermal_data *data = dev_get_drvdata(dev);
        unsigned long val;
        int retval;
 
        if (kstrtoul(buf, 10, &val))
                return -EINVAL;
-       retval = i2c_smbus_write_byte_data(client, sda->index,
+       retval = i2c_smbus_write_byte_data(data->client, sda->index,
                                        DIV_ROUND_CLOSEST(val, 1000));
        if (retval < 0)
                return retval;
@@ -95,9 +96,9 @@ static ssize_t store_temp(struct device *dev,
 static ssize_t store_bit(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct thermal_data *data = i2c_get_clientdata(client);
        struct sensor_device_attribute_2 *sda = to_sensor_dev_attr_2(attr);
+       struct thermal_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned long val;
        int retval;
 
@@ -124,9 +125,9 @@ fail:
 static ssize_t show_hyst(struct device *dev,
                        struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct thermal_data *data = i2c_get_clientdata(client);
        struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+       struct thermal_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        int retval;
        int hyst;
 
@@ -147,9 +148,9 @@ static ssize_t show_hyst(struct device *dev,
 static ssize_t store_hyst(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct thermal_data *data = i2c_get_clientdata(client);
        struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
+       struct thermal_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        int retval;
        int hyst;
        unsigned long val;
@@ -232,10 +233,26 @@ static SENSOR_DEVICE_ATTR_2(temp3_crit_alarm, S_IRUGO,
 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO | S_IWUSR,
        show_hyst, store_hyst, 0x1A);
 
+static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO | S_IWUSR,
+       show_temp, store_temp, 0x2D);
+static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO | S_IWUSR,
+       show_temp, store_temp, 0x2C);
+static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO | S_IWUSR,
+       show_temp, store_temp, 0x30);
+static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 0x2A);
+static SENSOR_DEVICE_ATTR_2(temp4_min_alarm, S_IRUGO,
+       show_bit, NULL, 0x36, 0x08);
+static SENSOR_DEVICE_ATTR_2(temp4_max_alarm, S_IRUGO,
+       show_bit, NULL, 0x35, 0x08);
+static SENSOR_DEVICE_ATTR_2(temp4_crit_alarm, S_IRUGO,
+       show_bit, NULL, 0x37, 0x08);
+static SENSOR_DEVICE_ATTR(temp4_crit_hyst, S_IRUGO | S_IWUSR,
+       show_hyst, store_hyst, 0x30);
+
 static SENSOR_DEVICE_ATTR_2(power_state, S_IRUGO | S_IWUSR,
        show_bit, store_bit, 0x03, 0x40);
 
-static struct attribute *mid_att_thermal[] = {
+static struct attribute *emc1403_attrs[] = {
        &sensor_dev_attr_temp1_min.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        &sensor_dev_attr_temp1_crit.dev_attr.attr,
@@ -264,8 +281,24 @@ static struct attribute *mid_att_thermal[] = {
        NULL
 };
 
-static const struct attribute_group m_thermal_gr = {
-       .attrs = mid_att_thermal
+static const struct attribute_group emc1403_group = {
+       .attrs = emc1403_attrs,
+};
+
+static struct attribute *emc1404_attrs[] = {
+       &sensor_dev_attr_temp4_min.dev_attr.attr,
+       &sensor_dev_attr_temp4_max.dev_attr.attr,
+       &sensor_dev_attr_temp4_crit.dev_attr.attr,
+       &sensor_dev_attr_temp4_input.dev_attr.attr,
+       &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp4_crit_hyst.dev_attr.attr,
+       NULL
+};
+
+static const struct attribute_group emc1404_group = {
+       .attrs = emc1404_attrs,
 };
 
 static int emc1403_detect(struct i2c_client *client,
@@ -286,10 +319,12 @@ static int emc1403_detect(struct i2c_client *client,
        case 0x23:
                strlcpy(info->type, "emc1423", I2C_NAME_SIZE);
                break;
-       /*
-        * Note: 0x25 is the 1404 which is very similar and this
-        * driver could be extended
-        */
+       case 0x25:
+               strlcpy(info->type, "emc1404", I2C_NAME_SIZE);
+               break;
+       case 0x27:
+               strlcpy(info->type, "emc1424", I2C_NAME_SIZE);
+               break;
        default:
                return -ENODEV;
        }
@@ -304,43 +339,29 @@ static int emc1403_detect(struct i2c_client *client,
 static int emc1403_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
-       int res;
        struct thermal_data *data;
+       struct device *hwmon_dev;
 
        data = devm_kzalloc(&client->dev, sizeof(struct thermal_data),
                            GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->mutex);
        data->hyst_valid = jiffies - 1;         /* Expired */
 
-       res = sysfs_create_group(&client->dev.kobj, &m_thermal_gr);
-       if (res) {
-               dev_warn(&client->dev, "create group failed\n");
-               return res;
-       }
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               res = PTR_ERR(data->hwmon_dev);
-               dev_warn(&client->dev, "register hwmon dev failed\n");
-               goto thermal_error;
-       }
-       dev_info(&client->dev, "EMC1403 Thermal chip found\n");
-       return 0;
-
-thermal_error:
-       sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
-       return res;
-}
+       data->groups[0] = &emc1403_group;
+       if (id->driver_data)
+               data->groups[1] = &emc1404_group;
 
-static int emc1403_remove(struct i2c_client *client)
-{
-       struct thermal_data *data = i2c_get_clientdata(client);
+       hwmon_dev = hwmon_device_register_with_groups(&client->dev,
+                                                     client->name, data,
+                                                     data->groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &m_thermal_gr);
+       dev_info(&client->dev, "%s Thermal chip found\n", id->name);
        return 0;
 }
 
@@ -350,7 +371,9 @@ static const unsigned short emc1403_address_list[] = {
 
 static const struct i2c_device_id emc1403_idtable[] = {
        { "emc1403", 0 },
+       { "emc1404", 1 },
        { "emc1423", 0 },
+       { "emc1424", 1 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
@@ -362,7 +385,6 @@ static struct i2c_driver sensor_emc1403 = {
        },
        .detect = emc1403_detect,
        .probe = emc1403_probe,
-       .remove = emc1403_remove,
        .id_table = emc1403_idtable,
        .address_list = emc1403_address_list,
 };
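
Two small points in the emc1403 changes above: thermal_data now carries a groups[3] array because the base emc1403 group always sits at slot 0, the temp4 group is added at slot 1 only when id->driver_data marks a four-channel part (emc1404/emc1424), and the last slot stays NULL as the list terminator expected by hwmon_device_register_with_groups(); and show_bit() now uses the !! idiom to collapse a masked register read to 0 or 1. A trivial standalone illustration of the latter:

#include <stdio.h>

int main(void)
{
	int reg = 0x35;

	/* !! normalizes any non-zero masked value to exactly 1 */
	printf("%d\n", !!(reg & 0x04));  /* prints 1 (0x35 & 0x04 = 0x04) */
	printf("%d\n", !!(reg & 0x40));  /* prints 0 */
	return 0;
}
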
index 31b221eeee6ca7c4c789cc4aeaf88164136664fd..03d8592810bf04d4ebfe66cd86ea7e1b025185ac 100644 (file)
@@ -2420,7 +2420,6 @@ static int f71882fg_probe(struct platform_device *pdev)
 exit_unregister_sysfs:
        f71882fg_remove(pdev); /* Will unregister the sysfs files for us */
        return err; /* f71882fg_remove() also frees our data */
-       return err;
 }
 
 static int f71882fg_remove(struct platform_device *pdev)
index a837b94977f4e8e59d9f3478a96747fe56de803e..80c42bea90ed59a0a8c89d996e78613df33f8745 100644 (file)
@@ -275,7 +275,7 @@ static bool duty_mode_enabled(u8 pwm_enable)
        case 3: /* Manual, speed mode */
                return false;
        default:
-               BUG();
+               WARN(1, "Unexpected pwm_enable value %d\n", pwm_enable);
                return true;
        }
 }
@@ -291,7 +291,7 @@ static bool auto_mode_enabled(u8 pwm_enable)
        case 4: /* Auto, duty mode */
                return true;
        default:
-               BUG();
+               WARN(1, "Unexpected pwm_enable value %d\n", pwm_enable);
                return false;
        }
 }
index b7d6a5704eb2ff15887f77341e8d8b2e57cdb291..73181be5b30b806a875f497097f120795db5309c 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/hwmon.h>
 #include <linux/gpio.h>
 #include <linux/gpio-fan.h>
+#include <linux/of.h>
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
 
@@ -169,7 +170,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data)
        dev_warn(&fan_data->pdev->dev,
                 "missing speed array entry for GPIO value 0x%x\n", ctrl_val);
 
-       return -EINVAL;
+       return -ENODEV;
 }
 
 static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm)
@@ -309,12 +310,6 @@ exit_unlock:
        return ret;
 }
 
-static ssize_t show_name(struct device *dev,
-                        struct device_attribute *attr, char *buf)
-{
-       return sprintf(buf, "gpio-fan\n");
-}
-
 static DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, show_pwm, set_pwm);
 static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
                   show_pwm_enable, set_pwm_enable);
@@ -324,26 +319,23 @@ static DEVICE_ATTR(fan1_max, S_IRUGO, show_rpm_max, NULL);
 static DEVICE_ATTR(fan1_input, S_IRUGO, show_rpm, NULL);
 static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_rpm, set_rpm);
 
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
 static umode_t gpio_fan_is_visible(struct kobject *kobj,
                                   struct attribute *attr, int index)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct gpio_fan_data *data = dev_get_drvdata(dev);
 
-       if (index == 1 && !data->alarm)
+       if (index == 0 && !data->alarm)
                return 0;
-       if (index > 1 && !data->ctrl)
+       if (index > 0 && !data->ctrl)
                return 0;
 
        return attr->mode;
 }
 
 static struct attribute *gpio_fan_attributes[] = {
-       &dev_attr_name.attr,
-       &dev_attr_fan1_alarm.attr,              /* 1 */
-       &dev_attr_pwm1.attr,                    /* 2 */
+       &dev_attr_fan1_alarm.attr,              /* 0 */
+       &dev_attr_pwm1.attr,                    /* 1 */
        &dev_attr_pwm1_enable.attr,
        &dev_attr_pwm1_mode.attr,
        &dev_attr_fan1_input.attr,
@@ -358,6 +350,11 @@ static const struct attribute_group gpio_fan_group = {
        .is_visible = gpio_fan_is_visible,
 };
 
+static const struct attribute_group *gpio_fan_groups[] = {
+       &gpio_fan_group,
+       NULL
+};
+
 static int fan_ctrl_init(struct gpio_fan_data *fan_data,
                         struct gpio_fan_platform_data *pdata)
 {
@@ -384,7 +381,7 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
        fan_data->pwm_enable = true; /* Enable manual fan speed control. */
        fan_data->speed_index = get_fan_speed_index(fan_data);
        if (fan_data->speed_index < 0)
-               return -ENODEV;
+               return fan_data->speed_index;
 
        return 0;
 }
@@ -539,24 +536,16 @@ static int gpio_fan_probe(struct platform_device *pdev)
                        return err;
        }
 
-       err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_group);
-       if (err)
-               return err;
-
        /* Make this driver part of hwmon class. */
-       fan_data->hwmon_dev = hwmon_device_register(&pdev->dev);
-       if (IS_ERR(fan_data->hwmon_dev)) {
-               err = PTR_ERR(fan_data->hwmon_dev);
-               goto err_remove;
-       }
+       fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
+                                               "gpio-fan", fan_data,
+                                               gpio_fan_groups);
+       if (IS_ERR(fan_data->hwmon_dev))
+               return PTR_ERR(fan_data->hwmon_dev);
 
        dev_info(&pdev->dev, "GPIO fan initialized\n");
 
        return 0;
-
-err_remove:
-       sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_group);
-       return err;
 }
 
 static int gpio_fan_remove(struct platform_device *pdev)
@@ -564,7 +553,6 @@ static int gpio_fan_remove(struct platform_device *pdev)
        struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
 
        hwmon_device_unregister(fan_data->hwmon_dev);
-       sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_group);
 
        return 0;
 }
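
The is_visible renumbering above is a consequence of dropping the driver's own name attribute: the index argument handed to an attribute group's .is_visible callback is the attribute's position in the .attrs[] array, so with the leading name entry gone, fan1_alarm moves to index 0 and pwm1 to index 1 (the name file is now supplied by the hwmon core, since the driver registers through hwmon_device_register_with_groups() with the "gpio-fan" name). A minimal sketch of the convention, with illustrative demo_* names and a hypothetical demo_fan_data:

/* Illustration only: hide the first attribute unless the hardware has it. */
static struct attribute *demo_attrs[] = {
	&dev_attr_fan1_alarm.attr,	/* index 0 */
	&dev_attr_pwm1.attr,		/* index 1 */
	NULL
};

static umode_t demo_is_visible(struct kobject *kobj,
			       struct attribute *attr, int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct demo_fan_data *data = dev_get_drvdata(dev);

	if (index == 0 && !data->alarm_available)	/* no alarm GPIO wired */
		return 0;
	return attr->mode;
}

static const struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_is_visible,
};
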
index 646314f7c8397e05cfb1c89da7786005e6e41b7a..e176a43af63d7700a3f92753732cc16aabd08aeb 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/device.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 #include <linux/kdev_t.h>
 #include <linux/idr.h>
 #include <linux/hwmon.h>
 #define HWMON_ID_PREFIX "hwmon"
 #define HWMON_ID_FORMAT HWMON_ID_PREFIX "%d"
 
-static struct class *hwmon_class;
+struct hwmon_device {
+       const char *name;
+       struct device dev;
+};
+#define to_hwmon_device(d) container_of(d, struct hwmon_device, dev)
+
+static ssize_t
+show_name(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%s\n", to_hwmon_device(dev)->name);
+}
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
+
+static struct attribute *hwmon_dev_attrs[] = {
+       &dev_attr_name.attr,
+       NULL
+};
+
+static umode_t hwmon_dev_name_is_visible(struct kobject *kobj,
+                                        struct attribute *attr, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+
+       if (to_hwmon_device(dev)->name == NULL)
+               return 0;
+
+       return attr->mode;
+}
+
+static struct attribute_group hwmon_dev_attr_group = {
+       .attrs          = hwmon_dev_attrs,
+       .is_visible     = hwmon_dev_name_is_visible,
+};
+
+static const struct attribute_group *hwmon_dev_attr_groups[] = {
+       &hwmon_dev_attr_group,
+       NULL
+};
+
+static void hwmon_dev_release(struct device *dev)
+{
+       kfree(to_hwmon_device(dev));
+}
+
+static struct class hwmon_class = {
+       .name = "hwmon",
+       .owner = THIS_MODULE,
+       .dev_groups = hwmon_dev_attr_groups,
+       .dev_release = hwmon_dev_release,
+};
 
 static DEFINE_IDA(hwmon_ida);
 
 /**
- * hwmon_device_register - register w/ hwmon
- * @dev: the device to register
+ * hwmon_device_register_with_groups - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @groups: List of attribute groups to create
  *
  * hwmon_device_unregister() must be called when the device is no
  * longer needed.
  *
  * Returns the pointer to the new device.
  */
-struct device *hwmon_device_register(struct device *dev)
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+                                 void *drvdata,
+                                 const struct attribute_group **groups)
 {
-       struct device *hwdev;
-       int id;
+       struct hwmon_device *hwdev;
+       int err, id;
 
        id = ida_simple_get(&hwmon_ida, 0, 0, GFP_KERNEL);
        if (id < 0)
                return ERR_PTR(id);
 
-       hwdev = device_create(hwmon_class, dev, MKDEV(0, 0), NULL,
-                             HWMON_ID_FORMAT, id);
+       hwdev = kzalloc(sizeof(*hwdev), GFP_KERNEL);
+       if (hwdev == NULL) {
+               err = -ENOMEM;
+               goto ida_remove;
+       }
 
-       if (IS_ERR(hwdev))
-               ida_simple_remove(&hwmon_ida, id);
+       hwdev->name = name;
+       hwdev->dev.class = &hwmon_class;
+       hwdev->dev.parent = dev;
+       hwdev->dev.groups = groups;
+       hwdev->dev.of_node = dev ? dev->of_node : NULL;
+       dev_set_drvdata(&hwdev->dev, drvdata);
+       dev_set_name(&hwdev->dev, HWMON_ID_FORMAT, id);
+       err = device_register(&hwdev->dev);
+       if (err)
+               goto free;
 
-       return hwdev;
+       return &hwdev->dev;
+
+free:
+       kfree(hwdev);
+ida_remove:
+       ida_simple_remove(&hwmon_ida, id);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
+
+/**
+ * hwmon_device_register - register w/ hwmon
+ * @dev: the device to register
+ *
+ * hwmon_device_unregister() must be called when the device is no
+ * longer needed.
+ *
+ * Returns the pointer to the new device.
+ */
+struct device *hwmon_device_register(struct device *dev)
+{
+       return hwmon_device_register_with_groups(dev, NULL, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(hwmon_device_register);
 
@@ -75,6 +163,69 @@ void hwmon_device_unregister(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(hwmon_device_unregister);
 
+static void devm_hwmon_release(struct device *dev, void *res)
+{
+       struct device *hwdev = *(struct device **)res;
+
+       hwmon_device_unregister(hwdev);
+}
+
+/**
+ * devm_hwmon_device_register_with_groups - register w/ hwmon
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @groups: List of attribute groups to create
+ *
+ * Returns the pointer to the new device. The new device is automatically
+ * unregistered with the parent device.
+ */
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+                                      void *drvdata,
+                                      const struct attribute_group **groups)
+{
+       struct device **ptr, *hwdev;
+
+       if (!dev)
+               return ERR_PTR(-EINVAL);
+
+       ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return ERR_PTR(-ENOMEM);
+
+       hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups);
+       if (IS_ERR(hwdev))
+               goto error;
+
+       *ptr = hwdev;
+       devres_add(dev, ptr);
+       return hwdev;
+
+error:
+       devres_free(ptr);
+       return hwdev;
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
+
+static int devm_hwmon_match(struct device *dev, void *res, void *data)
+{
+       struct device **hwdev = res;
+
+       return *hwdev == data;
+}
+
+/**
+ * devm_hwmon_device_unregister - removes a previously registered hwmon device
+ *
+ * @dev: the parent device of the device to unregister
+ */
+void devm_hwmon_device_unregister(struct device *dev)
+{
+       WARN_ON(devres_release(dev, devm_hwmon_release, devm_hwmon_match, dev));
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_unregister);
+
 static void __init hwmon_pci_quirks(void)
 {
 #if defined CONFIG_X86 && defined CONFIG_PCI
@@ -105,19 +256,21 @@ static void __init hwmon_pci_quirks(void)
 
 static int __init hwmon_init(void)
 {
+       int err;
+
        hwmon_pci_quirks();
 
-       hwmon_class = class_create(THIS_MODULE, "hwmon");
-       if (IS_ERR(hwmon_class)) {
-               pr_err("couldn't create sysfs class\n");
-               return PTR_ERR(hwmon_class);
+       err = class_register(&hwmon_class);
+       if (err) {
+               pr_err("couldn't register hwmon sysfs class\n");
+               return err;
        }
        return 0;
 }
 
 static void __exit hwmon_exit(void)
 {
-       class_destroy(hwmon_class);
+       class_unregister(&hwmon_class);
 }
 
 subsys_initcall(hwmon_init);
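
The reworked core above embeds struct device inside struct hwmon_device and recovers the outer structure with container_of() (the to_hwmon_device() helper); that is what lets hwmon_dev_release() kfree() the whole allocation when the class device is released, and show_name() reach the name string the driver passed in. container_of() is plain pointer arithmetic over offsetof(); a standalone user-space sketch of the idiom:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(): step back from a member pointer by the
 * member's offset to reach the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_device { int id; };

struct demo_hwmon {
	const char *name;
	struct demo_device dev;		/* embedded, like struct device */
};

int main(void)
{
	struct demo_hwmon hw = { .name = "hwmon0", .dev = { .id = 7 } };
	struct demo_device *d = &hw.dev;

	/* recover the enclosing demo_hwmon from the embedded member */
	struct demo_hwmon *outer = container_of(d, struct demo_hwmon, dev);

	printf("%s %d\n", outer->name, outer->dev.id);	/* hwmon0 7 */
	return 0;
}
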
index c6fdd5bd395ecb0db5f72e36be4f9d9e33f3aeaf..5378fdefc1f76d7782d9073e5a98206d953b9e1d 100644 (file)
@@ -63,7 +63,7 @@
 #define INA209_SHUNT_DEFAULT           10000   /* uOhm */
 
 struct ina209_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
 
        struct mutex update_lock;
        bool valid;
@@ -78,8 +78,8 @@ struct ina209_data {
 
 static struct ina209_data *ina209_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ina209_data *data = i2c_get_clientdata(client);
+       struct ina209_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct ina209_data *ret = data;
        s32 val;
        int i;
@@ -234,7 +234,6 @@ static ssize_t ina209_set_interval(struct device *dev,
                                   struct device_attribute *da,
                                   const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct ina209_data *data = ina209_update_device(dev);
        long val;
        u16 regval;
@@ -250,7 +249,8 @@ static ssize_t ina209_set_interval(struct device *dev,
        mutex_lock(&data->update_lock);
        regval = ina209_reg_from_interval(data->regs[INA209_CONFIGURATION],
                                          val);
-       i2c_smbus_write_word_swapped(client, INA209_CONFIGURATION, regval);
+       i2c_smbus_write_word_swapped(data->client, INA209_CONFIGURATION,
+                                    regval);
        data->regs[INA209_CONFIGURATION] = regval;
        data->update_interval = ina209_interval_from_reg(regval);
        mutex_unlock(&data->update_lock);
@@ -260,8 +260,7 @@ static ssize_t ina209_set_interval(struct device *dev,
 static ssize_t ina209_show_interval(struct device *dev,
                                    struct device_attribute *da, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ina209_data *data = i2c_get_clientdata(client);
+       struct ina209_data *data = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", data->update_interval);
 }
@@ -285,9 +284,9 @@ static ssize_t ina209_reset_history(struct device *dev,
                                    const char *buf,
                                    size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ina209_data *data = i2c_get_clientdata(client);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       struct ina209_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        u32 mask = attr->index;
        long val;
        int i, ret;
@@ -312,7 +311,6 @@ static ssize_t ina209_set_value(struct device *dev,
                                const char *buf,
                                size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct ina209_data *data = ina209_update_device(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
        int reg = attr->index;
@@ -332,7 +330,7 @@ static ssize_t ina209_set_value(struct device *dev,
                count = ret;
                goto abort;
        }
-       i2c_smbus_write_word_swapped(client, reg, ret);
+       i2c_smbus_write_word_swapped(data->client, reg, ret);
        data->regs[reg] = ret;
 abort:
        mutex_unlock(&data->update_lock);
@@ -457,7 +455,7 @@ static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
  * Finally, construct an array of pointers to members of the above objects,
  * as required for sysfs_create_group()
  */
-static struct attribute *ina209_attributes[] = {
+static struct attribute *ina209_attrs[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
        &sensor_dev_attr_in0_input_highest.dev_attr.attr,
        &sensor_dev_attr_in0_input_lowest.dev_attr.attr,
@@ -498,10 +496,7 @@ static struct attribute *ina209_attributes[] = {
 
        NULL,
 };
-
-static const struct attribute_group ina209_group = {
-       .attrs = ina209_attributes,
-};
+ATTRIBUTE_GROUPS(ina209);
 
 static void ina209_restore_conf(struct i2c_client *client,
                                struct ina209_data *data)
@@ -565,6 +560,7 @@ static int ina209_probe(struct i2c_client *client,
 {
        struct i2c_adapter *adapter = client->adapter;
        struct ina209_data *data;
+       struct device *hwmon_dev;
        int ret;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
@@ -575,27 +571,23 @@ static int ina209_probe(struct i2c_client *client,
                return -ENOMEM;
 
        i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
        ret = ina209_init_client(client, data);
        if (ret)
                return ret;
 
-       /* Register sysfs hooks */
-       ret = sysfs_create_group(&client->dev.kobj, &ina209_group);
-       if (ret)
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          client->name,
+                                                          data, ina209_groups);
+       if (IS_ERR(hwmon_dev)) {
+               ret = PTR_ERR(hwmon_dev);
                goto out_restore_conf;
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto out_hwmon_device_register;
        }
 
        return 0;
 
-out_hwmon_device_register:
-       sysfs_remove_group(&client->dev.kobj, &ina209_group);
 out_restore_conf:
        ina209_restore_conf(client, data);
        return ret;
@@ -605,8 +597,6 @@ static int ina209_remove(struct i2c_client *client)
 {
        struct ina209_data *data = i2c_get_clientdata(client);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &ina209_group);
        ina209_restore_conf(client, data);
 
        return 0;
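
The ATTRIBUTE_GROUPS(ina209) line above is the stock sysfs helper; as a rough
guide (the exact macro lives in <linux/sysfs.h>), it generates the
attribute_group previously written by hand plus the NULL-terminated
ina209_groups[] array that the new registration call expects:

static const struct attribute_group ina209_group = {
        .attrs = ina209_attrs,
};

static const struct attribute_group *ina209_groups[] = {
        &ina209_group,
        NULL,
};
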
index 70a39a8ac0160e520f6017225c71d1efedebca2c..93d26e8af3e2002b8838c08b13f06787e6021dde 100644 (file)
@@ -78,7 +78,7 @@ struct ina2xx_config {
 };
 
 struct ina2xx_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        const struct ina2xx_config *config;
 
        struct mutex update_lock;
@@ -112,8 +112,8 @@ static const struct ina2xx_config ina2xx_config[] = {
 
 static struct ina2xx_data *ina2xx_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ina2xx_data *data = i2c_get_clientdata(client);
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct ina2xx_data *ret = data;
 
        mutex_lock(&data->update_lock);
@@ -203,41 +203,39 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
                          INA2XX_POWER);
 
 /* pointers to created device attributes */
-static struct attribute *ina2xx_attributes[] = {
+static struct attribute *ina2xx_attrs[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
        &sensor_dev_attr_in1_input.dev_attr.attr,
        &sensor_dev_attr_curr1_input.dev_attr.attr,
        &sensor_dev_attr_power1_input.dev_attr.attr,
        NULL,
 };
-
-static const struct attribute_group ina2xx_group = {
-       .attrs = ina2xx_attributes,
-};
+ATTRIBUTE_GROUPS(ina2xx);
 
 static int ina2xx_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        struct i2c_adapter *adapter = client->adapter;
-       struct ina2xx_data *data;
        struct ina2xx_platform_data *pdata;
-       int ret;
-       u32 val;
+       struct device *dev = &client->dev;
+       struct ina2xx_data *data;
+       struct device *hwmon_dev;
        long shunt = 10000; /* default shunt value 10mOhms */
+       u32 val;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA))
                return -ENODEV;
 
-       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       if (dev_get_platdata(&client->dev)) {
-               pdata = dev_get_platdata(&client->dev);
+       if (dev_get_platdata(dev)) {
+               pdata = dev_get_platdata(dev);
                shunt = pdata->shunt_uohms;
-       } else if (!of_property_read_u32(client->dev.of_node,
-                               "shunt-resistor", &val)) {
-                       shunt = val;
+       } else if (!of_property_read_u32(dev->of_node,
+                                        "shunt-resistor", &val)) {
+               shunt = val;
        }
 
        if (shunt <= 0)
@@ -255,37 +253,18 @@ static int ina2xx_probe(struct i2c_client *client,
        i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION,
                                     data->config->calibration_factor / shunt);
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
-       ret = sysfs_create_group(&client->dev.kobj, &ina2xx_group);
-       if (ret)
-               return ret;
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto out_err_hwmon;
-       }
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data, ina2xx_groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
-       dev_info(&client->dev, "power monitor %s (Rshunt = %li uOhm)\n",
+       dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
                 id->name, shunt);
 
        return 0;
-
-out_err_hwmon:
-       sysfs_remove_group(&client->dev.kobj, &ina2xx_group);
-       return ret;
-}
-
-static int ina2xx_remove(struct i2c_client *client)
-{
-       struct ina2xx_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &ina2xx_group);
-
-       return 0;
 }
 
 static const struct i2c_device_id ina2xx_id[] = {
@@ -302,7 +281,6 @@ static struct i2c_driver ina2xx_driver = {
                .name   = "ina2xx",
        },
        .probe          = ina2xx_probe,
-       .remove         = ina2xx_remove,
        .id_table       = ina2xx_id,
 };
 
index 4a58f130fd4e622cf840969920f5dfd62a393a2b..6013611e4f219145ae9bb8da6a3a88b686801076 100644 (file)
@@ -163,7 +163,7 @@ static struct jc42_chips jc42_chips[] = {
 
 /* Each client has this additional data */
 struct jc42_data {
-       struct device   *hwmon_dev;
+       struct i2c_client *client;
        struct mutex    update_lock;    /* protect register access */
        bool            extended;       /* true if extended range supported */
        bool            valid;
@@ -193,21 +193,21 @@ MODULE_DEVICE_TABLE(i2c, jc42_id);
 
 static int jc42_suspend(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct jc42_data *data = i2c_get_clientdata(client);
+       struct jc42_data *data = dev_get_drvdata(dev);
 
        data->config |= JC42_CFG_SHUTDOWN;
-       i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config);
+       i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+                                    data->config);
        return 0;
 }
 
 static int jc42_resume(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct jc42_data *data = i2c_get_clientdata(client);
+       struct jc42_data *data = dev_get_drvdata(dev);
 
        data->config &= ~JC42_CFG_SHUTDOWN;
-       i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG, data->config);
+       i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
+                                    data->config);
        return 0;
 }
 
@@ -317,15 +317,14 @@ static ssize_t set_##value(struct device *dev,                            \
                           struct device_attribute *attr,               \
                           const char *buf, size_t count)               \
 {                                                                      \
-       struct i2c_client *client = to_i2c_client(dev);                 \
-       struct jc42_data *data = i2c_get_clientdata(client);            \
+       struct jc42_data *data = dev_get_drvdata(dev);                  \
        int err, ret = count;                                           \
        long val;                                                       \
-       if (kstrtol(buf, 10, &val) < 0)                         \
+       if (kstrtol(buf, 10, &val) < 0)                                 \
                return -EINVAL;                                         \
        mutex_lock(&data->update_lock);                                 \
        data->value = jc42_temp_to_reg(val, data->extended);            \
-       err = i2c_smbus_write_word_swapped(client, reg, data->value);   \
+       err = i2c_smbus_write_word_swapped(data->client, reg, data->value); \
        if (err < 0)                                                    \
                ret = err;                                              \
        mutex_unlock(&data->update_lock);                               \
@@ -344,8 +343,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct jc42_data *data = i2c_get_clientdata(client);
+       struct jc42_data *data = dev_get_drvdata(dev);
        unsigned long val;
        int diff, hyst;
        int err;
@@ -368,7 +366,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
        mutex_lock(&data->update_lock);
        data->config = (data->config & ~JC42_CFG_HYST_MASK)
          | (hyst << JC42_CFG_HYST_SHIFT);
-       err = i2c_smbus_write_word_swapped(client, JC42_REG_CONFIG,
+       err = i2c_smbus_write_word_swapped(data->client, JC42_REG_CONFIG,
                                           data->config);
        if (err < 0)
                ret = err;
@@ -430,8 +428,7 @@ static umode_t jc42_attribute_mode(struct kobject *kobj,
                                  struct attribute *attr, int index)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct jc42_data *data = i2c_get_clientdata(client);
+       struct jc42_data *data = dev_get_drvdata(dev);
        unsigned int config = data->config;
        bool readonly;
 
@@ -452,6 +449,7 @@ static const struct attribute_group jc42_group = {
        .attrs = jc42_attributes,
        .is_visible = jc42_attribute_mode,
 };
+__ATTRIBUTE_GROUPS(jc42);
 
 /* Return 0 if detection is successful, -ENODEV otherwise */
 static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
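
jc42 keeps its hand-written jc42_group because that group carries an
.is_visible hook, so only the bare __ATTRIBUTE_GROUPS(jc42) variant is used;
roughly, it emits just the NULL-terminated pointer array:

static const struct attribute_group *jc42_groups[] = {
        &jc42_group,
        NULL,
};
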
@@ -487,14 +485,16 @@ static int jc42_detect(struct i2c_client *client, struct i2c_board_info *info)
 
 static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
-       struct jc42_data *data;
-       int config, cap, err;
        struct device *dev = &client->dev;
+       struct device *hwmon_dev;
+       struct jc42_data *data;
+       int config, cap;
 
        data = devm_kzalloc(dev, sizeof(struct jc42_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
+       data->client = client;
        i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
 
@@ -515,29 +515,15 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
        }
        data->config = config;
 
-       /* Register sysfs hooks */
-       err = sysfs_create_group(&dev->kobj, &jc42_group);
-       if (err)
-               return err;
-
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove;
-       }
-
-       return 0;
-
-exit_remove:
-       sysfs_remove_group(&dev->kobj, &jc42_group);
-       return err;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data,
+                                                          jc42_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static int jc42_remove(struct i2c_client *client)
 {
        struct jc42_data *data = i2c_get_clientdata(client);
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &jc42_group);
 
        /* Restore original configuration except hysteresis */
        if ((data->config & ~JC42_CFG_HYST_MASK) !=
@@ -553,8 +539,8 @@ static int jc42_remove(struct i2c_client *client)
 
 static struct jc42_data *jc42_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct jc42_data *data = i2c_get_clientdata(client);
+       struct jc42_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct jc42_data *ret = data;
        int val;
 
index 016efa26ba7ca48e80733d949745010190416951..505a59e100b0b9a80b099aa8a9abd43fa5ec2220 100644 (file)
@@ -174,7 +174,6 @@ out_dev_reg_failed:
 out_dev_create_file_failed:
        device_remove_file(&spi->dev, &dev_attr_temp1_input);
 out_dev_create_temp_file_failed:
-       spi_set_drvdata(spi, NULL);
        return status;
 }
 
@@ -185,7 +184,6 @@ static int lm70_remove(struct spi_device *spi)
        hwmon_device_unregister(p_lm70->hwmon_dev);
        device_remove_file(&spi->dev, &dev_attr_temp1_input);
        device_remove_file(&spi->dev, &dev_attr_name);
-       spi_set_drvdata(spi, NULL);
 
        return 0;
 }
index 9bde9644b102d9bcdade74ce20db444b832f9270..9653bb870a478f58e85443fb0bec69cd35f4a559 100644 (file)
@@ -55,7 +55,7 @@ static const unsigned short lm73_convrates[] = {
 };
 
 struct lm73_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        struct mutex lock;
        u8 ctrl;                        /* control register value */
 };
@@ -66,7 +66,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
                        const char *buf, size_t count)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct i2c_client *client = to_i2c_client(dev);
+       struct lm73_data *data = dev_get_drvdata(dev);
        long temp;
        short value;
        s32 err;
@@ -77,7 +77,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
 
        /* Write value */
        value = clamp_val(temp / 250, LM73_TEMP_MIN, LM73_TEMP_MAX) << 5;
-       err = i2c_smbus_write_word_swapped(client, attr->index, value);
+       err = i2c_smbus_write_word_swapped(data->client, attr->index, value);
        return (err < 0) ? err : count;
 }
 
@@ -85,10 +85,10 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
                         char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct i2c_client *client = to_i2c_client(dev);
+       struct lm73_data *data = dev_get_drvdata(dev);
        int temp;
 
-       s32 err = i2c_smbus_read_word_swapped(client, attr->index);
+       s32 err = i2c_smbus_read_word_swapped(data->client, attr->index);
        if (err < 0)
                return err;
 
@@ -101,8 +101,7 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
 static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
                            const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm73_data *data = i2c_get_clientdata(client);
+       struct lm73_data *data = dev_get_drvdata(dev);
        unsigned long convrate;
        s32 err;
        int res = 0;
@@ -124,7 +123,8 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
        mutex_lock(&data->lock);
        data->ctrl &= LM73_CTRL_TO_MASK;
        data->ctrl |= res << LM73_CTRL_RES_SHIFT;
-       err = i2c_smbus_write_byte_data(client, LM73_REG_CTRL, data->ctrl);
+       err = i2c_smbus_write_byte_data(data->client, LM73_REG_CTRL,
+                                       data->ctrl);
        mutex_unlock(&data->lock);
 
        if (err < 0)
@@ -136,8 +136,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da,
 static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
                             char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm73_data *data = i2c_get_clientdata(client);
+       struct lm73_data *data = dev_get_drvdata(dev);
        int res;
 
        res = (data->ctrl & LM73_CTRL_RES_MASK) >> LM73_CTRL_RES_SHIFT;
@@ -147,13 +146,12 @@ static ssize_t show_convrate(struct device *dev, struct device_attribute *da,
 static ssize_t show_maxmin_alarm(struct device *dev,
                                 struct device_attribute *da, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct lm73_data *data = i2c_get_clientdata(client);
+       struct lm73_data *data = dev_get_drvdata(dev);
        s32 ctrl;
 
        mutex_lock(&data->lock);
-       ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
+       ctrl = i2c_smbus_read_byte_data(data->client, LM73_REG_CTRL);
        if (ctrl < 0)
                goto abort;
        data->ctrl = ctrl;
@@ -183,7 +181,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
 static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
                        show_maxmin_alarm, NULL, LM73_CTRL_LO_SHIFT);
 
-static struct attribute *lm73_attributes[] = {
+static struct attribute *lm73_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        &sensor_dev_attr_temp1_min.dev_attr.attr,
@@ -192,10 +190,7 @@ static struct attribute *lm73_attributes[] = {
        &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
        NULL
 };
-
-static const struct attribute_group lm73_group = {
-       .attrs = lm73_attributes,
-};
+ATTRIBUTE_GROUPS(lm73);
 
 /*-----------------------------------------------------------------------*/
 
@@ -204,16 +199,16 @@ static const struct attribute_group lm73_group = {
 static int
 lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
-       int status;
+       struct device *dev = &client->dev;
+       struct device *hwmon_dev;
        struct lm73_data *data;
        int ctrl;
 
-       data = devm_kzalloc(&client->dev, sizeof(struct lm73_data),
-                           GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(struct lm73_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->lock);
 
        ctrl = i2c_smbus_read_byte_data(client, LM73_REG_CTRL);
@@ -221,33 +216,13 @@ lm73_probe(struct i2c_client *client, const struct i2c_device_id *id)
                return ctrl;
        data->ctrl = ctrl;
 
-       /* Register sysfs hooks */
-       status = sysfs_create_group(&client->dev.kobj, &lm73_group);
-       if (status)
-               return status;
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               status = PTR_ERR(data->hwmon_dev);
-               goto exit_remove;
-       }
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data, lm73_groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
-       dev_info(&client->dev, "%s: sensor '%s'\n",
-                dev_name(data->hwmon_dev), client->name);
-
-       return 0;
-
-exit_remove:
-       sysfs_remove_group(&client->dev.kobj, &lm73_group);
-       return status;
-}
-
-static int lm73_remove(struct i2c_client *client)
-{
-       struct lm73_data *data = i2c_get_clientdata(client);
+       dev_info(dev, "sensor '%s'\n", client->name);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &lm73_group);
        return 0;
 }
 
@@ -300,7 +275,6 @@ static struct i2c_driver lm73_driver = {
                .name   = "lm73",
        },
        .probe          = lm73_probe,
-       .remove         = lm73_remove,
        .id_table       = lm73_ids,
        .detect         = lm73_detect,
        .address_list   = normal_i2c,
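
A recurring point in these conversions: the sysfs callbacks now run on the
newly created hwmon device rather than on &client->dev, so to_i2c_client(dev)
would resolve to the wrong device. The drvdata passed at registration is what
dev_get_drvdata() returns, and the i2c client is reached through it. A
compressed sketch with hypothetical bar_* names:

struct bar_data {
        struct i2c_client *client;
        int temp;
};

static ssize_t bar_show_temp(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        /*
         * dev is the hwmon child device; dev_get_drvdata() returns the
         * pointer handed to devm_hwmon_device_register_with_groups(),
         * and the i2c client is reached through it.
         */
        struct bar_data *data = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", data->temp);
}
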
index 307c9eaeeb9f26eaa3e0e76e049b746afca832ad..411202bdaf6b6bf8c3eb0141ad7a62ecd7d4cfbe 100644 (file)
@@ -57,7 +57,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4d, 0x4e, I2C_CLIENT_END };
 
 /* Client data (each client gets its own) */
 struct lm95234_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        struct mutex update_lock;
        unsigned long last_updated, interval;   /* in jiffies */
        bool valid;             /* false until following fields are valid */
@@ -114,9 +114,9 @@ static u16 update_intervals[] = { 143, 364, 1000, 2500 };
 
 /* Fill value cache. Must be called with update lock held. */
 
-static int lm95234_fill_cache(struct i2c_client *client)
+static int lm95234_fill_cache(struct lm95234_data *data,
+                             struct i2c_client *client)
 {
-       struct lm95234_data *data = i2c_get_clientdata(client);
        int i, ret;
 
        ret = i2c_smbus_read_byte_data(client, LM95234_REG_CONVRATE);
@@ -157,9 +157,9 @@ static int lm95234_fill_cache(struct i2c_client *client)
        return 0;
 }
 
-static int lm95234_update_device(struct i2c_client *client,
-                                struct lm95234_data *data)
+static int lm95234_update_device(struct lm95234_data *data)
 {
+       struct i2c_client *client = data->client;
        int ret;
 
        mutex_lock(&data->update_lock);
@@ -169,7 +169,7 @@ static int lm95234_update_device(struct i2c_client *client,
                int i;
 
                if (!data->valid) {
-                       ret = lm95234_fill_cache(client);
+                       ret = lm95234_fill_cache(data, client);
                        if (ret < 0)
                                goto abort;
                }
@@ -209,10 +209,9 @@ abort:
 static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -224,10 +223,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
 static ssize_t show_alarm(struct device *dev,
                          struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        u32 mask = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -238,10 +236,9 @@ static ssize_t show_alarm(struct device *dev,
 static ssize_t show_type(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        u8 mask = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -252,11 +249,10 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr,
 static ssize_t set_type(struct device *dev, struct device_attribute *attr,
                        const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        unsigned long val;
        u8 mask = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -274,7 +270,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
        else
                data->sensor_type &= ~mask;
        data->valid = false;
-       i2c_smbus_write_byte_data(client, LM95234_REG_REM_MODEL,
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_REM_MODEL,
                                  data->sensor_type);
        mutex_unlock(&data->update_lock);
 
@@ -284,10 +280,9 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
 static ssize_t show_tcrit2(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -298,11 +293,10 @@ static ssize_t show_tcrit2(struct device *dev, struct device_attribute *attr,
 static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
        long val;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -315,7 +309,7 @@ static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->tcrit2[index] = val;
-       i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT2(index), val);
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT2(index), val);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -324,10 +318,9 @@ static ssize_t set_tcrit2(struct device *dev, struct device_attribute *attr,
 static ssize_t show_tcrit2_hyst(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -340,8 +333,7 @@ static ssize_t show_tcrit2_hyst(struct device *dev,
 static ssize_t show_tcrit1(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
 
        return sprintf(buf, "%u", data->tcrit1[index] * 1000);
@@ -350,11 +342,10 @@ static ssize_t show_tcrit1(struct device *dev, struct device_attribute *attr,
 static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
+       int ret = lm95234_update_device(data);
        long val;
-       int ret = lm95234_update_device(client, data);
 
        if (ret)
                return ret;
@@ -367,7 +358,7 @@ static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->tcrit1[index] = val;
-       i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT1(index), val);
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT1(index), val);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -376,10 +367,9 @@ static ssize_t set_tcrit1(struct device *dev, struct device_attribute *attr,
 static ssize_t show_tcrit1_hyst(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -393,11 +383,10 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
+       int ret = lm95234_update_device(data);
        long val;
-       int ret = lm95234_update_device(client, data);
 
        if (ret)
                return ret;
@@ -411,7 +400,7 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
 
        mutex_lock(&data->update_lock);
        data->thyst = val;
-       i2c_smbus_write_byte_data(client, LM95234_REG_TCRIT_HYST, val);
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_TCRIT_HYST, val);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -420,10 +409,9 @@ static ssize_t set_tcrit1_hyst(struct device *dev,
 static ssize_t show_offset(struct device *dev, struct device_attribute *attr,
                           char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
-       int ret = lm95234_update_device(client, data);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -434,11 +422,10 @@ static ssize_t show_offset(struct device *dev, struct device_attribute *attr,
 static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
        int index = to_sensor_dev_attr(attr)->index;
+       int ret = lm95234_update_device(data);
        long val;
-       int ret = lm95234_update_device(client, data);
 
        if (ret)
                return ret;
@@ -452,7 +439,7 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->toffset[index] = val;
-       i2c_smbus_write_byte_data(client, LM95234_REG_OFFSET(index), val);
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_OFFSET(index), val);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -461,9 +448,8 @@ static ssize_t set_offset(struct device *dev, struct device_attribute *attr,
 static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
-       int ret = lm95234_update_device(client, data);
+       struct lm95234_data *data = dev_get_drvdata(dev);
+       int ret = lm95234_update_device(data);
 
        if (ret)
                return ret;
@@ -475,11 +461,10 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr,
 static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct lm95234_data *data = i2c_get_clientdata(client);
+       struct lm95234_data *data = dev_get_drvdata(dev);
+       int ret = lm95234_update_device(data);
        unsigned long val;
        u8 regval;
-       int ret = lm95234_update_device(client, data);
 
        if (ret)
                return ret;
@@ -495,7 +480,7 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->interval = msecs_to_jiffies(update_intervals[regval]);
-       i2c_smbus_write_byte_data(client, LM95234_REG_CONVRATE, regval);
+       i2c_smbus_write_byte_data(data->client, LM95234_REG_CONVRATE, regval);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -579,7 +564,7 @@ static SENSOR_DEVICE_ATTR(temp5_offset, S_IWUSR | S_IRUGO, show_offset,
 static DEVICE_ATTR(update_interval, S_IWUSR | S_IRUGO, show_interval,
                   set_interval);
 
-static struct attribute *lm95234_attributes[] = {
+static struct attribute *lm95234_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp2_input.dev_attr.attr,
        &sensor_dev_attr_temp3_input.dev_attr.attr,
@@ -621,10 +606,7 @@ static struct attribute *lm95234_attributes[] = {
        &dev_attr_update_interval.attr,
        NULL
 };
-
-static const struct attribute_group lm95234_group = {
-       .attrs = lm95234_attributes,
-};
+ATTRIBUTE_GROUPS(lm95234);
 
 static int lm95234_detect(struct i2c_client *client,
                          struct i2c_board_info *info)
@@ -701,13 +683,14 @@ static int lm95234_probe(struct i2c_client *client,
 {
        struct device *dev = &client->dev;
        struct lm95234_data *data;
+       struct device *hwmon_dev;
        int err;
 
        data = devm_kzalloc(dev, sizeof(struct lm95234_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
        /* Initialize the LM95234 chip */
@@ -715,32 +698,10 @@ static int lm95234_probe(struct i2c_client *client,
        if (err < 0)
                return err;
 
-       /* Register sysfs hooks */
-       err = sysfs_create_group(&dev->kobj, &lm95234_group);
-       if (err)
-               return err;
-
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove_files;
-       }
-
-       return 0;
-
-exit_remove_files:
-       sysfs_remove_group(&dev->kobj, &lm95234_group);
-       return err;
-}
-
-static int lm95234_remove(struct i2c_client *client)
-{
-       struct lm95234_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &lm95234_group);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data,
+                                                          lm95234_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 /* Driver data (common to all clients) */
@@ -756,7 +717,6 @@ static struct i2c_driver lm95234_driver = {
                .name   = DRVNAME,
        },
        .probe          = lm95234_probe,
-       .remove         = lm95234_remove,
        .id_table       = lm95234_id,
        .detect         = lm95234_detect,
        .address_list   = normal_i2c,
index cdc1ecc6734d4122f9aab8736f5b4f37217aaf6b..d4172933ce4f2b67d8696b97527d25e7fd5fab48 100644 (file)
@@ -51,7 +51,9 @@ enum ltc4245_cmd {
 };
 
 struct ltc4245_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
+
+       const struct attribute_group *groups[3];
 
        struct mutex update_lock;
        bool valid;
@@ -77,8 +79,8 @@ struct ltc4245_data {
  */
 static void ltc4245_update_gpios(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ltc4245_data *data = i2c_get_clientdata(client);
+       struct ltc4245_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        u8 gpio_curr, gpio_next, gpio_reg;
        int i;
 
@@ -130,8 +132,8 @@ static void ltc4245_update_gpios(struct device *dev)
 
 static struct ltc4245_data *ltc4245_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ltc4245_data *data = i2c_get_clientdata(client);
+       struct ltc4245_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        s32 val;
        int i;
 
@@ -455,41 +457,14 @@ static const struct attribute_group ltc4245_gpio_group = {
        .attrs = ltc4245_gpio_attributes,
 };
 
-static int ltc4245_sysfs_create_groups(struct i2c_client *client)
+static void ltc4245_sysfs_add_groups(struct ltc4245_data *data)
 {
-       struct ltc4245_data *data = i2c_get_clientdata(client);
-       struct device *dev = &client->dev;
-       int ret;
-
-       /* register the standard sysfs attributes */
-       ret = sysfs_create_group(&dev->kobj, &ltc4245_std_group);
-       if (ret) {
-               dev_err(dev, "unable to register standard attributes\n");
-               return ret;
-       }
+       /* standard sysfs attributes */
+       data->groups[0] = &ltc4245_std_group;
 
        /* if we're using the extra gpio support, register its attributes */
Note: replace original
-       if (data->use_extra_gpios) {
-               ret = sysfs_create_group(&dev->kobj, &ltc4245_gpio_group);
-               if (ret) {
-                       dev_err(dev, "unable to register gpio attributes\n");
-                       sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
-static void ltc4245_sysfs_remove_groups(struct i2c_client *client)
-{
-       struct ltc4245_data *data = i2c_get_clientdata(client);
-       struct device *dev = &client->dev;
-
        if (data->use_extra_gpios)
-               sysfs_remove_group(&dev->kobj, &ltc4245_gpio_group);
-
-       sysfs_remove_group(&dev->kobj, &ltc4245_std_group);
+               data->groups[1] = &ltc4245_gpio_group;
 }
 
 static bool ltc4245_use_extra_gpios(struct i2c_client *client)
@@ -517,7 +492,7 @@ static int ltc4245_probe(struct i2c_client *client,
 {
        struct i2c_adapter *adapter = client->adapter;
        struct ltc4245_data *data;
-       int ret;
+       struct device *hwmon_dev;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
                return -ENODEV;
@@ -526,7 +501,7 @@ static int ltc4245_probe(struct i2c_client *client,
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
        data->use_extra_gpios = ltc4245_use_extra_gpios(client);
 
@@ -534,30 +509,25 @@ static int ltc4245_probe(struct i2c_client *client,
        i2c_smbus_write_byte_data(client, LTC4245_FAULT1, 0x00);
        i2c_smbus_write_byte_data(client, LTC4245_FAULT2, 0x00);
 
-       /* Register sysfs hooks */
-       ret = ltc4245_sysfs_create_groups(client);
-       if (ret)
-               return ret;
+       /* Add sysfs hooks */
+       ltc4245_sysfs_add_groups(data);
 
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto out_hwmon_device_register;
-       }
+       hwmon_dev = hwmon_device_register_with_groups(&client->dev,
+                                                     client->name, data,
+                                                     data->groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
-       return 0;
+       i2c_set_clientdata(client, hwmon_dev);
 
-out_hwmon_device_register:
-       ltc4245_sysfs_remove_groups(client);
-       return ret;
+       return 0;
 }
 
 static int ltc4245_remove(struct i2c_client *client)
 {
-       struct ltc4245_data *data = i2c_get_clientdata(client);
+       struct device *hwmon_dev = i2c_get_clientdata(client);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       ltc4245_sysfs_remove_groups(client);
+       hwmon_device_unregister(hwmon_dev);
 
        return 0;
 }
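
ltc4245 cannot use ATTRIBUTE_GROUPS because the gpio group is optional, so it
fills a NULL-terminated array in its driver data (two possible groups plus the
sentinel, hence groups[3]). The consumer walks the array up to the NULL entry;
purely as an illustration of that contract (create_all_groups is a
hypothetical helper, not the hwmon core):

#include <linux/sysfs.h>

static int create_all_groups(struct kobject *kobj,
                             const struct attribute_group **groups)
{
        int i, err;

        /* groups[] must end with a NULL sentinel, as in ltc4245_data */
        for (i = 0; groups[i]; i++) {
                err = sysfs_create_group(kobj, groups[i]);
                if (err)
                        return err;
        }
        return 0;
}

Since this driver keeps a remove() callback, it uses the non-managed
hwmon_device_register_with_groups() and stashes the returned hwmon device in
the i2c clientdata for the later hwmon_device_unregister().
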
index 487da58ec86c2fbab1cceacbc9b8b0ffa799ea20..0becd69842bb7cb0586f169e1dcbf75a10620278 100644 (file)
@@ -55,7 +55,7 @@
 #define FAULT_OC       (1<<2)
 
 struct ltc4261_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
 
        struct mutex update_lock;
        bool valid;
@@ -67,8 +67,8 @@ struct ltc4261_data {
 
 static struct ltc4261_data *ltc4261_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct ltc4261_data *data = i2c_get_clientdata(client);
+       struct ltc4261_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct ltc4261_data *ret = data;
 
        mutex_lock(&data->update_lock);
@@ -150,7 +150,6 @@ static ssize_t ltc4261_show_bool(struct device *dev,
                                 struct device_attribute *da, char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct i2c_client *client = to_i2c_client(dev);
        struct ltc4261_data *data = ltc4261_update_device(dev);
        u8 fault;
 
@@ -159,7 +158,7 @@ static ssize_t ltc4261_show_bool(struct device *dev,
 
        fault = data->regs[LTC4261_FAULT] & attr->index;
        if (fault)              /* Clear reported faults in chip register */
-               i2c_smbus_write_byte_data(client, LTC4261_FAULT, ~fault);
+               i2c_smbus_write_byte_data(data->client, LTC4261_FAULT, ~fault);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", fault ? 1 : 0);
 }
@@ -197,7 +196,7 @@ static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc4261_show_value, NULL,
 static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO, ltc4261_show_bool, NULL,
                          FAULT_OC);
 
-static struct attribute *ltc4261_attributes[] = {
+static struct attribute *ltc4261_attrs[] = {
        &sensor_dev_attr_in1_input.dev_attr.attr,
        &sensor_dev_attr_in1_min_alarm.dev_attr.attr,
        &sensor_dev_attr_in1_max_alarm.dev_attr.attr,
@@ -210,62 +209,38 @@ static struct attribute *ltc4261_attributes[] = {
 
        NULL,
 };
-
-static const struct attribute_group ltc4261_group = {
-       .attrs = ltc4261_attributes,
-};
+ATTRIBUTE_GROUPS(ltc4261);
 
 static int ltc4261_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        struct i2c_adapter *adapter = client->adapter;
+       struct device *dev = &client->dev;
        struct ltc4261_data *data;
-       int ret;
+       struct device *hwmon_dev;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
                return -ENODEV;
 
        if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) {
-               dev_err(&client->dev, "Failed to read status register\n");
+               dev_err(dev, "Failed to read status register\n");
                return -ENODEV;
        }
 
-       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
        /* Clear faults */
        i2c_smbus_write_byte_data(client, LTC4261_FAULT, 0x00);
 
-       /* Register sysfs hooks */
-       ret = sysfs_create_group(&client->dev.kobj, &ltc4261_group);
-       if (ret)
-               return ret;
-
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto out_hwmon_device_register;
-       }
-
-       return 0;
-
-out_hwmon_device_register:
-       sysfs_remove_group(&client->dev.kobj, &ltc4261_group);
-       return ret;
-}
-
-static int ltc4261_remove(struct i2c_client *client)
-{
-       struct ltc4261_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &ltc4261_group);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data,
+                                                          ltc4261_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static const struct i2c_device_id ltc4261_id[] = {
@@ -281,7 +256,6 @@ static struct i2c_driver ltc4261_driver = {
                   .name = "ltc4261",
                   },
        .probe = ltc4261_probe,
-       .remove = ltc4261_remove,
        .id_table = ltc4261_id,
 };
 
index 2fa2c02f5569c5af563ac5a90e865917e648fdf2..d4efc79d7b9371e69dc6e5c5972348020f19a1aa 100644 (file)
@@ -83,7 +83,8 @@ static const bool max16065_have_current[] = {
 
 struct max16065_data {
        enum chips type;
-       struct device *hwmon_dev;
+       struct i2c_client *client;
+       const struct attribute_group *groups[4];
        struct mutex update_lock;
        bool valid;
        unsigned long last_updated; /* in jiffies */
@@ -144,8 +145,8 @@ static int max16065_read_adc(struct i2c_client *client, int reg)
 
 static struct max16065_data *max16065_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max16065_data *data = i2c_get_clientdata(client);
+       struct max16065_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
 
        mutex_lock(&data->update_lock);
        if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
@@ -186,7 +187,7 @@ static ssize_t max16065_show_alarm(struct device *dev,
 
        val &= (1 << attr2->index);
        if (val)
-               i2c_smbus_write_byte_data(to_i2c_client(dev),
+               i2c_smbus_write_byte_data(data->client,
                                          MAX16065_FAULT(attr2->nr), val);
 
        return snprintf(buf, PAGE_SIZE, "%d\n", !!val);
@@ -223,8 +224,7 @@ static ssize_t max16065_set_limit(struct device *dev,
                                  const char *buf, size_t count)
 {
        struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max16065_data *data = i2c_get_clientdata(client);
+       struct max16065_data *data = dev_get_drvdata(dev);
        unsigned long val;
        int err;
        int limit;
@@ -238,7 +238,7 @@ static ssize_t max16065_set_limit(struct device *dev,
        mutex_lock(&data->update_lock);
        data->limit[attr2->nr][attr2->index]
          = LIMIT_TO_MV(limit, data->range[attr2->index]);
-       i2c_smbus_write_byte_data(client,
+       i2c_smbus_write_byte_data(data->client,
                                  MAX16065_LIMIT(attr2->nr, attr2->index),
                                  limit);
        mutex_unlock(&data->update_lock);
@@ -250,8 +250,7 @@ static ssize_t max16065_show_limit(struct device *dev,
                                   struct device_attribute *da, char *buf)
 {
        struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max16065_data *data = i2c_get_clientdata(client);
+       struct max16065_data *data = dev_get_drvdata(dev);
 
        return snprintf(buf, PAGE_SIZE, "%d\n",
                        data->limit[attr2->nr][attr2->index]);
@@ -516,8 +515,32 @@ static struct attribute *max16065_max_attributes[] = {
        NULL
 };
 
+static umode_t max16065_basic_is_visible(struct kobject *kobj,
+                                        struct attribute *a, int n)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct max16065_data *data = dev_get_drvdata(dev);
+       int index = n / 4;
+
+       if (index >= data->num_adc || !data->range[index])
+               return 0;
+       return a->mode;
+}
+
+static umode_t max16065_secondary_is_visible(struct kobject *kobj,
+                                            struct attribute *a, int index)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct max16065_data *data = dev_get_drvdata(dev);
+
+       if (index >= data->num_adc)
+               return 0;
+       return a->mode;
+}
+
 static const struct attribute_group max16065_basic_group = {
        .attrs = max16065_basic_attributes,
+       .is_visible = max16065_basic_is_visible,
 };
 
 static const struct attribute_group max16065_current_group = {
@@ -526,38 +549,35 @@ static const struct attribute_group max16065_current_group = {
 
 static const struct attribute_group max16065_min_group = {
        .attrs = max16065_min_attributes,
+       .is_visible = max16065_secondary_is_visible,
 };
 
 static const struct attribute_group max16065_max_group = {
        .attrs = max16065_max_attributes,
+       .is_visible = max16065_secondary_is_visible,
 };
 
-static void max16065_cleanup(struct i2c_client *client)
-{
-       sysfs_remove_group(&client->dev.kobj, &max16065_max_group);
-       sysfs_remove_group(&client->dev.kobj, &max16065_min_group);
-       sysfs_remove_group(&client->dev.kobj, &max16065_current_group);
-       sysfs_remove_group(&client->dev.kobj, &max16065_basic_group);
-}
-
 static int max16065_probe(struct i2c_client *client,
                          const struct i2c_device_id *id)
 {
        struct i2c_adapter *adapter = client->adapter;
        struct max16065_data *data;
-       int i, j, val, ret;
+       struct device *dev = &client->dev;
+       struct device *hwmon_dev;
+       int i, j, val;
        bool have_secondary;            /* true if chip has secondary limits */
        bool secondary_is_max = false;  /* secondary limits reflect max */
+       int groups = 0;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
                                     | I2C_FUNC_SMBUS_READ_WORD_DATA))
                return -ENODEV;
 
-       data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (unlikely(!data))
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
        data->num_adc = max16065_num_adc[id->driver_data];
@@ -596,38 +616,16 @@ static int max16065_probe(struct i2c_client *client,
                }
        }
 
-       /* Register sysfs hooks */
-       for (i = 0; i < data->num_adc * 4; i++) {
-               /* Do not create sysfs entry if channel is disabled */
-               if (!data->range[i / 4])
-                       continue;
-
-               ret = sysfs_create_file(&client->dev.kobj,
-                                       max16065_basic_attributes[i]);
-               if (unlikely(ret))
-                       goto out;
-       }
-
-       if (have_secondary) {
-               struct attribute **attr = secondary_is_max ?
-                 max16065_max_attributes : max16065_min_attributes;
-
-               for (i = 0; i < data->num_adc; i++) {
-                       if (!data->range[i])
-                               continue;
-
-                       ret = sysfs_create_file(&client->dev.kobj, attr[i]);
-                       if (unlikely(ret))
-                               goto out;
-               }
-       }
+       /* sysfs hooks */
+       data->groups[groups++] = &max16065_basic_group;
+       if (have_secondary)
+               data->groups[groups++] = secondary_is_max ?
+                       &max16065_max_group : &max16065_min_group;
 
        if (data->have_current) {
                val = i2c_smbus_read_byte_data(client, MAX16065_CURR_CONTROL);
-               if (unlikely(val < 0)) {
-                       ret = val;
-                       goto out;
-               }
+               if (unlikely(val < 0))
+                       return val;
                if (val & MAX16065_CURR_ENABLE) {
                        /*
                         * Current gain is 6, 12, 24, 48 based on values in
@@ -636,33 +634,16 @@ static int max16065_probe(struct i2c_client *client,
                        data->curr_gain = 6 << ((val >> 2) & 0x03);
                        data->range[MAX16065_NUM_ADC]
                          = max16065_csp_adc_range[(val >> 1) & 0x01];
-                       ret = sysfs_create_group(&client->dev.kobj,
-                                                &max16065_current_group);
-                       if (unlikely(ret))
-                               goto out;
+                       data->groups[groups++] = &max16065_current_group;
                } else {
                        data->have_current = false;
                }
        }
 
-       data->hwmon_dev = hwmon_device_register(&client->dev);
-       if (unlikely(IS_ERR(data->hwmon_dev))) {
-               ret = PTR_ERR(data->hwmon_dev);
-               goto out;
-       }
-       return 0;
-
-out:
-       max16065_cleanup(client);
-       return ret;
-}
-
-static int max16065_remove(struct i2c_client *client)
-{
-       struct max16065_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       max16065_cleanup(client);
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data, data->groups);
+       if (unlikely(IS_ERR(hwmon_dev)))
+               return PTR_ERR(hwmon_dev);
 
        return 0;
 }
@@ -685,7 +666,6 @@ static struct i2c_driver max16065_driver = {
                .name = "max16065",
        },
        .probe = max16065_probe,
-       .remove = max16065_remove,
        .id_table = max16065_id,
 };
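
The max16065 hunk above sets the pattern repeated throughout this commit: probe() collects attribute groups into a NULL-terminated array and hands them to devm_hwmon_device_register_with_groups(), the hwmon core creates and tears down the sysfs files, and the driver's remove() callback goes away. A minimal sketch of that pattern, using hypothetical foo_* names (only the devm/hwmon calls and macros are real kernel API):

static struct attribute *foo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,	/* assumed declared via SENSOR_DEVICE_ATTR elsewhere */
	NULL
};
ATTRIBUTE_GROUPS(foo);	/* generates foo_group and the NULL-terminated foo_groups[] */

static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct device *hwmon_dev;
	struct foo_data *data;

	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->client = client;

	/* Files are created here and removed automatically when the device unbinds. */
	hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
							   client->name,
							   data, foo_groups);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}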
 
index 57d58cd3220682030fcb3f0d32b62e4e77648c04..8326fbd601508d354f5a037aabdc5420b1018670 100644 (file)
@@ -87,7 +87,7 @@ static int temp_to_reg(int val)
  */
 
 struct max6642_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
        struct mutex update_lock;
        bool valid; /* zero until following fields are valid */
        unsigned long last_updated; /* in jiffies */
@@ -102,10 +102,10 @@ struct max6642_data {
  * Real code
  */
 
-static void max6642_init_client(struct i2c_client *client)
+static void max6642_init_client(struct max6642_data *data,
+                               struct i2c_client *client)
 {
        u8 config;
-       struct max6642_data *data = i2c_get_clientdata(client);
 
        /*
         * Start the conversions.
@@ -168,14 +168,14 @@ static int max6642_detect(struct i2c_client *client,
 
 static struct max6642_data *max6642_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max6642_data *data = i2c_get_clientdata(client);
+       struct max6642_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        u16 val, tmp;
 
        mutex_lock(&data->update_lock);
 
        if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
-               dev_dbg(&client->dev, "Updating max6642 data.\n");
+               dev_dbg(dev, "Updating max6642 data.\n");
                val = i2c_smbus_read_byte_data(client,
                                        MAX6642_REG_R_LOCAL_TEMPL);
                tmp = (val >> 6) & 3;
@@ -209,8 +209,8 @@ static struct max6642_data *max6642_update_device(struct device *dev)
 static ssize_t show_temp_max10(struct device *dev,
                               struct device_attribute *dev_attr, char *buf)
 {
-       struct max6642_data *data = max6642_update_device(dev);
        struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr);
+       struct max6642_data *data = max6642_update_device(dev);
 
        return sprintf(buf, "%d\n",
                       temp_from_reg10(data->temp_input[attr->index]));
@@ -219,8 +219,8 @@ static ssize_t show_temp_max10(struct device *dev,
 static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
-       struct max6642_data *data = max6642_update_device(dev);
        struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
+       struct max6642_data *data = max6642_update_device(dev);
 
        return sprintf(buf, "%d\n", temp_from_reg(data->temp_high[attr2->nr]));
 }
@@ -228,11 +228,10 @@ static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
 static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
 {
+       struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
+       struct max6642_data *data = dev_get_drvdata(dev);
        unsigned long val;
        int err;
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max6642_data *data = i2c_get_clientdata(client);
-       struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
 
        err = kstrtoul(buf, 10, &val);
        if (err < 0)
@@ -240,7 +239,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&data->update_lock);
        data->temp_high[attr2->nr] = clamp_val(temp_to_reg(val), 0, 255);
-       i2c_smbus_write_byte_data(client, attr2->index,
+       i2c_smbus_write_byte_data(data->client, attr2->index,
                                  data->temp_high[attr2->nr]);
        mutex_unlock(&data->update_lock);
        return count;
@@ -264,7 +263,7 @@ static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
 static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
 static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
 
-static struct attribute *max6642_attributes[] = {
+static struct attribute *max6642_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp2_input.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -275,54 +274,29 @@ static struct attribute *max6642_attributes[] = {
        &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
        NULL
 };
+ATTRIBUTE_GROUPS(max6642);
 
-static const struct attribute_group max6642_group = {
-       .attrs = max6642_attributes,
-};
-
-static int max6642_probe(struct i2c_client *new_client,
+static int max6642_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
+       struct device *dev = &client->dev;
        struct max6642_data *data;
-       int err;
+       struct device *hwmon_dev;
 
-       data = devm_kzalloc(&new_client->dev, sizeof(struct max6642_data),
-                           GFP_KERNEL);
+       data = devm_kzalloc(dev, sizeof(struct max6642_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(new_client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
        /* Initialize the MAX6642 chip */
-       max6642_init_client(new_client);
+       max6642_init_client(data, client);
 
-       /* Register sysfs hooks */
-       err = sysfs_create_group(&new_client->dev.kobj, &max6642_group);
-       if (err)
-               return err;
-
-       data->hwmon_dev = hwmon_device_register(&new_client->dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove_files;
-       }
-
-       return 0;
-
-exit_remove_files:
-       sysfs_remove_group(&new_client->dev.kobj, &max6642_group);
-       return err;
-}
-
-static int max6642_remove(struct i2c_client *client)
-{
-       struct max6642_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &max6642_group);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          client->name, data,
+                                                          max6642_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 /*
@@ -341,7 +315,6 @@ static struct i2c_driver max6642_driver = {
                .name   = "max6642",
        },
        .probe          = max6642_probe,
-       .remove         = max6642_remove,
        .id_table       = max6642_id,
        .detect         = max6642_detect,
        .address_list   = normal_i2c,
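
For reference, the ATTRIBUTE_GROUPS(max6642) line added above is the include/linux/sysfs.h helper that lets the hand-written group definition be dropped; it expands to roughly:

static const struct attribute_group max6642_group = {
	.attrs = max6642_attrs,
};

static const struct attribute_group *max6642_groups[] = {
	&max6642_group,
	NULL,
};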
index 3c16cbd4c00286d465bc2aed825330c0c89e9661..0cafc390db4dd5fc1413ffe0814d6a15567dd352 100644 (file)
@@ -660,7 +660,7 @@ static int max6650_init_client(struct i2c_client *client)
        /*
         * If mode is set to "full off", we change it to "open loop" and
         * set DAC to 255, which has the same effect. We do this because
-        * there's no "full off" mode defined in hwmon specifcations.
+        * there's no "full off" mode defined in hwmon specifications.
         */
 
        if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
index a41b5f3fc5069596d7c9a0c0c164dcefefc7e621..7fd3eaf817f4bffe48267887032b1fa15c81847c 100644 (file)
@@ -77,7 +77,7 @@ struct max6697_chip_data {
 };
 
 struct max6697_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
 
        enum chips type;
        const struct max6697_chip_data *chip;
@@ -181,8 +181,8 @@ static const struct max6697_chip_data max6697_chip_data[] = {
 
 static struct max6697_data *max6697_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max6697_data *data = i2c_get_clientdata(client);
+       struct max6697_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct max6697_data *ret = data;
        int val;
        int i;
@@ -303,8 +303,7 @@ static ssize_t set_temp(struct device *dev,
 {
        int nr = to_sensor_dev_attr_2(devattr)->nr;
        int index = to_sensor_dev_attr_2(devattr)->index;
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max6697_data *data = i2c_get_clientdata(client);
+       struct max6697_data *data = dev_get_drvdata(dev);
        long temp;
        int ret;
 
@@ -316,7 +315,7 @@ static ssize_t set_temp(struct device *dev,
        temp = DIV_ROUND_CLOSEST(temp, 1000) + data->temp_offset;
        temp = clamp_val(temp, 0, data->type == max6581 ? 255 : 127);
        data->temp[nr][index] = temp;
-       ret = i2c_smbus_write_byte_data(client,
+       ret = i2c_smbus_write_byte_data(data->client,
                                        index == 2 ? MAX6697_REG_MAX[nr]
                                                   : MAX6697_REG_CRIT[nr],
                                        temp);
@@ -405,8 +404,7 @@ static umode_t max6697_is_visible(struct kobject *kobj, struct attribute *attr,
                                  int index)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
-       struct i2c_client *client = to_i2c_client(dev);
-       struct max6697_data *data = i2c_get_clientdata(client);
+       struct max6697_data *data = dev_get_drvdata(dev);
        const struct max6697_chip_data *chip = data->chip;
        int channel = index / 6;        /* channel number */
        int nr = index % 6;             /* attribute index within channel */
@@ -489,6 +487,7 @@ static struct attribute *max6697_attributes[] = {
 static const struct attribute_group max6697_group = {
        .attrs = max6697_attributes, .is_visible = max6697_is_visible,
 };
+__ATTRIBUTE_GROUPS(max6697);
 
 static void max6697_get_config_of(struct device_node *node,
                                  struct max6697_platform_data *pdata)
@@ -525,9 +524,9 @@ static void max6697_get_config_of(struct device_node *node,
        }
 }
 
-static int max6697_init_chip(struct i2c_client *client)
+static int max6697_init_chip(struct max6697_data *data,
+                            struct i2c_client *client)
 {
-       struct max6697_data *data = i2c_get_clientdata(client);
        struct max6697_platform_data *pdata = dev_get_platdata(&client->dev);
        struct max6697_platform_data p;
        const struct max6697_chip_data *chip = data->chip;
@@ -625,6 +624,7 @@ static int max6697_probe(struct i2c_client *client,
        struct i2c_adapter *adapter = client->adapter;
        struct device *dev = &client->dev;
        struct max6697_data *data;
+       struct device *hwmon_dev;
        int err;
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -636,39 +636,17 @@ static int max6697_probe(struct i2c_client *client,
 
        data->type = id->driver_data;
        data->chip = &max6697_chip_data[data->type];
-
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
 
-       err = max6697_init_chip(client);
-       if (err)
-               return err;
-
-       err = sysfs_create_group(&client->dev.kobj, &max6697_group);
+       err = max6697_init_chip(data, client);
        if (err)
                return err;
 
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto error;
-       }
-
-       return 0;
-
-error:
-       sysfs_remove_group(&client->dev.kobj, &max6697_group);
-       return err;
-}
-
-static int max6697_remove(struct i2c_client *client)
-{
-       struct max6697_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &max6697_group);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data,
+                                                          max6697_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static const struct i2c_device_id max6697_id[] = {
@@ -692,7 +670,6 @@ static struct i2c_driver max6697_driver = {
                .name   = "max6697",
        },
        .probe = max6697_probe,
-       .remove = max6697_remove,
        .id_table = max6697_id,
 };
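
The max6697 conversion uses __ATTRIBUTE_GROUPS(max6697) rather than ATTRIBUTE_GROUPS() because its group carries a custom .is_visible callback and therefore stays hand-written; the macro only supplies the NULL-terminated pointer array, roughly:

static const struct attribute_group *max6697_groups[] = {
	&max6697_group,
	NULL,
};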
 
index 982d8622c09b375f080bc2c81bfbacb71cf0896c..ae00e60d856c63c4a2a82a4cd6f5a4e994c31e64 100644 (file)
@@ -37,7 +37,7 @@
 struct mc13783_adc_priv {
        struct mc13xxx *mc13xxx;
        struct device *hwmon_dev;
-       char name[10];
+       char name[PLATFORM_NAME_SIZE];
 };
 
 static ssize_t mc13783_adc_show_name(struct device *dev, struct device_attribute
index 6eb03ce2cff4b46a910787f8a7954bf5d00c6f99..d17325db0ea3d9f2478fc2fd7c42641603797e92 100644 (file)
@@ -724,11 +724,8 @@ struct nct6775_data {
        enum kinds kind;
        const char *name;
 
-       struct device *hwmon_dev;
-       struct attribute_group *group_in;
-       struct attribute_group *group_fan;
-       struct attribute_group *group_temp;
-       struct attribute_group *group_pwm;
+       int num_attr_groups;
+       const struct attribute_group *groups[6];
 
        u16 reg_temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
                                    * 3=temp_crit, 4=temp_lcrit
@@ -942,7 +939,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
        struct sensor_device_attribute_2 *a2;
        struct attribute **attrs;
        struct sensor_device_template **t;
-       int err, i, j, count;
+       int i, count;
 
        if (repeat <= 0)
                return ERR_PTR(-EINVAL);
@@ -973,7 +970,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
 
        for (i = 0; i < repeat; i++) {
                t = tg->templates;
-               for (j = 0; *t != NULL; j++) {
+               while (*t != NULL) {
                        snprintf(su->name, sizeof(su->name),
                                 (*t)->dev_attr.attr.name, tg->base + i);
                        if ((*t)->s2) {
@@ -1002,10 +999,6 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                }
        }
 
-       err = sysfs_create_group(&dev->kobj, group);
-       if (err)
-               return ERR_PTR(-ENOMEM);
-
        return group;
 }
 
@@ -1457,7 +1450,8 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
                                          = nct6775_read_temp(data,
                                                data->reg_temp[j][i]);
                        }
-                       if (!(data->have_temp_fixed & (1 << i)))
+                       if (i >= NUM_TEMP_FIXED ||
+                           !(data->have_temp_fixed & (1 << i)))
                                continue;
                        data->temp_offset[i]
                          = nct6775_read_value(data, data->REG_TEMP_OFFSET[i]);
@@ -1545,7 +1539,7 @@ static int find_temp_source(struct nct6775_data *data, int index, int count)
                if (src == source)
                        return nr;
        }
-       return -1;
+       return -ENODEV;
 }
 
 static ssize_t
@@ -1644,7 +1638,7 @@ store_temp_beep(struct device *dev, struct device_attribute *attr,
 
        nr = find_temp_source(data, sattr->index, data->num_temp_beeps);
        if (nr < 0)
-               return -ENODEV;
+               return nr;
 
        bit = data->BEEP_BITS[nr + TEMP_ALARM_BASE];
        regindex = bit >> 3;
@@ -2725,16 +2719,6 @@ store_fan_time(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static ssize_t
-show_name(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct nct6775_data *data = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%s\n", data->name);
-}
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-
 static ssize_t
 show_auto_pwm(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -3061,16 +3045,16 @@ static umode_t nct6775_other_is_visible(struct kobject *kobj,
        struct device *dev = container_of(kobj, struct device, kobj);
        struct nct6775_data *data = dev_get_drvdata(dev);
 
-       if (index == 1 && !data->have_vid)
+       if (index == 0 && !data->have_vid)
                return 0;
 
-       if (index == 2 || index == 3) {
-               if (data->ALARM_BITS[INTRUSION_ALARM_BASE + index - 2] < 0)
+       if (index == 1 || index == 2) {
+               if (data->ALARM_BITS[INTRUSION_ALARM_BASE + index - 1] < 0)
                        return 0;
        }
 
-       if (index == 4 || index == 5) {
-               if (data->BEEP_BITS[INTRUSION_ALARM_BASE + index - 4] < 0)
+       if (index == 3 || index == 4) {
+               if (data->BEEP_BITS[INTRUSION_ALARM_BASE + index - 3] < 0)
                        return 0;
        }
 
@@ -3083,13 +3067,12 @@ static umode_t nct6775_other_is_visible(struct kobject *kobj,
  * Any change in order or content must be matched.
  */
 static struct attribute *nct6775_attributes_other[] = {
-       &dev_attr_name.attr,
-       &dev_attr_cpu0_vid.attr,                                /* 1 */
-       &sensor_dev_attr_intrusion0_alarm.dev_attr.attr,        /* 2 */
-       &sensor_dev_attr_intrusion1_alarm.dev_attr.attr,        /* 3 */
-       &sensor_dev_attr_intrusion0_beep.dev_attr.attr,         /* 4 */
-       &sensor_dev_attr_intrusion1_beep.dev_attr.attr,         /* 5 */
-       &sensor_dev_attr_beep_enable.dev_attr.attr,             /* 6 */
+       &dev_attr_cpu0_vid.attr,                                /* 0 */
+       &sensor_dev_attr_intrusion0_alarm.dev_attr.attr,        /* 1 */
+       &sensor_dev_attr_intrusion1_alarm.dev_attr.attr,        /* 2 */
+       &sensor_dev_attr_intrusion0_beep.dev_attr.attr,         /* 3 */
+       &sensor_dev_attr_intrusion1_beep.dev_attr.attr,         /* 4 */
+       &sensor_dev_attr_beep_enable.dev_attr.attr,             /* 5 */
 
        NULL
 };
@@ -3099,27 +3082,6 @@ static const struct attribute_group nct6775_group_other = {
        .is_visible = nct6775_other_is_visible,
 };
 
-/*
- * Driver and device management
- */
-
-static void nct6775_device_remove_files(struct device *dev)
-{
-       struct nct6775_data *data = dev_get_drvdata(dev);
-
-       if (data->group_pwm)
-               sysfs_remove_group(&dev->kobj, data->group_pwm);
-       if (data->group_in)
-               sysfs_remove_group(&dev->kobj, data->group_in);
-       if (data->group_fan)
-               sysfs_remove_group(&dev->kobj, data->group_fan);
-       if (data->group_temp)
-               sysfs_remove_group(&dev->kobj, data->group_temp);
-
-       sysfs_remove_group(&dev->kobj, &nct6775_group_other);
-}
-
-/* Get the monitoring functions started */
 static inline void nct6775_init_device(struct nct6775_data *data)
 {
        int i;
@@ -3296,6 +3258,7 @@ static int nct6775_probe(struct platform_device *pdev)
        int num_reg_temp;
        u8 cr2a;
        struct attribute_group *group;
+       struct device *hwmon_dev;
 
        res = platform_get_resource(pdev, IORESOURCE_IO, 0);
        if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
@@ -3870,61 +3833,36 @@ static int nct6775_probe(struct platform_device *pdev)
        /* Register sysfs hooks */
        group = nct6775_create_attr_group(dev, &nct6775_pwm_template_group,
                                          data->pwm_num);
-       if (IS_ERR(group)) {
-               err = PTR_ERR(group);
-               goto exit_remove;
-       }
-       data->group_pwm = group;
+       if (IS_ERR(group))
+               return PTR_ERR(group);
+
+       data->groups[data->num_attr_groups++] = group;
 
        group = nct6775_create_attr_group(dev, &nct6775_in_template_group,
                                          fls(data->have_in));
-       if (IS_ERR(group)) {
-               err = PTR_ERR(group);
-               goto exit_remove;
-       }
-       data->group_in = group;
+       if (IS_ERR(group))
+               return PTR_ERR(group);
+
+       data->groups[data->num_attr_groups++] = group;
 
        group = nct6775_create_attr_group(dev, &nct6775_fan_template_group,
                                          fls(data->has_fan));
-       if (IS_ERR(group)) {
-               err = PTR_ERR(group);
-               goto exit_remove;
-       }
-       data->group_fan = group;
+       if (IS_ERR(group))
+               return PTR_ERR(group);
+
+       data->groups[data->num_attr_groups++] = group;
 
        group = nct6775_create_attr_group(dev, &nct6775_temp_template_group,
                                          fls(data->have_temp));
-       if (IS_ERR(group)) {
-               err = PTR_ERR(group);
-               goto exit_remove;
-       }
-       data->group_temp = group;
-
-       err = sysfs_create_group(&dev->kobj, &nct6775_group_other);
-       if (err)
-               goto exit_remove;
+       if (IS_ERR(group))
+               return PTR_ERR(group);
 
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove;
-       }
-
-       return 0;
-
-exit_remove:
-       nct6775_device_remove_files(dev);
-       return err;
-}
-
-static int nct6775_remove(struct platform_device *pdev)
-{
-       struct nct6775_data *data = platform_get_drvdata(pdev);
+       data->groups[data->num_attr_groups++] = group;
+       data->groups[data->num_attr_groups++] = &nct6775_group_other;
 
-       hwmon_device_unregister(data->hwmon_dev);
-       nct6775_device_remove_files(&pdev->dev);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, data->name,
+                                                          data, data->groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 #ifdef CONFIG_PM
@@ -4013,7 +3951,6 @@ static struct platform_driver nct6775_driver = {
                .pm     = NCT6775_DEV_PM_OPS,
        },
        .probe          = nct6775_probe,
-       .remove         = nct6775_remove,
 };
 
 static const char * const nct6775_sio_names[] __initconst = {
@@ -4101,7 +4038,7 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
 /*
  * when Super-I/O functions move to a separate file, the Super-I/O
  * bus will manage the lifetime of the device and this module will only keep
- * track of the nct6775 driver. But since we platform_device_alloc(), we
+ * track of the nct6775 driver. But since we use platform_device_alloc(), we
  * must keep track of the device
  */
 static struct platform_device *pdev[2];
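
Unlike the chips above, nct6775 builds its pwm/in/fan/temp groups at probe time from templates, so the group pointers are collected in data->groups[] instead of a static array. A short sketch of the detail that keeps that array valid, assuming (as the driver does) that the data structure comes from a zeroing allocator such as devm_kzalloc():

const struct attribute_group *groups[6] = { };	/* zero-initialized, like nct6775_data */
int n = 0;

groups[n++] = pwm_group;	/* illustrative pointers for the four template groups */
groups[n++] = in_group;
groups[n++] = fan_group;
groups[n++] = temp_group;
groups[n++] = &nct6775_group_other;
/* groups[5] is still NULL and terminates the list for the hwmon core. */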
index 6a9d6edaacb3947a6080162d2ce9b8ddc0c9d59c..a26b1d1d95146b5fd82a39b5b0ffe6f40910f4af 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Hardware monitoring driver for LM25056 / LM25066 / LM5064 / LM5066
+ * Hardware monitoring driver for LM25056 / LM25063 / LM25066 / LM5064 / LM5066
  *
  * Copyright (c) 2011 Ericsson AB.
  * Copyright (c) 2013 Guenter Roeck
@@ -27,7 +27,7 @@
 #include <linux/i2c.h>
 #include "pmbus.h"
 
-enum chips { lm25056, lm25066, lm5064, lm5066 };
+enum chips { lm25056, lm25063, lm25066, lm5064, lm5066 };
 
 #define LM25066_READ_VAUX              0xd0
 #define LM25066_MFR_READ_IIN           0xd1
@@ -52,6 +52,11 @@ enum chips { lm25056, lm25066, lm5064, lm5066 };
 #define LM25056_MFR_STS_VAUX_OV_WARN   (1 << 1)
 #define LM25056_MFR_STS_VAUX_UV_WARN   (1 << 0)
 
+/* LM25063 only */
+
+#define LM25063_READ_VOUT_MAX          0xe5
+#define LM25063_READ_VOUT_MIN          0xe6
+
 struct __coeff {
        short m, b, R;
 };
@@ -59,7 +64,7 @@ struct __coeff {
 #define PSC_CURRENT_IN_L       (PSC_NUM_CLASSES)
 #define PSC_POWER_L            (PSC_NUM_CLASSES + 1)
 
-static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
+static struct __coeff lm25066_coeff[5][PSC_NUM_CLASSES + 2] = {
        [lm25056] = {
                [PSC_VOLTAGE_IN] = {
                        .m = 16296,
@@ -116,6 +121,36 @@ static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
                        .m = 16,
                },
        },
+       [lm25063] = {
+               [PSC_VOLTAGE_IN] = {
+                       .m = 16000,
+                       .R = -2,
+               },
+               [PSC_VOLTAGE_OUT] = {
+                       .m = 16000,
+                       .R = -2,
+               },
+               [PSC_CURRENT_IN] = {
+                       .m = 10000,
+                       .R = -2,
+               },
+               [PSC_CURRENT_IN_L] = {
+                       .m = 10000,
+                       .R = -2,
+               },
+               [PSC_POWER] = {
+                       .m = 5000,
+                       .R = -3,
+               },
+               [PSC_POWER_L] = {
+                       .m = 5000,
+                       .R = -3,
+               },
+               [PSC_TEMPERATURE] = {
+                       .m = 15596,
+                       .R = -3,
+               },
+       },
        [lm5064] = {
                [PSC_VOLTAGE_IN] = {
                        .m = 4611,
@@ -178,6 +213,7 @@ static struct __coeff lm25066_coeff[4][PSC_NUM_CLASSES + 2] = {
 
 struct lm25066_data {
        int id;
+       u16 rlimit;                     /* Maximum register value */
        struct pmbus_driver_info info;
 };
 
@@ -200,6 +236,10 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
                        /* VIN: 6.14 mV VAUX: 293 uV LSB */
                        ret = DIV_ROUND_CLOSEST(ret * 293, 6140);
                        break;
+               case lm25063:
+                       /* VIN: 6.25 mV VAUX: 200.0 uV LSB */
+                       ret = DIV_ROUND_CLOSEST(ret * 20, 625);
+                       break;
                case lm25066:
                        /* VIN: 4.54 mV VAUX: 283.2 uV LSB */
                        ret = DIV_ROUND_CLOSEST(ret * 2832, 45400);
@@ -253,6 +293,24 @@ static int lm25066_read_word_data(struct i2c_client *client, int page, int reg)
        return ret;
 }
 
+static int lm25063_read_word_data(struct i2c_client *client, int page, int reg)
+{
+       int ret;
+
+       switch (reg) {
+       case PMBUS_VIRT_READ_VOUT_MAX:
+               ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MAX);
+               break;
+       case PMBUS_VIRT_READ_VOUT_MIN:
+               ret = pmbus_read_word_data(client, 0, LM25063_READ_VOUT_MIN);
+               break;
+       default:
+               ret = lm25066_read_word_data(client, page, reg);
+               break;
+       }
+       return ret;
+}
+
 static int lm25056_read_word_data(struct i2c_client *client, int page, int reg)
 {
        int ret;
@@ -308,27 +366,34 @@ static int lm25056_read_byte_data(struct i2c_client *client, int page, int reg)
 static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
                                   u16 word)
 {
+       const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+       const struct lm25066_data *data = to_lm25066_data(info);
        int ret;
 
        switch (reg) {
+       case PMBUS_POUT_OP_FAULT_LIMIT:
+       case PMBUS_POUT_OP_WARN_LIMIT:
        case PMBUS_VOUT_UV_WARN_LIMIT:
        case PMBUS_OT_FAULT_LIMIT:
        case PMBUS_OT_WARN_LIMIT:
+       case PMBUS_IIN_OC_FAULT_LIMIT:
        case PMBUS_VIN_UV_WARN_LIMIT:
+       case PMBUS_VIN_UV_FAULT_LIMIT:
+       case PMBUS_VIN_OV_FAULT_LIMIT:
        case PMBUS_VIN_OV_WARN_LIMIT:
-               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
                ret = pmbus_write_word_data(client, 0, reg, word);
                pmbus_clear_cache(client);
                break;
        case PMBUS_IIN_OC_WARN_LIMIT:
-               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
                ret = pmbus_write_word_data(client, 0,
                                            LM25066_MFR_IIN_OC_WARN_LIMIT,
                                            word);
                pmbus_clear_cache(client);
                break;
        case PMBUS_PIN_OP_WARN_LIMIT:
-               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
                ret = pmbus_write_word_data(client, 0,
                                            LM25066_MFR_PIN_OP_WARN_LIMIT,
                                            word);
@@ -337,7 +402,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
        case PMBUS_VIRT_VMON_UV_WARN_LIMIT:
                /* Adjust from VIN coefficients (for LM25056) */
                word = DIV_ROUND_CLOSEST((int)word * 6140, 293);
-               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
                ret = pmbus_write_word_data(client, 0,
                                            LM25056_VAUX_UV_WARN_LIMIT, word);
                pmbus_clear_cache(client);
@@ -345,7 +410,7 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
        case PMBUS_VIRT_VMON_OV_WARN_LIMIT:
                /* Adjust from VIN coefficients (for LM25056) */
                word = DIV_ROUND_CLOSEST((int)word * 6140, 293);
-               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, 0x0fff);
+               word = ((s16)word < 0) ? 0 : clamp_val(word, 0, data->rlimit);
                ret = pmbus_write_word_data(client, 0,
                                            LM25056_VAUX_OV_WARN_LIMIT, word);
                pmbus_clear_cache(client);
@@ -399,9 +464,16 @@ static int lm25066_probe(struct i2c_client *client,
                info->func[0] |= PMBUS_HAVE_STATUS_VMON;
                info->read_word_data = lm25056_read_word_data;
                info->read_byte_data = lm25056_read_byte_data;
+               data->rlimit = 0x0fff;
+       } else if (data->id == lm25063) {
+               info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+                 | PMBUS_HAVE_POUT;
+               info->read_word_data = lm25063_read_word_data;
+               data->rlimit = 0xffff;
        } else {
                info->func[0] |= PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT;
                info->read_word_data = lm25066_read_word_data;
+               data->rlimit = 0x0fff;
        }
        info->write_word_data = lm25066_write_word_data;
 
@@ -432,6 +504,7 @@ static int lm25066_probe(struct i2c_client *client,
 
 static const struct i2c_device_id lm25066_id[] = {
        {"lm25056", lm25056},
+       {"lm25063", lm25063},
        {"lm25066", lm25066},
        {"lm5064", lm5064},
        {"lm5066", lm5066},
@@ -453,5 +526,5 @@ static struct i2c_driver lm25066_driver = {
 module_i2c_driver(lm25066_driver);
 
 MODULE_AUTHOR("Guenter Roeck");
-MODULE_DESCRIPTION("PMBus driver for LM25056/LM25066/LM5064/LM5066");
+MODULE_DESCRIPTION("PMBus driver for LM25066 and compatible chips");
 MODULE_LICENSE("GPL");
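
For context on the coefficient table extended above: chips using the PMBus DIRECT data format encode a real-world quantity X as Y = (m * X + b) * 10^R, and decoding inverts that. The snippet below is a standalone user-space illustration of the math, not the pmbus core code; with the lm25063 PSC_VOLTAGE_IN coefficients added here (m = 16000, b = 0, R = -2), a raw reading of 19200 decodes to 120 V.

#include <math.h>
#include <stdio.h>

/* PMBus DIRECT format: Y = (m * X + b) * 10^R  =>  X = (Y * 10^-R - b) / m */
static double pmbus_direct_decode(long y, long m, long b, int R)
{
	return (y * pow(10, -R) - b) / (double)m;
}

int main(void)
{
	printf("%.1f V\n", pmbus_direct_decode(19200, 16000, 0, -2));	/* prints 120.0 V */
	return 0;
}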
index 586a89ef9e0f4f28a6264fd48e00273fad2a1c5a..de3c152a1d9a1f2ed011b679f04ed2f6de74308b 100644 (file)
@@ -1,5 +1,6 @@
 /*
- * Hardware monitoring driver for LTC2974, LTC2978, LTC3880, and LTC3883
+ * Hardware monitoring driver for LTC2974, LTC2977, LTC2978, LTC3880,
+ * and LTC3883
  *
  * Copyright (c) 2011 Ericsson AB.
  * Copyright (c) 2013 Guenter Roeck
@@ -27,7 +28,7 @@
 #include <linux/i2c.h>
 #include "pmbus.h"
 
-enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
+enum chips { ltc2974, ltc2977, ltc2978, ltc3880, ltc3883 };
 
 /* Common for all chips */
 #define LTC2978_MFR_VOUT_PEAK          0xdd
@@ -35,7 +36,7 @@ enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
 #define LTC2978_MFR_TEMPERATURE_PEAK   0xdf
 #define LTC2978_MFR_SPECIAL_ID         0xe7
 
-/* LTC2974 and LTC2978 */
+/* LTC2974, LTC2977, and LTC2978 */
 #define LTC2978_MFR_VOUT_MIN           0xfb
 #define LTC2978_MFR_VIN_MIN            0xfc
 #define LTC2978_MFR_TEMPERATURE_MIN    0xfd
@@ -53,8 +54,10 @@ enum chips { ltc2974, ltc2978, ltc3880, ltc3883 };
 #define LTC3883_MFR_IIN_PEAK           0xe1
 
 #define LTC2974_ID                     0x0212
+#define LTC2977_ID                     0x0130
 #define LTC2978_ID_REV1                        0x0121
 #define LTC2978_ID_REV2                        0x0122
+#define LTC2978A_ID                    0x0124
 #define LTC3880_ID                     0x4000
 #define LTC3880_ID_MASK                        0xff00
 #define LTC3883_ID                     0x4300
@@ -363,6 +366,7 @@ static int ltc2978_write_word_data(struct i2c_client *client, int page,
 
 static const struct i2c_device_id ltc2978_id[] = {
        {"ltc2974", ltc2974},
+       {"ltc2977", ltc2977},
        {"ltc2978", ltc2978},
        {"ltc3880", ltc3880},
        {"ltc3883", ltc3883},
@@ -392,7 +396,10 @@ static int ltc2978_probe(struct i2c_client *client,
 
        if (chip_id == LTC2974_ID) {
                data->id = ltc2974;
-       } else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2) {
+       } else if (chip_id == LTC2977_ID) {
+               data->id = ltc2977;
+       } else if (chip_id == LTC2978_ID_REV1 || chip_id == LTC2978_ID_REV2 ||
+                  chip_id == LTC2978A_ID) {
                data->id = ltc2978;
        } else if ((chip_id & LTC3880_ID_MASK) == LTC3880_ID) {
                data->id = ltc3880;
@@ -438,6 +445,7 @@ static int ltc2978_probe(struct i2c_client *client,
                          | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT;
                }
                break;
+       case ltc2977:
        case ltc2978:
                info->read_word_data = ltc2978_read_word_data;
                info->pages = LTC2978_NUM_PAGES;
index 9319fcf142d96656040eabcc0d411699f61f4b06..3cbf66e9d861968863e01062902bdd699ca648e9 100644 (file)
@@ -97,6 +97,7 @@ struct pmbus_data {
        int max_attributes;
        int num_attributes;
        struct attribute_group group;
+       const struct attribute_group *groups[2];
 
        struct pmbus_sensor *sensors;
 
@@ -156,7 +157,7 @@ EXPORT_SYMBOL_GPL(pmbus_write_byte);
 
 /*
  * _pmbus_write_byte() is similar to pmbus_write_byte(), but checks if
- * a device specific mapping funcion exists and calls it if necessary.
+ * a device specific mapping function exists and calls it if necessary.
  */
 static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)
 {
@@ -348,7 +349,7 @@ static struct _pmbus_status {
 
 static struct pmbus_data *pmbus_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
+       struct i2c_client *client = to_i2c_client(dev->parent);
        struct pmbus_data *data = i2c_get_clientdata(client);
        const struct pmbus_driver_info *info = data->info;
        struct pmbus_sensor *sensor;
@@ -686,7 +687,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, struct pmbus_boolean *b,
        if (!s1 && !s2) {
                ret = !!regval;
        } else if (!s1 || !s2) {
-               BUG();
+               WARN(1, "Bad boolean descriptor %p: s1=%p, s2=%p\n", b, s1, s2);
                return 0;
        } else {
                long v1, v2;
@@ -733,7 +734,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
                                struct device_attribute *devattr,
                                const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
+       struct i2c_client *client = to_i2c_client(dev->parent);
        struct pmbus_data *data = i2c_get_clientdata(client);
        struct pmbus_sensor *sensor = to_pmbus_sensor(devattr);
        ssize_t rv = count;
@@ -1768,22 +1769,16 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
                goto out_kfree;
        }
 
-       /* Register sysfs hooks */
-       ret = sysfs_create_group(&dev->kobj, &data->group);
-       if (ret) {
-               dev_err(dev, "Failed to create sysfs entries\n");
-               goto out_kfree;
-       }
-       data->hwmon_dev = hwmon_device_register(dev);
+       data->groups[0] = &data->group;
+       data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
+                                                           data, data->groups);
        if (IS_ERR(data->hwmon_dev)) {
                ret = PTR_ERR(data->hwmon_dev);
                dev_err(dev, "Failed to register hwmon device\n");
-               goto out_hwmon_device_register;
+               goto out_kfree;
        }
        return 0;
 
-out_hwmon_device_register:
-       sysfs_remove_group(&dev->kobj, &data->group);
 out_kfree:
        kfree(data->group.attrs);
        return ret;
@@ -1794,7 +1789,6 @@ int pmbus_do_remove(struct i2c_client *client)
 {
        struct pmbus_data *data = i2c_get_clientdata(client);
        hwmon_device_unregister(data->hwmon_dev);
-       sysfs_remove_group(&client->dev.kobj, &data->group);
        kfree(data->group.attrs);
        return 0;
 }
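
The move to hwmon_device_register_with_groups() in pmbus_do_probe() is also why the callbacks above now use to_i2c_client(dev->parent): the attribute files live on the newly registered hwmon device, whose parent is the original I2C client, so a callback must step up one level to reach the client data. A sketch of the resulting callback shape (example_show and the field printed are only illustrative):

static ssize_t example_show(struct device *dev,	/* the hwmon child device */
			    struct device_attribute *devattr, char *buf)
{
	struct i2c_client *client = to_i2c_client(dev->parent);
	struct pmbus_data *data = i2c_get_clientdata(client);

	return sprintf(buf, "%d\n", data->num_attributes);
}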
index dfe6d9527efb4b6489676468d495e56e7841b2a9..7fa6e7d0b9b6f93ea634954cd1d71d0a67b1694b 100644 (file)
@@ -155,7 +155,8 @@ MODULE_DEVICE_TABLE(i2c, tmp401_id);
  */
 
 struct tmp401_data {
-       struct device *hwmon_dev;
+       struct i2c_client *client;
+       const struct attribute_group *groups[3];
        struct mutex update_lock;
        char valid; /* zero until following fields are valid */
        unsigned long last_updated; /* in jiffies */
@@ -231,8 +232,8 @@ static int tmp401_update_device_reg16(struct i2c_client *client,
 
 static struct tmp401_data *tmp401_update_device(struct device *dev)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct tmp401_data *data = i2c_get_clientdata(client);
+       struct tmp401_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        struct tmp401_data *ret = data;
        int i, val;
        unsigned long next_update;
@@ -350,15 +351,12 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *devattr,
 {
        int nr = to_sensor_dev_attr_2(devattr)->nr;
        int index = to_sensor_dev_attr_2(devattr)->index;
-       struct i2c_client *client = to_i2c_client(dev);
-       struct tmp401_data *data = tmp401_update_device(dev);
+       struct tmp401_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        long val;
        u16 reg;
        u8 regaddr;
 
-       if (IS_ERR(data))
-               return PTR_ERR(data);
-
        if (kstrtol(buf, 10, &val))
                return -EINVAL;
 
@@ -405,7 +403,7 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
        val = clamp_val(val, temp - 255000, temp);
        reg = ((temp - val) + 500) / 1000;
 
-       i2c_smbus_write_byte_data(to_i2c_client(dev), TMP401_TEMP_CRIT_HYST,
+       i2c_smbus_write_byte_data(data->client, TMP401_TEMP_CRIT_HYST,
                                  reg);
 
        data->temp_crit_hyst = reg;
@@ -423,8 +421,8 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
 static ssize_t reset_temp_history(struct device *dev,
        struct device_attribute *devattr, const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct tmp401_data *data = i2c_get_clientdata(client);
+       struct tmp401_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        long val;
 
        if (kstrtol(buf, 10, &val))
@@ -447,8 +445,7 @@ static ssize_t reset_temp_history(struct device *dev,
 static ssize_t show_update_interval(struct device *dev,
                                    struct device_attribute *attr, char *buf)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct tmp401_data *data = i2c_get_clientdata(client);
+       struct tmp401_data *data = dev_get_drvdata(dev);
 
        return sprintf(buf, "%u\n", data->update_interval);
 }
@@ -457,8 +454,8 @@ static ssize_t set_update_interval(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
 {
-       struct i2c_client *client = to_i2c_client(dev);
-       struct tmp401_data *data = i2c_get_clientdata(client);
+       struct tmp401_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
        unsigned long val;
        int err, rate;
 
@@ -616,10 +613,10 @@ static const struct attribute_group tmp432_group = {
  * Begin non sysfs callback code (aka Real code)
  */
 
-static void tmp401_init_client(struct i2c_client *client)
+static void tmp401_init_client(struct tmp401_data *data,
+                              struct i2c_client *client)
 {
        int config, config_orig;
-       struct tmp401_data *data = i2c_get_clientdata(client);
 
        /* Set the conversion rate to 2 Hz */
        i2c_smbus_write_byte_data(client, TMP401_CONVERSION_RATE_WRITE, 5);
@@ -705,77 +702,45 @@ static int tmp401_detect(struct i2c_client *client,
        return 0;
 }
 
-static int tmp401_remove(struct i2c_client *client)
-{
-       struct device *dev = &client->dev;
-       struct tmp401_data *data = i2c_get_clientdata(client);
-
-       if (data->hwmon_dev)
-               hwmon_device_unregister(data->hwmon_dev);
-
-       sysfs_remove_group(&dev->kobj, &tmp401_group);
-
-       if (data->kind == tmp411)
-               sysfs_remove_group(&dev->kobj, &tmp411_group);
-
-       if (data->kind == tmp432)
-               sysfs_remove_group(&dev->kobj, &tmp432_group);
-
-       return 0;
-}
-
 static int tmp401_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
+       const char *names[] = { "TMP401", "TMP411", "TMP431", "TMP432" };
        struct device *dev = &client->dev;
-       int err;
+       struct device *hwmon_dev;
        struct tmp401_data *data;
-       const char *names[] = { "TMP401", "TMP411", "TMP431", "TMP432" };
+       int groups = 0;
 
        data = devm_kzalloc(dev, sizeof(struct tmp401_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
 
-       i2c_set_clientdata(client, data);
+       data->client = client;
        mutex_init(&data->update_lock);
        data->kind = id->driver_data;
 
        /* Initialize the TMP401 chip */
-       tmp401_init_client(client);
+       tmp401_init_client(data, client);
 
        /* Register sysfs hooks */
-       err = sysfs_create_group(&dev->kobj, &tmp401_group);
-       if (err)
-               return err;
+       data->groups[groups++] = &tmp401_group;
 
        /* Register additional tmp411 sysfs hooks */
-       if (data->kind == tmp411) {
-               err = sysfs_create_group(&dev->kobj, &tmp411_group);
-               if (err)
-                       goto exit_remove;
-       }
+       if (data->kind == tmp411)
+               data->groups[groups++] = &tmp411_group;
 
        /* Register additional tmp432 sysfs hooks */
-       if (data->kind == tmp432) {
-               err = sysfs_create_group(&dev->kobj, &tmp432_group);
-               if (err)
-                       goto exit_remove;
-       }
+       if (data->kind == tmp432)
+               data->groups[groups++] = &tmp432_group;
 
-       data->hwmon_dev = hwmon_device_register(dev);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               data->hwmon_dev = NULL;
-               goto exit_remove;
-       }
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data, data->groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
        dev_info(dev, "Detected TI %s chip\n", names[data->kind]);
 
        return 0;
-
-exit_remove:
-       tmp401_remove(client);
-       return err;
 }
 
 static struct i2c_driver tmp401_driver = {
@@ -784,7 +749,6 @@ static struct i2c_driver tmp401_driver = {
                .name   = "tmp401",
        },
        .probe          = tmp401_probe,
-       .remove         = tmp401_remove,
        .id_table       = tmp401_id,
        .detect         = tmp401_detect,
        .address_list   = normal_i2c,
index a3feee332e200bd1cd1fd38e91b32a2b9ba00e28..bdcf2dce5ec4088e8f37f4c4c62d81f6e469b708 100644 (file)
@@ -1043,7 +1043,7 @@ static struct sensor_device_attribute sda_temp_alarm[] = {
        SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13),
 };
 
-/* get reatime status of all sensors items: voltage, temp, fan */
+/* get realtime status of all sensors items: voltage, temp, fan */
 static ssize_t show_alarms_reg(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
index 5febb43cb4c1032de659cbf3ec3ffa652c8c832f..df585808adb65feb2f53c458799010f68ca4bcc2 100644 (file)
@@ -579,7 +579,7 @@ static ssize_t store_temp23(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-/* get reatime status of all sensors items: voltage, temp, fan */
+/* get realtime status of all sensors items: voltage, temp, fan */
 static ssize_t
 show_alarms_reg(struct device *dev, struct device_attribute *attr, char *buf)
 {
index b0c30a546ff2e3079510e4858c6e8e15899ba092..9d63d71214cade1cddad0b8bfbbbbae726c4dda5 100644 (file)
@@ -808,7 +808,7 @@ show_sf_ctrl(struct device *dev, struct device_attribute *attr, char *buf)
        if (nr == TEMP_FAN_MAP) {
                val = data->temp_fan_map[index];
        } else if (nr == TEMP_PWM_ENABLE) {
-               /* +2 to transfrom into 2 and 3 to conform with sysfs intf */
+               /* +2 to transform into 2 and 3 to conform with sysfs intf */
                val = ((data->pwm_enable >> index) & 0x01) + 2;
        } else if (nr == TEMP_CRUISE) {
                val = TEMP_FROM_REG(data->temp_cruise[index] & 0x7f);
@@ -1199,7 +1199,8 @@ static void w83793_init_client(struct i2c_client *client)
 
 static int watchdog_set_timeout(struct w83793_data *data, int timeout)
 {
-       int ret, mtimeout;
+       unsigned int mtimeout;
+       int ret;
 
        mtimeout = DIV_ROUND_UP(timeout, 60);
 
index 35a473ba3d81d5afb9cb0188f2141c2dc417c737..3b9bd9a3f2b08a0a43683c87f1801d2f330185c3 100644 (file)
@@ -675,7 +675,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
        p_adap->retries = 3;
 
        rc = peripheral_request_list(
-                       (unsigned short *)dev_get_platdata(&pdev->dev),
+                       dev_get_platdata(&pdev->dev),
                        "i2c-bfin-twi");
        if (rc) {
                dev_err(&pdev->dev, "Can't setup pin mux!\n");
@@ -723,7 +723,7 @@ out_error_add_adapter:
        free_irq(iface->irq, iface);
 out_error_req_irq:
 out_error_no_irq:
-       peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
+       peripheral_free_list(dev_get_platdata(&pdev->dev));
 out_error_pin_mux:
        iounmap(iface->regs_base);
 out_error_ioremap:
@@ -739,7 +739,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
 
        i2c_del_adapter(&(iface->adap));
        free_irq(iface->irq, iface);
-       peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
+       peripheral_free_list(dev_get_platdata(&pdev->dev));
        iounmap(iface->regs_base);
        kfree(iface);
 
index 132369fad4e0fefe09e10dd88ade2eaf9a8b1972..85e8ad6056c4abce8f02bab4afc2e0a9a1cc0e15 100644 (file)
@@ -795,7 +795,7 @@ static struct platform_driver davinci_i2c_driver = {
                .name   = "i2c_davinci",
                .owner  = THIS_MODULE,
                .pm     = davinci_i2c_pm_ops,
-               .of_match_table = of_match_ptr(davinci_i2c_of_match),
+               .of_match_table = davinci_i2c_of_match,
        },
 };
 
index 4c1b60539a2515c34ee4df0e7c353dbd7860dc56..0aa01136f8d955148d984ac00e06ffb9e8ce1196 100644 (file)
@@ -270,7 +270,8 @@ static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
 MODULE_ALIAS("platform:i2c_designware");
 
 static struct platform_driver dw_i2c_driver = {
-       .remove         = dw_i2c_remove,
+       .probe = dw_i2c_probe,
+       .remove = dw_i2c_remove,
        .driver         = {
                .name   = "i2c_designware",
                .owner  = THIS_MODULE,
@@ -282,7 +283,7 @@ static struct platform_driver dw_i2c_driver = {
 
 static int __init dw_i2c_init_driver(void)
 {
-       return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
+       return platform_driver_register(&dw_i2c_driver);
 }
 subsys_initcall(dw_i2c_init_driver);
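
The designware hunk above (and the i.MX hunks below) switch from platform_driver_probe() to platform_driver_register() with an explicit .probe member. platform_driver_probe() only binds devices that already exist at init time and cannot take part in deferred probing; registering normally keeps the probe path alive later, which is also why the i.MX driver drops its __init/__exit annotations. The shape of the change, with a hypothetical driver:

static int foo_probe(struct platform_device *pdev);	/* may no longer be __init */
static int foo_remove(struct platform_device *pdev);

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo",
		.owner	= THIS_MODULE,
	},
};

static int __init foo_init(void)
{
	/* was: return platform_driver_probe(&foo_driver, foo_probe); */
	return platform_driver_register(&foo_driver);
}
subsys_initcall(foo_init);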
 
index ccf46656bdad9182e2d75d6c1f3dbf42ea0c32c6..1d7efa3169cd772ed2ba580b0c8cfd149eee09d5 100644 (file)
@@ -365,7 +365,7 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
        clk_disable_unprepare(i2c_imx->clk);
 }
 
-static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
+static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
                                                        unsigned int rate)
 {
        struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
@@ -589,7 +589,7 @@ static struct i2c_algorithm i2c_imx_algo = {
        .functionality  = i2c_imx_func,
 };
 
-static int __init i2c_imx_probe(struct platform_device *pdev)
+static int i2c_imx_probe(struct platform_device *pdev)
 {
        const struct of_device_id *of_id = of_match_device(i2c_imx_dt_ids,
                                                           &pdev->dev);
@@ -697,7 +697,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
        return 0;   /* Return OK */
 }
 
-static int __exit i2c_imx_remove(struct platform_device *pdev)
+static int i2c_imx_remove(struct platform_device *pdev)
 {
        struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
 
@@ -715,7 +715,8 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver i2c_imx_driver = {
-       .remove         = __exit_p(i2c_imx_remove),
+       .probe = i2c_imx_probe,
+       .remove = i2c_imx_remove,
        .driver = {
                .name   = DRIVER_NAME,
                .owner  = THIS_MODULE,
@@ -726,7 +727,7 @@ static struct platform_driver i2c_imx_driver = {
 
 static int __init i2c_adap_imx_init(void)
 {
-       return platform_driver_probe(&i2c_imx_driver, i2c_imx_probe);
+       return platform_driver_register(&i2c_imx_driver);
 }
 subsys_initcall(i2c_adap_imx_init);
 
index d3e9cc3153a973dc62f99d7d607e003179f41f9d..8be7e42aa4de88ba3f00ef2275455b27fa5cdf96 100644 (file)
@@ -911,7 +911,7 @@ static struct platform_driver mv64xxx_i2c_driver = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = MV64XXX_I2C_CTLR_NAME,
-               .of_match_table = of_match_ptr(mv64xxx_i2c_of_match_table),
+               .of_match_table = mv64xxx_i2c_of_match_table,
        },
 };
 
index f4a01675fa71b4a18ac2b35714425be363442528..99fe86e24fba0428adcf7a7ae27549be2138d8a7 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Freescale MXS I2C bus driver
  *
+ * Copyright (C) 2012-2013 Marek Vasut <marex@denx.de>
  * Copyright (C) 2011-2012 Wolfram Sang, Pengutronix e.K.
  *
  * based on a (non-working) driver which was:
 
 #define MXS_I2C_CTRL0          (0x00)
 #define MXS_I2C_CTRL0_SET      (0x04)
+#define MXS_I2C_CTRL0_CLR      (0x08)
 
 #define MXS_I2C_CTRL0_SFTRST                   0x80000000
 #define MXS_I2C_CTRL0_RUN                      0x20000000
 #define MXS_I2C_CTRL0_SEND_NAK_ON_LAST         0x02000000
+#define MXS_I2C_CTRL0_PIO_MODE                 0x01000000
 #define MXS_I2C_CTRL0_RETAIN_CLOCK             0x00200000
 #define MXS_I2C_CTRL0_POST_SEND_STOP           0x00100000
 #define MXS_I2C_CTRL0_PRE_SEND_START           0x00080000
 #define MXS_I2C_CTRL1_SLAVE_IRQ                        0x01
 
 #define MXS_I2C_STAT           (0x50)
+#define MXS_I2C_STAT_GOT_A_NAK                 0x10000000
 #define MXS_I2C_STAT_BUS_BUSY                  0x00000800
 #define MXS_I2C_STAT_CLK_GEN_BUSY              0x00000400
 
-#define MXS_I2C_DATA           (0xa0)
+#define MXS_I2C_DATA(i2c)      ((i2c->dev_type == MXS_I2C_V1) ? 0x60 : 0xa0)
 
-#define MXS_I2C_DEBUG0         (0xb0)
-#define MXS_I2C_DEBUG0_CLR     (0xb8)
+#define MXS_I2C_DEBUG0_CLR(i2c)        ((i2c->dev_type == MXS_I2C_V1) ? 0x78 : 0xb8)
 
 #define MXS_I2C_DEBUG0_DMAREQ  0x80000000
 
 #define MXS_CMD_I2C_READ       (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \
                                 MXS_I2C_CTRL0_MASTER_MODE)
 
+enum mxs_i2c_devtype {
+       MXS_I2C_UNKNOWN = 0,
+       MXS_I2C_V1,
+       MXS_I2C_V2,
+};
+
 /**
  * struct mxs_i2c_dev - per device, private MXS-I2C data
  *
  * @dev: driver model device node
+ * @dev_type: distinguish i.MX23/i.MX28 features
  * @regs: IO registers pointer
  * @cmd_complete: completion object for transaction wait
  * @cmd_err: error code for last transaction
  */
 struct mxs_i2c_dev {
        struct device *dev;
+       enum mxs_i2c_devtype dev_type;
        void __iomem *regs;
        struct completion cmd_complete;
        int cmd_err;
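
The new dev_type field lets the register-offset macros above select the i.MX23 (V1) or i.MX28 (V2) layout at run time; it would typically be filled in from the matched device-id entry in probe. A sketch under that assumption (the id table entries and probe body shown here are illustrative, not taken from this hunk):

static const struct platform_device_id mxs_i2c_devtype[] = {
	{ .name = "imx23-i2c", .driver_data = MXS_I2C_V1, },
	{ .name = "imx28-i2c", .driver_data = MXS_I2C_V2, },
	{ /* sentinel */ }
};

static int example_mxs_i2c_probe(struct platform_device *pdev)
{
	struct mxs_i2c_dev *i2c;

	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return -ENOMEM;

	/* Pick the register layout for this SoC generation. */
	i2c->dev_type = pdev->id_entry->driver_data;

	/* ... remainder of probe unchanged ... */
	return 0;
}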
@@ -291,48 +302,11 @@ write_init_pio_fail:
        return -EINVAL;
 }
 
-static int mxs_i2c_pio_wait_dmareq(struct mxs_i2c_dev *i2c)
+static int mxs_i2c_pio_wait_xfer_end(struct mxs_i2c_dev *i2c)
 {
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 
-       while (!(readl(i2c->regs + MXS_I2C_DEBUG0) &
-               MXS_I2C_DEBUG0_DMAREQ)) {
-               if (time_after(jiffies, timeout))
-                       return -ETIMEDOUT;
-               cond_resched();
-       }
-
-       return 0;
-}
-
-static int mxs_i2c_pio_wait_cplt(struct mxs_i2c_dev *i2c, int last)
-{
-       unsigned long timeout = jiffies + msecs_to_jiffies(1000);
-
-       /*
-        * We do not use interrupts in the PIO mode. Due to the
-        * maximum transfer length being 8 bytes in PIO mode, the
-        * overhead of interrupt would be too large and this would
-        * neglect the gain from using the PIO mode.
-        */
-
-       while (!(readl(i2c->regs + MXS_I2C_CTRL1) &
-               MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ)) {
-               if (time_after(jiffies, timeout))
-                       return -ETIMEDOUT;
-               cond_resched();
-       }
-
-       writel(MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ,
-               i2c->regs + MXS_I2C_CTRL1_CLR);
-
-       /*
-        * When ending a transfer with a stop, we have to wait for the bus to
-        * go idle before we report the transfer as completed. Otherwise the
-        * start of the next transfer may race with the end of the current one.
-        */
-       while (last && (readl(i2c->regs + MXS_I2C_STAT) &
-                       (MXS_I2C_STAT_BUS_BUSY | MXS_I2C_STAT_CLK_GEN_BUSY))) {
+       while (readl(i2c->regs + MXS_I2C_CTRL0) & MXS_I2C_CTRL0_RUN) {
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
                cond_resched();
@@ -370,106 +344,215 @@ static void mxs_i2c_pio_trigger_cmd(struct mxs_i2c_dev *i2c, u32 cmd)
        writel(reg, i2c->regs + MXS_I2C_CTRL0);
 }
 
+/*
+ * Start a WRITE transaction on the I2C bus. The CTRL0::PIO_MODE bit
+ * description in the i.MX23 datasheet clarifies the order in which the
+ * registers must be written in PIO mode. First, the CTRL0 register has
+ * to be programmed with all the necessary bits but the RUN bit. Then the
+ * payload has to be written into the DATA register. Finally, the transmission
+ * is executed by setting the RUN bit in CTRL0.
+ */
+static void mxs_i2c_pio_trigger_write_cmd(struct mxs_i2c_dev *i2c, u32 cmd,
+                                         u32 data)
+{
+       writel(cmd, i2c->regs + MXS_I2C_CTRL0);
+
+       if (i2c->dev_type == MXS_I2C_V1)
+               writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_SET);
+
+       writel(data, i2c->regs + MXS_I2C_DATA(i2c));
+       writel(MXS_I2C_CTRL0_RUN, i2c->regs + MXS_I2C_CTRL0_SET);
+}
+
 static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap,
                        struct i2c_msg *msg, uint32_t flags)
 {
        struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
        uint32_t addr_data = msg->addr << 1;
        uint32_t data = 0;
-       int i, shifts_left, ret;
+       int i, ret, xlen = 0, xmit = 0;
+       uint32_t start;
 
        /* Mute IRQs coming from this block. */
        writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_CLR);
 
+       /*
+        * MX23 idea:
+        * - Enable CTRL0::PIO_MODE (1 << 24)
+        * - Enable CTRL1::ACK_MODE (1 << 27)
+        *
+        * WARNING! The MX23 is broken in some way: even though it
+        * claims to support PIO, whenever we transfer an amount of
+        * data that is not aligned to 4 bytes, the DMA engine is left
+        * with bits in DEBUG1::DMA_BYTES_ENABLES still set after the
+        * transfer. This in turn messes up the next transfer, as the
+        * block then emits a one-byte write onto the bus terminated
+        * with a NAK+STOP. A possible workaround is to reset the IP
+        * block after every PIO transmission, which might just work.
+        *
+        * NOTE: The CTRL0::PIO_MODE description is important, since
+        * it outlines how the PIO mode is really supposed to work.
+        */
        if (msg->flags & I2C_M_RD) {
+               /*
+                * PIO READ transfer:
+                *
+                * This transfer MUST be limited to 4 bytes maximum. It is not
+                * possible to transfer more than four bytes via PIO, since we
+                * cannot guarantee that we can read the data from the DATA
+                * register fast enough. Besides, the RX FIFO is only four
+                * bytes deep, so we can only really read up to four bytes at
+                * a time. Finally, there is no bit telling us that new data
+                * has arrived in the FIFO and can thus be fetched from the
+                * DATA register.
+                */
+               BUG_ON(msg->len > 4);
+
                addr_data |= I2C_SMBUS_READ;
 
                /* SELECT command. */
-               mxs_i2c_pio_trigger_cmd(i2c, MXS_CMD_I2C_SELECT);
-
-               ret = mxs_i2c_pio_wait_dmareq(i2c);
-               if (ret)
-                       return ret;
-
-               writel(addr_data, i2c->regs + MXS_I2C_DATA);
-               writel(MXS_I2C_DEBUG0_DMAREQ, i2c->regs + MXS_I2C_DEBUG0_CLR);
+               mxs_i2c_pio_trigger_write_cmd(i2c, MXS_CMD_I2C_SELECT,
+                                             addr_data);
 
-               ret = mxs_i2c_pio_wait_cplt(i2c, 0);
-               if (ret)
-                       return ret;
-
-               if (mxs_i2c_pio_check_error_state(i2c))
+               ret = mxs_i2c_pio_wait_xfer_end(i2c);
+               if (ret) {
+                       dev_err(i2c->dev,
+                               "PIO: Failed to send SELECT command!\n");
                        goto cleanup;
+               }
 
                /* READ command. */
                mxs_i2c_pio_trigger_cmd(i2c,
                                        MXS_CMD_I2C_READ | flags |
                                        MXS_I2C_CTRL0_XFER_COUNT(msg->len));
 
+               ret = mxs_i2c_pio_wait_xfer_end(i2c);
+               if (ret) {
+                       dev_err(i2c->dev,
+                               "PIO: Failed to send READ command!\n");
+                       goto cleanup;
+               }
+
+               data = readl(i2c->regs + MXS_I2C_DATA(i2c));
                for (i = 0; i < msg->len; i++) {
-                       if ((i & 3) == 0) {
-                               ret = mxs_i2c_pio_wait_dmareq(i2c);
-                               if (ret)
-                                       return ret;
-                               data = readl(i2c->regs + MXS_I2C_DATA);
-                               writel(MXS_I2C_DEBUG0_DMAREQ,
-                                      i2c->regs + MXS_I2C_DEBUG0_CLR);
-                       }
                        msg->buf[i] = data & 0xff;
                        data >>= 8;
                }
        } else {
+               /*
+                * PIO WRITE transfer:
+                *
+                * The code below implements clock stretching to circumvent
+                * the possibility of the kernel not being able to supply
+                * data fast enough. It is possible to transfer an arbitrary
+                * amount of data using PIO write.
+                */
                addr_data |= I2C_SMBUS_WRITE;
 
-               /* WRITE command. */
-               mxs_i2c_pio_trigger_cmd(i2c,
-                                       MXS_CMD_I2C_WRITE | flags |
-                                       MXS_I2C_CTRL0_XFER_COUNT(msg->len + 1));
-
                /*
                 * The LSB of data buffer is the first byte blasted across
                 * the bus. Higher order bytes follow. Thus the following
                 * filling schematic.
                 */
+
                data = addr_data << 24;
+
+               /* Start the transfer with START condition. */
+               start = MXS_I2C_CTRL0_PRE_SEND_START;
+
+               /* If the transfer is long, use clock stretching. */
+               if (msg->len > 3)
+                       start |= MXS_I2C_CTRL0_RETAIN_CLOCK;
+
                for (i = 0; i < msg->len; i++) {
                        data >>= 8;
                        data |= (msg->buf[i] << 24);
-                       if ((i & 3) == 2) {
-                               ret = mxs_i2c_pio_wait_dmareq(i2c);
-                               if (ret)
-                                       return ret;
-                               writel(data, i2c->regs + MXS_I2C_DATA);
-                               writel(MXS_I2C_DEBUG0_DMAREQ,
-                                      i2c->regs + MXS_I2C_DEBUG0_CLR);
+
+                       xmit = 0;
+
+                       /* This is the last transfer of the message. */
+                       if (i + 1 == msg->len) {
+                               /* Add optional STOP flag. */
+                               start |= flags;
+                               /* Remove RETAIN_CLOCK bit. */
+                               start &= ~MXS_I2C_CTRL0_RETAIN_CLOCK;
+                               xmit = 1;
                        }
-               }
 
-               shifts_left = 24 - (i & 3) * 8;
-               if (shifts_left) {
-                       data >>= shifts_left;
-                       ret = mxs_i2c_pio_wait_dmareq(i2c);
-                       if (ret)
-                               return ret;
-                       writel(data, i2c->regs + MXS_I2C_DATA);
+                       /* Four bytes are ready in the "data" variable. */
+                       if ((i & 3) == 2)
+                               xmit = 1;
+
+                       /* Nothing interesting happened, continue stuffing. */
+                       if (!xmit)
+                               continue;
+
+                       /*
+                        * Compute the size of the transfer and shift the
+                        * data accordingly.
+                        *
+                        * i = (4k + 0) .... xlen = 2
+                        * i = (4k + 1) .... xlen = 3
+                        * i = (4k + 2) .... xlen = 4
+                        * i = (4k + 3) .... xlen = 1
+                        */
+
+                       if ((i % 4) == 3)
+                               xlen = 1;
+                       else
+                               xlen = (i % 4) + 2;
+
+                       data >>= (4 - xlen) * 8;
+
+                       dev_dbg(i2c->dev,
+                               "PIO: len=%i pos=%i total=%i [W%s%s%s]\n",
+                               xlen, i, msg->len,
+                               start & MXS_I2C_CTRL0_PRE_SEND_START ? "S" : "",
+                               start & MXS_I2C_CTRL0_POST_SEND_STOP ? "E" : "",
+                               start & MXS_I2C_CTRL0_RETAIN_CLOCK ? "C" : "");
+
                        writel(MXS_I2C_DEBUG0_DMAREQ,
-                              i2c->regs + MXS_I2C_DEBUG0_CLR);
+                              i2c->regs + MXS_I2C_DEBUG0_CLR(i2c));
+
+                       mxs_i2c_pio_trigger_write_cmd(i2c,
+                               start | MXS_I2C_CTRL0_MASTER_MODE |
+                               MXS_I2C_CTRL0_DIRECTION |
+                               MXS_I2C_CTRL0_XFER_COUNT(xlen), data);
+
+                       /* The START condition is sent only once. */
+                       start &= ~MXS_I2C_CTRL0_PRE_SEND_START;
+
+                       /* Wait for the end of the transfer. */
+                       ret = mxs_i2c_pio_wait_xfer_end(i2c);
+                       if (ret) {
+                               dev_err(i2c->dev,
+                                       "PIO: Failed to finish WRITE cmd!\n");
+                               break;
+                       }
+
+                       /* Check NAK here. */
+                       ret = readl(i2c->regs + MXS_I2C_STAT) &
+                                   MXS_I2C_STAT_GOT_A_NAK;
+                       if (ret) {
+                               ret = -ENXIO;
+                               goto cleanup;
+                       }
                }
        }
 
-       ret = mxs_i2c_pio_wait_cplt(i2c, flags & MXS_I2C_CTRL0_POST_SEND_STOP);
-       if (ret)
-               return ret;
-
        /* make sure we capture any occurred error into cmd_err */
-       mxs_i2c_pio_check_error_state(i2c);
+       ret = mxs_i2c_pio_check_error_state(i2c);
 
 cleanup:
        /* Clear any dangling IRQs and re-enable interrupts. */
        writel(MXS_I2C_IRQ_MASK, i2c->regs + MXS_I2C_CTRL1_CLR);
        writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
 
-       return 0;
+       /* Clear the PIO_MODE on i.MX23 */
+       if (i2c->dev_type == MXS_I2C_V1)
+               writel(MXS_I2C_CTRL0_PIO_MODE, i2c->regs + MXS_I2C_CTRL0_CLR);
+
+       return ret;
 }
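
The write path above packs the address byte and the payload into 32-bit words and flushes a chunk whenever four bytes have accumulated or the message ends, holding RETAIN_CLOCK between chunks, sending START only with the first chunk and the optional STOP only with the last. A stand-alone sketch of just that chunking bookkeeping (plain host C, no hardware access; the default length of 6 bytes is purely illustrative):

/* chunk_sketch.c - stand-alone model of the xlen chunking used above.
 * Only the index bookkeeping is reproduced; no registers are touched.
 * Build: cc -o chunk_sketch chunk_sketch.c && ./chunk_sketch 6
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	int len = argc > 1 ? atoi(argv[1]) : 6;	/* payload bytes, address excluded */
	int i, xlen, next = 0, first = 1;

	for (i = 0; i < len; i++) {
		int last = (i + 1 == len);

		/* Flush on every fourth accumulated byte or at the end. */
		if (!last && (i & 3) != 2)
			continue;

		/* Same table as the driver comment:
		 * i = 4k+0 -> 2, 4k+1 -> 3, 4k+2 -> 4, 4k+3 -> 1
		 */
		xlen = ((i % 4) == 3) ? 1 : (i % 4) + 2;

		printf("chunk at i=%d: XFER_COUNT=%d, carries %spayload[%d..%d]%s\n",
		       i, xlen, first ? "address + " : "", next, i,
		       last ? " (last chunk)" : "");

		next = i + 1;
		first = 0;
	}
	return 0;
}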
 
 /*
@@ -479,8 +562,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
                                int stop)
 {
        struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
-       int ret, err;
+       int ret;
        int flags;
+       int use_pio = 0;
 
        flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
 
@@ -491,19 +575,21 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
                return -EINVAL;
 
        /*
-        * The current boundary to select between PIO/DMA transfer method
-        * is set to 8 bytes, transfers shorter than 8 bytes are transfered
-        * using PIO mode while longer transfers use DMA. The 8 byte border is
-        * based on this empirical measurement and a lot of previous frobbing.
+        * The MX28 I2C IP block can only do PIO READ for transfers of up to
+        * 4 bytes in length. The write transfer is not limited, as it can use
+        * clock stretching to avoid FIFO underruns.
         */
+       if ((msg->flags & I2C_M_RD) && (msg->len <= 4))
+               use_pio = 1;
+       if (!(msg->flags & I2C_M_RD) && (msg->len < 7))
+               use_pio = 1;
+
        i2c->cmd_err = 0;
-       if (0) {        /* disable PIO mode until a proper fix is made */
+       if (use_pio) {
                ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
-               if (ret) {
-                       err = mxs_i2c_reset(i2c);
-                       if (err)
-                               return err;
-               }
+               /* No need to reset the block if NAK was received. */
+               if (ret && (ret != -ENXIO))
+                       mxs_i2c_reset(i2c);
        } else {
                INIT_COMPLETION(i2c->cmd_complete);
                ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
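
The four-byte cap on PIO reads follows from the unpacking loop earlier in this file: the whole payload must fit in one 32-bit read of the DATA register, and the bytes are consumed LSB first. A stand-alone illustration with a mocked register value (0x44332211 is arbitrary):

/* read_unpack.c - how a <= 4 byte PIO read falls out of one DATA word.
 * The register value is mocked; on hardware it would come from readl().
 * Build: cc -o read_unpack read_unpack.c && ./read_unpack
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t data = 0x44332211;	/* pretend MXS_I2C_DATA contents */
	uint8_t buf[4];
	int i, len = 4;			/* msg->len, at most 4 in PIO mode */

	for (i = 0; i < len; i++) {
		buf[i] = data & 0xff;	/* lowest byte arrives first */
		data >>= 8;
	}

	printf("buf = %02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}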
@@ -514,9 +600,11 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
                                                msecs_to_jiffies(1000));
                if (ret == 0)
                        goto timeout;
+
+               ret = i2c->cmd_err;
        }
 
-       if (i2c->cmd_err == -ENXIO) {
+       if (ret == -ENXIO) {
                /*
                 * If the transfer fails with a NAK from the slave the
                 * controller halts until it gets told to return to idle state.
@@ -525,7 +613,19 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
                       i2c->regs + MXS_I2C_CTRL1_SET);
        }
 
-       ret = i2c->cmd_err;
+       /*
+        * WARNING!
+        * The i.MX23 is strange. After each and every operation, its I2C IP
+        * block must be reset, otherwise the IP block will misbehave. This can
+        * be observed on the bus by the block sending out one single byte onto
+        * the bus. In case such an error happens, bit 27 will be set in the
+        * DEBUG0 register. This bit is not documented in the i.MX23 datasheet
+        * and is marked as "TBD" instead. To reset this bit to a correct state,
+        * reset the whole block. Since the block reset does not take long,
+        * reset the block after every transfer to play it safe.
+        */
+       if (i2c->dev_type == MXS_I2C_V1)
+               mxs_i2c_reset(i2c);
 
        dev_dbg(i2c->dev, "Done with err=%d\n", ret);
 
@@ -680,8 +780,28 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
        return 0;
 }
 
+static struct platform_device_id mxs_i2c_devtype[] = {
+       {
+               .name = "imx23-i2c",
+               .driver_data = MXS_I2C_V1,
+       }, {
+               .name = "imx28-i2c",
+               .driver_data = MXS_I2C_V2,
+       }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, mxs_i2c_devtype);
+
+static const struct of_device_id mxs_i2c_dt_ids[] = {
+       { .compatible = "fsl,imx23-i2c", .data = &mxs_i2c_devtype[0], },
+       { .compatible = "fsl,imx28-i2c", .data = &mxs_i2c_devtype[1], },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
+
 static int mxs_i2c_probe(struct platform_device *pdev)
 {
+       const struct of_device_id *of_id =
+                               of_match_device(mxs_i2c_dt_ids, &pdev->dev);
        struct device *dev = &pdev->dev;
        struct mxs_i2c_dev *i2c;
        struct i2c_adapter *adap;
@@ -693,6 +813,11 @@ static int mxs_i2c_probe(struct platform_device *pdev)
        if (!i2c)
                return -ENOMEM;
 
+       if (of_id) {
+               const struct platform_device_id *device_id = of_id->data;
+               i2c->dev_type = device_id->driver_data;
+       }
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
 
@@ -768,24 +893,19 @@ static int mxs_i2c_remove(struct platform_device *pdev)
        return 0;
 }
 
-static const struct of_device_id mxs_i2c_dt_ids[] = {
-       { .compatible = "fsl,imx28-i2c", },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
-
 static struct platform_driver mxs_i2c_driver = {
        .driver = {
                   .name = DRIVER_NAME,
                   .owner = THIS_MODULE,
                   .of_match_table = mxs_i2c_dt_ids,
                   },
+       .probe = mxs_i2c_probe,
        .remove = mxs_i2c_remove,
 };
 
 static int __init mxs_i2c_init(void)
 {
-       return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe);
+       return platform_driver_register(&mxs_i2c_driver);
 }
 subsys_initcall(mxs_i2c_init);
 
@@ -795,6 +915,7 @@ static void __exit mxs_i2c_exit(void)
 }
 module_exit(mxs_i2c_exit);
 
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
 MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
 MODULE_DESCRIPTION("MXS I2C Bus Driver");
 MODULE_LICENSE("GPL");
index 6d8308d5dc4e915c4584e2d5d9b3e5690882a2b9..9967a6f9c2ffba2fe4521a8abf19b49e978110a4 100644 (file)
@@ -939,6 +939,9 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                /*
                 * ProDB0017052: Clear ARDY bit twice
                 */
+               if (stat & OMAP_I2C_STAT_ARDY)
+                       omap_i2c_ack_stat(dev, OMAP_I2C_STAT_ARDY);
+
                if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
                                        OMAP_I2C_STAT_AL)) {
                        omap_i2c_ack_stat(dev, (OMAP_I2C_STAT_RRDY |
index d2fe11da5e82a43cdc80c9fc9446aeb1e7c7fa0b..8603f5e805aa9741b142141f37575faeb1a3007e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/i2c/i2c-rcar.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
@@ -226,15 +227,16 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
                                    u32 bus_speed,
                                    struct device *dev)
 {
-       struct clk *clkp = clk_get(NULL, "peripheral_clk");
+       struct clk *clkp = clk_get(dev, NULL);
        u32 scgd, cdf;
        u32 round, ick;
        u32 scl;
        u32 cdf_width;
+       unsigned long rate;
 
-       if (!clkp) {
-               dev_err(dev, "there is no peripheral_clk\n");
-               return -EIO;
+       if (IS_ERR(clkp)) {
+               dev_err(dev, "couldn't get clock\n");
+               return PTR_ERR(clkp);
        }
 
        switch (priv->devtype) {
@@ -264,15 +266,14 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
         * clkp : peripheral_clk
         * F[]  : integer up-valuation
         */
-       for (cdf = 0; cdf < (1 << cdf_width); cdf++) {
-               ick = clk_get_rate(clkp) / (1 + cdf);
-               if (ick < 20000000)
-                       goto ick_find;
+       rate = clk_get_rate(clkp);
+       cdf = rate / 20000000;
+       if (cdf >= 1 << cdf_width) {
+               dev_err(dev, "Input clock %lu too high\n", rate);
+               return -EIO;
        }
-       dev_err(dev, "there is no best CDF\n");
-       return -EIO;
+       ick = rate / (cdf + 1);
 
-ick_find:
        /*
         * it is impossible to calculate large scale
         * number on u32. separate it
@@ -290,6 +291,12 @@ ick_find:
         *
         * Calculation result (= SCL) should be less than
         * bus_speed for hardware safety
+        *
+        * We could use something along the lines of
+        *      div = ick / (bus_speed + 1) + 1;
+        *      scgd = (div - 20 - round + 7) / 8;
+        *      scl = ick / (20 + (scgd * 8) + round);
+        * (not fully verified) but that would get pretty involved
         */
        for (scgd = 0; scgd < 0x40; scgd++) {
                scl = ick / (20 + (scgd * 8) + round);
@@ -306,7 +313,7 @@ scgd_find:
        /*
         * keep icccr value
         */
-       priv->icccr = (scgd << (cdf_width) | cdf);
+       priv->icccr = scgd << cdf_width | cdf;
 
        return 0;
 }
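
The hunk above replaces the CDF search loop with a direct division; the arithmetic can be checked in isolation. In the sketch below, the 97.5 MHz input rate and the cdf_width of 3 are illustrative placeholders, not values taken from any particular R-Car variant:

/* cdf_sketch.c - the divider selection used above, stand-alone.
 * Picks the smallest CDF that brings the internal clock under 20 MHz.
 * Build: cc -o cdf_sketch cdf_sketch.c && ./cdf_sketch
 */
#include <stdio.h>

int main(void)
{
	unsigned long rate = 97500000;	/* illustrative peripheral clock */
	unsigned int cdf_width = 3;	/* illustrative; device dependent */
	unsigned long cdf, ick;

	cdf = rate / 20000000;
	if (cdf >= 1UL << cdf_width) {
		printf("Input clock %lu too high\n", rate);
		return 1;
	}
	ick = rate / (cdf + 1);

	printf("cdf=%lu ick=%lu Hz (below the 20 MHz limit)\n", cdf, ick);
	return 0;
}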
@@ -632,6 +639,15 @@ static const struct i2c_algorithm rcar_i2c_algo = {
        .functionality  = rcar_i2c_func,
 };
 
+static const struct of_device_id rcar_i2c_dt_ids[] = {
+       { .compatible = "renesas,i2c-rcar", .data = (void *)I2C_RCAR_H1 },
+       { .compatible = "renesas,i2c-r8a7778", .data = (void *)I2C_RCAR_H1 },
+       { .compatible = "renesas,i2c-r8a7779", .data = (void *)I2C_RCAR_H1 },
+       { .compatible = "renesas,i2c-r8a7790", .data = (void *)I2C_RCAR_H2 },
+       {},
+};
+MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids);
+
 static int rcar_i2c_probe(struct platform_device *pdev)
 {
        struct i2c_rcar_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -649,10 +665,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        }
 
        bus_speed = 100000; /* default 100 kHz */
-       if (pdata && pdata->bus_speed)
+       ret = of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed);
+       if (ret < 0 && pdata && pdata->bus_speed)
                bus_speed = pdata->bus_speed;
 
-       priv->devtype = platform_get_device_id(pdev)->driver_data;
+       if (pdev->dev.of_node)
+               priv->devtype = (long)of_match_device(rcar_i2c_dt_ids,
+                                                     dev)->data;
+       else
+               priv->devtype = platform_get_device_id(pdev)->driver_data;
 
        ret = rcar_i2c_clock_calculate(priv, bus_speed, dev);
        if (ret < 0)
@@ -673,6 +694,7 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        adap->class             = I2C_CLASS_HWMON | I2C_CLASS_SPD;
        adap->retries           = 3;
        adap->dev.parent        = dev;
+       adap->dev.of_node       = dev->of_node;
        i2c_set_adapdata(adap, priv);
        strlcpy(adap->name, pdev->name, sizeof(adap->name));
 
@@ -720,6 +742,7 @@ static struct platform_driver rcar_i2c_driver = {
        .driver = {
                .name   = "i2c-rcar",
                .owner  = THIS_MODULE,
+               .of_match_table = rcar_i2c_dt_ids,
        },
        .probe          = rcar_i2c_probe,
        .remove         = rcar_i2c_remove,
index f8f6f2e552db29e344b41f4e42d0cbc1ac9fc000..04a17b9b38bbbb9f54cd8cb91ce2888aac95a478 100644 (file)
@@ -859,8 +859,7 @@ static const struct i2c_algorithm stu300_algo = {
        .functionality  = stu300_func,
 };
 
-static int __init
-stu300_probe(struct platform_device *pdev)
+static int stu300_probe(struct platform_device *pdev)
 {
        struct stu300_dev *dev;
        struct i2c_adapter *adap;
@@ -966,8 +965,7 @@ static SIMPLE_DEV_PM_OPS(stu300_pm, stu300_suspend, stu300_resume);
 #define STU300_I2C_PM  NULL
 #endif
 
-static int __exit
-stu300_remove(struct platform_device *pdev)
+static int stu300_remove(struct platform_device *pdev)
 {
        struct stu300_dev *dev = platform_get_drvdata(pdev);
 
@@ -989,13 +987,14 @@ static struct platform_driver stu300_i2c_driver = {
                .pm     = STU300_I2C_PM,
                .of_match_table = stu300_dt_match,
        },
-       .remove         = __exit_p(stu300_remove),
+       .probe = stu300_probe,
+       .remove = stu300_remove,
 
 };
 
 static int __init stu300_init(void)
 {
-       return platform_driver_probe(&stu300_i2c_driver, stu300_probe);
+       return platform_driver_register(&stu300_i2c_driver);
 }
 
 static void __exit stu300_exit(void)
index 4c8b368d463b7c77517fb3ed314e4234bd6dafbc..6e7b09c1804e324ead3503e06c5f042cddf1f9b3 100644 (file)
@@ -702,7 +702,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
        if (irq < 0)
                goto resource_missing;
 
-       pdata = (struct xiic_i2c_platform_data *)dev_get_platdata(&pdev->dev);
+       pdata = dev_get_platdata(&pdev->dev);
 
        i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
        if (!i2c)
index 29d3f045a2bfbc688beeb79f888cb72d10c3bcd2..5923cfa390c86de6528559c229ace0a4b39b402c 100644 (file)
@@ -248,16 +248,17 @@ static int i2c_device_probe(struct device *dev)
        driver = to_i2c_driver(dev->driver);
        if (!driver->probe || !driver->id_table)
                return -ENODEV;
-       client->driver = driver;
+
        if (!device_can_wakeup(&client->dev))
                device_init_wakeup(&client->dev,
                                        client->flags & I2C_CLIENT_WAKE);
        dev_dbg(dev, "probe\n");
 
+       acpi_dev_pm_attach(&client->dev, true);
        status = driver->probe(client, i2c_match_id(driver->id_table, client));
        if (status) {
-               client->driver = NULL;
                i2c_set_clientdata(client, NULL);
+               acpi_dev_pm_detach(&client->dev, true);
        }
        return status;
 }
@@ -279,10 +280,9 @@ static int i2c_device_remove(struct device *dev)
                dev->driver = NULL;
                status = 0;
        }
-       if (status == 0) {
-               client->driver = NULL;
+       if (status == 0)
                i2c_set_clientdata(client, NULL);
-       }
+       acpi_dev_pm_detach(&client->dev, true);
        return status;
 }
 
@@ -1111,8 +1111,10 @@ static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
        if (ret < 0 || !info.addr)
                return AE_OK;
 
+       adev->power.flags.ignore_parent = true;
        strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
        if (!i2c_new_device(adapter, &info)) {
+               adev->power.flags.ignore_parent = false;
                dev_err(&adapter->dev,
                        "failed to add I2C device %s from ACPI\n",
                        dev_name(&adev->dev));
@@ -1134,6 +1136,9 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap)
        acpi_handle handle;
        acpi_status status;
 
+       if (!adap->dev.parent)
+               return;
+
        handle = ACPI_HANDLE(adap->dev.parent);
        if (!handle)
                return;
@@ -1606,9 +1611,14 @@ static int i2c_cmd(struct device *dev, void *_arg)
 {
        struct i2c_client       *client = i2c_verify_client(dev);
        struct i2c_cmd_arg      *arg = _arg;
+       struct i2c_driver       *driver;
+
+       if (!client || !client->dev.driver)
+               return 0;
 
-       if (client && client->driver && client->driver->command)
-               client->driver->command(client, arg->cmd, arg->arg);
+       driver = to_i2c_driver(client->dev.driver);
+       if (driver->command)
+               driver->command(client, arg->cmd, arg->arg);
        return 0;
 }
 
index c3ccdea3d18059c4dd199f533932ff17f0b4f448..80b47e8ce030cef7ff6f9ab9f058b15e63f7329e 100644 (file)
@@ -102,8 +102,8 @@ static void return_i2c_dev(struct i2c_dev *i2c_dev)
        kfree(i2c_dev);
 }
 
-static ssize_t show_adapter_name(struct device *dev,
-                                struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
 {
        struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt));
 
@@ -111,7 +111,13 @@ static ssize_t show_adapter_name(struct device *dev,
                return -ENODEV;
        return sprintf(buf, "%s\n", i2c_dev->adap->name);
 }
-static DEVICE_ATTR(name, S_IRUGO, show_adapter_name, NULL);
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *i2c_attrs[] = {
+       &dev_attr_name.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(i2c);
 
 /* ------------------------------------------------------------------------- */
 
@@ -562,15 +568,10 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
                res = PTR_ERR(i2c_dev->dev);
                goto error;
        }
-       res = device_create_file(i2c_dev->dev, &dev_attr_name);
-       if (res)
-               goto error_destroy;
 
        pr_debug("i2c-dev: adapter [%s] registered as minor %d\n",
                 adap->name, adap->nr);
        return 0;
-error_destroy:
-       device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
 error:
        return_i2c_dev(i2c_dev);
        return res;
@@ -589,7 +590,6 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
        if (!i2c_dev) /* attach_adapter must have failed */
                return 0;
 
-       device_remove_file(i2c_dev->dev, &dev_attr_name);
        return_i2c_dev(i2c_dev);
        device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
 
@@ -637,6 +637,7 @@ static int __init i2c_dev_init(void)
                res = PTR_ERR(i2c_dev_class);
                goto out_unreg_chrdev;
        }
+       i2c_dev_class->dev_groups = i2c_groups;
 
        /* Keep track of adapters which will be added or removed later */
        res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
index 44d4c6071c15096e19d16433092b7428c113f34f..c99b229873665b56656a7cabd95c7dfd6acb3fb1 100644 (file)
@@ -46,6 +46,7 @@ static int smbus_do_alert(struct device *dev, void *addrp)
 {
        struct i2c_client *client = i2c_verify_client(dev);
        struct alert_data *data = addrp;
+       struct i2c_driver *driver;
 
        if (!client || client->addr != data->addr)
                return 0;
@@ -54,12 +55,13 @@ static int smbus_do_alert(struct device *dev, void *addrp)
 
        /*
         * Drivers should either disable alerts, or provide at least
-        * a minimal handler.  Lock so client->driver won't change.
+        * a minimal handler.  Lock so the driver won't change.
         */
        device_lock(dev);
-       if (client->driver) {
-               if (client->driver->alert)
-                       client->driver->alert(client, data->flag);
+       if (client->dev.driver) {
+               driver = to_i2c_driver(client->dev.driver);
+               if (driver->alert)
+                       driver->alert(client, data->flag);
                else
                        dev_warn(&client->dev, "no driver alert()!\n");
        } else
index 74b41ae690f3e6dab7271319ed62c436ac30935a..c58e093b6032480a316c1725db5f6879842cd2ef 100644 (file)
@@ -200,7 +200,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
        arb->parent = of_find_i2c_adapter_by_node(parent_np);
        if (!arb->parent) {
                dev_err(dev, "Cannot find parent bus\n");
-               return -EINVAL;
+               return -EPROBE_DEFER;
        }
 
        /* Actually add the mux adapter */
@@ -238,7 +238,7 @@ static struct platform_driver i2c_arbitrator_driver = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "i2c-arb-gpio-challenge",
-               .of_match_table = of_match_ptr(i2c_arbitrator_of_match),
+               .of_match_table = i2c_arbitrator_of_match,
        },
 };
 
index 5d4a99ba743e39b21a9483ce2a15aabb41a84934..d72841f8010ca6709e52a12e2e8b566b9dcd9544 100644 (file)
@@ -66,7 +66,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
        struct device_node *adapter_np, *child;
        struct i2c_adapter *adapter;
        unsigned *values, *gpios;
-       int i = 0;
+       int i = 0, ret;
 
        if (!np)
                return -ENODEV;
@@ -79,7 +79,7 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
        adapter = of_find_i2c_adapter_by_node(adapter_np);
        if (!adapter) {
                dev_err(&pdev->dev, "Cannot find parent bus\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
        mux->data.parent = i2c_adapter_id(adapter);
        put_device(&adapter->dev);
@@ -116,8 +116,12 @@ static int i2c_mux_gpio_probe_dt(struct gpiomux *mux,
                return -ENOMEM;
        }
 
-       for (i = 0; i < mux->data.n_gpios; i++)
-               gpios[i] = of_get_named_gpio(np, "mux-gpios", i);
+       for (i = 0; i < mux->data.n_gpios; i++) {
+               ret = of_get_named_gpio(np, "mux-gpios", i);
+               if (ret < 0)
+                       return ret;
+               gpios[i] = ret;
+       }
 
        mux->data.gpios = gpios;
 
@@ -177,7 +181,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
        if (!parent) {
                dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
                        mux->data.parent);
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
 
        mux->parent = parent;
@@ -279,7 +283,7 @@ static struct platform_driver i2c_mux_gpio_driver = {
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "i2c-mux-gpio",
-               .of_match_table = of_match_ptr(i2c_mux_gpio_of_match),
+               .of_match_table = i2c_mux_gpio_of_match,
        },
 };
 
index 69a91732ae6561560ca6286007a10602e0e09ba4..68a37157377df12797b1b122ca4568d121559112 100644 (file)
@@ -113,7 +113,7 @@ static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
        adapter = of_find_i2c_adapter_by_node(adapter_np);
        if (!adapter) {
                dev_err(mux->dev, "Cannot find parent bus\n");
-               return -ENODEV;
+               return -EPROBE_DEFER;
        }
        mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
        put_device(&adapter->dev);
@@ -211,7 +211,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
        if (!mux->parent) {
                dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
                        mux->pdata->parent_bus_num);
-               ret = -ENODEV;
+               ret = -EPROBE_DEFER;
                goto err;
        }
 
index 02906ca99b41c4583ca6c3adb18f309ec88ffad5..5dba90a8a27cbc8445cffbf621971ce5d9c6fb43 100644 (file)
@@ -722,13 +722,6 @@ config BLK_DEV_IDE_RAPIDE
          Say Y here if you want to support the Yellowstone RapIDE controller
          manufactured for use with Acorn computers.
 
-config IDE_H8300
-       tristate "H8300 IDE support"
-       depends on H8300
-       default y
-       help
-         Enables the H8300 IDE driver.
-
 config BLK_DEV_GAYLE
        tristate "Amiga Gayle IDE interface support"
        depends on AMIGA
index af8d016c37eaae0dc4f5c97b9aecce840d6fec57..a04ee82f1c8f5bf5bd712f90dd73744a37762e2a 100644 (file)
@@ -78,8 +78,6 @@ obj-$(CONFIG_BLK_DEV_CMD640)          += cmd640.o
 
 obj-$(CONFIG_BLK_DEV_IDE_PMAC)         += pmac.o
 
-obj-$(CONFIG_IDE_H8300)                        += ide-h8300.o
-
 obj-$(CONFIG_IDE_GENERIC)              += ide-generic.o
 obj-$(CONFIG_BLK_DEV_IDEPNP)           += ide-pnp.o
 
diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c
deleted file mode 100644 (file)
index 520f42c..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * H8/300 generic IDE interface
- */
-
-#include <linux/init.h>
-#include <linux/ide.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define DRV_NAME "ide-h8300"
-
-#define bswap(d) \
-({                                     \
-       u16 r;                          \
-       __asm__("mov.b %w1,r1h\n\t"     \
-               "mov.b %x1,r1l\n\t"     \
-               "mov.w r1,%0"           \
-               :"=r"(r)                \
-               :"r"(d)                 \
-               :"er1");                \
-       (r);                            \
-})
-
-static void mm_outsw(unsigned long addr, void *buf, u32 len)
-{
-       unsigned short *bp = (unsigned short *)buf;
-       for (; len > 0; len--, bp++)
-               *(volatile u16 *)addr = bswap(*bp);
-}
-
-static void mm_insw(unsigned long addr, void *buf, u32 len)
-{
-       unsigned short *bp = (unsigned short *)buf;
-       for (; len > 0; len--, bp++)
-               *bp = bswap(*(volatile u16 *)addr);
-}
-
-static void h8300_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
-                            void *buf, unsigned int len)
-{
-       mm_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
-}
-
-static void h8300_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
-                             void *buf, unsigned int len)
-{
-       mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
-}
-
-static const struct ide_tp_ops h8300_tp_ops = {
-       .exec_command           = ide_exec_command,
-       .read_status            = ide_read_status,
-       .read_altstatus         = ide_read_altstatus,
-       .write_devctl           = ide_write_devctl,
-
-       .dev_select             = ide_dev_select,
-       .tf_load                = ide_tf_load,
-       .tf_read                = ide_tf_read,
-
-       .input_data             = h8300_input_data,
-       .output_data            = h8300_output_data,
-};
-
-#define H8300_IDE_GAP (2)
-
-static inline void hw_setup(struct ide_hw *hw)
-{
-       int i;
-
-       memset(hw, 0, sizeof(*hw));
-       for (i = 0; i <= 7; i++)
-               hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
-       hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
-       hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
-}
-
-static const struct ide_port_info h8300_port_info = {
-       .tp_ops                 = &h8300_tp_ops,
-       .host_flags             = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA,
-       .chipset                = ide_generic,
-};
-
-static int __init h8300_ide_init(void)
-{
-       struct ide_hw hw, *hws[] = { &hw };
-
-       printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n");
-
-       if (!request_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8, "ide-h8300"))
-               goto out_busy;
-       if (!request_region(CONFIG_H8300_IDE_ALT, H8300_IDE_GAP, "ide-h8300")) {
-               release_region(CONFIG_H8300_IDE_BASE, H8300_IDE_GAP*8);
-               goto out_busy;
-       }
-
-       hw_setup(&hw);
-
-       return ide_host_add(&h8300_port_info, hws, 1, NULL);
-
-out_busy:
-       printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n");
-
-       return -EBUSY;
-}
-
-module_init(h8300_ide_init);
-
-MODULE_LICENSE("GPL");
index fa6964d8681a0d126fcf7c4845896b3f06298f8f..3a449f65eb2d3beb889217893a0e9d677214c1d9 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * intel_idle.c - native hardware idle loop for modern Intel processors
  *
- * Copyright (c) 2010, Intel Corporation.
+ * Copyright (c) 2013, Intel Corporation.
  * Len Brown <len.brown@intel.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -123,7 +123,7 @@ static struct cpuidle_state *cpuidle_state_table;
  * which is also the index into the MWAIT hint array.
  * Thus C0 is a dummy.
  */
-static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state nehalem_cstates[] __initdata = {
        {
                .name = "C1-NHM",
                .desc = "MWAIT 0x00",
@@ -156,7 +156,7 @@ static struct cpuidle_state nehalem_cstates[CPUIDLE_STATE_MAX] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state snb_cstates[] __initdata = {
        {
                .name = "C1-SNB",
                .desc = "MWAIT 0x00",
@@ -196,7 +196,7 @@ static struct cpuidle_state snb_cstates[CPUIDLE_STATE_MAX] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state ivb_cstates[] __initdata = {
        {
                .name = "C1-IVB",
                .desc = "MWAIT 0x00",
@@ -236,7 +236,7 @@ static struct cpuidle_state ivb_cstates[CPUIDLE_STATE_MAX] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state hsw_cstates[] __initdata = {
        {
                .name = "C1-HSW",
                .desc = "MWAIT 0x00",
@@ -297,7 +297,7 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
+static struct cpuidle_state atom_cstates[] __initdata = {
        {
                .name = "C1E-ATM",
                .desc = "MWAIT 0x00",
@@ -329,6 +329,36 @@ static struct cpuidle_state atom_cstates[CPUIDLE_STATE_MAX] = {
        {
                .enter = NULL }
 };
+static struct cpuidle_state avn_cstates[CPUIDLE_STATE_MAX] = {
+       {
+               .name = "C1-AVN",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 1,
+               .target_residency = 1,
+               .enter = &intel_idle },
+       {
+               .name = "C1E-AVN",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 5,
+               .target_residency = 10,
+               .enter = &intel_idle },
+       {
+               .name = "C6NS-AVN",     /* No Cache Shrink */
+               .desc = "MWAIT 0x51",
+               .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 15,
+               .target_residency = 45,
+               .enter = &intel_idle },
+       {
+               .name = "C6FS-AVN",     /* Full Cache Shrink */
+               .desc = "MWAIT 0x52",
+               .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 150,            /* fake penalty added due to cold cache */
+               .target_residency = 100000,     /* fake penalty added due to cold cache */
+               .enter = &intel_idle },
+};
 
 /**
  * intel_idle
@@ -462,6 +492,11 @@ static const struct idle_cpu idle_cpu_hsw = {
        .disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_avn = {
+       .state_table = avn_cstates,
+       .disable_promotion_to_c1e = true,
+};
+
 #define ICPU(model, cpu) \
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
@@ -483,6 +518,7 @@ static const struct x86_cpu_id intel_idle_ids[] = {
        ICPU(0x3f, idle_cpu_hsw),
        ICPU(0x45, idle_cpu_hsw),
        ICPU(0x46, idle_cpu_hsw),
+       ICPU(0x4D, idle_cpu_avn),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -490,7 +526,7 @@ MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
 /*
  * intel_idle_probe()
  */
-static int intel_idle_probe(void)
+static int __init intel_idle_probe(void)
 {
        unsigned int eax, ebx, ecx;
        const struct x86_cpu_id *id;
@@ -558,7 +594,7 @@ static void intel_idle_cpuidle_devices_uninit(void)
  * intel_idle_cpuidle_driver_init()
  * allocate, initialize cpuidle_states
  */
-static int intel_idle_cpuidle_driver_init(void)
+static int __init intel_idle_cpuidle_driver_init(void)
 {
        int cstate;
        struct cpuidle_driver *drv = &intel_idle_driver;
@@ -628,7 +664,7 @@ static int intel_idle_cpu_init(int cpu)
                int num_substates, mwait_hint, mwait_cstate, mwait_substate;
 
                if (cpuidle_state_table[cstate].enter == NULL)
-                       continue;
+                       break;
 
                if (cstate + 1 > max_cstate) {
                        printk(PREFIX "max_cstate %d reached\n", max_cstate);
index a7b30be86ae06cb7ae59a077aa45ee4e58f52051..52605c0ea3a69fce7c1d312d4fa1610de43174fd 100644 (file)
@@ -525,8 +525,10 @@ static int adf4350_probe(struct spi_device *spi)
        }
 
        indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
-       if (indio_dev == NULL)
-               return -ENOMEM;
+       if (indio_dev == NULL) {
+               ret =  -ENOMEM;
+               goto error_disable_clk;
+       }
 
        st = iio_priv(indio_dev);
 
index 2710f7245c3b5472b0e924bd22a94f6bc1212b6f..2db7dcd826b9db6500e354498b385cbb3ebd9f7d 100644 (file)
@@ -477,6 +477,9 @@ void iio_disable_all_buffers(struct iio_dev *indio_dev)
        indio_dev->currentmode = INDIO_DIRECT_MODE;
        if (indio_dev->setup_ops->postdisable)
                indio_dev->setup_ops->postdisable(indio_dev);
+
+       if (indio_dev->available_scan_masks == NULL)
+               kfree(indio_dev->active_scan_mask);
 }
 
 int iio_update_buffers(struct iio_dev *indio_dev,
index 5ceda710f516bc4721e14961a504fa7e2c3054a9..b84791f03a27f5d174b60680f77c59de06bb542a 100644 (file)
@@ -31,6 +31,17 @@ config INFINIBAND_USER_ACCESS
          libibverbs, libibcm and a hardware driver library from
          <http://www.openfabrics.org/git/>.
 
+config INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
+       bool "Experimental and unstable ABI for userspace access to flow steering verbs"
+       depends on INFINIBAND_USER_ACCESS
+       depends on STAGING
+       ---help---
+         The final ABI for userspace access to flow steering verbs
+         has not been defined.  To use the current ABI, *WHICH WILL
+         CHANGE IN THE FUTURE*, say Y here.
+
+         If unsure, say N.
+
 config INFINIBAND_USER_MEM
        bool
        depends on INFINIBAND_USER_ACCESS != n
index dab4b41f1715846bd2a26c03fd90f0175766980e..a082fd9e7ebe009465f961eab3ad8b840839f7f9 100644 (file)
@@ -2294,7 +2294,7 @@ static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
        int low, high, remaining;
        unsigned int rover;
 
-       inet_get_local_port_range(&low, &high);
+       inet_get_local_port_range(&init_net, &low, &high);
        remaining = (high - low) + 1;
        rover = net_random() % remaining + low;
 retry:
index d040b877475f3f04f9f6c2fea63ba772fbd9586c..d8f9c6c272d732a898b334be0c3034aadf37b275 100644 (file)
@@ -217,7 +217,9 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
 IB_UVERBS_DECLARE_CMD(create_xsrq);
 IB_UVERBS_DECLARE_CMD(open_xrcd);
 IB_UVERBS_DECLARE_CMD(close_xrcd);
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 IB_UVERBS_DECLARE_CMD(create_flow);
 IB_UVERBS_DECLARE_CMD(destroy_flow);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 #endif /* UVERBS_H */
index f2b81b9ee0d6849c67693e50c351721c64df27fe..2f0f01b70e3bd22c538cd3a4081be0a9ec3f790f 100644 (file)
@@ -54,7 +54,9 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
 static struct uverbs_lock_class ah_lock_class  = { .name = "AH-uobj" };
 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen)                      \
        do {                                                            \
@@ -2599,6 +2601,7 @@ out_put:
        return ret ? ret : in_len;
 }
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
                                union ib_flow_spec *ib_spec)
 {
@@ -2824,6 +2827,7 @@ ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
 
        return ret ? ret : in_len;
 }
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
                                struct ib_uverbs_create_xsrq *cmd,
index 75ad86c4abf82a86572d9ca8b35d52a7f9d2d4ce..2df31f68ea0905ef06ac1ed054348cf669cfaa61 100644 (file)
@@ -115,8 +115,10 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
        [IB_USER_VERBS_CMD_CLOSE_XRCD]          = ib_uverbs_close_xrcd,
        [IB_USER_VERBS_CMD_CREATE_XSRQ]         = ib_uverbs_create_xsrq,
        [IB_USER_VERBS_CMD_OPEN_QP]             = ib_uverbs_open_qp,
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        [IB_USER_VERBS_CMD_CREATE_FLOW]         = ib_uverbs_create_flow,
        [IB_USER_VERBS_CMD_DESTROY_FLOW]        = ib_uverbs_destroy_flow
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
@@ -605,6 +607,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
        if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
                return -ENOSYS;
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
                struct ib_uverbs_cmd_hdr_ex hdr_ex;
 
@@ -621,6 +624,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                                                     (hdr_ex.out_words +
                                                      hdr_ex.provider_out_words) * 4);
        } else {
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
                if (hdr.in_words * 4 != count)
                        return -EINVAL;
 
@@ -628,7 +632,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                                                     buf + sizeof(hdr),
                                                     hdr.in_words * 4,
                                                     hdr.out_words * 4);
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        }
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 }
 
 static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
index d5d1929753e4fdc95bbdc67625f5f0521ffeb32f..cedda25232be2454b3b2f0902152d96a7b1dfd75 100644 (file)
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
                return "C2_QP_STATE_ERROR";
        default:
                return "<invalid QP state>";
-       };
+       }
 }
 
 void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
index d6c5a73becf40ecfc0f177422f34de1f14247f32..f0612645de998f6bcba2dfa0ae34a2b51483af38 100644 (file)
@@ -1691,9 +1691,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                ibdev->ib_dev.create_flow       = mlx4_ib_create_flow;
                ibdev->ib_dev.destroy_flow      = mlx4_ib_destroy_flow;
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
                ibdev->ib_dev.uverbs_cmd_mask   |=
                        (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
        }
 
        mlx4_ib_alloc_eqs(dev, ibdev);
index 3f831de9a4d8a901607afc26c81827cc5c1e3bd5..b1a6cb3a2809282fbfab1b44b09c53e315a93466 100644 (file)
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
        struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+       char name[MLX5_MAX_EQ_NAME];
        struct mlx5_eq *eq, *n;
        int ncomp_vec;
        int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
                        goto clean;
                }
 
-               snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+               snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(&dev->mdev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-                                        eq->name,
-                                        &dev->mdev.priv.uuari.uars[0]);
+                                        name, &dev->mdev.priv.uuari.uars[0]);
                if (err) {
                        kfree(eq);
                        goto clean;
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len = (unsigned int)-1;
        props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
-       props->atomic_cap          = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-               IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-       props->masked_atomic_cap   = IB_ATOMIC_HCA;
+       props->atomic_cap          = IB_ATOMIC_NONE;
+       props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_pkeys           = be16_to_cpup((__be16 *)(out_mad->data + 28));
        props->max_mcast_grp       = 1 << dev->mdev.caps.log_max_mcg;
        props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
        ibev.device           = &ibdev->ib_dev;
        ibev.element.port_num = port;
 
+       if (port < 1 || port > ibdev->num_ports) {
+               mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+               return;
+       }
+
        if (ibdev->ib_active)
                ib_dispatch_event(&ibev);
 }
index bd41df95b6f0214deb445ab228be8d157373074f..3453580b1eb2cc45800b8ee697157a24cf269970 100644 (file)
@@ -42,6 +42,10 @@ enum {
        DEF_CACHE_SIZE  = 10,
 };
 
+enum {
+       MLX5_UMR_ALIGN  = 2048
+};
+
 static __be64 *mr_align(__be64 *ptr, int align)
 {
        unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-       struct device *ddev = dev->ib_dev.dma_device;
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
        int npages = 1 << ent->order;
-       int size = sizeof(u64) * npages;
        int err = 0;
        int i;
 
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                }
                mr->order = ent->order;
                mr->umred = 1;
-               mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
-               if (!mr->pas) {
-                       kfree(mr);
-                       err = -ENOMEM;
-                       goto out;
-               }
-               mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
-                                        DMA_TO_DEVICE);
-               if (dma_mapping_error(ddev, mr->dma)) {
-                       kfree(mr->pas);
-                       kfree(mr);
-                       err = -ENOMEM;
-                       goto out;
-               }
-
                in->seg.status = 1 << 6;
                in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
                in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                                            sizeof(*in));
                if (err) {
                        mlx5_ib_warn(dev, "create mkey failed %d\n", err);
-                       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-                       kfree(mr->pas);
                        kfree(mr);
                        goto out;
                }
@@ -129,11 +114,9 @@ out:
 
 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-       struct device *ddev = dev->ib_dev.dma_device;
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
-       int size;
        int err;
        int i;
 
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
                ent->size--;
                spin_unlock(&ent->lock);
                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-               if (err) {
+               if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
-               } else {
-                       size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-                       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-                       kfree(mr->pas);
+               else
                        kfree(mr);
-               }
        }
 }
 
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-       struct device *ddev = dev->ib_dev.dma_device;
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
        struct mlx5_ib_mr *mr;
-       int size;
        int err;
 
+       cancel_delayed_work(&ent->dwork);
        while (1) {
                spin_lock(&ent->lock);
                if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
                ent->size--;
                spin_unlock(&ent->lock);
                err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-               if (err) {
+               if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
-               } else {
-                       size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-                       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-                       kfree(mr->pas);
+               else
                        kfree(mr);
-               }
        }
 }
 
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
        int i;
 
        dev->cache.stopped = 1;
-       destroy_workqueue(dev->cache.wq);
+       flush_workqueue(dev->cache.wq);
 
        mlx5_mr_cache_debugfs_cleanup(dev);
 
        for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
                clean_keys(dev, i);
 
+       destroy_workqueue(dev->cache.wq);
+
        return 0;
 }
 
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                                  int page_shift, int order, int access_flags)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct ib_send_wr wr, *bad;
        struct mlx5_ib_mr *mr;
        struct ib_sge sg;
+       int size = sizeof(u64) * npages;
        int err;
        int i;
 
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
        if (!mr)
                return ERR_PTR(-EAGAIN);
 
-       mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+       mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+       if (!mr->pas) {
+               err = -ENOMEM;
+               goto error;
+       }
+
+       mlx5_ib_populate_pas(dev, umem, page_shift,
+                            mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+
+       mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+                                DMA_TO_DEVICE);
+       if (dma_mapping_error(ddev, mr->dma)) {
+               kfree(mr->pas);
+               err = -ENOMEM;
+               goto error;
+       }
 
        memset(&wr, 0, sizeof(wr));
        wr.wr_id = (u64)(unsigned long)mr;
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
        wait_for_completion(&mr->done);
        up(&umrc->sem);
 
+       dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+       kfree(mr->pas);
+
        if (mr->status != IB_WC_SUCCESS) {
                mlx5_ib_warn(dev, "reg umr failed\n");
                err = -EFAULT;
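
The page-list handling above follows a common over-allocate/align/map pattern: reserve MLX5_UMR_ALIGN - 1 extra bytes so the list can be aligned, map it with dma_map_single(), check dma_mapping_error(), and unmap and free once the UMR completes. A compressed sketch of that pattern, assuming the generic PTR_ALIGN macro in place of the driver's mr_align() helper and using a made-up function name:

static int map_page_list(struct device *ddev, int npages, void **pas,
                         dma_addr_t *dma)
{
        size_t size = sizeof(u64) * npages;

        *pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
        if (!*pas)
                return -ENOMEM;

        *dma = dma_map_single(ddev, PTR_ALIGN(*pas, MLX5_UMR_ALIGN), size,
                              DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, *dma)) {
                kfree(*pas);
                return -ENOMEM;
        }
        return 0;   /* the caller unmaps and frees after the UMR completes */
}
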
index 045f8cdbd303deba81e60c5f1f52330b536b9617..5659ea88074108e60d9253085ce89f962326b808 100644 (file)
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
 
        switch (qp_type) {
        case IB_QPT_XRC_INI:
-               size = sizeof(struct mlx5_wqe_xrc_seg);
+               size += sizeof(struct mlx5_wqe_xrc_seg);
                /* fall through */
        case IB_QPT_RC:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
                        sizeof(struct mlx5_wqe_raddr_seg);
                break;
 
+       case IB_QPT_XRC_TGT:
+               return 0;
+
        case IB_QPT_UC:
-               size = sizeof(struct mlx5_wqe_ctrl_seg) +
+               size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_raddr_seg);
                break;
 
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
-               size = sizeof(struct mlx5_wqe_ctrl_seg) +
+               size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_datagram_seg);
                break;
 
        case MLX5_IB_QPT_REG_UMR:
-               size = sizeof(struct mlx5_wqe_ctrl_seg) +
+               size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_umr_ctrl_seg) +
                        sizeof(struct mlx5_mkey_seg);
                break;
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                return wqe_size;
 
        if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
-               mlx5_ib_dbg(dev, "\n");
+               mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+                           wqe_size, dev->mdev.caps.max_sq_desc_sz);
                return -EINVAL;
        }
 
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+       if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+               mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+                           qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+               return -ENOMEM;
+       }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
        qp->sq.max_gs = attr->cap.max_send_sge;
-       qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+       qp->sq.max_post = wq_size / wqe_size;
+       attr->cap.max_send_wr = qp->sq.max_post;
 
        return wq_size;
 }
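
The max_post change above stops rounding the usable WQE count down to a power of two and reports the real capacity back to the caller. A worked example with hypothetical sizes:

/*
 * Assume wq_size = 4096 and wqe_size = 80 (illustrative values):
 *
 *   wq_size / wqe_size    = 51 usable send WQEs
 *   old: 1 << ilog2(51)   = 32  (19 slots silently lost)
 *   new: max_post         = 51, written back to attr->cap.max_send_wr
 */
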
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
                                          MLX5_QP_OPTPAR_Q_KEY,
                        [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX    |
                                           MLX5_QP_OPTPAR_Q_KEY,
+                       [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+                                         MLX5_QP_OPTPAR_RRE            |
+                                         MLX5_QP_OPTPAR_RAE            |
+                                         MLX5_QP_OPTPAR_RWE            |
+                                         MLX5_QP_OPTPAR_PKEY_INDEX,
                },
        },
        [MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
                [MLX5_QP_STATE_RTS] = {
                        [MLX5_QP_ST_UD]  = MLX5_QP_OPTPAR_Q_KEY,
                        [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+                       [MLX5_QP_ST_UC]  = MLX5_QP_OPTPAR_RWE,
+                       [MLX5_QP_ST_RC]  = MLX5_QP_OPTPAR_RNR_TIMEOUT   |
+                                          MLX5_QP_OPTPAR_RWE           |
+                                          MLX5_QP_OPTPAR_RAE           |
+                                          MLX5_QP_OPTPAR_RRE,
                },
        },
 };
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
        rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-       if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-               aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-               aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
-       } else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-               aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-               aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-       } else {
-               aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-               aseg->compare  = 0;
-       }
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-                                 struct ib_send_wr *wr)
-{
-       aseg->swap_add          = cpu_to_be64(wr->wr.atomic.swap);
-       aseg->swap_add_mask     = cpu_to_be64(wr->wr.atomic.swap_mask);
-       aseg->compare           = cpu_to_be64(wr->wr.atomic.compare_add);
-       aseg->compare_mask      = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
                             struct ib_send_wr *wr)
 {
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
-                               set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-                                             wr->wr.atomic.rkey);
-                               seg  += sizeof(struct mlx5_wqe_raddr_seg);
-
-                               set_atomic_seg(seg, wr);
-                               seg  += sizeof(struct mlx5_wqe_atomic_seg);
-
-                               size += (sizeof(struct mlx5_wqe_raddr_seg) +
-                                        sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-                               break;
-
                        case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-                               set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-                                             wr->wr.atomic.rkey);
-                               seg  += sizeof(struct mlx5_wqe_raddr_seg);
-
-                               set_masked_atomic_seg(seg, wr);
-                               seg  += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-                               size += (sizeof(struct mlx5_wqe_raddr_seg) +
-                                        sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-                               break;
+                               mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+                               err = -ENOSYS;
+                               *bad_wr = wr;
+                               goto out;
 
                        case IB_WR_LOCAL_INV:
                                next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
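
Because atomics are now rejected at post time, callers see the failure through the usual bad_wr contract: on error, *bad_wr points at the first work request that was not posted, so everything before it was accepted. A hypothetical caller fragment (qp and first_wr are assumed to exist):

struct ib_send_wr *bad = NULL;
int ret = ib_post_send(qp, first_wr, &bad);

if (ret)
        pr_warn("post_send failed (%d) at wr_id 0x%llx\n",
                ret, (unsigned long long)bad->wr_id);
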
index 84d297afd6a9889642007a4b3d6b2e0e6e25be74..0aa478bc291ae39aec0ed8c243cce72fe7ed2bdd 100644 (file)
@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
        mlx5_vfree(in);
        if (err) {
                mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
-               goto err_srq;
+               goto err_usr_kern_srq;
        }
 
        mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
 err_core:
        mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+
+err_usr_kern_srq:
        if (pd->uobject)
                destroy_srq_user(pd, srq);
        else
index 7c9d35f39d756950b9470d50201f882ca6fba1e3..69020173899397ee5889287ee79f0c3525ab8991 100644 (file)
@@ -357,7 +357,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
                        mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
                                   eqe->type, eqe->subtype, eq->eqn);
                        break;
-               };
+               }
 
                set_eqe_hw(eqe);
                ++eq->cons_index;
index 4ed8235d2d36d818360cce7e2d43010e0840ab62..50219ab2279d56ae6599e7189b06034bf12049d9 100644 (file)
@@ -150,7 +150,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
                return IB_QPS_SQE;
        case OCRDMA_QPS_ERR:
                return IB_QPS_ERR;
-       };
+       }
        return IB_QPS_ERR;
 }
 
@@ -171,7 +171,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
                return OCRDMA_QPS_SQE;
        case IB_QPS_ERR:
                return OCRDMA_QPS_ERR;
-       };
+       }
        return OCRDMA_QPS_ERR;
 }
 
@@ -1982,7 +1982,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
                break;
        default:
                return -EINVAL;
-       };
+       }
 
        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
        if (!cmd)
index 56e004940f1806c9203e788985395ea4ea5bdddb..0ce7674621eaba2aa310fef332695b67f3057971 100644 (file)
@@ -531,7 +531,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
        case BE_DEV_DOWN:
                ocrdma_close(dev);
                break;
-       };
+       }
 }
 
 static struct ocrdma_driver ocrdma_drv = {
index 6e982bb43c3172d2cc36b844c1b97f8bb0960237..69f1d1221a6bea07039fc37b5ead33f941fc74a4 100644 (file)
@@ -141,7 +141,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
                /* Unsupported */
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
-       };
+       }
 }
 
 
@@ -2331,7 +2331,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
        default:
                ibwc_status = IB_WC_GENERAL_ERR;
                break;
-       };
+       }
        return ibwc_status;
 }
 
@@ -2370,7 +2370,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
                pr_err("%s() invalid opcode received = 0x%x\n",
                       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
                break;
-       };
+       }
 }
 
 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
index c0446992892533b24d3853d043cf088451db5662..e75d015024a15c7fb6eb3074299e234a69916594 100644 (file)
@@ -1734,6 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
  */
 struct input_dev *input_allocate_device(void)
 {
+       static atomic_t input_no = ATOMIC_INIT(0);
        struct input_dev *dev;
 
        dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
@@ -1743,9 +1744,13 @@ struct input_dev *input_allocate_device(void)
                device_initialize(&dev->dev);
                mutex_init(&dev->mutex);
                spin_lock_init(&dev->event_lock);
+               init_timer(&dev->timer);
                INIT_LIST_HEAD(&dev->h_list);
                INIT_LIST_HEAD(&dev->node);
 
+               dev_set_name(&dev->dev, "input%ld",
+                            (unsigned long) atomic_inc_return(&input_no) - 1);
+
                __module_get(THIS_MODULE);
        }
 
@@ -2019,7 +2024,6 @@ static void devm_input_device_unregister(struct device *dev, void *res)
  */
 int input_register_device(struct input_dev *dev)
 {
-       static atomic_t input_no = ATOMIC_INIT(0);
        struct input_devres *devres = NULL;
        struct input_handler *handler;
        unsigned int packet_size;
@@ -2059,7 +2063,6 @@ int input_register_device(struct input_dev *dev)
         * If delay and period are pre-set by the driver, then autorepeating
         * is handled by the driver itself and we don't do it in input.c.
         */
-       init_timer(&dev->timer);
        if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) {
                dev->timer.data = (long) dev;
                dev->timer.function = input_repeat_key;
@@ -2073,9 +2076,6 @@ int input_register_device(struct input_dev *dev)
        if (!dev->setkeycode)
                dev->setkeycode = input_default_setkeycode;
 
-       dev_set_name(&dev->dev, "input%ld",
-                    (unsigned long) atomic_inc_return(&input_no) - 1);
-
        error = device_add(&dev->dev);
        if (error)
                goto err_free_vals;
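
With dev_set_name() and init_timer() moved into input_allocate_device(), the device carries its "inputN" name and a valid timer from the moment it is allocated rather than only after registration. A hypothetical probe fragment that relies on this (pdev is a made-up platform device):

struct input_dev *idev = input_allocate_device();    /* already named "inputN" */

if (!idev)
        return -ENOMEM;
dev_dbg(&pdev->dev, "allocated %s\n", dev_name(&idev->dev));
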
index 5446ad4e4aebbb30d6d912a49bef1f6c88e432ee..186138c720c79ebfee4ef896c61aeee78c770289 100644 (file)
@@ -787,10 +787,17 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
        input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
        input_set_capability(input_dev, EV_MSC, MSC_SCAN);
 
-       if (pdata)
+       if (pdata) {
                error = pxa27x_keypad_build_keycode(keypad);
-       else
+       } else {
                error = pxa27x_keypad_build_keycode_from_dt(keypad);
+               /*
+                * Data that we get from DT resides in dynamically
+                * allocated memory so we need to update our pdata
+                * pointer.
+                */
+               pdata = keypad->pdata;
+       }
        if (error) {
                dev_err(&pdev->dev, "failed to build keycode\n");
                goto failed_put_clk;
index 082684e7f390f7d9f50201df3e5ddd325f2d0256..9365535ba7f157b98138f6f0defb14d9e1ae782a 100644 (file)
@@ -351,7 +351,9 @@ static void cm109_urb_irq_callback(struct urb *urb)
        if (status) {
                if (status == -ESHUTDOWN)
                        return;
-               dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status);
+               dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
+                                   __func__, status);
+               goto out;
        }
 
        /* Special keys */
@@ -418,8 +420,12 @@ static void cm109_urb_ctl_callback(struct urb *urb)
             dev->ctl_data->byte[2],
             dev->ctl_data->byte[3]);
 
-       if (status)
-               dev_err(&dev->intf->dev, "%s: urb status %d\n", __func__, status);
+       if (status) {
+               if (status == -ESHUTDOWN)
+                       return;
+               dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n",
+                                   __func__, status);
+       }
 
        spin_lock(&dev->ctl_submit_lock);
 
@@ -427,7 +433,7 @@ static void cm109_urb_ctl_callback(struct urb *urb)
 
        if (likely(!dev->shutdown)) {
 
-               if (dev->buzzer_pending) {
+               if (dev->buzzer_pending || status) {
                        dev->buzzer_pending = 0;
                        dev->ctl_urb_pending = 1;
                        cm109_submit_buzz_toggle(dev);
index 78e4de42efaacec53c29f740fafe490dead2af84..52c9ebf94729ff5bf6531cc7773902ece632d03b 100644 (file)
@@ -223,21 +223,26 @@ static int i8042_flush(void)
 {
        unsigned long flags;
        unsigned char data, str;
-       int i = 0;
+       int count = 0;
+       int retval = 0;
 
        spin_lock_irqsave(&i8042_lock, flags);
 
-       while (((str = i8042_read_status()) & I8042_STR_OBF) && (i < I8042_BUFFER_SIZE)) {
-               udelay(50);
-               data = i8042_read_data();
-               i++;
-               dbg("%02x <- i8042 (flush, %s)\n",
-                   data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+       while ((str = i8042_read_status()) & I8042_STR_OBF) {
+               if (count++ < I8042_BUFFER_SIZE) {
+                       udelay(50);
+                       data = i8042_read_data();
+                       dbg("%02x <- i8042 (flush, %s)\n",
+                           data, str & I8042_STR_AUXDATA ? "aux" : "kbd");
+               } else {
+                       retval = -EIO;
+                       break;
+               }
        }
 
        spin_unlock_irqrestore(&i8042_lock, flags);
 
-       return i;
+       return retval;
 }
 
 /*
@@ -849,7 +854,7 @@ static int __init i8042_check_aux(void)
 
 static int i8042_controller_check(void)
 {
-       if (i8042_flush() == I8042_BUFFER_SIZE) {
+       if (i8042_flush()) {
                pr_err("No controller found\n");
                return -ENODEV;
        }
index 8a90da11365f78d4193550410398d6fa07a390aa..867e7c33ac55072b19f2ff339705c4e90f6b428d 100644 (file)
@@ -1038,6 +1038,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
 }
 
 static enum power_supply_property wacom_battery_props[] = {
+       POWER_SUPPLY_PROP_SCOPE,
        POWER_SUPPLY_PROP_CAPACITY
 };
 
@@ -1049,6 +1050,9 @@ static int wacom_battery_get_property(struct power_supply *psy,
        int ret = 0;
 
        switch (psp) {
+               case POWER_SUPPLY_PROP_SCOPE:
+                       val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+                       break;
                case POWER_SUPPLY_PROP_CAPACITY:
                        val->intval =
                                wacom->wacom_wac.battery_capacity * 100 / 31;
index 9c8eded2e504956a557403cc61b7be595e7a901b..782c2535f1d81a26db96716ac406405b2b03a78b 100644 (file)
@@ -2126,6 +2126,12 @@ static const struct wacom_features wacom_features_0x101 =
 static const struct wacom_features wacom_features_0x10D =
        { "Wacom ISDv4 10D",      WACOM_PKGLEN_MTTPC,     26202, 16325,  255,
          0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10E =
+       { "Wacom ISDv4 10E",      WACOM_PKGLEN_MTTPC,     27760, 15694,  255,
+         0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x10F =
+       { "Wacom ISDv4 10F",      WACOM_PKGLEN_MTTPC,     27760, 15694,  255,
+         0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x4001 =
        { "Wacom ISDv4 4001",      WACOM_PKGLEN_MTTPC,     26202, 16325,  255,
          0, MTTPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2327,6 +2333,8 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x100) },
        { USB_DEVICE_WACOM(0x101) },
        { USB_DEVICE_WACOM(0x10D) },
+       { USB_DEVICE_WACOM(0x10E) },
+       { USB_DEVICE_WACOM(0x10F) },
        { USB_DEVICE_WACOM(0x300) },
        { USB_DEVICE_WACOM(0x301) },
        { USB_DEVICE_WACOM(0x304) },
index fe302e33f72e7b2da024180e697422461b017bc7..c880ebaf155372eaa7460491258502c9c2d35dcd 100644 (file)
@@ -52,7 +52,7 @@ config AMD_IOMMU
        select PCI_PRI
        select PCI_PASID
        select IOMMU_API
-       depends on X86_64 && PCI && ACPI && X86_IO_APIC
+       depends on X86_64 && PCI && ACPI
        ---help---
          With this option you can enable support for AMD IOMMU hardware in
          your system. An IOMMU is a hardware component which provides
index d0e948084eaf7e2211c323f5976ecc08e5681136..9031171c141b52c5e9175fdbf6eec9bd0c4224b3 100644 (file)
@@ -253,10 +253,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
 
+       raw_spin_lock(&irq_controller_lock);
        mask = 0xff << shift;
        bit = gic_cpu_map[cpu] << shift;
-
-       raw_spin_lock(&irq_controller_lock);
        val = readl_relaxed(reg) & ~mask;
        writel_relaxed(val | bit, reg);
        raw_spin_unlock(&irq_controller_lock);
@@ -652,7 +651,9 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 {
        int cpu;
-       unsigned long map = 0;
+       unsigned long flags, map = 0;
+
+       raw_spin_lock_irqsave(&irq_controller_lock, flags);
 
        /* Convert our logical CPU mask into a physical one. */
        for_each_cpu(cpu, mask)
@@ -666,7 +667,149 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 
        /* this always happens on GIC0 */
        writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+
+       raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+}
+#endif
+
+#ifdef CONFIG_BL_SWITCHER
+/*
+ * gic_send_sgi - send an SGI directly to the given CPU interface number
+ *
+ * cpu_id: the ID for the destination CPU interface
+ * irq: the IPI number to send a SGI for
+ */
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
+{
+       BUG_ON(cpu_id >= NR_GIC_CPU_IF);
+       cpu_id = 1 << cpu_id;
+       /* this always happens on GIC0 */
+       writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+}
+
+/*
+ * gic_get_cpu_id - get the CPU interface ID for the specified CPU
+ *
+ * @cpu: the logical CPU number to get the GIC ID for.
+ *
+ * Return the CPU interface ID for the given logical CPU number,
+ * or -1 if the CPU number is too large or the interface ID is
+ * unknown (more than one bit set).
+ */
+int gic_get_cpu_id(unsigned int cpu)
+{
+       unsigned int cpu_bit;
+
+       if (cpu >= NR_GIC_CPU_IF)
+               return -1;
+       cpu_bit = gic_cpu_map[cpu];
+       if (cpu_bit & (cpu_bit - 1))
+               return -1;
+       return __ffs(cpu_bit);
 }
+
+/*
+ * gic_migrate_target - migrate IRQs to another CPU interface
+ *
+ * @new_cpu_id: the CPU target ID to migrate IRQs to
+ *
+ * Migrate all peripheral interrupts with a target matching the current CPU
+ * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
+ * is also updated.  Targets to other CPU interfaces are unchanged.
+ * This must be called with IRQs locally disabled.
+ */
+void gic_migrate_target(unsigned int new_cpu_id)
+{
+       unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
+       void __iomem *dist_base;
+       int i, ror_val, cpu = smp_processor_id();
+       u32 val, cur_target_mask, active_mask;
+
+       if (gic_nr >= MAX_GIC_NR)
+               BUG();
+
+       dist_base = gic_data_dist_base(&gic_data[gic_nr]);
+       if (!dist_base)
+               return;
+       gic_irqs = gic_data[gic_nr].gic_irqs;
+
+       cur_cpu_id = __ffs(gic_cpu_map[cpu]);
+       cur_target_mask = 0x01010101 << cur_cpu_id;
+       ror_val = (cur_cpu_id - new_cpu_id) & 31;
+
+       raw_spin_lock(&irq_controller_lock);
+
+       /* Update the target interface for this logical CPU */
+       gic_cpu_map[cpu] = 1 << new_cpu_id;
+
+       /*
+        * Find all the peripheral interrupts targeting the current
+        * CPU interface and migrate them to the new CPU interface.
+        * We skip DIST_TARGET 0 to 7 as they are read-only.
+        */
+       for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
+               val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+               active_mask = val & cur_target_mask;
+               if (active_mask) {
+                       val &= ~active_mask;
+                       val |= ror32(active_mask, ror_val);
+                       writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
+               }
+       }
+
+       raw_spin_unlock(&irq_controller_lock);
+
+       /*
+        * Now let's migrate and clear any potential SGIs that might be
+        * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
+        * is a banked register, we can only forward the SGI using
+        * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
+        * doesn't use that information anyway.
+        *
+        * For the same reason we do not adjust SGI source information
+        * for SGIs we previously sent to other CPUs either.
+        */
+       for (i = 0; i < 16; i += 4) {
+               int j;
+               val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
+               if (!val)
+                       continue;
+               writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
+               for (j = i; j < i + 4; j++) {
+                       if (val & 0xff)
+                               writel_relaxed((1 << (new_cpu_id + 16)) | j,
+                                               dist_base + GIC_DIST_SOFTINT);
+                       val >>= 8;
+               }
+       }
+}
+
+/*
+ * gic_get_sgir_physaddr - get the physical address for the SGI register
+ *
+ * Return the physical address of the SGI register to be used
+ * by some early assembly code when the kernel is not yet available.
+ */
+static unsigned long gic_dist_physaddr;
+
+unsigned long gic_get_sgir_physaddr(void)
+{
+       if (!gic_dist_physaddr)
+               return 0;
+       return gic_dist_physaddr + GIC_DIST_SOFTINT;
+}
+
+void __init gic_init_physaddr(struct device_node *node)
+{
+       struct resource res;
+       if (of_address_to_resource(node, 0, &res) == 0) {
+               gic_dist_physaddr = res.start;
+               pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
+       }
+}
+
+#else
+#define gic_init_physaddr(node)  do { } while (0)
 #endif
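
Each GIC_DIST_TARGET word packs the 8-bit target masks of four interrupts, so cur_target_mask = 0x01010101 << cur_cpu_id selects the current CPU's bit in every byte lane, and ror32() moves all four bits to the new CPU's column at once. A worked example with illustrative values:

/* Four SPIs currently target CPU interface 2 and must move to interface 0. */
u32 val             = 0x04040404;             /* one GIC_DIST_TARGET word  */
u32 cur_target_mask = 0x01010101 << 2;        /* 0x04040404                */
u32 active_mask     = val & cur_target_mask;  /* all four lanes match      */
int ror_val         = (2 - 0) & 31;           /* rotate right by 2         */

val &= ~active_mask;
val |= ror32(active_mask, ror_val);           /* val is now 0x01010101     */
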
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
@@ -850,6 +993,8 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
                percpu_offset = 0;
 
        gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
+       if (!gic_cnt)
+               gic_init_physaddr(node);
 
        if (parent) {
                irq = irq_of_parse_and_map(node, 0);
index 52377b4bf039e0cbe19249ee3361acbd6ce85014..a2e0ed6c9a4d367db910d9493974bcd116a369e1 100644 (file)
@@ -481,7 +481,7 @@ void __inline__ outpp(void __iomem *addr, word p)
 int diva_os_register_irq(void *context, byte irq, const char *name)
 {
        int result = request_irq(irq, diva_os_irq_wrapper,
-                                IRQF_DISABLED | IRQF_SHARED, name, context);
+                                IRQF_SHARED, name, context);
        return (result);
 }
 
index 7cab5c3276c2e16f9e91fb97a635e7ccdf6640b5..e1519718ce67e3ed4363078eb3e8207b6b9f1db6 100644 (file)
@@ -288,9 +288,9 @@ int divas_um_idi_delete_entity(int adapter_nr, void *entity)
        cleanup_entity(e);
        diva_os_free(0, e->os_context);
        memset(e, 0x00, sizeof(*e));
-       diva_os_free(0, e);
 
        DBG_LOG(("A(%d) remove E:%08x", adapter_nr, e));
+       diva_os_free(0, e);
 
        return (0);
 }
index ca997bd4e818c6c1606ddea03d53d8d5eb51cdd4..92acc81f844d161a7e47d9477c2e49e941929d58 100644 (file)
@@ -336,7 +336,7 @@ static int __init sc_init(void)
                 */
                sc_adapter[cinst]->interrupt = irq[b];
                if (request_irq(sc_adapter[cinst]->interrupt, interrupt_handler,
-                               IRQF_DISABLED, interface->id,
+                               0, interface->id,
                                (void *)(unsigned long) cinst))
                {
                        kfree(sc_adapter[cinst]->channel);
index 71eb233b9ace7a23f143170e8f4cc924a2a1de9a..2a7f0dd6abab4d6bb21ba028b5b3a25721080a2c 100644 (file)
@@ -996,10 +996,11 @@ static void request_write(struct cached_dev *dc, struct search *s)
                closure_bio_submit(bio, cl, s->d);
        } else {
                bch_writeback_add(dc);
+               s->op.cache_bio = bio;
 
                if (bio->bi_rw & REQ_FLUSH) {
                        /* Also need to send a flush to the backing device */
-                       struct bio *flush = bio_alloc_bioset(0, GFP_NOIO,
+                       struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                             dc->disk.bio_split);
 
                        flush->bi_rw    = WRITE_FLUSH;
@@ -1008,8 +1009,6 @@ static void request_write(struct cached_dev *dc, struct search *s)
                        flush->bi_private = cl;
 
                        closure_bio_submit(flush, cl, s->d);
-               } else {
-                       s->op.cache_bio = bio;
                }
        }
 out:
index 4caa8e6d59d7968584e23a9187bc07f8c81df321..2d2b1b7588d7e476b7fc814a63c82bad2edb1ed6 100644 (file)
@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
        return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
 }
 
+static void skip_metadata(struct pstore *ps)
+{
+       uint32_t stride = ps->exceptions_per_area + 1;
+       chunk_t next_free = ps->next_free;
+       if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
+               ps->next_free++;
+}
+
 /*
  * Read or write a metadata area.  Remembering to skip the first
  * chunk which holds the header.
@@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps,
 
        ps->current_area--;
 
+       skip_metadata(ps);
+
        return 0;
 }
 
@@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
                                        struct dm_exception *e)
 {
        struct pstore *ps = get_info(store);
-       uint32_t stride;
-       chunk_t next_free;
        sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);
 
        /* Is there enough room ? */
@@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
         * Move onto the next free pending, making sure to take
         * into account the location of the metadata chunks.
         */
-       stride = (ps->exceptions_per_area + 1);
-       next_free = ++ps->next_free;
-       if (sector_div(next_free, stride) == 1)
-               ps->next_free++;
+       ps->next_free++;
+       skip_metadata(ps);
 
        atomic_inc(&ps->pending_count);
        return 0;
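
skip_metadata() exists because every area in the exception store begins with a metadata chunk that the free-chunk allocator must step over; the new call after read_exceptions() applies the same rule when a snapshot is reloaded. An illustrative layout, assuming exceptions_per_area is 255 and NUM_SNAPSHOT_HDR_CHUNKS is 1 (so stride is 256):

/*
 *   chunk 0          snapshot header
 *   chunk 1          metadata for area 0
 *   chunks 2..256    exception data for area 0
 *   chunk 257        metadata for area 1
 *   ...
 *
 * skip_metadata() bumps ps->next_free whenever next_free % stride lands
 * on the metadata slot, e.g. 257 becomes 258.
 */
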
index adf4d7e1d5e15233a67e7108dc24b8a7bb6efeb1..561a65f82e26af05c8210f94b49367b6fa51e55b 100644 (file)
@@ -8111,6 +8111,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
        u64 *p;
        int lo, hi;
        int rv = 1;
+       unsigned long flags;
 
        if (bb->shift < 0)
                /* badblocks are disabled */
@@ -8125,7 +8126,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
                sectors = next - s;
        }
 
-       write_seqlock_irq(&bb->lock);
+       write_seqlock_irqsave(&bb->lock, flags);
 
        p = bb->page;
        lo = 0;
@@ -8241,7 +8242,7 @@ static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
        bb->changed = 1;
        if (!acknowledged)
                bb->unacked_exist = 1;
-       write_sequnlock_irq(&bb->lock);
+       write_sequnlock_irqrestore(&bb->lock, flags);
 
        return rv;
 }
index d60412c7f9952a350e85002f6578bd66ee416a44..aacf6bf352d87978a15633b05a0d51d3579ce16d 100644 (file)
@@ -1479,6 +1479,7 @@ static int raid1_spare_active(struct mddev *mddev)
                        }
                }
                if (rdev
+                   && rdev->recovery_offset == MaxSector
                    && !test_bit(Faulty, &rdev->flags)
                    && !test_and_set_bit(In_sync, &rdev->flags)) {
                        count++;
index df7b0a06b0ea2d820aec6ad5a752a13bb64e13e8..73dc8a377522e19af92c9bed26519a479f36dbea 100644 (file)
@@ -1782,6 +1782,7 @@ static int raid10_spare_active(struct mddev *mddev)
                        }
                        sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
                } else if (tmp->rdev
+                          && tmp->rdev->recovery_offset == MaxSector
                           && !test_bit(Faulty, &tmp->rdev->flags)
                           && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
                        count++;
index 7ff4f252ca1a42943252a9e19b975da5b2f8ce9b..f8b9068439267a3539d671e6e59ceb0b47b90547 100644 (file)
@@ -778,6 +778,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
                        bi->bi_size = STRIPE_SIZE;
+                       /*
+                        * If this is a discard request, set bi_vcnt to 0. We don't
+                        * want to confuse SCSI because SCSI will replace the payload
+                        */
+                       if (rw & REQ_DISCARD)
+                               bi->bi_vcnt = 0;
                        if (rrdev)
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 
@@ -816,6 +822,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        rbi->bi_io_vec[0].bv_offset = 0;
                        rbi->bi_size = STRIPE_SIZE;
+                       /*
+                        * If this is a discard request, set bi_vcnt to 0. We don't
+                        * want to confuse SCSI because SCSI will replace the payload
+                        */
+                       if (rw & REQ_DISCARD)
+                               rbi->bi_vcnt = 0;
                        if (conf->mddev->gendisk)
                                trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
                                                      rbi, disk_devt(conf->mddev->gendisk),
@@ -2910,6 +2922,14 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                }
                /* now that discard is done we can proceed with any sync */
                clear_bit(STRIPE_DISCARD, &sh->state);
+               /*
+                * SCSI discard will change some bio fields and the stripe has
+                * no updated data, so remove it from the hash list and the
+                * stripe will be reinitialized
+                */
+               spin_lock_irq(&conf->device_lock);
+               remove_hash(sh);
+               spin_unlock_irq(&conf->device_lock);
                if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
                        set_bit(STRIPE_HANDLE, &sh->state);
 
index 2521f7e23018f70a33a9ad43e1bb26e79e037ab8..e79749cfec814b75f0ac84d2f5ad881399a79a75 100644 (file)
@@ -912,14 +912,8 @@ static int tda10071_init(struct dvb_frontend *fe)
                { 0xd5, 0x03, 0x03 },
        };
 
-       /* firmware status */
-       ret = tda10071_rd_reg(priv, 0x51, &tmp);
-       if (ret)
-               goto error;
-
-       if (!tmp) {
+       if (priv->warm) {
                /* warm state - wake up device from sleep */
-               priv->warm = 1;
 
                for (i = 0; i < ARRAY_SIZE(tab); i++) {
                        ret = tda10071_wr_reg_mask(priv, tab[i].reg,
@@ -937,7 +931,6 @@ static int tda10071_init(struct dvb_frontend *fe)
                        goto error;
        } else {
                /* cold state - try to download firmware */
-               priv->warm = 0;
 
                /* request the firmware, this will block and timeout */
                ret = request_firmware(&fw, fw_file, priv->i2c->dev.parent);
index bb0c99d7a4f169d25c642b5143efcf36c087c397..b06a7e54ee0d25100f1f01abcac7ddc70e8489df 100644 (file)
@@ -628,16 +628,13 @@ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
 
 static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1200,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 170000000,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+               V4L2_DV_BT_CAP_CUSTOM)
 };
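
This and the following conversions all use the same helper macro; matching its arguments against the removed field initializers gives the following ordering (stated here as an inference from the hunks, not quoted from the header):

/*
 * V4L2_INIT_BT_TIMINGS(min_width, max_width, min_height, max_height,
 *                      min_pixelclock, max_pixelclock,
 *                      standards, capabilities)
 *
 * so the ad9389b entry above describes 0..1920 x 0..1200 pixels at a
 * 25 MHz .. 170 MHz pixel clock.
 */
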
 
 static int ad9389b_s_dv_timings(struct v4l2_subdev *sd,
index 7a576097471f352939cc703d6fbac368cf02f532..7c8d971f1f613206b1821bfa023ea5326ba48028 100644 (file)
@@ -119,16 +119,14 @@ static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
 
 static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = ADV7511_MAX_WIDTH,
-               .max_height = ADV7511_MAX_HEIGHT,
-               .min_pixelclock = ADV7511_MIN_PIXELCLOCK,
-               .max_pixelclock = ADV7511_MAX_PIXELCLOCK,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
+               ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+                       V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
@@ -1126,6 +1124,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
        state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1);
        if (state->i2c_edid == NULL) {
                v4l2_err(sd, "failed to register edid i2c client\n");
+               err = -ENOMEM;
                goto err_entity;
        }
 
@@ -1133,6 +1132,7 @@ static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *
        state->work_queue = create_singlethread_workqueue(sd->name);
        if (state->work_queue == NULL) {
                v4l2_err(sd, "could not create workqueue\n");
+               err = -ENOMEM;
                goto err_unreg_cec;
        }
 
index d1748901337cd08be083e9a6a7d35958f6aeca7b..22f729d66a9696522e18d42932ae383594cbca5f 100644 (file)
@@ -546,30 +546,24 @@ static inline bool is_digital_input(struct v4l2_subdev *sd)
 
 static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1200,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 170000000,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+                       V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1200,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 225000000,
-               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+               V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
                        V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
-                       V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
-       },
+               V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
+                       V4L2_DV_BT_CAP_CUSTOM)
 };
 
 static inline const struct v4l2_dv_timings_cap *
index b76ec0e7e685ee8b1fa4d3fd99180a96b595e8d0..1083890ac5a9fa198195ad931f8943f1b67fa340 100644 (file)
@@ -1581,7 +1581,7 @@ static int s5c73m3_probe(struct i2c_client *client,
        oif_sd = &state->oif_sd;
 
        v4l2_subdev_init(sd, &s5c73m3_subdev_ops);
-       sd->owner = client->driver->driver.owner;
+       sd->owner = client->dev.driver->owner;
        v4l2_set_subdevdata(sd, state);
        strlcpy(sd->name, "S5C73M3", sizeof(sd->name));
 
index a58a8f663ffbb3a028540fc8763ce5521e55e828..d9f65d7e3e58b64dea5b930d5800e20921fd8778 100644 (file)
@@ -46,14 +46,10 @@ struct ths8200_state {
 
 static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
-       .bt = {
-               .max_width = 1920,
-               .max_height = 1080,
-               .min_pixelclock = 25000000,
-               .max_pixelclock = 148500000,
-               .standards = V4L2_DV_BT_STD_CEA861,
-               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE,
-       },
+       /* keep this initialization for compatibility with GCC < 4.4.6 */
+       .reserved = { 0 },
+       V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
+               V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
 };
 
 static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
index e12bbd8c3f0b8da63f8418e6a00bc803f883963a..fb60da85bc2c039e3ab7a6db22fa1182c4e977bb 100644 (file)
@@ -1455,6 +1455,7 @@ static int video_release(struct file *file)
 
        /* stop video capture */
        if (res_check(fh, RESOURCE_VIDEO)) {
+               pm_qos_remove_request(&dev->qos_request);
                videobuf_streamoff(&fh->cap);
                res_free(dev,fh,RESOURCE_VIDEO);
        }
index a8351127831726bea95214d5e55d7f75535ac6d2..7a4ee4c0449deea95dc86e82a85af8412a1d5aad 100644 (file)
@@ -411,8 +411,8 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd,
 
        device_lock(&client->dev);
 
-       if (!client->driver ||
-           !try_module_get(client->driver->driver.owner)) {
+       if (!client->dev.driver ||
+           !try_module_get(client->dev.driver->owner)) {
                ret = -EPROBE_DEFER;
                v4l2_info(&fmd->v4l2_dev, "No driver found for %s\n",
                                                node->full_name);
@@ -442,7 +442,7 @@ static int fimc_md_of_add_sensor(struct fimc_md *fmd,
        fmd->num_sensors++;
 
 mod_put:
-       module_put(client->driver->driver.owner);
+       module_put(client->dev.driver->owner);
 dev_put:
        device_unlock(&client->dev);
        put_device(&client->dev);
index 5184887b155c7098415b1a3470619be336023f87..32fab30a910590ba290ef987aaad8ed1df78e74f 100644 (file)
@@ -1221,16 +1221,16 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
 {
        struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
        struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
-       struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
        struct mcam_dma_desc *desc = mvb->dma_desc;
        struct scatterlist *sg;
        int i;
 
-       mvb->dma_desc_nent = dma_map_sg(cam->dev, sgd->sglist, sgd->num_pages,
-                       DMA_FROM_DEVICE);
+       mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl,
+                       sg_table->nents, DMA_FROM_DEVICE);
        if (mvb->dma_desc_nent <= 0)
                return -EIO;  /* Not sure what's right here */
-       for_each_sg(sgd->sglist, sg, mvb->dma_desc_nent, i) {
+       for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
                desc->dma_addr = sg_dma_address(sg);
                desc->segment_len = sg_dma_len(sg);
                desc++;
@@ -1241,9 +1241,11 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
 static int mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
 {
        struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
-       struct vb2_dma_sg_desc *sgd = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
 
-       dma_unmap_sg(cam->dev, sgd->sglist, sgd->num_pages, DMA_FROM_DEVICE);
+       if (sg_table)
+               dma_unmap_sg(cam->dev, sg_table->sgl,
+                               sg_table->nents, DMA_FROM_DEVICE);
        return 0;
 }
 
index df3a0ec7fd2c68f83bf2d287efd7a75378766980..1c3608039663e281d23541f17aedcbce044c87fb 100644 (file)
@@ -2182,9 +2182,9 @@ static int isp_probe(struct platform_device *pdev)
        isp->pdata = pdata;
        isp->ref_count = 0;
 
-       isp->raw_dmamask = DMA_BIT_MASK(32);
-       isp->dev->dma_mask = &isp->raw_dmamask;
-       isp->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        platform_set_drvdata(pdev, isp);
 
index cd3eff45ae7d50d0a8b28f4cf5c95dab80abb913..ce65d3ae1aa7b8070f5161bad3caf9d4581b1cb9 100644 (file)
@@ -152,7 +152,6 @@ struct isp_xclk {
  * @mmio_base_phys: Array with physical L4 bus addresses for ISP register
  *                  regions.
  * @mmio_size: Array with ISP register regions size in bytes.
- * @raw_dmamask: Raw DMA mask
  * @stat_lock: Spinlock for handling statistics
  * @isp_mutex: Mutex for serializing requests to ISP.
  * @crashed: Bitmask of crashed entities (indexed by entity ID)
@@ -190,8 +189,6 @@ struct isp_device {
        unsigned long mmio_base_phys[OMAP3_ISP_IOMEM_LAST];
        resource_size_t mmio_size[OMAP3_ISP_IOMEM_LAST];
 
-       u64 raw_dmamask;
-
        /* ISP Obj */
        spinlock_t stat_lock;   /* common lock for statistic drivers */
        struct mutex isp_mutex; /* For handling ref_count field */
index 15d23968d1de8392a61deb4a4bcd2b0489ce689d..9b88a46010072fbe3b4aeefa46bbc8ee0f04ea7e 100644 (file)
@@ -1423,6 +1423,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
        jpeg->vfd_decoder->release      = video_device_release;
        jpeg->vfd_decoder->lock         = &jpeg->lock;
        jpeg->vfd_decoder->v4l2_dev     = &jpeg->v4l2_dev;
+       jpeg->vfd_decoder->vfl_dir      = VFL_DIR_M2M;
 
        ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_GRABBER, -1);
        if (ret) {
index 7a9c5e9329f2655327c2c3d6a22e660a11728d38..4f30341dc2ab239ed4b736292c1b7ccb6d208b5e 100644 (file)
@@ -776,7 +776,7 @@ static int sh_vou_try_fmt_vid_out(struct file *file, void *priv,
        v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
                              &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0);
 
-       for (i = 0; ARRAY_SIZE(vou_fmt); i++)
+       for (i = 0; i < ARRAY_SIZE(vou_fmt); i++)
                if (vou_fmt[i].pfmt == pix->pixelformat)
                        return 0;
 
index 8f9f6211c52e1e052f0ce4db84b3bcd951260d72..f975b70086922c866dc415f9eca4506aef3c19ea 100644 (file)
@@ -266,7 +266,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
        struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
        struct idmac_video_param *video = &ichan->params.video;
        const struct soc_mbus_pixelfmt *host_fmt = icd->current_fmt->host_fmt;
-       unsigned long flags;
        dma_cookie_t cookie;
        size_t new_size;
 
@@ -328,7 +327,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
                memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
 #endif
 
-       spin_lock_irqsave(&mx3_cam->lock, flags);
+       spin_lock_irq(&mx3_cam->lock);
        list_add_tail(&buf->queue, &mx3_cam->capture);
 
        if (!mx3_cam->active)
@@ -351,7 +350,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
        if (mx3_cam->active == buf)
                mx3_cam->active = NULL;
 
-       spin_unlock_irqrestore(&mx3_cam->lock, flags);
+       spin_unlock_irq(&mx3_cam->lock);
 error:
        vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
 }
index ad9309da4a9102779f67cbd999aee72db465c51d..6c96e4898777f29f5c9ae4457d488e9424b32f2b 100644 (file)
@@ -19,6 +19,7 @@
  */
 
 #include "e4000_priv.h"
+#include <linux/math64.h>
 
 /* write multiple registers */
 static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
@@ -233,7 +234,7 @@ static int e4000_set_params(struct dvb_frontend *fe)
         * or more.
         */
        f_vco = c->frequency * e4000_pll_lut[i].mul;
-       sigma_delta = 0x10000UL * (f_vco % priv->cfg->clock) / priv->cfg->clock;
+       sigma_delta = div_u64(0x10000ULL * (f_vco % priv->cfg->clock), priv->cfg->clock);
        buf[0] = f_vco / priv->cfg->clock;
        buf[1] = (sigma_delta >> 0) & 0xff;
        buf[2] = (sigma_delta >> 8) & 0xff;
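
The switch to div_u64() avoids a 32-bit overflow in the sigma-delta calculation. A worked example, assuming a 28.8 MHz reference clock (a common configuration for this tuner):

/*
 *   remainder = f_vco % clock        < 28 800 000
 *   0x10000 * remainder              up to ~1.9e12
 *
 * which does not fit in a 32-bit unsigned long, so the product is formed
 * as a u64 and divided with div_u64() from <linux/math64.h>.
 */
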
index 38714df31ac49878d13d64a9e626f26f1813041b..2e15c80d6e3d11b953d210b553cbc6878110427f 100644 (file)
@@ -783,7 +783,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x22);          /* JPEG 411 */
        jpeg_set_qual(sd->jpeg_hdr, QUALITY);
 
index 064b53043b153359d70b2b6f3abdbc31a9131d6f..f23df4a9d8c56e460840082bfd5bb9ebcafce511 100644 (file)
@@ -1553,9 +1553,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
                sd->params.format.videoSize = VIDEOSIZE_CIF;
 
        sd->params.roi.colEnd = sd->params.roi.colStart +
-                               (gspca_dev->width >> 3);
+                               (gspca_dev->pixfmt.width >> 3);
        sd->params.roi.rowEnd = sd->params.roi.rowStart +
-                               (gspca_dev->height >> 2);
+                               (gspca_dev->pixfmt.height >> 2);
 
        /* And now set the camera to a known state */
        ret = do_command(gspca_dev, CPIA_COMMAND_SetGrabMode,
index 048507b27bb2388573ea8d76077e32a5eb1c708e..f3a7ace0fac9cc8bd4e2b2c8b82038dc85aa7068 100644 (file)
@@ -504,8 +504,7 @@ static int frame_alloc(struct gspca_dev *gspca_dev, struct file *file,
        unsigned int frsz;
        int i;
 
-       i = gspca_dev->curr_mode;
-       frsz = gspca_dev->cam.cam_mode[i].sizeimage;
+       frsz = gspca_dev->pixfmt.sizeimage;
        PDEBUG(D_STREAM, "frame alloc frsz: %d", frsz);
        frsz = PAGE_ALIGN(frsz);
        if (count >= GSPCA_MAX_FRAMES)
@@ -627,16 +626,14 @@ static struct usb_host_endpoint *alt_xfer(struct usb_host_interface *alt,
 static u32 which_bandwidth(struct gspca_dev *gspca_dev)
 {
        u32 bandwidth;
-       int i;
 
        /* get the (max) image size */
-       i = gspca_dev->curr_mode;
-       bandwidth = gspca_dev->cam.cam_mode[i].sizeimage;
+       bandwidth = gspca_dev->pixfmt.sizeimage;
 
        /* if the image is compressed, estimate its mean size */
        if (!gspca_dev->cam.needs_full_bandwidth &&
-           bandwidth < gspca_dev->cam.cam_mode[i].width *
-                               gspca_dev->cam.cam_mode[i].height)
+           bandwidth < gspca_dev->pixfmt.width *
+                               gspca_dev->pixfmt.height)
                bandwidth = bandwidth * 3 / 8;  /* 0.375 */
 
        /* estimate the frame rate */
@@ -650,7 +647,7 @@ static u32 which_bandwidth(struct gspca_dev *gspca_dev)
 
                /* don't hope more than 15 fps with USB 1.1 and
                 * image resolution >= 640x480 */
-               if (gspca_dev->width >= 640
+               if (gspca_dev->pixfmt.width >= 640
                 && gspca_dev->dev->speed == USB_SPEED_FULL)
                        bandwidth *= 15;                /* 15 fps */
                else
@@ -982,9 +979,7 @@ static void gspca_set_default_mode(struct gspca_dev *gspca_dev)
 
        i = gspca_dev->cam.nmodes - 1;  /* take the highest mode */
        gspca_dev->curr_mode = i;
-       gspca_dev->width = gspca_dev->cam.cam_mode[i].width;
-       gspca_dev->height = gspca_dev->cam.cam_mode[i].height;
-       gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i].pixelformat;
+       gspca_dev->pixfmt = gspca_dev->cam.cam_mode[i];
 
        /* does nothing if ctrl_handler == NULL */
        v4l2_ctrl_handler_setup(gspca_dev->vdev.ctrl_handler);
@@ -1105,10 +1100,8 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
                            struct v4l2_format *fmt)
 {
        struct gspca_dev *gspca_dev = video_drvdata(file);
-       int mode;
 
-       mode = gspca_dev->curr_mode;
-       fmt->fmt.pix = gspca_dev->cam.cam_mode[mode];
+       fmt->fmt.pix = gspca_dev->pixfmt;
        /* some drivers use priv internally, zero it before giving it to
           userspace */
        fmt->fmt.pix.priv = 0;
@@ -1140,6 +1133,12 @@ static int try_fmt_vid_cap(struct gspca_dev *gspca_dev,
                        mode = mode2;
        }
        fmt->fmt.pix = gspca_dev->cam.cam_mode[mode];
+       if (gspca_dev->sd_desc->try_fmt) {
+               /* pass original resolution to subdriver try_fmt */
+               fmt->fmt.pix.width = w;
+               fmt->fmt.pix.height = h;
+               gspca_dev->sd_desc->try_fmt(gspca_dev, fmt);
+       }
        /* some drivers use priv internally, zero it before giving it to
           userspace */
        fmt->fmt.pix.priv = 0;
@@ -1178,19 +1177,16 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
                goto out;
        }
 
-       if (ret == gspca_dev->curr_mode) {
-               ret = 0;
-               goto out;                       /* same mode */
-       }
-
        if (gspca_dev->streaming) {
                ret = -EBUSY;
                goto out;
        }
-       gspca_dev->width = fmt->fmt.pix.width;
-       gspca_dev->height = fmt->fmt.pix.height;
-       gspca_dev->pixfmt = fmt->fmt.pix.pixelformat;
        gspca_dev->curr_mode = ret;
+       if (gspca_dev->sd_desc->try_fmt)
+               /* subdriver try_fmt can modify format parameters */
+               gspca_dev->pixfmt = fmt->fmt.pix;
+       else
+               gspca_dev->pixfmt = gspca_dev->cam.cam_mode[ret];
 
        ret = 0;
 out:
@@ -1205,6 +1201,9 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
        int i;
        __u32 index = 0;
 
+       if (gspca_dev->sd_desc->enum_framesizes)
+               return gspca_dev->sd_desc->enum_framesizes(gspca_dev, fsize);
+
        for (i = 0; i < gspca_dev->cam.nmodes; i++) {
                if (fsize->pixel_format !=
                                gspca_dev->cam.cam_mode[i].pixelformat)
@@ -1471,8 +1470,9 @@ static int vidioc_streamon(struct file *file, void *priv,
                if (ret < 0)
                        goto out;
        }
-       PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK", gspca_dev->pixfmt,
-                   gspca_dev->width, gspca_dev->height);
+       PDEBUG_MODE(gspca_dev, D_STREAM, "stream on OK",
+                   gspca_dev->pixfmt.pixelformat,
+                   gspca_dev->pixfmt.width, gspca_dev->pixfmt.height);
        ret = 0;
 out:
        mutex_unlock(&gspca_dev->queue_lock);
index ac0b11f46f5037d0cb29eaeb8896b445537fa76b..300642dc1a177207c1e4b9952f20404e19667c8e 100644 (file)
@@ -88,6 +88,10 @@ typedef void (*cam_pkt_op) (struct gspca_dev *gspca_dev,
 typedef int (*cam_int_pkt_op) (struct gspca_dev *gspca_dev,
                                u8 *data,
                                int len);
+typedef void (*cam_format_op) (struct gspca_dev *gspca_dev,
+                               struct v4l2_format *fmt);
+typedef int (*cam_frmsize_op) (struct gspca_dev *gspca_dev,
+                               struct v4l2_frmsizeenum *fsize);
 
 /* subdriver description */
 struct sd_desc {
@@ -109,6 +113,8 @@ struct sd_desc {
        cam_set_jpg_op set_jcomp;
        cam_streamparm_op get_streamparm;
        cam_streamparm_op set_streamparm;
+       cam_format_op try_fmt;
+       cam_frmsize_op enum_framesizes;
 #ifdef CONFIG_VIDEO_ADV_DEBUG
        cam_set_reg_op set_register;
        cam_get_reg_op get_register;
@@ -183,9 +189,7 @@ struct gspca_dev {
        __u8 streaming;                 /* protected by both mutexes (*) */
 
        __u8 curr_mode;                 /* current camera mode */
-       __u32 pixfmt;                   /* current mode parameters */
-       __u16 width;
-       __u16 height;
+       struct v4l2_pix_format pixfmt;  /* current mode parameters */
        __u32 sequence;                 /* frame sequence number */
 
        wait_queue_head_t wq;           /* wait queue */
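
Illustration, not part of this commit: a minimal sketch of how a subdriver could wire up the try_fmt/enum_framesizes hooks added to struct sd_desc above. The example_* names and the 64..640x480 limits are invented for the sketch; clamp(), the v4l2_frmsizeenum fields and the sd_desc members are the real interfaces, and the actual in-tree user is the stk1135 change further below.

#include <linux/kernel.h>
#include "gspca.h"

/* hypothetical subdriver: accept any even resolution up to 640x480 */
static void example_try_fmt(struct gspca_dev *gspca_dev,
                            struct v4l2_format *fmt)
{
        fmt->fmt.pix.width = clamp(fmt->fmt.pix.width, 64U, 640U) & ~1U;
        fmt->fmt.pix.height = clamp(fmt->fmt.pix.height, 64U, 480U) & ~1U;
        fmt->fmt.pix.bytesperline = fmt->fmt.pix.width;
        fmt->fmt.pix.sizeimage = fmt->fmt.pix.width * fmt->fmt.pix.height;
}

static int example_enum_framesizes(struct gspca_dev *gspca_dev,
                                   struct v4l2_frmsizeenum *fsize)
{
        if (fsize->index != 0)
                return -EINVAL;
        fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
        fsize->stepwise.min_width = 64;
        fsize->stepwise.max_width = 640;
        fsize->stepwise.step_width = 2;
        fsize->stepwise.min_height = 64;
        fsize->stepwise.max_height = 480;
        fsize->stepwise.step_height = 2;
        return 0;
}

static const struct sd_desc example_sd_desc = {
        /* ...the mandatory config/init/start ops are omitted here... */
        .try_fmt = example_try_fmt,
        .enum_framesizes = example_enum_framesizes,
};
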
index 8da3dde383853d9c0faaad80a7f0c53eadaf449b..19736e237b37d6a14e50141c8d24104141783f16 100644 (file)
@@ -378,11 +378,12 @@ static int sd_start(struct gspca_dev *gspca_dev)
        struct sd *dev = (struct sd *) gspca_dev;
 
        /* create the JPEG header */
-       jpeg_define(dev->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(dev->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x21);          /* JPEG 422 */
        jpeg_set_qual(dev->jpeg_hdr, dev->quality);
        PDEBUG(D_STREAM, "Start streaming at %dx%d",
-               gspca_dev->height, gspca_dev->width);
+               gspca_dev->pixfmt.height, gspca_dev->pixfmt.width);
        jlj_start(gspca_dev);
        return gspca_dev->usb_err;
 }
index fdaeeb14453fbb42d82d9d31c0d64e088333bed2..5b481fa430992a7b37b9d14f9df108498f0c4d86 100644 (file)
@@ -455,7 +455,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
        sd->cap_mode = gspca_dev->cam.cam_mode;
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 640:
                PDEBUG(D_STREAM, "Start streaming at vga resolution");
                jl2005c_stream_start_vga_lg(gspca_dev);
index cfa4663f8934ebea47eb7d8d116a4405e875c17d..27fcef11aef42998b0132b26f3b3b4790ee06dd7 100644 (file)
@@ -266,7 +266,7 @@ static int mt9m111_set_hvflip(struct gspca_dev *gspca_dev)
                return err;
 
        data[0] = MT9M111_RMB_OVER_SIZED;
-       if (gspca_dev->width == 640) {
+       if (gspca_dev->pixfmt.width == 640) {
                data[1] = MT9M111_RMB_ROW_SKIP_2X |
                          MT9M111_RMB_COLUMN_SKIP_2X |
                          (hflip << 1) | vflip;
index ff2c5abf115ba635d59a363f190d04acb21ceb66..779a8785f421bbcb67c62840c8a67478ee0a44a2 100644 (file)
@@ -254,7 +254,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        int i;
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x21);          /* JPEG 422 */
        jpeg_set_qual(sd->jpeg_hdr, QUALITY);
 
@@ -270,8 +271,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        data[0] = 0x00;         /* address */
        data[1] = 0x0c | 0x01;  /* reg 0 */
        data[2] = 0x01;         /* reg 1 */
-       data[3] = gspca_dev->width / 8;         /* h_size , reg 2 */
-       data[4] = gspca_dev->height / 8;        /* v_size , reg 3 */
+       data[3] = gspca_dev->pixfmt.width / 8;  /* h_size , reg 2 */
+       data[4] = gspca_dev->pixfmt.height / 8; /* v_size , reg 3 */
        data[5] = 0x30;         /* reg 4, MI, PAS5101 :
                                 *      0x30 for 24mhz , 0x28 for 12mhz */
        data[6] = 0x02;         /* reg 5, H start - was 0x04 */
index 68bb2f35966656e8005bb9ef6cb08707ef266bbb..f006e29ca0197b66ec62dfd821dcb520e24635bf 100644 (file)
@@ -521,7 +521,7 @@ static int start_cif_cam(struct gspca_dev *gspca_dev)
        if (sd->sensor_type)
                data[5] = 0xbb;
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                data[9] |= 0x04;  /* reg 8, 2:1 scale down from 320 */
                /* fall thru */
@@ -618,7 +618,7 @@ static int start_vga_cam(struct gspca_dev *gspca_dev)
                data[10] = 0x18;
        }
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                data[9] |= 0x0c;  /* reg 8, 4:1 scale down */
                /* fall thru */
@@ -847,7 +847,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 min_clockdiv)
                u8 clockdiv = (60 * expo + 7999) / 8000;
 
                /* Limit framerate to not exceed usb bandwidth */
-               if (clockdiv < min_clockdiv && gspca_dev->width >= 320)
+               if (clockdiv < min_clockdiv && gspca_dev->pixfmt.width >= 320)
                        clockdiv = min_clockdiv;
                else if (clockdiv < 2)
                        clockdiv = 2;
index 44c9964b1b3e206085ac1ea3dea8e23048aaead2..599f755e75b86513c28228468bc19e15278a414a 100644 (file)
@@ -1708,7 +1708,7 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val)
 
        reg_r(gspca_dev, 0x1004, 1);
        if (gspca_dev->usb_buf[0] & 0x04) {     /* if AE_FULL_FRM */
-               sd->ae_res = gspca_dev->width * gspca_dev->height;
+               sd->ae_res = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
        } else {                                /* get the AE window size */
                reg_r(gspca_dev, 0x1011, 8);
                w = (gspca_dev->usb_buf[1] << 8) + gspca_dev->usb_buf[0]
@@ -1717,7 +1717,8 @@ static void setautogain(struct gspca_dev *gspca_dev, s32 val)
                  - (gspca_dev->usb_buf[7] << 8) - gspca_dev->usb_buf[6];
                sd->ae_res = h * w;
                if (sd->ae_res == 0)
-                       sd->ae_res = gspca_dev->width * gspca_dev->height;
+                       sd->ae_res = gspca_dev->pixfmt.width *
+                                       gspca_dev->pixfmt.height;
        }
 }
 
@@ -1856,21 +1857,21 @@ static int sd_start(struct gspca_dev *gspca_dev)
        reg_w_buf(gspca_dev, cmd);
        switch (sd->webcam) {
        case P35u:
-               if (gspca_dev->width == 320)
+               if (gspca_dev->pixfmt.width == 320)
                        reg_w_buf(gspca_dev, nw801_start_qvga);
                else
                        reg_w_buf(gspca_dev, nw801_start_vga);
                reg_w_buf(gspca_dev, nw801_start_2);
                break;
        case Kr651us:
-               if (gspca_dev->width == 320)
+               if (gspca_dev->pixfmt.width == 320)
                        reg_w_buf(gspca_dev, kr651_start_qvga);
                else
                        reg_w_buf(gspca_dev, kr651_start_vga);
                reg_w_buf(gspca_dev, kr651_start_2);
                break;
        case Proscope:
-               if (gspca_dev->width == 320)
+               if (gspca_dev->pixfmt.width == 320)
                        reg_w_buf(gspca_dev, proscope_start_qvga);
                else
                        reg_w_buf(gspca_dev, proscope_start_vga);
index 8937d79fd1762bcea5ebd863d2eecf6d0d69c0cc..c95f32a0c02b4283d0f4c66b253e0a7e402f392d 100644 (file)
@@ -3468,7 +3468,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
 
        switch (sd->bridge) {
        case BRIDGE_OVFX2:
-               if (gspca_dev->width != 800)
+               if (gspca_dev->pixfmt.width != 800)
                        gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE;
                else
                        gspca_dev->cam.bulk_size = 7 * 4096;
@@ -3507,8 +3507,8 @@ static void ov511_mode_init_regs(struct sd *sd)
        /* Here I'm assuming that snapshot size == image size.
         * I hope that's always true. --claudio
         */
-       hsegs = (sd->gspca_dev.width >> 3) - 1;
-       vsegs = (sd->gspca_dev.height >> 3) - 1;
+       hsegs = (sd->gspca_dev.pixfmt.width >> 3) - 1;
+       vsegs = (sd->gspca_dev.pixfmt.height >> 3) - 1;
 
        reg_w(sd, R511_CAM_PXCNT, hsegs);
        reg_w(sd, R511_CAM_LNCNT, vsegs);
@@ -3541,7 +3541,7 @@ static void ov511_mode_init_regs(struct sd *sd)
        case SEN_OV7640:
        case SEN_OV7648:
        case SEN_OV76BE:
-               if (sd->gspca_dev.width == 320)
+               if (sd->gspca_dev.pixfmt.width == 320)
                        interlaced = 1;
                /* Fall through */
        case SEN_OV6630:
@@ -3551,7 +3551,7 @@ static void ov511_mode_init_regs(struct sd *sd)
                case 30:
                case 25:
                        /* Not enough bandwidth to do 640x480 @ 30 fps */
-                       if (sd->gspca_dev.width != 640) {
+                       if (sd->gspca_dev.pixfmt.width != 640) {
                                sd->clockdiv = 0;
                                break;
                        }
@@ -3584,7 +3584,8 @@ static void ov511_mode_init_regs(struct sd *sd)
 
        /* Check if we have enough bandwidth to disable compression */
        fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1;
-       needed = fps * sd->gspca_dev.width * sd->gspca_dev.height * 3 / 2;
+       needed = fps * sd->gspca_dev.pixfmt.width *
+                       sd->gspca_dev.pixfmt.height * 3 / 2;
        /* 1000 isoc packets/sec */
        if (needed > 1000 * packet_size) {
                /* Enable Y and UV quantization and compression */
@@ -3646,8 +3647,8 @@ static void ov518_mode_init_regs(struct sd *sd)
                reg_w(sd, 0x38, 0x80);
        }
 
-       hsegs = sd->gspca_dev.width / 16;
-       vsegs = sd->gspca_dev.height / 4;
+       hsegs = sd->gspca_dev.pixfmt.width / 16;
+       vsegs = sd->gspca_dev.pixfmt.height / 4;
 
        reg_w(sd, 0x29, hsegs);
        reg_w(sd, 0x2a, vsegs);
@@ -3686,7 +3687,8 @@ static void ov518_mode_init_regs(struct sd *sd)
                         * happened to be with revision < 2 cams using an
                         * OV7620 and revision 2 cams using an OV7620AE.
                         */
-                       if (sd->revision > 0 && sd->gspca_dev.width == 640) {
+                       if (sd->revision > 0 &&
+                                       sd->gspca_dev.pixfmt.width == 640) {
                                reg_w(sd, 0x20, 0x60);
                                reg_w(sd, 0x21, 0x1f);
                        } else {
@@ -3812,8 +3814,8 @@ static void ov519_mode_init_regs(struct sd *sd)
                break;
        }
 
-       reg_w(sd, OV519_R10_H_SIZE,     sd->gspca_dev.width >> 4);
-       reg_w(sd, OV519_R11_V_SIZE,     sd->gspca_dev.height >> 3);
+       reg_w(sd, OV519_R10_H_SIZE,     sd->gspca_dev.pixfmt.width >> 4);
+       reg_w(sd, OV519_R11_V_SIZE,     sd->gspca_dev.pixfmt.height >> 3);
        if (sd->sensor == SEN_OV7670 &&
            sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
                reg_w(sd, OV519_R12_X_OFFSETL, 0x04);
@@ -3947,14 +3949,16 @@ static void mode_init_ov_sensor_regs(struct sd *sd)
            }
        case SEN_OV3610:
                if (qvga) {
-                       xstart = (1040 - gspca_dev->width) / 2 + (0x1f << 4);
-                       ystart = (776 - gspca_dev->height) / 2;
+                       xstart = (1040 - gspca_dev->pixfmt.width) / 2 +
+                               (0x1f << 4);
+                       ystart = (776 - gspca_dev->pixfmt.height) / 2;
                } else {
-                       xstart = (2076 - gspca_dev->width) / 2 + (0x10 << 4);
-                       ystart = (1544 - gspca_dev->height) / 2;
+                       xstart = (2076 - gspca_dev->pixfmt.width) / 2 +
+                               (0x10 << 4);
+                       ystart = (1544 - gspca_dev->pixfmt.height) / 2;
                }
-               xend = xstart + gspca_dev->width;
-               yend = ystart + gspca_dev->height;
+               xend = xstart + gspca_dev->pixfmt.width;
+               yend = ystart + gspca_dev->pixfmt.height;
                /* Writing to the COMH register resets the other windowing regs
                   to their default values, so we must do this first. */
                i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0xf0);
@@ -4229,8 +4233,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
 
        /* Default for most bridges, allow bridge_mode_init_regs to override */
-       sd->sensor_width = sd->gspca_dev.width;
-       sd->sensor_height = sd->gspca_dev.height;
+       sd->sensor_width = sd->gspca_dev.pixfmt.width;
+       sd->sensor_height = sd->gspca_dev.pixfmt.height;
 
        switch (sd->bridge) {
        case BRIDGE_OV511:
@@ -4345,12 +4349,13 @@ static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
                ov51x_handle_button(gspca_dev, (in[8] >> 2) & 1);
                if (in[8] & 0x80) {
                        /* Frame end */
-                       if ((in[9] + 1) * 8 != gspca_dev->width ||
-                           (in[10] + 1) * 8 != gspca_dev->height) {
+                       if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width ||
+                           (in[10] + 1) * 8 != gspca_dev->pixfmt.height) {
                                PERR("Invalid frame size, got: %dx%d,"
                                        " requested: %dx%d\n",
                                        (in[9] + 1) * 8, (in[10] + 1) * 8,
-                                       gspca_dev->width, gspca_dev->height);
+                                       gspca_dev->pixfmt.width,
+                                       gspca_dev->pixfmt.height);
                                gspca_dev->last_packet_type = DISCARD_PACKET;
                                return;
                        }
@@ -4470,7 +4475,8 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev,
                if (sd->first_frame) {
                        sd->first_frame--;
                        if (gspca_dev->image_len <
-                                 sd->gspca_dev.width * sd->gspca_dev.height)
+                                 sd->gspca_dev.pixfmt.width *
+                                       sd->gspca_dev.pixfmt.height)
                                gspca_dev->last_packet_type = DISCARD_PACKET;
                }
                gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
index 03a33c46ca2c0ede859867d00b0652f911566be9..90f0d637cd9d05f66450ef645859cf7f5f7fb381 100644 (file)
@@ -1440,9 +1440,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
                /* If this packet is marked as EOF, end the frame */
                } else if (data[1] & UVC_STREAM_EOF) {
                        sd->last_pts = 0;
-                       if (gspca_dev->pixfmt == V4L2_PIX_FMT_YUYV
+                       if (gspca_dev->pixfmt.pixelformat == V4L2_PIX_FMT_YUYV
                         && gspca_dev->image_len + len - 12 !=
-                                  gspca_dev->width * gspca_dev->height * 2) {
+                                  gspca_dev->pixfmt.width *
+                                       gspca_dev->pixfmt.height * 2) {
                                PDEBUG(D_PACK, "wrong sized frame");
                                goto discard;
                        }
index 83519be94e58c704b7b50c50b69cce558645cb1b..cd79c180f67b87e84689a8a162b4962bf5691cde 100644 (file)
@@ -299,7 +299,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
        pac207_write_regs(gspca_dev, 0x0042, pac207_sensor_init[3], 8);
 
        /* Compression Balance */
-       if (gspca_dev->width == 176)
+       if (gspca_dev->pixfmt.width == 176)
                pac207_write_reg(gspca_dev, 0x4a, 0xff);
        else
                pac207_write_reg(gspca_dev, 0x4a, 0x30);
@@ -317,7 +317,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
                mode = 0x00;
        else
                mode = 0x02;
-       if (gspca_dev->width == 176) {  /* 176x144 */
+       if (gspca_dev->pixfmt.width == 176) {   /* 176x144 */
                mode |= 0x01;
                PDEBUG(D_STREAM, "pac207_start mode 176x144");
        } else {                                /* 352x288 */
index 1a5bdc853a80dbc2fb9a3c15360f2243a7f77f91..25f86b1e74a80b9c9d6f4d856b8156a216d47102 100644 (file)
@@ -326,7 +326,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val)
         *  640x480 mode and page 4 reg 2 <= 3 then it must be 9
         */
        reg_w(gspca_dev, 0xff, 0x01);
-       if (gspca_dev->width != 640 && val <= 3)
+       if (gspca_dev->pixfmt.width != 640 && val <= 3)
                reg_w(gspca_dev, 0x08, 0x09);
        else
                reg_w(gspca_dev, 0x08, 0x08);
@@ -337,7 +337,7 @@ static void setexposure(struct gspca_dev *gspca_dev, s32 val)
         * camera to use higher compression or we may run out of
         * bandwidth.
         */
-       if (gspca_dev->width == 640 && val == 2)
+       if (gspca_dev->pixfmt.width == 640 && val == 2)
                reg_w(gspca_dev, 0x80, 0x01);
        else
                reg_w(gspca_dev, 0x80, 0x1c);
@@ -615,7 +615,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
 
                /* Start the new frame with the jpeg header */
                pac_start_frame(gspca_dev,
-                       gspca_dev->height, gspca_dev->width);
+                       gspca_dev->pixfmt.height, gspca_dev->pixfmt.width);
        }
        gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
 }
index 5f729b8aa2bd64bee74af9c02b1954a12c4bb304..5102cea504710c5c2aeeba1b0101159f35b0b459 100644 (file)
@@ -354,9 +354,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
 
        /* set size + mode */
        se401_write_req(gspca_dev, SE401_REQ_SET_WIDTH,
-                       gspca_dev->width * mult, 0);
+                       gspca_dev->pixfmt.width * mult, 0);
        se401_write_req(gspca_dev, SE401_REQ_SET_HEIGHT,
-                       gspca_dev->height * mult, 0);
+                       gspca_dev->pixfmt.height * mult, 0);
        /*
         * HDG: disabled this as it does not seem to do anything
         * se401_write_req(gspca_dev, SE401_REQ_SET_OUTPUT_MODE,
@@ -480,7 +480,7 @@ static void sd_complete_frame(struct gspca_dev *gspca_dev, u8 *data, int len)
 static void sd_pkt_scan_janggu(struct gspca_dev *gspca_dev, u8 *data, int len)
 {
        struct sd *sd = (struct sd *)gspca_dev;
-       int imagesize = gspca_dev->width * gspca_dev->height;
+       int imagesize = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
        int i, plen, bits, pixels, info, count;
 
        if (sd->restart_stream)
index f4453d52801b94140cc1201f437ba68fec694095..2a38621cf7188d1f8cdfe03e7d1fede7ffcad566 100644 (file)
@@ -1955,7 +1955,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
                        return 0;
                }
 
-               switch (gspca_dev->width) {
+               switch (gspca_dev->pixfmt.width) {
                case 160: /* 160x120 */
                        gspca_dev->alt = 2;
                        break;
@@ -1985,8 +1985,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
 {
        struct sd *sd = (struct sd *) gspca_dev;
        int mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
-       int width = gspca_dev->width;
-       int height = gspca_dev->height;
+       int width = gspca_dev->pixfmt.width;
+       int height = gspca_dev->pixfmt.height;
        u8 fmt, scale = 0;
 
        jpeg_define(sd->jpeg_hdr, height, width,
index d7ff3b9687c57cb22c504e7a3a9493e41fd924eb..7277dbd2afcdb8c88629aafc2c9013b38cd79390 100644 (file)
@@ -513,10 +513,7 @@ static void i2c_w(struct gspca_dev *gspca_dev, const u8 *buf)
                if (gspca_dev->usb_buf[0] & 0x04) {
                        if (gspca_dev->usb_buf[0] & 0x08) {
                                dev_err(gspca_dev->v4l2_dev.dev,
-                                       "i2c error writing %02x %02x %02x %02x"
-                                       " %02x %02x %02x %02x\n",
-                                       buf[0], buf[1], buf[2], buf[3],
-                                       buf[4], buf[5], buf[6], buf[7]);
+                                       "i2c error writing %8ph\n", buf);
                                gspca_dev->usb_err = -EIO;
                        }
                        return;
@@ -753,7 +750,7 @@ static void setexposure(struct gspca_dev *gspca_dev)
                /* In 640x480, if the reg11 has less than 4, the image is
                   unstable (the bridge goes into a higher compression mode
                   which we have not reverse engineered yet). */
-               if (gspca_dev->width == 640 && reg11 < 4)
+               if (gspca_dev->pixfmt.width == 640 && reg11 < 4)
                        reg11 = 4;
 
                /* frame exposure time in ms = 1000 * reg11 / 30    ->
index 3b5ccb1c4cdf11e0597995f1acdd3e74d182cabe..c69b45d7cfbf3995281ce1ede849a6b761474d77 100644 (file)
@@ -2204,7 +2204,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
                                { 0x14, 0xe7, 0x1e, 0xdd };
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x21);          /* JPEG 422 */
 
        /* initialize the bridge */
index 688592b289eafad0055e4f10d747492a6b65d0b5..f38fd8949609fd917cee8568a4cba9bdb5e83cb4 100644 (file)
@@ -255,7 +255,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
 
        /* initialize the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x22);          /* JPEG 411 */
 
        /* the JPEG quality shall be 85% */
index 9f8bf51fd64b64cb1e4544a722f2b0d9da308c1e..f011a309dd65f4d0776e82cb55f4e6c3fb4ccf58 100644 (file)
@@ -608,7 +608,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        __u8 xmult, ymult;
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x22);          /* JPEG 411 */
        jpeg_set_qual(sd->jpeg_hdr, QUALITY);
 
index acb19fb9a3df3a7d8914eb4f5efa927aa6a18fad..aa21edc9502d67466f7ed13959d85709b9e36889 100644 (file)
@@ -272,7 +272,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
 
        dev->cap_mode = gspca_dev->cam.cam_mode;
        /* "Open the shutter" and set size, to start capture */
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 640:
                PDEBUG(D_STREAM, "Start streaming at high resolution");
                dev->cap_mode++;
index b10d0821111cba7815be078d338c8e1f0917e5ef..e274cf19a3ea22a1448f340f7a779c5a1274d528 100644 (file)
@@ -906,7 +906,8 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
 
        gspca_dev->cam.bulk_nurbs = 1;  /* there must be one URB only */
        sd->do_ctrl = 0;
-       gspca_dev->cam.bulk_size = gspca_dev->width * gspca_dev->height + 8;
+       gspca_dev->cam.bulk_size = gspca_dev->pixfmt.width *
+                       gspca_dev->pixfmt.height + 8;
        return 0;
 }
 
index 8c0982607f25c0ca8b14518be8ebee8b40b50278..b0c70fea760ba68a8235372e9c0ddcc2970af941 100644 (file)
@@ -250,7 +250,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        int ret, value;
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x22);          /* JPEG 411 */
        jpeg_set_qual(sd->jpeg_hdr, QUALITY);
 
@@ -261,7 +262,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
        set_par(gspca_dev, 0x00000000);
        set_par(gspca_dev, 0x8002e001);
        set_par(gspca_dev, 0x14000000);
-       if (gspca_dev->width > 320)
+       if (gspca_dev->pixfmt.width > 320)
                value = 0x8002e001;             /* 640x480 */
        else
                value = 0x4001f000;             /* 320x240 */
index 585868835aceab032418a13448cf2d00007947f8..8add2f74dedaae59d339ba65b8ebbe115f9d5dc6 100644 (file)
@@ -48,42 +48,11 @@ struct sd {
 };
 
 static const struct v4l2_pix_format stk1135_modes[] = {
-       {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 160,
-               .sizeimage = 160 * 120,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 176,
-               .sizeimage = 176 * 144,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 320,
-               .sizeimage = 320 * 240,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 352,
-               .sizeimage = 352 * 288,
-               .colorspace = V4L2_COLORSPACE_SRGB},
+       /* default mode (this driver supports variable resolution) */
        {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
                .bytesperline = 640,
                .sizeimage = 640 * 480,
                .colorspace = V4L2_COLORSPACE_SRGB},
-       {720, 576, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 720,
-               .sizeimage = 720 * 576,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 800,
-               .sizeimage = 800 * 600,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 1024,
-               .sizeimage = 1024 * 768,
-               .colorspace = V4L2_COLORSPACE_SRGB},
-       {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
-               .bytesperline = 1280,
-               .sizeimage = 1280 * 1024,
-               .colorspace = V4L2_COLORSPACE_SRGB},
 };
 
 /* -- read a register -- */
@@ -347,16 +316,16 @@ static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev)
                sensor_write(gspca_dev, cfg[i].reg, cfg[i].val);
 
        /* set output size */
-       width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width;
-       height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height;
-       if (width <= 640) { /* use context A (half readout speed by default) */
+       width = gspca_dev->pixfmt.width;
+       height = gspca_dev->pixfmt.height;
+       if (width <= 640 && height <= 512) { /* context A (half readout speed)*/
                sensor_write(gspca_dev, 0x1a7, width);
                sensor_write(gspca_dev, 0x1aa, height);
                /* set read mode context A */
                sensor_write(gspca_dev, 0x0c8, 0x0000);
                /* set resize, read mode, vblank, hblank context A */
                sensor_write(gspca_dev, 0x2c8, 0x0000);
-       } else { /* use context B (full readout speed by default) */
+       } else { /* context B (full readout speed) */
                sensor_write(gspca_dev, 0x1a1, width);
                sensor_write(gspca_dev, 0x1a4, height);
                /* set read mode context B */
@@ -484,8 +453,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        reg_w(gspca_dev, STK1135_REG_CISPO + 3, 0x00);
 
        /* set capture end position */
-       width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width;
-       height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height;
+       width = gspca_dev->pixfmt.width;
+       height = gspca_dev->pixfmt.height;
        reg_w(gspca_dev, STK1135_REG_CIEPO + 0, width & 0xff);
        reg_w(gspca_dev, STK1135_REG_CIEPO + 1, width >> 8);
        reg_w(gspca_dev, STK1135_REG_CIEPO + 2, height & 0xff);
@@ -643,6 +612,35 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
        return 0;
 }
 
+static void stk1135_try_fmt(struct gspca_dev *gspca_dev, struct v4l2_format *fmt)
+{
+       fmt->fmt.pix.width = clamp(fmt->fmt.pix.width, 32U, 1280U);
+       fmt->fmt.pix.height = clamp(fmt->fmt.pix.height, 32U, 1024U);
+       /* round up to even numbers */
+       fmt->fmt.pix.width += (fmt->fmt.pix.width & 1);
+       fmt->fmt.pix.height += (fmt->fmt.pix.height & 1);
+
+       fmt->fmt.pix.bytesperline = fmt->fmt.pix.width;
+       fmt->fmt.pix.sizeimage = fmt->fmt.pix.width * fmt->fmt.pix.height;
+}
+
+static int stk1135_enum_framesizes(struct gspca_dev *gspca_dev,
+                       struct v4l2_frmsizeenum *fsize)
+{
+       if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_SBGGR8)
+               return -EINVAL;
+
+       fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+       fsize->stepwise.min_width = 32;
+       fsize->stepwise.min_height = 32;
+       fsize->stepwise.max_width = 1280;
+       fsize->stepwise.max_height = 1024;
+       fsize->stepwise.step_width = 2;
+       fsize->stepwise.step_height = 2;
+
+       return 0;
+}
+
 /* sub-driver description */
 static const struct sd_desc sd_desc = {
        .name = MODULE_NAME,
@@ -653,6 +651,8 @@ static const struct sd_desc sd_desc = {
        .stopN = sd_stopN,
        .pkt_scan = sd_pkt_scan,
        .dq_callback = stk1135_dq_callback,
+       .try_fmt = stk1135_try_fmt,
+       .enum_framesizes = stk1135_enum_framesizes,
 };
 
 /* -- module initialisation -- */
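
For context, not part of this commit: a userspace sketch of what the new stk1135 callbacks enable. It enumerates the stepwise range, then requests an arbitrary even resolution and lets the driver's try_fmt clamp it. The /dev/video0 path and the 500x374 size are assumptions; the ioctls and structures are standard V4L2.

/* illustrative userspace check against the variable-resolution stk1135 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        struct v4l2_frmsizeenum fsize;
        struct v4l2_format fmt;
        int fd = open("/dev/video0", O_RDWR);

        if (fd < 0)
                return 1;

        memset(&fsize, 0, sizeof(fsize));
        fsize.pixel_format = V4L2_PIX_FMT_SBGGR8;
        if (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fsize) == 0 &&
            fsize.type == V4L2_FRMSIZE_TYPE_STEPWISE)
                printf("width %u..%u step %u, height %u..%u step %u\n",
                       fsize.stepwise.min_width, fsize.stepwise.max_width,
                       fsize.stepwise.step_width, fsize.stepwise.min_height,
                       fsize.stepwise.max_height, fsize.stepwise.step_height);

        /* ask for a size that is not in the (single-entry) mode table */
        memset(&fmt, 0, sizeof(fmt));
        fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        fmt.fmt.pix.width = 500;
        fmt.fmt.pix.height = 374;
        fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
        fmt.fmt.pix.field = V4L2_FIELD_NONE;
        if (ioctl(fd, VIDIOC_S_FMT, &fmt) == 0)
                printf("got %ux%u\n", fmt.fmt.pix.width, fmt.fmt.pix.height);
        return 0;
}
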
index 55ee7a61c67fb635fe8dbfb2527f178c554c0f6c..49d209bbf9ee08f54e2caa86283205b914a72afd 100644 (file)
@@ -452,7 +452,7 @@ frame_data:
                                        NULL, 0);
 
                        if (sd->bridge == BRIDGE_ST6422)
-                               sd->to_skip = gspca_dev->width * 4;
+                               sd->to_skip = gspca_dev->pixfmt.width * 4;
 
                        if (chunk_len)
                                PERR("Chunk length is "
index 8206b77433006690f3926767290a2802de816611..8d785edcccf2ef040906840f55ea3173e555ecbf 100644 (file)
@@ -421,7 +421,7 @@ static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
 
        /* Number of pixels counted by the sensor when subsampling the pixels.
         * Slightly larger than the real value to avoid oscillation */
-       totalpixels = gspca_dev->width * gspca_dev->height;
+       totalpixels = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
        totalpixels = totalpixels/(8*8) + totalpixels/(64*64);
 
        brightpixels = (totalpixels * val) >> 8;
index af8767a9bd4ccc1a9af62d7f23c69e68a6348f2a..a517d185febed4590bbb5f58306aa4e4a2c4f0f1 100644 (file)
@@ -715,7 +715,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        int enable;
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x22);          /* JPEG 411 */
        jpeg_set_qual(sd->jpeg_hdr, QUALITY);
 
index 4cb511ccc5f6ecbefadee9ef64de47f9fd75f3e2..640c2fe760b3c4e13c687eefbde34e156025d648 100644 (file)
@@ -3856,7 +3856,7 @@ static void setsharpness(struct gspca_dev *gspca_dev, s32 val)
 
        if (sd->bridge == BRIDGE_TP6800) {
                val |= 0x08;            /* grid compensation enable */
-               if (gspca_dev->width == 640)
+               if (gspca_dev->pixfmt.width == 640)
                        reg_w(gspca_dev, TP6800_R78_FORMAT, 0x00); /* vga */
                else
                        val |= 0x04;            /* scaling down enable */
@@ -3880,7 +3880,7 @@ static void set_resolution(struct gspca_dev *gspca_dev)
        struct sd *sd = (struct sd *) gspca_dev;
 
        reg_w(gspca_dev, TP6800_R21_ENDP_1_CTL, 0x00);
-       if (gspca_dev->width == 320) {
+       if (gspca_dev->pixfmt.width == 320) {
                reg_w(gspca_dev, TP6800_R3F_FRAME_RATE, 0x06);
                msleep(100);
                i2c_w(gspca_dev, CX0342_AUTO_ADC_CALIB, 0x01);
@@ -3924,7 +3924,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev)
 
                /* 640x480 * 30 fps does not work */
                if (i == 6                      /* if 30 fps */
-                && gspca_dev->width == 640)
+                && gspca_dev->pixfmt.width == 640)
                        i = 0x05;               /* 15 fps */
        } else {
                for (i = 0; i < ARRAY_SIZE(rates_6810) - 1; i++) {
@@ -3935,7 +3935,7 @@ static int get_fr_idx(struct gspca_dev *gspca_dev)
 
                /* 640x480 * 30 fps does not work */
                if (i == 7                      /* if 30 fps */
-                && gspca_dev->width == 640)
+                && gspca_dev->pixfmt.width == 640)
                        i = 6;                  /* 15 fps */
                i |= 0x80;                      /* clock * 1 */
        }
@@ -4554,7 +4554,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
 {
        struct sd *sd = (struct sd *) gspca_dev;
 
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width);
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width);
        set_dqt(gspca_dev, sd->quality);
        if (sd->bridge == BRIDGE_TP6800) {
                if (sd->sensor == SENSOR_CX0342)
@@ -4737,7 +4738,7 @@ static void sd_dq_callback(struct gspca_dev *gspca_dev)
                        (gspca_dev->usb_buf[26] << 8) + gspca_dev->usb_buf[25] +
                        (gspca_dev->usb_buf[29] << 8) + gspca_dev->usb_buf[28])
                                / 8;
-               if (gspca_dev->width == 640)
+               if (gspca_dev->pixfmt.width == 640)
                        luma /= 4;
                reg_w(gspca_dev, 0x7d, 0x00);
 
index 8591324a53e15edc5582d73d9c90715969ffe0f4..d497ba38af0da06149ebb95ec91526c21e6a4604 100644 (file)
@@ -268,7 +268,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
        packet_type0 = packet_type1 = INTER_PACKET;
        if (gspca_dev->empty_packet) {
                gspca_dev->empty_packet = 0;
-               sd->packet = gspca_dev->height / 2;
+               sd->packet = gspca_dev->pixfmt.height / 2;
                packet_type0 = FIRST_PACKET;
        } else if (sd->packet == 0)
                return;                 /* 2 more lines in 352x288 ! */
@@ -284,9 +284,10 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
         * - 4 bytes
         */
        gspca_frame_add(gspca_dev, packet_type0,
-                       data + 2, gspca_dev->width);
+                       data + 2, gspca_dev->pixfmt.width);
        gspca_frame_add(gspca_dev, packet_type1,
-                       data + gspca_dev->width + 5, gspca_dev->width);
+                       data + gspca_dev->pixfmt.width + 5,
+                       gspca_dev->pixfmt.width);
 }
 
 static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
index a2275cfe0b814625b1bfc9e14144084aed74ca6b..103f6c4236b0789d9eb621eb17efd77a5d46c63e 100644 (file)
@@ -121,13 +121,13 @@ static int vicam_read_frame(struct gspca_dev *gspca_dev, u8 *data, int size)
 
        memset(req_data, 0, 16);
        req_data[0] = gain;
-       if (gspca_dev->width == 256)
+       if (gspca_dev->pixfmt.width == 256)
                req_data[1] |= 0x01; /* low nibble x-scale */
-       if (gspca_dev->height <= 122) {
+       if (gspca_dev->pixfmt.height <= 122) {
                req_data[1] |= 0x10; /* high nibble y-scale */
-               unscaled_height = gspca_dev->height * 2;
+               unscaled_height = gspca_dev->pixfmt.height * 2;
        } else
-               unscaled_height = gspca_dev->height;
+               unscaled_height = gspca_dev->pixfmt.height;
        req_data[2] = 0x90; /* unknown, does not seem to do anything */
        if (unscaled_height <= 200)
                req_data[3] = 0x06; /* vend? */
index 2165da0c7ce1570828be04c572a0bcc816353677..fb9fe2ef3a6f60059b7f3fee4e89254b4869d52f 100644 (file)
@@ -430,11 +430,11 @@ static void w9968cf_set_crop_window(struct sd *sd)
        #define SC(x) ((x) << 10)
 
        /* Scaling factors */
-       fw = SC(sd->gspca_dev.width) / max_width;
-       fh = SC(sd->gspca_dev.height) / max_height;
+       fw = SC(sd->gspca_dev.pixfmt.width) / max_width;
+       fh = SC(sd->gspca_dev.pixfmt.height) / max_height;
 
-       cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.width) / fh;
-       ch = (fw >= fh) ? SC(sd->gspca_dev.height) / fw : max_height;
+       cw = (fw >= fh) ? max_width : SC(sd->gspca_dev.pixfmt.width) / fh;
+       ch = (fw >= fh) ? SC(sd->gspca_dev.pixfmt.height) / fw : max_height;
 
        sd->sensor_width = max_width;
        sd->sensor_height = max_height;
@@ -454,34 +454,34 @@ static void w9968cf_mode_init_regs(struct sd *sd)
 
        w9968cf_set_crop_window(sd);
 
-       reg_w(sd, 0x14, sd->gspca_dev.width);
-       reg_w(sd, 0x15, sd->gspca_dev.height);
+       reg_w(sd, 0x14, sd->gspca_dev.pixfmt.width);
+       reg_w(sd, 0x15, sd->gspca_dev.pixfmt.height);
 
        /* JPEG width & height */
-       reg_w(sd, 0x30, sd->gspca_dev.width);
-       reg_w(sd, 0x31, sd->gspca_dev.height);
+       reg_w(sd, 0x30, sd->gspca_dev.pixfmt.width);
+       reg_w(sd, 0x31, sd->gspca_dev.pixfmt.height);
 
        /* Y & UV frame buffer strides (in WORD) */
        if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
            V4L2_PIX_FMT_JPEG) {
-               reg_w(sd, 0x2c, sd->gspca_dev.width / 2);
-               reg_w(sd, 0x2d, sd->gspca_dev.width / 4);
+               reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width / 2);
+               reg_w(sd, 0x2d, sd->gspca_dev.pixfmt.width / 4);
        } else
-               reg_w(sd, 0x2c, sd->gspca_dev.width);
+               reg_w(sd, 0x2c, sd->gspca_dev.pixfmt.width);
 
        reg_w(sd, 0x00, 0xbf17); /* reset everything */
        reg_w(sd, 0x00, 0xbf10); /* normal operation */
 
        /* Transfer size in WORDS (for UYVY format only) */
-       val = sd->gspca_dev.width * sd->gspca_dev.height;
+       val = sd->gspca_dev.pixfmt.width * sd->gspca_dev.pixfmt.height;
        reg_w(sd, 0x3d, val & 0xffff); /* low bits */
        reg_w(sd, 0x3e, val >> 16);    /* high bits */
 
        if (w9968cf_vga_mode[sd->gspca_dev.curr_mode].pixelformat ==
            V4L2_PIX_FMT_JPEG) {
                /* We may get called multiple times (usb isoc bw negotiat.) */
-               jpeg_define(sd->jpeg_hdr, sd->gspca_dev.height,
-                           sd->gspca_dev.width, 0x22); /* JPEG 420 */
+               jpeg_define(sd->jpeg_hdr, sd->gspca_dev.pixfmt.height,
+                           sd->gspca_dev.pixfmt.width, 0x22); /* JPEG 420 */
                jpeg_set_qual(sd->jpeg_hdr, v4l2_ctrl_g_ctrl(sd->jpegqual));
                w9968cf_upload_quantizationtables(sd);
                v4l2_ctrl_grab(sd->jpegqual, true);
index 7eaf64eb867cf5ebdef1e55c265cd6f7ca4a6685..a41aa7817c54349e196a4aeee80c60a8adb4a921 100644 (file)
@@ -1471,14 +1471,14 @@ static int cit_get_clock_div(struct gspca_dev *gspca_dev)
 
        while (clock_div > 3 &&
                        1000 * packet_size >
-                       gspca_dev->width * gspca_dev->height *
+                       gspca_dev->pixfmt.width * gspca_dev->pixfmt.height *
                        fps[clock_div - 1] * 3 / 2)
                clock_div--;
 
        PDEBUG(D_PROBE,
               "PacketSize: %d, res: %dx%d -> using clockdiv: %d (%d fps)",
-              packet_size, gspca_dev->width, gspca_dev->height, clock_div,
-              fps[clock_div]);
+              packet_size, gspca_dev->pixfmt.width, gspca_dev->pixfmt.height,
+              clock_div, fps[clock_div]);
 
        return clock_div;
 }
@@ -1502,7 +1502,7 @@ static int cit_start_model0(struct gspca_dev *gspca_dev)
        cit_write_reg(gspca_dev, 0x0002, 0x0426);
        cit_write_reg(gspca_dev, 0x0014, 0x0427);
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160: /* 160x120 */
                cit_write_reg(gspca_dev, 0x0004, 0x010b);
                cit_write_reg(gspca_dev, 0x0001, 0x010a);
@@ -1643,7 +1643,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev)
        cit_write_reg(gspca_dev, 0x00, 0x0101);
        cit_write_reg(gspca_dev, 0x00, 0x010a);
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 128: /* 128x96 */
                cit_write_reg(gspca_dev, 0x80, 0x0103);
                cit_write_reg(gspca_dev, 0x60, 0x0105);
@@ -1700,7 +1700,7 @@ static int cit_start_model1(struct gspca_dev *gspca_dev)
        }
 
        /* Assorted init */
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 128: /* 128x96 */
                cit_Packet_Format1(gspca_dev, 0x2b, 0x1e);
                cit_write_reg(gspca_dev, 0xc9, 0x0119); /* Same everywhere */
@@ -1753,7 +1753,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
        cit_write_reg(gspca_dev, 0x0000, 0x0108);
        cit_write_reg(gspca_dev, 0x0001, 0x0133);
        cit_write_reg(gspca_dev, 0x0001, 0x0102);
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 176: /* 176x144 */
                cit_write_reg(gspca_dev, 0x002c, 0x0103);       /* All except 320x240 */
                cit_write_reg(gspca_dev, 0x0000, 0x0104);       /* Same */
@@ -1792,7 +1792,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
 
        cit_write_reg(gspca_dev, 0x0000, 0x0100);       /* LED on */
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 176: /* 176x144 */
                cit_write_reg(gspca_dev, 0x0050, 0x0111);
                cit_write_reg(gspca_dev, 0x00d0, 0x0111);
@@ -1840,7 +1840,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
         * Magic control of CMOS sensor. Only lower values like
         * 0-3 work, and picture shifts left or right. Don't change.
         */
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 176: /* 176x144 */
                cit_model2_Packet1(gspca_dev, 0x0014, 0x0002);
                cit_model2_Packet1(gspca_dev, 0x0016, 0x0002); /* Horizontal shift */
@@ -1899,7 +1899,7 @@ static int cit_start_model2(struct gspca_dev *gspca_dev)
         * does not allow arbitrary values and apparently is a bit mask, to
         * be activated only at appropriate time. Don't change it randomly!
         */
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 176: /* 176x144 */
                cit_model2_Packet1(gspca_dev, 0x0026, 0x00c2);
                break;
@@ -2023,7 +2023,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev)
        cit_model3_Packet1(gspca_dev, 0x009e, 0x0096);
        cit_model3_Packet1(gspca_dev, 0x009f, 0x000a);
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                cit_write_reg(gspca_dev, 0x0000, 0x0101); /* Same on 160x120, 320x240 */
                cit_write_reg(gspca_dev, 0x00a0, 0x0103); /* Same on 160x120, 320x240 */
@@ -2134,7 +2134,7 @@ static int cit_start_model3(struct gspca_dev *gspca_dev)
           like with the IBM netcam pro). */
        cit_write_reg(gspca_dev, clock_div, 0x0111); /* Clock Divider */
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                cit_model3_Packet1(gspca_dev, 0x001f, 0x0000); /* Same */
                cit_model3_Packet1(gspca_dev, 0x0039, 0x001f); /* Same */
@@ -2211,7 +2211,7 @@ static int cit_start_model4(struct gspca_dev *gspca_dev)
        cit_write_reg(gspca_dev, 0xfffa, 0x0124);
        cit_model4_Packet1(gspca_dev, 0x0034, 0x0000);
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 128: /* 128x96 */
                cit_write_reg(gspca_dev, 0x0070, 0x0119);
                cit_write_reg(gspca_dev, 0x00d0, 0x0111);
@@ -2531,7 +2531,7 @@ static int cit_start_ibm_netcam_pro(struct gspca_dev *gspca_dev)
        cit_write_reg(gspca_dev, 0x00fc, 0x012b); /* Same */
        cit_write_reg(gspca_dev, 0x0022, 0x012a); /* Same */
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160: /* 160x120 */
                cit_write_reg(gspca_dev, 0x0024, 0x010b);
                cit_write_reg(gspca_dev, 0x0089, 0x0119);
@@ -2635,7 +2635,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
        struct usb_host_interface *alt;
        int max_packet_size;
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                max_packet_size = 450;
                break;
@@ -2659,7 +2659,7 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
        int ret, packet_size, min_packet_size;
        struct usb_host_interface *alt;
 
-       switch (gspca_dev->width) {
+       switch (gspca_dev->pixfmt.width) {
        case 160:
                min_packet_size = 200;
                break;
@@ -2780,7 +2780,7 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
        case CIT_MODEL1:
        case CIT_MODEL3:
        case CIT_IBM_NETCAM_PRO:
-               switch (gspca_dev->width) {
+               switch (gspca_dev->pixfmt.width) {
                case 160: /* 160x120 */
                        byte3 = 0x02;
                        byte4 = 0x0a;
@@ -2864,20 +2864,16 @@ static u8 *cit_find_sof(struct gspca_dev *gspca_dev, u8 *data, int len)
                                if (data[i] == 0xff) {
                                        if (i >= 4)
                                                PDEBUG(D_FRAM,
-                                                      "header found at offset: %d: %02x %02x 00 %02x %02x %02x\n",
+                                                      "header found at offset: %d: %02x %02x 00 %3ph\n",
                                                       i - 1,
                                                       data[i - 4],
                                                       data[i - 3],
-                                                      data[i],
-                                                      data[i + 1],
-                                                      data[i + 2]);
+                                                      &data[i]);
                                        else
                                                PDEBUG(D_FRAM,
-                                                      "header found at offset: %d: 00 %02x %02x %02x\n",
+                                                      "header found at offset: %d: 00 %3ph\n",
                                                       i - 1,
-                                                      data[i],
-                                                      data[i + 1],
-                                                      data[i + 2]);
+                                                      &data[i]);
                                        return data + i + (sd->sof_len - 1);
                                }
                                break;
index cbfc2f921427cd250556210bdbcf37539c6d5e88..7b95d8e88a20240305a8817b72c3459d80a5adc6 100644 (file)
@@ -6700,7 +6700,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
        };
 
        /* create the JPEG header */
-       jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width,
+       jpeg_define(sd->jpeg_hdr, gspca_dev->pixfmt.height,
+                       gspca_dev->pixfmt.width,
                        0x21);          /* JPEG 422 */
 
        mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
index c43c8d32be40c30e99be96d6fedd5a8fdd3832d9..be77482c30704a5b81615199f563770c0147ec0f 100644 (file)
@@ -111,6 +111,13 @@ static const struct dmi_system_id stk_upside_down_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "F3JC")
                }
        },
+       {
+               .ident = "T12Rg-H",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HCL Infosystems Limited"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H")
+               }
+       },
        {}
 };
 
index 81695d48c13ebc5ceeb5560a0787aae94a516766..c3bb2502225bc6c8932d4aca36de62f1fd1daee2 100644 (file)
@@ -2090,6 +2090,15 @@ static struct usb_device_id uvc_ids[] = {
          .bInterfaceSubClass   = 1,
          .bInterfaceProtocol   = 0,
          .driver_info          = UVC_QUIRK_PROBE_MINMAX },
+       /* Microsoft Lifecam NX-3000 */
+       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
+                               | USB_DEVICE_ID_MATCH_INT_INFO,
+         .idVendor             = 0x045e,
+         .idProduct            = 0x0721,
+         .bInterfaceClass      = USB_CLASS_VIDEO,
+         .bInterfaceSubClass   = 1,
+         .bInterfaceProtocol   = 0,
+         .driver_info          = UVC_QUIRK_PROBE_DEF },
        /* Microsoft Lifecam VX-7000 */
        { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                                | USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2174,6 +2183,15 @@ static struct usb_device_id uvc_ids[] = {
          .bInterfaceSubClass   = 1,
          .bInterfaceProtocol   = 0,
          .driver_info          = UVC_QUIRK_PROBE_DEF },
+       /* Dell SP2008WFP Monitor */
+       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
+                               | USB_DEVICE_ID_MATCH_INT_INFO,
+         .idVendor             = 0x05a9,
+         .idProduct            = 0x2641,
+         .bInterfaceClass      = USB_CLASS_VIDEO,
+         .bInterfaceSubClass   = 1,
+         .bInterfaceProtocol   = 0,
+         .driver_info          = UVC_QUIRK_PROBE_DEF },
        /* Dell Alienware X51 */
        { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE
                                | USB_DEVICE_ID_MATCH_INT_INFO,
index ddc9379eb2769c308198cb17bc9afde4e4067fb3..4133af01774a3226413c73fbaa02f41aa789e1d6 100644 (file)
@@ -43,7 +43,7 @@
 
 #define UNSET (-1U)
 
-#define PREFIX (t->i2c->driver->driver.name)
+#define PREFIX (t->i2c->dev.driver->name)
 
 /*
  * Driver modprobe parameters
@@ -452,7 +452,7 @@ static void set_type(struct i2c_client *c, unsigned int type,
        }
 
        tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n",
-                 c->adapter->name, c->driver->driver.name, c->addr << 1, type,
+                 c->adapter->name, c->dev.driver->name, c->addr << 1, type,
                  t->mode_mask);
        return;
 
@@ -556,7 +556,7 @@ static void tuner_lookup(struct i2c_adapter *adap,
                int mode_mask;
 
                if (pos->i2c->adapter != adap ||
-                   strcmp(pos->i2c->driver->driver.name, "tuner"))
+                   strcmp(pos->i2c->dev.driver->name, "tuner"))
                        continue;
 
                mode_mask = pos->mode_mask;
index 037d7a55aa8c6c0595987d9142630a6ea2c7f5b7..433d6d77942eeeab78fa7282fdb77ccb3648a0e8 100644 (file)
@@ -236,14 +236,14 @@ void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
        v4l2_subdev_init(sd, ops);
        sd->flags |= V4L2_SUBDEV_FL_IS_I2C;
        /* the owner is the same as the i2c_client's driver owner */
-       sd->owner = client->driver->driver.owner;
+       sd->owner = client->dev.driver->owner;
        sd->dev = &client->dev;
        /* i2c_client and v4l2_subdev point to one another */
        v4l2_set_subdevdata(sd, client);
        i2c_set_clientdata(client, sd);
        /* initialize name */
        snprintf(sd->name, sizeof(sd->name), "%s %d-%04x",
-               client->driver->driver.name, i2c_adapter_id(client->adapter),
+               client->dev.driver->name, i2c_adapter_id(client->adapter),
                client->addr);
 }
 EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
@@ -274,11 +274,11 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
           loaded. This delay-load mechanism doesn't work if other drivers
           want to use the i2c device, so explicitly loading the module
           is the best alternative. */
-       if (client == NULL || client->driver == NULL)
+       if (client == NULL || client->dev.driver == NULL)
                goto error;
 
        /* Lock the module so we can safely get the v4l2_subdev pointer */
-       if (!try_module_get(client->driver->driver.owner))
+       if (!try_module_get(client->dev.driver->owner))
                goto error;
        sd = i2c_get_clientdata(client);
 
@@ -287,7 +287,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
        if (v4l2_device_register_subdev(v4l2_dev, sd))
                sd = NULL;
        /* Decrease the module use count to match the first try_module_get. */
-       module_put(client->driver->driver.owner);
+       module_put(client->dev.driver->owner);
 
 error:
        /* If we have a client but no subdev, then something went wrong and
index 594c75eab5a5d84b42d20ce6e5ba014fe91c0972..812165884f4200cce6265c7ffe76d4fb4c29fe9c 100644 (file)
@@ -353,7 +353,9 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 
                        if (b->m.planes[plane].bytesused > length)
                                return -EINVAL;
-                       if (b->m.planes[plane].data_offset >=
+
+                       if (b->m.planes[plane].data_offset > 0 &&
+                           b->m.planes[plane].data_offset >=
                            b->m.planes[plane].bytesused)
                                return -EINVAL;
                }
@@ -1013,6 +1015,10 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
 
                /* Check if the provided plane buffer is large enough */
                if (planes[plane].length < q->plane_sizes[plane]) {
+                       dprintk(1, "qbuf: provided buffer size %u is less than "
+                                               "setup size %u for plane %d\n",
+                                               planes[plane].length,
+                                               q->plane_sizes[plane], plane);
                        ret = -EINVAL;
                        goto err;
                }
@@ -1203,8 +1209,11 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
        int ret;
 
        ret = __verify_length(vb, b);
-       if (ret < 0)
+       if (ret < 0) {
+               dprintk(1, "%s(): plane parameters verification failed: %d\n",
+                       __func__, ret);
                return ret;
+       }
 
        switch (q->memory) {
        case V4L2_MEMORY_MMAP:
@@ -2467,10 +2476,11 @@ size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
 }
 EXPORT_SYMBOL_GPL(vb2_read);
 
-size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
                loff_t *ppos, int nonblocking)
 {
-       return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
+       return __vb2_perform_fileio(q, (char __user *) data, count,
+                                                       ppos, nonblocking, 0);
 }
 EXPORT_SYMBOL_GPL(vb2_write);
 
@@ -2631,7 +2641,7 @@ int vb2_fop_release(struct file *file)
 }
 EXPORT_SYMBOL_GPL(vb2_fop_release);
 
-ssize_t vb2_fop_write(struct file *file, char __user *buf,
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
                size_t count, loff_t *ppos)
 {
        struct video_device *vdev = video_devdata(file);
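
Aside, not part of this commit: with vb2_fop_write() now taking a const buffer it matches the .write prototype in struct v4l2_file_operations, so an output driver can plug it in directly. A sketch of such a fops table follows; example_fops is an invented name, while the helpers are existing V4L2/vb2 symbols.

#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>

/* hypothetical output device: the write() path goes straight to vb2 */
static const struct v4l2_file_operations example_fops = {
        .owner          = THIS_MODULE,
        .open           = v4l2_fh_open,
        .release        = vb2_fop_release,
        .write          = vb2_fop_write, /* const char __user * now matches */
        .poll           = vb2_fop_poll,
        .mmap           = vb2_fop_mmap,
        .unlocked_ioctl = video_ioctl2,
};
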
index fd56f25632018fde92f1acefc4d284e6a47d6dba..646f08f4f504c05ae37dd95cbc1fe8333705e658 100644 (file)
@@ -423,6 +423,39 @@ static inline int vma_is_io(struct vm_area_struct *vma)
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
 }
 
+static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
+       struct vm_area_struct *vma, unsigned long *res)
+{
+       unsigned long pfn, start_pfn, prev_pfn;
+       unsigned int i;
+       int ret;
+
+       if (!vma_is_io(vma))
+               return -EFAULT;
+
+       ret = follow_pfn(vma, start, &pfn);
+       if (ret)
+               return ret;
+
+       start_pfn = pfn;
+       start += PAGE_SIZE;
+
+       for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
+               prev_pfn = pfn;
+               ret = follow_pfn(vma, start, &pfn);
+
+               if (ret) {
+                       pr_err("no page for address %lu\n", start);
+                       return ret;
+               }
+               if (pfn != prev_pfn + 1)
+                       return -EINVAL;
+       }
+
+       *res = start_pfn;
+       return 0;
+}
+
 static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma, int write)
 {
@@ -433,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
 
+                       if (!pfn_valid(pfn))
+                               return -EINVAL;
+
                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
@@ -468,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
 
-       dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
-       if (!vma_is_io(buf->vma))
-               vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+       if (sgt) {
+               dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+               if (!vma_is_io(buf->vma))
+                       vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
 
-       sg_free_table(sgt);
-       kfree(sgt);
+               sg_free_table(sgt);
+               kfree(sgt);
+       }
        vb2_put_vma(buf->vma);
        kfree(buf);
 }
 
+/*
+ * For some kinds of reserved memory there might be no struct page available,
+ * so all that can be done to support such 'pages' is to try to convert the
+ * pfn to a dma address or, as a last resort, just assume that
+ * dma address == physical address (as was assumed in earlier versions
+ * of videobuf2-dma-contig).
+ */
+
+#ifdef __arch_pfn_to_dma
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
+}
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       return (dma_addr_t)__pfn_to_bus(pfn);
+}
+#elif defined(__pfn_to_phys)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       return (dma_addr_t)__pfn_to_phys(pfn);
+}
+#else
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+       /* really, we cannot do anything better at this point */
+       return (dma_addr_t)(pfn) << PAGE_SHIFT;
+}
+#endif
+
 static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, int write)
 {
@@ -548,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
        if (ret) {
+               unsigned long pfn;
+               if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+                       buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
+                       buf->size = size;
+                       kfree(pages);
+                       return buf;
+               }
+
                pr_err("failed to get user pages\n");
                goto fail_vma;
        }
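
When get_user_pages() fails here, the new fallback only succeeds if vb2_dc_get_user_pfn() saw a physically contiguous VM_IO/VM_PFNMAP range, and in the last-resort variant of vb2_dc_pfn_to_dma() the DMA address is simply the start pfn shifted up by the page size. A tiny standalone illustration of that arithmetic (4 KiB pages assumed, pfn value made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                        /* assumed 4 KiB pages */

    int main(void)
    {
            unsigned long pfn = 0x8f000UL;       /* hypothetical start pfn */
            uint64_t dma = (uint64_t)pfn << PAGE_SHIFT;

            /* prints: pfn 0x8f000 -> dma 0x8f000000 */
            printf("pfn 0x%lx -> dma 0x%llx\n", pfn, (unsigned long long)dma);
            return 0;
    }
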
index 16ae3dcc7e294cdb4722d0991b31b9e6ac7cacf0..2f860543912cd1c3f5dd4f286705b2c8b208dc37 100644 (file)
@@ -35,17 +35,61 @@ struct vb2_dma_sg_buf {
        struct page                     **pages;
        int                             write;
        int                             offset;
-       struct vb2_dma_sg_desc          sg_desc;
+       struct sg_table                 sg_table;
+       size_t                          size;
+       unsigned int                    num_pages;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
 };
 
 static void vb2_dma_sg_put(void *buf_priv);
 
+static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
+               gfp_t gfp_flags)
+{
+       unsigned int last_page = 0;
+       int size = buf->size;
+
+       while (size > 0) {
+               struct page *pages;
+               int order;
+               int i;
+
+               order = get_order(size);
+               /* Don't over-allocate */
+               if ((PAGE_SIZE << order) > size)
+                       order--;
+
+               pages = NULL;
+               while (!pages) {
+                       pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
+                                       __GFP_NOWARN | gfp_flags, order);
+                       if (pages)
+                               break;
+
+                       if (order == 0) {
+                               while (last_page--)
+                                       __free_page(buf->pages[last_page]);
+                               return -ENOMEM;
+                       }
+                       order--;
+               }
+
+               split_page(pages, order);
+               for (i = 0; i < (1 << order); i++)
+                       buf->pages[last_page++] = &pages[i];
+
+               size -= PAGE_SIZE << order;
+       }
+
+       return 0;
+}
+
 static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
 {
        struct vb2_dma_sg_buf *buf;
-       int i;
+       int ret;
+       int num_pages;
 
        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
@@ -54,29 +98,23 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
        buf->vaddr = NULL;
        buf->write = 0;
        buf->offset = 0;
-       buf->sg_desc.size = size;
+       buf->size = size;
        /* size is already page aligned */
-       buf->sg_desc.num_pages = size >> PAGE_SHIFT;
+       buf->num_pages = size >> PAGE_SHIFT;
 
-       buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
-                                     sizeof(*buf->sg_desc.sglist));
-       if (!buf->sg_desc.sglist)
-               goto fail_sglist_alloc;
-       sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
-
-       buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+       buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto fail_pages_array_alloc;
 
-       for (i = 0; i < buf->sg_desc.num_pages; ++i) {
-               buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO |
-                                          __GFP_NOWARN | gfp_flags);
-               if (NULL == buf->pages[i])
-                       goto fail_pages_alloc;
-               sg_set_page(&buf->sg_desc.sglist[i],
-                           buf->pages[i], PAGE_SIZE, 0);
-       }
+       ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+       if (ret)
+               goto fail_pages_alloc;
+
+       ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+                       buf->num_pages, 0, size, gfp_flags);
+       if (ret)
+               goto fail_table_alloc;
 
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
@@ -85,18 +123,16 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
        atomic_inc(&buf->refcount);
 
        dprintk(1, "%s: Allocated buffer of %d pages\n",
-               __func__, buf->sg_desc.num_pages);
+               __func__, buf->num_pages);
        return buf;
 
+fail_table_alloc:
+       num_pages = buf->num_pages;
+       while (num_pages--)
+               __free_page(buf->pages[num_pages]);
 fail_pages_alloc:
-       while (--i >= 0)
-               __free_page(buf->pages[i]);
        kfree(buf->pages);
-
 fail_pages_array_alloc:
-       vfree(buf->sg_desc.sglist);
-
-fail_sglist_alloc:
        kfree(buf);
        return NULL;
 }
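
vb2_dma_sg_alloc() now asks vb2_dma_sg_alloc_compacted() for the pages, which greedily takes the largest order that does not overshoot the remaining size and drops to smaller orders when an allocation fails. A userspace sketch of the order-selection loop (it only mimics the arithmetic and assumes every allocation succeeds; it is not the driver code):

    #include <stdio.h>

    #define PAGE_SHIFT 12                        /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* same semantics as the kernel helper: smallest order covering size */
    static int get_order(unsigned long size)
    {
            int order = 0;

            while ((PAGE_SIZE << order) < size)
                    order++;
            return order;
    }

    int main(void)
    {
            long size = 5 * PAGE_SIZE;           /* a 5-page (20 KiB) buffer */

            while (size > 0) {
                    int order = get_order(size);

                    /* don't over-allocate: shrink the order if it overshoots */
                    if ((long)(PAGE_SIZE << order) > size)
                            order--;

                    /* prints "order 2 (4 page(s))" then "order 0 (1 page(s))" */
                    printf("order %d (%lu page(s))\n", order, 1UL << order);
                    size -= PAGE_SIZE << order;
            }
            return 0;
    }

Compared with the old per-page alloc_page() loop, physically contiguous chunks obtained this way should let sg_alloc_table_from_pages() merge neighbouring pages into fewer scatterlist entries.
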
@@ -104,14 +140,14 @@ fail_sglist_alloc:
 static void vb2_dma_sg_put(void *buf_priv)
 {
        struct vb2_dma_sg_buf *buf = buf_priv;
-       int i = buf->sg_desc.num_pages;
+       int i = buf->num_pages;
 
        if (atomic_dec_and_test(&buf->refcount)) {
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
-                       buf->sg_desc.num_pages);
+                       buf->num_pages);
                if (buf->vaddr)
-                       vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
-               vfree(buf->sg_desc.sglist);
+                       vm_unmap_ram(buf->vaddr, buf->num_pages);
+               sg_free_table(&buf->sg_table);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kfree(buf->pages);
@@ -124,7 +160,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 {
        struct vb2_dma_sg_buf *buf;
        unsigned long first, last;
-       int num_pages_from_user, i;
+       int num_pages_from_user;
 
        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
@@ -133,56 +169,41 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
        buf->vaddr = NULL;
        buf->write = write;
        buf->offset = vaddr & ~PAGE_MASK;
-       buf->sg_desc.size = size;
+       buf->size = size;
 
        first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
        last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
-       buf->sg_desc.num_pages = last - first + 1;
-
-       buf->sg_desc.sglist = vzalloc(
-               buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
-       if (!buf->sg_desc.sglist)
-               goto userptr_fail_sglist_alloc;
-
-       sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
+       buf->num_pages = last - first + 1;
 
-       buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
+       buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
-               goto userptr_fail_pages_array_alloc;
+               return NULL;
 
        num_pages_from_user = get_user_pages(current, current->mm,
                                             vaddr & PAGE_MASK,
-                                            buf->sg_desc.num_pages,
+                                            buf->num_pages,
                                             write,
                                             1, /* force */
                                             buf->pages,
                                             NULL);
 
-       if (num_pages_from_user != buf->sg_desc.num_pages)
+       if (num_pages_from_user != buf->num_pages)
                goto userptr_fail_get_user_pages;
 
-       sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
-                   PAGE_SIZE - buf->offset, buf->offset);
-       size -= PAGE_SIZE - buf->offset;
-       for (i = 1; i < buf->sg_desc.num_pages; ++i) {
-               sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
-                           min_t(size_t, PAGE_SIZE, size), 0);
-               size -= min_t(size_t, PAGE_SIZE, size);
-       }
+       if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
+                       buf->num_pages, buf->offset, size, 0))
+               goto userptr_fail_alloc_table_from_pages;
+
        return buf;
 
+userptr_fail_alloc_table_from_pages:
 userptr_fail_get_user_pages:
        dprintk(1, "get_user_pages requested/got: %d/%d]\n",
-              num_pages_from_user, buf->sg_desc.num_pages);
+              num_pages_from_user, buf->num_pages);
        while (--num_pages_from_user >= 0)
                put_page(buf->pages[num_pages_from_user]);
        kfree(buf->pages);
-
-userptr_fail_pages_array_alloc:
-       vfree(buf->sg_desc.sglist);
-
-userptr_fail_sglist_alloc:
        kfree(buf);
        return NULL;
 }
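
The userptr path keeps the sub-page offset and computes the page span directly from the raw virtual address before handing both to sg_alloc_table_from_pages(). A standalone illustration of that arithmetic (4 KiB pages and a made-up address): an 8 KiB buffer that starts 0x800 bytes into a page spans three pages, which is what buf->num_pages ends up as.

    #include <stdio.h>

    #define PAGE_SHIFT 12                        /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long vaddr = 0x10000800UL;  /* hypothetical user pointer */
            unsigned long size  = 0x2000UL;      /* 8 KiB */

            unsigned long offset = vaddr & ~PAGE_MASK;
            unsigned long first  = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
            unsigned long last   = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;

            /* prints: offset=0x800 num_pages=3 */
            printf("offset=0x%lx num_pages=%lu\n", offset, last - first + 1);
            return 0;
    }
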
@@ -194,18 +215,18 @@ userptr_fail_sglist_alloc:
 static void vb2_dma_sg_put_userptr(void *buf_priv)
 {
        struct vb2_dma_sg_buf *buf = buf_priv;
-       int i = buf->sg_desc.num_pages;
+       int i = buf->num_pages;
 
        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
-              __func__, buf->sg_desc.num_pages);
+              __func__, buf->num_pages);
        if (buf->vaddr)
-               vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
+               vm_unmap_ram(buf->vaddr, buf->num_pages);
+       sg_free_table(&buf->sg_table);
        while (--i >= 0) {
                if (buf->write)
                        set_page_dirty_lock(buf->pages[i]);
                put_page(buf->pages[i]);
        }
-       vfree(buf->sg_desc.sglist);
        kfree(buf->pages);
        kfree(buf);
 }
@@ -218,7 +239,7 @@ static void *vb2_dma_sg_vaddr(void *buf_priv)
 
        if (!buf->vaddr)
                buf->vaddr = vm_map_ram(buf->pages,
-                                       buf->sg_desc.num_pages,
+                                       buf->num_pages,
                                        -1,
                                        PAGE_KERNEL);
 
@@ -274,7 +295,7 @@ static void *vb2_dma_sg_cookie(void *buf_priv)
 {
        struct vb2_dma_sg_buf *buf = buf_priv;
 
-       return &buf->sg_desc;
+       return &buf->sg_table;
 }
 
 const struct vb2_mem_ops vb2_dma_sg_memops = {
index 2a9b100c48259deed696305ada91a65faa8b4e46..dbbf8ee3f592369fdd5aa968148e7aa307246580 100644 (file)
@@ -158,8 +158,6 @@ int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
 {
        int ret;
 
-       BUG_ON(!mutex_is_locked(&mc13xxx->lock));
-
        if (offset > MC13XXX_NUMREGS)
                return -EINVAL;
 
@@ -172,8 +170,6 @@ EXPORT_SYMBOL(mc13xxx_reg_read);
 
 int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
 {
-       BUG_ON(!mutex_is_locked(&mc13xxx->lock));
-
        dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
 
        if (offset > MC13XXX_NUMREGS || val > 0xffffff)
@@ -186,7 +182,6 @@ EXPORT_SYMBOL(mc13xxx_reg_write);
 int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
                u32 mask, u32 val)
 {
-       BUG_ON(!mutex_is_locked(&mc13xxx->lock));
        BUG_ON(val & ~mask);
        dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
                        offset, val, mask);
index 77189daadf1e774c44714beed041d89d08870e4b..5f14ef6693c22abd073b62ea6114dab2b09d76b8 100644 (file)
@@ -94,10 +94,15 @@ static int mc13xxx_spi_write(void *context, const void *data, size_t count)
 {
        struct device *dev = context;
        struct spi_device *spi = to_spi_device(dev);
+       const char *reg = data;
 
        if (count != 4)
                return -ENOTSUPP;
 
+       /* Errata fix for SPI audio problems: write audio codec/DAC registers twice */
+       if (*reg == MC13783_AUDIO_CODEC || *reg == MC13783_AUDIO_DAC)
+               spi_write(spi, data, count);
+
        return spi_write(spi, data, count);
 }
 
index 5d4fd69d04ca05f1b189e458fdbacd4e54e3a0b7..4ef01ab67853289ba7c1db81e5d7ec5464333401 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/jiffies.h>
 #include <linux/of.h>
 #include <linux/i2c.h>
-#include <linux/i2c/at24.h>
+#include <linux/platform_data/at24.h>
 
 /*
  * I2C EEPROMs from most vendors are inexpensive and mostly interchangeable.
index fa9632eb63f14cc9af971f290989f13263c3cbfd..357bbc54fe4b6f423aa2dcc86ca3a23624748a6b 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -196,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-               limit = *mmc_dev(host)->dma_mask;
+               limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
index c3785edc0e92c851d3c36a0481a1b2936f92fc63..d135c76c4855b825175370e215979bd528e4b2d4 100644 (file)
@@ -62,6 +62,7 @@ static unsigned int fmax = 515633;
  * @signal_direction: input/out direction of bus signals can be indicated
  * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
  * @busy_detect: true if busy detection on dat0 is supported
+ * @pwrreg_nopower: bits in MMCIPOWER don't control the ext. power supply
  */
 struct variant_data {
        unsigned int            clkreg;
@@ -76,6 +77,7 @@ struct variant_data {
        bool                    signal_direction;
        bool                    pwrreg_clkgate;
        bool                    busy_detect;
+       bool                    pwrreg_nopower;
 };
 
 static struct variant_data variant_arm = {
@@ -109,6 +111,7 @@ static struct variant_data variant_u300 = {
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
+       .pwrreg_nopower         = true,
 };
 
 static struct variant_data variant_nomadik = {
@@ -121,6 +124,7 @@ static struct variant_data variant_nomadik = {
        .pwrreg_powerup         = MCI_PWR_ON,
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
+       .pwrreg_nopower         = true,
 };
 
 static struct variant_data variant_ux500 = {
@@ -135,6 +139,7 @@ static struct variant_data variant_ux500 = {
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .busy_detect            = true,
+       .pwrreg_nopower         = true,
 };
 
 static struct variant_data variant_ux500v2 = {
@@ -150,6 +155,7 @@ static struct variant_data variant_ux500v2 = {
        .signal_direction       = true,
        .pwrreg_clkgate         = true,
        .busy_detect            = true,
+       .pwrreg_nopower         = true,
 };
 
 static int mmci_card_busy(struct mmc_host *mmc)
@@ -189,6 +195,21 @@ static int mmci_validate_data(struct mmci_host *host,
        return 0;
 }
 
+static void mmci_reg_delay(struct mmci_host *host)
+{
+       /*
+        * According to the spec, at least three feedback clock cycles
+        * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
+        * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
+        * Worst delay time during card init is at 100 kHz => 30 us.
+        * Worst delay time when up and running is at 25 MHz => 120 ns.
+        */
+       if (host->cclk < 25000000)
+               udelay(30);
+       else
+               ndelay(120);
+}
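
A quick back-of-the-envelope check of the two worst-case figures quoted in the comment above, i.e. three clock cycles at the 100 kHz card-init clock and at a 25 MHz running clock (just the arithmetic, not driver code):

    #include <stdio.h>

    int main(void)
    {
            double cycles = 3.0;

            printf("init at 100 kHz: %.0f us\n", cycles / 100e3 * 1e6);  /* 30 us  */
            printf("run  at 25 MHz:  %.0f ns\n", cycles / 25e6  * 1e9);  /* 120 ns */
            return 0;
    }
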
+
 /*
  * This must be called with host->lock held
  */
@@ -1264,6 +1285,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
        mmci_set_clkreg(host, ios->clock);
        mmci_write_pwrreg(host, pwr);
+       mmci_reg_delay(host);
 
        spin_unlock_irqrestore(&host->lock, flags);
 
@@ -1510,23 +1532,6 @@ static int mmci_probe(struct amba_device *dev,
                mmc->f_max = min(host->mclk, fmax);
        dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
 
-       host->pinctrl = devm_pinctrl_get(&dev->dev);
-       if (IS_ERR(host->pinctrl)) {
-               ret = PTR_ERR(host->pinctrl);
-               goto clk_disable;
-       }
-
-       host->pins_default = pinctrl_lookup_state(host->pinctrl,
-                       PINCTRL_STATE_DEFAULT);
-
-       /* enable pins to be muxed in and configured */
-       if (!IS_ERR(host->pins_default)) {
-               ret = pinctrl_select_state(host->pinctrl, host->pins_default);
-               if (ret)
-                       dev_warn(&dev->dev, "could not set default pins\n");
-       } else
-               dev_warn(&dev->dev, "could not get default pinstate\n");
-
        /* Get regulators and the supported OCR mask */
        mmc_regulator_get_supply(mmc);
        if (!mmc->ocr_avail)
@@ -1760,6 +1765,41 @@ static int mmci_resume(struct device *dev)
 #endif
 
 #ifdef CONFIG_PM_RUNTIME
+static void mmci_save(struct mmci_host *host)
+{
+       unsigned long flags;
+
+       if (host->variant->pwrreg_nopower) {
+               spin_lock_irqsave(&host->lock, flags);
+
+               writel(0, host->base + MMCIMASK0);
+               writel(0, host->base + MMCIDATACTRL);
+               writel(0, host->base + MMCIPOWER);
+               writel(0, host->base + MMCICLOCK);
+               mmci_reg_delay(host);
+
+               spin_unlock_irqrestore(&host->lock, flags);
+       }
+
+}
+
+static void mmci_restore(struct mmci_host *host)
+{
+       unsigned long flags;
+
+       if (host->variant->pwrreg_nopower) {
+               spin_lock_irqsave(&host->lock, flags);
+
+               writel(host->clk_reg, host->base + MMCICLOCK);
+               writel(host->datactrl_reg, host->base + MMCIDATACTRL);
+               writel(host->pwr_reg, host->base + MMCIPOWER);
+               writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+               mmci_reg_delay(host);
+
+               spin_unlock_irqrestore(&host->lock, flags);
+       }
+}
+
 static int mmci_runtime_suspend(struct device *dev)
 {
        struct amba_device *adev = to_amba_device(dev);
@@ -1767,6 +1807,8 @@ static int mmci_runtime_suspend(struct device *dev)
 
        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);
+               pinctrl_pm_select_sleep_state(dev);
+               mmci_save(host);
                clk_disable_unprepare(host->clk);
        }
 
@@ -1781,6 +1823,8 @@ static int mmci_runtime_resume(struct device *dev)
        if (mmc) {
                struct mmci_host *host = mmc_priv(mmc);
                clk_prepare_enable(host->clk);
+               mmci_restore(host);
+               pinctrl_pm_select_default_state(dev);
        }
 
        return 0;
index 69080fab637520af2a9ea8b4a6ba49ae52b0344f..168bc72f7a94a9b662d7c0a97775c781d4d769aa 100644 (file)
@@ -200,10 +200,6 @@ struct mmci_host {
        struct sg_mapping_iter  sg_miter;
        unsigned int            size;
 
-       /* pinctrl handles */
-       struct pinctrl          *pinctrl;
-       struct pinctrl_state    *pins_default;
-
 #ifdef CONFIG_DMA_ENGINE
        /* DMA stuff */
        struct dma_chan         *dma_current;
index cdd4ce0d7c90c91526cb447996c2607603bb0195..ef19874fcd1f4dcd8ab6d9821091f542021ccc8f 100644 (file)
@@ -310,8 +310,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
                        dma_mask = DMA_BIT_MASK(32);
                }
 
-               dev->dma_mask = &dev->coherent_dma_mask;
-               dev->coherent_dma_mask = dma_mask;
+               err = dma_coerce_mask_and_coherent(dev, dma_mask);
+               if (err)
+                       goto err_free;
        }
 
        if (c->slot) {
index 9279a9174f84ad9cd3a2ae2c7117c29cc8134e9e..7a6384b0962a9de2c92dffb81f1fc45d6d89a490 100644 (file)
 
 /* Magics */
 #define BOARD_DATA_MAGIC               0x5246504D      /* MPFR */
+#define FACTORY_MAGIC                  0x59544346      /* FCTY */
 #define POT_MAGIC1                     0x54544f50      /* POTT */
 #define POT_MAGIC2                     0x504f          /* OP */
 #define ML_MAGIC1                      0x39685a42
 #define ML_MAGIC2                      0x26594131
 #define TRX_MAGIC                      0x30524448
+#define SQSH_MAGIC                     0x71736873      /* shsq */
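
Like the existing board-data and POT magics, the two new constants are little-endian ASCII tags; a quick standalone decode shows where the FCTY and shsq comments come from (a sketch, nothing more):

    #include <stdint.h>
    #include <stdio.h>

    /* print the four bytes of a magic in little-endian (lowest first) order */
    static void decode(uint32_t magic)
    {
            for (int i = 0; i < 4; i++)
                    putchar((magic >> (8 * i)) & 0xff);
            putchar('\n');
    }

    int main(void)
    {
            decode(0x59544346);                  /* prints FCTY */
            decode(0x71736873);                  /* prints shsq */
            return 0;
    }
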
 
 struct trx_header {
        uint32_t magic;
@@ -71,7 +73,14 @@ static int bcm47xxpart_parse(struct mtd_info *master,
        /* Alloc */
        parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
                        GFP_KERNEL);
+       if (!parts)
+               return -ENOMEM;
+
        buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+       if (!buf) {
+               kfree(parts);
+               return -ENOMEM;
+       }
 
        /* Parse block by block looking for magics */
        for (offset = 0; offset <= master->size - blocksize;
@@ -110,6 +119,13 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                        continue;
                }
 
+               /* Found on Huawei E970 */
+               if (buf[0x000 / 4] == FACTORY_MAGIC) {
+                       bcm47xxpart_add_part(&parts[curr_part++], "factory",
+                                            offset, MTD_WRITEABLE);
+                       continue;
+               }
+
                /* POT(TOP) */
                if (buf[0x000 / 4] == POT_MAGIC1 &&
                    (buf[0x004 / 4] & 0xFFFF) == POT_MAGIC2) {
@@ -167,6 +183,13 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                        offset = rounddown(offset + trx->length, blocksize);
                        continue;
                }
+
+               /* Squashfs on devices not using TRX */
+               if (buf[0x000 / 4] == SQSH_MAGIC) {
+                       bcm47xxpart_add_part(&parts[curr_part++], "rootfs",
+                                            offset, 0);
+                       continue;
+               }
        }
 
        /* Look for NVRAM at the end of the last block. */
index 5cb4c04726b2e0eb58dd6452b293602f82fa1945..d9fd87a4c8dc08a6b191e4454cf529898c1688c6 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/mutex.h>
 #include <linux/mount.h>
 #include <linux/slab.h>
+#include <linux/major.h>
 
 /* Info for the block device */
 struct block2mtd_dev {
index 3e1b0a0ef4dba22e559b74e603ebe1bf90f389cb..4f091c1a9981c060e3ea2cae43960ade8b4bc2f7 100644 (file)
@@ -2097,7 +2097,7 @@ notfound:
        ret = -ENODEV;
        dev_info(dev, "No supported DiskOnChip found\n");
 err_probe:
-       kfree(cascade->bch);
+       free_bch(cascade->bch);
        for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
                if (cascade->floors[floor])
                        doc_release_device(cascade->floors[floor]);
index 26b14f9fcac6d129c3a2bc5c7a9fa7baea234691..8d6c87be95982f22ec48c79fe581dc0a387116e6 100644 (file)
@@ -168,12 +168,25 @@ static inline int write_disable(struct m25p *flash)
  */
 static inline int set_4byte(struct m25p *flash, u32 jedec_id, int enable)
 {
+       int status;
+       bool need_wren = false;
+
        switch (JEDEC_MFR(jedec_id)) {
-       case CFI_MFR_MACRONIX:
        case CFI_MFR_ST: /* Micron, actually */
+               /* Some Micron parts need the WREN command; all will accept it */
+               need_wren = true;
+       case CFI_MFR_MACRONIX:
        case 0xEF /* winbond */:
+               if (need_wren)
+                       write_enable(flash);
+
                flash->command[0] = enable ? OPCODE_EN4B : OPCODE_EX4B;
-               return spi_write(flash->spi, flash->command, 1);
+               status = spi_write(flash->spi, flash->command, 1);
+
+               if (need_wren)
+                       write_disable(flash);
+
+               return status;
        default:
                /* Spansion style */
                flash->command[0] = OPCODE_BRWR;
@@ -743,6 +756,9 @@ static const struct spi_device_id m25p_ids[] = {
        { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) },
        { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) },
 
+       /* ESMT */
+       { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },
+
        /* Everspin */
        { "mr25h256", CAT25_INFO(  32 * 1024, 1, 256, 2, M25P_NO_ERASE | M25P_NO_FR) },
        { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, M25P_NO_ERASE | M25P_NO_FR) },
@@ -774,6 +790,7 @@ static const struct spi_device_id m25p_ids[] = {
        { "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024, 256, 0) },
        { "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024, 256, 0) },
        { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) },
+       { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) },
 
        /* PMC */
        { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
index 0e8cbfeba11e42a85da4a843cd7f212263eb9448..1cfbfcfb6e191f7a41db7b0e696b7d082bf20788 100644 (file)
@@ -88,8 +88,6 @@ struct dataflash {
        uint8_t                 command[4];
        char                    name[24];
 
-       unsigned                partitioned:1;
-
        unsigned short          page_offset;    /* offset in flash address */
        unsigned int            page_size;      /* of bytes per page */
 
index 67823de68db69e7f2ae80d378ed7b9289eb811e0..e1f2aebaa48955035f6eef98f31e7640202461f1 100644 (file)
@@ -94,7 +94,7 @@ static void unregister_devices(void)
        }
 }
 
-static int register_device(char *name, unsigned long start, unsigned long len)
+static int register_device(char *name, phys_addr_t start, size_t len)
 {
        struct phram_mtd_list *new;
        int ret = -ENOMEM;
@@ -141,35 +141,35 @@ out0:
        return ret;
 }
 
-static int ustrtoul(const char *cp, char **endp, unsigned int base)
+static int parse_num64(uint64_t *num64, char *token)
 {
-       unsigned long result = simple_strtoul(cp, endp, base);
-
-       switch (**endp) {
-       case 'G':
-               result *= 1024;
-       case 'M':
-               result *= 1024;
-       case 'k':
-               result *= 1024;
+       size_t len;
+       int shift = 0;
+       int ret;
+
+       len = strlen(token);
        /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
-               if ((*endp)[1] == 'i')
-                       (*endp) += 2;
+       if (len > 2) {
+               if (token[len - 1] == 'i') {
+                       switch (token[len - 2]) {
+                       case 'G':
+                               shift += 10;
+                       case 'M':
+                               shift += 10;
+                       case 'k':
+                               shift += 10;
+                               token[len - 2] = 0;
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+               }
        }
-       return result;
-}
 
-static int parse_num32(uint32_t *num32, const char *token)
-{
-       char *endp;
-       unsigned long n;
+       ret = kstrtou64(token, 0, num64);
+       *num64 <<= shift;
 
-       n = ustrtoul(token, &endp, 0);
-       if (*endp)
-               return -EINVAL;
-
-       *num32 = n;
-       return 0;
+       return ret;
 }
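
parse_num64() above accepts the binary suffixes ki, Mi and Gi by turning them into a shift before the numeric conversion, so the 64-bit start and length can exceed 4 GiB. A userspace sketch of the same suffix handling (assumed behaviour, not the kernel code): "512Mi" becomes 512 << 20 = 536870912 bytes.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_size(uint64_t *num64, char *token)
    {
            size_t len = strlen(token);
            int shift = 0;

            /* only the binary suffixes "ki", "Mi" and "Gi" are accepted */
            if (len > 2 && token[len - 1] == 'i') {
                    switch (token[len - 2]) {
                    case 'G': shift += 10;       /* fall through */
                    case 'M': shift += 10;       /* fall through */
                    case 'k': shift += 10;
                            token[len - 2] = '\0';
                            break;
                    default:
                            return -1;
                    }
            }

            *num64 = strtoull(token, NULL, 0) << shift;
            return 0;
    }

    int main(void)
    {
            char buf[] = "512Mi";
            uint64_t val;

            if (parse_size(&val, buf) == 0)
                    printf("512Mi -> %llu bytes\n", (unsigned long long)val);
            return 0;
    }
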
 
 static int parse_name(char **pname, const char *token)
@@ -209,19 +209,19 @@ static inline void kill_final_newline(char *str)
  * This shall contain the module parameter if any. It is of the form:
  * - phram=<device>,<address>,<size> for module case
  * - phram.phram=<device>,<address>,<size> for built-in case
- * We leave 64 bytes for the device name, 12 for the address and 12 for the
+ * We leave 64 bytes for the device name, 20 for the address and 20 for the
  * size.
  * Example: phram.phram=rootfs,0xa0000000,512Mi
  */
-static __initdata char phram_paramline[64+12+12];
+static __initdata char phram_paramline[64 + 20 + 20];
 
 static int __init phram_setup(const char *val)
 {
-       char buf[64+12+12], *str = buf;
+       char buf[64 + 20 + 20], *str = buf;
        char *token[3];
        char *name;
-       uint32_t start;
-       uint32_t len;
+       uint64_t start;
+       uint64_t len;
        int i, ret;
 
        if (strnlen(val, sizeof(buf)) >= sizeof(buf))
@@ -243,13 +243,13 @@ static int __init phram_setup(const char *val)
        if (ret)
                return ret;
 
-       ret = parse_num32(&start, token[1]);
+       ret = parse_num64(&start, token[1]);
        if (ret) {
                kfree(name);
                parse_err("illegal start address\n");
        }
 
-       ret = parse_num32(&len, token[2]);
+       ret = parse_num64(&len, token[2]);
        if (ret) {
                kfree(name);
                parse_err("illegal device length\n");
@@ -257,7 +257,7 @@ static int __init phram_setup(const char *val)
 
        ret = register_device(name, start, len);
        if (!ret)
-               pr_info("%s device: %#x at %#x\n", name, len, start);
+               pr_info("%s device: %#llx at %#llx\n", name, len, start);
        else
                kfree(name);
 
index a42f1f0e7281417398f4de453845ff8edf79b60c..687bf27ec85076c2d735418714c12c537c2dc498 100644 (file)
@@ -364,7 +364,7 @@ static int sst25l_probe(struct spi_device *spi)
        if (!flash_info)
                return -ENODEV;
 
-       flash = kzalloc(sizeof(struct sst25l_flash), GFP_KERNEL);
+       flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL);
        if (!flash)
                return -ENOMEM;
 
@@ -402,11 +402,8 @@ static int sst25l_probe(struct spi_device *spi)
        ret = mtd_device_parse_register(&flash->mtd, NULL, NULL,
                                        data ? data->parts : NULL,
                                        data ? data->nr_parts : 0);
-       if (ret) {
-               kfree(flash);
-               spi_set_drvdata(spi, NULL);
+       if (ret)
                return -ENODEV;
-       }
 
        return 0;
 }
@@ -414,12 +411,8 @@ static int sst25l_probe(struct spi_device *spi)
 static int sst25l_remove(struct spi_device *spi)
 {
        struct sst25l_flash *flash = spi_get_drvdata(spi);
-       int ret;
 
-       ret = mtd_device_unregister(&flash->mtd);
-       if (ret == 0)
-               kfree(flash);
-       return ret;
+       return mtd_device_unregister(&flash->mtd);
 }
 
 static struct spi_driver sst25l_driver = {
index 3af351484098b6b47c5b0973dda092abdc1bfe0d..b66b541877f0aea5c84e947f72bb45b557fc403a 100644 (file)
@@ -50,7 +50,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
        struct INFTLrecord *inftl;
        unsigned long temp;
 
-       if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
+       if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
                return;
        /* OK, this is moderately ugly.  But probably safe.  Alternatives? */
        if (memcmp(mtd->name, "DiskOnChip", 10))
index d3cfe26beeaa6bcc771a085f1e3f41d641af4c19..2ef19aa0086bee62d51a9986ea50f5983461869c 100644 (file)
@@ -703,7 +703,7 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
 
 #define DO_XXLOCK_LOCK         1
 #define DO_XXLOCK_UNLOCK       2
-int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
+static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
 {
        int ret = 0;
        struct map_info *map = mtd->priv;
index f581ac1cf022fb006be3b814f6f1b23341f9e851..46d195fca94267631887f76d7d98f7d67005f3b7 100644 (file)
@@ -180,7 +180,6 @@ static void vr_nor_pci_remove(struct pci_dev *dev)
 {
        struct vr_nor_mtd *p = pci_get_drvdata(dev);
 
-       pci_set_drvdata(dev, NULL);
        vr_nor_destroy_partitions(p);
        vr_nor_destroy_mtd_setup(p);
        vr_nor_destroy_maps(p);
index c2604f8b2a5efccd1cde4f77db2be09eb9ce1624..36da518915b52b8220e5e6d23ed57acc6738ce0e 100644 (file)
@@ -316,7 +316,6 @@ static void mtd_pci_remove(struct pci_dev *dev)
        map->exit(dev, map);
        kfree(map);
 
-       pci_set_drvdata(dev, NULL);
        pci_release_regions(dev);
 }
 
index 676271659b37442a85d4d545fca05f80c48bdb54..10196f5a897d6b31674232652869cb5617e820ec 100644 (file)
@@ -55,7 +55,7 @@ struct platram_info {
 
 static inline struct platram_info *to_platram_info(struct platform_device *dev)
 {
-       return (struct platram_info *)platform_get_drvdata(dev);
+       return platform_get_drvdata(dev);
 }
 
 /* platram_setrw
@@ -257,21 +257,7 @@ static struct platform_driver platram_driver = {
        },
 };
 
-/* module init/exit */
-
-static int __init platram_init(void)
-{
-       printk("Generic platform RAM MTD, (c) 2004 Simtec Electronics\n");
-       return platform_driver_register(&platram_driver);
-}
-
-static void __exit platram_exit(void)
-{
-       platform_driver_unregister(&platram_driver);
-}
-
-module_init(platram_init);
-module_exit(platram_exit);
+module_platform_driver(platram_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
index c77b68c9412f8631955f6b52df633a6ab3fbce07..3051c4c362404bab5119dcf00d8c1e7321e4dbf3 100644 (file)
@@ -212,7 +212,6 @@ static void scb2_flash_remove(struct pci_dev *dev)
 
        if (!region_fail)
                release_mem_region(SCB2_ADDR, SCB2_WINDOW);
-       pci_set_drvdata(dev, NULL);
 }
 
 static struct pci_device_id scb2_flash_pci_ids[] = {
index 2aef5dda522be57cbd6652963d4078966f7bc64e..485ea751c7f9b6ab18530e2d7ffcec3f328146c0 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/blktrans.h>
 #include <linux/mutex.h>
+#include <linux/major.h>
 
 
 struct mtdblk_dev {
@@ -373,7 +374,7 @@ static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
 
 static struct mtd_blktrans_ops mtdblock_tr = {
        .name           = "mtdblock",
-       .major          = 31,
+       .major          = MTD_BLOCK_MAJOR,
        .part_bits      = 0,
        .blksize        = 512,
        .open           = mtdblock_open,
index 92759a9d2985e8a478b6a05734db195453d39434..fb5dc89369de34f1ec95126e64b1ed4a7c275c18 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/blktrans.h>
 #include <linux/module.h>
+#include <linux/major.h>
 
 static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
                              unsigned long block, char *buf)
@@ -70,7 +71,7 @@ static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
 
 static struct mtd_blktrans_ops mtdblock_tr = {
        .name           = "mtdblock",
-       .major          = 31,
+       .major          = MTD_BLOCK_MAJOR,
        .part_bits      = 0,
        .blksize        = 512,
        .readsect       = mtdblock_readsect,
index 684bfa39e4ee4892f901874c52c699fa71fd572a..9aa0c5e49c1d3aec1641a08fb27b979e5e97f79f 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/mount.h>
 #include <linux/blkpg.h>
 #include <linux/magic.h>
+#include <linux/major.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/map.h>
index 5e14d540ba2f623abb6438c4b6c204b991ed260b..7189089d87e3dbbca1dee15fe656a0a487b40dee 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/backing-dev.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/major.h>
 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
@@ -157,6 +158,9 @@ static ssize_t mtd_type_show(struct device *dev,
        case MTD_UBIVOLUME:
                type = "ubi";
                break;
+       case MTD_MLCNANDFLASH:
+               type = "mlc-nand";
+               break;
        default:
                type = "unknown";
        }
index 334da5f583c021124ea2e64ae7860f513bd9816f..20c02a3b7417cd3025a6c17142f86a3479e2e3a8 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/export.h>
 #include <linux/ctype.h>
 #include <linux/slab.h>
+#include <linux/major.h>
 
 /*
  * compare superblocks to see if they're equivalent
index 060feeaf6b3e5554328904a1ac870a97cbc5685b..ef9c9f547c01fffe391235e3a6d9dffe95944321 100644 (file)
@@ -1062,56 +1062,28 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
 }
 
 /*
- * Get ECC requirement in ONFI parameters, returns -1 if ONFI
- * parameters is not supported.
- * return 0 if success to get the ECC requirement.
- */
-static int get_onfi_ecc_param(struct nand_chip *chip,
-               int *ecc_bits, int *sector_size)
-{
-       *ecc_bits = *sector_size = 0;
-
-       if (chip->onfi_params.ecc_bits == 0xff)
-               /* TODO: the sector_size and ecc_bits need to be find in
-                * extended ecc parameter, currently we don't support it.
-                */
-               return -1;
-
-       *ecc_bits = chip->onfi_params.ecc_bits;
-
-       /* The default sector size (ecc codeword size) is 512 */
-       *sector_size = 512;
-
-       return 0;
-}
-
-/*
- * Get ecc requirement from ONFI parameters ecc requirement.
+ * Get the minimum ECC requirements from the NAND chip.
  * If pmecc-cap, pmecc-sector-size in DTS are not specified, this function
- * will set them according to ONFI ecc requirement. Otherwise, use the
+ * will set them according to the minimum ECC requirement. Otherwise, use the
  * value in DTS file.
  * return 0 if success. otherwise return error code.
  */
 static int pmecc_choose_ecc(struct atmel_nand_host *host,
                int *cap, int *sector_size)
 {
-       /* Get ECC requirement from ONFI parameters */
-       *cap = *sector_size = 0;
-       if (host->nand_chip.onfi_version) {
-               if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size))
-                       dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n",
+       /* Get minimum ECC requirements */
+       if (host->nand_chip.ecc_strength_ds) {
+               *cap = host->nand_chip.ecc_strength_ds;
+               *sector_size = host->nand_chip.ecc_step_ds;
+               dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n",
                                *cap, *sector_size);
-               else
-                       dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n");
        } else {
-               dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes");
-       }
-       if (*cap == 0 && *sector_size == 0) {
                *cap = 2;
                *sector_size = 512;
+               dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n");
        }
 
-       /* If dts file doesn't specify then use the one in ONFI parameters */
+       /* If device tree doesn't specify, use NAND's minimum ECC parameters */
        if (host->pmecc_corr_cap == 0) {
                /* use the most fitable ecc bits (the near bigger one ) */
                if (*cap <= 2)
@@ -1449,7 +1421,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
                ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
 }
 
-#if defined(CONFIG_OF)
 static int atmel_of_init_port(struct atmel_nand_host *host,
                              struct device_node *np)
 {
@@ -1457,7 +1428,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
        u32 offset[2];
        int ecc_mode;
        struct atmel_nand_data *board = &host->board;
-       enum of_gpio_flags flags;
+       enum of_gpio_flags flags = 0;
 
        if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
                if (val >= 32) {
@@ -1540,13 +1511,6 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
 
        return 0;
 }
-#else
-static int atmel_of_init_port(struct atmel_nand_host *host,
-                             struct device_node *np)
-{
-       return -EINVAL;
-}
-#endif
 
 static int __init atmel_hw_nand_init_params(struct platform_device *pdev,
                                         struct atmel_nand_host *host)
@@ -2019,7 +1983,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
        mtd = &host->mtd;
        nand_chip = &host->nand_chip;
        host->dev = &pdev->dev;
-       if (pdev->dev.of_node) {
+       if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+               /* of_node can only be parsed when CONFIG_OF is enabled */
                res = atmel_of_init_port(host, pdev->dev.of_node);
                if (res)
                        goto err_nand_ioremap;
@@ -2207,14 +2172,12 @@ static int __exit atmel_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
-#if defined(CONFIG_OF)
 static const struct of_device_id atmel_nand_dt_ids[] = {
        { .compatible = "atmel,at91rm9200-nand" },
        { /* sentinel */ }
 };
 
 MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
-#endif
 
 static int atmel_nand_nfc_probe(struct platform_device *pdev)
 {
@@ -2253,12 +2216,11 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
        return 0;
 }
 
-#if defined(CONFIG_OF)
-static struct of_device_id atmel_nand_nfc_match[] = {
+static const struct of_device_id atmel_nand_nfc_match[] = {
        { .compatible = "atmel,sama5d3-nfc" },
        { /* sentinel */ }
 };
-#endif
+MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
 
 static struct platform_driver atmel_nand_nfc_driver = {
        .driver = {
index 7bae569fdc79880e5025a5db3e93dd34da274598..107445911315cba4f78a509ab72e91677a1cfb18 100644 (file)
@@ -29,11 +29,9 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)
        struct bcm47xxnflash *b47n;
        int err = 0;
 
-       b47n = kzalloc(sizeof(*b47n), GFP_KERNEL);
-       if (!b47n) {
-               err = -ENOMEM;
-               goto out;
-       }
+       b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
+       if (!b47n)
+               return -ENOMEM;
 
        b47n->nand_chip.priv = b47n;
        b47n->mtd.owner = THIS_MODULE;
@@ -48,22 +46,16 @@ static int bcm47xxnflash_probe(struct platform_device *pdev)
        }
        if (err) {
                pr_err("Initialization failed: %d\n", err);
-               goto err_init;
+               return err;
        }
 
        err = mtd_device_parse_register(&b47n->mtd, probes, NULL, NULL, 0);
        if (err) {
                pr_err("Failed to register MTD device: %d\n", err);
-               goto err_dev_reg;
+               return err;
        }
 
        return 0;
-
-err_dev_reg:
-err_init:
-       kfree(b47n);
-out:
-       return err;
 }
 
 static int bcm47xxnflash_remove(struct platform_device *pdev)
@@ -85,22 +77,4 @@ static struct platform_driver bcm47xxnflash_driver = {
        },
 };
 
-static int __init bcm47xxnflash_init(void)
-{
-       int err;
-
-       err = platform_driver_register(&bcm47xxnflash_driver);
-       if (err)
-               pr_err("Failed to register bcm47xx nand flash driver: %d\n",
-                      err);
-
-       return err;
-}
-
-static void __exit bcm47xxnflash_exit(void)
-{
-       platform_driver_unregister(&bcm47xxnflash_driver);
-}
-
-module_init(bcm47xxnflash_init);
-module_exit(bcm47xxnflash_exit);
+module_platform_driver(bcm47xxnflash_driver);
index 2ed2bb33a6e773f6a81835b2787db2ef9b6bf774..370b9dd7a2786841f3180d732321c99416030559 100644 (file)
@@ -1394,7 +1394,7 @@ static struct nand_bbt_descr bbt_mirror_descr = {
 };
 
 /* initialize driver data structures */
-void denali_drv_init(struct denali_nand_info *denali)
+static void denali_drv_init(struct denali_nand_info *denali)
 {
        denali->idx = 0;
 
@@ -1520,7 +1520,7 @@ int denali_init(struct denali_nand_info *denali)
         * so just let controller do 15bit ECC for MLC and 8bit ECC for
         * SLC if possible.
         * */
-       if (denali->nand.cellinfo & NAND_CI_CELLTYPE_MSK &&
+       if (!nand_is_slc(&denali->nand) &&
                        (denali->mtd.oobsize > (denali->bbtskipbytes +
                        ECC_15BITS * (denali->mtd.writesize /
                        ECC_SECTOR_SIZE)))) {
index e3e46623b2b450f8a1d6cdab3b2c39c6b167836e..033f177a6369b2d240b499f33df0b6bd925ed4d2 100644 (file)
@@ -119,7 +119,6 @@ static void denali_pci_remove(struct pci_dev *dev)
        iounmap(denali->flash_mem);
        pci_release_regions(dev);
        pci_disable_device(dev);
-       pci_set_drvdata(dev, NULL);
        kfree(denali);
 }
 
index eaa3c29ad860eb7c84a5e1309ef879d9d2044ed1..b68a4959f700af3e2768af69e6dd9afd0f94c8e1 100644 (file)
@@ -38,7 +38,7 @@
 #define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
 #endif
 
-static unsigned long __initdata doc_locations[] = {
+static unsigned long doc_locations[] __initdata = {
 #if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
 #ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
        0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
index 548db2389fab8b63e41f2f61731606c32643084a..bd1cb672034fe770d2db2b986caa1821c1f63f58 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/bch.h>
 #include <linux/bitrev.h>
+#include <linux/jiffies.h>
 
 /*
  * In "reliable mode" consecutive 2k pages are used in parallel (in some
@@ -269,7 +270,7 @@ static int poll_status(struct docg4_priv *doc)
         */
 
        uint16_t flash_status;
-       unsigned int timeo;
+       unsigned long timeo;
        void __iomem *docptr = doc->virtadr;
 
        dev_dbg(doc->dev, "%s...\n", __func__);
@@ -277,22 +278,18 @@ static int poll_status(struct docg4_priv *doc)
        /* hardware quirk requires reading twice initially */
        flash_status = readw(docptr + DOC_FLASHCONTROL);
 
-       timeo = 1000;
+       timeo = jiffies + msecs_to_jiffies(200); /* generous timeout */
        do {
                cpu_relax();
                flash_status = readb(docptr + DOC_FLASHCONTROL);
-       } while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo);
+       } while (!(flash_status & DOC_CTRL_FLASHREADY) &&
+                time_before(jiffies, timeo));
 
-
-       if (!timeo) {
+       if (unlikely(!(flash_status & DOC_CTRL_FLASHREADY))) {
                dev_err(doc->dev, "%s: timed out!\n", __func__);
                return NAND_STATUS_FAIL;
        }
 
-       if (unlikely(timeo < 50))
-               dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n",
-                        __func__, timeo);
-
        return 0;
 }
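
The rewritten poll above trades a fixed iteration count for a wall-clock deadline, so the timeout no longer depends on how fast the status register can be read. A userspace analogue of the same pattern (a sketch with assumptions: CLOCK_MONOTONIC for the deadline and a stand-in ready() flag; not the driver code):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* stand-in for the hardware "flash ready" bit: true after a few polls */
    static int polls;
    static bool ready(void)
    {
            return ++polls > 3;
    }

    static bool poll_ready(long timeout_ms)
    {
            struct timespec now, deadline;

            /* compute the deadline once, like jiffies + msecs_to_jiffies(200) */
            clock_gettime(CLOCK_MONOTONIC, &deadline);
            deadline.tv_sec  += timeout_ms / 1000;
            deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
            if (deadline.tv_nsec >= 1000000000L) {
                    deadline.tv_sec++;
                    deadline.tv_nsec -= 1000000000L;
            }

            do {
                    if (ready())
                            return true;
                    clock_gettime(CLOCK_MONOTONIC, &now);
            } while (now.tv_sec < deadline.tv_sec ||
                     (now.tv_sec == deadline.tv_sec &&
                      now.tv_nsec < deadline.tv_nsec));

            return false;                    /* timed out, as NAND_STATUS_FAIL above */
    }

    int main(void)
    {
            printf("%s\n", poll_ready(200) ? "ready" : "timed out");
            return 0;
    }
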
 
@@ -1239,7 +1236,6 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
        nand->block_markbad = docg4_block_markbad;
        nand->read_buf = docg4_read_buf;
        nand->write_buf = docg4_write_buf16;
-       nand->scan_bbt = nand_default_bbt;
        nand->erase_cmd = docg4_erase_block;
        nand->ecc.read_page = docg4_read_page;
        nand->ecc.write_page = docg4_write_page;
index 20657209a472f388a0ecdd933322db5ad2d945cb..c6ef9f1c7a825f718a901139f4f56bc1c36d9e1d 100644 (file)
@@ -650,8 +650,6 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
                chip->page_shift);
        dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
                chip->phys_erase_shift);
-       dev_dbg(priv->dev, "fsl_elbc_init: nand->ecclayout = %p\n",
-               chip->ecclayout);
        dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
                chip->ecc.mode);
        dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
index 317a771f1587c94f3fd57c45c8e140deeb08b982..c96e1e0943f56d1a852f043d8243fb515ec3a371 100644 (file)
@@ -135,6 +135,69 @@ static struct nand_ecclayout oob_4096_ecc8 = {
        .oobfree = { {2, 6}, {136, 82} },
 };
 
+/* 8192-byte page size with 4-bit ECC */
+static struct nand_ecclayout oob_8192_ecc4 = {
+       .eccbytes = 128,
+       .eccpos = {
+               8, 9, 10, 11, 12, 13, 14, 15,
+               16, 17, 18, 19, 20, 21, 22, 23,
+               24, 25, 26, 27, 28, 29, 30, 31,
+               32, 33, 34, 35, 36, 37, 38, 39,
+               40, 41, 42, 43, 44, 45, 46, 47,
+               48, 49, 50, 51, 52, 53, 54, 55,
+               56, 57, 58, 59, 60, 61, 62, 63,
+               64, 65, 66, 67, 68, 69, 70, 71,
+               72, 73, 74, 75, 76, 77, 78, 79,
+               80, 81, 82, 83, 84, 85, 86, 87,
+               88, 89, 90, 91, 92, 93, 94, 95,
+               96, 97, 98, 99, 100, 101, 102, 103,
+               104, 105, 106, 107, 108, 109, 110, 111,
+               112, 113, 114, 115, 116, 117, 118, 119,
+               120, 121, 122, 123, 124, 125, 126, 127,
+               128, 129, 130, 131, 132, 133, 134, 135,
+       },
+       .oobfree = { {2, 6}, {136, 208} },
+};
+
+/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */
+static struct nand_ecclayout oob_8192_ecc8 = {
+       .eccbytes = 256,
+       .eccpos = {
+               8, 9, 10, 11, 12, 13, 14, 15,
+               16, 17, 18, 19, 20, 21, 22, 23,
+               24, 25, 26, 27, 28, 29, 30, 31,
+               32, 33, 34, 35, 36, 37, 38, 39,
+               40, 41, 42, 43, 44, 45, 46, 47,
+               48, 49, 50, 51, 52, 53, 54, 55,
+               56, 57, 58, 59, 60, 61, 62, 63,
+               64, 65, 66, 67, 68, 69, 70, 71,
+               72, 73, 74, 75, 76, 77, 78, 79,
+               80, 81, 82, 83, 84, 85, 86, 87,
+               88, 89, 90, 91, 92, 93, 94, 95,
+               96, 97, 98, 99, 100, 101, 102, 103,
+               104, 105, 106, 107, 108, 109, 110, 111,
+               112, 113, 114, 115, 116, 117, 118, 119,
+               120, 121, 122, 123, 124, 125, 126, 127,
+               128, 129, 130, 131, 132, 133, 134, 135,
+               136, 137, 138, 139, 140, 141, 142, 143,
+               144, 145, 146, 147, 148, 149, 150, 151,
+               152, 153, 154, 155, 156, 157, 158, 159,
+               160, 161, 162, 163, 164, 165, 166, 167,
+               168, 169, 170, 171, 172, 173, 174, 175,
+               176, 177, 178, 179, 180, 181, 182, 183,
+               184, 185, 186, 187, 188, 189, 190, 191,
+               192, 193, 194, 195, 196, 197, 198, 199,
+               200, 201, 202, 203, 204, 205, 206, 207,
+               208, 209, 210, 211, 212, 213, 214, 215,
+               216, 217, 218, 219, 220, 221, 222, 223,
+               224, 225, 226, 227, 228, 229, 230, 231,
+               232, 233, 234, 235, 236, 237, 238, 239,
+               240, 241, 242, 243, 244, 245, 246, 247,
+               248, 249, 250, 251, 252, 253, 254, 255,
+               256, 257, 258, 259, 260, 261, 262, 263,
+       },
+       .oobfree = { {2, 6}, {264, 80} },
+};
 
 /*
  * Generic flash bbt descriptors
@@ -441,20 +504,29 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                if (mtd->writesize > 512) {
                        nand_fcr0 =
                                (NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
-                               (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
+                               (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+                               (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
 
                        iowrite32be(
-                               (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-                               (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-                               (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-                               (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
-                               (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT),
-                               &ifc->ifc_nand.nand_fir0);
+                                (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+                                (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+                                (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+                                (IFC_FIR_OP_WBCD  << IFC_NAND_FIR0_OP3_SHIFT) |
+                                (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+                                &ifc->ifc_nand.nand_fir0);
+                       iowrite32be(
+                                (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+                                (IFC_FIR_OP_RDSTAT <<
+                                       IFC_NAND_FIR1_OP6_SHIFT) |
+                                (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+                                &ifc->ifc_nand.nand_fir1);
                } else {
                        nand_fcr0 = ((NAND_CMD_PAGEPROG <<
                                        IFC_NAND_FCR0_CMD1_SHIFT) |
                                    (NAND_CMD_SEQIN <<
-                                       IFC_NAND_FCR0_CMD2_SHIFT));
+                                       IFC_NAND_FCR0_CMD2_SHIFT) |
+                                   (NAND_CMD_STATUS <<
+                                       IFC_NAND_FCR0_CMD3_SHIFT));
 
                        iowrite32be(
                                (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
@@ -463,8 +535,13 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
                                (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
                                (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
                                &ifc->ifc_nand.nand_fir0);
-                       iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT,
-                                   &ifc->ifc_nand.nand_fir1);
+                       iowrite32be(
+                                (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+                                (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+                                (IFC_FIR_OP_RDSTAT <<
+                                       IFC_NAND_FIR1_OP7_SHIFT) |
+                                (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+                                 &ifc->ifc_nand.nand_fir1);
 
                        if (column >= mtd->writesize)
                                nand_fcr0 |=
@@ -718,8 +795,6 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
                                                        chip->page_shift);
        dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
                                                        chip->phys_erase_shift);
-       dev_dbg(priv->dev, "%s: nand->ecclayout = %p\n", __func__,
-                                                       chip->ecclayout);
        dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
                                                        chip->ecc.mode);
        dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
@@ -872,11 +947,25 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
                } else {
                        layout = &oob_4096_ecc8;
                        chip->ecc.bytes = 16;
+                       chip->ecc.strength = 8;
                }
 
                priv->bufnum_mask = 1;
                break;
 
+       case CSOR_NAND_PGS_8K:
+               if ((csor & CSOR_NAND_ECC_MODE_MASK) ==
+                   CSOR_NAND_ECC_MODE_4) {
+                       layout = &oob_8192_ecc4;
+               } else {
+                       layout = &oob_8192_ecc8;
+                       chip->ecc.bytes = 16;
+                       chip->ecc.strength = 8;
+               }
+
+               priv->bufnum_mask = 0;
+               break;
+
        default:
                dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
                return -ENODEV;
@@ -907,7 +996,6 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
                iounmap(priv->vbase);
 
        ifc_nand_ctrl->chips[priv->bank] = NULL;
-       dev_set_drvdata(priv->dev, NULL);
 
        return 0;
 }
@@ -1082,25 +1170,7 @@ static struct platform_driver fsl_ifc_nand_driver = {
        .remove      = fsl_ifc_nand_remove,
 };
 
-static int __init fsl_ifc_nand_init(void)
-{
-       int ret;
-
-       ret = platform_driver_register(&fsl_ifc_nand_driver);
-       if (ret)
-               printk(KERN_ERR "fsl-ifc: Failed to register platform"
-                               "driver\n");
-
-       return ret;
-}
-
-static void __exit fsl_ifc_nand_exit(void)
-{
-       platform_driver_unregister(&fsl_ifc_nand_driver);
-}
-
-module_init(fsl_ifc_nand_init);
-module_exit(fsl_ifc_nand_exit);
+module_platform_driver(fsl_ifc_nand_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Freescale");
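
For context on the conversion just above: module_platform_driver() expands to essentially the same init/exit boilerplate that the removed fsl_ifc_nand_init()/fsl_ifc_nand_exit() pair spelled out by hand, minus the error printk. A rough sketch of what the macro generates (names approximate, for illustration only):

static int __init fsl_ifc_nand_driver_init(void)
{
	/* register with the platform bus; errors propagate to modprobe */
	return platform_driver_register(&fsl_ifc_nand_driver);
}
module_init(fsl_ifc_nand_driver_init);

static void __exit fsl_ifc_nand_driver_exit(void)
{
	platform_driver_unregister(&fsl_ifc_nand_driver);
}
module_exit(fsl_ifc_nand_driver_exit);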
index 4f8857fa48a7f93f502e7b0d1a2581eebac64234..aaced29727fb0437f6a0c142253b04cbc0ca82d0 100644 (file)
@@ -187,6 +187,12 @@ int gpmi_init(struct gpmi_nand_data *this)
        /* Select BCH ECC. */
        writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
 
+       /*
+        * Decouple the chip select from the DMA channel. We use DMA
+        * channel 0 for all the chips.
+        */
+       writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
        gpmi_disable_clk(this);
        return 0;
 err_out:
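
The DECOUPLE_CS write above relies on the i.MX/MXS "set/clear/toggle" register convention: writing a mask to the _SET (or _CLR) shadow offset of a register changes only the bits in the mask, with no read-modify-write. A minimal sketch using only symbols visible in this patch (HW_GPMI_CTRL1_CLR is defined in the header hunk further down):

	/* set the bit without disturbing the rest of HW_GPMI_CTRL1 */
	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);

	/* and the corresponding way to clear it again */
	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_CLR);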
@@ -1073,6 +1079,13 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
                mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
                reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
        } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
+               /*
+                * In the imx6, all the ready/busy pins are bound
+                * together. So we only need to check chip 0.
+                */
+               if (GPMI_IS_MX6Q(this))
+                       chip = 0;
+
                /* MX28 shares the same R/B register as MX6Q. */
                mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
                reg = readl(r->gpmi_regs + HW_GPMI_STAT);
index 59ab0692f0b97f58fef750b6f5e29c11b6fa0367..6807d7cfe49c1ce7f5f7a68c305e46bdd2f283c4 100644 (file)
@@ -45,7 +45,10 @@ static struct nand_bbt_descr gpmi_bbt_descr = {
        .pattern        = scan_ff_pattern
 };
 
-/*  We will use all the (page + OOB). */
+/*
+ * We may change the layout if we can get the ECC info from the datasheet,
+ * else we will use all the (page + OOB).
+ */
 static struct nand_ecclayout gpmi_hw_ecclayout = {
        .eccbytes = 0,
        .eccpos = { 0, },
@@ -354,9 +357,8 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
 
 struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
 {
-       int chipnr = this->current_chip;
-
-       return this->dma_chans[chipnr];
+       /* We use the DMA channel 0 to access all the nand chips. */
+       return this->dma_chans[0];
 }
 
 /* Can we use the upper's buffer directly for DMA? */
@@ -1263,14 +1265,22 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
 static int
 gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
 {
-       /*
-        * The BCH will use all the (page + oob).
-        * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob.
-        * But it can not stop some ioctls such MEMWRITEOOB which uses
-        * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit
-        * these ioctls too.
-        */
-       return -EPERM;
+       struct nand_oobfree *of = mtd->ecclayout->oobfree;
+       int status = 0;
+
+       /* Do we have available oob area? */
+       if (!of->length)
+               return -EPERM;
+
+       if (!nand_is_slc(chip))
+               return -EPERM;
+
+       chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of->offset, page);
+       chip->write_buf(mtd, chip->oob_poi + of->offset, of->length);
+       chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+       status = chip->waitfunc(mtd, chip);
+       return status & NAND_STATUS_FAIL ? -EIO : 0;
 }
 
 static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -1664,7 +1674,7 @@ static int gpmi_nfc_init(struct gpmi_nand_data *this)
        if (ret)
                goto err_out;
 
-       ret = nand_scan_ident(mtd, 1, NULL);
+       ret = nand_scan_ident(mtd, 2, NULL);
        if (ret)
                goto err_out;
 
@@ -1691,19 +1701,19 @@ static const struct platform_device_id gpmi_ids[] = {
        { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
        { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
        { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
-       {},
+       {}
 };
 
 static const struct of_device_id gpmi_nand_id_table[] = {
        {
                .compatible = "fsl,imx23-gpmi-nand",
-               .data = (void *)&gpmi_ids[IS_MX23]
+               .data = (void *)&gpmi_ids[IS_MX23],
        }, {
                .compatible = "fsl,imx28-gpmi-nand",
-               .data = (void *)&gpmi_ids[IS_MX28]
+               .data = (void *)&gpmi_ids[IS_MX28],
        }, {
                .compatible = "fsl,imx6q-gpmi-nand",
-               .data = (void *)&gpmi_ids[IS_MX6Q]
+               .data = (void *)&gpmi_ids[IS_MX6Q],
        }, {}
 };
 MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
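
The new gpmi_ecc_write_oob() above programs only the free OOB bytes described by ecclayout, and only on SLC parts. As a rough illustration of how a caller reaches it through the generic MTD API (mtd_write_oob() and struct mtd_oob_ops are the standard MTD interfaces; the wrapper and sizes below are illustrative only):

#include <linux/mtd/mtd.h>

static int write_free_oob(struct mtd_info *mtd, loff_t page_addr,
			  u8 *oob, size_t len)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,	/* pack into the free OOB bytes */
		.oobbuf	= oob,
		.ooblen	= len,
	};

	/* should end up in the driver's ecc.write_oob hook shown above */
	return mtd_write_oob(mtd, page_addr, &ops);
}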
index 53397cc290fcb522c921d746e768a649de663335..82114cdc8330574ad418fbde6e51d7efb2d4da44 100644 (file)
 #define HW_GPMI_CTRL1_CLR                              0x00000068
 #define HW_GPMI_CTRL1_TOG                              0x0000006c
 
+#define BP_GPMI_CTRL1_DECOUPLE_CS                      24
+#define BM_GPMI_CTRL1_DECOUPLE_CS      (1 << BP_GPMI_CTRL1_DECOUPLE_CS)
+
 #define BP_GPMI_CTRL1_WRN_DLY_SEL                      22
 #define BM_GPMI_CTRL1_WRN_DLY_SEL      (0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
 #define BF_GPMI_CTRL1_WRN_DLY_SEL(v)  \
index f4dd2a887ea5da15b0b64c743f9ecb2603035b97..327d96c035050ce178711f03ee8ff4dd527a1fb3 100644 (file)
@@ -905,7 +905,7 @@ static struct platform_driver lpc32xx_nand_driver = {
        .driver         = {
                .name   = DRV_NAME,
                .owner  = THIS_MODULE,
-               .of_match_table = of_match_ptr(lpc32xx_nand_match),
+               .of_match_table = lpc32xx_nand_match,
        },
 };
 
index add75709d41550d893f9dc0ef144a8b2edc281f7..23e6974ccd205ec23a7bf19adcea54ffb3304bfa 100644 (file)
@@ -893,7 +893,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
 
        /* Avoid extra scan if using BBT, setup BBT support */
        if (host->ncfg->use_bbt) {
-               chip->options |= NAND_SKIP_BBTSCAN;
                chip->bbt_options |= NAND_BBT_USE_FLASH;
 
                /*
@@ -915,13 +914,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
                goto err_exit3;
        }
 
-       /* Standard layout in FLASH for bad block tables */
-       if (host->ncfg->use_bbt) {
-               if (nand_default_bbt(mtd) < 0)
-                       dev_err(&pdev->dev,
-                              "Error initializing default bad block tables\n");
-       }
-
        mtd->name = "nxp_lpc3220_slc";
        ppdata.of_node = pdev->dev.of_node;
        res = mtd_device_parse_register(mtd, NULL, &ppdata, host->ncfg->parts,
@@ -1023,7 +1015,7 @@ static struct platform_driver lpc32xx_nand_driver = {
        .driver         = {
                .name   = LPC32XX_MODNAME,
                .owner  = THIS_MODULE,
-               .of_match_table = of_match_ptr(lpc32xx_nand_match),
+               .of_match_table = lpc32xx_nand_match,
        },
 };
 
index 7ed4841327f2d7668e51c75645628f2127589f82..ec1db1e19c053367c61a5428319c21f49ad44a5d 100644 (file)
@@ -2869,10 +2869,8 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
 
        len = le16_to_cpu(p->ext_param_page_length) * 16;
        ep = kmalloc(len, GFP_KERNEL);
-       if (!ep) {
-               ret = -ENOMEM;
-               goto ext_out;
-       }
+       if (!ep)
+               return -ENOMEM;
 
        /* Send our own NAND_CMD_PARAM. */
        chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
@@ -2914,13 +2912,14 @@ static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
        /* get the info we want. */
        ecc = (struct onfi_ext_ecc_info *)cursor;
 
-       if (ecc->codeword_size) {
-               chip->ecc_strength_ds = ecc->ecc_bits;
-               chip->ecc_step_ds = 1 << ecc->codeword_size;
+       if (!ecc->codeword_size) {
+               pr_debug("Invalid codeword size\n");
+               goto ext_out;
        }
 
-       pr_info("ONFI extended param page detected.\n");
-       return 0;
+       chip->ecc_strength_ds = ecc->ecc_bits;
+       chip->ecc_step_ds = 1 << ecc->codeword_size;
+       ret = 0;
 
 ext_out:
        kfree(ep);
@@ -2937,29 +2936,34 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
        int i;
        int val;
 
-       /* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */
-       if (chip->options & NAND_BUSWIDTH_16) {
-               pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
-               return 0;
-       }
        /* Try ONFI for unknown chip or LP */
        chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
        if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
                chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
                return 0;
 
+       /*
+        * ONFI must be probed in 8-bit mode or with NAND_BUSWIDTH_AUTO, not
+        * with NAND_BUSWIDTH_16
+        */
+       if (chip->options & NAND_BUSWIDTH_16) {
+               pr_err("ONFI cannot be probed in 16-bit mode; aborting\n");
+               return 0;
+       }
+
        chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
        for (i = 0; i < 3; i++) {
                chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
                if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
                                le16_to_cpu(p->crc)) {
-                       pr_info("ONFI param page %d valid\n", i);
                        break;
                }
        }
 
-       if (i == 3)
+       if (i == 3) {
+               pr_err("Could not find valid ONFI parameter page; aborting\n");
                return 0;
+       }
 
        /* Check version */
        val = le16_to_cpu(p->revision);
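
The loop above accepts the first of the three redundant parameter-page copies whose CRC matches. The check is a plain CRC-16 over the first 254 bytes, seeded with ONFI_CRC_BASE; a sketch along the lines of the kernel's onfi_crc16() helper (the helper itself is not part of this hunk, so treat the polynomial and structure as illustrative):

static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++ << 8;
		for (i = 0; i < 8; i++)
			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
	}
	return crc;
}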
@@ -2983,11 +2987,23 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
        sanitize_string(p->model, sizeof(p->model));
        if (!mtd->name)
                mtd->name = p->model;
+
        mtd->writesize = le32_to_cpu(p->byte_per_page);
-       mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
+
+       /*
+        * pages_per_block and blocks_per_lun may not be a power-of-2 size
+        * (don't ask me who thought of this...). MTD assumes that these
+        * dimensions will be power-of-2, so just truncate the remaining area.
+        */
+       mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+       mtd->erasesize *= mtd->writesize;
+
        mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-       chip->chipsize = le32_to_cpu(p->blocks_per_lun);
+
+       /* See erasesize comment */
+       chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
        chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+       chip->bits_per_cell = p->bits_per_cell;
 
        if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
                *busw = NAND_BUSWIDTH_16;
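
A quick worked example of the truncation above, with hypothetical ONFI values: a part reporting pages_per_block = 96 is not a power of two, fls(96) = 7, so only 1 << 6 = 64 pages of each block are exposed; with 4 KiB pages that yields an erasesize of 256 KiB, and the last 32 pages of every block go unused.

	u32 pages_per_block = 96;	/* hypothetical, not a power of two */
	u32 writesize = 4096;

	/* fls() from <linux/bitops.h> */
	u32 erasesize = 1 << (fls(pages_per_block) - 1);	/* 64 pages */
	erasesize *= writesize;					/* 262144 bytes */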
@@ -3011,10 +3027,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
 
                /* The Extended Parameter Page is supported since ONFI 2.1. */
                if (nand_flash_detect_ext_param_page(mtd, chip, p))
-                       pr_info("Failed to detect the extended param page.\n");
+                       pr_warn("Failed to detect ONFI extended param page\n");
+       } else {
+               pr_warn("Could not retrieve ONFI ECC requirements\n");
        }
 
-       pr_info("ONFI flash detected\n");
        return 1;
 }
 
@@ -3077,6 +3094,16 @@ static int nand_id_len(u8 *id_data, int arrlen)
        return arrlen;
 }
 
+/* Extract the number of bits per cell from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+       int bits;
+
+       bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+       bits >>= NAND_CI_CELLTYPE_SHIFT;
+       return bits + 1;
+}
+
 /*
  * Many new NAND share similar device ID codes, which represent the size of the
  * chip. The rest of the parameters must be decoded according to generic or
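
To make nand_get_bits_per_cell() above concrete: bits [3:2] of the third ID byte encode the cell type, assuming NAND_CI_CELLTYPE_MSK is 0x0C and NAND_CI_CELLTYPE_SHIFT is 2 (consistent with how they are used here). A field value of 0 means 1 bit per cell (SLC) and 1 means 2 bits per cell (MLC), which is what the new nand_is_slc() checks key off:

	u8 cellinfo = 0x14;	/* hypothetical 3rd ID byte, bits[3:2] = 0b01 */
	int bits_per_cell = ((cellinfo & 0x0C) >> 2) + 1;	/* = 2, i.e. MLC */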
@@ -3087,7 +3114,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
 {
        int extid, id_len;
        /* The 3rd id byte holds MLC / multichip data */
-       chip->cellinfo = id_data[2];
+       chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
        /* The 4th id byte is the important one */
        extid = id_data[3];
 
@@ -3103,8 +3130,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
         * ID to decide what to do.
         */
        if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
-                       (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
-                       id_data[5] != 0x00) {
+                       !nand_is_slc(chip) && id_data[5] != 0x00) {
                /* Calc pagesize */
                mtd->writesize = 2048 << (extid & 0x03);
                extid >>= 2;
@@ -3136,7 +3162,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
                        (((extid >> 1) & 0x04) | (extid & 0x03));
                *busw = 0;
        } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
-                       (chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
+                       !nand_is_slc(chip)) {
                unsigned int tmp;
 
                /* Calc pagesize */
@@ -3199,7 +3225,7 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
                 * - ID byte 5, bit[7]:    1 -> BENAND, 0 -> raw SLC
                 */
                if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
-                               !(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+                               nand_is_slc(chip) &&
                                (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
                                !(id_data[4] & 0x80) /* !BENAND */) {
                        mtd->oobsize = 32 * mtd->writesize >> 9;
@@ -3224,6 +3250,9 @@ static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
        mtd->oobsize = mtd->writesize / 32;
        *busw = type->options & NAND_BUSWIDTH_16;
 
+       /* All legacy ID NAND are small-page, SLC */
+       chip->bits_per_cell = 1;
+
        /*
         * Check for Spansion/AMD ID + repeating 5th, 6th byte since
         * some Spansion chips have erasesize that conflicts with size
@@ -3260,11 +3289,11 @@ static void nand_decode_bbm_options(struct mtd_info *mtd,
         * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
         * AMD/Spansion, and Macronix.  All others scan only the first page.
         */
-       if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+       if (!nand_is_slc(chip) &&
                        (maf_id == NAND_MFR_SAMSUNG ||
                         maf_id == NAND_MFR_HYNIX))
                chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
-       else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+       else if ((nand_is_slc(chip) &&
                                (maf_id == NAND_MFR_SAMSUNG ||
                                 maf_id == NAND_MFR_HYNIX ||
                                 maf_id == NAND_MFR_TOSHIBA ||
@@ -3288,7 +3317,7 @@ static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
                mtd->erasesize = type->erasesize;
                mtd->oobsize = type->oobsize;
 
-               chip->cellinfo = id_data[2];
+               chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
                chip->chipsize = (uint64_t)type->chipsize << 20;
                chip->options |= type->options;
                chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
@@ -3443,11 +3472,13 @@ ident_done:
        if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
                chip->cmdfunc = nand_command_lp;
 
-       pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
-               " %dMiB, page size: %d, OOB size: %d\n",
+       pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)\n",
                *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
-               chip->onfi_version ? chip->onfi_params.model : type->name,
-               (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);
+               chip->onfi_version ? chip->onfi_params.model : type->name);
+
+       pr_info("NAND device: %dMiB, %s, page size: %d, OOB size: %d\n",
+               (int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+               mtd->writesize, mtd->oobsize);
 
        return type;
 }
@@ -3740,8 +3771,7 @@ int nand_scan_tail(struct mtd_info *mtd)
        chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
 
        /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
-       if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
-           !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
+       if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
                switch (chip->ecc.steps) {
                case 2:
                        mtd->subpage_sft = 1;
@@ -3766,7 +3796,7 @@ int nand_scan_tail(struct mtd_info *mtd)
                chip->options |= NAND_SUBPAGE_READ;
 
        /* Fill in remaining MTD driver data */
-       mtd->type = MTD_NANDFLASH;
+       mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
        mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
                                                MTD_CAP_NANDFLASH;
        mtd->_erase = nand_erase;
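
Note that mtd->type now distinguishes SLC (MTD_NANDFLASH) from MLC (MTD_MLCNANDFLASH) chips, which is why the mtd->type != MTD_NANDFLASH tests elsewhere in this series are converted to mtd_type_is_nand(); that helper presumably reduces to something like the sketch below (it is not shown in this diff, so treat it as an assumption):

static inline int mtd_type_is_nand(const struct mtd_info *mtd)
{
	/* both SLC and MLC NAND count as NAND for these callers */
	return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
}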
index bc06196d57395c5c0a9ec8dba4ea5fcd8832e8b2..c75b6a7c6ea430e040675e038ea00f398756bd64 100644 (file)
@@ -1392,4 +1392,3 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
 }
 
 EXPORT_SYMBOL(nand_scan_bbt);
-EXPORT_SYMBOL(nand_default_bbt);
index bdc1d15369f844bfce700a9794b4f8eb558fa921..3c6d5c601ade269f258175cb8bf740fd10041e09 100644 (file)
@@ -575,12 +575,12 @@ static int alloc_device(struct nandsim *ns)
                cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
                if (IS_ERR(cfile))
                        return PTR_ERR(cfile);
-               if (!cfile->f_op || (!cfile->f_op->read && !cfile->f_op->aio_read)) {
+               if (!file_readable(cfile)) {
                        NS_ERR("alloc_device: cache file not readable\n");
                        err = -EINVAL;
                        goto err_close;
                }
-               if (!cfile->f_op->write && !cfile->f_op->aio_write) {
+               if (!file_writable(cfile)) {
                        NS_ERR("alloc_device: cache file not writeable\n");
                        err = -EINVAL;
                        goto err_close;
@@ -2372,7 +2372,7 @@ static int __init ns_init_module(void)
        if ((retval = init_nandsim(nsmtd)) != 0)
                goto err_exit;
 
-       if ((retval = nand_default_bbt(nsmtd)) != 0)
+       if ((retval = chip->scan_bbt(nsmtd)) != 0)
                goto err_exit;
 
        if ((retval = parse_badblocks(nand, nsmtd)) != 0)
index dd03dfdfb0d65e0ded1b0a124221663b586482b5..aa75adf98d79f1e37fa4ae11d9b2bf8f5715d557 100644 (file)
 #define NAND_STOP_DELAY                (2 * HZ/50)
 #define PAGE_CHUNK_SIZE                (2048)
 
+/*
+ * Define a buffer size for the initial commands that detect the flash device:
+ * STATUS, READID and PARAM. The largest of these is the PARAM command,
+ * needing 256 bytes.
+ */
+#define INIT_BUFFER_SIZE       256
+
 /* registers and bit definitions */
 #define NDCR           (0x00) /* Control register */
 #define NDTR0CS0       (0x04) /* Timing Parameter 0 for CS0 */
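
The 256-byte figure above comes from the ONFI parameter page: NAND_CMD_PARAM returns 256 bytes per copy, while READID and STATUS need only a handful of bytes. Once the geometry is known, the buffer is reallocated to the real page plus spare size, so for a typical large-page device the numbers look roughly like this (values illustrative):

	/* detection phase: enough for STATUS, READID and one PARAM copy */
	info->buf_size = INIT_BUFFER_SIZE;			/* 256 */

	/* after nand_scan_ident(): full page plus OOB, e.g. 2048 + 64 */
	info->buf_size = mtd->writesize + mtd->oobsize;		/* 2112 */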
@@ -164,6 +171,7 @@ struct pxa3xx_nand_info {
 
        unsigned int            buf_start;
        unsigned int            buf_count;
+       unsigned int            buf_size;
 
        /* DMA information */
        int                     drcmr_dat;
@@ -540,7 +548,6 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
        info->oob_size          = 0;
        info->use_ecc           = 0;
        info->use_spare         = 1;
-       info->use_dma           = (use_dma) ? 1 : 0;
        info->is_ready          = 0;
        info->retcode           = ERR_NONE;
        if (info->cs != 0)
@@ -912,26 +919,20 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
        return 0;
 }
 
-/* the maximum possible buffer size for large page with OOB data
- * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
- * data buffer and the DMA descriptor
- */
-#define MAX_BUFF_SIZE  PAGE_SIZE
-
 #ifdef ARCH_HAS_DMA
 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
 {
        struct platform_device *pdev = info->pdev;
-       int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
+       int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
 
        if (use_dma == 0) {
-               info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+               info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
                if (info->data_buff == NULL)
                        return -ENOMEM;
                return 0;
        }
 
-       info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
+       info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
                                &info->data_buff_phys, GFP_KERNEL);
        if (info->data_buff == NULL) {
                dev_err(&pdev->dev, "failed to allocate dma buffer\n");
@@ -945,11 +946,16 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
                                pxa3xx_nand_data_dma_irq, info);
        if (info->data_dma_ch < 0) {
                dev_err(&pdev->dev, "failed to request data dma\n");
-               dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+               dma_free_coherent(&pdev->dev, info->buf_size,
                                info->data_buff, info->data_buff_phys);
                return info->data_dma_ch;
        }
 
+       /*
+        * Now that the DMA buffers are allocated, enable DMA
+        * for the actual I/O operations.
+        */
+       info->use_dma = 1;
        return 0;
 }
 
@@ -958,7 +964,7 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
        struct platform_device *pdev = info->pdev;
        if (use_dma) {
                pxa_free_dma(info->data_dma_ch);
-               dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+               dma_free_coherent(&pdev->dev, info->buf_size,
                                  info->data_buff, info->data_buff_phys);
        } else {
                kfree(info->data_buff);
@@ -967,7 +973,7 @@ static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
 #else
 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
 {
-       info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+       info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
        if (info->data_buff == NULL)
                return -ENOMEM;
        return 0;
@@ -1081,7 +1087,16 @@ KEEP_CONFIG:
        else
                host->col_addr_cycles = 1;
 
+       /* release the initial buffer */
+       kfree(info->data_buff);
+
+       /* allocate the real data + oob buffer */
+       info->buf_size = mtd->writesize + mtd->oobsize;
+       ret = pxa3xx_nand_init_buff(info);
+       if (ret)
+               return ret;
        info->oob_buff = info->data_buff + mtd->writesize;
+
        if ((mtd->size >> chip->page_shift) > 65536)
                host->row_addr_cycles = 3;
        else
@@ -1187,9 +1202,13 @@ static int alloc_nand_resource(struct platform_device *pdev)
        }
        info->mmio_phys = r->start;
 
-       ret = pxa3xx_nand_init_buff(info);
-       if (ret)
+       /* Allocate a buffer to allow flash detection */
+       info->buf_size = INIT_BUFFER_SIZE;
+       info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+       if (info->data_buff == NULL) {
+               ret = -ENOMEM;
                goto fail_disable_clk;
+       }
 
        /* initialize all interrupts to be disabled */
        disable_int(info, NDSR_MASK);
@@ -1207,7 +1226,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
 
 fail_free_buf:
        free_irq(irq, info);
-       pxa3xx_nand_free_buff(info);
+       kfree(info->data_buff);
 fail_disable_clk:
        clk_disable_unprepare(info->clk);
        return ret;
@@ -1407,7 +1426,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
 static struct platform_driver pxa3xx_nand_driver = {
        .driver = {
                .name   = "pxa3xx-nand",
-               .of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
+               .of_match_table = pxa3xx_nand_dt_ids,
        },
        .probe          = pxa3xx_nand_probe,
        .remove         = pxa3xx_nand_remove,
index 09dde7d27178c72b4f058da3ed9699c0b028c8e5..9a9fa4949b4f8b62d17e2cb43eac7ea909ba0964 100644 (file)
@@ -149,17 +149,13 @@ static int socrates_nand_probe(struct platform_device *ofdev)
        struct mtd_part_parser_data ppdata;
 
        /* Allocate memory for the device structure (and zero it) */
-       host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
-       if (!host) {
-               printk(KERN_ERR
-                      "socrates_nand: failed to allocate device structure.\n");
+       host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
+       if (!host)
                return -ENOMEM;
-       }
 
        host->io_base = of_iomap(ofdev->dev.of_node, 0);
        if (host->io_base == NULL) {
-               printk(KERN_ERR "socrates_nand: ioremap failed\n");
-               kfree(host);
+               dev_err(&ofdev->dev, "ioremap failed\n");
                return -EIO;
        }
 
@@ -211,9 +207,7 @@ static int socrates_nand_probe(struct platform_device *ofdev)
        nand_release(mtd);
 
 out:
-       dev_set_drvdata(&ofdev->dev, NULL);
        iounmap(host->io_base);
-       kfree(host);
        return res;
 }
 
@@ -227,9 +221,7 @@ static int socrates_nand_remove(struct platform_device *ofdev)
 
        nand_release(mtd);
 
-       dev_set_drvdata(&ofdev->dev, NULL);
        iounmap(host->io_base);
-       kfree(host);
 
        return 0;
 }
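
The devm_kzalloc() conversion above is what allows the explicit kfree(host) calls to be dropped from the error and remove paths: device-managed allocations are released automatically when probing fails or the device is unbound. A minimal sketch of the pattern (function name illustrative):

static int example_probe(struct platform_device *ofdev)
{
	struct socrates_nand_host *host;

	/* freed by the driver core on probe failure or device removal */
	host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	/* ... no matching kfree() needed anywhere ... */
	return 0;
}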
index c5f4ebf4b384404a47df44ec843046e6bb243b44..46f27de018c34e35313a559576d850283933d51a 100644 (file)
@@ -50,7 +50,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
        struct NFTLrecord *nftl;
        unsigned long temp;
 
-       if (mtd->type != MTD_NANDFLASH || mtd->size > UINT_MAX)
+       if (!mtd_type_is_nand(mtd) || mtd->size > UINT_MAX)
                return;
        /* OK, this is moderately ugly.  But probably safe.  Alternatives? */
        if (memcmp(mtd->name, "DiskOnChip", 10))
index 558071bf92de0ed607355067b08ebea7fe3833b4..2362909d20c00f740c086049d410aa2450f81d3a 100644 (file)
@@ -573,28 +573,6 @@ static int omap2_onenand_write_bufferram(struct mtd_info *mtd, int area,
 
 static struct platform_driver omap2_onenand_driver;
 
-static int __adjust_timing(struct device *dev, void *data)
-{
-       int ret = 0;
-       struct omap2_onenand *c;
-
-       c = dev_get_drvdata(dev);
-
-       BUG_ON(c->setup == NULL);
-
-       /* DMA is not in use so this is all that is needed */
-       /* Revisit for OMAP3! */
-       ret = c->setup(c->onenand.base, &c->freq);
-
-       return ret;
-}
-
-int omap2_onenand_rephase(void)
-{
-       return driver_for_each_device(&omap2_onenand_driver.driver, NULL,
-                                     NULL, __adjust_timing);
-}
-
 static void omap2_onenand_shutdown(struct platform_device *pdev)
 {
        struct omap2_onenand *c = dev_get_drvdata(&pdev->dev);
index b3f41f200622b39f5ef7d46ab6b772ed2eb9e97e..1de33b5d390358b58a2cbaaaf6991e08ebcc97db 100644 (file)
@@ -2556,10 +2556,6 @@ static int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
        int ret;
 
-       /* Check for invalid offset */
-       if (ofs > mtd->size)
-               return -EINVAL;
-
        onenand_get_device(mtd, FL_READING);
        ret = onenand_block_isbad_nolock(mtd, ofs, 0);
        onenand_release_device(mtd);
@@ -3529,7 +3525,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
 {
        struct onenand_chip *this = mtd->priv;
        unsigned die, bdry;
-       int ret, syscfg, locked;
+       int syscfg, locked;
 
        /* Disable ECC */
        syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1);
@@ -3540,7 +3536,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
                this->wait(mtd, FL_SYNCING);
 
                this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
-               ret = this->wait(mtd, FL_READING);
+               this->wait(mtd, FL_READING);
 
                bdry = this->read_word(this->base + ONENAND_DATARAM);
                if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3)
@@ -3550,7 +3546,7 @@ static int flexonenand_get_boundary(struct mtd_info *mtd)
                this->boundary[die] = bdry & FLEXONENAND_PI_MASK;
 
                this->command(mtd, ONENAND_CMD_RESET, 0, 0);
-               ret = this->wait(mtd, FL_RESETING);
+               this->wait(mtd, FL_RESETING);
 
                printk(KERN_INFO "Die %d boundary: %d%s\n", die,
                       this->boundary[die], locked ? "(Locked)" : "(Unlocked)");
@@ -3734,7 +3730,7 @@ static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
 
        /* Check if the boundary is locked */
        this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0);
-       ret = this->wait(mtd, FL_READING);
+       this->wait(mtd, FL_READING);
 
        thisboundary = this->read_word(this->base + ONENAND_DATARAM);
        if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) {
@@ -3835,7 +3831,7 @@ static int onenand_chip_probe(struct mtd_info *mtd)
 static int onenand_probe(struct mtd_info *mtd)
 {
        struct onenand_chip *this = mtd->priv;
-       int maf_id, dev_id, ver_id;
+       int dev_id, ver_id;
        int density;
        int ret;
 
@@ -3843,8 +3839,7 @@ static int onenand_probe(struct mtd_info *mtd)
        if (ret)
                return ret;
 
-       /* Read manufacturer and device IDs from Register */
-       maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID);
+       /* Device and version IDs from Register */
        dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID);
        ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID);
        this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY);
index ab2a52a039c3eef994e7e4a53645d34e1a059eb2..daf82ba7aba038ba4ca322921826cf23d748076f 100644 (file)
@@ -290,7 +290,7 @@ static void ssfdcr_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
        int cis_sector;
 
        /* Check for small page NAND flash */
-       if (mtd->type != MTD_NANDFLASH || mtd->oobsize != OOB_SIZE ||
+       if (!mtd_type_is_nand(mtd) || mtd->oobsize != OOB_SIZE ||
            mtd->size > UINT_MAX)
                return;
 
index 3cd3aabbe1cd8bd3a92a1131e4955c83c41d4431..6f976159611f4ab86ee61033fe6a242b6bc1101f 100644 (file)
@@ -349,7 +349,7 @@ static int __init mtd_nandbiterrs_init(void)
                goto exit_mtddev;
        }
 
-       if (mtd->type != MTD_NANDFLASH) {
+       if (!mtd_type_is_nand(mtd)) {
                pr_info("this test requires NAND flash\n");
                err = -ENODEV;
                goto exit_nand;
index ff35c465bfeea5cb4f8598d95e05993c8cc590db..2e9e2d11f204aa8db90d775743778ce5719443e4 100644 (file)
@@ -289,7 +289,7 @@ static int __init mtd_oobtest_init(void)
                return err;
        }
 
-       if (mtd->type != MTD_NANDFLASH) {
+       if (!mtd_type_is_nand(mtd)) {
                pr_info("this test requires NAND flash\n");
                goto out;
        }
index 44b96e999ad4694c5ad222c357d98bc22c27c247..ed2d3f656fd2ffd41fca36537244e27d7d7fe712 100644 (file)
@@ -353,7 +353,7 @@ static int __init mtd_pagetest_init(void)
                return err;
        }
 
-       if (mtd->type != MTD_NANDFLASH) {
+       if (!mtd_type_is_nand(mtd)) {
                pr_info("this test requires NAND flash\n");
                goto out;
        }
index e2c0adf24cfc35aa6692ff5267b93babc523c69c..a876371ad410c5eec22c912d924574360d769c90 100644 (file)
@@ -299,7 +299,7 @@ static int __init mtd_subpagetest_init(void)
                return err;
        }
 
-       if (mtd->type != MTD_NANDFLASH) {
+       if (!mtd_type_is_nand(mtd)) {
                pr_info("this test requires NAND flash\n");
                goto out;
        }
index c071d410488f4c95a25ef4a4fedfc325b5ff83c9..33bb1f2b63e4f323f0c0c9494b12a1ce3e46d1e2 100644 (file)
@@ -900,10 +900,9 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
                 * number.
                 */
                image_seq = be32_to_cpu(ech->image_seq);
-               if (!ubi->image_seq && image_seq)
+               if (!ubi->image_seq)
                        ubi->image_seq = image_seq;
-               if (ubi->image_seq && image_seq &&
-                   ubi->image_seq != image_seq) {
+               if (image_seq && ubi->image_seq != image_seq) {
                        ubi_err("bad image sequence number %d in PEB %d, expected %d",
                                image_seq, pnum, ubi->image_seq);
                        ubi_dump_ec_hdr(ech);
@@ -1417,9 +1416,11 @@ int ubi_attach(struct ubi_device *ubi, int force_scan)
                                ai = alloc_ai("ubi_aeb_slab_cache2");
                                if (!ai)
                                        return -ENOMEM;
-                       }
 
-                       err = scan_all(ubi, ai, UBI_FM_MAX_START);
+                               err = scan_all(ubi, ai, 0);
+                       } else {
+                               err = scan_all(ubi, ai, UBI_FM_MAX_START);
+                       }
                }
        }
 #else
index 315dcc6ec1f55a0a49de6e79e60ec701f9ae4b64..e05dc6298c1dcc157598e99ac57cb32e702918ae 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/kthread.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/major.h>
 #include "ubi.h"
 
 /* Maximum length of the 'mtd=' parameter */
index f5aa4b02cfa676f3270dc9b9a87bd87ef031893d..ead861307b3c57aac13bbb187a958a8910062904 100644 (file)
@@ -407,6 +407,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
         */
        for (i = 0; i < pool_size; i++) {
                int scrub = 0;
+               int image_seq;
 
                pnum = be32_to_cpu(pebs[i]);
 
@@ -425,10 +426,16 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
                } else if (ret == UBI_IO_BITFLIPS)
                        scrub = 1;
 
-               if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+               /*
+                * Older UBI implementations have image_seq set to zero, so
+                * we shouldn't fail if image_seq == 0.
+                */
+               image_seq = be32_to_cpu(ech->image_seq);
+
+               if (image_seq && (image_seq != ubi->image_seq)) {
                        ubi_err("bad image seq: 0x%x, expected: 0x%x",
                                be32_to_cpu(ech->image_seq), ubi->image_seq);
-                       err = UBI_BAD_FASTMAP;
+                       ret = UBI_BAD_FASTMAP;
                        goto out;
                }
 
@@ -819,6 +826,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
        list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
                list_move_tail(&tmp_aeb->u.list, &ai->free);
 
+       ubi_assert(list_empty(&used));
+       ubi_assert(list_empty(&eba_orphans));
+       ubi_assert(list_empty(&free));
+
        /*
         * If fastmap is leaking PEBs (must not happen), raise a
         * fat warning and fall back to scanning mode.
@@ -834,6 +845,19 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 fail_bad:
        ret = UBI_BAD_FASTMAP;
 fail:
+       list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
+               list_del(&tmp_aeb->u.list);
+               kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+       }
+       list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
+               list_del(&tmp_aeb->u.list);
+               kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+       }
+       list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
+               list_del(&tmp_aeb->u.list);
+               kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
+       }
+
        return ret;
 }
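
A note on the cleanup loops added in the fail path above: list_for_each_entry_safe() stashes the next entry in _tmp_aeb before the body runs, which is what makes it legal to unlink and free the current entry inside the loop, provided the entry is taken off the list before its memory is returned to the slab cache. A generic sketch of the pattern (struct item and drain() are hypothetical):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	/* payload ... */
};

static void drain(struct list_head *head, struct kmem_cache *cache)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, list) {
		list_del(&it->list);		/* unlink first ... */
		kmem_cache_free(cache, it);	/* ... then release */
	}
}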
 
@@ -923,6 +947,8 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
        }
 
        for (i = 0; i < used_blocks; i++) {
+               int image_seq;
+
                pnum = be32_to_cpu(fmsb->block_loc[i]);
 
                if (ubi_io_is_bad(ubi, pnum)) {
@@ -940,10 +966,17 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
                } else if (ret == UBI_IO_BITFLIPS)
                        fm->to_be_tortured[i] = 1;
 
+               image_seq = be32_to_cpu(ech->image_seq);
                if (!ubi->image_seq)
-                       ubi->image_seq = be32_to_cpu(ech->image_seq);
+                       ubi->image_seq = image_seq;
 
-               if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
+               /*
+                * Older UBI implementations have image_seq set to zero, so
+                * we shouldn't fail if image_seq == 0.
+                */
+               if (image_seq && (image_seq != ubi->image_seq)) {
+                       ubi_err("wrong image seq:%d instead of %d",
+                               be32_to_cpu(ech->image_seq), ubi->image_seq);
                        ret = UBI_BAD_FASTMAP;
                        goto free_hdr;
                }
index c95bfb183c62b185f2f6cc17b5d84245bdd2d13d..02317c1c02385914c94175fa8757089c677e2b94 100644 (file)
@@ -599,10 +599,6 @@ static void refill_wl_user_pool(struct ubi_device *ubi)
        return_unused_pool_pebs(ubi, pool);
 
        for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
-               if (!ubi->free.rb_node ||
-                  (ubi->free_count - ubi->beb_rsvd_pebs < 1))
-                       break;
-
                pool->pebs[pool->size] = __wl_get_peb(ubi);
                if (pool->pebs[pool->size] < 0)
                        break;
index 3a8c7532ee0d23ed1e896fe94ff017e9cbb82738..a7271e093845fabe111934ea83f7d2ab9588d44e 100644 (file)
@@ -102,8 +102,7 @@ static struct devprobe2 isa_probes[] __initdata = {
 #ifdef CONFIG_WD80x3
        {wd_probe, 0},
 #endif
-#if defined(CONFIG_NE2000) || \
-    defined(CONFIG_NE_H8300)  /* ISA (use ne2k-pci for PCI cards) */
+#if defined(CONFIG_NE2000) /* ISA (use ne2k-pci for PCI cards) */
        {ne_probe, 0},
 #endif
 #ifdef CONFIG_LANCE            /* ISA/VLB (use pcnet32 for PCI cards) */
index 4c21bf6b8b2f015abfaf77b414d3e5b7670026f2..5a5d720da9292e787b4a2cdb94e33bbe883e8920 100644 (file)
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_BONDING) += bonding.o
 
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o bond_netlink.o bond_options.o
 
 proc-$(CONFIG_PROC_FS) += bond_procfs.o
 bonding-objs += $(proc-y)
index 0d8f427ade938c75052d804138cd377b02c34f99..187b1b7772ef1b873303fc46998a591137bec7b7 100644 (file)
@@ -135,41 +135,6 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
        return bond_get_bond_by_slave(port->slave);
 }
 
-/**
- * __get_first_port - get the first port in the bond
- * @bond: the bond we're looking at
- *
- * Return the port of the first slave in @bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_first_port(struct bonding *bond)
-{
-       struct slave *first_slave = bond_first_slave(bond);
-
-       return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
-}
-
-/**
- * __get_next_port - get the next port in the bond
- * @port: the port we're looking at
- *
- * Return the port of the slave that is next in line of @port's slave in the
- * bond, or %NULL if it can't be found.
- */
-static inline struct port *__get_next_port(struct port *port)
-{
-       struct bonding *bond = __get_bond_by_port(port);
-       struct slave *slave = port->slave, *slave_next;
-
-       // If there's no bond for this port, or this is the last slave
-       if (bond == NULL)
-               return NULL;
-       slave_next = bond_next_slave(bond, slave);
-       if (!slave_next || bond_is_first_slave(bond, slave_next))
-               return NULL;
-
-       return &(SLAVE_AD_INFO(slave_next).port);
-}
-
 /**
  * __get_first_agg - get the first aggregator in the bond
  * @bond: the bond we're looking at
@@ -190,28 +155,6 @@ static inline struct aggregator *__get_first_agg(struct port *port)
        return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
 }
 
-/**
- * __get_next_agg - get the next aggregator in the bond
- * @aggregator: the aggregator we're looking at
- *
- * Return the aggregator of the slave that is next in line of @aggregator's
- * slave in the bond, or %NULL if it can't be found.
- */
-static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
-{
-       struct slave *slave = aggregator->slave, *slave_next;
-       struct bonding *bond = bond_get_bond_by_slave(slave);
-
-       // If there's no bond for this aggregator, or this is the last slave
-       if (bond == NULL)
-               return NULL;
-       slave_next = bond_next_slave(bond, slave);
-       if (!slave_next || bond_is_first_slave(bond, slave_next))
-               return NULL;
-
-       return &(SLAVE_AD_INFO(slave_next).aggregator);
-}
-
 /*
  * __agg_has_partner
  *
@@ -755,16 +698,15 @@ static u32 __get_agg_bandwidth(struct aggregator *aggregator)
  */
 static struct aggregator *__get_active_agg(struct aggregator *aggregator)
 {
-       struct aggregator *retval = NULL;
+       struct bonding *bond = aggregator->slave->bond;
+       struct list_head *iter;
+       struct slave *slave;
 
-       for (; aggregator; aggregator = __get_next_agg(aggregator)) {
-               if (aggregator->is_active) {
-                       retval = aggregator;
-                       break;
-               }
-       }
+       bond_for_each_slave(bond, slave, iter)
+               if (SLAVE_AD_INFO(slave).aggregator.is_active)
+                       return &(SLAVE_AD_INFO(slave).aggregator);
 
-       return retval;
+       return NULL;
 }
 
 /**
@@ -1274,12 +1216,17 @@ static void ad_port_selection_logic(struct port *port)
 {
        struct aggregator *aggregator, *free_aggregator = NULL, *temp_aggregator;
        struct port *last_port = NULL, *curr_port;
+       struct list_head *iter;
+       struct bonding *bond;
+       struct slave *slave;
        int found = 0;
 
        // if the port is already Selected, do nothing
        if (port->sm_vars & AD_PORT_SELECTED)
                return;
 
+       bond = __get_bond_by_port(port);
+
        // if the port is connected to other aggregator, detach it
        if (port->aggregator) {
                // detach the port from its former aggregator
@@ -1320,8 +1267,8 @@ static void ad_port_selection_logic(struct port *port)
                }
        }
        // search on all aggregators for a suitable aggregator for this port
-       for (aggregator = __get_first_agg(port); aggregator;
-            aggregator = __get_next_agg(aggregator)) {
+       bond_for_each_slave(bond, slave, iter) {
+               aggregator = &(SLAVE_AD_INFO(slave).aggregator);
 
                // keep a free aggregator for later use(if needed)
                if (!aggregator->lag_ports) {
@@ -1515,19 +1462,23 @@ static int agg_device_up(const struct aggregator *agg)
 static void ad_agg_selection_logic(struct aggregator *agg)
 {
        struct aggregator *best, *active, *origin;
+       struct bonding *bond = agg->slave->bond;
+       struct list_head *iter;
+       struct slave *slave;
        struct port *port;
 
        origin = agg;
        active = __get_active_agg(agg);
        best = (active && agg_device_up(active)) ? active : NULL;
 
-       do {
+       bond_for_each_slave(bond, slave, iter) {
+               agg = &(SLAVE_AD_INFO(slave).aggregator);
+
                agg->is_active = 0;
 
                if (agg->num_of_ports && agg_device_up(agg))
                        best = ad_agg_selection_test(best, agg);
-
-       } while ((agg = __get_next_agg(agg)));
+       }
 
        if (best &&
            __get_agg_selection_mode(best->lag_ports) == BOND_AD_STABLE) {
@@ -1565,8 +1516,8 @@ static void ad_agg_selection_logic(struct aggregator *agg)
                         best->lag_ports, best->slave,
                         best->slave ? best->slave->dev->name : "NULL");
 
-               for (agg = __get_first_agg(best->lag_ports); agg;
-                    agg = __get_next_agg(agg)) {
+               bond_for_each_slave(bond, slave, iter) {
+                       agg = &(SLAVE_AD_INFO(slave).aggregator);
 
                        pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
                                 agg->aggregator_identifier, agg->num_of_ports,
@@ -1614,13 +1565,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
                }
        }
 
-       if (origin->slave) {
-               struct bonding *bond;
-
-               bond = bond_get_bond_by_slave(origin->slave);
-               if (bond)
-                       bond_3ad_set_carrier(bond);
-       }
+       bond_3ad_set_carrier(bond);
 }
 
 /**
@@ -1969,6 +1914,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
        struct port *port, *prev_port, *temp_port;
        struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
        int select_new_active_agg = 0;
+       struct bonding *bond = slave->bond;
+       struct slave *slave_iter;
+       struct list_head *iter;
 
        // find the aggregator related to this slave
        aggregator = &(SLAVE_AD_INFO(slave).aggregator);
@@ -1998,14 +1946,16 @@ void bond_3ad_unbind_slave(struct slave *slave)
                // reason to search for new aggregator, and that we will find one
                if ((aggregator->lag_ports != port) || (aggregator->lag_ports->next_port_in_aggregator)) {
                        // find new aggregator for the related port(s)
-                       new_aggregator = __get_first_agg(port);
-                       for (; new_aggregator; new_aggregator = __get_next_agg(new_aggregator)) {
+                       bond_for_each_slave(bond, slave_iter, iter) {
+                               new_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
                                // if the new aggregator is empty, or it is connected to our port only
                                if (!new_aggregator->lag_ports
                                    || ((new_aggregator->lag_ports == port)
                                        && !new_aggregator->lag_ports->next_port_in_aggregator))
                                        break;
                        }
+                       if (!slave_iter)
+                               new_aggregator = NULL;
                        // if new aggregator found, copy the aggregator's parameters
                        // and connect the related lag_ports to the new aggregator
                        if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
@@ -2056,15 +2006,17 @@ void bond_3ad_unbind_slave(struct slave *slave)
                                pr_info("%s: Removing an active aggregator\n",
                                        slave->bond->dev->name);
                                // select new active aggregator
-                               ad_agg_selection_logic(__get_first_agg(port));
+                               temp_aggregator = __get_first_agg(port);
+                               if (temp_aggregator)
+                                       ad_agg_selection_logic(temp_aggregator);
                        }
                }
        }
 
        pr_debug("Unbinding port %d\n", port->actor_port_number);
        // find the aggregator that this port is connected to
-       temp_aggregator = __get_first_agg(port);
-       for (; temp_aggregator; temp_aggregator = __get_next_agg(temp_aggregator)) {
+       bond_for_each_slave(bond, slave_iter, iter) {
+               temp_aggregator = &(SLAVE_AD_INFO(slave_iter).aggregator);
                prev_port = NULL;
                // search the port in the aggregator's related ports
                for (temp_port = temp_aggregator->lag_ports; temp_port;
@@ -2111,19 +2063,24 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
 {
        struct bonding *bond = container_of(work, struct bonding,
                                            ad_work.work);
-       struct port *port;
        struct aggregator *aggregator;
+       struct list_head *iter;
+       struct slave *slave;
+       struct port *port;
 
        read_lock(&bond->lock);
 
        //check if there are any slaves
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        // check if agg_select_timer timer after initialize is timed out
        if (BOND_AD_INFO(bond).agg_select_timer && !(--BOND_AD_INFO(bond).agg_select_timer)) {
+               slave = bond_first_slave(bond);
+               port = slave ? &(SLAVE_AD_INFO(slave).port) : NULL;
+
                // select the active aggregator for the bond
-               if ((port = __get_first_port(bond))) {
+               if (port) {
                        if (!port->slave) {
                                pr_warning("%s: Warning: bond's first port is uninitialized\n",
                                           bond->dev->name);
@@ -2137,7 +2094,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        }
 
        // for each port run the state machines
-       for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+       bond_for_each_slave(bond, slave, iter) {
+               port = &(SLAVE_AD_INFO(slave).port);
                if (!port->slave) {
                        pr_warning("%s: Warning: Found an uninitialized port\n",
                                   bond->dev->name);
@@ -2382,9 +2340,12 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
                                   struct ad_info *ad_info)
 {
        struct aggregator *aggregator = NULL;
+       struct list_head *iter;
+       struct slave *slave;
        struct port *port;
 
-       for (port = __get_first_port(bond); port; port = __get_next_port(port)) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               port = &(SLAVE_AD_INFO(slave).port);
                if (port->aggregator && port->aggregator->is_active) {
                        aggregator = port->aggregator;
                        break;
@@ -2408,25 +2369,25 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
 {
        int ret;
 
-       read_lock(&bond->lock);
+       rcu_read_lock();
        ret = __bond_3ad_get_active_agg_info(bond, ad_info);
-       read_unlock(&bond->lock);
+       rcu_read_unlock();
 
        return ret;
 }
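
The locking change above swaps the bond->lock read lock for an RCU read-side section: bond_for_each_slave_rcu() walks the slave list safely as long as the caller holds rcu_read_lock(), which is why the lock/unlock now lives in the wrapper rather than in __bond_3ad_get_active_agg_info(). Schematically (a condensed restatement of the code above, not a new API):

	rcu_read_lock();
	bond_for_each_slave_rcu(bond, slave, iter) {
		port = &(SLAVE_AD_INFO(slave).port);
		if (port->aggregator && port->aggregator->is_active)
			break;		/* found the active aggregator */
	}
	rcu_read_unlock();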
 
 int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
 {
-       struct slave *slave, *start_at;
        struct bonding *bond = netdev_priv(dev);
-       int slave_agg_no;
-       int slaves_in_agg;
-       int agg_id;
-       int i;
+       struct slave *slave, *first_ok_slave;
+       struct aggregator *agg;
        struct ad_info ad_info;
+       struct list_head *iter;
+       int slaves_in_agg;
+       int slave_agg_no;
        int res = 1;
+       int agg_id;
 
-       read_lock(&bond->lock);
        if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
                pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
                         dev->name);
@@ -2437,20 +2398,28 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        agg_id = ad_info.aggregator_id;
 
        if (slaves_in_agg == 0) {
-               /*the aggregator is empty*/
                pr_debug("%s: Error: active aggregator is empty\n", dev->name);
                goto out;
        }
 
-       slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
+       slave_agg_no = bond_xmit_hash(bond, skb, slaves_in_agg);
+       first_ok_slave = NULL;
 
-       bond_for_each_slave(bond, slave) {
-               struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               agg = SLAVE_AD_INFO(slave).port.aggregator;
+               if (!agg || agg->aggregator_identifier != agg_id)
+                       continue;
 
-               if (agg && (agg->aggregator_identifier == agg_id)) {
+               if (slave_agg_no >= 0) {
+                       if (!first_ok_slave && SLAVE_IS_OK(slave))
+                               first_ok_slave = slave;
                        slave_agg_no--;
-                       if (slave_agg_no < 0)
-                               break;
+                       continue;
+               }
+
+               if (SLAVE_IS_OK(slave)) {
+                       res = bond_dev_queue_xmit(bond, skb, slave->dev);
+                       goto out;
                }
        }
 
@@ -2460,23 +2429,12 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
                goto out;
        }
 
-       start_at = slave;
-
-       bond_for_each_slave_from(bond, slave, i, start_at) {
-               int slave_agg_id = 0;
-               struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
-
-               if (agg)
-                       slave_agg_id = agg->aggregator_identifier;
-
-               if (SLAVE_IS_OK(slave) && agg && (slave_agg_id == agg_id)) {
-                       res = bond_dev_queue_xmit(bond, skb, slave->dev);
-                       break;
-               }
-       }
+       /* we couldn't find any suitable slave after the agg_no, so use the
+        * first suitable slave we found earlier, if any. */
+       if (first_ok_slave)
+               res = bond_dev_queue_xmit(bond, skb, first_ok_slave->dev);
 
 out:
-       read_unlock(&bond->lock);
        if (res) {
                /* no suitable interface, frame not sent */
                kfree_skb(skb);
@@ -2515,11 +2473,12 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
 void bond_3ad_update_lacp_rate(struct bonding *bond)
 {
        struct port *port = NULL;
+       struct list_head *iter;
        struct slave *slave;
        int lacp_fast;
 
        lacp_fast = bond->params.lacp_fast;
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                port = &(SLAVE_AD_INFO(slave).port);
                __get_state_machine_lock(port);
                if (lacp_fast)
index f428ef57437279ec4bbf15e1c7e8e9b6a9da7a2c..02872405d35dc4a53d13473b31d1b517d22610d0 100644 (file)
@@ -223,13 +223,14 @@ static long long compute_gap(struct slave *slave)
 static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
 {
        struct slave *slave, *least_loaded;
+       struct list_head *iter;
        long long max_gap;
 
        least_loaded = NULL;
        max_gap = LLONG_MIN;
 
        /* Find the slave with the largest gap */
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (SLAVE_IS_OK(slave)) {
                        long long gap = compute_gap(slave);
 
@@ -382,30 +383,64 @@ out:
 static struct slave *rlb_next_rx_slave(struct bonding *bond)
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-       struct slave *rx_slave, *slave, *start_at;
-       int i = 0;
+       struct slave *before = NULL, *rx_slave = NULL, *slave;
+       struct list_head *iter;
+       bool found = false;
 
-       if (bond_info->next_rx_slave)
-               start_at = bond_info->next_rx_slave;
-       else
-               start_at = bond_first_slave(bond);
+       bond_for_each_slave(bond, slave, iter) {
+               if (!SLAVE_IS_OK(slave))
+                       continue;
+               if (!found) {
+                       if (!before || before->speed < slave->speed)
+                               before = slave;
+               } else {
+                       if (!rx_slave || rx_slave->speed < slave->speed)
+                               rx_slave = slave;
+               }
+               if (slave == bond_info->rx_slave)
+                       found = true;
+       }
+       /* we didn't find anything after the current slave, or what we
+        * found before (and including) the current slave is better
+        */
+       if (!rx_slave || (before && rx_slave->speed < before->speed))
+               rx_slave = before;
 
-       rx_slave = NULL;
+       if (rx_slave)
+               bond_info->rx_slave = rx_slave;
 
-       bond_for_each_slave_from(bond, slave, i, start_at) {
-               if (SLAVE_IS_OK(slave)) {
-                       if (!rx_slave) {
-                               rx_slave = slave;
-                       } else if (slave->speed > rx_slave->speed) {
+       return rx_slave;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static struct slave *__rlb_next_rx_slave(struct bonding *bond)
+{
+       struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct slave *before = NULL, *rx_slave = NULL, *slave;
+       struct list_head *iter;
+       bool found = false;
+
+       bond_for_each_slave_rcu(bond, slave, iter) {
+               if (!SLAVE_IS_OK(slave))
+                       continue;
+               if (!found) {
+                       if (!before || before->speed < slave->speed)
+                               before = slave;
+               } else {
+                       if (!rx_slave || rx_slave->speed < slave->speed)
                                rx_slave = slave;
-                       }
                }
+               if (slave == bond_info->rx_slave)
+                       found = true;
        }
+       /* we didn't find anything after the current slave, or what we
+        * found before (and including) the current slave is better
+        */
+       if (!rx_slave || (before && rx_slave->speed < before->speed))
+               rx_slave = before;
 
-       if (rx_slave) {
-               slave = bond_next_slave(bond, rx_slave);
-               bond_info->next_rx_slave = slave;
-       }
+       if (rx_slave)
+               bond_info->rx_slave = rx_slave;
 
        return rx_slave;
 }
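
Both copies of the receive-slave selection above replace the old next_rx_slave cursor with one pass that tracks the fastest usable slave seen before the cached rx_slave and the fastest one after it, preferring the latter. A minimal sketch of that wrap-around pick, with illustrative names and plain arrays in place of the slave list:

/* Sketch of the wrap-around "best after current, else best before" pick
 * used by rlb_next_rx_slave(); illustrative only. */
static int next_rx_index(const int *ok, const int *speed, int len, int cur)
{
        int i, before = -1, after = -1, found = 0;

        for (i = 0; i < len; i++) {
                if (!ok[i])
                        continue;
                if (!found) {
                        if (before < 0 || speed[before] < speed[i])
                                before = i;
                } else {
                        if (after < 0 || speed[after] < speed[i])
                                after = i;
                }
                if (i == cur)
                        found = 1;
        }
        /* nothing after the current one, or something faster before it */
        if (after < 0 || (before >= 0 && speed[after] < speed[before]))
                return before;
        return after;
}
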
@@ -626,12 +661,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
        struct arp_pkt *arp = arp_pkt(skb);
-       struct slave *assigned_slave;
+       struct slave *assigned_slave, *curr_active_slave;
        struct rlb_client_info *client_info;
        u32 hash_index = 0;
 
        _lock_rx_hashtbl(bond);
 
+       curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
        hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
        client_info = &(bond_info->rx_hashtbl[hash_index]);
 
@@ -656,14 +693,14 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
                         * that the new client can be assigned to this entry.
                         */
                        if (bond->curr_active_slave &&
-                           client_info->slave != bond->curr_active_slave) {
-                               client_info->slave = bond->curr_active_slave;
+                           client_info->slave != curr_active_slave) {
+                               client_info->slave = curr_active_slave;
                                rlb_update_client(client_info);
                        }
                }
        }
        /* assign a new slave */
-       assigned_slave = rlb_next_rx_slave(bond);
+       assigned_slave = __rlb_next_rx_slave(bond);
 
        if (assigned_slave) {
                if (!(client_info->assigned &&
@@ -726,7 +763,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
        /* Don't modify or load balance ARPs that do not originate locally
         * (e.g., arrive via a bridge).
         */
-       if (!bond_slave_has_mac(bond, arp->mac_src))
+       if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
                return NULL;
 
        if (arp->op_code == htons(ARPOP_REPLY)) {
@@ -1019,7 +1056,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
 
        /* loop through vlans and send one packet for each */
        rcu_read_lock();
-       netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+       netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
                if (upper->priv_flags & IFF_802_1Q_VLAN)
                        alb_send_lp_vid(slave, mac_addr,
                                        vlan_dev_vlan_id(upper));
@@ -1172,10 +1209,11 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
  */
 static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
 {
-       struct slave *tmp_slave1, *free_mac_slave = NULL;
        struct slave *has_bond_addr = bond->curr_active_slave;
+       struct slave *tmp_slave1, *free_mac_slave = NULL;
+       struct list_head *iter;
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                /* this is the first slave */
                return 0;
        }
@@ -1196,7 +1234,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        /* The slave's address is equal to the address of the bond.
         * Search for a spare address in the bond for this slave.
         */
-       bond_for_each_slave(bond, tmp_slave1) {
+       bond_for_each_slave(bond, tmp_slave1, iter) {
                if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
                        /* no slave has tmp_slave1's perm addr
                         * as its curr addr
@@ -1246,15 +1284,16 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
  */
 static int alb_set_mac_address(struct bonding *bond, void *addr)
 {
-       char tmp_addr[ETH_ALEN];
-       struct slave *slave;
+       struct slave *slave, *rollback_slave;
+       struct list_head *iter;
        struct sockaddr sa;
+       char tmp_addr[ETH_ALEN];
        int res;
 
        if (bond->alb_info.rlb_enabled)
                return 0;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                /* save net_device's current hw address */
                memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
 
@@ -1274,10 +1313,12 @@ unwind:
        sa.sa_family = bond->dev->type;
 
        /* unwind from head to the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave) {
-               memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
-               dev_set_mac_address(slave->dev, &sa);
-               memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+       bond_for_each_slave(bond, rollback_slave, iter) {
+               if (rollback_slave == slave)
+                       break;
+               memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+               dev_set_mac_address(rollback_slave->dev, &sa);
+               memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
        }
 
        return res;
@@ -1337,11 +1378,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
        skb_reset_mac_header(skb);
        eth_data = eth_hdr(skb);
 
-       /* make sure that the curr_active_slave do not change during tx
-        */
-       read_lock(&bond->lock);
-       read_lock(&bond->curr_slave_lock);
-
        switch (ntohs(skb->protocol)) {
        case ETH_P_IP: {
                const struct iphdr *iph = ip_hdr(skb);
@@ -1423,12 +1459,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 
        if (!tx_slave) {
                /* unbalanced or unassigned, send through primary */
-               tx_slave = bond->curr_active_slave;
+               tx_slave = rcu_dereference(bond->curr_active_slave);
                bond_info->unbalanced_load += skb->len;
        }
 
        if (tx_slave && SLAVE_IS_OK(tx_slave)) {
-               if (tx_slave != bond->curr_active_slave) {
+               if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
                        memcpy(eth_data->h_source,
                               tx_slave->dev->dev_addr,
                               ETH_ALEN);
@@ -1443,8 +1479,6 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                }
        }
 
-       read_unlock(&bond->curr_slave_lock);
-       read_unlock(&bond->lock);
        if (res) {
                /* no suitable interface, frame not sent */
                kfree_skb(skb);
@@ -1458,11 +1492,12 @@ void bond_alb_monitor(struct work_struct *work)
        struct bonding *bond = container_of(work, struct bonding,
                                            alb_work.work);
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct list_head *iter;
        struct slave *slave;
 
        read_lock(&bond->lock);
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                bond_info->tx_rebalance_counter = 0;
                bond_info->lp_counter = 0;
                goto re_arm;
@@ -1480,7 +1515,7 @@ void bond_alb_monitor(struct work_struct *work)
                 */
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave(bond, slave)
+               bond_for_each_slave(bond, slave, iter)
                        alb_send_learning_packets(slave, slave->dev->dev_addr);
 
                read_unlock(&bond->curr_slave_lock);
@@ -1493,7 +1528,7 @@ void bond_alb_monitor(struct work_struct *work)
 
                read_lock(&bond->curr_slave_lock);
 
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        tlb_clear_slave(bond, slave, 1);
                        if (slave == bond->curr_active_slave) {
                                SLAVE_TLB_INFO(slave).load =
@@ -1599,13 +1634,13 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
  */
 void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
 {
-       if (!list_empty(&bond->slave_list))
+       if (bond_has_slaves(bond))
                alb_change_hw_addr_on_detach(bond, slave);
 
        tlb_clear_slave(bond, slave, 0);
 
        if (bond->alb_info.rlb_enabled) {
-               bond->alb_info.next_rx_slave = NULL;
+               bond->alb_info.rx_slave = NULL;
                rlb_clear_slave(bond, slave);
        }
 }
@@ -1669,7 +1704,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        swap_slave = bond->curr_active_slave;
        rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
-       if (!new_slave || list_empty(&bond->slave_list))
+       if (!new_slave || !bond_has_slaves(bond))
                return;
 
        /* set the new curr_active_slave to the bond's mac address
@@ -1692,6 +1727,23 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 
        ASSERT_RTNL();
 
+       /* in TLB mode, the slave might flip down/up with the old dev_addr,
+        * and thus filter bond->dev_addr's packets, so force bond's mac
+        */
+       if (bond->params.mode == BOND_MODE_TLB) {
+               struct sockaddr sa;
+               u8 tmp_addr[ETH_ALEN];
+
+               memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+
+               memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
+               sa.sa_family = bond->dev->type;
+               /* we don't care if it can't change its mac, best effort */
+               dev_set_mac_address(new_slave->dev, &sa);
+
+               memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+       }
+
        /* curr_active_slave must be set before calling alb_swap_mac_addr */
        if (swap_slave) {
                /* swap mac address */
index c5eff5dafdfeab12ee4849fe75d9117d70c4859f..4226044efd083645db9229c0f88e507eae6d410b 100644 (file)
@@ -154,9 +154,7 @@ struct alb_bond_info {
        u8                      rx_ntt; /* flag - need to transmit
                                         * to all rx clients
                                         */
-       struct slave            *next_rx_slave;/* next slave to be assigned
-                                               * to a new rx client for
-                                               */
+       struct slave            *rx_slave;/* last slave to xmit from */
        u8                      primary_is_promisc;        /* boolean */
        u32                     rlb_promisc_timeout_counter;/* counts primary
                                                             * promiscuity time
index e883bfe2e727aa1bf6fd2fb2cea2375578c8c06d..2daa066c6cdd2dfc9688f35611fc46ba0e25ff3e 100644 (file)
@@ -78,6 +78,7 @@
 #include <net/netns/generic.h>
 #include <net/pkt_sched.h>
 #include <linux/rculist.h>
+#include <net/flow_keys.h>
 #include "bonding.h"
 #include "bond_3ad.h"
 #include "bond_alb.h"
@@ -159,7 +160,8 @@ MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on
 module_param(xmit_hash_policy, charp, 0);
 MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
                                   "0 for layer 2 (default), 1 for layer 3+4, "
-                                  "2 for layer 2+3");
+                                  "2 for layer 2+3, 3 for encap layer 2+3, "
+                                  "4 for encap layer 3+4");
 module_param(arp_interval, int, 0);
 MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
 module_param_array(arp_ip_target, charp, NULL, 0);
@@ -217,6 +219,8 @@ const struct bond_parm_tbl xmit_hashtype_tbl[] = {
 {      "layer2",               BOND_XMIT_POLICY_LAYER2},
 {      "layer3+4",             BOND_XMIT_POLICY_LAYER34},
 {      "layer2+3",             BOND_XMIT_POLICY_LAYER23},
+{      "encap2+3",             BOND_XMIT_POLICY_ENCAP23},
+{      "encap3+4",             BOND_XMIT_POLICY_ENCAP34},
 {      NULL,                   -1},
 };
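
The two new policy strings are resolved against this table just like the existing ones; a minimal sketch of such a bond_parm_tbl lookup is below. The helper name is a stand-in, not necessarily the driver's own parser.

/* Illustrative lookup of a policy string such as "encap3+4" in the table
 * above; the helper name is made up. */
static int lookup_xmit_policy(const char *name, const struct bond_parm_tbl *tbl)
{
        int i;

        for (i = 0; tbl[i].modename; i++)
                if (!strcmp(name, tbl[i].modename))
                        return tbl[i].mode;     /* e.g. BOND_XMIT_POLICY_ENCAP34 */
        return -1;                              /* unknown policy string */
}
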
 
@@ -332,10 +336,11 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
                                __be16 proto, u16 vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
+       struct slave *slave, *rollback_slave;
+       struct list_head *iter;
        int res;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                res = vlan_vid_add(slave->dev, proto, vid);
                if (res)
                        goto unwind;
@@ -344,9 +349,13 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
        return 0;
 
 unwind:
-       /* unwind from the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave)
-               vlan_vid_del(slave->dev, proto, vid);
+       /* unwind to the slave that failed */
+       bond_for_each_slave(bond, rollback_slave, iter) {
+               if (rollback_slave == slave)
+                       break;
+
+               vlan_vid_del(rollback_slave->dev, proto, vid);
+       }
 
        return res;
 }
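
With bond_for_each_slave_continue_reverse() gone, the unwind above (and the equivalent ones in alb_set_mac_address() and bond_change_mtu()) re-walks the list from the head with a second cursor and stops at the slave whose operation failed. A minimal sketch of that rollback shape; struct item, do_op() and undo_op() are stand-ins, not driver code.

/* Sketch of the forward-walk rollback used by the unwind labels above. */
struct item { int dummy; };

static int do_op(struct item *it)    { (void)it; return 0; } /* stand-in */
static void undo_op(struct item *it) { (void)it; }           /* stand-in */

static int apply_or_rollback(struct item *items, int len)
{
        int i, j, res = 0;

        for (i = 0; i < len; i++) {
                res = do_op(&items[i]);
                if (res)
                        goto unwind;
        }
        return 0;

unwind:
        for (j = 0; j < len && j != i; j++)
                undo_op(&items[j]);     /* undo only what succeeded */
        return res;
}
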
@@ -360,9 +369,10 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
                                 __be16 proto, u16 vid)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave)
+       bond_for_each_slave(bond, slave, iter)
                vlan_vid_del(slave->dev, proto, vid);
 
        if (bond_is_lb(bond))
@@ -382,15 +392,16 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
  */
 static int bond_set_carrier(struct bonding *bond)
 {
+       struct list_head *iter;
        struct slave *slave;
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto down;
 
        if (bond->params.mode == BOND_MODE_8023AD)
                return bond_3ad_set_carrier(bond);
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (slave->link == BOND_LINK_UP) {
                        if (!netif_carrier_ok(bond->dev)) {
                                netif_carrier_on(bond->dev);
@@ -522,7 +533,9 @@ static int bond_check_dev_link(struct bonding *bond,
  */
 static int bond_set_promiscuity(struct bonding *bond, int inc)
 {
+       struct list_head *iter;
        int err = 0;
+
        if (USES_PRIMARY(bond->params.mode)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
@@ -532,7 +545,7 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
        } else {
                struct slave *slave;
 
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        err = dev_set_promiscuity(slave->dev, inc);
                        if (err)
                                return err;
@@ -546,7 +559,9 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
  */
 static int bond_set_allmulti(struct bonding *bond, int inc)
 {
+       struct list_head *iter;
        int err = 0;
+
        if (USES_PRIMARY(bond->params.mode)) {
                /* write lock already acquired */
                if (bond->curr_active_slave) {
@@ -556,7 +571,7 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
        } else {
                struct slave *slave;
 
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        err = dev_set_allmulti(slave->dev, inc);
                        if (err)
                                return err;
@@ -774,43 +789,24 @@ static bool bond_should_change_active(struct bonding *bond)
 /**
  * bond_find_best_slave - select the best available slave to be the active one
  * @bond: our bonding struct
- *
- * Warning: Caller must hold curr_slave_lock for writing.
  */
 static struct slave *bond_find_best_slave(struct bonding *bond)
 {
-       struct slave *new_active, *old_active;
-       struct slave *bestslave = NULL;
+       struct slave *slave, *bestslave = NULL;
+       struct list_head *iter;
        int mintime = bond->params.updelay;
-       int i;
-
-       new_active = bond->curr_active_slave;
 
-       if (!new_active) { /* there were no active slaves left */
-               new_active = bond_first_slave(bond);
-               if (!new_active)
-                       return NULL; /* still no slave, return NULL */
-       }
-
-       if ((bond->primary_slave) &&
-           bond->primary_slave->link == BOND_LINK_UP &&
-           bond_should_change_active(bond)) {
-               new_active = bond->primary_slave;
-       }
-
-       /* remember where to stop iterating over the slaves */
-       old_active = new_active;
-
-       bond_for_each_slave_from(bond, new_active, i, old_active) {
-               if (new_active->link == BOND_LINK_UP) {
-                       return new_active;
-               } else if (new_active->link == BOND_LINK_BACK &&
-                          IS_UP(new_active->dev)) {
-                       /* link up, but waiting for stabilization */
-                       if (new_active->delay < mintime) {
-                               mintime = new_active->delay;
-                               bestslave = new_active;
-                       }
+       if (bond->primary_slave && bond->primary_slave->link == BOND_LINK_UP &&
+           bond_should_change_active(bond))
+               return bond->primary_slave;
+
+       bond_for_each_slave(bond, slave, iter) {
+               if (slave->link == BOND_LINK_UP)
+                       return slave;
+               if (slave->link == BOND_LINK_BACK && IS_UP(slave->dev) &&
+                   slave->delay < mintime) {
+                       mintime = slave->delay;
+                       bestslave = slave;
                }
        }
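
The rewritten bond_find_best_slave() above reduces slave selection to a fixed priority: the primary wins if its link is up and it is allowed to preempt, otherwise the first slave with a fully up link wins, and failing that the BOND_LINK_BACK slave with the least remaining updelay is kept as the best candidate. A standalone sketch of that ordering; the struct and field names are made up for illustration.

/* Sketch of the selection order implemented above; not driver types. */
struct cand {
        int link_up;    /* link fully up */
        int coming_up;  /* link up but still serving its updelay */
        int delay;      /* remaining updelay */
};

static const struct cand *find_best(const struct cand *c, int n,
                                    const struct cand *primary, int updelay)
{
        const struct cand *best = NULL;
        int mintime = updelay, i;

        if (primary && primary->link_up)
                return primary;         /* primary wins (preemption check omitted) */

        for (i = 0; i < n; i++) {
                if (c[i].link_up)
                        return &c[i];   /* first fully up slave wins */
                if (c[i].coming_up && c[i].delay < mintime) {
                        mintime = c[i].delay;
                        best = &c[i];   /* closest to finishing updelay */
                }
        }
        return best;
}
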
 
@@ -971,35 +967,6 @@ void bond_select_active_slave(struct bonding *bond)
        }
 }
 
-/*--------------------------- slave list handling ---------------------------*/
-
-/*
- * This function attaches the slave to the end of list.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
-{
-       list_add_tail_rcu(&new_slave->list, &bond->slave_list);
-       bond->slave_cnt++;
-}
-
-/*
- * This function detaches the slave from the list.
- * WARNING: no check is made to verify if the slave effectively
- * belongs to <bond>.
- * Nothing is freed on return, structures are just unchained.
- * If any slave pointer in bond was pointing to <slave>,
- * it should be changed by the calling function.
- *
- * bond->lock held for writing by caller.
- */
-static void bond_detach_slave(struct bonding *bond, struct slave *slave)
-{
-       list_del_rcu(&slave->list);
-       bond->slave_cnt--;
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static inline int slave_enable_netpoll(struct slave *slave)
 {
@@ -1046,9 +1013,10 @@ static void bond_poll_controller(struct net_device *bond_dev)
 static void bond_netpoll_cleanup(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave)
+       bond_for_each_slave(bond, slave, iter)
                if (IS_UP(slave->dev))
                        slave_disable_netpoll(slave);
 }
@@ -1056,10 +1024,11 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
 static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
 {
        struct bonding *bond = netdev_priv(dev);
+       struct list_head *iter;
        struct slave *slave;
        int err = 0;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                err = slave_enable_netpoll(slave);
                if (err) {
                        bond_netpoll_cleanup(dev);
@@ -1087,10 +1056,11 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
                                           netdev_features_t features)
 {
        struct bonding *bond = netdev_priv(dev);
+       struct list_head *iter;
        netdev_features_t mask;
        struct slave *slave;
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                /* Disable adding VLANs to empty bond. But why? --mq */
                features |= NETIF_F_VLAN_CHALLENGED;
                return features;
@@ -1100,7 +1070,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
        features &= ~NETIF_F_ONE_FOR_ALL;
        features |= NETIF_F_ALL_FOR_ALL;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                features = netdev_increment_features(features,
                                                     slave->dev->features,
                                                     mask);
@@ -1118,16 +1088,17 @@ static void bond_compute_features(struct bonding *bond)
 {
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
        netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+       struct net_device *bond_dev = bond->dev;
+       struct list_head *iter;
+       struct slave *slave;
        unsigned short max_hard_header_len = ETH_HLEN;
        unsigned int gso_max_size = GSO_MAX_SIZE;
-       struct net_device *bond_dev = bond->dev;
        u16 gso_max_segs = GSO_MAX_SEGS;
-       struct slave *slave;
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto done;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                vlan_features = netdev_increment_features(vlan_features,
                        slave->dev->vlan_features, BOND_VLAN_FEATURES);
 
@@ -1233,11 +1204,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 }
 
 static int bond_master_upper_dev_link(struct net_device *bond_dev,
-                                     struct net_device *slave_dev)
+                                     struct net_device *slave_dev,
+                                     struct slave *slave)
 {
        int err;
 
-       err = netdev_master_upper_dev_link(slave_dev, bond_dev);
+       err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
        if (err)
                return err;
        slave_dev->flags |= IFF_SLAVE;
@@ -1258,7 +1230,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-       struct slave *new_slave = NULL;
+       struct slave *new_slave = NULL, *prev_slave;
        struct sockaddr addr;
        int link_reporting;
        int res = 0, i;
@@ -1313,7 +1285,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         * bond ether type mutual exclusion - don't allow slaves of dissimilar
         * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
         */
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                if (bond_dev->type != slave_dev->type) {
                        pr_debug("%s: change device type from %d to %d\n",
                                 bond_dev->name,
@@ -1352,7 +1324,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        if (slave_ops->ndo_set_mac_address == NULL) {
-               if (list_empty(&bond->slave_list)) {
+               if (!bond_has_slaves(bond)) {
                        pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
                                   bond_dev->name);
                        bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1368,7 +1340,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        /* If this is the first slave, then we need to set the master's hardware
         * address to be the same as the slave's. */
-       if (list_empty(&bond->slave_list) &&
+       if (!bond_has_slaves(bond) &&
            bond->dev->addr_assign_type == NET_ADDR_RANDOM)
                bond_set_dev_addr(bond->dev, slave_dev);
 
@@ -1377,7 +1349,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                res = -ENOMEM;
                goto err_undo_flags;
        }
-       INIT_LIST_HEAD(&new_slave->list);
        /*
         * Set the new_slave's queue_id to be zero.  Queue ID mapping
         * is set via sysfs or module option if desired.
@@ -1413,17 +1384,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
-       res = bond_master_upper_dev_link(bond_dev, slave_dev);
-       if (res) {
-               pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
-               goto err_restore_mac;
-       }
-
        /* open the slave since the application closed it */
        res = dev_open(slave_dev);
        if (res) {
                pr_debug("Opening slave %s failed\n", slave_dev->name);
-               goto err_unset_master;
+               goto err_restore_mac;
        }
 
        new_slave->bond = bond;
@@ -1479,21 +1444,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                goto err_close;
        }
 
-       write_lock_bh(&bond->lock);
-
-       bond_attach_slave(bond, new_slave);
+       prev_slave = bond_last_slave(bond);
 
        new_slave->delay = 0;
        new_slave->link_failure_count = 0;
 
-       write_unlock_bh(&bond->lock);
-
-       bond_compute_features(bond);
-
        bond_update_speed_duplex(new_slave);
 
-       read_lock(&bond->lock);
-
        new_slave->last_arp_rx = jiffies -
                (msecs_to_jiffies(bond->params.arp_interval) + 1);
        for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
@@ -1554,12 +1511,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                }
        }
 
-       write_lock_bh(&bond->curr_slave_lock);
-
        switch (bond->params.mode) {
        case BOND_MODE_ACTIVEBACKUP:
                bond_set_slave_inactive_flags(new_slave);
-               bond_select_active_slave(bond);
                break;
        case BOND_MODE_8023AD:
                /* in 802.3ad mode, the internal mechanism
@@ -1568,16 +1522,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                 */
                bond_set_slave_inactive_flags(new_slave);
                /* if this is the first slave */
-               if (bond_first_slave(bond) == new_slave) {
+               if (!prev_slave) {
                        SLAVE_AD_INFO(new_slave).id = 1;
                        /* Initialize AD with the number of times that the AD timer is called in 1 second;
                         * can be called only after the mac address of the bond is set
                         */
                        bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
                } else {
-                       struct slave *prev_slave;
-
-                       prev_slave = bond_prev_slave(bond, new_slave);
                        SLAVE_AD_INFO(new_slave).id =
                                SLAVE_AD_INFO(prev_slave).id + 1;
                }
@@ -1588,7 +1539,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        case BOND_MODE_ALB:
                bond_set_active_slave(new_slave);
                bond_set_slave_inactive_flags(new_slave);
-               bond_select_active_slave(bond);
                break;
        default:
                pr_debug("This slave is always active in trunk mode\n");
@@ -1606,10 +1556,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                break;
        } /* switch(bond_mode) */
 
-       write_unlock_bh(&bond->curr_slave_lock);
-
-       bond_set_carrier(bond);
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
        slave_dev->npinfo = bond->dev->npinfo;
        if (slave_dev->npinfo) {
@@ -1624,17 +1570,29 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 #endif
 
-       read_unlock(&bond->lock);
-
-       res = bond_create_slave_symlinks(bond_dev, slave_dev);
-       if (res)
-               goto err_detach;
-
        res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
                                         new_slave);
        if (res) {
                pr_debug("Error %d calling netdev_rx_handler_register\n", res);
-               goto err_dest_symlinks;
+               goto err_detach;
+       }
+
+       res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
+       if (res) {
+               pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
+               goto err_unregister;
+       }
+
+       bond->slave_cnt++;
+       bond_compute_features(bond);
+       bond_set_carrier(bond);
+
+       if (USES_PRIMARY(bond->params.mode)) {
+               read_lock(&bond->lock);
+               write_lock_bh(&bond->curr_slave_lock);
+               bond_select_active_slave(bond);
+               write_unlock_bh(&bond->curr_slave_lock);
+               read_unlock(&bond->lock);
        }
 
        pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1646,8 +1604,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        return 0;
 
 /* Undo stages on error */
-err_dest_symlinks:
-       bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_unregister:
+       netdev_rx_handler_unregister(slave_dev);
 
 err_detach:
        if (!USES_PRIMARY(bond->params.mode))
@@ -1655,7 +1613,6 @@ err_detach:
 
        vlan_vids_del_by_dev(slave_dev, bond_dev);
        write_lock_bh(&bond->lock);
-       bond_detach_slave(bond, new_slave);
        if (bond->primary_slave == new_slave)
                bond->primary_slave = NULL;
        if (bond->curr_active_slave == new_slave) {
@@ -1675,9 +1632,6 @@ err_close:
        slave_dev->priv_flags &= ~IFF_BONDING;
        dev_close(slave_dev);
 
-err_unset_master:
-       bond_upper_dev_unlink(bond_dev, slave_dev);
-
 err_restore_mac:
        if (!bond->params.fail_over_mac) {
                /* XXX TODO - fom follow mode needs to change master's
@@ -1696,9 +1650,8 @@ err_free:
        kfree(new_slave);
 
 err_undo_flags:
-       bond_compute_features(bond);
        /* Enslave of first slave has failed and we need to fix master's mac */
-       if (list_empty(&bond->slave_list) &&
+       if (!bond_has_slaves(bond) &&
            ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
                eth_hw_addr_random(bond_dev);
 
@@ -1749,6 +1702,11 @@ static int __bond_release_one(struct net_device *bond_dev,
        }
 
        write_unlock_bh(&bond->lock);
+
+       /* release the slave from its bond */
+       bond->slave_cnt--;
+
+       bond_upper_dev_unlink(bond_dev, slave_dev);
        /* unregister rx_handler early so bond_handle_frame wouldn't be called
         * for this slave anymore.
         */
@@ -1772,12 +1730,9 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        bond->current_arp_slave = NULL;
 
-       /* release the slave from its bond */
-       bond_detach_slave(bond, slave);
-
        if (!all && !bond->params.fail_over_mac) {
                if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
-                   !list_empty(&bond->slave_list))
+                   bond_has_slaves(bond))
                        pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
                                   bond_dev->name, slave_dev->name,
                                   slave->perm_hwaddr,
@@ -1820,7 +1775,7 @@ static int __bond_release_one(struct net_device *bond_dev,
                write_lock_bh(&bond->lock);
        }
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                bond_set_carrier(bond);
                eth_hw_addr_random(bond_dev);
 
@@ -1836,7 +1791,7 @@ static int __bond_release_one(struct net_device *bond_dev,
        unblock_netpoll_tx();
        synchronize_rcu();
 
-       if (list_empty(&bond->slave_list)) {
+       if (!bond_has_slaves(bond)) {
                call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
                call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
        }
@@ -1848,8 +1803,6 @@ static int __bond_release_one(struct net_device *bond_dev,
                        bond_dev->name, slave_dev->name, bond_dev->name);
 
        /* must do this from outside any spinlocks */
-       bond_destroy_slave_symlinks(bond_dev, slave_dev);
-
        vlan_vids_del_by_dev(slave_dev, bond_dev);
 
        /* If the mode USES_PRIMARY, then this case was handled above by
@@ -1873,8 +1826,6 @@ static int __bond_release_one(struct net_device *bond_dev,
                bond_hw_addr_flush(bond_dev, slave_dev);
        }
 
-       bond_upper_dev_unlink(bond_dev, slave_dev);
-
        slave_disable_netpoll(slave);
 
        /* close slave before restoring its mac address */
@@ -1913,7 +1864,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
        int ret;
 
        ret = bond_release(bond_dev, slave_dev);
-       if (ret == 0 && list_empty(&bond->slave_list)) {
+       if (ret == 0 && !bond_has_slaves(bond)) {
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
                pr_info("%s: destroying bond %s.\n",
                        bond_dev->name, bond_dev->name);
@@ -1922,61 +1873,6 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
        return ret;
 }
 
-/*
- * This function changes the active slave to slave <slave_dev>.
- * It returns -EINVAL in the following cases.
- *  - <slave_dev> is not found in the list.
- *  - There is not active slave now.
- *  - <slave_dev> is already active.
- *  - The link state of <slave_dev> is not BOND_LINK_UP.
- *  - <slave_dev> is not running.
- * In these cases, this function does nothing.
- * In the other cases, current_slave pointer is changed and 0 is returned.
- */
-static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
-{
-       struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *old_active = NULL;
-       struct slave *new_active = NULL;
-       int res = 0;
-
-       if (!USES_PRIMARY(bond->params.mode))
-               return -EINVAL;
-
-       /* Verify that bond_dev is indeed the master of slave_dev */
-       if (!(slave_dev->flags & IFF_SLAVE) ||
-           !netdev_has_upper_dev(slave_dev, bond_dev))
-               return -EINVAL;
-
-       read_lock(&bond->lock);
-
-       old_active = bond->curr_active_slave;
-       new_active = bond_get_slave_by_dev(bond, slave_dev);
-       /*
-        * Changing to the current active: do nothing; return success.
-        */
-       if (new_active && new_active == old_active) {
-               read_unlock(&bond->lock);
-               return 0;
-       }
-
-       if (new_active &&
-           old_active &&
-           new_active->link == BOND_LINK_UP &&
-           IS_UP(new_active->dev)) {
-               block_netpoll_tx();
-               write_lock_bh(&bond->curr_slave_lock);
-               bond_change_active_slave(bond, new_active);
-               write_unlock_bh(&bond->curr_slave_lock);
-               unblock_netpoll_tx();
-       } else
-               res = -EINVAL;
-
-       read_unlock(&bond->lock);
-
-       return res;
-}
-
 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
@@ -1994,11 +1890,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        int i = 0, res = -ENODEV;
        struct slave *slave;
 
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (i++ == (int)info->slave_id) {
                        res = 0;
                        strcpy(info->slave_name, slave->dev->name);
@@ -2019,12 +1916,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 static int bond_miimon_inspect(struct bonding *bond)
 {
        int link_state, commit = 0;
+       struct list_head *iter;
        struct slave *slave;
        bool ignore_updelay;
 
        ignore_updelay = !bond->curr_active_slave ? true : false;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                slave->new_link = BOND_LINK_NOCHANGE;
 
                link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2118,9 +2016,10 @@ static int bond_miimon_inspect(struct bonding *bond)
 
 static void bond_miimon_commit(struct bonding *bond)
 {
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                switch (slave->new_link) {
                case BOND_LINK_NOCHANGE:
                        continue;
@@ -2225,7 +2124,7 @@ void bond_mii_monitor(struct work_struct *work)
 
        delay = msecs_to_jiffies(bond->params.miimon);
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        should_notify_peers = bond_should_notify_peers(bond);
@@ -2274,7 +2173,7 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
                return true;
 
        rcu_read_lock();
-       netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+       netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
                if (ip == bond_confirm_addr(upper, 0, ip)) {
                        ret = true;
                        break;
@@ -2349,10 +2248,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                 *
                 * TODO: QinQ?
                 */
-               netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
+               netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper,
+                                                 vlan_iter) {
                        if (!is_vlan_dev(vlan_upper))
                                continue;
-                       netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
+                       netdev_for_each_all_upper_dev_rcu(vlan_upper, upper,
+                                                         iter) {
                                if (upper == rt->dst.dev) {
                                        vlan_id = vlan_dev_vlan_id(vlan_upper);
                                        rcu_read_unlock();
@@ -2365,7 +2266,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                 * our upper vlans, then just search for any dev that
                 * matches, and in case it's a vlan - save the id
                 */
-               netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+               netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
                        if (upper == rt->dst.dev) {
                                /* if it's a vlan - get its VID */
                                if (is_vlan_dev(upper))
@@ -2512,11 +2413,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
        struct bonding *bond = container_of(work, struct bonding,
                                            arp_work.work);
        struct slave *slave, *oldcurrent;
+       struct list_head *iter;
        int do_failover = 0;
 
        read_lock(&bond->lock);
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        oldcurrent = bond->curr_active_slave;
@@ -2528,7 +2430,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
         * TODO: what about up/down delay in arp mode? it wasn't here before
         *       so it can wait
         */
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                unsigned long trans_start = dev_trans_start(slave->dev);
 
                if (slave->link != BOND_LINK_UP) {
@@ -2619,10 +2521,11 @@ re_arm:
 static int bond_ab_arp_inspect(struct bonding *bond)
 {
        unsigned long trans_start, last_rx;
+       struct list_head *iter;
        struct slave *slave;
        int commit = 0;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                slave->new_link = BOND_LINK_NOCHANGE;
                last_rx = slave_last_rx(bond, slave);
 
@@ -2689,9 +2592,10 @@ static int bond_ab_arp_inspect(struct bonding *bond)
 static void bond_ab_arp_commit(struct bonding *bond)
 {
        unsigned long trans_start;
+       struct list_head *iter;
        struct slave *slave;
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                switch (slave->new_link) {
                case BOND_LINK_NOCHANGE:
                        continue;
@@ -2762,8 +2666,9 @@ do_failover:
  */
 static void bond_ab_arp_probe(struct bonding *bond)
 {
-       struct slave *slave, *next_slave;
-       int i;
+       struct slave *slave, *before = NULL, *new_slave = NULL;
+       struct list_head *iter;
+       bool found = false;
 
        read_lock(&bond->curr_slave_lock);
 
@@ -2793,18 +2698,12 @@ static void bond_ab_arp_probe(struct bonding *bond)
 
        bond_set_slave_inactive_flags(bond->current_arp_slave);
 
-       /* search for next candidate */
-       next_slave = bond_next_slave(bond, bond->current_arp_slave);
-       bond_for_each_slave_from(bond, slave, i, next_slave) {
-               if (IS_UP(slave->dev)) {
-                       slave->link = BOND_LINK_BACK;
-                       bond_set_slave_active_flags(slave);
-                       bond_arp_send_all(bond, slave);
-                       slave->jiffies = jiffies;
-                       bond->current_arp_slave = slave;
-                       break;
-               }
+       bond_for_each_slave(bond, slave, iter) {
+               if (!found && !before && IS_UP(slave->dev))
+                       before = slave;
 
+               if (found && !new_slave && IS_UP(slave->dev))
+                       new_slave = slave;
                /* if the link state is up at this point, we
                 * mark it down - this can happen if we have
                 * simultaneous link failures and
@@ -2812,7 +2711,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
                 * one the current slave so it is still marked
                 * up when it is actually down
                 */
-               if (slave->link == BOND_LINK_UP) {
+               if (!IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
                        slave->link = BOND_LINK_DOWN;
                        if (slave->link_failure_count < UINT_MAX)
                                slave->link_failure_count++;
@@ -2822,7 +2721,22 @@ static void bond_ab_arp_probe(struct bonding *bond)
                        pr_info("%s: backup interface %s is now down.\n",
                                bond->dev->name, slave->dev->name);
                }
+               if (slave == bond->current_arp_slave)
+                       found = true;
        }
+
+       if (!new_slave && before)
+               new_slave = before;
+
+       if (!new_slave)
+               return;
+
+       new_slave->link = BOND_LINK_BACK;
+       bond_set_slave_active_flags(new_slave);
+       bond_arp_send_all(bond, new_slave);
+       new_slave->jiffies = jiffies;
+       bond->current_arp_slave = new_slave;
+
 }
 
 void bond_activebackup_arp_mon(struct work_struct *work)
@@ -2836,7 +2750,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
 
        delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
 
-       if (list_empty(&bond->slave_list))
+       if (!bond_has_slaves(bond))
                goto re_arm;
 
        should_notify_peers = bond_should_notify_peers(bond);
@@ -3033,99 +2947,85 @@ static struct notifier_block bond_netdev_notifier = {
 
 /*---------------------------- Hashing Policies -----------------------------*/
 
-/*
- * Hash for the output device based upon layer 2 data
- */
-static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
+/* L2 hash helper */
+static inline u32 bond_eth_hash(struct sk_buff *skb)
 {
        struct ethhdr *data = (struct ethhdr *)skb->data;
 
        if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
-               return (data->h_dest[5] ^ data->h_source[5]) % count;
+               return data->h_dest[5] ^ data->h_source[5];
 
        return 0;
 }
 
-/*
- * Hash for the output device based upon layer 2 and layer 3 data. If
- * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
- */
-static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
+                             struct flow_keys *fk)
 {
-       const struct ethhdr *data;
+       const struct ipv6hdr *iph6;
        const struct iphdr *iph;
-       const struct ipv6hdr *ipv6h;
-       u32 v6hash;
-       const __be32 *s, *d;
+       int noff, proto = -1;
 
-       if (skb->protocol == htons(ETH_P_IP) &&
-           pskb_network_may_pull(skb, sizeof(*iph))) {
+       if (bond->params.xmit_policy > BOND_XMIT_POLICY_LAYER23)
+               return skb_flow_dissect(skb, fk);
+
+       fk->ports = 0;
+       noff = skb_network_offset(skb);
+       if (skb->protocol == htons(ETH_P_IP)) {
+               if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+                       return false;
                iph = ip_hdr(skb);
-               data = (struct ethhdr *)skb->data;
-               return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
-                       (data->h_dest[5] ^ data->h_source[5])) % count;
-       } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  pskb_network_may_pull(skb, sizeof(*ipv6h))) {
-               ipv6h = ipv6_hdr(skb);
-               data = (struct ethhdr *)skb->data;
-               s = &ipv6h->saddr.s6_addr32[0];
-               d = &ipv6h->daddr.s6_addr32[0];
-               v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
-               v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
-               return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
-       }
-
-       return bond_xmit_hash_policy_l2(skb, count);
+               fk->src = iph->saddr;
+               fk->dst = iph->daddr;
+               noff += iph->ihl << 2;
+               if (!ip_is_fragment(iph))
+                       proto = iph->protocol;
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+                       return false;
+               iph6 = ipv6_hdr(skb);
+               fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
+               fk->dst = (__force __be32)ipv6_addr_hash(&iph6->daddr);
+               noff += sizeof(*iph6);
+               proto = iph6->nexthdr;
+       } else {
+               return false;
+       }
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34 && proto >= 0)
+               fk->ports = skb_flow_get_ports(skb, noff, proto);
+
+       return true;
 }
 
-/*
- * Hash for the output device based upon layer 3 and layer 4 data. If
- * the packet is a frag or not TCP or UDP, just use layer 3 data.  If it is
- * altogether not IP, fall back on bond_xmit_hash_policy_l2()
+/**
+ * bond_xmit_hash - generate a hash value based on the xmit policy
+ * @bond: bonding device
+ * @skb: buffer to use for headers
+ * @count: modulo value
+ *
+ * This function extracts the necessary headers from the skb buffer and uses
+ * them to generate a hash based on the xmit_policy set in the bonding device;
+ * the result is reduced modulo count before it is returned.
  */
-static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count)
 {
-       u32 layer4_xor = 0;
-       const struct iphdr *iph;
-       const struct ipv6hdr *ipv6h;
-       const __be32 *s, *d;
-       const __be16 *l4 = NULL;
-       __be16 _l4[2];
-       int noff = skb_network_offset(skb);
-       int poff;
-
-       if (skb->protocol == htons(ETH_P_IP) &&
-           pskb_may_pull(skb, noff + sizeof(*iph))) {
-               iph = ip_hdr(skb);
-               poff = proto_ports_offset(iph->protocol);
+       struct flow_keys flow;
+       u32 hash;
 
-               if (!ip_is_fragment(iph) && poff >= 0) {
-                       l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
-                                               sizeof(_l4), &_l4);
-                       if (l4)
-                               layer4_xor = ntohs(l4[0] ^ l4[1]);
-               }
-               return (layer4_xor ^
-                       ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
-       } else if (skb->protocol == htons(ETH_P_IPV6) &&
-                  pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
-               ipv6h = ipv6_hdr(skb);
-               poff = proto_ports_offset(ipv6h->nexthdr);
-               if (poff >= 0) {
-                       l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
-                                               sizeof(_l4), &_l4);
-                       if (l4)
-                               layer4_xor = ntohs(l4[0] ^ l4[1]);
-               }
-               s = &ipv6h->saddr.s6_addr32[0];
-               d = &ipv6h->daddr.s6_addr32[0];
-               layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
-               layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
-                              (layer4_xor >> 8);
-               return layer4_xor % count;
-       }
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+           !bond_flow_dissect(bond, skb, &flow))
+               return bond_eth_hash(skb) % count;
+
+       if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
+           bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23)
+               hash = bond_eth_hash(skb);
+       else
+               hash = (__force u32)flow.ports;
+       hash ^= (__force u32)flow.dst ^ (__force u32)flow.src;
+       hash ^= (hash >> 16);
+       hash ^= (hash >> 8);
 
-       return bond_xmit_hash_policy_l2(skb, count);
+       return hash % count;
 }
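
For the non-layer2 policies the hash above is the packed ports word (or the L2 byte hash for the 2+3 policies) xored with both addresses from the dissected flow, then folded down by 16 and 8 bits before the modulo. A standalone worked example of that folding, in userspace C with fixed sample addresses and ports (sample values only, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t src = 0xc0a80001;              /* 192.168.0.1 */
        uint32_t dst = 0xc0a80002;              /* 192.168.0.2 */
        uint32_t ports = (443u << 16) | 51000u; /* both L4 ports packed */
        unsigned int count = 4;                 /* number of slaves */
        uint32_t hash = ports;

        hash ^= dst ^ src;
        hash ^= (hash >> 16);
        hash ^= (hash >> 8);

        printf("selected slave index: %u\n", hash % count);
        return 0;
}
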
 
 /*-------------------------- Device entry points ----------------------------*/
@@ -3155,13 +3055,14 @@ static void bond_work_cancel_all(struct bonding *bond)
 static int bond_open(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
        /* reset slave->backup and slave->inactive */
        read_lock(&bond->lock);
-       if (!list_empty(&bond->slave_list)) {
+       if (bond_has_slaves(bond)) {
                read_lock(&bond->curr_slave_lock);
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
                                && (slave != bond->curr_active_slave)) {
                                bond_set_slave_inactive_flags(slave);
@@ -3221,12 +3122,13 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct rtnl_link_stats64 temp;
+       struct list_head *iter;
        struct slave *slave;
 
        memset(stats, 0, sizeof(*stats));
 
        read_lock_bh(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                const struct rtnl_link_stats64 *sstats =
                        dev_get_stats(slave->dev, &temp);
 
@@ -3263,6 +3165,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
 
 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
 {
+       struct bonding *bond = netdev_priv(bond_dev);
        struct net_device *slave_dev = NULL;
        struct ifbond k_binfo;
        struct ifbond __user *u_binfo = NULL;
@@ -3293,7 +3196,6 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 
 
                if (mii->reg_num == 1) {
-                       struct bonding *bond = netdev_priv(bond_dev);
                        mii->val_out = 0;
                        read_lock(&bond->lock);
                        read_lock(&bond->curr_slave_lock);
@@ -3365,7 +3267,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
                        break;
                case BOND_CHANGE_ACTIVE_OLD:
                case SIOCBONDCHANGEACTIVE:
-                       res = bond_ioctl_change_active(bond_dev, slave_dev);
+                       res = bond_option_active_slave_set(bond, slave_dev);
                        break;
                default:
                        res = -EOPNOTSUPP;
@@ -3393,22 +3295,24 @@ static void bond_change_rx_flags(struct net_device *bond_dev, int change)
 static void bond_set_rx_mode(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct list_head *iter;
        struct slave *slave;
 
-       ASSERT_RTNL();
 
+       rcu_read_lock();
        if (USES_PRIMARY(bond->params.mode)) {
-               slave = rtnl_dereference(bond->curr_active_slave);
+               slave = rcu_dereference(bond->curr_active_slave);
                if (slave) {
                        dev_uc_sync(slave->dev, bond_dev);
                        dev_mc_sync(slave->dev, bond_dev);
                }
        } else {
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave_rcu(bond, slave, iter) {
                        dev_uc_sync_multiple(slave->dev, bond_dev);
                        dev_mc_sync_multiple(slave->dev, bond_dev);
                }
        }
+       rcu_read_unlock();
 }
 
 static int bond_neigh_init(struct neighbour *n)
@@ -3471,7 +3375,8 @@ static int bond_neigh_setup(struct net_device *dev,
 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave;
+       struct slave *slave, *rollback_slave;
+       struct list_head *iter;
        int res = 0;
 
        pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
@@ -3492,10 +3397,9 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
         * call to the base driver.
         */
 
-       bond_for_each_slave(bond, slave) {
-               pr_debug("s %p s->p %p c_m %p\n",
+       bond_for_each_slave(bond, slave, iter) {
+               pr_debug("s %p c_m %p\n",
                         slave,
-                        bond_prev_slave(bond, slave),
                         slave->dev->netdev_ops->ndo_change_mtu);
 
                res = dev_set_mtu(slave->dev, new_mtu);
@@ -3520,13 +3424,16 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
 
 unwind:
        /* unwind from head to the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave) {
+       bond_for_each_slave(bond, rollback_slave, iter) {
                int tmp_res;
 
-               tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
+               if (rollback_slave == slave)
+                       break;
+
+               tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
                if (tmp_res) {
                        pr_debug("unwind err %d dev %s\n",
-                                tmp_res, slave->dev->name);
+                                tmp_res, rollback_slave->dev->name);
                }
        }
 
@@ -3543,8 +3450,9 @@ unwind:
 static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *slave, *rollback_slave;
        struct sockaddr *sa = addr, tmp_sa;
-       struct slave *slave;
+       struct list_head *iter;
        int res = 0;
 
        if (bond->params.mode == BOND_MODE_ALB)
@@ -3578,7 +3486,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
         * call to the base driver.
         */
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
                pr_debug("slave %p %s\n", slave, slave->dev->name);
 
@@ -3610,13 +3518,16 @@ unwind:
        tmp_sa.sa_family = bond_dev->type;
 
        /* unwind from head to the slave that failed */
-       bond_for_each_slave_continue_reverse(bond, slave) {
+       bond_for_each_slave(bond, rollback_slave, iter) {
                int tmp_res;
 
-               tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
+               if (rollback_slave == slave)
+                       break;
+
+               tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
                if (tmp_res) {
                        pr_debug("unwind err %d dev %s\n",
-                                tmp_res, slave->dev->name);
+                                tmp_res, rollback_slave->dev->name);
                }
        }
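
With bond_for_each_slave_continue_reverse() gone, both unwind paths above restart from the head of the slave list and stop at the slave whose update failed. A self-contained sketch of the same rollback shape over a plain array; the names and the simulated failure are illustrative:

#include <stdio.h>

/* Apply a new value to every element; if one "fails", roll back by
 * walking from the head and stopping at the element that failed. */
static int apply_all(int *vals, int n, int new_val, int old_val, int fail_at)
{
	int i, failed = -1;

	for (i = 0; i < n; i++) {
		if (i == fail_at) {	/* simulate dev_set_mtu() failing */
			failed = i;
			break;
		}
		vals[i] = new_val;
	}
	if (failed < 0)
		return 0;

	for (i = 0; i < n; i++) {	/* unwind from head to the failure */
		if (i == failed)
			break;
		vals[i] = old_val;
	}
	return -1;
}

int main(void)
{
	int mtus[4] = { 1500, 1500, 1500, 1500 };
	int ret = apply_all(mtus, 4, 9000, 1500, 2);

	printf("ret=%d first=%d\n", ret, mtus[0]);
	return 0;
}
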
 
@@ -3635,11 +3546,12 @@ unwind:
  */
 void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 {
+       struct list_head *iter;
        struct slave *slave;
        int i = slave_id;
 
        /* Here we start from the slave with slave_id */
-       bond_for_each_slave_rcu(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0) {
                        if (slave_can_tx(slave)) {
                                bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -3650,7 +3562,7 @@ void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 
        /* Here we start from the first slave up to slave_id */
        i = slave_id;
-       bond_for_each_slave_rcu(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (--i < 0)
                        break;
                if (slave_can_tx(slave)) {
@@ -3707,8 +3619,7 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
        return NETDEV_TX_OK;
 }
 
-/*
- * In bond_xmit_xor() , we determine the output device by using a pre-
+/* In bond_xmit_xor() , we determine the output device by using a pre-
  * determined xmit_hash_policy(), If the selected device is not enabled,
  * find the next active slave.
  */
@@ -3716,8 +3627,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
 
-       bond_xmit_slave_id(bond, skb,
-                          bond->xmit_hash_policy(skb, bond->slave_cnt));
+       bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb, bond->slave_cnt));
 
        return NETDEV_TX_OK;
 }
@@ -3727,8 +3637,9 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave = NULL;
+       struct list_head *iter;
 
-       bond_for_each_slave_rcu(bond, slave) {
+       bond_for_each_slave_rcu(bond, slave, iter) {
                if (bond_is_last_slave(bond, slave))
                        break;
                if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
@@ -3753,22 +3664,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
 
 /*------------------------- Device initialization ---------------------------*/
 
-static void bond_set_xmit_hash_policy(struct bonding *bond)
-{
-       switch (bond->params.xmit_policy) {
-       case BOND_XMIT_POLICY_LAYER23:
-               bond->xmit_hash_policy = bond_xmit_hash_policy_l23;
-               break;
-       case BOND_XMIT_POLICY_LAYER34:
-               bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
-               break;
-       case BOND_XMIT_POLICY_LAYER2:
-       default:
-               bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
-               break;
-       }
-}
-
 /*
  * Lookup the slave that corresponds to a qid
  */
@@ -3777,13 +3672,14 @@ static inline int bond_slave_override(struct bonding *bond,
 {
        struct slave *slave = NULL;
        struct slave *check_slave;
+       struct list_head *iter;
        int res = 1;
 
        if (!skb->queue_mapping)
                return 1;
 
        /* Find out if any slaves have the same mapping as this skb. */
-       bond_for_each_slave_rcu(bond, check_slave) {
+       bond_for_each_slave_rcu(bond, check_slave, iter) {
                if (check_slave->queue_id == skb->queue_mapping) {
                        slave = check_slave;
                        break;
@@ -3869,7 +3765,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
 
        rcu_read_lock();
-       if (!list_empty(&bond->slave_list))
+       if (bond_has_slaves(bond))
                ret = __bond_start_xmit(skb, dev);
        else
                kfree_skb(skb);
@@ -3878,43 +3774,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return ret;
 }
 
-/*
- * set bond mode specific net device operations
- */
-void bond_set_mode_ops(struct bonding *bond, int mode)
-{
-       struct net_device *bond_dev = bond->dev;
-
-       switch (mode) {
-       case BOND_MODE_ROUNDROBIN:
-               break;
-       case BOND_MODE_ACTIVEBACKUP:
-               break;
-       case BOND_MODE_XOR:
-               bond_set_xmit_hash_policy(bond);
-               break;
-       case BOND_MODE_BROADCAST:
-               break;
-       case BOND_MODE_8023AD:
-               bond_set_xmit_hash_policy(bond);
-               break;
-       case BOND_MODE_ALB:
-               /* FALLTHRU */
-       case BOND_MODE_TLB:
-               break;
-       default:
-               /* Should never happen, mode already checked */
-               pr_err("%s: Error: Unknown bonding mode %d\n",
-                      bond_dev->name, mode);
-               break;
-       }
-}
-
 static int bond_ethtool_get_settings(struct net_device *bond_dev,
                                     struct ethtool_cmd *ecmd)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        unsigned long speed = 0;
+       struct list_head *iter;
        struct slave *slave;
 
        ecmd->duplex = DUPLEX_UNKNOWN;
@@ -3926,7 +3791,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
         * this is an accurate maximum.
         */
        read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (SLAVE_IS_OK(slave)) {
                        if (slave->speed != SPEED_UNKNOWN)
                                speed += slave->speed;
@@ -3994,14 +3859,13 @@ static void bond_destructor(struct net_device *bond_dev)
        free_netdev(bond_dev);
 }
 
-static void bond_setup(struct net_device *bond_dev)
+void bond_setup(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
 
        /* initialize rwlocks */
        rwlock_init(&bond->lock);
        rwlock_init(&bond->curr_slave_lock);
-       INIT_LIST_HEAD(&bond->slave_list);
        bond->params = bonding_defaults;
 
        /* Initialize pointers */
@@ -4011,7 +3875,6 @@ static void bond_setup(struct net_device *bond_dev)
        ether_setup(bond_dev);
        bond_dev->netdev_ops = &bond_netdev_ops;
        bond_dev->ethtool_ops = &bond_ethtool_ops;
-       bond_set_mode_ops(bond, bond->params.mode);
 
        bond_dev->destructor = bond_destructor;
 
@@ -4057,12 +3920,13 @@ static void bond_setup(struct net_device *bond_dev)
 static void bond_uninit(struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct slave *slave, *tmp_slave;
+       struct list_head *iter;
+       struct slave *slave;
 
        bond_netpoll_cleanup(bond_dev);
 
        /* Release the bonded slaves */
-       list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
+       bond_for_each_slave(bond, slave, iter)
                __bond_release_one(bond_dev, slave->dev, true);
        pr_info("%s: released all slaves\n", bond_dev->name);
 
@@ -4495,32 +4359,11 @@ static int bond_init(struct net_device *bond_dev)
        return 0;
 }
 
-static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
-{
-       if (tb[IFLA_ADDRESS]) {
-               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
-                       return -EINVAL;
-               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
-                       return -EADDRNOTAVAIL;
-       }
-       return 0;
-}
-
-static unsigned int bond_get_num_tx_queues(void)
+unsigned int bond_get_num_tx_queues(void)
 {
        return tx_queues;
 }
 
-static struct rtnl_link_ops bond_link_ops __read_mostly = {
-       .kind                   = "bond",
-       .priv_size              = sizeof(struct bonding),
-       .setup                  = bond_setup,
-       .validate               = bond_validate,
-       .get_num_tx_queues      = bond_get_num_tx_queues,
-       .get_num_rx_queues      = bond_get_num_tx_queues, /* Use the same number
-                                                            as for TX queues */
-};
-
 /* Create a new bond based on the specified name and bonding parameters.
  * If name is NULL, obtain a suitable "bond%d" name for us.
  * Caller must NOT hold rtnl_lock; we need to release it here before we
@@ -4607,7 +4450,7 @@ static int __init bonding_init(void)
        if (res)
                goto out;
 
-       res = rtnl_link_register(&bond_link_ops);
+       res = bond_netlink_init();
        if (res)
                goto err_link;
 
@@ -4623,7 +4466,7 @@ static int __init bonding_init(void)
 out:
        return res;
 err:
-       rtnl_link_unregister(&bond_link_ops);
+       bond_netlink_fini();
 err_link:
        unregister_pernet_subsys(&bond_net_ops);
        goto out;
@@ -4636,7 +4479,7 @@ static void __exit bonding_exit(void)
 
        bond_destroy_debugfs();
 
-       rtnl_link_unregister(&bond_link_ops);
+       bond_netlink_fini();
        unregister_pernet_subsys(&bond_net_ops);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4653,4 +4496,3 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
-MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
new file mode 100644 (file)
index 0000000..7661261
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * drivers/net/bond/bond_netlink.c - Netlink interface for bonding
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_link.h>
+#include <linux/if_ether.h>
+#include <net/netlink.h>
+#include <net/rtnetlink.h>
+#include "bonding.h"
+
+static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
+       [IFLA_BOND_MODE]                = { .type = NLA_U8 },
+       [IFLA_BOND_ACTIVE_SLAVE]        = { .type = NLA_U32 },
+};
+
+static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       if (tb[IFLA_ADDRESS]) {
+               if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+                       return -EINVAL;
+               if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+                       return -EADDRNOTAVAIL;
+       }
+       return 0;
+}
+
+static int bond_changelink(struct net_device *bond_dev,
+                          struct nlattr *tb[], struct nlattr *data[])
+{
+       struct bonding *bond = netdev_priv(bond_dev);
+       int err;
+
+       if (data && data[IFLA_BOND_MODE]) {
+               int mode = nla_get_u8(data[IFLA_BOND_MODE]);
+
+               err = bond_option_mode_set(bond, mode);
+               if (err)
+                       return err;
+       }
+       if (data && data[IFLA_BOND_ACTIVE_SLAVE]) {
+               int ifindex = nla_get_u32(data[IFLA_BOND_ACTIVE_SLAVE]);
+               struct net_device *slave_dev;
+
+               if (ifindex == 0) {
+                       slave_dev = NULL;
+               } else {
+                       slave_dev = __dev_get_by_index(dev_net(bond_dev),
+                                                      ifindex);
+                       if (!slave_dev)
+                               return -ENODEV;
+               }
+               err = bond_option_active_slave_set(bond, slave_dev);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
+                       struct nlattr *tb[], struct nlattr *data[])
+{
+       int err;
+
+       err = bond_changelink(bond_dev, tb, data);
+       if (err < 0)
+               return err;
+
+       return register_netdevice(bond_dev);
+}
+
+static size_t bond_get_size(const struct net_device *bond_dev)
+{
+       return nla_total_size(sizeof(u8))       /* IFLA_BOND_MODE */
+               + nla_total_size(sizeof(u32));  /* IFLA_BOND_ACTIVE_SLAVE */
+}
+
+static int bond_fill_info(struct sk_buff *skb,
+                         const struct net_device *bond_dev)
+{
+       struct bonding *bond = netdev_priv(bond_dev);
+       struct net_device *slave_dev = bond_option_active_slave_get(bond);
+
+       if (nla_put_u8(skb, IFLA_BOND_MODE, bond->params.mode) ||
+           (slave_dev &&
+            nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+struct rtnl_link_ops bond_link_ops __read_mostly = {
+       .kind                   = "bond",
+       .priv_size              = sizeof(struct bonding),
+       .setup                  = bond_setup,
+       .maxtype                = IFLA_BOND_MAX,
+       .policy                 = bond_policy,
+       .validate               = bond_validate,
+       .newlink                = bond_newlink,
+       .changelink             = bond_changelink,
+       .get_size               = bond_get_size,
+       .fill_info              = bond_fill_info,
+       .get_num_tx_queues      = bond_get_num_tx_queues,
+       .get_num_rx_queues      = bond_get_num_tx_queues, /* Use the same number
+                                                            as for TX queues */
+};
+
+int __init bond_netlink_init(void)
+{
+       return rtnl_link_register(&bond_link_ops);
+}
+
+void bond_netlink_fini(void)
+{
+       rtnl_link_unregister(&bond_link_ops);
+}
+
+MODULE_ALIAS_RTNL_LINK("bond");
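
bond_validate() above keeps the usual rtnl link check on IFLA_ADDRESS: the payload must be exactly ETH_ALEN bytes and a valid unicast, non-zero MAC. A standalone re-implementation of that check, using local stand-ins rather than the kernel helpers:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MY_ETH_ALEN 6

/* Mirror of the validate logic: exactly 6 bytes, not multicast,
 * not all-zero (standalone sketch, not the kernel's helpers). */
static int validate_addr(const uint8_t *addr, size_t len)
{
	static const uint8_t zero[MY_ETH_ALEN];

	if (len != MY_ETH_ALEN)
		return -1;			/* would be -EINVAL */
	if (addr[0] & 0x01)			/* multicast bit set */
		return -2;			/* would be -EADDRNOTAVAIL */
	if (!memcmp(addr, zero, MY_ETH_ALEN))	/* all-zero address */
		return -2;
	return 0;
}

int main(void)
{
	const uint8_t good[] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t bad[]  = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("%d %d\n", validate_addr(good, 6), validate_addr(bad, 6));
	return 0;
}
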
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
new file mode 100644 (file)
index 0000000..9a5223c
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * drivers/net/bond/bond_options.c - bonding options
+ * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/if.h>
+#include <linux/netdevice.h>
+#include <linux/rwlock.h>
+#include <linux/rcupdate.h>
+#include "bonding.h"
+
+static bool bond_mode_is_valid(int mode)
+{
+       int i;
+
+       for (i = 0; bond_mode_tbl[i].modename; i++);
+
+       return mode >= 0 && mode < i;
+}
+
+int bond_option_mode_set(struct bonding *bond, int mode)
+{
+       if (!bond_mode_is_valid(mode)) {
+               pr_err("invalid mode value %d.\n", mode);
+               return -EINVAL;
+       }
+
+       if (bond->dev->flags & IFF_UP) {
+               pr_err("%s: unable to update mode because interface is up.\n",
+                      bond->dev->name);
+               return -EPERM;
+       }
+
+       if (bond_has_slaves(bond)) {
+               pr_err("%s: unable to update mode because bond has slaves.\n",
+                       bond->dev->name);
+               return -EPERM;
+       }
+
+       if (BOND_MODE_IS_LB(mode) && bond->params.arp_interval) {
+               pr_err("%s: %s mode is incompatible with arp monitoring.\n",
+                      bond->dev->name, bond_mode_tbl[mode].modename);
+               return -EINVAL;
+       }
+
+       /* don't cache arp_validate between modes */
+       bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
+       bond->params.mode = mode;
+       return 0;
+}
+
+static struct net_device *__bond_option_active_slave_get(struct bonding *bond,
+                                                        struct slave *slave)
+{
+       return USES_PRIMARY(bond->params.mode) && slave ? slave->dev : NULL;
+}
+
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
+{
+       struct slave *slave = rcu_dereference(bond->curr_active_slave);
+
+       return __bond_option_active_slave_get(bond, slave);
+}
+
+struct net_device *bond_option_active_slave_get(struct bonding *bond)
+{
+       return __bond_option_active_slave_get(bond, bond->curr_active_slave);
+}
+
+int bond_option_active_slave_set(struct bonding *bond,
+                                struct net_device *slave_dev)
+{
+       int ret = 0;
+
+       if (slave_dev) {
+               if (!netif_is_bond_slave(slave_dev)) {
+                       pr_err("Device %s is not bonding slave.\n",
+                              slave_dev->name);
+                       return -EINVAL;
+               }
+
+               if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
+                       pr_err("%s: Device %s is not our slave.\n",
+                              bond->dev->name, slave_dev->name);
+                       return -EINVAL;
+               }
+       }
+
+       if (!USES_PRIMARY(bond->params.mode)) {
+               pr_err("%s: Unable to change active slave; %s is in mode %d\n",
+                      bond->dev->name, bond->dev->name, bond->params.mode);
+               return -EINVAL;
+       }
+
+       block_netpoll_tx();
+       read_lock(&bond->lock);
+       write_lock_bh(&bond->curr_slave_lock);
+
+       /* check to see if we are clearing active */
+       if (!slave_dev) {
+               pr_info("%s: Clearing current active slave.\n",
+                       bond->dev->name);
+               rcu_assign_pointer(bond->curr_active_slave, NULL);
+               bond_select_active_slave(bond);
+       } else {
+               struct slave *old_active = bond->curr_active_slave;
+               struct slave *new_active = bond_slave_get_rtnl(slave_dev);
+
+               BUG_ON(!new_active);
+
+               if (new_active == old_active) {
+                       /* do nothing */
+                       pr_info("%s: %s is already the current active slave.\n",
+                               bond->dev->name, new_active->dev->name);
+               } else {
+                       if (old_active && (new_active->link == BOND_LINK_UP) &&
+                           IS_UP(new_active->dev)) {
+                               pr_info("%s: Setting %s as active slave.\n",
+                                       bond->dev->name, new_active->dev->name);
+                               bond_change_active_slave(bond, new_active);
+                       } else {
+                               pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+                                      bond->dev->name, new_active->dev->name,
+                                      new_active->dev->name);
+                               ret = -EINVAL;
+                       }
+               }
+       }
+
+       write_unlock_bh(&bond->curr_slave_lock);
+       read_unlock(&bond->lock);
+       unblock_netpoll_tx();
+       return ret;
+}
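
bond_mode_is_valid() above counts the sentinel-terminated bond_mode_tbl and range-checks the requested mode against that count. A standalone sketch of the same sentinel-counting check; the table contents here are only illustrative:

#include <stdio.h>

struct parm_tbl {
	const char *modename;	/* a NULL name terminates the table */
	int mode;
};

static const struct parm_tbl mode_tbl[] = {
	{ "balance-rr",    0 },
	{ "active-backup", 1 },
	{ "balance-xor",   2 },
	{ NULL, -1 },
};

/* Count entries up to the sentinel, then range-check the value. */
static int mode_is_valid(int mode)
{
	int i;

	for (i = 0; mode_tbl[i].modename; i++)
		;
	return mode >= 0 && mode < i;
}

int main(void)
{
	printf("%d %d\n", mode_is_valid(1), mode_is_valid(7));
	return 0;
}
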
index 20a6ee25bb63e42cdf89c0273d8e1afa30234f26..fb868d6c22dac5c75ecd156e61089e7f6806b6c9 100644 (file)
@@ -10,8 +10,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(&bond->lock)
 {
        struct bonding *bond = seq->private;
-       loff_t off = 0;
+       struct list_head *iter;
        struct slave *slave;
+       loff_t off = 0;
 
        /* make sure the bond won't be taken away */
        rcu_read_lock();
@@ -20,7 +21,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
        if (*pos == 0)
                return SEQ_START_TOKEN;
 
-       bond_for_each_slave(bond, slave)
+       bond_for_each_slave(bond, slave, iter)
                if (++off == *pos)
                        return slave;
 
@@ -30,17 +31,25 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
 static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct bonding *bond = seq->private;
-       struct slave *slave = v;
+       struct list_head *iter;
+       struct slave *slave;
+       bool found = false;
 
        ++*pos;
        if (v == SEQ_START_TOKEN)
                return bond_first_slave(bond);
 
-       if (bond_is_last_slave(bond, slave))
+       if (bond_is_last_slave(bond, v))
                return NULL;
-       slave = bond_next_slave(bond, slave);
 
-       return slave;
+       bond_for_each_slave(bond, slave, iter) {
+               if (found)
+                       return slave;
+               if (slave == v)
+                       found = true;
+       }
+
+       return NULL;
 }
 
 static void bond_info_seq_stop(struct seq_file *seq, void *v)
index c29b836749b6323fe86c35e7762b25a3c8596978..47749c970a01a67c189188767c41e94e2ff1fbd1 100644 (file)
@@ -168,41 +168,6 @@ static const struct class_attribute class_attr_bonding_masters = {
        .namespace = bonding_namespace,
 };
 
-int bond_create_slave_symlinks(struct net_device *master,
-                              struct net_device *slave)
-{
-       char linkname[IFNAMSIZ+7];
-       int ret = 0;
-
-       /* first, create a link from the slave back to the master */
-       ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj),
-                               "master");
-       if (ret)
-               return ret;
-       /* next, create a link from the master to the slave */
-       sprintf(linkname, "slave_%s", slave->name);
-       ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj),
-                               linkname);
-
-       /* free the master link created earlier in case of error */
-       if (ret)
-               sysfs_remove_link(&(slave->dev.kobj), "master");
-
-       return ret;
-
-}
-
-void bond_destroy_slave_symlinks(struct net_device *master,
-                                struct net_device *slave)
-{
-       char linkname[IFNAMSIZ+7];
-
-       sysfs_remove_link(&(slave->dev.kobj), "master");
-       sprintf(linkname, "slave_%s", slave->name);
-       sysfs_remove_link(&(master->dev.kobj), linkname);
-}
-
-
 /*
  * Show the slaves in the current bond.
  */
@@ -210,11 +175,14 @@ static ssize_t bonding_show_slaves(struct device *d,
                                   struct device_attribute *attr, char *buf)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        struct slave *slave;
        int res = 0;
 
-       read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       bond_for_each_slave(bond, slave, iter) {
                if (res > (PAGE_SIZE - IFNAMSIZ)) {
                        /* not enough space for another interface name */
                        if ((PAGE_SIZE - res) > 10)
@@ -224,7 +192,9 @@ static ssize_t bonding_show_slaves(struct device *d,
                }
                res += sprintf(buf + res, "%s ", slave->dev->name);
        }
-       read_unlock(&bond->lock);
+
+       rtnl_unlock();
+
        if (res)
                buf[res-1] = '\n'; /* eat the leftover space */
 
@@ -313,50 +283,26 @@ static ssize_t bonding_store_mode(struct device *d,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
 {
-       int new_value, ret = count;
+       int new_value, ret;
        struct bonding *bond = to_bond(d);
 
-       if (!rtnl_trylock())
-               return restart_syscall();
-
-       if (bond->dev->flags & IFF_UP) {
-               pr_err("unable to update mode of %s because interface is up.\n",
-                      bond->dev->name);
-               ret = -EPERM;
-               goto out;
-       }
-
-       if (!list_empty(&bond->slave_list)) {
-               pr_err("unable to update mode of %s because it has slaves.\n",
-                       bond->dev->name);
-               ret = -EPERM;
-               goto out;
-       }
-
        new_value = bond_parse_parm(buf, bond_mode_tbl);
        if (new_value < 0)  {
                pr_err("%s: Ignoring invalid mode value %.*s.\n",
                       bond->dev->name, (int)strlen(buf) - 1, buf);
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
-       if ((new_value == BOND_MODE_ALB ||
-            new_value == BOND_MODE_TLB) &&
-           bond->params.arp_interval) {
-               pr_err("%s: %s mode is incompatible with arp monitoring.\n",
-                      bond->dev->name, bond_mode_tbl[new_value].modename);
-               ret = -EINVAL;
-               goto out;
+       if (!rtnl_trylock())
+               return restart_syscall();
+
+       ret = bond_option_mode_set(bond, new_value);
+       if (!ret) {
+               pr_info("%s: setting mode to %s (%d).\n",
+                       bond->dev->name, bond_mode_tbl[new_value].modename,
+                       new_value);
+               ret = count;
        }
 
-       /* don't cache arp_validate between modes */
-       bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
-       bond->params.mode = new_value;
-       bond_set_mode_ops(bond, bond->params.mode);
-       pr_info("%s: setting mode to %s (%d).\n",
-               bond->dev->name, bond_mode_tbl[new_value].modename,
-               new_value);
-out:
        rtnl_unlock();
        return ret;
 }
@@ -392,7 +338,6 @@ static ssize_t bonding_store_xmit_hash(struct device *d,
                ret = -EINVAL;
        } else {
                bond->params.xmit_policy = new_value;
-               bond_set_mode_ops(bond, bond->params.mode);
                pr_info("%s: setting xmit hash policy to %s (%d).\n",
                        bond->dev->name,
                        xmit_hashtype_tbl[new_value].modename, new_value);
@@ -522,7 +467,7 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
        if (!rtnl_trylock())
                return restart_syscall();
 
-       if (!list_empty(&bond->slave_list)) {
+       if (bond_has_slaves(bond)) {
                pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
                       bond->dev->name);
                ret = -EPERM;
@@ -656,11 +601,15 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                                         const char *buf, size_t count)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        struct slave *slave;
        __be32 newtarget, *targets;
        unsigned long *targets_rx;
        int ind, i, j, ret = -EINVAL;
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        targets = bond->params.arp_targets;
        newtarget = in_aton(buf + 1);
        /* look for adds */
@@ -688,7 +637,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                         &newtarget);
                /* not to race with bond_arp_rcv */
                write_lock_bh(&bond->lock);
-               bond_for_each_slave(bond, slave)
+               bond_for_each_slave(bond, slave, iter)
                        slave->target_last_arp_rx[ind] = jiffies;
                targets[ind] = newtarget;
                write_unlock_bh(&bond->lock);
@@ -714,7 +663,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
                        &newtarget);
 
                write_lock_bh(&bond->lock);
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        targets_rx = slave->target_last_arp_rx;
                        j = ind;
                        for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -734,6 +683,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
 
        ret = count;
 out:
+       rtnl_unlock();
        return ret;
 }
 static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets);
@@ -1111,6 +1061,7 @@ static ssize_t bonding_store_primary(struct device *d,
                                     const char *buf, size_t count)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        char ifname[IFNAMSIZ];
        struct slave *slave;
 
@@ -1138,7 +1089,7 @@ static ssize_t bonding_store_primary(struct device *d,
                goto out;
        }
 
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
                        pr_info("%s: Setting %s as primary slave.\n",
                                bond->dev->name, slave->dev->name);
@@ -1268,13 +1219,13 @@ static ssize_t bonding_show_active_slave(struct device *d,
                                         char *buf)
 {
        struct bonding *bond = to_bond(d);
-       struct slave *curr;
+       struct net_device *slave_dev;
        int count = 0;
 
        rcu_read_lock();
-       curr = rcu_dereference(bond->curr_active_slave);
-       if (USES_PRIMARY(bond->params.mode) && curr)
-               count = sprintf(buf, "%s\n", curr->dev->name);
+       slave_dev = bond_option_active_slave_get_rcu(bond);
+       if (slave_dev)
+               count = sprintf(buf, "%s\n", slave_dev->name);
        rcu_read_unlock();
 
        return count;
@@ -1284,80 +1235,33 @@ static ssize_t bonding_store_active_slave(struct device *d,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
 {
-       struct slave *slave, *old_active, *new_active;
+       int ret;
        struct bonding *bond = to_bond(d);
        char ifname[IFNAMSIZ];
+       struct net_device *dev;
 
        if (!rtnl_trylock())
                return restart_syscall();
 
-       old_active = new_active = NULL;
-       block_netpoll_tx();
-       read_lock(&bond->lock);
-       write_lock_bh(&bond->curr_slave_lock);
-
-       if (!USES_PRIMARY(bond->params.mode)) {
-               pr_info("%s: Unable to change active slave; %s is in mode %d\n",
-                       bond->dev->name, bond->dev->name, bond->params.mode);
-               goto out;
-       }
-
        sscanf(buf, "%15s", ifname); /* IFNAMSIZ */
-
-       /* check to see if we are clearing active */
        if (!strlen(ifname) || buf[0] == '\n') {
-               pr_info("%s: Clearing current active slave.\n",
-                       bond->dev->name);
-               rcu_assign_pointer(bond->curr_active_slave, NULL);
-               bond_select_active_slave(bond);
-               goto out;
-       }
-
-       bond_for_each_slave(bond, slave) {
-               if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
-                       old_active = bond->curr_active_slave;
-                       new_active = slave;
-                       if (new_active == old_active) {
-                               /* do nothing */
-                               pr_info("%s: %s is already the current"
-                                       " active slave.\n",
-                                       bond->dev->name,
-                                       slave->dev->name);
-                               goto out;
-                       } else {
-                               if ((new_active) &&
-                                   (old_active) &&
-                                   (new_active->link == BOND_LINK_UP) &&
-                                   IS_UP(new_active->dev)) {
-                                       pr_info("%s: Setting %s as active"
-                                               " slave.\n",
-                                               bond->dev->name,
-                                               slave->dev->name);
-                                       bond_change_active_slave(bond,
-                                                                new_active);
-                               } else {
-                                       pr_info("%s: Could not set %s as"
-                                               " active slave; either %s is"
-                                               " down or the link is down.\n",
-                                               bond->dev->name,
-                                               slave->dev->name,
-                                               slave->dev->name);
-                               }
-                               goto out;
-                       }
+               dev = NULL;
+       } else {
+               dev = __dev_get_by_name(dev_net(bond->dev), ifname);
+               if (!dev) {
+                       ret = -ENODEV;
+                       goto out;
                }
        }
 
-       pr_info("%s: Unable to set %.*s as active slave.\n",
-               bond->dev->name, (int)strlen(buf) - 1, buf);
- out:
-       write_unlock_bh(&bond->curr_slave_lock);
-       read_unlock(&bond->lock);
-       unblock_netpoll_tx();
+       ret = bond_option_active_slave_set(bond, dev);
+       if (!ret)
+               ret = count;
 
+ out:
        rtnl_unlock();
 
-       return count;
+       return ret;
 
 }
 static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR,
@@ -1493,14 +1397,14 @@ static ssize_t bonding_show_queue_id(struct device *d,
                                     char *buf)
 {
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        struct slave *slave;
        int res = 0;
 
        if (!rtnl_trylock())
                return restart_syscall();
 
-       read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
                        /* not enough space for another interface_name:queue_id pair */
                        if ((PAGE_SIZE - res) > 10)
@@ -1511,9 +1415,9 @@ static ssize_t bonding_show_queue_id(struct device *d,
                res += sprintf(buf + res, "%s:%d ",
                               slave->dev->name, slave->queue_id);
        }
-       read_unlock(&bond->lock);
        if (res)
                buf[res-1] = '\n'; /* eat the leftover space */
+
        rtnl_unlock();
 
        return res;
@@ -1529,6 +1433,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
 {
        struct slave *slave, *update_slave;
        struct bonding *bond = to_bond(d);
+       struct list_head *iter;
        u16 qid;
        int ret = count;
        char *delim;
@@ -1561,11 +1466,9 @@ static ssize_t bonding_store_queue_id(struct device *d,
        if (!sdev)
                goto err_no_cmd;
 
-       read_lock(&bond->lock);
-
        /* Search for the slave and check for duplicate qids */
        update_slave = NULL;
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (sdev == slave->dev)
                        /*
                         * We don't need to check the matching
@@ -1573,23 +1476,20 @@ static ssize_t bonding_store_queue_id(struct device *d,
                         */
                        update_slave = slave;
                else if (qid && qid == slave->queue_id) {
-                       goto err_no_cmd_unlock;
+                       goto err_no_cmd;
                }
        }
 
        if (!update_slave)
-               goto err_no_cmd_unlock;
+               goto err_no_cmd;
 
        /* Actually set the qids for the slave */
        update_slave->queue_id = qid;
 
-       read_unlock(&bond->lock);
 out:
        rtnl_unlock();
        return ret;
 
-err_no_cmd_unlock:
-       read_unlock(&bond->lock);
 err_no_cmd:
        pr_info("invalid input for queue_id set for %s.\n",
                bond->dev->name);
@@ -1619,8 +1519,12 @@ static ssize_t bonding_store_slaves_active(struct device *d,
 {
        struct bonding *bond = to_bond(d);
        int new_value, ret = count;
+       struct list_head *iter;
        struct slave *slave;
 
+       if (!rtnl_trylock())
+               return restart_syscall();
+
        if (sscanf(buf, "%d", &new_value) != 1) {
                pr_err("%s: no all_slaves_active value specified.\n",
                       bond->dev->name);
@@ -1640,8 +1544,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
                goto out;
        }
 
-       read_lock(&bond->lock);
-       bond_for_each_slave(bond, slave) {
+       bond_for_each_slave(bond, slave, iter) {
                if (!bond_is_active_slave(slave)) {
                        if (new_value)
                                slave->inactive = 0;
@@ -1649,8 +1552,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
                                slave->inactive = 1;
                }
        }
-       read_unlock(&bond->lock);
 out:
+       rtnl_unlock();
        return ret;
 }
 static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
index 03cf3fd14490c4e4dcf8cd2d61f8bca99c9d55db..046a60535e04e3fc7f9e6be79aa8c8cc0bb1867f 100644 (file)
 #define TX_QUEUE_OVERRIDE(mode)                                \
                        (((mode) == BOND_MODE_ACTIVEBACKUP) ||  \
                         ((mode) == BOND_MODE_ROUNDROBIN))
+
+#define BOND_MODE_IS_LB(mode)                  \
+               (((mode) == BOND_MODE_TLB) ||   \
+                ((mode) == BOND_MODE_ALB))
+
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.
        res; })
 
 /* slave list primitives */
-#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
+#define bond_slave_list(bond) (&(bond)->dev->adj_list.lower)
+
+#define bond_has_slaves(bond) !list_empty(bond_slave_list(bond))
 
 /* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
 #define bond_first_slave(bond) \
-       list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+       (bond_has_slaves(bond) ? \
+               netdev_adjacent_get_private(bond_slave_list(bond)->next) : \
+               NULL)
 #define bond_last_slave(bond) \
-       (list_empty(&(bond)->slave_list) ? NULL : \
-                                          bond_to_slave((bond)->slave_list.prev))
+       (bond_has_slaves(bond) ? \
+               netdev_adjacent_get_private(bond_slave_list(bond)->prev) : \
+               NULL)
 
-#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
-#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
-
-/* Since bond_first/last_slave can return NULL, these can return NULL too */
-#define bond_next_slave(bond, pos) \
-       (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
-                                        bond_to_slave((pos)->list.next))
-
-#define bond_prev_slave(bond, pos) \
-       (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
-                                         bond_to_slave((pos)->list.prev))
-
-/**
- * bond_for_each_slave_from - iterate the slaves list from a starting point
- * @bond:      the bond holding this list.
- * @pos:       current slave.
- * @cnt:       counter for max number of moves
- * @start:     starting point.
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_from(bond, pos, cnt, start) \
-       for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
-            cnt++, pos = bond_next_slave(bond, pos))
+#define bond_is_first_slave(bond, pos) (pos == bond_first_slave(bond))
+#define bond_is_last_slave(bond, pos) (pos == bond_last_slave(bond))
 
 /**
  * bond_for_each_slave - iterate over all slaves
  * @bond:      the bond holding this list
  * @pos:       current slave
+ * @iter:      list_head * iterator
  *
  * Caller must hold bond->lock
  */
-#define bond_for_each_slave(bond, pos) \
-       list_for_each_entry(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave(bond, pos, iter) \
+       netdev_for_each_lower_private((bond)->dev, pos, iter)
 
 /* Caller must have rcu_read_lock */
-#define bond_for_each_slave_rcu(bond, pos) \
-       list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
-
-/**
- * bond_for_each_slave_reverse - iterate in reverse from a given position
- * @bond:      the bond holding this list
- * @pos:       slave to continue from
- *
- * Caller must hold bond->lock
- */
-#define bond_for_each_slave_continue_reverse(bond, pos) \
-       list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
+#define bond_for_each_slave_rcu(bond, pos, iter) \
+       netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
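
With the slave list now kept in the net_device lower-adjacency list, the iteration macros take an explicit list_head cursor and fetch each slave through the adjacency entry's private pointer. A minimal standalone sketch of iterating through an external cursor and returning per-node private data; the types here are invented, not the netdev adjacency API:

#include <stdio.h>

struct adj {				/* one lower-device link */
	struct adj *next;
	void *private;			/* the attached slave state */
};

struct fake_slave {
	const char *name;
};

/* Walk the adjacency list through an external cursor and hand back
 * each node's private pointer. */
#define for_each_lower_private(head, pos, iter)			\
	for (iter = (head);					\
	     iter && ((pos = (iter)->private), 1);		\
	     iter = (iter)->next)

int main(void)
{
	struct fake_slave s1 = { "eth0" }, s2 = { "eth1" };
	struct adj a2 = { NULL, &s2 }, a1 = { &a2, &s1 };
	struct adj *iter;
	struct fake_slave *pos;

	for_each_lower_private(&a1, pos, iter)
		printf("%s\n", pos->name);
	return 0;
}
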
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 extern atomic_t netpoll_block_tx;
@@ -188,7 +167,6 @@ struct bond_parm_tbl {
 
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
-       struct list_head list;
        struct bonding *bond; /* our master */
        int    delay;
        unsigned long jiffies;
@@ -228,7 +206,6 @@ struct slave {
  */
 struct bonding {
        struct   net_device *dev; /* first - useful for panic debug */
-       struct   list_head slave_list;
        struct   slave *curr_active_slave;
        struct   slave *current_arp_slave;
        struct   slave *primary_slave;
@@ -245,7 +222,6 @@ struct bonding {
        char     proc_file_name[IFNAMSIZ];
 #endif /* CONFIG_PROC_FS */
        struct   list_head bond_list;
-       int      (*xmit_hash_policy)(struct sk_buff *, int);
        u16      rr_tx_counter;
        struct   ad_bond_info ad_info;
        struct   alb_bond_info alb_info;
@@ -276,13 +252,7 @@ struct bonding {
 static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
                                                  struct net_device *slave_dev)
 {
-       struct slave *slave = NULL;
-
-       bond_for_each_slave(bond, slave)
-               if (slave->dev == slave_dev)
-                       return slave;
-
-       return NULL;
+       return netdev_lower_dev_get_private(bond->dev, slave_dev);
 }
 
 static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -294,8 +264,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
 
 static inline bool bond_is_lb(const struct bonding *bond)
 {
-       return (bond->params.mode == BOND_MODE_TLB ||
-               bond->params.mode == BOND_MODE_ALB);
+       return BOND_MODE_IS_LB(bond->params.mode);
 }
 
 static inline void bond_set_active_slave(struct slave *slave)
@@ -432,21 +401,18 @@ static inline bool slave_can_tx(struct slave *slave)
 struct bond_net;
 
 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
 void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
 int bond_create(struct net *net, const char *name);
 int bond_create_sysfs(struct bond_net *net);
 void bond_destroy_sysfs(struct bond_net *net);
 void bond_prepare_sysfs_group(struct bonding *bond);
-int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
-void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev);
 void bond_mii_monitor(struct work_struct *);
 void bond_loadbalance_arp_mon(struct work_struct *);
 void bond_activebackup_arp_mon(struct work_struct *);
-void bond_set_mode_ops(struct bonding *bond, int mode);
+int bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, int count);
 int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
 void bond_select_active_slave(struct bonding *bond);
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
@@ -456,6 +422,14 @@ void bond_debug_register(struct bonding *bond);
 void bond_debug_unregister(struct bonding *bond);
 void bond_debug_reregister(struct bonding *bond);
 const char *bond_mode_name(int mode);
+void bond_setup(struct net_device *bond_dev);
+unsigned int bond_get_num_tx_queues(void);
+int bond_netlink_init(void);
+void bond_netlink_fini(void);
+int bond_option_mode_set(struct bonding *bond, int mode);
+int bond_option_active_slave_set(struct bonding *bond, struct net_device *slave_dev);
+struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
+struct net_device *bond_option_active_slave_get(struct bonding *bond);
 
 struct bond_net {
        struct net *            net;    /* Associated network namespace */
@@ -492,9 +466,24 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
 static inline struct slave *bond_slave_has_mac(struct bonding *bond,
                                               const u8 *mac)
 {
+       struct list_head *iter;
        struct slave *tmp;
 
-       bond_for_each_slave(bond, tmp)
+       bond_for_each_slave(bond, tmp, iter)
+               if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
+                       return tmp;
+
+       return NULL;
+}
+
+/* Caller must hold rcu_read_lock() for read */
+static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond,
+                                              const u8 *mac)
+{
+       struct list_head *iter;
+       struct slave *tmp;
+
+       bond_for_each_slave_rcu(bond, tmp, iter)
                if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
                        return tmp;
 
@@ -528,4 +517,7 @@ extern const struct bond_parm_tbl fail_over_mac_tbl[];
 extern const struct bond_parm_tbl pri_reselect_tbl[];
 extern struct bond_parm_tbl ad_select_tbl[];
 
+/* exported from bond_netlink.c */
+extern struct rtnl_link_ops bond_link_ops;
+
 #endif /* _LINUX_BONDING_H */
index b9ed1288ce2de87cb32de899b303e1c31565916e..985608634f8cccf9eefd32328c96fbbcb2ae0079 100644 (file)
@@ -686,18 +686,19 @@ static int cfv_probe(struct virtio_device *vdev)
                goto err;
 
        /* Get the CAIF configuration from virtio config space, if available */
-#define GET_VIRTIO_CONFIG_OPS(_v, _var, _f) \
-       ((_v)->config->get(_v, offsetof(struct virtio_caif_transf_config, _f), \
-                          &_var, \
-                          FIELD_SIZEOF(struct virtio_caif_transf_config, _f)))
-
        if (vdev->config->get) {
-               GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_hr, headroom);
-               GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_hr, headroom);
-               GET_VIRTIO_CONFIG_OPS(vdev, cfv->tx_tr, tailroom);
-               GET_VIRTIO_CONFIG_OPS(vdev, cfv->rx_tr, tailroom);
-               GET_VIRTIO_CONFIG_OPS(vdev, cfv->mtu, mtu);
-               GET_VIRTIO_CONFIG_OPS(vdev, cfv->mru, mtu);
+               virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+                            &cfv->tx_hr);
+               virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+                            &cfv->rx_hr);
+               virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+                            &cfv->tx_tr);
+               virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+                            &cfv->rx_tr);
+               virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+                            &cfv->mtu);
+               virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+                            &cfv->mru);
        } else {
                cfv->tx_hr = CFV_DEF_HEADROOM;
                cfv->rx_hr = CFV_DEF_HEADROOM;
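
virtio_cread() reads a single config-space field given the config struct and field name; the removed local macro did the same with offsetof() and the field size. A userspace sketch of that offset/size bookkeeping against a byte buffer standing in for config space; it assumes a little-endian host, no struct padding, and made-up values:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

struct fake_cfg {
	uint16_t headroom;
	uint16_t tailroom;
	uint32_t mtu;
};

/* Copy sizeof(field) bytes from offsetof(field): the bookkeeping that
 * the struct/field interface hides from the caller. */
#define cfg_read(buf, type, field, dst)					\
	memcpy(dst, (const uint8_t *)(buf) + offsetof(type, field),	\
	       sizeof(((type *)0)->field))

int main(void)
{
	uint8_t config_space[sizeof(struct fake_cfg)] = {
		0x40, 0x00,		/* headroom = 64  (little-endian) */
		0x10, 0x00,		/* tailroom = 16 */
		0xdc, 0x05, 0x00, 0x00,	/* mtu = 1500 */
	};
	uint16_t headroom;
	uint32_t mtu;

	cfg_read(config_space, struct fake_cfg, headroom, &headroom);
	cfg_read(config_space, struct fake_cfg, mtu, &mtu);
	printf("headroom=%u mtu=%u\n", (unsigned)headroom, (unsigned)mtu);
	return 0;
}
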
index 3b1ff6148702beb3818fbae7da2b0206f7d1c7f4..cf0f63e14e5339d4a6ed324076a4cad7854dfc48 100644 (file)
@@ -1347,7 +1347,7 @@ static int at91_can_probe(struct platform_device *pdev)
        priv->reg_base = addr;
        priv->devtype_data = *devtype_data;
        priv->clk = clk;
-       priv->pdata = pdev->dev.platform_data;
+       priv->pdata = dev_get_platdata(&pdev->dev);
        priv->mb0_id = 0x7ff;
 
        netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));
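
dev_get_platdata() is a small accessor over dev->platform_data, so drivers stop reaching into struct device directly. A trivial standalone sketch of the same accessor pattern, using a local type rather than the kernel's struct device:

#include <stdio.h>

struct fake_device {
	void *platform_data;
};

/* Accessor over the platform_data field, so callers never poke the
 * struct layout directly (sketch only). */
static inline void *fake_get_platdata(const struct fake_device *dev)
{
	return dev->platform_data;
}

int main(void)
{
	int pdata = 42;
	struct fake_device dev = { &pdata };

	printf("%d\n", *(int *)fake_get_platdata(&dev));
	return 0;
}
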
@@ -1405,10 +1405,10 @@ static int at91_can_remove(struct platform_device *pdev)
 
 static const struct platform_device_id at91_can_id_table[] = {
        {
-               .name = "at91_can",
+               .name = "at91sam9x5_can",
                .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
        }, {
-               .name = "at91sam9x5_can",
+               .name = "at91_can",
                .driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
        }, {
                /* sentinel */
index a2700d25ff0ed87fb800d4c157aeef911234d26e..8a0b515b33ea57c5804f1c9a82b774be8aadee7f 100644 (file)
@@ -539,7 +539,7 @@ static int bfin_can_probe(struct platform_device *pdev)
        struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
        unsigned short *pdata;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data provided!\n");
                err = -EINVAL;
index b374be7891a296bde66696e19d2137a47d7ffea3..bce0be54c2f59587a2498d2f37821f2634b886d9 100644 (file)
@@ -160,7 +160,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
        return 0;
 
 out_free_c_can:
-       pci_set_drvdata(pdev, NULL);
        free_c_can_dev(dev);
 out_iounmap:
        pci_iounmap(pdev, addr);
@@ -181,7 +180,6 @@ static void c_can_pci_remove(struct pci_dev *pdev)
 
        unregister_c_can_dev(dev);
 
-       pci_set_drvdata(pdev, NULL);
        free_c_can_dev(dev);
 
        pci_iounmap(pdev, priv->base);
index 294ced3cc227520883c6ebe50ac255634d0f0f7c..d66ac265269c68883070f7857b1cd6d928531f0e 100644 (file)
@@ -322,7 +322,7 @@ static struct platform_driver c_can_plat_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .owner = THIS_MODULE,
-               .of_match_table = of_match_ptr(c_can_of_table),
+               .of_match_table = c_can_of_table,
        },
        .probe = c_can_plat_probe,
        .remove = c_can_plat_remove,
index 034bdd816a60c74104b00b5b203c69f120f0555d..ad76734b3ecc79556ee4eb59e5c2025a10824b48 100644 (file)
@@ -152,7 +152,7 @@ static int cc770_get_platform_data(struct platform_device *pdev,
                                   struct cc770_priv *priv)
 {
 
-       struct cc770_platform_data *pdata = pdev->dev.platform_data;
+       struct cc770_platform_data *pdata = dev_get_platdata(&pdev->dev);
 
        priv->can.clock.freq = pdata->osc_freq;
        if (priv->cpu_interface & CPUIF_DSC)
@@ -203,7 +203,7 @@ static int cc770_platform_probe(struct platform_device *pdev)
 
        if (pdev->dev.of_node)
                err = cc770_get_of_node_data(pdev, priv);
-       else if (pdev->dev.platform_data)
+       else if (dev_get_platdata(&pdev->dev))
                err = cc770_get_platform_data(pdev, priv);
        else
                err = -ENODEV;
index f9cba4123c663084b66c2dbdaac5885a579278fd..1870c4731a572d193bef5e6dc0b7364e5f1751ed 100644 (file)
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
        size_t size;
 
        size = nla_total_size(sizeof(u32));   /* IFLA_CAN_STATE */
-       size += sizeof(struct can_ctrlmode);  /* IFLA_CAN_CTRLMODE */
+       size += nla_total_size(sizeof(struct can_ctrlmode));  /* IFLA_CAN_CTRLMODE */
        size += nla_total_size(sizeof(u32));  /* IFLA_CAN_RESTART_MS */
-       size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
-       size += sizeof(struct can_clock);     /* IFLA_CAN_CLOCK */
+       size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+       size += nla_total_size(sizeof(struct can_clock));     /* IFLA_CAN_CLOCK */
        if (priv->do_get_berr_counter)        /* IFLA_CAN_BERR_COUNTER */
-               size += sizeof(struct can_berr_counter);
+               size += nla_total_size(sizeof(struct can_berr_counter));
        if (priv->bittiming_const)            /* IFLA_CAN_BITTIMING_CONST */
-               size += sizeof(struct can_bittiming_const);
+               size += nla_total_size(sizeof(struct can_bittiming_const));
 
        return size;
 }
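
The can_get_size() fix above matters because a netlink attribute carries a struct nlattr header plus alignment padding on top of its payload; summing raw sizeof() values under-reserves the skb, and the later nla_put() calls can then fail. nla_total_size(payload) returns the aligned size of one attribute including its header. A small hedged sketch of the sizing/fill pairing (the attribute enum and payload struct are invented):

#include <linux/errno.h>
#include <net/netlink.h>

struct foo_timing {			/* invented example payload */
	u32 prop_seg;
	u32 phase_seg1;
};

enum { FOO_ATTR_UNSPEC, FOO_ATTR_STATE, FOO_ATTR_TIMING, __FOO_ATTR_MAX };

/* Reserve space the same way it will be consumed: one nla_total_size()
 * per attribute that the fill function will emit. */
static size_t foo_get_size(void)
{
	return nla_total_size(sizeof(u32)) +			/* FOO_ATTR_STATE  */
	       nla_total_size(sizeof(struct foo_timing));	/* FOO_ATTR_TIMING */
}

static int foo_fill_info(struct sk_buff *skb, const struct foo_timing *t, u32 state)
{
	if (nla_put_u32(skb, FOO_ATTR_STATE, state) ||
	    nla_put(skb, FOO_ATTR_TIMING, sizeof(*t), t))
		return -EMSGSIZE;
	return 0;
}
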
index 3f21142138b79868a0f18fa7d246432e99d96192..ae08cf129ebbb0bda31f4b74893574b9703253f0 100644 (file)
@@ -62,7 +62,7 @@
 #define FLEXCAN_MCR_BCC                        BIT(16)
 #define FLEXCAN_MCR_LPRIO_EN           BIT(13)
 #define FLEXCAN_MCR_AEN                        BIT(12)
-#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0xf)
+#define FLEXCAN_MCR_MAXMB(x)           ((x) & 0x1f)
 #define FLEXCAN_MCR_IDAM_A             (0 << 8)
 #define FLEXCAN_MCR_IDAM_B             (1 << 8)
 #define FLEXCAN_MCR_IDAM_C             (2 << 8)
@@ -735,9 +735,11 @@ static int flexcan_chip_start(struct net_device *dev)
         *
         */
        reg_mcr = flexcan_read(&regs->mcr);
+       reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
        reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
                FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN |
-               FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS;
+               FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_SRX_DIS |
+               FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
        netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
        flexcan_write(reg_mcr, &regs->mcr);
 
@@ -771,6 +773,10 @@ static int flexcan_chip_start(struct net_device *dev)
        netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
        flexcan_write(reg_ctrl, &regs->ctrl);
 
+       /* Abort any pending TX, mark Mailbox as INACTIVE */
+       flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+                     &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
+
        /* acceptance mask/acceptance code (accept everything) */
        flexcan_write(0x0, &regs->rxgmask);
        flexcan_write(0x0, &regs->rx14mask);
@@ -979,9 +985,9 @@ static void unregister_flexcandev(struct net_device *dev)
 }
 
 static const struct of_device_id flexcan_of_match[] = {
-       { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
-       { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
        { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+       { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+       { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -1062,7 +1068,7 @@ static int flexcan_probe(struct platform_device *pdev)
        priv->dev = dev;
        priv->clk_ipg = clk_ipg;
        priv->clk_per = clk_per;
-       priv->pdata = pdev->dev.platform_data;
+       priv->pdata = dev_get_platdata(&pdev->dev);
        priv->devtype_data = devtype_data;
 
        priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
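
In the flexcan hunks above, the MAXMB field mask is widened from 0xf to 0x1f (the field is wider than four bits on these cores), the field is explicitly cleared before the new value is OR-ed in so stale reset-default bits cannot survive, and the TX mailbox is parked INACTIVE to abort any pending transmission. The clear-then-set read-modify-write pattern for a multi-bit register field, as a hedged generic sketch (register and macro names are placeholders):

#include <linux/io.h>
#include <linux/types.h>

#define FOO_CTRL_MAXMB(x)	((x) & 0x1f)	/* 5-bit field, placeholder width */

/* Clear the whole field first, then OR in the new value, so bits left over
 * from the previous (or reset) value cannot leak into the result. */
static void foo_set_maxmb(void __iomem *ctrl_reg, unsigned int last_mb)
{
	u32 reg = readl(ctrl_reg);

	reg &= ~FOO_CTRL_MAXMB(0xff);		/* clear the field */
	reg |= FOO_CTRL_MAXMB(last_mb);		/* program the new index */
	writel(reg, ctrl_reg);
}
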
index 36bd6fa1c7f3e4760b5f69f65b420f8d55b66a5f..ab5909a7bae9efa378c0466936002460205f5cf1 100644 (file)
@@ -1769,7 +1769,7 @@ static int ican3_probe(struct platform_device *pdev)
        struct device *dev;
        int ret;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata)
                return -ENXIO;
 
index fe7dd696957ea3d8c4f7e34b21baa866bb797b55..08ac401e0214e091bdf8b851990dd8b45e0bec89 100644 (file)
@@ -999,7 +999,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
 {
        struct net_device *net;
        struct mcp251x_priv *priv;
-       struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+       struct mcp251x_platform_data *pdata = dev_get_platdata(&spi->dev);
        int ret = -ENODEV;
 
        if (!pdata)
index 9c24d60a23b1dbf459c75d14f7e3ac340350b9d0..e98abb97a0506672ab5cfb2af24fb5484fee61fe 100644 (file)
@@ -297,8 +297,8 @@ struct mscan_priv {
        struct napi_struct napi;
 };
 
-extern struct net_device *alloc_mscandev(void);
-extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
-extern void unregister_mscandev(struct net_device *dev);
+struct net_device *alloc_mscandev(void);
+int register_mscandev(struct net_device *dev, int mscan_clksrc);
+void unregister_mscandev(struct net_device *dev);
 
 #endif /* __MSCAN_H__ */
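
mscan.h above, like softing.h, 8390.h, 7990.h, atl1c.h and atl1e.h later in this diff, drops the extern keyword from function prototypes. Functions have external linkage by default, so extern on a declaration is pure noise and checkpatch warns about it in new code. A before/after header sketch with an invented prototype:

#ifndef _FOO_H_
#define _FOO_H_

struct net_device;

/* Old style: 'extern' is implied for functions and adds nothing.
 *	extern int foo_start(struct net_device *dev);
 * New style: same declaration, same linkage, less clutter. */
int foo_start(struct net_device *dev);
void foo_stop(struct net_device *dev);

#endif /* _FOO_H_ */
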
index 5c314a961970b0041c7776da283979f00c4114f8..5f0e9b3bfa7bb9a8f80f5267058b83c5db53f4c6 100644 (file)
@@ -964,7 +964,6 @@ static void pch_can_remove(struct pci_dev *pdev)
                pci_disable_msi(priv->dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        pch_can_reset(priv);
        pci_iounmap(pdev, priv->regs);
        free_candev(priv->ndev);
index 3752342a678ac5320e68bf69f747b4f69034758d..835921388e7ba6ab4b6ee98306c629691421d8a7 100644 (file)
@@ -207,7 +207,6 @@ static void ems_pci_del_card(struct pci_dev *pdev)
        kfree(card);
 
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static void ems_pci_card_reset(struct ems_pci_card *card)
index 217585b97cd3e0851439f8b388c040058b668100..087b13bd300e845a6231d80cfbb29ab0d3c8d41f 100644 (file)
@@ -387,7 +387,6 @@ static void kvaser_pci_remove_one(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver kvaser_pci_driver = {
index 6b6f0ad75090c4ea463ae49b8e2041dbdaca55df..065ca49eb45e72c48c9d1cc5f8fbda0256fdb195 100644 (file)
@@ -744,8 +744,6 @@ static void peak_pci_remove(struct pci_dev *pdev)
        pci_iounmap(pdev, cfg_base);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-
-       pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver peak_pci_driver = {
index c52c1e96bf90741455eea01add4129fcfc590bad..f9b4f81cd86a4601abc8fbcb5dd7eaf7aaef81a5 100644 (file)
@@ -477,7 +477,6 @@ static void plx_pci_del_card(struct pci_dev *pdev)
        kfree(card);
 
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 /*
index 8e259c541036c575fc181796ebfe21f0c8798a1d..29f9b632118742eea67904bd445922b2773a894f 100644 (file)
@@ -76,7 +76,7 @@ static int sp_probe(struct platform_device *pdev)
        struct resource *res_mem, *res_irq;
        struct sja1000_platform_data *pdata;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data provided!\n");
                err = -ENODEV;
index afd7d85b69155a4259748b2c2648bc2739835d38..35f062282dbdc278fbc9b040395f14935b7a1ac5 100644 (file)
@@ -71,34 +71,34 @@ struct softing {
        } id;
 };
 
-extern int softing_default_output(struct net_device *netdev);
+int softing_default_output(struct net_device *netdev);
 
-extern ktime_t softing_raw2ktime(struct softing *card, u32 raw);
+ktime_t softing_raw2ktime(struct softing *card, u32 raw);
 
-extern int softing_chip_poweron(struct softing *card);
+int softing_chip_poweron(struct softing *card);
 
-extern int softing_bootloader_command(struct softing *card, int16_t cmd,
-               const char *msg);
+int softing_bootloader_command(struct softing *card, int16_t cmd,
+                              const char *msg);
 
 /* Load firmware after reset */
-extern int softing_load_fw(const char *file, struct softing *card,
-                       __iomem uint8_t *virt, unsigned int size, int offset);
+int softing_load_fw(const char *file, struct softing *card,
+                   __iomem uint8_t *virt, unsigned int size, int offset);
 
 /* Load final application firmware after bootloader */
-extern int softing_load_app_fw(const char *file, struct softing *card);
+int softing_load_app_fw(const char *file, struct softing *card);
 
 /*
  * enable or disable irq
  * only called with fw.lock locked
  */
-extern int softing_enable_irq(struct softing *card, int enable);
+int softing_enable_irq(struct softing *card, int enable);
 
 /* start/stop 1 bus on card */
-extern int softing_startstop(struct net_device *netdev, int up);
+int softing_startstop(struct net_device *netdev, int up);
 
 /* netif_rx() */
-extern int softing_netdev_rx(struct net_device *netdev,
-               const struct can_frame *msg, ktime_t ktime);
+int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg,
+                     ktime_t ktime);
 
 /* SOFTING DPRAM mappings */
 #define DPRAM_RX               0x0000
index 65eef1eea2e2434ca47cfeb086095a415d2b385e..6cd5c01b624d592e6fce2c543075822e5751561b 100644 (file)
@@ -768,7 +768,7 @@ static int softing_pdev_remove(struct platform_device *pdev)
 
 static int softing_pdev_probe(struct platform_device *pdev)
 {
-       const struct softing_platform_data *pdat = pdev->dev.platform_data;
+       const struct softing_platform_data *pdat = dev_get_platdata(&pdev->dev);
        struct softing *card;
        struct net_device *netdev;
        struct softing_priv *priv;
index 3a349a22d5bc46eed31bdc32e12d27c25df3bd13..beb5ef834f0fb4de00a703cc4a5db1c79863ecc1 100644 (file)
@@ -894,7 +894,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
        void __iomem *addr;
        int err = -ENODEV;
 
-       pdata = pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data\n");
                goto probe_exit;
index f00c76377b446cb63999857138e4f0365eccc015..65b735d4a6ad8aa92dc4cea9490f3f14fe9bbab8 100644 (file)
@@ -35,7 +35,7 @@ config EL3
 
 config 3C515
        tristate "3c515 ISA \"Fast EtherLink\""
-       depends on (ISA || EISA) && ISA_DMA_API
+       depends on ISA && ISA_DMA_API
        ---help---
          If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
          network card, say Y and read the Ethernet-HOWTO, available from
@@ -70,7 +70,7 @@ config VORTEX
        select MII
        ---help---
          This option enables driver support for a large number of 10Mbps and
-         10/100Mbps EISA, PCI and PCMCIA 3Com network cards:
+         10/100Mbps EISA, PCI and Cardbus 3Com network cards:
 
          "Vortex"    (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI
          "Boomerang" (EtherLink XL 3c900 or 3c905)            PCI
index 144942f6372b68edd1bedc91175b8feab07cbe6e..465cc7108d8a5e5bbb2c34df44f7bd0329d02d6b 100644 (file)
@@ -2525,7 +2525,6 @@ typhoon_remove_one(struct pci_dev *pdev)
        pci_release_regions(pdev);
        pci_clear_mwi(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 }
 
index ef325ffa1b5ac63dd61dd0c0230c8a63a1a4a6cf..2923c51bb351c38e2cc7d728056bdcb6f5c25a97 100644 (file)
@@ -28,42 +28,42 @@ extern int ei_debug;
 #endif
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void ei_poll(struct net_device *dev);
-extern void eip_poll(struct net_device *dev);
+void ei_poll(struct net_device *dev);
+void eip_poll(struct net_device *dev);
 #endif
 
 
 /* Without I/O delay - non ISA or later chips */
-extern void NS8390_init(struct net_device *dev, int startp);
-extern int ei_open(struct net_device *dev);
-extern int ei_close(struct net_device *dev);
-extern irqreturn_t ei_interrupt(int irq, void *dev_id);
-extern void ei_tx_timeout(struct net_device *dev);
-extern netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void ei_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *ei_get_stats(struct net_device *dev);
+void NS8390_init(struct net_device *dev, int startp);
+int ei_open(struct net_device *dev);
+int ei_close(struct net_device *dev);
+irqreturn_t ei_interrupt(int irq, void *dev_id);
+void ei_tx_timeout(struct net_device *dev);
+netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void ei_set_multicast_list(struct net_device *dev);
+struct net_device_stats *ei_get_stats(struct net_device *dev);
 
 extern const struct net_device_ops ei_netdev_ops;
 
-extern struct net_device *__alloc_ei_netdev(int size);
+struct net_device *__alloc_ei_netdev(int size);
 static inline struct net_device *alloc_ei_netdev(void)
 {
        return __alloc_ei_netdev(0);
 }
 
 /* With I/O delay form */
-extern void NS8390p_init(struct net_device *dev, int startp);
-extern int eip_open(struct net_device *dev);
-extern int eip_close(struct net_device *dev);
-extern irqreturn_t eip_interrupt(int irq, void *dev_id);
-extern void eip_tx_timeout(struct net_device *dev);
-extern netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
-extern void eip_set_multicast_list(struct net_device *dev);
-extern struct net_device_stats *eip_get_stats(struct net_device *dev);
+void NS8390p_init(struct net_device *dev, int startp);
+int eip_open(struct net_device *dev);
+int eip_close(struct net_device *dev);
+irqreturn_t eip_interrupt(int irq, void *dev_id);
+void eip_tx_timeout(struct net_device *dev);
+netdev_tx_t eip_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void eip_set_multicast_list(struct net_device *dev);
+struct net_device_stats *eip_get_stats(struct net_device *dev);
 
 extern const struct net_device_ops eip_netdev_ops;
 
-extern struct net_device *__alloc_eip_netdev(int size);
+struct net_device *__alloc_eip_netdev(int size);
 static inline struct net_device *alloc_eip_netdev(void)
 {
        return __alloc_eip_netdev(0);
index becef25fa194174e1a18a1a7cfbe2fd0c606f1f6..0988811f4e40e2e4effd4fa92a665b67d34291b5 100644 (file)
@@ -146,13 +146,6 @@ config PCMCIA_PCNET
          To compile this driver as a module, choose M here: the module will be
          called pcnet_cs.  If unsure, say N.
 
-config NE_H8300
-       tristate "NE2000 compatible support for H8/300"
-       depends on H8300H_AKI3068NET || H8300H_H8MAX
-       ---help---
-         Say Y here if you want to use the NE2000 compatible
-         controller on the Renesas H8/300 processor.
-
 config STNIC
        tristate "National DP83902AV  support"
        depends on SUPERH
index 588954a79b2ae657bf905b0246a0b2753f72a5ed..ff3b31894188261bb4ec865cd289a34e1583b970 100644 (file)
@@ -10,7 +10,6 @@ obj-$(CONFIG_HYDRA) += hydra.o 8390.o
 obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
 obj-$(CONFIG_NE2000) += ne.o 8390p.o
 obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
 obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
 obj-$(CONFIG_STNIC) += stnic.o 8390.o
index f92f001551dab53152bf276d189e88b1f984e3b5..36fa577970bbae8741254519c0450d497a286d7e 100644 (file)
@@ -702,7 +702,7 @@ static int ax_init_dev(struct net_device *dev)
                        for (i = 0; i < 16; i++)
                                SA_prom[i] = SA_prom[i+i];
 
-               memcpy(dev->dev_addr, SA_prom, 6);
+               memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
        }
 
 #ifdef CONFIG_AX88796_93CX6
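
Here, and in the bnx2, pcnet32, bmac, b44 and atari_lance hunks further down, hard-coded 6-byte MAC lengths are replaced with ETH_ALEN from <linux/if_ether.h>. A hedged sketch of copying a permanent address into a net_device (the foo_* name and fallback policy are placeholders):

#include <linux/etherdevice.h>
#include <linux/if_ether.h>

static void foo_set_perm_addr(struct net_device *dev, const u8 *prom_addr)
{
	/* ETH_ALEN (6) documents intent and keeps the length in one place. */
	if (is_valid_ether_addr(prom_addr))
		memcpy(dev->dev_addr, prom_addr, ETH_ALEN);
	else
		eth_hw_addr_random(dev);	/* fall back to a random MAC */
}
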
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
deleted file mode 100644 (file)
index 7fc28f2..0000000
+++ /dev/null
@@ -1,684 +0,0 @@
-/* ne-h8300.c: A NE2000 clone on H8/300 driver for linux. */
-/*
-    original ne.c
-    Written 1992-94 by Donald Becker.
-
-    Copyright 1993 United States Government as represented by the
-    Director, National Security Agency.
-
-    This software may be used and distributed according to the terms
-    of the GNU General Public License, incorporated herein by reference.
-
-    The author may be reached as becker@scyld.com, or C/O
-    Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
-
-    H8/300 modified
-    Yoshinori Sato <ysato@users.sourceforge.jp>
-*/
-
-static const char version1[] =
-"ne-h8300.c:v1.00 2004/04/11 ysato\n";
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/jiffies.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define EI_SHIFT(x)    (ei_local->reg_offset[x])
-
-#include "8390.h"
-
-#define DRV_NAME "ne-h8300"
-
-/* Some defines that people can play with if so inclined. */
-
-/* Do we perform extra sanity checks on stuff ? */
-/* #define NE_SANITY_CHECK */
-
-/* Do we implement the read before write bugfix ? */
-/* #define NE_RW_BUGFIX */
-
-/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
-/* #define PACKETBUF_MEMSIZE   0x40 */
-
-/* A zero-terminated list of I/O addresses to be probed at boot. */
-
-/* ---- No user-serviceable parts below ---- */
-
-static const char version[] =
-    "8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-#include "lib8390.c"
-
-#define NE_BASE         (dev->base_addr)
-#define NE_CMD         0x00
-#define NE_DATAPORT    (ei_status.word16?0x20:0x10)    /* NatSemi-defined port window offset. */
-#define NE_RESET       (ei_status.word16?0x3f:0x1f)    /* Issue a read to reset, a write to clear. */
-#define NE_IO_EXTENT   (ei_status.word16?0x40:0x20)
-
-#define NESM_START_PG  0x40    /* First page of TX buffer */
-#define NESM_STOP_PG   0x80    /* Last page +1 of RX ring */
-
-static int ne_probe1(struct net_device *dev, int ioaddr);
-
-static int ne_open(struct net_device *dev);
-static int ne_close(struct net_device *dev);
-
-static void ne_reset_8390(struct net_device *dev);
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
-                         int ring_page);
-static void ne_block_input(struct net_device *dev, int count,
-                         struct sk_buff *skb, int ring_offset);
-static void ne_block_output(struct net_device *dev, const int count,
-               const unsigned char *buf, const int start_page);
-
-
-static u32 reg_offset[16];
-
-static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
-{
-       struct ei_device *ei_local = netdev_priv(dev);
-       int i;
-       unsigned char bus_width;
-
-       bus_width = *(volatile unsigned char *)ABWCR;
-       bus_width &= 1 << ((base_addr >> 21) & 7);
-
-       for (i = 0; i < ARRAY_SIZE(reg_offset); i++)
-               if (bus_width == 0)
-                       reg_offset[i] = i * 2 + 1;
-               else
-                       reg_offset[i] = i;
-
-       ei_local->reg_offset = reg_offset;
-       return 0;
-}
-
-static int __initdata h8300_ne_count = 0;
-#ifdef CONFIG_H8300H_H8MAX
-static unsigned long __initdata h8300_ne_base[] = { 0x800600 };
-static int h8300_ne_irq[] = {EXT_IRQ4};
-#endif
-#ifdef CONFIG_H8300H_AKI3068NET
-static unsigned long __initdata h8300_ne_base[] = { 0x200000 };
-static int h8300_ne_irq[] = {EXT_IRQ5};
-#endif
-
-static inline int init_dev(struct net_device *dev)
-{
-       if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) {
-               dev->base_addr = h8300_ne_base[h8300_ne_count];
-               dev->irq       = h8300_ne_irq[h8300_ne_count];
-               h8300_ne_count++;
-               return 0;
-       } else
-               return -ENODEV;
-}
-
-/*  Probe for various non-shared-memory ethercards.
-
-   NEx000-clone boards have a Station Address PROM (SAPROM) in the packet
-   buffer memory space.  NE2000 clones have 0x57,0x57 in bytes 0x0e,0x0f of
-   the SAPROM, while other supposed NE2000 clones must be detected by their
-   SA prefix.
-
-   Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
-   mode results in doubled values, which can be detected and compensated for.
-
-   The probe is also responsible for initializing the card and filling
-   in the 'dev' and 'ei_status' structures.
-
-   We use the minimum memory size for some ethercard product lines, iff we can't
-   distinguish models.  You can increase the packet buffer size by setting
-   PACKETBUF_MEMSIZE.  Reported Cabletron packet buffer locations are:
-       E1010   starts at 0x100 and ends at 0x2000.
-       E1010-x starts at 0x100 and ends at 0x8000. ("-x" means "more memory")
-       E2010    starts at 0x100 and ends at 0x4000.
-       E2010-x starts at 0x100 and ends at 0xffff.  */
-
-static int __init do_ne_probe(struct net_device *dev)
-{
-       unsigned int base_addr = dev->base_addr;
-
-       /* First check any supplied i/o locations. User knows best. <cough> */
-       if (base_addr > 0x1ff)  /* Check a single specified location. */
-               return ne_probe1(dev, base_addr);
-       else if (base_addr != 0)        /* Don't probe at all. */
-               return -ENXIO;
-
-       return -ENODEV;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
-       free_irq(dev->irq, dev);
-       release_region(dev->base_addr, NE_IO_EXTENT);
-}
-
-#ifndef MODULE
-struct net_device * __init ne_probe(int unit)
-{
-       struct net_device *dev = ____alloc_ei_netdev(0);
-       int err;
-
-       if (!dev)
-               return ERR_PTR(-ENOMEM);
-
-       if (init_dev(dev))
-               return ERR_PTR(-ENODEV);
-
-       sprintf(dev->name, "eth%d", unit);
-       netdev_boot_setup_check(dev);
-
-       err = init_reg_offset(dev, dev->base_addr);
-       if (err)
-               goto out;
-
-       err = do_ne_probe(dev);
-       if (err)
-               goto out;
-       return dev;
-out:
-       free_netdev(dev);
-       return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops ne_netdev_ops = {
-       .ndo_open               = ne_open,
-       .ndo_stop               = ne_close,
-
-       .ndo_start_xmit         = __ei_start_xmit,
-       .ndo_tx_timeout         = __ei_tx_timeout,
-       .ndo_get_stats          = __ei_get_stats,
-       .ndo_set_rx_mode        = __ei_set_multicast_list,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_change_mtu         = eth_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = __ei_poll,
-#endif
-};
-
-static int __init ne_probe1(struct net_device *dev, int ioaddr)
-{
-       int i;
-       unsigned char SA_prom[16];
-       int wordlength = 2;
-       const char *name = NULL;
-       int start_page, stop_page;
-       int reg0, ret;
-       static unsigned version_printed;
-       struct ei_device *ei_local = netdev_priv(dev);
-       unsigned char bus_width;
-
-       if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
-               return -EBUSY;
-
-       reg0 = inb_p(ioaddr);
-       if (reg0 == 0xFF) {
-               ret = -ENODEV;
-               goto err_out;
-       }
-
-       /* Do a preliminary verification that we have a 8390. */
-       {
-               int regd;
-               outb_p(E8390_NODMA+E8390_PAGE1+E8390_STOP, ioaddr + E8390_CMD);
-               regd = inb_p(ioaddr + EI_SHIFT(0x0d));
-               outb_p(0xff, ioaddr + EI_SHIFT(0x0d));
-               outb_p(E8390_NODMA+E8390_PAGE0, ioaddr + E8390_CMD);
-               inb_p(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */
-               if (inb_p(ioaddr + EN0_COUNTER0) != 0) {
-                       outb_p(reg0, ioaddr + EI_SHIFT(0));
-                       outb_p(regd, ioaddr + EI_SHIFT(0x0d));  /* Restore the old values. */
-                       ret = -ENODEV;
-                       goto err_out;
-               }
-       }
-
-       if (ei_debug  &&  version_printed++ == 0)
-               printk(KERN_INFO "%s", version1);
-
-       printk(KERN_INFO "NE*000 ethercard probe at %08x:", ioaddr);
-
-       /* Read the 16 bytes of station address PROM.
-          We must first initialize registers, similar to NS8390_init(eifdev, 0).
-          We can't reliably read the SAPROM address without this.
-          (I learned the hard way!). */
-       {
-               struct {unsigned char value, offset; } program_seq[] =
-               {
-                       {E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0*/
-                       {0x48,  EN0_DCFG},      /* Set byte-wide (0x48) access. */
-                       {0x00,  EN0_RCNTLO},    /* Clear the count regs. */
-                       {0x00,  EN0_RCNTHI},
-                       {0x00,  EN0_IMR},       /* Mask completion irq. */
-                       {0xFF,  EN0_ISR},
-                       {E8390_RXOFF, EN0_RXCR},        /* 0x20  Set to monitor */
-                       {E8390_TXOFF, EN0_TXCR},        /* 0x02  and loopback mode. */
-                       {32,    EN0_RCNTLO},
-                       {0x00,  EN0_RCNTHI},
-                       {0x00,  EN0_RSARLO},    /* DMA starting at 0x0000. */
-                       {0x00,  EN0_RSARHI},
-                       {E8390_RREAD+E8390_START, E8390_CMD},
-               };
-
-               for (i = 0; i < ARRAY_SIZE(program_seq); i++)
-                       outb_p(program_seq[i].value, ioaddr + program_seq[i].offset);
-
-       }
-       bus_width = *(volatile unsigned char *)ABWCR;
-       bus_width &= 1 << ((ioaddr >> 21) & 7);
-       ei_status.word16 = (bus_width == 0); /* temporary setting */
-       for(i = 0; i < 16 /*sizeof(SA_prom)*/; i++) {
-               SA_prom[i] = inb_p(ioaddr + NE_DATAPORT);
-               inb_p(ioaddr + NE_DATAPORT); /* dummy read */
-       }
-
-       start_page = NESM_START_PG;
-       stop_page = NESM_STOP_PG;
-
-       if (bus_width)
-               wordlength = 1;
-       else
-               outb_p(0x49, ioaddr + EN0_DCFG);
-
-       /* Set up the rest of the parameters. */
-       name = (wordlength == 2) ? "NE2000" : "NE1000";
-
-       if (! dev->irq) {
-               printk(" failed to detect IRQ line.\n");
-               ret = -EAGAIN;
-               goto err_out;
-       }
-
-       /* Snarf the interrupt now.  There's no point in waiting since we cannot
-          share and the board will usually be enabled. */
-       ret = request_irq(dev->irq, __ei_interrupt, 0, name, dev);
-       if (ret) {
-               printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
-               goto err_out;
-       }
-
-       dev->base_addr = ioaddr;
-
-       for (i = 0; i < ETH_ALEN; i++)
-               dev->dev_addr[i] = SA_prom[i];
-       printk(" %pM\n", dev->dev_addr);
-
-       printk("%s: %s found at %#x, using IRQ %d.\n",
-               dev->name, name, ioaddr, dev->irq);
-
-       ei_status.name = name;
-       ei_status.tx_start_page = start_page;
-       ei_status.stop_page = stop_page;
-       ei_status.word16 = (wordlength == 2);
-
-       ei_status.rx_start_page = start_page + TX_PAGES;
-#ifdef PACKETBUF_MEMSIZE
-        /* Allow the packet buffer size to be overridden by know-it-alls. */
-       ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
-#endif
-
-       ei_status.reset_8390 = &ne_reset_8390;
-       ei_status.block_input = &ne_block_input;
-       ei_status.block_output = &ne_block_output;
-       ei_status.get_8390_hdr = &ne_get_8390_hdr;
-       ei_status.priv = 0;
-
-       dev->netdev_ops = &ne_netdev_ops;
-
-       __NS8390_init(dev, 0);
-
-       ret = register_netdev(dev);
-       if (ret)
-               goto out_irq;
-       return 0;
-out_irq:
-       free_irq(dev->irq, dev);
-err_out:
-       release_region(ioaddr, NE_IO_EXTENT);
-       return ret;
-}
-
-static int ne_open(struct net_device *dev)
-{
-       __ei_open(dev);
-       return 0;
-}
-
-static int ne_close(struct net_device *dev)
-{
-       if (ei_debug > 1)
-               printk(KERN_DEBUG "%s: Shutting down ethercard.\n", dev->name);
-       __ei_close(dev);
-       return 0;
-}
-
-/* Hard reset the card.  This used to pause for the same period that a
-   8390 reset command required, but that shouldn't be necessary. */
-
-static void ne_reset_8390(struct net_device *dev)
-{
-       unsigned long reset_start_time = jiffies;
-       struct ei_device *ei_local = netdev_priv(dev);
-
-       if (ei_debug > 1)
-               printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
-
-       /* DON'T change these to inb_p/outb_p or reset will fail on clones. */
-       outb(inb(NE_BASE + NE_RESET), NE_BASE + NE_RESET);
-
-       ei_status.txing = 0;
-       ei_status.dmaing = 0;
-
-       /* This check _should_not_ be necessary, omit eventually. */
-       while ((inb_p(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
-               if (time_after(jiffies, reset_start_time + 2*HZ/100)) {
-                       printk(KERN_WARNING "%s: ne_reset_8390() did not complete.\n", dev->name);
-                       break;
-               }
-       outb_p(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
-}
-
-/* Grab the 8390 specific header. Similar to the block_input routine, but
-   we don't need to be concerned with ring wrap as the header will be at
-   the start of a page, so we optimize accordingly. */
-
-static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
-{
-       struct ei_device *ei_local = netdev_priv(dev);
-       /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-
-       if (ei_status.dmaing)
-       {
-               printk(KERN_EMERG "%s: DMAing conflict in ne_get_8390_hdr "
-                       "[DMAstat:%d][irqlock:%d].\n",
-                       dev->name, ei_status.dmaing, ei_status.irqlock);
-               return;
-       }
-
-       ei_status.dmaing |= 0x01;
-       outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
-       outb_p(sizeof(struct e8390_pkt_hdr), NE_BASE + EN0_RCNTLO);
-       outb_p(0, NE_BASE + EN0_RCNTHI);
-       outb_p(0, NE_BASE + EN0_RSARLO);                /* On page boundary */
-       outb_p(ring_page, NE_BASE + EN0_RSARHI);
-       outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-
-       if (ei_status.word16) {
-               int len;
-               unsigned short *p = (unsigned short *)hdr;
-               for (len = sizeof(struct e8390_pkt_hdr)>>1; len > 0; len--)
-                       *p++ = inw(NE_BASE + NE_DATAPORT);
-       } else
-               insb(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr));
-
-       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);   /* Ack intr. */
-       ei_status.dmaing &= ~0x01;
-
-       le16_to_cpus(&hdr->count);
-}
-
-/* Block input and output, similar to the Crynwr packet driver.  If you
-   are porting to a new ethercard, look at the packet driver source for hints.
-   The NEx000 doesn't share the on-board packet memory -- you have to put
-   the packet out through the "remote DMA" dataport using outb. */
-
-static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
-{
-       struct ei_device *ei_local = netdev_priv(dev);
-#ifdef NE_SANITY_CHECK
-       int xfer_count = count;
-#endif
-       char *buf = skb->data;
-
-       /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-       if (ei_status.dmaing)
-       {
-               printk(KERN_EMERG "%s: DMAing conflict in ne_block_input "
-                       "[DMAstat:%d][irqlock:%d].\n",
-                       dev->name, ei_status.dmaing, ei_status.irqlock);
-               return;
-       }
-       ei_status.dmaing |= 0x01;
-       outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, NE_BASE + NE_CMD);
-       outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
-       outb_p(count >> 8, NE_BASE + EN0_RCNTHI);
-       outb_p(ring_offset & 0xff, NE_BASE + EN0_RSARLO);
-       outb_p(ring_offset >> 8, NE_BASE + EN0_RSARHI);
-       outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-       if (ei_status.word16)
-       {
-               int len;
-               unsigned short *p = (unsigned short *)buf;
-               for (len = count>>1; len > 0; len--)
-                       *p++ = inw(NE_BASE + NE_DATAPORT);
-               if (count & 0x01)
-               {
-                       buf[count-1] = inb(NE_BASE + NE_DATAPORT);
-#ifdef NE_SANITY_CHECK
-                       xfer_count++;
-#endif
-               }
-       } else {
-               insb(NE_BASE + NE_DATAPORT, buf, count);
-       }
-
-#ifdef NE_SANITY_CHECK
-       /* This was for the ALPHA version only, but enough people have
-          been encountering problems so it is still here.  If you see
-          this message you either 1) have a slightly incompatible clone
-          or 2) have noise/speed problems with your bus. */
-
-       if (ei_debug > 1)
-       {
-               /* DMA termination address check... */
-               int addr, tries = 20;
-               do {
-                       /* DON'T check for 'inb_p(EN0_ISR) & ENISR_RDC' here
-                          -- it's broken for Rx on some cards! */
-                       int high = inb_p(NE_BASE + EN0_RSARHI);
-                       int low = inb_p(NE_BASE + EN0_RSARLO);
-                       addr = (high << 8) + low;
-                       if (((ring_offset + xfer_count) & 0xff) == low)
-                               break;
-               } while (--tries > 0);
-               if (tries <= 0)
-                       printk(KERN_WARNING "%s: RX transfer address mismatch,"
-                               "%#4.4x (expected) vs. %#4.4x (actual).\n",
-                               dev->name, ring_offset + xfer_count, addr);
-       }
-#endif
-       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);   /* Ack intr. */
-       ei_status.dmaing &= ~0x01;
-}
-
-static void ne_block_output(struct net_device *dev, int count,
-               const unsigned char *buf, const int start_page)
-{
-       struct ei_device *ei_local = netdev_priv(dev);
-       unsigned long dma_start;
-#ifdef NE_SANITY_CHECK
-       int retries = 0;
-#endif
-
-       /* Round the count up for word writes.  Do we need to do this?
-          What effect will an odd byte count have on the 8390?
-          I should check someday. */
-
-       if (ei_status.word16 && (count & 0x01))
-               count++;
-
-       /* This *shouldn't* happen. If it does, it's the last thing you'll see */
-       if (ei_status.dmaing)
-       {
-               printk(KERN_EMERG "%s: DMAing conflict in ne_block_output."
-                       "[DMAstat:%d][irqlock:%d]\n",
-                       dev->name, ei_status.dmaing, ei_status.irqlock);
-               return;
-       }
-       ei_status.dmaing |= 0x01;
-       /* We should already be in page 0, but to be safe... */
-       outb_p(E8390_PAGE0+E8390_START+E8390_NODMA, NE_BASE + NE_CMD);
-
-#ifdef NE_SANITY_CHECK
-retry:
-#endif
-
-#ifdef NE8390_RW_BUGFIX
-       /* Handle the read-before-write bug the same way as the
-          Crynwr packet driver -- the NatSemi method doesn't work.
-          Actually this doesn't always work either, but if you have
-          problems with your NEx000 this is better than nothing! */
-
-       outb_p(0x42, NE_BASE + EN0_RCNTLO);
-       outb_p(0x00, NE_BASE + EN0_RCNTHI);
-       outb_p(0x42, NE_BASE + EN0_RSARLO);
-       outb_p(0x00, NE_BASE + EN0_RSARHI);
-       outb_p(E8390_RREAD+E8390_START, NE_BASE + NE_CMD);
-       /* Make certain that the dummy read has occurred. */
-       udelay(6);
-#endif
-
-       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);
-
-       /* Now the normal output. */
-       outb_p(count & 0xff, NE_BASE + EN0_RCNTLO);
-       outb_p(count >> 8,   NE_BASE + EN0_RCNTHI);
-       outb_p(0x00, NE_BASE + EN0_RSARLO);
-       outb_p(start_page, NE_BASE + EN0_RSARHI);
-
-       outb_p(E8390_RWRITE+E8390_START, NE_BASE + NE_CMD);
-       if (ei_status.word16) {
-               int len;
-               unsigned short *p = (unsigned short *)buf;
-               for (len = count>>1; len > 0; len--)
-                       outw(*p++, NE_BASE + NE_DATAPORT);
-       } else {
-               outsb(NE_BASE + NE_DATAPORT, buf, count);
-       }
-
-       dma_start = jiffies;
-
-#ifdef NE_SANITY_CHECK
-       /* This was for the ALPHA version only, but enough people have
-          been encountering problems so it is still here. */
-
-       if (ei_debug > 1)
-       {
-               /* DMA termination address check... */
-               int addr, tries = 20;
-               do {
-                       int high = inb_p(NE_BASE + EN0_RSARHI);
-                       int low = inb_p(NE_BASE + EN0_RSARLO);
-                       addr = (high << 8) + low;
-                       if ((start_page << 8) + count == addr)
-                               break;
-               } while (--tries > 0);
-
-               if (tries <= 0)
-               {
-                       printk(KERN_WARNING "%s: Tx packet transfer address mismatch,"
-                               "%#4.4x (expected) vs. %#4.4x (actual).\n",
-                               dev->name, (start_page << 8) + count, addr);
-                       if (retries++ == 0)
-                               goto retry;
-               }
-       }
-#endif
-
-       while ((inb_p(NE_BASE + EN0_ISR) & ENISR_RDC) == 0)
-               if (time_after(jiffies, dma_start + 2*HZ/100)) {                /* 20ms */
-                       printk(KERN_WARNING "%s: timeout waiting for Tx RDC.\n", dev->name);
-                       ne_reset_8390(dev);
-                       __NS8390_init(dev,1);
-                       break;
-               }
-
-       outb_p(ENISR_RDC, NE_BASE + EN0_ISR);   /* Ack intr. */
-       ei_status.dmaing &= ~0x01;
-}
-
-
-#ifdef MODULE
-#define MAX_NE_CARDS   1       /* Max number of NE cards per module */
-static struct net_device *dev_ne[MAX_NE_CARDS];
-static int io[MAX_NE_CARDS];
-static int irq[MAX_NE_CARDS];
-static int bad[MAX_NE_CARDS];  /* 0xbad = bad sig or no reset ack */
-
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param_array(bad, int, NULL, 0);
-MODULE_PARM_DESC(io, "I/O base address(es)");
-MODULE_PARM_DESC(irq, "IRQ number(s)");
-MODULE_DESCRIPTION("H8/300 NE2000 Ethernet driver");
-MODULE_LICENSE("GPL");
-
-/* This is set up so that no ISA autoprobe takes place. We can't guarantee
-that the ne2k probe is the last 8390 based probe to take place (as it
-is at boot) and so the probe will get confused by any other 8390 cards.
-ISA device autoprobes on a running machine are not recommended anyway. */
-
-int init_module(void)
-{
-       int this_dev, found = 0;
-       int err;
-
-       for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-               struct net_device *dev = ____alloc_ei_netdev(0);
-               if (!dev)
-                       break;
-               if (io[this_dev]) {
-                       dev->irq = irq[this_dev];
-                       dev->mem_end = bad[this_dev];
-                       dev->base_addr = io[this_dev];
-               } else {
-                       dev->base_addr = h8300_ne_base[this_dev];
-                       dev->irq = h8300_ne_irq[this_dev];
-               }
-               err = init_reg_offset(dev, dev->base_addr);
-               if (!err) {
-                       if (do_ne_probe(dev) == 0) {
-                               dev_ne[found++] = dev;
-                               continue;
-                       }
-               }
-               free_netdev(dev);
-               if (found)
-                       break;
-               if (io[this_dev] != 0)
-                       printk(KERN_WARNING "ne.c: No NE*000 card found at i/o = %#x\n", dev->base_addr);
-               else
-                       printk(KERN_NOTICE "ne.c: You must supply \"io=0xNNN\" value(s) for ISA cards.\n");
-               return -ENXIO;
-       }
-       if (found)
-               return 0;
-       return -ENODEV;
-}
-
-void cleanup_module(void)
-{
-       int this_dev;
-
-       for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-               struct net_device *dev = dev_ne[this_dev];
-               if (dev) {
-                       unregister_netdev(dev);
-                       cleanup_card(dev);
-                       free_netdev(dev);
-               }
-       }
-}
-#endif /* MODULE */
index 92201080e07a104b4e25d2f308a7b62d8f0099ba..fc14a85e4d5ffdde31d42c40dac5f6af7e80c946 100644 (file)
@@ -389,9 +389,7 @@ err_out_free_netdev:
        free_netdev (dev);
 err_out_free_res:
        release_region (ioaddr, NE_IO_EXTENT);
-       pci_set_drvdata (pdev, NULL);
        return -ENODEV;
-
 }
 
 /*
@@ -655,7 +653,6 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
        release_region(dev->base_addr, NE_IO_EXTENT);
        free_netdev(dev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
index 8b04bfc20cfba3b9c17984471dfccb6163799bdb..171d73c1d3c22de3209ca6c48b7978c35b2a38b0 100644 (file)
@@ -835,7 +835,6 @@ static int starfire_init_one(struct pci_dev *pdev,
        return 0;
 
 err_out_cleardev:
-       pci_set_drvdata(pdev, NULL);
        iounmap(base);
 err_out_free_res:
        pci_release_regions (pdev);
@@ -2012,7 +2011,6 @@ static void starfire_remove_one(struct pci_dev *pdev)
        iounmap(np->base);
        pci_release_regions(pdev);
 
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);                       /* Will also free np!! */
 }
 
index 7a07ee07906b280f2d849ae0959c604ee58d411b..6dec86ac97cdbfb26977c382d73cf6a849582895 100644 (file)
@@ -104,6 +104,6 @@ struct bfin_mac_local {
 #endif
 };
 
-extern int bfin_get_ether_addr(char *addr);
+int bfin_get_ether_addr(char *addr);
 
 #endif
index 0a5837b9642194799bedaf26abf3721bafb94579..ae33a99bf47626695b239cdb78941ea09b723b02 100644 (file)
@@ -242,13 +242,13 @@ struct lance_private
 #define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
 
 /* Now the prototypes we export */
-extern int lance_open(struct net_device *dev);
-extern int lance_close (struct net_device *dev);
-extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
-extern void lance_set_multicast (struct net_device *dev);
-extern void lance_tx_timeout(struct net_device *dev);
+int lance_open(struct net_device *dev);
+int lance_close (struct net_device *dev);
+int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast (struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void lance_poll(struct net_device *dev);
+void lance_poll(struct net_device *dev);
 #endif
 
 #endif /* ndef _7990_H */
index 1b1429d5d5c287f761a3066bc6bbebe6b74995ee..d042511bdc1365882a3bea08626bfce6bcc714da 100644 (file)
@@ -1711,7 +1711,6 @@ static void amd8111e_remove_one(struct pci_dev *pdev)
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 static void amd8111e_config_ipg(struct net_device* dev)
@@ -1967,7 +1966,6 @@ err_free_reg:
 
 err_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        return err;
 
 }
index 10ceca523fc0dc9556733d3c1eeded045ab70303..e07ce5ff2d48bf93e2a3daeb102fb4b78d471532 100644 (file)
@@ -586,10 +586,10 @@ static unsigned long __init lance_probe1( struct net_device *dev,
        switch( lp->cardtype ) {
          case OLD_RIEBL:
                /* No ethernet address! (Set some default address) */
-               memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
+               memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
                break;
          case NEW_RIEBL:
-               lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
+               lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
                break;
          case PAM_CARD:
                i = IO->eeprom;
index 91d52b495848a17d4b9bae0de04b7d1f7c798ba9..427c148bb643538c1d2240036a9dd7cad1be9596 100644 (file)
@@ -1138,7 +1138,7 @@ static int au1000_probe(struct platform_device *pdev)
                aup->phy1_search_mac0 = 1;
        } else {
                if (is_valid_ether_addr(pd->mac)) {
-                       memcpy(dev->dev_addr, pd->mac, 6);
+                       memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
                } else {
                        /* Set a random MAC since no valid provided by platform_data. */
                        eth_hw_addr_random(dev);
index 94edc9c6fbbf317765d41d8d4ba9cfc6b817ec20..57397295887c964b23f2bbb7cf7d25d908ab2b29 100644 (file)
@@ -344,8 +344,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
                }
 
                clen = len & 1;
-               rtp = tp;
-               rfp = fp;
+               rtp = (unsigned char *)tp;
+               rfp = (const unsigned char *)fp;
                while (clen--) {
                        *rtp++ = *rfp++;
                }
@@ -372,8 +372,8 @@ static void cp_to_buf(const int type, void *to, const void *from, int len)
                 * do the rest, if any.
                 */
                clen = len & 15;
-               rtp = (unsigned char *) tp;
-               rfp = (unsigned char *) fp;
+               rtp = (unsigned char *)tp;
+               rfp = (const unsigned char *)fp;
                while (clen--) {
                        *rtp++ = *rfp++;
                }
@@ -403,8 +403,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
 
                clen = len & 1;
 
-               rtp = tp;
-               rfp = fp;
+               rtp = (unsigned char *)tp;
+               rfp = (const unsigned char *)fp;
 
                while (clen--) {
                        *rtp++ = *rfp++;
@@ -433,8 +433,8 @@ static void cp_from_buf(const int type, void *to, const void *from, int len)
                 * do the rest, if any.
                 */
                clen = len & 15;
-               rtp = (unsigned char *) tp;
-               rfp = (unsigned char *) fp;
+               rtp = (unsigned char *)tp;
+               rfp = (const unsigned char *)fp;
                while (clen--) {
                        *rtp++ = *rfp++;
                }
@@ -725,7 +725,6 @@ static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
 
-       clear_ioasic_dma_irq(irq);
        printk(KERN_ERR "%s: DMA error\n", dev->name);
        return IRQ_HANDLED;
 }
@@ -812,7 +811,7 @@ static int lance_open(struct net_device *dev)
        if (lp->dma_irq >= 0) {
                unsigned long flags;
 
-               if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
+               if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
                                "lance error", dev)) {
                        free_irq(dev->irq, dev);
                        printk("%s: Can't get DMA IRQ %d\n", dev->name,
index 5c728436b85e7df0cf9421fa6185e45c2b52bd3c..256f590f6bb1a6db167f4375b3ed2719d2ad5073 100644 (file)
@@ -754,7 +754,7 @@ lance_open(struct net_device *dev)
        int i;
 
        if (dev->irq == 0 ||
-               request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
+               request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
                return -EAGAIN;
        }
 
index 2d8e28819779eed0388109791fe0126f5af763f8..38492e0b704e3aab926ec5fd300b61028223d504 100644 (file)
@@ -1675,7 +1675,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
                                pr_cont(" warning: CSR address invalid,\n");
                                pr_info("    using instead PROM address of");
                        }
-                       memcpy(dev->dev_addr, promaddr, 6);
+                       memcpy(dev->dev_addr, promaddr, ETH_ALEN);
                }
        }
 
@@ -2818,7 +2818,6 @@ static void pcnet32_remove_one(struct pci_dev *pdev)
                                    lp->init_block, lp->init_dma_addr);
                free_netdev(dev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index a597b766f0809d3b1e1893e9ec008fcdc813405f..daae0e01625360598194c76fcc4239a3283bc85c 100644 (file)
@@ -1220,8 +1220,8 @@ static void bmac_reset_and_enable(struct net_device *dev)
        if (skb != NULL) {
                data = skb_put(skb, ETHERMINPACKET);
                memset(data, 0, ETHERMINPACKET);
-               memcpy(data, dev->dev_addr, 6);
-               memcpy(data+6, dev->dev_addr, 6);
+               memcpy(data, dev->dev_addr, ETH_ALEN);
+               memcpy(data + ETH_ALEN, dev->dev_addr, ETH_ALEN);
                bmac_transmit_packet(skb, dev);
        }
        spin_unlock_irqrestore(&bp->lock, flags);
index fc95b235e210d058545efd889537c2a8dea08a45..5aa5e8146496ba807ece60fe5b287d70a79a8ad4 100644 (file)
@@ -1367,7 +1367,6 @@ static void alx_remove(struct pci_dev *pdev)
 
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        free_netdev(alx->dev);
 }
index 0f0556526ba90bff7f764c4005d6f6e850220dd7..7f9369a3b378320762c89cc9369964d265fda6c9 100644 (file)
@@ -600,7 +600,7 @@ struct atl1c_adapter {
 extern char atl1c_driver_name[];
 extern char atl1c_driver_version[];
 
-extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
-extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
-extern void atl1c_set_ethtool_ops(struct net_device *netdev);
+void atl1c_reinit_locked(struct atl1c_adapter *adapter);
+s32 atl1c_reset_hw(struct atl1c_hw *hw);
+void atl1c_set_ethtool_ops(struct net_device *netdev);
 #endif /* _ATL1C_H_ */
index 3ef7092e3f1c5ff0fd31cb53a0253a0c7532d711..1cda49a28f7f0e9a672ae775a832eba31ad76fb3 100644 (file)
@@ -153,7 +153,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
 bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
 {
        int i;
-       int ret = false;
+       bool ret = false;
        u32 otp_ctrl_data;
        u32 control;
        u32 data;
index b5fd934585e9f9f14595f60a2ec2e069cd2bed35..1b0fe2d04a0e88fcb8ac5e250e33384d66c0c68f 100644 (file)
@@ -499,10 +499,10 @@ struct atl1e_adapter {
 extern char atl1e_driver_name[];
 extern char atl1e_driver_version[];
 
-extern void atl1e_check_options(struct atl1e_adapter *adapter);
-extern int atl1e_up(struct atl1e_adapter *adapter);
-extern void atl1e_down(struct atl1e_adapter *adapter);
-extern void atl1e_reinit_locked(struct atl1e_adapter *adapter);
-extern s32 atl1e_reset_hw(struct atl1e_hw *hw);
-extern void atl1e_set_ethtool_ops(struct net_device *netdev);
+void atl1e_check_options(struct atl1e_adapter *adapter);
+int atl1e_up(struct atl1e_adapter *adapter);
+void atl1e_down(struct atl1e_adapter *adapter);
+void atl1e_reinit_locked(struct atl1e_adapter *adapter);
+s32 atl1e_reset_hw(struct atl1e_hw *hw);
+void atl1e_set_ethtool_ops(struct net_device *netdev);
 #endif /* _ATL1_E_H_ */
index 1966444590f6192e862a713216182ab09052ebef..7a73f3a9fcb5e2a5b0ffbac1ed1d0bc0163769cb 100644 (file)
@@ -313,6 +313,34 @@ static void atl1e_set_multi(struct net_device *netdev)
        }
 }
 
+static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data)
+{
+
+       if (features & NETIF_F_RXALL) {
+               /* enable RX of ALL frames */
+               *mac_ctrl_data |= MAC_CTRL_DBG;
+       } else {
+               /* disable RX of ALL frames */
+               *mac_ctrl_data &= ~MAC_CTRL_DBG;
+       }
+}
+
+static void atl1e_rx_mode(struct net_device *netdev,
+       netdev_features_t features)
+{
+       struct atl1e_adapter *adapter = netdev_priv(netdev);
+       u32 mac_ctrl_data = 0;
+
+       netdev_dbg(adapter->netdev, "%s\n", __func__);
+
+       atl1e_irq_disable(adapter);
+       mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL);
+       __atl1e_rx_mode(features, &mac_ctrl_data);
+       AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+       atl1e_irq_enable(adapter);
+}
+
+
 static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
 {
        if (features & NETIF_F_HW_VLAN_CTAG_RX) {
@@ -394,6 +422,10 @@ static int atl1e_set_features(struct net_device *netdev,
        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                atl1e_vlan_mode(netdev, features);
 
+       if (changed & NETIF_F_RXALL)
+               atl1e_rx_mode(netdev, features);
+
+
        return 0;
 }
 
@@ -1057,7 +1089,8 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
                value |= MAC_CTRL_PROMIS_EN;
        if (netdev->flags & IFF_ALLMULTI)
                value |= MAC_CTRL_MC_ALL_EN;
-
+       if (netdev->features & NETIF_F_RXALL)
+               value |= MAC_CTRL_DBG;
        AT_WRITE_REG(hw, REG_MAC_CTRL, value);
 }
 
@@ -1405,7 +1438,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
                        rx_page_desc[que].rx_nxseq++;
 
                        /* error packet */
-                       if (prrs->pkt_flag & RRS_IS_ERR_FRAME) {
+                       if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) &&
+                           !(netdev->features & NETIF_F_RXALL)) {
                                if (prrs->err_flag & (RRS_ERR_BAD_CRC |
                                        RRS_ERR_DRIBBLE | RRS_ERR_CODE |
                                        RRS_ERR_TRUNC)) {
@@ -1418,7 +1452,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
                        }
 
                        packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
-                                       RRS_PKT_SIZE_MASK) - 4; /* CRC */
+                                       RRS_PKT_SIZE_MASK);
+                       if (likely(!(netdev->features & NETIF_F_RXFCS)))
+                               packet_size -= 4; /* CRC */
+
                        skb = netdev_alloc_skb_ip_align(netdev, packet_size);
                        if (skb == NULL)
                                goto skip_pkt;
@@ -2245,7 +2282,8 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
                              NETIF_F_HW_VLAN_CTAG_RX;
        netdev->features = netdev->hw_features | NETIF_F_LLTX |
                           NETIF_F_HW_VLAN_CTAG_TX;
-
+       /* not enabled by default */
+       netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
        return 0;
 }
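
The atl1e hunks above wire up two optional receive features: NETIF_F_RXALL (pass errored frames up the stack instead of dropping them) and NETIF_F_RXFCS (hand the 4-byte CRC to the stack rather than stripping it). They are advertised in hw_features but left off by default, and the RX path consults netdev->features per packet. A condensed, hedged sketch of those per-packet checks (the foo_* names and descriptor layout are invented):

#include <linux/if_ether.h>
#include <linux/netdevice.h>

struct foo_rx_desc {			/* invented descriptor layout */
	u16 flags;
#define FOO_RX_ERR	0x0001
	u16 len;			/* frame length including CRC */
};

/* Returns the number of bytes to hand to the stack, or -1 to drop. */
static int foo_rx_frame_len(struct net_device *netdev,
			    const struct foo_rx_desc *desc)
{
	/* Drop errored frames unless the user asked for everything. */
	if ((desc->flags & FOO_RX_ERR) && !(netdev->features & NETIF_F_RXALL))
		return -1;

	/* Strip the FCS unless the user asked to keep it. */
	if (netdev->features & NETIF_F_RXFCS)
		return desc->len;
	return desc->len - ETH_FCS_LEN;
}
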
 
index 3ebe19f7242b6da9cd5d3290674f85cb9e164013..2f27d4c4c3ad4ae5b026983dba7d9478cd1c5430 100644 (file)
@@ -42,7 +42,7 @@
 #include "atlx.h"
 
 #ifdef ETHTOOL_OPS_COMPAT
-extern int ethtool_ioctl(struct ifreq *ifr);
+int ethtool_ioctl(struct ifreq *ifr);
 #endif
 
 #define PCI_COMMAND_REGISTER   PCI_COMMAND
index 9b017d9c58e94e5ab671aeda2cfcc969cc561c6e..90e54d5488dcbda9b13a1820177630d2dd70b613 100644 (file)
@@ -596,6 +596,7 @@ static void b44_timer(unsigned long __opaque)
 static void b44_tx(struct b44 *bp)
 {
        u32 cur, cons;
+       unsigned bytes_compl = 0, pkts_compl = 0;
 
        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);
@@ -612,9 +613,14 @@ static void b44_tx(struct b44 *bp)
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;
+
+               bytes_compl += skb->len;
+               pkts_compl++;
+
                dev_kfree_skb_irq(skb);
        }
 
+       netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
@@ -1018,6 +1024,8 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);
 
+       netdev_sent_queue(dev, skb->len);
+
        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);
 
@@ -1416,6 +1424,8 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)
 
        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+
+       netdev_reset_queue(bp->dev);
 }
 
 static int b44_open(struct net_device *dev)
@@ -2101,7 +2111,7 @@ static int b44_get_invariants(struct b44 *bp)
         * valid PHY address. */
        bp->phy_addr &= 0x1F;
 
-       memcpy(bp->dev->dev_addr, addr, 6);
+       memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
 
        if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
                pr_err("Invalid MAC address found in EEPROM\n");
@@ -2183,8 +2193,7 @@ static int b44_init_one(struct ssb_device *sdev,
                goto err_out_free_dev;
        }
 
-       if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
-           dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+       if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
                dev_err(sdev->dev,
                        "Required 30BIT DMA mask unsupported by the system\n");
                goto err_out_powerdown;
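
The b44 hunks above add byte queue limits (BQL) accounting: netdev_sent_queue() when a frame is handed to the hardware, netdev_completed_queue() when TX completions are reaped, and netdev_reset_queue() whenever the ring is (re)initialised so the counters start from an empty state. The bgmac hunks below follow the same pattern. A hedged sketch of the three call sites (foo_* is an invented single-queue driver, with the DMA work elided):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* 1. Transmit path: account bytes as they are queued to the NIC. */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ...map the skb and write the TX descriptor here... */
	netdev_sent_queue(dev, skb->len);
	return NETDEV_TX_OK;
}

/* 2. TX completion: report how many packets/bytes the hardware finished. */
static void foo_tx_complete(struct net_device *dev,
			    unsigned int pkts, unsigned int bytes)
{
	netdev_completed_queue(dev, pkts, bytes);
}

/* 3. Ring (re)initialisation: the BQL counters must match an empty ring. */
static void foo_init_hw(struct net_device *dev)
{
	/* ...program the DMA rings here... */
	netdev_reset_queue(dev);
}
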
index 249468f953651480a5e0b897d582dd09743c6c3b..7eca5a1747337db6f2eed66803e81b081201e38f 100644 (file)
@@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
        dma_desc->ctl0 = cpu_to_le32(ctl0);
        dma_desc->ctl1 = cpu_to_le32(ctl1);
 
+       netdev_sent_queue(net_dev, skb->len);
+
        wmb();
 
        /* Increase ring->end to point empty slot. We tell hardware the first
@@ -178,6 +180,7 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
        struct device *dma_dev = bgmac->core->dma_dev;
        int empty_slot;
        bool freed = false;
+       unsigned bytes_compl = 0, pkts_compl = 0;
 
        /* The last slot that hardware didn't consume yet */
        empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
@@ -195,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
                                         slot->skb->len, DMA_TO_DEVICE);
                        slot->dma_addr = 0;
 
+                       bytes_compl += slot->skb->len;
+                       pkts_compl++;
+
                        /* Free memory! :) */
                        dev_kfree_skb(slot->skb);
                        slot->skb = NULL;
@@ -208,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
                freed = true;
        }
 
+       netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
+
        if (freed && netif_queue_stopped(bgmac->net_dev))
                netif_wake_queue(bgmac->net_dev);
 }
@@ -988,6 +996,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
        bgmac_miiconfig(bgmac);
        bgmac_phy_init(bgmac);
 
+       netdev_reset_queue(bgmac->net_dev);
+
        bgmac->int_status = 0;
 }
 
index e838a3f74b696dd208680498f3350a3349a65392..d9980ad00b4b8d65bc23061fd71d28e936d48cb1 100644 (file)
@@ -5761,8 +5761,8 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
-       memcpy(packet, bp->dev->dev_addr, 6);
-       memset(packet + 6, 0x0, 8);
+       memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+       memset(packet + ETH_ALEN, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);
 
@@ -8413,7 +8413,6 @@ err_out_release:
 
 err_out_disable:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
 err_out:
        return rc;
@@ -8514,7 +8513,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_drvdata(pdev, dev);
 
-       memcpy(dev->dev_addr, bp->mac_addr, 6);
+       memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
 
        dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
                NETIF_F_TSO | NETIF_F_TSO_ECN |
@@ -8546,7 +8545,6 @@ error:
        pci_iounmap(pdev, bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 err_free:
        free_netdev(dev);
        return rc;
@@ -8578,7 +8576,6 @@ bnx2_remove_one(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static int
index 97b3d32a98bd010ab1a1327ef7382e5bd64139b8..4e01c57d8c8de349da61e80cb0e4cb0f4aece3fd 100644 (file)
@@ -1197,8 +1197,9 @@ union cdu_context {
 /* TM (timers) host DB constants */
 #define TM_ILT_PAGE_SZ_HW      0
 #define TM_ILT_PAGE_SZ         (4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
-/* #define TM_CONN_NUM         (CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
-#define TM_CONN_NUM            1024
+#define TM_CONN_NUM            (BNX2X_FIRST_VF_CID + \
+                                BNX2X_VF_CIDS + \
+                                CNIC_ISCSI_CID_MAX)
 #define TM_ILT_SZ              (8 * TM_CONN_NUM)
 #define TM_ILT_LINES           DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
 
@@ -1527,7 +1528,6 @@ struct bnx2x {
 #define PCI_32BIT_FLAG                 (1 << 1)
 #define ONE_PORT_FLAG                  (1 << 2)
 #define NO_WOL_FLAG                    (1 << 3)
-#define USING_DAC_FLAG                 (1 << 4)
 #define USING_MSIX_FLAG                        (1 << 5)
 #define USING_MSI_FLAG                 (1 << 6)
 #define DISABLE_MSI_FLAG               (1 << 7)
@@ -1546,6 +1546,7 @@ struct bnx2x {
 #define IS_VF_FLAG                     (1 << 22)
 #define INTERRUPTS_ENABLED_FLAG                (1 << 23)
 #define BC_SUPPORTS_RMMOD_CMD          (1 << 24)
+#define HAS_PHYS_PORT_ID               (1 << 25)
 
 #define BP_NOMCP(bp)                   ((bp)->flags & NO_MCP_FLAG)
 
@@ -1621,7 +1622,7 @@ struct bnx2x {
        u16                     rx_ticks_int;
        u16                     rx_ticks;
 /* Maximal coalescing timeout in us */
-#define BNX2X_MAX_COALESCE_TOUT                (0xf0*12)
+#define BNX2X_MAX_COALESCE_TOUT                (0xff*BNX2X_BTR)
 
        u32                     lin_cnt;
 
@@ -1876,6 +1877,8 @@ struct bnx2x {
        u32 dump_preset_idx;
        bool                                    stats_started;
        struct semaphore                        stats_sema;
+
+       u8                                      phys_port_id[ETH_ALEN];
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -2072,7 +2075,8 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
 
 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
                               u8 src_type, u8 dst_type);
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae);
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+                              u32 *comp);
 
 /* FLR related routines */
 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp);
@@ -2231,7 +2235,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define BNX2X_NUM_TESTS_SF             7
 #define BNX2X_NUM_TESTS_MF             3
 #define BNX2X_NUM_TESTS(bp)            (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
-                                                    BNX2X_NUM_TESTS_SF)
+                                            IS_VF(bp) ? 0 : BNX2X_NUM_TESTS_SF)
 
 #define BNX2X_PHY_LOOPBACK             0
 #define BNX2X_MAC_LOOPBACK             1
@@ -2491,11 +2495,9 @@ enum {
 
 #define NUM_MACS       8
 
-enum bnx2x_pci_bus_speed {
-       BNX2X_PCI_LINK_SPEED_2500 = 2500,
-       BNX2X_PCI_LINK_SPEED_5000 = 5000,
-       BNX2X_PCI_LINK_SPEED_8000 = 8000
-};
-
 void bnx2x_set_local_cmng(struct bnx2x *bp);
+
+#define MCPR_SCRATCH_BASE(bp) \
+       (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+
 #endif /* bnx2x.h */
index e66beff2704d19241695fe5028e33efb3ca95519..6e46cff5236da1b835bc7698085fb34b657e3185 100644 (file)
@@ -681,6 +681,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                }
        }
 #endif
+       skb_record_rx_queue(skb, fp->rx_queue);
        napi_gro_receive(&fp->napi, skb);
 }
 
@@ -3255,14 +3256,16 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
        if (prot == IPPROTO_TCP)
                rc |= XMIT_CSUM_TCP;
 
-       if (skb_is_gso_v6(skb)) {
-               rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
-               if (rc & XMIT_CSUM_ENC)
-                       rc |= XMIT_GSO_ENC_V6;
-       } else if (skb_is_gso(skb)) {
-               rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
-               if (rc & XMIT_CSUM_ENC)
-                       rc |= XMIT_GSO_ENC_V4;
+       if (skb_is_gso(skb)) {
+               if (skb_is_gso_v6(skb)) {
+                       rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
+                       if (rc & XMIT_CSUM_ENC)
+                               rc |= XMIT_GSO_ENC_V6;
+               } else {
+                       rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
+                       if (rc & XMIT_CSUM_ENC)
+                               rc |= XMIT_GSO_ENC_V4;
+               }
        }
 
        return rc;
index 324de5f05332e78aa6c108d23891105880ee5bd8..32d0f1435fb410b54c3105236c14a211f35bc0b2 100644 (file)
@@ -639,6 +639,9 @@ static int bnx2x_get_regs_len(struct net_device *dev)
        struct bnx2x *bp = netdev_priv(dev);
        int regdump_len = 0;
 
+       if (IS_VF(bp))
+               return 0;
+
        regdump_len = __bnx2x_get_regs_len(bp);
        regdump_len *= 4;
        regdump_len += sizeof(struct dump_header);
@@ -891,17 +894,8 @@ static void bnx2x_get_regs(struct net_device *dev,
         * will re-enable parity attentions right after the dump.
         */
 
-       /* Disable parity on path 0 */
-       bnx2x_pretend_func(bp, 0);
        bnx2x_disable_blocks_parity(bp);
 
-       /* Disable parity on path 1 */
-       bnx2x_pretend_func(bp, 1);
-       bnx2x_disable_blocks_parity(bp);
-
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
        dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
        dump_hdr.preset = DUMP_ALL_PRESETS;
        dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -928,18 +922,9 @@ static void bnx2x_get_regs(struct net_device *dev,
        /* Actually read the registers */
        __bnx2x_get_regs(bp, p);
 
-       /* Re-enable parity attentions on path 0 */
-       bnx2x_pretend_func(bp, 0);
+       /* Re-enable parity attentions */
        bnx2x_clear_blocks_parity(bp);
        bnx2x_enable_blocks_parity(bp);
-
-       /* Re-enable parity attentions on path 1 */
-       bnx2x_pretend_func(bp, 1);
-       bnx2x_clear_blocks_parity(bp);
-       bnx2x_enable_blocks_parity(bp);
-
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 }
 
 static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
@@ -993,17 +978,8 @@ static int bnx2x_get_dump_data(struct net_device *dev,
         * will re-enable parity attentions right after the dump.
         */
 
-       /* Disable parity on path 0 */
-       bnx2x_pretend_func(bp, 0);
        bnx2x_disable_blocks_parity(bp);
 
-       /* Disable parity on path 1 */
-       bnx2x_pretend_func(bp, 1);
-       bnx2x_disable_blocks_parity(bp);
-
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
        dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
        dump_hdr.preset = bp->dump_preset_idx;
        dump_hdr.version = BNX2X_DUMP_VERSION;
@@ -1032,19 +1008,10 @@ static int bnx2x_get_dump_data(struct net_device *dev,
        /* Actually read the registers */
        __bnx2x_get_preset_regs(bp, p, dump_hdr.preset);
 
-       /* Re-enable parity attentions on path 0 */
-       bnx2x_pretend_func(bp, 0);
-       bnx2x_clear_blocks_parity(bp);
-       bnx2x_enable_blocks_parity(bp);
-
-       /* Re-enable parity attentions on path 1 */
-       bnx2x_pretend_func(bp, 1);
+       /* Re-enable parity attentions */
        bnx2x_clear_blocks_parity(bp);
        bnx2x_enable_blocks_parity(bp);
 
-       /* Return to current function */
-       bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
-
        return 0;
 }
 
@@ -2900,9 +2867,16 @@ static void bnx2x_self_test(struct net_device *dev,
 
        memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
 
+       if (bnx2x_test_nvram(bp) != 0) {
+               if (!IS_MF(bp))
+                       buf[4] = 1;
+               else
+                       buf[0] = 1;
+               etest->flags |= ETH_TEST_FL_FAILED;
+       }
+
        if (!netif_running(dev)) {
-               DP(BNX2X_MSG_ETHTOOL,
-                  "Can't perform self-test when interface is down\n");
+               DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
                return;
        }
 
@@ -2964,13 +2938,7 @@ static void bnx2x_self_test(struct net_device *dev,
                /* wait until link state is restored */
                bnx2x_wait_for_link(bp, link_up, is_serdes);
        }
-       if (bnx2x_test_nvram(bp) != 0) {
-               if (!IS_MF(bp))
-                       buf[4] = 1;
-               else
-                       buf[0] = 1;
-               etest->flags |= ETH_TEST_FL_FAILED;
-       }
+
        if (bnx2x_test_intr(bp) != 0) {
                if (!IS_MF(bp))
                        buf[5] = 1;
index 32767f6aa33f473a126259e4877fa608ed51b3bf..cf1df8b62e2c2785c0560b77ac5ed4b5fc8ae8b3 100644 (file)
@@ -172,6 +172,7 @@ struct shared_hw_cfg {                       /* NVRAM Offset */
                #define SHARED_HW_CFG_LED_MAC4                       0x000c0000
                #define SHARED_HW_CFG_LED_PHY8                       0x000d0000
                #define SHARED_HW_CFG_LED_EXTPHY1                    0x000e0000
+               #define SHARED_HW_CFG_LED_EXTPHY2                    0x000f0000
 
 
        #define SHARED_HW_CFG_AN_ENABLE_MASK                0x3f000000
index 76df015f486ad9d1653efccde3538dbb47263849..c2dfea7968f452defdca6fd4b1ea68758381038e 100644 (file)
@@ -640,23 +640,35 @@ static const struct {
  * [30] MCP Latched ump_tx_parity
  * [31] MCP Latched scpad_parity
  */
-#define MISC_AEU_ENABLE_MCP_PRTY_BITS  \
+#define MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS      \
        (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
         AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
-        AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+        AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY)
+
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS  \
+       (MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS | \
         AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
 
 /* Below registers control the MCP parity attention output. When
  * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
  * enabled, when cleared - disabled.
  */
-static const u32 mcp_attn_ctl_regs[] = {
-       MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
-       MISC_REG_AEU_ENABLE4_NIG_0,
-       MISC_REG_AEU_ENABLE4_PXP_0,
-       MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
-       MISC_REG_AEU_ENABLE4_NIG_1,
-       MISC_REG_AEU_ENABLE4_PXP_1
+static const struct {
+       u32 addr;
+       u32 bits;
+} mcp_attn_ctl_regs[] = {
+       { MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+               MISC_AEU_ENABLE_MCP_PRTY_BITS },
+       { MISC_REG_AEU_ENABLE4_NIG_0,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+       { MISC_REG_AEU_ENABLE4_PXP_0,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+       { MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+               MISC_AEU_ENABLE_MCP_PRTY_BITS },
+       { MISC_REG_AEU_ENABLE4_NIG_1,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS },
+       { MISC_REG_AEU_ENABLE4_PXP_1,
+               MISC_AEU_ENABLE_MCP_PRTY_SUB_BITS }
 };
 
 static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
@@ -665,14 +677,14 @@ static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
        u32 reg_val;
 
        for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
-               reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+               reg_val = REG_RD(bp, mcp_attn_ctl_regs[i].addr);
 
                if (enable)
-                       reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+                       reg_val |= mcp_attn_ctl_regs[i].bits;
                else
-                       reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+                       reg_val &= ~mcp_attn_ctl_regs[i].bits;
 
-               REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+               REG_WR(bp, mcp_attn_ctl_regs[i].addr, reg_val);
        }
 }
 
index 51468227bf3b75fc17d2abafb8b3bd9294d8494c..20dcc02431cac441a8bf3076a7fe6db1abfb04e9 100644 (file)
@@ -3122,7 +3122,7 @@ static void bnx2x_bsc_module_sel(struct link_params *params)
 }
 
 static int bnx2x_bsc_read(struct link_params *params,
-                         struct bnx2x_phy *phy,
+                         struct bnx2x *bp,
                          u8 sl_devid,
                          u16 sl_addr,
                          u8 lc_addr,
@@ -3131,7 +3131,6 @@ static int bnx2x_bsc_read(struct link_params *params,
 {
        u32 val, i;
        int rc = 0;
-       struct bnx2x *bp = params->bp;
 
        if (xfer_cnt > 16) {
                DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
@@ -6371,9 +6370,15 @@ int bnx2x_set_led(struct link_params *params,
                         * intended override.
                         */
                        break;
-               } else
+               } else {
+                       u32 nig_led_mode = ((params->hw_led_mode <<
+                                            SHARED_HW_CFG_LED_MODE_SHIFT) ==
+                                           SHARED_HW_CFG_LED_EXTPHY2) ?
+                               (SHARED_HW_CFG_LED_PHY1 >>
+                                SHARED_HW_CFG_LED_MODE_SHIFT) : hw_led_mode;
                        REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
-                              hw_led_mode);
+                              nig_led_mode);
+               }
 
                REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
                /* Set blinking rate to ~15.9Hz */
@@ -7917,7 +7922,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
                        usleep_range(1000, 2000);
                        bnx2x_warpcore_power_module(params, 1);
                }
-               rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
+               rc = bnx2x_bsc_read(params, bp, dev_addr, addr32, 0, byte_cnt,
                                    data_array);
        } while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
 
@@ -10653,10 +10658,18 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
                                         0x40);
 
                } else {
+                       /* EXTPHY2 LED mode indicates that the 100M/1G/10G LED
+                        * sources are all wired through LED1, rather than only
+                        * 10G in other modes.
+                        */
+                       val = ((params->hw_led_mode <<
+                               SHARED_HW_CFG_LED_MODE_SHIFT) ==
+                              SHARED_HW_CFG_LED_EXTPHY2) ? 0x98 : 0x80;
+
                        bnx2x_cl45_write(bp, phy,
                                         MDIO_PMA_DEVAD,
                                         MDIO_PMA_REG_8481_LED1_MASK,
-                                        0x80);
+                                        val);
 
                        /* Tell LED3 to blink on source */
                        bnx2x_cl45_read(bp, phy,
index 82b658d8c04c47c52931b5138375cd38f0ca519a..e622cc1f96ffe58336ee8c24f12c54e1679e11b7 100644 (file)
@@ -503,9 +503,9 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
 }
 
 /* issue a dmae command over the init-channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
+                              u32 *comp)
 {
-       u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;
 
@@ -518,14 +518,14 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
        spin_lock_bh(&bp->dmae_lock);
 
        /* reset completion */
-       *wb_comp = 0;
+       *comp = 0;
 
        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
 
        /* wait for completion */
        udelay(5);
-       while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+       while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
 
                if (!cnt ||
                    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
@@ -537,7 +537,7 @@ int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
                cnt--;
                udelay(50);
        }
-       if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+       if (*comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }
@@ -574,7 +574,7 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+       rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
                bnx2x_panic();
@@ -611,7 +611,7 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
+       rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
                bnx2x_panic();
@@ -751,6 +751,10 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
        return rc;
 }
 
+#define MCPR_TRACE_BUFFER_SIZE (0x800)
+#define SCRATCH_BUFFER_SIZE(bp)        \
+       (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
+
 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
 {
        u32 addr, val;
@@ -775,7 +779,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
-       addr = trace_shmem_base - 0x800;
+
+       /* sanity */
+       if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
+           trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
+                               SCRATCH_BUFFER_SIZE(bp)) {
+               BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
+                         trace_shmem_base);
+               return;
+       }
+
+       addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
 
        /* validate TRCB signature */
        mark = REG_RD(bp, addr);
@@ -787,14 +801,17 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
        /* read cyclic buffer pointer */
        addr += 4;
        mark = REG_RD(bp, addr);
-       mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
-                       + ((mark + 0x3) & ~0x3) - 0x08000000;
+       mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
+       if (mark >= trace_shmem_base || mark < addr + 4) {
+               BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
+               return;
+       }
        printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
 
        printk("%s", lvl);
 
        /* dump buffer after the mark */
-       for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+       for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
@@ -4280,65 +4297,60 @@ static void _print_next_block(int idx, const char *blk)
        pr_cont("%s%s", idx ? ", " : "", blk);
 }
 
-static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "BRB");
+                       res |= true; /* Each bit is a real error! */
+
+                       if (print) {
+                               switch (cur_bit) {
+                               case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "BRB");
                                        _print_parity(bp,
                                                      BRB1_REG_BRB1_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "PARSER");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "PARSER");
                                        _print_parity(bp, PRS_REG_PRS_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "TSDM");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "TSDM");
                                        _print_parity(bp,
                                                      TSDM_REG_TSDM_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++,
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
                                                          "SEARCHER");
                                        _print_parity(bp, SRC_REG_SRC_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "TCM");
-                                       _print_parity(bp,
-                                                     TCM_REG_TCM_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "TSEMI");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "TCM");
+                                       _print_parity(bp, TCM_REG_TCM_PRTY_STS);
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "TSEMI");
                                        _print_parity(bp,
                                                      TSEM_REG_TSEM_PRTY_STS_0);
                                        _print_parity(bp,
                                                      TSEM_REG_TSEM_PRTY_STS_1);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "XPB");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "XPB");
                                        _print_parity(bp, GRCBASE_XPB +
                                                          PB_REG_PB_PRTY_STS);
+                                       break;
                                }
-                               break;
                        }
 
                        /* Clear the bit */
@@ -4346,53 +4358,59 @@ static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool *global,
+static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool *global,
                                            bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
+                       res |= true; /* Each bit is a real error! */
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "PBF");
+                                       _print_next_block((*par_num)++, "PBF");
                                        _print_parity(bp, PBF_REG_PBF_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "QM");
+                                       _print_next_block((*par_num)++, "QM");
                                        _print_parity(bp, QM_REG_QM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "TM");
+                                       _print_next_block((*par_num)++, "TM");
                                        _print_parity(bp, TM_REG_TM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "XSDM");
+                                       _print_next_block((*par_num)++, "XSDM");
                                        _print_parity(bp,
                                                      XSDM_REG_XSDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "XCM");
+                                       _print_next_block((*par_num)++, "XCM");
                                        _print_parity(bp, XCM_REG_XCM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "XSEMI");
+                                       _print_next_block((*par_num)++,
+                                                         "XSEMI");
                                        _print_parity(bp,
                                                      XSEM_REG_XSEM_PRTY_STS_0);
                                        _print_parity(bp,
@@ -4401,7 +4419,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "DOORBELLQ");
                                        _print_parity(bp,
                                                      DORQ_REG_DORQ_PRTY_STS);
@@ -4409,7 +4427,7 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "NIG");
+                                       _print_next_block((*par_num)++, "NIG");
                                        if (CHIP_IS_E1x(bp)) {
                                                _print_parity(bp,
                                                        NIG_REG_NIG_PRTY_STS);
@@ -4423,32 +4441,34 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "VAUX PCI CORE");
                                *global = true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "DEBUG");
+                                       _print_next_block((*par_num)++,
+                                                         "DEBUG");
                                        _print_parity(bp, DBG_REG_DBG_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "USDM");
+                                       _print_next_block((*par_num)++, "USDM");
                                        _print_parity(bp,
                                                      USDM_REG_USDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "UCM");
+                                       _print_next_block((*par_num)++, "UCM");
                                        _print_parity(bp, UCM_REG_UCM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "USEMI");
+                                       _print_next_block((*par_num)++,
+                                                         "USEMI");
                                        _print_parity(bp,
                                                      USEM_REG_USEM_PRTY_STS_0);
                                        _print_parity(bp,
@@ -4457,21 +4477,21 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                                break;
                        case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "UPB");
+                                       _print_next_block((*par_num)++, "UPB");
                                        _print_parity(bp, GRCBASE_UPB +
                                                          PB_REG_PB_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "CSDM");
+                                       _print_next_block((*par_num)++, "CSDM");
                                        _print_parity(bp,
                                                      CSDM_REG_CSDM_PRTY_STS);
                                }
                                break;
                        case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
                                if (print) {
-                                       _print_next_block(par_num++, "CCM");
+                                       _print_next_block((*par_num)++, "CCM");
                                        _print_parity(bp, CCM_REG_CCM_PRTY_STS);
                                }
                                break;
@@ -4482,80 +4502,73 @@ static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "CSEMI");
+                       res |= true; /* Each bit is a real error! */
+                       if (print) {
+                               switch (cur_bit) {
+                               case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "CSEMI");
                                        _print_parity(bp,
                                                      CSEM_REG_CSEM_PRTY_STS_0);
                                        _print_parity(bp,
                                                      CSEM_REG_CSEM_PRTY_STS_1);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "PXP");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "PXP");
                                        _print_parity(bp, PXP_REG_PXP_PRTY_STS);
                                        _print_parity(bp,
                                                      PXP2_REG_PXP2_PRTY_STS_0);
                                        _print_parity(bp,
                                                      PXP2_REG_PXP2_PRTY_STS_1);
-                               }
-                               break;
-                       case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
-                               if (print)
-                                       _print_next_block(par_num++,
-                                       "PXPPCICLOCKCLIENT");
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "CFC");
+                                       break;
+                               case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "PXPPCICLOCKCLIENT");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "CFC");
                                        _print_parity(bp,
                                                      CFC_REG_CFC_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "CDU");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "CDU");
                                        _print_parity(bp, CDU_REG_CDU_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "DMAE");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "DMAE");
                                        _print_parity(bp,
                                                      DMAE_REG_DMAE_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "IGU");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "IGU");
                                        if (CHIP_IS_E1x(bp))
                                                _print_parity(bp,
                                                        HC_REG_HC_PRTY_STS);
                                        else
                                                _print_parity(bp,
                                                        IGU_REG_IGU_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "MISC");
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "MISC");
                                        _print_parity(bp,
                                                      MISC_REG_MISC_PRTY_STS);
+                                       break;
                                }
-                               break;
                        }
 
                        /* Clear the bit */
@@ -4563,40 +4576,49 @@ static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
-                                          bool *global, bool print)
+static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool *global,
+                                           bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       bool res = false;
+       u32 cur_bit;
+       int i;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
                        switch (cur_bit) {
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
                                if (print)
-                                       _print_next_block(par_num++, "MCP ROM");
+                                       _print_next_block((*par_num)++,
+                                                         "MCP ROM");
                                *global = true;
+                               res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "MCP UMP RX");
                                *global = true;
+                               res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "MCP UMP TX");
                                *global = true;
+                               res |= true;
                                break;
                        case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
                                if (print)
-                                       _print_next_block(par_num++,
+                                       _print_next_block((*par_num)++,
                                                          "MCP SCPAD");
-                               *global = true;
+                               /* clear latched SCPAD PARITY from MCP */
+                               REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
+                                      1UL << 10);
                                break;
                        }
 
@@ -4605,45 +4627,50 @@ static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
                }
        }
 
-       return par_num;
+       return res;
 }
 
-static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
-                                           int par_num, bool print)
+static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
+                                           int *par_num, bool print)
 {
-       int i = 0;
-       u32 cur_bit = 0;
+       u32 cur_bit;
+       bool res;
+       int i;
+
+       res = false;
+
        for (i = 0; sig; i++) {
-               cur_bit = ((u32)0x1 << i);
+               cur_bit = (0x1UL << i);
                if (sig & cur_bit) {
-                       switch (cur_bit) {
-                       case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "PGLUE_B");
+                       res |= true; /* Each bit is a real error! */
+                       if (print) {
+                               switch (cur_bit) {
+                               case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+                                       _print_next_block((*par_num)++,
+                                                         "PGLUE_B");
                                        _print_parity(bp,
-                                               PGLUE_B_REG_PGLUE_B_PRTY_STS);
-                               }
-                               break;
-                       case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
-                               if (print) {
-                                       _print_next_block(par_num++, "ATC");
+                                                     PGLUE_B_REG_PGLUE_B_PRTY_STS);
+                                       break;
+                               case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+                                       _print_next_block((*par_num)++, "ATC");
                                        _print_parity(bp,
                                                      ATC_REG_ATC_PRTY_STS);
+                                       break;
                                }
-                               break;
                        }
-
                        /* Clear the bit */
                        sig &= ~cur_bit;
                }
        }
 
-       return par_num;
+       return res;
 }
 
 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
                              u32 *sig)
 {
+       bool res = false;
+
        if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
            (sig[1] & HW_PRTY_ASSERT_SET_1) ||
            (sig[2] & HW_PRTY_ASSERT_SET_2) ||
@@ -4660,23 +4687,22 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
                if (print)
                        netdev_err(bp->dev,
                                   "Parity errors detected in blocks: ");
-               par_num = bnx2x_check_blocks_with_parity0(bp,
-                       sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
-               par_num = bnx2x_check_blocks_with_parity1(bp,
-                       sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
-               par_num = bnx2x_check_blocks_with_parity2(bp,
-                       sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
-               par_num = bnx2x_check_blocks_with_parity3(
-                       sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
-               par_num = bnx2x_check_blocks_with_parity4(bp,
-                       sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+               res |= bnx2x_check_blocks_with_parity0(bp,
+                       sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
+               res |= bnx2x_check_blocks_with_parity1(bp,
+                       sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
+               res |= bnx2x_check_blocks_with_parity2(bp,
+                       sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
+               res |= bnx2x_check_blocks_with_parity3(bp,
+                       sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
+               res |= bnx2x_check_blocks_with_parity4(bp,
+                       sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
 
                if (print)
                        pr_cont("\n");
+       }
 
-               return true;
-       } else
-               return false;
+       return res;
 }
 
 /**
@@ -7126,7 +7152,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
        int port = BP_PORT(bp);
        int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
        u32 low, high;
-       u32 val;
+       u32 val, reg;
 
        DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
 
@@ -7271,6 +7297,17 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
        val |= CHIP_IS_E1(bp) ? 0 : 0x10;
        REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
 
+       /* SCPAD_PARITY should NOT trigger "close the gates" */
+       reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
+       REG_WR(bp, reg,
+              REG_RD(bp, reg) &
+              ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
+       reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
+       REG_WR(bp, reg,
+              REG_RD(bp, reg) &
+              ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
+
        bnx2x_init_block(bp, BLOCK_NIG, init_phase);
 
        if (!CHIP_IS_E1x(bp)) {
@@ -9879,7 +9916,7 @@ static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
 {
        struct bnx2x_prev_path_list *tmp_list;
-       int rc = false;
+       bool rc = false;
 
        if (down_trylock(&bnx2x_prev_sem))
                return false;
@@ -11149,6 +11186,14 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
                        bnx2x_get_cnic_mac_hwinfo(bp);
        }
 
+       if (!BP_NOMCP(bp)) {
+               /* Read physical port identifier from shmem */
+               val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+               val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+               bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
+               bp->flags |= HAS_PHYS_PORT_ID;
+       }
+
        memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
 
        if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
@@ -11685,9 +11730,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 static int bnx2x_open(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
-       bool global = false;
-       int other_engine = BP_PATH(bp) ? 0 : 1;
-       bool other_load_status, load_status;
        int rc;
 
        bp->stats_init = true;
@@ -11703,6 +11745,10 @@ static int bnx2x_open(struct net_device *dev)
         * Parity recovery is only relevant for PF driver.
         */
        if (IS_PF(bp)) {
+               int other_engine = BP_PATH(bp) ? 0 : 1;
+               bool other_load_status, load_status;
+               bool global = false;
+
                other_load_status = bnx2x_get_load_status(bp, other_engine);
                load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
                if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
@@ -11746,7 +11792,7 @@ static int bnx2x_open(struct net_device *dev)
        rc = bnx2x_nic_load(bp, LOAD_OPEN);
        if (rc)
                return rc;
-       return bnx2x_open_epilog(bp);
+       return 0;
 }
 
 /* called with rtnl_lock */
@@ -12044,6 +12090,20 @@ static int bnx2x_validate_addr(struct net_device *dev)
        return 0;
 }
 
+static int bnx2x_get_phys_port_id(struct net_device *netdev,
+                                 struct netdev_phys_port_id *ppid)
+{
+       struct bnx2x *bp = netdev_priv(netdev);
+
+       if (!(bp->flags & HAS_PHYS_PORT_ID))
+               return -EOPNOTSUPP;
+
+       ppid->id_len = sizeof(bp->phys_port_id);
+       memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
+
+       return 0;
+}
+
 static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
@@ -12073,19 +12133,15 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 #ifdef CONFIG_NET_RX_BUSY_POLL
        .ndo_busy_poll          = bnx2x_low_latency_recv,
 #endif
+       .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
 {
        struct device *dev = &bp->pdev->dev;
 
-       if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
-               bp->flags |= USING_DAC_FLAG;
-               if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
-                       dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
-                       return -EIO;
-               }
-       } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+       if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
+           dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
                dev_err(dev, "System does not support DMA, aborting\n");
                return -EIO;
        }
@@ -12237,10 +12293,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
                NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
                NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
        if (!CHIP_IS_E1x(bp)) {
-               dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
+               dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+                                   NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
                dev->hw_enc_features =
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
                        NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+                       NETIF_F_GSO_IPIP |
+                       NETIF_F_GSO_SIT |
                        NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
        }
 
@@ -12248,8 +12307,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
        dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
-       if (bp->flags & USING_DAC_FLAG)
-               dev->features |= NETIF_F_HIGHDMA;
+       dev->features |= NETIF_F_HIGHDMA;
 
        /* Add Loopback capability to the device */
        dev->hw_features |= NETIF_F_LOOPBACK;
@@ -12274,34 +12332,11 @@ err_out_release:
 
 err_out_disable:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
 err_out:
        return rc;
 }
 
-static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
-                                      enum bnx2x_pci_bus_speed *speed)
-{
-       u32 link_speed, val = 0;
-
-       pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
-       *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
-
-       link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
-
-       switch (link_speed) {
-       case 3:
-               *speed = BNX2X_PCI_LINK_SPEED_8000;
-               break;
-       case 2:
-               *speed = BNX2X_PCI_LINK_SPEED_5000;
-               break;
-       default:
-               *speed = BNX2X_PCI_LINK_SPEED_2500;
-       }
-}
-
 static int bnx2x_check_firmware(struct bnx2x *bp)
 {
        const struct firmware *firmware = bp->firmware;
@@ -12612,24 +12647,24 @@ static int set_max_cos_est(int chip_id)
                return BNX2X_MULTI_TX_COS_E1X;
        case BCM57712:
        case BCM57712_MF:
-       case BCM57712_VF:
                return BNX2X_MULTI_TX_COS_E2_E3A0;
        case BCM57800:
        case BCM57800_MF:
-       case BCM57800_VF:
        case BCM57810:
        case BCM57810_MF:
        case BCM57840_4_10:
        case BCM57840_2_20:
        case BCM57840_O:
        case BCM57840_MFO:
-       case BCM57810_VF:
        case BCM57840_MF:
-       case BCM57840_VF:
        case BCM57811:
        case BCM57811_MF:
-       case BCM57811_VF:
                return BNX2X_MULTI_TX_COS_E3B0;
+       case BCM57712_VF:
+       case BCM57800_VF:
+       case BCM57810_VF:
+       case BCM57840_VF:
+       case BCM57811_VF:
                return 1;
        default:
                pr_err("Unknown board_type (%d), aborting\n", chip_id);
@@ -12658,8 +12693,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 {
        struct net_device *dev = NULL;
        struct bnx2x *bp;
-       int pcie_width;
-       enum bnx2x_pci_bus_speed pcie_speed;
+       enum pcie_link_width pcie_width;
+       enum pci_bus_speed pcie_speed;
        int rc, max_non_def_sbs;
        int rx_count, tx_count, rss_count, doorbell_size;
        int max_cos_est;
@@ -12808,18 +12843,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
                dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
                rtnl_unlock();
        }
-
-       bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
-       BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
-                      pcie_width, pcie_speed);
-
-       BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+       if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
+           pcie_speed == PCI_SPEED_UNKNOWN ||
+           pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
+               BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
+       else
+               BNX2X_DEV_INFO(
+                      "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
                       board_info[ent->driver_data].name,
                       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
                       pcie_width,
-                      pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
-                      pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
-                      pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
+                      pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
+                      pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
+                      pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
                       "Unknown",
                       dev->base_addr, bp->pdev->irq, dev->dev_addr);
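
bnx2x_init_one() no longer decodes PCICFG_LINK_CONTROL by hand; it asks the PCI core for the weakest link on the path to the device and prints a generic message when that cannot be determined. Roughly, and only as a sketch:

#include <linux/pci.h>

/* Illustrative helper: log the minimum PCIe link leading to a device. */
static void example_report_pcie_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;

	if (pcie_get_minimum_link(pdev, &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
		dev_info(&pdev->dev, "PCIe bandwidth unknown\n");
		return;
	}

	dev_info(&pdev->dev, "PCIe x%d, %s\n", width,
		 speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
		 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : "2.5GT/s");
}
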
 
@@ -12838,7 +12874,6 @@ init_one_exit:
                pci_release_regions(pdev);
 
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        return rc;
 }
@@ -12921,7 +12956,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
                pci_release_regions(pdev);
 
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static void bnx2x_remove_one(struct pci_dev *pdev)
index 9ad012bdd9151e44ce699b640e77273ad9901796..71fffad94affa6ba994fcd454edba15de227b746 100644 (file)
@@ -470,10 +470,10 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
                                 bnx2x_vfop_qdtor, cmd->done);
                return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
                                             cmd->block);
+       } else {
+               BNX2X_ERR("VF[%d] failed to add a vfop\n", vf->abs_vfid);
+               return -ENOMEM;
        }
-       DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
-          vf->abs_vfid, vfop->rc);
-       return -ENOMEM;
 }
 
 static void
@@ -2802,7 +2802,7 @@ struct set_vf_state_cookie {
        u8 state;
 };
 
-void bnx2x_set_vf_state(void *cookie)
+static void bnx2x_set_vf_state(void *cookie)
 {
        struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
 
@@ -3225,8 +3225,9 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
        pci_disable_sriov(bp->pdev);
 }
 
-int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
-                       struct pf_vf_bulletin_content **bulletin)
+static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
+                            struct bnx2x_virtf **vf,
+                            struct pf_vf_bulletin_content **bulletin)
 {
        if (bp->state != BNX2X_STATE_OPEN) {
                BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3390,14 +3391,16 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
                if (rc) {
                        BNX2X_ERR("failed to delete eth macs\n");
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto out;
                }
 
                /* remove existing uc list macs */
                rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
                if (rc) {
                        BNX2X_ERR("failed to delete uc_list macs\n");
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto out;
                }
 
                /* configure the new mac to device */
@@ -3405,6 +3408,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
                bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
                                  BNX2X_ETH_MAC, &ramrod_flags);
 
+out:
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
        }
 
@@ -3467,7 +3471,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                                          &ramrod_flags);
                if (rc) {
                        BNX2X_ERR("failed to delete vlans\n");
-                       return -EINVAL;
+                       rc = -EINVAL;
+                       goto out;
                }
 
                /* send queue update ramrod to configure default vlan and silent
@@ -3501,7 +3506,8 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                        rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
                        if (rc) {
                                BNX2X_ERR("failed to configure vlan\n");
-                               return -EINVAL;
+                               rc =  -EINVAL;
+                               goto out;
                        }
 
                        /* configure default vlan to vf queue and set silent
@@ -3519,18 +3525,18 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                rc = bnx2x_queue_state_change(bp, &q_params);
                if (rc) {
                        BNX2X_ERR("Failed to configure default VLAN\n");
-                       return rc;
+                       goto out;
                }
 
                /* clear the flag indicating that this VF needs its vlan
-                * (will only be set if the HV configured th Vlan before vf was
-                * and we were called because the VF came up later
+                * (will only be set if the HV configured the Vlan before vf was
+                * up and we were called because the VF came up later
                 */
+out:
                vf->cfg_flags &= ~VF_CFG_VLAN;
-
                bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
        }
-       return 0;
+       return rc;
 }
 
 /* crc is the first field in the bulletin board. Compute the crc over the
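
The early returns removed in the hunks above would have bailed out of bnx2x_set_vf_mac()/bnx2x_set_vf_vlan() with the vf-pf channel still locked; routing every failure through the out: label keeps the unlock (and the VF_CFG_VLAN bookkeeping) on all paths. Reduced to its shape, with hypothetical step helpers standing in for the real ramrod calls:

/* Sketch of the lock-always-released pattern adopted above.
 * do_step_one()/do_step_two() are placeholders; the lock call is assumed
 * to pair with the bnx2x_unlock_vf_pf_channel() shown in the diff.
 */
static int example_configure_vf_vlan(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0;

	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	rc = do_step_one(bp, vf);
	if (rc)
		goto out;	/* never return with the channel held */

	rc = do_step_two(bp, vf);

out:
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
	return rc;
}
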
@@ -3637,29 +3643,6 @@ alloc_mem_err:
        return -ENOMEM;
 }
 
-int bnx2x_open_epilog(struct bnx2x *bp)
-{
-       /* Enable sriov via delayed work. This must be done via delayed work
-        * because it causes the probe of the vf devices to be run, which invoke
-        * register_netdevice which must have rtnl lock taken. As we are holding
-        * the lock right now, that could only work if the probe would not take
-        * the lock. However, as the probe of the vf may be called from other
-        * contexts as well (such as passthrough to vm fails) it can't assume
-        * the lock is being held for it. Using delayed work here allows the
-        * probe code to simply take the lock (i.e. wait for it to be released
-        * if it is being held). We only want to do this if the number of VFs
-        * was set before PF driver was loaded.
-        */
-       if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
-               smp_mb__before_clear_bit();
-               set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
-               smp_mb__after_clear_bit();
-               schedule_delayed_work(&bp->sp_rtnl_task, 0);
-       }
-
-       return 0;
-}
-
 void bnx2x_iov_channel_down(struct bnx2x *bp)
 {
        int vf_idx;
index 059f0d460af2a249e631a913ec98da290d78f323..1ff6a9366629ed88fe79a079391c92e95d1e9baf 100644 (file)
@@ -782,7 +782,6 @@ static inline int bnx2x_vf_headroom(struct bnx2x *bp)
 void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
 int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
 void bnx2x_iov_channel_down(struct bnx2x *bp);
-int bnx2x_open_epilog(struct bnx2x *bp);
 
 #else /* CONFIG_BNX2X_SRIOV */
 
@@ -842,7 +841,6 @@ static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
 static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
 static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
 static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
-static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
 
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
index 86436c77af036d7884b9cc9525646a44f3ec4acf..3b75070411aab83136ac2c245ba9c65682aefebe 100644 (file)
@@ -196,7 +196,7 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
 
        } else if (bp->func_stx) {
                *stats_comp = 0;
-               bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+               bnx2x_issue_dmae_with_comp(bp, dmae, stats_comp);
        }
 }
 
index da16953eb2ec58012f059273abf153c7e81293b9..9199adf32d33c1639dfa2354e609ef92b638ef91 100644 (file)
@@ -60,6 +60,30 @@ void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
        mutex_unlock(&bp->vf2pf_mutex);
 }
 
+/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
+static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
+                                  enum channel_tlvs req_tlv)
+{
+       struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;
+
+       do {
+               if (tlv->type == req_tlv)
+                       return tlv;
+
+               if (!tlv->length) {
+                       BNX2X_ERR("Found TLV with length 0\n");
+                       return NULL;
+               }
+
+               tlvs_list += tlv->length;
+               tlv = (struct channel_tlv *)tlvs_list;
+       } while (tlv->type != CHANNEL_TLV_LIST_END);
+
+       DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);
+
+       return NULL;
+}
+
 /* list the types and lengths of the tlvs on the buffer */
 void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
 {
@@ -196,6 +220,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
        int rc = 0, attempts = 0;
        struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
        struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
+       struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
        u32 vf_id;
        bool resources_acquired = false;
 
@@ -219,8 +244,14 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
        /* pf 2 vf bulletin board address */
        req->bulletin_addr = bp->pf2vf_bulletin_mapping;
 
+       /* Request physical port identifier */
+       bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
+                     CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
+
        /* add list termination tlv */
-       bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+       bnx2x_add_tlv(bp, req,
+                     req->first_tlv.tl.length + sizeof(struct channel_tlv),
+                     CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));
 
        /* output tlvs list */
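
Each TLV is appended at a running offset into the request buffer, which is why the LIST_END marker above now has to account for the channel_tlv just inserted for CHANNEL_TLV_PHYS_PORT_ID. A simplified sketch of that append-at-offset pattern; the types and helper are stand-ins, not the driver's real bnx2x_add_tlv()/channel_tlv:

#include <stdint.h>
#include <string.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t length;	/* covers header plus payload */
};

/* Write a TLV header at 'offset' and return where the next TLV goes. */
static size_t append_tlv(void *buf, size_t offset, uint16_t type,
			 uint16_t length)
{
	struct tlv_hdr *tlv = (struct tlv_hdr *)((uint8_t *)buf + offset);

	memset(tlv, 0, length);
	tlv->type = type;
	tlv->length = length;
	return offset + length;
}

/* Usage mirroring the acquire request above: the list terminator is placed
 * after everything already appended, hence the accumulated offset.
 *
 *   off = first_tlv_len;
 *   off = append_tlv(req, off, TLV_PHYS_PORT_ID, sizeof(struct tlv_hdr));
 *   off = append_tlv(req, off, TLV_LIST_END, sizeof(struct tlv_hdr));
 */
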
@@ -287,6 +318,15 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
                }
        }
 
+       /* Retrieve physical port id (if possible) */
+       phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
+                        bnx2x_search_tlv_list(bp, resp,
+                                              CHANNEL_TLV_PHYS_PORT_ID);
+       if (phys_port_resp) {
+               memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
+               bp->flags |= HAS_PHYS_PORT_ID;
+       }
+
        /* get HW info */
        bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
        bp->link_params.chip_id = bp->common.chip_id;
@@ -980,56 +1020,62 @@ static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
        dmae.len = len32;
 
        /* issue the command and wait for completion */
-       return bnx2x_issue_dmae_with_comp(bp, &dmae);
+       return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
 }
 
-static void bnx2x_vf_mbx_resp(struct bnx2x *bp, struct bnx2x_virtf *vf)
+static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
+                                        struct bnx2x_virtf *vf)
 {
        struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
-       u64 vf_addr;
-       dma_addr_t pf_addr;
        u16 length, type;
-       int rc;
-       struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
 
        /* prepare response */
        type = mbx->first_tlv.tl.type;
        length = type == CHANNEL_TLV_ACQUIRE ?
                sizeof(struct pfvf_acquire_resp_tlv) :
                sizeof(struct pfvf_general_resp_tlv);
-       bnx2x_add_tlv(bp, resp, 0, type, length);
-       resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
-       bnx2x_add_tlv(bp, resp, length, CHANNEL_TLV_LIST_END,
+       bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
+       bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));
+}
+
+static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
+                                      struct bnx2x_virtf *vf)
+{
+       struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
+       struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
+       dma_addr_t pf_addr;
+       u64 vf_addr;
+       int rc;
+
        bnx2x_dp_tlv_list(bp, resp);
        DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
           mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);
 
+       resp->hdr.status = bnx2x_pfvf_status_codes(vf->op_rc);
+
        /* send response */
        vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
                  mbx->first_tlv.resp_msg_offset;
        pf_addr = mbx->msg_mapping +
                  offsetof(struct bnx2x_vf_mbx_msg, resp);
 
-       /* copy the response body, if there is one, before the header, as the vf
-        * is sensitive to the header being written
+       /* Copy the response buffer. The first u64 is written afterwards, as
+        * the vf is sensitive to the header being written
         */
-       if (resp->hdr.tl.length > sizeof(u64)) {
-               length = resp->hdr.tl.length - sizeof(u64);
-               vf_addr += sizeof(u64);
-               pf_addr += sizeof(u64);
-               rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
-                                         U64_HI(vf_addr),
-                                         U64_LO(vf_addr),
-                                         length/4);
-               if (rc) {
-                       BNX2X_ERR("Failed to copy response body to VF %d\n",
-                                 vf->abs_vfid);
-                       goto mbx_error;
-               }
-               vf_addr -= sizeof(u64);
-               pf_addr -= sizeof(u64);
+       vf_addr += sizeof(u64);
+       pf_addr += sizeof(u64);
+       rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
+                                 U64_HI(vf_addr),
+                                 U64_LO(vf_addr),
+                                 (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
+       if (rc) {
+               BNX2X_ERR("Failed to copy response body to VF %d\n",
+                         vf->abs_vfid);
+               goto mbx_error;
        }
+       vf_addr -= sizeof(u64);
+       pf_addr -= sizeof(u64);
 
        /* ack the FW */
        storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
@@ -1060,6 +1106,36 @@ mbx_error:
        bnx2x_vf_release(bp, vf, false); /* non blocking */
 }
 
+static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
+                                      struct bnx2x_virtf *vf)
+{
+       bnx2x_vf_mbx_resp_single_tlv(bp, vf);
+       bnx2x_vf_mbx_resp_send_msg(bp, vf);
+}
+
+static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
+                                       struct bnx2x_virtf *vf,
+                                       void *buffer,
+                                       u16 *offset)
+{
+       struct vfpf_port_phys_id_resp_tlv *port_id;
+
+       if (!(bp->flags & HAS_PHYS_PORT_ID))
+               return;
+
+       bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
+                     sizeof(struct vfpf_port_phys_id_resp_tlv));
+
+       port_id = (struct vfpf_port_phys_id_resp_tlv *)
+                 (((u8 *)buffer) + *offset);
+       memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);
+
+       /* Offset should continue representing the offset to the tail
+        * of TLV data (outside this function scope)
+        */
+       *offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
+}
+
 static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                      struct bnx2x_vf_mbx *mbx, int vfop_status)
 {
@@ -1067,6 +1143,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
        struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
        struct pf_vf_resc *resc = &resp->resc;
        u8 status = bnx2x_pfvf_status_codes(vfop_status);
+       u16 length;
 
        memset(resp, 0, sizeof(*resp));
 
@@ -1140,9 +1217,24 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                        resc->hw_sbs[i].sb_qid);
        DP_CONT(BNX2X_MSG_IOV, "]\n");
 
+       /* prepare response */
+       length = sizeof(struct pfvf_acquire_resp_tlv);
+       bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);
+
+       /* Handle possible VF requests for physical port identifiers.
+        * 'length' should continue to indicate the offset of the first empty
+        * place in the buffer (i.e., where next TLV should be inserted)
+        */
+       if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+                                 CHANNEL_TLV_PHYS_PORT_ID))
+               bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);
+
+       bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
        /* send the response */
        vf->op_rc = vfop_status;
-       bnx2x_vf_mbx_resp(bp, vf);
+       bnx2x_vf_mbx_resp_send_msg(bp, vf);
 }
 
 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1874,6 +1966,9 @@ void bnx2x_vf_mbx(struct bnx2x *bp, struct vf_pf_event_data *vfpf_event)
        /* process the VF message header */
        mbx->first_tlv = mbx->msg->req.first_tlv;
 
+       /* Clean response buffer to refrain from falsely seeing chains */
+       memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));
+
        /* dispatch the request (will prepare the response) */
        bnx2x_vf_mbx_request(bp, vf, mbx);
        goto mbx_done;
index 1179fe06d0c7ff010b7a06102a68b601c747bf9c..208568bc7a71d72d8e19aea70f02485e17c0e8ee 100644 (file)
@@ -188,6 +188,12 @@ struct pfvf_acquire_resp_tlv {
        } resc;
 };
 
+struct vfpf_port_phys_id_resp_tlv {
+       struct channel_tlv tl;
+       u8 id[ETH_ALEN];
+       u8 padding[2];
+};
+
 #define VFPF_INIT_FLG_STATS_COALESCE   (1 << 0) /* when set the VFs queues
                                                  * stats will be coalesced on
                                                  * the leading RSS queue
@@ -398,6 +404,7 @@ enum channel_tlvs {
        CHANNEL_TLV_PF_SET_MAC,
        CHANNEL_TLV_PF_SET_VLAN,
        CHANNEL_TLV_UPDATE_RSS,
+       CHANNEL_TLV_PHYS_PORT_ID,
        CHANNEL_TLV_MAX
 };
 
index 99394bd49a139414776df9ff776c02b2fe6f3c26..f58a8b80302d9b2088ce139688b76e3a9181bb42 100644 (file)
@@ -393,7 +393,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
 
                        csk->vlan_id = path_resp->vlan_id;
 
-                       memcpy(csk->ha, path_resp->mac_addr, 6);
+                       memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
                        if (test_bit(SK_F_IPV6, &csk->flags))
                                memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
                                       sizeof(struct in6_addr));
@@ -5572,7 +5572,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
        if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
                cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
 
-       memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+       memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN);
 
        cp->cnic_ops = &cnic_bnx2x_ops;
        cp->start_hw = cnic_start_bnx2x_hw;
index 0658b43e148c1c45ecb2013696534f2ff1d2ffb0..ebbfe25acaa6e0fc927938bd54150435a1feedb5 100644 (file)
@@ -353,8 +353,8 @@ struct cnic_ulp_ops {
        atomic_t ref_count;
 };
 
-extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
 
-extern int cnic_unregister_driver(int ulp_type);
+int cnic_unregister_driver(int ulp_type);
 
 #endif
index 12d961c4ebcaf704e36b83c8624633c2fbd48dda..819d87c281bfba633d19628e47237bdf84cf03d3 100644 (file)
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
 
 #define DRV_MODULE_NAME                "tg3"
 #define TG3_MAJ_NUM                    3
-#define TG3_MIN_NUM                    133
+#define TG3_MIN_NUM                    134
 #define DRV_MODULE_VERSION     \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE     "Jul 29, 2013"
+#define DRV_MODULE_RELDATE     "Sep 16, 2013"
 
 #define RESET_KIND_SHUTDOWN    0
 #define RESET_KIND_INIT                1
@@ -337,6 +337,11 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -1326,6 +1331,12 @@ static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
        return err;
 }
 
+static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
+{
+       return tg3_writephy(tp, MII_TG3_MISC_SHDW,
+                           reg | val | MII_TG3_MISC_SHDW_WREN);
+}
+
 static int tg3_bmcr_reset(struct tg3 *tp)
 {
        u32 phy_control;
@@ -1364,7 +1375,7 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
 
        spin_lock_bh(&tp->lock);
 
-       if (tg3_readphy(tp, reg, &val))
+       if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;
 
        spin_unlock_bh(&tp->lock);
@@ -1379,7 +1390,7 @@ static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
 
        spin_lock_bh(&tp->lock);
 
-       if (tg3_writephy(tp, reg, val))
+       if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;
 
        spin_unlock_bh(&tp->lock);
@@ -1397,7 +1408,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
        u32 val;
        struct phy_device *phydev;
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
@@ -1502,6 +1513,13 @@ static int tg3_mdio_init(struct tg3 *tp)
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
+       } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
+               int addr;
+
+               addr = ssb_gige_get_phyaddr(tp->pdev);
+               if (addr < 0)
+                       return addr;
+               tp->phy_addr = addr;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;
 
@@ -1522,7 +1540,7 @@ static int tg3_mdio_init(struct tg3 *tp)
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
-       tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+       tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];
 
        for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1543,7 +1561,7 @@ static int tg3_mdio_init(struct tg3 *tp)
                return i;
        }
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
@@ -1953,7 +1971,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
        u32 old_tx_mode = tp->tx_mode;
 
        if (tg3_flag(tp, USE_PHYLIB))
-               autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
+               autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
        else
                autoneg = tp->link_config.autoneg;
 
@@ -1989,7 +2007,7 @@ static void tg3_adjust_link(struct net_device *dev)
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
-       struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        spin_lock_bh(&tp->lock);
 
@@ -2078,7 +2096,7 @@ static int tg3_phy_init(struct tg3 *tp)
        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
@@ -2105,7 +2123,7 @@ static int tg3_phy_init(struct tg3 *tp)
                                      SUPPORTED_Asym_Pause);
                break;
        default:
-               phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+               phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
                return -EINVAL;
        }
 
@@ -2123,7 +2141,7 @@ static void tg3_phy_start(struct tg3 *tp)
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;
 
-       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
@@ -2143,13 +2161,13 @@ static void tg3_phy_stop(struct tg3 *tp)
        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                return;
 
-       phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+       phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
 }
 
 static void tg3_phy_fini(struct tg3 *tp)
 {
        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
-               phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+               phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
                tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
        }
 }
@@ -2218,25 +2236,21 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
                return;
        }
 
-       reg = MII_TG3_MISC_SHDW_WREN |
-             MII_TG3_MISC_SHDW_SCR5_SEL |
-             MII_TG3_MISC_SHDW_SCR5_LPED |
+       reg = MII_TG3_MISC_SHDW_SCR5_LPED |
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
        if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
 
-       tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+       tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
 
 
-       reg = MII_TG3_MISC_SHDW_WREN |
-             MII_TG3_MISC_SHDW_APD_SEL |
-             MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+       reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
        if (enable)
                reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
 
-       tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+       tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
 }
 
 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
@@ -4027,7 +4041,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
                        struct phy_device *phydev;
                        u32 phyid, advertising;
 
-                       phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+                       phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
 
@@ -9196,10 +9210,7 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
        }
 
-       if (err)
-               return err;
-
-       return 0;
+       return err;
 }
 
 static int tg3_set_mac_addr(struct net_device *dev, void *p)
@@ -11035,7 +11046,18 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
                name = tp->dev->name;
        else {
                name = &tnapi->irq_lbl[0];
-               snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
+               if (tnapi->tx_buffers && tnapi->rx_rcb)
+                       snprintf(name, IFNAMSIZ,
+                                "%s-txrx-%d", tp->dev->name, irq_num);
+               else if (tnapi->tx_buffers)
+                       snprintf(name, IFNAMSIZ,
+                                "%s-tx-%d", tp->dev->name, irq_num);
+               else if (tnapi->rx_rcb)
+                       snprintf(name, IFNAMSIZ,
+                                "%s-rx-%d", tp->dev->name, irq_num);
+               else
+                       snprintf(name, IFNAMSIZ,
+                                "%s-%d", tp->dev->name, irq_num);
                name[IFNAMSIZ-1] = 0;
        }
 
@@ -11907,7 +11929,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                return phy_ethtool_gset(phydev, cmd);
        }
 
@@ -11974,7 +11996,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                return phy_ethtool_sset(phydev, cmd);
        }
 
@@ -12093,12 +12115,10 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 
        device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
 
-       spin_lock_bh(&tp->lock);
        if (device_may_wakeup(dp))
                tg3_flag_set(tp, WOL_ENABLE);
        else
                tg3_flag_clear(tp, WOL_ENABLE);
-       spin_unlock_bh(&tp->lock);
 
        return 0;
 }
@@ -12131,7 +12151,7 @@ static int tg3_nway_reset(struct net_device *dev)
        if (tg3_flag(tp, USE_PHYLIB)) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+               r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
        } else {
                u32 bmcr;
 
@@ -12247,7 +12267,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
                u32 newadv;
                struct phy_device *phydev;
 
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
 
                if (!(phydev->supported & SUPPORTED_Pause) ||
                    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
@@ -13194,8 +13214,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
                return -ENOMEM;
 
        tx_data = skb_put(skb, tx_len);
-       memcpy(tx_data, tp->dev->dev_addr, 6);
-       memset(tx_data + 6, 0x0, 8);
+       memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
+       memset(tx_data + ETH_ALEN, 0x0, 8);
 
        tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
 
@@ -13683,7 +13703,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                return phy_mii_ioctl(phydev, ifr, cmd);
        }
 
@@ -14921,6 +14941,12 @@ static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
                                tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
                                                 LED_CTRL_MODE_PHY_2);
+
+                       if (tg3_flag(tp, 5717_PLUS) ||
+                           tg3_asic_rev(tp) == ASIC_REV_5762)
+                               tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
+                                               LED_CTRL_BLINK_RATE_MASK;
+
                        break;
 
                case SHASTA_EXT_LED_MAC:
@@ -15759,9 +15785,12 @@ static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
-                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
                        reg = TG3PCI_GEN2_PRODID_ASICREV;
                else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
                         tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
@@ -16632,8 +16661,8 @@ static int tg3_get_macaddr_sparc(struct tg3 *tp)
        int len;
 
        addr = of_get_property(dp, "local-mac-address", &len);
-       if (addr && len == 6) {
-               memcpy(dev->dev_addr, addr, 6);
+       if (addr && len == ETH_ALEN) {
+               memcpy(dev->dev_addr, addr, ETH_ALEN);
                return 0;
        }
        return -ENODEV;
@@ -16643,7 +16672,7 @@ static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
 {
        struct net_device *dev = tp->dev;
 
-       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
        return 0;
 }
 #endif
@@ -17052,10 +17081,6 @@ static int tg3_test_dma(struct tg3 *tp)
 
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 
-#if 0
-       /* Unneeded, already done by tg3_get_invariants.  */
-       tg3_switch_clocks(tp);
-#endif
 
        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701)
@@ -17083,20 +17108,6 @@ static int tg3_test_dma(struct tg3 *tp)
                        break;
                }
 
-#if 0
-               /* validate data reached card RAM correctly. */
-               for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
-                       u32 val;
-                       tg3_read_mem(tp, 0x2100 + (i*4), &val);
-                       if (le32_to_cpu(val) != p[i]) {
-                               dev_err(&tp->pdev->dev,
-                                       "%s: Buffer corrupted on device! "
-                                       "(%d != %d)\n", __func__, val, i);
-                               /* ret = -ENODEV here? */
-                       }
-                       p[i] = 0;
-               }
-#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
                if (ret) {
@@ -17362,8 +17373,10 @@ static int tg3_init_one(struct pci_dev *pdev,
                        tg3_flag_set(tp, FLUSH_POSTED_WRITES);
                if (ssb_gige_one_dma_at_once(pdev))
                        tg3_flag_set(tp, ONE_DMA_AT_ONCE);
-               if (ssb_gige_have_roboswitch(pdev))
+               if (ssb_gige_have_roboswitch(pdev)) {
+                       tg3_flag_set(tp, USE_PHYLIB);
                        tg3_flag_set(tp, ROBOSWITCH);
+               }
                if (ssb_gige_is_rgmii(pdev))
                        tg3_flag_set(tp, RGMII_MODE);
        }
@@ -17409,9 +17422,12 @@ static int tg3_init_one(struct pci_dev *pdev,
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
+           tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
+           tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
-           tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
+           tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
+           tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
                tg3_flag_set(tp, ENABLE_APE);
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
@@ -17628,7 +17644,7 @@ static int tg3_init_one(struct pci_dev *pdev,
 
        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
-               phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+               phydev = tp->mdio_bus->phy_map[tp->phy_addr];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
@@ -17685,7 +17701,6 @@ err_out_free_res:
 err_out_disable_pdev:
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -17717,7 +17732,6 @@ static void tg3_remove_one(struct pci_dev *pdev)
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index 70257808aa37deb3c2ff0511a16a6966a8d5f9c0..5c3835aa1e1b0702d602102031a260644129390e 100644 (file)
@@ -68,6 +68,9 @@
 #define  TG3PCI_DEVICE_TIGON3_5762      0x1687
 #define  TG3PCI_DEVICE_TIGON3_5725      0x1643
 #define  TG3PCI_DEVICE_TIGON3_5727      0x16f3
+#define  TG3PCI_DEVICE_TIGON3_57764     0x1642
+#define  TG3PCI_DEVICE_TIGON3_57767     0x1683
+#define  TG3PCI_DEVICE_TIGON3_57787     0x1641
 /* 0x04 --> 0x2c unused */
 #define TG3PCI_SUBVENDOR_ID_BROADCOM           PCI_VENDOR_ID_BROADCOM
 #define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6   0x1644
index b78e69e0e52a291047e72c222530a1500e4ac003..248bc37cb41b24680441c057312e9882f905dd1d 100644 (file)
@@ -3212,7 +3212,6 @@ bnad_init(struct bnad *bnad,
        bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len);
        if (!bnad->bar0) {
                dev_err(&pdev->dev, "ioremap for bar0 failed\n");
-               pci_set_drvdata(pdev, NULL);
                return -ENOMEM;
        }
        pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
@@ -3300,17 +3299,12 @@ bnad_pci_init(struct bnad *bnad,
        err = pci_request_regions(pdev, BNAD_NAME);
        if (err)
                goto disable_device;
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                *using_dac = true;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-               if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err)
-                               goto release_regions;
-               }
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+               if (err)
+                       goto release_regions;
                *using_dac = false;
        }
        pci_set_master(pdev);
index aefee77523f2fbc52c621883ebe8fd56a5ffbfce..f7e033f8a00e30f0ed33e0f9889d14bafbf512a6 100644 (file)
@@ -372,38 +372,37 @@ extern u32                bnad_rxqs_per_cq;
 /*
  * EXTERN PROTOTYPES
  */
-extern u32 *cna_get_firmware_buf(struct pci_dev *pdev);
+u32 *cna_get_firmware_buf(struct pci_dev *pdev);
 /* Netdev entry point prototypes */
-extern void bnad_set_rx_mode(struct net_device *netdev);
-extern struct net_device_stats *bnad_get_netdev_stats(
-                               struct net_device *netdev);
-extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
-extern int bnad_enable_default_bcast(struct bnad *bnad);
-extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
-extern void bnad_set_ethtool_ops(struct net_device *netdev);
-extern void bnad_cb_completion(void *arg, enum bfa_status status);
+void bnad_set_rx_mode(struct net_device *netdev);
+struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev);
+int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
+int bnad_enable_default_bcast(struct bnad *bnad);
+void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
+void bnad_set_ethtool_ops(struct net_device *netdev);
+void bnad_cb_completion(void *arg, enum bfa_status status);
 
 /* Configuration & setup */
-extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
-extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
+void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
 
-extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
-extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
-extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
+int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
+void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
 
 /* Timer start/stop protos */
-extern void bnad_dim_timer_start(struct bnad *bnad);
+void bnad_dim_timer_start(struct bnad *bnad);
 
 /* Statistics */
-extern void bnad_netdev_qstats_fill(struct bnad *bnad,
-               struct rtnl_link_stats64 *stats);
-extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
-               struct rtnl_link_stats64 *stats);
+void bnad_netdev_qstats_fill(struct bnad *bnad,
+                            struct rtnl_link_stats64 *stats);
+void bnad_netdev_hwstats_fill(struct bnad *bnad,
+                             struct rtnl_link_stats64 *stats);
 
 /* Debugfs */
-void   bnad_debugfs_init(struct bnad *bnad);
-void   bnad_debugfs_uninit(struct bnad *bnad);
+void bnad_debugfs_init(struct bnad *bnad);
+void bnad_debugfs_uninit(struct bnad *bnad);
 
 /* MACROS */
 /* To set & get the stats counters */
index 78d6d6b970e105bb1de4c157a35af4e861e4f280..48f52882a22b07414b7fed808fe72ccd52650300 100644 (file)
 #define XGMAC_DMA_HW_FEATURE   0x00000f58      /* Enabled Hardware Features */
 
 #define XGMAC_ADDR_AE          0x80000000
-#define XGMAC_MAX_FILTER_ADDR  31
 
 /* PMT Control and Status */
 #define XGMAC_PMT_POINTER_RESET        0x80000000
@@ -384,6 +383,7 @@ struct xgmac_priv {
        struct device *device;
        struct napi_struct napi;
 
+       int max_macs;
        struct xgmac_extra_stats xstats;
 
        spinlock_t stats_lock;
@@ -1291,14 +1291,12 @@ static void xgmac_set_rx_mode(struct net_device *dev)
        netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
                 netdev_mc_count(dev), netdev_uc_count(dev));
 
-       if (dev->flags & IFF_PROMISC) {
-               writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
-               return;
-       }
+       if (dev->flags & IFF_PROMISC)
+               value |= XGMAC_FRAME_FILTER_PR;
 
        memset(hash_filter, 0, sizeof(hash_filter));
 
-       if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+       if (netdev_uc_count(dev) > priv->max_macs) {
                use_hash = true;
                value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
        }
@@ -1321,7 +1319,7 @@ static void xgmac_set_rx_mode(struct net_device *dev)
                goto out;
        }
 
-       if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+       if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
                use_hash = true;
                value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
        } else {
@@ -1342,8 +1340,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
        }
 
 out:
-       for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
-               xgmac_set_mac_addr(ioaddr, NULL, reg);
+       for (i = reg; i <= priv->max_macs; i++)
+               xgmac_set_mac_addr(ioaddr, NULL, i);
        for (i = 0; i < XGMAC_NUM_HASH; i++)
                writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
 
@@ -1761,6 +1759,13 @@ static int xgmac_probe(struct platform_device *pdev)
        uid = readl(priv->base + XGMAC_VERSION);
        netdev_info(ndev, "h/w version is 0x%x\n", uid);
 
+       /* Figure out how many valid mac address filter registers we have */
+       writel(1, priv->base + XGMAC_ADDR_HIGH(31));
+       if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
+               priv->max_macs = 31;
+       else
+               priv->max_macs = 7;
+
        writel(0, priv->base + XGMAC_DMA_INTR_ENA);
        ndev->irq = platform_get_irq(pdev, 0);
        if (ndev->irq == -ENXIO) {
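
xgmac_probe() now sizes the MAC address filter table at runtime: it writes to the highest possible XGMAC_ADDR_HIGH slot and checks whether the value reads back, falling back to the smaller table if it does not. The same detect-by-readback idea in a generic form (helper name and parameters are illustrative):

#include <linux/io.h>

/* Return how many address filter slots the core implements, by probing
 * whether the last possible register actually exists.  'probe_off' would
 * be XGMAC_ADDR_HIGH(31) in the driver above.
 */
static int example_probe_max_macs(void __iomem *base, unsigned long probe_off)
{
	writel(1, base + probe_off);
	if (readl(base + probe_off) == 1)
		return 31;	/* full-size filter table */
	return 7;		/* smaller core variant */
}
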
index 5ccbed1784d2020b4683881f26a1a99ab472ad9a..8abb46b39032df4bc13fe3ad4b0f227f9d2411e1 100644 (file)
@@ -324,30 +324,30 @@ static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
        return board_info(adap)->clock_core / 1000000;
 }
 
-extern int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
-extern int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
-extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
-
-extern void t1_interrupts_enable(adapter_t *adapter);
-extern void t1_interrupts_disable(adapter_t *adapter);
-extern void t1_interrupts_clear(adapter_t *adapter);
-extern int t1_elmer0_ext_intr_handler(adapter_t *adapter);
-extern void t1_elmer0_ext_intr(adapter_t *adapter);
-extern int t1_slow_intr_handler(adapter_t *adapter);
-
-extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
-extern const struct board_info *t1_get_board_info(unsigned int board_id);
-extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp);
+int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+void t1_interrupts_enable(adapter_t *adapter);
+void t1_interrupts_disable(adapter_t *adapter);
+void t1_interrupts_clear(adapter_t *adapter);
+int t1_elmer0_ext_intr_handler(adapter_t *adapter);
+void t1_elmer0_ext_intr(adapter_t *adapter);
+int t1_slow_intr_handler(adapter_t *adapter);
+
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+const struct board_info *t1_get_board_info(unsigned int board_id);
+const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
                                                    unsigned short ssid);
-extern int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
-extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data);
+int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
                     struct adapter_params *p);
-extern int t1_init_hw_modules(adapter_t *adapter);
-extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
-extern void t1_free_sw_modules(adapter_t *adapter);
-extern void t1_fatal_err(adapter_t *adapter);
-extern void t1_link_changed(adapter_t *adapter, int port_id);
-extern void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
+int t1_init_hw_modules(adapter_t *adapter);
+int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+void t1_free_sw_modules(adapter_t *adapter);
+void t1_fatal_err(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id);
+void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
                            int speed, int duplex, int pause);
 #endif /* _CXGB_COMMON_H_ */
index d7048db9863d905a57b23b9bc985a4c3d27f9bc9..1d021059f097579b9306e458e2c8a5cd92a41948 100644 (file)
@@ -1168,7 +1168,6 @@ out_free_dev:
        pci_release_regions(pdev);
 out_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -1347,7 +1346,6 @@ static void remove_one(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        t1_sw_reset(pdev);
 }
 
index 40c7b93ababc3d39d09e6f0ef9ba8e61af241955..eb33a31b08a01487dc17465ffbc7a4505030b9e2 100644 (file)
@@ -499,7 +499,7 @@ static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
 
 static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
 {
-       memcpy(mac_addr, cmac->instance->mac_addr, 6);
+       memcpy(mac_addr, cmac->instance->mac_addr, ETH_ALEN);
        return 0;
 }
 
@@ -526,7 +526,7 @@ static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
         */
 
        /* Store local copy */
-       memcpy(cmac->instance->mac_addr, ma, 6);
+       memcpy(cmac->instance->mac_addr, ma, ETH_ALEN);
 
        lo  = ((u32) ma[1] << 8) | (u32) ma[0];
        mid = ((u32) ma[3] << 8) | (u32) ma[2];
index b650951791dd46e81c35e972354ba133d48ee865..45d77334d7d9a6c1a053be81599edcbf8b74c3b7 100644 (file)
@@ -3374,7 +3374,6 @@ out_release_regions:
        pci_release_regions(pdev);
 out_disable_device:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 out:
        return err;
 }
@@ -3415,7 +3414,6 @@ static void remove_one(struct pci_dev *pdev)
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index 6990f6c6522164a7e602903554f3c8e3ccde83fc..81029b872bdd432a0d95f00048fa8f61143ceaa7 100644 (file)
 #define V_BUSY(x) ((x) << S_BUSY)
 #define F_BUSY    V_BUSY(1U)
 
-#define S_BUSY    31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY    V_BUSY(1U)
-
 #define A_MC7_EXT_MODE1 0x108
 
 #define A_MC7_EXT_MODE2 0x10c
 
 #define A_MC7_CAL 0x128
 
-#define S_BUSY    31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY    V_BUSY(1U)
-
-#define S_BUSY    31
-#define V_BUSY(x) ((x) << S_BUSY)
-#define F_BUSY    V_BUSY(1U)
-
 #define S_CAL_FAULT    30
 #define V_CAL_FAULT(x) ((x) << S_CAL_FAULT)
 #define F_CAL_FAULT    V_CAL_FAULT(1U)
 #define V_OP(x) ((x) << S_OP)
 #define F_OP    V_OP(1U)
 
-#define F_OP    V_OP(1U)
-#define A_SF_OP 0x6dc
-
 #define A_MC7_BIST_ADDR_BEG 0x168
 
 #define A_MC7_BIST_ADDR_END 0x16c
 #define V_CONT(x) ((x) << S_CONT)
 #define F_CONT    V_CONT(1U)
 
-#define F_CONT    V_CONT(1U)
-
 #define A_MC7_INT_ENABLE 0x178
 
 #define S_AE    17
 #define V_NICMODE(x) ((x) << S_NICMODE)
 #define F_NICMODE    V_NICMODE(1U)
 
-#define F_NICMODE    V_NICMODE(1U)
-
 #define S_IPV6ENABLE    15
 #define V_IPV6ENABLE(x) ((x) << S_IPV6ENABLE)
 #define F_IPV6ENABLE    V_IPV6ENABLE(1U)
 
 #define A_ULPRX_STAG_ULIMIT 0x530
 
-#define A_ULPRX_RQ_LLIMIT 0x534
 #define A_ULPRX_RQ_LLIMIT 0x534
 
-#define A_ULPRX_RQ_ULIMIT 0x538
 #define A_ULPRX_RQ_ULIMIT 0x538
 
 #define A_ULPRX_PBL_LLIMIT 0x53c
 
-#define A_ULPRX_PBL_ULIMIT 0x540
 #define A_ULPRX_PBL_ULIMIT 0x540
 
 #define A_ULPRX_TDDP_TAGMASK 0x524
 
-#define A_ULPRX_RQ_LLIMIT 0x534
-#define A_ULPRX_RQ_LLIMIT 0x534
-
-#define A_ULPRX_RQ_ULIMIT 0x538
-#define A_ULPRX_RQ_ULIMIT 0x538
-
-#define A_ULPRX_PBL_ULIMIT 0x540
-#define A_ULPRX_PBL_ULIMIT 0x540
-
 #define A_ULPTX_CONFIG 0x580
 
 #define S_CFG_CQE_SOP_MASK    1
 #define V_TMMODE(x) ((x) << S_TMMODE)
 #define F_TMMODE    V_TMMODE(1U)
 
-#define F_TMMODE    V_TMMODE(1U)
-
 #define A_MC5_DB_ROUTING_TABLE_INDEX 0x70c
 
 #define A_MC5_DB_FILTER_TABLE 0x710
 #define V_TXACTENABLE(x) ((x) << S_TXACTENABLE)
 #define F_TXACTENABLE    V_TXACTENABLE(1U)
 
-#define A_XGM_SERDES_CTRL0 0x8e0
-
 #define S_RESET3    23
 #define V_RESET3(x) ((x) << S_RESET3)
 #define F_RESET3    V_RESET3(1U)
index dfd1e36f57531f0e9e9cfeed14048460d96f6e30..ecd2fb3ef69596146a7cf155323e624e397b6834 100644 (file)
@@ -48,7 +48,6 @@
 #include <linux/vmalloc.h>
 #include <asm/io.h>
 #include "cxgb4_uld.h"
-#include "t4_hw.h"
 
 #define FW_VERSION_MAJOR 1
 #define FW_VERSION_MINOR 4
index c73cabdbd4c08a22fd506eef8a219f02833fb4b3..8b929eeecd2d37cd3b41e32d57bd1fa50402a230 100644 (file)
@@ -3983,6 +3983,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
        struct net_device *event_dev;
        int ret = NOTIFY_DONE;
        struct bonding *bond = netdev_priv(ifa->idev->dev);
+       struct list_head *iter;
        struct slave *slave;
        struct pci_dev *first_pdev = NULL;
 
@@ -3995,7 +3996,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
                 * in all of them only once.
                 */
                read_lock(&bond->lock);
-               bond_for_each_slave(bond, slave) {
+               bond_for_each_slave(bond, slave, iter) {
                        if (!first_pdev) {
                                ret = clip_add(slave->dev, ifa, event);
                                /* If clip_add is success then only initialize
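bond_for_each_slave() now takes a third argument: the caller declares the struct list_head cursor (the iter added above) and passes it to the macro instead of the macro hiding one internally, which is why the hunk introduces the extra local. Any other caller needs the same two-line adjustment; a minimal sketch of the new calling pattern, with handle_slave() as a hypothetical stand-in for the per-slave work:

            struct list_head *iter;         /* scratch cursor owned by the caller */
            struct slave *slave;

            bond_for_each_slave(bond, slave, iter) {
                    handle_slave(slave->dev);       /* hypothetical per-slave work */
            }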
@@ -6074,7 +6075,6 @@ sriov:
        pci_disable_device(pdev);
  out_release_regions:
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -6122,7 +6122,6 @@ static void remove_one(struct pci_dev *pdev)
                pci_disable_pcie_error_reporting(pdev);
                pci_disable_device(pdev);
                pci_release_regions(pdev);
-               pci_set_drvdata(pdev, NULL);
        } else
                pci_release_regions(pdev);
 }
index 40c22e7de15c2be52482b990354274bbc1fc3175..5f90ec5f7519a4ac6e7916396cff31446f5c614b 100644 (file)
@@ -2782,11 +2782,9 @@ err_unmap_bar:
 
 err_free_adapter:
        kfree(adapter);
-       pci_set_drvdata(pdev, NULL);
 
 err_release_regions:
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
        pci_clear_master(pdev);
 
 err_disable_device:
@@ -2851,7 +2849,6 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
                }
                iounmap(adapter->regs);
                kfree(adapter);
-               pci_set_drvdata(pdev, NULL);
        }
 
        /*
@@ -2908,7 +2905,7 @@ static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
 #define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
 
-static struct pci_device_id cxgb4vf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
        CH_DEVICE(0xb000, 0),   /* PE10K FPGA */
        CH_DEVICE(0x4800, 0),   /* T440-dbg */
        CH_DEVICE(0x4801, 0),   /* T420-cr */
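DEFINE_PCI_DEVICE_TABLE() is, in kernels of this vintage, only a thin wrapper that const-qualifies the table, roughly:

    /* approximate definition from <linux/pci.h>; check the tree for the exact form */
    #define DEFINE_PCI_DEVICE_TABLE(_table) \
            const struct pci_device_id _table[]

so the net effect of this hunk (and of the matching de4x5 hunk further down) is that the PCI ID table becomes const.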
index df296af20bd52d1052a1c05200841d55dc4acde1..8475c4cda9e4ca72ef8a2b309788620e82fb67d3 100644 (file)
@@ -1396,8 +1396,9 @@ static inline void copy_frags(struct sk_buff *skb,
  *     Builds an sk_buff from the given packet gather list.  Returns the
  *     sk_buff or %NULL if sk_buff allocation failed.
  */
-struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
-                                 unsigned int skb_len, unsigned int pull_len)
+static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+                                        unsigned int skb_len,
+                                        unsigned int pull_len)
 {
        struct sk_buff *skb;
 
@@ -1443,7 +1444,7 @@ out:
  *     Releases the pages of a packet gather list.  We do not own the last
  *     page on the list and do not free it.
  */
-void t4vf_pktgl_free(const struct pkt_gl *gl)
+static void t4vf_pktgl_free(const struct pkt_gl *gl)
 {
        int frag;
 
@@ -1640,7 +1641,7 @@ static inline void rspq_next(struct sge_rspq *rspq)
  *     on this queue.  If the system is under memory shortage use a fairly
  *     long delay to help recovery.
  */
-int process_responses(struct sge_rspq *rspq, int budget)
+static int process_responses(struct sge_rspq *rspq, int budget)
 {
        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
        int budget_left = budget;
@@ -1893,7 +1894,7 @@ static unsigned int process_intrq(struct adapter *adapter)
  * The MSI interrupt handler handles data events from SGE response queues as
  * well as error and other async events as they all use the same MSI vector.
  */
-irqreturn_t t4vf_intr_msi(int irq, void *cookie)
+static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
 {
        struct adapter *adapter = cookie;
 
index 7b756cf9474a90497de9987591c57cabfcc09f2d..ff78dfaec5087184021b863174f84adfeea59476 100644 (file)
@@ -2309,7 +2309,6 @@ err_out_release_regions:
 err_out_disable_device:
        pci_disable_device(pdev);
 err_out_free_netdev:
-       pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
 
        return err;
@@ -2338,7 +2337,6 @@ static void enic_remove(struct pci_dev *pdev)
                enic_iounmap(enic);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
                free_netdev(netdev);
        }
 }
index 5f5896e522d2c6068d7b6bc9c685cbc279be48d8..7080ad6c401409199b091021aeed398dda61062c 100644 (file)
@@ -158,18 +158,6 @@ static inline board_info_t *to_dm9000_board(struct net_device *dev)
 
 /* DM9000 network board routine ---------------------------- */
 
-static void
-dm9000_reset(board_info_t * db)
-{
-       dev_dbg(db->dev, "resetting device\n");
-
-       /* RESET device */
-       writeb(DM9000_NCR, db->io_addr);
-       udelay(200);
-       writeb(NCR_RST, db->io_data);
-       udelay(200);
-}
-
 /*
  *   Read a byte from I/O port
  */
@@ -191,6 +179,27 @@ iow(board_info_t * db, int reg, int value)
        writeb(value, db->io_data);
 }
 
+static void
+dm9000_reset(board_info_t *db)
+{
+       dev_dbg(db->dev, "resetting device\n");
+
+       /* Reset DM9000, see DM9000 Application Notes V1.22 Jun 11, 2004 page 29
+        * The essential point is that we have to do a double reset, and the
+        * instruction is to set LBK into MAC internal loopback mode.
+        */
+       iow(db, DM9000_NCR, 0x03);
+       udelay(100); /* Application note says at least 20 us */
+       if (ior(db, DM9000_NCR) & 1)
+               dev_err(db->dev, "dm9000 did not respond to first reset\n");
+
+       iow(db, DM9000_NCR, 0);
+       iow(db, DM9000_NCR, 0x03);
+       udelay(100);
+       if (ior(db, DM9000_NCR) & 1)
+               dev_err(db->dev, "dm9000 did not respond to second reset\n");
+}
+
 /* routines for sending block to chip */
 
 static void dm9000_outblk_8bit(void __iomem *reg, void *data, int count)
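Decoding the magic numbers in the new dm9000_reset() above: NCR bit 0 is the self-clearing software-reset bit and bits 2:1 are the LBK loopback field, so the 0x03 written twice is, as far as the quoted application note suggests, "reset with LBK set to MAC internal loopback", and the "& 1" poll checks that the reset bit has cleared. The same step with illustrative names (dm9000.h may spell the loopback bit differently):

    /* NCR_RST is (1 << 0) in dm9000.h; the loopback name below is illustrative */
    #define NCR_LBK_MAC     (1 << 1)        /* LBK field = 01: MAC internal loopback */

            iow(db, DM9000_NCR, NCR_RST | NCR_LBK_MAC);     /* == iow(db, DM9000_NCR, 0x03) */
            udelay(100);
            if (ior(db, DM9000_NCR) & NCR_RST)              /* == the "& 1" poll */
                    dev_err(db->dev, "reset bit did not self-clear\n");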
@@ -744,15 +753,20 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
 static void dm9000_show_carrier(board_info_t *db,
                                unsigned carrier, unsigned nsr)
 {
+       int lpa;
        struct net_device *ndev = db->ndev;
+       struct mii_if_info *mii = &db->mii;
        unsigned ncr = dm9000_read_locked(db, DM9000_NCR);
 
-       if (carrier)
-               dev_info(db->dev, "%s: link up, %dMbps, %s-duplex, no LPA\n",
+       if (carrier) {
+               lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA);
+               dev_info(db->dev,
+                        "%s: link up, %dMbps, %s-duplex, lpa 0x%04X\n",
                         ndev->name, (nsr & NSR_SPEED) ? 10 : 100,
-                        (ncr & NCR_FDX) ? "full" : "half");
-       else
+                        (ncr & NCR_FDX) ? "full" : "half", lpa);
+       } else {
                dev_info(db->dev, "%s: link down\n", ndev->name);
+       }
 }
 
 static void
@@ -890,9 +904,15 @@ dm9000_init_dm9000(struct net_device *dev)
                        (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
 
        iow(db, DM9000_GPCR, GPCR_GEP_CNTL);    /* Let GPIO0 output */
+       iow(db, DM9000_GPR, 0);
 
-       dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
-       dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
+       /* If we are dealing with DM9000B, some extra steps are required: a
+        * manual phy reset, and setting init params.
+        */
+       if (db->type == TYPE_DM9000B) {
+               dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET);
+               dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM);
+       }
 
        ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
 
@@ -1603,7 +1623,7 @@ dm9000_probe(struct platform_device *pdev)
 
        if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
                mac_src = "platform data";
-               memcpy(ndev->dev_addr, pdata->dev_addr, 6);
+               memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
        }
 
        if (!is_valid_ether_addr(ndev->dev_addr)) {
index eaab73cf27caffbbabb2904c47be6fc7941f5303..38148b0e3a952a348977de0b2d459b9b4436eee6 100644 (file)
@@ -2110,7 +2110,6 @@ static void de_remove_one(struct pci_dev *pdev)
        iounmap(de->regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 }
 
index 263b92c00cbfb34dbc3b1904cd2ddacafdeb48cc..c05b66dfcc30bc9c8e6f300f3d1c47300cd73808 100644 (file)
@@ -2328,7 +2328,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
        pci_disable_device (pdev);
 }
 
-static struct pci_device_id de4x5_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de4x5_pci_tbl) = {
         { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
         { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
index 83139307861cd720edfd78d71d7abbe6b2265ff5..5ad9e3e3c0b8a5095564ae4c4369c3ac531c9591 100644 (file)
@@ -523,7 +523,6 @@ err_out_res:
 err_out_disable:
        pci_disable_device(pdev);
 err_out_free:
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 
        return err;
@@ -548,8 +547,6 @@ static void dmfe_remove_one(struct pci_dev *pdev)
                                        db->buf_pool_ptr, db->buf_pool_dma_ptr);
                pci_release_regions(pdev);
                free_netdev(dev);       /* free board information */
-
-               pci_set_drvdata(pdev, NULL);
        }
 
        DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
index 4e8cfa2ac803abb13c9c692ef971618cc26d664b..add05f14b38be17311f3344bd23860064fac8ce1 100644 (file)
@@ -1939,7 +1939,6 @@ static void tulip_remove_one(struct pci_dev *pdev)
        pci_iounmap(pdev, tp->base_addr);
        free_netdev (dev);
        pci_release_regions (pdev);
-       pci_set_drvdata (pdev, NULL);
 
        /* pci_power_off (pdev, -1); */
 }
index 93845afe1cea105f21751c4bea54100405799b42..a5397b130724faa9bb4fb9d54881226eb03695bd 100644 (file)
@@ -429,7 +429,6 @@ err_out_release:
 err_out_disable:
        pci_disable_device(pdev);
 err_out_free:
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 
        return err;
@@ -450,7 +449,6 @@ static void uli526x_remove_one(struct pci_dev *pdev)
                                db->buf_pool_ptr, db->buf_pool_dma_ptr);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 }
 
index c7b04ecf5b497ea26f969ce52eeaffccfc0ee5ef..62fe512bb2167fb682f63e26e2678e6a5df0cb6d 100644 (file)
@@ -468,7 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 err_out_cleardev:
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
        pci_release_regions(pdev);
@@ -1542,8 +1541,6 @@ static void w840_remove1(struct pci_dev *pdev)
                pci_iounmap(pdev, np->base_addr);
                free_netdev(dev);
        }
-
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
index 9b84cb04fe5fccded9f26fe010cf16ea6bb36c8a..ab7ebac6fbea0e6fe579b29d398b2c73fc369d4b 100644 (file)
@@ -289,7 +289,6 @@ out:
 err_unmap:
        pci_iounmap(pdev, private->ioaddr);
 reg_fail:
-       pci_set_drvdata(pdev, NULL);
        dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
 tx_buf_fail:
        dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
@@ -317,7 +316,6 @@ static void xircom_remove(struct pci_dev *pdev)
 
        unregister_netdev(dev);
        pci_iounmap(pdev, card->ioaddr);
-       pci_set_drvdata(pdev, NULL);
        dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
        dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
        free_netdev(dev);
index afa8e3af2c4d6840a9eee8c9c46d0e3f6df22a02..4fb756d219f700bfb82a37d62e9cb078fca22024 100644 (file)
@@ -1746,7 +1746,6 @@ rio_remove1 (struct pci_dev *pdev)
                pci_release_regions (pdev);
                pci_disable_device (pdev);
        }
-       pci_set_drvdata (pdev, NULL);
 }
 
 static struct pci_driver rio_driver = {
index bf3bf6f22c998b7c7ef2563be2041737a15959fe..113cd799a131f925fcbd3b5946d4f185249d376f 100644 (file)
@@ -703,7 +703,6 @@ err_out_unmap_tx:
        dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE,
                np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
 err_out_res:
        pci_release_regions(pdev);
@@ -1941,7 +1940,6 @@ static void sundance_remove1(struct pci_dev *pdev)
            pci_iounmap(pdev, np->base);
            pci_release_regions(pdev);
            free_netdev(dev);
-           pci_set_drvdata(pdev, NULL);
        }
 }
 
index db020230bd0bba5ec72186bed9539357ec315abc..1bce77fdbd993240d0299901307f79aab7159da7 100644 (file)
@@ -34,7 +34,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER                        "4.9.134.0u"
+#define DRV_VER                        "4.9.224.0u"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "Emulex BladeEngine2"
 #define BE3_NAME               "Emulex BladeEngine3"
@@ -89,7 +89,7 @@ static inline char *nic_name(struct pci_dev *pdev)
 
 #define BE_NUM_VLANS_SUPPORTED 64
 #define BE_UMC_NUM_VLANS_SUPPORTED     15
-#define BE_MAX_EQD             96u
+#define BE_MAX_EQD             128u
 #define        BE_MAX_TX_FRAG_COUNT    30
 
 #define EVNT_Q_LEN             1024
@@ -201,6 +201,17 @@ struct be_eq_obj {
        struct be_adapter *adapter;
 } ____cacheline_aligned_in_smp;
 
+struct be_aic_obj {            /* Adaptive interrupt coalescing (AIC) info */
+       bool enable;
+       u32 min_eqd;            /* in usecs */
+       u32 max_eqd;            /* in usecs */
+       u32 prev_eqd;           /* in usecs */
+       u32 et_eqd;             /* configured val when aic is off */
+       ulong jiffies;
+       u64 rx_pkts_prev;       /* Used to calculate RX pps */
+       u64 tx_reqs_prev;       /* Used to calculate TX pps */
+};
+
 struct be_mcc_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
@@ -215,6 +226,7 @@ struct be_tx_stats {
        u64 tx_compl;
        ulong tx_jiffies;
        u32 tx_stops;
+       u32 tx_drv_drops;       /* pkts dropped by driver */
        struct u64_stats_sync sync;
        struct u64_stats_sync sync_compl;
 };
@@ -239,15 +251,12 @@ struct be_rx_page_info {
 struct be_rx_stats {
        u64 rx_bytes;
        u64 rx_pkts;
-       u64 rx_pkts_prev;
-       ulong rx_jiffies;
        u32 rx_drops_no_skbs;   /* skb allocation errors */
        u32 rx_drops_no_frags;  /* HW has no fetched frags */
        u32 rx_post_fail;       /* page post alloc failures */
        u32 rx_compl;
        u32 rx_mcast_pkts;
        u32 rx_compl_err;       /* completions with err set */
-       u32 rx_pps;             /* pkts per second */
        struct u64_stats_sync sync;
 };
 
@@ -316,6 +325,11 @@ struct be_drv_stats {
        u32 rx_input_fifo_overflow_drop;
        u32 pmem_fifo_overflow_drop;
        u32 jabber_events;
+       u32 rx_roce_bytes_lsd;
+       u32 rx_roce_bytes_msd;
+       u32 rx_roce_frames;
+       u32 roce_drops_payload_len;
+       u32 roce_drops_crc;
 };
 
 struct be_vf_cfg {
@@ -405,6 +419,7 @@ struct be_adapter {
        u32 big_page_size;      /* Compounded page size shared by rx wrbs */
 
        struct be_drv_stats drv_stats;
+       struct be_aic_obj aic_obj[MAX_EVT_QS];
        u16 vlans_added;
        u8 vlan_tag[VLAN_N_VID];
        u8 vlan_prio_bmap;      /* Available Priority BitMap */
@@ -472,8 +487,8 @@ struct be_adapter {
 
 #define be_physfn(adapter)             (!adapter->virtfn)
 #define        sriov_enabled(adapter)          (adapter->num_vfs > 0)
-#define sriov_want(adapter)             (be_max_vfs(adapter) && num_vfs && \
-                                        be_physfn(adapter))
+#define sriov_want(adapter)             (be_physfn(adapter) && \
+                                        (num_vfs || pci_num_vf(adapter->pdev)))
 #define for_all_vfs(adapter, vf_cfg, i)                                        \
        for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
                i++, vf_cfg++)
@@ -696,27 +711,27 @@ static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
        return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
 }
 
-extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
-               u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
-extern void be_parse_stats(struct be_adapter *adapter);
-extern int be_load_fw(struct be_adapter *adapter, u8 *func);
-extern bool be_is_wol_supported(struct be_adapter *adapter);
-extern bool be_pause_supported(struct be_adapter *adapter);
-extern u32 be_get_fw_log_level(struct be_adapter *adapter);
+void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
+                 u16 num_popped);
+void be_link_status_update(struct be_adapter *adapter, u8 link_status);
+void be_parse_stats(struct be_adapter *adapter);
+int be_load_fw(struct be_adapter *adapter, u8 *func);
+bool be_is_wol_supported(struct be_adapter *adapter);
+bool be_pause_supported(struct be_adapter *adapter);
+u32 be_get_fw_log_level(struct be_adapter *adapter);
 int be_update_queues(struct be_adapter *adapter);
 int be_poll(struct napi_struct *napi, int budget);
 
 /*
  * internal function to initialize-cleanup roce device.
  */
-extern void be_roce_dev_add(struct be_adapter *);
-extern void be_roce_dev_remove(struct be_adapter *);
+void be_roce_dev_add(struct be_adapter *);
+void be_roce_dev_remove(struct be_adapter *);
 
 /*
  * internal function to open-close roce device during ifup-ifdown.
  */
-extern void be_roce_dev_open(struct be_adapter *);
-extern void be_roce_dev_close(struct be_adapter *);
+void be_roce_dev_open(struct be_adapter *);
+void be_roce_dev_close(struct be_adapter *);
 
 #endif                         /* BE_H */
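The prototype churn above, and the much larger matching block in be_cmds.h below (where only be_cmd_modify_eqd() genuinely changes signature, gaining an array of struct be_set_eqd plus a count for the batched AIC update), only drops the redundant extern storage-class specifier and re-wraps the argument lists. Function declarations are extern by default, so both spellings declare the same symbol:

    /* equivalent declarations */
    extern void be_parse_stats(struct be_adapter *adapter);
    void be_parse_stats(struct be_adapter *adapter);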
index bd0e0c0bbcd8e0e21d5295ef92aea6fb769d2c69..2d554366b3429547687a861fcaaa8df99e6529fe 100644 (file)
@@ -1198,7 +1198,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
 
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
-               req->if_id = cpu_to_le16(adapter->if_handle);
        } else if (BEx_chip(adapter)) {
                if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
                        req->hdr.version = 2;
@@ -1206,6 +1205,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
                req->hdr.version = 2;
        }
 
+       if (req->hdr.version > 0)
+               req->if_id = cpu_to_le16(adapter->if_handle);
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;
@@ -1435,8 +1436,12 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
                OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
 
        /* version 1 of the cmd is not supported only by BE2 */
-       if (!BE2_chip(adapter))
+       if (BE2_chip(adapter))
+               hdr->version = 0;
+       if (BE3_chip(adapter) || lancer_chip(adapter))
                hdr->version = 1;
+       else
+               hdr->version = 2;
 
        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;
@@ -1718,11 +1723,12 @@ err:
 /* set the EQ delay interval of an EQ to specified value
  * Uses async mcc
  */
-int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
+                     int num)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_modify_eq_delay *req;
-       int status = 0;
+       int status = 0, i;
 
        spin_lock_bh(&adapter->mcc_lock);
 
@@ -1736,13 +1742,15 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
 
-       req->num_eq = cpu_to_le32(1);
-       req->delay[0].eq_id = cpu_to_le32(eq_id);
-       req->delay[0].phase = 0;
-       req->delay[0].delay_multiplier = cpu_to_le32(eqd);
+       req->num_eq = cpu_to_le32(num);
+       for (i = 0; i < num; i++) {
+               req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+               req->set_eqd[i].phase = 0;
+               req->set_eqd[i].delay_multiplier =
+                               cpu_to_le32(set_eqd[i].delay_multiplier);
+       }
 
        be_mcc_notify(adapter);
-
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -3519,7 +3527,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
        struct be_cmd_enable_disable_vf *req;
        int status;
 
-       if (!lancer_chip(adapter))
+       if (BEx_chip(adapter))
                return 0;
 
        spin_lock_bh(&adapter->mcc_lock);
index 108ca8abf0af2321a6f5db5fe289af658b0bfcdf..88708372d5e5e1d922e3690e71fdc1bce3686e78 100644 (file)
@@ -1057,14 +1057,16 @@ struct be_cmd_resp_get_flow_control {
 } __packed;
 
 /******************** Modify EQ Delay *******************/
+struct be_set_eqd {
+       u32 eq_id;
+       u32 phase;
+       u32 delay_multiplier;
+};
+
 struct be_cmd_req_modify_eq_delay {
        struct be_cmd_req_hdr hdr;
        u32 num_eq;
-       struct {
-               u32 eq_id;
-               u32 phase;
-               u32 delay_multiplier;
-       } delay[8];
+       struct be_set_eqd set_eqd[MAX_EVT_QS];
 } __packed;
 
 struct be_cmd_resp_modify_eq_delay {
@@ -1660,6 +1662,67 @@ struct be_erx_stats_v1 {
        u32 rsvd[4];
 };
 
+struct be_port_rxf_stats_v2 {
+       u32 rsvd0[10];
+       u32 roce_bytes_received_lsd;
+       u32 roce_bytes_received_msd;
+       u32 rsvd1[5];
+       u32 roce_frames_received;
+       u32 rx_crc_errors;
+       u32 rx_alignment_symbol_errors;
+       u32 rx_pause_frames;
+       u32 rx_priority_pause_frames;
+       u32 rx_control_frames;
+       u32 rx_in_range_errors;
+       u32 rx_out_range_errors;
+       u32 rx_frame_too_long;
+       u32 rx_address_filtered;
+       u32 rx_dropped_too_small;
+       u32 rx_dropped_too_short;
+       u32 rx_dropped_header_too_small;
+       u32 rx_dropped_tcp_length;
+       u32 rx_dropped_runt;
+       u32 rsvd2[10];
+       u32 rx_ip_checksum_errs;
+       u32 rx_tcp_checksum_errs;
+       u32 rx_udp_checksum_errs;
+       u32 rsvd3[7];
+       u32 rx_switched_unicast_packets;
+       u32 rx_switched_multicast_packets;
+       u32 rx_switched_broadcast_packets;
+       u32 rsvd4[3];
+       u32 tx_pauseframes;
+       u32 tx_priority_pauseframes;
+       u32 tx_controlframes;
+       u32 rsvd5[10];
+       u32 rxpp_fifo_overflow_drop;
+       u32 rx_input_fifo_overflow_drop;
+       u32 pmem_fifo_overflow_drop;
+       u32 jabber_events;
+       u32 rsvd6[3];
+       u32 rx_drops_payload_size;
+       u32 rx_drops_clipped_header;
+       u32 rx_drops_crc;
+       u32 roce_drops_payload_len;
+       u32 roce_drops_crc;
+       u32 rsvd7[19];
+};
+
+struct be_rxf_stats_v2 {
+       struct be_port_rxf_stats_v2 port[4];
+       u32 rsvd0[2];
+       u32 rx_drops_no_pbuf;
+       u32 rx_drops_no_txpb;
+       u32 rx_drops_no_erx_descr;
+       u32 rx_drops_no_tpre_descr;
+       u32 rsvd1[6];
+       u32 rx_drops_too_many_frags;
+       u32 rx_drops_invalid_ring;
+       u32 forwarded_packets;
+       u32 rx_drops_mtu;
+       u32 rsvd2[35];
+};
+
 struct be_hw_stats_v1 {
        struct be_rxf_stats_v1 rxf;
        u32 rsvd0[BE_TXP_SW_SZ];
@@ -1678,6 +1741,29 @@ struct be_cmd_resp_get_stats_v1 {
        struct be_hw_stats_v1 hw_stats;
 };
 
+struct be_erx_stats_v2 {
+       u32 rx_drops_no_fragments[136];     /* dwordS 0 to 135*/
+       u32 rsvd[3];
+};
+
+struct be_hw_stats_v2 {
+       struct be_rxf_stats_v2 rxf;
+       u32 rsvd0[BE_TXP_SW_SZ];
+       struct be_erx_stats_v2 erx;
+       struct be_pmem_stats pmem;
+       u32 rsvd1[18];
+};
+
+struct be_cmd_req_get_stats_v2 {
+       struct be_cmd_req_hdr hdr;
+       u8 rsvd[sizeof(struct be_hw_stats_v2)];
+};
+
+struct be_cmd_resp_get_stats_v2 {
+       struct be_cmd_resp_hdr hdr;
+       struct be_hw_stats_v2 hw_stats;
+};
+
 /************** get fat capabilites *******************/
 #define MAX_MODULES 27
 #define MAX_MODES 4
@@ -1865,137 +1951,120 @@ struct be_cmd_resp_get_iface_list {
        struct be_if_desc if_desc;
 };
 
-extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_fw_wait_ready(struct be_adapter *adapter);
-extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
-                                bool permanent, u32 if_handle, u32 pmac_id);
-extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
-                       u32 if_id, u32 *pmac_id, u32 domain);
-extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
-                       int pmac_id, u32 domain);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
-                           u32 en_flags, u32 *if_handle, u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
-                       u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
-extern int be_cmd_cq_create(struct be_adapter *adapter,
-                       struct be_queue_info *cq, struct be_queue_info *eq,
-                       bool no_delay, int num_cqe_dma_coalesce);
-extern int be_cmd_mccq_create(struct be_adapter *adapter,
-                       struct be_queue_info *mccq,
-                       struct be_queue_info *cq);
-extern int be_cmd_txq_create(struct be_adapter *adapter,
-                       struct be_tx_obj *txo);
-extern int be_cmd_rxq_create(struct be_adapter *adapter,
-                       struct be_queue_info *rxq, u16 cq_id,
-                       u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
-extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
-                       int type);
-extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
-                       struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
-                                   u8 *link_status, u32 dom);
-extern int be_cmd_reset(struct be_adapter *adapter);
-extern int be_cmd_get_stats(struct be_adapter *adapter,
-                       struct be_dma_mem *nonemb_cmd);
-extern int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
-                       struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
-               char *fw_on_flash);
-
-extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
-extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
-                       u16 *vtag_array, u32 num, bool untagged,
-                       bool promiscuous);
-extern int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
-extern int be_cmd_set_flow_control(struct be_adapter *adapter,
-                       u32 tx_fc, u32 rx_fc);
-extern int be_cmd_get_flow_control(struct be_adapter *adapter,
-                       u32 *tx_fc, u32 *rx_fc);
-extern int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
+int be_pci_fnum_get(struct be_adapter *adapter);
+int be_fw_wait_ready(struct be_adapter *adapter);
+int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
+                         bool permanent, u32 if_handle, u32 pmac_id);
+int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
+                   u32 *pmac_id, u32 domain);
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
+                   u32 domain);
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+                    u32 *if_handle, u32 domain);
+int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle, u32 domain);
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
+int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
+                    struct be_queue_info *eq, bool no_delay,
+                    int num_cqe_dma_coalesce);
+int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq,
+                      struct be_queue_info *cq);
+int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo);
+int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq,
+                     u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id);
+int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
+                    int type);
+int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
+int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
+                            u8 *link_status, u32 dom);
+int be_cmd_reset(struct be_adapter *adapter);
+int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
+int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
+                              struct be_dma_mem *nonemb_cmd);
+int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
+                     char *fw_on_flash);
+int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
+int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
+                      u32 num, bool untagged, bool promiscuous);
+int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
+int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
+int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
+int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
                        u32 *function_mode, u32 *function_caps, u16 *asic_rev);
-extern int be_cmd_reset_function(struct be_adapter *adapter);
-extern int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
-                            u32 rss_hash_opts, u16 table_size);
-extern int be_process_mcc(struct be_adapter *adapter);
-extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
-                       u8 port_num, u8 beacon, u8 status, u8 state);
-extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
-                       u8 port_num, u32 *state);
-extern int be_cmd_write_flashrom(struct be_adapter *adapter,
-                       struct be_dma_mem *cmd, u32 flash_oper,
-                       u32 flash_opcode, u32 buf_size);
-extern int lancer_cmd_write_object(struct be_adapter *adapter,
-                                  struct be_dma_mem *cmd,
-                                  u32 data_size, u32 data_offset,
-                                  const char *obj_name,
-                                  u32 *data_written, u8 *change_status,
-                                  u8 *addn_status);
+int be_cmd_reset_function(struct be_adapter *adapter);
+int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
+                     u32 rss_hash_opts, u16 table_size);
+int be_process_mcc(struct be_adapter *adapter);
+int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
+                           u8 status, u8 state);
+int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num,
+                           u32 *state);
+int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
+                         u32 flash_oper, u32 flash_opcode, u32 buf_size);
+int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+                           u32 data_size, u32 data_offset,
+                           const char *obj_name, u32 *data_written,
+                           u8 *change_status, u8 *addn_status);
 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
-               u32 data_size, u32 data_offset, const char *obj_name,
-               u32 *data_read, u32 *eof, u8 *addn_status);
+                          u32 data_size, u32 data_offset, const char *obj_name,
+                          u32 *data_read, u32 *eof, u8 *addn_status);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
-                               int offset);
-extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
-                               struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_fw_init(struct be_adapter *adapter);
-extern int be_cmd_fw_clean(struct be_adapter *adapter);
-extern void be_async_mcc_enable(struct be_adapter *adapter);
-extern void be_async_mcc_disable(struct be_adapter *adapter);
-extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
-                               u32 loopback_type, u32 pkt_size,
-                               u32 num_pkts, u64 pattern);
-extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
-                       u32 byte_cnt, struct be_dma_mem *cmd);
-extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
-                               struct be_dma_mem *nonemb_cmd);
-extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
-                               u8 loopback_type, u8 enable);
-extern int be_cmd_get_phy_info(struct be_adapter *adapter);
-extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_error(struct be_adapter *adapter);
-extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
-extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
-extern int be_cmd_req_native_mode(struct be_adapter *adapter);
-extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
-extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
-                                   u32 *privilege, u32 domain);
-extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
-                                   u32 privileges, u32 vf_num);
-extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
-                                   bool *pmac_id_active, u32 *pmac_id,
-                                   u8 domain);
-extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
-                                u8 *mac);
-extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
-extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
-                                               u8 mac_count, u32 domain);
-extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
-                         u32 dom);
-extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
-                                u32 domain, u16 intf_id, u16 hsw_mode);
-extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
-                                u32 domain, u16 intf_id, u8 *mode);
-extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
-extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
-                                         struct be_dma_mem *cmd);
-extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
-                                         struct be_dma_mem *cmd,
-                                         struct be_fat_conf_params *cfgs);
-extern int lancer_wait_ready(struct be_adapter *adapter);
-extern int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
-extern int lancer_initiate_dump(struct be_adapter *adapter);
-extern bool dump_present(struct be_adapter *adapter);
-extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
-extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
+                        int offset);
+int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
+                           struct be_dma_mem *nonemb_cmd);
+int be_cmd_fw_init(struct be_adapter *adapter);
+int be_cmd_fw_clean(struct be_adapter *adapter);
+void be_async_mcc_enable(struct be_adapter *adapter);
+void be_async_mcc_disable(struct be_adapter *adapter);
+int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
+                        u32 loopback_type, u32 pkt_size, u32 num_pkts,
+                        u64 pattern);
+int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt,
+                       struct be_dma_mem *cmd);
+int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+                           struct be_dma_mem *nonemb_cmd);
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+                       u8 loopback_type, u8 enable);
+int be_cmd_get_phy_info(struct be_adapter *adapter);
+int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
+void be_detect_error(struct be_adapter *adapter);
+int be_cmd_get_die_temperature(struct be_adapter *adapter);
+int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
+int be_cmd_req_native_mode(struct be_adapter *adapter);
+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
+                            u32 domain);
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+                            u32 vf_num);
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+                            bool *pmac_id_active, u32 *pmac_id, u8 domain);
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id, u8 *mac);
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count,
+                       u32 domain);
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom);
+int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain,
+                         u16 intf_id, u16 hsw_mode);
+int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain,
+                         u16 intf_id, u8 *mode);
+int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
+int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
+                                  struct be_dma_mem *cmd);
+int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
+                                  struct be_dma_mem *cmd,
+                                  struct be_fat_conf_params *cfgs);
+int lancer_wait_ready(struct be_adapter *adapter);
+int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
+int lancer_initiate_dump(struct be_adapter *adapter);
+bool dump_present(struct be_adapter *adapter);
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
 int be_cmd_get_func_config(struct be_adapter *adapter,
                           struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 domain);
-extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
-                                    u8 domain);
-extern int be_cmd_get_if_id(struct be_adapter *adapter,
-                           struct be_vf_cfg *vf_cfg, int vf_num);
-extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
-extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
+int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
+                    int vf_num);
+int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
+int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
index b440a1fac77b2883b3417eeab76861cc02ad42b9..08330034d9efab6f303025e95a17775caf657f50 100644 (file)
@@ -116,7 +116,12 @@ static const struct be_ethtool_stat et_stats[] = {
        {DRVSTAT_INFO(rx_drops_mtu)},
        /* Number of packets dropped due to random early drop function */
        {DRVSTAT_INFO(eth_red_drops)},
-       {DRVSTAT_INFO(be_on_die_temperature)}
+       {DRVSTAT_INFO(be_on_die_temperature)},
+       {DRVSTAT_INFO(rx_roce_bytes_lsd)},
+       {DRVSTAT_INFO(rx_roce_bytes_msd)},
+       {DRVSTAT_INFO(rx_roce_frames)},
+       {DRVSTAT_INFO(roce_drops_payload_len)},
+       {DRVSTAT_INFO(roce_drops_crc)}
 };
 #define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)
 
@@ -155,7 +160,9 @@ static const struct be_ethtool_stat et_tx_stats[] = {
        /* Number of times the TX queue was stopped due to lack
         * of spaces in the TXQ.
         */
-       {DRVSTAT_TX_INFO(tx_stops)}
+       {DRVSTAT_TX_INFO(tx_stops)},
+       /* Pkts dropped in the driver's transmit path */
+       {DRVSTAT_TX_INFO(tx_drv_drops)}
 };
 #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats))
 
@@ -290,19 +297,19 @@ static int be_get_coalesce(struct net_device *netdev,
                           struct ethtool_coalesce *et)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       struct be_eq_obj *eqo = &adapter->eq_obj[0];
+       struct be_aic_obj *aic = &adapter->aic_obj[0];
 
 
-       et->rx_coalesce_usecs = eqo->cur_eqd;
-       et->rx_coalesce_usecs_high = eqo->max_eqd;
-       et->rx_coalesce_usecs_low = eqo->min_eqd;
+       et->rx_coalesce_usecs = aic->prev_eqd;
+       et->rx_coalesce_usecs_high = aic->max_eqd;
+       et->rx_coalesce_usecs_low = aic->min_eqd;
 
-       et->tx_coalesce_usecs = eqo->cur_eqd;
-       et->tx_coalesce_usecs_high = eqo->max_eqd;
-       et->tx_coalesce_usecs_low = eqo->min_eqd;
+       et->tx_coalesce_usecs = aic->prev_eqd;
+       et->tx_coalesce_usecs_high = aic->max_eqd;
+       et->tx_coalesce_usecs_low = aic->min_eqd;
 
-       et->use_adaptive_rx_coalesce = eqo->enable_aic;
-       et->use_adaptive_tx_coalesce = eqo->enable_aic;
+       et->use_adaptive_rx_coalesce = aic->enable;
+       et->use_adaptive_tx_coalesce = aic->enable;
 
        return 0;
 }
@@ -314,14 +321,17 @@ static int be_set_coalesce(struct net_device *netdev,
                           struct ethtool_coalesce *et)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       struct be_aic_obj *aic = &adapter->aic_obj[0];
        struct be_eq_obj *eqo;
        int i;
 
        for_all_evt_queues(adapter, eqo, i) {
-               eqo->enable_aic = et->use_adaptive_rx_coalesce;
-               eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
-               eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd);
-               eqo->eqd = et->rx_coalesce_usecs;
+               aic->enable = et->use_adaptive_rx_coalesce;
+               aic->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD);
+               aic->min_eqd = min(et->rx_coalesce_usecs_low, aic->max_eqd);
+               aic->et_eqd = min(et->rx_coalesce_usecs, aic->max_eqd);
+               aic->et_eqd = max(aic->et_eqd, aic->min_eqd);
+               aic++;
        }
 
        return 0;
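With the per-queue AIC state moved into be_aic_obj, the ethtool coalescing hooks keep the same user-visible behaviour but now read and write aic->* instead of eqo->*: rx-usecs maps to the fixed delay used when adaptive mode is off (et_eqd), rx-usecs-low/high bound the adaptive range, and adaptive-rx toggles aic->enable. Assuming a be2net port named eth0, these are driven with the usual commands, e.g. "ethtool -C eth0 adaptive-rx on rx-usecs-low 0 rx-usecs-high 96", or "ethtool -C eth0 adaptive-rx off rx-usecs 64" to pin a fixed delay with adaptive coalescing off.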
index 2c38cc402119c763021ea77461455fd0fa8ac035..77b4a8ae87a6ca64684d7103ee53a08446e060bc 100644 (file)
@@ -306,9 +306,13 @@ static void *hw_stats_from_cmd(struct be_adapter *adapter)
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
 
                return &cmd->hw_stats;
-       } else  {
+       } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
 
+               return &cmd->hw_stats;
+       } else {
+               struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
+
                return &cmd->hw_stats;
        }
 }
@@ -320,9 +324,13 @@ static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
 
                return &hw_stats->erx;
-       } else {
+       } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
 
+               return &hw_stats->erx;
+       } else {
+               struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+
                return &hw_stats->erx;
        }
 }
@@ -422,6 +430,60 @@ static void populate_be_v1_stats(struct be_adapter *adapter)
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
 }
 
+static void populate_be_v2_stats(struct be_adapter *adapter)
+{
+       struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
+       struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
+       struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
+       struct be_port_rxf_stats_v2 *port_stats =
+                                       &rxf_stats->port[adapter->port_num];
+       struct be_drv_stats *drvs = &adapter->drv_stats;
+
+       be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+       drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+       drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
+       drvs->rx_pause_frames = port_stats->rx_pause_frames;
+       drvs->rx_crc_errors = port_stats->rx_crc_errors;
+       drvs->rx_control_frames = port_stats->rx_control_frames;
+       drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
+       drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
+       drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
+       drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+       drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+       drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+       drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
+       drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+       drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+       drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
+       drvs->rx_dropped_header_too_small =
+               port_stats->rx_dropped_header_too_small;
+       drvs->rx_input_fifo_overflow_drop =
+               port_stats->rx_input_fifo_overflow_drop;
+       drvs->rx_address_filtered = port_stats->rx_address_filtered;
+       drvs->rx_alignment_symbol_errors =
+               port_stats->rx_alignment_symbol_errors;
+       drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
+       drvs->tx_pauseframes = port_stats->tx_pauseframes;
+       drvs->tx_controlframes = port_stats->tx_controlframes;
+       drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
+       drvs->jabber_events = port_stats->jabber_events;
+       drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+       drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+       drvs->forwarded_packets = rxf_stats->forwarded_packets;
+       drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+       drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
+       drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
+       adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
+       if (be_roce_supported(adapter))  {
+               drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
+               drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
+               drvs->rx_roce_frames = port_stats->roce_frames_received;
+               drvs->roce_drops_crc = port_stats->roce_drops_crc;
+               drvs->roce_drops_payload_len =
+                       port_stats->roce_drops_payload_len;
+       }
+}
+
 static void populate_lancer_stats(struct be_adapter *adapter)
 {
 
@@ -489,7 +551,7 @@ static void populate_erx_stats(struct be_adapter *adapter,
 
 void be_parse_stats(struct be_adapter *adapter)
 {
-       struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
+       struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;
@@ -499,11 +561,13 @@ void be_parse_stats(struct be_adapter *adapter)
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
-               else
-                       /* for BE3 and Skyhawk */
+               else if (BE3_chip(adapter))
+                       /* for BE3 */
                        populate_be_v1_stats(adapter);
+               else
+                       populate_be_v2_stats(adapter);
 
-               /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
+               /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
@@ -935,8 +999,10 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
        u32 start = txq->head;
 
        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
-       if (!skb)
+       if (!skb) {
+               tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
+       }
 
        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
@@ -965,6 +1031,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
+               tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
@@ -1275,53 +1342,79 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
        return status;
 }
 
-static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
+static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
+                         ulong now)
 {
-       struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
-       ulong now = jiffies;
-       ulong delta = now - stats->rx_jiffies;
-       u64 pkts;
-       unsigned int start, eqd;
+       aic->rx_pkts_prev = rx_pkts;
+       aic->tx_reqs_prev = tx_pkts;
+       aic->jiffies = now;
+}
 
-       if (!eqo->enable_aic) {
-               eqd = eqo->eqd;
-               goto modify_eqd;
-       }
+static void be_eqd_update(struct be_adapter *adapter)
+{
+       struct be_set_eqd set_eqd[MAX_EVT_QS];
+       int eqd, i, num = 0, start;
+       struct be_aic_obj *aic;
+       struct be_eq_obj *eqo;
+       struct be_rx_obj *rxo;
+       struct be_tx_obj *txo;
+       u64 rx_pkts, tx_pkts;
+       ulong now;
+       u32 pps, delta;
 
-       if (eqo->idx >= adapter->num_rx_qs)
-               return;
+       for_all_evt_queues(adapter, eqo, i) {
+               aic = &adapter->aic_obj[eqo->idx];
+               if (!aic->enable) {
+                       if (aic->jiffies)
+                               aic->jiffies = 0;
+                       eqd = aic->et_eqd;
+                       goto modify_eqd;
+               }
 
-       stats = rx_stats(&adapter->rx_obj[eqo->idx]);
+               rxo = &adapter->rx_obj[eqo->idx];
+               do {
+                       start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
+                       rx_pkts = rxo->stats.rx_pkts;
+               } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
 
-       /* Wrapped around */
-       if (time_before(now, stats->rx_jiffies)) {
-               stats->rx_jiffies = now;
-               return;
-       }
+               txo = &adapter->tx_obj[eqo->idx];
+               do {
+                       start = u64_stats_fetch_begin_bh(&txo->stats.sync);
+                       tx_pkts = txo->stats.tx_reqs;
+               } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
 
-       /* Update once a second */
-       if (delta < HZ)
-               return;
 
-       do {
-               start = u64_stats_fetch_begin_bh(&stats->sync);
-               pkts = stats->rx_pkts;
-       } while (u64_stats_fetch_retry_bh(&stats->sync, start));
-
-       stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
-       stats->rx_pkts_prev = pkts;
-       stats->rx_jiffies = now;
-       eqd = (stats->rx_pps / 110000) << 3;
-       eqd = min(eqd, eqo->max_eqd);
-       eqd = max(eqd, eqo->min_eqd);
-       if (eqd < 10)
-               eqd = 0;
+               /* Skip, if wrapped around or first calculation */
+               now = jiffies;
+               if (!aic->jiffies || time_before(now, aic->jiffies) ||
+                   rx_pkts < aic->rx_pkts_prev ||
+                   tx_pkts < aic->tx_reqs_prev) {
+                       be_aic_update(aic, rx_pkts, tx_pkts, now);
+                       continue;
+               }
+
+               delta = jiffies_to_msecs(now - aic->jiffies);
+               pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
+                       (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
+               eqd = (pps / 15000) << 2;
+
+               if (eqd < 8)
+                       eqd = 0;
+               eqd = min_t(u32, eqd, aic->max_eqd);
+               eqd = max_t(u32, eqd, aic->min_eqd);
 
+               be_aic_update(aic, rx_pkts, tx_pkts, now);
 modify_eqd:
-       if (eqd != eqo->cur_eqd) {
-               be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
-               eqo->cur_eqd = eqd;
+               if (eqd != aic->prev_eqd) {
+                       set_eqd[num].delay_multiplier = (eqd * 65)/100;
+                       set_eqd[num].eq_id = eqo->q.id;
+                       aic->prev_eqd = eqd;
+                       num++;
+               }
        }
+
+       if (num)
+               be_cmd_modify_eqd(adapter, set_eqd, num);
 }
 
 static void be_rx_stats_update(struct be_rx_obj *rxo,
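The reworked be_eqd_update() above folds RX and TX activity into a single packets-per-second figure per event queue, maps that onto an EQ delay, and batches every EQ whose delay changed into one MODIFY_EQ_DELAY command instead of issuing one command per queue. A small userspace-style sketch of just the arithmetic (names are local to the sketch; the min/max bounds come from the ethtool-settable aic fields):

    #include <stdint.h>
    #include <stdio.h>

    /* pps -> EQ delay in usecs, mirroring the mapping in the hunk above */
    static uint32_t eqd_from_counters(uint64_t rx_pkts, uint64_t rx_prev,
                                      uint64_t tx_reqs, uint64_t tx_prev,
                                      uint32_t delta_ms,
                                      uint32_t min_eqd, uint32_t max_eqd)
    {
            uint32_t pps, eqd;

            pps = (uint32_t)((rx_pkts - rx_prev) * 1000 / delta_ms) +
                  (uint32_t)((tx_reqs - tx_prev) * 1000 / delta_ms);

            eqd = (pps / 15000) << 2;       /* roughly 4 us per 15 kpps of load */
            if (eqd < 8)
                    eqd = 0;                /* light load: no coalescing delay */
            if (eqd > max_eqd)
                    eqd = max_eqd;
            if (eqd < min_eqd)
                    eqd = min_eqd;

            return eqd;     /* the firmware is handed (eqd * 65) / 100 */
    }

    int main(void)
    {
            /* e.g. 1.2 Mpps combined over a 1000 ms window, bounds 0..128 us */
            printf("eqd = %u us\n",
                   eqd_from_counters(1000000, 0, 200000, 0, 1000, 0, 128));
            return 0;
    }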
@@ -1938,6 +2031,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 {
        struct be_queue_info *eq;
        struct be_eq_obj *eqo;
+       struct be_aic_obj *aic;
        int i, rc;
 
        adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
@@ -1946,11 +2040,12 @@ static int be_evt_queues_create(struct be_adapter *adapter)
        for_all_evt_queues(adapter, eqo, i) {
                netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
                               BE_NAPI_WEIGHT);
+               aic = &adapter->aic_obj[i];
                eqo->adapter = adapter;
                eqo->tx_budget = BE_TX_BUDGET;
                eqo->idx = i;
-               eqo->max_eqd = BE_MAX_EQD;
-               eqo->enable_aic = true;
+               aic->max_eqd = BE_MAX_EQD;
+               aic->enable = true;
 
                eq = &eqo->q;
                rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
@@ -2937,7 +3032,8 @@ static int be_vf_setup(struct be_adapter *adapter)
                        goto err;
                vf_cfg->def_vid = def_vlan;
 
-               be_cmd_enable_vf(adapter, vf + 1);
+               if (!old_vfs)
+                       be_cmd_enable_vf(adapter, vf + 1);
        }
 
        if (!old_vfs) {
@@ -2962,12 +3058,12 @@ static void BEx_get_resources(struct be_adapter *adapter,
        struct pci_dev *pdev = adapter->pdev;
        bool use_sriov = false;
 
-       if (BE3_chip(adapter) && be_physfn(adapter)) {
+       if (BE3_chip(adapter) && sriov_want(adapter)) {
                int max_vfs;
 
                max_vfs = pci_sriov_get_totalvfs(pdev);
                res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
-               use_sriov = res->max_vfs && num_vfs;
+               use_sriov = res->max_vfs;
        }
 
        if (be_physfn(adapter))
@@ -2983,8 +3079,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
                res->max_vlans = BE_NUM_VLANS_SUPPORTED;
        res->max_mcast_mac = BE_MAX_MC;
 
+       /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
        if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
-           !be_physfn(adapter))
+           !be_physfn(adapter) || (adapter->port_num > 1))
                res->max_tx_qs = 1;
        else
                res->max_tx_qs = BE3_MAX_TX_QS;
@@ -3026,14 +3123,6 @@ static int be_get_resources(struct be_adapter *adapter)
                adapter->res = res;
        }
 
-       /* For BE3 only check if FW suggests a different max-txqs value */
-       if (BE3_chip(adapter)) {
-               status = be_cmd_get_profile_config(adapter, &res, 0);
-               if (!status && res.max_tx_qs)
-                       adapter->res.max_tx_qs =
-                               min(adapter->res.max_tx_qs, res.max_tx_qs);
-       }
-
        /* For Lancer, SH etc read per-function resource limits from FW.
         * GET_FUNC_CONFIG returns per function guaranteed limits.
         * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
@@ -3258,7 +3347,7 @@ static int be_setup(struct be_adapter *adapter)
                be_cmd_set_flow_control(adapter, adapter->tx_fc,
                                        adapter->rx_fc);
 
-       if (be_physfn(adapter) && num_vfs) {
+       if (sriov_want(adapter)) {
                if (be_max_vfs(adapter))
                        be_vf_setup(adapter);
                else
@@ -4077,9 +4166,11 @@ static int be_stats_init(struct be_adapter *adapter)
                cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
        else if (BE2_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
-       else
-               /* BE3 and Skyhawk */
+       else if (BE3_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
+       else
+               /* ALL non-BE ASICs */
+               cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
 
        cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                      GFP_KERNEL);
@@ -4113,7 +4204,6 @@ static void be_remove(struct pci_dev *pdev)
 
        pci_disable_pcie_error_reporting(pdev);
 
-       pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
 
@@ -4262,7 +4352,6 @@ static void be_worker(struct work_struct *work)
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
-       struct be_eq_obj *eqo;
        int i;
 
        /* when interrupts are not yet enabled, just reap any pending
@@ -4293,8 +4382,7 @@ static void be_worker(struct work_struct *work)
                }
        }
 
-       for_all_evt_queues(adapter, eqo, i)
-               be_eqd_update(adapter, eqo);
+       be_eqd_update(adapter);
 
 reschedule:
        adapter->work_counter++;
@@ -4351,28 +4439,22 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
-               status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (status < 0) {
-                       dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
-                       goto free_netdev;
-               }
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
-               status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-               if (!status)
-                       status = dma_set_coherent_mask(&pdev->dev,
-                                                      DMA_BIT_MASK(32));
+               status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }
 
-       status = pci_enable_pcie_error_reporting(pdev);
-       if (status)
-               dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
+       if (be_physfn(adapter)) {
+               status = pci_enable_pcie_error_reporting(pdev);
+               if (!status)
+                       dev_info(&pdev->dev, "PCIe error reporting enabled\n");
+       }
 
        status = be_ctrl_init(adapter);
        if (status)
@@ -4443,7 +4525,6 @@ ctrl_clean:
        be_ctrl_cleanup(adapter);
 free_netdev:
        free_netdev(netdev);
-       pci_set_drvdata(pdev, NULL);
 rel_reg:
        pci_release_regions(pdev);
 disable_dev:
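
Several hunks in this merge, including the be_probe() change above, fold the old dma_set_mask()/dma_set_coherent_mask() pairs into dma_set_mask_and_coherent(). A minimal sketch of the resulting 64-bit-with-32-bit-fallback idiom; set_dma_masks() is a hypothetical helper, not a be2net function.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Try 64-bit streaming + coherent DMA first, fall back to 32-bit. */
static int set_dma_masks(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err)
		return 0;	/* caller may also set NETIF_F_HIGHDMA here */

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "No usable DMA configuration\n");
	return err;
}
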
index c706b7a9397ed6f13dedfee142f99ae50a98ff98..4b22a9579f859e7e9d61da22d5175632d6441d2d 100644 (file)
@@ -699,7 +699,6 @@ static void fealnx_remove_one(struct pci_dev *pdev)
                pci_iounmap(pdev, np->mem);
                free_netdev(dev);
                pci_release_regions(pdev);
-               pci_set_drvdata(pdev, NULL);
        } else
                printk(KERN_ERR "fealnx: remove for unknown device\n");
 }
index 6b60582ce8cf1c7a82dd3da1d69acb64981014f4..56f2f608a9f43aa27a32b038d21daf83ffe7f682 100644 (file)
@@ -1083,7 +1083,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
        mac_addr = of_get_mac_address(ofdev->dev.of_node);
        if (mac_addr)
-               memcpy(ndev->dev_addr, mac_addr, 6);
+               memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
 
        ret = fep->ops->allocate_bd(ndev);
        if (ret)
index c4eaadeb572fa3dcd97396afffd961f936d0189d..d6d810cb97c79b80da7991f2ee423406b89853af 100644 (file)
@@ -88,6 +88,7 @@
 
 #include <asm/io.h>
 #include <asm/reg.h>
+#include <asm/mpc85xx.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 #include <linux/module.h>
@@ -939,9 +940,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
        }
 }
 
-static void gfar_detect_errata(struct gfar_private *priv)
+static void __gfar_detect_errata_83xx(struct gfar_private *priv)
 {
-       struct device *dev = &priv->ofdev->dev;
        unsigned int pvr = mfspr(SPRN_PVR);
        unsigned int svr = mfspr(SPRN_SVR);
        unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
@@ -957,15 +957,33 @@ static void gfar_detect_errata(struct gfar_private *priv)
            (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
                priv->errata |= GFAR_ERRATA_76;
 
-       /* MPC8313 and MPC837x all rev */
-       if ((pvr == 0x80850010 && mod == 0x80b0) ||
-           (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
-               priv->errata |= GFAR_ERRATA_A002;
+       /* MPC8313 Rev < 2.0 */
+       if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
+               priv->errata |= GFAR_ERRATA_12;
+}
+
+static void __gfar_detect_errata_85xx(struct gfar_private *priv)
+{
+       unsigned int svr = mfspr(SPRN_SVR);
 
-       /* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
-       if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
-           (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+       if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
                priv->errata |= GFAR_ERRATA_12;
+       if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
+           ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
+               priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
+}
+
+static void gfar_detect_errata(struct gfar_private *priv)
+{
+       struct device *dev = &priv->ofdev->dev;
+
+       /* no plans to fix */
+       priv->errata |= GFAR_ERRATA_A002;
+
+       if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
+               __gfar_detect_errata_85xx(priv);
+       else /* non-mpc85xx parts, i.e. e300 core based */
+               __gfar_detect_errata_83xx(priv);
 
        if (priv->errata)
                dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
@@ -1599,7 +1617,7 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
 	/* Normally TSEC should not hang on GRS commands, so we should
         * actually wait for IEVENT_GRSC flag.
         */
-       if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
+       if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
                return 0;
 
        /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
@@ -2900,7 +2918,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
        struct gfar_priv_rx_q *rx_queue = NULL;
        int work_done = 0, work_done_per_q = 0;
        int i, budget_per_q = 0;
-       int has_tx_work;
+       int has_tx_work = 0;
        unsigned long rstat_rxf;
        int num_act_queues;
 
@@ -2915,62 +2933,51 @@ static int gfar_poll(struct napi_struct *napi, int budget)
        if (num_act_queues)
                budget_per_q = budget/num_act_queues;
 
-       while (1) {
-               has_tx_work = 0;
-               for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
-                       tx_queue = priv->tx_queue[i];
-                       /* run Tx cleanup to completion */
-                       if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
-                               gfar_clean_tx_ring(tx_queue);
-                               has_tx_work = 1;
-                       }
+       for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
+               tx_queue = priv->tx_queue[i];
+               /* run Tx cleanup to completion */
+               if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
+                       gfar_clean_tx_ring(tx_queue);
+                       has_tx_work = 1;
                }
+       }
 
-               for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
-                       /* skip queue if not active */
-                       if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
-                               continue;
-
-                       rx_queue = priv->rx_queue[i];
-                       work_done_per_q =
-                               gfar_clean_rx_ring(rx_queue, budget_per_q);
-                       work_done += work_done_per_q;
-
-                       /* finished processing this queue */
-                       if (work_done_per_q < budget_per_q) {
-                               /* clear active queue hw indication */
-                               gfar_write(&regs->rstat,
-                                          RSTAT_CLEAR_RXF0 >> i);
-                               rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
-                               num_act_queues--;
-
-                               if (!num_act_queues)
-                                       break;
-                               /* recompute budget per Rx queue */
-                               budget_per_q =
-                                       (budget - work_done) / num_act_queues;
-                       }
-               }
+       for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+               /* skip queue if not active */
+               if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
+                       continue;
 
-               if (work_done >= budget)
-                       break;
+               rx_queue = priv->rx_queue[i];
+               work_done_per_q =
+                       gfar_clean_rx_ring(rx_queue, budget_per_q);
+               work_done += work_done_per_q;
+
+               /* finished processing this queue */
+               if (work_done_per_q < budget_per_q) {
+                       /* clear active queue hw indication */
+                       gfar_write(&regs->rstat,
+                                  RSTAT_CLEAR_RXF0 >> i);
+                       num_act_queues--;
+
+                       if (!num_act_queues)
+                               break;
+               }
+       }
 
-               if (!num_act_queues && !has_tx_work) {
+       if (!num_act_queues && !has_tx_work) {
 
-                       napi_complete(napi);
+               napi_complete(napi);
 
-                       /* Clear the halt bit in RSTAT */
-                       gfar_write(&regs->rstat, gfargrp->rstat);
+               /* Clear the halt bit in RSTAT */
+               gfar_write(&regs->rstat, gfargrp->rstat);
 
-                       gfar_write(&regs->imask, IMASK_DEFAULT);
+               gfar_write(&regs->imask, IMASK_DEFAULT);
 
-                       /* If we are coalescing interrupts, update the timer
-                        * Otherwise, clear it
-                        */
-                       gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
-                                                 gfargrp->tx_bit_map);
-                       break;
-               }
+               /* If we are coalescing interrupts, update the timer
+                * Otherwise, clear it
+                */
+               gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+                                         gfargrp->tx_bit_map);
        }
 
        return work_done;
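
The reworked gfar_poll() above keeps the usual NAPI contract: do at most one bounded pass over the rings, and only call napi_complete() and re-arm device interrupts once both Tx and Rx are idle. A stripped-down sketch of that contract; clean_tx_rings(), clean_rx_rings() and reenable_irqs() are hypothetical helpers stubbed out so the sketch stands alone, not gianfar functions.

#include <linux/netdevice.h>

/* Hypothetical driver helpers, stubbed so this compiles in isolation. */
static int clean_tx_rings(struct napi_struct *napi) { return 0; }
static int clean_rx_rings(struct napi_struct *napi, int budget) { return 0; }
static void reenable_irqs(struct napi_struct *napi) { }

/* Generic poll skeleton mirroring the structure of gfar_poll() above. */
static int example_poll(struct napi_struct *napi, int budget)
{
	int tx_work = clean_tx_rings(napi);		/* Tx cleanup runs to completion */
	int rx_work = clean_rx_rings(napi, budget);	/* Rx work is bounded by budget */

	if (!tx_work && rx_work < budget) {
		napi_complete(napi);			/* leave polled mode */
		reenable_irqs(napi);			/* re-arm the device interrupt */
	}

	return rx_work;					/* only Rx counts against budget */
}
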
index 04112b98ff5d9231f1ed60e4df438fa88b6b7947..114c58f9d8d25a83d235e0cafa976a93db3c7426 100644 (file)
@@ -1177,21 +1177,21 @@ static inline void gfar_read_filer(struct gfar_private *priv,
        *fpr = gfar_read(&regs->rqfpr);
 }
 
-extern void lock_rx_qs(struct gfar_private *priv);
-extern void lock_tx_qs(struct gfar_private *priv);
-extern void unlock_rx_qs(struct gfar_private *priv);
-extern void unlock_tx_qs(struct gfar_private *priv);
-extern irqreturn_t gfar_receive(int irq, void *dev_id);
-extern int startup_gfar(struct net_device *dev);
-extern void stop_gfar(struct net_device *dev);
-extern void gfar_halt(struct net_device *dev);
-extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
-               int enable, u32 regnum, u32 read);
-extern void gfar_configure_coalescing_all(struct gfar_private *priv);
+void lock_rx_qs(struct gfar_private *priv);
+void lock_tx_qs(struct gfar_private *priv);
+void unlock_rx_qs(struct gfar_private *priv);
+void unlock_tx_qs(struct gfar_private *priv);
+irqreturn_t gfar_receive(int irq, void *dev_id);
+int startup_gfar(struct net_device *dev);
+void stop_gfar(struct net_device *dev);
+void gfar_halt(struct net_device *dev);
+void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
+                  u32 regnum, u32 read);
+void gfar_configure_coalescing_all(struct gfar_private *priv);
 void gfar_init_sysfs(struct net_device *dev);
 int gfar_set_features(struct net_device *dev, netdev_features_t features);
-extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
+void gfar_check_rx_parser_mode(struct gfar_private *priv);
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
 
 extern const struct ethtool_ops gfar_ethtool_ops;
 
index 5930c39672db25eee560dabd16cf38d1ca54636b..d58a3dfc95c296086f3d729819e42bc758ef5f6f 100644 (file)
@@ -3899,7 +3899,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
 
        mac_addr = of_get_mac_address(np);
        if (mac_addr)
-               memcpy(dev->dev_addr, mac_addr, 6);
+               memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
 
        ugeth->ug_info = ug_info;
        ugeth->dev = device;
index 6231bc02b9648f9c178ef984c22c362bc08c10cb..1085257385d2c2d1312b0a31c5272eec484a8d4b 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_FUJITSU
        bool "Fujitsu devices"
        default y
-       depends on ISA || PCMCIA
+       depends on PCMCIA
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
index 91227d03274e02685d5e96fe6096e56e6af999eb..37860096f744005a17c7dc805f890e396ac8933e 100644 (file)
@@ -1098,7 +1098,7 @@ static int hp100_open(struct net_device *dev)
        if (request_irq(dev->irq, hp100_interrupt,
                        lp->bus == HP100_BUS_PCI || lp->bus ==
                        HP100_BUS_EISA ? IRQF_SHARED : 0,
-                       "hp100", dev)) {
+                       dev->name, dev)) {
                printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
                return -EAGAIN;
        }
index e38816145395c9e2b08416934e0b10b6352b4507..a15877affc9bd6b5791e1df9fa0e8ec2c2905c0d 100644 (file)
@@ -711,7 +711,7 @@ static int init_i596_mem(struct net_device *dev)
        i596_add_cmd(dev, &lp->cf_cmd.cmd);
 
        DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
-       memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
+       memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
        lp->sa_cmd.cmd.command = CmdSASetup;
        i596_add_cmd(dev, &lp->sa_cmd.cmd);
 
@@ -1155,7 +1155,7 @@ struct net_device * __init i82596_probe(int unit)
                        err = -ENODEV;
                        goto out;
                }
-               memcpy(eth_addr, (void *) 0xfffc1f2c, 6);       /* YUCK! Get addr from NOVRAM */
+               memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN);        /* YUCK! Get addr from NOVRAM */
                dev->base_addr = MVME_I596_BASE;
                dev->irq = (unsigned) MVME16x_IRQ_I596;
                goto found;
index d653bac4cfc4e2be49d1ab1ff9a1247eae5460aa..861fa15e1e81b11e31f0d8a2f03907056689ce9d 100644 (file)
@@ -607,7 +607,7 @@ static int init_i596_mem(struct net_device *dev)
        i596_add_cmd(dev, &dma->cf_cmd.cmd);
 
        DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
-       memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
+       memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
        dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
        DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
        i596_add_cmd(dev, &dma->sa_cmd.cmd);
@@ -1396,13 +1396,13 @@ static void set_multicast_list(struct net_device *dev)
                netdev_for_each_mc_addr(ha, dev) {
                        if (!cnt--)
                                break;
-                       memcpy(cp, ha->addr, 6);
+                       memcpy(cp, ha->addr, ETH_ALEN);
                        if (i596_debug > 1)
                                DEB(DEB_MULTI,
                                    printk(KERN_DEBUG
                                           "%s: Adding address %pM\n",
                                           dev->name, cp));
-                       cp += 6;
+                       cp += ETH_ALEN;
                }
                DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
                i596_add_cmd(dev, &cmd->cmd);
index 6b5c7222342c5e96144febc4a55bd7fb7dfc137e..ef21a2e10180c516a5ee3935b15af5fb52b506ab 100644 (file)
@@ -2676,7 +2676,7 @@ static int emac_init_config(struct emac_instance *dev)
                       np->full_name);
                return -ENXIO;
        }
-       memcpy(dev->ndev->dev_addr, p, 6);
+       memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
 
        /* IAHT and GAHT filter parameterization */
        if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
index 59a92d5870b5535826a6fec333f27d1796e062db..9c45efe4c8fecfdc0e16371ee67a622e4bfcbdbe 100644 (file)
 struct emac_instance;
 struct mal_instance;
 
-extern void emac_dbg_register(struct emac_instance *dev);
-extern void emac_dbg_unregister(struct emac_instance *dev);
-extern void mal_dbg_register(struct mal_instance *mal);
-extern void mal_dbg_unregister(struct mal_instance *mal);
-extern int emac_init_debug(void) __init;
-extern void emac_fini_debug(void) __exit;
-extern void emac_dbg_dump_all(void);
+void emac_dbg_register(struct emac_instance *dev);
+void emac_dbg_unregister(struct emac_instance *dev);
+void mal_dbg_register(struct mal_instance *mal);
+void mal_dbg_unregister(struct mal_instance *mal);
+int emac_init_debug(void) __init;
+void emac_fini_debug(void) __exit;
+void emac_dbg_dump_all(void);
 
 # define DBG_LEVEL             1
 
index 668bceeff4a2112eb98eece3405c9594dd8accef..d4f1374d19000ea0562d9c4c738e1ea56caffb4b 100644 (file)
@@ -56,15 +56,15 @@ struct rgmii_instance {
 
 #ifdef CONFIG_IBM_EMAC_RGMII
 
-extern int rgmii_init(void);
-extern void rgmii_exit(void);
-extern int rgmii_attach(struct platform_device *ofdev, int input, int mode);
-extern void rgmii_detach(struct platform_device *ofdev, int input);
-extern void rgmii_get_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_put_mdio(struct platform_device *ofdev, int input);
-extern void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int rgmii_get_regs_len(struct platform_device *ofdev);
-extern void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
+int rgmii_init(void);
+void rgmii_exit(void);
+int rgmii_attach(struct platform_device *ofdev, int input, int mode);
+void rgmii_detach(struct platform_device *ofdev, int input);
+void rgmii_get_mdio(struct platform_device *ofdev, int input);
+void rgmii_put_mdio(struct platform_device *ofdev, int input);
+void rgmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int rgmii_get_regs_len(struct platform_device *ofdev);
+void *rgmii_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 
index 350b7096a041a310f620911f7f2e448bc7d4e8ca..4d5f336f07b3669c15c916ea79ce8e90bc9efb47 100644 (file)
@@ -72,13 +72,13 @@ struct tah_instance {
 
 #ifdef CONFIG_IBM_EMAC_TAH
 
-extern int tah_init(void);
-extern void tah_exit(void);
-extern int tah_attach(struct platform_device *ofdev, int channel);
-extern void tah_detach(struct platform_device *ofdev, int channel);
-extern void tah_reset(struct platform_device *ofdev);
-extern int tah_get_regs_len(struct platform_device *ofdev);
-extern void *tah_dump_regs(struct platform_device *ofdev, void *buf);
+int tah_init(void);
+void tah_exit(void);
+int tah_attach(struct platform_device *ofdev, int channel);
+void tah_detach(struct platform_device *ofdev, int channel);
+void tah_reset(struct platform_device *ofdev);
+int tah_get_regs_len(struct platform_device *ofdev);
+void *tah_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 
index 455bfb0854934d684164d5105faa45ae89df03bb..0959c55b14591dc7f2d58b1946585eeb0da04d68 100644 (file)
@@ -53,15 +53,15 @@ struct zmii_instance {
 
 #ifdef CONFIG_IBM_EMAC_ZMII
 
-extern int zmii_init(void);
-extern void zmii_exit(void);
-extern int zmii_attach(struct platform_device *ofdev, int input, int *mode);
-extern void zmii_detach(struct platform_device *ofdev, int input);
-extern void zmii_get_mdio(struct platform_device *ofdev, int input);
-extern void zmii_put_mdio(struct platform_device *ofdev, int input);
-extern void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
-extern int zmii_get_regs_len(struct platform_device *ocpdev);
-extern void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
+int zmii_init(void);
+void zmii_exit(void);
+int zmii_attach(struct platform_device *ofdev, int input, int *mode);
+void zmii_detach(struct platform_device *ofdev, int input);
+void zmii_get_mdio(struct platform_device *ofdev, int input);
+void zmii_put_mdio(struct platform_device *ofdev, int input);
+void zmii_set_speed(struct platform_device *ofdev, int input, int speed);
+int zmii_get_regs_len(struct platform_device *ocpdev);
+void *zmii_dump_regs(struct platform_device *ofdev, void *buf);
 
 #else
 # define zmii_init()           0
index 5d41aee69d1646e3b42a1d91b8873825344043cc..952d795230a479c79c0684ee0849a05e5e0ff631 100644 (file)
@@ -1185,7 +1185,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
                netdev_for_each_mc_addr(ha, netdev) {
                        /* add the multicast address to the filter table */
                        unsigned long mcast_addr = 0;
-                       memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
+                       memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
                        lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                                   IbmVethMcastAddFilter,
                                                   mcast_addr);
@@ -1370,7 +1370,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
 
        adapter->mac_addr = 0;
-       memcpy(&adapter->mac_addr, mac_addr_p, 6);
+       memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
 
        netdev->irq = dev->irq;
        netdev->netdev_ops = &ibmveth_netdev_ops;
index bdf5023724e768945f8ec71ceb32a27d897dba6e..25045ae071711f03cee9ccabf9898cf2abebbc21 100644 (file)
@@ -2183,7 +2183,6 @@ static void ipg_remove(struct pci_dev *pdev)
 
        free_netdev(dev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static const struct net_device_ops ipg_netdev_ops = {
index ada6e210279f3750e26ea5720ca3bbe5057abf2c..cbaba4442d4b226d18691059a4e122029040ff0a 100644 (file)
@@ -2985,7 +2985,6 @@ err_out_free_res:
 err_out_disable_pdev:
        pci_disable_device(pdev);
 err_out_free_dev:
-       pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
        return err;
 }
@@ -3003,7 +3002,6 @@ static void e100_remove(struct pci_dev *pdev)
                free_netdev(netdev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index 26d9cd59ec75a25451185a8cb933c318f9068912..58c147271a362e68914d55d97b71b327b194e86b 100644 (file)
@@ -325,7 +325,7 @@ enum e1000_state_t {
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
+struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
 #define e_dbg(format, arg...) \
        netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
 #define e_err(msglvl, format, arg...) \
@@ -346,20 +346,20 @@ extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
 extern char e1000_driver_name[];
 extern const char e1000_driver_version[];
 
-extern int e1000_up(struct e1000_adapter *adapter);
-extern void e1000_down(struct e1000_adapter *adapter);
-extern void e1000_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
-extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
-extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
-extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
-extern void e1000_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
-extern void e1000_power_up_phy(struct e1000_adapter *);
-extern void e1000_set_ethtool_ops(struct net_device *netdev);
-extern void e1000_check_options(struct e1000_adapter *adapter);
-extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_update_stats(struct e1000_adapter *adapter);
+bool e1000_has_link(struct e1000_adapter *adapter);
+void e1000_power_up_phy(struct e1000_adapter *);
+void e1000_set_ethtool_ops(struct net_device *netdev);
+void e1000_check_options(struct e1000_adapter *adapter);
+char *e1000_get_hw_dev_name(struct e1000_hw *hw);
 
 #endif /* _E1000_H_ */
index 59ad007dd5aa09a6123cbd25db7c7b8f4e06f1b7..34672f87726cb7b9ab0cdd1ac294b52a031ba4c8 100644 (file)
@@ -1018,19 +1018,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        pci_using_dac = 0;
        if ((hw->bus_type == e1000_bus_type_pcix) &&
-           !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               /* according to DMA-API-HOWTO, coherent calls will always
-                * succeed if the set call did
-                */
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+           !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        pr_err("No usable DMA config, aborting\n");
                        goto err_dma;
                }
-               dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        }
 
        netdev->netdev_ops = &e1000_netdev_ops;
index ad0edd11015d7b40a14d79f06afdab8ecec76cef..0150f7fc893d4ae6985096f17e5a4678f39fc3b3 100644 (file)
@@ -472,26 +472,25 @@ enum latency_range {
 extern char e1000e_driver_name[];
 extern const char e1000e_driver_version[];
 
-extern void e1000e_check_options(struct e1000_adapter *adapter);
-extern void e1000e_set_ethtool_ops(struct net_device *netdev);
-
-extern int e1000e_up(struct e1000_adapter *adapter);
-extern void e1000e_down(struct e1000_adapter *adapter);
-extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
-extern void e1000e_reset(struct e1000_adapter *adapter);
-extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
-extern int e1000e_setup_rx_resources(struct e1000_ring *ring);
-extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
-extern void e1000e_free_rx_resources(struct e1000_ring *ring);
-extern void e1000e_free_tx_resources(struct e1000_ring *ring);
-extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
-                                                   struct rtnl_link_stats64
-                                                   *stats);
-extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
-extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
+void e1000e_check_options(struct e1000_adapter *adapter);
+void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+int e1000e_up(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_reinit_locked(struct e1000_adapter *adapter);
+void e1000e_reset(struct e1000_adapter *adapter);
+void e1000e_power_up_phy(struct e1000_adapter *adapter);
+int e1000e_setup_rx_resources(struct e1000_ring *ring);
+int e1000e_setup_tx_resources(struct e1000_ring *ring);
+void e1000e_free_rx_resources(struct e1000_ring *ring);
+void e1000e_free_tx_resources(struct e1000_ring *ring);
+struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+                                            struct rtnl_link_stats64 *stats);
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+void e1000e_get_hw_control(struct e1000_adapter *adapter);
+void e1000e_release_hw_control(struct e1000_adapter *adapter);
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
 
 extern unsigned int copybreak;
 
@@ -508,8 +507,8 @@ extern const struct e1000_info e1000_pch2_info;
 extern const struct e1000_info e1000_pch_lpt_info;
 extern const struct e1000_info e1000_es2_info;
 
-extern void e1000e_ptp_init(struct e1000_adapter *adapter);
-extern void e1000e_ptp_remove(struct e1000_adapter *adapter);
+void e1000e_ptp_init(struct e1000_adapter *adapter);
+void e1000e_ptp_remove(struct e1000_adapter *adapter);
 
 static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
 {
@@ -536,7 +535,7 @@ static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
        return hw->phy.ops.write_reg_locked(hw, offset, data);
 }
 
-extern void e1000e_reload_nvm_generic(struct e1000_hw *hw);
+void e1000e_reload_nvm_generic(struct e1000_hw *hw);
 
 static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
 {
index 4ef786775acb7ca6665873cae61f520a6fbc8b45..aedd5736a87d53862fa2be4176e9762fcd8df18a 100644 (file)
@@ -6553,21 +6553,15 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
 
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev,
-                                       "No usable DMA configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
+                       goto err_dma;
                }
        }
 
index b5252eb8a6c757861ff2c22a3efea81400c3934d..49572dcdba8744d6658352df6c06d884ccb3ae3c 100644 (file)
@@ -347,9 +347,9 @@ struct i40e_vsi {
        u32 rx_buf_failed;
        u32 rx_page_failed;
 
-       /* These are arrays of rings, allocated at run-time */
-       struct i40e_ring *rx_rings;
-       struct i40e_ring *tx_rings;
+       /* These are containers of ring pointers, allocated at run-time */
+       struct i40e_ring **rx_rings;
+       struct i40e_ring **tx_rings;
 
        u16 work_limit;
        /* high bit set means dynamic, use accessor routines to read/write.
@@ -366,7 +366,7 @@ struct i40e_vsi {
        u8  dtype;
 
        /* List of q_vectors allocated to this VSI */
-       struct i40e_q_vector *q_vectors;
+       struct i40e_q_vector **q_vectors;
        int num_q_vectors;
        int base_vector;
 
@@ -422,8 +422,9 @@ struct i40e_q_vector {
 
        u8 num_ringpairs;       /* total number of ring pairs in vector */
 
-       char name[IFNAMSIZ + 9];
        cpumask_t affinity_mask;
+       struct rcu_head rcu;    /* to avoid race with update stats on free */
+       char name[IFNAMSIZ + 9];
 } ____cacheline_internodealigned_in_smp;
 
 /* lan device */
@@ -544,6 +545,7 @@ static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
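
The i40e_vsi change above turns the ring and q_vector members into arrays of pointers and adds an rcu_head, so stats and debugfs readers can follow a ring pointer under rcu_read_lock() while teardown frees it after a grace period; the debugfs hunks further down show that read side. A minimal sketch of the read pattern; struct example_ring and example_sum_rx_packets() are invented, only the rcu_read_lock()/ACCESS_ONCE()/NULL-check structure follows the driver code.

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/rcupdate.h>

struct example_ring {		/* reduced stand-in for struct i40e_ring */
	u64 packets;
	u64 bytes;
};

/* Walk an array of ring pointers safely against concurrent teardown:
 * read each slot once, skip NULL slots, and hold the RCU read lock.
 */
static u64 example_sum_rx_packets(struct example_ring **rings, int n)
{
	u64 total = 0;
	int i;

	rcu_read_lock();
	for (i = 0; i < n; i++) {
		struct example_ring *ring = ACCESS_ONCE(rings[i]);

		if (ring)
			total += ring->packets;
	}
	rcu_read_unlock();

	return total;
}
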
index 8dbd91f64b74d815fff682c6fe07570182e3182c..ef4cb1cf31f2d392a4ed4e1ae2e0059919423317 100644 (file)
@@ -151,9 +151,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
                                   size_t count, loff_t *ppos)
 {
        struct i40e_pf *pf = filp->private_data;
-       char dump_request_buf[16];
        bool seid_found = false;
-       int bytes_not_copied;
        long seid = -1;
        int buflen = 0;
        int i, ret;
@@ -163,21 +161,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
        /* don't allow partial writes */
        if (*ppos != 0)
                return 0;
-       if (count >= sizeof(dump_request_buf))
-               return -ENOSPC;
-
-       bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
-       if (bytes_not_copied < 0)
-               return bytes_not_copied;
-       if (bytes_not_copied > 0)
-               count -= bytes_not_copied;
-       dump_request_buf[count] = '\0';
 
        /* decode the SEID given to be dumped */
-       ret = kstrtol(dump_request_buf, 0, &seid);
-       if (ret < 0) {
-               dev_info(&pf->pdev->dev, "bad seid value '%s'\n",
-                        dump_request_buf);
+       ret = kstrtol_from_user(buffer, count, 0, &seid);
+
+       if (ret) {
+               dev_info(&pf->pdev->dev, "bad seid value\n");
        } else if (seid == 0) {
                seid_found = true;
 
@@ -245,26 +234,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
                        memcpy(p, vsi, len);
                        p += len;
 
-                       len = (sizeof(struct i40e_q_vector)
-                               * vsi->num_q_vectors);
-                       memcpy(p, vsi->q_vectors, len);
-                       p += len;
-
-                       len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
-                       memcpy(p, vsi->tx_rings, len);
-                       p += len;
-                       memcpy(p, vsi->rx_rings, len);
-                       p += len;
+                       if (vsi->num_q_vectors) {
+                               len = (sizeof(struct i40e_q_vector)
+                                       * vsi->num_q_vectors);
+                               memcpy(p, vsi->q_vectors, len);
+                               p += len;
+                       }
 
-                       for (i = 0; i < vsi->num_queue_pairs; i++) {
-                               len = sizeof(struct i40e_tx_buffer);
-                               memcpy(p, vsi->tx_rings[i].tx_bi, len);
+                       if (vsi->num_queue_pairs) {
+                               len = (sizeof(struct i40e_ring) *
+                                     vsi->num_queue_pairs);
+                               memcpy(p, vsi->tx_rings, len);
+                               p += len;
+                               memcpy(p, vsi->rx_rings, len);
                                p += len;
                        }
-                       for (i = 0; i < vsi->num_queue_pairs; i++) {
+
+                       if (vsi->tx_rings[0]) {
+                               len = sizeof(struct i40e_tx_buffer);
+                               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                                       memcpy(p, vsi->tx_rings[i]->tx_bi, len);
+                                       p += len;
+                               }
                                len = sizeof(struct i40e_rx_buffer);
-                               memcpy(p, vsi->rx_rings[i].rx_bi, len);
-                               p += len;
+                               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                                       memcpy(p, vsi->rx_rings[i]->rx_bi, len);
+                                       p += len;
+                               }
                        }
 
                        /* macvlan filter list */
@@ -484,100 +480,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                 "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
                 vsi->tx_restart, vsi->tx_busy,
                 vsi->rx_buf_failed, vsi->rx_page_failed);
-       if (vsi->rx_rings) {
-               for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: desc = %p\n",
-                                i, vsi->rx_rings[i].desc);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
-                                i, vsi->rx_rings[i].dev,
-                                vsi->rx_rings[i].netdev,
-                                vsi->rx_rings[i].rx_bi);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-                                i, vsi->rx_rings[i].state,
-                                vsi->rx_rings[i].queue_index,
-                                vsi->rx_rings[i].reg_idx);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
-                                i, vsi->rx_rings[i].rx_hdr_len,
-                                vsi->rx_rings[i].rx_buf_len,
-                                vsi->rx_rings[i].dtype);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-                                i, vsi->rx_rings[i].hsplit,
-                                vsi->rx_rings[i].next_to_use,
-                                vsi->rx_rings[i].next_to_clean,
-                                vsi->rx_rings[i].ring_active);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
-                                i, vsi->rx_rings[i].rx_stats.packets,
-                                vsi->rx_rings[i].rx_stats.bytes,
-                                vsi->rx_rings[i].rx_stats.non_eop_descs);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
-                                i,
-                                vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
-                               vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
-                                i, vsi->rx_rings[i].size,
-                                (long unsigned int)vsi->rx_rings[i].dma);
-                       dev_info(&pf->pdev->dev,
-                                "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
-                                i, vsi->rx_rings[i].vsi,
-                                vsi->rx_rings[i].q_vector);
-               }
+       rcu_read_lock();
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+               if (!rx_ring)
+                       continue;
+
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: desc = %p\n",
+                        i, rx_ring->desc);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+                        i, rx_ring->dev,
+                        rx_ring->netdev,
+                        rx_ring->rx_bi);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+                        i, rx_ring->state,
+                        rx_ring->queue_index,
+                        rx_ring->reg_idx);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+                        i, rx_ring->rx_hdr_len,
+                        rx_ring->rx_buf_len,
+                        rx_ring->dtype);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+                        i, rx_ring->hsplit,
+                        rx_ring->next_to_use,
+                        rx_ring->next_to_clean,
+                        rx_ring->ring_active);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+                        i, rx_ring->stats.packets,
+                        rx_ring->stats.bytes,
+                        rx_ring->rx_stats.non_eop_descs);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+                        i,
+                        rx_ring->rx_stats.alloc_rx_page_failed,
+                       rx_ring->rx_stats.alloc_rx_buff_failed);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+                        i, rx_ring->size,
+                        (long unsigned int)rx_ring->dma);
+               dev_info(&pf->pdev->dev,
+                        "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
+                        i, rx_ring->vsi,
+                        rx_ring->q_vector);
        }
-       if (vsi->tx_rings) {
-               for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: desc = %p\n",
-                                i, vsi->tx_rings[i].desc);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
-                                i, vsi->tx_rings[i].dev,
-                                vsi->tx_rings[i].netdev,
-                                vsi->tx_rings[i].tx_bi);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-                                i, vsi->tx_rings[i].state,
-                                vsi->tx_rings[i].queue_index,
-                                vsi->tx_rings[i].reg_idx);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: dtype = %d\n",
-                                i, vsi->tx_rings[i].dtype);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-                                i, vsi->tx_rings[i].hsplit,
-                                vsi->tx_rings[i].next_to_use,
-                                vsi->tx_rings[i].next_to_clean,
-                                vsi->tx_rings[i].ring_active);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
-                                i, vsi->tx_rings[i].tx_stats.packets,
-                                vsi->tx_rings[i].tx_stats.bytes,
-                                vsi->tx_rings[i].tx_stats.restart_queue);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
-                                i,
-                                vsi->tx_rings[i].tx_stats.tx_busy,
-                                vsi->tx_rings[i].tx_stats.completed,
-                                vsi->tx_rings[i].tx_stats.tx_done_old);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
-                                i, vsi->tx_rings[i].size,
-                                (long unsigned int)vsi->tx_rings[i].dma);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
-                                i, vsi->tx_rings[i].vsi,
-                                vsi->tx_rings[i].q_vector);
-                       dev_info(&pf->pdev->dev,
-                                "    tx_rings[%i]: DCB tc = %d\n",
-                                i, vsi->tx_rings[i].dcb_tc);
-               }
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+               if (!tx_ring)
+                       continue;
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: desc = %p\n",
+                        i, tx_ring->desc);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+                        i, tx_ring->dev,
+                        tx_ring->netdev,
+                        tx_ring->tx_bi);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+                        i, tx_ring->state,
+                        tx_ring->queue_index,
+                        tx_ring->reg_idx);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: dtype = %d\n",
+                        i, tx_ring->dtype);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+                        i, tx_ring->hsplit,
+                        tx_ring->next_to_use,
+                        tx_ring->next_to_clean,
+                        tx_ring->ring_active);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+                        i, tx_ring->stats.packets,
+                        tx_ring->stats.bytes,
+                        tx_ring->tx_stats.restart_queue);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+                        i,
+                        tx_ring->tx_stats.tx_busy,
+                        tx_ring->tx_stats.tx_done_old);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+                        i, tx_ring->size,
+                        (long unsigned int)tx_ring->dma);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
+                        i, tx_ring->vsi,
+                        tx_ring->q_vector);
+               dev_info(&pf->pdev->dev,
+                        "    tx_rings[%i]: DCB tc = %d\n",
+                        i, tx_ring->dcb_tc);
        }
+       rcu_read_unlock();
        dev_info(&pf->pdev->dev,
                 "    work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
                 vsi->work_limit, vsi->rx_itr_setting,
@@ -587,15 +587,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
        dev_info(&pf->pdev->dev,
                 "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
                 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
-       if (vsi->q_vectors) {
-               for (i = 0; i < vsi->num_q_vectors; i++) {
-                       dev_info(&pf->pdev->dev,
-                                "    q_vectors[%i]: base index = %ld\n",
-                                i, ((long int)*vsi->q_vectors[i].rx.ring-
-                                       (long int)*vsi->q_vectors[0].rx.ring)/
-                                       sizeof(struct i40e_ring));
-               }
-       }
        dev_info(&pf->pdev->dev,
                 "    num_q_vectors = %i, base_vector = %i\n",
                 vsi->num_q_vectors, vsi->base_vector);
@@ -792,9 +783,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                return;
        }
        if (is_rx_ring)
-               ring = vsi->rx_rings[ring_id];
+               ring = *vsi->rx_rings[ring_id];
        else
-               ring = vsi->tx_rings[ring_id];
+               ring = *vsi->tx_rings[ring_id];
        if (cnt == 2) {
                dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
                         vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
@@ -1028,11 +1019,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                      size_t count, loff_t *ppos)
 {
        struct i40e_pf *pf = filp->private_data;
+       char *cmd_buf, *cmd_buf_tmp;
        int bytes_not_copied;
        struct i40e_vsi *vsi;
        u8 *print_buf_start;
        u8 *print_buf;
-       char *cmd_buf;
        int vsi_seid;
        int veb_seid;
        int cnt;
@@ -1051,6 +1042,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                count -= bytes_not_copied;
        cmd_buf[count] = '\0';
 
+       cmd_buf_tmp = strchr(cmd_buf, '\n');
+       if (cmd_buf_tmp) {
+               *cmd_buf_tmp = '\0';
+               count = cmd_buf_tmp - cmd_buf + 1;
+       }
+
        print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
        if (!print_buf_start)
                goto command_write_done;
@@ -1157,9 +1154,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                i40e_veb_release(pf->veb[i]);
 
        } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
-               u8 ma[6];
-               int vlan = 0;
                struct i40e_mac_filter *f;
+               int vlan = 0;
+               u8 ma[6];
                int ret;
 
                cnt = sscanf(&cmd_buf[11],
@@ -1195,8 +1192,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                 ma, vlan, vsi_seid, f, ret);
 
        } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
-               u8 ma[6];
                int vlan = 0;
+               u8 ma[6];
                int ret;
 
                cnt = sscanf(&cmd_buf[11],
@@ -1232,9 +1229,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                 ma, vlan, vsi_seid, ret);
 
        } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
-               int v;
-               u16 vid;
                i40e_status ret;
+               u16 vid;
+               int v;
 
                cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
                if (cnt != 2) {
@@ -1545,10 +1542,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
                   (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
                struct i40e_fdir_data fd_data;
-               int ret;
                u16 packet_len, i, j = 0;
                char *asc_packet;
                bool add = false;
+               int ret;
 
                asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
                                     GFP_KERNEL);
@@ -1636,9 +1633,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        }
                } else if (strncmp(&cmd_buf[5],
                           "get local", 9) == 0) {
+                       u16 llen, rlen;
                        int ret, i;
                        u8 *buff;
-                       u16 llen, rlen;
                        buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
                        if (!buff)
                                goto command_write_done;
@@ -1669,9 +1666,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        kfree(buff);
                        buff = NULL;
                } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
+                       u16 llen, rlen;
                        int ret, i;
                        u8 *buff;
-                       u16 llen, rlen;
                        buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
                        if (!buff)
                                goto command_write_done;
@@ -1747,11 +1744,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        goto command_write_done;
                }
 
-               /* Read at least 512 words */
-               if (buffer_len == 0)
-                       buffer_len = 512;
+               /* set the max length */
+               buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
 
                bytes = 2 * buffer_len;
+
+               /* read at least 1k bytes, no more than 4kB */
+               bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
                buff = kzalloc(bytes, GFP_KERNEL);
                if (!buff)
                        goto command_write_done;
@@ -1903,6 +1902,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
        struct i40e_pf *pf = filp->private_data;
        int bytes_not_copied;
        struct i40e_vsi *vsi;
+       char *buf_tmp;
        int vsi_seid;
        int i, cnt;
 
@@ -1921,6 +1921,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
                count -= bytes_not_copied;
        i40e_dbg_netdev_ops_buf[count] = '\0';
 
+       buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
+       if (buf_tmp) {
+               *buf_tmp = '\0';
+               count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
+       }
+
        if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
                cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
                if (cnt != 1) {
@@ -1996,7 +2002,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
                        goto netdev_ops_write_done;
                }
                for (i = 0; i < vsi->num_q_vectors; i++)
-                       napi_schedule(&vsi->q_vectors[i].napi);
+                       napi_schedule(&vsi->q_vectors[i]->napi);
                dev_info(&pf->pdev->dev, "napi called\n");
        } else {
                dev_info(&pf->pdev->dev, "unknown command '%s'\n",
@@ -2024,21 +2030,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
  **/
 void i40e_dbg_pf_init(struct i40e_pf *pf)
 {
-       struct dentry *pfile __attribute__((unused));
+       struct dentry *pfile;
        const char *name = pci_name(pf->pdev);
+       const struct device *dev = &pf->pdev->dev;
 
        pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
-       if (pf->i40e_dbg_pf) {
-               pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf,
-                                           pf, &i40e_dbg_command_fops);
-               pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
-                                           &i40e_dbg_dump_fops);
-               pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf,
-                                           pf, &i40e_dbg_netdev_ops_fops);
-       } else {
-               dev_info(&pf->pdev->dev,
-                        "debugfs entry for %s failed\n", name);
-       }
+       if (!pf->i40e_dbg_pf)
+               return;
+
+       pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
+                                   &i40e_dbg_command_fops);
+       if (!pfile)
+               goto create_failed;
+
+       pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
+                                   &i40e_dbg_dump_fops);
+       if (!pfile)
+               goto create_failed;
+
+       pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
+                                   &i40e_dbg_netdev_ops_fops);
+       if (!pfile)
+               goto create_failed;
+
+       return;
+
+create_failed:
+       dev_info(dev, "debugfs dir/file for %s failed\n", name);
+       debugfs_remove_recursive(pf->i40e_dbg_pf);
+       return;
 }
 
 /**
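The reworked i40e_dbg_pf_init() above trades the single dentry check for a create-then-unwind flow: each debugfs_create_file() result is tested, and any failure tears the whole directory down with one debugfs_remove_recursive() call. A minimal userspace sketch of the same acquire-or-unwind shape, with malloc-backed stand-ins (create_dir() and create_file() are illustrative names here, not the debugfs API):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for debugfs_create_dir()/debugfs_create_file();
 * the names and the malloc-backed "handles" are not the kernel API. */
static void *create_dir(const char *name)  { (void)name; return malloc(1); }
static void *create_file(const char *name) { (void)name; return malloc(1); }

static int init_debug_entries(void)
{
	void *dir = NULL, *cmd = NULL, *dump = NULL;

	dir = create_dir("i40e");
	if (!dir)
		return -1;

	cmd = create_file("command");
	if (!cmd)
		goto create_failed;

	dump = create_file("dump");
	if (!dump)
		goto create_failed;

	return 0;	/* entries stay allocated until teardown, as in the driver */

create_failed:
	/* one unwind path for every partial failure, playing the role of
	 * debugfs_remove_recursive() on the directory in the hunk above */
	fprintf(stderr, "debug entry creation failed\n");
	free(dump);
	free(cmd);
	free(dir);
	return -1;
}

int main(void)
{
	return init_debug_entries() ? 1 : 0;
}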
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9a76b8cec76c0b9ef700c8fb6c68c75fd4e09e05..1b86138fa9e19fb0a87510db8d05c5dafd347a61 100644 (file)
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
        ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
-       ring->rx_pending = vsi->rx_rings[0].count;
-       ring->tx_pending = vsi->tx_rings[0].count;
+       ring->rx_pending = vsi->rx_rings[0]->count;
+       ring->tx_pending = vsi->tx_rings[0]->count;
        ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
 }
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
        new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
 
        /* if nothing to do return success */
-       if ((new_tx_count == vsi->tx_rings[0].count) &&
-           (new_rx_count == vsi->rx_rings[0].count))
+       if ((new_tx_count == vsi->tx_rings[0]->count) &&
+           (new_rx_count == vsi->rx_rings[0]->count))
                return 0;
 
        while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
        if (!netif_running(vsi->netdev)) {
                /* simple case - set for the next time the netdev is started */
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       vsi->tx_rings[i].count = new_tx_count;
-                       vsi->rx_rings[i].count = new_rx_count;
+                       vsi->tx_rings[i]->count = new_tx_count;
+                       vsi->rx_rings[i]->count = new_rx_count;
                }
                goto done;
        }
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
         */
 
        /* alloc updated Tx resources */
-       if (new_tx_count != vsi->tx_rings[0].count) {
+       if (new_tx_count != vsi->tx_rings[0]->count) {
                netdev_info(netdev,
                            "Changing Tx descriptor count from %d to %d.\n",
-                           vsi->tx_rings[0].count, new_tx_count);
+                           vsi->tx_rings[0]->count, new_tx_count);
                tx_rings = kcalloc(vsi->alloc_queue_pairs,
                                   sizeof(struct i40e_ring), GFP_KERNEL);
                if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        /* clone ring and setup updated count */
-                       tx_rings[i] = vsi->tx_rings[i];
+                       tx_rings[i] = *vsi->tx_rings[i];
                        tx_rings[i].count = new_tx_count;
                        err = i40e_setup_tx_descriptors(&tx_rings[i]);
                        if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
        }
 
        /* alloc updated Rx resources */
-       if (new_rx_count != vsi->rx_rings[0].count) {
+       if (new_rx_count != vsi->rx_rings[0]->count) {
                netdev_info(netdev,
                            "Changing Rx descriptor count from %d to %d\n",
-                           vsi->rx_rings[0].count, new_rx_count);
+                           vsi->rx_rings[0]->count, new_rx_count);
                rx_rings = kcalloc(vsi->alloc_queue_pairs,
                                   sizeof(struct i40e_ring), GFP_KERNEL);
                if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        /* clone ring and setup updated count */
-                       rx_rings[i] = vsi->rx_rings[i];
+                       rx_rings[i] = *vsi->rx_rings[i];
                        rx_rings[i].count = new_rx_count;
                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
                        if (err) {
@@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 
        if (tx_rings) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       i40e_free_tx_resources(&vsi->tx_rings[i]);
-                       vsi->tx_rings[i] = tx_rings[i];
+                       i40e_free_tx_resources(vsi->tx_rings[i]);
+                       *vsi->tx_rings[i] = tx_rings[i];
                }
                kfree(tx_rings);
                tx_rings = NULL;
@@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 
        if (rx_rings) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       i40e_free_rx_resources(&vsi->rx_rings[i]);
-                       vsi->rx_rings[i] = rx_rings[i];
+                       i40e_free_rx_resources(vsi->rx_rings[i]);
+                       *vsi->rx_rings[i] = rx_rings[i];
                }
                kfree(rx_rings);
                rx_rings = NULL;
@@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
        char *p;
        int j;
        struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
+       unsigned int start;
 
        i40e_update_stats(vsi);
 
@@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
                        sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
-       for (j = 0; j < vsi->num_queue_pairs; j++) {
-               data[i++] = vsi->tx_rings[j].tx_stats.packets;
-               data[i++] = vsi->tx_rings[j].tx_stats.bytes;
-       }
-       for (j = 0; j < vsi->num_queue_pairs; j++) {
-               data[i++] = vsi->rx_rings[j].rx_stats.packets;
-               data[i++] = vsi->rx_rings[j].rx_stats.bytes;
+       rcu_read_lock();
+       for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
+               struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
+               struct i40e_ring *rx_ring;
+
+               if (!tx_ring)
+                       continue;
+
+               /* process Tx ring statistics */
+               do {
+                       start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+                       data[i] = tx_ring->stats.packets;
+                       data[i + 1] = tx_ring->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+               /* Rx ring is the 2nd half of the queue pair */
+               rx_ring = &tx_ring[1];
+               do {
+                       start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+                       data[i + 2] = rx_ring->stats.packets;
+                       data[i + 3] = rx_ring->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
        }
+       rcu_read_unlock();
        if (vsi == pf->vsi[pf->lan_vsi]) {
                for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
                        p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
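The ethtool stats hunk above stops reading per-ring counters directly and instead snapshots each ring's packets/bytes inside a u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() loop, with rcu_read_lock() and ACCESS_ONCE() guarding the ring pointers (the rings and q_vectors are freed with kfree_rcu() later in this diff). A minimal userspace sketch of the same retry-until-consistent read, using plain C11 atomics with sequentially consistent ordering instead of the kernel's u64_stats_sync seqcount; the ring_stats type and function names are illustrative only:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	atomic_uint      seq;     /* even: stable, odd: update in progress */
	_Atomic uint64_t packets;
	_Atomic uint64_t bytes;
};

/* writer side: make seq odd, update the counters, make seq even again */
static void ring_stats_add(struct ring_stats *s, uint64_t pkts, uint64_t len)
{
	atomic_fetch_add(&s->seq, 1);
	atomic_fetch_add(&s->packets, pkts);
	atomic_fetch_add(&s->bytes, len);
	atomic_fetch_add(&s->seq, 1);
}

/* reader side: retry until an even, unchanged sequence brackets the reads */
static void ring_stats_snapshot(struct ring_stats *s,
				uint64_t *pkts, uint64_t *len)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*pkts = atomic_load(&s->packets);
		*len  = atomic_load(&s->bytes);
	} while ((start & 1) || start != atomic_load(&s->seq));
}

int main(void)
{
	struct ring_stats s = {0};
	uint64_t p, b;

	ring_stats_add(&s, 3, 4500);
	ring_stats_snapshot(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}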
@@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                        snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
                        p += ETH_GSTRING_LEN;
-               }
-               for (i = 0; i < vsi->num_queue_pairs; i++) {
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
                        p += ETH_GSTRING_LEN;
                        snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
@@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
        }
 
        vector = vsi->base_vector;
-       q_vector = vsi->q_vectors;
-       for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
+       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               q_vector = vsi->q_vectors[i];
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
                wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
                q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
drivers/net/ethernet/intel/i40e/i40e_main.c
index 221aa4795017649ccdd57accb01520c4bccb42e8..41a79df373d5fb137afb30629cdf26d5d7657d71 100644 (file)
@@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 11
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -347,14 +347,53 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
  **/
 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
                                             struct net_device *netdev,
-                                            struct rtnl_link_stats64 *storage)
+                                            struct rtnl_link_stats64 *stats)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
+       struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+       int i;
+
+       rcu_read_lock();
+       for (i = 0; i < vsi->num_queue_pairs; i++) {
+               struct i40e_ring *tx_ring, *rx_ring;
+               u64 bytes, packets;
+               unsigned int start;
+
+               tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+               if (!tx_ring)
+                       continue;
 
-       *storage = *i40e_get_vsi_stats_struct(vsi);
+               do {
+                       start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
+                       packets = tx_ring->stats.packets;
+                       bytes   = tx_ring->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));
+
+               stats->tx_packets += packets;
+               stats->tx_bytes   += bytes;
+               rx_ring = &tx_ring[1];
 
-       return storage;
+               do {
+                       start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
+                       packets = rx_ring->stats.packets;
+                       bytes   = rx_ring->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
+
+               stats->rx_packets += packets;
+               stats->rx_bytes   += bytes;
+       }
+       rcu_read_unlock();
+
+       /* following stats updated by ixgbe_watchdog_task() */
+       stats->multicast        = vsi_stats->multicast;
+       stats->tx_errors        = vsi_stats->tx_errors;
+       stats->tx_dropped       = vsi_stats->tx_dropped;
+       stats->rx_errors        = vsi_stats->rx_errors;
+       stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
+       stats->rx_length_errors = vsi_stats->rx_length_errors;
+
+       return stats;
 }
 
 /**
@@ -376,10 +415,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
        memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
        if (vsi->rx_rings)
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       memset(&vsi->rx_rings[i].rx_stats, 0 ,
-                              sizeof(vsi->rx_rings[i].rx_stats));
-                       memset(&vsi->tx_rings[i].tx_stats, 0,
-                              sizeof(vsi->tx_rings[i].tx_stats));
+                       memset(&vsi->rx_rings[i]->stats, 0 ,
+                              sizeof(vsi->rx_rings[i]->stats));
+                       memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+                              sizeof(vsi->rx_rings[i]->rx_stats));
+                       memset(&vsi->tx_rings[i]->stats, 0 ,
+                              sizeof(vsi->tx_rings[i]->stats));
+                       memset(&vsi->tx_rings[i]->tx_stats, 0,
+                              sizeof(vsi->tx_rings[i]->tx_stats));
                }
        vsi->stat_offsets_loaded = false;
 }
@@ -598,7 +641,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
                        continue;
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       struct i40e_ring *ring = &vsi->tx_rings[i];
+                       struct i40e_ring *ring = vsi->tx_rings[i];
                        clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
                }
        }
@@ -652,7 +695,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
                        continue;
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       struct i40e_ring *ring = &vsi->tx_rings[i];
+                       struct i40e_ring *ring = vsi->tx_rings[i];
 
                        tc = ring->dcb_tc;
                        if (xoff[tc])
@@ -704,21 +747,38 @@ void i40e_update_stats(struct i40e_vsi *vsi)
        tx_restart = tx_busy = 0;
        rx_page = 0;
        rx_buf = 0;
+       rcu_read_lock();
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                struct i40e_ring *p;
+               u64 bytes, packets;
+               unsigned int start;
 
-               p = &vsi->rx_rings[q];
-               rx_b += p->rx_stats.bytes;
-               rx_p += p->rx_stats.packets;
-               rx_buf += p->rx_stats.alloc_rx_buff_failed;
-               rx_page += p->rx_stats.alloc_rx_page_failed;
+               /* locate Tx ring */
+               p = ACCESS_ONCE(vsi->tx_rings[q]);
 
-               p = &vsi->tx_rings[q];
-               tx_b += p->tx_stats.bytes;
-               tx_p += p->tx_stats.packets;
+               do {
+                       start = u64_stats_fetch_begin_bh(&p->syncp);
+                       packets = p->stats.packets;
+                       bytes = p->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+               tx_b += bytes;
+               tx_p += packets;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;
+
+               /* Rx queue is part of the same block as Tx queue */
+               p = &p[1];
+               do {
+                       start = u64_stats_fetch_begin_bh(&p->syncp);
+                       packets = p->stats.packets;
+                       bytes = p->stats.bytes;
+               } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+               rx_b += bytes;
+               rx_p += packets;
+               rx_buf += p->rx_stats.alloc_rx_buff_failed;
+               rx_page += p->rx_stats.alloc_rx_page_failed;
        }
+       rcu_read_unlock();
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
        vsi->rx_page_failed = rx_page;
@@ -1988,7 +2048,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
        int i, err = 0;
 
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-               err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+               err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
 
        return err;
 }
@@ -2004,8 +2064,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
        int i;
 
        for (i = 0; i < vsi->num_queue_pairs; i++)
-               if (vsi->tx_rings[i].desc)
-                       i40e_free_tx_resources(&vsi->tx_rings[i]);
+               if (vsi->tx_rings[i]->desc)
+                       i40e_free_tx_resources(vsi->tx_rings[i]);
 }
 
 /**
@@ -2023,7 +2083,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
        int i, err = 0;
 
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-               err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+               err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
        return err;
 }
 
@@ -2038,8 +2098,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
        int i;
 
        for (i = 0; i < vsi->num_queue_pairs; i++)
-               if (vsi->rx_rings[i].desc)
-                       i40e_free_rx_resources(&vsi->rx_rings[i]);
+               if (vsi->rx_rings[i]->desc)
+                       i40e_free_rx_resources(vsi->rx_rings[i]);
 }
 
 /**
@@ -2114,8 +2174,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 
        /* Now associate this queue with this PCI function */
        qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
-       qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
-                                               & I40E_QTX_CTL_PF_INDX_MASK);
+       qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+                   I40E_QTX_CTL_PF_INDX_MASK);
        wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
        i40e_flush(hw);
 
@@ -2223,8 +2283,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
        int err = 0;
        u16 i;
 
-       for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
-               err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+       for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+               err = i40e_configure_tx_ring(vsi->tx_rings[i]);
 
        return err;
 }
@@ -2274,7 +2334,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 
        /* set up individual rings */
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-               err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+               err = i40e_configure_rx_ring(vsi->rx_rings[i]);
 
        return err;
 }
@@ -2298,8 +2358,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
                qoffset = vsi->tc_config.tc_info[n].qoffset;
                qcount = vsi->tc_config.tc_info[n].qcount;
                for (i = qoffset; i < (qoffset + qcount); i++) {
-                       struct i40e_ring *rx_ring = &vsi->rx_rings[i];
-                       struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+                       struct i40e_ring *rx_ring = vsi->rx_rings[i];
+                       struct i40e_ring *tx_ring = vsi->tx_rings[i];
                        rx_ring->dcb_tc = n;
                        tx_ring->dcb_tc = n;
                }
@@ -2354,8 +2414,8 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
         */
        qp = vsi->base_queue;
        vector = vsi->base_vector;
-       q_vector = vsi->q_vectors;
-       for (i = 0; i < vsi->num_q_vectors; i++, q_vector++, vector++) {
+       for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+               q_vector = vsi->q_vectors[i];
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
                q_vector->rx.latency_range = I40E_LOW_LATENCY;
                wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
@@ -2435,7 +2495,7 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
  **/
 static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
 {
-       struct i40e_q_vector *q_vector = vsi->q_vectors;
+       struct i40e_q_vector *q_vector = vsi->q_vectors[0];
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
        u32 val;
@@ -2472,7 +2532,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
  **/
-static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        u32 val;
@@ -2500,7 +2560,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
              I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
              (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
        wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
-       i40e_flush(hw);
+       /* skip the flush */
 }
 
 /**
@@ -2512,7 +2572,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
 {
        struct i40e_q_vector *q_vector = data;
 
-       if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+       if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;
 
        napi_schedule(&q_vector->napi);
@@ -2529,7 +2589,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
 {
        struct i40e_q_vector *q_vector = data;
 
-       if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+       if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;
 
        pr_info("fdir ring cleaning needed\n");
@@ -2554,16 +2614,16 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
        int vector, err;
 
        for (vector = 0; vector < q_vectors; vector++) {
-               struct i40e_q_vector *q_vector = &(vsi->q_vectors[vector]);
+               struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
 
-               if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+               if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
                        tx_int_idx++;
-               } else if (q_vector->rx.ring[0]) {
+               } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", basename, "rx", rx_int_idx++);
-               } else if (q_vector->tx.ring[0]) {
+               } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", basename, "tx", tx_int_idx++);
                } else {
@@ -2611,8 +2671,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
        int i;
 
        for (i = 0; i < vsi->num_queue_pairs; i++) {
-               wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
-               wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+               wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+               wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
        }
 
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2649,6 +2709,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
                i40e_irq_dynamic_enable_icr0(pf);
        }
 
+       i40e_flush(&pf->hw);
        return 0;
 }
 
@@ -2681,14 +2742,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
        icr0 = rd32(hw, I40E_PFINT_ICR0);
 
-       /* if sharing a legacy IRQ, we might get called w/o an intr pending */
-       if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
-               return IRQ_NONE;
-
        val = rd32(hw, I40E_PFINT_DYN_CTL0);
        val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
        wr32(hw, I40E_PFINT_DYN_CTL0, val);
 
+       /* if sharing a legacy IRQ, we might get called w/o an intr pending */
+       if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
+               return IRQ_NONE;
+
        ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
 
        /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@@ -2702,10 +2763,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
                qval = rd32(hw, I40E_QINT_TQCTL(0));
                qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
                wr32(hw, I40E_QINT_TQCTL(0), qval);
-               i40e_flush(hw);
 
                if (!test_bit(__I40E_DOWN, &pf->state))
-                       napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0].napi);
+                       napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
        }
 
        if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
@@ -2764,7 +2824,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
 
        /* re-enable interrupt causes */
        wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
-       i40e_flush(hw);
        if (!test_bit(__I40E_DOWN, &pf->state)) {
                i40e_service_event_schedule(pf);
                i40e_irq_dynamic_enable_icr0(pf);
@@ -2774,40 +2833,26 @@ static irqreturn_t i40e_intr(int irq, void *data)
 }
 
 /**
- * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
- * @vsi: the VSI being configured
- * @v_idx: vector index
- * @r_idx: rx queue index
- **/
-static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
-{
-       struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
-       struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
-
-       rx_ring->q_vector = q_vector;
-       q_vector->rx.ring[q_vector->rx.count] = rx_ring;
-       q_vector->rx.count++;
-       q_vector->rx.latency_range = I40E_LOW_LATENCY;
-       q_vector->vsi = vsi;
-}
-
-/**
- * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
  * @vsi: the VSI being configured
  * @v_idx: vector index
- * @t_idx: tx queue index
+ * @qp_idx: queue pair index
  **/
-static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
+static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
 {
-       struct i40e_q_vector *q_vector = &(vsi->q_vectors[v_idx]);
-       struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+       struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+       struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+       struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
 
        tx_ring->q_vector = q_vector;
-       q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+       tx_ring->next = q_vector->tx.ring;
+       q_vector->tx.ring = tx_ring;
        q_vector->tx.count++;
-       q_vector->tx.latency_range = I40E_LOW_LATENCY;
-       q_vector->num_ringpairs++;
-       q_vector->vsi = vsi;
+
+       rx_ring->q_vector = q_vector;
+       rx_ring->next = q_vector->rx.ring;
+       q_vector->rx.ring = rx_ring;
+       q_vector->rx.count++;
 }
 
 /**
@@ -2823,7 +2868,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 {
        int qp_remaining = vsi->num_queue_pairs;
        int q_vectors = vsi->num_q_vectors;
-       int qp_per_vector;
+       int num_ringpairs;
        int v_start = 0;
        int qp_idx = 0;
 
@@ -2831,11 +2876,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
         * group them so there are multiple queues per vector.
         */
        for (; v_start < q_vectors && qp_remaining; v_start++) {
-               qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
-               for (; qp_per_vector;
-                    qp_per_vector--, qp_idx++, qp_remaining--) {
-                       map_vector_to_rxq(vsi, v_start, qp_idx);
-                       map_vector_to_txq(vsi, v_start, qp_idx);
+               struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+               num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+               q_vector->num_ringpairs = num_ringpairs;
+
+               q_vector->rx.count = 0;
+               q_vector->tx.count = 0;
+               q_vector->rx.ring = NULL;
+               q_vector->tx.ring = NULL;
+
+               while (num_ringpairs--) {
+                       map_vector_to_qp(vsi, v_start, qp_idx);
+                       qp_idx++;
+                       qp_remaining--;
                }
        }
 }
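map_vector_to_qp() above replaces the fixed ring[] arrays inside each q_vector with per-vector singly linked lists: every Tx and Rx ring is pushed onto the head of its vector's list through the ring's next pointer, and later hunks walk those lists with i40e_for_each_ring(). A minimal sketch of the push-and-walk, with a stripped-down ring type standing in for struct i40e_ring (field and function names here are illustrative):

#include <stdio.h>

struct ring {
	int          queue_index;
	struct ring *next;        /* next ring owned by the same vector */
};

struct ring_container {
	struct ring *ring;        /* head of the per-vector list */
	int          count;
};

/* head insertion, as done for q_vector->tx.ring / q_vector->rx.ring */
static void container_push(struct ring_container *rc, struct ring *r)
{
	r->next  = rc->ring;
	rc->ring = r;
	rc->count++;
}

int main(void)
{
	struct ring rings[3] = { {0, NULL}, {1, NULL}, {2, NULL} };
	struct ring_container tx = { NULL, 0 };
	struct ring *r;

	for (int i = 0; i < 3; i++)
		container_push(&tx, &rings[i]);

	/* rough equivalent of i40e_for_each_ring(r, tx) */
	for (r = tx.ring; r; r = r->next)
		printf("ring %d\n", r->queue_index);

	return 0;
}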
@@ -2887,7 +2942,7 @@ static void i40e_netpoll(struct net_device *netdev)
        pf->flags |= I40E_FLAG_IN_NETPOLL;
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                for (i = 0; i < vsi->num_q_vectors; i++)
-                       i40e_msix_clean_rings(0, &vsi->q_vectors[i]);
+                       i40e_msix_clean_rings(0, vsi->q_vectors[i]);
        } else {
                i40e_intr(pf->pdev->irq, netdev);
        }
@@ -3073,14 +3128,14 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
                        u16 vector = i + base;
 
                        /* free only the irqs that were actually requested */
-                       if (vsi->q_vectors[i].num_ringpairs == 0)
+                       if (vsi->q_vectors[i]->num_ringpairs == 0)
                                continue;
 
                        /* clear the affinity_mask in the IRQ descriptor */
                        irq_set_affinity_hint(pf->msix_entries[vector].vector,
                                              NULL);
                        free_irq(pf->msix_entries[vector].vector,
-                                &vsi->q_vectors[i]);
+                                vsi->q_vectors[i]);
 
                        /* Tear down the interrupt queue link list
                         *
@@ -3163,6 +3218,39 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
        }
 }
 
+/**
+ * i40e_free_q_vector - Free memory allocated for specific interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+       struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
+       struct i40e_ring *ring;
+
+       if (!q_vector)
+               return;
+
+       /* disassociate q_vector from rings */
+       i40e_for_each_ring(ring, q_vector->tx)
+               ring->q_vector = NULL;
+
+       i40e_for_each_ring(ring, q_vector->rx)
+               ring->q_vector = NULL;
+
+       /* only VSI w/ an associated netdev is set up w/ NAPI */
+       if (vsi->netdev)
+               netif_napi_del(&q_vector->napi);
+
+       vsi->q_vectors[v_idx] = NULL;
+
+       kfree_rcu(q_vector, rcu);
+}
+
 /**
  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
  * @vsi: the VSI being un-configured
@@ -3174,24 +3262,8 @@ static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
 {
        int v_idx;
 
-       for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
-               struct i40e_q_vector *q_vector = &vsi->q_vectors[v_idx];
-               int r_idx;
-
-               if (!q_vector)
-                       continue;
-
-               /* disassociate q_vector from rings */
-               for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
-                       q_vector->tx.ring[r_idx]->q_vector = NULL;
-               for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
-                       q_vector->rx.ring[r_idx]->q_vector = NULL;
-
-               /* only VSI w/ an associated netdev is set up w/ NAPI */
-               if (vsi->netdev)
-                       netif_napi_del(&q_vector->napi);
-       }
-       kfree(vsi->q_vectors);
+       for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+               i40e_free_q_vector(vsi, v_idx);
 }
 
 /**
@@ -3241,7 +3313,7 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
                return;
 
        for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-               napi_enable(&vsi->q_vectors[q_idx].napi);
+               napi_enable(&vsi->q_vectors[q_idx]->napi);
 }
 
 /**
@@ -3256,7 +3328,7 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
                return;
 
        for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
-               napi_disable(&vsi->q_vectors[q_idx].napi);
+               napi_disable(&vsi->q_vectors[q_idx]->napi);
 }
 
 /**
@@ -3703,8 +3775,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
 
        if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
            (vsi->netdev)) {
+               netdev_info(vsi->netdev, "NIC Link is Up\n");
                netif_tx_start_all_queues(vsi->netdev);
                netif_carrier_on(vsi->netdev);
+       } else if (vsi->netdev) {
+               netdev_info(vsi->netdev, "NIC Link is Down\n");
        }
        i40e_service_event_schedule(pf);
 
@@ -3772,8 +3847,8 @@ void i40e_down(struct i40e_vsi *vsi)
        i40e_napi_disable_all(vsi);
 
        for (i = 0; i < vsi->num_queue_pairs; i++) {
-               i40e_clean_tx_ring(&vsi->tx_rings[i]);
-               i40e_clean_rx_ring(&vsi->rx_rings[i]);
+               i40e_clean_tx_ring(vsi->tx_rings[i]);
+               i40e_clean_rx_ring(vsi->rx_rings[i]);
        }
 }
 
@@ -4153,8 +4228,9 @@ static void i40e_link_event(struct i40e_pf *pf)
        if (new_link == old_link)
                return;
 
-       netdev_info(pf->vsi[pf->lan_vsi]->netdev,
-                   "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+       if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+               netdev_info(pf->vsi[pf->lan_vsi]->netdev,
+                           "NIC Link is %s\n", (new_link ? "Up" : "Down"));
 
        /* Notify the base of the switch tree connected to
         * the link.  Floating VEBs are not notified.
@@ -4199,9 +4275,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
                        continue;
 
                for (i = 0; i < vsi->num_queue_pairs; i++) {
-                       set_check_for_tx_hang(&vsi->tx_rings[i]);
+                       set_check_for_tx_hang(vsi->tx_rings[i]);
                        if (test_bit(__I40E_HANG_CHECK_ARMED,
-                                    &vsi->tx_rings[i].state))
+                                    &vsi->tx_rings[i]->state))
                                armed++;
                }
 
@@ -4537,7 +4613,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf)
        bool new_vsi = false;
        int err, i;
 
-       if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED)))
+       if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
+                          I40E_FLAG_FDIR_ATR_ENABLED)))
                return;
 
        pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@@ -4937,6 +5014,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 {
        int ret = -ENODEV;
        struct i40e_vsi *vsi;
+       int sz_vectors;
+       int sz_rings;
        int vsi_idx;
        int i;
 
@@ -4962,14 +5041,14 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
                vsi_idx = i;             /* Found one! */
        } else {
                ret = -ENODEV;
-               goto err_alloc_vsi;  /* out of VSI slots! */
+               goto unlock_pf;  /* out of VSI slots! */
        }
        pf->next_vsi = ++i;
 
        vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
        if (!vsi) {
                ret = -ENOMEM;
-               goto err_alloc_vsi;
+               goto unlock_pf;
        }
        vsi->type = type;
        vsi->back = pf;
@@ -4982,14 +5061,40 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
        vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
        INIT_LIST_HEAD(&vsi->mac_filter_list);
 
-       i40e_set_num_rings_in_vsi(vsi);
+       ret = i40e_set_num_rings_in_vsi(vsi);
+       if (ret)
+               goto err_rings;
+
+       /* allocate memory for ring pointers */
+       sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+       vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
+       if (!vsi->tx_rings) {
+               ret = -ENOMEM;
+               goto err_rings;
+       }
+       vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
+
+       /* allocate memory for q_vector pointers */
+       sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
+       vsi->q_vectors = kzalloc(sz_vectors, GFP_KERNEL);
+       if (!vsi->q_vectors) {
+               ret = -ENOMEM;
+               goto err_vectors;
+       }
 
        /* Setup default MSIX irq handler for VSI */
        i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);
 
        pf->vsi[vsi_idx] = vsi;
        ret = vsi_idx;
-err_alloc_vsi:
+       goto unlock_pf;
+
+err_vectors:
+       kfree(vsi->tx_rings);
+err_rings:
+       pf->next_vsi = i - 1;
+       kfree(vsi);
+unlock_pf:
        mutex_unlock(&pf->switch_mutex);
        return ret;
 }
@@ -5030,6 +5135,10 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
        i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
        i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
 
+       /* free the ring and vector containers */
+       kfree(vsi->q_vectors);
+       kfree(vsi->tx_rings);
+
        pf->vsi[vsi->idx] = NULL;
        if (vsi->idx < pf->next_vsi)
                pf->next_vsi = vsi->idx;
@@ -5042,6 +5151,24 @@ free_vsi:
        return 0;
 }
 
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+       int i;
+
+       if (vsi->tx_rings[0])
+               for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+                       kfree_rcu(vsi->tx_rings[i], rcu);
+                       vsi->tx_rings[i] = NULL;
+                       vsi->rx_rings[i] = NULL;
+               }
+
+       return 0;
+}
+
 /**
  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
  * @vsi: the VSI being configured
@@ -5049,28 +5176,16 @@ free_vsi:
 static int i40e_alloc_rings(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
-       int ret = 0;
        int i;
 
-       vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
-                               sizeof(struct i40e_ring), GFP_KERNEL);
-       if (!vsi->rx_rings) {
-               ret = -ENOMEM;
-               goto err_alloc_rings;
-       }
-
-       vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
-                               sizeof(struct i40e_ring), GFP_KERNEL);
-       if (!vsi->tx_rings) {
-               ret = -ENOMEM;
-               kfree(vsi->rx_rings);
-               goto err_alloc_rings;
-       }
-
        /* Set basic values in the rings to be used later during open() */
        for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-               struct i40e_ring *rx_ring = &vsi->rx_rings[i];
-               struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+               struct i40e_ring *tx_ring;
+               struct i40e_ring *rx_ring;
+
+               tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+               if (!tx_ring)
+                       goto err_out;
 
                tx_ring->queue_index = i;
                tx_ring->reg_idx = vsi->base_queue + i;
@@ -5081,7 +5196,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                tx_ring->count = vsi->num_desc;
                tx_ring->size = 0;
                tx_ring->dcb_tc = 0;
+               vsi->tx_rings[i] = tx_ring;
 
+               rx_ring = &tx_ring[1];
                rx_ring->queue_index = i;
                rx_ring->reg_idx = vsi->base_queue + i;
                rx_ring->ring_active = false;
@@ -5095,24 +5212,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                        set_ring_16byte_desc_enabled(rx_ring);
                else
                        clear_ring_16byte_desc_enabled(rx_ring);
-       }
-
-err_alloc_rings:
-       return ret;
-}
-
-/**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
- **/
-static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
-{
-       if (vsi) {
-               kfree(vsi->rx_rings);
-               kfree(vsi->tx_rings);
+               vsi->rx_rings[i] = rx_ring;
        }
 
        return 0;
+
+err_out:
+       i40e_vsi_clear_rings(vsi);
+       return -ENOMEM;
 }
 
 /**
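Two allocation changes above are easy to miss: i40e_vsi_mem_alloc() now takes a single array of 2 * alloc_queue_pairs ring pointers and points rx_rings at its second half, and i40e_alloc_rings() allocates each queue pair as one kzalloc of two struct i40e_ring, so the Rx ring always lives at &tx_ring[1] (which is why other hunks reach it as tx_ring[1]). A minimal userspace sketch of both layouts, assuming calloc/free in place of kzalloc/kfree and a toy ring type:

#include <stdlib.h>

struct ring { int queue_index; int is_rx; };

struct vsi {
	int           alloc_queue_pairs;
	struct ring **tx_rings;     /* first half of one pointer array */
	struct ring **rx_rings;     /* aliases the second half */
};

static void vsi_free_rings(struct vsi *vsi)
{
	for (int i = 0; i < vsi->alloc_queue_pairs; i++)
		free(vsi->tx_rings[i]);     /* frees the Tx+Rx pair together */
	free(vsi->tx_rings);                /* frees both pointer arrays */
}

static int vsi_alloc_rings(struct vsi *vsi, int pairs)
{
	vsi->alloc_queue_pairs = pairs;

	/* one allocation holds both pointer arrays */
	vsi->tx_rings = calloc(2 * (size_t)pairs, sizeof(*vsi->tx_rings));
	if (!vsi->tx_rings)
		return -1;
	vsi->rx_rings = &vsi->tx_rings[pairs];

	for (int i = 0; i < pairs; i++) {
		/* each queue pair is one block of two rings: Tx, then Rx */
		struct ring *tx = calloc(2, sizeof(*tx));

		if (!tx) {
			vsi_free_rings(vsi);
			return -1;
		}
		tx->queue_index = i;
		vsi->tx_rings[i] = tx;

		/* the Rx ring is the second half of the same block */
		vsi->rx_rings[i] = &tx[1];
		vsi->rx_rings[i]->queue_index = i;
		vsi->rx_rings[i]->is_rx = 1;
	}
	return 0;
}

int main(void)
{
	struct vsi vsi = { 0, NULL, NULL };

	if (vsi_alloc_rings(&vsi, 4))
		return 1;
	vsi_free_rings(&vsi);
	return 0;
}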
@@ -5248,6 +5355,38 @@ static int i40e_init_msix(struct i40e_pf *pf)
        return err;
 }
 
+/**
+ * i40e_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the vsi struct
+ *
+ * We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+{
+       struct i40e_q_vector *q_vector;
+
+       /* allocate q_vector */
+       q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
+       if (!q_vector)
+               return -ENOMEM;
+
+       q_vector->vsi = vsi;
+       q_vector->v_idx = v_idx;
+       cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+       if (vsi->netdev)
+               netif_napi_add(vsi->netdev, &q_vector->napi,
+                              i40e_napi_poll, vsi->work_limit);
+
+       q_vector->rx.latency_range = I40E_LOW_LATENCY;
+       q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
+       /* tie q_vector and vsi together */
+       vsi->q_vectors[v_idx] = q_vector;
+
+       return 0;
+}
+
 /**
  * i40e_alloc_q_vectors - Allocate memory for interrupt vectors
  * @vsi: the VSI being configured
@@ -5259,6 +5398,7 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
        int v_idx, num_q_vectors;
+       int err;
 
        /* if not MSIX, give the one vector only to the LAN VSI */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -5268,22 +5408,19 @@ static int i40e_alloc_q_vectors(struct i40e_vsi *vsi)
        else
                return -EINVAL;
 
-       vsi->q_vectors = kcalloc(num_q_vectors,
-                                sizeof(struct i40e_q_vector),
-                                GFP_KERNEL);
-       if (!vsi->q_vectors)
-               return -ENOMEM;
-
        for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-               vsi->q_vectors[v_idx].vsi = vsi;
-               vsi->q_vectors[v_idx].v_idx = v_idx;
-               cpumask_set_cpu(v_idx, &vsi->q_vectors[v_idx].affinity_mask);
-               if (vsi->netdev)
-                       netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx].napi,
-                                      i40e_napi_poll, vsi->work_limit);
+               err = i40e_alloc_q_vector(vsi, v_idx);
+               if (err)
+                       goto err_out;
        }
 
        return 0;
+
+err_out:
+       while (v_idx--)
+               i40e_free_q_vector(vsi, v_idx);
+
+       return err;
 }
 
 /**
@@ -5297,7 +5434,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                err = i40e_init_msix(pf);
                if (err) {
-                       pf->flags &= ~(I40E_FLAG_RSS_ENABLED       |
+                       pf->flags &= ~(I40E_FLAG_MSIX_ENABLED      |
+                                       I40E_FLAG_RSS_ENABLED      |
                                        I40E_FLAG_MQ_ENABLED       |
                                        I40E_FLAG_DCB_ENABLED      |
                                        I40E_FLAG_SRIOV_ENABLED    |
@@ -5312,14 +5450,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
 
        if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
            (pf->flags & I40E_FLAG_MSI_ENABLED)) {
+               dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
                err = pci_enable_msi(pf->pdev);
                if (err) {
-                       dev_info(&pf->pdev->dev,
-                                "MSI init failed (%d), trying legacy.\n", err);
+                       dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
                        pf->flags &= ~I40E_FLAG_MSI_ENABLED;
                }
        }
 
+       if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
+               dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+
        /* track first vector for misc interrupts */
        err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
 }
@@ -5950,7 +6091,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
        int ret = -ENOENT;
        struct i40e_pf *pf = vsi->back;
 
-       if (vsi->q_vectors) {
+       if (vsi->q_vectors[0]) {
                dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
                         vsi->seid);
                return -EEXIST;
@@ -5972,8 +6113,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
                goto vector_setup_out;
        }
 
-       vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
-                                        vsi->num_q_vectors, vsi->idx);
+       if (vsi->num_q_vectors)
+               vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
+                                                vsi->num_q_vectors, vsi->idx);
        if (vsi->base_vector < 0) {
                dev_info(&pf->pdev->dev,
                         "failed to get q tracking for VSI %d, err=%d\n",
drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 49d2cfa9b0cceb20c5b5f3f6d64cac1968398b60..f1f03bc5c729131bacdb13e243213b18b5b43132 100644 (file)
@@ -37,6 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
                           ((u64)td_tag  << I40E_TXD_QW1_L2TAG1_SHIFT));
 }
 
+#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 /**
  * i40e_program_fdir_filter - Program a Flow Director filter
  * @fdir_input: Packet data that will be filter parameters
@@ -50,6 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
        struct i40e_tx_buffer *tx_buf;
        struct i40e_tx_desc *tx_desc;
        struct i40e_ring *tx_ring;
+       unsigned int fpt, dcc;
        struct i40e_vsi *vsi;
        struct device *dev;
        dma_addr_t dma;
@@ -64,93 +66,78 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
        if (!vsi)
                return -ENOENT;
 
-       tx_ring = &vsi->tx_rings[0];
+       tx_ring = vsi->tx_rings[0];
        dev = tx_ring->dev;
 
        dma = dma_map_single(dev, fdir_data->raw_packet,
-                               I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
+                            I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
                goto dma_fail;
 
        /* grab the next descriptor */
-       fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
-       tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       i = tx_ring->next_to_use;
+       fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+       tx_buf = &tx_ring->tx_bi[i];
+
+       tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
-       fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index
-                                            << I40E_TXD_FLTR_QW0_QINDEX_SHIFT)
-                                            & I40E_TXD_FLTR_QW0_QINDEX_MASK);
+       fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
+             I40E_TXD_FLTR_QW0_QINDEX_MASK;
 
-       fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off
-                                           << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT)
-                                           & I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
+       fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
+              I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
 
-       fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype
-                                            << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT)
-                                            & I40E_TXD_FLTR_QW0_PCTYPE_MASK);
+       fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
+              I40E_TXD_FLTR_QW0_PCTYPE_MASK;
 
        /* Use LAN VSI Id if not programmed by user */
        if (fdir_data->dest_vsi == 0)
-               fdir_desc->qindex_flex_ptype_vsi |=
-                                         cpu_to_le32((pf->vsi[pf->lan_vsi]->id)
-                                          << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
+               fpt |= (pf->vsi[pf->lan_vsi]->id) <<
+                      I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
        else
-               fdir_desc->qindex_flex_ptype_vsi |=
-                                           cpu_to_le32((fdir_data->dest_vsi
-                                           << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
-                                           & I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
+               fpt |= ((u32)fdir_data->dest_vsi <<
+                       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
+                      I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
 
-       fdir_desc->dtype_cmd_cntindex =
-                                   cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
+       fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
+
+       dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
 
        if (add)
-               fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-                                      I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE
-                                       << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+               dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
+                      I40E_TXD_FLTR_QW1_PCMD_SHIFT;
        else
-               fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-                                          I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE
-                                          << I40E_TXD_FLTR_QW1_PCMD_SHIFT);
+               dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
+                      I40E_TXD_FLTR_QW1_PCMD_SHIFT;
 
-       fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl
-                                         << I40E_TXD_FLTR_QW1_DEST_SHIFT)
-                                         & I40E_TXD_FLTR_QW1_DEST_MASK);
+       dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
+              I40E_TXD_FLTR_QW1_DEST_MASK;
 
-       fdir_desc->dtype_cmd_cntindex |= cpu_to_le32(
-                    (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
-                     & I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
+       dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
+              I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
 
        if (fdir_data->cnt_index != 0) {
-               fdir_desc->dtype_cmd_cntindex |=
-                                   cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
-               fdir_desc->dtype_cmd_cntindex |=
-                                           cpu_to_le32((fdir_data->cnt_index
-                                           << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
-                                           & I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
+               dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+               dcc |= ((u32)fdir_data->cnt_index <<
+                       I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+                      I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
        }
 
+       fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
        fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
 
        /* Now program a dummy descriptor */
-       tx_desc = I40E_TX_DESC(tx_ring, tx_ring->next_to_use);
-       tx_buf = &tx_ring->tx_bi[tx_ring->next_to_use];
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       i = tx_ring->next_to_use;
+       tx_desc = I40E_TX_DESC(tx_ring, i);
+
+       tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
 
        tx_desc->buffer_addr = cpu_to_le64(dma);
-       td_cmd = I40E_TX_DESC_CMD_EOP |
-                I40E_TX_DESC_CMD_RS  |
-                I40E_TX_DESC_CMD_DUMMY;
+       td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
 
        tx_desc->cmd_type_offset_bsz =
                build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
 
-       /* Mark the data descriptor to be watched */
-       tx_buf->next_to_watch = tx_desc;
-
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
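The rewritten i40e_program_fdir_filter() above accumulates all of the descriptor fields in host-order scratch words (fpt, dcc) and converts to little-endian once, instead of OR-ing cpu_to_le32() values into the descriptor field repeatedly. A minimal userspace sketch of that build-then-store pattern; the shift/mask values below are made up for illustration (they are not the I40E_TXD_FLTR_* definitions), and glibc's htole32() stands in for cpu_to_le32():

#include <endian.h>       /* htole32(), userspace counterpart of cpu_to_le32() */
#include <stdint.h>
#include <stdio.h>

/* hypothetical field layout, chosen only so the shifts and masks line up */
#define QINDEX_SHIFT   0
#define QINDEX_MASK    0x000007FFu
#define FLEXOFF_SHIFT  11
#define FLEXOFF_MASK   0x0000F800u
#define PCTYPE_SHIFT   17
#define PCTYPE_MASK    0x007E0000u

struct filter_desc {
	uint32_t qindex_flex_ptype_vsi;   /* stored little-endian on the wire */
};

int main(void)
{
	struct filter_desc desc;
	uint32_t q_index = 5, flex_off = 2, pctype = 33;
	uint32_t fpt = 0;

	/* accumulate every field in a host-order scratch word ... */
	fpt |= (q_index  << QINDEX_SHIFT)  & QINDEX_MASK;
	fpt |= (flex_off << FLEXOFF_SHIFT) & FLEXOFF_MASK;
	fpt |= (pctype   << PCTYPE_SHIFT)  & PCTYPE_MASK;

	/* ... then do a single endian conversion and a single store */
	desc.qindex_flex_ptype_vsi = htole32(fpt);

	printf("descriptor word (host order): 0x%08lx\n", (unsigned long)fpt);
	return 0;
}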
@@ -158,6 +145,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
         */
        wmb();
 
+       /* Mark the data descriptor to be watched */
+       tx_buf->next_to_watch = tx_desc;
+
        writel(tx_ring->next_to_use, tx_ring->tail);
        return 0;
 
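The small hunk above is an ordering change: tx_buf->next_to_watch is now assigned only after the wmb(), so the marker that the clean-up path polls is published after the descriptor writes it covers have been ordered; the matching read side appears further down, where i40e_clean_tx_irq() gains a read_barrier_depends() after loading next_to_watch. A minimal userspace sketch of the same publish-after-ordering idea, using a C11 release store and acquire load in place of the kernel barriers (the names here are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct tx_desc {
	uint64_t buffer_addr;
	uint64_t cmd_type_offset_bsz;
};

static struct tx_desc desc;
static _Atomic(struct tx_desc *) next_to_watch;   /* published marker */

static void producer(void)
{
	/* fill the descriptor first ... */
	desc.buffer_addr = 0x1000;
	desc.cmd_type_offset_bsz = 0x2a;

	/* ... then publish it; the release store plays the role of the
	 * wmb() followed by the next_to_watch assignment in the driver */
	atomic_store_explicit(&next_to_watch, &desc, memory_order_release);
}

static void consumer(void)
{
	struct tx_desc *d =
		atomic_load_explicit(&next_to_watch, memory_order_acquire);

	if (!d)
		return;                 /* nothing published yet */

	/* safe: the acquire load ordered these reads after the fill above */
	printf("addr=0x%llx cmd=0x%llx\n",
	       (unsigned long long)d->buffer_addr,
	       (unsigned long long)d->cmd_type_offset_bsz);
}

int main(void)
{
	producer();
	consumer();
	return 0;
}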
@@ -188,27 +178,30 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id)
 }
 
 /**
- * i40e_unmap_tx_resource - Release a Tx buffer
+ * i40e_unmap_and_free_tx_resource - Release a Tx buffer
  * @ring:      the ring that owns the buffer
  * @tx_buffer: the buffer to free
  **/
-static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
-                                         struct i40e_tx_buffer *tx_buffer)
+static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
+                                           struct i40e_tx_buffer *tx_buffer)
 {
-       if (tx_buffer->dma) {
-               if (tx_buffer->tx_flags & I40E_TX_FLAGS_MAPPED_AS_PAGE)
-                       dma_unmap_page(ring->dev,
-                                      tx_buffer->dma,
-                                      tx_buffer->length,
-                                      DMA_TO_DEVICE);
-               else
+       if (tx_buffer->skb) {
+               dev_kfree_skb_any(tx_buffer->skb);
+               if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
-                                        tx_buffer->dma,
-                                        tx_buffer->length,
+                                        dma_unmap_addr(tx_buffer, dma),
+                                        dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
+       } else if (dma_unmap_len(tx_buffer, len)) {
+               dma_unmap_page(ring->dev,
+                              dma_unmap_addr(tx_buffer, dma),
+                              dma_unmap_len(tx_buffer, len),
+                              DMA_TO_DEVICE);
        }
-       tx_buffer->dma = 0;
-       tx_buffer->time_stamp = 0;
+       tx_buffer->next_to_watch = NULL;
+       tx_buffer->skb = NULL;
+       dma_unmap_len_set(tx_buffer, len, 0);
+       /* tx_buffer must be completely set up in the transmit path */
 }
 
 /**
@@ -217,7 +210,6 @@ static inline void i40e_unmap_tx_resource(struct i40e_ring *ring,
  **/
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 {
-       struct i40e_tx_buffer *tx_buffer;
        unsigned long bi_size;
        u16 i;
 
@@ -226,13 +218,8 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
                return;
 
        /* Free all the Tx ring sk_buffs */
-       for (i = 0; i < tx_ring->count; i++) {
-               tx_buffer = &tx_ring->tx_bi[i];
-               i40e_unmap_tx_resource(tx_ring, tx_buffer);
-               if (tx_buffer->skb)
-                       dev_kfree_skb_any(tx_buffer->skb);
-               tx_buffer->skb = NULL;
-       }
+       for (i = 0; i < tx_ring->count; i++)
+               i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
 
        bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_bi, 0, bi_size);
@@ -242,6 +229,13 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
+
+       if (!tx_ring->netdev)
+               return;
+
+       /* cleanup Tx queue statistics */
+       netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
+                                                 tx_ring->queue_index));
 }
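
The netdev_tx_reset_queue() added above pairs with the netdev_tx_sent_queue() and netdev_tx_completed_queue() calls introduced further down, which together put the ring under byte queue limits (BQL). A sketch of how the three calls line up over a queue's lifetime; the ring structure here is hypothetical and only the netdev/queue_index fields are assumed:

#include <linux/netdevice.h>

struct my_ring {
        struct net_device *netdev;
        u16 queue_index;
};

static struct netdev_queue *my_txq(struct my_ring *r)
{
        return netdev_get_tx_queue(r->netdev, r->queue_index);
}

/* transmit path: account bytes handed to hardware */
static void my_tx_sent(struct my_ring *r, unsigned int bytes)
{
        netdev_tx_sent_queue(my_txq(r), bytes);
}

/* clean path: report completed work so BQL can adapt its limit */
static void my_tx_cleaned(struct my_ring *r, unsigned int pkts,
                          unsigned int bytes)
{
        netdev_tx_completed_queue(my_txq(r), pkts, bytes);
}

/* ring flush or reset: both sides of the accounting start over */
static void my_tx_reset(struct my_ring *r)
{
        netdev_tx_reset_queue(my_txq(r));
}
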
 
 /**
@@ -300,14 +294,14 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
-       if ((tx_ring->tx_stats.tx_done_old == tx_ring->tx_stats.packets) &&
+       if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
            tx_pending) {
                /* make sure it is true for two checks in a row */
                ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                                       &tx_ring->state);
        } else {
                /* update completed stats and disarm the hang check */
-               tx_ring->tx_stats.tx_done_old = tx_ring->tx_stats.packets;
+               tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
                clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
@@ -331,62 +325,88 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 
        tx_buf = &tx_ring->tx_bi[i];
        tx_desc = I40E_TX_DESC(tx_ring, i);
+       i -= tx_ring->count;
 
-       for (; budget; budget--) {
-               struct i40e_tx_desc *eop_desc;
-
-               eop_desc = tx_buf->next_to_watch;
+       do {
+               struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
 
                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;
 
+               /* prevent any other reads prior to eop_desc */
+               read_barrier_depends();
+
                /* if the descriptor isn't done, no work yet to do */
                if (!(eop_desc->cmd_type_offset_bsz &
                      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
                        break;
 
-               /* count the packet as being completed */
-               tx_ring->tx_stats.completed++;
+               /* clear next_to_watch to prevent false hangs */
                tx_buf->next_to_watch = NULL;
-               tx_buf->time_stamp = 0;
-
-               /* set memory barrier before eop_desc is verified */
-               rmb();
 
-               do {
-                       i40e_unmap_tx_resource(tx_ring, tx_buf);
+               /* update the statistics for this packet */
+               total_bytes += tx_buf->bytecount;
+               total_packets += tx_buf->gso_segs;
 
-                       /* clear dtype status */
-                       tx_desc->cmd_type_offset_bsz &=
-                               ~cpu_to_le64(I40E_TXD_QW1_DTYPE_MASK);
+               /* free the skb */
+               dev_kfree_skb_any(tx_buf->skb);
 
-                       if (likely(tx_desc == eop_desc)) {
-                               eop_desc = NULL;
+               /* unmap skb header data */
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buf, dma),
+                                dma_unmap_len(tx_buf, len),
+                                DMA_TO_DEVICE);
 
-                               dev_kfree_skb_any(tx_buf->skb);
-                               tx_buf->skb = NULL;
+               /* clear tx_buffer data */
+               tx_buf->skb = NULL;
+               dma_unmap_len_set(tx_buf, len, 0);
 
-                               total_bytes += tx_buf->bytecount;
-                               total_packets += tx_buf->gso_segs;
-                       }
+               /* unmap remaining buffers */
+               while (tx_desc != eop_desc) {
 
                        tx_buf++;
                        tx_desc++;
                        i++;
-                       if (unlikely(i == tx_ring->count)) {
-                               i = 0;
+                       if (unlikely(!i)) {
+                               i -= tx_ring->count;
                                tx_buf = tx_ring->tx_bi;
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
                        }
-               } while (eop_desc);
-       }
 
+                       /* unmap any remaining paged data */
+                       if (dma_unmap_len(tx_buf, len)) {
+                               dma_unmap_page(tx_ring->dev,
+                                              dma_unmap_addr(tx_buf, dma),
+                                              dma_unmap_len(tx_buf, len),
+                                              DMA_TO_DEVICE);
+                               dma_unmap_len_set(tx_buf, len, 0);
+                       }
+               }
+
+               /* move us one more past the eop_desc for start of next pkt */
+               tx_buf++;
+               tx_desc++;
+               i++;
+               if (unlikely(!i)) {
+                       i -= tx_ring->count;
+                       tx_buf = tx_ring->tx_bi;
+                       tx_desc = I40E_TX_DESC(tx_ring, 0);
+               }
+
+               /* update budget accounting */
+               budget--;
+       } while (likely(budget));
+
+       i += tx_ring->count;
        tx_ring->next_to_clean = i;
-       tx_ring->tx_stats.bytes += total_bytes;
-       tx_ring->tx_stats.packets += total_packets;
+       u64_stats_update_begin(&tx_ring->syncp);
+       tx_ring->stats.bytes += total_bytes;
+       tx_ring->stats.packets += total_packets;
+       u64_stats_update_end(&tx_ring->syncp);
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
+
        if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
                /* schedule immediate reset if we believe we hung */
                dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -414,6 +434,10 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
                return true;
        }
 
+       netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
+                                                     tx_ring->queue_index),
+                                 total_packets, total_bytes);
+
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
@@ -524,8 +548,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
        i40e_set_new_dynamic_itr(&q_vector->tx);
        if (old_itr != q_vector->tx.itr)
                wr32(hw, reg_addr, q_vector->tx.itr);
-
-       i40e_flush(hw);
 }
 
 /**
@@ -1042,8 +1064,10 @@ next_desc:
        }
 
        rx_ring->next_to_clean = i;
-       rx_ring->rx_stats.packets += total_rx_packets;
-       rx_ring->rx_stats.bytes += total_rx_bytes;
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
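
The Tx and Rx byte/packet counters above are now bumped inside u64_stats_update_begin()/u64_stats_update_end(), so a 32-bit reader can take a consistent 64-bit snapshot without putting a lock on the hot path. A sketch of both halves of that protocol, using a hypothetical ring that carries only the stats/syncp pair:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_stats {
        u64 packets;
        u64 bytes;
};

struct my_ring {
        struct my_stats stats;
        struct u64_stats_sync syncp;
};

/* writer side, as in the clean/receive paths above */
static void my_ring_add(struct my_ring *r, u64 packets, u64 bytes)
{
        u64_stats_update_begin(&r->syncp);
        r->stats.packets += packets;
        r->stats.bytes += bytes;
        u64_stats_update_end(&r->syncp);
}

/* reader side, e.g. ndo_get_stats64: retry if a writer raced with us */
static void my_ring_read(struct my_ring *r, u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&r->syncp);
                *packets = r->stats.packets;
                *bytes = r->stats.bytes;
        } while (u64_stats_fetch_retry(&r->syncp, start));
}
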
 
@@ -1067,27 +1091,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        struct i40e_q_vector *q_vector =
                               container_of(napi, struct i40e_q_vector, napi);
        struct i40e_vsi *vsi = q_vector->vsi;
+       struct i40e_ring *ring;
        bool clean_complete = true;
        int budget_per_ring;
-       int i;
 
        if (test_bit(__I40E_DOWN, &vsi->state)) {
                napi_complete(napi);
                return 0;
        }
 
+       /* Since the actual Tx work is minimal, we can give the Tx a larger
+        * budget and be more aggressive about cleaning up the Tx descriptors.
+        */
+       i40e_for_each_ring(ring, q_vector->tx)
+               clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
-        * Since the actual Tx work is minimal, we can give the Tx a larger
-        * budget and be more aggressive about cleaning up the Tx descriptors.
         */
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
-       for (i = 0; i < q_vector->num_ringpairs; i++) {
-               clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
-                                                   vsi->work_limit);
-               clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
-                                                   budget_per_ring);
-       }
+
+       i40e_for_each_ring(ring, q_vector->rx)
+               clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
 
        /* If work not completed, return budget and polling will return */
        if (!clean_complete)
@@ -1117,7 +1142,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
                        qval = rd32(hw, I40E_QINT_TQCTL(0));
                        qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
                        wr32(hw, I40E_QINT_TQCTL(0), qval);
-                       i40e_flush(hw);
+
+                       i40e_irq_dynamic_enable_icr0(vsi->back);
                }
        }
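
In the reworked poll path above, Tx cleanup gets the full vsi->work_limit while the NAPI budget is split only across the Rx rings, with a floor of one because a zero per-ring budget would exit polling early. The arithmetic isolated as a standalone helper (the name is ours):

#include <linux/kernel.h>       /* max() */

/* e.g. budget 64, 3 ring pairs -> max(64 / 3, 1) = 21 descriptors per Rx ring;
 *      budget  2, 4 ring pairs -> max(0, 1)      = 1, never zero
 */
static int rx_budget_per_ring(int budget, int num_ringpairs)
{
        return max(budget / num_ringpairs, 1);
}
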
 
@@ -1144,6 +1170,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        struct tcphdr *th;
        unsigned int hlen;
        u32 flex_ptype, dtype_cmd;
+       u16 i;
 
        /* make sure ATR is enabled */
        if (!(pf->flags & I40E_FLAG_FDIR_ATR_ENABLED))
@@ -1183,10 +1210,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        tx_ring->atr_count = 0;
 
        /* grab the next descriptor */
-       fdir_desc = I40E_TX_FDIRDESC(tx_ring, tx_ring->next_to_use);
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       i = tx_ring->next_to_use;
+       fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
        flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
                      I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -1216,7 +1244,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
        fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
 }
 
-#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
 /**
  * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
  * @skb:     send buffer
@@ -1275,27 +1302,6 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        return 0;
 }
 
-/**
- * i40e_tx_csum - is checksum offload requested
- * @tx_ring:  ptr to the ring to send
- * @skb:      ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
- *
- * Returns true if checksum offload is requested
- **/
-static bool i40e_tx_csum(struct i40e_ring *tx_ring, struct sk_buff *skb,
-                        u32 tx_flags, __be16 protocol)
-{
-       if ((skb->ip_summed != CHECKSUM_PARTIAL) &&
-           !(tx_flags & I40E_TX_FLAGS_TXSW)) {
-               if (!(tx_flags & I40E_TX_FLAGS_HW_VLAN))
-                       return false;
-       }
-
-       return skb->ip_summed == CHECKSUM_PARTIAL;
-}
-
 /**
  * i40e_tso - set up the tso context descriptor
  * @tx_ring:  ptr to the ring to send
@@ -1482,15 +1488,16 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
                               const u32 cd_tunneling, const u32 cd_l2tag2)
 {
        struct i40e_tx_context_desc *context_desc;
+       int i = tx_ring->next_to_use;
 
        if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2)
                return;
 
        /* grab the next descriptor */
-       context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
-       tx_ring->next_to_use++;
-       if (tx_ring->next_to_use == tx_ring->count)
-               tx_ring->next_to_use = 0;
+       context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+
+       i++;
+       tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
 
        /* cpu_to_le32 and assign to struct fields */
        context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
@@ -1512,68 +1519,71 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        struct i40e_tx_buffer *first, u32 tx_flags,
                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
-       struct device *dev = tx_ring->dev;
-       u32 paylen = skb->len - hdr_len;
-       u16 i = tx_ring->next_to_use;
+       struct skb_frag_struct *frag;
        struct i40e_tx_buffer *tx_bi;
        struct i40e_tx_desc *tx_desc;
-       u32 buf_offset = 0;
+       u16 i = tx_ring->next_to_use;
        u32 td_tag = 0;
        dma_addr_t dma;
        u16 gso_segs;
 
-       dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-       if (dma_mapping_error(dev, dma))
-               goto dma_error;
-
        if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
                td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
                td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
                         I40E_TX_FLAGS_VLAN_SHIFT;
        }
 
+       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
+               gso_segs = skb_shinfo(skb)->gso_segs;
+       else
+               gso_segs = 1;
+
+       /* multiply data chunks by size of headers */
+       first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
+       first->gso_segs = gso_segs;
+       first->skb = skb;
+       first->tx_flags = tx_flags;
+
+       dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
        tx_desc = I40E_TX_DESC(tx_ring, i);
-       for (;;) {
-               while (size > I40E_MAX_DATA_PER_TXD) {
-                       tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
+       tx_bi = first;
+
+       for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+               if (dma_mapping_error(tx_ring->dev, dma))
+                       goto dma_error;
+
+               /* record length, and DMA address */
+               dma_unmap_len_set(tx_bi, len, size);
+               dma_unmap_addr_set(tx_bi, dma, dma);
+
+               tx_desc->buffer_addr = cpu_to_le64(dma);
+
+               while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
                        tx_desc->cmd_type_offset_bsz =
                                build_ctob(td_cmd, td_offset,
                                           I40E_MAX_DATA_PER_TXD, td_tag);
 
-                       buf_offset += I40E_MAX_DATA_PER_TXD;
-                       size -= I40E_MAX_DATA_PER_TXD;
-
                        tx_desc++;
                        i++;
                        if (i == tx_ring->count) {
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
                                i = 0;
                        }
-               }
 
-               tx_bi = &tx_ring->tx_bi[i];
-               tx_bi->length = buf_offset + size;
-               tx_bi->tx_flags = tx_flags;
-               tx_bi->dma = dma;
+                       dma += I40E_MAX_DATA_PER_TXD;
+                       size -= I40E_MAX_DATA_PER_TXD;
 
-               tx_desc->buffer_addr = cpu_to_le64(dma + buf_offset);
-               tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
-                                                         size, td_tag);
+                       tx_desc->buffer_addr = cpu_to_le64(dma);
+               }
 
                if (likely(!data_len))
                        break;
 
-               size = skb_frag_size(frag);
-               data_len -= size;
-               buf_offset = 0;
-               tx_flags |= I40E_TX_FLAGS_MAPPED_AS_PAGE;
-
-               dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
-               if (dma_mapping_error(dev, dma))
-                       goto dma_error;
+               tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
+                                                         size, td_tag);
 
                tx_desc++;
                i++;
@@ -1582,31 +1592,25 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        i = 0;
                }
 
-               frag++;
-       }
-
-       tx_desc->cmd_type_offset_bsz |=
-                      cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
+               size = skb_frag_size(frag);
+               data_len -= size;
 
-       i++;
-       if (i == tx_ring->count)
-               i = 0;
+               dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+                                      DMA_TO_DEVICE);
 
-       tx_ring->next_to_use = i;
+               tx_bi = &tx_ring->tx_bi[i];
+       }
 
-       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
-               gso_segs = skb_shinfo(skb)->gso_segs;
-       else
-               gso_segs = 1;
+       tx_desc->cmd_type_offset_bsz =
+               build_ctob(td_cmd, td_offset, size, td_tag) |
+               cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
 
-       /* multiply data chunks by size of headers */
-       tx_bi->bytecount = paylen + (gso_segs * hdr_len);
-       tx_bi->gso_segs = gso_segs;
-       tx_bi->skb = skb;
+       netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+                                                tx_ring->queue_index),
+                            first->bytecount);
 
-       /* set the timestamp and next to watch values */
+       /* set the timestamp */
        first->time_stamp = jiffies;
-       first->next_to_watch = tx_desc;
 
        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
@@ -1615,16 +1619,27 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
         */
        wmb();
 
+       /* set next_to_watch value indicating a packet is present */
+       first->next_to_watch = tx_desc;
+
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       tx_ring->next_to_use = i;
+
+       /* notify HW of packet */
        writel(i, tx_ring->tail);
+
        return;
 
 dma_error:
-       dev_info(dev, "TX DMA map failed\n");
+       dev_info(tx_ring->dev, "TX DMA map failed\n");
 
        /* clear dma mappings for failed tx_bi map */
        for (;;) {
                tx_bi = &tx_ring->tx_bi[i];
-               i40e_unmap_tx_resource(tx_ring, tx_bi);
+               i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
                if (tx_bi == first)
                        break;
                if (i == 0)
@@ -1632,8 +1647,6 @@ dma_error:
                i--;
        }
 
-       dev_kfree_skb_any(skb);
-
        tx_ring->next_to_use = i;
 }
 
@@ -1758,16 +1771,16 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 
        skb_tx_timestamp(skb);
 
+       /* always enable CRC insertion offload */
+       td_cmd |= I40E_TX_DESC_CMD_ICRC;
+
        /* Always offload the checksum, since it's in the data descriptor */
-       if (i40e_tx_csum(tx_ring, skb, tx_flags, protocol))
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
                tx_flags |= I40E_TX_FLAGS_CSUM;
 
-       /* always enable offload insertion */
-       td_cmd |= I40E_TX_DESC_CMD_ICRC;
-
-       if (tx_flags & I40E_TX_FLAGS_CSUM)
                i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
                                    tx_ring, &cd_tunneling);
+       }
 
        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);
@@ -1801,7 +1814,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
-       struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+       struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
 
        /* hardware can't handle really short frames, hardware padding works
         * beyond this point
index b1d7722d98a7fc4cd217d43cae627895f5baa5f6..db55d9947f151cf19c17f2f55e8ee24e69364cbd 100644 (file)
 #define I40E_TX_FLAGS_IPV6             (u32)(1 << 5)
 #define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
-#define I40E_TX_FLAGS_TXSW             (u32)(1 << 8)
-#define I40E_TX_FLAGS_MAPPED_AS_PAGE   (u32)(1 << 9)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
 #define I40E_TX_FLAGS_VLAN_SHIFT       16
 
 struct i40e_tx_buffer {
-       struct sk_buff *skb;
-       dma_addr_t dma;
-       unsigned long time_stamp;
-       u16 length;
-       u32 tx_flags;
        struct i40e_tx_desc *next_to_watch;
+       unsigned long time_stamp;
+       struct sk_buff *skb;
        unsigned int bytecount;
-       u16 gso_segs;
-       u8 mapped_as_page;
+       unsigned short gso_segs;
+       DEFINE_DMA_UNMAP_ADDR(dma);
+       DEFINE_DMA_UNMAP_LEN(len);
+       u32 tx_flags;
 };
 
 struct i40e_rx_buffer {
@@ -129,18 +126,18 @@ struct i40e_rx_buffer {
        unsigned int page_offset;
 };
 
-struct i40e_tx_queue_stats {
+struct i40e_queue_stats {
        u64 packets;
        u64 bytes;
+};
+
+struct i40e_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
-       u64 completed;
        u64 tx_done_old;
 };
 
 struct i40e_rx_queue_stats {
-       u64 packets;
-       u64 bytes;
        u64 non_eop_descs;
        u64 alloc_rx_page_failed;
        u64 alloc_rx_buff_failed;
@@ -183,6 +180,7 @@ enum i40e_ring_state_t {
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
+       struct i40e_ring *next;         /* pointer to next ring in q_vector */
        void *desc;                     /* Descriptor ring memory */
        struct device *dev;             /* Used for DMA mapping */
        struct net_device *netdev;      /* netdev ring maps to */
@@ -219,6 +217,8 @@ struct i40e_ring {
        bool ring_active;               /* is ring online or not */
 
        /* stats structs */
+       struct i40e_queue_stats stats;
+       struct u64_stats_sync syncp;
        union {
                struct i40e_tx_queue_stats tx_stats;
                struct i40e_rx_queue_stats rx_stats;
@@ -229,6 +229,8 @@ struct i40e_ring {
 
        struct i40e_vsi *vsi;           /* Backreference to associated VSI */
        struct i40e_q_vector *q_vector; /* Backreference to associated vector */
+
+       struct rcu_head rcu;            /* to avoid race on free */
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
@@ -238,9 +240,8 @@ enum i40e_latency_range {
 };
 
 struct i40e_ring_container {
-#define I40E_MAX_RINGPAIR_PER_VECTOR 8
        /* array of pointers to rings */
-       struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+       struct i40e_ring *ring;
        unsigned int total_bytes;       /* total bytes processed this int */
        unsigned int total_packets;     /* total packets processed this int */
        u16 count;
@@ -248,6 +249,10 @@ struct i40e_ring_container {
        u16 itr;
 };
 
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+       for (pos = (head).ring; pos != NULL; pos = pos->next)
+
 void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
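
With the container now holding a singly linked list instead of a fixed eight-ring array, rings are chained through their new next pointer and visited with i40e_for_each_ring(). A sketch of one plausible way such a list is built and walked; the helper names are ours, and only the next/ring/count/ring_active fields from the hunks above are assumed:

#include "i40e.h"       /* assumed: i40e_ring, i40e_ring_container, iterator */

/* push a ring onto a container's list */
static void my_add_ring(struct i40e_ring_container *head,
                        struct i40e_ring *ring)
{
        ring->next = head->ring;        /* new ring becomes the list head */
        head->ring = ring;
        head->count++;
}

/* visit every ring chained onto the container */
static void my_activate_rings(struct i40e_ring_container *head)
{
        struct i40e_ring *ring;

        i40e_for_each_ring(ring, *head)
                ring->ring_active = true;
}
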
index 8967e58e2408a4930507091bfa85bd26a3862b6f..07596982a4773fc942a5a0c3b8a458bdaa837768 100644 (file)
@@ -251,7 +251,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
                reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
        else
                reg_idx = I40E_VPINT_LNKLSTN(
-                           ((pf->hw.func_caps.num_msix_vectors_vf - 1)
+                                          (pf->hw.func_caps.num_msix_vectors_vf
                                              * vf->vf_id) + (vector_id - 1));
 
        if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
@@ -383,7 +383,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
 
        /* associate this queue with the PCI VF function */
        qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
-       qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT)
+       qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
                    & I40E_QTX_CTL_PF_INDX_MASK);
        qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
                     << I40E_QTX_CTL_VFVM_INDX_SHIFT)
index 74a1506b42359d02305ea9d78cda7e536633559f..8c2437722aad2b32fec48bfc3f17bdf71db5c164 100644 (file)
 #ifndef _E1000_82575_H_
 #define _E1000_82575_H_
 
-extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
-extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
-extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
-extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
-                               u8 dev_addr, u8 *data);
-extern s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
-                                u8 dev_addr, u8 data);
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+                     u8 *data);
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+                      u8 data);
 
 #define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
                                      (ID_LED_DEF1_DEF2 <<  8) | \
index 37a9c06a6c6816bf3f87c82986851504d79d7b71..2e166b22d52b6374e3814a9526d6cad9decfa5a3 100644 (file)
@@ -562,11 +562,11 @@ struct e1000_hw {
        u8  revision_id;
 };
 
-extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
+struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
 #define hw_dbg(format, arg...) \
        netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
 
 /* These functions must be implemented by drivers */
-s32  igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
-s32  igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 #endif /* _E1000_HW_H_ */
index dde3c4b7ea9971db46981bc319faeb5fe56e452c..2d913716573a29a830610de841164e30e505cdfb 100644 (file)
 #ifndef _E1000_I210_H_
 #define _E1000_I210_H_
 
-extern s32 igb_update_flash_i210(struct e1000_hw *hw);
-extern s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
-                             u16 words, u16 *data);
-extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
-                            u16 words, u16 *data);
-extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-extern void igb_release_nvm_i210(struct e1000_hw *hw);
-extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_invm_version(struct e1000_hw *hw,
-                                struct e1000_fw_version *invm_ver);
-extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
-                             u16 *data);
-extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
-                              u16 data);
-extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
-extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_update_flash_i210(struct e1000_hw *hw);
+s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+                           u16 *data);
+s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+                          u16 *data);
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
+void igb_release_nvm_i210(struct e1000_hw *hw);
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+s32 igb_read_invm_version(struct e1000_hw *hw,
+                         struct e1000_fw_version *invm_ver);
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+bool igb_get_flash_presence_i210(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE               0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD   0x11
index 5e13e83cc608358273d3e7dadb6bf488e426127b..e4cbe8ef67b3094b32b73cbccda9be5673243d1f 100644 (file)
@@ -86,6 +86,6 @@ enum e1000_mng_mode {
 
 #define E1000_MNG_DHCP_COOKIE_STATUS_VLAN      0x2
 
-extern void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
 
 #endif
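
The header hunks in this part of the diff (the e1000 82575/hw/i210/mac headers above and the igb, igbvf, ixgb and ixgbe headers below) are one mechanical cleanup: extern is redundant on function declarations, so it is dropped and the argument lists re-wrapped. The whole class of change reduces to a before/after of this shape, lifted in spirit from the hunks themselves:

/* before */
extern s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
                                u8 dev_addr, u8 *data);

/* after: identical linkage and type, just no extern and tighter wrapping */
s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
                      u8 *data);
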
index 6807b098edaee27c0343f3903a080c08604ea3d5..5e9ed89403aa45adb9238330e4937e8aa1f4cd09 100644 (file)
@@ -483,40 +483,38 @@ enum igb_boards {
 extern char igb_driver_name[];
 extern char igb_driver_version[];
 
-extern int igb_up(struct igb_adapter *);
-extern void igb_down(struct igb_adapter *);
-extern void igb_reinit_locked(struct igb_adapter *);
-extern void igb_reset(struct igb_adapter *);
-extern void igb_write_rss_indir_tbl(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
-extern int igb_setup_tx_resources(struct igb_ring *);
-extern int igb_setup_rx_resources(struct igb_ring *);
-extern void igb_free_tx_resources(struct igb_ring *);
-extern void igb_free_rx_resources(struct igb_ring *);
-extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
-extern void igb_setup_tctl(struct igb_adapter *);
-extern void igb_setup_rctl(struct igb_adapter *);
-extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
-extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
-                                          struct igb_tx_buffer *);
-extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
-extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
-extern bool igb_has_link(struct igb_adapter *adapter);
-extern void igb_set_ethtool_ops(struct net_device *);
-extern void igb_power_up_link(struct igb_adapter *);
-extern void igb_set_fw_version(struct igb_adapter *);
-extern void igb_ptp_init(struct igb_adapter *adapter);
-extern void igb_ptp_stop(struct igb_adapter *adapter);
-extern void igb_ptp_reset(struct igb_adapter *adapter);
-extern void igb_ptp_tx_work(struct work_struct *work);
-extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
-extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
-                               struct sk_buff *skb);
-extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
-                               unsigned char *va,
-                               struct sk_buff *skb);
+int igb_up(struct igb_adapter *);
+void igb_down(struct igb_adapter *);
+void igb_reinit_locked(struct igb_adapter *);
+void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
+void igb_write_rss_indir_tbl(struct igb_adapter *);
+int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+int igb_setup_tx_resources(struct igb_ring *);
+int igb_setup_rx_resources(struct igb_ring *);
+void igb_free_tx_resources(struct igb_ring *);
+void igb_free_rx_resources(struct igb_ring *);
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_setup_tctl(struct igb_adapter *);
+void igb_setup_rctl(struct igb_adapter *);
+netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+void igb_alloc_rx_buffers(struct igb_ring *, u16);
+void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
+bool igb_has_link(struct igb_adapter *adapter);
+void igb_set_ethtool_ops(struct net_device *);
+void igb_power_up_link(struct igb_adapter *);
+void igb_set_fw_version(struct igb_adapter *);
+void igb_ptp_init(struct igb_adapter *adapter);
+void igb_ptp_stop(struct igb_adapter *adapter);
+void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_tx_work(struct work_struct *work);
+void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+                        struct sk_buff *skb);
 static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
                                       union e1000_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
@@ -531,11 +529,11 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
        rx_ring->last_rx_timestamp = jiffies;
 }
 
-extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
-                                 struct ifreq *ifr, int cmd);
+int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr,
+                          int cmd);
 #ifdef CONFIG_IGB_HWMON
-extern void igb_sysfs_exit(struct igb_adapter *adapter);
-extern int igb_sysfs_init(struct igb_adapter *adapter);
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
 #endif
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
index 86d51429a189295d9c7418a1273b625a933d9a9b..0ae3177416c736ea5c48b2eead34a2c0553f7368 100644 (file)
@@ -1659,7 +1659,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
                if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
                (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
                (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
-               (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+               (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+               (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
 
                        /* Enable DH89xxCC MPHY for near end loopback */
                        reg = rd32(E1000_MPHY_ADDR_CTL);
@@ -1725,7 +1726,8 @@ static void igb_loopback_cleanup(struct igb_adapter *adapter)
        if ((hw->device_id == E1000_DEV_ID_DH89XXCC_SGMII) ||
        (hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
        (hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
-       (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP)) {
+       (hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
+       (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
                u32 reg;
 
                /* Disable near end loopback on DH89xxCC */
@@ -2655,6 +2657,8 @@ static int igb_set_eee(struct net_device *netdev,
            (hw->phy.media_type != e1000_media_type_copper))
                return -EOPNOTSUPP;
 
+       memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+
        ret_val = igb_get_eee(netdev, &eee_curr);
        if (ret_val)
                return ret_val;
@@ -2875,6 +2879,88 @@ static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
        return 0;
 }
 
+static unsigned int igb_max_channels(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       unsigned int max_combined = 0;
+
+       switch (hw->mac.type) {
+       case e1000_i211:
+               max_combined = IGB_MAX_RX_QUEUES_I211;
+               break;
+       case e1000_82575:
+       case e1000_i210:
+               max_combined = IGB_MAX_RX_QUEUES_82575;
+               break;
+       case e1000_i350:
+               if (!!adapter->vfs_allocated_count) {
+                       max_combined = 1;
+                       break;
+               }
+               /* fall through */
+       case e1000_82576:
+               if (!!adapter->vfs_allocated_count) {
+                       max_combined = 2;
+                       break;
+               }
+               /* fall through */
+       case e1000_82580:
+       case e1000_i354:
+       default:
+               max_combined = IGB_MAX_RX_QUEUES;
+               break;
+       }
+
+       return max_combined;
+}
+
+static void igb_get_channels(struct net_device *netdev,
+                            struct ethtool_channels *ch)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+
+       /* Report maximum channels */
+       ch->max_combined = igb_max_channels(adapter);
+
+       /* Report info for other vector */
+       if (adapter->msix_entries) {
+               ch->max_other = NON_Q_VECTORS;
+               ch->other_count = NON_Q_VECTORS;
+       }
+
+       ch->combined_count = adapter->rss_queues;
+}
+
+static int igb_set_channels(struct net_device *netdev,
+                           struct ethtool_channels *ch)
+{
+       struct igb_adapter *adapter = netdev_priv(netdev);
+       unsigned int count = ch->combined_count;
+
+       /* Verify they are not requesting separate vectors */
+       if (!count || ch->rx_count || ch->tx_count)
+               return -EINVAL;
+
+       /* Verify other_count is valid and has not been changed */
+       if (ch->other_count != NON_Q_VECTORS)
+               return -EINVAL;
+
+       /* Verify the number of channels doesn't exceed hw limits */
+       if (count > igb_max_channels(adapter))
+               return -EINVAL;
+
+       if (count != adapter->rss_queues) {
+               adapter->rss_queues = count;
+
+               /* Hardware has to reinitialize queues and interrupts to
+                * match the new configuration.
+                */
+               return igb_reinit_queues(adapter);
+       }
+
+       return 0;
+}
+
 static const struct ethtool_ops igb_ethtool_ops = {
        .get_settings           = igb_get_settings,
        .set_settings           = igb_set_settings,
@@ -2911,6 +2997,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
        .get_rxfh_indir_size    = igb_get_rxfh_indir_size,
        .get_rxfh_indir         = igb_get_rxfh_indir,
        .set_rxfh_indir         = igb_set_rxfh_indir,
+       .get_channels           = igb_get_channels,
+       .set_channels           = igb_set_channels,
        .begin                  = igb_ethtool_begin,
        .complete               = igb_ethtool_complete,
 };
index 8cf44f2a8ccd5b531f42fa0dfb357a19d6efa4f6..66784cd9a7e5e40fb6eaa2d7cdd76c3fb0cfe1fa 100644 (file)
@@ -2034,21 +2034,15 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
 
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev,
-                                       "No usable DMA configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
+                       goto err_dma;
                }
        }
 
@@ -5708,7 +5702,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 
        /* reply to reset with ack and vf mac address */
        msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
-       memcpy(addr, vf_mac, 6);
+       memcpy(addr, vf_mac, ETH_ALEN);
        igb_write_mbx(hw, msgbuf, 3, vf);
 }
 
@@ -7838,4 +7832,26 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
                return E1000_SUCCESS;
 
 }
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       struct pci_dev *pdev = adapter->pdev;
+       int err = 0;
+
+       if (netif_running(netdev))
+               igb_close(netdev);
+
+       igb_clear_interrupt_scheme(adapter);
+
+       if (igb_init_interrupt_scheme(adapter, true)) {
+               dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+               return -ENOMEM;
+       }
+
+       if (netif_running(netdev))
+               err = igb_open(netdev);
+
+       return err;
+}
 /* igb_main.c */
index a1463e3d14c0522511c75c424ffa16bd3162411a..7d6a25c8f889efd0eb69ef749dd0bb13f18302ad 100644 (file)
@@ -312,17 +312,17 @@ enum igbvf_state_t {
 extern char igbvf_driver_name[];
 extern const char igbvf_driver_version[];
 
-extern void igbvf_check_options(struct igbvf_adapter *);
-extern void igbvf_set_ethtool_ops(struct net_device *);
-
-extern int igbvf_up(struct igbvf_adapter *);
-extern void igbvf_down(struct igbvf_adapter *);
-extern void igbvf_reinit_locked(struct igbvf_adapter *);
-extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
-extern void igbvf_free_rx_resources(struct igbvf_ring *);
-extern void igbvf_free_tx_resources(struct igbvf_ring *);
-extern void igbvf_update_stats(struct igbvf_adapter *);
+void igbvf_check_options(struct igbvf_adapter *);
+void igbvf_set_ethtool_ops(struct net_device *);
+
+int igbvf_up(struct igbvf_adapter *);
+void igbvf_down(struct igbvf_adapter *);
+void igbvf_reinit_locked(struct igbvf_adapter *);
+int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *);
+void igbvf_free_rx_resources(struct igbvf_ring *);
+void igbvf_free_tx_resources(struct igbvf_ring *);
+void igbvf_update_stats(struct igbvf_adapter *);
 
 extern unsigned int copybreak;
 
index 93eb7ee06d3e0aa2159fcccc61e797eb44fd79bd..4e6b02fbe65280614c68b6b76d013368281ccc4f 100644 (file)
@@ -2638,21 +2638,15 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
 
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev, "No usable DMA "
-                                       "configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev, "No usable DMA "
+                               "configuration, aborting\n");
+                       goto err_dma;
                }
        }
 
index eea0e10ce12f95d47ad884d48170495913f8db70..955ad8c2c53456a2bb9ce28515a00d52264ed3ee 100644 (file)
@@ -154,7 +154,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
                ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
                if (!ret_val) {
                        if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
-                               memcpy(hw->mac.perm_addr, addr, 6);
+                               memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
                        else
                                ret_val = -E1000_ERR_MAC_INIT;
                }
@@ -314,7 +314,7 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
 
        memset(msgbuf, 0, 12);
        msgbuf[0] = E1000_VF_SET_MAC_ADDR;
-       memcpy(msg_addr, addr, 6);
+       memcpy(msg_addr, addr, ETH_ALEN);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
index 4d2ae97ff1b3d7380de5c2ea4653f33fda330880..2224cc2edf1396d338b9786a2dd23af8b77aa581 100644 (file)
@@ -187,21 +187,21 @@ enum ixgb_state_t {
 };
 
 /* Exported from other modules */
-extern void ixgb_check_options(struct ixgb_adapter *adapter);
-extern void ixgb_set_ethtool_ops(struct net_device *netdev);
+void ixgb_check_options(struct ixgb_adapter *adapter);
+void ixgb_set_ethtool_ops(struct net_device *netdev);
 extern char ixgb_driver_name[];
 extern const char ixgb_driver_version[];
 
-extern void ixgb_set_speed_duplex(struct net_device *netdev);
+void ixgb_set_speed_duplex(struct net_device *netdev);
 
-extern int ixgb_up(struct ixgb_adapter *adapter);
-extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
-extern void ixgb_reset(struct ixgb_adapter *adapter);
-extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
-extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
-extern void ixgb_update_stats(struct ixgb_adapter *adapter);
+int ixgb_up(struct ixgb_adapter *adapter);
+void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
+void ixgb_reset(struct ixgb_adapter *adapter);
+int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
+int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
+void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
+void ixgb_update_stats(struct ixgb_adapter *adapter);
 
 
 #endif /* _IXGB_H_ */
index 2a99a35c33aa24c30a61837ae880ea373b908c14..0bd5d72e1af5b6be4873baf31431e7f5ad09a347 100644 (file)
@@ -759,27 +759,20 @@ struct ixgb_hw_stats {
 };
 
 /* Function Prototypes */
-extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
-extern bool ixgb_init_hw(struct ixgb_hw *hw);
-extern bool ixgb_adapter_start(struct ixgb_hw *hw);
-extern void ixgb_check_for_link(struct ixgb_hw *hw);
-extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-
-extern void ixgb_rar_set(struct ixgb_hw *hw,
-                               u8 *addr,
-                               u32 index);
+bool ixgb_adapter_stop(struct ixgb_hw *hw);
+bool ixgb_init_hw(struct ixgb_hw *hw);
+bool ixgb_adapter_start(struct ixgb_hw *hw);
+void ixgb_check_for_link(struct ixgb_hw *hw);
+bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
 
+void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
 
 /* Filters (multicast, vlan, receive) */
-extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
-                                  u8 *mc_addr_list,
-                                  u32 mc_addr_count,
-                                  u32 pad);
+void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
+                             u32 mc_addr_count, u32 pad);
 
 /* Vfta functions */
-extern void ixgb_write_vfta(struct ixgb_hw *hw,
-                                u32 offset,
-                                u32 value);
+void ixgb_write_vfta(struct ixgb_hw *hw, u32 offset, u32 value);
 
 /* Access functions to eeprom data */
 void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
index 9f6b236828e6a2d6e5342c0acfd5ba6222a4d531..57e390cbe6d0d21630f6bf6904a0c8a663601931 100644 (file)
@@ -408,20 +408,14 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
 
        pci_using_dac = 0;
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!err) {
-               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (!err)
-                       pci_using_dac = 1;
+               pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               pr_err("No usable DMA configuration, aborting\n");
-                               goto err_dma_mask;
-                       }
+                       pr_err("No usable DMA configuration, aborting\n");
+                       goto err_dma_mask;
                }
        }
 
index 0ac6b11c6e4ec323aaa50db976e2a996b1b02d19..dc1588ee264a31da9e7561904f4ced02b0861502 100644 (file)
@@ -55,7 +55,7 @@
 #include <net/busy_poll.h>
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-#define LL_EXTENDED_STATS
+#define BP_EXTENDED_STATS
 #endif
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
@@ -187,11 +187,11 @@ struct ixgbe_rx_buffer {
 struct ixgbe_queue_stats {
        u64 packets;
        u64 bytes;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
        u64 yields;
        u64 misses;
        u64 cleaned;
-#endif  /* LL_EXTENDED_STATS */
+#endif  /* BP_EXTENDED_STATS */
 };
 
 struct ixgbe_tx_queue_stats {
@@ -399,7 +399,7 @@ static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
                WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI);
                q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD;
                rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                q_vector->tx.ring->stats.yields++;
 #endif
        } else
@@ -432,7 +432,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
        if ((q_vector->state & IXGBE_QV_LOCKED)) {
                q_vector->state |= IXGBE_QV_STATE_POLL_YIELD;
                rc = false;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                q_vector->rx.ring->stats.yields++;
 #endif
        } else
@@ -457,7 +457,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
 }
 
 /* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
        WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
        return q_vector->state & IXGBE_QV_USER_PEND;
@@ -487,7 +487,7 @@ static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
        return false;
 }
 
-static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
+static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
 {
        return false;
 }
@@ -786,93 +786,89 @@ extern const char ixgbe_driver_version[];
 extern char ixgbe_default_device_descr[];
 #endif /* IXGBE_FCOE */
 
-extern void ixgbe_up(struct ixgbe_adapter *adapter);
-extern void ixgbe_down(struct ixgbe_adapter *adapter);
-extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
-extern void ixgbe_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
-extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
-extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
-                                  struct ixgbe_ring *);
-extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
-extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+void ixgbe_up(struct ixgbe_adapter *adapter);
+void ixgbe_down(struct ixgbe_adapter *adapter);
+void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
+void ixgbe_reset(struct ixgbe_adapter *adapter);
+void ixgbe_set_ethtool_ops(struct net_device *netdev);
+int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+void ixgbe_free_rx_resources(struct ixgbe_ring *);
+void ixgbe_free_tx_resources(struct ixgbe_ring *);
+void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
+void ixgbe_update_stats(struct ixgbe_adapter *adapter);
+int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
+int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                               u16 subdevice_id);
-extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
-extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
-                                        struct ixgbe_adapter *,
-                                        struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
-                                             struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
-extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
-extern int ixgbe_poll(struct napi_struct *napi, int budget);
-extern int ethtool_ioctl(struct ifreq *ifr);
-extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
-extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_hash_dword input,
-                                                union ixgbe_atr_hash_dword common,
-                                                 u8 queue);
-extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
-                                          union ixgbe_atr_input *input_mask);
-extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_input *input,
-                                                u16 soft_id, u8 queue);
-extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
-                                                union ixgbe_atr_input *input,
-                                                u16 soft_id);
-extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
-                                                union ixgbe_atr_input *mask);
-extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
-extern void ixgbe_set_rx_mode(struct net_device *netdev);
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
+                                 struct ixgbe_ring *);
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
+                                     struct ixgbe_tx_buffer *);
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
+void ixgbe_write_eitr(struct ixgbe_q_vector *);
+int ixgbe_poll(struct napi_struct *napi, int budget);
+int ethtool_ioctl(struct ifreq *ifr);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_hash_dword input,
+                                         union ixgbe_atr_hash_dword common,
+                                         u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+                                   union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+                                         union ixgbe_atr_input *mask);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
-extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
+void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
 #endif
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
-extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
-extern void ixgbe_do_reset(struct net_device *netdev);
+int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+void ixgbe_do_reset(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_HWMON
-extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
-extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
+void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
+int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
 #endif /* CONFIG_IXGBE_HWMON */
 #ifdef IXGBE_FCOE
-extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
-                    struct ixgbe_tx_buffer *first,
-                    u8 *hdr_len);
-extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
-                         union ixgbe_adv_rx_desc *rx_desc,
-                         struct sk_buff *skb);
-extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
-                              struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
-                                struct scatterlist *sgl, unsigned int sgc);
-extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
-extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
-extern int ixgbe_fcoe_enable(struct net_device *netdev);
-extern int ixgbe_fcoe_disable(struct net_device *netdev);
+void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
+             u8 *hdr_len);
+int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
+                  union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
+int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+                      struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+                         struct scatterlist *sgl, unsigned int sgc);
+int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_enable(struct net_device *netdev);
+int ixgbe_fcoe_disable(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
-extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
-extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
+u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
+u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
 #endif /* CONFIG_IXGBE_DCB */
-extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
-extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
-                                 struct netdev_fcoe_hbainfo *info);
-extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
+int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+                          struct netdev_fcoe_hbainfo *info);
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
 #endif /* IXGBE_FCOE */
 #ifdef CONFIG_DEBUG_FS
-extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
-extern void ixgbe_dbg_init(void);
-extern void ixgbe_dbg_exit(void);
+void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
+void ixgbe_dbg_init(void);
+void ixgbe_dbg_exit(void);
 #else
 static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
 static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
@@ -884,12 +880,12 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
 }
 
-extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-                                   struct sk_buff *skb);
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
+void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+                            struct sk_buff *skb);
 static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
                                         union ixgbe_adv_rx_desc *rx_desc,
                                         struct sk_buff *skb)
@@ -906,11 +902,11 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
        rx_ring->last_rx_timestamp = jiffies;
 }
 
-extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
-                                   struct ifreq *ifr, int cmd);
-extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
-extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
+int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
+                            int cmd);
+void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
 #ifdef CONFIG_PCI_IOV
 void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
 #endif
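
An aside on the long run of header hunks above: dropping the explicit extern keyword from these prototypes is purely cosmetic, since file-scope function declarations carry external linkage by default. A minimal illustration with a hypothetical name:

        /* both declarations are equivalent; the function name is made up */
        extern int example_wol_supported(struct ixgbe_adapter *adapter,
                                         u16 device_id, u16 subdevice_id);
        int example_wol_supported(struct ixgbe_adapter *adapter,
                                  u16 device_id, u16 subdevice_id);
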
index e8649abf97c0dd93152d7037bae870eaf3532954..90aac31b3551746d8fd3bd95d61d5000d2fc4a1a 100644 (file)
@@ -442,7 +442,7 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
 
 static int ixgbe_get_regs_len(struct net_device *netdev)
 {
-#define IXGBE_REGS_LEN  1129
+#define IXGBE_REGS_LEN  1139
        return IXGBE_REGS_LEN * sizeof(u32);
 }
 
@@ -602,22 +602,53 @@ static void ixgbe_get_regs(struct net_device *netdev,
        regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
 
        /* DCB */
-       regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
-       regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
-       regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
-       regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
-       for (i = 0; i < 8; i++)
-               regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
-       for (i = 0; i < 8; i++)
-               regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
-       for (i = 0; i < 8; i++)
-               regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
-       for (i = 0; i < 8; i++)
-               regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+       regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
+       regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
+               regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
+               for (i = 0; i < 8; i++)
+                       regs_buff[833 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[841 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[849 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[857 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+               regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
+               for (i = 0; i < 8; i++)
+                       regs_buff[833 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[841 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[849 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
+               for (i = 0; i < 8; i++)
+                       regs_buff[857 + i] =
+                               IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
+               break;
+       default:
+               break;
+       }
+
        for (i = 0; i < 8; i++)
-               regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
+               regs_buff[865 + i] =
+               IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
        for (i = 0; i < 8; i++)
-               regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
+               regs_buff[873 + i] =
+               IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
 
        /* Statistics */
        regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
@@ -757,6 +788,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
 
        /* 82599 X540 specific registers  */
        regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+
+       /* 82599 X540 specific DCB registers  */
+       regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+       regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
+       for (i = 0; i < 4; i++)
+               regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
+       regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
+                                       /* same as RTTQCNRM */
+       regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
+                                       /* same as RTTQCNRR */
+
+       /* X540 specific DCB registers  */
+       regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
+       regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
 }
 
 static int ixgbe_get_eeprom_len(struct net_device *netdev)
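
A consistency sketch on the dump hunk above: the ten new DCB slots occupy regs_buff[1129] through regs_buff[1138], which is exactly what the IXGBE_REGS_LEN bump from 1129 to 1139 earlier in this file provides for. Restated as a compile-time check, with the macro value copied from that hunk:

        #define IXGBE_REGS_LEN 1139     /* value from the hunk above */
        /* 0..1128 were already in use (MFLCN is the last), plus 2 + 4 + 2 + 2 new slots */
        #if IXGBE_REGS_LEN != 1129 + 2 + 4 + 2 + 2
        # error "IXGBE_REGS_LEN no longer covers the new DCB dump slots"
        #endif
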
@@ -1072,7 +1117,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                        data[i] = 0;
                        data[i+1] = 0;
                        data[i+2] = 0;
@@ -1087,7 +1132,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                data[i] = ring->stats.yields;
                data[i+1] = ring->stats.misses;
                data[i+2] = ring->stats.cleaned;
@@ -1100,7 +1145,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                        data[i] = 0;
                        data[i+1] = 0;
                        data[i+2] = 0;
@@ -1115,7 +1160,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                i += 2;
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                data[i] = ring->stats.yields;
                data[i+1] = ring->stats.misses;
                data[i+2] = ring->stats.cleaned;
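
The stats hunks above read each queue's 64-bit packet/byte counters inside a u64_stats retry loop; spelled out as a standalone helper (ring being the same ixgbe_ring the hunks use), the pattern is what keeps the pair consistent on 32-bit hosts:

        #include <linux/u64_stats_sync.h>

        static void example_read_queue_stats(struct ixgbe_ring *ring,
                                             u64 *packets, u64 *bytes)
        {
                unsigned int start;

                do {
                        start    = u64_stats_fetch_begin_bh(&ring->syncp);
                        *packets = ring->stats.packets;
                        *bytes   = ring->stats.bytes;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
        }
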
@@ -1157,28 +1202,28 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-                       sprintf(p, "tx_queue_%u_ll_napi_yield", i);
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "tx_queue_%u_bp_napi_yield", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_ll_misses", i);
+                       sprintf(p, "tx_queue_%u_bp_misses", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_ll_cleaned", i);
+                       sprintf(p, "tx_queue_%u_bp_cleaned", i);
                        p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef LL_EXTENDED_STATS
-                       sprintf(p, "rx_queue_%u_ll_poll_yield", i);
+#ifdef BP_EXTENDED_STATS
+                       sprintf(p, "rx_queue_%u_bp_poll_yield", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_ll_misses", i);
+                       sprintf(p, "rx_queue_%u_bp_misses", i);
                        p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_ll_cleaned", i);
+                       sprintf(p, "rx_queue_%u_bp_cleaned", i);
                        p += ETH_GSTRING_LEN;
-#endif /* LL_EXTENDED_STATS */
+#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
                        sprintf(p, "tx_pb_%u_pxon", i);
index 0ade0cd5ef53ffab28b3fd34136374bfe9f4b51e..546980fe64b839de10c3c49cdde030cd43d71870 100644 (file)
@@ -1585,7 +1585,7 @@ static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
 
-       if (ixgbe_qv_ll_polling(q_vector))
+       if (ixgbe_qv_busy_polling(q_vector))
                netif_receive_skb(skb);
        else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
                napi_gro_receive(&q_vector->napi, skb);
@@ -2097,7 +2097,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
 
        ixgbe_for_each_ring(ring, q_vector->rx) {
                found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef LL_EXTENDED_STATS
+#ifdef BP_EXTENDED_STATS
                if (found)
                        ring->stats.cleaned += found;
                else
@@ -7490,19 +7490,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                return err;
 
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev,
-                                       "No usable DMA configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev,
+                               "No usable DMA configuration, aborting\n");
+                       goto err_dma;
                }
                pci_using_dac = 0;
        }
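
A hedged sketch of the probe pattern the hunk above converges on: dma_set_mask_and_coherent() sets both the streaming and the coherent mask in one call, so the 64-bit attempt with 32-bit fallback shrinks from four calls to two. The wrapper and its name are illustrative only:

        #include <linux/dma-mapping.h>
        #include <linux/pci.h>

        static int example_set_dma(struct pci_dev *pdev, int *using_dac)
        {
                int err;

                if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                        *using_dac = 1;
                        return 0;
                }

                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
                        return err;
                }

                *using_dac = 0;
                return 0;
        }
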
index 24af12e3719e00c8345ba750399226a8522105de..aae900a256da98601a501a121eed1f14347d3997 100644 (file)
 #define IXGBE_SFF_QSFP_DEVICE_TECH     0x93
 
 /* Bitmasks */
-#define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
-#define IXGBE_SFF_DA_ACTIVE_CABLE            0x8
-#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING    0x4
-#define IXGBE_SFF_1GBASESX_CAPABLE           0x1
-#define IXGBE_SFF_1GBASELX_CAPABLE           0x2
-#define IXGBE_SFF_1GBASET_CAPABLE            0x8
-#define IXGBE_SFF_10GBASESR_CAPABLE          0x10
-#define IXGBE_SFF_10GBASELR_CAPABLE          0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK  0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G   0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G    0x0
-#define IXGBE_SFF_ADDRESSING_MODE           0x4
-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE       0x1
-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE      0x8
+#define IXGBE_SFF_DA_PASSIVE_CABLE             0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE              0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING      0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE             0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE             0x2
+#define IXGBE_SFF_1GBASET_CAPABLE              0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE            0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE            0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK          0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G           0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G            0x0
+#define IXGBE_SFF_ADDRESSING_MODE              0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE         0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE                0x8
 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
 #define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL  0x0
-#define IXGBE_I2C_EEPROM_READ_MASK           0x100
-#define IXGBE_I2C_EEPROM_STATUS_MASK         0x3
-#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
-#define IXGBE_I2C_EEPROM_STATUS_PASS         0x1
-#define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2
-#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
+#define IXGBE_I2C_EEPROM_READ_MASK             0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK           0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION   0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS           0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL           0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS    0x3
 
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE                  0x400
index 276d7b135332c1c1c0ec4415f0594f15ffd9f1d2..1fe7cb0142e106919e772f668fe5a5411b8ff2fc 100644 (file)
@@ -558,7 +558,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 
-       memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+       memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
        hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
 
        return 0;
@@ -621,16 +621,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 {
-       unsigned char vf_mac_addr[6];
        struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        unsigned int vfn = (event_mask & 0x3f);
 
        bool enable = ((event_mask & 0x10000000U) != 0);
 
-       if (enable) {
-               eth_zero_addr(vf_mac_addr);
-               memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
-       }
+       if (enable)
+               eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses);
 
        return 0;
 }
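
The hard-coded 6 in the MAC copies above (and in the jme, korina and mv643xx_eth hunks further down) gives way to ETH_ALEN, and eth_zero_addr() replaces the zero-buffer-then-copy dance. A small illustration with made-up names:

        #include <linux/etherdevice.h>  /* ETH_ALEN, eth_zero_addr(), is_valid_ether_addr() */
        #include <linux/string.h>

        static void example_assign_mac(u8 dst[ETH_ALEN], const u8 *src)
        {
                if (src && is_valid_ether_addr(src))
                        memcpy(dst, src, ETH_ALEN);     /* instead of a bare 6 */
                else
                        eth_zero_addr(dst);             /* instead of memset + memcpy */
        }
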
index 10775cb9b6d84c066263c51881257bf8995e037a..7c19e969576f60160649f3ea95b6033582ed9552 100644 (file)
@@ -561,6 +561,10 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTDQSEL    0x04904
 #define IXGBE_RTTDT1C     0x04908
 #define IXGBE_RTTDT1S     0x0490C
+#define IXGBE_RTTQCNCR    0x08B00
+#define IXGBE_RTTQCNTG    0x04A90
+#define IXGBE_RTTBCNRD    0x0498C
+#define IXGBE_RTTQCNRR    0x0498C
 #define IXGBE_RTTDTECC    0x04990
 #define IXGBE_RTTDTECC_NO_BCN   0x00000100
 #define IXGBE_RTTBCNRC    0x04984
@@ -570,6 +574,7 @@ struct ixgbe_thermal_sensor_data {
 #define IXGBE_RTTBCNRC_RF_INT_MASK     \
        (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
 #define IXGBE_RTTBCNRM    0x04980
+#define IXGBE_RTTQCNRM    0x04980
 
 /* FCoE DMA Context Registers */
 #define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
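
The new rate-limiter defines above are deliberate aliases of existing offsets, matching the "same as RTTQCNRM" / "same as RTTQCNRR" comments in the ethtool dump hunk earlier in this section. Restated as a compile-time check:

        #if IXGBE_RTTQCNRR != IXGBE_RTTBCNRD || IXGBE_RTTQCNRM != IXGBE_RTTBCNRM
        # error "BCN/QCN rate-limiter register aliases have drifted apart"
        #endif
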
index 389324f5929a5ef9fd16bac01b9a6d799943747a..24b80a6cfca4ec593f7bed4bf2e1423213125d90 100644 (file)
 #include "ixgbe.h"
 #include "ixgbe_phy.h"
 
-#define IXGBE_X540_MAX_TX_QUEUES 128
-#define IXGBE_X540_MAX_RX_QUEUES 128
-#define IXGBE_X540_RAR_ENTRIES   128
-#define IXGBE_X540_MC_TBL_SIZE   128
-#define IXGBE_X540_VFT_TBL_SIZE  128
-#define IXGBE_X540_RX_PB_SIZE   384
+#define IXGBE_X540_MAX_TX_QUEUES       128
+#define IXGBE_X540_MAX_RX_QUEUES       128
+#define IXGBE_X540_RAR_ENTRIES         128
+#define IXGBE_X540_MC_TBL_SIZE         128
+#define IXGBE_X540_VFT_TBL_SIZE                128
+#define IXGBE_X540_RX_PB_SIZE          384
 
 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
 static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
index c9d0c12d6f04156e19ec0240c19d64c0f23ebc8a..84329b0d567a1caeb55fb197e6f4bfe0e07944f1 100644 (file)
@@ -140,58 +140,10 @@ static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
 
 #define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
 
-static char *ixgbevf_reg_names[] = {
-       "IXGBE_VFCTRL",
-       "IXGBE_VFSTATUS",
-       "IXGBE_VFLINKS",
-       "IXGBE_VFRXMEMWRAP",
-       "IXGBE_VFFRTIMER",
-       "IXGBE_VTEICR",
-       "IXGBE_VTEICS",
-       "IXGBE_VTEIMS",
-       "IXGBE_VTEIMC",
-       "IXGBE_VTEIAC",
-       "IXGBE_VTEIAM",
-       "IXGBE_VTEITR",
-       "IXGBE_VTIVAR",
-       "IXGBE_VTIVAR_MISC",
-       "IXGBE_VFRDBAL0",
-       "IXGBE_VFRDBAL1",
-       "IXGBE_VFRDBAH0",
-       "IXGBE_VFRDBAH1",
-       "IXGBE_VFRDLEN0",
-       "IXGBE_VFRDLEN1",
-       "IXGBE_VFRDH0",
-       "IXGBE_VFRDH1",
-       "IXGBE_VFRDT0",
-       "IXGBE_VFRDT1",
-       "IXGBE_VFRXDCTL0",
-       "IXGBE_VFRXDCTL1",
-       "IXGBE_VFSRRCTL0",
-       "IXGBE_VFSRRCTL1",
-       "IXGBE_VFPSRTYPE",
-       "IXGBE_VFTDBAL0",
-       "IXGBE_VFTDBAL1",
-       "IXGBE_VFTDBAH0",
-       "IXGBE_VFTDBAH1",
-       "IXGBE_VFTDLEN0",
-       "IXGBE_VFTDLEN1",
-       "IXGBE_VFTDH0",
-       "IXGBE_VFTDH1",
-       "IXGBE_VFTDT0",
-       "IXGBE_VFTDT1",
-       "IXGBE_VFTXDCTL0",
-       "IXGBE_VFTXDCTL1",
-       "IXGBE_VFTDWBAL0",
-       "IXGBE_VFTDWBAL1",
-       "IXGBE_VFTDWBAH0",
-       "IXGBE_VFTDWBAH1"
-};
-
-
 static int ixgbevf_get_regs_len(struct net_device *netdev)
 {
-       return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+#define IXGBE_REGS_LEN 45
+       return IXGBE_REGS_LEN * sizeof(u32);
 }
 
 static void ixgbevf_get_regs(struct net_device *netdev,
@@ -264,9 +216,6 @@ static void ixgbevf_get_regs(struct net_device *netdev,
                regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
-
-       for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
-               hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
 }
 
 static void ixgbevf_get_drvinfo(struct net_device *netdev,
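
A cross-check on the ixgbevf ethtool hunks above: the hard-coded length of 45 matches both the 45 entries the removed ixgbevf_reg_names[] table held and the final write in ixgbevf_get_regs(), regs_buff[43 + i] with i < 2. As a compile-time restatement, with the macro value copied from the hunk:

        #define IXGBE_REGS_LEN 45       /* value from the hunk above */
        #if IXGBE_REGS_LEN != 43 + 2    /* last loop fills regs_buff[43] and regs_buff[44] */
        # error "VF regs length no longer covers the final VFTDWBAH pair"
        #endif
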
index fff0d98675295182fd15b7bbba36820274286ea2..64a2b912e73c4cf6c09a71571b3d6b321375ec7b 100644 (file)
@@ -281,27 +281,23 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
 extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
 
-extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
-extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
-                                     struct ixgbevf_ring *);
-extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-extern int ethtool_ioctl(struct ifreq *ifr);
-
-extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
-extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+void ixgbevf_up(struct ixgbevf_adapter *adapter);
+void ixgbevf_down(struct ixgbevf_adapter *adapter);
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+int ethtool_ioctl(struct ifreq *ifr);
+
+void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
 
 #ifdef DEBUG
-extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
 #define hw_dbg(hw, format, arg...) \
        printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
 #else
index 59a62bbfb3714a55683bec8fcb49e9adcb5cb279..275ccde300cd3dfddd3f5fb2473dde35bbcd7790 100644 (file)
@@ -756,37 +756,12 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
 static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 {
        struct ixgbevf_adapter *adapter = data;
-       struct pci_dev *pdev = adapter->pdev;
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 msg;
-       bool got_ack = false;
 
        hw->mac.get_link_status = 1;
-       if (!hw->mbx.ops.check_for_ack(hw))
-               got_ack = true;
-
-       if (!hw->mbx.ops.check_for_msg(hw)) {
-               hw->mbx.ops.read(hw, &msg, 1);
-
-               if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) {
-                       mod_timer(&adapter->watchdog_timer,
-                                 round_jiffies(jiffies + 1));
-                       adapter->link_up = false;
-               }
 
-               if (msg & IXGBE_VT_MSGTYPE_NACK)
-                       dev_info(&pdev->dev,
-                                "Last Request of type %2.2x to PF Nacked\n",
-                                msg & 0xFF);
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
-       }
-
-       /* checking for the ack clears the PFACK bit.  Place
-        * it back in the v2p_mailbox cache so that anyone
-        * polling for an ack will not miss it
-        */
-       if (got_ack)
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
+       if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+               mod_timer(&adapter->watchdog_timer, jiffies);
 
        IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
 
@@ -1327,27 +1302,51 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
        }
 }
 
-#define IXGBE_MAX_RX_DESC_POLL 10
-static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
-                                               int rxr)
+#define IXGBEVF_MAX_RX_DESC_POLL 10
+static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+                                        int rxr)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+       u32 rxdctl;
        int j = adapter->rx_ring[rxr].reg_idx;
-       int k;
 
-       for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
-               if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-                       break;
-               else
-                       msleep(1);
-       }
-       if (k >= IXGBE_MAX_RX_DESC_POLL) {
-               hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
-                      "not set within the polling period\n", rxr);
-       }
+       do {
+               usleep_range(1000, 2000);
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+       } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+
+       if (!wait_loop)
+               hw_dbg(hw, "RXDCTL.ENABLE queue %d not set while polling\n",
+                      rxr);
 
-       ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
-                               adapter->rx_ring[rxr].count - 1);
+       ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+                               (adapter->rx_ring[rxr].count - 1));
+}
+
+static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
+                                    struct ixgbevf_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;
+       u32 rxdctl;
+       u8 reg_idx = ring->reg_idx;
+
+       rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+       rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+       /* write value back with RXDCTL.ENABLE bit cleared */
+       IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
+
+       /* the hardware may take up to 100us to really disable the rx queue */
+       do {
+               udelay(10);
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
+       } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+       if (!wait_loop)
+               hw_dbg(hw, "RXDCTL.ENABLE queue %d not cleared while polling\n",
+                      reg_idx);
 }
 
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1545,8 +1544,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
 
-       ixgbevf_negotiate_api(adapter);
-
        ixgbevf_reset_queues(adapter);
 
        ixgbevf_configure(adapter);
@@ -1679,7 +1676,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
 
        /* signal that we are down to the interrupt handler */
        set_bit(__IXGBEVF_DOWN, &adapter->state);
-       /* disable receives */
+
+       /* disable all enabled rx queues */
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               ixgbevf_disable_rx_queue(adapter, &adapter->rx_ring[i]);
 
        netif_tx_disable(netdev);
 
@@ -1733,10 +1733,12 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
 
-       if (hw->mac.ops.reset_hw(hw))
+       if (hw->mac.ops.reset_hw(hw)) {
                hw_dbg(hw, "PF still resetting\n");
-       else
+       } else {
                hw->mac.ops.init_hw(hw);
+               ixgbevf_negotiate_api(adapter);
+       }
 
        if (is_valid_ether_addr(adapter->hw.mac.addr)) {
                memcpy(netdev->dev_addr, adapter->hw.mac.addr,
@@ -2072,6 +2074,9 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
        hw->mac.max_tx_queues = 2;
        hw->mac.max_rx_queues = 2;
 
+       /* lock to protect mailbox accesses */
+       spin_lock_init(&adapter->mbx_lock);
+
        err = hw->mac.ops.reset_hw(hw);
        if (err) {
                dev_info(&pdev->dev,
@@ -2082,6 +2087,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
                        pr_err("init_shared_code failed: %d\n", err);
                        goto out;
                }
+               ixgbevf_negotiate_api(adapter);
                err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
                if (err)
                        dev_info(&pdev->dev, "Error reading MAC address\n");
@@ -2097,9 +2103,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
                memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
        }
 
-       /* lock to protect mailbox accesses */
-       spin_lock_init(&adapter->mbx_lock);
-
        /* Enable dynamic interrupt throttling rates */
        adapter->rx_itr_setting = 1;
        adapter->tx_itr_setting = 1;
@@ -2620,8 +2623,6 @@ static int ixgbevf_open(struct net_device *netdev)
                }
        }
 
-       ixgbevf_negotiate_api(adapter);
-
        /* setup queue reg_idx and Rx queue count */
        err = ixgbevf_setup_queues(adapter);
        if (err)
@@ -3216,6 +3217,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
        }
        pci_set_master(pdev);
 
+       ixgbevf_reset(adapter);
+
        rtnl_lock();
        err = ixgbevf_init_interrupt_scheme(adapter);
        rtnl_unlock();
@@ -3224,8 +3227,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
                return err;
        }
 
-       ixgbevf_reset(adapter);
-
        if (netif_running(netdev)) {
                err = ixgbevf_open(netdev);
                if (err)
@@ -3326,19 +3327,14 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                return err;
 
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
-           !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
-                       err = dma_set_coherent_mask(&pdev->dev,
-                                                   DMA_BIT_MASK(32));
-                       if (err) {
-                               dev_err(&pdev->dev, "No usable DMA "
-                                       "configuration, aborting\n");
-                               goto err_dma;
-                       }
+                       dev_err(&pdev->dev, "No usable DMA "
+                               "configuration, aborting\n");
+                       goto err_dma;
                }
                pci_using_dac = 0;
        }
index 387b52635bc051259a5e5b99d9740c69bcca687a..4d44d64ae3870c42dd62bb4d1cdfeb96fe9122b9 100644 (file)
@@ -242,7 +242,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
        msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
        msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
        if (addr)
-               memcpy(msg_addr, addr, 6);
+               memcpy(msg_addr, addr, ETH_ALEN);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
@@ -275,7 +275,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
 
        memset(msgbuf, 0, sizeof(msgbuf));
        msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
-       memcpy(msg_addr, addr, 6);
+       memcpy(msg_addr, addr, ETH_ALEN);
        ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
 
        if (!ret_val)
index 23de82a9da82ff408181759505450adfd439e88d..f5685c0d057911c6c2f59dd982f21f869d1b978c 100644 (file)
@@ -309,7 +309,7 @@ static void
 jme_load_macaddr(struct net_device *netdev)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
-       unsigned char macaddr[6];
+       unsigned char macaddr[ETH_ALEN];
        u32 val;
 
        spin_lock_bh(&jme->macaddr_lock);
@@ -321,7 +321,7 @@ jme_load_macaddr(struct net_device *netdev)
        val = jread32(jme, JME_RXUMA_HI);
        macaddr[4] = (val >>  0) & 0xFF;
        macaddr[5] = (val >>  8) & 0xFF;
-       memcpy(netdev->dev_addr, macaddr, 6);
+       memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
        spin_unlock_bh(&jme->macaddr_lock);
 }
 
@@ -3192,7 +3192,6 @@ jme_init_one(struct pci_dev *pdev,
 err_out_unmap:
        iounmap(jme->regs);
 err_out_free_netdev:
-       pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
 err_out_release_regions:
        pci_release_regions(pdev);
@@ -3210,7 +3209,6 @@ jme_remove_one(struct pci_dev *pdev)
 
        unregister_netdev(netdev);
        iounmap(jme->regs);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
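
The jme hunks above, like the skge and sky2 hunks further down, drop pci_set_drvdata(pdev, NULL) from the error and remove paths; the driver core clears driver data when a device is unbound, so the explicit reset is redundant. A minimal teardown sketch under that assumption, with a generic driver name:

        #include <linux/netdevice.h>
        #include <linux/pci.h>

        static void example_remove(struct pci_dev *pdev)
        {
                struct net_device *netdev = pci_get_drvdata(pdev);

                unregister_netdev(netdev);
                free_netdev(netdev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                /* no pci_set_drvdata(pdev, NULL): the core resets it on unbind */
        }
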
index a36fa80968eb1a0a42e9051b8ca2ea91ece4840c..4a5e3b0f712e82e7253f92c7ef19ebd282f5dd4d 100644 (file)
@@ -1110,7 +1110,7 @@ static int korina_probe(struct platform_device *pdev)
        lp = netdev_priv(dev);
 
        bif->dev = dev;
-       memcpy(dev->dev_addr, bif->mac, 6);
+       memcpy(dev->dev_addr, bif->mac, ETH_ALEN);
 
        lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
        lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
index 7fb5677451f9fb7aee3ebccd31a82b476cfac4df..4cfae6c9a63fb29f9bb8da02713b60835bb0f79c 100644 (file)
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
        p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
        p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
        spin_unlock_bh(&mp->mib_counters_lock);
-
-       mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
 
 static void mib_counters_timer_wrapper(unsigned long _mp)
 {
        struct mv643xx_eth_private *mp = (void *)_mp;
-
        mib_counters_update(mp);
+       mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }
 
 
@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
                mp->int_mask |= INT_TX_END_0 << i;
        }
 
+       add_timer(&mp->mib_counters_timer);
        port_start(mp);
 
        wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2514,7 +2513,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
 
        mac_addr = of_get_mac_address(pnp);
        if (mac_addr)
-               memcpy(ppd.mac_addr, mac_addr, 6);
+               memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
 
        mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
        mv643xx_eth_property(pnp, "tx-sram-addr", ppd.tx_sram_addr);
@@ -2534,6 +2533,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
        if (!ppdev)
                return -ENOMEM;
        ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ppdev->dev.of_node = pnp;
 
        ret = platform_device_add_resources(ppdev, &res, 1);
        if (ret)
@@ -2696,7 +2696,7 @@ static void set_params(struct mv643xx_eth_private *mp,
        struct net_device *dev = mp->dev;
 
        if (is_valid_ether_addr(pd->mac_addr))
-               memcpy(dev->dev_addr, pd->mac_addr, 6);
+               memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
        else
                uc_addr_get(mp, dev->dev_addr);
 
@@ -2916,7 +2916,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
        mp->mib_counters_timer.data = (unsigned long)mp;
        mp->mib_counters_timer.function = mib_counters_timer_wrapper;
        mp->mib_counters_timer.expires = jiffies + 30 * HZ;
-       add_timer(&mp->mib_counters_timer);
 
        spin_lock_init(&mp->mib_counters_lock);
 
index ecc7f7b696b89a122b80318a12d5f6488a190126..5978461938699f098f7927c7d439cd9134fbc6a7 100644 (file)
@@ -4046,7 +4046,6 @@ err_out_free_regions:
        pci_release_regions(pdev);
 err_out_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 err_out:
        return err;
 }
@@ -4090,7 +4089,6 @@ static void skge_remove(struct pci_dev *pdev)
 
        iounmap(hw->regs);
        kfree(hw);
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
index e09a8c6f85366c4c1251f5365549e154f743f08c..a7df981d2123a9d0c73180fbc4fc70834adacd98 100644 (file)
@@ -5081,7 +5081,6 @@ err_out_free_regions:
 err_out_disable:
        pci_disable_device(pdev);
 err_out:
-       pci_set_drvdata(pdev, NULL);
        return err;
 }
 
@@ -5124,8 +5123,6 @@ static void sky2_remove(struct pci_dev *pdev)
 
        iounmap(hw->regs);
        kfree(hw);
-
-       pci_set_drvdata(pdev, NULL);
 }
 
 static int sky2_suspend(struct device *dev)
index ea20182c6969245e53a3a9da563c76461555f7f5..735765c21c9595a8d7b3bacce95de32589a7ee4e 100644 (file)
@@ -2253,7 +2253,6 @@ EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
-       struct mlx4_vport_oper_state *vf_oper;
        struct mlx4_vport_state *vf_admin;
        int slave;
 
@@ -2269,7 +2268,6 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
                return -EINVAL;
 
        vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
-       vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
        if ((0 == vlan) && (0 == qos))
                vf_admin->default_vlan = MLX4_VGT;
index fa37b7a612133c739263346a5d6f372af0fd9dda..85d91665d400117f8a0adb489e7ab27aae2f5f0f 100644 (file)
@@ -1733,7 +1733,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 
        /* Unregister Mac address for the port */
        mlx4_en_put_qp(priv);
-       if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
+       if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN))
                mdev->mac_removed[priv->port] = 1;
 
        /* Free RX Rings */
index dec455c8f6274a9827f93bf038d8e12c58242765..afe2efa69c8683647766a8d5dc7d561f3f76c2df 100644 (file)
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
                put_page(page);
                return -ENOMEM;
        }
-       page_alloc->size = PAGE_SIZE << order;
+       page_alloc->page_size = PAGE_SIZE << order;
        page_alloc->page = page;
        page_alloc->dma = dma;
-       page_alloc->offset = frag_info->frag_align;
+       page_alloc->page_offset = frag_info->frag_align;
        /* Not doing get_page() for each frag is a big win
         * on asymetric workloads.
         */
-       atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+       atomic_set(&page->_count,
+                  page_alloc->page_size / frag_info->frag_stride);
        return 0;
 }
 
@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
        for (i = 0; i < priv->num_frags; i++) {
                frag_info = &priv->frag_info[i];
                page_alloc[i] = ring_alloc[i];
-               page_alloc[i].offset += frag_info->frag_stride;
-               if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+               page_alloc[i].page_offset += frag_info->frag_stride;
+
+               if (page_alloc[i].page_offset + frag_info->frag_stride <=
+                   ring_alloc[i].page_size)
                        continue;
+
                if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
                        goto out;
        }
 
        for (i = 0; i < priv->num_frags; i++) {
                frags[i] = ring_alloc[i];
-               dma = ring_alloc[i].dma + ring_alloc[i].offset;
+               dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
                ring_alloc[i] = page_alloc[i];
                rx_desc->data[i].addr = cpu_to_be64(dma);
        }
@@ -117,7 +121,7 @@ out:
                frag_info = &priv->frag_info[i];
                if (page_alloc[i].page != ring_alloc[i].page) {
                        dma_unmap_page(priv->ddev, page_alloc[i].dma,
-                               page_alloc[i].size, PCI_DMA_FROMDEVICE);
+                               page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
                        page = page_alloc[i].page;
                        atomic_set(&page->_count, 1);
                        put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
                              int i)
 {
        const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+       u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+
 
-       if (frags[i].offset + frag_info->frag_stride > frags[i].size)
-               dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
-                                        PCI_DMA_FROMDEVICE);
+       if (next_frag_end > frags[i].page_size)
+               dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+                              PCI_DMA_FROMDEVICE);
 
        if (frags[i].page)
                put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:
 
                page_alloc = &ring->page_alloc[i];
                dma_unmap_page(priv->ddev, page_alloc->dma,
-                              page_alloc->size, PCI_DMA_FROMDEVICE);
+                              page_alloc->page_size, PCI_DMA_FROMDEVICE);
                page = page_alloc->page;
                atomic_set(&page->_count, 1);
                put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
                       i, page_count(page_alloc->page));
 
                dma_unmap_page(priv->ddev, page_alloc->dma,
-                               page_alloc->size, PCI_DMA_FROMDEVICE);
-               while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+                               page_alloc->page_size, PCI_DMA_FROMDEVICE);
+               while (page_alloc->page_offset + frag_info->frag_stride <
+                      page_alloc->page_size) {
                        put_page(page_alloc->page);
-                       page_alloc->offset += frag_info->frag_stride;
+                       page_alloc->page_offset += frag_info->frag_stride;
                }
                page_alloc->page = NULL;
        }
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                /* Save page reference in skb */
                __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
                skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
-               skb_frags_rx[nr].page_offset = frags[nr].offset;
+               skb_frags_rx[nr].page_offset = frags[nr].page_offset;
                skb->truesize += frag_info->frag_stride;
                frags[nr].page = NULL;
        }
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
        /* Get pointer to first fragment so we could copy the headers into the
         * (linear part of the) skb */
-       va = page_address(frags[0].page) + frags[0].offset;
+       va = page_address(frags[0].page) + frags[0].page_offset;
 
        if (length <= SMALL_PACKET_SIZE) {
                /* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                        dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
                                                DMA_FROM_DEVICE);
                        ethh = (struct ethhdr *)(page_address(frags[0].page) +
-                                                frags[0].offset);
+                                                frags[0].page_offset);
 
                        if (is_multicast_ether_addr(ethh->h_dest)) {
                                struct mlx4_mac_entry *entry;
index 0d63daa2f422e082d85fd627a892c9fb5f5c1945..c151e7a6710a4b11d7efaa2816f65b4ba2732e81 100644 (file)
@@ -652,7 +652,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
        MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
        if (field & 1<<6)
-               dev_cap->flags2 |= MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN;
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
        MLX4_GET(dev_cap->max_icm_sz, outbox,
                 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
        if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -1713,7 +1713,6 @@ void mlx4_opreq_action(struct work_struct *work)
        u32 *outbox;
        u32 modifier;
        u16 token;
-       u16 type_m;
        u16 type;
        int err;
        u32 num_qps;
@@ -1746,7 +1745,6 @@ void mlx4_opreq_action(struct work_struct *work)
                MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
                MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
                MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
-               type_m = type >> 12;
                type &= 0xfff;
 
                switch (type) {
index 60c9f4f103fce1a2c7d815ccc303b7b94805068c..179d26709c94d93bcc1c720b16d6bbdcf59a2eb7 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/io-mapping.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
+#include <linux/kmod.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -650,6 +651,27 @@ err_mem:
        return err;
 }
 
+static void mlx4_request_modules(struct mlx4_dev *dev)
+{
+       int port;
+       int has_ib_port = false;
+       int has_eth_port = false;
+#define EN_DRV_NAME    "mlx4_en"
+#define IB_DRV_NAME    "mlx4_ib"
+
+       for (port = 1; port <= dev->caps.num_ports; port++) {
+               if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+                       has_ib_port = true;
+               else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+                       has_eth_port = true;
+       }
+
+       if (has_ib_port)
+               request_module_nowait(IB_DRV_NAME);
+       if (has_eth_port)
+               request_module_nowait(EN_DRV_NAME);
+}
+
 /*
  * Change the port configuration of the device.
  * Every user of this function must hold the port mutex.
@@ -681,6 +703,11 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
                }
                mlx4_set_port_mask(dev);
                err = mlx4_register_device(dev);
+               if (err) {
+                       mlx4_err(dev, "Failed to register device\n");
+                       goto out;
+               }
+               mlx4_request_modules(dev);
        }
 
 out:
@@ -2305,6 +2332,8 @@ slave_start:
        if (err)
                goto err_port;
 
+       mlx4_request_modules(dev);
+
        mlx4_sense_init(dev);
        mlx4_start_sense(dev);
 
index 55f6245efb6cd250f2efda4e6941bdb81b1fd9f3..70f0213d68c42cd93a5a7c1b5969214ed83e0797 100644 (file)
@@ -645,7 +645,7 @@ static const u8 __promisc_mode[] = {
 int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
                                    enum mlx4_net_trans_promisc_mode flow_type)
 {
-       if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+       if (flow_type >= MLX4_FS_MODE_NUM) {
                mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
                return -EINVAL;
        }
@@ -681,7 +681,7 @@ const u16 __sw_id_hw[] = {
 int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
                                  enum mlx4_net_trans_rule_id id)
 {
-       if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+       if (id >= MLX4_NET_TRANS_RULE_NUM) {
                mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
                return -EINVAL;
        }
@@ -706,7 +706,7 @@ static const int __rule_hw_sz[] = {
 int mlx4_hw_rule_sz(struct mlx4_dev *dev,
               enum mlx4_net_trans_rule_id id)
 {
-       if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+       if (id >= MLX4_NET_TRANS_RULE_NUM) {
                mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
                return -EINVAL;
        }
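
A short illustration of why the "< 0" half of the range checks above can go: with an enum whose enumerators start at 0, the compiler may pick an unsigned underlying type, so "id < 0" is always false and typically draws a warning; the upper-bound test alone is sufficient. The names below are hypothetical:

        enum example_rule_id {
                EXAMPLE_RULE_ETH,
                EXAMPLE_RULE_IB,
                EXAMPLE_RULE_NUM
        };

        static int example_rule_valid(enum example_rule_id id)
        {
                return id < EXAMPLE_RULE_NUM;   /* lower bound is implicit */
        }
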
index 5e0aa569306aab2f6c8185cd1574cdd4abeb4aab..bf06e3610d27787ced2f43b9f9aa9fef105aa3df 100644 (file)
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
 struct mlx4_en_rx_alloc {
        struct page     *page;
        dma_addr_t      dma;
-       u32             offset;
-       u32             size;
+       u32             page_offset;
+       u32             page_size;
 };
 
 struct mlx4_en_tx_ring {
index 79fd269e2c54b3e0a043ab072a4e2b0f888e0088..9e08e35ce351c30046da5d59ce64daa65c48b380 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/init.h>
 
 #include <linux/mlx4/cmd.h>
+#include <linux/mlx4/srq.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 
index 5472cbd34028d9038539824c4167a0c3010a3a01..6ca30739625f7ac568d693b5d4b9d3d9e47b46de 100644 (file)
@@ -180,28 +180,32 @@ static int verify_block_sig(struct mlx5_cmd_prot_block *block)
        return 0;
 }
 
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
+                          int csum)
 {
        block->token = token;
-       block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 2);
-       block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+       if (csum) {
+               block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
+                                           sizeof(block->data) - 2);
+               block->sig = ~xor8_buf(block, sizeof(*block) - 1);
+       }
 }
 
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
 {
        struct mlx5_cmd_mailbox *next = msg->next;
 
        while (next) {
-               calc_block_sig(next->buf, token);
+               calc_block_sig(next->buf, token, csum);
                next = next->next;
        }
 }
 
-static void set_signature(struct mlx5_cmd_work_ent *ent)
+static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
 {
        ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
-       calc_chain_sig(ent->in, ent->token);
-       calc_chain_sig(ent->out, ent->token);
+       calc_chain_sig(ent->in, ent->token, csum);
+       calc_chain_sig(ent->out, ent->token, csum);
 }
 
 static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -539,8 +543,7 @@ static void cmd_work_handler(struct work_struct *work)
        lay->type = MLX5_PCI_CMD_XPORT;
        lay->token = ent->token;
        lay->status_own = CMD_OWNER_HW;
-       if (!cmd->checksum_disabled)
-               set_signature(ent);
+       set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ktime_get_ts(&ent->ts1);
 
@@ -773,8 +776,6 @@ static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
 
                copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
                block = next->buf;
-               if (xor8_buf(block, sizeof(*block)) != 0xff)
-                       return -EINVAL;
 
                memcpy(to, block->data, copy);
                to += copy;
@@ -1361,6 +1362,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                goto err_map;
        }
 
+       cmd->checksum_disabled = 1;
        cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
        cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
 
@@ -1510,7 +1512,7 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
        case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:           return -EIO;
        case MLX5_CMD_STAT_BAD_RES_ERR:                 return -EINVAL;
        case MLX5_CMD_STAT_RES_BUSY:                    return -EBUSY;
-       case MLX5_CMD_STAT_LIM_ERR:                     return -EINVAL;
+       case MLX5_CMD_STAT_LIM_ERR:                     return -ENOMEM;
        case MLX5_CMD_STAT_BAD_RES_STATE_ERR:           return -EINVAL;
        case MLX5_CMD_STAT_IX_ERR:                      return -EINVAL;
        case MLX5_CMD_STAT_NO_RES_ERR:                  return -EAGAIN;
index 443cc4d7b024c02d2cc77861868e1c1b17ee2524..2231d93cc7ad116e55c77e0269fa27878f6c6a4d 100644 (file)
@@ -366,9 +366,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                goto err_in;
        }
 
+       snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+                name, pci_name(dev->pdev));
        eq->eqn = out.eq_number;
        err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
-                         name, eq);
+                         eq->name, eq);
        if (err)
                goto err_eq;
 
index b47739b0b5f6dfb34139ad74e02ac10db1d4fbbd..bc0f5fb66e249dc2e652b4126f9b907707b87c6c 100644 (file)
@@ -165,9 +165,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
        struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
        struct mlx5_cmd_set_hca_cap_mbox_out set_out;
-       struct mlx5_profile *prof = dev->profile;
        u64 flags;
-       int csum = 1;
        int err;
 
        memset(&query_ctx, 0, sizeof(query_ctx));
@@ -197,20 +195,14 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        memcpy(&set_ctx->hca_cap, &query_out->hca_cap,
               sizeof(set_ctx->hca_cap));
 
-       if (prof->mask & MLX5_PROF_MASK_CMDIF_CSUM) {
-               csum = !!prof->cmdif_csum;
-               flags = be64_to_cpu(set_ctx->hca_cap.flags);
-               if (csum)
-                       flags |= MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-               else
-                       flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-
-               set_ctx->hca_cap.flags = cpu_to_be64(flags);
-       }
-
        if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
                set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
 
+       flags = be64_to_cpu(query_out->hca_cap.flags);
+       /* disable checksum */
+       flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+
+       set_ctx->hca_cap.flags = cpu_to_be64(flags);
        memset(&set_out, 0, sizeof(set_out));
        set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
        set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
@@ -225,9 +217,6 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        if (err)
                goto query_ex;
 
-       if (!csum)
-               dev->cmd.checksum_disabled = 1;
-
 query_ex:
        kfree(query_out);
        kfree(set_ctx);
index 3a2408d448203623754d0aa87e35c16e809da43f..7b12acf210f81cd408a4b0e8a965fd4441165c9f 100644 (file)
@@ -90,6 +90,10 @@ struct mlx5_manage_pages_outbox {
        __be64                  pas[0];
 };
 
+enum {
+       MAX_RECLAIM_TIME_MSECS  = 5000,
+};
+
 static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
 {
        struct rb_root *root = &dev->priv.page_root;
@@ -279,6 +283,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        int err;
        int i;
 
+       if (nclaimed)
+               *nclaimed = 0;
+
        memset(&in, 0, sizeof(in));
        outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
        out = mlx5_vzalloc(outlen);
@@ -388,20 +395,25 @@ static int optimal_reclaimed_pages(void)
 
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
 {
-       unsigned long end = jiffies + msecs_to_jiffies(5000);
+       unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
        struct fw_page *fwp;
        struct rb_node *p;
+       int nclaimed = 0;
        int err;
 
        do {
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct fw_page, rb_node);
-                       err = reclaim_pages(dev, fwp->func_id, optimal_reclaimed_pages(), NULL);
+                       err = reclaim_pages(dev, fwp->func_id,
+                                           optimal_reclaimed_pages(),
+                                           &nclaimed);
                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
                                return err;
                        }
+                       if (nclaimed)
+                               end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
                }
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
index 075f4e21d33df6f4f2b749dcd4aeb9fee1405077..c83d16dc7cd56034de4bfbc7deafcad6ddb42439 100644 (file)
@@ -1248,7 +1248,7 @@ static void ks_set_mac(struct ks_net *ks, u8 *data)
        w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
        ks_wrreg16(ks, KS_MARL, w);
 
-       memcpy(ks->mac_addr, data, 6);
+       memcpy(ks->mac_addr, data, ETH_ALEN);
 
        if (ks->enabled)
                ks_start_rx(ks);
@@ -1651,7 +1651,7 @@ static int ks8851_probe(struct platform_device *pdev)
        }
        netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
 
-       memcpy(netdev->dev_addr, ks->mac_addr, 6);
+       memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
 
        ks_set_mac(ks, netdev->dev_addr);
 
index 8ebc352bcbe6b817912e9d055202c4c077779924..ddd252a3da9c023794e0bde418d689d87cb4b8c4 100644 (file)
@@ -7150,8 +7150,6 @@ static void pcidev_exit(struct pci_dev *pdev)
        struct platform_info *info = pci_get_drvdata(pdev);
        struct dev_info *hw_priv = &info->dev_info;
 
-       pci_set_drvdata(pdev, NULL);
-
        release_mem_region(pci_resource_start(pdev, 0),
                pci_resource_len(pdev, 0));
        for (i = 0; i < hw_priv->hw.dev_count; i++) {
@@ -7227,7 +7225,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
 
 static char pcidev_name[] = "ksz884xp";
 
-static struct pci_device_id pcidev_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcidev_table) = {
        { PCI_VENDOR_ID_MICREL_KS, 0x8841,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
        { PCI_VENDOR_ID_MICREL_KS, 0x8842,
index bd1a2d2bc2aebbad9612d167915fd743cf973c94..cbd013379252a158322aa5c4f508d0a9d1413020 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/of_irq.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
-#include <linux/dma-mapping.h>
 
 #include "moxart_ether.h"
 
@@ -448,7 +447,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
        irq = irq_of_parse_and_map(node, 0);
        if (irq <= 0) {
                netdev_err(ndev, "irq_of_parse_and_map failed\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto irq_map_fail;
        }
 
        priv = netdev_priv(ndev);
@@ -472,24 +472,32 @@ static int moxart_mac_probe(struct platform_device *pdev)
        priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
                                                TX_DESC_NUM, &priv->tx_base,
                                                GFP_DMA | GFP_KERNEL);
-       if (priv->tx_desc_base == NULL)
+       if (priv->tx_desc_base == NULL) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
                                                RX_DESC_NUM, &priv->rx_base,
                                                GFP_DMA | GFP_KERNEL);
-       if (priv->rx_desc_base == NULL)
+       if (priv->rx_desc_base == NULL) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
                                    GFP_ATOMIC);
-       if (!priv->tx_buf_base)
+       if (!priv->tx_buf_base) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
                                    GFP_ATOMIC);
-       if (!priv->rx_buf_base)
+       if (!priv->rx_buf_base) {
+               ret = -ENOMEM;
                goto init_fail;
+       }
 
        platform_set_drvdata(pdev, ndev);
 
@@ -522,7 +530,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
 init_fail:
        netdev_err(ndev, "init failed\n");
        moxart_mac_free_memory(ndev);
-
+irq_map_fail:
+       free_netdev(ndev);
        return ret;
 }
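The moxart probe rework above gives every failure path an explicit error code and a label (init_fail, irq_map_fail) that releases exactly what was already allocated, including the new free_netdev() on the irq-map path. A generic standalone sketch of that goto-unwind pattern, using plain malloc()s rather than the driver's netdev and DMA buffers:

/* Illustration only: cascading cleanup labels in a probe-style function. */
#include <stdio.h>
#include <stdlib.h>

static int fake_probe(void)
{
        void *a, *b;
        int ret;

        a = malloc(64);
        if (!a) {
                ret = -1;               /* -ENOMEM in the kernel */
                goto out;
        }

        b = malloc(64);
        if (!b) {
                ret = -1;
                goto free_a;            /* unwind only what exists */
        }

        printf("probe ok\n");
        free(b);
        free(a);
        return 0;

free_a:
        free(a);
out:
        return ret;
}

int main(void)
{
        return fake_probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}
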
 
index 149355b52ad0c8f2d4aac4f6cfe83041bc59f45b..68026f7e8ba308d62c66570fb5ac7c19eb9f8994 100644 (file)
@@ -934,7 +934,7 @@ static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
 
 static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
 {
-       int rc = true;
+       bool rc = true;
        spin_lock(&ss->lock);
        if ((ss->state & SLICE_LOCKED)) {
                WARN_ON((ss->state & SLICE_STATE_NAPI));
@@ -957,7 +957,7 @@ static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
 
 static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
 {
-       int rc = true;
+       bool rc = true;
        spin_lock_bh(&ss->lock);
        if ((ss->state & SLICE_LOCKED)) {
                ss->state |= SLICE_STATE_POLL_YIELD;
@@ -3164,7 +3164,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
 
        /* Walk the multicast list, and add each address */
        netdev_for_each_mc_addr(ha, dev) {
-               memcpy(data, &ha->addr, 6);
+               memcpy(data, &ha->addr, ETH_ALEN);
                cmd.data0 = ntohl(data[0]);
                cmd.data1 = ntohl(data[1]);
                err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3207,7 +3207,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
        }
 
        /* change the dev structure */
-       memcpy(dev->dev_addr, sa->sa_data, 6);
+       memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
        return 0;
 }
 
@@ -4208,7 +4208,6 @@ static void myri10ge_remove(struct pci_dev *pdev)
        set_fw_name(mgp, NULL, false);
        free_netdev(netdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E     0x0008
index 7a5e295588b0740f38c288364de1a51be6898fd9..64ec2a437f46a3280e9377c55d8226a19f82d089 100644 (file)
@@ -970,7 +970,6 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
  err_ioremap:
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
 
  err_pci_request_regions:
        free_netdev(dev);
@@ -3220,7 +3219,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
        pci_release_regions (pdev);
        iounmap(ioaddr);
        free_netdev (dev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
index 51b00941302c8d7e7eea3dcad6daa17c7702c65e..9eeddbd0b2c7c749de1c863e4b142c25f35c4fb9 100644 (file)
@@ -8185,7 +8185,6 @@ mem_alloc_failed:
        free_shared_mem(sp);
        pci_disable_device(pdev);
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 
        return ret;
@@ -8221,7 +8220,6 @@ static void s2io_rem_nic(struct pci_dev *pdev)
        iounmap(sp->bar0);
        iounmap(sp->bar1);
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
        pci_disable_device(pdev);
 }
index 5a20eaf903dd8b8833262166e4be4e65abd6f674..8614eeb7de8140114d256ae05c62ce95d5844a18 100644 (file)
@@ -4739,7 +4739,6 @@ _exit6:
 _exit5:
        vxge_device_unregister(hldev);
 _exit4:
-       pci_set_drvdata(pdev, NULL);
        vxge_hw_device_terminate(hldev);
        pci_disable_sriov(pdev);
 _exit3:
@@ -4782,7 +4781,6 @@ static void vxge_remove(struct pci_dev *pdev)
                vxge_free_mac_add_list(&vdev->vpaths[i]);
 
        vxge_device_unregister(hldev);
-       pci_set_drvdata(pdev, NULL);
        /* Do not call pci_disable_sriov here, as it will break child devices */
        vxge_hw_device_terminate(hldev);
        iounmap(vdev->bar0);
index a061b93efe66a29fd663a49bf16d1a510ff0879f..ba3ca18611f7905d559eaec225b6a4ae837c3dda 100644 (file)
@@ -1399,8 +1399,10 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        }
 
        if (pldat->dma_buff_base_v == 0) {
-               pldat->pdev->dev.coherent_dma_mask = 0xFFFFFFFF;
-               pldat->pdev->dev.dma_mask = &pldat->pdev->dev.coherent_dma_mask;
+               ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+               if (ret)
+                       goto err_out_free_irq;
+
                pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
 
                /* Allocate a chunk of memory for the DMA ethernet buffers
index 622aa75904c4ee07c1b666466925e08ce0df25d3..2006a07004829dcc7f1743cf343480c6db3c5c95 100644 (file)
@@ -1552,8 +1552,9 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
 
        p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
 
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
-       pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (result)
+               goto err;
 
        netif_carrier_off(netdev);
        result = register_netdev(netdev);
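Both the lpc_eth and octeon_mgmt hunks above stop poking dev.coherent_dma_mask / dev.dma_mask directly and instead call dma_coerce_mask_and_coherent(), checking its return value. A bare-bones probe sketch of that pattern (kernel context only; everything around the call is omitted and the surrounding driver code is not shown):

/* Sketch, not a complete driver. */
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        int ret;

        /* Set the streaming and coherent DMA masks in one call and fail
         * the probe if the platform cannot satisfy 32-bit DMA.
         */
        ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        /* ...the rest of the probe path would follow here... */
        return 0;
}
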
index 6797b1075874ae26cca6756bf450559ed7c7e6be..2a9003071d51a985be9b133fea76a585dabdf2d4 100644 (file)
@@ -653,38 +653,38 @@ struct pch_gbe_adapter {
 extern const char pch_driver_version[];
 
 /* pch_gbe_main.c */
-extern int pch_gbe_up(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_down(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
-extern void pch_gbe_reset(struct pch_gbe_adapter *adapter);
-extern int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_tx_ring *txdr);
-extern int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_rx_ring *rxdr);
-extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_tx_ring *tx_ring);
-extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
-                                      struct pch_gbe_rx_ring *rx_ring);
-extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-extern u32 pch_ch_control_read(struct pci_dev *pdev);
-extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_ch_event_read(struct pci_dev *pdev);
-extern void pch_ch_event_write(struct pci_dev *pdev, u32 val);
-extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
-extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
-extern u64 pch_rx_snap_read(struct pci_dev *pdev);
-extern u64 pch_tx_snap_read(struct pci_dev *pdev);
-extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
+int pch_gbe_up(struct pch_gbe_adapter *adapter);
+void pch_gbe_down(struct pch_gbe_adapter *adapter);
+void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter);
+void pch_gbe_reset(struct pch_gbe_adapter *adapter);
+int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_tx_ring *txdr);
+int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_rx_ring *rxdr);
+void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_tx_ring *tx_ring);
+void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
+                              struct pch_gbe_rx_ring *rx_ring);
+void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
+u32 pch_ch_control_read(struct pci_dev *pdev);
+void pch_ch_control_write(struct pci_dev *pdev, u32 val);
+u32 pch_ch_event_read(struct pci_dev *pdev);
+void pch_ch_event_write(struct pci_dev *pdev, u32 val);
+u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
+u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
+u64 pch_rx_snap_read(struct pci_dev *pdev);
+u64 pch_tx_snap_read(struct pci_dev *pdev);
+int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
 
 /* pch_gbe_param.c */
-extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
+void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
 
 /* pch_gbe_ethtool.c */
-extern void pch_gbe_set_ethtool_ops(struct net_device *netdev);
+void pch_gbe_set_ethtool_ops(struct net_device *netdev);
 
 /* pch_gbe_mac.c */
-extern s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
-extern s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
-extern u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw,
-                                 u32 addr, u32 dir, u32 reg, u16 data);
+s32 pch_gbe_mac_force_mac_fc(struct pch_gbe_hw *hw);
+s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw);
+u16 pch_gbe_mac_ctrl_miim(struct pch_gbe_hw *hw, u32 addr, u32 dir, u32 reg,
+                         u16 data);
 #endif /* _PCH_GBE_H_ */
index cac33e5f9bc2635cd2a4fd9b38c1af6f155164d8..b6bdeb3c19711ac960646dfdbeef2207b86f366d 100644 (file)
@@ -1910,7 +1910,6 @@ static void hamachi_remove_one(struct pci_dev *pdev)
                iounmap(hmp->base);
                free_netdev(dev);
                pci_release_regions(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index d28593b1fc3e8664b1f6aad8e6d53dba4fdd1a5c..07a890eb72ad8677a47782f84f79c6f02ee1140e 100644 (file)
@@ -513,7 +513,6 @@ err_out_unmap_rx:
 err_out_unmap_tx:
         pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
 err_out_cleardev:
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
        pci_release_regions(pdev);
@@ -1392,7 +1391,6 @@ static void yellowfin_remove_one(struct pci_dev *pdev)
        pci_release_regions (pdev);
 
        free_netdev (dev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 
index 5b65356e7568b28e0d2030c7436863db10a44b32..dbaa49e58b0cd8610c71a83c8170c7f10255453e 100644 (file)
@@ -1870,7 +1870,6 @@ static void pasemi_mac_remove(struct pci_dev *pdev)
        pasemi_dma_free_chan(&mac->tx->chan);
        pasemi_dma_free_chan(&mac->rx->chan);
 
-       pci_set_drvdata(pdev, NULL);
        free_netdev(netdev);
 }
 
index 32675e16021e8aafd2a5598473cc2ad54dae7668..9adcdbb49476e53face68f353dd7261ffea356b3 100644 (file)
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 81
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.81"
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.82"
 
 #define NETXEN_VERSION_CODE(a, b, c)   (((a) << 24) + ((b) << 16) + (c))
 #define _major(v)      (((v) >> 24) & 0xff)
@@ -1883,9 +1883,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
 
 int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
 int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
-extern void netxen_change_ringparam(struct netxen_adapter *adapter);
-extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
-                               int *valp);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
 
 extern const struct ethtool_ops netxen_nic_ethtool_ops;
 
index 32c790659f9c1ba0410614c30818eb9cf20a46ee..0c64c82b9acffdcfd58b9471f971d0eebe95a9cc 100644 (file)
@@ -958,6 +958,7 @@ enum {
 #define NETXEN_PEG_HALT_STATUS2        (NETXEN_CAM_RAM(0xac))
 #define NX_CRB_DEV_REF_COUNT           (NETXEN_CAM_RAM(0x138))
 #define NX_CRB_DEV_STATE               (NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY                 (NETXEN_CAM_RAM(0x178))
 
 /* MiniDIMM related macros */
 #define NETXEN_DIMM_CAPABILITY         (NETXEN_CAM_RAM(0x258))
index 8375cbde996976047475b9c5e6c16f33fd365df4..67efe754367de10d78e6bb1cbacd65caa6f1524c 100644 (file)
@@ -648,7 +648,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
 
        mac_req = (nx_mac_req_t *)&req.words[0];
        mac_req->op = op;
-       memcpy(mac_req->mac_addr, addr, 6);
+       memcpy(mac_req->mac_addr, addr, ETH_ALEN);
 
        return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 }
index cbd75f97ffb3c6d32624e15b81adcb05d5d4b1d9..3bec8cfebf99299ef9c3733b47033a26fe8ba7b1 100644 (file)
@@ -1415,6 +1415,32 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
        return 0;
 }
 
+#define NETXEN_ULA_ADAPTER_KEY         (0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY     (0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+       u32 temp;
+
+       /* Print ULA info only once for an adapter */
+       if (adapter->portnum != 0)
+               return;
+
+       temp = NXRD32(adapter, NETXEN_ULA_KEY);
+       switch (temp) {
+       case NETXEN_ULA_ADAPTER_KEY:
+               dev_info(&adapter->pdev->dev, "ULA adapter");
+               break;
+       case NETXEN_NON_ULA_ADAPTER_KEY:
+               dev_info(&adapter->pdev->dev, "non ULA adapter");
+               break;
+       default:
+               break;
+       }
+
+       return;
+}
+
 #ifdef CONFIG_PCIEAER
 static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
 {
@@ -1561,6 +1587,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_disable_msi;
        }
 
+       netxen_read_ula_info(adapter);
+
        err = netxen_setup_netdev(adapter, netdev);
        if (err)
                goto err_out_disable_msi;
@@ -1602,7 +1630,6 @@ err_out_free_res:
        pci_release_regions(pdev);
 
 err_out_disable_pdev:
-       pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
        return err;
 }
@@ -1661,7 +1688,6 @@ static void netxen_nic_remove(struct pci_dev *pdev)
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        free_netdev(netdev);
 }
index 91a8fcd6c24654e52d94aa6a96f68bf02e32a4ee..0758b9435358b9f4012e703a2605ad64ef4aff7c 100644 (file)
@@ -3916,7 +3916,6 @@ err_out_free_regions:
        pci_release_regions(pdev);
 err_out_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 err_out:
        return err;
 }
@@ -3939,7 +3938,6 @@ static void ql3xxx_remove(struct pci_dev *pdev)
 
        iounmap(qdev->mem_map_registers);
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(ndev);
 }
 
index 81bf83604c4fb1c04aa0a5dfe361d1ffaefd160d..0c2405dbc970eed07e69dc411bf73c1f1c65bfe0 100644 (file)
@@ -38,8 +38,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 50
-#define QLCNIC_LINUX_VERSIONID  "5.3.50"
+#define _QLCNIC_LINUX_SUBVERSION 51
+#define QLCNIC_LINUX_VERSIONID  "5.3.51"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -961,8 +961,6 @@ struct qlcnic_ipaddr {
 #define __QLCNIC_SRIOV_CAPABLE         11
 #define __QLCNIC_MBX_POLL_ENABLE       12
 #define __QLCNIC_DIAG_MODE             13
-#define __QLCNIC_DCB_STATE             14
-#define __QLCNIC_DCB_IN_AEN            15
 
 #define QLCNIC_INTERRUPT_TEST          1
 #define QLCNIC_LOOPBACK_TEST           2
@@ -1199,6 +1197,7 @@ struct qlcnic_npar_info {
        u8      promisc_mode;
        u8      offload_flags;
        u8      pci_func;
+       u8      mac[ETH_ALEN];
 };
 
 struct qlcnic_eswitch {
@@ -2115,98 +2114,4 @@ static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
 
        return status;
 }
-
-static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->get_hw_capability)
-               return dcb->ops->get_hw_capability(adapter);
-
-       return 0;
-}
-
-static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->free)
-               dcb->ops->free(adapter);
-}
-
-static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->attach)
-               return dcb->ops->attach(adapter);
-
-       return 0;
-}
-
-static inline int
-qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->query_hw_capability)
-               return dcb->ops->query_hw_capability(adapter, buf);
-
-       return 0;
-}
-
-static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->get_info)
-               dcb->ops->get_info(adapter);
-}
-
-static inline int
-qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->query_cee_param)
-               return dcb->ops->query_cee_param(adapter, buf, type);
-
-       return 0;
-}
-
-static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->get_cee_cfg)
-               return dcb->ops->get_cee_cfg(adapter);
-
-       return 0;
-}
-
-static inline void
-qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->register_aen)
-               dcb->ops->register_aen(adapter, flag);
-}
-
-static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
-                                        void *msg)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->handle_aen)
-               dcb->ops->handle_aen(adapter, msg);
-}
-
-static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
-{
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (dcb && dcb->ops->init_dcbnl_ops)
-               dcb->ops->init_dcbnl_ops(adapter);
-}
 #endif                         /* __QLCNIC_H_ */
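The inline DCB wrappers deleted from qlcnic.h here reappear further down in the qlcnic_dcb header, now taking struct qlcnic_dcb directly. The dispatch shape itself is an ops table reached through tiny NULL-tolerant inline wrappers, so call sites need no feature #ifdefs. A standalone sketch with invented example_dev/example_ops names:

/* Illustration only: NULL-checked ops-table dispatch via inline wrappers. */
#include <stdio.h>
#include <stddef.h>

struct example_dev;

struct example_ops {
        int (*attach)(struct example_dev *);
        void (*free)(struct example_dev *);
};

struct example_dev {
        const struct example_ops *ops;
};

/* Wrapper: a missing ops table (or hook) quietly becomes a no-op. */
static inline int example_attach(struct example_dev *dev)
{
        if (dev->ops && dev->ops->attach)
                return dev->ops->attach(dev);
        return 0;
}

static int real_attach(struct example_dev *dev)
{
        printf("attached\n");
        return 0;
}

static const struct example_ops real_ops = { .attach = real_attach };

int main(void)
{
        struct example_dev plain = { .ops = NULL };
        struct example_dev fancy = { .ops = &real_ops };

        example_attach(&plain);         /* no-op */
        return example_attach(&fancy);  /* dispatches to real_attach() */
}
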
index 3ca00e05f23d2ade287d6c1d9047410eeac43114..a126bdf2795248a1fdb888a18aef1ce3b85f80db 100644 (file)
@@ -902,7 +902,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
                         QLCNIC_MBX_RSP(event[0]));
                break;
        case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
-               qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
+               qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
                break;
        default:
                dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
@@ -2321,19 +2321,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
                        i++;
                        memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
                        i = i + 3;
-                       if (ahw->op_mode == QLCNIC_MGMT_FUNC)
-                               dev_info(dev, "id = %d active = %d type = %d\n"
-                                        "\tport = %d min bw = %d max bw = %d\n"
-                                        "\tmac_addr =  %pM\n", pci_info->id,
-                                        pci_info->active, pci_info->type,
-                                        pci_info->default_port,
-                                        pci_info->tx_min_bw,
-                                        pci_info->tx_max_bw, pci_info->mac);
                }
-               if (ahw->op_mode == QLCNIC_MGMT_FUNC)
-                       dev_info(dev, "Max functions = %d, active functions = %d\n",
-                                ahw->max_pci_func, ahw->act_pci_func);
-
        } else {
                dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
                err = -EIO;
@@ -3279,12 +3267,12 @@ int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
        return 0;
 }
 
-int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
 {
        return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
-               sizeof(adapter->ahw->ext_reg_tbl)) +
-               (ARRAY_SIZE(qlcnic_83xx_reg_tbl) +
-               sizeof(adapter->ahw->reg_tbl));
+               sizeof(*adapter->ahw->ext_reg_tbl)) +
+               (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+               sizeof(*adapter->ahw->reg_tbl));
 }
 
 int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
@@ -3381,10 +3369,21 @@ void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
        }
        config = ahw->port_config;
        if (config & QLC_83XX_CFG_STD_PAUSE) {
-               if (config & QLC_83XX_CFG_STD_TX_PAUSE)
+               switch (MSW(config)) {
+               case QLC_83XX_TX_PAUSE:
                        pause->tx_pause = 1;
-               if (config & QLC_83XX_CFG_STD_RX_PAUSE)
+                       break;
+               case QLC_83XX_RX_PAUSE:
                        pause->rx_pause = 1;
+                       break;
+               case QLC_83XX_TX_RX_PAUSE:
+               default:
+                       /* Backward compatibility for existing
+                        * flash definitions
+                        */
+                       pause->tx_pause = 1;
+                       pause->rx_pause = 1;
+               }
        }
 
        if (QLC_83XX_AUTONEG(config))
@@ -3427,7 +3426,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
                ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
                ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
        } else if (!pause->rx_pause && !pause->tx_pause) {
-               ahw->port_config &= ~QLC_83XX_CFG_STD_TX_RX_PAUSE;
+               ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
+                                     QLC_83XX_CFG_STD_PAUSE);
        }
        status = qlcnic_83xx_set_port_config(adapter);
        if (status) {
index 533e150503afc06fb5db107d1fa4bdbcb894abb8..9f4e4c4ab5214e2215b6cf061347f429c68ff5e6 100644 (file)
@@ -363,6 +363,9 @@ enum qlcnic_83xx_states {
 #define QLC_83XX_LINK_EEE(data)                ((data) & BIT_13)
 #define QLC_83XX_DCBX(data)                    (((data) >> 28) & 7)
 #define QLC_83XX_AUTONEG(data)                 ((data) & BIT_15)
+#define QLC_83XX_TX_PAUSE                      0x10
+#define QLC_83XX_RX_PAUSE                      0x20
+#define QLC_83XX_TX_RX_PAUSE                   0x30
 #define QLC_83XX_CFG_STD_PAUSE                 (1 << 5)
 #define QLC_83XX_CFG_STD_TX_PAUSE              (1 << 20)
 #define QLC_83XX_CFG_STD_RX_PAUSE              (2 << 20)
@@ -626,7 +629,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
 int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
                                    struct qlcnic_info *, u8);
 int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
 void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
index f09e787af0b25ae9d523f531b47b67805c7bc8e0..e2cd48417041c1e749058d89f235a02822eaf159 100644 (file)
@@ -636,7 +636,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
        if (adapter->portnum == 0)
                qlcnic_set_drv_version(adapter);
 
-       qlcnic_dcb_get_info(adapter);
+       qlcnic_dcb_get_info(adapter->dcb);
        qlcnic_83xx_idc_attach_driver(adapter);
 
        return 0;
@@ -818,6 +818,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
        struct qlcnic_hardware_context *ahw = adapter->ahw;
        struct qlcnic_mailbox *mbx = ahw->mailbox;
        int ret = 0;
+       u32 owner;
        u32 val;
 
        /* Perform NIC configuration based ready state entry actions */
@@ -846,6 +847,10 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
                        clear_bit(QLC_83XX_MBX_READY, &mbx->status);
                        set_bit(__QLCNIC_RESETTING, &adapter->state);
                        qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+               }  else {
+                       owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+                       if (ahw->pci_func == owner)
+                               qlcnic_dump_fw(adapter);
                }
                return -EIO;
        }
@@ -1058,6 +1063,12 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
        adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
        qlcnic_83xx_periodic_tasks(adapter);
 
+       /* Do not reschedule if firmware is in a hung state and auto
+        * recovery is disabled
+        */
+       if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
+               return;
+
        /* Re-schedule the function */
        if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
                qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -2163,6 +2174,7 @@ static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
 int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 {
        struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_dcb *dcb;
        int err = 0;
 
        ahw->msix_supported = !!qlcnic_use_msi_x;
@@ -2220,8 +2232,10 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
        if (err)
                goto disable_mbx_intr;
 
-       if (adapter->dcb && qlcnic_dcb_attach(adapter))
-               qlcnic_clear_dcb_ops(adapter);
+       dcb = adapter->dcb;
+
+       if (dcb && qlcnic_dcb_attach(dcb))
+               qlcnic_clear_dcb_ops(dcb);
 
        /* Periodically monitor device status */
        qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
index 0248a4c2f5dd15fb1f12827b90bf1af0b1e73ba1..734d28602ac3f335214ba9cc5f1c9d4408d6d554 100644 (file)
@@ -94,13 +94,29 @@ qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
  **/
 static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
 {
-       int err = -EIO;
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct device *dev = &adapter->pdev->dev;
+       struct qlcnic_npar_info *npar;
+       int i, err = -EIO;
 
        qlcnic_83xx_get_minidump_template(adapter);
+
        if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
                if (qlcnic_init_pci_info(adapter))
                        return err;
 
+               npar = adapter->npars;
+
+               for (i = 0; i < ahw->act_pci_func; i++, npar++) {
+                       dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
+                                npar->pci_func, npar->active, npar->type,
+                                npar->phy_port, npar->min_bw, npar->max_bw,
+                                npar->mac);
+               }
+
+               dev_info(dev, "Max functions = %d, active functions = %d\n",
+                        ahw->max_pci_func, ahw->act_pci_func);
+
                if (qlcnic_83xx_set_vnic_opmode(adapter))
                        return err;
 
@@ -115,12 +131,12 @@ static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
                return err;
 
        qlcnic_83xx_config_vnic_buff_descriptors(adapter);
-       adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+       ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
        adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
        qlcnic_83xx_enable_vnic_mode(adapter, 1);
 
-       dev_info(&adapter->pdev->dev, "HAL Version: %d, Management function\n",
-                adapter->ahw->fw_hal_version);
+       dev_info(dev, "HAL Version: %d, Management function\n",
+                ahw->fw_hal_version);
 
        return 0;
 }
@@ -240,8 +256,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
        return 0;
 }
 
-static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
-                                            int func, int *port_id)
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
+                                       int func, int *port_id)
 {
        struct qlcnic_info nic_info;
        int err = 0;
@@ -257,23 +273,8 @@ static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
        else
                err = -EIO;
 
-       return err;
-}
-
-int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
-{
-       int id, err = 0;
-
-       err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
-       if (err)
-               return err;
-
-       if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
-               if (!qlcnic_enable_eswitch(adapter, id, 1))
-                       adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
-               else
-                       err = -EIO;
-       }
+       if (!err)
+               adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
 
        return err;
 }
index d62d5ce432ec7525ca3f504b790d050797f343d2..86bca7c14f99245a07e65a7385e8b67796afb3a9 100644 (file)
@@ -57,22 +57,22 @@ static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
 static void qlcnic_dcb_aen_work(struct work_struct *);
 static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
 
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
-static void __qlcnic_dcb_free(struct qlcnic_adapter *);
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
-
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
-
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
+static void __qlcnic_dcb_free(struct qlcnic_dcb *);
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *, bool);
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
 
 struct qlcnic_dcb_capability {
        bool    tsa_capability;
@@ -180,7 +180,7 @@ static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
        .query_cee_param        = qlcnic_83xx_dcb_query_cee_param,
        .get_cee_cfg            = qlcnic_83xx_dcb_get_cee_cfg,
        .register_aen           = qlcnic_83xx_dcb_register_aen,
-       .handle_aen             = qlcnic_83xx_dcb_handle_aen,
+       .aen_handler            = qlcnic_83xx_dcb_aen_handler,
 };
 
 static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
@@ -193,7 +193,7 @@ static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
        .get_hw_capability      = qlcnic_82xx_dcb_get_hw_capability,
        .query_cee_param        = qlcnic_82xx_dcb_query_cee_param,
        .get_cee_cfg            = qlcnic_82xx_dcb_get_cee_cfg,
-       .handle_aen             = qlcnic_82xx_dcb_handle_aen,
+       .aen_handler            = qlcnic_82xx_dcb_aen_handler,
 };
 
 static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
@@ -242,10 +242,10 @@ static int qlcnic_dcb_prio_count(u8 up_tc_map)
        return j;
 }
 
-static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
 {
-       if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
-               adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+       if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
+               dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
 }
 
 static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
@@ -256,7 +256,7 @@ static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
                adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
 }
 
-int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_dcb *dcb;
 
@@ -267,20 +267,22 @@ int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
        adapter->dcb = dcb;
        dcb->adapter = adapter;
        qlcnic_set_dcb_ops(adapter);
+       dcb->state = 0;
 
        return 0;
 }
 
-static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
 {
-       struct qlcnic_dcb *dcb = adapter->dcb;
+       struct qlcnic_adapter *adapter;
 
        if (!dcb)
                return;
 
-       qlcnic_dcb_register_aen(adapter, 0);
+       adapter = dcb->adapter;
+       qlcnic_dcb_register_aen(dcb, 0);
 
-       while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+       while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
                usleep_range(10000, 11000);
 
        cancel_delayed_work_sync(&dcb->aen_work);
@@ -298,23 +300,22 @@ static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
        adapter->dcb = NULL;
 }
 
-static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
 {
-       qlcnic_dcb_get_hw_capability(adapter);
-       qlcnic_dcb_get_cee_cfg(adapter);
-       qlcnic_dcb_register_aen(adapter, 1);
+       qlcnic_dcb_get_hw_capability(dcb);
+       qlcnic_dcb_get_cee_cfg(dcb);
+       qlcnic_dcb_register_aen(dcb, 1);
 }
 
-static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
 {
-       struct qlcnic_dcb *dcb = adapter->dcb;
        int err = 0;
 
        INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
 
        dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
        if (!dcb->wq) {
-               dev_err(&adapter->pdev->dev,
+               dev_err(&dcb->adapter->pdev->dev,
                        "DCB workqueue allocation failed. DCB will be disabled\n");
                return -1;
        }
@@ -331,7 +332,7 @@ static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
                goto out_free_cfg;
        }
 
-       qlcnic_dcb_get_info(adapter);
+       qlcnic_dcb_get_info(dcb);
 
        return 0;
 out_free_cfg:
@@ -345,9 +346,9 @@ out_free_wq:
        return err;
 }
 
-static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
-                                           char *buf)
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
 {
+       struct qlcnic_adapter *adapter = dcb->adapter;
        struct qlcnic_cmd_args cmd;
        u32 mbx_out;
        int err;
@@ -371,15 +372,15 @@ static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
        return err;
 }
 
-static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
+static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
 {
-       struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+       struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
        u32 mbx_out;
        int err;
 
        memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
 
-       err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
+       err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
        if (err)
                return err;
 
@@ -397,21 +398,21 @@ static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
        if (cap->max_num_tc > QLC_DCB_MAX_TC ||
            cap->max_ets_tc > cap->max_num_tc ||
            cap->max_pfc_tc > cap->max_num_tc) {
-               dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
+               dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
                return -EINVAL;
        }
 
        return err;
 }
 
-static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
 {
-       struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+       struct qlcnic_dcb_cfg *cfg = dcb->cfg;
        struct qlcnic_dcb_capability *cap;
        u32 mbx_out;
        int err;
 
-       err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+       err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
        if (err)
                return err;
 
@@ -419,15 +420,16 @@ static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
        cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
 
        if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
-               set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+               set_bit(QLCNIC_DCB_STATE, &dcb->state);
 
        return err;
 }
 
-static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
                                           char *buf, u8 type)
 {
        u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+       struct qlcnic_adapter *adapter = dcb->adapter;
        struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
        struct device *dev = &adapter->pdev->dev;
        dma_addr_t cardrsp_phys_addr;
@@ -447,8 +449,7 @@ static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
                return -EINVAL;
        }
 
-       addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
-                                 GFP_KERNEL);
+       addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
        if (addr == NULL)
                return -ENOMEM;
 
@@ -488,72 +489,67 @@ out:
        qlcnic_free_mbx_args(&cmd);
 
 out_free_rsp:
-       dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
+       dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
 
        return err;
 }
 
-static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
 {
        struct qlcnic_dcb_mbx_params *mbx;
        int err;
 
-       mbx = adapter->dcb->param;
+       mbx = dcb->param;
        if (!mbx)
                return 0;
 
-       err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
                                         QLC_DCB_LOCAL_PARAM_FWID);
        if (err)
                return err;
 
-       err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
                                         QLC_DCB_OPER_PARAM_FWID);
        if (err)
                return err;
 
-       err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
                                         QLC_DCB_PEER_PARAM_FWID);
        if (err)
                return err;
 
        mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
 
-       qlcnic_dcb_data_cee_param_map(adapter);
+       qlcnic_dcb_data_cee_param_map(dcb->adapter);
 
        return err;
 }
 
 static void qlcnic_dcb_aen_work(struct work_struct *work)
 {
-       struct qlcnic_adapter *adapter;
        struct qlcnic_dcb *dcb;
 
        dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
-       adapter = dcb->adapter;
 
-       qlcnic_dcb_get_cee_cfg(adapter);
-       clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
+       qlcnic_dcb_get_cee_cfg(dcb);
+       clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
 }
 
-static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
-                                      void *data)
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
 {
-       struct qlcnic_dcb *dcb = adapter->dcb;
-
-       if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+       if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
                return;
 
        queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
 }
 
-static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
 {
-       struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+       struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
        u32 mbx_out;
        int err;
 
-       err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+       err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
        if (err)
                return err;
 
@@ -565,14 +561,15 @@ static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
                cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
 
        if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
-               set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+               set_bit(QLCNIC_DCB_STATE, &dcb->state);
 
        return err;
 }
 
-static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
                                           char *buf, u8 idx)
 {
+       struct qlcnic_adapter *adapter = dcb->adapter;
        struct qlcnic_dcb_mbx_params mbx_out;
        int err, i, j, k, max_app, size;
        struct qlcnic_dcb_param *each;
@@ -632,24 +629,23 @@ out:
        return err;
 }
 
-static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
 {
-       struct qlcnic_dcb *dcb = adapter->dcb;
        int err;
 
-       err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
+       err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
        if (err)
                return err;
 
-       qlcnic_dcb_data_cee_param_map(adapter);
+       qlcnic_dcb_data_cee_param_map(dcb->adapter);
 
        return err;
 }
 
-static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
-                                       bool flag)
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_dcb *dcb, bool flag)
 {
        u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
+       struct qlcnic_adapter *adapter = dcb->adapter;
        struct qlcnic_cmd_args cmd;
        int err;
 
@@ -669,19 +665,17 @@ static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
        return err;
 }
 
-static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
-                                      void *data)
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
 {
-       struct qlcnic_dcb *dcb = adapter->dcb;
        u32 *val = data;
 
-       if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+       if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
                return;
 
        if (*val & BIT_8)
-               set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+               set_bit(QLCNIC_DCB_STATE, &dcb->state);
        else
-               clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
+               clear_bit(QLCNIC_DCB_STATE, &dcb->state);
 
        queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
 }
@@ -814,12 +808,12 @@ static u8 qlcnic_dcb_get_state(struct net_device *netdev)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
-       return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
+       return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
 }
 
 static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
 {
-       memcpy(addr, netdev->dev_addr, netdev->addr_len);
+       memcpy(addr, netdev->perm_addr, netdev->addr_len);
 }
 
 static void
@@ -834,7 +828,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
        type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
        *prio = *pgid = *bw_per = *up_tc_map = 0;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
            !type->tc_param_valid)
                return;
 
@@ -870,7 +864,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
        *bw_pct = 0;
        type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
            !type->tc_param_valid)
                return;
 
@@ -896,7 +890,7 @@ static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
        *setting = 0;
        type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
            !type->pfc_mode_enable)
                return;
 
@@ -915,7 +909,7 @@ static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        switch (capid) {
@@ -944,7 +938,7 @@ static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return -EINVAL;
 
        switch (attr) {
@@ -967,7 +961,7 @@ static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
                                .protocol = id,
                             };
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        return dcb_getapp(netdev, &app);
@@ -978,7 +972,7 @@ static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_dcb *dcb = adapter->dcb;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
                return 0;
 
        return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
@@ -989,7 +983,7 @@ static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        return cfg->capability.dcb_capability;
@@ -1000,7 +994,7 @@ static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct qlcnic_dcb_cee *type;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 1;
 
        type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
@@ -1055,7 +1049,7 @@ static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
 
        *app_count = 0;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1076,7 +1070,7 @@ static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
        struct qlcnic_dcb_app *app;
        int i, j;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1101,7 +1095,7 @@ static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
        struct qlcnic_dcb_cee *peer;
        u8 i, j, k, map;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
@@ -1136,7 +1130,7 @@ static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
 
        pfc->pfc_en = 0;
 
-       if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+       if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
                return 0;
 
        peer = &cfg->type[QLC_DCB_PEER_IDX];
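
The hunks above move the DCB status flag out of the shared adapter->state word and into a state word owned by struct qlcnic_dcb itself; the new QLCNIC_DCB_STATE bit and the state member appear in the header hunk that follows. As a rough userspace sketch of the idea (a feature keeps its flags in its own bitmap, so callers test the feature object rather than the parent device), with all names invented for illustration and plain, non-atomic helpers standing in for the kernel's set_bit/test_bit:

#include <stdbool.h>
#include <stdio.h>

#define DCB_STATE_UP  0   /* plays the role of QLCNIC_DCB_STATE */
#define DCB_AEN_MODE  1   /* plays the role of QLCNIC_DCB_AEN_MODE */

struct dcb_ctx {
	unsigned long state;            /* per-feature flag word */
};

static void dcb_set_bit(struct dcb_ctx *dcb, int bit)
{
	dcb->state |= 1UL << bit;
}

static bool dcb_test_bit(const struct dcb_ctx *dcb, int bit)
{
	return dcb->state & (1UL << bit);
}

int main(void)
{
	struct dcb_ctx dcb = { .state = 0 };

	/* callers gate DCB queries on the DCB object's own state word */
	if (!dcb_test_bit(&dcb, DCB_STATE_UP))
		printf("DCB not ready, query skipped\n");

	dcb_set_bit(&dcb, DCB_STATE_UP);
	if (dcb_test_bit(&dcb, DCB_STATE_UP))
		printf("DCB ready\n");
	return 0;
}
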
index b87ce9fb503e48298ea9e96e0490047fd3bb488b..c04ae0cdc108c98eb57d8a860d772535379178af 100644 (file)
@@ -8,26 +8,29 @@
 #ifndef __QLCNIC_DCBX_H
 #define __QLCNIC_DCBX_H
 
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
+#define QLCNIC_DCB_STATE       0
+#define QLCNIC_DCB_AEN_MODE    1
 
 #ifdef CONFIG_QLCNIC_DCB
-int __qlcnic_register_dcb(struct qlcnic_adapter *);
+int qlcnic_register_dcb(struct qlcnic_adapter *);
 #else
-static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
 { return 0; }
 #endif
 
+struct qlcnic_dcb;
+
 struct qlcnic_dcb_ops {
-       void (*init_dcbnl_ops) (struct qlcnic_adapter *);
-       void (*free) (struct qlcnic_adapter *);
-       int (*attach) (struct qlcnic_adapter *);
-       int (*query_hw_capability) (struct qlcnic_adapter *, char *);
-       int (*get_hw_capability) (struct qlcnic_adapter *);
-       void (*get_info) (struct qlcnic_adapter *);
-       int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
-       int (*get_cee_cfg) (struct qlcnic_adapter *);
-       int (*register_aen) (struct qlcnic_adapter *, bool);
-       void (*handle_aen) (struct qlcnic_adapter *, void *);
+       int (*query_hw_capability) (struct qlcnic_dcb *, char *);
+       int (*get_hw_capability) (struct qlcnic_dcb *);
+       int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
+       void (*init_dcbnl_ops) (struct qlcnic_dcb *);
+       int (*register_aen) (struct qlcnic_dcb *, bool);
+       void (*aen_handler) (struct qlcnic_dcb *, void *);
+       int (*get_cee_cfg) (struct qlcnic_dcb *);
+       void (*get_info) (struct qlcnic_dcb *);
+       int (*attach) (struct qlcnic_dcb *);
+       void (*free) (struct qlcnic_dcb *);
 };
 
 struct qlcnic_dcb {
@@ -37,5 +40,85 @@ struct qlcnic_dcb {
        struct workqueue_struct         *wq;
        struct qlcnic_dcb_ops           *ops;
        struct qlcnic_dcb_cfg           *cfg;
+       unsigned long                   state;
 };
+
+static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
+{
+       kfree(dcb);
+       dcb = NULL;
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->get_hw_capability)
+               return dcb->ops->get_hw_capability(dcb);
+
+       return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->free)
+               dcb->ops->free(dcb);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->attach)
+               return dcb->ops->attach(dcb);
+
+       return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+       if (dcb && dcb->ops->query_hw_capability)
+               return dcb->ops->query_hw_capability(dcb, buf);
+
+       return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->get_info)
+               dcb->ops->get_info(dcb);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
+{
+       if (dcb && dcb->ops->query_cee_param)
+               return dcb->ops->query_cee_param(dcb, buf, type);
+
+       return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->get_cee_cfg)
+               return dcb->ops->get_cee_cfg(dcb);
+
+       return 0;
+}
+
+static inline void
+qlcnic_dcb_register_aen(struct qlcnic_dcb *dcb, u8 flag)
+{
+       if (dcb && dcb->ops->register_aen)
+               dcb->ops->register_aen(dcb, flag);
+}
+
+static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
+{
+       if (dcb && dcb->ops->aen_handler)
+               dcb->ops->aen_handler(dcb, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+       if (dcb && dcb->ops->init_dcbnl_ops)
+               dcb->ops->init_dcbnl_ops(dcb);
+}
 #endif
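
The header above now exposes the DCB interface as static inline wrappers that tolerate a NULL dcb pointer or a missing callback, so call sites no longer have to check the attach status or CONFIG_QLCNIC_DCB themselves. A minimal standalone sketch of that wrapper-around-an-ops-table shape (nic_dcb, nic_dcb_ops and demo_attach are invented names, not the driver's):

#include <stddef.h>
#include <stdio.h>

struct nic_dcb;

/* optional callbacks; any of them may be left NULL */
struct nic_dcb_ops {
	int  (*attach)(struct nic_dcb *dcb);
	void (*get_info)(struct nic_dcb *dcb);
};

struct nic_dcb {
	const struct nic_dcb_ops *ops;
};

/* NULL-safe wrappers: callable with dcb == NULL or a missing op */
static int nic_dcb_attach(struct nic_dcb *dcb)
{
	if (dcb && dcb->ops && dcb->ops->attach)
		return dcb->ops->attach(dcb);
	return 0;
}

static void nic_dcb_get_info(struct nic_dcb *dcb)
{
	if (dcb && dcb->ops && dcb->ops->get_info)
		dcb->ops->get_info(dcb);
}

static int demo_attach(struct nic_dcb *dcb)
{
	(void)dcb;
	printf("attach called\n");
	return 0;
}

int main(void)
{
	static const struct nic_dcb_ops ops = { .attach = demo_attach };
	struct nic_dcb dcb = { .ops = &ops };

	nic_dcb_attach(&dcb);     /* dispatches through the ops table */
	nic_dcb_get_info(&dcb);   /* silently skipped: op not provided */
	nic_dcb_attach(NULL);     /* also safe */
	return 0;
}
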
index ebe4c86e5230223f0c5465485a9812dc1a0aa3b0..b2a8805997ca98ccde591cd05dc0b2951470ef19 100644 (file)
@@ -187,8 +187,8 @@ static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
                return -1;
 }
 
-#define QLCNIC_RING_REGS_COUNT 20
-#define QLCNIC_RING_REGS_LEN   (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define        QLCNIC_TX_INTR_NOT_CONFIGURED   0X78563412
+
 #define QLCNIC_MAX_EEPROM_LEN   1024
 
 static const u32 diag_registers[] = {
@@ -219,7 +219,15 @@ static const u32 ext_diag_registers[] = {
 };
 
 #define QLCNIC_MGMT_API_VERSION        2
-#define QLCNIC_ETHTOOL_REGS_VER        3
+#define QLCNIC_ETHTOOL_REGS_VER        4
+
+static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
+{
+       int ring_regs_cnt = (adapter->max_drv_tx_rings * 5) +
+                           (adapter->max_rds_rings * 2) +
+                           (adapter->max_sds_rings * 3) + 5;
+       return ring_regs_cnt * sizeof(u32);
+}
 
 static int qlcnic_get_regs_len(struct net_device *dev)
 {
@@ -231,7 +239,9 @@ static int qlcnic_get_regs_len(struct net_device *dev)
        else
                len = sizeof(ext_diag_registers) + sizeof(diag_registers);
 
-       return QLCNIC_RING_REGS_LEN + len + QLCNIC_DEV_INFO_SIZE + 1;
+       len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
+       len += qlcnic_get_ring_regs_len(adapter);
+       return len;
 }
 
 static int qlcnic_get_eeprom_len(struct net_device *dev)
@@ -493,6 +503,8 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
        struct qlcnic_adapter *adapter = netdev_priv(dev);
        struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct qlcnic_host_sds_ring *sds_ring;
+       struct qlcnic_host_rds_ring *rds_rings;
+       struct qlcnic_host_tx_ring *tx_ring;
        u32 *regs_buff = p;
        int ring, i = 0;
 
@@ -512,21 +524,35 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
        if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
                return;
 
-       regs_buff[i++] = 0xFFEFCDAB; /* Marker btw regs and ring count*/
-
-       regs_buff[i++] = 1; /* No. of tx ring */
-       regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
-       regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
-
-       regs_buff[i++] = 2; /* No. of rx ring */
-       regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
-       regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+       /* Marker btw regs and TX ring count */
+       regs_buff[i++] = 0xFFEFCDAB;
+
+       regs_buff[i++] = adapter->max_drv_tx_rings; /* No. of TX ring */
+       for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+               tx_ring = &adapter->tx_ring[ring];
+               regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
+               regs_buff[i++] = tx_ring->sw_consumer;
+               regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
+               regs_buff[i++] = tx_ring->producer;
+               if (tx_ring->crb_intr_mask)
+                       regs_buff[i++] = readl(tx_ring->crb_intr_mask);
+               else
+                       regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
+       }
 
-       regs_buff[i++] = adapter->max_sds_rings;
+       regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_rings = &recv_ctx->rds_rings[ring];
+               regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
+               regs_buff[i++] = rds_rings->producer;
+       }
 
+       regs_buff[i++] = adapter->max_sds_rings; /* No. of SDS ring */
        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &(recv_ctx->sds_rings[ring]);
                regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+               regs_buff[i++] = sds_ring->consumer;
+               regs_buff[i++] = readl(sds_ring->crb_intr_mask);
        }
 }
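
The two ethtool hunks above replace the fixed QLCNIC_RING_REGS_LEN with a length that scales with the configured rings: the register dump now carries a marker word, a count word per ring type, five words per TX ring, two per RDS ring and three per SDS ring, and qlcnic_get_ring_regs_len() has to stay in step with what qlcnic_get_regs() actually writes. A small standalone sketch of keeping such a variable-length snapshot and its length helper in lockstep (the layout constants and ring counts below are illustrative, not the driver's exact formula):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SNAP_MARKER        0xFFEFCDABu
#define WORDS_PER_TX_RING  5
#define WORDS_PER_RX_RING  2
#define WORDS_PER_SDS_RING 3

struct ring_cfg {
	unsigned int n_tx, n_rx, n_sds;
};

static size_t snapshot_words(const struct ring_cfg *cfg)
{
	/* 1 marker + 3 count words + per-ring payload */
	return 4 + cfg->n_tx * WORDS_PER_TX_RING +
		   cfg->n_rx * WORDS_PER_RX_RING +
		   cfg->n_sds * WORDS_PER_SDS_RING;
}

static size_t snapshot_fill(const struct ring_cfg *cfg, uint32_t *buf)
{
	size_t i = 0;
	unsigned int ring, w;

	buf[i++] = SNAP_MARKER;

	buf[i++] = cfg->n_tx;
	for (ring = 0; ring < cfg->n_tx; ring++)
		for (w = 0; w < WORDS_PER_TX_RING; w++)
			buf[i++] = 0;   /* producers/consumers would go here */

	buf[i++] = cfg->n_rx;
	for (ring = 0; ring < cfg->n_rx; ring++)
		for (w = 0; w < WORDS_PER_RX_RING; w++)
			buf[i++] = 0;

	buf[i++] = cfg->n_sds;
	for (ring = 0; ring < cfg->n_sds; ring++)
		for (w = 0; w < WORDS_PER_SDS_RING; w++)
			buf[i++] = 0;

	return i;
}

int main(void)
{
	struct ring_cfg cfg = { .n_tx = 4, .n_rx = 2, .n_sds = 4 };
	size_t want = snapshot_words(&cfg);
	uint32_t *buf = calloc(want, sizeof(*buf));
	size_t got;

	if (!buf)
		return 1;
	got = snapshot_fill(&cfg, buf);
	printf("expected %zu words, wrote %zu words\n", want, got);
	free(buf);
	return got == want ? 0 : 1;
}

Running the sketch prints matching expected and written word counts; if the writer and the length helper ever drift apart, the exit status flags it, which is the property the driver change has to preserve.
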
 
@@ -665,7 +691,7 @@ static int qlcnic_set_channels(struct net_device *dev,
                        return err;
        }
 
-       if (channel->tx_count) {
+       if (qlcnic_82xx_check(adapter) && channel->tx_count) {
                err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
                if (err)
                        return err;
index f8adc7b01f1f5ef9e62899a9c68c5eea9d2e2cba..73e72eb83bdfb346fd034140b5df7b654d7db193 100644 (file)
@@ -445,7 +445,7 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
 
        mac_req = (struct qlcnic_mac_req *)&req.words[0];
        mac_req->op = op;
-       memcpy(mac_req->mac_addr, addr, 6);
+       memcpy(mac_req->mac_addr, addr, ETH_ALEN);
 
        vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
        vlan_req->vlan_id = cpu_to_le16(vlan_id);
index 11b4bb83b9308b2c4a9331ea737ee7766c074794..897627dd1d04e7f6c3cd60ad25ede51fad376327 100644 (file)
@@ -1011,7 +1011,7 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
                }
                break;
        case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
-               qlcnic_dcb_handle_aen(adapter, (void *)&msg);
+               qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
                break;
        default:
                break;
index 21d00a0449a10f394fe6b02d5689c7b0ab3b95cf..dcf4a4e7ce23d49073b949b2275f8192b09a0c97 100644 (file)
@@ -819,7 +819,7 @@ static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
 int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
 {
        struct qlcnic_pci_info *pci_info;
-       int i, ret = 0, j = 0;
+       int i, id = 0, ret = 0, j = 0;
        u16 act_pci_func;
        u8 pfn;
 
@@ -860,7 +860,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
                        continue;
 
                if (qlcnic_port_eswitch_cfg_capability(adapter)) {
-                       if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+                       if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
+                                                                &id))
                                adapter->npars[j].eswitch_status = true;
                        else
                                continue;
@@ -875,15 +876,16 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
                adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
                adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
 
+               memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
                j++;
        }
 
-       if (qlcnic_82xx_check(adapter)) {
+       /* Update eSwitch status for adapters without per port eSwitch
+        * configuration capability
+        */
+       if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
                for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
                        adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
-       } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
-               for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
-                       qlcnic_enable_eswitch(adapter, i, 1);
        }
 
        kfree(pci_info);
@@ -2069,7 +2071,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                return err;
        }
 
-       qlcnic_dcb_init_dcbnl_ops(adapter);
+       qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
 
        return 0;
 }
@@ -2164,17 +2166,6 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
                qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
 }
 
-static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
-{
-       return __qlcnic_register_dcb(adapter);
-}
-
-void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
-{
-       kfree(adapter->dcb);
-       adapter->dcb = NULL;
-}
-
 static int
 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
@@ -2183,6 +2174,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct qlcnic_hardware_context *ahw;
        int err, pci_using_dac = -1;
        char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+       struct qlcnic_dcb *dcb;
 
        if (pdev->is_virtfn)
                return -ENODEV;
@@ -2257,7 +2249,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = qlcnic_alloc_adapter_resources(adapter);
        if (err)
-               goto err_out_free_netdev;
+               goto err_out_free_wq;
 
        adapter->dev_rst_time = jiffies;
        adapter->ahw->revision_id = pdev->revision;
@@ -2303,8 +2295,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
                adapter->flags |= QLCNIC_NEED_FLR;
 
-               if (adapter->dcb && qlcnic_dcb_attach(adapter))
-                       qlcnic_clear_dcb_ops(adapter);
+               dcb = adapter->dcb;
+
+               if (dcb && qlcnic_dcb_attach(dcb))
+                       qlcnic_clear_dcb_ops(dcb);
 
        } else if (qlcnic_83xx_check(adapter)) {
                adapter->max_drv_tx_rings = 1;
@@ -2396,6 +2390,9 @@ err_out_disable_msi:
 err_out_free_hw:
        qlcnic_free_adapter_resources(adapter);
 
+err_out_free_wq:
+       destroy_workqueue(adapter->qlcnic_wq);
+
 err_out_free_netdev:
        free_netdev(netdev);
 
@@ -2409,7 +2406,6 @@ err_out_free_res:
        pci_release_regions(pdev);
 
 err_out_disable_pdev:
-       pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
        return err;
 
@@ -2446,7 +2442,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
        qlcnic_cancel_idc_work(adapter);
        ahw = adapter->ahw;
 
-       qlcnic_dcb_free(adapter);
+       qlcnic_dcb_free(adapter->dcb);
 
        unregister_netdev(netdev);
        qlcnic_sriov_cleanup(adapter);
@@ -2485,7 +2481,6 @@ static void qlcnic_remove(struct pci_dev *pdev)
        pci_disable_pcie_error_reporting(pdev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        if (adapter->qlcnic_wq) {
                destroy_workqueue(adapter->qlcnic_wq);
@@ -3324,7 +3319,7 @@ qlcnic_attach_work(struct work_struct *work)
                return;
        }
 attach:
-       qlcnic_dcb_get_info(adapter);
+       qlcnic_dcb_get_info(adapter->dcb);
 
        if (netif_running(netdev)) {
                if (qlcnic_up(adapter, netdev))
@@ -3349,6 +3344,8 @@ done:
 static int
 qlcnic_check_health(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_hardware_context *ahw = adapter->ahw;
+       struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
        u32 state = 0, heartbeat;
        u32 peg_status;
        int err = 0;
@@ -3373,7 +3370,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
                if (adapter->need_fw_reset)
                        goto detach;
 
-               if (adapter->ahw->reset_context && qlcnic_auto_fw_reset)
+               if (ahw->reset_context && qlcnic_auto_fw_reset)
                        qlcnic_reset_hw_context(adapter);
 
                return 0;
@@ -3416,6 +3413,9 @@ detach:
 
                qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
                QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+       } else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
+                  adapter->flags & QLCNIC_FW_RESET_OWNER) {
+               qlcnic_dump_fw(adapter);
        }
 
        return 1;
@@ -3648,11 +3648,6 @@ int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
        u8 max_hw = QLCNIC_MAX_TX_RINGS;
        u32 max_allowed;
 
-       if (!qlcnic_82xx_check(adapter)) {
-               netdev_err(netdev, "No Multi TX-Q support\n");
-               return -EINVAL;
-       }
-
        if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
                netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
                return -EINVAL;
@@ -3692,8 +3687,7 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
        u8 max_hw = adapter->ahw->max_rx_ques;
        u32 max_allowed;
 
-       if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
-           !qlcnic_use_msi) {
+       if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
                netdev_err(netdev, "No RSS support in INT-x mode\n");
                return -EINVAL;
        }
index 15513608d4808851917f1b5294e7733a5c4ea5cd..7763962e2ec4e9128ae22cf7b8ee694248d8058d 100644 (file)
@@ -1187,41 +1187,38 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
                }
 
                if (ops_index == ops_cnt) {
-                       dev_info(&adapter->pdev->dev,
-                                "Invalid entry type %d, exiting dump\n",
+                       dev_info(dev, "Skipping unknown entry opcode %d\n",
                                 entry->hdr.type);
-                       goto error;
+                       entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+                       entry_offset += entry->hdr.offset;
+                       continue;
                }
 
                /* Collect dump for this entry */
                dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
-               if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
+               if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
                        entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+                       entry_offset += entry->hdr.offset;
+                       continue;
+               }
+
                buf_offset += entry->hdr.cap_size;
                entry_offset += entry->hdr.offset;
                buffer = fw_dump->data + buf_offset;
        }
-       if (dump_size != buf_offset) {
-               dev_info(&adapter->pdev->dev,
-                        "Captured(%d) and expected size(%d) do not match\n",
-                        buf_offset, dump_size);
-               goto error;
-       } else {
-               fw_dump->clr = 1;
-               snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
-                        adapter->netdev->name);
-               dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
-                        adapter->netdev->name, fw_dump->size);
-               /* Send a udev event to notify availability of FW dump */
-               kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
-               return 0;
-       }
-error:
+
+       fw_dump->clr = 1;
+       snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
+       dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
+                adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
+       /* Send a udev event to notify availability of FW dump */
+       kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
+
        if (fw_dump->use_pex_dma)
                dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
                                  fw_dump->dma_buffer, fw_dump->phys_addr);
-       vfree(fw_dump->data);
-       return -EINVAL;
+
+       return 0;
 }
 
 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
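
In the hunk above the firmware dump loop no longer aborts on an unknown or invalid template entry: it marks the entry with QLCNIC_DUMP_SKIP, advances entry_offset and continues, so one bad entry no longer discards the rest of the dump. A compact userspace sketch of that skip-and-continue walk (the entry layout, flag value and handler check are made up for the example):

#include <stdbool.h>
#include <stdio.h>

#define ENTRY_FLAG_SKIP 0x1u

struct dump_entry {
	unsigned int type;
	unsigned int size;      /* bytes this entry would contribute */
	unsigned int flags;
};

/* pretend only types 1 and 2 have capture handlers */
static bool has_handler(unsigned int type)
{
	return type == 1 || type == 2;
}

static unsigned int capture_all(struct dump_entry *e, unsigned int n)
{
	unsigned int i, captured = 0;

	for (i = 0; i < n; i++) {
		if (!has_handler(e[i].type)) {
			/* unknown entry: mark it skipped and keep going
			 * instead of aborting the whole dump
			 */
			printf("skipping unknown entry type %u\n", e[i].type);
			e[i].flags |= ENTRY_FLAG_SKIP;
			continue;
		}
		captured += e[i].size;
	}
	return captured;
}

int main(void)
{
	struct dump_entry entries[] = {
		{ .type = 1, .size = 64 },
		{ .type = 9, .size = 0 },       /* unknown: skipped */
		{ .type = 2, .size = 128 },
	};

	printf("captured %u bytes\n",
	       capture_all(entries, sizeof(entries) / sizeof(entries[0])));
	return 0;
}
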
index 392b9bd12b4fb67585501f817defd1fb0194b9ca..8b96e29df30fbd2b26da283bca65aa3d88d2cb41 100644 (file)
@@ -500,6 +500,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
                                 int pci_using_dac)
 {
+       struct qlcnic_dcb *dcb;
        int err;
 
        INIT_LIST_HEAD(&adapter->vf_mc_list);
@@ -533,8 +534,10 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
        if (err)
                goto err_out_send_channel_term;
 
-       if (adapter->dcb && qlcnic_dcb_attach(adapter))
-               qlcnic_clear_dcb_ops(adapter);
+       dcb = adapter->dcb;
+
+       if (dcb && qlcnic_dcb_attach(dcb))
+               qlcnic_clear_dcb_ops(dcb);
 
        err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
        if (err)
@@ -1577,7 +1580,7 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
        if (err)
                goto err_out_term_channel;
 
-       qlcnic_dcb_get_info(adapter);
+       qlcnic_dcb_get_info(adapter->dcb);
 
        return 0;
 
index 89943377846699e1e2719be8b2da16b07b144d09..0c9c4e89559524d78aa789dfb65e5e32211b8bc1 100644 (file)
@@ -18,7 +18,7 @@
  */
 #define DRV_NAME       "qlge"
 #define DRV_STRING     "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION    "v1.00.00.32"
+#define DRV_VERSION    "1.00.00.33"
 
 #define WQ_ADDR_ALIGN  0x3     /* 4 byte alignment */
 
@@ -2206,14 +2206,14 @@ extern char qlge_driver_name[];
 extern const char qlge_driver_version[];
 extern const struct ethtool_ops qlge_ethtool_ops;
 
-extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
-extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
-extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
-extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
-                              u32 *value);
-extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
-extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
-                       u16 q_id);
+int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
+void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
+int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
+                       u32 *value);
+int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
+int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
+                u16 q_id);
 void ql_queue_fw_error(struct ql_adapter *qdev);
 void ql_mpi_work(struct work_struct *work);
 void ql_mpi_reset_work(struct work_struct *work);
@@ -2233,10 +2233,9 @@ int ql_unpause_mpi_risc(struct ql_adapter *qdev);
 int ql_pause_mpi_risc(struct ql_adapter *qdev);
 int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
 int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
-int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
-               u32 ram_addr, int word_count);
-int ql_core_dump(struct ql_adapter *qdev,
-               struct ql_mpi_coredump *mpi_coredump);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr,
+                         int word_count);
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump);
 int ql_mb_about_fw(struct ql_adapter *qdev);
 int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
 int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
@@ -2249,8 +2248,7 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev);
 int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
 void ql_get_dump(struct ql_adapter *qdev, void *buff);
-void ql_gen_reg_dump(struct ql_adapter *qdev,
-                       struct ql_reg_dump *mpi_coredump);
+void ql_gen_reg_dump(struct ql_adapter *qdev, struct ql_reg_dump *mpi_coredump);
 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
 void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
 int ql_own_firmware(struct ql_adapter *qdev);
@@ -2264,9 +2262,9 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
 /* #define QL_OB_DUMP */
 
 #ifdef QL_REG_DUMP
-extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
-extern void ql_dump_routing_entries(struct ql_adapter *qdev);
-extern void ql_dump_regs(struct ql_adapter *qdev);
+void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
+void ql_dump_routing_entries(struct ql_adapter *qdev);
+void ql_dump_regs(struct ql_adapter *qdev);
 #define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
 #define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
 #define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
@@ -2277,26 +2275,26 @@ extern void ql_dump_regs(struct ql_adapter *qdev);
 #endif
 
 #ifdef QL_STAT_DUMP
-extern void ql_dump_stat(struct ql_adapter *qdev);
+void ql_dump_stat(struct ql_adapter *qdev);
 #define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
 #else
 #define QL_DUMP_STAT(qdev)
 #endif
 
 #ifdef QL_DEV_DUMP
-extern void ql_dump_qdev(struct ql_adapter *qdev);
+void ql_dump_qdev(struct ql_adapter *qdev);
 #define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
 #else
 #define QL_DUMP_QDEV(qdev)
 #endif
 
 #ifdef QL_CB_DUMP
-extern void ql_dump_wqicb(struct wqicb *wqicb);
-extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
-extern void ql_dump_ricb(struct ricb *ricb);
-extern void ql_dump_cqicb(struct cqicb *cqicb);
-extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
-extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
+void ql_dump_wqicb(struct wqicb *wqicb);
+void ql_dump_tx_ring(struct tx_ring *tx_ring);
+void ql_dump_ricb(struct ricb *ricb);
+void ql_dump_cqicb(struct cqicb *cqicb);
+void ql_dump_rx_ring(struct rx_ring *rx_ring);
+void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
 #define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
 #define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
 #define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
@@ -2314,9 +2312,9 @@ extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
 #endif
 
 #ifdef QL_OB_DUMP
-extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
-extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
-extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
+void ql_dump_tx_desc(struct tx_buf_desc *tbd);
+void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
+void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
 #define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
 #define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
 #else
@@ -2325,14 +2323,14 @@ extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
 #endif
 
 #ifdef QL_IB_DUMP
-extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
+void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
 #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
 #else
 #define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
 #endif
 
 #ifdef QL_ALL_DUMP
-extern void ql_dump_all(struct ql_adapter *qdev);
+void ql_dump_all(struct ql_adapter *qdev);
 #define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
 #else
 #define QL_DUMP_ALL(qdev)
index 2553cf4503b9f83996bb72bd291fb756558644f9..a245dc18d769241bcf23d607458e33538c7ee99c 100644 (file)
@@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
 
 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
 
-static int ql_wol(struct ql_adapter *qdev);
-static void qlge_set_multicast_list(struct net_device *ndev);
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
 
 /* This hardware semaphore causes exclusive access to
  * resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
        }
 }
 
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+                                 struct ib_mac_iocb_rsp *ib_mac_rsp,
+                                 void *page, size_t *len)
+{
+       u16 *tags;
+
+       if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+               return;
+       if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+               tags = (u16 *)page;
+               /* Look for stacked vlan tags in ethertype field */
+               if (tags[6] == ETH_P_8021Q &&
+                   tags[8] == ETH_P_8021Q)
+                       *len += 2 * VLAN_HLEN;
+               else
+                       *len += VLAN_HLEN;
+       }
+}
+
 /* Process an inbound completion from an rx ring. */
 static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
                                        struct rx_ring *rx_ring,
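
ql_update_mac_hdr_len() above grows the assumed MAC header length only when the hardware is not stripping VLAN tags (NETIF_F_HW_VLAN_CTAG_RX off): it inspects the TPID/Ethertype words of the received buffer and adds one VLAN_HLEN for a single tag or two for stacked (Q-in-Q) tags. Below is a standalone sketch of the same header-length calculation on a raw Ethernet frame; the constants and byte-order handling are the example's own (it converts the on-wire Ethertype with ntohs) and are not lifted from the driver:

#include <arpa/inet.h>  /* ntohs */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN_X     14   /* dst(6) + src(6) + ethertype(2) */
#define VLAN_HLEN_X    4    /* TPID(2) + TCI(2) */
#define ETHERTYPE_VLAN 0x8100

/* Return the MAC header length of a frame, accounting for 0, 1 or 2
 * (stacked / Q-in-Q) VLAN tags in front of the payload Ethertype.
 */
static size_t mac_hdr_len(const uint8_t *frame)
{
	size_t len = ETH_HLEN_X;
	uint16_t ethertype;

	memcpy(&ethertype, frame + 12, sizeof(ethertype));
	while (ntohs(ethertype) == ETHERTYPE_VLAN &&
	       len < ETH_HLEN_X + 2 * VLAN_HLEN_X) {
		len += VLAN_HLEN_X;
		memcpy(&ethertype, frame + len - 2, sizeof(ethertype));
	}
	return len;
}

int main(void)
{
	uint8_t frame[64] = {0};

	/* single-tagged frame: TPID 0x8100 at offset 12 */
	frame[12] = 0x81; frame[13] = 0x00;     /* outer TPID */
	frame[16] = 0x08; frame[17] = 0x00;     /* inner Ethertype: IPv4 */
	printf("single tag -> header %zu bytes\n", mac_hdr_len(frame));

	/* stacked (Q-in-Q) frame: second TPID at offset 16 */
	frame[16] = 0x81; frame[17] = 0x00;
	frame[20] = 0x08; frame[21] = 0x00;
	printf("stacked tags -> header %zu bytes\n", mac_hdr_len(frame));
	return 0;
}
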
@@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
        void *addr;
        struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
        struct napi_struct *napi = &rx_ring->napi;
+       size_t hlen = ETH_HLEN;
 
        skb = netdev_alloc_skb(ndev, length);
        if (!skb) {
@@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
                goto err_out;
        }
 
+       /* Update the MAC header length*/
+       ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
        /* The max framesize filter on this chip is set higher than
         * MTU since FCoE uses 2k frames.
         */
-       if (skb->len > ndev->mtu + ETH_HLEN) {
+       if (skb->len > ndev->mtu + hlen) {
                netif_err(qdev, drv, qdev->ndev,
                          "Segment too small, dropping.\n");
                rx_ring->rx_dropped++;
                goto err_out;
        }
-       memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+       memcpy(skb_put(skb, hlen), addr, hlen);
        netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
                     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
                     length);
        skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
-                               lbq_desc->p.pg_chunk.offset+ETH_HLEN,
-                               length-ETH_HLEN);
-       skb->len += length-ETH_HLEN;
-       skb->data_len += length-ETH_HLEN;
-       skb->truesize += length-ETH_HLEN;
+                               lbq_desc->p.pg_chunk.offset + hlen,
+                               length - hlen);
+       skb->len += length - hlen;
+       skb->data_len += length - hlen;
+       skb->truesize += length - hlen;
 
        rx_ring->rx_packets++;
        rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
                                (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
                        /* Unfragmented ipv4 UDP frame. */
                        struct iphdr *iph =
-                               (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+                               (struct iphdr *)((u8 *)addr + hlen);
                        if (!(iph->frag_off &
                                htons(IP_MF|IP_OFFSET))) {
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1726,7 +1755,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
        struct bq_desc *sbq_desc;
        struct sk_buff *skb = NULL;
        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-       u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+       u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+       size_t hlen = ETH_HLEN;
 
        /*
         * Handle the header buffer if present.
@@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                        skb->data_len += length;
                        skb->truesize += length;
                        length -= length;
-                       __pskb_pull_tail(skb,
-                               (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                               VLAN_ETH_HLEN : ETH_HLEN);
+                       ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+                                             lbq_desc->p.pg_chunk.va,
+                                             &hlen);
+                       __pskb_pull_tail(skb, hlen);
                }
        } else {
                /*
@@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
                        length -= size;
                        i++;
                }
-               __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
-                               VLAN_ETH_HLEN : ETH_HLEN);
+               ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+                                     &hlen);
+               __pskb_pull_tail(skb, hlen);
        }
        return skb;
 }
@@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
        rx_ring->rx_packets++;
        rx_ring->rx_bytes += skb->len;
        skb_record_rx_queue(skb, rx_ring->cq_id);
-       if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+       if (vlan_id != 0xffff)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
        if (skb->ip_summed == CHECKSUM_UNNECESSARY)
                napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
                                        struct ib_mac_iocb_rsp *ib_mac_rsp)
 {
        u32 length = le32_to_cpu(ib_mac_rsp->data_len);
-       u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+       u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+                       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
                        ((le16_to_cpu(ib_mac_rsp->vlan_id) &
                        IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
 
@@ -2310,9 +2343,39 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
        }
 }
 
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the features to enable/disable hardware vlan accel
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+                                       netdev_features_t features)
+{
+       struct ql_adapter *qdev = netdev_priv(ndev);
+       int status = 0;
+
+       status = ql_adapter_down(qdev);
+       if (status) {
+               netif_err(qdev, link, qdev->ndev,
+                         "Failed to bring down the adapter\n");
+               return status;
+       }
+
+       /* update the features with recent change */
+       ndev->features = features;
+
+       status = ql_adapter_up(qdev);
+       if (status) {
+               netif_err(qdev, link, qdev->ndev,
+                         "Failed to bring up the adapter\n");
+               return status;
+       }
+       return status;
+}
+
 static netdev_features_t qlge_fix_features(struct net_device *ndev,
        netdev_features_t features)
 {
+       int err;
        /*
         * Since there is no support for separate rx/tx vlan accel
         * enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
        else
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 
+       /* Update the behavior of vlan accel in the adapter */
+       err = qlge_update_hw_vlan_features(ndev, features);
+       if (err)
+               return err;
+
        return features;
 }
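
qlge_fix_features() above now reinitialises the adapter through qlge_update_hw_vlan_features() when the feature set changes: it quiesces the adapter, commits the new ndev->features and brings it back up, which pairs with the NIC_RCV_CFG change in the following hunk that only sets the hardware VLAN-strip bit when NETIF_F_HW_VLAN_CTAG_RX is enabled. A small sketch of that quiesce, swap config, resume shape (device_down/device_up and the feature bits are invented names):

#include <stdbool.h>
#include <stdio.h>

#define FEAT_RX_VLAN_STRIP  (1u << 0)
#define FEAT_TX_VLAN_INSERT (1u << 1)

struct demo_adapter {
	unsigned int features;
	bool running;
};

static int device_down(struct demo_adapter *a) { a->running = false; return 0; }
static int device_up(struct demo_adapter *a)   { a->running = true;  return 0; }

/* Applying these feature bits requires reprogramming registers that are
 * only written during initialisation, so cycle the device around the change.
 */
static int update_hw_features(struct demo_adapter *a, unsigned int features)
{
	int err = device_down(a);

	if (err)
		return err;

	a->features = features;         /* commit the new feature set */

	return device_up(a);
}

int main(void)
{
	struct demo_adapter a = { .features = 0, .running = true };
	unsigned int wanted = FEAT_RX_VLAN_STRIP | FEAT_TX_VLAN_INSERT;

	if (update_hw_features(&a, wanted) == 0)
		printf("features now 0x%x, running=%d\n", a.features, a.running);
	return 0;
}
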
 
@@ -3704,8 +3772,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
        ql_write32(qdev, SYS, mask | value);
 
        /* Set the default queue, and VLAN behavior. */
-       value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
-       mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+       value = NIC_RCV_CFG_DFQ;
+       mask = NIC_RCV_CFG_DFQ_MASK;
+       if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+               value |= NIC_RCV_CFG_RV;
+               mask |= (NIC_RCV_CFG_RV << 16);
+       }
        ql_write32(qdev, NIC_RCV_CFG, (mask | value));
 
        /* Set the MPI interrupt to enabled. */
@@ -4505,7 +4577,6 @@ static void ql_release_all(struct pci_dev *pdev)
                iounmap(qdev->doorbell_area);
        vfree(qdev->mpi_coredump);
        pci_release_regions(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
@@ -4692,11 +4763,15 @@ static int qlge_probe(struct pci_dev *pdev,
 
        qdev = netdev_priv(ndev);
        SET_NETDEV_DEV(ndev, &pdev->dev);
-       ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
-               NETIF_F_TSO | NETIF_F_TSO_ECN |
-               NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
-       ndev->features = ndev->hw_features |
-               NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+       ndev->hw_features = NETIF_F_SG |
+                           NETIF_F_IP_CSUM |
+                           NETIF_F_TSO |
+                           NETIF_F_TSO_ECN |
+                           NETIF_F_HW_VLAN_CTAG_TX |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_FILTER |
+                           NETIF_F_RXCSUM;
+       ndev->features = ndev->hw_features;
        ndev->vlan_features = ndev->hw_features;
 
        if (test_bit(QL_DMA64, &qdev->flags))
index e9dc84943cfcb5fa39a8031e5a28051f5963da90..1e49ec5b22324815005bcefcfcbd9eef6e46facc 100644 (file)
@@ -1231,7 +1231,6 @@ err_out_mdio:
        mdiobus_free(lp->mii_bus);
 err_out_unmap:
        netif_napi_del(&lp->napi);
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
        pci_release_regions(pdev);
@@ -1257,7 +1256,6 @@ static void r6040_remove_one(struct pci_dev *pdev)
        pci_release_regions(pdev);
        free_netdev(dev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 
index d2e591955bdde3fd03b40dac1c9c22e3d013c23d..f2a2128165dd98b4c41598fb481c4e4f40417614 100644 (file)
@@ -2052,7 +2052,6 @@ static void cp_remove_one (struct pci_dev *pdev)
        pci_release_regions(pdev);
        pci_clear_mwi(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
 }
 
index 3ccedeb8aba03703bcdff318dff2ff888eec4bfa..50a92104dd0a65399f118b6c6505bc04e04329af 100644 (file)
@@ -727,7 +727,6 @@ static void __rtl8139_cleanup_dev (struct net_device *dev)
        pci_release_regions (pdev);
 
        free_netdev(dev);
-       pci_set_drvdata (pdev, NULL);
 }
 
 
index 3397cee89777ba4be6c888fe9711e928ef209e06..799387570766b6642ad4fdbc13f917acd358887a 100644 (file)
@@ -6811,7 +6811,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
 
        rtl_disable_msi(pdev, tp);
        rtl8169_release_board(pdev, dev, tp->mmio_addr);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static const struct net_device_ops rtl_netdev_ops = {
index 5cd831ebfa83b0a95c472926a0105c521f7c352a..eaf11e47334fbcbca50cc8b6b27084f46f4b47c4 100644 (file)
@@ -483,7 +483,7 @@ static struct sh_eth_cpu_data sh7757_data = {
        .register_type  = SH_ETH_REG_FAST_SH4,
 
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
-       .rmcr_value     = 0x00000001,
+       .rmcr_value     = RMCR_RNC,
 
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -561,7 +561,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
        .fdr_value      = 0x0000072f,
-       .rmcr_value     = 0x00000001,
+       .rmcr_value     = RMCR_RNC,
 
        .irq_flags      = IRQF_SHARED,
        .apr            = 1,
@@ -688,12 +688,16 @@ static struct sh_eth_cpu_data r8a7740_data = {
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
                          EESR_TDE | EESR_ECI,
+       .fdr_value      = 0x0000070f,
+       .rmcr_value     = RMCR_RNC,
 
        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
+       .rpadir         = 1,
+       .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
@@ -868,7 +872,7 @@ static void update_mac_address(struct net_device *ndev)
 static void read_mac_address(struct net_device *ndev, unsigned char *mac)
 {
        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
-               memcpy(ndev->dev_addr, mac, 6);
+               memcpy(ndev->dev_addr, mac, ETH_ALEN);
        } else {
                ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
                ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
index a0db02c63b1157930ca0fca070dd733aa39b7c54..f32c1692d31017186e0e33aaf408eddf7d16a3fa 100644 (file)
@@ -321,6 +321,9 @@ enum TD_STS_BIT {
 #define TD_TFP (TD_TFP1|TD_TFP0)
 
 /* RMCR */
+enum RMCR_BIT {
+       RMCR_RNC = 0x00000001,
+};
 #define DEFAULT_RMCR_VALUE     0x00000000
 
 /* ECMR */
index 9f18ae984f9ed38386b16e5284d4d10864687c4b..676c3c057bfba69e69b6116e212e2d03fdff330c 100644 (file)
@@ -285,6 +285,181 @@ static int efx_ef10_free_vis(struct efx_nic *efx)
        return rc;
 }
 
+#ifdef EFX_USE_PIO
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
+       unsigned int i;
+       int rc;
+
+       BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
+
+       for (i = 0; i < nic_data->n_piobufs; i++) {
+               MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
+                              nic_data->piobuf_handle[i]);
+               rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
+                                 NULL, 0, NULL);
+               WARN_ON(rc);
+       }
+
+       nic_data->n_piobufs = 0;
+}
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
+       unsigned int i;
+       size_t outlen;
+       int rc = 0;
+
+       BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
+
+       for (i = 0; i < n; i++) {
+               rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
+                                 outbuf, sizeof(outbuf), &outlen);
+               if (rc)
+                       break;
+               if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+                       rc = -EIO;
+                       break;
+               }
+               nic_data->piobuf_handle[i] =
+                       MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+               netif_dbg(efx, probe, efx->net_dev,
+                         "allocated PIO buffer %u handle %x\n", i,
+                         nic_data->piobuf_handle[i]);
+       }
+
+       nic_data->n_piobufs = i;
+       if (rc)
+               efx_ef10_free_piobufs(efx);
+       return rc;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       MCDI_DECLARE_BUF(inbuf,
+                        max(MC_CMD_LINK_PIOBUF_IN_LEN,
+                            MC_CMD_UNLINK_PIOBUF_IN_LEN));
+       struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
+       unsigned int offset, index;
+       int rc;
+
+       BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
+       BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
+
+       /* Link a buffer to each VI in the write-combining mapping */
+       for (index = 0; index < nic_data->n_piobufs; ++index) {
+               MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
+                              nic_data->piobuf_handle[index]);
+               MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
+                              nic_data->pio_write_vi_base + index);
+               rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+                                 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+                                 NULL, 0, NULL);
+               if (rc) {
+                       netif_err(efx, drv, efx->net_dev,
+                                 "failed to link VI %u to PIO buffer %u (%d)\n",
+                                 nic_data->pio_write_vi_base + index, index,
+                                 rc);
+                       goto fail;
+               }
+               netif_dbg(efx, probe, efx->net_dev,
+                         "linked VI %u to PIO buffer %u\n",
+                         nic_data->pio_write_vi_base + index, index);
+       }
+
+       /* Link a buffer to each TX queue */
+       efx_for_each_channel(channel, efx) {
+               efx_for_each_channel_tx_queue(tx_queue, channel) {
+                       /* We assign the PIO buffers to queues in
+                        * reverse order to allow for the following
+                        * special case.
+                        */
+                       offset = ((efx->tx_channel_offset + efx->n_tx_channels -
+                                  tx_queue->channel->channel - 1) *
+                                 efx_piobuf_size);
+                       index = offset / ER_DZ_TX_PIOBUF_SIZE;
+                       offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+
+                       /* When the host page size is 4K, the first
+                        * host page in the WC mapping may be within
+                        * the same VI page as the last TX queue.  We
+                        * can only link one buffer to each VI.
+                        */
+                       if (tx_queue->queue == nic_data->pio_write_vi_base) {
+                               BUG_ON(index != 0);
+                               rc = 0;
+                       } else {
+                               MCDI_SET_DWORD(inbuf,
+                                              LINK_PIOBUF_IN_PIOBUF_HANDLE,
+                                              nic_data->piobuf_handle[index]);
+                               MCDI_SET_DWORD(inbuf,
+                                              LINK_PIOBUF_IN_TXQ_INSTANCE,
+                                              tx_queue->queue);
+                               rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
+                                                 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
+                                                 NULL, 0, NULL);
+                       }
+
+                       if (rc) {
+                               /* This is non-fatal; the TX path just
+                                * won't use PIO for this queue
+                                */
+                               netif_err(efx, drv, efx->net_dev,
+                                         "failed to link VI %u to PIO buffer %u (%d)\n",
+                                         tx_queue->queue, index, rc);
+                               tx_queue->piobuf = NULL;
+                       } else {
+                               tx_queue->piobuf =
+                                       nic_data->pio_write_base +
+                                       index * EFX_VI_PAGE_SIZE + offset;
+                               tx_queue->piobuf_offset = offset;
+                               netif_dbg(efx, probe, efx->net_dev,
+                                         "linked VI %u to PIO buffer %u offset %x addr %p\n",
+                                         tx_queue->queue, index,
+                                         tx_queue->piobuf_offset,
+                                         tx_queue->piobuf);
+                       }
+               }
+       }
+
+       return 0;
+
+fail:
+       while (index--) {
+               MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
+                              nic_data->pio_write_vi_base + index);
+               efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
+                            inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+                            NULL, 0, NULL);
+       }
+       return rc;
+}
+
+#else /* !EFX_USE_PIO */
+
+static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+{
+       return n == 0 ? 0 : -ENOBUFS;
+}
+
+static int efx_ef10_link_piobufs(struct efx_nic *efx)
+{
+       return 0;
+}
+
+static void efx_ef10_free_piobufs(struct efx_nic *efx)
+{
+}
+
+#endif /* EFX_USE_PIO */
+
 static void efx_ef10_remove(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
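
The new EF10 PIO support above allocates a pool of PIO buffers over MCDI (MC_CMD_ALLOC_PIOBUF), then links one buffer to each VI in the write-combining mapping and to each TX queue; because every buffer is carved into efx_piobuf_size copy regions, each TX channel ends up at an (index, offset) pair inside a buffer, handed out in reverse channel order as the in-code comment explains. A userspace sketch of that index/offset carving (PIOBUF_SIZE, COPY_SIZE and the channel count are placeholders, not the hardware's values):

#include <stdio.h>

#define PIOBUF_SIZE 2048u       /* placeholder for ER_DZ_TX_PIOBUF_SIZE */
#define COPY_SIZE    256u       /* placeholder for efx_piobuf_size */

/* Map TX channel n (0..n_channels-1) to a (buffer index, byte offset)
 * pair, handing out copy regions in reverse channel order.
 */
static void channel_to_piobuf(unsigned int channel, unsigned int n_channels,
			      unsigned int *index, unsigned int *offset)
{
	unsigned int byte_off = (n_channels - channel - 1) * COPY_SIZE;

	*index  = byte_off / PIOBUF_SIZE;
	*offset = byte_off % PIOBUF_SIZE;
}

int main(void)
{
	unsigned int n_channels = 10, ch, idx, off;

	for (ch = 0; ch < n_channels; ch++) {
		channel_to_piobuf(ch, n_channels, &idx, &off);
		printf("tx channel %u -> piobuf %u offset 0x%x\n", ch, idx, off);
	}
	return 0;
}

With the placeholder sizes, the last two channels spill into a second buffer and the rest pack into the first, which is the same packing shape the driver produces at the real buffer and copy-region sizes.
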
@@ -295,9 +470,15 @@ static void efx_ef10_remove(struct efx_nic *efx)
        /* This needs to be after efx_ptp_remove_channel() with no filters */
        efx_ef10_rx_free_indir_table(efx);
 
+       if (nic_data->wc_membase)
+               iounmap(nic_data->wc_membase);
+
        rc = efx_ef10_free_vis(efx);
        WARN_ON(rc != 0);
 
+       if (!nic_data->must_restore_piobufs)
+               efx_ef10_free_piobufs(efx);
+
        efx_mcdi_fini(efx);
        efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
        kfree(nic_data);
@@ -330,12 +511,126 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx,
        return 0;
 }
 
+/* Note that the failure path of this function does not free
+ * resources, as this will be done by efx_ef10_remove().
+ */
 static int efx_ef10_dimension_resources(struct efx_nic *efx)
 {
-       unsigned int n_vis =
-               max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       unsigned int uc_mem_map_size, wc_mem_map_size;
+       unsigned int min_vis, pio_write_vi_base, max_vis;
+       void __iomem *membase;
+       int rc;
+
+       min_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef EFX_USE_PIO
+       /* Try to allocate PIO buffers if wanted and if the full
+        * number of PIO buffers would be sufficient to allocate one
+        * copy-buffer per TX channel.  Failure is non-fatal, as there
+        * are only a small number of PIO buffers shared between all
+        * functions of the controller.
+        */
+       if (efx_piobuf_size != 0 &&
+           ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+           efx->n_tx_channels) {
+               unsigned int n_piobufs =
+                       DIV_ROUND_UP(efx->n_tx_channels,
+                                    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+
+               rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
+               if (rc)
+                       netif_err(efx, probe, efx->net_dev,
+                                 "failed to allocate PIO buffers (%d)\n", rc);
+               else
+                       netif_dbg(efx, probe, efx->net_dev,
+                                 "allocated %u PIO buffers\n", n_piobufs);
+       }
+#else
+       nic_data->n_piobufs = 0;
+#endif
+
+       /* PIO buffers should be mapped with write-combining enabled,
+        * and we want to make single UC and WC mappings rather than
+        * several of each (in fact that's the only option if host
+        * page size is >4K).  So we may allocate some extra VIs just
+        * for writing PIO buffers through.
+        */
+       uc_mem_map_size = PAGE_ALIGN((min_vis - 1) * EFX_VI_PAGE_SIZE +
+                                    ER_DZ_TX_PIOBUF);
+       if (nic_data->n_piobufs) {
+               pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
+               wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
+                                              nic_data->n_piobufs) *
+                                             EFX_VI_PAGE_SIZE) -
+                                  uc_mem_map_size);
+               max_vis = pio_write_vi_base + nic_data->n_piobufs;
+       } else {
+               pio_write_vi_base = 0;
+               wc_mem_map_size = 0;
+               max_vis = min_vis;
+       }
+
+       /* In case the last attached driver failed to free VIs, do it now */
+       rc = efx_ef10_free_vis(efx);
+       if (rc != 0)
+               return rc;
+
+       rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
+       if (rc != 0)
+               return rc;
+
+       /* If we didn't get enough VIs to map all the PIO buffers, free the
+        * PIO buffers
+        */
+       if (nic_data->n_piobufs &&
+           nic_data->n_allocated_vis <
+           pio_write_vi_base + nic_data->n_piobufs) {
+               netif_dbg(efx, probe, efx->net_dev,
+                         "%u VIs are not sufficient to map %u PIO buffers\n",
+                         nic_data->n_allocated_vis, nic_data->n_piobufs);
+               efx_ef10_free_piobufs(efx);
+       }
+
+       /* Shrink the original UC mapping of the memory BAR */
+       membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
+       if (!membase) {
+               netif_err(efx, probe, efx->net_dev,
+                         "could not shrink memory BAR to %x\n",
+                         uc_mem_map_size);
+               return -ENOMEM;
+       }
+       iounmap(efx->membase);
+       efx->membase = membase;
+
+       /* Set up the WC mapping if needed */
+       if (wc_mem_map_size) {
+               nic_data->wc_membase = ioremap_wc(efx->membase_phys +
+                                                 uc_mem_map_size,
+                                                 wc_mem_map_size);
+               if (!nic_data->wc_membase) {
+                       netif_err(efx, probe, efx->net_dev,
+                                 "could not allocate WC mapping of size %x\n",
+                                 wc_mem_map_size);
+                       return -ENOMEM;
+               }
+               nic_data->pio_write_vi_base = pio_write_vi_base;
+               nic_data->pio_write_base =
+                       nic_data->wc_membase +
+                       (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
+                        uc_mem_map_size);
 
-       return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+               rc = efx_ef10_link_piobufs(efx);
+               if (rc)
+                       efx_ef10_free_piobufs(efx);
+       }
+
+       netif_dbg(efx, probe, efx->net_dev,
+                 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
+                 &efx->membase_phys, efx->membase, uc_mem_map_size,
+                 nic_data->wc_membase, wc_mem_map_size);
+
+       return 0;
 }
 
 static int efx_ef10_init_nic(struct efx_nic *efx)
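
efx_ef10_dimension_resources() above splits the memory BAR into a normal uncacheable (UC) mapping that covers the ordinary VIs and an optional write-combining (WC) mapping that covers only the extra VIs reserved for PIO writes; both sizes are page-aligned and the WC region starts where the UC region ends. A small sketch of that size arithmetic (VI_PAGE, PIO_REGION_OFFSET and page_align are illustrative stand-ins for EFX_VI_PAGE_SIZE, ER_DZ_TX_PIOBUF and PAGE_ALIGN, with made-up sizes):

#include <stdio.h>

#define SYS_PAGE           4096u
#define VI_PAGE            8192u   /* stand-in for EFX_VI_PAGE_SIZE */
#define PIO_REGION_OFFSET  4096u   /* stand-in for ER_DZ_TX_PIOBUF */

static unsigned int page_align(unsigned int x)
{
	return (x + SYS_PAGE - 1) & ~(SYS_PAGE - 1);
}

int main(void)
{
	unsigned int min_vis = 6, n_piobufs = 2;
	unsigned int uc_size, wc_size, pio_vi_base;

	/* UC mapping: up to the PIO region of the last ordinary VI */
	uc_size = page_align((min_vis - 1) * VI_PAGE + PIO_REGION_OFFSET);

	/* WC mapping: the extra VIs reserved for PIO writes, starting
	 * where the UC mapping ends
	 */
	pio_vi_base = uc_size / VI_PAGE;
	wc_size = page_align((pio_vi_base + n_piobufs) * VI_PAGE) - uc_size;

	printf("UC %#x bytes, PIO VIs start at VI %u, WC %#x bytes\n",
	       uc_size, pio_vi_base, wc_size);
	return 0;
}
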
@@ -359,6 +654,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
                nic_data->must_realloc_vis = false;
        }
 
+       if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
+               rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
+               if (rc == 0) {
+                       rc = efx_ef10_link_piobufs(efx);
+                       if (rc)
+                               efx_ef10_free_piobufs(efx);
+               }
+
+               /* Log an error on failure, but this is non-fatal */
+               if (rc)
+                       netif_err(efx, drv, efx->net_dev,
+                                 "failed to restore PIO buffers (%d)\n", rc);
+               nic_data->must_restore_piobufs = false;
+       }
+
        efx_ef10_rx_push_indir_table(efx);
        return 0;
 }
@@ -444,6 +754,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
        EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
        EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+       EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+       EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+       EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+       EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+       EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
+       EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
+       EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+       EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+       EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+       EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+       EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+       EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
 };
 
 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |          \
@@ -498,44 +820,72 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
                                  (1ULL << EF10_STAT_rx_length_error))
 
-#if BITS_PER_LONG == 64
-#define STAT_MASK_BITMAP(bits) (bits)
-#else
-#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
-#endif
-
-static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
-{
-       static const unsigned long hunt_40g_stat_mask[] = {
-               STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
-                                HUNT_40G_EXTRA_STAT_MASK)
-       };
-       static const unsigned long hunt_10g_only_stat_mask[] = {
-               STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
-                                HUNT_10G_ONLY_STAT_MASK)
-       };
+/* These statistics are only provided if the firmware supports the
+ * capability PM_AND_RXDP_COUNTERS.
+ */
+#define HUNT_PM_AND_RXDP_STAT_MASK (                                   \
+       (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |                   \
+       (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |                 \
+       (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |                    \
+       (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |                  \
+       (1ULL << EF10_STAT_rx_pm_trunc_qbb) |                           \
+       (1ULL << EF10_STAT_rx_pm_discard_qbb) |                         \
+       (1ULL << EF10_STAT_rx_pm_discard_mapping) |                     \
+       (1ULL << EF10_STAT_rx_dp_q_disabled_packets) |                  \
+       (1ULL << EF10_STAT_rx_dp_di_dropped_packets) |                  \
+       (1ULL << EF10_STAT_rx_dp_streaming_packets) |                   \
+       (1ULL << EF10_STAT_rx_dp_emerg_fetch) |                         \
+       (1ULL << EF10_STAT_rx_dp_emerg_wait))
+
+static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
+{
+       u64 raw_mask = HUNT_COMMON_STAT_MASK;
        u32 port_caps = efx_mcdi_phy_get_caps(efx);
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
 
        if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
-               return hunt_40g_stat_mask;
+               raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
        else
-               return hunt_10g_only_stat_mask;
+               raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+
+       if (nic_data->datapath_caps &
+           (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
+               raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
+
+       return raw_mask;
+}
+
+static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
+{
+       u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+
+#if BITS_PER_LONG == 64
+       mask[0] = raw_mask;
+#else
+       mask[0] = raw_mask & 0xffffffff;
+       mask[1] = raw_mask >> 32;
+#endif
 }
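The two helpers above turn the 64-bit raw mask into an unsigned long bitmap so the generic stats code can consume it on both 32-bit and 64-bit hosts. Purely as illustration of that bitmap idiom (this function is not in the patch; the use of the descriptor's .name field mirrors what efx_nic_describe_stats() does):

/* Illustration only: walk the statistics enabled by the mask helper above. */
static void example_log_enabled_stats(struct efx_nic *efx)
{
        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
        unsigned int index;

        efx_ef10_get_stat_mask(efx, mask);
        for_each_set_bit(index, mask, EF10_STAT_COUNT)
                netif_dbg(efx, drv, efx->net_dev, "stat %u (%s) enabled\n",
                          index, efx_ef10_stat_desc[index].name);
}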
 
 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
 {
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+
+       efx_ef10_get_stat_mask(efx, mask);
        return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
-                                     efx_ef10_stat_mask(efx), names);
+                                     mask, names);
 }
 
 static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
        __le64 generation_start, generation_end;
        u64 *stats = nic_data->stats;
        __le64 *dma_stats;
 
+       efx_ef10_get_stat_mask(efx, mask);
+
        dma_stats = efx->stats_buffer.addr;
        nic_data = efx->nic_data;
 
@@ -543,8 +893,9 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
        if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
                return 0;
        rmb();
-       efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+       efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
                             stats, efx->stats_buffer.addr, false);
+       rmb();
        generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
        if (generation_end != generation_start)
                return -EAGAIN;
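The generation_start/generation_end comparison above is the consumer half of a seqlock-style protocol with the MC: the added rmb() orders the stats copy before the second generation read, and a mismatch means the buffer was being rewritten, so the function returns -EAGAIN and the caller retries. A minimal sketch of such a caller-side retry, consistent with the "wait up to 10ms" comment in the next hunk; the exact loop bounds here are assumptions, not lifted from this patch.

/* Sketch only: give the MC up to ~10ms (100 * 100us) to finish writing
 * the DMA statistics buffer before accepting a possibly stale read.
 */
static void example_update_stats_with_retry(struct efx_nic *efx)
{
        int retry;

        for (retry = 0; retry < 100; ++retry) {
                if (efx_ef10_try_update_nic_stats(efx) != -EAGAIN)
                        break;
                udelay(100);
        }
}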
@@ -563,12 +914,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
                                    struct rtnl_link_stats64 *core_stats)
 {
-       const unsigned long *mask = efx_ef10_stat_mask(efx);
+       DECLARE_BITMAP(mask, EF10_STAT_COUNT);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        u64 *stats = nic_data->stats;
        size_t stats_count = 0, index;
        int retry;
 
+       efx_ef10_get_stat_mask(efx, mask);
+
        /* If we're unlucky enough to read statistics during the DMA, wait
         * up to 10ms for it to finish (typically takes <500us)
         */
@@ -716,6 +1069,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
        /* All our allocations have been reset */
        nic_data->must_realloc_vis = true;
        nic_data->must_restore_filters = true;
+       nic_data->must_restore_piobufs = true;
        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
 
        /* The datapath firmware might have been changed */
@@ -2137,7 +2491,7 @@ out_unlock:
        return rc;
 }
 
-void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
 {
        /* no need to do anything here on EF10 */
 }
index b3f4e3755fd97cd7c38130597d54e22c20b591d6..207ac9a1e3de989d0f5e4d588c27b6035eec31ec 100644 (file)
 #define        ESF_DZ_TX_PIO_TYPE_WIDTH 1
 #define        ESF_DZ_TX_PIO_OPT_LBN 60
 #define        ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define        ESE_DZ_TX_OPTION_DESC_PIO 1
 #define        ESF_DZ_TX_PIO_CONT_LBN 59
 #define        ESF_DZ_TX_PIO_CONT_WIDTH 1
 #define        ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
index 07c9bc4c61bc0d15484e79798b36720eff8ff741..2e27837ce6a289dc033c806e613f8dbca42496b9 100644 (file)
@@ -1121,7 +1121,7 @@ static int efx_init_io(struct efx_nic *efx)
         */
        while (dma_mask > 0x7fffffffUL) {
                if (dma_supported(&pci_dev->dev, dma_mask)) {
-                       rc = dma_set_mask(&pci_dev->dev, dma_mask);
+                       rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
                        if (rc == 0)
                                break;
                }
@@ -1134,16 +1134,6 @@ static int efx_init_io(struct efx_nic *efx)
        }
        netif_dbg(efx, probe, efx->net_dev,
                  "using DMA mask %llx\n", (unsigned long long) dma_mask);
-       rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
-       if (rc) {
-               /* dma_set_coherent_mask() is not *allowed* to
-                * fail with a mask that dma_set_mask() accepted,
-                * but just in case...
-                */
-               netif_err(efx, probe, efx->net_dev,
-                         "failed to set consistent DMA mask\n");
-               goto fail2;
-       }
 
        efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
        rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
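The two hunks above collapse the old two-step mask setup (dma_set_mask() followed by dma_set_coherent_mask()) into the combined dma_set_mask_and_coherent() helper, which is why the separate coherent-mask error path could be deleted. A standalone sketch of the same idiom; the 46-bit starting value is only an example and is not taken from this driver.

/* Sketch: set the streaming and coherent DMA masks in one call,
 * falling back to 32-bit if the wider mask is rejected.
 */
static int example_set_dma_masks(struct pci_dev *pci_dev)
{
        if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(46)) &&
            dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)))
                return -EIO;
        return 0;
}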
index 34d00f5771fe4c521177f17061acb9abac846c50..b8235ee5d7d739ae879aaba810620740951af8ff 100644 (file)
 #define EFX_MEM_BAR 2
 
 /* TX */
-extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
-extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern netdev_tx_t
-efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
-extern netdev_tx_t
-efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
-extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
-extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
-extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+                               struct net_device *net_dev);
+netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+extern unsigned int efx_piobuf_size;
 
 /* RX */
-extern void efx_rx_config_page_split(struct efx_nic *efx);
-extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
-extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_slow_fill(unsigned long context);
-extern void __efx_rx_packet(struct efx_channel *channel);
-extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
-                         unsigned int index, unsigned int n_frags,
-                         unsigned int len, u16 flags);
+void efx_rx_config_page_split(struct efx_nic *efx);
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_rx_slow_fill(unsigned long context);
+void __efx_rx_packet(struct efx_channel *channel);
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+                  unsigned int n_frags, unsigned int len, u16 flags);
 static inline void efx_rx_flush_packet(struct efx_channel *channel)
 {
        if (channel->rx_pkt_n_frags)
                __efx_rx_packet(channel);
 }
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 
 #define EFX_MAX_DMAQ_SIZE 4096UL
 #define EFX_DEFAULT_DMAQ_SIZE 1024UL
@@ -162,9 +161,9 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
        return efx->type->filter_get_rx_ids(efx, priority, buf, size);
 }
 #ifdef CONFIG_RFS_ACCEL
-extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
-                         u16 rxq_index, u32 flow_id);
-extern bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                  u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota);
 static inline void efx_filter_rfs_expire(struct efx_channel *channel)
 {
        if (channel->rfs_filters_added >= 60 &&
@@ -176,50 +175,48 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
 static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
 #define efx_filter_rfs_enabled() 0
 #endif
-extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
 
 /* Channels */
-extern int efx_channel_dummy_op_int(struct efx_channel *channel);
-extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern int
-efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+int efx_channel_dummy_op_int(struct efx_channel *channel);
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
 
 /* Ports */
-extern int efx_reconfigure_port(struct efx_nic *efx);
-extern int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+int __efx_reconfigure_port(struct efx_nic *efx);
 
 /* Ethtool support */
 extern const struct ethtool_ops efx_ethtool_ops;
 
 /* Reset handling */
-extern int efx_reset(struct efx_nic *efx, enum reset_type method);
-extern void efx_reset_down(struct efx_nic *efx, enum reset_type method);
-extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
-extern int efx_try_recovery(struct efx_nic *efx);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_try_recovery(struct efx_nic *efx);
 
 /* Global */
-extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
-                                  unsigned int rx_usecs, bool rx_adaptive,
-                                  bool rx_may_override_tx);
-extern void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
-                                  unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+                           unsigned int rx_usecs, bool rx_adaptive,
+                           bool rx_may_override_tx);
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+                           unsigned int *rx_usecs, bool *rx_adaptive);
 
 /* Dummy PHY ops for PHY drivers */
-extern int efx_port_dummy_op_int(struct efx_nic *efx);
-extern void efx_port_dummy_op_void(struct efx_nic *efx);
-
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
 
 /* MTD */
 #ifdef CONFIG_SFC_MTD
-extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
-                      size_t n_parts, size_t sizeof_part);
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+               size_t n_parts, size_t sizeof_part);
 static inline int efx_mtd_probe(struct efx_nic *efx)
 {
        return efx->type->mtd_probe(efx);
 }
-extern void efx_mtd_rename(struct efx_nic *efx);
-extern void efx_mtd_remove(struct efx_nic *efx);
+void efx_mtd_rename(struct efx_nic *efx);
+void efx_mtd_remove(struct efx_nic *efx);
 #else
 static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
 static inline void efx_mtd_rename(struct efx_nic *efx) {}
@@ -241,9 +238,9 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel)
        efx_schedule_channel(channel);
 }
 
-extern void efx_link_status_changed(struct efx_nic *efx);
-extern void efx_link_set_advertising(struct efx_nic *efx, u32);
-extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+void efx_link_status_changed(struct efx_nic *efx);
+void efx_link_set_advertising(struct efx_nic *efx, u32);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
 
 static inline void efx_device_detach_sync(struct efx_nic *efx)
 {
index 5b471cf5c323a2aabae6ae3c55404194e6f90c42..1f529fa2edb10008a5c03944bea2fb0d995e1e61 100644 (file)
@@ -70,6 +70,7 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
        EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
        EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
        EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+       EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
        EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -1035,8 +1036,8 @@ static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
        return 0;
 }
 
-int efx_ethtool_get_ts_info(struct net_device *net_dev,
-                           struct ethtool_ts_info *ts_info)
+static int efx_ethtool_get_ts_info(struct net_device *net_dev,
+                                  struct ethtool_ts_info *ts_info)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
 
index 96ce507d8602eb4a8f40a7cfa034f5cf3dede3c3..4d3f119b67b38ec35719ebac40d59f49ea9b844f 100644 (file)
 #define EFX_USE_QWORD_IO 1
 #endif
 
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+
 #ifdef EFX_USE_QWORD_IO
 static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
                                  unsigned int reg)
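The new EFX_USE_PIO guard above lets the PIO transmit paths compile away entirely on architectures without ioremap_wc(). A hedged sketch of how such a guard is typically consumed; the function below is illustrative and is not the driver's actual transmit logic.

#ifdef EFX_USE_PIO
/* Sketch: PIO is only worth attempting when a WC-mapped buffer is bound
 * to the queue and the frame fits within efx_piobuf_size.
 */
static bool example_pio_candidate(struct efx_tx_queue *tx_queue, size_t len)
{
        return tx_queue->piobuf && len <= efx_piobuf_size;
}
#else
static bool example_pio_candidate(struct efx_tx_queue *tx_queue, size_t len)
{
        return false;
}
#endif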
index c082562dbf4ee8d96388dc21e0b2da1803fe84f2..366c8e3e37844c8e2d8840a4662467067e937b3b 100644 (file)
@@ -963,7 +963,7 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
                               bool *was_attached)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
        size_t outlen;
        int rc;
 
@@ -981,6 +981,22 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
                goto fail;
        }
 
+       /* We currently assume we have control of the external link
+        * and are completely trusted by firmware.  Abort probing
+        * if that's not true for this function.
+        */
+       if (driver_operating &&
+           outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
+           (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
+            (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+             1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
+           (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+            1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
+               netif_err(efx, probe, efx->net_dev,
+                         "This driver version only supports one function per port\n");
+               return -ENODEV;
+       }
+
        if (was_attached != NULL)
                *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
        return 0;
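For readability, the flag test added above requires that the firmware report both the LINKCTRL and TRUSTED bits for this function; an equivalent restatement with a named mask (the local variable names are illustrative, not from the patch):

        u32 wanted = (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL) |
                     (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED);
        u32 flags = MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS);

        if (driver_operating &&
            outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
            (flags & wanted) != wanted)
                return -ENODEV;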
index c34d0d4e10ee2e0d453b3f192acaed86c5d75301..656a3277c2b210e69ffd028d059ce10b809e8db8 100644 (file)
@@ -108,38 +108,35 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
 }
 #endif
 
-extern int efx_mcdi_init(struct efx_nic *efx);
-extern void efx_mcdi_fini(struct efx_nic *efx);
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
 
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
-                       const efx_dword_t *inbuf, size_t inlen,
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+                size_t inlen, efx_dword_t *outbuf, size_t outlen,
+                size_t *outlen_actual);
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+                      const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
                        efx_dword_t *outbuf, size_t outlen,
                        size_t *outlen_actual);
 
-extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
-                             const efx_dword_t *inbuf, size_t inlen);
-extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
-                              efx_dword_t *outbuf, size_t outlen,
-                              size_t *outlen_actual);
-
 typedef void efx_mcdi_async_completer(struct efx_nic *efx,
                                      unsigned long cookie, int rc,
                                      efx_dword_t *outbuf,
                                      size_t outlen_actual);
-extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
-                             const efx_dword_t *inbuf, size_t inlen,
-                             size_t outlen,
-                             efx_mcdi_async_completer *complete,
-                             unsigned long cookie);
+int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+                      const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+                      efx_mcdi_async_completer *complete,
+                      unsigned long cookie);
 
-extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
-extern void efx_mcdi_mode_poll(struct efx_nic *efx);
-extern void efx_mcdi_mode_event(struct efx_nic *efx);
-extern void efx_mcdi_flush_async(struct efx_nic *efx);
+int efx_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_mcdi_mode_poll(struct efx_nic *efx);
+void efx_mcdi_mode_event(struct efx_nic *efx);
+void efx_mcdi_flush_async(struct efx_nic *efx);
 
-extern void efx_mcdi_process_event(struct efx_channel *channel,
-                                  efx_qword_t *event);
-extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 
 /* We expect that 16- and 32-bit fields in MCDI requests and responses
  * are appropriately aligned, but 64-bit fields are only
@@ -275,55 +272,54 @@ extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 #define MCDI_EVENT_FIELD(_ev, _field)                  \
        EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
 
-extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
-                                 u16 *fw_subtype_list, u32 *capabilities);
-extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
-                            u32 dest_evq);
-extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
-extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
-                              size_t *size_out, size_t *erase_size_out,
-                              bool *protected_out);
-extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
-extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
-extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
-                                        const u8 *mac, int *id_out);
-extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
-extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
-extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
-extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-extern int efx_mcdi_port_probe(struct efx_nic *efx);
-extern void efx_mcdi_port_remove(struct efx_nic *efx);
-extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-extern int efx_mcdi_port_get_number(struct efx_nic *efx);
-extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
-extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
-extern int efx_mcdi_set_mac(struct efx_nic *efx);
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+                          u16 *fw_subtype_list, u32 *capabilities);
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+                       size_t *size_out, size_t *erase_size_out,
+                       bool *protected_out);
+int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_handle_assertion(struct efx_nic *efx);
+void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+                                 int *id_out);
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+int efx_mcdi_set_mac(struct efx_nic *efx);
 #define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
-extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
-extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
-extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
-extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
 
 #ifdef CONFIG_SFC_MCDI_MON
-extern int efx_mcdi_mon_probe(struct efx_nic *efx);
-extern void efx_mcdi_mon_remove(struct efx_nic *efx);
+int efx_mcdi_mon_probe(struct efx_nic *efx);
+void efx_mcdi_mon_remove(struct efx_nic *efx);
 #else
 static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
 static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
 #endif
 
 #ifdef CONFIG_SFC_MTD
-extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
-                            size_t len, size_t *retlen, u8 *buffer);
-extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
-extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
-                             size_t len, size_t *retlen, const u8 *buffer);
-extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
-extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+                     size_t *retlen, u8 *buffer);
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+                      size_t *retlen, const u8 *buffer);
+int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
 #endif
 
 #endif /* EFX_MCDI_H */
index b5cf62492f8e77ff5bf2a8f0504ae7df4cf8c2ef..e0a63ddb7a6ceb1246dc9a7a7349ce7efffa0d9e 100644 (file)
 #define          MC_CMD_MAC_RX_LANES01_DISP_ERR  0x39 /* enum */
 #define          MC_CMD_MAC_RX_LANES23_DISP_ERR  0x3a /* enum */
 #define          MC_CMD_MAC_RX_MATCH_FAULT  0x3b /* enum */
-#define          MC_CMD_GMAC_DMABUF_START  0x40 /* enum */
-#define          MC_CMD_GMAC_DMABUF_END    0x5f /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW  0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW  0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_VFIFO_FULL  0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_VFIFO_FULL  0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_QBB  0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_QBB  0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_MAPPING  0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_Q_DISABLED_PKTS  0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_DI_DROPPED_PKTS  0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_STREAMING_PKTS  0x46
+/* enum: RXDP counter: Number of times an emergency descriptor fetch was
+ * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS  0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS  0x48
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define          MC_CMD_GMAC_DMABUF_START  0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define          MC_CMD_GMAC_DMABUF_END    0x5f
 #define          MC_CMD_MAC_GENERATION_END 0x60 /* enum */
 #define          MC_CMD_MAC_NSTATS  0x61 /* enum */
 
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
 /* RxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
index 16824fecc5ee5c652a34179aeba4130c93fbc5fb..4a2dc4c281b730fd3b415c4714657c8ead4bdad7 100644 (file)
@@ -20,7 +20,7 @@
 
 static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
 static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
-extern unsigned efx_mdio_id_oui(u32 id);
+unsigned efx_mdio_id_oui(u32 id);
 
 static inline int efx_mdio_read(struct efx_nic *efx, int devad, int addr)
 {
@@ -56,7 +56,7 @@ static inline bool efx_mdio_phyxgxs_lane_sync(struct efx_nic *efx)
        return sync;
 }
 
-extern const char *efx_mdio_mmd_name(int mmd);
+const char *efx_mdio_mmd_name(int mmd);
 
 /*
  * Reset a specific MMD and wait for reset to clear.
@@ -64,30 +64,29 @@ extern const char *efx_mdio_mmd_name(int mmd);
  *
  * This function will sleep
  */
-extern int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd,
-                             int spins, int spintime);
+int efx_mdio_reset_mmd(struct efx_nic *efx, int mmd, int spins, int spintime);
 
 /* As efx_mdio_check_mmd but for multiple MMDs */
 int efx_mdio_check_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Check the link status of specified mmds in bit mask */
-extern bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
+bool efx_mdio_links_ok(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Generic transmit disable support through PMAPMD */

-extern void efx_mdio_transmit_disable(struct efx_nic *efx);
+void efx_mdio_transmit_disable(struct efx_nic *efx);
 
 /* Generic part of reconfigure: set/clear loopback bits */
-extern void efx_mdio_phy_reconfigure(struct efx_nic *efx);
+void efx_mdio_phy_reconfigure(struct efx_nic *efx);
 
 /* Set the power state of the specified MMDs */
-extern void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
-                                    int low_power, unsigned int mmd_mask);
+void efx_mdio_set_mmds_lpower(struct efx_nic *efx, int low_power,
+                             unsigned int mmd_mask);
 
 /* Set (some of) the PHY settings over MDIO */
-extern int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
+int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
 
 /* Push advertising flags and restart autonegotiation */
-extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
+void efx_mdio_an_reconfigure(struct efx_nic *efx);
 
 /* Get pause parameters from AN if available (otherwise return
  * requested pause parameters)
@@ -95,8 +94,7 @@ extern void efx_mdio_an_reconfigure(struct efx_nic *efx);
 u8 efx_mdio_get_pause(struct efx_nic *efx);
 
 /* Wait for specified MMDs to exit reset within a timeout */
-extern int efx_mdio_wait_reset_mmds(struct efx_nic *efx,
-                                   unsigned int mmd_mask);
+int efx_mdio_wait_reset_mmds(struct efx_nic *efx, unsigned int mmd_mask);
 
 /* Set or clear flag, debouncing */
 static inline void
@@ -107,6 +105,6 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
 }
 
 /* Liveness self-test for MDIO PHYs */
-extern int efx_mdio_test_alive(struct efx_nic *efx);
+int efx_mdio_test_alive(struct efx_nic *efx);
 
 #endif /* EFX_MDIO_10G_H */
index b172ed13305554cf6009fc445ac4c09123b97550..aac22a1e85b8e081f5c83723721c637161ba1e46 100644 (file)
@@ -182,6 +182,9 @@ struct efx_tx_buffer {
  * @tsoh_page: Array of pages of TSO header buffers
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ *     Size of the region is efx_piobuf_size.
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
  * @initialised: Has hardware queue been initialised?
  * @read_count: Current read pointer.
  *     This is the number of buffers that have been removed from both rings.
@@ -209,6 +212,7 @@ struct efx_tx_buffer {
  *     blocks
  * @tso_packets: Number of packets via the TSO xmit path
  * @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
  * @empty_read_count: If the completion path has seen the queue as empty
  *     and the transmission path has not yet checked this, the value of
  *     @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
@@ -223,6 +227,8 @@ struct efx_tx_queue {
        struct efx_buffer *tsoh_page;
        struct efx_special_buffer txd;
        unsigned int ptr_mask;
+       void __iomem *piobuf;
+       unsigned int piobuf_offset;
        bool initialised;
 
        /* Members used mainly on the completion path */
@@ -238,6 +244,7 @@ struct efx_tx_queue {
        unsigned int tso_long_headers;
        unsigned int tso_packets;
        unsigned int pushes;
+       unsigned int pio_packets;
 
        /* Members shared between paths and sometimes updated */
        unsigned int empty_read_count ____cacheline_aligned_in_smp;
index e7dbd2dd202e8bb7332b83560f1fa93d56071674..9c90bf56090f604b4b4df77fb12b0353c922b6e2 100644 (file)
@@ -19,6 +19,7 @@
 #include "bitfield.h"
 #include "efx.h"
 #include "nic.h"
+#include "ef10_regs.h"
 #include "farch_regs.h"
 #include "io.h"
 #include "workarounds.h"
@@ -166,26 +167,30 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
 
 /* Register dump */
 
-#define REGISTER_REVISION_A    1
-#define REGISTER_REVISION_B    2
-#define REGISTER_REVISION_C    3
-#define REGISTER_REVISION_Z    3       /* latest revision */
+#define REGISTER_REVISION_FA   1
+#define REGISTER_REVISION_FB   2
+#define REGISTER_REVISION_FC   3
+#define REGISTER_REVISION_FZ   3       /* last Falcon arch revision */
+#define REGISTER_REVISION_ED   4
+#define REGISTER_REVISION_EZ   4       /* latest EF10 revision */
 
 struct efx_nic_reg {
        u32 offset:24;
-       u32 min_revision:2, max_revision:2;
+       u32 min_revision:3, max_revision:3;
 };
 
-#define REGISTER(name, min_rev, max_rev) {                             \
-       FR_ ## min_rev ## max_rev ## _ ## name,                         \
-       REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev    \
+#define REGISTER(name, arch, min_rev, max_rev) {                       \
+       arch ## R_ ## min_rev ## max_rev ## _ ## name,                  \
+       REGISTER_REVISION_ ## arch ## min_rev,                          \
+       REGISTER_REVISION_ ## arch ## max_rev                           \
 }
-#define REGISTER_AA(name) REGISTER(name, A, A)
-#define REGISTER_AB(name) REGISTER(name, A, B)
-#define REGISTER_AZ(name) REGISTER(name, A, Z)
-#define REGISTER_BB(name) REGISTER(name, B, B)
-#define REGISTER_BZ(name) REGISTER(name, B, Z)
-#define REGISTER_CZ(name) REGISTER(name, C, Z)
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
+#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
 
 static const struct efx_nic_reg efx_nic_regs[] = {
        REGISTER_AZ(ADR_REGION),
@@ -292,37 +297,42 @@ static const struct efx_nic_reg efx_nic_regs[] = {
        REGISTER_AB(XX_TXDRV_CTL),
        /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
        /* XX_CORE_STAT is partly RC */
+       REGISTER_DZ(BIU_HW_REV_ID),
+       REGISTER_DZ(MC_DB_LWRD),
+       REGISTER_DZ(MC_DB_HWRD),
 };
 
 struct efx_nic_reg_table {
        u32 offset:24;
-       u32 min_revision:2, max_revision:2;
+       u32 min_revision:3, max_revision:3;
        u32 step:6, rows:21;
 };
 
-#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
+#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
        offset,                                                         \
-       REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,   \
+       REGISTER_REVISION_ ## arch ## min_rev,                          \
+       REGISTER_REVISION_ ## arch ## max_rev,                          \
        step, rows                                                      \
 }
-#define REGISTER_TABLE(name, min_rev, max_rev)                         \
+#define REGISTER_TABLE(name, arch, min_rev, max_rev)                   \
        REGISTER_TABLE_DIMENSIONS(                                      \
-               name, FR_ ## min_rev ## max_rev ## _ ## name,           \
-               min_rev, max_rev,                                       \
-               FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,        \
-               FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
-#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
-#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
-#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
-#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
+               name, arch ## R_ ## min_rev ## max_rev ## _ ## name,    \
+               arch, min_rev, max_rev,                                 \
+               arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
+               arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
 #define REGISTER_TABLE_BB_CZ(name)                                     \
-       REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,           \
+       REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B,        \
                                  FR_BZ_ ## name ## _STEP,              \
                                  FR_BB_ ## name ## _ROWS),             \
-       REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,           \
+       REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z,        \
                                  FR_BZ_ ## name ## _STEP,              \
                                  FR_CZ_ ## name ## _ROWS)
-#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
+#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
 
 static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
        /* DRIVER is not used */
@@ -340,9 +350,9 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
         * 1K entries allows for some expansion of queue count and
         * size before we need to change the version. */
        REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
-                                 A, A, 8, 1024),
+                                 F, A, A, 8, 1024),
        REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
-                                 B, Z, 8, 1024),
+                                 F, B, Z, 8, 1024),
        REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
        REGISTER_TABLE_BB_CZ(TIMER_TBL),
        REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
@@ -353,6 +363,7 @@ static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
        /* MSIX_PBA_TABLE is not mapped */
        /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
        REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+       REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
 };
 
 size_t efx_nic_get_regs_len(struct efx_nic *efx)
@@ -469,8 +480,7 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
  * @count: Length of the @desc array
  * @mask: Bitmask of which elements of @desc are enabled
  * @stats: Buffer to update with the converted statistics.  The length
- *     of this array must be at least the number of set bits in the
- *     first @count bits of @mask.
+ *     of this array must be at least @count.
  * @dma_buf: DMA buffer containing hardware statistics
  * @accumulate: If set, the converted values will be added rather than
  *     directly stored to the corresponding elements of @stats
@@ -503,11 +513,9 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
                        }
 
                        if (accumulate)
-                               *stats += val;
+                               stats[index] += val;
                        else
-                               *stats = val;
+                               stats[index] = val;
                }
-
-               ++stats;
        }
 }
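One consequence of indexing by 'index' above, rather than advancing the destination pointer, is that the output array stays laid out 1:1 with the descriptor table (hence the relaxed doc comment requiring only @count entries). A small illustrative read, not part of the patch:

/* Sketch: a converted statistic can be addressed directly by its
 * EF10_STAT_* enum value once efx_nic_update_stats() has run.
 */
static u64 example_read_nodesc_drops(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;

        return nic_data->stats[EF10_STAT_rx_nodesc_drops];
}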
index fda29d39032f422d2c395a4fe68a4c5091654006..11b6112d9249a734701eebe66414bd039702bcdd 100644 (file)
@@ -30,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
        return efx->type->revision;
 }
 
-extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
 
 /* NIC has two interlinked PCI functions for the same port. */
 static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -71,6 +71,26 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
        return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
 }
 
+/* Report whether the NIC considers this TX queue empty, given the
+ * write_count used for the last doorbell push.  May return false
+ * negative.
+ */
+static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
+                                        unsigned int write_count)
+{
+       unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+       if (empty_read_count == 0)
+               return false;
+
+       return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+       return __efx_nic_tx_is_empty(tx_queue, tx_queue->write_count);
+}
+
 /* Decide whether to push a TX descriptor to the NIC vs merely writing
  * the doorbell.  This can reduce latency when we are adding a single
  * descriptor to an empty queue, but is otherwise pointless.  Further,
@@ -80,14 +100,10 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
 static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
                                            unsigned int write_count)
 {
-       unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
-       if (empty_read_count == 0)
-               return false;
+       bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
 
        tx_queue->empty_read_count = 0;
-       return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
-               && tx_queue->write_count - write_count == 1;
+       return was_empty && tx_queue->write_count - write_count == 1;
 }
 
 /* Returns a pointer to the specified descriptor in the RX descriptor queue */
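The helpers above factor the "queue looked empty at the last doorbell" test out of efx_nic_may_push_tx_desc() so a transmit path can reuse it. A hedged sketch of how it might be combined with the new PIO limits; the real eligibility condition is not shown in this hunk, so treat this purely as illustration.

        /* Sketch only: prefer PIO for a small frame on an empty queue. */
        if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
            efx_nic_tx_is_empty(tx_queue)) {
                /* copy into tx_queue->piobuf and push an option
                 * descriptor of type ESE_DZ_TX_OPTION_DESC_PIO */
        } else {
                /* take the ordinary DMA descriptor path */
        }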
@@ -386,9 +402,27 @@ enum {
        EF10_STAT_rx_align_error,
        EF10_STAT_rx_length_error,
        EF10_STAT_rx_nodesc_drops,
+       EF10_STAT_rx_pm_trunc_bb_overflow,
+       EF10_STAT_rx_pm_discard_bb_overflow,
+       EF10_STAT_rx_pm_trunc_vfifo_full,
+       EF10_STAT_rx_pm_discard_vfifo_full,
+       EF10_STAT_rx_pm_trunc_qbb,
+       EF10_STAT_rx_pm_discard_qbb,
+       EF10_STAT_rx_pm_discard_mapping,
+       EF10_STAT_rx_dp_q_disabled_packets,
+       EF10_STAT_rx_dp_di_dropped_packets,
+       EF10_STAT_rx_dp_streaming_packets,
+       EF10_STAT_rx_dp_emerg_fetch,
+       EF10_STAT_rx_dp_emerg_wait,
        EF10_STAT_COUNT
 };
 
+/* Maximum number of TX PIO buffers we may allocate to a function.
+ * This matches the total number of buffers on each SFC9100-family
+ * controller.
+ */
+#define EF10_TX_PIOBUF_COUNT 16
+
 /**
  * struct efx_ef10_nic_data - EF10 architecture NIC state
  * @mcdi_buf: DMA buffer for MCDI
@@ -397,6 +431,13 @@ enum {
  * @n_allocated_vis: Number of VIs allocated to this function
  * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
  * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @n_piobufs: Number of PIO buffers allocated to this function
+ * @wc_membase: Base address of write-combining mapping of the memory BAR
+ * @pio_write_base: Base address for writing PIO buffers
+ * @pio_write_vi_base: Relative VI number for @pio_write_base
+ * @piobuf_handle: Handle of each PIO buffer allocated
+ * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
+ *     reboot
  * @rx_rss_context: Firmware handle for our RSS context
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
@@ -412,6 +453,11 @@ struct efx_ef10_nic_data {
        unsigned int n_allocated_vis;
        bool must_realloc_vis;
        bool must_restore_filters;
+       unsigned int n_piobufs;
+       void __iomem *wc_membase, *pio_write_base;
+       unsigned int pio_write_vi_base;
+       unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+       bool must_restore_piobufs;
        u32 rx_rss_context;
        u64 stats[EF10_STAT_COUNT];
        bool workaround_35388;
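For orientation, the new fields relate as follows: n_piobufs counts handles actually obtained and can never exceed the piobuf_handle[] array above. A minimal sketch under that assumption; 'want' stands in for whatever the setup code requests and is not a name from the patch.

        /* Sketch: clamp the request to the handle array before allocating. */
        nic_data->n_piobufs = min_t(unsigned int, want, EF10_TX_PIOBUF_COUNT);
        rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);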
@@ -463,18 +509,18 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx)
        return 1 << efx->vi_scale;
 }
 
-extern int efx_init_sriov(void);
-extern void efx_sriov_probe(struct efx_nic *efx);
-extern int efx_sriov_init(struct efx_nic *efx);
-extern void efx_sriov_mac_address_changed(struct efx_nic *efx);
-extern void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
-extern void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
-extern void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
-extern void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
-extern void efx_sriov_reset(struct efx_nic *efx);
-extern void efx_sriov_fini(struct efx_nic *efx);
-extern void efx_fini_sriov(void);
+int efx_init_sriov(void);
+void efx_sriov_probe(struct efx_nic *efx);
+int efx_sriov_init(struct efx_nic *efx);
+void efx_sriov_mac_address_changed(struct efx_nic *efx);
+void efx_sriov_tx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_rx_flush_done(struct efx_nic *efx, efx_qword_t *event);
+void efx_sriov_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_sriov_desc_fetch_err(struct efx_nic *efx, unsigned dmaq);
+void efx_sriov_flr(struct efx_nic *efx, unsigned flr);
+void efx_sriov_reset(struct efx_nic *efx);
+void efx_sriov_fini(struct efx_nic *efx);
+void efx_fini_sriov(void);
 
 #else
 
@@ -500,22 +546,20 @@ static inline void efx_fini_sriov(void) {}
 
 #endif
 
-extern int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
-extern int efx_sriov_set_vf_vlan(struct net_device *dev, int vf,
-                                u16 vlan, u8 qos);
-extern int efx_sriov_get_vf_config(struct net_device *dev, int vf,
-                                  struct ifla_vf_info *ivf);
-extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
-                                    bool spoofchk);
+int efx_sriov_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos);
+int efx_sriov_get_vf_config(struct net_device *dev, int vf,
+                           struct ifla_vf_info *ivf);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
+                             bool spoofchk);
 
 struct ethtool_ts_info;
-extern void efx_ptp_probe(struct efx_nic *efx);
-extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
-extern void efx_ptp_get_ts_info(struct efx_nic *efx,
-                               struct ethtool_ts_info *ts_info);
-extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_probe(struct efx_nic *efx);
+int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
 
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
@@ -529,7 +573,7 @@ extern const struct efx_nic_type efx_hunt_a0_nic_type;
  **************************************************************************
  */
 
-extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
 
 /* TX data path */
 static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
@@ -597,58 +641,58 @@ static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
 {
        channel->efx->type->ev_read_ack(channel);
 }
-extern void efx_nic_event_test_start(struct efx_channel *channel);
+void efx_nic_event_test_start(struct efx_channel *channel);
 
 /* Falcon/Siena queue operations */
-extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
-extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
-extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
-extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
-extern int efx_farch_ev_probe(struct efx_channel *channel);
-extern int efx_farch_ev_init(struct efx_channel *channel);
-extern void efx_farch_ev_fini(struct efx_channel *channel);
-extern void efx_farch_ev_remove(struct efx_channel *channel);
-extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
-extern void efx_farch_ev_read_ack(struct efx_channel *channel);
-extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+int efx_farch_ev_probe(struct efx_channel *channel);
+int efx_farch_ev_init(struct efx_channel *channel);
+void efx_farch_ev_fini(struct efx_channel *channel);
+void efx_farch_ev_remove(struct efx_channel *channel);
+int efx_farch_ev_process(struct efx_channel *channel, int quota);
+void efx_farch_ev_read_ack(struct efx_channel *channel);
+void efx_farch_ev_test_generate(struct efx_channel *channel);
 
 /* Falcon/Siena filter operations */
-extern int efx_farch_filter_table_probe(struct efx_nic *efx);
-extern void efx_farch_filter_table_restore(struct efx_nic *efx);
-extern void efx_farch_filter_table_remove(struct efx_nic *efx);
-extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_farch_filter_insert(struct efx_nic *efx,
-                                  struct efx_filter_spec *spec, bool replace);
-extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
-                                       enum efx_filter_priority priority,
-                                       u32 filter_id);
-extern int efx_farch_filter_get_safe(struct efx_nic *efx,
-                                    enum efx_filter_priority priority,
-                                    u32 filter_id, struct efx_filter_spec *);
-extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
-                                     enum efx_filter_priority priority);
-extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
-                                         enum efx_filter_priority priority);
-extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
-                                      enum efx_filter_priority priority,
-                                      u32 *buf, u32 size);
+int efx_farch_filter_table_probe(struct efx_nic *efx);
+void efx_farch_filter_table_restore(struct efx_nic *efx);
+void efx_farch_filter_table_remove(struct efx_nic *efx);
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+                           bool replace);
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+                                enum efx_filter_priority priority,
+                                u32 filter_id);
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+                             enum efx_filter_priority priority, u32 filter_id,
+                             struct efx_filter_spec *);
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+                              enum efx_filter_priority priority);
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+                                  enum efx_filter_priority priority);
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+                               enum efx_filter_priority priority, u32 *buf,
+                               u32 size);
 #ifdef CONFIG_RFS_ACCEL
-extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
-                                      struct efx_filter_spec *spec);
-extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
-                                           unsigned int index);
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+                               struct efx_filter_spec *spec);
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+                                    unsigned int index);
 #endif
-extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
 
-extern bool efx_nic_event_present(struct efx_channel *channel);
+bool efx_nic_event_present(struct efx_channel *channel);
 
 /* Some statistics are computed as A - B where A and B each increase
  * linearly with some hardware counter(s) and the counters are read
@@ -669,17 +713,17 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
 }
 
 /* Interrupts */
-extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_fini_interrupt(struct efx_nic *efx);
+int efx_nic_init_interrupt(struct efx_nic *efx);
+void efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
 
 /* Falcon/Siena interrupts */
-extern void efx_farch_irq_enable_master(struct efx_nic *efx);
-extern void efx_farch_irq_test_generate(struct efx_nic *efx);
-extern void efx_farch_irq_disable_master(struct efx_nic *efx);
-extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
-extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
+void efx_farch_irq_enable_master(struct efx_nic *efx);
+void efx_farch_irq_test_generate(struct efx_nic *efx);
+void efx_farch_irq_disable_master(struct efx_nic *efx);
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
 
 static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
 {
@@ -691,21 +735,21 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
 }
 
 /* Global Resources */
-extern int efx_nic_flush_queues(struct efx_nic *efx);
-extern void siena_prepare_flush(struct efx_nic *efx);
-extern int efx_farch_fini_dmaq(struct efx_nic *efx);
-extern void siena_finish_flush(struct efx_nic *efx);
-extern void falcon_start_nic_stats(struct efx_nic *efx);
-extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_farch_init_common(struct efx_nic *efx);
-extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+int efx_nic_flush_queues(struct efx_nic *efx);
+void siena_prepare_flush(struct efx_nic *efx);
+int efx_farch_fini_dmaq(struct efx_nic *efx);
+void siena_finish_flush(struct efx_nic *efx);
+void falcon_start_nic_stats(struct efx_nic *efx);
+void falcon_stop_nic_stats(struct efx_nic *efx);
+int falcon_reset_xaui(struct efx_nic *efx);
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+void efx_farch_init_common(struct efx_nic *efx);
+void efx_ef10_handle_drain_event(struct efx_nic *efx);
 static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
 {
        efx->type->rx_push_indir_table(efx);
 }
-extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_push_indir_table(struct efx_nic *efx);
 
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
                         unsigned int len, gfp_t gfp_flags);
@@ -716,24 +760,22 @@ struct efx_farch_register_test {
        unsigned address;
        efx_oword_t mask;
 };
-extern int efx_farch_test_registers(struct efx_nic *efx,
-                                   const struct efx_farch_register_test *regs,
-                                   size_t n_regs);
+int efx_farch_test_registers(struct efx_nic *efx,
+                            const struct efx_farch_register_test *regs,
+                            size_t n_regs);
 
-extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
-extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
 
-extern size_t
-efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
-                      const unsigned long *mask, u8 *names);
-extern void
-efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
-                    const unsigned long *mask,
-                    u64 *stats, const void *dma_buf, bool accumulate);
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+                             const unsigned long *mask, u8 *names);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+                         const unsigned long *mask, u64 *stats,
+                         const void *dma_buf, bool accumulate);
 
 #define EFX_MAX_FLUSH_TIME 5000
 
-extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
-                                    efx_qword_t *event);
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+                             efx_qword_t *event);
 
 #endif /* EFX_NIC_H */
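The prototype changes above only drop the extern keyword and reflow the argument lists; in C a function declaration at file scope has external linkage whether or not extern is spelled out, so callers see no difference. A minimal standalone illustration (add_one() is a made-up name for this sketch, not part of the driver):

/* extern_demo.c - a function declaration has external linkage with or
 * without the extern keyword, so both prototypes below are equivalent.
 */
#include <stdio.h>

extern int add_one(int x);	/* explicit extern: legal but redundant */
int add_one(int x);		/* same declaration without the keyword */

int add_one(int x)
{
	return x + 1;
}

int main(void)
{
	printf("%d\n", add_one(41));	/* prints 42 */
	return 0;
}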
index 45eeb70751562651b3a361e7b61f4ea4469b381b..803bf445c08e22df119c8274816cb26cf0b51a2b 100644 (file)
@@ -15,7 +15,7 @@
  */
 extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
 
-extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
 
 /****************************************************************************
  * AMCC/Quake QT202x PHYs
@@ -34,7 +34,7 @@ extern const struct efx_phy_operations falcon_qt202x_phy_ops;
 #define QUAKE_LED_TXLINK       (0)
 #define QUAKE_LED_RXLINK       (8)
 
-extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
+void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
 
 /****************************************************************************
 * Transwitch CX4 retimer
@@ -44,7 +44,7 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
 #define TXC_GPIO_DIR_INPUT     0
 #define TXC_GPIO_DIR_OUTPUT    1
 
-extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
-extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
+void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
+void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
 
 #endif
index 4a596725023f07284ef66b3bb2cc337e92a0d69e..8f09e686fc2392a80f56610c78a61c6374b4a410 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/in.h>
 #include <linux/slab.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/prefetch.h>
@@ -818,44 +819,70 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_filter_spec spec;
-       const struct iphdr *ip;
        const __be16 *ports;
+       __be16 ether_type;
        int nhoff;
        int rc;
 
-       nhoff = skb_network_offset(skb);
+       /* The core RPS/RFS code has already parsed and validated
+        * VLAN, IP and transport headers.  We assume they are in the
+        * header area.
+        */
 
        if (skb->protocol == htons(ETH_P_8021Q)) {
-               EFX_BUG_ON_PARANOID(skb_headlen(skb) <
-                                   nhoff + sizeof(struct vlan_hdr));
-               if (((const struct vlan_hdr *)skb->data + nhoff)->
-                   h_vlan_encapsulated_proto != htons(ETH_P_IP))
-                       return -EPROTONOSUPPORT;
+               const struct vlan_hdr *vh =
+                       (const struct vlan_hdr *)skb->data;
 
-               /* This is IP over 802.1q VLAN.  We can't filter on the
-                * IP 5-tuple and the vlan together, so just strip the
-                * vlan header and filter on the IP part.
+               /* We can't filter on the IP 5-tuple and the vlan
+                * together, so just strip the vlan header and filter
+                * on the IP part.
                 */
-               nhoff += sizeof(struct vlan_hdr);
-       } else if (skb->protocol != htons(ETH_P_IP)) {
-               return -EPROTONOSUPPORT;
+               EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
+               ether_type = vh->h_vlan_encapsulated_proto;
+               nhoff = sizeof(struct vlan_hdr);
+       } else {
+               ether_type = skb->protocol;
+               nhoff = 0;
        }
 
-       /* RFS must validate the IP header length before calling us */
-       EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
-       ip = (const struct iphdr *)(skb->data + nhoff);
-       if (ip_is_fragment(ip))
+       if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;
-       EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
-       ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
 
        efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
                           efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
                           rxq_index);
-       rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
-                                     ip->daddr, ports[1], ip->saddr, ports[0]);
-       if (rc)
-               return rc;
+       spec.match_flags =
+               EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+               EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+               EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+       spec.ether_type = ether_type;
+
+       if (ether_type == htons(ETH_P_IP)) {
+               const struct iphdr *ip =
+                       (const struct iphdr *)(skb->data + nhoff);
+
+               EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+               if (ip_is_fragment(ip))
+                       return -EPROTONOSUPPORT;
+               spec.ip_proto = ip->protocol;
+               spec.rem_host[0] = ip->saddr;
+               spec.loc_host[0] = ip->daddr;
+               EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+               ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+       } else {
+               const struct ipv6hdr *ip6 =
+                       (const struct ipv6hdr *)(skb->data + nhoff);
+
+               EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+                                   nhoff + sizeof(*ip6) + 4);
+               spec.ip_proto = ip6->nexthdr;
+               memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
+               memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
+               ports = (const __be16 *)(ip6 + 1);
+       }
+
+       spec.rem_port = ports[0];
+       spec.loc_port = ports[1];
 
        rc = efx->type->filter_rfs_insert(efx, &spec);
        if (rc < 0)
@@ -866,11 +893,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
        channel = efx_get_channel(efx, skb_get_rx_queue(skb));
        ++channel->rfs_filters_added;
 
-       netif_info(efx, rx_status, efx->net_dev,
-                  "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
-                  (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
-                  &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
-                  rxq_index, flow_id, rc);
+       if (ether_type == htons(ETH_P_IP))
+               netif_info(efx, rx_status, efx->net_dev,
+                          "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+                          (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+                          spec.rem_host, ntohs(ports[0]), spec.loc_host,
+                          ntohs(ports[1]), rxq_index, flow_id, rc);
+       else
+               netif_info(efx, rx_status, efx->net_dev,
+                          "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
+                          (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+                          spec.rem_host, ntohs(ports[0]), spec.loc_host,
+                          ntohs(ports[1]), rxq_index, flow_id, rc);
 
        return rc;
 }
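The rewritten efx_filter_rfs() above fills in the efx_filter_spec match fields directly so it can steer both IPv4 and IPv6 flows; the port words sit right after the IP header, located via the IHL field for IPv4 and the fixed 40-byte header for IPv6 (extension headers are not handled, and the driver likewise reads the ports immediately after the fixed IPv6 header). A condensed userspace sketch of that header walk, using glibc's netinet structures rather than anything from the driver:

/* five_tuple_demo.c - locate the TCP/UDP port pair behind an IPv4 or IPv6
 * header in a linear buffer.  IPv6 extension headers are deliberately not
 * handled, mirroring the simplification made in the RFS path above.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>		/* htons() */
#include <netinet/ip.h>		/* struct iphdr (glibc) */
#include <netinet/ip6.h>	/* struct ip6_hdr (glibc) */

/* Returns the byte offset of the two 16-bit ports, or -1 if unsupported. */
static long l4_ports_offset(const uint8_t *l3, uint16_t ether_type)
{
	if (ether_type == 0x0800) {			/* ETH_P_IP */
		const struct iphdr *ip = (const struct iphdr *)l3;

		if (ip->frag_off & htons(0x3fff))	/* MF flag or offset set */
			return -1;			/* fragments carry no ports */
		return 4 * ip->ihl;			/* IHL counts 32-bit words */
	}
	if (ether_type == 0x86DD)			/* ETH_P_IPV6 */
		return sizeof(struct ip6_hdr);		/* fixed 40-byte header */
	return -1;
}

int main(void)
{
	struct iphdr ip = { .version = 4, .ihl = 5 };	/* plain 20-byte header */

	printf("IPv4 ports at offset %ld\n",
	       l4_ports_offset((const uint8_t *)&ip, 0x0800));	/* 20 */
	printf("IPv6 ports at offset %ld\n",
	       l4_ports_offset((const uint8_t *)&ip, 0x86DD));	/* 40 */
	return 0;
}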
index 87698ae0bf75f7dab78ca093ea2c3f3b3f1774d5..a2f4a06ffa4e4d8a6cb6829b45b8397ea10d4283 100644 (file)
@@ -43,13 +43,12 @@ struct efx_self_tests {
        struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
 };
 
-extern void efx_loopback_rx_packet(struct efx_nic *efx,
-                                  const char *buf_ptr, int pkt_len);
-extern int efx_selftest(struct efx_nic *efx,
-                       struct efx_self_tests *tests,
-                       unsigned flags);
-extern void efx_selftest_async_start(struct efx_nic *efx);
-extern void efx_selftest_async_cancel(struct efx_nic *efx);
-extern void efx_selftest_async_work(struct work_struct *data);
+void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+                           int pkt_len);
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+                unsigned flags);
+void efx_selftest_async_start(struct efx_nic *efx);
+void efx_selftest_async_cancel(struct efx_nic *efx);
+void efx_selftest_async_work(struct work_struct *data);
 
 #endif /* EFX_SELFTEST_H */
index 2ac91c5b5eeae4e7a210e5b3693c9f01a04c9f9c..282692c48e6b6de94949e814ecdadf848186edc6 100644 (file)
 #include <net/ipv6.h>
 #include <linux/if_ether.h>
 #include <linux/highmem.h>
+#include <linux/cache.h>
 #include "net_driver.h"
 #include "efx.h"
+#include "io.h"
 #include "nic.h"
 #include "workarounds.h"
+#include "ef10_regs.h"
+
+#ifdef EFX_USE_PIO
+
+#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
+#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
+unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
+
+#endif /* EFX_USE_PIO */
+
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+       return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+       return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+       struct efx_tx_buffer *buffer =
+               __efx_tx_queue_get_insert_buffer(tx_queue);
+
+       EFX_BUG_ON_PARANOID(buffer->len);
+       EFX_BUG_ON_PARANOID(buffer->flags);
+       EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+       return buffer;
+}
 
 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
@@ -83,8 +119,10 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
 
-       /* Possibly one more per segment for the alignment workaround */
-       if (EFX_WORKAROUND_5391(efx))
+       /* Possibly one more per segment for the alignment workaround,
+        * or for option descriptors
+        */
+       if (EFX_WORKAROUND_5391(efx) || efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                max_descs += EFX_TSO_MAX_SEGS;
 
        /* Possibly more for PCIe page boundaries within input fragments */
@@ -145,6 +183,145 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
        }
 }
 
+#ifdef EFX_USE_PIO
+
+struct efx_short_copy_buffer {
+       int used;
+       u8 buf[L1_CACHE_BYTES];
+};
+
+/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
+                                   u8 *data, int len,
+                                   struct efx_short_copy_buffer *copy_buf)
+{
+       int block_len = len & ~(sizeof(copy_buf->buf) - 1);
+
+       memcpy_toio(*piobuf, data, block_len);
+       *piobuf += block_len;
+       len -= block_len;
+
+       if (len) {
+               data += block_len;
+               BUG_ON(copy_buf->used);
+               BUG_ON(len > sizeof(copy_buf->buf));
+               memcpy(copy_buf->buf, data, len);
+               copy_buf->used = len;
+       }
+}
+
+/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
+                                      u8 *data, int len,
+                                      struct efx_short_copy_buffer *copy_buf)
+{
+       if (copy_buf->used) {
+               /* if the copy buffer is partially full, fill it up and write */
+               int copy_to_buf =
+                       min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
+
+               memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
+               copy_buf->used += copy_to_buf;
+
+               /* if we didn't fill it up then we're done for now */
+               if (copy_buf->used < sizeof(copy_buf->buf))
+                       return;
+
+               memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+               *piobuf += sizeof(copy_buf->buf);
+               data += copy_to_buf;
+               len -= copy_to_buf;
+               copy_buf->used = 0;
+       }
+
+       efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
+}
+
+static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
+                                 struct efx_short_copy_buffer *copy_buf)
+{
+       /* if there's anything in it, write the whole buffer, including junk */
+       if (copy_buf->used)
+               memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+}
+
+/* Traverse skb structure and copy fragments in to PIO buffer.
+ * Advances piobuf pointer.
+ */
+static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
+                                    u8 __iomem **piobuf,
+                                    struct efx_short_copy_buffer *copy_buf)
+{
+       int i;
+
+       efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
+                               copy_buf);
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+               skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+               u8 *vaddr;
+
+               vaddr = kmap_atomic(skb_frag_page(f));
+
+               efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+                                          skb_frag_size(f), copy_buf);
+               kunmap_atomic(vaddr);
+       }
+
+       EFX_BUG_ON_PARANOID(skb_shinfo(skb)->frag_list);
+}
+
+static struct efx_tx_buffer *
+efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+       struct efx_tx_buffer *buffer =
+               efx_tx_queue_get_insert_buffer(tx_queue);
+       u8 __iomem *piobuf = tx_queue->piobuf;
+
+       /* Copy to PIO buffer. Ensure the writes are padded to the end
+        * of a cache line, as this is required for write-combining to be
+        * effective on at least x86.
+        */
+
+       if (skb_shinfo(skb)->nr_frags) {
+               /* The size of the copy buffer will ensure all writes
+                * are the size of a cache line.
+                */
+               struct efx_short_copy_buffer copy_buf;
+
+               copy_buf.used = 0;
+
+               efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
+                                        &piobuf, &copy_buf);
+               efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
+       } else {
+               /* Pad the write to the size of a cache line.
+                * We can do this because we know the skb_shared_info struct is
+                * after the source, and the destination buffer is big enough.
+                */
+               BUILD_BUG_ON(L1_CACHE_BYTES >
+                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+               memcpy_toio(tx_queue->piobuf, skb->data,
+                           ALIGN(skb->len, L1_CACHE_BYTES));
+       }
+
+       EFX_POPULATE_QWORD_5(buffer->option,
+                            ESF_DZ_TX_DESC_IS_OPT, 1,
+                            ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
+                            ESF_DZ_TX_PIO_CONT, 0,
+                            ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
+                            ESF_DZ_TX_PIO_BUF_ADDR,
+                            tx_queue->piobuf_offset);
+       ++tx_queue->pio_packets;
+       ++tx_queue->insert_count;
+       return buffer;
+}
+#endif /* EFX_USE_PIO */
+
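The efx_memcpy_toio_aligned()/efx_flush_copy_buffer() pair above only ever pushes whole cache-line-sized blocks into the PIO aperture, parking any shorter tail in a small bounce buffer so that every I/O write stays cache-line sized and write-combining keeps working. A userspace sketch of the same bookkeeping, with plain memcpy() standing in for memcpy_toio() and a fixed 64-byte block size in place of L1_CACHE_BYTES:

/* wc_copy_demo.c - accumulate data and emit it only in whole 64-byte blocks,
 * mirroring the copy-buffer bookkeeping used by the PIO path above.
 */
#include <stdio.h>
#include <string.h>

#define BLOCK 64	/* stand-in for L1_CACHE_BYTES */

struct short_copy_buffer {
	size_t used;
	unsigned char buf[BLOCK];
};

/* Copy len bytes towards dst, writing only whole BLOCK-sized chunks; any
 * tail shorter than BLOCK is parked in cb.  Like the driver helper, this
 * assumes cb is empty on entry.
 */
static void copy_aligned(unsigned char **dst, const unsigned char *data,
			 size_t len, struct short_copy_buffer *cb)
{
	size_t block_len = len & ~(size_t)(BLOCK - 1);

	memcpy(*dst, data, block_len);		/* whole blocks go straight out */
	*dst += block_len;

	if (len - block_len) {			/* park the tail for later */
		memcpy(cb->buf, data + block_len, len - block_len);
		cb->used = len - block_len;
	}
}

/* Flush whatever is left, padding the final write up to a full block
 * (the padding bytes are junk, as in efx_flush_copy_buffer() above).
 */
static void flush_copy_buffer(unsigned char *dst, struct short_copy_buffer *cb)
{
	if (cb->used)
		memcpy(dst, cb->buf, BLOCK);
}

int main(void)
{
	unsigned char pio[2 * BLOCK] = { 0 }, payload[100];
	unsigned char *p = pio;
	struct short_copy_buffer cb = { .used = 0 };

	memset(payload, 0xab, sizeof(payload));
	copy_aligned(&p, payload, sizeof(payload), &cb);	/* one 64-byte block */
	flush_copy_buffer(p, &cb);				/* 36 bytes + padding */
	printf("emitted %zu bytes directly, %zu via the bounce buffer\n",
	       (size_t)(p - pio), cb.used);
	return 0;
}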
 /*
  * Add a socket buffer to a TX queue
  *
@@ -167,7 +344,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        struct device *dma_dev = &efx->pci_dev->dev;
        struct efx_tx_buffer *buffer;
        skb_frag_t *fragment;
-       unsigned int len, unmap_len = 0, insert_ptr;
+       unsigned int len, unmap_len = 0;
        dma_addr_t dma_addr, unmap_addr = 0;
        unsigned int dma_len;
        unsigned short dma_flags;
@@ -189,6 +366,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                        return NETDEV_TX_OK;
        }
 
+       /* Consider using PIO for short packets */
+#ifdef EFX_USE_PIO
+       if (skb->len <= efx_piobuf_size && tx_queue->piobuf &&
+           efx_nic_tx_is_empty(tx_queue) &&
+           efx_nic_tx_is_empty(efx_tx_queue_partner(tx_queue))) {
+               buffer = efx_enqueue_skb_pio(tx_queue, skb);
+               dma_flags = EFX_TX_BUF_OPTION;
+               goto finish_packet;
+       }
+#endif
+
        /* Map for DMA.  Use dma_map_single rather than dma_map_page
         * since this is more efficient on machines with sparse
         * memory.
@@ -208,11 +396,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 
                /* Add to TX queue, splitting across DMA boundaries */
                do {
-                       insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-                       buffer = &tx_queue->buffer[insert_ptr];
-                       EFX_BUG_ON_PARANOID(buffer->flags);
-                       EFX_BUG_ON_PARANOID(buffer->len);
-                       EFX_BUG_ON_PARANOID(buffer->unmap_len);
+                       buffer = efx_tx_queue_get_insert_buffer(tx_queue);
 
                        dma_len = efx_max_tx_len(efx, dma_addr);
                        if (likely(dma_len >= len))
@@ -245,6 +429,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        }
 
        /* Transfer ownership of the skb to the final buffer */
+finish_packet:
        buffer->skb = skb;
        buffer->flags = EFX_TX_BUF_SKB | dma_flags;
 
@@ -270,8 +455,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
        while (tx_queue->insert_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;
                --tx_queue->insert_count;
-               insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-               buffer = &tx_queue->buffer[insert_ptr];
+               buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }
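Several hunks in this file funnel the open-coded "insert_count & ptr_mask" indexing through the new efx_tx_queue_get_insert_buffer() helpers. The masking works because the queue size is a power of two, so ANDing a free-running counter with (size - 1) yields the slot index; a minimal sketch with made-up sizes:

/* ring_index_demo.c - with a power-of-two ring, "counter & (size - 1)" maps a
 * free-running counter onto a valid slot, which is what the insert_count /
 * ptr_mask helpers above rely on.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int entries = 512;		/* must be a power of two */
	const unsigned int ptr_mask = entries - 1;	/* 0x1ff */
	unsigned int insert_count = 1023;		/* keeps counting up forever */

	printf("slot %u\n", insert_count & ptr_mask);		/* 511 */
	printf("slot %u\n", (insert_count + 1) & ptr_mask);	/* wraps to 0 */
	return 0;
}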
 
@@ -628,6 +812,9 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
  * @tcp_off: Offset of TCP header
  * @header_len: Number of bytes of header
  * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
+ * @header_dma_addr: Header DMA address, when using option descriptors
+ * @header_unmap_len: Header DMA mapped length, or 0 if not using option
+ *     descriptors
  *
  * The state used during segmentation.  It is put into this data structure
  * just to make it easy to pass into inline functions.
@@ -636,7 +823,7 @@ struct tso_state {
        /* Output position */
        unsigned out_len;
        unsigned seqnum;
-       unsigned ipv4_id;
+       u16 ipv4_id;
        unsigned packet_space;
 
        /* Input position */
@@ -651,6 +838,8 @@ struct tso_state {
        unsigned int tcp_off;
        unsigned header_len;
        unsigned int ip_base_len;
+       dma_addr_t header_dma_addr;
+       unsigned int header_unmap_len;
 };
 
 
@@ -737,23 +926,18 @@ static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
 {
        struct efx_tx_buffer *buffer;
        struct efx_nic *efx = tx_queue->efx;
-       unsigned dma_len, insert_ptr;
+       unsigned dma_len;
 
        EFX_BUG_ON_PARANOID(len <= 0);
 
        while (1) {
-               insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
-               buffer = &tx_queue->buffer[insert_ptr];
+               buffer = efx_tx_queue_get_insert_buffer(tx_queue);
                ++tx_queue->insert_count;
 
                EFX_BUG_ON_PARANOID(tx_queue->insert_count -
                                    tx_queue->read_count >=
                                    efx->txq_entries);
 
-               EFX_BUG_ON_PARANOID(buffer->len);
-               EFX_BUG_ON_PARANOID(buffer->unmap_len);
-               EFX_BUG_ON_PARANOID(buffer->flags);
-
                buffer->dma_addr = dma_addr;
 
                dma_len = efx_max_tx_len(efx, dma_addr);
@@ -814,19 +998,27 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != tx_queue->write_count) {
                --tx_queue->insert_count;
-               buffer = &tx_queue->buffer[tx_queue->insert_count &
-                                          tx_queue->ptr_mask];
+               buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
        }
 }
 
 
 /* Parse the SKB header and initialise state. */
-static void tso_start(struct tso_state *st, const struct sk_buff *skb)
+static int tso_start(struct tso_state *st, struct efx_nic *efx,
+                    const struct sk_buff *skb)
 {
+       bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+       struct device *dma_dev = &efx->pci_dev->dev;
+       unsigned int header_len, in_len;
+       dma_addr_t dma_addr;
+
        st->ip_off = skb_network_header(skb) - skb->data;
        st->tcp_off = skb_transport_header(skb) - skb->data;
-       st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+       header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
+       in_len = skb_headlen(skb) - header_len;
+       st->header_len = header_len;
+       st->in_len = in_len;
        if (st->protocol == htons(ETH_P_IP)) {
                st->ip_base_len = st->header_len - st->ip_off;
                st->ipv4_id = ntohs(ip_hdr(skb)->id);
@@ -840,9 +1032,34 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
        EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
 
-       st->out_len = skb->len - st->header_len;
-       st->unmap_len = 0;
-       st->dma_flags = 0;
+       st->out_len = skb->len - header_len;
+
+       if (!use_options) {
+               st->header_unmap_len = 0;
+
+               if (likely(in_len == 0)) {
+                       st->dma_flags = 0;
+                       st->unmap_len = 0;
+                       return 0;
+               }
+
+               dma_addr = dma_map_single(dma_dev, skb->data + header_len,
+                                         in_len, DMA_TO_DEVICE);
+               st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
+               st->dma_addr = dma_addr;
+               st->unmap_addr = dma_addr;
+               st->unmap_len = in_len;
+       } else {
+               dma_addr = dma_map_single(dma_dev, skb->data,
+                                         skb_headlen(skb), DMA_TO_DEVICE);
+               st->header_dma_addr = dma_addr;
+               st->header_unmap_len = skb_headlen(skb);
+               st->dma_flags = 0;
+               st->dma_addr = dma_addr + header_len;
+               st->unmap_len = 0;
+       }
+
+       return unlikely(dma_mapping_error(dma_dev, dma_addr)) ? -ENOMEM : 0;
 }
 
 static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
@@ -860,24 +1077,6 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
        return -ENOMEM;
 }
 
-static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
-                                const struct sk_buff *skb)
-{
-       int hl = st->header_len;
-       int len = skb_headlen(skb) - hl;
-
-       st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
-                                       len, DMA_TO_DEVICE);
-       if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
-               st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
-               st->unmap_len = len;
-               st->in_len = len;
-               st->dma_addr = st->unmap_addr;
-               return 0;
-       }
-       return -ENOMEM;
-}
-
 
 /**
  * tso_fill_packet_with_fragment - form descriptors for the current fragment
@@ -944,55 +1143,97 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
                                struct tso_state *st)
 {
        struct efx_tx_buffer *buffer =
-               &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
-       struct tcphdr *tsoh_th;
-       unsigned ip_length;
-       u8 *header;
-       int rc;
+               efx_tx_queue_get_insert_buffer(tx_queue);
+       bool is_last = st->out_len <= skb_shinfo(skb)->gso_size;
+       u8 tcp_flags_clear;
 
-       /* Allocate and insert a DMA-mapped header buffer. */
-       header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
-       if (!header)
-               return -ENOMEM;
-
-       tsoh_th = (struct tcphdr *)(header + st->tcp_off);
-
-       /* Copy and update the headers. */
-       memcpy(header, skb->data, st->header_len);
-
-       tsoh_th->seq = htonl(st->seqnum);
-       st->seqnum += skb_shinfo(skb)->gso_size;
-       if (st->out_len > skb_shinfo(skb)->gso_size) {
-               /* This packet will not finish the TSO burst. */
+       if (!is_last) {
                st->packet_space = skb_shinfo(skb)->gso_size;
-               tsoh_th->fin = 0;
-               tsoh_th->psh = 0;
+               tcp_flags_clear = 0x09; /* mask out FIN and PSH */
        } else {
-               /* This packet will be the last in the TSO burst. */
                st->packet_space = st->out_len;
-               tsoh_th->fin = tcp_hdr(skb)->fin;
-               tsoh_th->psh = tcp_hdr(skb)->psh;
+               tcp_flags_clear = 0x00;
        }
-       ip_length = st->ip_base_len + st->packet_space;
 
-       if (st->protocol == htons(ETH_P_IP)) {
-               struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
+       if (!st->header_unmap_len) {
+               /* Allocate and insert a DMA-mapped header buffer. */
+               struct tcphdr *tsoh_th;
+               unsigned ip_length;
+               u8 *header;
+               int rc;
+
+               header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
+               if (!header)
+                       return -ENOMEM;
 
-               tsoh_iph->tot_len = htons(ip_length);
+               tsoh_th = (struct tcphdr *)(header + st->tcp_off);
+
+               /* Copy and update the headers. */
+               memcpy(header, skb->data, st->header_len);
+
+               tsoh_th->seq = htonl(st->seqnum);
+               ((u8 *)tsoh_th)[13] &= ~tcp_flags_clear;
+
+               ip_length = st->ip_base_len + st->packet_space;
+
+               if (st->protocol == htons(ETH_P_IP)) {
+                       struct iphdr *tsoh_iph =
+                               (struct iphdr *)(header + st->ip_off);
+
+                       tsoh_iph->tot_len = htons(ip_length);
+                       tsoh_iph->id = htons(st->ipv4_id);
+               } else {
+                       struct ipv6hdr *tsoh_iph =
+                               (struct ipv6hdr *)(header + st->ip_off);
+
+                       tsoh_iph->payload_len = htons(ip_length);
+               }
 
-               /* Linux leaves suitable gaps in the IP ID space for us to fill. */
-               tsoh_iph->id = htons(st->ipv4_id);
-               st->ipv4_id++;
+               rc = efx_tso_put_header(tx_queue, buffer, header);
+               if (unlikely(rc))
+                       return rc;
        } else {
-               struct ipv6hdr *tsoh_iph =
-                       (struct ipv6hdr *)(header + st->ip_off);
+               /* Send the original headers with a TSO option descriptor
+                * in front
+                */
+               u8 tcp_flags = ((u8 *)tcp_hdr(skb))[13] & ~tcp_flags_clear;
 
-               tsoh_iph->payload_len = htons(ip_length);
+               buffer->flags = EFX_TX_BUF_OPTION;
+               buffer->len = 0;
+               buffer->unmap_len = 0;
+               EFX_POPULATE_QWORD_5(buffer->option,
+                                    ESF_DZ_TX_DESC_IS_OPT, 1,
+                                    ESF_DZ_TX_OPTION_TYPE,
+                                    ESE_DZ_TX_OPTION_DESC_TSO,
+                                    ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+                                    ESF_DZ_TX_TSO_IP_ID, st->ipv4_id,
+                                    ESF_DZ_TX_TSO_TCP_SEQNO, st->seqnum);
+               ++tx_queue->insert_count;
+
+               /* We mapped the headers in tso_start().  Unmap them
+                * when the last segment is completed.
+                */
+               buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+               buffer->dma_addr = st->header_dma_addr;
+               buffer->len = st->header_len;
+               if (is_last) {
+                       buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_MAP_SINGLE;
+                       buffer->unmap_len = st->header_unmap_len;
+                       /* Ensure we only unmap them once in case of a
+                        * later DMA mapping error and rollback
+                        */
+                       st->header_unmap_len = 0;
+               } else {
+                       buffer->flags = EFX_TX_BUF_CONT;
+                       buffer->unmap_len = 0;
+               }
+               ++tx_queue->insert_count;
        }
 
-       rc = efx_tso_put_header(tx_queue, buffer, header);
-       if (unlikely(rc))
-               return rc;
+       st->seqnum += skb_shinfo(skb)->gso_size;
+
+       /* Linux leaves suitable gaps in the IP ID space for us to fill. */
+       ++st->ipv4_id;
 
        ++tx_queue->tso_packets;
 
@@ -1023,12 +1264,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
        EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
 
-       tso_start(&state, skb);
+       rc = tso_start(&state, efx, skb);
+       if (rc)
+               goto mem_err;
 
-       /* Assume that skb header area contains exactly the headers, and
-        * all payload is in the frag list.
-        */
-       if (skb_headlen(skb) == state.header_len) {
+       if (likely(state.in_len == 0)) {
                /* Grab the first payload fragment. */
                EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1);
                frag_i = 0;
@@ -1037,9 +1277,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                if (rc)
                        goto mem_err;
        } else {
-               rc = tso_get_head_fragment(&state, efx, skb);
-               if (rc)
-                       goto mem_err;
+               /* Payload starts in the header area. */
                frag_i = -1;
        }
 
@@ -1091,6 +1329,11 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
                                       state.unmap_len, DMA_TO_DEVICE);
        }
 
+       /* Free the header DMA mapping, if using option descriptors */
+       if (state.header_unmap_len)
+               dma_unmap_single(&efx->pci_dev->dev, state.header_dma_addr,
+                                state.header_unmap_len, DMA_TO_DEVICE);
+
        efx_enqueue_unwind(tx_queue);
        return NETDEV_TX_OK;
 }
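On the new option-descriptor path above, the TCP flags are manipulated as raw byte 13 of the header, where FIN is bit 0 (0x01) and PSH is bit 3 (0x08); clearing the 0x09 mask on every segment except the last reproduces what the old per-field fin/psh assignments did. A small standalone check of that bit arithmetic (flag values come from the TCP header layout, nothing here is driver-specific):

/* tcp_flags_demo.c - byte 13 of the TCP header carries CWR..FIN; clearing
 * 0x09 strips FIN (0x01) and PSH (0x08) from non-final TSO segments while
 * leaving the other flags, e.g. ACK (0x10), intact.
 */
#include <stdio.h>
#include <stdint.h>

#define TCP_FLAG_FIN 0x01
#define TCP_FLAG_PSH 0x08
#define TCP_FLAG_ACK 0x10

int main(void)
{
	uint8_t flags = TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_FIN;	/* 0x19 */
	uint8_t tcp_flags_clear = TCP_FLAG_FIN | TCP_FLAG_PSH;		/* 0x09 */

	printf("non-final segment: 0x%02x\n",
	       (uint8_t)(flags & ~tcp_flags_clear));	/* 0x10, ACK only */
	printf("final segment:     0x%02x\n", flags);	/* 0x19, unchanged */
	return 0;
}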
index 770036bc2d87c9fde8335241a43b52a5df581a6f..513ed8b1ba582add37602ab4c93faaf462c7f3a3 100644 (file)
@@ -839,7 +839,7 @@ static int meth_probe(struct platform_device *pdev)
        dev->watchdog_timeo     = timeout;
        dev->irq                = MACE_ETHERNET_IRQ;
        dev->base_addr          = (unsigned long)&mace->eth;
-       memcpy(dev->dev_addr, o2meth_eaddr, 6);
+       memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
 
        priv = netdev_priv(dev);
        spin_lock_init(&priv->meth_lock);
index ee18e6f7b4fe19d9b8e3bbec69fd5e2cd098cb81..acbbe48a519c0c673ff8de55c31b1690c3976cb5 100644 (file)
@@ -1921,7 +1921,6 @@ static void sis190_remove_one(struct pci_dev *pdev)
        cancel_work_sync(&tp->phy_task);
        unregister_netdev(dev);
        sis190_release_board(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static struct pci_driver sis190_pci_driver = {
index 03b256af7ed5f8c68a75a47cea63cf8f6bc7bb2e..8c5c24a16f8a22e2cd7e78ca22ee9c9e253d38b9 100644 (file)
@@ -1535,7 +1535,6 @@ static void epic_remove_one(struct pci_dev *pdev)
        pci_release_regions(pdev);
        free_netdev(dev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        /* pci_power_off(pdev, -1); */
 }
 
index e85c2e7e82468e8d53d4e5a14f692d9ac6a34c5c..afd9873e9bdb4a527666681e2b55da0504e43ea5 100644 (file)
@@ -95,14 +95,6 @@ static const char version[] =
 #define USE_32_BIT 1
 #endif
 
-#if defined(__H8300H__) || defined(__H8300S__)
-#define NO_AUTOPROBE
-#undef insl
-#undef outsl
-#define insl(a,b,l)  io_insl_noswap(a,b,l)
-#define outsl(a,b,l) io_outsl_noswap(a,b,l)
-#endif
-
 /*
  .the SMC9194 can be at any of the following port addresses.  To change,
  .for a slightly different card, you can add it to the array.  Keep in
@@ -114,12 +106,6 @@ struct devlist {
        unsigned int irq;
 };
 
-#if defined(CONFIG_H8S_EDOSK2674)
-static struct devlist smc_devlist[] __initdata = {
-       {.port = 0xf80000, .irq = 16},
-       {.port = 0,        .irq = 0 },
-};
-#else
 static struct devlist smc_devlist[] __initdata = {
        {.port = 0x200, .irq = 0},
        {.port = 0x220, .irq = 0},
@@ -139,7 +125,6 @@ static struct devlist smc_devlist[] __initdata = {
        {.port = 0x3E0, .irq = 0},
        {.port = 0,     .irq = 0},
 };
-#endif
 /*
  . Wait time for memory to be free.  This probably shouldn't be
  . tuned that much, as waiting for this means nothing else happens
@@ -651,11 +636,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
 #ifdef USE_32_BIT
        if ( length & 0x2  ) {
                outsl(ioaddr + DATA_1, buf,  length >> 2 );
-#if !defined(__H8300H__) && !defined(__H8300S__)
                outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
-#else
-               ctrl_outw( *((word *)(buf + (length & 0xFFFFFFFC))),ioaddr +DATA_1);
-#endif
        }
        else
                outsl(ioaddr + DATA_1, buf,  length >> 2 );
@@ -899,7 +880,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
                retval = -ENODEV;
                goto err_out;
        }
-#if !defined(CONFIG_H8S_EDOSK2674)
        /* well, we've already written once, so hopefully another time won't
           hurt.  This time, I need to switch the bank register to bank 1,
           so I can access the base address register */
@@ -914,10 +894,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
                retval = -ENODEV;
                goto err_out;
        }
-#else
-       (void)base_address_register; /* Warning suppression */
-#endif
-
 
        /*  check if the revision register is something that I recognize.
            These might need to be added to later, as future revisions
index 5730fe2445a6c1acf5dfa5cd8767d1b1bb749734..98eedb90cdc3c0220c6f8f4d26d2b7f945cf9624 100644 (file)
@@ -1124,8 +1124,7 @@ static const char * chip_ids[ 16 ] =  {
                        void __iomem *__ioaddr = ioaddr;                \
                        if (__len >= 2 && (unsigned long)__ptr & 2) {   \
                                __len -= 2;                             \
-                               SMC_outw(*(u16 *)__ptr, ioaddr,         \
-                                       DATA_REG(lp));          \
+                               SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
                                __ptr += 2;                             \
                        }                                               \
                        if (SMC_CAN_USE_DATACS && lp->datacs)           \
@@ -1133,8 +1132,7 @@ static const char * chip_ids[ 16 ] =  {
                        SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
                        if (__len & 2) {                                \
                                __ptr += (__len & ~3);                  \
-                               SMC_outw(*((u16 *)__ptr), ioaddr,       \
-                                        DATA_REG(lp));         \
+                               SMC_outsw(ioaddr, DATA_REG(lp), __ptr, 1); \
                        }                                               \
                } else if (SMC_16BIT(lp))                               \
                        SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1);   \
index 5fdbc2686eb3a2f6dda0a9f1f4339e641011ffb4..01f8459c321393342def894dbce875322eee3402 100644 (file)
@@ -2502,7 +2502,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
                SMSC_TRACE(pdata, probe,
                           "MAC Address is specified by configuration");
        } else if (is_valid_ether_addr(pdata->config.mac)) {
-               memcpy(dev->dev_addr, pdata->config.mac, 6);
+               memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
                SMSC_TRACE(pdata, probe,
                           "MAC Address specified by platform data");
        } else {
index 5f9e79f7f2df52f8b6a3c1955bd7e99ba4bbb122..e55e3365a30658c793f751c11b05094256bbdf50 100644 (file)
@@ -1707,8 +1707,6 @@ static void smsc9420_remove(struct pci_dev *pdev)
        if (!dev)
                return;
 
-       pci_set_drvdata(pdev, NULL);
-
        pd = netdev_priv(dev);
        unregister_netdev(dev);
 
index 7eb8babed2cbe38f822aab596f2451d75112c9a4..fc94f202a43e40f24247019fb6c5906da8ac20be 100644 (file)
@@ -451,14 +451,14 @@ struct mac_device_info {
 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
 struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
 
-extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
-                               unsigned int high, unsigned int low);
-extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
-                               unsigned int high, unsigned int low);
+void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+                        unsigned int high, unsigned int low);
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+                        unsigned int high, unsigned int low);
 
-extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+void stmmac_set_mac(void __iomem *ioaddr, bool enable);
 
-extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
 extern const struct stmmac_ring_mode_ops ring_mode_ops;
 extern const struct stmmac_chain_mode_ops chain_mode_ops;
 
index 8e5662ce488bd80d2a0a7457c25fbf49ca523bb1..def266da55dbe617e8be83f7a3d630f8230b9c5c 100644 (file)
 #define DMA_STATUS_TI  0x00000001      /* Transmit Interrupt */
 #define DMA_CONTROL_FTF                0x00100000      /* Flush transmit FIFO */
 
-extern void dwmac_enable_dma_transmission(void __iomem *ioaddr);
-extern void dwmac_enable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_disable_dma_irq(void __iomem *ioaddr);
-extern void dwmac_dma_start_tx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_tx(void __iomem *ioaddr);
-extern void dwmac_dma_start_rx(void __iomem *ioaddr);
-extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
-extern int dwmac_dma_interrupt(void __iomem *ioaddr,
-                              struct stmmac_extra_stats *x);
+void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_irq(void __iomem *ioaddr);
+void dwmac_disable_dma_irq(void __iomem *ioaddr);
+void dwmac_dma_start_tx(void __iomem *ioaddr);
+void dwmac_dma_stop_tx(void __iomem *ioaddr);
+void dwmac_dma_start_rx(void __iomem *ioaddr);
+void dwmac_dma_stop_rx(void __iomem *ioaddr);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
 
 #endif /* __DWMAC_DMA_H__ */
index 48ec001566b5540421c5e5a25fe0b18db3c25e58..8607488cbcfcfaea7bc70510f0a3f601c5dade7d 100644 (file)
@@ -128,8 +128,8 @@ struct stmmac_counters {
        unsigned int mmc_rx_icmp_err_octets;
 };
 
-extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
-extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
-extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
+void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
+void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
 
 #endif /* __MMC_H__ */
index f16a9bdf45bb6738c7fca4063cf96307d6303462..22f89ffdfd95fc3e8ca530808e4d6d0dfdcfeaf2 100644 (file)
@@ -110,14 +110,14 @@ struct stmmac_priv {
 
 extern int phyaddr;
 
-extern int stmmac_mdio_unregister(struct net_device *ndev);
-extern int stmmac_mdio_register(struct net_device *ndev);
-extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+int stmmac_mdio_unregister(struct net_device *ndev);
+int stmmac_mdio_register(struct net_device *ndev);
+void stmmac_set_ethtool_ops(struct net_device *netdev);
 extern const struct stmmac_desc_ops enh_desc_ops;
 extern const struct stmmac_desc_ops ndesc_ops;
 extern const struct stmmac_hwtimestamp stmmac_ptp;
-extern int stmmac_ptp_register(struct stmmac_priv *priv);
-extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
+int stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_unregister(struct stmmac_priv *priv);
 int stmmac_freeze(struct net_device *ndev);
 int stmmac_restore(struct net_device *ndev);
 int stmmac_resume(struct net_device *ndev);
index 023b7c29cb2fc3408ff3e3a00c03b9656ddd2ab4..644d80ece067717f3dd296d6ed5f54d9c90a7134 100644 (file)
@@ -138,7 +138,6 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
 
        stmmac_dvr_remove(ndev);
 
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, priv->ioaddr);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
index 759441b29e535b4c7b1c0f94e6f7520fc628fbdd..b4d50d74ba183b29156bce538af3c5c03369d9ea 100644 (file)
@@ -3354,7 +3354,7 @@ use_random_mac_addr:
 #if defined(CONFIG_SPARC)
        addr = of_get_property(cp->of_node, "local-mac-address", NULL);
        if (addr != NULL) {
-               memcpy(dev_addr, addr, 6);
+               memcpy(dev_addr, addr, ETH_ALEN);
                goto done;
        }
 #endif
@@ -5168,7 +5168,6 @@ err_out_free_netdev:
 
 err_out_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        return -ENODEV;
 }
 
@@ -5206,7 +5205,6 @@ static void cas_remove_one(struct pci_dev *pdev)
        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM
index f28460ce24a7134a371dd148887cc5466e934f8b..388540fcb9773dab8973094bb4ec76f74fe4f73d 100644 (file)
@@ -9875,7 +9875,6 @@ err_out_free_res:
 
 err_out_disable_pdev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        return err;
 }
@@ -9900,7 +9899,6 @@ static void niu_pci_remove_one(struct pci_dev *pdev)
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index e62df2b81302bd32881ca22daef127e04985679e..b5655b79bd3b25bcd2e90376fd18e67fb78e1f93 100644 (file)
@@ -2779,7 +2779,7 @@ static int gem_get_device_address(struct gem *gp)
                return -1;
 #endif
        }
-       memcpy(dev->dev_addr, addr, 6);
+       memcpy(dev->dev_addr, addr, ETH_ALEN);
 #else
        get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
 #endif
@@ -2806,8 +2806,6 @@ static void gem_remove_one(struct pci_dev *pdev)
                iounmap(gp->regs);
                pci_release_regions(pdev);
                free_netdev(dev);
-
-               pci_set_drvdata(pdev, NULL);
        }
 }
 
index e37b587b386048dd299ac6ad0af55dd47d3e199f..0dbf46f08ed56a479a647a18f4c73692f2ec6160 100644 (file)
@@ -2675,10 +2675,10 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
 
                addr = of_get_property(dp, "local-mac-address", &len);
 
-               if (qfe_slot != -1 && addr && len == 6)
-                       memcpy(dev->dev_addr, addr, 6);
+               if (qfe_slot != -1 && addr && len == ETH_ALEN)
+                       memcpy(dev->dev_addr, addr, ETH_ALEN);
                else
-                       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+                       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
        }
 
        hp = netdev_priv(dev);
@@ -3024,9 +3024,9 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
                    (addr = of_get_property(dp, "local-mac-address", &len))
                        != NULL &&
                    len == 6) {
-                       memcpy(dev->dev_addr, addr, 6);
+                       memcpy(dev->dev_addr, addr, ETH_ALEN);
                } else {
-                       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+                       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
                }
 #else
                get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
@@ -3170,8 +3170,6 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
        pci_release_regions(hp->happy_dev);
 
        free_netdev(net_dev);
-
-       pci_set_drvdata(pdev, NULL);
 }
 
 static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
index b072f4dba033c1661bf4a341747b1d82d4874fc7..5695ae2411dea0f74718d3a87254aab4cf97ac42 100644 (file)
@@ -843,7 +843,7 @@ static int qec_ether_init(struct platform_device *op)
        if (!dev)
                return -ENOMEM;
 
-       memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+       memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
 
        qe = netdev_priv(dev);
 
index 571452e786d5103460180e54481cd11ae88e4927..dd0dd6279b4eec8168c006457643568032ee358f 100644 (file)
@@ -2447,7 +2447,6 @@ static void bdx_remove(struct pci_dev *pdev)
        iounmap(nic->regs);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        vfree(nic);
 
        RET();
index de71b1ec4625e8f21a9ad4d56db5fa8592c89e6e..53150c25a96bd455f58dc30ca3c592b6a598461e 100644 (file)
@@ -49,11 +49,19 @@ config TI_DAVINCI_CPDMA
          To compile this driver as a module, choose M here: the module
          will be called davinci_cpdma.  This is recommended.
 
+config TI_CPSW_PHY_SEL
+       boolean "TI CPSW Switch Phy sel Support"
+       depends on TI_CPSW
+       ---help---
+         This driver supports configuring the interface mode of the PHYs
+         connected to the CPSW.
+
 config TI_CPSW
        tristate "TI CPSW Switch Support"
        depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
        select TI_DAVINCI_CPDMA
        select TI_DAVINCI_MDIO
+       select TI_CPSW_PHY_SEL
        ---help---
          This driver supports TI's CPSW Ethernet Switch.
 
index c65148e8aa1d4810e00cbe6d7448fa72572847d7..9cfaab8152be08aa16bc37c91f622ba8e287989b 100644 (file)
@@ -7,5 +7,6 @@ obj-$(CONFIG_CPMAC) += cpmac.o
 obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
 obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
+obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
 ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
new file mode 100644 (file)
index 0000000..148da9a
--- /dev/null
@@ -0,0 +1,161 @@
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "cpsw.h"
+
+/* AM33xx SoC specific definitions for the CONTROL port */
+#define AM33XX_GMII_SEL_MODE_MII       0
+#define AM33XX_GMII_SEL_MODE_RMII      1
+#define AM33XX_GMII_SEL_MODE_RGMII     2
+
+#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN        BIT(7)
+#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN        BIT(6)
+
+struct cpsw_phy_sel_priv {
+       struct device   *dev;
+       u32 __iomem     *gmii_sel;
+       bool            rmii_clock_external;
+       void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv,
+                            phy_interface_t phy_mode, int slave);
+};
+
+
+static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
+                                phy_interface_t phy_mode, int slave)
+{
+       u32 reg;
+       u32 mask;
+       u32 mode = 0;
+
+       reg = readl(priv->gmii_sel);
+
+       switch (phy_mode) {
+       case PHY_INTERFACE_MODE_RMII:
+               mode = AM33XX_GMII_SEL_MODE_RMII;
+               break;
+
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               mode = AM33XX_GMII_SEL_MODE_RGMII;
+               break;
+
+       case PHY_INTERFACE_MODE_MII:
+       default:
+               mode = AM33XX_GMII_SEL_MODE_MII;
+               break;
+       }
+
+       mask = 0x3 << (slave * 2) | BIT(slave + 6);
+       mode <<= slave * 2;
+
+       if (priv->rmii_clock_external) {
+               if (slave == 0)
+                       mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
+               else
+                       mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
+       }
+
+       reg &= ~mask;
+       reg |= mode;
+
+       writel(reg, priv->gmii_sel);
+}
+
+static struct platform_driver cpsw_phy_sel_driver;
+static int match(struct device *dev, void *data)
+{
+       struct device_node *node = (struct device_node *)data;
+       return dev->of_node == node &&
+               dev->driver == &cpsw_phy_sel_driver.driver;
+}
+
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{
+       struct device_node *node;
+       struct cpsw_phy_sel_priv *priv;
+
+       node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+       if (!node) {
+               dev_err(dev, "Phy mode driver DT not found\n");
+               return;
+       }
+
+       dev = bus_find_device(&platform_bus_type, NULL, node, match);
+       priv = dev_get_drvdata(dev);
+
+       priv->cpsw_phy_sel(priv, phy_mode, slave);
+}
+EXPORT_SYMBOL_GPL(cpsw_phy_sel);
+
+static const struct of_device_id cpsw_phy_sel_id_table[] = {
+       {
+               .compatible     = "ti,am3352-cpsw-phy-sel",
+               .data           = &cpsw_gmii_sel_am3352,
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
+
+static int cpsw_phy_sel_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       const struct of_device_id *of_id;
+       struct cpsw_phy_sel_priv *priv;
+
+       of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node);
+       if (!of_id)
+               return -EINVAL;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n");
+               return -ENOMEM;
+       }
+
+       priv->cpsw_phy_sel = of_id->data;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
+       priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->gmii_sel))
+               return PTR_ERR(priv->gmii_sel);
+
+       if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
+               priv->rmii_clock_external = true;
+
+       dev_set_drvdata(&pdev->dev, priv);
+
+       return 0;
+}
+
+static struct platform_driver cpsw_phy_sel_driver = {
+       .probe          = cpsw_phy_sel_probe,
+       .driver         = {
+               .name   = "cpsw-phy-sel",
+               .owner  = THIS_MODULE,
+               .of_match_table = cpsw_phy_sel_id_table,
+       },
+};
+
+module_platform_driver(cpsw_phy_sel_driver);
+MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
+MODULE_LICENSE("GPL v2");
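For reference, a minimal user-space sketch of the bit packing that cpsw_gmii_sel_am3352() performs on the shared GMII_SEL register: two mode bits per slave at bit position (slave * 2), plus the RMII refclk-input enable at bit (slave + 6). The helper name and the sample register value below are illustrative only, not part of the driver:

        #include <stdint.h>
        #include <stdio.h>

        #define BIT(n)                  (1u << (n))
        #define GMII_SEL_MODE_MII       0u      /* mirrors AM33XX_GMII_SEL_MODE_MII */
        #define GMII_SEL_MODE_RMII      1u
        #define GMII_SEL_MODE_RGMII     2u

        /* Recompute the register value the way the driver does: clear the two
         * mode bits and the refclk bit for this slave, then OR in the new mode.
         */
        static uint32_t gmii_sel_update(uint32_t reg, int slave, uint32_t mode,
                                        int rmii_clk_external)
        {
                uint32_t mask = (0x3u << (slave * 2)) | BIT(slave + 6);

                mode <<= slave * 2;
                if (rmii_clk_external)
                        mode |= BIT(slave + 6);         /* RMIIn_IO_CLK_EN */

                return (reg & ~mask) | mode;
        }

        int main(void)
        {
                /* Switch slave 1 to RGMII while leaving slave 0's settings alone. */
                printf("0x%08x\n",
                       (unsigned)gmii_sel_update(0x000000c5, 1,
                                                 GMII_SEL_MODE_RGMII, 0));
                return 0;
        }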
index 79974e31187ac19af63452a3a0c421a5ff2b7cf7..90d41d26ec6d8c37f04682aa05b8731a4d4c3002 100644 (file)
@@ -367,8 +367,6 @@ struct cpsw_priv {
        spinlock_t                      lock;
        struct platform_device          *pdev;
        struct net_device               *ndev;
-       struct resource                 *cpsw_res;
-       struct resource                 *cpsw_wr_res;
        struct napi_struct              napi;
        struct device                   *dev;
        struct cpsw_platform_data       data;
@@ -639,13 +637,6 @@ void cpsw_rx_handler(void *token, int len, int status)
 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
 {
        struct cpsw_priv *priv = dev_id;
-       u32 rx, tx, rx_thresh;
-
-       rx_thresh = __raw_readl(&priv->wr_regs->rx_thresh_stat);
-       rx = __raw_readl(&priv->wr_regs->rx_stat);
-       tx = __raw_readl(&priv->wr_regs->tx_stat);
-       if (!rx_thresh && !rx && !tx)
-               return IRQ_NONE;
 
        cpsw_intr_disable(priv);
        if (priv->irq_enabled == true) {
@@ -1023,6 +1014,10 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                dev_info(priv->dev, "phy found : id is : 0x%x\n",
                         slave->phy->phy_id);
                phy_start(slave->phy);
+
+               /* Configure GMII_SEL register */
+               cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
+                            slave->slave_num);
        }
 }
 
@@ -1169,9 +1164,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
                }
        }
 
+       napi_enable(&priv->napi);
        cpdma_ctlr_start(priv->dma);
        cpsw_intr_enable(priv);
-       napi_enable(&priv->napi);
        cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
        cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 
@@ -1712,67 +1707,60 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 
        if (of_property_read_u32(node, "active_slave", &prop)) {
                pr_err("Missing active_slave property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->active_slave = prop;
 
        if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
                pr_err("Missing cpts_clock_mult property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->cpts_clock_mult = prop;
 
        if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
                pr_err("Missing cpts_clock_shift property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->cpts_clock_shift = prop;
 
-       data->slave_data = kcalloc(data->slaves, sizeof(struct cpsw_slave_data),
-                                  GFP_KERNEL);
+       data->slave_data = devm_kzalloc(&pdev->dev, data->slaves
+                                       * sizeof(struct cpsw_slave_data),
+                                       GFP_KERNEL);
        if (!data->slave_data)
-               return -EINVAL;
+               return -ENOMEM;
 
        if (of_property_read_u32(node, "cpdma_channels", &prop)) {
                pr_err("Missing cpdma_channels property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->channels = prop;
 
        if (of_property_read_u32(node, "ale_entries", &prop)) {
                pr_err("Missing ale_entries property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->ale_entries = prop;
 
        if (of_property_read_u32(node, "bd_ram_size", &prop)) {
                pr_err("Missing bd_ram_size property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->bd_ram_size = prop;
 
        if (of_property_read_u32(node, "rx_descs", &prop)) {
                pr_err("Missing rx_descs property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->rx_descs = prop;
 
        if (of_property_read_u32(node, "mac_control", &prop)) {
                pr_err("Missing mac_control property in the DT.\n");
-               ret = -EINVAL;
-               goto error_ret;
+               return -EINVAL;
        }
        data->mac_control = prop;
 
-       if (!of_property_read_u32(node, "dual_emac", &prop))
-               data->dual_emac = prop;
+       if (of_property_read_bool(node, "dual_emac"))
+               data->dual_emac = 1;
 
        /*
         * Populate all the child nodes here...
@@ -1782,7 +1770,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        if (ret)
                pr_warn("Doesn't have any child node\n");
 
-       for_each_node_by_name(slave_node, "slave") {
+       for_each_child_of_node(node, slave_node) {
                struct cpsw_slave_data *slave_data = data->slave_data + i;
                const void *mac_addr = NULL;
                u32 phyid;
@@ -1791,11 +1779,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                struct device_node *mdio_node;
                struct platform_device *mdio;
 
+               /* Not a slave child node, continue */
+               if (strcmp(slave_node->name, "slave"))
+                       continue;
+
                parp = of_get_property(slave_node, "phy_id", &lenp);
                if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
                        pr_err("Missing slave[%d] phy_id property\n", i);
-                       ret = -EINVAL;
-                       goto error_ret;
+                       return -EINVAL;
                }
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
@@ -1825,10 +1816,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        }
 
        return 0;
-
-error_ret:
-       kfree(data->slave_data);
-       return ret;
 }
 
 static int cpsw_probe_dual_emac(struct platform_device *pdev,
@@ -1870,7 +1857,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
        priv_sl2->coal_intvl = 0;
        priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
 
-       priv_sl2->cpsw_res = priv->cpsw_res;
        priv_sl2->regs = priv->regs;
        priv_sl2->host_port = priv->host_port;
        priv_sl2->host_port_regs = priv->host_port_regs;
@@ -1914,8 +1900,8 @@ static int cpsw_probe(struct platform_device *pdev)
        struct cpsw_priv                *priv;
        struct cpdma_params             dma_params;
        struct cpsw_ale_params          ale_params;
-       void __iomem                    *ss_regs, *wr_regs;
-       struct resource                 *res;
+       void __iomem                    *ss_regs;
+       struct resource                 *res, *ss_res;
        u32 slave_offset, sliver_offset, slave_size;
        int ret = 0, i, k = 0;
 
@@ -1951,7 +1937,7 @@ static int cpsw_probe(struct platform_device *pdev)
        if (cpsw_probe_dt(&priv->data, pdev)) {
                pr_err("cpsw: platform data missing\n");
                ret = -ENODEV;
-               goto clean_ndev_ret;
+               goto clean_runtime_disable_ret;
        }
        data = &priv->data;
 
@@ -1965,11 +1951,12 @@ static int cpsw_probe(struct platform_device *pdev)
 
        memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
 
-       priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
-                              GFP_KERNEL);
+       priv->slaves = devm_kzalloc(&pdev->dev,
+                                   sizeof(struct cpsw_slave) * data->slaves,
+                                   GFP_KERNEL);
        if (!priv->slaves) {
-               ret = -EBUSY;
-               goto clean_ndev_ret;
+               ret = -ENOMEM;
+               goto clean_runtime_disable_ret;
        }
        for (i = 0; i < data->slaves; i++)
                priv->slaves[i].slave_num = i;
@@ -1977,55 +1964,31 @@ static int cpsw_probe(struct platform_device *pdev)
        priv->slaves[0].ndev = ndev;
        priv->emac_port = 0;
 
-       priv->clk = clk_get(&pdev->dev, "fck");
+       priv->clk = devm_clk_get(&pdev->dev, "fck");
        if (IS_ERR(priv->clk)) {
-               dev_err(&pdev->dev, "fck is not found\n");
+               dev_err(priv->dev, "fck is not found\n");
                ret = -ENODEV;
-               goto clean_slave_ret;
+               goto clean_runtime_disable_ret;
        }
        priv->coal_intvl = 0;
        priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
 
-       priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!priv->cpsw_res) {
-               dev_err(priv->dev, "error getting i/o resource\n");
-               ret = -ENOENT;
-               goto clean_clk_ret;
-       }
-       if (!request_mem_region(priv->cpsw_res->start,
-                               resource_size(priv->cpsw_res), ndev->name)) {
-               dev_err(priv->dev, "failed request i/o region\n");
-               ret = -ENXIO;
-               goto clean_clk_ret;
-       }
-       ss_regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
-       if (!ss_regs) {
-               dev_err(priv->dev, "unable to map i/o region\n");
-               goto clean_cpsw_iores_ret;
+       ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
+       if (IS_ERR(ss_regs)) {
+               ret = PTR_ERR(ss_regs);
+               goto clean_runtime_disable_ret;
        }
        priv->regs = ss_regs;
        priv->version = __raw_readl(&priv->regs->id_ver);
        priv->host_port = HOST_PORT_NUM;
 
-       priv->cpsw_wr_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!priv->cpsw_wr_res) {
-               dev_err(priv->dev, "error getting i/o resource\n");
-               ret = -ENOENT;
-               goto clean_iomap_ret;
-       }
-       if (!request_mem_region(priv->cpsw_wr_res->start,
-                       resource_size(priv->cpsw_wr_res), ndev->name)) {
-               dev_err(priv->dev, "failed request i/o region\n");
-               ret = -ENXIO;
-               goto clean_iomap_ret;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->wr_regs)) {
+               ret = PTR_ERR(priv->wr_regs);
+               goto clean_runtime_disable_ret;
        }
-       wr_regs = ioremap(priv->cpsw_wr_res->start,
-                               resource_size(priv->cpsw_wr_res));
-       if (!wr_regs) {
-               dev_err(priv->dev, "unable to map i/o region\n");
-               goto clean_cpsw_wr_iores_ret;
-       }
-       priv->wr_regs = wr_regs;
 
        memset(&dma_params, 0, sizeof(dma_params));
        memset(&ale_params, 0, sizeof(ale_params));
@@ -2056,12 +2019,12 @@ static int cpsw_probe(struct platform_device *pdev)
                slave_size           = CPSW2_SLAVE_SIZE;
                sliver_offset        = CPSW2_SLIVER_OFFSET;
                dma_params.desc_mem_phys =
-                       (u32 __force) priv->cpsw_res->start + CPSW2_BD_OFFSET;
+                       (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
                break;
        default:
                dev_err(priv->dev, "unknown version 0x%08x\n", priv->version);
                ret = -ENODEV;
-               goto clean_cpsw_wr_iores_ret;
+               goto clean_runtime_disable_ret;
        }
        for (i = 0; i < priv->data.slaves; i++) {
                struct cpsw_slave *slave = &priv->slaves[i];
@@ -2089,7 +2052,7 @@ static int cpsw_probe(struct platform_device *pdev)
        if (!priv->dma) {
                dev_err(priv->dev, "error initializing dma\n");
                ret = -ENOMEM;
-               goto clean_wr_iomap_ret;
+               goto clean_runtime_disable_ret;
        }
 
        priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
@@ -2124,8 +2087,8 @@ static int cpsw_probe(struct platform_device *pdev)
 
        while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
                for (i = res->start; i <= res->end; i++) {
-                       if (request_irq(i, cpsw_interrupt, 0,
-                                       dev_name(&pdev->dev), priv)) {
+                       if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
+                                            dev_name(priv->dev), priv)) {
                                dev_err(priv->dev, "error attaching irq\n");
                                goto clean_ale_ret;
                        }
@@ -2147,7 +2110,7 @@ static int cpsw_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(priv->dev, "error registering net device\n");
                ret = -ENODEV;
-               goto clean_irq_ret;
+               goto clean_ale_ret;
        }
 
        if (cpts_register(&pdev->dev, priv->cpts,
@@ -2155,44 +2118,27 @@ static int cpsw_probe(struct platform_device *pdev)
                dev_err(priv->dev, "error registering cpts device\n");
 
        cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
-                 priv->cpsw_res->start, ndev->irq);
+                   ss_res->start, ndev->irq);
 
        if (priv->data.dual_emac) {
                ret = cpsw_probe_dual_emac(pdev, priv);
                if (ret) {
                        cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
-                       goto clean_irq_ret;
+                       goto clean_ale_ret;
                }
        }
 
        return 0;
 
-clean_irq_ret:
-       for (i = 0; i < priv->num_irqs; i++)
-               free_irq(priv->irqs_table[i], priv);
 clean_ale_ret:
        cpsw_ale_destroy(priv->ale);
 clean_dma_ret:
        cpdma_chan_destroy(priv->txch);
        cpdma_chan_destroy(priv->rxch);
        cpdma_ctlr_destroy(priv->dma);
-clean_wr_iomap_ret:
-       iounmap(priv->wr_regs);
-clean_cpsw_wr_iores_ret:
-       release_mem_region(priv->cpsw_wr_res->start,
-                          resource_size(priv->cpsw_wr_res));
-clean_iomap_ret:
-       iounmap(priv->regs);
-clean_cpsw_iores_ret:
-       release_mem_region(priv->cpsw_res->start,
-                          resource_size(priv->cpsw_res));
-clean_clk_ret:
-       clk_put(priv->clk);
-clean_slave_ret:
+clean_runtime_disable_ret:
        pm_runtime_disable(&pdev->dev);
-       kfree(priv->slaves);
 clean_ndev_ret:
-       kfree(priv->data.slave_data);
        free_netdev(priv->ndev);
        return ret;
 }
@@ -2201,30 +2147,18 @@ static int cpsw_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct cpsw_priv *priv = netdev_priv(ndev);
-       int i;
 
        if (priv->data.dual_emac)
                unregister_netdev(cpsw_get_slave_ndev(priv, 1));
        unregister_netdev(ndev);
 
        cpts_unregister(priv->cpts);
-       for (i = 0; i < priv->num_irqs; i++)
-               free_irq(priv->irqs_table[i], priv);
 
        cpsw_ale_destroy(priv->ale);
        cpdma_chan_destroy(priv->txch);
        cpdma_chan_destroy(priv->rxch);
        cpdma_ctlr_destroy(priv->dma);
-       iounmap(priv->regs);
-       release_mem_region(priv->cpsw_res->start,
-                          resource_size(priv->cpsw_res));
-       iounmap(priv->wr_regs);
-       release_mem_region(priv->cpsw_wr_res->start,
-                          resource_size(priv->cpsw_wr_res));
        pm_runtime_disable(&pdev->dev);
-       clk_put(priv->clk);
-       kfree(priv->slaves);
-       kfree(priv->data.slave_data);
        if (priv->data.dual_emac)
                free_netdev(cpsw_get_slave_ndev(priv, 1));
        free_netdev(ndev);
@@ -2280,7 +2214,7 @@ static struct platform_driver cpsw_driver = {
                .name    = "cpsw",
                .owner   = THIS_MODULE,
                .pm      = &cpsw_pm_ops,
-               .of_match_table = of_match_ptr(cpsw_of_mtable),
+               .of_match_table = cpsw_of_mtable,
        },
        .probe = cpsw_probe,
        .remove = cpsw_remove,
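The probe/remove rework above leans on managed (devm_*) allocations, mappings, clocks and IRQs, which is what lets most of the old unwind labels collapse into clean_runtime_disable_ret. A generic sketch of that pattern for a hypothetical platform driver follows; the foo_* names and the single memory resource are assumptions, not taken from cpsw:

        #include <linux/clk.h>
        #include <linux/err.h>
        #include <linux/io.h>
        #include <linux/module.h>
        #include <linux/platform_device.h>
        #include <linux/slab.h>

        struct foo_priv {
                void __iomem    *regs;
                struct clk      *fck;
        };

        static int foo_probe(struct platform_device *pdev)
        {
                struct foo_priv *priv;
                struct resource *res;

                /* Freed automatically when the device is unbound */
                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                /* request_mem_region() + ioremap() + error logging in one call;
                 * both are undone by the devres core, so no unwind label is needed.
                 */
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                priv->regs = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(priv->regs))
                        return PTR_ERR(priv->regs);

                /* Likewise, no clk_put() on any error path or in remove() */
                priv->fck = devm_clk_get(&pdev->dev, "fck");
                if (IS_ERR(priv->fck))
                        return PTR_ERR(priv->fck);

                platform_set_drvdata(pdev, priv);
                return 0;
        }

        static struct platform_driver foo_driver = {
                .probe  = foo_probe,
                .driver = {
                        .name = "foo",
                },
        };
        module_platform_driver(foo_driver);
        MODULE_LICENSE("GPL v2");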
index eb3e101ec04878c87f8a3a6dee6243f9d6970b52..574f49da693f194fe9a32ac731c90487498df9ba 100644 (file)
@@ -39,4 +39,6 @@ struct cpsw_platform_data {
        bool    dual_emac;      /* Enable Dual EMAC mode */
 };
 
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+
 #endif /* __CPSW_H__ */
index fe993cdd7e23b76ad13f09585c215d87c83a0e46..1a581ef7eee8fcbd2ed121d64a747e06e84c6113 100644 (file)
@@ -127,8 +127,8 @@ struct cpts {
 };
 
 #ifdef CONFIG_TI_CPTS
-extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
-extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
 #else
 static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 {
@@ -138,8 +138,7 @@ static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
 }
 #endif
 
-extern int cpts_register(struct device *dev, struct cpts *cpts,
-                        u32 mult, u32 shift);
-extern void cpts_unregister(struct cpts *cpts);
+int cpts_register(struct device *dev, struct cpts *cpts, u32 mult, u32 shift);
+void cpts_unregister(struct cpts *cpts);
 
 #endif
index 67df09ea9d045da26420de1e9da09af58ec0edb8..41ba974bf37cb9175c74ab40bba1817e890749e7 100644 (file)
@@ -876,8 +876,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
                    netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
                        mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
                        emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
-               }
-               if (!netdev_mc_empty(ndev)) {
+               } else if (!netdev_mc_empty(ndev)) {
                        struct netdev_hw_addr *ha;
 
                        mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
@@ -1853,7 +1852,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
        }
 
        /* MAC addr and PHY mask , RMII enable info from platform_data */
-       memcpy(priv->mac_addr, pdata->mac_addr, 6);
+       memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN);
        priv->phy_id = pdata->phy_id;
        priv->rmii_en = pdata->rmii_en;
        priv->version = pdata->version;
index 591437e59b9019c9fa0597a8342f63ae5357fcaa..62b19be5183d3349433ee5855ad082b1560b5c69 100644 (file)
@@ -319,7 +319,6 @@ static void tlan_remove_one(struct pci_dev *pdev)
 
        free_netdev(dev);
 
-       pci_set_drvdata(pdev, NULL);
        cancel_work_sync(&priv->tlan_tqueue);
 }
 
index 13e6fff8ca23af28e4b1e2229846522082dd60c4..628b736e5ae776fcf00333bed8c355e4b518314e 100644 (file)
@@ -2230,7 +2230,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
                nz_addr |= mac[i];
 
        if (nz_addr) {
-               memcpy(dev->dev_addr, mac, 6);
+               memcpy(dev->dev_addr, mac, ETH_ALEN);
                dev->addr_len = 6;
        } else {
                eth_hw_addr_random(dev);
index 309abb472aa2040e978fea46faff0cfd81c0bb43..8505196be9f52bd7a982ec9dcb91f850379cfd07 100644 (file)
@@ -359,27 +359,26 @@ static inline void *port_priv(struct gelic_port *port)
 }
 
 #ifdef CONFIG_PPC_EARLY_DEBUG_PS3GELIC
-extern void udbg_shutdown_ps3gelic(void);
+void udbg_shutdown_ps3gelic(void);
 #else
 static inline void udbg_shutdown_ps3gelic(void) {}
 #endif
 
-extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
+int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask);
 /* shared netdev ops */
-extern void gelic_card_up(struct gelic_card *card);
-extern void gelic_card_down(struct gelic_card *card);
-extern int gelic_net_open(struct net_device *netdev);
-extern int gelic_net_stop(struct net_device *netdev);
-extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
-extern void gelic_net_set_multi(struct net_device *netdev);
-extern void gelic_net_tx_timeout(struct net_device *netdev);
-extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
-extern int gelic_net_setup_netdev(struct net_device *netdev,
-                                 struct gelic_card *card);
+void gelic_card_up(struct gelic_card *card);
+void gelic_card_down(struct gelic_card *card);
+int gelic_net_open(struct net_device *netdev);
+int gelic_net_stop(struct net_device *netdev);
+int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+void gelic_net_set_multi(struct net_device *netdev);
+void gelic_net_tx_timeout(struct net_device *netdev);
+int gelic_net_change_mtu(struct net_device *netdev, int new_mtu);
+int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card);
 
 /* shared ethtool ops */
-extern void gelic_net_get_drvinfo(struct net_device *netdev,
-                                 struct ethtool_drvinfo *info);
-extern void gelic_net_poll_controller(struct net_device *netdev);
+void gelic_net_get_drvinfo(struct net_device *netdev,
+                          struct ethtool_drvinfo *info);
+void gelic_net_poll_controller(struct net_device *netdev);
 
 #endif /* _GELIC_NET_H */
index f7e51b7d704960a2eb45ae63d4ed19f134d6d105..11f443d8e4ea9042b6eb6831b0c8d62612b8ad52 100644 (file)
@@ -320,7 +320,7 @@ struct gelic_eurus_cmd {
 #define GELIC_WL_PRIV_SET_PSK          (SIOCIWFIRSTPRIV + 0)
 #define GELIC_WL_PRIV_GET_PSK          (SIOCIWFIRSTPRIV + 1)
 
-extern int gelic_wl_driver_probe(struct gelic_card *card);
-extern int gelic_wl_driver_remove(struct gelic_card *card);
-extern void gelic_wl_interrupt(struct net_device *netdev, u64 status);
+int gelic_wl_driver_probe(struct gelic_card *card);
+int gelic_wl_driver_remove(struct gelic_card *card);
+void gelic_wl_interrupt(struct net_device *netdev, u64 status);
 #endif /* _GELIC_WIRELESS_H */
index 5734480c1ecfa5f91a134720fe478bb68826fc50..3f4a32e39d276f6fcf9c9d9d16c4aec440b9daf6 100644 (file)
@@ -2478,7 +2478,6 @@ out_release_regions:
        pci_release_regions(pdev);
 out_disable_dev:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        return NULL;
 }
 
index 4ba2135474d1a118a7a86b579aae38b11eb2c56a..9b6af0845a1101deca4b2083412dd1764fe2cc5b 100644 (file)
@@ -29,8 +29,8 @@
 
 #include <linux/sungem_phy.h>
 
-extern int spider_net_stop(struct net_device *netdev);
-extern int spider_net_open(struct net_device *netdev);
+int spider_net_stop(struct net_device *netdev);
+int spider_net_open(struct net_device *netdev);
 
 extern const struct ethtool_ops spider_net_ethtool_ops;
 
index a971b9cca564c910f8e928806ea9ac443ed22cf2..1322546d92ac97287a358cbea644568aedb98168 100644 (file)
@@ -887,7 +887,6 @@ static void tc35815_remove_one(struct pci_dev *pdev)
        mdiobus_free(lp->mii_bus);
        unregister_netdev(dev);
        free_netdev(dev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static int
index bdf697b184ae14246a4173d09833367ba312d7df..4a7293ed95e9c124b84ec87cabf1a9c1ef6ab1f9 100644 (file)
@@ -2292,7 +2292,6 @@ static void rhine_remove_one(struct pci_dev *pdev)
 
        free_netdev(dev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static void rhine_shutdown (struct pci_dev *pdev)
index 4c619ea5189fc22308b8fe36851824e72eaad0ab..74234a51c851186c0c9bcfbc140b261724144e20 100644 (file)
@@ -31,7 +31,7 @@
 #define DRIVER_NAME "xilinx_emaclite"
 
 /* Register offsets for the EmacLite Core */
-#define XEL_TXBUFF_OFFSET      0x0             /* Transmit Buffer */
+#define XEL_TXBUFF_OFFSET      0x0             /* Transmit Buffer */
 #define XEL_MDIOADDR_OFFSET    0x07E4          /* MDIO Address Register */
 #define XEL_MDIOWR_OFFSET      0x07E8          /* MDIO Write Data Register */
 #define XEL_MDIORD_OFFSET      0x07EC          /* MDIO Read Data Register */
 #define XEL_MDIOCTRL_MDIOEN_MASK  0x00000008   /* MDIO Enable */
 
 /* Global Interrupt Enable Register (GIER) Bit Masks */
-#define XEL_GIER_GIE_MASK      0x80000000      /* Global Enable */
+#define XEL_GIER_GIE_MASK      0x80000000      /* Global Enable */
 
 /* Transmit Status Register (TSR) Bit Masks */
-#define XEL_TSR_XMIT_BUSY_MASK  0x00000001     /* Tx complete */
-#define XEL_TSR_PROGRAM_MASK    0x00000002     /* Program the MAC address */
-#define XEL_TSR_XMIT_IE_MASK    0x00000008     /* Tx interrupt enable bit */
-#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000    /* Buffer is active, SW bit
+#define XEL_TSR_XMIT_BUSY_MASK  0x00000001     /* Tx complete */
+#define XEL_TSR_PROGRAM_MASK    0x00000002     /* Program the MAC address */
+#define XEL_TSR_XMIT_IE_MASK    0x00000008     /* Tx interrupt enable bit */
+#define XEL_TSR_XMIT_ACTIVE_MASK 0x80000000    /* Buffer is active, SW bit
                                                 * only. This is not documented
                                                 * in the HW spec */
 
 #define XEL_TSR_PROG_MAC_ADDR  (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_PROGRAM_MASK)
 
 /* Receive Status Register (RSR) */
-#define XEL_RSR_RECV_DONE_MASK 0x00000001      /* Rx complete */
-#define XEL_RSR_RECV_IE_MASK   0x00000008      /* Rx interrupt enable bit */
+#define XEL_RSR_RECV_DONE_MASK 0x00000001      /* Rx complete */
+#define XEL_RSR_RECV_IE_MASK   0x00000008      /* Rx interrupt enable bit */
 
 /* Transmit Packet Length Register (TPLR) */
-#define XEL_TPLR_LENGTH_MASK   0x0000FFFF      /* Tx packet length */
+#define XEL_TPLR_LENGTH_MASK   0x0000FFFF      /* Tx packet length */
 
 /* Receive Packet Length Register (RPLR) */
-#define XEL_RPLR_LENGTH_MASK   0x0000FFFF      /* Rx packet length */
+#define XEL_RPLR_LENGTH_MASK   0x0000FFFF      /* Rx packet length */
 
-#define XEL_HEADER_OFFSET      12              /* Offset to length field */
-#define XEL_HEADER_SHIFT       16              /* Shift value for length */
+#define XEL_HEADER_OFFSET      12              /* Offset to length field */
+#define XEL_HEADER_SHIFT       16              /* Shift value for length */
 
 /* General Ethernet Definitions */
-#define XEL_ARP_PACKET_SIZE            28      /* Max ARP packet size */
-#define XEL_HEADER_IP_LENGTH_OFFSET    16      /* IP Length Offset */
+#define XEL_ARP_PACKET_SIZE            28      /* Max ARP packet size */
+#define XEL_HEADER_IP_LENGTH_OFFSET    16      /* IP Length Offset */
 
 
 
@@ -1075,14 +1075,9 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
  * This function unmaps the IO region of the Emaclite device and frees the net
  * device.
  */
-static void xemaclite_remove_ndev(struct net_device *ndev,
-                                 struct platform_device *pdev)
+static void xemaclite_remove_ndev(struct net_device *ndev)
 {
        if (ndev) {
-               struct net_local *lp = netdev_priv(ndev);
-
-               if (lp->base_addr)
-                       devm_iounmap(&pdev->dev, lp->base_addr);
                free_netdev(ndev);
        }
 }
@@ -1177,7 +1172,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
 
        if (mac_address)
                /* Set the MAC address. */
-               memcpy(ndev->dev_addr, mac_address, 6);
+               memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
        else
                dev_warn(dev, "No MAC address found\n");
 
@@ -1214,7 +1209,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        return 0;
 
 error:
-       xemaclite_remove_ndev(ndev, ofdev);
+       xemaclite_remove_ndev(ndev);
        return rc;
 }
 
@@ -1248,7 +1243,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
                of_node_put(lp->phy_node);
        lp->phy_node = NULL;
 
-       xemaclite_remove_ndev(ndev, of_dev);
+       xemaclite_remove_ndev(ndev);
 
        return 0;
 }
index a20ed1a98099f3d959317f68964bbd88639e3d8b..f8399359017405756f06641fa6c0405c5d5ea0a9 100644 (file)
@@ -453,7 +453,7 @@ static void directed_beacon(struct s_smc *smc)
         */
        * (char *) a = (char) ((long)DBEACON_INFO<<24L) ;
        a[1] = 0 ;
-       memcpy((char *)a+1,(char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr,6) ;
+       memcpy((char *)a+1, (char *) &smc->mib.m[MAC0].fddiMACUpstreamNbr, ETH_ALEN);
 
        CHECK_NPP() ;
         /* set memory address reg for writes */
index 3ca308b282148832b534ae91f17f33991f6f6d16..bd1166bf8f61a55c0e3440d2762f1b1e0765cc7b 100644 (file)
@@ -469,20 +469,20 @@ struct s_smc {
 
 extern const struct fddi_addr fddi_broadcast;
 
-extern void all_selection_criteria(struct s_smc *smc);
-extern void card_stop(struct s_smc *smc);
-extern void init_board(struct s_smc *smc, u_char *mac_addr);
-extern int init_fplus(struct s_smc *smc);
-extern void init_plc(struct s_smc *smc);
-extern int init_smt(struct s_smc *smc, u_char * mac_addr);
-extern void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
-extern void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
-extern void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
-extern int pcm_status_twisted(struct s_smc *smc);
-extern void plc1_irq(struct s_smc *smc);
-extern void plc2_irq(struct s_smc *smc);
-extern void read_address(struct s_smc *smc, u_char * mac_addr);
-extern void timer_irq(struct s_smc *smc);
+void all_selection_criteria(struct s_smc *smc);
+void card_stop(struct s_smc *smc);
+void init_board(struct s_smc *smc, u_char *mac_addr);
+int init_fplus(struct s_smc *smc);
+void init_plc(struct s_smc *smc);
+int init_smt(struct s_smc *smc, u_char *mac_addr);
+void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
+void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
+void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
+int pcm_status_twisted(struct s_smc *smc);
+void plc1_irq(struct s_smc *smc);
+void plc2_irq(struct s_smc *smc);
+void read_address(struct s_smc *smc, u_char *mac_addr);
+void timer_irq(struct s_smc *smc);
 
 #endif /* _SCMECM_ */
 
index f5d7305a5784174f9868f045d186e9c90d2bbc20..713d303a06a9e1d2ea06e198a9682beb4bc54fc2 100644 (file)
@@ -436,7 +436,7 @@ static  int skfp_driver_init(struct net_device *dev)
        }
        read_address(smc, NULL);
        pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
-       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
 
        smt_reset_defaults(smc, 0);
 
@@ -503,7 +503,7 @@ static int skfp_open(struct net_device *dev)
         *               address.
         */
        read_address(smc, NULL);
-       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
+       memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
 
        init_smt(smc, NULL);
        smt_online(smc, 1);
@@ -1213,7 +1213,7 @@ static void CheckSourceAddress(unsigned char *frame, unsigned char *hw_addr)
        if ((unsigned short) frame[1 + 10] != 0)
                return;
        SRBit = frame[1 + 6] & 0x01;
-       memcpy(&frame[1 + 6], hw_addr, 6);
+       memcpy(&frame[1 + 6], hw_addr, ETH_ALEN);
        frame[8] |= SRBit;
 }                              // CheckSourceAddress
 
index a974727dd9a2bb604746a20b3f4e65d22b9801db..636b65c66d49e6568b1e8535e66fac5b9a8dbd49 100644 (file)
@@ -445,7 +445,7 @@ static int ser12_open(struct net_device *dev)
        outb(0, FCR(dev->base_addr));  /* disable FIFOs */
        outb(0x0d, MCR(dev->base_addr));
        outb(0, IER(dev->base_addr));
-       if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+       if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
                        "baycom_ser_fdx", dev)) {
                release_region(dev->base_addr, SER12_EXTENT);
                return -EBUSY;
index e349d867449b2d744ff9550535317c93c069f07b..f9a8976195ba05f0fc07723969e5bb2136140a9b 100644 (file)
@@ -490,7 +490,7 @@ static int ser12_open(struct net_device *dev)
        outb(0, FCR(dev->base_addr));  /* disable FIFOs */
        outb(0x0d, MCR(dev->base_addr));
        outb(0, IER(dev->base_addr));
-       if (request_irq(dev->irq, ser12_interrupt, IRQF_DISABLED | IRQF_SHARED,
+       if (request_irq(dev->irq, ser12_interrupt, IRQF_SHARED,
                        "baycom_ser12", dev)) {
                release_region(dev->base_addr, SER12_EXTENT);       
                return -EBUSY;
index bc1d5217038985565f05e15fe98b2df4078c4114..4bc6ee8e7987796b04a6f1ef5a1a5ffe044ec8af 100644 (file)
@@ -1734,7 +1734,7 @@ static int scc_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        if (!Ivec[hwcfg.irq].used && hwcfg.irq)
                        {
                                if (request_irq(hwcfg.irq, scc_isr,
-                                               IRQF_DISABLED, "AX.25 SCC",
+                                               0, "AX.25 SCC",
                                                (void *)(long) hwcfg.irq))
                                        printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
                                else
index 0721e72f9299250c6c29f0e1d3f7f51affffe7e8..1971411574db1c7ae2dd6549490ab6975e816cd1 100644 (file)
@@ -888,7 +888,7 @@ static int yam_open(struct net_device *dev)
                goto out_release_base;
        }
        outb(0, IER(dev->base_addr));
-       if (request_irq(dev->irq, yam_interrupt, IRQF_DISABLED | IRQF_SHARED, dev->name, dev)) {
+       if (request_irq(dev->irq, yam_interrupt, IRQF_SHARED, dev->name, dev)) {
                printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
                ret = -EBUSY;
                goto out_release_base;
@@ -975,7 +975,6 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        return -EINVAL;         /* Cannot change this parameter when up */
                if ((ym = kmalloc(sizeof(struct yamdrv_ioctl_mcs), GFP_KERNEL)) == NULL)
                        return -ENOBUFS;
-               ym->bitrate = 9600;
                if (copy_from_user(ym, ifr->ifr_data, sizeof(struct yamdrv_ioctl_mcs))) {
                        kfree(ym);
                        return -EFAULT;
index 42e6deee6db55ed607170109438960523d6b9b8e..0632d34905c73811456594cf0bf5e27f8710f5c7 100644 (file)
@@ -82,7 +82,6 @@ struct mrf24j40 {
 
        struct mutex buffer_mutex; /* only used to protect buf */
        struct completion tx_complete;
-       struct work_struct irqwork;
        u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */
 };
 
@@ -344,6 +343,8 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
        if (ret)
                goto err;
 
+       INIT_COMPLETION(devrec->tx_complete);
+
        /* Set TXNTRIG bit of TXNCON to send packet */
        ret = read_short_reg(devrec, REG_TXNCON, &val);
        if (ret)
@@ -354,8 +355,6 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
                val |= 0x4;
        write_short_reg(devrec, REG_TXNCON, val);
 
-       INIT_COMPLETION(devrec->tx_complete);
-
        /* Wait for the device to send the TX complete interrupt. */
        ret = wait_for_completion_interruptible_timeout(
                                                &devrec->tx_complete,
@@ -590,17 +589,6 @@ static struct ieee802154_ops mrf24j40_ops = {
 static irqreturn_t mrf24j40_isr(int irq, void *data)
 {
        struct mrf24j40 *devrec = data;
-
-       disable_irq_nosync(irq);
-
-       schedule_work(&devrec->irqwork);
-
-       return IRQ_HANDLED;
-}
-
-static void mrf24j40_isrwork(struct work_struct *work)
-{
-       struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork);
        u8 intstat;
        int ret;
 
@@ -618,7 +606,7 @@ static void mrf24j40_isrwork(struct work_struct *work)
                mrf24j40_handle_rx(devrec);
 
 out:
-       enable_irq(devrec->spi->irq);
+       return IRQ_HANDLED;
 }
 
 static int mrf24j40_probe(struct spi_device *spi)
@@ -642,7 +630,6 @@ static int mrf24j40_probe(struct spi_device *spi)
 
        mutex_init(&devrec->buffer_mutex);
        init_completion(&devrec->tx_complete);
-       INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
        devrec->spi = spi;
        spi_set_drvdata(spi, devrec);
 
@@ -688,11 +675,12 @@ static int mrf24j40_probe(struct spi_device *spi)
        val &= ~0x3; /* Clear RX mode (normal) */
        write_short_reg(devrec, REG_RXMCR, val);
 
-       ret = request_irq(spi->irq,
-                         mrf24j40_isr,
-                         IRQF_TRIGGER_FALLING,
-                         dev_name(&spi->dev),
-                         devrec);
+       ret = request_threaded_irq(spi->irq,
+                                  NULL,
+                                  mrf24j40_isr,
+                                  IRQF_TRIGGER_LOW|IRQF_ONESHOT,
+                                  dev_name(&spi->dev),
+                                  devrec);
 
        if (ret) {
                dev_err(printdev(devrec), "Unable to get IRQ");
@@ -721,7 +709,6 @@ static int mrf24j40_remove(struct spi_device *spi)
        dev_dbg(printdev(devrec), "remove\n");
 
        free_irq(spi->irq, devrec);
-       flush_work(&devrec->irqwork); /* TODO: Is this the right call? */
        ieee802154_unregister_device(devrec->dev);
        ieee802154_free_device(devrec->dev);
        /* TODO: Will ieee802154_free_device() wait until ->xmit() is
index c74f384c87d50a0314b84e2808ab735c00ee8684..303c4bd26e17953de1693475b042266eac8dca90 100644 (file)
@@ -411,12 +411,12 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
 
 #else
 
-       if (request_irq(port->irq, bfin_sir_rx_int, IRQF_DISABLED, "BFIN_SIR_RX", dev)) {
+       if (request_irq(port->irq, bfin_sir_rx_int, 0, "BFIN_SIR_RX", dev)) {
                dev_warn(&dev->dev, "Unable to attach SIR RX interrupt\n");
                return -EBUSY;
        }
 
-       if (request_irq(port->irq+1, bfin_sir_tx_int, IRQF_DISABLED, "BFIN_SIR_TX", dev)) {
+       if (request_irq(port->irq+1, bfin_sir_tx_int, 0, "BFIN_SIR_TX", dev)) {
                dev_warn(&dev->dev, "Unable to attach SIR TX interrupt\n");
                free_irq(port->irq, dev);
                return -EBUSY;
index 31bcb98ef35609b9f0be24cfc63bcd4fd43ae852..768dfe9a93159e4964d4b9c0bc6986da2e6c09b6 100644 (file)
@@ -1352,7 +1352,7 @@ toshoboe_net_open (struct net_device *dev)
     return 0;
 
   rc = request_irq (self->io.irq, toshoboe_interrupt,
-                    IRQF_SHARED | IRQF_DISABLED, dev->name, self);
+                    IRQF_SHARED, dev->name, self);
   if (rc)
        return rc;
 
@@ -1559,7 +1559,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
   self->io.fir_base = self->base;
   self->io.fir_ext = OBOE_IO_EXTENT;
   self->io.irq = pci_dev->irq;
-  self->io.irqflags = IRQF_SHARED | IRQF_DISABLED;
+  self->io.irqflags = IRQF_SHARED;
 
   self->speed = self->io.speed = 9600;
   self->async = 0;
index 4455425f1c777865676150fa5da63b94f0ffc736..ff45cd0d60e84f88b5e34ec40f61e2e5e0d85eb3 100644 (file)
@@ -804,7 +804,7 @@ static int sh_irda_probe(struct platform_device *pdev)
                goto err_mem_4;
 
        platform_set_drvdata(pdev, ndev);
-       err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
+       err = request_irq(irq, sh_irda_irq, 0, "sh_irda", self);
        if (err) {
                dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
                goto err_mem_4;
index 89682b49900ff2cac834c85cc58429ec4fb65ec3..8d9ae5a086d54d8d78ba8ae8ee4fee1c9fd76475 100644 (file)
@@ -761,7 +761,7 @@ static int sh_sir_probe(struct platform_device *pdev)
                goto err_mem_4;
 
        platform_set_drvdata(pdev, ndev);
-       err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
+       err = request_irq(irq, sh_sir_irq, 0, "sh_sir", self);
        if (err) {
                dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
                goto err_mem_4;
index 6d5b1e2b12893271adb7fd7d4c373cde4823d151..f50b9c1c0639008b1e314099f4918e3eb0ed57db 100644 (file)
@@ -102,28 +102,29 @@ struct sir_driver {
 
 /* exported */
 
-extern int irda_register_dongle(struct dongle_driver *new);
-extern int irda_unregister_dongle(struct dongle_driver *drv);
+int irda_register_dongle(struct dongle_driver *new);
+int irda_unregister_dongle(struct dongle_driver *drv);
 
-extern struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *name);
-extern int sirdev_put_instance(struct sir_dev *self);
+struct sir_dev *sirdev_get_instance(const struct sir_driver *drv,
+                                   const char *name);
+int sirdev_put_instance(struct sir_dev *self);
 
-extern int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
-extern void sirdev_write_complete(struct sir_dev *dev);
-extern int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
+int sirdev_set_dongle(struct sir_dev *dev, IRDA_DONGLE type);
+void sirdev_write_complete(struct sir_dev *dev);
+int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count);
 
 /* low level helpers for SIR device/dongle setup */
-extern int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
-extern int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
-extern int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
+int sirdev_raw_write(struct sir_dev *dev, const char *buf, int len);
+int sirdev_raw_read(struct sir_dev *dev, char *buf, int len);
+int sirdev_set_dtr_rts(struct sir_dev *dev, int dtr, int rts);
 
 /* not exported */
 
-extern int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
-extern int sirdev_put_dongle(struct sir_dev *self);
+int sirdev_get_dongle(struct sir_dev *self, IRDA_DONGLE type);
+int sirdev_put_dongle(struct sir_dev *self);
 
-extern void sirdev_enable_rx(struct sir_dev *dev);
-extern int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
+void sirdev_enable_rx(struct sir_dev *dev);
+int sirdev_schedule_request(struct sir_dev *dev, int state, unsigned param);
 
 /* inline helpers */
 
index 9bf46bd19b87bf947fd79e547bdde626c8a12097..cc9845ec91c1b1be56cc960a0768ee71b8db69c9 100644 (file)
@@ -828,22 +828,21 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
                eth_hw_addr_inherit(dev, lowerdev);
        }
 
+       port->count += 1;
+       err = register_netdevice(dev);
+       if (err < 0)
+               goto destroy_port;
+
        err = netdev_upper_dev_link(lowerdev, dev);
        if (err)
                goto destroy_port;
 
-       port->count += 1;
-       err = register_netdevice(dev);
-       if (err < 0)
-               goto upper_dev_unlink;
 
        list_add_tail_rcu(&vlan->list, &port->vlans);
        netif_stacked_transfer_operstate(lowerdev, dev);
 
        return 0;
 
-upper_dev_unlink:
-       netdev_upper_dev_unlink(lowerdev, dev);
 destroy_port:
        port->count -= 1;
        if (!port->count)
index ac22283aaf23213ab9bf9d931f76df602ccea09d..bc71947b1ec329f2eacd52b915b37ce56a4f16bb 100644 (file)
@@ -100,6 +100,45 @@ static void at803x_get_wol(struct phy_device *phydev,
                wol->wolopts |= WAKE_MAGIC;
 }
 
+static int at803x_suspend(struct phy_device *phydev)
+{
+       int value;
+       int wol_enabled;
+
+       mutex_lock(&phydev->lock);
+
+       value = phy_read(phydev, AT803X_INTR_ENABLE);
+       wol_enabled = value & AT803X_WOL_ENABLE;
+
+       value = phy_read(phydev, MII_BMCR);
+
+       if (wol_enabled)
+               value |= BMCR_ISOLATE;
+       else
+               value |= BMCR_PDOWN;
+
+       phy_write(phydev, MII_BMCR, value);
+
+       mutex_unlock(&phydev->lock);
+
+       return 0;
+}
+
+static int at803x_resume(struct phy_device *phydev)
+{
+       int value;
+
+       mutex_lock(&phydev->lock);
+
+       value = phy_read(phydev, MII_BMCR);
+       value &= ~(BMCR_PDOWN | BMCR_ISOLATE);
+       phy_write(phydev, MII_BMCR, value);
+
+       mutex_unlock(&phydev->lock);
+
+       return 0;
+}
+
 static int at803x_config_init(struct phy_device *phydev)
 {
        int val;
@@ -161,10 +200,12 @@ static struct phy_driver at803x_driver[] = {
        .config_init    = at803x_config_init,
        .set_wol        = at803x_set_wol,
        .get_wol        = at803x_get_wol,
+       .suspend        = at803x_suspend,
+       .resume         = at803x_resume,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = &genphy_config_aneg,
-       .read_status    = &genphy_read_status,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
        .driver         = {
                .owner = THIS_MODULE,
        },
@@ -176,10 +217,12 @@ static struct phy_driver at803x_driver[] = {
        .config_init    = at803x_config_init,
        .set_wol        = at803x_set_wol,
        .get_wol        = at803x_get_wol,
+       .suspend        = at803x_suspend,
+       .resume         = at803x_resume,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = &genphy_config_aneg,
-       .read_status    = &genphy_read_status,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
        .driver         = {
                .owner = THIS_MODULE,
        },
@@ -191,10 +234,12 @@ static struct phy_driver at803x_driver[] = {
        .config_init    = at803x_config_init,
        .set_wol        = at803x_set_wol,
        .get_wol        = at803x_get_wol,
+       .suspend        = at803x_suspend,
+       .resume         = at803x_resume,
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = &genphy_config_aneg,
-       .read_status    = &genphy_read_status,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
        .driver         = {
                .owner = THIS_MODULE,
        },
index 2e91477362d4d70b15df3db59ad41dec990a3c57..2e3c778ea9bf6f0437f95759f12c902a76cf9e7c 100644 (file)
@@ -34,9 +34,9 @@
 #include <linux/marvell_phy.h>
 #include <linux/of.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #define MII_MARVELL_PHY_PAGE           22
 
index c31aad0004cb5ed93453089114e9f7dc31894ab2..3ae28f420868fc35af3b3ed4e652f96e43a8f35b 100644 (file)
@@ -287,6 +287,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = ks8737_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8021,
@@ -300,6 +302,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8031,
@@ -313,6 +317,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8041,
@@ -326,6 +332,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8051,
@@ -339,6 +347,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8001,
@@ -351,6 +361,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8081,
@@ -363,6 +375,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ8061,
@@ -375,6 +389,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = kszphy_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE,},
 }, {
        .phy_id         = PHY_ID_KSZ9021,
@@ -387,6 +403,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = ksz9021_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE, },
 }, {
        .phy_id         = PHY_ID_KSZ9031,
@@ -400,6 +418,8 @@ static struct phy_driver ksphy_driver[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = kszphy_ack_interrupt,
        .config_intr    = ksz9021_config_intr,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE, },
 }, {
        .phy_id         = PHY_ID_KSZ8873MLL,
@@ -410,6 +430,8 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE, },
 }, {
        .phy_id         = PHY_ID_KSZ886X,
@@ -420,6 +442,8 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = kszphy_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
+       .suspend        = genphy_suspend,
+       .resume         = genphy_resume,
        .driver         = { .owner = THIS_MODULE, },
 } };
 
index 1f7bef90b46757a6ce2485c9aefd5d197464b7b1..7b4ff35c8bf7dcb28b455fb3a578eacddb298900 100644 (file)
@@ -1002,7 +1002,7 @@ plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
                /* Any address will do - we take the first */
                const struct in_ifaddr *ifa = in_dev->ifa_list;
                if (ifa) {
-                       memcpy(eth->h_source, dev->dev_addr, 6);
+                       memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
                        memset(eth->h_dest, 0xfc, 2);
                        memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
                }
index 807815fc996839d14efd18625fb300a2f48d2577..7cb105c103fe9408eb7c02b96dac4f4bfb702456 100644 (file)
@@ -1293,7 +1293,8 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
        if (unlikely(!noblock))
                add_wait_queue(&tfile->wq.wait, &wait);
        while (len) {
-               current->state = TASK_INTERRUPTIBLE;
+               if (unlikely(!noblock))
+                       current->state = TASK_INTERRUPTIBLE;
 
                /* Read frames from the queue */
                if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
@@ -1320,9 +1321,10 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
                break;
        }
 
-       current->state = TASK_RUNNING;
-       if (unlikely(!noblock))
+       if (unlikely(!noblock)) {
+               current->state = TASK_RUNNING;
                remove_wait_queue(&tfile->wq.wait, &wait);
+       }
 
        return ret;
 }
index 3569293df8726df8c87786ad83b5be506e6b3285..846cc19c04f23f2f5a15adf65d8098cc36513eb6 100644 (file)
@@ -36,8 +36,8 @@
 #define AX_RXHDR_L4_TYPE_TCP                   16
 #define AX_RXHDR_L3CSUM_ERR                    2
 #define AX_RXHDR_L4CSUM_ERR                    1
-#define AX_RXHDR_CRC_ERR                       ((u32)BIT(31))
-#define AX_RXHDR_DROP_ERR                      ((u32)BIT(30))
+#define AX_RXHDR_CRC_ERR                       ((u32)BIT(29))
+#define AX_RXHDR_DROP_ERR                      ((u32)BIT(31))
 #define AX_ACCESS_MAC                          0x01
 #define AX_ACCESS_PHY                          0x02
 #define AX_ACCESS_EEPROM                       0x04
@@ -1406,6 +1406,19 @@ static const struct driver_info sitecom_info = {
        .tx_fixup = ax88179_tx_fixup,
 };
 
+static const struct driver_info samsung_info = {
+       .description = "Samsung USB Ethernet Adapter",
+       .bind = ax88179_bind,
+       .unbind = ax88179_unbind,
+       .status = ax88179_status,
+       .link_reset = ax88179_link_reset,
+       .reset = ax88179_reset,
+       .stop = ax88179_stop,
+       .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+       .rx_fixup = ax88179_rx_fixup,
+       .tx_fixup = ax88179_tx_fixup,
+};
+
 static const struct usb_device_id products[] = {
 {
        /* ASIX AX88179 10/100/1000 */
@@ -1418,7 +1431,11 @@ static const struct usb_device_id products[] = {
 }, {
        /* Sitecom USB 3.0 to Gigabit Adapter */
        USB_DEVICE(0x0df6, 0x0072),
-       .driver_info = (unsigned long) &sitecom_info,
+       .driver_info = (unsigned long)&sitecom_info,
+}, {
+       /* Samsung USB Ethernet Adapter */
+       USB_DEVICE(0x04e8, 0xa100),
+       .driver_info = (unsigned long)&samsung_info,
 },
        { },
 };
index 8d5cac2d8e33bee6b2545d45e83d24f52f8ac4c3..df507e6dbb9c99d9a15f718128e008f06ba11cdb 100644 (file)
@@ -640,10 +640,10 @@ static void catc_set_multicast_list(struct net_device *netdev)
 {
        struct catc *catc = netdev_priv(netdev);
        struct netdev_hw_addr *ha;
-       u8 broadcast[6];
+       u8 broadcast[ETH_ALEN];
        u8 rx = RxEnable | RxPolarity | RxMultiCast;
 
-       memset(broadcast, 0xff, 6);
+       memset(broadcast, 0xff, ETH_ALEN);
        memset(catc->multicast, 0, 64);
 
        catc_multicast(broadcast, catc->multicast);
@@ -778,7 +778,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
        struct usb_device *usbdev = interface_to_usbdev(intf);
        struct net_device *netdev;
        struct catc *catc;
-       u8 broadcast[6];
+       u8 broadcast[ETH_ALEN];
        int i, pktsz;
 
        if (usb_set_interface(usbdev,
@@ -882,7 +882,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
                
                dev_dbg(dev, "Filling the multicast list.\n");
          
-               memset(broadcast, 0xff, 6);
+               memset(broadcast, 0xff, ETH_ALEN);
                catc_multicast(broadcast, catc->multicast);
                catc_multicast(netdev->dev_addr, catc->multicast);
                catc_write_mem(catc, 0xfa80, catc->multicast, 64);
index 7d78669000d704d8448aea5e24a8a73c312badfc..6358d420e185b4d4cb7b64a0c2a459fc5b70ed7f 100644 (file)
@@ -328,7 +328,7 @@ MODULE_DEVICE_TABLE(usb, usbpn_ids);
 
 static struct usb_driver usbpn_driver;
 
-int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
        static const char ifname[] = "usbpn%d";
        const struct usb_cdc_union_desc *union_header = NULL;
index 3d6aaf79d8b2399565b4e9c1a64edd4fe27b90af..e0a4a2b08e4526a14fc20e64346af85e88623ed2 100644 (file)
@@ -143,16 +143,22 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
 };
 
-/* using a counter to merge subdriver requests with our own into a combined state */
+/* using a counter to merge subdriver requests with our own into a
+ * combined state
+ */
 static int qmi_wwan_manage_power(struct usbnet *dev, int on)
 {
        struct qmi_wwan_state *info = (void *)&dev->data;
        int rv = 0;
 
-       dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+       dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__,
+               atomic_read(&info->pmcount), on);
 
-       if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
-               /* need autopm_get/put here to ensure the usbcore sees the new value */
+       if ((on && atomic_add_return(1, &info->pmcount) == 1) ||
+           (!on && atomic_dec_and_test(&info->pmcount))) {
+               /* need autopm_get/put here to ensure the usbcore sees
+                * the new value
+                */
                rv = usb_autopm_get_interface(dev->intf);
                if (rv < 0)
                        goto err;
@@ -199,7 +205,8 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev)
        atomic_set(&info->pmcount, 0);
 
        /* register subdriver */
-       subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power);
+       subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc,
+                                        4096, &qmi_wwan_cdc_wdm_manage_power);
        if (IS_ERR(subdriver)) {
                dev_err(&info->control->dev, "subdriver registration failed\n");
                rv = PTR_ERR(subdriver);
@@ -228,7 +235,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
        struct usb_driver *driver = driver_of(intf);
        struct qmi_wwan_state *info = (void *)&dev->data;
 
-       BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
+       BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) <
+                     sizeof(struct qmi_wwan_state)));
 
        /* set up initial state */
        info->control = intf;
@@ -250,7 +258,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
                                goto err;
                        }
                        if (h->bLength != sizeof(struct usb_cdc_header_desc)) {
-                               dev_dbg(&intf->dev, "CDC header len %u\n", h->bLength);
+                               dev_dbg(&intf->dev, "CDC header len %u\n",
+                                       h->bLength);
                                goto err;
                        }
                        break;
@@ -260,7 +269,8 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
                                goto err;
                        }
                        if (h->bLength != sizeof(struct usb_cdc_union_desc)) {
-                               dev_dbg(&intf->dev, "CDC union len %u\n", h->bLength);
+                               dev_dbg(&intf->dev, "CDC union len %u\n",
+                                       h->bLength);
                                goto err;
                        }
                        cdc_union = (struct usb_cdc_union_desc *)buf;
@@ -271,15 +281,15 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
                                goto err;
                        }
                        if (h->bLength != sizeof(struct usb_cdc_ether_desc)) {
-                               dev_dbg(&intf->dev, "CDC ether len %u\n",  h->bLength);
+                               dev_dbg(&intf->dev, "CDC ether len %u\n",
+                                       h->bLength);
                                goto err;
                        }
                        cdc_ether = (struct usb_cdc_ether_desc *)buf;
                        break;
                }
 
-               /*
-                * Remember which CDC functional descriptors we've seen.  Works
+               /* Remember which CDC functional descriptors we've seen.  Works
                 * for all types we care about, of which USB_CDC_ETHERNET_TYPE
                 * (0x0f) is the highest numbered
                 */
@@ -293,10 +303,14 @@ next_desc:
 
        /* Use separate control and data interfaces if we found a CDC Union */
        if (cdc_union) {
-               info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
-               if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
-                       dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
-                               cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
+               info->data = usb_ifnum_to_if(dev->udev,
+                                            cdc_union->bSlaveInterface0);
+               if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 ||
+                   !info->data) {
+                       dev_err(&intf->dev,
+                               "bogus CDC Union: master=%u, slave=%u\n",
+                               cdc_union->bMasterInterface0,
+                               cdc_union->bSlaveInterface0);
                        goto err;
                }
        }
@@ -374,8 +388,7 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
        struct qmi_wwan_state *info = (void *)&dev->data;
        int ret;
 
-       /*
-        * Both usbnet_suspend() and subdriver->suspend() MUST return 0
+       /* Both usbnet_suspend() and subdriver->suspend() MUST return 0
         * in system sleep context, otherwise, the resume callback has
         * to recover device from previous suspend failure.
         */
@@ -383,7 +396,8 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
        if (ret < 0)
                goto err;
 
-       if (intf == info->control && info->subdriver && info->subdriver->suspend)
+       if (intf == info->control && info->subdriver &&
+           info->subdriver->suspend)
                ret = info->subdriver->suspend(intf, message);
        if (ret < 0)
                usbnet_resume(intf);
@@ -396,7 +410,8 @@ static int qmi_wwan_resume(struct usb_interface *intf)
        struct usbnet *dev = usb_get_intfdata(intf);
        struct qmi_wwan_state *info = (void *)&dev->data;
        int ret = 0;
-       bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume);
+       bool callsub = (intf == info->control && info->subdriver &&
+                       info->subdriver->resume);
 
        if (callsub)
                ret = info->subdriver->resume(intf);
@@ -714,6 +729,8 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
+       {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},    /* Telit LE920 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},    /* Olivetti Olicard 200 */
        {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
 
        /* 4. Gobi 1000 devices */
@@ -776,7 +793,8 @@ static const struct usb_device_id products[] = {
 };
 MODULE_DEVICE_TABLE(usb, products);
 
-static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
+static int qmi_wwan_probe(struct usb_interface *intf,
+                         const struct usb_device_id *prod)
 {
        struct usb_device_id *id = (struct usb_device_id *)prod;
 
index bf94e10a37c8e0121d783fc54c1b565b57a7ce21..90a429b7ebad8497d317639389c041d534366255 100644 (file)
@@ -1688,8 +1688,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
        if (dev->can_dma_sg && !(info->flags & FLAG_SEND_ZLP) &&
                !(info->flags & FLAG_MULTI_PACKET)) {
                dev->padding_pkt = kzalloc(1, GFP_KERNEL);
-               if (!dev->padding_pkt)
+               if (!dev->padding_pkt) {
+                       status = -ENOMEM;
                        goto out4;
+               }
        }
 
        status = register_netdev (net);
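
The usbnet hunk above sets status to -ENOMEM before jumping to the cleanup label, so a failed kzalloc() no longer falls through with a stale return value. A compact userspace sketch of that goto-style error path, with hypothetical names (illustrative only):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int probe_like(void)
{
        void *padding_pkt;
        int status = 0;

        padding_pkt = calloc(1, 1);     /* stands in for kzalloc(1, GFP_KERNEL) */
        if (!padding_pkt) {
                status = -ENOMEM;       /* the assignment the patch adds */
                goto out;
        }

        /* ... further setup and registration would follow here ... */
        free(padding_pkt);
        return 0;

out:
        return status;          /* without the assignment this could stay 0 */
}

int main(void)
{
        printf("probe_like() = %d\n", probe_like());
        return 0;
}
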
index eee1f19ef1e9397469e343133490ec6972b4bbdc..b2d034791e1551ab07d7782f5019a0ee4fb86052 100644 (file)
@@ -188,6 +188,11 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
        return tot;
 }
 
+/* fake multicast ability */
+static void veth_set_multicast_list(struct net_device *dev)
+{
+}
+
 static int veth_open(struct net_device *dev)
 {
        struct veth_priv *priv = netdev_priv(dev);
@@ -250,6 +255,7 @@ static const struct net_device_ops veth_netdev_ops = {
        .ndo_start_xmit      = veth_xmit,
        .ndo_change_mtu      = veth_change_mtu,
        .ndo_get_stats64     = veth_get_stats64,
+       .ndo_set_rx_mode     = veth_set_multicast_list,
        .ndo_set_mac_address = eth_mac_addr,
 };
 
index defec2b3c5a408ff035889d015d717f040e2864c..9b3481ed1cf38ce23cb048fbbcd6f51c812e567a 100644 (file)
@@ -852,8 +852,13 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
                        return -EINVAL;
                }
        } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
-               vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
-                                 addr->sa_data, dev->addr_len);
+               unsigned int i;
+
+               /* Naturally, this has an atomicity problem. */
+               for (i = 0; i < dev->addr_len; i++)
+                       virtio_cwrite8(vdev,
+                                      offsetof(struct virtio_net_config, mac) +
+                                      i, addr->sa_data[i]);
        }
 
        eth_commit_mac_addr_change(dev, p);
@@ -938,7 +943,9 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
                return -EINVAL;
        } else {
                vi->curr_queue_pairs = queue_pairs;
-               schedule_delayed_work(&vi->refill, 0);
+               /* virtnet_open() will refill when device is going to up. */
+               if (dev->flags & IFF_UP)
+                       schedule_delayed_work(&vi->refill, 0);
        }
 
        return 0;
@@ -1116,6 +1123,11 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
 {
        struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
 
+       mutex_lock(&vi->config_lock);
+
+       if (!vi->config_enable)
+               goto done;
+
        switch(action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
@@ -1128,6 +1140,9 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
        default:
                break;
        }
+
+done:
+       mutex_unlock(&vi->config_lock);
        return NOTIFY_OK;
 }
 
@@ -1266,9 +1281,8 @@ static void virtnet_config_changed_work(struct work_struct *work)
        if (!vi->config_enable)
                goto done;
 
-       if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
-                             offsetof(struct virtio_net_config, status),
-                             &v) < 0)
+       if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
+                                struct virtio_net_config, status, &v) < 0)
                goto done;
 
        if (v & VIRTIO_NET_S_ANNOUNCE) {
@@ -1490,9 +1504,9 @@ static int virtnet_probe(struct virtio_device *vdev)
        u16 max_queue_pairs;
 
        /* Find if host supports multiqueue virtio_net device */
-       err = virtio_config_val(vdev, VIRTIO_NET_F_MQ,
-                               offsetof(struct virtio_net_config,
-                               max_virtqueue_pairs), &max_queue_pairs);
+       err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
+                                  struct virtio_net_config,
+                                  max_virtqueue_pairs, &max_queue_pairs);
 
        /* We need at least 2 queue's */
        if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
@@ -1544,9 +1558,11 @@ static int virtnet_probe(struct virtio_device *vdev)
        dev->vlan_features = dev->features;
 
        /* Configuration may specify what MAC to use.  Otherwise random. */
-       if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
-                                 offsetof(struct virtio_net_config, mac),
-                                 dev->dev_addr, dev->addr_len) < 0)
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
+               virtio_cread_bytes(vdev,
+                                  offsetof(struct virtio_net_config, mac),
+                                  dev->dev_addr, dev->addr_len);
+       else
                eth_hw_addr_random(dev);
 
        /* Set up our device-specific information */
@@ -1683,7 +1699,7 @@ static void virtnet_remove(struct virtio_device *vdev)
        free_netdev(vi->dev);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtnet_freeze(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
@@ -1733,7 +1749,9 @@ static int virtnet_restore(struct virtio_device *vdev)
        vi->config_enable = true;
        mutex_unlock(&vi->config_lock);
 
+       rtnl_lock();
        virtnet_set_queues(vi, vi->curr_queue_pairs);
+       rtnl_unlock();
 
        return 0;
 }
@@ -1766,7 +1784,7 @@ static struct virtio_driver virtio_net_driver = {
        .probe =        virtnet_probe,
        .remove =       virtnet_remove,
        .config_changed = virtnet_config_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
        .freeze =       virtnet_freeze,
        .restore =      virtnet_restore,
 #endif
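
The virtio_net hunk above switches the MAC update to one virtio_cwrite8() per byte and keeps the in-tree note that this is not atomic with respect to a concurrent reader of the config space. A small userspace sketch of the per-byte pattern, with write_cfg_byte() as a hypothetical stand-in for the config accessor (illustrative only, not part of the patch):

#include <stdio.h>

#define MAC_LEN 6

static unsigned char config_space[MAC_LEN];     /* pretend device config area */

static void write_cfg_byte(unsigned int off, unsigned char v)
{
        config_space[off] = v;  /* a reader could observe a half-written MAC here */
}

int main(void)
{
        const unsigned char mac[MAC_LEN] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
        unsigned int i;

        for (i = 0; i < MAC_LEN; i++)
                write_cfg_byte(i, mac[i]);      /* one config write per byte */

        for (i = 0; i < MAC_LEN; i++)
                printf("%02x%c", config_space[i], i + 1 < MAC_LEN ? ':' : '\n');
        return 0;
}
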
index a03f358fd58b9a44fc480ea33c67adf28e3cf0ef..12040a35d95d17223e5200b0a2716e288f31163d 100644 (file)
@@ -410,9 +410,9 @@ int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
                      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
 
-extern void vmxnet3_set_ethtool_ops(struct net_device *netdev);
+void vmxnet3_set_ethtool_ops(struct net_device *netdev);
 
-extern struct rtnl_link_stats64 *
+struct rtnl_link_stats64 *
 vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
 
 extern char vmxnet3_driver_name[];
index 2ef5b6219f3f46db28cb1256ce6eaa8fffc0bb3c..da8479479d01cfa9a01b17317ec24f2f4526c592 100644 (file)
@@ -2087,7 +2087,7 @@ static void vxlan_setup(struct net_device *dev)
        vxlan->age_timer.function = vxlan_cleanup;
        vxlan->age_timer.data = (unsigned long) vxlan;
 
-       inet_get_local_port_range(&low, &high);
+       inet_get_local_port_range(dev_net(dev), &low, &high);
        vxlan->port_min = low;
        vxlan->port_max = high;
        vxlan->dst_port = htons(vxlan_port);
index 3f0c4f268751030318dd920a2a81bf2ddd1d328d..bcfff0d62de4f2070d5ac644a3becfedb74e3462 100644 (file)
@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
        }
 
        i = port->index;
+       memset(&sync, 0, sizeof(sync));
        sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
        /* Lucky card and linux use same encoding here */
        sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
index 3d80e4267de83f3fae3e3aedf611ad1b3dbd3e2a..3d741663fd677a42eb2a3823e54412cf8afe7ee0 100644 (file)
@@ -220,7 +220,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
        /* We want a fast IRQ for this device. Actually we'd like an even faster
           IRQ ;) - This is one driver RtLinux is made for */
 
-       if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+       if (request_irq(irq, z8530_interrupt, 0,
                        "Hostess SV11", sv) < 0) {
                pr_warn("IRQ %d already in use\n", irq);
                goto err_irq;
index 4f7748478984750fa4a9872341f3db5f36d802a8..27860b4f59081a46f71ad4f5baa2f7605ef8e17c 100644 (file)
@@ -266,7 +266,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
        /* We want a fast IRQ for this device. Actually we'd like an even faster
           IRQ ;) - This is one driver RtLinux is made for */
 
-       if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
+       if (request_irq(irq, z8530_interrupt, 0,
                        "SeaLevel", dev) < 0) {
                pr_warn("IRQ %d already in use\n", irq);
                goto err_request_irq;
index 6a24a5a70cc7d4459e04e0348882f0324dd4fad3..4c0a69779b8980a16ccca5a90acc5ece605cf06a 100644 (file)
@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                        ifr->ifr_settings.size = size; /* data size wanted */
                        return -ENOBUFS;
                }
+               memset(&line, 0, sizeof(line));
                line.clock_type = get_status(port)->clocking;
                line.clock_rate = 0;
                line.loopback = 0;
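
The memset() lines added to the two WAN ioctl paths above zero a stack structure that is later copied out, so any field or padding byte not explicitly set can no longer leak stack contents. A minimal userspace sketch of the idiom (illustrative only; the struct here is a hypothetical stand-in):

#include <stdio.h>
#include <string.h>

struct sync_settings {
        unsigned int clock_rate;
        unsigned int clock_type;
        unsigned short loopback;
        /* the compiler may add padding after this field */
};

int main(void)
{
        struct sync_settings sync;

        memset(&sync, 0, sizeof(sync)); /* the line both patches add */
        sync.clock_rate = 64000;
        sync.clock_type = 1;
        /* loopback and any padding bytes are now guaranteed to be zero */

        printf("rate=%u type=%u loopback=%hu\n",
               sync.clock_rate, sync.clock_type, sync.loopback);
        return 0;
}
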
index 8f0fc2e57e2be06fce081c6f835bb0ea72243859..f57ee67836aea02fc58ff782e38a1b8e3302c94d 100644 (file)
@@ -41,6 +41,6 @@ struct x25_asy {
 
 #define X25_ASY_MAGIC 0x5303
 
-extern int x25_asy_init(struct net_device *dev);
+int x25_asy_init(struct net_device *dev);
 
 #endif /* _LINUX_X25_ASY.H */
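
Several header hunks in this section (the vmxnet3 prototypes above, the x25_asy declaration here, and the z8530 and i2400m blocks below) drop the extern keyword from function prototypes. For functions, extern is implied on a declaration, so the two forms are equivalent; a tiny standalone C example (illustrative only):

#include <stdio.h>

extern int twice_a(int x);      /* explicit extern */
int twice_b(int x);             /* extern implied, same external linkage */

int twice_a(int x) { return 2 * x; }
int twice_b(int x) { return 2 * x; }

int main(void)
{
        printf("%d %d\n", twice_a(3), twice_b(3));
        return 0;
}

Dropping the keyword changes nothing in behaviour or linkage; it only makes the prototypes shorter and consistent.
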
index f29d554fc07d4f893c0857593f0b108fa9b4a614..2416a9d60bd69e4781efcede38e93af611264e00 100644 (file)
@@ -395,20 +395,19 @@ struct z8530_dev
 extern u8 z8530_dead_port[];
 extern u8 z8530_hdlc_kilostream_85230[];
 extern u8 z8530_hdlc_kilostream[];
-extern irqreturn_t z8530_interrupt(int, void *);
-extern void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
-extern int z8530_init(struct z8530_dev *);
-extern int z8530_shutdown(struct z8530_dev *);
-extern int z8530_sync_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
-extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
-extern int z8530_channel_load(struct z8530_channel *, u8 *);
-extern netdev_tx_t z8530_queue_xmit(struct z8530_channel *c,
-                                         struct sk_buff *skb);
-extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
+irqreturn_t z8530_interrupt(int, void *);
+void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io);
+int z8530_init(struct z8530_dev *);
+int z8530_shutdown(struct z8530_dev *);
+int z8530_sync_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_dma_close(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *);
+int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *);
+int z8530_channel_load(struct z8530_channel *, u8 *);
+netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb);
+void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb);
 
 
 /*
index 9f1e947f3557823bc228549a2fd9b0636fed55b2..649ecad6844c73baa58a9e8b0e1850b9fdb88b36 100644 (file)
@@ -256,21 +256,20 @@ void i2400mu_init(struct i2400mu *i2400mu)
        i2400mu->rx_size_auto_shrink = 1;
 }
 
-extern int i2400mu_notification_setup(struct i2400mu *);
-extern void i2400mu_notification_release(struct i2400mu *);
+int i2400mu_notification_setup(struct i2400mu *);
+void i2400mu_notification_release(struct i2400mu *);
 
-extern int i2400mu_rx_setup(struct i2400mu *);
-extern void i2400mu_rx_release(struct i2400mu *);
-extern void i2400mu_rx_kick(struct i2400mu *);
+int i2400mu_rx_setup(struct i2400mu *);
+void i2400mu_rx_release(struct i2400mu *);
+void i2400mu_rx_kick(struct i2400mu *);
 
-extern int i2400mu_tx_setup(struct i2400mu *);
-extern void i2400mu_tx_release(struct i2400mu *);
-extern void i2400mu_bus_tx_kick(struct i2400m *);
+int i2400mu_tx_setup(struct i2400mu *);
+void i2400mu_tx_release(struct i2400mu *);
+void i2400mu_bus_tx_kick(struct i2400m *);
 
-extern ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
-                                      const struct i2400m_bootrom_header *,
-                                      size_t, int);
-extern ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
-                                          struct i2400m_bootrom_header *,
-                                          size_t);
+ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *,
+                               const struct i2400m_bootrom_header *, size_t,
+                               int);
+ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *,
+                                   struct i2400m_bootrom_header *, size_t);
 #endif /* #ifndef __I2400M_USB_H__ */
index 79c6505b5c2030c0f3b0813b97aef5fdc8780278..5a34e72bab9afb42d670f7ea72286c9e191a31e2 100644 (file)
@@ -710,18 +710,18 @@ enum i2400m_bri {
        I2400M_BRI_MAC_REINIT = 1 << 3,
 };
 
-extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
-extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
-extern int i2400m_read_mac_addr(struct i2400m *);
-extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
-extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
+void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
+int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
+int i2400m_read_mac_addr(struct i2400m *);
+int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
+int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
 static inline
 int i2400m_is_d2h_barker(const void *buf)
 {
        const __le32 *barker = buf;
        return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
 }
-extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
+void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
 
 /* Make/grok boot-rom header commands */
 
@@ -789,32 +789,31 @@ unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
 /*
  * Driver / device setup and internal functions
  */
-extern void i2400m_init(struct i2400m *);
-extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
-extern void i2400m_netdev_setup(struct net_device *net_dev);
-extern int i2400m_sysfs_setup(struct device_driver *);
-extern void i2400m_sysfs_release(struct device_driver *);
-extern int i2400m_tx_setup(struct i2400m *);
-extern void i2400m_wake_tx_work(struct work_struct *);
-extern void i2400m_tx_release(struct i2400m *);
-
-extern int i2400m_rx_setup(struct i2400m *);
-extern void i2400m_rx_release(struct i2400m *);
-
-extern void i2400m_fw_cache(struct i2400m *);
-extern void i2400m_fw_uncache(struct i2400m *);
-
-extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
-                         const void *, int);
-extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
-                          enum i2400m_cs);
-extern void i2400m_net_wake_stop(struct i2400m *);
+void i2400m_init(struct i2400m *);
+int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
+void i2400m_netdev_setup(struct net_device *net_dev);
+int i2400m_sysfs_setup(struct device_driver *);
+void i2400m_sysfs_release(struct device_driver *);
+int i2400m_tx_setup(struct i2400m *);
+void i2400m_wake_tx_work(struct work_struct *);
+void i2400m_tx_release(struct i2400m *);
+
+int i2400m_rx_setup(struct i2400m *);
+void i2400m_rx_release(struct i2400m *);
+
+void i2400m_fw_cache(struct i2400m *);
+void i2400m_fw_uncache(struct i2400m *);
+
+void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, const void *,
+                  int);
+void i2400m_net_erx(struct i2400m *, struct sk_buff *, enum i2400m_cs);
+void i2400m_net_wake_stop(struct i2400m *);
 enum i2400m_pt;
-extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
+int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
 
 #ifdef CONFIG_DEBUG_FS
-extern int i2400m_debugfs_add(struct i2400m *);
-extern void i2400m_debugfs_rm(struct i2400m *);
+int i2400m_debugfs_add(struct i2400m *);
+void i2400m_debugfs_rm(struct i2400m *);
 #else
 static inline int i2400m_debugfs_add(struct i2400m *i2400m)
 {
@@ -824,8 +823,8 @@ static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
 #endif
 
 /* Initialize/shutdown the device */
-extern int i2400m_dev_initialize(struct i2400m *);
-extern void i2400m_dev_shutdown(struct i2400m *);
+int i2400m_dev_initialize(struct i2400m *);
+void i2400m_dev_shutdown(struct i2400m *);
 
 extern struct attribute_group i2400m_dev_attr_group;
 
@@ -873,21 +872,21 @@ void i2400m_put(struct i2400m *i2400m)
        dev_put(i2400m->wimax_dev.net_dev);
 }
 
-extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
-extern int i2400m_pre_reset(struct i2400m *);
-extern int i2400m_post_reset(struct i2400m *);
-extern void i2400m_error_recovery(struct i2400m *);
+int i2400m_dev_reset_handle(struct i2400m *, const char *);
+int i2400m_pre_reset(struct i2400m *);
+int i2400m_post_reset(struct i2400m *);
+void i2400m_error_recovery(struct i2400m *);
 
 /*
  * _setup()/_release() are called by the probe/disconnect functions of
  * the bus-specific drivers.
  */
-extern int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
-extern void i2400m_release(struct i2400m *);
+int i2400m_setup(struct i2400m *, enum i2400m_bri bm_flags);
+void i2400m_release(struct i2400m *);
 
-extern int i2400m_rx(struct i2400m *, struct sk_buff *);
-extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
-extern void i2400m_tx_msg_sent(struct i2400m *);
+int i2400m_rx(struct i2400m *, struct sk_buff *);
+struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
+void i2400m_tx_msg_sent(struct i2400m *);
 
 
 /*
@@ -900,20 +899,19 @@ struct device *i2400m_dev(struct i2400m *i2400m)
        return i2400m->wimax_dev.net_dev->dev.parent;
 }
 
-extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
-                                  char *, size_t);
-extern int i2400m_msg_size_check(struct i2400m *,
-                                const struct i2400m_l3l4_hdr *, size_t);
-extern struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
-extern void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
-extern void i2400m_report_hook(struct i2400m *,
-                              const struct i2400m_l3l4_hdr *, size_t);
-extern void i2400m_report_hook_work(struct work_struct *);
-extern int i2400m_cmd_enter_powersave(struct i2400m *);
-extern int i2400m_cmd_exit_idle(struct i2400m *);
-extern struct sk_buff *i2400m_get_device_info(struct i2400m *);
-extern int i2400m_firmware_check(struct i2400m *);
-extern int i2400m_set_idle_timeout(struct i2400m *, unsigned);
+int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, char *, size_t);
+int i2400m_msg_size_check(struct i2400m *, const struct i2400m_l3l4_hdr *,
+                         size_t);
+struct sk_buff *i2400m_msg_to_dev(struct i2400m *, const void *, size_t);
+void i2400m_msg_to_dev_cancel_wait(struct i2400m *, int);
+void i2400m_report_hook(struct i2400m *, const struct i2400m_l3l4_hdr *,
+                       size_t);
+void i2400m_report_hook_work(struct work_struct *);
+int i2400m_cmd_enter_powersave(struct i2400m *);
+int i2400m_cmd_exit_idle(struct i2400m *);
+struct sk_buff *i2400m_get_device_info(struct i2400m *);
+int i2400m_firmware_check(struct i2400m *);
+int i2400m_set_idle_timeout(struct i2400m *, unsigned);
 
 static inline
 struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
@@ -921,10 +919,9 @@ struct usb_endpoint_descriptor *usb_get_epd(struct usb_interface *iface, int ep)
        return &iface->cur_altsetting->endpoint[ep].desc;
 }
 
-extern int i2400m_op_rfkill_sw_toggle(struct wimax_dev *,
-                                     enum wimax_rf_state);
-extern void i2400m_report_tlv_rf_switches_status(
-       struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
+int i2400m_op_rfkill_sw_toggle(struct wimax_dev *, enum wimax_rf_state);
+void i2400m_report_tlv_rf_switches_status(struct i2400m *,
+                                         const struct i2400m_tlv_rf_switches_status *);
 
 /*
  * Helpers for firmware backwards compatibility
@@ -968,8 +965,8 @@ void __i2400m_msleep(unsigned ms)
 
 
 /* module initialization helpers */
-extern int i2400m_barker_db_init(const char *);
-extern void i2400m_barker_db_exit(void);
+int i2400m_barker_db_init(const char *);
+void i2400m_barker_db_exit(void);
 
 
 
index f9a24e599dee4c7acf9b8a5bf0a0caa2306d01e5..cfce83e1f273f0259ee07d072fd8c8f59df8e99a 100644 (file)
@@ -1924,7 +1924,6 @@ static int adm8211_probe(struct pci_dev *pdev,
        pci_iounmap(pdev, priv->map);
 
  err_free_dev:
-       pci_set_drvdata(pdev, NULL);
        ieee80211_free_hw(dev);
 
  err_free_reg:
index 7fe19648f10e8f1e966b857c12077f34025dc8b5..edf4b57c4aaa306ebbf12beea5100d6f6a7f5e2e 100644 (file)
@@ -5570,7 +5570,6 @@ static void airo_pci_remove(struct pci_dev *pdev)
        airo_print_info(dev->name, "Unregistering...");
        stop_airo_card(dev, 1);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
index 1abf1d421173085e72a4bfa549fd147f83a87ca5..c63d1159db5ca939718f4c34385347865b53ef67 100644 (file)
@@ -25,6 +25,23 @@ config ATH_DEBUG
          Say Y, if you want to debug atheros wireless drivers.
          Right now only ath9k makes use of this.
 
+config ATH_REG_DYNAMIC_USER_REG_HINTS
+       bool "Atheros dynamic user regulatory hints"
+       depends on CFG80211_CERTIFICATION_ONUS
+       default n
+       ---help---
+         Say N. This should only be enabled in countries where
+         this feature is explicitly allowed and only on cards that
+         specifically have been tested for this.
+
+config ATH_REG_DYNAMIC_USER_CERT_TESTING
+       bool "Atheros dynamic user regulatory testing"
+       depends on ATH_REG_DYNAMIC_USER_REG_HINTS && CFG80211_CERTIFICATION_ONUS
+       default n
+       ---help---
+         Say N. This should only be enabled on systems
+         undergoing certification testing.
+
 source "drivers/net/wireless/ath/ath5k/Kconfig"
 source "drivers/net/wireless/ath/ath9k/Kconfig"
 source "drivers/net/wireless/ath/carl9170/Kconfig"
@@ -32,5 +49,6 @@ source "drivers/net/wireless/ath/ath6kl/Kconfig"
 source "drivers/net/wireless/ath/ar5523/Kconfig"
 source "drivers/net/wireless/ath/wil6210/Kconfig"
 source "drivers/net/wireless/ath/ath10k/Kconfig"
+source "drivers/net/wireless/ath/wcn36xx/Kconfig"
 
 endif
index fb05cfd193616ea65cecbfc489653409b173e617..7d023b0f13b47d064cd2db064c58690e2da8e1ea 100644 (file)
@@ -5,13 +5,16 @@ obj-$(CONFIG_ATH6KL)          += ath6kl/
 obj-$(CONFIG_AR5523)           += ar5523/
 obj-$(CONFIG_WIL6210)          += wil6210/
 obj-$(CONFIG_ATH10K)           += ath10k/
+obj-$(CONFIG_WCN36XX)          += wcn36xx/
 
 obj-$(CONFIG_ATH_COMMON)       += ath.o
 
 ath-objs :=    main.o \
                regd.o \
                hw.o \
-               key.o
+               key.o \
+               dfs_pattern_detector.o \
+               dfs_pri_detector.o
 
 ath-$(CONFIG_ATH_DEBUG) += debug.o
 ccflags-y += -D__CHECK_ENDIAN__
index 17d7fece35d2c203eedef5fed0725074a0067cf7..280fc3d53a36466ca8a3f4ff7fd73074efc86b75 100644 (file)
@@ -1762,6 +1762,7 @@ static struct usb_device_id ar5523_id_table[] = {
        AR5523_DEVICE_UX(0x2001, 0x3a00),       /* Dlink / DWLAG132 */
        AR5523_DEVICE_UG(0x2001, 0x3a02),       /* Dlink / DWLG132 */
        AR5523_DEVICE_UX(0x2001, 0x3a04),       /* Dlink / DWLAG122 */
+       AR5523_DEVICE_UG(0x07d1, 0x3a07),       /* D-Link / WUA-2340 rev A1 */
        AR5523_DEVICE_UG(0x1690, 0x0712),       /* Gigaset / AR5523 */
        AR5523_DEVICE_UG(0x1690, 0x0710),       /* Gigaset / SMCWUSBTG */
        AR5523_DEVICE_UG(0x129b, 0x160c),       /* Gigaset / USB stick 108
index 744da6d1c405d91a645428d0f686c3c9cb4f00f0..a1f0996288508e3cad8ecd8e03f33153980e593e 100644 (file)
@@ -22,7 +22,8 @@
 
 void ath10k_bmi_start(struct ath10k *ar)
 {
-       ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+       ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
+
        ar->bmi.done_sent = false;
 }
 
@@ -32,8 +33,10 @@ int ath10k_bmi_done(struct ath10k *ar)
        u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
        int ret;
 
+       ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
+
        if (ar->bmi.done_sent) {
-               ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
+               ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
                return 0;
        }
 
@@ -46,7 +49,6 @@ int ath10k_bmi_done(struct ath10k *ar)
                return ret;
        }
 
-       ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
        return 0;
 }
 
@@ -59,6 +61,8 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
        u32 resplen = sizeof(resp.get_target_info);
        int ret;
 
+       ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
+
        if (ar->bmi.done_sent) {
                ath10k_warn("BMI Get Target Info Command disallowed\n");
                return -EBUSY;
@@ -80,6 +84,7 @@ int ath10k_bmi_get_target_info(struct ath10k *ar,
 
        target_info->version = __le32_to_cpu(resp.get_target_info.version);
        target_info->type    = __le32_to_cpu(resp.get_target_info.type);
+
        return 0;
 }
 
@@ -92,15 +97,14 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
        u32 rxlen;
        int ret;
 
+       ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+                  address, length);
+
        if (ar->bmi.done_sent) {
                ath10k_warn("command disallowed\n");
                return -EBUSY;
        }
 
-       ath10k_dbg(ATH10K_DBG_CORE,
-                  "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
-                  __func__, ar, address, length);
-
        while (length) {
                rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
 
@@ -133,15 +137,14 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
        u32 txlen;
        int ret;
 
+       ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+                  address, length);
+
        if (ar->bmi.done_sent) {
                ath10k_warn("command disallowed\n");
                return -EBUSY;
        }
 
-       ath10k_dbg(ATH10K_DBG_CORE,
-                  "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
-                  __func__, ar, address, length);
-
        while (length) {
                txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
 
@@ -180,15 +183,14 @@ int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
        u32 resplen = sizeof(resp.execute);
        int ret;
 
+       ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+                  address, *param);
+
        if (ar->bmi.done_sent) {
                ath10k_warn("command disallowed\n");
                return -EBUSY;
        }
 
-       ath10k_dbg(ATH10K_DBG_CORE,
-                  "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
-                  __func__, ar, address, *param);
-
        cmd.id            = __cpu_to_le32(BMI_EXECUTE);
        cmd.execute.addr  = __cpu_to_le32(address);
        cmd.execute.param = __cpu_to_le32(*param);
@@ -216,6 +218,9 @@ int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
        u32 txlen;
        int ret;
 
+       ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+                  buffer, length);
+
        if (ar->bmi.done_sent) {
                ath10k_warn("command disallowed\n");
                return -EBUSY;
@@ -250,6 +255,9 @@ int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
        u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
        int ret;
 
+       ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+                  address);
+
        if (ar->bmi.done_sent) {
                ath10k_warn("command disallowed\n");
                return -EBUSY;
@@ -275,6 +283,10 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
        u32 trailer_len = length - head_len;
        int ret;
 
+       ath10k_dbg(ATH10K_DBG_BMI,
+                  "bmi fast download address 0x%x buffer 0x%p length %d\n",
+                  address, buffer, length);
+
        ret = ath10k_bmi_lz_stream_start(ar, address);
        if (ret)
                return ret;
index f8b969f518f84ecee55c4e38f5d5f8247d501cf0..e46951b8fb925df8a4cd4bb4ed14a738871175b2 100644 (file)
@@ -76,36 +76,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
                                                      u32 ce_ctrl_addr,
                                                      unsigned int n)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       void __iomem *indicator_addr;
-
-       if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
-               ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
-               return;
-       }
-
-       /* workaround for QCA988x_1.0 HW CE */
-       indicator_addr = ar_pci->mem + ce_ctrl_addr + DST_WATERMARK_ADDRESS;
-
-       if (ce_ctrl_addr == ath10k_ce_base_address(CDC_WAR_DATA_CE)) {
-               iowrite32((CDC_WAR_MAGIC_STR | n), indicator_addr);
-       } else {
-               unsigned long irq_flags;
-               local_irq_save(irq_flags);
-               iowrite32(1, indicator_addr);
-
-               /*
-                * PCIE write waits for ACK in IPQ8K, there is no
-                * need to read back value.
-                */
-               (void)ioread32(indicator_addr);
-               (void)ioread32(indicator_addr); /* conservative */
-
-               ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
-
-               iowrite32(0, indicator_addr);
-               local_irq_restore(irq_flags);
-       }
+       ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
 }
 
 static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
@@ -285,7 +256,7 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
  * ath10k_ce_sendlist_send.
  * The caller takes responsibility for any needed locking.
  */
-static int ath10k_ce_send_nolock(struct ce_state *ce_state,
+static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
                                 void *per_transfer_context,
                                 u32 buffer,
                                 unsigned int nbytes,
@@ -293,7 +264,7 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
                                 unsigned int flags)
 {
        struct ath10k *ar = ce_state->ar;
-       struct ce_ring_state *src_ring = ce_state->src_ring;
+       struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        struct ce_desc *desc, *sdesc;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
@@ -306,11 +277,13 @@ static int ath10k_ce_send_nolock(struct ce_state *ce_state,
                ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
                            __func__, nbytes, ce_state->src_sz_max);
 
-       ath10k_pci_wake(ar);
+       ret = ath10k_pci_wake(ar);
+       if (ret)
+               return ret;
 
        if (unlikely(CE_RING_DELTA(nentries_mask,
                                   write_index, sw_index - 1) <= 0)) {
-               ret = -EIO;
+               ret = -ENOSR;
                goto exit;
        }
 
@@ -346,7 +319,7 @@ exit:
        return ret;
 }
 
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
                   void *per_transfer_context,
                   u32 buffer,
                   unsigned int nbytes,
@@ -365,77 +338,26 @@ int ath10k_ce_send(struct ce_state *ce_state,
        return ret;
 }
 
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
-                               unsigned int nbytes, u32 flags)
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
 {
-       unsigned int num_items = sendlist->num_items;
-       struct ce_sendlist_item *item;
-
-       item = &sendlist->item[num_items];
-       item->data = buffer;
-       item->u.nbytes = nbytes;
-       item->flags = flags;
-       sendlist->num_items++;
-}
-
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
-                           void *per_transfer_context,
-                           struct ce_sendlist *sendlist,
-                           unsigned int transfer_id)
-{
-       struct ce_ring_state *src_ring = ce_state->src_ring;
-       struct ce_sendlist_item *item;
-       struct ath10k *ar = ce_state->ar;
+       struct ath10k *ar = pipe->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       unsigned int nentries_mask = src_ring->nentries_mask;
-       unsigned int num_items = sendlist->num_items;
-       unsigned int sw_index;
-       unsigned int write_index;
-       int i, delta, ret = -ENOMEM;
+       int delta;
 
        spin_lock_bh(&ar_pci->ce_lock);
-
-       sw_index = src_ring->sw_index;
-       write_index = src_ring->write_index;
-
-       delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
-
-       if (delta >= num_items) {
-               /*
-                * Handle all but the last item uniformly.
-                */
-               for (i = 0; i < num_items - 1; i++) {
-                       item = &sendlist->item[i];
-                       ret = ath10k_ce_send_nolock(ce_state,
-                                                   CE_SENDLIST_ITEM_CTXT,
-                                                   (u32) item->data,
-                                                   item->u.nbytes, transfer_id,
-                                                   item->flags |
-                                                   CE_SEND_FLAG_GATHER);
-                       if (ret)
-                               ath10k_warn("CE send failed for item: %d\n", i);
-               }
-               /*
-                * Provide valid context pointer for final item.
-                */
-               item = &sendlist->item[i];
-               ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
-                                           (u32) item->data, item->u.nbytes,
-                                           transfer_id, item->flags);
-               if (ret)
-                       ath10k_warn("CE send failed for last item: %d\n", i);
-       }
-
+       delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
+                             pipe->src_ring->write_index,
+                             pipe->src_ring->sw_index - 1);
        spin_unlock_bh(&ar_pci->ce_lock);
 
-       return ret;
+       return delta;
 }
 
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
                               void *per_recv_context,
                               u32 buffer)
 {
-       struct ce_ring_state *dest_ring = ce_state->dest_ring;
+       struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -448,7 +370,9 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
        write_index = dest_ring->write_index;
        sw_index = dest_ring->sw_index;
 
-       ath10k_pci_wake(ar);
+       ret = ath10k_pci_wake(ar);
+       if (ret)
+               goto out;
 
        if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
                struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -470,6 +394,8 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
                ret = -EIO;
        }
        ath10k_pci_sleep(ar);
+
+out:
        spin_unlock_bh(&ar_pci->ce_lock);
 
        return ret;
@@ -479,14 +405,14 @@ int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
  * Guts of ath10k_ce_completed_recv_next.
  * The caller takes responsibility for any necessary locking.
  */
-static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
                                                void **per_transfer_contextp,
                                                u32 *bufferp,
                                                unsigned int *nbytesp,
                                                unsigned int *transfer_idp,
                                                unsigned int *flagsp)
 {
-       struct ce_ring_state *dest_ring = ce_state->dest_ring;
+       struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
        unsigned int sw_index = dest_ring->sw_index;
 
@@ -535,7 +461,7 @@ static int ath10k_ce_completed_recv_next_nolock(struct ce_state *ce_state,
        return 0;
 }
 
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
                                  u32 *bufferp,
                                  unsigned int *nbytesp,
@@ -556,11 +482,11 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
        return ret;
 }
 
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
                               u32 *bufferp)
 {
-       struct ce_ring_state *dest_ring;
+       struct ath10k_ce_ring *dest_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
@@ -612,19 +538,20 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
  * Guts of ath10k_ce_completed_send_next.
  * The caller takes responsibility for any necessary locking.
  */
-static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
+static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
                                                void **per_transfer_contextp,
                                                u32 *bufferp,
                                                unsigned int *nbytesp,
                                                unsigned int *transfer_idp)
 {
-       struct ce_ring_state *src_ring = ce_state->src_ring;
+       struct ath10k_ce_ring *src_ring = ce_state->src_ring;
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
        unsigned int nentries_mask = src_ring->nentries_mask;
        unsigned int sw_index = src_ring->sw_index;
+       struct ce_desc *sdesc, *sbase;
        unsigned int read_index;
-       int ret = -EIO;
+       int ret;
 
        if (src_ring->hw_index == sw_index) {
                /*
@@ -634,48 +561,54 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
                 * the SW has really caught up to the HW, or if the cached
                 * value of the HW index has become stale.
                 */
-               ath10k_pci_wake(ar);
+
+               ret = ath10k_pci_wake(ar);
+               if (ret)
+                       return ret;
+
                src_ring->hw_index =
                        ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
                src_ring->hw_index &= nentries_mask;
+
                ath10k_pci_sleep(ar);
        }
+
        read_index = src_ring->hw_index;
 
-       if ((read_index != sw_index) && (read_index != 0xffffffff)) {
-               struct ce_desc *sbase = src_ring->shadow_base;
-               struct ce_desc *sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
+       if ((read_index == sw_index) || (read_index == 0xffffffff))
+               return -EIO;
 
-               /* Return data from completed source descriptor */
-               *bufferp = __le32_to_cpu(sdesc->addr);
-               *nbytesp = __le16_to_cpu(sdesc->nbytes);
-               *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
-                                               CE_DESC_FLAGS_META_DATA);
+       sbase = src_ring->shadow_base;
+       sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
 
-               if (per_transfer_contextp)
-                       *per_transfer_contextp =
-                               src_ring->per_transfer_context[sw_index];
+       /* Return data from completed source descriptor */
+       *bufferp = __le32_to_cpu(sdesc->addr);
+       *nbytesp = __le16_to_cpu(sdesc->nbytes);
+       *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
+                          CE_DESC_FLAGS_META_DATA);
 
-               /* sanity */
-               src_ring->per_transfer_context[sw_index] = NULL;
+       if (per_transfer_contextp)
+               *per_transfer_contextp =
+                       src_ring->per_transfer_context[sw_index];
 
-               /* Update sw_index */
-               sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
-               src_ring->sw_index = sw_index;
-               ret = 0;
-       }
+       /* sanity */
+       src_ring->per_transfer_context[sw_index] = NULL;
 
-       return ret;
+       /* Update sw_index */
+       sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+       src_ring->sw_index = sw_index;
+
+       return 0;
 }
 
 /* NB: Modeled after ath10k_ce_completed_send_next */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
                               u32 *bufferp,
                               unsigned int *nbytesp,
                               unsigned int *transfer_idp)
 {
-       struct ce_ring_state *src_ring;
+       struct ath10k_ce_ring *src_ring;
        unsigned int nentries_mask;
        unsigned int sw_index;
        unsigned int write_index;
@@ -727,7 +660,7 @@ int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
        return ret;
 }
 
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
                                  u32 *bufferp,
                                  unsigned int *nbytesp,
@@ -756,53 +689,29 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
        u32 ctrl_addr = ce_state->ctrl_addr;
-       void *transfer_context;
-       u32 buf;
-       unsigned int nbytes;
-       unsigned int id;
-       unsigned int flags;
+       int ret;
+
+       ret = ath10k_pci_wake(ar);
+       if (ret)
+               return;
 
-       ath10k_pci_wake(ar);
        spin_lock_bh(&ar_pci->ce_lock);
 
        /* Clear the copy-complete interrupts that will be handled here. */
        ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
                                          HOST_IS_COPY_COMPLETE_MASK);
 
-       if (ce_state->recv_cb) {
-               /*
-                * Pop completed recv buffers and call the registered
-                * recv callback for each
-                */
-               while (ath10k_ce_completed_recv_next_nolock(ce_state,
-                                                           &transfer_context,
-                                                           &buf, &nbytes,
-                                                           &id, &flags) == 0) {
-                       spin_unlock_bh(&ar_pci->ce_lock);
-                       ce_state->recv_cb(ce_state, transfer_context, buf,
-                                         nbytes, id, flags);
-                       spin_lock_bh(&ar_pci->ce_lock);
-               }
-       }
+       spin_unlock_bh(&ar_pci->ce_lock);
 
-       if (ce_state->send_cb) {
-               /*
-                * Pop completed send buffers and call the registered
-                * send callback for each
-                */
-               while (ath10k_ce_completed_send_next_nolock(ce_state,
-                                                           &transfer_context,
-                                                           &buf,
-                                                           &nbytes,
-                                                           &id) == 0) {
-                       spin_unlock_bh(&ar_pci->ce_lock);
-                       ce_state->send_cb(ce_state, transfer_context,
-                                         buf, nbytes, id);
-                       spin_lock_bh(&ar_pci->ce_lock);
-               }
-       }
+       if (ce_state->recv_cb)
+               ce_state->recv_cb(ce_state);
+
+       if (ce_state->send_cb)
+               ce_state->send_cb(ce_state);
+
+       spin_lock_bh(&ar_pci->ce_lock);
 
        /*
         * Misc CE interrupts are not being handled, but still need
@@ -823,10 +732,13 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
 void ath10k_ce_per_engine_service_any(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int ce_id;
+       int ce_id, ret;
        u32 intr_summary;
 
-       ath10k_pci_wake(ar);
+       ret = ath10k_pci_wake(ar);
+       if (ret)
+               return;
+
        intr_summary = CE_INTERRUPT_SUMMARY(ar);
 
        for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
@@ -849,13 +761,16 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
  *
  * Called with ce_lock held.
  */
-static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
                                                int disable_copy_compl_intr)
 {
        u32 ctrl_addr = ce_state->ctrl_addr;
        struct ath10k *ar = ce_state->ar;
+       int ret;
 
-       ath10k_pci_wake(ar);
+       ret = ath10k_pci_wake(ar);
+       if (ret)
+               return;
 
        if ((!disable_copy_compl_intr) &&
            (ce_state->send_cb || ce_state->recv_cb))
@@ -871,11 +786,14 @@ static void ath10k_ce_per_engine_handler_adjust(struct ce_state *ce_state,
 void ath10k_ce_disable_interrupts(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       int ce_id;
+       int ce_id, ret;
+
+       ret = ath10k_pci_wake(ar);
+       if (ret)
+               return;
 
-       ath10k_pci_wake(ar);
        for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
-               struct ce_state *ce_state = ar_pci->ce_id_to_state[ce_id];
+               struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
                u32 ctrl_addr = ce_state->ctrl_addr;
 
                ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
@@ -883,12 +801,8 @@ void ath10k_ce_disable_interrupts(struct ath10k *ar)
        ath10k_pci_sleep(ar);
 }
 
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
-                               void (*send_cb) (struct ce_state *ce_state,
-                                                void *transfer_context,
-                                                u32 buffer,
-                                                unsigned int nbytes,
-                                                unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+                               void (*send_cb)(struct ath10k_ce_pipe *),
                                int disable_interrupts)
 {
        struct ath10k *ar = ce_state->ar;
@@ -900,13 +814,8 @@ void ath10k_ce_send_cb_register(struct ce_state *ce_state,
        spin_unlock_bh(&ar_pci->ce_lock);
 }
 
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
-                               void (*recv_cb) (struct ce_state *ce_state,
-                                                void *transfer_context,
-                                                u32 buffer,
-                                                unsigned int nbytes,
-                                                unsigned int transfer_id,
-                                                unsigned int flags))
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+                               void (*recv_cb)(struct ath10k_ce_pipe *))
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -919,11 +828,11 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
 
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
                                   unsigned int ce_id,
-                                  struct ce_state *ce_state,
+                                  struct ath10k_ce_pipe *ce_state,
                                   const struct ce_attr *attr)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ce_ring_state *src_ring;
+       struct ath10k_ce_ring *src_ring;
        unsigned int nentries = attr->src_nentries;
        unsigned int ce_nbytes;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -937,19 +846,18 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
                return 0;
        }
 
-       ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+       ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
        ptr = kzalloc(ce_nbytes, GFP_KERNEL);
        if (ptr == NULL)
                return -ENOMEM;
 
-       ce_state->src_ring = (struct ce_ring_state *)ptr;
+       ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
        src_ring = ce_state->src_ring;
 
-       ptr += sizeof(struct ce_ring_state);
+       ptr += sizeof(struct ath10k_ce_ring);
        src_ring->nentries = nentries;
        src_ring->nentries_mask = nentries - 1;
 
-       ath10k_pci_wake(ar);
        src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
        src_ring->sw_index &= src_ring->nentries_mask;
        src_ring->hw_index = src_ring->sw_index;
@@ -957,7 +865,6 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
        src_ring->write_index =
                ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
        src_ring->write_index &= src_ring->nentries_mask;
-       ath10k_pci_sleep(ar);
 
        src_ring->per_transfer_context = (void **)ptr;
 
@@ -970,6 +877,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
                                     (nentries * sizeof(struct ce_desc) +
                                      CE_DESC_RING_ALIGN),
                                     &base_addr);
+       if (!src_ring->base_addr_owner_space_unaligned) {
+               kfree(ce_state->src_ring);
+               ce_state->src_ring = NULL;
+               return -ENOMEM;
+       }
+
        src_ring->base_addr_ce_space_unaligned = base_addr;
 
        src_ring->base_addr_owner_space = PTR_ALIGN(
@@ -986,12 +899,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
        src_ring->shadow_base_unaligned =
                kmalloc((nentries * sizeof(struct ce_desc) +
                         CE_DESC_RING_ALIGN), GFP_KERNEL);
+       if (!src_ring->shadow_base_unaligned) {
+               pci_free_consistent(ar_pci->pdev,
+                                   (nentries * sizeof(struct ce_desc) +
+                                    CE_DESC_RING_ALIGN),
+                                   src_ring->base_addr_owner_space,
+                                   src_ring->base_addr_ce_space);
+               kfree(ce_state->src_ring);
+               ce_state->src_ring = NULL;
+               return -ENOMEM;
+       }
 
        src_ring->shadow_base = PTR_ALIGN(
                        src_ring->shadow_base_unaligned,
                        CE_DESC_RING_ALIGN);
 
-       ath10k_pci_wake(ar);
        ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
                                         src_ring->base_addr_ce_space);
        ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
@@ -999,18 +921,21 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
        ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
        ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-       ath10k_pci_sleep(ar);
+
+       ath10k_dbg(ATH10K_DBG_BOOT,
+                  "boot ce src ring id %d entries %d base_addr %p\n",
+                  ce_id, nentries, src_ring->base_addr_owner_space);
 
        return 0;
 }
 
 static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                                    unsigned int ce_id,
-                                   struct ce_state *ce_state,
+                                   struct ath10k_ce_pipe *ce_state,
                                    const struct ce_attr *attr)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ce_ring_state *dest_ring;
+       struct ath10k_ce_ring *dest_ring;
        unsigned int nentries = attr->dest_nentries;
        unsigned int ce_nbytes;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -1024,25 +949,23 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                return 0;
        }
 
-       ce_nbytes = sizeof(struct ce_ring_state) + (nentries * sizeof(void *));
+       ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
        ptr = kzalloc(ce_nbytes, GFP_KERNEL);
        if (ptr == NULL)
                return -ENOMEM;
 
-       ce_state->dest_ring = (struct ce_ring_state *)ptr;
+       ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
        dest_ring = ce_state->dest_ring;
 
-       ptr += sizeof(struct ce_ring_state);
+       ptr += sizeof(struct ath10k_ce_ring);
        dest_ring->nentries = nentries;
        dest_ring->nentries_mask = nentries - 1;
 
-       ath10k_pci_wake(ar);
        dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
        dest_ring->sw_index &= dest_ring->nentries_mask;
        dest_ring->write_index =
                ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
        dest_ring->write_index &= dest_ring->nentries_mask;
-       ath10k_pci_sleep(ar);
 
        dest_ring->per_transfer_context = (void **)ptr;
 
@@ -1055,6 +978,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                                     (nentries * sizeof(struct ce_desc) +
                                      CE_DESC_RING_ALIGN),
                                     &base_addr);
+       if (!dest_ring->base_addr_owner_space_unaligned) {
+               kfree(ce_state->dest_ring);
+               ce_state->dest_ring = NULL;
+               return -ENOMEM;
+       }
+
        dest_ring->base_addr_ce_space_unaligned = base_addr;
 
        /*
@@ -1071,44 +1000,35 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
                        dest_ring->base_addr_ce_space_unaligned,
                        CE_DESC_RING_ALIGN);
 
-       ath10k_pci_wake(ar);
        ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
                                          dest_ring->base_addr_ce_space);
        ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
        ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
        ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-       ath10k_pci_sleep(ar);
+
+       ath10k_dbg(ATH10K_DBG_BOOT,
+                  "boot ce dest ring id %d entries %d base_addr %p\n",
+                  ce_id, nentries, dest_ring->base_addr_owner_space);
 
        return 0;
 }
 
-static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
+static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
                                             unsigned int ce_id,
                                             const struct ce_attr *attr)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ce_state *ce_state = NULL;
+       struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
 
        spin_lock_bh(&ar_pci->ce_lock);
 
-       if (!ar_pci->ce_id_to_state[ce_id]) {
-               ce_state = kzalloc(sizeof(*ce_state), GFP_ATOMIC);
-               if (ce_state == NULL) {
-                       spin_unlock_bh(&ar_pci->ce_lock);
-                       return NULL;
-               }
-
-               ar_pci->ce_id_to_state[ce_id] = ce_state;
-               ce_state->ar = ar;
-               ce_state->id = ce_id;
-               ce_state->ctrl_addr = ctrl_addr;
-               ce_state->state = CE_RUNNING;
-               /* Save attribute flags */
-               ce_state->attr_flags = attr->flags;
-               ce_state->src_sz_max = attr->src_sz_max;
-       }
+       ce_state->ar = ar;
+       ce_state->id = ce_id;
+       ce_state->ctrl_addr = ctrl_addr;
+       ce_state->attr_flags = attr->flags;
+       ce_state->src_sz_max = attr->src_sz_max;
 
        spin_unlock_bh(&ar_pci->ce_lock);
 
@@ -1122,12 +1042,17 @@ static struct ce_state *ath10k_ce_init_state(struct ath10k *ar,
  * initialization. It may be that only one side or the other is
  * initialized by software/firmware.
  */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
                                unsigned int ce_id,
                                const struct ce_attr *attr)
 {
-       struct ce_state *ce_state;
+       struct ath10k_ce_pipe *ce_state;
        u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+       int ret;
+
+       ret = ath10k_pci_wake(ar);
+       if (ret)
+               return NULL;
 
        ce_state = ath10k_ce_init_state(ar, ce_id, attr);
        if (!ce_state) {
@@ -1136,40 +1061,38 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
        }
 
        if (attr->src_nentries) {
-               if (ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr)) {
-                       ath10k_err("Failed to initialize CE src ring for ID: %d\n",
-                                  ce_id);
+               ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+               if (ret) {
+                       ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
+                                  ce_id, ret);
                        ath10k_ce_deinit(ce_state);
                        return NULL;
                }
        }
 
        if (attr->dest_nentries) {
-               if (ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr)) {
-                       ath10k_err("Failed to initialize CE dest ring for ID: %d\n",
-                                  ce_id);
+               ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+               if (ret) {
+                       ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
+                                  ce_id, ret);
                        ath10k_ce_deinit(ce_state);
                        return NULL;
                }
        }
 
        /* Enable CE error interrupts */
-       ath10k_pci_wake(ar);
        ath10k_ce_error_intr_enable(ar, ctrl_addr);
+
        ath10k_pci_sleep(ar);
 
        return ce_state;
 }
 
-void ath10k_ce_deinit(struct ce_state *ce_state)
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
 {
-       unsigned int ce_id = ce_state->id;
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-       ce_state->state = CE_UNUSED;
-       ar_pci->ce_id_to_state[ce_id] = NULL;
-
        if (ce_state->src_ring) {
                kfree(ce_state->src_ring->shadow_base_unaligned);
                pci_free_consistent(ar_pci->pdev,
@@ -1190,5 +1113,7 @@ void ath10k_ce_deinit(struct ce_state *ce_state)
                                    ce_state->dest_ring->base_addr_ce_space);
                kfree(ce_state->dest_ring);
        }
-       kfree(ce_state);
+
+       ce_state->src_ring = NULL;
+       ce_state->dest_ring = NULL;
 }
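
The copy-engine ring-setup hunks above stop ignoring allocation failures: each step that allocates memory now undoes every earlier step before returning -ENOMEM, so a half-initialised ring is never left behind. A minimal standalone sketch of that unwind shape, using hypothetical example_* names rather than the driver's own helpers:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct example_ring {
        void *meta;     /* stands in for the kzalloc'd ring bookkeeping   */
        void *desc;     /* stands in for the DMA-coherent descriptor area */
        void *shadow;   /* stands in for the kmalloc'd shadow copy        */
};

/* Returns 0 on success; on failure nothing allocated here is leaked. */
static int example_ring_init(struct example_ring *r, size_t nentries)
{
        memset(r, 0, sizeof(*r));

        r->meta = calloc(nentries, sizeof(void *));
        if (!r->meta)
                return -ENOMEM;

        r->desc = malloc(nentries * 64);
        if (!r->desc) {
                free(r->meta);                  /* undo step 1 */
                r->meta = NULL;
                return -ENOMEM;
        }

        r->shadow = malloc(nentries * 64);
        if (!r->shadow) {
                free(r->desc);                  /* undo step 2 */
                free(r->meta);                  /* undo step 1 */
                r->desc = NULL;
                r->meta = NULL;
                return -ENOMEM;
        }

        return 0;
}
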
index c17f07c026f48f585fc8699de1d850e2f1440a9d..15d45b5b76153b60133502be39b1803eb86c8517 100644 (file)
@@ -27,7 +27,6 @@
 
 /* Descriptor rings must be aligned to this boundary */
 #define CE_DESC_RING_ALIGN     8
-#define CE_SENDLIST_ITEMS_MAX  12
 #define CE_SEND_FLAG_GATHER    0x00010000
 
 /*
  * how to use copy engines.
  */
 
-struct ce_state;
+struct ath10k_ce_pipe;
 
 
-/* Copy Engine operational state */
-enum ce_op_state {
-       CE_UNUSED,
-       CE_PAUSED,
-       CE_RUNNING,
-};
-
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
 #define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
@@ -57,8 +49,7 @@ struct ce_desc {
        __le16 flags; /* %CE_DESC_FLAGS_ */
 };
 
-/* Copy Engine Ring internal state */
-struct ce_ring_state {
+struct ath10k_ce_ring {
        /* Number of entries in this ring; must be power of 2 */
        unsigned int nentries;
        unsigned int nentries_mask;
@@ -116,49 +107,20 @@ struct ce_ring_state {
        void **per_transfer_context;
 };
 
-/* Copy Engine internal state */
-struct ce_state {
+struct ath10k_ce_pipe {
        struct ath10k *ar;
        unsigned int id;
 
        unsigned int attr_flags;
 
        u32 ctrl_addr;
-       enum ce_op_state state;
-
-       void (*send_cb) (struct ce_state *ce_state,
-                        void *per_transfer_send_context,
-                        u32 buffer,
-                        unsigned int nbytes,
-                        unsigned int transfer_id);
-       void (*recv_cb) (struct ce_state *ce_state,
-                        void *per_transfer_recv_context,
-                        u32 buffer,
-                        unsigned int nbytes,
-                        unsigned int transfer_id,
-                        unsigned int flags);
-
-       unsigned int src_sz_max;
-       struct ce_ring_state *src_ring;
-       struct ce_ring_state *dest_ring;
-};
 
-struct ce_sendlist_item {
-       /* e.g. buffer or desc list */
-       dma_addr_t data;
-       union {
-               /* simple buffer */
-               unsigned int nbytes;
-               /* Rx descriptor list */
-               unsigned int ndesc;
-       } u;
-       /* externally-specified flags; OR-ed with internal flags */
-       u32 flags;
-};
+       void (*send_cb)(struct ath10k_ce_pipe *);
+       void (*recv_cb)(struct ath10k_ce_pipe *);
 
-struct ce_sendlist {
-       unsigned int num_items;
-       struct ce_sendlist_item item[CE_SENDLIST_ITEMS_MAX];
+       unsigned int src_sz_max;
+       struct ath10k_ce_ring *src_ring;
+       struct ath10k_ce_ring *dest_ring;
 };
 
 /* Copy Engine settable attributes */
@@ -182,7 +144,7 @@ struct ce_attr;
  *
  * Implementation note: pushes 1 buffer to Source ring
  */
-int ath10k_ce_send(struct ce_state *ce_state,
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
                   void *per_transfer_send_context,
                   u32 buffer,
                   unsigned int nbytes,
@@ -190,36 +152,11 @@ int ath10k_ce_send(struct ce_state *ce_state,
                   unsigned int transfer_id,
                   unsigned int flags);
 
-void ath10k_ce_send_cb_register(struct ce_state *ce_state,
-                               void (*send_cb) (struct ce_state *ce_state,
-                                                void *transfer_context,
-                                                u32 buffer,
-                                                unsigned int nbytes,
-                                                unsigned int transfer_id),
+void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
+                               void (*send_cb)(struct ath10k_ce_pipe *),
                                int disable_interrupts);
 
-/* Append a simple buffer (address/length) to a sendlist. */
-void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist,
-                               u32 buffer,
-                               unsigned int nbytes,
-                               /* OR-ed with internal flags */
-                               u32 flags);
-
-/*
- * Queue a "sendlist" of buffers to be sent using gather to a single
- * anonymous destination buffer
- *   ce         - which copy engine to use
- *   sendlist        - list of simple buffers to send using gather
- *   transfer_id     - arbitrary ID; reflected to destination
- * Returns 0 on success; otherwise an error status.
- *
- * Implemenation note: Pushes multiple buffers with Gather to Source ring.
- */
-int ath10k_ce_sendlist_send(struct ce_state *ce_state,
-                           void *per_transfer_send_context,
-                           struct ce_sendlist *sendlist,
-                           /* 14 bits */
-                           unsigned int transfer_id);
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
 
 /*==================Recv=======================*/
 
@@ -233,17 +170,12 @@ int ath10k_ce_sendlist_send(struct ce_state *ce_state,
  *
  * Implementation note: Pushes a buffer to Dest ring.
  */
-int ath10k_ce_recv_buf_enqueue(struct ce_state *ce_state,
+int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
                               void *per_transfer_recv_context,
                               u32 buffer);
 
-void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
-                               void (*recv_cb) (struct ce_state *ce_state,
-                                                void *transfer_context,
-                                                u32 buffer,
-                                                unsigned int nbytes,
-                                                unsigned int transfer_id,
-                                                unsigned int flags));
+void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
+                               void (*recv_cb)(struct ath10k_ce_pipe *));
 
 /* recv flags */
 /* Data is byte-swapped */
@@ -253,7 +185,7 @@ void ath10k_ce_recv_cb_register(struct ce_state *ce_state,
  * Supply data for the next completed unprocessed receive descriptor.
  * Pops buffer from Dest ring.
  */
-int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
                                  void **per_transfer_contextp,
                                  u32 *bufferp,
                                  unsigned int *nbytesp,
@@ -263,7 +195,7 @@ int ath10k_ce_completed_recv_next(struct ce_state *ce_state,
  * Supply data for the next completed unprocessed send descriptor.
  * Pops 1 completed send buffer from Source ring.
  */
-int ath10k_ce_completed_send_next(struct ce_state *ce_state,
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
                           void **per_transfer_contextp,
                           u32 *bufferp,
                           unsigned int *nbytesp,
@@ -272,7 +204,7 @@ int ath10k_ce_completed_send_next(struct ce_state *ce_state,
 /*==================CE Engine Initialization=======================*/
 
 /* Initialize an instance of a CE */
-struct ce_state *ath10k_ce_init(struct ath10k *ar,
+struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
                                unsigned int ce_id,
                                const struct ce_attr *attr);
 
@@ -282,7 +214,7 @@ struct ce_state *ath10k_ce_init(struct ath10k *ar,
  * receive buffers.  Target DMA must be stopped before using
  * this API.
  */
-int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
                               u32 *bufferp);
 
@@ -291,13 +223,13 @@ int ath10k_ce_revoke_recv_next(struct ce_state *ce_state,
  * pending sends.  Target DMA must be stopped before using
  * this API.
  */
-int ath10k_ce_cancel_send_next(struct ce_state *ce_state,
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
                               void **per_transfer_contextp,
                               u32 *bufferp,
                               unsigned int *nbytesp,
                               unsigned int *transfer_idp);
 
-void ath10k_ce_deinit(struct ce_state *ce_state);
+void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
 
 /*==================CE Interrupt Handlers====================*/
 void ath10k_ce_per_engine_service_any(struct ath10k *ar);
@@ -322,9 +254,6 @@ struct ce_attr {
        /* CE_ATTR_* values */
        unsigned int flags;
 
-       /* currently not in use */
-       unsigned int priority;
-
        /* #entries in source ring - Must be a power of 2 */
        unsigned int src_nentries;
 
@@ -336,21 +265,8 @@ struct ce_attr {
 
        /* #entries in destination ring - Must be a power of 2 */
        unsigned int dest_nentries;
-
-       /* Future use */
-       void *reserved;
 };
 
-/*
- * When using sendlist_send to transfer multiple buffer fragments, the
- * transfer context of each fragment, except last one, will be filled
- * with CE_SENDLIST_ITEM_CTXT. ce_completed_send will return success for
- * each fragment done with send and the transfer context would be
- * CE_SENDLIST_ITEM_CTXT. Upper layer could use this to identify the
- * status of a send completion.
- */
-#define CE_SENDLIST_ITEM_CTXT  ((void *)0xcecebeef)
-
 #define SR_BA_ADDRESS          0x0000
 #define SR_SIZE_ADDRESS                0x0004
 #define DR_BA_ADDRESS          0x0008
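
The header changes above shrink the completion callbacks to a single argument: a handler now receives only the pipe and pops completed descriptors itself with ath10k_ce_completed_send_next() / ath10k_ce_completed_recv_next(). A hedged fragment showing how a caller might wire that up; example_send_done() and example_pipe_setup() are illustrative, not code from this merge:

/* Drain every completed send on this pipe; the per-transfer context is
 * whatever the sender passed to ath10k_ce_send(). */
static void example_send_done(struct ath10k_ce_pipe *pipe)
{
        void *ctx;
        u32 buffer;
        unsigned int nbytes, transfer_id;

        while (ath10k_ce_completed_send_next(pipe, &ctx, &buffer,
                                             &nbytes, &transfer_id) == 0) {
                /* hand 'ctx' back to the upper layer here */
        }
}

static void example_pipe_setup(struct ath10k_ce_pipe *pipe)
{
        /* last argument 0: leave copy-complete interrupts enabled */
        ath10k_ce_send_cb_register(pipe, example_send_done, 0);
}
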
index 7226c23b956991165f5f1f7c5e7253d951400387..1129994fb10505864c57e1025d95cec8aa009093 100644 (file)
@@ -38,17 +38,6 @@ MODULE_PARM_DESC(uart_print, "Uart target debugging");
 MODULE_PARM_DESC(p2p, "Enable ath10k P2P support");
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
-       {
-               .id = QCA988X_HW_1_0_VERSION,
-               .name = "qca988x hw1.0",
-               .patch_load_addr = QCA988X_HW_1_0_PATCH_LOAD_ADDR,
-               .fw = {
-                       .dir = QCA988X_HW_1_0_FW_DIR,
-                       .fw = QCA988X_HW_1_0_FW_FILE,
-                       .otp = QCA988X_HW_1_0_OTP_FILE,
-                       .board = QCA988X_HW_1_0_BOARD_DATA_FILE,
-               },
-       },
        {
                .id = QCA988X_HW_2_0_VERSION,
                .name = "qca988x hw2.0",
@@ -64,33 +53,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 
 static void ath10k_send_suspend_complete(struct ath10k *ar)
 {
-       ath10k_dbg(ATH10K_DBG_CORE, "%s\n", __func__);
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
 
        ar->is_target_paused = true;
        wake_up(&ar->event_queue);
 }
 
-static int ath10k_check_fw_version(struct ath10k *ar)
-{
-       char version[32];
-
-       if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
-           ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
-           ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
-           ar->fw_version_build >= SUPPORTED_FW_BUILD)
-               return 0;
-
-       snprintf(version, sizeof(version), "%u.%u.%u.%u",
-                SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
-                SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
-
-       ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
-                   ar->hw->wiphy->fw_version);
-       ath10k_warn("Please upgrade to version %s (or newer)\n", version);
-
-       return 0;
-}
-
 static int ath10k_init_connect_htc(struct ath10k *ar)
 {
        int status;
@@ -112,7 +80,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
                goto timeout;
        }
 
-       ath10k_dbg(ATH10K_DBG_CORE, "core wmi ready\n");
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot wmi ready\n");
        return 0;
 
 timeout:
@@ -200,8 +168,7 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
        return fw;
 }
 
-static int ath10k_push_board_ext_data(struct ath10k *ar,
-                                     const struct firmware *fw)
+static int ath10k_push_board_ext_data(struct ath10k *ar)
 {
        u32 board_data_size = QCA988X_BOARD_DATA_SZ;
        u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
@@ -214,21 +181,21 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
                return ret;
        }
 
-       ath10k_dbg(ATH10K_DBG_CORE,
-                  "ath10k: Board extended Data download addr: 0x%x\n",
+       ath10k_dbg(ATH10K_DBG_BOOT,
+                  "boot push board extended data addr 0x%x\n",
                   board_ext_data_addr);
 
        if (board_ext_data_addr == 0)
                return 0;
 
-       if (fw->size != (board_data_size + board_ext_data_size)) {
+       if (ar->board_len != (board_data_size + board_ext_data_size)) {
                ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
-                          fw->size, board_data_size, board_ext_data_size);
+                          ar->board_len, board_data_size, board_ext_data_size);
                return -EINVAL;
        }
 
        ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
-                                     fw->data + board_data_size,
+                                     ar->board_data + board_data_size,
                                      board_ext_data_size);
        if (ret) {
                ath10k_err("could not write board ext data (%d)\n", ret);
@@ -247,12 +214,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
 
 static int ath10k_download_board_data(struct ath10k *ar)
 {
-       const struct firmware *fw = ar->board_data;
        u32 board_data_size = QCA988X_BOARD_DATA_SZ;
        u32 address;
        int ret;
 
-       ret = ath10k_push_board_ext_data(ar, fw);
+       ret = ath10k_push_board_ext_data(ar);
        if (ret) {
                ath10k_err("could not push board ext data (%d)\n", ret);
                goto exit;
@@ -264,8 +230,9 @@ static int ath10k_download_board_data(struct ath10k *ar)
                goto exit;
        }
 
-       ret = ath10k_bmi_write_memory(ar, address, fw->data,
-                                     min_t(u32, board_data_size, fw->size));
+       ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
+                                     min_t(u32, board_data_size,
+                                           ar->board_len));
        if (ret) {
                ath10k_err("could not write board data (%d)\n", ret);
                goto exit;
@@ -283,17 +250,16 @@ exit:
 
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
-       const struct firmware *fw = ar->otp;
        u32 address = ar->hw_params.patch_load_addr;
        u32 exec_param;
        int ret;
 
        /* OTP is optional */
 
-       if (!ar->otp)
+       if (!ar->otp_data || !ar->otp_len)
                return 0;
 
-       ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+       ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
        if (ret) {
                ath10k_err("could not write otp (%d)\n", ret);
                goto exit;
@@ -312,13 +278,13 @@ exit:
 
 static int ath10k_download_fw(struct ath10k *ar)
 {
-       const struct firmware *fw = ar->firmware;
        u32 address;
        int ret;
 
        address = ar->hw_params.patch_load_addr;
 
-       ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
+       ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
+                                      ar->firmware_len);
        if (ret) {
                ath10k_err("could not write fw (%d)\n", ret);
                goto exit;
@@ -330,8 +296,8 @@ exit:
 
 static void ath10k_core_free_firmware_files(struct ath10k *ar)
 {
-       if (ar->board_data && !IS_ERR(ar->board_data))
-               release_firmware(ar->board_data);
+       if (ar->board && !IS_ERR(ar->board))
+               release_firmware(ar->board);
 
        if (ar->otp && !IS_ERR(ar->otp))
                release_firmware(ar->otp);
@@ -339,12 +305,20 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
        if (ar->firmware && !IS_ERR(ar->firmware))
                release_firmware(ar->firmware);
 
+       ar->board = NULL;
        ar->board_data = NULL;
+       ar->board_len = 0;
+
        ar->otp = NULL;
+       ar->otp_data = NULL;
+       ar->otp_len = 0;
+
        ar->firmware = NULL;
+       ar->firmware_data = NULL;
+       ar->firmware_len = 0;
 }
 
-static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
 {
        int ret = 0;
 
@@ -358,15 +332,18 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
                return -EINVAL;
        }
 
-       ar->board_data = ath10k_fetch_fw_file(ar,
-                                             ar->hw_params.fw.dir,
-                                             ar->hw_params.fw.board);
-       if (IS_ERR(ar->board_data)) {
-               ret = PTR_ERR(ar->board_data);
+       ar->board = ath10k_fetch_fw_file(ar,
+                                        ar->hw_params.fw.dir,
+                                        ar->hw_params.fw.board);
+       if (IS_ERR(ar->board)) {
+               ret = PTR_ERR(ar->board);
                ath10k_err("could not fetch board data (%d)\n", ret);
                goto err;
        }
 
+       ar->board_data = ar->board->data;
+       ar->board_len = ar->board->size;
+
        ar->firmware = ath10k_fetch_fw_file(ar,
                                            ar->hw_params.fw.dir,
                                            ar->hw_params.fw.fw);
@@ -376,6 +353,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
                goto err;
        }
 
+       ar->firmware_data = ar->firmware->data;
+       ar->firmware_len = ar->firmware->size;
+
        /* OTP may be undefined. If so, don't fetch it at all */
        if (ar->hw_params.fw.otp == NULL)
                return 0;
@@ -389,6 +369,172 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
                goto err;
        }
 
+       ar->otp_data = ar->otp->data;
+       ar->otp_len = ar->otp->size;
+
+       return 0;
+
+err:
+       ath10k_core_free_firmware_files(ar);
+       return ret;
+}
+
+static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+{
+       size_t magic_len, len, ie_len;
+       int ie_id, i, index, bit, ret;
+       struct ath10k_fw_ie *hdr;
+       const u8 *data;
+       __le32 *timestamp;
+
+       /* first fetch the firmware file (firmware-*.bin) */
+       ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
+       if (IS_ERR(ar->firmware)) {
+               ath10k_err("Could not fetch firmware file '%s': %ld\n",
+                          name, PTR_ERR(ar->firmware));
+               return PTR_ERR(ar->firmware);
+       }
+
+       data = ar->firmware->data;
+       len = ar->firmware->size;
+
+       /* magic also includes the null byte, check that as well */
+       magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+       if (len < magic_len) {
+               ath10k_err("firmware image too small to contain magic: %zu\n",
+                          len);
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+               ath10k_err("Invalid firmware magic\n");
+               ret = -EINVAL;
+               goto err;
+       }
+
+       /* jump over the padding */
+       magic_len = ALIGN(magic_len, 4);
+
+       len -= magic_len;
+       data += magic_len;
+
+       /* loop elements */
+       while (len > sizeof(struct ath10k_fw_ie)) {
+               hdr = (struct ath10k_fw_ie *)data;
+
+               ie_id = le32_to_cpu(hdr->id);
+               ie_len = le32_to_cpu(hdr->len);
+
+               len -= sizeof(*hdr);
+               data += sizeof(*hdr);
+
+               if (len < ie_len) {
+                       ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
+                                  ie_id, len, ie_len);
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               switch (ie_id) {
+               case ATH10K_FW_IE_FW_VERSION:
+                       if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+                               break;
+
+                       memcpy(ar->hw->wiphy->fw_version, data, ie_len);
+                       ar->hw->wiphy->fw_version[ie_len] = '\0';
+
+                       ath10k_dbg(ATH10K_DBG_BOOT,
+                                  "found fw version %s\n",
+                                   ar->hw->wiphy->fw_version);
+                       break;
+               case ATH10K_FW_IE_TIMESTAMP:
+                       if (ie_len != sizeof(u32))
+                               break;
+
+                       timestamp = (__le32 *)data;
+
+                       ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
+                                  le32_to_cpup(timestamp));
+                       break;
+               case ATH10K_FW_IE_FEATURES:
+                       ath10k_dbg(ATH10K_DBG_BOOT,
+                                  "found firmware features ie (%zd B)\n",
+                                  ie_len);
+
+                       for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+                               index = i / 8;
+                               bit = i % 8;
+
+                               if (index == ie_len)
+                                       break;
+
+                               if (data[index] & (1 << bit))
+                                       __set_bit(i, ar->fw_features);
+                       }
+
+                       ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
+                                       ar->fw_features,
+                                       sizeof(ar->fw_features));
+                       break;
+               case ATH10K_FW_IE_FW_IMAGE:
+                       ath10k_dbg(ATH10K_DBG_BOOT,
+                                  "found fw image ie (%zd B)\n",
+                                  ie_len);
+
+                       ar->firmware_data = data;
+                       ar->firmware_len = ie_len;
+
+                       break;
+               case ATH10K_FW_IE_OTP_IMAGE:
+                       ath10k_dbg(ATH10K_DBG_BOOT,
+                                  "found otp image ie (%zd B)\n",
+                                  ie_len);
+
+                       ar->otp_data = data;
+                       ar->otp_len = ie_len;
+
+                       break;
+               default:
+                       ath10k_warn("Unknown FW IE: %u\n",
+                                   le32_to_cpu(hdr->id));
+                       break;
+               }
+
+               /* jump over the padding */
+               ie_len = ALIGN(ie_len, 4);
+
+               len -= ie_len;
+               data += ie_len;
+       }
+
+       if (!ar->firmware_data || !ar->firmware_len) {
+               ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
+                           name);
+               ret = -ENOMEDIUM;
+               goto err;
+       }
+
+       /* now fetch the board file */
+       if (ar->hw_params.fw.board == NULL) {
+               ath10k_err("board data file not defined");
+               ret = -EINVAL;
+               goto err;
+       }
+
+       ar->board = ath10k_fetch_fw_file(ar,
+                                        ar->hw_params.fw.dir,
+                                        ar->hw_params.fw.board);
+       if (IS_ERR(ar->board)) {
+               ret = PTR_ERR(ar->board);
+               ath10k_err("could not fetch board data (%d)\n", ret);
+               goto err;
+       }
+
+       ar->board_data = ar->board->data;
+       ar->board_len = ar->board->size;
+
        return 0;
 
 err:
@@ -396,6 +542,28 @@ err:
        return ret;
 }
 
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+       int ret;
+
+       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
+       if (ret == 0) {
+               ar->fw_api = 2;
+               goto out;
+       }
+
+       ret = ath10k_core_fetch_firmware_api_1(ar);
+       if (ret)
+               return ret;
+
+       ar->fw_api = 1;
+
+out:
+       ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+       return 0;
+}
+
 static int ath10k_init_download_firmware(struct ath10k *ar)
 {
        int ret;
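
The API-2 image parsed by ath10k_core_fetch_firmware_api_n() above is a magic string followed by padded IE records, each carrying a little-endian 32-bit id and length. A standalone sketch of that walk, with the endianness conversion and the driver-specific IE handling elided; all example_* names are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the on-disk IE header walked above: LE32 id and length,
 * payload padded to a 4-byte boundary. */
struct example_fw_ie {
        uint32_t id;
        uint32_t len;
};

#define ALIGN4(x) (((x) + 3u) & ~3u)

static void example_walk_ies(const uint8_t *data, size_t len)
{
        while (len > sizeof(struct example_fw_ie)) {
                const struct example_fw_ie *hdr = (const void *)data;
                uint32_t ie_id = hdr->id;       /* le32_to_cpu() in the kernel */
                uint32_t ie_len = hdr->len;

                len -= sizeof(*hdr);
                data += sizeof(*hdr);

                if (len < ie_len) {
                        fprintf(stderr, "truncated IE %u\n", ie_id);
                        return;
                }

                printf("IE id %u, %u payload bytes\n", ie_id, ie_len);

                ie_len = ALIGN4(ie_len);        /* skip the padding as well */
                if (ie_len > len)
                        return;                 /* unpadded final IE */
                len -= ie_len;
                data += ie_len;
        }
}
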
@@ -446,6 +614,13 @@ static int ath10k_init_uart(struct ath10k *ar)
                return ret;
        }
 
+       /* Set the UART baud rate to 19200. */
+       ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
+       if (ret) {
+               ath10k_warn("could not set the baud rate (%d)\n", ret);
+               return ret;
+       }
+
        ath10k_info("UART prints enabled\n");
        return 0;
 }
@@ -545,6 +720,9 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
        INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
        skb_queue_head_init(&ar->offchan_tx_queue);
 
+       INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+       skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
        init_waitqueue_head(&ar->event_queue);
 
        INIT_WORK(&ar->restart_work, ath10k_core_restart);
@@ -559,6 +737,8 @@ EXPORT_SYMBOL(ath10k_core_create);
 
 void ath10k_core_destroy(struct ath10k *ar)
 {
+       ath10k_debug_destroy(ar);
+
        flush_workqueue(ar->workqueue);
        destroy_workqueue(ar->workqueue);
 
@@ -570,6 +750,8 @@ int ath10k_core_start(struct ath10k *ar)
 {
        int status;
 
+       lockdep_assert_held(&ar->conf_mutex);
+
        ath10k_bmi_start(ar);
 
        if (ath10k_init_configure_target(ar)) {
@@ -620,10 +802,6 @@ int ath10k_core_start(struct ath10k *ar)
 
        ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
 
-       status = ath10k_check_fw_version(ar);
-       if (status)
-               goto err_disconnect_htc;
-
        status = ath10k_wmi_cmd_init(ar);
        if (status) {
                ath10k_err("could not send WMI init command (%d)\n", status);
@@ -641,7 +819,12 @@ int ath10k_core_start(struct ath10k *ar)
        if (status)
                goto err_disconnect_htc;
 
+       status = ath10k_debug_start(ar);
+       if (status)
+               goto err_disconnect_htc;
+
        ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+       INIT_LIST_HEAD(&ar->arvifs);
 
        return 0;
 
@@ -658,6 +841,9 @@ EXPORT_SYMBOL(ath10k_core_start);
 
 void ath10k_core_stop(struct ath10k *ar)
 {
+       lockdep_assert_held(&ar->conf_mutex);
+
+       ath10k_debug_stop(ar);
        ath10k_htc_stop(&ar->htc);
        ath10k_htt_detach(&ar->htt);
        ath10k_wmi_detach(ar);
@@ -704,23 +890,65 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                return ret;
        }
 
+       mutex_lock(&ar->conf_mutex);
+
        ret = ath10k_core_start(ar);
        if (ret) {
                ath10k_err("could not init core (%d)\n", ret);
                ath10k_core_free_firmware_files(ar);
                ath10k_hif_power_down(ar);
+               mutex_unlock(&ar->conf_mutex);
                return ret;
        }
 
        ath10k_core_stop(ar);
+
+       mutex_unlock(&ar->conf_mutex);
+
        ath10k_hif_power_down(ar);
        return 0;
 }
 
-int ath10k_core_register(struct ath10k *ar)
+static int ath10k_core_check_chip_id(struct ath10k *ar)
+{
+       u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
+                  ar->chip_id, hw_revision);
+
+       /* Check that we are not using hw1.0 (some of them have same pci id
+        * as hw2.0) before doing anything else as ath10k crashes horribly
+        * due to missing hw1.0 workarounds. */
+       switch (hw_revision) {
+       case QCA988X_HW_1_0_CHIP_ID_REV:
+               ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
+               return -EOPNOTSUPP;
+
+       case QCA988X_HW_2_0_CHIP_ID_REV:
+               /* known hardware revision, continue normally */
+               return 0;
+
+       default:
+               ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
+                           ar->chip_id);
+               return 0;
+       }
+
+       return 0;
+}
+
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
 {
        int status;
 
+       ar->chip_id = chip_id;
+
+       status = ath10k_core_check_chip_id(ar);
+       if (status) {
+               ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
+               return status;
+       }
+
        status = ath10k_core_probe_fw(ar);
        if (status) {
                ath10k_err("could not probe fw (%d)\n", status);
@@ -755,6 +983,7 @@ void ath10k_core_unregister(struct ath10k *ar)
         * Otherwise we will fail to submit commands to FW and mac80211 will be
         * unhappy about callback failures. */
        ath10k_mac_unregister(ar);
+
        ath10k_core_free_firmware_files(ar);
 }
 EXPORT_SYMBOL(ath10k_core_unregister);
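
ath10k_core_register() now takes the chip id read by the bus code and refuses qca988x hw1.0 before touching any firmware. In isolation the gate is a mask-and-compare on the revision field; a sketch with placeholder mask/shift values, not the driver's real MS(chip_id, SOC_CHIP_ID_REV) definition:

#include <errno.h>
#include <stdint.h>

/* Placeholder field layout for illustration only. */
#define EXAMPLE_CHIP_ID_REV_MASK  0x0000000fu
#define EXAMPLE_CHIP_ID_REV_SHIFT 0

static int example_check_chip_rev(uint32_t chip_id, uint32_t bad_rev)
{
        uint32_t rev = (chip_id & EXAMPLE_CHIP_ID_REV_MASK) >>
                       EXAMPLE_CHIP_ID_REV_SHIFT;

        /* e.g. hw1.0: refuse to register rather than crash later */
        if (rev == bad_rev)
                return -EOPNOTSUPP;

        /* known-good and unknown revisions proceed; the driver only
         * warns about the unknown ones */
        return 0;
}
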
index e4bba563ed4273613e58332e45c4146c04818516..0934f7633de399df82df8ab74a9e0f6ef64f5b82 100644 (file)
 /* Antenna noise floor */
 #define ATH10K_DEFAULT_NOISE_FLOOR -95
 
+#define ATH10K_MAX_NUM_MGMT_PENDING 16
+
 struct ath10k;
 
 struct ath10k_skb_cb {
        dma_addr_t paddr;
        bool is_mapped;
        bool is_aborted;
+       u8 vdev_id;
 
        struct {
-               u8 vdev_id;
-               u16 msdu_id;
                u8 tid;
                bool is_offchan;
-               bool is_conf;
-               bool discard;
-               bool no_ack;
-               u8 refcount;
-               struct sk_buff *txfrag;
-               struct sk_buff *msdu;
-       } __packed htt;
 
-       /* 4 bytes left on 64bit arch */
+               u8 frag_len;
+               u8 pad_len;
+       } __packed htt;
 } __packed;
 
 static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
@@ -108,15 +104,26 @@ struct ath10k_bmi {
        bool done_sent;
 };
 
+#define ATH10K_MAX_MEM_REQS 16
+
+struct ath10k_mem_chunk {
+       void *vaddr;
+       dma_addr_t paddr;
+       u32 len;
+       u32 req_id;
+};
+
 struct ath10k_wmi {
        enum ath10k_htc_ep_id eid;
        struct completion service_ready;
        struct completion unified_ready;
-       atomic_t pending_tx_count;
-       wait_queue_head_t wq;
+       wait_queue_head_t tx_credits_wq;
+       struct wmi_cmd_map *cmd;
+       struct wmi_vdev_param_map *vdev_param;
+       struct wmi_pdev_param_map *pdev_param;
 
-       struct sk_buff_head wmi_event_list;
-       struct work_struct wmi_event_work;
+       u32 num_mem_chunks;
+       struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
 };
 
 struct ath10k_peer_stat {
@@ -198,17 +205,22 @@ struct ath10k_peer {
 #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
 
 struct ath10k_vif {
+       struct list_head list;
+
        u32 vdev_id;
        enum wmi_vdev_type vdev_type;
        enum wmi_vdev_subtype vdev_subtype;
        u32 beacon_interval;
        u32 dtim_period;
+       struct sk_buff *beacon;
 
        struct ath10k *ar;
        struct ieee80211_vif *vif;
 
+       struct work_struct wep_key_work;
        struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
-       u8 def_wep_key_index;
+       u8 def_wep_key_idx;
+       u8 def_wep_key_newidx;
 
        u16 tx_seq_no;
 
@@ -246,6 +258,9 @@ struct ath10k_debug {
        u32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
 
        struct completion event_stats_compl;
+
+       unsigned long htt_stats_mask;
+       struct delayed_work htt_stats_dwork;
 };
 
 enum ath10k_state {
@@ -270,12 +285,27 @@ enum ath10k_state {
        ATH10K_STATE_WEDGED,
 };
 
+enum ath10k_fw_features {
+       /* wmi_mgmt_rx_hdr contains extra RSSI information */
+       ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+       /* firmware from 10X branch */
+       ATH10K_FW_FEATURE_WMI_10X = 1,
+
+       /* firmware support tx frame management over WMI, otherwise it's HTT */
+       ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+
+       /* keep last */
+       ATH10K_FW_FEATURE_COUNT,
+};
+
 struct ath10k {
        struct ath_common ath_common;
        struct ieee80211_hw *hw;
        struct device *dev;
        u8 mac_addr[ETH_ALEN];
 
+       u32 chip_id;
        u32 target_version;
        u8 fw_version_major;
        u32 fw_version_minor;
@@ -288,6 +318,8 @@ struct ath10k {
        u32 vht_cap_info;
        u32 num_rf_chains;
 
+       DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
        struct targetdef *targetdef;
        struct hostdef *hostdef;
 
@@ -319,9 +351,19 @@ struct ath10k {
                } fw;
        } hw_params;
 
-       const struct firmware *board_data;
+       const struct firmware *board;
+       const void *board_data;
+       size_t board_len;
+
        const struct firmware *otp;
+       const void *otp_data;
+       size_t otp_len;
+
        const struct firmware *firmware;
+       const void *firmware_data;
+       size_t firmware_len;
+
+       int fw_api;
 
        struct {
                struct completion started;
@@ -364,6 +406,7 @@ struct ath10k {
        /* protects shared structure data */
        spinlock_t data_lock;
 
+       struct list_head arvifs;
        struct list_head peers;
        wait_queue_head_t peer_mapping_wq;
 
@@ -372,6 +415,9 @@ struct ath10k {
        struct completion offchan_tx_completed;
        struct sk_buff *offchan_tx_skb;
 
+       struct work_struct wmi_mgmt_tx_work;
+       struct sk_buff_head wmi_mgmt_tx_queue;
+
        enum ath10k_state state;
 
        struct work_struct restart_work;
@@ -393,7 +439,7 @@ void ath10k_core_destroy(struct ath10k *ar);
 
 int ath10k_core_start(struct ath10k *ar);
 void ath10k_core_stop(struct ath10k *ar);
-int ath10k_core_register(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar, u32 chip_id);
 void ath10k_core_unregister(struct ath10k *ar);
 
 #endif /* _CORE_H_ */
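
The new fw_features bitmap in struct ath10k is filled from the firmware's feature IE and is meant to be queried with the standard bitmap helpers. A hedged fragment of the intended usage; the example_*_mgmt_tx() helpers are hypothetical, not functions from this merge:

        /* choose the management-frame transport from a firmware feature bit */
        if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
                ret = example_wmi_mgmt_tx(ar, skb);     /* hypothetical helper */
        else
                ret = example_htt_mgmt_tx(ar, skb);     /* hypothetical helper */
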
index 3d65594fa098a40f638f73499d423e80d2a64a7e..760ff2289e3cf2903773544d3349cab7bde1a852 100644 (file)
@@ -21,6 +21,9 @@
 #include "core.h"
 #include "debug.h"
 
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
 static int ath10k_printk(const char *level, const char *fmt, ...)
 {
        struct va_format vaf;
@@ -260,7 +263,6 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
        }
 
        spin_unlock_bh(&ar->data_lock);
-       mutex_unlock(&ar->conf_mutex);
        complete(&ar->debug.event_stats_compl);
 }
 
@@ -499,6 +501,144 @@ static const struct file_operations fops_simulate_fw_crash = {
        .llseek = default_llseek,
 };
 
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       unsigned int len;
+       char buf[50];
+
+       len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+       .read = ath10k_read_chip_id,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+       u64 cookie;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (ar->debug.htt_stats_mask == 0)
+               /* htt stats are disabled */
+               return 0;
+
+       if (ar->state != ATH10K_STATE_ON)
+               return 0;
+
+       cookie = get_jiffies_64();
+
+       ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+                                      cookie);
+       if (ret) {
+               ath10k_warn("failed to send htt stats request: %d\n", ret);
+               return ret;
+       }
+
+       queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+                          msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+       return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+       struct ath10k *ar = container_of(work, struct ath10k,
+                                        debug.htt_stats_dwork.work);
+
+       mutex_lock(&ar->conf_mutex);
+
+       ath10k_debug_htt_stats_req(ar);
+
+       mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+                                           char __user *user_buf,
+                                           size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       char buf[32];
+       unsigned int len;
+
+       len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+                                            const char __user *user_buf,
+                                            size_t count, loff_t *ppos)
+{
+       struct ath10k *ar = file->private_data;
+       unsigned long mask;
+       int ret;
+
+       ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+       if (ret)
+               return ret;
+
+       /* max 8 bit masks (for now) */
+       if (mask > 0xff)
+               return -E2BIG;
+
+       mutex_lock(&ar->conf_mutex);
+
+       ar->debug.htt_stats_mask = mask;
+
+       ret = ath10k_debug_htt_stats_req(ar);
+       if (ret)
+               goto out;
+
+       ret = count;
+
+out:
+       mutex_unlock(&ar->conf_mutex);
+
+       return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+       .read = ath10k_read_htt_stats_mask,
+       .write = ath10k_write_htt_stats_mask,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+int ath10k_debug_start(struct ath10k *ar)
+{
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       ret = ath10k_debug_htt_stats_req(ar);
+       if (ret)
+               /* continue normally anyway, this isn't serious */
+               ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
+
+       return 0;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+       lockdep_assert_held(&ar->conf_mutex);
+
+       /* Must not use _sync to avoid deadlock, we do that in
+        * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
+        * warning from del_timer(). */
+       if (ar->debug.htt_stats_mask != 0)
+               cancel_delayed_work(&ar->debug.htt_stats_dwork);
+}
+
 int ath10k_debug_create(struct ath10k *ar)
 {
        ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -507,6 +647,9 @@ int ath10k_debug_create(struct ath10k *ar)
        if (!ar->debug.debugfs_phy)
                return -ENOMEM;
 
+       INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+                         ath10k_debug_htt_stats_dwork);
+
        init_completion(&ar->debug.event_stats_compl);
 
        debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
@@ -518,8 +661,20 @@ int ath10k_debug_create(struct ath10k *ar)
        debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
                            ar, &fops_simulate_fw_crash);
 
+       debugfs_create_file("chip_id", S_IRUSR, ar->debug.debugfs_phy,
+                           ar, &fops_chip_id);
+
+       debugfs_create_file("htt_stats_mask", S_IRUSR, ar->debug.debugfs_phy,
+                           ar, &fops_htt_stats_mask);
+
        return 0;
 }
+
+void ath10k_debug_destroy(struct ath10k *ar)
+{
+       cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
+
 #endif /* CONFIG_ATH10K_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
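
The htt_stats plumbing above follows the usual self-rearming delayed-work shape: the handler re-issues the request and re-queues itself while the mask is non-zero, ath10k_debug_stop() cancels without _sync because both it and the handler run under conf_mutex, and the synchronous cancel waits until ath10k_debug_destroy(). A generic sketch of that pattern under assumed example_* names:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_poller {
        struct mutex lock;
        struct workqueue_struct *wq;
        struct delayed_work work;
        unsigned long mask;             /* 0 == polling disabled */
};

static void example_poll_work(struct work_struct *work)
{
        struct example_poller *p = container_of(work, struct example_poller,
                                                work.work);

        mutex_lock(&p->lock);
        if (p->mask) {
                example_issue_request(p, p->mask);      /* hypothetical */
                queue_delayed_work(p->wq, &p->work,     /* re-arm */
                                   msecs_to_jiffies(1000));
        }
        mutex_unlock(&p->lock);
}

static void example_poll_stop(struct example_poller *p)
{
        /* called under p->lock, so cancel_delayed_work_sync() would
         * deadlock here; the sync cancel happens at teardown instead */
        cancel_delayed_work(&p->work);
}
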
index 168140c54028eb16d7482ecbb7a6a835d4807736..3cfe3ee90dbe1d802d7c3b8b4fe0c7cd8c815ee9 100644 (file)
@@ -27,22 +27,26 @@ enum ath10k_debug_mask {
        ATH10K_DBG_HTC          = 0x00000004,
        ATH10K_DBG_HTT          = 0x00000008,
        ATH10K_DBG_MAC          = 0x00000010,
-       ATH10K_DBG_CORE         = 0x00000020,
+       ATH10K_DBG_BOOT         = 0x00000020,
        ATH10K_DBG_PCI_DUMP     = 0x00000040,
        ATH10K_DBG_HTT_DUMP     = 0x00000080,
        ATH10K_DBG_MGMT         = 0x00000100,
        ATH10K_DBG_DATA         = 0x00000200,
+       ATH10K_DBG_BMI          = 0x00000400,
        ATH10K_DBG_ANY          = 0xffffffff,
 };
 
 extern unsigned int ath10k_debug_mask;
 
-extern __printf(1, 2) int ath10k_info(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_err(const char *fmt, ...);
-extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
+__printf(1, 2) int ath10k_info(const char *fmt, ...);
+__printf(1, 2) int ath10k_err(const char *fmt, ...);
+__printf(1, 2) int ath10k_warn(const char *fmt, ...);
 
 #ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
 int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_destroy(struct ath10k *ar);
 void ath10k_debug_read_service_map(struct ath10k *ar,
                                   void *service_map,
                                   size_t map_size);
@@ -50,11 +54,24 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
                                    struct wmi_stats_event *ev);
 
 #else
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+       return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
 static inline int ath10k_debug_create(struct ath10k *ar)
 {
        return 0;
 }
 
+static inline void ath10k_debug_destroy(struct ath10k *ar)
+{
+}
+
 static inline void ath10k_debug_read_service_map(struct ath10k *ar,
                                                 void *service_map,
                                                 size_t map_size)
@@ -68,7 +85,7 @@ static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
 #endif /* CONFIG_ATH10K_DEBUGFS */
 
 #ifdef CONFIG_ATH10K_DEBUG
-extern __printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
                                      const char *fmt, ...);
 void ath10k_dbg_dump(enum ath10k_debug_mask mask,
                     const char *msg, const char *prefix,
index ef3329ef52f369f0b65a7fd2e830ab07d99c573b..3118d7506734267c8fcca7e80ad9eeba9269fce1 100644 (file)
@@ -103,10 +103,10 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
        struct ath10k_htc_hdr *hdr;
 
        hdr = (struct ath10k_htc_hdr *)skb->data;
-       memset(hdr, 0, sizeof(*hdr));
 
        hdr->eid = ep->eid;
        hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+       hdr->flags = 0;
 
        spin_lock_bh(&ep->htc->tx_lock);
        hdr->seq_no = ep->seq_no++;
@@ -117,134 +117,13 @@ static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
        spin_unlock_bh(&ep->htc->tx_lock);
 }
 
-static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
-                               struct ath10k_htc_ep *ep,
-                               struct sk_buff *skb,
-                               u8 credits)
-{
-       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
-       int ret;
-
-       ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
-                  ep->eid, skb);
-
-       ath10k_htc_prepare_tx_skb(ep, skb);
-
-       ret = ath10k_skb_map(htc->ar->dev, skb);
-       if (ret)
-               goto err;
-
-       ret = ath10k_hif_send_head(htc->ar,
-                                  ep->ul_pipe_id,
-                                  ep->eid,
-                                  skb->len,
-                                  skb);
-       if (unlikely(ret))
-               goto err;
-
-       return 0;
-err:
-       ath10k_warn("HTC issue failed: %d\n", ret);
-
-       spin_lock_bh(&htc->tx_lock);
-       ep->tx_credits += credits;
-       spin_unlock_bh(&htc->tx_lock);
-
-       /* this is the simplest way to handle out-of-resources for non-credit
-        * based endpoints. credit based endpoints can still get -ENOSR, but
-        * this is highly unlikely as credit reservation should prevent that */
-       if (ret == -ENOSR) {
-               spin_lock_bh(&htc->tx_lock);
-               __skb_queue_head(&ep->tx_queue, skb);
-               spin_unlock_bh(&htc->tx_lock);
-
-               return ret;
-       }
-
-       skb_cb->is_aborted = true;
-       ath10k_htc_notify_tx_completion(ep, skb);
-
-       return ret;
-}
-
-static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
-                                                      struct ath10k_htc_ep *ep,
-                                                      u8 *credits)
-{
-       struct sk_buff *skb;
-       struct ath10k_skb_cb *skb_cb;
-       int credits_required;
-       int remainder;
-       unsigned int transfer_len;
-
-       lockdep_assert_held(&htc->tx_lock);
-
-       skb = __skb_dequeue(&ep->tx_queue);
-       if (!skb)
-               return NULL;
-
-       skb_cb = ATH10K_SKB_CB(skb);
-       transfer_len = skb->len;
-
-       if (likely(transfer_len <= htc->target_credit_size)) {
-               credits_required = 1;
-       } else {
-               /* figure out how many credits this message requires */
-               credits_required = transfer_len / htc->target_credit_size;
-               remainder = transfer_len % htc->target_credit_size;
-
-               if (remainder)
-                       credits_required++;
-       }
-
-       ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
-                  credits_required, ep->tx_credits);
-
-       if (ep->tx_credits < credits_required) {
-               __skb_queue_head(&ep->tx_queue, skb);
-               return NULL;
-       }
-
-       ep->tx_credits -= credits_required;
-       *credits = credits_required;
-       return skb;
-}
-
-static void ath10k_htc_send_work(struct work_struct *work)
-{
-       struct ath10k_htc_ep *ep = container_of(work,
-                                       struct ath10k_htc_ep, send_work);
-       struct ath10k_htc *htc = ep->htc;
-       struct sk_buff *skb;
-       u8 credits = 0;
-       int ret;
-
-       while (true) {
-               if (ep->ul_is_polled)
-                       ath10k_htc_send_complete_check(ep, 0);
-
-               spin_lock_bh(&htc->tx_lock);
-               if (ep->tx_credit_flow_enabled)
-                       skb = ath10k_htc_get_skb_credit_based(htc, ep,
-                                                             &credits);
-               else
-                       skb = __skb_dequeue(&ep->tx_queue);
-               spin_unlock_bh(&htc->tx_lock);
-
-               if (!skb)
-                       break;
-
-               ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
-               if (ret == -ENOSR)
-                       break;
-       }
-}
-
 int ath10k_htc_send(struct ath10k_htc *htc,
                    enum ath10k_htc_ep_id eid,
                    struct sk_buff *skb)
 {
        struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+       int credits = 0;
+       int ret;
 
        if (htc->ar->state == ATH10K_STATE_WEDGED)
                return -ECOMM;
@@ -254,18 +133,55 @@ int ath10k_htc_send(struct ath10k_htc *htc,
                return -ENOENT;
        }
 
+       /* FIXME: This looks ugly, can we fix it? */
        spin_lock_bh(&htc->tx_lock);
        if (htc->stopped) {
                spin_unlock_bh(&htc->tx_lock);
                return -ESHUTDOWN;
        }
+       spin_unlock_bh(&htc->tx_lock);
 
-       __skb_queue_tail(&ep->tx_queue, skb);
        skb_push(skb, sizeof(struct ath10k_htc_hdr));
-       spin_unlock_bh(&htc->tx_lock);
 
-       queue_work(htc->ar->workqueue, &ep->send_work);
+       if (ep->tx_credit_flow_enabled) {
+               credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+               spin_lock_bh(&htc->tx_lock);
+               if (ep->tx_credits < credits) {
+                       spin_unlock_bh(&htc->tx_lock);
+                       ret = -EAGAIN;
+                       goto err_pull;
+               }
+               ep->tx_credits -= credits;
+               spin_unlock_bh(&htc->tx_lock);
+       }
+
+       ath10k_htc_prepare_tx_skb(ep, skb);
+
+       ret = ath10k_skb_map(htc->ar->dev, skb);
+       if (ret)
+               goto err_credits;
+
+       ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
+                                  skb->len, skb);
+       if (ret)
+               goto err_unmap;
+
        return 0;
+
+err_unmap:
+       ath10k_skb_unmap(htc->ar->dev, skb);
+err_credits:
+       if (ep->tx_credit_flow_enabled) {
+               spin_lock_bh(&htc->tx_lock);
+               ep->tx_credits += credits;
+               spin_unlock_bh(&htc->tx_lock);
+
+               if (ep->ep_ops.ep_tx_credits)
+                       ep->ep_ops.ep_tx_credits(htc->ar);
+       }
+err_pull:
+       skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+       return ret;
 }
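
The reworked send path above charges DIV_ROUND_UP(skb->len, htc->target_credit_size) credits up front and refunds them in the err_credits path. A minimal userspace sketch of that credit arithmetic, assuming a hypothetical credit size of 1792 bytes (the real value is negotiated with the target at HTC setup):

/* Standalone sketch of the HTC credit accounting used above.
 * The 1792-byte credit size is an example value only. */
#include <stdio.h>

#define TARGET_CREDIT_SIZE 1792
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int htc_try_charge_credits(int *tx_credits, int skb_len)
{
        int credits = DIV_ROUND_UP(skb_len, TARGET_CREDIT_SIZE);

        if (*tx_credits < credits)
                return -1;      /* caller would return -EAGAIN */

        *tx_credits -= credits;
        return credits;         /* refunded later if the send fails */
}

int main(void)
{
        int credits_avail = 4;
        int charged = htc_try_charge_credits(&credits_avail, 3000);

        /* 3000 bytes needs two 1792-byte credits */
        printf("charged %d, %d left\n", charged, credits_avail);
        return 0;
}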
 
 static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
@@ -278,39 +194,9 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
        ath10k_htc_notify_tx_completion(ep, skb);
        /* the skb now belongs to the completion handler */
 
-       /* note: when using TX credit flow, the re-checking of queues happens
-        * when credits flow back from the target.  in the non-TX credit case,
-        * we recheck after the packet completes */
-       spin_lock_bh(&htc->tx_lock);
-       if (!ep->tx_credit_flow_enabled && !htc->stopped)
-               queue_work(ar->workqueue, &ep->send_work);
-       spin_unlock_bh(&htc->tx_lock);
-
        return 0;
 }
 
-/* flush endpoint TX queue */
-static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
-                                        struct ath10k_htc_ep *ep)
-{
-       struct sk_buff *skb;
-       struct ath10k_skb_cb *skb_cb;
-
-       spin_lock_bh(&htc->tx_lock);
-       for (;;) {
-               skb = __skb_dequeue(&ep->tx_queue);
-               if (!skb)
-                       break;
-
-               skb_cb = ATH10K_SKB_CB(skb);
-               skb_cb->is_aborted = true;
-               ath10k_htc_notify_tx_completion(ep, skb);
-       }
-       spin_unlock_bh(&htc->tx_lock);
-
-       cancel_work_sync(&ep->send_work);
-}
-
 /***********/
 /* Receive */
 /***********/
@@ -340,8 +226,11 @@ ath10k_htc_process_credit_report(struct ath10k_htc *htc,
                ep = &htc->endpoint[report->eid];
                ep->tx_credits += report->credits;
 
-               if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
-                       queue_work(htc->ar->workqueue, &ep->send_work);
+               if (ep->ep_ops.ep_tx_credits) {
+                       spin_unlock_bh(&htc->tx_lock);
+                       ep->ep_ops.ep_tx_credits(htc->ar);
+                       spin_lock_bh(&htc->tx_lock);
+               }
        }
        spin_unlock_bh(&htc->tx_lock);
 }
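
The credit report handler above drops tx_lock around the ep_tx_credits callback, presumably because the callback may submit more frames through ath10k_htc_send, which takes the same lock. A small pthread sketch of that unlock, call, relock pattern; all names below are illustrative, not from the driver:

/* Sketch of dropping a lock around a callback that may re-enter the
 * locked path, as the credit-report handler above does with tx_lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int tx_credits;

static void credits_available_cb(void)
{
        /* A real callback might queue more frames, taking tx_lock again. */
        pthread_mutex_lock(&tx_lock);
        printf("callback sees %d credits\n", tx_credits);
        pthread_mutex_unlock(&tx_lock);
}

static void process_credit_report(int credits)
{
        pthread_mutex_lock(&tx_lock);
        tx_credits += credits;

        /* Drop the lock before invoking the callback to avoid deadlock,
         * then re-acquire it to continue walking the report list. */
        pthread_mutex_unlock(&tx_lock);
        credits_available_cb();
        pthread_mutex_lock(&tx_lock);

        pthread_mutex_unlock(&tx_lock);
}

int main(void)
{
        process_credit_report(2);
        return 0;
}
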
@@ -599,10 +488,8 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
                ep->max_ep_message_len = 0;
                ep->max_tx_queue_depth = 0;
                ep->eid = i;
-               skb_queue_head_init(&ep->tx_queue);
                ep->htc = htc;
                ep->tx_credit_flow_enabled = true;
-               INIT_WORK(&ep->send_work, ath10k_htc_send_work);
        }
 }
 
@@ -752,8 +639,8 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
        tx_alloc = ath10k_htc_get_credit_allocation(htc,
                                                    conn_req->service_id);
        if (!tx_alloc)
-               ath10k_dbg(ATH10K_DBG_HTC,
-                          "HTC Service %s does not allocate target credits\n",
+               ath10k_dbg(ATH10K_DBG_BOOT,
+                          "boot htc service %s does not allocate target credits\n",
                           htc_service_name(conn_req->service_id));
 
        skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
@@ -772,16 +659,16 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
 
        flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
 
-       req_msg = &msg->connect_service;
-       req_msg->flags = __cpu_to_le16(flags);
-       req_msg->service_id = __cpu_to_le16(conn_req->service_id);
-
        /* Only enable credit flow control for WMI ctrl service */
        if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
                flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
                disable_credit_flow_ctrl = true;
        }
 
+       req_msg = &msg->connect_service;
+       req_msg->flags = __cpu_to_le16(flags);
+       req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
        INIT_COMPLETION(htc->ctl_resp);
 
        status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
@@ -873,19 +760,19 @@ setup:
        if (status)
                return status;
 
-       ath10k_dbg(ATH10K_DBG_HTC,
-                  "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
+       ath10k_dbg(ATH10K_DBG_BOOT,
+                  "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
                   htc_service_name(ep->service_id), ep->ul_pipe_id,
                   ep->dl_pipe_id, ep->eid);
 
-       ath10k_dbg(ATH10K_DBG_HTC,
-                  "EP %d UL polled: %d, DL polled: %d\n",
+       ath10k_dbg(ATH10K_DBG_BOOT,
+                  "boot htc ep %d ul polled %d dl polled %d\n",
                   ep->eid, ep->ul_is_polled, ep->dl_is_polled);
 
        if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
                ep->tx_credit_flow_enabled = false;
-               ath10k_dbg(ATH10K_DBG_HTC,
-                          "HTC service: %s eid: %d TX flow control disabled\n",
+               ath10k_dbg(ATH10K_DBG_BOOT,
+                          "boot htc service '%s' eid %d TX flow control disabled\n",
                           htc_service_name(ep->service_id), assigned_eid);
        }
 
@@ -945,18 +832,10 @@ int ath10k_htc_start(struct ath10k_htc *htc)
  */
 void ath10k_htc_stop(struct ath10k_htc *htc)
 {
-       int i;
-       struct ath10k_htc_ep *ep;
-
        spin_lock_bh(&htc->tx_lock);
        htc->stopped = true;
        spin_unlock_bh(&htc->tx_lock);
 
-       for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
-               ep = &htc->endpoint[i];
-               ath10k_htc_flush_endpoint_tx(htc, ep);
-       }
-
        ath10k_hif_stop(htc->ar);
 }
 
index e1dd8c761853d7d3197c173aeacb9499d4bd274a..4716d331e6b6504d712c858345545a410cbcbab1 100644 (file)
@@ -276,6 +276,7 @@ struct ath10k_htc_ops {
 struct ath10k_htc_ep_ops {
        void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
        void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+       void (*ep_tx_credits)(struct ath10k *);
 };
 
 /* service connection information */
@@ -315,15 +316,11 @@ struct ath10k_htc_ep {
        int ul_is_polled; /* call HIF to get tx completions */
        int dl_is_polled; /* call HIF to fetch rx (not implemented) */
 
-       struct sk_buff_head tx_queue;
-
        u8 seq_no; /* for debugging */
        int tx_credits;
        int tx_credit_size;
        int tx_credits_per_max_message;
        bool tx_credit_flow_enabled;
-
-       struct work_struct send_work;
 };
 
 struct ath10k_htc_svc_tx_credits {
index 39342c5cfcb270d8ded3d53fb8b0aaba011cc789..5f7eeebc54327736cbfc2b6fb77bc030029bf7c3 100644 (file)
@@ -104,21 +104,16 @@ err_htc_attach:
 
 static int ath10k_htt_verify_version(struct ath10k_htt *htt)
 {
-       ath10k_dbg(ATH10K_DBG_HTT,
-                  "htt target version %d.%d; host version %d.%d\n",
-                   htt->target_version_major,
-                   htt->target_version_minor,
-                   HTT_CURRENT_VERSION_MAJOR,
-                   HTT_CURRENT_VERSION_MINOR);
-
-       if (htt->target_version_major != HTT_CURRENT_VERSION_MAJOR) {
-               ath10k_err("htt major versions are incompatible!\n");
+       ath10k_info("htt target version %d.%d\n",
+                   htt->target_version_major, htt->target_version_minor);
+
+       if (htt->target_version_major != 2 &&
+           htt->target_version_major != 3) {
+               ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
+                          htt->target_version_major);
                return -ENOTSUPP;
        }
 
-       if (htt->target_version_minor != HTT_CURRENT_VERSION_MINOR)
-               ath10k_warn("htt minor version differ but still compatible\n");
-
        return 0;
 }
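
The check above replaces the exact match against HTT_CURRENT_VERSION_* with an allow-list of major versions 2 and 3. A tiny standalone sketch of the same gating:

/* Sketch of the relaxed HTT version check above: only the major
 * version is gated, and both 2 and 3 are accepted. */
#include <stdio.h>

static int htt_version_supported(int major)
{
        return major == 2 || major == 3;
}

int main(void)
{
        int majors[] = { 1, 2, 3, 4 };
        for (unsigned i = 0; i < sizeof(majors) / sizeof(majors[0]); i++)
                printf("htt %d.x: %s\n", majors[i],
                       htt_version_supported(majors[i]) ? "ok" : "unsupported");
        return 0;
}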
 
index 318be4629cded3b19248fe13c17d6def30253b92..1a337e93b7e95e9ef70ab9053696dce7cf4e4c95 100644 (file)
 #define _HTT_H_
 
 #include <linux/bug.h>
+#include <linux/interrupt.h>
 
 #include "htc.h"
 #include "rx_desc.h"
 
-#define HTT_CURRENT_VERSION_MAJOR      2
-#define HTT_CURRENT_VERSION_MINOR      1
-
 enum htt_dbg_stats_type {
        HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
        HTT_DBG_STATS_RX_REORDER    = 1 << 1,
@@ -45,6 +43,9 @@ enum htt_h2t_msg_type { /* host-to-target */
        HTT_H2T_MSG_TYPE_SYNC               = 4,
        HTT_H2T_MSG_TYPE_AGGR_CFG           = 5,
        HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+
+       /* This command is used for sending management frames in HTT < 3.0.
+        * HTT >= 3.0 uses TX_FRM for everything. */
        HTT_H2T_MSG_TYPE_MGMT_TX            = 7,
 
        HTT_H2T_NUM_MSGS /* keep this last */
@@ -1268,6 +1269,7 @@ struct ath10k_htt {
        /* set if host-fw communication goes haywire
         * used to avoid further failures */
        bool rx_confused;
+       struct tasklet_struct rx_replenish_task;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1308,6 +1310,10 @@ struct htt_rx_desc {
 #define HTT_RX_BUF_SIZE 1920
 #define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
 
+/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
+ * aggregated traffic more nicely. */
+#define ATH10K_HTT_MAX_NUM_REFILL 16
+
 /*
  * DMA_MAP expects the buffer to be an integral number of cache lines.
  * Rather than checking the actual cache line size, this code makes a
@@ -1327,6 +1333,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt);
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
 
 void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
index e784c40b904b55165a112df250c61351c606979b..90d4f74c28d7deaa4c087bfdbc47ca83ac1a9716 100644 (file)
@@ -20,6 +20,7 @@
 #include "htt.h"
 #include "txrx.h"
 #include "debug.h"
+#include "trace.h"
 
 #include <linux/log2.h>
 
 /* when under memory pressure rx ring refill may fail and needs a retry */
 #define HTT_RX_RING_REFILL_RETRY_MS 50
 
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+
+
 static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
 {
        int size;
@@ -177,10 +182,27 @@ static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
 
 static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
 {
-       int ret, num_to_fill;
+       int ret, num_deficit, num_to_fill;
 
+       /* Refilling the whole RX ring buffer proves to be a bad idea. The
+        * reason is that RX may take up a significant amount of CPU cycles
+        * and starve other tasks, e.g. TX on an ethernet device while acting
+        * as a bridge with the ath10k wlan interface. This ended up with very
+        * poor performance once the host system CPU was overwhelmed with RX
+        * on ath10k.
+        *
+        * By limiting the number of refills the replenishing occurs
+        * progressively. This in turn makes use of the fact that tasklets are
+        * processed in FIFO order. This means actual RX processing can starve
+        * out refilling. If there are not enough buffers on the RX ring the
+        * FW will not report RX until it is refilled with enough buffers.
+        * This automatically balances load with respect to CPU power.
+        *
+        * This probably comes at a cost of lower maximum throughput but
+        * improves the average and stability. */
        spin_lock_bh(&htt->rx_ring.lock);
-       num_to_fill = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+       num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+       num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+       num_deficit -= num_to_fill;
        ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
        if (ret == -ENOMEM) {
                /*
@@ -191,6 +213,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
                 */
                mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
                          msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+       } else if (num_deficit > 0) {
+               tasklet_schedule(&htt->rx_replenish_task);
        }
        spin_unlock_bh(&htt->rx_ring.lock);
 }
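
The comment above describes refilling the RX ring in bounded chunks and rescheduling the tasklet while a deficit remains. A standalone sketch of that progressive refill, with a purely illustrative ring that always accepts buffers:

/* Sketch of the progressive RX ring refill described above: fill at
 * most a fixed number of buffers per round and report whether another
 * round is still needed. */
#include <stdio.h>

#define MAX_NUM_REFILL 16

struct rx_ring {
        int fill_level; /* how full the ring should be kept */
        int fill_cnt;   /* how many buffers are currently posted */
};

/* Returns non-zero when another refill round should be scheduled. */
static int replenish_round(struct rx_ring *ring)
{
        int num_deficit = ring->fill_level - ring->fill_cnt;
        int num_to_fill = num_deficit < MAX_NUM_REFILL ?
                          num_deficit : MAX_NUM_REFILL;

        ring->fill_cnt += num_to_fill;  /* pretend allocation succeeded */
        num_deficit -= num_to_fill;

        return num_deficit > 0;
}

int main(void)
{
        struct rx_ring ring = { .fill_level = 50, .fill_cnt = 3 };
        int rounds = 0;

        while (replenish_round(&ring))
                rounds++;
        rounds++;       /* count the final round */

        printf("refilled to %d/%d in %d rounds\n",
               ring.fill_cnt, ring.fill_level, rounds);
        return 0;
}
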
@@ -212,6 +236,7 @@ void ath10k_htt_rx_detach(struct ath10k_htt *htt)
        int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 
        del_timer_sync(&htt->rx_ring.refill_retry_timer);
+       tasklet_kill(&htt->rx_replenish_task);
 
        while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
                struct sk_buff *skb =
@@ -441,6 +466,12 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
        return msdu_chaining;
 }
 
+static void ath10k_htt_rx_replenish_task(unsigned long ptr)
+{
+       struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+       ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
 int ath10k_htt_rx_attach(struct ath10k_htt *htt)
 {
        dma_addr_t paddr;
@@ -501,7 +532,10 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
        if (__ath10k_htt_rx_ring_fill_n(htt, htt->rx_ring.fill_level))
                goto err_fill_ring;
 
-       ath10k_dbg(ATH10K_DBG_HTT, "HTT RX ring size: %d, fill_level: %d\n",
+       tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
+                    (unsigned long)htt);
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                   htt->rx_ring.size, htt->rx_ring.fill_level);
        return 0;
 
@@ -590,134 +624,144 @@ static bool ath10k_htt_rx_hdr_is_amsdu(struct ieee80211_hdr *hdr)
        return false;
 }
 
-static int ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
-                       struct htt_rx_info *info)
+struct rfc1042_hdr {
+       u8 llc_dsap;
+       u8 llc_ssap;
+       u8 llc_ctrl;
+       u8 snap_oui[3];
+       __be16 snap_type;
+} __packed;
+
+struct amsdu_subframe_hdr {
+       u8 dst[ETH_ALEN];
+       u8 src[ETH_ALEN];
+       __be16 len;
+} __packed;
+
+static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
+                               struct htt_rx_info *info)
 {
        struct htt_rx_desc *rxd;
-       struct sk_buff *amsdu;
        struct sk_buff *first;
-       struct ieee80211_hdr *hdr;
        struct sk_buff *skb = info->skb;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;
+       struct ieee80211_hdr *hdr;
+       u8 hdr_buf[64], addr[ETH_ALEN], *qos;
        unsigned int hdr_len;
-       int crypto_len;
 
        rxd = (void *)skb->data - sizeof(*rxd);
-       fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
-                       RX_MSDU_START_INFO1_DECAP_FORMAT);
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                        RX_MPDU_START_INFO0_ENCRYPT_TYPE);
 
-       /* FIXME: No idea what assumptions are safe here. Need logs */
-       if ((fmt == RX_MSDU_DECAP_RAW && skb->next) ||
-           (fmt == RX_MSDU_DECAP_8023_SNAP_LLC)) {
-               ath10k_htt_rx_free_msdu_chain(skb->next);
-               skb->next = NULL;
-               return -ENOTSUPP;
-       }
+       hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+       hdr_len = ieee80211_hdrlen(hdr->frame_control);
+       memcpy(hdr_buf, hdr, hdr_len);
+       hdr = (struct ieee80211_hdr *)hdr_buf;
 
-       /* A-MSDU max is a little less than 8K */
-       amsdu = dev_alloc_skb(8*1024);
-       if (!amsdu) {
-               ath10k_warn("A-MSDU allocation failed\n");
-               ath10k_htt_rx_free_msdu_chain(skb->next);
-               skb->next = NULL;
-               return -ENOMEM;
-       }
-
-       if (fmt >= RX_MSDU_DECAP_NATIVE_WIFI) {
-               int hdrlen;
-
-               hdr = (void *)rxd->rx_hdr_status;
-               hdrlen = ieee80211_hdrlen(hdr->frame_control);
-               memcpy(skb_put(amsdu, hdrlen), hdr, hdrlen);
-       }
+       /* FIXME: Hopefully this is a temporary measure.
+        *
+        * Reporting individual A-MSDU subframes means each reported frame
+        * shares the same sequence number.
+        *
+        * mac80211 drops frames it recognizes as duplicates, i.e. when the
+        * retransmission flag is set and the sequence number matches the
+        * sequence number of a previous frame (as per IEEE 802.11-2012:
+        * 9.3.2.10 "Duplicate detection and recovery").
+        *
+        * To avoid frames being dropped, clear the retransmission flag for
+        * all received A-MSDUs.
+        *
+        * Worst case: actual duplicate frames will be reported but this should
+        * still be handled gracefully by other OSI/ISO layers. */
+       hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_RETRY);
 
        first = skb;
        while (skb) {
                void *decap_hdr;
-               int decap_len = 0;
+               int len;
 
                rxd = (void *)skb->data - sizeof(*rxd);
                fmt = MS(__le32_to_cpu(rxd->msdu_start.info1),
-                               RX_MSDU_START_INFO1_DECAP_FORMAT);
+                        RX_MSDU_START_INFO1_DECAP_FORMAT);
                decap_hdr = (void *)rxd->rx_hdr_status;
 
-               if (skb == first) {
-                       /* We receive linked A-MSDU subframe skbuffs. The
-                        * first one contains the original 802.11 header (and
-                        * possible crypto param) in the RX descriptor. The
-                        * A-MSDU subframe header follows that. Each part is
-                        * aligned to 4 byte boundary. */
-
-                       hdr = (void *)amsdu->data;
-                       hdr_len = ieee80211_hdrlen(hdr->frame_control);
-                       crypto_len = ath10k_htt_rx_crypto_param_len(enctype);
-
-                       decap_hdr += roundup(hdr_len, 4);
-                       decap_hdr += roundup(crypto_len, 4);
-               }
+               skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
 
-               if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
-                       /* Ethernet2 decap inserts ethernet header in place of
-                        * A-MSDU subframe header. */
-                       skb_pull(skb, 6 + 6 + 2);
-
-                       /* A-MSDU subframe header length */
-                       decap_len += 6 + 6 + 2;
-
-                       /* Ethernet2 decap also strips the LLC/SNAP so we need
-                        * to re-insert it. The LLC/SNAP follows A-MSDU
-                        * subframe header. */
-                       /* FIXME: Not all LLCs are 8 bytes long */
-                       decap_len += 8;
-
-                       memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
+               /* First frame in an A-MSDU chain has more decapped data. */
+               if (skb == first) {
+                       len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+                       len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
+                                       4);
+                       decap_hdr += len;
                }
 
-               if (fmt == RX_MSDU_DECAP_NATIVE_WIFI) {
-                       /* Native Wifi decap inserts regular 802.11 header
-                        * in place of A-MSDU subframe header. */
+               switch (fmt) {
+               case RX_MSDU_DECAP_RAW:
+                       /* remove trailing FCS */
+                       skb_trim(skb, skb->len - FCS_LEN);
+                       break;
+               case RX_MSDU_DECAP_NATIVE_WIFI:
+                       /* pull decapped header and copy DA */
                        hdr = (struct ieee80211_hdr *)skb->data;
-                       skb_pull(skb, ieee80211_hdrlen(hdr->frame_control));
+                       hdr_len = ieee80211_hdrlen(hdr->frame_control);
+                       memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
+                       skb_pull(skb, hdr_len);
 
-                       /* A-MSDU subframe header length */
-                       decap_len += 6 + 6 + 2;
+                       /* push original 802.11 header */
+                       hdr = (struct ieee80211_hdr *)hdr_buf;
+                       hdr_len = ieee80211_hdrlen(hdr->frame_control);
+                       memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
 
-                       memcpy(skb_put(amsdu, decap_len), decap_hdr, decap_len);
-               }
+                       /* original A-MSDU header has the bit set but we're
+                        * not including A-MSDU subframe header */
+                       hdr = (struct ieee80211_hdr *)skb->data;
+                       qos = ieee80211_get_qos_ctl(hdr);
+                       qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
 
-               if (fmt == RX_MSDU_DECAP_RAW)
-                       skb_trim(skb, skb->len - 4); /* remove FCS */
+                       /* original 802.11 header has a different DA */
+                       memcpy(ieee80211_get_DA(hdr), addr, ETH_ALEN);
+                       break;
+               case RX_MSDU_DECAP_ETHERNET2_DIX:
+                       /* strip ethernet header and insert decapped 802.11
+                        * header, amsdu subframe header and rfc1042 header */
 
-               memcpy(skb_put(amsdu, skb->len), skb->data, skb->len);
+                       len = 0;
+                       len += sizeof(struct rfc1042_hdr);
+                       len += sizeof(struct amsdu_subframe_hdr);
 
-               /* A-MSDU subframes are padded to 4bytes
-                * but relative to first subframe, not the whole MPDU */
-               if (skb->next && ((decap_len + skb->len) & 3)) {
-                       int padlen = 4 - ((decap_len + skb->len) & 3);
-                       memset(skb_put(amsdu, padlen), 0, padlen);
+                       skb_pull(skb, sizeof(struct ethhdr));
+                       memcpy(skb_push(skb, len), decap_hdr, len);
+                       memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+                       break;
+               case RX_MSDU_DECAP_8023_SNAP_LLC:
+                       /* insert decapped 802.11 header making a single
+                        * A-MSDU */
+                       memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+                       break;
                }
 
+               info->skb = skb;
+               info->encrypt_type = enctype;
                skb = skb->next;
-       }
+               info->skb->next = NULL;
 
-       info->skb = amsdu;
-       info->encrypt_type = enctype;
-
-       ath10k_htt_rx_free_msdu_chain(first);
+               ath10k_process_rx(htt->ar, info);
+       }
 
-       return 0;
+       /* FIXME: It might be nice to re-assemble the A-MSDU when there's a
+        * monitor interface active for sniffing purposes. */
 }
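
Since every reported A-MSDU subframe above carries the same sequence number, the retry bit is cleared so that mac80211's duplicate detection does not discard them. A self-contained sketch of that frame_control manipulation, using a local definition of the retry bit instead of the kernel header and host byte order for simplicity:

/* Sketch of clearing the retry bit in a frame_control field, as done
 * above for A-MSDU subframes. */
#include <stdio.h>
#include <stdint.h>

#define IEEE80211_FCTL_RETRY 0x0800     /* retry bit in frame_control */

int main(void)
{
        /* QoS data frame with the retry bit set (host byte order here;
         * the driver operates on a little-endian field). */
        uint16_t frame_control = 0x0088 | IEEE80211_FCTL_RETRY;

        frame_control &= (uint16_t)~IEEE80211_FCTL_RETRY;

        printf("frame_control = 0x%04x, retry %s\n", frame_control,
               (frame_control & IEEE80211_FCTL_RETRY) ? "set" : "cleared");
        return 0;
}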
 
-static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
+static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
 {
        struct sk_buff *skb = info->skb;
        struct htt_rx_desc *rxd;
        struct ieee80211_hdr *hdr;
        enum rx_msdu_decap_format fmt;
        enum htt_rx_mpdu_encrypt_type enctype;
+       int hdr_len;
+       void *rfc1042;
 
        /* This shouldn't happen. If it does than it may be a FW bug. */
        if (skb->next) {
@@ -731,49 +775,53 @@ static int ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
                        RX_MSDU_START_INFO1_DECAP_FORMAT);
        enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
                        RX_MPDU_START_INFO0_ENCRYPT_TYPE);
-       hdr = (void *)skb->data - RX_HTT_HDR_STATUS_LEN;
+       hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+       hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+       skb->ip_summed = ath10k_htt_rx_get_csum_state(skb);
 
        switch (fmt) {
        case RX_MSDU_DECAP_RAW:
                /* remove trailing FCS */
-               skb_trim(skb, skb->len - 4);
+               skb_trim(skb, skb->len - FCS_LEN);
                break;
        case RX_MSDU_DECAP_NATIVE_WIFI:
-               /* nothing to do here */
+               /* Pull decapped header */
+               hdr = (struct ieee80211_hdr *)skb->data;
+               hdr_len = ieee80211_hdrlen(hdr->frame_control);
+               skb_pull(skb, hdr_len);
+
+               /* Push original header */
+               hdr = (struct ieee80211_hdr *)rxd->rx_hdr_status;
+               hdr_len = ieee80211_hdrlen(hdr->frame_control);
+               memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
                break;
        case RX_MSDU_DECAP_ETHERNET2_DIX:
-               /* macaddr[6] + macaddr[6] + ethertype[2] */
-               skb_pull(skb, 6 + 6 + 2);
-               break;
-       case RX_MSDU_DECAP_8023_SNAP_LLC:
-               /* macaddr[6] + macaddr[6] + len[2] */
-               /* we don't need this for non-A-MSDU */
-               skb_pull(skb, 6 + 6 + 2);
-               break;
-       }
+               /* strip ethernet header and insert decapped 802.11 header and
+                * rfc1042 header */
 
-       if (fmt == RX_MSDU_DECAP_ETHERNET2_DIX) {
-               void *llc;
-               int llclen;
+               rfc1042 = hdr;
+               rfc1042 += roundup(hdr_len, 4);
+               rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
 
-               llclen = 8;
-               llc  = hdr;
-               llc += roundup(ieee80211_hdrlen(hdr->frame_control), 4);
-               llc += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
-
-               skb_push(skb, llclen);
-               memcpy(skb->data, llc, llclen);
-       }
+               skb_pull(skb, sizeof(struct ethhdr));
+               memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
+                      rfc1042, sizeof(struct rfc1042_hdr));
+               memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+               break;
+       case RX_MSDU_DECAP_8023_SNAP_LLC:
+               /* remove A-MSDU subframe header and insert
+                * decapped 802.11 header. rfc1042 header is already there */
 
-       if (fmt >= RX_MSDU_DECAP_ETHERNET2_DIX) {
-               int len = ieee80211_hdrlen(hdr->frame_control);
-               skb_push(skb, len);
-               memcpy(skb->data, hdr, len);
+               skb_pull(skb, sizeof(struct amsdu_subframe_hdr));
+               memcpy(skb_push(skb, hdr_len), hdr, hdr_len);
+               break;
        }
 
        info->skb = skb;
        info->encrypt_type = enctype;
-       return 0;
+
+       ath10k_process_rx(htt->ar, info);
 }
 
 static bool ath10k_htt_rx_has_decrypt_err(struct sk_buff *skb)
@@ -845,8 +893,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
        int fw_desc_len;
        u8 *fw_desc;
        int i, j;
-       int ret;
-       int ip_summed;
 
        memset(&info, 0, sizeof(info));
 
@@ -921,11 +967,6 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                continue;
                        }
 
-                       /* The skb is not yet processed and it may be
-                        * reallocated. Since the offload is in the original
-                        * skb extract the checksum now and assign it later */
-                       ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
-
                        info.skb     = msdu_head;
                        info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
                        info.signal  = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -938,28 +979,13 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                        hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
 
                        if (ath10k_htt_rx_hdr_is_amsdu(hdr))
-                               ret = ath10k_htt_rx_amsdu(htt, &info);
+                               ath10k_htt_rx_amsdu(htt, &info);
                        else
-                               ret = ath10k_htt_rx_msdu(htt, &info);
-
-                       if (ret && !info.fcs_err) {
-                               ath10k_warn("error processing msdus %d\n", ret);
-                               dev_kfree_skb_any(info.skb);
-                               continue;
-                       }
-
-                       if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
-                               ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
-
-                       info.skb->ip_summed = ip_summed;
-
-                       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
-                                       info.skb->data, info.skb->len);
-                       ath10k_process_rx(htt->ar, &info);
+                               ath10k_htt_rx_msdu(htt, &info);
                }
        }
 
-       ath10k_htt_rx_msdu_buff_replenish(htt);
+       tasklet_schedule(&htt->rx_replenish_task);
 }
 
 static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
@@ -1131,7 +1157,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                        break;
                }
 
-               ath10k_txrx_tx_completed(htt, &tx_done);
+               ath10k_txrx_tx_unref(htt, &tx_done);
                break;
        }
        case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
@@ -1165,7 +1191,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
                        msdu_id = resp->data_tx_completion.msdus[i];
                        tx_done.msdu_id = __le16_to_cpu(msdu_id);
-                       ath10k_txrx_tx_completed(htt, &tx_done);
+                       ath10k_txrx_tx_unref(htt, &tx_done);
                }
                break;
        }
@@ -1190,8 +1216,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
        case HTT_T2H_MSG_TYPE_TEST:
                /* FIX THIS */
                break;
-       case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
        case HTT_T2H_MSG_TYPE_STATS_CONF:
+               trace_ath10k_htt_stats(skb->data, skb->len);
+               break;
+       case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
        case HTT_T2H_MSG_TYPE_RX_ADDBA:
        case HTT_T2H_MSG_TYPE_RX_DELBA:
        case HTT_T2H_MSG_TYPE_RX_FLUSH:
index 656c2546b2949825a38b1b9b05d266e5af8bc5f0..d9335e9d0d04d247e868e465323037e4f9f78d9e 100644 (file)
@@ -96,7 +96,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
        htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
                                                                   pipe);
 
-       ath10k_dbg(ATH10K_DBG_HTT, "htt tx max num pending tx %d\n",
+       ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);
 
        htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
@@ -117,7 +117,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
 
 static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
 {
-       struct sk_buff *txdesc;
+       struct htt_tx_done tx_done = {0};
        int msdu_id;
 
        /* No locks needed. Called after communication with the device has
@@ -127,18 +127,13 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
                if (!test_bit(msdu_id, htt->used_msdu_ids))
                        continue;
 
-               txdesc = htt->pending_tx[msdu_id];
-               if (!txdesc)
-                       continue;
-
                ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
                           msdu_id);
 
-               if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
-                       ATH10K_SKB_CB(txdesc)->htt.refcount = 1;
+               tx_done.discard = 1;
+               tx_done.msdu_id = msdu_id;
 
-               ATH10K_SKB_CB(txdesc)->htt.discard = true;
-               ath10k_txrx_tx_unref(htt, txdesc);
+               ath10k_txrx_tx_unref(htt, &tx_done);
        }
 }
 
@@ -152,26 +147,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
 {
-       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
-       struct ath10k_htt *htt = &ar->htt;
-
-       if (skb_cb->htt.is_conf) {
-               dev_kfree_skb_any(skb);
-               return;
-       }
-
-       if (skb_cb->is_aborted) {
-               skb_cb->htt.discard = true;
-
-               /* if the skbuff is aborted we need to make sure we'll free up
-                * the tx resources, we can't simply run tx_unref() 2 times
-                * because if htt tx completion came in earlier we'd access
-                * unallocated memory */
-               if (skb_cb->htt.refcount > 1)
-                       skb_cb->htt.refcount = 1;
-       }
-
-       ath10k_txrx_tx_unref(htt, skb);
+       dev_kfree_skb_any(skb);
 }
 
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
@@ -192,10 +168,48 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
        cmd = (struct htt_cmd *)skb->data;
        cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
 
-       ATH10K_SKB_CB(skb)->htt.is_conf = true;
+       ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+{
+       struct htt_stats_req *req;
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       int len = 0, ret;
+
+       len += sizeof(cmd->hdr);
+       len += sizeof(cmd->stats_req);
+
+       skb = ath10k_htc_alloc_skb(len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, len);
+       cmd = (struct htt_cmd *)skb->data;
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+       req = &cmd->stats_req;
+
+       memset(req, 0, sizeof(*req));
+
+       /* currently we support only 8-bit masks at most, so there is no need
+        * to worry about endianness */
+       req->upload_types[0] = mask;
+       req->reset_types[0] = mask;
+       req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+       req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+       req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
 
        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
+               ath10k_warn("failed to send htt type stats request: %d", ret);
                dev_kfree_skb_any(skb);
                return ret;
        }
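
ath10k_htt_h2t_stats_req above splits the 64-bit cookie into cookie_lsb and cookie_msb. A standalone sketch of the split and of reassembling the value, as a reply handler presumably would:

/* Sketch of splitting a 64-bit stats cookie into 32-bit halves as
 * done above, plus the inverse used when the reply comes back. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
        uint64_t cookie = 0x0123456789abcdefULL;

        uint32_t lsb = (uint32_t)(cookie & 0xffffffff);
        uint32_t msb = (uint32_t)((cookie & 0xffffffff00000000ULL) >> 32);

        uint64_t back = ((uint64_t)msb << 32) | lsb;

        printf("lsb=0x%08x msb=0x%08x back=0x%016" PRIx64 "\n",
               lsb, msb, back);
        return 0;
}
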
@@ -279,8 +293,6 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 
 #undef desc_offset
 
-       ATH10K_SKB_CB(skb)->htt.is_conf = true;
-
        ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
        if (ret) {
                dev_kfree_skb_any(skb);
@@ -293,10 +305,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 {
        struct device *dev = htt->ar->dev;
-       struct ath10k_skb_cb *skb_cb;
        struct sk_buff *txdesc = NULL;
        struct htt_cmd *cmd;
-       u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+       u8 vdev_id = skb_cb->vdev_id;
        int len = 0;
        int msdu_id = -1;
        int res;
@@ -304,30 +316,30 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
-               return res;
+               goto err;
 
        len += sizeof(cmd->hdr);
        len += sizeof(cmd->mgmt_tx);
 
-       txdesc = ath10k_htc_alloc_skb(len);
-       if (!txdesc) {
-               res = -ENOMEM;
-               goto err;
-       }
-
        spin_lock_bh(&htt->tx_lock);
-       msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
-       if (msdu_id < 0) {
+       res = ath10k_htt_tx_alloc_msdu_id(htt);
+       if (res < 0) {
                spin_unlock_bh(&htt->tx_lock);
-               res = msdu_id;
-               goto err;
+               goto err_tx_dec;
        }
-       htt->pending_tx[msdu_id] = txdesc;
+       msdu_id = res;
+       htt->pending_tx[msdu_id] = msdu;
        spin_unlock_bh(&htt->tx_lock);
 
+       txdesc = ath10k_htc_alloc_skb(len);
+       if (!txdesc) {
+               res = -ENOMEM;
+               goto err_free_msdu_id;
+       }
+
        res = ath10k_skb_map(dev, msdu);
        if (res)
-               goto err;
+               goto err_free_txdesc;
 
        skb_put(txdesc, len);
        cmd = (struct htt_cmd *)txdesc->data;
@@ -339,31 +351,27 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        memcpy(cmd->mgmt_tx.hdr, msdu->data,
               min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
 
-       /* refcount is decremented by HTC and HTT completions until it reaches
-        * zero and is freed */
-       skb_cb = ATH10K_SKB_CB(txdesc);
-       skb_cb->htt.msdu_id = msdu_id;
-       skb_cb->htt.refcount = 2;
-       skb_cb->htt.msdu = msdu;
+       skb_cb->htt.frag_len = 0;
+       skb_cb->htt.pad_len = 0;
 
        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
-               goto err;
+               goto err_unmap_msdu;
 
        return 0;
 
-err:
+err_unmap_msdu:
        ath10k_skb_unmap(dev, msdu);
-
-       if (txdesc)
-               dev_kfree_skb_any(txdesc);
-       if (msdu_id >= 0) {
-               spin_lock_bh(&htt->tx_lock);
-               htt->pending_tx[msdu_id] = NULL;
-               ath10k_htt_tx_free_msdu_id(htt, msdu_id);
-               spin_unlock_bh(&htt->tx_lock);
-       }
+err_free_txdesc:
+       dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+       spin_lock_bh(&htt->tx_lock);
+       htt->pending_tx[msdu_id] = NULL;
+       ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+       spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
        ath10k_htt_tx_dec_pending(htt);
+err:
        return res;
 }
 
@@ -373,13 +381,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        struct htt_cmd *cmd;
        struct htt_data_tx_desc_frag *tx_frags;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
-       struct ath10k_skb_cb *skb_cb;
+       struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
        struct sk_buff *txdesc = NULL;
-       struct sk_buff *txfrag = NULL;
-       u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
+       bool use_frags;
+       u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
        u8 tid;
-       int prefetch_len, desc_len, frag_len;
-       dma_addr_t frags_paddr;
+       int prefetch_len, desc_len;
        int msdu_id = -1;
        int res;
        u8 flags0;
@@ -387,69 +394,82 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 
        res = ath10k_htt_tx_inc_pending(htt);
        if (res)
-               return res;
+               goto err;
+
+       spin_lock_bh(&htt->tx_lock);
+       res = ath10k_htt_tx_alloc_msdu_id(htt);
+       if (res < 0) {
+               spin_unlock_bh(&htt->tx_lock);
+               goto err_tx_dec;
+       }
+       msdu_id = res;
+       htt->pending_tx[msdu_id] = msdu;
+       spin_unlock_bh(&htt->tx_lock);
 
        prefetch_len = min(htt->prefetch_len, msdu->len);
        prefetch_len = roundup(prefetch_len, 4);
 
        desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
-       frag_len = sizeof(*tx_frags) * 2;
 
        txdesc = ath10k_htc_alloc_skb(desc_len);
        if (!txdesc) {
                res = -ENOMEM;
-               goto err;
+               goto err_free_msdu_id;
        }
 
-       txfrag = dev_alloc_skb(frag_len);
-       if (!txfrag) {
-               res = -ENOMEM;
-               goto err;
-       }
+       /* Since HTT 3.0 there is no separate mgmt tx command. However, in the
+        * case of mgmt tx using TX_FRM there is no tx fragment list. Instead
+        * of a tx fragment list the host driver specifies the frame pointer
+        * directly. */
+       use_frags = htt->target_version_major < 3 ||
+                   !ieee80211_is_mgmt(hdr->frame_control);
 
        if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
                ath10k_warn("htt alignment check failed. dropping packet.\n");
                res = -EIO;
-               goto err;
+               goto err_free_txdesc;
        }
 
-       spin_lock_bh(&htt->tx_lock);
-       msdu_id = ath10k_htt_tx_alloc_msdu_id(htt);
-       if (msdu_id < 0) {
-               spin_unlock_bh(&htt->tx_lock);
-               res = msdu_id;
-               goto err;
+       if (use_frags) {
+               skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
+               skb_cb->htt.pad_len = (unsigned long)msdu->data -
+                                     round_down((unsigned long)msdu->data, 4);
+
+               skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+       } else {
+               skb_cb->htt.frag_len = 0;
+               skb_cb->htt.pad_len = 0;
        }
-       htt->pending_tx[msdu_id] = txdesc;
-       spin_unlock_bh(&htt->tx_lock);
 
        res = ath10k_skb_map(dev, msdu);
        if (res)
-               goto err;
-
-       /* tx fragment list must be terminated with zero-entry */
-       skb_put(txfrag, frag_len);
-       tx_frags = (struct htt_data_tx_desc_frag *)txfrag->data;
-       tx_frags[0].paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
-       tx_frags[0].len   = __cpu_to_le32(msdu->len);
-       tx_frags[1].paddr = __cpu_to_le32(0);
-       tx_frags[1].len   = __cpu_to_le32(0);
-
-       res = ath10k_skb_map(dev, txfrag);
-       if (res)
-               goto err;
+               goto err_pull_txfrag;
+
+       if (use_frags) {
+               dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
+                                       DMA_TO_DEVICE);
+
+               /* tx fragment list must be terminated with zero-entry */
+               tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
+               tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
+                                                 skb_cb->htt.frag_len +
+                                                 skb_cb->htt.pad_len);
+               tx_frags[0].len   = __cpu_to_le32(msdu->len -
+                                                 skb_cb->htt.frag_len -
+                                                 skb_cb->htt.pad_len);
+               tx_frags[1].paddr = __cpu_to_le32(0);
+               tx_frags[1].len   = __cpu_to_le32(0);
+
+               dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
+                                          DMA_TO_DEVICE);
+       }
 
-       ath10k_dbg(ATH10K_DBG_HTT, "txfrag 0x%llx msdu 0x%llx\n",
-                  (unsigned long long) ATH10K_SKB_CB(txfrag)->paddr,
+       ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
                   (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
-       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "txfrag: ",
-                       txfrag->data, frag_len);
        ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
                        msdu->data, msdu->len);
 
        skb_put(txdesc, desc_len);
        cmd = (struct htt_cmd *)txdesc->data;
-       memset(cmd, 0, desc_len);
 
        tid = ATH10K_SKB_CB(msdu)->htt.tid;
 
@@ -459,8 +479,13 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        if (!ieee80211_has_protected(hdr->frame_control))
                flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
-       flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
-                    HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+
+       if (use_frags)
+               flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
+                            HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+       else
+               flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+                            HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 
        flags1  = 0;
        flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
@@ -468,45 +493,37 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
        flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
 
-       frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
-
        cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
        cmd->data_tx.flags0      = flags0;
        cmd->data_tx.flags1      = __cpu_to_le16(flags1);
-       cmd->data_tx.len         = __cpu_to_le16(msdu->len);
+       cmd->data_tx.len         = __cpu_to_le16(msdu->len -
+                                                skb_cb->htt.frag_len -
+                                                skb_cb->htt.pad_len);
        cmd->data_tx.id          = __cpu_to_le16(msdu_id);
-       cmd->data_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+       cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
        cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);
 
-       memcpy(cmd->data_tx.prefetch, msdu->data, prefetch_len);
-
-       /* refcount is decremented by HTC and HTT completions until it reaches
-        * zero and is freed */
-       skb_cb = ATH10K_SKB_CB(txdesc);
-       skb_cb->htt.msdu_id = msdu_id;
-       skb_cb->htt.refcount = 2;
-       skb_cb->htt.txfrag = txfrag;
-       skb_cb->htt.msdu = msdu;
+       memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
 
        res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
        if (res)
-               goto err;
+               goto err_unmap_msdu;
 
        return 0;
-err:
-       if (txfrag)
-               ath10k_skb_unmap(dev, txfrag);
-       if (txdesc)
-               dev_kfree_skb_any(txdesc);
-       if (txfrag)
-               dev_kfree_skb_any(txfrag);
-       if (msdu_id >= 0) {
-               spin_lock_bh(&htt->tx_lock);
-               htt->pending_tx[msdu_id] = NULL;
-               ath10k_htt_tx_free_msdu_id(htt, msdu_id);
-               spin_unlock_bh(&htt->tx_lock);
-       }
-       ath10k_htt_tx_dec_pending(htt);
+
+err_unmap_msdu:
        ath10k_skb_unmap(dev, msdu);
+err_pull_txfrag:
+       skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+err_free_txdesc:
+       dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+       spin_lock_bh(&htt->tx_lock);
+       htt->pending_tx[msdu_id] = NULL;
+       ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+       spin_unlock_bh(&htt->tx_lock);
+err_tx_dec:
+       ath10k_htt_tx_dec_pending(htt);
+err:
        return res;
 }
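
The use_frags decision above selects a fragment list only for HTT below 3.0 or for non-management frames, which in turn determines the descriptor packet type and how much headroom is pushed in front of the MSDU. A small sketch of that predicate with illustrative values:

/* Sketch of the use_frags decision above: HTT >= 3.0 sends management
 * frames via TX_FRM without a fragment list, everything else keeps the
 * two-entry (data + zero terminator) fragment list. */
#include <stdio.h>
#include <stdbool.h>

static bool use_frag_list(int htt_major, bool is_mgmt)
{
        return htt_major < 3 || !is_mgmt;
}

int main(void)
{
        printf("htt 2.x mgmt : %d\n", use_frag_list(2, true));   /* 1 */
        printf("htt 3.x mgmt : %d\n", use_frag_list(3, true));   /* 0 */
        printf("htt 3.x data : %d\n", use_frag_list(3, false));  /* 1 */
        return 0;
}
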
index 44ed5af0a2043f19122685a2f0ff548bbc89feea..8aeb46d9b53409da2ced923d49780b763105a5d6 100644 (file)
 
 #include "targaddrs.h"
 
-/* Supported FW version */
-#define SUPPORTED_FW_MAJOR     1
-#define SUPPORTED_FW_MINOR     0
-#define SUPPORTED_FW_RELEASE   0
-#define SUPPORTED_FW_BUILD     629
-
-/* QCA988X 1.0 definitions */
-#define QCA988X_HW_1_0_VERSION         0x4000002c
-#define QCA988X_HW_1_0_FW_DIR          "ath10k/QCA988X/hw1.0"
-#define QCA988X_HW_1_0_FW_FILE         "firmware.bin"
-#define QCA988X_HW_1_0_OTP_FILE                "otp.bin"
-#define QCA988X_HW_1_0_BOARD_DATA_FILE "board.bin"
-#define QCA988X_HW_1_0_PATCH_LOAD_ADDR 0x1234
+/* QCA988X 1.0 definitions (unsupported) */
+#define QCA988X_HW_1_0_CHIP_ID_REV     0x0
 
 /* QCA988X 2.0 definitions */
 #define QCA988X_HW_2_0_VERSION         0x4100016c
+#define QCA988X_HW_2_0_CHIP_ID_REV     0x2
 #define QCA988X_HW_2_0_FW_DIR          "ath10k/QCA988X/hw2.0"
 #define QCA988X_HW_2_0_FW_FILE         "firmware.bin"
 #define QCA988X_HW_2_0_OTP_FILE                "otp.bin"
 #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
 #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
 
+#define ATH10K_FW_API2_FILE            "firmware-2.bin"
+
+/* also includes the null byte */
+#define ATH10K_FIRMWARE_MAGIC               "QCA-ATH10K"
+
+struct ath10k_fw_ie {
+       __le32 id;
+       __le32 len;
+       u8 data[0];
+};
+
+enum ath10k_fw_ie_type {
+       ATH10K_FW_IE_FW_VERSION = 0,
+       ATH10K_FW_IE_TIMESTAMP = 1,
+       ATH10K_FW_IE_FEATURES = 2,
+       ATH10K_FW_IE_FW_IMAGE = 3,
+       ATH10K_FW_IE_OTP_IMAGE = 4,
+};
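
The new ath10k_fw_ie layout above, together with ATH10K_FW_API2_FILE, points to an id/length/data container format for the second firmware API. A sketch of walking such a TLV blob in plain C, with fabricated contents and 4-byte padding between entries assumed:

/* Sketch of walking an id/len/data IE container like struct
 * ath10k_fw_ie above. Little-endian byte order is assumed. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct fw_ie {
        uint32_t id;
        uint32_t len;
        /* 'len' bytes of data follow, padded to a 4-byte boundary */
};

int main(void)
{
        /* two fabricated IEs: id 0 ("1.0"), id 1 (a 4-byte timestamp) */
        uint8_t blob[] = {
                0, 0, 0, 0,  3, 0, 0, 0,  '1', '.', '0', 0,
                1, 0, 0, 0,  4, 0, 0, 0,  0x78, 0x56, 0x34, 0x12,
        };
        size_t off = 0;

        while (off + sizeof(struct fw_ie) <= sizeof(blob)) {
                struct fw_ie ie;

                memcpy(&ie, blob + off, sizeof(ie));
                off += sizeof(ie);
                if (off + ie.len > sizeof(blob))
                        break;

                printf("ie id %u len %u\n", ie.id, ie.len);
                off += (ie.len + 3) & ~3u;      /* 4-byte alignment */
        }
        return 0;
}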
+
 /* Known pecularities:
  *  - current FW doesn't support raw rx mode (last tested v599)
  *  - current FW dumps upon raw tx mode (last tested v599)
@@ -53,6 +62,9 @@ enum ath10k_hw_txrx_mode {
        ATH10K_HW_TXRX_RAW = 0,
        ATH10K_HW_TXRX_NATIVE_WIFI = 1,
        ATH10K_HW_TXRX_ETHERNET = 2,
+
+       /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
+       ATH10K_HW_TXRX_MGMT = 3,
 };
 
 enum ath10k_mcast2ucast_mode {
@@ -60,6 +72,7 @@ enum ath10k_mcast2ucast_mode {
        ATH10K_MCAST2UCAST_ENABLED = 1,
 };
 
+/* Target specific defines for MAIN firmware */
 #define TARGET_NUM_VDEVS                       8
 #define TARGET_NUM_PEER_AST                    2
 #define TARGET_NUM_WDS_ENTRIES                 32
@@ -75,7 +88,11 @@ enum ath10k_mcast2ucast_mode {
 #define TARGET_RX_CHAIN_MASK                   (BIT(0) | BIT(1) | BIT(2))
 #define TARGET_RX_TIMEOUT_LO_PRI               100
 #define TARGET_RX_TIMEOUT_HI_PRI               40
-#define TARGET_RX_DECAP_MODE                   ATH10K_HW_TXRX_ETHERNET
+
+/* Native Wifi decap mode is used to align IP frames to 4-byte boundaries and
+ * avoid a very expensive re-alignment in mac80211. */
+#define TARGET_RX_DECAP_MODE                   ATH10K_HW_TXRX_NATIVE_WIFI
+
 #define TARGET_SCAN_MAX_PENDING_REQS           4
 #define TARGET_BMISS_OFFLOAD_MAX_VDEV          3
 #define TARGET_ROAM_OFFLOAD_MAX_VDEV           3
@@ -90,6 +107,36 @@ enum ath10k_mcast2ucast_mode {
 #define TARGET_NUM_MSDU_DESC                   (1024 + 400)
 #define TARGET_MAX_FRAG_ENTRIES                        0
 
+/* Target specific defines for 10.X firmware */
+#define TARGET_10X_NUM_VDEVS                   16
+#define TARGET_10X_NUM_PEER_AST                        2
+#define TARGET_10X_NUM_WDS_ENTRIES             32
+#define TARGET_10X_DMA_BURST_SIZE              0
+#define TARGET_10X_MAC_AGGR_DELIM              0
+#define TARGET_10X_AST_SKID_LIMIT              16
+#define TARGET_10X_NUM_PEERS                   (128 + (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_OFFLOAD_PEERS           0
+#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS    0
+#define TARGET_10X_NUM_PEER_KEYS               2
+#define TARGET_10X_NUM_TIDS                    256
+#define TARGET_10X_TX_CHAIN_MASK               (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_CHAIN_MASK               (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_TIMEOUT_LO_PRI           100
+#define TARGET_10X_RX_TIMEOUT_HI_PRI           40
+#define TARGET_10X_RX_DECAP_MODE               ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10X_SCAN_MAX_PENDING_REQS       4
+#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV      2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV       2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES        8
+#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV                3
+#define TARGET_10X_NUM_MCAST_GROUPS            0
+#define TARGET_10X_NUM_MCAST_TABLE_ELEMS       0
+#define TARGET_10X_MCAST2UCAST_MODE            ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_10X_TX_DBG_LOG_SIZE             1024
+#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10X_VOW_CONFIG                  0
+#define TARGET_10X_NUM_MSDU_DESC               (1024 + 400)
+#define TARGET_10X_MAX_FRAG_ENTRIES            0
 
 /* Number of Copy Engines supported */
 #define CE_COUNT 8
@@ -169,6 +216,10 @@ enum ath10k_mcast2ucast_mode {
 #define SOC_LPO_CAL_ENABLE_LSB                 20
 #define SOC_LPO_CAL_ENABLE_MASK                        0x00100000
 
+#define SOC_CHIP_ID_ADDRESS                    0x000000ec
+#define SOC_CHIP_ID_REV_LSB                    8
+#define SOC_CHIP_ID_REV_MASK                   0x00000f00
+
 #define WLAN_RESET_CONTROL_COLD_RST_MASK       0x00000008
 #define WLAN_RESET_CONTROL_WARM_RST_MASK       0x00000004
 #define WLAN_SYSTEM_SLEEP_DISABLE_LSB          0
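
The new SOC_CHIP_ID_REV_{LSB,MASK} defines above are presumably used together with SOC_CHIP_ID_ADDRESS to pull the revision field out of the chip id register. A minimal sketch of that mask-and-shift extraction, with a made-up register value:

/* Sketch of extracting the chip revision with the new
 * SOC_CHIP_ID_REV_LSB/MASK defines above. */
#include <stdio.h>
#include <stdint.h>

#define SOC_CHIP_ID_REV_LSB     8
#define SOC_CHIP_ID_REV_MASK    0x00000f00

static uint32_t chip_id_rev(uint32_t chip_id)
{
        return (chip_id & SOC_CHIP_ID_REV_MASK) >> SOC_CHIP_ID_REV_LSB;
}

int main(void)
{
        uint32_t chip_id = 0x043a0200;  /* hypothetical register value */

        /* rev 0x2 corresponds to QCA988X_HW_2_0_CHIP_ID_REV above */
        printf("chip rev %u\n", chip_id_rev(chip_id));
        return 0;
}
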
index cf2ba4d850c9bf0cedb8d123c0e05cccb821e6a2..0b1cc516e778c912e77d341a90f4921e5227a258 100644 (file)
@@ -334,25 +334,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
 
 static int  ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
 {
+       struct ath10k *ar = arvif->ar;
+       u32 vdev_param;
+
        if (value != 0xFFFFFFFF)
                value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
                              ATH10K_RTS_MAX);
 
-       return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
-                                        WMI_VDEV_PARAM_RTS_THRESHOLD,
-                                        value);
+       vdev_param = ar->wmi.vdev_param->rts_threshold;
+       return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
 }
 
 static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
 {
+       struct ath10k *ar = arvif->ar;
+       u32 vdev_param;
+
        if (value != 0xFFFFFFFF)
                value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
                                ATH10K_FRAGMT_THRESHOLD_MIN,
                                ATH10K_FRAGMT_THRESHOLD_MAX);
 
-       return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
-                                        WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
-                                        value);
+       vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
+       return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
 }
 
 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
@@ -460,6 +464,11 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
                arg.ssid_len = arvif->vif->bss_conf.ssid_len;
        }
 
+       ath10k_dbg(ATH10K_DBG_MAC,
+                  "mac vdev %d start center_freq %d phymode %s\n",
+                  arg.vdev_id, arg.channel.freq,
+                  ath10k_wmi_phymode_str(arg.channel.mode));
+
        ret = ath10k_wmi_vdev_start(ar, &arg);
        if (ret) {
                ath10k_warn("WMI vdev start failed: ret %d\n", ret);
@@ -503,13 +512,10 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
 {
        struct ieee80211_channel *channel = ar->hw->conf.chandef.chan;
        struct wmi_vdev_start_request_arg arg = {};
-       enum nl80211_channel_type type;
        int ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       type = cfg80211_get_chandef_type(&ar->hw->conf.chandef);
-
        arg.vdev_id = vdev_id;
        arg.channel.freq = channel->center_freq;
        arg.channel.band_center_freq1 = ar->hw->conf.chandef.center_freq1;
@@ -560,12 +566,9 @@ static int ath10k_monitor_stop(struct ath10k *ar)
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       /* For some reasons, ath10k_wmi_vdev_down() here couse
-        * often ath10k_wmi_vdev_stop() to fail. Next we could
-        * not run monitor vdev and driver reload
-        * required. Don't see such problems we skip
-        * ath10k_wmi_vdev_down() here.
-        */
+       ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+       if (ret)
+               ath10k_warn("Monitor vdev down failed: %d\n", ret);
 
        ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
        if (ret)
@@ -607,7 +610,7 @@ static int ath10k_monitor_create(struct ath10k *ar)
                goto vdev_fail;
        }
 
-       ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface created, vdev id: %d\n",
+       ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
                   ar->monitor_vdev_id);
 
        ar->monitor_present = true;
@@ -639,7 +642,7 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
        ar->free_vdev_map |= 1 << (ar->monitor_vdev_id);
        ar->monitor_present = false;
 
-       ath10k_dbg(ATH10K_DBG_MAC, "Monitor interface destroyed, vdev id: %d\n",
+       ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
                   ar->monitor_vdev_id);
        return ret;
 }
@@ -668,13 +671,14 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
                            arvif->vdev_id);
                return;
        }
-       ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d up\n", arvif->vdev_id);
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
 }
 
 static void ath10k_control_ibss(struct ath10k_vif *arvif,
                                struct ieee80211_bss_conf *info,
                                const u8 self_peer[ETH_ALEN])
 {
+       u32 vdev_param;
        int ret = 0;
 
        lockdep_assert_held(&arvif->ar->conf_mutex);
@@ -708,8 +712,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
                return;
        }
 
-       ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
-                                       WMI_VDEV_PARAM_ATIM_WINDOW,
+       vdev_param = arvif->ar->wmi.vdev_param->atim_window;
+       ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
                                        ATH10K_DEFAULT_ATIM);
        if (ret)
                ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
@@ -719,47 +723,45 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
 /*
  * Review this when mac80211 gains per-interface powersave support.
  */
-static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 {
-       struct ath10k_generic_iter *ar_iter = data;
-       struct ieee80211_conf *conf = &ar_iter->ar->hw->conf;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k *ar = arvif->ar;
+       struct ieee80211_conf *conf = &ar->hw->conf;
        enum wmi_sta_powersave_param param;
        enum wmi_sta_ps_mode psmode;
        int ret;
 
        lockdep_assert_held(&arvif->ar->conf_mutex);
 
-       if (vif->type != NL80211_IFTYPE_STATION)
-               return;
+       if (arvif->vif->type != NL80211_IFTYPE_STATION)
+               return 0;
 
        if (conf->flags & IEEE80211_CONF_PS) {
                psmode = WMI_STA_PS_MODE_ENABLED;
                param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
 
-               ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar,
-                                                 arvif->vdev_id,
-                                                 param,
+               ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
                                                  conf->dynamic_ps_timeout);
                if (ret) {
                        ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
                                    arvif->vdev_id);
-                       return;
+                       return ret;
                }
-
-               ar_iter->ret = ret;
        } else {
                psmode = WMI_STA_PS_MODE_DISABLED;
        }
 
-       ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
-                                            psmode);
-       if (ar_iter->ret)
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+                  arvif->vdev_id, psmode ? "enable" : "disable");
+
+       ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
+       if (ret) {
                ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
                            psmode, arvif->vdev_id);
-       else
-               ath10k_dbg(ATH10K_DBG_MAC, "Set PS Mode: %d for VDEV: %d\n",
-                          psmode, arvif->vdev_id);
+               return ret;
+       }
+
+       return 0;
 }
 
 /**********************/
@@ -949,7 +951,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
        arg->peer_ht_rates.num_rates = n;
        arg->peer_num_spatial_streams = max((n+7) / 8, 1);
 
-       ath10k_dbg(ATH10K_DBG_MAC, "mcs cnt %d nss %d\n",
+       ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+                  arg->addr,
                   arg->peer_ht_rates.num_rates,
                   arg->peer_num_spatial_streams);
 }
@@ -969,11 +972,11 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
                arg->peer_flags |= WMI_PEER_QOS;
 
        if (sta->wme && sta->uapsd_queues) {
-               ath10k_dbg(ATH10K_DBG_MAC, "uapsd_queues: 0x%X, max_sp: %d\n",
+               ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
                           sta->uapsd_queues, sta->max_sp);
 
                arg->peer_flags |= WMI_PEER_APSD;
-               arg->peer_flags |= WMI_RC_UAPSD_FLAG;
+               arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
 
                if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
                        uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
@@ -1028,14 +1031,27 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
                                    struct wmi_peer_assoc_complete_arg *arg)
 {
        const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+       u8 ampdu_factor;
 
        if (!vht_cap->vht_supported)
                return;
 
        arg->peer_flags |= WMI_PEER_VHT;
-
        arg->peer_vht_caps = vht_cap->cap;
 
+       ampdu_factor = (vht_cap->cap &
+                       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+                      IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+       /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+        * zero in VHT IE. Using it would result in degraded throughput.
+        * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+        * it if VHT max_mpdu is smaller. */
+       arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+                                (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+                                       ampdu_factor)) - 1);
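+       /* e.g. a VHT A-MPDU length exponent of 7 yields
+        * (1 << (13 + 7)) - 1 = 1048575 bytes, the maximum VHT A-MPDU
+        * length (IEEE80211_HT_MAX_AMPDU_FACTOR is 13). */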
+
        if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
                arg->peer_flags |= WMI_PEER_80MHZ;
 
@@ -1048,7 +1064,8 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
        arg->peer_vht_rates.tx_mcs_set =
                __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
 
-       ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer\n");
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+                  sta->addr, arg->peer_max_mpdu, arg->peer_flags);
 }
 
 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
@@ -1076,8 +1093,6 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
 {
        enum wmi_phy_mode phymode = MODE_UNKNOWN;
 
-       /* FIXME: add VHT */
-
        switch (ar->hw->conf.chandef.chan->band) {
        case IEEE80211_BAND_2GHZ:
                if (sta->ht_cap.ht_supported) {
@@ -1091,7 +1106,17 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
 
                break;
        case IEEE80211_BAND_5GHZ:
-               if (sta->ht_cap.ht_supported) {
+               /*
+                * Check VHT first.
+                */
+               if (sta->vht_cap.vht_supported) {
+                       if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+                               phymode = MODE_11AC_VHT80;
+                       else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+                               phymode = MODE_11AC_VHT40;
+                       else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+                               phymode = MODE_11AC_VHT20;
+               } else if (sta->ht_cap.ht_supported) {
                        if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
                                phymode = MODE_11NA_HT40;
                        else
@@ -1105,30 +1130,32 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
                break;
        }
 
+       ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+                  sta->addr, ath10k_wmi_phymode_str(phymode));
+
        arg->peer_phymode = phymode;
        WARN_ON(phymode == MODE_UNKNOWN);
 }
 
-static int ath10k_peer_assoc(struct ath10k *ar,
-                            struct ath10k_vif *arvif,
-                            struct ieee80211_sta *sta,
-                            struct ieee80211_bss_conf *bss_conf)
+static int ath10k_peer_assoc_prepare(struct ath10k *ar,
+                                    struct ath10k_vif *arvif,
+                                    struct ieee80211_sta *sta,
+                                    struct ieee80211_bss_conf *bss_conf,
+                                    struct wmi_peer_assoc_complete_arg *arg)
 {
-       struct wmi_peer_assoc_complete_arg arg;
-
        lockdep_assert_held(&ar->conf_mutex);
 
-       memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
+       memset(arg, 0, sizeof(*arg));
 
-       ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
-       ath10k_peer_assoc_h_crypto(ar, arvif, &arg);
-       ath10k_peer_assoc_h_rates(ar, sta, &arg);
-       ath10k_peer_assoc_h_ht(ar, sta, &arg);
-       ath10k_peer_assoc_h_vht(ar, sta, &arg);
-       ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg);
-       ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg);
+       ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
+       ath10k_peer_assoc_h_crypto(ar, arvif, arg);
+       ath10k_peer_assoc_h_rates(ar, sta, arg);
+       ath10k_peer_assoc_h_ht(ar, sta, arg);
+       ath10k_peer_assoc_h_vht(ar, sta, arg);
+       ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
+       ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
 
-       return ath10k_wmi_peer_assoc(ar, &arg);
+       return 0;
 }
 
 /* can be called only in mac80211 callbacks due to `key_count` usage */
@@ -1138,6 +1165,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct wmi_peer_assoc_complete_arg peer_arg;
        struct ieee80211_sta *ap_sta;
        int ret;
 
@@ -1153,24 +1181,33 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
                return;
        }
 
-       ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf);
+       ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
+                                       bss_conf, &peer_arg);
        if (ret) {
-               ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid);
+               ath10k_warn("Peer assoc prepare failed for %pM: %d\n",
+                           bss_conf->bssid, ret);
                rcu_read_unlock();
                return;
        }
 
        rcu_read_unlock();
 
+       ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+       if (ret) {
+               ath10k_warn("Peer assoc failed for %pM: %d\n",
+                           bss_conf->bssid, ret);
+               return;
+       }
+
+       ath10k_dbg(ATH10K_DBG_MAC,
+                  "mac vdev %d up (associated) bssid %pM aid %d\n",
+                  arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
        ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, bss_conf->aid,
                                 bss_conf->bssid);
        if (ret)
                ath10k_warn("VDEV: %d up failed: ret %d\n",
                            arvif->vdev_id, ret);
-       else
-               ath10k_dbg(ATH10K_DBG_MAC,
-                          "VDEV: %d associated, BSSID: %pM, AID: %d\n",
-                          arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
 }
 
 /*
@@ -1191,10 +1228,11 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
         * No idea why this happens, even though VDEV-DOWN is supposed
         * to be analogous to link down, so just stop the VDEV.
         */
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated)\n",
+                  arvif->vdev_id);
+
+       /* FIXME: check return value */
        ret = ath10k_vdev_stop(arvif);
-       if (!ret)
-               ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d stopped\n",
-                          arvif->vdev_id);
 
        /*
         * If we don't call VDEV-DOWN after VDEV-STOP FW will remain active and
@@ -1203,26 +1241,33 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
         * interfaces as it expects there is no rx when no interface is
         * running.
         */
-       ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
-       if (ret)
-               ath10k_dbg(ATH10K_DBG_MAC, "VDEV: %d ath10k_wmi_vdev_down failed (%d)\n",
-                          arvif->vdev_id, ret);
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
 
-       ath10k_wmi_flush_tx(ar);
+       /* FIXME: why don't we print error if wmi call fails? */
+       ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
 
-       arvif->def_wep_key_index = 0;
+       arvif->def_wep_key_idx = 0;
 }
 
 static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
                                struct ieee80211_sta *sta)
 {
+       struct wmi_peer_assoc_complete_arg peer_arg;
        int ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
+       ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
+       if (ret) {
+               ath10k_warn("WMI peer assoc prepare failed for %pM\n",
+                           sta->addr);
+               return ret;
+       }
+
+       ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
        if (ret) {
-               ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
+               ath10k_warn("Peer assoc failed for STA %pM: %d\n",
+                           sta->addr, ret);
                return ret;
        }
 
@@ -1333,8 +1378,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
                                continue;
 
                        ath10k_dbg(ATH10K_DBG_WMI,
-                                  "%s: [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
-                                  __func__, ch - arg.channels, arg.n_channels,
+                                  "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+                                   ch - arg.channels, arg.n_channels,
                                   ch->freq, ch->max_power, ch->max_reg_power,
                                   ch->max_antenna_gain, ch->mode);
 
@@ -1391,6 +1436,33 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
 /* TX handlers */
 /***************/
 
+static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
+{
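+       /* Management frames use the dedicated management TID, non-QoS data
+        * and multicast/broadcast frames use the non-QoS TID, and QoS
+        * unicast data uses the TID from its QoS control field. */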
+       if (ieee80211_is_mgmt(hdr->frame_control))
+               return HTT_DATA_TX_EXT_TID_MGMT;
+
+       if (!ieee80211_is_data_qos(hdr->frame_control))
+               return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+       if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
+               return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+
+       return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
+                                 struct ieee80211_tx_info *info)
+{
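+       /* Use the transmitting vif's vdev when mac80211 provides one, fall
+        * back to the monitor vdev when monitoring is enabled, otherwise
+        * warn and default to vdev 0. */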
+       if (info->control.vif)
+               return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
+
+       if (ar->monitor_enabled)
+               return ar->monitor_vdev_id;
+
+       ath10k_warn("could not resolve vdev id\n");
+       return 0;
+}
+
 /*
  * Frames sent to the FW have to be in "Native Wifi" format.
  * Strip the QoS field from the 802.11 header.
@@ -1411,6 +1483,30 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
        skb_pull(skb, IEEE80211_QOS_CTL_LEN);
 }
 
+static void ath10k_tx_wep_key_work(struct work_struct *work)
+{
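+       /* Deferred default WEP key index update: ath10k_tx_h_update_wep_key()
+        * records the desired index in def_wep_key_newidx and schedules this
+        * worker so the WMI command is issued outside the TX path. */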
+       struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+                                               wep_key_work);
+       int ret, keyidx = arvif->def_wep_key_newidx;
+
+       if (arvif->def_wep_key_idx == keyidx)
+               return;
+
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+                  arvif->vdev_id, keyidx);
+
+       ret = ath10k_wmi_vdev_set_param(arvif->ar,
+                                       arvif->vdev_id,
+                                       arvif->ar->wmi.vdev_param->def_keyid,
+                                       keyidx);
+       if (ret) {
+               ath10k_warn("could not update wep keyidx (%d)\n", ret);
+               return;
+       }
+
+       arvif->def_wep_key_idx = keyidx;
+}
+
 static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1419,11 +1515,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
        struct ath10k *ar = arvif->ar;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_key_conf *key = info->control.hw_key;
-       int ret;
-
-       /* TODO AP mode should be implemented */
-       if (vif->type != NL80211_IFTYPE_STATION)
-               return;
 
        if (!ieee80211_has_protected(hdr->frame_control))
                return;
@@ -1435,20 +1526,14 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
            key->cipher != WLAN_CIPHER_SUITE_WEP104)
                return;
 
-       if (key->keyidx == arvif->def_wep_key_index)
+       if (key->keyidx == arvif->def_wep_key_idx)
                return;
 
-       ath10k_dbg(ATH10K_DBG_MAC, "new wep keyidx will be %d\n", key->keyidx);
-
-       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-                                       WMI_VDEV_PARAM_DEF_KEYID,
-                                       key->keyidx);
-       if (ret) {
-               ath10k_warn("could not update wep keyidx (%d)\n", ret);
-               return;
-       }
-
-       arvif->def_wep_key_index = key->keyidx;
+       /* FIXME: Most likely a few frames will be TXed with an old key. Simply
+        * queueing frames until key index is updated is not an option because
+        * sk_buff may need more processing to be done, e.g. offchannel */
+       arvif->def_wep_key_newidx = key->keyidx;
+       ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
 }
 
 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
@@ -1478,19 +1563,42 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
 static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       int ret;
+       int ret = 0;
 
-       if (ieee80211_is_mgmt(hdr->frame_control))
-               ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
-       else if (ieee80211_is_nullfunc(hdr->frame_control))
+       if (ar->htt.target_version_major >= 3) {
+               /* Since HTT 3.0 there is no separate mgmt tx command */
+               ret = ath10k_htt_tx(&ar->htt, skb);
+               goto exit;
+       }
+
+       if (ieee80211_is_mgmt(hdr->frame_control)) {
+               if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+                            ar->fw_features)) {
+                       if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
+                           ATH10K_MAX_NUM_MGMT_PENDING) {
+                               ath10k_warn("wmi mgmt_tx queue limit reached\n");
+                               ret = -EBUSY;
+                               goto exit;
+                       }
+
+                       skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
+                       ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+               } else {
+                       ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
+               }
+       } else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+                            ar->fw_features) &&
+                  ieee80211_is_nullfunc(hdr->frame_control)) {
                /* FW does not report tx status properly for NullFunc frames
                 * unless they are sent through mgmt tx path. mac80211 sends
-                * those frames when it detects link/beacon loss and depends on
-                * the tx status to be correct. */
+                * those frames when it detects link/beacon loss and depends
+                * on the tx status to be correct. */
                ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
-       else
+       } else {
                ret = ath10k_htt_tx(&ar->htt, skb);
+       }
 
+exit:
        if (ret) {
                ath10k_warn("tx failed (%d). dropping packet.\n", ret);
                ieee80211_free_txskb(ar->hw, skb);
@@ -1534,18 +1642,19 @@ void ath10k_offchan_tx_work(struct work_struct *work)
 
                mutex_lock(&ar->conf_mutex);
 
-               ath10k_dbg(ATH10K_DBG_MAC, "processing offchannel skb %p\n",
+               ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
                           skb);
 
                hdr = (struct ieee80211_hdr *)skb->data;
                peer_addr = ieee80211_get_DA(hdr);
-               vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id;
+               vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
 
                spin_lock_bh(&ar->data_lock);
                peer = ath10k_peer_find(ar, vdev_id, peer_addr);
                spin_unlock_bh(&ar->data_lock);
 
                if (peer)
+                       /* FIXME: should this use ath10k_warn()? */
                        ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
                                   peer_addr, vdev_id);
 
@@ -1580,6 +1689,36 @@ void ath10k_offchan_tx_work(struct work_struct *work)
        }
 }
 
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
+{
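+       /* Free any management frames still waiting in the WMI TX queue,
+        * e.g. when the device is stopped or halted. */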
+       struct sk_buff *skb;
+
+       for (;;) {
+               skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+               if (!skb)
+                       break;
+
+               ieee80211_free_txskb(ar->hw, skb);
+       }
+}
+
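+/* Drain wmi_mgmt_tx_queue and send each queued management frame via
+ * ath10k_wmi_mgmt_tx(). Frames are queued here by ath10k_tx_htt() when the
+ * firmware advertises ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX. */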
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+       struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
+       struct sk_buff *skb;
+       int ret;
+
+       for (;;) {
+               skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+               if (!skb)
+                       break;
+
+               ret = ath10k_wmi_mgmt_tx(ar, skb);
+               if (ret)
+                       ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
+       }
+}
+
 /************/
 /* Scanning */
 /************/
@@ -1643,8 +1782,6 @@ static int ath10k_abort_scan(struct ath10k *ar)
                return -EIO;
        }
 
-       ath10k_wmi_flush_tx(ar);
-
        ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
        if (ret == 0)
                ath10k_warn("timed out while waiting for scan to stop\n");
@@ -1678,10 +1815,6 @@ static int ath10k_start_scan(struct ath10k *ar,
        if (ret)
                return ret;
 
-       /* make sure we submit the command so the completion
-       * timeout makes sense */
-       ath10k_wmi_flush_tx(ar);
-
        ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
        if (ret == 0) {
                ath10k_abort_scan(ar);
@@ -1709,16 +1842,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = NULL;
-       u32 vdev_id = 0;
-       u8 tid;
-
-       if (info->control.vif) {
-               arvif = ath10k_vif_to_arvif(info->control.vif);
-               vdev_id = arvif->vdev_id;
-       } else if (ar->monitor_enabled) {
-               vdev_id = ar->monitor_vdev_id;
-       }
+       u8 tid, vdev_id;
 
        /* We should disable CCK RATE due to P2P */
        if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
@@ -1726,12 +1850,8 @@ static void ath10k_tx(struct ieee80211_hw *hw,
 
        /* we must calculate tid before we apply qos workaround
         * as we'd lose the qos control field */
-       tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
-       if (ieee80211_is_data_qos(hdr->frame_control) &&
-           is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
-               u8 *qc = ieee80211_get_qos_ctl(hdr);
-               tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-       }
+       tid = ath10k_tx_h_get_tid(hdr);
+       vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
 
        /* it makes no sense to process injected frames like that */
        if (info->control.vif &&
@@ -1742,14 +1862,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
                ath10k_tx_h_seq_no(skb);
        }
 
-       memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
-       ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
+       ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
+       ATH10K_SKB_CB(skb)->htt.is_offchan = false;
        ATH10K_SKB_CB(skb)->htt.tid = tid;
 
        if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
                spin_lock_bh(&ar->data_lock);
                ATH10K_SKB_CB(skb)->htt.is_offchan = true;
-               ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id;
+               ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
                spin_unlock_bh(&ar->data_lock);
 
                ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
@@ -1771,6 +1891,7 @@ void ath10k_halt(struct ath10k *ar)
 
        del_timer_sync(&ar->scan.timeout);
        ath10k_offchan_tx_purge(ar);
+       ath10k_mgmt_over_wmi_tx_purge(ar);
        ath10k_peer_cleanup_all(ar);
        ath10k_core_stop(ar);
        ath10k_hif_power_down(ar);
@@ -1817,12 +1938,12 @@ static int ath10k_start(struct ieee80211_hw *hw)
        else if (ar->state == ATH10K_STATE_RESTARTING)
                ar->state = ATH10K_STATE_RESTARTED;
 
-       ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
+       ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
        if (ret)
                ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
                            ret);
 
-       ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0);
+       ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0);
        if (ret)
                ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
                            ret);
@@ -1847,32 +1968,29 @@ static void ath10k_stop(struct ieee80211_hw *hw)
        ar->state = ATH10K_STATE_OFF;
        mutex_unlock(&ar->conf_mutex);
 
+       ath10k_mgmt_over_wmi_tx_purge(ar);
+
        cancel_work_sync(&ar->offchan_tx_work);
+       cancel_work_sync(&ar->wmi_mgmt_tx_work);
        cancel_work_sync(&ar->restart_work);
 }
 
-static void ath10k_config_ps(struct ath10k *ar)
+static int ath10k_config_ps(struct ath10k *ar)
 {
-       struct ath10k_generic_iter ar_iter;
+       struct ath10k_vif *arvif;
+       int ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
 
-       /* During HW reconfiguration mac80211 reports all interfaces that were
-        * running until reconfiguration was started. Since FW doesn't have any
-        * vdevs at this point we must not iterate over this interface list.
-        * This setting will be updated upon add_interface(). */
-       if (ar->state == ATH10K_STATE_RESTARTED)
-               return;
-
-       memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
-       ar_iter.ar = ar;
-
-       ieee80211_iterate_active_interfaces_atomic(
-               ar->hw, IEEE80211_IFACE_ITER_NORMAL,
-               ath10k_ps_iter, &ar_iter);
+       list_for_each_entry(arvif, &ar->arvifs, list) {
+               ret = ath10k_mac_vif_setup_ps(arvif);
+               if (ret) {
+                       ath10k_warn("could not setup powersave (%d)\n", ret);
+                       break;
+               }
+       }
 
-       if (ar_iter.ret)
-               ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
+       return ret;
 }
 
 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1884,7 +2002,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
        mutex_lock(&ar->conf_mutex);
 
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
-               ath10k_dbg(ATH10K_DBG_MAC, "Config channel %d mhz\n",
+               ath10k_dbg(ATH10K_DBG_MAC, "mac config channel %d mhz\n",
                           conf->chandef.chan->center_freq);
                spin_lock_bh(&ar->data_lock);
                ar->rx_channel = conf->chandef.chan;
@@ -1901,7 +2019,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
                        ret = ath10k_monitor_destroy(ar);
        }
 
-       ath10k_wmi_flush_tx(ar);
        mutex_unlock(&ar->conf_mutex);
        return ret;
 }
@@ -1922,6 +2039,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        int ret = 0;
        u32 value;
        int bit;
+       u32 vdev_param;
 
        mutex_lock(&ar->conf_mutex);
 
@@ -1930,21 +2048,22 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
        arvif->ar = ar;
        arvif->vif = vif;
 
+       INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
+
        if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
                ath10k_warn("Only one monitor interface allowed\n");
                ret = -EBUSY;
-               goto exit;
+               goto err;
        }
 
        bit = ffs(ar->free_vdev_map);
        if (bit == 0) {
                ret = -EBUSY;
-               goto exit;
+               goto err;
        }
 
        arvif->vdev_id = bit - 1;
        arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
-       ar->free_vdev_map &= ~(1 << arvif->vdev_id);
 
        if (ar->p2p)
                arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
@@ -1973,32 +2092,41 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                break;
        }
 
-       ath10k_dbg(ATH10K_DBG_MAC, "Add interface: id %d type %d subtype %d\n",
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
                   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
 
        ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
                                     arvif->vdev_subtype, vif->addr);
        if (ret) {
                ath10k_warn("WMI vdev create failed: ret %d\n", ret);
-               goto exit;
+               goto err;
        }
 
-       ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID,
-                                       arvif->def_wep_key_index);
-       if (ret)
+       ar->free_vdev_map &= ~BIT(arvif->vdev_id);
+       list_add(&arvif->list, &ar->arvifs);
+
+       vdev_param = ar->wmi.vdev_param->def_keyid;
+       ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
+                                       arvif->def_wep_key_idx);
+       if (ret) {
                ath10k_warn("Failed to set default keyid: %d\n", ret);
+               goto err_vdev_delete;
+       }
 
-       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-                                       WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+       vdev_param = ar->wmi.vdev_param->tx_encap_type;
+       ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                        ATH10K_HW_TXRX_NATIVE_WIFI);
-       if (ret)
+       /* 10.X firmware does not support this VDEV parameter. Do not warn */
+       if (ret && ret != -EOPNOTSUPP) {
                ath10k_warn("Failed to set TX encap: %d\n", ret);
+               goto err_vdev_delete;
+       }
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
                if (ret) {
                        ath10k_warn("Failed to create peer for AP: %d\n", ret);
-                       goto exit;
+                       goto err_vdev_delete;
                }
        }
 
@@ -2007,39 +2135,62 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
-               if (ret)
+               if (ret) {
                        ath10k_warn("Failed to set RX wake policy: %d\n", ret);
+                       goto err_peer_delete;
+               }
 
                param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
                value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
-               if (ret)
+               if (ret) {
                        ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
+                       goto err_peer_delete;
+               }
 
                param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
                value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
                ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
                                                  param, value);
-               if (ret)
+               if (ret) {
                        ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
+                       goto err_peer_delete;
+               }
        }
 
        ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
-       if (ret)
+       if (ret) {
                ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
                            arvif->vdev_id, ret);
+               goto err_peer_delete;
+       }
 
        ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
-       if (ret)
+       if (ret) {
                ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
                            arvif->vdev_id, ret);
+               goto err_peer_delete;
+       }
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
                ar->monitor_present = true;
 
-exit:
        mutex_unlock(&ar->conf_mutex);
+       return 0;
+
+err_peer_delete:
+       if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
+               ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+
+err_vdev_delete:
+       ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+       ar->free_vdev_map &= ~BIT(arvif->vdev_id);
+       list_del(&arvif->list);
+
+err:
+       mutex_unlock(&ar->conf_mutex);
+
        return ret;
 }
 
@@ -2052,9 +2203,17 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&ar->conf_mutex);
 
-       ath10k_dbg(ATH10K_DBG_MAC, "Remove interface: id %d\n", arvif->vdev_id);
+       cancel_work_sync(&arvif->wep_key_work);
+
+       spin_lock_bh(&ar->data_lock);
+       if (arvif->beacon) {
+               dev_kfree_skb_any(arvif->beacon);
+               arvif->beacon = NULL;
+       }
+       spin_unlock_bh(&ar->data_lock);
 
        ar->free_vdev_map |= 1 << (arvif->vdev_id);
+       list_del(&arvif->list);
 
        if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
                ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
@@ -2064,6 +2223,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
                kfree(arvif->u.ap.noa_data);
        }
 
+       ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
+                  arvif->vdev_id);
+
        ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
        if (ret)
                ath10k_warn("WMI vdev delete failed: %d\n", ret);
@@ -2105,18 +2267,20 @@ static void ath10k_configure_filter(struct ieee80211_hw *hw,
 
        if ((ar->filter_flags & FIF_PROMISC_IN_BSS) &&
            !ar->monitor_enabled) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d start\n",
+                          ar->monitor_vdev_id);
+
                ret = ath10k_monitor_start(ar, ar->monitor_vdev_id);
                if (ret)
                        ath10k_warn("Unable to start monitor mode\n");
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode started\n");
        } else if (!(ar->filter_flags & FIF_PROMISC_IN_BSS) &&
                   ar->monitor_enabled) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac monitor %d stop\n",
+                          ar->monitor_vdev_id);
+
                ret = ath10k_monitor_stop(ar);
                if (ret)
                        ath10k_warn("Unable to stop monitor mode\n");
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC, "Monitor mode stopped\n");
        }
 
        mutex_unlock(&ar->conf_mutex);
@@ -2130,6 +2294,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        int ret = 0;
+       u32 vdev_param, pdev_param;
 
        mutex_lock(&ar->conf_mutex);
 
@@ -2138,44 +2303,44 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
 
        if (changed & BSS_CHANGED_BEACON_INT) {
                arvif->beacon_interval = info->beacon_int;
-               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-                                               WMI_VDEV_PARAM_BEACON_INTERVAL,
+               vdev_param = ar->wmi.vdev_param->beacon_interval;
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                arvif->beacon_interval);
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "mac vdev %d beacon_interval %d\n",
+                          arvif->vdev_id, arvif->beacon_interval);
+
                if (ret)
                        ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
                                    arvif->vdev_id);
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC,
-                                  "Beacon interval: %d set for VDEV: %d\n",
-                                  arvif->beacon_interval, arvif->vdev_id);
        }
 
        if (changed & BSS_CHANGED_BEACON) {
-               ret = ath10k_wmi_pdev_set_param(ar,
-                                               WMI_PDEV_PARAM_BEACON_TX_MODE,
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "vdev %d set beacon tx mode to staggered\n",
+                          arvif->vdev_id);
+
+               pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
+               ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
                                                WMI_BEACON_STAGGERED_MODE);
                if (ret)
                        ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
                                    arvif->vdev_id);
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC,
-                                  "Set staggered beacon mode for VDEV: %d\n",
-                                  arvif->vdev_id);
        }
 
        if (changed & BSS_CHANGED_BEACON_INFO) {
                arvif->dtim_period = info->dtim_period;
 
-               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-                                               WMI_VDEV_PARAM_DTIM_PERIOD,
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "mac vdev %d dtim_period %d\n",
+                          arvif->vdev_id, arvif->dtim_period);
+
+               vdev_param = ar->wmi.vdev_param->dtim_period;
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                arvif->dtim_period);
                if (ret)
                        ath10k_warn("Failed to set dtim period for VDEV: %d\n",
                                    arvif->vdev_id);
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC,
-                                  "Set dtim period: %d for VDEV: %d\n",
-                                  arvif->dtim_period, arvif->vdev_id);
        }
 
        if (changed & BSS_CHANGED_SSID &&
@@ -2188,16 +2353,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
 
        if (changed & BSS_CHANGED_BSSID) {
                if (!is_zero_ether_addr(info->bssid)) {
+                       ath10k_dbg(ATH10K_DBG_MAC,
+                                  "mac vdev %d create peer %pM\n",
+                                  arvif->vdev_id, info->bssid);
+
                        ret = ath10k_peer_create(ar, arvif->vdev_id,
                                                 info->bssid);
                        if (ret)
                                ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
                                            info->bssid, arvif->vdev_id);
-                       else
-                               ath10k_dbg(ATH10K_DBG_MAC,
-                                          "Added peer: %pM for VDEV: %d\n",
-                                          info->bssid, arvif->vdev_id);
-
 
                        if (vif->type == NL80211_IFTYPE_STATION) {
                                /*
@@ -2207,11 +2371,12 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                                memcpy(arvif->u.sta.bssid, info->bssid,
                                       ETH_ALEN);
 
+                               ath10k_dbg(ATH10K_DBG_MAC,
+                                          "mac vdev %d start %pM\n",
+                                          arvif->vdev_id, info->bssid);
+
+                               /* FIXME: check return value */
                                ret = ath10k_vdev_start(arvif);
-                               if (!ret)
-                                       ath10k_dbg(ATH10K_DBG_MAC,
-                                                  "VDEV: %d started with BSSID: %pM\n",
-                                                  arvif->vdev_id, info->bssid);
                        }
 
                        /*
@@ -2235,16 +2400,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                else
                        cts_prot = 0;
 
-               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-                                               WMI_VDEV_PARAM_ENABLE_RTSCTS,
+               ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
+                          arvif->vdev_id, cts_prot);
+
+               vdev_param = ar->wmi.vdev_param->enable_rtscts;
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                cts_prot);
                if (ret)
                        ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
                                    arvif->vdev_id);
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC,
-                                  "Set CTS prot: %d for VDEV: %d\n",
-                                  cts_prot, arvif->vdev_id);
        }
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2255,16 +2419,15 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                else
                        slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
 
-               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-                                               WMI_VDEV_PARAM_SLOT_TIME,
+               ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+                          arvif->vdev_id, slottime);
+
+               vdev_param = ar->wmi.vdev_param->slot_time;
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                slottime);
                if (ret)
                        ath10k_warn("Failed to set erp slot for VDEV: %d\n",
                                    arvif->vdev_id);
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC,
-                                  "Set slottime: %d for VDEV: %d\n",
-                                  slottime, arvif->vdev_id);
        }
 
        if (changed & BSS_CHANGED_ERP_PREAMBLE) {
@@ -2274,16 +2437,16 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                else
                        preamble = WMI_VDEV_PREAMBLE_LONG;
 
-               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
-                                               WMI_VDEV_PARAM_PREAMBLE,
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "mac vdev %d preamble %d\n",
+                          arvif->vdev_id, preamble);
+
+               vdev_param = ar->wmi.vdev_param->preamble;
+               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                                preamble);
                if (ret)
                        ath10k_warn("Failed to set preamble for VDEV: %d\n",
                                    arvif->vdev_id);
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC,
-                                  "Set preamble: %d for VDEV: %d\n",
-                                  preamble, arvif->vdev_id);
        }
 
        if (changed & BSS_CHANGED_ASSOC) {
@@ -2474,27 +2637,26 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                /*
                 * New station addition.
                 */
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "mac vdev %d peer create %pM (new sta)\n",
+                          arvif->vdev_id, sta->addr);
+
                ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
                if (ret)
                        ath10k_warn("Failed to add peer: %pM for VDEV: %d\n",
                                    sta->addr, arvif->vdev_id);
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC,
-                                  "Added peer: %pM for VDEV: %d\n",
-                                  sta->addr, arvif->vdev_id);
        } else if ((old_state == IEEE80211_STA_NONE &&
                    new_state == IEEE80211_STA_NOTEXIST)) {
                /*
                 * Existing station deletion.
                 */
+               ath10k_dbg(ATH10K_DBG_MAC,
+                          "mac vdev %d peer delete %pM (sta gone)\n",
+                          arvif->vdev_id, sta->addr);
                ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
                if (ret)
                        ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
                                    sta->addr, arvif->vdev_id);
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC,
-                                  "Removed peer: %pM for VDEV: %d\n",
-                                  sta->addr, arvif->vdev_id);
 
                if (vif->type == NL80211_IFTYPE_STATION)
                        ath10k_bss_disassoc(hw, vif);
@@ -2505,14 +2667,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                /*
                 * New association.
                 */
+               ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
+                          sta->addr);
+
                ret = ath10k_station_assoc(ar, arvif, sta);
                if (ret)
                        ath10k_warn("Failed to associate station: %pM\n",
                                    sta->addr);
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC,
-                                  "Station %pM moved to assoc state\n",
-                                  sta->addr);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTH &&
                   (vif->type == NL80211_IFTYPE_AP ||
@@ -2520,14 +2681,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                /*
                 * Disassociation.
                 */
+               ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+                          sta->addr);
+
                ret = ath10k_station_disassoc(ar, arvif, sta);
                if (ret)
                        ath10k_warn("Failed to disassociate station: %pM\n",
                                    sta->addr);
-               else
-                       ath10k_dbg(ATH10K_DBG_MAC,
-                                  "Station %pM moved to disassociated state\n",
-                                  sta->addr);
        }
 
        mutex_unlock(&ar->conf_mutex);
@@ -2732,88 +2892,51 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
  * Both RTS and Fragmentation threshold are interface-specific
  * in ath10k, but device-specific in mac80211.
  */
-static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct ath10k_generic_iter *ar_iter = data;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
-       u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
-
-       lockdep_assert_held(&arvif->ar->conf_mutex);
-
-       /* During HW reconfiguration mac80211 reports all interfaces that were
-        * running until reconfiguration was started. Since FW doesn't have any
-        * vdevs at this point we must not iterate over this interface list.
-        * This setting will be updated upon add_interface(). */
-       if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
-               return;
-
-       ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
-       if (ar_iter->ret)
-               ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
-                           arvif->vdev_id);
-       else
-               ath10k_dbg(ATH10K_DBG_MAC,
-                          "Set RTS threshold: %d for VDEV: %d\n",
-                          rts, arvif->vdev_id);
-}
 
 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 {
-       struct ath10k_generic_iter ar_iter;
        struct ath10k *ar = hw->priv;
-
-       memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
-       ar_iter.ar = ar;
+       struct ath10k_vif *arvif;
+       int ret = 0;
 
        mutex_lock(&ar->conf_mutex);
-       ieee80211_iterate_active_interfaces_atomic(
-               hw, IEEE80211_IFACE_ITER_NORMAL,
-               ath10k_set_rts_iter, &ar_iter);
-       mutex_unlock(&ar->conf_mutex);
-
-       return ar_iter.ret;
-}
+       list_for_each_entry(arvif, &ar->arvifs, list) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+                          arvif->vdev_id, value);
 
-static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
-{
-       struct ath10k_generic_iter *ar_iter = data;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
-       u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
-
-       lockdep_assert_held(&arvif->ar->conf_mutex);
-
-       /* During HW reconfiguration mac80211 reports all interfaces that were
-        * running until reconfiguration was started. Since FW doesn't have any
-        * vdevs at this point we must not iterate over this interface list.
-        * This setting will be updated upon add_interface(). */
-       if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
-               return;
+               ret = ath10k_mac_set_rts(arvif, value);
+               if (ret) {
+                       ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
+                                   arvif->vdev_id, ret);
+                       break;
+               }
+       }
+       mutex_unlock(&ar->conf_mutex);
 
-       ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
-       if (ar_iter->ret)
-               ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
-                           arvif->vdev_id);
-       else
-               ath10k_dbg(ATH10K_DBG_MAC,
-                          "Set frag threshold: %d for VDEV: %d\n",
-                          frag, arvif->vdev_id);
+       return ret;
 }
 
 static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
 {
-       struct ath10k_generic_iter ar_iter;
        struct ath10k *ar = hw->priv;
-
-       memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
-       ar_iter.ar = ar;
+       struct ath10k_vif *arvif;
+       int ret = 0;
 
        mutex_lock(&ar->conf_mutex);
-       ieee80211_iterate_active_interfaces_atomic(
-               hw, IEEE80211_IFACE_ITER_NORMAL,
-               ath10k_set_frag_iter, &ar_iter);
+       list_for_each_entry(arvif, &ar->arvifs, list) {
+               ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
+                          arvif->vdev_id, value);
+
+               ret = ath10k_mac_set_frag(arvif, value);
+               if (ret) {
+                       ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
+                                   arvif->vdev_id, ret);
+                       break;
+               }
+       }
        mutex_unlock(&ar->conf_mutex);
 
-       return ar_iter.ret;
+       return ret;
 }
 
 static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
@@ -2836,8 +2959,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
                        bool empty;
 
                        spin_lock_bh(&ar->htt.tx_lock);
-                       empty = bitmap_empty(ar->htt.used_msdu_ids,
-                                            ar->htt.max_num_pending_tx);
+                       empty = (ar->htt.num_pending_tx == 0);
                        spin_unlock_bh(&ar->htt.tx_lock);
 
                        skip = (ar->state == ATH10K_STATE_WEDGED);
@@ -3326,6 +3448,10 @@ int ath10k_mac_register(struct ath10k *ar)
                        IEEE80211_HW_WANT_MONITOR_VIF |
                        IEEE80211_HW_AP_LINK_PS;
 
+       /* MSDU can have HTT TX fragment pushed in front. The additional 4
+        * bytes are used for padding/alignment if necessary. */
+       ar->hw->extra_tx_headroom += sizeof(struct htt_data_tx_desc_frag)*2 + 4;
+
        if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
                ar->hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
 
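The headroom hunk above reserves room for two HTT fragment descriptors plus up to 4 bytes of alignment padding in front of each MSDU. A minimal userspace sketch of that arithmetic, assuming a hypothetical 8-byte descriptor (the real size is sizeof(struct htt_data_tx_desc_frag) from the driver headers; all names below are invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    struct htt_data_tx_desc_frag_example {  /* hypothetical stand-in */
            unsigned int paddr;
            unsigned int len;
    };

    int main(void)
    {
            /* two fragment descriptors may be pushed in front of the MSDU,
             * plus up to 4 bytes of padding for alignment */
            size_t headroom = sizeof(struct htt_data_tx_desc_frag_example) * 2 + 4;

            printf("extra_tx_headroom grows by %zu bytes\n", headroom);
            return 0;
    }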
index 6fce9bfb19a5f2340d2e41fe2f668b7e8c9f4dce..ba1021997b8fc8c9604884604c2facd57ab0a8d1 100644 (file)
@@ -34,6 +34,8 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
 void ath10k_reset_scan(unsigned long ptr);
 void ath10k_offchan_tx_purge(struct ath10k *ar);
 void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
 void ath10k_halt(struct ath10k *ar);
 
 static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
index e2f9ef50b1bd3999f6b7cc2989c24e407e9bd72a..f8d59c7b90821a69b3401a7ec143bd6d8e0fc367 100644 (file)
@@ -36,11 +36,9 @@ static unsigned int ath10k_target_ps;
 module_param(ath10k_target_ps, uint, 0644);
 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
 
-#define QCA988X_1_0_DEVICE_ID  (0xabcd)
 #define QCA988X_2_0_DEVICE_ID  (0x003c)
 
 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
-       { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
 };
@@ -50,9 +48,9 @@ static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
 
 static void ath10k_pci_process_ce(struct ath10k *ar);
 static int ath10k_pci_post_rx(struct ath10k *ar);
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                             int num);
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
 static void ath10k_pci_stop_ce(struct ath10k *ar);
 static void ath10k_pci_device_reset(struct ath10k *ar);
 static int ath10k_pci_reset_target(struct ath10k *ar);
@@ -60,43 +58,145 @@ static int ath10k_pci_start_intr(struct ath10k *ar);
 static void ath10k_pci_stop_intr(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
-       /* host->target HTC control and raw streams */
-       { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
-       /* could be moved to share CE3 */
-       /* target->host HTT + HTC control */
-       { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
-       /* target->host WMI */
-       { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
-       /* host->target WMI */
-       { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
-       /* host->target HTT */
-       { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
-                   CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
-       /* unused */
-       { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
-       /* Target autonomous hif_memcpy */
-       { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
-       /* ce_diag, the Diagnostic Window */
-       { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
+       /* CE0: host->target HTC control and raw streams */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 16,
+               .src_sz_max = 256,
+               .dest_nentries = 0,
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 512,
+               .dest_nentries = 512,
+       },
+
+       /* CE2: target->host WMI */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 32,
+       },
+
+       /* CE3: host->target WMI */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 32,
+               .src_sz_max = 2048,
+               .dest_nentries = 0,
+       },
+
+       /* CE4: host->target HTT */
+       {
+               .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+               .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+               .src_sz_max = 256,
+               .dest_nentries = 0,
+       },
+
+       /* CE5: unused */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE6: target autonomous hif_memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE7: ce_diag, the Diagnostic Window */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 2,
+               .src_sz_max = DIAG_TRANSFER_LIMIT,
+               .dest_nentries = 2,
+       },
 };
 
 /* Target firmware's Copy Engine configuration. */
 static const struct ce_pipe_config target_ce_config_wlan[] = {
-       /* host->target HTC control and raw streams */
-       { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
-       /* target->host HTT + HTC control */
-       { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
-       /* target->host WMI */
-       { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
-       /* host->target WMI */
-       { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
-       /* host->target HTT */
-       { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
+       /* CE0: host->target HTC control and raw streams */
+       {
+               .pipenum = 0,
+               .pipedir = PIPEDIR_OUT,
+               .nentries = 32,
+               .nbytes_max = 256,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
+       /* CE1: target->host HTT + HTC control */
+       {
+               .pipenum = 1,
+               .pipedir = PIPEDIR_IN,
+               .nentries = 32,
+               .nbytes_max = 512,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
+       /* CE2: target->host WMI */
+       {
+               .pipenum = 2,
+               .pipedir = PIPEDIR_IN,
+               .nentries = 32,
+               .nbytes_max = 2048,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
+       /* CE3: host->target WMI */
+       {
+               .pipenum = 3,
+               .pipedir = PIPEDIR_OUT,
+               .nentries = 32,
+               .nbytes_max = 2048,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
+       /* CE4: host->target HTT */
+       {
+               .pipenum = 4,
+               .pipedir = PIPEDIR_OUT,
+               .nentries = 256,
+               .nbytes_max = 256,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
        /* NB: 50% of src nentries, since tx has 2 frags */
-       /* unused */
-       { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
-       /* Reserved for target autonomous hif_memcpy */
-       { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
+
+       /* CE5: unused */
+       {
+               .pipenum = 5,
+               .pipedir = PIPEDIR_OUT,
+               .nentries = 32,
+               .nbytes_max = 2048,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
+       /* CE6: Reserved for target autonomous hif_memcpy */
+       {
+               .pipenum = 6,
+               .pipedir = PIPEDIR_INOUT,
+               .nentries = 32,
+               .nbytes_max = 4096,
+               .flags = CE_ATTR_FLAGS,
+               .reserved = 0,
+       },
+
        /* CE7 used only by Host */
 };
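The rewritten CE tables above swap positional aggregate initializers for C99 designated initializers, so each field is named at the point it is set and unused fields default to zero. A standalone sketch of the same idiom, using a made-up pipe-config table rather than the driver's real one:

    #include <stdio.h>

    struct example_pipe_cfg {
            int pipenum;
            int nentries;
            int nbytes_max;
            unsigned int flags;
    };

    static const struct example_pipe_cfg example_cfg[] = {
            /* pipe 0: control */
            {
                    .pipenum = 0,
                    .nentries = 32,
                    .nbytes_max = 256,
            },
            /* pipe 1: data */
            {
                    .pipenum = 1,
                    .nentries = 256,
                    .nbytes_max = 2048,
            },
    };

    int main(void)
    {
            for (unsigned int i = 0; i < sizeof(example_cfg) / sizeof(example_cfg[0]); i++)
                    printf("pipe %d: %d entries of up to %d bytes\n",
                           example_cfg[i].pipenum, example_cfg[i].nentries,
                           example_cfg[i].nbytes_max);
            return 0;
    }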
 
@@ -114,7 +214,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
-       struct ce_state *ce_diag;
+       struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
@@ -278,7 +378,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
-       struct ce_state *ce_diag;
+       struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
@@ -437,7 +537,7 @@ static void ath10k_pci_wait(struct ath10k *ar)
                ath10k_warn("Unable to wakeup target\n");
 }
 
-void ath10k_do_pci_wake(struct ath10k *ar)
+int ath10k_do_pci_wake(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
@@ -453,18 +553,19 @@ void ath10k_do_pci_wake(struct ath10k *ar)
        atomic_inc(&ar_pci->keep_awake_count);
 
        if (ar_pci->verified_awake)
-               return;
+               return 0;
 
        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
-                       break;
+                       return 0;
                }
 
                if (tot_delay > PCIE_WAKE_TIMEOUT) {
-                       ath10k_warn("target takes too long to wake up (awake count %d)\n",
+                       ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
+                                   PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
-                       break;
+                       return -ETIMEDOUT;
                }
 
                udelay(curr_delay);
@@ -493,7 +594,7 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
  * FIXME: Handle OOM properly.
  */
 static inline
-struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
+struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
 {
        struct ath10k_pci_compl *compl = NULL;
 
@@ -511,39 +612,28 @@ exit:
 }
 
 /* Called by lower (CE) layer when a send to Target completes. */
-static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
-                                   void *transfer_context,
-                                   u32 ce_data,
-                                   unsigned int nbytes,
-                                   unsigned int transfer_id)
+static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+       struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
-       bool process = false;
-
-       do {
-               /*
-                * For the send completion of an item in sendlist, just
-                * increment num_sends_allowed. The upper layer callback will
-                * be triggered when last fragment is done with send.
-                */
-               if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
-                       spin_lock_bh(&pipe_info->pipe_lock);
-                       pipe_info->num_sends_allowed++;
-                       spin_unlock_bh(&pipe_info->pipe_lock);
-                       continue;
-               }
+       void *transfer_context;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int transfer_id;
 
+       while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
+                                            &ce_data, &nbytes,
+                                            &transfer_id) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;
 
-               compl->send_or_recv = HIF_CE_COMPLETE_SEND;
+               compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
-               compl->transfer_context = transfer_context;
+               compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;
@@ -554,46 +644,36 @@ static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
-
-               process = true;
-       } while (ath10k_ce_completed_send_next(ce_state,
-                                                          &transfer_context,
-                                                          &ce_data, &nbytes,
-                                                          &transfer_id) == 0);
-
-       /*
-        * If only some of the items within a sendlist have completed,
-        * don't invoke completion processing until the entire sendlist
-        * has been sent.
-        */
-       if (!process)
-               return;
+       }
 
        ath10k_pci_process_ce(ar);
 }
 
 /* Called by lower (CE) layer when data is received from the Target. */
-static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
-                                   void *transfer_context, u32 ce_data,
-                                   unsigned int nbytes,
-                                   unsigned int transfer_id,
-                                   unsigned int flags)
+static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
 {
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
+       struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
+       void *transfer_context;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int transfer_id;
+       unsigned int flags;
 
-       do {
+       while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+                                            &ce_data, &nbytes, &transfer_id,
+                                            &flags) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;
 
-               compl->send_or_recv = HIF_CE_COMPLETE_RECV;
+               compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
-               compl->transfer_context = transfer_context;
+               compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;
@@ -608,12 +688,7 @@ static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
-
-       } while (ath10k_ce_completed_recv_next(ce_state,
-                                                          &transfer_context,
-                                                          &ce_data, &nbytes,
-                                                          &transfer_id,
-                                                          &flags) == 0);
+       }
 
        ath10k_pci_process_ce(ar);
 }
@@ -625,15 +700,12 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
 {
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
-       struct ce_state *ce_hdl = pipe_info->ce_hdl;
-       struct ce_sendlist sendlist;
+       struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
+       struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        unsigned int len;
        u32 flags = 0;
        int ret;
 
-       memset(&sendlist, 0, sizeof(struct ce_sendlist));
-
        len = min(bytes, nbuf->len);
        bytes -= len;
 
@@ -648,19 +720,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);
 
-       ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
-
-       /* Make sure we have resources to handle this request */
-       spin_lock_bh(&pipe_info->pipe_lock);
-       if (!pipe_info->num_sends_allowed) {
-               ath10k_warn("Pipe: %d is full\n", pipe_id);
-               spin_unlock_bh(&pipe_info->pipe_lock);
-               return -ENOSR;
-       }
-       pipe_info->num_sends_allowed--;
-       spin_unlock_bh(&pipe_info->pipe_lock);
-
-       ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
+       ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
+                            flags);
        if (ret)
                ath10k_warn("CE send failed: %p\n", nbuf);
 
@@ -670,14 +731,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
-       int ret;
-
-       spin_lock_bh(&pipe_info->pipe_lock);
-       ret = pipe_info->num_sends_allowed;
-       spin_unlock_bh(&pipe_info->pipe_lock);
-
-       return ret;
+       return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
 }
 
 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
@@ -764,9 +818,9 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
 static int ath10k_pci_start_ce(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ce_state *ce_diag = ar_pci->ce_diag;
+       struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
-       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions, disable_interrupts;
 
@@ -792,7 +846,6 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                        completions += attr->src_nentries;
-                       pipe_info->num_sends_allowed = attr->src_nentries - 1;
                }
 
                if (attr->dest_nentries) {
@@ -805,15 +858,14 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
                        continue;
 
                for (i = 0; i < completions; i++) {
-                       compl = kmalloc(sizeof(struct ath10k_pci_compl),
-                                       GFP_KERNEL);
+                       compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_stop_ce(ar);
                                return -ENOMEM;
                        }
 
-                       compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+                       compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }
@@ -840,7 +892,7 @@ static void ath10k_pci_stop_ce(struct ath10k *ar)
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
-               skb = (struct sk_buff *)compl->transfer_context;
+               skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
@@ -850,7 +902,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
-       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;
 
@@ -861,7 +913,7 @@ static void ath10k_pci_cleanup_ce(struct ath10k *ar)
 
        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
-               netbuf = (struct sk_buff *)compl->transfer_context;
+               netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
@@ -912,12 +964,14 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);
 
-               if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
+               switch (compl->state) {
+               case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
-                                         compl->transfer_context,
+                                         compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
-               } else {
+                       break;
+               case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("Unable to post recv buffer for pipe: %d\n",
@@ -925,7 +979,7 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
                                break;
                        }
 
-                       skb = (struct sk_buff *)compl->transfer_context;
+                       skb = compl->skb;
                        nbytes = compl->nbytes;
 
                        ath10k_dbg(ATH10K_DBG_PCI,
@@ -944,16 +998,23 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
+                       break;
+               case ATH10K_PCI_COMPL_FREE:
+                       ath10k_warn("free completion cannot be processed\n");
+                       break;
+               default:
+                       ath10k_warn("invalid completion state (%d)\n",
+                                   compl->state);
+                       break;
                }
 
-               compl->send_or_recv = HIF_CE_COMPLETE_FREE;
+               compl->state = ATH10K_PCI_COMPL_FREE;
 
                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
-               compl->pipe_info->num_sends_allowed += send_done;
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }
 
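The processing loop above now dispatches on an explicit completion state (free/send/recv) instead of the old send_or_recv flag, warning on anything unexpected and returning the record to the free list afterwards. A simplified userspace sketch of that state handling, with illustrative names only:

    #include <stdio.h>

    enum example_compl_state {
            EXAMPLE_COMPL_FREE = 0,
            EXAMPLE_COMPL_SEND,
            EXAMPLE_COMPL_RECV,
    };

    struct example_compl {
            enum example_compl_state state;
            unsigned int nbytes;
    };

    static void example_process(struct example_compl *compl)
    {
            switch (compl->state) {
            case EXAMPLE_COMPL_SEND:
                    printf("tx completion, %u bytes\n", compl->nbytes);
                    break;
            case EXAMPLE_COMPL_RECV:
                    printf("rx completion, %u bytes\n", compl->nbytes);
                    break;
            case EXAMPLE_COMPL_FREE:
                    printf("free completion cannot be processed\n");
                    break;
            default:
                    printf("invalid completion state (%d)\n", compl->state);
                    break;
            }

            /* return the record to the free list */
            compl->state = EXAMPLE_COMPL_FREE;
    }

    int main(void)
    {
            struct example_compl c = { .state = EXAMPLE_COMPL_RECV, .nbytes = 1500 };

            example_process(&c);
            return 0;
    }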
@@ -1037,12 +1098,12 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                                 &dl_is_polled);
 }
 
-static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
+static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
 {
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ce_state *ce_state = pipe_info->ce_hdl;
+       struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;
@@ -1097,7 +1158,7 @@ err:
 static int ath10k_pci_post_rx(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;
 
@@ -1147,11 +1208,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
        return 0;
 }
 
-static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 {
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
-       struct ce_state *ce_hdl;
+       struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;
@@ -1179,11 +1240,11 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
        }
 }
 
-static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 {
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
-       struct ce_state *ce_hdl;
+       struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
@@ -1206,15 +1267,14 @@ static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
 
        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
-               if (netbuf != CE_SENDLIST_ITEM_CTXT)
-                       /*
-                        * Indicate the completion to higer layer to free
-                        * the buffer
-                        */
-                       ATH10K_SKB_CB(netbuf)->is_aborted = true;
-                       ar_pci->msg_callbacks_current.tx_completion(ar,
-                                                                   netbuf,
-                                                                   id);
+               /*
+                * Indicate the completion to higher layer to free
+                * the buffer
+                */
+               ATH10K_SKB_CB(netbuf)->is_aborted = true;
+               ar_pci->msg_callbacks_current.tx_completion(ar,
+                                                           netbuf,
+                                                           id);
        }
 }
 
@@ -1232,7 +1292,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
        int pipe_num;
 
        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
-               struct hif_ce_pipe_info *pipe_info;
+               struct ath10k_pci_pipe *pipe_info;
 
                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
@@ -1243,7 +1303,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
 static void ath10k_pci_ce_deinit(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_pipe *pipe_info;
        int pipe_num;
 
        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
@@ -1293,8 +1353,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *resp, u32 *resp_len)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
-       struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
+       struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+       struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+       struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+       struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
@@ -1378,13 +1440,16 @@ err_dma:
        return ret;
 }
 
-static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
-                                    void *transfer_context,
-                                    u32 data,
-                                    unsigned int nbytes,
-                                    unsigned int transfer_id)
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 {
-       struct bmi_xfer *xfer = transfer_context;
+       struct bmi_xfer *xfer;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int transfer_id;
+
+       if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
+                                         &nbytes, &transfer_id))
+               return;
 
        if (xfer->wait_for_resp)
                return;
@@ -1392,14 +1457,17 @@ static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
        complete(&xfer->done);
 }
 
-static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
-                                    void *transfer_context,
-                                    u32 data,
-                                    unsigned int nbytes,
-                                    unsigned int transfer_id,
-                                    unsigned int flags)
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
 {
-       struct bmi_xfer *xfer = transfer_context;
+       struct bmi_xfer *xfer;
+       u32 ce_data;
+       unsigned int nbytes;
+       unsigned int transfer_id;
+       unsigned int flags;
+
+       if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
+                                         &nbytes, &transfer_id, &flags))
+               return;
 
        if (!xfer->wait_for_resp) {
                ath10k_warn("unexpected: BMI data received; ignoring\n");
@@ -1679,7 +1747,7 @@ static int ath10k_pci_init_config(struct ath10k *ar)
 static int ath10k_pci_ce_init(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       struct hif_ce_pipe_info *pipe_info;
+       struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num;
 
@@ -1895,7 +1963,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
 
 static void ath10k_pci_ce_tasklet(unsigned long ptr)
 {
-       struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
+       struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
        struct ath10k_pci *ar_pci = pipe->ar_pci;
 
        ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
@@ -2212,18 +2280,13 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
 
 static void ath10k_pci_device_reset(struct ath10k *ar)
 {
-       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       void __iomem *mem = ar_pci->mem;
        int i;
        u32 val;
 
        if (!SOC_GLOBAL_RESET_ADDRESS)
                return;
 
-       if (!mem)
-               return;
-
-       ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
+       ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_V_MASK);
        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (ath10k_pci_target_is_awake(ar))
@@ -2232,12 +2295,12 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
        }
 
        /* Put Target, including PCIe, into RESET. */
-       val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
+       val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
        val |= 1;
-       ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+       ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-               if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+               if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
                                          RTC_STATE_COLD_RESET_MASK)
                        break;
                msleep(1);
@@ -2245,16 +2308,16 @@ static void ath10k_pci_device_reset(struct ath10k *ar)
 
        /* Pull Target, including PCIe, out of RESET. */
        val &= ~1;
-       ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
+       ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
 
        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
-               if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
+               if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
                                            RTC_STATE_COLD_RESET_MASK))
                        break;
                msleep(1);
        }
 
-       ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
+       ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
 }
 
 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
@@ -2267,13 +2330,10 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
 
                switch (i) {
                case ATH10K_PCI_FEATURE_MSI_X:
-                       ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
-                       break;
-               case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
-                       ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
+                       ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
                        break;
                case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
-                       ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+                       ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
                        break;
                }
        }
@@ -2286,7 +2346,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        int ret = 0;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
-       u32 lcr_val;
+       u32 lcr_val, chip_id;
 
        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
 
@@ -2298,9 +2358,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        ar_pci->dev = &pdev->dev;
 
        switch (pci_dev->device) {
-       case QCA988X_1_0_DEVICE_ID:
-               set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
-               break;
        case QCA988X_2_0_DEVICE_ID:
                set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
                break;
@@ -2322,10 +2379,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                goto err_ar_pci;
        }
 
-       /* Enable QCA988X_1.0 HW workarounds */
-       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
-               spin_lock_init(&ar_pci->hw_v1_workaround_lock);
-
        ar_pci->ar = ar;
        ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
        atomic_set(&ar_pci->keep_awake_count, 0);
@@ -2395,9 +2448,20 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 
        spin_lock_init(&ar_pci->ce_lock);
 
-       ar_pci->cacheline_sz = dma_get_cache_alignment();
+       ret = ath10k_do_pci_wake(ar);
+       if (ret) {
+               ath10k_err("Failed to get chip id: %d\n", ret);
+               return ret;
+       }
+
+       chip_id = ath10k_pci_read32(ar,
+                                   RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
+
+       ath10k_do_pci_sleep(ar);
+
+       ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
 
-       ret = ath10k_core_register(ar);
+       ret = ath10k_core_register(ar, chip_id);
        if (ret) {
                ath10k_err("could not register driver core (%d)\n", ret);
                goto err_iomap;
@@ -2414,7 +2478,6 @@ err_region:
 err_device:
        pci_disable_device(pdev);
 err_ar:
-       pci_set_drvdata(pdev, NULL);
        ath10k_core_destroy(ar);
 err_ar_pci:
        /* call HIF PCI free here */
@@ -2442,7 +2505,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
 
        ath10k_core_unregister(ar);
 
-       pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ar_pci->mem);
        pci_release_region(pdev, BAR_NUM);
        pci_clear_master(pdev);
@@ -2483,9 +2545,6 @@ module_exit(ath10k_pci_exit);
 MODULE_AUTHOR("Qualcomm Atheros");
 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
-MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
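The reworked ath10k_do_pci_wake() in this file polls the target until it reports awake and now returns -ETIMEDOUT once the wake budget is exhausted instead of returning void. A minimal userspace sketch of that poll-with-timeout pattern; the names, the fake readiness predicate, and the 10 ms budget standing in for PCIE_WAKE_TIMEOUT are all invented for illustration:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define EXAMPLE_WAKE_TIMEOUT_US 10000

    static bool example_target_is_awake(int attempt)
    {
            return attempt > 3;     /* pretend the device wakes after a few polls */
    }

    static int example_wake(void)
    {
            unsigned int tot_delay = 0, curr_delay = 5;
            int attempt = 0;

            for (;;) {
                    if (example_target_is_awake(attempt++))
                            return 0;

                    if (tot_delay > EXAMPLE_WAKE_TIMEOUT_US)
                            return -ETIMEDOUT;

                    usleep(curr_delay);
                    tot_delay += curr_delay;
                    if (curr_delay < 50)
                            curr_delay += 5;
            }
    }

    int main(void)
    {
            printf("wake returned %d\n", example_wake());
            return 0;
    }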
index 871bb339d56dc3fbb19d475ed50ea938c59fefa2..52fb7b9735714ea3f2dbf3ed03936ac3ae11f5c7 100644 (file)
@@ -43,22 +43,23 @@ struct bmi_xfer {
        u32 resp_len;
 };
 
+enum ath10k_pci_compl_state {
+       ATH10K_PCI_COMPL_FREE = 0,
+       ATH10K_PCI_COMPL_SEND,
+       ATH10K_PCI_COMPL_RECV,
+};
+
 struct ath10k_pci_compl {
        struct list_head list;
-       int send_or_recv;
-       struct ce_state *ce_state;
-       struct hif_ce_pipe_info *pipe_info;
-       void *transfer_context;
+       enum ath10k_pci_compl_state state;
+       struct ath10k_ce_pipe *ce_state;
+       struct ath10k_pci_pipe *pipe_info;
+       struct sk_buff *skb;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;
 };
 
-/* compl_state.send_or_recv */
-#define HIF_CE_COMPLETE_FREE 0
-#define HIF_CE_COMPLETE_SEND 1
-#define HIF_CE_COMPLETE_RECV 2
-
 /*
  * PCI-specific Target state
  *
@@ -152,17 +153,16 @@ struct service_to_pipe {
 
 enum ath10k_pci_features {
        ATH10K_PCI_FEATURE_MSI_X                = 0,
-       ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND    = 1,
-       ATH10K_PCI_FEATURE_SOC_POWER_SAVE       = 2,
+       ATH10K_PCI_FEATURE_SOC_POWER_SAVE       = 1,
 
        /* keep last */
        ATH10K_PCI_FEATURE_COUNT
 };
 
 /* Per-pipe state. */
-struct hif_ce_pipe_info {
+struct ath10k_pci_pipe {
        /* Handle of underlying Copy Engine */
-       struct ce_state *ce_hdl;
+       struct ath10k_ce_pipe *ce_hdl;
 
        /* Our pipe number; facilitates use of pipe_info ptrs. */
        u8 pipe_num;
@@ -178,9 +178,6 @@ struct hif_ce_pipe_info {
        /* List of free CE completion slots */
        struct list_head compl_free;
 
-       /* Limit the number of outstanding send requests. */
-       int num_sends_allowed;
-
        struct ath10k_pci *ar_pci;
        struct tasklet_struct intr;
 };
@@ -190,7 +187,6 @@ struct ath10k_pci {
        struct device *dev;
        struct ath10k *ar;
        void __iomem *mem;
-       int cacheline_sz;
 
        DECLARE_BITMAP(features, ATH10K_PCI_FEATURE_COUNT);
 
@@ -219,7 +215,7 @@ struct ath10k_pci {
 
        bool compl_processing;
 
-       struct hif_ce_pipe_info pipe_info[CE_COUNT_MAX];
+       struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
 
        struct ath10k_hif_cb msg_callbacks_current;
 
@@ -227,16 +223,13 @@ struct ath10k_pci {
        u32 fw_indicator_address;
 
        /* Copy Engine used for Diagnostic Accesses */
-       struct ce_state *ce_diag;
+       struct ath10k_ce_pipe *ce_diag;
 
        /* FIXME: document what this really protects */
        spinlock_t ce_lock;
 
        /* Map CE id to ce_state */
-       struct ce_state *ce_id_to_state[CE_COUNT_MAX];
-
-       /* makes sure that dummy reads are atomic */
-       spinlock_t hw_v1_workaround_lock;
+       struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
 };
 
 static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
@@ -244,14 +237,18 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
        return ar->hif.priv;
 }
 
-static inline u32 ath10k_pci_reg_read32(void __iomem *mem, u32 addr)
+static inline u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
 {
-       return ioread32(mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       return ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
 }
 
-static inline void ath10k_pci_reg_write32(void __iomem *mem, u32 addr, u32 val)
+static inline void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
 {
-       iowrite32(val, mem + PCIE_LOCAL_BASE_ADDRESS + addr);
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       iowrite32(val, ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS + addr);
 }
 
 #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
@@ -310,23 +307,8 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
                                      u32 value)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-       void __iomem *addr = ar_pci->mem;
-
-       if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
-               unsigned long irq_flags;
 
-               spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
-
-               ioread32(addr+offset+4); /* 3rd read prior to write */
-               ioread32(addr+offset+4); /* 2nd read prior to write */
-               ioread32(addr+offset+4); /* 1st read prior to write */
-               iowrite32(value, addr+offset);
-
-               spin_unlock_irqrestore(&ar_pci->hw_v1_workaround_lock,
-                                      irq_flags);
-       } else {
-               iowrite32(value, addr+offset);
-       }
+       iowrite32(value, ar_pci->mem + offset);
 }
 
 static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
@@ -336,15 +318,17 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
        return ioread32(ar_pci->mem + offset);
 }
 
-void ath10k_do_pci_wake(struct ath10k *ar);
+int ath10k_do_pci_wake(struct ath10k *ar);
 void ath10k_do_pci_sleep(struct ath10k *ar);
 
-static inline void ath10k_pci_wake(struct ath10k *ar)
+static inline int ath10k_pci_wake(struct ath10k *ar)
 {
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
        if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
-               ath10k_do_pci_wake(ar);
+               return ath10k_do_pci_wake(ar);
+
+       return 0;
 }
 
 static inline void ath10k_pci_sleep(struct ath10k *ar)
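The register helpers above now take struct ath10k * and look up the mapped BAR internally, so callers no longer thread the raw __iomem pointer through every call site. A small sketch of that accessor pattern, with placeholder types and a plain array standing in for ioread32():

    #include <stdint.h>
    #include <stdio.h>

    struct example_pci { uint32_t *mem; };
    struct example_dev { struct example_pci *priv; };

    static uint32_t example_reg_read32(struct example_dev *dev, uint32_t addr)
    {
            struct example_pci *pci = dev->priv;

            /* derive the base from the device object, as the helpers above do */
            return pci->mem[addr / sizeof(uint32_t)];
    }

    int main(void)
    {
            uint32_t regs[4] = { 0, 0xcafe, 0, 0 };
            struct example_pci pci = { .mem = regs };
            struct example_dev dev = { .priv = &pci };

            printf("reg at offset 4 = 0x%x\n",
                   (unsigned int)example_reg_read32(&dev, 4));
            return 0;
    }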
index bfec6c8f2ecb5aed3946ae23c6a93f81ef28f117..1c584c4b019cb7362bd611688fe57e01b3d38b02 100644 (file)
@@ -422,10 +422,30 @@ struct rx_mpdu_end {
 #define RX_MSDU_START_INFO1_IP_FRAG             (1 << 14)
 #define RX_MSDU_START_INFO1_TCP_ONLY_ACK        (1 << 15)
 
+/* The decapped header (rx_hdr_status) contains the following:
+ *  a) 802.11 header
+ *  [padding to 4 bytes]
+ *  b) HW crypto parameter
+ *     - 0 bytes for no security
+ *     - 4 bytes for WEP
+ *     - 8 bytes for TKIP, AES
+ *  [padding to 4 bytes]
+ *  c) A-MSDU subframe header (14 bytes) if applicable
+ *  d) LLC/SNAP (RFC1042, 8 bytes)
+ *
+ * In case of A-MSDU only first frame in sequence contains (a) and (b). */
 enum rx_msdu_decap_format {
-       RX_MSDU_DECAP_RAW           = 0,
-       RX_MSDU_DECAP_NATIVE_WIFI   = 1,
+       RX_MSDU_DECAP_RAW = 0,
+
+       /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
+        * htt_rx_desc contains the original decapped 802.11 header. */
+       RX_MSDU_DECAP_NATIVE_WIFI = 1,
+
+       /* Payload contains an ethernet header (struct ethhdr). */
        RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+
+       /* Payload contains two 48-bit addresses and 2-byte length (14 bytes
+        * total), followed by an RFC1042 header (8 bytes). */
        RX_MSDU_DECAP_8023_SNAP_LLC = 3
 };
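Taking the layout comment above literally, the decapped header length follows from the 802.11 header size, the crypto parameter size, and the optional A-MSDU subframe and LLC/SNAP headers, with 4-byte padding after (a) and (b). A rough sketch of that calculation; the helper and enum names are invented and the sizes are only the ones stated in the comment:

    #include <stdio.h>

    enum example_crypto { CRYPTO_NONE, CRYPTO_WEP, CRYPTO_TKIP_AES };

    static unsigned int align4(unsigned int v) { return (v + 3) & ~3u; }

    static unsigned int decap_hdr_len(unsigned int dot11_hdr_len,
                                      enum example_crypto crypto, int amsdu)
    {
            unsigned int len = align4(dot11_hdr_len);       /* a) 802.11 header */

            if (crypto == CRYPTO_WEP)                       /* b) crypto param */
                    len += align4(4);
            else if (crypto == CRYPTO_TKIP_AES)
                    len += align4(8);

            if (amsdu)                                      /* c) A-MSDU subframe header */
                    len += 14;

            return len + 8;                                 /* d) LLC/SNAP (RFC1042) */
    }

    int main(void)
    {
            printf("QoS data, TKIP/AES, no A-MSDU: %u bytes\n",
                   decap_hdr_len(26, CRYPTO_TKIP_AES, 0));
            return 0;
    }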
 
index 85e806bf7257ca231e21ace94a1abf8f9d83290a..90817ddc92ba70857ed9d3075c1e663ec75c59ce 100644 (file)
@@ -111,26 +111,29 @@ TRACE_EVENT(ath10k_log_dbg_dump,
 );
 
 TRACE_EVENT(ath10k_wmi_cmd,
-       TP_PROTO(int id, void *buf, size_t buf_len),
+       TP_PROTO(int id, void *buf, size_t buf_len, int ret),
 
-       TP_ARGS(id, buf, buf_len),
+       TP_ARGS(id, buf, buf_len, ret),
 
        TP_STRUCT__entry(
                __field(unsigned int, id)
                __field(size_t, buf_len)
                __dynamic_array(u8, buf, buf_len)
+               __field(int, ret)
        ),
 
        TP_fast_assign(
                __entry->id = id;
                __entry->buf_len = buf_len;
+               __entry->ret = ret;
                memcpy(__get_dynamic_array(buf), buf, buf_len);
        ),
 
        TP_printk(
-               "id %d len %zu",
+               "id %d len %zu ret %d",
                __entry->id,
-               __entry->buf_len
+               __entry->buf_len,
+               __entry->ret
        )
 );
 
@@ -158,6 +161,27 @@ TRACE_EVENT(ath10k_wmi_event,
        )
 );
 
+TRACE_EVENT(ath10k_htt_stats,
+       TP_PROTO(void *buf, size_t buf_len),
+
+       TP_ARGS(buf, buf_len),
+
+       TP_STRUCT__entry(
+               __field(size_t, buf_len)
+               __dynamic_array(u8, buf, buf_len)
+       ),
+
+       TP_fast_assign(
+               __entry->buf_len = buf_len;
+               memcpy(__get_dynamic_array(buf), buf, buf_len);
+       ),
+
+       TP_printk(
+               "len %zu",
+               __entry->buf_len
+       )
+);
+
 #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
 
 /* we don't want to use include/trace/events */
index 68b6faefd1d883f89642775c03390b67a9f929ac..5ae373a1e2942fbcb3d185533d41eaf9dd8e24bd 100644 (file)
@@ -44,40 +44,39 @@ out:
        spin_unlock_bh(&ar->data_lock);
 }
 
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+                         const struct htt_tx_done *tx_done)
 {
        struct device *dev = htt->ar->dev;
        struct ieee80211_tx_info *info;
-       struct sk_buff *txfrag = ATH10K_SKB_CB(txdesc)->htt.txfrag;
-       struct sk_buff *msdu = ATH10K_SKB_CB(txdesc)->htt.msdu;
+       struct ath10k_skb_cb *skb_cb;
+       struct sk_buff *msdu;
        int ret;
 
-       if (ATH10K_SKB_CB(txdesc)->htt.refcount == 0)
-               return;
-
-       ATH10K_SKB_CB(txdesc)->htt.refcount--;
+       ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+                  tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
 
-       if (ATH10K_SKB_CB(txdesc)->htt.refcount > 0)
+       if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+               ath10k_warn("msdu_id %d too big, ignoring\n",
+                           tx_done->msdu_id);
                return;
-
-       if (txfrag) {
-               ret = ath10k_skb_unmap(dev, txfrag);
-               if (ret)
-                       ath10k_warn("txfrag unmap failed (%d)\n", ret);
-
-               dev_kfree_skb_any(txfrag);
        }
 
+       msdu = htt->pending_tx[tx_done->msdu_id];
+       skb_cb = ATH10K_SKB_CB(msdu);
+
        ret = ath10k_skb_unmap(dev, msdu);
        if (ret)
                ath10k_warn("data skb unmap failed (%d)\n", ret);
 
+       if (skb_cb->htt.frag_len)
+               skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+
        ath10k_report_offchan_tx(htt->ar, msdu);
 
        info = IEEE80211_SKB_CB(msdu);
-       memset(&info->status, 0, sizeof(info->status));
 
-       if (ATH10K_SKB_CB(txdesc)->htt.discard) {
+       if (tx_done->discard) {
                ieee80211_free_txskb(htt->ar->hw, msdu);
                goto exit;
        }
@@ -85,7 +84,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
        if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
                info->flags |= IEEE80211_TX_STAT_ACK;
 
-       if (ATH10K_SKB_CB(txdesc)->htt.no_ack)
+       if (tx_done->no_ack)
                info->flags &= ~IEEE80211_TX_STAT_ACK;
 
        ieee80211_tx_status(htt->ar->hw, msdu);
@@ -93,36 +92,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc)
 
 exit:
        spin_lock_bh(&htt->tx_lock);
-       htt->pending_tx[ATH10K_SKB_CB(txdesc)->htt.msdu_id] = NULL;
-       ath10k_htt_tx_free_msdu_id(htt, ATH10K_SKB_CB(txdesc)->htt.msdu_id);
+       htt->pending_tx[tx_done->msdu_id] = NULL;
+       ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
        __ath10k_htt_tx_dec_pending(htt);
-       if (bitmap_empty(htt->used_msdu_ids, htt->max_num_pending_tx))
+       if (htt->num_pending_tx == 0)
                wake_up(&htt->empty_tx_wq);
        spin_unlock_bh(&htt->tx_lock);
-
-       dev_kfree_skb_any(txdesc);
-}
-
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
-                             const struct htt_tx_done *tx_done)
-{
-       struct sk_buff *txdesc;
-
-       ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
-                  tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
-
-       if (tx_done->msdu_id >= htt->max_num_pending_tx) {
-               ath10k_warn("warning: msdu_id %d too big, ignoring\n",
-                           tx_done->msdu_id);
-               return;
-       }
-
-       txdesc = htt->pending_tx[tx_done->msdu_id];
-
-       ATH10K_SKB_CB(txdesc)->htt.discard = tx_done->discard;
-       ATH10K_SKB_CB(txdesc)->htt.no_ack = tx_done->no_ack;
-
-       ath10k_txrx_tx_unref(htt, txdesc);
 }
 
 static const u8 rx_legacy_rate_idx[] = {
@@ -293,6 +268,8 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
                   status->vht_nss,
                   status->freq,
                   status->band);
+       ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+                       info->skb->data, info->skb->len);
 
        ieee80211_rx(ar->hw, info->skb);
 }
index e78632a76df7bd4451cadaada87740bd4bc79991..356dc9c04c9e3981feaeb04d8df4e8f45fc36f7f 100644 (file)
@@ -19,9 +19,8 @@
 
 #include "htt.h"
 
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt, struct sk_buff *txdesc);
-void ath10k_txrx_tx_completed(struct ath10k_htt *htt,
-                             const struct htt_tx_done *tx_done);
+void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+                         const struct htt_tx_done *tx_done);
 void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info);
 
 struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
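With ath10k_txrx_tx_unref() keyed on htt_tx_done above, the completion path validates msdu_id, looks the frame up in the id-indexed pending_tx table, and wakes waiters once num_pending_tx drops to zero. A simplified userspace sketch of that bookkeeping, with illustrative names and sizes:

    #include <stdio.h>

    #define MAX_PENDING 4

    struct example_tx_done { unsigned int msdu_id; int discard; };

    static const char *pending_tx[MAX_PENDING];
    static unsigned int num_pending_tx;

    static void example_tx_unref(const struct example_tx_done *done)
    {
            const char *msdu;

            if (done->msdu_id >= MAX_PENDING) {
                    fprintf(stderr, "msdu_id %u too big, ignoring\n", done->msdu_id);
                    return;
            }

            /* look up and release the id-indexed entry */
            msdu = pending_tx[done->msdu_id];
            pending_tx[done->msdu_id] = NULL;
            num_pending_tx--;

            printf("%s msdu %u (%s)\n",
                   done->discard ? "dropping" : "completing",
                   done->msdu_id, msdu);

            if (num_pending_tx == 0)
                    printf("tx queue drained\n");
    }

    int main(void)
    {
            struct example_tx_done done = { .msdu_id = 1, .discard = 0 };

            pending_tx[1] = "frame";
            num_pending_tx = 1;
            example_tx_unref(&done);
            return 0;
    }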
index 55f90c761868ddd9fa228510efb27465d360c00b..ccf3597fd9e2511c11cce44e136c3d1c50c59d06 100644 (file)
 #include "wmi.h"
 #include "mac.h"
 
-void ath10k_wmi_flush_tx(struct ath10k *ar)
-{
-       int ret;
-
-       lockdep_assert_held(&ar->conf_mutex);
-
-       if (ar->state == ATH10K_STATE_WEDGED) {
-               ath10k_warn("wmi flush skipped - device is wedged anyway\n");
-               return;
-       }
-
-       ret = wait_event_timeout(ar->wmi.wq,
-                                atomic_read(&ar->wmi.pending_tx_count) == 0,
-                                5*HZ);
-       if (atomic_read(&ar->wmi.pending_tx_count) == 0)
-               return;
-
-       if (ret == 0)
-               ret = -ETIMEDOUT;
-
-       if (ret < 0)
-               ath10k_warn("wmi flush failed (%d)\n", ret);
-}
+/* MAIN WMI cmd track */
+static struct wmi_cmd_map wmi_cmd_map = {
+       .init_cmdid = WMI_INIT_CMDID,
+       .start_scan_cmdid = WMI_START_SCAN_CMDID,
+       .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
+       .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
+       .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+       .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
+       .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
+       .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
+       .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
+       .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
+       .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
+       .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
+       .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+       .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+       .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
+       .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+       .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
+       .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
+       .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
+       .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
+       .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
+       .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
+       .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
+       .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
+       .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
+       .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
+       .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
+       .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
+       .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
+       .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
+       .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
+       .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
+       .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
+       .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+       .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
+       .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
+       .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
+       .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
+       .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
+       .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
+       .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
+       .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
+       .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
+       .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
+       .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
+       .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
+       .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
+       .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
+       .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
+       .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
+       .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
+       .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
+       .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
+       .roam_scan_mode = WMI_ROAM_SCAN_MODE,
+       .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
+       .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
+       .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       .roam_ap_profile = WMI_ROAM_AP_PROFILE,
+       .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
+       .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+       .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
+       .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
+       .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
+       .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
+       .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
+       .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+       .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
+       .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+       .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
+       .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
+       .wlan_profile_set_hist_intvl_cmdid =
+                               WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+       .wlan_profile_get_profile_data_cmdid =
+                               WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+       .wlan_profile_enable_profile_id_cmdid =
+                               WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+       .wlan_profile_list_profile_id_cmdid =
+                               WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+       .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
+       .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
+       .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
+       .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
+       .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
+       .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+       .wow_enable_disable_wake_event_cmdid =
+                               WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+       .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
+       .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+       .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
+       .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
+       .vdev_spectral_scan_configure_cmdid =
+                               WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+       .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+       .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
+       .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
+       .network_list_offload_config_cmdid =
+                               WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+       .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
+       .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
+       .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+       .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
+       .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
+       .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
+       .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
+       .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+       .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
+       .echo_cmdid = WMI_ECHO_CMDID,
+       .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
+       .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
+       .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
+       .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
+       .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
+       .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
+       .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
+       .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
+       .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+};
+
+/* 10.X WMI cmd track */
+static struct wmi_cmd_map wmi_10x_cmd_map = {
+       .init_cmdid = WMI_10X_INIT_CMDID,
+       .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
+       .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
+       .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
+       .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+       .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
+       .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
+       .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+       .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+       .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+       .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+       .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+       .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+       .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+       .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+       .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+       .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+       .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
+       .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
+       .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
+       .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+       .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
+       .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
+       .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
+       .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
+       .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
+       .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
+       .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
+       .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
+       .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
+       .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
+       .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+       .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+       .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
+       .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
+       .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
+       .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+       .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
+       .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+       .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
+       .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+       .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+       .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
+       .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
+       .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
+       .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
+       .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
+       .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
+       .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+       .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
+       .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
+       .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
+       .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
+       .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+       .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
+       .roam_scan_rssi_change_threshold =
+                               WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
+       .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+       .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+       .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
+       .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+       .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+       .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
+       .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+       .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+       .ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED,
+       .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+       .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+       .wlan_profile_set_hist_intvl_cmdid =
+                               WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+       .wlan_profile_get_profile_data_cmdid =
+                               WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+       .wlan_profile_enable_profile_id_cmdid =
+                               WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+       .wlan_profile_list_profile_id_cmdid =
+                               WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+       .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
+       .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
+       .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
+       .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
+       .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+       .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+       .wow_enable_disable_wake_event_cmdid =
+                               WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+       .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
+       .wow_hostwakeup_from_sleep_cmdid =
+                               WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+       .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
+       .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
+       .vdev_spectral_scan_configure_cmdid =
+                               WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+       .vdev_spectral_scan_enable_cmdid =
+                               WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+       .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
+       .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+       .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+       .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+       .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+       .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+       .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+       .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+       .echo_cmdid = WMI_10X_ECHO_CMDID,
+       .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
+       .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
+       .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
+       .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+       .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
+       .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
+};
+
+/* MAIN WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_vdev_param_map = {
+       .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
+       .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+       .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
+       .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
+       .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
+       .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
+       .slot_time = WMI_VDEV_PARAM_SLOT_TIME,
+       .preamble = WMI_VDEV_PARAM_PREAMBLE,
+       .swba_time = WMI_VDEV_PARAM_SWBA_TIME,
+       .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
+       .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+       .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
+       .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
+       .wmi_vdev_oc_scheduler_air_time_limit =
+                                       WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+       .wds = WMI_VDEV_PARAM_WDS,
+       .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
+       .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+       .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+       .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+       .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
+       .chwidth = WMI_VDEV_PARAM_CHWIDTH,
+       .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
+       .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+       .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+       .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
+       .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
+       .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
+       .sgi = WMI_VDEV_PARAM_SGI,
+       .ldpc = WMI_VDEV_PARAM_LDPC,
+       .tx_stbc = WMI_VDEV_PARAM_TX_STBC,
+       .rx_stbc = WMI_VDEV_PARAM_RX_STBC,
+       .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
+       .def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
+       .nss = WMI_VDEV_PARAM_NSS,
+       .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
+       .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
+       .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
+       .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
+       .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+       .ap_keepalive_min_idle_inactive_time_secs =
+                       WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+       .ap_keepalive_max_idle_inactive_time_secs =
+                       WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+       .ap_keepalive_max_unresponsive_time_secs =
+                       WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+       .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+       .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
+       .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
+       .txbf = WMI_VDEV_PARAM_TXBF,
+       .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
+       .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
+       .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+       .ap_detect_out_of_sync_sleeping_sta_time_secs =
+                                       WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+/* 10.X WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
+       .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+       .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+       .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+       .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+       .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+       .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+       .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+       .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+       .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+       .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+       .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+       .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+       .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+       .wmi_vdev_oc_scheduler_air_time_limit =
+                               WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+       .wds = WMI_10X_VDEV_PARAM_WDS,
+       .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+       .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+       .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+       .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+       .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+       .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+       .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+       .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+       .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+       .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+       .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+       .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+       .sgi = WMI_10X_VDEV_PARAM_SGI,
+       .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+       .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+       .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+       .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+       .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+       .nss = WMI_10X_VDEV_PARAM_NSS,
+       .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+       .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+       .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+       .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+       .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+       .ap_keepalive_min_idle_inactive_time_secs =
+               WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+       .ap_keepalive_max_idle_inactive_time_secs =
+               WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+       .ap_keepalive_max_unresponsive_time_secs =
+               WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+       .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+       .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+       .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+       .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+       .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+       .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+       .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+       .ap_detect_out_of_sync_sleeping_sta_time_secs =
+               WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
+
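+/* MAIN WMI pdev param map */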
+static struct wmi_pdev_param_map wmi_pdev_param_map = {
+       .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
+       .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
+       .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+       .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+       .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
+       .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
+       .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
+       .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+       .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
+       .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
+       .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+       .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+       .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
+       .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+       .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
+       .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+       .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+       .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+       .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+       .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+       .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+       .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+       .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+       .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
+       .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
+       .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+       .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+       .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+       .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+       .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+       .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+       .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+       .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+       .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
+       .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+       .arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .dcs = WMI_PDEV_PARAM_DCS,
+       .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
+       .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+       .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+       .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+       .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+       .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
+       .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
+       .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+       .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+       .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
+       .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
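+/* 10.X WMI pdev param map */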
+static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
+       .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+       .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+       .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+       .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+       .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+       .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+       .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+       .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+       .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+       .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+       .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+       .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+       .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+       .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+       .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+       .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+       .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+       .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+       .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+       .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+       .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+       .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+       .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+       .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+       .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+       .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+       .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+       .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+       .bcnflt_stats_update_period =
+                               WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+       .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+       .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+       .dcs = WMI_10X_PDEV_PARAM_DCS,
+       .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+       .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+       .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+       .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+       .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+       .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+       .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+       .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+       .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+       .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+       .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+       .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+};
 
 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
 {
@@ -85,18 +526,14 @@ static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
 static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
 {
        dev_kfree_skb(skb);
-
-       if (atomic_sub_return(1, &ar->wmi.pending_tx_count) == 0)
-               wake_up(&ar->wmi.wq);
 }
 
-/* WMI command API */
-static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
-                              enum wmi_cmd_id cmd_id)
+static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+                                     u32 cmd_id)
 {
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
        struct wmi_cmd_hdr *cmd_hdr;
-       int status;
+       int ret;
        u32 cmd = 0;
 
        if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
@@ -107,25 +544,146 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
        cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
        cmd_hdr->cmd_id = __cpu_to_le32(cmd);
 
-       if (atomic_add_return(1, &ar->wmi.pending_tx_count) >
-           WMI_MAX_PENDING_TX_COUNT) {
-               /* avoid using up memory when FW hangs */
-               atomic_dec(&ar->wmi.pending_tx_count);
-               return -EBUSY;
+       memset(skb_cb, 0, sizeof(*skb_cb));
+       ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+       trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len, ret);
+
+       if (ret)
+               goto err_pull;
+
+       return 0;
+
+err_pull:
+       skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+       return ret;
+}
+
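+/* Transmit the beacon pending on the vif, if any, without waiting for HTC
+ * TX credits. On success the beacon skb is freed and the slot cleared; on
+ * failure it is left pending so a later credit report can retry it.
+ * Must be called with ar->data_lock held. */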
+static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+{
+       struct wmi_bcn_tx_arg arg = {0};
+       int ret;
+
+       lockdep_assert_held(&arvif->ar->data_lock);
+
+       if (arvif->beacon == NULL)
+               return;
+
+       arg.vdev_id = arvif->vdev_id;
+       arg.tx_rate = 0;
+       arg.tx_power = 0;
+       arg.bcn = arvif->beacon->data;
+       arg.bcn_len = arvif->beacon->len;
+
+       ret = ath10k_wmi_beacon_send_nowait(arvif->ar, &arg);
+       if (ret)
+               return;
+
+       dev_kfree_skb_any(arvif->beacon);
+       arvif->beacon = NULL;
+}
+
+static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+                                      struct ieee80211_vif *vif)
+{
+       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+       ath10k_wmi_tx_beacon_nowait(arvif);
+}
+
+static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+{
+       spin_lock_bh(&ar->data_lock);
+       ieee80211_iterate_active_interfaces_atomic(ar->hw,
+                                                  IEEE80211_IFACE_ITER_NORMAL,
+                                                  ath10k_wmi_tx_beacons_iter,
+                                                  NULL);
+       spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+{
+       /* try to send pending beacons first. they take priority */
+       ath10k_wmi_tx_beacons_nowait(ar);
+
+       wake_up(&ar->wmi.tx_credits_wq);
+}
+
+static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
+                              u32 cmd_id)
+{
+       int ret = -EOPNOTSUPP;
+
+       might_sleep();
+
+       if (cmd_id == WMI_CMD_UNSUPPORTED) {
+               ath10k_warn("wmi command %d is not supported by firmware\n",
+                           cmd_id);
+               return ret;
        }
 
-       memset(skb_cb, 0, sizeof(*skb_cb));
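+       /* Retry as long as the send attempt reports -EAGAIN (typically
+        * because no HTC TX credits are available yet); the wait queue is
+        * woken from the credit-report callback and the attempt is given
+        * up after three seconds. Pending beacons are flushed first so
+        * they are not starved by ordinary WMI commands. */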
+       wait_event_timeout(ar->wmi.tx_credits_wq, ({
+               /* try to send pending beacons first. they take priority */
+               ath10k_wmi_tx_beacons_nowait(ar);
 
-       trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
+               ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+               (ret != -EAGAIN);
+       }), 3*HZ);
 
-       status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
-       if (status) {
+       if (ret)
                dev_kfree_skb_any(skb);
-               atomic_dec(&ar->wmi.pending_tx_count);
-               return status;
+
+       return ret;
+}
+
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+       int ret = 0;
+       struct wmi_mgmt_tx_cmd *cmd;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff *wmi_skb;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       int len;
+       u16 fc;
+
+       hdr = (struct ieee80211_hdr *)skb->data;
+       fc = le16_to_cpu(hdr->frame_control);
+
+       if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+               return -EINVAL;
+
+       len = sizeof(cmd->hdr) + skb->len;
+       len = round_up(len, 4);
+
+       wmi_skb = ath10k_wmi_alloc_skb(len);
+       if (!wmi_skb)
+               return -ENOMEM;
+
+       cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;
+
+       cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
+       cmd->hdr.tx_rate = 0;
+       cmd->hdr.tx_power = 0;
+       cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
+
+       memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
+       memcpy(cmd->buf, skb->data, skb->len);
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+                  wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
+                  fc & IEEE80211_FCTL_STYPE);
+
+       /* Send the management frame buffer to the target */
+       ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               return ret;
        }
 
-       return 0;
+       /* TODO: report tx status to mac80211 - temporary just ACK */
+       info->flags |= IEEE80211_TX_STAT_ACK;
+       ieee80211_tx_status_irqsafe(ar->hw, skb);
+
+       return ret;
 }
 
 static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
@@ -315,7 +873,9 @@ static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
 
 static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 {
-       struct wmi_mgmt_rx_event *event = (struct wmi_mgmt_rx_event *)skb->data;
+       struct wmi_mgmt_rx_event_v1 *ev_v1;
+       struct wmi_mgmt_rx_event_v2 *ev_v2;
+       struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr;
        u32 rx_status;
@@ -325,13 +885,24 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
        u32 rate;
        u32 buf_len;
        u16 fc;
+       int pull_len;
+
+       if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+               ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+               ev_hdr = &ev_v2->hdr.v1;
+               pull_len = sizeof(*ev_v2);
+       } else {
+               ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+               ev_hdr = &ev_v1->hdr;
+               pull_len = sizeof(*ev_v1);
+       }
 
-       channel   = __le32_to_cpu(event->hdr.channel);
-       buf_len   = __le32_to_cpu(event->hdr.buf_len);
-       rx_status = __le32_to_cpu(event->hdr.status);
-       snr       = __le32_to_cpu(event->hdr.snr);
-       phy_mode  = __le32_to_cpu(event->hdr.phy_mode);
-       rate      = __le32_to_cpu(event->hdr.rate);
+       channel   = __le32_to_cpu(ev_hdr->channel);
+       buf_len   = __le32_to_cpu(ev_hdr->buf_len);
+       rx_status = __le32_to_cpu(ev_hdr->status);
+       snr       = __le32_to_cpu(ev_hdr->snr);
+       phy_mode  = __le32_to_cpu(ev_hdr->phy_mode);
+       rate      = __le32_to_cpu(ev_hdr->rate);
 
        memset(status, 0, sizeof(*status));
 
@@ -358,7 +929,7 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
        status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
        status->rate_idx = get_rate_idx(rate, status->band);
 
-       skb_pull(skb, sizeof(event->hdr));
+       skb_pull(skb, pull_len);
 
        hdr = (struct ieee80211_hdr *)skb->data;
        fc = le16_to_cpu(hdr->frame_control);
@@ -734,10 +1305,8 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
        int i = -1;
        struct wmi_bcn_info *bcn_info;
        struct ath10k_vif *arvif;
-       struct wmi_bcn_tx_arg arg;
        struct sk_buff *bcn;
        int vdev_id = 0;
-       int ret;
 
        ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
 
@@ -794,17 +1363,17 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
                ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);
 
-               arg.vdev_id = arvif->vdev_id;
-               arg.tx_rate = 0;
-               arg.tx_power = 0;
-               arg.bcn = bcn->data;
-               arg.bcn_len = bcn->len;
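+               /* Beacons are no longer transmitted synchronously from the
+                * SWBA event: stash the latest beacon on the vif (warning if
+                * the previous one was never sent) and let the nowait/credit
+                * path push it out. */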
+               spin_lock_bh(&ar->data_lock);
+               if (arvif->beacon) {
+                       ath10k_warn("SWBA overrun on vdev %d\n",
+                                   arvif->vdev_id);
+                       dev_kfree_skb_any(arvif->beacon);
+               }
 
-               ret = ath10k_wmi_beacon_send(ar, &arg);
-               if (ret)
-                       ath10k_warn("could not send beacon (%d)\n", ret);
+               arvif->beacon = bcn;
 
-               dev_kfree_skb_any(bcn);
+               ath10k_wmi_tx_beacon_nowait(arvif);
+               spin_unlock_bh(&ar->data_lock);
        }
 }
 
@@ -919,6 +1488,55 @@ static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
        ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
 }
 
+static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
+                                             struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+}
+
+static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
+                                            struct sk_buff *skb)
+{
+       ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+}
+
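+/* Allocate one DMA-coherent chunk of host memory requested by the firmware
+ * (see the mem_reqs in the 10.x service ready event) and record it in
+ * ar->wmi.mem_chunks so it can be advertised in the WMI init command and
+ * released in ath10k_wmi_detach(). */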
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+                                     u32 num_units, u32 unit_len)
+{
+       dma_addr_t paddr;
+       u32 pool_size;
+       int idx = ar->wmi.num_mem_chunks;
+
+       pool_size = num_units * round_up(unit_len, 4);
+
+       if (!pool_size)
+               return -EINVAL;
+
+       ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
+                                                          pool_size,
+                                                          &paddr,
+                                                          GFP_ATOMIC);
+       if (!ar->wmi.mem_chunks[idx].vaddr) {
+               ath10k_warn("failed to allocate memory chunk\n");
+               return -ENOMEM;
+       }
+
+       memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
+
+       ar->wmi.mem_chunks[idx].paddr = paddr;
+       ar->wmi.mem_chunks[idx].len = pool_size;
+       ar->wmi.mem_chunks[idx].req_id = req_id;
+       ar->wmi.num_mem_chunks++;
+
+       return 0;
+}
+
 static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
                                              struct sk_buff *skb)
 {
@@ -943,6 +1561,10 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
        ar->phy_capability = __le32_to_cpu(ev->phy_capability);
        ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
 
+       /* only manually set fw features when not using FW IE format */
+       if (ar->fw_api == 1 && ar->fw_version_build > 636)
+               set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
+
        if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
                ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
                            ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
@@ -987,6 +1609,108 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
        complete(&ar->wmi.service_ready);
 }
 
+static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
+                                                 struct sk_buff *skb)
+{
+       u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+       int ret;
+       struct wmi_service_ready_event_10x *ev = (void *)skb->data;
+
+       if (skb->len < sizeof(*ev)) {
+               ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+                           skb->len, sizeof(*ev));
+               return;
+       }
+
+       ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
+       ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
+       ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
+       ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
+       ar->fw_version_major =
+               (__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
+       ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
+       ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+       ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+
+       if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+               ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+                           ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+               ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+       }
+
+       ar->ath_common.regulatory.current_rd =
+               __le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
+
+       ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
+                                     sizeof(ev->wmi_service_bitmap));
+
+       if (strlen(ar->hw->wiphy->fw_version) == 0) {
+               snprintf(ar->hw->wiphy->fw_version,
+                        sizeof(ar->hw->wiphy->fw_version),
+                        "%u.%u",
+                        ar->fw_version_major,
+                        ar->fw_version_minor);
+       }
+
+       num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
+
+       if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
+               ath10k_warn("requested memory chunks number (%d) exceeds the limit\n",
+                           num_mem_reqs);
+               return;
+       }
+
+       if (!num_mem_reqs)
+               goto exit;
+
+       ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
+                  num_mem_reqs);
+
+       for (i = 0; i < num_mem_reqs; ++i) {
+               req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
+               num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
+               unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
+               num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
+
+               if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+                       /* number of units to allocate is the number of
+                        * peers, plus 1 extra for the self peer on target */
+                       /* this value must stay in sync between host and
+                        * target, otherwise the two can drift apart */
+                       num_units = TARGET_10X_NUM_PEERS + 1;
+               else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
+                       num_units = TARGET_10X_NUM_VDEVS + 1;
+
+               ath10k_dbg(ATH10K_DBG_WMI,
+                          "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
+                          req_id,
+                          __le32_to_cpu(ev->mem_reqs[i].num_units),
+                          num_unit_info,
+                          unit_size,
+                          num_units);
+
+               ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
+                                               unit_size);
+               if (ret)
+                       return;
+       }
+
+exit:
+       ath10k_dbg(ATH10K_DBG_WMI,
+                  "wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
+                  __le32_to_cpu(ev->sw_version),
+                  __le32_to_cpu(ev->abi_version),
+                  __le32_to_cpu(ev->phy_capability),
+                  __le32_to_cpu(ev->ht_cap_info),
+                  __le32_to_cpu(ev->vht_cap_info),
+                  __le32_to_cpu(ev->vht_supp_mcs),
+                  __le32_to_cpu(ev->sys_cap_info),
+                  __le32_to_cpu(ev->num_mem_reqs),
+                  __le32_to_cpu(ev->num_rf_chains));
+
+       complete(&ar->wmi.service_ready);
+}
+
 static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
@@ -1007,7 +1731,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
        return 0;
 }
 
-static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_cmd_hdr *cmd_hdr;
        enum wmi_event_id id;
@@ -1126,64 +1850,158 @@ static void ath10k_wmi_event_process(struct ath10k *ar, struct sk_buff *skb)
        dev_kfree_skb(skb);
 }
 
-static void ath10k_wmi_event_work(struct work_struct *work)
+static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
-       struct ath10k *ar = container_of(work, struct ath10k,
-                                        wmi.wmi_event_work);
-       struct sk_buff *skb;
+       struct wmi_cmd_hdr *cmd_hdr;
+       enum wmi_10x_event_id id;
+       u16 len;
 
-       for (;;) {
-               skb = skb_dequeue(&ar->wmi.wmi_event_list);
-               if (!skb)
-                       break;
+       cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+       id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
 
-               ath10k_wmi_event_process(ar, skb);
-       }
-}
+       if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+               return;
 
-static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
-{
-       struct wmi_cmd_hdr *cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
-       enum wmi_event_id event_id;
+       len = skb->len;
 
-       event_id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+       trace_ath10k_wmi_event(id, skb->data, skb->len);
 
-       /* some events require to be handled ASAP
-        * thus can't be defered to a worker thread */
-       switch (event_id) {
-       case WMI_HOST_SWBA_EVENTID:
-       case WMI_MGMT_RX_EVENTID:
-               ath10k_wmi_event_process(ar, skb);
+       switch (id) {
+       case WMI_10X_MGMT_RX_EVENTID:
+               ath10k_wmi_event_mgmt_rx(ar, skb);
+               /* mgmt_rx() owns the skb now! */
                return;
+       case WMI_10X_SCAN_EVENTID:
+               ath10k_wmi_event_scan(ar, skb);
+               break;
+       case WMI_10X_CHAN_INFO_EVENTID:
+               ath10k_wmi_event_chan_info(ar, skb);
+               break;
+       case WMI_10X_ECHO_EVENTID:
+               ath10k_wmi_event_echo(ar, skb);
+               break;
+       case WMI_10X_DEBUG_MESG_EVENTID:
+               ath10k_wmi_event_debug_mesg(ar, skb);
+               break;
+       case WMI_10X_UPDATE_STATS_EVENTID:
+               ath10k_wmi_event_update_stats(ar, skb);
+               break;
+       case WMI_10X_VDEV_START_RESP_EVENTID:
+               ath10k_wmi_event_vdev_start_resp(ar, skb);
+               break;
+       case WMI_10X_VDEV_STOPPED_EVENTID:
+               ath10k_wmi_event_vdev_stopped(ar, skb);
+               break;
+       case WMI_10X_PEER_STA_KICKOUT_EVENTID:
+               ath10k_wmi_event_peer_sta_kickout(ar, skb);
+               break;
+       case WMI_10X_HOST_SWBA_EVENTID:
+               ath10k_wmi_event_host_swba(ar, skb);
+               break;
+       case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
+               ath10k_wmi_event_tbttoffset_update(ar, skb);
+               break;
+       case WMI_10X_PHYERR_EVENTID:
+               ath10k_wmi_event_phyerr(ar, skb);
+               break;
+       case WMI_10X_ROAM_EVENTID:
+               ath10k_wmi_event_roam(ar, skb);
+               break;
+       case WMI_10X_PROFILE_MATCH:
+               ath10k_wmi_event_profile_match(ar, skb);
+               break;
+       case WMI_10X_DEBUG_PRINT_EVENTID:
+               ath10k_wmi_event_debug_print(ar, skb);
+               break;
+       case WMI_10X_PDEV_QVIT_EVENTID:
+               ath10k_wmi_event_pdev_qvit(ar, skb);
+               break;
+       case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
+               ath10k_wmi_event_wlan_profile_data(ar, skb);
+               break;
+       case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
+               ath10k_wmi_event_rtt_measurement_report(ar, skb);
+               break;
+       case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
+               ath10k_wmi_event_tsf_measurement_report(ar, skb);
+               break;
+       case WMI_10X_RTT_ERROR_REPORT_EVENTID:
+               ath10k_wmi_event_rtt_error_report(ar, skb);
+               break;
+       case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
+               ath10k_wmi_event_wow_wakeup_host(ar, skb);
+               break;
+       case WMI_10X_DCS_INTERFERENCE_EVENTID:
+               ath10k_wmi_event_dcs_interference(ar, skb);
+               break;
+       case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
+               ath10k_wmi_event_pdev_tpc_config(ar, skb);
+               break;
+       case WMI_10X_INST_RSSI_STATS_EVENTID:
+               ath10k_wmi_event_inst_rssi_stats(ar, skb);
+               break;
+       case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
+               ath10k_wmi_event_vdev_standby_req(ar, skb);
+               break;
+       case WMI_10X_VDEV_RESUME_REQ_EVENTID:
+               ath10k_wmi_event_vdev_resume_req(ar, skb);
+               break;
+       case WMI_10X_SERVICE_READY_EVENTID:
+               ath10k_wmi_10x_service_ready_event_rx(ar, skb);
+               break;
+       case WMI_10X_READY_EVENTID:
+               ath10k_wmi_ready_event_rx(ar, skb);
+               break;
        default:
+               ath10k_warn("Unknown eventid: %d\n", id);
                break;
        }
 
-       skb_queue_tail(&ar->wmi.wmi_event_list, skb);
-       queue_work(ar->workqueue, &ar->wmi.wmi_event_work);
+       dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+       if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+               ath10k_wmi_10x_process_rx(ar, skb);
+       else
+               ath10k_wmi_main_process_rx(ar, skb);
 }
 
 /* WMI Initialization functions */
 int ath10k_wmi_attach(struct ath10k *ar)
 {
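+       /* Select the command and parameter ID maps matching the firmware
+        * flavour; the WMI helpers below use these indirections instead of
+        * the raw enum values. */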
+       if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+               ar->wmi.cmd = &wmi_10x_cmd_map;
+               ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+               ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+       } else {
+               ar->wmi.cmd = &wmi_cmd_map;
+               ar->wmi.vdev_param = &wmi_vdev_param_map;
+               ar->wmi.pdev_param = &wmi_pdev_param_map;
+       }
+
        init_completion(&ar->wmi.service_ready);
        init_completion(&ar->wmi.unified_ready);
-       init_waitqueue_head(&ar->wmi.wq);
-
-       skb_queue_head_init(&ar->wmi.wmi_event_list);
-       INIT_WORK(&ar->wmi.wmi_event_work, ath10k_wmi_event_work);
+       init_waitqueue_head(&ar->wmi.tx_credits_wq);
 
        return 0;
 }
 
 void ath10k_wmi_detach(struct ath10k *ar)
 {
-       /* HTC should've drained the packets already */
-       if (WARN_ON(atomic_read(&ar->wmi.pending_tx_count) > 0))
-               ath10k_warn("there are still pending packets\n");
+       int i;
+
+       /* free the host memory chunks requested by firmware */
+       for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+               dma_free_coherent(ar->dev,
+                                 ar->wmi.mem_chunks[i].len,
+                                 ar->wmi.mem_chunks[i].vaddr,
+                                 ar->wmi.mem_chunks[i].paddr);
+       }
 
-       cancel_work_sync(&ar->wmi.wmi_event_work);
-       skb_queue_purge(&ar->wmi.wmi_event_list);
+       ar->wmi.num_mem_chunks = 0;
 }
 
 int ath10k_wmi_connect_htc_service(struct ath10k *ar)
@@ -1198,6 +2016,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
        /* these fields are the same for all service endpoints */
        conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
        conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+       conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
 
        /* connect to control service */
        conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
@@ -1234,7 +2053,8 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
                   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
                   rd, rd2g, rd5g, ctl2g, ctl5g);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->pdev_set_regdomain_cmdid);
 }
 
 int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -1264,7 +2084,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
                   "wmi set channel mode %d freq %d\n",
                   arg->mode, arg->freq);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->pdev_set_channel_cmdid);
 }
 
 int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
@@ -1279,7 +2100,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
        cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
        cmd->suspend_opt = WMI_PDEV_SUSPEND;
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
 }
 
 int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
@@ -1290,15 +2111,19 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
        if (skb == NULL)
                return -ENOMEM;
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
 }
 
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
-                             u32 value)
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
 {
        struct wmi_pdev_set_param_cmd *cmd;
        struct sk_buff *skb;
 
+       if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
+               ath10k_warn("pdev param %d not supported by firmware\n", id);
+               return -EOPNOTSUPP;
+       }
+
        skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
        if (!skb)
                return -ENOMEM;
@@ -1309,15 +2134,16 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
 
        ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
                   id, value);
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
 }
 
-int ath10k_wmi_cmd_init(struct ath10k *ar)
+static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
 {
        struct wmi_init_cmd *cmd;
        struct sk_buff *buf;
        struct wmi_resource_config config = {};
-       u32 val;
+       u32 len, val;
+       int i;
 
        config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
        config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
@@ -1370,23 +2196,158 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)
        config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
        config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
 
-       buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
+       len = sizeof(*cmd) +
+             (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+       buf = ath10k_wmi_alloc_skb(len);
        if (!buf)
                return -ENOMEM;
 
        cmd = (struct wmi_init_cmd *)buf->data;
-       cmd->num_host_mem_chunks = 0;
+
+       if (ar->wmi.num_mem_chunks == 0) {
+               cmd->num_host_mem_chunks = 0;
+               goto out;
+       }
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+                  __cpu_to_le32(ar->wmi.num_mem_chunks));
+
+       cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+       for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+               cmd->host_mem_chunks[i].ptr =
+                       __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+               cmd->host_mem_chunks[i].size =
+                       __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+               cmd->host_mem_chunks[i].req_id =
+                       __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+               ath10k_dbg(ATH10K_DBG_WMI,
+                          "wmi chunk %d len %d requested, addr 0x%x\n",
+                          i,
+                          __le32_to_cpu(cmd->host_mem_chunks[i].size),
+                          __le32_to_cpu(cmd->host_mem_chunks[i].ptr));
+       }
+out:
        memcpy(&cmd->resource_config, &config, sizeof(config));
 
        ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
-       return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
+       return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
 }
 
-static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
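+/* 10.x counterpart of ath10k_wmi_main_cmd_init(): same flow, but using the
+ * 10.x resource config layout and TARGET_10X_* defaults. */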
+static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
+{
+       struct wmi_init_cmd_10x *cmd;
+       struct sk_buff *buf;
+       struct wmi_resource_config_10x config = {};
+       u32 len, val;
+       int i;
+
+       config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+       config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+       config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+       config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+       config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+       config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+       config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+       config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+       config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
+
+       config.scan_max_pending_reqs =
+               __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+       config.bmiss_offload_max_vdev =
+               __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+       config.roam_offload_max_vdev =
+               __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+       config.roam_offload_max_ap_profiles =
+               __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+       config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+       config.num_mcast_table_elems =
+               __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+       config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+       config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+       config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+       config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
+       config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+       val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+       config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+       config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+       config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+       config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+       len = sizeof(*cmd) +
+             (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+       buf = ath10k_wmi_alloc_skb(len);
+       if (!buf)
+               return -ENOMEM;
+
+       cmd = (struct wmi_init_cmd_10x *)buf->data;
+
+       if (ar->wmi.num_mem_chunks == 0) {
+               cmd->num_host_mem_chunks = 0;
+               goto out;
+       }
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+                  __cpu_to_le32(ar->wmi.num_mem_chunks));
+
+       cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+       for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+               cmd->host_mem_chunks[i].ptr =
+                       __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+               cmd->host_mem_chunks[i].size =
+                       __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+               cmd->host_mem_chunks[i].req_id =
+                       __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+               ath10k_dbg(ATH10K_DBG_WMI,
+                          "wmi chunk %d len %d requested, addr 0x%x\n",
+                          i,
+                          __le32_to_cpu(cmd->host_mem_chunks[i].size),
+                          __le32_to_cpu(cmd->host_mem_chunks[i].ptr));
+       }
+out:
+       memcpy(&cmd->resource_config, &config, sizeof(config));
+
+       ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
+       return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
+}
+
+int ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+       int ret;
+
+       if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+               ret = ath10k_wmi_10x_cmd_init(ar);
+       else
+               ret = ath10k_wmi_main_cmd_init(ar);
+
+       return ret;
+}
+
+static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
+                                         const struct wmi_start_scan_arg *arg)
 {
        int len;
 
-       len = sizeof(struct wmi_start_scan_cmd);
+       if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+               len = sizeof(struct wmi_start_scan_cmd_10x);
+       else
+               len = sizeof(struct wmi_start_scan_cmd);
 
        if (arg->ie_len) {
                if (!arg->ie)
@@ -1446,7 +2407,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
        int len = 0;
        int i;
 
-       len = ath10k_wmi_start_scan_calc_len(arg);
+       len = ath10k_wmi_start_scan_calc_len(ar, arg);
        if (len < 0)
                return len; /* len contains error code here */
 
@@ -1478,7 +2439,14 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
        cmd->scan_ctrl_flags    = __cpu_to_le32(arg->scan_ctrl_flags);
 
        /* TLV list starts after fields included in the struct */
-       off = sizeof(*cmd);
+       /* There is just one field that differs between the two start_scan
+        * structures - burst_duration, which we do not use anyway - so
+        * there is no point in splitting the code here; just pick the
+        * offset that matches the firmware in use. */
+       if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+               off = sizeof(struct wmi_start_scan_cmd_10x);
+       else
+               off = sizeof(struct wmi_start_scan_cmd);
 
        if (arg->n_channels) {
                channels = (void *)skb->data + off;
@@ -1540,7 +2508,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
        }
 
        ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
-       return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
 }
 
 void ath10k_wmi_start_scan_init(struct ath10k *ar,
@@ -1556,7 +2524,7 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
        arg->repeat_probe_time = 0;
        arg->probe_spacing_time = 0;
        arg->idle_time = 0;
-       arg->max_scan_time = 5000;
+       arg->max_scan_time = 20000;
        arg->probe_delay = 5;
        arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
                | WMI_SCAN_EVENT_COMPLETED
@@ -1600,7 +2568,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
                   arg->req_id, arg->req_type, arg->u.scan_id);
-       return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
 }
 
 int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
@@ -1625,7 +2593,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
                   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
                   vdev_id, type, subtype, macaddr);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
 }
 
 int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
@@ -1643,20 +2611,20 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
        ath10k_dbg(ATH10K_DBG_WMI,
                   "WMI vdev delete id %d\n", vdev_id);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
 }
 
 static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
                                const struct wmi_vdev_start_request_arg *arg,
-                               enum wmi_cmd_id cmd_id)
+                               u32 cmd_id)
 {
        struct wmi_vdev_start_request_cmd *cmd;
        struct sk_buff *skb;
        const char *cmdname;
        u32 flags = 0;
 
-       if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
-           cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
+       if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
+           cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
                return -EINVAL;
        if (WARN_ON(arg->ssid && arg->ssid_len == 0))
                return -EINVAL;
@@ -1665,9 +2633,9 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
        if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
                return -EINVAL;
 
-       if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
+       if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
                cmdname = "start";
-       else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
+       else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
                cmdname = "restart";
        else
                return -EINVAL; /* should not happen, we already check cmd_id */
@@ -1718,15 +2686,17 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
 int ath10k_wmi_vdev_start(struct ath10k *ar,
                          const struct wmi_vdev_start_request_arg *arg)
 {
-       return ath10k_wmi_vdev_start_restart(ar, arg,
-                                            WMI_VDEV_START_REQUEST_CMDID);
+       u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
+
+       return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
 }
 
 int ath10k_wmi_vdev_restart(struct ath10k *ar,
                     const struct wmi_vdev_start_request_arg *arg)
 {
-       return ath10k_wmi_vdev_start_restart(ar, arg,
-                                            WMI_VDEV_RESTART_REQUEST_CMDID);
+       u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
+
+       return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
 }
 
 int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
@@ -1743,7 +2713,7 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
 
        ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
 }
 
 int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
@@ -1758,13 +2728,13 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
        cmd = (struct wmi_vdev_up_cmd *)skb->data;
        cmd->vdev_id       = __cpu_to_le32(vdev_id);
        cmd->vdev_assoc_id = __cpu_to_le32(aid);
-       memcpy(&cmd->vdev_bssid.addr, bssid, 6);
+       memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
 
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
                   vdev_id, aid, bssid);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
 }
 
 int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
@@ -1782,15 +2752,22 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi mgmt vdev down id 0x%x\n", vdev_id);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
 }
 
 int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
-                             enum wmi_vdev_param param_id, u32 param_value)
+                             u32 param_id, u32 param_value)
 {
        struct wmi_vdev_set_param_cmd *cmd;
        struct sk_buff *skb;
 
+       if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
+               ath10k_dbg(ATH10K_DBG_WMI,
+                          "vdev param %d not supported by firmware\n",
+                           param_id);
+               return -EOPNOTSUPP;
+       }
+
        skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
        if (!skb)
                return -ENOMEM;
@@ -1804,7 +2781,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
                   "wmi vdev id 0x%x set param %d value %d\n",
                   vdev_id, param_id, param_value);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
 }
 
 int ath10k_wmi_vdev_install_key(struct ath10k *ar,
@@ -1839,7 +2816,8 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi vdev install key idx %d cipher %d len %d\n",
                   arg->key_idx, arg->key_cipher, arg->key_len);
-       return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->vdev_install_key_cmdid);
 }
 
 int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -1859,7 +2837,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi peer create vdev_id %d peer_addr %pM\n",
                   vdev_id, peer_addr);
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
 }
 
 int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
@@ -1879,7 +2857,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi peer delete vdev_id %d peer_addr %pM\n",
                   vdev_id, peer_addr);
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
 }
 
 int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
@@ -1900,7 +2878,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
                   vdev_id, peer_addr, tid_bitmap);
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
 }
 
 int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
@@ -1918,13 +2896,13 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
        cmd->vdev_id     = __cpu_to_le32(vdev_id);
        cmd->param_id    = __cpu_to_le32(param_id);
        cmd->param_value = __cpu_to_le32(param_value);
-       memcpy(&cmd->peer_macaddr.addr, peer_addr, 6);
+       memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
 
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi vdev %d peer 0x%pM set param %d value %d\n",
                   vdev_id, peer_addr, param_id, param_value);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
 }
 
 int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
@@ -1945,7 +2923,8 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
                   "wmi set powersave id 0x%x mode %d\n",
                   vdev_id, psmode);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->sta_powersave_mode_cmdid);
 }
 
 int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
@@ -1967,7 +2946,8 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
                   vdev_id, param_id, value);
-       return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->sta_powersave_param_cmdid);
 }
 
 int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
@@ -1993,7 +2973,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
                   vdev_id, param_id, value, mac);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->ap_ps_peer_param_cmdid);
 }
 
 int ath10k_wmi_scan_chan_list(struct ath10k *ar,
@@ -2046,7 +3027,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
                ci->flags            |= __cpu_to_le32(flags);
        }
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
 }
 
 int ath10k_wmi_peer_assoc(struct ath10k *ar,
@@ -2105,10 +3086,11 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
        ath10k_dbg(ATH10K_DBG_WMI,
                   "wmi peer assoc vdev %d addr %pM\n",
                   arg->vdev_id, arg->addr);
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
 }
 
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+                                 const struct wmi_bcn_tx_arg *arg)
 {
        struct wmi_bcn_tx_cmd *cmd;
        struct sk_buff *skb;
@@ -2124,7 +3106,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg)
        cmd->hdr.bcn_len  = __cpu_to_le32(arg->bcn_len);
        memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
 
-       return ath10k_wmi_cmd_send(ar, skb, WMI_BCN_TX_CMDID);
+       return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
 }
 
 static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
@@ -2155,7 +3137,8 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
        ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
 
        ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
-       return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb,
+                                  ar->wmi.cmd->pdev_set_wmm_params_cmdid);
 }
 
 int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
@@ -2171,7 +3154,7 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
        cmd->stats_id = __cpu_to_le32(stats_id);
 
        ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
-       return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
 }
 
 int ath10k_wmi_force_fw_hang(struct ath10k *ar,
@@ -2190,5 +3173,5 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
 
        ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
                   type, delay_ms);
-       return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
+       return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
 }
index 2c5a4f8daf2ee4b80e9a9e84391586e638389271..78c991aec7f93d6419cc62d1b09ea2f63033b627 100644 (file)
@@ -208,6 +208,118 @@ struct wmi_mac_addr {
        (c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
        } while (0)
 
+struct wmi_cmd_map {
+       u32 init_cmdid;
+       u32 start_scan_cmdid;
+       u32 stop_scan_cmdid;
+       u32 scan_chan_list_cmdid;
+       u32 scan_sch_prio_tbl_cmdid;
+       u32 pdev_set_regdomain_cmdid;
+       u32 pdev_set_channel_cmdid;
+       u32 pdev_set_param_cmdid;
+       u32 pdev_pktlog_enable_cmdid;
+       u32 pdev_pktlog_disable_cmdid;
+       u32 pdev_set_wmm_params_cmdid;
+       u32 pdev_set_ht_cap_ie_cmdid;
+       u32 pdev_set_vht_cap_ie_cmdid;
+       u32 pdev_set_dscp_tid_map_cmdid;
+       u32 pdev_set_quiet_mode_cmdid;
+       u32 pdev_green_ap_ps_enable_cmdid;
+       u32 pdev_get_tpc_config_cmdid;
+       u32 pdev_set_base_macaddr_cmdid;
+       u32 vdev_create_cmdid;
+       u32 vdev_delete_cmdid;
+       u32 vdev_start_request_cmdid;
+       u32 vdev_restart_request_cmdid;
+       u32 vdev_up_cmdid;
+       u32 vdev_stop_cmdid;
+       u32 vdev_down_cmdid;
+       u32 vdev_set_param_cmdid;
+       u32 vdev_install_key_cmdid;
+       u32 peer_create_cmdid;
+       u32 peer_delete_cmdid;
+       u32 peer_flush_tids_cmdid;
+       u32 peer_set_param_cmdid;
+       u32 peer_assoc_cmdid;
+       u32 peer_add_wds_entry_cmdid;
+       u32 peer_remove_wds_entry_cmdid;
+       u32 peer_mcast_group_cmdid;
+       u32 bcn_tx_cmdid;
+       u32 pdev_send_bcn_cmdid;
+       u32 bcn_tmpl_cmdid;
+       u32 bcn_filter_rx_cmdid;
+       u32 prb_req_filter_rx_cmdid;
+       u32 mgmt_tx_cmdid;
+       u32 prb_tmpl_cmdid;
+       u32 addba_clear_resp_cmdid;
+       u32 addba_send_cmdid;
+       u32 addba_status_cmdid;
+       u32 delba_send_cmdid;
+       u32 addba_set_resp_cmdid;
+       u32 send_singleamsdu_cmdid;
+       u32 sta_powersave_mode_cmdid;
+       u32 sta_powersave_param_cmdid;
+       u32 sta_mimo_ps_mode_cmdid;
+       u32 pdev_dfs_enable_cmdid;
+       u32 pdev_dfs_disable_cmdid;
+       u32 roam_scan_mode;
+       u32 roam_scan_rssi_threshold;
+       u32 roam_scan_period;
+       u32 roam_scan_rssi_change_threshold;
+       u32 roam_ap_profile;
+       u32 ofl_scan_add_ap_profile;
+       u32 ofl_scan_remove_ap_profile;
+       u32 ofl_scan_period;
+       u32 p2p_dev_set_device_info;
+       u32 p2p_dev_set_discoverability;
+       u32 p2p_go_set_beacon_ie;
+       u32 p2p_go_set_probe_resp_ie;
+       u32 p2p_set_vendor_ie_data_cmdid;
+       u32 ap_ps_peer_param_cmdid;
+       u32 ap_ps_peer_uapsd_coex_cmdid;
+       u32 peer_rate_retry_sched_cmdid;
+       u32 wlan_profile_trigger_cmdid;
+       u32 wlan_profile_set_hist_intvl_cmdid;
+       u32 wlan_profile_get_profile_data_cmdid;
+       u32 wlan_profile_enable_profile_id_cmdid;
+       u32 wlan_profile_list_profile_id_cmdid;
+       u32 pdev_suspend_cmdid;
+       u32 pdev_resume_cmdid;
+       u32 add_bcn_filter_cmdid;
+       u32 rmv_bcn_filter_cmdid;
+       u32 wow_add_wake_pattern_cmdid;
+       u32 wow_del_wake_pattern_cmdid;
+       u32 wow_enable_disable_wake_event_cmdid;
+       u32 wow_enable_cmdid;
+       u32 wow_hostwakeup_from_sleep_cmdid;
+       u32 rtt_measreq_cmdid;
+       u32 rtt_tsf_cmdid;
+       u32 vdev_spectral_scan_configure_cmdid;
+       u32 vdev_spectral_scan_enable_cmdid;
+       u32 request_stats_cmdid;
+       u32 set_arp_ns_offload_cmdid;
+       u32 network_list_offload_config_cmdid;
+       u32 gtk_offload_cmdid;
+       u32 csa_offload_enable_cmdid;
+       u32 csa_offload_chanswitch_cmdid;
+       u32 chatter_set_mode_cmdid;
+       u32 peer_tid_addba_cmdid;
+       u32 peer_tid_delba_cmdid;
+       u32 sta_dtim_ps_method_cmdid;
+       u32 sta_uapsd_auto_trig_cmdid;
+       u32 sta_keepalive_cmd;
+       u32 echo_cmdid;
+       u32 pdev_utf_cmdid;
+       u32 dbglog_cfg_cmdid;
+       u32 pdev_qvit_cmdid;
+       u32 pdev_ftm_intg_cmdid;
+       u32 vdev_set_keepalive_cmdid;
+       u32 vdev_get_keepalive_cmdid;
+       u32 force_fw_hang_cmdid;
+       u32 gpio_config_cmdid;
+       u32 gpio_output_cmdid;
+};
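
struct wmi_cmd_map is the core of the dual-firmware support: call sites stop hard-coding enum values and instead read ar->wmi.cmd-><name>_cmdid, while attach-time code points ar->wmi.cmd at the table matching the detected firmware. A self-contained, simplified sketch of that indirection is shown below; the structure is cut down to two slots and the numeric IDs are made up for illustration (the real tables live in wmi.c):

#include <stdio.h>
#include <stdint.h>

/* Cut-down stand-in for struct wmi_cmd_map. */
struct cmd_map {
	uint32_t init_cmdid;
	uint32_t start_scan_cmdid;
};

/* Hypothetical ID values, for illustration only. */
static const struct cmd_map main_fw_map = { .init_cmdid = 0x01, .start_scan_cmdid = 0x02 };
static const struct cmd_map fw_10x_map  = { .init_cmdid = 0x11, .start_scan_cmdid = 0x12 };

/* Call sites never hard-code an ID, they go through the map. */
static void send_start_scan(const struct cmd_map *cmd)
{
	printf("sending start_scan as cmdid 0x%x\n",
	       (unsigned)cmd->start_scan_cmdid);
}

int main(void)
{
	int fw_is_10x = 1;	/* pretend feature detection reported 10.X */
	const struct cmd_map *cmd = fw_is_10x ? &fw_10x_map : &main_fw_map;

	send_start_scan(cmd);
	return 0;
}
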
+
 /*
  * wmi command groups.
  */
@@ -247,7 +359,9 @@ enum wmi_cmd_group {
 #define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
 #define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
 
-/* Command IDs and commande events. */
+#define WMI_CMD_UNSUPPORTED 0
+
+/* Command IDs and command events for MAIN FW. */
 enum wmi_cmd_id {
        WMI_INIT_CMDID = 0x1,
 
@@ -488,6 +602,217 @@ enum wmi_event_id {
        WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
 };
 
+/* Command IDs and command events for 10.X firmware */
+enum wmi_10x_cmd_id {
+       WMI_10X_START_CMDID = 0x9000,
+       WMI_10X_END_CMDID = 0x9FFF,
+
+       /* initialize the wlan sub system */
+       WMI_10X_INIT_CMDID,
+
+       /* Scan specific commands */
+
+       WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
+       WMI_10X_STOP_SCAN_CMDID,
+       WMI_10X_SCAN_CHAN_LIST_CMDID,
+       WMI_10X_ECHO_CMDID,
+
+       /* PDEV(physical device) specific commands */
+       WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+       WMI_10X_PDEV_SET_CHANNEL_CMDID,
+       WMI_10X_PDEV_SET_PARAM_CMDID,
+       WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+       WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+       WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+       WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+       WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+       WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+       WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+       WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+       WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+       WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+
+       /* VDEV(virtual device) specific commands */
+       WMI_10X_VDEV_CREATE_CMDID,
+       WMI_10X_VDEV_DELETE_CMDID,
+       WMI_10X_VDEV_START_REQUEST_CMDID,
+       WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+       WMI_10X_VDEV_UP_CMDID,
+       WMI_10X_VDEV_STOP_CMDID,
+       WMI_10X_VDEV_DOWN_CMDID,
+       WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
+       WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
+       WMI_10X_VDEV_SET_PARAM_CMDID,
+       WMI_10X_VDEV_INSTALL_KEY_CMDID,
+
+       /* peer specific commands */
+       WMI_10X_PEER_CREATE_CMDID,
+       WMI_10X_PEER_DELETE_CMDID,
+       WMI_10X_PEER_FLUSH_TIDS_CMDID,
+       WMI_10X_PEER_SET_PARAM_CMDID,
+       WMI_10X_PEER_ASSOC_CMDID,
+       WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+       WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+       WMI_10X_PEER_MCAST_GROUP_CMDID,
+
+       /* beacon/management specific commands */
+
+       WMI_10X_BCN_TX_CMDID,
+       WMI_10X_BCN_PRB_TMPL_CMDID,
+       WMI_10X_BCN_FILTER_RX_CMDID,
+       WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+       WMI_10X_MGMT_TX_CMDID,
+
+       /* commands to directly control ba negotiation directly from host. */
+       WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+       WMI_10X_ADDBA_SEND_CMDID,
+       WMI_10X_ADDBA_STATUS_CMDID,
+       WMI_10X_DELBA_SEND_CMDID,
+       WMI_10X_ADDBA_SET_RESP_CMDID,
+       WMI_10X_SEND_SINGLEAMSDU_CMDID,
+
+       /* Station power save specific config */
+       WMI_10X_STA_POWERSAVE_MODE_CMDID,
+       WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+       WMI_10X_STA_MIMO_PS_MODE_CMDID,
+
+       /* set debug log config */
+       WMI_10X_DBGLOG_CFG_CMDID,
+
+       /* DFS-specific commands */
+       WMI_10X_PDEV_DFS_ENABLE_CMDID,
+       WMI_10X_PDEV_DFS_DISABLE_CMDID,
+
+       /* QVIT specific command id */
+       WMI_10X_PDEV_QVIT_CMDID,
+
+       /* Offload Scan and Roaming related  commands */
+       WMI_10X_ROAM_SCAN_MODE,
+       WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+       WMI_10X_ROAM_SCAN_PERIOD,
+       WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       WMI_10X_ROAM_AP_PROFILE,
+       WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+       WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+       WMI_10X_OFL_SCAN_PERIOD,
+
+       /* P2P specific commands */
+       WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+       WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+       WMI_10X_P2P_GO_SET_BEACON_IE,
+       WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+
+       /* AP power save specific config */
+       WMI_10X_AP_PS_PEER_PARAM_CMDID,
+       WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+       /* Rate-control specific commands */
+       WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+
+       /* WLAN Profiling commands. */
+       WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+       WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+       WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+       WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+       WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+       /* Suspend resume command Ids */
+       WMI_10X_PDEV_SUSPEND_CMDID,
+       WMI_10X_PDEV_RESUME_CMDID,
+
+       /* Beacon filter commands */
+       WMI_10X_ADD_BCN_FILTER_CMDID,
+       WMI_10X_RMV_BCN_FILTER_CMDID,
+
+       /* WOW Specific WMI commands*/
+       WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+       WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+       WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+       WMI_10X_WOW_ENABLE_CMDID,
+       WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+       /* RTT measurement related cmd */
+       WMI_10X_RTT_MEASREQ_CMDID,
+       WMI_10X_RTT_TSF_CMDID,
+
+       /* transmit beacon by value */
+       WMI_10X_PDEV_SEND_BCN_CMDID,
+
+       /* F/W stats */
+       WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+       WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+       WMI_10X_REQUEST_STATS_CMDID,
+
+       /* GPIO Configuration */
+       WMI_10X_GPIO_CONFIG_CMDID,
+       WMI_10X_GPIO_OUTPUT_CMDID,
+
+       WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
+};
+
+enum wmi_10x_event_id {
+       WMI_10X_SERVICE_READY_EVENTID = 0x8000,
+       WMI_10X_READY_EVENTID,
+       WMI_10X_START_EVENTID = 0x9000,
+       WMI_10X_END_EVENTID = 0x9FFF,
+
+       /* Scan specific events */
+       WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
+       WMI_10X_ECHO_EVENTID,
+       WMI_10X_DEBUG_MESG_EVENTID,
+       WMI_10X_UPDATE_STATS_EVENTID,
+
+       /* Instantaneous RSSI event */
+       WMI_10X_INST_RSSI_STATS_EVENTID,
+
+       /* VDEV specific events */
+       WMI_10X_VDEV_START_RESP_EVENTID,
+       WMI_10X_VDEV_STANDBY_REQ_EVENTID,
+       WMI_10X_VDEV_RESUME_REQ_EVENTID,
+       WMI_10X_VDEV_STOPPED_EVENTID,
+
+       /* peer  specific events */
+       WMI_10X_PEER_STA_KICKOUT_EVENTID,
+
+       /* beacon/mgmt specific events */
+       WMI_10X_HOST_SWBA_EVENTID,
+       WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
+       WMI_10X_MGMT_RX_EVENTID,
+
+       /* Channel stats event */
+       WMI_10X_CHAN_INFO_EVENTID,
+
+       /* PHY Error specific WMI event */
+       WMI_10X_PHYERR_EVENTID,
+
+       /* Roam event to trigger roaming on host */
+       WMI_10X_ROAM_EVENTID,
+
+       /* matching AP found from list of profiles */
+       WMI_10X_PROFILE_MATCH,
+
+       /* debug print message used for tracing FW code while debugging */
+       WMI_10X_DEBUG_PRINT_EVENTID,
+       /* QVIT specific event */
+       WMI_10X_PDEV_QVIT_EVENTID,
+       /* FW code profile data in response to profile request */
+       WMI_10X_WLAN_PROFILE_DATA_EVENTID,
+
+       /* RTT related event IDs */
+       WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
+       WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
+       WMI_10X_RTT_ERROR_REPORT_EVENTID,
+
+       WMI_10X_WOW_WAKEUP_HOST_EVENTID,
+       WMI_10X_DCS_INTERFERENCE_EVENTID,
+
+       /* TPC config for the current operating channel */
+       WMI_10X_PDEV_TPC_CONFIG_EVENTID,
+
+       WMI_10X_GPIO_INPUT_EVENTID,
+       WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+};
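
The 10.X event IDs parallel the main numbering but start at 0x9000, so event dispatch ends up as two switch statements over the two ID spaces funnelling into shared handlers. A hedged fragment of what the 10.X side of such a dispatcher could look like; the handler names are assumptions and the real rx path is outside this hunk:

	/* fragment only: 10.X-flavoured dispatch into common handlers */
	switch (id) {
	case WMI_10X_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);		/* assumed handler name */
		break;
	case WMI_10X_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);	/* assumed handler name */
		break;
	/* ... remaining 10.X event IDs ... */
	}
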
+
 enum wmi_phy_mode {
        MODE_11A        = 0,   /* 11a Mode */
        MODE_11G        = 1,   /* 11b/g Mode */
@@ -508,6 +833,48 @@ enum wmi_phy_mode {
        MODE_MAX        = 14
 };
 
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+       switch (mode) {
+       case MODE_11A:
+               return "11a";
+       case MODE_11G:
+               return "11g";
+       case MODE_11B:
+               return "11b";
+       case MODE_11GONLY:
+               return "11gonly";
+       case MODE_11NA_HT20:
+               return "11na-ht20";
+       case MODE_11NG_HT20:
+               return "11ng-ht20";
+       case MODE_11NA_HT40:
+               return "11na-ht40";
+       case MODE_11NG_HT40:
+               return "11ng-ht40";
+       case MODE_11AC_VHT20:
+               return "11ac-vht20";
+       case MODE_11AC_VHT40:
+               return "11ac-vht40";
+       case MODE_11AC_VHT80:
+               return "11ac-vht80";
+       case MODE_11AC_VHT20_2G:
+               return "11ac-vht20-2g";
+       case MODE_11AC_VHT40_2G:
+               return "11ac-vht40-2g";
+       case MODE_11AC_VHT80_2G:
+               return "11ac-vht80-2g";
+       case MODE_UNKNOWN:
+               /* skip */
+               break;
+
+               /* no default handler to allow the compiler to check that the
+                * enum is fully handled */
+       }
+
+       return "<unknown>";
+}
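
A hedged usage sketch for the helper above; the surrounding debug statement is illustrative rather than lifted from the driver:

	/* illustrative: pretty-print the negotiated phy mode in a debug log */
	ath10k_dbg(ATH10K_DBG_WMI, "vdev start: phymode %s\n",
		   ath10k_wmi_phymode_str(MODE_11NA_HT20));
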
+
 #define WMI_CHAN_LIST_TAG      0x1
 #define WMI_SSID_LIST_TAG      0x2
 #define WMI_BSSID_LIST_TAG     0x3
@@ -763,13 +1130,45 @@ struct wmi_service_ready_event {
        struct wlan_host_mem_req mem_reqs[1];
 } __packed;
 
-/*
- * status consists of  upper 16 bits fo int status and lower 16 bits of
- * module ID that retuned status
- */
-#define WLAN_INIT_STATUS_SUCCESS   0x0
-#define WLAN_GET_INIT_STATUS_REASON(status)    ((status) & 0xffff)
-#define WLAN_GET_INIT_STATUS_MODULE_ID(status) (((status) >> 16) & 0xffff)
+/* This is the definition from the 10.X firmware branch */
+struct wmi_service_ready_event_10x {
+       __le32 sw_version;
+       __le32 abi_version;
+
+       /* WMI_PHY_CAPABILITY */
+       __le32 phy_capability;
+
+       /* Maximum number of frag table entries that SW will populate less 1 */
+       __le32 max_frag_entry;
+       __le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
+       __le32 num_rf_chains;
+
+       /*
+        * The following field is only valid for service type
+        * WMI_SERVICE_11AC
+        */
+       __le32 ht_cap_info; /* WMI HT Capability */
+       __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+       __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+       __le32 hw_min_tx_power;
+       __le32 hw_max_tx_power;
+
+       struct hal_reg_capabilities hal_reg_capabilities;
+
+       __le32 sys_cap_info;
+       __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+
+       /*
+        * request to host to allocate a chunk of memory and pass it down to FW
+        * via WMI_INIT. FW uses this as FW extension memory for saving its
+        * data structures. Only valid for low latency interfaces like PCIE
+        * where FW can access this memory directly or by DMA.
+        */
+       __le32 num_mem_reqs;
+
+       struct wlan_host_mem_req mem_reqs[1];
+} __packed;
+
 
 #define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
 #define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
@@ -978,6 +1377,192 @@ struct wmi_resource_config {
        __le32 max_frag_entries;
 } __packed;
 
+struct wmi_resource_config_10x {
+       /* number of virtual devices (VAPs) to support */
+       __le32 num_vdevs;
+
+       /* number of peer nodes to support */
+       __le32 num_peers;
+
+       /* number of keys per peer */
+       __le32 num_peer_keys;
+
+       /* total number of TX/RX data TIDs */
+       __le32 num_tids;
+
+       /*
+        * max skid for resolving hash collisions
+        *
+        *   The address search table is sparse, so that if two MAC addresses
+        *   result in the same hash value, the second of these conflicting
+        *   entries can slide to the next index in the address search table,
+        *   and use it, if it is unoccupied.  This ast_skid_limit parameter
+        *   specifies the upper bound on how many subsequent indices to search
+        *   over to find an unoccupied space.
+        */
+       __le32 ast_skid_limit;
+
+       /*
+        * the nominal chain mask for transmit
+        *
+        *   The chain mask may be modified dynamically, e.g. to operate AP
+        *   tx with a reduced number of chains if no clients are associated.
+        *   This configuration parameter specifies the nominal chain-mask that
+        *   should be used when not operating with a reduced set of tx chains.
+        */
+       __le32 tx_chain_mask;
+
+       /*
+        * the nominal chain mask for receive
+        *
+        *   The chain mask may be modified dynamically, e.g. for a client
+        *   to use a reduced number of chains for receive if the traffic to
+        *   the client is low enough that it doesn't require downlink MIMO
+        *   or antenna diversity.
+        *   This configuration parameter specifies the nominal chain-mask that
+        *   should be used when not operating with a reduced set of rx chains.
+        */
+       __le32 rx_chain_mask;
+
+       /*
+        * what rx reorder timeout (ms) to use for the AC
+        *
+        *   Each WMM access class (voice, video, best-effort, background) will
+        *   have its own timeout value to dictate how long to wait for missing
+        *   rx MPDUs to arrive before flushing subsequent MPDUs that have
+        *   already been received.
+        *   This parameter specifies the timeout in milliseconds for each
+        *   class.
+        */
+       __le32 rx_timeout_pri_vi;
+       __le32 rx_timeout_pri_vo;
+       __le32 rx_timeout_pri_be;
+       __le32 rx_timeout_pri_bk;
+
+       /*
+        * what mode the rx should decap packets to
+        *
+        *   MAC can decap to RAW (no decap), native wifi or Ethernet types
+        *   This setting also determines the default TX behavior; however, TX
+        *   behavior can be modified on a per VAP basis during VAP init
+        */
+       __le32 rx_decap_mode;
+
+       /* the maximum number of scan requests that can be queued */
+       __le32 scan_max_pending_reqs;
+
+       /* maximum VDEV that could use BMISS offload */
+       __le32 bmiss_offload_max_vdev;
+
+       /* maximum VDEV that could use offload roaming */
+       __le32 roam_offload_max_vdev;
+
+       /* maximum AP profiles that would push to offload roaming */
+       __le32 roam_offload_max_ap_profiles;
+
+       /*
+        * how many groups to use for mcast->ucast conversion
+        *
+        *   The target's WAL maintains a table to hold information regarding
+        *   which peers belong to a given multicast group, so that if
+        *   multicast->unicast conversion is enabled, the target can convert
+        *   multicast tx frames to a series of unicast tx frames, to each
+        *   peer within the multicast group.
+        *   This num_mcast_groups configuration parameter tells the target how
+        *   many multicast groups to provide storage for within its multicast
+        *   group membership table.
+        */
+       __le32 num_mcast_groups;
+
+       /*
+        * size to alloc for the mcast membership table
+        *
+        *   This num_mcast_table_elems configuration parameter tells the
+        *   target how many peer elements it needs to provide storage for in
+        *   its multicast group membership table.
+        *   These multicast group membership table elements are shared by the
+        *   multicast groups stored within the table.
+        */
+       __le32 num_mcast_table_elems;
+
+       /*
+        * whether/how to do multicast->unicast conversion
+        *
+        *   This configuration parameter specifies whether the target should
+        *   perform multicast --> unicast conversion on transmit, and if so,
+        *   what to do if it finds no entries in its multicast group
+        *   membership table for the multicast IP address in the tx frame.
+        *   Configuration value:
+        *   0 -> Do not perform multicast to unicast conversion.
+        *   1 -> Convert multicast frames to unicast, if the IP multicast
+        *        address from the tx frame is found in the multicast group
+        *        membership table.  If the IP multicast address is not found,
+        *        drop the frame.
+        *   2 -> Convert multicast frames to unicast, if the IP multicast
+        *        address from the tx frame is found in the multicast group
+        *        membership table.  If the IP multicast address is not found,
+        *        transmit the frame as multicast.
+        */
+       __le32 mcast2ucast_mode;
+
+       /*
+        * how much memory to allocate for a tx PPDU dbg log
+        *
+        *   This parameter controls how much memory the target will allocate
+        *   to store a log of tx PPDU meta-information (how large the PPDU
+        *   was, when it was sent, whether it was successful, etc.)
+        */
+       __le32 tx_dbg_log_size;
+
+       /* how many AST entries to be allocated for WDS */
+       __le32 num_wds_entries;
+
+       /*
+        * MAC DMA burst size; e.g., for a PCI target the limit can be
+        * 0 (default) or 1 (256B)
+        */
+       __le32 dma_burst_size;
+
+       /*
+        * Fixed delimiters to be inserted after every MPDU to
+        * account for interface latency to avoid underrun.
+        */
+       __le32 mac_aggr_delim;
+
+       /*
+        *   determine whether target is responsible for detecting duplicate
+        *   non-aggregate MPDU and timing out stale fragments.
+        *
+        *   A-MPDU reordering is always performed on the target.
+        *
+        *   0: target responsible for frag timeout and dup checking
+        *   1: host responsible for frag timeout and dup checking
+        */
+       __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+       /*
+        * Configuration for VoW :
+        * No of Video Nodes to be supported
+        * and Max no of descriptors for each Video link (node).
+        */
+       __le32 vow_config;
+
+       /* Number of msdu descriptors target should use */
+       __le32 num_msdu_desc;
+
+       /*
+        * Max. number of Tx fragments per MSDU
+        *  This parameter controls the max number of Tx fragments per MSDU.
+        *  This is sent by the target as part of the WMI_SERVICE_READY event
+        *  and is overridden by the OS shim as required.
+        */
+       __le32 max_frag_entries;
+} __packed;
+
+
+#define NUM_UNITS_IS_NUM_VDEVS   0x1
+#define NUM_UNITS_IS_NUM_PEERS   0x2
+
 /* structure describing host memory chunk. */
 struct host_memory_chunk {
        /* id of the request that is passed up in service ready */
@@ -999,6 +1584,18 @@ struct wmi_init_cmd {
        struct host_memory_chunk host_mem_chunks[1];
 } __packed;
 
+/* The _10x structure is from the 10.X FW API */
+struct wmi_init_cmd_10x {
+       struct wmi_resource_config_10x resource_config;
+       __le32 num_host_mem_chunks;
+
+       /*
+        * variable number of host memory chunks.
+        * This should be the last element in the structure
+        */
+       struct host_memory_chunk host_mem_chunks[1];
+} __packed;
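
Both init commands end in a one-element host_mem_chunks[] array that stands in for a variable-length tail, so the send path has to size the buffer for the real chunk count before filling the array, as the loop near the top of this section does. A self-contained sketch of that allocation pattern with simplified types; the real code uses ath10k_wmi_alloc_skb() and __le32 fields:

#include <stdlib.h>
#include <stdint.h>

/* Simplified stand-ins for the structures above. */
struct chunk { uint32_t req_id, ptr, size; };
struct init_cmd {
	uint32_t num_host_mem_chunks;
	struct chunk host_mem_chunks[1];	/* variable-length tail */
};

/* One element is already declared in the struct, so n - 1 extra elements
 * are appended for n >= 1 chunks. */
static struct init_cmd *alloc_init_cmd(unsigned int n)
{
	size_t len = sizeof(struct init_cmd);
	struct init_cmd *cmd;

	if (n > 1)
		len += (n - 1) * sizeof(struct chunk);

	cmd = calloc(1, len);
	if (cmd)
		cmd->num_host_mem_chunks = n;
	return cmd;
}

int main(void)
{
	struct init_cmd *cmd = alloc_init_cmd(3);
	/* ... fill cmd->host_mem_chunks[0..2], send, then free ... */
	free(cmd);
	return 0;
}
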
+
 /* TLV for channel list */
 struct wmi_chan_list {
        __le32 tag; /* WMI_CHAN_LIST_TAG */
@@ -1118,6 +1715,88 @@ struct wmi_start_scan_cmd {
         */
 } __packed;
 
+/* This is the definition from the 10.X firmware branch */
+struct wmi_start_scan_cmd_10x {
+       /* Scan ID */
+       __le32 scan_id;
+
+       /* Scan requestor ID */
+       __le32 scan_req_id;
+
+       /* VDEV id(interface) that is requesting scan */
+       __le32 vdev_id;
+
+       /* Scan Priority, input to scan scheduler */
+       __le32 scan_priority;
+
+       /* Scan events subscription */
+       __le32 notify_scan_events;
+
+       /* dwell time in msec on active channels */
+       __le32 dwell_time_active;
+
+       /* dwell time in msec on passive channels */
+       __le32 dwell_time_passive;
+
+       /*
+        * min time in msec on the BSS channel, only valid if at least one
+        * VDEV is active
+        */
+       __le32 min_rest_time;
+
+       /*
+        * max rest time in msec on the BSS channel, only valid if at least
+        * one VDEV is active
+        */
+       /*
+        * The scanner will rest on the bss channel for at least min_rest_time.
+        * After min_rest_time the scanner will start checking for tx/rx
+        * activity on all VDEVs. If there is no activity the scanner will
+        * switch to off channel. If there is activity the scanner will keep
+        * the radio on the bss channel until max_rest_time expires. At
+        * max_rest_time the scanner will switch to off channel irrespective
+        * of activity. Activity is determined by the idle_time parameter.
+        */
+       __le32 max_rest_time;
+
+       /*
+        * time before sending next set of probe requests.
+        * The scanner keeps repeating probe requests transmission with
+        * period specified by repeat_probe_time.
+        * The number of probe requests specified depends on the ssid_list
+        * and bssid_list
+        */
+       __le32 repeat_probe_time;
+
+       /* time in msec between 2 consecutive probe requests within a set. */
+       __le32 probe_spacing_time;
+
+       /*
+        * data inactivity time in msec on bss channel that will be used by
+        * scanner for measuring the inactivity.
+        */
+       __le32 idle_time;
+
+       /* maximum time in msec allowed for scan  */
+       __le32 max_scan_time;
+
+       /*
+        * delay in msec before sending first probe request after switching
+        * to a channel
+        */
+       __le32 probe_delay;
+
+       /* Scan control flags */
+       __le32 scan_ctrl_flags;
+
+       /*
+        * TLV (tag length value) parameters follow the scan_cmd structure.
+        * The TLV can contain a channel list, bssid list, ssid list and
+        * IEs. The TLV tags are defined above.
+        */
+} __packed;
+
+
 struct wmi_ssid_arg {
        int len;
        const u8 *ssid;
@@ -1268,7 +1947,7 @@ struct wmi_scan_event {
  * good idea to pass all the fields in the RX status
  * descriptor up to the host.
  */
-struct wmi_mgmt_rx_hdr {
+struct wmi_mgmt_rx_hdr_v1 {
        __le32 channel;
        __le32 snr;
        __le32 rate;
@@ -1277,8 +1956,18 @@ struct wmi_mgmt_rx_hdr {
        __le32 status; /* %WMI_RX_STATUS_ */
 } __packed;
 
-struct wmi_mgmt_rx_event {
-       struct wmi_mgmt_rx_hdr hdr;
+struct wmi_mgmt_rx_hdr_v2 {
+       struct wmi_mgmt_rx_hdr_v1 v1;
+       __le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+       struct wmi_mgmt_rx_hdr_v1 hdr;
+       u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+       struct wmi_mgmt_rx_hdr_v2 hdr;
        u8 buf[0];
 } __packed;
 
@@ -1465,6 +2154,60 @@ struct wmi_csa_event {
 #define VDEV_DEFAULT_STATS_UPDATE_PERIOD    500
 #define PEER_DEFAULT_STATS_UPDATE_PERIOD    500
 
+struct wmi_pdev_param_map {
+       u32 tx_chain_mask;
+       u32 rx_chain_mask;
+       u32 txpower_limit2g;
+       u32 txpower_limit5g;
+       u32 txpower_scale;
+       u32 beacon_gen_mode;
+       u32 beacon_tx_mode;
+       u32 resmgr_offchan_mode;
+       u32 protection_mode;
+       u32 dynamic_bw;
+       u32 non_agg_sw_retry_th;
+       u32 agg_sw_retry_th;
+       u32 sta_kickout_th;
+       u32 ac_aggrsize_scaling;
+       u32 ltr_enable;
+       u32 ltr_ac_latency_be;
+       u32 ltr_ac_latency_bk;
+       u32 ltr_ac_latency_vi;
+       u32 ltr_ac_latency_vo;
+       u32 ltr_ac_latency_timeout;
+       u32 ltr_sleep_override;
+       u32 ltr_rx_override;
+       u32 ltr_tx_activity_timeout;
+       u32 l1ss_enable;
+       u32 dsleep_enable;
+       u32 pcielp_txbuf_flush;
+       u32 pcielp_txbuf_watermark;
+       u32 pcielp_txbuf_tmo_en;
+       u32 pcielp_txbuf_tmo_value;
+       u32 pdev_stats_update_period;
+       u32 vdev_stats_update_period;
+       u32 peer_stats_update_period;
+       u32 bcnflt_stats_update_period;
+       u32 pmf_qos;
+       u32 arp_ac_override;
+       u32 arpdhcp_ac_override;
+       u32 dcs;
+       u32 ani_enable;
+       u32 ani_poll_period;
+       u32 ani_listen_period;
+       u32 ani_ofdm_level;
+       u32 ani_cck_level;
+       u32 dyntxchain;
+       u32 proxy_sta;
+       u32 idle_ps_config;
+       u32 power_gating_sleep;
+       u32 fast_channel_reset;
+       u32 burst_dur;
+       u32 burst_enable;
+};
+
+#define WMI_PDEV_PARAM_UNSUPPORTED 0
+
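A pdev parameter that one firmware branch simply does not have maps to 0, so senders need the same guard that ath10k_wmi_vdev_set_param gained earlier in this patch. A hedged fragment of what that check looks like on the pdev side; the full ath10k_wmi_pdev_set_param body is not shown in this section:

	/* sketch of the guard inside ath10k_wmi_pdev_set_param() */
	if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
		ath10k_warn("pdev param %d not supported by firmware\n", id);
		return -EOPNOTSUPP;
	}
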
 enum wmi_pdev_param {
        /* TX chain mask */
        WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
@@ -1564,6 +2307,97 @@ enum wmi_pdev_param {
        WMI_PDEV_PARAM_POWER_GATING_SLEEP,
 };
 
+enum wmi_10x_pdev_param {
+       /* TX chain mask */
+       WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+       /* RX chain mask */
+       WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+       /* TX power limit for 2G Radio */
+       WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+       /* TX power limit for 5G Radio */
+       WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+       /* TX power scale */
+       WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+       /* Beacon generation mode. 0: host, 1: target */
+       WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+       /* Beacon transmission mode. 0: staggered, 1: bursted */
+       WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+       /*
+        * Resource manager off-channel mode.
+        * 0: turn off off-channel mode, 1: turn on off-channel mode
+        */
+       WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+       /*
+        * Protection mode:
+        * 0: no protection, 1: use CTS-to-self, 2: use RTS/CTS
+        */
+       WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+       /* Dynamic bandwidth 0: disable 1: enable */
+       WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+       /* Non-aggregate / 11g sw retry threshold. 0 - disable */
+       WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+       /* Aggregate sw retry threshold. 0 - disable */
+       WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+       /* Station kickout threshold (number of consecutive failures). 0 - disable */
+       WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+       /* Aggregate size scaling configuration per AC */
+       WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+       /* LTR enable */
+       WMI_10X_PDEV_PARAM_LTR_ENABLE,
+       /* LTR latency for BE, in us */
+       WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+       /* LTR latency for BK, in us */
+       WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+       /* LTR latency for VI, in us */
+       WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+       /* LTR latency for VO, in us  */
+       WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+       /* LTR AC latency timeout, in ms */
+       WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+       /* LTR platform latency override, in us */
+       WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+       /* LTR-RX override, in us */
+       WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+       /* Tx activity timeout for LTR, in us */
+       WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+       /* L1SS state machine enable */
+       WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+       /* Deep sleep state machine enable */
+       WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+       /* pdev level stats update period in ms */
+       WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+       /* vdev level stats update period in ms */
+       WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+       /* peer level stats update period in ms */
+       WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+       /* beacon filter status update period */
+       WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+       /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+       WMI_10X_PDEV_PARAM_PMF_QOS,
+       /* Access category on which ARP and DHCP frames are sent */
+       WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+       /* DCS configuration */
+       WMI_10X_PDEV_PARAM_DCS,
+       /* Enable/Disable ANI on target */
+       WMI_10X_PDEV_PARAM_ANI_ENABLE,
+       /* configure the ANI polling period */
+       WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+       /* configure the ANI listening period */
+       WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+       /* configure OFDM immunity level */
+       WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+       /* configure CCK immunity level */
+       WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+       /* Enable/Disable CDD for 1x1 STAs in rate control module */
+       WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+       /* Enable/Disable Fast channel reset */
+       WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+       /* Set Bursting DUR */
+       WMI_10X_PDEV_PARAM_BURST_DUR,
+       /* Set Bursting Enable */
+       WMI_10X_PDEV_PARAM_BURST_ENABLE,
+};
+
 struct wmi_pdev_set_param_cmd {
        __le32 param_id;
        __le32 param_value;
@@ -2088,6 +2922,61 @@ enum wmi_rate_preamble {
 /* Value to disable fixed rate setting */
 #define WMI_FIXED_RATE_NONE    (0xff)
 
+struct wmi_vdev_param_map {
+       u32 rts_threshold;
+       u32 fragmentation_threshold;
+       u32 beacon_interval;
+       u32 listen_interval;
+       u32 multicast_rate;
+       u32 mgmt_tx_rate;
+       u32 slot_time;
+       u32 preamble;
+       u32 swba_time;
+       u32 wmi_vdev_stats_update_period;
+       u32 wmi_vdev_pwrsave_ageout_time;
+       u32 wmi_vdev_host_swba_interval;
+       u32 dtim_period;
+       u32 wmi_vdev_oc_scheduler_air_time_limit;
+       u32 wds;
+       u32 atim_window;
+       u32 bmiss_count_max;
+       u32 bmiss_first_bcnt;
+       u32 bmiss_final_bcnt;
+       u32 feature_wmm;
+       u32 chwidth;
+       u32 chextoffset;
+       u32 disable_htprotection;
+       u32 sta_quickkickout;
+       u32 mgmt_rate;
+       u32 protection_mode;
+       u32 fixed_rate;
+       u32 sgi;
+       u32 ldpc;
+       u32 tx_stbc;
+       u32 rx_stbc;
+       u32 intra_bss_fwd;
+       u32 def_keyid;
+       u32 nss;
+       u32 bcast_data_rate;
+       u32 mcast_data_rate;
+       u32 mcast_indicate;
+       u32 dhcp_indicate;
+       u32 unknown_dest_indicate;
+       u32 ap_keepalive_min_idle_inactive_time_secs;
+       u32 ap_keepalive_max_idle_inactive_time_secs;
+       u32 ap_keepalive_max_unresponsive_time_secs;
+       u32 ap_enable_nawds;
+       u32 mcast2ucast_set;
+       u32 enable_rtscts;
+       u32 txbf;
+       u32 packet_powersave;
+       u32 drop_unencry;
+       u32 tx_encap_type;
+       u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+};
+
+#define WMI_VDEV_PARAM_UNSUPPORTED 0
+
 /* the definition of different VDEV parameters */
 enum wmi_vdev_param {
        /* RTS Threshold */
@@ -2219,6 +3108,121 @@ enum wmi_vdev_param {
        WMI_VDEV_PARAM_TX_ENCAP_TYPE,
 };
 
+/* the definition of different VDEV parameters */
+enum wmi_10x_vdev_param {
+       /* RTS Threshold */
+       WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+       /* Fragmentation threshold */
+       WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+       /* beacon interval in TUs */
+       WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+       /* Listen interval in TUs */
+       WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+       /* multicast rate in Mbps */
+       WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+       /* management frame rate in Mbps */
+       WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+       /* slot time (long vs short) */
+       WMI_10X_VDEV_PARAM_SLOT_TIME,
+       /* preamble (long vs short) */
+       WMI_10X_VDEV_PARAM_PREAMBLE,
+       /* SWBA time (time before tbtt in msec) */
+       WMI_10X_VDEV_PARAM_SWBA_TIME,
+       /* time period for updating VDEV stats */
+       WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+       /* age out time in msec for frames queued for station in power save */
+       WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+       /*
+        * Host SWBA interval (time in msec before tbtt for SWBA event
+        * generation).
+        */
+       WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+       /* DTIM period (specified in units of num beacon intervals) */
+       WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+       /*
+        * scheduler air time limit for this VDEV. used by off chan
+        * scheduler.
+        */
+       WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+       /* enable/disable WDS for this VDEV */
+       WMI_10X_VDEV_PARAM_WDS,
+       /* ATIM Window */
+       WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+       /* BMISS max */
+       WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+       /* WMM enables/disabled */
+       WMI_10X_VDEV_PARAM_FEATURE_WMM,
+       /* Channel width */
+       WMI_10X_VDEV_PARAM_CHWIDTH,
+       /* Channel Offset */
+       WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+       /* Disable HT Protection */
+       WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+       /* Quick STA Kickout */
+       WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+       /* Rate to be used with Management frames */
+       WMI_10X_VDEV_PARAM_MGMT_RATE,
+       /* Protection Mode */
+       WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+       /* Fixed rate setting */
+       WMI_10X_VDEV_PARAM_FIXED_RATE,
+       /* Short GI Enable/Disable */
+       WMI_10X_VDEV_PARAM_SGI,
+       /* Enable LDPC */
+       WMI_10X_VDEV_PARAM_LDPC,
+       /* Enable Tx STBC */
+       WMI_10X_VDEV_PARAM_TX_STBC,
+       /* Enable Rx STBC */
+       WMI_10X_VDEV_PARAM_RX_STBC,
+       /* Intra BSS forwarding  */
+       WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+       /* Setting Default xmit key for Vdev */
+       WMI_10X_VDEV_PARAM_DEF_KEYID,
+       /* NSS width */
+       WMI_10X_VDEV_PARAM_NSS,
+       /* Set the custom rate for the broadcast data frames */
+       WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+       /* Set the custom rate (rate-code) for multicast data frames */
+       WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+       /* Tx multicast packet indicate Enable/Disable */
+       WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+       /* Tx DHCP packet indicate Enable/Disable */
+       WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+       /* Enable host inspection of Tx unicast packet to unknown destination */
+       WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+       /* Minimum idle time after which the AP begins to consider a STA inactive */
+       WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+       /*
+        * An associated STA is considered inactive when there is no recent
+        * TX/RX activity and no downlink frames are buffered for it. Once a
+        * STA exceeds the maximum idle inactive time, the AP will send an
+        * 802.11 data-null as a keep alive to verify the STA is still
+        * associated. If the STA does ACK the data-null, or if the data-null
+        * associated. If the STA does not ACK the data-null, or if the
+        * data-null is buffered and the STA does not retrieve it, the STA
+        * will be considered unresponsive
+        */
+       WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+       /*
+        * An associated STA is considered unresponsive if there is no recent
+        * TX/RX activity and downlink frames are buffered for it. Once a STA
+        * exceeds the maximum unresponsive time, the AP will send a
+        * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */
+       WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+       /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+       WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+
+       WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+       /* Enable/Disable RTS-CTS */
+       WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+       WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+};
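
Since the 10.X branch numbers its vdev parameters differently and omits some of them (there is no TX_ENCAP_TYPE entry above, for example), the driver needs a filled-in wmi_vdev_param_map per branch, with missing entries pinned to WMI_VDEV_PARAM_UNSUPPORTED so the guard in ath10k_wmi_vdev_set_param can reject them. A hedged, abridged sketch of such a table; the complete initialisers live in wmi.c and are not shown here:

/* Abridged sketch only; see wmi.c for the full per-firmware tables. */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
	.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
	.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
	.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
	.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
	/* ... */
	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,	/* no 10.X counterpart */
};
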
+
 /* slot time long */
 #define WMI_VDEV_SLOT_TIME_LONG                0x1
 /* slot time short */
@@ -3000,7 +4004,6 @@ struct wmi_force_fw_hang_cmd {
 
 #define WMI_MAX_EVENT 0x1000
 /* Maximum number of pending TXed WMI packets */
-#define WMI_MAX_PENDING_TX_COUNT 128
 #define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
 
 /* By default disable power save for IBSS */
@@ -3013,7 +4016,6 @@ int ath10k_wmi_attach(struct ath10k *ar);
 void ath10k_wmi_detach(struct ath10k *ar);
 int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
 int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
-void ath10k_wmi_flush_tx(struct ath10k *ar);
 
 int ath10k_wmi_connect_htc_service(struct ath10k *ar);
 int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
@@ -3022,8 +4024,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
 int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
 int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
                                  u16 rd5g, u16 ctl2g, u16 ctl5g);
-int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
-                             u32 value);
+int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
 int ath10k_wmi_cmd_init(struct ath10k *ar);
 int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
 void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
@@ -3043,7 +4044,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
                       const u8 *bssid);
 int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
 int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
-                             enum wmi_vdev_param param_id, u32 param_value);
+                             u32 param_id, u32 param_value);
 int ath10k_wmi_vdev_install_key(struct ath10k *ar,
                                const struct wmi_vdev_install_key_arg *arg);
 int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
@@ -3066,11 +4067,13 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
                               enum wmi_ap_ps_peer_param param_id, u32 value);
 int ath10k_wmi_scan_chan_list(struct ath10k *ar,
                              const struct wmi_scan_chan_list_arg *arg);
-int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
+int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
+                                 const struct wmi_bcn_tx_arg *arg);
 int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
                        const struct wmi_pdev_set_wmm_params_arg *arg);
 int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
 int ath10k_wmi_force_fw_hang(struct ath10k *ar,
                             enum wmi_force_fw_hang_type type, u32 delay_ms);
+int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
 
 #endif /* _WMI_H_ */
index e9bc9e616b69c94e4c6758ae2c2327768778b942..79bffe165caba6e1f963b9fa81deed4844cb42d1 100644 (file)
@@ -37,12 +37,9 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
 {
        struct ath5k_hw *ah = common->priv;
        struct platform_device *pdev = to_platform_device(ah->dev);
-       struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+       struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
        u16 *eeprom, *eeprom_end;
 
-
-
-       bcfg = pdev->dev.platform_data;
        eeprom = (u16 *) bcfg->radio;
        eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
 
@@ -57,7 +54,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
 int ath5k_hw_read_srev(struct ath5k_hw *ah)
 {
        struct platform_device *pdev = to_platform_device(ah->dev);
-       struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+       struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
        ah->ah_mac_srev = bcfg->devid;
        return 0;
 }
@@ -65,7 +62,7 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
 static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
 {
        struct platform_device *pdev = to_platform_device(ah->dev);
-       struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+       struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
        u8 *cfg_mac;
 
        if (to_platform_device(ah->dev)->id == 0)
@@ -87,7 +84,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
 /*Initialization*/
 static int ath_ahb_probe(struct platform_device *pdev)
 {
-       struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+       struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
        struct ath5k_hw *ah;
        struct ieee80211_hw *hw;
        struct resource *res;
@@ -96,7 +93,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
        int ret = 0;
        u32 reg;
 
-       if (!pdev->dev.platform_data) {
+       if (!dev_get_platdata(&pdev->dev)) {
                dev_err(&pdev->dev, "no platform data specified\n");
                ret = -EINVAL;
                goto err_out;
@@ -193,7 +190,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
 
 static int ath_ahb_remove(struct platform_device *pdev)
 {
-       struct ar231x_board_config *bcfg = pdev->dev.platform_data;
+       struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
        struct ieee80211_hw *hw = platform_get_drvdata(pdev);
        struct ath5k_hw *ah;
        u32 reg;
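A minimal sketch of the accessor these ahb.c hunks switch to, assuming only the standard definition of dev_get_platdata() as a thin wrapper returning dev->platform_data; the helper name below is illustrative, not part of the patch.

/* Forward declaration stands in for the board-support header. */
struct ar231x_board_config;

#include <linux/platform_device.h>

/* Equivalent to the removed open-coded access: pdev->dev.platform_data */
static struct ar231x_board_config *example_get_bcfg(struct platform_device *pdev)
{
        return dev_get_platdata(&pdev->dev);
}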
index 48161edec8de84769fd9f3db92fa1c4aa165d70b..69f58b073e85ff1a183ec1f06e803ff9da00806c 100644 (file)
@@ -1663,15 +1663,15 @@ ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
        ah->stats.tx_bytes_count += skb->len;
        info = IEEE80211_SKB_CB(skb);
 
+       size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
+       memcpy(info->status.rates, bf->rates, size);
+
        tries[0] = info->status.rates[0].count;
        tries[1] = info->status.rates[1].count;
        tries[2] = info->status.rates[2].count;
 
        ieee80211_tx_info_clear_status(info);
 
-       size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
-       memcpy(info->status.rates, bf->rates, size);
-
        for (i = 0; i < ts->ts_final_idx; i++) {
                struct ieee80211_tx_rate *r =
                        &info->status.rates[i];
index 98a886154d9cc59775eaa0c17c74305452e52e8e..05debf700a846db00f55e0071df53e207d2e6c63 100644 (file)
@@ -22,8 +22,7 @@
 
 #define ATH6KL_MAX_IE                  256
 
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
 
 /*
  * Reflects the version of binary interface exposed by ATH6KL target
index 74369de00fb57a40dcebd72ced2002d0cb571b24..ca9ba005f2871f3e42bbc914bb5ca90f6e8b90e9 100644 (file)
@@ -50,11 +50,10 @@ enum ATH6K_DEBUG_MASK {
 };
 
 extern unsigned int debug_mask;
-extern __printf(2, 3)
-int ath6kl_printk(const char *level, const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
-extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
+__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
+__printf(1, 2) int ath6kl_info(const char *fmt, ...);
+__printf(1, 2) int ath6kl_err(const char *fmt, ...);
+__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
 
 enum ath6kl_war {
        ATH6KL_WAR_INVALID_RATE,
index a2c8ff8097939240f4fecc4e238203b6a5c7de73..14cab1403dd6071d48179922346b676d89e9610b 100644 (file)
@@ -60,7 +60,7 @@
 /* disable credit flow control on a specific service */
 #define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL          (1 << 3)
 #define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT    8
-#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK     0xFF00
+#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK     0xFF00U
 
 /* connect response status codes */
 #define HTC_SERVICE_SUCCESS      0
index 7944c25c9a43d54940ffc3e5ee9bc95591fe69d6..32f139e2e897e255b4c378c33c6d99df8abd51e0 100644 (file)
@@ -84,6 +84,26 @@ config ATH9K_DFS_CERTIFIED
          developed. At this point enabling this option won't do anything
          except increase code size.
 
+config ATH9K_TX99
+       bool "Atheros ath9k TX99 testing support"
+       depends on CFG80211_CERTIFICATION_ONUS
+       default n
+       ---help---
+         Say N. This should only be enabled on systems undergoing
+         certification testing and evaluation in a controlled environment.
+         Enabling this will only enable TX99 support; all other modes of
+         operation will be disabled.
+
+         TX99 support enables Specific Absorption Rate (SAR) testing.
+         SAR is the unit of measurement for the amount of radio
+         frequency (RF) energy absorbed by the body when using a
+         wireless device. The RF exposure limits used are expressed in
+         terms of SAR, which is a measure of the electric and magnetic
+         field strength and power density for transmitters operating at
+         frequencies from 300 kHz to 100 GHz. Regulatory bodies around
+         the world require that wireless devices be evaluated to meet
+         the RF exposure limits set forth in governmental SAR regulations.
+
 config ATH9K_LEGACY_RATE_CONTROL
        bool "Atheros ath9k rate control"
        depends on ATH9K
index 75ee9e7704ce627eb52939c25d5ea0b6d1eb4f3d..6205ef5a9321e8072c283d24e8e7814b2a8392df 100644 (file)
@@ -14,9 +14,7 @@ ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
 ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
 ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
 ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
-               dfs.o \
-               dfs_pattern_detector.o \
-               dfs_pri_detector.o
+               dfs.o
 ath9k-$(CONFIG_PM_SLEEP) += wow.o
 
 obj-$(CONFIG_ATH9K) += ath9k.o
index 072e4b53106765ce8e0d6c833652501a7abde62d..2dff2765769bb339eea79b05877414b6b9bdc0f9 100644 (file)
@@ -54,7 +54,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
        struct platform_device *pdev = to_platform_device(sc->dev);
        struct ath9k_platform_data *pdata;
 
-       pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
+       pdata = dev_get_platdata(&pdev->dev);
        if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
                ath_err(common,
                        "%s: flash read failed, offset %08x is out of range\n",
@@ -84,7 +84,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
        struct ath_hw *ah;
        char hw_name[64];
 
-       if (!pdev->dev.platform_data) {
+       if (!dev_get_platdata(&pdev->dev)) {
                dev_err(&pdev->dev, "no platform data specified\n");
                return -EINVAL;
        }
index be466b0ef7a7725c736114ee439596ca45a4a380..d28923b7435b257f13a91e3f8896115c30adb1c9 100644 (file)
@@ -338,10 +338,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
                    aniState->cckNoiseImmunityLevel !=
                    ATH9K_ANI_CCK_DEF_LEVEL) {
                        ath_dbg(common, ANI,
-                               "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+                               "Restore defaults: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
                                ah->opmode,
                                chan->channel,
-                               chan->channelFlags,
                                is_scanning,
                                aniState->ofdmNoiseImmunityLevel,
                                aniState->cckNoiseImmunityLevel);
@@ -354,10 +353,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
                 * restore historical levels for this channel
                 */
                ath_dbg(common, ANI,
-                       "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
+                       "Restore history: opmode %u chan %d Mhz is_scanning=%d ofdm:%d cck:%d\n",
                        ah->opmode,
                        chan->channel,
-                       chan->channelFlags,
                        is_scanning,
                        aniState->ofdmNoiseImmunityLevel,
                        aniState->cckNoiseImmunityLevel);
index dd1cc73d7946104d2353a777586c6da4bf3c6f67..bd048cc69a334d6234b6967ecff5e1aec8b75fbe 100644 (file)
@@ -332,7 +332,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
                }
 
                if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
-                   ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
+                   div_ant_conf->lna1_lna2_switch_delta)
                        div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
                else
                        div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
@@ -554,42 +554,22 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
                        ant_conf->fast_div_bias = 0x1;
                        break;
                case 0x10: /* LNA2 A-B */
-                       if ((antcomb->scan == 0) &&
-                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
-                               ant_conf->fast_div_bias = 0x3f;
-                       } else {
-                               ant_conf->fast_div_bias = 0x1;
-                       }
+                       ant_conf->fast_div_bias = 0x2;
                        break;
                case 0x12: /* LNA2 LNA1 */
-                       ant_conf->fast_div_bias = 0x39;
+                       ant_conf->fast_div_bias = 0x3f;
                        break;
                case 0x13: /* LNA2 A+B */
-                       if ((antcomb->scan == 0) &&
-                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
-                               ant_conf->fast_div_bias = 0x3f;
-                       } else {
-                               ant_conf->fast_div_bias = 0x1;
-                       }
+                       ant_conf->fast_div_bias = 0x2;
                        break;
                case 0x20: /* LNA1 A-B */
-                       if ((antcomb->scan == 0) &&
-                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
-                               ant_conf->fast_div_bias = 0x3f;
-                       } else {
-                               ant_conf->fast_div_bias = 0x4;
-                       }
+                       ant_conf->fast_div_bias = 0x3;
                        break;
                case 0x21: /* LNA1 LNA2 */
-                       ant_conf->fast_div_bias = 0x6;
+                       ant_conf->fast_div_bias = 0x3;
                        break;
                case 0x23: /* LNA1 A+B */
-                       if ((antcomb->scan == 0) &&
-                           (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)) {
-                               ant_conf->fast_div_bias = 0x3f;
-                       } else {
-                               ant_conf->fast_div_bias = 0x6;
-                       }
+                       ant_conf->fast_div_bias = 0x3;
                        break;
                case 0x30: /* A+B A-B */
                        ant_conf->fast_div_bias = 0x1;
@@ -638,7 +618,7 @@ static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
                antcomb->rssi_sub = alt_rssi_avg;
                antcomb->scan = false;
                if (antcomb->rssi_lna2 >
-                   (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+                   (antcomb->rssi_lna1 + conf->lna1_lna2_switch_delta)) {
                        /* use LNA2 as main LNA */
                        if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
                            (antcomb->rssi_add > antcomb->rssi_sub)) {
index 08656473c63e5a796b37bcd25f087504d85433fb..ff415e863ee9cc5140879ca008df366b8f1ad967 100644 (file)
@@ -626,12 +626,11 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
                if (AR_SREV_9287_11_OR_LATER(ah))
                        val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
 
+               val |= AR_PCU_MISC_MODE2_CFP_IGNORE;
+
                REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
        }
 
-       REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
-                   AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
-
        if (AR_SREV_9280_20_OR_LATER(ah))
                return;
        /*
@@ -667,14 +666,13 @@ static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
        if (IS_CHAN_HT40(chan)) {
                phymode |= AR_PHY_FC_DYN2040_EN;
 
-               if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
-                   (chan->chanmode == CHANNEL_G_HT40PLUS))
+               if (IS_CHAN_HT40PLUS(chan))
                        phymode |= AR_PHY_FC_DYN2040_PRI_CH;
 
        }
        REG_WRITE(ah, AR_PHY_TURBO, phymode);
 
-       ath9k_hw_set11nmac2040(ah);
+       ath9k_hw_set11nmac2040(ah, chan);
 
        ENABLE_REGWRITE_BUFFER(ah);
 
@@ -692,31 +690,12 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
        int i, regWrites = 0;
        u32 modesIndex, freqIndex;
 
-       switch (chan->chanmode) {
-       case CHANNEL_A:
-       case CHANNEL_A_HT20:
-               modesIndex = 1;
-               freqIndex = 1;
-               break;
-       case CHANNEL_A_HT40PLUS:
-       case CHANNEL_A_HT40MINUS:
-               modesIndex = 2;
+       if (IS_CHAN_5GHZ(chan)) {
                freqIndex = 1;
-               break;
-       case CHANNEL_G:
-       case CHANNEL_G_HT20:
-       case CHANNEL_B:
-               modesIndex = 4;
-               freqIndex = 2;
-               break;
-       case CHANNEL_G_HT40PLUS:
-       case CHANNEL_G_HT40MINUS:
-               modesIndex = 3;
+               modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+       } else {
                freqIndex = 2;
-               break;
-
-       default:
-               return -EINVAL;
+               modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
        }
 
        /*
@@ -815,8 +794,10 @@ static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
        if (chan == NULL)
                return;
 
-       rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
-               ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+       if (IS_CHAN_2GHZ(chan))
+               rfMode |= AR_PHY_MODE_DYNAMIC;
+       else
+               rfMode |= AR_PHY_MODE_OFDM;
 
        if (!AR_SREV_9280_20_OR_LATER(ah))
                rfMode |= (IS_CHAN_5GHZ(chan)) ?
@@ -1219,12 +1200,11 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
 
        iniDef = &aniState->iniDef;
 
-       ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+       ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
                ah->hw_version.macVersion,
                ah->hw_version.macRev,
                ah->opmode,
-               chan->channel,
-               chan->channelFlags);
+               chan->channel);
 
        val = REG_READ(ah, AR_PHY_SFCORR);
        iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
index 9f589744a9f945b285ca1c2988d795488cbf7a47..cdc74005650ce9848147ee0e051c64f9df989ffd 100644 (file)
@@ -33,15 +33,12 @@ static bool ar9002_hw_is_cal_supported(struct ath_hw *ah,
        bool supported = false;
        switch (ah->supp_cals & cal_type) {
        case IQ_MISMATCH_CAL:
-               /* Run IQ Mismatch for non-CCK only */
-               if (!IS_CHAN_B(chan))
-                       supported = true;
+               supported = true;
                break;
        case ADC_GAIN_CAL:
        case ADC_DC_CAL:
                /* Run ADC Gain Cal for non-CCK & non 2GHz-HT20 only */
-               if (!IS_CHAN_B(chan) &&
-                   !((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
+               if (!((IS_CHAN_2GHZ(chan) || IS_CHAN_A_FAST_CLOCK(ah, chan)) &&
                      IS_CHAN_HT20(chan)))
                        supported = true;
                break;
@@ -671,7 +668,7 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
 
        nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
        if (ah->caldata)
-               nfcal_pending = ah->caldata->nfcal_pending;
+               nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
 
        if (currCal && !nfcal &&
            (currCal->calState == CAL_RUNNING ||
@@ -861,7 +858,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
        ar9002_hw_pa_cal(ah, true);
 
        if (ah->caldata)
-               ah->caldata->nfcal_pending = true;
+               set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
 
        ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 
index fb61b081d1721124518f7c4cad85792cedaefac0..5c95fd9e9c9e9c861edb283a2a8cfbf4a9b5e3b9 100644 (file)
@@ -419,28 +419,10 @@ void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan)
        u32 modesIndex;
        int i;
 
-       switch (chan->chanmode) {
-       case CHANNEL_A:
-       case CHANNEL_A_HT20:
-               modesIndex = 1;
-               break;
-       case CHANNEL_A_HT40PLUS:
-       case CHANNEL_A_HT40MINUS:
-               modesIndex = 2;
-               break;
-       case CHANNEL_G:
-       case CHANNEL_G_HT20:
-       case CHANNEL_B:
-               modesIndex = 4;
-               break;
-       case CHANNEL_G_HT40PLUS:
-       case CHANNEL_G_HT40MINUS:
-               modesIndex = 3;
-               break;
-
-       default:
-               return;
-       }
+       if (IS_CHAN_5GHZ(chan))
+               modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+       else
+               modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
 
        ENABLE_REGWRITE_BUFFER(ah);
 
index 1fc1fa955d44fff8ddd60e7f231701f873df5473..f087117b2e6b6b2592ae8eb7a2270314cffb7325 100644 (file)
@@ -485,7 +485,7 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
        if (IS_CHAN_HT40(ah->curchan))
                nfarray[3] = sign_extend32(nf, 8);
 
-       if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
+       if (!(ah->rxchainmask & BIT(1)))
                return;
 
        nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
@@ -532,6 +532,7 @@ static void ar9002_hw_antdiv_comb_conf_get(struct ath_hw *ah,
                                 AR_PHY_9285_ANT_DIV_ALT_LNACONF_S;
        antconf->fast_div_bias = (regval & AR_PHY_9285_FAST_DIV_BIAS) >>
                                  AR_PHY_9285_FAST_DIV_BIAS_S;
+       antconf->lna1_lna2_switch_delta = -1;
        antconf->lna1_lna2_delta = -3;
        antconf->div_group = 0;
 }
@@ -679,6 +680,26 @@ static void ar9002_hw_spectral_scan_wait(struct ath_hw *ah)
        }
 }
 
+static void ar9002_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+       REG_SET_BIT(ah, 0x9864, 0x7f000);
+       REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+       REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+       REG_WRITE(ah, AR_CR, AR_CR_RXD);
+       REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+       REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20);
+       REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+       REG_WRITE(ah, AR_D_FPCTL, 0x10|qnum);
+       REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+       REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+       REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9002_hw_tx99_stop(struct ath_hw *ah)
+{
+       REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
 void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
 {
        struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -700,6 +721,8 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
        ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
 #endif
+       ops->tx99_start = ar9002_hw_tx99_start;
+       ops->tx99_stop = ar9002_hw_tx99_stop;
 
        ar9002_hw_set_nf_limits(ah);
 }
index 6988e1d081f225c0c0a8efd0dc49fbacb4fc6a1a..22934d3ca54413fa9558a928fdccbc4ab108131a 100644 (file)
@@ -727,8 +727,12 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
        REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
                      AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
 
-       if (caldata)
-               caldata->done_txiqcal_once = is_reusable;
+       if (caldata) {
+               if (is_reusable)
+                       set_bit(TXIQCAL_DONE, &caldata->cal_flags);
+               else
+                       clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+       }
 
        return;
 }
@@ -961,18 +965,44 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
 }
 
 static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
-                                        struct ath9k_channel *chan)
+                                        struct ath9k_channel *chan,
+                                        bool run_rtt_cal)
 {
+       struct ath9k_hw_cal_data *caldata = ah->caldata;
        int i;
 
        if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
                return;
 
+       if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
+               return;
+
        for (i = 0; i < AR9300_MAX_CHAINS; i++) {
                if (!(ah->rxchainmask & (1 << i)))
                        continue;
                ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
        }
+
+       if (caldata)
+               set_bit(SW_PKDET_DONE, &caldata->cal_flags);
+
+       if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && caldata) {
+               if (IS_CHAN_2GHZ(chan)) {
+                       caldata->caldac[0] = REG_READ_FIELD(ah,
+                                                   AR_PHY_65NM_RXRF_AGC(0),
+                                                   AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+                       caldata->caldac[1] = REG_READ_FIELD(ah,
+                                                   AR_PHY_65NM_RXRF_AGC(1),
+                                                   AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
+               } else {
+                       caldata->caldac[0] = REG_READ_FIELD(ah,
+                                                   AR_PHY_65NM_RXRF_AGC(0),
+                                                   AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+                       caldata->caldac[1] = REG_READ_FIELD(ah,
+                                                   AR_PHY_65NM_RXRF_AGC(1),
+                                                   AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
+               }
+       }
 }
 
 static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
@@ -990,7 +1020,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
        txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
                          AR_PHY_AGC_CONTROL_CLC_SUCCESS);
 
-       if (caldata->done_txclcal_once) {
+       if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
                for (i = 0; i < AR9300_MAX_CHAINS; i++) {
                        if (!(ah->txchainmask & (1 << i)))
                                continue;
@@ -1006,7 +1036,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
                                caldata->tx_clcal[i][j] =
                                        REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
                }
-               caldata->done_txclcal_once = true;
+               set_bit(TXCLCAL_DONE, &caldata->cal_flags);
        }
 }
 
@@ -1019,6 +1049,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
        bool is_reusable = true, status = true;
        bool run_rtt_cal = false, run_agc_cal, sep_iq_cal = false;
        bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+       u32 rx_delay = 0;
        u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
                                          AR_PHY_AGC_CONTROL_FLTR_CAL   |
                                          AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -1042,17 +1073,22 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
                ar9003_hw_rtt_clear_hist(ah);
        }
 
-       if (rtt && !run_rtt_cal) {
-               agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
-               agc_supp_cals &= agc_ctrl;
-               agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
-                            AR_PHY_AGC_CONTROL_FLTR_CAL |
-                            AR_PHY_AGC_CONTROL_PKDET_CAL);
-               REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+       if (rtt) {
+               if (!run_rtt_cal) {
+                       agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+                       agc_supp_cals &= agc_ctrl;
+                       agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
+                                     AR_PHY_AGC_CONTROL_FLTR_CAL |
+                                     AR_PHY_AGC_CONTROL_PKDET_CAL);
+                       REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+               } else {
+                       if (ah->ah_flags & AH_FASTCC)
+                               run_agc_cal = true;
+               }
        }
 
        if (ah->enabled_cals & TX_CL_CAL) {
-               if (caldata && caldata->done_txclcal_once)
+               if (caldata && test_bit(TXCLCAL_DONE, &caldata->cal_flags))
                        REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
                                    AR_PHY_CL_CAL_ENABLE);
                else {
@@ -1076,14 +1112,14 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
         * AGC calibration
         */
        if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
-               if (caldata && !caldata->done_txiqcal_once)
+               if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
                        REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
                                    AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
                else
                        REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
                                    AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
                txiqcal_done = run_agc_cal = true;
-       } else if (caldata && !caldata->done_txiqcal_once) {
+       } else if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags)) {
                run_agc_cal = true;
                sep_iq_cal = true;
        }
@@ -1099,6 +1135,15 @@ skip_tx_iqcal:
                REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
        }
 
+       if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+               rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
+               /* Disable BB_active */
+               REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
+               udelay(5);
+               REG_WRITE(ah, AR_PHY_RX_DELAY, AR_PHY_RX_DELAY_DELAY);
+               REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
+       }
+
        if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
                /* Calibrate the AGC */
                REG_WRITE(ah, AR_PHY_AGC_CONTROL,
@@ -1110,7 +1155,12 @@ skip_tx_iqcal:
                                       AR_PHY_AGC_CONTROL_CAL,
                                       0, AH_WAIT_TIMEOUT);
 
-               ar9003_hw_do_manual_peak_cal(ah, chan);
+               ar9003_hw_do_manual_peak_cal(ah, chan, run_rtt_cal);
+       }
+
+       if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
+               REG_WRITE(ah, AR_PHY_RX_DELAY, rx_delay);
+               udelay(5);
        }
 
        if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
@@ -1133,19 +1183,23 @@ skip_tx_iqcal:
 
        if (txiqcal_done)
                ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
-       else if (caldata && caldata->done_txiqcal_once)
+       else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
                ar9003_hw_tx_iq_cal_reload(ah);
 
        ar9003_hw_cl_cal_post_proc(ah, is_reusable);
 
        if (run_rtt_cal && caldata) {
                if (is_reusable) {
-                       if (!ath9k_hw_rfbus_req(ah))
+                       if (!ath9k_hw_rfbus_req(ah)) {
                                ath_err(ath9k_hw_common(ah),
                                        "Could not stop baseband\n");
-                       else
+                       } else {
                                ar9003_hw_rtt_fill_hist(ah);
 
+                               if (test_bit(SW_PKDET_DONE, &caldata->cal_flags))
+                                       ar9003_hw_rtt_load_hist(ah);
+                       }
+
                        ath9k_hw_rfbus_done(ah);
                }
 
index f4864807e15bc9fd127c439cb4770d7ff75b1c8a..1ec52356b5a16956dda70a2f7365103497b43c68 100644 (file)
@@ -2991,7 +2991,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
        case EEP_CHAIN_MASK_REDUCE:
                return (pBase->miscConfiguration >> 0x3) & 0x1;
        case EEP_ANT_DIV_CTL1:
-               return eep->base_ext1.ant_div_control;
+               if (AR_SREV_9565(ah))
+                       return AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE;
+               else
+                       return eep->base_ext1.ant_div_control;
        case EEP_ANTENNA_GAIN_5G:
                return eep->modalHeader5G.antennaGain;
        case EEP_ANTENNA_GAIN_2G:
@@ -3424,12 +3427,12 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
        struct ar9300_base_eep_hdr *pBase;
 
        if (!dump_base_hdr) {
-               len += snprintf(buf + len, size - len,
-                               "%20s :\n", "2GHz modal Header");
+               len += scnprintf(buf + len, size - len,
+                                "%20s :\n", "2GHz modal Header");
                len = ar9003_dump_modal_eeprom(buf, len, size,
                                                &eep->modalHeader2G);
-               len += snprintf(buf + len, size - len,
-                               "%20s :\n", "5GHz modal Header");
+               len += scnprintf(buf + len, size - len,
+                                "%20s :\n", "5GHz modal Header");
                len = ar9003_dump_modal_eeprom(buf, len, size,
                                                &eep->modalHeader5G);
                goto out;
@@ -3479,8 +3482,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
        PR_EEP("Rx Gain", pBase->txrxgain & 0xf);
        PR_EEP("SW Reg", le32_to_cpu(pBase->swreg));
 
-       len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-                       ah->eeprom.ar9300_eep.macAddr);
+       len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+                        ah->eeprom.ar9300_eep.macAddr);
 out:
        if (len > size)
                len = size;
@@ -3656,9 +3659,23 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
                if (AR_SREV_9565(ah)) {
                        if (common->bt_ant_diversity) {
                                regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+                               REG_SET_BIT(ah, AR_PHY_RESTART,
+                                           AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+
+                               /* Force WLAN LNA diversity ON */
+                               REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+                                           AR_BTCOEX_WL_LNADIV_FORCE_ON);
                        } else {
                                regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
                                regval &= ~(1 << AR_PHY_ANT_SW_RX_PROT_S);
+
+                               REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+                                           (1 << AR_PHY_ANT_SW_RX_PROT_S));
+
+                               /* Force WLAN LNA diversity OFF */
+                               REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+                                           AR_BTCOEX_WL_LNADIV_FORCE_ON);
                        }
                }
 
@@ -3669,7 +3686,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
                regval &= (~AR_FAST_DIV_ENABLE);
                regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
 
-               if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+               if ((AR_SREV_9485(ah) || AR_SREV_9565(ah))
+                   && common->bt_ant_diversity)
                        regval |= AR_FAST_DIV_ENABLE;
 
                REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
index 75d4fb41962f312567cd161c900b9d3578007f0d..0e5daa58a4fc14371d360bdc5aa22937902bc824 100644 (file)
@@ -52,6 +52,8 @@
 #define AR9300_PAPRD_SCALE_2           0x70000000
 #define AR9300_PAPRD_SCALE_2_S         28
 
+#define AR9300_EEP_ANTDIV_CONTROL_DEFAULT_VALUE 0xc9
+
 /* Delta from which to start power to pdadc table */
 /* This offset is used in both open loop and closed loop power control
  * schemes. In open loop power control, it is not really needed, but for
index 608bb4824e2a2f800c057ff1e717f9219e8414db..b07f164d65cf582a63c2ddfbfccaced509a6ceca 100644 (file)
@@ -364,6 +364,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
 
                INIT_INI_ARRAY(&ah->iniModesFastClock,
                                ar9565_1p0_modes_fast_clock);
+               INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
+                              ar9565_1p0_baseband_core_txfir_coeff_japan_2484);
        } else {
                /* mac */
                INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
@@ -628,6 +630,9 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
        else if (AR_SREV_9462_20(ah))
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                                ar9462_common_rx_gain_table_2p0);
+       else if (AR_SREV_9565(ah))
+               INIT_INI_ARRAY(&ah->iniModesRxGain,
+                              ar9565_1p0_Common_rx_gain_table);
        else
                INIT_INI_ARRAY(&ah->iniModesRxGain,
                                ar9300Common_rx_gain_table_2p2);
index 8dd069259e7b7ea7d9dd40212571cab73424f131..7b94a6c7db3d50dd4feb53dd74e1973bd68f2e6d 100644 (file)
@@ -753,9 +753,9 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
                    1 << AR_PHY_TIMING_CONTROL4_DO_GAIN_DC_IQ_CAL_SHIFT);
 
        if (caldata) {
-               caldata->done_txiqcal_once = false;
-               caldata->done_txclcal_once = false;
-               caldata->rtt_done = false;
+               clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+               clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
+               clear_bit(RTT_DONE, &caldata->cal_flags);
        }
 
        if (!ath9k_hw_init_cal(ah, chan))
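A minimal sketch of the bool-to-bitmap conversion these calibration hunks apply, assuming cal_flags is an unsigned long bitmap in the caldata structure and that names such as TXIQCAL_DONE are bit indices defined elsewhere in the driver; the enum ordering and helper below are illustrative only.

#include <linux/bitops.h>

/* Illustrative bit indices; the driver defines the real enum. */
enum example_cal_flags { TXIQCAL_DONE, TXCLCAL_DONE, RTT_DONE };

/* Replaces: caldata->done_txiqcal_once = false; and friends. */
static void example_reset_cal_flags(unsigned long *cal_flags)
{
        clear_bit(TXIQCAL_DONE, cal_flags);
        clear_bit(TXCLCAL_DONE, cal_flags);
        clear_bit(RTT_DONE, cal_flags);
}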
index e897648d32335dd1800df489916afa80fca81a2b..11f53589a3f34879b6ab3e8d9062e294fb6f7260 100644 (file)
@@ -551,8 +551,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
        if (IS_CHAN_HT40(chan)) {
                phymode |= AR_PHY_GC_DYN2040_EN;
                /* Configure control (primary) channel at +-10MHz */
-               if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
-                   (chan->chanmode == CHANNEL_G_HT40PLUS))
+               if (IS_CHAN_HT40PLUS(chan))
                        phymode |= AR_PHY_GC_DYN2040_PRI_CH;
 
        }
@@ -565,7 +564,7 @@ static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
        REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);
 
        /* Configure MAC for 20/40 operation */
-       ath9k_hw_set11nmac2040(ah);
+       ath9k_hw_set11nmac2040(ah, chan);
 
        /* global transmit timeout (25 TUs default)*/
        REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
@@ -627,11 +626,10 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
         * MAC addr only will fail.
         */
        val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
-       REG_WRITE(ah, AR_PCU_MISC_MODE2,
-                 val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
-
-       REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
-                   AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+       val |= AR_AGG_WEP_ENABLE_FIX |
+              AR_AGG_WEP_ENABLE |
+              AR_PCU_MISC_MODE2_CFP_IGNORE;
+       REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
 
        if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
                REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
@@ -683,41 +681,22 @@ static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
 {
        int ret;
 
-       switch (chan->chanmode) {
-       case CHANNEL_A:
-       case CHANNEL_A_HT20:
-               if (chan->channel <= 5350)
-                       ret = 1;
-               else if ((chan->channel > 5350) && (chan->channel <= 5600))
-                       ret = 3;
-               else
-                       ret = 5;
-               break;
-
-       case CHANNEL_A_HT40PLUS:
-       case CHANNEL_A_HT40MINUS:
-               if (chan->channel <= 5350)
-                       ret = 2;
-               else if ((chan->channel > 5350) && (chan->channel <= 5600))
-                       ret = 4;
+       if (IS_CHAN_2GHZ(chan)) {
+               if (IS_CHAN_HT40(chan))
+                       return 7;
                else
-                       ret = 6;
-               break;
-
-       case CHANNEL_G:
-       case CHANNEL_G_HT20:
-       case CHANNEL_B:
-               ret = 8;
-               break;
+                       return 8;
+       }
 
-       case CHANNEL_G_HT40PLUS:
-       case CHANNEL_G_HT40MINUS:
-               ret = 7;
-               break;
+       if (chan->channel <= 5350)
+               ret = 1;
+       else if ((chan->channel > 5350) && (chan->channel <= 5600))
+               ret = 3;
+       else
+               ret = 5;
 
-       default:
-               ret = -EINVAL;
-       }
+       if (IS_CHAN_HT40(chan))
+               ret++;
 
        return ret;
 }
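A minimal sketch spot-checking that the simplified txgain-index mapping above reproduces the removed switch; IS_CHAN_2GHZ/IS_CHAN_HT40 are modelled as plain booleans and only the index arithmetic from the hunk is reused, so the function name and parameters are illustrative.

static int example_txgain_index(int mhz, int is_2ghz, int is_ht40)
{
        int ret;

        if (is_2ghz)
                return is_ht40 ? 7 : 8;        /* old CHANNEL_G_HT40: 7, CHANNEL_G/B: 8 */

        if (mhz <= 5350)
                ret = 1;
        else if (mhz <= 5600)
                ret = 3;
        else
                ret = 5;

        if (is_ht40)
                ret++;          /* e.g. 5500 MHz HT40: 3 -> 4, matching the old table */

        return ret;
}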
@@ -728,28 +707,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
        unsigned int regWrites = 0, i;
        u32 modesIndex;
 
-       switch (chan->chanmode) {
-       case CHANNEL_A:
-       case CHANNEL_A_HT20:
-               modesIndex = 1;
-               break;
-       case CHANNEL_A_HT40PLUS:
-       case CHANNEL_A_HT40MINUS:
-               modesIndex = 2;
-               break;
-       case CHANNEL_G:
-       case CHANNEL_G_HT20:
-       case CHANNEL_B:
-               modesIndex = 4;
-               break;
-       case CHANNEL_G_HT40PLUS:
-       case CHANNEL_G_HT40MINUS:
-               modesIndex = 3;
-               break;
-
-       default:
-               return -EINVAL;
-       }
+       if (IS_CHAN_5GHZ(chan))
+               modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+       else
+               modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
 
        /*
         * SOC, MAC, BB, RADIO initvals.
@@ -847,8 +808,10 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
        if (chan == NULL)
                return;
 
-       rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
-               ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
+       if (IS_CHAN_2GHZ(chan))
+               rfMode |= AR_PHY_MODE_DYNAMIC;
+       else
+               rfMode |= AR_PHY_MODE_OFDM;
 
        if (IS_CHAN_A_FAST_CLOCK(ah, chan))
                rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
@@ -1274,12 +1237,11 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
        aniState = &ah->ani;
        iniDef = &aniState->iniDef;
 
-       ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+       ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz\n",
                ah->hw_version.macVersion,
                ah->hw_version.macRev,
                ah->opmode,
-               chan->channel,
-               chan->channelFlags);
+               chan->channel);
 
        val = REG_READ(ah, AR_PHY_SFCORR);
        iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1375,15 +1337,19 @@ static void ar9003_hw_antdiv_comb_conf_get(struct ath_hw *ah,
                                  AR_PHY_ANT_FAST_DIV_BIAS_S;
 
        if (AR_SREV_9330_11(ah)) {
+               antconf->lna1_lna2_switch_delta = -1;
                antconf->lna1_lna2_delta = -9;
                antconf->div_group = 1;
        } else if (AR_SREV_9485(ah)) {
+               antconf->lna1_lna2_switch_delta = -1;
                antconf->lna1_lna2_delta = -9;
                antconf->div_group = 2;
        } else if (AR_SREV_9565(ah)) {
-               antconf->lna1_lna2_delta = -3;
+               antconf->lna1_lna2_switch_delta = 3;
+               antconf->lna1_lna2_delta = -9;
                antconf->div_group = 3;
        } else {
+               antconf->lna1_lna2_switch_delta = -1;
                antconf->lna1_lna2_delta = -3;
                antconf->div_group = 0;
        }
@@ -1488,18 +1454,25 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
                }
        } else if (AR_SREV_9565(ah)) {
                if (enable) {
+                       REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+                                   AR_ANT_DIV_ENABLE);
                        REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
                                    (1 << AR_PHY_ANT_SW_RX_PROT_S));
-                       if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
-                               REG_SET_BIT(ah, AR_PHY_RESTART,
-                                           AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+                       REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
+                                   AR_FAST_DIV_ENABLE);
+                       REG_SET_BIT(ah, AR_PHY_RESTART,
+                                   AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
                        REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
                                    AR_BTCOEX_WL_LNADIV_FORCE_ON);
                } else {
-                       REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+                       REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+                                   AR_ANT_DIV_ENABLE);
                        REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
                                    (1 << AR_PHY_ANT_SW_RX_PROT_S));
-                       REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+                       REG_CLR_BIT(ah, AR_PHY_CCK_DETECT,
+                                   AR_FAST_DIV_ENABLE);
+                       REG_CLR_BIT(ah, AR_PHY_RESTART,
+                                   AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
                        REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
                                    AR_BTCOEX_WL_LNADIV_FORCE_ON);
 
@@ -1526,28 +1499,10 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
        unsigned int regWrites = 0;
        u32 modesIndex;
 
-       switch (chan->chanmode) {
-       case CHANNEL_A:
-       case CHANNEL_A_HT20:
-               modesIndex = 1;
-               break;
-       case CHANNEL_A_HT40PLUS:
-       case CHANNEL_A_HT40MINUS:
-               modesIndex = 2;
-               break;
-       case CHANNEL_G:
-       case CHANNEL_G_HT20:
-       case CHANNEL_B:
-               modesIndex = 4;
-               break;
-       case CHANNEL_G_HT40PLUS:
-       case CHANNEL_G_HT40MINUS:
-               modesIndex = 3;
-               break;
-
-       default:
-               return -EINVAL;
-       }
+       if (IS_CHAN_5GHZ(chan))
+               modesIndex = IS_CHAN_HT40(chan) ? 2 : 1;
+       else
+               modesIndex = IS_CHAN_HT40(chan) ? 3 : 4;
 
        if (modesIndex == ah->modes_index) {
                *ini_reloaded = false;
@@ -1662,6 +1617,98 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
        }
 }
 
+static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+       REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+       REG_SET_BIT(ah, 0x9864, 0x7f000);
+       REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+       REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+       REG_WRITE(ah, AR_CR, AR_CR_RXD);
+       REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+       REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 20); /* 50 OK */
+       REG_WRITE(ah, AR_D_GBL_IFS_EIFS, 20);
+       REG_WRITE(ah, AR_TIME_OUT, 0x00000400);
+       REG_WRITE(ah, AR_DRETRY_LIMIT(qnum), 0xffffffff);
+       REG_SET_BIT(ah, AR_QMISC(qnum), AR_Q_MISC_DCU_EARLY_TERM_REQ);
+}
+
+static void ar9003_hw_tx99_stop(struct ath_hw *ah)
+{
+       REG_CLR_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+       REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+}
+
+static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
+{
+       static s16 p_pwr_array[ar9300RateSize] = { 0 };
+       unsigned int i;
+
+       if (txpower <= MAX_RATE_POWER) {
+               for (i = 0; i < ar9300RateSize; i++)
+                       p_pwr_array[i] = txpower;
+       } else {
+               for (i = 0; i < ar9300RateSize; i++)
+                       p_pwr_array[i] = MAX_RATE_POWER;
+       }
+
+       REG_WRITE(ah, 0xa458, 0);
+
+       REG_WRITE(ah, 0xa3c0,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24],  8) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24],  0));
+       REG_WRITE(ah, 0xa3c4,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_54],  24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_48],  16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_36],   8) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
+       REG_WRITE(ah, 0xa3c8,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L],  0));
+       REG_WRITE(ah, 0xa3cc,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11S],   24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11L],   16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_5S],     8) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L],  0));
+       REG_WRITE(ah, 0xa3d0,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_5],  24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_4],  16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_1_3_9_11_17_19], 8)|
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_0_8_16], 0));
+       REG_WRITE(ah, 0xa3d4,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_13], 24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_12], 16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_7],   8) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_6],   0));
+       REG_WRITE(ah, 0xa3e4,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_21], 24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_20], 16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_15],  8) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_14],  0));
+       REG_WRITE(ah, 0xa3e8,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_23], 24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_22], 16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_23],  8) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_22],  0));
+       REG_WRITE(ah, 0xa3d8,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_5], 24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_4], 16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_0_8_16], 0));
+       REG_WRITE(ah, 0xa3dc,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_13], 24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_12], 16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_7],   8) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_6],   0));
+       REG_WRITE(ah, 0xa3ec,
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_21], 24) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_20], 16) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_15],  8) |
+                 ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14],  0));
+}
+
 void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
 {
        struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1701,6 +1748,9 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
        ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
 #endif
+       ops->tx99_start = ar9003_hw_tx99_start;
+       ops->tx99_stop = ar9003_hw_tx99_stop;
+       ops->tx99_set_txpower = ar9003_hw_tx99_set_txpower;
 
        ar9003_hw_set_nf_limits(ah);
        ar9003_hw_set_radar_conf(ah);
index 6fd752321e3616c171c81be16a754b31486de667..fca624322dc8886f991632d7a2d5e78b0bdfa114 100644 (file)
 
 #define AR_PHY_CCA_NOM_VAL_9462_2GHZ          -127
 #define AR_PHY_CCA_MIN_GOOD_VAL_9462_2GHZ     -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_2GHZ     -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ -95
 #define AR_PHY_CCA_NOM_VAL_9462_5GHZ          -127
 #define AR_PHY_CCA_MIN_GOOD_VAL_9462_5GHZ     -127
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_5GHZ     -60
+#define AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ -100
 
 #define AR_PHY_CCA_NOM_VAL_9330_2GHZ          -118
 
index 74de3539c2c8337680bb273933ea7158b0e946ec..934418872e8e156a641145a512280f3bee6f86bd 100644 (file)
@@ -118,6 +118,27 @@ void ar9003_hw_rtt_load_hist(struct ath_hw *ah)
        }
 }
 
+static void ar9003_hw_patch_rtt(struct ath_hw *ah, int index, int chain)
+{
+       int agc, caldac;
+
+       if (!test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+               return;
+
+       if ((index != 5) || (chain >= 2))
+               return;
+
+       agc = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+                            AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE);
+       if (!agc)
+               return;
+
+       caldac = ah->caldata->caldac[chain];
+       ah->caldata->rtt_table[chain][index] &= 0xFFFF05FF;
+       caldac = (caldac & 0x20) | ((caldac & 0x1F) << 7);
+       ah->caldata->rtt_table[chain][index] |= (caldac << 4);
+}
+
 static int ar9003_hw_rtt_fill_hist_entry(struct ath_hw *ah, u8 chain, u32 index)
 {
        u32 val;
@@ -155,13 +176,16 @@ void ar9003_hw_rtt_fill_hist(struct ath_hw *ah)
                for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) {
                        ah->caldata->rtt_table[chain][i] =
                                ar9003_hw_rtt_fill_hist_entry(ah, chain, i);
+
+                       ar9003_hw_patch_rtt(ah, i, chain);
+
                        ath_dbg(ath9k_hw_common(ah), CALIBRATE,
                                "RTT value at idx %d, chain %d is: 0x%x\n",
                                i, chain, ah->caldata->rtt_table[chain][i]);
                }
        }
 
-       ah->caldata->rtt_done = true;
+       set_bit(RTT_DONE, &ah->caldata->cal_flags);
 }
 
 void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
@@ -176,7 +200,7 @@ void ar9003_hw_rtt_clear_hist(struct ath_hw *ah)
        }
 
        if (ah->caldata)
-               ah->caldata->rtt_done = false;
+               clear_bit(RTT_DONE, &ah->caldata->cal_flags);
 }
 
 bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
@@ -186,11 +210,37 @@ bool ar9003_hw_rtt_restore(struct ath_hw *ah, struct ath9k_channel *chan)
        if (!ah->caldata)
                return false;
 
-       if (!ah->caldata->rtt_done)
+       if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags)) {
+               if (IS_CHAN_2GHZ(chan)) {
+                       REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+                                     AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+                                     ah->caldata->caldac[0]);
+                       REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+                                     AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
+                                     ah->caldata->caldac[1]);
+               } else {
+                       REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+                                     AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+                                     ah->caldata->caldac[0]);
+                       REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+                                     AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
+                                     ah->caldata->caldac[1]);
+               }
+               REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(1),
+                             AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+               REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(0),
+                             AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
+       }
+
+       if (!test_bit(RTT_DONE, &ah->caldata->cal_flags))
                return false;
 
        ar9003_hw_rtt_enable(ah);
-       ar9003_hw_rtt_set_mask(ah, 0x10);
+
+       if (test_bit(SW_PKDET_DONE, &ah->caldata->cal_flags))
+               ar9003_hw_rtt_set_mask(ah, 0x30);
+       else
+               ar9003_hw_rtt_set_mask(ah, 0x10);
 
        if (!ath9k_hw_rfbus_req(ah)) {
                ath_err(ath9k_hw_common(ah), "Could not stop baseband\n");
index 88ff1d7b53ab41c642333d553646e90065c3045b..6f899c6926474ba2f60cd642399e10d61814ec3d 100644 (file)
 
 /* AR9485 1.1 */
 
-#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
+static const u32 ar9485_1_1_mac_postamble[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+       {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+       {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+       {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+       {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+       {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+       {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+       {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
 
 static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
        /* Addr      allmodes  */
@@ -34,6 +44,7 @@ static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
        {0x00009e00, 0x037216a0},
        {0x00009e04, 0x00182020},
        {0x00009e18, 0x00000000},
+       {0x00009e20, 0x000003a8},
        {0x00009e2c, 0x00004121},
        {0x00009e44, 0x02282324},
        {0x0000a000, 0x00060005},
@@ -174,7 +185,7 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
        {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
        {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
        {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
-       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
        {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
        {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -200,14 +211,14 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
        {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
        {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
        {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
-       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
-       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
-       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
-       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
        {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -263,6 +274,11 @@ static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
 static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
+       {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+       {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+       {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+       {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
        {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
        {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
        {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
@@ -297,6 +313,22 @@ static const u32 ar9485Modes_green_ob_db_tx_gain_1_1[][5] = {
        {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
        {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
        {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+       {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+       {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+       {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+       {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+       {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+       {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+       {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+       {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+       {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
        {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
        {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
        {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -341,6 +373,100 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
        {0x0000a2e0, 0x00000000, 0x00000000, 0xffc63a84, 0xffc63a84},
        {0x0000a2e4, 0x00000000, 0x00000000, 0xfe0fc000, 0xfe0fc000},
        {0x0000a2e8, 0x00000000, 0x00000000, 0xfff00000, 0xfff00000},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
+       {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
+       {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
+       {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
+       {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
+       {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
+       {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
+       {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
+       {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
+       {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
+       {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
+       {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
+       {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
+       {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
+       {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
+       {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62001eee, 0x62001eee},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001ff6, 0x66001ff6},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001ff6, 0x66001ff6},
+       {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a58c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a590, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a594, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a598, 0x00000000, 0x00000000, 0x01404501, 0x01404501},
+       {0x0000a59c, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+       {0x0000a5a0, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+       {0x0000a5a4, 0x00000000, 0x00000000, 0x02808803, 0x02808803},
+       {0x0000a5a8, 0x00000000, 0x00000000, 0x04c14b04, 0x04c14b04},
+       {0x0000a5ac, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+       {0x0000a5b0, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+       {0x0000a5b4, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+       {0x0000a5b8, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+       {0x0000a5bc, 0x00000000, 0x00000000, 0x04c15305, 0x04c15305},
+       {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
+       {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
+};
+
+static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+       /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
+       {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
+       {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+       {0x0000a2dc, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+       {0x0000a2e0, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+       {0x0000a2e4, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0xfe2d3552, 0xfe2d3552},
        {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
        {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
@@ -427,7 +553,7 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
        {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
 };
 
-static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
+static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
        {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
@@ -521,12 +647,15 @@ static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
        {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
 };
 
-#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
-
 static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
        /* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20   */
        {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
-       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
+       {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0x7999a83a, 0x7999a83a},
+       {0x0000a2dc, 0x00000000, 0x00000000, 0xffad452a, 0xffad452a},
+       {0x0000a2e0, 0x00000000, 0x00000000, 0xffc98634, 0xffc98634},
+       {0x0000a2e4, 0x00000000, 0x00000000, 0xfff60780, 0xfff60780},
+       {0x0000a2e8, 0x00000000, 0x00000000, 0xfffff800, 0xfffff800},
+       {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
        {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
        {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
        {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
@@ -543,23 +672,39 @@ static const u32 ar9485Modes_green_spur_ob_db_tx_gain_1_1[][5] = {
        {0x0000a530, 0x48023ec6, 0x48023ec6, 0x310006e0, 0x310006e0},
        {0x0000a534, 0x4d023f01, 0x4d023f01, 0x330006e0, 0x330006e0},
        {0x0000a538, 0x53023f4b, 0x53023f4b, 0x3e0008e3, 0x3e0008e3},
-       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x410008e5, 0x410008e5},
-       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x430008e6, 0x430008e6},
-       {0x0000a544, 0x6502feca, 0x6502feca, 0x4a0008ec, 0x4a0008ec},
-       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4e0008f1, 0x4e0008f1},
-       {0x0000a54c, 0x7203feca, 0x7203feca, 0x520008f3, 0x520008f3},
-       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x54000eed, 0x54000eed},
-       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x58000ef1, 0x58000ef1},
-       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5c000ef3, 0x5c000ef3},
-       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x60000ef5, 0x60000ef5},
-       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x62000ef6, 0x62000ef6},
-       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x62000ef6, 0x62000ef6},
-       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
-       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
-       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
-       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
-       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
-       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x62000ef6, 0x62000ef6},
+       {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x430008e6, 0x430008e6},
+       {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4a0008ec, 0x4a0008ec},
+       {0x0000a544, 0x6502feca, 0x6502feca, 0x4e0008f1, 0x4e0008f1},
+       {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x520008f3, 0x520008f3},
+       {0x0000a54c, 0x7203feca, 0x7203feca, 0x54000eed, 0x54000eed},
+       {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x58000ef1, 0x58000ef1},
+       {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5c000ef3, 0x5c000ef3},
+       {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x62000ef6, 0x62000ef6},
+       {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001ff0, 0x66001ff0},
+       {0x0000a560, 0x900fff0b, 0x900fff0b, 0x68001ff6, 0x68001ff6},
+       {0x0000a564, 0x960fffcb, 0x960fffcb, 0x68001ff6, 0x68001ff6},
+       {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+       {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+       {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+       {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+       {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+       {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x68001ff6, 0x68001ff6},
+       {0x0000a580, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a584, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a588, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+       {0x0000a58c, 0x00000000, 0x00000000, 0x01804000, 0x01804000},
+       {0x0000a590, 0x00000000, 0x00000000, 0x02808a02, 0x02808a02},
+       {0x0000a594, 0x00000000, 0x00000000, 0x0340ca02, 0x0340ca02},
+       {0x0000a598, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+       {0x0000a59c, 0x00000000, 0x00000000, 0x0340cd03, 0x0340cd03},
+       {0x0000a5a0, 0x00000000, 0x00000000, 0x06415304, 0x06415304},
+       {0x0000a5a4, 0x00000000, 0x00000000, 0x04c11905, 0x04c11905},
+       {0x0000a5a8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+       {0x0000a5ac, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+       {0x0000a5b0, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+       {0x0000a5b4, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+       {0x0000a5b8, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
+       {0x0000a5bc, 0x00000000, 0x00000000, 0x06415905, 0x06415905},
        {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
        {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
        {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
@@ -823,6 +968,7 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
        {0x00009e00, 0x03721b20},
        {0x00009e04, 0x00082020},
        {0x00009e18, 0x0300501e},
+       {0x00009e20, 0x000003ba},
        {0x00009e2c, 0x00002e21},
        {0x00009e44, 0x02182324},
        {0x0000a000, 0x00060005},
@@ -1001,7 +1147,6 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
        {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
        {0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
-       {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
        {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1020,7 +1165,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
        {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
        {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
-       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+       {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
        {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
        {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
        {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1206,6 +1351,11 @@ static const u32 ar9485_1_1_mac_core[][2] = {
        {0x000083d0, 0x000301ff},
 };
 
-#define ar9485_1_1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
+static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a398, 0x00000000},
+       {0x0000a39c, 0x6f7f0301},
+       {0x0000a3a0, 0xca9228ee},
+};
 
 #endif /* INITVALS_9485_H */
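The mode tables in this header are [][5] arrays: column 0 is the register address and columns 1-4 hold the value for 5G_HT20, 5G_HT40, 2G_HT40 and 2G_HT20 respectively (see the /* Addr ... */ header rows). A minimal, self-contained sketch of how one such table can be programmed for a chosen mode; write_reg() and load_mode_table() are illustrative stand-ins, not the driver's actual register path, and the two rows are copied from the ar9485 tables above.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the ar9485Modes_* tables above:
 * { addr, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20 } per row. */
static const uint32_t example_modes[][5] = {
        {0x0000a410, 0x000050d9, 0x000050d9, 0x000050da, 0x000050da},
        {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};

/* Illustrative stand-in for the driver's MMIO register write. */
static void write_reg(uint32_t addr, uint32_t val)
{
        printf("REG %08" PRIx32 " <= %08" PRIx32 "\n", addr, val);
}

/* Program every row of a mode table, picking one value column:
 * 1 = 5G_HT20, 2 = 5G_HT40, 3 = 2G_HT40, 4 = 2G_HT20. */
static void load_mode_table(const uint32_t (*tbl)[5], size_t rows,
                            unsigned int column)
{
        for (size_t i = 0; i < rows; i++)
                write_reg(tbl[i][0], tbl[i][column]);
}

int main(void)
{
        /* e.g. bring-up on a 2 GHz HT20 channel -> column 4 */
        load_mode_table(example_modes,
                        sizeof(example_modes) / sizeof(example_modes[0]), 4);
        return 0;
}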
index e85a8b076c223e5bd3fbc4549789519c296b7761..a8c757b6124fdac483e21149733c18944f160fd7 100644 (file)
@@ -272,9 +272,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
        {0x0000a398, 0x001f0e0f},
        {0x0000a39c, 0x0075393f},
        {0x0000a3a0, 0xb79f6427},
-       {0x0000a3a4, 0x00000000},
-       {0x0000a3a8, 0xaaaaaaaa},
-       {0x0000a3ac, 0x3c466478},
+       {0x0000a3a4, 0x00000011},
+       {0x0000a3a8, 0xaaaaaa6e},
+       {0x0000a3ac, 0x3c466455},
        {0x0000a3c0, 0x20202020},
        {0x0000a3c4, 0x22222220},
        {0x0000a3c8, 0x20200020},
@@ -295,11 +295,11 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
        {0x0000a404, 0x00000000},
        {0x0000a408, 0x0e79e5c6},
        {0x0000a40c, 0x00820820},
-       {0x0000a414, 0x1ce739ce},
+       {0x0000a414, 0x1ce739c5},
        {0x0000a418, 0x2d001dce},
-       {0x0000a41c, 0x1ce739ce},
+       {0x0000a41c, 0x1ce739c5},
        {0x0000a420, 0x000001ce},
-       {0x0000a424, 0x1ce739ce},
+       {0x0000a424, 0x1ce739c5},
        {0x0000a428, 0x000001ce},
        {0x0000a42c, 0x1ce739ce},
        {0x0000a430, 0x1ce739ce},
@@ -351,9 +351,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
        {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
        {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
        {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
-       {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+       {0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
        {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
-       {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+       {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
        {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
        {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
        {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -452,6 +452,7 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
        /* Addr      allmodes  */
        {0x00004050, 0x00300300},
        {0x0000406c, 0x00100000},
+       {0x00009e20, 0x000003b6},
        {0x0000a000, 0x00010000},
        {0x0000a004, 0x00030002},
        {0x0000a008, 0x00050004},
@@ -1230,4 +1231,11 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
        {0x00016054, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
 };
 
+static const u32 ar9565_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+       /* Addr      allmodes  */
+       {0x0000a398, 0x00000000},
+       {0x0000a39c, 0x6f7f0301},
+       {0x0000a3a0, 0xca9228ee},
+};
+
 #endif /* INITVALS_9565_1P0_H */
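Both initvals headers in this series replace a #define alias with a chip-local *_baseband_core_txfir_coeff_japan_2484 table carrying the same three allmodes entries. A sketch of how such a table would be applied, under the assumption that it is only pushed when the radio is tuned to 2484 MHz (Japanese channel 14); write_reg() and maybe_apply_japan_txfir() are illustrative helpers, not driver functions.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct ini_entry { uint32_t addr, val; };

/* The three allmodes entries from the tables above. */
static const struct ini_entry txfir_japan_2484[] = {
        {0x0000a398, 0x00000000},
        {0x0000a39c, 0x6f7f0301},
        {0x0000a3a0, 0xca9228ee},
};

static void write_reg(uint32_t addr, uint32_t val)
{
        printf("REG %08" PRIx32 " <= %08" PRIx32 "\n", addr, val);
}

/* Assumed trigger: only override the default TX FIR coefficients when
 * the radio is tuned to 2484 MHz (Japanese channel 14). */
static void maybe_apply_japan_txfir(unsigned int freq_mhz)
{
        if (freq_mhz != 2484)
                return;
        for (size_t i = 0;
             i < sizeof(txfir_japan_2484) / sizeof(txfir_japan_2484[0]); i++)
                write_reg(txfir_japan_2484[i].addr, txfir_japan_2484[i].val);
}

int main(void)
{
        maybe_apply_japan_txfir(2412);  /* channel 1: table not written */
        maybe_apply_japan_txfir(2484);  /* channel 14: three writes */
        return 0;
}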
index 2ee35f677c0e8843cdc66534fd40cde15755275b..e7a38d844a6a4e7a2e9ba738e56606d946fea47a 100644 (file)
@@ -64,7 +64,6 @@ struct ath_node;
 
 struct ath_config {
        u16 txpowlimit;
-       u8 cabqReadytime;
 };
 
 /*************************/
@@ -207,6 +206,14 @@ struct ath_frame_info {
        u8 baw_tracked : 1;
 };
 
+struct ath_rxbuf {
+       struct list_head list;
+       struct sk_buff *bf_mpdu;
+       void *bf_desc;
+       dma_addr_t bf_daddr;
+       dma_addr_t bf_buf_addr;
+};
+
 struct ath_buf_state {
        u8 bf_type;
        u8 bfs_paprd;
@@ -307,7 +314,7 @@ struct ath_rx {
        struct ath_descdma rxdma;
        struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
 
-       struct ath_buf *buf_hold;
+       struct ath_rxbuf *buf_hold;
        struct sk_buff *frag;
 
        u32 ampdu_ref;
@@ -459,8 +466,8 @@ void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
 
 #define ATH_DUMP_BTCOEX(_s, _val)                              \
        do {                                                    \
-               len += snprintf(buf + len, size - len,          \
-                               "%20s : %10d\n", _s, (_val));   \
+               len += scnprintf(buf + len, size - len,         \
+                                "%20s : %10d\n", _s, (_val));  \
        } while (0)
 
 enum bt_op_flags {
@@ -581,7 +588,6 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
 #define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
 #define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
 
-#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
 #define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
 #define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
 #define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
@@ -626,12 +632,15 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
 /* Main driver core */
 /********************/
 
-#define ATH9K_PCI_CUS198     0x0001
-#define ATH9K_PCI_CUS230     0x0002
-#define ATH9K_PCI_CUS217     0x0004
-#define ATH9K_PCI_WOW        0x0008
-#define ATH9K_PCI_BT_ANT_DIV 0x0010
-#define ATH9K_PCI_D3_L1_WAR  0x0020
+#define ATH9K_PCI_CUS198      0x0001
+#define ATH9K_PCI_CUS230      0x0002
+#define ATH9K_PCI_CUS217      0x0004
+#define ATH9K_PCI_CUS252      0x0008
+#define ATH9K_PCI_WOW         0x0010
+#define ATH9K_PCI_BT_ANT_DIV  0x0020
+#define ATH9K_PCI_D3_L1_WAR   0x0040
+#define ATH9K_PCI_AR9565_1ANT 0x0080
+#define ATH9K_PCI_AR9565_2ANT 0x0100
 
 /*
  * Default cache line size, in bytes.
@@ -769,6 +778,11 @@ struct ath_softc {
        enum spectral_mode spectral_mode;
        struct ath_spec_scan spec_config;
 
+       struct ieee80211_vif *tx99_vif;
+       struct sk_buff *tx99_skb;
+       bool tx99_state;
+       s16 tx99_power;
+
 #ifdef CONFIG_PM_SLEEP
        atomic_t wow_got_bmiss_intr;
        atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
@@ -877,6 +891,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
  */
 enum ath_fft_sample_type {
        ATH_FFT_SAMPLE_HT20 = 1,
+       ATH_FFT_SAMPLE_HT20_40,
 };
 
 struct fft_sample_tlv {
@@ -903,6 +918,39 @@ struct fft_sample_ht20 {
        u8 data[SPECTRAL_HT20_NUM_BINS];
 } __packed;
 
+struct fft_sample_ht20_40 {
+       struct fft_sample_tlv tlv;
+
+       u8 channel_type;
+       __be16 freq;
+
+       s8 lower_rssi;
+       s8 upper_rssi;
+
+       __be64 tsf;
+
+       s8 lower_noise;
+       s8 upper_noise;
+
+       __be16 lower_max_magnitude;
+       __be16 upper_max_magnitude;
+
+       u8 lower_max_index;
+       u8 upper_max_index;
+
+       u8 lower_bitmap_weight;
+       u8 upper_bitmap_weight;
+
+       u8 max_exp;
+
+       u8 data[SPECTRAL_HT20_40_NUM_BINS];
+} __packed;
+
+int ath9k_tx99_init(struct ath_softc *sc);
+void ath9k_tx99_deinit(struct ath_softc *sc);
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+                   struct ath_tx_control *txctl);
+
 void ath9k_tasklet(unsigned long data);
 int ath_cabq_update(struct ath_softc *);
 
@@ -924,7 +972,6 @@ void ath9k_deinit_device(struct ath_softc *sc);
 void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
 void ath9k_reload_chainmask_settings(struct ath_softc *sc);
 
-bool ath9k_uses_beacons(int type);
 void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw);
 int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
                               enum spectral_mode spectral_mode);
@@ -952,7 +999,7 @@ void ath9k_ps_restore(struct ath_softc *sc);
 u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
 
 void ath_start_rfkill_poll(struct ath_softc *sc);
-extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
 void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               struct ath9k_vif_iter_data *iter_data);
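The ATH_DUMP_BTCOEX hunk above (and the debugfs read handlers in debug.c further down) switch the length accumulation from snprintf() to scnprintf(). The distinction: snprintf() returns the length the output would have needed, so a chain of len += snprintf(buf + len, size - len, ...) can push len past size and make the next size - len wrap, while scnprintf() returns only what was actually stored. A user-space sketch of the two behaviours; scnprintf_like() is a local stand-in for the kernel helper so the example compiles outside the kernel.

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/* User-space stand-in with the kernel scnprintf() contract: returns the
 * number of characters actually stored (at most size - 1), never a
 * "would have written" length. */
static int scnprintf_like(char *buf, size_t size, const char *fmt, ...)
{
        va_list ap;
        int ret;

        if (size == 0)
                return 0;
        va_start(ap, fmt);
        ret = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        if (ret < 0)
                return 0;
        return (size_t)ret >= size ? (int)(size - 1) : ret;
}

int main(void)
{
        char buf[16];
        size_t size = sizeof(buf);
        int len;

        /* snprintf: the return value counts truncated characters too, so
         * len can exceed size and size - len wraps on the next call. */
        len = snprintf(buf, size, "%s", "0123456789abcdefXYZ");
        printf("snprintf  returned %d (buffer holds %zu chars)\n",
               len, strlen(buf));

        /* scnprintf-style: the return value never exceeds what was stored,
         * which keeps len += ... accumulation loops in bounds. */
        len = scnprintf_like(buf, size, "%s", "0123456789abcdefXYZ");
        printf("scnprintf returned %d (buffer holds %zu chars)\n",
               len, strlen(buf));
        return 0;
}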
index b5c16b3a37b953133038d0450616d3c5b2be16e3..17be35392bb4f5134996a6e14b6bdc2547d40de9 100644 (file)
@@ -334,6 +334,8 @@ void ath9k_beacon_tasklet(unsigned long data)
        if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
                sc->beacon.bmisscnt++;
 
+               ath9k_hw_check_nav(ah);
+
                if (!ath9k_hw_check_alive(ah))
                        ieee80211_queue_work(sc->hw, &sc->hw_check_work);
 
index 5e8219a91e252b3d4a9ac2f5fda599caf4ae9ae8..278365b8a8955dda362292b99949e63155be354c 100644 (file)
@@ -63,13 +63,13 @@ static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
        return ath9k_hw_get_nf_limits(ah, chan)->nominal;
 }
 
-s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+                          s16 nf)
 {
        s8 noise = ATH_DEFAULT_NOISE_FLOOR;
 
-       if (chan && chan->noisefloor) {
-               s8 delta = chan->noisefloor -
-                          ATH9K_NF_CAL_NOISE_THRESH -
+       if (nf) {
+               s8 delta = nf - ATH9K_NF_CAL_NOISE_THRESH -
                           ath9k_hw_get_default_nf(ah, chan);
                if (delta > 0)
                        noise += delta;
@@ -119,7 +119,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
                        ath_dbg(common, CALIBRATE,
                                "NFmid[%d] (%d) > MAX (%d), %s\n",
                                i, h[i].privNF, limit->max,
-                               (cal->nfcal_interference ?
+                               (test_bit(NFCAL_INTF, &cal->cal_flags) ?
                                 "not corrected (due to interference)" :
                                 "correcting to MAX"));
 
@@ -130,7 +130,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
                         * we bypass this limit here in order to better deal
                         * with our environment.
                         */
-                       if (!cal->nfcal_interference)
+                       if (!test_bit(NFCAL_INTF, &cal->cal_flags))
                                h[i].privNF = limit->max;
                }
        }
@@ -141,7 +141,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
         * Re-enable the enforcement of the NF maximum again.
         */
        if (!high_nf_mid)
-               cal->nfcal_interference = false;
+               clear_bit(NFCAL_INTF, &cal->cal_flags);
 }
 
 static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
@@ -186,7 +186,6 @@ void ath9k_hw_reset_calibration(struct ath_hw *ah,
 bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
-       struct ieee80211_conf *conf = &common->hw->conf;
        struct ath9k_cal_list *currCal = ah->cal_list_curr;
 
        if (!ah->caldata)
@@ -208,7 +207,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
                return true;
 
        ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
-               currCal->calData->calType, conf->chandef.chan->center_freq);
+               currCal->calData->calType, ah->curchan->chan->center_freq);
 
        ah->caldata->CalValid &= ~currCal->calData->calType;
        currCal->calState = CAL_WAITING;
@@ -220,7 +219,7 @@ EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
 void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
 {
        if (ah->caldata)
-               ah->caldata->nfcal_pending = true;
+               set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
 
        REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
                    AR_PHY_AGC_CONTROL_ENABLE_NF);
@@ -242,7 +241,6 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
        int32_t val;
        u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
        struct ath_common *common = ath9k_hw_common(ah);
-       struct ieee80211_conf *conf = &common->hw->conf;
        s16 default_nf = ath9k_hw_get_default_nf(ah, chan);
 
        if (ah->caldata)
@@ -252,7 +250,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
                if (chainmask & (1 << i)) {
                        s16 nfval;
 
-                       if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+                       if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
                                continue;
 
                        if (h)
@@ -314,7 +312,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
        ENABLE_REGWRITE_BUFFER(ah);
        for (i = 0; i < NUM_NF_READINGS; i++) {
                if (chainmask & (1 << i)) {
-                       if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
+                       if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan))
                                continue;
 
                        val = REG_READ(ah, ah->nf_regs[i]);
@@ -391,10 +389,10 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
        }
 
        h = caldata->nfCalHist;
-       caldata->nfcal_pending = false;
+       clear_bit(NFCAL_PENDING, &caldata->cal_flags);
        ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
        chan->noisefloor = h[0].privNF;
-       ah->noise = ath9k_hw_getchan_noise(ah, chan);
+       ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
        return true;
 }
 EXPORT_SYMBOL(ath9k_hw_getnf);
@@ -408,7 +406,6 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
 
        ah->caldata->channel = chan->channel;
        ah->caldata->channelFlags = chan->channelFlags;
-       ah->caldata->chanmode = chan->chanmode;
        h = ah->caldata->nfCalHist;
        default_nf = ath9k_hw_get_default_nf(ah, chan);
        for (i = 0; i < NUM_NF_READINGS; i++) {
@@ -437,12 +434,12 @@ void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
         * the baseband update the internal NF value itself, similar to
         * what is being done after a full reset.
         */
-       if (!caldata->nfcal_pending)
+       if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
                ath9k_hw_start_nfcal(ah, true);
        else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
                ath9k_hw_getnf(ah, ah->curchan);
 
-       caldata->nfcal_interference = true;
+       set_bit(NFCAL_INTF, &caldata->cal_flags);
 }
 EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
 
index 3d70b8c2bcdd0a3ddad872f0ceff9c241f59f0a4..b8ed95e9a335d90d86e8d0dd4031e5abc2e8dffc 100644 (file)
@@ -116,7 +116,8 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
 void ath9k_hw_bstuck_nfcal(struct ath_hw *ah);
 void ath9k_hw_reset_calibration(struct ath_hw *ah,
                                struct ath9k_cal_list *currCal);
-s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
+s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan,
+                          s16 nf);
 
 
 #endif /* CALIB_H */
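With this change ath9k_hw_getchan_noise() receives the measured noise floor as an explicit nf argument (see the calib.c hunk above); the arithmetic itself is unchanged: start from the default floor and add only the positive part of nf minus a calibration margin minus the nominal NF. A standalone restatement with illustrative constants; the real ATH_DEFAULT_NOISE_FLOOR, ATH9K_NF_CAL_NOISE_THRESH and per-chip nominal values come from the driver headers, not from this sketch.

#include <stdio.h>

/* Illustrative stand-ins for the driver's constants. */
#define DEFAULT_NOISE_FLOOR  (-95)  /* dBm */
#define NF_CAL_MARGIN          5    /* dB  */

/* Same shape as ath9k_hw_getchan_noise() after this hunk: nf == 0 means
 * "no measurement"; only an NF more than the margin above nominal raises
 * the reported noise, and only by the excess. */
static short chan_noise(short nf, short nominal_nf)
{
        short noise = DEFAULT_NOISE_FLOOR;

        if (nf) {
                short delta = nf - NF_CAL_MARGIN - nominal_nf;

                if (delta > 0)
                        noise += delta;
        }
        return noise;
}

int main(void)
{
        /* With a nominal NF of -110 dBm: */
        printf("within margin: %d dBm\n", chan_noise(-108, -110)); /* -95 */
        printf("10 dB hot:     %d dBm\n", chan_noise(-95, -110));  /* -85 */
        printf("no cal yet:    %d dBm\n", chan_noise(0, -110));    /* -95 */
        return 0;
}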
index d3063c21e16c7efbd67d073c756de2198e5794e5..a7e5a05b2eff88e8d367cea6403058cc2813ebbc 100644 (file)
@@ -49,103 +49,64 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
 
-static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
-{
-       u32 chanmode = 0;
-
-       switch (chandef->chan->band) {
-       case IEEE80211_BAND_2GHZ:
-               switch (chandef->width) {
-               case NL80211_CHAN_WIDTH_20_NOHT:
-               case NL80211_CHAN_WIDTH_20:
-                       chanmode = CHANNEL_G_HT20;
-                       break;
-               case NL80211_CHAN_WIDTH_40:
-                       if (chandef->center_freq1 > chandef->chan->center_freq)
-                               chanmode = CHANNEL_G_HT40PLUS;
-                       else
-                               chanmode = CHANNEL_G_HT40MINUS;
-                       break;
-               default:
-                       break;
-               }
-               break;
-       case IEEE80211_BAND_5GHZ:
-               switch (chandef->width) {
-               case NL80211_CHAN_WIDTH_20_NOHT:
-               case NL80211_CHAN_WIDTH_20:
-                       chanmode = CHANNEL_A_HT20;
-                       break;
-               case NL80211_CHAN_WIDTH_40:
-                       if (chandef->center_freq1 > chandef->chan->center_freq)
-                               chanmode = CHANNEL_A_HT40PLUS;
-                       else
-                               chanmode = CHANNEL_A_HT40MINUS;
-                       break;
-               default:
-                       break;
-               }
-               break;
-       default:
-               break;
-       }
-
-       return chanmode;
-}
-
 /*
  * Update internal channel flags.
  */
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
-                              struct cfg80211_chan_def *chandef)
+static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
+                                     struct cfg80211_chan_def *chandef)
 {
-       ichan->channel = chandef->chan->center_freq;
-       ichan->chan = chandef->chan;
-
-       if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
-               ichan->chanmode = CHANNEL_G;
-               ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
-       } else {
-               ichan->chanmode = CHANNEL_A;
-               ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
-       }
+       struct ieee80211_channel *chan = chandef->chan;
+       u16 flags = 0;
+
+       ichan->channel = chan->center_freq;
+       ichan->chan = chan;
+
+       if (chan->band == IEEE80211_BAND_5GHZ)
+               flags |= CHANNEL_5GHZ;
 
        switch (chandef->width) {
        case NL80211_CHAN_WIDTH_5:
-               ichan->channelFlags |= CHANNEL_QUARTER;
+               flags |= CHANNEL_QUARTER;
                break;
        case NL80211_CHAN_WIDTH_10:
-               ichan->channelFlags |= CHANNEL_HALF;
+               flags |= CHANNEL_HALF;
                break;
        case NL80211_CHAN_WIDTH_20_NOHT:
                break;
        case NL80211_CHAN_WIDTH_20:
+               flags |= CHANNEL_HT;
+               break;
        case NL80211_CHAN_WIDTH_40:
-               ichan->chanmode = ath9k_get_extchanmode(chandef);
+               if (chandef->center_freq1 > chandef->chan->center_freq)
+                       flags |= CHANNEL_HT40PLUS | CHANNEL_HT;
+               else
+                       flags |= CHANNEL_HT40MINUS | CHANNEL_HT;
                break;
        default:
                WARN_ON(1);
        }
+
+       ichan->channelFlags = flags;
 }
-EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
 
 /*
  * Get the internal channel reference.
  */
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
-                                              struct ath_hw *ah)
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+                                           struct ath_hw *ah,
+                                           struct cfg80211_chan_def *chandef)
 {
-       struct ieee80211_channel *curchan = hw->conf.chandef.chan;
+       struct ieee80211_channel *curchan = chandef->chan;
        struct ath9k_channel *channel;
        u8 chan_idx;
 
        chan_idx = curchan->hw_value;
        channel = &ah->channels[chan_idx];
-       ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
+       ath9k_cmn_update_ichannel(channel, chandef);
 
        return channel;
 }
-EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
+EXPORT_SYMBOL(ath9k_cmn_get_channel);
 
 int ath9k_cmn_count_streams(unsigned int chainmask, int max)
 {
index e039bcbfbd7923b4f8013f1c4535915a83d4b1fc..eb85e1bdca889a4e43b46261be31e691ff403363 100644 (file)
        (((x) + ((mul)/2)) / (mul))
 
 int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
-void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
-                              struct cfg80211_chan_def *chandef);
-struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
-                                              struct ath_hw *ah);
+struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
+                                           struct ath_hw *ah,
+                                           struct cfg80211_chan_def *chandef);
 int ath9k_cmn_count_streams(unsigned int chainmask, int max);
 void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
                                  enum ath_stomp_type stomp_type);
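After this change ath9k_cmn_update_ichannel() folds band, width and secondary-channel offset into a single channelFlags word (CHANNEL_5GHZ, CHANNEL_HALF/QUARTER, CHANNEL_HT, CHANNEL_HT40PLUS/HT40MINUS), and callers such as ath9k_hw_loadnf() test IS_CHAN_HT40(chan) instead of consulting hw->conf. A sketch of just the flag derivation; the numeric flag values and the IS_HT40() test are assumptions for illustration, only the mapping mirrors the hunk above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values; the driver defines its own. */
#define CHANNEL_5GHZ       0x0001
#define CHANNEL_HALF       0x0002
#define CHANNEL_QUARTER    0x0004
#define CHANNEL_HT         0x0008
#define CHANNEL_HT40PLUS   0x0010
#define CHANNEL_HT40MINUS  0x0020

enum width { W20_NOHT, W20, W40, W5, W10 };

/* Mirrors the switch in ath9k_cmn_update_ichannel(): band, width and
 * secondary-channel position collapse into one flags word. */
static uint16_t build_chan_flags(bool is_5ghz, enum width w, bool ht40_above)
{
        uint16_t flags = 0;

        if (is_5ghz)
                flags |= CHANNEL_5GHZ;

        switch (w) {
        case W5:
                flags |= CHANNEL_QUARTER;
                break;
        case W10:
                flags |= CHANNEL_HALF;
                break;
        case W20_NOHT:
                break;
        case W20:
                flags |= CHANNEL_HT;
                break;
        case W40:
                flags |= CHANNEL_HT |
                         (ht40_above ? CHANNEL_HT40PLUS : CHANNEL_HT40MINUS);
                break;
        }
        return flags;
}

/* Assumed shape of the IS_CHAN_HT40()-style test used in calib.c. */
#define IS_HT40(flags) ((flags) & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS))

int main(void)
{
        uint16_t f = build_chan_flags(false, W40, true); /* 2 GHz HT40+ */

        printf("flags=%#x ht40=%d\n", (unsigned)f, !!IS_HT40(f));
        return 0;
}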
index c088744a6bfb6ea924fb97b07fe47671b53f5e74..83a2c59f680b0445173d25cc5ebccc2f7eb5777e 100644 (file)
@@ -104,37 +104,37 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
                return -ENOMEM;
 
        if (common->disable_ani) {
-               len += snprintf(buf + len, size - len, "%s: %s\n",
-                               "ANI", "DISABLED");
+               len += scnprintf(buf + len, size - len, "%s: %s\n",
+                                "ANI", "DISABLED");
                goto exit;
        }
 
-       len += snprintf(buf + len, size - len, "%15s: %s\n",
-                       "ANI", "ENABLED");
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "ANI RESET", ah->stats.ast_ani_reset);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "SPUR UP", ah->stats.ast_ani_spurup);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "SPUR DOWN", ah->stats.ast_ani_spurup);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "MRC-CCK ON", ah->stats.ast_ani_ccklow);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "FIR-STEP UP", ah->stats.ast_ani_stepup);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
-       len += snprintf(buf + len, size - len, "%15s: %u\n",
-                       "CCK ERRORS", ah->stats.ast_ani_cckerrs);
+       len += scnprintf(buf + len, size - len, "%15s: %s\n",
+                        "ANI", "ENABLED");
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "ANI RESET", ah->stats.ast_ani_reset);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "SPUR UP", ah->stats.ast_ani_spurup);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "SPUR DOWN", ah->stats.ast_ani_spurup);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "MRC-CCK ON", ah->stats.ast_ani_ccklow);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "MRC-CCK OFF", ah->stats.ast_ani_cckhigh);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "FIR-STEP UP", ah->stats.ast_ani_stepup);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "FIR-STEP DOWN", ah->stats.ast_ani_stepdown);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "INV LISTENTIME", ah->stats.ast_ani_lneg_or_lzero);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "OFDM ERRORS", ah->stats.ast_ani_ofdmerrs);
+       len += scnprintf(buf + len, size - len, "%15s: %u\n",
+                        "CCK ERRORS", ah->stats.ast_ani_cckerrs);
 exit:
        if (len > size)
                len = size;
@@ -280,70 +280,70 @@ static ssize_t read_file_antenna_diversity(struct file *file,
                return -ENOMEM;
 
        if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
-               len += snprintf(buf + len, size - len, "%s\n",
-                               "Antenna Diversity Combining is disabled");
+               len += scnprintf(buf + len, size - len, "%s\n",
+                                "Antenna Diversity Combining is disabled");
                goto exit;
        }
 
        ath9k_ps_wakeup(sc);
        ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
-       len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
-                       lna_conf_str[div_ant_conf.main_lna_conf]);
-       len += snprintf(buf + len, size - len, "Current ALT config  : %s\n",
-                       lna_conf_str[div_ant_conf.alt_lna_conf]);
-       len += snprintf(buf + len, size - len, "Average MAIN RSSI   : %d\n",
-                       as_main->rssi_avg);
-       len += snprintf(buf + len, size - len, "Average ALT RSSI    : %d\n\n",
-                       as_alt->rssi_avg);
+       len += scnprintf(buf + len, size - len, "Current MAIN config : %s\n",
+                        lna_conf_str[div_ant_conf.main_lna_conf]);
+       len += scnprintf(buf + len, size - len, "Current ALT config  : %s\n",
+                        lna_conf_str[div_ant_conf.alt_lna_conf]);
+       len += scnprintf(buf + len, size - len, "Average MAIN RSSI   : %d\n",
+                        as_main->rssi_avg);
+       len += scnprintf(buf + len, size - len, "Average ALT RSSI    : %d\n\n",
+                        as_alt->rssi_avg);
        ath9k_ps_restore(sc);
 
-       len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
-       len += snprintf(buf + len, size - len, "-------------------\n");
-
-       len += snprintf(buf + len, size - len, "%30s%15s\n",
-                       "MAIN", "ALT");
-       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-                       "TOTAL COUNT",
-                       as_main->recv_cnt,
-                       as_alt->recv_cnt);
-       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-                       "LNA1",
-                       as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
-                       as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
-       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-                       "LNA2",
-                       as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
-                       as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
-       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-                       "LNA1 + LNA2",
-                       as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
-                       as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
-       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-                       "LNA1 - LNA2",
-                       as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
-                       as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
-
-       len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
-       len += snprintf(buf + len, size - len, "--------------------\n");
-
-       len += snprintf(buf + len, size - len, "%30s%15s\n",
-                       "MAIN", "ALT");
-       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-                       "LNA1",
-                       as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
-                       as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
-       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-                       "LNA2",
-                       as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
-                       as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
-       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-                       "LNA1 + LNA2",
-                       as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
-                       as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
-       len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
-                       "LNA1 - LNA2",
-                       as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
-                       as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+       len += scnprintf(buf + len, size - len, "Packet Receive Cnt:\n");
+       len += scnprintf(buf + len, size - len, "-------------------\n");
+
+       len += scnprintf(buf + len, size - len, "%30s%15s\n",
+                        "MAIN", "ALT");
+       len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                        "TOTAL COUNT",
+                        as_main->recv_cnt,
+                        as_alt->recv_cnt);
+       len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                        "LNA1",
+                        as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
+                        as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
+       len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                        "LNA2",
+                        as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
+                        as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
+       len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                        "LNA1 + LNA2",
+                        as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+                        as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+       len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                        "LNA1 - LNA2",
+                        as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+                        as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+       len += scnprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
+       len += scnprintf(buf + len, size - len, "--------------------\n");
+
+       len += scnprintf(buf + len, size - len, "%30s%15s\n",
+                        "MAIN", "ALT");
+       len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                        "LNA1",
+                        as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
+                        as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
+       len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                        "LNA2",
+                        as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
+                        as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
+       len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                        "LNA1 + LNA2",
+                        as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+                        as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+       len += scnprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+                        "LNA1 - LNA2",
+                        as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+                        as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
 
 exit:
        if (len > size)
@@ -385,21 +385,21 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
                   (AR_MACMISC_MISC_OBS_BUS_1 <<
                    AR_MACMISC_MISC_OBS_BUS_MSB_S)));
 
-       len += snprintf(buf + len, DMA_BUF_LEN - len,
-                       "Raw DMA Debug values:\n");
+       len += scnprintf(buf + len, DMA_BUF_LEN - len,
+                        "Raw DMA Debug values:\n");
 
        for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
                if (i % 4 == 0)
-                       len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+                       len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
 
                val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
-               len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
-                               i, val[i]);
+               len += scnprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
+                                i, val[i]);
        }
 
-       len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
-       len += snprintf(buf + len, DMA_BUF_LEN - len,
-                       "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
+       len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
+       len += scnprintf(buf + len, DMA_BUF_LEN - len,
+                        "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
 
        for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
                if (i == 8) {
@@ -412,39 +412,39 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
                        dcuBase++;
                }
 
-               len += snprintf(buf + len, DMA_BUF_LEN - len,
-                       "%2d          %2x      %1x     %2x           %2x\n",
-                       i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
-                       (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
-                       val[2] & (0x7 << (i * 3)) >> (i * 3),
-                       (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
+               len += scnprintf(buf + len, DMA_BUF_LEN - len,
+                        "%2d          %2x      %1x     %2x           %2x\n",
+                        i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
+                        (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
+                        val[2] & (0x7 << (i * 3)) >> (i * 3),
+                        (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
        }
 
-       len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
+       len += scnprintf(buf + len, DMA_BUF_LEN - len, "\n");
 
-       len += snprintf(buf + len, DMA_BUF_LEN - len,
+       len += scnprintf(buf + len, DMA_BUF_LEN - len,
                "qcu_stitch state:   %2x    qcu_fetch state:        %2x\n",
                (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
-       len += snprintf(buf + len, DMA_BUF_LEN - len,
+       len += scnprintf(buf + len, DMA_BUF_LEN - len,
                "qcu_complete state: %2x    dcu_complete state:     %2x\n",
                (val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
-       len += snprintf(buf + len, DMA_BUF_LEN - len,
+       len += scnprintf(buf + len, DMA_BUF_LEN - len,
                "dcu_arb state:      %2x    dcu_fp state:           %2x\n",
                (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
-       len += snprintf(buf + len, DMA_BUF_LEN - len,
+       len += scnprintf(buf + len, DMA_BUF_LEN - len,
                "chan_idle_dur:     %3d    chan_idle_dur_valid:     %1d\n",
                (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
-       len += snprintf(buf + len, DMA_BUF_LEN - len,
+       len += scnprintf(buf + len, DMA_BUF_LEN - len,
                "txfifo_valid_0:      %1d    txfifo_valid_1:          %1d\n",
                (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
-       len += snprintf(buf + len, DMA_BUF_LEN - len,
+       len += scnprintf(buf + len, DMA_BUF_LEN - len,
                "txfifo_dcu_num_0:   %2d    txfifo_dcu_num_1:       %2d\n",
                (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
 
-       len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
-                       REG_READ_D(ah, AR_OBS_BUS_1));
-       len += snprintf(buf + len, DMA_BUF_LEN - len,
-                       "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
+       len += scnprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
+                        REG_READ_D(ah, AR_OBS_BUS_1));
+       len += scnprintf(buf + len, DMA_BUF_LEN - len,
+                        "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
 
        ath9k_ps_restore(sc);
 
@@ -530,9 +530,9 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
 
 #define PR_IS(a, s)                                            \
        do {                                                    \
-               len += snprintf(buf + len, mxlen - len,         \
-                               "%21s: %10u\n", a,              \
-                               sc->debug.stats.istats.s);      \
+               len += scnprintf(buf + len, mxlen - len,        \
+                                "%21s: %10u\n", a,             \
+                                sc->debug.stats.istats.s);     \
        } while (0)
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
@@ -563,8 +563,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
        PR_IS("GENTIMER", gen_timer);
        PR_IS("TOTAL", total);
 
-       len += snprintf(buf + len, mxlen - len,
-                       "SYNC_CAUSE stats:\n");
+       len += scnprintf(buf + len, mxlen - len,
+                        "SYNC_CAUSE stats:\n");
 
        PR_IS("Sync-All", sync_cause_all);
        PR_IS("RTC-IRQ", sync_rtc_irq);
@@ -655,16 +655,16 @@ static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
 
        ath_txq_lock(sc, txq);
 
-       len += snprintf(buf + len, size - len, "%s: %d ",
-                       "qnum", txq->axq_qnum);
-       len += snprintf(buf + len, size - len, "%s: %2d ",
-                       "qdepth", txq->axq_depth);
-       len += snprintf(buf + len, size - len, "%s: %2d ",
-                       "ampdu-depth", txq->axq_ampdu_depth);
-       len += snprintf(buf + len, size - len, "%s: %3d ",
-                       "pending", txq->pending_frames);
-       len += snprintf(buf + len, size - len, "%s: %d\n",
-                       "stopped", txq->stopped);
+       len += scnprintf(buf + len, size - len, "%s: %d ",
+                        "qnum", txq->axq_qnum);
+       len += scnprintf(buf + len, size - len, "%s: %2d ",
+                        "qdepth", txq->axq_depth);
+       len += scnprintf(buf + len, size - len, "%s: %2d ",
+                        "ampdu-depth", txq->axq_ampdu_depth);
+       len += scnprintf(buf + len, size - len, "%s: %3d ",
+                        "pending", txq->pending_frames);
+       len += scnprintf(buf + len, size - len, "%s: %d\n",
+                        "stopped", txq->stopped);
 
        ath_txq_unlock(sc, txq);
        return len;
@@ -687,11 +687,11 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                txq = sc->tx.txq_map[i];
-               len += snprintf(buf + len, size - len, "(%s):  ", qname[i]);
+               len += scnprintf(buf + len, size - len, "(%s):  ", qname[i]);
                len += print_queue(sc, txq, buf + len, size - len);
        }
 
-       len += snprintf(buf + len, size - len, "(CAB): ");
+       len += scnprintf(buf + len, size - len, "(CAB): ");
        len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
 
        if (len > size)
@@ -716,80 +716,82 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
        unsigned int reg;
        u32 rxfilter;
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "BSSID: %pM\n", common->curbssid);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "BSSID-MASK: %pM\n", common->bssidmask);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "OPMODE: %s\n", ath_opmode_to_string(sc->sc_ah->opmode));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "BSSID: %pM\n", common->curbssid);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "BSSID-MASK: %pM\n", common->bssidmask);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "OPMODE: %s\n",
+                        ath_opmode_to_string(sc->sc_ah->opmode));
 
        ath9k_ps_wakeup(sc);
        rxfilter = ath9k_hw_getrxfilter(sc->sc_ah);
        ath9k_ps_restore(sc);
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "RXFILTER: 0x%x", rxfilter);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "RXFILTER: 0x%x", rxfilter);
 
        if (rxfilter & ATH9K_RX_FILTER_UCAST)
-               len += snprintf(buf + len, sizeof(buf) - len, " UCAST");
+               len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
        if (rxfilter & ATH9K_RX_FILTER_MCAST)
-               len += snprintf(buf + len, sizeof(buf) - len, " MCAST");
+               len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
        if (rxfilter & ATH9K_RX_FILTER_BCAST)
-               len += snprintf(buf + len, sizeof(buf) - len, " BCAST");
+               len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
        if (rxfilter & ATH9K_RX_FILTER_CONTROL)
-               len += snprintf(buf + len, sizeof(buf) - len, " CONTROL");
+               len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
        if (rxfilter & ATH9K_RX_FILTER_BEACON)
-               len += snprintf(buf + len, sizeof(buf) - len, " BEACON");
+               len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
        if (rxfilter & ATH9K_RX_FILTER_PROM)
-               len += snprintf(buf + len, sizeof(buf) - len, " PROM");
+               len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
        if (rxfilter & ATH9K_RX_FILTER_PROBEREQ)
-               len += snprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
+               len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
        if (rxfilter & ATH9K_RX_FILTER_PHYERR)
-               len += snprintf(buf + len, sizeof(buf) - len, " PHYERR");
+               len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR");
        if (rxfilter & ATH9K_RX_FILTER_MYBEACON)
-               len += snprintf(buf + len, sizeof(buf) - len, " MYBEACON");
+               len += scnprintf(buf + len, sizeof(buf) - len, " MYBEACON");
        if (rxfilter & ATH9K_RX_FILTER_COMP_BAR)
-               len += snprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
+               len += scnprintf(buf + len, sizeof(buf) - len, " COMP_BAR");
        if (rxfilter & ATH9K_RX_FILTER_PSPOLL)
-               len += snprintf(buf + len, sizeof(buf) - len, " PSPOLL");
+               len += scnprintf(buf + len, sizeof(buf) - len, " PSPOLL");
        if (rxfilter & ATH9K_RX_FILTER_PHYRADAR)
-               len += snprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
+               len += scnprintf(buf + len, sizeof(buf) - len, " PHYRADAR");
        if (rxfilter & ATH9K_RX_FILTER_MCAST_BCAST_ALL)
-               len += snprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
+               len += scnprintf(buf + len, sizeof(buf) - len, " MCAST_BCAST_ALL");
        if (rxfilter & ATH9K_RX_FILTER_CONTROL_WRAPPER)
-               len += snprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
+               len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL_WRAPPER");
 
-       len += snprintf(buf + len, sizeof(buf) - len, "\n");
+       len += scnprintf(buf + len, sizeof(buf) - len, "\n");
 
        reg = sc->sc_ah->imask;
 
-       len += snprintf(buf + len, sizeof(buf) - len, "INTERRUPT-MASK: 0x%x", reg);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "INTERRUPT-MASK: 0x%x", reg);
 
        if (reg & ATH9K_INT_SWBA)
-               len += snprintf(buf + len, sizeof(buf) - len, " SWBA");
+               len += scnprintf(buf + len, sizeof(buf) - len, " SWBA");
        if (reg & ATH9K_INT_BMISS)
-               len += snprintf(buf + len, sizeof(buf) - len, " BMISS");
+               len += scnprintf(buf + len, sizeof(buf) - len, " BMISS");
        if (reg & ATH9K_INT_CST)
-               len += snprintf(buf + len, sizeof(buf) - len, " CST");
+               len += scnprintf(buf + len, sizeof(buf) - len, " CST");
        if (reg & ATH9K_INT_RX)
-               len += snprintf(buf + len, sizeof(buf) - len, " RX");
+               len += scnprintf(buf + len, sizeof(buf) - len, " RX");
        if (reg & ATH9K_INT_RXHP)
-               len += snprintf(buf + len, sizeof(buf) - len, " RXHP");
+               len += scnprintf(buf + len, sizeof(buf) - len, " RXHP");
        if (reg & ATH9K_INT_RXLP)
-               len += snprintf(buf + len, sizeof(buf) - len, " RXLP");
+               len += scnprintf(buf + len, sizeof(buf) - len, " RXLP");
        if (reg & ATH9K_INT_BB_WATCHDOG)
-               len += snprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
+               len += scnprintf(buf + len, sizeof(buf) - len, " BB_WATCHDOG");
 
-       len += snprintf(buf + len, sizeof(buf) - len, "\n");
+       len += scnprintf(buf + len, sizeof(buf) - len, "\n");
 
        ath9k_calculate_iter_data(hw, NULL, &iter_data);
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
-                       " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
-                       iter_data.naps, iter_data.nstations, iter_data.nmeshes,
-                       iter_data.nwds, iter_data.nadhocs,
-                       sc->nvifs, sc->nbcnvifs);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "VIF-COUNTS: AP: %i STA: %i MESH: %i WDS: %i"
+                        " ADHOC: %i TOTAL: %hi BEACON-VIF: %hi\n",
+                        iter_data.naps, iter_data.nstations, iter_data.nmeshes,
+                        iter_data.nwds, iter_data.nadhocs,
+                        sc->nvifs, sc->nbcnvifs);
 
        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -805,27 +807,27 @@ static ssize_t read_file_reset(struct file *file, char __user *user_buf,
        char buf[512];
        unsigned int len = 0;
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%17s: %2d\n", "Baseband Hang",
-                       sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%17s: %2d\n", "Baseband Watchdog",
-                       sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%17s: %2d\n", "Fatal HW Error",
-                       sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%17s: %2d\n", "TX HW error",
-                       sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%17s: %2d\n", "TX Path Hang",
-                       sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%17s: %2d\n", "PLL RX Hang",
-                       sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%17s: %2d\n", "MCI Reset",
-                       sc->debug.stats.reset[RESET_TYPE_MCI]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%17s: %2d\n", "Baseband Hang",
+                        sc->debug.stats.reset[RESET_TYPE_BB_HANG]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%17s: %2d\n", "Baseband Watchdog",
+                        sc->debug.stats.reset[RESET_TYPE_BB_WATCHDOG]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%17s: %2d\n", "Fatal HW Error",
+                        sc->debug.stats.reset[RESET_TYPE_FATAL_INT]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%17s: %2d\n", "TX HW error",
+                        sc->debug.stats.reset[RESET_TYPE_TX_ERROR]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%17s: %2d\n", "TX Path Hang",
+                        sc->debug.stats.reset[RESET_TYPE_TX_HANG]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%17s: %2d\n", "PLL RX Hang",
+                        sc->debug.stats.reset[RESET_TYPE_PLL_HANG]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%17s: %2d\n", "MCI Reset",
+                        sc->debug.stats.reset[RESET_TYPE_MCI]);
 
        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -902,14 +904,14 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
                              size_t count, loff_t *ppos)
 {
 #define PHY_ERR(s, p) \
-       len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
-                       sc->debug.stats.rxstats.phy_err_stats[p]);
+       len += scnprintf(buf + len, size - len, "%22s : %10u\n", s, \
+                        sc->debug.stats.rxstats.phy_err_stats[p]);
 
 #define RXS_ERR(s, e)                                      \
        do {                                                \
-               len += snprintf(buf + len, size - len,      \
-                               "%22s : %10u\n", s,         \
-                               sc->debug.stats.rxstats.e); \
+               len += scnprintf(buf + len, size - len,     \
+                                "%22s : %10u\n", s,        \
+                                sc->debug.stats.rxstats.e);\
        } while (0)
 
        struct ath_softc *sc = file->private_data;
@@ -1048,6 +1050,9 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
        char buf[32];
        ssize_t len;
 
+       if (config_enabled(CONFIG_ATH9K_TX99))
+               return -EOPNOTSUPP;
+
        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;
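
The new guard at the top of write_file_spec_scan_ctl() uses config_enabled(), which in kernels of this vintage expands to a compile-time 0/1 constant for a boolean Kconfig symbol, so the check disappears entirely when TX99 support is compiled out. A hedged sketch of the idiom, mirroring the handler above but with a hypothetical CONFIG_EXAMPLE_FEATURE symbol:

/* Illustrative only: reject a debugfs write when a conflicting feature is
 * built in.  config_enabled(CONFIG_x) folds to 0 or 1 at compile time, so
 * the dead branch is removed by the compiler without any #ifdef. */
static ssize_t write_file_example(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
{
	if (config_enabled(CONFIG_EXAMPLE_FEATURE))
		return -EOPNOTSUPP;

	return count;
}
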
@@ -1439,22 +1444,22 @@ static ssize_t read_file_dump_nfcal(struct file *file, char __user *user_buf,
        if (!buf)
                return -ENOMEM;
 
-       len += snprintf(buf + len, size - len,
-                       "Channel Noise Floor : %d\n", ah->noise);
-       len += snprintf(buf + len, size - len,
-                       "Chain | privNF | # Readings | NF Readings\n");
+       len += scnprintf(buf + len, size - len,
+                        "Channel Noise Floor : %d\n", ah->noise);
+       len += scnprintf(buf + len, size - len,
+                        "Chain | privNF | # Readings | NF Readings\n");
        for (i = 0; i < NUM_NF_READINGS; i++) {
                if (!(chainmask & (1 << i)) ||
                    ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
                        continue;
 
                nread = AR_PHY_CCA_FILTERWINDOW_LENGTH - h[i].invalidNFcount;
-               len += snprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
-                               i, h[i].privNF, nread);
+               len += scnprintf(buf + len, size - len, " %d\t %d\t %d\t\t",
+                                i, h[i].privNF, nread);
                for (j = 0; j < nread; j++)
-                       len += snprintf(buf + len, size - len,
-                                       " %d", h[i].nfCalBuffer[j]);
-               len += snprintf(buf + len, size - len, "\n");
+                       len += scnprintf(buf + len, size - len,
+                                        " %d", h[i].nfCalBuffer[j]);
+               len += scnprintf(buf + len, size - len, "\n");
        }
 
        if (len > size)
@@ -1543,8 +1548,8 @@ static ssize_t read_file_btcoex(struct file *file, char __user *user_buf,
                return -ENOMEM;
 
        if (!sc->sc_ah->common.btcoex_enabled) {
-               len = snprintf(buf, size, "%s\n",
-                              "BTCOEX is disabled");
+               len = scnprintf(buf, size, "%s\n",
+                               "BTCOEX is disabled");
                goto exit;
        }
 
@@ -1582,43 +1587,43 @@ static ssize_t read_file_node_stat(struct file *file, char __user *user_buf,
                return -ENOMEM;
 
        if (!an->sta->ht_cap.ht_supported) {
-               len = snprintf(buf, size, "%s\n",
-                              "HT not supported");
+               len = scnprintf(buf, size, "%s\n",
+                               "HT not supported");
                goto exit;
        }
 
-       len = snprintf(buf, size, "Max-AMPDU: %d\n",
-                      an->maxampdu);
-       len += snprintf(buf + len, size - len, "MPDU Density: %d\n\n",
-                       an->mpdudensity);
+       len = scnprintf(buf, size, "Max-AMPDU: %d\n",
+                       an->maxampdu);
+       len += scnprintf(buf + len, size - len, "MPDU Density: %d\n\n",
+                        an->mpdudensity);
 
-       len += snprintf(buf + len, size - len,
-                       "%2s%7s\n", "AC", "SCHED");
+       len += scnprintf(buf + len, size - len,
+                        "%2s%7s\n", "AC", "SCHED");
 
        for (acno = 0, ac = &an->ac[acno];
             acno < IEEE80211_NUM_ACS; acno++, ac++) {
                txq = ac->txq;
                ath_txq_lock(sc, txq);
-               len += snprintf(buf + len, size - len,
-                               "%2d%7d\n",
-                               acno, ac->sched);
+               len += scnprintf(buf + len, size - len,
+                                "%2d%7d\n",
+                                acno, ac->sched);
                ath_txq_unlock(sc, txq);
        }
 
-       len += snprintf(buf + len, size - len,
-                       "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
-                       "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
-                       "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
+       len += scnprintf(buf + len, size - len,
+                        "\n%3s%11s%10s%10s%10s%10s%9s%6s%8s\n",
+                        "TID", "SEQ_START", "SEQ_NEXT", "BAW_SIZE",
+                        "BAW_HEAD", "BAW_TAIL", "BAR_IDX", "SCHED", "PAUSED");
 
        for (tidno = 0, tid = &an->tid[tidno];
             tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
                txq = tid->ac->txq;
                ath_txq_lock(sc, txq);
-               len += snprintf(buf + len, size - len,
-                               "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
-                               tid->tidno, tid->seq_start, tid->seq_next,
-                               tid->baw_size, tid->baw_head, tid->baw_tail,
-                               tid->bar_index, tid->sched, tid->paused);
+               len += scnprintf(buf + len, size - len,
+                                "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+                                tid->tidno, tid->seq_start, tid->seq_next,
+                                tid->baw_size, tid->baw_head, tid->baw_tail,
+                                tid->bar_index, tid->sched, tid->paused);
                ath_txq_unlock(sc, txq);
        }
 exit:
@@ -1773,6 +1778,111 @@ void ath9k_deinit_debug(struct ath_softc *sc)
        }
 }
 
+static ssize_t read_file_tx99(struct file *file, char __user *user_buf,
+                             size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       char buf[3];
+       unsigned int len;
+
+       len = sprintf(buf, "%d\n", sc->tx99_state);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+                              size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       char buf[32];
+       bool start;
+       ssize_t len;
+       int r;
+
+       if (sc->nvifs > 1)
+               return -EOPNOTSUPP;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       if (strtobool(buf, &start))
+               return -EINVAL;
+
+       if (start == sc->tx99_state) {
+               if (!start)
+                       return count;
+               ath_dbg(common, XMIT, "Resetting TX99\n");
+               ath9k_tx99_deinit(sc);
+       }
+
+       if (!start) {
+               ath9k_tx99_deinit(sc);
+               return count;
+       }
+
+       r = ath9k_tx99_init(sc);
+       if (r)
+               return r;
+
+       return count;
+}
+
+static const struct file_operations fops_tx99 = {
+       .read = read_file_tx99,
+       .write = write_file_tx99,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+static ssize_t read_file_tx99_power(struct file *file,
+                                   char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       char buf[32];
+       unsigned int len;
+
+       len = sprintf(buf, "%d (%d dBm)\n",
+                     sc->tx99_power,
+                     sc->tx99_power / 2);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_tx99_power(struct file *file,
+                                    const char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       struct ath_softc *sc = file->private_data;
+       int r;
+       u8 tx_power;
+
+       r = kstrtou8_from_user(user_buf, count, 0, &tx_power);
+       if (r)
+               return r;
+
+       if (tx_power > MAX_RATE_POWER)
+               return -EINVAL;
+
+       sc->tx99_power = tx_power;
+
+       ath9k_ps_wakeup(sc);
+       ath9k_hw_tx99_set_txpower(sc->sc_ah, sc->tx99_power);
+       ath9k_ps_restore(sc);
+
+       return count;
+}
+
+static const struct file_operations fops_tx99_power = {
+       .read = read_file_tx99_power,
+       .write = write_file_tx99_power,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 int ath9k_init_debug(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
@@ -1864,5 +1974,15 @@ int ath9k_init_debug(struct ath_hw *ah)
        debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
                            &fops_btcoex);
 #endif
+       if (config_enabled(CONFIG_ATH9K_TX99) &&
+           AR_SREV_9300_20_OR_LATER(ah)) {
+               debugfs_create_file("tx99", S_IRUSR | S_IWUSR,
+                                   sc->debug.debugfs_phy, sc,
+                                   &fops_tx99);
+               debugfs_create_file("tx99_power", S_IRUSR | S_IWUSR,
+                                   sc->debug.debugfs_phy, sc,
+                                   &fops_tx99_power);
+       }
+
        return 0;
 }
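
The two new debugfs entries give a small control surface for the TX99 continuous-transmit mode: tx99_power takes the power in half-dB steps (rejected above MAX_RATE_POWER) and tx99 takes a boolean understood by strtobool(). A hypothetical userspace sketch; the debugfs path below assumes the usual /sys/kernel/debug/ieee80211/phy0/ath9k/ location, which is not spelled out in the patch:

/* Hypothetical user-space helper: set 20 dBm (40 half-dB units), then start. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror(path);
	close(fd);
	return 0;
}

int main(void)
{
	const char *base = "/sys/kernel/debug/ieee80211/phy0/ath9k";
	char path[128];

	snprintf(path, sizeof(path), "%s/tx99_power", base);
	if (write_str(path, "40"))		/* 40 * 0.5 dB = 20 dBm */
		return 1;

	snprintf(path, sizeof(path), "%s/tx99", base);
	return write_str(path, "1") ? 1 : 0;	/* strtobool(): "1"/"0", "y"/"n" */
}
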
index 6e1556fa2f3e88713fa834cb56d3a4d9f6d87c48..d6e3fa4299a4fa5e4f85e49502a900920d48f718 100644 (file)
@@ -193,12 +193,12 @@ struct ath_tx_stats {
 #define TXSTATS sc->debug.stats.txstats
 #define PR(str, elem)                                                  \
        do {                                                            \
-               len += snprintf(buf + len, size - len,                  \
-                               "%s%13u%11u%10u%10u\n", str,            \
-                               TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem, \
-                               TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem, \
-                               TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem, \
-                               TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
+               len += scnprintf(buf + len, size - len,                 \
+                                "%s%13u%11u%10u%10u\n", str,           \
+                                TXSTATS[PR_QNUM(IEEE80211_AC_BE)].elem,\
+                                TXSTATS[PR_QNUM(IEEE80211_AC_BK)].elem,\
+                                TXSTATS[PR_QNUM(IEEE80211_AC_VI)].elem,\
+                                TXSTATS[PR_QNUM(IEEE80211_AC_VO)].elem); \
        } while(0)
 
 #define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
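
PR(), like RXS_ERR() earlier, is wrapped in do { } while (0) so that a multi-statement macro still behaves as a single statement after an un-braced if/else. A minimal generic sketch with a hypothetical LOG_TWO_FIELDS() macro:

/* Hypothetical illustration of the do { } while (0) idiom used by PR(). */
#define LOG_TWO_FIELDS(a, b)					\
	do {							\
		pr_debug("first field : %u\n", (a));		\
		pr_debug("second field: %u\n", (b));		\
	} while (0)

/* Safe even as the body of an un-braced conditional:
 *	if (verbose)
 *		LOG_TWO_FIELDS(x, y);
 *	else
 *		...
 */
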
index 3c839f06a06afb7903382fcf91554fa8046d252f..c6fa3d5b5d74e3137fc34f08a430ccb1c4a670e5 100644 (file)
@@ -17,7 +17,7 @@
 
 #ifndef ATH9K_DFS_H
 #define ATH9K_DFS_H
-#include "dfs_pattern_detector.h"
+#include "../dfs_pattern_detector.h"
 
 #if defined(CONFIG_ATH9K_DFS_CERTIFIED)
 /**
index 3c6e4138a95d13405ed8b2cff14ffbd5a2f52bb5..90b8342d1ed4bd2e95389541f299d49d59261e6a 100644 (file)
 
 #include "ath9k.h"
 #include "dfs_debug.h"
+#include "../dfs_pattern_detector.h"
 
-
-struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
+static struct ath_dfs_pool_stats dfs_pool_stats = { 0 };
 
 #define ATH9K_DFS_STAT(s, p) \
-       len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
-                       sc->debug.stats.dfs_stats.p);
+       len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+                        sc->debug.stats.dfs_stats.p);
 #define ATH9K_DFS_POOL_STAT(s, p) \
-       len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
-                       global_dfs_pool_stats.p);
+       len += scnprintf(buf + len, size - len, "%28s : %10u\n", s, \
+                        dfs_pool_stats.p);
 
 static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
@@ -44,12 +44,15 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
        if (buf == NULL)
                return -ENOMEM;
 
-       len += snprintf(buf + len, size - len, "DFS support for "
-                       "macVersion = 0x%x, macRev = 0x%x: %s\n",
-                       hw_ver->macVersion, hw_ver->macRev,
-                       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+       if (sc->dfs_detector)
+               dfs_pool_stats = sc->dfs_detector->get_stats(sc->dfs_detector);
+
+       len += scnprintf(buf + len, size - len, "DFS support for "
+                        "macVersion = 0x%x, macRev = 0x%x: %s\n",
+                        hw_ver->macVersion, hw_ver->macRev,
+                        (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
                                        "enabled" : "disabled");
-       len += snprintf(buf + len, size - len, "Pulse detector statistics:\n");
+       len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
        ATH9K_DFS_STAT("pulse events reported   ", pulses_total);
        ATH9K_DFS_STAT("invalid pulse events    ", pulses_no_dfs);
        ATH9K_DFS_STAT("DFS pulses detected     ", pulses_detected);
@@ -59,11 +62,12 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
        ATH9K_DFS_STAT("Primary channel pulses  ", pri_phy_errors);
        ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
        ATH9K_DFS_STAT("Dual channel pulses     ", dc_phy_errors);
-       len += snprintf(buf + len, size - len, "Radar detector statistics "
-                       "(current DFS region: %d)\n", sc->dfs_detector->region);
+       len += scnprintf(buf + len, size - len, "Radar detector statistics "
+                        "(current DFS region: %d)\n",
+                        sc->dfs_detector->region);
        ATH9K_DFS_STAT("Pulse events processed  ", pulses_processed);
        ATH9K_DFS_STAT("Radars detected         ", radar_detected);
-       len += snprintf(buf + len, size - len, "Global Pool statistics:\n");
+       len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
        ATH9K_DFS_POOL_STAT("Pool references         ", pool_reference);
        ATH9K_DFS_POOL_STAT("Pulses allocated        ", pulse_allocated);
        ATH9K_DFS_POOL_STAT("Pulses alloc error      ", pulse_alloc_error);
index e36810a4b585c51515e6fc3336410e1b58e3a9b0..0a7ddf4c88c93eb55589dedff95394e2157c3510 100644 (file)
@@ -51,25 +51,11 @@ struct ath_dfs_stats {
        u32 radar_detected;
 };
 
-/**
- * struct ath_dfs_pool_stats - DFS Statistics for global pools
- */
-struct ath_dfs_pool_stats {
-       u32 pool_reference;
-       u32 pulse_allocated;
-       u32 pulse_alloc_error;
-       u32 pulse_used;
-       u32 pseq_allocated;
-       u32 pseq_alloc_error;
-       u32 pseq_used;
-};
 #if defined(CONFIG_ATH9K_DFS_DEBUGFS)
 
 #define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
 void ath9k_dfs_init_debug(struct ath_softc *sc);
 
-#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
-#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
 extern struct ath_dfs_pool_stats global_dfs_pool_stats;
 
 #else
@@ -77,8 +63,6 @@ extern struct ath_dfs_pool_stats global_dfs_pool_stats;
 #define DFS_STAT_INC(sc, c) do { } while (0)
 static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
 
-#define DFS_POOL_STAT_INC(c) do { } while (0)
-#define DFS_POOL_STAT_DEC(c) do { } while (0)
 #endif /* CONFIG_ATH9K_DFS_DEBUGFS */
 
 #endif /* ATH9K_DFS_DEBUG_H */
index 9ea8e4b779c97c99b329619616e1ed232a1f5044..b4091716e9b3322881a6ed65fc3996a57e59a8b8 100644 (file)
@@ -129,10 +129,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
        struct base_eep_header_4k *pBase = &eep->baseEepHeader;
 
        if (!dump_base_hdr) {
-               len += snprintf(buf + len, size - len,
-                               "%20s :\n", "2GHz modal Header");
+               len += scnprintf(buf + len, size - len,
+                                "%20s :\n", "2GHz modal Header");
                len = ath9k_dump_4k_modal_eeprom(buf, len, size,
-                                                 &eep->modalHeader);
+                                                &eep->modalHeader);
                goto out;
        }
 
@@ -160,8 +160,8 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
        PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
        PR_EEP("TX Gain type", pBase->txGainType);
 
-       len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-                       pBase->macAddr);
+       len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+                        pBase->macAddr);
 
 out:
        if (len > size)
index 3ae1f3df063758f000519a5f7d08984e8fb474b2..e1d0c217c104f9ba6c2b0a1e55a941e2ccc63d20 100644 (file)
@@ -125,8 +125,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
        struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
 
        if (!dump_base_hdr) {
-               len += snprintf(buf + len, size - len,
-                               "%20s :\n", "2GHz modal Header");
+               len += scnprintf(buf + len, size - len,
+                                "%20s :\n", "2GHz modal Header");
                len = ar9287_dump_modal_eeprom(buf, len, size,
                                                &eep->modalHeader);
                goto out;
@@ -157,8 +157,8 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
        PR_EEP("Power Table Offset", pBase->pwrTableOffset);
        PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
 
-       len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-                       pBase->macAddr);
+       len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+                        pBase->macAddr);
 
 out:
        if (len > size)
index 1c25368b3836f756f39c7f17f884ba43a2fb233c..39107e31e79aaa98fa93453f74db8b4e416ae457 100644 (file)
@@ -205,12 +205,12 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
        struct base_eep_header *pBase = &eep->baseEepHeader;
 
        if (!dump_base_hdr) {
-               len += snprintf(buf + len, size - len,
-                               "%20s :\n", "2GHz modal Header");
+               len += scnprintf(buf + len, size - len,
+                                "%20s :\n", "2GHz modal Header");
                len = ath9k_def_dump_modal_eeprom(buf, len, size,
                                                   &eep->modalHeader[0]);
-               len += snprintf(buf + len, size - len,
-                               "%20s :\n", "5GHz modal Header");
+               len += scnprintf(buf + len, size - len,
+                                "%20s :\n", "5GHz modal Header");
                len = ath9k_def_dump_modal_eeprom(buf, len, size,
                                                   &eep->modalHeader[1]);
                goto out;
@@ -240,8 +240,8 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
        PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
        PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
 
-       len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-                       pBase->macAddr);
+       len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+                        pBase->macAddr);
 
 out:
        if (len > size)
index 4b412aaf4f3699e65396598be9f7485f2af0754f..c34f21241da947e5297bc7ad18e6de9cc3430825 100644 (file)
@@ -522,22 +522,22 @@ static int ath9k_dump_mci_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
        ATH_DUMP_BTCOEX("Concurrent Tx", btcoex_hw->mci.concur_tx);
        ATH_DUMP_BTCOEX("Concurrent RSSI cnt", btcoex->rssi_count);
 
-       len += snprintf(buf + len, size - len, "BT Weights: ");
+       len += scnprintf(buf + len, size - len, "BT Weights: ");
        for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
-               len += snprintf(buf + len, size - len, "%08x ",
-                               btcoex_hw->bt_weight[i]);
-       len += snprintf(buf + len, size - len, "\n");
-       len += snprintf(buf + len, size - len, "WLAN Weights: ");
+               len += scnprintf(buf + len, size - len, "%08x ",
+                                btcoex_hw->bt_weight[i]);
+       len += scnprintf(buf + len, size - len, "\n");
+       len += scnprintf(buf + len, size - len, "WLAN Weights: ");
        for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
-               len += snprintf(buf + len, size - len, "%08x ",
-                               btcoex_hw->wlan_weight[i]);
-       len += snprintf(buf + len, size - len, "\n");
-       len += snprintf(buf + len, size - len, "Tx Priorities: ");
+               len += scnprintf(buf + len, size - len, "%08x ",
+                                btcoex_hw->wlan_weight[i]);
+       len += scnprintf(buf + len, size - len, "\n");
+       len += scnprintf(buf + len, size - len, "Tx Priorities: ");
        for (i = 0; i < ATH_BTCOEX_STOMP_MAX; i++)
-               len += snprintf(buf + len, size - len, "%08x ",
+               len += scnprintf(buf + len, size - len, "%08x ",
                                btcoex_hw->tx_prio[i]);
 
-       len += snprintf(buf + len, size - len, "\n");
+       len += scnprintf(buf + len, size - len, "\n");
 
        return len;
 }
index c1b45e2f848124bdeb0d28a156ed570f4928e635..fb071ee4fcfb3dd5969005a40537908ec92791c6 100644 (file)
@@ -37,29 +37,29 @@ static ssize_t read_file_tgt_int_stats(struct file *file, char __user *user_buf,
 
        ath9k_htc_ps_restore(priv);
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "RX",
-                       be32_to_cpu(cmd_rsp.rx));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "RX",
+                        be32_to_cpu(cmd_rsp.rx));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "RXORN",
-                       be32_to_cpu(cmd_rsp.rxorn));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "RXORN",
+                        be32_to_cpu(cmd_rsp.rxorn));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "RXEOL",
-                       be32_to_cpu(cmd_rsp.rxeol));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "RXEOL",
+                        be32_to_cpu(cmd_rsp.rxeol));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "TXURN",
-                       be32_to_cpu(cmd_rsp.txurn));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "TXURN",
+                        be32_to_cpu(cmd_rsp.txurn));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "TXTO",
-                       be32_to_cpu(cmd_rsp.txto));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "TXTO",
+                        be32_to_cpu(cmd_rsp.txto));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "CST",
-                       be32_to_cpu(cmd_rsp.cst));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "CST",
+                        be32_to_cpu(cmd_rsp.cst));
 
        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -95,41 +95,41 @@ static ssize_t read_file_tgt_tx_stats(struct file *file, char __user *user_buf,
 
        ath9k_htc_ps_restore(priv);
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "Xretries",
-                       be32_to_cpu(cmd_rsp.xretries));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "Xretries",
+                        be32_to_cpu(cmd_rsp.xretries));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "FifoErr",
-                       be32_to_cpu(cmd_rsp.fifoerr));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "FifoErr",
+                        be32_to_cpu(cmd_rsp.fifoerr));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "Filtered",
-                       be32_to_cpu(cmd_rsp.filtered));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "Filtered",
+                        be32_to_cpu(cmd_rsp.filtered));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "TimerExp",
-                       be32_to_cpu(cmd_rsp.timer_exp));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "TimerExp",
+                        be32_to_cpu(cmd_rsp.timer_exp));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "ShortRetries",
-                       be32_to_cpu(cmd_rsp.shortretries));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "ShortRetries",
+                        be32_to_cpu(cmd_rsp.shortretries));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "LongRetries",
-                       be32_to_cpu(cmd_rsp.longretries));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "LongRetries",
+                        be32_to_cpu(cmd_rsp.longretries));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "QueueNull",
-                       be32_to_cpu(cmd_rsp.qnull));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "QueueNull",
+                        be32_to_cpu(cmd_rsp.qnull));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "EncapFail",
-                       be32_to_cpu(cmd_rsp.encap_fail));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "EncapFail",
+                        be32_to_cpu(cmd_rsp.encap_fail));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "NoBuf",
-                       be32_to_cpu(cmd_rsp.nobuf));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "NoBuf",
+                        be32_to_cpu(cmd_rsp.nobuf));
 
        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -165,17 +165,17 @@ static ssize_t read_file_tgt_rx_stats(struct file *file, char __user *user_buf,
 
        ath9k_htc_ps_restore(priv);
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "NoBuf",
-                       be32_to_cpu(cmd_rsp.nobuf));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "NoBuf",
+                        be32_to_cpu(cmd_rsp.nobuf));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "HostSend",
-                       be32_to_cpu(cmd_rsp.host_send));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "HostSend",
+                        be32_to_cpu(cmd_rsp.host_send));
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "HostDone",
-                       be32_to_cpu(cmd_rsp.host_done));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "HostDone",
+                        be32_to_cpu(cmd_rsp.host_done));
 
        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -197,37 +197,37 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
        char buf[512];
        unsigned int len = 0;
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "Buffers queued",
-                       priv->debug.tx_stats.buf_queued);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "Buffers completed",
-                       priv->debug.tx_stats.buf_completed);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "SKBs queued",
-                       priv->debug.tx_stats.skb_queued);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "SKBs success",
-                       priv->debug.tx_stats.skb_success);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "SKBs failed",
-                       priv->debug.tx_stats.skb_failed);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "CAB queued",
-                       priv->debug.tx_stats.cab_queued);
-
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "BE queued",
-                       priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "BK queued",
-                       priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "VI queued",
-                       priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "%20s : %10u\n", "VO queued",
-                       priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "Buffers queued",
+                        priv->debug.tx_stats.buf_queued);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "Buffers completed",
+                        priv->debug.tx_stats.buf_completed);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "SKBs queued",
+                        priv->debug.tx_stats.skb_queued);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "SKBs success",
+                        priv->debug.tx_stats.skb_success);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "SKBs failed",
+                        priv->debug.tx_stats.skb_failed);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "CAB queued",
+                        priv->debug.tx_stats.cab_queued);
+
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "BE queued",
+                        priv->debug.tx_stats.queue_stats[IEEE80211_AC_BE]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "BK queued",
+                        priv->debug.tx_stats.queue_stats[IEEE80211_AC_BK]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "VI queued",
+                        priv->debug.tx_stats.queue_stats[IEEE80211_AC_VI]);
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "%20s : %10u\n", "VO queued",
+                        priv->debug.tx_stats.queue_stats[IEEE80211_AC_VO]);
 
        if (len > sizeof(buf))
                len = sizeof(buf);
@@ -273,8 +273,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
                              size_t count, loff_t *ppos)
 {
 #define PHY_ERR(s, p)                                                  \
-       len += snprintf(buf + len, size - len, "%20s : %10u\n", s,      \
-                       priv->debug.rx_stats.err_phy_stats[p]);
+       len += scnprintf(buf + len, size - len, "%20s : %10u\n", s,     \
+                        priv->debug.rx_stats.err_phy_stats[p]);
 
        struct ath9k_htc_priv *priv = file->private_data;
        char *buf;
@@ -285,37 +285,37 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
        if (buf == NULL)
                return -ENOMEM;
 
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10u\n", "SKBs allocated",
-                       priv->debug.rx_stats.skb_allocated);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10u\n", "SKBs completed",
-                       priv->debug.rx_stats.skb_completed);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10u\n", "SKBs Dropped",
-                       priv->debug.rx_stats.skb_dropped);
-
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10u\n", "CRC ERR",
-                       priv->debug.rx_stats.err_crc);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10u\n", "DECRYPT CRC ERR",
-                       priv->debug.rx_stats.err_decrypt_crc);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10u\n", "MIC ERR",
-                       priv->debug.rx_stats.err_mic);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10u\n", "PRE-DELIM CRC ERR",
-                       priv->debug.rx_stats.err_pre_delim);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10u\n", "POST-DELIM CRC ERR",
-                       priv->debug.rx_stats.err_post_delim);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10u\n", "DECRYPT BUSY ERR",
-                       priv->debug.rx_stats.err_decrypt_busy);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10u\n", "TOTAL PHY ERR",
-                       priv->debug.rx_stats.err_phy);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10u\n", "SKBs allocated",
+                        priv->debug.rx_stats.skb_allocated);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10u\n", "SKBs completed",
+                        priv->debug.rx_stats.skb_completed);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10u\n", "SKBs Dropped",
+                        priv->debug.rx_stats.skb_dropped);
+
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10u\n", "CRC ERR",
+                        priv->debug.rx_stats.err_crc);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10u\n", "DECRYPT CRC ERR",
+                        priv->debug.rx_stats.err_decrypt_crc);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10u\n", "MIC ERR",
+                        priv->debug.rx_stats.err_mic);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10u\n", "PRE-DELIM CRC ERR",
+                        priv->debug.rx_stats.err_pre_delim);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10u\n", "POST-DELIM CRC ERR",
+                        priv->debug.rx_stats.err_post_delim);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10u\n", "DECRYPT BUSY ERR",
+                        priv->debug.rx_stats.err_decrypt_busy);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10u\n", "TOTAL PHY ERR",
+                        priv->debug.rx_stats.err_phy);
 
 
        PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
@@ -372,16 +372,16 @@ static ssize_t read_file_slot(struct file *file, char __user *user_buf,
 
        spin_lock_bh(&priv->tx.tx_lock);
 
-       len += snprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
+       len += scnprintf(buf + len, sizeof(buf) - len, "TX slot bitmap : ");
 
        len += bitmap_scnprintf(buf + len, sizeof(buf) - len,
                               priv->tx.tx_slot, MAX_TX_BUF_NUM);
 
-       len += snprintf(buf + len, sizeof(buf) - len, "\n");
+       len += scnprintf(buf + len, sizeof(buf) - len, "\n");
 
-       len += snprintf(buf + len, sizeof(buf) - len,
-                       "Used slots     : %d\n",
-                       bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
+       len += scnprintf(buf + len, sizeof(buf) - len,
+                        "Used slots     : %d\n",
+                        bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM));
 
        spin_unlock_bh(&priv->tx.tx_lock);
 
@@ -405,30 +405,30 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
        char buf[512];
        unsigned int len = 0;
 
-       len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-                       "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
+       len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+                        "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue));
 
-       len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-                       "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
+       len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+                        "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue));
 
-       len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-                       "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
+       len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+                        "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue));
 
-       len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-                       "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
+       len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+                        "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue));
 
-       len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-                       "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
+       len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+                        "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue));
 
-       len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-                       "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
+       len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+                        "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue));
 
-       len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-                       "Failed queue", skb_queue_len(&priv->tx.tx_failed));
+       len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+                        "Failed queue", skb_queue_len(&priv->tx.tx_failed));
 
        spin_lock_bh(&priv->tx.tx_lock);
-       len += snprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
-                       "Queued count", priv->tx.queued_cnt);
+       len += scnprintf(buf + len, sizeof(buf) - len, "%20s : %10u\n",
+                        "Queued count", priv->tx.queued_cnt);
        spin_unlock_bh(&priv->tx.tx_lock);
 
        if (len > sizeof(buf))
@@ -507,70 +507,70 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
        if (buf == NULL)
                return -ENOMEM;
 
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n", "Major Version",
-                       pBase->version >> 12);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n", "Minor Version",
-                       pBase->version & 0xFFF);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n", "Checksum",
-                       pBase->checksum);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n", "Length",
-                       pBase->length);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n", "RegDomain1",
-                       pBase->regDmn[0]);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n", "RegDomain2",
-                       pBase->regDmn[1]);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "TX Mask", pBase->txMask);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "RX Mask", pBase->rxMask);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "Allow 5GHz",
-                       !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "Allow 2GHz",
-                       !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "Disable 2GHz HT20",
-                       !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "Disable 2GHz HT40",
-                       !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "Disable 5Ghz HT20",
-                       !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "Disable 5Ghz HT40",
-                       !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "Big Endian",
-                       !!(pBase->eepMisc & 0x01));
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "Cal Bin Major Ver",
-                       (pBase->binBuildNumber >> 24) & 0xFF);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "Cal Bin Minor Ver",
-                       (pBase->binBuildNumber >> 16) & 0xFF);
-       len += snprintf(buf + len, size - len,
-                       "%20s : %10d\n",
-                       "Cal Bin Build",
-                       (pBase->binBuildNumber >> 8) & 0xFF);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n", "Major Version",
+                        pBase->version >> 12);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n", "Minor Version",
+                        pBase->version & 0xFFF);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n", "Checksum",
+                        pBase->checksum);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n", "Length",
+                        pBase->length);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n", "RegDomain1",
+                        pBase->regDmn[0]);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n", "RegDomain2",
+                        pBase->regDmn[1]);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "TX Mask", pBase->txMask);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "RX Mask", pBase->rxMask);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "Allow 5GHz",
+                        !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "Allow 2GHz",
+                        !!(pBase->opCapFlags & AR5416_OPFLAGS_11G));
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "Disable 2GHz HT20",
+                        !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20));
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "Disable 2GHz HT40",
+                        !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40));
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "Disable 5Ghz HT20",
+                        !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20));
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "Disable 5Ghz HT40",
+                        !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40));
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "Big Endian",
+                        !!(pBase->eepMisc & 0x01));
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "Cal Bin Major Ver",
+                        (pBase->binBuildNumber >> 24) & 0xFF);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "Cal Bin Minor Ver",
+                        (pBase->binBuildNumber >> 16) & 0xFF);
+       len += scnprintf(buf + len, size - len,
+                        "%20s : %10d\n",
+                        "Cal Bin Build",
+                        (pBase->binBuildNumber >> 8) & 0xFF);
 
        /*
         * UB91 specific data.
@@ -579,10 +579,10 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
                struct base_eep_header_4k *pBase4k =
                        &priv->ah->eeprom.map4k.baseEepHeader;
 
-               len += snprintf(buf + len, size - len,
-                               "%20s : %10d\n",
-                               "TX Gain type",
-                               pBase4k->txGainType);
+               len += scnprintf(buf + len, size - len,
+                                "%20s : %10d\n",
+                                "TX Gain type",
+                                pBase4k->txGainType);
        }
 
        /*
@@ -592,19 +592,19 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
                struct base_eep_ar9287_header *pBase9287 =
                        &priv->ah->eeprom.map9287.baseEepHeader;
 
-               len += snprintf(buf + len, size - len,
-                               "%20s : %10ddB\n",
-                               "Power Table Offset",
-                               pBase9287->pwrTableOffset);
+               len += scnprintf(buf + len, size - len,
+                                "%20s : %10ddB\n",
+                                "Power Table Offset",
+                                pBase9287->pwrTableOffset);
 
-               len += snprintf(buf + len, size - len,
-                               "%20s : %10d\n",
-                               "OpenLoop Power Ctrl",
-                               pBase9287->openLoopPwrCntl);
+               len += scnprintf(buf + len, size - len,
+                                "%20s : %10d\n",
+                                "OpenLoop Power Ctrl",
+                                pBase9287->openLoopPwrCntl);
        }
 
-       len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
-                       pBase->macAddr);
+       len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+                        pBase->macAddr);
        if (len > size)
                len = size;
 
@@ -627,8 +627,8 @@ static ssize_t read_4k_modal_eeprom(struct file *file,
 {
 #define PR_EEP(_s, _val)                                               \
        do {                                                            \
-               len += snprintf(buf + len, size - len, "%20s : %10d\n", \
-                               _s, (_val));                            \
+               len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+                                _s, (_val));                           \
        } while (0)
 
        struct ath9k_htc_priv *priv = file->private_data;
@@ -708,12 +708,12 @@ static ssize_t read_def_modal_eeprom(struct file *file,
        do {                                                            \
                if (pBase->opCapFlags & AR5416_OPFLAGS_11G) {           \
                        pModal = &priv->ah->eeprom.def.modalHeader[1];  \
-                       len += snprintf(buf + len, size - len, "%20s : %8d%7s", \
-                                       _s, (_val), "|");               \
+                       len += scnprintf(buf + len, size - len, "%20s : %8d%7s", \
+                                        _s, (_val), "|");              \
                }                                                       \
                if (pBase->opCapFlags & AR5416_OPFLAGS_11A) {           \
                        pModal = &priv->ah->eeprom.def.modalHeader[0];  \
-                       len += snprintf(buf + len, size - len, "%9d\n", \
+                       len += scnprintf(buf + len, size - len, "%9d\n",\
                                        (_val));                        \
                }                                                       \
        } while (0)
@@ -729,10 +729,10 @@ static ssize_t read_def_modal_eeprom(struct file *file,
        if (buf == NULL)
                return -ENOMEM;
 
-       len += snprintf(buf + len, size - len,
-                       "%31s %15s\n", "2G", "5G");
-       len += snprintf(buf + len, size - len,
-                       "%32s %16s\n", "====", "====\n");
+       len += scnprintf(buf + len, size - len,
+                        "%31s %15s\n", "2G", "5G");
+       len += scnprintf(buf + len, size - len,
+                        "%32s %16s\n", "====", "====\n");
 
        PR_EEP("Chain0 Ant. Control", pModal->antCtrlChain[0]);
        PR_EEP("Chain1 Ant. Control", pModal->antCtrlChain[1]);
@@ -814,8 +814,8 @@ static ssize_t read_9287_modal_eeprom(struct file *file,
 {
 #define PR_EEP(_s, _val)                                               \
        do {                                                            \
-               len += snprintf(buf + len, size - len, "%20s : %10d\n", \
-                               _s, (_val));                            \
+               len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+                                _s, (_val));                           \
        } while (0)
 
        struct ath9k_htc_priv *priv = file->private_data;
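The hunks above are a mechanical snprintf() -> scnprintf() conversion of the debugfs dump helpers. The difference matters when the return value is accumulated as a buffer offset: snprintf() returns the length it would have written, so len can run past size once the buffer is full, whereas scnprintf() returns only what was actually written (excluding the trailing NUL) and never overshoots. A minimal sketch of the accumulation pattern, in kernel context; the function name and labels are illustrative, not from the patch:

static int fill_eeprom_summary(char *buf, size_t size)
{
	int len = 0;

	/*
	 * scnprintf() never reports more than the space that was left,
	 * so 'len' remains a valid offset even after the buffer fills;
	 * with snprintf() the 'if (len > size) len = size;' clamp seen
	 * above would be needed before copying the result out.
	 */
	len += scnprintf(buf + len, size - len, "%20s : %10d\n", "Field A", 1);
	len += scnprintf(buf + len, size - len, "%20s : %10d\n", "Field B", 2);

	return len;
}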
index d44258172c0f640236719a7e8eb4e8480c9c682b..9a2657fdd9ccd4ec62f96f8a639182e2ded29fec 100644 (file)
 static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
                                              struct ath9k_channel *ichan)
 {
-       enum htc_phymode mode;
-
-       mode = -EINVAL;
-
-       switch (ichan->chanmode) {
-       case CHANNEL_G:
-       case CHANNEL_G_HT20:
-       case CHANNEL_G_HT40PLUS:
-       case CHANNEL_G_HT40MINUS:
-               mode = HTC_MODE_11NG;
-               break;
-       case CHANNEL_A:
-       case CHANNEL_A_HT20:
-       case CHANNEL_A_HT40PLUS:
-       case CHANNEL_A_HT40MINUS:
-               mode = HTC_MODE_11NA;
-               break;
-       default:
-               break;
-       }
+       if (IS_CHAN_5GHZ(ichan))
+               return HTC_MODE_11NA;
 
-       WARN_ON(mode < 0);
-
-       return mode;
+       return HTC_MODE_11NG;
 }
 
 bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
@@ -926,7 +906,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
        WMI_CMD(WMI_FLUSH_RECV_CMDID);
 
        /* setup initial channel */
-       init_channel = ath9k_cmn_get_curchannel(hw, ah);
+       init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
 
        ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
        if (ret) {
@@ -1208,9 +1188,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
                ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
                        curchan->center_freq);
 
-               ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
-                                         &hw->conf.chandef);
-
+               ath9k_cmn_get_channel(hw, priv->ah, &hw->conf.chandef);
                if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
                        ath_err(common, "Unable to set channel\n");
                        ret = -EINVAL;
index 83f4927aeacae1d07a2b18057ea313ad716b0cec..4f9378ddf07f21455a33a908180b9a28729bf6cd 100644 (file)
@@ -78,6 +78,22 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
        ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
 }
 
+static inline void ath9k_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+{
+       ath9k_hw_ops(ah)->tx99_start(ah, qnum);
+}
+
+static inline void ath9k_hw_tx99_stop(struct ath_hw *ah)
+{
+       ath9k_hw_ops(ah)->tx99_stop(ah);
+}
+
+static inline void ath9k_hw_tx99_set_txpower(struct ath_hw *ah, u8 power)
+{
+       if (ath9k_hw_ops(ah)->tx99_set_txpower)
+               ath9k_hw_ops(ah)->tx99_set_txpower(ah, power);
+}
+
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
 
 static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
index ecc6ec4a1edb39b7f3e4d26da756652fc324971b..54b04155e43b1058575aa44df3e6ece1ab18e55e 100644 (file)
@@ -130,29 +130,29 @@ void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
 
 static void ath9k_hw_set_clockrate(struct ath_hw *ah)
 {
-       struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
        struct ath_common *common = ath9k_hw_common(ah);
+       struct ath9k_channel *chan = ah->curchan;
        unsigned int clockrate;
 
        /* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
        if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
                clockrate = 117;
-       else if (!ah->curchan) /* should really check for CCK instead */
+       else if (!chan) /* should really check for CCK instead */
                clockrate = ATH9K_CLOCK_RATE_CCK;
-       else if (conf->chandef.chan->band == IEEE80211_BAND_2GHZ)
+       else if (IS_CHAN_2GHZ(chan))
                clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
        else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
                clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
        else
                clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
 
-       if (conf_is_ht40(conf))
+       if (IS_CHAN_HT40(chan))
                clockrate *= 2;
 
        if (ah->curchan) {
-               if (IS_CHAN_HALF_RATE(ah->curchan))
+               if (IS_CHAN_HALF_RATE(chan))
                        clockrate /= 2;
-               if (IS_CHAN_QUARTER_RATE(ah->curchan))
+               if (IS_CHAN_QUARTER_RATE(chan))
                        clockrate /= 4;
        }
 
@@ -190,10 +190,7 @@ EXPORT_SYMBOL(ath9k_hw_wait);
 void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
                          int hw_delay)
 {
-       if (IS_CHAN_B(chan))
-               hw_delay = (4 * hw_delay) / 22;
-       else
-               hw_delay /= 10;
+       hw_delay /= 10;
 
        if (IS_CHAN_HALF_RATE(chan))
                hw_delay *= 2;
@@ -294,8 +291,7 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
                return;
        }
 
-       if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
-           (chan->chanmode == CHANNEL_G_HT40PLUS)) {
+       if (IS_CHAN_HT40PLUS(chan)) {
                centers->synth_center =
                        chan->channel + HT40_CHANNEL_CENTER_SHIFT;
                extoff = 1;
@@ -549,6 +545,18 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
 
        ath9k_hw_ani_init(ah);
 
+       /*
+        * EEPROM needs to be initialized before we do this.
+        * This is required for regulatory compliance.
+        */
+       if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+               u16 regdmn = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
+               if ((regdmn & 0xF0) == CTL_FCC) {
+                       ah->nf_2g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_2GHZ;
+                       ah->nf_5g.max = AR_PHY_CCA_MAX_GOOD_VAL_9462_FCC_5GHZ;
+               }
+       }
+
        return 0;
 }
 
@@ -1030,7 +1038,6 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
 void ath9k_hw_init_global_settings(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
-       struct ieee80211_conf *conf = &common->hw->conf;
        const struct ath9k_channel *chan = ah->curchan;
        int acktimeout, ctstimeout, ack_offset = 0;
        int slottime;
@@ -1105,8 +1112,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
         * BA frames in some implementations, but it has been found to fix ACK
         * timeout issues in other cases as well.
         */
-       if (conf->chandef.chan &&
-           conf->chandef.chan->band == IEEE80211_BAND_2GHZ &&
+       if (IS_CHAN_2GHZ(chan) &&
            !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
                acktimeout += 64 - sifstime - ah->slottime;
                ctstimeout += 48 - sifstime - ah->slottime;
@@ -1148,9 +1154,7 @@ u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
 {
        u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
 
-       if (IS_CHAN_B(chan))
-               ctl |= CTL_11B;
-       else if (IS_CHAN_G(chan))
+       if (IS_CHAN_2GHZ(chan))
                ctl |= CTL_11G;
        else
                ctl |= CTL_11A;
@@ -1498,10 +1502,8 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
        int r;
 
        if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
-               u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
-               u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
-               band_switch = (cur != new);
-               mode_diff = (chan->chanmode != ah->curchan->chanmode);
+               band_switch = IS_CHAN_5GHZ(ah->curchan) != IS_CHAN_5GHZ(chan);
+               mode_diff = (chan->channelFlags != ah->curchan->channelFlags);
        }
 
        for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1540,9 +1542,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
        ath9k_hw_set_clockrate(ah);
        ath9k_hw_apply_txpower(ah, chan, false);
 
-       if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
-               ath9k_hw_set_delta_slope(ah, chan);
-
+       ath9k_hw_set_delta_slope(ah, chan);
        ath9k_hw_spur_mitigate_freq(ah, chan);
 
        if (band_switch || ini_reloaded)
@@ -1644,6 +1644,19 @@ hang_check_iter:
        return true;
 }
 
+void ath9k_hw_check_nav(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+       u32 val;
+
+       val = REG_READ(ah, AR_NAV);
+       if (val != 0xdeadbeef && val > 0x7fff) {
+               ath_dbg(common, BSTUCK, "Abnormal NAV: 0x%x\n", val);
+               REG_WRITE(ah, AR_NAV, 0);
+       }
+}
+EXPORT_SYMBOL(ath9k_hw_check_nav);
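/*
 * Editorial note, not part of the patch: AR_NAV holds the hardware's
 * virtual carrier-sense (NAV) countdown. A read of 0xdeadbeef means the
 * register could not be read at all, while an implausibly large residual
 * NAV would block transmissions, so such a value is logged under the
 * BSTUCK debug mask and cleared.
 */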
+
 bool ath9k_hw_check_alive(struct ath_hw *ah)
 {
        int count = 50;
@@ -1799,20 +1812,11 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
                goto fail;
 
        /*
-        * If cross-band fcc is not supoprted, bail out if
-        * either channelFlags or chanmode differ.
-        *
-        * chanmode will be different if the HT operating mode
-        * changes because of CSA.
+        * If cross-band fcc is not supported, bail out if channelFlags differ.
         */
-       if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
-               if ((chan->channelFlags & CHANNEL_ALL) !=
-                   (ah->curchan->channelFlags & CHANNEL_ALL))
-                       goto fail;
-
-               if (chan->chanmode != ah->curchan->chanmode)
-                       goto fail;
-       }
+       if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) &&
+           chan->channelFlags != ah->curchan->channelFlags)
+               goto fail;
 
        if (!ath9k_hw_check_alive(ah))
                goto fail;
@@ -1822,9 +1826,9 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
         * re-using are present.
         */
        if (AR_SREV_9462(ah) && (ah->caldata &&
-                                (!ah->caldata->done_txiqcal_once ||
-                                 !ah->caldata->done_txclcal_once ||
-                                 !ah->caldata->rtt_done)))
+                                (!test_bit(TXIQCAL_DONE, &ah->caldata->cal_flags) ||
+                                 !test_bit(TXCLCAL_DONE, &ah->caldata->cal_flags) ||
+                                 !test_bit(RTT_DONE, &ah->caldata->cal_flags))))
                goto fail;
 
        ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
@@ -1874,15 +1878,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
        ah->caldata = caldata;
        if (caldata && (chan->channel != caldata->channel ||
-                       chan->channelFlags != caldata->channelFlags ||
-                       chan->chanmode != caldata->chanmode)) {
+                       chan->channelFlags != caldata->channelFlags)) {
                /* Operating channel changed, reset channel calibration data */
                memset(caldata, 0, sizeof(*caldata));
                ath9k_init_nfcal_hist_buffer(ah, chan);
        } else if (caldata) {
-               caldata->paprd_packet_sent = false;
+               clear_bit(PAPRD_PACKET_SENT, &caldata->cal_flags);
        }
-       ah->noise = ath9k_hw_getchan_noise(ah, chan);
+       ah->noise = ath9k_hw_getchan_noise(ah, chan, chan->noisefloor);
 
        if (fastcc) {
                r = ath9k_hw_do_fastcc(ah, chan);
@@ -1964,9 +1967,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
 
        ath9k_hw_init_mfp(ah);
 
-       if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
-               ath9k_hw_set_delta_slope(ah, chan);
-
+       ath9k_hw_set_delta_slope(ah, chan);
        ath9k_hw_spur_mitigate_freq(ah, chan);
        ah->eep_ops->set_board_values(ah, chan);
 
@@ -2017,8 +2018,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
        ath9k_hw_init_bb(ah, chan);
 
        if (caldata) {
-               caldata->done_txiqcal_once = false;
-               caldata->done_txclcal_once = false;
+               clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
+               clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
        }
        if (!ath9k_hw_init_cal(ah, chan))
                return -EIO;
@@ -2943,12 +2944,11 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set)
 }
 EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
 
-void ath9k_hw_set11nmac2040(struct ath_hw *ah)
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan)
 {
-       struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
        u32 macmode;
 
-       if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
+       if (IS_CHAN_HT40(chan) && !ah->config.cwm_ignore_extcca)
                macmode = AR_2040_JOINED_RX_CLEAR;
        else
                macmode = 0;
@@ -3240,19 +3240,19 @@ void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
 
        /* chipsets >= AR9280 are single-chip */
        if (AR_SREV_9280_20_OR_LATER(ah)) {
-               used = snprintf(hw_name, len,
-                              "Atheros AR%s Rev:%x",
-                              ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
-                              ah->hw_version.macRev);
+               used = scnprintf(hw_name, len,
+                                "Atheros AR%s Rev:%x",
+                                ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+                                ah->hw_version.macRev);
        }
        else {
-               used = snprintf(hw_name, len,
-                              "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
-                              ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
-                              ah->hw_version.macRev,
-                              ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
-                                               AR_RADIO_SREV_MAJOR)),
-                              ah->hw_version.phyRev);
+               used = scnprintf(hw_name, len,
+                                "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
+                                ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+                                ah->hw_version.macRev,
+                                ath9k_hw_rf_name((ah->hw_version.analog5GhzRev
+                                                 & AR_RADIO_SREV_MAJOR)),
+                                ah->hw_version.phyRev);
        }
 
        hw_name[used] = '\0';
index 69a907b55a73b129fae1171f2d9859a3303ec3e7..9ea24f1cba73f812de0b3352806e89434c0d59f9 100644 (file)
@@ -98,8 +98,8 @@
 
 #define PR_EEP(_s, _val)                                               \
        do {                                                            \
-               len += snprintf(buf + len, size - len, "%20s : %10d\n", \
-                               _s, (_val));                            \
+               len += scnprintf(buf + len, size - len, "%20s : %10d\n",\
+                                _s, (_val));                           \
        } while (0)
 
 #define SM(_v, _f)  (((_v) << _f##_S) & _f)
@@ -369,55 +369,30 @@ enum ath9k_int {
        ATH9K_INT_NOCARD = 0xffffffff
 };
 
-#define CHANNEL_CCK       0x00020
-#define CHANNEL_OFDM      0x00040
-#define CHANNEL_2GHZ      0x00080
-#define CHANNEL_5GHZ      0x00100
-#define CHANNEL_PASSIVE   0x00200
-#define CHANNEL_DYN       0x00400
-#define CHANNEL_HALF      0x04000
-#define CHANNEL_QUARTER   0x08000
-#define CHANNEL_HT20      0x10000
-#define CHANNEL_HT40PLUS  0x20000
-#define CHANNEL_HT40MINUS 0x40000
-
-#define CHANNEL_A           (CHANNEL_5GHZ|CHANNEL_OFDM)
-#define CHANNEL_B           (CHANNEL_2GHZ|CHANNEL_CCK)
-#define CHANNEL_G           (CHANNEL_2GHZ|CHANNEL_OFDM)
-#define CHANNEL_G_HT20      (CHANNEL_2GHZ|CHANNEL_HT20)
-#define CHANNEL_A_HT20      (CHANNEL_5GHZ|CHANNEL_HT20)
-#define CHANNEL_G_HT40PLUS  (CHANNEL_2GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_A_HT40PLUS  (CHANNEL_5GHZ|CHANNEL_HT40PLUS)
-#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS)
-#define CHANNEL_ALL                            \
-       (CHANNEL_OFDM|                          \
-        CHANNEL_CCK|                           \
-        CHANNEL_2GHZ |                         \
-        CHANNEL_5GHZ |                         \
-        CHANNEL_HT20 |                         \
-        CHANNEL_HT40PLUS |                     \
-        CHANNEL_HT40MINUS)
-
 #define MAX_RTT_TABLE_ENTRY     6
 #define MAX_IQCAL_MEASUREMENT  8
 #define MAX_CL_TAB_ENTRY       16
 #define CL_TAB_ENTRY(reg_base) (reg_base + (4 * j))
 
+enum ath9k_cal_flags {
+       RTT_DONE,
+       PAPRD_PACKET_SENT,
+       PAPRD_DONE,
+       NFCAL_PENDING,
+       NFCAL_INTF,
+       TXIQCAL_DONE,
+       TXCLCAL_DONE,
+       SW_PKDET_DONE,
+};
+
 struct ath9k_hw_cal_data {
        u16 channel;
-       u32 channelFlags;
-       u32 chanmode;
+       u16 channelFlags;
+       unsigned long cal_flags;
        int32_t CalValid;
        int8_t iCoff;
        int8_t qCoff;
-       bool rtt_done;
-       bool paprd_packet_sent;
-       bool paprd_done;
-       bool nfcal_pending;
-       bool nfcal_interference;
-       bool done_txiqcal_once;
-       bool done_txclcal_once;
+       u8 caldac[2];
        u16 small_signal_gain[AR9300_MAX_CHAINS];
        u32 pa_table[AR9300_MAX_CHAINS][PAPRD_TABLE_SZ];
        u32 num_measures[AR9300_MAX_CHAINS];
@@ -430,33 +405,34 @@ struct ath9k_hw_cal_data {
 struct ath9k_channel {
        struct ieee80211_channel *chan;
        u16 channel;
-       u32 channelFlags;
-       u32 chanmode;
+       u16 channelFlags;
        s16 noisefloor;
 };
 
-#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \
-       (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \
-       (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \
-       (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0)
-#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0)
-#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
-#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
-#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
+#define CHANNEL_5GHZ           BIT(0)
+#define CHANNEL_HALF           BIT(1)
+#define CHANNEL_QUARTER                BIT(2)
+#define CHANNEL_HT             BIT(3)
+#define CHANNEL_HT40PLUS       BIT(4)
+#define CHANNEL_HT40MINUS      BIT(5)
+
+#define IS_CHAN_5GHZ(_c) (!!((_c)->channelFlags & CHANNEL_5GHZ))
+#define IS_CHAN_2GHZ(_c) (!IS_CHAN_5GHZ(_c))
+
+#define IS_CHAN_HALF_RATE(_c) (!!((_c)->channelFlags & CHANNEL_HALF))
+#define IS_CHAN_QUARTER_RATE(_c) (!!((_c)->channelFlags & CHANNEL_QUARTER))
 #define IS_CHAN_A_FAST_CLOCK(_ah, _c)                  \
-       ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) &&  \
-        ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
-
-/* These macros check chanmode and not channelFlags */
-#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
-#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) ||        \
-                         ((_c)->chanmode == CHANNEL_G_HT20))
-#define IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) ||    \
-                         ((_c)->chanmode == CHANNEL_A_HT40MINUS) ||    \
-                         ((_c)->chanmode == CHANNEL_G_HT40PLUS) ||     \
-                         ((_c)->chanmode == CHANNEL_G_HT40MINUS))
-#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c)))
+       (IS_CHAN_5GHZ(_c) && ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
+
+#define IS_CHAN_HT(_c) ((_c)->channelFlags & CHANNEL_HT)
+
+#define IS_CHAN_HT20(_c) (IS_CHAN_HT(_c) && !IS_CHAN_HT40(_c))
+
+#define IS_CHAN_HT40(_c) \
+       (!!((_c)->channelFlags & (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)))
+
+#define IS_CHAN_HT40PLUS(_c) ((_c)->channelFlags & CHANNEL_HT40PLUS)
+#define IS_CHAN_HT40MINUS(_c) ((_c)->channelFlags & CHANNEL_HT40MINUS)
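/*
 * Editorial illustration, not part of the patch: with the slimmed-down
 * flag set above, a 5 GHz HT40+ channel would carry
 *
 *	channelFlags = CHANNEL_5GHZ | CHANNEL_HT | CHANNEL_HT40PLUS;
 *
 * so IS_CHAN_5GHZ(), IS_CHAN_HT(), IS_CHAN_HT40() and IS_CHAN_HT40PLUS()
 * all evaluate true for it, IS_CHAN_HT20() is false, and IS_CHAN_2GHZ()
 * is simply the negation of IS_CHAN_5GHZ().
 */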
 
 enum ath9k_power_mode {
        ATH9K_PM_AWAKE = 0,
@@ -558,6 +534,7 @@ struct ath_hw_antcomb_conf {
        u8 main_gaintb;
        u8 alt_gaintb;
        int lna1_lna2_delta;
+       int lna1_lna2_switch_delta;
        u8 div_group;
 };
 
@@ -726,6 +703,10 @@ struct ath_hw_ops {
        void (*spectral_scan_trigger)(struct ath_hw *ah);
        void (*spectral_scan_wait)(struct ath_hw *ah);
 
+       void (*tx99_start)(struct ath_hw *ah, u32 qnum);
+       void (*tx99_stop)(struct ath_hw *ah);
+       void (*tx99_set_txpower)(struct ath_hw *ah, u8 power);
+
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
        void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
 #endif
@@ -1026,10 +1007,11 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah);
 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
 void ath9k_hw_init_global_settings(struct ath_hw *ah);
 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
-void ath9k_hw_set11nmac2040(struct ath_hw *ah);
+void ath9k_hw_set11nmac2040(struct ath_hw *ah, struct ath9k_channel *chan);
 void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
 void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
                                    const struct ath9k_beacon_state *bs);
+void ath9k_hw_check_nav(struct ath_hw *ah);
 bool ath9k_hw_check_alive(struct ath_hw *ah);
 
 bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
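The struct ath9k_hw_cal_data change earlier in this header replaces seven independent bools with one unsigned long cal_flags word indexed by enum ath9k_cal_flags, which is what the hw.c and link.c hunks switch to via the standard bitops helpers. A minimal sketch of the pattern, assuming kernel bitops; the function itself is illustrative only:

static void example_cal_flags(struct ath9k_hw_cal_data *caldata)
{
	/* record that TX IQ calibration has completed */
	set_bit(TXIQCAL_DONE, &caldata->cal_flags);

	/* fast channel change only reuses caldata once both are done */
	if (test_bit(TXIQCAL_DONE, &caldata->cal_flags) &&
	    test_bit(TXCLCAL_DONE, &caldata->cal_flags))
		pr_debug("TX calibration results can be reused\n");

	/* a full reset invalidates them again */
	clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
	clear_bit(TXCLCAL_DONE, &caldata->cal_flags);
}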
index 9a1f349f926001662dee74a796db2c580d21dbb7..e89db64532f567442ae13106a77a09bb282f2445 100644 (file)
@@ -347,7 +347,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 *ds;
-       struct ath_buf *bf;
        int i, bsize, desc_len;
 
        ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
@@ -399,33 +398,68 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
                ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
 
        /* allocate buffers */
-       bsize = sizeof(struct ath_buf) * nbuf;
-       bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
-       if (!bf)
-               return -ENOMEM;
+       if (is_tx) {
+               struct ath_buf *bf;
+
+               bsize = sizeof(struct ath_buf) * nbuf;
+               bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+               if (!bf)
+                       return -ENOMEM;
+
+               for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+                       bf->bf_desc = ds;
+                       bf->bf_daddr = DS2PHYS(dd, ds);
+
+                       if (!(sc->sc_ah->caps.hw_caps &
+                                 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+                               /*
+                                * Skip descriptor addresses which can cause 4KB
+                                * boundary crossing (addr + length) with a 32 dword
+                                * descriptor fetch.
+                                */
+                               while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+                                       BUG_ON((caddr_t) bf->bf_desc >=
+                                                  ((caddr_t) dd->dd_desc +
+                                               dd->dd_desc_len));
+
+                                       ds += (desc_len * ndesc);
+                                       bf->bf_desc = ds;
+                                       bf->bf_daddr = DS2PHYS(dd, ds);
+                               }
+                       }
+                       list_add_tail(&bf->list, head);
+               }
+       } else {
+               struct ath_rxbuf *bf;
 
-       for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
-               bf->bf_desc = ds;
-               bf->bf_daddr = DS2PHYS(dd, ds);
-
-               if (!(sc->sc_ah->caps.hw_caps &
-                     ATH9K_HW_CAP_4KB_SPLITTRANS)) {
-                       /*
-                        * Skip descriptor addresses which can cause 4KB
-                        * boundary crossing (addr + length) with a 32 dword
-                        * descriptor fetch.
-                        */
-                       while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
-                               BUG_ON((caddr_t) bf->bf_desc >=
-                                      ((caddr_t) dd->dd_desc +
-                                       dd->dd_desc_len));
-
-                               ds += (desc_len * ndesc);
-                               bf->bf_desc = ds;
-                               bf->bf_daddr = DS2PHYS(dd, ds);
+               bsize = sizeof(struct ath_rxbuf) * nbuf;
+               bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
+               if (!bf)
+                       return -ENOMEM;
+
+               for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
+                       bf->bf_desc = ds;
+                       bf->bf_daddr = DS2PHYS(dd, ds);
+
+                       if (!(sc->sc_ah->caps.hw_caps &
+                                 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+                               /*
+                                * Skip descriptor addresses which can cause 4KB
+                                * boundary crossing (addr + length) with a 32 dword
+                                * descriptor fetch.
+                                */
+                               while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+                                       BUG_ON((caddr_t) bf->bf_desc >=
+                                                  ((caddr_t) dd->dd_desc +
+                                               dd->dd_desc_len));
+
+                                       ds += (desc_len * ndesc);
+                                       bf->bf_desc = ds;
+                                       bf->bf_daddr = DS2PHYS(dd, ds);
+                               }
                        }
+                       list_add_tail(&bf->list, head);
                }
-               list_add_tail(&bf->list, head);
        }
        return 0;
 }
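/*
 * Editorial note, not part of the patch: both branches above apply the
 * same guard. ATH_DESC_4KB_BOUND_CHECK() flags descriptor addresses whose
 * 32-dword (128-byte) fetch would straddle a 4 KB boundary, which chips
 * without ATH9K_HW_CAP_4KB_SPLITTRANS cannot handle; such slots are
 * skipped and the next descriptor-sized chunk is linked instead.
 */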
@@ -437,7 +471,6 @@ static int ath9k_init_queues(struct ath_softc *sc)
        sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
        sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
 
-       sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);
 
        sc->tx.uapsdq = ath_txq_setup(sc, ATH9K_TX_QUEUE_UAPSD, 0);
@@ -547,6 +580,26 @@ static void ath9k_init_platform(struct ath_softc *sc)
        if (sc->driver_data & ATH9K_PCI_CUS217)
                ath_info(common, "CUS217 card detected\n");
 
+       if (sc->driver_data & ATH9K_PCI_CUS252)
+               ath_info(common, "CUS252 card detected\n");
+
+       if (sc->driver_data & ATH9K_PCI_AR9565_1ANT)
+               ath_info(common, "WB335 1-ANT card detected\n");
+
+       if (sc->driver_data & ATH9K_PCI_AR9565_2ANT)
+               ath_info(common, "WB335 2-ANT card detected\n");
+
+       /*
+        * Some WB335 cards do not support antenna diversity. Since
+        * we use a hardcoded value for AR9565 instead of using the
+        * EEPROM/OTP data, remove the combining feature from
+        * the HW capabilities bitmap.
+        */
+       if (sc->driver_data & (ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_AR9565_2ANT)) {
+               if (!(sc->driver_data & ATH9K_PCI_BT_ANT_DIV))
+                       pCap->hw_caps &= ~ATH9K_HW_CAP_ANT_DIV_COMB;
+       }
+
        if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
                pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
                ath_info(common, "Set BT/WLAN RX diversity capability\n");
@@ -627,7 +680,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        sc->sc_ah = ah;
        pCap = &ah->caps;
 
-       sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
+       common = ath9k_hw_common(ah);
+       sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
+       sc->tx99_power = MAX_RATE_POWER + 1;
 
        if (!pdata) {
                ah->ah_flags |= AH_USE_EEPROM;
@@ -641,7 +696,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
                ah->external_reset = pdata->external_reset;
        }
 
-       common = ath9k_hw_common(ah);
        common->ops = &ah->reg_ops;
        common->bus_ops = bus_ops;
        common->ah = ah;
@@ -732,6 +786,7 @@ err_queues:
        ath9k_hw_deinit(ah);
 err_hw:
        ath9k_eeprom_release(sc);
+       dev_kfree_skb_any(sc->tx99_skb);
        return ret;
 }
 
@@ -748,7 +803,7 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
                chan = &sband->channels[i];
                ah->curchan = &ah->channels[chan->hw_value];
                cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
-               ath9k_cmn_update_ichannel(ah->curchan, &chandef);
+               ath9k_cmn_get_channel(sc->hw, ah, &chandef);
                ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
        }
 }
@@ -789,7 +844,6 @@ static const struct ieee80211_iface_limit if_limits[] = {
                                 BIT(NL80211_IFTYPE_P2P_GO) },
 };
 
-
 static const struct ieee80211_iface_limit if_dfs_limits[] = {
        { .max = 1,     .types = BIT(NL80211_IFTYPE_AP) },
 };
@@ -850,17 +904,18 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
 
        hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
 
-       hw->wiphy->interface_modes =
-               BIT(NL80211_IFTYPE_P2P_GO) |
-               BIT(NL80211_IFTYPE_P2P_CLIENT) |
-               BIT(NL80211_IFTYPE_AP) |
-               BIT(NL80211_IFTYPE_WDS) |
-               BIT(NL80211_IFTYPE_STATION) |
-               BIT(NL80211_IFTYPE_ADHOC) |
-               BIT(NL80211_IFTYPE_MESH_POINT);
-
-       hw->wiphy->iface_combinations = if_comb;
-       hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+       if (!config_enabled(CONFIG_ATH9K_TX99)) {
+               hw->wiphy->interface_modes =
+                       BIT(NL80211_IFTYPE_P2P_GO) |
+                       BIT(NL80211_IFTYPE_P2P_CLIENT) |
+                       BIT(NL80211_IFTYPE_AP) |
+                       BIT(NL80211_IFTYPE_WDS) |
+                       BIT(NL80211_IFTYPE_STATION) |
+                       BIT(NL80211_IFTYPE_ADHOC) |
+                       BIT(NL80211_IFTYPE_MESH_POINT);
+               hw->wiphy->iface_combinations = if_comb;
+               hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+       }
 
        hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
index 2f831db396ac2fd84c0ca1e3a6e46bc325b1013e..aed7e29dc50f152b954278020b8b2999f3e795e7 100644 (file)
@@ -28,6 +28,13 @@ void ath_tx_complete_poll_work(struct work_struct *work)
        int i;
        bool needreset = false;
 
+
+       if (sc->tx99_state) {
+               ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+                       "skip tx hung detection on tx99\n");
+               return;
+       }
+
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                txq = sc->tx.txq_map[i];
 
@@ -70,7 +77,7 @@ void ath_hw_check(struct work_struct *work)
        ath9k_ps_wakeup(sc);
        is_alive = ath9k_hw_check_alive(sc->sc_ah);
 
-       if (is_alive && !AR_SREV_9300(sc->sc_ah))
+       if ((is_alive && !AR_SREV_9300(sc->sc_ah)) || sc->tx99_state)
                goto out;
        else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
                ath_dbg(common, RESET,
@@ -141,6 +148,9 @@ void ath_hw_pll_work(struct work_struct *work)
        if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
                return;
 
+       if (sc->tx99_state)
+               return;
+
        ath9k_ps_wakeup(sc);
        pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
        ath9k_ps_restore(sc);
@@ -184,7 +194,7 @@ static void ath_paprd_activate(struct ath_softc *sc)
        struct ath9k_hw_cal_data *caldata = ah->caldata;
        int chain;
 
-       if (!caldata || !caldata->paprd_done) {
+       if (!caldata || !test_bit(PAPRD_DONE, &caldata->cal_flags)) {
                ath_dbg(common, CALIBRATE, "Failed to activate PAPRD\n");
                return;
        }
@@ -256,7 +266,9 @@ void ath_paprd_calibrate(struct work_struct *work)
        int len = 1800;
        int ret;
 
-       if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) {
+       if (!caldata ||
+           !test_bit(PAPRD_PACKET_SENT, &caldata->cal_flags) ||
+           test_bit(PAPRD_DONE, &caldata->cal_flags)) {
                ath_dbg(common, CALIBRATE, "Skipping PAPRD calibration\n");
                return;
        }
@@ -316,7 +328,7 @@ void ath_paprd_calibrate(struct work_struct *work)
        kfree_skb(skb);
 
        if (chain_ok) {
-               caldata->paprd_done = true;
+               set_bit(PAPRD_DONE, &caldata->cal_flags);
                ath_paprd_activate(sc);
        }
 
@@ -343,7 +355,7 @@ void ath_ani_calibrate(unsigned long data)
        u32 cal_interval, short_cal_interval, long_cal_interval;
        unsigned long flags;
 
-       if (ah->caldata && ah->caldata->nfcal_interference)
+       if (ah->caldata && test_bit(NFCAL_INTF, &ah->caldata->cal_flags))
                long_cal_interval = ATH_LONG_CALINTERVAL_INT;
        else
                long_cal_interval = ATH_LONG_CALINTERVAL;
@@ -432,7 +444,7 @@ set_timer:
        mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
 
        if (ar9003_is_paprd_enabled(ah) && ah->caldata) {
-               if (!ah->caldata->paprd_done) {
+               if (!test_bit(PAPRD_DONE, &ah->caldata->cal_flags)) {
                        ieee80211_queue_work(sc->hw, &sc->paprd_work);
                } else if (!ah->paprd_table_write_done) {
                        ath9k_ps_wakeup(sc);
@@ -516,7 +528,8 @@ void ath_update_survey_nf(struct ath_softc *sc, int channel)
 
        if (chan->noisefloor) {
                survey->filled |= SURVEY_INFO_NOISE_DBM;
-               survey->noise = ath9k_hw_getchan_noise(ah, chan);
+               survey->noise = ath9k_hw_getchan_noise(ah, chan,
+                                                      chan->noisefloor);
        }
 }
 
index a3eff0986a3f95c1ea6b523fc71b02a1ac2b4eba..6a18f9d3e9cc952ef116f9fda1bf56f37526d8bc 100644 (file)
@@ -374,7 +374,6 @@ EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
 bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
 {
        struct ath_common *common = ath9k_hw_common(ah);
-       struct ath9k_channel *chan = ah->curchan;
        struct ath9k_tx_queue_info *qi;
        u32 cwMin, chanCwMin, value;
 
@@ -387,10 +386,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
        ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
 
        if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
-               if (chan && IS_CHAN_B(chan))
-                       chanCwMin = INIT_CWMIN_11B;
-               else
-                       chanCwMin = INIT_CWMIN;
+               chanCwMin = INIT_CWMIN;
 
                for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
        } else
index bfccaceed44ef82f34c7e9df6562975f4322bf5e..e3eed81f24391c61b1389a749d471cf4ad662056 100644 (file)
@@ -603,8 +603,6 @@ enum ath9k_tx_queue_flags {
 #define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001
 
 #define ATH9K_DECOMP_MASK_SIZE     128
-#define ATH9K_READY_TIME_LO_BOUND  50
-#define ATH9K_READY_TIME_HI_BOUND  96
 
 enum ath9k_pkt_type {
        ATH9K_PKT_TYPE_NORMAL = 0,
index e4f65900132dedf40a68e2299b2a6177e8dfaf89..74f452c7b1667c47a65506a077042f2b0668c3a8 100644 (file)
@@ -208,6 +208,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        unsigned long flags;
+       int i;
 
        if (ath_startrecv(sc) != 0) {
                ath_err(common, "Unable to restart recv logic\n");
@@ -235,6 +236,15 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
                }
        work:
                ath_restart_work(sc);
+
+               for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+                       if (!ATH_TXQ_SETUP(sc, i))
+                               continue;
+
+                       spin_lock_bh(&sc->tx.txq[i].axq_lock);
+                       ath_txq_schedule(sc, &sc->tx.txq[i]);
+                       spin_unlock_bh(&sc->tx.txq[i].axq_lock);
+               }
        }
 
        ieee80211_wake_queues(sc->hw);
@@ -302,17 +312,91 @@ out:
 * by resetting the chip.  To accomplish this we must first clean up any pending
  * DMA, then restart stuff.
 */
-static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
-                   struct ath9k_channel *hchan)
+static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chandef)
 {
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ieee80211_hw *hw = sc->hw;
+       struct ath9k_channel *hchan;
+       struct ieee80211_channel *chan = chandef->chan;
+       unsigned long flags;
+       bool offchannel;
+       int pos = chan->hw_value;
+       int old_pos = -1;
        int r;
 
        if (test_bit(SC_OP_INVALID, &sc->sc_flags))
                return -EIO;
 
+       offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
+
+       if (ah->curchan)
+               old_pos = ah->curchan - &ah->channels[0];
+
+       ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+               chan->center_freq, chandef->width);
+
+       /* update survey stats for the old channel before switching */
+       spin_lock_irqsave(&common->cc_lock, flags);
+       ath_update_survey_stats(sc);
+       spin_unlock_irqrestore(&common->cc_lock, flags);
+
+       ath9k_cmn_get_channel(hw, ah, chandef);
+
+       /*
+        * If the operating channel changes, change the survey in-use flags
+        * along with it.
+        * Reset the survey data for the new channel, unless we're switching
+        * back to the operating channel from an off-channel operation.
+        */
+       if (!offchannel && sc->cur_survey != &sc->survey[pos]) {
+               if (sc->cur_survey)
+                       sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
+
+               sc->cur_survey = &sc->survey[pos];
+
+               memset(sc->cur_survey, 0, sizeof(struct survey_info));
+               sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
+       } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
+               memset(&sc->survey[pos], 0, sizeof(struct survey_info));
+       }
+
+       hchan = &sc->sc_ah->channels[pos];
        r = ath_reset_internal(sc, hchan);
+       if (r)
+               return r;
 
-       return r;
+       /*
+        * The most recent snapshot of channel->noisefloor for the old
+        * channel is only available after the hardware reset. Copy it to
+        * the survey stats now.
+        */
+       if (old_pos >= 0)
+               ath_update_survey_nf(sc, old_pos);
+
+       /*
+        * Enable radar pulse detection if on a DFS channel. Spectral
+        * scanning and radar detection can not be used concurrently.
+        */
+       if (hw->conf.radar_enabled) {
+               u32 rxfilter;
+
+               /* set HW specific DFS configuration */
+               ath9k_hw_set_radar_params(ah);
+               rxfilter = ath9k_hw_getrxfilter(ah);
+               rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
+                               ATH9K_RX_FILTER_PHYERR;
+               ath9k_hw_setrxfilter(ah, rxfilter);
+               ath_dbg(common, DFS, "DFS enabled at freq %d\n",
+                       chan->center_freq);
+       } else {
+               /* perform spectral scan if requested. */
+               if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
+                       sc->spectral_mode == SPECTRAL_CHANSCAN)
+                       ath9k_spectral_scan_trigger(hw);
+       }
+
+       return 0;
 }
 
 static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -362,6 +446,13 @@ void ath9k_tasklet(unsigned long data)
                        type = RESET_TYPE_BB_WATCHDOG;
 
                ath9k_queue_reset(sc, type);
+
+               /*
+                * Increment the ref. counter here so that
+                * interrupts are enabled in the reset routine.
+                */
+               atomic_inc(&ah->intr_ref_cnt);
+               ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
                goto out;
        }
 
@@ -400,10 +491,9 @@ void ath9k_tasklet(unsigned long data)
 
        ath9k_btcoex_handle_interrupt(sc, status);
 
-out:
        /* re-enable hardware interrupt */
        ath9k_hw_enable_interrupts(ah);
-
+out:
        spin_unlock(&sc->sc_pcu_lock);
        ath9k_ps_restore(sc);
 }
@@ -539,21 +629,10 @@ chip_reset:
 
 static int ath_reset(struct ath_softc *sc)
 {
-       int i, r;
+       int r;
 
        ath9k_ps_wakeup(sc);
-
        r = ath_reset_internal(sc, NULL);
-
-       for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-               if (!ATH_TXQ_SETUP(sc, i))
-                       continue;
-
-               spin_lock_bh(&sc->tx.txq[i].axq_lock);
-               ath_txq_schedule(sc, &sc->tx.txq[i]);
-               spin_unlock_bh(&sc->tx.txq[i].axq_lock);
-       }
-
        ath9k_ps_restore(sc);
 
        return r;
@@ -595,7 +674,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
        ath9k_ps_wakeup(sc);
        mutex_lock(&sc->mutex);
 
-       init_channel = ath9k_cmn_get_curchannel(hw, ah);
+       init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
 
        /* Reset SERDES registers */
        ath9k_hw_configpcipowersave(ah, false);
@@ -798,7 +877,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        }
 
        if (!ah->curchan)
-               ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+               ah->curchan = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);
 
        ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
        ath9k_hw_phy_disable(ah);
@@ -817,7 +896,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        ath_dbg(common, CONFIG, "Driver halt\n");
 }
 
-bool ath9k_uses_beacons(int type)
+static bool ath9k_uses_beacons(int type)
 {
        switch (type) {
        case NL80211_IFTYPE_AP:
@@ -967,6 +1046,14 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&sc->mutex);
 
+       if (config_enabled(CONFIG_ATH9K_TX99)) {
+               if (sc->nvifs >= 1) {
+                       mutex_unlock(&sc->mutex);
+                       return -EOPNOTSUPP;
+               }
+               sc->tx99_vif = vif;
+       }
+
        ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
        sc->nvifs++;
 
@@ -995,9 +1082,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 
-       ath_dbg(common, CONFIG, "Change Interface\n");
        mutex_lock(&sc->mutex);
 
+       if (config_enabled(CONFIG_ATH9K_TX99)) {
+               mutex_unlock(&sc->mutex);
+               return -EOPNOTSUPP;
+       }
+
+       ath_dbg(common, CONFIG, "Change Interface\n");
+
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_remove_slot(sc, vif);
 
@@ -1027,6 +1120,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
        mutex_lock(&sc->mutex);
 
        sc->nvifs--;
+       sc->tx99_vif = NULL;
 
        if (ath9k_uses_beacons(vif->type))
                ath9k_beacon_remove_slot(sc, vif);
@@ -1048,6 +1142,9 @@ static void ath9k_enable_ps(struct ath_softc *sc)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
 
+       if (config_enabled(CONFIG_ATH9K_TX99))
+               return;
+
        sc->ps_enabled = true;
        if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
                if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -1064,6 +1161,9 @@ static void ath9k_disable_ps(struct ath_softc *sc)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
 
+       if (config_enabled(CONFIG_ATH9K_TX99))
+               return;
+
        sc->ps_enabled = false;
        ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
        if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
@@ -1087,6 +1187,9 @@ void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
        struct ath_common *common = ath9k_hw_common(ah);
        u32 rxfilter;
 
+       if (config_enabled(CONFIG_ATH9K_TX99))
+               return;
+
        if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
                ath_err(common, "spectrum analyzer not implemented on this hardware\n");
                return;
@@ -1202,81 +1305,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
        }
 
        if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
-               struct ieee80211_channel *curchan = hw->conf.chandef.chan;
-               int pos = curchan->hw_value;
-               int old_pos = -1;
-               unsigned long flags;
-
-               if (ah->curchan)
-                       old_pos = ah->curchan - &ah->channels[0];
-
-               ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
-                       curchan->center_freq, hw->conf.chandef.width);
-
-               /* update survey stats for the old channel before switching */
-               spin_lock_irqsave(&common->cc_lock, flags);
-               ath_update_survey_stats(sc);
-               spin_unlock_irqrestore(&common->cc_lock, flags);
-
-               ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
-                                         &conf->chandef);
-
-               /*
-                * If the operating channel changes, change the survey in-use flags
-                * along with it.
-                * Reset the survey data for the new channel, unless we're switching
-                * back to the operating channel from an off-channel operation.
-                */
-               if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) &&
-                   sc->cur_survey != &sc->survey[pos]) {
-
-                       if (sc->cur_survey)
-                               sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;
-
-                       sc->cur_survey = &sc->survey[pos];
-
-                       memset(sc->cur_survey, 0, sizeof(struct survey_info));
-                       sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
-               } else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
-                       memset(&sc->survey[pos], 0, sizeof(struct survey_info));
-               }
-
-               if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
+               if (ath_set_channel(sc, &hw->conf.chandef) < 0) {
                        ath_err(common, "Unable to set channel\n");
                        mutex_unlock(&sc->mutex);
                        ath9k_ps_restore(sc);
                        return -EINVAL;
                }
-
-               /*
-                * The most recent snapshot of channel->noisefloor for the old
-                * channel is only available after the hardware reset. Copy it to
-                * the survey stats now.
-                */
-               if (old_pos >= 0)
-                       ath_update_survey_nf(sc, old_pos);
-
-               /*
-                * Enable radar pulse detection if on a DFS channel. Spectral
-                * scanning and radar detection can not be used concurrently.
-                */
-               if (hw->conf.radar_enabled) {
-                       u32 rxfilter;
-
-                       /* set HW specific DFS configuration */
-                       ath9k_hw_set_radar_params(ah);
-                       rxfilter = ath9k_hw_getrxfilter(ah);
-                       rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
-                                   ATH9K_RX_FILTER_PHYERR;
-                       ath9k_hw_setrxfilter(ah, rxfilter);
-                       ath_dbg(common, DFS, "DFS enabled at freq %d\n",
-                               curchan->center_freq);
-               } else {
-                       /* perform spectral scan if requested. */
-                       if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
-                           sc->spectral_mode == SPECTRAL_CHANSCAN)
-                               ath9k_spectral_scan_trigger(hw);
-               }
        }
 
        if (changed & IEEE80211_CONF_CHANGE_POWER) {
@@ -1735,6 +1769,9 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
        unsigned long flags;
        int pos;
 
+       if (config_enabled(CONFIG_ATH9K_TX99))
+               return -EOPNOTSUPP;
+
        spin_lock_irqsave(&common->cc_lock, flags);
        if (idx == 0)
                ath_update_survey_stats(sc);
@@ -1767,6 +1804,9 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
        struct ath_softc *sc = hw->priv;
        struct ath_hw *ah = sc->sc_ah;
 
+       if (config_enabled(CONFIG_ATH9K_TX99))
+               return;
+
        mutex_lock(&sc->mutex);
        ah->coverage_class = coverage_class;
 
@@ -2333,6 +2373,134 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
        sc->csa_vif = vif;
 }
 
+static void ath9k_tx99_stop(struct ath_softc *sc)
+{
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       ath_drain_all_txq(sc);
+       ath_startrecv(sc);
+
+       ath9k_hw_set_interrupts(ah);
+       ath9k_hw_enable_interrupts(ah);
+
+       ieee80211_wake_queues(sc->hw);
+
+       kfree_skb(sc->tx99_skb);
+       sc->tx99_skb = NULL;
+       sc->tx99_state = false;
+
+       ath9k_hw_tx99_stop(sc->sc_ah);
+       ath_dbg(common, XMIT, "TX99 stopped\n");
+}
+
+static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
+{
+       static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
+                              0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
+                              0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
+                              0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
+                              0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
+                              0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
+                              0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
+                              0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
+       u32 len = 1200;
+       struct ieee80211_hw *hw = sc->hw;
+       struct ieee80211_hdr *hdr;
+       struct ieee80211_tx_info *tx_info;
+       struct sk_buff *skb;
+
+       skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+
+       skb_put(skb, len);
+
+       memset(skb->data, 0, len);
+
+       hdr = (struct ieee80211_hdr *)skb->data;
+       hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
+       hdr->duration_id = 0;
+
+       memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+       memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+       memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+       hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
+
+       tx_info = IEEE80211_SKB_CB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
+       tx_info->band = hw->conf.chandef.chan->band;
+       tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
+       tx_info->control.vif = sc->tx99_vif;
+
+       memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));
+
+       return skb;
+}
+
+void ath9k_tx99_deinit(struct ath_softc *sc)
+{
+       ath_reset(sc);
+
+       ath9k_ps_wakeup(sc);
+       ath9k_tx99_stop(sc);
+       ath9k_ps_restore(sc);
+}
+
+int ath9k_tx99_init(struct ath_softc *sc)
+{
+       struct ieee80211_hw *hw = sc->hw;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ath_tx_control txctl;
+       int r;
+
+       if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+               ath_err(common,
+                       "driver is in an invalid state, unable to use TX99");
+               return -EINVAL;
+       }
+
+       sc->tx99_skb = ath9k_build_tx99_skb(sc);
+       if (!sc->tx99_skb)
+               return -ENOMEM;
+
+       memset(&txctl, 0, sizeof(txctl));
+       txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];
+
+       ath_reset(sc);
+
+       ath9k_ps_wakeup(sc);
+
+       ath9k_hw_disable_interrupts(ah);
+       atomic_set(&ah->intr_ref_cnt, -1);
+       ath_drain_all_txq(sc);
+       ath_stoprecv(sc);
+
+       sc->tx99_state = true;
+
+       ieee80211_stop_queues(hw);
+
+       if (sc->tx99_power == MAX_RATE_POWER + 1)
+               sc->tx99_power = MAX_RATE_POWER;
+
+       ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
+       r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
+       if (r) {
+               ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
+               return r;
+       }
+
+       ath_dbg(common, XMIT, "TX99 xmit started using %d (%d dBm)\n",
+               sc->tx99_power,
+               sc->tx99_power / 2);
+
+       /* We leave the hardware awake as it will be chugging on */
+
+       return 0;
+}
+
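/*
 * Editorial note, not part of the patch: sc->tx99_power is kept in the
 * hardware's 0.5 dBm steps, which is why the debug message above prints
 * tx99_power / 2 as dBm, and the generated frame carries
 * IEEE80211_TX_CTL_NO_ACK so the continuous transmission never waits on
 * a receiver.
 */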
 struct ieee80211_ops ath9k_ops = {
        .tx                 = ath9k_tx,
        .start              = ath9k_start,
index 815bee21c19a0fe1aeb2194fbdba2372be2b279b..0ac1b5f04256517050be277696e9a9eae4f2391c 100644 (file)
@@ -661,9 +661,9 @@ void ath9k_mci_update_wlan_channels(struct ath_softc *sc, bool allow_all)
        chan_start = wlan_chan - 10;
        chan_end = wlan_chan + 10;
 
-       if (chan->chanmode == CHANNEL_G_HT40PLUS)
+       if (IS_CHAN_HT40PLUS(chan))
                chan_end += 20;
-       else if (chan->chanmode == CHANNEL_G_HT40MINUS)
+       else if (IS_CHAN_HT40MINUS(chan))
                chan_start -= 20;
 
        /* adjust side band */
@@ -707,11 +707,11 @@ void ath9k_mci_set_txpower(struct ath_softc *sc, bool setchannel,
 
        if (setchannel) {
                struct ath9k_hw_cal_data *caldata = &sc->caldata;
-               if ((caldata->chanmode == CHANNEL_G_HT40PLUS) &&
+               if (IS_CHAN_HT40PLUS(ah->curchan) &&
                    (ah->curchan->channel > caldata->channel) &&
                    (ah->curchan->channel <= caldata->channel + 20))
                        return;
-               if ((caldata->chanmode == CHANNEL_G_HT40MINUS) &&
+               if (IS_CHAN_HT40MINUS(ah->curchan) &&
                    (ah->curchan->channel < caldata->channel) &&
                    (ah->curchan->channel >= caldata->channel - 20))
                        return;
index d089a7cf01c43810e88353d6504a90c238abf721..7e4c2524b63052006650ff76b92b3b839d6c9cd6 100644 (file)
@@ -269,7 +269,200 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
 
        { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E  AR9462 */
        { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E  AR1111/AR9485 */
-       { PCI_VDEVICE(ATHEROS, 0x0036) }, /* PCI-E  AR9565 */
+
+       /* CUS252 */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_ATHEROS,
+                        0x3028),
+         .driver_data = ATH9K_PCI_CUS252 |
+                        ATH9K_PCI_AR9565_2ANT |
+                        ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_AZWAVE,
+                        0x2176),
+         .driver_data = ATH9K_PCI_CUS252 |
+                        ATH9K_PCI_AR9565_2ANT |
+                        ATH9K_PCI_BT_ANT_DIV },
+
+       /* WB335 1-ANT */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_FOXCONN,
+                        0xE068),
+         .driver_data = ATH9K_PCI_AR9565_1ANT },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x185F, /* WNC */
+                        0xA119),
+         .driver_data = ATH9K_PCI_AR9565_1ANT },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x11AD, /* LITEON */
+                        0x0632),
+         .driver_data = ATH9K_PCI_AR9565_1ANT },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x11AD, /* LITEON */
+                        0x6671),
+         .driver_data = ATH9K_PCI_AR9565_1ANT },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x1B9A, /* XAVI */
+                        0x2811),
+         .driver_data = ATH9K_PCI_AR9565_1ANT },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x1B9A, /* XAVI */
+                        0x2812),
+         .driver_data = ATH9K_PCI_AR9565_1ANT },
+
+       /* WB335 1-ANT / Antenna Diversity */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_ATHEROS,
+                        0x3025),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_ATHEROS,
+                        0x3026),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_ATHEROS,
+                        0x302B),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_FOXCONN,
+                        0xE069),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x185F, /* WNC */
+                        0x3028),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x11AD, /* LITEON */
+                        0x0622),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x11AD, /* LITEON */
+                        0x0672),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x11AD, /* LITEON */
+                        0x0662),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_AZWAVE,
+                        0x213A),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_LENOVO,
+                        0x3026),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_HP,
+                        0x18E3),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_HP,
+                        0x217F),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_DELL,
+                        0x020E),
+         .driver_data = ATH9K_PCI_AR9565_1ANT | ATH9K_PCI_BT_ANT_DIV },
+
+       /* WB335 2-ANT */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0x411A),
+         .driver_data = ATH9K_PCI_AR9565_2ANT },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0x411B),
+         .driver_data = ATH9K_PCI_AR9565_2ANT },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0x411C),
+         .driver_data = ATH9K_PCI_AR9565_2ANT },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0x411D),
+         .driver_data = ATH9K_PCI_AR9565_2ANT },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_SAMSUNG,
+                        0x411E),
+         .driver_data = ATH9K_PCI_AR9565_2ANT },
+
+       /* WB335 2-ANT / Antenna-Diversity */
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_ATHEROS,
+                        0x3027),
+         .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_ATHEROS,
+                        0x302C),
+         .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x11AD, /* LITEON */
+                        0x0642),
+         .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x11AD, /* LITEON */
+                        0x0652),
+         .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x11AD, /* LITEON */
+                        0x0612),
+         .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        PCI_VENDOR_ID_AZWAVE,
+                        0x2130),
+         .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x144F, /* ASKEY */
+                        0x7202),
+         .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x1B9A, /* XAVI */
+                        0x2810),
+         .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+                        0x0036,
+                        0x185F, /* WNC */
+                        0x3027),
+         .driver_data = ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV },
+
+       /* PCI-E AR9565 (WB335) */
+       { PCI_VDEVICE(ATHEROS, 0x0036),
+         .driver_data = ATH9K_PCI_BT_ANT_DIV },
+
        { 0 }
 };
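
The entries added above attach driver_data bit flags to specific subsystem vendor/device IDs so the probe code can distinguish AR9565 (WB335) card variants. A minimal sketch of how such flags are typically combined and then tested follows; the numeric flag values are invented for the example and are not the real ath9k definitions.

#include <stdio.h>

/* Hypothetical flag values, for illustration only. */
#define ATH9K_PCI_AR9565_1ANT   0x0001
#define ATH9K_PCI_AR9565_2ANT   0x0002
#define ATH9K_PCI_BT_ANT_DIV    0x0004
#define ATH9K_PCI_CUS252        0x0008

static void describe_card(unsigned long driver_data)
{
        if (driver_data & ATH9K_PCI_CUS252)
                printf("CUS252 reference design\n");
        if (driver_data & ATH9K_PCI_AR9565_1ANT)
                printf("AR9565, one antenna\n");
        if (driver_data & ATH9K_PCI_AR9565_2ANT)
                printf("AR9565, two antennas\n");
        if (driver_data & ATH9K_PCI_BT_ANT_DIV)
                printf("BT antenna diversity\n");
}

int main(void)
{
        /* e.g. the WB335 2-ANT / antenna-diversity entries above */
        describe_card(ATH9K_PCI_AR9565_2ANT | ATH9K_PCI_BT_ANT_DIV);
        return 0;
}
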
 
index d3d7c51fa6c8bd65d7df5f803c99450aff980d95..d829bb62a3fc6d9e94ae867b2a675530ea4d1d7a 100644 (file)
@@ -1387,31 +1387,31 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
                int used_mcs = 0, used_htmode = 0;
 
                if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
-                       used_mcs = snprintf(mcs, 5, "%d",
-                               rc->rate_table->info[i].ratecode);
+                       used_mcs = scnprintf(mcs, 5, "%d",
+                                            rc->rate_table->info[i].ratecode);
 
                        if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
-                               used_htmode = snprintf(htmode, 5, "HT40");
+                               used_htmode = scnprintf(htmode, 5, "HT40");
                        else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
-                               used_htmode = snprintf(htmode, 5, "HT20");
+                               used_htmode = scnprintf(htmode, 5, "HT20");
                        else
-                               used_htmode = snprintf(htmode, 5, "????");
+                               used_htmode = scnprintf(htmode, 5, "????");
                }
 
                mcs[used_mcs] = '\0';
                htmode[used_htmode] = '\0';
 
-               len += snprintf(buf + len, max - len,
-                       "%6s %6s %3u.%d: "
-                       "%10u %10u %10u %10u\n",
-                       htmode,
-                       mcs,
-                       ratekbps / 1000,
-                       (ratekbps % 1000) / 100,
-                       stats->success,
-                       stats->retries,
-                       stats->xretries,
-                       stats->per);
+               len += scnprintf(buf + len, max - len,
+                                "%6s %6s %3u.%d: "
+                                "%10u %10u %10u %10u\n",
+                                htmode,
+                                mcs,
+                                ratekbps / 1000,
+                                (ratekbps % 1000) / 100,
+                                stats->success,
+                                stats->retries,
+                                stats->xretries,
+                                stats->per);
        }
 
        if (len > max)
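
The hunk above converts the rate-control debugfs dump from snprintf() to the kernel's scnprintf(). The difference matters when the return value is accumulated: snprintf() returns the length that would have been written, so "len += snprintf(buf + len, max - len, ...)" can push len past the buffer once truncation starts, while scnprintf() returns only what was actually stored. A userspace sketch of that behaviour, with my_scnprintf() standing in for the kernel helper:

#include <stdarg.h>
#include <stdio.h>

static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (i >= (int)size)
                i = size ? (int)size - 1 : 0;   /* report what was stored */
        return i;
}

int main(void)
{
        char buf[8];
        int len = 0;

        len += my_scnprintf(buf + len, sizeof(buf) - len, "%s", "0123456789");
        len += my_scnprintf(buf + len, sizeof(buf) - len, "%s", "abc");
        printf("len=%d buf=\"%s\"\n", len, buf);        /* len never exceeds 7 */
        return 0;
}
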
index ab9e3a8410bc2065fff9cb58996645fbf67be4fc..95ddca5495d492cb5a1bf4bb99d8ee3283a9e1ec 100644 (file)
@@ -19,7 +19,7 @@
 #include "ath9k.h"
 #include "ar9003_mac.h"
 
-#define SKB_CB_ATHBUF(__skb)   (*((struct ath_buf **)__skb->cb))
+#define SKB_CB_ATHBUF(__skb)   (*((struct ath_rxbuf **)__skb->cb))
 
 static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
 {
@@ -35,7 +35,7 @@ static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
  * buffer (or rx fifo). This can incorrectly acknowledge packets
  * to a sender if last desc is self-linked.
  */
-static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_link(struct ath_softc *sc, struct ath_rxbuf *bf)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
@@ -68,7 +68,7 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
        sc->rx.rxlink = &ds->ds_link;
 }
 
-static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_rxbuf *bf)
 {
        if (sc->rx.buf_hold)
                ath_rx_buf_link(sc, sc->rx.buf_hold);
@@ -112,13 +112,13 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
        struct ath_hw *ah = sc->sc_ah;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;
-       struct ath_buf *bf;
+       struct ath_rxbuf *bf;
 
        rx_edma = &sc->rx.rx_edma[qtype];
        if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
                return false;
 
-       bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+       bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
        list_del_init(&bf->list);
 
        skb = bf->bf_mpdu;
@@ -138,7 +138,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
                                  enum ath9k_rx_qtype qtype)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_buf *bf, *tbf;
+       struct ath_rxbuf *bf, *tbf;
 
        if (list_empty(&sc->rx.rxbuf)) {
                ath_dbg(common, QUEUE, "No free rx buf available\n");
@@ -154,7 +154,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
 static void ath_rx_remove_buffer(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype)
 {
-       struct ath_buf *bf;
+       struct ath_rxbuf *bf;
        struct ath_rx_edma *rx_edma;
        struct sk_buff *skb;
 
@@ -171,7 +171,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
-       struct ath_buf *bf;
+       struct ath_rxbuf *bf;
 
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
        ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
@@ -199,7 +199,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct sk_buff *skb;
-       struct ath_buf *bf;
+       struct ath_rxbuf *bf;
        int error = 0, i;
        u32 size;
 
@@ -211,7 +211,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
        ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
                               ah->caps.rx_hp_qdepth);
 
-       size = sizeof(struct ath_buf) * nbufs;
+       size = sizeof(struct ath_rxbuf) * nbufs;
        bf = devm_kzalloc(sc->dev, size, GFP_KERNEL);
        if (!bf)
                return -ENOMEM;
@@ -271,7 +271,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct sk_buff *skb;
-       struct ath_buf *bf;
+       struct ath_rxbuf *bf;
        int error = 0;
 
        spin_lock_init(&sc->sc_pcu_lock);
@@ -332,7 +332,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
-       struct ath_buf *bf;
+       struct ath_rxbuf *bf;
 
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_rx_edma_cleanup(sc);
@@ -375,6 +375,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
 {
        u32 rfilt;
 
+       if (config_enabled(CONFIG_ATH9K_TX99))
+               return 0;
+
        rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
                | ATH9K_RX_FILTER_MCAST;
 
@@ -427,7 +430,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
 int ath_startrecv(struct ath_softc *sc)
 {
        struct ath_hw *ah = sc->sc_ah;
-       struct ath_buf *bf, *tbf;
+       struct ath_rxbuf *bf, *tbf;
 
        if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
                ath_edma_start_recv(sc);
@@ -447,7 +450,7 @@ int ath_startrecv(struct ath_softc *sc)
        if (list_empty(&sc->rx.rxbuf))
                goto start_recv;
 
-       bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+       bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
        ath9k_hw_putrxbuf(ah, bf->bf_daddr);
        ath9k_hw_rxena(ah);
 
@@ -603,13 +606,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
 static bool ath_edma_get_buffers(struct ath_softc *sc,
                                 enum ath9k_rx_qtype qtype,
                                 struct ath_rx_status *rs,
-                                struct ath_buf **dest)
+                                struct ath_rxbuf **dest)
 {
        struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct sk_buff *skb;
-       struct ath_buf *bf;
+       struct ath_rxbuf *bf;
        int ret;
 
        skb = skb_peek(&rx_edma->rx_fifo);
@@ -653,11 +656,11 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
        return true;
 }
 
-static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
                                                struct ath_rx_status *rs,
                                                enum ath9k_rx_qtype qtype)
 {
-       struct ath_buf *bf = NULL;
+       struct ath_rxbuf *bf = NULL;
 
        while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
                if (!bf)
@@ -668,13 +671,13 @@ static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
        return NULL;
 }
 
-static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
                                           struct ath_rx_status *rs)
 {
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath_desc *ds;
-       struct ath_buf *bf;
+       struct ath_rxbuf *bf;
        int ret;
 
        if (list_empty(&sc->rx.rxbuf)) {
@@ -682,7 +685,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
                return NULL;
        }
 
-       bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+       bf = list_first_entry(&sc->rx.rxbuf, struct ath_rxbuf, list);
        if (bf == sc->rx.buf_hold)
                return NULL;
 
@@ -702,7 +705,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
        ret = ath9k_hw_rxprocdesc(ah, ds, rs);
        if (ret == -EINPROGRESS) {
                struct ath_rx_status trs;
-               struct ath_buf *tbf;
+               struct ath_rxbuf *tbf;
                struct ath_desc *tds;
 
                memset(&trs, 0, sizeof(trs));
@@ -711,7 +714,7 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
                        return NULL;
                }
 
-               tbf = list_entry(bf->list.next, struct ath_buf, list);
+               tbf = list_entry(bf->list.next, struct ath_rxbuf, list);
 
                /*
                 * On some hardware the descriptor status words could
@@ -972,14 +975,15 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
 {
 #ifdef CONFIG_ATH9K_DEBUGFS
        struct ath_hw *ah = sc->sc_ah;
-       u8 bins[SPECTRAL_HT20_NUM_BINS];
-       u8 *vdata = (u8 *)hdr;
-       struct fft_sample_ht20 fft_sample;
+       u8 num_bins, *bins, *vdata = (u8 *)hdr;
+       struct fft_sample_ht20 fft_sample_20;
+       struct fft_sample_ht20_40 fft_sample_40;
+       struct fft_sample_tlv *tlv;
        struct ath_radar_info *radar_info;
-       struct ath_ht20_mag_info *mag_info;
        int len = rs->rs_datalen;
        int dc_pos;
-       u16 length, max_magnitude;
+       u16 fft_len, length, freq = ah->curchan->chan->center_freq;
+       enum nl80211_channel_type chan_type;
 
        /* AR9280 and before report via ATH9K_PHYERR_RADAR, AR93xx and newer
         * via ATH9K_PHYERR_SPECTRAL. Haven't seen ATH9K_PHYERR_FALSE_RADAR_EXT
@@ -997,45 +1001,44 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
        if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
                return 0;
 
-       /* Variation in the data length is possible and will be fixed later.
-        * Note that we only support HT20 for now.
-        *
-        * TODO: add HT20_40 support as well.
-        */
-       if ((len > SPECTRAL_HT20_TOTAL_DATA_LEN + 2) ||
-           (len < SPECTRAL_HT20_TOTAL_DATA_LEN - 1))
-               return 1;
-
-       fft_sample.tlv.type = ATH_FFT_SAMPLE_HT20;
-       length = sizeof(fft_sample) - sizeof(fft_sample.tlv);
-       fft_sample.tlv.length = __cpu_to_be16(length);
+       chan_type = cfg80211_get_chandef_type(&sc->hw->conf.chandef);
+       if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+           (chan_type == NL80211_CHAN_HT40PLUS)) {
+               fft_len = SPECTRAL_HT20_40_TOTAL_DATA_LEN;
+               num_bins = SPECTRAL_HT20_40_NUM_BINS;
+               bins = (u8 *)fft_sample_40.data;
+       } else {
+               fft_len = SPECTRAL_HT20_TOTAL_DATA_LEN;
+               num_bins = SPECTRAL_HT20_NUM_BINS;
+               bins = (u8 *)fft_sample_20.data;
+       }
 
-       fft_sample.freq = __cpu_to_be16(ah->curchan->chan->center_freq);
-       fft_sample.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
-       fft_sample.noise = ah->noise;
+       /* Variation in the data length is possible and will be fixed later */
+       if ((len > fft_len + 2) || (len < fft_len - 1))
+               return 1;
 
-       switch (len - SPECTRAL_HT20_TOTAL_DATA_LEN) {
+       switch (len - fft_len) {
        case 0:
                /* length correct, nothing to do. */
-               memcpy(bins, vdata, SPECTRAL_HT20_NUM_BINS);
+               memcpy(bins, vdata, num_bins);
                break;
        case -1:
                /* first byte missing, duplicate it. */
-               memcpy(&bins[1], vdata, SPECTRAL_HT20_NUM_BINS - 1);
+               memcpy(&bins[1], vdata, num_bins - 1);
                bins[0] = vdata[0];
                break;
        case 2:
                /* MAC added 2 extra bytes at bin 30 and 32, remove them. */
                memcpy(bins, vdata, 30);
                bins[30] = vdata[31];
-               memcpy(&bins[31], &vdata[33], SPECTRAL_HT20_NUM_BINS - 31);
+               memcpy(&bins[31], &vdata[33], num_bins - 31);
                break;
        case 1:
                /* MAC added 2 extra bytes AND first byte is missing. */
                bins[0] = vdata[0];
-               memcpy(&bins[0], vdata, 30);
+               memcpy(&bins[1], vdata, 30);
                bins[31] = vdata[31];
-               memcpy(&bins[32], &vdata[33], SPECTRAL_HT20_NUM_BINS - 32);
+               memcpy(&bins[32], &vdata[33], num_bins - 32);
                break;
        default:
                return 1;
@@ -1044,23 +1047,93 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
        /* DC value (value in the middle) is the blind spot of the spectral
         * sample and invalid, interpolate it.
         */
-       dc_pos = SPECTRAL_HT20_NUM_BINS / 2;
+       dc_pos = num_bins / 2;
        bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
 
-       /* mag data is at the end of the frame, in front of radar_info */
-       mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+       if ((chan_type == NL80211_CHAN_HT40MINUS) ||
+           (chan_type == NL80211_CHAN_HT40PLUS)) {
+               s8 lower_rssi, upper_rssi;
+               s16 ext_nf;
+               u8 lower_max_index, upper_max_index;
+               u8 lower_bitmap_w, upper_bitmap_w;
+               u16 lower_mag, upper_mag;
+               struct ath9k_hw_cal_data *caldata = ah->caldata;
+               struct ath_ht20_40_mag_info *mag_info;
+
+               if (caldata)
+                       ext_nf = ath9k_hw_getchan_noise(ah, ah->curchan,
+                                       caldata->nfCalHist[3].privNF);
+               else
+                       ext_nf = ATH_DEFAULT_NOISE_FLOOR;
+
+               length = sizeof(fft_sample_40) - sizeof(struct fft_sample_tlv);
+               fft_sample_40.tlv.type = ATH_FFT_SAMPLE_HT20_40;
+               fft_sample_40.tlv.length = __cpu_to_be16(length);
+               fft_sample_40.freq = __cpu_to_be16(freq);
+               fft_sample_40.channel_type = chan_type;
+
+               if (chan_type == NL80211_CHAN_HT40PLUS) {
+                       lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+                       upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
 
-       /* copy raw bins without scaling them */
-       memcpy(fft_sample.data, bins, SPECTRAL_HT20_NUM_BINS);
-       fft_sample.max_exp = mag_info->max_exp & 0xf;
+                       fft_sample_40.lower_noise = ah->noise;
+                       fft_sample_40.upper_noise = ext_nf;
+               } else {
+                       lower_rssi = fix_rssi_inv_only(rs->rs_rssi_ext0);
+                       upper_rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
 
-       max_magnitude = spectral_max_magnitude(mag_info->all_bins);
-       fft_sample.max_magnitude = __cpu_to_be16(max_magnitude);
-       fft_sample.max_index = spectral_max_index(mag_info->all_bins);
-       fft_sample.bitmap_weight = spectral_bitmap_weight(mag_info->all_bins);
-       fft_sample.tsf = __cpu_to_be64(tsf);
+                       fft_sample_40.lower_noise = ext_nf;
+                       fft_sample_40.upper_noise = ah->noise;
+               }
+               fft_sample_40.lower_rssi = lower_rssi;
+               fft_sample_40.upper_rssi = upper_rssi;
+
+               mag_info = ((struct ath_ht20_40_mag_info *)radar_info) - 1;
+               lower_mag = spectral_max_magnitude(mag_info->lower_bins);
+               upper_mag = spectral_max_magnitude(mag_info->upper_bins);
+               fft_sample_40.lower_max_magnitude = __cpu_to_be16(lower_mag);
+               fft_sample_40.upper_max_magnitude = __cpu_to_be16(upper_mag);
+               lower_max_index = spectral_max_index(mag_info->lower_bins);
+               upper_max_index = spectral_max_index(mag_info->upper_bins);
+               fft_sample_40.lower_max_index = lower_max_index;
+               fft_sample_40.upper_max_index = upper_max_index;
+               lower_bitmap_w = spectral_bitmap_weight(mag_info->lower_bins);
+               upper_bitmap_w = spectral_bitmap_weight(mag_info->upper_bins);
+               fft_sample_40.lower_bitmap_weight = lower_bitmap_w;
+               fft_sample_40.upper_bitmap_weight = upper_bitmap_w;
+               fft_sample_40.max_exp = mag_info->max_exp & 0xf;
+
+               fft_sample_40.tsf = __cpu_to_be64(tsf);
+
+               tlv = (struct fft_sample_tlv *)&fft_sample_40;
+       } else {
+               u8 max_index, bitmap_w;
+               u16 magnitude;
+               struct ath_ht20_mag_info *mag_info;
+
+               length = sizeof(fft_sample_20) - sizeof(struct fft_sample_tlv);
+               fft_sample_20.tlv.type = ATH_FFT_SAMPLE_HT20;
+               fft_sample_20.tlv.length = __cpu_to_be16(length);
+               fft_sample_20.freq = __cpu_to_be16(freq);
+
+               fft_sample_20.rssi = fix_rssi_inv_only(rs->rs_rssi_ctl0);
+               fft_sample_20.noise = ah->noise;
+
+               mag_info = ((struct ath_ht20_mag_info *)radar_info) - 1;
+               magnitude = spectral_max_magnitude(mag_info->all_bins);
+               fft_sample_20.max_magnitude = __cpu_to_be16(magnitude);
+               max_index = spectral_max_index(mag_info->all_bins);
+               fft_sample_20.max_index = max_index;
+               bitmap_w = spectral_bitmap_weight(mag_info->all_bins);
+               fft_sample_20.bitmap_weight = bitmap_w;
+               fft_sample_20.max_exp = mag_info->max_exp & 0xf;
+
+               fft_sample_20.tsf = __cpu_to_be64(tsf);
+
+               tlv = (struct fft_sample_tlv *)&fft_sample_20;
+       }
 
-       ath_debug_send_fft_sample(sc, &fft_sample.tlv);
+       ath_debug_send_fft_sample(sc, tlv);
        return 1;
 #else
        return 0;
@@ -1308,7 +1381,7 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc,
 
 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 {
-       struct ath_buf *bf;
+       struct ath_rxbuf *bf;
        struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
        struct ieee80211_rx_status *rxs;
        struct ath_hw *ah = sc->sc_ah;
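
The spectral-scan rework above sizes the FFT sample by channel type (HT20 vs HT20/40), repairs the occasional missing or extra bytes, and then interpolates the DC bin, which is the receiver's blind spot in the middle of the sample. A standalone sketch of just the DC interpolation step, with NUM_BINS chosen to match the HT20 case (an assumption for the example):

#include <stdio.h>

#define NUM_BINS 56     /* assumed HT20 bin count for the example */

static void interpolate_dc(unsigned char *bins, int num_bins)
{
        int dc_pos = num_bins / 2;

        /* replace the invalid centre bin with the mean of its neighbours */
        bins[dc_pos] = (bins[dc_pos + 1] + bins[dc_pos - 1]) / 2;
}

int main(void)
{
        unsigned char bins[NUM_BINS] = { 0 };

        bins[NUM_BINS / 2 - 1] = 40;
        bins[NUM_BINS / 2 + 1] = 60;
        interpolate_dc(bins, NUM_BINS);
        printf("DC bin -> %u\n", bins[NUM_BINS / 2]);   /* 50 */
        return 0;
}
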
index fde6da619f30f96e146c00b017e5f37a9df0e80d..0db37f230018ee2692ef4fcbff7cf3f0b3135e8e 100644 (file)
@@ -39,7 +39,7 @@ struct wmi_fw_version {
 struct wmi_event_swba {
        __be64 tsf;
        u8 beacon_pending;
-};
+} __packed;
 
 /*
  * 64 - HTC header - WMI header - 1 / txstatus
index 5ac713d2ff5d22dc6d976291c6d97098bfbceafd..09cdbcd097394a3a2c324230c2743f5d181b0900 100644 (file)
@@ -1241,12 +1241,13 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
                if (bf->bf_next)
                        info.link = bf->bf_next->bf_daddr;
                else
-                       info.link = 0;
+                       info.link = (sc->tx99_state) ? bf->bf_daddr : 0;
 
                if (!bf_first) {
                        bf_first = bf;
 
-                       info.flags = ATH9K_TXDESC_INTREQ;
+                       if (!sc->tx99_state)
+                               info.flags = ATH9K_TXDESC_INTREQ;
                        if ((tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) ||
                            txq == sc->tx.uapsdq)
                                info.flags |= ATH9K_TXDESC_CLRDMASK;
@@ -1704,16 +1705,9 @@ int ath_cabq_update(struct ath_softc *sc)
        int qnum = sc->beacon.cabq->axq_qnum;
 
        ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
-       /*
-        * Ensure the readytime % is within the bounds.
-        */
-       if (sc->config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
-               sc->config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
-       else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
-               sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
 
        qi.tqi_readyTime = (cur_conf->beacon_interval *
-                           sc->config.cabqReadytime) / 100;
+                           ATH_CABQ_READY_TIME) / 100;
        ath_txq_update(sc, qnum, &qi);
 
        return 0;
@@ -1948,7 +1942,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                        txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
        }
 
-       if (!edma) {
+       if (!edma || sc->tx99_state) {
                TX_STAT_INC(txq->axq_qnum, txstart);
                ath9k_hw_txstart(ah, txq->axq_qnum);
        }
@@ -1969,15 +1963,18 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid, struct sk_buff *skb)
 {
+       struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_frame_info *fi = get_frame_info(skb);
        struct list_head bf_head;
-       struct ath_buf *bf;
-
-       bf = fi->bf;
+       struct ath_buf *bf = fi->bf;
 
        INIT_LIST_HEAD(&bf_head);
        list_add_tail(&bf->list, &bf_head);
        bf->bf_state.bf_type = 0;
+       if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+               bf->bf_state.bf_type = BUF_AMPDU;
+               ath_tx_addto_baw(sc, tid, bf);
+       }
 
        bf->bf_next = NULL;
        bf->bf_lastbf = bf;
@@ -2024,6 +2021,9 @@ static void setup_frame_info(struct ieee80211_hw *hw,
                fi->keyix = ATH9K_TXKEYIX_INVALID;
        fi->keytype = keytype;
        fi->framelen = framelen;
+
+       if (!rate)
+               return;
        fi->rtscts_rate = rate->hw_value;
        if (short_preamble)
                fi->rtscts_rate |= rate->hw_value_short;
@@ -2034,8 +2034,7 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
        struct ath_hw *ah = sc->sc_ah;
        struct ath9k_channel *curchan = ah->curchan;
 
-       if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) &&
-           (curchan->channelFlags & CHANNEL_5GHZ) &&
+       if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && IS_CHAN_5GHZ(curchan) &&
            (chainmask == 0x7) && (rate < 0x90))
                return 0x3;
        else if (AR_SREV_9462(ah) && ath9k_hw_btcoex_is_enabled(ah) &&
@@ -2326,7 +2325,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
        ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
 
        if (sc->sc_ah->caldata)
-               sc->sc_ah->caldata->paprd_packet_sent = true;
+               set_bit(PAPRD_PACKET_SENT, &sc->sc_ah->caldata->cal_flags);
 
        if (!(tx_flags & ATH_TX_ERROR))
                /* Frame was ACKed */
@@ -2376,6 +2375,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 
        dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
        bf->bf_buf_addr = 0;
+       if (sc->tx99_state)
+               goto skip_tx_complete;
 
        if (bf->bf_state.bfs_paprd) {
                if (time_after(jiffies,
@@ -2388,6 +2389,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                ath_debug_stat_tx(sc, bf, ts, txq, tx_flags);
                ath_tx_complete(sc, skb, tx_flags, txq);
        }
+skip_tx_complete:
        /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
         * accidentally reference it later.
         */
@@ -2746,3 +2748,46 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
                ath_txq_unlock(sc, txq);
        }
 }
+
+int ath9k_tx99_send(struct ath_softc *sc, struct sk_buff *skb,
+                   struct ath_tx_control *txctl)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ath_frame_info *fi = get_frame_info(skb);
+       struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+       struct ath_buf *bf;
+       int padpos, padsize;
+
+       padpos = ieee80211_hdrlen(hdr->frame_control);
+       padsize = padpos & 3;
+
+       if (padsize && skb->len > padpos) {
+               if (skb_headroom(skb) < padsize) {
+                       ath_dbg(common, XMIT,
+                               "tx99 padding failed\n");
+                       return -EINVAL;
+               }
+
+               skb_push(skb, padsize);
+               memmove(skb->data, skb->data + padsize, padpos);
+       }
+
+       fi->keyix = ATH9K_TXKEYIX_INVALID;
+       fi->framelen = skb->len + FCS_LEN;
+       fi->keytype = ATH9K_KEY_TYPE_CLEAR;
+
+       bf = ath_tx_setup_buffer(sc, txctl->txq, NULL, skb);
+       if (!bf) {
+               ath_dbg(common, XMIT, "tx99 buffer setup failed\n");
+               return -EINVAL;
+       }
+
+       ath_set_rates(sc->tx99_vif, NULL, bf);
+
+       ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, bf->bf_daddr);
+       ath9k_hw_tx99_start(sc->sc_ah, txctl->txq->axq_qnum);
+
+       ath_tx_send_normal(sc, txctl->txq, NULL, skb);
+
+       return 0;
+}
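
ath9k_tx99_send() above pads the frame when the 802.11 header length is not a multiple of four: it pushes hdrlen & 3 bytes of headroom and moves the header back, so the frame body stays 4-byte aligned for the hardware. A tiny standalone sketch of that rule:

#include <stdio.h>

/* Padding needed between an 802.11 header of the given length and the
 * frame body so the body starts on a 4-byte boundary. */
static int pad_size(int hdrlen)
{
        return hdrlen & 3;
}

int main(void)
{
        printf("24-byte header -> %d pad bytes\n", pad_size(24));       /* 0 */
        printf("26-byte QoS header -> %d pad bytes\n", pad_size(26));   /* 2 */
        return 0;
}
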
similarity index 95%
rename from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
rename to drivers/net/wireless/ath/dfs_pattern_detector.c
index 491305c81fcecd36f5bc9bae0d7b66afd1a7e45e..a1a69c5db409262e3f63a1f1246d6590e448cbfa 100644 (file)
@@ -19,7 +19,7 @@
 
 #include "dfs_pattern_detector.h"
 #include "dfs_pri_detector.h"
-#include "ath9k.h"
+#include "ath.h"
 
 /*
  * tolerated deviation of radar time stamp in usecs on both sides
@@ -143,7 +143,6 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
 {
        u32 sz, i;
        struct channel_detector *cd;
-       struct ath_common *common = ath9k_hw_common(dpd->ah);
 
        cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
        if (cd == NULL)
@@ -167,7 +166,7 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
        return cd;
 
 fail:
-       ath_dbg(common, DFS,
+       ath_dbg(dpd->common, DFS,
                "failed to allocate channel_detector for freq=%d\n", freq);
        channel_detector_exit(dpd, cd);
        return NULL;
@@ -242,7 +241,7 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
                struct pri_detector *pd = cd->detectors[i];
                struct pri_sequence *ps = pd->add_pulse(pd, event);
                if (ps != NULL) {
-                       ath_dbg(ath9k_hw_common(dpd->ah), DFS,
+                       ath_dbg(dpd->common, DFS,
                                "DFS: radar found on freq=%d: id=%d, pri=%d, "
                                "count=%d, count_false=%d\n",
                                event->freq, pd->rs->type_id,
@@ -254,6 +253,12 @@ dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
        return false;
 }
 
+static struct ath_dfs_pool_stats
+dpd_get_stats(struct dfs_pattern_detector *dpd)
+{
+       return global_dfs_pool_stats;
+}
+
 static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
                           enum nl80211_dfs_regions region)
 {
@@ -284,14 +289,18 @@ static struct dfs_pattern_detector default_dpd = {
        .exit           = dpd_exit,
        .set_dfs_domain = dpd_set_domain,
        .add_pulse      = dpd_add_pulse,
+       .get_stats      = dpd_get_stats,
        .region         = NL80211_DFS_UNSET,
 };
 
 struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
+dfs_pattern_detector_init(struct ath_common *common,
+                         enum nl80211_dfs_regions region)
 {
        struct dfs_pattern_detector *dpd;
-       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS))
+               return NULL;
 
        dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
        if (dpd == NULL)
@@ -300,7 +309,7 @@ dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
        *dpd = default_dpd;
        INIT_LIST_HEAD(&dpd->channel_detectors);
 
-       dpd->ah = ah;
+       dpd->common = common;
        if (dpd->set_dfs_domain(dpd, region))
                return dpd;
 
similarity index 87%
rename from drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
rename to drivers/net/wireless/ath/dfs_pattern_detector.h
index 90a5abcc4265aeeddcaa012058de9e3c6838d415..dde2652b787cd533d7d0c2c411aefd99ada56c4b 100644 (file)
 #include <linux/list.h>
 #include <linux/nl80211.h>
 
+/**
+ * struct ath_dfs_pool_stats - DFS Statistics for global pools
+ */
+struct ath_dfs_pool_stats {
+       u32 pool_reference;
+       u32 pulse_allocated;
+       u32 pulse_alloc_error;
+       u32 pulse_used;
+       u32 pseq_allocated;
+       u32 pseq_alloc_error;
+       u32 pseq_used;
+};
+
 /**
  * struct pulse_event - describing pulses reported by PHY
  * @ts: pulse time stamp in us
@@ -77,11 +90,12 @@ struct dfs_pattern_detector {
        bool (*add_pulse)(struct dfs_pattern_detector *dpd,
                          struct pulse_event *pe);
 
+       struct ath_dfs_pool_stats (*get_stats)(struct dfs_pattern_detector *dpd);
        enum nl80211_dfs_regions region;
        u8 num_radar_types;
        u64 last_pulse_ts;
        /* needed for ath_dbg() */
-       struct ath_hw *ah;
+       struct ath_common *common;
 
        const struct radar_detector_specs *radar_spec;
        struct list_head channel_detectors;
@@ -92,15 +106,7 @@ struct dfs_pattern_detector {
  * @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
  * @return instance pointer on success, NULL otherwise
  */
-#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
 extern struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region);
-#else
-static inline struct dfs_pattern_detector *
-dfs_pattern_detector_init(struct ath_hw *ah, enum nl80211_dfs_regions region)
-{
-       return NULL;
-}
-#endif /* CONFIG_ATH9K_DFS_CERTIFIED */
-
+dfs_pattern_detector_init(struct ath_common *common,
+                         enum nl80211_dfs_regions region);
 #endif /* DFS_PATTERN_DETECTOR_H */
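
The header change above moves the detector from an ath9k-private struct ath_hw pointer to the shared struct ath_common and adds a get_stats() operation that hands back the global pool counters by value. A compact sketch of that ops-with-getter pattern, using simplified types rather than the real ath ones:

#include <stdio.h>

struct pool_stats {
        unsigned int pulse_allocated;
        unsigned int pulse_used;
};

static struct pool_stats global_stats;

struct detector {
        struct pool_stats (*get_stats)(struct detector *dpd);
};

static struct pool_stats detector_get_stats(struct detector *dpd)
{
        (void)dpd;
        return global_stats;    /* copy out; callers cannot touch the pool */
}

int main(void)
{
        struct detector d = { .get_stats = detector_get_stats };
        struct pool_stats s;

        global_stats.pulse_allocated = 3;
        s = d.get_stats(&d);
        printf("pulses allocated: %u\n", s.pulse_allocated);
        return 0;
}
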
similarity index 98%
rename from drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
rename to drivers/net/wireless/ath/dfs_pri_detector.c
index 5ba4b6fe37c0aa630692c5291858fea0fcae6b35..43b60817888450555cd579b7803848054811d387 100644 (file)
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 
-#include "ath9k.h"
+#include "ath.h"
 #include "dfs_pattern_detector.h"
 #include "dfs_pri_detector.h"
-#include "dfs_debug.h"
+
+struct ath_dfs_pool_stats global_dfs_pool_stats = {};
+
+#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
+#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
 
 /**
  * struct pulse_elem - elements in pulse queue
@@ -392,7 +396,7 @@ static struct pri_sequence *pri_detector_add_pulse(struct pri_detector *de,
 
        if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
                pri_detector_reset(de, ts);
-               return false;
+               return NULL;
        }
 
        ps = pseq_handler_check_detection(de);
similarity index 97%
rename from drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
rename to drivers/net/wireless/ath/dfs_pri_detector.h
index 723962d1abc68421a73b4c200cc6cb1bcf272be6..79f0fff4d1e662d065a4afb43ebde80d83eaa96c 100644 (file)
@@ -19,6 +19,8 @@
 
 #include <linux/list.h>
 
+extern struct ath_dfs_pool_stats global_dfs_pool_stats;
+
 /**
  * struct pri_sequence - sequence of pulses matching one PRI
  * @head: list_head
index 7d077c752dd56ca9e3c3d9cb0aae0c951795cadb..c00687e05688e6498b70131ba11c1105d0b933b1 100644 (file)
@@ -356,14 +356,131 @@ static u16 ath_regd_find_country_by_name(char *alpha2)
        return -1;
 }
 
+static int __ath_reg_dyn_country(struct wiphy *wiphy,
+                                struct ath_regulatory *reg,
+                                struct regulatory_request *request)
+{
+       u16 country_code;
+
+       if (!ath_is_world_regd(reg))
+               return -EINVAL;
+
+       country_code = ath_regd_find_country_by_name(request->alpha2);
+       if (country_code == (u16) -1)
+               return -EINVAL;
+
+       reg->current_rd = COUNTRY_ERD_FLAG;
+       reg->current_rd |= country_code;
+
+       __ath_regd_init(reg);
+
+       ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
+       return 0;
+}
+
+static void ath_reg_dyn_country(struct wiphy *wiphy,
+                               struct ath_regulatory *reg,
+                               struct regulatory_request *request)
+{
+       if (__ath_reg_dyn_country(wiphy, reg, request))
+               return;
+
+       printk(KERN_DEBUG "ath: regdomain 0x%0x "
+                         "dynamically updated by %s\n",
+              reg->current_rd,
+              reg_initiator_name(request->initiator));
+}
+
+static bool dynamic_country_user_possible(struct ath_regulatory *reg)
+{
+       if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+               return true;
+
+       switch (reg->country_code) {
+       case CTRY_UNITED_STATES:
+       case CTRY_JAPAN1:
+       case CTRY_JAPAN2:
+       case CTRY_JAPAN3:
+       case CTRY_JAPAN4:
+       case CTRY_JAPAN5:
+       case CTRY_JAPAN6:
+       case CTRY_JAPAN7:
+       case CTRY_JAPAN8:
+       case CTRY_JAPAN9:
+       case CTRY_JAPAN10:
+       case CTRY_JAPAN11:
+       case CTRY_JAPAN12:
+       case CTRY_JAPAN13:
+       case CTRY_JAPAN14:
+       case CTRY_JAPAN15:
+       case CTRY_JAPAN16:
+       case CTRY_JAPAN17:
+       case CTRY_JAPAN18:
+       case CTRY_JAPAN19:
+       case CTRY_JAPAN20:
+       case CTRY_JAPAN21:
+       case CTRY_JAPAN22:
+       case CTRY_JAPAN23:
+       case CTRY_JAPAN24:
+       case CTRY_JAPAN25:
+       case CTRY_JAPAN26:
+       case CTRY_JAPAN27:
+       case CTRY_JAPAN28:
+       case CTRY_JAPAN29:
+       case CTRY_JAPAN30:
+       case CTRY_JAPAN31:
+       case CTRY_JAPAN32:
+       case CTRY_JAPAN33:
+       case CTRY_JAPAN34:
+       case CTRY_JAPAN35:
+       case CTRY_JAPAN36:
+       case CTRY_JAPAN37:
+       case CTRY_JAPAN38:
+       case CTRY_JAPAN39:
+       case CTRY_JAPAN40:
+       case CTRY_JAPAN41:
+       case CTRY_JAPAN42:
+       case CTRY_JAPAN43:
+       case CTRY_JAPAN44:
+       case CTRY_JAPAN45:
+       case CTRY_JAPAN46:
+       case CTRY_JAPAN47:
+       case CTRY_JAPAN48:
+       case CTRY_JAPAN49:
+       case CTRY_JAPAN50:
+       case CTRY_JAPAN51:
+       case CTRY_JAPAN52:
+       case CTRY_JAPAN53:
+       case CTRY_JAPAN54:
+       case CTRY_JAPAN55:
+       case CTRY_JAPAN56:
+       case CTRY_JAPAN57:
+       case CTRY_JAPAN58:
+       case CTRY_JAPAN59:
+               return false;
+       }
+
+       return true;
+}
+
+static void ath_reg_dyn_country_user(struct wiphy *wiphy,
+                                    struct ath_regulatory *reg,
+                                    struct regulatory_request *request)
+{
+       if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+               return;
+       if (!dynamic_country_user_possible(reg))
+               return;
+       ath_reg_dyn_country(wiphy, reg, request);
+}
+
 void ath_reg_notifier_apply(struct wiphy *wiphy,
                            struct regulatory_request *request,
                            struct ath_regulatory *reg)
 {
        struct ath_common *common = container_of(reg, struct ath_common,
                                                 regulatory);
-       u16 country_code;
-
        /* We always apply this */
        ath_reg_apply_radar_flags(wiphy);
 
@@ -388,25 +505,12 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
                       sizeof(struct ath_regulatory));
                break;
        case NL80211_REGDOM_SET_BY_DRIVER:
+               break;
        case NL80211_REGDOM_SET_BY_USER:
+               ath_reg_dyn_country_user(wiphy, reg, request);
                break;
        case NL80211_REGDOM_SET_BY_COUNTRY_IE:
-               if (!ath_is_world_regd(reg))
-                       break;
-
-               country_code = ath_regd_find_country_by_name(request->alpha2);
-               if (country_code == (u16) -1)
-                       break;
-
-               reg->current_rd = COUNTRY_ERD_FLAG;
-               reg->current_rd |= country_code;
-
-               printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
-                       reg->current_rd);
-               __ath_regd_init(reg);
-
-               ath_reg_apply_world_flags(wiphy, request->initiator, reg);
-
+               ath_reg_dyn_country(wiphy, reg, request);
                break;
        }
 }
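
The regulatory rework above funnels both country-IE and (optionally) user hints through __ath_reg_dyn_country(), which rebuilds current_rd as the country-flag bit OR'ed with the looked-up country code. A minimal sketch of that encoding; the 0x8000 flag value is an assumption for the example:

#include <stdio.h>

#define COUNTRY_ERD_FLAG 0x8000 /* assumed "this is a country code" bit */

static unsigned int make_country_rd(unsigned int country_code)
{
        return COUNTRY_ERD_FLAG | country_code;
}

int main(void)
{
        /* 840 is the ISO 3166 numeric code for the United States */
        printf("current_rd = 0x%04x\n", make_country_rd(840));
        return 0;
}
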
diff --git a/drivers/net/wireless/ath/wcn36xx/Kconfig b/drivers/net/wireless/ath/wcn36xx/Kconfig
new file mode 100644 (file)
index 0000000..591ebae
--- /dev/null
@@ -0,0 +1,16 @@
+config WCN36XX
+       tristate "Qualcomm Atheros WCN3660/3680 support"
+       depends on MAC80211 && HAS_DMA
+       ---help---
+         This module adds support for wireless adapters based on
+         Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
+
+         If you choose to build a module, it'll be called wcn36xx.
+
+config WCN36XX_DEBUGFS
+       bool "WCN36XX debugfs support"
+       depends on WCN36XX
+       ---help---
+         Enables debugfs support.
+
+         If unsure, say Y to make it easier to debug problems.
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
new file mode 100644 (file)
index 0000000..50c43b4
--- /dev/null
@@ -0,0 +1,7 @@
+obj-$(CONFIG_WCN36XX) := wcn36xx.o
+wcn36xx-y +=   main.o \
+               dxe.o \
+               txrx.o \
+               smd.o \
+               pmc.o \
+               debug.o
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
new file mode 100644 (file)
index 0000000..5b84f7a
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include "wcn36xx.h"
+#include "debug.h"
+#include "pmc.h"
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+
+static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos)
+{
+       struct wcn36xx *wcn = file->private_data;
+       struct wcn36xx_vif *vif_priv = NULL;
+       struct ieee80211_vif *vif = NULL;
+       char buf[3];
+
+       list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+                       vif = container_of((void *)vif_priv,
+                                  struct ieee80211_vif,
+                                  drv_priv);
+                       if (NL80211_IFTYPE_STATION == vif->type) {
+                               if (vif_priv->pw_state == WCN36XX_BMPS)
+                                       buf[0] = '1';
+                               else
+                                       buf[0] = '0';
+                               break;
+                       }
+       }
+       buf[1] = '\n';
+       buf[2] = 0x00;
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t write_file_bool_bmps(struct file *file,
+                                   const char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       struct wcn36xx *wcn = file->private_data;
+       struct wcn36xx_vif *vif_priv = NULL;
+       struct ieee80211_vif *vif = NULL;
+
+       char buf[32];
+       int buf_size;
+
+       buf_size = min(count, (sizeof(buf)-1));
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       switch (buf[0]) {
+       case 'y':
+       case 'Y':
+       case '1':
+               list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+                       vif = container_of((void *)vif_priv,
+                                  struct ieee80211_vif,
+                                  drv_priv);
+                       if (NL80211_IFTYPE_STATION == vif->type) {
+                               wcn36xx_enable_keep_alive_null_packet(wcn, vif);
+                               wcn36xx_pmc_enter_bmps_state(wcn, vif);
+                       }
+               }
+               break;
+       case 'n':
+       case 'N':
+       case '0':
+               list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+                       vif = container_of((void *)vif_priv,
+                                  struct ieee80211_vif,
+                                  drv_priv);
+                       if (NL80211_IFTYPE_STATION == vif->type)
+                               wcn36xx_pmc_exit_bmps_state(wcn, vif);
+               }
+               break;
+       }
+
+       return count;
+}
+
+static const struct file_operations fops_wcn36xx_bmps = {
+       .open = simple_open,
+       .read  =       read_file_bool_bmps,
+       .write =       write_file_bool_bmps,
+};
+
+static ssize_t write_file_dump(struct file *file,
+                                   const char __user *user_buf,
+                                   size_t count, loff_t *ppos)
+{
+       struct wcn36xx *wcn = file->private_data;
+       char buf[255], *tmp;
+       int buf_size;
+       u32 arg[WCN36xx_MAX_DUMP_ARGS];
+       int i;
+
+       memset(buf, 0, sizeof(buf));
+       memset(arg, 0, sizeof(arg));
+
+       buf_size = min(count, (sizeof(buf) - 1));
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       tmp = buf;
+
+       for (i = 0; i < WCN36xx_MAX_DUMP_ARGS; i++) {
+               char *begin;
+               begin = strsep(&tmp, " ");
+               if (begin == NULL)
+                       break;
+
+               if (kstrtoul(begin, 0, (unsigned long *)(arg + i)) != 0)
+                       break;
+       }
+
+       wcn36xx_info("DUMP args are %d %d %d %d %d\n", arg[0], arg[1], arg[2],
+                    arg[3], arg[4]);
+       wcn36xx_smd_dump_cmd_req(wcn, arg[0], arg[1], arg[2], arg[3], arg[4]);
+
+       return count;
+}
+
+static const struct file_operations fops_wcn36xx_dump = {
+       .open = simple_open,
+       .write =       write_file_dump,
+};
+
+#define ADD_FILE(name, mode, fop, priv_data)           \
+       do {                                                    \
+               struct dentry *d;                               \
+               d = debugfs_create_file(__stringify(name),      \
+                                       mode, dfs->rootdir,     \
+                                       priv_data, fop);        \
+               dfs->file_##name.dentry = d;                    \
+               if (IS_ERR(d)) {                                \
+                       wcn36xx_warn("Failed to create the debugfs entry\n");\
+                       dfs->file_##name.dentry = NULL;         \
+               }                                               \
+       } while (0)
+
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+       struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+
+       dfs->rootdir = debugfs_create_dir(KBUILD_MODNAME,
+                                         wcn->hw->wiphy->debugfsdir);
+       if (IS_ERR(dfs->rootdir)) {
+               wcn36xx_warn("Failed to create the debugfs directory\n");
+               dfs->rootdir = NULL;
+       }
+
+       ADD_FILE(bmps_switcher, S_IRUSR | S_IWUSR,
+                &fops_wcn36xx_bmps, wcn);
+       ADD_FILE(dump, S_IWUSR, &fops_wcn36xx_dump, wcn);
+}
+
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+       struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
+       debugfs_remove_recursive(dfs->rootdir);
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
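
write_file_dump() above splits the string written to the debugfs file on spaces and converts up to WCN36xx_MAX_DUMP_ARGS numbers with kstrtoul() before passing them to the firmware dump request. The same parsing in a userspace sketch, using the standard strtok()/strtoul() in place of the kernel helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_DUMP_ARGS 5

int main(void)
{
        char buf[] = "18 0 0 0x100 4";
        unsigned long arg[MAX_DUMP_ARGS] = { 0 };
        char *tok = strtok(buf, " ");
        int i;

        for (i = 0; i < MAX_DUMP_ARGS && tok; i++) {
                arg[i] = strtoul(tok, NULL, 0); /* base 0 also accepts 0x... */
                tok = strtok(NULL, " ");
        }

        printf("args: %lu %lu %lu %lu %lu\n",
               arg[0], arg[1], arg[2], arg[3], arg[4]);
        return 0;
}
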
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.h b/drivers/net/wireless/ath/wcn36xx/debug.h
new file mode 100644 (file)
index 0000000..46307aa
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_DEBUG_H_
+#define _WCN36XX_DEBUG_H_
+
+#include <linux/kernel.h>
+
+#define WCN36xx_MAX_DUMP_ARGS  5
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+struct wcn36xx_dfs_file {
+       struct dentry *dentry;
+       u32 value;
+};
+
+struct wcn36xx_dfs_entry {
+       struct dentry *rootdir;
+       struct wcn36xx_dfs_file file_bmps_switcher;
+       struct wcn36xx_dfs_file file_dump;
+};
+
+void wcn36xx_debugfs_init(struct wcn36xx *wcn);
+void wcn36xx_debugfs_exit(struct wcn36xx *wcn);
+
+#else
+static inline void wcn36xx_debugfs_init(struct wcn36xx *wcn)
+{
+}
+static inline void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
+{
+}
+
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+#endif /* _WCN36XX_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
new file mode 100644 (file)
index 0000000..ee25786
--- /dev/null
@@ -0,0 +1,805 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* DXE - DMA transfer engine
+ * We have 2 channels (high priority and low priority) for TX and 2 channels
+ * for RX. Data packets are transferred through the low-priority channels,
+ * management packets through the high-priority channels.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/interrupt.h>
+#include "wcn36xx.h"
+#include "txrx.h"
+
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low)
+{
+       struct wcn36xx_dxe_ch *ch = is_low ?
+               &wcn->dxe_tx_l_ch :
+               &wcn->dxe_tx_h_ch;
+
+       return ch->head_blk_ctl->bd_cpu_addr;
+}
+
+static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
+{
+       wcn36xx_dbg(WCN36XX_DBG_DXE,
+                   "wcn36xx_dxe_write_register: addr=%x, data=%x\n",
+                   addr, data);
+
+       writel(data, wcn->mmio + addr);
+}
+
+static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
+{
+       *data = readl(wcn->mmio + addr);
+
+       wcn36xx_dbg(WCN36XX_DBG_DXE,
+                   "wcn36xx_dxe_read_register: addr=%x, data=%x\n",
+                   addr, *data);
+}
+
+static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+       struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
+       int i;
+
+       for (i = 0; i < ch->desc_num && ctl; i++) {
+               next = ctl->next;
+               kfree(ctl);
+               ctl = next;
+       }
+}
+
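+/* Allocate desc_num control blocks for the channel and link them into a
+ * circular singly linked list: head and tail both start at the first block
+ * and the last block's ->next points back at the head.
+ */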
+static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
+{
+       struct wcn36xx_dxe_ctl *prev_ctl = NULL;
+       struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+       int i;
+
+       for (i = 0; i < ch->desc_num; i++) {
+               cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
+               if (!cur_ctl)
+                       goto out_fail;
+
+               cur_ctl->ctl_blk_order = i;
+               if (i == 0) {
+                       ch->head_blk_ctl = cur_ctl;
+                       ch->tail_blk_ctl = cur_ctl;
+               } else if (ch->desc_num - 1 == i) {
+                       prev_ctl->next = cur_ctl;
+                       cur_ctl->next = ch->head_blk_ctl;
+               } else {
+                       prev_ctl->next = cur_ctl;
+               }
+               prev_ctl = cur_ctl;
+       }
+
+       return 0;
+
+out_fail:
+       wcn36xx_dxe_free_ctl_block(ch);
+       return -ENOMEM;
+}
+
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
+{
+       int ret;
+
+       wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
+       wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
+       wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
+       wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
+
+       wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+       wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+       wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
+       wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
+
+       wcn->dxe_tx_l_ch.dxe_wq =  WCN36XX_DXE_WQ_TX_L;
+       wcn->dxe_tx_h_ch.dxe_wq =  WCN36XX_DXE_WQ_TX_H;
+
+       wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
+       wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
+
+       wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
+       wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
+
+       wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
+       wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
+
+       wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
+       wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
+
+       /* DXE control block allocation */
+       ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
+       if (ret)
+               goto out_err;
+       ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
+       if (ret)
+               goto out_err;
+       ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
+       if (ret)
+               goto out_err;
+       ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
+       if (ret)
+               goto out_err;
+
+       /* Initialize SMSM state: clear WLAN_TX_ENABLE, set WLAN_TX_RINGS_EMPTY */
+       ret = wcn->ctrl_ops->smsm_change_state(
+               WCN36XX_SMSM_WLAN_TX_ENABLE,
+               WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+
+       return 0;
+
+out_err:
+       wcn36xx_err("Failed to allocate DXE control blocks\n");
+       wcn36xx_dxe_free_ctl_blks(wcn);
+       return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
+{
+       wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
+       wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
+       wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
+       wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
+}
+
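+/* Allocate one DMA-coherent array of hardware descriptors for the channel,
+ * attach each descriptor and its bus address to the matching control block,
+ * preset the channel-specific control word and work queue, and chain the
+ * descriptors into a ring via phy_next_l.
+ */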
+static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+{
+       struct wcn36xx_dxe_desc *cur_dxe = NULL;
+       struct wcn36xx_dxe_desc *prev_dxe = NULL;
+       struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+       size_t size;
+       int i;
+
+       size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
+       wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+                                             GFP_KERNEL);
+       if (!wcn_ch->cpu_addr)
+               return -ENOMEM;
+
+       memset(wcn_ch->cpu_addr, 0, size);
+
+       cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
+       cur_ctl = wcn_ch->head_blk_ctl;
+
+       for (i = 0; i < wcn_ch->desc_num; i++) {
+               cur_ctl->desc = cur_dxe;
+               cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
+                       i * sizeof(struct wcn36xx_dxe_desc);
+
+               switch (wcn_ch->ch_type) {
+               case WCN36XX_DXE_CH_TX_L:
+                       cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
+                       cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L;
+                       break;
+               case WCN36XX_DXE_CH_TX_H:
+                       cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
+                       cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H;
+                       break;
+               case WCN36XX_DXE_CH_RX_L:
+                       cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+                       cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
+                       break;
+               case WCN36XX_DXE_CH_RX_H:
+                       cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+                       cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
+                       break;
+               }
+               if (i == 0) {
+                       cur_dxe->phy_next_l = 0;
+               } else if (i > 0 && i < wcn_ch->desc_num - 1) {
+                       prev_dxe->phy_next_l =
+                               cur_ctl->desc_phy_addr;
+               } else if (i == (wcn_ch->desc_num - 1)) {
+                       prev_dxe->phy_next_l =
+                               cur_ctl->desc_phy_addr;
+                       cur_dxe->phy_next_l =
+                               wcn_ch->head_blk_ctl->desc_phy_addr;
+               }
+               cur_ctl = cur_ctl->next;
+               prev_dxe = cur_dxe;
+               cur_dxe++;
+       }
+
+       return 0;
+}
+
+static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
+                                  struct wcn36xx_dxe_mem_pool *pool)
+{
+       int i, chunk_size = pool->chunk_size;
+       dma_addr_t bd_phy_addr = pool->phy_addr;
+       void *bd_cpu_addr = pool->virt_addr;
+       struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
+
+       for (i = 0; i < ch->desc_num; i++) {
+               /* Only every second dxe needs a bd pointer; the other one of
+                * each pair will point to the skb data.
+                */
+               if (!(i & 1)) {
+                       cur->bd_phy_addr = bd_phy_addr;
+                       cur->bd_cpu_addr = bd_cpu_addr;
+                       bd_phy_addr += chunk_size;
+                       bd_cpu_addr += chunk_size;
+               } else {
+                       cur->bd_phy_addr = 0;
+                       cur->bd_cpu_addr = NULL;
+               }
+               cur = cur->next;
+       }
+}
+
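+/* OR the given channel mask into the DXE interrupt mask register so the
+ * channel can raise interrupts.
+ */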
+static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
+{
+       int reg_data = 0;
+
+       wcn36xx_dxe_read_register(wcn,
+                                 WCN36XX_DXE_INT_MASK_REG,
+                                 &reg_data);
+
+       reg_data |= wcn_ch;
+
+       wcn36xx_dxe_write_register(wcn,
+                                  WCN36XX_DXE_INT_MASK_REG,
+                                  (int)reg_data);
+       return 0;
+}
+
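+/* Allocate a WCN36XX_PKT_SIZE receive skb, DMA-map its data area and store
+ * the bus address in the descriptor so the hardware can fill it in.
+ */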
+static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+{
+       struct wcn36xx_dxe_desc *dxe = ctl->desc;
+       struct sk_buff *skb;
+
+       skb = alloc_skb(WCN36XX_PKT_SIZE, GFP_ATOMIC);
+       if (skb == NULL)
+               return -ENOMEM;
+
+       dxe->dst_addr_l = dma_map_single(NULL,
+                                        skb_tail_pointer(skb),
+                                        WCN36XX_PKT_SIZE,
+                                        DMA_FROM_DEVICE);
+       ctl->skb = skb;
+
+       return 0;
+}
+
+static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
+                                   struct wcn36xx_dxe_ch *wcn_ch)
+{
+       int i;
+       struct wcn36xx_dxe_ctl *cur_ctl = NULL;
+
+       cur_ctl = wcn_ch->head_blk_ctl;
+
+       for (i = 0; i < wcn_ch->desc_num; i++) {
+               wcn36xx_dxe_fill_skb(cur_ctl);
+               cur_ctl = cur_ctl->next;
+       }
+
+       return 0;
+}
+
+static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
+                                    struct wcn36xx_dxe_ch *wcn_ch)
+{
+       struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
+       int i;
+
+       for (i = 0; i < wcn_ch->desc_num; i++) {
+               kfree_skb(cur->skb);
+               cur = cur->next;
+       }
+}
+
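+/* Called when the firmware reports the TX status of a frame that requested
+ * it: hand the stashed tx_ack_skb back to mac80211, with the ACK flag set
+ * when the reported status indicates success, and wake the queues.
+ */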
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
+{
+       struct ieee80211_tx_info *info;
+       struct sk_buff *skb;
+       unsigned long flags;
+
+       spin_lock_irqsave(&wcn->dxe_lock, flags);
+       skb = wcn->tx_ack_skb;
+       wcn->tx_ack_skb = NULL;
+       spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+       if (!skb) {
+               wcn36xx_warn("Spurious TX complete indication\n");
+               return;
+       }
+
+       info = IEEE80211_SKB_CB(skb);
+
+       if (status == 1)
+               info->flags |= IEEE80211_TX_STAT_ACK;
+
+       wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
+
+       ieee80211_tx_status_irqsafe(wcn->hw, skb);
+       ieee80211_wake_queues(wcn->hw);
+}
+
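+/* Walk the TX ring from the tail, unmapping each completed skb and freeing
+ * it unless a TX status report was requested, and wake the mac80211 queues
+ * if they had been stopped. Stops at the ring head or at the first
+ * descriptor the hardware still owns.
+ */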
+static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
+{
+       struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
+       struct ieee80211_tx_info *info;
+       unsigned long flags;
+
+       /*
+        * Run at least one iteration of the do-while loop because when the
+        * ring is completely full, head and tail point to the same element
+        * and a plain while loop would not execute at all.
+        */
+       do {
+               if (ctl->skb) {
+                       dma_unmap_single(NULL, ctl->desc->src_addr_l,
+                                        ctl->skb->len, DMA_TO_DEVICE);
+                       info = IEEE80211_SKB_CB(ctl->skb);
+                       if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
+                               /* No TX status requested, free the frame now */
+                               ieee80211_free_txskb(wcn->hw, ctl->skb);
+                       }
+                       spin_lock_irqsave(&ctl->skb_lock, flags);
+                       if (wcn->queues_stopped) {
+                               wcn->queues_stopped = false;
+                               ieee80211_wake_queues(wcn->hw);
+                       }
+                       spin_unlock_irqrestore(&ctl->skb_lock, flags);
+
+                       ctl->skb = NULL;
+               }
+               ctl = ctl->next;
+       } while (ctl != ch->head_blk_ctl &&
+              !(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
+
+       ch->tail_blk_ctl = ctl;
+}
+
+static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
+{
+       struct wcn36xx *wcn = (struct wcn36xx *)dev;
+       int int_src, int_reason;
+
+       wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+       if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
+               wcn36xx_dxe_read_register(wcn,
+                                         WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
+                                         &int_reason);
+
+               /* TODO: Check int_reason */
+
+               wcn36xx_dxe_write_register(wcn,
+                                          WCN36XX_DXE_0_INT_CLR,
+                                          WCN36XX_INT_MASK_CHAN_TX_H);
+
+               wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+                                          WCN36XX_INT_MASK_CHAN_TX_H);
+               wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high\n");
+               reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+       }
+
+       if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
+               wcn36xx_dxe_read_register(wcn,
+                                         WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
+                                         &int_reason);
+               /* TODO: Check int_reason */
+
+               wcn36xx_dxe_write_register(wcn,
+                                          WCN36XX_DXE_0_INT_CLR,
+                                          WCN36XX_INT_MASK_CHAN_TX_L);
+
+               wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_ED_CLR,
+                                          WCN36XX_INT_MASK_CHAN_TX_L);
+               wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low\n");
+               reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
+{
+       struct wcn36xx *wcn = (struct wcn36xx *)dev;
+
+       disable_irq_nosync(wcn->rx_irq);
+       wcn36xx_dxe_rx_frame(wcn);
+       enable_irq(wcn->rx_irq);
+       return IRQ_HANDLED;
+}
+
+static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
+{
+       int ret;
+
+       ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
+                         IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
+       if (ret) {
+               wcn36xx_err("failed to alloc tx irq\n");
+               goto out_err;
+       }
+
+       ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
+                         "wcn36xx_rx", wcn);
+       if (ret) {
+               wcn36xx_err("failed to alloc rx irq\n");
+               goto out_txirq;
+       }
+
+       enable_irq_wake(wcn->rx_irq);
+
+       return 0;
+
+out_txirq:
+       free_irq(wcn->tx_irq, wcn);
+out_err:
+       return ret;
+}
+
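+/* Walk the RX ring from the head while the hardware has cleared the VALID
+ * bit: replace the consumed skb with a freshly mapped one, re-arm the
+ * descriptor, write the channel's mask to the ENCH register and pass the
+ * received skb up the stack.
+ */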
+static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
+                                    struct wcn36xx_dxe_ch *ch)
+{
+       struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl;
+       struct wcn36xx_dxe_desc *dxe = ctl->desc;
+       dma_addr_t  dma_addr;
+       struct sk_buff *skb;
+
+       while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
+               skb = ctl->skb;
+               dma_addr = dxe->dst_addr_l;
+               wcn36xx_dxe_fill_skb(ctl);
+
+               switch (ch->ch_type) {
+               case WCN36XX_DXE_CH_RX_L:
+                       dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
+                       wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+                                                  WCN36XX_DXE_INT_CH1_MASK);
+                       break;
+               case WCN36XX_DXE_CH_RX_H:
+                       dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
+                       wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR,
+                                                  WCN36XX_DXE_INT_CH3_MASK);
+                       break;
+               default:
+                       wcn36xx_warn("Unknown channel\n");
+               }
+
+               dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+                                DMA_FROM_DEVICE);
+               wcn36xx_rx_skb(wcn, skb);
+               ctl = ctl->next;
+               dxe = ctl->desc;
+       }
+
+       ch->head_blk_ctl = ctl;
+
+       return 0;
+}
+
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
+{
+       int int_src;
+
+       wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
+
+       /* RX_LOW_PRI */
+       if (int_src & WCN36XX_DXE_INT_CH1_MASK) {
+               wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+                                          WCN36XX_DXE_INT_CH1_MASK);
+               wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_l_ch));
+       }
+
+       /* RX_HIGH_PRI */
+       if (int_src & WCN36XX_DXE_INT_CH3_MASK) {
+               /* Clear all interrupts within this channel */
+               wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR,
+                                          WCN36XX_DXE_INT_CH3_MASK);
+               wcn36xx_rx_handle_packets(wcn, &(wcn->dxe_rx_h_ch));
+       }
+
+       if (!int_src)
+               wcn36xx_warn("No DXE interrupt pending\n");
+}
+
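+/* Allocate two DMA-coherent pools of BD (buffer descriptor) headers, one for
+ * management frames (TX high channel) and one for data frames (TX low
+ * channel); each pool holds one chunk per descriptor of its channel and is
+ * handed out by wcn36xx_dxe_init_tx_bd().
+ */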
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
+{
+       size_t s;
+       void *cpu_addr;
+
+       /* Allocate BD headers for MGMT frames */
+
+       /* Where this comes from, ask QC */
+       wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+               16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+       s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
+       cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+                                     GFP_KERNEL);
+       if (!cpu_addr)
+               goto out_err;
+
+       wcn->mgmt_mem_pool.virt_addr = cpu_addr;
+       memset(cpu_addr, 0, s);
+
+       /* Allocate BD headers for DATA frames */
+
+       /* Where this comes from, ask QC */
+       wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
+               16 - (WCN36XX_BD_CHUNK_SIZE % 8);
+
+       s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
+       cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+                                     GFP_KERNEL);
+       if (!cpu_addr)
+               goto out_err;
+
+       wcn->data_mem_pool.virt_addr = cpu_addr;
+       memset(cpu_addr, 0, s);
+
+       return 0;
+
+out_err:
+       wcn36xx_dxe_free_mem_pools(wcn);
+       wcn36xx_err("Failed to allocate BD mempool\n");
+       return -ENOMEM;
+}
+
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
+{
+       if (wcn->mgmt_mem_pool.virt_addr)
+               dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+                                 WCN36XX_DXE_CH_DESC_NUMB_TX_H,
+                                 wcn->mgmt_mem_pool.virt_addr,
+                                 wcn->mgmt_mem_pool.phy_addr);
+
+       if (wcn->data_mem_pool.virt_addr) {
+               dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+                                 WCN36XX_DXE_CH_DESC_NUMB_TX_L,
+                                 wcn->data_mem_pool.virt_addr,
+                                 wcn->data_mem_pool.phy_addr);
+       }
+}
+
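+/* Queue one frame on a TX channel. Each transmission consumes a descriptor
+ * pair: the first descriptor points at the preallocated BD header, the
+ * second at the DMA-mapped skb payload. If the next slot still holds an skb
+ * the ring is full, so the mac80211 queues are stopped and -EBUSY returned.
+ */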
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+                        struct wcn36xx_vif *vif_priv,
+                        struct sk_buff *skb,
+                        bool is_low)
+{
+       struct wcn36xx_dxe_ctl *ctl = NULL;
+       struct wcn36xx_dxe_desc *desc = NULL;
+       struct wcn36xx_dxe_ch *ch = NULL;
+       unsigned long flags;
+
+       ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
+
+       ctl = ch->head_blk_ctl;
+
+       spin_lock_irqsave(&ctl->next->skb_lock, flags);
+
+       /*
+        * If the skb is not NULL we have reached the tail of the ring, hence
+        * the ring is full. Stop the queues to let mac80211 back off until
+        * the ring has an empty slot again.
+        */
+       if (ctl->next->skb) {
+               ieee80211_stop_queues(wcn->hw);
+               wcn->queues_stopped = true;
+               spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+               return -EBUSY;
+       }
+       spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+
+       ctl->skb = NULL;
+       desc = ctl->desc;
+
+       /* Set source address of the BD we send */
+       desc->src_addr_l = ctl->bd_phy_addr;
+
+       desc->dst_addr_l = ch->dxe_wq;
+       desc->fr_len = sizeof(struct wcn36xx_tx_bd);
+       desc->ctrl = ch->ctrl_bd;
+
+       wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
+
+       wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
+                        (char *)desc, sizeof(*desc));
+       wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
+                        "BD   >>> ", (char *)ctl->bd_cpu_addr,
+                        sizeof(struct wcn36xx_tx_bd));
+
+       /* Set source address of the SKB we send */
+       ctl = ctl->next;
+       ctl->skb = skb;
+       desc = ctl->desc;
+       if (ctl->bd_cpu_addr) {
+               wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
+               return -EINVAL;
+       }
+
+       desc->src_addr_l = dma_map_single(NULL,
+                                         ctl->skb->data,
+                                         ctl->skb->len,
+                                         DMA_TO_DEVICE);
+
+       desc->dst_addr_l = ch->dxe_wq;
+       desc->fr_len = ctl->skb->len;
+
+       /* set dxe descriptor to VALID */
+       desc->ctrl = ch->ctrl_skb;
+
+       wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
+                        (char *)desc, sizeof(*desc));
+       wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB   >>> ",
+                        (char *)ctl->skb->data, ctl->skb->len);
+
+       /* Move the head of the ring to the next empty descriptor */
+       ch->head_blk_ctl = ctl->next;
+
+       /*
+        * When connected and trying to send a data frame, the chip can be in
+        * sleep mode and writing to the register will not wake it up. Instead,
+        * notify the chip about the new frame through the SMSM bus.
+        */
+       if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
+               wcn->ctrl_ops->smsm_change_state(
+                                 0,
+                                 WCN36XX_SMSM_WLAN_TX_ENABLE);
+       } else {
+               /* indicate End Of Packet and generate interrupt on descriptor
+                * done.
+                */
+               wcn36xx_dxe_write_register(wcn,
+                       ch->reg_ctrl, ch->def_ctrl);
+       }
+
+       return 0;
+}
+
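+/* Bring up the DXE engine: reset it, route its interrupts, set up the
+ * descriptor ring of each channel, program the per-channel registers,
+ * preallocate receive skbs for the RX channels, enable the channel
+ * interrupts and finally request the TX and RX IRQs.
+ */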
+int wcn36xx_dxe_init(struct wcn36xx *wcn)
+{
+       int reg_data = 0, ret;
+
+       reg_data = WCN36XX_DXE_REG_RESET;
+       wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
+
+       /* Setting interrupt path */
+       reg_data = WCN36XX_DXE_CCU_INT;
+       wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+
+       /***************************************/
+       /* Init descriptors for TX LOW channel */
+       /***************************************/
+       wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+       wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
+
+       /* Write channel head to a NEXT register */
+       wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
+               wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
+
+       /* Program DMA destination addr for TX LOW */
+       wcn36xx_dxe_write_register(wcn,
+               WCN36XX_DXE_CH_DEST_ADDR_TX_L,
+               WCN36XX_DXE_WQ_TX_L);
+
+       wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+       wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
+
+       /***************************************/
+       /* Init descriptors for TX HIGH channel */
+       /***************************************/
+       wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+       wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
+
+       /* Write channel head to a NEXT register */
+       wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
+               wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
+
+       /* Program DMA destination addr for TX HIGH */
+       wcn36xx_dxe_write_register(wcn,
+               WCN36XX_DXE_CH_DEST_ADDR_TX_H,
+               WCN36XX_DXE_WQ_TX_H);
+
+       wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
+
+       /* Enable channel interrupts */
+       wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
+
+       /***************************************/
+       /* Init descriptors for RX LOW channel */
+       /***************************************/
+       wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+
+       /* For RX we need to preallocate buffers */
+       wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
+
+       /* Write channel head to a NEXT register */
+       wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
+               wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
+
+       /* Write DMA source address */
+       wcn36xx_dxe_write_register(wcn,
+               WCN36XX_DXE_CH_SRC_ADDR_RX_L,
+               WCN36XX_DXE_WQ_RX_L);
+
+       /* Program preallocated destination address */
+       wcn36xx_dxe_write_register(wcn,
+               WCN36XX_DXE_CH_DEST_ADDR_RX_L,
+               wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
+
+       /* Enable default control registers */
+       wcn36xx_dxe_write_register(wcn,
+               WCN36XX_DXE_REG_CTL_RX_L,
+               WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
+
+       /* Enable channel interrupts */
+       wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
+
+       /***************************************/
+       /* Init descriptors for RX HIGH channel */
+       /***************************************/
+       wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+
+       /* For RX we need to preallocate buffers */
+       wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
+
+       /* Write channel head to a NEXT register */
+       wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
+               wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
+
+       /* Write DMA source address */
+       wcn36xx_dxe_write_register(wcn,
+               WCN36XX_DXE_CH_SRC_ADDR_RX_H,
+               WCN36XX_DXE_WQ_RX_H);
+
+       /* Program preallocated destination address */
+       wcn36xx_dxe_write_register(wcn,
+               WCN36XX_DXE_CH_DEST_ADDR_RX_H,
+               wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
+
+       /* Enable default control registers */
+       wcn36xx_dxe_write_register(wcn,
+               WCN36XX_DXE_REG_CTL_RX_H,
+               WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
+
+       /* Enable channel interrupts */
+       wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
+
+       ret = wcn36xx_dxe_request_irqs(wcn);
+       if (ret < 0)
+               goto out_err;
+
+       return 0;
+
+out_err:
+       return ret;
+}
+
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
+{
+       free_irq(wcn->tx_irq, wcn);
+       free_irq(wcn->rx_irq, wcn);
+
+       if (wcn->tx_ack_skb) {
+               ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
+               wcn->tx_ack_skb = NULL;
+       }
+
+       wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
+       wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
new file mode 100644 (file)
index 0000000..c88562f
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DXE_H_
+#define _DXE_H_
+
+#include "wcn36xx.h"
+
+/*
+ * TX_LOW         = DMA0
+ * TX_HIGH        = DMA4
+ * RX_LOW         = DMA1
+ * RX_HIGH        = DMA3
+ * H2H_TEST_RX_TX = DMA2
+ */
+
+/* DXE registers */
+#define WCN36XX_DXE_MEM_BASE                   0x03000000
+#define WCN36XX_DXE_MEM_REG                    0x202000
+
+#define WCN36XX_DXE_CCU_INT                    0xA0011
+#define WCN36XX_DXE_REG_CCU_INT                        0x200b10
+
+/* TODO: this must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_CTRL_TX_L                  0x328a44
+#define WCN36XX_DXE_CTRL_TX_H                  0x32ce44
+#define WCN36XX_DXE_CTRL_RX_L                  0x12ad2f
+#define WCN36XX_DXE_CTRL_RX_H                  0x12d12f
+#define WCN36XX_DXE_CTRL_TX_H_BD               0x30ce45
+#define WCN36XX_DXE_CTRL_TX_H_SKB              0x32ce4d
+#define WCN36XX_DXE_CTRL_TX_L_BD               0x308a45
+#define WCN36XX_DXE_CTRL_TX_L_SKB              0x328a4d
+
+/* TODO: this must be calculated properly, not hardcoded */
+#define WCN36XX_DXE_WQ_TX_L                    0x17
+#define WCN36XX_DXE_WQ_TX_H                    0x17
+#define WCN36XX_DXE_WQ_RX_L                    0xB
+#define WCN36XX_DXE_WQ_RX_H                    0x4
+
+/* DXE descriptor control field */
+#define WCN36XX_DXE_CTRL_VALID_MASK (0x00000001)
+
+/* TODO: this must be calculated properly, not hardcoded */
+/* DXE default control register values */
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_L                0x847EAD2F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_RX_H                0x84FED12F
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_H                0x853ECF4D
+#define WCN36XX_DXE_CH_DEFAULT_CTL_TX_L                0x843e8b4d
+
+/* Common DXE registers */
+#define WCN36XX_DXE_MEM_CSR                    (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_REG_CSR_RESET              (WCN36XX_DXE_MEM_REG + 0x00)
+#define WCN36XX_DXE_ENCH_ADDR                  (WCN36XX_DXE_MEM_REG + 0x04)
+#define WCN36XX_DXE_REG_CH_EN                  (WCN36XX_DXE_MEM_REG + 0x08)
+#define WCN36XX_DXE_REG_CH_DONE                        (WCN36XX_DXE_MEM_REG + 0x0C)
+#define WCN36XX_DXE_REG_CH_ERR                 (WCN36XX_DXE_MEM_REG + 0x10)
+#define WCN36XX_DXE_INT_MASK_REG               (WCN36XX_DXE_MEM_REG + 0x18)
+#define WCN36XX_DXE_INT_SRC_RAW_REG            (WCN36XX_DXE_MEM_REG + 0x20)
+       /* #define WCN36XX_DXE_INT_CH6_MASK     0x00000040 */
+       /* #define WCN36XX_DXE_INT_CH5_MASK     0x00000020 */
+       #define WCN36XX_DXE_INT_CH4_MASK        0x00000010
+       #define WCN36XX_DXE_INT_CH3_MASK        0x00000008
+       /* #define WCN36XX_DXE_INT_CH2_MASK     0x00000004 */
+       #define WCN36XX_DXE_INT_CH1_MASK        0x00000002
+       #define WCN36XX_DXE_INT_CH0_MASK        0x00000001
+#define WCN36XX_DXE_0_INT_CLR                  (WCN36XX_DXE_MEM_REG + 0x30)
+#define WCN36XX_DXE_0_INT_ED_CLR               (WCN36XX_DXE_MEM_REG + 0x34)
+#define WCN36XX_DXE_0_INT_DONE_CLR             (WCN36XX_DXE_MEM_REG + 0x38)
+#define WCN36XX_DXE_0_INT_ERR_CLR              (WCN36XX_DXE_MEM_REG + 0x3C)
+
+#define WCN36XX_DXE_0_CH0_STATUS               (WCN36XX_DXE_MEM_REG + 0x404)
+#define WCN36XX_DXE_0_CH1_STATUS               (WCN36XX_DXE_MEM_REG + 0x444)
+#define WCN36XX_DXE_0_CH2_STATUS               (WCN36XX_DXE_MEM_REG + 0x484)
+#define WCN36XX_DXE_0_CH3_STATUS               (WCN36XX_DXE_MEM_REG + 0x4C4)
+#define WCN36XX_DXE_0_CH4_STATUS               (WCN36XX_DXE_MEM_REG + 0x504)
+
+#define WCN36XX_DXE_REG_RESET                  0x5c89
+
+/* Temporary BMU Workqueue 4 */
+#define WCN36XX_DXE_BMU_WQ_RX_LOW              0xB
+#define WCN36XX_DXE_BMU_WQ_RX_HIGH             0x4
+/* DMA channel offset */
+#define WCN36XX_DXE_TX_LOW_OFFSET              0x400
+#define WCN36XX_DXE_TX_HIGH_OFFSET             0x500
+#define WCN36XX_DXE_RX_LOW_OFFSET              0x440
+#define WCN36XX_DXE_RX_HIGH_OFFSET             0x4C0
+
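+/* Per-channel registers are addressed as
+ * WCN36XX_DXE_MEM_REG + <channel offset> + <register offset>,
+ * as in the address groups below.
+ */
+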
+/* Address of the next DXE descriptor */
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR          0x001C
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L     (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_TX_LOW_OFFSET + \
+                                                WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H     (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_TX_HIGH_OFFSET + \
+                                                WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L     (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_RX_LOW_OFFSET + \
+                                                WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+#define WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H     (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_RX_HIGH_OFFSET + \
+                                                WCN36XX_DXE_CH_NEXT_DESC_ADDR)
+
+/* DXE Descriptor source address */
+#define WCN36XX_DXE_CH_SRC_ADDR                        0x000C
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_L           (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_RX_LOW_OFFSET + \
+                                                WCN36XX_DXE_CH_SRC_ADDR)
+#define WCN36XX_DXE_CH_SRC_ADDR_RX_H           (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_RX_HIGH_OFFSET + \
+                                                WCN36XX_DXE_CH_SRC_ADDR)
+
+/* DXE descriptor destination address */
+#define WCN36XX_DXE_CH_DEST_ADDR               0x0014
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_L          (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_TX_LOW_OFFSET + \
+                                                WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_TX_H          (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_TX_HIGH_OFFSET + \
+                                                WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_L          (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_RX_LOW_OFFSET + \
+                                                WCN36XX_DXE_CH_DEST_ADDR)
+#define WCN36XX_DXE_CH_DEST_ADDR_RX_H          (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_RX_HIGH_OFFSET + \
+                                                WCN36XX_DXE_CH_DEST_ADDR)
+
+/* Interrupt status */
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR         0x0004
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L    (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_TX_LOW_OFFSET + \
+                                                WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H    (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_TX_HIGH_OFFSET + \
+                                                WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L    (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_RX_LOW_OFFSET + \
+                                                WCN36XX_DXE_CH_STATUS_REG_ADDR)
+#define WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H    (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_RX_HIGH_OFFSET + \
+                                                WCN36XX_DXE_CH_STATUS_REG_ADDR)
+
+
+/* DXE default control register */
+#define WCN36XX_DXE_REG_CTL_RX_L               (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_RX_LOW_OFFSET)
+#define WCN36XX_DXE_REG_CTL_RX_H               (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_RX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_H               (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_TX_HIGH_OFFSET)
+#define WCN36XX_DXE_REG_CTL_TX_L               (WCN36XX_DXE_MEM_REG + \
+                                                WCN36XX_DXE_TX_LOW_OFFSET)
+
+#define WCN36XX_SMSM_WLAN_TX_ENABLE            0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY       0x00000200
+
+
+/* Interrupt control channel mask */
+#define WCN36XX_INT_MASK_CHAN_TX_L             0x00000001
+#define WCN36XX_INT_MASK_CHAN_RX_L             0x00000002
+#define WCN36XX_INT_MASK_CHAN_RX_H             0x00000008
+#define WCN36XX_INT_MASK_CHAN_TX_H             0x00000010
+
+#define WCN36XX_BD_CHUNK_SIZE                  128
+
+#define WCN36XX_PKT_SIZE                       0xF20
+enum wcn36xx_dxe_ch_type {
+       WCN36XX_DXE_CH_TX_L,
+       WCN36XX_DXE_CH_TX_H,
+       WCN36XX_DXE_CH_RX_L,
+       WCN36XX_DXE_CH_RX_H
+};
+
+/* number of descriptors per channel */
+enum wcn36xx_dxe_ch_desc_num {
+       WCN36XX_DXE_CH_DESC_NUMB_TX_L           = 128,
+       WCN36XX_DXE_CH_DESC_NUMB_TX_H           = 10,
+       WCN36XX_DXE_CH_DESC_NUMB_RX_L           = 512,
+       WCN36XX_DXE_CH_DESC_NUMB_RX_H           = 40
+};
+
+/**
+ * struct wcn36xx_dxe_desc - describes descriptor of one DXE buffer
+ *
+ * @ctrl: is a union that consists of following bits:
+ * union {
+ *     u32     valid           :1; //0 = DMA stop, 1 = DMA continue with this
+ *                                 //descriptor
+ *     u32     transfer_type   :2; //0 = Host to Host space
+ *     u32     eop             :1; //End of Packet
+ *     u32     bd_handling     :1; //if transferType = Host to BMU, then 0
+ *                                 // means first 128 bytes contain BD, and 1
+ *                                 // means create new empty BD
+ *     u32     siq             :1; // SIQ
+ *     u32     diq             :1; // DIQ
+ *     u32     pdu_rel         :1; //0 = don't release BD and PDUs when done,
+ *                                 // 1 = release them
+ *     u32     bthld_sel       :4; //BMU Threshold Select
+ *     u32     prio            :3; //Specifies the priority level to use for
+ *                                 // the transfer
+ *     u32     stop_channel    :1; //1 = DMA stops processing further, channel
+ *                                 //requires re-enabling after this
+ *     u32     intr            :1; //Interrupt on Descriptor Done
+ *     u32     rsvd            :1; //reserved
+ *     u32     size            :14;//14 bits used - ignored for BMU transfers,
+ *                                 //only used for host to host transfers?
+ * } ctrl;
+ */
+struct wcn36xx_dxe_desc {
+       u32     ctrl;
+       u32     fr_len;
+
+       u32     src_addr_l;
+       u32     dst_addr_l;
+       u32     phy_next_l;
+       u32     src_addr_h;
+       u32     dst_addr_h;
+       u32     phy_next_h;
+} __packed;
+
+/* DXE Control block */
+struct wcn36xx_dxe_ctl {
+       struct wcn36xx_dxe_ctl  *next;
+       struct wcn36xx_dxe_desc *desc;
+       unsigned int            desc_phy_addr;
+       int                     ctl_blk_order;
+       struct sk_buff          *skb;
+       spinlock_t              skb_lock;
+       void                    *bd_cpu_addr;
+       dma_addr_t              bd_phy_addr;
+};
+
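+/* Per-channel state: the DMA-coherent descriptor ring, the control block
+ * ring and the channel-specific work queue, control words and control
+ * register.
+ */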
+struct wcn36xx_dxe_ch {
+       enum wcn36xx_dxe_ch_type        ch_type;
+       void                            *cpu_addr;
+       dma_addr_t                      dma_addr;
+       enum wcn36xx_dxe_ch_desc_num    desc_num;
+       /* DXE control block ring */
+       struct wcn36xx_dxe_ctl          *head_blk_ctl;
+       struct wcn36xx_dxe_ctl          *tail_blk_ctl;
+
+       /* DXE channel specific configs */
+       u32                             dxe_wq;
+       u32                             ctrl_bd;
+       u32                             ctrl_skb;
+       u32                             reg_ctrl;
+       u32                             def_ctrl;
+};
+
+/* Memory Pool for BD headers */
+struct wcn36xx_dxe_mem_pool {
+       int             chunk_size;
+       void            *virt_addr;
+       dma_addr_t      phy_addr;
+};
+
+struct wcn36xx_vif;
+int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn);
+void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn);
+int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn);
+void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn);
+int wcn36xx_dxe_init(struct wcn36xx *wcn);
+void wcn36xx_dxe_deinit(struct wcn36xx *wcn);
+int wcn36xx_dxe_init_channels(struct wcn36xx *wcn);
+int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
+                        struct wcn36xx_vif *vif_priv,
+                        struct sk_buff *skb,
+                        bool is_low);
+void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status);
+void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low);
+#endif /* _DXE_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
new file mode 100644 (file)
index 0000000..c02dbc6
--- /dev/null
@@ -0,0 +1,4657 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HAL_H_
+#define _HAL_H_
+
+/*---------------------------------------------------------------------------
+  API VERSIONING INFORMATION
+
+  The RIVA API is versioned as MAJOR.MINOR.VERSION.REVISION
+  The MAJOR is incremented for major product/architecture changes
+      (and then MINOR/VERSION/REVISION are zeroed)
+  The MINOR is incremented for minor product/architecture changes
+      (and then VERSION/REVISION are zeroed)
+  The VERSION is incremented if a significant API change occurs
+      (and then REVISION is zeroed)
+  The REVISION is incremented if an insignificant API change occurs
+      or if a new API is added
+  All values are in the range 0..255 (i.e. they are 8-bit values)
+ ---------------------------------------------------------------------------*/
+#define WCN36XX_HAL_VER_MAJOR 1
+#define WCN36XX_HAL_VER_MINOR 4
+#define WCN36XX_HAL_VER_VERSION 1
+#define WCN36XX_HAL_VER_REVISION 2
+
+/* This is to force the compiler to use the maximum size of an int (4 bytes) */
+#define WCN36XX_HAL_MAX_ENUM_SIZE    0x7FFFFFFF
+#define WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE    0x7FFF
+
+/* Max no. of transmit categories */
+#define STACFG_MAX_TC    8
+
+/* The maximum value of access category */
+#define WCN36XX_HAL_MAX_AC  4
+
+#define WCN36XX_HAL_IPV4_ADDR_LEN       4
+
+#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
+
+/* Default Beacon template size */
+#define BEACON_TEMPLATE_SIZE 0x180
+
+/* Param Change Bitmap sent to HAL */
+#define PARAM_BCN_INTERVAL_CHANGED             (1 << 0)
+#define PARAM_SHORT_PREAMBLE_CHANGED           (1 << 1)
+#define PARAM_SHORT_SLOT_TIME_CHANGED          (1 << 2)
+#define PARAM_llACOEXIST_CHANGED               (1 << 3)
+#define PARAM_llBCOEXIST_CHANGED               (1 << 4)
+#define PARAM_llGCOEXIST_CHANGED               (1 << 5)
+#define PARAM_HT20MHZCOEXIST_CHANGED           (1 << 6)
+#define PARAM_NON_GF_DEVICES_PRESENT_CHANGED   (1 << 7)
+#define PARAM_RIFS_MODE_CHANGED                        (1 << 8)
+#define PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED   (1 << 9)
+#define PARAM_OBSS_MODE_CHANGED                        (1 << 10)
+#define PARAM_BEACON_UPDATE_MASK \
+       (PARAM_BCN_INTERVAL_CHANGED |                                   \
+        PARAM_SHORT_PREAMBLE_CHANGED |                                 \
+        PARAM_SHORT_SLOT_TIME_CHANGED |                                \
+        PARAM_llACOEXIST_CHANGED |                                     \
+        PARAM_llBCOEXIST_CHANGED |                                     \
+        PARAM_llGCOEXIST_CHANGED |                                     \
+        PARAM_HT20MHZCOEXIST_CHANGED |                                 \
+        PARAM_NON_GF_DEVICES_PRESENT_CHANGED |                         \
+        PARAM_RIFS_MODE_CHANGED |                                      \
+        PARAM_LSIG_TXOP_FULL_SUPPORT_CHANGED |                         \
+        PARAM_OBSS_MODE_CHANGED)
+
+/* dump command response Buffer size */
+#define DUMPCMD_RSP_BUFFER 100
+
+/* version string max length (including NULL) */
+#define WCN36XX_HAL_VERSION_LENGTH  64
+
+/* message types for messages exchanged between WDI and HAL */
+enum wcn36xx_hal_host_msg_type {
+       /* Init/De-Init */
+       WCN36XX_HAL_START_REQ = 0,
+       WCN36XX_HAL_START_RSP = 1,
+       WCN36XX_HAL_STOP_REQ = 2,
+       WCN36XX_HAL_STOP_RSP = 3,
+
+       /* Scan */
+       WCN36XX_HAL_INIT_SCAN_REQ = 4,
+       WCN36XX_HAL_INIT_SCAN_RSP = 5,
+       WCN36XX_HAL_START_SCAN_REQ = 6,
+       WCN36XX_HAL_START_SCAN_RSP = 7,
+       WCN36XX_HAL_END_SCAN_REQ = 8,
+       WCN36XX_HAL_END_SCAN_RSP = 9,
+       WCN36XX_HAL_FINISH_SCAN_REQ = 10,
+       WCN36XX_HAL_FINISH_SCAN_RSP = 11,
+
+       /* HW STA configuration/deconfiguration */
+       WCN36XX_HAL_CONFIG_STA_REQ = 12,
+       WCN36XX_HAL_CONFIG_STA_RSP = 13,
+       WCN36XX_HAL_DELETE_STA_REQ = 14,
+       WCN36XX_HAL_DELETE_STA_RSP = 15,
+       WCN36XX_HAL_CONFIG_BSS_REQ = 16,
+       WCN36XX_HAL_CONFIG_BSS_RSP = 17,
+       WCN36XX_HAL_DELETE_BSS_REQ = 18,
+       WCN36XX_HAL_DELETE_BSS_RSP = 19,
+
+       /* Infra STA association */
+       WCN36XX_HAL_JOIN_REQ = 20,
+       WCN36XX_HAL_JOIN_RSP = 21,
+       WCN36XX_HAL_POST_ASSOC_REQ = 22,
+       WCN36XX_HAL_POST_ASSOC_RSP = 23,
+
+       /* Security */
+       WCN36XX_HAL_SET_BSSKEY_REQ = 24,
+       WCN36XX_HAL_SET_BSSKEY_RSP = 25,
+       WCN36XX_HAL_SET_STAKEY_REQ = 26,
+       WCN36XX_HAL_SET_STAKEY_RSP = 27,
+       WCN36XX_HAL_RMV_BSSKEY_REQ = 28,
+       WCN36XX_HAL_RMV_BSSKEY_RSP = 29,
+       WCN36XX_HAL_RMV_STAKEY_REQ = 30,
+       WCN36XX_HAL_RMV_STAKEY_RSP = 31,
+
+       /* Qos Related */
+       WCN36XX_HAL_ADD_TS_REQ = 32,
+       WCN36XX_HAL_ADD_TS_RSP = 33,
+       WCN36XX_HAL_DEL_TS_REQ = 34,
+       WCN36XX_HAL_DEL_TS_RSP = 35,
+       WCN36XX_HAL_UPD_EDCA_PARAMS_REQ = 36,
+       WCN36XX_HAL_UPD_EDCA_PARAMS_RSP = 37,
+       WCN36XX_HAL_ADD_BA_REQ = 38,
+       WCN36XX_HAL_ADD_BA_RSP = 39,
+       WCN36XX_HAL_DEL_BA_REQ = 40,
+       WCN36XX_HAL_DEL_BA_RSP = 41,
+
+       WCN36XX_HAL_CH_SWITCH_REQ = 42,
+       WCN36XX_HAL_CH_SWITCH_RSP = 43,
+       WCN36XX_HAL_SET_LINK_ST_REQ = 44,
+       WCN36XX_HAL_SET_LINK_ST_RSP = 45,
+       WCN36XX_HAL_GET_STATS_REQ = 46,
+       WCN36XX_HAL_GET_STATS_RSP = 47,
+       WCN36XX_HAL_UPDATE_CFG_REQ = 48,
+       WCN36XX_HAL_UPDATE_CFG_RSP = 49,
+
+       WCN36XX_HAL_MISSED_BEACON_IND = 50,
+       WCN36XX_HAL_UNKNOWN_ADDR2_FRAME_RX_IND = 51,
+       WCN36XX_HAL_MIC_FAILURE_IND = 52,
+       WCN36XX_HAL_FATAL_ERROR_IND = 53,
+       WCN36XX_HAL_SET_KEYDONE_MSG = 54,
+
+       /* NV Interface */
+       WCN36XX_HAL_DOWNLOAD_NV_REQ = 55,
+       WCN36XX_HAL_DOWNLOAD_NV_RSP = 56,
+
+       WCN36XX_HAL_ADD_BA_SESSION_REQ = 57,
+       WCN36XX_HAL_ADD_BA_SESSION_RSP = 58,
+       WCN36XX_HAL_TRIGGER_BA_REQ = 59,
+       WCN36XX_HAL_TRIGGER_BA_RSP = 60,
+       WCN36XX_HAL_UPDATE_BEACON_REQ = 61,
+       WCN36XX_HAL_UPDATE_BEACON_RSP = 62,
+       WCN36XX_HAL_SEND_BEACON_REQ = 63,
+       WCN36XX_HAL_SEND_BEACON_RSP = 64,
+
+       WCN36XX_HAL_SET_BCASTKEY_REQ = 65,
+       WCN36XX_HAL_SET_BCASTKEY_RSP = 66,
+       WCN36XX_HAL_DELETE_STA_CONTEXT_IND = 67,
+       WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ = 68,
+       WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP = 69,
+
+       /* PTT interface support */
+       WCN36XX_HAL_PROCESS_PTT_REQ = 70,
+       WCN36XX_HAL_PROCESS_PTT_RSP = 71,
+
+       /* BTAMP related events */
+       WCN36XX_HAL_SIGNAL_BTAMP_EVENT_REQ = 72,
+       WCN36XX_HAL_SIGNAL_BTAMP_EVENT_RSP = 73,
+       WCN36XX_HAL_TL_HAL_FLUSH_AC_REQ = 74,
+       WCN36XX_HAL_TL_HAL_FLUSH_AC_RSP = 75,
+
+       WCN36XX_HAL_ENTER_IMPS_REQ = 76,
+       WCN36XX_HAL_EXIT_IMPS_REQ = 77,
+       WCN36XX_HAL_ENTER_BMPS_REQ = 78,
+       WCN36XX_HAL_EXIT_BMPS_REQ = 79,
+       WCN36XX_HAL_ENTER_UAPSD_REQ = 80,
+       WCN36XX_HAL_EXIT_UAPSD_REQ = 81,
+       WCN36XX_HAL_UPDATE_UAPSD_PARAM_REQ = 82,
+       WCN36XX_HAL_CONFIGURE_RXP_FILTER_REQ = 83,
+       WCN36XX_HAL_ADD_BCN_FILTER_REQ = 84,
+       WCN36XX_HAL_REM_BCN_FILTER_REQ = 85,
+       WCN36XX_HAL_ADD_WOWL_BCAST_PTRN = 86,
+       WCN36XX_HAL_DEL_WOWL_BCAST_PTRN = 87,
+       WCN36XX_HAL_ENTER_WOWL_REQ = 88,
+       WCN36XX_HAL_EXIT_WOWL_REQ = 89,
+       WCN36XX_HAL_HOST_OFFLOAD_REQ = 90,
+       WCN36XX_HAL_SET_RSSI_THRESH_REQ = 91,
+       WCN36XX_HAL_GET_RSSI_REQ = 92,
+       WCN36XX_HAL_SET_UAPSD_AC_PARAMS_REQ = 93,
+       WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_REQ = 94,
+
+       WCN36XX_HAL_ENTER_IMPS_RSP = 95,
+       WCN36XX_HAL_EXIT_IMPS_RSP = 96,
+       WCN36XX_HAL_ENTER_BMPS_RSP = 97,
+       WCN36XX_HAL_EXIT_BMPS_RSP = 98,
+       WCN36XX_HAL_ENTER_UAPSD_RSP = 99,
+       WCN36XX_HAL_EXIT_UAPSD_RSP = 100,
+       WCN36XX_HAL_SET_UAPSD_AC_PARAMS_RSP = 101,
+       WCN36XX_HAL_UPDATE_UAPSD_PARAM_RSP = 102,
+       WCN36XX_HAL_CONFIGURE_RXP_FILTER_RSP = 103,
+       WCN36XX_HAL_ADD_BCN_FILTER_RSP = 104,
+       WCN36XX_HAL_REM_BCN_FILTER_RSP = 105,
+       WCN36XX_HAL_SET_RSSI_THRESH_RSP = 106,
+       WCN36XX_HAL_HOST_OFFLOAD_RSP = 107,
+       WCN36XX_HAL_ADD_WOWL_BCAST_PTRN_RSP = 108,
+       WCN36XX_HAL_DEL_WOWL_BCAST_PTRN_RSP = 109,
+       WCN36XX_HAL_ENTER_WOWL_RSP = 110,
+       WCN36XX_HAL_EXIT_WOWL_RSP = 111,
+       WCN36XX_HAL_RSSI_NOTIFICATION_IND = 112,
+       WCN36XX_HAL_GET_RSSI_RSP = 113,
+       WCN36XX_HAL_CONFIGURE_APPS_CPU_WAKEUP_STATE_RSP = 114,
+
+       /* 11k related events */
+       WCN36XX_HAL_SET_MAX_TX_POWER_REQ = 115,
+       WCN36XX_HAL_SET_MAX_TX_POWER_RSP = 116,
+
+       /* 11R related msgs */
+       WCN36XX_HAL_AGGR_ADD_TS_REQ = 117,
+       WCN36XX_HAL_AGGR_ADD_TS_RSP = 118,
+
+       /* P2P  WLAN_FEATURE_P2P */
+       WCN36XX_HAL_SET_P2P_GONOA_REQ = 119,
+       WCN36XX_HAL_SET_P2P_GONOA_RSP = 120,
+
+       /* WLAN Dump commands */
+       WCN36XX_HAL_DUMP_COMMAND_REQ = 121,
+       WCN36XX_HAL_DUMP_COMMAND_RSP = 122,
+
+       /* OEM_DATA FEATURE SUPPORT */
+       WCN36XX_HAL_START_OEM_DATA_REQ = 123,
+       WCN36XX_HAL_START_OEM_DATA_RSP = 124,
+
+       /* ADD SELF STA REQ and RSP */
+       WCN36XX_HAL_ADD_STA_SELF_REQ = 125,
+       WCN36XX_HAL_ADD_STA_SELF_RSP = 126,
+
+       /* DEL SELF STA SUPPORT */
+       WCN36XX_HAL_DEL_STA_SELF_REQ = 127,
+       WCN36XX_HAL_DEL_STA_SELF_RSP = 128,
+
+       /* Coex Indication */
+       WCN36XX_HAL_COEX_IND = 129,
+
+       /* Tx Complete Indication */
+       WCN36XX_HAL_OTA_TX_COMPL_IND = 130,
+
+       /* Host Suspend/resume messages */
+       WCN36XX_HAL_HOST_SUSPEND_IND = 131,
+       WCN36XX_HAL_HOST_RESUME_REQ = 132,
+       WCN36XX_HAL_HOST_RESUME_RSP = 133,
+
+       WCN36XX_HAL_SET_TX_POWER_REQ = 134,
+       WCN36XX_HAL_SET_TX_POWER_RSP = 135,
+       WCN36XX_HAL_GET_TX_POWER_REQ = 136,
+       WCN36XX_HAL_GET_TX_POWER_RSP = 137,
+
+       WCN36XX_HAL_P2P_NOA_ATTR_IND = 138,
+
+       WCN36XX_HAL_ENABLE_RADAR_DETECT_REQ = 139,
+       WCN36XX_HAL_ENABLE_RADAR_DETECT_RSP = 140,
+       WCN36XX_HAL_GET_TPC_REPORT_REQ = 141,
+       WCN36XX_HAL_GET_TPC_REPORT_RSP = 142,
+       WCN36XX_HAL_RADAR_DETECT_IND = 143,
+       WCN36XX_HAL_RADAR_DETECT_INTR_IND = 144,
+       WCN36XX_HAL_KEEP_ALIVE_REQ = 145,
+       WCN36XX_HAL_KEEP_ALIVE_RSP = 146,
+
+       /* PNO messages */
+       WCN36XX_HAL_SET_PREF_NETWORK_REQ = 147,
+       WCN36XX_HAL_SET_PREF_NETWORK_RSP = 148,
+       WCN36XX_HAL_SET_RSSI_FILTER_REQ = 149,
+       WCN36XX_HAL_SET_RSSI_FILTER_RSP = 150,
+       WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ = 151,
+       WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP = 152,
+       WCN36XX_HAL_PREF_NETW_FOUND_IND = 153,
+
+       WCN36XX_HAL_SET_TX_PER_TRACKING_REQ = 154,
+       WCN36XX_HAL_SET_TX_PER_TRACKING_RSP = 155,
+       WCN36XX_HAL_TX_PER_HIT_IND = 156,
+
+       WCN36XX_HAL_8023_MULTICAST_LIST_REQ = 157,
+       WCN36XX_HAL_8023_MULTICAST_LIST_RSP = 158,
+
+       WCN36XX_HAL_SET_PACKET_FILTER_REQ = 159,
+       WCN36XX_HAL_SET_PACKET_FILTER_RSP = 160,
+       WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_REQ = 161,
+       WCN36XX_HAL_PACKET_FILTER_MATCH_COUNT_RSP = 162,
+       WCN36XX_HAL_CLEAR_PACKET_FILTER_REQ = 163,
+       WCN36XX_HAL_CLEAR_PACKET_FILTER_RSP = 164,
+
+       /*
+        * This is a temporary fix. It should be removed once the host and
+        * Riva code are in sync.
+        */
+       WCN36XX_HAL_INIT_SCAN_CON_REQ = 165,
+
+       WCN36XX_HAL_SET_POWER_PARAMS_REQ = 166,
+       WCN36XX_HAL_SET_POWER_PARAMS_RSP = 167,
+
+       WCN36XX_HAL_TSM_STATS_REQ = 168,
+       WCN36XX_HAL_TSM_STATS_RSP = 169,
+
+       /* wake reason indication (WOW) */
+       WCN36XX_HAL_WAKE_REASON_IND = 170,
+
+       /* GTK offload support */
+       WCN36XX_HAL_GTK_OFFLOAD_REQ = 171,
+       WCN36XX_HAL_GTK_OFFLOAD_RSP = 172,
+       WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ = 173,
+       WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP = 174,
+
+       WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ = 175,
+       WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP = 176,
+       WCN36XX_HAL_EXCLUDE_UNENCRYPTED_IND = 177,
+
+       WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ = 178,
+       WCN36XX_HAL_SET_THERMAL_MITIGATION_RSP = 179,
+
+       WCN36XX_HAL_UPDATE_VHT_OP_MODE_REQ = 182,
+       WCN36XX_HAL_UPDATE_VHT_OP_MODE_RSP = 183,
+
+       WCN36XX_HAL_P2P_NOA_START_IND = 184,
+
+       WCN36XX_HAL_GET_ROAM_RSSI_REQ = 185,
+       WCN36XX_HAL_GET_ROAM_RSSI_RSP = 186,
+
+       WCN36XX_HAL_CLASS_B_STATS_IND = 187,
+       WCN36XX_HAL_DEL_BA_IND = 188,
+       WCN36XX_HAL_DHCP_START_IND = 189,
+       WCN36XX_HAL_DHCP_STOP_IND = 190,
+
+       WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
+};
+
+/* Enumeration for Version */
+enum wcn36xx_hal_host_msg_version {
+       WCN36XX_HAL_MSG_VERSION0 = 0,
+       WCN36XX_HAL_MSG_VERSION1 = 1,
+       /* defined as 2 bytes of data */
+       WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION = 0x7FFF,
+       WCN36XX_HAL_MSG_VERSION_MAX_FIELD = WCN36XX_HAL_MSG_WCNSS_CTRL_VERSION
+};
+
+enum driver_type {
+       DRIVER_TYPE_PRODUCTION = 0,
+       DRIVER_TYPE_MFG = 1,
+       DRIVER_TYPE_DVT = 2,
+       DRIVER_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stop_type {
+       HAL_STOP_TYPE_SYS_RESET,
+       HAL_STOP_TYPE_SYS_DEEP_SLEEP,
+       HAL_STOP_TYPE_RF_KILL,
+       HAL_STOP_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_sys_mode {
+       HAL_SYS_MODE_NORMAL,
+       HAL_SYS_MODE_LEARN,
+       HAL_SYS_MODE_SCAN,
+       HAL_SYS_MODE_PROMISC,
+       HAL_SYS_MODE_SUSPEND_LINK,
+       HAL_SYS_MODE_ROAM_SCAN,
+       HAL_SYS_MODE_ROAM_SUSPEND_LINK,
+       HAL_SYS_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum phy_chan_bond_state {
+       /* 20MHz IF bandwidth centered on IF carrier */
+       PHY_SINGLE_CHANNEL_CENTERED = 0,
+
+       /* 40MHz IF bandwidth with lower 20MHz supporting the primary channel */
+       PHY_DOUBLE_CHANNEL_LOW_PRIMARY = 1,
+
+       /* 40MHz IF bandwidth centered on IF carrier */
+       PHY_DOUBLE_CHANNEL_CENTERED = 2,
+
+       /* 40MHz IF bandwidth with higher 20MHz supporting the primary ch */
+       PHY_DOUBLE_CHANNEL_HIGH_PRIMARY = 3,
+
+       /* 20/40MHZ offset LOW 40/80MHZ offset CENTERED */
+       PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_CENTERED = 4,
+
+       /* 20/40MHZ offset CENTERED 40/80MHZ offset CENTERED */
+       PHY_QUADRUPLE_CHANNEL_20MHZ_CENTERED_40MHZ_CENTERED = 5,
+
+       /* 20/40MHZ offset HIGH 40/80MHZ offset CENTERED */
+       PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_CENTERED = 6,
+
+       /* 20/40MHZ offset LOW 40/80MHZ offset LOW */
+       PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW = 7,
+
+       /* 20/40MHZ offset HIGH 40/80MHZ offset LOW */
+       PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW = 8,
+
+       /* 20/40MHZ offset LOW 40/80MHZ offset HIGH */
+       PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH = 9,
+
+       /* 20/40MHZ offset-HIGH 40/80MHZ offset HIGH */
+       PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH = 10,
+
+       PHY_CHANNEL_BONDING_STATE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Spatial Multiplexing (SM) Power Save mode */
+enum wcn36xx_hal_ht_mimo_state {
+       /* Static SM Power Save mode */
+       WCN36XX_HAL_HT_MIMO_PS_STATIC = 0,
+
+       /* Dynamic SM Power Save mode */
+       WCN36XX_HAL_HT_MIMO_PS_DYNAMIC = 1,
+
+       /* reserved */
+       WCN36XX_HAL_HT_MIMO_PS_NA = 2,
+
+       /* SM Power Save disabled */
+       WCN36XX_HAL_HT_MIMO_PS_NO_LIMIT = 3,
+
+       WCN36XX_HAL_HT_MIMO_PS_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* each station added has a rate mode which specifies the sta attributes */
+enum sta_rate_mode {
+       STA_TAURUS = 0,
+       STA_TITAN,
+       STA_POLARIS,
+       STA_11b,
+       STA_11bg,
+       STA_11a,
+       STA_11n,
+       STA_11ac,
+       STA_INVALID_RATE_MODE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* 1,2,5.5,11 */
+#define WCN36XX_HAL_NUM_DSSS_RATES           4
+
+/* 6,9,12,18,24,36,48,54 */
+#define WCN36XX_HAL_NUM_OFDM_RATES           8
+
+/* 72,96,108 */
+#define WCN36XX_HAL_NUM_POLARIS_RATES       3
+
+#define WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET    16
+
+enum wcn36xx_hal_bss_type {
+       WCN36XX_HAL_INFRASTRUCTURE_MODE,
+
+       /* Added for softAP support */
+       WCN36XX_HAL_INFRA_AP_MODE,
+
+       WCN36XX_HAL_IBSS_MODE,
+
+       /* Added for BT-AMP support */
+       WCN36XX_HAL_BTAMP_STA_MODE,
+
+       /* Added for BT-AMP support */
+       WCN36XX_HAL_BTAMP_AP_MODE,
+
+       WCN36XX_HAL_AUTO_MODE,
+
+       WCN36XX_HAL_DONOT_USE_BSS_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_nw_type {
+       WCN36XX_HAL_11A_NW_TYPE,
+       WCN36XX_HAL_11B_NW_TYPE,
+       WCN36XX_HAL_11G_NW_TYPE,
+       WCN36XX_HAL_11N_NW_TYPE,
+       WCN36XX_HAL_DONOT_USE_NW_TYPE = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WCN36XX_HAL_MAC_RATESET_EID_MAX            12
+
+enum wcn36xx_hal_ht_operating_mode {
+       /* No Protection */
+       WCN36XX_HAL_HT_OP_MODE_PURE,
+
+       /* Overlap Legacy device present, protection is optional */
+       WCN36XX_HAL_HT_OP_MODE_OVERLAP_LEGACY,
+
+       /* No legacy device, but 20 MHz HT present */
+       WCN36XX_HAL_HT_OP_MODE_NO_LEGACY_20MHZ_HT,
+
+       /* Protection is required */
+       WCN36XX_HAL_HT_OP_MODE_MIXED,
+
+       WCN36XX_HAL_HT_OP_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type enum used with peer */
+enum ani_ed_type {
+       WCN36XX_HAL_ED_NONE,
+       WCN36XX_HAL_ED_WEP40,
+       WCN36XX_HAL_ED_WEP104,
+       WCN36XX_HAL_ED_TKIP,
+       WCN36XX_HAL_ED_CCMP,
+       WCN36XX_HAL_ED_WPI,
+       WCN36XX_HAL_ED_AES_128_CMAC,
+       WCN36XX_HAL_ED_NOT_IMPLEMENTED = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+#define WLAN_MAX_KEY_RSC_LEN                16
+#define WLAN_WAPI_KEY_RSC_LEN               16
+
+/* MAX key length when ULA is used */
+#define WCN36XX_HAL_MAC_MAX_KEY_LENGTH              32
+#define WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS     4
+
+/*
+ * Enum to specify whether key is used for TX only, RX only or both.
+ */
+enum ani_key_direction {
+       WCN36XX_HAL_TX_ONLY,
+       WCN36XX_HAL_RX_ONLY,
+       WCN36XX_HAL_TX_RX,
+       WCN36XX_HAL_TX_DEFAULT,
+       WCN36XX_HAL_DONOT_USE_KEY_DIRECTION = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum ani_wep_type {
+       WCN36XX_HAL_WEP_STATIC,
+       WCN36XX_HAL_WEP_DYNAMIC,
+       WCN36XX_HAL_WEP_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_link_state {
+
+       WCN36XX_HAL_LINK_IDLE_STATE = 0,
+       WCN36XX_HAL_LINK_PREASSOC_STATE = 1,
+       WCN36XX_HAL_LINK_POSTASSOC_STATE = 2,
+       WCN36XX_HAL_LINK_AP_STATE = 3,
+       WCN36XX_HAL_LINK_IBSS_STATE = 4,
+
+       /* BT-AMP Case */
+       WCN36XX_HAL_LINK_BTAMP_PREASSOC_STATE = 5,
+       WCN36XX_HAL_LINK_BTAMP_POSTASSOC_STATE = 6,
+       WCN36XX_HAL_LINK_BTAMP_AP_STATE = 7,
+       WCN36XX_HAL_LINK_BTAMP_STA_STATE = 8,
+
+       /* Reserved for HAL Internal Use */
+       WCN36XX_HAL_LINK_LEARN_STATE = 9,
+       WCN36XX_HAL_LINK_SCAN_STATE = 10,
+       WCN36XX_HAL_LINK_FINISH_SCAN_STATE = 11,
+       WCN36XX_HAL_LINK_INIT_CAL_STATE = 12,
+       WCN36XX_HAL_LINK_FINISH_CAL_STATE = 13,
+       WCN36XX_HAL_LINK_LISTEN_STATE = 14,
+
+       WCN36XX_HAL_LINK_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_stats_mask {
+       HAL_SUMMARY_STATS_INFO = 0x00000001,
+       HAL_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+       HAL_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+       HAL_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+       HAL_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+       HAL_PER_STA_STATS_INFO = 0x00000020
+};
+
+/* BT-AMP events type */
+enum bt_amp_event_type {
+       BTAMP_EVENT_CONNECTION_START,
+       BTAMP_EVENT_CONNECTION_STOP,
+       BTAMP_EVENT_CONNECTION_TERMINATED,
+
+       /* This and beyond are invalid values */
+       BTAMP_EVENT_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+/* PE Statistics */
+enum pe_stats_mask {
+       PE_SUMMARY_STATS_INFO = 0x00000001,
+       PE_GLOBAL_CLASS_A_STATS_INFO = 0x00000002,
+       PE_GLOBAL_CLASS_B_STATS_INFO = 0x00000004,
+       PE_GLOBAL_CLASS_C_STATS_INFO = 0x00000008,
+       PE_GLOBAL_CLASS_D_STATS_INFO = 0x00000010,
+       PE_PER_STA_STATS_INFO = 0x00000020,
+
+       /* This and beyond are invalid values */
+       PE_STATS_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/*
+ * Configuration Parameter IDs
+ */
+#define WCN36XX_HAL_CFG_STA_ID                         0
+#define WCN36XX_HAL_CFG_CURRENT_TX_ANTENNA             1
+#define WCN36XX_HAL_CFG_CURRENT_RX_ANTENNA             2
+#define WCN36XX_HAL_CFG_LOW_GAIN_OVERRIDE              3
+#define WCN36XX_HAL_CFG_POWER_STATE_PER_CHAIN          4
+#define WCN36XX_HAL_CFG_CAL_PERIOD                     5
+#define WCN36XX_HAL_CFG_CAL_CONTROL                    6
+#define WCN36XX_HAL_CFG_PROXIMITY                      7
+#define WCN36XX_HAL_CFG_NETWORK_DENSITY                        8
+#define WCN36XX_HAL_CFG_MAX_MEDIUM_TIME                        9
+#define WCN36XX_HAL_CFG_MAX_MPDUS_IN_AMPDU             10
+#define WCN36XX_HAL_CFG_RTS_THRESHOLD                  11
+#define WCN36XX_HAL_CFG_SHORT_RETRY_LIMIT              12
+#define WCN36XX_HAL_CFG_LONG_RETRY_LIMIT               13
+#define WCN36XX_HAL_CFG_FRAGMENTATION_THRESHOLD                14
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ZERO         15
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_ONE          16
+#define WCN36XX_HAL_CFG_DYNAMIC_THRESHOLD_TWO          17
+#define WCN36XX_HAL_CFG_FIXED_RATE                     18
+#define WCN36XX_HAL_CFG_RETRYRATE_POLICY               19
+#define WCN36XX_HAL_CFG_RETRYRATE_SECONDARY            20
+#define WCN36XX_HAL_CFG_RETRYRATE_TERTIARY             21
+#define WCN36XX_HAL_CFG_FORCE_POLICY_PROTECTION                22
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_24GHZ     23
+#define WCN36XX_HAL_CFG_FIXED_RATE_MULTICAST_5GHZ      24
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_24GHZ       25
+#define WCN36XX_HAL_CFG_DEFAULT_RATE_INDEX_5GHZ                26
+#define WCN36XX_HAL_CFG_MAX_BA_SESSIONS                        27
+#define WCN36XX_HAL_CFG_PS_DATA_INACTIVITY_TIMEOUT     28
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_FILTER           29
+#define WCN36XX_HAL_CFG_PS_ENABLE_RSSI_MONITOR         30
+#define WCN36XX_HAL_CFG_NUM_BEACON_PER_RSSI_AVERAGE    31
+#define WCN36XX_HAL_CFG_STATS_PERIOD                   32
+#define WCN36XX_HAL_CFG_CFP_MAX_DURATION               33
+#define WCN36XX_HAL_CFG_FRAME_TRANS_ENABLED            34
+#define WCN36XX_HAL_CFG_DTIM_PERIOD                    35
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBK                  36
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACBE                  37
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVO                  38
+#define WCN36XX_HAL_CFG_EDCA_WMM_ACVI                  39
+#define WCN36XX_HAL_CFG_BA_THRESHOLD_HIGH              40
+#define WCN36XX_HAL_CFG_MAX_BA_BUFFERS                 41
+#define WCN36XX_HAL_CFG_RPE_POLLING_THRESHOLD          42
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC0_REG        43
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC1_REG        44
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC2_REG        45
+#define WCN36XX_HAL_CFG_RPE_AGING_THRESHOLD_FOR_AC3_REG        46
+#define WCN36XX_HAL_CFG_NO_OF_ONCHIP_REORDER_SESSIONS  47
+#define WCN36XX_HAL_CFG_PS_LISTEN_INTERVAL             48
+#define WCN36XX_HAL_CFG_PS_HEART_BEAT_THRESHOLD                49
+#define WCN36XX_HAL_CFG_PS_NTH_BEACON_FILTER           50
+#define WCN36XX_HAL_CFG_PS_MAX_PS_POLL                 51
+#define WCN36XX_HAL_CFG_PS_MIN_RSSI_THRESHOLD          52
+#define WCN36XX_HAL_CFG_PS_RSSI_FILTER_PERIOD          53
+#define WCN36XX_HAL_CFG_PS_BROADCAST_FRAME_FILTER_ENABLE 54
+#define WCN36XX_HAL_CFG_PS_IGNORE_DTIM                 55
+#define WCN36XX_HAL_CFG_PS_ENABLE_BCN_EARLY_TERM       56
+#define WCN36XX_HAL_CFG_DYNAMIC_PS_POLL_VALUE          57
+#define WCN36XX_HAL_CFG_PS_NULLDATA_AP_RESP_TIMEOUT    58
+#define WCN36XX_HAL_CFG_TELE_BCN_WAKEUP_EN             59
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI              60
+#define WCN36XX_HAL_CFG_TELE_BCN_TRANS_LI_IDLE_BCNS    61
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI                        62
+#define WCN36XX_HAL_CFG_TELE_BCN_MAX_LI_IDLE_BCNS      63
+#define WCN36XX_HAL_CFG_TX_PWR_CTRL_ENABLE             64
+#define WCN36XX_HAL_CFG_VALID_RADAR_CHANNEL_LIST       65
+#define WCN36XX_HAL_CFG_TX_POWER_24_20                 66
+#define WCN36XX_HAL_CFG_TX_POWER_24_40                 67
+#define WCN36XX_HAL_CFG_TX_POWER_50_20                 68
+#define WCN36XX_HAL_CFG_TX_POWER_50_40                 69
+#define WCN36XX_HAL_CFG_MCAST_BCAST_FILTER_SETTING     70
+#define WCN36XX_HAL_CFG_BCN_EARLY_TERM_WAKEUP_INTERVAL 71
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_2_4               72
+#define WCN36XX_HAL_CFG_MAX_TX_POWER_5                 73
+#define WCN36XX_HAL_CFG_INFRA_STA_KEEP_ALIVE_PERIOD    74
+#define WCN36XX_HAL_CFG_ENABLE_CLOSE_LOOP              75
+#define WCN36XX_HAL_CFG_BTC_EXECUTION_MODE             76
+#define WCN36XX_HAL_CFG_BTC_DHCP_BT_SLOTS_TO_BLOCK     77
+#define WCN36XX_HAL_CFG_BTC_A2DP_DHCP_BT_SUB_INTERVALS 78
+#define WCN36XX_HAL_CFG_PS_TX_INACTIVITY_TIMEOUT       79
+#define WCN36XX_HAL_CFG_WCNSS_API_VERSION              80
+#define WCN36XX_HAL_CFG_AP_KEEPALIVE_TIMEOUT           81
+#define WCN36XX_HAL_CFG_GO_KEEPALIVE_TIMEOUT           82
+#define WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST            83
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_BT          84
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_BT         85
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_BT         86
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_BT           87
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_INQ_WLAN                88
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_PAGE_WLAN       89
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_CONN_WLAN       90
+#define WCN36XX_HAL_CFG_BTC_STATIC_LEN_LE_WLAN         91
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_BT             92
+#define WCN36XX_HAL_CFG_BTC_DYN_MAX_LEN_WLAN           93
+#define WCN36XX_HAL_CFG_BTC_MAX_SCO_BLOCK_PERC         94
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_A2DP          95
+#define WCN36XX_HAL_CFG_BTC_DHCP_PROT_ON_SCO           96
+#define WCN36XX_HAL_CFG_ENABLE_UNICAST_FILTER          97
+#define WCN36XX_HAL_CFG_MAX_ASSOC_LIMIT                        98
+#define WCN36XX_HAL_CFG_ENABLE_LPWR_IMG_TRANSITION     99
+#define WCN36XX_HAL_CFG_ENABLE_MCC_ADAPTIVE_SCHEDULER  100
+#define WCN36XX_HAL_CFG_ENABLE_DETECT_PS_SUPPORT       101
+#define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT                102
+#define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER      103
+#define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE                104
+#define WCN36XX_HAL_CFG_MAX_PARAMS                     105
+
+/* Message definitions - All the messages below need to be packed */
+
+/* Definition for HAL API Version. */
+struct wcnss_wlan_version {
+       u8 revision;
+       u8 version;
+       u8 minor;
+       u8 major;
+} __packed;
+
+/* Definition for Encryption Keys */
+struct wcn36xx_hal_keys {
+       u8 id;
+
+       /* 0 for multicast */
+       u8 unicast;
+
+       enum ani_key_direction direction;
+
+       /* Usage is unknown */
+       u8 rsc[WLAN_MAX_KEY_RSC_LEN];
+
+       /* =1 for authenticator,=0 for supplicant */
+       u8 pae_role;
+
+       u16 length;
+       u8 key[WCN36XX_HAL_MAC_MAX_KEY_LENGTH];
+} __packed;
+
+/*
+ * set_sta_key_params is defined here since it is shared by the
+ * configbss/setstakey messages.
+ */
+struct wcn36xx_hal_set_sta_key_params {
+       /* STA Index */
+       u16 sta_index;
+
+       /* Encryption Type used with peer */
+       enum ani_ed_type enc_type;
+
+       /* STATIC/DYNAMIC - valid only for WEP */
+       enum ani_wep_type wep_type;
+
+       /* Default WEP key, valid only for static WEP, must be between 0 and 3. */
+       u8 def_wep_idx;
+
+       /* Valid only for non-static WEP encryptions */
+       struct wcn36xx_hal_keys key[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+       /*
+        * Control for Replay Count: 1 = single TID based replay count on TX,
+        * 0 = per TID based replay count on TX
+        */
+       u8 single_tid_rc;
+
+} __packed;
+
+/* 8-byte control message header used by HAL */
+struct wcn36xx_hal_msg_header {
+       enum wcn36xx_hal_host_msg_type msg_type:16;
+       enum wcn36xx_hal_host_msg_version msg_version:16;
+       u32 len;
+} __packed;
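+
+/*
+ * An illustrative helper sketch (the name is not part of this interface):
+ * one way a sender could fill the control header, assuming 'len' is the
+ * total message size in bytes, including the header itself.
+ */
+static inline void wcn36xx_hal_fill_header(struct wcn36xx_hal_msg_header *hdr,
+               enum wcn36xx_hal_host_msg_type type, u32 len)
+{
+       hdr->msg_type = type;
+       hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
+       hdr->len = len;
+}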
+
+/* Config format required by HAL for each CFG item*/
+struct wcn36xx_hal_cfg {
+       /* Cfg Id. The Id required by HAL is exported by HAL
+        * in a shared header file between UMAC and HAL. */
+       u16 id;
+
+       /* Length of the Cfg. This parameter is used to go to the next cfg
+        * in the TLV format. */
+       u16 len;
+
+       /* Padding bytes for unaligned addresses */
+       u16 pad_bytes;
+
+       /* Reserve bytes for making cfgVal to align address */
+       u16 reserve;
+
+       /* Following the uCfgLen field there should be 'uCfgLen' bytes
+        * containing the uCfgValue; u8 uCfgValue[uCfgLen] */
+} __packed;
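+
+/*
+ * Illustrative sketch of appending one u32 value in the TLV layout
+ * described above; the helper name is not part of the interface and the
+ * caller is assumed to have sized the buffer appropriately.
+ */
+static inline u8 *wcn36xx_hal_put_cfg_u32(u8 *buf, u16 id, u32 value)
+{
+       struct wcn36xx_hal_cfg *cfg = (struct wcn36xx_hal_cfg *)buf;
+
+       cfg->id = id;
+       cfg->len = sizeof(value);
+       cfg->pad_bytes = 0;
+       cfg->reserve = 0;
+       memcpy(buf + sizeof(*cfg), &value, sizeof(value));
+
+       /* the next cfg entry starts right after this header and its value */
+       return buf + sizeof(*cfg) + cfg->len;
+}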
+
+struct wcn36xx_hal_mac_start_parameters {
+       /* Driver Type - Production or FTM etc. */
+       enum driver_type type;
+
+       /* Length of the config buffer */
+       u32 len;
+
+       /* Following this there is a TLV formatted buffer of length
+        * "len" bytes containing all config values.
+        * The TLV is expected to be formatted like this:
+        * 0           15            31           31+CFG_LEN-1        length-1
+        * |   CFG_ID   |   CFG_LEN   |   CFG_BODY    |  CFG_ID  |......|
+        */
+} __packed;
+
+struct wcn36xx_hal_mac_start_req_msg {
+       /* config buffer must start in TLV format just here */
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_mac_start_parameters params;
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_params {
+       /* success or failure */
+       u16 status;
+
+       /* Max number of STA supported by the device */
+       u8 stations;
+
+       /* Max number of BSS supported by the device */
+       u8 bssids;
+
+       /* API Version */
+       struct wcnss_wlan_version version;
+
+       /* CRM build information */
+       u8 crm_version[WCN36XX_HAL_VERSION_LENGTH];
+
+       /* hardware/chipset/misc version information */
+       u8 wlan_version[WCN36XX_HAL_VERSION_LENGTH];
+
+} __packed;
+
+struct wcn36xx_hal_mac_start_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_mac_start_rsp_params start_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_params {
+       /* The reason for which the device is being stopped */
+       enum wcn36xx_hal_stop_type reason;
+
+} __packed;
+
+struct wcn36xx_hal_mac_stop_req_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_mac_stop_req_params stop_req_params;
+} __packed;
+
+struct wcn36xx_hal_mac_stop_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+} __packed;
+
+struct wcn36xx_hal_update_cfg_req_msg {
+       /*
+        * Note: The length specified in tHalUpdateCfgReqMsg messages should be
+        * header.msgLen = sizeof(tHalUpdateCfgReqMsg) + uConfigBufferLen
+        */
+       struct wcn36xx_hal_msg_header header;
+
+       /* Length of the config buffer. Allows UMAC to update multiple CFGs */
+       u32 len;
+
+       /*
+        * Following this there is a TLV formatted buffer of length
+        * "uConfigBufferLen" bytes containing all config values.
+        * The TLV is expected to be formatted like this:
+        * 0           15            31           31+CFG_LEN-1        length-1
+        * |   CFG_ID   |   CFG_LEN   |   CFG_BODY    |  CFG_ID  |......|
+        */
+
+} __packed;
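+
+/*
+ * Length bookkeeping sketch for the note above: the header length covers
+ * this fixed part plus the TLV blob appended after it (built elsewhere;
+ * 'tlv_len' is an assumed caller-supplied size, the helper is illustrative).
+ */
+static inline void wcn36xx_hal_set_update_cfg_len(
+               struct wcn36xx_hal_update_cfg_req_msg *msg, u32 tlv_len)
+{
+       msg->len = tlv_len;
+       msg->header.len = sizeof(*msg) + tlv_len;
+}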
+
+struct wcn36xx_hal_update_cfg_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+} __packed;
+
+/* Frame control field format (2 bytes) */
+struct wcn36xx_hal_mac_frame_ctl {
+
+#ifndef ANI_LITTLE_BIT_ENDIAN
+
+       u8 subType:4;
+       u8 type:2;
+       u8 protVer:2;
+
+       u8 order:1;
+       u8 wep:1;
+       u8 moreData:1;
+       u8 powerMgmt:1;
+       u8 retry:1;
+       u8 moreFrag:1;
+       u8 fromDS:1;
+       u8 toDS:1;
+
+#else
+
+       u8 protVer:2;
+       u8 type:2;
+       u8 subType:4;
+
+       u8 toDS:1;
+       u8 fromDS:1;
+       u8 moreFrag:1;
+       u8 retry:1;
+       u8 powerMgmt:1;
+       u8 moreData:1;
+       u8 wep:1;
+       u8 order:1;
+
+#endif
+
+};
+
+/* Sequence control field */
+struct wcn36xx_hal_mac_seq_ctl {
+       u8 fragNum:4;
+       u8 seqNumLo:4;
+       u8 seqNumHi:8;
+};
+
+/* Management header format */
+struct wcn36xx_hal_mac_mgmt_hdr {
+       struct wcn36xx_hal_mac_frame_ctl fc;
+       u8 durationLo;
+       u8 durationHi;
+       u8 da[6];
+       u8 sa[6];
+       u8 bssId[6];
+       struct wcn36xx_hal_mac_seq_ctl seqControl;
+};
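+
+/*
+ * Illustrative sketch of filling the frame control for the "Data NULL"
+ * notify frame used by the scan requests below; type 2 (data) and
+ * subtype 4 (NULL function) are the standard IEEE 802.11 values and are
+ * assumptions of this example, not definitions from this header.
+ */
+static inline void wcn36xx_hal_fc_data_null(
+               struct wcn36xx_hal_mac_frame_ctl *fc, u8 to_ds, u8 pm)
+{
+       memset(fc, 0, sizeof(*fc));
+       fc->protVer = 0;
+       fc->type = 2;           /* data frame */
+       fc->subType = 4;        /* NULL function (no data) */
+       fc->toDS = to_ds;
+       fc->powerMgmt = pm;
+}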
+
+/* FIXME: pronto v1 apparently has 4 */
+#define WCN36XX_HAL_NUM_BSSID               2
+
+/* Scan Entry to hold active BSS idx's */
+struct wcn36xx_hal_scan_entry {
+       u8 bss_index[WCN36XX_HAL_NUM_BSSID];
+       u8 active_bss_count;
+};
+
+struct wcn36xx_hal_init_scan_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* LEARN - AP Role
+          SCAN - STA Role */
+       enum wcn36xx_hal_sys_mode mode;
+
+       /* BSSID of the BSS */
+       u8 bssid[ETH_ALEN];
+
+       /* Whether BSS needs to be notified */
+       u8 notify;
+
+       /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+        * Null, or CTS to Self). Must always be a valid frame type. */
+       u8 frame_type;
+
+       /* UMAC has the option of passing the MAC frame to be used for
+        * notifying the BSS. If non-zero, HAL will use the MAC frame
+        * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+        * appropriate MAC frame based on frameType. */
+       u8 frame_len;
+
+       /* Following the framelength there is a MAC frame buffer if
+        * frameLength is non-zero. */
+       struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+       /* Entry to hold number of active BSS idx's */
+       struct wcn36xx_hal_scan_entry scan_entry;
+};
+
+struct wcn36xx_hal_init_scan_con_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* LEARN - AP Role
+          SCAN - STA Role */
+       enum wcn36xx_hal_sys_mode mode;
+
+       /* BSSID of the BSS */
+       u8 bssid[ETH_ALEN];
+
+       /* Whether BSS needs to be notified */
+       u8 notify;
+
+       /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+        * Null, or CTS to Self). Must always be a valid frame type. */
+       u8 frame_type;
+
+       /* UMAC has the option of passing the MAC frame to be used for
+        * notifying the BSS. If non-zero, HAL will use the MAC frame
+        * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+        * appropriate MAC frame based on frameType. */
+       u8 frame_length;
+
+       /* Following the framelength there is a MAC frame buffer if
+        * frameLength is non-zero. */
+       struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+       /* Entry to hold number of active BSS idx's */
+       struct wcn36xx_hal_scan_entry scan_entry;
+
+       /* Single NoA usage in Scanning */
+       u8 use_noa;
+
+       /* Indicates the scan duration (in ms) */
+       u16 scan_duration;
+
+};
+
+struct wcn36xx_hal_init_scan_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+} __packed;
+
+struct wcn36xx_hal_start_scan_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Indicates the channel to scan */
+       u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_start_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       u32 start_tsf[2];
+       u8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_end_scan_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Indicates the channel to stop scanning. Not really used, but
+        * retained for symmetry with the "start scan" message. It can also
+        * help in error checking if needed. */
+       u8 scan_channel;
+} __packed;
+
+struct wcn36xx_hal_end_scan_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+} __packed;
+
+struct wcn36xx_hal_finish_scan_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Identifies the operational state of the AP/STA
+        * LEARN - AP Role SCAN - STA Role */
+       enum wcn36xx_hal_sys_mode mode;
+
+       /* Operating channel to tune to. */
+       u8 oper_channel;
+
+       /* Channel Bonding state If 20/40 MHz is operational, this will
+        * indicate the 40 MHz extension channel in combination with the
+        * control channel */
+       enum phy_chan_bond_state cb_state;
+
+       /* BSSID of the BSS */
+       u8 bssid[ETH_ALEN];
+
+       /* Whether BSS needs to be notified */
+       u8 notify;
+
+       /* Kind of frame to be used for notifying the BSS (Data Null, QoS
+        * Null, or CTS to Self). Must always be a valid frame type. */
+       u8 frame_type;
+
+       /* UMAC has the option of passing the MAC frame to be used for
+        * notifying the BSS. If non-zero, HAL will use the MAC frame
+        * buffer pointed to by macMgmtHdr. If zero, HAL will generate the
+        * appropriate MAC frame based on frameType. */
+       u8 frame_length;
+
+       /* Following the framelength there is a MAC frame buffer if
+        * frameLength is non-zero. */
+       struct wcn36xx_hal_mac_mgmt_hdr mac_mgmt_hdr;
+
+       /* Entry to hold number of active BSS idx's */
+       struct wcn36xx_hal_scan_entry scan_entry;
+
+} __packed;
+
+struct wcn36xx_hal_finish_scan_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+} __packed;
+
+enum wcn36xx_hal_rate_index {
+       HW_RATE_INDEX_1MBPS     = 0x82,
+       HW_RATE_INDEX_2MBPS     = 0x84,
+       HW_RATE_INDEX_5_5MBPS   = 0x8B,
+       HW_RATE_INDEX_6MBPS     = 0x0C,
+       HW_RATE_INDEX_9MBPS     = 0x12,
+       HW_RATE_INDEX_11MBPS    = 0x96,
+       HW_RATE_INDEX_12MBPS    = 0x18,
+       HW_RATE_INDEX_18MBPS    = 0x24,
+       HW_RATE_INDEX_24MBPS    = 0x30,
+       HW_RATE_INDEX_36MBPS    = 0x48,
+       HW_RATE_INDEX_48MBPS    = 0x60,
+       HW_RATE_INDEX_54MBPS    = 0x6C
+};
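+
+/*
+ * In the values above, bit 7 marks the DSSS (802.11b) rates and the low
+ * seven bits carry the rate in 500 kbps units; a small decoding sketch
+ * (the helper name is illustrative):
+ */
+static inline unsigned int wcn36xx_hal_rate_index_to_kbps(
+               enum wcn36xx_hal_rate_index idx)
+{
+       /* e.g. HW_RATE_INDEX_54MBPS (0x6C) -> 108 * 500 = 54000 kbps */
+       return (idx & 0x7f) * 500;
+}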
+
+struct wcn36xx_hal_supported_rates {
+       /*
+        * For Self STA Entry: this represents Self Mode.
+        * For Peer Stations, this represents the mode of the peer.
+        * On Station:
+        *
+        * --this mode is updated when PE adds the Self Entry.
+        *
+        * -- OR when PE sends 'ADD_BSS' message and station context in BSS
+        *    is used to indicate the mode of the AP.
+        *
+        * ON AP:
+        *
+        * -- this mode is updated when PE sends 'ADD_BSS' and Sta entry
+        *     for that BSS is used to indicate the self mode of the AP.
+        *
+        * -- OR when a station is associated, PE sends 'ADD_STA' message
+        *    with this mode updated.
+        */
+
+       enum sta_rate_mode op_rate_mode;
+
+       /* 11b, 11a and aniLegacyRates are IE rates which gives rate in
+        * unit of 500Kbps */
+       u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES];
+       u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES];
+       u16 legacy_rates[WCN36XX_HAL_NUM_POLARIS_RATES];
+       u16 reserved;
+
+       /* Taurus only supports 26 Titan rates (no ESF/concat rates will be
+        * supported). The first 26 bits are reserved for those Titan rates,
+        * the last 4 bits (bits 28-31) are for Taurus, and 2 bits (26-27)
+        * are reserved. */
+       /* Titan and Taurus Rates */
+       u32 enhanced_rate_bitmap;
+
+       /*
+        * 0-76 bits used, remaining reserved
+        * bits 0-15 and 32 should be set.
+        */
+       u8 supported_mcs_set[WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET];
+
+       /*
+        * RX Highest Supported Data Rate defines the highest data
+        * rate that the STA is able to receive, in units of 1 Mbps.
+        * This value is derived from "Supported MCS Set field" inside
+        * the HT capability element.
+        */
+       u16 rx_highest_data_rate;
+
+} __packed;
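+
+/*
+ * Example (illustrative only) of filling the legacy rate arrays in the
+ * 500 kbps units noted above for a plain 11b/g peer; the helper and the
+ * rate selection are assumptions of this sketch.
+ */
+static inline void wcn36xx_hal_set_11bg_rates(
+               struct wcn36xx_hal_supported_rates *rates)
+{
+       static const u16 dsss[WCN36XX_HAL_NUM_DSSS_RATES] = { 2, 4, 11, 22 };
+       static const u16 ofdm[WCN36XX_HAL_NUM_OFDM_RATES] = {
+               12, 18, 24, 36, 48, 72, 96, 108 };
+
+       rates->op_rate_mode = STA_11bg;
+       memcpy(rates->dsss_rates, dsss, sizeof(dsss));
+       memcpy(rates->ofdm_rates, ofdm, sizeof(ofdm));
+}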
+
+struct wcn36xx_hal_config_sta_params {
+       /* BSSID of STA */
+       u8 bssid[ETH_ALEN];
+
+       /* ASSOC ID, as assigned by UMAC */
+       u16 aid;
+
+       /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+       u8 type;
+
+       /* Short Preamble Supported. */
+       u8 short_preamble_supported;
+
+       /* MAC Address of STA */
+       u8 mac[ETH_ALEN];
+
+       /* Listen interval of the STA */
+       u16 listen_interval;
+
+       /* Support for 11e/WMM */
+       u8 wmm_enabled;
+
+       /* 11n HT capable STA */
+       u8 ht_capable;
+
+       /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+       u8 tx_channel_width_set;
+
+       /* RIFS mode 0 - NA, 1 - Allowed */
+       u8 rifs_mode;
+
+       /* L-SIG TXOP Protection mechanism
+          0 - No Support, 1 - Supported
+          SG - there is global field */
+       u8 lsig_txop_protection;
+
+       /* Max Ampdu Size supported by STA. TPE programming.
+          0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+       u8 max_ampdu_size;
+
+       /* Max Ampdu density. Used by RA.  3 : 0~7 : 2^(11nAMPDUdensity -4) */
+       u8 max_ampdu_density;
+
+       /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+       u8 max_amsdu_size;
+
+       /* Short GI support for 40Mhz packets */
+       u8 sgi_40mhz;
+
+       /* Short GI support for 20Mhz packets */
+       u8 sgi_20Mhz;
+
+       /* TODO move this parameter to the end for 3680 */
+       /* These rates are the intersection of peer and self capabilities. */
+       struct wcn36xx_hal_supported_rates supported_rates;
+
+       /* Robust Management Frame (RMF) enabled/disabled */
+       u8 rmf;
+
+       /* The unicast encryption type in the association */
+       u32 encrypt_type;
+
+       /* HAL should update the existing STA entry, if this flag is set. UMAC
+          will set this flag in case of RE-ASSOC, where we want to reuse the
+          old STA ID. 0 = Add, 1 = Update */
+       u8 action;
+
+       /* U-APSD Flags: 1b per AC.  Encoded as follows:
+          b7 b6 b5 b4 b3 b2 b1 b0 =
+          X  X  X  X  BE BK VI VO */
+       u8 uapsd;
+
+       /* Max SP Length */
+       u8 max_sp_len;
+
+       /* 11n Green Field preamble support
+          0 - Not supported, 1 - Supported */
+       u8 green_field_capable;
+
+       /* MIMO Power Save mode */
+       enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+       /* Delayed BA Support */
+       u8 delayed_ba_support;
+
+       /* Max AMPDU duration in 32us */
+       u8 max_ampdu_duration;
+
+       /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+        * set it to 0 if AP does not support it. This indication is sent
+        * to HAL and HAL uses this flag to pick up appropriate 40MHz
+        * rates. */
+       u8 dsss_cck_mode_40mhz;
+
+       /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+        * Retained for backward compatibility with existing HAL code */
+       u8 sta_index;
+
+       /* BSSID of BSS to which station is associated. Set to 0xFF when
+        * invalid. Retained for backward compatibility with existing HAL
+        * code */
+       u8 bssid_index;
+
+       u8 p2p;
+
+       /* TODO add this parameter for 3680. */
+       /* Reserved to align next field on a dword boundary */
+       /* u8 reserved; */
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_config_sta_params sta_params;
+} __packed;
+
+struct wcn36xx_hal_config_sta_params_v1 {
+       /* BSSID of STA */
+       u8 bssid[ETH_ALEN];
+
+       /* ASSOC ID, as assigned by UMAC */
+       u16 aid;
+
+       /* STA entry Type: 0 - Self, 1 - Other/Peer, 2 - BSSID, 3 - BCAST */
+       u8 type;
+
+       /* Short Preamble Supported. */
+       u8 short_preamble_supported;
+
+       /* MAC Address of STA */
+       u8 mac[ETH_ALEN];
+
+       /* Listen interval of the STA */
+       u16 listen_interval;
+
+       /* Support for 11e/WMM */
+       u8 wmm_enabled;
+
+       /* 11n HT capable STA */
+       u8 ht_capable;
+
+       /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+       u8 tx_channel_width_set;
+
+       /* RIFS mode 0 - NA, 1 - Allowed */
+       u8 rifs_mode;
+
+       /* L-SIG TXOP Protection mechanism
+          0 - No Support, 1 - Supported
+          SG - there is global field */
+       u8 lsig_txop_protection;
+
+       /* Max Ampdu Size supported by STA. TPE programming.
+          0 : 8k , 1 : 16k, 2 : 32k, 3 : 64k */
+       u8 max_ampdu_size;
+
+       /* Max Ampdu density. Used by RA.  3 : 0~7 : 2^(11nAMPDUdensity -4) */
+       u8 max_ampdu_density;
+
+       /* Max AMSDU size 1 : 3839 bytes, 0 : 7935 bytes */
+       u8 max_amsdu_size;
+
+       /* Short GI support for 40Mhz packets */
+       u8 sgi_40mhz;
+
+       /* Short GI support for 20Mhz packets */
+       u8 sgi_20Mhz;
+
+       /* Robust Management Frame (RMF) enabled/disabled */
+       u8 rmf;
+
+       /* The unicast encryption type in the association */
+       u32 encrypt_type;
+
+       /* HAL should update the existing STA entry, if this flag is set. UMAC
+          will set this flag in case of RE-ASSOC, where we want to reuse the
+          old STA ID. 0 = Add, 1 = Update */
+       u8 action;
+
+       /* U-APSD Flags: 1b per AC.  Encoded as follows:
+          b7 b6 b5 b4 b3 b2 b1 b0 =
+          X  X  X  X  BE BK VI VO */
+       u8 uapsd;
+
+       /* Max SP Length */
+       u8 max_sp_len;
+
+       /* 11n Green Field preamble support
+          0 - Not supported, 1 - Supported */
+       u8 green_field_capable;
+
+       /* MIMO Power Save mode */
+       enum wcn36xx_hal_ht_mimo_state mimo_ps;
+
+       /* Delayed BA Support */
+       u8 delayed_ba_support;
+
+       /* Max AMPDU duration in 32us */
+       u8 max_ampdu_duration;
+
+       /* HT STA should set it to 1 if it is enabled in BSS. HT STA should
+        * set it to 0 if AP does not support it. This indication is sent
+        * to HAL and HAL uses this flag to pick up appropriate 40MHz
+        * rates. */
+       u8 dsss_cck_mode_40mhz;
+
+       /* Valid STA Idx when action=Update. Set to 0xFF when invalid!
+        * Retained for backward compatibility with existing HAL code */
+       u8 sta_index;
+
+       /* BSSID of BSS to which station is associated. Set to 0xFF when
+        * invalid. Retained for backward compatibility with existing HAL
+        * code */
+       u8 bssid_index;
+
+       u8 p2p;
+
+       /* Reserved to align next field on a dword boundary */
+       u8 reserved;
+
+       /* These rates are the intersection of peer and self capabilities. */
+       struct wcn36xx_hal_supported_rates supported_rates;
+} __packed;
+
+struct wcn36xx_hal_config_sta_req_msg_v1 {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_config_sta_params_v1 sta_params;
+} __packed;
+
+struct config_sta_rsp_params {
+       /* success or failure */
+       u32 status;
+
+       /* Station index; valid only when the 'status' field is SUCCESS */
+       u8 sta_index;
+
+       /* BSSID Index of BSS to which the station is associated */
+       u8 bssid_index;
+
+       /* DPU Index for PTK */
+       u8 dpu_index;
+
+       /* DPU Index for GTK */
+       u8 bcast_dpu_index;
+
+       /* DPU Index for IGTK  */
+       u8 bcast_mgmt_dpu_idx;
+
+       /* PTK DPU signature */
+       u8 uc_ucast_sig;
+
+       /* GTK DPU signature */
+       u8 uc_bcast_sig;
+
+       /* IGTK DPU signature */
+       u8 uc_mgmt_sig;
+
+       u8 p2p;
+
+} __packed;
+
+struct wcn36xx_hal_config_sta_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       struct config_sta_rsp_params params;
+} __packed;
+
+/* Delete STA Request message */
+struct wcn36xx_hal_delete_sta_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Index of STA to delete */
+       u8 sta_index;
+
+} __packed;
+
+/* Delete STA Response message */
+struct wcn36xx_hal_delete_sta_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       /* Index of STA deleted */
+       u8 sta_id;
+} __packed;
+
+/* 12 Bytes long because this structure can be used to represent rate and
+ * extended rate set IEs. The parser assumes this to be at least 12 */
+struct wcn36xx_hal_rate_set {
+       u8 num_rates;
+       u8 rate[WCN36XX_HAL_MAC_RATESET_EID_MAX];
+} __packed;
+
+/* access category record */
+struct wcn36xx_hal_aci_aifsn {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+       u8 rsvd:1;
+       u8 aci:2;
+       u8 acm:1;
+       u8 aifsn:4;
+#else
+       u8 aifsn:4;
+       u8 acm:1;
+       u8 aci:2;
+       u8 rsvd:1;
+#endif
+} __packed;
+
+/* contention window size */
+struct wcn36xx_hal_mac_cw {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+       u8 max:4;
+       u8 min:4;
+#else
+       u8 min:4;
+       u8 max:4;
+#endif
+} __packed;
+
+struct wcn36xx_hal_edca_param_record {
+       struct wcn36xx_hal_aci_aifsn aci;
+       struct wcn36xx_hal_mac_cw cw;
+       u16 txop_limit;
+} __packed;
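+
+/*
+ * Sketch of one EDCA record using the WMM default parameters for the
+ * best-effort AC (AIFSN 3, ECWmin 4, ECWmax 10, no TXOP limit); the 4-bit
+ * cw fields are assumed to carry the contention-window exponents, as in
+ * the EDCA Parameter Set IE. Illustrative only.
+ */
+static inline void wcn36xx_hal_edca_be_defaults(
+               struct wcn36xx_hal_edca_param_record *rec)
+{
+       rec->aci.rsvd = 0;
+       rec->aci.aci = 0;       /* AC_BE */
+       rec->aci.acm = 0;
+       rec->aci.aifsn = 3;
+       rec->cw.min = 4;
+       rec->cw.max = 10;
+       rec->txop_limit = 0;
+}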
+
+struct wcn36xx_hal_mac_ssid {
+       u8 length;
+       u8 ssid[32];
+} __packed;
+
+/* Concurrency role. These are generic IDs that identify the various roles
+ *  in the software system. */
+enum wcn36xx_hal_con_mode {
+       WCN36XX_HAL_STA_MODE = 0,
+
+       /* To support softAP mode. This is misleading:
+          it means AP mode only. */
+       WCN36XX_HAL_STA_SAP_MODE = 1,
+
+       WCN36XX_HAL_P2P_CLIENT_MODE,
+       WCN36XX_HAL_P2P_GO_MODE,
+       WCN36XX_HAL_MONITOR_MODE,
+};
+
+/* This is a bit pattern to be set for each mode
+ * bit 0 - sta mode
+ * bit 1 - ap mode
+ * bit 2 - p2p client mode
+ * bit 3 - p2p go mode */
+enum wcn36xx_hal_concurrency_mode {
+       HAL_STA = 1,
+       HAL_SAP = 2,
+
+       /* To support concurrent STA and softAP mode, i.e. STA+AP */
+       HAL_STA_SAP = 3,
+
+       HAL_P2P_CLIENT = 4,
+       HAL_P2P_GO = 8,
+       HAL_MAX_CONCURRENCY_PERSONA = 4
+};
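+
+/*
+ * Since the values above form a bitmask, concurrent personas are simply
+ * OR-ed together; a quick illustration (the helper name is not part of
+ * the interface):
+ */
+static inline u32 wcn36xx_hal_concurrency_mask(bool sta, bool sap,
+               bool p2p_cli, bool p2p_go)
+{
+       return (sta ? HAL_STA : 0) | (sap ? HAL_SAP : 0) |
+              (p2p_cli ? HAL_P2P_CLIENT : 0) | (p2p_go ? HAL_P2P_GO : 0);
+}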
+
+struct wcn36xx_hal_config_bss_params {
+       /* BSSID */
+       u8 bssid[ETH_ALEN];
+
+       /* Self Mac Address */
+       u8 self_mac_addr[ETH_ALEN];
+
+       /* BSS type */
+       enum wcn36xx_hal_bss_type bss_type;
+
+       /* Operational Mode: AP =0, STA = 1 */
+       u8 oper_mode;
+
+       /* Network Type */
+       enum wcn36xx_hal_nw_type nw_type;
+
+       /* Used to classify PURE_11G/11G_MIXED to program MTU */
+       u8 short_slot_time_supported;
+
+       /* Co-exist with 11a STA */
+       u8 lla_coexist;
+
+       /* Co-exist with 11b STA */
+       u8 llb_coexist;
+
+       /* Co-exist with 11g STA */
+       u8 llg_coexist;
+
+       /* Coexistence with 11n STA */
+       u8 ht20_coexist;
+
+       /* Non GF coexist flag */
+       u8 lln_non_gf_coexist;
+
+       /* TXOP protection support */
+       u8 lsig_tx_op_protection_full_support;
+
+       /* RIFS mode */
+       u8 rifs_mode;
+
+       /* Beacon Interval in TU */
+       u16 beacon_interval;
+
+       /* DTIM period */
+       u8 dtim_period;
+
+       /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+       u8 tx_channel_width_set;
+
+       /* Operating channel */
+       u8 oper_channel;
+
+       /* Extension channel for channel bonding */
+       u8 ext_channel;
+
+       /* Reserved to align next field on a dword boundary */
+       u8 reserved;
+
+       /* TODO move sta to the end for 3680 */
+       /* Context of the station being added in HW
+        *  Add a STA entry for "itself" -
+        *
+        *  On AP  - Add the AP itself in an "STA context"
+        *
+        *  On STA - Add the AP to which this STA is joining in an
+        *  "STA context"
+        */
+       struct wcn36xx_hal_config_sta_params sta;
+       /* SSID of the BSS */
+       struct wcn36xx_hal_mac_ssid ssid;
+
+       /* HAL should update the existing BSS entry, if this flag is set.
+        * UMAC will set this flag in case of reassoc, where we want to
+        * reuse the old BSSID and still return success. 0 = Add, 1 =
+        * Update */
+       u8 action;
+
+       /* MAC Rate Set */
+       struct wcn36xx_hal_rate_set rateset;
+
+       /* Enable/Disable HT capabilities of the BSS */
+       u8 ht;
+
+       /* Enable/Disable OBSS protection */
+       u8 obss_prot_enabled;
+
+       /* RMF enabled/disabled */
+       u8 rmf;
+
+       /* HT Operating Mode of the 802.11n STA */
+       enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+       /* Dual CTS Protection: 0 - Unused, 1 - Used */
+       u8 dual_cts_protection;
+
+       /* Probe Response Max retries */
+       u8 max_probe_resp_retry_limit;
+
+       /* To Enable Hidden ssid */
+       u8 hidden_ssid;
+
+       /* To Enable/Disable FW Proxy Probe Resp */
+       u8 proxy_probe_resp;
+
+       /* Boolean to indicate if EDCA params are valid. UMAC might not
+        * have valid EDCA params or might not desire to apply EDCA params
+        * during config BSS. 0 implies Not Valid ; Non-Zero implies
+        * valid */
+       u8 edca_params_valid;
+
+       /* EDCA Parameters for Best Effort Access Category */
+       struct wcn36xx_hal_edca_param_record acbe;
+
+       /* EDCA Parameters for Background Access Category */
+       struct wcn36xx_hal_edca_param_record acbk;
+
+       /* EDCA Parameters for Video Access Category */
+       struct wcn36xx_hal_edca_param_record acvi;
+
+       /* EDCA Parameters for Voice Access Category */
+       struct wcn36xx_hal_edca_param_record acvo;
+
+       /* Ext Bss Config Msg if set */
+       u8 ext_set_sta_key_param_valid;
+
+       /* SetStaKeyParams for ext bss msg */
+       struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+       /* Persona for the BSS can be STA,AP,GO,CLIENT value same as enum
+        * wcn36xx_hal_con_mode */
+       u8 wcn36xx_hal_persona;
+
+       u8 spectrum_mgt_enable;
+
+       /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+       s8 tx_mgmt_power;
+
+       /* maxTxPower has max power to be used after applying the power
+        * constraint if any */
+       s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_config_bss_params bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_params_v1 {
+       /* BSSID */
+       u8 bssid[ETH_ALEN];
+
+       /* Self Mac Address */
+       u8 self_mac_addr[ETH_ALEN];
+
+       /* BSS type */
+       enum wcn36xx_hal_bss_type bss_type;
+
+       /* Operational Mode: AP =0, STA = 1 */
+       u8 oper_mode;
+
+       /* Network Type */
+       enum wcn36xx_hal_nw_type nw_type;
+
+       /* Used to classify PURE_11G/11G_MIXED to program MTU */
+       u8 short_slot_time_supported;
+
+       /* Co-exist with 11a STA */
+       u8 lla_coexist;
+
+       /* Co-exist with 11b STA */
+       u8 llb_coexist;
+
+       /* Co-exist with 11g STA */
+       u8 llg_coexist;
+
+       /* Coexistence with 11n STA */
+       u8 ht20_coexist;
+
+       /* Non GF coexist flag */
+       u8 lln_non_gf_coexist;
+
+       /* TXOP protection support */
+       u8 lsig_tx_op_protection_full_support;
+
+       /* RIFS mode */
+       u8 rifs_mode;
+
+       /* Beacon Interval in TU */
+       u16 beacon_interval;
+
+       /* DTIM period */
+       u8 dtim_period;
+
+       /* TX Width Set: 0 - 20 MHz only, 1 - 20/40 MHz */
+       u8 tx_channel_width_set;
+
+       /* Operating channel */
+       u8 oper_channel;
+
+       /* Extension channel for channel bonding */
+       u8 ext_channel;
+
+       /* Reserved to align next field on a dword boundary */
+       u8 reserved;
+
+       /* SSID of the BSS */
+       struct wcn36xx_hal_mac_ssid ssid;
+
+       /* HAL should update the existing BSS entry, if this flag is set.
+        * UMAC will set this flag in case of reassoc, where we want to
+        * reuse the old BSSID and still return success. 0 = Add, 1 =
+        * Update */
+       u8 action;
+
+       /* MAC Rate Set */
+       struct wcn36xx_hal_rate_set rateset;
+
+       /* Enable/Disable HT capabilities of the BSS */
+       u8 ht;
+
+       /* Enable/Disable OBSS protection */
+       u8 obss_prot_enabled;
+
+       /* RMF enabled/disabled */
+       u8 rmf;
+
+       /* HT Operating Mode of the 802.11n STA */
+       enum wcn36xx_hal_ht_operating_mode ht_oper_mode;
+
+       /* Dual CTS Protection: 0 - Unused, 1 - Used */
+       u8 dual_cts_protection;
+
+       /* Probe Response Max retries */
+       u8 max_probe_resp_retry_limit;
+
+       /* To Enable Hidden ssid */
+       u8 hidden_ssid;
+
+       /* To Enable/Disable FW Proxy Probe Resp */
+       u8 proxy_probe_resp;
+
+       /* Boolean to indicate if EDCA params are valid. UMAC might not
+        * have valid EDCA params or might not desire to apply EDCA params
+        * during config BSS. 0 implies Not Valid ; Non-Zero implies
+        * valid */
+       u8 edca_params_valid;
+
+       /* EDCA Parameters for Best Effort Access Category */
+       struct wcn36xx_hal_edca_param_record acbe;
+
+       /* EDCA Parameters for Background Access Category */
+       struct wcn36xx_hal_edca_param_record acbk;
+
+       /* EDCA Parameters for Video Access Category */
+       struct wcn36xx_hal_edca_param_record acvi;
+
+       /* EDCA Parameters for Voice Access Category */
+       struct wcn36xx_hal_edca_param_record acvo;
+
+       /* Ext Bss Config Msg if set */
+       u8 ext_set_sta_key_param_valid;
+
+       /* SetStaKeyParams for ext bss msg */
+       struct wcn36xx_hal_set_sta_key_params ext_set_sta_key_param;
+
+       /* Persona for the BSS can be STA,AP,GO,CLIENT value same as enum
+        * wcn36xx_hal_con_mode */
+       u8 wcn36xx_hal_persona;
+
+       u8 spectrum_mgt_enable;
+
+       /* HAL fills in the tx power used for mgmt frames in txMgmtPower */
+       s8 tx_mgmt_power;
+
+       /* maxTxPower has max power to be used after applying the power
+        * constraint if any */
+       s8 max_tx_power;
+
+       /* Context of the station being added in HW
+        *  Add a STA entry for "itself" -
+        *
+        *  On AP  - Add the AP itself in an "STA context"
+        *
+        *  On STA - Add the AP to which this STA is joining in an
+        *  "STA context"
+        */
+       struct wcn36xx_hal_config_sta_params_v1 sta;
+} __packed;
+
+struct wcn36xx_hal_config_bss_req_msg_v1 {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_config_bss_params_v1 bss_params;
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_params {
+       /* Success or Failure */
+       u32 status;
+
+       /* BSS index allocated by HAL */
+       u8 bss_index;
+
+       /* DPU descriptor index for PTK */
+       u8 dpu_desc_index;
+
+       /* PTK DPU signature */
+       u8 ucast_dpu_signature;
+
+       /* DPU descriptor index for GTK */
+       u8 bcast_dpu_desc_indx;
+
+       /* GTK DPU signature */
+       u8 bcast_dpu_signature;
+
+       /* DPU descriptor for IGTK */
+       u8 mgmt_dpu_desc_index;
+
+       /* IGTK DPU signature */
+       u8 mgmt_dpu_signature;
+
+       /* Station Index for BSS entry */
+       u8 bss_sta_index;
+
+       /* Self station index for this BSS */
+       u8 bss_self_sta_index;
+
+       /* Bcast station for buffering bcast frames in AP role */
+       u8 bss_bcast_sta_idx;
+
+       /* MAC Address of STA(PEER/SELF) in staContext of configBSSReq */
+       u8 mac[ETH_ALEN];
+
+       /* HAL fills in the tx power used for mgmt frames in this field. */
+       s8 tx_mgmt_power;
+
+} __packed;
+
+struct wcn36xx_hal_config_bss_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+} __packed;
+
+struct wcn36xx_hal_delete_bss_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* BSS index to be deleted */
+       u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_delete_bss_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Success or Failure */
+       u32 status;
+
+       /* BSS index that has been deleted */
+       u8 bss_index;
+
+} __packed;
+
+struct wcn36xx_hal_join_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Indicates the BSSID to which STA is going to associate */
+       u8 bssid[ETH_ALEN];
+
+       /* Indicates the channel to switch to. */
+       u8 channel;
+
+       /* Self STA MAC */
+       u8 self_sta_mac_addr[ETH_ALEN];
+
+       /* Local power constraint */
+       u8 local_power_constraint;
+
+       /* Secondary channel offset */
+       enum phy_chan_bond_state secondary_channel_offset;
+
+       /* link State */
+       enum wcn36xx_hal_link_state link_state;
+
+       /* Max TX power */
+       s8 max_tx_power;
+} __packed;
+
+struct wcn36xx_hal_join_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       /* HAL fills in the tx power used for mgmt frames in this field */
+       u8 tx_mgmt_power;
+} __packed;
+
+struct post_assoc_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       struct wcn36xx_hal_config_sta_params sta_params;
+       struct wcn36xx_hal_config_bss_params bss_params;
+};
+
+struct post_assoc_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct config_sta_rsp_params sta_rsp_params;
+       struct wcn36xx_hal_config_bss_rsp_params bss_rsp_params;
+};
+
+/* This is used to create a set of WEP keys for a given BSS. */
+struct wcn36xx_hal_set_bss_key_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* BSS Index of the BSS */
+       u8 bss_idx;
+
+       /* Encryption Type used with peer */
+       enum ani_ed_type enc_type;
+
+       /* Number of keys */
+       u8 num_keys;
+
+       /* Array of keys. */
+       struct wcn36xx_hal_keys keys[WCN36XX_HAL_MAC_MAX_NUM_OF_DEFAULT_KEYS];
+
+       /* Control for Replay Count: 1 = single TID based replay count on TX,
+        * 0 = per TID based replay count on TX */
+       u8 single_tid_rc;
+} __packed;
+
+/* tagged version of set bss key */
+struct wcn36xx_hal_set_bss_key_req_msg_tagged {
+       struct wcn36xx_hal_set_bss_key_req_msg Msg;
+       u32 tag;
+} __packed;
+
+struct wcn36xx_hal_set_bss_key_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+} __packed;
+
+/*
+ * This is used to configure the key information on a given station.
+ * When the sec_type is WEP40 or WEP104, the def_wep_idx is used to locate
+ * a preconfigured key from a BSS the station is associated with; otherwise
+ * a new key descriptor is created based on the key field.
+ */
+struct wcn36xx_hal_set_sta_key_req_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_set_sta_key_params set_sta_key_params;
+} __packed;
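+
+/*
+ * Sketch of the non-WEP case described above: a single pairwise CCMP key
+ * installed in slot 0 for both TX and RX. The helper name is illustrative
+ * and 'key_len' is assumed not to exceed WCN36XX_HAL_MAC_MAX_KEY_LENGTH.
+ */
+static inline void wcn36xx_hal_set_ccmp_ptk(
+               struct wcn36xx_hal_set_sta_key_params *p, u16 sta_index,
+               const u8 *key, u16 key_len)
+{
+       memset(p, 0, sizeof(*p));
+       p->sta_index = sta_index;
+       p->enc_type = WCN36XX_HAL_ED_CCMP;
+       p->key[0].id = 0;
+       p->key[0].unicast = 1;
+       p->key[0].direction = WCN36XX_HAL_TX_RX;
+       p->key[0].length = key_len;
+       memcpy(p->key[0].key, key, key_len);
+}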
+
+struct wcn36xx_hal_set_sta_key_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* BSS Index of the BSS */
+       u8 bss_idx;
+
+       /* Encryption Type used with peer */
+       enum ani_ed_type enc_type;
+
+       /* Key Id */
+       u8 key_id;
+
+       /* STATIC/DYNAMIC. Used when nullifying key descriptors for
+        * Static/Dynamic keys */
+       enum ani_wep_type wep_type;
+} __packed;
+
+struct wcn36xx_hal_remove_bss_key_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+} __packed;
+
+/*
+ * This is used by PE to Remove the key information on a given station.
+ */
+struct wcn36xx_hal_remove_sta_key_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* STA Index */
+       u16 sta_idx;
+
+       /* Encryption Type used with peer */
+       enum ani_ed_type enc_type;
+
+       /* Key Id */
+       u8 key_id;
+
+       /* Whether to invalidate the Broadcast key or Unicast key. In case
+        * of WEP, the same key is used for both broadcast and unicast. */
+       u8 unicast;
+
+} __packed;
+
+struct wcn36xx_hal_remove_sta_key_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+} __packed;
+
+#ifdef FEATURE_OEM_DATA_SUPPORT
+
+#ifndef OEM_DATA_REQ_SIZE
+#define OEM_DATA_REQ_SIZE 134
+#endif
+
+#ifndef OEM_DATA_RSP_SIZE
+#define OEM_DATA_RSP_SIZE 1968
+#endif
+
+struct start_oem_data_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u32 status;
+       tSirMacAddr self_mac_addr;
+       u8 oem_data_req[OEM_DATA_REQ_SIZE];
+
+};
+
+struct start_oem_data_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 oem_data_rsp[OEM_DATA_RSP_SIZE];
+};
+
+#endif
+
+struct wcn36xx_hal_switch_channel_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Channel number */
+       u8 channel_number;
+
+       /* Local power constraint */
+       u8 local_power_constraint;
+
+       /* Secondary channel offset */
+       enum phy_chan_bond_state secondary_channel_offset;
+
+       /* HAL fills in the tx power used for mgmt frames in this field. */
+       u8 tx_mgmt_power;
+
+       /* Max TX power */
+       u8 max_tx_power;
+
+       /* Self STA MAC */
+       u8 self_sta_mac_addr[ETH_ALEN];
+
+       /* VO WIFI comment: BSSID needed to identify the session. As the
+        * request has power constraints, this should be applied only to
+        * that session. Since MTU timing and EDCA are sessionized, this
+        * struct needs to be sessionized and bssid needs to be out of the
+        * VOWifi feature flag. V IMP: Keep the bssId field at the end of
+        * this msg. It is used to maintain backward compatibility by being
+        * ignored when using a new host/old FW or old host/new FW, since
+        * it is at the end of this struct.
+        */
+       u8 bssid[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_switch_channel_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Status */
+       u32 status;
+
+       /* Channel number - same as in request */
+       u8 channel_number;
+
+       /* HAL fills in the tx power used for mgmt frames in this field */
+       u8 tx_mgmt_power;
+
+       /* BSSID needed to identify session - same as in request */
+       u8 bssid[ETH_ALEN];
+
+} __packed;
+
+struct update_edca_params_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* BSS Index */
+       u16 bss_index;
+
+       /* Best Effort */
+       struct wcn36xx_hal_edca_param_record acbe;
+
+       /* Background */
+       struct wcn36xx_hal_edca_param_record acbk;
+
+       /* Video */
+       struct wcn36xx_hal_edca_param_record acvi;
+
+       /* Voice */
+       struct wcn36xx_hal_edca_param_record acvo;
+};
+
+struct update_edca_params_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct dpu_stats_params {
+       /* Index of the STA to which the statistics belong */
+       u16 sta_index;
+
+       /* Encryption mode */
+       u8 enc_mode;
+
+       /* status */
+       u32 status;
+
+       /* Statistics */
+       u32 send_blocks;
+       u32 recv_blocks;
+       u32 replays;
+       u8 mic_error_cnt;
+       u32 prot_excl_cnt;
+       u16 format_err_cnt;
+       u16 un_decryptable_cnt;
+       u32 decrypt_err_cnt;
+       u32 decrypt_ok_cnt;
+};
+
+struct wcn36xx_hal_stats_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Valid STA Idx for per STA stats request */
+       u32 sta_id;
+
+       /* Categories of stats requested as specified in eHalStatsMask */
+       u32 stats_mask;
+};
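+
+/*
+ * Sketch of a per-STA statistics request combining two of the mask bits
+ * from wcn36xx_hal_stats_mask above; filling the message header is left
+ * to the caller in this illustration.
+ */
+static inline void wcn36xx_hal_fill_stats_req(
+               struct wcn36xx_hal_stats_req_msg *req, u32 sta_id)
+{
+       req->sta_id = sta_id;
+       req->stats_mask = HAL_SUMMARY_STATS_INFO |
+                         HAL_GLOBAL_CLASS_A_STATS_INFO;
+}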
+
+struct ani_summary_stats_info {
+       /* Total number of packets(per AC) that were successfully
+        * transmitted with retries */
+       u32 retry_cnt[4];
+
+       /* The number of MSDU packets and MMPDU frames per AC that the
+        * 802.11 station successfully transmitted after more than one
+        * retransmission attempt */
+       u32 multiple_retry_cnt[4];
+
+       /* Total number of packets(per AC) that were successfully
+        * transmitted (with and without retries, including multi-cast,
+        * broadcast) */
+       u32 tx_frm_cnt[4];
+
+       /* Total number of packets that were successfully received (after
+        * appropriate filter rules including multi-cast, broadcast) */
+       u32 rx_frm_cnt;
+
+       /* Total number of duplicate frames received successfully */
+       u32 frm_dup_cnt;
+
+       /* Total number of packets (per AC) that failed to transmit */
+       u32 fail_cnt[4];
+
+       /* Total number of RTS/CTS sequence failures for transmission of a
+        * packet */
+       u32 rts_fail_cnt;
+
+       /* Total number of packets that failed to transmit because no ACK
+        * was received from the remote entity */
+       u32 ack_fail_cnt;
+
+       /* Total number of RTS/CTS sequence successes for transmission of a
+        * packet */
+       u32 rts_succ_cnt;
+
+       /* The sum of the receive error count and dropped-receive-buffer
+        * error count. HAL will provide this as a sum of (FCS error) +
+        * (Fail get BD/PDU in HW) */
+       u32 rx_discard_cnt;
+
+       /*
+        * The receive error count. HAL will provide the RxP FCS error
+        * global counter. */
+       u32 rx_error_cnt;
+
+       /* The sum of the transmit-directed byte count, transmit-multicast
+        * byte count and transmit-broadcast byte count. HAL will sum TPE
+        * UC/MC/BCAST global counters to provide this. */
+       u32 tx_byte_cnt;
+};
+
+/* defines tx_rate_flags */
+enum tx_rate_info {
+       /* Legacy rates */
+       HAL_TX_RATE_LEGACY = 0x1,
+
+       /* HT20 rates */
+       HAL_TX_RATE_HT20 = 0x2,
+
+       /* HT40 rates */
+       HAL_TX_RATE_HT40 = 0x4,
+
+       /* Rate with Short guard interval */
+       HAL_TX_RATE_SGI = 0x8,
+
+       /* Rate with Long guard interval */
+       HAL_TX_RATE_LGI = 0x10
+};
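+
+/*
+ * tx_rate_flags in the class A statistics below is a bitmask of these
+ * values; a small helper sketch for a common check:
+ */
+static inline bool wcn36xx_hal_rate_is_ht40_sgi(u32 tx_rate_flags)
+{
+       return (tx_rate_flags & HAL_TX_RATE_HT40) &&
+              (tx_rate_flags & HAL_TX_RATE_SGI);
+}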
+
+struct ani_global_class_a_stats_info {
+       /* The number of MPDU frames received by the 802.11 station for
+        * MSDU packets or MMPDU frames */
+       u32 rx_frag_cnt;
+
+       /* The number of MPDU frames received by the 802.11 station for
+        * MSDU packets or MMPDU frames when a promiscuous packet filter
+        * was enabled */
+       u32 promiscuous_rx_frag_cnt;
+
+       /* The receiver input sensitivity referenced to a FER of 8% at an
+        * MPDU length of 1024 bytes at the antenna connector. Each element
+        * of the array shall correspond to a supported rate and the order
+        * shall be the same as the supportedRates parameter. */
+       u32 rx_input_sensitivity;
+
+       /* The maximum transmit power in dBm, to one decimal place; e.g. if
+        * it is 10.5 dBm, the value would be 105 */
+       u32 max_pwr;
+
+       /* Number of times the receiver failed to synchronize with the
+        * incoming signal after detecting the sync in the preamble of the
+        * transmitted PLCP protocol data unit. */
+       u32 sync_fail_cnt;
+
+       /* Legacy transmit rate, in units of 500 kbit/sec, for the most
+        * recently transmitted frame */
+       u32 tx_rate;
+
+       /* mcs index for HT20 and HT40 rates */
+       u32 mcs_index;
+
+       /* to differentiate between HT20 and HT40 rates; short and long
+        * guard interval */
+       u32 tx_rate_flags;
+};
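As a minimal sketch (not part of the patch itself), the rate and power fields above decode as follows: tx_rate counts units of 500 kbit/s, max_pwr carries dBm with one implied decimal (105 means 10.5 dBm), and tx_rate_flags is a bitmask of enum tx_rate_info. The helper names are illustrative only:

static inline u32 wcn36xx_stats_tx_rate_kbps(u32 tx_rate)
{
        /* tx_rate counts units of 500 kbit/s, e.g. 108 -> 54000 kbit/s */
        return tx_rate * 500;
}

static inline bool wcn36xx_stats_rate_is_ht(u32 tx_rate_flags)
{
        /* HT20/HT40 bits from enum tx_rate_info above */
        return tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_HT40);
}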
+
+struct ani_global_security_stats {
+       /* The number of unencrypted received MPDU frames that the MAC
+        * layer discarded when the IEEE 802.11 dot11ExcludeUnencrypted
+        * management information base (MIB) object is enabled */
+       u32 rx_wep_unencrypted_frm_cnt;
+
+       /* The number of received MSDU packets that the 802.11 station
+        * discarded because of MIC failures */
+       u32 rx_mic_fail_cnt;
+
+       /* The number of encrypted MPDU frames that the 802.11 station
+        * failed to decrypt because of a TKIP ICV error */
+       u32 tkip_icv_err;
+
+       /* The number of received MPDU frames that the 802.11 station
+        * discarded because of an invalid AES-CCMP format */
+       u32 aes_ccmp_format_err;
+
+       /* The number of received MPDU frames that the 802.11 station
+        * discarded because of the AES-CCMP replay protection procedure */
+       u32 aes_ccmp_replay_cnt;
+
+       /* The number of received MPDU frames that the 802.11 station
+        * discarded because of errors detected by the AES-CCMP decryption
+        * algorithm */
+       u32 aes_ccmp_decrpt_err;
+
+       /* The number of encrypted MPDU frames received for which a WEP
+        * decryption key was not available on the 802.11 station */
+       u32 wep_undecryptable_cnt;
+
+       /* The number of encrypted MPDU frames that the 802.11 station
+        * failed to decrypt because of a WEP ICV error */
+       u32 wep_icv_err;
+
+       /* The number of received encrypted packets that the 802.11 station
+        * successfully decrypted */
+       u32 rx_decrypt_succ_cnt;
+
+       /* The number of encrypted packets that the 802.11 station failed
+        * to decrypt */
+       u32 rx_decrypt_fail_cnt;
+};
+
+struct ani_global_class_b_stats_info {
+       struct ani_global_security_stats uc_stats;
+       struct ani_global_security_stats mc_bc_stats;
+};
+
+struct ani_global_class_c_stats_info {
+       /* This counter shall be incremented for a received A-MSDU frame
+        * with the station's MAC address in the address 1 field or an
+        * A-MSDU frame with a group address in the address 1 field */
+       u32 rx_amsdu_cnt;
+
+       /* This counter shall be incremented when the MAC receives an AMPDU
+        * from the PHY */
+       u32 rx_ampdu_cnt;
+
+       /* This counter shall be incremented when a Frame is transmitted
+        * only on the primary channel */
+       u32 tx_20_frm_cnt;
+
+       /* This counter shall be incremented when a Frame is received only
+        * on the primary channel */
+       u32 rx_20_frm_cnt;
+
+       /* This counter shall be incremented by the number of MPDUs
+        * received in the A-MPDU when an A-MPDU is received */
+       u32 rx_mpdu_in_ampdu_cnt;
+
+       /* This counter shall be incremented when an MPDU delimiter has a
+        * CRC error when this is the first CRC error in the received AMPDU
+        * or when the previous delimiter has been decoded correctly */
+       u32 ampdu_delimiter_crc_err;
+};
+
+struct ani_per_sta_stats_info {
+       /* The number of MPDU frames that the 802.11 station transmitted
+        * and acknowledged through a received 802.11 ACK frame */
+       u32 tx_frag_cnt[4];
+
+       /* This counter shall be incremented when an A-MPDU is transmitted */
+       u32 tx_ampdu_cnt;
+
+       /* This counter shall increment by the number of MPDUs in the AMPDU
+        * when an A-MPDU is transmitted */
+       u32 tx_mpdu_in_ampdu_cnt;
+};
+
+struct wcn36xx_hal_stats_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Success or Failure */
+       u32 status;
+
+       /* STA Idx */
+       u32 sta_index;
+
+       /* Categories of STATS being returned as per eHalStatsMask */
+       u32 stats_mask;
+
+       /* message type is same as the request type */
+       u16 msg_type;
+
+       /* length of the entire request, includes the pStatsBuf length too */
+       u16 msg_len;
+};
+
+struct wcn36xx_hal_set_link_state_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 bssid[ETH_ALEN];
+       enum wcn36xx_hal_link_state state;
+       u8 self_mac_addr[ETH_ALEN];
+
+} __packed;
+
+struct set_link_state_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+/* TSPEC Params */
+struct wcn36xx_hal_ts_info_tfc {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+       u16 ackPolicy:2;
+       u16 userPrio:3;
+       u16 psb:1;
+       u16 aggregation:1;
+       u16 accessPolicy:2;
+       u16 direction:2;
+       u16 tsid:4;
+       u16 trafficType:1;
+#else
+       u16 trafficType:1;
+       u16 tsid:4;
+       u16 direction:2;
+       u16 accessPolicy:2;
+       u16 aggregation:1;
+       u16 psb:1;
+       u16 userPrio:3;
+       u16 ackPolicy:2;
+#endif
+};
+
+/* Flag to schedule the traffic type */
+struct wcn36xx_hal_ts_info_sch {
+#ifndef ANI_LITTLE_BIT_ENDIAN
+       u8 rsvd:7;
+       u8 schedule:1;
+#else
+       u8 schedule:1;
+       u8 rsvd:7;
+#endif
+};
+
+/* Traffic and scheduling info */
+struct wcn36xx_hal_ts_info {
+       struct wcn36xx_hal_ts_info_tfc traffic;
+       struct wcn36xx_hal_ts_info_sch schedule;
+};
+
+/* Information elements */
+struct wcn36xx_hal_tspec_ie {
+       u8 type;
+       u8 length;
+       struct wcn36xx_hal_ts_info ts_info;
+       u16 nom_msdu_size;
+       u16 max_msdu_size;
+       u32 min_svc_interval;
+       u32 max_svc_interval;
+       u32 inact_interval;
+       u32 suspend_interval;
+       u32 svc_start_time;
+       u32 min_data_rate;
+       u32 mean_data_rate;
+       u32 peak_data_rate;
+       u32 max_burst_sz;
+       u32 delay_bound;
+       u32 min_phy_rate;
+       u16 surplus_bw;
+       u16 medium_time;
+};
+
+struct add_ts_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Station Index */
+       u16 sta_index;
+
+       /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS */
+       u16 tspec_index;
+
+       /* To program TPE with required parameters */
+       struct wcn36xx_hal_tspec_ie tspec;
+
+       /* U-APSD Flags: 1b per AC.  Encoded as follows:
+          b7 b6 b5 b4 b3 b2 b1 b0 =
+          X  X  X  X  BE BK VI VO */
+       u8 uapsd;
+
+       /* These parameters are for all the access categories */
+
+       /* Service Interval */
+       u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+       /* Suspend Interval */
+       u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+       /* Delay Interval */
+       u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
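The uapsd field above packs one bit per access category, b3..b0 = BE BK VI VO. A minimal sketch of building that bitmask under that reading of the comment; the macro and helper names are illustrative, not part of the patch:

#define WCN36XX_UAPSD_AC_VO     BIT(0)
#define WCN36XX_UAPSD_AC_VI     BIT(1)
#define WCN36XX_UAPSD_AC_BK     BIT(2)
#define WCN36XX_UAPSD_AC_BE     BIT(3)

static inline u8 wcn36xx_uapsd_mask(bool be, bool bk, bool vi, bool vo)
{
        /* b3..b0 = BE BK VI VO, per the comment above */
        return (be ? WCN36XX_UAPSD_AC_BE : 0) |
               (bk ? WCN36XX_UAPSD_AC_BK : 0) |
               (vi ? WCN36XX_UAPSD_AC_VI : 0) |
               (vo ? WCN36XX_UAPSD_AC_VO : 0);
}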
+
+struct add_rs_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct del_ts_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Station Index */
+       u16 sta_index;
+
+       /* TSPEC identifier uniquely identifying a TSPEC for a STA in a BSS */
+       u16 tspec_index;
+
+       /* To lookup station id using the mac address */
+       u8 bssid[ETH_ALEN];
+};
+
+struct del_ts_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+/* End of TSpec Parameters */
+
+/* Start of BLOCK ACK related Parameters */
+
+struct wcn36xx_hal_add_ba_session_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Station Index */
+       u16 sta_index;
+
+       /* Peer MAC Address */
+       u8 mac_addr[ETH_ALEN];
+
+       /* ADDBA Action Frame dialog token
+          HAL will not interpret this object */
+       u8 dialog_token;
+
+       /* TID for which the BA is being setup
+          This identifies the TC or TS of interest */
+       u8 tid;
+
+       /* 0 - Delayed BA (Not supported)
+          1 - Immediate BA */
+       u8 policy;
+
+       /* Indicates the number of buffers for this TID (baTID)
+          NOTE - This is the requested buffer size. When this
+          is processed by HAL and subsequently by HDD, it is
+          possible that HDD may change this buffer size. Any
+          change in the buffer size should be noted by PE and
+          advertised appropriately in the ADDBA response */
+       u16 buffer_size;
+
+       /* BA timeout in TU's 0 means no timeout will occur */
+       u16 timeout;
+
+       /* b0..b3 - Fragment Number - Always set to 0
+          b4..b15 - Starting Sequence Number of first MSDU
+          for which this BA is setup */
+       u16 ssn;
+
+       /* ADDBA direction
+          1 - Originator
+          0 - Recipient */
+       u8 direction;
+} __packed;
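The ssn comment above describes the usual IEEE 802.11 sequence-control layout: fragment number in b0..b3 (always 0) and the starting MSDU sequence number in b4..b15. A minimal packing sketch; the helper name is illustrative, not part of the patch:

static inline u16 wcn36xx_hal_ba_pack_ssn(u16 start_seq_num)
{
        /* b0..b3 fragment number stays 0, b4..b15 carry the sequence number */
        return (start_seq_num & 0x0fff) << 4;
}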
+
+struct wcn36xx_hal_add_ba_session_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       /* Dialog token */
+       u8 dialog_token;
+
+       /* TID for which the BA session has been setup */
+       u8 ba_tid;
+
+       /* BA Buffer Size allocated for the current BA session */
+       u8 ba_buffer_size;
+
+       u8 ba_session_id;
+
+       /* Reordering Window buffer */
+       u8 win_size;
+
+       /* Station Index to id the sta */
+       u8 sta_index;
+
+       /* Starting Sequence Number */
+       u16 ssn;
+} __packed;
+
+struct wcn36xx_hal_add_ba_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Session Id */
+       u8 session_id;
+
+       /* Reorder Window Size */
+       u8 win_size;
+/* Old FW 1.2.2.4 does not support this */
+#ifdef FEATURE_ON_CHIP_REORDERING
+       u8 reordering_done_on_chip;
+#endif
+} __packed;
+
+struct wcn36xx_hal_add_ba_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       /* Dialog token */
+       u8 dialog_token;
+} __packed;
+
+struct add_ba_info {
+       u16 ba_enable:1;
+       u16 starting_seq_num:12;
+       u16 reserved:3;
+};
+
+struct wcn36xx_hal_trigger_ba_rsp_candidate {
+       u8 sta_addr[ETH_ALEN];
+       struct add_ba_info ba_info[STACFG_MAX_TC];
+} __packed;
+
+struct wcn36xx_hal_trigget_ba_req_candidate {
+       u8 sta_index;
+       u8 tid_bitmap;
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Session Id */
+       u8 session_id;
+
+       /* baCandidateCnt is followed by trigger BA
+        * Candidate List(tTriggerBaCandidate)
+        */
+       u16 candidate_cnt;
+
+} __packed;
+
+struct wcn36xx_hal_trigger_ba_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* TO SUPPORT BT-AMP */
+       u8 bssid[ETH_ALEN];
+
+       /* success or failure */
+       u32 status;
+
+       /* baCandidateCnt is followed by trigger BA
+        * Rsp Candidate List(tTriggerRspBaCandidate)
+        */
+       u16 candidate_cnt;
+} __packed;
+
+struct wcn36xx_hal_del_ba_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Station Index */
+       u16 sta_index;
+
+       /* TID for which the BA session is being deleted */
+       u8 tid;
+
+       /* DELBA direction
+          1 - Originator
+          0 - Recipient */
+       u8 direction;
+} __packed;
+
+struct wcn36xx_hal_del_ba_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+} __packed;
+
+struct tsm_stats_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Traffic Id */
+       u8 tid;
+
+       u8 bssid[ETH_ALEN];
+};
+
+struct tsm_stats_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       /* Uplink Packet Queue delay */
+       u16 uplink_pkt_queue_delay;
+
+       /* Uplink Packet Queue delay histogram */
+       u16 uplink_pkt_queue_delay_hist[4];
+
+       /* Uplink Packet Transmit delay */
+       u32 uplink_pkt_tx_delay;
+
+       /* Uplink Packet loss */
+       u16 uplink_pkt_loss;
+
+       /* Uplink Packet count */
+       u16 uplink_pkt_count;
+
+       /* Roaming count */
+       u8 roaming_count;
+
+       /* Roaming Delay */
+       u16 roaming_delay;
+};
+
+struct set_key_done_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* BSS index of the keys */
+       u8 bssidx;
+       u8 enc_type;
+};
+
+struct wcn36xx_hal_nv_img_download_req_msg {
+       /* Note: The length specified in wcn36xx_hal_nv_img_download_req_msg
+        * messages should be
+        * header.len = sizeof(wcn36xx_hal_nv_img_download_req_msg) +
+        * nv_img_buffer_size */
+       struct wcn36xx_hal_msg_header header;
+
+       /* Fragment sequence number of the NV Image. Note that NV Image
+        * might not fit into one message due to size limitation of the SMD
+        * channel FIFO. UMAC can hence choose to chop the NV blob into
+        * multiple fragments starting with sequence number 0, 1, 2 etc.
+        * The last fragment MUST be indicated by marking the
+        * isLastFragment field to 1. Note that all the NV blobs would be
+        * concatenated together by HAL without any padding bytes in
+        * between.*/
+       u16 frag_number;
+
+       /* Is this the last fragment? When set to 1 it indicates that no
+        * more fragments will be sent by UMAC and HAL can concatenate all
+        * the NV blobs received and proceed with the parsing. HAL would generate
+        * a WCN36XX_HAL_DOWNLOAD_NV_RSP to the WCN36XX_HAL_DOWNLOAD_NV_REQ
+        * after it receives each fragment */
+       u16 last_fragment;
+
+       /* NV Image size (number of bytes) */
+       u32 nv_img_buffer_size;
+
+       /* Following the 'nv_img_buffer_size', there should be
+        * nv_img_buffer_size bytes of NV Image i.e.
+        * u8[nv_img_buffer_size] */
+} __packed;
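The comments above spell out the fragmentation contract: fragments are numbered from 0, last_fragment marks the final chunk, and header.len must cover both the fixed message and the NV bytes that follow it. A minimal sketch of filling one fragment under those rules; the helper name is illustrative, not part of the patch:

static void wcn36xx_fill_nv_frag(struct wcn36xx_hal_nv_img_download_req_msg *msg,
                                 u16 frag, bool last, u32 chunk_len)
{
        msg->frag_number = frag;
        msg->last_fragment = last ? 1 : 0;
        msg->nv_img_buffer_size = chunk_len;
        /* chunk_len bytes of NV data follow the fixed part of the message */
        msg->header.len = sizeof(*msg) + chunk_len;
}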
+
+struct wcn36xx_hal_nv_img_download_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Success or Failure. HAL would generate a
+        * WCN36XX_HAL_DOWNLOAD_NV_RSP after each fragment */
+       u32 status;
+} __packed;
+
+struct wcn36xx_hal_nv_store_ind {
+       /* Note: The length specified in tHalNvStoreInd messages should be
+        * header.msgLen = sizeof(tHalNvStoreInd) + nvBlobSize */
+       struct wcn36xx_hal_msg_header header;
+
+       /* NV Item */
+       u32 table_id;
+
+       /* Size of NV Blob */
+       u32 nv_blob_size;
+
+       /* Following the 'nvBlobSize', there should be nvBlobSize bytes of
+        * NV blob i.e. u8[nvBlobSize] */
+};
+
+/* End of Block Ack Related Parameters */
+
+#define WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE 6
+
+/* Definition for MIC failure indication. MAC reports this each time a MIC
+ * failure occurs on an Rx TKIP packet
+ */
+struct mic_failure_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 bssid[ETH_ALEN];
+
+       /* address used to compute MIC */
+       u8 src_addr[ETH_ALEN];
+
+       /* transmitter address */
+       u8 ta_addr[ETH_ALEN];
+
+       u8 dst_addr[ETH_ALEN];
+
+       u8 multicast;
+
+       /* first byte of IV */
+       u8 iv1;
+
+       /* second byte of IV */
+       u8 key_id;
+
+       /* sequence number */
+       u8 tsc[WCN36XX_HAL_CIPHER_SEQ_CTR_SIZE];
+
+       /* receive address */
+       u8 rx_addr[ETH_ALEN];
+};
+
+struct update_vht_op_mode_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u16 op_mode;
+       u16 sta_id;
+};
+
+struct update_vht_op_mode_params_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u32 status;
+};
+
+struct update_beacon_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 bss_index;
+
+       /* shortPreamble mode. HAL should update all the STA rates when it
+        * receives this message */
+       u8 short_preamble;
+
+       /* short Slot time. */
+       u8 short_slot_time;
+
+       /* Beacon Interval */
+       u16 beacon_interval;
+
+       /* Protection related */
+       u8 lla_coexist;
+       u8 llb_coexist;
+       u8 llg_coexist;
+       u8 ht20_coexist;
+       u8 lln_non_gf_coexist;
+       u8 lsig_tx_op_protection_full_support;
+       u8 rifs_mode;
+
+       u16 param_change_bitmap;
+};
+
+struct update_beacon_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+       u32 status;
+};
+
+struct wcn36xx_hal_send_beacon_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* length of the template. */
+       u32 beacon_length;
+
+       /* Beacon data. */
+       u8 beacon[BEACON_TEMPLATE_SIZE];
+
+       u8 bssid[ETH_ALEN];
+
+       /* TIM IE offset from the beginning of the template. */
+       u32 tim_ie_offset;
+
+       /* P2P IE offset from the beginning of the template */
+       u16 p2p_ie_offset;
+} __packed;
+
+struct send_beacon_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+       u32 status;
+} __packed;
+
+struct enable_radar_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 bssid[ETH_ALEN];
+       u8 channel;
+};
+
+struct enable_radar_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Link Parameters */
+       u8 bssid[ETH_ALEN];
+
+       /* success or failure */
+       u32 status;
+};
+
+struct radar_detect_intr_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 radar_det_channel;
+};
+
+struct radar_detect_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* channel number on which the RADAR was detected */
+       u8 channel_number;
+
+       /* RADAR pulse width in microseconds */
+       u16 radar_pulse_width;
+
+       /* Number of RADAR pulses */
+       u16 num_radar_pulse;
+};
+
+struct wcn36xx_hal_get_tpc_report_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 sta[ETH_ALEN];
+       u8 dialog_token;
+       u8 txpower;
+};
+
+struct wcn36xx_hal_get_tpc_report_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_send_probe_resp_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 probe_resp_template[BEACON_TEMPLATE_SIZE];
+       u32 probe_resp_template_len;
+       u32 proxy_probe_req_valid_ie_bmap[8];
+       u8 bssid[ETH_ALEN];
+};
+
+struct send_probe_resp_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct send_unknown_frame_rx_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_delete_sta_context_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u16 aid;
+       u16 sta_id;
+
+       /* TO SUPPORT BT-AMP */
+       u8 bssid[ETH_ALEN];
+
+       /* HAL copies bssid from the sta table. */
+       u8 addr2[ETH_ALEN];
+
+       /* To unify the keepalive / unknown A2 / tim-based disassociation */
+       u16 reason_code;
+} __packed;
+
+struct indicate_del_sta {
+       struct wcn36xx_hal_msg_header header;
+       u8 aid;
+       u8 sta_index;
+       u8 bss_index;
+       u8 reason_code;
+       u32 status;
+};
+
+struct bt_amp_event_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       enum bt_amp_event_type btAmpEventType;
+};
+
+struct bt_amp_event_rsp {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct tl_hal_flush_ac_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Station Index. originates from HAL */
+       u8 sta_id;
+
+       /* TID for which the transmit queue is being flushed */
+       u8 tid;
+};
+
+struct tl_hal_flush_ac_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Station Index. originates from HAL */
+       u8 sta_id;
+
+       /* TID for which the transmit queue is being flushed */
+       u8 tid;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_enter_imps_req_msg {
+       struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_exit_imps_req {
+       struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_enter_bmps_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 bss_index;
+
+       /* TBTT value derived from the last beacon */
+#ifndef BUILD_QWPTTSTATIC
+       u64 tbtt;
+#endif
+       u8 dtim_count;
+
+       /* DTIM period given to HAL during association may not be valid, if
+        * association is based on ProbeRsp instead of beacon. */
+       u8 dtim_period;
+
+       /* For CCX and 11R Roaming */
+       u32 rssi_filter_period;
+
+       u32 num_beacon_per_rssi_average;
+       u8 rssi_filter_enable;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 send_data_null;
+       u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_missed_beacon_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 bss_index;
+} __packed;
+
+/* Beacon Filtering data structures */
+
+/* The above structure would be followed by multiple instances of the
+ * structure mentioned below
+ */
+struct beacon_filter_ie {
+       u8 element_id;
+       u8 check_ie_presence;
+       u8 offset;
+       u8 value;
+       u8 bitmask;
+       u8 ref;
+};
+
+struct wcn36xx_hal_add_bcn_filter_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u16 capability_info;
+       u16 capability_mask;
+       u16 beacon_interval;
+       u16 ie_num;
+       u8 bss_index;
+       u8 reserved;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_req {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 ie_Count;
+       u8 rem_ie_id[1];
+};
+
+#define WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD                  0
+#define WCN36XX_HAL_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD         1
+#define WCN36XX_HAL_IPV6_NS_OFFLOAD                         2
+#define WCN36XX_HAL_IPV6_ADDR_LEN                           16
+#define WCN36XX_HAL_OFFLOAD_DISABLE                         0
+#define WCN36XX_HAL_OFFLOAD_ENABLE                          1
+#define WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE             0x2
+#define WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE        \
+       (WCN36XX_HAL_OFFLOAD_ENABLE | WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+
+struct wcn36xx_hal_ns_offload_params {
+       u8 src_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+       u8 self_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+       /* Only 2 possible Network Advertisement IPv6 addresses are supported */
+       u8 target_ipv6_addr1[WCN36XX_HAL_IPV6_ADDR_LEN];
+       u8 target_ipv6_addr2[WCN36XX_HAL_IPV6_ADDR_LEN];
+
+       u8 self_addr[ETH_ALEN];
+       u8 src_ipv6_addr_valid:1;
+       u8 target_ipv6_addr1_valid:1;
+       u8 target_ipv6_addr2_valid:1;
+       u8 reserved1:5;
+
+       /* make it DWORD aligned */
+       u8 reserved2;
+
+       /* slot index for this offload */
+       u32 slot_index;
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_req {
+       u8 offload_Type;
+
+       /* enable or disable */
+       u8 enable;
+
+       union {
+               u8 host_ipv4_addr[4];
+               u8 host_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
+       } u;
+};
+
+struct wcn36xx_hal_host_offload_req_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_host_offload_req host_offload_params;
+       struct wcn36xx_hal_ns_offload_params ns_offload_params;
+};
+
+/* Packet Types. */
+#define WCN36XX_HAL_KEEP_ALIVE_NULL_PKT              1
+#define WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP     2
+
+/* Enable or disable keep alive */
+#define WCN36XX_HAL_KEEP_ALIVE_DISABLE   0
+#define WCN36XX_HAL_KEEP_ALIVE_ENABLE    1
+#define WCN36XX_KEEP_ALIVE_TIME_PERIOD  30 /* unit: s */
+
+/* Keep Alive request. */
+struct wcn36xx_hal_keep_alive_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 packet_type;
+       u32 time_period;
+       u8 host_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+       u8 dest_ipv4_addr[WCN36XX_HAL_IPV4_ADDR_LEN];
+       u8 dest_addr[ETH_ALEN];
+       u8 bss_index;
+} __packed;
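Together with the defines above, a NULL-frame keep-alive needs only the packet type, period and BSS index; the IPv4 fields are meaningful for the unsolicited ARP response type. A minimal sketch; the helper name is illustrative, not part of the patch:

static void wcn36xx_fill_null_keep_alive(struct wcn36xx_hal_keep_alive_req_msg *msg,
                                         u8 bss_index)
{
        msg->packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
        msg->time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;      /* seconds */
        msg->bss_index = bss_index;
        /* host_ipv4_addr/dest_ipv4_addr/dest_addr are only needed for
         * WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP */
}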
+
+struct wcn36xx_hal_rssi_threshold_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       s8 threshold1:8;
+       s8 threshold2:8;
+       s8 threshold3:8;
+       u8 thres1_pos_notify:1;
+       u8 thres1_neg_notify:1;
+       u8 thres2_pos_notify:1;
+       u8 thres2_neg_notify:1;
+       u8 thres3_pos_notify:1;
+       u8 thres3_neg_notify:1;
+       u8 reserved10:2;
+};
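The request above carries up to three signed dBm thresholds and, per threshold, flags selecting notification on positive and/or negative crossings. A minimal sketch that arms a single threshold in both directions; the -70 dBm value and the helper name are illustrative, not part of the patch:

static void wcn36xx_arm_rssi_threshold(struct wcn36xx_hal_rssi_threshold_req_msg *msg)
{
        msg->threshold1 = -70;          /* dBm, as suggested by the s8 fields */
        msg->thres1_pos_notify = 1;     /* notify when RSSI rises above it */
        msg->thres1_neg_notify = 1;     /* notify when RSSI drops below it */
}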
+
+struct wcn36xx_hal_enter_uapsd_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 bk_delivery:1;
+       u8 be_delivery:1;
+       u8 vi_delivery:1;
+       u8 vo_delivery:1;
+       u8 bk_trigger:1;
+       u8 be_trigger:1;
+       u8 vi_trigger:1;
+       u8 vo_trigger:1;
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_req_msg {
+       struct wcn36xx_hal_msg_header header;
+       u8 bss_index;
+};
+
+#define WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE 128
+#define WCN36XX_HAL_WOWL_BCAST_MAX_NUM_PATTERNS 16
+
+struct wcn36xx_hal_wowl_add_bcast_ptrn_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Pattern ID */
+       u8 id;
+
+       /* Pattern byte offset from beginning of the 802.11 packet to start
+        * of the wake-up pattern */
+       u8 byte_Offset;
+
+       /* Non-Zero Pattern size */
+       u8 size;
+
+       /* Pattern */
+       u8 pattern[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+       /* Non-zero pattern mask size */
+       u8 mask_size;
+
+       /* Pattern mask */
+       u8 mask[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+       /* Extra pattern */
+       u8 extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+       /* Extra pattern mask */
+       u8 mask_extra[WCN36XX_HAL_WOWL_BCAST_PATTERN_MAX_SIZE];
+
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_wow_del_bcast_ptrn_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Pattern ID of the wakeup pattern to be deleted */
+       u8 id;
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_enter_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Enables/disables magic packet filtering */
+       u8 magic_packet_enable;
+
+       /* Magic pattern */
+       u8 magic_pattern[ETH_ALEN];
+
+       /* Enables/disables packet pattern filtering in firmware. Enabling
+        * this flag enables broadcast pattern matching in Firmware. If
+        * unicast pattern matching is also desired,
+        * ucUcastPatternFilteringEnable flag must be set to true as well
+        */
+       u8 pattern_filtering_enable;
+
+       /* Enables/disables unicast packet pattern filtering. This flag
+        * specifies whether we want to do pattern match on unicast packets
+        * as well and not just broadcast packets. This flag has no effect
+        * if the ucPatternFilteringEnable (main controlling flag) is set
+        * to false
+        */
+       u8 ucast_pattern_filtering_enable;
+
+       /* This configuration is valid only when magicPktEnable=1. It
+        * requests hardware to wake up when it receives the Channel Switch
+        * Action Frame.
+        */
+       u8 wow_channel_switch_receive;
+
+       /* This configuration is valid only when magicPktEnable=1. It
+        * requests hardware to wake up when it receives the
+        * Deauthentication Frame.
+        */
+       u8 wow_deauth_receive;
+
+       /* This configuration is valid only when magicPktEnable=1. It
+        * requests hardware to wake up when it receives the Disassociation
+        * Frame.
+        */
+       u8 wow_disassoc_receive;
+
+       /* This configuration is valid only when magicPktEnable=1. It
+        * requests hardware to wake up when it has missed consecutive
+        * beacons. This is a hardware register configuration (NOT a
+        * firmware configuration).
+        */
+       u8 wow_max_missed_beacons;
+
+       /* This configuration is valid only when magicPktEnable=1. This is
+        * a timeout value in units of microsec. It requests hardware to
+        * unconditionally wake up after it has stayed in WoWLAN mode for
+        * some time. Set 0 to disable this feature.
+        */
+       u8 wow_max_sleep;
+
+       /* This configuration directs the WoW packet filtering to look for
+        * EAP-ID requests embedded in EAPOL frames and use this as a wake
+        * source.
+        */
+       u8 wow_eap_id_request_enable;
+
+       /* This configuration directs the WoW packet filtering to look for
+        * EAPOL-4WAY requests and use this as a wake source.
+        */
+       u8 wow_eapol_4way_enable;
+
+       /* This configuration allows a host wakeup on a network scan
+        * offload match.
+        */
+       u8 wow_net_scan_offload_match;
+
+       /* This configuration allows a host wakeup on any GTK rekeying
+        * error.
+        */
+       u8 wow_gtk_rekey_error;
+
+       /* This configuration allows a host wakeup on BSS connection loss.
+        */
+       u8 wow_bss_connection_loss;
+
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_get_rssi_req_msg {
+       struct wcn36xx_hal_msg_header header;
+};
+
+struct wcn36xx_hal_get_roam_rssi_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Valid STA Idx for per STA stats request */
+       u32 sta_id;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* STA index */
+       u8 sta_idx;
+
+       /* Access Category */
+       u8 ac;
+
+       /* User Priority */
+       u8 up;
+
+       /* Service Interval */
+       u32 service_interval;
+
+       /* Suspend Interval */
+       u32 suspend_interval;
+
+       /* Delay Interval */
+       u32 delay_interval;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 set_mcst_bcst_filter_setting;
+       u8 set_mcst_bcst_filter;
+};
+
+struct wcn36xx_hal_enter_imps_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_exit_imps_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_enter_bmps_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_exit_bmps_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       u8 bss_index;
+} __packed;
+
+struct wcn36xx_hal_enter_uapsd_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_exit_uapsd_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_rssi_notification_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u32 rssi_thres1_pos_cross:1;
+       u32 rssi_thres1_neg_cross:1;
+       u32 rssi_thres2_pos_cross:1;
+       u32 rssi_thres2_neg_cross:1;
+       u32 rssi_thres3_pos_cross:1;
+       u32 rssi_thres3_neg_cross:1;
+       u32 avg_rssi:8;
+       u32 reserved:18;
+
+};
+
+struct wcn36xx_hal_get_rssio_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+       s8 rssi;
+
+};
+
+struct wcn36xx_hal_get_roam_rssi_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       u8 sta_id;
+       s8 rssi;
+};
+
+struct wcn36xx_hal_wowl_enter_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_wowl_exit_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_add_bcn_filter_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_rem_bcn_filter_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_add_wowl_bcast_ptrn_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_del_wowl_bcast_ptrn_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_host_offload_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_keep_alive_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_set_rssi_thresh_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_set_uapsd_ac_params_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_configure_rxp_filter_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct set_max_tx_pwr_req {
+       struct wcn36xx_hal_msg_header header;
+
+       /* BSSID is needed to identify which session issued this request.
+        * As the request has power constraints, this should be applied
+        * only to that session */
+       u8 bssid[ETH_ALEN];
+
+       u8 self_addr[ETH_ALEN];
+
+       /* In request, power == MaxTx power to be used. */
+       u8 power;
+};
+
+struct set_max_tx_pwr_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* power == tx power used for management frames */
+       u8 power;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct set_tx_pwr_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* TX Power in milli watts */
+       u32 tx_power;
+
+       u8 bss_index;
+};
+
+struct set_tx_pwr_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct get_tx_pwr_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 sta_id;
+};
+
+struct get_tx_pwr_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       /* TX Power in milli watts */
+       u32 tx_power;
+};
+
+struct set_p2p_gonoa_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 opp_ps;
+       u32 ct_window;
+       u8 count;
+       u32 duration;
+       u32 interval;
+       u32 single_noa_duration;
+       u8 ps_selection;
+};
+
+struct set_p2p_gonoa_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_add_sta_self_req {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 self_addr[ETH_ALEN];
+       u32 status;
+} __packed;
+
+struct wcn36xx_hal_add_sta_self_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       /* Self STA Index */
+       u8 self_sta_index;
+
+       /* DPU Index (IGTK, PTK, GTK all same) */
+       u8 dpu_index;
+
+       /* DPU Signature */
+       u8 dpu_signature;
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct wcn36xx_hal_del_sta_self_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       u8 self_addr[ETH_ALEN];
+} __packed;
+
+struct aggr_add_ts_req {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Station Index */
+       u16 sta_idx;
+
+       /* TSPEC handler uniquely identifying a TSPEC for a STA in a BSS.
+        * This will carry the bitmap with the bit positions representing
+        * different ACs */
+       u16 tspec_index;
+
+       /* Tspec info per AC To program TPE with required parameters */
+       struct wcn36xx_hal_tspec_ie tspec[WCN36XX_HAL_MAX_AC];
+
+       /* U-APSD Flags: 1b per AC.  Encoded as follows:
+          b7 b6 b5 b4 b3 b2 b1 b0 =
+          X  X  X  X  BE BK VI VO */
+       u8 uapsd;
+
+       /* These parameters are for all the access categories */
+
+       /* Service Interval */
+       u32 service_interval[WCN36XX_HAL_MAX_AC];
+
+       /* Suspend Interval */
+       u32 suspend_interval[WCN36XX_HAL_MAX_AC];
+
+       /* Delay Interval */
+       u32 delay_interval[WCN36XX_HAL_MAX_AC];
+};
+
+struct aggr_add_ts_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status0;
+
+       /* FIXME PRIMA for future use for 11R */
+       u32 status1;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 is_apps_cpu_awake;
+};
+
+struct wcn36xx_hal_configure_apps_cpu_wakeup_state_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_dump_cmd_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u32 arg1;
+       u32 arg2;
+       u32 arg3;
+       u32 arg4;
+       u32 arg5;
+} __packed;
+
+struct wcn36xx_hal_dump_cmd_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       /* Length of the response message */
+       u32 rsp_length;
+
+       /* FIXME: Currently assuming the response will be less than
+        * 100 bytes */
+       u8 rsp_buffer[DUMPCMD_RSP_BUFFER];
+} __packed;
+
+#define WLAN_COEX_IND_DATA_SIZE (4)
+#define WLAN_COEX_IND_TYPE_DISABLE_HB_MONITOR (0)
+#define WLAN_COEX_IND_TYPE_ENABLE_HB_MONITOR (1)
+
+struct coex_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Coex Indication Type */
+       u32 type;
+
+       /* Coex Indication Data */
+       u32 data[WLAN_COEX_IND_DATA_SIZE];
+};
+
+struct wcn36xx_hal_tx_compl_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Tx Complete Indication Success or Failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_wlan_host_suspend_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u32 configured_mcst_bcst_filter_setting;
+       u32 active_session_count;
+};
+
+struct wcn36xx_hal_wlan_exclude_unencrpted_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 dot11_exclude_unencrypted;
+       u8 bssid[ETH_ALEN];
+};
+
+struct noa_attr_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 index;
+       u8 opp_ps_flag;
+       u16 ctwin;
+
+       u16 noa1_interval_count;
+       u16 bss_index;
+       u32 noa1_duration;
+       u32 noa1_interval;
+       u32 noa1_starttime;
+
+       u16 noa2_interval_count;
+       u16 reserved2;
+       u32 noa2_duration;
+       u32 noa2_interval;
+       u32 noa2_start_time;
+
+       u32 status;
+};
+
+struct noa_start_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u32 status;
+       u32 bss_index;
+};
+
+struct wcn36xx_hal_wlan_host_resume_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 configured_mcst_bcst_filter_setting;
+};
+
+struct wcn36xx_hal_host_resume_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+struct wcn36xx_hal_del_ba_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u16 sta_idx;
+
+       /* Peer MAC Address, whose BA session has timed out */
+       u8 peer_addr[ETH_ALEN];
+
+       /* TID for which a BA session timeout is being triggered */
+       u8 ba_tid;
+
+       /* DELBA direction
+        * 1 - Originator
+        * 0 - Recipient
+        */
+       u8 direction;
+
+       u32 reason_code;
+
+       /* TO SUPPORT BT-AMP */
+       u8 bssid[ETH_ALEN];
+};
+
+/* PNO Messages */
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS  26
+
+/* Max number of channels that a network can be found on */
+#define WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX  60
+
+/* Maximum numbers of networks supported by PNO */
+#define WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS  16
+
+/* The number of scan time intervals that can be programmed into PNO */
+#define WCN36XX_HAL_PNO_MAX_SCAN_TIMERS    10
+
+/* Maximum size of the probe template */
+#define WCN36XX_HAL_PNO_MAX_PROBE_SIZE     450
+
+/* Type of PNO enabling:
+ *
+ * Immediate - scanning will start immediately and PNO procedure will be
+ * repeated based on timer
+ *
+ * Suspend - scanning will start at suspend
+ *
+ * Resume - scanning will start on system resume
+ */
+enum pno_mode {
+       PNO_MODE_IMMEDIATE,
+       PNO_MODE_ON_SUSPEND,
+       PNO_MODE_ON_RESUME,
+       PNO_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Authentication type */
+enum auth_type {
+       AUTH_TYPE_ANY = 0,
+       AUTH_TYPE_OPEN_SYSTEM = 1,
+
+       /* Upper layer authentication types */
+       AUTH_TYPE_WPA = 2,
+       AUTH_TYPE_WPA_PSK = 3,
+
+       AUTH_TYPE_RSN = 4,
+       AUTH_TYPE_RSN_PSK = 5,
+       AUTH_TYPE_FT_RSN = 6,
+       AUTH_TYPE_FT_RSN_PSK = 7,
+       AUTH_TYPE_WAPI_WAI_CERTIFICATE = 8,
+       AUTH_TYPE_WAPI_WAI_PSK = 9,
+
+       AUTH_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* Encryption type */
+enum ed_type {
+       ED_ANY = 0,
+       ED_NONE = 1,
+       ED_WEP = 2,
+       ED_TKIP = 3,
+       ED_CCMP = 4,
+       ED_WPI = 5,
+
+       ED_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* SSID broadcast  type */
+enum ssid_bcast_type {
+       BCAST_UNKNOWN = 0,
+       BCAST_NORMAL = 1,
+       BCAST_HIDDEN = 2,
+
+       BCAST_TYPE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE
+};
+
+/* The network description that PNO will have to look for */
+struct network_type {
+       /* SSID of the BSS */
+       struct wcn36xx_hal_mac_ssid ssid;
+
+       /* Authentication type for the network */
+       enum auth_type authentication;
+
+       /* Encryption type for the network */
+       enum ed_type encryption;
+
+       /* Indicates the channels on which the network can be found; 0 means
+        * all channels */
+       u8 channel_count;
+       u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+       /* Indicates the RSSI threshold for the network to be considered */
+       u8 rssi_threshold;
+};
+
+struct scan_timer {
+       /* How long it should wait */
+       u32 value;
+
+       /* How many times it should repeat that wait; value 0 - keep using
+        * this timer until PNO is disabled */
+       u32 repeat;
+
+       /* e.g: 2 3 4 0 - it will wait 2s between consecutive scans for 3
+        * times - after that it will wait 4s between consecutive scans
+        * until disabled */
+};
+
+/* The network parameters to be sent to the PNO algorithm */
+struct scan_timers_type {
+       /* set to 0 if you wish for PNO to use its default telescopic timer */
+       u8 count;
+
+       /* Each value represents the amount of time that PNO will wait
+        * between two consecutive scan procedures. If a uniform timer that
+        * always fires at the exact same interval is desired, a single
+        * value is to be set. If a more complex, telescopic-like timer is
+        * desired, multiple values can be set; once PNO reaches the end of
+        * the array it will continue scanning at the interval given by the
+        * last value */
+       struct scan_timer values[WCN36XX_HAL_PNO_MAX_SCAN_TIMERS];
+};
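The scan_timer comments above describe a telescopic schedule: each entry is an interval plus a repeat count, and repeat == 0 means the interval is kept until PNO is disabled. A minimal sketch reproducing the "2 s three times, then 4 s" example from the comment; the helper name is illustrative, not part of the patch:

static void wcn36xx_example_scan_timers(struct scan_timers_type *t)
{
        t->count = 2;
        t->values[0].value = 2;         /* wait 2 s between scans... */
        t->values[0].repeat = 3;        /* ...for the first 3 scans */
        t->values[1].value = 4;         /* then wait 4 s between scans... */
        t->values[1].repeat = 0;        /* ...until PNO is disabled */
}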
+
+/* Preferred network list request */
+struct set_pref_netw_list_req {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Enable PNO */
+       u32 enable;
+
+       /* Immediate,  On Suspend,   On Resume */
+       enum pno_mode mode;
+
+       /* Number of networks sent for PNO */
+       u32 networks_count;
+
+       /* The networks that PNO needs to look for */
+       struct network_type networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+       /* The scan timers required for PNO */
+       struct scan_timers_type scan_timers;
+
+       /* Probe template for 2.4GHz band */
+       u16 band_24g_probe_size;
+       u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+       /* Probe template for 5GHz band */
+       u16 band_5g_probe_size;
+       u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* The network description that PNO will have to look for */
+struct network_type_new {
+       /* SSID of the BSS */
+       struct wcn36xx_hal_mac_ssid ssid;
+
+       /* Authentication type for the network */
+       enum auth_type authentication;
+
+       /* Encryption type for the network */
+       enum ed_type encryption;
+
+       /* SSID broadcast type, normal, hidden or unknown */
+       enum ssid_bcast_type bcast_network_type;
+
+       /* Indicates the channels on which the network can be found; 0 means
+        * all channels */
+       u8 channel_count;
+       u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+       /* Indicates the RSSI threshold for the network to be considered */
+       u8 rssi_threshold;
+};
+
+/* Preferred network list request new */
+struct set_pref_netw_list_req_new {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Enable PNO */
+       u32 enable;
+
+       /* Immediate,  On Suspend,   On Resume */
+       enum pno_mode mode;
+
+       /* Number of networks sent for PNO */
+       u32 networks_count;
+
+       /* The networks that PNO needs to look for */
+       struct network_type_new networks[WCN36XX_HAL_PNO_MAX_SUPP_NETWORKS];
+
+       /* The scan timers required for PNO */
+       struct scan_timers_type scan_timers;
+
+       /* Probe template for 2.4GHz band */
+       u16 band_24g_probe_size;
+       u8 band_24g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+
+       /* Probe template for 5GHz band */
+       u16 band_5g_probe_size;
+       u8 band_5g_probe_template[WCN36XX_HAL_PNO_MAX_PROBE_SIZE];
+};
+
+/* Preferred network list response */
+struct set_pref_netw_list_resp {
+       struct wcn36xx_hal_msg_header header;
+
+       /* status of the request - just to indicate that PNO has
+        * acknowledged the request and will start scanning */
+       u32 status;
+};
+
+/* Preferred network found indication */
+struct pref_netw_found_ind {
+
+       struct wcn36xx_hal_msg_header header;
+
+       /* Network that was found with the highest RSSI */
+       struct wcn36xx_hal_mac_ssid ssid;
+
+       /* Indicates the RSSI */
+       u8 rssi;
+};
+
+/* RSSI Filter request */
+struct set_rssi_filter_req {
+       struct wcn36xx_hal_msg_header header;
+
+       /* RSSI Threshold */
+       u8 rssi_threshold;
+};
+
+/* Set RSSI filter resp */
+struct set_rssi_filter_resp {
+       struct wcn36xx_hal_msg_header header;
+
+       /* status of the request */
+       u32 status;
+};
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct wcn36xx_hal_update_scan_params_req {
+
+       struct wcn36xx_hal_msg_header header;
+
+       /* Host setting for 11d */
+       u8 dot11d_enabled;
+
+       /* Lets PNO know that host has determined the regulatory domain */
+       u8 dot11d_resolved;
+
+       /* Channels on which PNO is allowed to scan */
+       u8 channel_count;
+       u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS];
+
+       /* Minimum channel time */
+       u16 active_min_ch_time;
+
+       /* Maximum channel time */
+       u16 active_max_ch_time;
+
+       /* Minimum channel time */
+       u16 passive_min_ch_time;
+
+       /* Maximum channel time */
+       u16 passive_max_ch_time;
+
+       /* Cb State */
+       enum phy_chan_bond_state state;
+} __packed;
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct update_scan_params_req_ex {
+
+       struct wcn36xx_hal_msg_header header;
+
+       /* Host setting for 11d */
+       u8 dot11d_enabled;
+
+       /* Lets PNO know that host has determined the regulatory domain */
+       u8 dot11d_resolved;
+
+       /* Channels on which PNO is allowed to scan */
+       u8 channel_count;
+       u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
+
+       /* Minimum channel time */
+       u16 active_min_ch_time;
+
+       /* Maximum channel time */
+       u16 active_max_ch_time;
+
+       /* Minimum channel time */
+       u16 passive_min_ch_time;
+
+       /* Maximum channel time */
+       u16 passive_max_ch_time;
+
+       /* Cb State */
+       enum phy_chan_bond_state state;
+};
+
+/* Update scan params - sent from host to PNO to be used during PNO
+ * scanning */
+struct wcn36xx_hal_update_scan_params_resp {
+
+       struct wcn36xx_hal_msg_header header;
+
+       /* status of the request */
+       u32 status;
+} __packed;
+
+struct wcn36xx_hal_set_tx_per_tracking_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* 0: disable, 1:enable */
+       u8 tx_per_tracking_enable;
+
+       /* Check period, unit is sec. */
+       u8 tx_per_tracking_period;
+
+       /* (Fail TX packet)/(Total TX packet) ratio, the unit is 10%. */
+       u8 tx_per_tracking_ratio;
+
+       /* A watermark on the check number; once the tx packet count exceeds
+        * this number, we do the check. Default is 5 */
+       u32 tx_per_tracking_watermark;
+};
+
+struct wcn36xx_hal_set_tx_per_tracking_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+};
+
+struct tx_per_hit_ind_msg {
+       struct wcn36xx_hal_msg_header header;
+};
+
+/* Packet Filtering Definitions Begin */
+#define    WCN36XX_HAL_PROTOCOL_DATA_LEN                  8
+#define    WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS        240
+#define    WCN36XX_HAL_MAX_NUM_FILTERS                   20
+#define    WCN36XX_HAL_MAX_CMP_PER_FILTER                10
+
+enum wcn36xx_hal_receive_packet_filter_type {
+       HAL_RCV_FILTER_TYPE_INVALID,
+       HAL_RCV_FILTER_TYPE_FILTER_PKT,
+       HAL_RCV_FILTER_TYPE_BUFFER_PKT,
+       HAL_RCV_FILTER_TYPE_MAX_ENUM_SIZE
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_protocol_type {
+       HAL_FILTER_PROTO_TYPE_INVALID,
+       HAL_FILTER_PROTO_TYPE_MAC,
+       HAL_FILTER_PROTO_TYPE_ARP,
+       HAL_FILTER_PROTO_TYPE_IPV4,
+       HAL_FILTER_PROTO_TYPE_IPV6,
+       HAL_FILTER_PROTO_TYPE_UDP,
+       HAL_FILTER_PROTO_TYPE_MAX
+};
+
+enum wcn36xx_hal_rcv_pkt_flt_cmp_flag_type {
+       HAL_FILTER_CMP_TYPE_INVALID,
+       HAL_FILTER_CMP_TYPE_EQUAL,
+       HAL_FILTER_CMP_TYPE_MASK_EQUAL,
+       HAL_FILTER_CMP_TYPE_NOT_EQUAL,
+       HAL_FILTER_CMP_TYPE_MAX
+};
+
+struct wcn36xx_hal_rcv_pkt_filter_params {
+       u8 protocol_layer;
+       u8 cmp_flag;
+
+       /* Length of the data to compare */
+       u16 data_length;
+
+       /* from start of the respective frame header */
+       u8 data_offset;
+
+       /* Reserved field */
+       u8 reserved;
+
+       /* Data to compare */
+       u8 compare_data[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+
+       /* Mask to be applied on the received packet data before compare */
+       u8 data_mask[WCN36XX_HAL_PROTOCOL_DATA_LEN];
+};
+
+struct wcn36xx_hal_sessionized_rcv_pkt_filter_cfg_type {
+       u8 id;
+       u8 type;
+       u8 params_count;
+       u32 coleasce_time;
+       u8 bss_index;
+       struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_set_rcv_pkt_filter_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 id;
+       u8 type;
+       u8 params_count;
+       u32 coalesce_time;
+       struct wcn36xx_hal_rcv_pkt_filter_params params[1];
+};
+
+struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
+       /* from start of the respective frame header */
+       u8 data_offset;
+
+       u32 mc_addr_count;
+       u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_set_pkt_filter_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt {
+       u8 id;
+       u32 match_cnt;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_match_cnt_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Success or Failure */
+       u32 status;
+
+       u32 match_count;
+       struct wcn36xx_hal_rcv_flt_pkt_match_cnt
+               matches[WCN36XX_HAL_MAX_NUM_FILTERS];
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_param {
+       /* only valid for response message */
+       u32 status;
+       u8 id;
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_req_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_rcv_flt_pkt_clear_param param;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
+       struct wcn36xx_hal_msg_header header;
+       struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
+};
+
+struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+       u32 status;
+       u8 bss_index;
+};
+
+/* Packet Filtering Definitions End */
+
+struct wcn36xx_hal_set_power_params_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /*  Ignore DTIM */
+       u32 ignore_dtim;
+
+       /* DTIM Period */
+       u32 dtim_period;
+
+       /* Listen Interval */
+       u32 listen_interval;
+
+       /* Broadcast Multicast Filter  */
+       u32 bcast_mcast_filter;
+
+       /* Beacon Early Termination */
+       u32 enable_bet;
+
+       /* Beacon Early Termination Interval */
+       u32 bet_interval;
+} __packed;
+
+struct wcn36xx_hal_set_power_params_resp {
+
+       struct wcn36xx_hal_msg_header header;
+
+       /* status of the request */
+       u32 status;
+} __packed;
+
+/* Capability bitmap exchange definitions and macros starts */
+
+enum place_holder_in_cap_bitmap {
+       MCC = 0,
+       P2P = 1,
+       DOT11AC = 2,
+       SLM_SESSIONIZATION = 3,
+       DOT11AC_OPMODE = 4,
+       SAP32STA = 5,
+       TDLS = 6,
+       P2P_GO_NOA_DECOUPLE_INIT_SCAN = 7,
+       WLANACTIVE_OFFLOAD = 8,
+       BEACON_OFFLOAD = 9,
+       SCAN_OFFLOAD = 10,
+       ROAM_OFFLOAD = 11,
+       BCN_MISS_OFFLOAD = 12,
+       STA_POWERSAVE = 13,
+       STA_ADVANCED_PWRSAVE = 14,
+       AP_UAPSD = 15,
+       AP_DFS = 16,
+       BLOCKACK = 17,
+       PHY_ERR = 18,
+       BCN_FILTER = 19,
+       RTT = 20,
+       RATECTRL = 21,
+       WOW = 22,
+       MAX_FEATURE_SUPPORTED = 128,
+};
+
+struct wcn36xx_hal_feat_caps_msg {
+
+       struct wcn36xx_hal_msg_header header;
+
+       u32 feat_caps[4];
+} __packed;
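feat_caps[4] above is a 128-bit capability bitmap indexed by enum place_holder_in_cap_bitmap, 32 bits per word. A minimal sketch of setting and testing a capability; the helper names are illustrative, not part of the patch, and BIT() is the usual kernel macro:

static inline void wcn36xx_set_feat_cap(struct wcn36xx_hal_feat_caps_msg *msg,
                                        enum place_holder_in_cap_bitmap cap)
{
        msg->feat_caps[cap / 32] |= BIT(cap % 32);
}

static inline bool wcn36xx_has_feat_cap(const struct wcn36xx_hal_feat_caps_msg *msg,
                                        enum place_holder_in_cap_bitmap cap)
{
        return msg->feat_caps[cap / 32] & BIT(cap % 32);
}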
+
+/* status codes to help debug rekey failures */
+enum gtk_rekey_status {
+       WCN36XX_HAL_GTK_REKEY_STATUS_SUCCESS = 0,
+
+       /* rekey detected, but not handled */
+       WCN36XX_HAL_GTK_REKEY_STATUS_NOT_HANDLED = 1,
+
+       /* MIC check error on M1 */
+       WCN36XX_HAL_GTK_REKEY_STATUS_MIC_ERROR = 2,
+
+       /* decryption error on M1  */
+       WCN36XX_HAL_GTK_REKEY_STATUS_DECRYPT_ERROR = 3,
+
+       /* M1 replay detected */
+       WCN36XX_HAL_GTK_REKEY_STATUS_REPLAY_ERROR = 4,
+
+       /* missing GTK key descriptor in M1 */
+       WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_KDE = 5,
+
+       /* missing iGTK key descriptor in M1 */
+       WCN36XX_HAL_GTK_REKEY_STATUS_MISSING_IGTK_KDE = 6,
+
+       /* key installation error */
+       WCN36XX_HAL_GTK_REKEY_STATUS_INSTALL_ERROR = 7,
+
+       /* iGTK key installation error */
+       WCN36XX_HAL_GTK_REKEY_STATUS_IGTK_INSTALL_ERROR = 8,
+
+       /* GTK rekey M2 response TX error */
+       WCN36XX_HAL_GTK_REKEY_STATUS_RESP_TX_ERROR = 9,
+
+       /* non-specific general error */
+       WCN36XX_HAL_GTK_REKEY_STATUS_GEN_ERROR = 255
+};
+
+/* wake reason types */
+enum wake_reason_type {
+       WCN36XX_HAL_WAKE_REASON_NONE = 0,
+
+       /* magic packet match */
+       WCN36XX_HAL_WAKE_REASON_MAGIC_PACKET = 1,
+
+       /* host defined pattern match */
+       WCN36XX_HAL_WAKE_REASON_PATTERN_MATCH = 2,
+
+       /* EAP-ID frame detected */
+       WCN36XX_HAL_WAKE_REASON_EAPID_PACKET = 3,
+
+       /* start of EAPOL 4-way handshake detected */
+       WCN36XX_HAL_WAKE_REASON_EAPOL4WAY_PACKET = 4,
+
+       /* network scan offload match */
+       WCN36XX_HAL_WAKE_REASON_NETSCAN_OFFL_MATCH = 5,
+
+       /* GTK rekey status wakeup (see status) */
+       WCN36XX_HAL_WAKE_REASON_GTK_REKEY_STATUS = 6,
+
+       /* BSS connection lost */
+       WCN36XX_HAL_WAKE_REASON_BSS_CONN_LOST = 7,
+};
+
+/*
+ * Wake packet, saved at tWakeReasonParams.DataStart.
+ * This data is sent for any wake reason that involves a packet-based wakeup:
+ *
+ * WCN36XX_HAL_WAKE_REASON_TYPE_MAGIC_PACKET
+ * WCN36XX_HAL_WAKE_REASON_TYPE_PATTERN_MATCH
+ * WCN36XX_HAL_WAKE_REASON_TYPE_EAPID_PACKET
+ * WCN36XX_HAL_WAKE_REASON_TYPE_EAPOL4WAY_PACKET
+ * WCN36XX_HAL_WAKE_REASON_TYPE_GTK_REKEY_STATUS
+ *
+ * The information is provided to the host for auditing and debug purposes.
+ */
+
+/* Wake reason indication */
+struct wcn36xx_hal_wake_reason_ind {
+       struct wcn36xx_hal_msg_header header;
+
+       /* see tWakeReasonType */
+       u32 reason;
+
+       /* argument specific to the reason type */
+       u32 reason_arg;
+
+       /* length of the optional data stored in this message; if HAL
+        * truncates the data (i.e. data packets), this length will be less
+        * than the actual length */
+       u32 stored_data_len;
+
+       /* actual length of data */
+       u32 actual_data_len;
+
+       /* variable length start of data (length == stored_data_len); see
+        * the specific wake type */
+       u8 data_start[1];
+
+       u32 bss_index:8;
+       u32 reserved:24;
+};
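The indication carries a variable-length copy of the packet that woke the device: stored_data_len bytes are actually present at data_start, while actual_data_len records the original (possibly larger) packet size. A minimal sketch, assuming the indication has already been received into a buffer of known length, of how a handler might copy the truncated wake packet out:

/* Hypothetical helper: extract the (possibly truncated) wake packet. */
static int example_copy_wake_packet(const struct wcn36xx_hal_wake_reason_ind *ind,
				    size_t ind_len, u8 *out, size_t out_len)
{
	size_t hdr_len = offsetof(struct wcn36xx_hal_wake_reason_ind, data_start);

	/* stored_data_len may be smaller than actual_data_len if the
	 * firmware truncated the packet before handing it to the host. */
	if (ind_len < hdr_len || ind->stored_data_len > ind_len - hdr_len)
		return -EINVAL;
	if (ind->stored_data_len > out_len)
		return -ENOSPC;

	memcpy(out, ind->data_start, ind->stored_data_len);
	return ind->stored_data_len;
}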
+
+#define WCN36XX_HAL_GTK_KEK_BYTES 16
+#define WCN36XX_HAL_GTK_KCK_BYTES 16
+
+#define WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE (1 << 0)
+
+#define GTK_SET_BSS_KEY_TAG  0x1234AA55
+
+struct wcn36xx_hal_gtk_offload_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* optional flags */
+       u32 flags;
+
+       /* Key confirmation key */
+       u8 kck[WCN36XX_HAL_GTK_KCK_BYTES];
+
+       /* key encryption key */
+       u8 kek[WCN36XX_HAL_GTK_KEK_BYTES];
+
+       /* replay counter */
+       u64 key_replay_counter;
+
+       u8 bss_index;
+};
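The GTK offload request hands the firmware the keys it needs to answer group rekeys on the host's behalf: the KCK to validate EAPOL-Key MICs, the KEK to unwrap the new GTK, and the current replay counter. A minimal sketch (hypothetical helper; the key material and replay counter are assumed to come from the supplicant's rekey data) of filling the request before it is serialized into a HAL message:

/* Hypothetical helper: populate a GTK offload request. */
static void example_fill_gtk_offload_req(struct wcn36xx_hal_gtk_offload_req_msg *msg,
					 const u8 *kck, const u8 *kek,
					 u64 replay_ctr, u8 bss_index)
{
	/* Leaving flags at 0 keeps the offload enabled; setting
	 * WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE turns it off. */
	msg->flags = 0;
	memcpy(msg->kck, kck, WCN36XX_HAL_GTK_KCK_BYTES);
	memcpy(msg->kek, kek, WCN36XX_HAL_GTK_KEK_BYTES);
	msg->key_replay_counter = replay_ctr;
	msg->bss_index = bss_index;
}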
+
+struct wcn36xx_hal_gtk_offload_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_req_msg {
+       struct wcn36xx_hal_msg_header header;
+       u8 bss_index;
+};
+
+struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+
+       /* last rekey status when the rekey was offloaded */
+       u32 last_rekey_status;
+
+       /* current replay counter value */
+       u64 key_replay_counter;
+
+       /* total rekey attempts */
+       u32 total_rekey_count;
+
+       /* successful GTK rekeys */
+       u32 gtk_rekey_count;
+
+       /* successful iGTK rekeys */
+       u32 igtk_rekey_count;
+
+       u8 bss_index;
+};
+
+struct dhcp_info {
+       /* Device mode in which the DHCP activity is taking place */
+       u8 device_mode;
+
+       u8 addr[ETH_ALEN];
+};
+
+struct dhcp_ind_status {
+       struct wcn36xx_hal_msg_header header;
+
+       /* success or failure */
+       u32 status;
+};
+
+/*
+ * Thermal mitigation mode of operation.
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_0 - mitigate by disabling AMPDU
+ * aggregation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_1 - mitigate by disabling AMPDU
+ * aggregation and reducing transmit power
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_MODE_2 - not supported
+ */
+enum wcn36xx_hal_thermal_mitigation_mode_type {
+       HAL_THERMAL_MITIGATION_MODE_INVALID = -1,
+       HAL_THERMAL_MITIGATION_MODE_0,
+       HAL_THERMAL_MITIGATION_MODE_1,
+       HAL_THERMAL_MITIGATION_MODE_2,
+       HAL_THERMAL_MITIGATION_MODE_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/*
+ * Thermal mitigation level.
+ * Note the levels are cumulative, i.e. WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2
+ * combines WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 and
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1.
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_0 - lowest level of thermal mitigation.
+ * This level indicates normal mode of operation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_1 - 1st level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_2 - 2nd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_3 - 3rd level of thermal mitigation
+ *
+ * WCN36XX_HAL_THERMAL_MITIGATION_LEVEL_4 - 4th level of thermal mitigation
+ */
+enum wcn36xx_hal_thermal_mitigation_level_type {
+       HAL_THERMAL_MITIGATION_LEVEL_INVALID = -1,
+       HAL_THERMAL_MITIGATION_LEVEL_0,
+       HAL_THERMAL_MITIGATION_LEVEL_1,
+       HAL_THERMAL_MITIGATION_LEVEL_2,
+       HAL_THERMAL_MITIGATION_LEVEL_3,
+       HAL_THERMAL_MITIGATION_LEVEL_4,
+       HAL_THERMAL_MITIGATION_LEVEL_MAX = WCN36XX_HAL_MAX_ENUM_SIZE,
+};
+
+
+/* WCN36XX_HAL_SET_THERMAL_MITIGATION_REQ */
+struct set_thermal_mitigation_req_msg {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Thermal Mitigation Operation Mode */
+       enum wcn36xx_hal_thermal_mitigation_mode_type mode;
+
+       /* Thermal Mitigation Level */
+       enum wcn36xx_hal_thermal_mitigation_level_type level;
+};
+
+struct set_thermal_mitigation_resp {
+
+       struct wcn36xx_hal_msg_header header;
+
+       /* status of the request */
+       u32 status;
+};
+
+/* Per STA Class B Statistics. Class B statistics are STA TX/RX stats
+ * provided to FW from Host via periodic messages */
+struct stats_class_b_ind {
+       struct wcn36xx_hal_msg_header header;
+
+       /* Duration over which these stats were collected */
+       u32 duration;
+
+       /* Per STA Stats */
+
+       /* TX stats */
+       u32 tx_bytes_pushed;
+       u32 tx_packets_pushed;
+
+       /* RX stats */
+       u32 rx_bytes_rcvd;
+       u32 rx_packets_rcvd;
+       u32 rx_time_total;
+};
+
+#endif /* _HAL_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
new file mode 100644 (file)
index 0000000..7839b31
--- /dev/null
@@ -0,0 +1,1036 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "wcn36xx.h"
+
+unsigned int wcn36xx_dbg_mask;
+module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+#define CHAN2G(_freq, _idx) { \
+       .band = IEEE80211_BAND_2GHZ, \
+       .center_freq = (_freq), \
+       .hw_value = (_idx), \
+       .max_power = 25, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+       .band = IEEE80211_BAND_5GHZ, \
+       .center_freq = (_freq), \
+       .hw_value = (_idx), \
+       .max_power = 25, \
+}
+
+/* The wcn firmware expects channel values to match
+ * their mnemonic values. So use these for .hw_value. */
+static struct ieee80211_channel wcn_2ghz_channels[] = {
+       CHAN2G(2412, 1), /* Channel 1 */
+       CHAN2G(2417, 2), /* Channel 2 */
+       CHAN2G(2422, 3), /* Channel 3 */
+       CHAN2G(2427, 4), /* Channel 4 */
+       CHAN2G(2432, 5), /* Channel 5 */
+       CHAN2G(2437, 6), /* Channel 6 */
+       CHAN2G(2442, 7), /* Channel 7 */
+       CHAN2G(2447, 8), /* Channel 8 */
+       CHAN2G(2452, 9), /* Channel 9 */
+       CHAN2G(2457, 10), /* Channel 10 */
+       CHAN2G(2462, 11), /* Channel 11 */
+       CHAN2G(2467, 12), /* Channel 12 */
+       CHAN2G(2472, 13), /* Channel 13 */
+       CHAN2G(2484, 14)  /* Channel 14 */
+
+};
+
+static struct ieee80211_channel wcn_5ghz_channels[] = {
+       CHAN5G(5180, 36),
+       CHAN5G(5200, 40),
+       CHAN5G(5220, 44),
+       CHAN5G(5240, 48),
+       CHAN5G(5260, 52),
+       CHAN5G(5280, 56),
+       CHAN5G(5300, 60),
+       CHAN5G(5320, 64),
+       CHAN5G(5500, 100),
+       CHAN5G(5520, 104),
+       CHAN5G(5540, 108),
+       CHAN5G(5560, 112),
+       CHAN5G(5580, 116),
+       CHAN5G(5600, 120),
+       CHAN5G(5620, 124),
+       CHAN5G(5640, 128),
+       CHAN5G(5660, 132),
+       CHAN5G(5700, 140),
+       CHAN5G(5745, 149),
+       CHAN5G(5765, 153),
+       CHAN5G(5785, 157),
+       CHAN5G(5805, 161),
+       CHAN5G(5825, 165)
+};
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+       .bitrate        = (_bitrate),                   \
+       .flags          = (_flags),                     \
+       .hw_value       = (_hw_rate),                   \
+       .hw_value_short = (_hw_rate)  \
+}
+
+static struct ieee80211_rate wcn_2ghz_rates[] = {
+       RATE(10, HW_RATE_INDEX_1MBPS, 0),
+       RATE(20, HW_RATE_INDEX_2MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+       RATE(55, HW_RATE_INDEX_5_5MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+       RATE(110, HW_RATE_INDEX_11MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
+       RATE(60, HW_RATE_INDEX_6MBPS, 0),
+       RATE(90, HW_RATE_INDEX_9MBPS, 0),
+       RATE(120, HW_RATE_INDEX_12MBPS, 0),
+       RATE(180, HW_RATE_INDEX_18MBPS, 0),
+       RATE(240, HW_RATE_INDEX_24MBPS, 0),
+       RATE(360, HW_RATE_INDEX_36MBPS, 0),
+       RATE(480, HW_RATE_INDEX_48MBPS, 0),
+       RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_rate wcn_5ghz_rates[] = {
+       RATE(60, HW_RATE_INDEX_6MBPS, 0),
+       RATE(90, HW_RATE_INDEX_9MBPS, 0),
+       RATE(120, HW_RATE_INDEX_12MBPS, 0),
+       RATE(180, HW_RATE_INDEX_18MBPS, 0),
+       RATE(240, HW_RATE_INDEX_24MBPS, 0),
+       RATE(360, HW_RATE_INDEX_36MBPS, 0),
+       RATE(480, HW_RATE_INDEX_48MBPS, 0),
+       RATE(540, HW_RATE_INDEX_54MBPS, 0)
+};
+
+static struct ieee80211_supported_band wcn_band_2ghz = {
+       .channels       = wcn_2ghz_channels,
+       .n_channels     = ARRAY_SIZE(wcn_2ghz_channels),
+       .bitrates       = wcn_2ghz_rates,
+       .n_bitrates     = ARRAY_SIZE(wcn_2ghz_rates),
+       .ht_cap         = {
+               .cap =  IEEE80211_HT_CAP_GRN_FLD |
+                       IEEE80211_HT_CAP_SGI_20 |
+                       IEEE80211_HT_CAP_DSSSCCK40 |
+                       IEEE80211_HT_CAP_LSIG_TXOP_PROT,
+               .ht_supported = true,
+               .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+               .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+               .mcs = {
+                       .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+                       .rx_highest = cpu_to_le16(72),
+                       .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+               }
+       }
+};
+
+static struct ieee80211_supported_band wcn_band_5ghz = {
+       .channels       = wcn_5ghz_channels,
+       .n_channels     = ARRAY_SIZE(wcn_5ghz_channels),
+       .bitrates       = wcn_5ghz_rates,
+       .n_bitrates     = ARRAY_SIZE(wcn_5ghz_rates),
+       .ht_cap         = {
+               .cap =  IEEE80211_HT_CAP_GRN_FLD |
+                       IEEE80211_HT_CAP_SGI_20 |
+                       IEEE80211_HT_CAP_DSSSCCK40 |
+                       IEEE80211_HT_CAP_LSIG_TXOP_PROT |
+                       IEEE80211_HT_CAP_SGI_40 |
+                       IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+               .ht_supported = true,
+               .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
+               .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+               .mcs = {
+                       .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+                       .rx_highest = cpu_to_le16(72),
+                       .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+               }
+       }
+};
+
+#ifdef CONFIG_PM
+
+static const struct wiphy_wowlan_support wowlan_support = {
+       .flags = WIPHY_WOWLAN_ANY
+};
+
+#endif
+
+static inline u8 get_sta_index(struct ieee80211_vif *vif,
+                              struct wcn36xx_sta *sta_priv)
+{
+       return NL80211_IFTYPE_STATION == vif->type ?
+              sta_priv->bss_sta_index :
+              sta_priv->sta_index;
+}
+
+static int wcn36xx_start(struct ieee80211_hw *hw)
+{
+       struct wcn36xx *wcn = hw->priv;
+       int ret;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac start\n");
+
+       /* SMD initialization */
+       ret = wcn36xx_smd_open(wcn);
+       if (ret) {
+               wcn36xx_err("Failed to open smd channel: %d\n", ret);
+               goto out_err;
+       }
+
+       /* Allocate memory pools for Mgmt BD headers and Data BD headers */
+       ret = wcn36xx_dxe_allocate_mem_pools(wcn);
+       if (ret) {
+               wcn36xx_err("Failed to alloc DXE mempool: %d\n", ret);
+               goto out_smd_close;
+       }
+
+       ret = wcn36xx_dxe_alloc_ctl_blks(wcn);
+       if (ret) {
+               wcn36xx_err("Failed to alloc DXE ctl blocks: %d\n", ret);
+               goto out_free_dxe_pool;
+       }
+
+       wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
+       if (!wcn->hal_buf) {
+               wcn36xx_err("Failed to allocate smd buf\n");
+               ret = -ENOMEM;
+               goto out_free_dxe_ctl;
+       }
+
+       ret = wcn36xx_smd_load_nv(wcn);
+       if (ret) {
+               wcn36xx_err("Failed to push NV to chip\n");
+               goto out_free_smd_buf;
+       }
+
+       ret = wcn36xx_smd_start(wcn);
+       if (ret) {
+               wcn36xx_err("Failed to start chip\n");
+               goto out_free_smd_buf;
+       }
+
+       /* DMA channel initialization */
+       ret = wcn36xx_dxe_init(wcn);
+       if (ret) {
+               wcn36xx_err("DXE init failed\n");
+               goto out_smd_stop;
+       }
+
+       wcn36xx_debugfs_init(wcn);
+
+       if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+               ret = wcn36xx_smd_feature_caps_exchange(wcn);
+               if (ret)
+                       wcn36xx_warn("Exchange feature caps failed\n");
+       }
+       INIT_LIST_HEAD(&wcn->vif_list);
+       return 0;
+
+out_smd_stop:
+       wcn36xx_smd_stop(wcn);
+out_free_smd_buf:
+       kfree(wcn->hal_buf);
+out_free_dxe_pool:
+       wcn36xx_dxe_free_mem_pools(wcn);
+out_free_dxe_ctl:
+       wcn36xx_dxe_free_ctl_blks(wcn);
+out_smd_close:
+       wcn36xx_smd_close(wcn);
+out_err:
+       return ret;
+}
+
+static void wcn36xx_stop(struct ieee80211_hw *hw)
+{
+       struct wcn36xx *wcn = hw->priv;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
+
+       wcn36xx_debugfs_exit(wcn);
+       wcn36xx_smd_stop(wcn);
+       wcn36xx_dxe_deinit(wcn);
+       wcn36xx_smd_close(wcn);
+
+       wcn36xx_dxe_free_mem_pools(wcn);
+       wcn36xx_dxe_free_ctl_blks(wcn);
+
+       kfree(wcn->hal_buf);
+}
+
+static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+{
+       struct wcn36xx *wcn = hw->priv;
+       struct ieee80211_vif *vif = NULL;
+       struct wcn36xx_vif *tmp;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
+
+       if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+               int ch = WCN36XX_HW_CHANNEL(wcn);
+               wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
+                           ch);
+               list_for_each_entry(tmp, &wcn->vif_list, list) {
+                       vif = container_of((void *)tmp,
+                                          struct ieee80211_vif,
+                                          drv_priv);
+                       wcn36xx_smd_switch_channel(wcn, vif, ch);
+               }
+       }
+
+       return 0;
+}
+
+#define WCN36XX_SUPPORTED_FILTERS (0)
+
+static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
+                                    unsigned int changed,
+                                    unsigned int *total, u64 multicast)
+{
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
+
+       *total &= WCN36XX_SUPPORTED_FILTERS;
+}
+
+static void wcn36xx_tx(struct ieee80211_hw *hw,
+                      struct ieee80211_tx_control *control,
+                      struct sk_buff *skb)
+{
+       struct wcn36xx *wcn = hw->priv;
+       struct wcn36xx_sta *sta_priv = NULL;
+
+       if (control->sta)
+               sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+
+       if (wcn36xx_start_tx(wcn, sta_priv, skb))
+               ieee80211_free_txskb(wcn->hw, skb);
+}
+
+static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+                          struct ieee80211_vif *vif,
+                          struct ieee80211_sta *sta,
+                          struct ieee80211_key_conf *key_conf)
+{
+       struct wcn36xx *wcn = hw->priv;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_sta *sta_priv = vif_priv->sta;
+       int ret = 0;
+       u8 key[WLAN_MAX_KEY_LEN];
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 set key\n");
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "Key: cmd=0x%x algo:0x%x, id:%d, len:%d flags 0x%x\n",
+                   cmd, key_conf->cipher, key_conf->keyidx,
+                   key_conf->keylen, key_conf->flags);
+       wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "KEY: ",
+                        key_conf->key,
+                        key_conf->keylen);
+
+       switch (key_conf->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+               vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+               break;
+       case WLAN_CIPHER_SUITE_WEP104:
+               vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
+               break;
+       case WLAN_CIPHER_SUITE_CCMP:
+               vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               vif_priv->encrypt_type = WCN36XX_HAL_ED_TKIP;
+               break;
+       default:
+               wcn36xx_err("Unsupported key type 0x%x\n",
+                             key_conf->cipher);
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
+       switch (cmd) {
+       case SET_KEY:
+               if (WCN36XX_HAL_ED_TKIP == vif_priv->encrypt_type) {
+                       /*
+                        * Supplicant is sending key in the wrong order:
+                        * Temporal Key (16 b) - TX MIC (8 b) - RX MIC (8 b)
+                        * but HW expects it to be in the order as described in
+                        * IEEE 802.11 spec (see chapter 11.7) like this:
+                        * Temporal Key (16 b) - RX MIC (8 b) - TX MIC (8 b)
+                        */
+                       memcpy(key, key_conf->key, 16);
+                       memcpy(key + 16, key_conf->key + 24, 8);
+                       memcpy(key + 24, key_conf->key + 16, 8);
+               } else {
+                       memcpy(key, key_conf->key, key_conf->keylen);
+               }
+
+               if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
+                       sta_priv->is_data_encrypted = true;
+                       /* Reconfigure bss with encrypt_type */
+                       if (NL80211_IFTYPE_STATION == vif->type)
+                               wcn36xx_smd_config_bss(wcn,
+                                                      vif,
+                                                      sta,
+                                                      sta->addr,
+                                                      true);
+
+                       wcn36xx_smd_set_stakey(wcn,
+                               vif_priv->encrypt_type,
+                               key_conf->keyidx,
+                               key_conf->keylen,
+                               key,
+                               get_sta_index(vif, sta_priv));
+               } else {
+                       wcn36xx_smd_set_bsskey(wcn,
+                               vif_priv->encrypt_type,
+                               key_conf->keyidx,
+                               key_conf->keylen,
+                               key);
+                       if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
+                           (WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
+                               sta_priv->is_data_encrypted = true;
+                               wcn36xx_smd_set_stakey(wcn,
+                                       vif_priv->encrypt_type,
+                                       key_conf->keyidx,
+                                       key_conf->keylen,
+                                       key,
+                                       get_sta_index(vif, sta_priv));
+                       }
+               }
+               break;
+       case DISABLE_KEY:
+               if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+                       wcn36xx_smd_remove_bsskey(wcn,
+                               vif_priv->encrypt_type,
+                               key_conf->keyidx);
+               } else {
+                       sta_priv->is_data_encrypted = false;
+                       /* do not remove key if disassociated */
+                       if (sta_priv->aid)
+                               wcn36xx_smd_remove_stakey(wcn,
+                                       vif_priv->encrypt_type,
+                                       key_conf->keyidx,
+                                       get_sta_index(vif, sta_priv));
+               }
+               break;
+       default:
+               wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
+               ret = -EOPNOTSUPP;
+               goto out;
+               break;
+       }
+
+out:
+       return ret;
+}
+
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw)
+{
+       struct wcn36xx *wcn = hw->priv;
+
+       wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
+       wcn36xx_smd_start_scan(wcn);
+}
+
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw)
+{
+       struct wcn36xx *wcn = hw->priv;
+
+       wcn36xx_smd_end_scan(wcn);
+       wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+}
+
+static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
+                                        enum ieee80211_band band)
+{
+       int i, size;
+       u16 *rates_table;
+       struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+       u32 rates = sta->supp_rates[band];
+
+       memset(&sta_priv->supported_rates, 0,
+               sizeof(sta_priv->supported_rates));
+       sta_priv->supported_rates.op_rate_mode = STA_11n;
+
+       size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
+       rates_table = sta_priv->supported_rates.dsss_rates;
+       if (band == IEEE80211_BAND_2GHZ) {
+               for (i = 0; i < size; i++) {
+                       if (rates & 0x01) {
+                               rates_table[i] = wcn_2ghz_rates[i].hw_value;
+                               rates = rates >> 1;
+                       }
+               }
+       }
+
+       size = ARRAY_SIZE(sta_priv->supported_rates.ofdm_rates);
+       rates_table = sta_priv->supported_rates.ofdm_rates;
+       for (i = 0; i < size; i++) {
+               if (rates & 0x01) {
+                       rates_table[i] = wcn_5ghz_rates[i].hw_value;
+                       rates = rates >> 1;
+               }
+       }
+
+       if (sta->ht_cap.ht_supported) {
+               BUILD_BUG_ON(sizeof(sta->ht_cap.mcs.rx_mask) >
+                       sizeof(sta_priv->supported_rates.supported_mcs_set));
+               memcpy(sta_priv->supported_rates.supported_mcs_set,
+                      sta->ht_cap.mcs.rx_mask,
+                      sizeof(sta->ht_cap.mcs.rx_mask));
+       }
+}
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
+{
+       u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
+               HW_RATE_INDEX_6MBPS,
+               HW_RATE_INDEX_9MBPS,
+               HW_RATE_INDEX_12MBPS,
+               HW_RATE_INDEX_18MBPS,
+               HW_RATE_INDEX_24MBPS,
+               HW_RATE_INDEX_36MBPS,
+               HW_RATE_INDEX_48MBPS,
+               HW_RATE_INDEX_54MBPS
+       };
+       u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES] = {
+               HW_RATE_INDEX_1MBPS,
+               HW_RATE_INDEX_2MBPS,
+               HW_RATE_INDEX_5_5MBPS,
+               HW_RATE_INDEX_11MBPS
+       };
+
+       rates->op_rate_mode = STA_11n;
+       memcpy(rates->dsss_rates, dsss_rates,
+               sizeof(*dsss_rates) * WCN36XX_HAL_NUM_DSSS_RATES);
+       memcpy(rates->ofdm_rates, ofdm_rates,
+               sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
+       rates->supported_mcs_set[0] = 0xFF;
+}
+static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif,
+                                    struct ieee80211_bss_conf *bss_conf,
+                                    u32 changed)
+{
+       struct wcn36xx *wcn = hw->priv;
+       struct sk_buff *skb = NULL;
+       u16 tim_off, tim_len;
+       enum wcn36xx_hal_link_state link_state;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
+                   vif, changed);
+
+       if (changed & BSS_CHANGED_BEACON_INFO) {
+               wcn36xx_dbg(WCN36XX_DBG_MAC,
+                           "mac bss changed dtim period %d\n",
+                           bss_conf->dtim_period);
+
+               vif_priv->dtim_period = bss_conf->dtim_period;
+       }
+
+       if (changed & BSS_CHANGED_PS) {
+               wcn36xx_dbg(WCN36XX_DBG_MAC,
+                           "mac bss PS set %d\n",
+                           bss_conf->ps);
+               if (bss_conf->ps) {
+                       wcn36xx_pmc_enter_bmps_state(wcn, vif);
+               } else {
+                       wcn36xx_pmc_exit_bmps_state(wcn, vif);
+               }
+       }
+
+       if (changed & BSS_CHANGED_BSSID) {
+               wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
+                           bss_conf->bssid);
+
+               if (!is_zero_ether_addr(bss_conf->bssid)) {
+                       vif_priv->is_joining = true;
+                       vif_priv->bss_index = 0xff;
+                       wcn36xx_smd_join(wcn, bss_conf->bssid,
+                                        vif->addr, WCN36XX_HW_CHANNEL(wcn));
+                       wcn36xx_smd_config_bss(wcn, vif, NULL,
+                                              bss_conf->bssid, false);
+               } else {
+                       vif_priv->is_joining = false;
+                       wcn36xx_smd_delete_bss(wcn, vif);
+               }
+       }
+
+       if (changed & BSS_CHANGED_SSID) {
+               wcn36xx_dbg(WCN36XX_DBG_MAC,
+                           "mac bss changed ssid\n");
+               wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
+                                bss_conf->ssid, bss_conf->ssid_len);
+
+               vif_priv->ssid.length = bss_conf->ssid_len;
+               memcpy(&vif_priv->ssid.ssid,
+                      bss_conf->ssid,
+                      bss_conf->ssid_len);
+       }
+
+       if (changed & BSS_CHANGED_ASSOC) {
+               vif_priv->is_joining = false;
+               if (bss_conf->assoc) {
+                       struct ieee80211_sta *sta;
+                       struct wcn36xx_sta *sta_priv;
+
+                       wcn36xx_dbg(WCN36XX_DBG_MAC,
+                                   "mac assoc bss %pM vif %pM AID=%d\n",
+                                    bss_conf->bssid,
+                                    vif->addr,
+                                    bss_conf->aid);
+
+                       rcu_read_lock();
+                       sta = ieee80211_find_sta(vif, bss_conf->bssid);
+                       if (!sta) {
+                               wcn36xx_err("sta %pM is not found\n",
+                                             bss_conf->bssid);
+                               rcu_read_unlock();
+                               goto out;
+                       }
+                       sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+                       wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+
+                       wcn36xx_smd_set_link_st(wcn, bss_conf->bssid,
+                               vif->addr,
+                               WCN36XX_HAL_LINK_POSTASSOC_STATE);
+                       wcn36xx_smd_config_bss(wcn, vif, sta,
+                                              bss_conf->bssid,
+                                              true);
+                       sta_priv->aid = bss_conf->aid;
+                       /*
+                        * config_sta must be called here because this is the
+                        * place where the AID is available.
+                        */
+                       wcn36xx_smd_config_sta(wcn, vif, sta);
+                       rcu_read_unlock();
+               } else {
+                       wcn36xx_dbg(WCN36XX_DBG_MAC,
+                                   "disassociated bss %pM vif %pM AID=%d\n",
+                                   bss_conf->bssid,
+                                   vif->addr,
+                                   bss_conf->aid);
+                       wcn36xx_smd_set_link_st(wcn,
+                                               bss_conf->bssid,
+                                               vif->addr,
+                                               WCN36XX_HAL_LINK_IDLE_STATE);
+               }
+       }
+
+       if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+               wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed ap probe resp\n");
+               skb = ieee80211_proberesp_get(hw, vif);
+               if (!skb) {
+                       wcn36xx_err("failed to alloc probereq skb\n");
+                       goto out;
+               }
+
+               wcn36xx_smd_update_proberesp_tmpl(wcn, vif, skb);
+               dev_kfree_skb(skb);
+       }
+
+       if (changed & BSS_CHANGED_BEACON_ENABLED) {
+               wcn36xx_dbg(WCN36XX_DBG_MAC,
+                           "mac bss changed beacon enabled %d\n",
+                           bss_conf->enable_beacon);
+
+               if (bss_conf->enable_beacon) {
+                       vif_priv->bss_index = 0xff;
+                       wcn36xx_smd_config_bss(wcn, vif, NULL,
+                                              vif->addr, false);
+                       skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
+                                                      &tim_len);
+                       if (!skb) {
+                               wcn36xx_err("failed to alloc beacon skb\n");
+                               goto out;
+                       }
+                       wcn36xx_smd_send_beacon(wcn, vif, skb, tim_off, 0);
+                       dev_kfree_skb(skb);
+
+                       if (vif->type == NL80211_IFTYPE_ADHOC ||
+                           vif->type == NL80211_IFTYPE_MESH_POINT)
+                               link_state = WCN36XX_HAL_LINK_IBSS_STATE;
+                       else
+                               link_state = WCN36XX_HAL_LINK_AP_STATE;
+
+                       wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+                                               link_state);
+               } else {
+                       wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
+                                               WCN36XX_HAL_LINK_IDLE_STATE);
+                       wcn36xx_smd_delete_bss(wcn, vif);
+               }
+       }
+out:
+       return;
+}
+
+/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
+static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+       struct wcn36xx *wcn = hw->priv;
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
+
+       wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
+       return 0;
+}
+
+static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
+                                    struct ieee80211_vif *vif)
+{
+       struct wcn36xx *wcn = hw->priv;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
+
+       list_del(&vif_priv->list);
+       wcn36xx_smd_delete_sta_self(wcn, vif->addr);
+}
+
+static int wcn36xx_add_interface(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif)
+{
+       struct wcn36xx *wcn = hw->priv;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
+                   vif, vif->type);
+
+       if (!(NL80211_IFTYPE_STATION == vif->type ||
+             NL80211_IFTYPE_AP == vif->type ||
+             NL80211_IFTYPE_ADHOC == vif->type ||
+             NL80211_IFTYPE_MESH_POINT == vif->type)) {
+               wcn36xx_warn("Unsupported interface type requested: %d\n",
+                            vif->type);
+               return -EOPNOTSUPP;
+       }
+
+       list_add(&vif_priv->list, &wcn->vif_list);
+       wcn36xx_smd_add_sta_self(wcn, vif);
+
+       return 0;
+}
+
+static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                          struct ieee80211_sta *sta)
+{
+       struct wcn36xx *wcn = hw->priv;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
+                   vif, sta->addr);
+
+       vif_priv->sta = sta_priv;
+       sta_priv->vif = vif_priv;
+       /*
+        * In STA mode the HW will be configured on BSS_CHANGED_ASSOC because
+        * at this stage the AID is not available yet.
+        */
+       if (NL80211_IFTYPE_STATION != vif->type) {
+               wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
+               sta_priv->aid = sta->aid;
+               wcn36xx_smd_config_sta(wcn, vif, sta);
+       }
+       return 0;
+}
+
+static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
+                             struct ieee80211_vif *vif,
+                             struct ieee80211_sta *sta)
+{
+       struct wcn36xx *wcn = hw->priv;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
+                   vif, sta->addr, sta_priv->sta_index);
+
+       wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
+       vif_priv->sta = NULL;
+       sta_priv->vif = NULL;
+       return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
+{
+       struct wcn36xx *wcn = hw->priv;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
+
+       flush_workqueue(wcn->hal_ind_wq);
+       wcn36xx_smd_set_power_params(wcn, true);
+       return 0;
+}
+
+static int wcn36xx_resume(struct ieee80211_hw *hw)
+{
+       struct wcn36xx *wcn = hw->priv;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
+
+       flush_workqueue(wcn->hal_ind_wq);
+       wcn36xx_smd_set_power_params(wcn, false);
+       return 0;
+}
+
+#endif
+
+static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
+                   struct ieee80211_vif *vif,
+                   enum ieee80211_ampdu_mlme_action action,
+                   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+                   u8 buf_size)
+{
+       struct wcn36xx *wcn = hw->priv;
+       struct wcn36xx_sta *sta_priv = NULL;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
+                   action, tid);
+
+       sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+       switch (action) {
+       case IEEE80211_AMPDU_RX_START:
+               sta_priv->tid = tid;
+               wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
+                       get_sta_index(vif, sta_priv));
+               wcn36xx_smd_add_ba(wcn);
+               wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
+               ieee80211_start_tx_ba_session(sta, tid, 0);
+               break;
+       case IEEE80211_AMPDU_RX_STOP:
+               wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
+               break;
+       case IEEE80211_AMPDU_TX_START:
+               ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+       case IEEE80211_AMPDU_TX_OPERATIONAL:
+               wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
+                       get_sta_index(vif, sta_priv));
+               break;
+       case IEEE80211_AMPDU_TX_STOP_FLUSH:
+       case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+       case IEEE80211_AMPDU_TX_STOP_CONT:
+               ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               break;
+       default:
+               wcn36xx_err("Unknown AMPDU action\n");
+       }
+
+       return 0;
+}
+
+static const struct ieee80211_ops wcn36xx_ops = {
+       .start                  = wcn36xx_start,
+       .stop                   = wcn36xx_stop,
+       .add_interface          = wcn36xx_add_interface,
+       .remove_interface       = wcn36xx_remove_interface,
+#ifdef CONFIG_PM
+       .suspend                = wcn36xx_suspend,
+       .resume                 = wcn36xx_resume,
+#endif
+       .config                 = wcn36xx_config,
+       .configure_filter       = wcn36xx_configure_filter,
+       .tx                     = wcn36xx_tx,
+       .set_key                = wcn36xx_set_key,
+       .sw_scan_start          = wcn36xx_sw_scan_start,
+       .sw_scan_complete       = wcn36xx_sw_scan_complete,
+       .bss_info_changed       = wcn36xx_bss_info_changed,
+       .set_rts_threshold      = wcn36xx_set_rts_threshold,
+       .sta_add                = wcn36xx_sta_add,
+       .sta_remove             = wcn36xx_sta_remove,
+       .ampdu_action           = wcn36xx_ampdu_action,
+};
+
+static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
+{
+       int ret = 0;
+
+       static const u32 cipher_suites[] = {
+               WLAN_CIPHER_SUITE_WEP40,
+               WLAN_CIPHER_SUITE_WEP104,
+               WLAN_CIPHER_SUITE_TKIP,
+               WLAN_CIPHER_SUITE_CCMP,
+       };
+
+       wcn->hw->flags = IEEE80211_HW_SIGNAL_DBM |
+               IEEE80211_HW_HAS_RATE_CONTROL |
+               IEEE80211_HW_SUPPORTS_PS |
+               IEEE80211_HW_CONNECTION_MONITOR |
+               IEEE80211_HW_AMPDU_AGGREGATION |
+               IEEE80211_HW_TIMING_BEACON_ONLY;
+
+       wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+               BIT(NL80211_IFTYPE_AP) |
+               BIT(NL80211_IFTYPE_ADHOC) |
+               BIT(NL80211_IFTYPE_MESH_POINT);
+
+       wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
+       wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+
+       wcn->hw->wiphy->cipher_suites = cipher_suites;
+       wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+       wcn->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+#ifdef CONFIG_PM
+       wcn->hw->wiphy->wowlan = &wowlan_support;
+#endif
+
+       wcn->hw->max_listen_interval = 200;
+
+       wcn->hw->queues = 4;
+
+       SET_IEEE80211_DEV(wcn->hw, wcn->dev);
+
+       wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
+       wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
+
+       return ret;
+}
+
+static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
+                                         struct platform_device *pdev)
+{
+       struct resource *res;
+       /* Set TX IRQ */
+       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+                                          "wcnss_wlantx_irq");
+       if (!res) {
+               wcn36xx_err("failed to get tx_irq\n");
+               return -ENOENT;
+       }
+       wcn->tx_irq = res->start;
+
+       /* Set RX IRQ */
+       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+                                          "wcnss_wlanrx_irq");
+       if (!res) {
+               wcn36xx_err("failed to get rx_irq\n");
+               return -ENOENT;
+       }
+       wcn->rx_irq = res->start;
+
+       /* Map the memory */
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                "wcnss_mmio");
+       if (!res) {
+               wcn36xx_err("failed to get mmio\n");
+               return -ENOENT;
+       }
+       wcn->mmio = ioremap(res->start, resource_size(res));
+       if (!wcn->mmio) {
+               wcn36xx_err("failed to map io memory\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static int wcn36xx_probe(struct platform_device *pdev)
+{
+       struct ieee80211_hw *hw;
+       struct wcn36xx *wcn;
+       int ret;
+       u8 addr[ETH_ALEN];
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
+
+       hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
+       if (!hw) {
+               wcn36xx_err("failed to alloc hw\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+       platform_set_drvdata(pdev, hw);
+       wcn = hw->priv;
+       wcn->hw = hw;
+       wcn->dev = &pdev->dev;
+       wcn->ctrl_ops = pdev->dev.platform_data;
+
+       mutex_init(&wcn->hal_mutex);
+
+       if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+               wcn36xx_info("mac address: %pM\n", addr);
+               SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
+       }
+
+       ret = wcn36xx_platform_get_resources(wcn, pdev);
+       if (ret)
+               goto out_wq;
+
+       wcn36xx_init_ieee80211(wcn);
+       ret = ieee80211_register_hw(wcn->hw);
+       if (ret)
+               goto out_unmap;
+
+       return 0;
+
+out_unmap:
+       iounmap(wcn->mmio);
+out_wq:
+       ieee80211_free_hw(hw);
+out_err:
+       return ret;
+}
+static int wcn36xx_remove(struct platform_device *pdev)
+{
+       struct ieee80211_hw *hw = platform_get_drvdata(pdev);
+       struct wcn36xx *wcn = hw->priv;
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+
+       mutex_destroy(&wcn->hal_mutex);
+
+       ieee80211_unregister_hw(hw);
+       iounmap(wcn->mmio);
+       ieee80211_free_hw(hw);
+
+       return 0;
+}
+static const struct platform_device_id wcn36xx_platform_id_table[] = {
+       {
+               .name = "wcn36xx",
+               .driver_data = 0
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table);
+
+static struct platform_driver wcn36xx_driver = {
+       .probe      = wcn36xx_probe,
+       .remove     = wcn36xx_remove,
+       .driver         = {
+               .name   = "wcn36xx",
+               .owner  = THIS_MODULE,
+       },
+       .id_table    = wcn36xx_platform_id_table,
+};
+
+static int __init wcn36xx_init(void)
+{
+       platform_driver_register(&wcn36xx_driver);
+       return 0;
+}
+module_init(wcn36xx_init);
+
+static void __exit wcn36xx_exit(void)
+{
+       platform_driver_unregister(&wcn36xx_driver);
+}
+module_exit(wcn36xx_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
+MODULE_FIRMWARE(WLAN_NV_FILE);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
new file mode 100644 (file)
index 0000000..28b515c
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "wcn36xx.h"
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+                                struct ieee80211_vif *vif)
+{
+       int ret = 0;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       /* TODO: Make sure the TX chain is clean */
+       ret = wcn36xx_smd_enter_bmps(wcn, vif);
+       if (!ret) {
+               wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
+               vif_priv->pw_state = WCN36XX_BMPS;
+       } else {
+               /*
+                * One reason the HW will not enter BMPS is that the driver
+                * tried to enter BMPS before the first beacon was received,
+                * right after authentication completed.
+                */
+               wcn36xx_err("Can not enter BMPS!\n");
+       }
+       return ret;
+}
+
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+                               struct ieee80211_vif *vif)
+{
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+
+       if (WCN36XX_BMPS != vif_priv->pw_state) {
+               wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
+               return -EINVAL;
+       }
+       wcn36xx_smd_exit_bmps(wcn, vif);
+       vif_priv->pw_state = WCN36XX_FULL_POWER;
+       return 0;
+}
+
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+                                         struct ieee80211_vif *vif)
+{
+       wcn36xx_dbg(WCN36XX_DBG_PMC, "%s\n", __func__);
+       return wcn36xx_smd_keep_alive_req(wcn, vif,
+                                         WCN36XX_HAL_KEEP_ALIVE_NULL_PKT);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.h b/drivers/net/wireless/ath/wcn36xx/pmc.h
new file mode 100644 (file)
index 0000000..f72ed68
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_PMC_H_
+#define _WCN36XX_PMC_H_
+
+struct wcn36xx;
+
+enum wcn36xx_power_state {
+       WCN36XX_FULL_POWER,
+       WCN36XX_BMPS
+};
+
+int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
+                                struct ieee80211_vif *vif);
+int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
+                               struct ieee80211_vif *vif);
+int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
+                                         struct ieee80211_vif *vif);
+#endif /* _WCN36XX_PMC_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
new file mode 100644 (file)
index 0000000..f8c3a10
--- /dev/null
@@ -0,0 +1,2126 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/bitops.h>
+#include "smd.h"
+
+static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
+{
+       struct wcn36xx_hal_cfg *entry;
+       u32 *val;
+
+       if (*len + sizeof(*entry) + sizeof(u32) >= WCN36XX_HAL_BUF_SIZE) {
+               wcn36xx_err("Not enough room for TLV entry\n");
+               return -ENOMEM;
+       }
+
+       entry = (struct wcn36xx_hal_cfg *) (wcn->hal_buf + *len);
+       entry->id = id;
+       entry->len = sizeof(u32);
+       entry->pad_bytes = 0;
+       entry->reserve = 0;
+
+       val = (u32 *) (entry + 1);
+       *val = value;
+
+       *len += sizeof(*entry) + sizeof(u32);
+
+       return 0;
+}
+
+static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
+               struct ieee80211_sta *sta,
+               struct wcn36xx_hal_config_bss_params *bss_params)
+{
+       if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+               bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
+       else if (sta && sta->ht_cap.ht_supported)
+               bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+       else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+               bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
+       else
+               bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
+}
+
+static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
+{
+       return caps & flag ? 1 : 0;
+}
+static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
+               struct ieee80211_sta *sta,
+               struct wcn36xx_hal_config_bss_params *bss_params)
+{
+       if (sta && sta->ht_cap.ht_supported) {
+               unsigned long caps = sta->ht_cap.cap;
+               bss_params->ht = sta->ht_cap.ht_supported;
+               bss_params->tx_channel_width_set = is_cap_supported(caps,
+                       IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+               bss_params->lsig_tx_op_protection_full_support =
+                       is_cap_supported(caps,
+                                        IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+               bss_params->ht_oper_mode = vif->bss_conf.ht_operation_mode;
+               bss_params->lln_non_gf_coexist =
+                       !!(vif->bss_conf.ht_operation_mode &
+                          IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+               /* IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT */
+               bss_params->dual_cts_protection = 0;
+               /* IEEE80211_HT_OP_MODE_PROTECTION_20MHZ */
+               bss_params->ht20_coexist = 0;
+       }
+}
+
+static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
+               struct wcn36xx_hal_config_sta_params *sta_params)
+{
+       if (sta->ht_cap.ht_supported) {
+               unsigned long caps = sta->ht_cap.cap;
+               sta_params->ht_capable = sta->ht_cap.ht_supported;
+               sta_params->tx_channel_width_set = is_cap_supported(caps,
+                       IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+               sta_params->lsig_txop_protection = is_cap_supported(caps,
+                       IEEE80211_HT_CAP_LSIG_TXOP_PROT);
+
+               sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
+               sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
+               sta_params->max_amsdu_size = is_cap_supported(caps,
+                       IEEE80211_HT_CAP_MAX_AMSDU);
+               sta_params->sgi_20Mhz = is_cap_supported(caps,
+                       IEEE80211_HT_CAP_SGI_20);
+               sta_params->sgi_40mhz = is_cap_supported(caps,
+                       IEEE80211_HT_CAP_SGI_40);
+               sta_params->green_field_capable = is_cap_supported(caps,
+                       IEEE80211_HT_CAP_GRN_FLD);
+               sta_params->delayed_ba_support = is_cap_supported(caps,
+                       IEEE80211_HT_CAP_DELAY_BA);
+               sta_params->dsss_cck_mode_40mhz = is_cap_supported(caps,
+                       IEEE80211_HT_CAP_DSSSCCK40);
+       }
+}
+
+static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
+               struct ieee80211_vif *vif,
+               struct ieee80211_sta *sta,
+               struct wcn36xx_hal_config_sta_params *sta_params)
+{
+       struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+       struct wcn36xx_sta *priv_sta = NULL;
+       if (vif->type == NL80211_IFTYPE_ADHOC ||
+           vif->type == NL80211_IFTYPE_AP ||
+           vif->type == NL80211_IFTYPE_MESH_POINT) {
+               sta_params->type = 1;
+               sta_params->sta_index = 0xFF;
+       } else {
+               sta_params->type = 0;
+               sta_params->sta_index = 1;
+       }
+
+       sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+       /*
+        * In STA mode ieee80211_sta contains the BSSID and ieee80211_vif
+        * contains our MAC address. In AP mode we are the BSS, so the vif
+        * contains the BSSID and ieee80211_sta contains the station's MAC.
+        */
+       if (NL80211_IFTYPE_STATION == vif->type)
+               memcpy(&sta_params->mac, vif->addr, ETH_ALEN);
+       else
+               memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
+
+       sta_params->encrypt_type = priv_vif->encrypt_type;
+       sta_params->short_preamble_supported =
+               !(WCN36XX_FLAGS(wcn) &
+                 IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE);
+
+       sta_params->rifs_mode = 0;
+       sta_params->rmf = 0;
+       sta_params->action = 0;
+       sta_params->uapsd = 0;
+       sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
+       sta_params->max_ampdu_duration = 0;
+       sta_params->bssid_index = priv_vif->bss_index;
+       sta_params->p2p = 0;
+
+       if (sta) {
+               priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+               if (NL80211_IFTYPE_STATION == vif->type)
+                       memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
+               else
+                       memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
+               sta_params->wmm_enabled = sta->wme;
+               sta_params->max_sp_len = sta->max_sp;
+               sta_params->aid = priv_sta->aid;
+               wcn36xx_smd_set_sta_ht_params(sta, sta_params);
+               memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
+                       sizeof(priv_sta->supported_rates));
+       } else {
+               wcn36xx_set_default_rates(&sta_params->supported_rates);
+       }
+}
+
+static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
+{
+       int ret = 0;
+       wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
+
+       init_completion(&wcn->hal_rsp_compl);
+       ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+       if (ret) {
+               wcn36xx_err("HAL TX failed\n");
+               goto out;
+       }
+       if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
+               msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
+               wcn36xx_err("Timeout while waiting SMD response\n");
+               ret = -ETIME;
+               goto out;
+       }
+out:
+       return ret;
+}
+
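+/* Zero a HAL message and fill in its header (type, version, length) */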
+#define INIT_HAL_MSG(msg_body, type) \
+       do {                                                            \
+               memset(&msg_body, 0, sizeof(msg_body));                 \
+               msg_body.header.msg_type = type;                        \
+               msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
+               msg_body.header.len = sizeof(msg_body);                 \
+       } while (0)
+
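+/* Clear the send buffer and copy a fully built HAL message into it */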
+#define PREPARE_HAL_BUF(send_buf, msg_body) \
+       do {                                                    \
+               memset(send_buf, 0, msg_body.header.len);       \
+               memcpy(send_buf, &msg_body, sizeof(msg_body));  \
+       } while (0)
+
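+/*
+ * Check a status-only HAL response: verify that the buffer is long enough
+ * and that the firmware reported success.
+ */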
+static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
+{
+       struct wcn36xx_fw_msg_status_rsp *rsp;
+
+       if (len < sizeof(struct wcn36xx_hal_msg_header) +
+           sizeof(struct wcn36xx_fw_msg_status_rsp))
+               return -EIO;
+
+       rsp = (struct wcn36xx_fw_msg_status_rsp *)
+               (buf + sizeof(struct wcn36xx_hal_msg_header));
+
+       if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
+               return rsp->status;
+
+       return 0;
+}
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
+{
+       const struct firmware *nv;
+       struct nv_data *nv_d;
+       struct wcn36xx_hal_nv_img_download_req_msg msg_body;
+       int fw_bytes_left;
+       int ret;
+       u16 fm_offset = 0;
+
+       ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
+       if (ret) {
+               wcn36xx_err("Failed to load nv file %s: %d\n",
+                             WLAN_NV_FILE, ret);
+               goto out_free_nv;
+       }
+
+       nv_d = (struct nv_data *)nv->data;
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
+
+       msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
+
+       msg_body.frag_number = 0;
+       /* hal_buf must be protected by hal_mutex */
+       mutex_lock(&wcn->hal_mutex);
+
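+       /*
+        * The NV blob is sent in WCN36XX_NV_FRAGMENT_SIZE chunks; the last
+        * fragment is flagged so the firmware knows the download is complete.
+        */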
+       do {
+               fw_bytes_left = nv->size - fm_offset - 4;
+               if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
+                       msg_body.last_fragment = 0;
+                       msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
+               } else {
+                       msg_body.last_fragment = 1;
+                       msg_body.nv_img_buffer_size = fw_bytes_left;
+
+                       /* Do not forget to update the overall message length */
+                       msg_body.header.len = sizeof(msg_body) + fw_bytes_left;
+
+               }
+
+               /* Add load NV request message header */
+               memcpy(wcn->hal_buf, &msg_body, sizeof(msg_body));
+
+               /* Add NV body itself */
+               memcpy(wcn->hal_buf + sizeof(msg_body),
+                      &nv_d->table + fm_offset,
+                      msg_body.nv_img_buffer_size);
+
+               ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+               if (ret)
+                       goto out_unlock;
+               ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
+                                                  wcn->hal_rsp_len);
+               if (ret) {
+                       wcn36xx_err("hal_load_nv response failed err=%d\n",
+                                   ret);
+                       goto out_unlock;
+               }
+               msg_body.frag_number++;
+               fm_offset += WCN36XX_NV_FRAGMENT_SIZE;
+
+       } while (msg_body.last_fragment != 1);
+
+out_unlock:
+       mutex_unlock(&wcn->hal_mutex);
+out_free_nv:
+       release_firmware(nv);
+
+       return ret;
+}
+
+static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
+{
+       struct wcn36xx_hal_mac_start_rsp_msg *rsp;
+
+       if (len < sizeof(*rsp))
+               return -EIO;
+
+       rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf;
+
+       if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status)
+               return -EIO;
+
+       memcpy(wcn->crm_version, rsp->start_rsp_params.crm_version,
+              WCN36XX_HAL_VERSION_LENGTH);
+       memcpy(wcn->wlan_version, rsp->start_rsp_params.wlan_version,
+              WCN36XX_HAL_VERSION_LENGTH);
+
+       /* null terminate the strings, just in case */
+       wcn->crm_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+       wcn->wlan_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
+
+       wcn->fw_revision = rsp->start_rsp_params.version.revision;
+       wcn->fw_version = rsp->start_rsp_params.version.version;
+       wcn->fw_minor = rsp->start_rsp_params.version.minor;
+       wcn->fw_major = rsp->start_rsp_params.version.major;
+
+       wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n",
+                    wcn->wlan_version, wcn->crm_version);
+
+       wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n",
+                    wcn->fw_major, wcn->fw_minor,
+                    wcn->fw_version, wcn->fw_revision,
+                    rsp->start_rsp_params.stations,
+                    rsp->start_rsp_params.bssids);
+
+       return 0;
+}
+
+int wcn36xx_smd_start(struct wcn36xx *wcn)
+{
+       struct wcn36xx_hal_mac_start_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
+
+       msg_body.params.type = DRIVER_TYPE_PRODUCTION;
+       msg_body.params.len = 0;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
+                   msg_body.params.type);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_start failed\n");
+               goto out;
+       }
+
+       ret = wcn36xx_smd_start_rsp(wcn, wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_start response failed err=%d\n", ret);
+               goto out;
+       }
+
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_stop(struct wcn36xx *wcn)
+{
+       struct wcn36xx_hal_mac_stop_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
+
+       msg_body.stop_req_params.reason = HAL_STOP_TYPE_RF_KILL;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_stop failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_stop response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
+{
+       struct wcn36xx_hal_init_scan_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
+
+       msg_body.mode = mode;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "hal init scan mode %d\n", msg_body.mode);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_init_scan failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
+{
+       struct wcn36xx_hal_start_scan_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
+
+       msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start scan channel %d\n",
+                   msg_body.scan_channel);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_start_scan failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
+{
+       struct wcn36xx_hal_end_scan_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
+
+       msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "hal end scan channel %d\n",
+                   msg_body.scan_channel);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_end_scan failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+                           enum wcn36xx_hal_sys_mode mode)
+{
+       struct wcn36xx_hal_finish_scan_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
+
+       msg_body.mode = mode;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "hal finish scan mode %d\n",
+                   msg_body.mode);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_finish_scan failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
+{
+       struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
+       int ret = 0;
+
+       ret = wcn36xx_smd_rsp_status_check(buf, len);
+       if (ret)
+               return ret;
+       rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf;
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n",
+                   rsp->channel_number, rsp->status);
+       return ret;
+}
+
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+                              struct ieee80211_vif *vif, int ch)
+{
+       struct wcn36xx_hal_switch_channel_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
+
+       msg_body.channel_number = (u8)ch;
+       msg_body.tx_mgmt_power = 0xbf;
+       msg_body.max_tx_power = 0xbf;
+       memcpy(msg_body.self_sta_mac_addr, vif->addr, ETH_ALEN);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_switch_channel failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_switch_channel_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_switch_channel response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len)
+{
+       struct wcn36xx_hal_update_scan_params_resp *rsp;
+
+       rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf;
+
+       /* Remove the PNO version bit */
+       rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK));
+
+       if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) {
+               wcn36xx_warn("error response from update scan\n");
+               return rsp->status;
+       }
+
+       return 0;
+}
+
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn)
+{
+       struct wcn36xx_hal_update_scan_params_req msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
+
+       msg_body.dot11d_enabled = 0;
+       msg_body.dot11d_resolved = 0;
+       msg_body.channel_count = 26;
+       msg_body.active_min_ch_time = 60;
+       msg_body.active_max_ch_time = 120;
+       msg_body.passive_min_ch_time = 60;
+       msg_body.passive_max_ch_time = 110;
+       msg_body.state = 0;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal update scan params channel_count %d\n",
+                   msg_body.channel_count);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_update_scan_params failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_update_scan_params_rsp(wcn->hal_buf,
+                                                wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_update_scan_params response failed err=%d\n",
+                           ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
+                                       struct ieee80211_vif *vif,
+                                       void *buf,
+                                       size_t len)
+{
+       struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
+       struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+       if (len < sizeof(*rsp))
+               return -EINVAL;
+
+       rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf;
+
+       if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+               wcn36xx_warn("hal add sta self failure: %d\n",
+                            rsp->status);
+               return rsp->status;
+       }
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal add sta self status %d self_sta_index %d dpu_index %d\n",
+                   rsp->status, rsp->self_sta_index, rsp->dpu_index);
+
+       priv_vif->self_sta_index = rsp->self_sta_index;
+       priv_vif->self_dpu_desc_index = rsp->dpu_index;
+
+       return 0;
+}
+
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+       struct wcn36xx_hal_add_sta_self_req msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
+
+       memcpy(&msg_body.self_addr, vif->addr, ETH_ALEN);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal add sta self self_addr %pM status %d\n",
+                   msg_body.self_addr, msg_body.status);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_add_sta_self failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_add_sta_self_rsp(wcn,
+                                          vif,
+                                          wcn->hal_buf,
+                                          wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_add_sta_self response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
+{
+       struct wcn36xx_hal_del_sta_self_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
+
+       memcpy(&msg_body.self_addr, addr, ETH_ALEN);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_delete_sta_self failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_delete_sta_self response failed err=%d\n",
+                           ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
+{
+       struct wcn36xx_hal_delete_sta_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
+
+       msg_body.sta_index = sta_index;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal delete sta sta_index %d\n",
+                   msg_body.sta_index);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_delete_sta failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_delete_sta response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+static int wcn36xx_smd_join_rsp(void *buf, size_t len)
+{
+       struct wcn36xx_hal_join_rsp_msg *rsp;
+
+       if (wcn36xx_smd_rsp_status_check(buf, len))
+               return -EIO;
+
+       rsp = (struct wcn36xx_hal_join_rsp_msg *)buf;
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal rsp join status %d tx_mgmt_power %d\n",
+                   rsp->status, rsp->tx_mgmt_power);
+
+       return 0;
+}
+
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
+{
+       struct wcn36xx_hal_join_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
+
+       memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+       memcpy(&msg_body.self_sta_mac_addr, vif, ETH_ALEN);
+       msg_body.channel = ch;
+
+       if (conf_is_ht40_minus(&wcn->hw->conf))
+               msg_body.secondary_channel_offset =
+                       PHY_DOUBLE_CHANNEL_HIGH_PRIMARY;
+       else if (conf_is_ht40_plus(&wcn->hw->conf))
+               msg_body.secondary_channel_offset =
+                       PHY_DOUBLE_CHANNEL_LOW_PRIMARY;
+       else
+               msg_body.secondary_channel_offset =
+                       PHY_SINGLE_CHANNEL_CENTERED;
+
+       msg_body.link_state = WCN36XX_HAL_LINK_PREASSOC_STATE;
+
+       msg_body.max_tx_power = 0xbf;
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal join req bssid %pM self_sta_mac_addr %pM channel %d link_state %d\n",
+                   msg_body.bssid, msg_body.self_sta_mac_addr,
+                   msg_body.channel, msg_body.link_state);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_join failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_join_rsp(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_join response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+                           const u8 *sta_mac,
+                           enum wcn36xx_hal_link_state state)
+{
+       struct wcn36xx_hal_set_link_state_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
+
+       memcpy(&msg_body.bssid, bssid, ETH_ALEN);
+       memcpy(&msg_body.self_mac_addr, sta_mac, ETH_ALEN);
+       msg_body.state = state;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal set link state bssid %pM self_mac_addr %pM state %d\n",
+                   msg_body.bssid, msg_body.self_mac_addr, msg_body.state);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_set_link_st failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_set_link_st response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
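+/*
+ * Convert station parameters to the older, smaller v1 layout; only the
+ * fields present in that format are copied.
+ */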
+static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
+                       const struct wcn36xx_hal_config_sta_params *orig,
+                       struct wcn36xx_hal_config_sta_params_v1 *v1)
+{
+       /* convert orig to v1 format */
+       memcpy(&v1->bssid, orig->bssid, ETH_ALEN);
+       memcpy(&v1->mac, orig->mac, ETH_ALEN);
+       v1->aid = orig->aid;
+       v1->type = orig->type;
+       v1->listen_interval = orig->listen_interval;
+       v1->ht_capable = orig->ht_capable;
+
+       v1->max_ampdu_size = orig->max_ampdu_size;
+       v1->max_ampdu_density = orig->max_ampdu_density;
+       v1->sgi_40mhz = orig->sgi_40mhz;
+       v1->sgi_20Mhz = orig->sgi_20Mhz;
+
+       memcpy(&v1->supported_rates, &orig->supported_rates,
+              sizeof(orig->supported_rates));
+       v1->sta_index = orig->sta_index;
+}
+
+static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
+                                     struct ieee80211_sta *sta,
+                                     void *buf,
+                                     size_t len)
+{
+       struct wcn36xx_hal_config_sta_rsp_msg *rsp;
+       struct config_sta_rsp_params *params;
+       struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+
+       if (len < sizeof(*rsp))
+               return -EINVAL;
+
+       rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf;
+       params = &rsp->params;
+
+       if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+               wcn36xx_warn("hal config sta response failure: %d\n",
+                            params->status);
+               return -EIO;
+       }
+
+       sta_priv->sta_index = params->sta_index;
+       sta_priv->dpu_desc_index = params->dpu_index;
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+                   params->status, params->sta_index, params->bssid_index,
+                   params->p2p);
+
+       return 0;
+}
+
+static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
+                    const struct wcn36xx_hal_config_sta_req_msg *orig)
+{
+       struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
+       struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params;
+
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+
+       wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params,
+                                     &msg_body.sta_params);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+                   sta->action, sta->sta_index, sta->bssid_index,
+                   sta->bssid, sta->type, sta->mac, sta->aid);
+
+       return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+                          struct ieee80211_sta *sta)
+{
+       struct wcn36xx_hal_config_sta_req_msg msg;
+       struct wcn36xx_hal_config_sta_params *sta_params;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
+
+       sta_params = &msg.sta_params;
+
+       wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
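+       /* Firmware builds other than 1.2.2.24 are sent the older v1 message */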
+       if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+               ret = wcn36xx_smd_config_sta_v1(wcn, &msg);
+       } else {
+               PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+               wcn36xx_dbg(WCN36XX_DBG_HAL,
+                           "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+                           sta_params->action, sta_params->sta_index,
+                           sta_params->bssid_index, sta_params->bssid,
+                           sta_params->type, sta_params->mac, sta_params->aid);
+
+               ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+       }
+       if (ret) {
+               wcn36xx_err("Sending hal_config_sta failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_config_sta_rsp(wcn,
+                                        sta,
+                                        wcn->hal_buf,
+                                        wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_config_sta response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
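+/*
+ * v1 variant of the BSS config request: copy every field over and convert
+ * the embedded station parameters to the v1 layout.
+ */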
+static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+                       const struct wcn36xx_hal_config_bss_req_msg *orig)
+{
+       struct wcn36xx_hal_config_bss_req_msg_v1 msg_body;
+       struct wcn36xx_hal_config_bss_params_v1 *bss = &msg_body.bss_params;
+       struct wcn36xx_hal_config_sta_params_v1 *sta = &bss->sta;
+
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+       /* convert orig to v1 */
+       memcpy(&msg_body.bss_params.bssid,
+              &orig->bss_params.bssid, ETH_ALEN);
+       memcpy(&msg_body.bss_params.self_mac_addr,
+              &orig->bss_params.self_mac_addr, ETH_ALEN);
+
+       msg_body.bss_params.bss_type = orig->bss_params.bss_type;
+       msg_body.bss_params.oper_mode = orig->bss_params.oper_mode;
+       msg_body.bss_params.nw_type = orig->bss_params.nw_type;
+
+       msg_body.bss_params.short_slot_time_supported =
+               orig->bss_params.short_slot_time_supported;
+       msg_body.bss_params.lla_coexist = orig->bss_params.lla_coexist;
+       msg_body.bss_params.llb_coexist = orig->bss_params.llb_coexist;
+       msg_body.bss_params.llg_coexist = orig->bss_params.llg_coexist;
+       msg_body.bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
+       msg_body.bss_params.lln_non_gf_coexist =
+               orig->bss_params.lln_non_gf_coexist;
+
+       msg_body.bss_params.lsig_tx_op_protection_full_support =
+               orig->bss_params.lsig_tx_op_protection_full_support;
+       msg_body.bss_params.rifs_mode = orig->bss_params.rifs_mode;
+       msg_body.bss_params.beacon_interval = orig->bss_params.beacon_interval;
+       msg_body.bss_params.dtim_period = orig->bss_params.dtim_period;
+       msg_body.bss_params.tx_channel_width_set =
+               orig->bss_params.tx_channel_width_set;
+       msg_body.bss_params.oper_channel = orig->bss_params.oper_channel;
+       msg_body.bss_params.ext_channel = orig->bss_params.ext_channel;
+
+       msg_body.bss_params.reserved = orig->bss_params.reserved;
+
+       memcpy(&msg_body.bss_params.ssid,
+              &orig->bss_params.ssid,
+              sizeof(orig->bss_params.ssid));
+
+       msg_body.bss_params.action = orig->bss_params.action;
+       msg_body.bss_params.rateset = orig->bss_params.rateset;
+       msg_body.bss_params.ht = orig->bss_params.ht;
+       msg_body.bss_params.obss_prot_enabled =
+               orig->bss_params.obss_prot_enabled;
+       msg_body.bss_params.rmf = orig->bss_params.rmf;
+       msg_body.bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
+       msg_body.bss_params.dual_cts_protection =
+               orig->bss_params.dual_cts_protection;
+
+       msg_body.bss_params.max_probe_resp_retry_limit =
+               orig->bss_params.max_probe_resp_retry_limit;
+       msg_body.bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
+       msg_body.bss_params.proxy_probe_resp =
+               orig->bss_params.proxy_probe_resp;
+       msg_body.bss_params.edca_params_valid =
+               orig->bss_params.edca_params_valid;
+
+       memcpy(&msg_body.bss_params.acbe,
+              &orig->bss_params.acbe,
+              sizeof(orig->bss_params.acbe));
+       memcpy(&msg_body.bss_params.acbk,
+              &orig->bss_params.acbk,
+              sizeof(orig->bss_params.acbk));
+       memcpy(&msg_body.bss_params.acvi,
+              &orig->bss_params.acvi,
+              sizeof(orig->bss_params.acvi));
+       memcpy(&msg_body.bss_params.acvo,
+              &orig->bss_params.acvo,
+              sizeof(orig->bss_params.acvo));
+
+       msg_body.bss_params.ext_set_sta_key_param_valid =
+               orig->bss_params.ext_set_sta_key_param_valid;
+
+       memcpy(&msg_body.bss_params.ext_set_sta_key_param,
+              &orig->bss_params.ext_set_sta_key_param,
+              sizeof(orig->bss_params.ext_set_sta_key_param));
+
+       msg_body.bss_params.wcn36xx_hal_persona =
+               orig->bss_params.wcn36xx_hal_persona;
+       msg_body.bss_params.spectrum_mgt_enable =
+               orig->bss_params.spectrum_mgt_enable;
+       msg_body.bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
+       msg_body.bss_params.max_tx_power = orig->bss_params.max_tx_power;
+
+       wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
+                                     &msg_body.bss_params.sta);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+                   bss->bssid, bss->self_mac_addr, bss->bss_type,
+                   bss->oper_mode, bss->nw_type);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+                   sta->bssid, sta->action, sta->sta_index,
+                   sta->bssid_index, sta->aid, sta->type, sta->mac);
+
+       return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+}
+
+static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
+                                     struct ieee80211_vif *vif,
+                                     void *buf,
+                                     size_t len)
+{
+       struct wcn36xx_hal_config_bss_rsp_msg *rsp;
+       struct wcn36xx_hal_config_bss_rsp_params *params;
+       struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+
+       if (len < sizeof(*rsp))
+               return -EINVAL;
+
+       rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
+       params = &rsp->bss_rsp_params;
+
+       if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+               wcn36xx_warn("hal config bss response failure: %d\n",
+                            params->status);
+               return -EIO;
+       }
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
+                   " sta_idx %d self_idx %d bcast_idx %d mac %pM"
+                   " power %d ucast_dpu_signature %d\n",
+                   params->status, params->bss_index, params->dpu_desc_index,
+                   params->bss_sta_index, params->bss_self_sta_index,
+                   params->bss_bcast_sta_idx, params->mac,
+                   params->tx_mgmt_power, params->ucast_dpu_signature);
+
+       priv_vif->bss_index = params->bss_index;
+
+       if (priv_vif->sta) {
+               priv_vif->sta->bss_sta_index =  params->bss_sta_index;
+               priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+       }
+
+       priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+
+       return 0;
+}
+
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+                          struct ieee80211_sta *sta, const u8 *bssid,
+                          bool update)
+{
+       struct wcn36xx_hal_config_bss_req_msg msg;
+       struct wcn36xx_hal_config_bss_params *bss;
+       struct wcn36xx_hal_config_sta_params *sta_params;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
+
+       bss = &msg.bss_params;
+       sta_params = &bss->sta;
+
+       WARN_ON(is_zero_ether_addr(bssid));
+
+       memcpy(&bss->bssid, bssid, ETH_ALEN);
+
+       memcpy(bss->self_mac_addr, vif->addr, ETH_ALEN);
+
+       if (vif->type == NL80211_IFTYPE_STATION) {
+               bss->bss_type = WCN36XX_HAL_INFRASTRUCTURE_MODE;
+
+               /* STA */
+               bss->oper_mode = 1;
+               bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
+       } else if (vif->type == NL80211_IFTYPE_AP) {
+               bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
+
+               /* AP */
+               bss->oper_mode = 0;
+               bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
+       } else if (vif->type == NL80211_IFTYPE_ADHOC ||
+                  vif->type == NL80211_IFTYPE_MESH_POINT) {
+               bss->bss_type = WCN36XX_HAL_IBSS_MODE;
+
+               /* STA */
+               bss->oper_mode = 1;
+       } else {
+               wcn36xx_warn("Unknown type for bss config: %d\n", vif->type);
+       }
+
+       if (vif->type == NL80211_IFTYPE_STATION)
+               wcn36xx_smd_set_bss_nw_type(wcn, sta, bss);
+       else
+               bss->nw_type = WCN36XX_HAL_11N_NW_TYPE;
+
+       bss->short_slot_time_supported = vif->bss_conf.use_short_slot;
+       bss->lla_coexist = 0;
+       bss->llb_coexist = 0;
+       bss->llg_coexist = 0;
+       bss->rifs_mode = 0;
+       bss->beacon_interval = vif->bss_conf.beacon_int;
+       bss->dtim_period = vif_priv->dtim_period;
+
+       wcn36xx_smd_set_bss_ht_params(vif, sta, bss);
+
+       bss->oper_channel = WCN36XX_HW_CHANNEL(wcn);
+
+       if (conf_is_ht40_minus(&wcn->hw->conf))
+               bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+       else if (conf_is_ht40_plus(&wcn->hw->conf))
+               bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+       else
+               bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+
+       bss->reserved = 0;
+       wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+       /* wcn->ssid is only valid in AP and IBSS mode */
+       bss->ssid.length = vif_priv->ssid.length;
+       memcpy(bss->ssid.ssid, vif_priv->ssid.ssid, vif_priv->ssid.length);
+
+       bss->obss_prot_enabled = 0;
+       bss->rmf = 0;
+       bss->max_probe_resp_retry_limit = 0;
+       bss->hidden_ssid = vif->bss_conf.hidden_ssid;
+       bss->proxy_probe_resp = 0;
+       bss->edca_params_valid = 0;
+
+       /* FIXME: set acbe, acbk, acvi and acvo */
+
+       bss->ext_set_sta_key_param_valid = 0;
+
+       /* FIXME: set ext_set_sta_key_param */
+
+       bss->spectrum_mgt_enable = 0;
+       bss->tx_mgmt_power = 0;
+       bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
+
+       bss->action = update;
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+                   bss->bssid, bss->self_mac_addr, bss->bss_type,
+                   bss->oper_mode, bss->nw_type);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+                   sta_params->bssid, sta_params->action,
+                   sta_params->sta_index, sta_params->bssid_index,
+                   sta_params->aid, sta_params->type,
+                   sta_params->mac);
+
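+       /* As in config_sta, firmware other than 1.2.2.24 gets the v1 message */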
+       if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+               ret = wcn36xx_smd_config_bss_v1(wcn, &msg);
+       } else {
+               PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+               ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+       }
+       if (ret) {
+               wcn36xx_err("Sending hal_config_bss failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_config_bss_rsp(wcn,
+                                        vif,
+                                        wcn->hal_buf,
+                                        wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+       struct wcn36xx_hal_delete_bss_req_msg msg_body;
+       struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
+
+       msg_body.bss_index = priv_vif->bss_index;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL, "hal delete bss %d\n", msg_body.bss_index);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_delete_bss failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+                           struct sk_buff *skb_beacon, u16 tim_off,
+                           u16 p2p_off)
+{
+       struct wcn36xx_hal_send_beacon_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
+
+       /* TODO: find out why the extra 6 bytes are needed */
+       msg_body.beacon_length = skb_beacon->len + 6;
+
+       if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
+               memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
+               memcpy(&(msg_body.beacon[4]), skb_beacon->data,
+                      skb_beacon->len);
+       } else {
+               wcn36xx_err("Beacon is too big: beacon size=%d\n",
+                           msg_body.beacon_length);
+               ret = -ENOMEM;
+               goto out;
+       }
+       memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
+
+       /* TODO: find out why the 4-byte offset is needed */
+       msg_body.tim_ie_offset = tim_off + 4;
+       msg_body.p2p_ie_offset = p2p_off;
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal send beacon beacon_length %d\n",
+                   msg_body.beacon_length);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_send_beacon failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_send_beacon response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+                                     struct ieee80211_vif *vif,
+                                     struct sk_buff *skb)
+{
+       struct wcn36xx_hal_send_probe_resp_req_msg msg;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
+
+       if (skb->len > BEACON_TEMPLATE_SIZE) {
+               wcn36xx_warn("probe response template is too big: %d\n",
+                            skb->len);
+               ret = -E2BIG;
+               goto out;
+       }
+
+       msg.probe_resp_template_len = skb->len;
+       memcpy(&msg.probe_resp_template, skb->data, skb->len);
+
+       memcpy(msg.bssid, vif->addr, ETH_ALEN);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg);
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "hal update probe rsp len %d bssid %pM\n",
+                   msg.probe_resp_template_len, msg.bssid);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_update_proberesp_tmpl failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_update_proberesp_tmpl response failed err=%d\n",
+                           ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+                          enum ani_ed_type enc_type,
+                          u8 keyidx,
+                          u8 keylen,
+                          u8 *key,
+                          u8 sta_index)
+{
+       struct wcn36xx_hal_set_sta_key_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
+
+       msg_body.set_sta_key_params.sta_index = sta_index;
+       msg_body.set_sta_key_params.enc_type = enc_type;
+
+       msg_body.set_sta_key_params.key[0].id = keyidx;
+       msg_body.set_sta_key_params.key[0].unicast = 1;
+       msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
+       msg_body.set_sta_key_params.key[0].pae_role = 0;
+       msg_body.set_sta_key_params.key[0].length = keylen;
+       memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
+       msg_body.set_sta_key_params.single_tid_rc = 1;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_set_stakey failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_set_stakey response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+                          enum ani_ed_type enc_type,
+                          u8 keyidx,
+                          u8 keylen,
+                          u8 *key)
+{
+       struct wcn36xx_hal_set_bss_key_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
+       msg_body.bss_idx = 0;
+       msg_body.enc_type = enc_type;
+       msg_body.num_keys = 1;
+       msg_body.keys[0].id = keyidx;
+       msg_body.keys[0].unicast = 0;
+       msg_body.keys[0].direction = WCN36XX_HAL_RX_ONLY;
+       msg_body.keys[0].pae_role = 0;
+       msg_body.keys[0].length = keylen;
+       memcpy(msg_body.keys[0].key, key, keylen);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_set_bsskey failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_set_bsskey response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+                             enum ani_ed_type enc_type,
+                             u8 keyidx,
+                             u8 sta_index)
+{
+       struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
+
+       msg_body.sta_idx = sta_index;
+       msg_body.enc_type = enc_type;
+       msg_body.key_id = keyidx;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_remove_stakey failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_remove_stakey response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+                             enum ani_ed_type enc_type,
+                             u8 keyidx)
+{
+       struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
+       msg_body.bss_idx = 0;
+       msg_body.enc_type = enc_type;
+       msg_body.key_id = keyidx;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_remove_bsskey failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+       struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
+
+       msg_body.bss_index = vif_priv->bss_index;
+       msg_body.tbtt = vif->bss_conf.sync_tsf;
+       msg_body.dtim_period = vif_priv->dtim_period;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_enter_bmps failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_enter_bmps response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
+{
+       struct wcn36xx_hal_enter_bmps_req_msg msg_body;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
+
+       msg_body.bss_index = vif_priv->bss_index;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_exit_bmps failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
+{
+       struct wcn36xx_hal_set_power_params_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
+
+       /*
+        * When the host is down, ignore every second DTIM
+        */
+       if (ignore_dtim) {
+               msg_body.ignore_dtim = 1;
+               msg_body.dtim_period = 2;
+       }
+       msg_body.listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_set_power_params failed\n");
+               goto out;
+       }
+
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+/* Note: this function should be called only after association;
+ * otherwise the request is invalid.
+ */
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+                              struct ieee80211_vif *vif,
+                              int packet_type)
+{
+       struct wcn36xx_hal_keep_alive_req_msg msg_body;
+       struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
+
+       if (packet_type == WCN36XX_HAL_KEEP_ALIVE_NULL_PKT) {
+               msg_body.bss_index = vif_priv->bss_index;
+               msg_body.packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
+               msg_body.time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
+       } else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) {
+               /* TODO: unsolicited ARP response type is not implemented yet */
+       } else {
+               wcn36xx_warn("unknown keep alive packet type %d\n", packet_type);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_keep_alive failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+                            u32 arg3, u32 arg4, u32 arg5)
+{
+       struct wcn36xx_hal_dump_cmd_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
+
+       msg_body.arg1 = arg1;
+       msg_body.arg2 = arg2;
+       msg_body.arg3 = arg3;
+       msg_body.arg4 = arg4;
+       msg_body.arg5 = arg5;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_dump_cmd failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_dump_cmd response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
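+/*
+ * Feature capabilities are exchanged as a 128-bit bitmap stored in four
+ * u32 words; the helpers below set, test and clear individual bits.
+ */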
+static inline void set_feat_caps(u32 *bitmap,
+                                enum place_holder_in_cap_bitmap cap)
+{
+       int arr_idx, bit_idx;
+
+       if (cap < 0 || cap > 127) {
+               wcn36xx_warn("error cap idx %d\n", cap);
+               return;
+       }
+
+       arr_idx = cap / 32;
+       bit_idx = cap % 32;
+       bitmap[arr_idx] |= (1 << bit_idx);
+}
+
+static inline int get_feat_caps(u32 *bitmap,
+                               enum place_holder_in_cap_bitmap cap)
+{
+       int arr_idx, bit_idx;
+       int ret = 0;
+
+       if (cap < 0 || cap > 127) {
+               wcn36xx_warn("error cap idx %d\n", cap);
+               return -EINVAL;
+       }
+
+       arr_idx = cap / 32;
+       bit_idx = cap % 32;
+       ret = (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
+       return ret;
+}
+
+static inline void clear_feat_caps(u32 *bitmap,
+                               enum place_holder_in_cap_bitmap cap)
+{
+       int arr_idx, bit_idx;
+
+       if (cap < 0 || cap > 127) {
+               wcn36xx_warn("error cap idx %d\n", cap);
+               return;
+       }
+
+       arr_idx = cap / 32;
+       bit_idx = cap % 32;
+       bitmap[arr_idx] &= ~(1 << bit_idx);
+}
+
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
+{
+       struct wcn36xx_hal_feat_caps_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
+
+       set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
+                           ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+               struct ieee80211_sta *sta,
+               u16 tid,
+               u16 *ssn,
+               u8 direction,
+               u8 sta_index)
+{
+       struct wcn36xx_hal_add_ba_session_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
+
+       msg_body.sta_index = sta_index;
+       memcpy(&msg_body.mac_addr, sta->addr, ETH_ALEN);
+       msg_body.dialog_token = 0x10;
+       msg_body.tid = tid;
+
+       /* Immediate BA because Delayed BA is not supported */
+       msg_body.policy = 1;
+       msg_body.buffer_size = WCN36XX_AGGR_BUFFER_SIZE;
+       msg_body.timeout = 0;
+       if (ssn)
+               msg_body.ssn = *ssn;
+       msg_body.direction = direction;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_add_ba_session failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
+{
+       struct wcn36xx_hal_add_ba_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
+
+       msg_body.session_id = 0;
+       msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_add_ba failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_add_ba response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
+{
+       struct wcn36xx_hal_del_ba_req_msg msg_body;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
+
+       msg_body.sta_index = sta_index;
+       msg_body.tid = tid;
+       msg_body.direction = 0;
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_del_ba failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_del_ba response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
+{
+       struct wcn36xx_hal_trigger_ba_req_msg msg_body;
+       struct wcn36xx_hal_trigget_ba_req_candidate *candidate;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
+
+       msg_body.session_id = 0;
+       msg_body.candidate_cnt = 1;
+       msg_body.header.len += sizeof(*candidate);
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       candidate = (struct wcn36xx_hal_trigget_ba_req_candidate *)
+               (wcn->hal_buf + sizeof(msg_body));
+       candidate->sta_index = sta_index;
+       candidate->tid_bitmap = 1;
+
+       ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_trigger_ba failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+
+static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
+{
+       struct wcn36xx_hal_tx_compl_ind_msg *rsp = buf;
+
+       if (len != sizeof(*rsp)) {
+               wcn36xx_warn("Bad TX complete indication\n");
+               return -EIO;
+       }
+
+       wcn36xx_dxe_tx_ack_ind(wcn, rsp->status);
+
+       return 0;
+}
+
+static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
+                                        void *buf,
+                                        size_t len)
+{
+       struct wcn36xx_hal_missed_beacon_ind_msg *rsp = buf;
+       struct ieee80211_vif *vif = NULL;
+       struct wcn36xx_vif *tmp;
+
+       /* Old FW does not have bss index */
+       if (wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+               list_for_each_entry(tmp, &wcn->vif_list, list) {
+                       wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+                                   tmp->bss_index);
+                       vif = container_of((void *)tmp,
+                                                struct ieee80211_vif,
+                                                drv_priv);
+                       ieee80211_connection_loss(vif);
+               }
+               return 0;
+       }
+
+       if (len != sizeof(*rsp)) {
+               wcn36xx_warn("Corrupted missed beacon indication\n");
+               return -EIO;
+       }
+
+       list_for_each_entry(tmp, &wcn->vif_list, list) {
+               if (tmp->bss_index == rsp->bss_index) {
+                       wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
+                                   rsp->bss_index);
+                       vif = container_of((void *)tmp,
+                                                struct ieee80211_vif,
+                                                drv_priv);
+                       ieee80211_connection_loss(vif);
+                       return 0;
+               }
+       }
+
+       wcn36xx_warn("BSS index %d not found\n", rsp->bss_index);
+       return -ENOENT;
+}
+
+static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
+                                             void *buf,
+                                             size_t len)
+{
+       struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
+       struct wcn36xx_vif *tmp;
+       struct ieee80211_sta *sta = NULL;
+
+       if (len != sizeof(*rsp)) {
+               wcn36xx_warn("Corrupted delete sta indication\n");
+               return -EIO;
+       }
+
+       list_for_each_entry(tmp, &wcn->vif_list, list) {
+               if (tmp->sta && (tmp->sta->sta_index == rsp->sta_id)) {
+                       sta = container_of((void *)tmp->sta,
+                                                struct ieee80211_sta,
+                                                drv_priv);
+                       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                                   "delete station indication %pM index %d\n",
+                                   rsp->addr2,
+                                   rsp->sta_id);
+                       ieee80211_report_low_ack(sta, 0);
+                       return 0;
+               }
+       }
+
+       wcn36xx_warn("STA with addr %pM and index %d not found\n",
+                    rsp->addr2,
+                    rsp->sta_id);
+       return -ENOENT;
+}
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
+{
+       struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
+       size_t len;
+       int ret = 0;
+
+       mutex_lock(&wcn->hal_mutex);
+       INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
+
+       PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+       body = (struct wcn36xx_hal_update_cfg_req_msg *) wcn->hal_buf;
+       len = msg_body.header.len;
+
+       put_cfg_tlv_u32(wcn, &len, cfg_id, value);
+       body->header.len = len;
+       body->len = len - sizeof(*body);
+
+       ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
+       if (ret) {
+               wcn36xx_err("Sending hal_update_cfg failed\n");
+               goto out;
+       }
+       ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+       if (ret) {
+               wcn36xx_err("hal_update_cfg response failed err=%d\n", ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&wcn->hal_mutex);
+       return ret;
+}
+static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
+{
+       struct wcn36xx_hal_msg_header *msg_header = buf;
+       struct wcn36xx_hal_ind_msg *msg_ind;
+       wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
+
+       switch (msg_header->msg_type) {
+       case WCN36XX_HAL_START_RSP:
+       case WCN36XX_HAL_CONFIG_STA_RSP:
+       case WCN36XX_HAL_CONFIG_BSS_RSP:
+       case WCN36XX_HAL_ADD_STA_SELF_RSP:
+       case WCN36XX_HAL_STOP_RSP:
+       case WCN36XX_HAL_DEL_STA_SELF_RSP:
+       case WCN36XX_HAL_DELETE_STA_RSP:
+       case WCN36XX_HAL_INIT_SCAN_RSP:
+       case WCN36XX_HAL_START_SCAN_RSP:
+       case WCN36XX_HAL_END_SCAN_RSP:
+       case WCN36XX_HAL_FINISH_SCAN_RSP:
+       case WCN36XX_HAL_DOWNLOAD_NV_RSP:
+       case WCN36XX_HAL_DELETE_BSS_RSP:
+       case WCN36XX_HAL_SEND_BEACON_RSP:
+       case WCN36XX_HAL_SET_LINK_ST_RSP:
+       case WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP:
+       case WCN36XX_HAL_SET_BSSKEY_RSP:
+       case WCN36XX_HAL_SET_STAKEY_RSP:
+       case WCN36XX_HAL_RMV_STAKEY_RSP:
+       case WCN36XX_HAL_RMV_BSSKEY_RSP:
+       case WCN36XX_HAL_ENTER_BMPS_RSP:
+       case WCN36XX_HAL_SET_POWER_PARAMS_RSP:
+       case WCN36XX_HAL_EXIT_BMPS_RSP:
+       case WCN36XX_HAL_KEEP_ALIVE_RSP:
+       case WCN36XX_HAL_DUMP_COMMAND_RSP:
+       case WCN36XX_HAL_ADD_BA_SESSION_RSP:
+       case WCN36XX_HAL_ADD_BA_RSP:
+       case WCN36XX_HAL_DEL_BA_RSP:
+       case WCN36XX_HAL_TRIGGER_BA_RSP:
+       case WCN36XX_HAL_UPDATE_CFG_RSP:
+       case WCN36XX_HAL_JOIN_RSP:
+       case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
+       case WCN36XX_HAL_CH_SWITCH_RSP:
+       case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+               memcpy(wcn->hal_buf, buf, len);
+               wcn->hal_rsp_len = len;
+               complete(&wcn->hal_rsp_compl);
+               break;
+
+       case WCN36XX_HAL_OTA_TX_COMPL_IND:
+       case WCN36XX_HAL_MISSED_BEACON_IND:
+       case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+               mutex_lock(&wcn->hal_ind_mutex);
+               msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
+               msg_ind->msg_len = len;
+               msg_ind->msg = kmalloc(len, GFP_KERNEL);
+               memcpy(msg_ind->msg, buf, len);
+               list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+               queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
+               wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+               mutex_unlock(&wcn->hal_ind_mutex);
+               break;
+       default:
+               wcn36xx_err("SMD_EVENT (%d) not supported\n",
+                             msg_header->msg_type);
+       }
+}
+static void wcn36xx_ind_smd_work(struct work_struct *work)
+{
+       struct wcn36xx *wcn =
+               container_of(work, struct wcn36xx, hal_ind_work);
+       struct wcn36xx_hal_msg_header *msg_header;
+       struct wcn36xx_hal_ind_msg *hal_ind_msg;
+
+       mutex_lock(&wcn->hal_ind_mutex);
+
+       hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
+                                      struct wcn36xx_hal_ind_msg,
+                                      list);
+
+       msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
+
+       switch (msg_header->msg_type) {
+       case WCN36XX_HAL_OTA_TX_COMPL_IND:
+               wcn36xx_smd_tx_compl_ind(wcn,
+                                        hal_ind_msg->msg,
+                                        hal_ind_msg->msg_len);
+               break;
+       case WCN36XX_HAL_MISSED_BEACON_IND:
+               wcn36xx_smd_missed_beacon_ind(wcn,
+                                             hal_ind_msg->msg,
+                                             hal_ind_msg->msg_len);
+               break;
+       case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
+               wcn36xx_smd_delete_sta_context_ind(wcn,
+                                                  hal_ind_msg->msg,
+                                                  hal_ind_msg->msg_len);
+               break;
+       default:
+               wcn36xx_err("SMD_EVENT (%d) not supported\n",
+                             msg_header->msg_type);
+       }
+       list_del(wcn->hal_ind_queue.next);
+       kfree(hal_ind_msg->msg);
+       kfree(hal_ind_msg);
+       mutex_unlock(&wcn->hal_ind_mutex);
+}
+int wcn36xx_smd_open(struct wcn36xx *wcn)
+{
+       int ret = 0;
+       wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
+       if (!wcn->hal_ind_wq) {
+               wcn36xx_err("failed to allocate wq\n");
+               ret = -ENOMEM;
+               goto out;
+       }
+       INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work);
+       INIT_LIST_HEAD(&wcn->hal_ind_queue);
+       mutex_init(&wcn->hal_ind_mutex);
+
+       ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
+       if (ret) {
+               wcn36xx_err("failed to open control channel\n");
+               goto free_wq;
+       }
+
+       return ret;
+
+free_wq:
+       destroy_workqueue(wcn->hal_ind_wq);
+out:
+       return ret;
+}
+
+void wcn36xx_smd_close(struct wcn36xx *wcn)
+{
+       wcn->ctrl_ops->close();
+       destroy_workqueue(wcn->hal_ind_wq);
+       mutex_destroy(&wcn->hal_ind_mutex);
+}
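
Every wcn36xx_smd_*() request helper above follows the same skeleton: take hal_mutex, build the request with INIT_HAL_MSG, copy it into hal_buf with PREPARE_HAL_BUF, send it with wcn36xx_smd_send_and_wait() and check the firmware status with wcn36xx_smd_rsp_status_check(). A condensed sketch of that skeleton, for orientation only; the request type and command ID below are placeholders, not real HAL definitions:

/* Sketch of the common request/response skeleton used by the helpers above.
 * struct wcn36xx_hal_example_req_msg and WCN36XX_HAL_EXAMPLE_REQ are
 * placeholders for illustration, not definitions from hal.h. */
static int wcn36xx_smd_example(struct wcn36xx *wcn)
{
	struct wcn36xx_hal_example_req_msg msg_body;
	int ret;

	mutex_lock(&wcn->hal_mutex);		/* serializes use of hal_buf */
	INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXAMPLE_REQ);
	/* ... fill request-specific fields of msg_body here ... */
	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);

	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
	if (ret) {
		wcn36xx_err("Sending hal_example failed\n");
		goto out;
	}
	ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
	if (ret)
		wcn36xx_err("hal_example response failed err=%d\n", ret);
out:
	mutex_unlock(&wcn->hal_mutex);
	return ret;
}
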
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
new file mode 100644 (file)
index 0000000..e7c3901
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SMD_H_
+#define _SMD_H_
+
+#include "wcn36xx.h"
+
+/* Max shared size is 4k but we take less. */
+#define WCN36XX_NV_FRAGMENT_SIZE                       3072
+
+#define WCN36XX_HAL_BUF_SIZE                           4096
+
+#define HAL_MSG_TIMEOUT 200
+#define WCN36XX_SMSM_WLAN_TX_ENABLE                    0x00000400
+#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY               0x00000200
+/* The PNO version info is contained in the rsp msg */
+#define WCN36XX_FW_MSG_PNO_VERSION_MASK                        0x8000
+
+enum wcn36xx_fw_msg_result {
+       WCN36XX_FW_MSG_RESULT_SUCCESS                   = 0,
+       WCN36XX_FW_MSG_RESULT_SUCCESS_SYNC              = 1,
+
+       WCN36XX_FW_MSG_RESULT_MEM_FAIL                  = 5,
+};
+
+/******************************/
+/* SMD requests and responses */
+/******************************/
+struct wcn36xx_fw_msg_status_rsp {
+       u32     status;
+} __packed;
+
+struct wcn36xx_hal_ind_msg {
+       struct list_head list;
+       u8 *msg;
+       size_t msg_len;
+};
+
+struct wcn36xx;
+
+int wcn36xx_smd_open(struct wcn36xx *wcn);
+void wcn36xx_smd_close(struct wcn36xx *wcn);
+
+int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
+int wcn36xx_smd_start(struct wcn36xx *wcn);
+int wcn36xx_smd_stop(struct wcn36xx *wcn);
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
+                           enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn);
+int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
+int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
+int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch);
+int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
+                           const u8 *sta_mac,
+                           enum wcn36xx_hal_link_state state);
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+                          struct ieee80211_sta *sta, const u8 *bssid,
+                          bool update);
+int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+                          struct ieee80211_sta *sta);
+int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+                           struct sk_buff *skb_beacon, u16 tim_off,
+                           u16 p2p_off);
+int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
+                              struct ieee80211_vif *vif, int ch);
+int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
+                                     struct ieee80211_vif *vif,
+                                     struct sk_buff *skb);
+int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
+                          enum ani_ed_type enc_type,
+                          u8 keyidx,
+                          u8 keylen,
+                          u8 *key,
+                          u8 sta_index);
+int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
+                          enum ani_ed_type enc_type,
+                          u8 keyidx,
+                          u8 keylen,
+                          u8 *key);
+int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
+                             enum ani_ed_type enc_type,
+                             u8 keyidx,
+                             u8 sta_index);
+int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
+                             enum ani_ed_type enc_type,
+                             u8 keyidx);
+int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif);
+int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim);
+int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
+                              struct ieee80211_vif *vif,
+                              int packet_type);
+int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
+                            u32 arg3, u32 arg4, u32 arg5);
+int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+
+int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
+               struct ieee80211_sta *sta,
+               u16 tid,
+               u16 *ssn,
+               u8 direction,
+               u8 sta_index);
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn);
+int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
+
+int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+#endif /* _SMD_H_ */
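
The block-ack helpers declared above would typically be driven from mac80211's aggregation path; the actual wiring lives in main.c, which is outside this hunk. A hedged sketch of a plausible call order when a TX aggregation session is set up and torn down (the direction value 1 meaning "originator" is an assumption of this sketch):

/* Hedged sketch only: plausible ordering of the BA helpers declared above.
 * The caller supplies sta, tid, ssn and sta_index; direction 1 meaning
 * "originator" is an assumption, not taken from this patch. */
static int example_start_tx_ba(struct wcn36xx *wcn, struct ieee80211_sta *sta,
			       u16 tid, u16 *ssn, u8 sta_index)
{
	int ret;

	ret = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1, sta_index);
	if (ret)
		return ret;
	ret = wcn36xx_smd_add_ba(wcn);
	if (ret)
		return ret;
	/* ask firmware to start aggregating frames for this station */
	return wcn36xx_smd_trigger_ba(wcn, sta_index);
}

static int example_stop_tx_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index)
{
	return wcn36xx_smd_del_ba(wcn, tid, sta_index);
}
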
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
new file mode 100644 (file)
index 0000000..b2b60e3
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "txrx.h"
+
+static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
+{
+       return 100 - ((bd->phy_stat0 >> 24) & 0xff);
+}
+
+int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
+{
+       struct ieee80211_rx_status status;
+       struct ieee80211_hdr *hdr;
+       struct wcn36xx_rx_bd *bd;
+       u16 fc, sn;
+
+       /*
+        * All fields must be zeroed, otherwise stale values can lead to
+        * unexpected behaviour.
+        */
+       memset(&status, 0, sizeof(status));
+
+       bd = (struct wcn36xx_rx_bd *)skb->data;
+       buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+       wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
+                        "BD   <<< ", (char *)bd,
+                        sizeof(struct wcn36xx_rx_bd));
+
+       skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
+       skb_pull(skb, bd->pdu.mpdu_header_off);
+
+       status.mactime = 10;
+       status.freq = WCN36XX_CENTER_FREQ(wcn);
+       status.band = WCN36XX_BAND(wcn);
+       status.signal = -get_rssi0(bd);
+       status.antenna = 1;
+       status.rate_idx = 1;
+       status.flag = 0;
+       status.rx_flags = 0;
+       status.flag |= RX_FLAG_IV_STRIPPED |
+                      RX_FLAG_MMIC_STRIPPED |
+                      RX_FLAG_DECRYPTED;
+
+       wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
+                   status.flag,  status.vendor_radiotap_len);
+
+       memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
+
+       hdr = (struct ieee80211_hdr *) skb->data;
+       fc = __le16_to_cpu(hdr->frame_control);
+       sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
+
+       if (ieee80211_is_beacon(hdr->frame_control)) {
+               wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
+                           skb, skb->len, fc, sn);
+               wcn36xx_dbg_dump(WCN36XX_DBG_BEACON_DUMP, "SKB <<< ",
+                                (char *)skb->data, skb->len);
+       } else {
+               wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %04x sn %d\n",
+                           skb, skb->len, fc, sn);
+               wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
+                                (char *)skb->data, skb->len);
+       }
+
+       ieee80211_rx_irqsafe(wcn->hw, skb);
+
+       return 0;
+}
+
+static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
+                              u32 mpdu_header_len,
+                              u32 len,
+                              u16 tid)
+{
+       bd->pdu.mpdu_header_len = mpdu_header_len;
+       bd->pdu.mpdu_header_off = sizeof(*bd);
+       bd->pdu.mpdu_data_off = bd->pdu.mpdu_header_len +
+               bd->pdu.mpdu_header_off;
+       bd->pdu.mpdu_len = len;
+       bd->pdu.tid = tid;
+}
+
+static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
+                                                 u8 *addr)
+{
+       struct wcn36xx_vif *vif_priv = NULL;
+       struct ieee80211_vif *vif = NULL;
+       list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+                       vif = container_of((void *)vif_priv,
+                                  struct ieee80211_vif,
+                                  drv_priv);
+                       if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
+                               return vif_priv;
+       }
+       wcn36xx_warn("vif %pM not found\n", addr);
+       return NULL;
+}
+static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
+                               struct wcn36xx *wcn,
+                               struct wcn36xx_vif **vif_priv,
+                               struct wcn36xx_sta *sta_priv,
+                               struct ieee80211_hdr *hdr,
+                               bool bcast)
+{
+       struct ieee80211_vif *vif = NULL;
+       struct wcn36xx_vif *__vif_priv = NULL;
+       bd->bd_rate = WCN36XX_BD_RATE_DATA;
+
+       /*
+        * For non-unicast frames mac80211 will not set the sta pointer, so
+        * use self_sta_index instead.
+        */
+       if (sta_priv) {
+               __vif_priv = sta_priv->vif;
+               vif = container_of((void *)__vif_priv,
+                                  struct ieee80211_vif,
+                                  drv_priv);
+
+               if (vif->type == NL80211_IFTYPE_STATION) {
+                       bd->sta_index = sta_priv->bss_sta_index;
+                       bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
+               } else if (vif->type == NL80211_IFTYPE_AP ||
+                          vif->type == NL80211_IFTYPE_ADHOC ||
+                          vif->type == NL80211_IFTYPE_MESH_POINT) {
+                       bd->sta_index = sta_priv->sta_index;
+                       bd->dpu_desc_idx = sta_priv->dpu_desc_index;
+               }
+       } else {
+               __vif_priv = get_vif_by_addr(wcn, hdr->addr2);
+               bd->sta_index = __vif_priv->self_sta_index;
+               bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+       }
+
+       bd->dpu_sign = __vif_priv->ucast_dpu_signature;
+
+       if (ieee80211_is_nullfunc(hdr->frame_control) ||
+          (sta_priv && !sta_priv->is_data_encrypted))
+               bd->dpu_ne = 1;
+
+       if (bcast) {
+               bd->ub = 1;
+               bd->ack_policy = 1;
+       }
+       *vif_priv = __vif_priv;
+}
+
+static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
+                               struct wcn36xx *wcn,
+                               struct wcn36xx_vif **vif_priv,
+                               struct ieee80211_hdr *hdr,
+                               bool bcast)
+{
+       struct wcn36xx_vif *__vif_priv =
+               get_vif_by_addr(wcn, hdr->addr2);
+       bd->sta_index = __vif_priv->self_sta_index;
+       bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+       bd->dpu_ne = 1;
+
+       /* default rate for unicast */
+       if (ieee80211_is_mgmt(hdr->frame_control))
+               bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+                       WCN36XX_BD_RATE_CTRL :
+                       WCN36XX_BD_RATE_MGMT;
+       else if (ieee80211_is_ctl(hdr->frame_control))
+               bd->bd_rate = WCN36XX_BD_RATE_CTRL;
+       else
+               wcn36xx_warn("frame control type unknown\n");
+
+       /*
+        * While joining, trick the hardware into sending the probe request
+        * as unicast even if the address is broadcast.
+        */
+       if (__vif_priv->is_joining &&
+           ieee80211_is_probe_req(hdr->frame_control))
+               bcast = false;
+
+       if (bcast) {
+               /* broadcast */
+               bd->ub = 1;
+               /* No ack needed for non-unicast frames */
+               bd->ack_policy = 1;
+               bd->queue_id = WCN36XX_TX_B_WQ_ID;
+       } else
+               bd->queue_id = WCN36XX_TX_U_WQ_ID;
+       *vif_priv = __vif_priv;
+}
+
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+                    struct wcn36xx_sta *sta_priv,
+                    struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       struct wcn36xx_vif *vif_priv = NULL;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       unsigned long flags;
+       bool is_low = ieee80211_is_data(hdr->frame_control);
+       bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
+               is_multicast_ether_addr(hdr->addr1);
+       struct wcn36xx_tx_bd *bd = wcn36xx_dxe_get_next_bd(wcn, is_low);
+
+       if (!bd) {
+               /*
+                * TX DXE descriptors are used in pairs: one for the BD and
+                * one for the actual frame. The BD DXEs have a preallocated
+                * buffer while the skb ones do not. If this isn't true
+                * something is really weird. TODO: Recover from this situation
+                */
+
+               wcn36xx_err("bd address may not be NULL for BD DXE\n");
+               return -EINVAL;
+       }
+
+       memset(bd, 0, sizeof(*bd));
+
+       wcn36xx_dbg(WCN36XX_DBG_TX,
+                   "tx skb %p len %d fc %04x sn %d %s %s\n",
+                   skb, skb->len, __le16_to_cpu(hdr->frame_control),
+                   IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
+                   is_low ? "low" : "high", bcast ? "bcast" : "ucast");
+
+       wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len);
+
+       bd->dpu_rf = WCN36XX_BMU_WQ_TX;
+
+       bd->tx_comp = info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS;
+       if (bd->tx_comp) {
+               wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
+               spin_lock_irqsave(&wcn->dxe_lock, flags);
+               if (wcn->tx_ack_skb) {
+                       spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+                       wcn36xx_warn("tx_ack_skb already set\n");
+                       return -EINVAL;
+               }
+
+               wcn->tx_ack_skb = skb;
+               spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+               /* Only one at a time is supported by fw. Stop the TX queues
+                * until the ack status gets back.
+                *
+                * TODO: Add watchdog in case FW does not answer
+                */
+               ieee80211_stop_queues(wcn->hw);
+       }
+
+       /* Data frames are served first */
+       if (is_low) {
+               wcn36xx_set_tx_data(bd, wcn, &vif_priv, sta_priv, hdr, bcast);
+               wcn36xx_set_tx_pdu(bd,
+                          ieee80211_is_data_qos(hdr->frame_control) ?
+                          sizeof(struct ieee80211_qos_hdr) :
+                          sizeof(struct ieee80211_hdr_3addr),
+                          skb->len, sta_priv ? sta_priv->tid : 0);
+       } else {
+               /* MGMT and CTRL frames are handled here */
+               wcn36xx_set_tx_mgmt(bd, wcn, &vif_priv, hdr, bcast);
+               wcn36xx_set_tx_pdu(bd,
+                          ieee80211_is_data_qos(hdr->frame_control) ?
+                          sizeof(struct ieee80211_qos_hdr) :
+                          sizeof(struct ieee80211_hdr_3addr),
+                          skb->len, WCN36XX_TID);
+       }
+
+       buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+       bd->tx_bd_sign = 0xbdbdbdbd;
+
+       return wcn36xx_dxe_tx_frame(wcn, vif_priv, skb, is_low);
+}
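
wcn36xx_start_tx() is the entry point used by the driver's mac80211 .tx hook, which is defined in main.c and not part of this hunk. A hedged sketch of how such a hook might dispatch into it; control->sta is NULL for non-station frames, which selects the sta_priv == NULL path above:

/* Hedged sketch of a caller; the real hook lives in main.c and may differ. */
static void example_op_tx(struct ieee80211_hw *hw,
			  struct ieee80211_tx_control *control,
			  struct sk_buff *skb)
{
	struct wcn36xx *wcn = hw->priv;
	struct wcn36xx_sta *sta_priv = NULL;

	if (control->sta)
		sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;

	if (wcn36xx_start_tx(wcn, sta_priv, skb))
		ieee80211_free_txskb(wcn->hw, skb);
}
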
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
new file mode 100644 (file)
index 0000000..bbfbcf8
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include <linux/etherdevice.h>
+#include "wcn36xx.h"
+
+/* TODO describe all properties */
+#define WCN36XX_802_11_HEADER_LEN      24
+#define WCN36XX_BMU_WQ_TX              25
+#define WCN36XX_TID                    7
+/* broadcast wq ID */
+#define WCN36XX_TX_B_WQ_ID             0xA
+#define WCN36XX_TX_U_WQ_ID             0x9
+/* bd_rate */
+#define WCN36XX_BD_RATE_DATA 0
+#define WCN36XX_BD_RATE_MGMT 2
+#define WCN36XX_BD_RATE_CTRL 3
+
+struct wcn36xx_pdu {
+       u32     dpu_fb:8;
+       u32     adu_fb:8;
+       u32     pdu_id:16;
+
+       /* 0x04*/
+       u32     tail_pdu_idx:16;
+       u32     head_pdu_idx:16;
+
+       /* 0x08*/
+       u32     pdu_count:7;
+       u32     mpdu_data_off:9;
+       u32     mpdu_header_off:8;
+       u32     mpdu_header_len:8;
+
+       /* 0x0c*/
+       u32     reserved4:8;
+       u32     tid:4;
+       u32     reserved3:4;
+       u32     mpdu_len:16;
+};
+
+struct wcn36xx_rx_bd {
+       u32     bdt:2;
+       u32     ft:1;
+       u32     dpu_ne:1;
+       u32     rx_key_id:3;
+       u32     ub:1;
+       u32     rmf:1;
+       u32     uma_bypass:1;
+       u32     csr11:1;
+       u32     reserved0:1;
+       u32     scan_learn:1;
+       u32     rx_ch:4;
+       u32     rtsf:1;
+       u32     bsf:1;
+       u32     a2hf:1;
+       u32     st_auf:1;
+       u32     dpu_sign:3;
+       u32     dpu_rf:8;
+
+       struct wcn36xx_pdu pdu;
+
+       /* 0x14*/
+       u32     addr3:8;
+       u32     addr2:8;
+       u32     addr1:8;
+       u32     dpu_desc_idx:8;
+
+       /* 0x18*/
+       u32     rxp_flags:23;
+       u32     rate_id:9;
+
+       u32     phy_stat0;
+       u32     phy_stat1;
+
+       /* 0x24 */
+       u32     rx_times;
+
+       u32     pmi_cmd[6];
+
+       /* 0x40 */
+       u32     reserved7:4;
+       u32     reorder_slot_id:6;
+       u32     reorder_fwd_id:6;
+       u32     reserved6:12;
+       u32     reorder_code:4;
+
+       /* 0x44 */
+       u32     exp_seq_num:12;
+       u32     cur_seq_num:12;
+       u32     fr_type_subtype:8;
+
+       /* 0x48 */
+       u32     msdu_size:16;
+       u32     sub_fr_id:4;
+       u32     proc_order:4;
+       u32     reserved9:4;
+       u32     aef:1;
+       u32     lsf:1;
+       u32     esf:1;
+       u32     asf:1;
+};
+
+struct wcn36xx_tx_bd {
+       u32     bdt:2;
+       u32     ft:1;
+       u32     dpu_ne:1;
+       u32     fw_tx_comp:1;
+       u32     tx_comp:1;
+       u32     reserved1:1;
+       u32     ub:1;
+       u32     rmf:1;
+       u32     reserved0:12;
+       u32     dpu_sign:3;
+       u32     dpu_rf:8;
+
+       struct wcn36xx_pdu pdu;
+
+       /* 0x14*/
+       u32     reserved5:7;
+       u32     queue_id:5;
+       u32     bd_rate:2;
+       u32     ack_policy:2;
+       u32     sta_index:8;
+       u32     dpu_desc_idx:8;
+
+       u32     tx_bd_sign;
+       u32     reserved6;
+       u32     dxe_start_time;
+       u32     dxe_end_time;
+
+       /*u32   tcp_udp_start_off:10;
+       u32     header_cks:16;
+       u32     reserved7:6;*/
+};
+
+struct wcn36xx_sta;
+struct wcn36xx;
+
+int  wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb);
+int wcn36xx_start_tx(struct wcn36xx *wcn,
+                    struct wcn36xx_sta *sta_priv,
+                    struct sk_buff *skb);
+
+#endif /* _TXRX_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
new file mode 100644 (file)
index 0000000..58b6383
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WCN36XX_H_
+#define _WCN36XX_H_
+
+#include <linux/completion.h>
+#include <linux/printk.h>
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+
+#include "hal.h"
+#include "smd.h"
+#include "txrx.h"
+#include "dxe.h"
+#include "pmc.h"
+#include "debug.h"
+
+#define WLAN_NV_FILE               "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+#define WCN36XX_AGGR_BUFFER_SIZE 64
+
+extern unsigned int wcn36xx_dbg_mask;
+
+enum wcn36xx_debug_mask {
+       WCN36XX_DBG_DXE         = 0x00000001,
+       WCN36XX_DBG_DXE_DUMP    = 0x00000002,
+       WCN36XX_DBG_SMD         = 0x00000004,
+       WCN36XX_DBG_SMD_DUMP    = 0x00000008,
+       WCN36XX_DBG_RX          = 0x00000010,
+       WCN36XX_DBG_RX_DUMP     = 0x00000020,
+       WCN36XX_DBG_TX          = 0x00000040,
+       WCN36XX_DBG_TX_DUMP     = 0x00000080,
+       WCN36XX_DBG_HAL         = 0x00000100,
+       WCN36XX_DBG_HAL_DUMP    = 0x00000200,
+       WCN36XX_DBG_MAC         = 0x00000400,
+       WCN36XX_DBG_BEACON      = 0x00000800,
+       WCN36XX_DBG_BEACON_DUMP = 0x00001000,
+       WCN36XX_DBG_PMC         = 0x00002000,
+       WCN36XX_DBG_PMC_DUMP    = 0x00004000,
+       WCN36XX_DBG_ANY         = 0xffffffff,
+};
+
+#define wcn36xx_err(fmt, arg...)                               \
+       printk(KERN_ERR pr_fmt("ERROR " fmt), ##arg);
+
+#define wcn36xx_warn(fmt, arg...)                              \
+       printk(KERN_WARNING pr_fmt("WARNING " fmt), ##arg)
+
+#define wcn36xx_info(fmt, arg...)              \
+       printk(KERN_INFO pr_fmt(fmt), ##arg)
+
+#define wcn36xx_dbg(mask, fmt, arg...) do {                    \
+       if (wcn36xx_dbg_mask & mask)                                    \
+               printk(KERN_DEBUG pr_fmt(fmt), ##arg);  \
+} while (0)
+
+#define wcn36xx_dbg_dump(mask, prefix_str, buf, len) do {      \
+       if (wcn36xx_dbg_mask & mask)                                    \
+               print_hex_dump(KERN_DEBUG, pr_fmt(prefix_str),  \
+                              DUMP_PREFIX_OFFSET, 32, 1,       \
+                              buf, len, false);                \
+} while (0)
+
+#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
+#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
+#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
+#define WCN36XX_LISTEN_INTERVAL(__wcn) (__wcn->hw->conf.listen_interval)
+#define WCN36XX_FLAGS(__wcn) (__wcn->hw->flags)
+#define WCN36XX_MAX_POWER(__wcn) (__wcn->hw->conf.chandef.chan->max_power)
+
+static inline void buff_to_be(u32 *buf, size_t len)
+{
+       int i;
+       for (i = 0; i < len; i++)
+               buf[i] = cpu_to_be32(buf[i]);
+}
+
+struct nv_data {
+       int     is_valid;
+       u8      table;
+};
+
+/* Interface for platform control path
+ *
+ * @open: hook called when wcn36xx wants to open the control channel.
+ * @tx: sends a buffer.
+ */
+struct wcn36xx_platform_ctrl_ops {
+       int (*open)(void *drv_priv, void *rsp_cb);
+       void (*close)(void);
+       int (*tx)(char *buf, size_t len);
+       int (*get_hw_mac)(u8 *addr);
+       int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
+};
+
+/**
+ * struct wcn36xx_vif - holds VIF related fields
+ *
+ * @bss_index: bss_index is initially set to 0xFF. bss_index is received from
+ * HW after first config_bss call and must be used in delete_bss and
+ * enter/exit_bmps.
+ */
+struct wcn36xx_vif {
+       struct list_head list;
+       struct wcn36xx_sta *sta;
+       u8 dtim_period;
+       enum ani_ed_type encrypt_type;
+       bool is_joining;
+       struct wcn36xx_hal_mac_ssid ssid;
+
+       /* Power management */
+       enum wcn36xx_power_state pw_state;
+
+       u8 bss_index;
+       u8 ucast_dpu_signature;
+       /* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
+       u8 self_sta_index;
+       u8 self_dpu_desc_index;
+};
+
+/**
+ * struct wcn36xx_sta - holds STA related fields
+ *
+ * @tid: traffic ID that is used during AMPDU and in TX BD.
+ * @sta_index: STA index is returned from HW after config_sta call and is
+ * used in both SMD channel and TX BD.
+ * @dpu_desc_index: DPU descriptor index is returned from HW after config_sta
+ * call and is used in TX BD.
+ * @bss_sta_index: STA index is returned from HW after config_bss call and is
+ * used in both SMD channel and TX BD. See the table below for when it is used.
+ * @bss_dpu_desc_index: DPU descriptor index is returned from HW after
+ * config_bss call and is used in TX BD.
+ * ______________________________________________
+ * |              |     STA     |       AP      |
+ * |______________|_____________|_______________|
+ * |    TX BD     |bss_sta_index|   sta_index   |
+ * |______________|_____________|_______________|
+ * |all SMD calls |bss_sta_index|   sta_index   |
+ * |______________|_____________|_______________|
+ * |smd_delete_sta|  sta_index  |   sta_index   |
+ * |______________|_____________|_______________|
+ */
+struct wcn36xx_sta {
+       struct wcn36xx_vif *vif;
+       u16 aid;
+       u16 tid;
+       u8 sta_index;
+       u8 dpu_desc_index;
+       u8 bss_sta_index;
+       u8 bss_dpu_desc_index;
+       bool is_data_encrypted;
+       /* Rates */
+       struct wcn36xx_hal_supported_rates supported_rates;
+};
+struct wcn36xx_dxe_ch;
+struct wcn36xx {
+       struct ieee80211_hw     *hw;
+       struct device           *dev;
+       struct list_head        vif_list;
+
+       u8                      fw_revision;
+       u8                      fw_version;
+       u8                      fw_minor;
+       u8                      fw_major;
+
+       /* extra byte for the NULL termination */
+       u8                      crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+       u8                      wlan_version[WCN36XX_HAL_VERSION_LENGTH + 1];
+
+       /* IRQs */
+       int                     tx_irq;
+       int                     rx_irq;
+       void __iomem            *mmio;
+
+       struct wcn36xx_platform_ctrl_ops *ctrl_ops;
+       /*
+        * hal_buf must be protected with hal_mutex to guarantee
+        * that all messages are sent one after another.
+        */
+       u8                      *hal_buf;
+       size_t                  hal_rsp_len;
+       struct mutex            hal_mutex;
+       struct completion       hal_rsp_compl;
+       struct workqueue_struct *hal_ind_wq;
+       struct work_struct      hal_ind_work;
+       struct mutex            hal_ind_mutex;
+       struct list_head        hal_ind_queue;
+
+       /* DXE channels */
+       struct wcn36xx_dxe_ch   dxe_tx_l_ch;    /* TX low */
+       struct wcn36xx_dxe_ch   dxe_tx_h_ch;    /* TX high */
+       struct wcn36xx_dxe_ch   dxe_rx_l_ch;    /* RX low */
+       struct wcn36xx_dxe_ch   dxe_rx_h_ch;    /* RX high */
+
+       /* For synchronization of DXE resources from BH, IRQ and WQ contexts */
+       spinlock_t      dxe_lock;
+       bool                    queues_stopped;
+
+       /* Memory pools */
+       struct wcn36xx_dxe_mem_pool mgmt_mem_pool;
+       struct wcn36xx_dxe_mem_pool data_mem_pool;
+
+       struct sk_buff          *tx_ack_skb;
+
+#ifdef CONFIG_WCN36XX_DEBUGFS
+       /* Debug file system entry */
+       struct wcn36xx_dfs_entry    dfs;
+#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+};
+
+static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
+                                        u8 major,
+                                        u8 minor,
+                                        u8 version,
+                                        u8 revision)
+{
+       return (wcn->fw_major == major &&
+               wcn->fw_minor == minor &&
+               wcn->fw_version == version &&
+               wcn->fw_revision == revision);
+}
+void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
+
+#endif /* _WCN36XX_H_ */
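
wcn36xx_platform_ctrl_ops is the only contract between the driver core and the platform glue that owns the SMD channel; the glue itself is not part of this patch. A hypothetical, minimal ops table showing the expected callback shapes (the example_* functions are placeholders, not code from this series):

/* Hypothetical platform glue: placeholder callbacks matching the
 * wcn36xx_platform_ctrl_ops signatures defined above. */
static int example_open(void *drv_priv, void *rsp_cb) { return 0; }
static void example_close(void) { }
static int example_tx(char *buf, size_t len) { return 0; }
static int example_get_hw_mac(u8 *addr) { return 0; }
static int example_smsm_change_state(u32 clear_mask, u32 set_mask) { return 0; }

static struct wcn36xx_platform_ctrl_ops example_ctrl_ops = {
	.open			= example_open,
	.close			= example_close,
	.tx			= example_tx,
	.get_hw_mac		= example_get_hw_mac,
	.smsm_change_state	= example_smsm_change_state,
};
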
index 61c302a6bdeaa38ef1a1d20b3ac1e477db794745..5b340769d5bb2196bf3d0192e007ed13f23236fc 100644 (file)
@@ -316,8 +316,8 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        }
        conn.channel = ch - 1;
 
-       memcpy(conn.bssid, bss->bssid, 6);
-       memcpy(conn.dst_mac, bss->bssid, 6);
+       memcpy(conn.bssid, bss->bssid, ETH_ALEN);
+       memcpy(conn.dst_mac, bss->bssid, ETH_ALEN);
        /*
         * FW don't support scan after connection attempt
         */
index eb1dc7ad80fb367bbb6c56959a8758aa85225ac4..eeceab39cda22aee81e2fcbb86fc0c9520766a5f 100644 (file)
@@ -197,7 +197,6 @@ static void wil_pcie_remove(struct pci_dev *pdev)
        pci_iounmap(pdev, wil->csr);
        pci_release_region(pdev, 0);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 }
 
 static DEFINE_PCI_DEVICE_TABLE(wil6210_pcie_ids) = {
index b827d51c30a37b93f1747d7df9b2049aeb97e25f..0d950f209dae01efd23ebe9956ac2dc68daa7063 100644 (file)
@@ -844,18 +844,18 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
        if (priv->wep_is_on)
                frame_ctl |= IEEE80211_FCTL_PROTECTED;
        if (priv->operating_mode == IW_MODE_ADHOC) {
-               skb_copy_from_linear_data(skb, &header.addr1, 6);
-               memcpy(&header.addr2, dev->dev_addr, 6);
-               memcpy(&header.addr3, priv->BSSID, 6);
+               skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
+               memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+               memcpy(&header.addr3, priv->BSSID, ETH_ALEN);
        } else {
                frame_ctl |= IEEE80211_FCTL_TODS;
-               memcpy(&header.addr1, priv->CurrentBSSID, 6);
-               memcpy(&header.addr2, dev->dev_addr, 6);
-               skb_copy_from_linear_data(skb, &header.addr3, 6);
+               memcpy(&header.addr1, priv->CurrentBSSID, ETH_ALEN);
+               memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
+               skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
        }
 
        if (priv->use_wpa)
-               memcpy(&header.addr4, SNAP_RFC1024, 6);
+               memcpy(&header.addr4, SNAP_RFC1024, ETH_ALEN);
 
        header.frame_control = cpu_to_le16(frame_ctl);
        /* Copy the wireless header into the card */
@@ -929,11 +929,11 @@ static void fast_rx_path(struct atmel_private *priv,
                }
        }
 
-       memcpy(skbp, header->addr1, 6); /* destination address */
+       memcpy(skbp, header->addr1, ETH_ALEN); /* destination address */
        if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
-               memcpy(&skbp[6], header->addr3, 6);
+               memcpy(&skbp[ETH_ALEN], header->addr3, ETH_ALEN);
        else
-               memcpy(&skbp[6], header->addr2, 6); /* source address */
+               memcpy(&skbp[ETH_ALEN], header->addr2, ETH_ALEN); /* source address */
 
        skb->protocol = eth_type_trans(skb, priv->dev);
        skb->ip_summed = CHECKSUM_NONE;
@@ -969,14 +969,14 @@ static void frag_rx_path(struct atmel_private *priv,
                         u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
                         u8 frag_no, int more_frags)
 {
-       u8 mac4[6];
-       u8 source[6];
+       u8 mac4[ETH_ALEN];
+       u8 source[ETH_ALEN];
        struct sk_buff *skb;
 
        if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
-               memcpy(source, header->addr3, 6);
+               memcpy(source, header->addr3, ETH_ALEN);
        else
-               memcpy(source, header->addr2, 6);
+               memcpy(source, header->addr2, ETH_ALEN);
 
        rx_packet_loc += 24; /* skip header */
 
@@ -984,9 +984,9 @@ static void frag_rx_path(struct atmel_private *priv,
                msdu_size -= 4;
 
        if (frag_no == 0) { /* first fragment */
-               atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, 6);
-               msdu_size -= 6;
-               rx_packet_loc += 6;
+               atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, ETH_ALEN);
+               msdu_size -= ETH_ALEN;
+               rx_packet_loc += ETH_ALEN;
 
                if (priv->do_rx_crc)
                        crc = crc32_le(crc, mac4, 6);
@@ -994,9 +994,9 @@ static void frag_rx_path(struct atmel_private *priv,
                priv->frag_seq = seq_no;
                priv->frag_no = 1;
                priv->frag_len = msdu_size;
-               memcpy(priv->frag_source, source, 6);
-               memcpy(&priv->rx_buf[6], source, 6);
-               memcpy(priv->rx_buf, header->addr1, 6);
+               memcpy(priv->frag_source, source, ETH_ALEN);
+               memcpy(&priv->rx_buf[ETH_ALEN], source, ETH_ALEN);
+               memcpy(priv->rx_buf, header->addr1, ETH_ALEN);
 
                atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
 
@@ -1006,13 +1006,13 @@ static void frag_rx_path(struct atmel_private *priv,
                        atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
                        if ((crc ^ 0xffffffff) != netcrc) {
                                priv->dev->stats.rx_crc_errors++;
-                               memset(priv->frag_source, 0xff, 6);
+                               memset(priv->frag_source, 0xff, ETH_ALEN);
                        }
                }
 
        } else if (priv->frag_no == frag_no &&
                   priv->frag_seq == seq_no &&
-                  memcmp(priv->frag_source, source, 6) == 0) {
+                  memcmp(priv->frag_source, source, ETH_ALEN) == 0) {
 
                atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
                                   rx_packet_loc, msdu_size);
@@ -1024,7 +1024,7 @@ static void frag_rx_path(struct atmel_private *priv,
                        atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
                        if ((crc ^ 0xffffffff) != netcrc) {
                                priv->dev->stats.rx_crc_errors++;
-                               memset(priv->frag_source, 0xff, 6);
+                               memset(priv->frag_source, 0xff, ETH_ALEN);
                                more_frags = 1; /* don't send broken assembly */
                        }
                }
@@ -1033,7 +1033,7 @@ static void frag_rx_path(struct atmel_private *priv,
                priv->frag_no++;
 
                if (!more_frags) { /* last one */
-                       memset(priv->frag_source, 0xff, 6);
+                       memset(priv->frag_source, 0xff, ETH_ALEN);
                        if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
                                priv->dev->stats.rx_dropped++;
                        } else {
@@ -1129,7 +1129,7 @@ static void rx_done_irq(struct atmel_private *priv)
                        atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
 
                        /* we use the same buffer for frag reassembly and control packets */
-                       memset(priv->frag_source, 0xff, 6);
+                       memset(priv->frag_source, 0xff, ETH_ALEN);
 
                        if (priv->do_rx_crc) {
                                /* last 4 octets is crc */
@@ -1557,7 +1557,7 @@ struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
        priv->last_qual = jiffies;
        priv->last_beacon_timestamp = 0;
        memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
-       memset(priv->BSSID, 0, 6);
+       memset(priv->BSSID, 0, ETH_ALEN);
        priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
        priv->station_was_associated = 0;
 
@@ -1718,7 +1718,7 @@ static int atmel_get_wap(struct net_device *dev,
                         char *extra)
 {
        struct atmel_private *priv = netdev_priv(dev);
-       memcpy(awrq->sa_data, priv->CurrentBSSID, 6);
+       memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
        awrq->sa_family = ARPHRD_ETHER;
 
        return 0;
@@ -2356,7 +2356,7 @@ static int atmel_get_scan(struct net_device *dev,
        for (i = 0; i < priv->BSS_list_entries; i++) {
                iwe.cmd = SIOCGIWAP;
                iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
-               memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, 6);
+               memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, ETH_ALEN);
                current_ev = iwe_stream_add_event(info, current_ev,
                                                  extra + IW_SCAN_MAX_DATA,
                                                  &iwe, IW_EV_ADDR_LEN);
@@ -2760,7 +2760,7 @@ static void atmel_enter_state(struct atmel_private *priv, int new_state)
 static void atmel_scan(struct atmel_private *priv, int specific_ssid)
 {
        struct {
-               u8 BSSID[6];
+               u8 BSSID[ETH_ALEN];
                u8 SSID[MAX_SSID_LENGTH];
                u8 scan_type;
                u8 channel;
@@ -2771,7 +2771,7 @@ static void atmel_scan(struct atmel_private *priv, int specific_ssid)
                u8 SSID_size;
        } cmd;
 
-       memset(cmd.BSSID, 0xff, 6);
+       memset(cmd.BSSID, 0xff, ETH_ALEN);
 
        if (priv->fast_scan) {
                cmd.SSID_size = priv->SSID_size;
@@ -2816,7 +2816,7 @@ static void join(struct atmel_private *priv, int type)
 
        cmd.SSID_size = priv->SSID_size;
        memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
-       memcpy(cmd.BSSID, priv->CurrentBSSID, 6);
+       memcpy(cmd.BSSID, priv->CurrentBSSID, ETH_ALEN);
        cmd.channel = (priv->channel & 0x7f);
        cmd.BSS_type = type;
        cmd.timeout = cpu_to_le16(2000);
@@ -2837,7 +2837,7 @@ static void start(struct atmel_private *priv, int type)
 
        cmd.SSID_size = priv->SSID_size;
        memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
-       memcpy(cmd.BSSID, priv->BSSID, 6);
+       memcpy(cmd.BSSID, priv->BSSID, ETH_ALEN);
        cmd.BSS_type = type;
        cmd.channel = (priv->channel & 0x7f);
 
@@ -2883,9 +2883,9 @@ static void send_authentication_request(struct atmel_private *priv, u16 system,
        header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
        header.duration_id = cpu_to_le16(0x8000);
        header.seq_ctrl = 0;
-       memcpy(header.addr1, priv->CurrentBSSID, 6);
-       memcpy(header.addr2, priv->dev->dev_addr, 6);
-       memcpy(header.addr3, priv->CurrentBSSID, 6);
+       memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+       memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+       memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
 
        if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
                /* no WEP for authentication frames with TrSeqNo 1 */
@@ -2916,7 +2916,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
        struct ass_req_format {
                __le16 capability;
                __le16 listen_interval;
-               u8 ap[6]; /* nothing after here directly accessible */
+               u8 ap[ETH_ALEN]; /* nothing after here directly accessible */
                u8 ssid_el_id;
                u8 ssid_len;
                u8 ssid[MAX_SSID_LENGTH];
@@ -2930,9 +2930,9 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
        header.duration_id = cpu_to_le16(0x8000);
        header.seq_ctrl = 0;
 
-       memcpy(header.addr1, priv->CurrentBSSID, 6);
-       memcpy(header.addr2, priv->dev->dev_addr, 6);
-       memcpy(header.addr3, priv->CurrentBSSID, 6);
+       memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
+       memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
+       memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
 
        body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
        if (priv->wep_is_on)
@@ -2944,7 +2944,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
 
        /* current AP address - only in reassoc frame */
        if (is_reassoc) {
-               memcpy(body.ap, priv->CurrentBSSID, 6);
+               memcpy(body.ap, priv->CurrentBSSID, ETH_ALEN);
                ssid_el_p = &body.ssid_el_id;
                bodysize = 18 + priv->SSID_size;
        } else {
@@ -3021,7 +3021,7 @@ static void store_bss_info(struct atmel_private *priv,
        int i, index;
 
        for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
-               if (memcmp(bss, priv->BSSinfo[i].BSSID, 6) == 0)
+               if (memcmp(bss, priv->BSSinfo[i].BSSID, ETH_ALEN) == 0)
                        index = i;
 
        /* If we process a probe and an entry from this BSS exists
@@ -3032,7 +3032,7 @@ static void store_bss_info(struct atmel_private *priv,
                if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
                        return;
                index = priv->BSS_list_entries++;
-               memcpy(priv->BSSinfo[index].BSSID, bss, 6);
+               memcpy(priv->BSSinfo[index].BSSID, bss, ETH_ALEN);
                priv->BSSinfo[index].RSSI = rssi;
        } else {
                if (rssi > priv->BSSinfo[index].RSSI)
@@ -3212,7 +3212,7 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
        if (subtype == IEEE80211_STYPE_REASSOC_RESP &&
            status != WLAN_STATUS_ASSOC_DENIED_RATES &&
            status != WLAN_STATUS_CAPS_UNSUPPORTED &&
-           priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
+           priv->ReAssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
                mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
                priv->ReAssociationRequestRetryCnt++;
                send_association_request(priv, 1);
@@ -3235,7 +3235,7 @@ static void atmel_join_bss(struct atmel_private *priv, int bss_index)
 {
        struct bss_info *bss =  &priv->BSSinfo[bss_index];
 
-       memcpy(priv->CurrentBSSID, bss->BSSID, 6);
+       memcpy(priv->CurrentBSSID, bss->BSSID, ETH_ALEN);
        memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
 
        /* The WPA stuff cares about the current AP address */
@@ -3767,7 +3767,7 @@ static int probe_atmel_card(struct net_device *dev)
                                0x00, 0x04, 0x25, 0x00, 0x00, 0x00
                        };
                        printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
-                       memcpy(dev->dev_addr, default_mac, 6);
+                       memcpy(dev->dev_addr, default_mac, ETH_ALEN);
                }
        }
 
@@ -3819,7 +3819,7 @@ static void build_wpa_mib(struct atmel_private *priv)
 
        struct { /* NB this is matched to the hardware, don't change. */
                u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
-               u8 receiver_address[6];
+               u8 receiver_address[ETH_ALEN];
                u8 wep_is_on;
                u8 default_key; /* 0..3 */
                u8 group_key;
@@ -3837,7 +3837,7 @@ static void build_wpa_mib(struct atmel_private *priv)
 
        mib.wep_is_on = priv->wep_is_on;
        mib.exclude_unencrypted = priv->exclude_unencrypted;
-       memcpy(mib.receiver_address, priv->CurrentBSSID, 6);
+       memcpy(mib.receiver_address, priv->CurrentBSSID, ETH_ALEN);
 
        /* zero all the keys before adding in valid ones. */
        memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
index c51d2dc489e45bd3a9b2aac36bf9fa2130453e72..1d7982afc0ad6af6b1b32f92ae07fb9ef694f2e1 100644 (file)
@@ -1065,12 +1065,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
        /* Try to set the DMA mask. If it fails, try falling back to a
         * lower mask, as we can always also support a lower one. */
        while (1) {
-               err = dma_set_mask(dev->dev->dma_dev, mask);
-               if (!err) {
-                       err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
-                       if (!err)
-                               break;
-               }
+               err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+               if (!err)
+                       break;
                if (mask == DMA_BIT_MASK(64)) {
                        mask = DMA_BIT_MASK(32);
                        fallback = true;
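The nested dma_set_mask()/dma_set_coherent_mask() calls collapse into a single dma_set_mask_and_coherent(), which sets the streaming and coherent DMA masks together. A hedged sketch of the resulting fallback pattern (the device pointer and function name are illustrative):

#include <linux/dma-mapping.h>

static int example_dma_setup(struct device *dma_dev)
{
        /* Prefer 64-bit DMA; fall back to 32-bit if the platform refuses. */
        if (dma_set_mask_and_coherent(dma_dev, DMA_BIT_MASK(64)) == 0)
                return 0;
        return dma_set_mask_and_coherent(dma_dev, DMA_BIT_MASK(32));
}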
index 7c970d3ae358834a4230f35826d6c598661ee323..05ee7f10cc8f577532e9180ac9874adcf9343d0f 100644 (file)
@@ -164,7 +164,8 @@ static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
                }
                en_addr = en_addrs[override][i];
 
-               val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
+               if (e)
+                       val_addr = (i == 0) ? e->val_addr_core0 : e->val_addr_core1;
 
                if (off) {
                        b43_phy_mask(dev, en_addr, ~en_mask);
index 8cb206a89083aaa314868ef2c07b906181fe6689..4ae63f4ddfb20394d1a38c4847b5c5732887f277 100644 (file)
@@ -278,7 +278,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
        else
                txhdr->phy_rate = b43_plcp_get_ratecode_cck(rate);
        txhdr->mac_frame_ctl = wlhdr->frame_control;
-       memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+       memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
 
        /* Calculate duration for fallback rate */
        if ((rate_fb == rate) ||
index 42eb26c99e11cea54a2b063de4120ec69ae33b22..b2ed1795130bb0d7e1f10f95ab9775743b5f276b 100644 (file)
@@ -806,12 +806,9 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
        /* Try to set the DMA mask. If it fails, try falling back to a
         * lower mask, as we can always also support a lower one. */
        while (1) {
-               err = dma_set_mask(dev->dev->dma_dev, mask);
-               if (!err) {
-                       err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
-                       if (!err)
-                               break;
-               }
+               err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
+               if (!err)
+                       break;
                if (mask == DMA_BIT_MASK(64)) {
                        mask = DMA_BIT_MASK(32);
                        fallback = true;
index 849a28c803023e03570d69ca8d6de8f60462a6c6..86588c9ff0f2b6cddee0b3a7da5455fd05b29a9b 100644 (file)
@@ -215,7 +215,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
        rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
 
        txhdr->mac_frame_ctl = wlhdr->frame_control;
-       memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
+       memcpy(txhdr->tx_receiver, wlhdr->addr1, ETH_ALEN);
 
        /* Calculate duration for fallback rate */
        if ((rate_fb->hw_value == rate) ||
index e13b1a65c65fe7469882b4eee83e6a08f567bce4..3e10b801eee84bc420867815ced96a3fca3428b4 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
 #include <linux/platform_data/brcmfmac-sdio.h>
 
 #include <defs.h>
@@ -239,7 +238,9 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                func_num = SDIO_FUNC_1;
                reg_size = 4;
 
-               brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+               ret = brcmf_sdio_addrprep(sdiodev, reg_size, &addr);
+               if (ret)
+                       goto done;
        }
 
        do {
@@ -255,6 +256,7 @@ brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
                                                       func_num, addr, data, 4);
        } while (ret != 0 && retry++ < SDIOH_API_ACCESS_RETRY_LIMIT);
 
+done:
        if (ret != 0)
                brcmf_err("failed with %d\n", ret);
 
@@ -315,8 +317,36 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
                *ret = retval;
 }
 
+static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
+                            bool write, u32 addr, struct sk_buff *pkt)
+{
+       unsigned int req_sz;
+
+       brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
+       if (brcmf_pm_resume_error(sdiodev))
+               return -EIO;
+
+       /* Single skb use the standard mmc interface */
+       req_sz = pkt->len + 3;
+       req_sz &= (uint)~3;
+
+       if (write)
+               return sdio_memcpy_toio(sdiodev->func[fn], addr,
+                                       ((u8 *)(pkt->data)),
+                                       req_sz);
+       else if (fn == 1)
+               return sdio_memcpy_fromio(sdiodev->func[fn],
+                                         ((u8 *)(pkt->data)),
+                                         addr, req_sz);
+       else
+               /* function 2 read is FIFO operation */
+               return sdio_readsb(sdiodev->func[fn],
+                                  ((u8 *)(pkt->data)), addr,
+                                  req_sz);
+}
+
 /**
- * brcmf_sdio_buffrw - SDIO interface function for block data access
+ * brcmf_sdio_sglist_rw - SDIO interface function for block data access
  * @sdiodev: brcmfmac sdio device
  * @fn: SDIO function number
  * @write: direction flag
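The new single-buffer brcmf_sdio_buffrw() rounds the transfer length up to a 4-byte multiple before handing it to the mmc core; writes and function-1 reads go through sdio_memcpy_toio()/sdio_memcpy_fromio(), while function-2 reads use sdio_readsb() because that path is a FIFO. A small sketch of the rounding step only (helper name is illustrative):

/* Round a packet length up to the next multiple of 4 bytes,
 * e.g. 1501 -> 1504, before issuing the CMD53 transfer. */
static inline unsigned int sdio_pad_len(unsigned int len)
{
        return (len + 3) & ~3u;
}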
@@ -327,12 +357,13 @@ void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
  * stack for block data access. It assumes that the skb passed down by the
  * caller has already been padded and aligned.
  */
-static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
-                            bool write, u32 addr, struct sk_buff_head *pktlist)
+static int brcmf_sdio_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
+                               bool write, u32 addr,
+                               struct sk_buff_head *pktlist)
 {
        unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
-       unsigned int max_blks, max_req_sz, orig_offset, dst_offset;
-       unsigned short max_seg_sz, seg_sz;
+       unsigned int max_req_sz, orig_offset, dst_offset;
+       unsigned short max_seg_cnt, seg_sz;
        unsigned char *pkt_data, *orig_data, *dst_data;
        struct sk_buff *pkt_next = NULL, *local_pkt_next;
        struct sk_buff_head local_list, *target_list;
@@ -341,7 +372,6 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
        struct mmc_data mmc_dat;
        struct sg_table st;
        struct scatterlist *sgl;
-       struct mmc_host *host;
        int ret = 0;
 
        if (!pktlist->qlen)
@@ -351,27 +381,6 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
        if (brcmf_pm_resume_error(sdiodev))
                return -EIO;
 
-       /* Single skb use the standard mmc interface */
-       if (pktlist->qlen == 1) {
-               pkt_next = pktlist->next;
-               req_sz = pkt_next->len + 3;
-               req_sz &= (uint)~3;
-
-               if (write)
-                       return sdio_memcpy_toio(sdiodev->func[fn], addr,
-                                               ((u8 *)(pkt_next->data)),
-                                               req_sz);
-               else if (fn == 1)
-                       return sdio_memcpy_fromio(sdiodev->func[fn],
-                                                 ((u8 *)(pkt_next->data)),
-                                                 addr, req_sz);
-               else
-                       /* function 2 read is FIFO operation */
-                       return sdio_readsb(sdiodev->func[fn],
-                                          ((u8 *)(pkt_next->data)), addr,
-                                          req_sz);
-       }
-
        target_list = pktlist;
        /* for host with broken sg support, prepare a page aligned list */
        __skb_queue_head_init(&local_list);
@@ -398,38 +407,46 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
                target_list = &local_list;
        }
 
-       host = sdiodev->func[fn]->card->host;
        func_blk_sz = sdiodev->func[fn]->cur_blksize;
-       /* Blocks per command is limited by host count, host transfer
-        * size and the maximum for IO_RW_EXTENDED of 511 blocks.
-        */
-       max_blks = min_t(unsigned int, host->max_blk_count, 511u);
-       max_req_sz = min_t(unsigned int, host->max_req_size,
-                          max_blks * func_blk_sz);
-       max_seg_sz = min_t(unsigned short, host->max_segs, SG_MAX_SINGLE_ALLOC);
-       max_seg_sz = min_t(unsigned short, max_seg_sz, target_list->qlen);
+       max_req_sz = sdiodev->max_request_size;
+       max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
+                           target_list->qlen);
        seg_sz = target_list->qlen;
        pkt_offset = 0;
        pkt_next = target_list->next;
 
-       if (sg_alloc_table(&st, max_seg_sz, GFP_KERNEL)) {
+       if (sg_alloc_table(&st, max_seg_cnt, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto exit;
        }
 
+       memset(&mmc_req, 0, sizeof(struct mmc_request));
+       memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+       memset(&mmc_dat, 0, sizeof(struct mmc_data));
+
+       mmc_dat.sg = st.sgl;
+       mmc_dat.blksz = func_blk_sz;
+       mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+       mmc_cmd.opcode = SD_IO_RW_EXTENDED;
+       mmc_cmd.arg = write ? 1<<31 : 0;        /* write flag  */
+       mmc_cmd.arg |= (fn & 0x7) << 28;        /* SDIO func num */
+       mmc_cmd.arg |= 1<<27;                   /* block mode */
+       /* for function 1 the addr will be incremented */
+       mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
+       mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
+       mmc_req.cmd = &mmc_cmd;
+       mmc_req.data = &mmc_dat;
+
        while (seg_sz) {
                req_sz = 0;
                sg_cnt = 0;
-               memset(&mmc_req, 0, sizeof(struct mmc_request));
-               memset(&mmc_cmd, 0, sizeof(struct mmc_command));
-               memset(&mmc_dat, 0, sizeof(struct mmc_data));
                sgl = st.sgl;
                /* prep sg table */
                while (pkt_next != (struct sk_buff *)target_list) {
                        pkt_data = pkt_next->data + pkt_offset;
                        sg_data_sz = pkt_next->len - pkt_offset;
-                       if (sg_data_sz > host->max_seg_size)
-                               sg_data_sz = host->max_seg_size;
+                       if (sg_data_sz > sdiodev->max_segment_size)
+                               sg_data_sz = sdiodev->max_segment_size;
                        if (sg_data_sz > max_req_sz - req_sz)
                                sg_data_sz = max_req_sz - req_sz;
 
@@ -444,7 +461,7 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
                                pkt_next = pkt_next->next;
                        }
 
-                       if (req_sz >= max_req_sz || sg_cnt >= max_seg_sz)
+                       if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
                                break;
                }
                seg_sz -= sg_cnt;
@@ -455,27 +472,17 @@ static int brcmf_sdio_buffrw(struct brcmf_sdio_dev *sdiodev, uint fn,
                        ret = -ENOTBLK;
                        goto exit;
                }
-               mmc_dat.sg = st.sgl;
+
                mmc_dat.sg_len = sg_cnt;
-               mmc_dat.blksz = func_blk_sz;
                mmc_dat.blocks = req_sz / func_blk_sz;
-               mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
-               mmc_cmd.opcode = SD_IO_RW_EXTENDED;
-               mmc_cmd.arg = write ? 1<<31 : 0;        /* write flag  */
-               mmc_cmd.arg |= (fn & 0x7) << 28;        /* SDIO func num */
-               mmc_cmd.arg |= 1<<27;                   /* block mode */
-               /* incrementing addr for function 1 */
-               mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
                mmc_cmd.arg |= (addr & 0x1FFFF) << 9;   /* address */
                mmc_cmd.arg |= mmc_dat.blocks & 0x1FF;  /* block count */
-               mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
-               mmc_req.cmd = &mmc_cmd;
-               mmc_req.data = &mmc_dat;
+               /* incrementing addr for function 1 */
                if (fn == 1)
                        addr += req_sz;
 
                mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
-               mmc_wait_for_req(host, &mmc_req);
+               mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
 
                ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
                if (ret != 0) {
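Hoisting the constant parts of the mmc_request out of the loop leaves only the per-chunk fields (sg_len, blocks, address, count) to be filled per iteration. The argument word follows the SD_IO_RW_EXTENDED (CMD53) layout from the SDIO spec, as the comments in the hunk note; a hedged sketch of how such an argument is assembled:

#include <linux/types.h>

/* CMD53 argument bits: 31 = R/W, 30:28 = function, 27 = block mode,
 * 26 = incrementing address, 25:9 = register address, 8:0 = count. */
static u32 cmd53_arg(bool write, u32 fn, bool blk_mode, bool incr,
                     u32 addr, u32 count)
{
        u32 arg = write ? 1u << 31 : 0;

        arg |= (fn & 0x7) << 28;
        arg |= blk_mode ? 1u << 27 : 0;
        arg |= incr ? 1u << 26 : 0;
        arg |= (addr & 0x1ffff) << 9;
        arg |= count & 0x1ff;
        return arg;
}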
@@ -546,7 +553,6 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
 {
        uint width;
        int err = 0;
-       struct sk_buff_head pkt_list;
 
        brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pkt->len);
@@ -556,19 +562,17 @@ brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        if (err)
                goto done;
 
-       skb_queue_head_init(&pkt_list);
-       skb_queue_tail(&pkt_list, pkt);
-       err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, &pkt_list);
-       skb_dequeue_tail(&pkt_list);
+       err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pkt);
 
 done:
        return err;
 }
 
 int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                           uint flags, struct sk_buff_head *pktq)
+                           uint flags, struct sk_buff_head *pktq, uint totlen)
 {
-       uint incr_fix;
+       struct sk_buff *glom_skb;
+       struct sk_buff *skb;
        uint width;
        int err = 0;
 
@@ -580,8 +584,22 @@ int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        if (err)
                goto done;
 
-       incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
-       err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq);
+       if (pktq->qlen == 1)
+               err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, pktq->next);
+       else if (!sdiodev->sg_support) {
+               glom_skb = brcmu_pkt_buf_get_skb(totlen);
+               if (!glom_skb)
+                       return -ENOMEM;
+               err = brcmf_sdio_buffrw(sdiodev, fn, false, addr, glom_skb);
+               if (err)
+                       goto done;
+
+               skb_queue_walk(pktq, skb) {
+                       memcpy(skb->data, glom_skb->data, skb->len);
+                       skb_pull(glom_skb, skb->len);
+               }
+       } else
+               err = brcmf_sdio_sglist_rw(sdiodev, fn, false, addr, pktq);
 
 done:
        return err;
@@ -592,7 +610,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                      uint flags, u8 *buf, uint nbytes)
 {
        struct sk_buff *mypkt;
-       struct sk_buff_head pktq;
+       uint width;
        int err;
 
        mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -603,10 +621,12 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
        }
 
        memcpy(mypkt->data, buf, nbytes);
-       __skb_queue_head_init(&pktq);
-       __skb_queue_tail(&pktq, mypkt);
-       err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
-       __skb_dequeue_tail(&pktq);
+
+       width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+       err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+
+       if (!err)
+               err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, mypkt);
 
        brcmu_pkt_buf_free_skb(mypkt);
        return err;
@@ -617,16 +637,26 @@ int
 brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
                      uint flags, struct sk_buff_head *pktq)
 {
+       struct sk_buff *skb;
        uint width;
-       int err = 0;
+       int err;
 
        brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
                  fn, addr, pktq->qlen);
 
        width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
-       brcmf_sdio_addrprep(sdiodev, width, &addr);
+       err = brcmf_sdio_addrprep(sdiodev, width, &addr);
+       if (err)
+               return err;
 
-       err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
+       if (pktq->qlen == 1 || !sdiodev->sg_support)
+               skb_queue_walk(pktq, skb) {
+                       err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, skb);
+                       if (err)
+                               break;
+               }
+       else
+               err = brcmf_sdio_sglist_rw(sdiodev, fn, true, addr, pktq);
 
        return err;
 }
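The braceless if/else around skb_queue_walk() in the hunk above parses because skb_queue_walk() expands to a single for statement, so the whole loop is the body of the if and the else still binds to it. Roughly how the macro reads in <linux/skbuff.h>:

/* Roughly: iterate a queue until the walk wraps back to the list head. */
#define skb_queue_walk(queue, skb)                              \
        for (skb = (queue)->next;                               \
             skb != (struct sk_buff *)(queue);                  \
             skb = skb->next)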
@@ -639,7 +669,6 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
        struct sk_buff *pkt;
        u32 sdaddr;
        uint dsize;
-       struct sk_buff_head pkt_list;
 
        dsize = min_t(uint, SBSDIO_SB_OFT_ADDR_LIMIT, size);
        pkt = dev_alloc_skb(dsize);
@@ -648,7 +677,6 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                return -EIO;
        }
        pkt->priority = 0;
-       skb_queue_head_init(&pkt_list);
 
        /* Determine initial transfer parameters */
        sdaddr = address & SBSDIO_SB_OFT_ADDR_MASK;
@@ -676,10 +704,8 @@ brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
                skb_put(pkt, dsize);
                if (write)
                        memcpy(pkt->data, data, dsize);
-               skb_queue_tail(&pkt_list, pkt);
                bcmerror = brcmf_sdio_buffrw(sdiodev, SDIO_FUNC_1, write,
-                                            sdaddr, &pkt_list);
-               skb_dequeue_tail(&pkt_list);
+                                            sdaddr, pkt);
                if (bcmerror) {
                        brcmf_err("membytes transfer failed\n");
                        break;
index c3462b75bd080d4f7e9d72dd979f86bd5c894319..905704e335d7164b90a4ecb6fd51c2324911c154 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
 #include <linux/suspend.h>
 #include <linux/errno.h>
 #include <linux/sched.h>       /* request_irq() */
@@ -34,6 +35,7 @@
 #include <brcmu_utils.h>
 #include <brcmu_wifi.h>
 #include "sdio_host.h"
+#include "sdio_chip.h"
 #include "dhd_dbg.h"
 #include "dhd_bus.h"
 
 
 #define DMA_ALIGN_MASK 0x03
 
-#define SDIO_DEVICE_ID_BROADCOM_43143  43143
-#define SDIO_DEVICE_ID_BROADCOM_43241  0x4324
-#define SDIO_DEVICE_ID_BROADCOM_4329   0x4329
-#define SDIO_DEVICE_ID_BROADCOM_4330   0x4330
-#define SDIO_DEVICE_ID_BROADCOM_4334   0x4334
-#define SDIO_DEVICE_ID_BROADCOM_4335   0x4335
-
 #define SDIO_FUNC1_BLOCKSIZE           64
 #define SDIO_FUNC2_BLOCKSIZE           512
 
@@ -58,7 +53,8 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
        {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
-       {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4335)},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM,
+                    SDIO_DEVICE_ID_BROADCOM_4335_4339)},
        { /* end: all zeroes */ },
 };
 MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -320,6 +316,8 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
        int err;
        struct brcmf_sdio_dev *sdiodev;
        struct brcmf_bus *bus_if;
+       struct mmc_host *host;
+       uint max_blocks;
 
        brcmf_dbg(SDIO, "Enter\n");
        brcmf_dbg(SDIO, "Class=%x\n", func->class);
@@ -366,6 +364,20 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
                brcmf_err("F2 error, probe failed %d...\n", err);
                goto fail;
        }
+
+       /*
+        * determine host related variables after brcmf_sdio_probe()
+        * as func->cur_blksize is properly set and F2 init has been
+        * completed successfully.
+        */
+       host = func->card->host;
+       sdiodev->sg_support = host->max_segs > 1;
+       max_blocks = min_t(uint, host->max_blk_count, 511u);
+       sdiodev->max_request_size = min_t(uint, host->max_req_size,
+                                         max_blocks * func->cur_blksize);
+       sdiodev->max_segment_count = min_t(uint, host->max_segs,
+                                          SG_MAX_SINGLE_ALLOC);
+       sdiodev->max_segment_size = host->max_seg_size;
        brcmf_dbg(SDIO, "F2 init completed...\n");
        return 0;
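The probe path now records the host controller limits once: sg_support when the host accepts more than one segment, and a request-size cap derived from both the host maximum and the CMD53 block-count field. A sketch of the clamping, with illustrative names; 511 is the largest block count the 9-bit CMD53 count field can carry.

#include <linux/kernel.h>
#include <linux/mmc/host.h>

static uint max_glom_request(struct mmc_host *host, uint blksize)
{
        uint max_blocks = min_t(uint, host->max_blk_count, 511u);

        return min_t(uint, host->max_req_size, max_blocks * blksize);
}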
 
@@ -466,7 +478,7 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev)
 {
        brcmf_dbg(SDIO, "Enter\n");
 
-       brcmfmac_sdio_pdata = pdev->dev.platform_data;
+       brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev);
 
        if (brcmfmac_sdio_pdata->power_on)
                brcmfmac_sdio_pdata->power_on();
index 2eb9e642c9bf80d604bea8d201d86a0bb5c1a524..899a2ada5b8212f28e30632551910b67474e6080 100644 (file)
@@ -97,8 +97,6 @@
 #define        WLC_PHY_TYPE_LCN        8
 #define        WLC_PHY_TYPE_NULL       0xf
 
-#define BRCMF_EVENTING_MASK_LEN        16
-
 #define TOE_TX_CSUM_OL         0x00000001
 #define TOE_RX_CSUM_OL         0x00000002
 
@@ -632,29 +630,29 @@ struct brcmf_skb_reorder_data {
        u8 *reorder;
 };
 
-extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
+int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
 
 /* Return pointer to interface name */
-extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
+char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
 
 /* Query dongle */
-extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
-                                      uint cmd, void *buf, uint len);
-extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
-                                   void *buf, uint len);
+int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+                              void *buf, uint len);
+int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
+                            void *buf, uint len);
 
 /* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
-                              struct sk_buff *rxp);
+int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
+                       struct sk_buff *rxp);
 
-extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
-extern struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx,
-                                    s32 ifidx, char *name, u8 *mac_addr);
-extern void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
+int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
+struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
+                             char *name, u8 *mac_addr);
+void brcmf_del_if(struct brcmf_pub *drvr, s32 bssidx);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
                          enum brcmf_netif_stop_reason reason, bool state);
-extern u32 brcmf_get_chip_info(struct brcmf_if *ifp);
-extern void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
-                            bool success);
+u32 brcmf_get_chip_info(struct brcmf_if *ifp);
+void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
+                     bool success);
 
 #endif                         /* _BRCMF_H_ */
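The long runs of header churn in this and the following files drop the redundant extern keyword from function declarations; a function declaration at file scope has external linkage by default, so the two forms below are equivalent and the shorter one is what the headers now use:

extern int brcmf_bus_start(struct device *dev);  /* old style */
int brcmf_bus_start(struct device *dev);         /* same linkage, less noise */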
index 74156f84180ca19b87df7ff244358f0ab830dab3..a6eb09e5d46f408acf9205937ebffd772b15846d 100644 (file)
@@ -132,35 +132,34 @@ struct pktq *brcmf_bus_gettxq(struct brcmf_bus *bus)
  * interface functions from common layer
  */
 
-extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
-                        struct sk_buff *pkt, int prec);
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
+                     int prec);
 
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
-extern void brcmf_rx_frames(struct device *dev, struct sk_buff_head *rxlist);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
 
 /* Indication from bus module regarding presence/insertion of dongle. */
-extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+int brcmf_attach(uint bus_hdrlen, struct device *dev);
 /* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct device *dev);
+void brcmf_detach(struct device *dev);
 /* Indication from bus module that dongle should be reset */
-extern void brcmf_dev_reset(struct device *dev);
+void brcmf_dev_reset(struct device *dev);
 /* Indication from bus module to change flow-control state */
-extern void brcmf_txflowblock(struct device *dev, bool state);
+void brcmf_txflowblock(struct device *dev, bool state);
 
 /* Notify the bus has transferred the tx packet to firmware */
-extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
-                            bool success);
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
 
-extern int brcmf_bus_start(struct device *dev);
+int brcmf_bus_start(struct device *dev);
 
 #ifdef CONFIG_BRCMFMAC_SDIO
-extern void brcmf_sdio_exit(void);
-extern void brcmf_sdio_init(void);
-extern void brcmf_sdio_register(void);
+void brcmf_sdio_exit(void);
+void brcmf_sdio_init(void);
+void brcmf_sdio_register(void);
 #endif
 #ifdef CONFIG_BRCMFMAC_USB
-extern void brcmf_usb_exit(void);
-extern void brcmf_usb_register(void);
+void brcmf_usb_exit(void);
+void brcmf_usb_register(void);
 #endif
 
 #endif                         /* _BRCMF_BUS_H_ */
index 40e7f854e10f9634b44e4475b3c78cab02ea4dc9..64e9cff241b9156745bab110010520bfba730394 100644 (file)
@@ -509,9 +509,8 @@ netif_rx:
        }
 }
 
-void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
 {
-       struct sk_buff *skb, *pnext;
        struct brcmf_if *ifp;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
        struct brcmf_pub *drvr = bus_if->drvr;
@@ -519,29 +518,24 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
        u8 ifidx;
        int ret;
 
-       brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
-                 skb_queue_len(skb_list));
+       brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
 
-       skb_queue_walk_safe(skb_list, skb, pnext) {
-               skb_unlink(skb, skb_list);
-
-               /* process and remove protocol-specific header */
-               ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
-               ifp = drvr->iflist[ifidx];
-
-               if (ret || !ifp || !ifp->ndev) {
-                       if ((ret != -ENODATA) && ifp)
-                               ifp->stats.rx_errors++;
-                       brcmu_pkt_buf_free_skb(skb);
-                       continue;
-               }
+       /* process and remove protocol-specific header */
+       ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
+       ifp = drvr->iflist[ifidx];
 
-               rd = (struct brcmf_skb_reorder_data *)skb->cb;
-               if (rd->reorder)
-                       brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
-               else
-                       brcmf_netif_rx(ifp, skb);
+       if (ret || !ifp || !ifp->ndev) {
+               if ((ret != -ENODATA) && ifp)
+                       ifp->stats.rx_errors++;
+               brcmu_pkt_buf_free_skb(skb);
+               return;
        }
+
+       rd = (struct brcmf_skb_reorder_data *)skb->cb;
+       if (rd->reorder)
+               brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
+       else
+               brcmf_netif_rx(ifp, skb);
 }
 
 void brcmf_txfinalize(struct brcmf_pub *drvr, struct sk_buff *txp,
index ef9179883748f0aa9637900a6e7bab1b476cd937..53c6e710f2cb243c9997e9bb6f453a60b3f7634b 100644 (file)
  */
 
 /* Linkage, sets prot link and updates hdrlen in pub */
-extern int brcmf_proto_attach(struct brcmf_pub *drvr);
+int brcmf_proto_attach(struct brcmf_pub *drvr);
 
 /* Unlink, frees allocated protocol memory (including brcmf_proto) */
-extern void brcmf_proto_detach(struct brcmf_pub *drvr);
+void brcmf_proto_detach(struct brcmf_pub *drvr);
 
 /* Stop protocol: sync w/dongle state. */
-extern void brcmf_proto_stop(struct brcmf_pub *drvr);
+void brcmf_proto_stop(struct brcmf_pub *drvr);
 
 /* Add any protocol-specific data header.
  * Caller must reserve prot_hdrlen prepend space.
  */
-extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
-                               struct sk_buff *txp);
+void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
+                        struct sk_buff *txp);
 
 /* Sets dongle media info (drv_version, mac address). */
-extern int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
 
 #endif                         /* _BRCMF_PROTO_H_ */
index 1aa75d5951b82720f21f0ff486e701252837333b..b02953c4ade721235fbe0e4ed662aaa422ab21f7 100644 (file)
@@ -275,11 +275,6 @@ struct rte_console {
 /* Flags for SDH calls */
 #define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
 
-#define BRCMF_SDIO_FW_NAME     "brcm/brcmfmac-sdio.bin"
-#define BRCMF_SDIO_NV_NAME     "brcm/brcmfmac-sdio.txt"
-MODULE_FIRMWARE(BRCMF_SDIO_FW_NAME);
-MODULE_FIRMWARE(BRCMF_SDIO_NV_NAME);
-
 #define BRCMF_IDLE_IMMEDIATE   (-1)    /* Enter idle immediately */
 #define BRCMF_IDLE_ACTIVE      0       /* Do not request any SD clock change
                                         * when idle
@@ -454,9 +449,6 @@ struct brcmf_sdio {
        struct work_struct datawork;
        atomic_t dpc_tskcnt;
 
-       const struct firmware *firmware;
-       u32 fw_ptr;
-
        bool txoff;             /* Transmit flow-controlled */
        struct brcmf_sdio_count sdcnt;
        bool sr_enabled; /* SaveRestore enabled */
@@ -493,6 +485,100 @@ enum brcmf_sdio_frmtype {
        BRCMF_SDIO_FT_SUB,
 };
 
+#define BCM43143_FIRMWARE_NAME         "brcm/brcmfmac43143-sdio.bin"
+#define BCM43143_NVRAM_NAME            "brcm/brcmfmac43143-sdio.txt"
+#define BCM43241B0_FIRMWARE_NAME       "brcm/brcmfmac43241b0-sdio.bin"
+#define BCM43241B0_NVRAM_NAME          "brcm/brcmfmac43241b0-sdio.txt"
+#define BCM43241B4_FIRMWARE_NAME       "brcm/brcmfmac43241b4-sdio.bin"
+#define BCM43241B4_NVRAM_NAME          "brcm/brcmfmac43241b4-sdio.txt"
+#define BCM4329_FIRMWARE_NAME          "brcm/brcmfmac4329-sdio.bin"
+#define BCM4329_NVRAM_NAME             "brcm/brcmfmac4329-sdio.txt"
+#define BCM4330_FIRMWARE_NAME          "brcm/brcmfmac4330-sdio.bin"
+#define BCM4330_NVRAM_NAME             "brcm/brcmfmac4330-sdio.txt"
+#define BCM4334_FIRMWARE_NAME          "brcm/brcmfmac4334-sdio.bin"
+#define BCM4334_NVRAM_NAME             "brcm/brcmfmac4334-sdio.txt"
+#define BCM4335_FIRMWARE_NAME          "brcm/brcmfmac4335-sdio.bin"
+#define BCM4335_NVRAM_NAME             "brcm/brcmfmac4335-sdio.txt"
+
+MODULE_FIRMWARE(BCM43143_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43143_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B0_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B0_NVRAM_NAME);
+MODULE_FIRMWARE(BCM43241B4_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM43241B4_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4329_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4329_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4330_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4330_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4334_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4334_NVRAM_NAME);
+MODULE_FIRMWARE(BCM4335_FIRMWARE_NAME);
+MODULE_FIRMWARE(BCM4335_NVRAM_NAME);
+
+struct brcmf_firmware_names {
+       u32 chipid;
+       u32 revmsk;
+       const char *bin;
+       const char *nv;
+};
+
+enum brcmf_firmware_type {
+       BRCMF_FIRMWARE_BIN,
+       BRCMF_FIRMWARE_NVRAM
+};
+
+#define BRCMF_FIRMWARE_NVRAM(name) \
+       name ## _FIRMWARE_NAME, name ## _NVRAM_NAME
+
+static const struct brcmf_firmware_names brcmf_fwname_data[] = {
+       { BCM43143_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43143) },
+       { BCM43241_CHIP_ID, 0x0000001F, BRCMF_FIRMWARE_NVRAM(BCM43241B0) },
+       { BCM43241_CHIP_ID, 0xFFFFFFE0, BRCMF_FIRMWARE_NVRAM(BCM43241B4) },
+       { BCM4329_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4329) },
+       { BCM4330_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4330) },
+       { BCM4334_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4334) },
+       { BCM4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }
+};
+
+
+static const struct firmware *brcmf_sdbrcm_get_fw(struct brcmf_sdio *bus,
+                                                 enum brcmf_firmware_type type)
+{
+       const struct firmware *fw;
+       const char *name;
+       int err, i;
+
+       for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
+               if (brcmf_fwname_data[i].chipid == bus->ci->chip &&
+                   brcmf_fwname_data[i].revmsk & BIT(bus->ci->chiprev)) {
+                       switch (type) {
+                       case BRCMF_FIRMWARE_BIN:
+                               name = brcmf_fwname_data[i].bin;
+                               break;
+                       case BRCMF_FIRMWARE_NVRAM:
+                               name = brcmf_fwname_data[i].nv;
+                               break;
+                       default:
+                               brcmf_err("invalid firmware type (%d)\n", type);
+                               return NULL;
+                       }
+                       goto found;
+               }
+       }
+       brcmf_err("Unknown chipid %d [%d]\n",
+                 bus->ci->chip, bus->ci->chiprev);
+       return NULL;
+
+found:
+       err = request_firmware(&fw, name, &bus->sdiodev->func[2]->dev);
+       if ((err) || (!fw)) {
+               brcmf_err("fail to request firmware %s (%d)\n", name, err);
+               return NULL;
+       }
+
+       return fw;
+}
+
 static void pkt_align(struct sk_buff *p, int len, int align)
 {
        uint datalign;
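Firmware and NVRAM images are now selected per chip from brcmf_fwname_data, where revmsk is a bitmask over chip revisions; BCM43241, for example, maps revisions 0 through 4 (mask 0x0000001F) to the B0 images and revision 5 and above (mask 0xFFFFFFE0) to the B4 images. A hedged sketch of the match test performed inside brcmf_sdbrcm_get_fw():

#include <linux/bitops.h>

/* An entry matches when the chip id is equal and the bit for this
 * chip revision is set in the entry's revision mask. */
static bool fw_entry_matches(const struct brcmf_firmware_names *e,
                             u32 chip, u32 chiprev)
{
        return e->chipid == chip && (e->revmsk & BIT(chiprev));
}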
@@ -1061,6 +1147,8 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
        u8 rx_seq, fc, tx_seq_max;
        u32 swheader;
 
+       trace_brcmf_sdpcm_hdr(false, header);
+
        /* hw header */
        len = get_unaligned_le16(header);
        checksum = get_unaligned_le16(header + sizeof(u16));
@@ -1183,6 +1271,7 @@ static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
                     SDPCM_DOFFSET_MASK;
        *(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
        *(((__le32 *)header) + 2) = 0;
+       trace_brcmf_sdpcm_hdr(true, header);
 }
 
 static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
@@ -1303,7 +1392,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                sdio_claim_host(bus->sdiodev->func[1]);
                errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
                                bus->sdiodev->sbwad,
-                               SDIO_FUNC_2, F2SYNC, &bus->glom);
+                               SDIO_FUNC_2, F2SYNC, &bus->glom, dlen);
                sdio_release_host(bus->sdiodev->func[1]);
                bus->sdcnt.f2rxdata++;
 
@@ -1406,13 +1495,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                                           bus->glom.qlen, pfirst, pfirst->data,
                                           pfirst->len, pfirst->next,
                                           pfirst->prev);
+                       skb_unlink(pfirst, &bus->glom);
+                       brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+                       bus->sdcnt.rxglompkts++;
                }
-               /* sent any remaining packets up */
-               if (bus->glom.qlen)
-                       brcmf_rx_frames(bus->sdiodev->dev, &bus->glom);
 
                bus->sdcnt.rxglomframes++;
-               bus->sdcnt.rxglompkts += bus->glom.qlen;
        }
        return num;
 }
@@ -1557,7 +1645,6 @@ static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
 static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
 {
        struct sk_buff *pkt;            /* Packet for event or data frames */
-       struct sk_buff_head pktlist;    /* needed for bus interface */
        u16 pad;                /* Number of pad bytes to read */
        uint rxleft = 0;        /* Remaining number of frames allowed */
        int ret;                /* Return code from calls */
@@ -1759,9 +1846,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
                        continue;
                }
 
-               skb_queue_head_init(&pktlist);
-               skb_queue_tail(&pktlist, pkt);
-               brcmf_rx_frames(bus->sdiodev->dev, &pktlist);
+               brcmf_rx_frame(bus->sdiodev->dev, pkt);
        }
 
        rxcount = maxframes - rxleft;
@@ -1786,10 +1871,65 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
        return;
 }
 
+/**
+ * struct brcmf_skbuff_cb reserves first two bytes in sk_buff::cb for
+ * bus layer usage.
+ */
 /* flag marking a dummy skb added for DMA alignment requirement */
-#define DUMMY_SKB_FLAG         0x10000
+#define ALIGN_SKB_FLAG         0x8000
 /* bit mask of data length chopped from the previous packet */
-#define DUMMY_SKB_CHOP_LEN_MASK        0xffff
+#define ALIGN_SKB_CHOP_LEN_MASK        0x7fff
+
+static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio_dev *sdiodev,
+                                   struct sk_buff_head *pktq,
+                                   struct sk_buff *pkt, uint chan)
+{
+       struct sk_buff *pkt_pad;
+       u16 tail_pad, tail_chop, sg_align;
+       unsigned int blksize;
+       u8 *dat_buf;
+       int ntail;
+
+       blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+       sg_align = 4;
+       if (sdiodev->pdata && sdiodev->pdata->sd_sgentry_align > 4)
+               sg_align = sdiodev->pdata->sd_sgentry_align;
+       /* sg entry alignment should be a divisor of block size */
+       WARN_ON(blksize % sg_align);
+
+       /* Check tail padding */
+       pkt_pad = NULL;
+       tail_chop = pkt->len % sg_align;
+       tail_pad = sg_align - tail_chop;
+       tail_pad += blksize - (pkt->len + tail_pad) % blksize;
+       if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
+               pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+               if (pkt_pad == NULL)
+                       return -ENOMEM;
+               memcpy(pkt_pad->data,
+                      pkt->data + pkt->len - tail_chop,
+                      tail_chop);
+               *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
+               skb_trim(pkt, pkt->len - tail_chop);
+               __skb_queue_after(pktq, pkt, pkt_pad);
+       } else {
+               ntail = pkt->data_len + tail_pad -
+                       (pkt->end - pkt->tail);
+               if (skb_cloned(pkt) || ntail > 0)
+                       if (pskb_expand_head(pkt, 0, ntail, GFP_ATOMIC))
+                               return -ENOMEM;
+               if (skb_linearize(pkt))
+                       return -ENOMEM;
+               dat_buf = (u8 *)(pkt->data);
+               __skb_put(pkt, tail_pad);
+       }
+
+       if (pkt_pad)
+               return pkt->len + tail_chop;
+       else
+               return pkt->len - tail_pad;
+}
+
 /**
  * brcmf_sdio_txpkt_prep - packet preparation for transmit
  * @bus: brcmf_sdio structure pointer
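The dummy-alignment marker constants shrink so that the flag and the chopped length fit the two bytes reserved for the bus layer at the front of skb->cb: bit 15 (ALIGN_SKB_FLAG) marks a padding skb and the low 15 bits record how many tail bytes were chopped from the previous packet. A small sketch of the encode/decode, with an illustrative value:

u16 tail_chop = 6;                              /* illustrative chop length */
u16 cb_val = ALIGN_SKB_FLAG | (tail_chop & ALIGN_SKB_CHOP_LEN_MASK);

if (cb_val & ALIGN_SKB_FLAG)                    /* decode on the postp side */
        tail_chop = cb_val & ALIGN_SKB_CHOP_LEN_MASK;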
@@ -1806,24 +1946,16 @@ static int
 brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
                      uint chan)
 {
-       u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
-       int ntail;
-       struct sk_buff *pkt_next, *pkt_new;
+       u16 head_pad, head_align;
+       struct sk_buff *pkt_next;
        u8 *dat_buf;
-       unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+       int err;
        struct brcmf_sdio_hdrinfo hd_info = {0};
 
        /* SDIO ADMA requires at least 32 bit alignment */
        head_align = 4;
-       sg_align = 4;
-       if (bus->sdiodev->pdata) {
-               head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
-                            bus->sdiodev->pdata->sd_head_align : 4;
-               sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
-                          bus->sdiodev->pdata->sd_sgentry_align : 4;
-       }
-       /* sg entry alignment should be a divisor of block size */
-       WARN_ON(blksize % sg_align);
+       if (bus->sdiodev->pdata && bus->sdiodev->pdata->sd_head_align > 4)
+               head_align = bus->sdiodev->pdata->sd_head_align;
 
        pkt_next = pktq->next;
        dat_buf = (u8 *)(pkt_next->data);
@@ -1842,40 +1974,20 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
                memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
        }
 
-       /* Check tail padding */
-       pkt_new = NULL;
-       tail_chop = pkt_next->len % sg_align;
-       tail_pad = sg_align - tail_chop;
-       tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
-       if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
-               pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
-               if (pkt_new == NULL)
-                       return -ENOMEM;
-               memcpy(pkt_new->data,
-                      pkt_next->data + pkt_next->len - tail_chop,
-                      tail_chop);
-               *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
-               skb_trim(pkt_next, pkt_next->len - tail_chop);
-               __skb_queue_after(pktq, pkt_next, pkt_new);
+       if (bus->sdiodev->sg_support && pktq->qlen > 1) {
+               err = brcmf_sdio_txpkt_prep_sg(bus->sdiodev, pktq,
+                                              pkt_next, chan);
+               if (err < 0)
+                       return err;
+               hd_info.len = (u16)err;
        } else {
-               ntail = pkt_next->data_len + tail_pad -
-                       (pkt_next->end - pkt_next->tail);
-               if (skb_cloned(pkt_next) || ntail > 0)
-                       if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
-                               return -ENOMEM;
-               if (skb_linearize(pkt_next))
-                       return -ENOMEM;
-               dat_buf = (u8 *)(pkt_next->data);
-               __skb_put(pkt_next, tail_pad);
+               hd_info.len = pkt_next->len;
        }
 
-       /* Now prep the header */
-       if (pkt_new)
-               hd_info.len = pkt_next->len + tail_chop;
-       else
-               hd_info.len = pkt_next->len - tail_pad;
        hd_info.channel = chan;
        hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+
+       /* Now fill the header */
        brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
 
        if (BRCMF_BYTES_ON() &&
@@ -1908,8 +2020,8 @@ brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
 
        skb_queue_walk_safe(pktq, pkt_next, tmp) {
                dummy_flags = *(u32 *)(pkt_next->cb);
-               if (dummy_flags & DUMMY_SKB_FLAG) {
-                       chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
+               if (dummy_flags & ALIGN_SKB_FLAG) {
+                       chop_len = dummy_flags & ALIGN_SKB_CHOP_LEN_MASK;
                        if (chop_len) {
                                pkt_prev = pkt_next->prev;
                                memcpy(pkt_prev->data + pkt_prev->len,
@@ -3037,69 +3149,43 @@ static bool brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
        return true;
 }
 
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
-{
-       if (bus->firmware->size < bus->fw_ptr + len)
-               len = bus->firmware->size - bus->fw_ptr;
-
-       memcpy(buf, &bus->firmware->data[bus->fw_ptr], len);
-       bus->fw_ptr += len;
-       return len;
-}
-
 static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
 {
+       const struct firmware *fw;
+       int err;
        int offset;
-       uint len;
-       u8 *memblock = NULL, *memptr;
-       int ret;
-       u8 idx;
-
-       brcmf_dbg(INFO, "Enter\n");
-
-       ret = request_firmware(&bus->firmware, BRCMF_SDIO_FW_NAME,
-                              &bus->sdiodev->func[2]->dev);
-       if (ret) {
-               brcmf_err("Fail to request firmware %d\n", ret);
-               return ret;
-       }
-       bus->fw_ptr = 0;
-
-       memptr = memblock = kmalloc(MEMBLOCK + BRCMF_SDALIGN, GFP_ATOMIC);
-       if (memblock == NULL) {
-               ret = -ENOMEM;
-               goto err;
-       }
-       if ((u32)(unsigned long)memblock % BRCMF_SDALIGN)
-               memptr += (BRCMF_SDALIGN -
-                          ((u32)(unsigned long)memblock % BRCMF_SDALIGN));
-
-       offset = bus->ci->rambase;
-
-       /* Download image */
-       len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
-       idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4);
-       if (BRCMF_MAX_CORENUM != idx)
-               memcpy(&bus->ci->rst_vec, memptr, sizeof(bus->ci->rst_vec));
-       while (len) {
-               ret = brcmf_sdio_ramrw(bus->sdiodev, true, offset, memptr, len);
-               if (ret) {
+       int address;
+       int len;
+
+       fw = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_BIN);
+       if (fw == NULL)
+               return -ENOENT;
+
+       if (brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_ARM_CR4) !=
+           BRCMF_MAX_CORENUM)
+               memcpy(&bus->ci->rst_vec, fw->data, sizeof(bus->ci->rst_vec));
+
+       err = 0;
+       offset = 0;
+       address = bus->ci->rambase;
+       while (offset < fw->size) {
+               len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
+                     fw->size - offset;
+               err = brcmf_sdio_ramrw(bus->sdiodev, true, address,
+                                      (u8 *)&fw->data[offset], len);
+               if (err) {
                        brcmf_err("error %d on writing %d membytes at 0x%08x\n",
-                                 ret, MEMBLOCK, offset);
-                       goto err;
+                                 err, len, address);
+                       goto failure;
                }
-
-               offset += MEMBLOCK;
-               len = brcmf_sdbrcm_get_image((char *)memptr, MEMBLOCK, bus);
+               offset += len;
+               address += len;
        }
 
-err:
-       kfree(memblock);
-
-       release_firmware(bus->firmware);
-       bus->fw_ptr = 0;
+failure:
+       release_firmware(fw);
 
-       return ret;
+       return err;
 }
 
 /*
@@ -3111,7 +3197,8 @@ err:
  * by two NULs.
 */
 
-static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
+static int brcmf_process_nvram_vars(struct brcmf_sdio *bus,
+                                   const struct firmware *nv)
 {
        char *varbuf;
        char *dp;
@@ -3120,12 +3207,12 @@ static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
        int ret = 0;
        uint buf_len, n, len;
 
-       len = bus->firmware->size;
+       len = nv->size;
        varbuf = vmalloc(len);
        if (!varbuf)
                return -ENOMEM;
 
-       memcpy(varbuf, bus->firmware->data, len);
+       memcpy(varbuf, nv->data, len);
        dp = varbuf;
 
        findNewline = false;
@@ -3177,18 +3264,16 @@ err:
 
 static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
 {
+       const struct firmware *nv;
        int ret;
 
-       ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
-                              &bus->sdiodev->func[2]->dev);
-       if (ret) {
-               brcmf_err("Fail to request nvram %d\n", ret);
-               return ret;
-       }
+       nv = brcmf_sdbrcm_get_fw(bus, BRCMF_FIRMWARE_NVRAM);
+       if (nv == NULL)
+               return -ENOENT;
 
-       ret = brcmf_process_nvram_vars(bus);
+       ret = brcmf_process_nvram_vars(bus, nv);
 
-       release_firmware(bus->firmware);
+       release_firmware(nv);
 
        return ret;
 }
index e679214b3c98be52662e9f7f09d1e90eb0b62ec7..14bc24dc5baeb1f8272b4da0660d893c4bfc9b48 100644 (file)
@@ -102,7 +102,8 @@ struct brcmf_event;
        BRCMF_ENUM_DEF(DCS_REQUEST, 73) \
        BRCMF_ENUM_DEF(FIFO_CREDIT_MAP, 74) \
        BRCMF_ENUM_DEF(ACTION_FRAME_RX, 75) \
-       BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127)
+       BRCMF_ENUM_DEF(BCMC_CREDIT_SUPPORT, 127) \
+       BRCMF_ENUM_DEF(PSTA_PRIMARY_INTF_IND, 128)
 
 #define BRCMF_ENUM_DEF(id, val) \
        BRCMF_E_##id = (val),
@@ -114,6 +115,8 @@ enum brcmf_fweh_event_code {
 };
 #undef BRCMF_ENUM_DEF
 
+#define BRCMF_EVENTING_MASK_LEN                DIV_ROUND_UP(BRCMF_E_LAST, 8)
+
 /* flags field values in struct brcmf_event_msg */
 #define BRCMF_EVENT_MSG_LINK           0x01
 #define BRCMF_EVENT_MSG_FLUSHTXQ       0x02
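With the new PSTA_PRIMARY_INTF_IND event at code 128, the previously hard-coded 16-byte event mask (removed from dhd.h above) would no longer cover every bit, so BRCMF_EVENTING_MASK_LEN is now derived from the enum. Assuming BRCMF_E_LAST sits one past the highest code, the arithmetic works out as:

/* Assuming BRCMF_E_LAST == 129 (one past PSTA_PRIMARY_INTF_IND):
 *   BRCMF_EVENTING_MASK_LEN == DIV_ROUND_UP(129, 8)
 *                           == (129 + 8 - 1) / 8
 *                           == 17 bytes (previously a fixed 16),
 * so bit 128 of the event mask is now representable. */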
index 82f9140f3d35481766b317f4a31204fa7b4da6b7..d0cd0bf95c5af4393719a981d008049cde67756d 100644 (file)
@@ -168,6 +168,7 @@ enum brcmf_fws_skb_state {
 /**
  * struct brcmf_skbuff_cb - control buffer associated with skbuff.
  *
+ * @bus_flags: 2 bytes reserved for bus specific parameters
  * @if_flags: holds interface index and packet related flags.
  * @htod: host to device packet identifier (used in PKTTAG tlv).
  * @state: transmit state of the packet.
@@ -177,6 +178,7 @@ enum brcmf_fws_skb_state {
  * provides 48 bytes of storage so this structure should not exceed that.
  */
 struct brcmf_skbuff_cb {
+       u16 bus_flags;
        u16 if_flags;
        u32 htod;
        enum brcmf_fws_skb_state state;
index ca72177388b92726368a6764f86395db6f736b05..2096a14ef1fba5fcaf156140a64b2f52f4b67757 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/types.h>
 #include <linux/netdevice.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
 #include <linux/ssb/ssb_regs.h>
 #include <linux/bcma/bcma.h>
 
@@ -136,6 +137,8 @@ brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
        u8 idx;
 
        idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+       if (idx == BRCMF_MAX_CORENUM)
+               return false;
 
        regdata = brcmf_sdio_regrl(sdiodev,
                                   CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
@@ -154,6 +157,8 @@ brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
        bool ret;
 
        idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+       if (idx == BRCMF_MAX_CORENUM)
+               return false;
 
        regdata = brcmf_sdio_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
                                   NULL);
@@ -261,6 +266,8 @@ brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
        u32 regdata;
 
        idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+       if (idx == BRCMF_MAX_CORENUM)
+               return;
 
        /* if core is already in reset, just return */
        regdata = brcmf_sdio_regrl(sdiodev,
@@ -304,6 +311,8 @@ brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
        u8 idx;
 
        idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+       if (idx == BRCMF_MAX_CORENUM)
+               return;
 
        /*
         * Must do the disable sequence first to work for
@@ -368,6 +377,8 @@ brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
        u32 regdata;
 
        idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+       if (idx == BRCMF_MAX_CORENUM)
+               return;
 
        /* must disable first to work for arbitrary current core state */
        brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, core_bits);
@@ -444,6 +455,9 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
                                   NULL);
        ci->chip = regdata & CID_ID_MASK;
        ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+       if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+           ci->chiprev >= 2)
+               ci->chip = BCM4339_CHIP_ID;
        ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
 
        brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
@@ -541,6 +555,20 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
                ci->ramsize = 0xc0000;
                ci->rambase = 0x180000;
                break;
+       case BCM4339_CHIP_ID:
+               ci->c_inf[0].wrapbase = 0x18100000;
+               ci->c_inf[0].cib = 0x2e084411;
+               ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+               ci->c_inf[1].base = 0x18005000;
+               ci->c_inf[1].wrapbase = 0x18105000;
+               ci->c_inf[1].cib = 0x15004211;
+               ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
+               ci->c_inf[2].base = 0x18002000;
+               ci->c_inf[2].wrapbase = 0x18102000;
+               ci->c_inf[2].cib = 0x04084411;
+               ci->ramsize = 0xc0000;
+               ci->rambase = 0x180000;
+               break;
        default:
                brcmf_err("chipid 0x%x is not supported\n", ci->chip);
                return -ENODEV;
index 83c041f1bf4ad154afb0822d734818a7561f9ad6..507c61c991fa06c96b6adb1a0f99acefa5f74068 100644 (file)
 
 #define BRCMF_MAX_CORENUM      6
 
+/* SDIO device ID */
+#define SDIO_DEVICE_ID_BROADCOM_43143          43143
+#define SDIO_DEVICE_ID_BROADCOM_43241          0x4324
+#define SDIO_DEVICE_ID_BROADCOM_4329           0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330           0x4330
+#define SDIO_DEVICE_ID_BROADCOM_4334           0x4334
+#define SDIO_DEVICE_ID_BROADCOM_4335_4339      0x4335
+
 struct chip_core_info {
        u16 id;
        u16 rev;
@@ -215,17 +223,16 @@ struct sdpcmd_regs {
        u16 PAD[0x80];
 };
 
-extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
-                                 struct chip_info **ci_ptr, u32 regs);
-extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
-extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
-                                             struct chip_info *ci,
-                                             u32 drivestrength);
-extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
-extern void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
-                                          struct chip_info *ci);
-extern bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
-                                         struct chip_info *ci, char *nvram_dat,
-                                         uint nvram_sz);
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+                          struct chip_info **ci_ptr, u32 regs);
+void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+                                      struct chip_info *ci, u32 drivestrength);
+u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
+                                   struct chip_info *ci);
+bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
+                                  struct chip_info *ci, char *nvram_dat,
+                                  uint nvram_sz);
 
 #endif         /* _BRCMFMAC_SDIO_CHIP_H_ */
index 2b5407f002e53bf90b8e320d0b54384b2a200f9b..bfadcb836b6dca4cf6e11279b801d44a2f993360 100644 (file)
@@ -178,21 +178,25 @@ struct brcmf_sdio_dev {
        bool irq_en;                    /* irq enable flags */
        spinlock_t irq_en_lock;
        bool irq_wake;                  /* irq wake enable flags */
+       bool sg_support;
+       uint max_request_size;
+       ushort max_segment_count;
+       uint max_segment_size;
 };
 
 /* Register/deregister interrupt handler. */
-extern int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_intr_unregister(struct brcmf_sdio_dev *sdiodev);
 
 /* sdio device register access interface */
-extern u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
-extern void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                            u8 data, int *ret);
-extern void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                            u32 data, int *ret);
-extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
-                                  void *data, bool write);
+u8 brcmf_sdio_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+u32 brcmf_sdio_regrl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
+void brcmf_sdio_regwb(struct brcmf_sdio_dev *sdiodev, u32 addr, u8 data,
+                     int *ret);
+void brcmf_sdio_regwl(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
+                     int *ret);
+int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
+                           void *data, bool write);
 
 /* Buffer transfer to/from device (client) core via cmd53.
  *   fn:       function number
@@ -206,22 +210,18 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
  * Returns 0 or error code.
  * NOTE: Async operation is not currently supported.
  */
-extern int
-brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, struct sk_buff_head *pktq);
-extern int
-brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes);
-
-extern int
-brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, struct sk_buff *pkt);
-extern int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                     uint flags, u8 *buf, uint nbytes);
-extern int
-brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
-                       uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, struct sk_buff_head *pktq);
+int brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, u8 *buf, uint nbytes);
+
+int brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, struct sk_buff *pkt);
+int brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                         uint flags, u8 *buf, uint nbytes);
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+                           uint flags, struct sk_buff_head *pktq,
+                           uint totlen);
 
 /* Flags bits */
 
@@ -237,46 +237,43 @@ brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
  *   nbytes:   number of bytes to transfer to/from buf
  * Returns 0 or error code.
  */
-extern int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw,
-                              u32 addr, u8 *buf, uint nbytes);
-extern int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write,
-                           u32 address, u8 *data, uint size);
+int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
+                       u8 *buf, uint nbytes);
+int brcmf_sdio_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
+                    u8 *data, uint size);
 
 /* Issue an abort to the specified function */
-extern int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
 
 /* platform specific/high level functions */
-extern int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
-extern int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdio_remove(struct brcmf_sdio_dev *sdiodev);
 
 /* attach, return handler on success, NULL if failed.
  *  The handler shall be provided by all subsequent calls. No local cache
  *  cfghdl points to the starting address of pci device mapped memory
  */
-extern int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdioh_attach(struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdioh_detach(struct brcmf_sdio_dev *sdiodev);
 
 /* read or write one byte using cmd52 */
-extern int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw,
-                                   uint fnc, uint addr, u8 *byte);
+int brcmf_sdioh_request_byte(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+                            uint addr, u8 *byte);
 
 /* read or write 2/4 bytes using cmd53 */
-extern int
-brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
-                        uint rw, uint fnc, uint addr,
-                        u32 *word, uint nbyte);
+int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev, uint rw, uint fnc,
+                            uint addr, u32 *word, uint nbyte);
 
 /* Watchdog timer interface for pm ops */
-extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
-                                   bool enable);
+void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev, bool enable);
 
-extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
-extern void brcmf_sdbrcm_disconnect(void *ptr);
-extern void brcmf_sdbrcm_isr(void *arg);
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
+void brcmf_sdbrcm_disconnect(void *ptr);
+void brcmf_sdbrcm_isr(void *arg);
 
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
+void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
 
-extern void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
-                                wait_queue_head_t *wq);
-extern bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
+void brcmf_pm_resume_wait(struct brcmf_sdio_dev *sdiodev,
+                         wait_queue_head_t *wq);
+bool brcmf_pm_resume_error(struct brcmf_sdio_dev *sdiodev);
 #endif                         /* _BRCM_SDH_H_ */
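Note on the prototype changes above: extern is the default storage class for function declarations in C, so dropping it from a header prototype does not change the function's linkage. A minimal illustration of the equivalence, using a hypothetical helper name rather than any of the declarations in the diff:

    /* Both declarations have external linkage; the specifier is redundant. */
    extern int brcmf_example_readreg(struct brcmf_sdio_dev *sdiodev, u32 addr);
    int brcmf_example_readreg(struct brcmf_sdio_dev *sdiodev, u32 addr);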
index bc29171128991a9836d8237c556be00482f66d00..3c67529b90743123691d08aa7ed4d3fdb4a45c6d 100644 (file)
@@ -78,13 +78,15 @@ TRACE_EVENT(brcmf_hexdump,
        TP_ARGS(data, len),
        TP_STRUCT__entry(
                __field(unsigned long, len)
+               __field(unsigned long, addr)
                __dynamic_array(u8, hdata, len)
        ),
        TP_fast_assign(
                __entry->len = len;
+               __entry->addr = (unsigned long)data;
                memcpy(__get_dynamic_array(hdata), data, len);
        ),
-       TP_printk("hexdump [length=%lu]", __entry->len)
+       TP_printk("hexdump [addr=%lx, length=%lu]", __entry->addr, __entry->len)
 );
 
 TRACE_EVENT(brcmf_bdchdr,
@@ -108,6 +110,23 @@ TRACE_EVENT(brcmf_bdchdr,
        TP_printk("bdc: prio=%d siglen=%d", __entry->prio, __entry->siglen)
 );
 
+TRACE_EVENT(brcmf_sdpcm_hdr,
+       TP_PROTO(bool tx, void *data),
+       TP_ARGS(tx, data),
+       TP_STRUCT__entry(
+               __field(u8, tx)
+               __field(u16, len)
+               __array(u8, hdr, 12)
+       ),
+       TP_fast_assign(
+               memcpy(__entry->hdr, data, 12);
+               __entry->len = __entry->hdr[0] | (__entry->hdr[1] << 8);
+               __entry->tx = tx ? 1 : 0;
+       ),
+       TP_printk("sdpcm: %s len %u, seq %d", __entry->tx ? "TX" : "RX",
+                 __entry->len, __entry->hdr[4])
+);
+
 #ifdef CONFIG_BRCM_TRACING
 
 #undef TRACE_INCLUDE_PATH
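The brcmf_sdpcm_hdr event added above snapshots the first 12 bytes of the SDPCM header and derives the frame length from its first two bytes, least significant byte first. A minimal sketch of that same decode outside the tracing macros, with hypothetical local names:

    u8 hdr[12];
    u16 frame_len;

    memcpy(hdr, data, sizeof(hdr));      /* copy the raw SDPCM header */
    frame_len = hdr[0] | (hdr[1] << 8);  /* 16-bit little-endian length field */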
index f4aea47e0730996ec059f02dbc1f2edaf98c04d1..422f44c631756b2332dc10d4c1fb24c8172343ff 100644 (file)
@@ -435,7 +435,6 @@ static void brcmf_usb_rx_complete(struct urb *urb)
        struct brcmf_usbreq  *req = (struct brcmf_usbreq *)urb->context;
        struct brcmf_usbdev_info *devinfo = req->devinfo;
        struct sk_buff *skb;
-       struct sk_buff_head skbq;
 
        brcmf_dbg(USB, "Enter, urb->status=%d\n", urb->status);
        brcmf_usb_del_fromq(devinfo, req);
@@ -450,10 +449,8 @@ static void brcmf_usb_rx_complete(struct urb *urb)
        }
 
        if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
-               skb_queue_head_init(&skbq);
-               skb_queue_tail(&skbq, skb);
                skb_put(skb, urb->actual_length);
-               brcmf_rx_frames(devinfo->dev, &skbq);
+               brcmf_rx_frame(devinfo->dev, skb);
                brcmf_usb_rx_refill(devinfo, req);
        } else {
                brcmu_pkt_buf_free_skb(skb);
index a8a267b5b87aebfd6bd40006c628c7964afc9cca..2d08c155c23bcd93afba34a0f28d17e2e68e46c5 100644 (file)
@@ -172,19 +172,19 @@ struct si_info {
 
 
 /* AMBA Interconnect exported externs */
-extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
+u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
 
 /* === exported functions === */
-extern struct si_pub *ai_attach(struct bcma_bus *pbus);
-extern void ai_detach(struct si_pub *sih);
-extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
-extern void ai_clkctl_init(struct si_pub *sih);
-extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
-extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
-extern bool ai_deviceremoved(struct si_pub *sih);
+struct si_pub *ai_attach(struct bcma_bus *pbus);
+void ai_detach(struct si_pub *sih);
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
+void ai_clkctl_init(struct si_pub *sih);
+u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
+bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
+bool ai_deviceremoved(struct si_pub *sih);
 
 /* Enable Ex-PA for 4313 */
-extern void ai_epa_4313war(struct si_pub *sih);
+void ai_epa_4313war(struct si_pub *sih);
 
 static inline u32 ai_get_cccaps(struct si_pub *sih)
 {
index 73d01e5861090d99d875c886ae8783c9dff5561c..03bdcf29bd50ee228d269b4ae4359159ffa1df9d 100644 (file)
@@ -37,17 +37,17 @@ struct brcms_ampdu_session {
        u16 dma_len;
 };
 
-extern void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
-                                       struct brcms_c_info *wlc);
-extern int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
-                                  struct sk_buff *p);
-extern void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
+void brcms_c_ampdu_reset_session(struct brcms_ampdu_session *session,
+                                struct brcms_c_info *wlc);
+int brcms_c_ampdu_add_frame(struct brcms_ampdu_session *session,
+                           struct sk_buff *p);
+void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session);
 
-extern struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
-extern void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
-                                struct sk_buff *p, struct tx_status *txs);
-extern void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
+struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc);
+void brcms_c_ampdu_detach(struct ampdu_info *ampdu);
+void brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
+                             struct sk_buff *p, struct tx_status *txs);
+void brcms_c_ampdu_macaddr_upd(struct brcms_c_info *wlc);
+void brcms_c_ampdu_shm_upd(struct ampdu_info *ampdu);
 
 #endif                         /* _BRCM_AMPDU_H_ */
index 97ea3881a8ec7c969ee40839b65878650def99e6..a3d487ab19646bcc2dc2be25accc559052bc7afc 100644 (file)
 #ifndef _BRCM_ANTSEL_H_
 #define _BRCM_ANTSEL_H_
 
-extern struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
-extern void brcms_c_antsel_detach(struct antsel_info *asi);
-extern void brcms_c_antsel_init(struct antsel_info *asi);
-extern void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef,
-                                 bool sel,
-                                 u8 id, u8 fbid, u8 *antcfg,
-                                 u8 *fbantcfg);
-extern u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
+struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc);
+void brcms_c_antsel_detach(struct antsel_info *asi);
+void brcms_c_antsel_init(struct antsel_info *asi);
+void brcms_c_antsel_antcfg_get(struct antsel_info *asi, bool usedef, bool sel,
+                              u8 id, u8 fbid, u8 *antcfg, u8 *fbantcfg);
+u8 brcms_c_antsel_antsel2id(struct antsel_info *asi, u16 antsel);
 
 #endif /* _BRCM_ANTSEL_H_ */
index 006483a0abe6452d022600666b5ddf9ad5d28248..39dd3a5b2979a572613c5335473cd121295b4649 100644 (file)
 
 #define BRCMS_DFS_EU (BRCMS_DFS_TPC | BRCMS_RADAR_TYPE_EU) /* Flag for DFS EU */
 
-extern struct brcms_cm_info *
-brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
+struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
 
-extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
+void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
 
-extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
-                                     u16 chspec);
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec);
 
-extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
-                                  u16 chanspec,
-                                  struct txpwr_limits *txpwr);
-extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
-                                    u16 chanspec,
-                                    u8 local_constraint_qdbm);
-extern void brcms_c_regd_init(struct brcms_c_info *wlc);
+void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
+                               struct txpwr_limits *txpwr);
+void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
+                                 u8 local_constraint_qdbm);
+void brcms_c_regd_init(struct brcms_c_info *wlc);
 
 #endif                         /* _WLC_CHANNEL_H */
index 4090032e81a29f5d4e9aaf0b9258ae8d8c80bfcc..198053dfc3102ccb17a12b051372d84ff720a32e 100644 (file)
@@ -88,26 +88,26 @@ struct brcms_info {
 };
 
 /* misc callbacks */
-extern void brcms_init(struct brcms_info *wl);
-extern uint brcms_reset(struct brcms_info *wl);
-extern void brcms_intrson(struct brcms_info *wl);
-extern u32 brcms_intrsoff(struct brcms_info *wl);
-extern void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
-extern int brcms_up(struct brcms_info *wl);
-extern void brcms_down(struct brcms_info *wl);
-extern void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
-                               bool state, int prio);
-extern bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
+void brcms_init(struct brcms_info *wl);
+uint brcms_reset(struct brcms_info *wl);
+void brcms_intrson(struct brcms_info *wl);
+u32 brcms_intrsoff(struct brcms_info *wl);
+void brcms_intrsrestore(struct brcms_info *wl, u32 macintmask);
+int brcms_up(struct brcms_info *wl);
+void brcms_down(struct brcms_info *wl);
+void brcms_txflowcontrol(struct brcms_info *wl, struct brcms_if *wlif,
+                        bool state, int prio);
+bool brcms_rfkill_set_hw_state(struct brcms_info *wl);
 
 /* timer functions */
-extern struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
-                                     void (*fn) (void *arg), void *arg,
-                                     const char *name);
-extern void brcms_free_timer(struct brcms_timer *timer);
-extern void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
-extern bool brcms_del_timer(struct brcms_timer *timer);
-extern void brcms_dpc(unsigned long data);
-extern void brcms_timer(struct brcms_timer *t);
-extern void brcms_fatal_error(struct brcms_info *wl);
+struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
+                                    void (*fn) (void *arg), void *arg,
+                                    const char *name);
+void brcms_free_timer(struct brcms_timer *timer);
+void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
+bool brcms_del_timer(struct brcms_timer *timer);
+void brcms_dpc(unsigned long data);
+void brcms_timer(struct brcms_timer *t);
+void brcms_fatal_error(struct brcms_info *wl);
 
 #endif                         /* _BRCM_MAC80211_IF_H_ */
index 4608e0eb14939d12a73a4af5d3c2408e27bc877c..8138f1cff4e56a3bd801dc48f1917963012653ca 100644 (file)
@@ -1906,14 +1906,14 @@ static void brcms_c_get_macaddr(struct brcms_hardware *wlc_hw, u8 etheraddr[ETH_
 
        /* If macaddr exists, use it (Sromrev4, CIS, ...). */
        if (!is_zero_ether_addr(sprom->il0mac)) {
-               memcpy(etheraddr, sprom->il0mac, 6);
+               memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
                return;
        }
 
        if (wlc_hw->_nbands > 1)
-               memcpy(etheraddr, sprom->et1mac, 6);
+               memcpy(etheraddr, sprom->et1mac, ETH_ALEN);
        else
-               memcpy(etheraddr, sprom->il0mac, 6);
+               memcpy(etheraddr, sprom->il0mac, ETH_ALEN);
 }
 
 /* power both the pll and external oscillator on/off */
@@ -5695,7 +5695,7 @@ static bool brcms_c_chipmatch_pci(struct bcma_device *core)
                return true;
        if ((device == BCM43224_D11N_ID) || (device == BCM43225_D11N2G_ID))
                return true;
-       if (device == BCM4313_D11N2G_ID)
+       if (device == BCM4313_D11N2G_ID || device == BCM4313_CHIP_ID)
                return true;
        if ((device == BCM43236_D11N_ID) || (device == BCM43236_D11N2G_ID))
                return true;
index b5d7a38b53fe3d8baf6f8cbc28ba8cef0aaa2b8a..c4d135cff04ad2f7883c783fb96244bbbabfa370 100644 (file)
@@ -616,66 +616,54 @@ struct brcms_bss_cfg {
        struct brcms_bss_info *current_bss;
 };
 
-extern int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo,
-                          struct sk_buff *p);
-extern int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
-                  uint *blocks);
-
-extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
-extern u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec,
-                               uint mac_len);
-extern u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc,
-                                            u32 rspec,
-                                            bool use_rspec, u16 mimo_ctlchbw);
-extern u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
-                                     u32 rts_rate,
-                                     u32 frame_rate,
-                                     u8 rts_preamble_type,
-                                     u8 frame_preamble_type, uint frame_len,
-                                     bool ba);
-extern void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
-                              struct ieee80211_sta *sta,
-                              void (*dma_callback_fn));
-extern void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
-extern int brcms_c_set_nmode(struct brcms_c_info *wlc);
-extern void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc,
-                                         u32 bcn_rate);
-extern void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw,
-                                    u8 antsel_type);
-extern void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw,
-                                 u16 chanspec,
-                                 bool mute, struct txpwr_limits *txpwr);
-extern void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset,
-                             u16 v);
-extern u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
-extern void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask,
-                       u16 val, int bands);
-extern void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
-extern void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
-extern void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
-extern void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
-extern void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
-                                       u32 override_bit);
-extern void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
-                                         u32 override_bit);
-extern void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw,
-                                      int offset, int len, void *buf);
-extern u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
-extern void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw,
-                                  uint offset, const void *buf, int len,
-                                  u32 sel);
-extern void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
-                                    void *buf, int len, u32 sel);
-extern void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
-extern u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
-extern void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
-extern void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
-extern void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
-extern void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw,
-                                   u8 stf_mode);
-extern void brcms_c_init_scb(struct scb *scb);
+int brcms_c_txfifo(struct brcms_c_info *wlc, uint fifo, struct sk_buff *p);
+int brcms_b_xmtfifo_sz_get(struct brcms_hardware *wlc_hw, uint fifo,
+                          uint *blocks);
+
+int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
+u16 brcms_c_calc_lsig_len(struct brcms_c_info *wlc, u32 ratespec, uint mac_len);
+u32 brcms_c_rspec_to_rts_rspec(struct brcms_c_info *wlc, u32 rspec,
+                              bool use_rspec, u16 mimo_ctlchbw);
+u16 brcms_c_compute_rtscts_dur(struct brcms_c_info *wlc, bool cts_only,
+                              u32 rts_rate, u32 frame_rate,
+                              u8 rts_preamble_type, u8 frame_preamble_type,
+                              uint frame_len, bool ba);
+void brcms_c_inval_dma_pkts(struct brcms_hardware *hw,
+                           struct ieee80211_sta *sta, void (*dma_callback_fn));
+void brcms_c_update_probe_resp(struct brcms_c_info *wlc, bool suspend);
+int brcms_c_set_nmode(struct brcms_c_info *wlc);
+void brcms_c_beacon_phytxctl_txant_upd(struct brcms_c_info *wlc, u32 bcn_rate);
+void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type);
+void brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
+                         bool mute, struct txpwr_limits *txpwr);
+void brcms_b_write_shm(struct brcms_hardware *wlc_hw, uint offset, u16 v);
+u16 brcms_b_read_shm(struct brcms_hardware *wlc_hw, uint offset);
+void brcms_b_mhf(struct brcms_hardware *wlc_hw, u8 idx, u16 mask, u16 val,
+                int bands);
+void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags);
+void brcms_b_mctrl(struct brcms_hardware *wlc_hw, u32 mask, u32 val);
+void brcms_b_phy_reset(struct brcms_hardware *wlc_hw);
+void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw);
+void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw);
+void brcms_c_ucode_wake_override_set(struct brcms_hardware *wlc_hw,
+                                    u32 override_bit);
+void brcms_c_ucode_wake_override_clear(struct brcms_hardware *wlc_hw,
+                                      u32 override_bit);
+void brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset,
+                               int len, void *buf);
+u16 brcms_b_rate_shm_offset(struct brcms_hardware *wlc_hw, u8 rate);
+void brcms_b_copyto_objmem(struct brcms_hardware *wlc_hw, uint offset,
+                          const void *buf, int len, u32 sel);
+void brcms_b_copyfrom_objmem(struct brcms_hardware *wlc_hw, uint offset,
+                            void *buf, int len, u32 sel);
+void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode);
+u16 brcms_b_get_txant(struct brcms_hardware *wlc_hw);
+void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk);
+void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on);
+void brcms_b_txant_set(struct brcms_hardware *wlc_hw, u16 phytxant);
+void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode);
+void brcms_c_init_scb(struct scb *scb);
 
 #endif                         /* _BRCM_MAIN_H_ */
index e34a71e7d24204a0cc6dd0a3cb12fd043ec2636a..4d3734f48d9c7a444ef7aab2383655b471a0a5f2 100644 (file)
@@ -179,121 +179,106 @@ struct shared_phy_params {
 };
 
 
-extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
-extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
-                                           struct bcma_device *d11core,
-                                           int bandtype, struct wiphy *wiphy);
-extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
-
-extern bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
-                                  u16 *phyrev, u16 *radioid,
-                                  u16 *radiover);
-extern bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
-extern u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
-extern void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
-extern void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
-extern int wlc_phy_down(struct brcms_phy_pub *ppi);
-extern u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
-extern void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
-extern void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
-
-extern void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi,
-                                u16 chanspec);
-extern u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi,
-                                      u16 newch);
-extern u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
-
-extern int wlc_phy_rssi_compute(struct brcms_phy_pub *pih,
-                               struct d11rxhdr *rxh);
-extern void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
-extern void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
-
-extern void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
-extern void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
-
-
-extern void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
-
-extern void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
-                                                bool wide_filter);
-extern void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
-                                         struct brcms_chanvec *channels);
-extern u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi,
-                                        uint band);
-
-extern void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan,
-                                     u8 *_min_, u8 *_max_, int rate);
-extern void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi,
-                                             uint chan, u8 *_max_, u8 *_min_);
-extern void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi,
-                                           uint band, s32 *, s32 *, u32 *);
-extern void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi,
-                                     struct txpwr_limits *,
-                                     u16 chanspec);
-extern int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm,
-                              bool *override);
-extern int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm,
-                              bool override);
-extern void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
-                                      struct txpwr_limits *);
-extern bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi,
-                                       bool hwpwrctrl);
-extern u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
-extern u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
-extern bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain,
-                                  u8 rxchain);
-extern void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain,
-                                 u8 rxchain);
-extern void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain,
-                                 u8 *rxchain);
-extern u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
-extern s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih,
-                                u16 chanspec);
-extern void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
-
-extern void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
-extern void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
-extern void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
-extern void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
-
-extern void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
-extern void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
-extern void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
-extern void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
-
-extern void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
-
-extern void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
-                                       struct tx_power *power, uint channel);
-
-extern void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
-extern bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
-extern void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi,
-                                     u8 txpwr_percent);
-extern void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
-extern void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih,
-                                     bool bf_preempt);
-extern void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
-
-extern void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
-
-extern void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
-extern void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
-
-extern const u8 *wlc_phy_get_ofdm_rate_lookup(void);
-
-extern s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
-                                            u8 mcs_offset);
-extern s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
+struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
+struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
+                                    struct bcma_device *d11core, int bandtype,
+                                    struct wiphy *wiphy);
+void wlc_phy_detach(struct brcms_phy_pub *ppi);
+
+bool wlc_phy_get_phyversion(struct brcms_phy_pub *pih, u16 *phytype,
+                           u16 *phyrev, u16 *radioid, u16 *radiover);
+bool wlc_phy_get_encore(struct brcms_phy_pub *pih);
+u32 wlc_phy_get_coreflags(struct brcms_phy_pub *pih);
+
+void wlc_phy_hw_clk_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_hw_state_upd(struct brcms_phy_pub *ppi, bool newstate);
+void wlc_phy_init(struct brcms_phy_pub *ppi, u16 chanspec);
+void wlc_phy_watchdog(struct brcms_phy_pub *ppi);
+int wlc_phy_down(struct brcms_phy_pub *ppi);
+u32 wlc_phy_clk_bwbits(struct brcms_phy_pub *pih);
+void wlc_phy_cal_init(struct brcms_phy_pub *ppi);
+void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init);
+
+void wlc_phy_chanspec_set(struct brcms_phy_pub *ppi, u16 chanspec);
+u16 wlc_phy_chanspec_get(struct brcms_phy_pub *ppi);
+void wlc_phy_chanspec_radio_set(struct brcms_phy_pub *ppi, u16 newch);
+u16 wlc_phy_bw_state_get(struct brcms_phy_pub *ppi);
+void wlc_phy_bw_state_set(struct brcms_phy_pub *ppi, u16 bw);
+
+int wlc_phy_rssi_compute(struct brcms_phy_pub *pih, struct d11rxhdr *rxh);
+void wlc_phy_por_inform(struct brcms_phy_pub *ppi);
+void wlc_phy_noise_sample_intr(struct brcms_phy_pub *ppi);
+bool wlc_phy_bist_check_phy(struct brcms_phy_pub *ppi);
+
+void wlc_phy_set_deaf(struct brcms_phy_pub *ppi, bool user_flag);
+
+void wlc_phy_switch_radio(struct brcms_phy_pub *ppi, bool on);
+void wlc_phy_anacore(struct brcms_phy_pub *ppi, bool on);
+
+
+void wlc_phy_BSSinit(struct brcms_phy_pub *ppi, bool bonlyap, int rssi);
+
+void wlc_phy_chanspec_ch14_widefilter_set(struct brcms_phy_pub *ppi,
+                                         bool wide_filter);
+void wlc_phy_chanspec_band_validch(struct brcms_phy_pub *ppi, uint band,
+                                  struct brcms_chanvec *channels);
+u16 wlc_phy_chanspec_band_firstch(struct brcms_phy_pub *ppi, uint band);
+
+void wlc_phy_txpower_sromlimit(struct brcms_phy_pub *ppi, uint chan, u8 *_min_,
+                              u8 *_max_, int rate);
+void wlc_phy_txpower_sromlimit_max_get(struct brcms_phy_pub *ppi, uint chan,
+                                      u8 *_max_, u8 *_min_);
+void wlc_phy_txpower_boardlimit_band(struct brcms_phy_pub *ppi, uint band,
+                                    s32 *, s32 *, u32 *);
+void wlc_phy_txpower_limit_set(struct brcms_phy_pub *ppi, struct txpwr_limits *,
+                              u16 chanspec);
+int wlc_phy_txpower_get(struct brcms_phy_pub *ppi, uint *qdbm, bool *override);
+int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override);
+void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
+                               struct txpwr_limits *);
+bool wlc_phy_txpower_hw_ctrl_get(struct brcms_phy_pub *ppi);
+void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl);
+u8 wlc_phy_txpower_get_target_min(struct brcms_phy_pub *ppi);
+u8 wlc_phy_txpower_get_target_max(struct brcms_phy_pub *ppi);
+bool wlc_phy_txpower_ipa_ison(struct brcms_phy_pub *pih);
+
+void wlc_phy_stf_chain_init(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_set(struct brcms_phy_pub *pih, u8 txchain, u8 rxchain);
+void wlc_phy_stf_chain_get(struct brcms_phy_pub *pih, u8 *txchain, u8 *rxchain);
+u8 wlc_phy_stf_chain_active_get(struct brcms_phy_pub *pih);
+s8 wlc_phy_stf_ssmode_get(struct brcms_phy_pub *pih, u16 chanspec);
+void wlc_phy_ldpc_override_set(struct brcms_phy_pub *ppi, bool val);
+
+void wlc_phy_cal_perical(struct brcms_phy_pub *ppi, u8 reason);
+void wlc_phy_noise_sample_request_external(struct brcms_phy_pub *ppi);
+void wlc_phy_edcrs_lock(struct brcms_phy_pub *pih, bool lock);
+void wlc_phy_cal_papd_recal(struct brcms_phy_pub *ppi);
+
+void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val);
+void wlc_phy_clear_tssi(struct brcms_phy_pub *ppi);
+void wlc_phy_hold_upd(struct brcms_phy_pub *ppi, u32 id, bool val);
+void wlc_phy_mute_upd(struct brcms_phy_pub *ppi, bool val, u32 flags);
+
+void wlc_phy_antsel_type_set(struct brcms_phy_pub *ppi, u8 antsel_type);
+
+void wlc_phy_txpower_get_current(struct brcms_phy_pub *ppi,
+                                struct tx_power *power, uint channel);
+
+void wlc_phy_initcal_enable(struct brcms_phy_pub *pih, bool initcal);
+bool wlc_phy_test_ison(struct brcms_phy_pub *ppi);
+void wlc_phy_txpwr_percent_set(struct brcms_phy_pub *ppi, u8 txpwr_percent);
+void wlc_phy_ofdm_rateset_war(struct brcms_phy_pub *pih, bool war);
+void wlc_phy_bf_preempt_enable(struct brcms_phy_pub *pih, bool bf_preempt);
+void wlc_phy_machwcap_set(struct brcms_phy_pub *ppi, u32 machwcap);
+
+void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end);
+
+void wlc_phy_freqtrack_start(struct brcms_phy_pub *ppi);
+void wlc_phy_freqtrack_end(struct brcms_phy_pub *ppi);
+
+const u8 *wlc_phy_get_ofdm_rate_lookup(void);
+
+s8 wlc_phy_get_tx_power_offset_by_mcs(struct brcms_phy_pub *ppi,
+                                     u8 mcs_offset);
+s8 wlc_phy_get_tx_power_offset(struct brcms_phy_pub *ppi, u8 tbl_offset);
 #endif                          /* _BRCM_PHY_HAL_H_ */
index 1dc767c31653b29a9458cc08b9d113ab1c6979ad..4960f7d2680430313d42e3e7e64787a21c0f8dde 100644 (file)
@@ -910,113 +910,103 @@ struct lcnphy_radio_regs {
        u8 do_init_g;
 };
 
-extern u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
-extern void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
-
-extern u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
-extern void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-extern void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask,
-                         u16 val);
-extern void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
-
-extern void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
-
-extern void wlc_phyreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_phyreg_exit(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_enter(struct brcms_phy_pub *pih);
-extern void wlc_radioreg_exit(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_read_table(struct brcms_phy *pi,
-                              const struct phytbl_info *ptbl_info,
-                              u16 tblAddr, u16 tblDataHi,
-                              u16 tblDatalo);
-extern void wlc_phy_write_table(struct brcms_phy *pi,
-                               const struct phytbl_info *ptbl_info,
-                               u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
-extern void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id,
-                              uint tbl_offset, u16 tblAddr, u16 tblDataHi,
-                              u16 tblDataLo);
-extern void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
-
-extern void write_phy_channel_reg(struct brcms_phy *pi, uint val);
-extern void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
-
-extern u8 wlc_phy_nbits(s32 value);
-extern void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
-
-extern uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
-                                            struct radio_20xx_regs *radioregs);
-extern uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
-                                   const struct radio_regs *radioregs,
-                                   u16 core_offset);
-
-extern void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
-
-extern void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
-extern void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real,
-                                       s32 *eps_imag);
-
-extern void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
-extern void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
-
-extern bool wlc_phy_attach_nphy(struct brcms_phy *pi);
-extern bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi,
-                                     u16 chanspec);
-extern void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi,
-                                       u16 chanspec);
-extern void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi,
-                                             u16 chanspec);
-extern int wlc_phy_channel2freq(uint channel);
-extern int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
-extern int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
-
-extern void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
-extern s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
-extern void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
-
-extern void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
-extern void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
-extern void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
-extern void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz,
-                                    u16 max_val, bool iqcalmode);
-
-extern void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
-                                              u8 *max_pwr, u8 rate_id);
-extern void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
-                                           u8 rate_mcs_end,
-                                           u8 rate_ofdm_start);
-extern void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power,
-                                           u8 rate_ofdm_start,
-                                           u8 rate_ofdm_end,
-                                           u8 rate_mcs_start);
-
-extern u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
-extern s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
-extern s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
-extern void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
-extern void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
-extern void wlc_2064_vco_cal(struct brcms_phy *pi);
-
-extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
+u16 read_phy_reg(struct brcms_phy *pi, u16 addr);
+void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+
+u16 read_radio_reg(struct brcms_phy *pi, u16 addr);
+void or_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void and_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val);
+void xor_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask);
+
+void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val);
+
+void wlc_phyreg_enter(struct brcms_phy_pub *pih);
+void wlc_phyreg_exit(struct brcms_phy_pub *pih);
+void wlc_radioreg_enter(struct brcms_phy_pub *pih);
+void wlc_radioreg_exit(struct brcms_phy_pub *pih);
+
+void wlc_phy_read_table(struct brcms_phy *pi,
+                       const struct phytbl_info *ptbl_info,
+                       u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_write_table(struct brcms_phy *pi,
+                        const struct phytbl_info *ptbl_info,
+                        u16 tblAddr, u16 tblDataHi, u16 tblDatalo);
+void wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
+                       u16 tblAddr, u16 tblDataHi, u16 tblDataLo);
+void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val);
+
+void write_phy_channel_reg(struct brcms_phy *pi, uint val);
+void wlc_phy_txpower_update_shm(struct brcms_phy *pi);
+
+u8 wlc_phy_nbits(s32 value);
+void wlc_phy_compute_dB(u32 *cmplx_pwr, s8 *p_dB, u8 core);
+
+uint wlc_phy_init_radio_regs_allbands(struct brcms_phy *pi,
+                                     struct radio_20xx_regs *radioregs);
+uint wlc_phy_init_radio_regs(struct brcms_phy *pi,
+                            const struct radio_regs *radioregs,
+                            u16 core_offset);
+
+void wlc_phy_txpower_ipa_upd(struct brcms_phy *pi);
+
+void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on);
+void wlc_phy_papd_decode_epsilon(u32 epsilon, s32 *eps_real, s32 *eps_imag);
+
+void wlc_phy_cal_perical_mphase_reset(struct brcms_phy *pi);
+void wlc_phy_cal_perical_mphase_restart(struct brcms_phy *pi);
+
+bool wlc_phy_attach_nphy(struct brcms_phy *pi);
+bool wlc_phy_attach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_detach_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_init_nphy(struct brcms_phy *pi);
+void wlc_phy_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_cal_init_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi);
+
+void wlc_phy_chanspec_set_nphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec);
+void wlc_phy_chanspec_set_fixup_lcnphy(struct brcms_phy *pi, u16 chanspec);
+int wlc_phy_channel2freq(uint channel);
+int wlc_phy_chanspec_freq2bandrange_lpssn(uint);
+int wlc_phy_chanspec_bandrange_get(struct brcms_phy *, u16 chanspec);
+
+void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode);
+s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi);
+void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi);
+void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi);
+
+void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index);
+void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable);
+void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi);
+void wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
+                             bool iqcalmode);
+
+void wlc_phy_txpower_sromlimit_get_nphy(struct brcms_phy *pi, uint chan,
+                                       u8 *max_pwr, u8 rate_id);
+void wlc_phy_ofdm_to_mcs_powers_nphy(u8 *power, u8 rate_mcs_start,
+                                    u8 rate_mcs_end, u8 rate_ofdm_start);
+void wlc_phy_mcs_to_ofdm_powers_nphy(u8 *power, u8 rate_ofdm_start,
+                                    u8 rate_ofdm_end, u8 rate_mcs_start);
+
+u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode);
+s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode);
+s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode);
+void wlc_phy_carrier_suppress_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel);
+void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode);
+void wlc_2064_vco_cal(struct brcms_phy *pi);
+
+void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
 
 #define LCNPHY_TBL_ID_PAPDCOMPDELTATBL 0x18
 #define LCNPHY_TX_POWER_TABLE_SIZE     128
@@ -1030,26 +1020,24 @@ extern void wlc_phy_txpower_recalc_target(struct brcms_phy *pi);
 
 #define LCNPHY_TX_PWR_CTRL_TEMPBASED   0xE001
 
-extern void wlc_lcnphy_write_table(struct brcms_phy *pi,
-                                  const struct phytbl_info *pti);
-extern void wlc_lcnphy_read_table(struct brcms_phy *pi,
-                                 struct phytbl_info *pti);
-extern void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
-extern void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
-extern void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
-extern u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
-extern void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0,
-                                     u8 *eq0, u8 *fi0, u8 *fq0);
-extern void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
-extern void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
-extern bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
-extern void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
-extern s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
-extern void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr,
-                               s8 *cck_pwr);
-extern void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
-
-extern s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
+void wlc_lcnphy_write_table(struct brcms_phy *pi,
+                           const struct phytbl_info *pti);
+void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti);
+void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b);
+void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq);
+void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b);
+u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi);
+void wlc_lcnphy_get_radio_loft(struct brcms_phy *pi, u8 *ei0, u8 *eq0, u8 *fi0,
+                              u8 *fq0);
+void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode);
+void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode);
+bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi);
+void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi);
+s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1);
+void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr);
+void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi);
+
+s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index);
 
 #define NPHY_MAX_HPVGA1_INDEX          10
 #define NPHY_DEF_HPVGA1_INDEXLIMIT     7
@@ -1060,9 +1048,8 @@ struct phy_iq_est {
        u32 q_pwr;
 };
 
-extern void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi,
-                                              bool enable);
-extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
+void wlc_phy_stay_in_carriersearch_nphy(struct brcms_phy *pi, bool enable);
+void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
 
 #define wlc_phy_write_table_nphy(pi, pti) \
        wlc_phy_write_table(pi, pti, 0x72, 0x74, 0x73)
@@ -1076,10 +1063,10 @@ extern void wlc_nphy_deaf_mode(struct brcms_phy *pi, bool mode);
 #define wlc_nphy_table_data_write(pi, w, v) \
        wlc_phy_table_data_write((pi), (w), (v))
 
-extern void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o,
-                                   u32 w, void *d);
-extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
-                                    u32, const void *);
+void wlc_phy_table_read_nphy(struct brcms_phy *pi, u32, u32 l, u32 o, u32 w,
+                            void *d);
+void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32, u32,
+                             const void *);
 
 #define        PHY_IPA(pi) \
        ((pi->ipa2g_on && CHSPEC_IS2G(pi->radio_chanspec)) || \
@@ -1089,73 +1076,67 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
        if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
                (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
 
-extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
-extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
-extern void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
-
-extern u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
-extern void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
-
-extern void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
-extern s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
-
-extern u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
-
-extern void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
-                                  u16 num_samps, u8 wait_time,
-                                  u8 wait_for_crs);
-
-extern void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
-                                     struct nphy_iq_comp *comp);
-extern void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
-
-extern void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih,
-                                        u8 rxcore_bitmask);
-extern u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
-
-extern void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
-extern void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
-extern void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
-extern u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
-
-extern struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
-extern int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
-                                  struct nphy_txgains target_gain,
-                                  bool full, bool m);
-extern int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi,
-                                struct nphy_txgains target_gain,
-                                u8 type, bool d);
-extern void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
-                                    s8 txpwrindex, bool res);
-extern void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
-extern int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
-                                 s32 *rssi_buf, u8 nsamps);
-extern void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
-extern int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
-extern void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi,
-                                       s32 dBm_targetpower, bool debug);
-extern int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
-                               u8 mode, u8, bool);
-extern void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
-extern void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
-                                    u8 num_samps);
-extern void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
-
-extern int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi,
-                                    struct d11rxhdr *rxh);
+void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
+void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
+void wlc_phy_pa_override_nphy(struct brcms_phy *pi, bool en);
+
+u8 wlc_phy_get_chan_freq_range_nphy(struct brcms_phy *pi, uint chan);
+void wlc_phy_switch_radio_nphy(struct brcms_phy *pi, bool on);
+
+void wlc_phy_stf_chain_upd_nphy(struct brcms_phy *pi);
+
+void wlc_phy_force_rfseq_nphy(struct brcms_phy *pi, u8 cmd);
+s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi);
+
+u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val);
+
+void wlc_phy_rx_iq_est_nphy(struct brcms_phy *pi, struct phy_iq_est *est,
+                           u16 num_samps, u8 wait_time, u8 wait_for_crs);
+
+void wlc_phy_rx_iq_coeffs_nphy(struct brcms_phy *pi, u8 write,
+                              struct nphy_iq_comp *comp);
+void wlc_phy_aci_and_noise_reduction_nphy(struct brcms_phy *pi);
+
+void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask);
+u8 wlc_phy_rxcore_getstate_nphy(struct brcms_phy_pub *pih);
+
+void wlc_phy_txpwrctrl_enable_nphy(struct brcms_phy *pi, u8 ctrl_type);
+void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_apply_nphy(struct brcms_phy *pi);
+void wlc_phy_txpwr_papd_cal_nphy(struct brcms_phy *pi);
+u16 wlc_phy_txpwr_idx_get_nphy(struct brcms_phy *pi);
+
+struct nphy_txgains wlc_phy_get_tx_gain_nphy(struct brcms_phy *pi);
+int wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi,
+                           struct nphy_txgains target_gain, bool full, bool m);
+int wlc_phy_cal_rxiq_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
+                         u8 type, bool d);
+void wlc_phy_txpwr_index_nphy(struct brcms_phy *pi, u8 core_mask,
+                             s8 txpwrindex, bool res);
+void wlc_phy_rssisel_nphy(struct brcms_phy *pi, u8 core, u8 rssi_type);
+int wlc_phy_poll_rssi_nphy(struct brcms_phy *pi, u8 rssi_type,
+                          s32 *rssi_buf, u8 nsamps);
+void wlc_phy_rssi_cal_nphy(struct brcms_phy *pi);
+int wlc_phy_aci_scan_nphy(struct brcms_phy *pi);
+void wlc_phy_cal_txgainctrl_nphy(struct brcms_phy *pi, s32 dBm_targetpower,
+                                bool debug);
+int wlc_phy_tx_tone_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val, u8 mode,
+                        u8, bool);
+void wlc_phy_stopplayback_nphy(struct brcms_phy *pi);
+void wlc_phy_est_tonepwr_nphy(struct brcms_phy *pi, s32 *qdBm_pwrbuf,
+                             u8 num_samps);
+void wlc_phy_radio205x_vcocal_nphy(struct brcms_phy *pi);
+
+int wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh);
 
 #define NPHY_TESTPATTERN_BPHY_EVM   0
 #define NPHY_TESTPATTERN_BPHY_RFCS  1
 
-extern void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
+void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs);
 
 void wlc_phy_get_pwrdet_offsets(struct brcms_phy *pi, s8 *cckoffset,
                                s8 *ofdmoffset);
-extern s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi,
-                                 u16 chanspec);
+s8 wlc_phy_upd_rssi_offset(struct brcms_phy *pi, s8 rssi, u16 chanspec);
 
-extern bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
+bool wlc_phy_n_txpower_ipa_ison(struct brcms_phy *pih);
 #endif                         /* _BRCM_PHY_INT_H_ */
index 2c5b66b75970939ba6ee0ffe2b709faecbc3428e..dd8774717adee148134cb85229f422c72fa143e2 100644 (file)
 
 struct brcms_phy;
 
-extern struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
-                                                struct brcms_info *wl,
-                                                struct brcms_c_info *wlc);
-extern void wlc_phy_shim_detach(struct phy_shim_info *physhim);
+struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
+                                         struct brcms_info *wl,
+                                         struct brcms_c_info *wlc);
+void wlc_phy_shim_detach(struct phy_shim_info *physhim);
 
 /* PHY to WL utility functions */
-extern struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
-                                           void (*fn) (struct brcms_phy *pi),
-                                           void *arg, const char *name);
-extern void wlapi_free_timer(struct wlapi_timer *t);
-extern void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
-extern bool wlapi_del_timer(struct wlapi_timer *t);
-extern void wlapi_intrson(struct phy_shim_info *physhim);
-extern u32 wlapi_intrsoff(struct phy_shim_info *physhim);
-extern void wlapi_intrsrestore(struct phy_shim_info *physhim,
-                              u32 macintmask);
-
-extern void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset,
-                                u16 v);
-extern u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
-extern void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx,
-                          u16 mask, u16 val, int bands);
-extern void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
-extern void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
-extern void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
-extern void wlapi_enable_mac(struct phy_shim_info *physhim);
-extern void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask,
-                            u32 val);
-extern void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
-extern void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
-extern void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
-extern void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *
-                                                     physhim);
-extern void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *
-                                                       physhim);
-extern void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
-                                         int len, void *buf);
-extern u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim,
-                                        u8 rate);
-extern void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
-extern void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint,
-                                 void *buf, int, u32 sel);
-extern void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint,
-                               const void *buf, int, u32);
-
-extern void wlapi_high_update_phy_mode(struct phy_shim_info *physhim,
-                                      u32 phy_mode);
-extern u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
+struct wlapi_timer *wlapi_init_timer(struct phy_shim_info *physhim,
+                                    void (*fn)(struct brcms_phy *pi),
+                                    void *arg, const char *name);
+void wlapi_free_timer(struct wlapi_timer *t);
+void wlapi_add_timer(struct wlapi_timer *t, uint ms, int periodic);
+bool wlapi_del_timer(struct wlapi_timer *t);
+void wlapi_intrson(struct phy_shim_info *physhim);
+u32 wlapi_intrsoff(struct phy_shim_info *physhim);
+void wlapi_intrsrestore(struct phy_shim_info *physhim, u32 macintmask);
+
+void wlapi_bmac_write_shm(struct phy_shim_info *physhim, uint offset, u16 v);
+u16 wlapi_bmac_read_shm(struct phy_shim_info *physhim, uint offset);
+void wlapi_bmac_mhf(struct phy_shim_info *physhim, u8 idx, u16 mask, u16 val,
+                   int bands);
+void wlapi_bmac_corereset(struct phy_shim_info *physhim, u32 flags);
+void wlapi_suspend_mac_and_wait(struct phy_shim_info *physhim);
+void wlapi_switch_macfreq(struct phy_shim_info *physhim, u8 spurmode);
+void wlapi_enable_mac(struct phy_shim_info *physhim);
+void wlapi_bmac_mctrl(struct phy_shim_info *physhim, u32 mask, u32 val);
+void wlapi_bmac_phy_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_bw_set(struct phy_shim_info *physhim, u16 bw);
+void wlapi_bmac_phyclk_fgc(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_macphyclk_set(struct phy_shim_info *physhim, bool clk);
+void wlapi_bmac_core_phypll_ctl(struct phy_shim_info *physhim, bool on);
+void wlapi_bmac_core_phypll_reset(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_set(struct phy_shim_info *physhim);
+void wlapi_bmac_ucode_wake_override_phyreg_clear(struct phy_shim_info *physhim);
+void wlapi_bmac_write_template_ram(struct phy_shim_info *physhim, int o,
+                                  int len, void *buf);
+u16 wlapi_bmac_rate_shm_offset(struct phy_shim_info *physhim, u8 rate);
+void wlapi_ucode_sample_init(struct phy_shim_info *physhim);
+void wlapi_copyfrom_objmem(struct phy_shim_info *physhim, uint, void *buf,
+                          int, u32 sel);
+void wlapi_copyto_objmem(struct phy_shim_info *physhim, uint, const void *buf,
+                        int, u32);
+
+void wlapi_high_update_phy_mode(struct phy_shim_info *physhim, u32 phy_mode);
+u16 wlapi_bmac_get_txant(struct phy_shim_info *physhim);
 
 #endif                         /* _BRCM_PHY_SHIM_H_ */
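
wlapi_init_timer() above hands the shim an expiry callback typed against struct brcms_phy rather than a bare void pointer, which lets the compiler catch a mismatched handler at the registration site. A small self-contained sketch of that typed-callback pattern (all names hypothetical, not the shim's real implementation):

    #include <stdio.h>

    struct demo_phy {
            int unit;
    };

    struct demo_timer {
            void (*fn)(struct demo_phy *pi);        /* typed expiry callback */
            struct demo_phy *arg;
    };

    static void demo_init_timer(struct demo_timer *t,
                                void (*fn)(struct demo_phy *pi),
                                struct demo_phy *arg)
    {
            t->fn = fn;
            t->arg = arg;
    }

    static void demo_watchdog(struct demo_phy *pi)
    {
            printf("timer fired for phy %d\n", pi->unit);
    }

    int main(void)
    {
            struct demo_phy phy = { .unit = 0 };
            struct demo_timer t;

            demo_init_timer(&t, demo_watchdog, &phy);
            t.fn(t.arg);                            /* simulate expiry */
            return 0;
    }
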
index 20e2012d5a3a2d2d83c8b7544f6828d14d3f6f33..a014bbc4f93555cc789bbc337adc36491da12902 100644 (file)
@@ -20,7 +20,7 @@
 
 #include "types.h"
 
-extern u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
-extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
+u16 si_pmu_fast_pwrup_delay(struct si_pub *sih);
+u32 si_pmu_measure_alpclk(struct si_pub *sih);
 
 #endif /* _BRCM_PMU_H_ */
index d36ea5e1cc494231e56dd18bb2f0dadc65f4304d..4da38cb4f31854a60878c5da31c22cf4e39980b6 100644 (file)
@@ -266,83 +266,76 @@ struct brcms_antselcfg {
 };
 
 /* common functions for every port */
-extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
-              bool piomode, uint *perr);
-extern uint brcms_c_detach(struct brcms_c_info *wlc);
-extern int brcms_c_up(struct brcms_c_info *wlc);
-extern uint brcms_c_down(struct brcms_c_info *wlc);
-
-extern bool brcms_c_chipmatch(struct bcma_device *core);
-extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
-extern void brcms_c_reset(struct brcms_c_info *wlc);
-
-extern void brcms_c_intrson(struct brcms_c_info *wlc);
-extern u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
-extern void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
-extern bool brcms_c_intrsupd(struct brcms_c_info *wlc);
-extern bool brcms_c_isr(struct brcms_c_info *wlc);
-extern bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
-extern bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc,
-                                    struct sk_buff *sdu,
-                                    struct ieee80211_hw *hw);
-extern bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
-extern void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx,
-                                  int val);
-extern int brcms_c_get_header_len(void);
-extern void brcms_c_set_addrmatch(struct brcms_c_info *wlc,
-                                 int match_reg_offset,
-                                 const u8 *addr);
-extern void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
-                             const struct ieee80211_tx_queue_params *arg,
-                             bool suspend);
-extern struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
-extern void brcms_c_ampdu_flush(struct brcms_c_info *wlc,
-                           struct ieee80211_sta *sta, u16 tid);
-extern void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
-                                        u8 ba_wsize, uint max_rx_ampdu_bytes);
-extern int brcms_c_module_register(struct brcms_pub *pub,
-                                  const char *name, struct brcms_info *hdl,
-                                  int (*down_fn)(void *handle));
-extern int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
-                                    struct brcms_info *hdl);
-extern void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
-extern void brcms_c_enable_mac(struct brcms_c_info *wlc);
-extern void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
-extern void brcms_c_scan_start(struct brcms_c_info *wlc);
-extern void brcms_c_scan_stop(struct brcms_c_info *wlc);
-extern int brcms_c_get_curband(struct brcms_c_info *wlc);
-extern int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
-extern int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
-extern void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
+struct brcms_c_info *brcms_c_attach(struct brcms_info *wl,
+                                   struct bcma_device *core, uint unit,
+                                   bool piomode, uint *perr);
+uint brcms_c_detach(struct brcms_c_info *wlc);
+int brcms_c_up(struct brcms_c_info *wlc);
+uint brcms_c_down(struct brcms_c_info *wlc);
+
+bool brcms_c_chipmatch(struct bcma_device *core);
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
+void brcms_c_reset(struct brcms_c_info *wlc);
+
+void brcms_c_intrson(struct brcms_c_info *wlc);
+u32 brcms_c_intrsoff(struct brcms_c_info *wlc);
+void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask);
+bool brcms_c_intrsupd(struct brcms_c_info *wlc);
+bool brcms_c_isr(struct brcms_c_info *wlc);
+bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded);
+bool brcms_c_sendpkt_mac80211(struct brcms_c_info *wlc, struct sk_buff *sdu,
+                             struct ieee80211_hw *hw);
+bool brcms_c_aggregatable(struct brcms_c_info *wlc, u8 tid);
+void brcms_c_protection_upd(struct brcms_c_info *wlc, uint idx, int val);
+int brcms_c_get_header_len(void);
+void brcms_c_set_addrmatch(struct brcms_c_info *wlc, int match_reg_offset,
+                          const u8 *addr);
+void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
+                          const struct ieee80211_tx_queue_params *arg,
+                          bool suspend);
+struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc);
+void brcms_c_ampdu_flush(struct brcms_c_info *wlc, struct ieee80211_sta *sta,
+                        u16 tid);
+void brcms_c_ampdu_tx_operational(struct brcms_c_info *wlc, u8 tid,
+                                 u8 ba_wsize, uint max_rx_ampdu_bytes);
+int brcms_c_module_register(struct brcms_pub *pub, const char *name,
+                           struct brcms_info *hdl,
+                           int (*down_fn)(void *handle));
+int brcms_c_module_unregister(struct brcms_pub *pub, const char *name,
+                             struct brcms_info *hdl);
+void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc);
+void brcms_c_enable_mac(struct brcms_c_info *wlc);
+void brcms_c_associate_upd(struct brcms_c_info *wlc, bool state);
+void brcms_c_scan_start(struct brcms_c_info *wlc);
+void brcms_c_scan_stop(struct brcms_c_info *wlc);
+int brcms_c_get_curband(struct brcms_c_info *wlc);
+int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel);
+int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl);
+void brcms_c_get_current_rateset(struct brcms_c_info *wlc,
                                 struct brcm_rateset *currs);
-extern int brcms_c_set_rateset(struct brcms_c_info *wlc,
-                                       struct brcm_rateset *rs);
-extern int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
-extern u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
-extern void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
+int brcms_c_set_rateset(struct brcms_c_info *wlc, struct brcm_rateset *rs);
+int brcms_c_set_beacon_period(struct brcms_c_info *wlc, u16 period);
+u16 brcms_c_get_phy_type(struct brcms_c_info *wlc, int phyidx);
+void brcms_c_set_shortslot_override(struct brcms_c_info *wlc,
                                    s8 sslot_override);
-extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
-                                       u8 interval);
-extern u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
-extern void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
-extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
-extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
-extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
-extern bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
-extern void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr,
-                            const u8 *bssid, u8 *ssid, size_t ssid_len);
-extern void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
-extern void brcms_c_update_beacon(struct brcms_c_info *wlc);
-extern void brcms_c_set_new_beacon(struct brcms_c_info *wlc,
-                                  struct sk_buff *beacon, u16 tim_offset,
-                                  u16 dtim_period);
-extern void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
-                                      struct sk_buff *probe_resp);
-extern void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
-extern void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid,
-                            size_t ssid_len);
+void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval);
+u64 brcms_c_tsf_get(struct brcms_c_info *wlc);
+void brcms_c_tsf_set(struct brcms_c_info *wlc, u64 tsf);
+int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
+int brcms_c_get_tx_power(struct brcms_c_info *wlc);
+bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+void brcms_c_mute(struct brcms_c_info *wlc, bool on);
+bool brcms_c_tx_flush_completed(struct brcms_c_info *wlc);
+void brcms_c_start_station(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_start_ap(struct brcms_c_info *wlc, u8 *addr, const u8 *bssid,
+                     u8 *ssid, size_t ssid_len);
+void brcms_c_start_adhoc(struct brcms_c_info *wlc, u8 *addr);
+void brcms_c_update_beacon(struct brcms_c_info *wlc);
+void brcms_c_set_new_beacon(struct brcms_c_info *wlc, struct sk_buff *beacon,
+                           u16 tim_offset, u16 dtim_period);
+void brcms_c_set_new_probe_resp(struct brcms_c_info *wlc,
+                               struct sk_buff *probe_resp);
+void brcms_c_enable_probe_resp(struct brcms_c_info *wlc, bool enable);
+void brcms_c_set_ssid(struct brcms_c_info *wlc, u8 *ssid, size_t ssid_len);
 
 #endif                         /* _BRCM_PUB_H_ */
index 980d578825cc0b3b131995bfcf6926bbab0bbbf1..5bb88b78ed648407dbbcb39db73d86ea503d76c9 100644 (file)
@@ -216,34 +216,30 @@ static inline u8 cck_phy2mac_rate(u8 signal)
 
 /* sanitize, and sort a rateset with the basic bit(s) preserved, validate
  * rateset */
-extern bool
-brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
-                                      const struct brcms_c_rateset *hw_rs,
-                                      bool check_brate, u8 txstreams);
+bool brcms_c_rate_hwrs_filter_sort_validate(struct brcms_c_rateset *rs,
+                                           const struct brcms_c_rateset *hw_rs,
+                                           bool check_brate, u8 txstreams);
 /* copy rateset src to dst as-is (no masking or sorting) */
-extern void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
-                            struct brcms_c_rateset *dst);
+void brcms_c_rateset_copy(const struct brcms_c_rateset *src,
+                         struct brcms_c_rateset *dst);
 
 /* would be nice to have these documented ... */
-extern u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
-
-extern void brcms_c_rateset_filter(struct brcms_c_rateset *src,
-       struct brcms_c_rateset *dst, bool basic_only, u8 rates, uint xmask,
-       bool mcsallow);
-
-extern void
-brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
-                       const struct brcms_c_rateset *rs_hw, uint phy_type,
-                       int bandtype, bool cck_only, uint rate_mask,
-                       bool mcsallow, u8 bw, u8 txstreams);
-
-extern s16 brcms_c_rate_legacy_phyctl(uint rate);
-
-extern void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
-extern void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
-extern void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset,
-                                     u8 txstreams);
-extern void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset,
-                                         u8 bw);
+u32 brcms_c_compute_rspec(struct d11rxhdr *rxh, u8 *plcp);
+
+void brcms_c_rateset_filter(struct brcms_c_rateset *src,
+                           struct brcms_c_rateset *dst, bool basic_only,
+                           u8 rates, uint xmask, bool mcsallow);
+
+void brcms_c_rateset_default(struct brcms_c_rateset *rs_tgt,
+                            const struct brcms_c_rateset *rs_hw, uint phy_type,
+                            int bandtype, bool cck_only, uint rate_mask,
+                            bool mcsallow, u8 bw, u8 txstreams);
+
+s16 brcms_c_rate_legacy_phyctl(uint rate);
+
+void brcms_c_rateset_mcs_upd(struct brcms_c_rateset *rs, u8 txstreams);
+void brcms_c_rateset_mcs_clear(struct brcms_c_rateset *rateset);
+void brcms_c_rateset_mcs_build(struct brcms_c_rateset *rateset, u8 txstreams);
+void brcms_c_rateset_bw_mcs_filter(struct brcms_c_rateset *rateset, u8 bw);
 
 #endif                         /* _BRCM_RATE_H_ */
index 19f6580f69be27890eea13ddfe7c689cbe6f3089..ba9493009a3340db524bfef6dc5359c6a8be0ec4 100644 (file)
 
 #include "types.h"
 
-extern int brcms_c_stf_attach(struct brcms_c_info *wlc);
-extern void brcms_c_stf_detach(struct brcms_c_info *wlc);
+int brcms_c_stf_attach(struct brcms_c_info *wlc);
+void brcms_c_stf_detach(struct brcms_c_info *wlc);
 
-extern void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
-                                       u16 *ss_algo_channel,
-                                       u16 chanspec);
-extern int brcms_c_stf_ss_update(struct brcms_c_info *wlc,
-                            struct brcms_band *band);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val,
-                              bool force);
-extern bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
-extern void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
-extern void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
-extern u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc,
-                                     u32 rspec);
-extern u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc,
-                                       u32 rspec);
+void brcms_c_tempsense_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc,
+                                    u16 *ss_algo_channel, u16 chanspec);
+int brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
+bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
+void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
+void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
+u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec);
+u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec);
 
 #endif                         /* _BRCM_STF_H_ */
index 18750a814b4f219f918aaed081d8e99f01e7f6f4..c87dd89bcb78bddd2774bb7275741b1507aaf8d3 100644 (file)
@@ -43,16 +43,14 @@ struct brcms_ucode {
        u32 *bcm43xx_bomminor;
 };
 
-extern int
-brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
+int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode);
 
-extern void brcms_ucode_data_free(struct brcms_ucode *ucode);
+void brcms_ucode_data_free(struct brcms_ucode *ucode);
 
-extern int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf,
-                               unsigned int idx);
-extern int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
-                                unsigned int idx);
-extern void brcms_ucode_free_buf(void *);
-extern int  brcms_check_firmwares(struct brcms_info *wl);
+int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, unsigned int idx);
+int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes,
+                         unsigned int idx);
+void brcms_ucode_free_buf(void *);
+int  brcms_check_firmwares(struct brcms_info *wl);
 
 #endif /* _BRCM_UCODE_H_ */
index c1fe245bb07ee18aba557892b274435a3ad40fcb..84113ea16f8434fcd0ee4383aeea18c0dfe9c480 100644 (file)
@@ -41,5 +41,6 @@
 #define BCM4331_CHIP_ID                0x4331
 #define BCM4334_CHIP_ID                0x4334
 #define BCM4335_CHIP_ID                0x4335
+#define BCM4339_CHIP_ID                0x4339
 
 #endif                         /* _BRCM_HW_IDS_H_ */
index 92623f02b1c045460c28d860520000c127cfc611..8660a2cba09810428f127967c996a02cfbc174a1 100644 (file)
@@ -140,6 +140,6 @@ struct brcmu_d11inf {
        void (*decchspec)(struct brcmu_chan *ch);
 };
 
-extern void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
+void brcmu_d11_attach(struct brcmu_d11inf *d11inf);
 
 #endif /* _BRCMU_CHANNELS_H_ */
index 898cacb8d01df299f1e4dce5ebc72fc008e5b4f2..8ba445b3fd72a92ca78cbd1042dcc38528df8efb 100644 (file)
@@ -114,31 +114,29 @@ static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
        return skb_peek_tail(&pq->q[prec].skblist);
 }
 
-extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
-                                struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
-                                     struct sk_buff *p);
-extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
-extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
-                                            bool (*match_fn)(struct sk_buff *p,
-                                                             void *arg),
-                                            void *arg);
+struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
+struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
+                                    struct sk_buff *p);
+struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
+struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
+                                     bool (*match_fn)(struct sk_buff *p,
+                                                      void *arg),
+                                     void *arg);
 
 /* packet primitives */
-extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
-extern void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
+struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
+void brcmu_pkt_buf_free_skb(struct sk_buff *skb);
 
 /* Empty the queue at particular precedence level */
 /* callback function fn(pkt, arg) returns true if pkt belongs to if */
-extern void brcmu_pktq_pflush(struct pktq *pq, int prec,
-       bool dir, bool (*fn)(struct sk_buff *, void *), void *arg);
+void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
+                      bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* operations on a set of precedences in packet queue */
 
-extern int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
-extern struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
-       int *prec_out);
+int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
+struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
 
 /* operations on packet queue as a whole */
 
@@ -167,11 +165,11 @@ static inline bool pktq_empty(struct pktq *pq)
        return pq->len == 0;
 }
 
-extern void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
+void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
 /* prec_out may be NULL if caller is not interested in return value */
-extern struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
-extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
-               bool (*fn)(struct sk_buff *, void *), void *arg);
+struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
+void brcmu_pktq_flush(struct pktq *pq, bool dir,
+                     bool (*fn)(struct sk_buff *, void *), void *arg);
 
 /* externs */
 /* ip address */
@@ -204,13 +202,13 @@ static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
 /* externs */
 /* format/print */
 #ifdef DEBUG
-extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
+void brcmu_prpkt(const char *msg, struct sk_buff *p0);
 #else
 #define brcmu_prpkt(a, b)
 #endif                         /* DEBUG */
 
 #ifdef DEBUG
-extern __printf(3, 4)
+__printf(3, 4)
 void brcmu_dbg_hex_dump(const void *data, size_t size, const char *fmt, ...);
 #else
 __printf(3, 4)
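
__printf(3, 4) is the kernel's shorthand for the compiler's printf-format attribute: argument 3 is the format string and argument 4 is the first variadic argument, so mismatched format specifiers become compile-time warnings even after the extern keyword is dropped. A standalone sketch using the underlying GCC/Clang attribute directly (hypothetical function, not brcmu_dbg_hex_dump itself):

    #include <stdarg.h>
    #include <stdio.h>

    /* equivalent of a kernel __printf(2, 3) annotation */
    __attribute__((format(printf, 2, 3)))
    static void demo_dbg(int level, const char *fmt, ...)
    {
            va_list ap;

            if (level <= 0)
                    return;
            va_start(ap, fmt);
            vprintf(fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            demo_dbg(1, "value = %d\n", 42);
            /* demo_dbg(1, "value = %d\n", "oops"); would warn at -Wformat */
            return 0;
    }
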
index 899cad34ccd3aa1649029d90294fe21c30608d14..40078f5f932ec6b1ea3a26af5453f2bc5d2206ec 100644 (file)
@@ -237,7 +237,9 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
        struct hwbus_priv *self = dev_id;
 
        if (self->core) {
+               cw1200_spi_lock(self);
                cw1200_irq_handler(self->core);
+               cw1200_spi_unlock(self);
                return IRQ_HANDLED;
        } else {
                return IRQ_NONE;
@@ -363,7 +365,7 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
 static int cw1200_spi_probe(struct spi_device *func)
 {
        const struct cw1200_platform_data_spi *plat_data =
-               func->dev.platform_data;
+               dev_get_platdata(&func->dev);
        struct hwbus_priv *self;
        int status;
 
@@ -441,7 +443,7 @@ static int cw1200_spi_disconnect(struct spi_device *func)
                }
                kfree(self);
        }
-       cw1200_spi_off(func->dev.platform_data);
+       cw1200_spi_off(dev_get_platdata(&func->dev));
 
        return 0;
 }
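
Two separate fixes sit in the cw1200 hunks above: the IRQ handler now takes the driver's SPI lock around cw1200_irq_handler(), and platform data is fetched through the dev_get_platdata() accessor instead of reaching into func->dev.platform_data directly. A minimal probe-side sketch of the accessor pattern (struct demo_platform_data and the probe function are hypothetical, shown only to illustrate the call):

    #include <linux/device.h>
    #include <linux/spi/spi.h>

    /* hypothetical platform data layout, for illustration only */
    struct demo_platform_data {
            int reset_gpio;
    };

    static int demo_spi_probe(struct spi_device *func)
    {
            const struct demo_platform_data *pdata =
                    dev_get_platdata(&func->dev);   /* not func->dev.platform_data */

            if (!pdata)
                    return -EINVAL;
            return 0;
    }

In a real driver this probe would be wired up through a struct spi_driver; the point here is only the accessor.
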
index 970a48baaf804a38ff1883702bbd9460d7d5b8c8..de7c4ffec3096b07ccaece961666b0bdde51ea27 100644 (file)
@@ -217,7 +217,7 @@ static void prism2_host_roaming(local_info_t *local)
                }
        }
 
-       memcpy(req.bssid, selected->bssid, 6);
+       memcpy(req.bssid, selected->bssid, ETH_ALEN);
        req.channel = selected->chid;
        spin_unlock_irqrestore(&local->lock, flags);
 
index 6b823a1ab7892fd9c81be07ce56bc45e75a6b8f1..81903e33d5b1bd4779681da798dc0183c0638f48 100644 (file)
@@ -2698,7 +2698,7 @@ static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
 /* data's copy of the eeprom data                                 */
 static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
 {
-       memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
+       memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
 }
 
 static void ipw_read_eeprom(struct ipw_priv *priv)
@@ -11885,7 +11885,6 @@ static int ipw_pci_probe(struct pci_dev *pdev,
        pci_release_regions(pdev);
       out_pci_disable_device:
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
       out_free_libipw:
        free_libipw(priv->net_dev, 0);
       out:
@@ -11966,7 +11965,6 @@ static void ipw_pci_remove(struct pci_dev *pdev)
        iounmap(priv->hw_base);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
        /* wiphy_unregister needs to be here, before free_libipw */
        wiphy_unregister(priv->ieee->wdev.wiphy);
        kfree(priv->ieee->a_band.channels);
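
This hunk (and the matching iwlegacy ones below) drops the manual pci_set_drvdata(pdev, NULL) calls on the probe-error and remove paths; the likely motivation is that the driver core clears a device's drvdata itself once no driver is bound, making the explicit reset redundant. A condensed error-unwinding sketch under that assumption (demo_priv and the labels are hypothetical):

    #include <linux/pci.h>
    #include <linux/slab.h>

    struct demo_priv {
            int irq;
    };

    static int demo_pci_probe(struct pci_dev *pdev,
                              const struct pci_device_id *id)
    {
            struct demo_priv *priv;
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    return err;

            priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            if (!priv) {
                    err = -ENOMEM;
                    goto out_disable;
            }
            pci_set_drvdata(pdev, priv);
            return 0;

    out_disable:
            pci_disable_device(pdev);
            /* no pci_set_drvdata(pdev, NULL) here: the core clears it
             * when probe fails or the device is unbound */
            return err;
    }
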
index 6eede52ad8c0fbc0581a2b3a68d302cb589f4dde..5ce2f59d3378ed7ddcc4891769d72401eb66cc0d 100644 (file)
@@ -950,66 +950,55 @@ static inline int libipw_is_cck_rate(u8 rate)
 }
 
 /* libipw.c */
-extern void free_libipw(struct net_device *dev, int monitor);
-extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
-extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
+void free_libipw(struct net_device *dev, int monitor);
+struct net_device *alloc_libipw(int sizeof_priv, int monitor);
+int libipw_change_mtu(struct net_device *dev, int new_mtu);
 
-extern void libipw_networks_age(struct libipw_device *ieee,
-                                  unsigned long age_secs);
+void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs);
 
-extern int libipw_set_encryption(struct libipw_device *ieee);
+int libipw_set_encryption(struct libipw_device *ieee);
 
 /* libipw_tx.c */
-extern netdev_tx_t libipw_xmit(struct sk_buff *skb,
-                              struct net_device *dev);
-extern void libipw_txb_free(struct libipw_txb *);
+netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
+void libipw_txb_free(struct libipw_txb *);
 
 /* libipw_rx.c */
-extern void libipw_rx_any(struct libipw_device *ieee,
-                    struct sk_buff *skb, struct libipw_rx_stats *stats);
-extern int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
-                       struct libipw_rx_stats *rx_stats);
+void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
+                  struct libipw_rx_stats *stats);
+int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
+             struct libipw_rx_stats *rx_stats);
 /* make sure to set stats->len */
-extern void libipw_rx_mgt(struct libipw_device *ieee,
-                            struct libipw_hdr_4addr *header,
-                            struct libipw_rx_stats *stats);
-extern void libipw_network_reset(struct libipw_network *network);
+void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
+                  struct libipw_rx_stats *stats);
+void libipw_network_reset(struct libipw_network *network);
 
 /* libipw_geo.c */
-extern const struct libipw_geo *libipw_get_geo(struct libipw_device
-                                                    *ieee);
-extern void libipw_set_geo(struct libipw_device *ieee,
-                            const struct libipw_geo *geo);
-
-extern int libipw_is_valid_channel(struct libipw_device *ieee,
-                                     u8 channel);
-extern int libipw_channel_to_index(struct libipw_device *ieee,
-                                     u8 channel);
-extern u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
-extern u8 libipw_get_channel_flags(struct libipw_device *ieee,
-                                     u8 channel);
-extern const struct libipw_channel *libipw_get_channel(struct
-                                                            libipw_device
-                                                            *ieee, u8 channel);
-extern u32 libipw_channel_to_freq(struct libipw_device * ieee,
-                                     u8 channel);
+const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
+void libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo);
+
+int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel);
+int libipw_channel_to_index(struct libipw_device *ieee, u8 channel);
+u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
+u8 libipw_get_channel_flags(struct libipw_device *ieee, u8 channel);
+const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee,
+                                               u8 channel);
+u32 libipw_channel_to_freq(struct libipw_device *ieee, u8 channel);
 
 /* libipw_wx.c */
-extern int libipw_wx_get_scan(struct libipw_device *ieee,
-                                struct iw_request_info *info,
-                                union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encode(struct libipw_device *ieee,
-                                  struct iw_request_info *info,
-                                  union iwreq_data *wrqu, char *key);
-extern int libipw_wx_get_encode(struct libipw_device *ieee,
-                                  struct iw_request_info *info,
-                                  union iwreq_data *wrqu, char *key);
-extern int libipw_wx_set_encodeext(struct libipw_device *ieee,
-                                     struct iw_request_info *info,
-                                     union iwreq_data *wrqu, char *extra);
-extern int libipw_wx_get_encodeext(struct libipw_device *ieee,
-                                     struct iw_request_info *info,
-                                     union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_scan(struct libipw_device *ieee, struct iw_request_info *info,
+                      union iwreq_data *wrqu, char *key);
+int libipw_wx_set_encode(struct libipw_device *ieee,
+                        struct iw_request_info *info, union iwreq_data *wrqu,
+                        char *key);
+int libipw_wx_get_encode(struct libipw_device *ieee,
+                        struct iw_request_info *info, union iwreq_data *wrqu,
+                        char *key);
+int libipw_wx_set_encodeext(struct libipw_device *ieee,
+                           struct iw_request_info *info,
+                           union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_encodeext(struct libipw_device *ieee,
+                           struct iw_request_info *info,
+                           union iwreq_data *wrqu, char *extra);
 
 static inline void libipw_increment_scans(struct libipw_device *ieee)
 {
index 9581d07a4242bbdabef6174e60560d42aa4e6b09..dea3b50d68b9c5bcfc1c95794262992bc59d917d 100644 (file)
@@ -3811,7 +3811,6 @@ out_iounmap:
 out_pci_release_regions:
        pci_release_regions(pdev);
 out_pci_disable_device:
-       pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
 out_ieee80211_free_hw:
        ieee80211_free_hw(il->hw);
@@ -3888,7 +3887,6 @@ il3945_pci_remove(struct pci_dev *pdev)
        iounmap(il->hw_base);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        il_free_channel_map(il);
        il_free_geos(il);
index 9a8703def0ba1a1825c264b5e5806003412c5e91..00030d43a1947380cf818bae7ff11a31b26d92a8 100644 (file)
@@ -189,15 +189,14 @@ struct il3945_ibss_seq {
  * for use by iwl-*.c
  *
  *****************************************************************************/
-extern int il3945_calc_db_from_ratio(int sig_ratio);
-extern void il3945_rx_replenish(void *data);
-extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
-extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
-                                            struct ieee80211_hdr *hdr,
-                                            int left);
-extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
-                                    char **buf, bool display);
-extern void il3945_dump_nic_error_log(struct il_priv *il);
+int il3945_calc_db_from_ratio(int sig_ratio);
+void il3945_rx_replenish(void *data);
+void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+                                     struct ieee80211_hdr *hdr, int left);
+int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, char **buf,
+                             bool display);
+void il3945_dump_nic_error_log(struct il_priv *il);
 
 /******************************************************************************
  *
@@ -215,39 +214,36 @@ extern void il3945_dump_nic_error_log(struct il_priv *il);
  * il3945_mac_     <-- mac80211 callback
  *
  ****************************************************************************/
-extern void il3945_hw_handler_setup(struct il_priv *il);
-extern void il3945_hw_setup_deferred_work(struct il_priv *il);
-extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
-extern int il3945_hw_rxq_stop(struct il_priv *il);
-extern int il3945_hw_set_hw_params(struct il_priv *il);
-extern int il3945_hw_nic_init(struct il_priv *il);
-extern int il3945_hw_nic_stop_master(struct il_priv *il);
-extern void il3945_hw_txq_ctx_free(struct il_priv *il);
-extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
-extern int il3945_hw_nic_reset(struct il_priv *il);
-extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
-                                          struct il_tx_queue *txq,
-                                          dma_addr_t addr, u16 len, u8 reset,
-                                          u8 pad);
-extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
-extern int il3945_hw_get_temperature(struct il_priv *il);
-extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
-extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
-                                            struct il3945_frame *frame,
-                                            u8 rate);
+void il3945_hw_handler_setup(struct il_priv *il);
+void il3945_hw_setup_deferred_work(struct il_priv *il);
+void il3945_hw_cancel_deferred_work(struct il_priv *il);
+int il3945_hw_rxq_stop(struct il_priv *il);
+int il3945_hw_set_hw_params(struct il_priv *il);
+int il3945_hw_nic_init(struct il_priv *il);
+int il3945_hw_nic_stop_master(struct il_priv *il);
+void il3945_hw_txq_ctx_free(struct il_priv *il);
+void il3945_hw_txq_ctx_stop(struct il_priv *il);
+int il3945_hw_nic_reset(struct il_priv *il);
+int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+                                   dma_addr_t addr, u16 len, u8 reset, u8 pad);
+void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il3945_hw_get_temperature(struct il_priv *il);
+int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+                                     struct il3945_frame *frame, u8 rate);
 void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
                                 struct ieee80211_tx_info *info,
                                 struct ieee80211_hdr *hdr, int sta_id);
-extern int il3945_hw_reg_send_txpower(struct il_priv *il);
-extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
-extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+int il3945_hw_reg_send_txpower(struct il_priv *il);
+int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
 void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
-extern void il3945_disable_events(struct il_priv *il);
-extern int il4965_get_temperature(const struct il_priv *il);
-extern void il3945_post_associate(struct il_priv *il);
-extern void il3945_config_ap(struct il_priv *il);
+void il3945_disable_events(struct il_priv *il);
+int il4965_get_temperature(const struct il_priv *il);
+void il3945_post_associate(struct il_priv *il);
+void il3945_config_ap(struct il_priv *il);
 
-extern int il3945_commit_rxon(struct il_priv *il);
+int il3945_commit_rxon(struct il_priv *il);
 
 /**
  * il3945_hw_find_station - Find station id for a given BSSID
@@ -257,14 +253,14 @@ extern int il3945_commit_rxon(struct il_priv *il);
  * not yet been merged into a single common layer for managing the
  * station tables.
  */
-extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+u8 il3945_hw_find_station(struct il_priv *il, const u8 *bssid);
 
-extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
-extern int il3945_init_hw_rate_table(struct il_priv *il);
-extern void il3945_reg_txpower_periodic(struct il_priv *il);
-extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+__le32 il3945_get_antenna_flags(const struct il_priv *il);
+int il3945_init_hw_rate_table(struct il_priv *il);
+void il3945_reg_txpower_periodic(struct il_priv *il);
+int il3945_txpower_set_from_eeprom(struct il_priv *il);
 
-extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+int il3945_rs_next_rate(struct il_priv *il, int rate);
 
 /* scanning */
 int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
index 5ab50a5b48b1511708bd67783ccb7518a9a81eb1..3982ab76f3755fdbda4f0cab89aa6cca537a8f43 100644 (file)
@@ -6706,7 +6706,6 @@ out_free_eeprom:
 out_iounmap:
        iounmap(il->hw_base);
 out_pci_release_regions:
-       pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
 out_pci_disable_device:
        pci_disable_device(pdev);
@@ -6787,7 +6786,6 @@ il4965_pci_remove(struct pci_dev *pdev)
        iounmap(il->hw_base);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
-       pci_set_drvdata(pdev, NULL);
 
        il4965_uninit_drv(il);
 
index 1b15b0b2292b4fe06c3fa1efcc95279c9901370e..337dfcf3bbde7c58fe4681268f63b9d29ea8f5fa 100644 (file)
@@ -272,7 +272,7 @@ il4965_hw_valid_rtc_data_addr(u32 addr)
        ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
         (t) > IL_TX_POWER_TEMPERATURE_MAX)
 
-extern void il4965_temperature_calib(struct il_priv *il);
+void il4965_temperature_calib(struct il_priv *il);
 /********************* END TEMPERATURE ***************************************/
 
 /********************* START TXPOWER *****************************************/
index 83f8ed8a5528cbd209c97560e9ab9c59d2294a90..ad123d66ab6c5c13e2bd120df7b3c2b412d1400a 100644 (file)
@@ -858,9 +858,9 @@ struct il_hw_params {
  * il4965_mac_     <-- mac80211 callback
  *
  ****************************************************************************/
-extern void il4965_update_chain_flags(struct il_priv *il);
+void il4965_update_chain_flags(struct il_priv *il);
 extern const u8 il_bcast_addr[ETH_ALEN];
-extern int il_queue_space(const struct il_queue *q);
+int il_queue_space(const struct il_queue *q);
 static inline int
 il_queue_used(const struct il_queue *q, int i)
 {
@@ -1727,7 +1727,7 @@ int il_alloc_txq_mem(struct il_priv *il);
 void il_free_txq_mem(struct il_priv *il);
 
 #ifdef CONFIG_IWLEGACY_DEBUGFS
-extern void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
 #else
 static inline void
 il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
@@ -1760,12 +1760,12 @@ void il_chswitch_done(struct il_priv *il, bool is_success);
 /*****************************************************
 * TX
 ******************************************************/
-extern void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
-extern int il_tx_queue_init(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
-extern void il_tx_queue_unmap(struct il_priv *il, int txq_id);
-extern void il_tx_queue_free(struct il_priv *il, int txq_id);
-extern void il_setup_watchdog(struct il_priv *il);
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
 /*****************************************************
  * TX power
  ****************************************************/
@@ -1931,10 +1931,10 @@ il_is_ready_rf(struct il_priv *il)
        return il_is_ready(il);
 }
 
-extern void il_send_bt_config(struct il_priv *il);
-extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
-extern void il_apm_stop(struct il_priv *il);
-extern void _il_apm_stop(struct il_priv *il);
+void il_send_bt_config(struct il_priv *il);
+int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+void _il_apm_stop(struct il_priv *il);
 
 int il_apm_init(struct il_priv *il);
 
@@ -1968,15 +1968,15 @@ void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
 
 irqreturn_t il_isr(int irq, void *data);
 
-extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
-extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
-extern bool _il_grab_nic_access(struct il_priv *il);
-extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
-extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
-extern u32 il_rd_prph(struct il_priv *il, u32 reg);
-extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
-extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
-extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+void il_set_bit(struct il_priv *p, u32 r, u32 m);
+void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+bool _il_grab_nic_access(struct il_priv *il);
+int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+u32 il_rd_prph(struct il_priv *il, u32 reg);
+void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
 
 static inline void
 _il_write8(struct il_priv *il, u32 ofs, u8 val)
@@ -2868,13 +2868,13 @@ il4965_first_antenna(u8 mask)
  * The specific throughput table used is based on the type of network
  * the associated with, including A, B, G, and G w/ TGG protection
  */
-extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
 
 /* Initialize station's rate scaling information after adding station */
-extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
-                               u8 sta_id);
-extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
-                               u8 sta_id);
+void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+                        u8 sta_id);
+void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+                        u8 sta_id);
 
 /**
  * il_rate_control_register - Register the rate control algorithm callbacks
@@ -2886,8 +2886,8 @@ extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
  * ieee80211_register_hw
  *
  */
-extern int il4965_rate_control_register(void);
-extern int il3945_rate_control_register(void);
+int il4965_rate_control_register(void);
+int il3945_rate_control_register(void);
 
 /**
  * il_rate_control_unregister - Unregister the rate control callbacks
@@ -2895,11 +2895,11 @@ extern int il3945_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void il4965_rate_control_unregister(void);
-extern void il3945_rate_control_unregister(void);
+void il4965_rate_control_unregister(void);
+void il3945_rate_control_unregister(void);
 
-extern int il_power_update_mode(struct il_priv *il, bool force);
-extern void il_power_initialize(struct il_priv *il);
+int il_power_update_mode(struct il_priv *il, bool force);
+void il_power_initialize(struct il_priv *il);
 
 extern u32 il_debug_level;
 
index f2a86ffc3b4cf09440874c88b6f0cc3d804ec3a6..23d5f0275ce98e1cde6c05e7b9a1654c870774ea 100644 (file)
@@ -397,7 +397,7 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
        return cpu_to_le32(flags|(u32)rate);
 }
 
-extern int iwl_alive_start(struct iwl_priv *priv);
+int iwl_alive_start(struct iwl_priv *priv);
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 void iwl_print_rx_config_cmd(struct iwl_priv *priv,
index a79fdd137f956ce5cb4de2ef7e1c0f239e76895a..7434d9edf3b773566530b79eb542a8431bbfb24c 100644 (file)
@@ -270,7 +270,7 @@ struct iwl_sensitivity_ranges {
  * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
  *
  ****************************************************************************/
-extern void iwl_update_chain_flags(struct iwl_priv *priv);
+void iwl_update_chain_flags(struct iwl_priv *priv);
 extern const u8 iwl_bcast_addr[ETH_ALEN];
 
 #define IWL_OPERATION_MODE_AUTO     0
index 5d83cab22d625084389b8880b46a4abd21fb9f44..26fc550cd68c58ae24342232bda51be2dfd690a5 100644 (file)
@@ -407,8 +407,8 @@ static inline u8 first_antenna(u8 mask)
 
 
 /* Initialize station's rate scaling information after adding station */
-extern void iwl_rs_rate_init(struct iwl_priv *priv,
-                            struct ieee80211_sta *sta, u8 sta_id);
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta,
+                     u8 sta_id);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -420,7 +420,7 @@ extern void iwl_rs_rate_init(struct iwl_priv *priv,
  * ieee80211_register_hw
  *
  */
-extern int iwlagn_rate_control_register(void);
+int iwlagn_rate_control_register(void);
 
 /**
  * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -428,6 +428,6 @@ extern int iwlagn_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void iwlagn_rate_control_unregister(void);
+void iwlagn_rate_control_unregister(void);
 
 #endif /* __iwl_agn__rs__ */
index da442b81370a769054b4b77a946daa4bbd856ed8..1fef5240e6adc07317b7128bf7d7617385fa9aa8 100644 (file)
@@ -433,27 +433,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdr_len);
 
+       txq_id = info->hw_queue;
+
        if (is_agg)
                txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
        else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
-               /*
-                * Send this frame after DTIM -- there's a special queue
-                * reserved for this for contexts that support AP mode.
-                */
-               txq_id = ctx->mcast_queue;
-
                /*
                 * The microcode will clear the more data
                 * bit in the last frame it transmits.
                 */
                hdr->frame_control |=
                        cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-       } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
-               txq_id = IWL_AUX_QUEUE;
-       else
-               txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+       }
 
-       WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
        WARN_ON_ONCE(is_agg &&
                     priv->queue_to_mac80211[txq_id] != info->hw_queue);
 
index 30d45e2fc193a5bc04987acbdef9b0252a2b5c01..8ac305be68f489be533606cb8260dbfd4dc5cfda 100644 (file)
@@ -240,6 +240,12 @@ const struct iwl_cfg iwl6035_2agn_cfg = {
        .ht_params = &iwl6000_ht_params,
 };
 
+const struct iwl_cfg iwl6035_2agn_sff_cfg = {
+       .name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
+       IWL_DEVICE_6035,
+       .ht_params = &iwl6000_ht_params,
+};
+
 const struct iwl_cfg iwl1030_bgn_cfg = {
        .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
        IWL_DEVICE_6030,
index 76e14c046d9402cc0b7fbe39b0966c8195f0708a..85879dbaa402cdaa88d93247b678aaac9a3ff000 100644 (file)
@@ -83,6 +83,8 @@
 #define IWL7260_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL3160_NVM_VERSION            0x709
 #define IWL3160_TX_POWER_VERSION       0xffff /* meaningless */
+#define IWL7265_NVM_VERSION            0x0a1d
+#define IWL7265_TX_POWER_VERSION       0xffff /* meaningless */
 
 #define IWL7260_FW_PRE "iwlwifi-7260-"
 #define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
@@ -90,6 +92,9 @@
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
+#define IWL7265_FW_PRE "iwlwifi-7265-"
+#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+
 static const struct iwl_base_params iwl7000_base_params = {
        .eeprom_size = OTP_LOW_IMAGE_SIZE,
        .num_of_queues = IWLAGN_NUM_QUEUES,
@@ -182,5 +187,14 @@ const struct iwl_cfg iwl3160_n_cfg = {
        .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
 };
 
+const struct iwl_cfg iwl7265_2ac_cfg = {
+       .name = "Intel(R) Dual Band Wireless AC 7265",
+       .fw_name_pre = IWL7265_FW_PRE,
+       IWL_DEVICE_7000,
+       .ht_params = &iwl7000_ht_params,
+       .nvm_ver = IWL7265_NVM_VERSION,
+       .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+};
+
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
index e4d370bff30679fceacc1bc82907ccb6bc194f22..18f232e8e81253b31d730755f242b7a51552157c 100644 (file)
@@ -280,6 +280,7 @@ extern const struct iwl_cfg iwl2000_2bgn_cfg;
 extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
 extern const struct iwl_cfg iwl2030_2bgn_cfg;
 extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
 extern const struct iwl_cfg iwl105_bgn_cfg;
 extern const struct iwl_cfg iwl105_bgn_d_cfg;
 extern const struct iwl_cfg iwl135_bgn_cfg;
@@ -292,6 +293,7 @@ extern const struct iwl_cfg iwl7260_n_cfg;
 extern const struct iwl_cfg iwl3160_2ac_cfg;
 extern const struct iwl_cfg iwl3160_2n_cfg;
 extern const struct iwl_cfg iwl3160_n_cfg;
+extern const struct iwl_cfg iwl7265_2ac_cfg;
 #endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
index a276af476e2d7ab64e859748c9b8fd48b84ebb80..54a4fdc631b73c987f12e459e8245d38b18f0c0b 100644 (file)
 #define CSR_DRAM_INT_TBL_ENABLE                (1 << 31)
 #define CSR_DRAM_INIT_TBL_WRAP_CHECK   (1 << 27)
 
+/* SECURE boot registers */
+#define CSR_SECURE_BOOT_CONFIG_ADDR    (0x100)
+enum secure_boot_config_reg {
+       CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP  = 0x00000001,
+       CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ        = 0x00000002,
+};
+
+#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR       (0x100)
+#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR       (0x100)
+enum secure_boot_status_reg {
+       CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS          = 0x00000003,
+       CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED       = 0x00000002,
+       CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS         = 0x00000004,
+       CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL            = 0x00000008,
+       CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL       = 0x00000010,
+};
+
+#define CSR_UCODE_LOAD_STATUS_ADDR     (0x100)
+enum secure_load_status_reg {
+       CSR_CPU_STATUS_LOADING_STARTED                  = 0x00000001,
+       CSR_CPU_STATUS_LOADING_COMPLETED                = 0x00000002,
+       CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED            = 0x000000F8,
+       CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK         = 0x0000FF00,
+};
+
+#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
+#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
+
+#define CSR_SECURE_TIME_OUT    (100)
+
+#define FH_TCSR_0_REG0 (0x1D00)
+
 /*
  * HBUS (Host-side Bus)
  *
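
The new CSR definitions above describe secure-boot configuration and status bits that a firmware loader would typically poll until verification completes or fails, bounded by CSR_SECURE_TIME_OUT. A standalone sketch of that poll pattern using the mask values from the hunk (the register-read helper is a hypothetical stub, not the driver's real MMIO path):

    #include <stdbool.h>
    #include <stdint.h>

    #define CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED  0x00000002
    #define CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL       0x00000008
    #define CSR_SECURE_TIME_OUT                        100

    /* stub standing in for an MMIO read of the status register */
    static uint32_t demo_read_csr(uint32_t addr)
    {
            (void)addr;
            return CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED;
    }

    /* true once verification completed, false on failure or timeout */
    static bool demo_wait_secure_boot(uint32_t status_addr)
    {
            int i;

            for (i = 0; i < CSR_SECURE_TIME_OUT; i++) {
                    uint32_t status = demo_read_csr(status_addr);

                    if (status & CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL)
                            return false;
                    if (status & CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED)
                            return true;
            }
            return false;
    }

    int main(void)
    {
            return demo_wait_secure_boot(0x100) ? 0 : 1;
    }
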
index 99e1da3123c9a07649008e48d2a2a56dc685d820..ff570027e9dd9fa02f3e8360af6966b480c68069 100644 (file)
@@ -483,6 +483,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
        const u8 *tlv_data;
        char buildstr[25];
        u32 build;
+       int num_of_cpus;
 
        if (len < sizeof(*ucode)) {
                IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -692,6 +693,42 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                                goto invalid_tlv_len;
                        drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
                        break;
+                case IWL_UCODE_TLV_SECURE_SEC_RT:
+                       iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
+                                           tlv_len);
+                       drv->fw.mvm_fw = true;
+                       drv->fw.img[IWL_UCODE_REGULAR].is_secure = true;
+                       break;
+               case IWL_UCODE_TLV_SECURE_SEC_INIT:
+                       iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
+                                           tlv_len);
+                       drv->fw.mvm_fw = true;
+                       drv->fw.img[IWL_UCODE_INIT].is_secure = true;
+                       break;
+               case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
+                       iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
+                                           tlv_len);
+                       drv->fw.mvm_fw = true;
+                       drv->fw.img[IWL_UCODE_WOWLAN].is_secure = true;
+                       break;
+               case IWL_UCODE_TLV_NUM_OF_CPU:
+                       if (tlv_len != sizeof(u32))
+                               goto invalid_tlv_len;
+                       num_of_cpus =
+                               le32_to_cpup((__le32 *)tlv_data);
+
+                       if (num_of_cpus == 2) {
+                               drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
+                                       true;
+                               drv->fw.img[IWL_UCODE_INIT].is_dual_cpus =
+                                       true;
+                               drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus =
+                                       true;
+                       } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
+                               IWL_ERR(drv, "Driver supports up to 2 CPUs\n");
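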
+                               return -EINVAL;
+                       }
+                       break;
                default:
                        IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
                        break;
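
Each new case above follows the same defensive pattern: validate tlv_len before touching tlv_data, convert from little-endian, then update driver state. For reference, a stand-alone sketch of the TLV walk itself (32-bit type, 32-bit length, 4-byte-aligned payload); the names and the little-endian-host shortcut are illustrative, not the driver's own code:

	/* Stand-alone sketch of a .ucode TLV walk; assumes a little-endian host. */
	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>

	struct tlv_hdr {
		uint32_t type;	/* record type, little-endian on disk */
		uint32_t len;	/* payload length in bytes */
	};

	static int walk_tlvs(const uint8_t *buf, size_t len)
	{
		while (len >= sizeof(struct tlv_hdr)) {
			struct tlv_hdr hdr;
			size_t padded;

			memcpy(&hdr, buf, sizeof(hdr));
			buf += sizeof(hdr);
			len -= sizeof(hdr);

			if (hdr.len > len)
				return -1;		/* truncated record */

			/* dispatch on hdr.type here, e.g. the number-of-CPUs TLV */

			padded = ((size_t)hdr.len + 3) & ~(size_t)3;
			if (padded > len)
				padded = len;		/* final record may be unpadded */
			buf += padded;
			len -= padded;
		}
		return 0;
	}
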
index 8b6c6fd95ed038ac1052827a2edb2a39f04f849a..6c6c35c5228cabd71d1520f533af7bd3adf8ad26 100644 (file)
@@ -121,6 +121,10 @@ enum iwl_ucode_tlv_type {
        IWL_UCODE_TLV_SEC_WOWLAN        = 21,
        IWL_UCODE_TLV_DEF_CALIB         = 22,
        IWL_UCODE_TLV_PHY_SKU           = 23,
+       IWL_UCODE_TLV_SECURE_SEC_RT     = 24,
+       IWL_UCODE_TLV_SECURE_SEC_INIT   = 25,
+       IWL_UCODE_TLV_SECURE_SEC_WOWLAN = 26,
+       IWL_UCODE_TLV_NUM_OF_CPU        = 27,
 };
 
 struct iwl_ucode_tlv {
index a1223680bc70d2f765e32180176ac7997884a3d7..87b66a821ec8983fb79876519364880861b7ae71 100644 (file)
  * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
  * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
  * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
+ *     offload profile config command.
  * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
  * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
  * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
  *     (rather than two) IPv6 addresses
  * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
+ * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ *     from the probe request template.
+ * @IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API: modified D3 API to allow keeping
+ *     connection when going back to D0
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWL_UCODE_TLV_FLAGS_SCHED_SCAN: this uCode image supports scheduled scan.
+ * @IWL_UCODE_TLV_FLAGS_STA_KEY_CMD: new ADD_STA and ADD_STA_KEY command API
+ * @IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD: support device wide power command
+ *     containing CAM (Continuous Active Mode) indication.
  */
 enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_PAN                 = BIT(0),
@@ -87,11 +99,20 @@ enum iwl_ucode_tlv_flag {
        IWL_UCODE_TLV_FLAGS_MFP                 = BIT(2),
        IWL_UCODE_TLV_FLAGS_P2P                 = BIT(3),
        IWL_UCODE_TLV_FLAGS_DW_BC_TABLE         = BIT(4),
+       IWL_UCODE_TLV_FLAGS_NEWBT_COEX          = BIT(5),
        IWL_UCODE_TLV_FLAGS_UAPSD               = BIT(6),
+       IWL_UCODE_TLV_FLAGS_SHORT_BL            = BIT(7),
        IWL_UCODE_TLV_FLAGS_RX_ENERGY_API       = BIT(8),
        IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2   = BIT(9),
        IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS     = BIT(10),
        IWL_UCODE_TLV_FLAGS_BF_UPDATED          = BIT(11),
+       IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID       = BIT(12),
+       IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API   = BIT(14),
+       IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL    = BIT(15),
+       IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE    = BIT(16),
+       IWL_UCODE_TLV_FLAGS_SCHED_SCAN          = BIT(17),
+       IWL_UCODE_TLV_FLAGS_STA_KEY_CMD         = BIT(19),
+       IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD       = BIT(20),
 };
 
 /* The default calibrate table size if not specified by firmware file */
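
The iwl_ucode_tlv_flag bits above end up in fw->ucode_capa.flags and are tested at runtime before the corresponding firmware feature is used, as the MVM changes further down do for IWL_UCODE_TLV_FLAGS_NEWBT_COEX. The check is a plain bit test, e.g.:

	/* Typical capability gate (pattern used throughout the MVM code below). */
	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
		/* firmware advertises scheduled scan - safe to use that API */
	}
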
@@ -133,7 +154,8 @@ enum iwl_ucode_sec {
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 4
+#define IWL_UCODE_SECTION_MAX 6
+#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU  (IWL_UCODE_SECTION_MAX/2)
 
 struct iwl_ucode_capabilities {
        u32 max_probe_length;
@@ -150,6 +172,8 @@ struct fw_desc {
 
 struct fw_img {
        struct fw_desc sec[IWL_UCODE_SECTION_MAX];
+       bool is_secure;
+       bool is_dual_cpus;
 };
 
 /* uCode version contains 4 values: Major/Minor/API/Serial */
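
With IWL_UCODE_SECTION_MAX raised to 6 and IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU defined as half of it, a dual-CPU image keeps the second CPU's sections in the upper half of sec[]. A hedged sketch of how a loader might walk the array; load_one_section() is an assumed helper, not the transport's real routine:

	/* Sketch only - the real section loader lives in the PCIe transport. */
	static int load_fw_img_sketch(struct iwl_trans *trans, const struct fw_img *img)
	{
		int i, ret;

		for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
			if (!img->sec[i].data)
				continue;

			if (img->is_dual_cpus &&
			    i == IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU) {
				/* sections from this index on belong to CPU2 */
			}

			ret = load_one_section(trans, &img->sec[i]);	/* assumed */
			if (ret)
				return ret;
		}
		return 0;
	}
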
index ff8cc75c189d4d842abf8611fb8c5e7c7a63bf38..a70c7b9d9bad897345fb1e1e89d5c421e0d8a3da 100644 (file)
@@ -97,6 +97,8 @@
 
 #define APMG_PCIDEV_STT_VAL_L1_ACT_DIS         (0x00000800)
 
+#define APMG_RTC_INT_STT_RFKILL                (0x10000000)
+
 /* Device system time */
 #define DEVICE_SYSTEM_TIME_REG 0xA0206C
 
index dd57a36ecb1005d7571290a59925fd4d641b61aa..c6bac7c90b00bf458b17a829ec2895c2acaa065d 100644 (file)
@@ -601,8 +601,10 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
 {
        int ret;
 
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
+               return -EIO;
+       }
 
        if (!(cmd->flags & CMD_ASYNC))
                lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
@@ -638,8 +640,8 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
                               struct iwl_device_cmd *dev_cmd, int queue)
 {
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
        return trans->ops->tx(trans, skb, dev_cmd, queue);
 }
@@ -647,16 +649,16 @@ static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
                                     int ssn, struct sk_buff_head *skbs)
 {
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
        trans->ops->reclaim(trans, queue, ssn, skbs);
 }
 
 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
 {
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
        trans->ops->txq_disable(trans, queue);
 }
@@ -667,8 +669,8 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
 {
        might_sleep();
 
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
        trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
                                 frame_limit, ssn);
@@ -683,8 +685,8 @@ static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
 
 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
 {
-       WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
-                 "%s bad state = %d", __func__, trans->state);
+       if (unlikely(trans->state != IWL_TRANS_FW_ALIVE))
+               IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
 
        return trans->ops->wait_tx_queue_empty(trans);
 }
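
The same FW-alive check is now open-coded in each wrapper; one way to keep the error path uniform would be a small helper like the sketch below (not part of this patch):

	/* Not in the patch - a possible consolidation of the repeated check. */
	static inline bool iwl_trans_fw_alive_or_err(struct iwl_trans *trans,
						     const char *caller)
	{
		if (likely(trans->state == IWL_TRANS_FW_ALIVE))
			return true;

		IWL_ERR(trans, "%s bad state = %d", caller, trans->state);
		return false;
	}

	/* e.g.: if (!iwl_trans_fw_alive_or_err(trans, __func__)) return -EIO; */
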
index 0fad98b85f60dd8dedca668296b27382b15ca0f6..5b630f12bbff774e4108c4df070e96ef9a0e2088 100644 (file)
@@ -98,126 +98,258 @@ static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
 
 #undef EVENT_PRIO_ANT
 
-/* BT Antenna Coupling Threshold (dB) */
-#define IWL_BT_ANTENNA_COUPLING_THRESHOLD      (35)
-#define IWL_BT_LOAD_FORCE_SISO_THRESHOLD       (3)
-
 #define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD    (-62)
 #define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD   (-65)
-#define BT_REDUCED_TX_POWER_BIT                        BIT(7)
-
-static inline bool is_loose_coex(void)
-{
-       return iwlwifi_mod_params.ant_coupling >
-               IWL_BT_ANTENNA_COUPLING_THRESHOLD;
-}
+#define BT_ANTENNA_COUPLING_THRESHOLD          (30)
 
 int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
 {
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+               return 0;
+
        return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
                                    sizeof(struct iwl_bt_coex_prio_tbl_cmd),
                                    &iwl_bt_prio_tbl);
 }
 
-static int iwl_send_bt_env(struct iwl_mvm *mvm, u8 action, u8 type)
-{
-       struct iwl_bt_coex_prot_env_cmd env_cmd;
-       int ret;
-
-       env_cmd.action = action;
-       env_cmd.type = type;
-       ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PROT_ENV, CMD_SYNC,
-                                  sizeof(env_cmd), &env_cmd);
-       if (ret)
-               IWL_ERR(mvm, "failed to send BT env command\n");
-       return ret;
-}
-
-enum iwl_bt_kill_msk {
-       BT_KILL_MSK_DEFAULT,
-       BT_KILL_MSK_SCO_HID_A2DP,
-       BT_KILL_MSK_REDUCED_TXPOW,
-       BT_KILL_MSK_MAX,
-};
-
-static const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
+const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
        [BT_KILL_MSK_DEFAULT] = 0xffff0000,
        [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
        [BT_KILL_MSK_REDUCED_TXPOW] = 0,
 };
 
-static const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
+const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
        [BT_KILL_MSK_DEFAULT] = 0xffff0000,
        [BT_KILL_MSK_SCO_HID_A2DP] = 0xffffffff,
        [BT_KILL_MSK_REDUCED_TXPOW] = 0,
 };
 
-#define IWL_BT_DEFAULT_BOOST (0xf0f0f0f0)
-
-/* Tight Coex */
-static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaeaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xcc00ff28),
-       cpu_to_le32(0x0000aaaa),
-       cpu_to_le32(0xcc00aaaa),
-       cpu_to_le32(0x0000aaaa),
-       cpu_to_le32(0xc0004000),
-       cpu_to_le32(0x00000000),
-       cpu_to_le32(0xf0005000),
-       cpu_to_le32(0xf0005000),
+static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
+       cpu_to_le32(0xf0f0f0f0),
+       cpu_to_le32(0xc0c0c0c0),
+       cpu_to_le32(0xfcfcfcfc),
+       cpu_to_le32(0xff00ff00),
 };
 
-/* Loose Coex */
-static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xcc00ff28),
-       cpu_to_le32(0x0000aaaa),
-       cpu_to_le32(0xcc00aaaa),
-       cpu_to_le32(0x0000aaaa),
-       cpu_to_le32(0x00000000),
-       cpu_to_le32(0x00000000),
-       cpu_to_le32(0xf0005000),
-       cpu_to_le32(0xf0005000),
+static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+       {
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x40000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x44000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0xf0005000),
+       },
 };
 
-/* Full concurrency */
-static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0xaaaaaaaa),
-       cpu_to_le32(0x00000000),
-       cpu_to_le32(0x00000000),
-       cpu_to_le32(0x00000000),
-       cpu_to_le32(0x00000000),
+static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
+       {
+               /* Tight */
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaeaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xcc00ff28),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xcc00aaaa),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xc0004000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               /* Loose */
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xcc00ff28),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xcc00aaaa),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0x00000000),
+               cpu_to_le32(0xf0005000),
+               cpu_to_le32(0xf0005000),
+       },
+       {
+               /* Tx Tx disabled */
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xaaaaaaaa),
+               cpu_to_le32(0xcc00ff28),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xcc00aaaa),
+               cpu_to_le32(0x0000aaaa),
+               cpu_to_le32(0xC0004000),
+               cpu_to_le32(0xC0004000),
+               cpu_to_le32(0xF0005000),
+               cpu_to_le32(0xF0005000),
+       },
 };
 
-/* single shared antenna */
-static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
-       cpu_to_le32(0x40000000),
-       cpu_to_le32(0x00000000),
-       cpu_to_le32(0x44000000),
-       cpu_to_le32(0x00000000),
-       cpu_to_le32(0x40000000),
-       cpu_to_le32(0x00000000),
-       cpu_to_le32(0x44000000),
-       cpu_to_le32(0x00000000),
-       cpu_to_le32(0xC0004000),
-       cpu_to_le32(0xF0005000),
-       cpu_to_le32(0xC0004000),
-       cpu_to_le32(0xF0005000),
+/* 20 MHz / 40 MHz below / 40 MHz above */
+static const __le64 iwl_ci_mask[][3] = {
+       /* dummy entry for channel 0 */
+       {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
+       {
+               cpu_to_le64(0x0000001FFFULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x00007FFFFFULL),
+       },
+       {
+               cpu_to_le64(0x000000FFFFULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x0003FFFFFFULL),
+       },
+       {
+               cpu_to_le64(0x000003FFFCULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x000FFFFFFCULL),
+       },
+       {
+               cpu_to_le64(0x00001FFFE0ULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x007FFFFFE0ULL),
+       },
+       {
+               cpu_to_le64(0x00007FFF80ULL),
+               cpu_to_le64(0x00007FFFFFULL),
+               cpu_to_le64(0x01FFFFFF80ULL),
+       },
+       {
+               cpu_to_le64(0x0003FFFC00ULL),
+               cpu_to_le64(0x0003FFFFFFULL),
+               cpu_to_le64(0x0FFFFFFC00ULL),
+       },
+       {
+               cpu_to_le64(0x000FFFF000ULL),
+               cpu_to_le64(0x000FFFFFFCULL),
+               cpu_to_le64(0x3FFFFFF000ULL),
+       },
+       {
+               cpu_to_le64(0x007FFF8000ULL),
+               cpu_to_le64(0x007FFFFFE0ULL),
+               cpu_to_le64(0xFFFFFF8000ULL),
+       },
+       {
+               cpu_to_le64(0x01FFFE0000ULL),
+               cpu_to_le64(0x01FFFFFF80ULL),
+               cpu_to_le64(0xFFFFFE0000ULL),
+       },
+       {
+               cpu_to_le64(0x0FFFF00000ULL),
+               cpu_to_le64(0x0FFFFFFC00ULL),
+               cpu_to_le64(0x0ULL),
+       },
+       {
+               cpu_to_le64(0x3FFFC00000ULL),
+               cpu_to_le64(0x3FFFFFF000ULL),
+               cpu_to_le64(0x0)
+       },
+       {
+               cpu_to_le64(0xFFFE000000ULL),
+               cpu_to_le64(0xFFFFFF8000ULL),
+               cpu_to_le64(0x0)
+       },
+       {
+               cpu_to_le64(0xFFF8000000ULL),
+               cpu_to_le64(0xFFFFFE0000ULL),
+               cpu_to_le64(0x0)
+       },
+       {
+               cpu_to_le64(0xFE00000000ULL),
+               cpu_to_le64(0x0ULL),
+               cpu_to_le64(0x0)
+       },
 };
 
+static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
+       cpu_to_le32(0x22002200),
+       cpu_to_le32(0x33113311),
+};
+
+static enum iwl_bt_coex_lut_type
+iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
+{
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       enum iwl_bt_coex_lut_type ret;
+       u16 phy_ctx_id;
+
+       /*
+        * Checking that we hold mvm->mutex is a good idea, but the rate
+        * control can't acquire the mutex since it runs in Tx path.
+        * So this is racy in that case, but in the worst case, the AMPDU
+        * size limit will be wrong for a short time which is not a big
+        * issue.
+        */
+
+       rcu_read_lock();
+
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+       if (!chanctx_conf ||
+            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+               rcu_read_unlock();
+               return BT_COEX_LOOSE_LUT;
+       }
+
+       ret = BT_COEX_TX_DIS_LUT;
+
+       if (mvm->cfg->bt_shared_single_ant) {
+               rcu_read_unlock();
+               return ret;
+       }
+
+       phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+
+       if (mvm->last_bt_ci_cmd.primary_ch_phy_id == phy_ctx_id)
+               ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
+       else if (mvm->last_bt_ci_cmd.secondary_ch_phy_id == phy_ctx_id)
+               ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
+       /* else - default = TX TX disallowed */
+
+       rcu_read_unlock();
+
+       return ret;
+}
+
 int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 {
        struct iwl_bt_coex_cmd *bt_cmd;
@@ -228,17 +360,10 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
                .flags = CMD_SYNC,
        };
        int ret;
+       u32 flags;
 
-       /* go to CALIB state in internal BT-Coex state machine */
-       ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
-                             BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
-       if (ret)
-               return ret;
-
-       ret  = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
-                              BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
-       if (ret)
-               return ret;
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+               return 0;
 
        bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
        if (!bt_cmd)
@@ -246,40 +371,52 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
        cmd.data[0] = bt_cmd;
 
        bt_cmd->max_kill = 5;
-       bt_cmd->bt3_time_t7_value = 1;
-       bt_cmd->bt3_prio_sample_time = 2;
-       bt_cmd->bt3_timer_t2_value = 0xc;
+       bt_cmd->bt4_antenna_isolation_thr = BT_ANTENNA_COUPLING_THRESHOLD,
+       bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling,
+       bt_cmd->bt4_tx_tx_delta_freq_thr = 15,
+       bt_cmd->bt4_tx_rx_max_freq0 = 15,
 
-       bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ?
+       flags = iwlwifi_mod_params.bt_coex_active ?
                        BT_COEX_NW : BT_COEX_DISABLE;
-       bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
+       flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
+       bt_cmd->flags = cpu_to_le32(flags);
 
-       bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
+       bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
                                            BT_VALID_BT_PRIO_BOOST |
                                            BT_VALID_MAX_KILL |
                                            BT_VALID_3W_TMRS |
                                            BT_VALID_KILL_ACK |
                                            BT_VALID_KILL_CTS |
                                            BT_VALID_REDUCED_TX_POWER |
-                                           BT_VALID_LUT);
+                                           BT_VALID_LUT |
+                                           BT_VALID_WIFI_RX_SW_PRIO_BOOST |
+                                           BT_VALID_WIFI_TX_SW_PRIO_BOOST |
+                                           BT_VALID_MULTI_PRIO_LUT |
+                                           BT_VALID_CORUN_LUT_20 |
+                                           BT_VALID_CORUN_LUT_40 |
+                                           BT_VALID_ANT_ISOLATION |
+                                           BT_VALID_ANT_ISOLATION_THRS |
+                                           BT_VALID_TXTX_DELTA_FREQ_THRS |
+                                           BT_VALID_TXRX_MAX_FREQ_0);
 
        if (mvm->cfg->bt_shared_single_ant)
-               memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup,
-                      sizeof(iwl_single_shared_ant_lookup));
-       else if (is_loose_coex())
-               memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
-                      sizeof(iwl_tight_lookup));
+               memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
+                      sizeof(iwl_single_shared_ant));
        else
-               memcpy(&bt_cmd->decision_lut, iwl_tight_lookup,
-                      sizeof(iwl_tight_lookup));
+               memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
+                      sizeof(iwl_combined_lookup));
 
-       bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
+       memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
+              sizeof(iwl_bt_prio_boost));
+       memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
+              sizeof(iwl_bt_mprio_lut));
        bt_cmd->kill_ack_msk =
                cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
        bt_cmd->kill_cts_msk =
                cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
 
        memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+       memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
 
        ret = iwl_mvm_send_cmd(mvm, &cmd);
 
@@ -334,13 +471,17 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
        if (!bt_cmd)
                return -ENOMEM;
        cmd.data[0] = bt_cmd;
+       bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
 
        bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
        bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
-       bt_cmd->valid_bit_msk =
-               cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
+       bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
+                                            BT_VALID_KILL_ACK |
+                                            BT_VALID_KILL_CTS);
 
-       IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
+       IWL_DEBUG_COEX(mvm, "ACK Kill msk = 0x%08x, CTS Kill msk = 0x%08x\n",
+                      iwl_bt_ack_kill_msk[bt_kill_msk],
+                      iwl_bt_cts_kill_msk[bt_kill_msk]);
 
        ret = iwl_mvm_send_cmd(mvm, &cmd);
 
@@ -380,8 +521,10 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
        if (!bt_cmd)
                return -ENOMEM;
        cmd.data[0] = bt_cmd;
+       bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
 
-       bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
+       bt_cmd->valid_bit_msk =
+               cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
        bt_cmd->bt_reduced_tx_power = sta_id;
 
        if (enable)
@@ -403,8 +546,25 @@ struct iwl_bt_iterator_data {
        struct iwl_mvm *mvm;
        u32 num_bss_ifaces;
        bool reduced_tx_power;
+       struct ieee80211_chanctx_conf *primary;
+       struct ieee80211_chanctx_conf *secondary;
 };
 
+static inline
+void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif,
+                                      bool enable, int rssi)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       mvmvif->bf_data.last_bt_coex_event = rssi;
+       mvmvif->bf_data.bt_coex_max_thold =
+               enable ? BT_ENABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+       mvmvif->bf_data.bt_coex_min_thold =
+               enable ? BT_DISABLE_REDUCED_TXPOWER_THRESHOLD : 0;
+}
+
+/* must be called under rcu_read_lock */
 static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
                                      struct ieee80211_vif *vif)
 {
@@ -413,65 +573,94 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        struct iwl_mvm *mvm = data->mvm;
        struct ieee80211_chanctx_conf *chanctx_conf;
        enum ieee80211_smps_mode smps_mode;
-       enum ieee80211_band band;
        int ave_rssi;
 
        lockdep_assert_held(&mvm->mutex);
-       if (vif->type != NL80211_IFTYPE_STATION)
-               return;
 
-       rcu_read_lock();
-       chanctx_conf = rcu_dereference(vif->chanctx_conf);
-       if (chanctx_conf && chanctx_conf->def.chan)
-               band = chanctx_conf->def.chan->band;
-       else
-               band = -1;
-       rcu_read_unlock();
+       if (vif->type != NL80211_IFTYPE_STATION &&
+           vif->type != NL80211_IFTYPE_AP)
+               return;
 
        smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
-       /* non associated BSSes aren't to be considered */
-       if (!vif->bss_conf.assoc)
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+       /* If channel context is invalid or not on 2.4GHz .. */
+       if ((!chanctx_conf ||
+            chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
+               /* ... and it is an associated STATION, relax constraints */
+               if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
+                       iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+                                           smps_mode);
+               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
                return;
+       }
+
+       /* SoftAP / GO will always be primary */
+       if (vif->type == NL80211_IFTYPE_AP) {
+               if (!mvmvif->ap_ibss_active)
+                       return;
+
+               /* the Ack / Cts kill mask must be default if AP / GO */
+               data->reduced_tx_power = false;
 
-       if (band != IEEE80211_BAND_2GHZ) {
-               iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
-                                   smps_mode);
+               if (chanctx_conf == data->primary)
+                       return;
+
+               /* downgrade the current primary no matter what its type is */
+               data->secondary = data->primary;
+               data->primary = chanctx_conf;
                return;
        }
 
-       if (data->notif->bt_status)
-               smps_mode = IEEE80211_SMPS_DYNAMIC;
+       data->num_bss_ifaces++;
+
+       /* we are now a STA / P2P Client, and take associated ones only */
+       if (!vif->bss_conf.assoc)
+               return;
+
+       /* STA / P2P Client, try to be primary if first vif */
+       if (!data->primary || data->primary == chanctx_conf)
+               data->primary = chanctx_conf;
+       else if (!data->secondary)
+               /* if secondary is not NULL, it might be a GO */
+               data->secondary = chanctx_conf;
 
-       if (data->notif->bt_traffic_load >= IWL_BT_LOAD_FORCE_SISO_THRESHOLD)
+       if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
                smps_mode = IEEE80211_SMPS_STATIC;
+       else if (le32_to_cpu(data->notif->bt_activity_grading) >=
+                BT_LOW_TRAFFIC)
+               smps_mode = IEEE80211_SMPS_DYNAMIC;
 
        IWL_DEBUG_COEX(data->mvm,
-                      "mac %d: bt_status %d traffic_load %d smps_req %d\n",
+                      "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
                       mvmvif->id,  data->notif->bt_status,
-                      data->notif->bt_traffic_load, smps_mode);
+                      data->notif->bt_activity_grading, smps_mode);
 
        iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
 
        /* don't reduce the Tx power if in loose scheme */
-       if (is_loose_coex())
+       if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
+           mvm->cfg->bt_shared_single_ant) {
+               data->reduced_tx_power = false;
+               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
                return;
+       }
 
-       data->num_bss_ifaces++;
-
-       /* reduced Txpower only if there are open BT connections, so ...*/
-       if (!BT_MBOX_MSG(data->notif, 3, OPEN_CON_2)) {
+       /* reduced Txpower only if BT is on, so ...*/
+       if (!data->notif->bt_status) {
                /* ... cancel reduced Tx power ... */
                if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
                        IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
                data->reduced_tx_power = false;
 
                /* ... and there is no need to get reports on RSSI any more. */
-               ieee80211_disable_rssi_reports(vif);
+               iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
                return;
        }
 
-       ave_rssi = ieee80211_ave_rssi(vif);
+       /* try to get the avg rssi from fw */
+       ave_rssi = mvmvif->bf_data.ave_beacon_signal;
 
        /* if the RSSI isn't valid, fake it is very low */
        if (!ave_rssi)
@@ -499,8 +688,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
        }
 
        /* Begin to monitor the RSSI: it may influence the reduced Tx power */
-       ieee80211_enable_rssi_reports(vif, BT_DISABLE_REDUCED_TXPOWER_THRESHOLD,
-                                     BT_ENABLE_REDUCED_TXPOWER_THRESHOLD);
+       iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
 }
 
 static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
@@ -510,11 +698,72 @@ static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
                .notif = &mvm->last_bt_notif,
                .reduced_tx_power = true,
        };
+       struct iwl_bt_coex_ci_cmd cmd = {};
+       u8 ci_bw_idx;
 
+       rcu_read_lock();
        ieee80211_iterate_active_interfaces_atomic(
                                        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                                        iwl_mvm_bt_notif_iterator, &data);
 
+       if (data.primary) {
+               struct ieee80211_chanctx_conf *chan = data.primary;
+               if (WARN_ON(!chan->def.chan)) {
+                       rcu_read_unlock();
+                       return;
+               }
+
+               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+                       ci_bw_idx = 0;
+                       cmd.co_run_bw_primary = 0;
+               } else {
+                       cmd.co_run_bw_primary = 1;
+                       if (chan->def.center_freq1 >
+                           chan->def.chan->center_freq)
+                               ci_bw_idx = 2;
+                       else
+                               ci_bw_idx = 1;
+               }
+
+               cmd.bt_primary_ci =
+                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+               cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
+       }
+
+       if (data.secondary) {
+               struct ieee80211_chanctx_conf *chan = data.secondary;
+               if (WARN_ON(!data.secondary->def.chan)) {
+                       rcu_read_unlock();
+                       return;
+               }
+
+               if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+                       ci_bw_idx = 0;
+                       cmd.co_run_bw_secondary = 0;
+               } else {
+                       cmd.co_run_bw_secondary = 1;
+                       if (chan->def.center_freq1 >
+                           chan->def.chan->center_freq)
+                               ci_bw_idx = 2;
+                       else
+                               ci_bw_idx = 1;
+               }
+
+               cmd.bt_secondary_ci =
+                       iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+               cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
+       }
+
+       rcu_read_unlock();
+
+       /* Don't spam the fw with the same command over and over */
+       if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
+               if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, CMD_SYNC,
+                                        sizeof(cmd), &cmd))
+                       IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
+               memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+       }
+
        /*
         * If there are no BSS / P2P client interfaces, reduced Tx Power is
         * irrelevant since it is based on the RSSI coming from the beacon.
@@ -536,12 +785,18 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
 
 
        IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
-       IWL_DEBUG_COEX(mvm, "\tBT %salive\n", notif->bt_status ? "" : "not ");
+       IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
+                      notif->bt_status ? "ON" : "OFF");
        IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
-       IWL_DEBUG_COEX(mvm, "\tBT traffic load %d\n", notif->bt_traffic_load);
+       IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+       IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
+                      le32_to_cpu(notif->primary_ch_lut));
+       IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
+                      le32_to_cpu(notif->secondary_ch_lut));
+       IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
+                      le32_to_cpu(notif->bt_activity_grading));
        IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
                       notif->bt_agg_traffic_load);
-       IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
 
        /* remember this notification for future use: rssi fluctuations */
        memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
@@ -565,6 +820,18 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
        struct ieee80211_sta *sta;
        struct iwl_mvm_sta *mvmsta;
 
+       struct ieee80211_chanctx_conf *chanctx_conf;
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(vif->chanctx_conf);
+       /* If channel context is invalid or not on 2.4GHz - don't count it */
+       if (!chanctx_conf ||
+           chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+               rcu_read_unlock();
+               return;
+       }
+       rcu_read_unlock();
+
        if (vif->type != NL80211_IFTYPE_STATION ||
            mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
                return;
@@ -594,15 +861,15 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        };
        int ret;
 
-       mutex_lock(&mvm->mutex);
+       lockdep_assert_held(&mvm->mutex);
 
        /* Rssi update while not associated ?! */
        if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
-               goto out_unlock;
+               return;
 
-       /* No open connection - reports should be disabled */
-       if (!BT_MBOX_MSG(&mvm->last_bt_notif, 3, OPEN_CON_2))
-               goto out_unlock;
+       /* No BT - reports should be disabled */
+       if (!mvm->last_bt_notif.bt_status)
+               return;
 
        IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
                       rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
@@ -611,7 +878,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
         * Check if rssi is good enough for reduced Tx power, but not in loose
         * scheme.
         */
-       if (rssi_event == RSSI_EVENT_LOW || is_loose_coex())
+       if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+           iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
                ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
                                                  false);
        else
@@ -633,12 +901,52 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm, data.reduced_tx_power))
                IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
+}
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF   (4000)
+#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT        (1200)
 
- out_unlock:
-       mutex_unlock(&mvm->mutex);
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+                                  struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+       enum iwl_bt_coex_lut_type lut_type;
+
+       if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+           BT_LOW_TRAFFIC)
+               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+       lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+       if (lut_type == BT_COEX_LOOSE_LUT)
+               return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+       /* tight coex, high bt traffic, reduce AGG time limit */
+       return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
+}
+
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+                                    struct ieee80211_sta *sta)
+{
+       struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+
+       if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+           BT_HIGH_TRAFFIC)
+               return true;
+
+       /*
+        * In Tight, BT can't Rx while we Tx, so use both antennas since BT is
+        * already killed.
+        * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while we
+        * Tx.
+        */
+       return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
 }
 
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
 {
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
+               return;
+
        iwl_mvm_bt_coex_notif_handle(mvm);
 }
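
For the BT_COEX_CI command built in iwl_mvm_bt_coex_notif_handle() above, the mask index chosen per channel is 0 for 20 MHz operation, 1 when the 40 MHz half sits below the control channel and 2 when it sits above, matching the three columns of iwl_ci_mask. A compact stand-alone restatement of that selection (names are illustrative):

	/* Stand-alone restatement of the ci_bw_idx selection used above. */
	static int ci_bw_index(bool is_40mhz, int center_freq1, int ctrl_chan_freq)
	{
		if (!is_40mhz)
			return 0;				/* 20 MHz mask */

		return center_freq1 > ctrl_chan_freq ? 2 : 1;	/* above : below */
	}
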
index 2bf29f7992ee4d953536bf1ec6ab0b2fbf1debae..4b6d670c35092d18679e8437ce5dbeb14ecf7702 100644 (file)
@@ -70,7 +70,9 @@
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
 #define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS       20
-#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS       20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS       8
+#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS        30
+#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS        20
 #define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT       50
 #define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT       50
 #define IWL_MVM_PS_SNOOZE_INTERVAL             25
index 417639f77b01c8a9a166de79e4374cefa2313ba6..6f45966817bb4c1d34cd3a10c4db74536f4a13bf 100644 (file)
@@ -67,6 +67,7 @@
 #include <net/cfg80211.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
+#include <net/addrconf.h>
 #include "iwl-modparams.h"
 #include "fw-api.h"
 #include "mvm.h"
@@ -381,14 +382,74 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
        union {
                struct iwl_proto_offload_cmd_v1 v1;
                struct iwl_proto_offload_cmd_v2 v2;
+               struct iwl_proto_offload_cmd_v3_small v3s;
+               struct iwl_proto_offload_cmd_v3_large v3l;
        } cmd = {};
+       struct iwl_host_cmd hcmd = {
+               .id = PROT_OFFLOAD_CONFIG_CMD,
+               .flags = CMD_SYNC,
+               .data[0] = &cmd,
+               .dataflags[0] = IWL_HCMD_DFL_DUP,
+       };
        struct iwl_proto_offload_cmd_common *common;
        u32 enabled = 0, size;
+       u32 capa_flags = mvm->fw->ucode_capa.flags;
 #if IS_ENABLED(CONFIG_IPV6)
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int i;
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+       if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+           capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+               struct iwl_ns_config *nsc;
+               struct iwl_targ_addr *addrs;
+               int n_nsc, n_addrs;
+               int c;
+
+               if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+                       nsc = cmd.v3s.ns_config;
+                       n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+                       addrs = cmd.v3s.targ_addrs;
+                       n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+               } else {
+                       nsc = cmd.v3l.ns_config;
+                       n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+                       addrs = cmd.v3l.targ_addrs;
+                       n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+               }
+
+               if (mvmvif->num_target_ipv6_addrs)
+                       enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+
+               /*
+                * For each address we have (and that will fit), fill a target
+                * address struct and group addresses that share a solicited-node
+                * address into a single NS offload config.
+                */
+               for (i = 0, c = 0;
+                    i < mvmvif->num_target_ipv6_addrs &&
+                    i < n_addrs && c < n_nsc; i++) {
+                       struct in6_addr solicited_addr;
+                       int j;
+
+                       addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+                                                 &solicited_addr);
+                       for (j = 0; j < c; j++)
+                               if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+                                                 &solicited_addr) == 0)
+                                       break;
+                       if (j == c)
+                               c++;
+                       addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+                       addrs[i].config_num = cpu_to_le32(j);
+                       nsc[j].dest_ipv6_addr = solicited_addr;
+                       memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+               }
+
+               if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+                       cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
+               else
+                       cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
+       } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
                if (mvmvif->num_target_ipv6_addrs) {
                        enabled |= IWL_D3_PROTO_OFFLOAD_NS;
                        memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
@@ -419,7 +480,13 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
        }
 #endif
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+       if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+               common = &cmd.v3s.common;
+               size = sizeof(cmd.v3s);
+       } else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+               common = &cmd.v3l.common;
+               size = sizeof(cmd.v3l);
+       } else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
                common = &cmd.v2.common;
                size = sizeof(cmd.v2);
        } else {
@@ -438,8 +505,8 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
 
        common->enabled = cpu_to_le32(enabled);
 
-       return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
-                                   size, &cmd);
+       hcmd.len[0] = size;
+       return iwl_mvm_send_cmd(mvm, &hcmd);
 }
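
The NS-offload loop above groups target addresses by their solicited-node multicast address, so unicast addresses sharing the same low 24 bits reuse a single ns_config entry. For reference, the mapping addrconf_addr_solict_mult() performs, as a stand-alone restatement rather than driver code:

	/* ff02::1:ffXX:XXXX - solicited-node multicast from a unicast address. */
	#include <stdint.h>
	#include <string.h>

	static void solicited_node(const uint8_t unicast[16], uint8_t mcast[16])
	{
		static const uint8_t prefix[13] = {
			0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0xff
		};

		memcpy(mcast, prefix, sizeof(prefix));
		memcpy(mcast + 13, unicast + 13, 3);	/* keep the low 24 bits */
	}
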
 
 enum iwl_mvm_tcp_packet_type {
@@ -793,6 +860,74 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        return 0;
 }
 
+static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
+                                      struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_nonqos_seq_query_cmd query_cmd = {
+               .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
+               .mac_id_n_color =
+                       cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                       mvmvif->color)),
+       };
+       struct iwl_host_cmd cmd = {
+               .id = NON_QOS_TX_COUNTER_CMD,
+               .flags = CMD_SYNC | CMD_WANT_SKB,
+       };
+       int err;
+       u32 size;
+
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
+               cmd.data[0] = &query_cmd;
+               cmd.len[0] = sizeof(query_cmd);
+       }
+
+       err = iwl_mvm_send_cmd(mvm, &cmd);
+       if (err)
+               return err;
+
+       size = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+       size -= sizeof(cmd.resp_pkt->hdr);
+       if (size < sizeof(__le16)) {
+               err = -EINVAL;
+       } else {
+               err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
+               /* new API returns next, not last-used seqno */
+               if (mvm->fw->ucode_capa.flags &
+                               IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
+                       err -= 0x10;
+       }
+
+       iwl_free_resp(&cmd);
+       return err;
+}
+
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_nonqos_seq_query_cmd query_cmd = {
+               .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
+               .mac_id_n_color =
+                       cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+                                                       mvmvif->color)),
+               .value = cpu_to_le16(mvmvif->seqno),
+       };
+
+       /* return if called during restart, not resume from D3 */
+       if (!mvmvif->seqno_valid)
+               return;
+
+       mvmvif->seqno_valid = false;
+
+       if (!(mvm->fw->ucode_capa.flags &
+                       IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API))
+               return;
+
+       if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, CMD_SYNC,
+                                sizeof(query_cmd), &query_cmd))
+               IWL_ERR(mvm, "failed to set non-QoS seqno\n");
+}
+
 static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
                             struct cfg80211_wowlan *wowlan,
                             bool test)
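
The err -= 0x10 adjustment in iwl_mvm_get_last_nonqos_seq() above (and the matching seq -= 0x10 for the QoS counters below) works because the 802.11 sequence-control field keeps the fragment number in bits 0-3 and the sequence number in bits 4-15, so one sequence-number step is 0x10 and "next to use" minus one step gives "last used". In short:

	/* Sequence control: bits 15..4 = sequence number, bits 3..0 = fragment. */
	#include <stdint.h>

	static inline uint16_t last_used_from_next(uint16_t next_to_use)
	{
		return next_to_use - 0x10;	/* step back one sequence number */
	}
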
@@ -829,7 +964,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        };
        int ret, i;
        int len __maybe_unused;
-       u16 seq;
        u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
 
        if (!wowlan) {
@@ -872,26 +1006,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
        mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
 
-       /*
-        * The D3 firmware still hardcodes the AP station ID for the
-        * BSS we're associated with as 0. Store the real STA ID here
-        * and assign 0. When we leave this function, we'll restore
-        * the original value for the resume code.
-        */
-       old_ap_sta_id = mvm_ap_sta->sta_id;
-       mvm_ap_sta->sta_id = 0;
-       mvmvif->ap_sta_id = 0;
-
        /* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
 
        wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
 
-       /*
-        * We know the last used seqno, and the uCode expects to know that
-        * one, it will increment before TX.
-        */
-       seq = mvm_ap_sta->last_seq_ctl & IEEE80211_SCTL_SEQ;
-       wowlan_config_cmd.non_qos_seq = cpu_to_le16(seq);
+       /* Query the last used seqno and set it */
+       ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
+       if (ret < 0)
+               goto out_noreset;
+       wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
 
        /*
         * For QoS counters, we store the one to use next, so subtract 0x10
@@ -899,7 +1022,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
         * increment after using the value (i.e. store the next value to use).
         */
        for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
-               seq = mvm_ap_sta->tid_data[i].seq_number;
+               u16 seq = mvm_ap_sta->tid_data[i].seq_number;
                seq -= 0x10;
                wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
        }
@@ -944,6 +1067,16 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
        iwl_trans_stop_device(mvm->trans);
 
+       /*
+        * The D3 firmware still hardcodes the AP station ID for the
+        * BSS we're associated with as 0. Store the real STA ID here
+        * and assign 0. When we leave this function, we'll restore
+        * the original value for the resume code.
+        */
+       old_ap_sta_id = mvm_ap_sta->sta_id;
+       mvm_ap_sta->sta_id = 0;
+       mvmvif->ap_sta_id = 0;
+
        /*
         * Set the HW restart bit -- this is mostly true as we're
         * going to load new firmware and reprogram that, though
@@ -1059,6 +1192,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        if (ret)
                goto out;
 
+       ret = iwl_mvm_power_update_device_mode(mvm);
+       if (ret)
+               goto out;
+
        ret = iwl_mvm_power_update_mode(mvm, vif);
        if (ret)
                goto out;
@@ -1109,16 +1246,26 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
        return __iwl_mvm_suspend(hw, wowlan, false);
 }
 
+/* converted data from the different status responses */
+struct iwl_wowlan_status_data {
+       u16 pattern_number;
+       u16 qos_seq_ctr[8];
+       u32 wakeup_reasons;
+       u32 wake_packet_length;
+       u32 wake_packet_bufsize;
+       const u8 *wake_packet;
+};
+
 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
                                          struct ieee80211_vif *vif,
-                                         struct iwl_wowlan_status *status)
+                                         struct iwl_wowlan_status_data *status)
 {
        struct sk_buff *pkt = NULL;
        struct cfg80211_wowlan_wakeup wakeup = {
                .pattern_idx = -1,
        };
        struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
-       u32 reasons = le32_to_cpu(status->wakeup_reasons);
+       u32 reasons = status->wakeup_reasons;
 
        if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
                wakeup_report = NULL;
@@ -1130,7 +1277,7 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
 
        if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
                wakeup.pattern_idx =
-                       le16_to_cpu(status->pattern_number);
+                       status->pattern_number;
 
        if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
                       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
@@ -1158,8 +1305,8 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
                wakeup.tcp_match = true;
 
        if (status->wake_packet_bufsize) {
-               int pktsize = le32_to_cpu(status->wake_packet_bufsize);
-               int pktlen = le32_to_cpu(status->wake_packet_length);
+               int pktsize = status->wake_packet_bufsize;
+               int pktlen = status->wake_packet_length;
                const u8 *pktdata = status->wake_packet;
                struct ieee80211_hdr *hdr = (void *)pktdata;
                int truncated = pktlen - pktsize;
@@ -1239,8 +1386,229 @@ static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
        kfree_skb(pkt);
 }
 
+static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
+                                 struct ieee80211_key_seq *seq)
+{
+       u64 pn;
+
+       pn = le64_to_cpu(sc->pn);
+       seq->ccmp.pn[0] = pn >> 40;
+       seq->ccmp.pn[1] = pn >> 32;
+       seq->ccmp.pn[2] = pn >> 24;
+       seq->ccmp.pn[3] = pn >> 16;
+       seq->ccmp.pn[4] = pn >> 8;
+       seq->ccmp.pn[5] = pn;
+}
+
+static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
+                                  struct ieee80211_key_seq *seq)
+{
+       seq->tkip.iv32 = le32_to_cpu(sc->iv32);
+       seq->tkip.iv16 = le16_to_cpu(sc->iv16);
+}
+
+static void iwl_mvm_set_aes_rx_seq(struct aes_sc *scs,
+                                  struct ieee80211_key_conf *key)
+{
+       int tid;
+
+       BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+       for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+               struct ieee80211_key_seq seq = {};
+
+               iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+               ieee80211_set_key_rx_seq(key, tid, &seq);
+       }
+}
+
+static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
+                                   struct ieee80211_key_conf *key)
+{
+       int tid;
+
+       BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+       for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+               struct ieee80211_key_seq seq = {};
+
+               iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
+               ieee80211_set_key_rx_seq(key, tid, &seq);
+       }
+}
+
+static void iwl_mvm_set_key_rx_seq(struct ieee80211_key_conf *key,
+                                  struct iwl_wowlan_status_v6 *status)
+{
+       union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_CCMP:
+               iwl_mvm_set_aes_rx_seq(rsc->aes.multicast_rsc, key);
+               break;
+       case WLAN_CIPHER_SUITE_TKIP:
+               iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
+               break;
+       default:
+               WARN_ON(1);
+       }
+}
+
+struct iwl_mvm_d3_gtk_iter_data {
+       struct iwl_wowlan_status_v6 *status;
+       void *last_gtk;
+       u32 cipher;
+       bool find_phase, unhandled_cipher;
+       int num_keys;
+};
+
+static void iwl_mvm_d3_update_gtks(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif,
+                                  struct ieee80211_sta *sta,
+                                  struct ieee80211_key_conf *key,
+                                  void *_data)
+{
+       struct iwl_mvm_d3_gtk_iter_data *data = _data;
+
+       if (data->unhandled_cipher)
+               return;
+
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_WEP40:
+       case WLAN_CIPHER_SUITE_WEP104:
+               /* ignore WEP completely, nothing to do */
+               return;
+       case WLAN_CIPHER_SUITE_CCMP:
+       case WLAN_CIPHER_SUITE_TKIP:
+               /* we support these */
+               break;
+       default:
+               /* everything else (even CMAC for MFP) - disconnect from AP */
+               data->unhandled_cipher = true;
+               return;
+       }
+
+       data->num_keys++;
+
+       /*
+        * pairwise key - update sequence counters only;
+        * note that this assumes no TDLS sessions are active
+        */
+       if (sta) {
+               struct ieee80211_key_seq seq = {};
+               union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
+
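+               /* pairwise counters are only written during the second pass */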
+               if (data->find_phase)
+                       return;
+
+               switch (key->cipher) {
+               case WLAN_CIPHER_SUITE_CCMP:
+                       iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
+                       iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
+                       break;
+               case WLAN_CIPHER_SUITE_TKIP:
+                       iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
+                       iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+                       break;
+               }
+               ieee80211_set_key_tx_seq(key, &seq);
+
+               /* that's it for this key */
+               return;
+       }
+
+       if (data->find_phase) {
+               data->last_gtk = key;
+               data->cipher = key->cipher;
+               return;
+       }
+
+       if (data->status->num_of_gtk_rekeys)
+               ieee80211_remove_key(key);
+       else if (data->last_gtk == key)
+               iwl_mvm_set_key_rx_seq(key, data->status);
+}
+
+static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif,
+                                         struct iwl_wowlan_status_v6 *status)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+               .status = status,
+       };
+
+       if (!status || !vif->bss_conf.bssid)
+               return false;
+
+       /* find last GTK that we used initially, if any */
+       gtkdata.find_phase = true;
+       ieee80211_iter_keys(mvm->hw, vif,
+                           iwl_mvm_d3_update_gtks, &gtkdata);
+       /* not trying to keep connections with MFP/unhandled ciphers */
+       if (gtkdata.unhandled_cipher)
+               return false;
+       if (!gtkdata.num_keys)
+               return true;
+       if (!gtkdata.last_gtk)
+               return false;
+
+       /*
+        * invalidate all other GTKs that might still exist and update
+        * the one that we used
+        */
+       gtkdata.find_phase = false;
+       ieee80211_iter_keys(mvm->hw, vif,
+                           iwl_mvm_d3_update_gtks, &gtkdata);
+
+       if (status->num_of_gtk_rekeys) {
+               struct ieee80211_key_conf *key;
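+               /* the trailing key[] gives conf.key room for the key material */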
+               struct {
+                       struct ieee80211_key_conf conf;
+                       u8 key[32];
+               } conf = {
+                       .conf.cipher = gtkdata.cipher,
+                       .conf.keyidx = status->gtk.key_index,
+               };
+
+               switch (gtkdata.cipher) {
+               case WLAN_CIPHER_SUITE_CCMP:
+                       conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+                       memcpy(conf.conf.key, status->gtk.decrypt_key,
+                              WLAN_KEY_LEN_CCMP);
+                       break;
+               case WLAN_CIPHER_SUITE_TKIP:
+                       conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+                       memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
+                       /* leave TX MIC key zeroed, we don't use it anyway */
+                       memcpy(conf.conf.key +
+                              NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+                              status->gtk.tkip_mic_key, 8);
+                       break;
+               }
+
+               key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+               if (IS_ERR(key))
+                       return false;
+               iwl_mvm_set_key_rx_seq(key, status);
+       }
+
+       if (status->num_of_gtk_rekeys) {
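+               /* mac80211 expects the replay counter in big-endian */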
+               __be64 replay_ctr =
+                       cpu_to_be64(le64_to_cpu(status->replay_ctr));
+               ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
+                                          (void *)&replay_ctr, GFP_KERNEL);
+       }
+
+       mvmvif->seqno_valid = true;
+       /* +0x10 because the set API expects next-to-use, not last-used */
+       mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+
+       return true;
+}
+
 /* releases the MVM mutex */
-static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *vif)
 {
        u32 base = mvm->error_event_table;
@@ -1253,8 +1621,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
                .id = WOWLAN_GET_STATUSES,
                .flags = CMD_SYNC | CMD_WANT_SKB,
        };
-       struct iwl_wowlan_status *status;
-       int ret, len;
+       struct iwl_wowlan_status_data status;
+       struct iwl_wowlan_status_v6 *status_v6;
+       int ret, len, status_size, i;
+       bool keep;
+       struct ieee80211_sta *ap_sta;
+       struct iwl_mvm_sta *mvm_ap_sta;
 
        iwl_trans_read_mem_bytes(mvm->trans, base,
                                 &err_info, sizeof(err_info));
@@ -1287,32 +1659,83 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
        if (!cmd.resp_pkt)
                goto out_unlock;
 
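+       /* newer firmware with the D3 continuity API returns the larger v6 status */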
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
+               status_size = sizeof(struct iwl_wowlan_status_v6);
+       else
+               status_size = sizeof(struct iwl_wowlan_status_v4);
+
        len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-       if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+       if (len - sizeof(struct iwl_cmd_header) < status_size) {
                IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
                goto out_free_resp;
        }
 
-       status = (void *)cmd.resp_pkt->data;
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API) {
+               status_v6 = (void *)cmd.resp_pkt->data;
+
+               status.pattern_number = le16_to_cpu(status_v6->pattern_number);
+               for (i = 0; i < 8; i++)
+                       status.qos_seq_ctr[i] =
+                               le16_to_cpu(status_v6->qos_seq_ctr[i]);
+               status.wakeup_reasons = le32_to_cpu(status_v6->wakeup_reasons);
+               status.wake_packet_length =
+                       le32_to_cpu(status_v6->wake_packet_length);
+               status.wake_packet_bufsize =
+                       le32_to_cpu(status_v6->wake_packet_bufsize);
+               status.wake_packet = status_v6->wake_packet;
+       } else {
+               struct iwl_wowlan_status_v4 *status_v4;
+               status_v6 = NULL;
+               status_v4 = (void *)cmd.resp_pkt->data;
+
+               status.pattern_number = le16_to_cpu(status_v4->pattern_number);
+               for (i = 0; i < 8; i++)
+                       status.qos_seq_ctr[i] =
+                               le16_to_cpu(status_v4->qos_seq_ctr[i]);
+               status.wakeup_reasons = le32_to_cpu(status_v4->wakeup_reasons);
+               status.wake_packet_length =
+                       le32_to_cpu(status_v4->wake_packet_length);
+               status.wake_packet_bufsize =
+                       le32_to_cpu(status_v4->wake_packet_bufsize);
+               status.wake_packet = status_v4->wake_packet;
+       }
 
        if (len - sizeof(struct iwl_cmd_header) !=
-           sizeof(*status) +
-           ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
+           status_size + ALIGN(status.wake_packet_bufsize, 4)) {
                IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
                goto out_free_resp;
        }
 
+       /* still at hard-coded place 0 for D3 image */
+       ap_sta = rcu_dereference_protected(
+                       mvm->fw_id_to_mac_id[0],
+                       lockdep_is_held(&mvm->mutex));
+       if (IS_ERR_OR_NULL(ap_sta))
+               goto out_free_resp;
+
+       mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
+       for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+               u16 seq = status.qos_seq_ctr[i];
+               /* firmware stores last-used value, we store next value */
+               seq += 0x10;
+               mvm_ap_sta->tid_data[i].seq_number = seq;
+       }
+
        /* now we have all the data we need, unlock to avoid mac80211 issues */
        mutex_unlock(&mvm->mutex);
 
-       iwl_mvm_report_wakeup_reasons(mvm, vif, status);
+       iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
+
+       keep = iwl_mvm_setup_connection_keep(mvm, vif, status_v6);
+
        iwl_free_resp(&cmd);
-       return;
+       return keep;
 
  out_free_resp:
        iwl_free_resp(&cmd);
  out_unlock:
        mutex_unlock(&mvm->mutex);
+       return false;
 }
 
 static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1335,6 +1758,17 @@ static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
 #endif
 }
 
+static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
+                                      struct ieee80211_vif *vif)
+{
+       /* skip the one we keep connection on */
+       if (data == vif)
+               return;
+
+       if (vif->type == NL80211_IFTYPE_STATION)
+               ieee80211_resume_disconnect(vif);
+}
+
 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 {
        struct iwl_d3_iter_data resume_iter_data = {
@@ -1343,6 +1777,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        struct ieee80211_vif *vif = NULL;
        int ret;
        enum iwl_d3_status d3_status;
+       bool keep = false;
 
        mutex_lock(&mvm->mutex);
 
@@ -1368,7 +1803,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        /* query SRAM first in case we want event logging */
        iwl_mvm_read_d3_sram(mvm);
 
-       iwl_mvm_query_wakeup_reasons(mvm, vif);
+       keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
        /* has unlocked the mutex, so skip that */
        goto out;
 
@@ -1376,8 +1811,10 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        mutex_unlock(&mvm->mutex);
 
  out:
-       if (!test && vif)
-               ieee80211_resume_disconnect(vif);
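+       /* disconnect all station interfaces except the one we kept, if any */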
+       if (!test)
+               ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
+                       IEEE80211_IFACE_ITER_NORMAL,
+                       iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
 
        /* return 1 to reconfigure the device */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
index aac81b8984b05ec87f50fa09b32f7ab9a3dc36ed..0675f0c8ef9388a7d888625077860207aba0a826 100644 (file)
@@ -246,58 +246,56 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
        return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-static ssize_t iwl_dbgfs_power_down_allow_write(struct file *file,
-                                               const char __user *user_buf,
+static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
+                                               char __user *user_buf,
                                                size_t count, loff_t *ppos)
 {
        struct iwl_mvm *mvm = file->private_data;
-       char buf[8] = {};
-       int allow;
-
-       if (!mvm->ucode_loaded)
-               return -EIO;
-
-       if (copy_from_user(buf, user_buf, sizeof(buf)))
-               return -EFAULT;
-
-       if (sscanf(buf, "%d", &allow) != 1)
-               return -EINVAL;
-
-       IWL_DEBUG_POWER(mvm, "%s device power down\n",
-                       allow ? "allow" : "prevent");
+       char buf[64];
+       int bufsz = sizeof(buf);
+       int pos = 0;
 
-       /*
-        * TODO: Send REPLY_DEBUG_CMD (0xf0) when FW support it
-        */
+       pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
+                        mvm->disable_power_off);
+       pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
+                        mvm->disable_power_off_d3);
 
-       return count;
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
 }
 
-static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
-                                                  const char __user *user_buf,
-                                                  size_t count, loff_t *ppos)
+static ssize_t iwl_dbgfs_disable_power_off_write(struct file *file,
+                                                const char __user *user_buf,
+                                                size_t count, loff_t *ppos)
 {
        struct iwl_mvm *mvm = file->private_data;
-       char buf[8] = {};
-       int allow;
+       char buf[64] = {};
+       int ret;
+       int val;
 
-       if (copy_from_user(buf, user_buf, sizeof(buf)))
+       if (!mvm->ucode_loaded)
+               return -EIO;
+
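+       /* clamp count so buf keeps its terminating NUL */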
+       count = min_t(size_t, count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       if (sscanf(buf, "%d", &allow) != 1)
+       if (!strncmp("disable_power_off_d0=", buf, 21)) {
+               if (sscanf(buf + 21, "%d", &val) != 1)
+                       return -EINVAL;
+               mvm->disable_power_off = val;
+       } else if (!strncmp("disable_power_off_d3=", buf, 21)) {
+               if (sscanf(buf + 21, "%d", &val) != 1)
+                       return -EINVAL;
+               mvm->disable_power_off_d3 = val;
+       } else {
                return -EINVAL;
+       }
 
-       IWL_DEBUG_POWER(mvm, "%s device power down in d3\n",
-                       allow ? "allow" : "prevent");
-
-       /*
-        * TODO: When WoWLAN FW alive notification happens, driver will send
-        * REPLY_DEBUG_CMD setting power_down_allow flag according to
-        * mvm->prevent_power_down_d3
-        */
-       mvm->prevent_power_down_d3 = !allow;
+       mutex_lock(&mvm->mutex);
+       ret = iwl_mvm_power_update_device_mode(mvm);
+       mutex_unlock(&mvm->mutex);
 
-       return count;
+       return ret ?: count;
 }
 
 static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
@@ -371,7 +369,8 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
        int val;
        int ret;
 
-       if (copy_from_user(buf, user_buf, sizeof(buf)))
+       count = min_t(size_t, count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
        if (!strncmp("keep_alive=", buf, 11)) {
@@ -394,7 +393,9 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
                if (sscanf(buf + 16, "%d", &val) != 1)
                        return -EINVAL;
                param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
-       } else if (!strncmp("disable_power_off=", buf, 18)) {
+       } else if (!strncmp("disable_power_off=", buf, 18) &&
+                  !(mvm->fw->ucode_capa.flags &
+                    IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)) {
                if (sscanf(buf + 18, "%d", &val) != 1)
                        return -EINVAL;
                param = MVM_DEBUGFS_PM_DISABLE_POWER_OFF;
@@ -581,15 +582,21 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
        BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
 
        pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n",
-                                        notif->bt_status);
+                        notif->bt_status);
        pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n",
-                                        notif->bt_open_conn);
+                        notif->bt_open_conn);
        pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n",
-                                        notif->bt_traffic_load);
+                        notif->bt_traffic_load);
        pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n",
-                                        notif->bt_agg_traffic_load);
+                        notif->bt_agg_traffic_load);
        pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
-                                        notif->bt_ci_compliance);
+                        notif->bt_ci_compliance);
+       pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
+                        le32_to_cpu(notif->primary_ch_lut));
+       pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
+                        le32_to_cpu(notif->secondary_ch_lut));
+       pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
+                        le32_to_cpu(notif->bt_activity_grading));
 
        mutex_unlock(&mvm->mutex);
 
@@ -600,6 +607,38 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
 }
 #undef BT_MBOX_PRINT
 
+static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
+                                    size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
+       char buf[256];
+       int bufsz = sizeof(buf);
+       int pos = 0;
+
+       mutex_lock(&mvm->mutex);
+
+       pos += scnprintf(buf+pos, bufsz-pos, "Channel inhibition CMD\n");
+       pos += scnprintf(buf+pos, bufsz-pos,
+                      "\tPrimary Channel Bitmap 0x%016llx Fat: %d\n",
+                      le64_to_cpu(cmd->bt_primary_ci),
+                      !!cmd->co_run_bw_primary);
+       pos += scnprintf(buf+pos, bufsz-pos,
+                      "\tSecondary Channel Bitmap 0x%016llx Fat: %d\n",
+                      le64_to_cpu(cmd->bt_secondary_ci),
+                      !!cmd->co_run_bw_secondary);
+
+       pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n");
+       pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n",
+                        iwl_bt_ack_kill_msk[mvm->bt_kill_msk]);
+       pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n",
+                        iwl_bt_cts_kill_msk[mvm->bt_kill_msk]);
+
+       mutex_unlock(&mvm->mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
 #define PRINT_STATS_LE32(_str, _val)                                   \
                         pos += scnprintf(buf + pos, bufsz - pos,       \
                                          fmt_table, _str,              \
@@ -615,9 +654,11 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
        int pos = 0;
        char *buf;
        int ret;
-       int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
-                   sizeof(struct mvm_statistics_rx_non_phy) * 10 +
-                   sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
+       /* 43 is the size of each data line, 33 is the size of each header */
+       size_t bufsz =
+               ((sizeof(struct mvm_statistics_rx) / sizeof(__le32)) * 43) +
+               (4 * 33) + 1;
+
        struct mvm_statistics_rx_phy *ofdm;
        struct mvm_statistics_rx_phy *cck;
        struct mvm_statistics_rx_non_phy *general;
@@ -712,6 +753,7 @@ static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
        PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
        PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
        PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
+       PRINT_STATS_LE32("mac_id", general->mac_id);
        PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
 
        pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
@@ -757,6 +799,59 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
        return count;
 }
 
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
+                               char __user *user_buf,
+                               size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       int pos = 0;
+       char buf[32];
+       const size_t bufsz = sizeof(buf);
+
+       /* print which antennas were set for the scan command by the user */
+       pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
+       if (mvm->scan_rx_ant & ANT_A)
+               pos += scnprintf(buf + pos, bufsz - pos, "A");
+       if (mvm->scan_rx_ant & ANT_B)
+               pos += scnprintf(buf + pos, bufsz - pos, "B");
+       if (mvm->scan_rx_ant & ANT_C)
+               pos += scnprintf(buf + pos, bufsz - pos, "C");
+       pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_write(struct file *file,
+                                const char __user *user_buf,
+                                size_t count, loff_t *ppos)
+{
+       struct iwl_mvm *mvm = file->private_data;
+       char buf[8];
+       int buf_size;
+       u8 scan_rx_ant;
+
+       memset(buf, 0, sizeof(buf));
+       buf_size = min(count, sizeof(buf) - 1);
+
+       /* get the argument from the user and check if it is valid */
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+       if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
+               return -EINVAL;
+       if (scan_rx_ant > ANT_ABC)
+               return -EINVAL;
+       if (scan_rx_ant & ~iwl_fw_valid_rx_ant(mvm->fw))
+               return -EINVAL;
+
+       /* change the rx antennas for scan command */
+       mvm->scan_rx_ant = scan_rx_ant;
+
+       return count;
+}
+
 static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
                                enum iwl_dbgfs_bf_mask param, int value)
 {
@@ -968,7 +1063,8 @@ static ssize_t iwl_dbgfs_d3_sram_write(struct file *file,
        char buf[8] = {};
        int store;
 
-       if (copy_from_user(buf, user_buf, sizeof(buf)))
+       count = min_t(size_t, count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
        if (sscanf(buf, "%d", &store) != 1)
@@ -1063,10 +1159,12 @@ MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
 MVM_DEBUGFS_READ_FILE_OPS(stations);
 MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
-MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
-MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off);
 MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain);
+
 #ifdef CONFIG_PM_SLEEP
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
 #endif
@@ -1087,10 +1185,14 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
-       MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
-       MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
+               MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir,
+                                    S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
        MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
+                            S_IWUSR | S_IRUSR);
 #ifdef CONFIG_PM_SLEEP
        MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
index 05c61d6f384eeaa5f75ff2d461692cbec3b8a6fa..4ea5e24ca92d8da9a0a366a3ab0e0bf903f55ef1 100644 (file)
@@ -82,6 +82,8 @@
  * @BT_USE_DEFAULTS:
  * @BT_SYNC_2_BT_DISABLE:
  * @BT_COEX_CORUNNING_TBL_EN:
+ *
+ * The COEX_MODE must be set for each command, even if it is not changed.
  */
 enum iwl_bt_coex_flags {
        BT_CH_PRIMARY_EN                = BIT(0),
@@ -95,14 +97,16 @@ enum iwl_bt_coex_flags {
        BT_COEX_NW                      = 0x3 << BT_COEX_MODE_POS,
        BT_USE_DEFAULTS                 = BIT(6),
        BT_SYNC_2_BT_DISABLE            = BIT(7),
-       /*
-        * For future use - when the flags will be enlarged
-        * BT_COEX_CORUNNING_TBL_EN     = BIT(8),
-        */
+       BT_COEX_CORUNNING_TBL_EN        = BIT(8),
+       BT_COEX_MPLUT_TBL_EN            = BIT(9),
+       /* Bit 10 is reserved */
+       BT_COEX_WF_PRIO_BOOST_CHECK_EN  = BIT(11),
 };
 
 /*
  * indicates what has changed in the BT_COEX command.
+ * BT_VALID_ENABLE must be set for each command. Commands without this bit will
+ * be discarded by the firmware.
  */
 enum iwl_bt_coex_valid_bit_msk {
        BT_VALID_ENABLE                 = BIT(0),
@@ -121,11 +125,8 @@ enum iwl_bt_coex_valid_bit_msk {
        BT_VALID_CORUN_LUT_40           = BIT(13),
        BT_VALID_ANT_ISOLATION          = BIT(14),
        BT_VALID_ANT_ISOLATION_THRS     = BIT(15),
-       /*
-        * For future use - when the valid flags will be enlarged
-        * BT_VALID_TXTX_DELTA_FREQ_THRS        = BIT(16),
-        * BT_VALID_TXRX_MAX_FREQ_0     = BIT(17),
-        */
+       BT_VALID_TXTX_DELTA_FREQ_THRS   = BIT(16),
+       BT_VALID_TXRX_MAX_FREQ_0        = BIT(17),
 };
 
 /**
@@ -142,48 +143,88 @@ enum iwl_bt_reduced_tx_power {
        BT_REDUCED_TX_POWER_DATA        = BIT(1),
 };
 
+enum iwl_bt_coex_lut_type {
+       BT_COEX_TIGHT_LUT = 0,
+       BT_COEX_LOOSE_LUT,
+       BT_COEX_TX_DIS_LUT,
+
+       BT_COEX_MAX_LUT,
+};
+
 #define BT_COEX_LUT_SIZE (12)
+#define BT_COEX_CORUN_LUT_SIZE (32)
+#define BT_COEX_MULTI_PRIO_LUT_SIZE (2)
+#define BT_COEX_BOOST_SIZE (4)
+#define BT_REDUCED_TX_POWER_BIT BIT(7)
 
 /**
  * struct iwl_bt_coex_cmd - bt coex configuration command
  * @flags:&enum iwl_bt_coex_flags
- * @lead_time:
  * @max_kill:
- * @bt3_time_t7_value:
- * @kill_ack_msk:
- * @kill_cts_msk:
- * @bt3_prio_sample_time:
- * @bt3_timer_t2_value:
- * @bt4_reaction_time:
- * @decision_lut[12]:
  * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
- * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
- * @bt_prio_boost: values for PTA boost register
+ * @bt4_antenna_isolation:
+ * @bt4_antenna_isolation_thr:
+ * @bt4_tx_tx_delta_freq_thr:
+ * @bt4_tx_rx_max_freq0:
+ * @bt_prio_boost:
  * @wifi_tx_prio_boost: SW boost of wifi tx priority
  * @wifi_rx_prio_boost: SW boost of wifi rx priority
+ * @kill_ack_msk:
+ * @kill_cts_msk:
+ * @decision_lut:
+ * @bt4_multiprio_lut:
+ * @bt4_corun_lut20:
+ * @bt4_corun_lut40:
+ * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
  *
  * The structure is used for the BT_COEX command.
  */
 struct iwl_bt_coex_cmd {
-       u8 flags;
-       u8 lead_time;
+       __le32 flags;
        u8 max_kill;
-       u8 bt3_time_t7_value;
+       u8 bt_reduced_tx_power;
+       u8 reserved[2];
+
+       u8 bt4_antenna_isolation;
+       u8 bt4_antenna_isolation_thr;
+       u8 bt4_tx_tx_delta_freq_thr;
+       u8 bt4_tx_rx_max_freq0;
+
+       __le32 bt_prio_boost[BT_COEX_BOOST_SIZE];
+       __le32 wifi_tx_prio_boost;
+       __le32 wifi_rx_prio_boost;
        __le32 kill_ack_msk;
        __le32 kill_cts_msk;
-       u8 bt3_prio_sample_time;
-       u8 bt3_timer_t2_value;
-       __le16 bt4_reaction_time;
-       __le32 decision_lut[BT_COEX_LUT_SIZE];
-       u8 bt_reduced_tx_power;
-       u8 reserved;
-       __le16 valid_bit_msk;
-       __le32 bt_prio_boost;
-       u8 reserved2;
-       u8 wifi_tx_prio_boost;
-       __le16 wifi_rx_prio_boost;
+
+       __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE];
+       __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE];
+       __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE];
+       __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE];
+
+       __le32 valid_bit_msk;
 } __packed; /* BT_COEX_CMD_API_S_VER_3 */
 
+/**
+ * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
+ * @bt_primary_ci:
+ * @bt_secondary_ci:
+ * @co_run_bw_primary:
+ * @co_run_bw_secondary:
+ * @primary_ch_phy_id:
+ * @secondary_ch_phy_id:
+ *
+ * Used for BT_COEX_CI command
+ */
+struct iwl_bt_coex_ci_cmd {
+       __le64 bt_primary_ci;
+       __le64 bt_secondary_ci;
+
+       u8 co_run_bw_primary;
+       u8 co_run_bw_secondary;
+       u8 primary_ch_phy_id;
+       u8 secondary_ch_phy_id;
+} __packed; /* BT_CI_MSG_API_S_VER_1 */
+
 #define BT_MBOX(n_dw, _msg, _pos, _nbits)      \
        BT_MBOX##n_dw##_##_msg##_POS = (_pos),  \
        BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
@@ -244,23 +285,39 @@ enum iwl_bt_mxbox_dw3 {
        ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
        >> BT_MBOX##_num##_##_field##_POS)
 
+enum iwl_bt_activity_grading {
+       BT_OFF                  = 0,
+       BT_ON_NO_CONNECTION     = 1,
+       BT_LOW_TRAFFIC          = 2,
+       BT_HIGH_TRAFFIC         = 3,
+};
+
 /**
  * struct iwl_bt_coex_profile_notif - notification about BT coex
  * @mbox_msg: message from BT to WiFi
- * @:bt_status: 0 - off, 1 - on
- * @:bt_open_conn: number of BT connections open
- * @:bt_traffic_load: load of BT traffic
- * @:bt_agg_traffic_load: aggregated load of BT traffic
- * @:bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @msg_idx: the index of the message
+ * @bt_status: 0 - off, 1 - on
+ * @bt_open_conn: number of BT connections open
+ * @bt_traffic_load: load of BT traffic
+ * @bt_agg_traffic_load: aggregated load of BT traffic
+ * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ * @primary_ch_lut: LUT used for primary channel
+ * @secondary_ch_lut: LUT used for secondary channel
+ * @bt_activity_grading: the activity of BT - enum %iwl_bt_activity_grading
  */
 struct iwl_bt_coex_profile_notif {
        __le32 mbox_msg[4];
+       __le32 msg_idx;
        u8 bt_status;
        u8 bt_open_conn;
        u8 bt_traffic_load;
        u8 bt_agg_traffic_load;
        u8 bt_ci_compliance;
        u8 reserved[3];
+
+       __le32 primary_ch_lut;
+       __le32 secondary_ch_lut;
+       __le32 bt_activity_grading;
 } __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
 
 enum iwl_bt_coex_prio_table_event {
@@ -300,20 +357,4 @@ struct iwl_bt_coex_prio_tbl_cmd {
        u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
 } __packed;
 
-enum iwl_bt_coex_env_action {
-       BT_COEX_ENV_CLOSE        = 0,
-       BT_COEX_ENV_OPEN         = 1,
-}; /* BT_COEX_PROT_ENV_ACTION_API_E_VER_1 */
-
-/**
- * struct iwl_bt_coex_prot_env_cmd - BT Protection Envelope
- * @action: enum %iwl_bt_coex_env_action
- * @type: enum %iwl_bt_coex_prio_table_event
- */
-struct iwl_bt_coex_prot_env_cmd {
-       u8 action; /* 0 = closed, 1 = open */
-       u8 type; /* 0 .. 15 */
-       u8 reserved[2];
-} __packed;
-
 #endif /* __fw_api_bt_coex_h__ */
index df72fcdf81705b54e9a36f51ede8a652f8bcfa2d..4e7dd8cf87dce738cbe062d931dcabe49ce67a51 100644 (file)
@@ -100,7 +100,12 @@ enum iwl_proto_offloads {
 
 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1    2
 #define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2    6
-#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX   6
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L   12
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S   4
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX   12
+
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L    4
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S    2
 
 /**
  * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
@@ -155,6 +160,43 @@ struct iwl_proto_offload_cmd_v2 {
        u8 reserved2[3];
 } __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
 
+struct iwl_ns_config {
+       struct in6_addr source_ipv6_addr;
+       struct in6_addr dest_ipv6_addr;
+       u8 target_mac_addr[ETH_ALEN];
+       __le16 reserved;
+} __packed; /* NS_OFFLOAD_CONFIG */
+
+struct iwl_targ_addr {
+       struct in6_addr addr;
+       __le32 config_num;
+} __packed; /* TARGET_IPV6_ADDRESS */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_small {
+       struct iwl_proto_offload_cmd_common common;
+       __le32 num_valid_ipv6_addrs;
+       struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S];
+       struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_large {
+       struct iwl_proto_offload_cmd_common common;
+       __le32 num_valid_ipv6_addrs;
+       struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+       struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
 
 /*
  * WOWLAN_PATTERNS
@@ -293,7 +335,7 @@ enum iwl_wowlan_wakeup_reason {
        IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET             = BIT(12),
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
-struct iwl_wowlan_status {
+struct iwl_wowlan_status_v4 {
        __le64 replay_ctr;
        __le16 pattern_number;
        __le16 non_qos_seq_ctr;
@@ -308,6 +350,29 @@ struct iwl_wowlan_status {
        u8 wake_packet[]; /* can be truncated from _length to _bufsize */
 } __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
 
+struct iwl_wowlan_gtk_status {
+       u8 key_index;
+       u8 reserved[3];
+       u8 decrypt_key[16];
+       u8 tkip_mic_key[8];
+       struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+} __packed;
+
+struct iwl_wowlan_status_v6 {
+       struct iwl_wowlan_gtk_status gtk;
+       __le64 replay_ctr;
+       __le16 pattern_number;
+       __le16 non_qos_seq_ctr;
+       __le16 qos_seq_ctr[8];
+       __le32 wakeup_reasons;
+       __le32 num_of_gtk_rekeys;
+       __le32 transmitted_ndps;
+       __le32 received_beacons;
+       __le32 wake_packet_length;
+       __le32 wake_packet_bufsize;
+       u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
+
 #define IWL_WOWLAN_TCP_MAX_PACKET_LEN          64
 #define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN  128
 #define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS      2048
index 98b1feb43d388f70bc4aadf0f4434cc714744d05..39c3148bdfa8eff2a09a7641b317f8a8a1648da9 100644 (file)
@@ -170,12 +170,14 @@ struct iwl_mac_data_ap {
  * @beacon_tsf: beacon transmit time in TSF
  * @bi: beacon interval in TU
  * @bi_reciprocal: 2^32 / bi
+ * @beacon_template: beacon template ID
  */
 struct iwl_mac_data_ibss {
        __le32 beacon_time;
        __le64 beacon_tsf;
        __le32 bi;
        __le32 bi_reciprocal;
+       __le32 beacon_template;
 } __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
 
 /**
@@ -372,4 +374,13 @@ static inline u32 iwl_mvm_reciprocal(u32 v)
        return 0xFFFFFFFF / v;
 }
 
+#define IWL_NONQOS_SEQ_GET     0x1
+#define IWL_NONQOS_SEQ_SET     0x2
+struct iwl_nonqos_seq_query_cmd {
+       __le32 get_set_flag;
+       __le32 mac_id_n_color;
+       __le16 value;
+       __le16 reserved;
+} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
+
 #endif /* __fw_api_mac_h__ */
index 8e7ab41079ca6ca51eba975a2e39c92cee5f002b..5cb93ae5cd2f73bfff7a1daf5d381f6e9ccc987b 100644 (file)
@@ -131,6 +131,33 @@ struct iwl_powertable_cmd {
        __le32 lprx_rssi_threshold;
 } __packed;
 
+/**
+ * enum iwl_device_power_flags - masks for device power command flags
+ * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' allows power to be saved by
+ *     turning off the receiver and transmitter. '0' does not allow it. This
+ *     flag should always be set to '1' unless actual power down needs to be
+ *     disabled for debug purposes.
+ * @DEVICE_POWER_FLAGS_CAM_MSK: '1' CAM (Continuous Active Mode) is set, meaning
+ *     that power management is disabled. '0' power management is enabled and
+ *     one of the power schemes is applied.
+ */
+enum iwl_device_power_flags {
+       DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK   = BIT(0),
+       DEVICE_POWER_FLAGS_CAM_MSK              = BIT(13),
+};
+
+/**
+ * struct iwl_device_power_cmd - device wide power command.
+ * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags:     Power table command flags from DEVICE_POWER_FLAGS_*
+ */
+struct iwl_device_power_cmd {
+       /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+       __le16 flags;
+       __le16 reserved;
+} __packed;
+
 /**
  * struct iwl_mac_power_cmd - New power command containing uAPSD support
  * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
@@ -290,7 +317,7 @@ struct iwl_beacon_filter_cmd {
 #define IWL_BF_ESCAPE_TIMER_MIN 0
 
 #define IWL_BA_ESCAPE_TIMER_DEFAULT 6
-#define IWL_BA_ESCAPE_TIMER_D3 6
+#define IWL_BA_ESCAPE_TIMER_D3 9
 #define IWL_BA_ESCAPE_TIMER_MAX 1024
 #define IWL_BA_ESCAPE_TIMER_MIN 0
 
index fdd33bc0a594a2d5456044b1a10df5d621ed8f8e..538f1c7a59664f9cb7c586eeb0974974c971b6f8 100644 (file)
@@ -68,6 +68,7 @@
 /*
  * These serve as indexes into
  * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
+ * TODO: avoid overlap between legacy and HT rates
  */
 enum {
        IWL_RATE_1M_INDEX = 0,
@@ -78,18 +79,31 @@ enum {
        IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
        IWL_RATE_6M_INDEX,
        IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+       IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
+       IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
+       IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
        IWL_RATE_9M_INDEX,
        IWL_RATE_12M_INDEX,
+       IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
        IWL_RATE_18M_INDEX,
+       IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
        IWL_RATE_24M_INDEX,
+       IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
        IWL_RATE_36M_INDEX,
+       IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
        IWL_RATE_48M_INDEX,
+       IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
        IWL_RATE_54M_INDEX,
+       IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
        IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
        IWL_RATE_60M_INDEX,
-       IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+       IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
+       IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
+       IWL_RATE_MCS_8_INDEX,
+       IWL_RATE_MCS_9_INDEX,
+       IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
        IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
-       IWL_RATE_COUNT,
+       IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
 };
 
 #define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -108,6 +122,7 @@ enum {
        IWL_RATE_2M_PLCP  = 20,
        IWL_RATE_5M_PLCP  = 55,
        IWL_RATE_11M_PLCP = 110,
+       IWL_RATE_INVM_PLCP = -1,
 };
 
 /*
@@ -164,6 +179,8 @@ enum {
  * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
  */
 #define RATE_HT_MCS_RATE_CODE_MSK      0x7
+#define RATE_HT_MCS_NSS_POS             3
+#define RATE_HT_MCS_NSS_MSK             (3 << RATE_HT_MCS_NSS_POS)
 
 /* Bit 10: (1) Use Green Field preamble */
 #define RATE_HT_MCS_GF_POS             10
index 83cb9b992ea4622f39ff03a4c5bbcea36a7d128f..c3782b48ded14d8a52458f66677dc3d3cd766fd7 100644 (file)
@@ -356,6 +356,7 @@ struct iwl_scan_complete_notif {
 /* scan offload */
 #define IWL_MAX_SCAN_CHANNELS          40
 #define IWL_SCAN_MAX_BLACKLIST_LEN     64
+#define IWL_SCAN_SHORT_BLACKLIST_LEN   16
 #define IWL_SCAN_MAX_PROFILES          11
 #define SCAN_OFFLOAD_PROBE_REQ_SIZE    512
 
@@ -368,6 +369,12 @@ struct iwl_scan_complete_notif {
 #define IWL_FULL_SCAN_MULTIPLIER 5
 #define IWL_FAST_SCHED_SCAN_ITERATIONS 3
 
+enum scan_framework_client {
+       SCAN_CLIENT_SCHED_SCAN          = BIT(0),
+       SCAN_CLIENT_NETDETECT           = BIT(1),
+       SCAN_CLIENT_ASSET_TRACKING      = BIT(2),
+};
+
 /**
  * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
  * @scan_flags:                see enum iwl_scan_flags
@@ -449,11 +456,12 @@ struct iwl_scan_offload_cfg {
  * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
  * @ssid:              MAC address to filter out
  * @reported_rssi:     AP rssi reported to the host
+ * @client_bitmap: clients that ignore this entry - enum scan_framework_client
  */
 struct iwl_scan_offload_blacklist {
        u8 ssid[ETH_ALEN];
        u8 reported_rssi;
-       u8 reserved;
+       u8 client_bitmap;
 } __packed;
 
 enum iwl_scan_offload_network_type {
@@ -475,6 +483,7 @@ enum iwl_scan_offload_band_selection {
  * @auth_alg:          authentication algorithm to match - bitmap
  * @network_type:      enum iwl_scan_offload_network_type
  * @band_selection:    enum iwl_scan_offload_band_selection
+ * @client_bitmap:     clients waiting for match - enum scan_framework_client
  */
 struct iwl_scan_offload_profile {
        u8 ssid_index;
@@ -482,7 +491,8 @@ struct iwl_scan_offload_profile {
        u8 auth_alg;
        u8 network_type;
        u8 band_selection;
-       u8 reserved[3];
+       u8 client_bitmap;
+       u8 reserved[2];
 } __packed;
 
 /**
@@ -491,13 +501,18 @@ struct iwl_scan_offload_profile {
  * @profiles:          profiles to search for match
  * @blacklist_len:     length of blacklist
  * @num_profiles:      num of profiles in the list
+ * @match_notify:      clients waiting for match found notification
+ * @pass_match:                clients waiting for the results
+ * @active_clients:    active clients bitmap - enum scan_framework_client
  */
 struct iwl_scan_offload_profile_cfg {
-       struct iwl_scan_offload_blacklist blacklist[IWL_SCAN_MAX_BLACKLIST_LEN];
        struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
        u8 blacklist_len;
        u8 num_profiles;
-       u8 reserved[2];
+       u8 match_notify;
+       u8 pass_match;
+       u8 active_clients;
+       u8 reserved[3];
 } __packed;
 
 /**
@@ -560,4 +575,15 @@ struct iwl_scan_offload_complete {
        u8 reserved;
 } __packed;
 
+/**
+ * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
+ * @ssid_bitmap:       SSID indexes found in this iteration
+ * @client_bitmap:     clients that are active and waiting for this notification
+ */
+struct iwl_sched_scan_results {
+       __le16 ssid_bitmap;
+       u8 client_bitmap;
+       u8 reserved;
+};
+
 #endif
index a30691a8a85b03a0b76815bf627480242a2458af..4aca5933a65d1619fc3e2e419ced4ddddc3d44c1 100644 (file)
@@ -247,7 +247,7 @@ struct iwl_mvm_keyinfo {
 } __packed;
 
 /**
- * struct iwl_mvm_add_sta_cmd - Add / modify a station in the fw's station table
+ * struct iwl_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
  * ( REPLY_ADD_STA = 0x18 )
  * @add_modify: 1: modify existing, 0: add new station
  * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
@@ -286,7 +286,7 @@ struct iwl_mvm_keyinfo {
  * ADD_STA sets up the table entry for one station, either creating a new
  * entry, or modifying a pre-existing one.
  */
-struct iwl_mvm_add_sta_cmd {
+struct iwl_mvm_add_sta_cmd_v5 {
        u8 add_modify;
        u8 unicast_tx_key_id;
        u8 multicast_tx_key_id;
@@ -312,6 +312,57 @@ struct iwl_mvm_add_sta_cmd {
        __le32 tfd_queue_msk;
 } __packed; /* ADD_STA_CMD_API_S_VER_5 */
 
+/**
+ * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
+ * VER_6 of this command is quite similar to VER_5 except
+ * exclusion of all fields related to the security key installation.
+ */
+struct iwl_mvm_add_sta_cmd_v6 {
+       u8 add_modify;
+       u8 reserved1;
+       __le16 tid_disable_tx;
+       __le32 mac_id_n_color;
+       u8 addr[ETH_ALEN];      /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+       __le16 reserved2;
+       u8 sta_id;
+       u8 modify_mask;
+       __le16 reserved3;
+       __le32 station_flags;
+       __le32 station_flags_msk;
+       u8 add_immediate_ba_tid;
+       u8 remove_immediate_ba_tid;
+       __le16 add_immediate_ba_ssn;
+       __le16 sleep_tx_count;
+       __le16 sleep_state_flags;
+       __le16 assoc_id;
+       __le16 beamform_flags;
+       __le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * ( REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: type %iwl_sta_key_flag
+ * @key: key material data
+ * @key2: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd {
+       u8 sta_id;
+       u8 key_offset;
+       __le16 key_flags;
+       u8 key[16];
+       u8 key2[16];
+       u8 rx_secur_seq_cnt[16];
+       u8 tkip_rx_tsc_byte2;
+       u8 reserved;
+       __le16 tkip_rx_ttak[5];
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
 /**
  * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
  * @ADD_STA_SUCCESS: operation was executed successfully
index 66264cc5a0168d296ddac86be6617cd56f1d84d5..bad5a552dd8d10412f5d9d874026215d6c6e9359 100644 (file)
 #include "fw-api-d3.h"
 #include "fw-api-bt-coex.h"
 
-/* queue and FIFO numbers by usage */
+/* maximum number of Tx queues on any platform */
+#define IWL_MVM_MAX_QUEUES     20
+
+/* Tx queue numbers */
 enum {
        IWL_MVM_OFFCHANNEL_QUEUE = 8,
        IWL_MVM_CMD_QUEUE = 9,
-       IWL_MVM_AUX_QUEUE = 15,
-       IWL_MVM_FIRST_AGG_QUEUE = 16,
-       IWL_MVM_NUM_QUEUES = 20,
-       IWL_MVM_LAST_AGG_QUEUE = IWL_MVM_NUM_QUEUES - 1,
-       IWL_MVM_CMD_FIFO = 7
 };
 
+#define IWL_MVM_CMD_FIFO       7
+
 #define IWL_MVM_STATION_COUNT  16
 
 /* commands */
@@ -97,6 +97,7 @@ enum {
        DBG_CFG = 0x9,
 
        /* station table */
+       ADD_STA_KEY = 0x17,
        ADD_STA = 0x18,
        REMOVE_STA = 0x19,
 
@@ -114,6 +115,7 @@ enum {
        TIME_EVENT_NOTIFICATION = 0x2a,
        BINDING_CONTEXT_CMD = 0x2b,
        TIME_QUOTA_CMD = 0x2c,
+       NON_QOS_TX_COUNTER_CMD = 0x2d,
 
        LQ_CMD = 0x4e,
 
@@ -130,6 +132,7 @@ enum {
        SCAN_OFFLOAD_COMPLETE = 0x6D,
        SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
        SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
+       MATCH_FOUND_NOTIFICATION = 0xd9,
 
        /* Phy */
        PHY_CONFIGURATION_CMD = 0x6a,
@@ -178,6 +181,7 @@ enum {
        BT_COEX_PRIO_TABLE = 0xcc,
        BT_COEX_PROT_ENV = 0xcd,
        BT_PROFILE_NOTIFICATION = 0xce,
+       BT_COEX_CI = 0x5d,
 
        REPLY_BEACON_FILTERING_CMD = 0xd2,
 
index c76299a3a1e0821708a91885ffab780b624fdbc1..83fc5ca04433f36b4009404465f8646cafba9cb0 100644 (file)
@@ -199,7 +199,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
         */
 
        for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
-               if (i < IWL_MVM_FIRST_AGG_QUEUE && i != IWL_MVM_CMD_QUEUE)
+               if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
                        mvm->queue_to_mac80211[i] = i;
                else
                        mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
@@ -243,7 +243,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (mvm->init_ucode_run)
+       if (mvm->init_ucode_complete)
                return 0;
 
        iwl_init_notification_wait(&mvm->notif_wait,
@@ -264,6 +264,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        if (ret)
                goto error;
 
+       /* Read the NVM only at driver load time, no need to do this twice */
        if (read_nvm) {
                /* Read nvm */
                ret = iwl_nvm_init(mvm);
@@ -273,6 +274,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
                }
        }
 
+       /* In case we read the NVM from an external file, load it to the NIC */
+       if (iwlwifi_mod_params.nvm_file)
+               iwl_mvm_load_nvm_to_nic(mvm);
+
        ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
        WARN_ON(ret);
 
@@ -310,7 +315,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
                        MVM_UCODE_CALIB_TIMEOUT);
        if (!ret)
-               mvm->init_ucode_run = true;
+               mvm->init_ucode_complete = true;
        goto out;
 
 error:
@@ -353,8 +358,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                return ret;
 
-       /* If we were in RFKILL during module loading, load init ucode now */
-       if (!mvm->init_ucode_run) {
+       /*
+        * If we haven't completed the run of the init ucode during
+        * module loading, load init ucode now
+        * (for example, if we were in RFKILL)
+        */
+       if (!mvm->init_ucode_complete) {
                ret = iwl_run_init_mvm_ucode(mvm, false);
                if (ret && !iwlmvm_mod_params.init_dbg) {
                        IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
@@ -424,6 +433,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                        goto error;
        }
 
+       ret = iwl_mvm_power_update_device_mode(mvm);
+       if (ret)
+               goto error;
+
        IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
        return 0;
  error:
index 5fe23a5ea9b66b662cd746c113b644b1318517e2..ab5a7ac90dcd68948882cc245cd8df04e62df37c 100644 (file)
@@ -80,7 +80,7 @@ struct iwl_mvm_mac_iface_iterator_data {
        struct ieee80211_vif *vif;
        unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
        unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
-       unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_FIRST_AGG_QUEUE)];
+       unsigned long used_hw_queues[BITS_TO_LONGS(IWL_MVM_MAX_QUEUES)];
        enum iwl_tsf_id preferred_tsf;
        bool found_vif;
 };
@@ -218,7 +218,7 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
                .preferred_tsf = NUM_TSF_IDS,
                .used_hw_queues = {
                        BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
-                       BIT(IWL_MVM_AUX_QUEUE) |
+                       BIT(mvm->aux_queue) |
                        BIT(IWL_MVM_CMD_QUEUE)
                },
                .found_vif = false,
@@ -242,9 +242,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
         * that we should share it with another interface.
         */
 
-       /* Currently, MAC ID 0 should be used only for the managed vif */
-       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+       /* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
+       switch (vif->type) {
+       case NL80211_IFTYPE_ADHOC:
+               break;
+       case NL80211_IFTYPE_STATION:
+               if (!vif->p2p)
+                       break;
+               /* fall through */
+       default:
                __clear_bit(0, data.available_mac_ids);
+       }
 
        ieee80211_iterate_active_interfaces_atomic(
                mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -302,9 +310,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
        /* Find available queues, and allocate them to the ACs */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                u8 queue = find_first_zero_bit(data.used_hw_queues,
-                                              IWL_MVM_FIRST_AGG_QUEUE);
+                                              mvm->first_agg_queue);
 
-               if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+               if (queue >= mvm->first_agg_queue) {
                        IWL_ERR(mvm, "Failed to allocate queue\n");
                        ret = -EIO;
                        goto exit_fail;
@@ -317,9 +325,9 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
        /* Allocate the CAB queue for softAP and GO interfaces */
        if (vif->type == NL80211_IFTYPE_AP) {
                u8 queue = find_first_zero_bit(data.used_hw_queues,
-                                              IWL_MVM_FIRST_AGG_QUEUE);
+                                              mvm->first_agg_queue);
 
-               if (queue >= IWL_MVM_FIRST_AGG_QUEUE) {
+               if (queue >= mvm->first_agg_queue) {
                        IWL_ERR(mvm, "Failed to allocate cab queue\n");
                        ret = -EIO;
                        goto exit_fail;
@@ -559,8 +567,12 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
                cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
 
        /* Don't use cts to self as the fw doesn't support it currently. */
-       if (vif->bss_conf.use_cts_prot)
+       if (vif->bss_conf.use_cts_prot) {
                cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+               if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+                       cmd->protection_flags |=
+                               cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+       }
 
        /*
         * I think that we should enable these 2 flags regardless the HT PROT
@@ -712,6 +724,31 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
        return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
 
+static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
+                                    struct ieee80211_vif *vif,
+                                    u32 action)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mac_ctx_cmd cmd = {};
+
+       WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+       iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+
+       cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
+                                      MAC_FILTER_IN_PROBE_REQUEST);
+
+       /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
+       cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
+       cmd.ibss.bi_reciprocal =
+               cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+
+       /* TODO: Assumes that the beacon id == mac context id */
+       cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
+
+       return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
 struct iwl_mvm_go_iterator_data {
        bool go_active;
 };
@@ -721,7 +758,8 @@ static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
        struct iwl_mvm_go_iterator_data *data = _data;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
-       if (vif->type == NL80211_IFTYPE_AP && vif->p2p && mvmvif->ap_active)
+       if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+           mvmvif->ap_ibss_active)
                data->go_active = true;
 }
 
@@ -833,9 +871,10 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
                cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
 
        /* Set up TX beacon command fields */
-       iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
-                                beacon->data,
-                                beacon_skb_len);
+       if (vif->type == NL80211_IFTYPE_AP)
+               iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd,
+                                        beacon->data,
+                                        beacon_skb_len);
 
        /* Submit command */
        cmd.len[0] = sizeof(beacon_cmd);
@@ -848,14 +887,15 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
        return iwl_mvm_send_cmd(mvm, &cmd);
 }
 
-/* The beacon template for the AP/GO context has changed and needs update */
+/* The beacon template for the AP/GO/IBSS has changed and needs update */
 int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif)
 {
        struct sk_buff *beacon;
        int ret;
 
-       WARN_ON(vif->type != NL80211_IFTYPE_AP);
+       WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+               vif->type != NL80211_IFTYPE_ADHOC);
 
        beacon = ieee80211_beacon_get(mvm->hw, vif);
        if (!beacon)
@@ -1018,6 +1058,8 @@ static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
        case NL80211_IFTYPE_P2P_DEVICE:
                return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+       case NL80211_IFTYPE_ADHOC:
+               return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
        default:
                break;
        }
@@ -1038,6 +1080,9 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        if (ret)
                return ret;
 
+       /* will only do anything at resume from D3 time */
+       iwl_mvm_set_last_nonqos_seq(mvm, vif);
+
        mvmvif->uploaded = true;
        return 0;
 }
index 9833cdf6177cd34d199de063cb0ff466042cfed0..f40685c3764ea8b211ad9fa7affb8ed675ef1ddd 100644 (file)
@@ -77,6 +77,7 @@
 #include "iwl-eeprom-parse.h"
 #include "fw-api-scan.h"
 #include "iwl-phy-db.h"
+#include "testmode.h"
 
 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
        {
@@ -138,6 +139,14 @@ static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
        }
 }
 
+static int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
+{
+       /* we create the 802.11 header and SSID element */
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID)
+               return mvm->fw->ucode_capa.max_probe_length - 24 - 2;
+       return mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+}
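The 24/2/34 constants above follow from the probe request layout: a 24-byte 802.11 management header plus either a bare zero-length SSID element (2 bytes of element ID and length) when the NO_BASIC_SSID capability is set, or a maximum-length SSID element (2-byte header plus 32 bytes of SSID) otherwise. A standalone sketch of the same budget; max_probe_length here is an assumed example value, not real ucode capability data:

/* Standalone sketch of the scan-IE budget; not driver code.
 * max_probe_length is a made-up example, not real NVM/ucode data. */
#include <stdio.h>

#define MGMT_HDR_LEN     24   /* 802.11 management frame header */
#define SSID_IE_HDR_LEN   2   /* element ID + length byte (zero-length SSID) */
#define SSID_IE_MAX_LEN  34   /* 2-byte header + 32-byte maximum SSID */

int main(void)
{
	unsigned int max_probe_length = 200;  /* assumed example value */

	/* NO_BASIC_SSID case: only a zero-length SSID element is reserved */
	printf("IE room (NO_BASIC_SSID): %u\n",
	       max_probe_length - MGMT_HDR_LEN - SSID_IE_HDR_LEN);

	/* otherwise a maximum-length SSID element is reserved */
	printf("IE room (full SSID):     %u\n",
	       max_probe_length - MGMT_HDR_LEN - SSID_IE_MAX_LEN);
	return 0;
}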
+
 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 {
        struct ieee80211_hw *hw = mvm->hw;
@@ -158,7 +167,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                    IEEE80211_HW_SUPPORTS_STATIC_SMPS |
                    IEEE80211_HW_SUPPORTS_UAPSD;
 
-       hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
+       hw->queues = mvm->first_agg_queue;
        hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
        hw->rate_control_algorithm = "iwl-mvm-rs";
 
@@ -181,6 +190,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                BIT(NL80211_IFTYPE_P2P_GO) |
                BIT(NL80211_IFTYPE_P2P_DEVICE);
 
+       /* IBSS has bugs in older versions */
+       if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+               hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+
        hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
                            WIPHY_FLAG_DISABLE_BEACON_HINTS |
                            WIPHY_FLAG_IBSS_RSN;
@@ -212,9 +225,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 
        iwl_mvm_reset_phy_ctxts(mvm);
 
-       /* we create the 802.11 header and a max-length SSID element */
-       hw->wiphy->max_scan_ie_len =
-               mvm->fw->ucode_capa.max_probe_length - 24 - 34;
+       hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
+
        hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
 
        if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
@@ -231,6 +243,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        else
                hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+               hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+               hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+               hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+               /* we create the 802.11 header and zero length SSID IE. */
+               hw->wiphy->max_sched_scan_ie_len =
+                                       SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+       }
+
        hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
                               NL80211_FEATURE_P2P_GO_OPPPS;
 
@@ -548,7 +569,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
         * In short: there's not much we can do at this point, other than
         * allocating resources :)
         */
-       if (vif->type == NL80211_IFTYPE_AP) {
+       if (vif->type == NL80211_IFTYPE_AP ||
+           vif->type == NL80211_IFTYPE_ADHOC) {
                u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
                ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
                                               qmask);
@@ -698,7 +720,14 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
         * For AP/GO interface, the tear down of the resources allocated to the
         * interface is handled as part of the stop_ap flow.
         */
-       if (vif->type == NL80211_IFTYPE_AP) {
+       if (vif->type == NL80211_IFTYPE_AP ||
+           vif->type == NL80211_IFTYPE_ADHOC) {
+#ifdef CONFIG_NL80211_TESTMODE
+               if (vif == mvm->noa_vif) {
+                       mvm->noa_vif = NULL;
+                       mvm->noa_duration = 0;
+               }
+#endif
                iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
                goto out_release;
        }
@@ -796,6 +825,27 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                                return;
                        }
                        iwl_mvm_configure_mcast_filter(mvm, vif);
+
+                       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+                                    &mvm->status)) {
+                               /*
+                                * If we're restarting then the firmware will
+                                * obviously have lost synchronisation with
+                                * the AP. It will attempt to synchronise by
+                                * itself, but we can make it more reliable by
+                                * scheduling a session protection time event.
+                                *
+                                * The firmware needs to receive a beacon to
+                                * catch up with synchronisation, use 110% of
+                                * the beacon interval.
+                                *
+                                * Set a large maximum delay to allow for more
+                                * than a single interface.
+                                */
+                               u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
+                               iwl_mvm_protect_session(mvm, vif, dur, dur,
+                                                       5 * dur);
+                       }
                } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
                        /* remove AP station now that the MAC is unassoc */
                        ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
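As a worked example of the duration arithmetic in the restart branch above (values assumed, not taken from a real BSS): with a beacon interval of 100 TU, dur = (11 * 100) / 10 = 110 TU, i.e. 110% of one beacon interval, and the maximum delay passed to iwl_mvm_protect_session() is 5 * 110 = 550 TU. A minimal sketch:

/* Worked example of the session-protection timing above; beacon_int is an
 * assumed example value in TU, not real BSS configuration. */
#include <stdio.h>

int main(void)
{
	unsigned int beacon_int = 100;             /* assumed example, in TU */
	unsigned int dur = (11 * beacon_int) / 10; /* 110% of a beacon interval */

	printf("duration = %u TU, max delay = %u TU\n", dur, 5 * dur);
	return 0;
}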
@@ -819,7 +869,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                        if (ret)
                                IWL_ERR(mvm, "failed to update power mode\n");
                }
-               iwl_mvm_bt_coex_vif_assoc(mvm, vif);
+               iwl_mvm_bt_coex_vif_change(mvm);
        } else if (changes & BSS_CHANGED_BEACON_INFO) {
                /*
                 * We received a beacon _after_ association so
@@ -848,7 +898,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
        }
 }
 
-static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -871,7 +922,7 @@ static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        if (ret)
                goto out_remove;
 
-       mvmvif->ap_active = true;
+       mvmvif->ap_ibss_active = true;
 
        /* Send the bcast station. At this stage the TBTT and DTIM time events
         * are added and applied to the scheduler */
@@ -883,10 +934,12 @@ static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        if (ret)
                goto out_rm_bcast;
 
-       /* Need to update the P2P Device MAC */
+       /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
        if (vif->p2p && mvm->p2p_device_vif)
                iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
 
+       iwl_mvm_bt_coex_vif_change(mvm);
+
        mutex_unlock(&mvm->mutex);
        return 0;
 
@@ -901,7 +954,8 @@ out_unlock:
        return ret;
 }
 
-static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -910,9 +964,11 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
        mutex_lock(&mvm->mutex);
 
-       mvmvif->ap_active = false;
+       mvmvif->ap_ibss_active = false;
+
+       iwl_mvm_bt_coex_vif_change(mvm);
 
-       /* Need to update the P2P Device MAC */
+       /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
        if (vif->p2p && mvm->p2p_device_vif)
                iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
 
@@ -924,10 +980,11 @@ static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        mutex_unlock(&mvm->mutex);
 }
 
-static void iwl_mvm_bss_info_changed_ap(struct iwl_mvm *mvm,
-                                       struct ieee80211_vif *vif,
-                                       struct ieee80211_bss_conf *bss_conf,
-                                       u32 changes)
+static void
+iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_bss_conf *bss_conf,
+                                u32 changes)
 {
        /* Need to send a new beacon template to the FW */
        if (changes & BSS_CHANGED_BEACON) {
@@ -950,7 +1007,8 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
                iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
                break;
        case NL80211_IFTYPE_AP:
-               iwl_mvm_bss_info_changed_ap(mvm, vif, bss_conf, changes);
+       case NL80211_IFTYPE_ADHOC:
+               iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
                break;
        default:
                /* shouldn't happen */
@@ -1163,7 +1221,54 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
 
        mutex_lock(&mvm->mutex);
        /* Try really hard to protect the session and hear a beacon */
-       iwl_mvm_protect_session(mvm, vif, duration, min_duration);
+       iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500);
+       mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif,
+                                       struct cfg80211_sched_scan_request *req,
+                                       struct ieee80211_sched_scan_ies *ies)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int ret;
+
+       mutex_lock(&mvm->mutex);
+
+       if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
+               IWL_DEBUG_SCAN(mvm,
+                              "SCHED SCAN request during internal scan - abort\n");
+               ret = -EBUSY;
+               goto out;
+       }
+
+       mvm->scan_status = IWL_MVM_SCAN_SCHED;
+
+       ret = iwl_mvm_config_sched_scan(mvm, vif, req, ies);
+       if (ret)
+               goto err;
+
+       ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+       if (ret)
+               goto err;
+
+       ret = iwl_mvm_sched_scan_start(mvm, req);
+       if (!ret)
+               goto out;
+err:
+       mvm->scan_status = IWL_MVM_SCAN_NONE;
+out:
+       mutex_unlock(&mvm->mutex);
+       return ret;
+}
+
+static void iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+                                       struct ieee80211_vif *vif)
+{
+       struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+       mutex_lock(&mvm->mutex);
+       iwl_mvm_sched_scan_stop(mvm);
        mutex_unlock(&mvm->mutex);
 }
 
@@ -1207,8 +1312,13 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 
        switch (cmd) {
        case SET_KEY:
-               if (vif->type == NL80211_IFTYPE_AP && !sta) {
-                       /* GTK on AP interface is a TX-only key, return 0 */
+               if ((vif->type == NL80211_IFTYPE_ADHOC ||
+                    vif->type == NL80211_IFTYPE_AP) && !sta) {
+                       /*
+                        * GTK on AP interface is a TX-only key, return 0;
+                        * on IBSS they're per-station and because we're lazy
+                        * we don't support them for RX, so do the same.
+                        */
                        ret = 0;
                        key->hw_key_idx = STA_KEY_IDX_INVALID;
                        break;
@@ -1252,6 +1362,9 @@ static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 
+       if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+               return;
+
        iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
 }
 
@@ -1445,6 +1558,7 @@ static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
        iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->def,
                                 ctx->rx_chains_static,
                                 ctx->rx_chains_dynamic);
+       iwl_mvm_bt_coex_vif_change(mvm);
        mutex_unlock(&mvm->mutex);
 }
 
@@ -1464,14 +1578,14 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
 
        switch (vif->type) {
        case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_ADHOC:
                /*
                 * The AP binding flow is handled as part of the start_ap flow
-                * (in bss_info_changed).
+                * (in bss_info_changed), similarly for IBSS.
                 */
                ret = 0;
                goto out_unlock;
        case NL80211_IFTYPE_STATION:
-       case NL80211_IFTYPE_ADHOC:
        case NL80211_IFTYPE_MONITOR:
                break;
        default:
@@ -1517,10 +1631,10 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
 
        iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
 
-       if (vif->type == NL80211_IFTYPE_AP)
-               goto out_unlock;
-
        switch (vif->type) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_ADHOC:
+               goto out_unlock;
        case NL80211_IFTYPE_MONITOR:
                mvmvif->monitor_active = false;
                iwl_mvm_update_quotas(mvm, NULL);
@@ -1550,14 +1664,72 @@ static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
        return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
 }
 
-static void iwl_mvm_mac_rssi_callback(struct ieee80211_hw *hw,
+#ifdef CONFIG_NL80211_TESTMODE
+static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
+       [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
+       [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
+       [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
+};
+
+static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
                                      struct ieee80211_vif *vif,
-                                     enum ieee80211_rssi_event rssi_event)
+                                     void *data, int len)
+{
+       struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
+       int err;
+       u32 noa_duration;
+
+       err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
+       if (err)
+               return err;
+
+       if (!tb[IWL_MVM_TM_ATTR_CMD])
+               return -EINVAL;
+
+       switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
+       case IWL_MVM_TM_CMD_SET_NOA:
+               if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
+                   !vif->bss_conf.enable_beacon ||
+                   !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
+                       return -EINVAL;
+
+               noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
+               if (noa_duration >= vif->bss_conf.beacon_int)
+                       return -EINVAL;
+
+               mvm->noa_duration = noa_duration;
+               mvm->noa_vif = vif;
+
+               return iwl_mvm_update_quotas(mvm, NULL);
+       case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
+               /* must be associated client vif - ignore authorized */
+               if (!vif || vif->type != NL80211_IFTYPE_STATION ||
+                   !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
+                   !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
+                       return -EINVAL;
+
+               if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
+                       return iwl_mvm_enable_beacon_filter(mvm, vif);
+               return iwl_mvm_disable_beacon_filter(mvm, vif);
+       }
+
+       return -EOPNOTSUPP;
+}
+
+static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   void *data, int len)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+       int err;
 
-       iwl_mvm_bt_rssi_event(mvm, vif, rssi_event);
+       mutex_lock(&mvm->mutex);
+       err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
+       mutex_unlock(&mvm->mutex);
+
+       return err;
 }
+#endif
 
 struct ieee80211_ops iwl_mvm_hw_ops = {
        .tx = iwl_mvm_mac_tx,
@@ -1578,23 +1750,27 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
        .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
        .conf_tx = iwl_mvm_mac_conf_tx,
        .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+       .sched_scan_start = iwl_mvm_mac_sched_scan_start,
+       .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
        .set_key = iwl_mvm_mac_set_key,
        .update_tkip_key = iwl_mvm_mac_update_tkip_key,
        .remain_on_channel = iwl_mvm_roc,
        .cancel_remain_on_channel = iwl_mvm_cancel_roc,
-       .rssi_callback = iwl_mvm_mac_rssi_callback,
-
        .add_chanctx = iwl_mvm_add_chanctx,
        .remove_chanctx = iwl_mvm_remove_chanctx,
        .change_chanctx = iwl_mvm_change_chanctx,
        .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
        .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
 
-       .start_ap = iwl_mvm_start_ap,
-       .stop_ap = iwl_mvm_stop_ap,
+       .start_ap = iwl_mvm_start_ap_ibss,
+       .stop_ap = iwl_mvm_stop_ap_ibss,
+       .join_ibss = iwl_mvm_start_ap_ibss,
+       .leave_ibss = iwl_mvm_stop_ap_ibss,
 
        .set_tim = iwl_mvm_set_tim,
 
+       CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
+
 #ifdef CONFIG_PM_SLEEP
        /* look at d3.c */
        .suspend = iwl_mvm_suspend,
index b0389279cc1ed774f9f90f20137d11b6691d8eae..6235cb729f5c0ecd5c40fe90d9e78b8fceb1f1c5 100644 (file)
@@ -162,6 +162,7 @@ enum iwl_power_scheme {
 struct iwl_mvm_power_ops {
        int (*power_update_mode)(struct iwl_mvm *mvm,
                                 struct ieee80211_vif *vif);
+       int (*power_update_device_mode)(struct iwl_mvm *mvm);
        int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -241,12 +242,18 @@ enum iwl_mvm_smps_type_request {
 * @last_beacon_signal: last beacon rssi signal in dbm
 * @ave_beacon_signal: average beacon signal
 * @last_cqm_event: rssi of the last cqm event
+* @bt_coex_min_thold: minimum threshold for BT coex
+* @bt_coex_max_thold: maximum threshold for BT coex
+* @last_bt_coex_event: rssi of the last BT coex event
 */
 struct iwl_mvm_vif_bf_data {
        bool bf_enabled;
        bool ba_enabled;
        s8 ave_beacon_signal;
        s8 last_cqm_event;
+       s8 bt_coex_min_thold;
+       s8 bt_coex_max_thold;
+       s8 last_bt_coex_event;
 };
 
 /**
@@ -255,8 +262,8 @@ struct iwl_mvm_vif_bf_data {
  * @color: to solve races upon MAC addition and removal
  * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
  * @uploaded: indicates the MAC context has been added to the device
- * @ap_active: indicates that ap context is configured, and that the interface
- *  should get quota etc.
+ * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
+ *     should get quota etc.
  * @monitor_active: indicates that monitor context is configured, and that the
  * interface should get quota etc.
  * @queue_params: QoS params for this MAC
@@ -272,7 +279,7 @@ struct iwl_mvm_vif {
        u8 ap_sta_id;
 
        bool uploaded;
-       bool ap_active;
+       bool ap_ibss_active;
        bool monitor_active;
        struct iwl_mvm_vif_bf_data bf_data;
 
@@ -306,6 +313,9 @@ struct iwl_mvm_vif {
 
        int tx_key_idx;
 
+       bool seqno_valid;
+       u16 seqno;
+
 #if IS_ENABLED(CONFIG_IPV6)
        /* IPv6 addresses for WoWLAN */
        struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
@@ -333,6 +343,7 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
 enum iwl_scan_status {
        IWL_MVM_SCAN_NONE,
        IWL_MVM_SCAN_OS,
+       IWL_MVM_SCAN_SCHED,
 };
 
 /**
@@ -434,7 +445,7 @@ struct iwl_mvm {
 
        enum iwl_ucode_type cur_ucode;
        bool ucode_loaded;
-       bool init_ucode_run;
+       bool init_ucode_complete;
        u32 error_event_table;
        u32 log_event_table;
 
@@ -470,6 +481,9 @@ struct iwl_mvm {
        enum iwl_scan_status scan_status;
        struct iwl_scan_cmd *scan_cmd;
 
+       /* rx chain antennas set through debugfs for the scan command */
+       u8 scan_rx_ant;
+
        /* Internal station */
        struct iwl_mvm_int_sta aux_sta;
 
@@ -479,7 +493,8 @@ struct iwl_mvm {
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        struct dentry *debugfs_dir;
        u32 dbgfs_sram_offset, dbgfs_sram_len;
-       bool prevent_power_down_d3;
+       bool disable_power_off;
+       bool disable_power_off_d3;
 #endif
 
        struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
@@ -523,12 +538,23 @@ struct iwl_mvm {
        /* BT-Coex */
        u8 bt_kill_msk;
        struct iwl_bt_coex_profile_notif last_bt_notif;
+       struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
 
        /* Thermal Throttling and CTkill */
        struct iwl_mvm_tt_mgmt thermal_throttle;
        s32 temperature;        /* Celsius */
 
        const struct iwl_mvm_power_ops *pm_ops;
+
+#ifdef CONFIG_NL80211_TESTMODE
+       u32 noa_duration;
+       struct ieee80211_vif *noa_vif;
+#endif
+
+       /* Tx queues */
+       u8 aux_queue;
+       u8 first_agg_queue;
+       u8 last_agg_queue;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -570,6 +596,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
 /* Utils */
 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
                                        enum ieee80211_band band);
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+                              enum ieee80211_band band,
+                              struct ieee80211_tx_rate *r);
 u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
 void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
 void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
@@ -608,6 +637,7 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
 
 /* NVM */
 int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
 
 int iwl_mvm_up(struct iwl_mvm *mvm);
 int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
@@ -682,6 +712,23 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                             struct iwl_device_cmd *cmd);
 void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
 
+/* Scheduled scan */
+int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
+                                          struct iwl_rx_cmd_buffer *rxb,
+                                          struct iwl_device_cmd *cmd);
+int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif,
+                             struct cfg80211_sched_scan_request *req,
+                             struct ieee80211_sched_scan_ies *ies);
+int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+                                      struct cfg80211_sched_scan_request *req);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+                            struct cfg80211_sched_scan_request *req);
+void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
+int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
+                                 struct iwl_rx_cmd_buffer *rxb,
+                                 struct iwl_device_cmd *cmd);
+
 /* MVM debugfs */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
@@ -720,6 +767,13 @@ static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
        return mvm->pm_ops->power_disable(mvm, vif);
 }
 
+static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
+{
+       if (mvm->pm_ops->power_update_device_mode)
+               return mvm->pm_ops->power_update_device_mode(mvm);
+       return 0;
+}
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
                                            struct ieee80211_vif *vif,
@@ -745,6 +799,15 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif, int idx);
 extern const struct file_operations iwl_dbgfs_d3_test_ops;
+#ifdef CONFIG_PM_SLEEP
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
+                                struct ieee80211_vif *vif);
+#else
+static inline void
+iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+#endif
 
 /* BT Coex */
 int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
@@ -754,7 +817,20 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
                             struct iwl_device_cmd *cmd);
 void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           enum ieee80211_rssi_event rssi_event);
-void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
+u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
+                                  struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+                                    struct ieee80211_sta *sta);
+
+enum iwl_bt_kill_msk {
+       BT_KILL_MSK_DEFAULT,
+       BT_KILL_MSK_SCO_HID_A2DP,
+       BT_KILL_MSK_REDUCED_TXPOW,
+       BT_KILL_MSK_MAX,
+};
+extern const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX];
+extern const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX];
 
 /* beacon filtering */
 #ifdef CONFIG_IWLWIFI_DEBUGFS
index edb94ea316545f439a18e8c16e0952066336e2a2..2beffd028b67cef7469faa40899b6b6992501976 100644 (file)
@@ -77,7 +77,7 @@ static const int nvm_to_read[] = {
 
 /* Default NVM size to read */
 #define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
-#define IWL_MAX_NVM_SECTION_SIZE 6000
+#define IWL_MAX_NVM_SECTION_SIZE 7000
 
 #define NVM_WRITE_OPCODE 1
 #define NVM_READ_OPCODE 0
@@ -259,6 +259,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 #define MAX_NVM_FILE_LEN       16384
 
 /*
+ * Reads external NVM from a file into mvm->nvm_sections
+ *
  * HOW TO CREATE THE NVM FILE FORMAT:
  * ------------------------------
  * 1. create hex file, format:
@@ -277,20 +279,23 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
  *
  * 4. save as "iNVM_xxx.bin" under /lib/firmware
  */
-static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
+static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
 {
-       int ret, section_id, section_size;
+       int ret, section_size;
+       u16 section_id;
        const struct firmware *fw_entry;
        const struct {
                __le16 word1;
                __le16 word2;
                u8 data[];
        } *file_sec;
-       const u8 *eof;
+       const u8 *eof, *temp;
 
 #define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
 #define NVM_WORD2_ID(x) (x >> 12)
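The two macros above unpack each little-endian section header of the external NVM file: the low 10 bits of word1 give the section length in 8-byte units, and the top 4 bits of word2 give the section ID. A standalone sketch of that decoding; word1 and word2 are made-up example values:

/* Standalone sketch of decoding one external-NVM section header using the
 * same bit layout as the macros above; word1/word2 are made-up examples. */
#include <stdio.h>
#include <stdint.h>

#define NVM_WORD1_LEN(x) (8 * ((x) & 0x03FF))
#define NVM_WORD2_ID(x)  ((x) >> 12)

int main(void)
{
	uint16_t word1 = 0x0040;   /* assumed: 0x40 * 8 = 512-byte section */
	uint16_t word2 = 0x1000;   /* assumed: section ID 1 */

	printf("section id %u, length %u bytes\n",
	       (unsigned)NVM_WORD2_ID(word2), (unsigned)NVM_WORD1_LEN(word1));
	return 0;
}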
 
+       IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
+
        /*
         * Obtain NVM image via request_firmware. Since we already used
         * request_firmware_nowait() for the firmware binary load and only
@@ -362,12 +367,18 @@ static int iwl_mvm_load_external_nvm(struct iwl_mvm *mvm)
                        break;
                }
 
-               ret = iwl_nvm_write_section(mvm, section_id, file_sec->data,
-                                           section_size);
-               if (ret < 0) {
-                       IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+               temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
+               if (!temp) {
+                       ret = -ENOMEM;
+                       break;
+               }
+               if (WARN_ON(section_id >= NVM_NUM_OF_SECTIONS)) {
+                       IWL_ERR(mvm, "Invalid NVM section ID\n");
+                       ret = -EINVAL;
                        break;
                }
+               mvm->nvm_sections[section_id].data = temp;
+               mvm->nvm_sections[section_id].length = section_size;
 
                /* advance to the next section */
                file_sec = (void *)(file_sec->data + section_size);
@@ -377,6 +388,28 @@ out:
        return ret;
 }
 
+/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
+{
+       int i, ret;
+       u16 section_id;
+       struct iwl_nvm_section *sections = mvm->nvm_sections;
+
+       IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");
+
+       for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+               section_id = nvm_to_read[i];
+               ret = iwl_nvm_write_section(mvm, section_id,
+                                           sections[section_id].data,
+                                           sections[section_id].length);
+               if (ret < 0) {
+                       IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+                       break;
+               }
+       }
+       return ret;
+}
+
 int iwl_nvm_init(struct iwl_mvm *mvm)
 {
        int ret, i, section;
@@ -385,36 +418,36 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
        /* load external NVM if configured */
        if (iwlwifi_mod_params.nvm_file) {
                /* move to External NVM flow */
-               ret = iwl_mvm_load_external_nvm(mvm);
+               ret = iwl_mvm_read_external_nvm(mvm);
                if (ret)
                        return ret;
-       }
-
-       /* Read From FW NVM */
-       IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
-
-       /* TODO: find correct NVM max size for a section */
-       nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
-                            GFP_KERNEL);
-       if (!nvm_buffer)
-               return -ENOMEM;
-       for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
-               section = nvm_to_read[i];
-               /* we override the constness for initial read */
-               ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
-               if (ret < 0)
-                       break;
-               temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
-               if (!temp) {
-                       ret = -ENOMEM;
-                       break;
+       } else {
+               /* Read From FW NVM */
+               IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+
+               /* TODO: find correct NVM max size for a section */
+               nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+                                    GFP_KERNEL);
+               if (!nvm_buffer)
+                       return -ENOMEM;
+               for (i = 0; i < ARRAY_SIZE(nvm_to_read); i++) {
+                       section = nvm_to_read[i];
+                       /* we override the constness for initial read */
+                       ret = iwl_nvm_read_section(mvm, section, nvm_buffer);
+                       if (ret < 0)
+                               break;
+                       temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+                       if (!temp) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       mvm->nvm_sections[section].data = temp;
+                       mvm->nvm_sections[section].length = ret;
                }
-               mvm->nvm_sections[section].data = temp;
-               mvm->nvm_sections[section].length = ret;
+               kfree(nvm_buffer);
+               if (ret < 0)
+                       return ret;
        }
-       kfree(nvm_buffer);
-       if (ret < 0)
-               return ret;
 
        mvm->nvm_data = iwl_parse_nvm_sections(mvm);
        if (!mvm->nvm_data)
index 2fcc8ef88a68d78fbc0f2225bbabc733d8d4e619..59b7cb3c61344939c96f285e46ea9751e9197d35 100644 (file)
@@ -224,6 +224,10 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 
        RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
        RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
+       RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
+                  iwl_mvm_rx_scan_offload_complete_notif, false),
+       RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
+                  false),
 
        RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
        RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
@@ -249,6 +253,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(TIME_EVENT_NOTIFICATION),
        CMD(BINDING_CONTEXT_CMD),
        CMD(TIME_QUOTA_CMD),
+       CMD(NON_QOS_TX_COUNTER_CMD),
        CMD(RADIO_VERSION_NOTIFICATION),
        CMD(SCAN_REQUEST_CMD),
        CMD(SCAN_ABORT_CMD),
@@ -260,10 +265,12 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(CALIB_RES_NOTIF_PHY_DB),
        CMD(SET_CALIB_DEFAULT_CMD),
        CMD(CALIBRATION_COMPLETE_NOTIFICATION),
+       CMD(ADD_STA_KEY),
        CMD(ADD_STA),
        CMD(REMOVE_STA),
        CMD(LQ_CMD),
        CMD(SCAN_OFFLOAD_CONFIG_CMD),
+       CMD(MATCH_FOUND_NOTIFICATION),
        CMD(SCAN_OFFLOAD_REQUEST_CMD),
        CMD(SCAN_OFFLOAD_ABORT_CMD),
        CMD(SCAN_OFFLOAD_COMPLETE),
@@ -303,6 +310,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(REPLY_BEACON_FILTERING_CMD),
        CMD(REPLY_THERMAL_MNG_BACKOFF),
        CMD(MAC_PM_POWER_TABLE),
+       CMD(BT_COEX_CI),
 };
 #undef CMD
 
@@ -344,6 +352,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
 
+       mvm->aux_queue = 15;
+       mvm->first_agg_queue = 16;
+       mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+       if (mvm->cfg->base_params->num_of_queues == 16) {
+               mvm->aux_queue = 11;
+               mvm->first_agg_queue = 12;
+       }
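A small sketch of the TX queue split produced by the assignments above. The num_of_queues inputs are assumed example values, not read from a real iwl_cfg; the 16-queue case mirrors the special case in the hunk and the larger value is purely illustrative:

/* Sketch of the TX queue split computed above; num_of_queues values are
 * assumed examples, not read from a real iwl_cfg. */
#include <stdio.h>

static void queue_split(unsigned int num_of_queues)
{
	unsigned int aux = 15, first_agg = 16;
	unsigned int last_agg = num_of_queues - 1;

	if (num_of_queues == 16) {
		aux = 11;
		first_agg = 12;
	}

	printf("%u queues: aux=%u, agg=%u..%u\n",
	       num_of_queues, aux, first_agg, last_agg);
}

int main(void)
{
	queue_split(16);   /* smaller queue layout */
	queue_split(31);   /* assumed larger layout, for illustration only */
	return 0;
}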
+
        mutex_init(&mvm->mutex);
        spin_lock_init(&mvm->async_handlers_lock);
        INIT_LIST_HEAD(&mvm->time_event_list);
@@ -401,24 +417,32 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
                 mvm->cfg->name, mvm->trans->hw_rev);
 
-       err = iwl_trans_start_hw(mvm->trans);
-       if (err)
-               goto out_free;
-
        iwl_mvm_tt_initialize(mvm);
 
-       mutex_lock(&mvm->mutex);
-       err = iwl_run_init_mvm_ucode(mvm, true);
-       mutex_unlock(&mvm->mutex);
-       /* returns 0 if successful, 1 if success but in rfkill */
-       if (err < 0 && !iwlmvm_mod_params.init_dbg) {
-               IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
-               goto out_free;
-       }
+       /*
+        * If the NVM exists in an external file,
+        * there is no need to power up the NIC at driver load
+        */
+       if (iwlwifi_mod_params.nvm_file) {
+               iwl_nvm_init(mvm);
+       } else {
+               err = iwl_trans_start_hw(mvm->trans);
+               if (err)
+                       goto out_free;
+
+               mutex_lock(&mvm->mutex);
+               err = iwl_run_init_mvm_ucode(mvm, true);
+               mutex_unlock(&mvm->mutex);
+               /* returns 0 if successful, 1 if success but in rfkill */
+               if (err < 0 && !iwlmvm_mod_params.init_dbg) {
+                       IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+                       goto out_free;
+               }
 
-       /* Stop the hw after the ALIVE and NVM has been read */
-       if (!iwlmvm_mod_params.init_dbg)
-               iwl_trans_stop_hw(mvm->trans, false);
+               /* Stop the hw after the ALIVE and NVM has been read */
+               if (!iwlmvm_mod_params.init_dbg)
+                       iwl_trans_stop_hw(mvm->trans, false);
+       }
 
        scan_size = sizeof(struct iwl_scan_cmd) +
                mvm->fw->ucode_capa.max_probe_length +
@@ -449,7 +473,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
  out_free:
        iwl_phy_db_free(mvm->phy_db);
        kfree(mvm->scan_cmd);
-       iwl_trans_stop_hw(trans, true);
+       if (!iwlwifi_mod_params.nvm_file)
+               iwl_trans_stop_hw(trans, true);
        ieee80211_free_hw(mvm->hw);
        return NULL;
 }
@@ -715,6 +740,9 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
                case IWL_MVM_SCAN_OS:
                        ieee80211_scan_completed(mvm->hw, true);
                        break;
+               case IWL_MVM_SCAN_SCHED:
+                       ieee80211_sched_scan_stopped(mvm->hw);
+                       break;
                }
 
                if (mvm->restart_fw > 0)
index 21407a353a3b0e623c87ae3627957ae45ae3b162..550824aa84ea4bde8794b267bda27401c6b6f058 100644 (file)
@@ -273,7 +273,10 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                if (!mvmvif->queue_params[ac].uapsd)
                        continue;
 
-               cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+               if (mvm->cur_ucode != IWL_UCODE_WOWLAN)
+                       cmd->flags |=
+                               cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+
                cmd->uapsd_ac_flags |= BIT(ac);
 
                /* QNDP TID - the highest TID with no admission control */
@@ -297,11 +300,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
        }
 
        if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
-               cmd->rx_data_timeout_uapsd =
-                       cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
-               cmd->tx_data_timeout_uapsd =
-                       cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
-
                if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
                                            BIT(IEEE80211_AC_VI) |
                                            BIT(IEEE80211_AC_BE) |
@@ -316,10 +314,31 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
                }
 
                cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
-               cmd->heavy_tx_thld_packets =
-                       IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
-               cmd->heavy_rx_thld_packets =
-                       IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+
+               if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
+                   cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+                       cmd->rx_data_timeout_uapsd =
+                               cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+                       cmd->tx_data_timeout_uapsd =
+                               cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+               } else {
+                       cmd->rx_data_timeout_uapsd =
+                               cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+                       cmd->tx_data_timeout_uapsd =
+                               cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+               }
+
+               if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+                       cmd->heavy_tx_thld_packets =
+                               IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+                       cmd->heavy_rx_thld_packets =
+                               IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+               } else {
+                       cmd->heavy_tx_thld_packets =
+                               IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+                       cmd->heavy_rx_thld_packets =
+                               IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+               }
                cmd->heavy_tx_thld_percentage =
                        IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
                cmd->heavy_rx_thld_percentage =
@@ -427,6 +446,32 @@ static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
                                    sizeof(cmd), &cmd);
 }
 
+static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+       struct iwl_device_power_cmd cmd = {
+               .flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
+       };
+
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+               return 0;
+
+       if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+               cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 :
+           mvm->disable_power_off)
+               cmd.flags &=
+                       cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+       IWL_DEBUG_POWER(mvm,
+                       "Sending device power command with flags = 0x%X\n",
+                       le16_to_cpu(cmd.flags));
+
+       return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC, sizeof(cmd),
+                                   &cmd);
+}
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
                                        struct ieee80211_vif *vif, char *buf,
@@ -437,10 +482,11 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
 
        iwl_mvm_power_build_cmd(mvm, vif, &cmd);
 
-       pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
-                        (cmd.flags &
-                        cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
-                        0 : 1);
+       if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
+               pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+                                (cmd.flags &
+                                cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+                                0 : 1);
        pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
                         iwlmvm_mod_params.power_scheme);
        pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -606,6 +652,7 @@ int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
 
 const struct iwl_mvm_power_ops pm_mac_ops = {
        .power_update_mode = iwl_mvm_power_mac_update_mode,
+       .power_update_device_mode = iwl_mvm_power_update_device,
        .power_disable = iwl_mvm_power_mac_disable,
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
index 5c6ae16ec52b934c16835a4fecdffb300a1df38b..17e2bc827f9a3b20b7da9e0d7ee4dd43ac00606a 100644 (file)
@@ -110,7 +110,8 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
                        data->n_interfaces[id]++;
                break;
        case NL80211_IFTYPE_AP:
-               if (mvmvif->ap_active)
+       case NL80211_IFTYPE_ADHOC:
+               if (mvmvif->ap_ibss_active)
                        data->n_interfaces[id]++;
                break;
        case NL80211_IFTYPE_MONITOR:
@@ -119,16 +120,45 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
                break;
        case NL80211_IFTYPE_P2P_DEVICE:
                break;
-       case NL80211_IFTYPE_ADHOC:
-               if (vif->bss_conf.ibss_joined)
-                       data->n_interfaces[id]++;
-               break;
        default:
                WARN_ON_ONCE(1);
                break;
        }
 }
 
+static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
+                                        struct iwl_time_quota_cmd *cmd)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+       struct iwl_mvm_vif *mvmvif;
+       int i, phy_id = -1, beacon_int = 0;
+
+       if (!mvm->noa_duration || !mvm->noa_vif)
+               return;
+
+       mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
+       if (!mvmvif->ap_ibss_active)
+               return;
+
+       phy_id = mvmvif->phy_ctxt->id;
+       beacon_int = mvm->noa_vif->bss_conf.beacon_int;
+
+       for (i = 0; i < MAX_BINDINGS; i++) {
+               u32 id_n_c = le32_to_cpu(cmd->quotas[i].id_and_color);
+               u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+               u32 quota = le32_to_cpu(cmd->quotas[i].quota);
+
+               if (id != phy_id)
+                       continue;
+
+               quota *= (beacon_int - mvm->noa_duration);
+               quota /= beacon_int;
+
+               cmd->quotas[i].quota = cpu_to_le32(quota);
+       }
+#endif
+}
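A worked example of the quota scaling in iwl_mvm_adjust_quota_for_noa() above, using assumed numbers rather than real firmware state: a binding quota of 128 with beacon_int = 100 TU and noa_duration = 25 TU becomes 128 * (100 - 25) / 100 = 96. A minimal sketch:

/* Sketch of the NoA quota scaling above; all values are assumed examples. */
#include <stdio.h>

int main(void)
{
	unsigned int quota = 128;        /* assumed quota for the binding */
	unsigned int beacon_int = 100;   /* assumed beacon interval, in TU */
	unsigned int noa_duration = 25;  /* assumed NoA duration, in TU */

	quota *= beacon_int - noa_duration;
	quota /= beacon_int;

	printf("adjusted quota = %u\n", quota);  /* prints 96 */
	return 0;
}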
+
 int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
 {
        struct iwl_time_quota_cmd cmd = {};
@@ -196,6 +226,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
        /* Give the remainder of the session to the first binding */
        le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
 
+       iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
+
        ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, CMD_SYNC,
                                   sizeof(cmd), &cmd);
        if (ret)
index 4ffaa3fa153f78e7d14f2ad90223b01146562c50..a0b4cc8d9c3b412c931d04606d02b72c4d769e61 100644 (file)
@@ -82,13 +82,24 @@ static const u8 ant_toggle_lookup[] = {
        [ANT_ABC] = ANT_ABC,
 };
 
-#define IWL_DECLARE_RATE_INFO(r, s, rp, rn)                   \
-       [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
-                                   IWL_RATE_SISO_##s##M_PLCP, \
-                                   IWL_RATE_MIMO2_##s##M_PLCP,\
-                                   IWL_RATE_##rp##M_INDEX,    \
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn)                          \
+       [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,             \
+                                   IWL_RATE_HT_SISO_MCS_##s##_PLCP,  \
+                                   IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+                                   IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+                                   IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
+                                   IWL_RATE_##rp##M_INDEX,           \
                                    IWL_RATE_##rn##M_INDEX }
 
+#define IWL_DECLARE_MCS_RATE(s)                                                  \
+       [IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP,                \
+                                      IWL_RATE_HT_SISO_MCS_##s##_PLCP,   \
+                                      IWL_RATE_HT_MIMO2_MCS_##s##_PLCP,  \
+                                      IWL_RATE_VHT_SISO_MCS_##s##_PLCP,  \
+                                      IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
+                                      IWL_RATE_INVM_INDEX,               \
+                                      IWL_RATE_INVM_INDEX }
+
 /*
  * Parameter order:
  *   rate, ht rate, prev rate, next rate
@@ -102,16 +113,17 @@ static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
        IWL_DECLARE_RATE_INFO(2, INV, 1, 5),     /*  2mbps */
        IWL_DECLARE_RATE_INFO(5, INV, 2, 11),    /*5.5mbps */
        IWL_DECLARE_RATE_INFO(11, INV, 9, 12),   /* 11mbps */
-       IWL_DECLARE_RATE_INFO(6, 6, 5, 11),      /*  6mbps */
-       IWL_DECLARE_RATE_INFO(9, 6, 6, 11),      /*  9mbps */
-       IWL_DECLARE_RATE_INFO(12, 12, 11, 18),   /* 12mbps */
-       IWL_DECLARE_RATE_INFO(18, 18, 12, 24),   /* 18mbps */
-       IWL_DECLARE_RATE_INFO(24, 24, 18, 36),   /* 24mbps */
-       IWL_DECLARE_RATE_INFO(36, 36, 24, 48),   /* 36mbps */
-       IWL_DECLARE_RATE_INFO(48, 48, 36, 54),   /* 48mbps */
-       IWL_DECLARE_RATE_INFO(54, 54, 48, INV),  /* 54mbps */
-       IWL_DECLARE_RATE_INFO(60, 60, 48, INV),  /* 60mbps */
-       /* FIXME:RS:          ^^    should be INV (legacy) */
+       IWL_DECLARE_RATE_INFO(6, 0, 5, 11),      /*  6mbps ; MCS 0 */
+       IWL_DECLARE_RATE_INFO(9, INV, 6, 11),    /*  9mbps */
+       IWL_DECLARE_RATE_INFO(12, 1, 11, 18),    /* 12mbps ; MCS 1 */
+       IWL_DECLARE_RATE_INFO(18, 2, 12, 24),    /* 18mbps ; MCS 2 */
+       IWL_DECLARE_RATE_INFO(24, 3, 18, 36),    /* 24mbps ; MCS 3 */
+       IWL_DECLARE_RATE_INFO(36, 4, 24, 48),    /* 36mbps ; MCS 4 */
+       IWL_DECLARE_RATE_INFO(48, 5, 36, 54),    /* 48mbps ; MCS 5 */
+       IWL_DECLARE_RATE_INFO(54, 6, 48, INV),   /* 54mbps ; MCS 6 */
+       IWL_DECLARE_MCS_RATE(7),                 /* MCS 7 */
+       IWL_DECLARE_MCS_RATE(8),                 /* MCS 8 */
+       IWL_DECLARE_MCS_RATE(9),                 /* MCS 9 */
 };
 
 static inline u8 rs_extract_rate(u32 rate_n_flags)
@@ -124,26 +136,30 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
 {
        int idx = 0;
 
-       /* HT rate format */
        if (rate_n_flags & RATE_MCS_HT_MSK) {
-               idx = rs_extract_rate(rate_n_flags);
-
-               WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
-               if (idx >= IWL_RATE_MIMO2_6M_PLCP)
-                       idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+               idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
+               idx += IWL_RATE_MCS_0_INDEX;
 
-               idx += IWL_FIRST_OFDM_RATE;
-               /* skip 9M not supported in ht*/
+               /* skip 9M not supported in HT */
                if (idx >= IWL_RATE_9M_INDEX)
                        idx += 1;
-               if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+               if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
                        return idx;
+       } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+               idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+               idx += IWL_RATE_MCS_0_INDEX;
 
-       /* legacy rate format, search for match in table */
+               /* skip 9M not supported in VHT */
+               if (idx >= IWL_RATE_9M_INDEX)
+                       idx++;
+               if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
+                       return idx;
        } else {
+               /* legacy rate format, search for match in table */
+
+               u8 legacy_rate = rs_extract_rate(rate_n_flags);
                for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
-                       if (iwl_rates[idx].plcp ==
-                                       rs_extract_rate(rate_n_flags))
+                       if (iwl_rates[idx].plcp == legacy_rate)
                                return idx;
        }
 
@@ -155,6 +171,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
                                   struct ieee80211_sta *sta,
                                   struct iwl_lq_sta *lq_sta);
 static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+                            struct ieee80211_sta *sta,
                             struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
 static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
 
@@ -180,35 +197,52 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
  */
 
 static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
-       7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+       7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
 };
 
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202}, /* Norm */
-       {0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210}, /* SGI */
-       {0, 0, 0, 0, 47, 0,  91, 133, 171, 242, 305, 334, 362}, /* AGG */
-       {0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
+/* Expected TpT tables. 4 indexes:
+ * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
+ */
+static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202, 216, 0},
+       {0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210, 225, 0},
+       {0, 0, 0, 0, 49, 0,  97, 145, 192, 285, 375, 420, 464, 551, 0},
+       {0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
 };
 
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
-       {0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
-       {0, 0, 0, 0,  94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
-       {0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
+static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250,  257,  269,  275},
+       {0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257,  264,  275,  280},
+       {0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828,  911, 1070, 1173},
+       {0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
+};
+
+static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0, 130, 0, 191, 223, 244,  273,  288,  294,  298,  305,  308},
+       {0, 0, 0, 0, 138, 0, 200, 231, 251,  279,  293,  298,  302,  308,  312},
+       {0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
+       {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
 };
 
 static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0,  74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
-       {0, 0, 0, 0,  81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
-       {0, 0, 0, 0,  89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
-       {0, 0, 0, 0,  97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
+       {0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250,  261, 0},
+       {0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256,  267, 0},
+       {0, 0, 0, 0,  98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
+       {0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
 };
 
 static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
-       {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
-       {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
-       {0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
-       {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
+       {0, 0, 0, 0, 123, 0, 182, 214, 235,  264,  279,  285,  289,  296,  300},
+       {0, 0, 0, 0, 131, 0, 191, 222, 242,  270,  284,  289,  293,  300,  303},
+       {0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
+       {0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
+};
+
+static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+       {0, 0, 0, 0, 182, 0, 240,  264,  278,  299,  308,  311,  313,  317,  319},
+       {0, 0, 0, 0, 190, 0, 247,  269,  282,  302,  310,  313,  315,  319,  320},
+       {0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
+       {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
 };
 
 /* mbps, mcs */
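The four rows in each table above follow the index convention from the comment (0 = NGI, 1 = SGI, 2 = AGG+NGI, 3 = AGG+SGI); rs_set_expected_tpt_table() first picks the SISO/MIMO2 and 20/40/80 MHz table, then one of these rows. A minimal sketch of the row selection, assuming only the documented layout (the helper name is hypothetical, not part of the patch):

    /* Hypothetical helper: derive the expected-TpT row from the SGI and
     * aggregation state, matching 0=NGI, 1=SGI, 2=AGG+NGI, 3=AGG+SGI.
     */
    static inline const s32 *expected_tpt_row(s32 (*tpt_tbl)[IWL_RATE_COUNT],
                                              bool is_sgi, bool is_agg)
    {
            return tpt_tbl[(is_agg ? 2 : 0) + (is_sgi ? 1 : 0)];
    }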
@@ -263,7 +297,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
                       lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
 
        if (lq_sta->dbg_fixed_rate) {
-               rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+               rs_fill_link_cmd(NULL, NULL, lq_sta, lq_sta->dbg_fixed_rate);
                iwl_mvm_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
        }
 }
@@ -275,17 +309,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
 {
        int ret = -EAGAIN;
 
-       /*
-        * Don't create TX aggregation sessions when in high
-        * BT traffic, as they would just be disrupted by BT.
-        */
-       if (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2) {
-               IWL_DEBUG_COEX(mvm, "BT traffic (%d), no aggregation allowed\n",
-                              BT_MBOX_MSG(&mvm->last_bt_notif,
-                                          3, TRAFFIC_LOAD));
-               return ret;
-       }
-
        IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
                     sta->addr, tid);
        ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -416,49 +439,54 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
  */
 /* FIXME:RS:remove this function and put the flags statically in the table */
 static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
-                                struct iwl_scale_tbl_info *tbl,
-                                int index, u8 use_green)
+                                struct iwl_scale_tbl_info *tbl, int index)
 {
        u32 rate_n_flags = 0;
 
+       rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+                        RATE_MCS_ANT_ABC_MSK);
+
        if (is_legacy(tbl->lq_type)) {
-               rate_n_flags = iwl_rates[index].plcp;
+               rate_n_flags |= iwl_rates[index].plcp;
                if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
                        rate_n_flags |= RATE_MCS_CCK_MSK;
-       } else if (is_Ht(tbl->lq_type)) {
-               if (index > IWL_LAST_OFDM_RATE) {
+               return rate_n_flags;
+       }
+
+       if (is_ht(tbl->lq_type)) {
+               if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
                        IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
-                       index = IWL_LAST_OFDM_RATE;
+                       index = IWL_LAST_HT_RATE;
                }
-               rate_n_flags = RATE_MCS_HT_MSK;
+               rate_n_flags |= RATE_MCS_HT_MSK;
 
-               if (is_siso(tbl->lq_type))
-                       rate_n_flags |= iwl_rates[index].plcp_siso;
-               else if (is_mimo2(tbl->lq_type))
-                       rate_n_flags |= iwl_rates[index].plcp_mimo2;
+               if (is_ht_siso(tbl->lq_type))
+                       rate_n_flags |= iwl_rates[index].plcp_ht_siso;
+               else if (is_ht_mimo2(tbl->lq_type))
+                       rate_n_flags |= iwl_rates[index].plcp_ht_mimo2;
                else
                        WARN_ON_ONCE(1);
+       } else if (is_vht(tbl->lq_type)) {
+               if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
+                       IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
+                       index = IWL_LAST_VHT_RATE;
+               }
+               rate_n_flags |= RATE_MCS_VHT_MSK;
+               if (is_vht_siso(tbl->lq_type))
+                       rate_n_flags |= iwl_rates[index].plcp_vht_siso;
+               else if (is_vht_mimo2(tbl->lq_type))
+                       rate_n_flags |= iwl_rates[index].plcp_vht_mimo2;
+               else
+                       WARN_ON_ONCE(1);
+
        } else {
                IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
        }
 
-       rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
-                                                    RATE_MCS_ANT_ABC_MSK);
-
-       if (is_Ht(tbl->lq_type)) {
-               if (tbl->is_ht40)
-                       rate_n_flags |= RATE_MCS_CHAN_WIDTH_40;
-               if (tbl->is_SGI)
-                       rate_n_flags |= RATE_MCS_SGI_MSK;
-
-               if (use_green) {
-                       rate_n_flags |= RATE_HT_MCS_GF_MSK;
-                       if (is_siso(tbl->lq_type) && tbl->is_SGI) {
-                               rate_n_flags &= ~RATE_MCS_SGI_MSK;
-                               IWL_ERR(mvm, "GF was set with SGI:SISO\n");
-                       }
-               }
-       }
+       rate_n_flags |= tbl->bw;
+       if (tbl->is_SGI)
+               rate_n_flags |= RATE_MCS_SGI_MSK;
+
        return rate_n_flags;
 }
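With the restructuring above, the function simply ORs independent bit fields into one rate_n_flags word. An illustrative composition for a hypothetical VHT SISO entry at 80 MHz with short GI (all identifiers are the ones used in the hunk; the PLCP value comes from the iwl_rates table):

    u32 r = 0;

    r |= (tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK;
    r |= RATE_MCS_VHT_MSK | iwl_rates[index].plcp_vht_siso;
    r |= RATE_MCS_CHAN_WIDTH_80;            /* tbl->bw */
    r |= RATE_MCS_SGI_MSK;                  /* tbl->is_SGI */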
 
@@ -473,7 +501,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
 {
        u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
        u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
-       u8 mcs;
+       u8 nss;
 
        memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
        *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
@@ -483,41 +511,62 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
                return -EINVAL;
        }
        tbl->is_SGI = 0;        /* default legacy setup */
-       tbl->is_ht40 = 0;
+       tbl->bw = 0;
        tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
        tbl->lq_type = LQ_NONE;
        tbl->max_search = IWL_MAX_SEARCH;
 
-       /* legacy rate format */
-       if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+       /* Legacy */
+       if (!(rate_n_flags & RATE_MCS_HT_MSK) &&
+           !(rate_n_flags & RATE_MCS_VHT_MSK)) {
                if (num_of_ant == 1) {
                        if (band == IEEE80211_BAND_5GHZ)
-                               tbl->lq_type = LQ_A;
+                               tbl->lq_type = LQ_LEGACY_A;
                        else
-                               tbl->lq_type = LQ_G;
+                               tbl->lq_type = LQ_LEGACY_G;
                }
-       /* HT rate format */
-       } else {
-               if (rate_n_flags & RATE_MCS_SGI_MSK)
-                       tbl->is_SGI = 1;
-
-               if (rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
-                       tbl->is_ht40 = 1;
-
-               mcs = rs_extract_rate(rate_n_flags);
-
-               /* SISO */
-               if (mcs <= IWL_RATE_SISO_60M_PLCP) {
-                       if (num_of_ant == 1)
-                               tbl->lq_type = LQ_SISO; /*else NONE*/
-               /* MIMO2 */
-               } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
-                       if (num_of_ant == 2)
-                               tbl->lq_type = LQ_MIMO2;
+
+               return 0;
+       }
+
+       /* HT or VHT */
+       if (rate_n_flags & RATE_MCS_SGI_MSK)
+               tbl->is_SGI = 1;
+
+       tbl->bw = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
+
+       if (rate_n_flags & RATE_MCS_HT_MSK) {
+               nss = ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+                      RATE_HT_MCS_NSS_POS) + 1;
+
+               if (nss == 1) {
+                       tbl->lq_type = LQ_HT_SISO;
+                       WARN_ON_ONCE(num_of_ant != 1);
+               } else if (nss == 2) {
+                       tbl->lq_type = LQ_HT_MIMO2;
+                       WARN_ON_ONCE(num_of_ant != 2);
+               } else {
+                       WARN_ON_ONCE(1);
+               }
+       } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+               nss = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+                      RATE_VHT_MCS_NSS_POS) + 1;
+
+               if (nss == 1) {
+                       tbl->lq_type = LQ_VHT_SISO;
+                       WARN_ON_ONCE(num_of_ant != 1);
+               } else if (nss == 2) {
+                       tbl->lq_type = LQ_VHT_MIMO2;
+                       WARN_ON_ONCE(num_of_ant != 2);
                } else {
-                       WARN_ON_ONCE(num_of_ant == 3);
+                       WARN_ON_ONCE(1);
                }
        }
+
+       WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_160);
+       WARN_ON_ONCE(tbl->bw == RATE_MCS_CHAN_WIDTH_80 &&
+                    !is_vht(tbl->lq_type));
+
        return 0;
 }
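A worked decode along the path above, assuming a hypothetical rate_n_flags with RATE_MCS_VHT_MSK set, an NSS field of 1 (i.e. two streams), 80 MHz width and SGI:

    /* is_SGI  = 1 (RATE_MCS_SGI_MSK is set)
     * bw      = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK == RATE_MCS_CHAN_WIDTH_80
     * nss     = ((rate_n_flags & RATE_VHT_MCS_NSS_MSK)
     *             >> RATE_VHT_MCS_NSS_POS) + 1 == 2
     * lq_type = LQ_VHT_MIMO2; neither WARN fires, since the 80 MHz width
     * is paired with a VHT type and 160 MHz is not used.
     */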
 
@@ -549,22 +598,6 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
        return 1;
 }
 
-/**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool rs_use_green(struct ieee80211_sta *sta)
-{
-       /*
-        * There's a bug somewhere in this code that causes the
-        * scaling to get stuck because GF+SGI can't be combined
-        * in SISO rates. Until we find that bug, disable GF, it
-        * has only limited benefit and we still interoperate with
-        * GF APs since we can always receive GF transmissions.
-        */
-       return false;
-}
-
 /**
  * rs_get_supported_rates - get the available rates
  *
@@ -576,16 +609,15 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
                                  struct ieee80211_hdr *hdr,
                                  enum iwl_table_type rate_type)
 {
-       if (is_legacy(rate_type)) {
+       if (is_legacy(rate_type))
                return lq_sta->active_legacy_rate;
-       } else {
-               if (is_siso(rate_type))
-                       return lq_sta->active_siso_rate;
-               else {
-                       WARN_ON_ONCE(!is_mimo2(rate_type));
-                       return lq_sta->active_mimo2_rate;
-               }
-       }
+       else if (is_siso(rate_type))
+               return lq_sta->active_siso_rate;
+       else if (is_mimo2(rate_type))
+               return lq_sta->active_mimo2_rate;
+
+       WARN_ON_ONCE(1);
+       return 0;
 }
 
 static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
@@ -652,7 +684,6 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
        u16 rate_mask;
        u16 high_low;
        u8 switch_to_legacy = 0;
-       u8 is_green = lq_sta->is_green;
        struct iwl_mvm *mvm = lq_sta->drv;
 
        /* check if we need to switch from HT to legacy rates.
@@ -662,15 +693,15 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
                switch_to_legacy = 1;
                scale_index = rs_ht_to_legacy[scale_index];
                if (lq_sta->band == IEEE80211_BAND_5GHZ)
-                       tbl->lq_type = LQ_A;
+                       tbl->lq_type = LQ_LEGACY_A;
                else
-                       tbl->lq_type = LQ_G;
+                       tbl->lq_type = LQ_LEGACY_G;
 
                if (num_of_ant(tbl->ant_type) > 1)
                        tbl->ant_type =
                            first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
 
-               tbl->is_ht40 = 0;
+               tbl->bw = 0;
                tbl->is_SGI = 0;
                tbl->max_search = IWL_MAX_SEARCH;
        }
@@ -701,7 +732,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
                low = scale_index;
 
 out:
-       return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+       return rate_n_flags_from_tbl(lq_sta->drv, tbl, low);
 }
 
 /*
@@ -714,6 +745,18 @@ static bool table_type_matches(struct iwl_scale_tbl_info *a,
                (a->is_SGI == b->is_SGI);
 }
 
+static u32 rs_ch_width_from_mac_flags(enum mac80211_rate_control_flags flags)
+{
+       if (flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+               return RATE_MCS_CHAN_WIDTH_40;
+       else if (flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+               return RATE_MCS_CHAN_WIDTH_80;
+       else if (flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+               return RATE_MCS_CHAN_WIDTH_160;
+
+       return RATE_MCS_CHAN_WIDTH_20;
+}
+
 /*
  * mac80211 sends us Tx status
  */
@@ -783,16 +826,23 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
                 */
                if (info->band == IEEE80211_BAND_2GHZ)
                        mac_index += IWL_FIRST_OFDM_RATE;
+       } else if (mac_flags & IEEE80211_TX_RC_VHT_MCS) {
+               mac_index &= RATE_VHT_MCS_RATE_CODE_MSK;
+               if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+                       mac_index++;
        }
+
        /* Here we actually compare this rate to the latest LQ command */
        if ((mac_index < 0) ||
            (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
-           (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+           (tbl_type.bw != rs_ch_width_from_mac_flags(mac_flags)) ||
            (tbl_type.ant_type != info->status.antenna) ||
            (!!(tx_rate & RATE_MCS_HT_MSK) !=
-                               !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+            !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+           (!!(tx_rate & RATE_MCS_VHT_MSK) !=
+            !!(mac_flags & IEEE80211_TX_RC_VHT_MCS)) ||
            (!!(tx_rate & RATE_HT_MCS_GF_MSK) !=
-                               !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+            !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
            (rs_index != mac_index)) {
                IWL_DEBUG_RATE(mvm,
                               "initial rate %d does not match %d (0x%x)\n",
@@ -947,7 +997,8 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
        s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
 
        /* Check for invalid LQ type */
-       if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+       if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_ht(tbl->lq_type) &&
+                        !(is_vht(tbl->lq_type)))) {
                tbl->expected_tpt = expected_tpt_legacy;
                return;
        }
@@ -958,18 +1009,40 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
                return;
        }
 
+       ht_tbl_pointer = expected_tpt_mimo2_20MHz;
        /* Choose among many HT tables depending on number of streams
-        * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+        * (SISO/MIMO2), channel width (20/40/80), SGI, and aggregation
         * status */
-       if (is_siso(tbl->lq_type) && !tbl->is_ht40)
-               ht_tbl_pointer = expected_tpt_siso20MHz;
-       else if (is_siso(tbl->lq_type))
-               ht_tbl_pointer = expected_tpt_siso40MHz;
-       else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
-               ht_tbl_pointer = expected_tpt_mimo2_20MHz;
-       else {
-               WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
-               ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+       if (is_siso(tbl->lq_type)) {
+               switch (tbl->bw) {
+               case RATE_MCS_CHAN_WIDTH_20:
+                       ht_tbl_pointer = expected_tpt_siso_20MHz;
+                       break;
+               case RATE_MCS_CHAN_WIDTH_40:
+                       ht_tbl_pointer = expected_tpt_siso_40MHz;
+                       break;
+               case RATE_MCS_CHAN_WIDTH_80:
+                       ht_tbl_pointer = expected_tpt_siso_80MHz;
+                       break;
+               default:
+                       WARN_ON_ONCE(1);
+               }
+       } else if (is_mimo2(tbl->lq_type)) {
+               switch (tbl->bw) {
+               case RATE_MCS_CHAN_WIDTH_20:
+                       ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+                       break;
+               case RATE_MCS_CHAN_WIDTH_40:
+                       ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+                       break;
+               case RATE_MCS_CHAN_WIDTH_80:
+                       ht_tbl_pointer = expected_tpt_mimo2_80MHz;
+                       break;
+               default:
+                       WARN_ON_ONCE(1);
+               }
+       } else {
+               WARN_ON_ONCE(1);
        }
 
        if (!tbl->is_SGI && !lq_sta->is_agg)            /* Normal */
@@ -1084,9 +1157,47 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
        return new_rate;
 }
 
-static bool iwl_is_ht40_tx_allowed(struct ieee80211_sta *sta)
+/* Move to the next action and wrap around to the first action in case
+ * we're at the last action. Assumes actions start at 0.
+ */
+static inline void rs_move_next_action(struct iwl_scale_tbl_info *tbl,
+                                      u8 last_action)
+{
+       BUILD_BUG_ON(IWL_LEGACY_FIRST_ACTION != 0);
+       BUILD_BUG_ON(IWL_SISO_FIRST_ACTION != 0);
+       BUILD_BUG_ON(IWL_MIMO2_FIRST_ACTION != 0);
+
+       tbl->action = (tbl->action + 1) % (last_action + 1);
+}
+
+static void rs_set_bw_from_sta(struct iwl_scale_tbl_info *tbl,
+                              struct ieee80211_sta *sta)
+{
+       if (sta->bandwidth >= IEEE80211_STA_RX_BW_80)
+               tbl->bw = RATE_MCS_CHAN_WIDTH_80;
+       else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+               tbl->bw = RATE_MCS_CHAN_WIDTH_40;
+       else
+               tbl->bw = RATE_MCS_CHAN_WIDTH_20;
+}
+
+static bool rs_sgi_allowed(struct iwl_scale_tbl_info *tbl,
+                          struct ieee80211_sta *sta)
 {
-       return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+       if (is_ht20(tbl) && (ht_cap->cap &
+                            IEEE80211_HT_CAP_SGI_20))
+               return true;
+       if (is_ht40(tbl) && (ht_cap->cap &
+                            IEEE80211_HT_CAP_SGI_40))
+               return true;
+       if (is_ht80(tbl) && (vht_cap->cap &
+                            IEEE80211_VHT_CAP_SHORT_GI_80))
+               return true;
+
+       return false;
 }
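rs_sgi_allowed() folds the per-width SGI capability checks (HT 20/40 MHz, VHT 80 MHz) into a single predicate; the IWL_*_SWITCH_GI actions further down use it roughly as in this sketch:

    if (!rs_sgi_allowed(tbl, sta))
            break;                  /* station can't receive SGI at this width */

    memcpy(search_tbl, tbl, sz);
    search_tbl->is_SGI = !tbl->is_SGI;      /* toggle GI and re-evaluate TpT */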
 
 /*
@@ -1099,7 +1210,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
 {
        u16 rate_mask;
        s32 rate;
-       s8 is_green = lq_sta->is_green;
 
        if (!sta->ht_cap.ht_supported)
                return -1;
@@ -1113,16 +1223,12 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
 
        IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO2\n");
 
-       tbl->lq_type = LQ_MIMO2;
+       tbl->lq_type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
        tbl->action = 0;
        tbl->max_search = IWL_MAX_SEARCH;
        rate_mask = lq_sta->active_mimo2_rate;
 
-       if (iwl_is_ht40_tx_allowed(sta))
-               tbl->is_ht40 = 1;
-       else
-               tbl->is_ht40 = 0;
-
+       rs_set_bw_from_sta(tbl, sta);
        rs_set_expected_tpt_table(lq_sta, tbl);
 
        rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
@@ -1134,10 +1240,10 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
                               rate, rate_mask);
                return -1;
        }
-       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
+       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
 
-       IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
-                      tbl->current_rate, is_green);
+       IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
+                      tbl->current_rate);
        return 0;
 }
 
@@ -1150,7 +1256,6 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
                             struct iwl_scale_tbl_info *tbl, int index)
 {
        u16 rate_mask;
-       u8 is_green = lq_sta->is_green;
        s32 rate;
 
        if (!sta->ht_cap.ht_supported)
@@ -1158,19 +1263,12 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
 
        IWL_DEBUG_RATE(mvm, "LQ: try to switch to SISO\n");
 
-       tbl->lq_type = LQ_SISO;
+       tbl->lq_type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
        tbl->action = 0;
        tbl->max_search = IWL_MAX_SEARCH;
        rate_mask = lq_sta->active_siso_rate;
 
-       if (iwl_is_ht40_tx_allowed(sta))
-               tbl->is_ht40 = 1;
-       else
-               tbl->is_ht40 = 0;
-
-       if (is_green)
-               tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
-
+       rs_set_bw_from_sta(tbl, sta);
        rs_set_expected_tpt_table(lq_sta, tbl);
        rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
 
@@ -1181,9 +1279,9 @@ static int rs_switch_to_siso(struct iwl_mvm *mvm,
                               rate, rate_mask);
                return -1;
        }
-       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
-       IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
-                      tbl->current_rate, is_green);
+       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate);
+       IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index\n",
+                      tbl->current_rate);
        return 0;
 }
 
@@ -1211,14 +1309,10 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
        while (1) {
                lq_sta->action_counter++;
                switch (tbl->action) {
-               case IWL_LEGACY_SWITCH_ANTENNA1:
-               case IWL_LEGACY_SWITCH_ANTENNA2:
+               case IWL_LEGACY_SWITCH_ANTENNA:
                        IWL_DEBUG_RATE(mvm, "LQ: Legacy toggle Antenna\n");
 
-                       if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
-                            tx_chains_num <= 1) ||
-                           (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
-                            tx_chains_num <= 2))
+                       if (tx_chains_num <= 1)
                                break;
 
                        /* Don't change antenna if success has been great */
@@ -1273,9 +1367,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
                default:
                        WARN_ON_ONCE(1);
                }
-               tbl->action++;
-               if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
-                       tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+               rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
 
                if (tbl->action == start_action)
                        break;
@@ -1285,9 +1377,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
 
 out:
        lq_sta->search_better_tbl = 1;
-       tbl->action++;
-       if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
-               tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+       rs_move_next_action(tbl, IWL_LEGACY_LAST_ACTION);
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
        return 0;
@@ -1300,12 +1390,10 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
                                 struct iwl_lq_sta *lq_sta,
                                 struct ieee80211_sta *sta, int index)
 {
-       u8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        struct iwl_scale_tbl_info *search_tbl =
                                &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
        struct iwl_rate_scale_data *window = &(tbl->win[index]);
-       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
        u32 sz = (sizeof(struct iwl_scale_tbl_info) -
                  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
        u8 start_action;
@@ -1314,40 +1402,17 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
        u8 update_search_tbl_counter = 0;
        int ret;
 
-       switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
-       case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
-               /* nothing */
-               break;
-       case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
-               /* avoid antenna B unless MIMO */
-               if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
-                       tbl->action = IWL_SISO_SWITCH_MIMO2;
-               break;
-       case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
-       case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
-               /* avoid antenna B and MIMO */
-               valid_tx_ant =
-                       first_antenna(iwl_fw_valid_tx_ant(mvm->fw));
-               if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
-                       tbl->action = IWL_SISO_SWITCH_ANTENNA1;
-               break;
-       default:
-               IWL_ERR(mvm, "Invalid BT load %d",
-                       BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
-               break;
-       }
+       if (tbl->action == IWL_SISO_SWITCH_MIMO2 &&
+           !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+               tbl->action = IWL_SISO_SWITCH_ANTENNA;
 
        start_action = tbl->action;
        while (1) {
                lq_sta->action_counter++;
                switch (tbl->action) {
-               case IWL_SISO_SWITCH_ANTENNA1:
-               case IWL_SISO_SWITCH_ANTENNA2:
+               case IWL_SISO_SWITCH_ANTENNA:
                        IWL_DEBUG_RATE(mvm, "LQ: SISO toggle Antenna\n");
-                       if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
-                            tx_chains_num <= 1) ||
-                           (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
-                            tx_chains_num <= 2))
+                       if (tx_chains_num <= 1)
                                break;
 
                        if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
@@ -1380,23 +1445,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
                                goto out;
                        break;
                case IWL_SISO_SWITCH_GI:
-                       if (!tbl->is_ht40 && !(ht_cap->cap &
-                                               IEEE80211_HT_CAP_SGI_20))
-                               break;
-                       if (tbl->is_ht40 && !(ht_cap->cap &
-                                               IEEE80211_HT_CAP_SGI_40))
+                       if (!rs_sgi_allowed(tbl, sta))
                                break;
 
                        IWL_DEBUG_RATE(mvm, "LQ: SISO toggle SGI/NGI\n");
 
                        memcpy(search_tbl, tbl, sz);
-                       if (is_green) {
-                               if (!tbl->is_SGI)
-                                       break;
-                               else
-                                       IWL_ERR(mvm,
-                                               "SGI was set in GF+SISO\n");
-                       }
                        search_tbl->is_SGI = !tbl->is_SGI;
                        rs_set_expected_tpt_table(lq_sta, search_tbl);
                        if (tbl->is_SGI) {
@@ -1405,16 +1459,13 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
                                        break;
                        }
                        search_tbl->current_rate =
-                               rate_n_flags_from_tbl(mvm, search_tbl,
-                                                     index, is_green);
+                               rate_n_flags_from_tbl(mvm, search_tbl, index);
                        update_search_tbl_counter = 1;
                        goto out;
                default:
                        WARN_ON_ONCE(1);
                }
-               tbl->action++;
-               if (tbl->action > IWL_SISO_SWITCH_GI)
-                       tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+               rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
 
                if (tbl->action == start_action)
                        break;
@@ -1424,9 +1475,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
 
  out:
        lq_sta->search_better_tbl = 1;
-       tbl->action++;
-       if (tbl->action > IWL_SISO_SWITCH_GI)
-               tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+       rs_move_next_action(tbl, IWL_SISO_LAST_ACTION);
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
 
@@ -1440,63 +1489,20 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
                                 struct iwl_lq_sta *lq_sta,
                                 struct ieee80211_sta *sta, int index)
 {
-       s8 is_green = lq_sta->is_green;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        struct iwl_scale_tbl_info *search_tbl =
                                &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
-       struct iwl_rate_scale_data *window = &(tbl->win[index]);
-       struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
        u32 sz = (sizeof(struct iwl_scale_tbl_info) -
                  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
        u8 start_action;
        u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
-       u8 tx_chains_num = num_of_ant(valid_tx_ant);
        u8 update_search_tbl_counter = 0;
        int ret;
 
-       switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
-       case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
-               /* nothing */
-               break;
-       case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
-       case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
-               /* avoid antenna B and MIMO */
-               if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
-                       tbl->action = IWL_MIMO2_SWITCH_SISO_A;
-               break;
-       case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
-               /* avoid antenna B unless MIMO */
-               if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
-                       tbl->action = IWL_MIMO2_SWITCH_SISO_A;
-               break;
-       default:
-               IWL_ERR(mvm, "Invalid BT load %d",
-                       BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
-               break;
-       }
-
        start_action = tbl->action;
        while (1) {
                lq_sta->action_counter++;
                switch (tbl->action) {
-               case IWL_MIMO2_SWITCH_ANTENNA1:
-               case IWL_MIMO2_SWITCH_ANTENNA2:
-                       IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle Antennas\n");
-
-                       if (tx_chains_num <= 2)
-                               break;
-
-                       if (window->success_ratio >= IWL_RS_GOOD_RATIO)
-                               break;
-
-                       memcpy(search_tbl, tbl, sz);
-                       if (rs_toggle_antenna(valid_tx_ant,
-                                             &search_tbl->current_rate,
-                                             search_tbl)) {
-                               update_search_tbl_counter = 1;
-                               goto out;
-                       }
-                       break;
                case IWL_MIMO2_SWITCH_SISO_A:
                case IWL_MIMO2_SWITCH_SISO_B:
                        IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
@@ -1521,11 +1527,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
                        break;
 
                case IWL_MIMO2_SWITCH_GI:
-                       if (!tbl->is_ht40 && !(ht_cap->cap &
-                                               IEEE80211_HT_CAP_SGI_20))
-                               break;
-                       if (tbl->is_ht40 && !(ht_cap->cap &
-                                               IEEE80211_HT_CAP_SGI_40))
+                       if (!rs_sgi_allowed(tbl, sta))
                                break;
 
                        IWL_DEBUG_RATE(mvm, "LQ: MIMO2 toggle SGI/NGI\n");
@@ -1546,16 +1548,13 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
                                        break;
                        }
                        search_tbl->current_rate =
-                               rate_n_flags_from_tbl(mvm, search_tbl,
-                                                     index, is_green);
+                               rate_n_flags_from_tbl(mvm, search_tbl, index);
                        update_search_tbl_counter = 1;
                        goto out;
                default:
                        WARN_ON_ONCE(1);
                }
-               tbl->action++;
-               if (tbl->action > IWL_MIMO2_SWITCH_GI)
-                       tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+               rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
 
                if (tbl->action == start_action)
                        break;
@@ -1564,9 +1563,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
        return 0;
  out:
        lq_sta->search_better_tbl = 1;
-       tbl->action++;
-       if (tbl->action > IWL_MIMO2_SWITCH_GI)
-               tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+       rs_move_next_action(tbl, IWL_MIMO2_LAST_ACTION);
        if (update_search_tbl_counter)
                search_tbl->action = tbl->action;
 
@@ -1660,15 +1657,16 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
  * setup rate table in uCode
  */
 static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+                              struct ieee80211_sta *sta,
                               struct iwl_lq_sta *lq_sta,
                               struct iwl_scale_tbl_info *tbl,
-                              int index, u8 is_green)
+                              int index)
 {
        u32 rate;
 
        /* Update uCode's rate table. */
-       rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
-       rs_fill_link_cmd(mvm, lq_sta, rate);
+       rate = rate_n_flags_from_tbl(mvm, tbl, index);
+       rs_fill_link_cmd(mvm, sta, lq_sta, rate);
        iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
 }
 
@@ -1712,7 +1710,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
        u8 update_lq = 0;
        struct iwl_scale_tbl_info *tbl, *tbl1;
        u16 rate_scale_index_msk = 0;
-       u8 is_green = 0;
        u8 active_tbl = 0;
        u8 done_search = 0;
        u16 high_low;
@@ -1754,11 +1751,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
                active_tbl = 1 - lq_sta->active_tbl;
 
        tbl = &(lq_sta->lq_info[active_tbl]);
-       if (is_legacy(tbl->lq_type))
-               lq_sta->is_green = 0;
-       else
-               lq_sta->is_green = rs_use_green(sta);
-       is_green = lq_sta->is_green;
 
        /* current tx rate */
        index = lq_sta->last_txrate_idx;
@@ -1797,7 +1789,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
                        tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
                        /* get "active" rate info */
                        index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
-                       rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+                       rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
                }
                return;
        }
@@ -1978,24 +1970,24 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
             (current_tpt > (100 * tbl->expected_tpt[low]))))
                scale_action = 0;
 
-       if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+       if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
             IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
                if (lq_sta->last_bt_traffic >
-                   BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+                   le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
                        /*
                         * don't set scale_action, don't want to scale up if
                         * the rate scale doesn't otherwise think that is a
                         * good idea.
                         */
                } else if (lq_sta->last_bt_traffic <=
-                          BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
+                          le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)) {
                        scale_action = -1;
                }
        }
        lq_sta->last_bt_traffic =
-               BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
+               le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
 
-       if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
+       if ((le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) >=
             IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
                /* search for a new modulation */
                rs_stay_in_table(lq_sta, true);
@@ -2032,7 +2024,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 lq_update:
        /* Replace uCode's rate table for the destination station. */
        if (update_lq)
-               rs_update_rate_tbl(mvm, lq_sta, tbl, index, is_green);
+               rs_update_rate_tbl(mvm, sta, lq_sta, tbl, index);
 
        rs_stay_in_table(lq_sta, false);
 
@@ -2071,7 +2063,7 @@ lq_update:
                        IWL_DEBUG_RATE(mvm,
                                       "Switch current  mcs: %X index: %d\n",
                                       tbl->current_rate, index);
-                       rs_fill_link_cmd(mvm, lq_sta, tbl->current_rate);
+                       rs_fill_link_cmd(mvm, sta, lq_sta, tbl->current_rate);
                        iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
                } else {
                        done_search = 1;
@@ -2113,7 +2105,7 @@ lq_update:
        }
 
 out:
-       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index, is_green);
+       tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, index);
        lq_sta->last_txrate_idx = index;
 }
 
@@ -2140,7 +2132,6 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
        int rate_idx;
        int i;
        u32 rate;
-       u8 use_green = rs_use_green(sta);
        u8 active_tbl = 0;
        u8 valid_tx_ant;
 
@@ -2172,10 +2163,10 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
        if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
                rs_toggle_antenna(valid_tx_ant, &rate, tbl);
 
-       rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx, use_green);
+       rate = rate_n_flags_from_tbl(mvm, tbl, rate_idx);
        tbl->current_rate = rate;
        rs_set_expected_tpt_table(lq_sta, tbl);
-       rs_fill_link_cmd(NULL, lq_sta, rate);
+       rs_fill_link_cmd(NULL, NULL, lq_sta, rate);
        /* TODO restore station should remember the lq cmd */
        iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_SYNC, true);
 }
@@ -2190,7 +2181,6 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
        struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_lq_sta *lq_sta = mvm_sta;
-       int rate_idx;
 
        IWL_DEBUG_RATE_LIMIT(mvm, "rate scale calculate new rate for skb\n");
 
@@ -2215,36 +2205,9 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
        if (rate_control_send_low(sta, mvm_sta, txrc))
                return;
 
-       rate_idx  = lq_sta->last_txrate_idx;
-
-       if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
-               rate_idx -= IWL_FIRST_OFDM_RATE;
-               /* 6M and 9M shared same MCS index */
-               rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
-               WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
-                            IWL_RATE_MIMO3_6M_PLCP);
-               if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
-                   IWL_RATE_MIMO2_6M_PLCP)
-                       rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
-               info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
-               if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
-                       info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
-               if (lq_sta->last_rate_n_flags & RATE_MCS_CHAN_WIDTH_40) /* TODO */
-                       info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
-               if (lq_sta->last_rate_n_flags & RATE_HT_MCS_GF_MSK)
-                       info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
-       } else {
-               /* Check for invalid rates */
-               if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
-                   ((sband->band == IEEE80211_BAND_5GHZ) &&
-                    (rate_idx < IWL_FIRST_OFDM_RATE)))
-                       rate_idx = rate_lowest_index(sband, sta);
-               /* On valid 5 GHz rate, adjust index */
-               else if (sband->band == IEEE80211_BAND_5GHZ)
-                       rate_idx -= IWL_FIRST_OFDM_RATE;
-               info->control.rates[0].flags = 0;
-       }
-       info->control.rates[0].idx = rate_idx;
+       iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
+                                 info->band, &info->control.rates[0]);
+
        info->control.rates[0].count = 1;
 }
 
@@ -2261,6 +2224,24 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
        return &sta_priv->lq_sta;
 }
 
+static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+                                      int nss)
+{
+       u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+               (0x3 << (2 * (nss - 1)));
+       rx_mcs >>= (2 * (nss - 1));
+
+       if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
+               return IWL_RATE_MCS_7_INDEX;
+       else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
+               return IWL_RATE_MCS_8_INDEX;
+       else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
+               return IWL_RATE_MCS_9_INDEX;
+
+       WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
+       return -1;
+}
+
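The VHT rx_mcs_map packs the supported-MCS value for each spatial stream into two bits, which is why the helper above masks with 0x3 << (2 * (nss - 1)). A worked example for a hypothetical station advertising MCS 0-9 on both streams (rx_mcs_map == 0xfffa):

    /* nss == 2: bits [3:2] of 0xfffa are 0b10, i.e.
     * IEEE80211_VHT_MCS_SUPPORT_0_9, so the helper returns
     * IWL_RATE_MCS_9_INDEX (nss == 1, bits [1:0], gives the same).
     */
    u16 rx_mcs = (0xfffa & (0x3 << 2)) >> 2;        /* == 2 */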
 /*
  * Called after adding a new station to initialize rate scaling
  */
@@ -2270,6 +2251,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        int i, j;
        struct ieee80211_hw *hw = mvm->hw;
        struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+       struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
        struct iwl_mvm_sta *sta_priv;
        struct iwl_lq_sta *lq_sta;
        struct ieee80211_supported_band *sband;
@@ -2298,7 +2280,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 
        lq_sta->max_rate_idx = -1;
        lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
-       lq_sta->is_green = rs_use_green(sta);
        lq_sta->band = sband->band;
        /*
         * active legacy rates as per supported rates bitmap
@@ -2308,25 +2289,54 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        for_each_set_bit(i, &supp, BITS_PER_LONG)
                lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
 
-       /*
-        * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
-        * supp_rates[] does not; shift to convert format, force 9 MBits off.
-        */
-       lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
-       lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
-       lq_sta->active_siso_rate &= ~((u16)0x2);
-       lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+       /* TODO: should probably account for rx_highest for both HT/VHT */
+       if (!vht_cap || !vht_cap->vht_supported) {
+               /* active_siso_rate mask includes 9 MBits (bit 5),
+                * and CCK (bits 0-3), supp_rates[] does not;
+                * shift to convert format, force 9 MBits off.
+                */
+               lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+               lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+               lq_sta->active_siso_rate &= ~((u16)0x2);
+               lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
+
+               /* Same here */
+               lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+               lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+               lq_sta->active_mimo2_rate &= ~((u16)0x2);
+               lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+               lq_sta->is_vht = false;
+       } else {
+               int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+               if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+                       for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+                               if (i == IWL_RATE_9M_INDEX)
+                                       continue;
+
+                               lq_sta->active_siso_rate |= BIT(i);
+                       }
+               }
+
+               highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+               if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+                       for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+                               if (i == IWL_RATE_9M_INDEX)
+                                       continue;
 
-       /* Same here */
-       lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
-       lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
-       lq_sta->active_mimo2_rate &= ~((u16)0x2);
-       lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+                               lq_sta->active_mimo2_rate |= BIT(i);
+                       }
+               }
+
+               /* TODO: avoid MCS9 in 20MHz, which isn't valid for 11ac */
+               lq_sta->is_vht = true;
+       }
 
        IWL_DEBUG_RATE(mvm,
-                      "SISO-RATE=%X MIMO2-RATE=%X\n",
+                      "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n",
                       lq_sta->active_siso_rate,
-                      lq_sta->active_mimo2_rate);
+                      lq_sta->active_mimo2_rate,
+                      lq_sta->is_vht);
 
        /* These values will be overridden later */
        lq_sta->lq.single_stream_ant_msk =
@@ -2358,6 +2368,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 }
 
 static void rs_fill_link_cmd(struct iwl_mvm *mvm,
+                            struct ieee80211_sta *sta,
                             struct iwl_lq_sta *lq_sta, u32 new_rate)
 {
        struct iwl_scale_tbl_info tbl_type;
@@ -2429,7 +2440,6 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
                rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
                                         &rate_idx);
 
-
                /* Indicate to uCode which entries might be MIMO.
                 * If initial rate was MIMO, this will finally end up
                 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
@@ -2455,7 +2465,9 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
                }
 
                /* Don't allow HT rates after next pass.
-                * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+                * rs_get_lower_rate() will change type to LQ_LEGACY_A
+                * or LQ_LEGACY_G.
+                */
                use_ht_possible = 0;
 
                /* Override next rate if needed for debug purposes */
@@ -2474,12 +2486,9 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
        lq_cmd->agg_time_limit =
                cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
 
-       /*
-        * overwrite if needed, pass aggregation time limit
-        * to uCode in uSec - This is racy - but heh, at least it helps...
-        */
-       if (mvm && BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= 2)
-               lq_cmd->agg_time_limit = cpu_to_le16(1200);
+       if (sta)
+               lq_cmd->agg_time_limit =
+                       cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta));
 }
 
 static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -2586,16 +2595,18 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
            (iwl_fw_valid_tx_ant(mvm->fw) & ANT_B) ? "ANT_B," : "",
            (iwl_fw_valid_tx_ant(mvm->fw) & ANT_C) ? "ANT_C" : "");
        desc += sprintf(buff+desc, "lq type %s\n",
-          (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
-       if (is_Ht(tbl->lq_type)) {
+                       (is_legacy(tbl->lq_type)) ? "legacy" :
+                       is_vht(tbl->lq_type) ? "VHT" : "HT");
+       if (is_ht(tbl->lq_type)) {
                desc += sprintf(buff+desc, " %s",
                   (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
                   desc += sprintf(buff+desc, " %s",
-                  (tbl->is_ht40) ? "40MHz" : "20MHz");
-                  desc += sprintf(buff+desc, " %s %s %s\n",
+                                  (is_ht20(tbl)) ? "20MHz" :
+                                  (is_ht40(tbl)) ? "40MHz" :
+                                  (is_ht80(tbl)) ? "80MHz" : "BAD BW");
+                  desc += sprintf(buff+desc, " %s %s\n",
                                   (tbl->is_SGI) ? "SGI" : "",
-                  (lq_sta->is_green) ? "GF enabled" : "",
-                  (lq_sta->is_agg) ? "AGG on" : "");
+                                  (lq_sta->is_agg) ? "AGG on" : "");
        }
        desc += sprintf(buff+desc, "last tx rate=0x%X\n",
                        lq_sta->last_rate_n_flags);
@@ -2653,7 +2664,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
        int desc = 0;
        int i, j;
        ssize_t ret;
-
+       struct iwl_scale_tbl_info *tbl;
        struct iwl_lq_sta *lq_sta = file->private_data;
 
        buff = kmalloc(1024, GFP_KERNEL);
@@ -2661,21 +2672,23 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
                return -ENOMEM;
 
        for (i = 0; i < LQ_SIZE; i++) {
+               tbl = &(lq_sta->lq_info[i]);
                desc += sprintf(buff+desc,
-                               "%s type=%d SGI=%d HT40=%d DUP=0 GF=%d\n"
+                               "%s type=%d SGI=%d BW=%s DUP=0\n"
                                "rate=0x%X\n",
                                lq_sta->active_tbl == i ? "*" : "x",
-                               lq_sta->lq_info[i].lq_type,
-                               lq_sta->lq_info[i].is_SGI,
-                               lq_sta->lq_info[i].is_ht40,
-                               lq_sta->is_green,
-                               lq_sta->lq_info[i].current_rate);
+                               tbl->lq_type,
+                               tbl->is_SGI,
+                               is_ht20(tbl) ? "20MHz" :
+                               is_ht40(tbl) ? "40MHz" :
+                               is_ht80(tbl) ? "80MHz" : "ERR",
+                               tbl->current_rate);
                for (j = 0; j < IWL_RATE_COUNT; j++) {
                        desc += sprintf(buff+desc,
                                "counter=%d success=%d %%=%d\n",
-                               lq_sta->lq_info[i].win[j].counter,
-                               lq_sta->lq_info[i].win[j].success_counter,
-                               lq_sta->lq_info[i].win[j].success_ratio);
+                               tbl->win[j].counter,
+                               tbl->win[j].success_counter,
+                               tbl->win[j].success_ratio);
                }
        }
        ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
index 335cf16829023e6702ef1d5921c19cccb3196c84..5d5344f7070bf5de62a4cde464c573b43d01f8db 100644 (file)
 #include "iwl-trans.h"
 
 struct iwl_rs_rate_info {
-       u8 plcp;        /* uCode API:  IWL_RATE_6M_PLCP, etc. */
-       u8 plcp_siso;   /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
-       u8 plcp_mimo2;  /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+       u8 plcp;          /* uCode API:  IWL_RATE_6M_PLCP, etc. */
+       u8 plcp_ht_siso;  /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
+       u8 plcp_ht_mimo2; /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+       u8 plcp_vht_siso;
+       u8 plcp_vht_mimo2;
        u8 prev_rs;      /* previous rate used in rs algo */
        u8 next_rs;      /* next rate used in rs algo */
 };
@@ -83,35 +85,52 @@ enum {
 #define        IWL_RATE_11M_MASK  (1 << IWL_RATE_11M_INDEX)
 
 
-/* uCode API values for OFDM high-throughput (HT) bit rates */
+/* uCode API values for HT/VHT bit rates */
 enum {
-       IWL_RATE_SISO_6M_PLCP = 0,
-       IWL_RATE_SISO_12M_PLCP = 1,
-       IWL_RATE_SISO_18M_PLCP = 2,
-       IWL_RATE_SISO_24M_PLCP = 3,
-       IWL_RATE_SISO_36M_PLCP = 4,
-       IWL_RATE_SISO_48M_PLCP = 5,
-       IWL_RATE_SISO_54M_PLCP = 6,
-       IWL_RATE_SISO_60M_PLCP = 7,
-       IWL_RATE_MIMO2_6M_PLCP  = 0x8,
-       IWL_RATE_MIMO2_12M_PLCP = 0x9,
-       IWL_RATE_MIMO2_18M_PLCP = 0xa,
-       IWL_RATE_MIMO2_24M_PLCP = 0xb,
-       IWL_RATE_MIMO2_36M_PLCP = 0xc,
-       IWL_RATE_MIMO2_48M_PLCP = 0xd,
-       IWL_RATE_MIMO2_54M_PLCP = 0xe,
-       IWL_RATE_MIMO2_60M_PLCP = 0xf,
-       IWL_RATE_MIMO3_6M_PLCP  = 0x10,
-       IWL_RATE_MIMO3_12M_PLCP = 0x11,
-       IWL_RATE_MIMO3_18M_PLCP = 0x12,
-       IWL_RATE_MIMO3_24M_PLCP = 0x13,
-       IWL_RATE_MIMO3_36M_PLCP = 0x14,
-       IWL_RATE_MIMO3_48M_PLCP = 0x15,
-       IWL_RATE_MIMO3_54M_PLCP = 0x16,
-       IWL_RATE_MIMO3_60M_PLCP = 0x17,
-       IWL_RATE_SISO_INVM_PLCP,
-       IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
-       IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+       IWL_RATE_HT_SISO_MCS_0_PLCP = 0,
+       IWL_RATE_HT_SISO_MCS_1_PLCP = 1,
+       IWL_RATE_HT_SISO_MCS_2_PLCP = 2,
+       IWL_RATE_HT_SISO_MCS_3_PLCP = 3,
+       IWL_RATE_HT_SISO_MCS_4_PLCP = 4,
+       IWL_RATE_HT_SISO_MCS_5_PLCP = 5,
+       IWL_RATE_HT_SISO_MCS_6_PLCP = 6,
+       IWL_RATE_HT_SISO_MCS_7_PLCP = 7,
+       IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8,
+       IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9,
+       IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA,
+       IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB,
+       IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC,
+       IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD,
+       IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE,
+       IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF,
+       IWL_RATE_VHT_SISO_MCS_0_PLCP = 0,
+       IWL_RATE_VHT_SISO_MCS_1_PLCP = 1,
+       IWL_RATE_VHT_SISO_MCS_2_PLCP = 2,
+       IWL_RATE_VHT_SISO_MCS_3_PLCP = 3,
+       IWL_RATE_VHT_SISO_MCS_4_PLCP = 4,
+       IWL_RATE_VHT_SISO_MCS_5_PLCP = 5,
+       IWL_RATE_VHT_SISO_MCS_6_PLCP = 6,
+       IWL_RATE_VHT_SISO_MCS_7_PLCP = 7,
+       IWL_RATE_VHT_SISO_MCS_8_PLCP = 8,
+       IWL_RATE_VHT_SISO_MCS_9_PLCP = 9,
+       IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10,
+       IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11,
+       IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12,
+       IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13,
+       IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14,
+       IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15,
+       IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16,
+       IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17,
+       IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18,
+       IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19,
+       IWL_RATE_HT_SISO_MCS_INV_PLCP,
+       IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+       IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+       IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+       IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+       IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+       IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+       IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
 };
 
 #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
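The HT and VHT PLCP code spaces above overlap; it is the RATE_MCS_HT_MSK / RATE_MCS_VHT_MSK bit in rate_n_flags (see rate_n_flags_from_tbl() in rs.c), not the PLCP value alone, that selects the table. A concrete reading of the values:

    /* A PLCP value of 0x8 means HT MIMO2 MCS 0 when RATE_MCS_HT_MSK is set,
     * but VHT SISO MCS 8 when RATE_MCS_VHT_MSK is set.  The HT MCS 8/9 names
     * simply alias the invalid PLCP, since HT per-stream rates stop at MCS 7.
     */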
@@ -139,25 +158,33 @@ enum {
 #define IWL_RATE_DECREASE_TH           1920    /*  15% */
 
 /* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1      0
-#define IWL_LEGACY_SWITCH_ANTENNA2      1
-#define IWL_LEGACY_SWITCH_SISO          2
-#define IWL_LEGACY_SWITCH_MIMO2         3
+enum {
+       IWL_LEGACY_SWITCH_ANTENNA,
+       IWL_LEGACY_SWITCH_SISO,
+       IWL_LEGACY_SWITCH_MIMO2,
+       IWL_LEGACY_FIRST_ACTION = IWL_LEGACY_SWITCH_ANTENNA,
+       IWL_LEGACY_LAST_ACTION = IWL_LEGACY_SWITCH_MIMO2,
+};
 
 /* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1        0
-#define IWL_SISO_SWITCH_ANTENNA2        1
-#define IWL_SISO_SWITCH_MIMO2           2
-#define IWL_SISO_SWITCH_GI              3
+enum {
+       IWL_SISO_SWITCH_ANTENNA,
+       IWL_SISO_SWITCH_MIMO2,
+       IWL_SISO_SWITCH_GI,
+       IWL_SISO_FIRST_ACTION = IWL_SISO_SWITCH_ANTENNA,
+       IWL_SISO_LAST_ACTION = IWL_SISO_SWITCH_GI,
+};
 
 /* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1       0
-#define IWL_MIMO2_SWITCH_ANTENNA2       1
-#define IWL_MIMO2_SWITCH_SISO_A         2
-#define IWL_MIMO2_SWITCH_SISO_B         3
-#define IWL_MIMO2_SWITCH_GI             4
+enum {
+       IWL_MIMO2_SWITCH_SISO_A,
+       IWL_MIMO2_SWITCH_SISO_B,
+       IWL_MIMO2_SWITCH_GI,
+       IWL_MIMO2_FIRST_ACTION = IWL_MIMO2_SWITCH_SISO_A,
+       IWL_MIMO2_LAST_ACTION = IWL_MIMO2_SWITCH_GI,
+};
 
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
+#define IWL_MAX_SEARCH IWL_MIMO2_LAST_ACTION
 
 #define IWL_ACTION_LIMIT               3       /* # possible actions */
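With each action set renumbered from 0 and capped by a *_LAST_ACTION alias, the wrap-around in rs.c's rs_move_next_action() becomes a plain modular increment; for the legacy set above:

    /* IWL_LEGACY_SWITCH_MIMO2 == IWL_LEGACY_LAST_ACTION == 2, so
     * (IWL_LEGACY_SWITCH_MIMO2 + 1) % (IWL_LEGACY_LAST_ACTION + 1)
     *         == 3 % 3 == 0 == IWL_LEGACY_SWITCH_ANTENNA
     */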
 
@@ -188,20 +215,31 @@ enum {
 
 enum iwl_table_type {
        LQ_NONE,
-       LQ_G,           /* legacy types */
-       LQ_A,
-       LQ_SISO,        /* high-throughput types */
-       LQ_MIMO2,
+       LQ_LEGACY_G,    /* legacy types */
+       LQ_LEGACY_A,
+       LQ_HT_SISO,     /* HT types */
+       LQ_HT_MIMO2,
+       LQ_VHT_SISO,    /* VHT types */
+       LQ_VHT_MIMO2,
        LQ_MAX,
 };
 
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) is_mimo2(tbl)
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
+#define is_legacy(tbl) (((tbl) == LQ_LEGACY_G) || ((tbl) == LQ_LEGACY_A))
+#define is_ht_siso(tbl) ((tbl) == LQ_HT_SISO)
+#define is_ht_mimo2(tbl) ((tbl) == LQ_HT_MIMO2)
+#define is_vht_siso(tbl) ((tbl) == LQ_VHT_SISO)
+#define is_vht_mimo2(tbl) ((tbl) == LQ_VHT_MIMO2)
+#define is_siso(tbl) (is_ht_siso(tbl) || is_vht_siso(tbl))
+#define is_mimo2(tbl) (is_ht_mimo2(tbl) || is_vht_mimo2(tbl))
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_ht(tbl) (is_ht_siso(tbl) || is_ht_mimo2(tbl))
+#define is_vht(tbl) (is_vht_siso(tbl) || is_vht_mimo2(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_LEGACY_A)
+#define is_g_band(tbl) ((tbl) == LQ_LEGACY_G)
+
+#define is_ht20(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_20)
+#define is_ht40(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_40)
+#define is_ht80(tbl) (tbl->bw == RATE_MCS_CHAN_WIDTH_80)
 
 #define IWL_MAX_MCS_DISPLAY_SIZE       12
 
@@ -232,7 +270,7 @@ struct iwl_scale_tbl_info {
        enum iwl_table_type lq_type;
        u8 ant_type;
        u8 is_SGI;      /* 1 = short guard interval */
-       u8 is_ht40;     /* 1 = 40 MHz channel width */
+       u32 bw;         /* channel bandwidth; RATE_MCS_CHAN_WIDTH_XX */
        u8 action;      /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
       u8 max_search;  /* maximum number of tables we can search */
        s32 *expected_tpt;      /* throughput metrics; expected_tpt_G, etc. */
@@ -262,7 +300,7 @@ struct iwl_lq_sta {
        u64 flush_timer;        /* time staying in mode before new search */
 
        u8 action_counter;      /* # mode-switch actions tried */
-       u8 is_green;
+       bool is_vht;
        enum ieee80211_band band;
 
        /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
@@ -314,9 +352,8 @@ static inline u8 num_of_ant(u8 mask)
 }
 
 /* Initialize station's rate scaling information after adding station */
-extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
-                                struct ieee80211_sta *sta,
-                                enum ieee80211_band band);
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+                         enum ieee80211_band band);
 
 /**
  * iwl_rate_control_register - Register the rate control algorithm callbacks
@@ -328,7 +365,7 @@ extern void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm,
  * ieee80211_register_hw
  *
  */
-extern int iwl_mvm_rate_control_register(void);
+int iwl_mvm_rate_control_register(void);
 
 /**
  * iwl_rate_control_unregister - Unregister the rate control callbacks
@@ -336,7 +373,7 @@ extern int iwl_mvm_rate_control_register(void);
  * This should be called after calling ieee80211_unregister_hw, but before
  * the driver is unloaded.
  */
-extern void iwl_mvm_rate_control_unregister(void);
+void iwl_mvm_rate_control_unregister(void);
 
 struct iwl_mvm_sta;
 
index 2a8cb5a60535d161f410116c32967cbd9a983987..a4af5019a4960bc1afe1e0b9a5c5296381d4f590 100644 (file)
@@ -422,6 +422,27 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
 
        mvmvif->bf_data.ave_beacon_signal = sig;
 
+       /* BT Coex */
+       if (mvmvif->bf_data.bt_coex_min_thold !=
+           mvmvif->bf_data.bt_coex_max_thold) {
+               last_event = mvmvif->bf_data.last_bt_coex_event;
+               if (sig > mvmvif->bf_data.bt_coex_max_thold &&
+                   (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
+                    last_event == 0)) {
+                       mvmvif->bf_data.last_bt_coex_event = sig;
+                       IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
+                                    sig);
+                       iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
+               } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
+                          (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
+                           last_event == 0)) {
+                       mvmvif->bf_data.last_bt_coex_event = sig;
+                       IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
+                                    sig);
+                       iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
+               }
+       }
+
        if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
                return;
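
The BT-coex block added above is a plain hysteresis: an event fires only when the signal crosses one threshold after last having been beyond the other (or when no event has been recorded yet), so fluctuations between the two thresholds stay silent. A self-contained userspace sketch of the same logic (illustrative; the thresholds and signal values are made up):

#include <stdio.h>

enum rssi_event { RSSI_NONE, RSSI_HIGH, RSSI_LOW };

static enum rssi_event bt_coex_check(int sig, int min_thold, int max_thold,
                                     int *last_event)
{
        if (min_thold == max_thold)        /* hysteresis disabled */
                return RSSI_NONE;

        if (sig > max_thold &&
            (*last_event <= min_thold || *last_event == 0)) {
                *last_event = sig;
                return RSSI_HIGH;
        }
        if (sig < min_thold &&
            (*last_event >= max_thold || *last_event == 0)) {
                *last_event = sig;
                return RSSI_LOW;
        }
        return RSSI_NONE;
}

int main(void)
{
        int last = 0;
        printf("%d\n", bt_coex_check(-40, -70, -50, &last)); /* HIGH */
        printf("%d\n", bt_coex_check(-45, -70, -50, &last)); /* none: no new crossing */
        printf("%d\n", bt_coex_check(-80, -70, -50, &last)); /* LOW */
        return 0;
}
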
 
index 9a7ab84953000234b463ac1636cabcf506b95433..dff7592e1ff84e5c3f6a05d673e2a39514cbb870 100644 (file)
 static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
 {
        u16 rx_chain;
-       u8 rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
+       u8 rx_ant;
 
+       if (mvm->scan_rx_ant != ANT_NONE)
+               rx_ant = mvm->scan_rx_ant;
+       else
+               rx_ant = iwl_fw_valid_rx_ant(mvm->fw);
        rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
        rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
        rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -93,10 +97,10 @@ static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif)
 
 static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif)
 {
-       if (vif->bss_conf.assoc)
-               return cpu_to_le32(vif->bss_conf.beacon_int);
-       else
+       if (!vif->bss_conf.assoc)
                return 0;
+
+       return cpu_to_le32(ieee80211_tu_to_usec(vif->bss_conf.beacon_int));
 }
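
The suspend time is now handed to the firmware in microseconds instead of raw TUs; one TU is 1024 µs, so a typical 100 TU beacon interval becomes 102400 µs. A tiny illustrative helper showing the conversion that ieee80211_tu_to_usec() performs (the helper below is a sketch, not the mac80211 API):

#include <stdio.h>

/* 1 TU (time unit) = 1024 microseconds per 802.11; illustrative helper. */
static unsigned int tu_to_usec(unsigned int tu)
{
        return tu * 1024;
}

int main(void)
{
        printf("%u\n", tu_to_usec(100)); /* typical beacon interval -> 102400 us */
        return 0;
}
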
 
 static inline __le32
@@ -133,11 +137,12 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
  * request.
  */
 static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
-                                   struct cfg80211_scan_request *req)
+                                   struct cfg80211_scan_request *req,
+                                   int first)
 {
        int fw_idx, req_idx;
 
-       for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
+       for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx >= first;
             req_idx--, fw_idx++) {
                cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
                cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
@@ -153,9 +158,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
  * just to notify that this scan is active and not passive.
  * In order to notify the FW of the number of SSIDs we wish to scan (including
  * the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first). The first SSID is already
- * included in the probe template, so we need to set only req->n_ssids - 1 bits
- * in addition to the first bit.
+ * one for each SSID, and set the active bit (first). If the first SSID is
+ * already included in the probe template, we need to set only
+ * req->n_ssids - 1 bits in addition to the first bit.
  */
 static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
 {
@@ -170,7 +175,8 @@ static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
 }
 
 static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
-                                      struct cfg80211_scan_request *req)
+                                      struct cfg80211_scan_request *req,
+                                      bool basic_ssid)
 {
        u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
        u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
@@ -178,10 +184,14 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
        struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
                (cmd->data + le16_to_cpu(cmd->tx_cmd.len));
        int i;
+       int type = BIT(req->n_ssids) - 1;
+
+       if (!basic_ssid)
+               type |= BIT(req->n_ssids);
 
        for (i = 0; i < cmd->channel_count; i++) {
                chan->channel = cpu_to_le16(req->channels[i]->hw_value);
-               chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
+               chan->type = cpu_to_le32(type);
                if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
                        chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
                chan->active_dwell = cpu_to_le16(active_dwell);
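
Putting the two hunks above together, each channel's type word carries one bit per direct-scan SSID, an extra bit when the probe template has no SSID of its own, and loses its active bit on passive channels. A simplified userspace sketch of that computation (host byte order; the SCAN_CHANNEL_TYPE_ACTIVE value here is a placeholder):

#include <stdio.h>

#define BIT(n) (1u << (n))
#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)     /* placeholder value for the sketch */

/* Mirror of the chan->type computation above (simplified). */
static unsigned int chan_type(int n_ssids, int basic_ssid, int passive)
{
        unsigned int type = BIT(n_ssids) - 1;   /* one bit per direct-scan SSID */

        if (!basic_ssid)                    /* extra bit when the template carries no SSID */
                type |= BIT(n_ssids);
        if (passive)                        /* passive channels must not probe actively */
                type &= ~SCAN_CHANNEL_TYPE_ACTIVE;
        return type;
}

int main(void)
{
        printf("0x%x\n", chan_type(2, 1, 0));   /* 0x3: two SSID bits, active */
        printf("0x%x\n", chan_type(2, 0, 1));   /* 0x6: extra bit set, active bit cleared */
        return 0;
}
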
@@ -268,6 +278,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
        u32 status;
        int ssid_len = 0;
        u8 *ssid = NULL;
+       bool basic_ssid = !(mvm->fw->ucode_capa.flags &
+                          IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
 
        lockdep_assert_held(&mvm->mutex);
        BUG_ON(mvm->scan_cmd == NULL);
@@ -302,14 +314,16 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
        if (req->n_ssids > 0) {
                cmd->passive2active = cpu_to_le16(1);
                cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
-               ssid = req->ssids[0].ssid;
-               ssid_len = req->ssids[0].ssid_len;
+               if (basic_ssid) {
+                       ssid = req->ssids[0].ssid;
+                       ssid_len = req->ssids[0].ssid_len;
+               }
        } else {
                cmd->passive2active = 0;
                cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
        }
 
-       iwl_mvm_scan_fill_ssids(cmd, req);
+       iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
 
        cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
        cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
@@ -326,7 +340,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
                            req->ie, req->ie_len,
                            mvm->fw->ucode_capa.max_probe_length));
 
-       iwl_mvm_scan_fill_channels(cmd, req);
+       iwl_mvm_scan_fill_channels(cmd, req, basic_ssid);
 
        cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
                le16_to_cpu(cmd->tx_cmd.len) +
@@ -377,6 +391,21 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
        return 0;
 }
 
+int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
+                                 struct iwl_rx_cmd_buffer *rxb,
+                                 struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_sched_scan_results *notif = (void *)pkt->data;
+
+       if (notif->client_bitmap & SCAN_CLIENT_SCHED_SCAN) {
+               IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+               ieee80211_sched_scan_results(mvm->hw);
+       }
+
+       return 0;
+}
+
 static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
                                     struct iwl_rx_packet *pkt, void *data)
 {
@@ -394,6 +423,11 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
                        return false;
                }
 
+               /*
+                * If scan cannot be aborted, it means that we had a
+                * SCAN_COMPLETE_NOTIFICATION in the pipe and it called
+                * ieee80211_scan_completed already.
+                */
                IWL_DEBUG_SCAN(mvm, "Scan cannot be aborted, exit now: %d\n",
                               *resp);
                return true;
@@ -417,14 +451,19 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
                                               SCAN_COMPLETE_NOTIFICATION };
        int ret;
 
+       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+               return;
+
        iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
                                   scan_abort_notif,
                                   ARRAY_SIZE(scan_abort_notif),
                                   iwl_mvm_scan_abort_notif, NULL);
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
+       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD,
+                                  CMD_SYNC | CMD_SEND_IN_RFKILL, 0, NULL);
        if (ret) {
                IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
+               /* mac80211's state will be cleaned in the fw_restart flow */
                goto out_remove_notif;
        }
 
@@ -437,3 +476,406 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
 out_remove_notif:
        iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
 }
+
+int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
+                                          struct iwl_rx_cmd_buffer *rxb,
+                                          struct iwl_device_cmd *cmd)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
+
+       IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
+                      scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+                      "completed" : "aborted");
+
+       mvm->scan_status = IWL_MVM_SCAN_NONE;
+       ieee80211_sched_scan_stopped(mvm->hw);
+
+       return 0;
+}
+
+static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif,
+                                         struct ieee80211_sched_scan_ies *ies,
+                                         enum ieee80211_band band,
+                                         struct iwl_tx_cmd *cmd,
+                                         u8 *data)
+{
+       u16 cmd_len;
+
+       cmd->tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+       cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+       cmd->sta_id = mvm->aux_sta.sta_id;
+
+       cmd->rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm, band, false);
+
+       cmd_len = iwl_mvm_fill_probe_req((struct ieee80211_mgmt *)data,
+                                        vif->addr,
+                                        1, NULL, 0,
+                                        ies->ie[band], ies->len[band],
+                                        SCAN_OFFLOAD_PROBE_REQ_SIZE);
+       cmd->len = cpu_to_le16(cmd_len);
+}
+
+static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
+                              struct ieee80211_vif *vif,
+                              struct cfg80211_sched_scan_request *req,
+                              struct iwl_scan_offload_cmd *scan)
+{
+       scan->channel_count =
+               mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
+               mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+       scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
+       scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
+       scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
+       scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
+       scan->max_out_time = cpu_to_le32(200 * 1024);
+       scan->suspend_time = iwl_mvm_scan_suspend_time(vif);
+       scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+                                         MAC_FILTER_IN_BEACON);
+       scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
+       scan->rep_count = cpu_to_le32(1);
+}
+
+static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+       int i;
+
+       for (i = 0; i < PROBE_OPTION_MAX; i++) {
+               if (!ssid_list[i].len)
+                       break;
+               if (ssid_list[i].len == ssid_len &&
+                   !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+                       return i;
+       }
+       return -1;
+}
+
+static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
+                                       struct iwl_scan_offload_cmd *scan,
+                                       u32 *ssid_bitmap)
+{
+       int i, j;
+       int index;
+
+       /*
+        * copy SSIDs from match list.
+        * iwl_config_sched_scan_profiles() uses the order of these ssids to
+        * config match list.
+        */
+       for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+               scan->direct_scan[i].id = WLAN_EID_SSID;
+               scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
+               memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
+                      scan->direct_scan[i].len);
+       }
+
+       /* add SSIDs from scan SSID list */
+       *ssid_bitmap = 0;
+       for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) {
+               index = iwl_ssid_exist(req->ssids[j].ssid,
+                                      req->ssids[j].ssid_len,
+                                      scan->direct_scan);
+               if (index < 0) {
+                       if (!req->ssids[j].ssid_len)
+                               continue;
+                       scan->direct_scan[i].id = WLAN_EID_SSID;
+                       scan->direct_scan[i].len = req->ssids[j].ssid_len;
+                       memcpy(scan->direct_scan[i].ssid, req->ssids[j].ssid,
+                              scan->direct_scan[i].len);
+                       *ssid_bitmap |= BIT(i + 1);
+                       i++;
+               } else {
+                       *ssid_bitmap |= BIT(index + 1);
+               }
+       }
+}
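
The function above keeps a single direct-scan list: match-set SSIDs occupy the first slots (the profile config later refers to them by index), and scan SSIDs are appended only if not already present, with the bitmap recording each one's slot as BIT(index + 1). A small userspace sketch of that dedup-and-bitmap idea (names and sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_SSIDS 20
#define BIT(n) (1u << (n))

struct ssid { unsigned char len; char data[32]; };

/* Return the slot of s in list[0..n), or -1 if absent (mirrors iwl_ssid_exist). */
static int find_ssid(const struct ssid *list, int n, const char *s, unsigned char len)
{
        for (int i = 0; i < n; i++)
                if (list[i].len == len && !memcmp(list[i].data, s, len))
                        return i;
        return -1;
}

int main(void)
{
        struct ssid list[MAX_SSIDS] = {{4, "home"}, {6, "office"}}; /* from match sets */
        int n = 2;
        unsigned int bitmap = 0;
        const char *scan_ssids[] = {"office", "guest"};

        for (int j = 0; j < 2 && n < MAX_SSIDS; j++) {
                unsigned char len = (unsigned char)strlen(scan_ssids[j]);
                int idx = find_ssid(list, n, scan_ssids[j], len);

                if (idx < 0) {                   /* new SSID: append it */
                        list[n].len = len;
                        memcpy(list[n].data, scan_ssids[j], len);
                        idx = n++;
                }
                bitmap |= BIT(idx + 1);          /* mirrors the driver's BIT(index + 1) */
        }
        printf("bitmap=0x%x entries=%d\n", bitmap, n); /* bitmap=0xc entries=3 */
        return 0;
}
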
+
+static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
+                                 struct cfg80211_sched_scan_request *req,
+                                 struct iwl_scan_channel_cfg *channels,
+                                 enum ieee80211_band band,
+                                 int *head, int *tail,
+                                 u32 ssid_bitmap)
+{
+       struct ieee80211_supported_band *s_band;
+       int n_probes = req->n_ssids;
+       int n_channels = req->n_channels;
+       u8 active_dwell, passive_dwell;
+       int i, j, index = 0;
+       bool partial;
+
+       /*
+        * We have to configure all supported channels, even if we don't want to
+        * scan on them, but we have to send channels in the order that we want
+        * scan. So add requested channels to the head of the list and others
+        * to the end.
+        */
+       active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
+       passive_dwell = iwl_mvm_get_passive_dwell(band);
+       s_band = &mvm->nvm_data->bands[band];
+
+       for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
+               partial = false;
+               for (j = 0; j < n_channels; j++)
+                       if (s_band->channels[i].center_freq ==
+                                               req->channels[j]->center_freq) {
+                               index = *head;
+                               (*head)++;
+                               /*
+                                * Channels that came with the request will be
+                                * in a partial scan.
+                                */
+                               partial = true;
+                               break;
+                       }
+               if (!partial) {
+                       index = *tail;
+                       (*tail)--;
+               }
+               channels->channel_number[index] =
+                       cpu_to_le16(ieee80211_frequency_to_channel(
+                                       s_band->channels[i].center_freq));
+               channels->dwell_time[index][0] = active_dwell;
+               channels->dwell_time[index][1] = passive_dwell;
+
+               channels->iter_count[index] = cpu_to_le16(1);
+               channels->iter_interval[index] = 0;
+
+               if (!(s_band->channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
+                       channels->type[index] |=
+                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
+
+               channels->type[index] |=
+                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
+               if (partial)
+                       channels->type[index] |=
+                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
+
+               if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
+                       channels->type[index] |=
+                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
+
+               /* scan for all SSIDs from req->ssids */
+               channels->type[index] |= cpu_to_le32(ssid_bitmap);
+       }
+}
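
As the comment in iwl_build_channel_cfg() explains, every supported channel must appear in the table, but the firmware walks it in order, so requested channels are written from the head of the array and the remaining ones from the tail. A minimal userspace sketch of that two-index fill (illustrative only):

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        const int supported[] = {1, 2, 3, 4, 5, 6};      /* all channels the NIC knows */
        const int requested[] = {3, 5};                  /* channels the scan asked for */
        int order[6];
        int head = 0, tail = 5;

        for (int i = 0; i < 6 && head <= tail; i++) {
                bool wanted = false;
                for (int j = 0; j < 2; j++)
                        if (supported[i] == requested[j])
                                wanted = true;
                /* requested channels fill from the front, the rest from the back */
                order[wanted ? head++ : tail--] = supported[i];
        }

        for (int i = 0; i < 6; i++)
                printf("%d ", order[i]);                 /* 3 5 6 4 2 1 */
        printf("\n");
        return 0;
}
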
+
+int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
+                             struct ieee80211_vif *vif,
+                             struct cfg80211_sched_scan_request *req,
+                             struct ieee80211_sched_scan_ies *ies)
+{
+       int supported_bands = 0;
+       int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
+       int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+       int head = 0;
+       int tail = band_2ghz + band_5ghz;
+       u32 ssid_bitmap;
+       int cmd_len;
+       int ret;
+
+       struct iwl_scan_offload_cfg *scan_cfg;
+       struct iwl_host_cmd cmd = {
+               .id = SCAN_OFFLOAD_CONFIG_CMD,
+               .flags = CMD_SYNC,
+       };
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (band_2ghz)
+               supported_bands++;
+       if (band_5ghz)
+               supported_bands++;
+
+       cmd_len = sizeof(struct iwl_scan_offload_cfg) +
+                               supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE;
+
+       scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
+       if (!scan_cfg)
+               return -ENOMEM;
+
+       iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd);
+       scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
+
+       iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
+       /* build tx frames for supported bands */
+       if (band_2ghz) {
+               iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
+                                             IEEE80211_BAND_2GHZ,
+                                             &scan_cfg->scan_cmd.tx_cmd[0],
+                                             scan_cfg->data);
+               iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+                                     IEEE80211_BAND_2GHZ, &head, &tail,
+                                     ssid_bitmap);
+       }
+       if (band_5ghz) {
+               iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
+                                             IEEE80211_BAND_5GHZ,
+                                             &scan_cfg->scan_cmd.tx_cmd[1],
+                                             scan_cfg->data +
+                                               SCAN_OFFLOAD_PROBE_REQ_SIZE);
+               iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
+                                     IEEE80211_BAND_5GHZ, &head, &tail,
+                                     ssid_bitmap);
+       }
+
+       cmd.data[0] = scan_cfg;
+       cmd.len[0] = cmd_len;
+       cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+
+       IWL_DEBUG_SCAN(mvm, "Sending scheduled scan config\n");
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       kfree(scan_cfg);
+       return ret;
+}
+
+int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+                                      struct cfg80211_sched_scan_request *req)
+{
+       struct iwl_scan_offload_profile *profile;
+       struct iwl_scan_offload_profile_cfg *profile_cfg;
+       struct iwl_scan_offload_blacklist *blacklist;
+       struct iwl_host_cmd cmd = {
+               .id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+               .flags = CMD_SYNC,
+               .len[1] = sizeof(*profile_cfg),
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+               .dataflags[1] = IWL_HCMD_DFL_NOCOPY,
+       };
+       int blacklist_len;
+       int i;
+       int ret;
+
+       if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
+               return -EIO;
+
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
+               blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
+       else
+               blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
+
+       blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
+       if (!blacklist)
+               return -ENOMEM;
+
+       profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
+       if (!profile_cfg) {
+               ret = -ENOMEM;
+               goto free_blacklist;
+       }
+
+       cmd.data[0] = blacklist;
+       cmd.len[0] = sizeof(*blacklist) * blacklist_len;
+       cmd.data[1] = profile_cfg;
+
+       /* No blacklist configuration */
+
+       profile_cfg->num_profiles = req->n_match_sets;
+       profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
+       profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
+       profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+
+       for (i = 0; i < req->n_match_sets; i++) {
+               profile = &profile_cfg->profiles[i];
+               profile->ssid_index = i;
+               /* Support any cipher and auth algorithm */
+               profile->unicast_cipher = 0xff;
+               profile->auth_alg = 0xff;
+               profile->network_type = IWL_NETWORK_TYPE_ANY;
+               profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+               profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+       }
+
+       IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
+
+       ret = iwl_mvm_send_cmd(mvm, &cmd);
+       kfree(profile_cfg);
+free_blacklist:
+       kfree(blacklist);
+
+       return ret;
+}
+
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+                            struct cfg80211_sched_scan_request *req)
+{
+       struct iwl_scan_offload_req scan_req = {
+               .watchdog = IWL_SCHED_SCAN_WATCHDOG,
+
+               .schedule_line[0].iterations = IWL_FAST_SCHED_SCAN_ITERATIONS,
+               .schedule_line[0].delay = req->interval / 1000,
+               .schedule_line[0].full_scan_mul = 1,
+
+               .schedule_line[1].iterations = 0xff,
+               .schedule_line[1].delay = req->interval / 1000,
+               .schedule_line[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER,
+       };
+
+       if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+               IWL_DEBUG_SCAN(mvm,
+                              "Sending scheduled scan with filtering, filter len %d\n",
+                              req->n_match_sets);
+               scan_req.flags |=
+                               cpu_to_le16(IWL_SCAN_OFFLOAD_FLAG_FILTER_SSID);
+       } else {
+               IWL_DEBUG_SCAN(mvm,
+                              "Sending Scheduled scan without filtering\n");
+       }
+
+       return iwl_mvm_send_cmd_pdu(mvm, SCAN_OFFLOAD_REQUEST_CMD, CMD_SYNC,
+                                   sizeof(scan_req), &scan_req);
+}
+
+static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
+{
+       int ret;
+       struct iwl_host_cmd cmd = {
+               .id = SCAN_OFFLOAD_ABORT_CMD,
+               .flags = CMD_SYNC,
+       };
+       u32 status;
+
+       /* Exit immediately with an error if the device is not ready to
+        * receive the scan abort command or no scheduled scan is currently
+        * running. */
+       if (mvm->scan_status != IWL_MVM_SCAN_SCHED)
+               return -EIO;
+
+       ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+       if (ret)
+               return ret;
+
+       if (status != CAN_ABORT_STATUS) {
+               /*
+                * The scan abort will return 1 for success or
+                * 2 for "failure".  A failure condition can be
+                * due to simply not being in an active scan, which
+                * can occur if we send the scan abort before the
+                * microcode has notified us that a scan is completed.
+                */
+               IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
+void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
+{
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
+               IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
+               return;
+       }
+
+       ret = iwl_mvm_send_sched_scan_abort(mvm);
+       if (ret)
+               IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
+       else
+               IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
+}
index 44add291531bb505a6d388fde3fdf961b7cc14e2..329952363a54f3b2c0913d018c401e9c7004fcfc 100644 (file)
 #include "sta.h"
 #include "rs.h"
 
+static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
+                                        struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
+{
+       memset(cmd_v5, 0, sizeof(*cmd_v5));
+
+       cmd_v5->add_modify = cmd_v6->add_modify;
+       cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
+       cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
+       memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
+       cmd_v5->sta_id = cmd_v6->sta_id;
+       cmd_v5->modify_mask = cmd_v6->modify_mask;
+       cmd_v5->station_flags = cmd_v6->station_flags;
+       cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
+       cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
+       cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
+       cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
+       cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
+       cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
+       cmd_v5->assoc_id = cmd_v6->assoc_id;
+       cmd_v5->beamform_flags = cmd_v6->beamform_flags;
+       cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+}
+
+static void
+iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
+                                     struct iwl_mvm_add_sta_cmd_v5 *sta_cmd,
+                                     u32 mac_id_n_color)
+{
+       memset(sta_cmd, 0, sizeof(*sta_cmd));
+
+       sta_cmd->sta_id = key_cmd->sta_id;
+       sta_cmd->add_modify = STA_MODE_MODIFY;
+       sta_cmd->modify_mask = STA_MODIFY_KEY;
+       sta_cmd->mac_id_n_color = cpu_to_le32(mac_id_n_color);
+
+       sta_cmd->key.key_offset = key_cmd->key_offset;
+       sta_cmd->key.key_flags = key_cmd->key_flags;
+       memcpy(sta_cmd->key.key, key_cmd->key, sizeof(sta_cmd->key.key));
+       sta_cmd->key.tkip_rx_tsc_byte2 = key_cmd->tkip_rx_tsc_byte2;
+       memcpy(sta_cmd->key.tkip_rx_ttak, key_cmd->tkip_rx_ttak,
+              sizeof(sta_cmd->key.tkip_rx_ttak));
+}
+
+static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
+                                          struct iwl_mvm_add_sta_cmd_v6 *cmd,
+                                          int *status)
+{
+       struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
+
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+               return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
+                                                  cmd, status);
+
+       iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+       return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
+                                          &cmd_v5, status);
+}
+
+static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
+                                   struct iwl_mvm_add_sta_cmd_v6 *cmd)
+{
+       struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
+
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+               return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
+                                           sizeof(*cmd), cmd);
+
+       iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+
+       return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
+                                   &cmd_v5);
+}
+
+static int
+iwl_mvm_send_add_sta_key_cmd_status(struct iwl_mvm *mvm,
+                                   struct iwl_mvm_add_sta_key_cmd *cmd,
+                                   u32 mac_id_n_color,
+                                   int *status)
+{
+       struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
+
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+               return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY,
+                                                  sizeof(*cmd), cmd, status);
+
+       iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
+
+       return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(sta_cmd),
+                                          &sta_cmd, status);
+}
+
+static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
+                                       u32 flags,
+                                       struct iwl_mvm_add_sta_key_cmd *cmd,
+                                       u32 mac_id_n_color)
+{
+       struct iwl_mvm_add_sta_cmd_v5 sta_cmd;
+
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_STA_KEY_CMD)
+               return iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, flags,
+                                           sizeof(*cmd), cmd);
+
+       iwl_mvm_add_sta_key_to_add_sta_cmd_v5(cmd, &sta_cmd, mac_id_n_color);
+
+       return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(sta_cmd),
+                                   &sta_cmd);
+}
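
The four helpers above all follow one pattern: when the firmware advertises IWL_UCODE_TLV_FLAGS_STA_KEY_CMD the new v6/ADD_STA_KEY layout is sent as-is, otherwise the command is down-converted field by field to the v5 layout. A stripped-down userspace sketch of that dispatch-on-capability idea (the types and flag value below are placeholders, not the driver's):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CAP_NEW_CMD_LAYOUT 0x1            /* placeholder capability bit */

struct cmd_v6 { uint8_t sta_id; uint8_t extra_v6_field; };
struct cmd_v5 { uint8_t sta_id; };

/* Down-convert: copy only the fields the old firmware understands. */
static void cmd_v6_to_v5(const struct cmd_v6 *v6, struct cmd_v5 *v5)
{
        memset(v5, 0, sizeof(*v5));
        v5->sta_id = v6->sta_id;
}

static int send_cmd(const void *buf, size_t len)
{
        printf("sending %zu byte command\n", len);
        return 0;
}

static int send_add_sta(uint32_t fw_caps, const struct cmd_v6 *cmd)
{
        struct cmd_v5 v5;

        if (fw_caps & CAP_NEW_CMD_LAYOUT)         /* new firmware: send v6 directly */
                return send_cmd(cmd, sizeof(*cmd));

        cmd_v6_to_v5(cmd, &v5);                   /* old firmware: fall back to v5 */
        return send_cmd(&v5, sizeof(v5));
}

int main(void)
{
        struct cmd_v6 cmd = { .sta_id = 3, .extra_v6_field = 1 };
        send_add_sta(0, &cmd);                    /* old firmware path */
        send_add_sta(CAP_NEW_CMD_LAYOUT, &cmd);   /* new firmware path */
        return 0;
}
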
+
 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
 {
        int sta_id;
@@ -87,7 +196,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                           bool update)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd add_sta_cmd;
+       struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
        int ret;
        u32 status;
        u32 agg_size = 0, mpdu_dens = 0;
@@ -175,8 +284,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
-                                         &add_sta_cmd, &status);
+       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &add_sta_cmd, &status);
        if (ret)
                return ret;
 
@@ -229,8 +337,12 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
                if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
                        mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
 
-       /* for HW restart - need to reset the seq_number etc... */
-       memset(mvm_sta->tid_data, 0, sizeof(mvm_sta->tid_data));
+       /* for HW restart - reset everything but the sequence number */
+       for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+               u16 seq = mvm_sta->tid_data[i].seq_number;
+               memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
+               mvm_sta->tid_data[i].seq_number = seq;
+       }
 
        ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
        if (ret)
@@ -256,7 +368,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                      bool drain)
 {
-       struct iwl_mvm_add_sta_cmd cmd = {};
+       struct iwl_mvm_add_sta_cmd_v6 cmd = {};
        int ret;
        u32 status;
 
@@ -269,8 +381,7 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
        cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-                                         &cmd, &status);
+       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
        if (ret)
                return ret;
 
@@ -469,13 +580,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
                                      const u8 *addr,
                                      u16 mac_id, u16 color)
 {
-       struct iwl_mvm_add_sta_cmd cmd;
+       struct iwl_mvm_add_sta_cmd_v6 cmd;
        int ret;
        u32 status;
 
        lockdep_assert_held(&mvm->mutex);
 
-       memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd));
+       memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
        cmd.sta_id = sta->sta_id;
        cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
                                                             color));
@@ -485,8 +596,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
        if (addr)
                memcpy(cmd.addr, addr, ETH_ALEN);
 
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-                                         &cmd, &status);
+       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
        if (ret)
                return ret;
 
@@ -534,10 +644,14 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           struct iwl_mvm_int_sta *bsta)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       static const u8 baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+       static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+       static const u8 *baddr = _baddr;
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (vif->type == NL80211_IFTYPE_ADHOC)
+               baddr = vif->bss_conf.bssid;
+
        if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
                return -ENOSPC;
 
@@ -614,7 +728,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                       int tid, u16 ssn, bool start)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd cmd = {};
+       struct iwl_mvm_add_sta_cmd_v6 cmd = {};
        int ret;
        u32 status;
 
@@ -638,8 +752,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                                  STA_MODIFY_REMOVE_BA_TID;
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-                                         &cmd, &status);
+       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
        if (ret)
                return ret;
 
@@ -674,7 +787,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                              int tid, u8 queue, bool start)
 {
        struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd cmd = {};
+       struct iwl_mvm_add_sta_cmd_v6 cmd = {};
        int ret;
        u32 status;
 
@@ -696,8 +809,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-                                         &cmd, &status);
+       ret = iwl_mvm_send_add_sta_cmd_status(mvm, &cmd, &status);
        if (ret)
                return ret;
 
@@ -743,13 +855,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        lockdep_assert_held(&mvm->mutex);
 
-       for (txq_id = IWL_MVM_FIRST_AGG_QUEUE;
-            txq_id <= IWL_MVM_LAST_AGG_QUEUE; txq_id++)
+       for (txq_id = mvm->first_agg_queue;
+            txq_id <= mvm->last_agg_queue; txq_id++)
                if (mvm->queue_to_mac80211[txq_id] ==
                    IWL_INVALID_MAC80211_QUEUE)
                        break;
 
-       if (txq_id > IWL_MVM_LAST_AGG_QUEUE) {
+       if (txq_id > mvm->last_agg_queue) {
                IWL_ERR(mvm, "Failed to allocate agg queue\n");
                return -EIO;
        }
@@ -987,10 +1099,11 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
                                u32 cmd_flags)
 {
        __le16 key_flags;
-       struct iwl_mvm_add_sta_cmd cmd = {};
+       struct iwl_mvm_add_sta_key_cmd cmd = {};
        int ret, status;
        u16 keyidx;
        int i;
+       u32 mac_id_n_color = mvm_sta->mac_id_n_color;
 
        keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
                 STA_KEY_FLG_KEYID_MSK;
@@ -1000,14 +1113,14 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
                key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
-               cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+               cmd.tkip_rx_tsc_byte2 = tkip_iv32;
                for (i = 0; i < 5; i++)
-                       cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
-               memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+                       cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+               memcpy(cmd.key, keyconf->key, keyconf->keylen);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
-               memcpy(cmd.key.key, keyconf->key, keyconf->keylen);
+               memcpy(cmd.key, keyconf->key, keyconf->keylen);
                break;
        default:
                WARN_ON(1);
@@ -1017,20 +1130,18 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
        if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
 
-       cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
-       cmd.key.key_offset = keyconf->hw_key_idx;
-       cmd.key.key_flags = key_flags;
-       cmd.add_modify = STA_MODE_MODIFY;
-       cmd.modify_mask = STA_MODIFY_KEY;
+       cmd.key_offset = keyconf->hw_key_idx;
+       cmd.key_flags = key_flags;
        cmd.sta_id = sta_id;
 
        status = ADD_STA_SUCCESS;
        if (cmd_flags == CMD_SYNC)
-               ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-                                                 &cmd, &status);
+               ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
+                                                         mac_id_n_color,
+                                                         &status);
        else
-               ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
-                                          sizeof(cmd), &cmd);
+               ret = iwl_mvm_send_add_sta_key_cmd(mvm, CMD_ASYNC, &cmd,
+                                                  mac_id_n_color);
 
        switch (status) {
        case ADD_STA_SUCCESS:
@@ -1197,7 +1308,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
                           struct ieee80211_key_conf *keyconf)
 {
        struct iwl_mvm_sta *mvm_sta;
-       struct iwl_mvm_add_sta_cmd cmd = {};
+       struct iwl_mvm_add_sta_key_cmd cmd = {};
        __le16 key_flags;
        int ret, status;
        u8 sta_id;
@@ -1252,17 +1363,14 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
        if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
                key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
 
-       cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
-       cmd.key.key_flags = key_flags;
-       cmd.key.key_offset = keyconf->hw_key_idx;
+       cmd.key_flags = key_flags;
+       cmd.key_offset = keyconf->hw_key_idx;
        cmd.sta_id = sta_id;
 
-       cmd.modify_mask = STA_MODIFY_KEY;
-       cmd.add_modify = STA_MODE_MODIFY;
-
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
-                                         &cmd, &status);
+       ret = iwl_mvm_send_add_sta_key_cmd_status(mvm, &cmd,
+                                                 mvm_sta->mac_id_n_color,
+                                                 &status);
 
        switch (status) {
        case ADD_STA_SUCCESS:
@@ -1309,7 +1417,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
                                struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd cmd = {
+       struct iwl_mvm_add_sta_cmd_v6 cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1317,7 +1425,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
        };
        int ret;
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
@@ -1331,7 +1439,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
                (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
                        STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
        struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
-       struct iwl_mvm_add_sta_cmd cmd = {
+       struct iwl_mvm_add_sta_cmd_v6 cmd = {
                .add_modify = STA_MODE_MODIFY,
                .sta_id = mvmsta->sta_id,
                .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
@@ -1346,7 +1454,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
        int ret;
 
        /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
-       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
index 94b265eb32b82bfb7a5f467cef60963f5da8d252..4dfc359a4bdda187cbb1fdcbda8b4cfe112c78fd 100644 (file)
@@ -293,10 +293,6 @@ struct iwl_mvm_sta {
        struct iwl_lq_sta lq_sta;
        struct ieee80211_vif *vif;
 
-#ifdef CONFIG_PM_SLEEP
-       u16 last_seq_ctl;
-#endif
-
        /* Temporary, until the new TLC controls the Tx protection */
        s8 tx_protection;
        bool tt_tx_protection;
diff --git a/drivers/net/wireless/iwlwifi/mvm/testmode.h b/drivers/net/wireless/iwlwifi/mvm/testmode.h
new file mode 100644 (file)
index 0000000..eb74391
--- /dev/null
@@ -0,0 +1,95 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_TESTMODE_H__
+#define __IWL_MVM_TESTMODE_H__
+
+/**
+ * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
+ * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
+ * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
+ * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
+ * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
+ */
+enum iwl_mvm_testmode_attrs {
+       IWL_MVM_TM_ATTR_UNSPEC,
+       IWL_MVM_TM_ATTR_CMD,
+       IWL_MVM_TM_ATTR_NOA_DURATION,
+       IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
+
+       /* keep last */
+       NUM_IWL_MVM_TM_ATTRS,
+       IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
+};
+
+/**
+ * enum iwl_mvm_testmode_commands - MVM testmode commands
+ * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
+ * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
+ */
+enum iwl_mvm_testmode_commands {
+       IWL_MVM_TM_CMD_SET_NOA,
+       IWL_MVM_TM_CMD_SET_BEACON_FILTER,
+};
+
+#endif /* __IWL_MVM_TESTMODE_H__ */
index 76a3c177e100ab28660ac9433fe5c26edc5a79e8..33cf56fdfc41f86b8bb517a8824740bdbb71a896 100644 (file)
@@ -387,7 +387,8 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
 
 void iwl_mvm_protect_session(struct iwl_mvm *mvm,
                             struct ieee80211_vif *vif,
-                            u32 duration, u32 min_duration)
+                            u32 duration, u32 min_duration,
+                            u32 max_delay)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
@@ -426,7 +427,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
                cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
 
        time_cmd.max_frags = TE_V2_FRAG_NONE;
-       time_cmd.max_delay = cpu_to_le32(500);
+       time_cmd.max_delay = cpu_to_le32(max_delay);
        /* TODO: why do we need to set interval = bi if it is not periodic? */
        time_cmd.interval = cpu_to_le32(1);
        time_cmd.duration = cpu_to_le32(duration);
index f86c51065ed3afe1e860f8fb15d50cc1f6eaef9b..d9c8d6cfa2db0c4a44a58b0ec2bc0eaf6fca94cc 100644 (file)
  * @duration: the duration of the session in TU.
  * @min_duration: will start a new session if the current session will end
  *     in less than min_duration.
+ * @max_delay: maximum delay before starting the time event (in TU)
  *
  * This function can be used to start a session protection which means that the
  * fw will stay on the channel for %duration_ms milliseconds. This function
  */
 void iwl_mvm_protect_session(struct iwl_mvm *mvm,
                             struct ieee80211_vif *vif,
-                            u32 duration, u32 min_duration);
+                            u32 duration, u32 min_duration,
+                            u32 max_delay);
 
 /**
  * iwl_mvm_stop_session_protection - cancel the session protection.
index e05440d90319b339f203a2f1ff9658a0d97f615e..43d97c33a75abcbf3833958b0c5cbca5a91e0950 100644 (file)
@@ -417,7 +417,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        spin_unlock(&mvmsta->lock);
 
-       if (txq_id < IWL_MVM_FIRST_AGG_QUEUE)
+       if (txq_id < mvm->first_agg_queue)
                atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
        return 0;
@@ -511,16 +511,10 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
 }
 #endif /* CONFIG_IWLWIFI_DEBUG */
 
-/**
- * translate ucode response to mac80211 tx status control values
- */
-static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
-                                        struct ieee80211_tx_info *info)
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+                              enum ieee80211_band band,
+                              struct ieee80211_tx_rate *r)
 {
-       struct ieee80211_tx_rate *r = &info->status.rates[0];
-
-       info->status.antenna =
-               ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
        if (rate_n_flags & RATE_HT_MCS_GF_MSK)
                r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
        switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
@@ -549,10 +543,23 @@ static void iwl_mvm_hwrate_to_tx_control(u32 rate_n_flags,
                r->flags |= IEEE80211_TX_RC_VHT_MCS;
        } else {
                r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
-                                                            info->band);
+                                                            band);
        }
 }
 
+/**
+ * translate ucode response to mac80211 tx status control values
+ */
+static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
+                                       struct ieee80211_tx_info *info)
+{
+       struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+       info->status.antenna =
+               ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+       iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
+}
+
 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                                     struct iwl_rx_packet *pkt)
 {
@@ -602,11 +609,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                }
 
                info->status.rates[0].count = tx_resp->failure_frame + 1;
-               iwl_mvm_hwrate_to_tx_control(le32_to_cpu(tx_resp->initial_rate),
-                                            info);
+               iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
+                                           info);
 
                /* Single frame failure in an AMPDU queue => send BAR */
-               if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE &&
+               if (txq_id >= mvm->first_agg_queue &&
                    !(info->flags & IEEE80211_TX_STAT_ACK))
                        info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
 
@@ -619,7 +626,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                ieee80211_tx_status_ni(mvm->hw, skb);
        }
 
-       if (txq_id >= IWL_MVM_FIRST_AGG_QUEUE) {
+       if (txq_id >= mvm->first_agg_queue) {
                /* If this is an aggregation queue, we use the ssn since:
                 * ssn = wifi seq_num % 256.
                 * The seq_ctl is the sequence control of the packet to which
@@ -668,10 +675,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        iwl_mvm_check_ratid_empty(mvm, sta, tid);
                        spin_unlock_bh(&mvmsta->lock);
                }
-
-#ifdef CONFIG_PM_SLEEP
-               mvmsta->last_seq_ctl = seq_ctl;
-#endif
        } else {
                sta = NULL;
                mvmsta = NULL;
@@ -681,7 +684,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
         * If the txq is not an AMPDU queue, there is no chance we freed
         * several skbs. Check that out...
         */
-       if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) &&
+       if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
            atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
                if (mvmsta) {
                        /*
@@ -777,7 +780,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
        struct ieee80211_sta *sta;
 
-       if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < IWL_MVM_FIRST_AGG_QUEUE))
+       if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
                return;
 
        if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@ -904,8 +907,8 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
                        info->flags |= IEEE80211_TX_STAT_AMPDU;
                        info->status.ampdu_ack_len = ba_notif->txed_2_done;
                        info->status.ampdu_len = ba_notif->txed;
-                       iwl_mvm_hwrate_to_tx_control(tid_data->rate_n_flags,
-                                                    info);
+                       iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
+                                                   info);
                }
        }
 
index a9c3574914348ad3583fd19387a6c48211530f8d..ed69e9b78e82ba325a533624107974ea81af526d 100644 (file)
@@ -466,7 +466,7 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
        ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
        len = img->sec[IWL_UCODE_SECTION_DATA].len;
 
-       buf = kzalloc(len, GFP_KERNEL);
+       buf = kzalloc(len, GFP_ATOMIC);
        if (!buf)
                return;
 
index dc02cb9792afbbb48c23f01d748c262bee1828fd..941c0c88f982639b28436a60e52a0fdc6fba8c4b 100644 (file)
@@ -139,13 +139,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 
 /* 6x00 Series */
        {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+       {IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
        {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+       {IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
        {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
        {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+       {IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
        {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
        {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
 
@@ -153,12 +156,16 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+       {IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+       {IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
        {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+       {IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
        {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
        {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
@@ -240,8 +247,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 
 /* 6x35 Series */
        {IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+       {IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
        {IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
        {IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
 
@@ -258,56 +268,91 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
 #endif /* CONFIG_IWLDVM */
 
 #if IS_ENABLED(CONFIG_IWLMVM)
-/* 7000 Series */
+/* 7260 Series */
        {IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
        {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
        {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+       {IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
 
 /* 3160 Series */
        {IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
        {IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
        {IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+
+/* 7265 Series */
+       {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
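Each row added to this table maps a PCI (device, subdevice) pair to a per-SKU configuration, and the {0} entry terminates the list for the probing loop. The stand-alone sketch below mirrors that lookup shape with invented names; it is not the iwlwifi code, where IWL_PCI_DEVICE() fills a struct pci_device_id and the cfg pointer travels in driver_data.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the (device, subdevice) -> config mapping. */
struct id_entry {
        uint16_t device;
        uint16_t subdevice;
        const char *cfg;        /* stands in for the iwlXXXX_cfg pointer */
};

static const struct id_entry ids[] = {
        { 0x08B1, 0x4070, "7260 2ac" },
        { 0x095A, 0x5010, "7265 2ac" },
        { 0 }                   /* zero terminator, as in the real table */
};

static const char *lookup(uint16_t dev, uint16_t subdev)
{
        const struct id_entry *e;

        for (e = ids; e->device; e++)
                if (e->device == dev && e->subdevice == subdev)
                        return e->cfg;
        return "unknown";
}

int main(void)
{
        printf("%s\n", lookup(0x095A, 0x5010));
        return 0;
}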
@@ -349,7 +394,6 @@ out_free_drv:
        iwl_drv_stop(trans_pcie->drv);
 out_free_trans:
        iwl_trans_pcie_free(iwl_trans);
-       pci_set_drvdata(pdev, NULL);
        return ret;
 }
 
@@ -360,8 +404,6 @@ static void iwl_pci_remove(struct pci_dev *pdev)
 
        iwl_drv_stop(trans_pcie->drv);
        iwl_trans_pcie_free(trans);
-
-       pci_set_drvdata(pdev, NULL);
 }
 
 #ifdef CONFIG_PM_SLEEP
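The pci_set_drvdata(pdev, NULL) deletions here, and the matching ones in the orinoco, p54 and mwl8k diffs further down, lean on the driver core resetting the drvdata pointer itself once remove() returns or probe() fails, so the explicit clear is redundant. A hedged kernel-style sketch of the resulting shape follows; struct example_priv and the function names are invented and this is not buildable on its own.

/* Sketch only: remove() undoes what probe() set up; the core clears drvdata. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

        if (!priv)
                return -ENOMEM;
        pci_set_drvdata(pdev, priv);
        return 0;
}

static void example_remove(struct pci_dev *pdev)
{
        struct example_priv *priv = pci_get_drvdata(pdev);

        kfree(priv);            /* no trailing pci_set_drvdata(pdev, NULL) */
}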
index bad95d28d50da52158ff102de3fc9855e46a662b..5d9337bec67a87c59f3ccef73e5f5b26644930d5 100644 (file)
@@ -220,6 +220,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
        iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
                          APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 
+       /* Clear the interrupt in APMG if the NIC is in RFKILL */
+       iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+
        set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
 
 out:
@@ -443,22 +446,138 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
        return ret;
 }
 
+static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+{
+       int shift_param;
+       u32 address;
+       int ret = 0;
+
+       if (cpu == 1) {
+               shift_param = 0;
+               address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+       } else {
+               shift_param = 16;
+               address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+       }
+
+       /* set CPU to started */
+       iwl_trans_set_bits_mask(trans,
+                               CSR_UCODE_LOAD_STATUS_ADDR,
+                               CSR_CPU_STATUS_LOADING_STARTED << shift_param,
+                               1);
+
+       /* set last complete descriptor number */
+       iwl_trans_set_bits_mask(trans,
+                               CSR_UCODE_LOAD_STATUS_ADDR,
+                               CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
+                               << shift_param,
+                               1);
+
+       /* set last loaded block */
+       iwl_trans_set_bits_mask(trans,
+                               CSR_UCODE_LOAD_STATUS_ADDR,
+                               CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
+                               << shift_param,
+                               1);
+
+       /* image loading complete */
+       iwl_trans_set_bits_mask(trans,
+                               CSR_UCODE_LOAD_STATUS_ADDR,
+                               CSR_CPU_STATUS_LOADING_COMPLETED
+                               << shift_param,
+                               1);
+
+       /* set FH_TCSR_0_REG  */
+       iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
+
+       /* verify image verification started  */
+       ret = iwl_poll_bit(trans, address,
+                          CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
+                          CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
+                          CSR_SECURE_TIME_OUT);
+       if (ret < 0) {
+               IWL_ERR(trans, "secure boot process didn't start\n");
+               return ret;
+       }
+
+       /* wait for image verification to complete  */
+       ret = iwl_poll_bit(trans, address,
+                          CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
+                          CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
+                          CSR_SECURE_TIME_OUT);
+
+       if (ret < 0) {
+               IWL_ERR(trans, "Time out on secure boot process\n");
+               return ret;
+       }
+
+       return 0;
+}
+
 static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
                                const struct fw_img *image)
 {
        int i, ret = 0;
 
-       for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
+       IWL_DEBUG_FW(trans,
+                    "working with %s image\n",
+                    image->is_secure ? "Secured" : "Non Secured");
+       IWL_DEBUG_FW(trans,
+                    "working with %s CPU\n",
+                    image->is_dual_cpus ? "Dual" : "Single");
+
+       /* configure the ucode to be ready to get the secured image */
+       if (image->is_secure) {
+               /* set secure boot inspector addresses */
+               iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
+               iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
+
+               /* release CPU1 reset if secure inspector image burned in OTP */
+               iwl_write32(trans, CSR_RESET, 0);
+       }
+
+       /* load to FW the binary sections of CPU1 */
+       IWL_DEBUG_INFO(trans, "Loading CPU1\n");
+       for (i = 0;
+            i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
+            i++) {
                if (!image->sec[i].data)
                        break;
-
                ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
                if (ret)
                        return ret;
        }
 
-       /* Remove all resets to allow NIC to operate */
-       iwl_write32(trans, CSR_RESET, 0);
+       /* configure the ucode to start secure process on CPU1 */
+       if (image->is_secure) {
+               /* config CPU1 to start secure protocol */
+               ret = iwl_pcie_secure_set(trans, 1);
+               if (ret)
+                       return ret;
+       } else {
+               /* Remove all resets to allow NIC to operate */
+               iwl_write32(trans, CSR_RESET, 0);
+       }
+
+       if (image->is_dual_cpus) {
+               /* load to FW the binary sections of CPU2 */
+               IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
+               for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
+                       i < IWL_UCODE_SECTION_MAX; i++) {
+                       if (!image->sec[i].data)
+                               break;
+                       ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+                       if (ret)
+                               return ret;
+               }
+
+               if (image->is_secure) {
+                       /* set CPU2 for secure protocol */
+                       ret = iwl_pcie_secure_set(trans, 2);
+                       if (ret)
+                               return ret;
+               }
+       }
 
        return 0;
 }
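The reworked iwl_pcie_load_given_ucode() above splits the firmware sections at IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU: everything below the boundary is loaded for CPU1, the remainder (on dual-CPU images) for CPU2, with the secure-boot handshake run per CPU. The toy program below shows only that partitioning loop; the boundary value, section count and load_section() are invented for the example.

#include <stdio.h>

#define FIRST_SECTION_OF_SECOND_CPU 3   /* made-up boundary */
#define SECTION_MAX                 6   /* made-up section count */

static int load_section(int cpu, int idx)
{
        printf("CPU%d <- section %d\n", cpu, idx);
        return 0;
}

int main(void)
{
        int i;

        for (i = 0; i < FIRST_SECTION_OF_SECOND_CPU; i++)
                if (load_section(1, i))
                        return 1;

        for (i = FIRST_SECTION_OF_SECOND_CPU; i < SECTION_MAX; i++)
                if (load_section(2, i))
                        return 1;

        return 0;
}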
@@ -1401,6 +1520,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        spin_lock_init(&trans_pcie->reg_lock);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
 
+       err = pci_enable_device(pdev);
+       if (err)
+               goto out_no_pci;
+
        if (!cfg->base_params->pcie_l1_allowed) {
                /*
                 * W/A - seems to solve weird behavior. We need to remove this
@@ -1412,10 +1535,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       PCIE_LINK_STATE_CLKPM);
        }
 
-       err = pci_enable_device(pdev);
-       if (err)
-               goto out_no_pci;
-
        pci_set_master(pdev);
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
index f45eb29c2ede0b62cc1723f82ae7f30cc1f5a8ed..f644fcf861a8c9ead756169ce4b6d960a8f51e1b 100644 (file)
@@ -1102,6 +1102,8 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
                 * non-AGG queue.
                 */
                iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+
+               ssn = trans_pcie->txq[txq_id].q.read_ptr;
        }
 
        /* Place first TFD at index corresponding to start sequence number.
@@ -1463,7 +1465,8 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
        spin_unlock_bh(&txq->lock);
 }
 
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+#define HOST_COMPLETE_TIMEOUT  (2 * HZ)
+#define COMMAND_POKE_TIMEOUT   (HZ / 10)
 
 static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
                                    struct iwl_host_cmd *cmd)
@@ -1491,6 +1494,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int cmd_idx;
        int ret;
+       int timeout = HOST_COMPLETE_TIMEOUT;
 
        IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
                       get_cmd_string(trans_pcie, cmd->id));
@@ -1515,10 +1519,29 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                return ret;
        }
 
-       ret = wait_event_timeout(trans_pcie->wait_command_queue,
-                                !test_bit(STATUS_HCMD_ACTIVE,
-                                          &trans_pcie->status),
-                                HOST_COMPLETE_TIMEOUT);
+       while (timeout > 0) {
+               unsigned long flags;
+
+               timeout -= COMMAND_POKE_TIMEOUT;
+               ret = wait_event_timeout(trans_pcie->wait_command_queue,
+                                        !test_bit(STATUS_HCMD_ACTIVE,
+                                                  &trans_pcie->status),
+                                        COMMAND_POKE_TIMEOUT);
+               if (ret)
+                       break;
+               /* poke the device - it may have lost the command */
+               if (iwl_trans_grab_nic_access(trans, true, &flags)) {
+                       iwl_trans_release_nic_access(trans, &flags);
+                       IWL_DEBUG_INFO(trans,
+                                      "Tried to wake NIC for command %s\n",
+                                      get_cmd_string(trans_pcie, cmd->id));
+               } else {
+                       IWL_ERR(trans, "Failed to poke NIC for command %s\n",
+                               get_cmd_string(trans_pcie, cmd->id));
+                       break;
+               }
+       }
+
        if (!ret) {
                if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
                        struct iwl_txq *txq =
@@ -1539,6 +1562,9 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
                                       "Clearing HCMD_ACTIVE for command %s\n",
                                       get_cmd_string(trans_pcie, cmd->id));
                        ret = -ETIMEDOUT;
+
+                       iwl_op_mode_nic_error(trans->op_mode);
+
                        goto cancel;
                }
        }
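Instead of a single 2*HZ wait, the sync host-command path now waits in HZ/10 slices and, between slices, grabs NIC access to poke a device that may have dropped the command; only after the whole budget is spent does it fall through to the timeout handling and report a firmware error. The stand-alone sketch below reproduces just that slice-and-poke control flow; the slice count, poke_device() and the fake completion condition are invented.

#include <stdbool.h>
#include <stdio.h>

#define TOTAL_SLICES 20                 /* made-up wait budget */

static bool wait_one_slice(int slice)
{
        return slice >= 3;              /* pretend completion arrives here */
}

static void poke_device(void)
{
        printf("poking device\n");
}

int main(void)
{
        bool done = false;
        int slice;

        for (slice = 0; slice < TOTAL_SLICES && !done; slice++) {
                done = wait_one_slice(slice);
                if (!done)
                        poke_device();
        }

        printf(done ? "command completed\n" : "timed out\n");
        return done ? 0 : 1;
}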
index c0f9e7e862f66e8bfcb549ae4a7f3ba7425306ae..51b92b5df11956adfedadb5137f8f6b61342b0c4 100644 (file)
@@ -53,6 +53,11 @@ static void main_firmware_cb(const struct firmware *firmware, void *context)
 
        /* Firmware found! */
        lbs_fw_loaded(priv, 0, priv->helper_fw, firmware);
+       if (priv->helper_fw) {
+               release_firmware (priv->helper_fw);
+               priv->helper_fw = NULL;
+       }
+       release_firmware (firmware);
 }
 
 static void helper_firmware_cb(const struct firmware *firmware, void *context)
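This libertas series moves release_firmware() out of the individual if_cs/if_sdio/if_spi/if_usb programming paths and into the completion callback that owns the images, so each image is released exactly once after use. A hedged kernel-style sketch of that ownership rule follows; struct example_priv and program_device() are invented.

/* Sketch only: the callback that receives the firmware also releases it. */
static void fw_ready_cb(const struct firmware *fw, void *context)
{
        struct example_priv *priv = context;

        if (fw)
                program_device(priv, fw->data, fw->size);

        release_firmware(fw);   /* release_firmware(NULL) is a no-op */
}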
index c94dd6802672a053d795b052710d5ada4ff3e2bb..ef8c98e21098479d231df5844fb2abab8ddab04f 100644 (file)
@@ -754,14 +754,14 @@ static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
        if (ret == 0 && (card->model != MODEL_8305))
                ret = if_cs_prog_real(card, mainfw);
        if (ret)
-               goto out;
+               return;
 
        /* Now actually get the IRQ */
        ret = request_irq(card->p_dev->irq, if_cs_interrupt,
                IRQF_SHARED, DRV_NAME, card);
        if (ret) {
                pr_err("error in request_irq\n");
-               goto out;
+               return;
        }
 
        /*
@@ -777,10 +777,6 @@ static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
                pr_err("could not activate card\n");
                free_irq(card->p_dev->irq, card);
        }
-
-out:
-       release_firmware(helper);
-       release_firmware(mainfw);
 }
 
 
index 45578335e4200f2f47a3fc8284f9bd09127209ee..991238afd1b60c04ae437fafeb7a108713ea3134 100644 (file)
@@ -708,20 +708,16 @@ static void if_sdio_do_prog_firmware(struct lbs_private *priv, int ret,
 
        ret = if_sdio_prog_helper(card, helper);
        if (ret)
-               goto out;
+               return;
 
        lbs_deb_sdio("Helper firmware loaded\n");
 
        ret = if_sdio_prog_real(card, mainfw);
        if (ret)
-               goto out;
+               return;
 
        lbs_deb_sdio("Firmware loaded\n");
        if_sdio_finish_power_on(card);
-
-out:
-       release_firmware(helper);
-       release_firmware(mainfw);
 }
 
 static int if_sdio_prog_firmware(struct if_sdio_card *card)
index 4bb6574f40737f462f3b7f4ad3d9fea3d705b415..83669151bb8242e31931735700eb27fe4cd01821 100644 (file)
@@ -1094,11 +1094,7 @@ static int if_spi_init_card(struct if_spi_card *card)
                goto out;
 
 out:
-       release_firmware(helper);
-       release_firmware(mainfw);
-
        lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
-
        return err;
 }
 
@@ -1128,7 +1124,7 @@ static int if_spi_probe(struct spi_device *spi)
 {
        struct if_spi_card *card;
        struct lbs_private *priv = NULL;
-       struct libertas_spi_platform_data *pdata = spi->dev.platform_data;
+       struct libertas_spi_platform_data *pdata = dev_get_platdata(&spi->dev);
        int err = 0;
 
        lbs_deb_enter(LBS_DEB_SPI);
index 27980778d992db783e58f777446a1cc03d40c28c..dff08a2896a38f644894749c006787e26b1f3ea6 100644 (file)
@@ -844,7 +844,7 @@ static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
        cardp->fw = fw;
        if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
                ret = -EINVAL;
-               goto release_fw;
+               goto done;
        }
 
        /* Cancel any pending usb business */
@@ -861,7 +861,7 @@ restart:
        if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
                lbs_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
                ret = -EIO;
-               goto release_fw;
+               goto done;
        }
 
        cardp->bootcmdresp = 0;
@@ -883,14 +883,14 @@ restart:
                usb_kill_urb(cardp->tx_urb);
                if (if_usb_submit_rx_urb(cardp) < 0)
                        ret = -EIO;
-               goto release_fw;
+               goto done;
        } else if (cardp->bootcmdresp <= 0) {
                if (--reset_count >= 0) {
                        if_usb_reset_device(cardp);
                        goto restart;
                }
                ret = -EIO;
-               goto release_fw;
+               goto done;
        }
 
        i = 0;
@@ -921,14 +921,14 @@ restart:
 
                pr_info("FW download failure, time = %d ms\n", i * 100);
                ret = -EIO;
-               goto release_fw;
+               goto done;
        }
 
        cardp->priv->fw_ready = 1;
        if_usb_submit_rx_urb(cardp);
 
        if (lbs_start_card(priv))
-               goto release_fw;
+               goto done;
 
        if_usb_setup_firmware(priv);
 
@@ -939,11 +939,8 @@ restart:
        if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
                priv->ehs_remove_supported = false;
 
- release_fw:
-       release_firmware(cardp->fw);
-       cardp->fw = NULL;
-
  done:
+       cardp->fw = NULL;
        lbs_deb_leave(LBS_DEB_USB);
 }
 
index a6c46f3b6e3a0d622b796f094b5b75016428438e..e47f4e3012b85344c7d400d78799634cb5bf0234 100644 (file)
@@ -1048,7 +1048,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
        struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
        unsigned long cmd_flags;
        unsigned long scan_pending_q_flags;
-       uint16_t cancel_scan_cmd = false;
+       bool cancel_scan_cmd = false;
 
        if ((adapter->curr_cmd) &&
            (adapter->curr_cmd->wait_q_enabled)) {
index 9d7c0e6c4fc7419facd68a3c61f251fbedb86143..4e4686e6ac092b3ce23282c575eadc2b40071eff 100644 (file)
@@ -621,7 +621,7 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
        int ret = 0;
        struct ieee_types_assoc_rsp *assoc_rsp;
        struct mwifiex_bssdescriptor *bss_desc;
-       u8 enable_data = true;
+       bool enable_data = true;
        u16 cap_info, status_code;
 
        assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
@@ -1422,13 +1422,19 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
  */
 int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
 {
+       int ret = 0;
+
        if (!priv->media_connected)
                return 0;
 
        switch (priv->bss_mode) {
        case NL80211_IFTYPE_STATION:
        case NL80211_IFTYPE_P2P_CLIENT:
-               return mwifiex_deauthenticate_infra(priv, mac);
+               ret = mwifiex_deauthenticate_infra(priv, mac);
+               if (ret)
+                       cfg80211_disconnected(priv->netdev, 0, NULL, 0,
+                                             GFP_KERNEL);
+               break;
        case NL80211_IFTYPE_ADHOC:
                return mwifiex_send_cmd_sync(priv,
                                             HostCmd_CMD_802_11_AD_HOC_STOP,
@@ -1440,7 +1446,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
                break;
        }
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(mwifiex_deauthenticate);
 
index fd778337deeec4e58b1bddb4d08668aaf51890d9..9d7c9d354d34aeb9e0b3e4bf854fc6a999671958 100644 (file)
@@ -358,10 +358,12 @@ process_start:
                }
        } while (true);
 
-       if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter))
+       spin_lock_irqsave(&adapter->main_proc_lock, flags);
+       if ((adapter->int_status) || IS_CARD_RX_RCVD(adapter)) {
+               spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
                goto process_start;
+       }
 
-       spin_lock_irqsave(&adapter->main_proc_lock, flags);
        adapter->mwifiex_processing = false;
        spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
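This hunk closes a race in the main work loop: the int_status/RX recheck and the clearing of mwifiex_processing now both happen under main_proc_lock, so an interrupt landing between the two can no longer be left unserviced. The userspace sketch below shows the same recheck-under-lock shape with pthread primitives and invented flag names.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool pending;            /* set by a producer while holding 'lock' */
static bool processing;

static void drain_once(void)
{
        /* placeholder for the actual packet/command processing */
}

static void process_all(void)
{
        for (;;) {
                drain_once();

                pthread_mutex_lock(&lock);
                if (pending) {
                        pending = false;
                        pthread_mutex_unlock(&lock);
                        continue;       /* new work raced in: go around */
                }
                processing = false;     /* nothing left: clear under the lock */
                pthread_mutex_unlock(&lock);
                break;
        }
}

int main(void)
{
        processing = true;
        process_all();
        printf("processing=%d\n", processing);
        return 0;
}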
 
@@ -880,7 +882,9 @@ mwifiex_add_card(void *card, struct semaphore *sem,
        adapter->cmd_wait_q.status = 0;
        adapter->scan_wait_q_woken = false;
 
-       adapter->workqueue = create_workqueue("MWIFIEX_WORK_QUEUE");
+       adapter->workqueue =
+               alloc_workqueue("MWIFIEX_WORK_QUEUE",
+                               WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
        if (!adapter->workqueue)
                goto err_kmalloc;
 
index 52da8ee7599a041d7922180122c1744d19fe743e..33fa9432b241b353c3ae381ab57c48d5d71f763b 100644 (file)
@@ -93,7 +93,7 @@ static int mwifiex_pcie_suspend(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
 
        if (pdev) {
-               card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+               card = pci_get_drvdata(pdev);
                if (!card || !card->adapter) {
                        pr_err("Card or adapter structure is not valid\n");
                        return 0;
@@ -128,7 +128,7 @@ static int mwifiex_pcie_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
 
        if (pdev) {
-               card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+               card = pci_get_drvdata(pdev);
                if (!card || !card->adapter) {
                        pr_err("Card or adapter structure is not valid\n");
                        return 0;
@@ -2037,7 +2037,7 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
                goto exit;
        }
 
-       card = (struct pcie_service_card *) pci_get_drvdata(pdev);
+       card = pci_get_drvdata(pdev);
        if (!card || !card->adapter) {
                pr_debug("info: %s: card=%p adapter=%p\n", __func__, card,
                         card ? card->adapter : NULL);
index c0268b5977480b384f04f452e3278a9443c93e85..7d66018a2e33060d1bdc1a96b970fd61f6480e4a 100644 (file)
@@ -327,7 +327,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
 {
        struct mwifiex_adapter *adapter = priv->adapter;
        struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg = &cmd->params.opt_hs_cfg;
-       u16 hs_activate = false;
+       bool hs_activate = false;
 
        if (!hscfg_param)
                /* New Activate command */
index 8b057524b252e535307644ddaa98e12df75a35cd..8c351f71f72f9f6f6a17650198ba805bf7c1f5cd 100644 (file)
@@ -118,7 +118,8 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        dev_dbg(adapter->dev,
                "info: successfully disconnected from %pM: reason code %d\n",
                priv->cfg_bssid, reason_code);
-       if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+       if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+           priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
                cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
                                      GFP_KERNEL);
        }
index 95fa3599b4070b02aecc8d4d46073137b05c5b98..5dd0ccc70b863ea15fad25d739adebd39ce09dbd 100644 (file)
@@ -708,7 +708,7 @@ int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
 {
        u8 *curr = (u8 *) &resp->params.get_wmm_status;
        uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
-       int valid = true;
+       bool valid = true;
 
        struct mwifiex_ie_types_data *tlv_hdr;
        struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
index 644d6e0c51ccf235e5b457da85f57c64029e5c2d..0f129d498fb1057d898db2c6e92868d8ff1f3baf 100644 (file)
@@ -83,11 +83,10 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
 }
 
 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
-                                       struct sk_buff *skb);
+                                struct sk_buff *skb);
 void mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
-                             struct mwifiex_ra_list_tbl *ra,
-                             int tid);
+                             struct mwifiex_ra_list_tbl *ra, int tid);
 
 int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
 void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
@@ -95,21 +94,18 @@ int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
                            struct mwifiex_ra_list_tbl *ra_list, int tid);
 
 u8 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
-                                            const struct sk_buff *skb);
+                                    const struct sk_buff *skb);
 void mwifiex_wmm_init(struct mwifiex_adapter *adapter);
 
-extern u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
-                                                u8 **assoc_buf,
-                                                struct ieee_types_wmm_parameter
-                                                *wmmie,
-                                                struct ieee80211_ht_cap
-                                                *htcap);
+u32 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
+                                       u8 **assoc_buf,
+                                       struct ieee_types_wmm_parameter *wmmie,
+                                       struct ieee80211_ht_cap *htcap);
 
 void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
-                                       struct ieee_types_wmm_parameter
-                                       *wmm_ie);
+                                       struct ieee_types_wmm_parameter *wmm_ie);
 void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
-extern int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
-                                     const struct host_cmd_ds_command *resp);
+int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
+                              const struct host_cmd_ds_command *resp);
 
 #endif /* !_MWIFIEX_WMM_H_ */
index a3707fd4ef623e5968afc85e5d265cb49024d764..b953ad621e0b90ef23e9fbe009bc0c4cd5f66876 100644 (file)
@@ -6093,7 +6093,6 @@ err_iounmap:
        if (priv->sram != NULL)
                pci_iounmap(pdev, priv->sram);
 
-       pci_set_drvdata(pdev, NULL);
        ieee80211_free_hw(hw);
 
 err_free_reg:
@@ -6147,7 +6146,6 @@ static void mwl8k_remove(struct pci_dev *pdev)
 unmap:
        pci_iounmap(pdev, priv->regs);
        pci_iounmap(pdev, priv->sram);
-       pci_set_drvdata(pdev, NULL);
        ieee80211_free_hw(hw);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
index 3bb936b9558c7f96dedc215189f72b49a313b8d7..eebd2be21ee9067e7d90f45f32b227bb07601998 100644 (file)
@@ -182,23 +182,20 @@ extern int orinoco_debug;
 /* Exported prototypes                                              */
 /********************************************************************/
 
-extern struct orinoco_private *alloc_orinocodev(
-       int sizeof_card, struct device *device,
-       int (*hard_reset)(struct orinoco_private *),
-       int (*stop_fw)(struct orinoco_private *, int));
-extern void free_orinocodev(struct orinoco_private *priv);
-extern int orinoco_init(struct orinoco_private *priv);
-extern int orinoco_if_add(struct orinoco_private *priv,
-                         unsigned long base_addr,
-                         unsigned int irq,
-                         const struct net_device_ops *ops);
-extern void orinoco_if_del(struct orinoco_private *priv);
-extern int orinoco_up(struct orinoco_private *priv);
-extern void orinoco_down(struct orinoco_private *priv);
-extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
-
-extern void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
-extern void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
+struct orinoco_private *alloc_orinocodev(int sizeof_card, struct device *device,
+                                        int (*hard_reset)(struct orinoco_private *),
+                                        int (*stop_fw)(struct orinoco_private *, int));
+void free_orinocodev(struct orinoco_private *priv);
+int orinoco_init(struct orinoco_private *priv);
+int orinoco_if_add(struct orinoco_private *priv, unsigned long base_addr,
+                  unsigned int irq, const struct net_device_ops *ops);
+void orinoco_if_del(struct orinoco_private *priv);
+int orinoco_up(struct orinoco_private *priv);
+void orinoco_down(struct orinoco_private *priv);
+irqreturn_t orinoco_interrupt(int irq, void *dev_id);
+
+void __orinoco_ev_info(struct net_device *dev, struct hermes *hw);
+void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw);
 
 int orinoco_process_xmit_skb(struct sk_buff *skb,
                             struct net_device *dev,
index d73fdf6185a2c7495891544ffe693ed140432f54..ffb2469eb6794c16453022e2b4a0a17a239077d3 100644 (file)
@@ -234,7 +234,6 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
        free_irq(pdev->irq, priv);
 
  fail_irq:
-       pci_set_drvdata(pdev, NULL);
        free_orinocodev(priv);
 
  fail_alloc:
@@ -265,7 +264,6 @@ static void orinoco_nortel_remove_one(struct pci_dev *pdev)
 
        orinoco_if_del(priv);
        free_irq(pdev->irq, priv);
-       pci_set_drvdata(pdev, NULL);
        free_orinocodev(priv);
        pci_iounmap(pdev, priv->hw.iobase);
        pci_iounmap(pdev, card->attr_io);
index 677bf14eca844a1dd7c8cc2dca0724e9c5a5b505..5ae1191d2532545af84df76970140902906bb373 100644 (file)
@@ -184,7 +184,6 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
        free_irq(pdev->irq, priv);
 
  fail_irq:
-       pci_set_drvdata(pdev, NULL);
        free_orinocodev(priv);
 
  fail_alloc:
@@ -205,7 +204,6 @@ static void orinoco_pci_remove_one(struct pci_dev *pdev)
 
        orinoco_if_del(priv);
        free_irq(pdev->irq, priv);
-       pci_set_drvdata(pdev, NULL);
        free_orinocodev(priv);
        pci_iounmap(pdev, priv->hw.iobase);
        pci_release_regions(pdev);
index 2559dbd6184b2421b95a2d73630e14a887a9525c..bbd36d1676ff16aff0ff04b70a4e4a761e2ba9ba 100644 (file)
@@ -273,7 +273,6 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
        free_irq(pdev->irq, priv);
 
  fail_irq:
-       pci_set_drvdata(pdev, NULL);
        free_orinocodev(priv);
 
  fail_alloc:
@@ -301,7 +300,6 @@ static void orinoco_plx_remove_one(struct pci_dev *pdev)
 
        orinoco_if_del(priv);
        free_irq(pdev->irq, priv);
-       pci_set_drvdata(pdev, NULL);
        free_orinocodev(priv);
        pci_iounmap(pdev, priv->hw.iobase);
        pci_iounmap(pdev, card->attr_io);
index 42afeeea2c405c2403f0075f1aeabacd7af57767..04b08de5fd5db33cc73b89fefe8d2f9cea2adb69 100644 (file)
@@ -170,7 +170,6 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
        free_irq(pdev->irq, priv);
 
  fail_irq:
-       pci_set_drvdata(pdev, NULL);
        free_orinocodev(priv);
 
  fail_alloc:
@@ -195,7 +194,6 @@ static void orinoco_tmd_remove_one(struct pci_dev *pdev)
 
        orinoco_if_del(priv);
        free_irq(pdev->irq, priv);
-       pci_set_drvdata(pdev, NULL);
        free_orinocodev(priv);
        pci_iounmap(pdev, priv->hw.iobase);
        pci_iounmap(pdev, card->bridge_io);
index 57e3af8ebb4b39622450d1fbee0742b39ab5a0a3..f9a07b0d83acd2f40df4755880fa1551e6276091 100644 (file)
@@ -631,7 +631,6 @@ static int p54p_probe(struct pci_dev *pdev,
        iounmap(priv->map);
 
  err_free_dev:
-       pci_set_drvdata(pdev, NULL);
        p54_free_common(dev);
 
  err_free_reg:
index 7fc46f26cf2be3085f08c56673931a44d538d098..de15171e2cd896f8dfd78c6d5e00966e2161ec12 100644 (file)
@@ -636,7 +636,7 @@ static int p54spi_probe(struct spi_device *spi)
        gpio_direction_input(p54spi_gpio_irq);
 
        ret = request_irq(gpio_to_irq(p54spi_gpio_irq),
-                         p54spi_interrupt, IRQF_DISABLED, "p54spi",
+                         p54spi_interrupt, 0, "p54spi",
                          priv->spi);
        if (ret < 0) {
                dev_err(&priv->spi->dev, "request_irq() failed");
index 1c22b81e6ef35e30f86afc994445ff66ef4f7c36..8863a6cb2388d952926cf8fe209a8b67fa004ddb 100644 (file)
@@ -183,7 +183,7 @@ prism54_update_stats(struct work_struct *work)
        data = r.ptr;
 
        /* copy this MAC to the bss */
-       memcpy(bss.address, data, 6);
+       memcpy(bss.address, data, ETH_ALEN);
        kfree(data);
 
        /* now ask for the corresponding bss */
@@ -531,7 +531,7 @@ prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
                return -EINVAL;
 
        /* prepare the structure for the set object */
-       memcpy(&bssid[0], awrq->sa_data, 6);
+       memcpy(&bssid[0], awrq->sa_data, ETH_ALEN);
 
        /* set the bssid -- does this make sense when in AP mode? */
        rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid);
@@ -550,7 +550,7 @@ prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
        int rvalue;
 
        rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
-       memcpy(awrq->sa_data, r.ptr, 6);
+       memcpy(awrq->sa_data, r.ptr, ETH_ALEN);
        awrq->sa_family = ARPHRD_ETHER;
        kfree(r.ptr);
 
@@ -582,7 +582,7 @@ prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
        size_t wpa_ie_len;
 
        /* The first entry must be the MAC address */
-       memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
+       memcpy(iwe.u.ap_addr.sa_data, bss->address, ETH_ALEN);
        iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
        iwe.cmd = SIOCGIWAP;
        current_ev = iwe_stream_add_event(info, current_ev, end_buf,
@@ -2489,7 +2489,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
                              &((struct sockaddr *) addr)->sa_data);
        if (!ret)
                memcpy(priv->ndev->dev_addr,
-                      &((struct sockaddr *) addr)->sa_data, 6);
+                      &((struct sockaddr *) addr)->sa_data, ETH_ALEN);
 
        return ret;
 }
index 5970ff6f40cc315dd253054c84f031cceda01170..41a16d30c79c5be46f89229d3185a033d8131f0f 100644 (file)
@@ -837,7 +837,7 @@ islpci_setup(struct pci_dev *pdev)
        /* ndev->set_multicast_list = &islpci_set_multicast_list; */
        ndev->addr_len = ETH_ALEN;
        /* Get a non-zero dummy MAC address for nameif. Jean II */
-       memcpy(ndev->dev_addr, dummy_mac, 6);
+       memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);
 
        ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;
 
index a01606b36e03f836a5cc57ab292dd47f2ba2352f..056af38e72e399d88528343e978fec15d512f180 100644 (file)
@@ -682,7 +682,7 @@ mgt_update_addr(islpci_private *priv)
                                     isl_oid[GEN_OID_MACADDRESS].size, &res);
 
        if ((ret == 0) && res && (res->header->operation != PIMFOR_OP_ERROR))
-               memcpy(priv->ndev->dev_addr, res->data, 6);
+               memcpy(priv->ndev->dev_addr, res->data, ETH_ALEN);
        else
                ret = -EIO;
        if (res)
index 68dbbb9c6d1259eb3d83d7c1458126f6fc69a2f2..006b8bcb2e31dfc5c21d7ed602546003e4a9e54c 100644 (file)
@@ -58,11 +58,11 @@ config RT61PCI
 
 config RT2800PCI
        tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
-       depends on PCI || SOC_RT288X || SOC_RT305X
+       depends on PCI
        select RT2800_LIB
+       select RT2800_LIB_MMIO
        select RT2X00_LIB_MMIO
-       select RT2X00_LIB_PCI if PCI
-       select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
+       select RT2X00_LIB_PCI
        select RT2X00_LIB_FIRMWARE
        select RT2X00_LIB_CRYPTO
        select CRC_CCITT
@@ -199,9 +199,30 @@ config RT2800USB_UNKNOWN
 
 endif
 
+config RT2800SOC
+       tristate "Ralink WiSoC support"
+       depends on SOC_RT288X || SOC_RT305X
+       select RT2X00_LIB_SOC
+       select RT2X00_LIB_MMIO
+       select RT2X00_LIB_CRYPTO
+       select RT2X00_LIB_FIRMWARE
+       select RT2800_LIB
+       select RT2800_LIB_MMIO
+       ---help---
+         This adds support for Ralink WiSoC devices.
+         Supported chips: RT2880, RT3050, RT3052, RT3350, RT3352.
+
+         When compiled as a module, this driver will be called rt2800soc.
+
+
 config RT2800_LIB
        tristate
 
+config RT2800_LIB_MMIO
+       tristate
+       select RT2X00_LIB_MMIO
+       select RT2800_LIB
+
 config RT2X00_LIB_MMIO
        tristate
 
@@ -219,6 +240,7 @@ config RT2X00_LIB_USB
 
 config RT2X00_LIB
        tristate
+       select AVERAGE
 
 config RT2X00_LIB_FIRMWARE
        boolean
index f069d8bc5b678332151d7a6fe337f44e29205d82..24a66015a4959f6fd6d44a18e6bd8d0aed7d1c1c 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_RT2X00_LIB_PCI)          += rt2x00pci.o
 obj-$(CONFIG_RT2X00_LIB_SOC)           += rt2x00soc.o
 obj-$(CONFIG_RT2X00_LIB_USB)           += rt2x00usb.o
 obj-$(CONFIG_RT2800_LIB)               += rt2800lib.o
+obj-$(CONFIG_RT2800_LIB_MMIO)          += rt2800mmio.o
 obj-$(CONFIG_RT2400PCI)                        += rt2400pci.o
 obj-$(CONFIG_RT2500PCI)                        += rt2500pci.o
 obj-$(CONFIG_RT61PCI)                  += rt61pci.o
@@ -21,3 +22,4 @@ obj-$(CONFIG_RT2800PCI)                       += rt2800pci.o
 obj-$(CONFIG_RT2500USB)                        += rt2500usb.o
 obj-$(CONFIG_RT73USB)                  += rt73usb.o
 obj-$(CONFIG_RT2800USB)                        += rt2800usb.o
+obj-$(CONFIG_RT2800SOC)                        += rt2800soc.o
index 3d53a09da5a12da22f2697c9a22b7dfb49daaff0..38ed9a3e44c8c00c06ea8f1cdd9ae1a68bbe2d60 100644 (file)
@@ -1261,7 +1261,7 @@ static void rt2400pci_fill_rxdone(struct queue_entry *entry,
         */
        rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
        rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
-       rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
+       rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
            entry->queue->rt2x00dev->rssi_offset;
        rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
 
index fa33b5edf931ba688cb40297937e519a67883b76..aab6b5e4f5ddd0fb0a52b7385388f3a07c738add 100644 (file)
@@ -52,6 +52,7 @@
  * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
  * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
  * RF5592 2.4G/5G 2T2R
+ * RF3070 2.4G 1T1R
  * RF5360 2.4G 1T1R
  * RF5370 2.4G 1T1R
  * RF5390 2.4G 1T1R
@@ -70,6 +71,7 @@
 #define RF3322                         0x000c
 #define RF3053                         0x000d
 #define RF5592                         0x000f
+#define RF3070                         0x3070
 #define RF3290                         0x3290
 #define RF5360                         0x5360
 #define RF5370                         0x5370
 /*
  * MAC_CSR0_3290: MAC_CSR0 for RT3290 to identity MAC version number.
  */
-#define MAC_CSR0_3290                          0x0000
+#define MAC_CSR0_3290                  0x0000
 
 /*
  * E2PROM_CSR: PCI EEPROM control register.
 /*
  * COEX_CFG_0
  */
-#define COEX_CFG0                      0x0040
+#define COEX_CFG0              0x0040
 #define COEX_CFG_ANT           FIELD32(0xff000000)
 /*
  * COEX_CFG_1
  */
-#define COEX_CFG1                      0x0044
+#define COEX_CFG1              0x0044
 
 /*
  * COEX_CFG_2
  */
-#define COEX_CFG2                      0x0048
+#define COEX_CFG2              0x0048
 #define BT_COEX_CFG1           FIELD32(0xff000000)
 #define BT_COEX_CFG0           FIELD32(0x00ff0000)
 #define WL_COEX_CFG1           FIELD32(0x0000ff00)
 #define PLL_RESERVED_INPUT2    FIELD32(0x0000ff00)
 #define PLL_CONTROL            FIELD32(0x00070000)
 #define PLL_LPF_R1             FIELD32(0x00080000)
-#define PLL_LPF_C1_CTRL        FIELD32(0x00300000)
-#define PLL_LPF_C2_CTRL        FIELD32(0x00c00000)
+#define PLL_LPF_C1_CTRL                FIELD32(0x00300000)
+#define PLL_LPF_C2_CTRL                FIELD32(0x00c00000)
 #define PLL_CP_CURRENT_CTRL    FIELD32(0x03000000)
 #define PLL_PFD_DELAY_CTRL     FIELD32(0x0c000000)
 #define PLL_LOCK_CTRL          FIELD32(0x70000000)
@@ -2164,7 +2166,7 @@ struct mac_iveiv_entry {
  */
 #define RFCSR6_R1                      FIELD8(0x03)
 #define RFCSR6_R2                      FIELD8(0x40)
-#define RFCSR6_TXDIV           FIELD8(0x0c)
+#define RFCSR6_TXDIV                   FIELD8(0x0c)
 /* bits for RF3053 */
 #define RFCSR6_VCO_IC                  FIELD8(0xc0)
 
@@ -2202,13 +2204,13 @@ struct mac_iveiv_entry {
  * RFCSR 12:
  */
 #define RFCSR12_TX_POWER               FIELD8(0x1f)
-#define RFCSR12_DR0                            FIELD8(0xe0)
+#define RFCSR12_DR0                    FIELD8(0xe0)
 
 /*
  * RFCSR 13:
  */
 #define RFCSR13_TX_POWER               FIELD8(0x1f)
-#define RFCSR13_DR0                            FIELD8(0xe0)
+#define RFCSR13_DR0                    FIELD8(0xe0)
 
 /*
  * RFCSR 15:
@@ -2226,7 +2228,7 @@ struct mac_iveiv_entry {
 #define RFCSR17_TXMIXER_GAIN           FIELD8(0x07)
 #define RFCSR17_TX_LO1_EN              FIELD8(0x08)
 #define RFCSR17_R                      FIELD8(0x20)
-#define RFCSR17_CODE                   FIELD8(0x7f)
+#define RFCSR17_CODE                   FIELD8(0x7f)
 
 /* RFCSR 18 */
 #define RFCSR18_XO_TUNE_BYPASS         FIELD8(0x40)
@@ -2449,7 +2451,7 @@ enum rt2800_eeprom_word {
  */
 #define EEPROM_NIC_CONF0_RXPATH                FIELD16(0x000f)
 #define EEPROM_NIC_CONF0_TXPATH                FIELD16(0x00f0)
-#define EEPROM_NIC_CONF0_RF_TYPE               FIELD16(0x0f00)
+#define EEPROM_NIC_CONF0_RF_TYPE       FIELD16(0x0f00)
 
 /*
  * EEPROM NIC Configuration 1
@@ -2471,18 +2473,18 @@ enum rt2800_eeprom_word {
  * DAC_TEST: 0: disable, 1: enable
  */
 #define EEPROM_NIC_CONF1_HW_RADIO              FIELD16(0x0001)
-#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC               FIELD16(0x0002)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G               FIELD16(0x0004)
-#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G               FIELD16(0x0008)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC       FIELD16(0x0002)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G       FIELD16(0x0004)
+#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G       FIELD16(0x0008)
 #define EEPROM_NIC_CONF1_CARDBUS_ACCEL         FIELD16(0x0010)
 #define EEPROM_NIC_CONF1_BW40M_SB_2G           FIELD16(0x0020)
 #define EEPROM_NIC_CONF1_BW40M_SB_5G           FIELD16(0x0040)
 #define EEPROM_NIC_CONF1_WPS_PBC               FIELD16(0x0080)
 #define EEPROM_NIC_CONF1_BW40M_2G              FIELD16(0x0100)
 #define EEPROM_NIC_CONF1_BW40M_5G              FIELD16(0x0200)
-#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA             FIELD16(0x400)
+#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA     FIELD16(0x400)
 #define EEPROM_NIC_CONF1_ANT_DIVERSITY         FIELD16(0x1800)
-#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC               FIELD16(0x2000)
+#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC       FIELD16(0x2000)
 #define EEPROM_NIC_CONF1_BT_COEXIST            FIELD16(0x4000)
 #define EEPROM_NIC_CONF1_DAC_TEST              FIELD16(0x8000)
 
@@ -2521,9 +2523,9 @@ enum rt2800_eeprom_word {
  * TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
  * CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
  */
-#define EEPROM_NIC_CONF2_RX_STREAM             FIELD16(0x000f)
-#define EEPROM_NIC_CONF2_TX_STREAM             FIELD16(0x00f0)
-#define EEPROM_NIC_CONF2_CRYSTAL               FIELD16(0x0600)
+#define EEPROM_NIC_CONF2_RX_STREAM     FIELD16(0x000f)
+#define EEPROM_NIC_CONF2_TX_STREAM     FIELD16(0x00f0)
+#define EEPROM_NIC_CONF2_CRYSTAL       FIELD16(0x0600)
 
 /*
  * EEPROM LNA
@@ -2790,7 +2792,7 @@ enum rt2800_eeprom_word {
 #define MCU_CURRENT                    0x36
 #define MCU_LED                                0x50
 #define MCU_LED_STRENGTH               0x51
-#define MCU_LED_AG_CONF                0x52
+#define MCU_LED_AG_CONF                        0x52
 #define MCU_LED_ACT_CONF               0x53
 #define MCU_LED_LED_POLARITY           0x54
 #define MCU_RADAR                      0x60
@@ -2799,7 +2801,7 @@ enum rt2800_eeprom_word {
 #define MCU_FREQ_OFFSET                        0x74
 #define MCU_BBP_SIGNAL                 0x80
 #define MCU_POWER_SAVE                 0x83
-#define MCU_BAND_SELECT                0x91
+#define MCU_BAND_SELECT                        0x91
 
 /*
  * MCU mailbox tokens
index 88ce656f96cda3d7ea1f7ae5756827538edfc6d2..c5738f14c4ba21b7a60453ab282309089134bdc2 100644 (file)
@@ -278,12 +278,9 @@ static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
        [EEPROM_LNA]                    = 0x0026,
        [EEPROM_EXT_LNA2]               = 0x0027,
        [EEPROM_RSSI_BG]                = 0x0028,
-       [EEPROM_TXPOWER_DELTA]          = 0x0028, /* Overlaps with RSSI_BG */
        [EEPROM_RSSI_BG2]               = 0x0029,
-       [EEPROM_TXMIXER_GAIN_BG]        = 0x0029, /* Overlaps with RSSI_BG2 */
        [EEPROM_RSSI_A]                 = 0x002a,
        [EEPROM_RSSI_A2]                = 0x002b,
-       [EEPROM_TXMIXER_GAIN_A]         = 0x002b, /* Overlaps with RSSI_A2 */
        [EEPROM_TXPOWER_BG1]            = 0x0030,
        [EEPROM_TXPOWER_BG2]            = 0x0037,
        [EEPROM_EXT_TXPOWER_BG3]        = 0x003e,
@@ -1783,7 +1780,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
        rt2800_bbp_read(rt2x00dev, 3, &r3);
 
        if (rt2x00_rt(rt2x00dev, RT3572) &&
-           test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+           rt2x00_has_cap_bt_coexist(rt2x00dev))
                rt2800_config_3572bt_ant(rt2x00dev);
 
        /*
@@ -1795,7 +1792,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
                break;
        case 2:
                if (rt2x00_rt(rt2x00dev, RT3572) &&
-                   test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+                   rt2x00_has_cap_bt_coexist(rt2x00dev))
                        rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1);
                else
                        rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
@@ -1825,7 +1822,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
                break;
        case 2:
                if (rt2x00_rt(rt2x00dev, RT3572) &&
-                   test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+                   rt2x00_has_cap_bt_coexist(rt2x00dev)) {
                        rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
                        rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
                                rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
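The repeated test_bit(CAPABILITY_..., &rt2x00dev->cap_flags) checks in this file are being replaced by rt2x00_has_cap_*() predicates. A minimal sketch of how such wrappers can be layered over the capability bitmap follows; the generic helper name and the plain unsigned int bit parameter are assumptions for illustration, not a copy of the rt2x00 headers.

/* Sketch only: thin predicates over the cap_flags bitmap. */
static inline bool rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
                                       unsigned int cap_bit)
{
        return test_bit(cap_bit, &rt2x00dev->cap_flags);
}

static inline bool rt2x00_has_cap_bt_coexist(struct rt2x00_dev *rt2x00dev)
{
        return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_BT_COEXIST);
}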
@@ -2029,13 +2026,6 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
                          rt2x00dev->default_ant.tx_chain_num <= 2);
        rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
 
-       rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
-       rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
-       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-       msleep(1);
-       rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
-       rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-
        rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
        rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -2141,7 +2131,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
        rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
        rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
-       if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+       if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
                if (rf->channel <= 14) {
                        rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
                        rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
@@ -2674,7 +2664,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
        if (rf->channel <= 14) {
                int idx = rf->channel-1;
 
-               if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+               if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
                        if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) {
                                /* r55/r59 value array of channel 1~14 */
                                static const char r55_bt_rev[] = {0x83, 0x83,
@@ -3152,6 +3142,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        case RF3322:
                rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
                break;
+       case RF3070:
        case RF5360:
        case RF5370:
        case RF5372:
@@ -3166,7 +3157,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
        }
 
-       if (rt2x00_rf(rt2x00dev, RF3290) ||
+       if (rt2x00_rf(rt2x00dev, RF3070) ||
+           rt2x00_rf(rt2x00dev, RF3290) ||
            rt2x00_rf(rt2x00dev, RF3322) ||
            rt2x00_rf(rt2x00dev, RF5360) ||
            rt2x00_rf(rt2x00dev, RF5370) ||
@@ -3218,8 +3210,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        if (rf->channel <= 14) {
                if (!rt2x00_rt(rt2x00dev, RT5390) &&
                    !rt2x00_rt(rt2x00dev, RT5392)) {
-                       if (test_bit(CAPABILITY_EXTERNAL_LNA_BG,
-                                    &rt2x00dev->cap_flags)) {
+                       if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
                                rt2800_bbp_write(rt2x00dev, 82, 0x62);
                                rt2800_bbp_write(rt2x00dev, 75, 0x46);
                        } else {
@@ -3244,7 +3235,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                if (rt2x00_rt(rt2x00dev, RT3593))
                        rt2800_bbp_write(rt2x00dev, 83, 0x9a);
 
-               if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+               if (rt2x00_has_cap_external_lna_a(rt2x00dev))
                        rt2800_bbp_write(rt2x00dev, 75, 0x46);
                else
                        rt2800_bbp_write(rt2x00dev, 75, 0x50);
@@ -3280,7 +3271,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                /* Turn on primary PAs */
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
                                   rf->channel > 14);
-               if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags))
+               if (rt2x00_has_cap_bt_coexist(rt2x00dev))
                        rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
                else
                        rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN,
@@ -3311,33 +3302,50 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
 
        rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);
 
-       if (rt2x00_rt(rt2x00dev, RT3572))
+       if (rt2x00_rt(rt2x00dev, RT3572)) {
                rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
 
+               /* AGC init */
+               if (rf->channel <= 14)
+                       reg = 0x1c + (2 * rt2x00dev->lna_gain);
+               else
+                       reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
+
+               rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+       }
+
        if (rt2x00_rt(rt2x00dev, RT3593)) {
-               if (rt2x00_is_usb(rt2x00dev)) {
-                       rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+               rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
 
-                       /* Band selection. GPIO #8 controls all paths */
+               /* Band selection */
+               if (rt2x00_is_usb(rt2x00dev) ||
+                   rt2x00_is_pcie(rt2x00dev)) {
+                       /* GPIO #8 controls all paths */
                        rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
                        if (rf->channel <= 14)
                                rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
                        else
                                rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
+               }
 
+               /* LNA PE control. */
+               if (rt2x00_is_usb(rt2x00dev)) {
+                       /* GPIO #4 controls PE0 and PE1,
+                        * GPIO #7 controls PE2
+                        */
                        rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
                        rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
 
-                       /* LNA PE control.
-                       * GPIO #4 controls PE0 and PE1,
-                       * GPIO #7 controls PE2
-                       */
                        rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
                        rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
-
-                       rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+               } else if (rt2x00_is_pcie(rt2x00dev)) {
+                       /* GPIO #4 controls PE0, PE1 and PE2 */
+                       rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
+                       rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
                }
 
+               rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+
                /* AGC init */
                if (rf->channel <= 14)
                        reg = 0x1c + 2 * rt2x00dev->lna_gain;
@@ -3565,7 +3573,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
 {
        int delta;
 
-       if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags))
+       if (rt2x00_has_cap_power_limit(rt2x00dev))
                return 0;
 
        /*
@@ -3594,7 +3602,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
        if (rt2x00_rt(rt2x00dev, RT3593))
                return min_t(u8, txpower, 0xc);
 
-       if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
+       if (rt2x00_has_cap_power_limit(rt2x00dev)) {
                /*
                 * Check if eirp txpower exceeds txpower_limit.
                 * We use OFDM 6M as criterion and its eirp txpower
@@ -4264,6 +4272,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
                rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
                break;
        case RF3053:
+       case RF3070:
        case RF3290:
        case RF5360:
        case RF5370:
@@ -4405,6 +4414,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
                    rt2x00_rt(rt2x00dev, RT3290) ||
                    rt2x00_rt(rt2x00dev, RT3390) ||
                    rt2x00_rt(rt2x00dev, RT3572) ||
+                   rt2x00_rt(rt2x00dev, RT3593) ||
                    rt2x00_rt(rt2x00dev, RT5390) ||
                    rt2x00_rt(rt2x00dev, RT5392) ||
                    rt2x00_rt(rt2x00dev, RT5592))
@@ -4412,8 +4422,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
                else
                        vgc = 0x2e + rt2x00dev->lna_gain;
        } else { /* 5GHZ band */
-               if (rt2x00_rt(rt2x00dev, RT3572))
-                       vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
+               if (rt2x00_rt(rt2x00dev, RT3593))
+                       vgc = 0x20 + (rt2x00dev->lna_gain * 5) / 3;
                else if (rt2x00_rt(rt2x00dev, RT5592))
                        vgc = 0x24 + (2 * rt2x00dev->lna_gain);
                else {
@@ -4431,11 +4441,17 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
                                  struct link_qual *qual, u8 vgc_level)
 {
        if (qual->vgc_level != vgc_level) {
-               if (rt2x00_rt(rt2x00dev, RT5592)) {
+               if (rt2x00_rt(rt2x00dev, RT3572) ||
+                   rt2x00_rt(rt2x00dev, RT3593)) {
+                       rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
+                                                      vgc_level);
+               } else if (rt2x00_rt(rt2x00dev, RT5592)) {
                        rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
                        rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
-               } else
+               } else {
                        rt2800_bbp_write(rt2x00dev, 66, vgc_level);
+               }
+
                qual->vgc_level = vgc_level;
                qual->vgc_level_reg = vgc_level;
        }
@@ -4454,17 +4470,35 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
 
        if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
                return;
-       /*
-        * When RSSI is better then -80 increase VGC level with 0x10, except
-        * for rt5592 chip.
+
+       /* When RSSI is better than a certain threshold, increase VGC
+        * with a chip-specific value in order to improve the balance
+        * between sensitivity and noise isolation.
         */
 
        vgc = rt2800_get_default_vgc(rt2x00dev);
 
-       if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
-               vgc += 0x20;
-       else if (qual->rssi > -80)
-               vgc += 0x10;
+       switch (rt2x00dev->chip.rt) {
+       case RT3572:
+       case RT3593:
+               if (qual->rssi > -65) {
+                       if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+                               vgc += 0x20;
+                       else
+                               vgc += 0x10;
+               }
+               break;
+
+       case RT5592:
+               if (qual->rssi > -65)
+                       vgc += 0x20;
+               break;
+
+       default:
+               if (qual->rssi > -80)
+                       vgc += 0x10;
+               break;
+       }
 
        rt2800_set_vgc(rt2x00dev, qual, vgc);
 }
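The rewritten link tuner above applies the VGC bump per RT chip instead of special-casing only RT5592. Below is a condensed, standalone sketch of that mapping; it is illustrative only, the function name is made up, and "base" stands in for the band- and LNA-dependent default returned by rt2800_get_default_vgc():

/* Sketch of the RSSI-triggered VGC bump; mirrors the switch in
 * rt2800_link_tuner() above.
 */
static u8 example_vgc_bump(unsigned int rt, int rssi, bool band_2ghz, u8 base)
{
        switch (rt) {
        case RT3572:
        case RT3593:
                if (rssi > -65)
                        base += band_2ghz ? 0x20 : 0x10;
                break;
        case RT5592:
                if (rssi > -65)
                        base += 0x20;
                break;
        default:
                if (rssi > -80)
                        base += 0x10;
                break;
        }
        return base;
}

For example, with a base of 0x30 an RT3593 ends up at 0x50 on 2.4 GHz and 0x40 on 5 GHz once the RSSI rises above -65 dBm.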
@@ -5489,7 +5523,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
        ant = (div_mode == 3) ? 1 : 0;
 
        /* check if this is a Bluetooth combo card */
-       if (test_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags)) {
+       if (rt2x00_has_cap_bt_coexist(rt2x00dev)) {
                u32 reg;
 
                rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
@@ -5798,7 +5832,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
            rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
            rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
            rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
-               if (!test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+               if (!rt2x00_has_cap_external_lna_bg(rt2x00dev))
                        rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
        }
 
@@ -5985,7 +6019,7 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
        rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
        rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
        rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
-       rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
+       rt2800_rfcsr_write(rt2x00dev, 25, 0x03);
        rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
 
        if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -6441,7 +6475,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
        rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
        rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
 
-       rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
        rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
        rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
        rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
@@ -6479,7 +6513,7 @@ static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
        rt2800_rfcsr_write(rt2x00dev, 56, 0x22);
        rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
        rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
-       rt2800_rfcsr_write(rt2x00dev, 59, 0x63);
+       rt2800_rfcsr_write(rt2x00dev, 59, 0x8f);
 
        rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
        if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F))
@@ -6499,7 +6533,6 @@ static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
        rt2800_rf_init_calibration(rt2x00dev, 2);
 
        rt2800_rfcsr_write(rt2x00dev, 1, 0x17);
-       rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
        rt2800_rfcsr_write(rt2x00dev, 3, 0x88);
        rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
        rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
@@ -6653,17 +6686,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
        u16 word;
 
        /*
-        * Initialize all registers.
+        * Initialize MAC registers.
         */
        if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
                     rt2800_init_registers(rt2x00dev)))
                return -EIO;
 
+       /*
+        * Wait for the BBP/RF to wake up.
+        */
        if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
                return -EIO;
 
        /*
-        * Send signal to firmware during boot time.
+        * Send the boot signal to initialize the firmware.
         */
        rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
        rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
@@ -6672,9 +6708,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
        rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
        msleep(1);
 
+       /*
+        * Make sure BBP is up and running.
+        */
        if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
                return -EIO;
 
+       /*
+        * Initialize BBP/RF registers.
+        */
        rt2800_init_bbp(rt2x00dev);
        rt2800_init_rfcsr(rt2x00dev);
 
@@ -7021,6 +7063,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        case RF3022:
        case RF3052:
        case RF3053:
+       case RF3070:
        case RF3290:
        case RF3320:
        case RF3322:
@@ -7203,7 +7246,7 @@ static const struct rf_channel rf_vals[] = {
 
 /*
  * RF value list for rt3xxx
- * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052)
+ * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052 & RF3053)
  */
 static const struct rf_channel rf_vals_3x[] = {
        {1,  241, 2, 2 },
@@ -7399,72 +7442,6 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
        {196, 83, 0, 12, 1},
 };
 
-static const struct rf_channel rf_vals_3053[] = {
-       /* Channel, N, R, K */
-       {1, 241, 2, 2},
-       {2, 241, 2, 7},
-       {3, 242, 2, 2},
-       {4, 242, 2, 7},
-       {5, 243, 2, 2},
-       {6, 243, 2, 7},
-       {7, 244, 2, 2},
-       {8, 244, 2, 7},
-       {9, 245, 2, 2},
-       {10, 245, 2, 7},
-       {11, 246, 2, 2},
-       {12, 246, 2, 7},
-       {13, 247, 2, 2},
-       {14, 248, 2, 4},
-
-       {36, 0x56, 0, 4},
-       {38, 0x56, 0, 6},
-       {40, 0x56, 0, 8},
-       {44, 0x57, 0, 0},
-       {46, 0x57, 0, 2},
-       {48, 0x57, 0, 4},
-       {52, 0x57, 0, 8},
-       {54, 0x57, 0, 10},
-       {56, 0x58, 0, 0},
-       {60, 0x58, 0, 4},
-       {62, 0x58, 0, 6},
-       {64, 0x58, 0, 8},
-
-       {100, 0x5B, 0, 8},
-       {102, 0x5B, 0, 10},
-       {104, 0x5C, 0, 0},
-       {108, 0x5C, 0, 4},
-       {110, 0x5C, 0, 6},
-       {112, 0x5C, 0, 8},
-
-       /* NOTE: Channel 114 has been removed intentionally.
-        * The EEPROM contains no TX power values for that,
-        * and it is disabled in the vendor driver as well.
-        */
-
-       {116, 0x5D, 0, 0},
-       {118, 0x5D, 0, 2},
-       {120, 0x5D, 0, 4},
-       {124, 0x5D, 0, 8},
-       {126, 0x5D, 0, 10},
-       {128, 0x5E, 0, 0},
-       {132, 0x5E, 0, 4},
-       {134, 0x5E, 0, 6},
-       {136, 0x5E, 0, 8},
-       {140, 0x5F, 0, 0},
-
-       {149, 0x5F, 0, 9},
-       {151, 0x5F, 0, 11},
-       {153, 0x60, 0, 1},
-       {157, 0x60, 0, 5},
-       {159, 0x60, 0, 7},
-       {161, 0x60, 0, 9},
-       {165, 0x61, 0, 1},
-       {167, 0x61, 0, 3},
-       {169, 0x61, 0, 5},
-       {171, 0x61, 0, 7},
-       {173, 0x61, 0, 9},
-};
-
 static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
        struct hw_mode_spec *spec = &rt2x00dev->spec;
@@ -7473,7 +7450,6 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        char *default_power2;
        char *default_power3;
        unsigned int i;
-       u16 eeprom;
        u32 reg;
 
        /*
@@ -7522,48 +7498,48 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        rt2x00dev->hw->max_report_rates = 7;
        rt2x00dev->hw->max_rate_tries = 1;
 
-       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
-
        /*
         * Initialize hw_mode information.
         */
-       spec->supported_bands = SUPPORT_BAND_2GHZ;
        spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
 
-       if (rt2x00_rf(rt2x00dev, RF2820) ||
-           rt2x00_rf(rt2x00dev, RF2720)) {
+       switch (rt2x00dev->chip.rf) {
+       case RF2720:
+       case RF2820:
                spec->num_channels = 14;
                spec->channels = rf_vals;
-       } else if (rt2x00_rf(rt2x00dev, RF2850) ||
-                  rt2x00_rf(rt2x00dev, RF2750)) {
-               spec->supported_bands |= SUPPORT_BAND_5GHZ;
+               break;
+
+       case RF2750:
+       case RF2850:
                spec->num_channels = ARRAY_SIZE(rf_vals);
                spec->channels = rf_vals;
-       } else if (rt2x00_rf(rt2x00dev, RF3020) ||
-                  rt2x00_rf(rt2x00dev, RF2020) ||
-                  rt2x00_rf(rt2x00dev, RF3021) ||
-                  rt2x00_rf(rt2x00dev, RF3022) ||
-                  rt2x00_rf(rt2x00dev, RF3290) ||
-                  rt2x00_rf(rt2x00dev, RF3320) ||
-                  rt2x00_rf(rt2x00dev, RF3322) ||
-                  rt2x00_rf(rt2x00dev, RF5360) ||
-                  rt2x00_rf(rt2x00dev, RF5370) ||
-                  rt2x00_rf(rt2x00dev, RF5372) ||
-                  rt2x00_rf(rt2x00dev, RF5390) ||
-                  rt2x00_rf(rt2x00dev, RF5392)) {
+               break;
+
+       case RF2020:
+       case RF3020:
+       case RF3021:
+       case RF3022:
+       case RF3070:
+       case RF3290:
+       case RF3320:
+       case RF3322:
+       case RF5360:
+       case RF5370:
+       case RF5372:
+       case RF5390:
+       case RF5392:
                spec->num_channels = 14;
                spec->channels = rf_vals_3x;
-       } else if (rt2x00_rf(rt2x00dev, RF3052)) {
-               spec->supported_bands |= SUPPORT_BAND_5GHZ;
+               break;
+
+       case RF3052:
+       case RF3053:
                spec->num_channels = ARRAY_SIZE(rf_vals_3x);
                spec->channels = rf_vals_3x;
-       } else if (rt2x00_rf(rt2x00dev, RF3053)) {
-               spec->supported_bands |= SUPPORT_BAND_5GHZ;
-               spec->num_channels = ARRAY_SIZE(rf_vals_3053);
-               spec->channels = rf_vals_3053;
-       } else if (rt2x00_rf(rt2x00dev, RF5592)) {
-               spec->supported_bands |= SUPPORT_BAND_5GHZ;
+               break;
 
+       case RF5592:
                rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, &reg);
                if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
                        spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
@@ -7572,11 +7548,16 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
                        spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20);
                        spec->channels = rf_vals_5592_xtal20;
                }
+               break;
        }
 
        if (WARN_ON_ONCE(!spec->channels))
                return -ENODEV;
 
+       spec->supported_bands = SUPPORT_BAND_2GHZ;
+       if (spec->num_channels > 14)
+               spec->supported_bands |= SUPPORT_BAND_5GHZ;
+
        /*
         * Initialize HT information.
         */
@@ -7591,22 +7572,21 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
            IEEE80211_HT_CAP_SGI_20 |
            IEEE80211_HT_CAP_SGI_40;
 
-       if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) >= 2)
+       if (rt2x00dev->default_ant.tx_chain_num >= 2)
                spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
 
-       spec->ht.cap |=
-           rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) <<
-               IEEE80211_HT_CAP_RX_STBC_SHIFT;
+       spec->ht.cap |= rt2x00dev->default_ant.rx_chain_num <<
+                       IEEE80211_HT_CAP_RX_STBC_SHIFT;
 
        spec->ht.ampdu_factor = 3;
        spec->ht.ampdu_density = 4;
        spec->ht.mcs.tx_params =
            IEEE80211_HT_MCS_TX_DEFINED |
            IEEE80211_HT_MCS_TX_RX_DIFF |
-           ((rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) - 1) <<
-               IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+           ((rt2x00dev->default_ant.tx_chain_num - 1) <<
+            IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
 
-       switch (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH)) {
+       switch (rt2x00dev->default_ant.rx_chain_num) {
        case 3:
                spec->ht.mcs.rx_mask[2] = 0xff;
        case 2:
@@ -7671,6 +7651,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        case RF3320:
        case RF3052:
        case RF3053:
+       case RF3070:
        case RF3290:
        case RF5360:
        case RF5370:
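Earlier in this file the HT capability setup was switched from re-reading EEPROM_NIC_CONF0 to the already parsed default_ant chain counts. The sketch below illustrates how the RX MCS mask follows from the RX chain count; the "case 1" arm lies outside the hunk shown above and is assumed here, and the function name is made up:

static void example_fill_rx_mask(u8 *rx_mask, unsigned int rx_chain_num)
{
        /* Each additional RX chain enables another block of eight MCS
         * rates through the fall-through.
         */
        switch (rx_chain_num) {
        case 3:
                rx_mask[2] = 0xff;      /* MCS 16-23 */
        case 2:
                rx_mask[1] = 0xff;      /* MCS 8-15 */
        case 1:
                rx_mask[0] = 0xff;      /* MCS 0-7 */
                break;
        }
}

With tx_chain_num == 2 the same hunk also sets IEEE80211_HT_CAP_TX_STBC and advertises the second TX stream via the (tx_chain_num - 1) value in the MAX_STREAMS field.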
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.c b/drivers/net/wireless/rt2x00/rt2800mmio.c
new file mode 100644 (file)
index 0000000..ae15228
--- /dev/null
@@ -0,0 +1,873 @@
+/*     Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ *     Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ *     Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ *     Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ *     Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ *     Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ *     Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ *     Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ *     <http://rt2x00.serialmonkey.com>
+ *
+ *     This program is free software; you can redistribute it and/or modify
+ *     it under the terms of the GNU General Public License as published by
+ *     the Free Software Foundation; either version 2 of the License, or
+ *     (at your option) any later version.
+ *
+ *     This program is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public License
+ *     along with this program; if not, write to the
+ *     Free Software Foundation, Inc.,
+ *     59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*     Module: rt2800mmio
+ *     Abstract: rt2800 MMIO device routines.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+#include "rt2800.h"
+#include "rt2800lib.h"
+#include "rt2800mmio.h"
+
+/*
+ * TX descriptor initialization
+ */
+__le32 *rt2800mmio_get_txwi(struct queue_entry *entry)
+{
+       return (__le32 *) entry->skb->data;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi);
+
+void rt2800mmio_write_tx_desc(struct queue_entry *entry,
+                             struct txentry_desc *txdesc)
+{
+       struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+       struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+       __le32 *txd = entry_priv->desc;
+       u32 word;
+       const unsigned int txwi_size = entry->queue->winfo_size;
+
+       /*
+        * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
+        * must contain a TXWI structure + 802.11 header + padding + 802.11
+        * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI and
+        * SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
+        * data. This means that LAST_SEC0 is always 0.
+        */
+
+       /*
+        * Initialize TX descriptor
+        */
+       word = 0;
+       rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
+       rt2x00_desc_write(txd, 0, word);
+
+       word = 0;
+       rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
+       rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
+                          !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
+       rt2x00_set_field32(&word, TXD_W1_BURST,
+                          test_bit(ENTRY_TXD_BURST, &txdesc->flags));
+       rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
+       rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
+       rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
+       rt2x00_desc_write(txd, 1, word);
+
+       word = 0;
+       rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
+                          skbdesc->skb_dma + txwi_size);
+       rt2x00_desc_write(txd, 2, word);
+
+       word = 0;
+       rt2x00_set_field32(&word, TXD_W3_WIV,
+                          !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
+       rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
+       rt2x00_desc_write(txd, 3, word);
+
+       /*
+        * Register descriptor details in skb frame descriptor.
+        */
+       skbdesc->desc = txd;
+       skbdesc->desc_len = TXD_DESC_SIZE;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc);
+
+/*
+ * RX control handlers
+ */
+void rt2800mmio_fill_rxdone(struct queue_entry *entry,
+                           struct rxdone_entry_desc *rxdesc)
+{
+       struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+       __le32 *rxd = entry_priv->desc;
+       u32 word;
+
+       rt2x00_desc_read(rxd, 3, &word);
+
+       if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
+               rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
+
+       /*
+        * Unfortunately we don't know the cipher type used during
+        * decryption. This prevents us from correct providing
+        * decryption. This prevents us from providing correct
+        * statistics through debugfs.
+       rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
+
+       if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
+               /*
+                * Hardware has stripped IV/EIV data from 802.11 frame during
+                * decryption. Unfortunately the descriptor doesn't contain
+                * any fields with the EIV/IV data either, so they can't
+                * be restored by rt2x00lib.
+                */
+               rxdesc->flags |= RX_FLAG_IV_STRIPPED;
+
+               /*
+                * The hardware has already checked the Michael Mic and has
+                * stripped it from the frame. Signal this to mac80211.
+                */
+               rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
+
+               if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
+                       rxdesc->flags |= RX_FLAG_DECRYPTED;
+               else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
+                       rxdesc->flags |= RX_FLAG_MMIC_ERROR;
+       }
+
+       if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
+               rxdesc->dev_flags |= RXDONE_MY_BSS;
+
+       if (rt2x00_get_field32(word, RXD_W3_L2PAD))
+               rxdesc->dev_flags |= RXDONE_L2PAD;
+
+       /*
+        * Process the RXWI structure that is at the start of the buffer.
+        */
+       rt2800_process_rxwi(entry, rxdesc);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone);
+
+/*
+ * Interrupt functions.
+ */
+static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev)
+{
+       struct ieee80211_conf conf = { .flags = 0 };
+       struct rt2x00lib_conf libconf = { .conf = &conf };
+
+       rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
+}
+
+static bool rt2800mmio_txdone_entry_check(struct queue_entry *entry, u32 status)
+{
+       __le32 *txwi;
+       u32 word;
+       int wcid, tx_wcid;
+
+       wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
+
+       txwi = rt2800_drv_get_txwi(entry);
+       rt2x00_desc_read(txwi, 1, &word);
+       tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+
+       return (tx_wcid == wcid);
+}
+
+static bool rt2800mmio_txdone_find_entry(struct queue_entry *entry, void *data)
+{
+       u32 status = *(u32 *)data;
+
+       /*
+        * rt2800pci hardware might reorder frames when exchanging traffic
+        * with multiple BA enabled STAs.
+        * with multiple BA-enabled STAs.
+        * For example, a tx queue
+        *    [ STA1 | STA2 | STA1 | STA2 ]
+        * can result in tx status reports
+        *    [ STA1 | STA1 | STA2 | STA2 ]
+        * when the hw decides to aggregate the frames for STA1 into one AMPDU.
+        *
+        * To mitigate this effect, associate the tx status to the first frame
+        * in the tx queue with a matching wcid.
+        */
+       if (rt2800mmio_txdone_entry_check(entry, status) &&
+           !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+               /*
+                * Got a matching frame, associate the tx status with
+                * the frame
+                */
+               entry->status = status;
+               set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+               return true;
+       }
+
+       /* Check the next frame */
+       return false;
+}
+
+static bool rt2800mmio_txdone_match_first(struct queue_entry *entry, void *data)
+{
+       u32 status = *(u32 *)data;
+
+       /*
+        * Find the first frame without tx status and assign this status to it
+        * regardless if it matches or not.
+        * regardless of whether it matches or not.
+       if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+               /*
+                * Got a matching frame, associate the tx status with
+                * Found the first frame without a tx status,
+                * so associate this status with it
+               entry->status = status;
+               set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+               return true;
+       }
+
+       /* Check the next frame */
+       return false;
+}
+
+static bool rt2800mmio_txdone_release_entries(struct queue_entry *entry,
+                                             void *data)
+{
+       if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+               rt2800_txdone_entry(entry, entry->status,
+                                   rt2800mmio_get_txwi(entry));
+               return false;
+       }
+
+       /* No more frames to release */
+       return true;
+}
+
+static bool rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
+{
+       struct data_queue *queue;
+       u32 status;
+       u8 qid;
+       int max_tx_done = 16;
+
+       while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
+               qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
+               if (unlikely(qid >= QID_RX)) {
+                       /*
+                        * Unknown queue, this shouldn't happen. Just drop
+                        * this tx status.
+                        */
+                       rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
+                                   qid);
+                       break;
+               }
+
+               queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
+               if (unlikely(queue == NULL)) {
+                       /*
+                        * The queue is NULL, this shouldn't happen. Stop
+                        * processing here and drop the tx status
+                        * processing here and drop the tx status.
+                       rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
+                                   qid);
+                       break;
+               }
+
+               if (unlikely(rt2x00queue_empty(queue))) {
+                       /*
+                        * The queue is empty. Stop processing here
+                        * and drop the tx status.
+                        */
+                       rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+                                   qid);
+                       break;
+               }
+
+               /*
+                * Let's associate this tx status with the first
+                * matching frame.
+                */
+               if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+                                               Q_INDEX, &status,
+                                               rt2800mmio_txdone_find_entry)) {
+                       /*
+                        * We cannot match the tx status to any frame, so just
+                        * use the first one.
+                        */
+                       if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+                                                       Q_INDEX, &status,
+                                                       rt2800mmio_txdone_match_first)) {
+                               rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
+                                           qid);
+                               break;
+                       }
+               }
+
+               /*
+                * Release all frames with a valid tx status.
+                */
+               rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+                                          Q_INDEX, NULL,
+                                          rt2800mmio_txdone_release_entries);
+
+               if (--max_tx_done == 0)
+                       break;
+       }
+
+       return !max_tx_done;
+}
+
+static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
+                                              struct rt2x00_field32 irq_field)
+{
+       u32 reg;
+
+       /*
+        * Enable a single interrupt. The interrupt mask register
+        * access needs locking.
+        */
+       spin_lock_irq(&rt2x00dev->irqmask_lock);
+       rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+       rt2x00_set_field32(&reg, irq_field, 1);
+       rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       spin_unlock_irq(&rt2x00dev->irqmask_lock);
+}
+
+void rt2800mmio_txstatus_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       if (rt2800mmio_txdone(rt2x00dev))
+               tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+
+       /*
+        * No need to enable the tx status interrupt here as we always
+        * leave it enabled to minimize the possibility of a tx status
+        * register overflow. See comment in interrupt handler.
+        */
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
+
+void rt2800mmio_pretbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2x00lib_pretbtt(rt2x00dev);
+       if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+               rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
+
+void rt2800mmio_tbtt_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+       u32 reg;
+
+       rt2x00lib_beacondone(rt2x00dev);
+
+       if (rt2x00dev->intf_ap_count) {
+               /*
+                * The rt2800pci hardware tbtt timer is off by 1us per tbtt,
+                * causing beacon skew and, as a result, problems with some
+                * power-saving clients over time. Shorten the beacon
+                * interval every 64 beacons by 64us to mitigate this effect.
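+                * (The BEACON_INTERVAL field written below holds the
+                * interval in 1/16 TU units - hence the "* 16" - so
+                * dropping one unit for a single beacon shortens it by
+                * 64us, cancelling the 1us-per-tbtt drift accumulated
+                * over the preceding 64 beacons.)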
+                */
+               if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
+                       rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+                       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+                                          (rt2x00dev->beacon_int * 16) - 1);
+                       rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+               } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
+                       rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+                       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
+                                          (rt2x00dev->beacon_int * 16));
+                       rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+               }
+               drv_data->tbtt_tick++;
+               drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
+       }
+
+       if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+               rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
+
+void rt2800mmio_rxdone_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       if (rt2x00mmio_rxdone(rt2x00dev))
+               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+       else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+               rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
+
+void rt2800mmio_autowake_tasklet(unsigned long data)
+{
+       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+       rt2800mmio_wakeup(rt2x00dev);
+       if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+               rt2800mmio_enable_interrupt(rt2x00dev,
+                                           INT_MASK_CSR_AUTO_WAKEUP);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
+
+static void rt2800mmio_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+       u32 status;
+       int i;
+
+       /*
+        * The TX_FIFO_STATUS interrupt needs special care. We should
+        * read TX_STA_FIFO immediately, as otherwise the register can
+        * overflow and we would lose status reports.
+        *
+        * Hence, read the TX_STA_FIFO register and copy all tx status
+        * reports into a kernel FIFO which is handled in the txstatus
+        * tasklet. We use a tasklet to process the tx status reports
+        * because we can schedule the tasklet multiple times (when the
+        * interrupt fires again during tx status processing).
+        *
+        * Furthermore, we don't disable the TX_FIFO_STATUS
+        * interrupt here but leave it enabled so that the TX_STA_FIFO
+        * can also be read while the tx status tasklet is running.
+        *
+        * Since we have only one producer and one consumer we don't
+        * need to lock the kfifo.
+        */
+       for (i = 0; i < rt2x00dev->tx->limit; i++) {
+               rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
+
+               if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
+                       break;
+
+               if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
+                       rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
+                       break;
+               }
+       }
+
+       /* Schedule the tasklet for processing the tx status. */
+       tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+}
+
+irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
+{
+       struct rt2x00_dev *rt2x00dev = dev_instance;
+       u32 reg, mask;
+
+       /* Read status and ACK all interrupts */
+       rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+       rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+
+       if (!reg)
+               return IRQ_NONE;
+
+       if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+               return IRQ_HANDLED;
+
+       /*
+        * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
+        * for interrupts and interrupt masks we can just use the value of
+        * INT_SOURCE_CSR to create the interrupt mask.
+        */
+       mask = ~reg;
+
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+               rt2800mmio_txstatus_interrupt(rt2x00dev);
+               /*
+                * Never disable the TX_FIFO_STATUS interrupt.
+                */
+               rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+       }
+
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
+               tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
+
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
+               tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
+
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
+               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
+
+       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
+               tasklet_schedule(&rt2x00dev->autowake_tasklet);
+
+       /*
+        * Disable all interrupts for which a tasklet was scheduled right now;
+        * the tasklets will re-enable the appropriate interrupts.
+        */
+       spin_lock(&rt2x00dev->irqmask_lock);
+       rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
+       reg &= mask;
+       rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       spin_unlock(&rt2x00dev->irqmask_lock);
+
+       return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_interrupt);
+
+void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
+                          enum dev_state state)
+{
+       u32 reg;
+       unsigned long flags;
+
+       /*
+        * When interrupts are being enabled, the interrupt source
+        * register should be cleared to ensure a clean state.
+        */
+       if (state == STATE_RADIO_IRQ_ON) {
+               rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
+               rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
+       }
+
+       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+       reg = 0;
+       if (state == STATE_RADIO_IRQ_ON) {
+               rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+       }
+       rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
+       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
+
+       if (state == STATE_RADIO_IRQ_OFF) {
+               /*
+                * Wait for possibly running tasklets to finish.
+                */
+               tasklet_kill(&rt2x00dev->txstatus_tasklet);
+               tasklet_kill(&rt2x00dev->rxdone_tasklet);
+               tasklet_kill(&rt2x00dev->autowake_tasklet);
+               tasklet_kill(&rt2x00dev->tbtt_tasklet);
+               tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq);
+
+/*
+ * Queue handlers.
+ */
+void rt2800mmio_start_queue(struct data_queue *queue)
+{
+       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+       u32 reg;
+
+       switch (queue->qid) {
+       case QID_RX:
+               rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+               rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
+               rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+               break;
+       case QID_BEACON:
+               rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+               rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
+               rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
+               rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
+               rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+               rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+               rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
+               rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
+               break;
+       default:
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
+
+void rt2800mmio_kick_queue(struct data_queue *queue)
+{
+       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+       struct queue_entry *entry;
+
+       switch (queue->qid) {
+       case QID_AC_VO:
+       case QID_AC_VI:
+       case QID_AC_BE:
+       case QID_AC_BK:
+               entry = rt2x00queue_get_entry(queue, Q_INDEX);
+               rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
+                                         entry->entry_idx);
+               break;
+       case QID_MGMT:
+               entry = rt2x00queue_get_entry(queue, Q_INDEX);
+               rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
+                                         entry->entry_idx);
+               break;
+       default:
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue);
+
+void rt2800mmio_stop_queue(struct data_queue *queue)
+{
+       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+       u32 reg;
+
+       switch (queue->qid) {
+       case QID_RX:
+               rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
+               rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
+               rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+               break;
+       case QID_BEACON:
+               rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
+               rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
+               rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
+               rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
+               rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
+
+               rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
+               rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
+               rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
+
+               /*
+                * Wait for the current invocation to finish. The tasklet
+                * won't be scheduled anymore afterwards since we disabled
+                * the TBTT and PRE TBTT timers.
+                */
+               tasklet_kill(&rt2x00dev->tbtt_tasklet);
+               tasklet_kill(&rt2x00dev->pretbtt_tasklet);
+
+               break;
+       default:
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue);
+
+void rt2800mmio_queue_init(struct data_queue *queue)
+{
+       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+       unsigned short txwi_size, rxwi_size;
+
+       rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
+
+       switch (queue->qid) {
+       case QID_RX:
+               queue->limit = 128;
+               queue->data_size = AGGREGATION_SIZE;
+               queue->desc_size = RXD_DESC_SIZE;
+               queue->winfo_size = rxwi_size;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
+
+       case QID_AC_VO:
+       case QID_AC_VI:
+       case QID_AC_BE:
+       case QID_AC_BK:
+               queue->limit = 64;
+               queue->data_size = AGGREGATION_SIZE;
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->winfo_size = txwi_size;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
+
+       case QID_BEACON:
+               queue->limit = 8;
+               queue->data_size = 0; /* No DMA required for beacons */
+               queue->desc_size = TXD_DESC_SIZE;
+               queue->winfo_size = txwi_size;
+               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
+               break;
+
+       case QID_ATIM:
+               /* fallthrough */
+       default:
+               BUG();
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_queue_init);
+
+/*
+ * Initialization functions.
+ */
+bool rt2800mmio_get_entry_state(struct queue_entry *entry)
+{
+       struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+       u32 word;
+
+       if (entry->queue->qid == QID_RX) {
+               rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+               return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
+       } else {
+               rt2x00_desc_read(entry_priv->desc, 1, &word);
+
+               return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state);
+
+void rt2800mmio_clear_entry(struct queue_entry *entry)
+{
+       struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
+       struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
+       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
+       u32 word;
+
+       if (entry->queue->qid == QID_RX) {
+               rt2x00_desc_read(entry_priv->desc, 0, &word);
+               rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
+               rt2x00_desc_write(entry_priv->desc, 0, word);
+
+               rt2x00_desc_read(entry_priv->desc, 1, &word);
+               rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
+               rt2x00_desc_write(entry_priv->desc, 1, word);
+
+               /*
+                * Set RX IDX in register to inform hardware that we have
+                * handled this entry and it is available for reuse.
+                */
+               rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
+                                         entry->entry_idx);
+       } else {
+               rt2x00_desc_read(entry_priv->desc, 1, &word);
+               rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
+               rt2x00_desc_write(entry_priv->desc, 1, word);
+       }
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
+
+int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev)
+{
+       struct queue_entry_priv_mmio *entry_priv;
+
+       /*
+        * Initialize registers.
+        */
+       entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
+       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
+                                 entry_priv->desc_dma);
+       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
+                                 rt2x00dev->tx[0].limit);
+       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
+
+       entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
+       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
+                                 entry_priv->desc_dma);
+       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
+                                 rt2x00dev->tx[1].limit);
+       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
+
+       entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
+       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
+                                 entry_priv->desc_dma);
+       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
+                                 rt2x00dev->tx[2].limit);
+       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
+
+       entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
+       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
+                                 entry_priv->desc_dma);
+       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
+                                 rt2x00dev->tx[3].limit);
+       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
+
+       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
+
+       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
+
+       entry_priv = rt2x00dev->rx->entries[0].priv_data;
+       rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
+                                 entry_priv->desc_dma);
+       rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
+                                 rt2x00dev->rx[0].limit);
+       rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
+                                 rt2x00dev->rx[0].limit - 1);
+       rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
+
+       rt2800_disable_wpdma(rt2x00dev);
+
+       rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_init_queues);
+
+int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev)
+{
+       u32 reg;
+
+       /*
+        * Reset DMA indexes
+        */
+       rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
+       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
+       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
+       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
+       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
+       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
+       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
+       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
+       rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
+
+       rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
+       rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
+
+       if (rt2x00_is_pcie(rt2x00dev) &&
+           (rt2x00_rt(rt2x00dev, RT3090) ||
+            rt2x00_rt(rt2x00dev, RT3390) ||
+            rt2x00_rt(rt2x00dev, RT3572) ||
+            rt2x00_rt(rt2x00dev, RT3593) ||
+            rt2x00_rt(rt2x00dev, RT5390) ||
+            rt2x00_rt(rt2x00dev, RT5392) ||
+            rt2x00_rt(rt2x00dev, RT5592))) {
+               rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
+               rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
+               rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
+               rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
+       }
+
+       rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
+
+       reg = 0;
+       rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
+       rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
+       rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
+
+       rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_init_registers);
+
+/*
+ * Device state switch handlers.
+ */
+int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
+{
+       /* Wait for DMA, ignore error until we initialize queues. */
+       rt2800_wait_wpdma_ready(rt2x00dev);
+
+       if (unlikely(rt2800mmio_init_queues(rt2x00dev)))
+               return -EIO;
+
+       return rt2800_enable_radio(rt2x00dev);
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("rt2800 MMIO library");
+MODULE_LICENSE("GPL");
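The handlers exported above are meant to be shared by the MMIO based bus drivers. rt2x00lib normally hooks them up through the bus driver's ops tables, so the snippet below is only a sketch demonstrating the exported prototypes; the function name, IRQ name and IRQF_SHARED choice are illustrative assumptions:

static int example_mmio_setup(struct rt2x00_dev *rt2x00dev, int irq)
{
        int ret;

        /* Tx status and rx done processing run in tasklets. */
        tasklet_init(&rt2x00dev->txstatus_tasklet,
                     rt2800mmio_txstatus_tasklet, (unsigned long)rt2x00dev);
        tasklet_init(&rt2x00dev->rxdone_tasklet,
                     rt2800mmio_rxdone_tasklet, (unsigned long)rt2x00dev);

        /* The interrupt handler demultiplexes INT_SOURCE_CSR. */
        ret = request_irq(irq, rt2800mmio_interrupt, IRQF_SHARED,
                          "rt2800mmio-example", rt2x00dev);
        if (ret)
                return ret;

        rt2800mmio_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON);
        return 0;
}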
diff --git a/drivers/net/wireless/rt2x00/rt2800mmio.h b/drivers/net/wireless/rt2x00/rt2800mmio.h
new file mode 100644 (file)
index 0000000..6a10de3
--- /dev/null
@@ -0,0 +1,165 @@
+/*     Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ *     Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ *     Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ *     Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ *     Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ *     Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ *     Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ *     Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ *     <http://rt2x00.serialmonkey.com>
+ *
+ *     This program is free software; you can redistribute it and/or modify
+ *     it under the terms of the GNU General Public License as published by
+ *     the Free Software Foundation; either version 2 of the License, or
+ *     (at your option) any later version.
+ *
+ *     This program is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public License
+ *     along with this program; if not, write to the
+ *     Free Software Foundation, Inc.,
+ *     59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*     Module: rt2800mmio
+ *     Abstract: forward declarations for the rt2800mmio module.
+ */
+
+#ifndef RT2800MMIO_H
+#define RT2800MMIO_H
+
+/*
+ * Queue register offset macros
+ */
+#define TX_QUEUE_REG_OFFSET    0x10
+#define TX_BASE_PTR(__x)       (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_MAX_CNT(__x)                (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_CTX_IDX(__x)                (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+#define TX_DTX_IDX(__x)                (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
+
+/*
+ * DMA descriptor defines.
+ */
+#define TXD_DESC_SIZE                  (4 * sizeof(__le32))
+#define RXD_DESC_SIZE                  (4 * sizeof(__le32))
+
+/*
+ * TX descriptor format for TX, PRIO and Beacon Ring.
+ */
+
+/*
+ * Word0
+ */
+#define TXD_W0_SD_PTR0                 FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define TXD_W1_SD_LEN1                 FIELD32(0x00003fff)
+#define TXD_W1_LAST_SEC1               FIELD32(0x00004000)
+#define TXD_W1_BURST                   FIELD32(0x00008000)
+#define TXD_W1_SD_LEN0                 FIELD32(0x3fff0000)
+#define TXD_W1_LAST_SEC0               FIELD32(0x40000000)
+#define TXD_W1_DMA_DONE                        FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define TXD_W2_SD_PTR1                 FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
+ * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
+ *       0:MGMT, 1:HCCA, 2:EDCA
+ */
+#define TXD_W3_WIV                     FIELD32(0x01000000)
+#define TXD_W3_QSEL                    FIELD32(0x06000000)
+#define TXD_W3_TCO                     FIELD32(0x20000000)
+#define TXD_W3_UCO                     FIELD32(0x40000000)
+#define TXD_W3_ICO                     FIELD32(0x80000000)
+
+/*
+ * RX descriptor format for RX Ring.
+ */
+
+/*
+ * Word0
+ */
+#define RXD_W0_SDP0                    FIELD32(0xffffffff)
+
+/*
+ * Word1
+ */
+#define RXD_W1_SDL1                    FIELD32(0x00003fff)
+#define RXD_W1_SDL0                    FIELD32(0x3fff0000)
+#define RXD_W1_LS0                     FIELD32(0x40000000)
+#define RXD_W1_DMA_DONE                        FIELD32(0x80000000)
+
+/*
+ * Word2
+ */
+#define RXD_W2_SDP1                    FIELD32(0xffffffff)
+
+/*
+ * Word3
+ * AMSDU: RX with 802.3 header, not 802.11 header.
+ * DECRYPTED: This frame has been decrypted by the hardware.
+ */
+#define RXD_W3_BA                      FIELD32(0x00000001)
+#define RXD_W3_DATA                    FIELD32(0x00000002)
+#define RXD_W3_NULLDATA                        FIELD32(0x00000004)
+#define RXD_W3_FRAG                    FIELD32(0x00000008)
+#define RXD_W3_UNICAST_TO_ME           FIELD32(0x00000010)
+#define RXD_W3_MULTICAST               FIELD32(0x00000020)
+#define RXD_W3_BROADCAST               FIELD32(0x00000040)
+#define RXD_W3_MY_BSS                  FIELD32(0x00000080)
+#define RXD_W3_CRC_ERROR               FIELD32(0x00000100)
+#define RXD_W3_CIPHER_ERROR            FIELD32(0x00000600)
+#define RXD_W3_AMSDU                   FIELD32(0x00000800)
+#define RXD_W3_HTC                     FIELD32(0x00001000)
+#define RXD_W3_RSSI                    FIELD32(0x00002000)
+#define RXD_W3_L2PAD                   FIELD32(0x00004000)
+#define RXD_W3_AMPDU                   FIELD32(0x00008000)
+#define RXD_W3_DECRYPTED               FIELD32(0x00010000)
+#define RXD_W3_PLCP_SIGNAL             FIELD32(0x00020000)
+#define RXD_W3_PLCP_RSSI               FIELD32(0x00040000)
+
+/* TX descriptor initialization */
+__le32 *rt2800mmio_get_txwi(struct queue_entry *entry);
+void rt2800mmio_write_tx_desc(struct queue_entry *entry,
+                             struct txentry_desc *txdesc);
+
+/* RX control handlers */
+void rt2800mmio_fill_rxdone(struct queue_entry *entry,
+                           struct rxdone_entry_desc *rxdesc);
+
+/* Interrupt functions */
+void rt2800mmio_txstatus_tasklet(unsigned long data);
+void rt2800mmio_pretbtt_tasklet(unsigned long data);
+void rt2800mmio_tbtt_tasklet(unsigned long data);
+void rt2800mmio_rxdone_tasklet(unsigned long data);
+void rt2800mmio_autowake_tasklet(unsigned long data);
+irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance);
+void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
+                          enum dev_state state);
+
+/* Queue handlers */
+void rt2800mmio_start_queue(struct data_queue *queue);
+void rt2800mmio_kick_queue(struct data_queue *queue);
+void rt2800mmio_stop_queue(struct data_queue *queue);
+void rt2800mmio_queue_init(struct data_queue *queue);
+
+/* Initialization functions */
+bool rt2800mmio_get_entry_state(struct queue_entry *entry);
+void rt2800mmio_clear_entry(struct queue_entry *entry);
+int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev);
+int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev);
+
+/* Device state switch handlers. */
+int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev);
+
+#endif /* RT2800MMIO_H */
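
The TX_QUEUE_REG_OFFSET / TX_*(__x) macros near the top of this new header compute per-queue register addresses from a fixed 0x10-byte stride. A minimal standalone sketch of that arithmetic (not part of the patch; the TX_CTX_IDX0 base below is a placeholder for illustration, the real value comes from rt2800.h):

/* Standalone illustration of the per-queue register stride. */
#include <stdio.h>

#define TX_QUEUE_REG_OFFSET    0x10
#define TX_CTX_IDX0            0x0238  /* assumed base, for the example only */
#define TX_CTX_IDX(__x)        (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))

int main(void)
{
        unsigned int qid;

        /* Each TX queue's context index register sits one 0x10-byte
         * stride above the previous queue's register. */
        for (qid = 0; qid < 4; qid++)
                printf("TX_CTX_IDX(%u) = 0x%04x\n", qid, TX_CTX_IDX(qid));
        return 0;
}

The same stride applies to TX_BASE_PTR(), TX_MAX_CNT() and TX_DTX_IDX().
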
index f8f2abbfbb6554f1f432c3e7a8b51facb5d9adec..b504455b4fec10c69c4cc09c1f44be4b92ac3be4 100644 (file)
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/platform_device.h>
 #include <linux/eeprom_93cx6.h>
 
 #include "rt2x00.h"
 #include "rt2x00mmio.h"
 #include "rt2x00pci.h"
-#include "rt2x00soc.h"
 #include "rt2800lib.h"
+#include "rt2800mmio.h"
 #include "rt2800.h"
 #include "rt2800pci.h"
 
@@ -90,27 +89,6 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
        rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
 }
 
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
-{
-       void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
-
-       if (!base_addr)
-               return -ENOMEM;
-
-       memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
-
-       iounmap(base_addr);
-       return 0;
-}
-#else
-static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
-{
-       return -ENOMEM;
-}
-#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
-
-#ifdef CONFIG_PCI
 static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
 {
        struct rt2x00_dev *rt2x00dev = eeprom->data;
@@ -183,112 +161,6 @@ static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
 {
        return rt2800_read_eeprom_efuse(rt2x00dev);
 }
-#else
-static inline int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
-{
-       return -EOPNOTSUPP;
-}
-
-static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
-{
-       return 0;
-}
-
-static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
-{
-       return -EOPNOTSUPP;
-}
-#endif /* CONFIG_PCI */
-
-/*
- * Queue handlers.
- */
-static void rt2800pci_start_queue(struct data_queue *queue)
-{
-       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-       u32 reg;
-
-       switch (queue->qid) {
-       case QID_RX:
-               rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
-               rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
-               rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-               break;
-       case QID_BEACON:
-               rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-               rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
-               rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
-               rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
-               rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-               rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
-               rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1);
-               rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
-               break;
-       default:
-               break;
-       }
-}
-
-static void rt2800pci_kick_queue(struct data_queue *queue)
-{
-       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-       struct queue_entry *entry;
-
-       switch (queue->qid) {
-       case QID_AC_VO:
-       case QID_AC_VI:
-       case QID_AC_BE:
-       case QID_AC_BK:
-               entry = rt2x00queue_get_entry(queue, Q_INDEX);
-               rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
-                                         entry->entry_idx);
-               break;
-       case QID_MGMT:
-               entry = rt2x00queue_get_entry(queue, Q_INDEX);
-               rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5),
-                                         entry->entry_idx);
-               break;
-       default:
-               break;
-       }
-}
-
-static void rt2800pci_stop_queue(struct data_queue *queue)
-{
-       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-       u32 reg;
-
-       switch (queue->qid) {
-       case QID_RX:
-               rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
-               rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
-               rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-               break;
-       case QID_BEACON:
-               rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-               rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
-               rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
-               rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
-               rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-
-               rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN, &reg);
-               rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0);
-               rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg);
-
-               /*
-                * Wait for current invocation to finish. The tasklet
-                * won't be scheduled anymore afterwards since we disabled
-                * the TBTT and PRE TBTT timer.
-                */
-               tasklet_kill(&rt2x00dev->tbtt_tasklet);
-               tasklet_kill(&rt2x00dev->pretbtt_tasklet);
-
-               break;
-       default:
-               break;
-       }
-}
 
 /*
  * Firmware functions
@@ -331,218 +203,14 @@ static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
        return 0;
 }
 
-/*
- * Initialization functions.
- */
-static bool rt2800pci_get_entry_state(struct queue_entry *entry)
-{
-       struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
-       u32 word;
-
-       if (entry->queue->qid == QID_RX) {
-               rt2x00_desc_read(entry_priv->desc, 1, &word);
-
-               return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
-       } else {
-               rt2x00_desc_read(entry_priv->desc, 1, &word);
-
-               return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
-       }
-}
-
-static void rt2800pci_clear_entry(struct queue_entry *entry)
-{
-       struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
-       struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
-       struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
-       u32 word;
-
-       if (entry->queue->qid == QID_RX) {
-               rt2x00_desc_read(entry_priv->desc, 0, &word);
-               rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
-               rt2x00_desc_write(entry_priv->desc, 0, word);
-
-               rt2x00_desc_read(entry_priv->desc, 1, &word);
-               rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
-               rt2x00_desc_write(entry_priv->desc, 1, word);
-
-               /*
-                * Set RX IDX in register to inform hardware that we have
-                * handled this entry and it is available for reuse again.
-                */
-               rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
-                                         entry->entry_idx);
-       } else {
-               rt2x00_desc_read(entry_priv->desc, 1, &word);
-               rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
-               rt2x00_desc_write(entry_priv->desc, 1, word);
-       }
-}
-
-static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
-{
-       struct queue_entry_priv_mmio *entry_priv;
-
-       /*
-        * Initialize registers.
-        */
-       entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
-       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0,
-                                 entry_priv->desc_dma);
-       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0,
-                                 rt2x00dev->tx[0].limit);
-       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0);
-       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0);
-
-       entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
-       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1,
-                                 entry_priv->desc_dma);
-       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1,
-                                 rt2x00dev->tx[1].limit);
-       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0);
-       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0);
-
-       entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
-       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2,
-                                 entry_priv->desc_dma);
-       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2,
-                                 rt2x00dev->tx[2].limit);
-       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0);
-       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0);
-
-       entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
-       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3,
-                                 entry_priv->desc_dma);
-       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3,
-                                 rt2x00dev->tx[3].limit);
-       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0);
-       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0);
-
-       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0);
-       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0);
-       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0);
-       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0);
-
-       rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0);
-       rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0);
-       rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0);
-       rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0);
-
-       entry_priv = rt2x00dev->rx->entries[0].priv_data;
-       rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR,
-                                 entry_priv->desc_dma);
-       rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT,
-                                 rt2x00dev->rx[0].limit);
-       rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX,
-                                 rt2x00dev->rx[0].limit - 1);
-       rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0);
-
-       rt2800_disable_wpdma(rt2x00dev);
-
-       rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0);
-
-       return 0;
-}
-
 /*
  * Device state switch handlers.
  */
-static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
-                                enum dev_state state)
-{
-       u32 reg;
-       unsigned long flags;
-
-       /*
-        * When interrupts are being enabled, the interrupt source
-        * register should be cleared to ensure a clean state.
-        */
-       if (state == STATE_RADIO_IRQ_ON) {
-               rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
-               rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
-       }
-
-       spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
-       reg = 0;
-       if (state == STATE_RADIO_IRQ_ON) {
-               rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
-               rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
-               rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
-               rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
-               rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
-       }
-       rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
-       spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
-
-       if (state == STATE_RADIO_IRQ_OFF) {
-               /*
-                * Wait for possibly running tasklets to finish.
-                */
-               tasklet_kill(&rt2x00dev->txstatus_tasklet);
-               tasklet_kill(&rt2x00dev->rxdone_tasklet);
-               tasklet_kill(&rt2x00dev->autowake_tasklet);
-               tasklet_kill(&rt2x00dev->tbtt_tasklet);
-               tasklet_kill(&rt2x00dev->pretbtt_tasklet);
-       }
-}
-
-static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
-{
-       u32 reg;
-
-       /*
-        * Reset DMA indexes
-        */
-       rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
-       rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
-       rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
-
-       rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
-       rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
-
-       if (rt2x00_is_pcie(rt2x00dev) &&
-           (rt2x00_rt(rt2x00dev, RT3090) ||
-            rt2x00_rt(rt2x00dev, RT3390) ||
-            rt2x00_rt(rt2x00dev, RT3572) ||
-            rt2x00_rt(rt2x00dev, RT3593) ||
-            rt2x00_rt(rt2x00dev, RT5390) ||
-            rt2x00_rt(rt2x00dev, RT5392) ||
-            rt2x00_rt(rt2x00dev, RT5592))) {
-               rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
-               rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
-               rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
-               rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg);
-       }
-
-       rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
-
-       reg = 0;
-       rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
-       rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
-       rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
-
-       rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
-
-       return 0;
-}
-
 static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
 
-       /* Wait for DMA, ignore error until we initialize queues. */
-       rt2800_wait_wpdma_ready(rt2x00dev);
-
-       if (unlikely(rt2800pci_init_queues(rt2x00dev)))
-               return -EIO;
-
-       retval = rt2800_enable_radio(rt2x00dev);
+       retval = rt2800mmio_enable_radio(rt2x00dev);
        if (retval)
                return retval;
 
@@ -559,15 +227,6 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
        return retval;
 }
 
-static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
-{
-       if (rt2x00_is_soc(rt2x00dev)) {
-               rt2800_disable_radio(rt2x00dev);
-               rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
-               rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
-       }
-}
-
 static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
                               enum dev_state state)
 {
@@ -601,12 +260,11 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
                 * After the radio has been disabled, the device should
                 * be put to sleep for powersaving.
                 */
-               rt2800pci_disable_radio(rt2x00dev);
                rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
                break;
        case STATE_RADIO_IRQ_ON:
        case STATE_RADIO_IRQ_OFF:
-               rt2800pci_toggle_irq(rt2x00dev, state);
+               rt2800mmio_toggle_irq(rt2x00dev, state);
                break;
        case STATE_DEEP_SLEEP:
        case STATE_SLEEP:
@@ -626,470 +284,6 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
        return retval;
 }
 
-/*
- * TX descriptor initialization
- */
-static __le32 *rt2800pci_get_txwi(struct queue_entry *entry)
-{
-       return (__le32 *) entry->skb->data;
-}
-
-static void rt2800pci_write_tx_desc(struct queue_entry *entry,
-                                   struct txentry_desc *txdesc)
-{
-       struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
-       struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
-       __le32 *txd = entry_priv->desc;
-       u32 word;
-       const unsigned int txwi_size = entry->queue->winfo_size;
-
-       /*
-        * The buffers pointed to by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
-        * must contain a TXWI structure + 802.11 header + padding + 802.11
-        * data. We choose to have SD_PTR0/SD_LEN0 contain only the TXWI
-        * and SD_PTR1/SD_LEN1 contain the 802.11 header + padding + 802.11
-        * data. It means that LAST_SEC0 is always 0.
-        */
-
-       /*
-        * Initialize TX descriptor
-        */
-       word = 0;
-       rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
-       rt2x00_desc_write(txd, 0, word);
-
-       word = 0;
-       rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len);
-       rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
-                          !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
-       rt2x00_set_field32(&word, TXD_W1_BURST,
-                          test_bit(ENTRY_TXD_BURST, &txdesc->flags));
-       rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size);
-       rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
-       rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
-       rt2x00_desc_write(txd, 1, word);
-
-       word = 0;
-       rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
-                          skbdesc->skb_dma + txwi_size);
-       rt2x00_desc_write(txd, 2, word);
-
-       word = 0;
-       rt2x00_set_field32(&word, TXD_W3_WIV,
-                          !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
-       rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
-       rt2x00_desc_write(txd, 3, word);
-
-       /*
-        * Register descriptor details in skb frame descriptor.
-        */
-       skbdesc->desc = txd;
-       skbdesc->desc_len = TXD_DESC_SIZE;
-}
-
-/*
- * RX control handlers
- */
-static void rt2800pci_fill_rxdone(struct queue_entry *entry,
-                                 struct rxdone_entry_desc *rxdesc)
-{
-       struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
-       __le32 *rxd = entry_priv->desc;
-       u32 word;
-
-       rt2x00_desc_read(rxd, 3, &word);
-
-       if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
-               rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
-
-       /*
-        * Unfortunately we don't know the cipher type used during
-        * decryption. This prevents us from providing correct
-        * statistics through debugfs.
-        */
-       rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
-
-       if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
-               /*
-                * Hardware has stripped IV/EIV data from 802.11 frame during
-                * decryption. Unfortunately the descriptor doesn't contain
-                * any fields with the EIV/IV data either, so they can't
-                * be restored by rt2x00lib.
-                */
-               rxdesc->flags |= RX_FLAG_IV_STRIPPED;
-
-               /*
-                * The hardware has already checked the Michael Mic and has
-                * stripped it from the frame. Signal this to mac80211.
-                */
-               rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
-
-               if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
-                       rxdesc->flags |= RX_FLAG_DECRYPTED;
-               else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
-                       rxdesc->flags |= RX_FLAG_MMIC_ERROR;
-       }
-
-       if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
-               rxdesc->dev_flags |= RXDONE_MY_BSS;
-
-       if (rt2x00_get_field32(word, RXD_W3_L2PAD))
-               rxdesc->dev_flags |= RXDONE_L2PAD;
-
-       /*
-        * Process the RXWI structure that is at the start of the buffer.
-        */
-       rt2800_process_rxwi(entry, rxdesc);
-}
-
-/*
- * Interrupt functions.
- */
-static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
-{
-       struct ieee80211_conf conf = { .flags = 0 };
-       struct rt2x00lib_conf libconf = { .conf = &conf };
-
-       rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
-}
-
-static bool rt2800pci_txdone_entry_check(struct queue_entry *entry, u32 status)
-{
-       __le32 *txwi;
-       u32 word;
-       int wcid, tx_wcid;
-
-       wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
-
-       txwi = rt2800_drv_get_txwi(entry);
-       rt2x00_desc_read(txwi, 1, &word);
-       tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
-
-       return (tx_wcid == wcid);
-}
-
-static bool rt2800pci_txdone_find_entry(struct queue_entry *entry, void *data)
-{
-       u32 status = *(u32 *)data;
-
-       /*
-        * rt2800pci hardware might reorder frames when exchanging traffic
-        * with multiple BA enabled STAs.
-        *
-        * For example, a tx queue
-        *    [ STA1 | STA2 | STA1 | STA2 ]
-        * can result in tx status reports
-        *    [ STA1 | STA1 | STA2 | STA2 ]
-        * when the hw decides to aggregate the frames for STA1 into one AMPDU.
-        *
-        * To mitigate this effect, associate the tx status with the first
-        * frame in the tx queue that has a matching wcid; a simplified
-        * sketch of this matching follows the txdone helpers below.
-        */
-       if (rt2800pci_txdone_entry_check(entry, status) &&
-           !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
-               /*
-                * Got a matching frame, associate the tx status with
-                * the frame
-                */
-               entry->status = status;
-               set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
-               return true;
-       }
-
-       /* Check the next frame */
-       return false;
-}
-
-static bool rt2800pci_txdone_match_first(struct queue_entry *entry, void *data)
-{
-       u32 status = *(u32 *)data;
-
-       /*
-        * Find the first frame without a tx status and assign this status
-        * to it regardless of whether it matches.
-        */
-       if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
-               /*
-                * Found the first frame without a tx status, associate
-                * the status with it
-                */
-               entry->status = status;
-               set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
-               return true;
-       }
-
-       /* Check the next frame */
-       return false;
-}
-
-static bool rt2800pci_txdone_release_entries(struct queue_entry *entry,
-                                            void *data)
-{
-       if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
-               rt2800_txdone_entry(entry, entry->status,
-                                   rt2800pci_get_txwi(entry));
-               return false;
-       }
-
-       /* No more frames to release */
-       return true;
-}
-
-static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
-{
-       struct data_queue *queue;
-       u32 status;
-       u8 qid;
-       int max_tx_done = 16;
-
-       while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
-               qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
-               if (unlikely(qid >= QID_RX)) {
-                       /*
-                        * Unknown queue, this shouldn't happen. Just drop
-                        * this tx status.
-                        */
-                       rt2x00_warn(rt2x00dev, "Got TX status report with unexpected pid %u, dropping\n",
-                                   qid);
-                       break;
-               }
-
-               queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
-               if (unlikely(queue == NULL)) {
-                       /*
-                        * The queue is NULL, this shouldn't happen. Stop
-                        * processing here and drop the tx status
-                        */
-                       rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n",
-                                   qid);
-                       break;
-               }
-
-               if (unlikely(rt2x00queue_empty(queue))) {
-                       /*
-                        * The queue is empty. Stop processing here
-                        * and drop the tx status.
-                        */
-                       rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
-                                   qid);
-                       break;
-               }
-
-               /*
-                * Let's associate this tx status with the first
-                * matching frame.
-                */
-               if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
-                                               Q_INDEX, &status,
-                                               rt2800pci_txdone_find_entry)) {
-                       /*
-                        * We cannot match the tx status to any frame, so just
-                        * use the first one.
-                        */
-                       if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
-                                                       Q_INDEX, &status,
-                                                       rt2800pci_txdone_match_first)) {
-                               rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n",
-                                           qid);
-                               break;
-                       }
-               }
-
-               /*
-                * Release all frames with a valid tx status.
-                */
-               rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
-                                          Q_INDEX, NULL,
-                                          rt2800pci_txdone_release_entries);
-
-               if (--max_tx_done == 0)
-                       break;
-       }
-
-       return !max_tx_done;
-}
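
The txdone helpers being moved above implement the out-of-order matching described in the comment: a tx status report is attached to the first not-yet-reported frame with the same wcid, and only falls back to the first unreported frame when no wcid matches. A simplified, driver-independent sketch of that strategy (not part of the patch; struct toy_frame and toy_associate_status are invented names, not rt2x00 API):

/* Toy model of the wcid matching, detached from the rt2x00 queue API. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_frame {
        int wcid;               /* wireless client the frame was sent to */
        bool has_status;        /* a tx status is already attached */
        unsigned int status;
};

static void toy_associate_status(struct toy_frame *q, size_t n,
                                 int wcid, unsigned int status)
{
        size_t i;

        /* First pass: first unreported frame with a matching wcid. */
        for (i = 0; i < n; i++) {
                if (!q[i].has_status && q[i].wcid == wcid) {
                        q[i].status = status;
                        q[i].has_status = true;
                        return;
                }
        }
        /* Fallback: first unreported frame, matching or not. */
        for (i = 0; i < n; i++) {
                if (!q[i].has_status) {
                        q[i].status = status;
                        q[i].has_status = true;
                        return;
                }
        }
}

int main(void)
{
        struct toy_frame q[4] = {
                { .wcid = 1 }, { .wcid = 2 }, { .wcid = 1 }, { .wcid = 2 }
        };

        /* Reports arrive reordered: both wcid-1 frames report first. */
        toy_associate_status(q, 4, 1, 0xa1);
        toy_associate_status(q, 4, 1, 0xa2);
        toy_associate_status(q, 4, 2, 0xb1);

        printf("q[2] got status 0x%x\n", q[2].status);  /* 0xa2 */
        return 0;
}
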
-
-static inline void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
-                                             struct rt2x00_field32 irq_field)
-{
-       u32 reg;
-
-       /*
-        * Enable a single interrupt. The interrupt mask register
-        * access needs locking.
-        */
-       spin_lock_irq(&rt2x00dev->irqmask_lock);
-       rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
-       rt2x00_set_field32(&reg, irq_field, 1);
-       rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
-       spin_unlock_irq(&rt2x00dev->irqmask_lock);
-}
-
-static void rt2800pci_txstatus_tasklet(unsigned long data)
-{
-       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-       if (rt2800pci_txdone(rt2x00dev))
-               tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-
-       /*
-        * No need to enable the tx status interrupt here as we always
-        * leave it enabled to minimize the possibility of a tx status
-        * register overflow. See comment in interrupt handler.
-        */
-}
-
-static void rt2800pci_pretbtt_tasklet(unsigned long data)
-{
-       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-       rt2x00lib_pretbtt(rt2x00dev);
-       if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
-}
-
-static void rt2800pci_tbtt_tasklet(unsigned long data)
-{
-       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-       struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
-       u32 reg;
-
-       rt2x00lib_beacondone(rt2x00dev);
-
-       if (rt2x00dev->intf_ap_count) {
-               /*
-                * The rt2800pci hardware tbtt timer is off by 1us per tbtt,
-                * causing beacon skew and, over time, problems with some
-                * powersaving clients. Shorten the beacon interval by 64us
-                * every 64 beacons to mitigate this effect; see the
-                * arithmetic check after this tasklet.
-                */
-               if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) {
-                       rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-                       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
-                                          (rt2x00dev->beacon_int * 16) - 1);
-                       rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-               } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) {
-                       rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
-                       rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
-                                          (rt2x00dev->beacon_int * 16));
-                       rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg);
-               }
-               drv_data->tbtt_tick++;
-               drv_data->tbtt_tick %= BCN_TBTT_OFFSET;
-       }
-
-       if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT);
-}
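
The numbers in the tbtt drift comment above work out if the BCN_TIME_CFG interval field counts 1/16 TU (64 us) units, which is what the beacon_int * 16 scaling suggests; that unit size is an inference, not stated in the patch. A quick check under that assumption:

/* Back-of-the-envelope check of the drift compensation. */
#include <stdio.h>

int main(void)
{
        const unsigned int tu_us = 1024;                /* one TU in microseconds */
        const unsigned int reg_unit_us = tu_us / 16;    /* assumed register step: 64 us */
        const unsigned int drift_per_tbtt_us = 1;
        const unsigned int beacons_per_cycle = 64;

        /* Drift accumulated over one 64-beacon cycle... */
        printf("accumulated drift: %u us\n",
               drift_per_tbtt_us * beacons_per_cycle);  /* 64 us */
        /* ...is cancelled by shortening one interval by one register step. */
        printf("correction applied: %u us\n", reg_unit_us);     /* 64 us */
        return 0;
}
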
-
-static void rt2800pci_rxdone_tasklet(unsigned long data)
-{
-       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-       if (rt2x00mmio_rxdone(rt2x00dev))
-               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
-       else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE);
-}
-
-static void rt2800pci_autowake_tasklet(unsigned long data)
-{
-       struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
-       rt2800pci_wakeup(rt2x00dev);
-       if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               rt2800pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP);
-}
-
-static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
-{
-       u32 status;
-       int i;
-
-       /*
-        * The TX_FIFO_STATUS interrupt needs special care. We must
-        * read TX_STA_FIFO immediately, otherwise the register can
-        * overflow and we would lose status reports.
-        *
-        * Hence, read the TX_STA_FIFO register and copy all tx status
-        * reports into a kernel FIFO which is handled in the txstatus
-        * tasklet. We use a tasklet to process the tx status reports
-        * because we can schedule the tasklet multiple times (when the
-        * interrupt fires again during tx status processing).
-        *
-        * Furthermore we don't disable the TX_FIFO_STATUS
-        * interrupt here but leave it enabled so that the TX_STA_FIFO
-        * can also be read while the tx status tasklet gets executed.
-        *
-        * Since we have only one producer and one consumer we don't
-        * need to lock the kfifo.
-        */
-       for (i = 0; i < rt2x00dev->tx->limit; i++) {
-               rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO, &status);
-
-               if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
-                       break;
-
-               if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
-                       rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n");
-                       break;
-               }
-       }
-
-       /* Schedule the tasklet for processing the tx status. */
-       tasklet_schedule(&rt2x00dev->txstatus_tasklet);
-}
-
-static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
-{
-       struct rt2x00_dev *rt2x00dev = dev_instance;
-       u32 reg, mask;
-
-       /* Read status and ACK all interrupts */
-       rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
-       rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
-
-       if (!reg)
-               return IRQ_NONE;
-
-       if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               return IRQ_HANDLED;
-
-       /*
-        * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits
-        * for interrupts and interrupt masks we can just use the value of
-        * INT_SOURCE_CSR to create the interrupt mask.
-        */
-       mask = ~reg;
-
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
-               rt2800pci_txstatus_interrupt(rt2x00dev);
-               /*
-                * Never disable the TX_FIFO_STATUS interrupt.
-                */
-               rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
-       }
-
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
-               tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet);
-
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT))
-               tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet);
-
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
-               tasklet_schedule(&rt2x00dev->rxdone_tasklet);
-
-       if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
-               tasklet_schedule(&rt2x00dev->autowake_tasklet);
-
-       /*
-        * Disable all interrupts for which a tasklet was scheduled right now,
-        * the tasklet will reenable the appropriate interrupts.
-        */
-       spin_lock(&rt2x00dev->irqmask_lock);
-       rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR, &reg);
-       reg &= mask;
-       rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg);
-       spin_unlock(&rt2x00dev->irqmask_lock);
-
-       return IRQ_HANDLED;
-}
-
 /*
  * Device probe functions.
  */
@@ -1097,9 +291,7 @@ static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
 
-       if (rt2x00_is_soc(rt2x00dev))
-               retval = rt2800pci_read_eeprom_soc(rt2x00dev);
-       else if (rt2800pci_efuse_detect(rt2x00dev))
+       if (rt2800pci_efuse_detect(rt2x00dev))
                retval = rt2800pci_read_eeprom_efuse(rt2x00dev);
        else
                retval = rt2800pci_read_eeprom_pci(rt2x00dev);
@@ -1145,25 +337,25 @@ static const struct rt2800_ops rt2800pci_rt2800_ops = {
        .read_eeprom            = rt2800pci_read_eeprom,
        .hwcrypt_disabled       = rt2800pci_hwcrypt_disabled,
        .drv_write_firmware     = rt2800pci_write_firmware,
-       .drv_init_registers     = rt2800pci_init_registers,
-       .drv_get_txwi           = rt2800pci_get_txwi,
+       .drv_init_registers     = rt2800mmio_init_registers,
+       .drv_get_txwi           = rt2800mmio_get_txwi,
 };
 
 static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
-       .irq_handler            = rt2800pci_interrupt,
-       .txstatus_tasklet       = rt2800pci_txstatus_tasklet,
-       .pretbtt_tasklet        = rt2800pci_pretbtt_tasklet,
-       .tbtt_tasklet           = rt2800pci_tbtt_tasklet,
-       .rxdone_tasklet         = rt2800pci_rxdone_tasklet,
-       .autowake_tasklet       = rt2800pci_autowake_tasklet,
+       .irq_handler            = rt2800mmio_interrupt,
+       .txstatus_tasklet       = rt2800mmio_txstatus_tasklet,
+       .pretbtt_tasklet        = rt2800mmio_pretbtt_tasklet,
+       .tbtt_tasklet           = rt2800mmio_tbtt_tasklet,
+       .rxdone_tasklet         = rt2800mmio_rxdone_tasklet,
+       .autowake_tasklet       = rt2800mmio_autowake_tasklet,
        .probe_hw               = rt2800_probe_hw,
        .get_firmware_name      = rt2800pci_get_firmware_name,
        .check_firmware         = rt2800_check_firmware,
        .load_firmware          = rt2800_load_firmware,
        .initialize             = rt2x00mmio_initialize,
        .uninitialize           = rt2x00mmio_uninitialize,
-       .get_entry_state        = rt2800pci_get_entry_state,
-       .clear_entry            = rt2800pci_clear_entry,
+       .get_entry_state        = rt2800mmio_get_entry_state,
+       .clear_entry            = rt2800mmio_clear_entry,
        .set_device_state       = rt2800pci_set_device_state,
        .rfkill_poll            = rt2800_rfkill_poll,
        .link_stats             = rt2800_link_stats,
@@ -1171,15 +363,15 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
        .link_tuner             = rt2800_link_tuner,
        .gain_calibration       = rt2800_gain_calibration,
        .vco_calibration        = rt2800_vco_calibration,
-       .start_queue            = rt2800pci_start_queue,
-       .kick_queue             = rt2800pci_kick_queue,
-       .stop_queue             = rt2800pci_stop_queue,
+       .start_queue            = rt2800mmio_start_queue,
+       .kick_queue             = rt2800mmio_kick_queue,
+       .stop_queue             = rt2800mmio_stop_queue,
        .flush_queue            = rt2x00mmio_flush_queue,
-       .write_tx_desc          = rt2800pci_write_tx_desc,
+       .write_tx_desc          = rt2800mmio_write_tx_desc,
        .write_tx_data          = rt2800_write_tx_data,
        .write_beacon           = rt2800_write_beacon,
        .clear_beacon           = rt2800_clear_beacon,
-       .fill_rxdone            = rt2800pci_fill_rxdone,
+       .fill_rxdone            = rt2800mmio_fill_rxdone,
        .config_shared_key      = rt2800_config_shared_key,
        .config_pairwise_key    = rt2800_config_pairwise_key,
        .config_filter          = rt2800_config_filter,
@@ -1191,49 +383,6 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
        .sta_remove             = rt2800_sta_remove,
 };
 
-static void rt2800pci_queue_init(struct data_queue *queue)
-{
-       struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
-       unsigned short txwi_size, rxwi_size;
-
-       rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
-
-       switch (queue->qid) {
-       case QID_RX:
-               queue->limit = 128;
-               queue->data_size = AGGREGATION_SIZE;
-               queue->desc_size = RXD_DESC_SIZE;
-               queue->winfo_size = rxwi_size;
-               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
-               break;
-
-       case QID_AC_VO:
-       case QID_AC_VI:
-       case QID_AC_BE:
-       case QID_AC_BK:
-               queue->limit = 64;
-               queue->data_size = AGGREGATION_SIZE;
-               queue->desc_size = TXD_DESC_SIZE;
-               queue->winfo_size = txwi_size;
-               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
-               break;
-
-       case QID_BEACON:
-               queue->limit = 8;
-               queue->data_size = 0; /* No DMA required for beacons */
-               queue->desc_size = TXD_DESC_SIZE;
-               queue->winfo_size = txwi_size;
-               queue->priv_size = sizeof(struct queue_entry_priv_mmio);
-               break;
-
-       case QID_ATIM:
-               /* fallthrough */
-       default:
-               BUG();
-               break;
-       }
-}
-
 static const struct rt2x00_ops rt2800pci_ops = {
        .name                   = KBUILD_MODNAME,
        .drv_data_size          = sizeof(struct rt2800_drv_data),
@@ -1241,7 +390,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
        .eeprom_size            = EEPROM_SIZE,
        .rf_size                = RF_SIZE,
        .tx_queues              = NUM_TX_QUEUES,
-       .queue_init             = rt2800pci_queue_init,
+       .queue_init             = rt2800mmio_queue_init,
        .lib                    = &rt2800pci_rt2x00_ops,
        .drv                    = &rt2800pci_rt2800_ops,
        .hw                     = &rt2800pci_mac80211_ops,
@@ -1253,7 +402,6 @@ static const struct rt2x00_ops rt2800pci_ops = {
 /*
  * RT2800pci module information.
  */
-#ifdef CONFIG_PCI
 static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
        { PCI_DEVICE(0x1814, 0x0601) },
        { PCI_DEVICE(0x1814, 0x0681) },
@@ -1298,38 +446,15 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
 #endif
        { 0, }
 };
-#endif /* CONFIG_PCI */
 
 MODULE_AUTHOR(DRV_PROJECT);
 MODULE_VERSION(DRV_VERSION);
 MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
 MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
-#ifdef CONFIG_PCI
 MODULE_FIRMWARE(FIRMWARE_RT2860);
 MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
-#endif /* CONFIG_PCI */
 MODULE_LICENSE("GPL");
 
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-static int rt2800soc_probe(struct platform_device *pdev)
-{
-       return rt2x00soc_probe(pdev, &rt2800pci_ops);
-}
-
-static struct platform_driver rt2800soc_driver = {
-       .driver         = {
-               .name           = "rt2800_wmac",
-               .owner          = THIS_MODULE,
-               .mod_name       = KBUILD_MODNAME,
-       },
-       .probe          = rt2800soc_probe,
-       .remove         = rt2x00soc_remove,
-       .suspend        = rt2x00soc_suspend,
-       .resume         = rt2x00soc_resume,
-};
-#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
-
-#ifdef CONFIG_PCI
 static int rt2800pci_probe(struct pci_dev *pci_dev,
                           const struct pci_device_id *id)
 {
@@ -1344,39 +469,5 @@ static struct pci_driver rt2800pci_driver = {
        .suspend        = rt2x00pci_suspend,
        .resume         = rt2x00pci_resume,
 };
-#endif /* CONFIG_PCI */
-
-static int __init rt2800pci_init(void)
-{
-       int ret = 0;
-
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-       ret = platform_driver_register(&rt2800soc_driver);
-       if (ret)
-               return ret;
-#endif
-#ifdef CONFIG_PCI
-       ret = pci_register_driver(&rt2800pci_driver);
-       if (ret) {
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-               platform_driver_unregister(&rt2800soc_driver);
-#endif
-               return ret;
-       }
-#endif
-
-       return ret;
-}
-
-static void __exit rt2800pci_exit(void)
-{
-#ifdef CONFIG_PCI
-       pci_unregister_driver(&rt2800pci_driver);
-#endif
-#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
-       platform_driver_unregister(&rt2800soc_driver);
-#endif
-}
 
-module_init(rt2800pci_init);
-module_exit(rt2800pci_exit);
+module_pci_driver(rt2800pci_driver);
index ab22a087c50dba15cf67b3ff237f8915bc8e2996..a81c9ee281c075dbf267e6b4b9b9c78de912c315 100644 (file)
 #ifndef RT2800PCI_H
 #define RT2800PCI_H
 
-/*
- * Queue register offset macros
- */
-#define TX_QUEUE_REG_OFFSET            0x10
-#define TX_BASE_PTR(__x)               (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_MAX_CNT(__x)                        (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_CTX_IDX(__x)                        (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
-#define TX_DTX_IDX(__x)                        (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
-
 /*
  * 8051 firmware image.
  */
 #define FIRMWARE_RT3290                        "rt3290.bin"
 #define FIRMWARE_IMAGE_BASE            0x2000
 
-/*
- * DMA descriptor defines.
- */
-#define TXD_DESC_SIZE                  (4 * sizeof(__le32))
-#define RXD_DESC_SIZE                  (4 * sizeof(__le32))
-
-/*
- * TX descriptor format for TX, PRIO and Beacon Ring.
- */
-
-/*
- * Word0
- */
-#define TXD_W0_SD_PTR0                 FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define TXD_W1_SD_LEN1                 FIELD32(0x00003fff)
-#define TXD_W1_LAST_SEC1               FIELD32(0x00004000)
-#define TXD_W1_BURST                   FIELD32(0x00008000)
-#define TXD_W1_SD_LEN0                 FIELD32(0x3fff0000)
-#define TXD_W1_LAST_SEC0               FIELD32(0x40000000)
-#define TXD_W1_DMA_DONE                        FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define TXD_W2_SD_PTR1                 FIELD32(0xffffffff)
-
-/*
- * Word3
- * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
- * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
- *       0:MGMT, 1:HCCA 2:EDCA
- */
-#define TXD_W3_WIV                     FIELD32(0x01000000)
-#define TXD_W3_QSEL                    FIELD32(0x06000000)
-#define TXD_W3_TCO                     FIELD32(0x20000000)
-#define TXD_W3_UCO                     FIELD32(0x40000000)
-#define TXD_W3_ICO                     FIELD32(0x80000000)
-
-/*
- * RX descriptor format for RX Ring.
- */
-
-/*
- * Word0
- */
-#define RXD_W0_SDP0                    FIELD32(0xffffffff)
-
-/*
- * Word1
- */
-#define RXD_W1_SDL1                    FIELD32(0x00003fff)
-#define RXD_W1_SDL0                    FIELD32(0x3fff0000)
-#define RXD_W1_LS0                     FIELD32(0x40000000)
-#define RXD_W1_DMA_DONE                        FIELD32(0x80000000)
-
-/*
- * Word2
- */
-#define RXD_W2_SDP1                    FIELD32(0xffffffff)
-
-/*
- * Word3
- * AMSDU: RX with 802.3 header, not 802.11 header.
- * DECRYPTED: This frame is being decrypted.
- */
-#define RXD_W3_BA                      FIELD32(0x00000001)
-#define RXD_W3_DATA                    FIELD32(0x00000002)
-#define RXD_W3_NULLDATA                        FIELD32(0x00000004)
-#define RXD_W3_FRAG                    FIELD32(0x00000008)
-#define RXD_W3_UNICAST_TO_ME           FIELD32(0x00000010)
-#define RXD_W3_MULTICAST               FIELD32(0x00000020)
-#define RXD_W3_BROADCAST               FIELD32(0x00000040)
-#define RXD_W3_MY_BSS                  FIELD32(0x00000080)
-#define RXD_W3_CRC_ERROR               FIELD32(0x00000100)
-#define RXD_W3_CIPHER_ERROR            FIELD32(0x00000600)
-#define RXD_W3_AMSDU                   FIELD32(0x00000800)
-#define RXD_W3_HTC                     FIELD32(0x00001000)
-#define RXD_W3_RSSI                    FIELD32(0x00002000)
-#define RXD_W3_L2PAD                   FIELD32(0x00004000)
-#define RXD_W3_AMPDU                   FIELD32(0x00008000)
-#define RXD_W3_DECRYPTED               FIELD32(0x00010000)
-#define RXD_W3_PLCP_SIGNAL             FIELD32(0x00020000)
-#define RXD_W3_PLCP_RSSI               FIELD32(0x00040000)
-
 #endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800soc.c b/drivers/net/wireless/rt2x00/rt2800soc.c
new file mode 100644 (file)
index 0000000..1359227
--- /dev/null
@@ -0,0 +1,263 @@
+/*     Copyright (C) 2009 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
+ *     Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
+ *     Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
+ *     Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
+ *     Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
+ *     Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
+ *     Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
+ *     Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
+ *     <http://rt2x00.serialmonkey.com>
+ *
+ *     This program is free software; you can redistribute it and/or modify
+ *     it under the terms of the GNU General Public License as published by
+ *     the Free Software Foundation; either version 2 of the License, or
+ *     (at your option) any later version.
+ *
+ *     This program is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *     GNU General Public License for more details.
+ *
+ *     You should have received a copy of the GNU General Public License
+ *     along with this program; if not, write to the
+ *     Free Software Foundation, Inc.,
+ *     59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*     Module: rt2800soc
+ *     Abstract: rt2800 WiSoC specific routines.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "rt2x00.h"
+#include "rt2x00mmio.h"
+#include "rt2x00soc.h"
+#include "rt2800.h"
+#include "rt2800lib.h"
+#include "rt2800mmio.h"
+
+/* Allow hardware encryption to be disabled. */
+static bool modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
+
+static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
+{
+       return modparam_nohwcrypt;
+}
+
+static void rt2800soc_disable_radio(struct rt2x00_dev *rt2x00dev)
+{
+       rt2800_disable_radio(rt2x00dev);
+       rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
+       rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
+}
+
+static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev,
+                                     enum dev_state state)
+{
+       int retval = 0;
+
+       switch (state) {
+       case STATE_RADIO_ON:
+               retval = rt2800mmio_enable_radio(rt2x00dev);
+               break;
+
+       case STATE_RADIO_OFF:
+               rt2800soc_disable_radio(rt2x00dev);
+               break;
+
+       case STATE_RADIO_IRQ_ON:
+       case STATE_RADIO_IRQ_OFF:
+               rt2800mmio_toggle_irq(rt2x00dev, state);
+               break;
+
+       case STATE_DEEP_SLEEP:
+       case STATE_SLEEP:
+       case STATE_STANDBY:
+       case STATE_AWAKE:
+               /* These states are not supported, but don't report an error */
+               retval = 0;
+               break;
+
+       default:
+               retval = -ENOTSUPP;
+               break;
+       }
+
+       if (unlikely(retval))
+               rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n",
+                          state, retval);
+
+       return retval;
+}
+
+static int rt2800soc_read_eeprom(struct rt2x00_dev *rt2x00dev)
+{
+       void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
+
+       if (!base_addr)
+               return -ENOMEM;
+
+       memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
+
+       iounmap(base_addr);
+       return 0;
+}
+
+/* Firmware functions */
+static char *rt2800soc_get_firmware_name(struct rt2x00_dev *rt2x00dev)
+{
+       WARN_ON_ONCE(1);
+       return NULL;
+}
+
+static int rt2800soc_load_firmware(struct rt2x00_dev *rt2x00dev,
+                                  const u8 *data, const size_t len)
+{
+       WARN_ON_ONCE(1);
+       return 0;
+}
+
+static int rt2800soc_check_firmware(struct rt2x00_dev *rt2x00dev,
+                                   const u8 *data, const size_t len)
+{
+       WARN_ON_ONCE(1);
+       return 0;
+}
+
+static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev,
+                                   const u8 *data, const size_t len)
+{
+       WARN_ON_ONCE(1);
+       return 0;
+}
+
+static const struct ieee80211_ops rt2800soc_mac80211_ops = {
+       .tx                     = rt2x00mac_tx,
+       .start                  = rt2x00mac_start,
+       .stop                   = rt2x00mac_stop,
+       .add_interface          = rt2x00mac_add_interface,
+       .remove_interface       = rt2x00mac_remove_interface,
+       .config                 = rt2x00mac_config,
+       .configure_filter       = rt2x00mac_configure_filter,
+       .set_key                = rt2x00mac_set_key,
+       .sw_scan_start          = rt2x00mac_sw_scan_start,
+       .sw_scan_complete       = rt2x00mac_sw_scan_complete,
+       .get_stats              = rt2x00mac_get_stats,
+       .get_tkip_seq           = rt2800_get_tkip_seq,
+       .set_rts_threshold      = rt2800_set_rts_threshold,
+       .sta_add                = rt2x00mac_sta_add,
+       .sta_remove             = rt2x00mac_sta_remove,
+       .bss_info_changed       = rt2x00mac_bss_info_changed,
+       .conf_tx                = rt2800_conf_tx,
+       .get_tsf                = rt2800_get_tsf,
+       .rfkill_poll            = rt2x00mac_rfkill_poll,
+       .ampdu_action           = rt2800_ampdu_action,
+       .flush                  = rt2x00mac_flush,
+       .get_survey             = rt2800_get_survey,
+       .get_ringparam          = rt2x00mac_get_ringparam,
+       .tx_frames_pending      = rt2x00mac_tx_frames_pending,
+};
+
+static const struct rt2800_ops rt2800soc_rt2800_ops = {
+       .register_read          = rt2x00mmio_register_read,
+       .register_read_lock     = rt2x00mmio_register_read, /* same for SoCs */
+       .register_write         = rt2x00mmio_register_write,
+       .register_write_lock    = rt2x00mmio_register_write, /* same for SoCs */
+       .register_multiread     = rt2x00mmio_register_multiread,
+       .register_multiwrite    = rt2x00mmio_register_multiwrite,
+       .regbusy_read           = rt2x00mmio_regbusy_read,
+       .read_eeprom            = rt2800soc_read_eeprom,
+       .hwcrypt_disabled       = rt2800soc_hwcrypt_disabled,
+       .drv_write_firmware     = rt2800soc_write_firmware,
+       .drv_init_registers     = rt2800mmio_init_registers,
+       .drv_get_txwi           = rt2800mmio_get_txwi,
+};
+
+static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
+       .irq_handler            = rt2800mmio_interrupt,
+       .txstatus_tasklet       = rt2800mmio_txstatus_tasklet,
+       .pretbtt_tasklet        = rt2800mmio_pretbtt_tasklet,
+       .tbtt_tasklet           = rt2800mmio_tbtt_tasklet,
+       .rxdone_tasklet         = rt2800mmio_rxdone_tasklet,
+       .autowake_tasklet       = rt2800mmio_autowake_tasklet,
+       .probe_hw               = rt2800_probe_hw,
+       .get_firmware_name      = rt2800soc_get_firmware_name,
+       .check_firmware         = rt2800soc_check_firmware,
+       .load_firmware          = rt2800soc_load_firmware,
+       .initialize             = rt2x00mmio_initialize,
+       .uninitialize           = rt2x00mmio_uninitialize,
+       .get_entry_state        = rt2800mmio_get_entry_state,
+       .clear_entry            = rt2800mmio_clear_entry,
+       .set_device_state       = rt2800soc_set_device_state,
+       .rfkill_poll            = rt2800_rfkill_poll,
+       .link_stats             = rt2800_link_stats,
+       .reset_tuner            = rt2800_reset_tuner,
+       .link_tuner             = rt2800_link_tuner,
+       .gain_calibration       = rt2800_gain_calibration,
+       .vco_calibration        = rt2800_vco_calibration,
+       .start_queue            = rt2800mmio_start_queue,
+       .kick_queue             = rt2800mmio_kick_queue,
+       .stop_queue             = rt2800mmio_stop_queue,
+       .flush_queue            = rt2x00mmio_flush_queue,
+       .write_tx_desc          = rt2800mmio_write_tx_desc,
+       .write_tx_data          = rt2800_write_tx_data,
+       .write_beacon           = rt2800_write_beacon,
+       .clear_beacon           = rt2800_clear_beacon,
+       .fill_rxdone            = rt2800mmio_fill_rxdone,
+       .config_shared_key      = rt2800_config_shared_key,
+       .config_pairwise_key    = rt2800_config_pairwise_key,
+       .config_filter          = rt2800_config_filter,
+       .config_intf            = rt2800_config_intf,
+       .config_erp             = rt2800_config_erp,
+       .config_ant             = rt2800_config_ant,
+       .config                 = rt2800_config,
+       .sta_add                = rt2800_sta_add,
+       .sta_remove             = rt2800_sta_remove,
+};
+
+static const struct rt2x00_ops rt2800soc_ops = {
+       .name                   = KBUILD_MODNAME,
+       .drv_data_size          = sizeof(struct rt2800_drv_data),
+       .max_ap_intf            = 8,
+       .eeprom_size            = EEPROM_SIZE,
+       .rf_size                = RF_SIZE,
+       .tx_queues              = NUM_TX_QUEUES,
+       .queue_init             = rt2800mmio_queue_init,
+       .lib                    = &rt2800soc_rt2x00_ops,
+       .drv                    = &rt2800soc_rt2800_ops,
+       .hw                     = &rt2800soc_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+       .debugfs                = &rt2800_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
+static int rt2800soc_probe(struct platform_device *pdev)
+{
+       return rt2x00soc_probe(pdev, &rt2800soc_ops);
+}
+
+static struct platform_driver rt2800soc_driver = {
+       .driver         = {
+               .name           = "rt2800_wmac",
+               .owner          = THIS_MODULE,
+               .mod_name       = KBUILD_MODNAME,
+       },
+       .probe          = rt2800soc_probe,
+       .remove         = rt2x00soc_remove,
+       .suspend        = rt2x00soc_suspend,
+       .resume         = rt2x00soc_resume,
+};
+
+module_platform_driver(rt2800soc_driver);
+
+MODULE_AUTHOR(DRV_PROJECT);
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION("Ralink WiSoC Wireless LAN driver.");
+MODULE_LICENSE("GPL");
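
The probe/remove wiring above relies on module_platform_driver(), which simply generates the module init/exit pair around platform_driver_register()/platform_driver_unregister(). Roughly what the macro expands to for this driver (an illustrative sketch, not the verbatim kernel macro):

    static int __init rt2800soc_driver_init(void)
    {
            return platform_driver_register(&rt2800soc_driver);
    }
    module_init(rt2800soc_driver_init);

    static void __exit rt2800soc_driver_exit(void)
    {
            platform_driver_unregister(&rt2800soc_driver);
    }
    module_exit(rt2800soc_driver_exit);
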
index 96961b9a395cf0d2e01c32449c4e9bcc55e7828c..997df03a0c2e22abd46bd274bf13f4b1e4f81f01 100644 (file)
@@ -148,6 +148,8 @@ static bool rt2800usb_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
        return false;
 }
 
+#define TXSTATUS_READ_INTERVAL 1000000
+
 static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
                                                 int urb_status, u32 tx_status)
 {
@@ -176,8 +178,9 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
                queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
 
        if (rt2800usb_txstatus_pending(rt2x00dev)) {
-               /* Read register after 250 us */
-               hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 250000),
+               /* Read register after 1 ms */
+               hrtimer_start(&rt2x00dev->txstatus_timer,
+                             ktime_set(0, TXSTATUS_READ_INTERVAL),
                              HRTIMER_MODE_REL);
                return false;
        }
@@ -202,8 +205,9 @@ static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev)
        if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
                return;
 
-       /* Read TX_STA_FIFO register after 500 us */
-       hrtimer_start(&rt2x00dev->txstatus_timer, ktime_set(0, 500000),
+       /* Read TX_STA_FIFO register after 2 ms */
+       hrtimer_start(&rt2x00dev->txstatus_timer,
+                     ktime_set(0, 2*TXSTATUS_READ_INTERVAL),
                      HRTIMER_MODE_REL);
 }
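
The two delay changes above use ktime_set(0, ns), which builds a ktime from a seconds/nanoseconds pair, so TXSTATUS_READ_INTERVAL (1,000,000 ns) is 1 ms and 2 * TXSTATUS_READ_INTERVAL is 2 ms. A minimal sketch of the same one-shot relative hrtimer pattern, with hypothetical names (not driver code):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer poll_timer;

    static enum hrtimer_restart poll_timer_fn(struct hrtimer *timer)
    {
            /* read the status register here; return HRTIMER_RESTART to rearm */
            return HRTIMER_NORESTART;
    }

    static void poll_status_in_1ms(void)
    {
            /* hrtimer_init() is normally done once at setup time */
            hrtimer_init(&poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            poll_timer.function = poll_timer_fn;
            /* fire once, 1 ms (1,000,000 ns) from now */
            hrtimer_start(&poll_timer, ktime_set(0, 1000000), HRTIMER_MODE_REL);
    }
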
 
@@ -1176,6 +1180,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* Linksys */
        { USB_DEVICE(0x13b1, 0x002f) },
        { USB_DEVICE(0x1737, 0x0079) },
+       /* Logitec */
+       { USB_DEVICE(0x0789, 0x0170) },
        /* Ralink */
        { USB_DEVICE(0x148f, 0x3572) },
        /* Sitecom */
@@ -1199,6 +1205,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x050d, 0x1103) },
        /* Cameo */
        { USB_DEVICE(0x148f, 0xf301) },
+       /* D-Link */
+       { USB_DEVICE(0x2001, 0x3c1f) },
        /* Edimax */
        { USB_DEVICE(0x7392, 0x7733) },
        /* Hawking */
@@ -1212,6 +1220,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x0789, 0x016b) },
        /* NETGEAR */
        { USB_DEVICE(0x0846, 0x9012) },
+       { USB_DEVICE(0x0846, 0x9013) },
        { USB_DEVICE(0x0846, 0x9019) },
        /* Planex */
        { USB_DEVICE(0x2019, 0xed19) },
@@ -1220,6 +1229,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        /* Sitecom */
        { USB_DEVICE(0x0df6, 0x0067) },
        { USB_DEVICE(0x0df6, 0x006a) },
+       { USB_DEVICE(0x0df6, 0x006e) },
        /* ZyXEL */
        { USB_DEVICE(0x0586, 0x3421) },
 #endif
@@ -1236,6 +1246,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x2001, 0x3c1c) },
        { USB_DEVICE(0x2001, 0x3c1d) },
        { USB_DEVICE(0x2001, 0x3c1e) },
+       { USB_DEVICE(0x2001, 0x3c20) },
+       { USB_DEVICE(0x2001, 0x3c22) },
+       { USB_DEVICE(0x2001, 0x3c23) },
        /* LG innotek */
        { USB_DEVICE(0x043e, 0x7a22) },
        { USB_DEVICE(0x043e, 0x7a42) },
@@ -1258,12 +1271,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x043e, 0x7a32) },
        /* AVM GmbH */
        { USB_DEVICE(0x057c, 0x8501) },
-       /* D-Link DWA-160-B2 */
+       /* Buffalo */
+       { USB_DEVICE(0x0411, 0x0241) },
+       /* D-Link */
        { USB_DEVICE(0x2001, 0x3c1a) },
+       { USB_DEVICE(0x2001, 0x3c21) },
        /* Proware */
        { USB_DEVICE(0x043e, 0x7a13) },
        /* Ralink */
        { USB_DEVICE(0x148f, 0x5572) },
+       /* TRENDnet */
+       { USB_DEVICE(0x20f4, 0x724a) },
 #endif
 #ifdef CONFIG_RT2800USB_UNKNOWN
        /*
@@ -1333,6 +1351,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x1d4d, 0x0010) },
        /* Planex */
        { USB_DEVICE(0x2019, 0xab24) },
+       { USB_DEVICE(0x2019, 0xab29) },
        /* Qcom */
        { USB_DEVICE(0x18e8, 0x6259) },
        /* RadioShack */
index fe4c572db52c2749317690b75ad4cc296aec5157..e4ba2ce0f212b955f15db89df45d3b32af9942de 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/input-polldev.h>
 #include <linux/kfifo.h>
 #include <linux/hrtimer.h>
+#include <linux/average.h>
 
 #include <net/mac80211.h>
 
 #define SHORT_EIFS             ( SIFS + SHORT_DIFS + \
                                  GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
 
-/*
- * Structure for average calculation
- * The avg field contains the actual average value,
- * but avg_weight is internally used during calculations
- * to prevent rounding errors.
- */
-struct avg_val {
-       int avg;
-       int avg_weight;
-};
-
 enum rt2x00_chip_intf {
        RT2X00_CHIP_INTF_PCI,
        RT2X00_CHIP_INTF_PCIE,
@@ -297,7 +287,7 @@ struct link_ant {
         * Similar to the avg_rssi in the link_qual structure
         * this value is updated by using the walking average.
         */
-       struct avg_val rssi_ant;
+       struct ewma rssi_ant;
 };
 
 /*
@@ -326,7 +316,7 @@ struct link {
        /*
         * Currently active average RSSI value
         */
-       struct avg_val avg_rssi;
+       struct ewma avg_rssi;
 
        /*
         * Work structure for scheduling periodic link tuning.
@@ -1179,6 +1169,93 @@ static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
        return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
 }
 
+/* Helpers for capability flags */
+
+static inline bool
+rt2x00_has_cap_flag(struct rt2x00_dev *rt2x00dev,
+                   enum rt2x00_capability_flags cap_flag)
+{
+       return test_bit(cap_flag, &rt2x00dev->cap_flags);
+}
+
+static inline bool
+rt2x00_has_cap_hw_crypto(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_HW_CRYPTO);
+}
+
+static inline bool
+rt2x00_has_cap_power_limit(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_POWER_LIMIT);
+}
+
+static inline bool
+rt2x00_has_cap_control_filters(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTERS);
+}
+
+static inline bool
+rt2x00_has_cap_control_filter_pspoll(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_CONTROL_FILTER_PSPOLL);
+}
+
+static inline bool
+rt2x00_has_cap_pre_tbtt_interrupt(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_PRE_TBTT_INTERRUPT);
+}
+
+static inline bool
+rt2x00_has_cap_link_tuning(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_LINK_TUNING);
+}
+
+static inline bool
+rt2x00_has_cap_frame_type(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_FRAME_TYPE);
+}
+
+static inline bool
+rt2x00_has_cap_rf_sequence(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_RF_SEQUENCE);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_a(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_A);
+}
+
+static inline bool
+rt2x00_has_cap_external_lna_bg(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_EXTERNAL_LNA_BG);
+}
+
+static inline bool
+rt2x00_has_cap_double_antenna(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_DOUBLE_ANTENNA);
+}
+
+static inline bool
+rt2x00_has_cap_bt_coexist(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_BT_COEXIST);
+}
+
+static inline bool
+rt2x00_has_cap_vco_recalibration(struct rt2x00_dev *rt2x00dev)
+{
+       return rt2x00_has_cap_flag(rt2x00dev, CAPABILITY_VCO_RECALIBRATION);
+}
+
 /**
  * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
  * @entry: Pointer to &struct queue_entry
index 1ca4c7ffc1898c9ce97d87506b0b8e78464e78ad..3db0d99d9da7a9980f63939186f7c2b7971cd533 100644 (file)
@@ -52,7 +52,7 @@ void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
 
-       if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !hw_key)
+       if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !hw_key)
                return;
 
        __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags);
@@ -80,7 +80,7 @@ unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev,
        struct ieee80211_key_conf *key = tx_info->control.hw_key;
        unsigned int overhead = 0;
 
-       if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags) || !key)
+       if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !key)
                return overhead;
 
        /*
index fe7a7f63a9edc2cf75c0a2caa2f6b8ffe02234aa..7f7baae5ae029e6c252676841682cf8fabf270f4 100644 (file)
@@ -750,7 +750,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
                                intf, &rt2x00debug_fop_queue_stats);
 
 #ifdef CONFIG_RT2X00_LIB_CRYPTO
-       if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+       if (rt2x00_has_cap_hw_crypto(rt2x00dev))
                intf->crypto_stats_entry =
                    debugfs_create_file("crypto", S_IRUGO, intf->queue_folder,
                                        intf, &rt2x00debug_fop_crypto_stats);
index 712eea9d398ffa747a1c76ebfd0da0ad84ed8854..080b1fcae5fa8f3f2b376d5b77bffab2519ddf57 100644 (file)
@@ -88,7 +88,7 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
        rt2x00queue_start_queues(rt2x00dev);
        rt2x00link_start_tuner(rt2x00dev);
        rt2x00link_start_agc(rt2x00dev);
-       if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+       if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
                rt2x00link_start_vcocal(rt2x00dev);
 
        /*
@@ -113,7 +113,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
         * Stop all queues
         */
        rt2x00link_stop_agc(rt2x00dev);
-       if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+       if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
                rt2x00link_stop_vcocal(rt2x00dev);
        rt2x00link_stop_tuner(rt2x00dev);
        rt2x00queue_stop_queues(rt2x00dev);
@@ -234,7 +234,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
         * here as they will fetch the next beacon directly prior to
         * transmission.
         */
-       if (test_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags))
+       if (rt2x00_has_cap_pre_tbtt_interrupt(rt2x00dev))
                return;
 
        /* fetch next beacon */
@@ -358,7 +358,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
         * mac80211 will expect the same data to be present in the
         * frame as it was passed to us.
         */
-       if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+       if (rt2x00_has_cap_hw_crypto(rt2x00dev))
                rt2x00crypto_tx_insert_iv(entry->skb, header_length);
 
        /*
index 8368aab86f286ee6ff5be65b85a73940f3c24cd0..c2b3b66291884e6171b2cd92b78024406ceb7252 100644 (file)
  */
 #define DEFAULT_RSSI           -128
 
-/*
- * Helper struct and macro to work with moving/walking averages.
- * When adding a value to the average value the following calculation
- * is needed:
- *
- *        avg_rssi = ((avg_rssi * 7) + rssi) / 8;
- *
- * The advantage of this approach is that we only need 1 variable
- * to store the average in (No need for a count and a total).
- * But more importantly, normal average values will over time
- * move less and less towards newly added values this results
- * that with link tuning, the device can have a very good RSSI
- * for a few minutes but when the device is moved away from the AP
- * the average will not decrease fast enough to compensate.
- * The walking average compensates this and will move towards
- * the new values correctly allowing a effective link tuning,
- * the speed of the average moving towards other values depends
- * on the value for the number of samples. The higher the number
- * of samples, the slower the average will move.
- * We use two variables to keep track of the average value to
- * compensate for the rounding errors. This can be a significant
- * error (>5dBm) if the factor is too low.
- */
-#define AVG_SAMPLES    8
-#define AVG_FACTOR     1000
-#define MOVING_AVERAGE(__avg, __val) \
-({ \
-       struct avg_val __new; \
-       __new.avg_weight = \
-           (__avg).avg_weight  ? \
-               ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
-                 ((__val) * (AVG_FACTOR))) / \
-                (AVG_SAMPLES)) : \
-               ((__val) * (AVG_FACTOR)); \
-       __new.avg = __new.avg_weight / (AVG_FACTOR); \
-       __new; \
-})
+/* Constants for EWMA calculations. */
+#define RT2X00_EWMA_FACTOR     1024
+#define RT2X00_EWMA_WEIGHT     8
+
+static inline int rt2x00link_get_avg_rssi(struct ewma *ewma)
+{
+       unsigned long avg;
+
+       avg = ewma_read(ewma);
+       if (avg)
+               return -avg;
+
+       return DEFAULT_RSSI;
+}
 
 static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev)
 {
        struct link_ant *ant = &rt2x00dev->link.ant;
 
-       if (ant->rssi_ant.avg && rt2x00dev->link.qual.rx_success)
-               return ant->rssi_ant.avg;
+       if (rt2x00dev->link.qual.rx_success)
+               return rt2x00link_get_avg_rssi(&ant->rssi_ant);
+
        return DEFAULT_RSSI;
 }
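
The removed MOVING_AVERAGE macro and the ewma helpers from <linux/average.h> that replace it implement the same exponentially weighted moving average: the value is kept internally scaled by a factor to limit rounding error, and each new sample contributes 1/weight of the result. Because the kernel helper works on unsigned longs, the driver feeds it -rssi (RSSI is a negative dBm figure) and negates it back in rt2x00link_get_avg_rssi(). A small self-contained userspace sketch of that arithmetic, using the factor/weight values above (an illustrative reimplementation, not the kernel code):

    #include <stdio.h>

    #define EWMA_FACTOR 1024   /* internal scaling to limit rounding error */
    #define EWMA_WEIGHT 8      /* each new sample contributes 1/8 */

    struct my_ewma { unsigned long internal; };

    static void my_ewma_init(struct my_ewma *e) { e->internal = 0; }

    static void my_ewma_add(struct my_ewma *e, unsigned long val)
    {
            e->internal = e->internal ?
                    (e->internal * (EWMA_WEIGHT - 1) + val * EWMA_FACTOR) / EWMA_WEIGHT :
                    val * EWMA_FACTOR;
    }

    static unsigned long my_ewma_read(const struct my_ewma *e)
    {
            return e->internal / EWMA_FACTOR;
    }

    int main(void)
    {
            /* RSSI samples in -dBm, stored as positive magnitudes */
            unsigned long samples[] = { 40, 42, 55, 70 };
            struct my_ewma rssi;
            size_t i;

            my_ewma_init(&rssi);
            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    my_ewma_add(&rssi, samples[i]);
            printf("average rssi: -%lu dBm\n", my_ewma_read(&rssi));
            return 0;
    }
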
 
@@ -100,8 +78,8 @@ static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev,
 
 static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev)
 {
-       rt2x00dev->link.ant.rssi_ant.avg = 0;
-       rt2x00dev->link.ant.rssi_ant.avg_weight = 0;
+       ewma_init(&rt2x00dev->link.ant.rssi_ant, RT2X00_EWMA_FACTOR,
+                 RT2X00_EWMA_WEIGHT);
 }
 
 static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev)
@@ -249,12 +227,12 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
        /*
         * Update global RSSI
         */
-       link->avg_rssi = MOVING_AVERAGE(link->avg_rssi, rxdesc->rssi);
+       ewma_add(&link->avg_rssi, -rxdesc->rssi);
 
        /*
         * Update antenna RSSI
         */
-       ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi);
+       ewma_add(&ant->rssi_ant, -rxdesc->rssi);
 }
 
 void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
@@ -309,6 +287,8 @@ void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna)
         */
        rt2x00dev->link.count = 0;
        memset(qual, 0, sizeof(*qual));
+       ewma_init(&rt2x00dev->link.avg_rssi, RT2X00_EWMA_FACTOR,
+                 RT2X00_EWMA_WEIGHT);
 
        /*
         * Restore the VGC level as stored in the registers,
@@ -363,17 +343,17 @@ static void rt2x00link_tuner(struct work_struct *work)
         * collect the RSSI data we could use this. Otherwise we
         * must fallback to the default RSSI value.
         */
-       if (!link->avg_rssi.avg || !qual->rx_success)
+       if (!qual->rx_success)
                qual->rssi = DEFAULT_RSSI;
        else
-               qual->rssi = link->avg_rssi.avg;
+               qual->rssi = rt2x00link_get_avg_rssi(&link->avg_rssi);
 
        /*
         * Check if link tuning is supported by the hardware, some hardware
         * do not support link tuning at all, while other devices can disable
         * the feature from the EEPROM.
         */
-       if (test_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags))
+       if (rt2x00_has_cap_link_tuning(rt2x00dev))
                rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
 
        /*
@@ -513,7 +493,7 @@ static void rt2x00link_vcocal(struct work_struct *work)
 void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
 {
        INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
-       if (test_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags))
+       if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
                INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal);
        INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
        INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
index f883802f350585322338ed7683e065bb85349ff3..7c157857f5cee925e796a49396a5843a758a01cd 100644 (file)
@@ -382,11 +382,11 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
         * of different types, but has no separate filter for PS Poll frames,
         * so the FIF_CONTROL flag implies FIF_PSPOLL.
         */
-       if (!test_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags)) {
+       if (!rt2x00_has_cap_control_filters(rt2x00dev)) {
                if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL)
                        *total_flags |= FIF_CONTROL | FIF_PSPOLL;
        }
-       if (!test_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags)) {
+       if (!rt2x00_has_cap_control_filter_pspoll(rt2x00dev)) {
                if (*total_flags & FIF_CONTROL)
                        *total_flags |= FIF_PSPOLL;
        }
@@ -469,7 +469,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
                return 0;
 
-       if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+       if (!rt2x00_has_cap_hw_crypto(rt2x00dev))
                return -EOPNOTSUPP;
 
        /*
@@ -754,6 +754,9 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct data_queue *queue;
 
+       if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+               return;
+
        tx_queue_for_each(rt2x00dev, queue)
                rt2x00queue_flush_queue(queue, drop);
 }
index 76d95deb274be56feb9803adb5b8d198f0c35bd0..25da20e7e1f34ab2c0b1fd3e38a82d1faece0f9e 100644 (file)
@@ -105,13 +105,11 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
                goto exit_release_regions;
        }
 
-       pci_enable_msi(pci_dev);
-
        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                rt2x00_probe_err("Failed to allocate hardware\n");
                retval = -ENOMEM;
-               goto exit_disable_msi;
+               goto exit_release_regions;
        }
 
        pci_set_drvdata(pci_dev, hw);
@@ -121,7 +119,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
        rt2x00dev->ops = ops;
        rt2x00dev->hw = hw;
        rt2x00dev->irq = pci_dev->irq;
-       rt2x00dev->name = pci_name(pci_dev);
+       rt2x00dev->name = ops->name;
 
        if (pci_is_pcie(pci_dev))
                rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
@@ -152,9 +150,6 @@ exit_free_reg:
 exit_free_device:
        ieee80211_free_hw(hw);
 
-exit_disable_msi:
-       pci_disable_msi(pci_dev);
-
 exit_release_regions:
        pci_release_regions(pci_dev);
 
@@ -179,8 +174,6 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
        rt2x00pci_free_reg(rt2x00dev);
        ieee80211_free_hw(hw);
 
-       pci_disable_msi(pci_dev);
-
        /*
         * Free the PCI device data.
         */
index 6c8a33b6ee225082d137df7e392306fa0101534a..50590b1420a516863845249c96f689b28e766964 100644 (file)
@@ -61,7 +61,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
         * at least 8 bytes available in headroom for IV/EIV
         * and 8 bytes for ICV data as tailroom.
         */
-       if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags)) {
+       if (rt2x00_has_cap_hw_crypto(rt2x00dev)) {
                head_size += 8;
                tail_size += 8;
        }
@@ -1033,38 +1033,21 @@ EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
 
 void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
 {
-       bool started;
        bool tx_queue =
                (queue->qid == QID_AC_VO) ||
                (queue->qid == QID_AC_VI) ||
                (queue->qid == QID_AC_BE) ||
                (queue->qid == QID_AC_BK);
 
-       mutex_lock(&queue->status_lock);
 
        /*
-        * If the queue has been started, we must stop it temporarily
-        * to prevent any new frames to be queued on the device. If
-        * we are not dropping the pending frames, the queue must
-        * only be stopped in the software and not the hardware,
-        * otherwise the queue will never become empty on its own.
+        * If we are not supposed to drop any pending
+        * frames, this means we must force a start (=kick)
+        * to the queue to make sure the hardware will
+        * start transmitting.
         */
-       started = test_bit(QUEUE_STARTED, &queue->flags);
-       if (started) {
-               /*
-                * Pause the queue
-                */
-               rt2x00queue_pause_queue(queue);
-
-               /*
-                * If we are not supposed to drop any pending
-                * frames, this means we must force a start (=kick)
-                * to the queue to make sure the hardware will
-                * start transmitting.
-                */
-               if (!drop && tx_queue)
-                       queue->rt2x00dev->ops->lib->kick_queue(queue);
-       }
+       if (!drop && tx_queue)
+               queue->rt2x00dev->ops->lib->kick_queue(queue);
 
        /*
         * Check if driver supports flushing, if that is the case we can
@@ -1080,14 +1063,6 @@ void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
        if (unlikely(!rt2x00queue_empty(queue)))
                rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
                            queue->qid);
-
-       /*
-        * Restore the queue to the previous status
-        */
-       if (started)
-               rt2x00queue_unpause_queue(queue);
-
-       mutex_unlock(&queue->status_lock);
 }
 EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
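
With the pause/unpause and status_lock handling dropped, rt2x00queue_flush_queue() no longer stops the queue itself; a caller that needs a hard reset is expected to bracket the flush, as the rt2x00usb DMA-timeout watchdog further below now does. The caller-side pattern, shown as a hypothetical helper:

    /* Hypothetical helper illustrating the stop/flush/start sequence. */
    static void example_force_reset_queue(struct data_queue *queue)
    {
            rt2x00queue_stop_queue(queue);        /* block new frames */
            rt2x00queue_flush_queue(queue, true); /* drop anything still pending */
            rt2x00queue_start_queue(queue);       /* resume normal operation */
    }
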
 
index 88289873c0cfd754790192e670c6e13bb5736933..4e121627925d91db9de550b635b3fcece71c90e3 100644 (file)
@@ -523,7 +523,9 @@ static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
        rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced forced reset\n",
                    queue->qid);
 
+       rt2x00queue_stop_queue(queue);
        rt2x00queue_flush_queue(queue, true);
+       rt2x00queue_start_queue(queue);
 }
 
 static int rt2x00usb_dma_timeout(struct data_queue *queue)
index 54d3ddfc988845cfa844b5c1d76c6a78ec62e4de..a5b69cb49012162580ef78291b696ce15abbeb94 100644 (file)
@@ -685,7 +685,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
 
        rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
        rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
-                         !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+                         !rt2x00_has_cap_frame_type(rt2x00dev));
 
        /*
         * Configure the RX antenna.
@@ -813,10 +813,10 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
 
        if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
                sel = antenna_sel_a;
-               lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+               lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
        } else {
                sel = antenna_sel_bg;
-               lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+               lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
        }
 
        for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -836,7 +836,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
        else if (rt2x00_rf(rt2x00dev, RF2527))
                rt61pci_config_antenna_2x(rt2x00dev, ant);
        else if (rt2x00_rf(rt2x00dev, RF2529)) {
-               if (test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags))
+               if (rt2x00_has_cap_double_antenna(rt2x00dev))
                        rt61pci_config_antenna_2x(rt2x00dev, ant);
                else
                        rt61pci_config_antenna_2529(rt2x00dev, ant);
@@ -850,13 +850,13 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
        short lna_gain = 0;
 
        if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
-               if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+               if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
                        lna_gain += 14;
 
                rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
                lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1);
        } else {
-               if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
+               if (rt2x00_has_cap_external_lna_a(rt2x00dev))
                        lna_gain += 14;
 
                rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A, &eeprom);
@@ -1054,14 +1054,14 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
        if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
                low_bound = 0x28;
                up_bound = 0x48;
-               if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+               if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
                        low_bound += 0x10;
                        up_bound += 0x10;
                }
        } else {
                low_bound = 0x20;
                up_bound = 0x40;
-               if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+               if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
                        low_bound += 0x10;
                        up_bound += 0x10;
                }
@@ -2578,7 +2578,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
         * eeprom word.
         */
        if (rt2x00_rf(rt2x00dev, RF2529) &&
-           !test_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags)) {
+           !rt2x00_has_cap_double_antenna(rt2x00dev)) {
                rt2x00dev->default_ant.rx =
                    ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
                rt2x00dev->default_ant.tx =
@@ -2793,7 +2793,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        spec->supported_bands = SUPPORT_BAND_2GHZ;
        spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
 
-       if (!test_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags)) {
+       if (!rt2x00_has_cap_rf_sequence(rt2x00dev)) {
                spec->num_channels = 14;
                spec->channels = rf_vals_noseq;
        } else {
index 1d3880e09a13eabb73e93c77b7af948ca12421f8..1baf9c896dcd6477efba878a1c0e407411635206 100644 (file)
@@ -595,8 +595,8 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
        switch (ant->rx) {
        case ANTENNA_HW_DIVERSITY:
                rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
-               temp = !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags)
-                      && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
+               temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
+                      (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
                rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
                break;
        case ANTENNA_A:
@@ -636,7 +636,7 @@ static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
 
        rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0);
        rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
-                         !test_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags));
+                         !rt2x00_has_cap_frame_type(rt2x00dev));
 
        /*
         * Configure the RX antenna.
@@ -709,10 +709,10 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
 
        if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
                sel = antenna_sel_a;
-               lna = test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags);
+               lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
        } else {
                sel = antenna_sel_bg;
-               lna = test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags);
+               lna = rt2x00_has_cap_external_lna_bg(rt2x00dev);
        }
 
        for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++)
@@ -740,7 +740,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
        short lna_gain = 0;
 
        if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
-               if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags))
+               if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
                        lna_gain += 14;
 
                rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG, &eeprom);
@@ -930,7 +930,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
                low_bound = 0x28;
                up_bound = 0x48;
 
-               if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+               if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
                        low_bound += 0x10;
                        up_bound += 0x10;
                }
@@ -946,7 +946,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
                        up_bound = 0x1c;
                }
 
-               if (test_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags)) {
+               if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
                        low_bound += 0x14;
                        up_bound += 0x10;
                }
@@ -1661,7 +1661,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
        }
 
        if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
-               if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags)) {
+               if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
                        if (lna == 3 || lna == 2)
                                offset += 10;
                } else {
index fc207b268e4fd3db1926c494c8b470a6833c8102..a91506b12a627f26a084aef5e6dcb9a4213ac5e4 100644 (file)
@@ -1122,7 +1122,6 @@ static int rtl8180_probe(struct pci_dev *pdev,
        iounmap(priv->map);
 
  err_free_dev:
-       pci_set_drvdata(pdev, NULL);
        ieee80211_free_hw(dev);
 
  err_free_reg:
index 8bb4a9a01a1838e47d0d64b844503726999f3ff9..9a78e3daf74264fa13f446a4779d342ab6341488 100644 (file)
@@ -1613,6 +1613,35 @@ err_free:
 }
 EXPORT_SYMBOL(rtl_send_smps_action);
 
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       enum io_type iotype;
+
+       if (!is_hal_stop(rtlhal)) {
+               switch (operation) {
+               case SCAN_OPT_BACKUP:
+                       iotype = IO_CMD_PAUSE_DM_BY_SCAN;
+                       rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                     HW_VAR_IO_CMD,
+                                                     (u8 *)&iotype);
+                       break;
+               case SCAN_OPT_RESTORE:
+                       iotype = IO_CMD_RESUME_DM_BY_SCAN;
+                       rtlpriv->cfg->ops->set_hw_reg(hw,
+                                                     HW_VAR_IO_CMD,
+                                                     (u8 *)&iotype);
+                       break;
+               default:
+                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
+                                "Unknown Scan Backup operation.\n");
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL(rtl_phy_scan_operation_backup);
+
 /* There seem to be issues in mac80211 regarding when del ba frames can be
 * received. As a workaround, we make a fake del_ba if we receive a ba_req;
  * however, rx_agg was opened to let mac80211 release some ba related
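
rtl_phy_scan_operation_backup() above consolidates the scan backup/restore helper into the shared rtlwifi core: the per-chip copies removed further down in this diff (rtl88e_phy_scan_operation_backup, rtl92c_phy_scan_operation_backup) were functionally identical, so each chip driver now points its ops table at the common symbol instead. A minimal sketch of that wiring, with the other fields elided:

    /* In a chip driver's sw.c; all unrelated ops fields elided. */
    static struct rtl_hal_ops example_hal_ops = {
            /* ... */
            .scan_operation_backup = rtl_phy_scan_operation_backup,
            /* ... */
    };
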
index 0e5fe0902daf6eb180a1da431ce54477e49486cb..0cd07420777a702afca02422cab06f16e06a9121 100644 (file)
@@ -114,7 +114,6 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
 void rtl_deinit_rfkill(struct ieee80211_hw *hw);
 
 void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb);
-void rtl_watch_dog_timer_callback(unsigned long data);
 void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
 
 bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
@@ -153,5 +152,6 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
 bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
 struct sk_buff *rtl_make_del_ba(struct ieee80211_hw *hw,
                                u8 *sa, u8 *bssid, u16 tid);
+void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
 
 #endif
index 35e00086a520bf06393237b3aebf0aa3bbb9e617..0105e6c1901ed96fc6be6cfd4900b2285ed0835a 100644 (file)
 #define        CAM_CONFIG_USEDK                                1
 #define        CAM_CONFIG_NO_USEDK                             0
 
-extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
-extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
-                       u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
-                       u32 ul_default_key, u8 *key_content);
+void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+                        u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+                        u32 ul_default_key, u8 *key_content);
 int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
-                       u32 ul_key_id);
+                            u32 ul_key_id);
 void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
 void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
 void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
index 733b7ce7f0e2a981f442cd9cdadd8f40a6426558..210ce7cd94d8d14201a68ce285e9c880993d30db 100644 (file)
@@ -115,7 +115,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
        mutex_lock(&rtlpriv->locks.conf_mutex);
 
        mac->link_state = MAC80211_NOLINK;
-       memset(mac->bssid, 0, 6);
+       memset(mac->bssid, 0, ETH_ALEN);
        mac->vendor = PEER_UNKNOWN;
 
        /*reset sec info */
@@ -280,7 +280,7 @@ static void rtl_op_remove_interface(struct ieee80211_hw *hw,
        mac->p2p = 0;
        mac->vif = NULL;
        mac->link_state = MAC80211_NOLINK;
-       memset(mac->bssid, 0, 6);
+       memset(mac->bssid, 0, ETH_ALEN);
        mac->vendor = PEER_UNKNOWN;
        mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
        rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
@@ -721,7 +721,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                        mac->link_state = MAC80211_LINKED;
                        mac->cnt_after_linked = 0;
                        mac->assoc_id = bss_conf->aid;
-                       memcpy(mac->bssid, bss_conf->bssid, 6);
+                       memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
 
                        if (rtlpriv->cfg->ops->linked_set_reg)
                                rtlpriv->cfg->ops->linked_set_reg(hw);
@@ -750,7 +750,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                        if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
                                rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
                        mac->link_state = MAC80211_NOLINK;
-                       memset(mac->bssid, 0, 6);
+                       memset(mac->bssid, 0, ETH_ALEN);
                        mac->vendor = PEER_UNKNOWN;
 
                        if (rtlpriv->dm.supp_phymode_switch) {
@@ -826,7 +826,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
                         bss_conf->bssid);
 
                mac->vendor = PEER_UNKNOWN;
-               memcpy(mac->bssid, bss_conf->bssid, 6);
+               memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
                rtlpriv->cfg->ops->set_network_type(hw, vif->type);
 
                rcu_read_lock();
index 838a1ed3f1942b9f3d56a122c2840548b38103c9..ae13fb94b2e8d7b82a259cdc830643cb2e144901 100644 (file)
@@ -1203,20 +1203,18 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
 
 static u16 efuse_get_current_size(struct ieee80211_hw *hw)
 {
-       int continual = true;
        u16 efuse_addr = 0;
        u8 hworden;
        u8 efuse_data, word_cnts;
 
-       while (continual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
-              && (efuse_addr < EFUSE_MAX_SIZE)) {
-               if (efuse_data != 0xFF) {
-                       hworden = efuse_data & 0x0F;
-                       word_cnts = efuse_calculate_word_cnts(hworden);
-                       efuse_addr = efuse_addr + (word_cnts * 2) + 1;
-               } else {
-                       continual = false;
-               }
+       while (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
+              efuse_addr < EFUSE_MAX_SIZE) {
+               if (efuse_data == 0xFF)
+                       break;
+
+               hworden = efuse_data & 0x0F;
+               word_cnts = efuse_calculate_word_cnts(hworden);
+               efuse_addr = efuse_addr + (word_cnts * 2) + 1;
        }
 
        return efuse_addr;
index 395a326acfb44a0874b331b64442d23b5e120d1a..1663b3afd41e90e67512823f0c5a0f7ee02ef80b 100644 (file)
@@ -104,20 +104,19 @@ struct efuse_priv {
        u8 tx_power_g[14];
 };
 
-extern void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
-extern void efuse_initialize(struct ieee80211_hw *hw);
-extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
-extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
-extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
-                      u16 _size_byte, u8 *pbuf);
-extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
-                             u16 offset, u32 *value);
-extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
-                              u16 offset, u32 value);
-extern bool efuse_shadow_update(struct ieee80211_hw *hw);
-extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
-extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
-extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
-extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf);
+void efuse_initialize(struct ieee80211_hw *hw);
+u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
+void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
+void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf);
+void efuse_shadow_read(struct ieee80211_hw *hw, u8 type, u16 offset,
+                      u32 *value);
+void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
+                       u32 value);
+bool efuse_shadow_update(struct ieee80211_hw *hw);
+bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
+void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
+void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
+void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
 
 #endif
index 703f839af6ca0b4abefcfc754dc21eb3d13b2dac..0f494444bcd1d90b457b927d704bf77abe0592ca 100644 (file)
@@ -736,7 +736,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
 
        struct rtl_stats stats = {
                .signal = 0,
-               .noise = -98,
                .rate = 0,
        };
        int index = rtlpci->rx_ring[rx_queue_idx].idx;
@@ -2009,7 +2008,6 @@ fail2:
 fail1:
        if (hw)
                ieee80211_free_hw(hw);
-       pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);
 
        return err;
@@ -2064,8 +2062,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
 
        rtl_pci_disable_aspm(hw);
 
-       pci_set_drvdata(pdev, NULL);
-
        ieee80211_free_hw(hw);
 }
 EXPORT_SYMBOL(rtl_pci_disconnect);
index b68cae3024fc851fec0b58359328a39e14e1e4da..e06971be7df77e047e8d869d046a126d0e987d14 100644 (file)
@@ -143,6 +143,7 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
                } else {
                        rtlhal->fw_clk_change_in_progress = false;
                        spin_unlock_bh(&rtlpriv->locks.fw_ps_lock);
+                       break;
                }
        }
 
index e655c04732251f18ee1121625df732ab8b8d8096..d67f9c731cc4600e57d774e7125d80c945e196b6 100644 (file)
@@ -1136,34 +1136,6 @@ void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
                                           &bw40_pwr[0], channel);
 }
 
-void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       enum io_type iotype;
-
-       if (!is_hal_stop(rtlhal)) {
-               switch (operation) {
-               case SCAN_OPT_BACKUP:
-                       iotype = IO_CMD_PAUSE_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-                       break;
-               case SCAN_OPT_RESTORE:
-                       iotype = IO_CMD_RESUME_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown Scan Backup operation.\n");
-                       break;
-               }
-       }
-}
-
 void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
index f1acd6d27e444ca4fac956d14190de873ef15d06..89f0f1ef14657ae467bea1445658cfd202c31da6 100644 (file)
@@ -200,37 +200,35 @@ enum _ANT_DIV_TYPE {
        CGCS_RX_SW_ANTDIV               = 0x05,
 };
 
-extern u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                  u32 regaddr, u32 bitmask);
-extern void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                 u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask);
-extern void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                 enum radio_path rfpath, u32 regaddr,
-                                 u32 bitmask, u32 data);
-extern bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
-                                        long *powerlevel);
-extern void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw,
-                                            u8 operation);
-extern void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                  enum nl80211_channel_type ch_type);
-extern void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
+u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw,
+                           u32 regaddr, u32 bitmask);
+void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
+                          u32 regaddr, u32 bitmask, u32 data);
+u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
+                           enum radio_path rfpath, u32 regaddr,
+                           u32 bitmask);
+void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
+                          enum radio_path rfpath, u32 regaddr,
+                          u32 bitmask, u32 data);
+bool rtl88e_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl88e_phy_rf_config(struct ieee80211_hw *hw);
+void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl88e_phy_get_txpower_level(struct ieee80211_hw *hw,
+                                 long *powerlevel);
+void rtl88e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
+                           enum nl80211_channel_type ch_type);
+void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
 void rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw);
 void rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum radio_path rfpath);
 bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                         enum rf_pwrstate rfpwr_state);
+bool rtl88e_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                  enum rf_pwrstate rfpwr_state);
 
 #endif
index c254693a1e6ac3b9ce69e6e89947748e5331b48e..347af1e4f438e57cf2c37b8975169a7c92941681 100644 (file)
@@ -30,6 +30,7 @@
 #include "../wifi.h"
 #include "../core.h"
 #include "../pci.h"
+#include "../base.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -244,7 +245,7 @@ static struct rtl_hal_ops rtl8188ee_hal_ops = {
        .set_bw_mode = rtl88e_phy_set_bw_mode,
        .switch_channel = rtl88e_phy_sw_chnl,
        .dm_watchdog = rtl88e_dm_watchdog,
-       .scan_operation_backup = rtl88e_phy_scan_operation_backup,
+       .scan_operation_backup = rtl_phy_scan_operation_backup,
        .set_rf_power_state = rtl88e_phy_set_rf_power_state,
        .led_control = rtl88ee_led_control,
        .set_desc = rtl88ee_set_desc,
index 68685a8982574e3e6b88c94379d00e4518a02601..aece6c9cccf1b50febc16049f4fd962c5c834aa7 100644 (file)
@@ -478,7 +478,6 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
 
        /*rx_status->qual = status->signal; */
        rx_status->signal = status->recvsignalpower + 10;
-       /*rx_status->noise = -status->noise; */
        if (status->packet_report_type == TX_REPORT2) {
                status->macid_valid_entry[0] =
                         GET_RX_RPT2_DESC_MACID_VALID_1(pdesc);
index d2d57a27a7c1636538003fc408b36c2bda24ce73..e9caa5d4cff0f910488cd4b22899ad9b5c84665a 100644 (file)
@@ -541,29 +541,6 @@ EXPORT_SYMBOL(rtl92c_dm_write_dig);
 
 static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
-
-       u8 h2c_parameter[3] = { 0 };
-
-       return;
-
-       if (tmpentry_max_pwdb != 0) {
-               rtlpriv->dm.entry_max_undec_sm_pwdb = tmpentry_max_pwdb;
-       } else {
-               rtlpriv->dm.entry_max_undec_sm_pwdb = 0;
-       }
-
-       if (tmpentry_min_pwdb != 0xff) {
-               rtlpriv->dm.entry_min_undec_sm_pwdb = tmpentry_min_pwdb;
-       } else {
-               rtlpriv->dm.entry_min_undec_sm_pwdb = 0;
-       }
-
-       h2c_parameter[2] = (u8) (rtlpriv->dm.undec_sm_pwdb & 0xFF);
-       h2c_parameter[0] = 0;
-
-       rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
 }
 
 void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
@@ -673,7 +650,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
        s8 cck_index = 0;
        int i;
        bool is2t = IS_92C_SERIAL(rtlhal->version);
-       s8 txpwr_level[2] = {0, 0};
+       s8 txpwr_level[3] = {0, 0, 0};
        u8 ofdm_min_index = 6, rf;
 
        rtlpriv->dm.txpower_trackinginit = true;
index 246e5352f2e15a850dbc7971c08bdbbcffbc7172..0c0e78263a665190aacf3d33d21fa87b99d4dd86 100644 (file)
@@ -592,36 +592,6 @@ long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
 }
 EXPORT_SYMBOL(_rtl92c_phy_txpwr_idx_to_dbm);
 
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       enum io_type iotype;
-
-       if (!is_hal_stop(rtlhal)) {
-               switch (operation) {
-               case SCAN_OPT_BACKUP:
-                       iotype = IO_CMD_PAUSE_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-
-                       break;
-               case SCAN_OPT_RESTORE:
-                       iotype = IO_CMD_RESUME_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown Scan Backup operation\n");
-                       break;
-               }
-       }
-}
-EXPORT_SYMBOL(rtl92c_phy_scan_operation_backup);
-
 void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
                            enum nl80211_channel_type ch_type)
 {
index cec10d696492ab4055a2982ffadb1202494eea5e..e79dabe9ba1de77c757e91b46c6ba48ca6e9f12f 100644 (file)
@@ -39,9 +39,7 @@
 #define RT_CANNOT_IO(hw)               false
 #define HIGHPOWER_RADIOA_ARRAYLEN      22
 
-#define IQK_ADDA_REG_NUM               16
 #define MAX_TOLERANCE                  5
-#define        IQK_DELAY_TIME                  1
 
 #define        APK_BB_REG_NUM                  5
 #define        APK_AFE_REG_NUM                 16
@@ -205,8 +203,6 @@ void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
 void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
 bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
                                          long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
-                                            u8 operation);
 void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
                                   enum nl80211_channel_type ch_type);
 void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
index 3cfa1bb0f47676853500b237a37aaeb9e49b1bad..fa24de43ce795d4588f002a93390ea8ffb5ee109 100644 (file)
@@ -152,8 +152,6 @@ enum version_8192c {
 #define IS_VENDOR_UMC_A_CUT(version)   ((IS_CHIP_VENDOR_UMC(version)) ? \
        ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
 #define IS_CHIP_VER_B(version)  ((version & CHIP_VER_B) ? true : false)
-#define IS_VENDOR_UMC_A_CUT(version)   ((IS_CHIP_VENDOR_UMC(version)) ? \
-       ((GET_CVID_CUT_VERSION(version)) ? false : true) : false)
 #define IS_92C_SERIAL(version)  ((version & CHIP_92C_BITMASK) ? true : false)
 #define IS_CHIP_VENDOR_UMC(version)            \
        ((version & CHIP_VENDOR_UMC) ? true : false)
index d5e3b704f9304a596bd943cfcbebd00db6559776..94486cca400077515ced7c81e0c35922af97e425 100644 (file)
@@ -39,9 +39,7 @@
 #define RT_CANNOT_IO(hw)               false
 #define HIGHPOWER_RADIOA_ARRAYLEN      22
 
-#define IQK_ADDA_REG_NUM               16
 #define MAX_TOLERANCE                  5
-#define        IQK_DELAY_TIME                  1
 
 #define        APK_BB_REG_NUM                  5
 #define        APK_AFE_REG_NUM                 16
@@ -188,36 +186,29 @@ struct tx_power_struct {
 };
 
 bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
-u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                  u32 regaddr, u32 bitmask);
-void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                 u32 regaddr, u32 bitmask, u32 data);
-u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask);
-extern void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask, u32 data);
+u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
+void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
+                          u32 data);
+u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                           u32 regaddr, u32 bitmask);
+void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+                           u32 regaddr, u32 bitmask, u32 data);
 bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
 bool rtl92ce_phy_bb_config(struct ieee80211_hw *hw);
 bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
 bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-                                                enum radio_path rfpath);
+                                         enum radio_path rfpath);
 void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
-                                        long *powerlevel);
+void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel);
 void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
 bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
                                          long power_indbm);
-void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
-                                            u8 operation);
 void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                  enum nl80211_channel_type ch_type);
+                           enum nl80211_channel_type ch_type);
 void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
 u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
 void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
-void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
-                                        u16 beaconinterval);
+void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval);
 void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
 void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
 void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
@@ -225,28 +216,25 @@ void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum radio_path rfpath);
 bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
-                                             u32 rfpath);
-bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+                                      u32 rfpath);
 bool rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                         enum rf_pwrstate rfpwr_state);
+                                   enum rf_pwrstate rfpwr_state);
 void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
 bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
 void rtl92c_phy_set_io(struct ieee80211_hw *hw);
 void rtl92c_bb_block_on(struct ieee80211_hw *hw);
-u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
-                                     enum radio_path rfpath, u32 offset);
+u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw, enum radio_path rfpath,
+                              u32 offset);
 u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
-                                        enum radio_path rfpath, u32 offset);
+                                 enum radio_path rfpath, u32 offset);
 u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
 void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
-                                       enum radio_path rfpath, u32 offset,
-                                       u32 data);
+                                enum radio_path rfpath, u32 offset, u32 data);
 void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
-                                          enum radio_path rfpath, u32 offset,
-                                          u32 data);
+                                   enum radio_path rfpath, u32 offset,
+                                   u32 data);
 void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
-                                                  u32 regaddr, u32 bitmask,
-                                                  u32 data);
+                                           u32 regaddr, u32 bitmask, u32 data);
 bool _rtl92ce_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
 void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
 bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
index bd4aef74c0567929849c24008ead104b5a11b15d..8922ecb47ad245cdb7d1b759f44c558f97e52173 100644 (file)
 #define        EEPROM_DEFAULT_TXPOWERLEVEL             0x22
 #define        EEPROM_DEFAULT_HT40_2SDIFF              0x0
 #define EEPROM_DEFAULT_HT20_DIFF               2
-#define        EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF      0x3
 #define EEPROM_DEFAULT_HT40_PWRMAXOFFSET       0
 #define EEPROM_DEFAULT_HT20_PWRMAXOFFSET       0
 
 
 #define        EEPROM_TXPWR_GROUP                      0x6F
 
-#define EEPROM_TSSI_A                          0x76
-#define EEPROM_TSSI_B                          0x77
-#define EEPROM_THERMAL_METER                   0x78
-
 #define EEPROM_CHANNELPLAN                     0x75
 
-#define RF_OPTION1                             0x79
-#define RF_OPTION2                             0x7A
-#define RF_OPTION3                             0x7B
-#define RF_OPTION4                             0x7C
-
 #define        STOPBECON                               BIT(6)
 #define        STOPHIGHT                               BIT(5)
 #define        STOPMGT                                 BIT(4)
 #define RSV_CTRL                               0x001C
 #define RD_CTRL                                        0x0524
 
-#define REG_USB_INFO                           0xFE17
-#define REG_USB_SPECIAL_OPTION                 0xFE55
-
-#define REG_USB_DMA_AGG_TO                     0xFE5B
-#define REG_USB_AGG_TO                         0xFE5C
-#define REG_USB_AGG_TH                         0xFE5D
-
 #define REG_USB_VID                            0xFE60
 #define REG_USB_PID                            0xFE62
 #define REG_USB_OPTIONAL                       0xFE64
 #define POLLING_LLT_THRESHOLD                  20
 #define POLLING_READY_TIMEOUT_COUNT            1000
 
-#define        MAX_MSS_DENSITY_2T                      0x13
-#define        MAX_MSS_DENSITY_1T                      0x0A
-
 #define EPROM_CMD_OPERATING_MODE_MASK  ((1<<7)|(1<<6))
 #define EPROM_CMD_CONFIG                       0x3
 #define EPROM_CMD_LOAD                         1
index 6c8d56efceae2fd8e76e8e128fea6e720241cf12..d8fe68b389d213e17b133b7044fec647b315f2b8 100644 (file)
 #define RF6052_MAX_REG                 0x3F
 #define RF6052_MAX_PATH                        2
 
-extern void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                            u8 bandwidth);
-extern void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel);
-extern void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                               u8 *ppowerlevel, u8 channel);
-extern bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92ce_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel);
+void rtl92ce_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                        u8 *ppowerlevel, u8 channel);
+bool rtl92ce_phy_rf6052_config(struct ieee80211_hw *hw);
 #endif
index 14203561b6ee5463d04841848a435cc9c51d59cd..b790320d20305427c5ed4fa5fd9f37d944957e31 100644 (file)
@@ -30,6 +30,7 @@
 #include "../wifi.h"
 #include "../core.h"
 #include "../pci.h"
+#include "../base.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -219,7 +220,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
        .set_bw_mode = rtl92c_phy_set_bw_mode,
        .switch_channel = rtl92c_phy_sw_chnl,
        .dm_watchdog = rtl92c_dm_watchdog,
-       .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+       .scan_operation_backup = rtl_phy_scan_operation_backup,
        .set_rf_power_state = rtl92c_phy_set_rf_power_state,
        .led_control = rtl92ce_led_control,
        .set_desc = rtl92ce_set_desc,
index 6ad23b413eb3e6e4251e2242fc4dc912e3b7b6e5..52abf0a862fa70f26ec26ff62d2017f0a11ce5c9 100644 (file)
@@ -420,7 +420,6 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
 
        /*rx_status->qual = stats->signal; */
        rx_status->signal = stats->recvsignalpower + 10;
-       /*rx_status->noise = -stats->noise; */
 
        return true;
 }
index da4f587199ee5537cd20b0f62a326fd8157afb9a..393685390f3ee41f78a903d198eb51484654a55f 100644 (file)
@@ -32,6 +32,7 @@
 #include "../usb.h"
 #include "../ps.h"
 #include "../cam.h"
+#include "../stats.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -738,16 +739,6 @@ static u8 _rtl92c_evm_db_to_percentage(char value)
        return ret_val;
 }
 
-static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
-                                    u8 signal_strength_index)
-{
-       long signal_power;
-
-       signal_power = (long)((signal_strength_index + 1) >> 1);
-       signal_power -= 95;
-       return signal_power;
-}
-
 static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
                long currsig)
 {
@@ -913,180 +904,6 @@ static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
                          (hw, total_rssi /= rf_rx_num));
 }
 
-static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
-               struct rtl_stats *pstats)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       u8 rfpath;
-       u32 last_rssi, tmpval;
-
-       if (pstats->packet_toself || pstats->packet_beacon) {
-               rtlpriv->stats.rssi_calculate_cnt++;
-               if (rtlpriv->stats.ui_rssi.total_num++ >=
-                   PHY_RSSI_SLID_WIN_MAX) {
-                       rtlpriv->stats.ui_rssi.total_num =
-                           PHY_RSSI_SLID_WIN_MAX;
-                       last_rssi =
-                           rtlpriv->stats.ui_rssi.elements[rtlpriv->
-                                                          stats.ui_rssi.index];
-                       rtlpriv->stats.ui_rssi.total_val -= last_rssi;
-               }
-               rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
-               rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
-                                       index++] = pstats->signalstrength;
-               if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
-                       rtlpriv->stats.ui_rssi.index = 0;
-               tmpval = rtlpriv->stats.ui_rssi.total_val /
-                   rtlpriv->stats.ui_rssi.total_num;
-               rtlpriv->stats.signal_strength =
-                   _rtl92c_translate_todbm(hw, (u8) tmpval);
-               pstats->rssi = rtlpriv->stats.signal_strength;
-       }
-       if (!pstats->is_cck && pstats->packet_toself) {
-               for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
-                    rfpath++) {
-                       if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
-                               continue;
-                       if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
-                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
-                                   pstats->rx_mimo_signalstrength[rfpath];
-                       }
-                       if (pstats->rx_mimo_signalstrength[rfpath] >
-                           rtlpriv->stats.rx_rssi_percentage[rfpath]) {
-                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
-                                   ((rtlpriv->stats.
-                                     rx_rssi_percentage[rfpath] *
-                                     (RX_SMOOTH_FACTOR - 1)) +
-                                    (pstats->rx_mimo_signalstrength[rfpath])) /
-                                   (RX_SMOOTH_FACTOR);
-
-                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
-                                   rtlpriv->stats.rx_rssi_percentage[rfpath] +
-                                   1;
-                       } else {
-                               rtlpriv->stats.rx_rssi_percentage[rfpath] =
-                                   ((rtlpriv->stats.
-                                     rx_rssi_percentage[rfpath] *
-                                     (RX_SMOOTH_FACTOR - 1)) +
-                                    (pstats->rx_mimo_signalstrength[rfpath])) /
-                                   (RX_SMOOTH_FACTOR);
-                       }
-               }
-       }
-}
-
-static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
-                                              struct rtl_stats *pstats)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       int weighting = 0;
-
-       if (rtlpriv->stats.recv_signal_power == 0)
-               rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
-       if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
-               weighting = 5;
-       else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
-               weighting = (-5);
-       rtlpriv->stats.recv_signal_power =
-           (rtlpriv->stats.recv_signal_power * 5 +
-            pstats->recvsignalpower + weighting) / 6;
-}
-
-static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
-               struct rtl_stats *pstats)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
-       long undec_sm_pwdb = 0;
-
-       if (mac->opmode == NL80211_IFTYPE_ADHOC) {
-               return;
-       } else {
-               undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
-       }
-       if (pstats->packet_toself || pstats->packet_beacon) {
-               if (undec_sm_pwdb < 0)
-                       undec_sm_pwdb = pstats->rx_pwdb_all;
-               if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
-                       undec_sm_pwdb = (((undec_sm_pwdb) *
-                             (RX_SMOOTH_FACTOR - 1)) +
-                            (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
-                       undec_sm_pwdb += 1;
-               } else {
-                       undec_sm_pwdb = (((undec_sm_pwdb) *
-                             (RX_SMOOTH_FACTOR - 1)) +
-                            (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
-               }
-               rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
-               _rtl92c_update_rxsignalstatistics(hw, pstats);
-       }
-}
-
-static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
-                                            struct rtl_stats *pstats)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 last_evm = 0, n_stream, tmpval;
-
-       if (pstats->signalquality != 0) {
-               if (pstats->packet_toself || pstats->packet_beacon) {
-                       if (rtlpriv->stats.LINK_Q.total_num++ >=
-                           PHY_LINKQUALITY_SLID_WIN_MAX) {
-                               rtlpriv->stats.LINK_Q.total_num =
-                                   PHY_LINKQUALITY_SLID_WIN_MAX;
-                               last_evm =
-                                   rtlpriv->stats.LINK_Q.elements
-                                   [rtlpriv->stats.LINK_Q.index];
-                               rtlpriv->stats.LINK_Q.total_val -=
-                                   last_evm;
-                       }
-                       rtlpriv->stats.LINK_Q.total_val +=
-                           pstats->signalquality;
-                       rtlpriv->stats.LINK_Q.elements
-                          [rtlpriv->stats.LINK_Q.index++] =
-                           pstats->signalquality;
-                       if (rtlpriv->stats.LINK_Q.index >=
-                           PHY_LINKQUALITY_SLID_WIN_MAX)
-                               rtlpriv->stats.LINK_Q.index = 0;
-                       tmpval = rtlpriv->stats.LINK_Q.total_val /
-                           rtlpriv->stats.LINK_Q.total_num;
-                       rtlpriv->stats.signal_quality = tmpval;
-                       rtlpriv->stats.last_sigstrength_inpercent = tmpval;
-                       for (n_stream = 0; n_stream < 2;
-                            n_stream++) {
-                               if (pstats->RX_SIGQ[n_stream] != -1) {
-                                       if (!rtlpriv->stats.RX_EVM[n_stream]) {
-                                               rtlpriv->stats.RX_EVM[n_stream]
-                                                = pstats->RX_SIGQ[n_stream];
-                                       }
-                                       rtlpriv->stats.RX_EVM[n_stream] =
-                                           ((rtlpriv->stats.RX_EVM
-                                           [n_stream] *
-                                           (RX_SMOOTH_FACTOR - 1)) +
-                                           (pstats->RX_SIGQ
-                                           [n_stream] * 1)) /
-                                           (RX_SMOOTH_FACTOR);
-                               }
-                       }
-               }
-       } else {
-               ;
-       }
-}
-
-static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
-                                    u8 *buffer,
-                                    struct rtl_stats *pcurrent_stats)
-{
-       if (!pcurrent_stats->packet_matchbssid &&
-           !pcurrent_stats->packet_beacon)
-               return;
-       _rtl92c_process_ui_rssi(hw, pcurrent_stats);
-       _rtl92c_process_pwdb(hw, pcurrent_stats);
-       _rtl92c_process_LINK_Q(hw, pcurrent_stats);
-}
-
 void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
                                               struct sk_buff *skb,
                                               struct rtl_stats *pstats,
@@ -1123,5 +940,5 @@ void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
        _rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
                                   packet_matchbssid, packet_toself,
                                   packet_beacon);
-       _rtl92c_process_phyinfo(hw, tmp_buf, pstats);
+       rtl_process_phyinfo(hw, tmp_buf, pstats);
 }
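
The per-driver statistics helpers deleted above (UI RSSI, PWDB and link-quality tracking) all revolve around the same exponential smoothing with RX_SMOOTH_FACTOR; the call now goes to rtl_process_phyinfo(), presumably the shared helper declared in the ../stats.h header this file newly includes. A minimal sketch of the recurrence those deleted bodies used, with an illustrative function name that is not part of the driver:

    /* new = (old * (N - 1) + sample) / N, nudged up by one when the
     * sample exceeds the running value, so rises are tracked slightly
     * faster than falls -- the pattern visible in the removed
     * _rtl92c_process_ui_rssi() and _rtl92c_process_pwdb() bodies.
     */
    static u32 smooth(u32 old, u32 sample, u32 n)
    {
            u32 val = (old * (n - 1) + sample) / n;

            if (sample > old)
                    val++;
            return val;
    }
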
index 090fd33a158db1635c14d3ea323a5363f7c82169..11b439d6b67167c11e1529919bc5f07dfe96e060 100644 (file)
 #define RF6052_MAX_REG                 0x3F
 #define RF6052_MAX_PATH                        2
 
-extern void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                           u8 bandwidth);
-extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                             u8 *ppowerlevel);
-extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel, u8 channel);
+void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                      u8 *ppowerlevel);
+void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel, u8 channel);
 bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw);
 bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
-                                         enum radio_path rfpath);
+                                          enum radio_path rfpath);
 void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
                                        u8 *ppowerlevel);
 void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
index 2bd5985262171bfdfa81de5494a4ef9640d72cc5..9936de716ad58929e50dc0e6f02fca77f5efee68 100644 (file)
@@ -31,6 +31,7 @@
 #include "../core.h"
 #include "../usb.h"
 #include "../efuse.h"
+#include "../base.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -117,7 +118,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
        .set_bw_mode = rtl92c_phy_set_bw_mode,
        .switch_channel = rtl92c_phy_sw_chnl,
        .dm_watchdog = rtl92c_dm_watchdog,
-       .scan_operation_backup = rtl92c_phy_scan_operation_backup,
+       .scan_operation_backup = rtl_phy_scan_operation_backup,
        .set_rf_power_state = rtl92cu_phy_set_rf_power_state,
        .led_control = rtl92cu_led_control,
        .enable_hw_sec = rtl92cu_enable_hw_security_config,
index 763cf1defab5b4027b22604c3771e4c8465a4405..25e50ffc44ec8b42fd35b4ab0b60a08ee09edd7d 100644 (file)
@@ -343,13 +343,13 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw,
                                        (bool)GET_RX_DESC_PAGGR(pdesc));
        rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
        if (phystatus) {
-               p_drvinfo = (struct rx_fwinfo_92c *)(pdesc + RTL_RX_DESC_SIZE);
+               p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
+                                                    stats->rx_bufshift);
                rtl92c_translate_rx_signal_stuff(hw, skb, stats, pdesc,
                                                 p_drvinfo);
        }
        /*rx_status->qual = stats->signal; */
        rx_status->signal = stats->rssi + 10;
-       /*rx_status->noise = -stats->noise; */
        return true;
 }
 
@@ -364,7 +364,6 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
        u8 *rxdesc;
        struct rtl_stats stats = {
                .signal = 0,
-               .noise = -98,
                .rate = 0,
        };
        struct rx_fwinfo_92c *p_drvinfo;
index f700f7a614b264691ce8be3cfb79f219f5edd9ad..7908e1c85819409091abd71ba5b57595947ff909 100644 (file)
@@ -840,9 +840,9 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
        bool internal_pa = false;
        long ele_a = 0, ele_d, temp_cck, val_x, value32;
        long val_y, ele_c = 0;
-       u8 ofdm_index[2];
+       u8 ofdm_index[3];
        s8 cck_index = 0;
-       u8 ofdm_index_old[2] = {0, 0};
+       u8 ofdm_index_old[3] = {0, 0, 0};
        s8 cck_index_old = 0;
        u8 index;
        int i;
@@ -1118,6 +1118,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
                                 val_x, val_y, ele_a, ele_c, ele_d,
                                 val_x, val_y);
 
+                       if (cck_index >= CCK_TABLE_SIZE)
+                               cck_index = CCK_TABLE_SIZE - 1;
+                       if (cck_index < 0)
+                               cck_index = 0;
                        if (rtlhal->current_bandtype == BAND_ON_2_4G) {
                                /* Adjust CCK according to IQK result */
                                if (!rtlpriv->dm.cck_inch14) {
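
The bounds check added above simply clamps the CCK table index before it is used as an array subscript. Assuming the usual kernel helpers are in scope here, it is equivalent to the one-liner below (illustrative, not part of the patch):

    cck_index = clamp_t(s8, cck_index, 0, CCK_TABLE_SIZE - 1);
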
index 7dd8f6de0550f4f03424472f8926577a23b513c9..c4a7db9135d6e3850dcd8490e5e9165807178b64 100644 (file)
@@ -1194,25 +1194,7 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
  * mac80211 will send pkt when scan */
 void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
        rtl92d_dm_init_edca_turbo(hw);
-       return;
-       switch (aci) {
-       case AC1_BK:
-               rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0xa44f);
-               break;
-       case AC0_BE:
-               break;
-       case AC2_VI:
-               rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x5e4322);
-               break;
-       case AC3_VO:
-               rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
-               break;
-       default:
-               RT_ASSERT(false, "invalid aci: %d !\n", aci);
-               break;
-       }
 }
 
 void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
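
The switch removed from rtl92de_set_qos() above was unreachable: it sat after an unconditional return, so dropping it (together with the now-unused rtlpriv local) changes no behaviour; re-initialising EDCA turbo was the only code that ever ran.
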
index 7c9f7a2f1e427d2bac52cfacdea38c949f390a2e..1bc7b1a96d4aebe5577067460e56fa698405b3a8 100644 (file)
@@ -55,10 +55,9 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
                     u8 *p_macaddr, bool is_group, u8 enc_algo,
                     bool is_wepkey, bool clear_all);
 
-extern void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset,
-                                   u32 value, u8 direct);
-extern u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset,
-                                 u8 direct);
+void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value,
+                            u8 direct);
+u32 rtl92de_read_dword_dbi(struct ieee80211_hw *hw, u16 offset, u8 direct);
 void rtl92de_suspend(struct ieee80211_hw *hw);
 void rtl92de_resume(struct ieee80211_hw *hw);
 void rtl92d_linked_set_reg(struct ieee80211_hw *hw);
index 840bac5fa2f80bfa224bd5470b6562585aec0a02..13196cc4b1d380279e7ce3096b5861ba14087b81 100644 (file)
@@ -1022,34 +1022,6 @@ void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
        rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
 }
 
-void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       enum io_type iotype;
-
-       if (!is_hal_stop(rtlhal)) {
-               switch (operation) {
-               case SCAN_OPT_BACKUP:
-                       rtlhal->current_bandtypebackup =
-                                                rtlhal->current_bandtype;
-                       iotype = IO_CMD_PAUSE_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-                       break;
-               case SCAN_OPT_RESTORE:
-                       iotype = IO_CMD_RESUME_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown Scan Backup operation\n");
-                       break;
-               }
-       }
-}
-
 void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
                            enum nl80211_channel_type ch_type)
 {
index f074952bf25c53c3fcd6dbf6ae22f0a1f8de9316..48d5c6835b6a98a950daf3f14a5330606a656ba5 100644 (file)
@@ -39,9 +39,7 @@
 #define RT_CANNOT_IO(hw)                       false
 #define HIGHPOWER_RADIOA_ARRAYLEN              22
 
-#define IQK_ADDA_REG_NUM                       16
 #define MAX_TOLERANCE                          5
-#define        IQK_DELAY_TIME                          1
 
 #define        APK_BB_REG_NUM                          5
 #define        APK_AFE_REG_NUM                         16
@@ -127,34 +125,32 @@ static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
                        *flag);
 }
 
-extern u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                  u32 regaddr, u32 bitmask);
-extern void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                 u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                  enum radio_path rfpath, u32 regaddr,
-                                  u32 bitmask);
-extern void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                 enum radio_path rfpath, u32 regaddr,
-                                 u32 bitmask, u32 data);
-extern bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-                                                enum radio_path rfpath);
-extern void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
-extern void rtl92d_phy_scan_operation_backup(struct ieee80211_hw *hw,
-                                            u8 operation);
-extern void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                  enum nl80211_channel_type ch_type);
-extern u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
+u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
+                           u32 regaddr, u32 bitmask);
+void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
+                          u32 regaddr, u32 bitmask, u32 data);
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
+                           enum radio_path rfpath, u32 regaddr,
+                           u32 bitmask);
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
+                          enum radio_path rfpath, u32 regaddr,
+                          u32 bitmask, u32 data);
+bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
+                           enum nl80211_channel_type ch_type);
+u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
 bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                          enum rf_content content,
                                          enum radio_path rfpath);
 bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                         enum rf_pwrstate rfpwr_state);
+bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                  enum rf_pwrstate rfpwr_state);
 
 void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
 void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
@@ -173,6 +169,5 @@ void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
                                       unsigned long *flag);
 u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
 void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
-void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
 
 #endif
index 0fe1a48593e8b90dd797335efed9fd9e38598ef6..7303d12c266fbe2ed44d55aa7e46f689e1acc2c2 100644 (file)
 #ifndef __RTL92D_RF_H__
 #define __RTL92D_RF_H__
 
-extern void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                           u8 bandwidth);
-extern void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                             u8 *ppowerlevel);
-extern void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel, u8 channel);
-extern bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
-extern bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
-extern void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw,
-                                           bool bmac0);
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                      u8 *ppowerlevel);
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                       u8 *ppowerlevel, u8 channel);
+bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
 
 #endif
index c18c04bf0c13e4ac98c050f205bf35ddd7d7d14a..edab5a5351b52a814ec10833109666309279e2b1 100644 (file)
@@ -30,6 +30,7 @@
 #include "../wifi.h"
 #include "../core.h"
 #include "../pci.h"
+#include "../base.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -236,7 +237,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
        .set_bw_mode = rtl92d_phy_set_bw_mode,
        .switch_channel = rtl92d_phy_sw_chnl,
        .dm_watchdog = rtl92d_dm_watchdog,
-       .scan_operation_backup = rtl92d_phy_scan_operation_backup,
+       .scan_operation_backup = rtl_phy_scan_operation_backup,
        .set_rf_power_state = rtl92d_phy_set_rf_power_state,
        .led_control = rtl92de_led_control,
        .set_desc = rtl92de_set_desc,
index b8ec718a0fabbf348df84820fa530f8e28031699..945ddecf90c9a3b6c6bcd916c3c1d293ac51dc15 100644 (file)
@@ -526,7 +526,6 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
        }
        /*rx_status->qual = stats->signal; */
        rx_status->signal = stats->rssi + 10;
-       /*rx_status->noise = -stats->noise; */
        return true;
 }
 
index 84d1181795b8aeba6ff9d3d0d0245f75e596933f..c81c8359194007893412cd3d55f1da054b1bc645 100644 (file)
 #define        EXT_IMEM_CODE_DONE                      BIT(2)
 #define        IMEM_CHK_RPT                            BIT(1)
 #define        IMEM_CODE_DONE                          BIT(0)
-#define        IMEM_CODE_DONE                          BIT(0)
-#define        IMEM_CHK_RPT                            BIT(1)
 #define        EMEM_CODE_DONE                          BIT(2)
 #define        EMEM_CHK_RPT                            BIT(3)
-#define        DMEM_CODE_DONE                          BIT(4)
 #define        IMEM_RDY                                BIT(5)
-#define        BASECHG                                 BIT(6)
-#define        FWRDY                                   BIT(7)
 #define        LOAD_FW_READY                           (IMEM_CODE_DONE | \
                                                IMEM_CHK_RPT | \
                                                EMEM_CODE_DONE | \
index c7095118de6e8c23bbfd0ee5727fc353969db9fd..222d2e792ca6d259885fa6da3a8ad792bbb338ad 100644 (file)
@@ -330,7 +330,6 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
 
        /*rx_status->qual = stats->signal; */
        rx_status->signal = stats->rssi + 10;
-       /*rx_status->noise = -stats->noise; */
 
        return true;
 }
index eafbb18dd48e69a44d67cd60f171b5915662e58c..5d318a85eda4047100eeec7c5297412c15d615fd 100644 (file)
@@ -934,35 +934,6 @@ static long _phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
        return pwrout_dbm;
 }
 
-void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       enum io_type iotype;
-
-       if (!is_hal_stop(rtlhal)) {
-               switch (operation) {
-               case SCAN_OPT_BACKUP:
-                       iotype = IO_CMD_PAUSE_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-
-                       break;
-               case SCAN_OPT_RESTORE:
-                       iotype = IO_CMD_RESUME_DM_BY_SCAN;
-                       rtlpriv->cfg->ops->set_hw_reg(hw,
-                                                     HW_VAR_IO_CMD,
-                                                     (u8 *)&iotype);
-                       break;
-               default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown Scan Backup operation.\n");
-                       break;
-               }
-       }
-}
-
 void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
index e7a59eba351adf85cd951f28377517b8347a6807..007ebdbbe108623f88c163e77d3e548eabc00eec 100644 (file)
@@ -183,42 +183,40 @@ struct tx_power_struct {
        u32 mcs_original_offset[4][16];
 };
 
-extern u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
-                                     u32 regaddr, u32 bitmask);
-extern void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
-                                    u32 regaddr, u32 bitmask, u32 data);
-extern u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
-                                     enum radio_path rfpath, u32 regaddr,
-                                     u32 bitmask);
-extern void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
-                                    enum radio_path rfpath, u32 regaddr,
-                                    u32 bitmask, u32 data);
-extern bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
-extern bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
-extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
-                                                enum radio_path rfpath);
-extern void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
-                                           long *powerlevel);
-extern void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
-                                           u8 channel);
-extern bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
-                                            long power_indbm);
-extern void rtl8723ae_phy_scan_operation_backup(struct ieee80211_hw *hw,
-                                               u8 operation);
-extern void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
-                                     enum nl80211_channel_type ch_type);
-extern void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
-extern u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
-extern void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
+u32 rtl8723ae_phy_query_bb_reg(struct ieee80211_hw *hw,
+                              u32 regaddr, u32 bitmask);
+void rtl8723ae_phy_set_bb_reg(struct ieee80211_hw *hw,
+                             u32 regaddr, u32 bitmask, u32 data);
+u32 rtl8723ae_phy_query_rf_reg(struct ieee80211_hw *hw,
+                              enum radio_path rfpath, u32 regaddr,
+                              u32 bitmask);
+void rtl8723ae_phy_set_rf_reg(struct ieee80211_hw *hw,
+                             enum radio_path rfpath, u32 regaddr,
+                             u32 bitmask, u32 data);
+bool rtl8723ae_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl8723ae_phy_rf_config(struct ieee80211_hw *hw);
+bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
+                                         enum radio_path rfpath);
+void rtl8723ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl8723ae_phy_get_txpower_level(struct ieee80211_hw *hw,
+                                    long *powerlevel);
+void rtl8723ae_phy_set_txpower_level(struct ieee80211_hw *hw,
+                                    u8 channel);
+bool rtl8723ae_phy_update_txpower_dbm(struct ieee80211_hw *hw,
+                                     long power_indbm);
+void rtl8723ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
+void rtl8723ae_phy_set_bw_mode(struct ieee80211_hw *hw,
+                              enum nl80211_channel_type ch_type);
+void rtl8723ae_phy_sw_chnl_callback(struct ieee80211_hw *hw);
+u8 rtl8723ae_phy_sw_chnl(struct ieee80211_hw *hw);
+void rtl8723ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery);
 void rtl8723ae_phy_lc_calibrate(struct ieee80211_hw *hw);
 void rtl8723ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
 bool rtl8723ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                                             enum radio_path rfpath);
 bool rtl8723ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
-extern bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
-                                            enum rf_pwrstate rfpwr_state);
+bool rtl8723ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
+                                     enum rf_pwrstate rfpwr_state);
 
 #endif
index d0f9dd79abea6f9f40ed12a1c8cc3eeea337bd68..57f1933ee663e48b2433ef887a0b361a107e9770 100644 (file)
 
 #define RF6052_MAX_TX_PWR              0x3F
 
-extern void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
-                                           u8 bandwidth);
-extern void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
-                                             u8 *ppowerlevel);
-extern void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
-                                              u8 *ppowerlevel, u8 channel);
-extern bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
+void rtl8723ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl8723ae_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+                                         u8 *ppowerlevel);
+void rtl8723ae_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+                                          u8 *ppowerlevel, u8 channel);
+bool rtl8723ae_phy_rf6052_config(struct ieee80211_hw *hw);
 
 #endif
index d9ee2efffe5ffb1284335352eed2e13ab8e4df96..62b204faf773f74b1137aa2ada315375357268e8 100644 (file)
@@ -33,6 +33,7 @@
 
 #include "../core.h"
 #include "../pci.h"
+#include "../base.h"
 #include "reg.h"
 #include "def.h"
 #include "phy.h"
@@ -220,7 +221,7 @@ static struct rtl_hal_ops rtl8723ae_hal_ops = {
        .set_bw_mode = rtl8723ae_phy_set_bw_mode,
        .switch_channel = rtl8723ae_phy_sw_chnl,
        .dm_watchdog = rtl8723ae_dm_watchdog,
-       .scan_operation_backup = rtl8723ae_phy_scan_operation_backup,
+       .scan_operation_backup = rtl_phy_scan_operation_backup,
        .set_rf_power_state = rtl8723ae_phy_set_rf_power_state,
        .led_control = rtl8723ae_led_control,
        .set_desc = rtl8723ae_set_desc,
index bcd82a1020a5bebd7104467594ffefc47bf28eba..50b7be3f3a605673756e543e456324993c66980d 100644 (file)
@@ -359,7 +359,6 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
 
        /*rx_status->qual = status->signal; */
        rx_status->signal = status->recvsignalpower + 10;
-       /*rx_status->noise = -status->noise; */
 
        return true;
 }
index e56778cac9bfce39e56d5aa239e34f2fb1135ffb..6e2b5c5c83c8d21234ee013c802d7af317f6db8c 100644 (file)
@@ -455,7 +455,6 @@ static void _rtl_usb_rx_process_agg(struct ieee80211_hw *hw,
        struct ieee80211_rx_status rx_status = {0};
        struct rtl_stats stats = {
                .signal = 0,
-               .noise = -98,
                .rate = 0,
        };
 
@@ -498,7 +497,6 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
        struct ieee80211_rx_status rx_status = {0};
        struct rtl_stats stats = {
                .signal = 0,
-               .noise = -98,
                .rate = 0,
        };
 
@@ -582,12 +580,15 @@ static void _rtl_rx_work(unsigned long param)
 static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
                                        unsigned int len)
 {
+#if NET_IP_ALIGN != 0
        unsigned int padding = 0;
+#endif
 
        /* make function no-op when possible */
        if (NET_IP_ALIGN == 0 || len < sizeof(*hdr))
                return 0;
 
+#if NET_IP_ALIGN != 0
        /* alignment calculation as in lbtf_rx() / carl9170_rx_copy_data() */
        /* TODO: deduplicate common code, define helper function instead? */
 
@@ -608,6 +609,7 @@ static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
                padding ^= NET_IP_ALIGN;
 
        return padding;
+#endif
 }
 
 #define __RADIO_TAP_SIZE_RSV   32
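
The #if NET_IP_ALIGN != 0 guards added above presumably exist so that, on configurations where NET_IP_ALIGN is 0, the constant-true early return leaves no unused local or dead alignment arithmetic behind for the compiler to warn about; the function then really is the no-op its comment promises.
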
index 703258742d28eefd339bde254d30661c4175b1a9..d224dc3bb092b0ef04545cc57891a7bdb4de4b2f 100644 (file)
@@ -192,8 +192,6 @@ enum hardware_type {
 (IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal))
 #define        IS_HARDWARE_TYPE_8723(rtlhal)                   \
 (IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal))
-#define IS_HARDWARE_TYPE_8723U(rtlhal)                 \
-       (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U)
 
 #define RX_HAL_IS_CCK_RATE(_pdesc)\
        (_pdesc->rxmcs == DESC92_RATE1M ||              \
index c7dc6feab2ff38cd2bbbef713ca4b2c0113a4b51..1342f81e683d1498bb16d5904d032009244bed58 100644 (file)
@@ -243,7 +243,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
        struct wl1251 *wl;
        int ret;
 
-       pdata = spi->dev.platform_data;
+       pdata = dev_get_platdata(&spi->dev);
        if (!pdata) {
                wl1251_error("no platform data");
                return -ENODEV;
index fd02060038de6479051accfa7b63c1993193315b..2c3bd1bff3f68e08de48b42101c52606a472fa6a 100644 (file)
@@ -424,8 +424,8 @@ void wl1251_disable_interrupts(struct wl1251 *wl);
 #define CHIP_ID_1271_PG10                 (0x4030101)
 #define CHIP_ID_1271_PG20                 (0x4030111)
 
-#define WL1251_FW_NAME "wl1251-fw.bin"
-#define WL1251_NVS_NAME "wl1251-nvs.bin"
+#define WL1251_FW_NAME "ti-connectivity/wl1251-fw.bin"
+#define WL1251_NVS_NAME "ti-connectivity/wl1251-nvs.bin"
 
 #define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
 
index 1c627da85083949c9dd9837e09b53b5ca8a9ac74..591526b991547281e4e04f341e071d57c84d58e2 100644 (file)
@@ -1704,7 +1704,7 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
 static int wl12xx_setup(struct wl1271 *wl)
 {
        struct wl12xx_priv *priv = wl->priv;
-       struct wlcore_platdev_data *pdev_data = wl->pdev->dev.platform_data;
+       struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
        struct wl12xx_platform_data *pdata = pdev_data->pdata;
 
        wl->rtable = wl12xx_rtable;
index 7aa0eb848c5a2c83cc93f120f96567cd04d5d0fa..d0daca1d23bc55154432d2a031feff4ee51b15e4 100644 (file)
@@ -623,6 +623,18 @@ static const int wl18xx_rtable[REG_TABLE_LEN] = {
        [REG_RAW_FW_STATUS_ADDR]        = WL18XX_FW_STATUS_ADDR,
 };
 
+static const struct wl18xx_clk_cfg wl18xx_clk_table_coex[NUM_CLOCK_CONFIGS] = {
+       [CLOCK_CONFIG_16_2_M]   = { 8,  121, 0, 0, false },
+       [CLOCK_CONFIG_16_368_M] = { 8,  120, 0, 0, false },
+       [CLOCK_CONFIG_16_8_M]   = { 8,  117, 0, 0, false },
+       [CLOCK_CONFIG_19_2_M]   = { 10, 128, 0, 0, false },
+       [CLOCK_CONFIG_26_M]     = { 11, 104, 0, 0, false },
+       [CLOCK_CONFIG_32_736_M] = { 8,  120, 0, 0, false },
+       [CLOCK_CONFIG_33_6_M]   = { 8,  117, 0, 0, false },
+       [CLOCK_CONFIG_38_468_M] = { 10, 128, 0, 0, false },
+       [CLOCK_CONFIG_52_M]     = { 11, 104, 0, 0, false },
+};
+
 static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
        [CLOCK_CONFIG_16_2_M]   = { 7,  104,  801, 4,  true },
        [CLOCK_CONFIG_16_368_M] = { 9,  132, 3751, 4,  true },
@@ -704,6 +716,23 @@ static int wl18xx_set_clk(struct wl1271 *wl)
                     wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
                     wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
 
+       /* coex PLL configuration */
+       ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_N,
+                                  wl18xx_clk_table_coex[clk_freq].n);
+       if (ret < 0)
+               goto out;
+
+       ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_M,
+                                  wl18xx_clk_table_coex[clk_freq].m);
+       if (ret < 0)
+               goto out;
+
+       /* bypass the swallowing logic */
+       ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+                                  PLLSH_COEX_PLL_SWALLOW_EN_VAL1);
+       if (ret < 0)
+               goto out;
+
        ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
                                   wl18xx_clk_table[clk_freq].n);
        if (ret < 0)
@@ -745,6 +774,30 @@ static int wl18xx_set_clk(struct wl1271 *wl)
                                           PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
        }
 
+       /* choose WCS PLL */
+       ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_SEL,
+                                  PLLSH_WL_PLL_SEL_WCS_PLL);
+       if (ret < 0)
+               goto out;
+
+       /* enable both PLLs */
+       ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL1);
+       if (ret < 0)
+               goto out;
+
+       udelay(1000);
+
+       /* disable coex PLL */
+       ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL2);
+       if (ret < 0)
+               goto out;
+
+       /* reset the swallowing logic */
+       ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
+                                  PLLSH_COEX_PLL_SWALLOW_EN_VAL2);
+       if (ret < 0)
+               goto out;
+
 out:
        return ret;
 }
@@ -1175,16 +1228,48 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
        }
 }
 
+static const char *wl18xx_rdl_name(enum wl18xx_rdl_num rdl_num)
+{
+       switch (rdl_num) {
+       case RDL_1_HP:
+               return "183xH";
+       case RDL_2_SP:
+               return "183x or 180x";
+       case RDL_3_HP:
+               return "187xH";
+       case RDL_4_SP:
+               return "187x";
+       case RDL_5_SP:
+               return "RDL11 - Not Supported";
+       case RDL_6_SP:
+               return "180xD";
+       case RDL_7_SP:
+               return "RDL13 - Not Supported (1893Q)";
+       case RDL_8_SP:
+               return "18xxQ";
+       case RDL_NONE:
+               return "UNTRIMMED";
+       default:
+               return "UNKNOWN";
+       }
+}
+
 static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
 {
        u32 fuse;
-       s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0;
+       s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0, package_type = 0;
        int ret;
 
        ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
        if (ret < 0)
                goto out;
 
+       ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_2_3, &fuse);
+       if (ret < 0)
+               goto out;
+
+       package_type = (fuse >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;
+
        ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
        if (ret < 0)
                goto out;
@@ -1192,7 +1277,7 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
        pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
        rom = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
 
-       if (rom <= 0xE)
+       if ((rom <= 0xE) && (package_type == WL18XX_PACKAGE_TYPE_WSP))
                metal = (fuse & WL18XX_METAL_VER_MASK) >>
                        WL18XX_METAL_VER_OFFSET;
        else
@@ -1204,11 +1289,9 @@ static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
                goto out;
 
        rdl_ver = (fuse & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
-       if (rdl_ver > RDL_MAX)
-               rdl_ver = RDL_NONE;
 
-       wl1271_info("wl18xx HW: RDL %d, %s, PG %x.%x (ROM %x)",
-                   rdl_ver, rdl_names[rdl_ver], pg_ver, metal, rom);
+       wl1271_info("wl18xx HW: %s, PG %d.%d (ROM 0x%x)",
+                   wl18xx_rdl_name(rdl_ver), pg_ver, metal, rom);
 
        if (ver)
                *ver = pg_ver;
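
The new package-type probe is a plain mask-and-shift field extraction from a fuse word, the same pattern already used for the PG, ROM and metal versions. A small illustration, using the updated ROM-version mask and offset from the register header further down (0x3e00, shift 9); the helper name is made up for the example:

    static inline u32 fuse_field(u32 fuse, u32 mask, u32 shift)
    {
            return (fuse & mask) >> shift;
    }

    /* e.g. a WL18XX_REG_FUSE_DATA_1_3 word of 0x2600 decodes to
     * fuse_field(0x2600, 0x3e00, 9) == 0x13 for the ROM version, while
     * the package type read above is just bit 13 of the FUSE_DATA_2_3
     * word: (fuse >> WL18XX_PACKAGE_TYPE_OFFSET) & 1.
     */
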
index 05dd8bad27469389e0e5ff3cf713fab083d348d5..a433a75f3cd7c85d51f67cfb8d03830e35c041be 100644 (file)
 #define PLATFORM_DETECTION             0xA0E3E0
 #define OCS_EN                         0xA02080
 #define PRIMARY_CLK_DETECT             0xA020A6
+#define PLLSH_COEX_PLL_N               0xA02384
+#define PLLSH_COEX_PLL_M               0xA02382
+#define PLLSH_COEX_PLL_SWALLOW_EN      0xA0238E
+#define PLLSH_WL_PLL_SEL               0xA02398
+
 #define PLLSH_WCS_PLL_N                        0xA02362
 #define PLLSH_WCS_PLL_M                        0xA02360
 #define PLLSH_WCS_PLL_Q_FACTOR_CFG_1   0xA02364
 #define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK      0xFFFF
 #define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK      0x000F
 
+#define PLLSH_WL_PLL_EN_VAL1           0x7
+#define PLLSH_WL_PLL_EN_VAL2           0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL1 0x2
+#define PLLSH_COEX_PLL_SWALLOW_EN_VAL2 0x11
+
 #define PLLSH_WCS_PLL_SWALLOW_EN_VAL1  0x1
 #define PLLSH_WCS_PLL_SWALLOW_EN_VAL2  0x12
 
+#define PLLSH_WL_PLL_SEL_WCS_PLL       0x0
+#define PLLSH_WL_PLL_SEL_COEX_PLL      0x1
+
 #define WL18XX_REG_FUSE_DATA_1_3       0xA0260C
 #define WL18XX_PG_VER_MASK             0x70
 #define WL18XX_PG_VER_OFFSET           4
-#define WL18XX_ROM_VER_MASK            0x3
-#define WL18XX_ROM_VER_OFFSET          0
+#define WL18XX_ROM_VER_MASK            0x3e00
+#define WL18XX_ROM_VER_OFFSET          9
 #define WL18XX_METAL_VER_MASK          0xC
 #define WL18XX_METAL_VER_OFFSET                2
 #define WL18XX_NEW_METAL_VER_MASK      0x180
 #define WL18XX_NEW_METAL_VER_OFFSET    7
 
+#define WL18XX_PACKAGE_TYPE_OFFSET     13
+#define WL18XX_PACKAGE_TYPE_WSP                0
+
 #define WL18XX_REG_FUSE_DATA_2_3       0xA02614
 #define WL18XX_RDL_VER_MASK            0x1f00
 #define WL18XX_RDL_VER_OFFSET          8
@@ -201,24 +217,21 @@ enum {
        NUM_BOARD_TYPES,
 };
 
-enum {
+enum wl18xx_rdl_num {
        RDL_NONE        = 0,
        RDL_1_HP        = 1,
        RDL_2_SP        = 2,
        RDL_3_HP        = 3,
        RDL_4_SP        = 4,
+       RDL_5_SP        = 0x11,
+       RDL_6_SP        = 0x12,
+       RDL_7_SP        = 0x13,
+       RDL_8_SP        = 0x14,
 
        _RDL_LAST,
        RDL_MAX = _RDL_LAST - 1,
 };
 
-static const char * const rdl_names[] = {
-       [RDL_NONE]      = "",
-       [RDL_1_HP]      = "1853 SISO",
-       [RDL_2_SP]      = "1857 MIMO",
-       [RDL_3_HP]      = "1893 SISO",
-       [RDL_4_SP]      = "1897 MIMO",
-};
 
 /* FPGA_SPARE_1 register - used to change the PHY ATPG clock at boot time */
 #define WL18XX_PHY_FPGA_SPARE_1                0x8093CA40
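
Dropping rdl_names[] in favour of the wl18xx_rdl_name() switch added in the .c hunk earlier matches the new numbering: with RDL_5_SP..RDL_8_SP at 0x11..0x14 the values are no longer dense, so indexing a table by rdl_ver (and clamping it against RDL_MAX) would no longer be safe; the switch handles sparse and unknown values explicitly.
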
index c9e060795d13760befc72a9190b008b65b6f1b41..9e5416f8764d13899cc7de963a71a58bf7b1b0e4 100644 (file)
@@ -1126,6 +1126,8 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        u16 template_id_2_4 = wl->scan_templ_id_2_4;
        u16 template_id_5 = wl->scan_templ_id_5;
 
+       wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
+
        skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
                                     ie_len);
        if (!skb) {
@@ -1135,8 +1137,6 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        if (ie_len)
                memcpy(skb_put(skb, ie_len), ie, ie_len);
 
-       wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
-
        if (sched_scan &&
            (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
                template_id_2_4 = wl->sched_scan_templ_id_2_4;
@@ -1172,7 +1172,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
        if (!skb)
                goto out;
 
-       wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
+       wl1271_debug(DEBUG_SCAN, "set ap probe request template");
 
        rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
        if (wlvif->band == IEEE80211_BAND_2GHZ)
@@ -1607,33 +1607,43 @@ out:
 
 static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
 {
-       int idx = -1;
-
+       /*
+        * map the given band/channel to the respective predefined
+        * bit expected by the fw
+        */
        switch (band) {
-       case IEEE80211_BAND_5GHZ:
-               if (ch >= 8 && ch <= 16)
-                       idx = ((ch-8)/4 + 18);
-               else if (ch >= 34 && ch <= 64)
-                       idx = ((ch-34)/2 + 3 + 18);
-               else if (ch >= 100 && ch <= 140)
-                       idx = ((ch-100)/4 + 15 + 18);
-               else if (ch >= 149 && ch <= 165)
-                       idx = ((ch-149)/4 + 26 + 18);
-               else
-                       idx = -1;
-               break;
        case IEEE80211_BAND_2GHZ:
+               /* channels 1..14 are mapped to 0..13 */
                if (ch >= 1 && ch <= 14)
-                       idx = ch - 1;
-               else
-                       idx = -1;
+                       return ch - 1;
+               break;
+       case IEEE80211_BAND_5GHZ:
+               switch (ch) {
+               case 8 ... 16:
+                       /* channels 8,12,16 are mapped to 18,19,20 */
+                       return 18 + (ch-8)/4;
+               case 34 ... 48:
+                       /* channels 34,36..48 are mapped to 21..28 */
+                       return 21 + (ch-34)/2;
+               case 52 ... 64:
+                       /* channels 52,56..64 are mapped to 29..32 */
+                       return 29 + (ch-52)/4;
+               case 100 ... 140:
+                       /* channels 100,104..140 are mapped to 33..43 */
+                       return 33 + (ch-100)/4;
+               case 149 ... 165:
+                       /* channels 149,153..165 are mapped to 44..48 */
+                       return 44 + (ch-149)/4;
+               default:
+                       break;
+               }
                break;
        default:
-               wl1271_error("get reg conf ch idx - unknown band: %d",
-                            (int)band);
+               break;
        }
 
-       return idx;
+       wl1271_error("%s: unknown band/channel: %d/%d", __func__, band, ch);
+       return -1;
 }
 
 void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
@@ -1646,7 +1656,7 @@ void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
 
        ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
 
-       if (ch_bit_idx > 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
+       if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
                set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
 }
 
index 38995f90040dea19b50d9c46ea2d4c8b7dc1b41b..bbdd10632373d0481ae7dd21b331ce566903c77c 100644 (file)
@@ -1062,7 +1062,8 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
        static const char* const PLT_MODE[] = {
                "PLT_OFF",
                "PLT_ON",
-               "PLT_FEM_DETECT"
+               "PLT_FEM_DETECT",
+               "PLT_CHIP_AWAKE"
        };
 
        int ret;
@@ -1088,9 +1089,11 @@ int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
                if (ret < 0)
                        goto power_off;
 
-               ret = wl->ops->plt_init(wl);
-               if (ret < 0)
-                       goto power_off;
+               if (plt_mode != PLT_CHIP_AWAKE) {
+                       ret = wl->ops->plt_init(wl);
+                       if (ret < 0)
+                               goto power_off;
+               }
 
                wl->state = WLCORE_STATE_ON;
                wl1271_notice("firmware booted in PLT mode %s (%s)",
@@ -2008,6 +2011,47 @@ out:
        mutex_unlock(&wl->mutex);
 }
 
+static void wlcore_pending_auth_complete_work(struct work_struct *work)
+{
+       struct delayed_work *dwork;
+       struct wl1271 *wl;
+       struct wl12xx_vif *wlvif;
+       unsigned long time_spare;
+       int ret;
+
+       dwork = container_of(work, struct delayed_work, work);
+       wlvif = container_of(dwork, struct wl12xx_vif,
+                            pending_auth_complete_work);
+       wl = wlvif->wl;
+
+       mutex_lock(&wl->mutex);
+
+       if (unlikely(wl->state != WLCORE_STATE_ON))
+               goto out;
+
+       /*
+        * Make sure a full second has really passed since the last auth
+        * reply. Another auth reply may have arrived while we were stuck
+        * on the mutex. Check against slightly less than the timeout to
+        * protect from scheduler irregularities.
+        */
+       time_spare = jiffies +
+                       msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
+       if (!time_after(time_spare, wlvif->pending_auth_reply_time))
+               goto out;
+
+       ret = wl1271_ps_elp_wakeup(wl);
+       if (ret < 0)
+               goto out;
+
+       /* cancel the ROC if active */
+       wlcore_update_inconn_sta(wl, wlvif, NULL, false);
+
+       wl1271_ps_elp_sleep(wl);
+out:
+       mutex_unlock(&wl->mutex);
+}
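
This work is the deferred half of the pending-auth ROC added further down in the TX path: the TX hook stamps pending_auth_reply_time and queues the work, and when it fires it re-checks under wl->mutex that (almost) the full second really elapsed before dropping the ROC. A generic sketch of that arm/re-check pattern using plain workqueue calls (struct my_ctx and its helpers are hypothetical; the driver itself queues through ieee80211_queue_delayed_work(), and INIT_DELAYED_WORK() is assumed to have been done at init):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MY_TIMEOUT_MS  1000

struct my_ctx {
        struct delayed_work timeout_work;       /* INIT_DELAYED_WORK()ed at init */
        unsigned long armed_time;               /* jiffies when last armed */
};

static void my_timeout_work(struct work_struct *work)
{
        struct my_ctx *ctx = container_of(to_delayed_work(work),
                                          struct my_ctx, timeout_work);

        /* bail out if the work was re-armed shortly before it fired */
        if (time_before(jiffies, ctx->armed_time +
                        msecs_to_jiffies(MY_TIMEOUT_MS - 50)))
                return;

        /* ... perform the timeout action here ... */
}

static void my_arm_timeout(struct my_ctx *ctx)
{
        ctx->armed_time = jiffies;
        cancel_delayed_work(&ctx->timeout_work);
        schedule_delayed_work(&ctx->timeout_work,
                              msecs_to_jiffies(MY_TIMEOUT_MS));
}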
+
 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
 {
        u8 policy = find_first_zero_bit(wl->rate_policies_map,
@@ -2159,6 +2203,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
                          wlcore_channel_switch_work);
        INIT_DELAYED_WORK(&wlvif->connection_loss_work,
                          wlcore_connection_loss_work);
+       INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
+                         wlcore_pending_auth_complete_work);
        INIT_LIST_HEAD(&wlvif->list);
 
        setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
@@ -2376,6 +2422,11 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
        int ret = 0;
        u8 role_type;
 
+       if (wl->plt) {
+               wl1271_error("Adding Interface not allowed while in PLT mode");
+               return -EBUSY;
+       }
+
        vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
                             IEEE80211_VIF_SUPPORTS_CQM_RSSI;
 
@@ -2590,6 +2641,7 @@ unlock:
        cancel_work_sync(&wlvif->rx_streaming_disable_work);
        cancel_delayed_work_sync(&wlvif->connection_loss_work);
        cancel_delayed_work_sync(&wlvif->channel_switch_work);
+       cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
 
        mutex_lock(&wl->mutex);
 }
@@ -2875,6 +2927,25 @@ static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
        wlvif->rate_set = wlvif->basic_rate_set;
 }
 
+static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                                  bool idle)
+{
+       bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+
+       if (idle == cur_idle)
+               return;
+
+       if (idle) {
+               clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+       } else {
+               /* The current firmware only supports sched_scan in idle */
+               if (wl->sched_vif == wlvif)
+                       wl->ops->sched_scan_stop(wl, wlvif);
+
+               set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
+       }
+}
+
 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                             struct ieee80211_conf *conf, u32 changed)
 {
@@ -3969,6 +4040,13 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
                        }
                } else {
                        if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+                               /*
+                                * The AP might still be in ROC because we
+                                * have just sent an auth reply; handle it.
+                                */
+                               if (test_bit(wlvif->role_id, wl->roc_map))
+                                       wl12xx_croc(wl, wlvif->role_id);
+
                                ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
                                if (ret < 0)
                                        goto out;
@@ -4120,6 +4198,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
                do_join = true;
        }
 
+       if (changed & BSS_CHANGED_IDLE && !is_ibss)
+               wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+
        if (changed & BSS_CHANGED_CQM) {
                bool enable = false;
                if (bss_conf->cqm_rssi_thold)
@@ -4656,29 +4737,49 @@ static void wlcore_roc_if_possible(struct wl1271 *wl,
        wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
 }
 
-static void wlcore_update_inconn_sta(struct wl1271 *wl,
-                                    struct wl12xx_vif *wlvif,
-                                    struct wl1271_station *wl_sta,
-                                    bool in_connection)
+/*
+ * When wl_sta is NULL, treat this call as if it came from a
+ * pending auth reply.
+ * wl->mutex must be held and the FW must be awake when the call
+ * takes place.
+ */
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                             struct wl1271_station *wl_sta, bool in_conn)
 {
-       if (in_connection) {
-               if (WARN_ON(wl_sta->in_connection))
+       if (in_conn) {
+               if (WARN_ON(wl_sta && wl_sta->in_connection))
                        return;
-               wl_sta->in_connection = true;
-               if (!wlvif->inconn_count++)
+
+               if (!wlvif->ap_pending_auth_reply &&
+                   !wlvif->inconn_count)
                        wlcore_roc_if_possible(wl, wlvif);
+
+               if (wl_sta) {
+                       wl_sta->in_connection = true;
+                       wlvif->inconn_count++;
+               } else {
+                       wlvif->ap_pending_auth_reply = true;
+               }
        } else {
-               if (!wl_sta->in_connection)
+               if (wl_sta && !wl_sta->in_connection)
+                       return;
+
+               if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
                        return;
 
-               wl_sta->in_connection = false;
-               wlvif->inconn_count--;
-               if (WARN_ON(wlvif->inconn_count < 0))
+               if (WARN_ON(wl_sta && !wlvif->inconn_count))
                        return;
 
-               if (!wlvif->inconn_count)
-                       if (test_bit(wlvif->role_id, wl->roc_map))
-                               wl12xx_croc(wl, wlvif->role_id);
+               if (wl_sta) {
+                       wl_sta->in_connection = false;
+                       wlvif->inconn_count--;
+               } else {
+                       wlvif->ap_pending_auth_reply = false;
+               }
+
+               if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
+                   test_bit(wlvif->role_id, wl->roc_map))
+                       wl12xx_croc(wl, wlvif->role_id);
        }
 }
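
With the rework above, the ROC is held for as long as either a station is still mid-connection (inconn_count > 0) or an auth reply is pending (ap_pending_auth_reply), and wl12xx_croc() only runs once both conditions have cleared. A small user-space model of that bookkeeping, with the WARN_ON sanity checks dropped (all names below are hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the ROC bookkeeping done above. */
struct roc_state {
        int  inconn_count;              /* stations mid-connection */
        bool pending_auth_reply;        /* auth reply sent, ROC held */
        bool roc_active;
};

static void update_inconn(struct roc_state *s, bool have_sta, bool in_conn)
{
        if (in_conn) {
                if (!s->pending_auth_reply && !s->inconn_count)
                        s->roc_active = true;           /* start ROC */
                if (have_sta)
                        s->inconn_count++;
                else
                        s->pending_auth_reply = true;
        } else {
                if (have_sta)
                        s->inconn_count--;
                else
                        s->pending_auth_reply = false;
                if (!s->inconn_count && !s->pending_auth_reply)
                        s->roc_active = false;          /* cancel ROC */
        }
}

int main(void)
{
        struct roc_state s = { 0 };

        update_inconn(&s, false, true); /* pending auth reply -> ROC on */
        update_inconn(&s, true, true);  /* station connecting */
        update_inconn(&s, false, false);/* auth reply timed out */
        update_inconn(&s, true, false); /* station associated */
        printf("roc_active=%d\n", s.roc_active);        /* prints 0 */
        return 0;
}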
 
@@ -5313,10 +5414,7 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
 
 /* 5 GHz band channels for WL1273 */
 static struct ieee80211_channel wl1271_channels_5ghz[] = {
-       { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
-       { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
-       { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
@@ -5896,14 +5994,20 @@ static const struct wiphy_wowlan_support wlcore_wowlan_support = {
 };
 #endif
 
+static irqreturn_t wlcore_hardirq(int irq, void *cookie)
+{
+       return IRQ_WAKE_THREAD;
+}
+
 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
 {
        struct wl1271 *wl = context;
        struct platform_device *pdev = wl->pdev;
-       struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
+       struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
        struct wl12xx_platform_data *pdata = pdev_data->pdata;
        unsigned long irqflags;
        int ret;
+       irq_handler_t hardirq_fn = NULL;
 
        if (fw) {
                wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
@@ -5932,12 +6036,14 @@ static void wlcore_nvs_cb(const struct firmware *fw, void *context)
        wl->platform_quirks = pdata->platform_quirks;
        wl->if_ops = pdev_data->if_ops;
 
-       if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+       if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
                irqflags = IRQF_TRIGGER_RISING;
-       else
+               hardirq_fn = wlcore_hardirq;
+       } else {
                irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+       }
 
-       ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
+       ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
                                   irqflags, pdev->name, wl);
        if (ret < 0) {
                wl1271_error("request_irq() failed: %d", ret);
index 98066d40c2ad107943384509abcbc10b82d82019..26bfc365ba70bf03e215ddc0be7df0d01c13512b 100644 (file)
@@ -83,6 +83,10 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
        struct wl12xx_vif *wlvif;
        u32 timeout;
 
+       /* We do not enter elp sleep in PLT mode */
+       if (wl->plt)
+               return;
+
        if (wl->sleep_auth != WL1271_PSM_ELP)
                return;
 
index f407101e525b60a9c4bff3c402865d12f3d6afde..13e743df2e31d45815f8d8fd916e9f5370977841 100644 (file)
@@ -174,17 +174,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
                    /* if radar is set, we ignore the passive flag */
                    (radar ||
                     !!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
-                       wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
-                                    req_channels[i]->band,
-                                    req_channels[i]->center_freq);
-                       wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
-                                    req_channels[i]->hw_value,
-                                    req_channels[i]->flags);
-                       wl1271_debug(DEBUG_SCAN, "max_power %d",
-                                    req_channels[i]->max_power);
-                       wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
-                                    min_dwell_time_active,
-                                    max_dwell_time_active);
+
 
                        if (flags & IEEE80211_CHAN_RADAR) {
                                channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
@@ -222,6 +212,17 @@ wlcore_scan_get_channels(struct wl1271 *wl,
                                             *n_pactive_ch);
                        }
 
+                       wl1271_debug(DEBUG_SCAN, "freq %d, ch. %d, flags 0x%x, power %d, min/max_dwell %d/%d%s%s",
+                                    req_channels[i]->center_freq,
+                                    req_channels[i]->hw_value,
+                                    req_channels[i]->flags,
+                                    req_channels[i]->max_power,
+                                    min_dwell_time_active,
+                                    max_dwell_time_active,
+                                    flags & IEEE80211_CHAN_RADAR ?
+                                       ", DFS" : "",
+                                    flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+                                       ", PASSIVE" : "");
                        j++;
                }
        }
@@ -364,7 +365,7 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
        struct cfg80211_ssid *ssids = req->ssids;
        int ret = 0, type, i, j, n_match_ssids = 0;
 
-       wl1271_debug(DEBUG_CMD, "cmd sched scan ssid list");
+       wl1271_debug((DEBUG_CMD | DEBUG_SCAN), "cmd sched scan ssid list");
 
        /* count the match sets that contain SSIDs */
        for (i = 0; i < req->n_match_sets; i++)
@@ -442,8 +443,6 @@ wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
                }
        }
 
-       wl1271_dump(DEBUG_SCAN, "SSID_LIST: ", cmd, sizeof(*cmd));
-
        ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd,
                              sizeof(*cmd), 0);
        if (ret < 0) {
index 1b0cd98e35f187f8be9f0525e152e5960742471d..b2c018dccf1887bb8dbdd5b85d9a88e0d30ff46e 100644 (file)
@@ -335,7 +335,7 @@ static int wl1271_probe(struct spi_device *spi)
        if (!pdev_data)
                goto out;
 
-       pdev_data->pdata = spi->dev.platform_data;
+       pdev_data->pdata = dev_get_platdata(&spi->dev);
        if (!pdev_data->pdata) {
                dev_err(&spi->dev, "no platform data\n");
                ret = -ENODEV;
index 527590f2adfbe66b088a99c48d5ce382ff030efd..a3b7d950d8e9b0f2a989b6624e130ed7cc081d91 100644 (file)
@@ -297,7 +297,8 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
                ret = wl1271_plt_stop(wl);
                break;
        case PLT_ON:
-               ret = wl1271_plt_start(wl, PLT_ON);
+       case PLT_CHIP_AWAKE:
+               ret = wl1271_plt_start(wl, val);
                break;
        case PLT_FEM_DETECT:
                ret = wl1271_tm_detect_fem(wl, tb);
@@ -361,6 +362,7 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 {
        struct wl1271 *wl = hw->priv;
        struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
+       u32 nla_cmd;
        int err;
 
        err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
@@ -370,7 +372,14 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        if (!tb[WL1271_TM_ATTR_CMD_ID])
                return -EINVAL;
 
-       switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) {
+       nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]);
+
+       /* Only SET_PLT_MODE is allowed while in PLT_CHIP_AWAKE mode */
+       if (wl->plt_mode == PLT_CHIP_AWAKE &&
+           nla_cmd != WL1271_TM_CMD_SET_PLT_MODE)
+               return -EOPNOTSUPP;
+
+       switch (nla_cmd) {
        case WL1271_TM_CMD_TEST:
                return wl1271_tm_cmd_test(wl, tb);
        case WL1271_TM_CMD_INTERROGATE:
index 7e93fe63a2c74a215b4948a91a95c179a8d78592..87cd707affa240390f6b34ea0d1b28caf23ffb83 100644 (file)
@@ -86,19 +86,34 @@ void wl1271_free_tx_id(struct wl1271 *wl, int id)
 EXPORT_SYMBOL(wl1271_free_tx_id);
 
 static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
+                                                struct wl12xx_vif *wlvif,
                                                 struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr;
 
+       hdr = (struct ieee80211_hdr *)(skb->data +
+                                      sizeof(struct wl1271_tx_hw_descr));
+       if (!ieee80211_is_auth(hdr->frame_control))
+               return;
+
        /*
         * add the station to the known list before transmitting the
         * authentication response. this way it won't get de-authed by FW
         * when transmitting too soon.
         */
-       hdr = (struct ieee80211_hdr *)(skb->data +
-                                      sizeof(struct wl1271_tx_hw_descr));
-       if (ieee80211_is_auth(hdr->frame_control))
-               wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+       wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+
+       /*
+        * ROC for 1 second on the AP channel for completing the connection.
+        * Note the ROC will be continued by the update_sta_state callbacks
+        * once the station reaches the associated state.
+        */
+       wlcore_update_inconn_sta(wl, wlvif, NULL, true);
+       wlvif->pending_auth_reply_time = jiffies;
+       cancel_delayed_work(&wlvif->pending_auth_complete_work);
+       ieee80211_queue_delayed_work(wl->hw,
+                               &wlvif->pending_auth_complete_work,
+                               msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
 }
 
 static void wl1271_tx_regulate_link(struct wl1271 *wl,
@@ -386,7 +401,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
                is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
                         (cipher == WLAN_CIPHER_SUITE_WEP104);
 
-               if (WARN_ON(is_wep && wlvif->default_key != idx)) {
+               if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
                        ret = wl1271_set_default_wep_key(wl, wlvif, idx);
                        if (ret < 0)
                                return ret;
@@ -404,7 +419,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
        wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
 
        if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
-               wl1271_tx_ap_update_inconnection_sta(wl, skb);
+               wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
                wl1271_tx_regulate_link(wl, wlvif, hlid);
        }
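
The TX hook above keys off ieee80211_is_auth() on the 802.11 frame_control field to catch the AP's authentication reply on its way out. For reference, a hypothetical user-space equivalent of that test; the mask 0x00fc selects the type and subtype bits and 0x00b0 is management/authentication:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical user-space check equivalent to ieee80211_is_auth();
 * assumes frame_control has already been converted to host order. */
static int is_auth_frame(uint16_t frame_control)
{
        return (frame_control & 0x00fc) == 0x00b0;
}

int main(void)
{
        /* auth frame -> 1, beacon (subtype 8) -> 0 */
        printf("%d %d\n", is_auth_frame(0x00b0), is_auth_frame(0x0080));
        return 0;
}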
 
index 55aa4acf9105a4e703036b0b12250ae19a9a7f81..35489c300da17bfefe3b35fc7515dc58aeeaa196 100644 (file)
@@ -56,6 +56,9 @@
 /* Used for management frames and dummy packets */
 #define WL1271_TID_MGMT 7
 
+/* stop a ROC for pending authentication reply after this time (ms) */
+#define WLCORE_PEND_AUTH_ROC_TIMEOUT     1000
+
 struct wl127x_tx_mem {
        /*
         * Number of extra memory blocks to allocate for this packet
index 0034979e97cbaa0316dfd2a62db2e5be59b7f058..54ce5d5e84db0be55da713ddcdbcd19818d507f3 100644 (file)
@@ -481,6 +481,8 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
                   struct ieee80211_sta *sta,
                   struct ieee80211_key_conf *key_conf);
 void wlcore_regdomain_config(struct wl1271 *wl);
+void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+                             struct wl1271_station *wl_sta, bool in_conn);
 
 static inline void
 wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
index e5e146435fe77e34bc04ac7bca36a1e8e553a17a..2a50e089b0e755eb9fafa79f1cff00c6e9d2d4a7 100644 (file)
@@ -255,6 +255,7 @@ enum wl12xx_vif_flags {
        WLVIF_FLAG_CS_PROGRESS,
        WLVIF_FLAG_AP_PROBE_RESP_SET,
        WLVIF_FLAG_IN_USE,
+       WLVIF_FLAG_ACTIVE,
 };
 
 struct wl12xx_vif;
@@ -307,6 +308,7 @@ enum plt_mode {
        PLT_OFF = 0,
        PLT_ON = 1,
        PLT_FEM_DETECT = 2,
+       PLT_CHIP_AWAKE = 3
 };
 
 struct wl12xx_rx_filter_field {
@@ -456,6 +458,15 @@ struct wl12xx_vif {
         */
        int hw_queue_base;
 
+       /* do we have a pending auth reply? (and ROC) */
+       bool ap_pending_auth_reply;
+
+       /* time when we sent the pending auth reply */
+       unsigned long pending_auth_reply_time;
+
+       /* work for canceling ROC after pending auth reply */
+       struct delayed_work pending_auth_complete_work;
+
        /*
         * This struct must be last!
         * data that has to be saved across reconfigs (e.g. recovery)
index 5715318d6bab3b4c7905c7c69ee549e39636c6cc..55b8dec86233c60f9b3e1dcf96006d29ca5361c3 100644 (file)
@@ -87,9 +87,13 @@ struct pending_tx_info {
 struct xenvif_rx_meta {
        int id;
        int size;
+       int gso_type;
        int gso_size;
 };
 
+#define GSO_BIT(type) \
+       (1 << XEN_NETIF_GSO_TYPE_ ## type)
+
 /* Discriminate from any valid pending_idx value. */
 #define INVALID_PENDING_IDX 0xFFFF
 
@@ -150,10 +154,12 @@ struct xenvif {
        u8               fe_dev_addr[6];
 
        /* Frontend feature information. */
+       int gso_mask;
+       int gso_prefix_mask;
+
        u8 can_sg:1;
-       u8 gso:1;
-       u8 gso_prefix:1;
-       u8 csum:1;
+       u8 ip_csum:1;
+       u8 ipv6_csum:1;
 
        /* Internal feature information. */
        u8 can_queue:1;     /* can queue packets for receiver? */
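
The single gso/gso_prefix booleans become bit masks indexed by the Xen GSO type, so the same code paths can carry both TCPv4 and TCPv6 offloads. A small user-space model of how such a mask is built and tested (the XEN_NETIF_GSO_TYPE_* values below are assumptions based on the Xen netif ABI, and the check mirrors xenvif_fix_features() further down):

#include <stdio.h>

/* Hypothetical model of the per-vif GSO masks above. */
#define XEN_NETIF_GSO_TYPE_TCPV4 1
#define XEN_NETIF_GSO_TYPE_TCPV6 2
#define GSO_BIT(type) (1 << XEN_NETIF_GSO_TYPE_ ## type)

int main(void)
{
        int gso_mask = GSO_BIT(TCPV4);  /* frontend advertised TCPv4 GSO only */

        printf("TSO  kept: %d\n", !(~gso_mask & GSO_BIT(TCPV4)));
        printf("TSO6 kept: %d\n", !(~gso_mask & GSO_BIT(TCPV6)));
        return 0;
}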
index 01bb854c7f62bfc281dfdc0081829237f2f843d6..e4aa26748f806f175808564180d6e593d8b92954 100644 (file)
@@ -214,10 +214,14 @@ static netdev_features_t xenvif_fix_features(struct net_device *dev,
 
        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
-       if (!vif->gso && !vif->gso_prefix)
+       if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
-       if (!vif->csum)
+       if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6))
+               features &= ~NETIF_F_TSO6;
+       if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
+       if (!vif->ipv6_csum)
+               features &= ~NETIF_F_IPV6_CSUM;
 
        return features;
 }
@@ -306,7 +310,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif->domid  = domid;
        vif->handle = handle;
        vif->can_sg = 1;
-       vif->csum = 1;
+       vif->ip_csum = 1;
        vif->dev = dev;
 
        vif->credit_bytes = vif->remaining_credit = ~0UL;
@@ -316,8 +320,10 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif->credit_timeout.expires = jiffies;
 
        dev->netdev_ops = &xenvif_netdev_ops;
-       dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
-       dev->features = dev->hw_features;
+       dev->hw_features = NETIF_F_SG |
+               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+               NETIF_F_TSO | NETIF_F_TSO6;
+       dev->features = dev->hw_features | NETIF_F_RXCSUM;
        SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
 
        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
index f3e591c611ded744ec0b6ea2cefe9b7dd3d568c7..828fdab4f1a4b08ae456f8223e02218486cfc43c 100644 (file)
@@ -109,15 +109,12 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
        return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
 }
 
-/*
- * This is the amount of packet we copy rather than map, so that the
- * guest can't fiddle with the contents of the headers while we do
- * packet processing on them (netfilter, routing, etc).
+/* This is a minimum size for the linear area to avoid lots of
+ * calls to __pskb_pull_tail() as we set up checksum offsets. The
+ * value 128 was chosen as it covers all IPv4 and most likely
+ * IPv6 headers.
  */
-#define PKT_PROT_LEN    (ETH_HLEN + \
-                        VLAN_HLEN + \
-                        sizeof(struct iphdr) + MAX_IPOPTLEN + \
-                        sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
+#define PKT_PROT_LEN 128
 
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
@@ -145,7 +142,7 @@ static int max_required_rx_slots(struct xenvif *vif)
        int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
 
        /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
-       if (vif->can_sg || vif->gso || vif->gso_prefix)
+       if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask)
                max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
 
        return max;
@@ -317,6 +314,7 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
        req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
 
        meta = npo->meta + npo->meta_prod++;
+       meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
        meta->gso_size = 0;
        meta->size = 0;
        meta->id = req->id;
@@ -339,6 +337,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
        struct gnttab_copy *copy_gop;
        struct xenvif_rx_meta *meta;
        unsigned long bytes;
+       int gso_type;
 
        /* Data must not cross a page boundary. */
        BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -397,7 +396,14 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                }
 
                /* Leave a gap for the GSO descriptor. */
-               if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
+               if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+                       gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+               else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+                       gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+               else
+                       gso_type = XEN_NETIF_GSO_TYPE_NONE;
+
+               if (*head && ((1 << gso_type) & vif->gso_mask))
                        vif->rx.req_cons++;
 
                *head = 0; /* There must be something in this buffer now. */
@@ -428,14 +434,28 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        unsigned char *data;
        int head = 1;
        int old_meta_prod;
+       int gso_type;
+       int gso_size;
 
        old_meta_prod = npo->meta_prod;
 
+       if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
+               gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+               gso_size = skb_shinfo(skb)->gso_size;
+       } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+               gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+               gso_size = skb_shinfo(skb)->gso_size;
+       } else {
+               gso_type = XEN_NETIF_GSO_TYPE_NONE;
+               gso_size = 0;
+       }
+
        /* Set up a GSO prefix descriptor, if necessary */
-       if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
+       if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
                req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
                meta = npo->meta + npo->meta_prod++;
-               meta->gso_size = skb_shinfo(skb)->gso_size;
+               meta->gso_type = gso_type;
+               meta->gso_size = gso_size;
                meta->size = 0;
                meta->id = req->id;
        }
@@ -443,10 +463,13 @@ static int xenvif_gop_skb(struct sk_buff *skb,
        req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
        meta = npo->meta + npo->meta_prod++;
 
-       if (!vif->gso_prefix)
-               meta->gso_size = skb_shinfo(skb)->gso_size;
-       else
+       if ((1 << gso_type) & vif->gso_mask) {
+               meta->gso_type = gso_type;
+               meta->gso_size = gso_size;
+       } else {
+               meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
                meta->gso_size = 0;
+       }
 
        meta->size = 0;
        meta->id = req->id;
@@ -592,7 +615,8 @@ void xenvif_rx_action(struct xenvif *vif)
 
                vif = netdev_priv(skb->dev);
 
-               if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+               if ((1 << vif->meta[npo.meta_cons].gso_type) &
+                   vif->gso_prefix_mask) {
                        resp = RING_GET_RESPONSE(&vif->rx,
                                                 vif->rx.rsp_prod_pvt++);
 
@@ -629,7 +653,8 @@ void xenvif_rx_action(struct xenvif *vif)
                                        vif->meta[npo.meta_cons].size,
                                        flags);
 
-               if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+               if ((1 << vif->meta[npo.meta_cons].gso_type) &
+                   vif->gso_mask) {
                        struct xen_netif_extra_info *gso =
                                (struct xen_netif_extra_info *)
                                RING_GET_RESPONSE(&vif->rx,
@@ -637,8 +662,8 @@ void xenvif_rx_action(struct xenvif *vif)
 
                        resp->flags |= XEN_NETRXF_extra_info;
 
+                       gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
                        gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
-                       gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
                        gso->u.gso.pad = 0;
                        gso->u.gso.features = 0;
 
@@ -1101,15 +1126,20 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
                return -EINVAL;
        }
 
-       /* Currently only TCPv4 S.O. is supported. */
-       if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
+       switch (gso->u.gso.type) {
+       case XEN_NETIF_GSO_TYPE_TCPV4:
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+               break;
+       case XEN_NETIF_GSO_TYPE_TCPV6:
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+               break;
+       default:
                netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
                xenvif_fatal_tx_err(vif);
                return -EINVAL;
        }
 
        skb_shinfo(skb)->gso_size = gso->u.gso.size;
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 
        /* Header must be checked, and gso_segs computed. */
        skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
@@ -1118,61 +1148,74 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
        return 0;
 }
 
-static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+{
+       if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
+               /* If we need to pullup then pullup to the max, so we
+                * won't need to do it again.
+                */
+               int target = min_t(int, skb->len, MAX_TCP_HEADER);
+               __pskb_pull_tail(skb, target - skb_headlen(skb));
+       }
+}
+
+static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
+                            int recalculate_partial_csum)
 {
-       struct iphdr *iph;
+       struct iphdr *iph = (void *)skb->data;
+       unsigned int header_size;
+       unsigned int off;
        int err = -EPROTO;
-       int recalculate_partial_csum = 0;
 
-       /*
-        * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
-        * peers can fail to set NETRXF_csum_blank when sending a GSO
-        * frame. In this case force the SKB to CHECKSUM_PARTIAL and
-        * recalculate the partial checksum.
-        */
-       if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
-               vif->rx_gso_checksum_fixup++;
-               skb->ip_summed = CHECKSUM_PARTIAL;
-               recalculate_partial_csum = 1;
-       }
+       off = sizeof(struct iphdr);
 
-       /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
-       if (skb->ip_summed != CHECKSUM_PARTIAL)
-               return 0;
+       header_size = skb->network_header + off + MAX_IPOPTLEN;
+       maybe_pull_tail(skb, header_size);
 
-       if (skb->protocol != htons(ETH_P_IP))
-               goto out;
+       off = iph->ihl * 4;
 
-       iph = (void *)skb->data;
        switch (iph->protocol) {
        case IPPROTO_TCP:
-               if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+               if (!skb_partial_csum_set(skb, off,
                                          offsetof(struct tcphdr, check)))
                        goto out;
 
                if (recalculate_partial_csum) {
                        struct tcphdr *tcph = tcp_hdr(skb);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct tcphdr);
+                       maybe_pull_tail(skb, header_size);
+
                        tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                        skb->len - iph->ihl*4,
+                                                        skb->len - off,
                                                         IPPROTO_TCP, 0);
                }
                break;
        case IPPROTO_UDP:
-               if (!skb_partial_csum_set(skb, 4 * iph->ihl,
+               if (!skb_partial_csum_set(skb, off,
                                          offsetof(struct udphdr, check)))
                        goto out;
 
                if (recalculate_partial_csum) {
                        struct udphdr *udph = udp_hdr(skb);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct udphdr);
+                       maybe_pull_tail(skb, header_size);
+
                        udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
-                                                        skb->len - iph->ihl*4,
+                                                        skb->len - off,
                                                         IPPROTO_UDP, 0);
                }
                break;
        default:
                if (net_ratelimit())
                        netdev_err(vif->dev,
-                                  "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
+                                  "Attempting to checksum a non-TCP/UDP packet, "
+                                  "dropping a protocol %d packet\n",
                                   iph->protocol);
                goto out;
        }
@@ -1183,6 +1226,158 @@ out:
        return err;
 }
 
+static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
+                              int recalculate_partial_csum)
+{
+       int err = -EPROTO;
+       struct ipv6hdr *ipv6h = (void *)skb->data;
+       u8 nexthdr;
+       unsigned int header_size;
+       unsigned int off;
+       bool fragment;
+       bool done;
+
+       fragment = false;
+       done = false;
+
+       off = sizeof(struct ipv6hdr);
+
+       header_size = skb->network_header + off;
+       maybe_pull_tail(skb, header_size);
+
+       nexthdr = ipv6h->nexthdr;
+
+       while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
+              !done) {
+               switch (nexthdr) {
+               case IPPROTO_DSTOPTS:
+               case IPPROTO_HOPOPTS:
+               case IPPROTO_ROUTING: {
+                       struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct ipv6_opt_hdr);
+                       maybe_pull_tail(skb, header_size);
+
+                       nexthdr = hp->nexthdr;
+                       off += ipv6_optlen(hp);
+                       break;
+               }
+               case IPPROTO_AH: {
+                       struct ip_auth_hdr *hp = (void *)(skb->data + off);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct ip_auth_hdr);
+                       maybe_pull_tail(skb, header_size);
+
+                       nexthdr = hp->nexthdr;
+                       off += (hp->hdrlen+2)<<2;
+                       break;
+               }
+               case IPPROTO_FRAGMENT:
+                       fragment = true;
+                       /* fall through */
+               default:
+                       done = true;
+                       break;
+               }
+       }
+
+       if (!done) {
+               if (net_ratelimit())
+                       netdev_err(vif->dev, "Failed to parse packet header\n");
+               goto out;
+       }
+
+       if (fragment) {
+               if (net_ratelimit())
+                       netdev_err(vif->dev, "Packet is a fragment!\n");
+               goto out;
+       }
+
+       switch (nexthdr) {
+       case IPPROTO_TCP:
+               if (!skb_partial_csum_set(skb, off,
+                                         offsetof(struct tcphdr, check)))
+                       goto out;
+
+               if (recalculate_partial_csum) {
+                       struct tcphdr *tcph = tcp_hdr(skb);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct tcphdr);
+                       maybe_pull_tail(skb, header_size);
+
+                       tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+                                                      &ipv6h->daddr,
+                                                      skb->len - off,
+                                                      IPPROTO_TCP, 0);
+               }
+               break;
+       case IPPROTO_UDP:
+               if (!skb_partial_csum_set(skb, off,
+                                         offsetof(struct udphdr, check)))
+                       goto out;
+
+               if (recalculate_partial_csum) {
+                       struct udphdr *udph = udp_hdr(skb);
+
+                       header_size = skb->network_header +
+                               off +
+                               sizeof(struct udphdr);
+                       maybe_pull_tail(skb, header_size);
+
+                       udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
+                                                      &ipv6h->daddr,
+                                                      skb->len - off,
+                                                      IPPROTO_UDP, 0);
+               }
+               break;
+       default:
+               if (net_ratelimit())
+                       netdev_err(vif->dev,
+                                  "Attempting to checksum a non-TCP/UDP packet, "
+                                  "dropping a protocol %d packet\n",
+                                  nexthdr);
+               goto out;
+       }
+
+       err = 0;
+
+out:
+       return err;
+}
+
+static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
+{
+       int err = -EPROTO;
+       int recalculate_partial_csum = 0;
+
+       /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+        * peers can fail to set NETRXF_csum_blank when sending a GSO
+        * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+        * recalculate the partial checksum.
+        */
+       if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+               vif->rx_gso_checksum_fixup++;
+               skb->ip_summed = CHECKSUM_PARTIAL;
+               recalculate_partial_csum = 1;
+       }
+
+       /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return 0;
+
+       if (skb->protocol == htons(ETH_P_IP))
+               err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
+
+       return err;
+}
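
checksum_setup_ipv6() has to walk the chain of IPv6 extension headers to find the transport protocol before it can point the partial checksum at the right offset. A stripped-down, hypothetical user-space version of that walk, covering only the option/routing and AH cases handled above and assuming the whole header chain sits in one flat buffer (the kernel code additionally re-pulls the linear area with maybe_pull_tail() before each access):

#include <stddef.h>
#include <stdint.h>

/* IANA-assigned next-header values used above. */
#define NEXTHDR_HOP      0
#define NEXTHDR_ROUTING  43
#define NEXTHDR_FRAGMENT 44
#define NEXTHDR_AUTH     51
#define NEXTHDR_DEST     60

/*
 * Returns the transport protocol number and sets *offp to its offset,
 * or -1 for a fragment or a truncated chain. buf points at the first
 * extension header (right after the fixed IPv6 header), nexthdr comes
 * from the fixed header, len is the remaining payload length.
 */
static int skip_ipv6_exthdrs(const uint8_t *buf, size_t len,
                             int nexthdr, size_t *offp)
{
        size_t off = 0;

        while (off + 2 <= len) {
                size_t hdrlen;

                switch (nexthdr) {
                case NEXTHDR_HOP:
                case NEXTHDR_ROUTING:
                case NEXTHDR_DEST:
                        hdrlen = (buf[off + 1] + 1) * 8;  /* ipv6_optlen() */
                        break;
                case NEXTHDR_AUTH:
                        hdrlen = (buf[off + 1] + 2) * 4;  /* AH length unit */
                        break;
                case NEXTHDR_FRAGMENT:
                        return -1;      /* can't checksum a fragment */
                default:
                        *offp = off;
                        return nexthdr; /* transport header reached */
                }
                nexthdr = buf[off];
                off += hdrlen;
        }
        return -1;
}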
+
 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
 {
        unsigned long now = jiffies;
@@ -1428,12 +1623,7 @@ static int xenvif_tx_submit(struct xenvif *vif, int budget)
 
                xenvif_fill_frags(vif, skb);
 
-               /*
-                * If the initial fragment was < PKT_PROT_LEN then
-                * pull through some bytes from the other fragments to
-                * increase the linear region to PKT_PROT_LEN bytes.
-                */
-               if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
+               if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
                        int target = min_t(int, skb->len, PKT_PROT_LEN);
                        __pskb_pull_tail(skb, target - skb_headlen(skb));
                }
index b45bce20ad7624421b7500cff3ccfc5a49f9204f..f0358992b04fa820cb083300765271b92823320a 100644 (file)
@@ -39,11 +39,15 @@ static int connect_rings(struct backend_info *);
 static void connect(struct backend_info *);
 static void backend_create_xenvif(struct backend_info *be);
 static void unregister_hotplug_status_watch(struct backend_info *be);
+static void set_backend_state(struct backend_info *be,
+                             enum xenbus_state state);
 
 static int netback_remove(struct xenbus_device *dev)
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
 
+       set_backend_state(be, XenbusStateClosed);
+
        unregister_hotplug_status_watch(be);
        if (be->vif) {
                kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
@@ -101,6 +105,22 @@ static int netback_probe(struct xenbus_device *dev,
                        goto abort_transaction;
                }
 
+               err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv6",
+                                   "%d", sg);
+               if (err) {
+                       message = "writing feature-gso-tcpv6";
+                       goto abort_transaction;
+               }
+
+               /* We support partial checksum setup for IPv6 packets */
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "feature-ipv6-csum-offload",
+                                   "%d", 1);
+               if (err) {
+                       message = "writing feature-ipv6-csum-offload";
+                       goto abort_transaction;
+               }
+
                /* We support rx-copy path. */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-rx-copy", "%d", 1);
@@ -557,20 +577,50 @@ static int connect_rings(struct backend_info *be)
                val = 0;
        vif->can_sg = !!val;
 
+       vif->gso_mask = 0;
+       vif->gso_prefix_mask = 0;
+
        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4",
                         "%d", &val) < 0)
                val = 0;
-       vif->gso = !!val;
+       if (val)
+               vif->gso_mask |= GSO_BIT(TCPV4);
 
        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix",
                         "%d", &val) < 0)
                val = 0;
-       vif->gso_prefix = !!val;
+       if (val)
+               vif->gso_prefix_mask |= GSO_BIT(TCPV4);
+
+       if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6",
+                        "%d", &val) < 0)
+               val = 0;
+       if (val)
+               vif->gso_mask |= GSO_BIT(TCPV6);
+
+       if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix",
+                        "%d", &val) < 0)
+               val = 0;
+       if (val)
+               vif->gso_prefix_mask |= GSO_BIT(TCPV6);
+
+       if (vif->gso_mask & vif->gso_prefix_mask) {
+               xenbus_dev_fatal(dev, err,
+                                "%s: gso and gso prefix flags are not "
+                                "mutually exclusive",
+                                dev->otherend);
+               return -EOPNOTSUPP;
+       }
 
        if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
                         "%d", &val) < 0)
                val = 0;
-       vif->csum = !val;
+       vif->ip_csum = !val;
+
+       if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload",
+                        "%d", &val) < 0)
+               val = 0;
+       vif->ipv6_csum = !!val;
 
        /* Map the shared frame, irq etc. */
        err = xenvif_connect(vif, tx_ring_ref, rx_ring_ref,
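
Every optional frontend feature is probed with the same idiom: xenbus_scanf() into an int, defaulting to 0 when the key is missing. A hypothetical helper (not part of the patch) that captures that idiom:

#include <xen/xenbus.h>

/* Hypothetical helper: read an optional boolean feature key from the
 * frontend, treating a missing key as 0. */
static int xenvif_read_feature_flag(struct xenbus_device *dev,
                                    const char *node)
{
        int val;

        if (xenbus_scanf(XBT_NIL, dev->otherend, node, "%d", &val) < 0)
                val = 0;
        return !!val;
}

With it, the TCPv6 probe above could be written as: if (xenvif_read_feature_flag(dev, "feature-gso-tcpv6")) vif->gso_mask |= GSO_BIT(TCPV6);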
index 36808bf256770a5e02e7674002ed389c7fd8ac19..dd1011e55cb598096ef7c2ad64981a2572b760d7 100644 (file)
@@ -952,7 +952,7 @@ static int handle_incoming_queue(struct net_device *dev,
                u64_stats_update_end(&stats->syncp);
 
                /* Pass it up. */
-               netif_receive_skb(skb);
+               napi_gro_receive(&np->napi, skb);
        }
 
        return packets_dropped;
@@ -1051,6 +1051,8 @@ err:
        if (work_done < budget) {
                int more_to_do = 0;
 
+               napi_gro_flush(napi, false);
+
                local_irq_save(flags);
 
                RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
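
Handing received skbs to napi_gro_receive() lets the stack coalesce TCP segments, and the napi_gro_flush() added before completion pushes up anything GRO is still holding once the budget was not exhausted. A generic sketch of that poll-loop shape (struct my_priv and my_rx_one() are placeholders; the real netfront also re-checks the ring before completing):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
        struct napi_struct napi;
};

static struct sk_buff *my_rx_one(struct my_priv *priv);   /* placeholder */

static int my_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        struct sk_buff *skb;
        int work_done = 0;

        while (work_done < budget && (skb = my_rx_one(priv)) != NULL) {
                napi_gro_receive(napi, skb);    /* instead of netif_receive_skb() */
                work_done++;
        }

        if (work_done < budget) {
                /* push any packets GRO is still holding before stopping */
                napi_gro_flush(napi, false);
                napi_complete(napi);
        }

        return work_done;
}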
index b0b64ccb7d7d5d2581047bbd8ff151dabbf3492c..c1fb20603338299eb2eecfd085653480dd726428 100644 (file)
@@ -46,6 +46,16 @@ config NFC_SIM
 
          If unsure, say N.
 
+config NFC_PORT100
+       tristate "Sony NFC Port-100 Series USB device support"
+       depends on USB
+       depends on NFC_DIGITAL
+       help
+         This adds support for Sony Port-100 chip based USB devices such as the
+         RC-S380 dongle.
+
+         If unsure, say N.
+
 source "drivers/nfc/pn544/Kconfig"
 source "drivers/nfc/microread/Kconfig"
 
index be7636abcb3fa2c79076f533b54ab4769251fbb3..c715fe8582a8075503f7db5de758fa250d2905bb 100644 (file)
@@ -8,5 +8,6 @@ obj-$(CONFIG_NFC_PN533)         += pn533.o
 obj-$(CONFIG_NFC_WILINK)       += nfcwilink.o
 obj-$(CONFIG_NFC_MEI_PHY)      += mei_phy.o
 obj-$(CONFIG_NFC_SIM)          += nfcsim.o
+obj-$(CONFIG_NFC_PORT100)      += port100.o
 
 ccflags-$(CONFIG_NFC_DEBUG) := -DDEBUG
index 606bf55e76ec5941bf69443f8113a38266b4c000..85f90090cc1d1e167de6391e3481b597451555bd 100644 (file)
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/nfc.h>
@@ -60,13 +62,13 @@ int nfc_mei_phy_enable(void *phy_id)
 
        r = mei_cl_enable_device(phy->device);
        if (r < 0) {
-               pr_err("MEI_PHY: Could not enable device\n");
+               pr_err("Could not enable device\n");
                return r;
        }
 
        r = mei_cl_register_event_cb(phy->device, nfc_mei_event_cb, phy);
        if (r) {
-               pr_err("MEY_PHY: Event cb registration failed\n");
+               pr_err("Event cb registration failed\n");
                mei_cl_disable_device(phy->device);
                phy->powered = 0;
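
The hand-written "MEI_PHY:" prefixes can go because pr_fmt(), defined before the first include that pulls in printk.h, makes every pr_*() call prepend KBUILD_MODNAME automatically. A minimal illustration (hypothetical module):

/* Must be defined before printk.h is (indirectly) included. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init pr_fmt_demo_init(void)
{
        pr_info("loaded\n");            /* logs "<modname>: loaded" */
        return 0;
}

static void __exit pr_fmt_demo_exit(void)
{
        pr_info("unloaded\n");
}

module_init(pr_fmt_demo_init);
module_exit(pr_fmt_demo_exit);
MODULE_LICENSE("GPL");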
 
index 101089495bf81f6b5ee115f219f8b13f3f5cb667..696e3467eccc33ab1ee97c6da2a4940c489ee086 100644 (file)
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
@@ -95,12 +97,8 @@ static int check_crc(struct sk_buff *skb)
                crc = crc ^ skb->data[i];
 
        if (crc != skb->data[skb->len-1]) {
-               pr_err(MICROREAD_I2C_DRIVER_NAME
-                      ": CRC error 0x%x != 0x%x\n",
-                      crc, skb->data[skb->len-1]);
-
-               pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
-
+               pr_err("CRC error 0x%x != 0x%x\n", crc, skb->data[skb->len-1]);
+               pr_info("%s: BAD CRC\n", __func__);
                return -EPERM;
        }
 
@@ -160,18 +158,15 @@ static int microread_i2c_read(struct microread_i2c_phy *phy,
        u8 tmp[MICROREAD_I2C_LLC_MAX_SIZE - 1];
        struct i2c_client *client = phy->i2c_dev;
 
-       pr_debug("%s\n", __func__);
-
        r = i2c_master_recv(client, &len, 1);
        if (r != 1) {
-               dev_err(&client->dev, "cannot read len byte\n");
+               nfc_err(&client->dev, "cannot read len byte\n");
                return -EREMOTEIO;
        }
 
        if ((len < MICROREAD_I2C_LLC_MIN_SIZE) ||
            (len > MICROREAD_I2C_LLC_MAX_SIZE)) {
-               dev_err(&client->dev, "invalid len byte\n");
-               pr_err("invalid len byte\n");
+               nfc_err(&client->dev, "invalid len byte\n");
                r = -EBADMSG;
                goto flush;
        }
@@ -228,7 +223,6 @@ static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id)
        }
 
        client = phy->i2c_dev;
-       dev_dbg(&client->dev, "IRQ\n");
 
        if (phy->hard_fault != 0)
                return IRQ_HANDLED;
@@ -263,20 +257,18 @@ static int microread_i2c_probe(struct i2c_client *client,
                dev_get_platdata(&client->dev);
        int r;
 
-       dev_dbg(&client->dev, "client %p", client);
+       dev_dbg(&client->dev, "client %p\n", client);
 
        if (!pdata) {
-               dev_err(&client->dev, "client %p: missing platform data",
+               nfc_err(&client->dev, "client %p: missing platform data\n",
                        client);
                return -EINVAL;
        }
 
        phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
                           GFP_KERNEL);
-       if (!phy) {
-               dev_err(&client->dev, "Can't allocate microread phy");
+       if (!phy)
                return -ENOMEM;
-       }
 
        i2c_set_clientdata(client, phy);
        phy->i2c_dev = client;
@@ -285,7 +277,7 @@ static int microread_i2c_probe(struct i2c_client *client,
                                 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                                 MICROREAD_I2C_DRIVER_NAME, phy);
        if (r) {
-               dev_err(&client->dev, "Unable to register IRQ handler");
+               nfc_err(&client->dev, "Unable to register IRQ handler\n");
                return r;
        }
 
@@ -296,7 +288,7 @@ static int microread_i2c_probe(struct i2c_client *client,
        if (r < 0)
                goto err_irq;
 
-       dev_info(&client->dev, "Probed");
+       nfc_info(&client->dev, "Probed");
 
        return 0;
 
@@ -310,8 +302,6 @@ static int microread_i2c_remove(struct i2c_client *client)
 {
        struct microread_i2c_phy *phy = i2c_get_clientdata(client);
 
-       dev_dbg(&client->dev, "%s\n", __func__);
-
        microread_remove(phy->hdev);
 
        free_irq(client->irq, phy);
index cdf1bc53b257f6ae34e3f2148de97b47f53521b7..72fafec3d46058908ea841f2c89273c179b6d30d 100644 (file)
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
 #include <linux/nfc.h>
@@ -59,8 +61,6 @@ static int microread_mei_remove(struct mei_cl_device *device)
 {
        struct nfc_mei_phy *phy = mei_cl_get_drvdata(device);
 
-       pr_info("Removing microread\n");
-
        microread_remove(phy->hdev);
 
        nfc_mei_phy_free(phy);
index cdb9f6de132a5c16b564e2a1228bbc3ae594ec2f..970ded6bfcf562b278e43b03e5112042c798fd0c 100644 (file)
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
@@ -546,7 +548,7 @@ exit:
        kfree_skb(skb);
 
        if (r)
-               pr_err("Failed to handle discovered target err=%d", r);
+               pr_err("Failed to handle discovered target err=%d\n", r);
 }
 
 static int microread_event_received(struct nfc_hci_dev *hdev, u8 gate,
@@ -656,7 +658,6 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
 
        info = kzalloc(sizeof(struct microread_info), GFP_KERNEL);
        if (!info) {
-               pr_err("Cannot allocate memory for microread_info.\n");
                r = -ENOMEM;
                goto err_info_alloc;
        }
@@ -686,7 +687,7 @@ int microread_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
                                             MICROREAD_CMD_TAILROOM,
                                             phy_payload);
        if (!info->hdev) {
-               pr_err("Cannot allocate nfc hdev.\n");
+               pr_err("Cannot allocate nfc hdev\n");
                r = -ENOMEM;
                goto err_alloc_hdev;
        }
index 9a53f13c88df50a705c84b89e4fe7c081dbe3c88..93111fa8d2829735ec1ddea6c09c6da86bbb9459 100644 (file)
 #include <linux/nfc.h>
 #include <net/nfc/nfc.h>
 
-#define DEV_ERR(_dev, fmt, args...) nfc_dev_err(&_dev->nfc_dev->dev, \
+#define DEV_ERR(_dev, fmt, args...) nfc_err(&_dev->nfc_dev->dev, \
                                                "%s: " fmt, __func__, ## args)
 
-#define DEV_DBG(_dev, fmt, args...) nfc_dev_dbg(&_dev->nfc_dev->dev, \
+#define DEV_DBG(_dev, fmt, args...) dev_dbg(&_dev->nfc_dev->dev, \
                                                "%s: " fmt, __func__, ## args)
 
 #define NFCSIM_VERSION "0.1"
@@ -64,7 +64,7 @@ static struct workqueue_struct *wq;
 
 static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown)
 {
-       DEV_DBG(dev, "shutdown=%d", shutdown);
+       DEV_DBG(dev, "shutdown=%d\n", shutdown);
 
        mutex_lock(&dev->lock);
 
@@ -84,7 +84,7 @@ static int nfcsim_target_found(struct nfcsim *dev)
 {
        struct nfc_target nfc_tgt;
 
-       DEV_DBG(dev, "");
+       DEV_DBG(dev, "\n");
 
        memset(&nfc_tgt, 0, sizeof(struct nfc_target));
 
@@ -98,7 +98,7 @@ static int nfcsim_dev_up(struct nfc_dev *nfc_dev)
 {
        struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-       DEV_DBG(dev, "");
+       DEV_DBG(dev, "\n");
 
        mutex_lock(&dev->lock);
 
@@ -113,7 +113,7 @@ static int nfcsim_dev_down(struct nfc_dev *nfc_dev)
 {
        struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-       DEV_DBG(dev, "");
+       DEV_DBG(dev, "\n");
 
        mutex_lock(&dev->lock);
 
@@ -143,7 +143,7 @@ static int nfcsim_dep_link_up(struct nfc_dev *nfc_dev,
 
        remote_gb = nfc_get_local_general_bytes(peer->nfc_dev, &remote_gb_len);
        if (!remote_gb) {
-               DEV_ERR(peer, "Can't get remote general bytes");
+               DEV_ERR(peer, "Can't get remote general bytes\n");
 
                mutex_unlock(&peer->lock);
                return -EINVAL;
@@ -155,7 +155,7 @@ static int nfcsim_dep_link_up(struct nfc_dev *nfc_dev,
 
        rc = nfc_set_remote_general_bytes(nfc_dev, remote_gb, remote_gb_len);
        if (rc) {
-               DEV_ERR(dev, "Can't set remote general bytes");
+               DEV_ERR(dev, "Can't set remote general bytes\n");
                mutex_unlock(&dev->lock);
                return rc;
        }
@@ -172,7 +172,7 @@ static int nfcsim_dep_link_down(struct nfc_dev *nfc_dev)
 {
        struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-       DEV_DBG(dev, "");
+       DEV_DBG(dev, "\n");
 
        nfcsim_cleanup_dev(dev, 0);
 
@@ -188,7 +188,7 @@ static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
        mutex_lock(&dev->lock);
 
        if (dev->polling_mode != NFCSIM_POLL_NONE) {
-               DEV_ERR(dev, "Already in polling mode");
+               DEV_ERR(dev, "Already in polling mode\n");
                rc = -EBUSY;
                goto exit;
        }
@@ -200,7 +200,7 @@ static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
                dev->polling_mode |= NFCSIM_POLL_TARGET;
 
        if (dev->polling_mode == NFCSIM_POLL_NONE) {
-               DEV_ERR(dev, "Unsupported polling mode");
+               DEV_ERR(dev, "Unsupported polling mode\n");
                rc = -EINVAL;
                goto exit;
        }
@@ -210,7 +210,7 @@ static int nfcsim_start_poll(struct nfc_dev *nfc_dev,
 
        queue_delayed_work(wq, &dev->poll_work, 0);
 
-       DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X", im_protocols,
+       DEV_DBG(dev, "Start polling: im: 0x%X, tm: 0x%X\n", im_protocols,
                tm_protocols);
 
        rc = 0;
@@ -224,7 +224,7 @@ static void nfcsim_stop_poll(struct nfc_dev *nfc_dev)
 {
        struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-       DEV_DBG(dev, "Stop poll");
+       DEV_DBG(dev, "Stop poll\n");
 
        mutex_lock(&dev->lock);
 
@@ -240,7 +240,7 @@ static int nfcsim_activate_target(struct nfc_dev *nfc_dev,
 {
        struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-       DEV_DBG(dev, "");
+       DEV_DBG(dev, "\n");
 
        return -ENOTSUPP;
 }
@@ -250,7 +250,7 @@ static void nfcsim_deactivate_target(struct nfc_dev *nfc_dev,
 {
        struct nfcsim *dev = nfc_get_drvdata(nfc_dev);
 
-       DEV_DBG(dev, "");
+       DEV_DBG(dev, "\n");
 }
 
 static void nfcsim_wq_recv(struct work_struct *work)
@@ -267,7 +267,7 @@ static void nfcsim_wq_recv(struct work_struct *work)
 
        if (dev->initiator) {
                if (!dev->cb) {
-                       DEV_ERR(dev, "Null recv callback");
+                       DEV_ERR(dev, "Null recv callback\n");
                        dev_kfree_skb(dev->clone_skb);
                        goto exit;
                }
@@ -310,7 +310,7 @@ static int nfcsim_tx(struct nfc_dev *nfc_dev, struct nfc_target *target,
        peer->clone_skb = skb_clone(skb, GFP_KERNEL);
 
        if (!peer->clone_skb) {
-               DEV_ERR(dev, "skb_clone failed");
+               DEV_ERR(dev, "skb_clone failed\n");
                mutex_unlock(&peer->lock);
                err = -ENOMEM;
                goto exit;
@@ -397,13 +397,13 @@ static void nfcsim_wq_poll(struct work_struct *work)
        nfcsim_set_polling_mode(dev);
 
        if (dev->curr_polling_mode == NFCSIM_POLL_NONE) {
-               DEV_DBG(dev, "Not polling");
+               DEV_DBG(dev, "Not polling\n");
                goto unlock;
        }
 
        DEV_DBG(dev, "Polling as %s",
                dev->curr_polling_mode == NFCSIM_POLL_INITIATOR ?
-               "initiator" : "target");
+               "initiator\n" : "target\n");
 
        if (dev->curr_polling_mode == NFCSIM_POLL_TARGET)
                goto sched_work;
index 59f95d8fc98c887228d15e0421c69906bdd85154..71308645593f638a2879df4348f1349f0e031ce4 100644 (file)
@@ -146,13 +146,11 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
        unsigned long comp_ret;
        int rc;
 
-       nfc_dev_dbg(&drv->pdev->dev, "get_bts_file_name entry");
-
        skb = nfcwilink_skb_alloc(sizeof(struct nci_vs_nfcc_info_cmd),
                                        GFP_KERNEL);
        if (!skb) {
-               nfc_dev_err(&drv->pdev->dev,
-                               "no memory for nci_vs_nfcc_info_cmd");
+               nfc_err(&drv->pdev->dev,
+                       "no memory for nci_vs_nfcc_info_cmd\n");
                return -ENOMEM;
        }
 
@@ -170,21 +168,19 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
 
        comp_ret = wait_for_completion_timeout(&drv->completed,
                                msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT));
-       nfc_dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld",
-                       comp_ret);
+       dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n",
+               comp_ret);
        if (comp_ret == 0) {
-               nfc_dev_err(&drv->pdev->dev,
-                               "timeout on wait_for_completion_timeout");
+               nfc_err(&drv->pdev->dev,
+                       "timeout on wait_for_completion_timeout\n");
                return -ETIMEDOUT;
        }
 
-       nfc_dev_dbg(&drv->pdev->dev, "nci_vs_nfcc_info_rsp: plen %d, status %d",
-                       drv->nfcc_info.plen,
-                       drv->nfcc_info.status);
+       dev_dbg(&drv->pdev->dev, "nci_vs_nfcc_info_rsp: plen %d, status %d\n",
+               drv->nfcc_info.plen, drv->nfcc_info.status);
 
        if ((drv->nfcc_info.plen != 5) || (drv->nfcc_info.status != 0)) {
-               nfc_dev_err(&drv->pdev->dev,
-                               "invalid nci_vs_nfcc_info_rsp");
+               nfc_err(&drv->pdev->dev, "invalid nci_vs_nfcc_info_rsp\n");
                return -EINVAL;
        }
 
@@ -195,7 +191,7 @@ static int nfcwilink_get_bts_file_name(struct nfcwilink *drv, char *file_name)
                        drv->nfcc_info.sw_ver_z,
                        drv->nfcc_info.patch_id);
 
-       nfc_dev_info(&drv->pdev->dev, "nfcwilink FW file name: %s", file_name);
+       nfc_info(&drv->pdev->dev, "nfcwilink FW file name: %s\n", file_name);
 
        return 0;
 }
@@ -207,15 +203,13 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
        unsigned long comp_ret;
        int rc;
 
-       nfc_dev_dbg(&drv->pdev->dev, "send_bts_cmd entry");
-
        /* verify valid cmd for the NFC channel */
        if ((len <= sizeof(struct nfcwilink_hdr)) ||
                (len > BTS_FILE_CMD_MAX_LEN) ||
                (hdr->chnl != NFCWILINK_CHNL) ||
                (hdr->opcode != NFCWILINK_OPCODE)) {
-               nfc_dev_err(&drv->pdev->dev,
-                       "ignoring invalid bts cmd, len %d, chnl %d, opcode %d",
+               nfc_err(&drv->pdev->dev,
+                       "ignoring invalid bts cmd, len %d, chnl %d, opcode %d\n",
                        len, hdr->chnl, hdr->opcode);
                return 0;
        }
@@ -226,7 +220,7 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
 
        skb = nfcwilink_skb_alloc(len, GFP_KERNEL);
        if (!skb) {
-               nfc_dev_err(&drv->pdev->dev, "no memory for bts cmd");
+               nfc_err(&drv->pdev->dev, "no memory for bts cmd\n");
                return -ENOMEM;
        }
 
@@ -238,11 +232,11 @@ static int nfcwilink_send_bts_cmd(struct nfcwilink *drv, __u8 *data, int len)
 
        comp_ret = wait_for_completion_timeout(&drv->completed,
                                msecs_to_jiffies(NFCWILINK_CMD_TIMEOUT));
-       nfc_dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld",
-                       comp_ret);
+       dev_dbg(&drv->pdev->dev, "wait_for_completion_timeout returned %ld\n",
+               comp_ret);
        if (comp_ret == 0) {
-               nfc_dev_err(&drv->pdev->dev,
-                               "timeout on wait_for_completion_timeout");
+               nfc_err(&drv->pdev->dev,
+                       "timeout on wait_for_completion_timeout\n");
                return -ETIMEDOUT;
        }
 
@@ -257,8 +251,6 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
        __u8 *ptr;
        int len, rc;
 
-       nfc_dev_dbg(&drv->pdev->dev, "download_fw entry");
-
        set_bit(NFCWILINK_FW_DOWNLOAD, &drv->flags);
 
        rc = nfcwilink_get_bts_file_name(drv, file_name);
@@ -267,7 +259,7 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
 
        rc = request_firmware(&fw, file_name, &drv->pdev->dev);
        if (rc) {
-               nfc_dev_err(&drv->pdev->dev, "request_firmware failed %d", rc);
+               nfc_err(&drv->pdev->dev, "request_firmware failed %d\n", rc);
 
                /* if the file is not found, don't exit with failure */
                if (rc == -ENOENT)
@@ -280,14 +272,14 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
        ptr = (__u8 *)fw->data;
 
        if ((len == 0) || (ptr == NULL)) {
-               nfc_dev_dbg(&drv->pdev->dev,
-                               "request_firmware returned size %d", len);
+               dev_dbg(&drv->pdev->dev,
+                       "request_firmware returned size %d\n", len);
                goto release_fw;
        }
 
        if (__le32_to_cpu(((struct bts_file_hdr *)ptr)->magic) !=
                        BTS_FILE_HDR_MAGIC) {
-               nfc_dev_err(&drv->pdev->dev, "wrong bts magic number");
+               nfc_err(&drv->pdev->dev, "wrong bts magic number\n");
                rc = -EINVAL;
                goto release_fw;
        }
@@ -302,8 +294,8 @@ static int nfcwilink_download_fw(struct nfcwilink *drv)
                action_len =
                        __le16_to_cpu(((struct bts_file_action *)ptr)->len);
 
-               nfc_dev_dbg(&drv->pdev->dev, "bts_file_action type %d, len %d",
-                               action_type, action_len);
+               dev_dbg(&drv->pdev->dev, "bts_file_action type %d, len %d\n",
+                       action_type, action_len);
 
                switch (action_type) {
                case BTS_FILE_ACTION_TYPE_SEND_CMD:
@@ -333,8 +325,6 @@ static void nfcwilink_register_complete(void *priv_data, char data)
 {
        struct nfcwilink *drv = priv_data;
 
-       nfc_dev_dbg(&drv->pdev->dev, "register_complete entry");
-
        /* store ST registration status */
        drv->st_register_cb_status = data;
 
@@ -356,7 +346,7 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
                return -EFAULT;
        }
 
-       nfc_dev_dbg(&drv->pdev->dev, "receive entry, len %d", skb->len);
+       dev_dbg(&drv->pdev->dev, "receive entry, len %d\n", skb->len);
 
        /* strip the ST header
        (apart from the chnl byte, which is not received in the hdr) */
@@ -370,7 +360,7 @@ static long nfcwilink_receive(void *priv_data, struct sk_buff *skb)
        /* Forward skb to NCI core layer */
        rc = nci_recv_frame(drv->ndev, skb);
        if (rc < 0) {
-               nfc_dev_err(&drv->pdev->dev, "nci_recv_frame failed %d", rc);
+               nfc_err(&drv->pdev->dev, "nci_recv_frame failed %d\n", rc);
                return rc;
        }
 
@@ -396,8 +386,6 @@ static int nfcwilink_open(struct nci_dev *ndev)
        unsigned long comp_ret;
        int rc;
 
-       nfc_dev_dbg(&drv->pdev->dev, "open entry");
-
        if (test_and_set_bit(NFCWILINK_RUNNING, &drv->flags)) {
                rc = -EBUSY;
                goto exit;
@@ -415,9 +403,9 @@ static int nfcwilink_open(struct nci_dev *ndev)
                        &drv->completed,
                        msecs_to_jiffies(NFCWILINK_REGISTER_TIMEOUT));
 
-                       nfc_dev_dbg(&drv->pdev->dev,
-                       "wait_for_completion_timeout returned %ld",
-                       comp_ret);
+                       dev_dbg(&drv->pdev->dev,
+                               "wait_for_completion_timeout returned %ld\n",
+                               comp_ret);
 
                        if (comp_ret == 0) {
                                /* timeout */
@@ -425,13 +413,12 @@ static int nfcwilink_open(struct nci_dev *ndev)
                                goto clear_exit;
                        } else if (drv->st_register_cb_status != 0) {
                                rc = drv->st_register_cb_status;
-                               nfc_dev_err(&drv->pdev->dev,
-                               "st_register_cb failed %d", rc);
+                               nfc_err(&drv->pdev->dev,
+                                       "st_register_cb failed %d\n", rc);
                                goto clear_exit;
                        }
                } else {
-                       nfc_dev_err(&drv->pdev->dev,
-                               "st_register failed %d", rc);
+                       nfc_err(&drv->pdev->dev, "st_register failed %d\n", rc);
                        goto clear_exit;
                }
        }
@@ -441,8 +428,8 @@ static int nfcwilink_open(struct nci_dev *ndev)
        drv->st_write = nfcwilink_proto.write;
 
        if (nfcwilink_download_fw(drv)) {
-               nfc_dev_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d",
-                               rc);
+               nfc_err(&drv->pdev->dev, "nfcwilink_download_fw failed %d\n",
+                       rc);
                /* open should succeed, even if the FW download failed */
        }
 
@@ -460,14 +447,12 @@ static int nfcwilink_close(struct nci_dev *ndev)
        struct nfcwilink *drv = nci_get_drvdata(ndev);
        int rc;
 
-       nfc_dev_dbg(&drv->pdev->dev, "close entry");
-
        if (!test_and_clear_bit(NFCWILINK_RUNNING, &drv->flags))
                return 0;
 
        rc = st_unregister(&nfcwilink_proto);
        if (rc)
-               nfc_dev_err(&drv->pdev->dev, "st_unregister failed %d", rc);
+               nfc_err(&drv->pdev->dev, "st_unregister failed %d\n", rc);
 
        drv->st_write = NULL;
 
@@ -480,7 +465,7 @@ static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb)
        struct nfcwilink_hdr hdr = {NFCWILINK_CHNL, NFCWILINK_OPCODE, 0x0000};
        long len;
 
-       nfc_dev_dbg(&drv->pdev->dev, "send entry, len %d", skb->len);
+       dev_dbg(&drv->pdev->dev, "send entry, len %d\n", skb->len);
 
        if (!test_bit(NFCWILINK_RUNNING, &drv->flags)) {
                kfree_skb(skb);
@@ -498,7 +483,7 @@ static int nfcwilink_send(struct nci_dev *ndev, struct sk_buff *skb)
        len = drv->st_write(skb);
        if (len < 0) {
                kfree_skb(skb);
-               nfc_dev_err(&drv->pdev->dev, "st_write failed %ld", len);
+               nfc_err(&drv->pdev->dev, "st_write failed %ld\n", len);
                return -EFAULT;
        }
 
@@ -517,8 +502,6 @@ static int nfcwilink_probe(struct platform_device *pdev)
        int rc;
        __u32 protocols;
 
-       nfc_dev_dbg(&pdev->dev, "probe entry");
-
        drv = devm_kzalloc(&pdev->dev, sizeof(struct nfcwilink), GFP_KERNEL);
        if (!drv) {
                rc = -ENOMEM;
@@ -538,7 +521,7 @@ static int nfcwilink_probe(struct platform_device *pdev)
                                        NFCWILINK_HDR_LEN,
                                        0);
        if (!drv->ndev) {
-               nfc_dev_err(&pdev->dev, "nci_allocate_device failed");
+               nfc_err(&pdev->dev, "nci_allocate_device failed\n");
                rc = -ENOMEM;
                goto exit;
        }
@@ -548,7 +531,7 @@ static int nfcwilink_probe(struct platform_device *pdev)
 
        rc = nci_register_device(drv->ndev);
        if (rc < 0) {
-               nfc_dev_err(&pdev->dev, "nci_register_device failed %d", rc);
+               nfc_err(&pdev->dev, "nci_register_device failed %d\n", rc);
                goto free_dev_exit;
        }
 
@@ -568,8 +551,6 @@ static int nfcwilink_remove(struct platform_device *pdev)
        struct nfcwilink *drv = dev_get_drvdata(&pdev->dev);
        struct nci_dev *ndev;
 
-       nfc_dev_dbg(&pdev->dev, "remove entry");
-
        if (!drv)
                return -EFAULT;
 
@@ -578,8 +559,6 @@ static int nfcwilink_remove(struct platform_device *pdev)
        nci_unregister_device(ndev);
        nci_free_device(ndev);
 
-       dev_set_drvdata(&pdev->dev, NULL);
-
        return 0;
 }
 
index 5df730be88a388ba28f705fde82fdce04b1ff88e..2daf04c073383c53ae4374d8a9613546fe13d09b 100644 (file)
@@ -150,6 +150,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
 #define PN533_CMD_TG_INIT_AS_TARGET 0x8c
 #define PN533_CMD_TG_GET_DATA 0x86
 #define PN533_CMD_TG_SET_DATA 0x8e
+#define PN533_CMD_TG_SET_META_DATA 0x94
 #define PN533_CMD_UNDEF 0xff
 
 #define PN533_CMD_RESPONSE(cmd) (cmd + 1)
@@ -373,6 +374,8 @@ struct pn533 {
        struct delayed_work poll_work;
        struct work_struct mi_rx_work;
        struct work_struct mi_tx_work;
+       struct work_struct mi_tm_rx_work;
+       struct work_struct mi_tm_tx_work;
        struct work_struct tg_work;
        struct work_struct rf_work;
 
@@ -387,6 +390,7 @@ struct pn533 {
        struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
        u8 poll_mod_count;
        u8 poll_mod_curr;
+       u8 poll_dep;
        u32 poll_protocols;
        u32 listen_protocols;
        struct timer_list listen_timer;
@@ -722,32 +726,32 @@ static void pn533_recv_response(struct urb *urb)
                break; /* success */
        case -ECONNRESET:
        case -ENOENT:
-               nfc_dev_dbg(&dev->interface->dev,
-                           "The urb has been canceled (status %d)",
-                           urb->status);
+               dev_dbg(&dev->interface->dev,
+                       "The urb has been canceled (status %d)\n",
+                       urb->status);
                goto sched_wq;
        case -ESHUTDOWN:
        default:
-               nfc_dev_err(&dev->interface->dev,
-                           "Urb failure (status %d)", urb->status);
+               nfc_err(&dev->interface->dev,
+                       "Urb failure (status %d)\n", urb->status);
                goto sched_wq;
        }
 
        in_frame = dev->in_urb->transfer_buffer;
 
-       nfc_dev_dbg(&dev->interface->dev, "Received a frame.");
+       dev_dbg(&dev->interface->dev, "Received a frame\n");
        print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
                             dev->ops->rx_frame_size(in_frame), false);
 
        if (!dev->ops->rx_is_frame_valid(in_frame, dev)) {
-               nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
+               nfc_err(&dev->interface->dev, "Received an invalid frame\n");
                cmd->status = -EIO;
                goto sched_wq;
        }
 
        if (!pn533_rx_frame_is_cmd_response(dev, in_frame)) {
-               nfc_dev_err(&dev->interface->dev,
-                           "It it not the response to the last command");
+               nfc_err(&dev->interface->dev,
+                       "It is not the response to the last command\n");
                cmd->status = -EIO;
                goto sched_wq;
        }
@@ -777,29 +781,29 @@ static void pn533_recv_ack(struct urb *urb)
                break; /* success */
        case -ECONNRESET:
        case -ENOENT:
-               nfc_dev_dbg(&dev->interface->dev,
-                           "The urb has been stopped (status %d)",
-                           urb->status);
+               dev_dbg(&dev->interface->dev,
+                       "The urb has been stopped (status %d)\n",
+                       urb->status);
                goto sched_wq;
        case -ESHUTDOWN:
        default:
-               nfc_dev_err(&dev->interface->dev,
-                           "Urb failure (status %d)", urb->status);
+               nfc_err(&dev->interface->dev,
+                       "Urb failure (status %d)\n", urb->status);
                goto sched_wq;
        }
 
        in_frame = dev->in_urb->transfer_buffer;
 
        if (!pn533_std_rx_frame_is_ack(in_frame)) {
-               nfc_dev_err(&dev->interface->dev, "Received an invalid ack");
+               nfc_err(&dev->interface->dev, "Received an invalid ack\n");
                cmd->status = -EIO;
                goto sched_wq;
        }
 
        rc = pn533_submit_urb_for_response(dev, GFP_ATOMIC);
        if (rc) {
-               nfc_dev_err(&dev->interface->dev,
-                           "usb_submit_urb failed with result %d", rc);
+               nfc_err(&dev->interface->dev,
+                       "usb_submit_urb failed with result %d\n", rc);
                cmd->status = rc;
                goto sched_wq;
        }
@@ -823,8 +827,6 @@ static int pn533_send_ack(struct pn533 *dev, gfp_t flags)
        /* spec 7.1.1.3:  Preamble, SoPC (2), ACK Code (2), Postamble */
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
-
        dev->out_urb->transfer_buffer = ack;
        dev->out_urb->transfer_buffer_length = sizeof(ack);
        rc = usb_submit_urb(dev->out_urb, flags);
@@ -927,7 +929,7 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
        struct pn533_cmd *cmd;
        int rc = 0;
 
-       nfc_dev_dbg(&dev->interface->dev, "Sending command 0x%x", cmd_code);
+       dev_dbg(&dev->interface->dev, "Sending command 0x%x\n", cmd_code);
 
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
@@ -954,8 +956,8 @@ static int __pn533_send_async(struct pn533 *dev, u8 cmd_code,
                goto unlock;
        }
 
-       nfc_dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x", __func__,
-                   cmd_code);
+       dev_dbg(&dev->interface->dev, "%s Queueing command 0x%x\n",
+               __func__, cmd_code);
 
        INIT_LIST_HEAD(&cmd->queue);
        list_add_tail(&cmd->queue, &dev->cmd_queue);
@@ -1168,14 +1170,14 @@ static void pn533_send_complete(struct urb *urb)
                break; /* success */
        case -ECONNRESET:
        case -ENOENT:
-               nfc_dev_dbg(&dev->interface->dev,
-                           "The urb has been stopped (status %d)",
-                           urb->status);
+               dev_dbg(&dev->interface->dev,
+                       "The urb has been stopped (status %d)\n",
+                       urb->status);
                break;
        case -ESHUTDOWN:
        default:
-               nfc_dev_err(&dev->interface->dev,
-                           "Urb failure (status %d)", urb->status);
+               nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
+                       urb->status);
        }
 }
 
@@ -1452,8 +1454,8 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
        struct nfc_target nfc_tgt;
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s - modulation=%d", __func__,
-                   dev->poll_mod_curr);
+       dev_dbg(&dev->interface->dev, "%s: modulation=%d\n",
+               __func__, dev->poll_mod_curr);
 
        if (tg != 1)
                return -EPROTO;
@@ -1475,8 +1477,8 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
                rc = pn533_target_found_type_b(&nfc_tgt, tgdata, tgdata_len);
                break;
        default:
-               nfc_dev_err(&dev->interface->dev,
-                           "Unknown current poll modulation");
+               nfc_err(&dev->interface->dev,
+                       "Unknown current poll modulation\n");
                return -EPROTO;
        }
 
@@ -1484,14 +1486,14 @@ static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata,
                return rc;
 
        if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) {
-               nfc_dev_dbg(&dev->interface->dev,
-                           "The Tg found doesn't have the desired protocol");
+               dev_dbg(&dev->interface->dev,
+                       "The Tg found doesn't have the desired protocol\n");
                return -EAGAIN;
        }
 
-       nfc_dev_dbg(&dev->interface->dev,
-                   "Target found - supported protocols: 0x%x",
-                   nfc_tgt.supported_protocols);
+       dev_dbg(&dev->interface->dev,
+               "Target found - supported protocols: 0x%x\n",
+               nfc_tgt.supported_protocols);
 
        dev->tgt_available_prots = nfc_tgt.supported_protocols;
 
@@ -1548,7 +1550,8 @@ static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp)
        u8 nbtg, tg, *tgdata;
        int rc, tgdata_len;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       /* Toggle the DEP polling */
+       dev->poll_dep = 1;
 
        nbtg = resp->data[0];
        tg = resp->data[1];
@@ -1624,37 +1627,130 @@ static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev)
 
 #define PN533_CMD_DATAEXCH_HEAD_LEN 1
 #define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+static void pn533_wq_tm_mi_recv(struct work_struct *work);
+static struct sk_buff *pn533_build_response(struct pn533 *dev);
+
 static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
                                      struct sk_buff *resp)
 {
-       u8 status;
+       struct sk_buff *skb;
+       u8 status, ret, mi;
+       int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
-       if (IS_ERR(resp))
+       if (IS_ERR(resp)) {
+               skb_queue_purge(&dev->resp_q);
                return PTR_ERR(resp);
+       }
 
        status = resp->data[0];
+
+       ret = status & PN533_CMD_RET_MASK;
+       mi = status & PN533_CMD_MI_MASK;
+
        skb_pull(resp, sizeof(status));
 
-       if (status != 0) {
-               nfc_tm_deactivated(dev->nfc_dev);
-               dev->tgt_mode = 0;
-               dev_kfree_skb(resp);
-               return 0;
+       if (ret != PN533_CMD_RET_SUCCESS) {
+               rc = -EIO;
+               goto error;
        }
 
-       return nfc_tm_data_received(dev->nfc_dev, resp);
+       skb_queue_tail(&dev->resp_q, resp);
+
+       if (mi) {
+               queue_work(dev->wq, &dev->mi_tm_rx_work);
+               return -EINPROGRESS;
+       }
+
+       skb = pn533_build_response(dev);
+       if (!skb) {
+               rc = -EIO;
+               goto error;
+       }
+
+       return nfc_tm_data_received(dev->nfc_dev, skb);
+
+error:
+       nfc_tm_deactivated(dev->nfc_dev);
+       dev->tgt_mode = 0;
+       skb_queue_purge(&dev->resp_q);
+       dev_kfree_skb(resp);
+
+       return rc;
+}
+
+static void pn533_wq_tm_mi_recv(struct work_struct *work)
+{
+       struct pn533 *dev = container_of(work, struct pn533, mi_tm_rx_work);
+       struct sk_buff *skb;
+       int rc;
+
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+       skb = pn533_alloc_skb(dev, 0);
+       if (!skb)
+               return;
+
+       rc = pn533_send_cmd_direct_async(dev,
+                                       PN533_CMD_TG_GET_DATA,
+                                       skb,
+                                       pn533_tm_get_data_complete,
+                                       NULL);
+
+       if (rc < 0)
+               dev_kfree_skb(skb);
+
+       return;
+}
+
+static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
+                                 struct sk_buff *resp);
+static void pn533_wq_tm_mi_send(struct work_struct *work)
+{
+       struct pn533 *dev = container_of(work, struct pn533, mi_tm_tx_work);
+       struct sk_buff *skb;
+       int rc;
+
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+       /* Grab the first skb in the queue */
+       skb = skb_dequeue(&dev->fragment_skb);
+       if (skb == NULL) {      /* No more data */
+               /* Reset the queue for future use */
+               skb_queue_head_init(&dev->fragment_skb);
+               goto error;
+       }
+
+       /* last entry - remove MI bit */
+       if (skb_queue_len(&dev->fragment_skb) == 0) {
+               rc = pn533_send_cmd_direct_async(dev, PN533_CMD_TG_SET_DATA,
+                                       skb, pn533_tm_send_complete, NULL);
+       } else
+               rc = pn533_send_cmd_direct_async(dev,
+                                       PN533_CMD_TG_SET_META_DATA,
+                                       skb, pn533_tm_send_complete, NULL);
+
+       if (rc == 0) /* success */
+               return;
+
+       dev_err(&dev->interface->dev,
+               "Error %d when trying to perform set meta data_exchange\n", rc);
+
+       dev_kfree_skb(skb);
+
+error:
+       pn533_send_ack(dev, GFP_KERNEL);
+       queue_work(dev->wq, &dev->cmd_work);
 }
 
 static void pn533_wq_tg_get_data(struct work_struct *work)
 {
        struct pn533 *dev = container_of(work, struct pn533, tg_work);
-
        struct sk_buff *skb;
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        skb = pn533_alloc_skb(dev, 0);
        if (!skb)
@@ -1676,7 +1772,7 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
        size_t gb_len;
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        if (resp->len < ATR_REQ_GB_OFFSET + 1)
                return -EINVAL;
@@ -1684,8 +1780,8 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
        mode = resp->data[0];
        cmd = &resp->data[1];
 
-       nfc_dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
-                   mode, resp->len);
+       dev_dbg(&dev->interface->dev, "Target mode 0x%x len %d\n",
+               mode, resp->len);
 
        if ((mode & PN533_INIT_TARGET_RESP_FRAME_MASK) ==
            PN533_INIT_TARGET_RESP_ACTIVE)
@@ -1700,8 +1796,8 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
        rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
                              comm_mode, gb, gb_len);
        if (rc < 0) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Error when signaling target activation");
+               nfc_err(&dev->interface->dev,
+                       "Error when signaling target activation\n");
                return rc;
        }
 
@@ -1715,7 +1811,7 @@ static void pn533_listen_mode_timer(unsigned long data)
 {
        struct pn533 *dev = (struct pn533 *)data;
 
-       nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout");
+       dev_dbg(&dev->interface->dev, "Listen mode timeout\n");
 
        dev->cancel_listen = 1;
 
@@ -1730,13 +1826,12 @@ static int pn533_rf_complete(struct pn533 *dev, void *arg,
 {
        int rc = 0;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        if (IS_ERR(resp)) {
                rc = PTR_ERR(resp);
 
-               nfc_dev_err(&dev->interface->dev, "%s RF setting error %d",
-                           __func__, rc);
+               nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
 
                return rc;
        }
@@ -1754,7 +1849,7 @@ static void pn533_wq_rf(struct work_struct *work)
        struct sk_buff *skb;
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        skb = pn533_alloc_skb(dev, 2);
        if (!skb)
@@ -1767,25 +1862,136 @@ static void pn533_wq_rf(struct work_struct *work)
                                  pn533_rf_complete, NULL);
        if (rc < 0) {
                dev_kfree_skb(skb);
-               nfc_dev_err(&dev->interface->dev, "RF setting error %d", rc);
+               nfc_err(&dev->interface->dev, "RF setting error %d\n", rc);
        }
 
        return;
 }
 
+static int pn533_poll_dep_complete(struct pn533 *dev, void *arg,
+                                  struct sk_buff *resp)
+{
+       struct pn533_cmd_jump_dep_response *rsp;
+       struct nfc_target nfc_target;
+       u8 target_gt_len;
+       int rc;
+
+       if (IS_ERR(resp))
+               return PTR_ERR(resp);
+
+       rsp = (struct pn533_cmd_jump_dep_response *)resp->data;
+
+       rc = rsp->status & PN533_CMD_RET_MASK;
+       if (rc != PN533_CMD_RET_SUCCESS) {
+               /* No target found, turn radio off */
+               queue_work(dev->wq, &dev->rf_work);
+
+               dev_kfree_skb(resp);
+               return 0;
+       }
+
+       dev_dbg(&dev->interface->dev, "Creating new target\n");
+
+       nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+       nfc_target.nfcid1_len = 10;
+       memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len);
+       rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+       if (rc)
+               goto error;
+
+       dev->tgt_available_prots = 0;
+       dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+       /* ATR_RES general bytes are located at offset 17 */
+       target_gt_len = resp->len - 17;
+       rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+                                         rsp->gt, target_gt_len);
+       if (!rc) {
+               rc = nfc_dep_link_is_up(dev->nfc_dev,
+                                       dev->nfc_dev->targets[0].idx,
+                                       0, NFC_RF_INITIATOR);
+
+               if (!rc)
+                       pn533_poll_reset_mod_list(dev);
+       }
+error:
+       dev_kfree_skb(resp);
+       return rc;
+}
+
+#define PASSIVE_DATA_LEN 5
+static int pn533_poll_dep(struct nfc_dev *nfc_dev)
+{
+       struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+       struct sk_buff *skb;
+       int rc, skb_len;
+       u8 *next, nfcid3[NFC_NFCID3_MAXSIZE];
+       u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
+
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+       if (!dev->gb) {
+               dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
+
+               if (!dev->gb || !dev->gb_len) {
+                       dev->poll_dep = 0;
+                       queue_work(dev->wq, &dev->rf_work);
+               }
+       }
+
+       skb_len = 3 + dev->gb_len; /* ActPass + BR + Next */
+       skb_len += PASSIVE_DATA_LEN;
+
+       /* NFCID3 */
+       skb_len += NFC_NFCID3_MAXSIZE;
+       nfcid3[0] = 0x1;
+       nfcid3[1] = 0xfe;
+       get_random_bytes(nfcid3 + 2, 6);
+
+       skb = pn533_alloc_skb(dev, skb_len);
+       if (!skb)
+               return -ENOMEM;
+
+       *skb_put(skb, 1) = 0x01;  /* Active */
+       *skb_put(skb, 1) = 0x02;  /* 424 kbps */
+
+       next = skb_put(skb, 1);  /* Next */
+       *next = 0;
+
+       /* Copy passive data */
+       memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+       *next |= 1;
+
+       /* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
+       memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
+              NFC_NFCID3_MAXSIZE);
+       *next |= 2;
+
+       memcpy(skb_put(skb, dev->gb_len), dev->gb, dev->gb_len);
+       *next |= 4; /* We have some Gi */
+
+       rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
+                                 pn533_poll_dep_complete, NULL);
+
+       if (rc < 0)
+               dev_kfree_skb(skb);
+
+       return rc;
+}
+
 static int pn533_poll_complete(struct pn533 *dev, void *arg,
                               struct sk_buff *resp)
 {
        struct pn533_poll_modulations *cur_mod;
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        if (IS_ERR(resp)) {
                rc = PTR_ERR(resp);
 
-               nfc_dev_err(&dev->interface->dev, "%s  Poll complete error %d",
-                           __func__, rc);
+               nfc_err(&dev->interface->dev, "%s  Poll complete error %d\n",
+                       __func__, rc);
 
                if (rc == -ENOENT) {
                        if (dev->poll_mod_count != 0)
@@ -1793,8 +1999,8 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
                        else
                                goto stop_poll;
                } else if (rc < 0) {
-                       nfc_dev_err(&dev->interface->dev,
-                                   "Error %d when running poll", rc);
+                       nfc_err(&dev->interface->dev,
+                               "Error %d when running poll\n", rc);
                        goto stop_poll;
                }
        }
@@ -1813,7 +2019,7 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
                goto done;
 
        if (!dev->poll_mod_count) {
-               nfc_dev_dbg(&dev->interface->dev, "Polling has been stopped.");
+               dev_dbg(&dev->interface->dev, "Polling has been stopped\n");
                goto done;
        }
 
@@ -1826,7 +2032,7 @@ done:
        return rc;
 
 stop_poll:
-       nfc_dev_err(&dev->interface->dev, "Polling operation has been stopped");
+       nfc_err(&dev->interface->dev, "Polling operation has been stopped\n");
 
        pn533_poll_reset_mod_list(dev);
        dev->poll_protocols = 0;
@@ -1856,8 +2062,13 @@ static int pn533_send_poll_frame(struct pn533 *dev)
 
        mod = dev->poll_mod_active[dev->poll_mod_curr];
 
-       nfc_dev_dbg(&dev->interface->dev, "%s mod len %d\n",
-                   __func__, mod->len);
+       dev_dbg(&dev->interface->dev, "%s mod len %d\n",
+               __func__, mod->len);
+
+       if (dev->poll_dep)  {
+               dev->poll_dep = 0;
+               return pn533_poll_dep(dev->nfc_dev);
+       }
 
        if (mod->len == 0) {  /* Listen mode */
                cmd_code = PN533_CMD_TG_INIT_AS_TARGET;
@@ -1868,7 +2079,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
        }
 
        if (!skb) {
-               nfc_dev_err(&dev->interface->dev, "Failed to allocate skb.");
+               nfc_err(&dev->interface->dev, "Failed to allocate skb\n");
                return -ENOMEM;
        }
 
@@ -1876,7 +2087,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
                                  NULL);
        if (rc < 0) {
                dev_kfree_skb(skb);
-               nfc_dev_err(&dev->interface->dev, "Polling loop error %d", rc);
+               nfc_err(&dev->interface->dev, "Polling loop error %d\n", rc);
        }
 
        return rc;
@@ -1890,9 +2101,9 @@ static void pn533_wq_poll(struct work_struct *work)
 
        cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
 
-       nfc_dev_dbg(&dev->interface->dev,
-                   "%s cancel_listen %d modulation len %d",
-                   __func__, dev->cancel_listen, cur_mod->len);
+       dev_dbg(&dev->interface->dev,
+               "%s cancel_listen %d modulation len %d\n",
+               __func__, dev->cancel_listen, cur_mod->len);
 
        if (dev->cancel_listen == 1) {
                dev->cancel_listen = 0;
@@ -1913,21 +2124,23 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
                            u32 im_protocols, u32 tm_protocols)
 {
        struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+       struct pn533_poll_modulations *cur_mod;
        u8 rand_mod;
+       int rc;
 
-       nfc_dev_dbg(&dev->interface->dev,
-                   "%s: im protocols 0x%x tm protocols 0x%x",
-                   __func__, im_protocols, tm_protocols);
+       dev_dbg(&dev->interface->dev,
+               "%s: im protocols 0x%x tm protocols 0x%x\n",
+               __func__, im_protocols, tm_protocols);
 
        if (dev->tgt_active_prot) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Cannot poll with a target already activated");
+               nfc_err(&dev->interface->dev,
+                       "Cannot poll with a target already activated\n");
                return -EBUSY;
        }
 
        if (dev->tgt_mode) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Cannot poll while already being activated");
+               nfc_err(&dev->interface->dev,
+                       "Cannot poll while already being activated\n");
                return -EBUSY;
        }
 
@@ -1946,20 +2159,26 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
        rand_mod %= dev->poll_mod_count;
        dev->poll_mod_curr = rand_mod;
 
-       return pn533_send_poll_frame(dev);
+       cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+       rc = pn533_send_poll_frame(dev);
+
+       /* Start listen timer */
+       if (!rc && cur_mod->len == 0 && dev->poll_mod_count > 1)
+               mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
+
+       return rc;
 }
 
 static void pn533_stop_poll(struct nfc_dev *nfc_dev)
 {
        struct pn533 *dev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
-
        del_timer(&dev->listen_timer);
 
        if (!dev->poll_mod_count) {
-               nfc_dev_dbg(&dev->interface->dev,
-                           "Polling operation was not running");
+               dev_dbg(&dev->interface->dev,
+                       "Polling operation was not running\n");
                return;
        }
 
@@ -1973,11 +2192,10 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
        struct pn533_cmd_activate_response *rsp;
        u16 gt_len;
        int rc;
-
        struct sk_buff *skb;
        struct sk_buff *resp;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
        if (!skb)
@@ -1993,8 +2211,8 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
        rsp = (struct pn533_cmd_activate_response *)resp->data;
        rc = rsp->status & PN533_CMD_RET_MASK;
        if (rc != PN533_CMD_RET_SUCCESS) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Target activation failed (error 0x%x)", rc);
+               nfc_err(&dev->interface->dev,
+                       "Target activation failed (error 0x%x)\n", rc);
                dev_kfree_skb(resp);
                return -EIO;
        }
@@ -2013,39 +2231,38 @@ static int pn533_activate_target(struct nfc_dev *nfc_dev,
        struct pn533 *dev = nfc_get_drvdata(nfc_dev);
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s - protocol=%u", __func__,
-                   protocol);
+       dev_dbg(&dev->interface->dev, "%s: protocol=%u\n", __func__, protocol);
 
        if (dev->poll_mod_count) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Cannot activate while polling");
+               nfc_err(&dev->interface->dev,
+                       "Cannot activate while polling\n");
                return -EBUSY;
        }
 
        if (dev->tgt_active_prot) {
-               nfc_dev_err(&dev->interface->dev,
-                           "There is already an active target");
+               nfc_err(&dev->interface->dev,
+                       "There is already an active target\n");
                return -EBUSY;
        }
 
        if (!dev->tgt_available_prots) {
-               nfc_dev_err(&dev->interface->dev,
-                           "There is no available target to activate");
+               nfc_err(&dev->interface->dev,
+                       "There is no available target to activate\n");
                return -EINVAL;
        }
 
        if (!(dev->tgt_available_prots & (1 << protocol))) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Target doesn't support requested proto %u",
-                           protocol);
+               nfc_err(&dev->interface->dev,
+                       "Target doesn't support requested proto %u\n",
+                       protocol);
                return -EINVAL;
        }
 
        if (protocol == NFC_PROTO_NFC_DEP) {
                rc = pn533_activate_target_nfcdep(dev);
                if (rc) {
-                       nfc_dev_err(&dev->interface->dev,
-                                   "Activating target with DEP failed %d", rc);
+                       nfc_err(&dev->interface->dev,
+                               "Activating target with DEP failed %d\n", rc);
                        return rc;
                }
        }
@@ -2060,16 +2277,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
                                    struct nfc_target *target)
 {
        struct pn533 *dev = nfc_get_drvdata(nfc_dev);
-
        struct sk_buff *skb;
        struct sk_buff *resp;
-
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        if (!dev->tgt_active_prot) {
-               nfc_dev_err(&dev->interface->dev, "There is no active target");
+               nfc_err(&dev->interface->dev, "There is no active target\n");
                return;
        }
 
@@ -2088,8 +2303,8 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
 
        rc = resp->data[0] & PN533_CMD_RET_MASK;
        if (rc != PN533_CMD_RET_SUCCESS)
-               nfc_dev_err(&dev->interface->dev,
-                           "Error 0x%x when releasing the target", rc);
+               nfc_err(&dev->interface->dev,
+                       "Error 0x%x when releasing the target\n", rc);
 
        dev_kfree_skb(resp);
        return;
@@ -2111,8 +2326,8 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
 
        if (dev->tgt_available_prots &&
            !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
-               nfc_dev_err(&dev->interface->dev,
-                           "The target does not support DEP");
+               nfc_err(&dev->interface->dev,
+                       "The target does not support DEP\n");
                rc =  -EINVAL;
                goto error;
        }
@@ -2121,15 +2336,15 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
 
        rc = rsp->status & PN533_CMD_RET_MASK;
        if (rc != PN533_CMD_RET_SUCCESS) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Bringing DEP link up failed (error 0x%x)", rc);
+               nfc_err(&dev->interface->dev,
+                       "Bringing DEP link up failed (error 0x%x)\n", rc);
                goto error;
        }
 
        if (!dev->tgt_available_prots) {
                struct nfc_target nfc_target;
 
-               nfc_dev_dbg(&dev->interface->dev, "Creating new target");
+               dev_dbg(&dev->interface->dev, "Creating new target\n");
 
                nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
                nfc_target.nfcid1_len = 10;
@@ -2158,7 +2373,6 @@ error:
 }
 
 static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf);
-#define PASSIVE_DATA_LEN 5
 static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
                             u8 comm_mode, u8 *gb, size_t gb_len)
 {
@@ -2166,20 +2380,19 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
        struct sk_buff *skb;
        int rc, skb_len;
        u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
-
        u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        if (dev->poll_mod_count) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Cannot bring the DEP link up while polling");
+               nfc_err(&dev->interface->dev,
+                       "Cannot bring the DEP link up while polling\n");
                return -EBUSY;
        }
 
        if (dev->tgt_active_prot) {
-               nfc_dev_err(&dev->interface->dev,
-                           "There is already an active target");
+               nfc_err(&dev->interface->dev,
+                       "There is already an active target\n");
                return -EBUSY;
        }
 
@@ -2249,7 +2462,7 @@ static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
 {
        struct pn533 *dev = nfc_get_drvdata(nfc_dev);
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        pn533_poll_reset_mod_list(dev);
 
@@ -2274,7 +2487,7 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
        struct sk_buff *skb, *tmp, *t;
        unsigned int skb_len = 0, tmp_len = 0;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        if (skb_queue_empty(&dev->resp_q))
                return NULL;
@@ -2287,8 +2500,8 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
        skb_queue_walk_safe(&dev->resp_q, tmp, t)
                skb_len += tmp->len;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s total length %d\n",
-                   __func__, skb_len);
+       dev_dbg(&dev->interface->dev, "%s total length %d\n",
+               __func__, skb_len);
 
        skb = alloc_skb(skb_len, GFP_KERNEL);
        if (skb == NULL)
@@ -2315,7 +2528,7 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
        int rc = 0;
        u8 status, ret, mi;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        if (IS_ERR(resp)) {
                rc = PTR_ERR(resp);
@@ -2329,8 +2542,8 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
        skb_pull(resp, sizeof(status));
 
        if (ret != PN533_CMD_RET_SUCCESS) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Exchanging data failed (error 0x%x)", ret);
+               nfc_err(&dev->interface->dev,
+                       "Exchanging data failed (error 0x%x)\n", ret);
                rc = -EIO;
                goto error;
        }
@@ -2388,14 +2601,17 @@ static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
                        break;
                }
 
-               /* Reserve the TG/MI byte */
-               skb_reserve(frag, 1);
+               if (!dev->tgt_mode) {
+                       /* Reserve the TG/MI byte */
+                       skb_reserve(frag, 1);
 
-               /* MI + TG */
-               if (frag_size  == PN533_CMD_DATAFRAME_MAXLEN)
-                       *skb_push(frag, sizeof(u8)) = (PN533_CMD_MI_MASK | 1);
-               else
-                       *skb_push(frag, sizeof(u8)) =  1; /* TG */
+                       /* MI + TG */
+                       if (frag_size  == PN533_CMD_DATAFRAME_MAXLEN)
+                               *skb_push(frag, sizeof(u8)) =
+                                                       (PN533_CMD_MI_MASK | 1);
+                       else
+                               *skb_push(frag, sizeof(u8)) =  1; /* TG */
+               }
 
                memcpy(skb_put(frag, frag_size), skb->data, frag_size);
 
@@ -2420,11 +2636,11 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
        struct pn533_data_exchange_arg *arg = NULL;
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        if (!dev->tgt_active_prot) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Can't exchange data if there is no active target");
+               nfc_err(&dev->interface->dev,
+                       "Can't exchange data if there is no active target\n");
                rc = -EINVAL;
                goto error;
        }
@@ -2487,13 +2703,18 @@ static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
 {
        u8 status;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        if (IS_ERR(resp))
                return PTR_ERR(resp);
 
        status = resp->data[0];
 
+       /* Prepare for the next round */
+       if (skb_queue_len(&dev->fragment_skb) > 0) {
+               queue_work(dev->wq, &dev->mi_tm_tx_work);
+               return -EINPROGRESS;
+       }
        dev_kfree_skb(resp);
 
        if (status != 0) {
@@ -2514,19 +2735,34 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
        struct pn533 *dev = nfc_get_drvdata(nfc_dev);
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
+       /* split into multiple chunks if the size is too big */
        if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Data length greater than the max allowed: %d",
-                           PN533_CMD_DATAEXCH_DATA_MAXLEN);
-               return -ENOSYS;
+               rc = pn533_fill_fragment_skbs(dev, skb);
+               if (rc <= 0)
+                       goto error;
+
+               /* get the first skb */
+               skb = skb_dequeue(&dev->fragment_skb);
+               if (!skb) {
+                       rc = -EIO;
+                       goto error;
+               }
+
+               rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_META_DATA, skb,
+                                               pn533_tm_send_complete, NULL);
+       } else {
+               /* Send the skb */
+               rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
+                                               pn533_tm_send_complete, NULL);
        }
 
-       rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb,
-                                  pn533_tm_send_complete, NULL);
-       if (rc < 0)
+error:
+       if (rc < 0) {
                dev_kfree_skb(skb);
+               skb_queue_purge(&dev->fragment_skb);
+       }
 
        return rc;
 }
@@ -2534,11 +2770,10 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
 static void pn533_wq_mi_recv(struct work_struct *work)
 {
        struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
-
        struct sk_buff *skb;
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
        if (!skb)
@@ -2570,8 +2805,8 @@ static void pn533_wq_mi_recv(struct work_struct *work)
        if (rc == 0) /* success */
                return;
 
-       nfc_dev_err(&dev->interface->dev,
-                   "Error %d when trying to perform data_exchange", rc);
+       nfc_err(&dev->interface->dev,
+               "Error %d when trying to perform data_exchange\n", rc);
 
        dev_kfree_skb(skb);
        kfree(dev->cmd_complete_mi_arg);
@@ -2587,7 +2822,7 @@ static void pn533_wq_mi_send(struct work_struct *work)
        struct sk_buff *skb;
        int rc;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        /* Grab the first skb in the queue */
        skb = skb_dequeue(&dev->fragment_skb);
@@ -2625,8 +2860,8 @@ static void pn533_wq_mi_send(struct work_struct *work)
        if (rc == 0) /* success */
                return;
 
-       nfc_dev_err(&dev->interface->dev,
-                   "Error %d when trying to perform data_exchange", rc);
+       nfc_err(&dev->interface->dev,
+               "Error %d when trying to perform data_exchange\n", rc);
 
        dev_kfree_skb(skb);
        kfree(dev->cmd_complete_dep_arg);
@@ -2641,10 +2876,9 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
 {
        struct sk_buff *skb;
        struct sk_buff *resp;
-
        int skb_len;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */
 
@@ -2691,7 +2925,7 @@ static int pn533_pasori_fw_reset(struct pn533 *dev)
        struct sk_buff *skb;
        struct sk_buff *resp;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        skb = pn533_alloc_skb(dev, sizeof(u8));
        if (!skb)
@@ -2717,7 +2951,7 @@ static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
 {
        struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
 
-       nfc_dev_dbg(&urb->dev->dev, "%s", __func__);
+       dev_dbg(&urb->dev->dev, "%s\n", __func__);
 
        print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
                       urb->transfer_buffer, urb->transfer_buffer_length,
@@ -2737,7 +2971,7 @@ static int pn533_acr122_poweron_rdr(struct pn533 *dev)
        void *cntx;
        struct pn533_acr122_poweron_rdr_arg arg;
 
-       nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+       dev_dbg(&dev->interface->dev, "%s\n", __func__);
 
        init_completion(&arg.done);
        cntx = dev->in_urb->context;  /* backup context */
@@ -2755,16 +2989,15 @@ static int pn533_acr122_poweron_rdr(struct pn533 *dev)
 
        rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
        if (rc) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Reader power on cmd error %d", rc);
+               nfc_err(&dev->interface->dev,
+                       "Reader power on cmd error %d\n", rc);
                return rc;
        }
 
        rc =  usb_submit_urb(dev->in_urb, GFP_KERNEL);
        if (rc) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Can't submit for reader power on cmd response %d",
-                           rc);
+               nfc_err(&dev->interface->dev,
+                       "Can't submit reader poweron cmd response %d\n", rc);
                return rc;
        }
 
@@ -2785,20 +3018,19 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
        rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
                                     (u8 *)&rf_field, 1);
        if (rc) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Error on setting RF field");
+               nfc_err(&dev->interface->dev, "Error on setting RF field\n");
                return rc;
        }
 
        return rc;
 }
 
-int pn533_dev_up(struct nfc_dev *nfc_dev)
+static int pn533_dev_up(struct nfc_dev *nfc_dev)
 {
        return pn533_rf_field(nfc_dev, 1);
 }
 
-int pn533_dev_down(struct nfc_dev *nfc_dev)
+static int pn533_dev_down(struct nfc_dev *nfc_dev)
 {
        return pn533_rf_field(nfc_dev, 0);
 }
@@ -2839,16 +3071,16 @@ static int pn533_setup(struct pn533 *dev)
                break;
 
        default:
-               nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
-                           dev->device_type);
+               nfc_err(&dev->interface->dev, "Unknown device type %d\n",
+                       dev->device_type);
                return -EINVAL;
        }
 
        rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
                                     (u8 *)&max_retries, sizeof(max_retries));
        if (rc) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Error on setting MAX_RETRIES config");
+               nfc_err(&dev->interface->dev,
+                       "Error on setting MAX_RETRIES config\n");
                return rc;
        }
 
@@ -2856,8 +3088,7 @@ static int pn533_setup(struct pn533 *dev)
        rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
                                     (u8 *)&timing, sizeof(timing));
        if (rc) {
-               nfc_dev_err(&dev->interface->dev,
-                           "Error on setting RF timings");
+               nfc_err(&dev->interface->dev, "Error on setting RF timings\n");
                return rc;
        }
 
@@ -2871,8 +3102,8 @@ static int pn533_setup(struct pn533 *dev)
                rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
                                             pasori_cfg, 3);
                if (rc) {
-                       nfc_dev_err(&dev->interface->dev,
-                                   "Error while settings PASORI config");
+                       nfc_err(&dev->interface->dev,
+                               "Error while settings PASORI config\n");
                        return rc;
                }
 
@@ -2917,8 +3148,8 @@ static int pn533_probe(struct usb_interface *interface,
        }
 
        if (!in_endpoint || !out_endpoint) {
-               nfc_dev_err(&interface->dev,
-                           "Could not find bulk-in or bulk-out endpoint");
+               nfc_err(&interface->dev,
+                       "Could not find bulk-in or bulk-out endpoint\n");
                rc = -ENODEV;
                goto error;
        }
@@ -2941,6 +3172,8 @@ static int pn533_probe(struct usb_interface *interface,
        INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv);
        INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send);
        INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
+       INIT_WORK(&dev->mi_tm_rx_work, pn533_wq_tm_mi_recv);
+       INIT_WORK(&dev->mi_tm_tx_work, pn533_wq_tm_mi_send);
        INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll);
        INIT_WORK(&dev->rf_work, pn533_wq_rf);
        dev->wq = alloc_ordered_workqueue("pn533", 0);
@@ -2978,16 +3211,15 @@ static int pn533_probe(struct usb_interface *interface,
 
                rc = pn533_acr122_poweron_rdr(dev);
                if (rc < 0) {
-                       nfc_dev_err(&dev->interface->dev,
-                                   "Couldn't poweron the reader (error %d)",
-                                   rc);
+                       nfc_err(&dev->interface->dev,
+                               "Couldn't poweron the reader (error %d)\n", rc);
                        goto destroy_wq;
                }
                break;
 
        default:
-               nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
-                           dev->device_type);
+               nfc_err(&dev->interface->dev, "Unknown device type %d\n",
+                       dev->device_type);
                rc = -EINVAL;
                goto destroy_wq;
        }
@@ -2997,9 +3229,9 @@ static int pn533_probe(struct usb_interface *interface,
        if (rc < 0)
                goto destroy_wq;
 
-       nfc_dev_info(&dev->interface->dev,
-                    "NXP PN5%02X firmware ver %d.%d now attached",
-                    fw_ver.ic, fw_ver.ver, fw_ver.rev);
+       nfc_info(&dev->interface->dev,
+                "NXP PN5%02X firmware ver %d.%d now attached\n",
+                fw_ver.ic, fw_ver.ver, fw_ver.rev);
 
 
        dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
@@ -3070,7 +3302,7 @@ static void pn533_disconnect(struct usb_interface *interface)
        usb_free_urb(dev->out_urb);
        kfree(dev);
 
-       nfc_dev_info(&interface->dev, "NXP PN533 NFC device disconnected");
+       nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n");
 }
 
 static struct usb_driver pn533_driver = {
index 01e27d4bdd0d7abcfdc4d91c7f6963974f3b4e48..b158ee1c2ac69fc8ec0ff3fc667df10e5eb3f70e 100644 (file)
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/crc-ccitt.h>
 #include <linux/module.h>
 #include <linux/i2c.h>
@@ -151,8 +153,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
        char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
        int count = sizeof(rset_cmd);
 
-       pr_info(DRIVER_DESC ": %s\n", __func__);
-       dev_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
+       nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
 
        /* Disable fw download */
        gpio_set_value(phy->gpio_fw, 0);
@@ -173,7 +174,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
                        dev_dbg(&phy->i2c_dev->dev, "Sending reset cmd\n");
                        ret = i2c_master_send(phy->i2c_dev, rset_cmd, count);
                        if (ret == count) {
-                               dev_info(&phy->i2c_dev->dev,
+                               nfc_info(&phy->i2c_dev->dev,
                                         "nfc_en polarity : active %s\n",
                                         (polarity == 0 ? "low" : "high"));
                                goto out;
@@ -181,7 +182,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
                }
        }
 
-       dev_err(&phy->i2c_dev->dev,
+       nfc_err(&phy->i2c_dev->dev,
                "Could not detect nfc_en polarity, fallback to active high\n");
 
 out:
@@ -201,7 +202,7 @@ static int pn544_hci_i2c_enable(void *phy_id)
 {
        struct pn544_i2c_phy *phy = phy_id;
 
-       pr_info(DRIVER_DESC ": %s\n", __func__);
+       pr_info("%s\n", __func__);
 
        pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE);
 
@@ -214,8 +215,6 @@ static void pn544_hci_i2c_disable(void *phy_id)
 {
        struct pn544_i2c_phy *phy = phy_id;
 
-       pr_info(DRIVER_DESC ": %s\n", __func__);
-
        gpio_set_value(phy->gpio_fw, 0);
        gpio_set_value(phy->gpio_en, !phy->en_polarity);
        usleep_range(10000, 15000);
@@ -298,11 +297,9 @@ static int check_crc(u8 *buf, int buflen)
        crc = ~crc;
 
        if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) {
-               pr_err(PN544_HCI_I2C_DRIVER_NAME
-                      ": CRC error 0x%x != 0x%x 0x%x\n",
+               pr_err("CRC error 0x%x != 0x%x 0x%x\n",
                       crc, buf[len - 1], buf[len - 2]);
-
-               pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__);
+               pr_info("%s: BAD CRC\n", __func__);
                print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
                               16, 2, buf, buflen, false);
                return -EPERM;
@@ -328,13 +325,13 @@ static int pn544_hci_i2c_read(struct pn544_i2c_phy *phy, struct sk_buff **skb)
 
        r = i2c_master_recv(client, &len, 1);
        if (r != 1) {
-               dev_err(&client->dev, "cannot read len byte\n");
+               nfc_err(&client->dev, "cannot read len byte\n");
                return -EREMOTEIO;
        }
 
        if ((len < (PN544_HCI_I2C_LLC_MIN_SIZE - 1)) ||
            (len > (PN544_HCI_I2C_LLC_MAX_SIZE - 1))) {
-               dev_err(&client->dev, "invalid len byte\n");
+               nfc_err(&client->dev, "invalid len byte\n");
                r = -EBADMSG;
                goto flush;
        }
@@ -386,7 +383,7 @@ static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
 
        r = i2c_master_recv(client, (char *) &response, sizeof(response));
        if (r != sizeof(response)) {
-               dev_err(&client->dev, "cannot read fw status\n");
+               nfc_err(&client->dev, "cannot read fw status\n");
                return -EIO;
        }
 
@@ -478,8 +475,7 @@ static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
 {
        struct pn544_i2c_phy *phy = phy_id;
 
-       pr_info(DRIVER_DESC ": Starting Firmware Download (%s)\n",
-               firmware_name);
+       pr_info("Starting Firmware Download (%s)\n", firmware_name);
 
        strcpy(phy->firmware_name, firmware_name);
 
@@ -493,7 +489,7 @@ static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
 static void pn544_hci_i2c_fw_work_complete(struct pn544_i2c_phy *phy,
                                           int result)
 {
-       pr_info(DRIVER_DESC ": Firmware Download Complete, result=%d\n", result);
+       pr_info("Firmware Download Complete, result=%d\n", result);
 
        pn544_hci_i2c_disable(phy);
 
@@ -694,14 +690,14 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
        dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
 
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
-               dev_err(&client->dev, "Need I2C_FUNC_I2C\n");
+               nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
                return -ENODEV;
        }
 
        phy = devm_kzalloc(&client->dev, sizeof(struct pn544_i2c_phy),
                           GFP_KERNEL);
        if (!phy) {
-               dev_err(&client->dev,
+               nfc_err(&client->dev,
                        "Cannot allocate memory for pn544 i2c phy.\n");
                return -ENOMEM;
        }
@@ -714,18 +710,18 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
 
        pdata = client->dev.platform_data;
        if (pdata == NULL) {
-               dev_err(&client->dev, "No platform data\n");
+               nfc_err(&client->dev, "No platform data\n");
                return -EINVAL;
        }
 
        if (pdata->request_resources == NULL) {
-               dev_err(&client->dev, "request_resources() missing\n");
+               nfc_err(&client->dev, "request_resources() missing\n");
                return -EINVAL;
        }
 
        r = pdata->request_resources(client);
        if (r) {
-               dev_err(&client->dev, "Cannot get platform resources\n");
+               nfc_err(&client->dev, "Cannot get platform resources\n");
                return r;
        }
 
@@ -739,7 +735,7 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
                                 IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                                 PN544_HCI_I2C_DRIVER_NAME, phy);
        if (r < 0) {
-               dev_err(&client->dev, "Unable to register IRQ handler\n");
+               nfc_err(&client->dev, "Unable to register IRQ handler\n");
                goto err_rti;
        }
 
index 078e62feba1715e9c4cc3bfd246e6c947082735b..74cfa0a88b9e7c0ae6cdedd66d77deedd15e9872 100644 (file)
@@ -18,6 +18,8 @@
  * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
@@ -41,6 +43,7 @@ enum pn544_state {
 
 /* Proprietary commands */
 #define PN544_WRITE            0x3f
+#define PN544_TEST_SWP         0x21
 
 /* Proprietary gates, events, commands and registers */
 
@@ -81,14 +84,17 @@ enum pn544_state {
 #define PN544_PL_NFCT_DEACTIVATED              0x09
 
 #define PN544_SWP_MGMT_GATE                    0xA0
+#define PN544_SWP_DEFAULT_MODE                 0x01
 
 #define PN544_NFC_WI_MGMT_GATE                 0xA1
+#define PN544_NFC_ESE_DEFAULT_MODE             0x01
 
 #define PN544_HCI_EVT_SND_DATA                 0x01
 #define PN544_HCI_EVT_ACTIVATED                        0x02
 #define PN544_HCI_EVT_DEACTIVATED              0x03
 #define PN544_HCI_EVT_RCV_DATA                 0x04
 #define PN544_HCI_EVT_CONTINUE_MI              0x05
+#define PN544_HCI_EVT_SWITCH_MODE              0x03
 
 #define PN544_HCI_CMD_ATTREQUEST               0x12
 #define PN544_HCI_CMD_CONTINUE_ACTIVATION      0x13
@@ -187,13 +193,6 @@ static int pn544_hci_ready(struct nfc_hci_dev *hdev)
 
                {{0x9e, 0xb4}, 0x00},
 
-               {{0x9e, 0xd9}, 0xff},
-               {{0x9e, 0xda}, 0xff},
-               {{0x9e, 0xdb}, 0x23},
-               {{0x9e, 0xdc}, 0x21},
-               {{0x9e, 0xdd}, 0x22},
-               {{0x9e, 0xde}, 0x24},
-
                {{0x9c, 0x01}, 0x08},
 
                {{0x9e, 0xaa}, 0x01},
@@ -394,7 +393,7 @@ static int pn544_hci_start_poll(struct nfc_hci_dev *hdev,
        if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) {
                hdev->gb = nfc_get_local_general_bytes(hdev->ndev,
                                                        &hdev->gb_len);
-               pr_debug("generate local bytes %p", hdev->gb);
+               pr_debug("generate local bytes %p\n", hdev->gb);
                if (hdev->gb == NULL || hdev->gb_len == 0) {
                        im_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
                        tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK;
@@ -696,7 +695,7 @@ static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
 static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
                                   struct nfc_target *target)
 {
-       pr_debug("supported protocol %d", target->supported_protocols);
+       pr_debug("supported protocol %d\b", target->supported_protocols);
        if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK |
                                        NFC_PROTO_ISO14443_B_MASK)) {
                return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
@@ -733,7 +732,7 @@ static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, u8 event,
        struct sk_buff *rgb_skb = NULL;
        int r;
 
-       pr_debug("hci event %d", event);
+       pr_debug("hci event %d\n", event);
        switch (event) {
        case PN544_HCI_EVT_ACTIVATED:
                if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE) {
@@ -764,7 +763,7 @@ static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 gate, u8 event,
                }
 
                if (skb->data[0] != 0) {
-                       pr_debug("data0 %d", skb->data[0]);
+                       pr_debug("data0 %d\n", skb->data[0]);
                        r = -EPROTO;
                        goto exit;
                }
@@ -792,6 +791,108 @@ static int pn544_hci_fw_download(struct nfc_hci_dev *hdev,
        return info->fw_download(info->phy_id, firmware_name);
 }
 
+static int pn544_hci_discover_se(struct nfc_hci_dev *hdev)
+{
+       u32 se_idx = 0;
+       u8 ese_mode = 0x01; /* Default mode */
+       struct sk_buff *res_skb;
+       int r;
+
+       r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_TEST_SWP,
+                            NULL, 0, &res_skb);
+
+       if (r == 0) {
+               if (res_skb->len == 2 && res_skb->data[0] == 0x00)
+                       nfc_add_se(hdev->ndev, se_idx++, NFC_SE_UICC);
+
+               kfree_skb(res_skb);
+       }
+
+       r = nfc_hci_send_event(hdev, PN544_NFC_WI_MGMT_GATE,
+                               PN544_HCI_EVT_SWITCH_MODE,
+                               &ese_mode, 1);
+       if (r == 0)
+               nfc_add_se(hdev->ndev, se_idx++, NFC_SE_EMBEDDED);
+
+       return !se_idx;
+}
+
+#define PN544_SE_MODE_OFF      0x00
+#define PN544_SE_MODE_ON       0x01
+static int pn544_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+       struct nfc_se *se;
+       u8 enable = PN544_SE_MODE_ON;
+       static struct uicc_gatelist {
+               u8 head;
+               u8 adr[2];
+               u8 value;
+       } uicc_gatelist[] = {
+               {0x00, {0x9e, 0xd9}, 0x23},
+               {0x00, {0x9e, 0xda}, 0x21},
+               {0x00, {0x9e, 0xdb}, 0x22},
+               {0x00, {0x9e, 0xdc}, 0x24},
+       };
+       struct uicc_gatelist *p = uicc_gatelist;
+       int count = ARRAY_SIZE(uicc_gatelist);
+       struct sk_buff *res_skb;
+       int r;
+
+       se = nfc_find_se(hdev->ndev, se_idx);
+
+       switch (se->type) {
+       case NFC_SE_UICC:
+               while (count--) {
+                       r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE,
+                                       PN544_WRITE, (u8 *)p, 4, &res_skb);
+                       if (r < 0)
+                               return r;
+
+                       if (res_skb->len != 1) {
+                               kfree_skb(res_skb);
+                               return -EPROTO;
+                       }
+
+                       if (res_skb->data[0] != p->value) {
+                               kfree_skb(res_skb);
+                               return -EIO;
+                       }
+
+                       kfree_skb(res_skb);
+
+                       p++;
+               }
+
+               return nfc_hci_set_param(hdev, PN544_SWP_MGMT_GATE,
+                             PN544_SWP_DEFAULT_MODE, &enable, 1);
+       case NFC_SE_EMBEDDED:
+               return nfc_hci_set_param(hdev, PN544_NFC_WI_MGMT_GATE,
+                             PN544_NFC_ESE_DEFAULT_MODE, &enable, 1);
+
+       default:
+               return -EINVAL;
+       }
+}
+
+static int pn544_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx)
+{
+       struct nfc_se *se;
+       u8 disable = PN544_SE_MODE_OFF;
+
+       se = nfc_find_se(hdev->ndev, se_idx);
+
+       switch (se->type) {
+       case NFC_SE_UICC:
+               return nfc_hci_set_param(hdev, PN544_SWP_MGMT_GATE,
+                             PN544_SWP_DEFAULT_MODE, &disable, 1);
+       case NFC_SE_EMBEDDED:
+               return nfc_hci_set_param(hdev, PN544_NFC_WI_MGMT_GATE,
+                             PN544_NFC_ESE_DEFAULT_MODE, &disable, 1);
+       default:
+               return -EINVAL;
+       }
+}
+
 static struct nfc_hci_ops pn544_hci_ops = {
        .open = pn544_hci_open,
        .close = pn544_hci_close,
@@ -807,6 +908,9 @@ static struct nfc_hci_ops pn544_hci_ops = {
        .check_presence = pn544_hci_check_presence,
        .event_received = pn544_hci_event_received,
        .fw_download = pn544_hci_fw_download,
+       .discover_se = pn544_hci_discover_se,
+       .enable_se = pn544_hci_enable_se,
+       .disable_se = pn544_hci_disable_se,
 };
 
 int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
@@ -820,7 +924,6 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
 
        info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL);
        if (!info) {
-               pr_err("Cannot allocate memory for pn544_hci_info.\n");
                r = -ENOMEM;
                goto err_info_alloc;
        }
@@ -853,7 +956,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
                                             phy_headroom + PN544_CMDS_HEADROOM,
                                             phy_tailroom, phy_payload);
        if (!info->hdev) {
-               pr_err("Cannot allocate nfc hdev.\n");
+               pr_err("Cannot allocate nfc hdev\n");
                r = -ENOMEM;
                goto err_alloc_hdev;
        }
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
new file mode 100644 (file)
index 0000000..8a0571e
--- /dev/null
@@ -0,0 +1,1529 @@
+/*
+ * Sony NFC Port-100 Series driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * Partly based/Inspired by Stephen Tiedemann's nfcpy
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <net/nfc/digital.h>
+
+#define VERSION "0.1"
+
+#define SONY_VENDOR_ID    0x054c
+#define RCS380_PRODUCT_ID 0x06c1
+
+#define PORT100_PROTOCOLS (NFC_PROTO_JEWEL_MASK    | \
+                          NFC_PROTO_MIFARE_MASK   | \
+                          NFC_PROTO_FELICA_MASK   | \
+                          NFC_PROTO_NFC_DEP_MASK)
+
+#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
+                             NFC_DIGITAL_DRV_CAPS_TG_CRC)
+
+/* Standard port100 frame definitions */
+#define PORT100_FRAME_HEADER_LEN (sizeof(struct port100_frame) \
+                                 + 2) /* data[0] CC, data[1] SCC */
+#define PORT100_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble */
+
+#define PORT100_COMM_RF_HEAD_MAX_LEN (sizeof(struct port100_tg_comm_rf_cmd))
+
+/*
+ * Max extended frame payload len, excluding CC and SCC
+ * which are already in PORT100_FRAME_HEADER_LEN.
+ */
+#define PORT100_FRAME_MAX_PAYLOAD_LEN 1001
+
+#define PORT100_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2),
+                                   Postamble (1) */
+static u8 ack_frame[PORT100_FRAME_ACK_SIZE] = {
+       0x00, 0x00, 0xff, 0x00, 0xff, 0x00
+};
+
+#define PORT100_FRAME_CHECKSUM(f) (f->data[le16_to_cpu(f->datalen)])
+#define PORT100_FRAME_POSTAMBLE(f) (f->data[le16_to_cpu(f->datalen) + 1])
+
+/* start of frame */
+#define PORT100_FRAME_SOF      0x00FF
+#define PORT100_FRAME_EXT      0xFFFF
+#define PORT100_FRAME_ACK      0x00FF
+
+/* Port-100 command: in or out */
+#define PORT100_FRAME_DIRECTION(f) (f->data[0]) /* CC */
+#define PORT100_FRAME_DIR_OUT 0xD6
+#define PORT100_FRAME_DIR_IN  0xD7
+
+/* Port-100 sub-command */
+#define PORT100_FRAME_CMD(f) (f->data[1]) /* SCC */
+
+#define PORT100_CMD_GET_FIRMWARE_VERSION 0x20
+#define PORT100_CMD_GET_COMMAND_TYPE     0x28
+#define PORT100_CMD_SET_COMMAND_TYPE     0x2A
+
+#define PORT100_CMD_IN_SET_RF       0x00
+#define PORT100_CMD_IN_SET_PROTOCOL 0x02
+#define PORT100_CMD_IN_COMM_RF      0x04
+
+#define PORT100_CMD_TG_SET_RF       0x40
+#define PORT100_CMD_TG_SET_PROTOCOL 0x42
+#define PORT100_CMD_TG_SET_RF_OFF   0x46
+#define PORT100_CMD_TG_COMM_RF      0x48
+
+#define PORT100_CMD_SWITCH_RF       0x06
+
+#define PORT100_CMD_RESPONSE(cmd) (cmd + 1)
+
+#define PORT100_CMD_TYPE_IS_SUPPORTED(mask, cmd_type) \
+       ((mask) & (0x01 << (cmd_type)))
+#define PORT100_CMD_TYPE_0     0
+#define PORT100_CMD_TYPE_1     1
+
+#define PORT100_CMD_STATUS_OK      0x00
+#define PORT100_CMD_STATUS_TIMEOUT 0x80
+
+#define PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK 0x01
+#define PORT100_MDAA_TGT_WAS_ACTIVATED_MASK      0x02
+
+struct port100;
+
+typedef void (*port100_send_async_complete_t)(struct port100 *dev, void *arg,
+                                             struct sk_buff *resp);
+
+/**
+ * Setting sets structure for in_set_rf command
+ *
+ * @in_*_set_number: Represent the entry indexes in the port-100 RF Base Table.
+ *              This table contains multiple RF setting sets required for RF
+ *              communication.
+ *
+ * @in_*_comm_type: These fields set the communication type to be used.
+ */
+struct port100_in_rf_setting {
+       u8 in_send_set_number;
+       u8 in_send_comm_type;
+       u8 in_recv_set_number;
+       u8 in_recv_comm_type;
+} __packed;
+
+#define PORT100_COMM_TYPE_IN_212F 0x01
+#define PORT100_COMM_TYPE_IN_424F 0x02
+#define PORT100_COMM_TYPE_IN_106A 0x03
+
+static const struct port100_in_rf_setting in_rf_settings[] = {
+       [NFC_DIGITAL_RF_TECH_212F] = {
+               .in_send_set_number = 1,
+               .in_send_comm_type  = PORT100_COMM_TYPE_IN_212F,
+               .in_recv_set_number = 15,
+               .in_recv_comm_type  = PORT100_COMM_TYPE_IN_212F,
+       },
+       [NFC_DIGITAL_RF_TECH_424F] = {
+               .in_send_set_number = 1,
+               .in_send_comm_type  = PORT100_COMM_TYPE_IN_424F,
+               .in_recv_set_number = 15,
+               .in_recv_comm_type  = PORT100_COMM_TYPE_IN_424F,
+       },
+       [NFC_DIGITAL_RF_TECH_106A] = {
+               .in_send_set_number = 2,
+               .in_send_comm_type  = PORT100_COMM_TYPE_IN_106A,
+               .in_recv_set_number = 15,
+               .in_recv_comm_type  = PORT100_COMM_TYPE_IN_106A,
+       },
+};
+
+/**
+ * Setting sets structure for tg_set_rf command
+ *
+ * @tg_set_number: Represents the entry index in the port-100 RF Base Table.
+ *                 This table contains multiple RF setting sets required for RF
+ *                 communication. This field is used for both send and receive
+ *                 settings.
+ *
+ * @tg_comm_type: Sets the communication type to be used to send and receive
+ *                data.
+ */
+struct port100_tg_rf_setting {
+       u8 tg_set_number;
+       u8 tg_comm_type;
+} __packed;
+
+#define PORT100_COMM_TYPE_TG_106A 0x0B
+#define PORT100_COMM_TYPE_TG_212F 0x0C
+#define PORT100_COMM_TYPE_TG_424F 0x0D
+
+static const struct port100_tg_rf_setting tg_rf_settings[] = {
+       [NFC_DIGITAL_RF_TECH_106A] = {
+               .tg_set_number = 8,
+               .tg_comm_type = PORT100_COMM_TYPE_TG_106A,
+       },
+       [NFC_DIGITAL_RF_TECH_212F] = {
+               .tg_set_number = 8,
+               .tg_comm_type = PORT100_COMM_TYPE_TG_212F,
+       },
+       [NFC_DIGITAL_RF_TECH_424F] = {
+               .tg_set_number = 8,
+               .tg_comm_type = PORT100_COMM_TYPE_TG_424F,
+       },
+};
+
+#define PORT100_IN_PROT_INITIAL_GUARD_TIME      0x00
+#define PORT100_IN_PROT_ADD_CRC                 0x01
+#define PORT100_IN_PROT_CHECK_CRC               0x02
+#define PORT100_IN_PROT_MULTI_CARD              0x03
+#define PORT100_IN_PROT_ADD_PARITY              0x04
+#define PORT100_IN_PROT_CHECK_PARITY            0x05
+#define PORT100_IN_PROT_BITWISE_AC_RECV_MODE    0x06
+#define PORT100_IN_PROT_VALID_BIT_NUMBER        0x07
+#define PORT100_IN_PROT_CRYPTO1                 0x08
+#define PORT100_IN_PROT_ADD_SOF                 0x09
+#define PORT100_IN_PROT_CHECK_SOF               0x0A
+#define PORT100_IN_PROT_ADD_EOF                 0x0B
+#define PORT100_IN_PROT_CHECK_EOF               0x0C
+#define PORT100_IN_PROT_DEAF_TIME               0x0E
+#define PORT100_IN_PROT_CRM                     0x0F
+#define PORT100_IN_PROT_CRM_MIN_LEN             0x10
+#define PORT100_IN_PROT_T1_TAG_FRAME            0x11
+#define PORT100_IN_PROT_RFCA                    0x12
+#define PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR 0x13
+#define PORT100_IN_PROT_END                     0x14
+
+#define PORT100_IN_MAX_NUM_PROTOCOLS            19
+
+#define PORT100_TG_PROT_TU           0x00
+#define PORT100_TG_PROT_RF_OFF       0x01
+#define PORT100_TG_PROT_CRM          0x02
+#define PORT100_TG_PROT_END          0x03
+
+#define PORT100_TG_MAX_NUM_PROTOCOLS 3
+
+struct port100_protocol {
+       u8 number;
+       u8 value;
+} __packed;
+
+static struct port100_protocol
+in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = {
+       [NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
+               { PORT100_IN_PROT_INITIAL_GUARD_TIME,      6 },
+               { PORT100_IN_PROT_ADD_CRC,                 0 },
+               { PORT100_IN_PROT_CHECK_CRC,               0 },
+               { PORT100_IN_PROT_MULTI_CARD,              0 },
+               { PORT100_IN_PROT_ADD_PARITY,              0 },
+               { PORT100_IN_PROT_CHECK_PARITY,            1 },
+               { PORT100_IN_PROT_BITWISE_AC_RECV_MODE,    0 },
+               { PORT100_IN_PROT_VALID_BIT_NUMBER,        7 },
+               { PORT100_IN_PROT_CRYPTO1,                 0 },
+               { PORT100_IN_PROT_ADD_SOF,                 0 },
+               { PORT100_IN_PROT_CHECK_SOF,               0 },
+               { PORT100_IN_PROT_ADD_EOF,                 0 },
+               { PORT100_IN_PROT_CHECK_EOF,               0 },
+               { PORT100_IN_PROT_DEAF_TIME,               4 },
+               { PORT100_IN_PROT_CRM,                     0 },
+               { PORT100_IN_PROT_CRM_MIN_LEN,             0 },
+               { PORT100_IN_PROT_T1_TAG_FRAME,            0 },
+               { PORT100_IN_PROT_RFCA,                    0 },
+               { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+               { PORT100_IN_PROT_END,                     0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCA_STANDARD] = {
+               { PORT100_IN_PROT_INITIAL_GUARD_TIME,      6 },
+               { PORT100_IN_PROT_ADD_CRC,                 0 },
+               { PORT100_IN_PROT_CHECK_CRC,               0 },
+               { PORT100_IN_PROT_MULTI_CARD,              0 },
+               { PORT100_IN_PROT_ADD_PARITY,              1 },
+               { PORT100_IN_PROT_CHECK_PARITY,            1 },
+               { PORT100_IN_PROT_BITWISE_AC_RECV_MODE,    0 },
+               { PORT100_IN_PROT_VALID_BIT_NUMBER,        8 },
+               { PORT100_IN_PROT_CRYPTO1,                 0 },
+               { PORT100_IN_PROT_ADD_SOF,                 0 },
+               { PORT100_IN_PROT_CHECK_SOF,               0 },
+               { PORT100_IN_PROT_ADD_EOF,                 0 },
+               { PORT100_IN_PROT_CHECK_EOF,               0 },
+               { PORT100_IN_PROT_DEAF_TIME,               4 },
+               { PORT100_IN_PROT_CRM,                     0 },
+               { PORT100_IN_PROT_CRM_MIN_LEN,             0 },
+               { PORT100_IN_PROT_T1_TAG_FRAME,            0 },
+               { PORT100_IN_PROT_RFCA,                    0 },
+               { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+               { PORT100_IN_PROT_END,                     0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = {
+               { PORT100_IN_PROT_INITIAL_GUARD_TIME,      6 },
+               { PORT100_IN_PROT_ADD_CRC,                 1 },
+               { PORT100_IN_PROT_CHECK_CRC,               1 },
+               { PORT100_IN_PROT_MULTI_CARD,              0 },
+               { PORT100_IN_PROT_ADD_PARITY,              1 },
+               { PORT100_IN_PROT_CHECK_PARITY,            1 },
+               { PORT100_IN_PROT_BITWISE_AC_RECV_MODE,    0 },
+               { PORT100_IN_PROT_VALID_BIT_NUMBER,        8 },
+               { PORT100_IN_PROT_CRYPTO1,                 0 },
+               { PORT100_IN_PROT_ADD_SOF,                 0 },
+               { PORT100_IN_PROT_CHECK_SOF,               0 },
+               { PORT100_IN_PROT_ADD_EOF,                 0 },
+               { PORT100_IN_PROT_CHECK_EOF,               0 },
+               { PORT100_IN_PROT_DEAF_TIME,               4 },
+               { PORT100_IN_PROT_CRM,                     0 },
+               { PORT100_IN_PROT_CRM_MIN_LEN,             0 },
+               { PORT100_IN_PROT_T1_TAG_FRAME,            0 },
+               { PORT100_IN_PROT_RFCA,                    0 },
+               { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+               { PORT100_IN_PROT_END,                     0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCA_T1T] = {
+               /* nfc_digital_framing_nfca_short */
+               { PORT100_IN_PROT_ADD_CRC,          2 },
+               { PORT100_IN_PROT_CHECK_CRC,        2 },
+               { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 },
+               { PORT100_IN_PROT_T1_TAG_FRAME,     2 },
+               { PORT100_IN_PROT_END,              0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCA_T2T] = {
+               /* nfc_digital_framing_nfca_standard */
+               { PORT100_IN_PROT_ADD_CRC,   1 },
+               { PORT100_IN_PROT_CHECK_CRC, 0 },
+               { PORT100_IN_PROT_END,       0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
+               /* nfc_digital_framing_nfca_standard */
+               { PORT100_IN_PROT_END, 0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCF] = {
+               { PORT100_IN_PROT_INITIAL_GUARD_TIME,     18 },
+               { PORT100_IN_PROT_ADD_CRC,                 1 },
+               { PORT100_IN_PROT_CHECK_CRC,               1 },
+               { PORT100_IN_PROT_MULTI_CARD,              0 },
+               { PORT100_IN_PROT_ADD_PARITY,              0 },
+               { PORT100_IN_PROT_CHECK_PARITY,            0 },
+               { PORT100_IN_PROT_BITWISE_AC_RECV_MODE,    0 },
+               { PORT100_IN_PROT_VALID_BIT_NUMBER,        8 },
+               { PORT100_IN_PROT_CRYPTO1,                 0 },
+               { PORT100_IN_PROT_ADD_SOF,                 0 },
+               { PORT100_IN_PROT_CHECK_SOF,               0 },
+               { PORT100_IN_PROT_ADD_EOF,                 0 },
+               { PORT100_IN_PROT_CHECK_EOF,               0 },
+               { PORT100_IN_PROT_DEAF_TIME,               4 },
+               { PORT100_IN_PROT_CRM,                     0 },
+               { PORT100_IN_PROT_CRM_MIN_LEN,             0 },
+               { PORT100_IN_PROT_T1_TAG_FRAME,            0 },
+               { PORT100_IN_PROT_RFCA,                    0 },
+               { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 },
+               { PORT100_IN_PROT_END,                     0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCF_T3T] = {
+               /* nfc_digital_framing_nfcf */
+               { PORT100_IN_PROT_END, 0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = {
+               /* nfc_digital_framing_nfcf */
+               { PORT100_IN_PROT_END, 0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
+               { PORT100_IN_PROT_END, 0 },
+       },
+};
+
+static struct port100_protocol
+tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = {
+       [NFC_DIGITAL_FRAMING_NFCA_SHORT] = {
+               { PORT100_TG_PROT_END, 0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCA_STANDARD] = {
+               { PORT100_TG_PROT_END, 0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = {
+               { PORT100_TG_PROT_END, 0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCA_T1T] = {
+               { PORT100_TG_PROT_END, 0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCA_T2T] = {
+               { PORT100_TG_PROT_END, 0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = {
+               { PORT100_TG_PROT_TU,     1 },
+               { PORT100_TG_PROT_RF_OFF, 0 },
+               { PORT100_TG_PROT_CRM,    7 },
+               { PORT100_TG_PROT_END,    0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCF] = {
+               { PORT100_TG_PROT_END, 0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCF_T3T] = {
+               { PORT100_TG_PROT_END, 0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = {
+               { PORT100_TG_PROT_TU,     1 },
+               { PORT100_TG_PROT_RF_OFF, 0 },
+               { PORT100_TG_PROT_CRM,    7 },
+               { PORT100_TG_PROT_END,    0 },
+       },
+       [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = {
+               { PORT100_TG_PROT_RF_OFF, 1 },
+               { PORT100_TG_PROT_END,    0 },
+       },
+};
+
+struct port100 {
+       struct nfc_digital_dev *nfc_digital_dev;
+
+       int skb_headroom;
+       int skb_tailroom;
+
+       struct usb_device *udev;
+       struct usb_interface *interface;
+
+       struct urb *out_urb;
+       struct urb *in_urb;
+
+       struct work_struct cmd_complete_work;
+
+       u8 cmd_type;
+
+       /* The digital stack serializes commands to be sent. There is no need
+        * for any queuing/locking mechanism at driver level.
+        */
+       struct port100_cmd *cmd;
+};
+
+struct port100_cmd {
+       u8 code;
+       int status;
+       struct sk_buff *req;
+       struct sk_buff *resp;
+       int resp_len;
+       port100_send_async_complete_t  complete_cb;
+       void *complete_cb_context;
+};
+
+struct port100_frame {
+       u8 preamble;
+       __be16 start_frame;
+       __be16 extended_frame;
+       __le16 datalen;
+       u8 datalen_checksum;
+       u8 data[];
+} __packed;
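A minimal user-space sketch (illustration only, assuming nothing beyond the packed layout above): the struct itself is 8 bytes, so PORT100_FRAME_HEADER_LEN works out to 10 bytes once CC and SCC are counted, and a maximally sized response needs 10 + 1001 + 2 = 1013 bytes, which is the resp_len later allocated by port100_send_cmd_async().

/* Sketch: mirror of struct port100_frame with host types. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct frame_mirror {
	uint8_t  preamble;
	uint16_t start_frame;     /* __be16 on the wire */
	uint16_t extended_frame;  /* __be16 on the wire */
	uint16_t datalen;         /* __le16 on the wire */
	uint8_t  datalen_checksum;
	uint8_t  data[];
} __attribute__((packed));

int main(void)
{
	size_t header = sizeof(struct frame_mirror) + 2; /* + CC, SCC */
	size_t tail = 2;                                 /* DCS, postamble */

	assert(sizeof(struct frame_mirror) == 8);
	/* Largest command response buffer: 10 + 1001 + 2 = 1013 bytes. */
	printf("header %zu, max frame %zu\n", header, header + 1001 + tail);
	return 0;
}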
+
+struct port100_ack_frame {
+       u8 preamble;
+       __be16 start_frame;
+       __be16 ack_frame;
+       u8 postambule;
+} __packed;
+
+struct port100_cb_arg {
+       nfc_digital_cmd_complete_t complete_cb;
+       void *complete_arg;
+       u8 mdaa;
+};
+
+struct port100_tg_comm_rf_cmd {
+       __le16 guard_time;
+       __le16 send_timeout;
+       u8 mdaa;
+       u8 nfca_param[6];
+       u8 nfcf_param[18];
+       u8 mf_halted;
+       u8 arae_flag;
+       __le16 recv_timeout;
+       u8 data[];
+} __packed;
+
+struct port100_tg_comm_rf_res {
+       u8 comm_type;
+       u8 ar_status;
+       u8 target_activated;
+       __le32 status;
+       u8 data[];
+} __packed;
+
+/* The rule: value + checksum = 0 */
+static inline u8 port100_checksum(u16 value)
+{
+       return ~(((u8 *)&value)[0] + ((u8 *)&value)[1]) + 1;
+}
+
+/* The rule: sum(data elements) + checksum = 0 */
+static u8 port100_data_checksum(u8 *data, int datalen)
+{
+       u8 sum = 0;
+       int i;
+
+       for (i = 0; i < datalen; i++)
+               sum += data[i];
+
+       return port100_checksum(sum);
+}
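Both rules reduce to the same invariant: the byte sum of the protected bytes plus their checksum is 0 modulo 256. A small user-space sketch of that invariant (illustration only, values chosen to match the SWITCH_RF example shown further down):

/* Sketch of the Port-100 checksum invariant. */
#include <assert.h>
#include <stdint.h>

/* Same arithmetic as port100_checksum(): negate the byte sum of a u16. */
static uint8_t checksum16(uint16_t value)
{
	return (uint8_t)(~((uint8_t)value + (uint8_t)(value >> 8)) + 1);
}

int main(void)
{
	const uint8_t data[] = { 0xd6, 0x06, 0x01 }; /* CC, SCC, 1 payload byte */
	uint16_t datalen = 3;
	uint8_t sum = data[0] + data[1] + data[2];

	assert((uint8_t)(datalen + checksum16(datalen)) == 0); /* LCS = 0xfd */
	assert((uint8_t)(sum + checksum16(sum)) == 0);         /* DCS = 0x23 */
	return 0;
}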
+
+static void port100_tx_frame_init(void *_frame, u8 cmd_code)
+{
+       struct port100_frame *frame = _frame;
+
+       frame->preamble = 0;
+       frame->start_frame = cpu_to_be16(PORT100_FRAME_SOF);
+       frame->extended_frame = cpu_to_be16(PORT100_FRAME_EXT);
+       PORT100_FRAME_DIRECTION(frame) = PORT100_FRAME_DIR_OUT;
+       PORT100_FRAME_CMD(frame) = cmd_code;
+       frame->datalen = cpu_to_le16(2);
+}
+
+static void port100_tx_frame_finish(void *_frame)
+{
+       struct port100_frame *frame = _frame;
+
+       frame->datalen_checksum = port100_checksum(le16_to_cpu(frame->datalen));
+
+       PORT100_FRAME_CHECKSUM(frame) =
+               port100_data_checksum(frame->data, le16_to_cpu(frame->datalen));
+
+       PORT100_FRAME_POSTAMBLE(frame) = 0;
+}
+
+static void port100_tx_update_payload_len(void *_frame, int len)
+{
+       struct port100_frame *frame = _frame;
+
+       frame->datalen = cpu_to_le16(le16_to_cpu(frame->datalen) + len);
+}
+
+static bool port100_rx_frame_is_valid(void *_frame)
+{
+       u8 checksum;
+       struct port100_frame *frame = _frame;
+
+       if (frame->start_frame != cpu_to_be16(PORT100_FRAME_SOF) ||
+           frame->extended_frame != cpu_to_be16(PORT100_FRAME_EXT))
+               return false;
+
+       checksum = port100_checksum(le16_to_cpu(frame->datalen));
+       if (checksum != frame->datalen_checksum)
+               return false;
+
+       checksum = port100_data_checksum(frame->data,
+                                        le16_to_cpu(frame->datalen));
+       if (checksum != PORT100_FRAME_CHECKSUM(frame))
+               return false;
+
+       return true;
+}
+
+static bool port100_rx_frame_is_ack(struct port100_ack_frame *frame)
+{
+       return (frame->start_frame == cpu_to_be16(PORT100_FRAME_SOF) &&
+               frame->ack_frame == cpu_to_be16(PORT100_FRAME_ACK));
+}
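For reference, the six ack_frame bytes above decode exactly into the fields that port100_rx_frame_is_ack() checks; a tiny sketch (illustration only):

/* Sketch: the ACK frame decoded field by field. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint8_t ack[6] = { 0x00, 0x00, 0xff, 0x00, 0xff, 0x00 };

	assert(ack[0] == 0x00);                      /* preamble */
	assert(((ack[1] << 8) | ack[2]) == 0x00ff);  /* SoPC, big endian */
	assert(((ack[3] << 8) | ack[4]) == 0x00ff);  /* ACK code */
	assert(ack[5] == 0x00);                      /* postamble */
	return 0;
}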
+
+static inline int port100_rx_frame_size(void *frame)
+{
+       struct port100_frame *f = frame;
+
+       return sizeof(struct port100_frame) + le16_to_cpu(f->datalen) +
+              PORT100_FRAME_TAIL_LEN;
+}
+
+static bool port100_rx_frame_is_cmd_response(struct port100 *dev, void *frame)
+{
+       struct port100_frame *f = frame;
+
+       return (PORT100_FRAME_CMD(f) == PORT100_CMD_RESPONSE(dev->cmd->code));
+}
+
+static void port100_recv_response(struct urb *urb)
+{
+       struct port100 *dev = urb->context;
+       struct port100_cmd *cmd = dev->cmd;
+       u8 *in_frame;
+
+       cmd->status = urb->status;
+
+       switch (urb->status) {
+       case 0:
+               break; /* success */
+       case -ECONNRESET:
+       case -ENOENT:
+               nfc_err(&dev->interface->dev,
+                       "The urb has been canceled (status %d)", urb->status);
+               goto sched_wq;
+       case -ESHUTDOWN:
+       default:
+               nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+                       urb->status);
+               goto sched_wq;
+       }
+
+       in_frame = dev->in_urb->transfer_buffer;
+
+       if (!port100_rx_frame_is_valid(in_frame)) {
+               nfc_err(&dev->interface->dev, "Received an invalid frame");
+               cmd->status = -EIO;
+               goto sched_wq;
+       }
+
+       print_hex_dump_debug("PORT100 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
+                            port100_rx_frame_size(in_frame), false);
+
+       if (!port100_rx_frame_is_cmd_response(dev, in_frame)) {
+               nfc_err(&dev->interface->dev,
+                       "It's not the response to the last command");
+               cmd->status = -EIO;
+               goto sched_wq;
+       }
+
+sched_wq:
+       schedule_work(&dev->cmd_complete_work);
+}
+
+static int port100_submit_urb_for_response(struct port100 *dev, gfp_t flags)
+{
+       dev->in_urb->complete = port100_recv_response;
+
+       return usb_submit_urb(dev->in_urb, flags);
+}
+
+static void port100_recv_ack(struct urb *urb)
+{
+       struct port100 *dev = urb->context;
+       struct port100_cmd *cmd = dev->cmd;
+       struct port100_ack_frame *in_frame;
+       int rc;
+
+       cmd->status = urb->status;
+
+       switch (urb->status) {
+       case 0:
+               break; /* success */
+       case -ECONNRESET:
+       case -ENOENT:
+               nfc_err(&dev->interface->dev,
+                       "The urb has been stopped (status %d)", urb->status);
+               goto sched_wq;
+       case -ESHUTDOWN:
+       default:
+               nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+                       urb->status);
+               goto sched_wq;
+       }
+
+       in_frame = dev->in_urb->transfer_buffer;
+
+       if (!port100_rx_frame_is_ack(in_frame)) {
+               nfc_err(&dev->interface->dev, "Received an invalid ack");
+               cmd->status = -EIO;
+               goto sched_wq;
+       }
+
+       rc = port100_submit_urb_for_response(dev, GFP_ATOMIC);
+       if (rc) {
+               nfc_err(&dev->interface->dev,
+                       "usb_submit_urb failed with result %d", rc);
+               cmd->status = rc;
+               goto sched_wq;
+       }
+
+       return;
+
+sched_wq:
+       schedule_work(&dev->cmd_complete_work);
+}
+
+static int port100_submit_urb_for_ack(struct port100 *dev, gfp_t flags)
+{
+       dev->in_urb->complete = port100_recv_ack;
+
+       return usb_submit_urb(dev->in_urb, flags);
+}
+
+static int port100_send_ack(struct port100 *dev)
+{
+       int rc;
+
+       dev->out_urb->transfer_buffer = ack_frame;
+       dev->out_urb->transfer_buffer_length = sizeof(ack_frame);
+       rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
+
+       return rc;
+}
+
+static int port100_send_frame_async(struct port100 *dev, struct sk_buff *out,
+                                   struct sk_buff *in, int in_len)
+{
+       int rc;
+
+       dev->out_urb->transfer_buffer = out->data;
+       dev->out_urb->transfer_buffer_length = out->len;
+
+       dev->in_urb->transfer_buffer = in->data;
+       dev->in_urb->transfer_buffer_length = in_len;
+
+       print_hex_dump_debug("PORT100 TX: ", DUMP_PREFIX_NONE, 16, 1,
+                            out->data, out->len, false);
+
+       rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
+       if (rc)
+               return rc;
+
+       rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
+       if (rc)
+               goto error;
+
+       return 0;
+
+error:
+       usb_unlink_urb(dev->out_urb);
+       return rc;
+}
+
+static void port100_build_cmd_frame(struct port100 *dev, u8 cmd_code,
+                                   struct sk_buff *skb)
+{
+       /* payload is already there, just update datalen */
+       int payload_len = skb->len;
+
+       skb_push(skb, PORT100_FRAME_HEADER_LEN);
+       skb_put(skb, PORT100_FRAME_TAIL_LEN);
+
+       port100_tx_frame_init(skb->data, cmd_code);
+       port100_tx_update_payload_len(skb->data, payload_len);
+       port100_tx_frame_finish(skb->data);
+}
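Putting the helpers together, a worked example derived by hand (not part of the patch): the single-byte "RF on" payload built by port100_switch_rf() further down leaves the host as a 13-byte frame, i.e. the 10-byte header, one payload byte and the 2-byte tail:

/* Sketch: expected bytes for PORT100_CMD_SWITCH_RF with payload 0x01. */
#include <assert.h>
#include <stdint.h>

static const uint8_t switch_rf_on[13] = {
	0x00,             /* preamble */
	0x00, 0xff,       /* start of frame, big-endian 0x00FF */
	0xff, 0xff,       /* extended frame marker */
	0x03, 0x00,       /* datalen = 3 (CC + SCC + payload), little endian */
	0xfd,             /* datalen checksum: 0x03 + 0xfd == 0x100 */
	0xd6, 0x06, 0x01, /* CC (out), SCC (SWITCH_RF), payload (RF on) */
	0x23,             /* DCS: 0xd6 + 0x06 + 0x01 + 0x23 == 0x100 */
	0x00,             /* postamble */
};

int main(void)
{
	assert((uint8_t)(switch_rf_on[5] + switch_rf_on[6] +
			 switch_rf_on[7]) == 0);                 /* LCS rule */
	assert((uint8_t)(switch_rf_on[8] + switch_rf_on[9] +
			 switch_rf_on[10] + switch_rf_on[11]) == 0); /* DCS rule */
	return 0;
}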
+
+static void port100_send_async_complete(struct port100 *dev)
+{
+       struct port100_cmd *cmd = dev->cmd;
+       int status = cmd->status;
+
+       struct sk_buff *req = cmd->req;
+       struct sk_buff *resp = cmd->resp;
+
+       dev_kfree_skb(req);
+
+       dev->cmd = NULL;
+
+       if (status < 0) {
+               cmd->complete_cb(dev, cmd->complete_cb_context,
+                                ERR_PTR(status));
+               dev_kfree_skb(resp);
+               goto done;
+       }
+
+       skb_put(resp, port100_rx_frame_size(resp->data));
+       skb_pull(resp, PORT100_FRAME_HEADER_LEN);
+       skb_trim(resp, resp->len - PORT100_FRAME_TAIL_LEN);
+
+       cmd->complete_cb(dev, cmd->complete_cb_context, resp);
+
+done:
+       kfree(cmd);
+}
+
+static int port100_send_cmd_async(struct port100 *dev, u8 cmd_code,
+                               struct sk_buff *req,
+                               port100_send_async_complete_t complete_cb,
+                               void *complete_cb_context)
+{
+       struct port100_cmd *cmd;
+       struct sk_buff *resp;
+       int rc;
+       int  resp_len = PORT100_FRAME_HEADER_LEN +
+                       PORT100_FRAME_MAX_PAYLOAD_LEN +
+                       PORT100_FRAME_TAIL_LEN;
+
+       resp = alloc_skb(resp_len, GFP_KERNEL);
+       if (!resp)
+               return -ENOMEM;
+
+       cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+       if (!cmd) {
+               dev_kfree_skb(resp);
+               return -ENOMEM;
+       }
+
+       cmd->code = cmd_code;
+       cmd->req = req;
+       cmd->resp = resp;
+       cmd->resp_len = resp_len;
+       cmd->complete_cb = complete_cb;
+       cmd->complete_cb_context = complete_cb_context;
+
+       port100_build_cmd_frame(dev, cmd_code, req);
+
+       dev->cmd = cmd;
+
+       rc = port100_send_frame_async(dev, req, resp, resp_len);
+       if (rc) {
+               kfree(cmd);
+               dev_kfree_skb(resp);
+               dev->cmd = NULL;
+       }
+
+       return rc;
+}
+
+struct port100_sync_cmd_response {
+       struct sk_buff *resp;
+       struct completion done;
+};
+
+static void port100_wq_cmd_complete(struct work_struct *work)
+{
+       struct port100 *dev = container_of(work, struct port100,
+                                          cmd_complete_work);
+
+       port100_send_async_complete(dev);
+}
+
+static void port100_send_sync_complete(struct port100 *dev, void *_arg,
+                                     struct sk_buff *resp)
+{
+       struct port100_sync_cmd_response *arg = _arg;
+
+       arg->resp = resp;
+       complete(&arg->done);
+}
+
+static struct sk_buff *port100_send_cmd_sync(struct port100 *dev, u8 cmd_code,
+                                            struct sk_buff *req)
+{
+       int rc;
+       struct port100_sync_cmd_response arg;
+
+       init_completion(&arg.done);
+
+       rc = port100_send_cmd_async(dev, cmd_code, req,
+                                   port100_send_sync_complete, &arg);
+       if (rc) {
+               dev_kfree_skb(req);
+               return ERR_PTR(rc);
+       }
+
+       wait_for_completion(&arg.done);
+
+       return arg.resp;
+}
+
+static void port100_send_complete(struct urb *urb)
+{
+       struct port100 *dev = urb->context;
+
+       switch (urb->status) {
+       case 0:
+               break; /* success */
+       case -ECONNRESET:
+       case -ENOENT:
+               nfc_err(&dev->interface->dev,
+                       "The urb has been stopped (status %d)", urb->status);
+               break;
+       case -ESHUTDOWN:
+       default:
+               nfc_err(&dev->interface->dev, "Urb failure (status %d)",
+                       urb->status);
+       }
+}
+
+static void port100_abort_cmd(struct nfc_digital_dev *ddev)
+{
+       struct port100 *dev = nfc_digital_get_drvdata(ddev);
+
+       /* An ack will cancel the last issued command */
+       port100_send_ack(dev);
+
+       /* cancel the urb request */
+       usb_kill_urb(dev->in_urb);
+}
+
+static struct sk_buff *port100_alloc_skb(struct port100 *dev, unsigned int size)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(dev->skb_headroom + dev->skb_tailroom + size,
+                       GFP_KERNEL);
+       if (skb)
+               skb_reserve(skb, dev->skb_headroom);
+
+       return skb;
+}
+
+static int port100_set_command_type(struct port100 *dev, u8 command_type)
+{
+       struct sk_buff *skb;
+       struct sk_buff *resp;
+       int rc;
+
+       skb = port100_alloc_skb(dev, 1);
+       if (!skb)
+               return -ENOMEM;
+
+       *skb_put(skb, sizeof(u8)) = command_type;
+
+       resp = port100_send_cmd_sync(dev, PORT100_CMD_SET_COMMAND_TYPE, skb);
+       if (IS_ERR(resp))
+               return PTR_ERR(resp);
+
+       rc = resp->data[0];
+
+       dev_kfree_skb(resp);
+
+       return rc;
+}
+
+static u64 port100_get_command_type_mask(struct port100 *dev)
+{
+       struct sk_buff *skb;
+       struct sk_buff *resp;
+       u64 mask;
+
+       skb = port100_alloc_skb(dev, 0);
+       if (!skb)
+               return -ENOMEM;
+
+       resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
+       if (IS_ERR(resp))
+               return PTR_ERR(resp);
+
+       if (resp->len < 8)
+               mask = 0;
+       else
+               mask = be64_to_cpu(*(__be64 *)resp->data);
+
+       dev_kfree_skb(resp);
+
+       return mask;
+}
+
+static u16 port100_get_firmware_version(struct port100 *dev)
+{
+       struct sk_buff *skb;
+       struct sk_buff *resp;
+       u16 fw_ver;
+
+       skb = port100_alloc_skb(dev, 0);
+       if (!skb)
+               return 0;
+
+       resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_FIRMWARE_VERSION,
+                                    skb);
+       if (IS_ERR(resp))
+               return 0;
+
+       fw_ver = le16_to_cpu(*(__le16 *)resp->data);
+
+       dev_kfree_skb(resp);
+
+       return fw_ver;
+}
+
+static int port100_switch_rf(struct nfc_digital_dev *ddev, bool on)
+{
+       struct port100 *dev = nfc_digital_get_drvdata(ddev);
+       struct sk_buff *skb, *resp;
+
+       skb = port100_alloc_skb(dev, 1);
+       if (!skb)
+               return -ENOMEM;
+
+       *skb_put(skb, 1) = on ? 1 : 0;
+
+       resp = port100_send_cmd_sync(dev, PORT100_CMD_SWITCH_RF, skb);
+
+       if (IS_ERR(resp))
+               return PTR_ERR(resp);
+
+       dev_kfree_skb(resp);
+
+       return 0;
+}
+
+static int port100_in_set_rf(struct nfc_digital_dev *ddev, u8 rf)
+{
+       struct port100 *dev = nfc_digital_get_drvdata(ddev);
+       struct sk_buff *skb;
+       struct sk_buff *resp;
+       int rc;
+
+       if (rf >= NFC_DIGITAL_RF_TECH_LAST)
+               return -EINVAL;
+
+       skb = port100_alloc_skb(dev, sizeof(struct port100_in_rf_setting));
+       if (!skb)
+               return -ENOMEM;
+
+       memcpy(skb_put(skb, sizeof(struct port100_in_rf_setting)),
+              &in_rf_settings[rf],
+              sizeof(struct port100_in_rf_setting));
+
+       resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_RF, skb);
+
+       if (IS_ERR(resp))
+               return PTR_ERR(resp);
+
+       rc = resp->data[0];
+
+       dev_kfree_skb(resp);
+
+       return rc;
+}
+
+static int port100_in_set_framing(struct nfc_digital_dev *ddev, int param)
+{
+       struct port100 *dev = nfc_digital_get_drvdata(ddev);
+       struct port100_protocol *protocols;
+       struct sk_buff *skb;
+       struct sk_buff *resp;
+       int num_protocols;
+       size_t size;
+       int rc;
+
+       if (param >= NFC_DIGITAL_FRAMING_LAST)
+               return -EINVAL;
+
+       protocols = in_protocols[param];
+
+       num_protocols = 0;
+       while (protocols[num_protocols].number != PORT100_IN_PROT_END)
+               num_protocols++;
+
+       if (!num_protocols)
+               return 0;
+
+       size = sizeof(struct port100_protocol) * num_protocols;
+
+       skb = port100_alloc_skb(dev, size);
+       if (!skb)
+               return -ENOMEM;
+
+       memcpy(skb_put(skb, size), protocols, size);
+
+       resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_PROTOCOL, skb);
+
+       if (IS_ERR(resp))
+               return PTR_ERR(resp);
+
+       rc = resp->data[0];
+
+       dev_kfree_skb(resp);
+
+       return rc;
+}
+
+static int port100_in_configure_hw(struct nfc_digital_dev *ddev, int type,
+                                  int param)
+{
+       if (type == NFC_DIGITAL_CONFIG_RF_TECH)
+               return port100_in_set_rf(ddev, param);
+
+       if (type == NFC_DIGITAL_CONFIG_FRAMING)
+               return port100_in_set_framing(ddev, param);
+
+       return -EINVAL;
+}
+
+static void port100_in_comm_rf_complete(struct port100 *dev, void *arg,
+                                      struct sk_buff *resp)
+{
+       struct port100_cb_arg *cb_arg = arg;
+       nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
+       u32 status;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc =  PTR_ERR(resp);
+               goto exit;
+       }
+
+       if (resp->len < 4) {
+               nfc_err(&dev->interface->dev,
+                       "Invalid packet length received.\n");
+               rc = -EIO;
+               goto error;
+       }
+
+       status = le32_to_cpu(*(__le32 *)resp->data);
+
+       skb_pull(resp, sizeof(u32));
+
+       if (status == PORT100_CMD_STATUS_TIMEOUT) {
+               rc = -ETIMEDOUT;
+               goto error;
+       }
+
+       if (status != PORT100_CMD_STATUS_OK) {
+               nfc_err(&dev->interface->dev,
+                       "in_comm_rf failed with status 0x%08x\n", status);
+               rc = -EIO;
+               goto error;
+       }
+
+       /* Remove collision bits byte */
+       skb_pull(resp, 1);
+
+       goto exit;
+
+error:
+       kfree_skb(resp);
+       resp = ERR_PTR(rc);
+
+exit:
+       cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp);
+
+       kfree(cb_arg);
+}
+
+static int port100_in_send_cmd(struct nfc_digital_dev *ddev,
+                              struct sk_buff *skb, u16 _timeout,
+                              nfc_digital_cmd_complete_t cb, void *arg)
+{
+       struct port100 *dev = nfc_digital_get_drvdata(ddev);
+       struct port100_cb_arg *cb_arg;
+       __le16 timeout;
+
+       cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+       if (!cb_arg)
+               return -ENOMEM;
+
+       cb_arg->complete_cb = cb;
+       cb_arg->complete_arg = arg;
+
+       timeout = cpu_to_le16(_timeout * 10);
+
+       memcpy(skb_push(skb, sizeof(__le16)), &timeout, sizeof(__le16));
+
+       return port100_send_cmd_async(dev, PORT100_CMD_IN_COMM_RF, skb,
+                                     port100_in_comm_rf_complete, cb_arg);
+}
+
+static int port100_tg_set_rf(struct nfc_digital_dev *ddev, u8 rf)
+{
+       struct port100 *dev = nfc_digital_get_drvdata(ddev);
+       struct sk_buff *skb;
+       struct sk_buff *resp;
+       int rc;
+
+       if (rf >= NFC_DIGITAL_RF_TECH_LAST)
+               return -EINVAL;
+
+       skb = port100_alloc_skb(dev, sizeof(struct port100_tg_rf_setting));
+       if (!skb)
+               return -ENOMEM;
+
+       memcpy(skb_put(skb, sizeof(struct port100_tg_rf_setting)),
+              &tg_rf_settings[rf],
+              sizeof(struct port100_tg_rf_setting));
+
+       resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_RF, skb);
+
+       if (IS_ERR(resp))
+               return PTR_ERR(resp);
+
+       rc = resp->data[0];
+
+       dev_kfree_skb(resp);
+
+       return rc;
+}
+
+static int port100_tg_set_framing(struct nfc_digital_dev *ddev, int param)
+{
+       struct port100 *dev = nfc_digital_get_drvdata(ddev);
+       struct port100_protocol *protocols;
+       struct sk_buff *skb;
+       struct sk_buff *resp;
+       int rc;
+       int num_protocols;
+       size_t size;
+
+       if (param >= NFC_DIGITAL_FRAMING_LAST)
+               return -EINVAL;
+
+       protocols = tg_protocols[param];
+
+       num_protocols = 0;
+       while (protocols[num_protocols].number != PORT100_TG_PROT_END)
+               num_protocols++;
+
+       if (!num_protocols)
+               return 0;
+
+       size = sizeof(struct port100_protocol) * num_protocols;
+
+       skb = port100_alloc_skb(dev, size);
+       if (!skb)
+               return -ENOMEM;
+
+       memcpy(skb_put(skb, size), protocols, size);
+
+       resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_PROTOCOL, skb);
+
+       if (IS_ERR(resp))
+               return PTR_ERR(resp);
+
+       rc = resp->data[0];
+
+       dev_kfree_skb(resp);
+
+       return rc;
+}
+
+static int port100_tg_configure_hw(struct nfc_digital_dev *ddev, int type,
+                                  int param)
+{
+       if (type == NFC_DIGITAL_CONFIG_RF_TECH)
+               return port100_tg_set_rf(ddev, param);
+
+       if (type == NFC_DIGITAL_CONFIG_FRAMING)
+               return port100_tg_set_framing(ddev, param);
+
+       return -EINVAL;
+}
+
+static bool port100_tg_target_activated(struct port100 *dev, u8 tgt_activated)
+{
+       u8 mask;
+
+       switch (dev->cmd_type) {
+       case PORT100_CMD_TYPE_0:
+               mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK;
+               break;
+       case PORT100_CMD_TYPE_1:
+               mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK |
+                      PORT100_MDAA_TGT_WAS_ACTIVATED_MASK;
+               break;
+       default:
+               nfc_err(&dev->interface->dev, "Unknown command type.\n");
+               return false;
+       }
+
+       return ((tgt_activated & mask) == mask);
+}
+
+static void port100_tg_comm_rf_complete(struct port100 *dev, void *arg,
+                                       struct sk_buff *resp)
+{
+       u32 status;
+       struct port100_cb_arg *cb_arg = arg;
+       nfc_digital_cmd_complete_t cb = cb_arg->complete_cb;
+       struct port100_tg_comm_rf_res *hdr;
+
+       if (IS_ERR(resp))
+               goto exit;
+
+       hdr = (struct port100_tg_comm_rf_res *)resp->data;
+
+       status = le32_to_cpu(hdr->status);
+
+       if (cb_arg->mdaa &&
+           !port100_tg_target_activated(dev, hdr->target_activated)) {
+               kfree_skb(resp);
+               resp = ERR_PTR(-ETIMEDOUT);
+
+               goto exit;
+       }
+
+       skb_pull(resp, sizeof(struct port100_tg_comm_rf_res));
+
+       if (status != PORT100_CMD_STATUS_OK) {
+               kfree_skb(resp);
+
+               if (status == PORT100_CMD_STATUS_TIMEOUT)
+                       resp = ERR_PTR(-ETIMEDOUT);
+               else
+                       resp = ERR_PTR(-EIO);
+       }
+
+exit:
+       cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp);
+
+       kfree(cb_arg);
+}
+
+static int port100_tg_send_cmd(struct nfc_digital_dev *ddev,
+                              struct sk_buff *skb, u16 timeout,
+                              nfc_digital_cmd_complete_t cb, void *arg)
+{
+       struct port100 *dev = nfc_digital_get_drvdata(ddev);
+       struct port100_tg_comm_rf_cmd *hdr;
+       struct port100_cb_arg *cb_arg;
+
+       cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+       if (!cb_arg)
+               return -ENOMEM;
+
+       cb_arg->complete_cb = cb;
+       cb_arg->complete_arg = arg;
+
+       skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd));
+
+       hdr = (struct port100_tg_comm_rf_cmd *)skb->data;
+
+       memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd));
+       hdr->guard_time = cpu_to_le16(500);
+       hdr->send_timeout = cpu_to_le16(0xFFFF);
+       hdr->recv_timeout = cpu_to_le16(timeout);
+
+       return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb,
+                                     port100_tg_comm_rf_complete, cb_arg);
+}
+
+static int port100_listen_mdaa(struct nfc_digital_dev *ddev,
+                              struct digital_tg_mdaa_params *params,
+                              u16 timeout,
+                              nfc_digital_cmd_complete_t cb, void *arg)
+{
+       struct port100 *dev = nfc_digital_get_drvdata(ddev);
+       struct port100_tg_comm_rf_cmd *hdr;
+       struct port100_cb_arg *cb_arg;
+       struct sk_buff *skb;
+       int rc;
+
+       rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+                                    NFC_DIGITAL_RF_TECH_106A);
+       if (rc)
+               return rc;
+
+       rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+                                    NFC_DIGITAL_FRAMING_NFCA_NFC_DEP);
+       if (rc)
+               return rc;
+
+       cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL);
+       if (!cb_arg)
+               return -ENOMEM;
+
+       cb_arg->complete_cb = cb;
+       cb_arg->complete_arg = arg;
+       cb_arg->mdaa = 1;
+
+       skb = port100_alloc_skb(dev, 0);
+       if (!skb) {
+               kfree(cb_arg);
+               return -ENOMEM;
+       }
+
+       skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd));
+       hdr = (struct port100_tg_comm_rf_cmd *)skb->data;
+
+       memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd));
+
+       hdr->guard_time = 0;
+       hdr->send_timeout = cpu_to_le16(0xFFFF);
+       hdr->mdaa = 1;
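+       /* Target parameters: SENS_RES (2 bytes), the first 3 bytes of NFCID1
+        * and SEL_RES for NFC-A; NFCID2 (8 bytes) and the system code (at
+        * offsets 16-17 of nfcf_param) for NFC-F.
+        */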
+       hdr->nfca_param[0] = (params->sens_res >> 8) & 0xFF;
+       hdr->nfca_param[1] = params->sens_res & 0xFF;
+       memcpy(hdr->nfca_param + 2, params->nfcid1, 3);
+       hdr->nfca_param[5] = params->sel_res;
+       memcpy(hdr->nfcf_param, params->nfcid2, 8);
+       hdr->nfcf_param[16] = (params->sc >> 8) & 0xFF;
+       hdr->nfcf_param[17] = params->sc & 0xFF;
+       hdr->recv_timeout = cpu_to_le16(timeout);
+
+       return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb,
+                                     port100_tg_comm_rf_complete, cb_arg);
+}
+
+static int port100_listen(struct nfc_digital_dev *ddev, u16 timeout,
+                         nfc_digital_cmd_complete_t cb, void *arg)
+{
+       struct port100 *dev = nfc_digital_get_drvdata(ddev);
+       struct sk_buff *skb;
+
+       skb = port100_alloc_skb(dev, 0);
+       if (!skb)
+               return -ENOMEM;
+
+       return port100_tg_send_cmd(ddev, skb, timeout, cb, arg);
+}
+
+static struct nfc_digital_ops port100_digital_ops = {
+       .in_configure_hw = port100_in_configure_hw,
+       .in_send_cmd = port100_in_send_cmd,
+
+       .tg_listen_mdaa = port100_listen_mdaa,
+       .tg_listen = port100_listen,
+       .tg_configure_hw = port100_tg_configure_hw,
+       .tg_send_cmd = port100_tg_send_cmd,
+
+       .switch_rf = port100_switch_rf,
+       .abort_cmd = port100_abort_cmd,
+};
+
+static const struct usb_device_id port100_table[] = {
+       { .match_flags          = USB_DEVICE_ID_MATCH_DEVICE,
+         .idVendor             = SONY_VENDOR_ID,
+         .idProduct            = RCS380_PRODUCT_ID,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(usb, port100_table);
+
+static int port100_probe(struct usb_interface *interface,
+                        const struct usb_device_id *id)
+{
+       struct port100 *dev;
+       int rc;
+       struct usb_host_interface *iface_desc;
+       struct usb_endpoint_descriptor *endpoint;
+       int in_endpoint;
+       int out_endpoint;
+       u16 fw_version;
+       u64 cmd_type_mask;
+       int i;
+
+       dev = devm_kzalloc(&interface->dev, sizeof(struct port100), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       dev->udev = usb_get_dev(interface_to_usbdev(interface));
+       dev->interface = interface;
+       usb_set_intfdata(interface, dev);
+
+       in_endpoint = out_endpoint = 0;
+       iface_desc = interface->cur_altsetting;
+       for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+               endpoint = &iface_desc->endpoint[i].desc;
+
+               if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint))
+                       in_endpoint = endpoint->bEndpointAddress;
+
+               if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint))
+                       out_endpoint = endpoint->bEndpointAddress;
+       }
+
+       if (!in_endpoint || !out_endpoint) {
+               nfc_err(&interface->dev,
+                       "Could not find bulk-in or bulk-out endpoint\n");
+               rc = -ENODEV;
+               goto error;
+       }
+
+       dev->in_urb = usb_alloc_urb(0, GFP_KERNEL);
+       dev->out_urb = usb_alloc_urb(0, GFP_KERNEL);
+
+       if (!dev->in_urb || !dev->out_urb) {
+               nfc_err(&interface->dev, "Could not allocate USB URBs\n");
+               rc = -ENOMEM;
+               goto error;
+       }
+
+       usb_fill_bulk_urb(dev->in_urb, dev->udev,
+                         usb_rcvbulkpipe(dev->udev, in_endpoint),
+                         NULL, 0, NULL, dev);
+       usb_fill_bulk_urb(dev->out_urb, dev->udev,
+                         usb_sndbulkpipe(dev->udev, out_endpoint),
+                         NULL, 0, port100_send_complete, dev);
+
+       dev->skb_headroom = PORT100_FRAME_HEADER_LEN +
+                           PORT100_COMM_RF_HEAD_MAX_LEN;
+       dev->skb_tailroom = PORT100_FRAME_TAIL_LEN;
+
+       INIT_WORK(&dev->cmd_complete_work, port100_wq_cmd_complete);
+
+       /* The first thing to do with the Port-100 is to set the command type
+        * to be used: command type 1 if the device supports it, type 0
+        * otherwise.
+        */
+       cmd_type_mask = port100_get_command_type_mask(dev);
+       if (!cmd_type_mask) {
+               nfc_err(&interface->dev,
+                       "Could not get supported command types.\n");
+               rc = -ENODEV;
+               goto error;
+       }
+
+       if (PORT100_CMD_TYPE_IS_SUPPORTED(cmd_type_mask, PORT100_CMD_TYPE_1))
+               dev->cmd_type = PORT100_CMD_TYPE_1;
+       else
+               dev->cmd_type = PORT100_CMD_TYPE_0;
+
+       rc = port100_set_command_type(dev, dev->cmd_type);
+       if (rc) {
+               nfc_err(&interface->dev,
+                       "The device does not support command type %u.\n",
+                       dev->cmd_type);
+               goto error;
+       }
+
+       fw_version = port100_get_firmware_version(dev);
+       if (!fw_version)
+               nfc_err(&interface->dev,
+                       "Could not get device firmware version.\n");
+
+       nfc_info(&interface->dev,
+                "Sony NFC Port-100 Series attached (firmware v%x.%02x)\n",
+                (fw_version & 0xFF00) >> 8, fw_version & 0xFF);
+
+       dev->nfc_digital_dev = nfc_digital_allocate_device(&port100_digital_ops,
+                                                          PORT100_PROTOCOLS,
+                                                          PORT100_CAPABILITIES,
+                                                          dev->skb_headroom,
+                                                          dev->skb_tailroom);
+       if (!dev->nfc_digital_dev) {
+               nfc_err(&interface->dev,
+                       "Could not allocate nfc_digital_dev.\n");
+               rc = -ENOMEM;
+               goto error;
+       }
+
+       nfc_digital_set_parent_dev(dev->nfc_digital_dev, &interface->dev);
+       nfc_digital_set_drvdata(dev->nfc_digital_dev, dev);
+
+       rc = nfc_digital_register_device(dev->nfc_digital_dev);
+       if (rc) {
+               nfc_err(&interface->dev,
+                       "Could not register digital device.\n");
+               goto free_nfc_dev;
+       }
+
+       return 0;
+
+free_nfc_dev:
+       nfc_digital_free_device(dev->nfc_digital_dev);
+
+error:
+       usb_free_urb(dev->in_urb);
+       usb_free_urb(dev->out_urb);
+       usb_put_dev(dev->udev);
+
+       return rc;
+}
+
+static void port100_disconnect(struct usb_interface *interface)
+{
+       struct port100 *dev;
+
+       dev = usb_get_intfdata(interface);
+       usb_set_intfdata(interface, NULL);
+
+       nfc_digital_unregister_device(dev->nfc_digital_dev);
+       nfc_digital_free_device(dev->nfc_digital_dev);
+
+       usb_kill_urb(dev->in_urb);
+       usb_kill_urb(dev->out_urb);
+
+       usb_free_urb(dev->in_urb);
+       usb_free_urb(dev->out_urb);
+
+       kfree(dev->cmd);
+
+       nfc_info(&interface->dev, "Sony Port-100 NFC device disconnected\n");
+}
+
+static struct usb_driver port100_driver = {
+       .name =         "port100",
+       .probe =        port100_probe,
+       .disconnect =   port100_disconnect,
+       .id_table =     port100_table,
+};
+
+module_usb_driver(port100_driver);
+
+MODULE_DESCRIPTION("NFC Port-100 series usb driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
index 9d2009a9004d14bc6f80bc9e6192a478fc9362aa..78cc76053328c2c8c70bff559f5dc0f903a241df 100644 (file)
@@ -74,10 +74,4 @@ config OF_MTD
        depends on MTD
        def_bool y
 
-config OF_RESERVED_MEM
-       depends on OF_FLATTREE && (DMA_CMA || (HAVE_GENERIC_DMA_COHERENT && HAVE_MEMBLOCK))
-       def_bool y
-       help
-         Initialization code for DMA reserved memory
-
 endmenu # OF
index ed9660adad7751357b49914fcb046fa64470767c..efd05102c40533100794b7d6a7626f583a1f0cdc 100644 (file)
@@ -9,4 +9,3 @@ obj-$(CONFIG_OF_MDIO)   += of_mdio.o
 obj-$(CONFIG_OF_PCI)   += of_pci.o
 obj-$(CONFIG_OF_PCI_IRQ)  += of_pci_irq.o
 obj-$(CONFIG_OF_MTD)   += of_mtd.o
-obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
index 865d3f66c86b2735810e1f6d4d7a219dfd9b350e..7d4c70f859e30687bcd0892c195f9cbfc34826dc 100644 (file)
@@ -303,10 +303,8 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
        struct device_node *cpun, *cpus;
 
        cpus = of_find_node_by_path("/cpus");
-       if (!cpus) {
-               pr_warn("Missing cpus node, bailing out\n");
+       if (!cpus)
                return NULL;
-       }
 
        for_each_child_of_node(cpus, cpun) {
                if (of_node_cmp(cpun->type, "cpu"))
index 229dd9d69e180529cc7e3135d71001421ab9deea..a4fa9ad31b8f7cfb62f45abf47ada0649d87b867 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
-#include <linux/random.h>
 
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #ifdef CONFIG_PPC
@@ -803,14 +802,3 @@ void __init unflatten_device_tree(void)
 }
 
 #endif /* CONFIG_OF_EARLY_FLATTREE */
-
-/* Feed entire flattened device tree into the random pool */
-static int __init add_fdt_randomness(void)
-{
-       if (initial_boot_params)
-               add_device_randomness(initial_boot_params,
-                               be32_to_cpu(initial_boot_params->totalsize));
-
-       return 0;
-}
-core_initcall(add_fdt_randomness);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
deleted file mode 100644 (file)
index 0fe40c7..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Device tree based initialization code for reserved memory.
- *
- * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- * Author: Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License or (at your optional) any later version of the license.
- */
-
-#include <linux/memblock.h>
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
-#include <linux/mm.h>
-#include <linux/sizes.h>
-#include <linux/mm_types.h>
-#include <linux/dma-contiguous.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
-
-#define MAX_RESERVED_REGIONS   16
-struct reserved_mem {
-       phys_addr_t             base;
-       unsigned long           size;
-       struct cma              *cma;
-       char                    name[32];
-};
-static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
-static int reserved_mem_count;
-
-static int __init fdt_scan_reserved_mem(unsigned long node, const char *uname,
-                                       int depth, void *data)
-{
-       struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
-       phys_addr_t base, size;
-       int is_cma, is_reserved;
-       unsigned long len;
-       const char *status;
-       __be32 *prop;
-
-       is_cma = IS_ENABLED(CONFIG_DMA_CMA) &&
-              of_flat_dt_is_compatible(node, "linux,contiguous-memory-region");
-       is_reserved = of_flat_dt_is_compatible(node, "reserved-memory-region");
-
-       if (!is_reserved && !is_cma) {
-               /* ignore node and scan next one */
-               return 0;
-       }
-
-       status = of_get_flat_dt_prop(node, "status", &len);
-       if (status && strcmp(status, "okay") != 0) {
-               /* ignore disabled node nad scan next one */
-               return 0;
-       }
-
-       prop = of_get_flat_dt_prop(node, "reg", &len);
-       if (!prop || (len < (dt_root_size_cells + dt_root_addr_cells) *
-                            sizeof(__be32))) {
-               pr_err("Reserved mem: node %s, incorrect \"reg\" property\n",
-                      uname);
-               /* ignore node and scan next one */
-               return 0;
-       }
-       base = dt_mem_next_cell(dt_root_addr_cells, &prop);
-       size = dt_mem_next_cell(dt_root_size_cells, &prop);
-
-       if (!size) {
-               /* ignore node and scan next one */
-               return 0;
-       }
-
-       pr_info("Reserved mem: found %s, memory base %lx, size %ld MiB\n",
-               uname, (unsigned long)base, (unsigned long)size / SZ_1M);
-
-       if (reserved_mem_count == ARRAY_SIZE(reserved_mem))
-               return -ENOSPC;
-
-       rmem->base = base;
-       rmem->size = size;
-       strlcpy(rmem->name, uname, sizeof(rmem->name));
-
-       if (is_cma) {
-               struct cma *cma;
-               if (dma_contiguous_reserve_area(size, base, 0, &cma) == 0) {
-                       rmem->cma = cma;
-                       reserved_mem_count++;
-                       if (of_get_flat_dt_prop(node,
-                                               "linux,default-contiguous-region",
-                                               NULL))
-                               dma_contiguous_set_default(cma);
-               }
-       } else if (is_reserved) {
-               if (memblock_remove(base, size) == 0)
-                       reserved_mem_count++;
-               else
-                       pr_err("Failed to reserve memory for %s\n", uname);
-       }
-
-       return 0;
-}
-
-static struct reserved_mem *get_dma_memory_region(struct device *dev)
-{
-       struct device_node *node;
-       const char *name;
-       int i;
-
-       node = of_parse_phandle(dev->of_node, "memory-region", 0);
-       if (!node)
-               return NULL;
-
-       name = kbasename(node->full_name);
-       for (i = 0; i < reserved_mem_count; i++)
-               if (strcmp(name, reserved_mem[i].name) == 0)
-                       return &reserved_mem[i];
-       return NULL;
-}
-
-/**
- * of_reserved_mem_device_init() - assign reserved memory region to given device
- *
- * This function assign memory region pointed by "memory-region" device tree
- * property to the given device.
- */
-void of_reserved_mem_device_init(struct device *dev)
-{
-       struct reserved_mem *region = get_dma_memory_region(dev);
-       if (!region)
-               return;
-
-       if (region->cma) {
-               dev_set_cma_area(dev, region->cma);
-               pr_info("Assigned CMA %s to %s device\n", region->name,
-                       dev_name(dev));
-       } else {
-               if (dma_declare_coherent_memory(dev, region->base, region->base,
-                   region->size, DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) != 0)
-                       pr_info("Declared reserved memory %s to %s device\n",
-                               region->name, dev_name(dev));
-       }
-}
-
-/**
- * of_reserved_mem_device_release() - release reserved memory device structures
- *
- * This function releases structures allocated for memory region handling for
- * the given device.
- */
-void of_reserved_mem_device_release(struct device *dev)
-{
-       struct reserved_mem *region = get_dma_memory_region(dev);
-       if (!region && !region->cma)
-               dma_release_declared_memory(dev);
-}
-
-/**
- * early_init_dt_scan_reserved_mem() - create reserved memory regions
- *
- * This function grabs memory from early allocator for device exclusive use
- * defined in device tree structures. It should be called by arch specific code
- * once the early allocator (memblock) has been activated and all other
- * subsystems have already allocated/reserved memory.
- */
-void __init early_init_dt_scan_reserved_mem(void)
-{
-       of_scan_flat_dt_by_path("/memory/reserved-memory",
-                               fdt_scan_reserved_mem, NULL);
-}
index 9b439ac63d8e73eef85aa490d5cab6ec825120b7..049c3d0bddd1068edaf01f4a58165047bb1336f0 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
-#include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
 
 const struct of_device_id of_default_bus_match_table[] = {
@@ -219,8 +218,6 @@ static struct platform_device *of_platform_device_create_pdata(
        dev->dev.bus = &platform_bus_type;
        dev->dev.platform_data = platform_data;
 
-       of_reserved_mem_device_init(&dev->dev);
-
        /* We do not fill the DMA ops for platform devices by default.
         * This is currently the responsibility of the platform code
         * to do such, possibly using a device notifier
@@ -228,7 +225,6 @@ static struct platform_device *of_platform_device_create_pdata(
 
        if (of_device_add(dev) != 0) {
                platform_device_put(dev);
-               of_reserved_mem_device_release(&dev->dev);
                return NULL;
        }
 
@@ -284,9 +280,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
        else
                of_device_make_bus_id(&dev->dev);
 
-       /* setup amba-specific device info */
-       dev->dma_mask = ~0;
-
        /* Allow the HW Peripheral ID to be overridden */
        prop = of_get_property(node, "arm,primecell-periphid", NULL);
        if (prop)
index 70694ce38be2bb2579efdc3ff4db3d8e0c73190f..2225237ff63f64a70f6b005f418d7e3de3c5ef93 100644 (file)
@@ -31,14 +31,17 @@ menuconfig PARPORT
 
          If unsure, say Y.
 
+config ARCH_MIGHT_HAVE_PC_PARPORT
+       bool
+       help
+         Select this config option from the architecture Kconfig if
+         the architecture might have PC parallel port hardware.
+
 if PARPORT
 
 config PARPORT_PC
        tristate "PC-style hardware"
-       depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
-               (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \
-               !XTENSA && !CRIS && !H8300
-
+       depends on ARCH_MIGHT_HAVE_PC_PARPORT
        ---help---
          You should say Y here if you have a PC-style parallel port. All
          IBM PC compatible computers and some Alphas have PC-style
index 903e1285fda06ce30c84bf79b44078e733048c38..9637615262296235737a094282645b72f3c6ccc5 100644 (file)
@@ -2004,6 +2004,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
        struct resource *ECR_res = NULL;
        struct resource *EPP_res = NULL;
        struct platform_device *pdev = NULL;
+       int ret;
 
        if (!dev) {
                /* We need a physical device to attach to, but none was
@@ -2014,8 +2015,11 @@ struct parport *parport_pc_probe_port(unsigned long int base,
                        return NULL;
                dev = &pdev->dev;
 
-               dev->coherent_dma_mask = DMA_BIT_MASK(24);
-               dev->dma_mask = &dev->coherent_dma_mask;
+               ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(24));
+               if (ret) {
+                       dev_err(dev, "Unable to set coherent dma mask: disabling DMA\n");
+                       dma = PARPORT_DMA_NONE;
+               }
        }
 
        ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
index 3d950481112634fc847d61fef187efc86503a189..efa24d9a33615a1268e83212fc3a04844806497f 100644 (file)
@@ -15,6 +15,12 @@ config PCI_EXYNOS
        select PCIEPORTBUS
        select PCIE_DW
 
+config PCI_IMX6
+       bool "Freescale i.MX6 PCIe controller"
+       depends on SOC_IMX6Q
+       select PCIEPORTBUS
+       select PCIE_DW
+
 config PCI_TEGRA
        bool "NVIDIA Tegra PCIe controller"
        depends on ARCH_TEGRA
index c9a997b2690dc9532e7654ae804c0697652679ae..287d6a053dda4b0a2d61dd85e5a0bc19349fee4f 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_PCIE_DW) += pcie-designware.o
 obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
 obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
 obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o
index 94e096bb2d0a3e88d66634ea201dc4e546f03b7e..ee692c2c3d73fa83e4815b8648aafc81386fd8e1 100644 (file)
@@ -48,6 +48,7 @@ struct exynos_pcie {
 #define PCIE_IRQ_SPECIAL               0x008
 #define PCIE_IRQ_EN_PULSE              0x00c
 #define PCIE_IRQ_EN_LEVEL              0x010
+#define IRQ_MSI_ENABLE                 (0x1 << 2)
 #define PCIE_IRQ_EN_SPECIAL            0x014
 #define PCIE_PWR_RESET                 0x018
 #define PCIE_CORE_RESET                        0x01c
@@ -77,18 +78,28 @@ struct exynos_pcie {
 #define PCIE_PHY_PLL_BIAS              0x00c
 #define PCIE_PHY_DCC_FEEDBACK          0x014
 #define PCIE_PHY_PLL_DIV_1             0x05c
+#define PCIE_PHY_COMMON_POWER          0x064
+#define PCIE_PHY_COMMON_PD_CMN         (0x1 << 3)
 #define PCIE_PHY_TRSV0_EMP_LVL         0x084
 #define PCIE_PHY_TRSV0_DRV_LVL         0x088
 #define PCIE_PHY_TRSV0_RXCDR           0x0ac
+#define PCIE_PHY_TRSV0_POWER           0x0c4
+#define PCIE_PHY_TRSV0_PD_TSV          (0x1 << 7)
 #define PCIE_PHY_TRSV0_LVCC            0x0dc
 #define PCIE_PHY_TRSV1_EMP_LVL         0x144
 #define PCIE_PHY_TRSV1_RXCDR           0x16c
+#define PCIE_PHY_TRSV1_POWER           0x184
+#define PCIE_PHY_TRSV1_PD_TSV          (0x1 << 7)
 #define PCIE_PHY_TRSV1_LVCC            0x19c
 #define PCIE_PHY_TRSV2_EMP_LVL         0x204
 #define PCIE_PHY_TRSV2_RXCDR           0x22c
+#define PCIE_PHY_TRSV2_POWER           0x244
+#define PCIE_PHY_TRSV2_PD_TSV          (0x1 << 7)
 #define PCIE_PHY_TRSV2_LVCC            0x25c
 #define PCIE_PHY_TRSV3_EMP_LVL         0x2c4
 #define PCIE_PHY_TRSV3_RXCDR           0x2ec
+#define PCIE_PHY_TRSV3_POWER           0x304
+#define PCIE_PHY_TRSV3_PD_TSV          (0x1 << 7)
 #define PCIE_PHY_TRSV3_LVCC            0x31c
 
 static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
@@ -202,6 +213,58 @@ static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
        exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
 }
 
+static void exynos_pcie_power_on_phy(struct pcie_port *pp)
+{
+       u32 val;
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
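+       /* Clear the power-down bits for the PHY common block and all four
+        * transceiver lanes.
+        */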
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+       val &= ~PCIE_PHY_COMMON_PD_CMN;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+       val &= ~PCIE_PHY_TRSV0_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+       val &= ~PCIE_PHY_TRSV1_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+       val &= ~PCIE_PHY_TRSV2_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+       val &= ~PCIE_PHY_TRSV3_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
+static void exynos_pcie_power_off_phy(struct pcie_port *pp)
+{
+       u32 val;
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_COMMON_POWER);
+       val |= PCIE_PHY_COMMON_PD_CMN;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_COMMON_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV0_POWER);
+       val |= PCIE_PHY_TRSV0_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV0_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV1_POWER);
+       val |= PCIE_PHY_TRSV1_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV1_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV2_POWER);
+       val |= PCIE_PHY_TRSV2_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV2_POWER);
+
+       val = exynos_phy_readl(exynos_pcie, PCIE_PHY_TRSV3_POWER);
+       val |= PCIE_PHY_TRSV3_PD_TSV;
+       exynos_phy_writel(exynos_pcie, val, PCIE_PHY_TRSV3_POWER);
+}
+
 static void exynos_pcie_init_phy(struct pcie_port *pp)
 {
        struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
@@ -270,6 +333,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
        /* de-assert phy reset */
        exynos_pcie_deassert_phy_reset(pp);
 
+       /* power on phy */
+       exynos_pcie_power_on_phy(pp);
+
        /* initialize phy */
        exynos_pcie_init_phy(pp);
 
@@ -302,6 +368,9 @@ static int exynos_pcie_establish_link(struct pcie_port *pp)
                                                       PCIE_PHY_PLL_LOCKED);
                                dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
                        }
+                       /* power off phy */
+                       exynos_pcie_power_off_phy(pp);
+
                        dev_err(pp->dev, "PCIe Link Fail\n");
                        return -EINVAL;
                }
@@ -342,9 +411,36 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg)
+{
+       struct pcie_port *pp = arg;
+
+       dw_handle_msi_irq(pp);
+
+       return IRQ_HANDLED;
+}
+
+static void exynos_pcie_msi_init(struct pcie_port *pp)
+{
+       u32 val;
+       struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+       dw_pcie_msi_init(pp);
+
+       /* enable MSI interrupt */
+       val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_EN_LEVEL);
+       val |= IRQ_MSI_ENABLE;
+       exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_LEVEL);
+       return;
+}
+
 static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
 {
        exynos_pcie_enable_irq_pulse(pp);
+
+       if (IS_ENABLED(CONFIG_PCI_MSI))
+               exynos_pcie_msi_init(pp);
+
        return;
 }
 
@@ -430,6 +526,22 @@ static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
                return ret;
        }
 
+       if (IS_ENABLED(CONFIG_PCI_MSI)) {
+               pp->msi_irq = platform_get_irq(pdev, 0);
+               if (!pp->msi_irq) {
+                       dev_err(&pdev->dev, "failed to get msi irq\n");
+                       return -ENODEV;
+               }
+
+               ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+                                       exynos_pcie_msi_irq_handler,
+                                       IRQF_SHARED, "exynos-pcie", pp);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to request msi irq\n");
+                       return ret;
+               }
+       }
+
        pp->root_bus_nr = -1;
        pp->ops = &exynos_pcie_host_ops;
 
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
new file mode 100644 (file)
index 0000000..5afa922
--- /dev/null
@@ -0,0 +1,575 @@
+/*
+ * PCIe host controller driver for Freescale i.MX6 SoCs
+ *
+ * Copyright (C) 2013 Kosagi
+ *             http://www.kosagi.com
+ *
+ * Author: Sean Cross <xobs@kosagi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_imx6_pcie(x)        container_of(x, struct imx6_pcie, pp)
+
+struct imx6_pcie {
+       int                     reset_gpio;
+       int                     power_on_gpio;
+       int                     wake_up_gpio;
+       int                     disable_gpio;
+       struct clk              *lvds_gate;
+       struct clk              *sata_ref_100m;
+       struct clk              *pcie_ref_125m;
+       struct clk              *pcie_axi;
+       struct pcie_port        pp;
+       struct regmap           *iomuxc_gpr;
+       void __iomem            *mem_base;
+};
+
+/* PCIe Port Logic registers (memory-mapped) */
+#define PL_OFFSET 0x700
+#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
+#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
+
+#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
+#define PCIE_PHY_CTRL_DATA_LOC 0
+#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
+#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
+#define PCIE_PHY_CTRL_WR_LOC 18
+#define PCIE_PHY_CTRL_RD_LOC 19
+
+#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
+#define PCIE_PHY_STAT_ACK_LOC 16
+
+/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_RX_ASIC_OUT 0x100D
+
+#define PHY_RX_OVRD_IN_LO 0x1005
+#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
+#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)
+
+static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
+{
+       u32 val;
+       u32 max_iterations = 10;
+       u32 wait_counter = 0;
+
+       do {
+               val = readl(dbi_base + PCIE_PHY_STAT);
+               val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
+               wait_counter++;
+
+               if (val == exp_val)
+                       return 0;
+
+               udelay(1);
+       } while (wait_counter < max_iterations);
+
+       return -ETIMEDOUT;
+}
+
+static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
+{
+       u32 val;
+       int ret;
+
+       val = addr << PCIE_PHY_CTRL_DATA_LOC;
+       writel(val, dbi_base + PCIE_PHY_CTRL);
+
+       val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
+       writel(val, dbi_base + PCIE_PHY_CTRL);
+
+       ret = pcie_phy_poll_ack(dbi_base, 1);
+       if (ret)
+               return ret;
+
+       val = addr << PCIE_PHY_CTRL_DATA_LOC;
+       writel(val, dbi_base + PCIE_PHY_CTRL);
+
+       ret = pcie_phy_poll_ack(dbi_base, 0);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
+static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
+{
+       u32 val, phy_ctl;
+       int ret;
+
+       ret = pcie_phy_wait_ack(dbi_base, addr);
+       if (ret)
+               return ret;
+
+       /* assert Read signal */
+       phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
+       writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);
+
+       ret = pcie_phy_poll_ack(dbi_base, 1);
+       if (ret)
+               return ret;
+
+       val = readl(dbi_base + PCIE_PHY_STAT);
+       *data = val & 0xffff;
+
+       /* deassert Read signal */
+       writel(0x00, dbi_base + PCIE_PHY_CTRL);
+
+       ret = pcie_phy_poll_ack(dbi_base, 0);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
+{
+       u32 var;
+       int ret;
+
+       /* write addr */
+       /* cap addr */
+       ret = pcie_phy_wait_ack(dbi_base, addr);
+       if (ret)
+               return ret;
+
+       var = data << PCIE_PHY_CTRL_DATA_LOC;
+       writel(var, dbi_base + PCIE_PHY_CTRL);
+
+       /* capture data */
+       var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
+       writel(var, dbi_base + PCIE_PHY_CTRL);
+
+       ret = pcie_phy_poll_ack(dbi_base, 1);
+       if (ret)
+               return ret;
+
+       /* deassert cap data */
+       var = data << PCIE_PHY_CTRL_DATA_LOC;
+       writel(var, dbi_base + PCIE_PHY_CTRL);
+
+       /* wait for ack de-assertion */
+       ret = pcie_phy_poll_ack(dbi_base, 0);
+       if (ret)
+               return ret;
+
+       /* assert wr signal */
+       var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
+       writel(var, dbi_base + PCIE_PHY_CTRL);
+
+       /* wait for ack */
+       ret = pcie_phy_poll_ack(dbi_base, 1);
+       if (ret)
+               return ret;
+
+       /* deassert wr signal */
+       var = data << PCIE_PHY_CTRL_DATA_LOC;
+       writel(var, dbi_base + PCIE_PHY_CTRL);
+
+       /* wait for ack de-assertion */
+       ret = pcie_phy_poll_ack(dbi_base, 0);
+       if (ret)
+               return ret;
+
+       writel(0x0, dbi_base + PCIE_PHY_CTRL);
+
+       return 0;
+}
+
+/*  Added for PCI abort handling */
+static int imx6q_pcie_abort_handler(unsigned long addr,
+               unsigned int fsr, struct pt_regs *regs)
+{
+       /*
+        * If it was an imprecise abort, then we need to correct the
+        * return address to be _after_ the instruction.
+        */
+       if (fsr & (1 << 10))
+               regs->ARM_pc += 4;
+       return 0;
+}
+
+static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
+{
+       struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+                       IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                       IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+                       IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
+
+       gpio_set_value(imx6_pcie->reset_gpio, 0);
+       msleep(100);
+       gpio_set_value(imx6_pcie->reset_gpio, 1);
+
+       return 0;
+}
+
+static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+       struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+       int ret;
+
+       if (gpio_is_valid(imx6_pcie->power_on_gpio))
+               gpio_set_value(imx6_pcie->power_on_gpio, 1);
+
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+                       IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+                       IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
+       ret = clk_prepare_enable(imx6_pcie->sata_ref_100m);
+       if (ret) {
+               dev_err(pp->dev, "unable to enable sata_ref_100m\n");
+               goto err_sata_ref;
+       }
+
+       ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m);
+       if (ret) {
+               dev_err(pp->dev, "unable to enable pcie_ref_125m\n");
+               goto err_pcie_ref;
+       }
+
+       ret = clk_prepare_enable(imx6_pcie->lvds_gate);
+       if (ret) {
+               dev_err(pp->dev, "unable to enable lvds_gate\n");
+               goto err_lvds_gate;
+       }
+
+       ret = clk_prepare_enable(imx6_pcie->pcie_axi);
+       if (ret) {
+               dev_err(pp->dev, "unable to enable pcie_axi\n");
+               goto err_pcie_axi;
+       }
+
+       /* allow the clocks to stabilize */
+       usleep_range(200, 500);
+
+       return 0;
+
+err_pcie_axi:
+       clk_disable_unprepare(imx6_pcie->lvds_gate);
+err_lvds_gate:
+       clk_disable_unprepare(imx6_pcie->pcie_ref_125m);
+err_pcie_ref:
+       clk_disable_unprepare(imx6_pcie->sata_ref_100m);
+err_sata_ref:
+       return ret;
+}
+
+static void imx6_pcie_init_phy(struct pcie_port *pp)
+{
+       struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                       IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+
+       /* configure constant input signal to the pcie ctrl and phy */
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                       IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                       IMX6Q_GPR12_LOS_LEVEL, 9 << 4);
+
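+       /* Set the Gen1/Gen2 TX de-emphasis and the full/low TX swing levels */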
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                       IMX6Q_GPR8_TX_DEEMPH_GEN1, 0 << 0);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                       IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, 0 << 6);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                       IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, 20 << 12);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                       IMX6Q_GPR8_TX_SWING_FULL, 127 << 18);
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
+                       IMX6Q_GPR8_TX_SWING_LOW, 127 << 25);
+}
+
+static void imx6_pcie_host_init(struct pcie_port *pp)
+{
+       int count = 0;
+       struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+
+       imx6_pcie_assert_core_reset(pp);
+
+       imx6_pcie_init_phy(pp);
+
+       imx6_pcie_deassert_core_reset(pp);
+
+       dw_pcie_setup_rc(pp);
+
+       regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+                       IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+
+       while (!dw_pcie_link_up(pp)) {
+               usleep_range(100, 1000);
+               count++;
+               if (count >= 10) {
+                       dev_err(pp->dev, "phy link never came up\n");
+                       dev_dbg(pp->dev,
+                               "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
+                               readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+                               readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+                       break;
+               }
+       }
+
+       return;
+}
+
+static int imx6_pcie_link_up(struct pcie_port *pp)
+{
+       u32 rc, ltssm, rx_valid, temp;
+
+       /* link is debug bit 36, debug register 1 starts at bit 32 */
+       rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) & (0x1 << (36 - 32));
+       if (rc)
+               return -EAGAIN;
+
+       /*
+        * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
+        * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
+        * If the MAC/LTSSM state is Recovery.RcvrLock while PHY/rx_valid is 0,
+        * the transition to gen2 is stuck; pulse PHY/rx_reset to recover.
+        */
+       pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
+       ltssm = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;
+
+       if (rx_valid & 0x01)
+               return 0;
+
+       if (ltssm != 0x0d)
+               return 0;
+
+       dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
+
+       pcie_phy_read(pp->dbi_base,
+               PHY_RX_OVRD_IN_LO, &temp);
+       temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN
+               | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+       pcie_phy_write(pp->dbi_base,
+               PHY_RX_OVRD_IN_LO, temp);
+
+       usleep_range(2000, 3000);
+
+       pcie_phy_read(pp->dbi_base,
+               PHY_RX_OVRD_IN_LO, &temp);
+       temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN
+               | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+       pcie_phy_write(pp->dbi_base,
+               PHY_RX_OVRD_IN_LO, temp);
+
+       return 0;
+}
+
+static struct pcie_host_ops imx6_pcie_host_ops = {
+       .link_up = imx6_pcie_link_up,
+       .host_init = imx6_pcie_host_init,
+};
+
+static int imx6_add_pcie_port(struct pcie_port *pp,
+                       struct platform_device *pdev)
+{
+       int ret;
+
+       pp->irq = platform_get_irq(pdev, 0);
+       if (!pp->irq) {
+               dev_err(&pdev->dev, "failed to get irq\n");
+               return -ENODEV;
+       }
+
+       pp->root_bus_nr = -1;
+       pp->ops = &imx6_pcie_host_ops;
+
+       spin_lock_init(&pp->conf_lock);
+       ret = dw_pcie_host_init(pp);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to initialize host\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int __init imx6_pcie_probe(struct platform_device *pdev)
+{
+       struct imx6_pcie *imx6_pcie;
+       struct pcie_port *pp;
+       struct device_node *np = pdev->dev.of_node;
+       struct resource *dbi_base;
+       int ret;
+
+       imx6_pcie = devm_kzalloc(&pdev->dev, sizeof(*imx6_pcie), GFP_KERNEL);
+       if (!imx6_pcie)
+               return -ENOMEM;
+
+       pp = &imx6_pcie->pp;
+       pp->dev = &pdev->dev;
+
+       /* Added for PCI abort handling */
+       hook_fault_code(16 + 6, imx6q_pcie_abort_handler, SIGBUS, 0,
+               "imprecise external abort");
+
+       dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!dbi_base) {
+               dev_err(&pdev->dev, "dbi_base memory resource not found\n");
+               return -ENODEV;
+       }
+
+       pp->dbi_base = devm_ioremap_resource(&pdev->dev, dbi_base);
+       if (IS_ERR(pp->dbi_base)) {
+               dev_err(&pdev->dev, "unable to remap dbi_base\n");
+               ret = PTR_ERR(pp->dbi_base);
+               goto err;
+       }
+
+       /* Fetch GPIOs */
+       imx6_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+       if (!gpio_is_valid(imx6_pcie->reset_gpio)) {
+               dev_err(&pdev->dev, "no reset-gpio defined\n");
+               ret = -ENODEV;
+               goto err;
+       }
+       ret = devm_gpio_request_one(&pdev->dev,
+                               imx6_pcie->reset_gpio,
+                               GPIOF_OUT_INIT_LOW,
+                               "PCIe reset");
+       if (ret) {
+               dev_err(&pdev->dev, "unable to get reset gpio\n");
+               goto err;
+       }
+
+       imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0);
+       if (gpio_is_valid(imx6_pcie->power_on_gpio)) {
+               ret = devm_gpio_request_one(&pdev->dev,
+                                       imx6_pcie->power_on_gpio,
+                                       GPIOF_OUT_INIT_LOW,
+                                       "PCIe power enable");
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to get power-on gpio\n");
+                       goto err;
+               }
+       }
+
+       imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0);
+       if (gpio_is_valid(imx6_pcie->wake_up_gpio)) {
+               ret = devm_gpio_request_one(&pdev->dev,
+                                       imx6_pcie->wake_up_gpio,
+                                       GPIOF_IN,
+                                       "PCIe wake up");
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to get wake-up gpio\n");
+                       goto err;
+               }
+       }
+
+       imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0);
+       if (gpio_is_valid(imx6_pcie->disable_gpio)) {
+               ret = devm_gpio_request_one(&pdev->dev,
+                                       imx6_pcie->disable_gpio,
+                                       GPIOF_OUT_INIT_HIGH,
+                                       "PCIe disable endpoint");
+               if (ret) {
+                       dev_err(&pdev->dev, "unable to get disable-ep gpio\n");
+                       goto err;
+               }
+       }
+
+       /* Fetch clocks */
+       imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate");
+       if (IS_ERR(imx6_pcie->lvds_gate)) {
+               dev_err(&pdev->dev,
+                       "lvds_gate clock select missing or invalid\n");
+               ret = PTR_ERR(imx6_pcie->lvds_gate);
+               goto err;
+       }
+
+       imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, "sata_ref_100m");
+       if (IS_ERR(imx6_pcie->sata_ref_100m)) {
+               dev_err(&pdev->dev,
+                       "sata_ref_100m clock source missing or invalid\n");
+               ret = PTR_ERR(imx6_pcie->sata_ref_100m);
+               goto err;
+       }
+
+       imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m");
+       if (IS_ERR(imx6_pcie->pcie_ref_125m)) {
+               dev_err(&pdev->dev,
+                       "pcie_ref_125m clock source missing or invalid\n");
+               ret = PTR_ERR(imx6_pcie->pcie_ref_125m);
+               goto err;
+       }
+
+       imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi");
+       if (IS_ERR(imx6_pcie->pcie_axi)) {
+               dev_err(&pdev->dev,
+                       "pcie_axi clock source missing or invalid\n");
+               ret = PTR_ERR(imx6_pcie->pcie_axi);
+               goto err;
+       }
+
+       /* Grab GPR config register range */
+       imx6_pcie->iomuxc_gpr =
+                syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+       if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
+               dev_err(&pdev->dev, "unable to find iomuxc registers\n");
+               ret = PTR_ERR(imx6_pcie->iomuxc_gpr);
+               goto err;
+       }
+
+       ret = imx6_add_pcie_port(pp, pdev);
+       if (ret < 0)
+               goto err;
+
+       platform_set_drvdata(pdev, imx6_pcie);
+       return 0;
+
+err:
+       return ret;
+}
+
+static const struct of_device_id imx6_pcie_of_match[] = {
+       { .compatible = "fsl,imx6q-pcie", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, imx6_pcie_of_match);
+
+static struct platform_driver imx6_pcie_driver = {
+       .driver = {
+               .name   = "imx6q-pcie",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(imx6_pcie_of_match),
+       },
+};
+
+/* Freescale PCIe driver does not allow module unload */
+
+static int __init imx6_pcie_init(void)
+{
+       return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
+}
+module_init(imx6_pcie_init);
+
+MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
+MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
index 2e9888a0635aea41ac5e310ae39e35b564d4874f..7c4f38dd42ba6a5ef868e24e7b033998284fb4ec 100644 (file)
@@ -408,7 +408,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
 
        list_for_each_entry(bus, &pcie->busses, list)
                if (bus->nr == busnr)
-                       return bus->area->addr;
+                       return (void __iomem *)bus->area->addr;
 
        bus = tegra_pcie_bus_alloc(pcie, busnr);
        if (IS_ERR(bus))
@@ -416,7 +416,7 @@ static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
 
        list_add_tail(&bus->list, &pcie->busses);
 
-       return bus->area->addr;
+       return (void __iomem *)bus->area->addr;
 }
 
 static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
index c10e9ac9bbbc81849d8e4fba538ba3b58a96aad5..896301788e9d0253bb22828f8725bb4a40d7510a 100644 (file)
  * published by the Free Software Foundation.
  */
 
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/pci.h>
 #include <linux/pci_regs.h>
@@ -142,6 +145,204 @@ int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
        return ret;
 }
 
+static struct irq_chip dw_msi_irq_chip = {
+       .name = "PCI-MSI",
+       .irq_enable = unmask_msi_irq,
+       .irq_disable = mask_msi_irq,
+       .irq_mask = mask_msi_irq,
+       .irq_unmask = unmask_msi_irq,
+};
+
+/* MSI int handler */
+void dw_handle_msi_irq(struct pcie_port *pp)
+{
+       unsigned long val;
+       int i, pos;
+
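+       /* Each MSI controller bank covers 32 vectors; its status and enable
+        * registers are laid out at a 12-byte stride.
+        */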
+       for (i = 0; i < MAX_MSI_CTRLS; i++) {
+               dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
+                               (u32 *)&val);
+               if (val) {
+                       pos = 0;
+                       while ((pos = find_next_bit(&val, 32, pos)) != 32) {
+                               generic_handle_irq(pp->msi_irq_start
+                                       + (i * 32) + pos);
+                               pos++;
+                       }
+               }
+               dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, val);
+       }
+}
+
+void dw_pcie_msi_init(struct pcie_port *pp)
+{
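+       /* Allocate one page; its physical address is programmed into the
+        * controller below as the MSI target address.
+        */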
+       pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
+
+       /* program the msi_data */
+       dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
+                       virt_to_phys((void *)pp->msi_data));
+       dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0);
+}
+
+static int find_valid_pos0(struct pcie_port *pp, int msgvec, int pos, int *pos0)
+{
+       int flag = 1;
+
+       do {
+               pos = find_next_zero_bit(pp->msi_irq_in_use,
+                               MAX_MSI_IRQS, pos);
+               /* if we have reached the end, there are no free vectors left */
+               if (pos == MAX_MSI_IRQS)
+                       return -ENOSPC;
+               /*
+                * Check if this position is at the correct offset. nvec is
+                * always a power of two, so pos0 must be aligned to nvec.
+                */
+               if (pos % msgvec)
+                       pos += msgvec - (pos % msgvec);
+               else
+                       flag = 0;
+       } while (flag);
+
+       *pos0 = pos;
+       return 0;
+}
+
+static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
+{
+       int res, bit, irq, pos0, pos1, i;
+       u32 val;
+       struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
+
+       if (!pp) {
+               BUG();
+               return -EINVAL;
+       }
+
+       pos0 = find_first_zero_bit(pp->msi_irq_in_use,
+                       MAX_MSI_IRQS);
+       if (pos0 % no_irqs) {
+               if (find_valid_pos0(pp, no_irqs, pos0, &pos0))
+                       goto no_valid_irq;
+       }
+       if (no_irqs > 1) {
+               pos1 = find_next_bit(pp->msi_irq_in_use,
+                               MAX_MSI_IRQS, pos0);
+               /* there must be nvec number of consecutive free bits */
+               while ((pos1 - pos0) < no_irqs) {
+                       if (find_valid_pos0(pp, no_irqs, pos1, &pos0))
+                               goto no_valid_irq;
+                       pos1 = find_next_bit(pp->msi_irq_in_use,
+                                       MAX_MSI_IRQS, pos0);
+               }
+       }
+
+       irq = (pp->msi_irq_start + pos0);
+
+       if ((irq + no_irqs) > (pp->msi_irq_start + MAX_MSI_IRQS-1))
+               goto no_valid_irq;
+
+       i = 0;
+       while (i < no_irqs) {
+               set_bit(pos0 + i, pp->msi_irq_in_use);
+               irq_alloc_descs((irq + i), (irq + i), 1, 0);
+               irq_set_msi_desc(irq + i, desc);
+               /* Enable the corresponding interrupt in the MSI controller */
+               res = ((pos0 + i) / 32) * 12;
+               bit = (pos0 + i) % 32;
+               dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+               val |= 1 << bit;
+               dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+               i++;
+       }
+
+       *pos = pos0;
+       return irq;
+
+no_valid_irq:
+       *pos = pos0;
+       return -ENOSPC;
+}
+
+static void clear_irq(unsigned int irq)
+{
+       int res, bit, val, pos;
+       struct irq_desc *desc;
+       struct msi_desc *msi;
+       struct pcie_port *pp;
+
+       /* get the port structure */
+       desc = irq_to_desc(irq);
+       msi = irq_desc_get_msi_desc(desc);
+       pp = sys_to_pcie(msi->dev->bus->sysdata);
+       if (!pp) {
+               BUG();
+               return;
+       }
+
+       pos = irq - pp->msi_irq_start;
+
+       irq_free_desc(irq);
+
+       clear_bit(pos, pp->msi_irq_in_use);
+
+       /* Disable corresponding interrupt on MSI interrupt controller */
+       res = (pos / 32) * 12;
+       bit = pos % 32;
+       dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
+       val &= ~(1 << bit);
+       dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
+}
+
+static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
+                       struct msi_desc *desc)
+{
+       int irq, pos, msgvec;
+       u16 msg_ctr;
+       struct msi_msg msg;
+       struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
+
+       if (!pp) {
+               BUG();
+               return -EINVAL;
+       }
+
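+       /* The multiple-message fields of the MSI capability encode the vector
+        * count as log2; anything beyond 2^5 (32 vectors) falls back to one.
+        */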
+       pci_read_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+                               &msg_ctr);
+       msgvec = (msg_ctr & PCI_MSI_FLAGS_QSIZE) >> 4;
+       if (msgvec == 0)
+               msgvec = (msg_ctr & PCI_MSI_FLAGS_QMASK) >> 1;
+       if (msgvec > 5)
+               msgvec = 0;
+
+       irq = assign_irq((1 << msgvec), desc, &pos);
+       if (irq < 0)
+               return irq;
+
+       msg_ctr &= ~PCI_MSI_FLAGS_QSIZE;
+       msg_ctr |= msgvec << 4;
+       pci_write_config_word(pdev, desc->msi_attrib.pos + PCI_MSI_FLAGS,
+                               msg_ctr);
+       desc->msi_attrib.multiple = msgvec;
+
+       msg.address_lo = virt_to_phys((void *)pp->msi_data);
+       msg.address_hi = 0x0;
+       msg.data = pos;
+       write_msi_msg(irq, &msg);
+
+       return 0;
+}
+
+static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+       clear_irq(irq);
+}
+
+static struct msi_chip dw_pcie_msi_chip = {
+       .setup_irq = dw_msi_setup_irq,
+       .teardown_irq = dw_msi_teardown_irq,
+};
+
 int dw_pcie_link_up(struct pcie_port *pp)
 {
        if (pp->ops->link_up)
@@ -150,6 +351,20 @@ int dw_pcie_link_up(struct pcie_port *pp)
                return 0;
 }
 
+static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+                       irq_hw_number_t hwirq)
+{
+       irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
+       irq_set_chip_data(irq, domain->host_data);
+       set_irq_flags(irq, IRQF_VALID);
+
+       return 0;
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+       .map = dw_pcie_msi_map,
+};
+
 int __init dw_pcie_host_init(struct pcie_port *pp)
 {
        struct device_node *np = pp->dev->of_node;
@@ -157,6 +372,8 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
        struct of_pci_range_parser parser;
        u32 val;
 
+       struct irq_domain *irq_domain;
+
        if (of_pci_range_parser_init(&parser, np)) {
                dev_err(pp->dev, "missing ranges property\n");
                return -EINVAL;
@@ -223,6 +440,18 @@ int __init dw_pcie_host_init(struct pcie_port *pp)
                return -EINVAL;
        }
 
+       if (IS_ENABLED(CONFIG_PCI_MSI)) {
+               irq_domain = irq_domain_add_linear(pp->dev->of_node,
+                                       MAX_MSI_IRQS, &msi_domain_ops,
+                                       &dw_pcie_msi_chip);
+               if (!irq_domain) {
+                       dev_err(pp->dev, "irq domain init failed\n");
+                       return -ENXIO;
+               }
+
+               pp->msi_irq_start = irq_find_mapping(irq_domain, 0);
+       }
+
        if (pp->ops->host_init)
                pp->ops->host_init(pp);
 
@@ -485,10 +714,21 @@ int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
        return pp->irq;
 }
 
+static void dw_pcie_add_bus(struct pci_bus *bus)
+{
+       if (IS_ENABLED(CONFIG_PCI_MSI)) {
+               struct pcie_port *pp = sys_to_pcie(bus->sysdata);
+
+               dw_pcie_msi_chip.dev = pp->dev;
+               bus->msi = &dw_pcie_msi_chip;
+       }
+}
+
 static struct hw_pci dw_pci = {
        .setup          = dw_pcie_setup,
        .scan           = dw_pcie_scan_bus,
        .map_irq        = dw_pcie_map_irq,
+       .add_bus        = dw_pcie_add_bus,
 };
 
 void dw_pcie_setup_rc(struct pcie_port *pp)
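
For clarity, the multi-message decode in dw_msi_setup_irq() above follows the PCI MSI capability layout: both Multiple Message fields of the Message Control register store log2 of the vector count. A standalone sketch of that arithmetic, with the PCI_MSI_FLAGS_* masks inlined as numeric constants purely for illustration:

    #include <stdio.h>

    /* Multiple Message Enable is bits 6:4 and Multiple Message Capable is
     * bits 3:1 of the MSI Message Control register; both hold log2(vectors). */
    static int example_msgvec(unsigned short msg_ctr)
    {
            int msgvec = (msg_ctr & 0x70) >> 4;     /* PCI_MSI_FLAGS_QSIZE */

            if (msgvec == 0)
                    msgvec = (msg_ctr & 0x0e) >> 1; /* PCI_MSI_FLAGS_QMASK */
            if (msgvec > 5)
                    msgvec = 0;             /* >32 vectors is invalid; fall back to 1 */
            return msgvec;
    }

    int main(void)
    {
            /* A device advertising 4 vectors (capable field 010b) yields
             * msgvec = 2, so assign_irq() is asked for 1 << 2 = 4 IRQs. */
            printf("%d vectors\n", 1 << example_msgvec(0x0004));
            return 0;
    }
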
index 133820f1da971ecc19997b5e6e245f3cc992aaa4..faccbbf31907c11b01971f3105ff7ea29c9339e7 100644 (file)
@@ -20,6 +20,14 @@ struct pcie_port_info {
        phys_addr_t     mem_bus_addr;
 };
 
+/*
+ * The maximum number of MSI IRQs is 256 per controller, but keep it
+ * at 32 for now; we will probably never need more than 32. If needed,
+ * increase it in multiples of 32.
+ */
+#define MAX_MSI_IRQS                   32
+#define MAX_MSI_CTRLS                  (MAX_MSI_IRQS / 32)
+
 struct pcie_port {
        struct device           *dev;
        u8                      root_bus_nr;
@@ -38,6 +46,10 @@ struct pcie_port {
        int                     irq;
        u32                     lanes;
        struct pcie_host_ops    *ops;
+       int                     msi_irq;
+       int                     msi_irq_start;
+       unsigned long           msi_data;
+       DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
 };
 
 struct pcie_host_ops {
@@ -57,6 +69,8 @@ int cfg_read(void __iomem *addr, int where, int size, u32 *val);
 int cfg_write(void __iomem *addr, int where, int size, u32 val);
 int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
 int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
+void dw_handle_msi_irq(struct pcie_port *pp);
+void dw_pcie_msi_init(struct pcie_port *pp);
 int dw_pcie_link_up(struct pcie_port *pp);
 void dw_pcie_setup_rc(struct pcie_port *pp);
 int dw_pcie_host_init(struct pcie_port *pp);
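
As a sanity check on the sizing comment above: with MAX_MSI_IRQS fixed at 32, MAX_MSI_CTRLS evaluates to 1, i.e. the msi_irq_in_use bitmap covers exactly one 32-bit enable register's worth of vectors. A trivial sketch (names mirror the header, illustration only):

    #include <stdio.h>

    #define MAX_MSI_IRQS    32
    #define MAX_MSI_CTRLS   (MAX_MSI_IRQS / 32)

    int main(void)
    {
            /* One 32-bit interrupt-enable register serves 32 MSI vectors,
             * so 32 IRQs need exactly one controller block. */
            printf("controller blocks: %d\n", MAX_MSI_CTRLS);  /* prints 1 */
            return 0;
    }
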
index 2a47e82821dacff72dd227950a6c3de828755053..1ce8ee054f1aa89ba83fb1001761e9200262a666 100644 (file)
@@ -338,7 +338,7 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
        acpi_handle chandle, handle;
        struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
 
-       flags &= OSC_SHPC_NATIVE_HP_CONTROL;
+       flags &= OSC_PCI_SHPC_NATIVE_HP_CONTROL;
        if (!flags) {
                err("Invalid flags %u specified!\n", flags);
                return -EINVAL;
@@ -411,13 +411,10 @@ EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
 static int pcihp_is_ejectable(acpi_handle handle)
 {
        acpi_status status;
-       acpi_handle tmp;
        unsigned long long removable;
-       status = acpi_get_handle(handle, "_ADR", &tmp);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(handle, "_ADR"))
                return 0;
-       status = acpi_get_handle(handle, "_EJ0", &tmp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(handle, "_EJ0"))
                return 1;
        status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable);
        if (ACPI_SUCCESS(status) && removable)
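
The conversion just above is a pattern that recurs throughout this merge: an acpi_get_handle() call whose only purpose was to probe for a method's existence is replaced by acpi_has_method(). A minimal before/after sketch (function names are made up for illustration):

    #include <linux/acpi.h>

    /* Before: a scratch handle was needed just to test for "_EJ0". */
    static int old_style(acpi_handle handle)
    {
            acpi_handle tmp;

            return ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp));
    }

    /* After: a single boolean query, no scratch variable. */
    static int new_style(acpi_handle handle)
    {
            return acpi_has_method(handle, "_EJ0");
    }
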
index f4e0289246672c0a9157927303945f0934e26d31..26100f510b1087f45bbe39b79dcf9649acf38e9d 100644 (file)
 #include <linux/mutex.h>
 #include <linux/pci_hotplug.h>
 
-#define dbg(format, arg...)                                    \
-       do {                                                    \
-               if (acpiphp_debug)                              \
-                       printk(KERN_DEBUG "%s: " format,        \
-                               MY_NAME , ## arg);              \
-       } while (0)
-#define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME , ## arg)
-#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
-#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
-
 struct acpiphp_context;
 struct acpiphp_bridge;
 struct acpiphp_slot;
index bf2203ef1308bfa13f3e8b2744db6bd0b2adbcb8..8650d39db3922c74d36a01029068ca7b03c75935 100644 (file)
@@ -31,6 +31,8 @@
  *
  */
 
+#define pr_fmt(fmt) "acpiphp: " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/smp.h>
 #include "acpiphp.h"
 
-#define MY_NAME        "acpiphp"
-
 /* name size which is used for entries in pcihpfs */
 #define SLOT_NAME_SIZE  21              /* {_SUN} */
 
-bool acpiphp_debug;
 bool acpiphp_disabled;
 
 /* local variables */
@@ -61,9 +60,7 @@ static struct acpiphp_attention_info *attention_info;
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
-MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
 MODULE_PARM_DESC(disable, "disable acpiphp driver");
-module_param_named(debug, acpiphp_debug, bool, 0644);
 module_param_named(disable, acpiphp_disabled, bool, 0444);
 
 /* export the attention callback registration methods */
@@ -139,7 +136,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        /* enable the specified slot */
        return acpiphp_enable_slot(slot->acpi_slot);
@@ -156,7 +153,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        /* disable the specified slot */
        return acpiphp_disable_and_eject_slot(slot->acpi_slot);
@@ -176,8 +173,9 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
  {
        int retval = -ENODEV;
 
-       dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
+       pr_debug("%s - physical_slot = %s\n", __func__,
+               hotplug_slot_name(hotplug_slot));
+
        if (attention_info && try_module_get(attention_info->owner)) {
                retval = attention_info->set_attn(hotplug_slot, status);
                module_put(attention_info->owner);
@@ -199,7 +197,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        *value = acpiphp_get_power_status(slot->acpi_slot);
 
@@ -221,7 +219,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
        int retval = -EINVAL;
 
-       dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
+       pr_debug("%s - physical_slot = %s\n", __func__,
+               hotplug_slot_name(hotplug_slot));
 
        if (attention_info && try_module_get(attention_info->owner)) {
                retval = attention_info->get_attn(hotplug_slot, value);
@@ -244,7 +243,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        *value = acpiphp_get_latch_status(slot->acpi_slot);
 
@@ -264,7 +263,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        *value = acpiphp_get_adapter_status(slot->acpi_slot);
 
@@ -279,7 +278,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
 {
        struct slot *slot = hotplug_slot->private;
 
-       dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
+       pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
 
        kfree(slot->hotplug_slot);
        kfree(slot);
@@ -322,11 +321,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
        if (retval == -EBUSY)
                goto error_hpslot;
        if (retval) {
-               err("pci_hp_register failed with error %d\n", retval);
+               pr_err("pci_hp_register failed with error %d\n", retval);
                goto error_hpslot;
        }
 
-       info("Slot [%s] registered\n", slot_name(slot));
+       pr_info("Slot [%s] registered\n", slot_name(slot));
 
        return 0;
 error_hpslot:
@@ -343,17 +342,17 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
        struct slot *slot = acpiphp_slot->slot;
        int retval = 0;
 
-       info("Slot [%s] unregistered\n", slot_name(slot));
+       pr_info("Slot [%s] unregistered\n", slot_name(slot));
 
        retval = pci_hp_deregister(slot->hotplug_slot);
        if (retval)
-               err("pci_hp_deregister failed with error %d\n", retval);
+               pr_err("pci_hp_deregister failed with error %d\n", retval);
 }
 
 
 void __init acpiphp_init(void)
 {
-       info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
+       pr_info(DRIVER_DESC " version: " DRIVER_VERSION "%s\n",
                acpiphp_disabled ? ", disabled by user; please report a bug"
                                 : "");
 }
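
The logging changes in this file rely on the standard pr_fmt() convention: defining pr_fmt() before the first include makes every pr_debug()/pr_info()/pr_warn()/pr_err() call expand with the driver prefix, which is what allows the hand-rolled dbg/info/warn/err macros to be deleted. A minimal sketch of the mechanism (illustrative snippet, not part of the patch):

    /* Must be defined before any header that pulls in <linux/printk.h>. */
    #define pr_fmt(fmt) "acpiphp: " fmt

    #include <linux/printk.h>

    static void example(void)
    {
            /* Expands to printk(KERN_INFO "acpiphp: slot registered\n"). */
            pr_info("slot registered\n");
    }
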
index 0b7d23b4ad954489b657480d0ee08a3b7b2d81d8..9d066b86c72421aa82683a3a7ba31c05d7021869 100644 (file)
@@ -39,6 +39,8 @@
  *    bus. It loses the refcount when the driver unloads.
  */
 
+#define pr_fmt(fmt) "acpiphp_glue: " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 
@@ -58,8 +60,6 @@ static LIST_HEAD(bridge_list);
 static DEFINE_MUTEX(bridge_mutex);
 static DEFINE_MUTEX(acpiphp_context_lock);
 
-#define MY_NAME "acpiphp_glue"
-
 static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
 static void acpiphp_sanitize_bus(struct pci_bus *bus);
 static void acpiphp_set_hpp_values(struct pci_bus *bus);
@@ -335,7 +335,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
                if (ACPI_FAILURE(status))
                        sun = bridge->nr_slots;
 
-               dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
+               pr_debug("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
                    sun, pci_domain_nr(pbus), pbus->number, device);
 
                retval = acpiphp_register_hotplug_slot(slot, sun);
@@ -343,10 +343,10 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
                        slot->slot = NULL;
                        bridge->nr_slots--;
                        if (retval == -EBUSY)
-                               warn("Slot %llu already registered by another "
+                               pr_warn("Slot %llu already registered by another "
                                        "hotplug driver\n", sun);
                        else
-                               warn("acpiphp_register_hotplug_slot failed "
+                               pr_warn("acpiphp_register_hotplug_slot failed "
                                        "(err code = 0x%x)\n", retval);
                }
                /* Even if the slot registration fails, we can still use it. */
@@ -369,7 +369,7 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
                if (register_hotplug_dock_device(handle,
                        &acpiphp_dock_ops, context,
                        acpiphp_dock_init, acpiphp_dock_release))
-                       dbg("failed to register dock device\n");
+                       pr_debug("failed to register dock device\n");
        }
 
        /* install notify handler */
@@ -427,7 +427,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
                                                        ACPI_SYSTEM_NOTIFY,
                                                        handle_hotplug_event);
                                if (ACPI_FAILURE(status))
-                                       err("failed to remove notify handler\n");
+                                       pr_err("failed to remove notify handler\n");
                        }
                }
                if (slot->slot)
@@ -830,8 +830,9 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
        switch (type) {
        case ACPI_NOTIFY_BUS_CHECK:
                /* bus re-enumerate */
-               dbg("%s: Bus check notify on %s\n", __func__, objname);
-               dbg("%s: re-enumerating slots under %s\n", __func__, objname);
+               pr_debug("%s: Bus check notify on %s\n", __func__, objname);
+               pr_debug("%s: re-enumerating slots under %s\n",
+                        __func__, objname);
                if (bridge) {
                        acpiphp_check_bridge(bridge);
                } else {
@@ -845,7 +846,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
 
        case ACPI_NOTIFY_DEVICE_CHECK:
                /* device check */
-               dbg("%s: Device check notify on %s\n", __func__, objname);
+               pr_debug("%s: Device check notify on %s\n", __func__, objname);
                if (bridge) {
                        acpiphp_check_bridge(bridge);
                } else {
@@ -866,7 +867,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
 
        case ACPI_NOTIFY_EJECT_REQUEST:
                /* request device eject */
-               dbg("%s: Device eject notify on %s\n", __func__, objname);
+               pr_debug("%s: Device eject notify on %s\n", __func__, objname);
                acpiphp_disable_and_eject_slot(func->slot);
                break;
        }
@@ -994,14 +995,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
 
                /*
                 * This bridge should have been registered as a hotplug function
-                * under its parent, so the context has to be there.  If not, we
-                * are in deep goo.
+                * under its parent, so the context should be there, unless the
+                * parent is going to be handled by pciehp, in which case this
+                * bridge is not interesting to us either.
                 */
                mutex_lock(&acpiphp_context_lock);
                context = acpiphp_get_context(handle);
-               if (WARN_ON(!context)) {
+               if (!context) {
                        mutex_unlock(&acpiphp_context_lock);
                        put_device(&bus->dev);
+                       pci_dev_put(bridge->pci_dev);
                        kfree(bridge);
                        return;
                }
index 2f5786c8522c2f170d8f57bc08c592e0ef78048a..0d64c414bf7876a74baf7acc4ebf6664c8bc7542 100644 (file)
@@ -25,6 +25,8 @@
  *
  */
 
+#define pr_fmt(fmt) "acpiphp_ibm: " fmt
+
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #define DRIVER_AUTHOR  "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
 #define DRIVER_DESC    "ACPI Hot Plug PCI Controller Driver IBM extension"
 
-static bool debug;
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRIVER_VERSION);
-module_param(debug, bool, 0644);
-MODULE_PARM_DESC(debug, " Debugging mode enabled or not");
-#define MY_NAME "acpiphp_ibm"
-
-#undef dbg
-#define dbg(format, arg...)                            \
-do {                                                   \
-       if (debug)                                      \
-               printk(KERN_DEBUG "%s: " format,        \
-                               MY_NAME , ## arg);      \
-} while (0)
 
 #define FOUND_APCI 0x61504349
 /* these are the names for the IBM ACPI pseudo-device */
@@ -189,7 +179,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
 
        ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot));
 
-       dbg("%s: set slot %d (%d) attention status to %d\n", __func__,
+       pr_debug("%s: set slot %d (%d) attention status to %d\n", __func__,
                        ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
                        (status ? 1 : 0));
 
@@ -202,10 +192,10 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status)
 
        stat = acpi_evaluate_integer(ibm_acpi_handle, "APLS", &params, &rc);
        if (ACPI_FAILURE(stat)) {
-               err("APLS evaluation failed:  0x%08x\n", stat);
+               pr_err("APLS evaluation failed:  0x%08x\n", stat);
                return -ENODEV;
        } else if (!rc) {
-               err("APLS method failed:  0x%08llx\n", rc);
+               pr_err("APLS method failed:  0x%08llx\n", rc);
                return -ERANGE;
        }
        return 0;
@@ -234,7 +224,7 @@ static int ibm_get_attention_status(struct hotplug_slot *slot, u8 *status)
        else
                *status = 0;
 
-       dbg("%s: get slot %d (%d) attention status is %d\n", __func__,
+       pr_debug("%s: get slot %d (%d) attention status is %d\n", __func__,
                        ibm_slot->slot.slot_num, ibm_slot->slot.slot_id,
                        *status);
 
@@ -266,10 +256,10 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
        u8 subevent = event & 0xf0;
        struct notification *note = context;
 
-       dbg("%s: Received notification %02x\n", __func__, event);
+       pr_debug("%s: Received notification %02x\n", __func__, event);
 
        if (subevent == 0x80) {
-               dbg("%s: generationg bus event\n", __func__);
+               pr_debug("%s: generating bus event\n", __func__);
                acpi_bus_generate_netlink_event(note->device->pnp.device_class,
                                                  dev_name(&note->device->dev),
                                                  note->event, detail);
@@ -301,7 +291,7 @@ static int ibm_get_table_from_acpi(char **bufp)
 
        status = acpi_evaluate_object(ibm_acpi_handle, "APCI", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
-               err("%s:  APCI evaluation failed\n", __func__);
+               pr_err("%s:  APCI evaluation failed\n", __func__);
                return -ENODEV;
        }
 
@@ -309,13 +299,13 @@ static int ibm_get_table_from_acpi(char **bufp)
        if (!(package) ||
                        (package->type != ACPI_TYPE_PACKAGE) ||
                        !(package->package.elements)) {
-               err("%s:  Invalid APCI object\n", __func__);
+               pr_err("%s:  Invalid APCI object\n", __func__);
                goto read_table_done;
        }
 
        for(size = 0, i = 0; i < package->package.count; i++) {
                if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
-                       err("%s:  Invalid APCI element %d\n", __func__, i);
+                       pr_err("%s:  Invalid APCI element %d\n", __func__, i);
                        goto read_table_done;
                }
                size += package->package.elements[i].buffer.length;
@@ -325,7 +315,7 @@ static int ibm_get_table_from_acpi(char **bufp)
                goto read_table_done;
 
        lbuf = kzalloc(size, GFP_KERNEL);
-       dbg("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
+       pr_debug("%s: element count: %i, ASL table size: %i, &table = 0x%p\n",
                        __func__, package->package.count, size, lbuf);
 
        if (lbuf) {
@@ -370,8 +360,8 @@ static ssize_t ibm_read_apci_table(struct file *filp, struct kobject *kobj,
 {
        int bytes_read = -EINVAL;
        char *table = NULL;
-       
-       dbg("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
+
+       pr_debug("%s: pos = %d, size = %zd\n", __func__, (int)pos, size);
 
        if (pos == 0) {
                bytes_read = ibm_get_table_from_acpi(&table);
@@ -403,7 +393,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
 
        status = acpi_get_object_info(handle, &info);
        if (ACPI_FAILURE(status)) {
-               err("%s:  Failed to get device information status=0x%x\n",
+               pr_err("%s:  Failed to get device information status=0x%x\n",
                        __func__, status);
                return retval;
        }
@@ -411,7 +401,7 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
        if (info->current_status && (info->valid & ACPI_VALID_HID) &&
                        (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
                         !strcmp(info->hardware_id.string, IBM_HARDWARE_ID2))) {
-               dbg("found hardware: %s, handle: %p\n",
+               pr_debug("found hardware: %s, handle: %p\n",
                        info->hardware_id.string, handle);
                *phandle = handle;
                /* returning non-zero causes the search to stop
@@ -432,18 +422,18 @@ static int __init ibm_acpiphp_init(void)
        struct acpi_device *device;
        struct kobject *sysdir = &pci_slots_kset->kobj;
 
-       dbg("%s\n", __func__);
+       pr_debug("%s\n", __func__);
 
        if (acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
                        ACPI_UINT32_MAX, ibm_find_acpi_device, NULL,
                        &ibm_acpi_handle, NULL) != FOUND_APCI) {
-               err("%s: acpi_walk_namespace failed\n", __func__);
+               pr_err("%s: acpi_walk_namespace failed\n", __func__);
                retval = -ENODEV;
                goto init_return;
        }
-       dbg("%s: found IBM aPCI device\n", __func__);
+       pr_debug("%s: found IBM aPCI device\n", __func__);
        if (acpi_bus_get_device(ibm_acpi_handle, &device)) {
-               err("%s: acpi_bus_get_device failed\n", __func__);
+               pr_err("%s: acpi_bus_get_device failed\n", __func__);
                retval = -ENODEV;
                goto init_return;
        }
@@ -457,7 +447,7 @@ static int __init ibm_acpiphp_init(void)
                        ACPI_DEVICE_NOTIFY, ibm_handle_events,
                        &ibm_note);
        if (ACPI_FAILURE(status)) {
-               err("%s: Failed to register notification handler\n",
+               pr_err("%s: Failed to register notification handler\n",
                                __func__);
                retval = -EBUSY;
                goto init_cleanup;
@@ -479,17 +469,17 @@ static void __exit ibm_acpiphp_exit(void)
        acpi_status status;
        struct kobject *sysdir = &pci_slots_kset->kobj;
 
-       dbg("%s\n", __func__);
+       pr_debug("%s\n", __func__);
 
        if (acpiphp_unregister_attention(&ibm_attention_info))
-               err("%s: attention info deregistration failed", __func__);
+               pr_err("%s: attention info deregistration failed", __func__);
 
        status = acpi_remove_notify_handler(
                           ibm_acpi_handle,
                           ACPI_DEVICE_NOTIFY,
                           ibm_handle_events);
        if (ACPI_FAILURE(status))
-               err("%s: Notification handler removal failed\n", __func__);
+               pr_err("%s: Notification handler removal failed\n", __func__);
        /* remove the /sys entries */
        sysfs_remove_bin_file(sysdir, &ibm_apci_table_attr);
 }
index 66e505ca24ef418a250219789faeb8bd1de8a9be..3c7eb5dd91c636c17e7dadd374022096455d24b1 100644 (file)
@@ -133,7 +133,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
 {
        struct slot *slot = hotplug_slot->private;
 
-       pr_debug("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot));
        kfree(slot->hotplug_slot->info);
        kfree(slot->hotplug_slot);
        kfree(slot);
@@ -183,10 +182,9 @@ int zpci_init_slot(struct zpci_dev *zdev)
        snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
        rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
                             ZPCI_DEVFN, name);
-       if (rc) {
-               pr_err("pci_hp_register failed with error %d\n", rc);
+       if (rc)
                goto error_reg;
-       }
+
        list_add(&slot->slot_list, &s390_hotplug_slot_list);
        return 0;
 
index e260f207a90e2f22b3be53388e6cc5ab1b294920..d876e4b3c6a98d8412f154a4f63814ec5b27682c 100644 (file)
@@ -191,7 +191,7 @@ static inline const char *slot_name(struct slot *slot)
 #include <linux/pci-acpi.h>
 static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
 {
-       u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
+       u32 flags = OSC_PCI_SHPC_NATIVE_HP_CONTROL;
        return acpi_get_hp_hw_control_from_firmware(dev, flags);
 }
 #else
index b0299e6d9a3f2d1359f78454eb67e399ea6d3eb1..dfd1f59de729c6293416d789fb6f665dc09bf701 100644 (file)
@@ -181,7 +181,6 @@ static bool acpi_pci_power_manageable(struct pci_dev *dev)
 static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 {
        acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
-       acpi_handle tmp;
        static const u8 state_conv[] = {
                [PCI_D0] = ACPI_STATE_D0,
                [PCI_D1] = ACPI_STATE_D1,
@@ -192,7 +191,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
        int error = -EINVAL;
 
        /* If the ACPI device has _EJ0, ignore the device */
-       if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
+       if (!handle || acpi_has_method(handle, "_EJ0"))
                return -ENODEV;
 
        switch (state) {
index bdd64b1b4817f7223fa31db19a118db5a9caaaa5..2f2eedceda34c6c80e0f010fb56144e50a866e29 100644 (file)
@@ -2860,7 +2860,7 @@ void __weak pcibios_set_master(struct pci_dev *dev)
                lat = pcibios_max_latency;
        else
                return;
-       dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
+
        pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
 }
 
index 7ef0f868b3e07bf48941075e269ef3976d3efcf1..5e14f5a51357cabd86d8e420e7684d3122b30006 100644 (file)
@@ -641,8 +641,7 @@ static void pci_set_bus_speed(struct pci_bus *bus)
                return;
        }
 
-       pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
-       if (pos) {
+       if (pci_is_pcie(bridge)) {
                u32 linkcap;
                u16 linksta;
 
@@ -984,7 +983,6 @@ void set_pcie_port_type(struct pci_dev *pdev)
        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        if (!pos)
                return;
-       pdev->is_pcie = 1;
        pdev->pcie_cap = pos;
        pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
        pdev->pcie_flags_reg = reg16;
index f6c31fabf3af0bbe24f20b949cc0b6373b11ab97..91490453c2296878f19ff8105e9dbbec4d4878b1 100644 (file)
@@ -2954,6 +2954,29 @@ static void disable_igfx_irq(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
 
+/*
+ * PCI devices on the Intel chips listed below can skip the 10 ms delay
+ * before entering D3 mode.
+ */
+static void quirk_remove_d3_delay(struct pci_dev *dev)
+{
+       dev->d3_delay = 0;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
+
 /*
  * Some devices may pass our check in pci_intx_mask_supported if
  * PCI_COMMAND_INTX_DISABLE works though they actually do not properly
index bc26d7990cc3744512c717540565b7c0dfa557ad..4ce83b26ae9ef2dcbdf9e29c81d55751a9525a7e 100644 (file)
@@ -982,7 +982,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
        }
 
        min_align = calculate_mem_align(aligns, max_order);
-       min_align = max(min_align, window_alignment(bus, b_res->flags & mask));
+       min_align = max(min_align, window_alignment(bus, b_res->flags));
        size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
        if (children_add_size > add_size)
                add_size = children_add_size;
@@ -1136,7 +1136,7 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus,
        }
 
        /* The root bus? */
-       if (!bus->self)
+       if (pci_is_root_bus(bus))
                return;
 
        switch (bus->self->class >> 8) {
index 96d6b2eef4f2a1209f2c9f113e3932b8eb812738..b51a7460cc49bc03b4c055e8f46bfe2fedf718ff 100644 (file)
@@ -504,6 +504,7 @@ config ASUS_WMI
        depends on BACKLIGHT_CLASS_DEVICE
        depends on RFKILL || RFKILL = n
        depends on HOTPLUG_PCI
+       depends on ACPI_VIDEO || ACPI_VIDEO = n
        select INPUT_SPARSEKMAP
        select LEDS_CLASS
        select NEW_LEDS
index a6afd4108beb0592c15604efa9f779180d5349ef..aefcc32e563479d2b22404fdd75ceb7072423d16 100644 (file)
@@ -190,16 +190,10 @@ struct eeepc_laptop {
  */
 static int write_acpi_int(acpi_handle handle, const char *method, int val)
 {
-       struct acpi_object_list params;
-       union acpi_object in_obj;
        acpi_status status;
 
-       params.count = 1;
-       params.pointer = &in_obj;
-       in_obj.type = ACPI_TYPE_INTEGER;
-       in_obj.integer.value = val;
+       status = acpi_execute_simple_method(handle, (char *)method, val);
 
-       status = acpi_evaluate_object(handle, (char *)method, &params, NULL);
        return (status == AE_OK ? 0 : -1);
 }
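
acpi_execute_simple_method(), used above, wraps the common single-integer-argument case so callers no longer build an acpi_object_list by hand. A minimal sketch of the resulting call shape (the wrapper name below is illustrative):

    #include <linux/acpi.h>

    static int example_write_acpi_int(acpi_handle handle, const char *method,
                                      int val)
    {
            /* The helper packs 'val' into the one-element argument list that
             * the removed code constructed manually. */
            acpi_status status = acpi_execute_simple_method(handle,
                                                            (char *)method, val);

            return ACPI_SUCCESS(status) ? 0 : -1;
    }
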
 
index 52b8a97efde150f52d393b4e7a6c0be9360bc9ef..9d30d69aa78f24a3bbb1caa71f797f9c520bbc5c 100644 (file)
@@ -219,8 +219,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
        { .type = ACPI_TYPE_INTEGER }
        };
        struct acpi_object_list arg_list = { 4, &params[0] };
-       struct acpi_buffer output;
-       union acpi_object out_obj;
+       unsigned long long value;
        acpi_handle handle = NULL;
 
        status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
@@ -235,10 +234,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
        params[2].integer.value = arg1;
        params[3].integer.value = arg2;
 
-       output.length = sizeof(out_obj);
-       output.pointer = &out_obj;
-
-       status = acpi_evaluate_object(handle, NULL, &arg_list, &output);
+       status = acpi_evaluate_integer(handle, NULL, &arg_list, &value);
        if (ACPI_FAILURE(status)) {
                vdbg_printk(FUJLAPTOP_DBG_WARN,
                        "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) call failed\n",
@@ -246,18 +242,10 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
                return -ENODEV;
        }
 
-       if (out_obj.type != ACPI_TYPE_INTEGER) {
-               vdbg_printk(FUJLAPTOP_DBG_WARN,
-                       "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) did not "
-                       "return an integer\n",
-                       cmd, arg0, arg1, arg2);
-               return -ENODEV;
-       }
-
        vdbg_printk(FUJLAPTOP_DBG_TRACE,
                "FUNC 0x%x (args 0x%x, 0x%x, 0x%x) returned 0x%x\n",
-                       cmd, arg0, arg1, arg2, (int)out_obj.integer.value);
-       return out_obj.integer.value;
+                       cmd, arg0, arg1, arg2, (int)value);
+       return value;
 }
 
 #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
@@ -317,8 +305,6 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev)
 static int set_lcd_level(int level)
 {
        acpi_status status = AE_OK;
-       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list arg_list = { 1, &arg0 };
        acpi_handle handle = NULL;
 
        vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
@@ -333,9 +319,8 @@ static int set_lcd_level(int level)
                return -ENODEV;
        }
 
-       arg0.integer.value = level;
 
-       status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+       status = acpi_execute_simple_method(handle, NULL, level);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
@@ -345,8 +330,6 @@ static int set_lcd_level(int level)
 static int set_lcd_level_alt(int level)
 {
        acpi_status status = AE_OK;
-       union acpi_object arg0 = { ACPI_TYPE_INTEGER };
-       struct acpi_object_list arg_list = { 1, &arg0 };
        acpi_handle handle = NULL;
 
        vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
@@ -361,9 +344,7 @@ static int set_lcd_level_alt(int level)
                return -ENODEV;
        }
 
-       arg0.integer.value = level;
-
-       status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
+       status = acpi_execute_simple_method(handle, NULL, level);
        if (ACPI_FAILURE(status))
                return -ENODEV;
 
@@ -586,11 +567,10 @@ static struct platform_driver fujitsupf_driver = {
 
 static void dmi_check_cb_common(const struct dmi_system_id *id)
 {
-       acpi_handle handle;
        pr_info("Identified laptop model '%s'\n", id->ident);
        if (use_alt_lcd_levels == -1) {
-               if (ACPI_SUCCESS(acpi_get_handle(NULL,
-                               "\\_SB.PCI0.LPCB.FJEX.SBL2", &handle)))
+               if (acpi_has_method(NULL,
+                               "\\_SB.PCI0.LPCB.FJEX.SBL2"))
                        use_alt_lcd_levels = 1;
                else
                        use_alt_lcd_levels = 0;
@@ -653,7 +633,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = {
 
 static int acpi_fujitsu_add(struct acpi_device *device)
 {
-       acpi_handle handle;
        int result = 0;
        int state = 0;
        struct input_dev *input;
@@ -702,8 +681,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
 
        fujitsu->dev = device;
 
-       if (ACPI_SUCCESS
-           (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+       if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
                vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
                if (ACPI_FAILURE
                    (acpi_evaluate_object
@@ -803,7 +781,6 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
 
 static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 {
-       acpi_handle handle;
        int result = 0;
        int state = 0;
        struct input_dev *input;
@@ -866,8 +843,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
 
        fujitsu_hotkey->dev = device;
 
-       if (ACPI_SUCCESS
-           (acpi_get_handle(device->handle, METHOD_NAME__INI, &handle))) {
+       if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
                vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
                if (ACPI_FAILURE
                    (acpi_evaluate_object
index 89c4519d48ac80d2a54f42ceabf7a48b792e871c..6788acc22ab97f01b410240eef0a5b2082c98550 100644 (file)
@@ -72,8 +72,15 @@ enum {
        VPCCMD_W_BL_POWER = 0x33,
 };
 
+struct ideapad_rfk_priv {
+       int dev;
+       struct ideapad_private *priv;
+};
+
 struct ideapad_private {
+       struct acpi_device *adev;
        struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
+       struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM];
        struct platform_device *platform_device;
        struct input_dev *inputdev;
        struct backlight_device *blightdev;
@@ -81,8 +88,6 @@ struct ideapad_private {
        unsigned long cfg;
 };
 
-static acpi_handle ideapad_handle;
-static struct ideapad_private *ideapad_priv;
 static bool no_bt_rfkill;
 module_param(no_bt_rfkill, bool, 0444);
 MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
@@ -200,34 +205,38 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
  */
 static int debugfs_status_show(struct seq_file *s, void *data)
 {
+       struct ideapad_private *priv = s->private;
        unsigned long value;
 
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &value))
+       if (!priv)
+               return -EINVAL;
+
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
                seq_printf(s, "Backlight max:\t%lu\n", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_BL, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
                seq_printf(s, "Backlight now:\t%lu\n", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
                seq_printf(s, "BL power value:\t%s\n", value ? "On" : "Off");
        seq_printf(s, "=====================\n");
 
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_RF, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
                seq_printf(s, "Radio status:\t%s(%lu)\n",
                           value ? "On" : "Off", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_WIFI, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
                seq_printf(s, "Wifi status:\t%s(%lu)\n",
                           value ? "On" : "Off", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_BT, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
                seq_printf(s, "BT status:\t%s(%lu)\n",
                           value ? "On" : "Off", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_3G, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
                seq_printf(s, "3G status:\t%s(%lu)\n",
                           value ? "On" : "Off", value);
        seq_printf(s, "=====================\n");
 
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_TOUCHPAD, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
                seq_printf(s, "Touchpad status:%s(%lu)\n",
                           value ? "On" : "Off", value);
-       if (!read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &value))
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
                seq_printf(s, "Camera status:\t%s(%lu)\n",
                           value ? "On" : "Off", value);
 
@@ -236,7 +245,7 @@ static int debugfs_status_show(struct seq_file *s, void *data)
 
 static int debugfs_status_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, debugfs_status_show, NULL);
+       return single_open(file, debugfs_status_show, inode->i_private);
 }
 
 static const struct file_operations debugfs_status_fops = {
@@ -249,21 +258,23 @@ static const struct file_operations debugfs_status_fops = {
 
 static int debugfs_cfg_show(struct seq_file *s, void *data)
 {
-       if (!ideapad_priv) {
+       struct ideapad_private *priv = s->private;
+
+       if (!priv) {
                seq_printf(s, "cfg: N/A\n");
        } else {
                seq_printf(s, "cfg: 0x%.8lX\n\nCapability: ",
-                          ideapad_priv->cfg);
-               if (test_bit(CFG_BT_BIT, &ideapad_priv->cfg))
+                          priv->cfg);
+               if (test_bit(CFG_BT_BIT, &priv->cfg))
                        seq_printf(s, "Bluetooth ");
-               if (test_bit(CFG_3G_BIT, &ideapad_priv->cfg))
+               if (test_bit(CFG_3G_BIT, &priv->cfg))
                        seq_printf(s, "3G ");
-               if (test_bit(CFG_WIFI_BIT, &ideapad_priv->cfg))
+               if (test_bit(CFG_WIFI_BIT, &priv->cfg))
                        seq_printf(s, "Wireless ");
-               if (test_bit(CFG_CAMERA_BIT, &ideapad_priv->cfg))
+               if (test_bit(CFG_CAMERA_BIT, &priv->cfg))
                        seq_printf(s, "Camera ");
                seq_printf(s, "\nGraphic: ");
-               switch ((ideapad_priv->cfg)&0x700) {
+               switch ((priv->cfg) & 0x700) {
                case 0x100:
                        seq_printf(s, "Intel");
                        break;
@@ -287,7 +298,7 @@ static int debugfs_cfg_show(struct seq_file *s, void *data)
 
 static int debugfs_cfg_open(struct inode *inode, struct file *file)
 {
-       return single_open(file, debugfs_cfg_show, NULL);
+       return single_open(file, debugfs_cfg_show, inode->i_private);
 }
 
 static const struct file_operations debugfs_cfg_fops = {
@@ -308,14 +319,14 @@ static int ideapad_debugfs_init(struct ideapad_private *priv)
                goto errout;
        }
 
-       node = debugfs_create_file("cfg", S_IRUGO, priv->debug, NULL,
+       node = debugfs_create_file("cfg", S_IRUGO, priv->debug, priv,
                                   &debugfs_cfg_fops);
        if (!node) {
                pr_err("failed to create cfg in debugfs");
                goto errout;
        }
 
-       node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
+       node = debugfs_create_file("status", S_IRUGO, priv->debug, priv,
                                   &debugfs_status_fops);
        if (!node) {
                pr_err("failed to create status in debugfs");
@@ -342,8 +353,9 @@ static ssize_t show_ideapad_cam(struct device *dev,
                                char *buf)
 {
        unsigned long result;
+       struct ideapad_private *priv = dev_get_drvdata(dev);
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_CAMERA, &result))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &result))
                return sprintf(buf, "-1\n");
        return sprintf(buf, "%lu\n", result);
 }
@@ -353,12 +365,13 @@ static ssize_t store_ideapad_cam(struct device *dev,
                                 const char *buf, size_t count)
 {
        int ret, state;
+       struct ideapad_private *priv = dev_get_drvdata(dev);
 
        if (!count)
                return 0;
        if (sscanf(buf, "%i", &state) != 1)
                return -EINVAL;
-       ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state);
+       ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_CAMERA, state);
        if (ret < 0)
                return -EIO;
        return count;
@@ -371,8 +384,9 @@ static ssize_t show_ideapad_fan(struct device *dev,
                                char *buf)
 {
        unsigned long result;
+       struct ideapad_private *priv = dev_get_drvdata(dev);
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_FAN, &result))
                return sprintf(buf, "-1\n");
        return sprintf(buf, "%lu\n", result);
 }
@@ -382,6 +396,7 @@ static ssize_t store_ideapad_fan(struct device *dev,
                                 const char *buf, size_t count)
 {
        int ret, state;
+       struct ideapad_private *priv = dev_get_drvdata(dev);
 
        if (!count)
                return 0;
@@ -389,7 +404,7 @@ static ssize_t store_ideapad_fan(struct device *dev,
                return -EINVAL;
        if (state < 0 || state > 4 || state == 3)
                return -EINVAL;
-       ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state);
+       ret = write_ec_cmd(priv->adev->handle, VPCCMD_W_FAN, state);
        if (ret < 0)
                return -EIO;
        return count;
@@ -415,7 +430,8 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
                supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg));
        else if (attr == &dev_attr_fan_mode.attr) {
                unsigned long value;
-               supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value);
+               supported = !read_ec_data(priv->adev->handle, VPCCMD_R_FAN,
+                                         &value);
        } else
                supported = true;
 
@@ -445,9 +461,9 @@ const struct ideapad_rfk_data ideapad_rfk_data[] = {
 
 static int ideapad_rfk_set(void *data, bool blocked)
 {
-       unsigned long opcode = (unsigned long)data;
+       struct ideapad_rfk_priv *priv = data;
 
-       return write_ec_cmd(ideapad_handle, opcode, !blocked);
+       return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
 }
 
 static struct rfkill_ops ideapad_rfk_ops = {
@@ -459,7 +475,7 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
        unsigned long hw_blocked;
        int i;
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_RF, &hw_blocked))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_RF, &hw_blocked))
                return;
        hw_blocked = !hw_blocked;
 
@@ -468,27 +484,30 @@ static void ideapad_sync_rfk_state(struct ideapad_private *priv)
                        rfkill_set_hw_state(priv->rfk[i], hw_blocked);
 }
 
-static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
+static int ideapad_register_rfkill(struct ideapad_private *priv, int dev)
 {
-       struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
        int ret;
        unsigned long sw_blocked;
 
        if (no_bt_rfkill &&
            (ideapad_rfk_data[dev].type == RFKILL_TYPE_BLUETOOTH)) {
                /* Force to enable bluetooth when no_bt_rfkill=1 */
-               write_ec_cmd(ideapad_handle,
+               write_ec_cmd(priv->adev->handle,
                             ideapad_rfk_data[dev].opcode, 1);
                return 0;
        }
-
-       priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name, &adevice->dev,
-                                     ideapad_rfk_data[dev].type, &ideapad_rfk_ops,
-                                     (void *)(long)dev);
+       priv->rfk_priv[dev].dev = dev;
+       priv->rfk_priv[dev].priv = priv;
+
+       priv->rfk[dev] = rfkill_alloc(ideapad_rfk_data[dev].name,
+                                     &priv->platform_device->dev,
+                                     ideapad_rfk_data[dev].type,
+                                     &ideapad_rfk_ops,
+                                     &priv->rfk_priv[dev]);
        if (!priv->rfk[dev])
                return -ENOMEM;
 
-       if (read_ec_data(ideapad_handle, ideapad_rfk_data[dev].opcode-1,
+       if (read_ec_data(priv->adev->handle, ideapad_rfk_data[dev].opcode-1,
                         &sw_blocked)) {
                rfkill_init_sw_state(priv->rfk[dev], 0);
        } else {
@@ -504,10 +523,8 @@ static int ideapad_register_rfkill(struct acpi_device *adevice, int dev)
        return 0;
 }
 
-static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
+static void ideapad_unregister_rfkill(struct ideapad_private *priv, int dev)
 {
-       struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-
        if (!priv->rfk[dev])
                return;
 
@@ -518,37 +535,16 @@ static void ideapad_unregister_rfkill(struct acpi_device *adevice, int dev)
 /*
  * Platform device
  */
-static int ideapad_platform_init(struct ideapad_private *priv)
+static int ideapad_sysfs_init(struct ideapad_private *priv)
 {
-       int result;
-
-       priv->platform_device = platform_device_alloc("ideapad", -1);
-       if (!priv->platform_device)
-               return -ENOMEM;
-       platform_set_drvdata(priv->platform_device, priv);
-
-       result = platform_device_add(priv->platform_device);
-       if (result)
-               goto fail_platform_device;
-
-       result = sysfs_create_group(&priv->platform_device->dev.kobj,
+       return sysfs_create_group(&priv->platform_device->dev.kobj,
                                    &ideapad_attribute_group);
-       if (result)
-               goto fail_sysfs;
-       return 0;
-
-fail_sysfs:
-       platform_device_del(priv->platform_device);
-fail_platform_device:
-       platform_device_put(priv->platform_device);
-       return result;
 }
 
-static void ideapad_platform_exit(struct ideapad_private *priv)
+static void ideapad_sysfs_exit(struct ideapad_private *priv)
 {
        sysfs_remove_group(&priv->platform_device->dev.kobj,
                           &ideapad_attribute_group);
-       platform_device_unregister(priv->platform_device);
 }
 
 /*
@@ -623,7 +619,7 @@ static void ideapad_input_novokey(struct ideapad_private *priv)
 {
        unsigned long long_pressed;
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_NOVO, &long_pressed))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_NOVO, &long_pressed))
                return;
        if (long_pressed)
                ideapad_input_report(priv, 17);
@@ -635,7 +631,7 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
 {
        unsigned long bit, value;
 
-       read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
+       read_ec_data(priv->adev->handle, VPCCMD_R_SPECIAL_BUTTONS, &value);
 
        for (bit = 0; bit < 16; bit++) {
                if (test_bit(bit, &value)) {
@@ -662,19 +658,28 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv)
  */
 static int ideapad_backlight_get_brightness(struct backlight_device *blightdev)
 {
+       struct ideapad_private *priv = bl_get_data(blightdev);
        unsigned long now;
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+       if (!priv)
+               return -EINVAL;
+
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
                return -EIO;
        return now;
 }
 
 static int ideapad_backlight_update_status(struct backlight_device *blightdev)
 {
-       if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL,
+       struct ideapad_private *priv = bl_get_data(blightdev);
+
+       if (!priv)
+               return -EINVAL;
+
+       if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL,
                         blightdev->props.brightness))
                return -EIO;
-       if (write_ec_cmd(ideapad_handle, VPCCMD_W_BL_POWER,
+       if (write_ec_cmd(priv->adev->handle, VPCCMD_W_BL_POWER,
                         blightdev->props.power == FB_BLANK_POWERDOWN ? 0 : 1))
                return -EIO;
 
@@ -692,11 +697,11 @@ static int ideapad_backlight_init(struct ideapad_private *priv)
        struct backlight_properties props;
        unsigned long max, now, power;
 
-       if (read_ec_data(ideapad_handle, VPCCMD_R_BL_MAX, &max))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &max))
                return -EIO;
-       if (read_ec_data(ideapad_handle, VPCCMD_R_BL, &now))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now))
                return -EIO;
-       if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
                return -EIO;
 
        memset(&props, 0, sizeof(struct backlight_properties));
@@ -734,7 +739,7 @@ static void ideapad_backlight_notify_power(struct ideapad_private *priv)
 
        if (!blightdev)
                return;
-       if (read_ec_data(ideapad_handle, VPCCMD_R_BL_POWER, &power))
+       if (read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &power))
                return;
        blightdev->props.power = power ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
 }
@@ -745,7 +750,7 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
 
        /* if we control brightness via acpi video driver */
        if (priv->blightdev == NULL) {
-               read_ec_data(ideapad_handle, VPCCMD_R_BL, &now);
+               read_ec_data(priv->adev->handle, VPCCMD_R_BL, &now);
                return;
        }
 
@@ -755,19 +760,12 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
 /*
  * module init/exit
  */
-static const struct acpi_device_id ideapad_device_ids[] = {
-       { "VPC2004", 0},
-       { "", 0},
-};
-MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
-
-static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
+static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
 {
-       struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
        unsigned long value;
 
        /* Without reading from EC touchpad LED doesn't switch state */
-       if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) {
+       if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
                /* Some IdeaPads don't really turn off touchpad - they only
                 * switch the LED state. We (de)activate KBC AUX port to turn
                 * touchpad off and on. We send KEY_TOUCHPAD_OFF and
@@ -779,26 +777,77 @@ static void ideapad_sync_touchpad_state(struct acpi_device *adevice)
        }
 }
 
-static int ideapad_acpi_add(struct acpi_device *adevice)
+static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+       struct ideapad_private *priv = data;
+       unsigned long vpc1, vpc2, vpc_bit;
+
+       if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
+               return;
+       if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
+               return;
+
+       vpc1 = (vpc2 << 8) | vpc1;
+       for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
+               if (test_bit(vpc_bit, &vpc1)) {
+                       switch (vpc_bit) {
+                       case 9:
+                               ideapad_sync_rfk_state(priv);
+                               break;
+                       case 13:
+                       case 11:
+                       case 7:
+                       case 6:
+                               ideapad_input_report(priv, vpc_bit);
+                               break;
+                       case 5:
+                               ideapad_sync_touchpad_state(priv);
+                               break;
+                       case 4:
+                               ideapad_backlight_notify_brightness(priv);
+                               break;
+                       case 3:
+                               ideapad_input_novokey(priv);
+                               break;
+                       case 2:
+                               ideapad_backlight_notify_power(priv);
+                               break;
+                       case 0:
+                               ideapad_check_special_buttons(priv);
+                               break;
+                       default:
+                               pr_info("Unknown event: %lu\n", vpc_bit);
+                       }
+               }
+       }
+}
+
+static int ideapad_acpi_add(struct platform_device *pdev)
 {
        int ret, i;
        int cfg;
        struct ideapad_private *priv;
+       struct acpi_device *adev;
+
+       ret = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
+       if (ret)
+               return -ENODEV;
 
-       if (read_method_int(adevice->handle, "_CFG", &cfg))
+       if (read_method_int(adev->handle, "_CFG", &cfg))
                return -ENODEV;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
-       dev_set_drvdata(&adevice->dev, priv);
-       ideapad_priv = priv;
-       ideapad_handle = adevice->handle;
+
+       dev_set_drvdata(&pdev->dev, priv);
        priv->cfg = cfg;
+       priv->adev = adev;
+       priv->platform_device = pdev;
 
-       ret = ideapad_platform_init(priv);
+       ret = ideapad_sysfs_init(priv);
        if (ret)
-               goto platform_failed;
+               goto sysfs_failed;
 
        ret = ideapad_debugfs_init(priv);
        if (ret)
@@ -810,117 +859,92 @@ static int ideapad_acpi_add(struct acpi_device *adevice)
 
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
                if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
-                       ideapad_register_rfkill(adevice, i);
+                       ideapad_register_rfkill(priv, i);
                else
                        priv->rfk[i] = NULL;
        }
        ideapad_sync_rfk_state(priv);
-       ideapad_sync_touchpad_state(adevice);
+       ideapad_sync_touchpad_state(priv);
 
        if (!acpi_video_backlight_support()) {
                ret = ideapad_backlight_init(priv);
                if (ret && ret != -ENODEV)
                        goto backlight_failed;
        }
+       ret = acpi_install_notify_handler(adev->handle,
+               ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
+       if (ret)
+               goto notification_failed;
 
        return 0;
-
+notification_failed:
+       ideapad_backlight_exit(priv);
 backlight_failed:
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
-               ideapad_unregister_rfkill(adevice, i);
+               ideapad_unregister_rfkill(priv, i);
        ideapad_input_exit(priv);
 input_failed:
        ideapad_debugfs_exit(priv);
 debugfs_failed:
-       ideapad_platform_exit(priv);
-platform_failed:
+       ideapad_sysfs_exit(priv);
+sysfs_failed:
        kfree(priv);
        return ret;
 }
 
-static int ideapad_acpi_remove(struct acpi_device *adevice)
+static int ideapad_acpi_remove(struct platform_device *pdev)
 {
-       struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
+       struct ideapad_private *priv = dev_get_drvdata(&pdev->dev);
        int i;
 
+       acpi_remove_notify_handler(priv->adev->handle,
+               ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
        ideapad_backlight_exit(priv);
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
-               ideapad_unregister_rfkill(adevice, i);
+               ideapad_unregister_rfkill(priv, i);
        ideapad_input_exit(priv);
        ideapad_debugfs_exit(priv);
-       ideapad_platform_exit(priv);
-       dev_set_drvdata(&adevice->dev, NULL);
+       ideapad_sysfs_exit(priv);
+       dev_set_drvdata(&pdev->dev, NULL);
        kfree(priv);
 
        return 0;
 }
 
-static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
+#ifdef CONFIG_PM_SLEEP
+static int ideapad_acpi_resume(struct device *device)
 {
-       struct ideapad_private *priv = dev_get_drvdata(&adevice->dev);
-       acpi_handle handle = adevice->handle;
-       unsigned long vpc1, vpc2, vpc_bit;
-
-       if (read_ec_data(handle, VPCCMD_R_VPC1, &vpc1))
-               return;
-       if (read_ec_data(handle, VPCCMD_R_VPC2, &vpc2))
-               return;
+       struct ideapad_private *priv;
 
-       vpc1 = (vpc2 << 8) | vpc1;
-       for (vpc_bit = 0; vpc_bit < 16; vpc_bit++) {
-               if (test_bit(vpc_bit, &vpc1)) {
-                       switch (vpc_bit) {
-                       case 9:
-                               ideapad_sync_rfk_state(priv);
-                               break;
-                       case 13:
-                       case 11:
-                       case 7:
-                       case 6:
-                               ideapad_input_report(priv, vpc_bit);
-                               break;
-                       case 5:
-                               ideapad_sync_touchpad_state(adevice);
-                               break;
-                       case 4:
-                               ideapad_backlight_notify_brightness(priv);
-                               break;
-                       case 3:
-                               ideapad_input_novokey(priv);
-                               break;
-                       case 2:
-                               ideapad_backlight_notify_power(priv);
-                               break;
-                       case 0:
-                               ideapad_check_special_buttons(priv);
-                               break;
-                       default:
-                               pr_info("Unknown event: %lu\n", vpc_bit);
-                       }
-               }
-       }
-}
+       if (!device)
+               return -EINVAL;
+       priv = dev_get_drvdata(device);
 
-static int ideapad_acpi_resume(struct device *device)
-{
-       ideapad_sync_rfk_state(ideapad_priv);
-       ideapad_sync_touchpad_state(to_acpi_device(device));
+       ideapad_sync_rfk_state(priv);
+       ideapad_sync_touchpad_state(priv);
        return 0;
 }
-
+#endif
 static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume);
 
-static struct acpi_driver ideapad_acpi_driver = {
-       .name = "ideapad_acpi",
-       .class = "IdeaPad",
-       .ids = ideapad_device_ids,
-       .ops.add = ideapad_acpi_add,
-       .ops.remove = ideapad_acpi_remove,
-       .ops.notify = ideapad_acpi_notify,
-       .drv.pm = &ideapad_pm,
-       .owner = THIS_MODULE,
+static const struct acpi_device_id ideapad_device_ids[] = {
+       { "VPC2004", 0},
+       { "", 0},
 };
-module_acpi_driver(ideapad_acpi_driver);
+MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
+
+static struct platform_driver ideapad_acpi_driver = {
+       .probe = ideapad_acpi_add,
+       .remove = ideapad_acpi_remove,
+       .driver = {
+               .name   = "ideapad_acpi",
+               .owner  = THIS_MODULE,
+               .pm     = &ideapad_pm,
+               .acpi_match_table = ACPI_PTR(ideapad_device_ids),
+       },
+};
+
+module_platform_driver(ideapad_acpi_driver);
 
 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
 MODULE_DESCRIPTION("IdeaPad ACPI Extras");
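For reference, a minimal sketch of the pattern the ideapad hunks above convert to: a platform driver enumerated through an ACPI match table, with the ACPI companion resolved from the platform device. The foo_* names and the "ABCD0001" HID are placeholders, not part of the driver above, and error handling is trimmed.

    #include <linux/acpi.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct acpi_device *adev;

            /* resolve the ACPI companion that enumerated this platform device */
            if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
                    return -ENODEV;

            dev_info(&pdev->dev, "bound to ACPI node %s\n", dev_name(&adev->dev));
            return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
            return 0;
    }

    static const struct acpi_device_id foo_ids[] = {
            { "ABCD0001", 0 },      /* placeholder HID */
            { "", 0 },
    };
    MODULE_DEVICE_TABLE(acpi, foo_ids);

    static struct platform_driver foo_driver = {
            .probe = foo_probe,
            .remove = foo_remove,
            .driver = {
                    .name = "foo_acpi",
                    .owner = THIS_MODULE,
                    .acpi_match_table = ACPI_PTR(foo_ids),
            },
    };
    module_platform_driver(foo_driver);

    MODULE_LICENSE("GPL");
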
index 41b740cb28bca8743ea282bbf2fb5639a2f4266c..a2083a9e5662d660fd95cbb297815aa0570c8268 100644 (file)
@@ -29,24 +29,16 @@ static ssize_t irst_show_wakeup_events(struct device *dev,
                                       char *buf)
 {
        struct acpi_device *acpi;
-       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *result;
+       unsigned long long value;
        acpi_status status;
 
        acpi = to_acpi_device(dev);
 
-       status = acpi_evaluate_object(acpi->handle, "GFFS", NULL, &output);
+       status = acpi_evaluate_integer(acpi->handle, "GFFS", NULL, &value);
        if (!ACPI_SUCCESS(status))
                return -EINVAL;
 
-       result = output.pointer;
-
-       if (result->type != ACPI_TYPE_INTEGER) {
-               kfree(result);
-               return -EINVAL;
-       }
-
-       return sprintf(buf, "%lld\n", result->integer.value);
+       return sprintf(buf, "%lld\n", value);
 }
 
 static ssize_t irst_store_wakeup_events(struct device *dev,
@@ -54,8 +46,6 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
                                        const char *buf, size_t count)
 {
        struct acpi_device *acpi;
-       struct acpi_object_list input;
-       union acpi_object param;
        acpi_status status;
        unsigned long value;
        int error;
@@ -67,13 +57,7 @@ static ssize_t irst_store_wakeup_events(struct device *dev,
        if (error)
                return error;
 
-       param.type = ACPI_TYPE_INTEGER;
-       param.integer.value = value;
-
-       input.count = 1;
-       input.pointer = &param;
-
-       status = acpi_evaluate_object(acpi->handle, "SFFS", &input, NULL);
+       status = acpi_execute_simple_method(acpi->handle, "SFFS", value);
 
        if (!ACPI_SUCCESS(status))
                return -EINVAL;
@@ -91,24 +75,16 @@ static ssize_t irst_show_wakeup_time(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
        struct acpi_device *acpi;
-       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *result;
+       unsigned long long value;
        acpi_status status;
 
        acpi = to_acpi_device(dev);
 
-       status = acpi_evaluate_object(acpi->handle, "GFTV", NULL, &output);
+       status = acpi_evaluate_integer(acpi->handle, "GFTV", NULL, &value);
        if (!ACPI_SUCCESS(status))
                return -EINVAL;
 
-       result = output.pointer;
-
-       if (result->type != ACPI_TYPE_INTEGER) {
-               kfree(result);
-               return -EINVAL;
-       }
-
-       return sprintf(buf, "%lld\n", result->integer.value);
+       return sprintf(buf, "%lld\n", value);
 }
 
 static ssize_t irst_store_wakeup_time(struct device *dev,
@@ -116,8 +92,6 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
                                      const char *buf, size_t count)
 {
        struct acpi_device *acpi;
-       struct acpi_object_list input;
-       union acpi_object param;
        acpi_status status;
        unsigned long value;
        int error;
@@ -129,13 +103,7 @@ static ssize_t irst_store_wakeup_time(struct device *dev,
        if (error)
                return error;
 
-       param.type = ACPI_TYPE_INTEGER;
-       param.integer.value = value;
-
-       input.count = 1;
-       input.pointer = &param;
-
-       status = acpi_evaluate_object(acpi->handle, "SFTV", &input, NULL);
+       status = acpi_execute_simple_method(acpi->handle, "SFTV", value);
 
        if (!ACPI_SUCCESS(status))
                return -EINVAL;
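For reference, the intel-rst hunks above (and the intel-smartconnect, toshiba, topstar and wmi hunks below) all switch from open-coded acpi_evaluate_object() to two helpers. A minimal sketch of the resulting pattern; the example_* wrappers are illustrative, while GFFS/SFFS are the method names used above and acpi_evaluate_integer()/acpi_execute_simple_method() are the real ACPICA helpers.

    #include <linux/acpi.h>
    #include <linux/types.h>

    static int example_read_wakeup_events(acpi_handle handle, u64 *out)
    {
            unsigned long long value;
            acpi_status status;

            /* evaluates GFFS and checks that the result is an integer */
            status = acpi_evaluate_integer(handle, "GFFS", NULL, &value);
            if (ACPI_FAILURE(status))
                    return -EINVAL;

            *out = value;
            return 0;
    }

    static int example_write_wakeup_events(acpi_handle handle, u64 value)
    {
            /* packs a single ACPI_TYPE_INTEGER argument and evaluates SFFS */
            acpi_status status = acpi_execute_simple_method(handle, "SFFS", value);

            return ACPI_FAILURE(status) ? -EINVAL : 0;
    }
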
index 52259dcabecb8251edad2a0e0dc7eccb9b174500..1838400dc0360615f799fac64a82b51f7efb9c17 100644 (file)
@@ -25,37 +25,18 @@ MODULE_LICENSE("GPL");
 
 static int smartconnect_acpi_init(struct acpi_device *acpi)
 {
-       struct acpi_object_list input;
-       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *result;
-       union acpi_object param;
+       unsigned long long value;
        acpi_status status;
 
-       status = acpi_evaluate_object(acpi->handle, "GAOS", NULL, &output);
+       status = acpi_evaluate_integer(acpi->handle, "GAOS", NULL, &value);
        if (!ACPI_SUCCESS(status))
                return -EINVAL;
 
-       result = output.pointer;
-
-       if (result->type != ACPI_TYPE_INTEGER) {
-               kfree(result);
-               return -EINVAL;
-       }
-
-       if (result->integer.value & 0x1) {
-               param.type = ACPI_TYPE_INTEGER;
-               param.integer.value = 0;
-
-               input.count = 1;
-               input.pointer = &param;
-
+       if (value & 0x1) {
                dev_info(&acpi->dev, "Disabling Intel Smart Connect\n");
-               status = acpi_evaluate_object(acpi->handle, "SAOS", &input,
-                                             NULL);
+               status = acpi_execute_simple_method(acpi->handle, "SAOS", 0);
        }
 
-       kfree(result);
-
        return 0;
 }
 
index d6cfc1558c2f1e067d8fd72edb1dc7ce3ecd00e3..11244f8703c402c876fb34b0e107383781661416 100644 (file)
@@ -156,19 +156,15 @@ static struct thermal_cooling_device_ops memory_cooling_ops = {
 static int intel_menlow_memory_add(struct acpi_device *device)
 {
        int result = -ENODEV;
-       acpi_status status = AE_OK;
-       acpi_handle dummy;
        struct thermal_cooling_device *cdev;
 
        if (!device)
                return -EINVAL;
 
-       status = acpi_get_handle(device->handle, MEMORY_GET_BANDWIDTH, &dummy);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(device->handle, MEMORY_GET_BANDWIDTH))
                goto end;
 
-       status = acpi_get_handle(device->handle, MEMORY_SET_BANDWIDTH, &dummy);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(device->handle, MEMORY_SET_BANDWIDTH))
                goto end;
 
        cdev = thermal_cooling_device_register("Memory controller", device,
index d3fd52036fd6e041592c7060c987ed5508684a0e..47caab0ea7a14faa1051b35eafe1ff8e53703ee0 100644 (file)
@@ -127,18 +127,17 @@ MODULE_PARM_DESC(minor,
                 "default is -1 (automatic)");
 #endif
 
-static int kbd_backlight = 1;
+static int kbd_backlight = -1;
 module_param(kbd_backlight, int, 0444);
 MODULE_PARM_DESC(kbd_backlight,
                 "set this to 0 to disable keyboard backlight, "
-                "1 to enable it (default: 0)");
+                "1 to enable it (default: no change from current value)");
 
-static int kbd_backlight_timeout;      /* = 0 */
+static int kbd_backlight_timeout = -1;
 module_param(kbd_backlight_timeout, int, 0444);
 MODULE_PARM_DESC(kbd_backlight_timeout,
-                "set this to 0 to set the default 10 seconds timeout, "
-                "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout "
-                "(default: 0)");
+                "meaningful values vary from 0 to 3 and their meaning depends "
+                "on the model (default: no change from current value)");
 
 #ifdef CONFIG_PM_SLEEP
 static void sony_nc_kbd_backlight_resume(void);
@@ -1509,7 +1508,6 @@ static void sony_nc_function_resume(void)
 static int sony_nc_resume(struct device *dev)
 {
        struct sony_nc_value *item;
-       acpi_handle handle;
 
        for (item = sony_nc_values; item->name; item++) {
                int ret;
@@ -1524,15 +1522,13 @@ static int sony_nc_resume(struct device *dev)
                }
        }
 
-       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
-                                        &handle))) {
+       if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
                int arg = 1;
                if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
                        dprintk("ECON Method failed\n");
        }
 
-       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
-                                        &handle)))
+       if (acpi_has_method(sony_nc_acpi_handle, "SN00"))
                sony_nc_function_resume();
 
        return 0;
@@ -1844,6 +1840,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
        if (!kbdbl_ctl)
                return -ENOMEM;
 
+       kbdbl_ctl->mode = kbd_backlight;
+       kbdbl_ctl->timeout = kbd_backlight_timeout;
        kbdbl_ctl->handle = handle;
        if (handle == 0x0137)
                kbdbl_ctl->base = 0x0C00;
@@ -1870,8 +1868,8 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
        if (ret)
                goto outmode;
 
-       __sony_nc_kbd_backlight_mode_set(kbd_backlight);
-       __sony_nc_kbd_backlight_timeout_set(kbd_backlight_timeout);
+       __sony_nc_kbd_backlight_mode_set(kbdbl_ctl->mode);
+       __sony_nc_kbd_backlight_timeout_set(kbdbl_ctl->timeout);
 
        return 0;
 
@@ -1886,17 +1884,8 @@ outkzalloc:
 static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
 {
        if (kbdbl_ctl) {
-               int result;
-
                device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
                device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
-
-               /* restore the default hw behaviour */
-               sony_call_snc_handle(kbdbl_ctl->handle,
-                               kbdbl_ctl->base | 0x10000, &result);
-               sony_call_snc_handle(kbdbl_ctl->handle,
-                               kbdbl_ctl->base + 0x200, &result);
-
                kfree(kbdbl_ctl);
                kbdbl_ctl = NULL;
        }
@@ -2690,7 +2679,6 @@ static void sony_nc_backlight_ng_read_limits(int handle,
 
 static void sony_nc_backlight_setup(void)
 {
-       acpi_handle unused;
        int max_brightness = 0;
        const struct backlight_ops *ops = NULL;
        struct backlight_properties props;
@@ -2725,8 +2713,7 @@ static void sony_nc_backlight_setup(void)
                sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
                max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
-       } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
-                                               &unused))) {
+       } else if (acpi_has_method(sony_nc_acpi_handle, "GBRT")) {
                ops = &sony_backlight_ops;
                max_brightness = SONY_MAX_BRIGHTNESS - 1;
 
@@ -2758,7 +2745,6 @@ static int sony_nc_add(struct acpi_device *device)
 {
        acpi_status status;
        int result = 0;
-       acpi_handle handle;
        struct sony_nc_value *item;
 
        pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
@@ -2798,15 +2784,13 @@ static int sony_nc_add(struct acpi_device *device)
                goto outplatform;
        }
 
-       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
-                                        &handle))) {
+       if (acpi_has_method(sony_nc_acpi_handle, "ECON")) {
                int arg = 1;
                if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
                        dprintk("ECON Method failed\n");
        }
 
-       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
-                                        &handle))) {
+       if (acpi_has_method(sony_nc_acpi_handle, "SN00")) {
                dprintk("Doing SNC setup\n");
                /* retrieve the available handles */
                result = sony_nc_handles_setup(sony_pf_device);
@@ -2829,9 +2813,8 @@ static int sony_nc_add(struct acpi_device *device)
 
                /* find the available acpiget as described in the DSDT */
                for (; item->acpiget && *item->acpiget; ++item->acpiget) {
-                       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
-                                                        *item->acpiget,
-                                                        &handle))) {
+                       if (acpi_has_method(sony_nc_acpi_handle,
+                                                       *item->acpiget)) {
                                dprintk("Found %s getter: %s\n",
                                                item->name, *item->acpiget);
                                item->devattr.attr.mode |= S_IRUGO;
@@ -2841,9 +2824,8 @@ static int sony_nc_add(struct acpi_device *device)
 
                /* find the available acpiset as described in the DSDT */
                for (; item->acpiset && *item->acpiset; ++item->acpiset) {
-                       if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle,
-                                                        *item->acpiset,
-                                                        &handle))) {
+                       if (acpi_has_method(sony_nc_acpi_handle,
+                                                       *item->acpiset)) {
                                dprintk("Found %s setter: %s\n",
                                                item->name, *item->acpiset);
                                item->devattr.attr.mode |= S_IWUSR;
index 03ca6c139f1a5c9dd0cc821045bad7b5cb409d51..05e046aa5e314be112b0e165f93fd82c6ab16fe5 100644 (file)
@@ -23,7 +23,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define TPACPI_VERSION "0.24"
+#define TPACPI_VERSION "0.25"
 #define TPACPI_SYSFS_VERSION 0x020700
 
 /*
@@ -88,6 +88,7 @@
 
 #include <linux/pci_ids.h>
 
+#include <linux/thinkpad_acpi.h>
 
 /* ThinkPad CMOS commands */
 #define TP_CMOS_VOLUME_DOWN    0
@@ -700,6 +701,14 @@ static void __init drv_acpi_handle_init(const char *name,
 static acpi_status __init tpacpi_acpi_handle_locate_callback(acpi_handle handle,
                        u32 level, void *context, void **return_value)
 {
+       struct acpi_device *dev;
+       if (!strcmp(context, "video")) {
+               if (acpi_bus_get_device(handle, &dev))
+                       return AE_OK;
+               if (strcmp(ACPI_VIDEO_HID, acpi_device_hid(dev)))
+                       return AE_OK;
+       }
+
        *(acpi_handle *)return_value = handle;
 
        return AE_CTRL_TERMINATE;
@@ -712,10 +721,10 @@ static void __init tpacpi_acpi_handle_locate(const char *name,
        acpi_status status;
        acpi_handle device_found;
 
-       BUG_ON(!name || !hid || !handle);
+       BUG_ON(!name || !handle);
        vdbg_printk(TPACPI_DBG_INIT,
                        "trying to locate ACPI handle for %s, using HID %s\n",
-                       name, hid);
+                       name, hid ? hid : "NULL");
 
        memset(&device_found, 0, sizeof(device_found));
        status = acpi_get_devices(hid, tpacpi_acpi_handle_locate_callback,
@@ -6090,19 +6099,28 @@ static int __init tpacpi_query_bcl_levels(acpi_handle handle)
 {
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
+       struct acpi_device *device, *child;
        int rc;
 
-       if (ACPI_SUCCESS(acpi_evaluate_object(handle, "_BCL", NULL, &buffer))) {
+       if (acpi_bus_get_device(handle, &device))
+               return 0;
+
+       rc = 0;
+       list_for_each_entry(child, &device->children, node) {
+               acpi_status status = acpi_evaluate_object(child->handle, "_BCL",
+                                                         NULL, &buffer);
+               if (ACPI_FAILURE(status))
+                       continue;
+
                obj = (union acpi_object *)buffer.pointer;
                if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
                        pr_err("Unknown _BCL data, please report this to %s\n",
-                              TPACPI_MAIL);
+                               TPACPI_MAIL);
                        rc = 0;
                } else {
                        rc = obj->package.count;
                }
-       } else {
-               return 0;
+               break;
        }
 
        kfree(buffer.pointer);
@@ -6118,7 +6136,7 @@ static unsigned int __init tpacpi_check_std_acpi_brightness_support(void)
        acpi_handle video_device;
        int bcl_levels = 0;
 
-       tpacpi_acpi_handle_locate("video", ACPI_VIDEO_HID, &video_device);
+       tpacpi_acpi_handle_locate("video", NULL, &video_device);
        if (video_device)
                bcl_levels = tpacpi_query_bcl_levels(video_device);
 
@@ -8350,6 +8368,91 @@ static struct ibm_struct fan_driver_data = {
        .resume = fan_resume,
 };
 
+/*************************************************************************
+ * Mute LED subdriver
+ */
+
+
+struct tp_led_table {
+       acpi_string name;
+       int on_value;
+       int off_value;
+       int state;
+};
+
+static struct tp_led_table led_tables[] = {
+       [TPACPI_LED_MUTE] = {
+               .name = "SSMS",
+               .on_value = 1,
+               .off_value = 0,
+       },
+       [TPACPI_LED_MICMUTE] = {
+               .name = "MMTS",
+               .on_value = 2,
+               .off_value = 0,
+       },
+};
+
+static int mute_led_on_off(struct tp_led_table *t, bool state)
+{
+       acpi_handle temp;
+       int output;
+
+       if (!ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp))) {
+               pr_warn("Thinkpad ACPI has no %s interface.\n", t->name);
+               return -EIO;
+       }
+
+       if (!acpi_evalf(hkey_handle, &output, t->name, "dd",
+                       state ? t->on_value : t->off_value))
+               return -EIO;
+
+       t->state = state;
+       return state;
+}
+
+int tpacpi_led_set(int whichled, bool on)
+{
+       struct tp_led_table *t;
+
+       if (whichled < 0 || whichled >= TPACPI_LED_MAX)
+               return -EINVAL;
+
+       t = &led_tables[whichled];
+       if (t->state < 0 || t->state == on)
+               return t->state;
+       return mute_led_on_off(t, on);
+}
+EXPORT_SYMBOL_GPL(tpacpi_led_set);
+
+static int mute_led_init(struct ibm_init_struct *iibm)
+{
+       acpi_handle temp;
+       int i;
+
+       for (i = 0; i < TPACPI_LED_MAX; i++) {
+               struct tp_led_table *t = &led_tables[i];
+               if (ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp)))
+                       mute_led_on_off(t, false);
+               else
+                       t->state = -ENODEV;
+       }
+       return 0;
+}
+
+static void mute_led_exit(void)
+{
+       int i;
+
+       for (i = 0; i < TPACPI_LED_MAX; i++)
+               tpacpi_led_set(i, false);
+}
+
+static struct ibm_struct mute_led_driver_data = {
+       .name = "mute_led",
+       .exit = mute_led_exit,
+};
+
 /****************************************************************************
  ****************************************************************************
  *
@@ -8768,6 +8871,10 @@ static struct ibm_init_struct ibms_init[] __initdata = {
                .init = fan_init,
                .data = &fan_driver_data,
        },
+       {
+               .init = mute_led_init,
+               .data = &mute_led_driver_data,
+       },
 };
 
 static int __init set_ibm_param(const char *val, struct kernel_param *kp)
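For reference, a minimal sketch of how the tpacpi_led_set() hook exported above is intended to be consumed by other kernel code (for example, a sound driver mirroring its mute state); the wrapper below is purely illustrative.

    #include <linux/printk.h>
    #include <linux/thinkpad_acpi.h>
    #include <linux/types.h>

    /* mirror a codec mute state onto the ThinkPad mute LED */
    static void example_update_mute_led(bool muted)
    {
            int err = tpacpi_led_set(TPACPI_LED_MUTE, muted);

            if (err < 0)
                    pr_debug("ThinkPad mute LED not available (%d)\n", err);
    }
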
index 4ab618c63b457e0bbf5a613274ab10a3e76f26cc..67897c8740ba58ea3c93cf54b0a2a3e74a1e3a84 100644 (file)
@@ -80,13 +80,9 @@ static void acpi_topstar_notify(struct acpi_device *device, u32 event)
 static int acpi_topstar_fncx_switch(struct acpi_device *device, bool state)
 {
        acpi_status status;
-       union acpi_object fncx_params[1] = {
-               { .type = ACPI_TYPE_INTEGER }
-       };
-       struct acpi_object_list fncx_arg_list = { 1, &fncx_params[0] };
 
-       fncx_params[0].integer.value = state ? 0x86 : 0x87;
-       status = acpi_evaluate_object(device->handle, "FNCX", &fncx_arg_list, NULL);
+       status = acpi_execute_simple_method(device->handle, "FNCX",
+                                               state ? 0x86 : 0x87);
        if (ACPI_FAILURE(status)) {
                pr_err("Unable to switch FNCX notifications\n");
                return -ENODEV;
index eb3467ea6d860e6c6961f3294922804e0e5776a2..0cfadb65f7c639597abce1d1bcb81ba3a04ff1d3 100644 (file)
@@ -191,16 +191,9 @@ static __inline__ void _set_bit(u32 * word, u32 mask, int value)
 
 static int write_acpi_int(const char *methodName, int val)
 {
-       struct acpi_object_list params;
-       union acpi_object in_objs[1];
        acpi_status status;
 
-       params.count = ARRAY_SIZE(in_objs);
-       params.pointer = in_objs;
-       in_objs[0].type = ACPI_TYPE_INTEGER;
-       in_objs[0].integer.value = val;
-
-       status = acpi_evaluate_object(NULL, (char *)methodName, &params, NULL);
+       status = acpi_execute_simple_method(NULL, (char *)methodName, val);
        return (status == AE_OK) ? 0 : -EIO;
 }
 
@@ -947,21 +940,17 @@ static void toshiba_acpi_hotkey_work(struct work_struct *work)
  */
 static int toshiba_acpi_query_hotkey(struct toshiba_acpi_dev *dev)
 {
-       struct acpi_buffer buf;
-       union acpi_object out_obj;
+       unsigned long long value;
        acpi_status status;
 
-       buf.pointer = &out_obj;
-       buf.length = sizeof(out_obj);
-
-       status = acpi_evaluate_object(dev->acpi_dev->handle, "INFO",
-                                     NULL, &buf);
-       if (ACPI_FAILURE(status) || out_obj.type != ACPI_TYPE_INTEGER) {
+       status = acpi_evaluate_integer(dev->acpi_dev->handle, "INFO",
+                                     NULL, &value);
+       if (ACPI_FAILURE(status)) {
                pr_err("ACPI INFO method execution failed\n");
                return -EIO;
        }
 
-       return out_obj.integer.value;
+       return value;
 }
 
 static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
@@ -981,7 +970,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
 static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
 {
        acpi_status status;
-       acpi_handle ec_handle, handle;
+       acpi_handle ec_handle;
        int error;
        u32 hci_result;
 
@@ -1008,10 +997,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
         */
        status = AE_ERROR;
        ec_handle = ec_get_handle();
-       if (ec_handle)
-               status = acpi_get_handle(ec_handle, "NTFY", &handle);
-
-       if (ACPI_SUCCESS(status)) {
+       if (ec_handle && acpi_has_method(ec_handle, "NTFY")) {
                INIT_WORK(&dev->hotkey_work, toshiba_acpi_hotkey_work);
 
                error = i8042_install_filter(toshiba_acpi_i8042_filter);
@@ -1027,10 +1013,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
         * Determine hotkey query interface. Prefer using the INFO
         * method when it is available.
         */
-       status = acpi_get_handle(dev->acpi_dev->handle, "INFO", &handle);
-       if (ACPI_SUCCESS(status)) {
+       if (acpi_has_method(dev->acpi_dev->handle, "INFO"))
                dev->info_supported = 1;
-       } else {
+       else {
                hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
                if (hci_result == HCI_SUCCESS)
                        dev->system_event_supported = 1;
@@ -1155,15 +1140,10 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
 
 static const char *find_hci_method(acpi_handle handle)
 {
-       acpi_status status;
-       acpi_handle hci_handle;
-
-       status = acpi_get_handle(handle, "GHCI", &hci_handle);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(handle, "GHCI"))
                return "GHCI";
 
-       status = acpi_get_handle(handle, "SPFC", &hci_handle);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(handle, "SPFC"))
                return "SPFC";
 
        return NULL;
index 601ea951224201a87719f0acc7de5ee7e4635649..62e8c221d01ea10a5f105a0a44b2d2d8277d7878 100644 (file)
@@ -252,8 +252,6 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
 {
        struct guid_block *block = NULL;
        char method[5];
-       struct acpi_object_list input;
-       union acpi_object params[1];
        acpi_status status;
        acpi_handle handle;
 
@@ -263,13 +261,9 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
        if (!block)
                return AE_NOT_EXIST;
 
-       input.count = 1;
-       input.pointer = params;
-       params[0].type = ACPI_TYPE_INTEGER;
-       params[0].integer.value = enable;
 
        snprintf(method, 5, "WE%02X", block->notify_id);
-       status = acpi_evaluate_object(handle, method, &input, NULL);
+       status = acpi_execute_simple_method(handle, method, enable);
 
        if (status != AE_OK && status != AE_NOT_FOUND)
                return status;
@@ -353,10 +347,10 @@ struct acpi_buffer *out)
 {
        struct guid_block *block = NULL;
        struct wmi_block *wblock = NULL;
-       acpi_handle handle, wc_handle;
+       acpi_handle handle;
        acpi_status status, wc_status = AE_ERROR;
-       struct acpi_object_list input, wc_input;
-       union acpi_object wc_params[1], wq_params[1];
+       struct acpi_object_list input;
+       union acpi_object wq_params[1];
        char method[5];
        char wc_method[5] = "WC";
 
@@ -386,11 +380,6 @@ struct acpi_buffer *out)
         * enable collection.
         */
        if (block->flags & ACPI_WMI_EXPENSIVE) {
-               wc_input.count = 1;
-               wc_input.pointer = wc_params;
-               wc_params[0].type = ACPI_TYPE_INTEGER;
-               wc_params[0].integer.value = 1;
-
                strncat(wc_method, block->object_id, 2);
 
                /*
@@ -398,10 +387,9 @@ struct acpi_buffer *out)
                 * expensive, but have no corresponding WCxx method. So we
                 * should not fail if this happens.
                 */
-               wc_status = acpi_get_handle(handle, wc_method, &wc_handle);
-               if (ACPI_SUCCESS(wc_status))
-                       wc_status = acpi_evaluate_object(handle, wc_method,
-                               &wc_input, NULL);
+               if (acpi_has_method(handle, wc_method))
+                       wc_status = acpi_execute_simple_method(handle,
+                                                               wc_method, 1);
        }
 
        strcpy(method, "WQ");
@@ -414,9 +402,7 @@ struct acpi_buffer *out)
         * the WQxx method failed - we should disable collection anyway.
         */
        if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
-               wc_params[0].integer.value = 0;
-               status = acpi_evaluate_object(handle,
-               wc_method, &wc_input, NULL);
+               status = acpi_execute_simple_method(handle, wc_method, 0);
        }
 
        return status;
index 34049b0b4c731e352ae48f62c7fd3edcfc71b517..747826d99059955f8941d592ef2f7734cd38fd08 100644 (file)
@@ -239,8 +239,6 @@ static char *__init pnpacpi_get_id(struct acpi_device *device)
 
 static int __init pnpacpi_add_device(struct acpi_device *device)
 {
-       acpi_handle temp = NULL;
-       acpi_status status;
        struct pnp_dev *dev;
        char *pnpid;
        struct acpi_hardware_id *id;
@@ -253,8 +251,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
         * If a PnPacpi device is not present , the device
         * driver should not be loaded.
         */
-       status = acpi_get_handle(device->handle, "_CRS", &temp);
-       if (ACPI_FAILURE(status))
+       if (!acpi_has_method(device->handle, "_CRS"))
                return 0;
 
        pnpid = pnpacpi_get_id(device);
@@ -271,16 +268,14 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
        dev->data = device;
        /* .enabled means the device can decode the resources */
        dev->active = device->status.enabled;
-       status = acpi_get_handle(device->handle, "_SRS", &temp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(device->handle, "_SRS"))
                dev->capabilities |= PNP_CONFIGURABLE;
        dev->capabilities |= PNP_READ;
        if (device->flags.dynamic_status && (dev->capabilities & PNP_CONFIGURABLE))
                dev->capabilities |= PNP_WRITE;
        if (device->flags.removable)
                dev->capabilities |= PNP_REMOVABLE;
-       status = acpi_get_handle(device->handle, "_DIS", &temp);
-       if (ACPI_SUCCESS(status))
+       if (acpi_has_method(device->handle, "_DIS"))
                dev->capabilities |= PNP_DISABLE;
 
        if (strlen(acpi_device_name(device)))
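For reference, the acpi_has_method() conversions above (intel_menlow, sony-laptop, toshiba_acpi, pnpacpi) all follow the same shape; a minimal sketch with an illustrative example_ function around the real helper:

    #include <linux/acpi.h>

    static void example_probe_caps(struct acpi_device *device)
    {
            /* acpi_has_method() replaces acpi_get_handle() with a dummy handle */
            if (acpi_has_method(device->handle, "_DIS"))
                    dev_info(&device->dev, "device can be disabled via _DIS\n");
    }
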
diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig
new file mode 100644 (file)
index 0000000..a7c81b5
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Generic power capping sysfs interface configuration
+#
+
+menuconfig POWERCAP
+       bool "Generic powercap sysfs driver"
+       help
+         The power capping sysfs interface allows kernel subsystems to expose power
+         capping settings to user space in a consistent way.  Usually, it consists
+         of multiple control types, which determine which settings may be exposed,
+         and of power zones, which represent the parts of the system that can be
+         subject to power capping.
+
+         If you want this code to be compiled in, say Y here.
+
+if POWERCAP
+# Client driver configurations go here.
+config INTEL_RAPL
+       tristate "Intel RAPL Support"
+       depends on X86
+       default n
+       ---help---
+         This enables support for the Intel Running Average Power Limit (RAPL)
+         technology which allows power limits to be enforced and monitored on
+         modern Intel processors (Sandy Bridge and later).
+
+         In RAPL, the platform level settings are divided into domains for
+         fine grained control. These domains include processor package, DRAM
+         controller, CPU core (Power Plane 0), graphics uncore (Power Plane
+         1), etc.
+
+endif
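The help text above describes the powercap object model: one control type per capping mechanism, with power zones (and per-zone constraints) underneath. A rough sketch of how a client driver could register with that interface, assuming the powercap_register_control_type()/powercap_register_zone() helpers this series introduces; all example_* names are made up, the zone carries no constraints, and error handling is minimal.

    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/powercap.h>

    static struct powercap_control_type *example_ct;
    static struct powercap_zone example_zone;

    static int example_get_energy_uj(struct powercap_zone *zone, u64 *energy)
    {
            *energy = 0;    /* a real driver reads its energy counter here */
            return 0;
    }

    static int example_release_zone(struct powercap_zone *zone)
    {
            return 0;       /* zone is static, nothing to free */
    }

    static struct powercap_zone_ops example_zone_ops = {
            .get_energy_uj = example_get_energy_uj,
            .release = example_release_zone,
    };

    static int __init example_powercap_init(void)
    {
            struct powercap_zone *zone;

            /* shows up as a new control type under /sys/class/powercap/ */
            example_ct = powercap_register_control_type(NULL, "example", NULL);
            if (IS_ERR(example_ct))
                    return PTR_ERR(example_ct);

            /* a top-level zone (no parent, no constraints) under that type */
            zone = powercap_register_zone(&example_zone, example_ct, "dummy",
                                          NULL, &example_zone_ops, 0, NULL);
            if (IS_ERR(zone)) {
                    powercap_unregister_control_type(example_ct);
                    return PTR_ERR(zone);
            }
            return 0;
    }
    module_init(example_powercap_init);
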
diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile
new file mode 100644 (file)
index 0000000..0a21ef3
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_POWERCAP) += powercap_sys.o
+obj-$(CONFIG_INTEL_RAPL) += intel_rapl.o
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
new file mode 100644 (file)
index 0000000..2a786c5
--- /dev/null
@@ -0,0 +1,1395 @@
+/*
+ * Intel Running Average Power Limit (RAPL) Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/log2.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/powercap.h>
+
+#include <asm/processor.h>
+#include <asm/cpu_device_id.h>
+
+/* bitmasks for RAPL MSRs, used by primitive access functions */
+#define ENERGY_STATUS_MASK      0xffffffff
+
+#define POWER_LIMIT1_MASK       0x7FFF
+#define POWER_LIMIT1_ENABLE     BIT(15)
+#define POWER_LIMIT1_CLAMP      BIT(16)
+
+#define POWER_LIMIT2_MASK       (0x7FFFULL<<32)
+#define POWER_LIMIT2_ENABLE     BIT_ULL(47)
+#define POWER_LIMIT2_CLAMP      BIT_ULL(48)
+#define POWER_PACKAGE_LOCK      BIT_ULL(63)
+#define POWER_PP_LOCK           BIT(31)
+
+#define TIME_WINDOW1_MASK       (0x7FULL<<17)
+#define TIME_WINDOW2_MASK       (0x7FULL<<49)
+
+#define POWER_UNIT_OFFSET      0
+#define POWER_UNIT_MASK                0x0F
+
+#define ENERGY_UNIT_OFFSET     0x08
+#define ENERGY_UNIT_MASK       0x1F00
+
+#define TIME_UNIT_OFFSET       0x10
+#define TIME_UNIT_MASK         0xF0000
+
+#define POWER_INFO_MAX_MASK     (0x7fffULL<<32)
+#define POWER_INFO_MIN_MASK     (0x7fffULL<<16)
+#define POWER_INFO_MAX_TIME_WIN_MASK     (0x3fULL<<48)
+#define POWER_INFO_THERMAL_SPEC_MASK     0x7fff
+
+#define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff
+#define PP_POLICY_MASK         0x1F
+
+/* Non HW constants */
+#define RAPL_PRIMITIVE_DERIVED       BIT(1) /* not from raw data */
+#define RAPL_PRIMITIVE_DUMMY         BIT(2)
+
+/* scale RAPL units to avoid floating point math inside kernel */
+#define POWER_UNIT_SCALE     (1000000)
+#define ENERGY_UNIT_SCALE    (1000000)
+#define TIME_UNIT_SCALE      (1000000)
+
+#define TIME_WINDOW_MAX_MSEC 40000
+#define TIME_WINDOW_MIN_MSEC 250
+
+enum unit_type {
+       ARBITRARY_UNIT, /* no translation */
+       POWER_UNIT,
+       ENERGY_UNIT,
+       TIME_UNIT,
+};
+
+enum rapl_domain_type {
+       RAPL_DOMAIN_PACKAGE, /* entire package/socket */
+       RAPL_DOMAIN_PP0, /* core power plane */
+       RAPL_DOMAIN_PP1, /* graphics uncore */
+       RAPL_DOMAIN_DRAM,/* DRAM control_type */
+       RAPL_DOMAIN_MAX,
+};
+
+enum rapl_domain_msr_id {
+       RAPL_DOMAIN_MSR_LIMIT,
+       RAPL_DOMAIN_MSR_STATUS,
+       RAPL_DOMAIN_MSR_PERF,
+       RAPL_DOMAIN_MSR_POLICY,
+       RAPL_DOMAIN_MSR_INFO,
+       RAPL_DOMAIN_MSR_MAX,
+};
+
+/* per domain data, some are optional */
+enum rapl_primitives {
+       ENERGY_COUNTER,
+       POWER_LIMIT1,
+       POWER_LIMIT2,
+       FW_LOCK,
+
+       PL1_ENABLE,  /* power limit 1, aka long term */
+       PL1_CLAMP,   /* allow frequency to go below OS request */
+       PL2_ENABLE,  /* power limit 2, aka short term, instantaneous */
+       PL2_CLAMP,
+
+       TIME_WINDOW1, /* long term */
+       TIME_WINDOW2, /* short term */
+       THERMAL_SPEC_POWER,
+       MAX_POWER,
+
+       MIN_POWER,
+       MAX_TIME_WINDOW,
+       THROTTLED_TIME,
+       PRIORITY_LEVEL,
+
+       /* below are not raw primitive data */
+       AVERAGE_POWER,
+       NR_RAPL_PRIMITIVES,
+};
+
+#define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2)
+
+/* Can be expanded to include events, etc.*/
+struct rapl_domain_data {
+       u64 primitives[NR_RAPL_PRIMITIVES];
+       unsigned long timestamp;
+};
+
+
+#define        DOMAIN_STATE_INACTIVE           BIT(0)
+#define        DOMAIN_STATE_POWER_LIMIT_SET    BIT(1)
+#define DOMAIN_STATE_BIOS_LOCKED        BIT(2)
+
+#define NR_POWER_LIMITS (2)
+struct rapl_power_limit {
+       struct powercap_zone_constraint *constraint;
+       int prim_id; /* primitive ID used to enable */
+       struct rapl_domain *domain;
+       const char *name;
+};
+
+static const char pl1_name[] = "long_term";
+static const char pl2_name[] = "short_term";
+
+struct rapl_domain {
+       const char *name;
+       enum rapl_domain_type id;
+       int msrs[RAPL_DOMAIN_MSR_MAX];
+       struct powercap_zone power_zone;
+       struct rapl_domain_data rdd;
+       struct rapl_power_limit rpl[NR_POWER_LIMITS];
+       u64 attr_map; /* track capabilities */
+       unsigned int state;
+       int package_id;
+};
+#define power_zone_to_rapl_domain(_zone) \
+       container_of(_zone, struct rapl_domain, power_zone)
+
+
+/* Each physical package contains multiple domains; these are the common
+ * data across RAPL domains within a package.
+ */
+struct rapl_package {
+       unsigned int id; /* physical package/socket id */
+       unsigned int nr_domains;
+       unsigned long domain_map; /* bit map of active domains */
+       unsigned int power_unit_divisor;
+       unsigned int energy_unit_divisor;
+       unsigned int time_unit_divisor;
+       struct rapl_domain *domains; /* array of domains, sized at runtime */
+       struct powercap_zone *power_zone; /* keep track of parent zone */
+       int nr_cpus; /* active cpus on the package; topology info is lost during
+                     * cpu hotplug, so we have to track it ourselves.
+                     */
+       unsigned long power_limit_irq; /* keep track of package power limit
+                                       * notify interrupt enable status.
+                                       */
+       struct list_head plist;
+};
+#define PACKAGE_PLN_INT_SAVED   BIT(0)
+#define MAX_PRIM_NAME (32)
+
+/* Per-domain data, used to describe individual knobs so that access functions
+ * can be consolidated into one instead of many inline functions.
+ */
+struct rapl_primitive_info {
+       const char *name;
+       u64 mask;
+       int shift;
+       enum rapl_domain_msr_id id;
+       enum unit_type unit;
+       u32 flag;
+};
+
+#define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) {        \
+               .name = #p,                     \
+               .mask = m,                      \
+               .shift = s,                     \
+               .id = i,                        \
+               .unit = u,                      \
+               .flag = f                       \
+       }
+
+static void rapl_init_domains(struct rapl_package *rp);
+static int rapl_read_data_raw(struct rapl_domain *rd,
+                       enum rapl_primitives prim,
+                       bool xlate, u64 *data);
+static int rapl_write_data_raw(struct rapl_domain *rd,
+                       enum rapl_primitives prim,
+                       unsigned long long value);
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+                       int to_raw);
+static void package_power_limit_irq_save(int package_id);
+
+static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */
+
+static const char * const rapl_domain_names[] = {
+       "package",
+       "core",
+       "uncore",
+       "dram",
+};
+
+static struct powercap_control_type *control_type; /* PowerCap Controller */
+
+/* caller to ensure CPU hotplug lock is held */
+static struct rapl_package *find_package_by_id(int id)
+{
+       struct rapl_package *rp;
+
+       list_for_each_entry(rp, &rapl_packages, plist) {
+               if (rp->id == id)
+                       return rp;
+       }
+
+       return NULL;
+}
+
+/* caller to ensure CPU hotplug lock is held */
+static int find_active_cpu_on_package(int package_id)
+{
+       int i;
+
+       for_each_online_cpu(i) {
+               if (topology_physical_package_id(i) == package_id)
+                       return i;
+       }
+       /* all CPUs on this package are offline */
+
+       return -ENODEV;
+}
+
+/* caller must hold cpu hotplug lock */
+static void rapl_cleanup_data(void)
+{
+       struct rapl_package *p, *tmp;
+
+       list_for_each_entry_safe(p, tmp, &rapl_packages, plist) {
+               kfree(p->domains);
+               list_del(&p->plist);
+               kfree(p);
+       }
+}
+
+static int get_energy_counter(struct powercap_zone *power_zone, u64 *energy_raw)
+{
+       struct rapl_domain *rd;
+       u64 energy_now;
+
+       /* prevent CPU hotplug so that the RAPL domain does not go
+        * away while reading the counter.
+        */
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+
+       if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
+               *energy_raw = energy_now;
+               put_online_cpus();
+
+               return 0;
+       }
+       put_online_cpus();
+
+       return -EIO;
+}
+
+static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
+{
+       *energy = rapl_unit_xlate(0, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
+       return 0;
+}
+
+static int release_zone(struct powercap_zone *power_zone)
+{
+       struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+       struct rapl_package *rp;
+
+       /* package zone is the last zone of a package, so we can free
+        * memory here since all children have been unregistered.
+        */
+       if (rd->id == RAPL_DOMAIN_PACKAGE) {
+               rp = find_package_by_id(rd->package_id);
+               if (!rp) {
+                       dev_warn(&power_zone->dev, "no package id %s\n",
+                               rd->name);
+                       return -ENODEV;
+               }
+               kfree(rd);
+               rp->domains = NULL;
+       }
+
+       return 0;
+
+}
+
+static int find_nr_power_limit(struct rapl_domain *rd)
+{
+       int i;
+
+       for (i = 0; i < NR_POWER_LIMITS; i++) {
+               if (rd->rpl[i].name == NULL)
+                       break;
+       }
+
+       return i;
+}
+
+static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
+{
+       struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+       int nr_powerlimit;
+
+       if (rd->state & DOMAIN_STATE_BIOS_LOCKED)
+               return -EACCES;
+       get_online_cpus();
+       nr_powerlimit = find_nr_power_limit(rd);
+       /* here we activate/deactivate the hardware for power limiting */
+       rapl_write_data_raw(rd, PL1_ENABLE, mode);
+       /* always enable clamp such that p-state can go below OS requested
+        * range; power capping takes priority over guaranteed frequency.
+        */
+       rapl_write_data_raw(rd, PL1_CLAMP, mode);
+       /* some domains have pl2 */
+       if (nr_powerlimit > 1) {
+               rapl_write_data_raw(rd, PL2_ENABLE, mode);
+               rapl_write_data_raw(rd, PL2_CLAMP, mode);
+       }
+       put_online_cpus();
+
+       return 0;
+}
+
+static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
+{
+       struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
+       u64 val;
+
+       if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+               *mode = false;
+               return 0;
+       }
+       get_online_cpus();
+       if (rapl_read_data_raw(rd, PL1_ENABLE, true, &val)) {
+               put_online_cpus();
+               return -EIO;
+       }
+       *mode = val;
+       put_online_cpus();
+
+       return 0;
+}
+
+/* per RAPL domain ops, in the order of rapl_domain_type */
+static struct powercap_zone_ops zone_ops[] = {
+       /* RAPL_DOMAIN_PACKAGE */
+       {
+               .get_energy_uj = get_energy_counter,
+               .get_max_energy_range_uj = get_max_energy_counter,
+               .release = release_zone,
+               .set_enable = set_domain_enable,
+               .get_enable = get_domain_enable,
+       },
+       /* RAPL_DOMAIN_PP0 */
+       {
+               .get_energy_uj = get_energy_counter,
+               .get_max_energy_range_uj = get_max_energy_counter,
+               .release = release_zone,
+               .set_enable = set_domain_enable,
+               .get_enable = get_domain_enable,
+       },
+       /* RAPL_DOMAIN_PP1 */
+       {
+               .get_energy_uj = get_energy_counter,
+               .get_max_energy_range_uj = get_max_energy_counter,
+               .release = release_zone,
+               .set_enable = set_domain_enable,
+               .get_enable = get_domain_enable,
+       },
+       /* RAPL_DOMAIN_DRAM */
+       {
+               .get_energy_uj = get_energy_counter,
+               .get_max_energy_range_uj = get_max_energy_counter,
+               .release = release_zone,
+               .set_enable = set_domain_enable,
+               .get_enable = get_domain_enable,
+       },
+};
+
+static int set_power_limit(struct powercap_zone *power_zone, int id,
+                       u64 power_limit)
+{
+       struct rapl_domain *rd;
+       struct rapl_package *rp;
+       int ret = 0;
+
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+       rp = find_package_by_id(rd->package_id);
+       if (!rp) {
+               ret = -ENODEV;
+               goto set_exit;
+       }
+
+       if (rd->state & DOMAIN_STATE_BIOS_LOCKED) {
+               dev_warn(&power_zone->dev, "%s locked by BIOS, monitoring only\n",
+                       rd->name);
+               ret = -EACCES;
+               goto set_exit;
+       }
+
+       switch (rd->rpl[id].prim_id) {
+       case PL1_ENABLE:
+               rapl_write_data_raw(rd, POWER_LIMIT1, power_limit);
+               break;
+       case PL2_ENABLE:
+               rapl_write_data_raw(rd, POWER_LIMIT2, power_limit);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       if (!ret)
+               package_power_limit_irq_save(rd->package_id);
+set_exit:
+       put_online_cpus();
+       return ret;
+}
+
+static int get_current_power_limit(struct powercap_zone *power_zone, int id,
+                                       u64 *data)
+{
+       struct rapl_domain *rd;
+       u64 val;
+       int prim;
+       int ret = 0;
+
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+       switch (rd->rpl[id].prim_id) {
+       case PL1_ENABLE:
+               prim = POWER_LIMIT1;
+               break;
+       case PL2_ENABLE:
+               prim = POWER_LIMIT2;
+               break;
+       default:
+               put_online_cpus();
+               return -EINVAL;
+       }
+       if (rapl_read_data_raw(rd, prim, true, &val))
+               ret = -EIO;
+       else
+               *data = val;
+
+       put_online_cpus();
+
+       return ret;
+}
+
+static int set_time_window(struct powercap_zone *power_zone, int id,
+                                                               u64 window)
+{
+       struct rapl_domain *rd;
+       int ret = 0;
+
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+       switch (rd->rpl[id].prim_id) {
+       case PL1_ENABLE:
+               rapl_write_data_raw(rd, TIME_WINDOW1, window);
+               break;
+       case PL2_ENABLE:
+               rapl_write_data_raw(rd, TIME_WINDOW2, window);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       put_online_cpus();
+       return ret;
+}
+
+static int get_time_window(struct powercap_zone *power_zone, int id, u64 *data)
+{
+       struct rapl_domain *rd;
+       u64 val;
+       int ret = 0;
+
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+       switch (rd->rpl[id].prim_id) {
+       case PL1_ENABLE:
+               ret = rapl_read_data_raw(rd, TIME_WINDOW1, true, &val);
+               break;
+       case PL2_ENABLE:
+               ret = rapl_read_data_raw(rd, TIME_WINDOW2, true, &val);
+               break;
+       default:
+               put_online_cpus();
+               return -EINVAL;
+       }
+       if (!ret)
+               *data = val;
+       put_online_cpus();
+
+       return ret;
+}
+
+static const char *get_constraint_name(struct powercap_zone *power_zone, int id)
+{
+       struct rapl_power_limit *rpl;
+       struct rapl_domain *rd;
+
+       rd = power_zone_to_rapl_domain(power_zone);
+       rpl = (struct rapl_power_limit *) &rd->rpl[id];
+
+       return rpl->name;
+}
+
+
+static int get_max_power(struct powercap_zone *power_zone, int id,
+                                       u64 *data)
+{
+       struct rapl_domain *rd;
+       u64 val;
+       int prim;
+       int ret = 0;
+
+       get_online_cpus();
+       rd = power_zone_to_rapl_domain(power_zone);
+       switch (rd->rpl[id].prim_id) {
+       case PL1_ENABLE:
+               prim = THERMAL_SPEC_POWER;
+               break;
+       case PL2_ENABLE:
+               prim = MAX_POWER;
+               break;
+       default:
+               put_online_cpus();
+               return -EINVAL;
+       }
+       if (rapl_read_data_raw(rd, prim, true, &val))
+               ret = -EIO;
+       else
+               *data = val;
+
+       put_online_cpus();
+
+       return ret;
+}
+
+static struct powercap_zone_constraint_ops constraint_ops = {
+       .set_power_limit_uw = set_power_limit,
+       .get_power_limit_uw = get_current_power_limit,
+       .set_time_window_us = set_time_window,
+       .get_time_window_us = get_time_window,
+       .get_max_power_uw = get_max_power,
+       .get_name = get_constraint_name,
+};
+
+/* called after domain detection and package level data are set */
+static void rapl_init_domains(struct rapl_package *rp)
+{
+       int i;
+       struct rapl_domain *rd = rp->domains;
+
+       for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+               unsigned int mask = rp->domain_map & (1 << i);
+               switch (mask) {
+               case BIT(RAPL_DOMAIN_PACKAGE):
+                       rd->name = rapl_domain_names[RAPL_DOMAIN_PACKAGE];
+                       rd->id = RAPL_DOMAIN_PACKAGE;
+                       rd->msrs[0] = MSR_PKG_POWER_LIMIT;
+                       rd->msrs[1] = MSR_PKG_ENERGY_STATUS;
+                       rd->msrs[2] = MSR_PKG_PERF_STATUS;
+                       rd->msrs[3] = 0;
+                       rd->msrs[4] = MSR_PKG_POWER_INFO;
+                       rd->rpl[0].prim_id = PL1_ENABLE;
+                       rd->rpl[0].name = pl1_name;
+                       rd->rpl[1].prim_id = PL2_ENABLE;
+                       rd->rpl[1].name = pl2_name;
+                       break;
+               case BIT(RAPL_DOMAIN_PP0):
+                       rd->name = rapl_domain_names[RAPL_DOMAIN_PP0];
+                       rd->id = RAPL_DOMAIN_PP0;
+                       rd->msrs[0] = MSR_PP0_POWER_LIMIT;
+                       rd->msrs[1] = MSR_PP0_ENERGY_STATUS;
+                       rd->msrs[2] = 0;
+                       rd->msrs[3] = MSR_PP0_POLICY;
+                       rd->msrs[4] = 0;
+                       rd->rpl[0].prim_id = PL1_ENABLE;
+                       rd->rpl[0].name = pl1_name;
+                       break;
+               case BIT(RAPL_DOMAIN_PP1):
+                       rd->name = rapl_domain_names[RAPL_DOMAIN_PP1];
+                       rd->id = RAPL_DOMAIN_PP1;
+                       rd->msrs[0] = MSR_PP1_POWER_LIMIT;
+                       rd->msrs[1] = MSR_PP1_ENERGY_STATUS;
+                       rd->msrs[2] = 0;
+                       rd->msrs[3] = MSR_PP1_POLICY;
+                       rd->msrs[4] = 0;
+                       rd->rpl[0].prim_id = PL1_ENABLE;
+                       rd->rpl[0].name = pl1_name;
+                       break;
+               case BIT(RAPL_DOMAIN_DRAM):
+                       rd->name = rapl_domain_names[RAPL_DOMAIN_DRAM];
+                       rd->id = RAPL_DOMAIN_DRAM;
+                       rd->msrs[0] = MSR_DRAM_POWER_LIMIT;
+                       rd->msrs[1] = MSR_DRAM_ENERGY_STATUS;
+                       rd->msrs[2] = MSR_DRAM_PERF_STATUS;
+                       rd->msrs[3] = 0;
+                       rd->msrs[4] = MSR_DRAM_POWER_INFO;
+                       rd->rpl[0].prim_id = PL1_ENABLE;
+                       rd->rpl[0].name = pl1_name;
+                       break;
+               }
+               if (mask) {
+                       rd->package_id = rp->id;
+                       rd++;
+               }
+       }
+}
+
+static u64 rapl_unit_xlate(int package, enum unit_type type, u64 value,
+                       int to_raw)
+{
+       u64 divisor = 1;
+       int scale = 1; /* scale to user friendly data without floating point */
+       u64 f, y; /* fraction and exp. used for time unit */
+       struct rapl_package *rp;
+
+       rp = find_package_by_id(package);
+       if (!rp)
+               return value;
+
+       switch (type) {
+       case POWER_UNIT:
+               divisor = rp->power_unit_divisor;
+               scale = POWER_UNIT_SCALE;
+               break;
+       case ENERGY_UNIT:
+               scale = ENERGY_UNIT_SCALE;
+               divisor = rp->energy_unit_divisor;
+               break;
+       case TIME_UNIT:
+               divisor = rp->time_unit_divisor;
+               scale = TIME_UNIT_SCALE;
+               /* special processing based on 2^Y*(1+F/4) = val/divisor, refer
+                * to Intel Software Developer's manual Vol. 3a, CH 14.7.4.
+                */
+               if (!to_raw) {
+                       f = (value & 0x60) >> 5;
+                       y = value & 0x1f;
+                       value = (1 << y) * (4 + f) * scale / 4;
+                       return div64_u64(value, divisor);
+               } else {
+                       do_div(value, scale);
+                       value *= divisor;
+                       y = ilog2(value);
+                       f = div64_u64(4 * (value - (1 << y)), 1 << y);
+                       value = (y & 0x1f) | ((f & 0x3) << 5);
+                       return value;
+               }
+               break;
+       case ARBITRARY_UNIT:
+       default:
+               return value;
+       }
+
+       if (to_raw)
+               return div64_u64(value * divisor, scale);
+       else
+               return div64_u64(value * scale, divisor);
+}
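+
+/*
+ * Worked example for the time-unit decode above (values are illustrative):
+ * a raw field of 0x0a gives Y = 10 and F = 0, so with a typical
+ * time_unit_divisor of 1024 the result is
+ *   (1 << 10) * (4 + 0) * TIME_UNIT_SCALE / 4 / 1024,
+ * i.e. exactly one second expressed in TIME_UNIT_SCALE units.
+ */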
+
+/* in the order of enum rapl_primitives */
+static struct rapl_primitive_info rpi[] = {
+       /* name, mask, shift, msr index, unit, flag */
+       PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
+                               RAPL_DOMAIN_MSR_STATUS, ENERGY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
+                               RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
+                               RAPL_DOMAIN_MSR_LIMIT, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(FW_LOCK, POWER_PP_LOCK, 31,
+                               RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
+                               RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
+                               RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
+                               RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
+                               RAPL_DOMAIN_MSR_LIMIT, ARBITRARY_UNIT, 0),
+       PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
+                               RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+       PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
+                               RAPL_DOMAIN_MSR_LIMIT, TIME_UNIT, 0),
+       PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
+                               0, RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
+                               RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
+                               RAPL_DOMAIN_MSR_INFO, POWER_UNIT, 0),
+       PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
+                               RAPL_DOMAIN_MSR_INFO, TIME_UNIT, 0),
+       PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
+                               RAPL_DOMAIN_MSR_PERF, TIME_UNIT, 0),
+       PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
+                               RAPL_DOMAIN_MSR_POLICY, ARBITRARY_UNIT, 0),
+       /* non-hardware */
+       PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
+                               RAPL_PRIMITIVE_DERIVED),
+       {NULL, 0, 0, 0},
+};
+
+/* Read primitive data based on its related struct rapl_primitive_info.
+ * If the xlate flag is set, return translated data based on data units, i.e.
+ * time, energy, and power.
+ * RAPL MSRs are non-architectural and are not laid out consistently across
+ * domains. Here we use primitive info to allow writing consolidated access
+ * functions.
+ * For a given primitive, the raw value is extracted by MSR mask and shift.
+ * Unit conversion is pre-assigned based on RAPL unit MSRs read at init time.
+ * 63-------------------------- 31--------------------------- 0
+ * |                           xxxxx (mask)                   |
+ * |                                |<- shift ----------------|
+ * 63-------------------------- 31--------------------------- 0
+ */
+static int rapl_read_data_raw(struct rapl_domain *rd,
+                       enum rapl_primitives prim,
+                       bool xlate, u64 *data)
+{
+       u64 value, final;
+       u32 msr;
+       struct rapl_primitive_info *rp = &rpi[prim];
+       int cpu;
+
+       if (!rp->name || rp->flag & RAPL_PRIMITIVE_DUMMY)
+               return -EINVAL;
+
+       msr = rd->msrs[rp->id];
+       if (!msr)
+               return -EINVAL;
+       /* use physical package id to look up active cpus */
+       cpu = find_active_cpu_on_package(rd->package_id);
+       if (cpu < 0)
+               return cpu;
+
+       /* special-case package domain, which uses a different bit */
+       if (prim == FW_LOCK && rd->id == RAPL_DOMAIN_PACKAGE) {
+               rp->mask = POWER_PACKAGE_LOCK;
+               rp->shift = 63;
+       }
+       /* non-hardware data are collected by the polling thread */
+       if (rp->flag & RAPL_PRIMITIVE_DERIVED) {
+               *data = rd->rdd.primitives[prim];
+               return 0;
+       }
+
+       if (rdmsrl_safe_on_cpu(cpu, msr, &value)) {
+               pr_debug("failed to read msr 0x%x on cpu %d\n", msr, cpu);
+               return -EIO;
+       }
+
+       final = value & rp->mask;
+       final = final >> rp->shift;
+       if (xlate)
+               *data = rapl_unit_xlate(rd->package_id, rp->unit, final, 0);
+       else
+               *data = final;
+
+       return 0;
+}
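+
+/*
+ * Sketch of a typical read (register contents made up): POWER_LIMIT1 on a
+ * package domain selects rd->msrs[RAPL_DOMAIN_MSR_LIMIT], applies
+ * POWER_LIMIT1_MASK with a shift of 0, and, when xlate is set, scales the
+ * extracted field by POWER_UNIT_SCALE / power_unit_divisor.
+ */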
+
+/* Similar use of primitive info in the read counterpart */
+static int rapl_write_data_raw(struct rapl_domain *rd,
+                       enum rapl_primitives prim,
+                       unsigned long long value)
+{
+       u64 msr_val;
+       u32 msr;
+       struct rapl_primitive_info *rp = &rpi[prim];
+       int cpu;
+
+       cpu = find_active_cpu_on_package(rd->package_id);
+       if (cpu < 0)
+               return cpu;
+       msr = rd->msrs[rp->id];
+       if (rdmsrl_safe_on_cpu(cpu, msr, &msr_val)) {
+               dev_dbg(&rd->power_zone.dev,
+                       "failed to read msr 0x%x on cpu %d\n", msr, cpu);
+               return -EIO;
+       }
+       value = rapl_unit_xlate(rd->package_id, rp->unit, value, 1);
+       msr_val &= ~rp->mask;
+       msr_val |= value << rp->shift;
+       if (wrmsrl_safe_on_cpu(cpu, msr, msr_val)) {
+               dev_dbg(&rd->power_zone.dev,
+                       "failed to write msr 0x%x on cpu %d\n", msr, cpu);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int rapl_check_unit(struct rapl_package *rp, int cpu)
+{
+       u64 msr_val;
+       u32 value;
+
+       if (rdmsrl_safe_on_cpu(cpu, MSR_RAPL_POWER_UNIT, &msr_val)) {
+               pr_err("Failed to read power unit MSR 0x%x on CPU %d, exit.\n",
+                       MSR_RAPL_POWER_UNIT, cpu);
+               return -ENODEV;
+       }
+
+       /* Raw RAPL data stored in MSRs use hardware-specific scales. We need
+        * to convert them into standard units based on the divisors reported
+        * in the RAPL unit MSRs, i.e.
+        * energy unit: 1/energy_unit_divisor Joules
+        * power unit: 1/power_unit_divisor Watts
+        * time unit: 1/time_unit_divisor Seconds
+        */
+       value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
+       rp->energy_unit_divisor = 1 << value;
+
+       value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
+       rp->power_unit_divisor = 1 << value;
+
+       value = (msr_val & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
+       rp->time_unit_divisor = 1 << value;
+
+       pr_debug("Physical package %d units: energy=%d, time=%d, power=%d\n",
+               rp->id,
+               rp->energy_unit_divisor,
+               rp->time_unit_divisor,
+               rp->power_unit_divisor);
+
+       return 0;
+}
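+
+/*
+ * For illustration only: many Sandy Bridge parts report a power field of 3,
+ * an energy field of 16 and a time field of 10 in MSR_RAPL_POWER_UNIT,
+ * giving divisors of 8, 65536 and 1024, i.e. power in 1/8 W, energy in
+ * roughly 15.3 uJ and time in roughly 0.98 ms increments.
+ */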
+
+/* REVISIT:
+ * When package power limit is set artificially low by RAPL, LVT
+ * thermal interrupt for package power limit should be ignored
+ * since we are not really exceeding the real limit. The intention
+ * is to avoid excessive interrupts while we are trying to save power.
+ * A useful feature might be routing the package_power_limit interrupt
+ * to userspace via eventfd. Once we have a use case, this is simple
+ * to do by adding an atomic notifier.
+ */
+
+static void package_power_limit_irq_save(int package_id)
+{
+       u32 l, h = 0;
+       int cpu;
+       struct rapl_package *rp;
+
+       rp = find_package_by_id(package_id);
+       if (!rp)
+               return;
+
+       if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+               return;
+
+       cpu = find_active_cpu_on_package(package_id);
+       if (cpu < 0)
+               return;
+       /* save the state of PLN irq mask bit before disabling it */
+       rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+       if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
+               rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
+               rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
+       }
+       l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+       wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+/* restore per package power limit interrupt enable state */
+static void package_power_limit_irq_restore(int package_id)
+{
+       u32 l, h;
+       int cpu;
+       struct rapl_package *rp;
+
+       rp = find_package_by_id(package_id);
+       if (!rp)
+               return;
+
+       if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
+               return;
+
+       cpu = find_active_cpu_on_package(package_id);
+       if (cpu < 0)
+               return;
+
+       /* irq enable state not saved, nothing to restore */
+       if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
+               return;
+       rdmsr_safe_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
+
+       if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
+               l |= PACKAGE_THERM_INT_PLN_ENABLE;
+       else
+               l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
+
+       wrmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
+}
+
+static const struct x86_cpu_id rapl_ids[] = {
+       { X86_VENDOR_INTEL, 6, 0x2a}, /* SNB */
+       { X86_VENDOR_INTEL, 6, 0x2d}, /* SNB EP */
+       { X86_VENDOR_INTEL, 6, 0x3a}, /* IVB */
+       { X86_VENDOR_INTEL, 6, 0x45}, /* HSW */
+       /* TODO: Add more CPU IDs after testing */
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
+
+/* Read raw primitive data for all packages and domains in one pass */
+static void rapl_update_domain_data(void)
+{
+       int dmn, prim;
+       u64 val;
+       struct rapl_package *rp;
+
+       list_for_each_entry(rp, &rapl_packages, plist) {
+               for (dmn = 0; dmn < rp->nr_domains; dmn++) {
+                       pr_debug("update package %d domain %s data\n", rp->id,
+                               rp->domains[dmn].name);
+                       /* exclude non-raw primitives */
+                       for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++)
+                               if (!rapl_read_data_raw(&rp->domains[dmn], prim,
+                                                               rpi[prim].unit,
+                                                               &val))
+                                       rp->domains[dmn].rdd.primitives[prim] =
+                                                                       val;
+               }
+       }
+}
+
+static int rapl_unregister_powercap(void)
+{
+       struct rapl_package *rp;
+       struct rapl_domain *rd, *rd_package = NULL;
+
+       /* unregister all active rapl packages from the powercap layer,
+        * hotplug lock held
+        */
+       list_for_each_entry(rp, &rapl_packages, plist) {
+               package_power_limit_irq_restore(rp->id);
+
+               for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+                    rd++) {
+                       pr_debug("remove package, undo power limit on %d: %s\n",
+                               rp->id, rd->name);
+                       rapl_write_data_raw(rd, PL1_ENABLE, 0);
+                       rapl_write_data_raw(rd, PL2_ENABLE, 0);
+                       rapl_write_data_raw(rd, PL1_CLAMP, 0);
+                       rapl_write_data_raw(rd, PL2_CLAMP, 0);
+                       if (rd->id == RAPL_DOMAIN_PACKAGE) {
+                               rd_package = rd;
+                               continue;
+                       }
+                       powercap_unregister_zone(control_type, &rd->power_zone);
+               }
+               /* do the package zone last */
+               if (rd_package)
+                       powercap_unregister_zone(control_type,
+                                               &rd_package->power_zone);
+       }
+       powercap_unregister_control_type(control_type);
+
+       return 0;
+}
+
+static int rapl_package_register_powercap(struct rapl_package *rp)
+{
+       struct rapl_domain *rd;
+       int ret = 0;
+       char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null */
+       struct powercap_zone *power_zone = NULL;
+       int nr_pl;
+
+       /* first we register the package domain as the parent zone */
+       for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+               if (rd->id == RAPL_DOMAIN_PACKAGE) {
+                       nr_pl = find_nr_power_limit(rd);
+                       pr_debug("register socket %d package domain %s\n",
+                               rp->id, rd->name);
+                       memset(dev_name, 0, sizeof(dev_name));
+                       snprintf(dev_name, sizeof(dev_name), "%s-%d",
+                               rd->name, rp->id);
+                       power_zone = powercap_register_zone(&rd->power_zone,
+                                                       control_type,
+                                                       dev_name, NULL,
+                                                       &zone_ops[rd->id],
+                                                       nr_pl,
+                                                       &constraint_ops);
+                       if (IS_ERR(power_zone)) {
+                               pr_debug("failed to register package, %d\n",
+                                       rp->id);
+                               ret = PTR_ERR(power_zone);
+                               goto exit_package;
+                       }
+                       /* track parent zone in per package/socket data */
+                       rp->power_zone = power_zone;
+                       /* done, only one package domain per socket */
+                       break;
+               }
+       }
+       if (!power_zone) {
+               pr_err("no package domain found, unknown topology!\n");
+               ret = -ENODEV;
+               goto exit_package;
+       }
+       /* now register domains as children of the socket/package */
+       for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+               if (rd->id == RAPL_DOMAIN_PACKAGE)
+                       continue;
+               /* number of power limits per domain varies */
+               nr_pl = find_nr_power_limit(rd);
+               power_zone = powercap_register_zone(&rd->power_zone,
+                                               control_type, rd->name,
+                                               rp->power_zone,
+                                               &zone_ops[rd->id], nr_pl,
+                                               &constraint_ops);
+
+               if (IS_ERR(power_zone)) {
+                       pr_debug("failed to register power_zone, %d:%s:%s\n",
+                               rp->id, rd->name, dev_name);
+                       ret = PTR_ERR(power_zone);
+                       goto err_cleanup;
+               }
+       }
+
+exit_package:
+       return ret;
+err_cleanup:
+       /* clean up previously initialized domains within the package if we
+        * failed after the first domain setup.
+        */
+       while (--rd >= rp->domains) {
+               pr_debug("unregister package %d domain %s\n", rp->id, rd->name);
+               powercap_unregister_zone(control_type, &rd->power_zone);
+       }
+
+       return ret;
+}
+
+static int rapl_register_powercap(void)
+{
+       struct rapl_domain *rd;
+       struct rapl_package *rp;
+       int ret = 0;
+
+       control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
+       if (IS_ERR(control_type)) {
+               pr_debug("failed to register powercap control_type.\n");
+               return PTR_ERR(control_type);
+       }
+       /* read the initial data */
+       rapl_update_domain_data();
+       list_for_each_entry(rp, &rapl_packages, plist)
+               if (rapl_package_register_powercap(rp))
+                       goto err_cleanup_package;
+       return ret;
+
+err_cleanup_package:
+       /* clean up previously initialized packages */
+       list_for_each_entry_continue_reverse(rp, &rapl_packages, plist) {
+               for (rd = rp->domains; rd < rp->domains + rp->nr_domains;
+                    rd++) {
+                       pr_debug("unregister zone/package %d, %s domain\n",
+                               rp->id, rd->name);
+                       powercap_unregister_zone(control_type, &rd->power_zone);
+               }
+       }
+
+       return ret;
+}
+
+static int rapl_check_domain(int cpu, int domain)
+{
+       unsigned msr;
+       u64 val1, val2 = 0;
+       int retry = 0;
+
+       switch (domain) {
+       case RAPL_DOMAIN_PACKAGE:
+               msr = MSR_PKG_ENERGY_STATUS;
+               break;
+       case RAPL_DOMAIN_PP0:
+               msr = MSR_PP0_ENERGY_STATUS;
+               break;
+       case RAPL_DOMAIN_PP1:
+               msr = MSR_PP1_ENERGY_STATUS;
+               break;
+       case RAPL_DOMAIN_DRAM:
+               msr = MSR_DRAM_ENERGY_STATUS;
+               break;
+       default:
+               pr_err("invalid domain id %d\n", domain);
+               return -EINVAL;
+       }
+       if (rdmsrl_safe_on_cpu(cpu, msr, &val1))
+               return -ENODEV;
+
+       /* energy counters roll slowly on some domains */
+       while (++retry < 10) {
+               usleep_range(10000, 15000);
+               rdmsrl_safe_on_cpu(cpu, msr, &val2);
+               if ((val1 & ENERGY_STATUS_MASK) != (val2 & ENERGY_STATUS_MASK))
+                       return 0;
+       }
+       /* if energy counter does not change, report as bad domain */
+       pr_info("domain %s energy ctr %llu:%llu not working, skip\n",
+               rapl_domain_names[domain], val1, val2);
+
+       return -ENODEV;
+}
+
+/* Detect active and valid domains for the given CPU, caller must
+ * ensure the CPU belongs to the targeted package and CPU hotplug is disabled.
+ */
+static int rapl_detect_domains(struct rapl_package *rp, int cpu)
+{
+       int i;
+       int ret = 0;
+       struct rapl_domain *rd;
+       u64 locked;
+
+       for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
+               /* use physical package id to read counters */
+               if (!rapl_check_domain(cpu, i))
+                       rp->domain_map |= 1 << i;
+       }
+       rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
+       if (!rp->nr_domains) {
+               pr_err("no valid rapl domains found in package %d\n", rp->id);
+               ret = -ENODEV;
+               goto done;
+       }
+       pr_debug("found %d domains on package %d\n", rp->nr_domains, rp->id);
+
+       rp->domains = kcalloc(rp->nr_domains + 1, sizeof(struct rapl_domain),
+                       GFP_KERNEL);
+       if (!rp->domains) {
+               ret = -ENOMEM;
+               goto done;
+       }
+       rapl_init_domains(rp);
+
+       for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+               /* check if the domain is locked by BIOS */
+               if (!rapl_read_data_raw(rd, FW_LOCK, false, &locked) &&
+                   locked) {
+                       pr_info("RAPL package %d domain %s locked by BIOS\n",
+                               rp->id, rd->name);
+                       rd->state |= DOMAIN_STATE_BIOS_LOCKED;
+               }
+       }
+
+done:
+       return ret;
+}
+
+static bool is_package_new(int package)
+{
+       struct rapl_package *rp;
+
+       /* The caller prevents CPU hotplug, so no packages will be added or
+        * deleted while traversing the package list; no locking is needed.
+        */
+       list_for_each_entry(rp, &rapl_packages, plist)
+               if (package == rp->id)
+                       return false;
+
+       return true;
+}
+
+/* The RAPL interface is exposed as a two-level hierarchy: package level and
+ * domain level. We first detect the number of packages, then the domains of
+ * each package. We have to consider the possibility of CPUs going online or
+ * offline due to hotplug and other scenarios.
+ */
+static int rapl_detect_topology(void)
+{
+       int i;
+       int phy_package_id;
+       struct rapl_package *new_package, *rp;
+
+       for_each_online_cpu(i) {
+               phy_package_id = topology_physical_package_id(i);
+               if (is_package_new(phy_package_id)) {
+                       new_package = kzalloc(sizeof(*rp), GFP_KERNEL);
+                       if (!new_package) {
+                               rapl_cleanup_data();
+                               return -ENOMEM;
+                       }
+                       /* add the new package to the list */
+                       new_package->id = phy_package_id;
+                       new_package->nr_cpus = 1;
+
+                       /* check if the package contains valid domains */
+                       if (rapl_detect_domains(new_package, i) ||
+                               rapl_check_unit(new_package, i)) {
+                               kfree(new_package->domains);
+                               kfree(new_package);
+                               /* free up the packages already initialized */
+                               rapl_cleanup_data();
+                               return -ENODEV;
+                       }
+                       INIT_LIST_HEAD(&new_package->plist);
+                       list_add(&new_package->plist, &rapl_packages);
+               } else {
+                       rp = find_package_by_id(phy_package_id);
+                       if (rp)
+                               ++rp->nr_cpus;
+               }
+       }
+
+       return 0;
+}
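+
+/*
+ * An illustrative sketch of the resulting powercap layout, for physical
+ * package 0 with a core (pp0) domain:
+ *   /sys/class/powercap/intel-rapl/intel-rapl:0/                name: package-0
+ *   /sys/class/powercap/intel-rapl/intel-rapl:0/intel-rapl:0:0/ name: core
+ * Zone device names come from powercap_register_zone(), which appends
+ * ":<id>" to the parent device name.
+ */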
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static void rapl_remove_package(struct rapl_package *rp)
+{
+       struct rapl_domain *rd, *rd_package = NULL;
+
+       for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
+               if (rd->id == RAPL_DOMAIN_PACKAGE) {
+                       rd_package = rd;
+                       continue;
+               }
+               pr_debug("remove package %d, %s domain\n", rp->id, rd->name);
+               powercap_unregister_zone(control_type, &rd->power_zone);
+       }
+       /* do parent zone last */
+       powercap_unregister_zone(control_type, &rd_package->power_zone);
+       list_del(&rp->plist);
+       kfree(rp);
+}
+
+/* called from CPU hotplug notifier, hotplug lock held */
+static int rapl_add_package(int cpu)
+{
+       int ret = 0;
+       int phy_package_id;
+       struct rapl_package *rp;
+
+       phy_package_id = topology_physical_package_id(cpu);
+       rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
+       if (!rp)
+               return -ENOMEM;
+
+       /* add the new package to the list */
+       rp->id = phy_package_id;
+       rp->nr_cpus = 1;
+       /* check if the package contains valid domains */
+       if (rapl_detect_domains(rp, cpu) ||
+               rapl_check_unit(rp, cpu)) {
+               ret = -ENODEV;
+               goto err_free_package;
+       }
+       if (!rapl_package_register_powercap(rp)) {
+               INIT_LIST_HEAD(&rp->plist);
+               list_add(&rp->plist, &rapl_packages);
+               return ret;
+       }
+
+err_free_package:
+       kfree(rp->domains);
+       kfree(rp);
+
+       return ret;
+}
+
+/* Handles CPU hotplug on multi-socket systems.
+ * If a CPU comes online as the first CPU of a physical package,
+ * we add the RAPL package to the system. Similarly, when the last
+ * CPU of the package is removed, we remove the RAPL package and its
+ * associated domains. Cooling devices are handled accordingly at
+ * per-domain level.
+ */
+static int rapl_cpu_callback(struct notifier_block *nfb,
+                               unsigned long action, void *hcpu)
+{
+       unsigned long cpu = (unsigned long)hcpu;
+       int phy_package_id;
+       struct rapl_package *rp;
+
+       phy_package_id = topology_physical_package_id(cpu);
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_ONLINE_FROZEN:
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
+               rp = find_package_by_id(phy_package_id);
+               if (rp)
+                       ++rp->nr_cpus;
+               else
+                       rapl_add_package(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               rp = find_package_by_id(phy_package_id);
+               if (!rp)
+                       break;
+               if (--rp->nr_cpus == 0)
+                       rapl_remove_package(rp);
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block rapl_cpu_notifier = {
+       .notifier_call = rapl_cpu_callback,
+};
+
+static int __init rapl_init(void)
+{
+       int ret = 0;
+
+       if (!x86_match_cpu(rapl_ids)) {
+               pr_err("driver does not support CPU family %d model %d\n",
+                       boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+               return -ENODEV;
+       }
+       /* prevent CPU hotplug during detection */
+       get_online_cpus();
+       ret = rapl_detect_topology();
+       if (ret)
+               goto done;
+
+       if (rapl_register_powercap()) {
+               rapl_cleanup_data();
+               ret = -ENODEV;
+               goto done;
+       }
+       register_hotcpu_notifier(&rapl_cpu_notifier);
+done:
+       put_online_cpus();
+
+       return ret;
+}
+
+static void __exit rapl_exit(void)
+{
+       get_online_cpus();
+       unregister_hotcpu_notifier(&rapl_cpu_notifier);
+       rapl_unregister_powercap();
+       rapl_cleanup_data();
+       put_online_cpus();
+}
+
+module_init(rapl_init);
+module_exit(rapl_exit);
+
+MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit)");
+MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
new file mode 100644 (file)
index 0000000..c22fa4c
--- /dev/null
@@ -0,0 +1,683 @@
+/*
+ * Power capping class
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/powercap.h>
+
+#define to_powercap_zone(n) container_of(n, struct powercap_zone, dev)
+#define to_powercap_control_type(n) \
+                       container_of(n, struct powercap_control_type, dev)
+
+/* Power zone show function */
+#define define_power_zone_show(_attr)          \
+static ssize_t _attr##_show(struct device *dev, \
+                                       struct device_attribute *dev_attr,\
+                                       char *buf) \
+{ \
+       u64 value; \
+       ssize_t len = -EINVAL; \
+       struct powercap_zone *power_zone = to_powercap_zone(dev); \
+       \
+       if (power_zone->ops->get_##_attr) { \
+               if (!power_zone->ops->get_##_attr(power_zone, &value)) \
+                       len = sprintf(buf, "%lld\n", value); \
+       } \
+       \
+       return len; \
+}
+
+/* The only meaningful input is 0 (reset), others are silently ignored */
+#define define_power_zone_store(_attr)         \
+static ssize_t _attr##_store(struct device *dev,\
+                               struct device_attribute *dev_attr, \
+                               const char *buf, size_t count) \
+{ \
+       int err; \
+       struct powercap_zone *power_zone = to_powercap_zone(dev); \
+       u64 value; \
+       \
+       err = kstrtoull(buf, 10, &value); \
+       if (err) \
+               return -EINVAL; \
+       if (value) \
+               return count; \
+       if (power_zone->ops->reset_##_attr) { \
+               if (!power_zone->ops->reset_##_attr(power_zone)) \
+                       return count; \
+       } \
+       \
+       return -EINVAL; \
+}
+
+/* Power zone constraint show function */
+#define define_power_zone_constraint_show(_attr) \
+static ssize_t show_constraint_##_attr(struct device *dev, \
+                               struct device_attribute *dev_attr,\
+                               char *buf) \
+{ \
+       u64 value; \
+       ssize_t len = -ENODATA; \
+       struct powercap_zone *power_zone = to_powercap_zone(dev); \
+       int id; \
+       struct powercap_zone_constraint *pconst;\
+       \
+       if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+               return -EINVAL; \
+       if (id >= power_zone->const_id_cnt)     \
+               return -EINVAL; \
+       pconst = &power_zone->constraints[id]; \
+       if (pconst && pconst->ops && pconst->ops->get_##_attr) { \
+               if (!pconst->ops->get_##_attr(power_zone, id, &value)) \
+                       len = sprintf(buf, "%lld\n", value); \
+       } \
+       \
+       return len; \
+}
+
+/* Power zone constraint store function */
+#define define_power_zone_constraint_store(_attr) \
+static ssize_t store_constraint_##_attr(struct device *dev,\
+                               struct device_attribute *dev_attr, \
+                               const char *buf, size_t count) \
+{ \
+       int err; \
+       u64 value; \
+       struct powercap_zone *power_zone = to_powercap_zone(dev); \
+       int id; \
+       struct powercap_zone_constraint *pconst;\
+       \
+       if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
+               return -EINVAL; \
+       if (id >= power_zone->const_id_cnt)     \
+               return -EINVAL; \
+       pconst = &power_zone->constraints[id]; \
+       err = kstrtoull(buf, 10, &value); \
+       if (err) \
+               return -EINVAL; \
+       if (pconst && pconst->ops && pconst->ops->set_##_attr) { \
+               if (!pconst->ops->set_##_attr(power_zone, id, value)) \
+                       return count; \
+       } \
+       \
+       return -ENODATA; \
+}
+
+/* Power zone information callbacks */
+define_power_zone_show(power_uw);
+define_power_zone_show(max_power_range_uw);
+define_power_zone_show(energy_uj);
+define_power_zone_store(energy_uj);
+define_power_zone_show(max_energy_range_uj);
+
+/* Power zone attributes */
+static DEVICE_ATTR_RO(max_power_range_uw);
+static DEVICE_ATTR_RO(power_uw);
+static DEVICE_ATTR_RO(max_energy_range_uj);
+static DEVICE_ATTR_RW(energy_uj);
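+
+/*
+ * Usage sketch (the path is illustrative): writing "0" to a zone's
+ * energy_uj file invokes ops->reset_energy_uj() when the zone provides it,
+ * e.g. "echo 0 > /sys/class/powercap/intel-rapl/intel-rapl:0/energy_uj";
+ * any non-zero value is accepted and silently ignored.
+ */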
+
+/* Power zone constraint attribute callbacks */
+define_power_zone_constraint_show(power_limit_uw);
+define_power_zone_constraint_store(power_limit_uw);
+define_power_zone_constraint_show(time_window_us);
+define_power_zone_constraint_store(time_window_us);
+define_power_zone_constraint_show(max_power_uw);
+define_power_zone_constraint_show(min_power_uw);
+define_power_zone_constraint_show(max_time_window_us);
+define_power_zone_constraint_show(min_time_window_us);
+
+/* For one-time seeding of constraint device attributes */
+struct powercap_constraint_attr {
+       struct device_attribute power_limit_attr;
+       struct device_attribute time_window_attr;
+       struct device_attribute max_power_attr;
+       struct device_attribute min_power_attr;
+       struct device_attribute max_time_window_attr;
+       struct device_attribute min_time_window_attr;
+       struct device_attribute name_attr;
+};
+
+static struct powercap_constraint_attr
+                               constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
+
+/* A list of powercap control_types */
+static LIST_HEAD(powercap_cntrl_list);
+/* Mutex to protect list of powercap control_types */
+static DEFINE_MUTEX(powercap_cntrl_list_lock);
+
+#define POWERCAP_CONSTRAINT_NAME_LEN   30 /* Some limit to avoid overflow */
+static ssize_t show_constraint_name(struct device *dev,
+                               struct device_attribute *dev_attr,
+                               char *buf)
+{
+       const char *name;
+       struct powercap_zone *power_zone = to_powercap_zone(dev);
+       int id;
+       ssize_t len = -ENODATA;
+       struct powercap_zone_constraint *pconst;
+
+       if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id))
+               return -EINVAL;
+       if (id >= power_zone->const_id_cnt)
+               return -EINVAL;
+       pconst = &power_zone->constraints[id];
+
+       if (pconst && pconst->ops && pconst->ops->get_name) {
+               name = pconst->ops->get_name(power_zone, id);
+               if (name) {
+                       snprintf(buf, POWERCAP_CONSTRAINT_NAME_LEN,
+                                                               "%s\n", name);
+                       buf[POWERCAP_CONSTRAINT_NAME_LEN] = '\0';
+                       len = strlen(buf);
+               }
+       }
+
+       return len;
+}
+
+static int create_constraint_attribute(int id, const char *name,
+                               int mode,
+                               struct device_attribute *dev_attr,
+                               ssize_t (*show)(struct device *,
+                                       struct device_attribute *, char *),
+                               ssize_t (*store)(struct device *,
+                                       struct device_attribute *,
+                               const char *, size_t)
+                               )
+{
+       dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
+                                                               id, name);
+       if (!dev_attr->attr.name)
+               return -ENOMEM;
+       dev_attr->attr.mode = mode;
+       dev_attr->show = show;
+       dev_attr->store = store;
+
+       return 0;
+}
+
+static void free_constraint_attributes(void)
+{
+       int i;
+
+       for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+               kfree(constraint_attrs[i].power_limit_attr.attr.name);
+               kfree(constraint_attrs[i].time_window_attr.attr.name);
+               kfree(constraint_attrs[i].name_attr.attr.name);
+               kfree(constraint_attrs[i].max_power_attr.attr.name);
+               kfree(constraint_attrs[i].min_power_attr.attr.name);
+               kfree(constraint_attrs[i].max_time_window_attr.attr.name);
+               kfree(constraint_attrs[i].min_time_window_attr.attr.name);
+       }
+}
+
+static int seed_constraint_attributes(void)
+{
+       int i;
+       int ret;
+
+       for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
+               ret = create_constraint_attribute(i, "power_limit_uw",
+                                       S_IWUSR | S_IRUGO,
+                                       &constraint_attrs[i].power_limit_attr,
+                                       show_constraint_power_limit_uw,
+                                       store_constraint_power_limit_uw);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "time_window_us",
+                                       S_IWUSR | S_IRUGO,
+                                       &constraint_attrs[i].time_window_attr,
+                                       show_constraint_time_window_us,
+                                       store_constraint_time_window_us);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "name", S_IRUGO,
+                               &constraint_attrs[i].name_attr,
+                               show_constraint_name,
+                               NULL);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
+                               &constraint_attrs[i].max_power_attr,
+                               show_constraint_max_power_uw,
+                               NULL);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
+                               &constraint_attrs[i].min_power_attr,
+                               show_constraint_min_power_uw,
+                               NULL);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "max_time_window_us",
+                               S_IRUGO,
+                               &constraint_attrs[i].max_time_window_attr,
+                               show_constraint_max_time_window_us,
+                               NULL);
+               if (ret)
+                       goto err_alloc;
+               ret = create_constraint_attribute(i, "min_time_window_us",
+                               S_IRUGO,
+                               &constraint_attrs[i].min_time_window_attr,
+                               show_constraint_min_time_window_us,
+                               NULL);
+               if (ret)
+                       goto err_alloc;
+
+       }
+
+       return 0;
+
+err_alloc:
+       free_constraint_attributes();
+
+       return ret;
+}
+
+static int create_constraints(struct powercap_zone *power_zone,
+                               int nr_constraints,
+                               struct powercap_zone_constraint_ops *const_ops)
+{
+       int i;
+       int ret = 0;
+       int count;
+       struct powercap_zone_constraint *pconst;
+
+       if (!power_zone || !const_ops || !const_ops->get_power_limit_uw ||
+                                       !const_ops->set_power_limit_uw ||
+                                       !const_ops->get_time_window_us ||
+                                       !const_ops->set_time_window_us)
+               return -EINVAL;
+
+       count = power_zone->zone_attr_count;
+       for (i = 0; i < nr_constraints; ++i) {
+               pconst = &power_zone->constraints[i];
+               pconst->ops = const_ops;
+               pconst->id = power_zone->const_id_cnt;
+               power_zone->const_id_cnt++;
+               power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].power_limit_attr.attr;
+               power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].time_window_attr.attr;
+               if (pconst->ops->get_name)
+                       power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].name_attr.attr;
+               if (pconst->ops->get_max_power_uw)
+                       power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].max_power_attr.attr;
+               if (pconst->ops->get_min_power_uw)
+                       power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].min_power_attr.attr;
+               if (pconst->ops->get_max_time_window_us)
+                       power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].max_time_window_attr.attr;
+               if (pconst->ops->get_min_time_window_us)
+                       power_zone->zone_dev_attrs[count++] =
+                               &constraint_attrs[i].min_time_window_attr.attr;
+       }
+       power_zone->zone_attr_count = count;
+
+       return ret;
+}
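+
+/*
+ * For a zone registered with two constraints this ends up exposing, at a
+ * minimum, constraint_0_power_limit_uw, constraint_0_time_window_us,
+ * constraint_1_power_limit_uw and constraint_1_time_window_us, plus the
+ * optional name/min/max attributes whenever the corresponding callbacks
+ * are provided in struct powercap_zone_constraint_ops.
+ */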
+
+static bool control_type_valid(void *control_type)
+{
+       struct powercap_control_type *pos = NULL;
+       bool found = false;
+
+       mutex_lock(&powercap_cntrl_list_lock);
+
+       list_for_each_entry(pos, &powercap_cntrl_list, node) {
+               if (pos == control_type) {
+                       found = true;
+                       break;
+               }
+       }
+       mutex_unlock(&powercap_cntrl_list_lock);
+
+       return found;
+}
+
+static ssize_t name_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+       return sprintf(buf, "%s\n", power_zone->name);
+}
+
+static DEVICE_ATTR_RO(name);
+
+/* Collect the common per-zone attributes to be created in sysfs */
+static void create_power_zone_common_attributes(
+                                       struct powercap_zone *power_zone)
+{
+       int count = 0;
+
+       power_zone->zone_dev_attrs[count++] = &dev_attr_name.attr;
+       if (power_zone->ops->get_max_energy_range_uj)
+               power_zone->zone_dev_attrs[count++] =
+                                       &dev_attr_max_energy_range_uj.attr;
+       if (power_zone->ops->get_energy_uj)
+               power_zone->zone_dev_attrs[count++] =
+                                       &dev_attr_energy_uj.attr;
+       if (power_zone->ops->get_power_uw)
+               power_zone->zone_dev_attrs[count++] =
+                                       &dev_attr_power_uw.attr;
+       if (power_zone->ops->get_max_power_range_uw)
+               power_zone->zone_dev_attrs[count++] =
+                                       &dev_attr_max_power_range_uw.attr;
+       power_zone->zone_dev_attrs[count] = NULL;
+       power_zone->zone_attr_count = count;
+}
+
+static void powercap_release(struct device *dev)
+{
+       bool allocated;
+
+       if (dev->parent) {
+               struct powercap_zone *power_zone = to_powercap_zone(dev);
+
+               /* Store flag as the release() may free memory */
+               allocated = power_zone->allocated;
+               /* Remove id from parent idr struct */
+               idr_remove(power_zone->parent_idr, power_zone->id);
+               /* Destroy idrs allocated for this zone */
+               idr_destroy(&power_zone->idr);
+               kfree(power_zone->name);
+               kfree(power_zone->zone_dev_attrs);
+               kfree(power_zone->constraints);
+               if (power_zone->ops->release)
+                       power_zone->ops->release(power_zone);
+               if (allocated)
+                       kfree(power_zone);
+       } else {
+               struct powercap_control_type *control_type =
+                                               to_powercap_control_type(dev);
+
+               /* Store flag as the release() may free memory */
+               allocated = control_type->allocated;
+               idr_destroy(&control_type->idr);
+               mutex_destroy(&control_type->lock);
+               if (control_type->ops && control_type->ops->release)
+                       control_type->ops->release(control_type);
+               if (allocated)
+                       kfree(control_type);
+       }
+}
+
+static ssize_t enabled_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       bool mode = true;
+
+       /* Default is enabled */
+       if (dev->parent) {
+               struct powercap_zone *power_zone = to_powercap_zone(dev);
+               if (power_zone->ops->get_enable)
+                       if (power_zone->ops->get_enable(power_zone, &mode))
+                               mode = false;
+       } else {
+               struct powercap_control_type *control_type =
+                                               to_powercap_control_type(dev);
+               if (control_type->ops && control_type->ops->get_enable)
+                       if (control_type->ops->get_enable(control_type, &mode))
+                               mode = false;
+       }
+
+       return sprintf(buf, "%d\n", mode);
+}
+
+static ssize_t enabled_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf,  size_t len)
+{
+       bool mode;
+
+       if (strtobool(buf, &mode))
+               return -EINVAL;
+       if (dev->parent) {
+               struct powercap_zone *power_zone = to_powercap_zone(dev);
+               if (power_zone->ops->set_enable)
+                       if (!power_zone->ops->set_enable(power_zone, mode))
+                               return len;
+       } else {
+               struct powercap_control_type *control_type =
+                                               to_powercap_control_type(dev);
+               if (control_type->ops && control_type->ops->set_enable)
+                       if (!control_type->ops->set_enable(control_type, mode))
+                               return len;
+       }
+
+       return -ENOSYS;
+}
+
+static struct device_attribute powercap_def_attrs[] = {
+               __ATTR(enabled, S_IWUSR | S_IRUGO, enabled_show,
+                                                       enabled_store),
+               __ATTR_NULL
+};
+
+static struct class powercap_class = {
+       .name = "powercap",
+       .dev_release = powercap_release,
+       .dev_attrs = powercap_def_attrs,
+};
+
+struct powercap_zone *powercap_register_zone(
+                               struct powercap_zone *power_zone,
+                               struct powercap_control_type *control_type,
+                               const char *name,
+                               struct powercap_zone *parent,
+                               const struct powercap_zone_ops *ops,
+                               int nr_constraints,
+                               struct powercap_zone_constraint_ops *const_ops)
+{
+       int result;
+       int nr_attrs;
+
+       if (!name || !control_type || !ops ||
+                       nr_constraints > MAX_CONSTRAINTS_PER_ZONE ||
+                       (!ops->get_energy_uj && !ops->get_power_uw) ||
+                       !control_type_valid(control_type))
+               return ERR_PTR(-EINVAL);
+
+       if (power_zone) {
+               if (!ops->release)
+                       return ERR_PTR(-EINVAL);
+               memset(power_zone, 0, sizeof(*power_zone));
+       } else {
+               power_zone = kzalloc(sizeof(*power_zone), GFP_KERNEL);
+               if (!power_zone)
+                       return ERR_PTR(-ENOMEM);
+               power_zone->allocated = true;
+       }
+       power_zone->ops = ops;
+       power_zone->control_type_inst = control_type;
+       if (!parent) {
+               power_zone->dev.parent = &control_type->dev;
+               power_zone->parent_idr = &control_type->idr;
+       } else {
+               power_zone->dev.parent = &parent->dev;
+               power_zone->parent_idr = &parent->idr;
+       }
+       power_zone->dev.class = &powercap_class;
+
+       mutex_lock(&control_type->lock);
+       /* Using idr to get the unique id */
+       result = idr_alloc(power_zone->parent_idr, NULL, 0, 0, GFP_KERNEL);
+       if (result < 0)
+               goto err_idr_alloc;
+
+       power_zone->id = result;
+       idr_init(&power_zone->idr);
+       power_zone->name = kstrdup(name, GFP_KERNEL);
+       if (!power_zone->name)
+               goto err_name_alloc;
+       dev_set_name(&power_zone->dev, "%s:%x",
+                                       dev_name(power_zone->dev.parent),
+                                       power_zone->id);
+       power_zone->constraints = kzalloc(sizeof(*power_zone->constraints) *
+                                        nr_constraints, GFP_KERNEL);
+       if (!power_zone->constraints)
+               goto err_const_alloc;
+
+       nr_attrs = nr_constraints * POWERCAP_CONSTRAINTS_ATTRS +
+                                               POWERCAP_ZONE_MAX_ATTRS + 1;
+       power_zone->zone_dev_attrs = kzalloc(sizeof(void *) *
+                                               nr_attrs, GFP_KERNEL);
+       if (!power_zone->zone_dev_attrs)
+               goto err_attr_alloc;
+       create_power_zone_common_attributes(power_zone);
+       result = create_constraints(power_zone, nr_constraints, const_ops);
+       if (result)
+               goto err_dev_ret;
+
+       power_zone->zone_dev_attrs[power_zone->zone_attr_count] = NULL;
+       power_zone->dev_zone_attr_group.attrs = power_zone->zone_dev_attrs;
+       power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
+       power_zone->dev_attr_groups[1] = NULL;
+       power_zone->dev.groups = power_zone->dev_attr_groups;
+       result = device_register(&power_zone->dev);
+       if (result)
+               goto err_dev_ret;
+
+       control_type->nr_zones++;
+       mutex_unlock(&control_type->lock);
+
+       return power_zone;
+
+err_dev_ret:
+       kfree(power_zone->zone_dev_attrs);
+err_attr_alloc:
+       kfree(power_zone->constraints);
+err_const_alloc:
+       kfree(power_zone->name);
+err_name_alloc:
+       idr_remove(power_zone->parent_idr, power_zone->id);
+err_idr_alloc:
+       if (power_zone->allocated)
+               kfree(power_zone);
+       mutex_unlock(&control_type->lock);
+
+       return ERR_PTR(result);
+}
+EXPORT_SYMBOL_GPL(powercap_register_zone);
+
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+                               struct powercap_zone *power_zone)
+{
+       if (!power_zone || !control_type)
+               return -EINVAL;
+
+       mutex_lock(&control_type->lock);
+       control_type->nr_zones--;
+       mutex_unlock(&control_type->lock);
+
+       device_unregister(&power_zone->dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_zone);
+
+struct powercap_control_type *powercap_register_control_type(
+                               struct powercap_control_type *control_type,
+                               const char *name,
+                               const struct powercap_control_type_ops *ops)
+{
+       int result;
+
+       if (!name)
+               return ERR_PTR(-EINVAL);
+       if (control_type) {
+               if (!ops || !ops->release)
+                       return ERR_PTR(-EINVAL);
+               memset(control_type, 0, sizeof(*control_type));
+       } else {
+               control_type = kzalloc(sizeof(*control_type), GFP_KERNEL);
+               if (!control_type)
+                       return ERR_PTR(-ENOMEM);
+               control_type->allocated = true;
+       }
+       mutex_init(&control_type->lock);
+       control_type->ops = ops;
+       INIT_LIST_HEAD(&control_type->node);
+       control_type->dev.class = &powercap_class;
+       dev_set_name(&control_type->dev, "%s", name);
+       result = device_register(&control_type->dev);
+       if (result) {
+               if (control_type->allocated)
+                       kfree(control_type);
+               return ERR_PTR(result);
+       }
+       idr_init(&control_type->idr);
+
+       mutex_lock(&powercap_cntrl_list_lock);
+       list_add_tail(&control_type->node, &powercap_cntrl_list);
+       mutex_unlock(&powercap_cntrl_list_lock);
+
+       return control_type;
+}
+EXPORT_SYMBOL_GPL(powercap_register_control_type);
+
+int powercap_unregister_control_type(struct powercap_control_type *control_type)
+{
+       struct powercap_control_type *pos = NULL;
+
+       if (control_type->nr_zones) {
+               dev_err(&control_type->dev, "Zones of this type still not freed\n");
+               return -EINVAL;
+       }
+       mutex_lock(&powercap_cntrl_list_lock);
+       list_for_each_entry(pos, &powercap_cntrl_list, node) {
+               if (pos == control_type) {
+                       list_del(&control_type->node);
+                       mutex_unlock(&powercap_cntrl_list_lock);
+                       device_unregister(&control_type->dev);
+                       return 0;
+               }
+       }
+       mutex_unlock(&powercap_cntrl_list_lock);
+
+       return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
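+
+/*
+ * A minimal client-side sketch (error handling omitted; the names follow
+ * the intel_rapl driver earlier in this patch):
+ *
+ *     ct = powercap_register_control_type(NULL, "intel-rapl", NULL);
+ *     zone = powercap_register_zone(&rd->power_zone, ct, "package-0", NULL,
+ *                                   &zone_ops[rd->id], nr_pl,
+ *                                   &constraint_ops);
+ *     ...
+ *     powercap_unregister_zone(ct, zone);
+ *     powercap_unregister_control_type(ct);
+ */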
+
+static int __init powercap_init(void)
+{
+       int result = 0;
+
+       result = seed_constraint_attributes();
+       if (result)
+               return result;
+
+       result = class_register(&powercap_class);
+
+       return result;
+}
+
+device_initcall(powercap_init);
+
+MODULE_DESCRIPTION("PowerCap sysfs Driver");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
index 5adb2042e824fc30815ea891c54b89526680b4eb..cee7e2708a1fe35359eb81cc458d939e50ad1906 100644 (file)
@@ -2077,6 +2077,7 @@ dasd_eckd_build_format(struct dasd_device *base,
        int intensity = 0;
        int r0_perm;
        int nr_tracks;
+       int use_prefix;
 
        startdev = dasd_alias_get_start_dev(base);
        if (!startdev)
@@ -2106,28 +2107,46 @@ dasd_eckd_build_format(struct dasd_device *base,
                intensity = fdata->intensity;
        }
 
+       use_prefix = base_priv->features.feature[8] & 0x01;
+
        switch (intensity) {
        case 0x00:      /* Normal format */
        case 0x08:      /* Normal format, use cdl. */
                cplength = 2 + (rpt*nr_tracks);
-               datasize = sizeof(struct PFX_eckd_data) +
-                       sizeof(struct LO_eckd_data) +
-                       rpt * nr_tracks * sizeof(struct eckd_count);
+               if (use_prefix)
+                       datasize = sizeof(struct PFX_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
+               else
+                       datasize = sizeof(struct DE_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
                break;
        case 0x01:      /* Write record zero and format track. */
        case 0x09:      /* Write record zero and format track, use cdl. */
                cplength = 2 + rpt * nr_tracks;
-               datasize = sizeof(struct PFX_eckd_data) +
-                       sizeof(struct LO_eckd_data) +
-                       sizeof(struct eckd_count) +
-                       rpt * nr_tracks * sizeof(struct eckd_count);
+               if (use_prefix)
+                       datasize = sizeof(struct PFX_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
+               else
+                       datasize = sizeof(struct DE_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count) +
+                               rpt * nr_tracks * sizeof(struct eckd_count);
                break;
        case 0x04:      /* Invalidate track. */
        case 0x0c:      /* Invalidate track, use cdl. */
                cplength = 3;
-               datasize = sizeof(struct PFX_eckd_data) +
-                       sizeof(struct LO_eckd_data) +
-                       sizeof(struct eckd_count);
+               if (use_prefix)
+                       datasize = sizeof(struct PFX_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count);
+               else
+                       datasize = sizeof(struct DE_eckd_data) +
+                               sizeof(struct LO_eckd_data) +
+                               sizeof(struct eckd_count);
                break;
        default:
                dev_warn(&startdev->cdev->dev,
@@ -2147,14 +2166,25 @@ dasd_eckd_build_format(struct dasd_device *base,
 
        switch (intensity & ~0x08) {
        case 0x00: /* Normal format. */
-               prefix(ccw++, (struct PFX_eckd_data *) data,
-                      fdata->start_unit, fdata->stop_unit,
-                      DASD_ECKD_CCW_WRITE_CKD, base, startdev);
-               /* grant subsystem permission to format R0 */
-               if (r0_perm)
-                       ((struct PFX_eckd_data *)data)
-                               ->define_extent.ga_extended |= 0x04;
-               data += sizeof(struct PFX_eckd_data);
+               if (use_prefix) {
+                       prefix(ccw++, (struct PFX_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+                       /* grant subsystem permission to format R0 */
+                       if (r0_perm)
+                               ((struct PFX_eckd_data *)data)
+                                       ->define_extent.ga_extended |= 0x04;
+                       data += sizeof(struct PFX_eckd_data);
+               } else {
+                       define_extent(ccw++, (struct DE_eckd_data *) data,
+                                     fdata->start_unit, fdata->stop_unit,
+                                     DASD_ECKD_CCW_WRITE_CKD, startdev);
+                       /* grant subsystem permission to format R0 */
+                       if (r0_perm)
+                               ((struct DE_eckd_data *) data)
+                                       ->ga_extended |= 0x04;
+                       data += sizeof(struct DE_eckd_data);
+               }
                ccw[-1].flags |= CCW_FLAG_CC;
                locate_record(ccw++, (struct LO_eckd_data *) data,
                              fdata->start_unit, 0, rpt*nr_tracks,
@@ -2163,11 +2193,18 @@ dasd_eckd_build_format(struct dasd_device *base,
                data += sizeof(struct LO_eckd_data);
                break;
        case 0x01: /* Write record zero + format track. */
-               prefix(ccw++, (struct PFX_eckd_data *) data,
-                      fdata->start_unit, fdata->stop_unit,
-                      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
-                      base, startdev);
-               data += sizeof(struct PFX_eckd_data);
+               if (use_prefix) {
+                       prefix(ccw++, (struct PFX_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_RECORD_ZERO,
+                              base, startdev);
+                       data += sizeof(struct PFX_eckd_data);
+               } else {
+                       define_extent(ccw++, (struct DE_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev);
+                       data += sizeof(struct DE_eckd_data);
+               }
                ccw[-1].flags |= CCW_FLAG_CC;
                locate_record(ccw++, (struct LO_eckd_data *) data,
                              fdata->start_unit, 0, rpt * nr_tracks + 1,
@@ -2176,10 +2213,17 @@ dasd_eckd_build_format(struct dasd_device *base,
                data += sizeof(struct LO_eckd_data);
                break;
        case 0x04: /* Invalidate track. */
-               prefix(ccw++, (struct PFX_eckd_data *) data,
-                      fdata->start_unit, fdata->stop_unit,
-                      DASD_ECKD_CCW_WRITE_CKD, base, startdev);
-               data += sizeof(struct PFX_eckd_data);
+               if (use_prefix) {
+                       prefix(ccw++, (struct PFX_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_CKD, base, startdev);
+                       data += sizeof(struct PFX_eckd_data);
+               } else {
+                       define_extent(ccw++, (struct DE_eckd_data *) data,
+                              fdata->start_unit, fdata->stop_unit,
+                              DASD_ECKD_CCW_WRITE_CKD, startdev);
+                       data += sizeof(struct DE_eckd_data);
+               }
                ccw[-1].flags |= CCW_FLAG_CC;
                locate_record(ccw++, (struct LO_eckd_data *) data,
                              fdata->start_unit, 0, 1,
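The layout decision in the hunks above hinges on a single feature bit. A minimal sketch of that test, assuming base_priv is the device's dasd_eckd_private as used in the hunk (illustrative only, not part of the patch):

static inline int dasd_eckd_can_use_prefix(struct dasd_eckd_private *priv)
{
        /* bit 0 of feature byte 8 advertises Prefix (PFX) support */
        return priv->features.feature[8] & 0x01;
}

When the bit is clear the channel program is built with Define Extent plus Locate Record instead of Prefix, which is why both the datasize calculation and the CCW setup now branch on use_prefix.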
index 8b387b32fd622b981a19e951eca1b3fd30c783bf..e59331e6c2e5ef682dae02f7f5fbebe1bae1b5c8 100644 (file)
@@ -107,7 +107,7 @@ extern debug_info_t *scm_debug;
 
 static inline void SCM_LOG_HEX(int level, void *data, int length)
 {
-       if (level > scm_debug->level)
+       if (!debug_level_enabled(scm_debug, level))
                return;
        while (length > 0) {
                debug_event(scm_debug, level, data, length);
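This and the following hunks replace open-coded comparisons against dbf->level with the debug_level_enabled() helper from the s390 debug facility. A minimal caller sketch, assuming a debug area obtained from debug_register():

#include <asm/debug.h>

static void my_log_hex(debug_info_t *dbf, int level, void *data, int length)
{
        /* cheap early-out: no event is built if the level is filtered */
        if (!debug_level_enabled(dbf, level))
                return;
        debug_event(dbf, level, data, length);
}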
index 4600aa10a1c6c5824f36168fb83b414d2e4e915b..668b32b0dc1dc39317ebbc682f55bedbc86537c3 100644 (file)
@@ -60,7 +60,7 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn)
        struct appldata_product_id id;
        int rc;
 
-       strcpy(id.prod_nr, "LNXAPPL");
+       strncpy(id.prod_nr, "LNXAPPL", 7);
        id.prod_fn = myhdr->applid;
        id.record_nr = myhdr->record_num;
        id.version_nr = myhdr->version;
index 24a08e8f19e1b9ed3366b5f035d4a0fd214bf97e..2cdec21e8924ea7b0403aafd1b0bae172a76416e 100644 (file)
@@ -615,10 +615,10 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
 
        if (rp->state != RAW3270_STATE_RESET)
                return;
-       if (rq && rq->rc) {
+       if (rq->rc) {
                /* Reset command failed. */
                rp->state = RAW3270_STATE_INIT;
-       } else if (0 && MACHINE_IS_VM) {
+       } else if (MACHINE_IS_VM) {
                raw3270_size_device_vm(rp);
                raw3270_size_device_done(rp);
        } else
index a3aa374799dcf99bd334b6e49c8d7753838fbdc3..1fe264379e0d10b57ddca456ce16b72c23535ccb 100644 (file)
@@ -486,7 +486,7 @@ sclp_sync_wait(void)
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
-               timeout = get_tod_clock() +
+               timeout = get_tod_clock_fast() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
@@ -508,7 +508,7 @@ sclp_sync_wait(void)
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
-                   get_tod_clock() > timeout &&
+                   get_tod_clock_fast() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                cpu_relax();
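get_tod_clock_fast() reads the TOD clock without the serializing STCK semantics, which is sufficient for the coarse busy-wait deadlines used here. A sketch of the deadline idiom, assuming the delay is given in microseconds (on s390 the TOD clock advances 4096 units per microsecond):

#include <asm/timex.h>
#include <asm/processor.h>

static void busy_wait_us(unsigned long long delay_us)
{
        unsigned long long deadline;

        deadline = get_tod_clock_fast() + (delay_us << 12);
        while (get_tod_clock_fast() < deadline)
                cpu_relax();
}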
index 8cd34bf644b3ee4b34065814fab4300774c8ad23..77df9cb00688feeda6cad1fbfd4623fc5a26134e 100644 (file)
@@ -145,9 +145,11 @@ bool __init sclp_has_linemode(void)
 
        if (sccb->header.response_code != 0x20)
                return 0;
-       if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))
-               return 1;
-       return 0;
+       if (!(sccb->sclp_send_mask & (EVTYP_OPCMD_MASK | EVTYP_PMSGCMD_MASK)))
+               return 0;
+       if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
+               return 0;
+       return 1;
 }
 
 bool __init sclp_has_vt220(void)
index a0f47c83fd62f94205a987a728edf207f86c3bc6..3f4ca4e09a4ccc4d49fba39d64094fafe1f1fdc7 100644 (file)
@@ -810,7 +810,7 @@ static void tty3270_resize_work(struct work_struct *work)
        struct winsize ws;
 
        screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
-       if (!screen)
+       if (IS_ERR(screen))
                return;
        /* Switch to new output size */
        spin_lock_bh(&tp->view.lock);
index 9b3a24e8d3a0e0dfec9ffcbf1757ddd37f66329e..cf31d3321dab86b889b16fa5e3b9b2e433c3992d 100644 (file)
@@ -313,7 +313,7 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp)
        int ret;
 
        dev_num = iminor(inode);
-       if (dev_num > MAXMINOR)
+       if (dev_num >= MAXMINOR)
                return -ENODEV;
        logptr = &sys_ser[dev_num];
 
index 794820a123d0c83c07b1b2e546efc84c25daa14a..ffb1fcf0bf5bed9928f53e1205f8b6b01635f6b6 100644 (file)
@@ -151,7 +151,7 @@ static int __init init_cpu_info(enum arch_id arch)
 
        /* get info for boot cpu from lowcore, stored in the HSA */
 
-       sa = kmalloc(sizeof(*sa), GFP_KERNEL);
+       sa = dump_save_area_create(0);
        if (!sa)
                return -ENOMEM;
        if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
@@ -159,7 +159,6 @@ static int __init init_cpu_info(enum arch_id arch)
                kfree(sa);
                return -EIO;
        }
-       zfcpdump_save_areas[0] = sa;
        return 0;
 }
 
@@ -246,24 +245,25 @@ static int copy_lc(void __user *buf, void *sa, int sa_off, int len)
 static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
 {
        unsigned long end;
-       int i = 0;
+       int i;
 
        if (count == 0)
                return 0;
 
        end = start + count;
-       while (zfcpdump_save_areas[i]) {
+       for (i = 0; i < dump_save_areas.count; i++) {
                unsigned long cp_start, cp_end; /* copy range */
                unsigned long sa_start, sa_end; /* save area range */
                unsigned long prefix;
                unsigned long sa_off, len, buf_off;
+               struct save_area *save_area = dump_save_areas.areas[i];
 
-               prefix = zfcpdump_save_areas[i]->pref_reg;
+               prefix = save_area->pref_reg;
                sa_start = prefix + sys_info.sa_base;
                sa_end = prefix + sys_info.sa_base + sys_info.sa_size;
 
                if ((end < sa_start) || (start > sa_end))
-                       goto next;
+                       continue;
                cp_start = max(start, sa_start);
                cp_end = min(end, sa_end);
 
@@ -272,10 +272,8 @@ static int zcore_add_lc(char __user *buf, unsigned long start, size_t count)
                len = cp_end - cp_start;
 
                TRACE("copy_lc for: %lx\n", start);
-               if (copy_lc(buf + buf_off, zfcpdump_save_areas[i], sa_off, len))
+               if (copy_lc(buf + buf_off, save_area, sa_off, len))
                        return -EFAULT;
-next:
-               i++;
        }
        return 0;
 }
@@ -637,8 +635,8 @@ static void __init zcore_header_init(int arch, struct zcore_header *hdr,
        hdr->num_pages = mem_size / PAGE_SIZE;
        hdr->tod = get_tod_clock();
        get_cpu_id(&hdr->cpu_id);
-       for (i = 0; zfcpdump_save_areas[i]; i++) {
-               prefix = zfcpdump_save_areas[i]->pref_reg;
+       for (i = 0; i < dump_save_areas.count; i++) {
+               prefix = dump_save_areas.areas[i]->pref_reg;
                hdr->real_cpu_cnt++;
                if (!prefix)
                        continue;
index d028fd800c9c6afd7f5b5627475c4bfe6552f04b..f055df0b167fc83e1f2f6f17e0aef47e129007b4 100644 (file)
@@ -194,15 +194,14 @@ EXPORT_SYMBOL(airq_iv_release);
  */
 unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
 {
-       const unsigned long be_to_le = BITS_PER_LONG - 1;
        unsigned long bit;
 
        if (!iv->avail)
                return -1UL;
        spin_lock(&iv->lock);
-       bit = find_first_bit_left(iv->avail, iv->bits);
+       bit = find_first_bit_inv(iv->avail, iv->bits);
        if (bit < iv->bits) {
-               clear_bit(bit ^ be_to_le, iv->avail);
+               clear_bit_inv(bit, iv->avail);
                if (bit >= iv->end)
                        iv->end = bit + 1;
        } else
@@ -220,19 +219,17 @@ EXPORT_SYMBOL(airq_iv_alloc_bit);
  */
 void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
 {
-       const unsigned long be_to_le = BITS_PER_LONG - 1;
-
        if (!iv->avail)
                return;
        spin_lock(&iv->lock);
        /* Clear (possibly left over) interrupt bit */
-       clear_bit(bit ^ be_to_le, iv->vector);
+       clear_bit_inv(bit, iv->vector);
        /* Make the bit position available again */
-       set_bit(bit ^ be_to_le, iv->avail);
+       set_bit_inv(bit, iv->avail);
        if (bit == iv->end - 1) {
                /* Find new end of bit-field */
                while (--iv->end > 0)
-                       if (!test_bit((iv->end - 1) ^ be_to_le, iv->avail))
+                       if (!test_bit_inv(iv->end - 1, iv->avail))
                                break;
        }
        spin_unlock(&iv->lock);
@@ -251,15 +248,13 @@ EXPORT_SYMBOL(airq_iv_free_bit);
 unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
                           unsigned long end)
 {
-       const unsigned long be_to_le = BITS_PER_LONG - 1;
        unsigned long bit;
 
        /* Find non-zero bit starting from 'ivs->next'. */
-       bit = find_next_bit_left(iv->vector, end, start);
+       bit = find_next_bit_inv(iv->vector, end, start);
        if (bit >= end)
                return -1UL;
-       /* Clear interrupt bit (find left uses big-endian bit numbers) */
-       clear_bit(bit ^ be_to_le, iv->vector);
+       clear_bit_inv(bit, iv->vector);
        return bit;
 }
 EXPORT_SYMBOL(airq_iv_scan);
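The *_inv bit helpers operate directly on the inverse (MSB-0) bit numbering used by the adapter-interrupt hardware, replacing the open-coded bit ^ (BITS_PER_LONG - 1) conversion. Illustrative sketch:

#include <asm/bitops.h>

/* Mark bit 'nr' pending in an MSB-0 numbered vector; equivalent to the
 * previous set_bit(nr ^ (BITS_PER_LONG - 1), vector).
 */
static inline void iv_mark_pending(unsigned long nr, unsigned long *vector)
{
        set_bit_inv(nr, vector);
}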
index d7da67a31c77f606ef68445f9da446b521a4abf1..88e35d85d205f7c21de1f81860865386a7845289 100644 (file)
@@ -878,9 +878,9 @@ static void css_reset(void)
                        atomic_inc(&chpid_reset_count);
        }
        /* Wait for machine check for all channel paths. */
-       timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
+       timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
        while (atomic_read(&chpid_reset_count) != 0) {
-               if (get_tod_clock() > timeout)
+               if (get_tod_clock_fast() > timeout)
                        break;
                cpu_relax();
        }
index d9eddcba7e884d788a9d8c1f3dad14bc3d71cf77..aca7bfc113aaeb4067043cd10bdc0662dd8ec286 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/kernel_stat.h>
+#include <linux/completion.h>
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
@@ -42,7 +43,7 @@ static debug_info_t *eadm_debug;
 
 static void EADM_LOG_HEX(int level, void *data, int length)
 {
-       if (level > eadm_debug->level)
+       if (!debug_level_enabled(eadm_debug, level))
                return;
        while (length > 0) {
                debug_event(eadm_debug, level, data, length);
@@ -159,6 +160,9 @@ static void eadm_subchannel_irq(struct subchannel *sch)
        }
        scm_irq_handler((struct aob *)(unsigned long)scsw->aob, error);
        private->state = EADM_IDLE;
+
+       if (private->completion)
+               complete(private->completion);
 }
 
 static struct subchannel *eadm_get_idle_sch(void)
@@ -255,13 +259,32 @@ out:
 
 static void eadm_quiesce(struct subchannel *sch)
 {
+       struct eadm_private *private = get_eadm_private(sch);
+       DECLARE_COMPLETION_ONSTACK(completion);
        int ret;
 
+       spin_lock_irq(sch->lock);
+       if (private->state != EADM_BUSY)
+               goto disable;
+
+       if (eadm_subchannel_clear(sch))
+               goto disable;
+
+       private->completion = &completion;
+       spin_unlock_irq(sch->lock);
+
+       wait_for_completion_io(&completion);
+
+       spin_lock_irq(sch->lock);
+       private->completion = NULL;
+
+disable:
+       eadm_subchannel_set_timeout(sch, 0);
        do {
-               spin_lock_irq(sch->lock);
                ret = cio_disable_subchannel(sch);
-               spin_unlock_irq(sch->lock);
        } while (ret == -EBUSY);
+
+       spin_unlock_irq(sch->lock);
 }
 
 static int eadm_subchannel_remove(struct subchannel *sch)
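The quiesce path now waits for an outstanding request to be signalled from the interrupt handler before disabling the subchannel, instead of just looping on cio_disable_subchannel(). The underlying pattern is a standard on-stack completion handshake between process and interrupt context; a self-contained sketch with illustrative names, not taken from the driver:

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct busy_dev {
        spinlock_t lock;
        struct completion *done;        /* set only while quiescing */
        bool busy;
};

/* Interrupt side: assumed to run with d->lock held by the caller. */
static void busy_dev_irq(struct busy_dev *d)
{
        d->busy = false;
        if (d->done)
                complete(d->done);
}

static void busy_dev_quiesce(struct busy_dev *d)
{
        DECLARE_COMPLETION_ONSTACK(done);

        spin_lock_irq(&d->lock);
        if (d->busy) {
                d->done = &done;
                spin_unlock_irq(&d->lock);
                wait_for_completion(&done);
                spin_lock_irq(&d->lock);
                d->done = NULL;
        }
        spin_unlock_irq(&d->lock);
}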
index 2779be093982ee1c219f4c9c6c24ab8edebe8ce3..9664e4653f9861416a78f4193cbe57d8d3105864 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef EADM_SCH_H
 #define EADM_SCH_H
 
+#include <linux/completion.h>
 #include <linux/device.h>
 #include <linux/timer.h>
 #include <linux/list.h>
@@ -9,9 +10,10 @@
 struct eadm_private {
        union orb orb;
        enum {EADM_IDLE, EADM_BUSY, EADM_NOT_OPER} state;
+       struct completion *completion;
+       struct subchannel *sch;
        struct timer_list timer;
        struct list_head head;
-       struct subchannel *sch;
 } __aligned(8);
 
 #define get_eadm_private(n) ((struct eadm_private *)dev_get_drvdata(&n->dev))
index 647b422bb22a594b760d0a5590c962a7683192b6..dfac9bfefea3f11a9f61069006281a750551f39c 100644 (file)
 extern debug_info_t *qdio_dbf_setup;
 extern debug_info_t *qdio_dbf_error;
 
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
-
 #define DBF_ERR                3       /* error conditions     */
 #define DBF_WARN       4       /* warning conditions   */
 #define DBF_INFO       6       /* informational        */
@@ -65,7 +59,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len)
 #define DBF_DEV_EVENT(level, device, text...) \
        do { \
                char debug_buffer[QDIO_DBF_LEN]; \
-               if (qdio_dbf_passes(device->debug_area, level)) { \
+               if (debug_level_enabled(device->debug_area, level)) { \
                        snprintf(debug_buffer, QDIO_DBF_LEN, text); \
                        debug_text_event(device->debug_area, level, debug_buffer); \
                } \
index 8ed52aa4912269405158e300f1c983577b29caec..3e602e8affa78cba97282e41c5f9e3a18315e45b 100644 (file)
@@ -338,10 +338,10 @@ again:
                retries++;
 
                if (!start_time) {
-                       start_time = get_tod_clock();
+                       start_time = get_tod_clock_fast();
                        goto again;
                }
-               if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+               if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
                        goto again;
        }
        if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
        int count, stop;
        unsigned char state = 0;
 
-       q->timestamp = get_tod_clock();
+       q->timestamp = get_tod_clock_fast();
 
        /*
         * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -528,7 +528,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
        case SLSB_P_INPUT_PRIMED:
                inbound_primed(q, count);
                q->first_to_check = add_buf(q->first_to_check, count);
-               if (atomic_sub(count, &q->nr_buf_used) == 0)
+               if (atomic_sub_return(count, &q->nr_buf_used) == 0)
                        qperf_inc(q, inbound_queue_full);
                if (q->irq_ptr->perf_stat_enabled)
                        account_sbals(q, count);
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
         * At this point we know, that inbound first_to_check
         * has (probably) not moved (see qdio_inbound_processing).
         */
-       if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+       if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
                              q->first_to_check);
                return 1;
@@ -728,7 +728,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
        int count, stop;
        unsigned char state = 0;
 
-       q->timestamp = get_tod_clock();
+       q->timestamp = get_tod_clock_fast();
 
        if (need_siga_sync(q))
                if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
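One of the hunks above also corrects the use of atomic_sub(): in the generic atomic API atomic_sub() does not return a value, so code that needs the post-subtraction result must use atomic_sub_return(). Minimal sketch:

#include <linux/atomic.h>
#include <linux/types.h>

/* Returns true when the counter reaches zero after consuming 'count'. */
static bool consume_buffers(atomic_t *nr_buf_used, int count)
{
        return atomic_sub_return(count, nr_buf_used) == 0;
}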
index 841ea72e4a4e93a12d397070530191d4b02adb5d..28d9349de1adf77cf20dbe5f938d6a27afd3eaeb 100644 (file)
 /* that gives us 15 characters in the text event views */
 #define ZCRYPT_DBF_LEN 16
 
-/* sort out low debug levels early to avoid wasted sprints */
-static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
-
 #define DBF_ERR                3       /* error conditions     */
 #define DBF_WARN       4       /* warning conditions   */
 #define DBF_INFO       6       /* informational        */
@@ -25,7 +19,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
 
 #define ZCRYPT_DBF_COMMON(level, text...) \
        do { \
-               if (zcrypt_dbf_passes(zcrypt_dbf_common, level)) { \
+               if (debug_level_enabled(zcrypt_dbf_common, level)) { \
                        char debug_buffer[ZCRYPT_DBF_LEN]; \
                        snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
                        debug_text_event(zcrypt_dbf_common, level, \
@@ -35,7 +29,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
 
 #define ZCRYPT_DBF_DEVICES(level, text...) \
        do { \
-               if (zcrypt_dbf_passes(zcrypt_dbf_devices, level)) { \
+               if (debug_level_enabled(zcrypt_dbf_devices, level)) { \
                        char debug_buffer[ZCRYPT_DBF_LEN]; \
                        snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
                        debug_text_event(zcrypt_dbf_devices, level, \
@@ -45,7 +39,7 @@ static inline int zcrypt_dbf_passes(debug_info_t *dbf_grp, int level)
 
 #define ZCRYPT_DBF_DEV(level, device, text...) \
        do { \
-               if (zcrypt_dbf_passes(device->dbf_area, level)) { \
+               if (debug_level_enabled(device->dbf_area, level)) { \
                        char debug_buffer[ZCRYPT_DBF_LEN]; \
                        snprintf(debug_buffer, ZCRYPT_DBF_LEN, text); \
                        debug_text_event(device->dbf_area, level, \
index 1bc5904df19ff550d3094ed939faab28f5cac44c..3339b9b607b3aa6ca5ad5ba60c20cd5c3144df55 100644 (file)
@@ -114,15 +114,9 @@ do { \
        debug_event(claw_dbf_##name,level,(void*)(addr),len); \
 } while (0)
 
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
-
 #define CLAW_DBF_TEXT_(level,name,text...) \
        do { \
-               if (claw_dbf_passes(claw_dbf_##name, level)) { \
+               if (debug_level_enabled(claw_dbf_##name, level)) { \
                        sprintf(debug_buffer, text); \
                        debug_text_event(claw_dbf_##name, level, \
                                                debug_buffer); \
index 6514e1cb3f1cf1b21cc7bd2d4422ca48fe2b7d47..8363f1c966ef7e5a2e55d6f2a861ae011f9601d7 100644 (file)
@@ -66,7 +66,7 @@ void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
        char dbf_txt_buf[64];
        va_list args;
 
-       if (level > (ctcm_dbf[dbf_nix].id)->level)
+       if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level))
                return;
        va_start(args, fmt);
        vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
index 8c03392ac833f2a284c7f422f296b52fc610e285..150fcb4cebc3510e5a4e45dfb61f63728cd5ed7d 100644 (file)
@@ -16,15 +16,9 @@ do { \
        debug_event(lcs_dbf_##name,level,(void*)(addr),len); \
 } while (0)
 
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
-
 #define LCS_DBF_TEXT_(level,name,text...) \
        do { \
-               if (lcs_dbf_passes(lcs_dbf_##name, level)) { \
+               if (debug_level_enabled(lcs_dbf_##name, level)) { \
                        sprintf(debug_buffer, text); \
                        debug_text_event(lcs_dbf_##name, level, debug_buffer); \
                } \
index 279ad504ec3c85e3e410d231fac82e812a75b5d2..9b333fcf1a4c38ea45d352f253aaa47838d78c91 100644 (file)
@@ -105,15 +105,9 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
 
 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
 
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
-
 #define IUCV_DBF_TEXT_(name, level, text...) \
        do { \
-               if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
+               if (debug_level_enabled(iucv_dbf_##name, level)) { \
                        char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
                        sprintf(__buf, text); \
                        debug_text_event(iucv_dbf_##name, level, __buf); \
index 0a328d0d11bebaac40ccbaf2239ca999dd736373..d7b66a28fe75577dd6ccaef41619eb19b327df34 100644 (file)
@@ -5096,7 +5096,7 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
        char dbf_txt_buf[32];
        va_list args;
 
-       if (level > id->level)
+       if (!debug_level_enabled(id, level))
                return;
        va_start(args, fmt);
        vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
index 3ac7a4b30dd910ef6f59c4ada70966d66694c90e..0be3d48681aead94466a71ab7b34c6ae7ca098ff 100644 (file)
@@ -278,7 +278,7 @@ struct zfcp_dbf {
 static inline
 void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {
-       if (level <= req->adapter->dbf->hba->level)
+       if (debug_level_enabled(req->adapter->dbf->hba, level))
                zfcp_dbf_hba_fsf_res(tag, req);
 }
 
@@ -317,7 +317,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
        struct zfcp_adapter *adapter = (struct zfcp_adapter *)
                                        scmd->device->host->hostdata[0];
 
-       if (level <= adapter->dbf->scsi->level)
+       if (debug_level_enabled(adapter->dbf->scsi, level))
                zfcp_dbf_scsi(tag, scmd, req);
 }
 
index feab3a5e50b5ec6f546b102b6550ff7652424cac..757eb0716d45d4f273519ca2e2dd2ed03486a4a8 100644 (file)
@@ -696,7 +696,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
        while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,
                                        PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
                                        pci_device)) != NULL) {
-               struct blogic_adapter *adapter = adapter;
+               struct blogic_adapter *host_adapter = adapter;
                struct blogic_adapter_info adapter_info;
                enum blogic_isa_ioport mod_ioaddr_req;
                unsigned char bus;
@@ -744,9 +744,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
                   known and enabled, note that the particular Standard ISA I/O
                   Address should not be probed.
                 */
-               adapter->io_addr = io_addr;
-               blogic_intreset(adapter);
-               if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
+               host_adapter->io_addr = io_addr;
+               blogic_intreset(host_adapter);
+               if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,
                                &adapter_info, sizeof(adapter_info)) ==
                                sizeof(adapter_info)) {
                        if (adapter_info.isa_port < 6)
@@ -762,7 +762,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
                   I/O Address assigned at system initialization.
                 */
                mod_ioaddr_req = BLOGIC_IO_DISABLE;
-               blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
+               blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,
                                sizeof(mod_ioaddr_req), NULL, 0);
                /*
                   For the first MultiMaster Host Adapter enumerated,
@@ -779,12 +779,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 
                        fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;
                        fetch_localram.count = sizeof(autoscsi_byte45);
-                       blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM,
+                       blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,
                                        &fetch_localram, sizeof(fetch_localram),
                                        &autoscsi_byte45,
                                        sizeof(autoscsi_byte45));
-                       blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id,
-                                       sizeof(id));
+                       blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0,
+                                       &id, sizeof(id));
                        if (id.fw_ver_digit1 == '5')
                                force_scan_order =
                                        autoscsi_byte45.force_scan_order;
index f8ca7becacca15477dd04912acf4ccd11df948fc..7591fa4e28bb2be8ed6e6a64e972568429457f20 100644 (file)
@@ -766,49 +766,20 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
        bfad->pcidev = pdev;
 
        /* Adjust PCIe Maximum Read Request Size */
-       if (pcie_max_read_reqsz > 0) {
-               int pcie_cap_reg;
-               u16 pcie_dev_ctl;
-               u16 mask = 0xffff;
-
-               switch (pcie_max_read_reqsz) {
-               case 128:
-                       mask = 0x0;
-                       break;
-               case 256:
-                       mask = 0x1000;
-                       break;
-               case 512:
-                       mask = 0x2000;
-                       break;
-               case 1024:
-                       mask = 0x3000;
-                       break;
-               case 2048:
-                       mask = 0x4000;
-                       break;
-               case 4096:
-                       mask = 0x5000;
-                       break;
-               default:
-                       break;
-               }
-
-               pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-               if (mask != 0xffff && pcie_cap_reg) {
-                       pcie_cap_reg += 0x08;
-                       pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
-                       if ((pcie_dev_ctl & 0x7000) != mask) {
-                               printk(KERN_WARNING "BFA[%s]: "
+       if (pci_is_pcie(pdev) && pcie_max_read_reqsz) {
+               if (pcie_max_read_reqsz >= 128 &&
+                   pcie_max_read_reqsz <= 4096 &&
+                   is_power_of_2(pcie_max_read_reqsz)) {
+                       int max_rq = pcie_get_readrq(pdev);
+                       printk(KERN_WARNING "BFA[%s]: "
                                "pcie_max_read_request_size is %d, "
-                               "reset to %d\n", bfad->pci_name,
-                               (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
+                               "reset to %d\n", bfad->pci_name, max_rq,
                                pcie_max_read_reqsz);
-
-                               pcie_dev_ctl &= ~0x7000;
-                               pci_write_config_word(pdev, pcie_cap_reg,
-                                               pcie_dev_ctl | mask);
-                       }
+                       pcie_set_readrq(pdev, pcie_max_read_reqsz);
+               } else {
+                       printk(KERN_WARNING "BFA[%s]: invalid "
+                              "pcie_max_read_request_size %d ignored\n",
+                              bfad->pci_name, pcie_max_read_reqsz);
                }
        }
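The rewritten probe relies on the PCI core's MRRS accessors instead of walking the PCIe capability by hand. A condensed sketch of the same adjustment, assuming req_sz comes from the pcie_max_read_reqsz module parameter (the warning text is illustrative):

#include <linux/pci.h>
#include <linux/log2.h>

static void adjust_mrrs(struct pci_dev *pdev, int req_sz)
{
        if (!pci_is_pcie(pdev) || !req_sz)
                return;
        if (req_sz < 128 || req_sz > 4096 || !is_power_of_2(req_sz)) {
                dev_warn(&pdev->dev, "invalid MRRS %d ignored\n", req_sz);
                return;
        }
        if (pcie_get_readrq(pdev) != req_sz)
                pcie_set_readrq(pdev, req_sz);
}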
 
index 0eb35b9b37843852e140e1bb3a601c5fa3e0f3a4..0eaec474895788a6437e12c9907e053c9f74b048 100644 (file)
@@ -852,22 +852,6 @@ csio_hw_get_flash_params(struct csio_hw *hw)
        return 0;
 }
 
-static void
-csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
-{
-       uint16_t val;
-       int pcie_cap;
-
-       if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
-               pci_read_config_word(hw->pdev,
-                                    pcie_cap + PCI_EXP_DEVCTL2, &val);
-               val &= 0xfff0;
-               val |= range ;
-               pci_write_config_word(hw->pdev,
-                                     pcie_cap + PCI_EXP_DEVCTL2, val);
-       }
-}
-
 /*****************************************************************************/
 /* HW State machine assists                                                  */
 /*****************************************************************************/
@@ -2069,8 +2053,10 @@ csio_hw_configure(struct csio_hw *hw)
                goto out;
        }
 
-       /* Set pci completion timeout value to 4 seconds. */
-       csio_set_pcie_completion_timeout(hw, 0xd);
+       /* Set PCIe completion timeout to 4 seconds */
+       if (pci_is_pcie(hw->pdev))
+               pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2,
+                               PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
 
        hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
 
index 2ef497ebadc06500a2aa95090f26d188cf8bccb7..ee5c1833eb731cb1e0e731448c287ca1b8f5b2ad 100644 (file)
@@ -20,7 +20,7 @@
  * | Device Discovery             |       0x2095       | 0x2020-0x2022, |
  * |                              |                    | 0x2011-0x2012, |
  * |                              |                    | 0x2016         |
- * | Queue Command and IO tracing |       0x3058       | 0x3006-0x300b  |
+ * | Queue Command and IO tracing |       0x3059       | 0x3006-0x300b  |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x303d-0x3041  |
  * |                              |                    | 0x302d,0x3033  |
index df1b30ba938cc0578d409880711d2afa5b2a8898..ff9c86b1a0d896a870dd62b22515ee5dfc629284 100644 (file)
@@ -1957,6 +1957,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        que = MSW(sts->handle);
        req = ha->req_q_map[que];
 
+       /* Check for invalid queue pointer */
+       if (req == NULL ||
+           que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
+               ql_dbg(ql_dbg_io, vha, 0x3059,
+                   "Invalid status handle (0x%x): Bad req pointer. req=%p, "
+                   "que=%u.\n", sts->handle, req, que);
+               return;
+       }
+
        /* Validate handle. */
        if (handle < req->num_outstanding_cmds)
                sp = req->outstanding_cmds[handle];
index 62ee7131b20420a7d1d363682dde7192ed152758..30d20e74e48a393b3560a44bb354fb1fd058e60e 100644 (file)
@@ -507,7 +507,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha)
        pci_write_config_word(ha->pdev, PCI_COMMAND, w);
 
        /* PCIe -- adjust Maximum Read Request Size (2048). */
-       if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+       if (pci_is_pcie(ha->pdev))
                pcie_set_readrq(ha->pdev, 2048);
 
        ha->chip_revision = ha->pdev->revision;
@@ -660,10 +660,8 @@ char *
 qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
 {
        struct qla_hw_data *ha = vha->hw;
-       int pcie_reg;
 
-       pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
-       if (pcie_reg) {
+       if (pci_is_pcie(ha->pdev)) {
                strcpy(str, "PCIe iSA");
                return str;
        }
index 9f01bbbf3a26c9138a9f457fa9af999fa6572b09..bcd57f699ebbcc7dad7d597044c7bc54da22cf52 100644 (file)
@@ -494,18 +494,14 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
        static char *pci_bus_modes[] = { "33", "66", "100", "133", };
        struct qla_hw_data *ha = vha->hw;
        uint32_t pci_bus;
-       int pcie_reg;
 
-       pcie_reg = pci_pcie_cap(ha->pdev);
-       if (pcie_reg) {
+       if (pci_is_pcie(ha->pdev)) {
                char lwstr[6];
-               uint16_t pcie_lstat, lspeed, lwidth;
+               uint32_t lstat, lspeed, lwidth;
 
-               pcie_reg += PCI_EXP_LNKCAP;
-               pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
-               lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
-               lwidth = (pcie_lstat &
-                   (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
+               pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat);
+               lspeed = lstat & PCI_EXP_LNKCAP_SLS;
+               lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;
 
                strcpy(str, "PCIe (");
                switch (lspeed) {
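pcie_capability_read_dword() hides the capability-offset lookup, and the Link Capabilities fields have named masks, which removes the hand-computed register offset and bit arithmetic. A sketch of the same query with a neutral printout (hypothetical helper, not from the driver):

#include <linux/pci.h>

static void show_link_caps(struct pci_dev *pdev)
{
        u32 lnkcap;

        if (!pci_is_pcie(pdev))
                return;
        pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &lnkcap);
        dev_info(&pdev->dev, "link caps: speed field %u, width x%u\n",
                 lnkcap & PCI_EXP_LNKCAP_SLS,
                 (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
}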
index d1549b74e2d1b91eeb4f8ebf459ec4f13db73275..7bd7f0d5f050a2ece3f176b1f05ca2f8936270a3 100644 (file)
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 
        host_dev = scsi_get_device(shost);
        if (host_dev && host_dev->dma_mask)
-               bounce_limit = *host_dev->dma_mask;
+               bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
 
        return bounce_limit;
 }
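dma_max_pfn() folds in any platform DMA offset, so the bounce limit is now derived from the highest DMA-able page frame rather than from the raw mask. Sketch of the computation, assuming BLK_BOUNCE_HIGH as the fallback used by the block layer:

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

static u64 host_bounce_limit(struct device *dev)
{
        if (dev && dev->dma_mask)
                return (u64)dma_max_pfn(dev) << PAGE_SHIFT;
        return BLK_BOUNCE_HIGH;
}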
index e62d17d41d4e7b84ddf7469194aa0c63d541d59d..5693f6d7eddb8b20cd9b3e0374a4e73e60ab7aa2 100644 (file)
@@ -2854,6 +2854,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
                gd->events |= DISK_EVENT_MEDIA_CHANGE;
        }
 
+       blk_pm_runtime_init(sdp->request_queue, dev);
        add_disk(gd);
        if (sdkp->capacity)
                sd_dif_config_host(sdkp);
@@ -2862,7 +2863,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
        sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
                  sdp->removable ? "removable " : "");
-       blk_pm_runtime_init(sdp->request_queue, dev);
        scsi_autopm_put_device(sdp);
        put_device(&sdkp->dev);
 }
index 74b88efde6ad408123de6a9b96ec37460195fe82..e6bb2352df40d5759115108b071fa454c8f7a688 100644 (file)
@@ -710,19 +710,15 @@ static struct scsi_host_template virtscsi_host_template_multi = {
 #define virtscsi_config_get(vdev, fld) \
        ({ \
                typeof(((struct virtio_scsi_config *)0)->fld) __val; \
-               vdev->config->get(vdev, \
-                                 offsetof(struct virtio_scsi_config, fld), \
-                                 &__val, sizeof(__val)); \
+               virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
                __val; \
        })
 
 #define virtscsi_config_set(vdev, fld, val) \
-       (void)({ \
+       do { \
                typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
-               vdev->config->set(vdev, \
-                                 offsetof(struct virtio_scsi_config, fld), \
-                                 &__val, sizeof(__val)); \
-       })
+               virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
+       } while(0)
 
 static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
 {
@@ -954,7 +950,7 @@ static void virtscsi_remove(struct virtio_device *vdev)
        scsi_host_put(shost);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtscsi_freeze(struct virtio_device *vdev)
 {
        virtscsi_remove_vqs(vdev);
@@ -988,7 +984,7 @@ static struct virtio_driver virtio_scsi_driver = {
        .id_table = id_table,
        .probe = virtscsi_probe,
        .scan = virtscsi_scan,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
        .freeze = virtscsi_freeze,
        .restore = virtscsi_restore,
 #endif
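virtio_cread()/virtio_cwrite() derive the config-space offset and field width from the structure member and handle endian conversion, which is why the hand-rolled offsetof() calls could go. A minimal read sketch against the virtio-scsi config layout (hypothetical helper):

#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>

static u32 virtscsi_seg_max(struct virtio_device *vdev)
{
        u32 seg_max;

        virtio_cread(vdev, struct virtio_scsi_config, seg_max, &seg_max);
        return seg_max;
}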
index fd7cc566095a40cb22d27519726c4faa14a5f37d..d4ac60b4a56e8a6f4615fcac50015a06cccd53fc 100644 (file)
@@ -1583,7 +1583,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
        /* Initialize the hardware */
        ret = clk_prepare_enable(clk);
        if (ret)
-               goto out_unmap_regs;
+               goto out_free_irq;
        spi_writel(as, CR, SPI_BIT(SWRST));
        spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
        if (as->caps.has_wdrbt) {
@@ -1614,6 +1614,7 @@ out_free_dma:
        spi_writel(as, CR, SPI_BIT(SWRST));
        spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
        clk_disable_unprepare(clk);
+out_free_irq:
        free_irq(irq, master);
 out_unmap_regs:
        iounmap(as->regs);
index 5655acf55bfe35a7d4f0fcb853e56435b4e847c9..6416798828e72bc9f5194282f55cf49a37f6809c 100644 (file)
@@ -226,7 +226,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
                               dev_name(&pdev->dev), hw);
        if (ret) {
                dev_err(&pdev->dev, "Can't request IRQ\n");
-               clk_put(hw->spi_clk);
                goto clk_out;
        }
 
@@ -247,7 +246,6 @@ err_out:
                        gpio_free(hw->chipselect[i]);
 
        spi_master_put(master);
-       kfree(master);
 
        return ret;
 }
@@ -263,7 +261,6 @@ static int spi_clps711x_remove(struct platform_device *pdev)
                        gpio_free(hw->chipselect[i]);
 
        spi_unregister_master(master);
-       kfree(master);
 
        return 0;
 }
index 6cd07d13ecab3b5fc78c263cb6220a3b0ae4ddb5..4e44575bd87a9674c72338cafe25c1b5006ef529 100644 (file)
@@ -476,15 +476,9 @@ static int dspi_probe(struct platform_device *pdev)
        master->bus_num = bus_num;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "can't get platform resource\n");
-               ret = -EINVAL;
-               goto out_master_put;
-       }
-
        dspi->base = devm_ioremap_resource(&pdev->dev, res);
-       if (!dspi->base) {
-               ret = -EINVAL;
+       if (IS_ERR(dspi->base)) {
+               ret = PTR_ERR(dspi->base);
                goto out_master_put;
        }
 
index dbc5e999a1f5689c1a526ace9f3dc4061f924223..6adf4e35816d76c1c7f120cd924420c4c8bf71a1 100644 (file)
@@ -522,8 +522,10 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
        psc_num = master->bus_num;
        snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
        clk = devm_clk_get(dev, clk_name);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
                goto free_irq;
+       }
        ret = clk_prepare_enable(clk);
        if (ret)
                goto free_irq;
index 2eb06ee0b3264020d040c2b1ea8bd6745656c372..c1a50674c1e359deb3e83fc2a04c9acf2324f58d 100644 (file)
@@ -546,8 +546,17 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
        if (pm_runtime_suspended(&drv_data->pdev->dev))
                return IRQ_NONE;
 
-       sccr1_reg = read_SSCR1(reg);
+       /*
+        * If the device is not yet in RPM suspended state and we get an
+        * interrupt that is meant for another device, check if status bits
+        * are all set to one. That means that the device is already
+        * powered off.
+        */
        status = read_SSSR(reg);
+       if (status == ~0)
+               return IRQ_NONE;
+
+       sccr1_reg = read_SSCR1(reg);
 
        /* Ignore possible writes if we don't need to write */
        if (!(sccr1_reg & SSCR1_TIE))
index 512b8893893bd3d8349506fde7785e29591236e2..a80376dc3a102d04684f27934a42412be3e851d7 100644 (file)
@@ -1428,6 +1428,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
               S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
               sdd->regs + S3C64XX_SPI_INT_EN);
 
+       pm_runtime_enable(&pdev->dev);
+
        if (spi_register_master(master)) {
                dev_err(&pdev->dev, "cannot register SPI master\n");
                ret = -EBUSY;
@@ -1440,8 +1442,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
                                        mem_res,
                                        sdd->rx_dma.dmach, sdd->tx_dma.dmach);
 
-       pm_runtime_enable(&pdev->dev);
-
        return 0;
 
 err3:
index 0b68cb592fa4d022bb6d3b3bc70296bcb224284b..e488a90a98b8acbfff5b1fd72dd92b54db2ae602 100644 (file)
@@ -296,6 +296,8 @@ static int hspi_probe(struct platform_device *pdev)
                goto error1;
        }
 
+       pm_runtime_enable(&pdev->dev);
+
        master->num_chipselect  = 1;
        master->bus_num         = pdev->id;
        master->setup           = hspi_setup;
@@ -309,8 +311,6 @@ static int hspi_probe(struct platform_device *pdev)
                goto error1;
        }
 
-       pm_runtime_enable(&pdev->dev);
-
        return 0;
 
  error1:
index 9e039c60c0680ae761e2be41f0de0a171368f3c6..740f9ddda227d55f15042e1e7a8d2cadb58aae71 100644 (file)
@@ -240,15 +240,27 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
 static int spi_drv_probe(struct device *dev)
 {
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
+       struct spi_device               *spi = to_spi_device(dev);
+       int ret;
+
+       acpi_dev_pm_attach(&spi->dev, true);
+       ret = sdrv->probe(spi);
+       if (ret)
+               acpi_dev_pm_detach(&spi->dev, true);
 
-       return sdrv->probe(to_spi_device(dev));
+       return ret;
 }
 
 static int spi_drv_remove(struct device *dev)
 {
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
+       struct spi_device               *spi = to_spi_device(dev);
+       int ret;
+
+       ret = sdrv->remove(spi);
+       acpi_dev_pm_detach(&spi->dev, true);
 
-       return sdrv->remove(to_spi_device(dev));
+       return ret;
 }
 
 static void spi_drv_shutdown(struct device *dev)
@@ -1025,8 +1037,10 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
                return AE_OK;
        }
 
+       adev->power.flags.ignore_parent = true;
        strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias));
        if (spi_add_device(spi)) {
+               adev->power.flags.ignore_parent = false;
                dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
                        dev_name(&adev->dev));
                spi_dev_put(spi);
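The SPI core now pairs ACPI power-management attach/detach around driver probe and remove. The shape of that pairing, reduced to a generic bus-level helper with hypothetical names:

#include <linux/acpi.h>
#include <linux/device.h>

static int probe_with_acpi_pm(struct device *dev,
                              int (*drv_probe)(struct device *))
{
        int ret;

        acpi_dev_pm_attach(dev, true);
        ret = drv_probe(dev);
        if (ret)
                acpi_dev_pm_detach(dev, true);
        return ret;
}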
index 44cce2fa6361194a36ac72dff5825575fd324bf5..1d68c49afabea493e6f426f99e7235acc738abbb 100644 (file)
@@ -100,8 +100,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
         */
        if (!dev->dev.dma_mask)
                dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
-       if (!dev->dev.coherent_dma_mask)
-               dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               return retval;
 
        irq = platform_get_irq(dev, 0);
        if (irq < 0) {
index f73e58f5ef8d0390f8b4068db2030d9d1b42592c..61da7ee36e458ccd73a634839f5feda5a3d00808 100644 (file)
@@ -4797,21 +4797,8 @@ static int et131x_pci_setup(struct pci_dev *pdev,
        pci_set_master(pdev);
 
        /* Check the DMA addressing support of this device */
-       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-               rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-               if (rc < 0) {
-                       dev_err(&pdev->dev,
-                         "Unable to obtain 64 bit DMA for consistent allocations\n");
-                       goto err_release_res;
-               }
-       } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-               rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-               if (rc < 0) {
-                       dev_err(&pdev->dev,
-                         "Unable to obtain 32 bit DMA for consistent allocations\n");
-                       goto err_release_res;
-               }
-       } else {
+       if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
+           dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                dev_err(&pdev->dev, "No usable DMA addressing method\n");
                rc = -EIO;
                goto err_release_res;
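dma_set_mask_and_coherent() sets the streaming and coherent masks in a single call, which collapses the old nested error handling. The usual 64-then-32 bit fallback, as a sketch:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int setup_dma(struct pci_dev *pdev)
{
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                return 0;
        return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}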
index 394254f7d6b545cf0dfdff0b9e77f2f98042e8f3..5032ff7c2259a8b1e9308db04324fee929249487 100644 (file)
@@ -1,6 +1,7 @@
 config DRM_IMX
        tristate "DRM Support for Freescale i.MX"
        select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
        select VIDEOMODE_HELPERS
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
index a2e52a0c53c981690dc7b711a1a97625c01a9a0f..ad135d3c3281f40f3d45b4148c0538cf05a7cc37 100644 (file)
@@ -396,14 +396,14 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
 
        /*
         * enable drm irq mode.
-        * - with irq_enabled = 1, we can use the vblank feature.
+        * - with irq_enabled = true, we can use the vblank feature.
         *
         * P.S. note that we wouldn't use drm irq handler but
         *      just specific driver own one instead because
         *      drm framework supports only one irq handler and
         *      drivers can well take care of their interrupts
         */
-       drm->irq_enabled = 1;
+       drm->irq_enabled = true;
 
        drm_mode_config_init(drm);
        imx_drm_mode_config_init(drm);
@@ -423,11 +423,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
                goto err_init;
 
        /*
-        * with vblank_disable_allowed = 1, vblank interrupt will be disabled
+        * with vblank_disable_allowed = true, vblank interrupt will be disabled
         * by drm timer once a current process gives up ownership of
         * vblank event.(after drm_vblank_put function is called)
         */
-       imxdrm->drm->vblank_disable_allowed = 1;
+       imxdrm->drm->vblank_disable_allowed = true;
 
        if (!imx_drm_device_get())
                ret = -EINVAL;
@@ -800,6 +800,12 @@ static struct drm_driver imx_drm_driver = {
 
 static int imx_drm_platform_probe(struct platform_device *pdev)
 {
+       int ret;
+
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
        imx_drm_device->dev = &pdev->dev;
 
        return drm_platform_init(&imx_drm_driver, pdev);
@@ -842,8 +848,6 @@ static int __init imx_drm_init(void)
                goto err_pdev;
        }
 
-       imx_drm_pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32),
-
        ret = platform_driver_register(&imx_drm_pdrv);
        if (ret)
                goto err_pdrv;
index 6fd37a7453e9691ac573c414d396e69357e7b38f..9e73e8d8c9aaa350267ac25f0926a5cd8d7c6416 100644 (file)
@@ -523,7 +523,9 @@ static int ipu_drm_probe(struct platform_device *pdev)
        if (!pdata)
                return -EINVAL;
 
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        ipu_crtc = devm_kzalloc(&pdev->dev, sizeof(*ipu_crtc), GFP_KERNEL);
        if (!ipu_crtc)
index 90d6ac46935587fb905e910e39a7a52002888f43..081407be33ab5b6cdec1a01dbfb561bd53056c78 100644 (file)
@@ -901,10 +901,7 @@ dt3155_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        int err;
        struct dt3155_priv *pd;
 
-       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-       if (err)
-               return -ENODEV;
-       err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err)
                return -ENODEV;
        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
index b94a95a597d63ec6428f2b92d460fe208465d351..76d5bbd4d93c38f3e25274f8d7b337eb808f1b7d 100644 (file)
@@ -1,3 +1,4 @@
 config USB_MSI3101
        tristate "Mirics MSi3101 SDR Dongle"
        depends on USB && VIDEO_DEV && VIDEO_V4L2
+        select VIDEOBUF2_VMALLOC
index 24c7b70a6cbf401696ee3faba21047d68a75f6e2..4c3bf776bb207b755c9a5bde50aa32138a6ff09a 100644 (file)
@@ -1131,7 +1131,13 @@ static int msi3101_queue_setup(struct vb2_queue *vq,
        /* Absolute min and max number of buffers available for mmap() */
        *nbuffers = 32;
        *nplanes = 1;
-       sizes[0] = PAGE_ALIGN(3 * 3072); /* 3 * 768 * 4 */
+       /*
+        *   3, wMaxPacketSize 3x 1024 bytes
+        * 504, max IQ sample pairs per 1024 frame
+        *   2, two samples, I and Q
+        *   4, 32-bit float
+        */
+       sizes[0] = PAGE_ALIGN(3 * 504 * 2 * 4); /* = 12096 */
        dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
                        __func__, *nbuffers, sizes[0]);
        return 0;
@@ -1657,7 +1663,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
                        f->frequency * 625UL / 10UL);
 }
 
-const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
+static const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
        .vidioc_querycap          = msi3101_querycap,
 
        .vidioc_enum_input        = msi3101_enum_input,
index a4c589604b028d7d74e1a7f895b6ab3aecbeac75..9a6d5c0b13396c2ad8715508139d49ac1f0952b7 100644 (file)
@@ -346,7 +346,7 @@ static int enc_get_mpeg_dma(struct solo_dev *solo_dev, dma_addr_t dma,
 /* Build a descriptor queue out of an SG list and send it to the P2M for
  * processing. */
 static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
-                         struct vb2_dma_sg_desc *vbuf, int off, int size,
+                         struct sg_table *vbuf, int off, int size,
                          unsigned int base, unsigned int base_size)
 {
        struct solo_dev *solo_dev = solo_enc->solo_dev;
@@ -359,7 +359,7 @@ static int solo_send_desc(struct solo_enc_dev *solo_enc, int skip,
 
        solo_enc->desc_count = 1;
 
-       for_each_sg(vbuf->sglist, sg, vbuf->num_pages, i) {
+       for_each_sg(vbuf->sgl, sg, vbuf->nents, i) {
                struct solo_p2m_desc *desc;
                dma_addr_t dma;
                int len;
@@ -434,7 +434,7 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
                struct vb2_buffer *vb, struct vop_header *vh)
 {
        struct solo_dev *solo_dev = solo_enc->solo_dev;
-       struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
        int frame_size;
        int ret;
 
@@ -443,7 +443,7 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
        if (vb2_plane_size(vb, 0) < vh->jpeg_size + solo_enc->jpeg_len)
                return -EIO;
 
-       sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages,
+       sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
                        solo_enc->jpeg_header,
                        solo_enc->jpeg_len);
 
@@ -451,12 +451,12 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
                & ~(DMA_ALIGN - 1);
        vb2_set_plane_payload(vb, 0, vh->jpeg_size + solo_enc->jpeg_len);
 
-       dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+       dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
                        DMA_FROM_DEVICE);
        ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf, vh->jpeg_off,
                        frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
                        SOLO_JPEG_EXT_SIZE(solo_dev));
-       dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+       dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
                        DMA_FROM_DEVICE);
        return ret;
 }
@@ -465,7 +465,7 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
                struct vb2_buffer *vb, struct vop_header *vh)
 {
        struct solo_dev *solo_dev = solo_enc->solo_dev;
-       struct vb2_dma_sg_desc *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+       struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
        int frame_off, frame_size;
        int skip = 0;
        int ret;
@@ -475,7 +475,7 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
 
        /* If this is a key frame, add extra header */
        if (!vh->vop_type) {
-               sg_copy_from_buffer(vbuf->sglist, vbuf->num_pages,
+               sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
                                solo_enc->vop,
                                solo_enc->vop_len);
 
@@ -494,12 +494,12 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
        frame_size = (vh->mpeg_size + skip + (DMA_ALIGN - 1))
                & ~(DMA_ALIGN - 1);
 
-       dma_map_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+       dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
                        DMA_FROM_DEVICE);
        ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
                        SOLO_MP4E_EXT_ADDR(solo_dev),
                        SOLO_MP4E_EXT_SIZE(solo_dev));
-       dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sglist, vbuf->num_pages,
+       dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
                        DMA_FROM_DEVICE);
        return ret;
 }
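vb2_dma_sg_plane_desc() now hands back a plain struct sg_table, so the driver walks sgl/nents rather than the old sglist/num_pages pair. Sketch of the iteration over an unmapped table (hypothetical helper):

#include <linux/scatterlist.h>

static size_t sg_table_bytes(struct sg_table *sgt)
{
        struct scatterlist *sg;
        size_t total = 0;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i)
                total += sg->length;    /* buffer length, valid before DMA mapping */
        return total;
}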
index dbfc390330acf2ee8bc6af339ff22f3b6fc3ec77..f35a1f75b15b87f2f37ea0bf671894acf549c207 100644 (file)
@@ -56,7 +56,7 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
        select THERMAL_GOV_USER_SPACE
        help
          Select this if you want to let the user space manage the
-         lpatform thermals.
+         platform thermals.
 
 endchoice
 
@@ -69,6 +69,7 @@ config THERMAL_GOV_STEP_WISE
        bool "Step_wise thermal governor"
        help
          Enable this to manage platform thermals using a simple linear
+         governor.
 
 config THERMAL_GOV_USER_SPACE
        bool "User_space thermal governor"
@@ -78,7 +79,6 @@ config THERMAL_GOV_USER_SPACE
 config CPU_THERMAL
        bool "generic cpu cooling support"
        depends on CPU_FREQ
-       select CPU_FREQ_TABLE
        help
          This implements the generic cpu cooling mechanism through frequency
          reduction. An ACPI version of this already exists
@@ -117,14 +117,14 @@ config SPEAR_THERMAL
        depends on OF
        help
          Enable this to plug the SPEAr thermal sensor driver into the Linux
-         thermal framework
+         thermal framework.
 
 config RCAR_THERMAL
        tristate "Renesas R-Car thermal driver"
        depends on ARCH_SHMOBILE
        help
          Enable this to plug the R-Car thermal sensor driver into the Linux
-         thermal framework
+         thermal framework.
 
 config KIRKWOOD_THERMAL
        tristate "Temperature sensor on Marvell Kirkwood SoCs"
index b40b37cd25e08b7b2353b6ef6d85c321ad2e08eb..8f181b3f842b3b04b12d972da58d8f238b6a7a00 100644 (file)
@@ -675,6 +675,11 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x2e},
        { X86_VENDOR_INTEL, 6, 0x2f},
        { X86_VENDOR_INTEL, 6, 0x3a},
+       { X86_VENDOR_INTEL, 6, 0x3c},
+       { X86_VENDOR_INTEL, 6, 0x3e},
+       { X86_VENDOR_INTEL, 6, 0x3f},
+       { X86_VENDOR_INTEL, 6, 0x45},
+       { X86_VENDOR_INTEL, 6, 0x46},
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
@@ -758,21 +763,39 @@ static int powerclamp_init(void)
        /* probe cpu features and ids here */
        retval = powerclamp_probe();
        if (retval)
-               return retval;
+               goto exit_free;
+
        /* set default limit, maybe adjusted during runtime based on feedback */
        window_size = 2;
        register_hotcpu_notifier(&powerclamp_cpu_notifier);
+
        powerclamp_thread = alloc_percpu(struct task_struct *);
+       if (!powerclamp_thread) {
+               retval = -ENOMEM;
+               goto exit_unregister;
+       }
+
        cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
                                                &powerclamp_cooling_ops);
-       if (IS_ERR(cooling_dev))
-               return -ENODEV;
+       if (IS_ERR(cooling_dev)) {
+               retval = -ENODEV;
+               goto exit_free_thread;
+       }
 
        if (!duration)
                duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+
        powerclamp_create_debug_files();
 
        return 0;
+
+exit_free_thread:
+       free_percpu(powerclamp_thread);
+exit_unregister:
+       unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
+exit_free:
+       kfree(cpu_clamping_mask);
+       return retval;
 }
 module_init(powerclamp_init);
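
The powerclamp_init() rework above converts early returns into a reverse-order unwind through goto labels, so every failure path releases exactly what was already set up. A minimal sketch of that pattern, assuming simplified setup steps and an illustrative (empty) example_cooling_ops:

#include <linux/init.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/thermal.h>

/* Hypothetical module state; a real ops structure would provide
 * get_max_state()/get_cur_state()/set_cur_state().
 */
static struct task_struct * __percpu *example_threads;
static struct thermal_cooling_device *example_cdev;
static const struct thermal_cooling_device_ops example_cooling_ops;

static int __init example_init(void)
{
        int ret;

        example_threads = alloc_percpu(struct task_struct *);
        if (!example_threads)
                return -ENOMEM;

        example_cdev = thermal_cooling_device_register("example", NULL,
                                                       &example_cooling_ops);
        if (IS_ERR(example_cdev)) {
                ret = PTR_ERR(example_cdev);
                goto err_free_threads;  /* undo only what was done */
        }

        return 0;

err_free_threads:
        free_percpu(example_threads);
        return ret;
}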
 
index f10a6ad37c0609fd2383061d1c4f651bb9cde36b..c2301da08ac7bb0958755b5f2b1291447f4aae6a 100644 (file)
@@ -310,8 +310,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
        }
 
        th_zone = conf->pzone_data;
-       if (th_zone->therm_dev)
-               return;
 
        if (th_zone->bind == false) {
                for (i = 0; i < th_zone->cool_dev_size; i++) {
index b43afda8acd163085a11eaeb851fb1869403ed4b..32f38b90c4f6a6705b44e8381ee339c2382e47bb 100644 (file)
@@ -317,6 +317,9 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
 
        con = readl(data->base + reg->tmu_ctrl);
 
+       if (pdata->test_mux)
+               con |= (pdata->test_mux << reg->test_mux_addr_shift);
+
        if (pdata->reference_voltage) {
                con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift);
                con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
@@ -488,7 +491,7 @@ static const struct of_device_id exynos_tmu_match[] = {
        },
        {
                .compatible = "samsung,exynos4412-tmu",
-               .data = (void *)EXYNOS5250_TMU_DRV_DATA,
+               .data = (void *)EXYNOS4412_TMU_DRV_DATA,
        },
        {
                .compatible = "samsung,exynos5250-tmu",
@@ -629,9 +632,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       if (pdata->type == SOC_ARCH_EXYNOS ||
-               pdata->type == SOC_ARCH_EXYNOS4210 ||
-                               pdata->type == SOC_ARCH_EXYNOS5440)
+       if (pdata->type == SOC_ARCH_EXYNOS4210 ||
+           pdata->type == SOC_ARCH_EXYNOS4412 ||
+           pdata->type == SOC_ARCH_EXYNOS5250 ||
+           pdata->type == SOC_ARCH_EXYNOS5440)
                data->soc = pdata->type;
        else {
                ret = -EINVAL;
index b364c9eee70103a622a852096295f4e3339b474f..3fb65547e64c9d9e3d3410f1c0ea729fb5a9332e 100644 (file)
@@ -41,7 +41,8 @@ enum calibration_mode {
 
 enum soc_type {
        SOC_ARCH_EXYNOS4210 = 1,
-       SOC_ARCH_EXYNOS,
+       SOC_ARCH_EXYNOS4412,
+       SOC_ARCH_EXYNOS5250,
        SOC_ARCH_EXYNOS5440,
 };
 
@@ -84,6 +85,7 @@ enum soc_type {
  * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl
        reg.
  * @tmu_ctrl: TMU main controller register.
+ * @test_mux_addr_shift: shift bits of test mux address.
  * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
  * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
  * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
@@ -150,6 +152,7 @@ struct exynos_tmu_registers {
        u32     triminfo_reload_shift;
 
        u32     tmu_ctrl;
+       u32     test_mux_addr_shift;
        u32     buf_vref_sel_shift;
        u32     buf_vref_sel_mask;
        u32     therm_trip_mode_shift;
@@ -257,6 +260,7 @@ struct exynos_tmu_registers {
  * @first_point_trim: temp value of the first point trimming
  * @second_point_trim: temp value of the second point trimming
  * @default_temp_offset: default temperature offset in case of no trimming
+ * @test_mux: indicates whether the SoC supports a test MUX
  * @cal_type: calibration type for temperature
  * @cal_mode: calibration mode for temperature
  * @freq_clip_table: Table representing frequency reduction percentage.
@@ -286,6 +290,7 @@ struct exynos_tmu_platform_data {
        u8 first_point_trim;
        u8 second_point_trim;
        u8 default_temp_offset;
+       u8 test_mux;
 
        enum calibration_type cal_type;
        enum calibration_mode cal_mode;
index 9002499c1f69289350f2e7edd813716919927083..073c292baa53e12d0bc7c6aef2a8956a81ea4a6d 100644 (file)
@@ -90,14 +90,15 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
 };
 #endif
 
-#if defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412)
-static const struct exynos_tmu_registers exynos5250_tmu_registers = {
+#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
+static const struct exynos_tmu_registers exynos4412_tmu_registers = {
        .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
        .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
        .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
        .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
        .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
        .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
+       .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
        .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
        .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
        .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
@@ -128,7 +129,7 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
        .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
 };
 
-#define EXYNOS5250_TMU_DATA \
+#define EXYNOS4412_TMU_DATA \
        .threshold_falling = 10, \
        .trigger_levels[0] = 85, \
        .trigger_levels[1] = 103, \
@@ -162,15 +163,32 @@ static const struct exynos_tmu_registers exynos5250_tmu_registers = {
                .temp_level = 103, \
        }, \
        .freq_tab_count = 2, \
-       .type = SOC_ARCH_EXYNOS, \
-       .registers = &exynos5250_tmu_registers, \
+       .registers = &exynos4412_tmu_registers, \
        .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
                        TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
                        TMU_SUPPORT_EMUL_TIME)
+#endif
 
+#if defined(CONFIG_SOC_EXYNOS4412)
+struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
+       .tmu_data = {
+               {
+                       EXYNOS4412_TMU_DATA,
+                       .type = SOC_ARCH_EXYNOS4412,
+                       .test_mux = EXYNOS4412_MUX_ADDR_VALUE,
+               },
+       },
+       .tmu_count = 1,
+};
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5250)
 struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
        .tmu_data = {
-               { EXYNOS5250_TMU_DATA },
+               {
+                       EXYNOS4412_TMU_DATA,
+                       .type = SOC_ARCH_EXYNOS5250,
+               },
        },
        .tmu_count = 1,
 };
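
The exynos_tmu_data.c changes above rename the shared initializer to EXYNOS4412_TMU_DATA and let each SoC entry override .type (and, for Exynos4412, .test_mux) with designated initializers. The same sharing pattern, reduced to illustrative stand-in names and values, is:

#include <linux/types.h>

/* Sketch only: a common initializer macro reused by several SoC entries,
 * with later designated initializers overriding the per-SoC fields.
 */
#define EXAMPLE_TMU_DATA \
        .threshold_falling = 10, \
        .trigger_levels[0] = 85

struct example_tmu_platform_data {
        u8 threshold_falling;
        u8 trigger_levels[4];
        int type;
        u8 test_mux;
};

static const struct example_tmu_platform_data exynos4412_like = {
        EXAMPLE_TMU_DATA,
        .type = 1,              /* stands in for SOC_ARCH_EXYNOS4412 */
        .test_mux = 6,          /* only this SoC sets the test MUX */
};

static const struct example_tmu_platform_data exynos5250_like = {
        EXAMPLE_TMU_DATA,
        .type = 2,              /* stands in for SOC_ARCH_EXYNOS5250 */
};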
index dc7feb51099b449bb680e5b9237966b8eb47295e..a1ea19d9e0a6e6bfee1a80f173dcdf4786dcc284 100644 (file)
 
 #define EXYNOS_MAX_TRIGGER_PER_REG     4
 
+/* Exynos4412 specific */
+#define EXYNOS4412_MUX_ADDR_VALUE          6
+#define EXYNOS4412_MUX_ADDR_SHIFT          20
+
 /*exynos5440 specific registers*/
 #define EXYNOS5440_TMU_S0_7_TRIM               0x000
 #define EXYNOS5440_TMU_S0_7_CTRL               0x020
@@ -138,7 +142,14 @@ extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
 #define EXYNOS4210_TMU_DRV_DATA (NULL)
 #endif
 
-#if (defined(CONFIG_SOC_EXYNOS5250) || defined(CONFIG_SOC_EXYNOS4412))
+#if defined(CONFIG_SOC_EXYNOS4412)
+extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
+#define EXYNOS4412_TMU_DRV_DATA (&exynos4412_default_tmu_data)
+#else
+#define EXYNOS4412_TMU_DRV_DATA (NULL)
+#endif
+
+#if defined(CONFIG_SOC_EXYNOS5250)
 extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
 #define EXYNOS5250_TMU_DRV_DATA (&exynos5250_default_tmu_data)
 #else
index eeef0e2498caa39edc55cd3cf6345953a537f626..fdb07199d9c2693b8ae91af6d626f588ed21350b 100644 (file)
@@ -159,7 +159,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 
        INIT_LIST_HEAD(&hwmon->tz_list);
        strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
-       hwmon->device = hwmon_device_register(&tz->device);
+       hwmon->device = hwmon_device_register(NULL);
        if (IS_ERR(hwmon->device)) {
                result = PTR_ERR(hwmon->device);
                goto free_mem;
index 4f8b9af54a5a75d1de884a342920ccb50f9ba869..5a47cc8c8f85ae1771c3faa1042b8f068978d570 100644 (file)
@@ -110,6 +110,7 @@ static inline int ti_thermal_get_temp(struct thermal_zone_device *thermal,
                } else {
                        dev_err(bgp->dev,
                                "Failed to read PCB state. Using defaults\n");
+                       ret = 0;
                }
        }
        *temp = ti_thermal_hotspot_temperature(tmp, slope, constant);
index f36950e4134f55deb02e3a6eb172e9481828ea18..7722cb9d5a8020076afe549192b198542461f352 100644 (file)
@@ -316,18 +316,19 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work)
        int phy_id = topology_physical_package_id(cpu);
        struct phy_dev_entry *phdev = pkg_temp_thermal_get_phy_entry(cpu);
        bool notify = false;
+       unsigned long flags;
 
        if (!phdev)
                return;
 
-       spin_lock(&pkg_work_lock);
+       spin_lock_irqsave(&pkg_work_lock, flags);
        ++pkg_work_cnt;
        if (unlikely(phy_id > max_phy_id)) {
-               spin_unlock(&pkg_work_lock);
+               spin_unlock_irqrestore(&pkg_work_lock, flags);
                return;
        }
        pkg_work_scheduled[phy_id] = 0;
-       spin_unlock(&pkg_work_lock);
+       spin_unlock_irqrestore(&pkg_work_lock, flags);
 
        enable_pkg_thres_interrupt();
        rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
@@ -397,6 +398,7 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
        int thres_count;
        u32 eax, ebx, ecx, edx;
        u8 *temp;
+       unsigned long flags;
 
        cpuid(6, &eax, &ebx, &ecx, &edx);
        thres_count = ebx & 0x07;
@@ -420,19 +422,19 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
                goto err_ret_unlock;
        }
 
-       spin_lock(&pkg_work_lock);
+       spin_lock_irqsave(&pkg_work_lock, flags);
        if (topology_physical_package_id(cpu) > max_phy_id)
                max_phy_id = topology_physical_package_id(cpu);
        temp = krealloc(pkg_work_scheduled,
                        (max_phy_id+1) * sizeof(u8), GFP_ATOMIC);
        if (!temp) {
-               spin_unlock(&pkg_work_lock);
+               spin_unlock_irqrestore(&pkg_work_lock, flags);
                err = -ENOMEM;
                goto err_ret_free;
        }
        pkg_work_scheduled = temp;
        pkg_work_scheduled[topology_physical_package_id(cpu)] = 0;
-       spin_unlock(&pkg_work_lock);
+       spin_unlock_irqrestore(&pkg_work_lock, flags);
 
        phy_dev_entry->phys_proc_id = topology_physical_package_id(cpu);
        phy_dev_entry->first_cpu = cpu;
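
The pkg_temp_thermal hunks above switch pkg_work_lock from spin_lock() to spin_lock_irqsave() because the lock can also be taken from interrupt context. A minimal sketch of that locking rule, with illustrative names:

#include <linux/spinlock.h>

/* Save and restore the local IRQ state around the critical section so an
 * interrupt on this CPU cannot re-enter and deadlock on the same lock.
 */
static DEFINE_SPINLOCK(example_lock);
static int example_count;

static void example_update(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        example_count++;
        spin_unlock_irqrestore(&example_lock, flags);
}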
index cd69b48f6dfd390a8231f689b35f5d2588c16772..6496872e2e47c34c29ca75c79ce4d56bf7191146 100644 (file)
@@ -329,7 +329,7 @@ static void udbg_init_opal_common(void)
 void __init hvc_opal_init_early(void)
 {
        struct device_node *stdout_node = NULL;
-       const u32 *termno;
+       const __be32 *termno;
        const char *name = NULL;
        const struct hv_ops *ops;
        u32 index;
@@ -371,7 +371,7 @@ void __init hvc_opal_init_early(void)
        if (!stdout_node)
                return;
        termno = of_get_property(stdout_node, "reg", NULL);
-       index = termno ? *termno : 0;
+       index = termno ? be32_to_cpup(termno) : 0;
        if (index >= MAX_NR_HVC_CONSOLES)
                return;
        hvc_opal_privs[index] = &hvc_opal_boot_priv;
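
The hvc_opal change above treats the device-tree "reg" property as big-endian data and converts it with be32_to_cpup() before use. A small illustrative snippet of the same idiom (the node lookup itself is assumed to have happened already):

#include <linux/of.h>

/* Device-tree cells are big-endian in memory, so convert at the point of
 * use rather than dereferencing the raw pointer.
 */
static u32 example_read_reg_index(struct device_node *node)
{
        const __be32 *prop = of_get_property(node, "reg", NULL);

        return prop ? be32_to_cpup(prop) : 0;
}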
index ac2767100df56d3ef071f565ddfc7f4063432ca7..347050ea414a7f37e3962869f29437129cea1c75 100644 (file)
@@ -9,7 +9,7 @@
 
 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
 {
-       packet->seqno = atomic_inc_return(&pv->seqno);
+       packet->seqno = cpu_to_be16(atomic_inc_return(&pv->seqno));
 
        /* Assumes that always succeeds, works in practice */
        return pv->put_chars(pv->termno, (char *)packet, packet->len);
@@ -28,7 +28,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
        /* Send version query */
        q.hdr.type = VS_QUERY_PACKET_HEADER;
        q.hdr.len = sizeof(struct hvsi_query);
-       q.verb = VSV_SEND_VERSION_NUMBER;
+       q.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
        hvsi_send_packet(pv, &q.hdr);
 }
 
@@ -40,7 +40,7 @@ static int hvsi_send_close(struct hvsi_priv *pv)
 
        ctrl.hdr.type = VS_CONTROL_PACKET_HEADER;
        ctrl.hdr.len = sizeof(struct hvsi_control);
-       ctrl.verb = VSV_CLOSE_PROTOCOL;
+       ctrl.verb = cpu_to_be16(VSV_CLOSE_PROTOCOL);
        return hvsi_send_packet(pv, &ctrl.hdr);
 }
 
@@ -69,14 +69,14 @@ static void hvsi_got_control(struct hvsi_priv *pv)
 {
        struct hvsi_control *pkt = (struct hvsi_control *)pv->inbuf;
 
-       switch (pkt->verb) {
+       switch (be16_to_cpu(pkt->verb)) {
        case VSV_CLOSE_PROTOCOL:
                /* We restart the handshaking */
                hvsi_start_handshake(pv);
                break;
        case VSV_MODEM_CTL_UPDATE:
                /* Transition of carrier detect */
-               hvsi_cd_change(pv, pkt->word & HVSI_TSCD);
+               hvsi_cd_change(pv, be32_to_cpu(pkt->word) & HVSI_TSCD);
                break;
        }
 }
@@ -87,7 +87,7 @@ static void hvsi_got_query(struct hvsi_priv *pv)
        struct hvsi_query_response r;
 
        /* We only handle version queries */
-       if (pkt->verb != VSV_SEND_VERSION_NUMBER)
+       if (be16_to_cpu(pkt->verb) != VSV_SEND_VERSION_NUMBER)
                return;
 
        pr_devel("HVSI@%x: Got version query, sending response...\n",
@@ -96,7 +96,7 @@ static void hvsi_got_query(struct hvsi_priv *pv)
        /* Send version response */
        r.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
        r.hdr.len = sizeof(struct hvsi_query_response);
-       r.verb = VSV_SEND_VERSION_NUMBER;
+       r.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
        r.u.version = HVSI_VERSION;
        r.query_seqno = pkt->hdr.seqno;
        hvsi_send_packet(pv, &r.hdr);
@@ -112,7 +112,7 @@ static void hvsi_got_response(struct hvsi_priv *pv)
 
        switch(r->verb) {
        case VSV_SEND_MODEM_CTL_STATUS:
-               hvsi_cd_change(pv, r->u.mctrl_word & HVSI_TSCD);
+               hvsi_cd_change(pv, be32_to_cpu(r->u.mctrl_word) & HVSI_TSCD);
                pv->mctrl_update = 1;
                break;
        }
@@ -265,8 +265,7 @@ int hvsilib_read_mctrl(struct hvsi_priv *pv)
        pv->mctrl_update = 0;
        q.hdr.type = VS_QUERY_PACKET_HEADER;
        q.hdr.len = sizeof(struct hvsi_query);
-       q.hdr.seqno = atomic_inc_return(&pv->seqno);
-       q.verb = VSV_SEND_MODEM_CTL_STATUS;
+       q.verb = cpu_to_be16(VSV_SEND_MODEM_CTL_STATUS);
        rc = hvsi_send_packet(pv, &q.hdr);
        if (rc <= 0) {
                pr_devel("HVSI@%x: Error %d...\n", pv->termno, rc);
@@ -304,9 +303,9 @@ int hvsilib_write_mctrl(struct hvsi_priv *pv, int dtr)
 
        ctrl.hdr.type = VS_CONTROL_PACKET_HEADER,
        ctrl.hdr.len = sizeof(struct hvsi_control);
-       ctrl.verb = VSV_SET_MODEM_CTL;
-       ctrl.mask = HVSI_TSDTR;
-       ctrl.word = dtr ? HVSI_TSDTR : 0;
+       ctrl.verb = cpu_to_be16(VSV_SET_MODEM_CTL);
+       ctrl.mask = cpu_to_be32(HVSI_TSDTR);
+       ctrl.word = cpu_to_be32(dtr ? HVSI_TSDTR : 0);
        return hvsi_send_packet(pv, &ctrl.hdr);
 }
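
The hvsi_lib conversions above annotate protocol fields as __be16/__be32 and convert exactly at the packet boundary, which also lets sparse flag any missed conversion. A minimal sketch of that convention, using an illustrative packet layout rather than the real HVSI headers:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative wire format: fields are stored in big-endian order and only
 * converted when crossing between CPU values and the packet buffer.
 */
struct example_wire_packet {
        __be16 verb;
        __be32 word;
};

static void example_fill_packet(struct example_wire_packet *pkt,
                                u16 verb, u32 word)
{
        pkt->verb = cpu_to_be16(verb);  /* CPU order -> wire order */
        pkt->word = cpu_to_be32(word);
}

static u16 example_packet_verb(const struct example_wire_packet *pkt)
{
        return be16_to_cpu(pkt->verb);  /* wire order -> CPU order */
}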
 
index d067285a2d203765d38d36535d588854988f75c2..6b0f75eac8a26de2c6a3a83eac061ae7a03295cd 100644 (file)
@@ -1499,7 +1499,7 @@ static void atmel_set_ops(struct uart_port *port)
 /*
  * Get ip name usart or uart
  */
-static int atmel_get_ip_name(struct uart_port *port)
+static void atmel_get_ip_name(struct uart_port *port)
 {
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
        int name = UART_GET_IP_NAME(port);
@@ -1518,10 +1518,7 @@ static int atmel_get_ip_name(struct uart_port *port)
                atmel_port->is_usart = false;
        } else {
                dev_err(port->dev, "Not supported ip name, set to uart\n");
-               return -EINVAL;
        }
-
-       return 0;
 }
 
 /*
@@ -2405,9 +2402,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
        /*
         * Get port name of usart or uart
         */
-       ret = atmel_get_ip_name(&port->uart);
-       if (ret < 0)
-               goto err_add_port;
+       atmel_get_ip_name(&port->uart);
 
        return 0;
 
index a0ebbc9ce5cdbacc69cc8c240f8d9fd76e43ec12..042aa077b5b3e166a8453ac0684da1dd024f6785 100644 (file)
@@ -1912,9 +1912,6 @@ static int serial_imx_probe_dt(struct imx_port *sport,
 
        sport->devdata = of_id->data;
 
-       if (of_device_is_stdout_path(np))
-               add_preferred_console(imx_reg.cons->name, sport->port.line, 0);
-
        return 0;
 }
 #else
index 537750261aaa2fe5a5e2de8fdb3e6ee0adf8fe4d..e7e9cabb21fd2a4a6613f075632e6f16f3432a33 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
+#include <linux/of.h>
 
 #ifdef CONFIG_SUPERH
 #include <asm/sh_bios.h>
@@ -1433,7 +1434,7 @@ static void work_fn_rx(struct work_struct *work)
        desc = s->desc_rx[new];
 
        if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
-           DMA_SUCCESS) {
+           DMA_COMPLETE) {
                /* Handle incomplete DMA receive */
                struct dma_chan *chan = s->chan_rx;
                struct shdma_desc *sh_desc = container_of(desc,
@@ -2437,6 +2438,112 @@ static int sci_remove(struct platform_device *dev)
        return 0;
 }
 
+#ifdef CONFIG_OF
+static const struct of_device_id of_sci_match[] = {
+       { .compatible = "renesas,sci-SCI-uart",
+               .data = (void *)PORT_SCI },
+       { .compatible = "renesas,sci-SCIF-uart",
+               .data = (void *)PORT_SCIF },
+       { .compatible = "renesas,sci-IRDA-uart",
+               .data = (void *)PORT_IRDA },
+       { .compatible = "renesas,sci-SCIFA-uart",
+               .data = (void *)PORT_SCIFA },
+       { .compatible = "renesas,sci-SCIFB-uart",
+               .data = (void *)PORT_SCIFB },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_sci_match);
+
+static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+                                                               int *dev_id)
+{
+       struct plat_sci_port *p;
+       struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *match;
+       struct resource *res;
+       const __be32 *prop;
+       int i, irq, val;
+
+       match = of_match_node(of_sci_match, pdev->dev.of_node);
+       if (!match || !match->data) {
+               dev_err(&pdev->dev, "OF match error\n");
+               return NULL;
+       }
+
+       p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
+       if (!p) {
+               dev_err(&pdev->dev, "failed to allocate DT config data\n");
+               return NULL;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "failed to get I/O memory\n");
+               return NULL;
+       }
+       p->mapbase = res->start;
+
+       for (i = 0; i < SCIx_NR_IRQS; i++) {
+               irq = platform_get_irq(pdev, i);
+               if (irq < 0) {
+                       dev_err(&pdev->dev, "failed to get irq data %d\n", i);
+                       return NULL;
+               }
+               p->irqs[i] = irq;
+       }
+
+       prop = of_get_property(np, "cell-index", NULL);
+       if (!prop) {
+               dev_err(&pdev->dev, "required DT prop cell-index missing\n");
+               return NULL;
+       }
+       *dev_id = be32_to_cpup(prop);
+
+       prop = of_get_property(np, "renesas,scscr", NULL);
+       if (!prop) {
+               dev_err(&pdev->dev, "required DT prop scscr missing\n");
+               return NULL;
+       }
+       p->scscr = be32_to_cpup(prop);
+
+       prop = of_get_property(np, "renesas,scbrr-algo-id", NULL);
+       if (!prop) {
+               dev_err(&pdev->dev, "required DT prop scbrr-algo-id missing\n");
+               return NULL;
+       }
+       val = be32_to_cpup(prop);
+       if (val <= SCBRR_ALGO_INVALID || val >= SCBRR_NR_ALGOS) {
+               dev_err(&pdev->dev, "DT prop scbrr-algo-id out of range\n");
+               return NULL;
+       }
+       p->scbrr_algo_id = val;
+
+       p->flags = UPF_IOREMAP;
+       if (of_get_property(np, "renesas,autoconf", NULL))
+               p->flags |= UPF_BOOT_AUTOCONF;
+
+       prop = of_get_property(np, "renesas,regtype", NULL);
+       if (prop) {
+               val = be32_to_cpup(prop);
+               if (val < SCIx_PROBE_REGTYPE || val >= SCIx_NR_REGTYPES) {
+                       dev_err(&pdev->dev, "DT prop regtype out of range\n");
+                       return NULL;
+               }
+               p->regtype = val;
+       }
+
+       p->type = (unsigned int)match->data;
+
+       return p;
+}
+#else
+static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
+                                                               int *dev_id)
+{
+       return NULL;
+}
+#endif /* CONFIG_OF */
+
 static int sci_probe_single(struct platform_device *dev,
                                      unsigned int index,
                                      struct plat_sci_port *p,
@@ -2469,9 +2576,9 @@ static int sci_probe_single(struct platform_device *dev,
 
 static int sci_probe(struct platform_device *dev)
 {
-       struct plat_sci_port *p = dev_get_platdata(&dev->dev);
-       struct sci_port *sp = &sci_ports[dev->id];
-       int ret;
+       struct plat_sci_port *p;
+       struct sci_port *sp;
+       int ret, dev_id = dev->id;
 
        /*
         * If we've come here via earlyprintk initialization, head off to
@@ -2481,9 +2588,20 @@ static int sci_probe(struct platform_device *dev)
        if (is_early_platform_device(dev))
                return sci_probe_earlyprintk(dev);
 
+       if (dev->dev.of_node)
+               p = sci_parse_dt(dev, &dev_id);
+       else
+               p = dev_get_platdata(&dev->dev);
+
+       if (!p) {
+               dev_err(&dev->dev, "no setup data supplied\n");
+               return -EINVAL;
+       }
+
+       sp = &sci_ports[dev_id];
        platform_set_drvdata(dev, sp);
 
-       ret = sci_probe_single(dev, dev->id, p, sp);
+       ret = sci_probe_single(dev, dev_id, p, sp);
        if (ret)
                return ret;
 
@@ -2535,6 +2653,7 @@ static struct platform_driver sci_driver = {
                .name   = "sh-sci",
                .owner  = THIS_MODULE,
                .pm     = &sci_dev_pm_ops,
+               .of_match_table = of_match_ptr(of_sci_match),
        },
 };
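
The sh-sci probe path above now builds its plat_sci_port from the device tree, using the compatible string to select a per-variant constant through of_match_node(). A reduced sketch of that lookup, with made-up compatibles and values:

#include <linux/of.h>
#include <linux/platform_device.h>

/* Sketch only: the .data field of the matching of_device_id carries a
 * per-variant constant that the probe code retrieves.
 */
static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-uart-a", .data = (void *)1 },
        { .compatible = "vendor,example-uart-b", .data = (void *)2 },
        { /* sentinel */ },
};

static int example_port_type(struct platform_device *pdev)
{
        const struct of_device_id *match;

        match = of_match_node(example_of_match, pdev->dev.of_node);
        if (!match)
                return -ENODEV;

        return (int)(unsigned long)match->data;
}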
 
index 93b697a0de658ddc6769fedbe822b1151fef4bab..15ad6fcda88b323b2f5711b753ee76126741ea04 100644 (file)
@@ -561,12 +561,13 @@ static int vt8500_serial_probe(struct platform_device *pdev)
        if (!mmres || !irqres)
                return -ENODEV;
 
-       if (np)
+       if (np) {
                port = of_alias_get_id(np, "serial");
                if (port >= VT8500_MAX_PORTS)
                        port = -1;
-       else
+       } else {
                port = -1;
+       }
 
        if (port < 0) {
                /* calculate the port id */
index be822a2c1776cc30c2df5f3ed7840aff68ef10ac..7ad541591c81c0db06b6dc6b4b3d7e6d49137103 100644 (file)
@@ -121,10 +121,9 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
 
        pdata.phy = data->phy;
 
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto err_clk;
 
        if (data->usbmisc_data) {
                ret = imx_usbmisc_init(data->usbmisc_data);
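
This hunk, and many of the HCD probe hunks that follow, replace the open-coded dma_mask/coherent_dma_mask assignments with dma_coerce_mask_and_coherent(), which sets both masks and reports failure so probe can bail out. A minimal sketch of the resulting probe prologue (the probe function itself is illustrative):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        int ret;

        /* Force both the streaming and coherent DMA masks to 32 bits. */
        ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        /* ... continue with normal probe work ... */
        return 0;
}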
index 6f96795dd20cc3ff3d0c08490ae8d87b0b2ccccf..64d7a6d9a1adcbd679258661a6b5875bd8bf834a 100644 (file)
@@ -100,8 +100,10 @@ static void host_stop(struct ci_hdrc *ci)
 {
        struct usb_hcd *hcd = ci->hcd;
 
-       usb_remove_hcd(hcd);
-       usb_put_hcd(hcd);
+       if (hcd) {
+               usb_remove_hcd(hcd);
+               usb_put_hcd(hcd);
+       }
        if (ci->platdata->reg_vbus)
                regulator_disable(ci->platdata->reg_vbus);
 }
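
The host_stop() change above only tears the HCD down if it was actually created. The same defensive shape, in isolation and with an assumed calling context:

#include <linux/usb/hcd.h>

/* Illustrative only: skip the teardown when start never succeeded, so a
 * stop path reached after a failed start does not touch a NULL hcd.
 */
static void example_host_stop(struct usb_hcd *hcd)
{
        if (hcd) {
                usb_remove_hcd(hcd);
                usb_put_hcd(hcd);
        }
}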
index 5b44cd47da5b28b30046bead355aa54eda3eec0f..01fe36273f3b144cb1c1fe3306ebdf16459c605f 100644 (file)
@@ -97,6 +97,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Alcor Micro Corp. Hub */
        { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* MicroTouch Systems touchscreen */
+       { USB_DEVICE(0x0596, 0x051e), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* appletouch */
        { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -130,6 +133,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Broadcom BCM92035DGROM BT dongle */
        { USB_DEVICE(0x0a5c, 0x2021), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* MAYA44USB sound device */
+       { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Action Semiconductor flash disk */
        { USB_DEVICE(0x10d6, 0x2200), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index 2f2e88a3a11a3c9a9adbcca6b104b54fa2f4352b..8b20c70d91e788c8e27791b98e454c4a5ba87ae7 100644 (file)
@@ -119,10 +119,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we move to full device tree support this will vanish off.
         */
-       if (!dev->dma_mask)
-               dev->dma_mask = &dev->coherent_dma_mask;
-       if (!dev->coherent_dma_mask)
-               dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto err1;
 
        platform_set_drvdata(pdev, exynos);
 
index 67128be1e1b70f25b29f8cc571b1b916a9c35f2c..6a2a65aa0057c9d822cd5d9bac61aaeac470bd71 100644 (file)
@@ -3078,7 +3078,9 @@ static int __init lpc32xx_udc_probe(struct platform_device *pdev)
                 udc->isp1301_i2c_client->addr);
 
        pdev->dev.dma_mask = &lpc32xx_usbd_dmamask;
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               goto resource_fail;
 
        udc->board = &lpc32xx_usbddata;
 
index 08a1a3210a2117f43b29f899085a21c547a9e644..cd1431d850c4d84a925286de4a47667f24b19fe4 100644 (file)
@@ -450,11 +450,11 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
         * If we can't read the file, it's no good.
         * If we can't write the file, use it read-only.
         */
-       if (!(filp->f_op->read || filp->f_op->aio_read)) {
+       if (!file_readable(filp)) {
                LINFO(curlun, "file not readable: %s\n", filename);
                goto out;
        }
-       if (!(filp->f_op->write || filp->f_op->aio_write))
+       if (!file_writable(filp))
                ro = 1;
 
        size = i_size_read(inode->i_mapping->host);
index df13d425e9c5fd4700db812505978c4876ac5dd7..205f4a336583de6c32b608854a943cc121dc1dc4 100644 (file)
@@ -227,8 +227,7 @@ static int bcma_hcd_probe(struct bcma_device *dev)
 
        /* TODO: Probably need checks here; is the core connected? */
 
-       if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
-           dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+       if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
                return -EOPNOTSUPP;
 
        usb_dev = kzalloc(sizeof(struct bcma_hcd_device), GFP_KERNEL);
index 3b645ff46f7b9f8df67d2d793c1758be26173b21..8e7323e07f794435d001ee2df81135fa5bc2325e 100644 (file)
@@ -90,10 +90,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               goto fail_create_hcd;
 
        hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
        if (!hcd) {
index 45cc00158412ac8a380cda88a28a4bb7536d62fa..323a02b1a0a65778b0e7dd74ad9423e632444402 100644 (file)
@@ -116,8 +116,10 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
         * We can DMA from anywhere. But the descriptors must be in
         * the lower 4GB.
         */
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        pdev->dev.dma_mask = &ehci_octeon_dma_mask;
+       ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
        if (!hcd)
index 78b01fa475bbfc85e0aa35fcd9ab917b963689ee..6fa82d6b7661bea07d38dd249d76a80843ff4d01 100644 (file)
@@ -104,7 +104,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
        struct resource *res;
        struct usb_hcd  *hcd;
        void __iomem *regs;
-       int ret = -ENODEV;
+       int ret;
        int irq;
        int i;
        struct omap_hcd *omap;
@@ -144,11 +144,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!dev->dma_mask)
-               dev->dma_mask = &dev->coherent_dma_mask;
-       if (!dev->coherent_dma_mask)
-               dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
+       ret = -ENODEV;
        hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
                        dev_name(dev));
        if (!hcd) {
index d1dfb9db5b420845edec3a30b81bdd30792704e9..2ba76730e6509ea8f9372234be96f9ea81796cd1 100644 (file)
@@ -180,10 +180,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
         * set. Since shared usb code relies on it, set it here for
         * now. Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               goto err1;
 
        if (!request_mem_region(res->start, resource_size(res),
                                ehci_orion_hc_driver.description)) {
index f6b790ca8cf2415805d827fcceebbde48291a935..7f30b7168d5a53542cdbea786d9cba849a932579 100644 (file)
@@ -78,7 +78,7 @@ static int ehci_platform_probe(struct platform_device *dev)
        struct resource *res_mem;
        struct usb_ehci_pdata *pdata;
        int irq;
-       int err = -ENOMEM;
+       int err;
 
        if (usb_disabled())
                return -ENODEV;
@@ -89,10 +89,10 @@ static int ehci_platform_probe(struct platform_device *dev)
         */
        if (!dev_get_platdata(&dev->dev))
                dev->dev.platform_data = &ehci_platform_defaults;
-       if (!dev->dev.dma_mask)
-               dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
-       if (!dev->dev.coherent_dma_mask)
-               dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+       err = dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        pdata = dev_get_platdata(&dev->dev);
 
index 7c3de95c7054339a5f62b64322f55a24f2bb286b..d919ed47bd47fdc8f631b1804e74bd54e134cf17 100644 (file)
@@ -89,10 +89,9 @@ static int s5p_ehci_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we move to full device tree support this will vanish off.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        s5p_setup_vbus_gpio(pdev);
 
index 1cf0adba3fc8dd4b3e071e8eeef769c5e9fbf1a2..ee6f9ffaa0e73ef9a6ca237d4e28960162e01d51 100644 (file)
@@ -81,10 +81,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               goto fail;
 
        usbh_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(usbh_clk)) {
index 78fa76da332435a83cc3c55226d33cbeb2e78afe..e74aaf3f016450a84ee42600ce584bebab41d9b0 100644 (file)
@@ -362,10 +362,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
                                        dev_name(&pdev->dev));
index caa3764a34075e4735a9aca7d907f8fa693fde83..36423db63073bc9bce3d6fa28dbf624006f9b6e9 100644 (file)
@@ -524,7 +524,7 @@ MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
 static int ohci_at91_of_init(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
-       int i, gpio;
+       int i, gpio, ret;
        enum of_gpio_flags flags;
        struct at91_usbh_data   *pdata;
        u32 ports;
@@ -536,10 +536,9 @@ static int ohci_at91_of_init(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
index dc6ee9adacf58679305df6baa851167e34e61ed4..866f2464f9de64c84ac96114c96dccb52a472d32 100644 (file)
@@ -114,10 +114,9 @@ static int exynos_ohci_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we move to full device tree support this will vanish off.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (err)
+               return err;
 
        exynos_ohci = devm_kzalloc(&pdev->dev, sizeof(struct exynos_ohci_hcd),
                                        GFP_KERNEL);
index 7d7d507d54e83ef89cdd0695d694f7e3bb4a5e7d..df3eb3e0324ea9ffde44836ed1788cd509d30d19 100644 (file)
@@ -226,8 +226,9 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
                return -EPROBE_DEFER;
        }
 
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto fail_disable;
 
        dev_dbg(&pdev->dev, "%s: " DRIVER_DESC " (nxp)\n", hcd_name);
        if (usb_disabled()) {
index 342dc7e543b81afd5b17377ac351caf2405b7a02..6c16dcef15c6fe0ee47d063a931cd7330c81da29 100644 (file)
@@ -127,8 +127,9 @@ static int ohci_octeon_drv_probe(struct platform_device *pdev)
        }
 
        /* Ohci is a 32-bit device. */
-       pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        hcd = usb_create_hcd(&ohci_octeon_hc_driver, &pdev->dev, "octeon");
        if (!hcd)
index a09af26f69ed4efdde274e028eb88110231cfc18..db9bd6bc97b99106a80093d97262401c6862e167 100644 (file)
@@ -132,7 +132,7 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
        struct usb_hcd          *hcd = NULL;
        void __iomem            *regs = NULL;
        struct resource         *res;
-       int                     ret = -ENODEV;
+       int                     ret;
        int                     irq;
 
        if (usb_disabled())
@@ -166,11 +166,11 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!dev->dma_mask)
-               dev->dma_mask = &dev->coherent_dma_mask;
-       if (!dev->coherent_dma_mask)
-               dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto err_io;
 
+       ret = -ENODEV;
        hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
                        dev_name(dev));
        if (!hcd) {
index 93371a235e821fac080068f7522bc5a80576119c..b64949bc43e2ebacc1620a76faf6d9db2935b16e 100644 (file)
@@ -287,6 +287,7 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct pxaohci_platform_data *pdata;
        u32 tmp;
+       int ret;
 
        if (!np)
                return 0;
@@ -295,10 +296,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
index 17b2a7dad77b81cb8d7edb8e69a97ae490d78a63..aa9e127bbe718d52b8c994910c2c30826f5db06e 100644 (file)
@@ -185,6 +185,12 @@ static int ohci_hcd_sa1111_probe(struct sa1111_dev *dev)
        if (usb_disabled())
                return -ENODEV;
 
+       /*
+        * We don't call dma_set_mask_and_coherent() here because the
+        * DMA mask has already been appropriately set up by the core
+        * SA-1111 bus code (which includes bug workarounds).
+        */
+
        hcd = usb_create_hcd(&ohci_sa1111_hc_driver, &dev->dev, "sa1111");
        if (!hcd)
                return -ENOMEM;
index cc9dd9e4f05e69469eeca87233dd0f29bac23992..075bb5e9b43fd0c4df8ddf63cfceffcc5ba1c81f 100644 (file)
@@ -111,10 +111,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       retval = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (retval)
+               goto fail;
 
        usbh_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(usbh_clk)) {
index 2c76ef1320eab679e1dd1aabd1c8466981392efc..08ef2829a7e2b1c06bbd0af51f308884c8ad9a26 100644 (file)
@@ -799,7 +799,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
         * switchable ports.
         */
        pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
-                       cpu_to_le32(ports_available));
+                       ports_available);
 
        pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN,
                        &ports_available);
@@ -821,7 +821,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
         * host.
         */
        pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
-                       cpu_to_le32(ports_available));
+                       ports_available);
 
        pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
                        &ports_available);
index 74af2c6287d2670b321e1edb317f01c0ed2f333b..0196f766df734f48352fd8c5217d08963aab444f 100644 (file)
@@ -163,8 +163,7 @@ static int ssb_hcd_probe(struct ssb_device *dev,
 
        /* TODO: Probably need checks here; is the core connected? */
 
-       if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
-           dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
+       if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
                return -EOPNOTSUPP;
 
        usb_dev = kzalloc(sizeof(struct ssb_hcd_device), GFP_KERNEL);
index d033a0ec7f0d02bd3874ae5e7339d43e964d2611..f8548b72f7089c1d3e10bebf1f8c070759798b7e 100644 (file)
@@ -75,10 +75,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
         * Since shared usb code relies on it, set it here for now.
         * Once we have dma capability bindings this can go away.
         */
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-       if (!pdev->dev.coherent_dma_mask)
-               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
                        pdev->name);
index 773a6b28c4f1657670547265e0d73caed6366907..e8b4c56dcf62adf1f5326e8609087cd387fedc2e 100644 (file)
@@ -1157,18 +1157,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                t1 = xhci_port_state_to_neutral(t1);
                if (t1 != t2)
                        xhci_writel(xhci, t2, port_array[port_index]);
-
-               if (hcd->speed != HCD_USB3) {
-                       /* enable remote wake up for USB 2.0 */
-                       __le32 __iomem *addr;
-                       u32 tmp;
-
-                       /* Get the port power control register address. */
-                       addr = port_array[port_index] + PORTPMSC;
-                       tmp = xhci_readl(xhci, addr);
-                       tmp |= PORT_RWE;
-                       xhci_writel(xhci, tmp, addr);
-               }
        }
        hcd->state = HC_STATE_SUSPENDED;
        bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1247,20 +1235,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
                                xhci_ring_device(xhci, slot_id);
                } else
                        xhci_writel(xhci, temp, port_array[port_index]);
-
-               if (hcd->speed != HCD_USB3) {
-                       /* disable remote wake up for USB 2.0 */
-                       __le32 __iomem *addr;
-                       u32 tmp;
-
-                       /* Add one to the port status register address to get
-                        * the port power control register address.
-                        */
-                       addr = port_array[port_index] + PORTPMSC;
-                       tmp = xhci_readl(xhci, addr);
-                       tmp &= ~PORT_RWE;
-                       xhci_writel(xhci, tmp, addr);
-               }
        }
 
        (void) xhci_readl(xhci, &xhci->op_regs->command);
index 236c3aabe94083ab5c1829241a46a20d39829587..b8dffd59eb256e52786328e5e4f1919846a80d9c 100644 (file)
@@ -35,6 +35,9 @@
 #define PCI_VENDOR_ID_ETRON            0x1b6f
 #define PCI_DEVICE_ID_ASROCK_P67       0x7023
 
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI     0x8c31
+#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI  0x9c31
+
 static const char hcd_name[] = "xhci_hcd";
 
 /* called after powerup, by probe or system-pm "wakeup" */
@@ -69,6 +72,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                                "QUIRK: Fresco Logic xHC needs configure"
                                " endpoint cmd after reset endpoint");
                }
+               if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
+                               pdev->revision == 0x4) {
+                       xhci->quirks |= XHCI_SLOW_SUSPEND;
+                       xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+                               "QUIRK: Fresco Logic xHC revision %u "
+                               "must be suspended extra slowly",
+                               pdev->revision);
+               }
                /* Fresco Logic confirms: all revisions of this chip do not
                 * support MSI, even though some of them claim to in their PCI
                 * capabilities.
@@ -110,6 +121,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
                xhci->quirks |= XHCI_AVOID_BEI;
        }
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+           (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
+               /* Workaround for occasional spurious wakeups from S5 (or
+                * any other sleep) on Haswell machines with LPT and LPT-LP
+                * with the new Intel BIOS
+                */
+               xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+       }
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
                xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -217,6 +237,11 @@ static void xhci_pci_remove(struct pci_dev *dev)
                usb_put_hcd(xhci->shared_hcd);
        }
        usb_hcd_pci_remove(dev);
+
+       /* Workaround for spurious wakeups at shutdown with HSW */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+               pci_set_power_state(dev, PCI_D3hot);
+
        kfree(xhci);
 }
 
index 1e36dbb4836693dbe5ac90b8231361f82645a7b1..6e0d886bcce52c19361d321cf68c0abacf5b054c 100644 (file)
@@ -730,6 +730,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
 
        spin_lock_irq(&xhci->lock);
        xhci_halt(xhci);
+       /* Workaround for spurious wakeups at shutdown with HSW */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+               xhci_reset(xhci);
        spin_unlock_irq(&xhci->lock);
 
        xhci_cleanup_msix(xhci);
@@ -737,6 +740,10 @@ void xhci_shutdown(struct usb_hcd *hcd)
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
                        "xhci_shutdown completed - status = %x",
                        xhci_readl(xhci, &xhci->op_regs->status));
+
+       /* Yet another workaround for spurious wakeups at shutdown with HSW */
+       if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
+               pci_set_power_state(to_pci_dev(hcd->self.controller), PCI_D3hot);
 }
 
 #ifdef CONFIG_PM
@@ -839,6 +846,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
 int xhci_suspend(struct xhci_hcd *xhci)
 {
        int                     rc = 0;
+       unsigned int            delay = XHCI_MAX_HALT_USEC;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        u32                     command;
 
@@ -861,8 +869,12 @@ int xhci_suspend(struct xhci_hcd *xhci)
        command = xhci_readl(xhci, &xhci->op_regs->command);
        command &= ~CMD_RUN;
        xhci_writel(xhci, command, &xhci->op_regs->command);
+
+       /* Some chips from Fresco Logic need an extraordinary delay */
+       delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;
+
        if (xhci_handshake(xhci, &xhci->op_regs->status,
-                     STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC)) {
+                     STS_HALT, STS_HALT, delay)) {
                xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
                spin_unlock_irq(&xhci->lock);
                return -ETIMEDOUT;
index 289fbfbae7463f429ef051602e08e7eccb3e06e8..941d5f59e4dcc254770bac770ba024e36a677bad 100644 (file)
@@ -1548,6 +1548,8 @@ struct xhci_hcd {
 #define XHCI_COMP_MODE_QUIRK   (1 << 14)
 #define XHCI_AVOID_BEI         (1 << 15)
 #define XHCI_PLAT              (1 << 16)
+#define XHCI_SLOW_SUSPEND      (1 << 17)
+#define XHCI_SPURIOUS_WAKEUP   (1 << 18)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
index e2b21c1d9c403088d704d12901c8049e329c360f..ba5f70f92888774c5ef900d44c6f8ad941e6b4e9 100644 (file)
@@ -246,6 +246,6 @@ config USB_EZUSB_FX2
 config USB_HSIC_USB3503
        tristate "USB3503 HSIC to USB20 Driver"
        depends on I2C
-       select REGMAP
+       select REGMAP_I2C
        help
          This option enables support for SMSC USB3503 HSIC to USB 2.0 Driver.
index 5c310c664218dec6d1e71874ee8dc84f0232fa19..790b22b296b11f527b2060a76e6aa89624d8d804 100644 (file)
@@ -89,7 +89,6 @@ struct am35x_glue {
        struct clk              *phy_clk;
        struct clk              *clk;
 };
-#define glue_to_musb(g)                platform_get_drvdata(g->musb)
 
 /*
  * am35x_musb_enable - enable interrupts
@@ -452,14 +451,18 @@ static const struct musb_platform_ops am35x_ops = {
        .set_vbus       = am35x_musb_set_vbus,
 };
 
-static u64 am35x_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info am35x_dev_info = {
+       .name           = "musb-hdrc",
+       .id             = PLATFORM_DEVID_AUTO,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
 
 static int am35x_probe(struct platform_device *pdev)
 {
        struct musb_hdrc_platform_data  *pdata = dev_get_platdata(&pdev->dev);
        struct platform_device          *musb;
        struct am35x_glue               *glue;
-
+       struct platform_device_info     pinfo;
        struct clk                      *phy_clk;
        struct clk                      *clk;
 
@@ -471,12 +474,6 @@ static int am35x_probe(struct platform_device *pdev)
                goto err0;
        }
 
-       musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
-       if (!musb) {
-               dev_err(&pdev->dev, "failed to allocate musb device\n");
-               goto err1;
-       }
-
        phy_clk = clk_get(&pdev->dev, "fck");
        if (IS_ERR(phy_clk)) {
                dev_err(&pdev->dev, "failed to get PHY clock\n");
@@ -503,12 +500,7 @@ static int am35x_probe(struct platform_device *pdev)
                goto err6;
        }
 
-       musb->dev.parent                = &pdev->dev;
-       musb->dev.dma_mask              = &am35x_dmamask;
-       musb->dev.coherent_dma_mask     = am35x_dmamask;
-
        glue->dev                       = &pdev->dev;
-       glue->musb                      = musb;
        glue->phy_clk                   = phy_clk;
        glue->clk                       = clk;
 
@@ -516,22 +508,17 @@ static int am35x_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, glue);
 
-       ret = platform_device_add_resources(musb, pdev->resource,
-                       pdev->num_resources);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add resources\n");
-               goto err7;
-       }
-
-       ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add platform_data\n");
-               goto err7;
-       }
-
-       ret = platform_device_add(musb);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to register musb device\n");
+       pinfo = am35x_dev_info;
+       pinfo.parent = &pdev->dev;
+       pinfo.res = pdev->resource;
+       pinfo.num_res = pdev->num_resources;
+       pinfo.data = pdata;
+       pinfo.size_data = sizeof(*pdata);
+
+       glue->musb = musb = platform_device_register_full(&pinfo);
+       if (IS_ERR(musb)) {
+               ret = PTR_ERR(musb);
+               dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
                goto err7;
        }
 
@@ -550,9 +537,6 @@ err4:
        clk_put(phy_clk);
 
 err3:
-       platform_device_put(musb);
-
-err1:
        kfree(glue);
 
 err0:
index d9ddf4122f37e6ddfd19661eb9cb1448306525b6..2f2c1cb364218833f40468429d1469299a56e406 100644 (file)
@@ -472,7 +472,11 @@ static const struct musb_platform_ops da8xx_ops = {
        .set_vbus       = da8xx_musb_set_vbus,
 };
 
-static u64 da8xx_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info da8xx_dev_info = {
+       .name           = "musb-hdrc",
+       .id             = PLATFORM_DEVID_AUTO,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
 
 static int da8xx_probe(struct platform_device *pdev)
 {
@@ -480,7 +484,7 @@ static int da8xx_probe(struct platform_device *pdev)
        struct musb_hdrc_platform_data  *pdata = dev_get_platdata(&pdev->dev);
        struct platform_device          *musb;
        struct da8xx_glue               *glue;
-
+       struct platform_device_info     pinfo;
        struct clk                      *clk;
 
        int                             ret = -ENOMEM;
@@ -491,12 +495,6 @@ static int da8xx_probe(struct platform_device *pdev)
                goto err0;
        }
 
-       musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
-       if (!musb) {
-               dev_err(&pdev->dev, "failed to allocate musb device\n");
-               goto err1;
-       }
-
        clk = clk_get(&pdev->dev, "usb20");
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "failed to get clock\n");
@@ -510,12 +508,7 @@ static int da8xx_probe(struct platform_device *pdev)
                goto err4;
        }
 
-       musb->dev.parent                = &pdev->dev;
-       musb->dev.dma_mask              = &da8xx_dmamask;
-       musb->dev.coherent_dma_mask     = da8xx_dmamask;
-
        glue->dev                       = &pdev->dev;
-       glue->musb                      = musb;
        glue->clk                       = clk;
 
        pdata->platform_ops             = &da8xx_ops;
@@ -535,22 +528,17 @@ static int da8xx_probe(struct platform_device *pdev)
        musb_resources[1].end = pdev->resource[1].end;
        musb_resources[1].flags = pdev->resource[1].flags;
 
-       ret = platform_device_add_resources(musb, musb_resources,
-                       ARRAY_SIZE(musb_resources));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add resources\n");
-               goto err5;
-       }
-
-       ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add platform_data\n");
-               goto err5;
-       }
-
-       ret = platform_device_add(musb);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to register musb device\n");
+       pinfo = da8xx_dev_info;
+       pinfo.parent = &pdev->dev;
+       pinfo.res = musb_resources;
+       pinfo.num_res = ARRAY_SIZE(musb_resources);
+       pinfo.data = pdata;
+       pinfo.size_data = sizeof(*pdata);
+
+       glue->musb = musb = platform_device_register_full(&pinfo);
+       if (IS_ERR(musb)) {
+               ret = PTR_ERR(musb);
+               dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
                goto err5;
        }
 
@@ -563,9 +551,6 @@ err4:
        clk_put(clk);
 
 err3:
-       platform_device_put(musb);
-
-err1:
        kfree(glue);
 
 err0:
index ed0834e2b72eeaa6c9dbe73bdb77c2f433282b42..45aae0bbb8dfb2b03f9af13aecd767907ec01ebb 100644 (file)
@@ -505,7 +505,11 @@ static const struct musb_platform_ops davinci_ops = {
        .set_vbus       = davinci_musb_set_vbus,
 };
 
-static u64 davinci_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info davinci_dev_info = {
+       .name           = "musb-hdrc",
+       .id             = PLATFORM_DEVID_AUTO,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
 
 static int davinci_probe(struct platform_device *pdev)
 {
@@ -513,6 +517,7 @@ static int davinci_probe(struct platform_device *pdev)
        struct musb_hdrc_platform_data  *pdata = dev_get_platdata(&pdev->dev);
        struct platform_device          *musb;
        struct davinci_glue             *glue;
+       struct platform_device_info     pinfo;
        struct clk                      *clk;
 
        int                             ret = -ENOMEM;
@@ -523,12 +528,6 @@ static int davinci_probe(struct platform_device *pdev)
                goto err0;
        }
 
-       musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
-       if (!musb) {
-               dev_err(&pdev->dev, "failed to allocate musb device\n");
-               goto err1;
-       }
-
        clk = clk_get(&pdev->dev, "usb");
        if (IS_ERR(clk)) {
                dev_err(&pdev->dev, "failed to get clock\n");
@@ -542,12 +541,7 @@ static int davinci_probe(struct platform_device *pdev)
                goto err4;
        }
 
-       musb->dev.parent                = &pdev->dev;
-       musb->dev.dma_mask              = &davinci_dmamask;
-       musb->dev.coherent_dma_mask     = davinci_dmamask;
-
        glue->dev                       = &pdev->dev;
-       glue->musb                      = musb;
        glue->clk                       = clk;
 
        pdata->platform_ops             = &davinci_ops;
@@ -567,22 +561,17 @@ static int davinci_probe(struct platform_device *pdev)
        musb_resources[1].end = pdev->resource[1].end;
        musb_resources[1].flags = pdev->resource[1].flags;
 
-       ret = platform_device_add_resources(musb, musb_resources,
-                       ARRAY_SIZE(musb_resources));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add resources\n");
-               goto err5;
-       }
-
-       ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add platform_data\n");
-               goto err5;
-       }
-
-       ret = platform_device_add(musb);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to register musb device\n");
+       pinfo = davinci_dev_info;
+       pinfo.parent = &pdev->dev;
+       pinfo.res = musb_resources;
+       pinfo.num_res = ARRAY_SIZE(musb_resources);
+       pinfo.data = pdata;
+       pinfo.size_data = sizeof(*pdata);
+
+       glue->musb = musb = platform_device_register_full(&pinfo);
+       if (IS_ERR(musb)) {
+               ret = PTR_ERR(musb);
+               dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
                goto err5;
        }
 
@@ -595,9 +584,6 @@ err4:
        clk_put(clk);
 
 err3:
-       platform_device_put(musb);
-
-err1:
        kfree(glue);
 
 err0:
index 18e877ffe7b7d8c7393bdf43efdfd0d58e618011..cd70cc8861711015f5443e2b76e969209d9bdb3a 100644 (file)
@@ -921,6 +921,52 @@ static void musb_generic_disable(struct musb *musb)
 
 }
 
+/*
+ * Program the HDRC to start (enable interrupts, dma, etc.).
+ */
+void musb_start(struct musb *musb)
+{
+       void __iomem    *regs = musb->mregs;
+       u8              devctl = musb_readb(regs, MUSB_DEVCTL);
+
+       dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
+
+       /*  Set INT enable registers, enable interrupts */
+       musb->intrtxe = musb->epmask;
+       musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
+       musb->intrrxe = musb->epmask & 0xfffe;
+       musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
+       musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
+
+       musb_writeb(regs, MUSB_TESTMODE, 0);
+
+       /* put into basic highspeed mode and start session */
+       musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
+                       | MUSB_POWER_HSENAB
+                       /* ENSUSPEND wedges tusb */
+                       /* | MUSB_POWER_ENSUSPEND */
+                  );
+
+       musb->is_active = 0;
+       devctl = musb_readb(regs, MUSB_DEVCTL);
+       devctl &= ~MUSB_DEVCTL_SESSION;
+
+       /* session started after:
+        * (a) ID-grounded irq, host mode;
+        * (b) vbus present/connect IRQ, peripheral mode;
+        * (c) peripheral initiates, using SRP
+        */
+       if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+                       (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
+               musb->is_active = 1;
+       } else {
+               devctl |= MUSB_DEVCTL_SESSION;
+       }
+
+       musb_platform_enable(musb);
+       musb_writeb(regs, MUSB_DEVCTL, devctl);
+}
+
 /*
  * Make the HDRC stop (disable interrupts, etc.);
  * reversible by musb_start
index 65f3917b4fc5fb55c95bf4157d70a1d7194a110d..1c5bf75ee8ff8a45a3da1fca7cb1d31920f0eeff 100644 (file)
@@ -503,6 +503,7 @@ static inline void musb_configure_ep0(struct musb *musb)
 extern const char musb_driver_name[];
 
 extern void musb_stop(struct musb *musb);
+extern void musb_start(struct musb *musb);
 
 extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
 extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
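
The static copy of musb_start() removed further down is replaced by the shared definition added to musb_core above; the declaration here, next to musb_stop(), lets the gadget path (next hunk) start the controller as soon as a function driver binds. A hedged sketch of a caller, assuming only the declarations visible in these hunks:

#include "musb_core.h"

/* Illustrative only: start the HDRC once a gadget driver has bound. */
static void example_udc_bound(struct musb *musb)
{
        musb->is_active = 1;    /* mark the controller active before enabling it */
        musb_start(musb);       /* program INTRTXE/INTRRXE/POWER and start the session */
}
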
index b19ed213ab8580c5456cbddaca4f217db31733c4..3671898a4535b3cf2de3c0e188f4947485d2760d 100644 (file)
@@ -1853,11 +1853,14 @@ static int musb_gadget_start(struct usb_gadget *g,
        musb->gadget_driver = driver;
 
        spin_lock_irqsave(&musb->lock, flags);
+       musb->is_active = 1;
 
        otg_set_peripheral(otg, &musb->g);
        musb->xceiv->state = OTG_STATE_B_IDLE;
        spin_unlock_irqrestore(&musb->lock, flags);
 
+       musb_start(musb);
+
        /* REVISIT:  funcall to other code, which also
         * handles power budgeting ... this way also
         * ensures HdrcStart is indirectly called.
index a523950c2b32e66e07f7c8bd78daa181f691f25c..d1d6b83aabca61df43dffccf57ccbe7687cbe399 100644 (file)
 
 #include "musb_core.h"
 
-/*
-* Program the HDRC to start (enable interrupts, dma, etc.).
-*/
-static void musb_start(struct musb *musb)
-{
-       void __iomem    *regs = musb->mregs;
-       u8              devctl = musb_readb(regs, MUSB_DEVCTL);
-
-       dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
-
-       /*  Set INT enable registers, enable interrupts */
-       musb->intrtxe = musb->epmask;
-       musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
-       musb->intrrxe = musb->epmask & 0xfffe;
-       musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
-       musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
-
-       musb_writeb(regs, MUSB_TESTMODE, 0);
-
-       /* put into basic highspeed mode and start session */
-       musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
-                                               | MUSB_POWER_HSENAB
-                                               /* ENSUSPEND wedges tusb */
-                                               /* | MUSB_POWER_ENSUSPEND */
-                                               );
-
-       musb->is_active = 0;
-       devctl = musb_readb(regs, MUSB_DEVCTL);
-       devctl &= ~MUSB_DEVCTL_SESSION;
-
-       /* session started after:
-        * (a) ID-grounded irq, host mode;
-        * (b) vbus present/connect IRQ, peripheral mode;
-        * (c) peripheral initiates, using SRP
-        */
-       if (musb->port_mode != MUSB_PORT_MODE_HOST &&
-           (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
-               musb->is_active = 1;
-       } else {
-               devctl |= MUSB_DEVCTL_SESSION;
-       }
-
-       musb_platform_enable(musb);
-       musb_writeb(regs, MUSB_DEVCTL, devctl);
-}
-
 static void musb_port_suspend(struct musb *musb, bool do_suspend)
 {
        struct usb_otg  *otg = musb->xceiv->otg;
index b3b3ed723882ffab75829e7c9bc1b08b42e1605d..4432314d70ee18f1dfe0e092c487dcab224c1742 100644 (file)
@@ -1152,7 +1152,11 @@ static const struct musb_platform_ops tusb_ops = {
        .set_vbus       = tusb_musb_set_vbus,
 };
 
-static u64 tusb_dmamask = DMA_BIT_MASK(32);
+static const struct platform_device_info tusb_dev_info = {
+       .name           = "musb-hdrc",
+       .id             = PLATFORM_DEVID_AUTO,
+       .dma_mask       = DMA_BIT_MASK(32),
+};
 
 static int tusb_probe(struct platform_device *pdev)
 {
@@ -1160,7 +1164,7 @@ static int tusb_probe(struct platform_device *pdev)
        struct musb_hdrc_platform_data  *pdata = dev_get_platdata(&pdev->dev);
        struct platform_device          *musb;
        struct tusb6010_glue            *glue;
-
+       struct platform_device_info     pinfo;
        int                             ret = -ENOMEM;
 
        glue = kzalloc(sizeof(*glue), GFP_KERNEL);
@@ -1169,18 +1173,7 @@ static int tusb_probe(struct platform_device *pdev)
                goto err0;
        }
 
-       musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
-       if (!musb) {
-               dev_err(&pdev->dev, "failed to allocate musb device\n");
-               goto err1;
-       }
-
-       musb->dev.parent                = &pdev->dev;
-       musb->dev.dma_mask              = &tusb_dmamask;
-       musb->dev.coherent_dma_mask     = tusb_dmamask;
-
        glue->dev                       = &pdev->dev;
-       glue->musb                      = musb;
 
        pdata->platform_ops             = &tusb_ops;
 
@@ -1204,31 +1197,23 @@ static int tusb_probe(struct platform_device *pdev)
        musb_resources[2].end = pdev->resource[2].end;
        musb_resources[2].flags = pdev->resource[2].flags;
 
-       ret = platform_device_add_resources(musb, musb_resources,
-                       ARRAY_SIZE(musb_resources));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add resources\n");
-               goto err3;
-       }
-
-       ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add platform_data\n");
-               goto err3;
-       }
-
-       ret = platform_device_add(musb);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to register musb device\n");
+       pinfo = tusb_dev_info;
+       pinfo.parent = &pdev->dev;
+       pinfo.res = musb_resources;
+       pinfo.num_res = ARRAY_SIZE(musb_resources);
+       pinfo.data = pdata;
+       pinfo.size_data = sizeof(*pdata);
+
+       glue->musb = musb = platform_device_register_full(&pinfo);
+       if (IS_ERR(musb)) {
+               ret = PTR_ERR(musb);
+               dev_err(&pdev->dev, "failed to register musb device: %d\n", ret);
                goto err3;
        }
 
        return 0;
 
 err3:
-       platform_device_put(musb);
-
-err1:
        kfree(glue);
 
 err0:
index 80a7104d5ddb3e6d81fa125bb13d9a4d0af05dcf..acaee066b99aa10e5ecf2a8e75cffd31899b6507 100644 (file)
@@ -451,6 +451,10 @@ static void option_instat_callback(struct urb *urb);
 #define CHANGHONG_VENDOR_ID                    0x2077
 #define CHANGHONG_PRODUCT_CH690                        0x7001
 
+/* Inovia */
+#define INOVIA_VENDOR_ID                       0x20a6
+#define INOVIA_SEW858                          0x1105
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -689,6 +693,222 @@ static const struct usb_device_id option_ids[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
        { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x03, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x04, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x05, 0x7C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x01) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x02) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x03) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x04) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x05) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x06) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x0F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x10) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x12) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x13) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x14) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x15) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x17) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x18) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x19) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x1C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x31) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x32) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x33) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x34) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x35) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x36) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x3F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x48) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x49) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x4C) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x61) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x62) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x63) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x64) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x65) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x66) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6D) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6E) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x6F) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x78) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x79) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7A) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
+       { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
 
 
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
@@ -1257,7 +1477,9 @@ static const struct usb_device_id option_ids[] = {
 
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
-       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+               .driver_info = (kernel_ulong_t)&net_intf6_blacklist
+       },
        { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -1345,6 +1567,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
        { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+       { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
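
The long run of new option_ids entries above matches Huawei devices by vendor ID plus vendor-specific interface triple (class 0xff, subclasses 0x03-0x06) instead of enumerating product IDs, and the OLICARD200 entry gains a driver_info blacklist so that interface is not claimed as a serial port. A sketch of the two entry styles, with placeholder IDs and a stand-in blacklist object rather than the driver's real net_intf6_blacklist:

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VENDOR_ID       0x1234          /* placeholder vendor ID */
#define EXAMPLE_PRODUCT_ID      0x5678          /* placeholder product ID */

static const unsigned long example_blacklist;   /* stands in for the driver's blacklist data */

static const struct usb_device_id example_ids[] = {
        /* match any device of this vendor exposing a ff/03/10 interface */
        { USB_VENDOR_AND_INTERFACE_INFO(EXAMPLE_VENDOR_ID, 0xff, 0x03, 0x10) },
        /* match one product and attach per-match data via driver_info */
        { USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_ID),
          .driver_info = (kernel_ulong_t)&example_blacklist },
        { }                                     /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);
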
index 760b78560f67fd7e7d905d53dff2cb7a46f2919f..c9a35697ebe9a6e131d5a9c145e5f3f3415dd5bf 100644 (file)
@@ -190,6 +190,7 @@ static struct usb_device_id ti_id_table_combined[] = {
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
        { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
        { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
        { }     /* terminator */
 };
index 94d75edef77fdf34e4f5e341e5952e532f03f09e..18509e6c21ab84e7d3133f7a0e428ba462d2a24b 100644 (file)
@@ -211,8 +211,11 @@ static int slave_configure(struct scsi_device *sdev)
                /*
                 * Many devices do not respond properly to READ_CAPACITY_16.
                 * Tell the SCSI layer to try READ_CAPACITY_10 first.
+                * However, some USB 3.0 drive enclosures return the capacity
+                * modulo 2 TB; those must use READ_CAPACITY_16.
                 */
-               sdev->try_rc_10_first = 1;
+               if (!(us->fflags & US_FL_NEEDS_CAP16))
+                       sdev->try_rc_10_first = 1;
 
                /* assume SPC3 or latter devices support sense size > 18 */
                if (sdev->scsi_level > SCSI_SPC_2)
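
This scsiglue hunk makes the "try READ_CAPACITY(10) first" heuristic conditional on a new usb-storage quirk, US_FL_NEEDS_CAP16, so enclosures that report the capacity modulo 2 TB keep using READ_CAPACITY(16); the ASMedia AS2105 entry further down sets the flag. A small sketch of the check on the quirk bits, assuming the flag is defined in <linux/usb_usual.h> as these hunks imply:

#include <linux/types.h>
#include <linux/usb_usual.h>

/* Illustrative: should the SCSI layer try READ_CAPACITY(10) before (16)? */
static bool example_try_rc10_first(unsigned long fflags)
{
        return !(fflags & US_FL_NEEDS_CAP16);
}
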
index c015f2c16729c5b4030fc829fae9a3cfb90acde6..de32cfa5bfa6ca0772d0955ea83b9b98dd698ea5 100644 (file)
@@ -1925,6 +1925,13 @@ UNUSUAL_DEV(  0x1652, 0x6600, 0x0201, 0x0201,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Oliver Neukum <oneukum@suse.com> */
+UNUSUAL_DEV(  0x174c, 0x55aa, 0x0100, 0x0100,
+               "ASMedia",
+               "AS2105",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NEEDS_CAP16),
+
 /* Reported by Jesse Feddema <jdfeddema@gmail.com> */
 UNUSUAL_DEV(  0x177f, 0x0400, 0x0000, 0x0000,
                "Yarvik",
index a9807dea3887af0afe44e545784f70d46efa40dd..4fb7a8f83c8a99ff8d3412a5328b2407c98409a8 100644 (file)
@@ -545,6 +545,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
        long npage;
        int ret = 0, prot = 0;
        uint64_t mask;
+       struct vfio_dma *dma = NULL;
+       unsigned long pfn;
 
        end = map->iova + map->size;
 
@@ -587,8 +589,6 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
        }
 
        for (iova = map->iova; iova < end; iova += size, vaddr += size) {
-               struct vfio_dma *dma = NULL;
-               unsigned long pfn;
                long i;
 
                /* Pin a contiguous chunk of memory */
@@ -597,16 +597,15 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                if (npage <= 0) {
                        WARN_ON(!npage);
                        ret = (int)npage;
-                       break;
+                       goto out;
                }
 
                /* Verify pages are not already mapped */
                for (i = 0; i < npage; i++) {
                        if (iommu_iova_to_phys(iommu->domain,
                                               iova + (i << PAGE_SHIFT))) {
-                               vfio_unpin_pages(pfn, npage, prot, true);
                                ret = -EBUSY;
-                               break;
+                               goto out_unpin;
                        }
                }
 
@@ -616,8 +615,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                if (ret) {
                        if (ret != -EBUSY ||
                            map_try_harder(iommu, iova, pfn, npage, prot)) {
-                               vfio_unpin_pages(pfn, npage, prot, true);
-                               break;
+                               goto out_unpin;
                        }
                }
 
@@ -672,9 +670,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
                        if (!dma) {
                                iommu_unmap(iommu->domain, iova, size);
-                               vfio_unpin_pages(pfn, npage, prot, true);
                                ret = -ENOMEM;
-                               break;
+                               goto out_unpin;
                        }
 
                        dma->size = size;
@@ -685,16 +682,21 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                }
        }
 
-       if (ret) {
-               struct vfio_dma *tmp;
-               iova = map->iova;
-               size = map->size;
-               while ((tmp = vfio_find_dma(iommu, iova, size))) {
-                       int r = vfio_remove_dma_overlap(iommu, iova,
-                                                       &size, tmp);
-                       if (WARN_ON(r || !size))
-                               break;
-               }
+       WARN_ON(ret);
+       mutex_unlock(&iommu->lock);
+       return ret;
+
+out_unpin:
+       vfio_unpin_pages(pfn, npage, prot, true);
+
+out:
+       iova = map->iova;
+       size = map->size;
+       while ((dma = vfio_find_dma(iommu, iova, size))) {
+               int r = vfio_remove_dma_overlap(iommu, iova,
+                                               &size, dma);
+               if (WARN_ON(r || !size))
+                       break;
        }
 
        mutex_unlock(&iommu->lock);
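
The vfio_iommu_type1 hunks above replace per-iteration cleanup plus a trailing if (ret) rollback with labelled exits: out_unpin undoes the pinning of the chunk that just failed, and out tears down every mapping already inserted for this request before dropping the lock. The same unwind shape as a self-contained sketch, independent of the vfio internals:

/* Illustrative only: goto-based unwinding of a partially completed loop. */
struct ctx { int dummy; };

static int  pin_one(struct ctx *c, unsigned long i)   { (void)c; (void)i; return 0; }
static int  map_one(struct ctx *c, unsigned long i)   { (void)c; (void)i; return 0; }
static void unpin_one(struct ctx *c, unsigned long i) { (void)c; (void)i; }
static void unmap_range(struct ctx *c, unsigned long s, unsigned long e)
{
        (void)c; (void)s; (void)e;
}

static int map_many(struct ctx *ctx, unsigned long start, unsigned long end)
{
        unsigned long cur;
        int ret;

        for (cur = start; cur < end; cur++) {
                ret = pin_one(ctx, cur);
                if (ret)
                        goto out;               /* nothing pinned for this iteration */

                ret = map_one(ctx, cur);
                if (ret)
                        goto out_unpin;         /* undo the current pin first ... */
        }
        return 0;

out_unpin:
        unpin_one(ctx, cur);
out:
        unmap_range(ctx, start, cur);           /* ... then roll back everything mapped so far */
        return ret;
}
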
index 84b685f7ab6e0a3389c534b56e8f8187619e1e84..a312f048656f76f017c2a730836762cadd1e8553 100644 (file)
@@ -19,10 +19,10 @@ source "drivers/char/agp/Kconfig"
 
 source "drivers/gpu/vga/Kconfig"
 
-source "drivers/gpu/drm/Kconfig"
-
 source "drivers/gpu/host1x/Kconfig"
 
+source "drivers/gpu/drm/Kconfig"
+
 config VGASTATE
        tristate
        default n
index 0a2cce7285be99dd8aaa7983a43bbeae7e6b2550..afe4702a5528a0ea9675cdabfce8ee4243e3799a 100644 (file)
@@ -10,6 +10,7 @@
  *
  *  ARM PrimeCell PL110 Color LCD Controller
  */
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -551,6 +552,10 @@ static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id)
        if (!board)
                return -EINVAL;
 
+       ret = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto out;
+
        ret = amba_request_regions(dev, NULL);
        if (ret) {
                printk(KERN_ERR "CLCD: unable to reserve regs region\n");
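
The amba-clcd hunk adds an early DMA capability check: dma_set_mask_and_coherent() combines dma_set_mask() and dma_set_coherent_mask(), and failing it in probe avoids allocating a frame buffer the device could not address. The same check in isolation, with the 32-bit mask taken from the hunk above:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative: bail out of probe early if the device cannot do 32-bit DMA. */
static int example_check_dma(struct device *dev)
{
        int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

        if (ret)
                dev_err(dev, "no suitable DMA configuration available\n");
        return ret;
}
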
index 94a403a9717ae9f84c941e334098bbc0c0713bbb..5d05555fe841d2a4eaac7d230e498e0c51f5d841 100644 (file)
@@ -21,6 +21,9 @@
 #include <asm/backlight.h>
 #endif
 
+static struct list_head backlight_dev_list;
+static struct mutex backlight_dev_list_mutex;
+
 static const char *const backlight_types[] = {
        [BACKLIGHT_RAW] = "raw",
        [BACKLIGHT_PLATFORM] = "platform",
@@ -349,10 +352,32 @@ struct backlight_device *backlight_device_register(const char *name,
        mutex_unlock(&pmac_backlight_mutex);
 #endif
 
+       mutex_lock(&backlight_dev_list_mutex);
+       list_add(&new_bd->entry, &backlight_dev_list);
+       mutex_unlock(&backlight_dev_list_mutex);
+
        return new_bd;
 }
 EXPORT_SYMBOL(backlight_device_register);
 
+bool backlight_device_registered(enum backlight_type type)
+{
+       bool found = false;
+       struct backlight_device *bd;
+
+       mutex_lock(&backlight_dev_list_mutex);
+       list_for_each_entry(bd, &backlight_dev_list, entry) {
+               if (bd->props.type == type) {
+                       found = true;
+                       break;
+               }
+       }
+       mutex_unlock(&backlight_dev_list_mutex);
+
+       return found;
+}
+EXPORT_SYMBOL(backlight_device_registered);
+
 /**
  * backlight_device_unregister - unregisters a backlight device object.
  * @bd: the backlight device object to be unregistered and freed.
@@ -364,6 +389,10 @@ void backlight_device_unregister(struct backlight_device *bd)
        if (!bd)
                return;
 
+       mutex_lock(&backlight_dev_list_mutex);
+       list_del(&bd->entry);
+       mutex_unlock(&backlight_dev_list_mutex);
+
 #ifdef CONFIG_PMAC_BACKLIGHT
        mutex_lock(&pmac_backlight_mutex);
        if (pmac_backlight == bd)
@@ -499,6 +528,8 @@ static int __init backlight_class_init(void)
 
        backlight_class->dev_groups = bl_device_groups;
        backlight_class->pm = &backlight_class_dev_pm_ops;
+       INIT_LIST_HEAD(&backlight_dev_list);
+       mutex_init(&backlight_dev_list_mutex);
        return 0;
 }
 
index 1f572c00a1bec2f13e68861e6dda971e6fef5d40..c444654fc33fb6f7e858824eb8ed816186bf3bda 100644 (file)
@@ -275,9 +275,8 @@ static inline s64 towards_target(struct virtio_balloon *vb)
        __le32 v;
        s64 target;
 
-       vb->vdev->config->get(vb->vdev,
-                             offsetof(struct virtio_balloon_config, num_pages),
-                             &v, sizeof(v));
+       virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, &v);
+
        target = le32_to_cpu(v);
        return target - vb->num_pages;
 }
@@ -286,9 +285,8 @@ static void update_balloon_size(struct virtio_balloon *vb)
 {
        __le32 actual = cpu_to_le32(vb->num_pages);
 
-       vb->vdev->config->set(vb->vdev,
-                             offsetof(struct virtio_balloon_config, actual),
-                             &actual, sizeof(actual));
+       virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
+                     &actual);
 }
 
 static int balloon(void *_vballoon)
@@ -513,7 +511,7 @@ static void virtballoon_remove(struct virtio_device *vdev)
        kfree(vb);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int virtballoon_freeze(struct virtio_device *vdev)
 {
        struct virtio_balloon *vb = vdev->priv;
@@ -556,7 +554,7 @@ static struct virtio_driver virtio_balloon_driver = {
        .probe =        virtballoon_probe,
        .remove =       virtballoon_remove,
        .config_changed = virtballoon_changed,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
        .freeze =       virtballoon_freeze,
        .restore =      virtballoon_restore,
 #endif
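
The virtio_balloon hunks switch from raw config->get()/config->set() calls to the virtio_cread()/virtio_cwrite() helpers, which take the device, the config struct type, the field name and a pointer and derive the offset and size themselves; the CONFIG_PM guards also tighten to CONFIG_PM_SLEEP since only freeze/restore are implemented. A sketch of the helper usage, mirroring the access pattern in the hunk:

#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_balloon.h>

/* Illustrative: read the host's target and report our actual page count. */
static s64 example_sync_balloon(struct virtio_device *vdev, u32 our_pages)
{
        __le32 num_pages, actual = cpu_to_le32(our_pages);

        virtio_cread(vdev, struct virtio_balloon_config, num_pages, &num_pages);
        virtio_cwrite(vdev, struct virtio_balloon_config, actual, &actual);

        return (s64)le32_to_cpu(num_pages) - our_pages;
}
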
index 6b4a4db4404d8f0e2fff5c2225fd40ccb00bc9ec..6547d46171b3814b3a14d381b41ca60812a0a629 100644 (file)
@@ -173,6 +173,8 @@ static inline int vring_add_indirect(struct vring_virtqueue *vq,
        head = vq->free_head;
        vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
        vq->vring.desc[head].addr = virt_to_phys(desc);
+       /* kmemleak gives a false positive, as it's hidden by virt_to_phys */
+       kmemleak_ignore(desc);
        vq->vring.desc[head].len = i * sizeof(struct vring_desc);
 
        /* Update free pointer */
index c7c64f18773d87d5f23e81ec90ba7d8bfaee504a..fa932c2f7d97276b66199eda06b6edb6565208bb 100644 (file)
@@ -613,6 +613,9 @@ static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
        sl = dev_to_w1_slave(dev);
        fops = sl->family->fops;
 
+       if (!fops)
+               return 0;
+
        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                /* if the family driver needs to initialize something... */
@@ -713,7 +716,10 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
        atomic_set(&sl->refcnt, 0);
        init_completion(&sl->released);
 
+       /* slave modules need to be loaded in a context with unlocked mutex */
+       mutex_unlock(&dev->mutex);
        request_module("w1-family-0x%0x", rn->family);
+       mutex_lock(&dev->mutex);
 
        spin_lock(&w1_flock);
        f = w1_family_registered(rn->family);
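
The second w1 hunk drops the master mutex around request_module() because the family module being loaded may itself need that mutex while registering, which would deadlock; the spin_lock and family lookup afterwards cover the window where the lock was dropped. The unlock-around-a-blocking-callout pattern as a generic sketch:

#include <linux/kmod.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_master_mutex);

/*
 * Illustrative: release a lock around request_module() so the freshly
 * loaded module can take the same lock from its init path. Called with
 * example_master_mutex held; any state derived before the unlock must
 * be re-validated afterwards.
 */
static void example_load_family(int family)
{
        mutex_unlock(&example_master_mutex);
        request_module("w1-family-0x%0x", family);
        mutex_lock(&example_master_mutex);
        /* ... re-check shared state that may have changed while unlocked ... */
}
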
index d1d53f301de7589960ea07322fdccde47fb3f7fa..6df632e0bb555d1841bae04c4a8ee5862a50896d 100644 (file)
@@ -418,8 +418,6 @@ config BFIN_WDT
 
 # FRV Architecture
 
-# H8300 Architecture
-
 # X86 (i386 + ia64 + x86_64) Architecture
 
 config ACQUIRE_WDT
index 6c5bb274d3cd22dfa4814e0c4a341e042dcd381c..8c7b8bcbbdc5b83d6f22584a144de9a3dbc3837e 100644 (file)
@@ -66,8 +66,6 @@ obj-$(CONFIG_BFIN_WDT) += bfin_wdt.o
 
 # FRV Architecture
 
-# H8300 Architecture
-
 # X86 (i386 + ia64 + x86_64) Architecture
 obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
 obj-$(CONFIG_ADVANTECH_WDT) += advantechwdt.o
index 5be5e3d14f794a3bdc446b78b5d0bb3fcb9b7fa9..19f3c3fc65f4c9af0ffa4a25cf8410e308a5fda8 100644 (file)
@@ -802,6 +802,12 @@ static int hpwdt_init_one(struct pci_dev *dev,
                return -ENODEV;
        }
 
+       /*
+        * Ignore all auxiliary iLO devices with the following PCI ID
+        */
+       if (dev->subsystem_device == 0x1979)
+               return -ENODEV;
+
        if (pci_enable_device(dev)) {
                dev_warn(&dev->dev,
                        "Not possible to enable PCI Device: 0x%x:0x%x.\n",
index 491419e0772a83f89ae34f16a96750d2349407d1..5c3d4df63e6835f534cb57a3eca724ce08c3ba47 100644 (file)
@@ -35,7 +35,7 @@
 #define KEMPLD_WDT_STAGE_TIMEOUT(x)    (0x1b + (x) * 4)
 #define KEMPLD_WDT_STAGE_CFG(x)                (0x18 + (x))
 #define STAGE_CFG_GET_PRESCALER(x)     (((x) & 0x30) >> 4)
-#define STAGE_CFG_SET_PRESCALER(x)     (((x) & 0x30) << 4)
+#define STAGE_CFG_SET_PRESCALER(x)     (((x) & 0x3) << 4)
 #define STAGE_CFG_PRESCALER_MASK       0x30
 #define STAGE_CFG_ACTION_MASK          0x7
 #define STAGE_CFG_ASSERT               (1 << 3)
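
The one-line kempld_wdt change is a mask fix: STAGE_CFG_SET_PRESCALER() takes a 2-bit prescaler value (0-3) and shifts it into bits 5:4, so masking with 0x30 before the shift discarded the value entirely; masking with 0x3 makes the GET and SET macros inverses of each other. A quick stand-alone check of the round trip:

#include <assert.h>

#define STAGE_CFG_GET_PRESCALER(x)      (((x) & 0x30) >> 4)
#define STAGE_CFG_SET_PRESCALER(x)      (((x) & 0x3) << 4)

int main(void)
{
        unsigned int p;

        for (p = 0; p < 4; p++)
                assert(STAGE_CFG_GET_PRESCALER(STAGE_CFG_SET_PRESCALER(p)) == p);
        return 0;
}
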
index 1f94b42764aabb95ab9d2cb38cda01611123df56..f6caa77151c74a4fc827d933715c92c57da5e0b7 100644 (file)
@@ -146,7 +146,7 @@ static const struct watchdog_ops sunxi_wdt_ops = {
        .set_timeout    = sunxi_wdt_set_timeout,
 };
 
-static int __init sunxi_wdt_probe(struct platform_device *pdev)
+static int sunxi_wdt_probe(struct platform_device *pdev)
 {
        struct sunxi_wdt_dev *sunxi_wdt;
        struct resource *res;
@@ -187,7 +187,7 @@ static int __init sunxi_wdt_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __exit sunxi_wdt_remove(struct platform_device *pdev)
+static int sunxi_wdt_remove(struct platform_device *pdev)
 {
        struct sunxi_wdt_dev *sunxi_wdt = platform_get_drvdata(pdev);
 
index 42913f131dc2b051cdb89db52e72726bb228dd61..c9b0c627fe7e6d8513dd4d43ba18cb65cfbba132 100644 (file)
@@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
 
        case WDIOC_GETSTATUS:
        case WDIOC_GETBOOTSTATUS:
-               return put_user(0, p);
+               error = put_user(0, p);
+               break;
 
        case WDIOC_KEEPALIVE:
                ts72xx_wdt_kick(wdt);
index a9ea73d6dcf311b7703af1225fb71698da2a4fc3..a69260f27555df85894618dc1647422971914dc4 100644 (file)
@@ -90,7 +90,7 @@ void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
 
        v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
                                                &v9fs_cache_session_index_def,
-                                               v9ses);
+                                               v9ses, true);
        p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
                 v9ses, v9ses->fscache);
 }
@@ -204,7 +204,7 @@ void v9fs_cache_inode_get_cookie(struct inode *inode)
        v9ses = v9fs_inode2v9ses(inode);
        v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
                                                  &v9fs_cache_inode_index_def,
-                                                 v9inode);
+                                                 v9inode, true);
 
        p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
                 inode, v9inode->fscache);
@@ -239,13 +239,12 @@ void v9fs_cache_inode_flush_cookie(struct inode *inode)
 void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
 {
        struct v9fs_inode *v9inode = V9FS_I(inode);
-       struct p9_fid *fid;
 
        if (!v9inode->fscache)
                return;
 
        spin_lock(&v9inode->fscache_lock);
-       fid = filp->private_data;
+
        if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
                v9fs_cache_inode_flush_cookie(inode);
        else
@@ -271,7 +270,7 @@ void v9fs_cache_inode_reset_cookie(struct inode *inode)
        v9ses = v9fs_inode2v9ses(inode);
        v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
                                                  &v9fs_cache_inode_index_def,
-                                                 v9inode);
+                                                 v9inode, true);
        p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
                 inode, old, v9inode->fscache);
 
index 40cc54ced5d97621b81bc96ab8b90f3644f72a4e..2f96754910950b59b26ac4a2046808cc7bf043c1 100644 (file)
@@ -101,6 +101,18 @@ static inline void v9fs_fscache_wait_on_page_write(struct inode *inode,
 
 #else /* CONFIG_9P_FSCACHE */
 
+static inline void v9fs_cache_inode_get_cookie(struct inode *inode)
+{
+}
+
+static inline void v9fs_cache_inode_put_cookie(struct inode *inode)
+{
+}
+
+static inline void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *file)
+{
+}
+
 static inline int v9fs_fscache_release_page(struct page *page,
                                            gfp_t gfp) {
        return 1;
index 9ff073f4090afee750e4058129427d4bd6f6117f..da0821bc05b9d2f56fb56e63c99a1b58ae57895d 100644 (file)
@@ -241,9 +241,8 @@ static int v9fs_launder_page(struct page *page)
  * v9fs_direct_IO - 9P address space operation for direct I/O
  * @rw: direction (read or write)
  * @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
+ * @iter: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
- * @nr_segs: size of iovec array
  *
  * The presence of v9fs_direct_IO() in the address space ops vector
  * allowes open() O_DIRECT flags which would have failed otherwise.
@@ -252,13 +251,12 @@ static int v9fs_launder_page(struct page *page)
  * the VFS gets them, so this method should never be called.
  *
  * Direct IO is not 'yet' supported in the cached mode. Hence when
- * this routine is called through generic_file_aio_read(), the read/write fails
- * with an error.
+ * this routine is called through generic_file_read_iter(), the read/write
+ * fails with an error.
  *
  */
 static ssize_t
-v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-              loff_t pos, unsigned long nr_segs)
+v9fs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        /*
         * FIXME
@@ -267,7 +265,7 @@ v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         */
        p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n",
                 iocb->ki_filp->f_path.dentry->d_name.name,
-                (long long)pos, nr_segs);
+                (long long)pos, iter->nr_segs);
 
        return -EINVAL;
 }
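
This hunk, together with the fs/Makefile addition of iov-iter.o and the adfs/affs/afs file_operations hunks below, belongs to the read_iter/write_iter conversion: the (iovec array, nr_segs) pair handed to ->direct_IO and the aio hooks is replaced by a struct iov_iter. A minimal sketch of the new ->direct_IO shape, assuming the four-argument signature used in this hunk:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uio.h>

/* Illustrative: an address_space_operations ->direct_IO stub in the new style. */
static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
                                 struct iov_iter *iter, loff_t pos)
{
        pr_debug("direct_IO %s at %lld, %lu segs\n",
                 rw == WRITE ? "write" : "read",
                 (long long)pos, iter->nr_segs);
        return -EINVAL;         /* direct I/O unsupported in this sketch */
}
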
index aa5ecf479a57bf1614100011de047d8c184c620e..ec99a96f86eb70b093ae4fc1aed4a314b00eedce 100644 (file)
@@ -105,10 +105,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
                v9inode->writeback_fid = (void *) fid;
        }
        mutex_unlock(&v9inode->v_mutex);
-#ifdef CONFIG_9P_FSCACHE
        if (v9ses->cache)
                v9fs_cache_inode_set_cookie(inode, file);
-#endif
        return 0;
 out_error:
        p9_client_clunk(file->private_data);
@@ -463,14 +461,12 @@ v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid,
        int n;
        loff_t i_size;
        size_t total = 0;
-       struct p9_client *clnt;
        loff_t origin = *offset;
        unsigned long pg_start, pg_end;
 
        p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n",
                 data, (int)count, (int)*offset);
 
-       clnt = fid->clnt;
        do {
                n = p9_client_write(fid, NULL, data+total, origin+total, count);
                if (n <= 0)
@@ -743,8 +739,8 @@ const struct file_operations v9fs_cached_file_operations = {
        .llseek = generic_file_llseek,
        .read = v9fs_cached_file_read,
        .write = v9fs_cached_file_write,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock,
@@ -756,8 +752,8 @@ const struct file_operations v9fs_cached_file_operations_dotl = {
        .llseek = generic_file_llseek,
        .read = v9fs_cached_file_read,
        .write = v9fs_cached_file_write,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .open = v9fs_file_open,
        .release = v9fs_dir_release,
        .lock = v9fs_file_lock_dotl,
index 94de6d1482e2e076c4b3d10451c39fc245cce6ef..af7d531bdecdedcc4e7cef7a99dc36347b1c1163 100644 (file)
@@ -448,9 +448,7 @@ void v9fs_evict_inode(struct inode *inode)
        clear_inode(inode);
        filemap_fdatawrite(inode->i_mapping);
 
-#ifdef CONFIG_9P_FSCACHE
        v9fs_cache_inode_put_cookie(inode);
-#endif
        /* clunk the fid stashed in writeback_fid */
        if (v9inode->writeback_fid) {
                p9_client_clunk(v9inode->writeback_fid);
@@ -531,9 +529,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
                goto error;
 
        v9fs_stat2inode(st, inode, sb);
-#ifdef CONFIG_9P_FSCACHE
        v9fs_cache_inode_get_cookie(inode);
-#endif
        unlock_new_inode(inode);
        return inode;
 error:
@@ -783,7 +779,6 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
                                      unsigned int flags)
 {
        struct dentry *res;
-       struct super_block *sb;
        struct v9fs_session_info *v9ses;
        struct p9_fid *dfid, *fid;
        struct inode *inode;
@@ -795,7 +790,6 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);
 
-       sb = dir->i_sb;
        v9ses = v9fs_inode2v9ses(dir);
        /* We can walk d_parent because we hold the dir->i_mutex */
        dfid = v9fs_fid_lookup(dentry->d_parent);
@@ -867,7 +861,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
                return finish_no_open(file, res);
 
        err = 0;
-       fid = NULL;
+
        v9ses = v9fs_inode2v9ses(dir);
        perm = unixmode2p9mode(v9ses, mode);
        fid = v9fs_create(v9ses, dir, dentry, NULL, perm,
@@ -905,10 +899,8 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
                goto error;
 
        file->private_data = fid;
-#ifdef CONFIG_9P_FSCACHE
        if (v9ses->cache)
                v9fs_cache_inode_set_cookie(dentry->d_inode, file);
-#endif
 
        *opened |= FILE_CREATED;
 out:
index a7c481402c4654416106d22d1ed7d85f65f6f8e1..ecacec098fbb44784ee83d5064de14a33a44e6f6 100644 (file)
@@ -141,9 +141,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
                goto error;
 
        v9fs_stat2inode_dotl(st, inode);
-#ifdef CONFIG_9P_FSCACHE
        v9fs_cache_inode_get_cookie(inode);
-#endif
        retval = v9fs_get_acl(inode, fid);
        if (retval)
                goto error;
@@ -355,10 +353,8 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
        if (err)
                goto err_clunk_old_fid;
        file->private_data = ofid;
-#ifdef CONFIG_9P_FSCACHE
        if (v9ses->cache)
                v9fs_cache_inode_set_cookie(inode, file);
-#endif
        *opened |= FILE_CREATED;
 out:
        v9fs_put_acl(dacl, pacl);
@@ -477,13 +473,11 @@ static int
 v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
                 struct kstat *stat)
 {
-       int err;
        struct v9fs_session_info *v9ses;
        struct p9_fid *fid;
        struct p9_stat_dotl *st;
 
        p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry);
-       err = -EPERM;
        v9ses = v9fs_dentry2v9ses(dentry);
        if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
                generic_fillattr(dentry->d_inode, stat);
@@ -560,7 +554,6 @@ static int v9fs_mapped_iattr_valid(int iattr_valid)
 int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
 {
        int retval;
-       struct v9fs_session_info *v9ses;
        struct p9_fid *fid;
        struct p9_iattr_dotl p9attr;
        struct inode *inode = dentry->d_inode;
@@ -581,8 +574,6 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
        p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
        p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
 
-       retval = -EPERM;
-       v9ses = v9fs_dentry2v9ses(dentry);
        fid = v9fs_fid_lookup(dentry);
        if (IS_ERR(fid))
                return PTR_ERR(fid);
index 4fe6df3ec28fe5392b680e853b0fbfef6313179a..1afa0e020082f8279ce3f8708131c4f74ba49941 100644 (file)
@@ -11,7 +11,7 @@ obj-y :=      open.o read_write.o file_table.o super.o \
                attr.o bad_inode.o file.o filesystems.o namespace.o \
                seq_file.o xattr.o libfs.o fs-writeback.o \
                pnode.o splice.o sync.o utimes.o \
-               stack.o fs_struct.o statfs.o
+               stack.o fs_struct.o statfs.o iov-iter.o
 
 ifeq ($(CONFIG_BLOCK),y)
 obj-y +=       buffer.o bio.o block_dev.o direct-io.o mpage.o ioprio.o
index a36da5382b40dc9c09ab6aa59762d9c1a7808b72..da1e02161ac3386da0489d32834ae932c46bac72 100644 (file)
 const struct file_operations adfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .splice_read    = generic_file_splice_read,
 };
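
The adfs hunk above (and the affs/bfs hunks that follow) show the mechanical part of the conversion: .aio_read/.aio_write are replaced by .read_iter/.write_iter wired to the generic iov_iter helpers, while the plain .read/.write entries keep using the sync wrappers. A hedged sketch of the same wiring for a hypothetical filesystem, assuming this tree's prototypes:

#include <linux/fs.h>

/* illustrative only; "example" stands in for a real filesystem */
const struct file_operations example_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,		/* sync path still goes through the wrapper */
	.read_iter	= generic_file_read_iter,
	.write		= do_sync_write,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
};
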
 
index 8669b6ecddee4cc030e21f39358ce2cf5058bddb..664f743c2d8d089cfdbc3a84ac5916f0867650fb 100644 (file)
@@ -28,9 +28,9 @@ static int affs_file_release(struct inode *inode, struct file *filp);
 const struct file_operations affs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = affs_file_open,
        .release        = affs_file_release,
index 3c090b7555ea8f3a095e0504a69cbd696db13ab5..ca0a3cf93791879578121b842891e97e5b0ffb1e 100644 (file)
@@ -179,7 +179,7 @@ struct afs_cell *afs_cell_create(const char *name, unsigned namesz,
        /* put it up for caching (this never returns an error) */
        cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
                                             &afs_cell_cache_index_def,
-                                            cell);
+                                            cell, true);
 #endif
 
        /* add to the cell lists */
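
fscache_acquire_cookie() grows a trailing boolean in these hunks; judging by the call sites, it reads as an "enable immediately" flag, which is why every existing caller passes true. A hedged acquire/release sketch under that assumption (the cookie definition and netfs data are placeholders, not real API objects):

#include <linux/fscache.h>

static struct fscache_cookie *
example_acquire_cookie(struct fscache_cookie *parent,
		       const struct fscache_cookie_def *def,
		       void *netfs_data)
{
	/* true: assumed to mean the cookie is usable for I/O straight away */
	return fscache_acquire_cookie(parent, def, netfs_data, true);
}

static void example_release_cookie(struct fscache_cookie *cookie)
{
	/* false: keep the cached data rather than retiring it */
	fscache_relinquish_cookie(cookie, false);
}
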
index 66d50fe2ee459a887511381e8e375db72d2bf1f3..3b71622e40f44a7f8d98e4794a688dd224713029 100644 (file)
@@ -33,8 +33,8 @@ const struct file_operations afs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = afs_file_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = afs_file_write,
        .mmap           = generic_file_readonly_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = afs_fsync,
index 789bc253b5f63c2d3c9abfdd216e0778b140a765..ce25d755b7aa16d68b131dd4944898e560e0d8e3 100644 (file)
@@ -259,7 +259,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
 #ifdef CONFIG_AFS_FSCACHE
        vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
                                              &afs_vnode_cache_index_def,
-                                             vnode);
+                                             vnode, true);
 #endif
 
        ret = afs_inode_map_status(vnode, key);
index a306bb6d88d9937badc2a1df1462d3bb0f469829..9c048ffac900f0fc89ecdb045449ef38d907c875 100644 (file)
@@ -747,8 +747,7 @@ extern int afs_write_end(struct file *file, struct address_space *mapping,
 extern int afs_writepage(struct page *, struct writeback_control *);
 extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
-extern ssize_t afs_file_write(struct kiocb *, const struct iovec *,
-                             unsigned long, loff_t);
+extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *, loff_t);
 extern int afs_writeback_all(struct afs_vnode *);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 
index 57bcb1596530892e91ebf39fd12abfee0fe2dead..b6df2e83809f4ac81ed5b3d02f944ac7f32d9de8 100644 (file)
@@ -308,7 +308,8 @@ static int afs_vlocation_fill_in_record(struct afs_vlocation *vl,
        /* see if we have an in-cache copy (will set vl->valid if there is) */
 #ifdef CONFIG_AFS_FSCACHE
        vl->cache = fscache_acquire_cookie(vl->cell->cache,
-                                          &afs_vlocation_cache_index_def, vl);
+                                          &afs_vlocation_cache_index_def, vl,
+                                          true);
 #endif
 
        if (vl->valid) {
index 401eeb21869ff0657b966f4753fd951bc7897333..2b607257820c8ed7b383e486f3a7870052974ae4 100644 (file)
@@ -131,7 +131,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
 #ifdef CONFIG_AFS_FSCACHE
        volume->cache = fscache_acquire_cookie(vlocation->cache,
                                               &afs_volume_cache_index_def,
-                                              volume);
+                                              volume, true);
 #endif
        afs_get_vlocation(vlocation);
        volume->vlocation = vlocation;
index a890db4b9898fc1d888c5e7285da55db85e4da54..9fa2f596430accaecd06970f83209dba6e80425e 100644 (file)
@@ -625,15 +625,14 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
 /*
  * write to an AFS file
  */
-ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
-                      unsigned long nr_segs, loff_t pos)
+ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
        ssize_t result;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
 
        _enter("{%x.%u},{%zu},%lu,",
-              vnode->fid.vid, vnode->fid.vnode, count, nr_segs);
+              vnode->fid.vid, vnode->fid.vnode, count, iter->nr_segs);
 
        if (IS_SWAPFILE(&vnode->vfs_inode)) {
                printk(KERN_INFO
@@ -644,7 +643,7 @@ ssize_t afs_file_write(struct kiocb *iocb, const struct iovec *iov,
        if (!count)
                return 0;
 
-       result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+       result = generic_file_write_iter(iocb, iter, pos);
        if (IS_ERR_VALUE(result)) {
                _leave(" = %zd", result);
                return result;
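
afs_file_write() now takes an iov_iter, and the request size comes from iov_iter_count() instead of iov_length(). A minimal wrapper in the same style, assuming the (kiocb, iov_iter, loff_t) prototype used throughout this series and that <linux/fs.h> provides the iov_iter declarations in this tree:

#include <linux/fs.h>

static ssize_t example_file_write(struct kiocb *iocb, struct iov_iter *iter,
				  loff_t pos)
{
	size_t count = iov_iter_count(iter);	/* total bytes across all segments */

	if (!count)				/* zero-length writes are a no-op */
		return 0;

	return generic_file_write_iter(iocb, iter, pos);
}
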
index 067e3d340c353e614787fe5dbdb205dedb7b9ec4..a5630703eb565f4c0d2062bbe7b3ab1617dc2213 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -877,6 +877,10 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
                iocb->ki_ctx = ERR_PTR(-EXDEV);
                wake_up_process(iocb->ki_obj.tsk);
                return;
+       } else if (is_kernel_kiocb(iocb)) {
+               iocb->ki_obj.complete(iocb->ki_user_data, res);
+               aio_kernel_free(iocb);
+               return;
        }
 
        /*
@@ -1195,13 +1199,55 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
        return 0;
 }
 
+static ssize_t aio_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct file *file = iocb->ki_filp;
+       ssize_t ret;
+
+       if (unlikely(!is_kernel_kiocb(iocb)))
+               return -EINVAL;
+
+       if (unlikely(!(file->f_mode & FMODE_READ)))
+               return -EBADF;
+
+       ret = security_file_permission(file, MAY_READ);
+       if (unlikely(ret))
+               return ret;
+
+       if (!file->f_op->read_iter)
+               return -EINVAL;
+
+       return file->f_op->read_iter(iocb, iter, iocb->ki_pos);
+}
+
+static ssize_t aio_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct file *file = iocb->ki_filp;
+       ssize_t ret;
+
+       if (unlikely(!is_kernel_kiocb(iocb)))
+               return -EINVAL;
+
+       if (unlikely(!(file->f_mode & FMODE_WRITE)))
+               return -EBADF;
+
+       ret = security_file_permission(file, MAY_WRITE);
+       if (unlikely(ret))
+               return ret;
+
+       if (!file->f_op->write_iter)
+               return -EINVAL;
+
+       return file->f_op->write_iter(iocb, iter, iocb->ki_pos);
+}
+
 /*
  * aio_setup_iocb:
  *     Performs the initial checks and aio retry method
  *     setup for the kiocb at the time of io submission.
  */
 static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
-                           char __user *buf, bool compat)
+                           void *buf, bool compat)
 {
        struct file *file = req->ki_filp;
        ssize_t ret;
@@ -1216,14 +1262,14 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
        case IOCB_CMD_PREADV:
                mode    = FMODE_READ;
                rw      = READ;
-               rw_op   = file->f_op->aio_read;
+               rw_op   = do_aio_read;
                goto rw_common;
 
        case IOCB_CMD_PWRITE:
        case IOCB_CMD_PWRITEV:
                mode    = FMODE_WRITE;
                rw      = WRITE;
-               rw_op   = file->f_op->aio_write;
+               rw_op   = do_aio_write;
                goto rw_common;
 rw_common:
                if (unlikely(!(file->f_mode & mode)))
@@ -1266,6 +1312,14 @@ rw_common:
                        file_end_write(file);
                break;
 
+       case IOCB_CMD_READ_ITER:
+               ret = aio_read_iter(req, buf);
+               break;
+
+       case IOCB_CMD_WRITE_ITER:
+               ret = aio_write_iter(req, buf);
+               break;
+
        case IOCB_CMD_FDSYNC:
                if (!file->f_op->aio_fsync)
                        return -EINVAL;
@@ -1303,6 +1357,80 @@ rw_common:
        return 0;
 }
 
+/*
+ * This allocates an iocb that will be used to submit and track completion of
+ * an IO that is issued from kernel space.
+ *
+ * The caller is expected to call the appropriate aio_kernel_init_() functions
+ * and then call aio_kernel_submit().  From that point forward progress is
+ * guaranteed by the file system aio method.  Eventually the caller's
+ * completion callback will be called.
+ *
+ * These iocbs are special.  They don't have a context, we don't limit the
+ * number pending, and they can't be canceled.
+ */
+struct kiocb *aio_kernel_alloc(gfp_t gfp)
+{
+       return kzalloc(sizeof(struct kiocb), gfp);
+}
+EXPORT_SYMBOL_GPL(aio_kernel_alloc);
+
+void aio_kernel_free(struct kiocb *iocb)
+{
+       kfree(iocb);
+}
+EXPORT_SYMBOL_GPL(aio_kernel_free);
+
+/*
+ * ptr and count can be a buff and bytes or an iov and segs.
+ */
+void aio_kernel_init_rw(struct kiocb *iocb, struct file *filp,
+                       size_t nr, loff_t off)
+{
+       iocb->ki_filp = filp;
+       iocb->ki_nbytes = nr;
+       iocb->ki_pos = off;
+       iocb->ki_ctx = (void *)-1;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_init_rw);
+
+void aio_kernel_init_callback(struct kiocb *iocb,
+                             void (*complete)(u64 user_data, long res),
+                             u64 user_data)
+{
+       iocb->ki_obj.complete = complete;
+       iocb->ki_user_data = user_data;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_init_callback);
+
+/*
+ * The iocb is our responsibility once this is called.  The caller must not
+ * reference it.
+ *
+ * Callers must be prepared for their iocb completion callback to be called the
+ * moment they enter this function.  The completion callback may be called from
+ * any context.
+ *
+ * Returns: 0: the iocb completion callback will be called with the op result
+ * negative errno: the operation was not submitted and the iocb was freed
+ */
+int aio_kernel_submit(struct kiocb *iocb, unsigned op, void *ptr)
+{
+       int ret;
+
+       BUG_ON(!is_kernel_kiocb(iocb));
+       BUG_ON(!iocb->ki_obj.complete);
+       BUG_ON(!iocb->ki_filp);
+
+       ret = aio_run_iocb(iocb, op, ptr, 0);
+
+       if (ret)
+               aio_kernel_free(iocb);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(aio_kernel_submit);
+
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         struct iocb *iocb, bool compat)
 {
@@ -1362,7 +1490,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        req->ki_nbytes = iocb->aio_nbytes;
 
        ret = aio_run_iocb(req, iocb->aio_lio_opcode,
-                          (char __user *)(unsigned long)iocb->aio_buf,
+                          (void *)(unsigned long)iocb->aio_buf,
                           compat);
        if (ret)
                goto out_put_req;
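
The in-kernel submission path added above (aio_kernel_alloc/init/submit) is only described in comments, so here is a hedged usage sketch. The opcode name, the completion tag and the header placement are assumptions based on this series, and error handling is kept minimal:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/aio.h>

static void example_aio_done(u64 user_data, long res)
{
	pr_info("kernel aio %llu finished: %ld\n",
		(unsigned long long)user_data, res);
}

static int example_submit_read(struct file *filp, struct iov_iter *iter,
			       loff_t off)
{
	struct kiocb *iocb = aio_kernel_alloc(GFP_KERNEL);

	if (!iocb)
		return -ENOMEM;

	aio_kernel_init_rw(iocb, filp, iov_iter_count(iter), off);
	aio_kernel_init_callback(iocb, example_aio_done, 0 /* tag */);

	/* on failure the iocb has already been freed for us */
	return aio_kernel_submit(iocb, IOCB_CMD_READ_ITER, iter);
}

Once aio_kernel_submit() returns 0, the completion callback owns the result; it may run before the submit call even returns.
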
index 7c93953030fbe5eda13d76b6a8c53d6f2a31902d..38651e5da183f80b60763c7423a1171e1dfc9d6f 100644 (file)
@@ -39,12 +39,24 @@ static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
        return -EIO;
 }
 
+static ssize_t bad_file_read_iter(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos)
+{
+       return -EIO;
+}
+
 static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                        unsigned long nr_segs, loff_t pos)
 {
        return -EIO;
 }
 
+static ssize_t bad_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos)
+{
+       return -EIO;
+}
+
 static int bad_file_readdir(struct file *file, struct dir_context *ctx)
 {
        return -EIO;
@@ -151,7 +163,9 @@ static const struct file_operations bad_file_ops =
        .read           = bad_file_read,
        .write          = bad_file_write,
        .aio_read       = bad_file_aio_read,
+       .read_iter      = bad_file_read_iter,
        .aio_write      = bad_file_aio_write,
+       .write_iter     = bad_file_write_iter,
        .iterate        = bad_file_readdir,
        .poll           = bad_file_poll,
        .unlocked_ioctl = bad_file_unlocked_ioctl,
index e9c75e20db32d43b550506f5f8f62b2656f760e7..daa15d6ba45077755d2bc859cfffb44f6a82bfd0 100644 (file)
@@ -42,7 +42,7 @@ static void befs_destroy_inode(struct inode *inode);
 static int befs_init_inodecache(void);
 static void befs_destroy_inodecache(void);
 static void *befs_follow_link(struct dentry *, struct nameidata *);
-static void befs_put_link(struct dentry *, struct nameidata *, void *);
+static void *befs_fast_follow_link(struct dentry *, struct nameidata *);
 static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
                        char **out, int *out_len);
 static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
@@ -79,10 +79,15 @@ static const struct address_space_operations befs_aops = {
        .bmap           = befs_bmap,
 };
 
+static const struct inode_operations befs_fast_symlink_inode_operations = {
+       .readlink       = generic_readlink,
+       .follow_link    = befs_fast_follow_link,
+};
+
 static const struct inode_operations befs_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = befs_follow_link,
-       .put_link       = befs_put_link,
+       .put_link       = kfree_put_link,
 };
 
 /* 
@@ -411,7 +416,10 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
                inode->i_op = &befs_dir_inode_operations;
                inode->i_fop = &befs_dir_operations;
        } else if (S_ISLNK(inode->i_mode)) {
-               inode->i_op = &befs_symlink_inode_operations;
+               if (befs_ino->i_flags & BEFS_LONG_SYMLINK)
+                       inode->i_op = &befs_symlink_inode_operations;
+               else
+                       inode->i_op = &befs_fast_symlink_inode_operations;
        } else {
                befs_error(sb, "Inode %lu is not a regular file, "
                           "directory or symlink. THAT IS WRONG! BeFS has no "
@@ -477,47 +485,40 @@ befs_destroy_inodecache(void)
 static void *
 befs_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
+       struct super_block *sb = dentry->d_sb;
        befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
+       befs_data_stream *data = &befs_ino->i_data.ds;
+       befs_off_t len = data->size;
        char *link;
 
-       if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
-               struct super_block *sb = dentry->d_sb;
-               befs_data_stream *data = &befs_ino->i_data.ds;
-               befs_off_t len = data->size;
+       if (len == 0) {
+               befs_error(sb, "Long symlink with illegal length");
+               link = ERR_PTR(-EIO);
+       } else {
+               befs_debug(sb, "Follow long symlink");
 
-               if (len == 0) {
-                       befs_error(sb, "Long symlink with illegal length");
+               link = kmalloc(len, GFP_NOFS);
+               if (!link) {
+                       link = ERR_PTR(-ENOMEM);
+               } else if (befs_read_lsymlink(sb, data, link, len) != len) {
+                       kfree(link);
+                       befs_error(sb, "Failed to read entire long symlink");
                        link = ERR_PTR(-EIO);
                } else {
-                       befs_debug(sb, "Follow long symlink");
-
-                       link = kmalloc(len, GFP_NOFS);
-                       if (!link) {
-                               link = ERR_PTR(-ENOMEM);
-                       } else if (befs_read_lsymlink(sb, data, link, len) != len) {
-                               kfree(link);
-                               befs_error(sb, "Failed to read entire long symlink");
-                               link = ERR_PTR(-EIO);
-                       } else {
-                               link[len - 1] = '\0';
-                       }
+                       link[len - 1] = '\0';
                }
-       } else {
-               link = befs_ino->i_data.symlink;
        }
-
        nd_set_link(nd, link);
        return NULL;
 }
 
-static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+
+static void *
+befs_fast_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
-       if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
-               char *link = nd_get_link(nd);
-               if (!IS_ERR(link))
-                       kfree(link);
-       }
+       nd_set_link(nd, befs_ino->i_data.symlink);
+       return NULL;
 }
 
 /*
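
The befs change above splits symlink handling into two inode_operations tables: fast symlinks point straight at the in-inode string, while long symlinks kmalloc a buffer and rely on kfree_put_link() to release it. A hedged sketch of the same split for a hypothetical filesystem (the follow_link bodies are assumed to live elsewhere in that filesystem):

#include <linux/fs.h>
#include <linux/namei.h>

void *example_follow_link(struct dentry *dentry, struct nameidata *nd);
void *example_fast_follow_link(struct dentry *dentry, struct nameidata *nd);

/* long symlink: ->follow_link kmallocs the target, so free it afterwards */
static const struct inode_operations example_symlink_iops = {
	.readlink	= generic_readlink,
	.follow_link	= example_follow_link,
	.put_link	= kfree_put_link,	/* frees what nd_set_link() received */
};

/* fast symlink: target lives inside the inode, nothing to free */
static const struct inode_operations example_fast_symlink_iops = {
	.readlink	= generic_readlink,
	.follow_link	= example_fast_follow_link,
};
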
index ae28922183357d4c0e4d491a452919e125ba8bc3..d150660d598bf8878a1513427946958a44930497 100644 (file)
@@ -24,9 +24,9 @@
 const struct file_operations bfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
 };
index 1e86823a9cbda37f8451269d91a50f90d00c9566..34d9da0e6b74030d6f708f80d35f1971f03e1ca6 100644 (file)
@@ -165,14 +165,14 @@ blkdev_get_block(struct inode *inode, sector_t iblock,
 }
 
 static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs)
+blkdev_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
 
-       return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iov, offset,
-                                   nr_segs, blkdev_get_block, NULL, NULL, 0);
+       return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
+                                   offset, blkdev_get_block, NULL, NULL, 0);
 }
 
 int __sync_blockdev(struct block_device *bdev, int wait)
@@ -1508,8 +1508,7 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
  * Does not take i_mutex for the write and thus is not for general purpose
  * use.
  */
-ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                        unsigned long nr_segs, loff_t pos)
+ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct blk_plug plug;
@@ -1518,7 +1517,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
        BUG_ON(iocb->ki_pos != pos);
 
        blk_start_plug(&plug);
-       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
        if (ret > 0) {
                ssize_t err;
 
@@ -1529,10 +1528,10 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
        blk_finish_plug(&plug);
        return ret;
 }
-EXPORT_SYMBOL_GPL(blkdev_aio_write);
+EXPORT_SYMBOL_GPL(blkdev_write_iter);
 
-static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                        unsigned long nr_segs, loff_t pos)
+static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *iter,
+                        loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct inode *bd_inode = file->f_mapping->host;
@@ -1543,8 +1542,8 @@ static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
 
        size -= pos;
        if (size < iocb->ki_nbytes)
-               nr_segs = iov_shorten((struct iovec *)iov, nr_segs, size);
-       return generic_file_aio_read(iocb, iov, nr_segs, pos);
+               iov_iter_shorten(iter, size);
+       return generic_file_read_iter(iocb, iter, pos);
 }
 
 /*
@@ -1578,8 +1577,8 @@ const struct file_operations def_blk_fops = {
        .llseek         = block_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = blkdev_aio_read,
-       .aio_write      = blkdev_aio_write,
+       .read_iter      = blkdev_read_iter,
+       .write_iter     = blkdev_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = block_ioctl,
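
The block-device conversion also moves ->direct_IO to the iov_iter form. A hedged sketch of an address_space direct_IO method in that shape, forwarding to __blockdev_direct_IO() the same way blkdev_direct_IO() does above; example_get_block is a placeholder for the filesystem's block-mapping callback:

#include <linux/fs.h>
#include <linux/buffer_head.h>

int example_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create);

static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
				 struct iov_iter *iter, loff_t offset)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* nr_segs is gone: the iov_iter carries the segment list itself */
	return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				    iter, offset, example_get_block,
				    NULL, NULL, 0);
}
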
index 0506f40ede8331f8ab7b20a4d2778e49c886fe91..a80a2ccb955c97fd6b1a26d37ce4ff233c0e0c73 100644 (file)
@@ -3105,11 +3105,6 @@ static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
        ((unsigned long)(btrfs_leaf_data(leaf) + \
        btrfs_item_offset_nr(leaf, slot)))
 
-static inline struct dentry *fdentry(struct file *file)
-{
-       return file->f_path.dentry;
-}
-
 static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
 {
        return ((space_info->flags & BTRFS_BLOCK_GROUP_METADATA) &&
index 4ae17ed13b3274f228c8879383ec5124ca5f05dd..62176ad89846173e4d4987da257166fb90b971ff 100644 (file)
@@ -1561,8 +1561,9 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
        return ret;
 }
 
-struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
-                                             struct btrfs_key *location)
+struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+                                    struct btrfs_key *location,
+                                    bool check_ref)
 {
        struct btrfs_root *root;
        int ret;
@@ -1586,7 +1587,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
 again:
        root = btrfs_lookup_fs_root(fs_info, location->objectid);
        if (root) {
-               if (btrfs_root_refs(&root->root_item) == 0)
+               if (check_ref && btrfs_root_refs(&root->root_item) == 0)
                        return ERR_PTR(-ENOENT);
                return root;
        }
@@ -1595,7 +1596,7 @@ again:
        if (IS_ERR(root))
                return root;
 
-       if (btrfs_root_refs(&root->root_item) == 0) {
+       if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
                ret = -ENOENT;
                goto fail;
        }
index b71acd6e1e5b1941e75ed4c5c056d47268cdc407..5ce2a7da8b113fef13456687fdf4243fa9f1c2ba 100644 (file)
@@ -68,8 +68,17 @@ struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
 int btrfs_init_fs_root(struct btrfs_root *root);
 int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
                         struct btrfs_root *root);
-struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
-                                             struct btrfs_key *location);
+
+struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
+                                    struct btrfs_key *key,
+                                    bool check_ref);
+static inline struct btrfs_root *
+btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
+                          struct btrfs_key *location)
+{
+       return btrfs_get_fs_root(fs_info, location, true);
+}
+
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
 void btrfs_btree_balance_dirty(struct btrfs_root *root);
 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
index 22bda32acb893a09aa0a0472d8ab78c20c12fbdb..51731b76900de55e8350d5795feacc61a9050d02 100644 (file)
@@ -1490,10 +1490,8 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
                cur_start = state->end + 1;
                node = rb_next(node);
                total_bytes += state->end - state->start + 1;
-               if (total_bytes >= max_bytes) {
-                       *end = *start + max_bytes - 1;
+               if (total_bytes >= max_bytes)
                        break;
-               }
                if (!node)
                        break;
        }
@@ -1635,10 +1633,9 @@ again:
 
        /*
         * make sure to limit the number of pages we try to lock down
-        * if we're looping.
         */
-       if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
-               delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;
+       if (delalloc_end + 1 - delalloc_start > max_bytes)
+               delalloc_end = delalloc_start + max_bytes - 1;
 
        /* step two, lock all the pages after the page that has start */
        ret = lock_delalloc_pages(inode, locked_page,
@@ -1649,8 +1646,7 @@ again:
                 */
                free_extent_state(cached_state);
                if (!loops) {
-                       unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
-                       max_bytes = PAGE_CACHE_SIZE - offset;
+                       max_bytes = PAGE_CACHE_SIZE;
                        loops = 1;
                        goto again;
                } else {
index 72da4df53c9a224d7a5106a907fc4ba4e08f5ebb..5e70fc2cef27f886a8dbe65dedc13a5cba0cf094 100644 (file)
@@ -453,7 +453,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                write_bytes -= copied;
                total_copied += copied;
 
-               /* Return to btrfs_file_aio_write to fault page */
+               /* Return to btrfs_file_write_iter to fault page */
                if (unlikely(copied == 0))
                        break;
 
@@ -1557,27 +1557,23 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 }
 
 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
-                                   const struct iovec *iov,
-                                   unsigned long nr_segs, loff_t pos,
-                                   loff_t *ppos, size_t count, size_t ocount)
+                                    struct iov_iter *iter, loff_t pos,
+                                   loff_t *ppos, size_t count)
 {
        struct file *file = iocb->ki_filp;
-       struct iov_iter i;
        ssize_t written;
        ssize_t written_buffered;
        loff_t endbyte;
        int err;
 
-       written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
-                                           count, ocount);
+       written = generic_file_direct_write_iter(iocb, iter, pos, ppos, count);
 
        if (written < 0 || written == count)
                return written;
 
        pos += written;
        count -= written;
-       iov_iter_init(&i, iov, nr_segs, count, written);
-       written_buffered = __btrfs_buffered_write(file, &i, pos);
+       written_buffered = __btrfs_buffered_write(file, iter, pos);
        if (written_buffered < 0) {
                err = written_buffered;
                goto out;
@@ -1612,9 +1608,8 @@ static void update_time_for_write(struct inode *inode)
                inode_inc_iversion(inode);
 }
 
-static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
-                                   const struct iovec *iov,
-                                   unsigned long nr_segs, loff_t pos)
+static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+                                    struct iov_iter *iter, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
@@ -1623,17 +1618,12 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        u64 start_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
-       size_t count, ocount;
+       size_t count;
        bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
 
        mutex_lock(&inode->i_mutex);
 
-       err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
-       if (err) {
-               mutex_unlock(&inode->i_mutex);
-               goto out;
-       }
-       count = ocount;
+       count = iov_iter_count(iter);
 
        current->backing_dev_info = inode->i_mapping->backing_dev_info;
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
@@ -1686,14 +1676,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                atomic_inc(&BTRFS_I(inode)->sync_writers);
 
        if (unlikely(file->f_flags & O_DIRECT)) {
-               num_written = __btrfs_direct_write(iocb, iov, nr_segs,
-                                                  pos, ppos, count, ocount);
+               num_written = __btrfs_direct_write(iocb, iter, pos, ppos,
+                                                  count);
        } else {
-               struct iov_iter i;
-
-               iov_iter_init(&i, iov, nr_segs, count, num_written);
-
-               num_written = __btrfs_buffered_write(file, &i, pos);
+               num_written = __btrfs_buffered_write(file, iter, pos);
                if (num_written > 0)
                        *ppos = pos + num_written;
        }
@@ -2552,9 +2538,9 @@ const struct file_operations btrfs_file_operations = {
        .llseek         = btrfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
        .splice_read    = generic_file_splice_read,
-       .aio_write      = btrfs_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = btrfs_file_write_iter,
        .mmap           = btrfs_file_mmap,
        .open           = generic_file_open,
        .release        = btrfs_release_file,
index 22ebc13b6c992a0755513253e17f9dd7f20b9706..e900216d89d0ab478e4f53f45b32c5c142018fd9 100644 (file)
@@ -6437,6 +6437,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
 
        if (btrfs_extent_readonly(root, disk_bytenr))
                goto out;
+       btrfs_release_path(path);
 
        /*
         * look for other files referencing this extent, if we
@@ -7154,8 +7155,7 @@ free_ordered:
 }
 
 static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+                       struct iov_iter *iter, loff_t offset)
 {
        int seg;
        int i;
@@ -7169,35 +7169,50 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io
                goto out;
 
        /* Check the memory alignment.  Blocks cannot straddle pages */
-       for (seg = 0; seg < nr_segs; seg++) {
-               addr = (unsigned long)iov[seg].iov_base;
-               size = iov[seg].iov_len;
-               end += size;
-               if ((addr & blocksize_mask) || (size & blocksize_mask))
-                       goto out;
+       if (iov_iter_has_iovec(iter)) {
+               const struct iovec *iov = iov_iter_iovec(iter);
+
+               for (seg = 0; seg < iter->nr_segs; seg++) {
+                       addr = (unsigned long)iov[seg].iov_base;
+                       size = iov[seg].iov_len;
+                       end += size;
+                       if ((addr & blocksize_mask) || (size & blocksize_mask))
+                               goto out;
 
-               /* If this is a write we don't need to check anymore */
-               if (rw & WRITE)
-                       continue;
+                       /* If this is a write we don't need to check anymore */
+                       if (rw & WRITE)
+                               continue;
 
-               /*
-                * Check to make sure we don't have duplicate iov_base's in this
-                * iovec, if so return EINVAL, otherwise we'll get csum errors
-                * when reading back.
-                */
-               for (i = seg + 1; i < nr_segs; i++) {
-                       if (iov[seg].iov_base == iov[i].iov_base)
+                       /*
+                       * Check to make sure we don't have duplicate iov_base's
+                       * in this iovec, if so return EINVAL, otherwise we'll
+                       * get csum errors when reading back.
+                       */
+                       for (i = seg + 1; i < iter->nr_segs; i++) {
+                               if (iov[seg].iov_base == iov[i].iov_base)
+                                       goto out;
+                       }
+               }
+       } else if (iov_iter_has_bvec(iter)) {
+               struct bio_vec *bvec = iov_iter_bvec(iter);
+
+               for (seg = 0; seg < iter->nr_segs; seg++) {
+                       addr = (unsigned long)bvec[seg].bv_offset;
+                       size = bvec[seg].bv_len;
+                       end += size;
+                       if ((addr & blocksize_mask) || (size & blocksize_mask))
                                goto out;
                }
-       }
+       } else
+               BUG();
+
        retval = 0;
 out:
        return retval;
 }
 
 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+                       struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -7207,8 +7222,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
        bool relock = false;
        ssize_t ret;
 
-       if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
-                           offset, nr_segs))
+       if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iter, offset))
                return 0;
 
        atomic_inc(&inode->i_dio_count);
@@ -7220,7 +7234,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
         * call btrfs_wait_ordered_range to make absolutely sure that any
         * outstanding dirty pages are on disk.
         */
-       count = iov_length(iov, nr_segs);
+       count = iov_iter_count(iter);
        btrfs_wait_ordered_range(inode, offset, count);
 
        if (rw & WRITE) {
@@ -7245,7 +7259,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
 
        ret = __blockdev_direct_IO(rw, iocb, inode,
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
-                       iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
+                       iter, offset, btrfs_get_blocks_direct, NULL,
                        btrfs_submit_direct, flags);
        if (rw & WRITE) {
                if (ret < 0 && ret != -EIOCBQUEUED)
@@ -7986,7 +8000,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 
        /* check for collisions, even if the  name isn't there */
-       ret = btrfs_check_dir_item_collision(root, new_dir->i_ino,
+       ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
                             new_dentry->d_name.name,
                             new_dentry->d_name.len);
 
index 9d46f60cb9439ab3a41ab83f2f323f13aa564590..6bbf316764d7a1e1c941163dcdb5230398751c4a 100644 (file)
@@ -321,7 +321,7 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
 
 static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(fdentry(file)->d_sb);
+       struct btrfs_fs_info *fs_info = btrfs_sb(file_inode(file)->i_sb);
        struct btrfs_device *device;
        struct request_queue *q;
        struct fstrim_range range;
@@ -2098,7 +2098,7 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file,
 static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                                             void __user *arg)
 {
-       struct dentry *parent = fdentry(file);
+       struct dentry *parent = file->f_path.dentry;
        struct dentry *dentry;
        struct inode *dir = parent->d_inode;
        struct inode *inode;
@@ -3119,7 +3119,7 @@ out:
 static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                                       u64 off, u64 olen, u64 destoff)
 {
-       struct inode *inode = fdentry(file)->d_inode;
+       struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct fd src_file;
        struct inode *src;
@@ -4317,7 +4317,7 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
 
 static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+       struct btrfs_root *root = BTRFS_I(file_inode(file))->root;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
index a5a26320503fd4a82358adff8e614240373ea130..4a355726151ec05dd8e1110745648949888781e8 100644 (file)
@@ -588,7 +588,7 @@ static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
        else
                key.offset = (u64)-1;
 
-       return btrfs_read_fs_root_no_name(fs_info, &key);
+       return btrfs_get_fs_root(fs_info, &key, false);
 }
 
 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
index 0b1f4ef8db987da12951128f092a052018d5c6b9..ec71ea44d2b4626c9a2bcc73b5fb94af666eaf5b 100644 (file)
@@ -299,11 +299,6 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                        continue;
                }
 
-               if (btrfs_root_refs(&root->root_item) == 0) {
-                       btrfs_add_dead_root(root);
-                       continue;
-               }
-
                err = btrfs_init_fs_root(root);
                if (err) {
                        btrfs_free_fs_root(root);
@@ -318,6 +313,9 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                        btrfs_free_fs_root(root);
                        break;
                }
+
+               if (btrfs_root_refs(&root->root_item) == 0)
+                       btrfs_add_dead_root(root);
        }
 
        btrfs_free_path(path);
index 4d7433534f5cd77b7f9b240fba57ac7923df07e6..6024877335caf2a9dfa6af1018c5da19b0e8a2ae 100644 (file)
@@ -1005,9 +1005,19 @@ grow_dev_page(struct block_device *bdev, sector_t block,
        struct buffer_head *bh;
        sector_t end_block;
        int ret = 0;            /* Will call free_more_memory() */
+       gfp_t gfp_mask;
 
-       page = find_or_create_page(inode->i_mapping, index,
-               (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
+       gfp_mask = mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS;
+       gfp_mask |= __GFP_MOVABLE;
+       /*
+        * XXX: __getblk_slow() can not really deal with failure and
+        * will endlessly loop on improvised global reclaim.  Prefer
+        * looping in the allocator rather than here, at least that
+        * code knows what it's doing.
+        */
+       gfp_mask |= __GFP_NOFAIL;
+
+       page = find_or_create_page(inode->i_mapping, index, gfp_mask);
        if (!page)
                return ret;
 
index 43eb5592cdea83c83df854a489edea79d076cfda..00baf1419989f804ad56f1f27ed633f86d2d6628 100644 (file)
@@ -270,7 +270,7 @@ static void cachefiles_drop_object(struct fscache_object *_object)
 #endif
 
        /* delete retired objects */
-       if (test_bit(FSCACHE_COOKIE_RETIRED, &object->fscache.cookie->flags) &&
+       if (test_bit(FSCACHE_OBJECT_RETIRED, &object->fscache.flags) &&
            _object != cache->cache.fsdef
            ) {
                _debug("- retire object OBJ%x", object->fscache.debug_id);
index 6df8bd481425379006912990ee6f9461eaf3cf1b..1cb39e65288619ac4ee88cd6a4bd3ae0aa5ca10d 100644 (file)
@@ -1179,8 +1179,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
  * never get called.
  */
 static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
-                             const struct iovec *iov,
-                             loff_t pos, unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t pos)
 {
        WARN_ON(1);
        return -EINVAL;
index 6bfe65e0b03831280b0e66e96d616be9ee55e6b1..8c44fdd4e1c39f836b2c8a9b2a7a025f1844d3b3 100644 (file)
@@ -68,7 +68,7 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
 {
        fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
                                              &ceph_fscache_fsid_object_def,
-                                             fsc);
+                                             fsc, true);
 
        if (fsc->fscache == NULL) {
                pr_err("Unable to register fsid: %p fscache cookie", fsc);
@@ -204,7 +204,7 @@ void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc,
 
        ci->fscache = fscache_acquire_cookie(fsc->fscache,
                                             &ceph_fscache_inode_object_def,
-                                            ci);
+                                            ci, true);
 done:
        mutex_unlock(&inode->i_mutex);
 
@@ -324,6 +324,9 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
+       if (!PageFsCache(page))
+               return;
+
        fscache_wait_on_page_write(ci->fscache, page);
        fscache_uncache_page(ci->fscache, page);
 }
index 13976c33332ec1fd7ca3999053b15b7079c5ab31..3c0a4bd7499645ca8bf90fd1a6ba16f6831c164c 100644 (file)
@@ -897,7 +897,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
  * caller should hold i_ceph_lock.
  * caller will not hold session s_mutex if called from destroy_inode.
  */
-void __ceph_remove_cap(struct ceph_cap *cap)
+void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 {
        struct ceph_mds_session *session = cap->session;
        struct ceph_inode_info *ci = cap->ci;
@@ -909,6 +909,16 @@ void __ceph_remove_cap(struct ceph_cap *cap)
 
        /* remove from session list */
        spin_lock(&session->s_cap_lock);
+       /*
+        * s_cap_reconnect is protected by s_cap_lock. no one changes
+        * s_cap_gen while session is in the reconnect state.
+        */
+       if (queue_release &&
+           (!session->s_cap_reconnect ||
+            cap->cap_gen == session->s_cap_gen))
+               __queue_cap_release(session, ci->i_vino.ino, cap->cap_id,
+                                   cap->mseq, cap->issue_seq);
+
        if (session->s_cap_iterator == cap) {
                /* not yet, we are iterating over this very cap */
                dout("__ceph_remove_cap  delaying %p removal from session %p\n",
@@ -1023,7 +1033,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
        struct ceph_mds_cap_release *head;
        struct ceph_mds_cap_item *item;
 
-       spin_lock(&session->s_cap_lock);
        BUG_ON(!session->s_num_cap_releases);
        msg = list_first_entry(&session->s_cap_releases,
                               struct ceph_msg, list_head);
@@ -1052,7 +1061,6 @@ void __queue_cap_release(struct ceph_mds_session *session,
                     (int)CEPH_CAPS_PER_RELEASE,
                     (int)msg->front.iov_len);
        }
-       spin_unlock(&session->s_cap_lock);
 }
 
 /*
@@ -1067,12 +1075,8 @@ void ceph_queue_caps_release(struct inode *inode)
        p = rb_first(&ci->i_caps);
        while (p) {
                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
-               struct ceph_mds_session *session = cap->session;
-
-               __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
-                                   cap->mseq, cap->issue_seq);
                p = rb_next(p);
-               __ceph_remove_cap(cap);
+               __ceph_remove_cap(cap, true);
        }
 }
 
@@ -2791,7 +2795,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
                        }
                        spin_unlock(&mdsc->cap_dirty_lock);
                }
-               __ceph_remove_cap(cap);
+               __ceph_remove_cap(cap, false);
        }
        /* else, we already released it */
 
@@ -2931,9 +2935,12 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        if (!inode) {
                dout(" i don't have ino %llx\n", vino.ino);
 
-               if (op == CEPH_CAP_OP_IMPORT)
+               if (op == CEPH_CAP_OP_IMPORT) {
+                       spin_lock(&session->s_cap_lock);
                        __queue_cap_release(session, vino.ino, cap_id,
                                            mseq, seq);
+                       spin_unlock(&session->s_cap_lock);
+               }
                goto flush_cap_releases;
        }
 
index 868b61d56cac77f3a8328d5ba4851ec7947fe827..2a0bcaeb189acd18b124aff8d54619667fd97bf2 100644 (file)
@@ -352,8 +352,18 @@ more:
                }
 
                /* note next offset and last dentry name */
+               rinfo = &req->r_reply_info;
+               if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
+                       frag = le32_to_cpu(rinfo->dir_dir->frag);
+                       if (ceph_frag_is_leftmost(frag))
+                               fi->next_offset = 2;
+                       else
+                               fi->next_offset = 0;
+                       off = fi->next_offset;
+               }
                fi->offset = fi->next_offset;
                fi->last_readdir = req;
+               fi->frag = frag;
 
                if (req->r_reply_info.dir_end) {
                        kfree(fi->last_name);
@@ -363,7 +373,6 @@ more:
                        else
                                fi->next_offset = 0;
                } else {
-                       rinfo = &req->r_reply_info;
                        err = note_last_dentry(fi,
                                       rinfo->dir_dname[rinfo->dir_nr-1],
                                       rinfo->dir_dname_len[rinfo->dir_nr-1]);
index 3de89829e2a162ab6bce2a58296b25aef9235c43..c4419e848a4f89eced64c8a156a560f62bc2cbdd 100644 (file)
@@ -408,51 +408,92 @@ more:
  *
  * If the read spans object boundary, just do multiple reads.
  */
-static ssize_t ceph_sync_read(struct file *file, char __user *data,
-                             unsigned len, loff_t *poff, int *checkeof)
+static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
+                               int *checkeof)
 {
+       struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct page **pages;
-       u64 off = *poff;
+       u64 off = iocb->ki_pos;
        int num_pages, ret;
+       size_t len = i->count;
 
-       dout("sync_read on file %p %llu~%u %s\n", file, off, len,
+       dout("sync_read on file %p %llu~%u %s\n", file, off,
+            (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
-
-       if (file->f_flags & O_DIRECT) {
-               num_pages = calc_pages_for((unsigned long)data, len);
-               pages = ceph_get_direct_page_vector(data, num_pages, true);
-       } else {
-               num_pages = calc_pages_for(off, len);
-               pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
-       }
-       if (IS_ERR(pages))
-               return PTR_ERR(pages);
-
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
-       ret = filemap_write_and_wait(inode->i_mapping);
+       ret = filemap_write_and_wait_range(inode->i_mapping, off,
+                                               off + len);
        if (ret < 0)
-               goto done;
+               return ret;
 
-       ret = striped_read(inode, off, len, pages, num_pages, checkeof,
-                          file->f_flags & O_DIRECT,
-                          (unsigned long)data & ~PAGE_MASK);
+       if (file->f_flags & O_DIRECT) {
+               while (iov_iter_count(i)) {
+                       void __user *data = i->iov[0].iov_base + i->iov_offset;
+                       size_t len = i->iov[0].iov_len - i->iov_offset;
+
+                       num_pages = calc_pages_for((unsigned long)data, len);
+                       pages = ceph_get_direct_page_vector(data,
+                                                           num_pages, true);
+                       if (IS_ERR(pages))
+                               return PTR_ERR(pages);
+
+                       ret = striped_read(inode, off, len,
+                                          pages, num_pages, checkeof,
+                                          1, (unsigned long)data & ~PAGE_MASK);
+                       ceph_put_page_vector(pages, num_pages, true);
+
+                       if (ret <= 0)
+                               break;
+                       off += ret;
+                       iov_iter_advance(i, ret);
+                       if (ret < len)
+                               break;
+               }
+       } else {
+               num_pages = calc_pages_for(off, len);
+               pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
+               if (IS_ERR(pages))
+                       return PTR_ERR(pages);
+               ret = striped_read(inode, off, len, pages,
+                                       num_pages, checkeof, 0, 0);
+               if (ret > 0) {
+                       int l, k = 0;
+                       size_t left = len = ret;
+
+                       while (left) {
+                               void __user *data = i->iov[0].iov_base
+                                                       + i->iov_offset;
+                               l = min(i->iov[0].iov_len - i->iov_offset,
+                                       left);
+
+                               ret = ceph_copy_page_vector_to_user(&pages[k],
+                                                                   data, off,
+                                                                   l);
+                               if (ret > 0) {
+                                       iov_iter_advance(i, ret);
+                                       left -= ret;
+                                       off += ret;
+                                       k = calc_pages_for(iocb->ki_pos,
+                                                          len - left + 1) - 1;
+                                       BUG_ON(k >= num_pages && left);
+                               } else
+                                       break;
+                       }
+               }
+               ceph_release_page_vector(pages, num_pages);
+       }
 
-       if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
-               ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
-       if (ret >= 0)
-               *poff = off + ret;
+       if (off > iocb->ki_pos) {
+               ret = off - iocb->ki_pos;
+               iocb->ki_pos = off;
+       }
 
-done:
-       if (file->f_flags & O_DIRECT)
-               ceph_put_page_vector(pages, num_pages, true);
-       else
-               ceph_release_page_vector(pages, num_pages);
        dout("sync_read result %d\n", ret);
        return ret;
 }
@@ -489,83 +530,79 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
        }
 }
 
+
 /*
- * Synchronous write, straight from __user pointer or user pages (if
- * O_DIRECT).
+ * Synchronous write, straight from __user pointer or user pages.
  *
  * If write spans object boundary, just do multiple writes.  (For a
  * correct atomic write, we should e.g. take write locks on all
  * objects, rollback on failure, etc.)
  */
-static ssize_t ceph_sync_write(struct file *file, const char __user *data,
-                              size_t left, loff_t pos, loff_t *ppos)
+static ssize_t
+ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
+                      unsigned long nr_segs, size_t count)
 {
+       struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_snap_context *snapc;
        struct ceph_vino vino;
        struct ceph_osd_request *req;
-       int num_ops = 1;
        struct page **pages;
        int num_pages;
-       u64 len;
        int written = 0;
        int flags;
        int check_caps = 0;
-       int page_align, io_align;
-       unsigned long buf_align;
+       int page_align;
        int ret;
        struct timespec mtime = CURRENT_TIME;
-       bool own_pages = false;
+       loff_t pos = iocb->ki_pos;
+       struct iov_iter i;
 
        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;
 
-       dout("sync_write on file %p %lld~%u %s\n", file, pos,
-            (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+       dout("sync_direct_write on file %p %lld~%u\n", file, pos,
+            (unsigned)count);
 
-       ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left);
+       ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
        if (ret < 0)
                return ret;
 
        ret = invalidate_inode_pages2_range(inode->i_mapping,
                                            pos >> PAGE_CACHE_SHIFT,
-                                           (pos + left) >> PAGE_CACHE_SHIFT);
+                                           (pos + count) >> PAGE_CACHE_SHIFT);
        if (ret < 0)
                dout("invalidate_inode_pages2_range returned %d\n", ret);
 
        flags = CEPH_OSD_FLAG_ORDERSNAP |
                CEPH_OSD_FLAG_ONDISK |
                CEPH_OSD_FLAG_WRITE;
-       if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
-               flags |= CEPH_OSD_FLAG_ACK;
-       else
-               num_ops++;      /* Also include a 'startsync' command. */
 
-       /*
-        * we may need to do multiple writes here if we span an object
-        * boundary.  this isn't atomic, unfortunately.  :(
-        */
-more:
-       io_align = pos & ~PAGE_MASK;
-       buf_align = (unsigned long)data & ~PAGE_MASK;
-       len = left;
-
-       snapc = ci->i_snap_realm->cached_context;
-       vino = ceph_vino(inode);
-       req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
-                                   vino, pos, &len, num_ops,
-                                   CEPH_OSD_OP_WRITE, flags, snapc,
-                                   ci->i_truncate_seq, ci->i_truncate_size,
-                                   false);
-       if (IS_ERR(req))
-               return PTR_ERR(req);
+       iov_iter_init(&i, iov, nr_segs, count, 0);
+
+       while (iov_iter_count(&i) > 0) {
+               void __user *data = i.iov->iov_base + i.iov_offset;
+               u64 len = i.iov->iov_len - i.iov_offset;
+
+               page_align = (unsigned long)data & ~PAGE_MASK;
+
+               snapc = ci->i_snap_realm->cached_context;
+               vino = ceph_vino(inode);
+               req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+                                           vino, pos, &len,
+                                           2,/*include a 'startsync' command*/
+                                           CEPH_OSD_OP_WRITE, flags, snapc,
+                                           ci->i_truncate_seq,
+                                           ci->i_truncate_size,
+                                           false);
+               if (IS_ERR(req)) {
+                       ret = PTR_ERR(req);
+                       goto out;
+               }
 
-       /* write from beginning of first page, regardless of io alignment */
-       page_align = file->f_flags & O_DIRECT ? buf_align : io_align;
-       num_pages = calc_pages_for(page_align, len);
-       if (file->f_flags & O_DIRECT) {
+               num_pages = calc_pages_for(page_align, len);
                pages = ceph_get_direct_page_vector(data, num_pages, false);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
@@ -577,60 +614,175 @@ more:
                 * may block.
                 */
                truncate_inode_pages_range(inode->i_mapping, pos,
-                                          (pos+len) | (PAGE_CACHE_SIZE-1));
-       } else {
+                                  (pos+len) | (PAGE_CACHE_SIZE-1));
+               osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
+                                               false, false);
+
+               /* BUG_ON(vino.snap != CEPH_NOSNAP); */
+               ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
+
+               ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+               if (!ret)
+                       ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+
+               ceph_put_page_vector(pages, num_pages, false);
+
+out:
+               ceph_osdc_put_request(req);
+               if (ret == 0) {
+                       pos += len;
+                       written += len;
+                       iov_iter_advance(&i, (size_t)len);
+
+                       if (pos > i_size_read(inode)) {
+                               check_caps = ceph_inode_set_size(inode, pos);
+                               if (check_caps)
+                                       ceph_check_caps(ceph_inode(inode),
+                                                       CHECK_CAPS_AUTHONLY,
+                                                       NULL);
+                       }
+               } else
+                       break;
+       }
+
+       if (ret != -EOLDSNAPC && written > 0) {
+               iocb->ki_pos = pos;
+               ret = written;
+       }
+       return ret;
+}
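
The loop above walks the user iovec one segment at a time: for each segment it computes the page alignment of the user buffer, pins the pages with ceph_get_direct_page_vector(), issues one OSD write, and advances the iov_iter by the bytes written, falling back to the partial count on error. A rough user-space sketch of that per-segment walk, with a hypothetical issue_write() standing in for the whole OSD request machinery (this is not the kernel API), might look like:

#include <stddef.h>
#include <stdio.h>
#include <sys/uio.h>

/* Hypothetical back end standing in for the OSD write; returns bytes written. */
static ssize_t issue_write(const void *buf, size_t len, off_t pos)
{
        printf("write %zu bytes at offset %lld\n", len, (long long)pos);
        return (ssize_t)len;
}

/* Walk the iovec segment by segment, as the direct-write loop above does,
 * and return the partial byte count if a later segment fails. */
static ssize_t sync_direct_write(const struct iovec *iov, int nr_segs, off_t pos)
{
        ssize_t written = 0;

        for (int i = 0; i < nr_segs; i++) {
                ssize_t ret = issue_write(iov[i].iov_base, iov[i].iov_len, pos);
                if (ret < 0)
                        return written ? written : ret;
                pos += ret;
                written += ret;
        }
        return written;
}

int main(void)
{
        char a[8] = "abcdefgh", b[4] = "ijkl";
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

        return sync_direct_write(iov, 2, 0) < 0;
}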
+
+
+/*
+ * Synchronous write, straight from __user pointer or user pages.
+ *
+ * If write spans object boundary, just do multiple writes.  (For a
+ * correct atomic write, we should e.g. take write locks on all
+ * objects, rollback on failure, etc.)
+ */
+static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
+                              unsigned long nr_segs, size_t count)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_snap_context *snapc;
+       struct ceph_vino vino;
+       struct ceph_osd_request *req;
+       struct page **pages;
+       u64 len;
+       int num_pages;
+       int written = 0;
+       int flags;
+       int check_caps = 0;
+       int ret;
+       struct timespec mtime = CURRENT_TIME;
+       loff_t pos = iocb->ki_pos;
+       struct iov_iter i;
+
+       if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
+               return -EROFS;
+
+       dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);
+
+       ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
+       if (ret < 0)
+               return ret;
+
+       ret = invalidate_inode_pages2_range(inode->i_mapping,
+                                           pos >> PAGE_CACHE_SHIFT,
+                                           (pos + count) >> PAGE_CACHE_SHIFT);
+       if (ret < 0)
+               dout("invalidate_inode_pages2_range returned %d\n", ret);
+
+       flags = CEPH_OSD_FLAG_ORDERSNAP |
+               CEPH_OSD_FLAG_ONDISK |
+               CEPH_OSD_FLAG_WRITE |
+               CEPH_OSD_FLAG_ACK;
+
+       iov_iter_init(&i, iov, nr_segs, count, 0);
+
+       while ((len = iov_iter_count(&i)) > 0) {
+               size_t left;
+               int n;
+
+               snapc = ci->i_snap_realm->cached_context;
+               vino = ceph_vino(inode);
+               req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
+                                           vino, pos, &len, 1,
+                                           CEPH_OSD_OP_WRITE, flags, snapc,
+                                           ci->i_truncate_seq,
+                                           ci->i_truncate_size,
+                                           false);
+               if (IS_ERR(req)) {
+                       ret = PTR_ERR(req);
+                       goto out;
+               }
+
+               /*
+                * write from beginning of first page,
+                * regardless of io alignment
+                */
+               num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
                pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
                if (IS_ERR(pages)) {
                        ret = PTR_ERR(pages);
                        goto out;
                }
-               ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
+
+               left = len;
+               for (n = 0; n < num_pages; n++) {
+                       size_t plen = min(left, PAGE_SIZE);
+                       ret = iov_iter_copy_from_user(pages[n], &i, 0, plen);
+                       if (ret != plen) {
+                               ret = -EFAULT;
+                               break;
+                       }
+                       left -= ret;
+                       iov_iter_advance(&i, ret);
+               }
+
                if (ret < 0) {
                        ceph_release_page_vector(pages, num_pages);
                        goto out;
                }
 
-               if ((file->f_flags & O_SYNC) == 0) {
-                       /* get a second commit callback */
-                       req->r_unsafe_callback = ceph_sync_write_unsafe;
-                       req->r_inode = inode;
-                       own_pages = true;
-               }
-       }
-       osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
-                                       false, own_pages);
+               /* get a second commit callback */
+               req->r_unsafe_callback = ceph_sync_write_unsafe;
+               req->r_inode = inode;
 
-       /* BUG_ON(vino.snap != CEPH_NOSNAP); */
-       ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
+               osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
+                                               false, true);
 
-       ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
-       if (!ret)
-               ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+               /* BUG_ON(vino.snap != CEPH_NOSNAP); */
+               ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
 
-       if (file->f_flags & O_DIRECT)
-               ceph_put_page_vector(pages, num_pages, false);
-       else if (file->f_flags & O_SYNC)
-               ceph_release_page_vector(pages, num_pages);
+               ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+               if (!ret)
+                       ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
 
 out:
-       ceph_osdc_put_request(req);
-       if (ret == 0) {
-               pos += len;
-               written += len;
-               left -= len;
-               data += len;
-               if (left)
-                       goto more;
+               ceph_osdc_put_request(req);
+               if (ret == 0) {
+                       pos += len;
+                       written += len;
+
+                       if (pos > i_size_read(inode)) {
+                               check_caps = ceph_inode_set_size(inode, pos);
+                               if (check_caps)
+                                       ceph_check_caps(ceph_inode(inode),
+                                                       CHECK_CAPS_AUTHONLY,
+                                                       NULL);
+                       }
+               } else
+                       break;
+       }
 
+       if (ret != -EOLDSNAPC && written > 0) {
                ret = written;
-               *ppos = pos;
-               if (pos > i_size_read(inode))
-                       check_caps = ceph_inode_set_size(inode, pos);
-               if (check_caps)
-                       ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
-                                       NULL);
-       } else if (ret != -EOLDSNAPC && written > 0) {
-               ret = written;
+               iocb->ki_pos = pos;
        }
        return ret;
 }
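
In the buffered path above the user data is copied into a freshly allocated page vector one PAGE_SIZE chunk at a time before the pages are handed to the OSD request, with num_pages computed by the usual round-up (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT. A small stand-alone illustration of that chunking arithmetic, using plain heap buffers instead of kernel pages:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Copy len bytes into a vector of page-sized buffers, page by page. */
static void **copy_to_page_vector(const char *src, size_t len, size_t *num_pages)
{
        *num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        void **pages = calloc(*num_pages, sizeof(*pages));
        if (!pages)
                return NULL;

        size_t left = len;
        for (size_t n = 0; n < *num_pages; n++) {
                size_t plen = left < PAGE_SIZE ? left : PAGE_SIZE;
                pages[n] = malloc(PAGE_SIZE);
                if (!pages[n])
                        return NULL;    /* cleanup omitted for brevity */
                memcpy(pages[n], src, plen);
                src += plen;
                left -= plen;
        }
        return pages;
}

int main(void)
{
        char buf[10000] = { 0 };
        size_t n;
        void **pages = copy_to_page_vector(buf, sizeof(buf), &n);

        if (!pages)
                return 1;
        printf("%zu bytes -> %zu pages of %lu bytes\n", sizeof(buf), n, PAGE_SIZE);
        while (n--)
                free(pages[n]);
        free(pages);
        return 0;
}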
@@ -647,55 +799,84 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
 {
        struct file *filp = iocb->ki_filp;
        struct ceph_file_info *fi = filp->private_data;
-       loff_t *ppos = &iocb->ki_pos;
-       size_t len = iov->iov_len;
+       size_t len = iocb->ki_nbytes;
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       void __user *base = iov->iov_base;
        ssize_t ret;
        int want, got = 0;
        int checkeof = 0, read = 0;
 
-       dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
-            inode, ceph_vinop(inode), pos, (unsigned)len, inode);
 again:
+       dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
+            inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
                want = CEPH_CAP_FILE_CACHE;
        ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, &got, -1);
        if (ret < 0)
-               goto out;
-       dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
-            inode, ceph_vinop(inode), pos, (unsigned)len,
-            ceph_cap_string(got));
+               return ret;
 
        if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_filp->f_flags & O_DIRECT) ||
-           (fi->flags & CEPH_F_SYNC))
+           (fi->flags & CEPH_F_SYNC)) {
+               struct iov_iter i;
+
+               dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
+                    inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
+                    ceph_cap_string(got));
+
+               if (!read) {
+                       ret = generic_segment_checks(iov, &nr_segs,
+                                                       &len, VERIFY_WRITE);
+                       if (ret)
+                               goto out;
+               }
+
+               iov_iter_init(&i, iov, nr_segs, len, read);
+
                /* hmm, this isn't really async... */
-               ret = ceph_sync_read(filp, base, len, ppos, &checkeof);
-       else
-               ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
+               ret = ceph_sync_read(iocb, &i, &checkeof);
+       } else {
+               /*
+                * We can't modify the content of iov,
+                * so we only read from the beginning.
+                */
+               if (read) {
+                       iocb->ki_pos = pos;
+                       len = iocb->ki_nbytes;
+                       read = 0;
+               }
+               dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
+                    inode, ceph_vinop(inode), pos, (unsigned)len,
+                    ceph_cap_string(got));
 
+               ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
+       }
 out:
        dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        ceph_put_cap_refs(ci, got);
 
        if (checkeof && ret >= 0) {
-               int statret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
+               int statret = ceph_do_getattr(inode,
+                                             CEPH_STAT_CAP_SIZE);
 
                /* hit EOF or hole? */
-               if (statret == 0 && *ppos < inode->i_size) {
-                       dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
+               if (statret == 0 && iocb->ki_pos < inode->i_size &&
+                       ret < len) {
+                       dout("sync_read hit hole, ppos %lld < size %lld"
+                            ", reading more\n", iocb->ki_pos,
+                            inode->i_size);
+
                        read += ret;
-                       base += ret;
                        len -= ret;
                        checkeof = 0;
                        goto again;
                }
        }
+
        if (ret >= 0)
                ret += read;
 
@@ -772,11 +953,13 @@ retry_snap:
             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
 
        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
-           (iocb->ki_filp->f_flags & O_DIRECT) ||
-           (fi->flags & CEPH_F_SYNC)) {
+           (file->f_flags & O_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
                mutex_unlock(&inode->i_mutex);
-               written = ceph_sync_write(file, iov->iov_base, count,
-                                         pos, &iocb->ki_pos);
+               if (file->f_flags & O_DIRECT)
+                       written = ceph_sync_direct_write(iocb, iov,
+                                                        nr_segs, count);
+               else
+                       written = ceph_sync_write(iocb, iov, nr_segs, count);
                if (written == -EOLDSNAPC) {
                        dout("aio_write %p %llx.%llx %llu~%u"
                                "got EOLDSNAPC, retrying\n",
index 8549a48115f71b23e1f35ef444caf3eb32dbced3..2ae1381de64a102c89d3fbe9b92dee3893fbbe7a 100644 (file)
@@ -436,6 +436,16 @@ void ceph_destroy_inode(struct inode *inode)
        call_rcu(&inode->i_rcu, ceph_i_callback);
 }
 
+int ceph_drop_inode(struct inode *inode)
+{
+       /*
+        * Positive dentry and corresponding inode are always accompanied
+        * in MDS reply. So no need to keep inode in the cache after
+        * dropping all its aliases.
+        */
+       return 1;
+}
+
 /*
  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
  * careful because either the client or MDS may have more up to date
@@ -577,6 +587,8 @@ static int fill_inode(struct inode *inode,
        int issued = 0, implemented;
        struct timespec mtime, atime, ctime;
        u32 nsplits;
+       struct ceph_inode_frag *frag;
+       struct rb_node *rb_node;
        struct ceph_buffer *xattr_blob = NULL;
        int err = 0;
        int queue_trunc = 0;
@@ -751,15 +763,38 @@ no_change:
        /* FIXME: move me up, if/when version reflects fragtree changes */
        nsplits = le32_to_cpu(info->fragtree.nsplits);
        mutex_lock(&ci->i_fragtree_mutex);
+       rb_node = rb_first(&ci->i_fragtree);
        for (i = 0; i < nsplits; i++) {
                u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
-               struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);
-
-               if (IS_ERR(frag))
-                       continue;
+               frag = NULL;
+               while (rb_node) {
+                       frag = rb_entry(rb_node, struct ceph_inode_frag, node);
+                       if (ceph_frag_compare(frag->frag, id) >= 0) {
+                               if (frag->frag != id)
+                                       frag = NULL;
+                               else
+                                       rb_node = rb_next(rb_node);
+                               break;
+                       }
+                       rb_node = rb_next(rb_node);
+                       rb_erase(&frag->node, &ci->i_fragtree);
+                       kfree(frag);
+                       frag = NULL;
+               }
+               if (!frag) {
+                       frag = __get_or_create_frag(ci, id);
+                       if (IS_ERR(frag))
+                               continue;
+               }
                frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
                dout(" frag %x split by %d\n", frag->frag, frag->split_by);
        }
+       while (rb_node) {
+               frag = rb_entry(rb_node, struct ceph_inode_frag, node);
+               rb_node = rb_next(rb_node);
+               rb_erase(&frag->node, &ci->i_fragtree);
+               kfree(frag);
+       }
        mutex_unlock(&ci->i_fragtree_mutex);
 
        /* were we issued a capability? */
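
The fragtree update above is a merge of two sorted sequences: the existing rb-tree is walked in order alongside the sorted split list from the MDS, entries that no longer appear are erased and freed, matching entries are updated, missing ones are created, and a trailing loop drops whatever is left in the tree. A hedged sketch of that merge pattern over plain sorted arrays (standing in for the rb-tree walk, which this is not):

#include <stdio.h>

/* Merge-walk two sorted id lists: ids present only in the old list are
 * dropped, ids present only in the new list are created, and common ids
 * are kept and updated -- the same walk fill_inode() does over the
 * fragtree rb-tree and the MDS split array. */
static void sync_ids(const unsigned *old, int nold,
                     const unsigned *new, int nnew)
{
        int i = 0, j = 0;

        while (i < nold || j < nnew) {
                if (j == nnew || (i < nold && old[i] < new[j]))
                        printf("drop   %x\n", old[i++]);
                else if (i == nold || new[j] < old[i])
                        printf("create %x\n", new[j++]);
                else {
                        printf("update %x\n", old[i]);
                        i++;
                        j++;
                }
        }
}

int main(void)
{
        unsigned old[] = { 0x1, 0x3, 0x5, 0x7 };
        unsigned new[] = { 0x3, 0x4, 0x7 };

        sync_ids(old, 4, new, 3);
        return 0;
}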
@@ -1250,8 +1285,20 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
        int err = 0, i;
        struct inode *snapdir = NULL;
        struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
-       u64 frag = le32_to_cpu(rhead->args.readdir.frag);
        struct ceph_dentry_info *di;
+       u64 r_readdir_offset = req->r_readdir_offset;
+       u32 frag = le32_to_cpu(rhead->args.readdir.frag);
+
+       if (rinfo->dir_dir &&
+           le32_to_cpu(rinfo->dir_dir->frag) != frag) {
+               dout("readdir_prepopulate got new frag %x -> %x\n",
+                    frag, le32_to_cpu(rinfo->dir_dir->frag));
+               frag = le32_to_cpu(rinfo->dir_dir->frag);
+               if (ceph_frag_is_leftmost(frag))
+                       r_readdir_offset = 2;
+               else
+                       r_readdir_offset = 0;
+       }
 
        if (req->r_aborted)
                return readdir_prepopulate_inodes_only(req, session);
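
When the MDS replies with a different fragment than the one requested, the offset base for the prepopulated dentries is reset: 2 for the leftmost fragment (leaving room for the "." and ".." entries) and 0 otherwise, and each entry then gets di->offset = ceph_make_fpos(frag, i + base). Assuming ceph_make_fpos packs the fragment into the high 32 bits and the index into the low 32 bits (an assumption; the helper's definition is not part of this diff), the resulting offsets look like this:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: fragment in the high 32 bits, per-entry offset in the
 * low 32 bits.  This mirrors how di->offset is derived above. */
static uint64_t make_fpos(uint32_t frag, uint32_t off)
{
        return ((uint64_t)frag << 32) | off;
}

int main(void)
{
        uint32_t frag = 0x01000000;          /* illustrative fragment id */
        int leftmost = 0;                    /* pretend it is not leftmost */
        uint32_t base = leftmost ? 2 : 0;    /* 2 leaves room for "." and ".." */

        for (uint32_t i = 0; i < 3; i++)
                printf("entry %u -> fpos 0x%016llx\n", i,
                       (unsigned long long)make_fpos(frag, i + base));
        return 0;
}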
@@ -1315,7 +1362,7 @@ retry_lookup:
                }
 
                di = dn->d_fsdata;
-               di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
+               di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
 
                /* inode */
                if (dn->d_inode) {
index b7bda5d9611da031aaf6f104ece9fa6351993070..6d953ab0ac06c08463c6134639d658a7ce90f304 100644 (file)
@@ -43,6 +43,7 @@
  */
 
 struct ceph_reconnect_state {
+       int nr_caps;
        struct ceph_pagelist *pagelist;
        bool flock;
 };
@@ -443,6 +444,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        INIT_LIST_HEAD(&s->s_waiting);
        INIT_LIST_HEAD(&s->s_unsafe);
        s->s_num_cap_releases = 0;
+       s->s_cap_reconnect = 0;
        s->s_cap_iterator = NULL;
        INIT_LIST_HEAD(&s->s_cap_releases);
        INIT_LIST_HEAD(&s->s_cap_releases_done);
@@ -986,7 +988,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
        dout("removing cap %p, ci is %p, inode is %p\n",
             cap, ci, &ci->vfs_inode);
        spin_lock(&ci->i_ceph_lock);
-       __ceph_remove_cap(cap);
+       __ceph_remove_cap(cap, false);
        if (!__ceph_is_any_real_caps(ci)) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -1231,9 +1233,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        session->s_trim_caps--;
        if (oissued) {
                /* we aren't the only cap.. just remove us */
-               __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
-                                   cap->mseq, cap->issue_seq);
-               __ceph_remove_cap(cap);
+               __ceph_remove_cap(cap, true);
        } else {
                /* try to drop referring dentries */
                spin_unlock(&ci->i_ceph_lock);
@@ -1416,7 +1416,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
        unsigned num;
 
        dout("discard_cap_releases mds%d\n", session->s_mds);
-       spin_lock(&session->s_cap_lock);
 
        /* zero out the in-progress message */
        msg = list_first_entry(&session->s_cap_releases,
@@ -1443,8 +1442,6 @@ static void discard_cap_releases(struct ceph_mds_client *mdsc,
                msg->front.iov_len = sizeof(*head);
                list_add(&msg->list_head, &session->s_cap_releases);
        }
-
-       spin_unlock(&session->s_cap_lock);
 }
 
 /*
@@ -2238,8 +2235,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
        if (err == 0) {
                if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
-                                   req->r_op == CEPH_MDS_OP_LSSNAP) &&
-                   rinfo->dir_nr)
+                                   req->r_op == CEPH_MDS_OP_LSSNAP))
                        ceph_readdir_prepopulate(req, req->r_session);
                ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
        }
@@ -2490,6 +2486,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        cap->seq = 0;        /* reset cap seq */
        cap->issue_seq = 0;  /* and issue_seq */
        cap->mseq = 0;       /* and migrate_seq */
+       cap->cap_gen = cap->session->s_cap_gen;
 
        if (recon_state->flock) {
                rec.v2.cap_id = cpu_to_le64(cap->cap_id);
@@ -2552,6 +2549,8 @@ encode_again:
        } else {
                err = ceph_pagelist_append(pagelist, &rec, reclen);
        }
+
+       recon_state->nr_caps++;
 out_free:
        kfree(path);
 out_dput:
@@ -2579,6 +2578,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        struct rb_node *p;
        int mds = session->s_mds;
        int err = -ENOMEM;
+       int s_nr_caps;
        struct ceph_pagelist *pagelist;
        struct ceph_reconnect_state recon_state;
 
@@ -2610,20 +2610,38 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        dout("session %p state %s\n", session,
             session_state_name(session->s_state));
 
+       spin_lock(&session->s_gen_ttl_lock);
+       session->s_cap_gen++;
+       spin_unlock(&session->s_gen_ttl_lock);
+
+       spin_lock(&session->s_cap_lock);
+       /*
+        * notify __ceph_remove_cap() that we are composing cap reconnect.
+        * If a cap gets released before being added to the cap reconnect,
+        * __ceph_remove_cap() should skip queuing cap release.
+        */
+       session->s_cap_reconnect = 1;
        /* drop old cap expires; we're about to reestablish that state */
        discard_cap_releases(mdsc, session);
+       spin_unlock(&session->s_cap_lock);
 
        /* traverse this session's caps */
-       err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps);
+       s_nr_caps = session->s_nr_caps;
+       err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
        if (err)
                goto fail;
 
+       recon_state.nr_caps = 0;
        recon_state.pagelist = pagelist;
        recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
        err = iterate_session_caps(session, encode_caps_cb, &recon_state);
        if (err < 0)
                goto fail;
 
+       spin_lock(&session->s_cap_lock);
+       session->s_cap_reconnect = 0;
+       spin_unlock(&session->s_cap_lock);
+
        /*
         * snaprealms.  we provide mds with the ino, seq (version), and
         * parent for all of our realms.  If the mds has any newer info,
@@ -2646,11 +2664,18 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
 
        if (recon_state.flock)
                reply->hdr.version = cpu_to_le16(2);
-       if (pagelist->length) {
-               /* set up outbound data if we have any */
-               reply->hdr.data_len = cpu_to_le32(pagelist->length);
-               ceph_msg_data_add_pagelist(reply, pagelist);
+
+       /* raced with cap release? */
+       if (s_nr_caps != recon_state.nr_caps) {
+               struct page *page = list_first_entry(&pagelist->head,
+                                                    struct page, lru);
+               __le32 *addr = kmap_atomic(page);
+               *addr = cpu_to_le32(recon_state.nr_caps);
+               kunmap_atomic(addr);
        }
+
+       reply->hdr.data_len = cpu_to_le32(pagelist->length);
+       ceph_msg_data_add_pagelist(reply, pagelist);
        ceph_con_send(&session->s_con, reply);
 
        mutex_unlock(&session->s_mutex);
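
The reconnect message now always carries the pagelist, and the cap count encoded at its head is only an estimate: if caps were released while the message was being composed (s_nr_caps != recon_state.nr_caps), the first 32-bit word of the first pagelist page is patched in place before sending. A small sketch of that reserve-a-count-prefix-then-fix-it-up pattern over an ordinary buffer (not the pagelist API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Build a message shaped [u32 count][entries...]: write a provisional count
 * first, then patch it once the real number of encoded entries is known,
 * just as the reconnect code patches the first word of its first pagelist
 * page.  (The kernel stores the count little-endian via cpu_to_le32; host
 * order is used here for simplicity.) */
int main(void)
{
        unsigned char buf[64];
        size_t off = 0;

        uint32_t count = 10;                  /* provisional: s_nr_caps */
        memcpy(buf + off, &count, sizeof(count));
        off += sizeof(count);

        uint32_t actual = 0;                  /* recon_state.nr_caps */
        for (uint32_t id = 0; id < 7; id++) { /* only 7 entries get encoded */
                memcpy(buf + off, &id, sizeof(id));
                off += sizeof(id);
                actual++;
        }

        if (actual != count)                  /* raced: patch the prefix */
                memcpy(buf, &actual, sizeof(actual));

        memcpy(&count, buf, sizeof(count));
        printf("encoded %u entries, prefix now says %u\n", actual, count);
        return 0;
}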
index c2a19fbbe5177b619b7a3d7e6132b626df8c8508..4c053d099ae4e60400dbcbdcce21844138ba8a47 100644 (file)
@@ -132,6 +132,7 @@ struct ceph_mds_session {
        struct list_head  s_caps;     /* all caps issued by this session */
        int               s_nr_caps, s_trim_caps;
        int               s_num_cap_releases;
+       int               s_cap_reconnect;
        struct list_head  s_cap_releases; /* waiting cap_release messages */
        struct list_head  s_cap_releases_done; /* ready to send */
        struct ceph_cap  *s_cap_iterator;
index 6a0951e4304441a241ca8fe550aba36cc097c271..e58bd4a23bfb532bd2c119345bb4669be1d9336b 100644 (file)
@@ -686,6 +686,7 @@ static const struct super_operations ceph_super_ops = {
        .alloc_inode    = ceph_alloc_inode,
        .destroy_inode  = ceph_destroy_inode,
        .write_inode    = ceph_write_inode,
+       .drop_inode     = ceph_drop_inode,
        .sync_fs        = ceph_sync_fs,
        .put_super      = ceph_put_super,
        .show_options   = ceph_show_options,
index 6014b0a3c405cb12dfb62fdac7887f83a4977b96..8de94b564d670d28a033d6ccfc309a37f799ab57 100644 (file)
@@ -691,6 +691,7 @@ extern const struct inode_operations ceph_file_iops;
 
 extern struct inode *ceph_alloc_inode(struct super_block *sb);
 extern void ceph_destroy_inode(struct inode *inode);
+extern int ceph_drop_inode(struct inode *inode);
 
 extern struct inode *ceph_get_inode(struct super_block *sb,
                                    struct ceph_vino vino);
@@ -741,13 +742,7 @@ extern int ceph_add_cap(struct inode *inode,
                        int fmode, unsigned issued, unsigned wanted,
                        unsigned cap, unsigned seq, u64 realmino, int flags,
                        struct ceph_cap_reservation *caps_reservation);
-extern void __ceph_remove_cap(struct ceph_cap *cap);
-static inline void ceph_remove_cap(struct ceph_cap *cap)
-{
-       spin_lock(&cap->ci->i_ceph_lock);
-       __ceph_remove_cap(cap);
-       spin_unlock(&cap->ci->i_ceph_lock);
-}
+extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
 extern void ceph_put_cap(struct ceph_mds_client *mdsc,
                         struct ceph_cap *cap);
 
index a16b4e58bcc62ee88f9772f75120ef250d0112bd..849f6132b327e5e79e35bd4a178784eea001b267 100644 (file)
@@ -120,14 +120,16 @@ cifs_read_super(struct super_block *sb)
 {
        struct inode *inode;
        struct cifs_sb_info *cifs_sb;
+       struct cifs_tcon *tcon;
        int rc = 0;
 
        cifs_sb = CIFS_SB(sb);
+       tcon = cifs_sb_master_tcon(cifs_sb);
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
                sb->s_flags |= MS_POSIXACL;
 
-       if (cifs_sb_master_tcon(cifs_sb)->ses->capabilities & CAP_LARGE_FILES)
+       if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
        else
                sb->s_maxbytes = MAX_NON_LFS;
@@ -147,7 +149,7 @@ cifs_read_super(struct super_block *sb)
                goto out_no_root;
        }
 
-       if (cifs_sb_master_tcon(cifs_sb)->nocase)
+       if (tcon->nocase)
                sb->s_d_op = &cifs_ci_dentry_ops;
        else
                sb->s_d_op = &cifs_dentry_ops;
@@ -860,7 +862,7 @@ const struct inode_operations cifs_file_inode_ops = {
 const struct inode_operations cifs_symlink_inode_ops = {
        .readlink = generic_readlink,
        .follow_link = cifs_follow_link,
-       .put_link = cifs_put_link,
+       .put_link = kfree_put_link,
        .permission = cifs_permission,
        /* BB add the following two eventually */
        /* revalidate: cifs_revalidate,
index 6d0b07217ac9a6ec5b2887125fdb3fa60800642c..26a754f49ba1933259c0711d1e37ece069b3030c 100644 (file)
@@ -115,8 +115,6 @@ extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
 
 /* Functions related to symlinks */
 extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
-extern void cifs_put_link(struct dentry *direntry,
-                         struct nameidata *nd, void *);
 extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
                         int buflen);
 extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
index 52b6f6c26bfcbe37d2994ba0fe3fa0b3779016d4..a67cf12a1c01b873a6d824445e2d5c8eeca61258 100644 (file)
@@ -278,6 +278,8 @@ struct smb_version_operations {
        /* set attributes */
        int (*set_file_info)(struct inode *, const char *, FILE_BASIC_INFO *,
                             const unsigned int);
+       int (*set_compression)(const unsigned int, struct cifs_tcon *,
+                              struct cifsFileInfo *);
        /* check if we can send an echo or not */
        bool (*can_echo)(struct TCP_Server_Info *);
        /* send echo request */
@@ -379,6 +381,9 @@ struct smb_version_operations {
        char * (*create_lease_buf)(u8 *, u8);
        /* parse lease context buffer and return oplock/epoch info */
        __u8 (*parse_lease_buf)(void *, unsigned int *);
+       int (*clone_range)(const unsigned int, struct cifsFileInfo *src_file,
+                       struct cifsFileInfo *target_file, u64 src_off, u64 len,
+                       u64 dest_off);
 };
 
 struct smb_version_values {
@@ -828,6 +833,8 @@ struct cifs_tcon {
        __u32 maximal_access;
        __u32 vol_serial_number;
        __le64 vol_create_time;
+       __u32 ss_flags;         /* sector size flags */
+       __u32 perf_sector_size; /* best sector size for perf */
 #endif /* CONFIG_CIFS_SMB2 */
 #ifdef CONFIG_CIFS_FSCACHE
        u64 resource_id;                /* server resource id */
index a630475e421c421ef38f6633ddb31c8351827752..f9bb4974161abf730bd9f3517c5c6d511553a956 100644 (file)
@@ -1352,6 +1352,35 @@ typedef struct smb_com_transaction_ioctl_req {
        __u8 Data[1];
 } __attribute__((packed)) TRANSACT_IOCTL_REQ;
 
+typedef struct smb_com_transaction_compr_ioctl_req {
+       struct smb_hdr hdr;     /* wct = 23 */
+       __u8 MaxSetupCount;
+       __u16 Reserved;
+       __le32 TotalParameterCount;
+       __le32 TotalDataCount;
+       __le32 MaxParameterCount;
+       __le32 MaxDataCount;
+       __le32 ParameterCount;
+       __le32 ParameterOffset;
+       __le32 DataCount;
+       __le32 DataOffset;
+       __u8 SetupCount; /* four setup words follow subcommand */
+       /* SNIA spec incorrectly included spurious pad here */
+       __le16 SubCommand; /* 2 = IOCTL/FSCTL */
+       __le32 FunctionCode;
+       __u16 Fid;
+       __u8 IsFsctl;  /* 1 = File System Control 0 = device control (IOCTL) */
+       __u8 IsRootFlag; /* 1 = apply command to root of share (must be DFS) */
+       __le16 ByteCount;
+       __u8 Pad[3];
+       __le16 compression_state;  /* See below for valid flags */
+} __attribute__((packed)) TRANSACT_COMPR_IOCTL_REQ;
+
+/* compression state flags */
+#define COMPRESSION_FORMAT_NONE                0x0000
+#define COMPRESSION_FORMAT_DEFAULT     0x0001
+#define COMPRESSION_FORMAT_LZNT1       0x0002
+
 typedef struct smb_com_transaction_ioctl_rsp {
        struct smb_hdr hdr;     /* wct = 19 */
        __u8 Reserved[3];
@@ -1491,15 +1520,30 @@ struct file_notify_information {
        __u8  FileName[0];
 } __attribute__((packed));
 
-struct reparse_data {
-       __u32   ReparseTag;
-       __u16   ReparseDataLength;
+/* For IO_REPARSE_TAG_SYMLINK */
+struct reparse_symlink_data {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __le16  SubstituteNameOffset;
+       __le16  SubstituteNameLength;
+       __le16  PrintNameOffset;
+       __le16  PrintNameLength;
+       __le32  Flags;
+       char    PathBuffer[0];
+} __attribute__((packed));
+
+/* For IO_REPARSE_TAG_NFS */
+#define NFS_SPECFILE_LNK       0x00000000014B4E4C
+#define NFS_SPECFILE_CHR       0x0000000000524843
+#define NFS_SPECFILE_BLK       0x00000000004B4C42
+#define NFS_SPECFILE_FIFO      0x000000004F464946
+#define NFS_SPECFILE_SOCK      0x000000004B434F53
+struct reparse_posix_data {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
        __u16   Reserved;
-       __u16   SubstituteNameOffset;
-       __u16   SubstituteNameLength;
-       __u16   PrintNameOffset;
-       __u16   PrintNameLength;
-       __u32   Flags;
+       __le64  InodeType; /* LNK, FIFO, CHR etc. */
        char    PathBuffer[0];
 } __attribute__((packed));
 
@@ -2200,6 +2244,9 @@ typedef struct {
        __le32 DeviceCharacteristics;
 } __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */
 
+/* minimum includes first three fields, and empty FS Name */
+#define MIN_FS_ATTR_INFO_SIZE 12
+
 typedef struct {
        __le32 Attributes;
        __le32 MaxPathNameComponentLength;
index b5ec2a268f560c77424744c55266480f25f3c8bb..aa3397620342d20beba92abc732845273d3663ca 100644 (file)
@@ -360,6 +360,8 @@ extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
 extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                               __u16 fid, char **symlinkinfo,
                               const struct nls_table *nls_codepage);
+extern int CIFSSMB_set_compression(const unsigned int xid,
+                                  struct cifs_tcon *tcon, __u16 fid);
 extern int CIFSSMBOpen(const unsigned int xid, struct cifs_tcon *tcon,
                        const char *fileName, const int disposition,
                        const int access_flags, const int omode,
index 4baf35949b51a16a395a698f40b66dd312a14246..93b29474714a0cdba1356c85c049e874dbc195dd 100644 (file)
@@ -3088,7 +3088,8 @@ CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
        bool is_unicode;
        unsigned int sub_len;
        char *sub_start;
-       struct reparse_data *reparse_buf;
+       struct reparse_symlink_data *reparse_buf;
+       struct reparse_posix_data *posix_buf;
        __u32 data_offset, data_count;
        char *end_of_smb;
 
@@ -3137,20 +3138,47 @@ CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                goto qreparse_out;
        }
        end_of_smb = 2 + get_bcc(&pSMBr->hdr) + (char *)&pSMBr->ByteCount;
-       reparse_buf = (struct reparse_data *)
+       reparse_buf = (struct reparse_symlink_data *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
        if ((char *)reparse_buf >= end_of_smb) {
                rc = -EIO;
                goto qreparse_out;
        }
-       if ((reparse_buf->PathBuffer + reparse_buf->PrintNameOffset +
-                               reparse_buf->PrintNameLength) > end_of_smb) {
+       if (reparse_buf->ReparseTag == cpu_to_le32(IO_REPARSE_TAG_NFS)) {
+               cifs_dbg(FYI, "NFS style reparse tag\n");
+               posix_buf =  (struct reparse_posix_data *)reparse_buf;
+
+               if (posix_buf->InodeType != cpu_to_le64(NFS_SPECFILE_LNK)) {
+                       cifs_dbg(FYI, "unsupported file type 0x%llx\n",
+                                le64_to_cpu(posix_buf->InodeType));
+                       rc = -EOPNOTSUPP;
+                       goto qreparse_out;
+               }
+               is_unicode = true;
+               sub_len = le16_to_cpu(reparse_buf->ReparseDataLength);
+               if (posix_buf->PathBuffer + sub_len > end_of_smb) {
+                       cifs_dbg(FYI, "reparse buf beyond SMB\n");
+                       rc = -EIO;
+                       goto qreparse_out;
+               }
+               *symlinkinfo = cifs_strndup_from_utf16(posix_buf->PathBuffer,
+                               sub_len, is_unicode, nls_codepage);
+               goto qreparse_out;
+       } else if (reparse_buf->ReparseTag !=
+                       cpu_to_le32(IO_REPARSE_TAG_SYMLINK)) {
+               rc = -EOPNOTSUPP;
+               goto qreparse_out;
+       }
+
+       /* Reparse tag is NTFS symlink */
+       sub_start = le16_to_cpu(reparse_buf->SubstituteNameOffset) +
+                               reparse_buf->PathBuffer;
+       sub_len = le16_to_cpu(reparse_buf->SubstituteNameLength);
+       if (sub_start + sub_len > end_of_smb) {
                cifs_dbg(FYI, "reparse buf beyond SMB\n");
                rc = -EIO;
                goto qreparse_out;
        }
-       sub_start = reparse_buf->SubstituteNameOffset + reparse_buf->PathBuffer;
-       sub_len = reparse_buf->SubstituteNameLength;
        if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE)
                is_unicode = true;
        else
@@ -3171,6 +3199,60 @@ qreparse_out:
        return rc;
 }
 
+int
+CIFSSMB_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+                   __u16 fid)
+{
+       int rc = 0;
+       int bytes_returned;
+       struct smb_com_transaction_compr_ioctl_req *pSMB;
+       struct smb_com_transaction_ioctl_rsp *pSMBr;
+
+       cifs_dbg(FYI, "Set compression for %u\n", fid);
+       rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **) &pSMB,
+                     (void **) &pSMBr);
+       if (rc)
+               return rc;
+
+       pSMB->compression_state = cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
+
+       pSMB->TotalParameterCount = 0;
+       pSMB->TotalDataCount = __constant_cpu_to_le32(2);
+       pSMB->MaxParameterCount = 0;
+       pSMB->MaxDataCount = 0;
+       pSMB->MaxSetupCount = 4;
+       pSMB->Reserved = 0;
+       pSMB->ParameterOffset = 0;
+       pSMB->DataCount = __constant_cpu_to_le32(2);
+       pSMB->DataOffset =
+               cpu_to_le32(offsetof(struct smb_com_transaction_compr_ioctl_req,
+                               compression_state) - 4);  /* 84 */
+       pSMB->SetupCount = 4;
+       pSMB->SubCommand = __constant_cpu_to_le16(NT_TRANSACT_IOCTL);
+       pSMB->ParameterCount = 0;
+       pSMB->FunctionCode = __constant_cpu_to_le32(FSCTL_SET_COMPRESSION);
+       pSMB->IsFsctl = 1; /* FSCTL */
+       pSMB->IsRootFlag = 0;
+       pSMB->Fid = fid; /* file handle always le */
+       /* 3 byte pad, followed by 2 byte compress state */
+       pSMB->ByteCount = __constant_cpu_to_le16(5);
+       inc_rfc1001_len(pSMB, 5);
+
+       rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+                        (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+       if (rc)
+               cifs_dbg(FYI, "Send error in SetCompression = %d\n", rc);
+
+       cifs_buf_release(pSMB);
+
+       /*
+        * Note: On -EAGAIN error only caller can retry on handle based calls
+        * Note: on an -EAGAIN error only the caller can retry handle-based
+        * calls, since the file handle passed in is no longer valid.
+       return rc;
+}
+
+
 #ifdef CONFIG_CIFS_POSIX
 
 /*Convert an Access Control Entry from wire format to local POSIX xattr format*/
index 7ddddf2e25046af5fceb6fcbe097e2b14750710c..cf6aedc59c218ecde3c778b3bdd3cc8ef15e971c 100644 (file)
@@ -2737,8 +2737,8 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
                /* go while there's data to be copied and no errors */
                if (copy && !rc) {
                        pdata = kmap(page);
-                       rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
-                                               (int)copy);
+                       rc = memcpy_toiovecend(iov_iter_iovec(&ii), pdata,
+                                              ii.iov_offset, (int)copy);
                        kunmap(page);
                        if (!rc) {
                                *copied += copy;
index b3258f35e88a5126d61b463bb4d96387e5dcba42..8d4b7bc8ae914edf292b16da534d700f6d870b1e 100644 (file)
@@ -27,7 +27,7 @@ void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
 {
        server->fscache =
                fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
-                               &cifs_fscache_server_index_def, server);
+                               &cifs_fscache_server_index_def, server, true);
        cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
                 __func__, server, server->fscache);
 }
@@ -46,7 +46,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
 
        tcon->fscache =
                fscache_acquire_cookie(server->fscache,
-                               &cifs_fscache_super_index_def, tcon);
+                               &cifs_fscache_super_index_def, tcon, true);
        cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
                 __func__, server->fscache, tcon->fscache);
 }
@@ -69,7 +69,7 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) {
                cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
-                               &cifs_fscache_inode_object_def, cifsi);
+                               &cifs_fscache_inode_object_def, cifsi, true);
                cifs_dbg(FYI, "%s: got FH cookie (0x%p/0x%p)\n",
                         __func__, tcon->fscache, cifsi->fscache);
        }
@@ -119,7 +119,7 @@ void cifs_fscache_reset_inode_cookie(struct inode *inode)
                cifsi->fscache = fscache_acquire_cookie(
                                        cifs_sb_master_tcon(cifs_sb)->fscache,
                                        &cifs_fscache_inode_object_def,
-                                       cifsi);
+                                       cifsi, true);
                cifs_dbg(FYI, "%s: new cookie 0x%p oldcookie 0x%p\n",
                         __func__, cifsi->fscache, old);
        }
index 3e0845585853e52c3f6be35c442b55991d71c5c2..d353f6cc55aad32b02a322f70f9cf1f539285433 100644 (file)
@@ -3,7 +3,7 @@
  *
  *   vfs operations that deal with io control
  *
- *   Copyright (C) International Business Machines  Corp., 2005,2007
+ *   Copyright (C) International Business Machines  Corp., 2005,2013
  *   Author(s): Steve French (sfrench@us.ibm.com)
  *
  *   This library is free software; you can redistribute it and/or modify
  */
 
 #include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/btrfs.h>
 #include "cifspdu.h"
 #include "cifsglob.h"
 #include "cifsproto.h"
 #include "cifs_debug.h"
 #include "cifsfs.h"
 
+static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
+                       unsigned long srcfd, u64 off, u64 len, u64 destoff)
+{
+       int rc;
+       struct cifsFileInfo *smb_file_target = dst_file->private_data;
+       struct inode *target_inode = file_inode(dst_file);
+       struct cifs_tcon *target_tcon;
+       struct fd src_file;
+       struct cifsFileInfo *smb_file_src;
+       struct inode *src_inode;
+       struct cifs_tcon *src_tcon;
+
+       cifs_dbg(FYI, "ioctl clone range\n");
+       /* the destination must be opened for writing */
+       if (!(dst_file->f_mode & FMODE_WRITE)) {
+               cifs_dbg(FYI, "file target not open for write\n");
+               return -EINVAL;
+       }
+
+       /* check if target volume is readonly and take reference */
+       rc = mnt_want_write_file(dst_file);
+       if (rc) {
+               cifs_dbg(FYI, "mnt_want_write failed with rc %d\n", rc);
+               return rc;
+       }
+
+       src_file = fdget(srcfd);
+       if (!src_file.file) {
+               rc = -EBADF;
+               goto out_drop_write;
+       }
+
+       if ((!src_file.file->private_data) || (!dst_file->private_data)) {
+               rc = -EBADF;
+               cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+               goto out_fput;
+       }
+
+       rc = -EXDEV;
+       smb_file_target = dst_file->private_data;
+       smb_file_src = src_file.file->private_data;
+       src_tcon = tlink_tcon(smb_file_src->tlink);
+       target_tcon = tlink_tcon(smb_file_target->tlink);
+
+       /* check if source and target are on same tree connection */
+       if (src_tcon != target_tcon) {
+               cifs_dbg(VFS, "file copy src and target on different volume\n");
+               goto out_fput;
+       }
+
+       src_inode = src_file.file->f_dentry->d_inode;
+
+       /*
+        * Note: the cifs case is easier than btrfs since the server is
+        * responsible for checking proper open modes and file types and, if
+        * it wants, the server could even support a copy range where
+        * source == target.
+        */
+
+       /* so we do not deadlock racing two ioctls on same files */
+       /* btrfs does a similar check */
+       if (target_inode < src_inode) {
+               mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
+               mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
+       } else {
+               mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
+               mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
+       }
+
+       /* determine range to clone */
+       rc = -EINVAL;
+       if (off + len > src_inode->i_size || off + len < off)
+               goto out_unlock;
+       if (len == 0)
+               len = src_inode->i_size - off;
+
+       cifs_dbg(FYI, "about to flush pages\n");
+       /* should we flush the first and last pages first? */
+       truncate_inode_pages_range(&target_inode->i_data, destoff,
+                                  PAGE_CACHE_ALIGN(destoff + len)-1);
+
+       if (target_tcon->ses->server->ops->clone_range)
+               rc = target_tcon->ses->server->ops->clone_range(xid,
+                       smb_file_src, smb_file_target, off, len, destoff);
+
+       /* force revalidate of size and timestamps of target file now
+          that target is updated on the server */
+       CIFS_I(target_inode)->time = 0;
+out_unlock:
+       mutex_unlock(&src_inode->i_mutex);
+       mutex_unlock(&target_inode->i_mutex);
+out_fput:
+       fdput(src_file);
+out_drop_write:
+       mnt_drop_write_file(dst_file);
+       return rc;
+}
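
To keep two racing clone ioctls on the same pair of files from deadlocking, the code above always takes the two inode mutexes in a fixed order determined by pointer value, the same trick the btrfs ioctl uses. A minimal stand-alone illustration of address-ordered locking with pthread mutexes (a hypothetical lock_pair() helper, not the kernel API):

#include <pthread.h>
#include <stdio.h>

/* Always lock the lower-addressed mutex first, so two threads locking the
 * same pair in opposite "src/dst" roles cannot deadlock. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (a == b) {
                pthread_mutex_lock(a);
                return;
        }
        if (a > b) {
                pthread_mutex_t *t = a;
                a = b;
                b = t;
        }
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
        pthread_mutex_unlock(a);
        if (a != b)
                pthread_mutex_unlock(b);
}

int main(void)
{
        pthread_mutex_t src = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t dst = PTHREAD_MUTEX_INITIALIZER;

        lock_pair(&dst, &src);   /* argument order does not matter */
        printf("both locks held\n");
        unlock_pair(&dst, &src);
        return 0;
}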
+
 long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 {
        struct inode *inode = file_inode(filep);
        int rc = -ENOTTY; /* strange error - but the precedent */
        unsigned int xid;
        struct cifs_sb_info *cifs_sb;
-#ifdef CONFIG_CIFS_POSIX
        struct cifsFileInfo *pSMBFile = filep->private_data;
        struct cifs_tcon *tcon;
        __u64   ExtAttrBits = 0;
-       __u64   ExtAttrMask = 0;
        __u64   caps;
-#endif /* CONFIG_CIFS_POSIX */
 
        xid = get_xid();
 
@@ -49,13 +146,14 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
        cifs_sb = CIFS_SB(inode->i_sb);
 
        switch (command) {
-#ifdef CONFIG_CIFS_POSIX
                case FS_IOC_GETFLAGS:
                        if (pSMBFile == NULL)
                                break;
                        tcon = tlink_tcon(pSMBFile->tlink);
                        caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+#ifdef CONFIG_CIFS_POSIX
                        if (CIFS_UNIX_EXTATTR_CAP & caps) {
+                               __u64   ExtAttrMask = 0;
                                rc = CIFSGetExtAttr(xid, tcon,
                                                    pSMBFile->fid.netfid,
                                                    &ExtAttrBits, &ExtAttrMask);
@@ -63,29 +161,53 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                                        rc = put_user(ExtAttrBits &
                                                FS_FL_USER_VISIBLE,
                                                (int __user *)arg);
+                               if (rc != EOPNOTSUPP)
+                                       break;
+                       }
+#endif /* CONFIG_CIFS_POSIX */
+                       rc = 0;
+                       if (CIFS_I(inode)->cifsAttrs & ATTR_COMPRESSED) {
+                               /* add in the compressed bit */
+                               ExtAttrBits = FS_COMPR_FL;
+                               rc = put_user(ExtAttrBits & FS_FL_USER_VISIBLE,
+                                             (int __user *)arg);
                        }
                        break;
-
                case FS_IOC_SETFLAGS:
                        if (pSMBFile == NULL)
                                break;
                        tcon = tlink_tcon(pSMBFile->tlink);
                        caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
-                       if (CIFS_UNIX_EXTATTR_CAP & caps) {
-                               if (get_user(ExtAttrBits, (int __user *)arg)) {
-                                       rc = -EFAULT;
-                                       break;
-                               }
-                               /*
-                                * rc = CIFSGetExtAttr(xid, tcon,
-                                *                     pSMBFile->fid.netfid,
-                                *                     extAttrBits,
-                                *                     &ExtAttrMask);
-                                */
+
+                       if (get_user(ExtAttrBits, (int __user *)arg)) {
+                               rc = -EFAULT;
+                               break;
+                       }
+
+                       /*
+                        * if (CIFS_UNIX_EXTATTR_CAP & caps)
+                        *      rc = CIFSSetExtAttr(xid, tcon,
+                        *                     pSMBFile->fid.netfid,
+                        *                     extAttrBits,
+                        *                     &ExtAttrMask);
+                        * if (rc != EOPNOTSUPP)
+                        *      break;
+                        */
+
+                       /* Currently only flag we can set is compressed flag */
+                       if ((ExtAttrBits & FS_COMPR_FL) == 0)
+                               break;
+
+                       /* Try to set compress flag */
+                       if (tcon->ses->server->ops->set_compression) {
+                               rc = tcon->ses->server->ops->set_compression(
+                                                       xid, tcon, pSMBFile);
+                               cifs_dbg(FYI, "set compress flag rc %d\n", rc);
                        }
-                       cifs_dbg(FYI, "set flags not implemented yet\n");
                        break;
-#endif /* CONFIG_CIFS_POSIX */
+               case BTRFS_IOC_CLONE:
+                       rc = cifs_ioctl_clone(xid, filep, arg, 0, 0, 0);
+                       break;
                default:
                        cifs_dbg(FYI, "unsupported ioctl\n");
                        break;
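
From user space both of the new paths are reached through ordinary ioctls: FS_IOC_SETFLAGS with FS_COMPR_FL asks the server to compress the file, and BTRFS_IOC_CLONE with a source descriptor as its argument triggers the server-side clone. A hedged example, assuming <linux/fs.h> and <linux/btrfs.h> export those ioctl numbers on the build host:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>      /* FS_IOC_SETFLAGS, FS_COMPR_FL */
#include <linux/btrfs.h>   /* BTRFS_IOC_CLONE */

int main(int argc, char **argv)
{
        if (argc != 3) {
                fprintf(stderr, "usage: %s <src-on-cifs> <dst-on-cifs>\n", argv[0]);
                return 1;
        }

        int src = open(argv[1], O_RDONLY);
        int dst = open(argv[2], O_WRONLY | O_CREAT, 0644);
        if (src < 0 || dst < 0) {
                perror("open");
                return 1;
        }

        /* Ask the server to store the destination compressed. */
        int flags = FS_COMPR_FL;
        if (ioctl(dst, FS_IOC_SETFLAGS, &flags) < 0)
                perror("FS_IOC_SETFLAGS");

        /* Server-side clone of src into dst (same tree connection required). */
        if (ioctl(dst, BTRFS_IOC_CLONE, src) < 0)
                perror("BTRFS_IOC_CLONE");

        close(src);
        close(dst);
        return 0;
}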
index 7e36ceba0c7a72d797a798de500847d4fa6ac66d..cc0234710ddbb780cae5037b717a93d8a7d3d54d 100644 (file)
@@ -621,10 +621,3 @@ symlink_exit:
        free_xid(xid);
        return rc;
 }
-
-void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
-{
-       char *p = nd_get_link(nd);
-       if (!IS_ERR(p))
-               kfree(p);
-}
index af847e1cf1c1985f5ddadd07e10dbc0e38a162a2..651a5279607b968a255a528e5411e4f4b39ca438 100644 (file)
@@ -780,7 +780,9 @@ static const struct {
        ERRDOS, ERRnoaccess, 0xc0000290}, {
        ERRDOS, ERRbadfunc, 0xc000029c}, {
        ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, {
-       ERRDOS, ERRinvlevel, 0x007c0001}, };
+       ERRDOS, ERRinvlevel, 0x007c0001}, {
+       0, 0, 0 }
+};
 
 /*****************************************************************************
  Print an error message from the status code
index 352358de1d7e37a8126d61d76898112393f64d08..e87387dbf39fa1d24b19f245e86da4c907b0fdaf 100644 (file)
@@ -500,9 +500,9 @@ select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
                                return NTLMv2;
                        if (global_secflags & CIFSSEC_MAY_NTLM)
                                return NTLM;
-                       /* Fallthrough */
                default:
-                       return Unspecified;
+                       /* Fallthrough to attempt LANMAN authentication next */
+                       break;
                }
        case CIFS_NEGFLAVOR_LANMAN:
                switch (requested) {
index 8233b174de3d62c6e5a3223919e83db8f7c57016..ea99efe0ae3d5de4206add5153d68d9b4aede996 100644 (file)
@@ -806,6 +806,13 @@ out:
        return rc;
 }
 
+static int
+cifs_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+                  struct cifsFileInfo *cfile)
+{
+       return CIFSSMB_set_compression(xid, tcon, cfile->fid.netfid);
+}
+
 static int
 cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
                     const char *path, struct cifs_sb_info *cifs_sb,
@@ -956,6 +963,7 @@ struct smb_version_operations smb1_operations = {
        .set_path_size = CIFSSMBSetEOF,
        .set_file_size = CIFSSMBSetFileSize,
        .set_file_info = smb_set_file_info,
+       .set_compression = cifs_set_compression,
        .echo = CIFSSMBEcho,
        .mkdir = CIFSSMBMkDir,
        .mkdir_setinfo = cifs_mkdir_setinfo,
index 861b332141440c35c3a1b56ec1587b0166399006..11dde4b24f8aa1dce05354cef65fa93feb82735f 100644 (file)
@@ -209,6 +209,94 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
        return rsize;
 }
 
+#ifdef CONFIG_CIFS_STATS2
+static int
+SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
+{
+       int rc;
+       unsigned int ret_data_len = 0;
+       struct network_interface_info_ioctl_rsp *out_buf;
+
+       rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+                       FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
+                       NULL /* no data input */, 0 /* no data input */,
+                       (char **)&out_buf, &ret_data_len);
+
+       if ((rc == 0)  && (ret_data_len > 0)) {
+               /* Dump info on first interface */
+               cifs_dbg(FYI, "Adapter Capability 0x%x\t",
+                       le32_to_cpu(out_buf->Capability));
+               cifs_dbg(FYI, "Link Speed %lld\n",
+                       le64_to_cpu(out_buf->LinkSpeed));
+       } else
+               cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
+
+       return rc;
+}
+#endif /* STATS2 */
+
+static void
+smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+{
+       int rc;
+       __le16 srch_path = 0; /* Null - open root of share */
+       u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+       struct cifs_open_parms oparms;
+       struct cifs_fid fid;
+
+       oparms.tcon = tcon;
+       oparms.desired_access = FILE_READ_ATTRIBUTES;
+       oparms.disposition = FILE_OPEN;
+       oparms.create_options = 0;
+       oparms.fid = &fid;
+       oparms.reconnect = false;
+
+       rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL);
+       if (rc)
+               return;
+
+#ifdef CONFIG_CIFS_STATS2
+       SMB3_request_interfaces(xid, tcon);
+#endif /* STATS2 */
+
+       SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+                       FS_ATTRIBUTE_INFORMATION);
+       SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+                       FS_DEVICE_INFORMATION);
+       SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+                       FS_SECTOR_SIZE_INFORMATION); /* SMB3 specific */
+       SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+       return;
+}
+
+static void
+smb2_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
+{
+       int rc;
+       __le16 srch_path = 0; /* Null - open root of share */
+       u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+       struct cifs_open_parms oparms;
+       struct cifs_fid fid;
+
+       oparms.tcon = tcon;
+       oparms.desired_access = FILE_READ_ATTRIBUTES;
+       oparms.disposition = FILE_OPEN;
+       oparms.create_options = 0;
+       oparms.fid = &fid;
+       oparms.reconnect = false;
+
+       rc = SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL);
+       if (rc)
+               return;
+
+       SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+                       FS_ATTRIBUTE_INFORMATION);
+       SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
+                       FS_DEVICE_INFORMATION);
+       SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
+       return;
+}
+
 static int
 smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
                        struct cifs_sb_info *cifs_sb, const char *full_path)
@@ -304,7 +392,19 @@ smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
                seq_puts(m, " ASYMMETRIC,");
        if (tcon->capabilities == 0)
                seq_puts(m, " None");
+       if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
+               seq_puts(m, " Aligned,");
+       if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
+               seq_puts(m, " Partition Aligned,");
+       if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
+               seq_puts(m, " SSD,");
+       if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
+               seq_puts(m, " TRIM-support,");
+
        seq_printf(m, "\tShare Flags: 0x%x", tcon->share_flags);
+       if (tcon->perf_sector_size)
+               seq_printf(m, "\tOptimal sector size: 0x%x",
+                          tcon->perf_sector_size);
 }
 
 static void
@@ -393,6 +493,85 @@ smb2_close_file(const unsigned int xid, struct cifs_tcon *tcon,
        SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
 }
 
+static int
+SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
+                    u64 persistent_fid, u64 volatile_fid,
+                    struct copychunk_ioctl *pcchunk)
+{
+       int rc;
+       unsigned int ret_data_len;
+       struct resume_key_req *res_key;
+
+       rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
+                       FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
+                       NULL, 0 /* no input */,
+                       (char **)&res_key, &ret_data_len);
+
+       if (rc) {
+               cifs_dbg(VFS, "refcopy ioctl error %d getting resume key\n", rc);
+               goto req_res_key_exit;
+       }
+       if (ret_data_len < sizeof(struct resume_key_req)) {
+               cifs_dbg(VFS, "Invalid refcopy resume key length\n");
+               rc = -EINVAL;
+               goto req_res_key_exit;
+       }
+       memcpy(pcchunk->SourceKey, res_key->ResumeKey, COPY_CHUNK_RES_KEY_SIZE);
+
+req_res_key_exit:
+       kfree(res_key);
+       return rc;
+}
+
+static int
+smb2_clone_range(const unsigned int xid,
+                       struct cifsFileInfo *srcfile,
+                       struct cifsFileInfo *trgtfile, u64 src_off,
+                       u64 len, u64 dest_off)
+{
+       int rc;
+       unsigned int ret_data_len;
+       struct copychunk_ioctl *pcchunk;
+       char *retbuf = NULL;
+
+       pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
+
+       if (pcchunk == NULL)
+               return -ENOMEM;
+
+       cifs_dbg(FYI, "in smb2_clone_range - about to call request res key\n");
+       /* Request a key from the server to identify the source of the copy */
+       rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
+                               srcfile->fid.persistent_fid,
+                               srcfile->fid.volatile_fid, pcchunk);
+
+       /* Note: request_res_key fills in the source key only when rc == 0 */
+       if (rc)
+               return rc;
+
+       /* For now the array is only one chunk long; make more flexible later */
+       pcchunk->ChunkCount = __constant_cpu_to_le32(1);
+       pcchunk->Reserved = 0;
+       pcchunk->SourceOffset = cpu_to_le64(src_off);
+       pcchunk->TargetOffset = cpu_to_le64(dest_off);
+       pcchunk->Length = cpu_to_le32(len);
+       pcchunk->Reserved2 = 0;
+
+       /* Request that server copy to target from src file identified by key */
+       rc = SMB2_ioctl(xid, tlink_tcon(trgtfile->tlink),
+                       trgtfile->fid.persistent_fid,
+                       trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
+                       true /* is_fsctl */, (char *)pcchunk,
+                       sizeof(struct copychunk_ioctl), &retbuf, &ret_data_len);
+
+       /* BB need to special case rc = EINVAL to alter chunk size */
+
+       cifs_dbg(FYI, "rc %d data length out %d\n", rc, ret_data_len);
+
+       kfree(pcchunk);
+       return rc;
+}
+
 static int
 smb2_flush_file(const unsigned int xid, struct cifs_tcon *tcon,
                struct cifs_fid *fid)
@@ -445,6 +624,14 @@ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
                            cfile->fid.volatile_fid, cfile->pid, &eof);
 }
 
+static int
+smb2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+                  struct cifsFileInfo *cfile)
+{
+       return SMB2_set_compression(xid, tcon, cfile->fid.persistent_fid,
+                           cfile->fid.volatile_fid);
+}
+
 static int
 smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
                     const char *path, struct cifs_sb_info *cifs_sb,
@@ -865,6 +1052,7 @@ struct smb_version_operations smb20_operations = {
        .logoff = SMB2_logoff,
        .tree_connect = SMB2_tcon,
        .tree_disconnect = SMB2_tdis,
+       .qfs_tcon = smb2_qfs_tcon,
        .is_path_accessible = smb2_is_path_accessible,
        .can_echo = smb2_can_echo,
        .echo = SMB2_echo,
@@ -874,6 +1062,7 @@ struct smb_version_operations smb20_operations = {
        .set_path_size = smb2_set_path_size,
        .set_file_size = smb2_set_file_size,
        .set_file_info = smb2_set_file_info,
+       .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
        .rmdir = smb2_rmdir,
@@ -907,6 +1096,7 @@ struct smb_version_operations smb20_operations = {
        .set_oplock_level = smb2_set_oplock_level,
        .create_lease_buf = smb2_create_lease_buf,
        .parse_lease_buf = smb2_parse_lease_buf,
+       .clone_range = smb2_clone_range,
 };
 
 struct smb_version_operations smb21_operations = {
@@ -936,6 +1126,7 @@ struct smb_version_operations smb21_operations = {
        .logoff = SMB2_logoff,
        .tree_connect = SMB2_tcon,
        .tree_disconnect = SMB2_tdis,
+       .qfs_tcon = smb2_qfs_tcon,
        .is_path_accessible = smb2_is_path_accessible,
        .can_echo = smb2_can_echo,
        .echo = SMB2_echo,
@@ -945,6 +1136,7 @@ struct smb_version_operations smb21_operations = {
        .set_path_size = smb2_set_path_size,
        .set_file_size = smb2_set_file_size,
        .set_file_info = smb2_set_file_info,
+       .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
        .rmdir = smb2_rmdir,
@@ -978,6 +1170,7 @@ struct smb_version_operations smb21_operations = {
        .set_oplock_level = smb21_set_oplock_level,
        .create_lease_buf = smb2_create_lease_buf,
        .parse_lease_buf = smb2_parse_lease_buf,
+       .clone_range = smb2_clone_range,
 };
 
 struct smb_version_operations smb30_operations = {
@@ -1008,6 +1201,7 @@ struct smb_version_operations smb30_operations = {
        .logoff = SMB2_logoff,
        .tree_connect = SMB2_tcon,
        .tree_disconnect = SMB2_tdis,
+       .qfs_tcon = smb3_qfs_tcon,
        .is_path_accessible = smb2_is_path_accessible,
        .can_echo = smb2_can_echo,
        .echo = SMB2_echo,
@@ -1017,6 +1211,7 @@ struct smb_version_operations smb30_operations = {
        .set_path_size = smb2_set_path_size,
        .set_file_size = smb2_set_file_size,
        .set_file_info = smb2_set_file_info,
+       .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
        .rmdir = smb2_rmdir,
@@ -1051,6 +1246,7 @@ struct smb_version_operations smb30_operations = {
        .set_oplock_level = smb3_set_oplock_level,
        .create_lease_buf = smb3_create_lease_buf,
        .parse_lease_buf = smb3_parse_lease_buf,
+       .clone_range = smb2_clone_range,
 };
 
 struct smb_version_values smb20_values = {
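Illustrative only, not part of the patch: smb2_clone_range() above performs a server-side copy in two ioctls. FSCTL_SRV_REQUEST_RESUME_KEY returns a 24-byte key naming the source file, and FSCTL_SRV_COPYCHUNK_WRITE then carries a copychunk_ioctl describing a single chunk. The user-space sketch below mirrors that request layout (field values are hypothetical, and a little-endian host is assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct copychunk_ioctl: 24-byte source key followed by one chunk. */
struct copychunk_req {
	uint8_t  source_key[24];   /* resume key returned by the server */
	uint32_t chunk_count;      /* only one chunk for now */
	uint32_t reserved;
	uint64_t source_offset;    /* byte offsets and length of the chunk */
	uint64_t target_offset;
	uint32_t length;
	uint32_t reserved2;
} __attribute__((packed));

int main(void)
{
	struct copychunk_req req;
	uint8_t fake_key[24] = { 0 };           /* hypothetical resume key */

	memset(&req, 0, sizeof(req));
	memcpy(req.source_key, fake_key, sizeof(fake_key));
	req.chunk_count   = 1;                  /* assumes a little-endian host */
	req.source_offset = 0;
	req.target_offset = 0;
	req.length        = 1024 * 1024;        /* copy 1 MiB in one chunk */

	printf("copychunk request is %zu bytes (one chunk)\n", sizeof(req));
	return 0;
}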
index eba0efde66d70ae15974eef74b10ff6d83ca44d8..8ab05b0d6778f99343d5740ff037ccbfad7bbef8 100644 (file)
@@ -687,6 +687,10 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
        else
                return -EIO;
 
+       /* no need to send SMB logoff if uid already closed due to reconnect */
+       if (ses->need_reconnect)
+               goto smb2_session_already_dead;
+
        rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
        if (rc)
                return rc;
@@ -701,6 +705,8 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
         * No tcon so can't do
         * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
         */
+
+smb2_session_already_dead:
        return rc;
 }
 
@@ -1131,6 +1137,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 
        cifs_dbg(FYI, "SMB2 IOCTL\n");
 
+       *out_data = NULL;
        /* zero out returned data len, in case of error */
        if (plen)
                *plen = 0;
@@ -1176,11 +1183,23 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
                req->Flags = 0;
 
        iov[0].iov_base = (char *)req;
-       /* 4 for rfc1002 length field */
-       iov[0].iov_len = get_rfc1002_length(req) + 4;
 
-       if (indatalen)
-               inc_rfc1001_len(req, indatalen);
+       /*
+        * If there is no input data, the size of the ioctl struct in
+        * the protocol spec still includes a one byte data buffer.
+        * If input data is passed to the ioctl, we do not want to
+        * count that dummy byte twice, so we do not send it in
+        * iovec[0] when sending input data (in iovec[1]). We must
+        * also add 4 bytes to the first iovec to allow for the
+        * rfc1002 length field.
+        */
+
+       if (indatalen) {
+               iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+               inc_rfc1001_len(req, indatalen - 1);
+       } else
+               iov[0].iov_len = get_rfc1002_length(req) + 4;
+
 
        rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
        rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
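Illustrative only: with N bytes of real input data carried in iovec[1], the code above trims the dummy Buffer[] byte from iov[0] and grows the RFC1001 length by N - 1. A stand-alone check of that arithmetic (the request size used here is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int req_len = 121;   /* hypothetical on-the-wire request size */
	unsigned int indatalen = 2;   /* e.g. the 2-byte compress_ioctl payload */
	unsigned int iov0_len, rfc1001_len;

	if (indatalen) {
		iov0_len    = req_len + 4 - 1;        /* drop the dummy Buffer[] byte */
		rfc1001_len = req_len + indatalen - 1;
	} else {
		iov0_len    = req_len + 4;            /* 4 bytes for the RFC1002 length */
		rfc1001_len = req_len;
	}

	printf("iov[0] = %u bytes, RFC1001 length = %u, iov[1] = %u bytes\n",
	       iov0_len, rfc1001_len, indatalen);
	return 0;
}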
@@ -1228,6 +1247,33 @@ ioctl_exit:
        return rc;
 }
 
+/*
+ *   Individual callers to ioctl worker function follow
+ */
+
+int
+SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+                    u64 persistent_fid, u64 volatile_fid)
+{
+       int rc;
+       char *res_key = NULL;
+       struct  compress_ioctl fsctl_input;
+       char *ret_data = NULL;
+
+       fsctl_input.CompressionState =
+                       __constant_cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
+
+       rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
+                       FSCTL_SET_COMPRESSION, true /* is_fsctl */,
+                       (char *)&fsctl_input /* data input */,
+                       2 /* in data len */, &ret_data /* out data */, NULL);
+
+       cifs_dbg(FYI, "set compression rc %d\n", rc);
+       kfree(res_key);
+
+       return rc;
+}
+
 int
 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
           u64 persistent_fid, u64 volatile_fid)
@@ -2293,7 +2339,7 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
-               goto qinf_exit;
+               goto qfsinf_exit;
        }
        rsp = (struct smb2_query_info_rsp *)iov.iov_base;
 
@@ -2305,7 +2351,70 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        if (!rc)
                copy_fs_info_to_kstatfs(info, fsdata);
 
-qinf_exit:
+qfsinf_exit:
+       free_rsp_buf(resp_buftype, iov.iov_base);
+       return rc;
+}
+
+int
+SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+             u64 persistent_fid, u64 volatile_fid, int level)
+{
+       struct smb2_query_info_rsp *rsp = NULL;
+       struct kvec iov;
+       int rc = 0;
+       int resp_buftype, max_len, min_len;
+       struct cifs_ses *ses = tcon->ses;
+       unsigned int rsp_len, offset;
+
+       if (level == FS_DEVICE_INFORMATION) {
+               max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
+               min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
+       } else if (level == FS_ATTRIBUTE_INFORMATION) {
+               max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
+               min_len = MIN_FS_ATTR_INFO_SIZE;
+       } else if (level == FS_SECTOR_SIZE_INFORMATION) {
+               max_len = sizeof(struct smb3_fs_ss_info);
+               min_len = sizeof(struct smb3_fs_ss_info);
+       } else {
+               cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
+               return -EINVAL;
+       }
+
+       rc = build_qfs_info_req(&iov, tcon, level, max_len,
+                               persistent_fid, volatile_fid);
+       if (rc)
+               return rc;
+
+       rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
+       if (rc) {
+               cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
+               goto qfsattr_exit;
+       }
+       rsp = (struct smb2_query_info_rsp *)iov.iov_base;
+
+       rsp_len = le32_to_cpu(rsp->OutputBufferLength);
+       offset = le16_to_cpu(rsp->OutputBufferOffset);
+       rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len);
+       if (rc)
+               goto qfsattr_exit;
+
+       if (level == FS_ATTRIBUTE_INFORMATION)
+               memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset
+                       + (char *)&rsp->hdr, min_t(unsigned int,
+                       rsp_len, max_len));
+       else if (level == FS_DEVICE_INFORMATION)
+               memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset
+                       + (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO));
+       else if (level == FS_SECTOR_SIZE_INFORMATION) {
+               struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
+                       (4 /* RFC1001 len */ + offset + (char *)&rsp->hdr);
+               tcon->ss_flags = le32_to_cpu(ss_info->Flags);
+               tcon->perf_sector_size =
+                       le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
+       }
+
+qfsattr_exit:
        free_rsp_buf(resp_buftype, iov.iov_base);
        return rc;
 }
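Illustrative only: SMB2_set_compression() above passes a two-byte input buffer, the little-endian CompressionState from the compress_ioctl struct defined further below. A minimal sketch of that payload, assuming the MS-FSCC value COMPRESSION_FORMAT_DEFAULT = 0x0001:

#include <stdint.h>
#include <stdio.h>

#define COMPRESSION_FORMAT_DEFAULT 0x0001   /* per MS-FSCC; NONE is 0x0000 */

int main(void)
{
	uint16_t state = COMPRESSION_FORMAT_DEFAULT;
	uint8_t payload[2];

	/* The wire format is little-endian, matching __le16 CompressionState. */
	payload[0] = state & 0xff;
	payload[1] = state >> 8;

	printf("FSCTL_SET_COMPRESSION input: %02x %02x (%zu bytes)\n",
	       payload[0], payload[1], sizeof(payload));
	return 0;
}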
index b83d0118a7577256ee4084d8a3abce4666e90bb7..b50a129572cd58149eacb2c2d93fa125f34ef9ff 100644 (file)
@@ -534,9 +534,16 @@ struct create_durable {
        } Data;
 } __packed;
 
+#define COPY_CHUNK_RES_KEY_SIZE        24
+struct resume_key_req {
+       char ResumeKey[COPY_CHUNK_RES_KEY_SIZE];
+       __le32  ContextLength;  /* MBZ */
+       char    Context[0];     /* ignored, Windows sets to 4 bytes of zero */
+} __packed;
+
 /* this goes in the ioctl buffer when doing a copychunk request */
 struct copychunk_ioctl {
-       char SourceKey[24];
+       char SourceKey[COPY_CHUNK_RES_KEY_SIZE];
        __le32 ChunkCount; /* we are only sending 1 */
        __le32 Reserved;
        /* array will only be one chunk long for us */
@@ -546,6 +553,12 @@ struct copychunk_ioctl {
        __u32 Reserved2;
 } __packed;
 
+struct copychunk_ioctl_rsp {
+       __le32 ChunksWritten;
+       __le32 ChunkBytesWritten;
+       __le32 TotalBytesWritten;
+} __packed;
+
 /* Response and Request are the same format */
 struct validate_negotiate_info {
        __le32 Capabilities;
@@ -569,6 +582,10 @@ struct network_interface_info_ioctl_rsp {
 
 #define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
 
+struct compress_ioctl {
+       __le16 CompressionState; /* See cifspdu.h for possible flag values */
+} __packed;
+
 struct smb2_ioctl_req {
        struct smb2_hdr hdr;
        __le16 StructureSize;   /* Must be 57 */
@@ -584,7 +601,7 @@ struct smb2_ioctl_req {
        __le32 MaxOutputResponse;
        __le32 Flags;
        __u32  Reserved2;
-       char   Buffer[0];
+       __u8   Buffer[0];
 } __packed;
 
 struct smb2_ioctl_rsp {
@@ -870,14 +887,16 @@ struct smb2_lease_ack {
 
 /* File System Information Classes */
 #define FS_VOLUME_INFORMATION          1 /* Query */
-#define FS_LABEL_INFORMATION           2 /* Set */
+#define FS_LABEL_INFORMATION           2 /* Local only */
 #define FS_SIZE_INFORMATION            3 /* Query */
 #define FS_DEVICE_INFORMATION          4 /* Query */
 #define FS_ATTRIBUTE_INFORMATION       5 /* Query */
 #define FS_CONTROL_INFORMATION         6 /* Query, Set */
 #define FS_FULL_SIZE_INFORMATION       7 /* Query */
 #define FS_OBJECT_ID_INFORMATION       8 /* Query, Set */
-#define FS_DRIVER_PATH_INFORMATION     9 /* Query */
+#define FS_DRIVER_PATH_INFORMATION     9 /* Local only */
+#define FS_VOLUME_FLAGS_INFORMATION    10 /* Local only */
+#define FS_SECTOR_SIZE_INFORMATION     11 /* SMB3 or later. Query */
 
 struct smb2_fs_full_size_info {
        __le64 TotalAllocationUnits;
@@ -887,6 +906,22 @@ struct smb2_fs_full_size_info {
        __le32 BytesPerSector;
 } __packed;
 
+#define SSINFO_FLAGS_ALIGNED_DEVICE            0x00000001
+#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
+#define SSINFO_FLAGS_NO_SEEK_PENALTY           0x00000004
+#define SSINFO_FLAGS_TRIM_ENABLED              0x00000008
+
+/* sector size info struct */
+struct smb3_fs_ss_info {
+       __le32 LogicalBytesPerSector;
+       __le32 PhysicalBytesPerSectorForAtomicity;
+       __le32 PhysicalBytesPerSectorForPerf;
+       __le32 FileSystemEffectivePhysicalBytesPerSectorForAtomicity;
+       __le32 Flags;
+       __le32 ByteOffsetForSectorAlignment;
+       __le32 ByteOffsetForPartitionAlignment;
+} __packed;
+
 /* partial list of QUERY INFO levels */
 #define FILE_DIRECTORY_INFORMATION     1
 #define FILE_FULL_DIRECTORY_INFORMATION 2
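Illustrative only: the SSINFO_FLAGS_* bits above are the values smb2_dump_share_caps() decodes earlier in this patch. A small stand-alone decoder (the example flags value is hypothetical):

#include <stdio.h>

#define SSINFO_FLAGS_ALIGNED_DEVICE              0x00000001
#define SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE 0x00000002
#define SSINFO_FLAGS_NO_SEEK_PENALTY             0x00000004
#define SSINFO_FLAGS_TRIM_ENABLED                0x00000008

static void dump_ss_flags(unsigned int flags)
{
	if (flags & SSINFO_FLAGS_ALIGNED_DEVICE)
		printf(" Aligned,");
	if (flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
		printf(" Partition Aligned,");
	if (flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
		printf(" SSD,");
	if (flags & SSINFO_FLAGS_TRIM_ENABLED)
		printf(" TRIM-support,");
	printf("\n");
}

int main(void)
{
	dump_ss_flags(0x0000000f);   /* hypothetical: all four bits set */
	return 0;
}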
index e3fb4801ee969295484fd8324bec855fc9600e0b..313813e4c19b300357bfc5f6db225a3be85d1121 100644 (file)
@@ -142,12 +142,16 @@ extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
 extern int SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
                         u64 persistent_fid, u64 volatile_fid,
                         FILE_BASIC_INFO *buf);
+extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
+                               u64 persistent_fid, u64 volatile_fid);
 extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
                             const u64 persistent_fid, const u64 volatile_fid,
                             const __u8 oplock_level);
 extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
                         u64 persistent_file_id, u64 volatile_file_id,
                         struct kstatfs *FSData);
+extern int SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
+                        u64 persistent_file_id, u64 volatile_file_id, int lvl);
 extern int SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
                     const __u64 persist_fid, const __u64 volatile_fid,
                     const __u32 pid, const __u64 length, const __u64 offset,
index d952ee48f4dcc629a5d1e91c3470cbd2bd6d1aec..a4b2391fe66e4e11cea93e7396b987c335d43823 100644 (file)
 #define FSCTL_QUERY_NETWORK_INTERFACE_INFO 0x001401FC /* BB add struct */
 #define FSCTL_SRV_READ_HASH          0x001441BB /* BB add struct */
 
+/* See MS-FSCC 2.1.2.5 */
 #define IO_REPARSE_TAG_MOUNT_POINT   0xA0000003
 #define IO_REPARSE_TAG_HSM           0xC0000004
 #define IO_REPARSE_TAG_SIS           0x80000007
+#define IO_REPARSE_TAG_HSM2          0x80000006
+#define IO_REPARSE_TAG_DRIVER_EXTENDER 0x80000005
+/* Used by the DFS filter. See MS-DFSC */
+#define IO_REPARSE_TAG_DFS           0x8000000A
+/* Used by the DFS filter. See MS-DFSC */
+#define IO_REPARSE_TAG_DFSR          0x80000012
+#define IO_REPARSE_TAG_FILTER_MANAGER 0x8000000B
+/* See section MS-FSCC 2.1.2.4 */
+#define IO_REPARSE_TAG_SYMLINK       0xA000000C
+#define IO_REPARSE_TAG_DEDUP         0x80000013
+#define IO_REPARSE_APPXSTREAM       0xC0000014
+/* NFS symlinks, Win 8/SMB3 and later */
+#define IO_REPARSE_TAG_NFS           0x80000014
 
 /* fsctl flags */
 /* If Flags is set to this value, the request is an FSCTL not ioctl request */
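Illustrative only: per MS-FSCC 2.1.2.1, bit 31 of a reparse tag marks a Microsoft-defined tag and bit 29 marks a name surrogate (a tag that redirects the name, such as a mount point or symlink). A small stand-alone decoder for a few of the tags added above:

#include <stdint.h>
#include <stdio.h>

#define IO_REPARSE_TAG_MOUNT_POINT 0xA0000003
#define IO_REPARSE_TAG_SYMLINK     0xA000000C
#define IO_REPARSE_TAG_NFS         0x80000014

static void decode_tag(uint32_t tag)
{
	/* Bit 31: Microsoft-defined tag; bit 29: name surrogate (MS-FSCC 2.1.2.1). */
	printf("0x%08x: %s, %s\n", tag,
	       (tag & 0x80000000u) ? "Microsoft tag" : "third-party tag",
	       (tag & 0x20000000u) ? "name surrogate" : "not a name surrogate");
}

int main(void)
{
	decode_tag(IO_REPARSE_TAG_MOUNT_POINT);
	decode_tag(IO_REPARSE_TAG_SYMLINK);
	decode_tag(IO_REPARSE_TAG_NFS);
	return 0;
}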
index 6fdcb1b4a106747779ae77ed365116256e79edec..800b938e4061768f5f55ee414b45afa56e6d6aa2 100644 (file)
@@ -410,8 +410,13 @@ static int
 wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
                      const int optype)
 {
-       return wait_for_free_credits(server, timeout,
-                               server->ops->get_credits_field(server, optype));
+       int *val;
+
+       val = server->ops->get_credits_field(server, optype);
+       /* Since an echo is already inflight, no need to wait to send another */
+       if (*val <= 0 && optype == CIFS_ECHO_OP)
+               return -EAGAIN;
+       return wait_for_free_credits(server, timeout, val);
 }
 
 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
index 41000305d716ea51c47ed52ddb5abe024045e958..d70df2e0e0da8a6b41d9bfeba798fb4cc9481a80 100644 (file)
@@ -1331,14 +1331,6 @@ rename_retry:
  * list is non-empty and continue searching.
  */
 
-/**
- * have_submounts - check for mounts over a dentry
- * @parent: dentry to check.
- *
- * Return true if the parent or its subdirectories contain
- * a mount point
- */
-
 static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
 {
        int *ret = data;
@@ -1349,6 +1341,13 @@ static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
        return D_WALK_CONTINUE;
 }
 
+/**
+ * have_submounts - check for mounts over a dentry
+ * @parent: dentry to check.
+ *
+ * Return true if the parent or its subdirectories contain
+ * a mount point
+ */
 int have_submounts(struct dentry *parent)
 {
        int ret = 0;
@@ -1801,6 +1800,32 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
 
 EXPORT_SYMBOL(d_instantiate_unique);
 
+/**
+ * d_instantiate_no_diralias - instantiate a non-aliased dentry
+ * @entry: dentry to complete
+ * @inode: inode to attach to this dentry
+ *
+ * Fill in inode information in the entry.  If a directory alias is found, then
+ * return an error.  Together with d_materialise_unique() this guarantees that a
+ * directory inode may never have more than one alias.
+ */
+int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
+{
+       BUG_ON(!hlist_unhashed(&entry->d_alias));
+
+       spin_lock(&inode->i_lock);
+       if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
+               spin_unlock(&inode->i_lock);
+               return -EBUSY;
+       }
+       __d_instantiate(entry, inode);
+       spin_unlock(&inode->i_lock);
+       security_d_instantiate(entry, inode);
+
+       return 0;
+}
+EXPORT_SYMBOL(d_instantiate_no_diralias);
+
 struct dentry *d_make_root(struct inode *root_inode)
 {
        struct dentry *res = NULL;
index 0e04142d5962312fcb055738479247b2364a252e..a142314710a3a9d020e3d3d7a4cbbf2c168af72a 100644 (file)
@@ -127,6 +127,7 @@ struct dio {
        spinlock_t bio_lock;            /* protects BIO fields below */
        int page_errors;                /* errno from get_user_pages() */
        int is_async;                   /* is IO async ? */
+       int should_dirty;               /* should we mark read pages dirty? */
        bool defer_completion;          /* defer AIO completion to workqueue? */
        int io_error;                   /* IO error in completion path */
        unsigned long refcount;         /* direct_io_worker() and bios */
@@ -403,7 +404,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
        dio->refcount++;
        spin_unlock_irqrestore(&dio->bio_lock, flags);
 
-       if (dio->is_async && dio->rw == READ)
+       if (dio->is_async && dio->rw == READ && dio->should_dirty)
                bio_set_pages_dirty(bio);
 
        if (sdio->submit_io)
@@ -474,13 +475,14 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
        if (!uptodate)
                dio->io_error = -EIO;
 
-       if (dio->is_async && dio->rw == READ) {
+       if (dio->is_async && dio->rw == READ && dio->should_dirty) {
                bio_check_pages_dirty(bio);     /* transfers ownership */
        } else {
                bio_for_each_segment_all(bvec, bio, i) {
                        struct page *page = bvec->bv_page;
 
-                       if (dio->rw == READ && !PageCompound(page))
+                       if (dio->rw == READ && !PageCompound(page) &&
+                           dio->should_dirty)
                                set_page_dirty_lock(page);
                        page_cache_release(page);
                }
@@ -1081,6 +1083,101 @@ static inline int drop_refcount(struct dio *dio)
        return ret2;
 }
 
+static ssize_t direct_IO_iovec(const struct iovec *iov, unsigned long nr_segs,
+                              struct dio *dio, struct dio_submit *sdio,
+                              unsigned blkbits, struct buffer_head *map_bh)
+{
+       size_t bytes;
+       ssize_t retval = 0;
+       int seg;
+       unsigned long user_addr;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               user_addr = (unsigned long)iov[seg].iov_base;
+               sdio->pages_in_io +=
+                       ((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
+                               PAGE_SIZE - user_addr / PAGE_SIZE);
+       }
+
+       dio->should_dirty = 1;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               user_addr = (unsigned long)iov[seg].iov_base;
+               sdio->size += bytes = iov[seg].iov_len;
+
+               /* Index into the first page of the first block */
+               sdio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
+               sdio->final_block_in_request = sdio->block_in_file +
+                                               (bytes >> blkbits);
+               /* Page fetching state */
+               sdio->head = 0;
+               sdio->tail = 0;
+               sdio->curr_page = 0;
+
+               sdio->total_pages = 0;
+               if (user_addr & (PAGE_SIZE-1)) {
+                       sdio->total_pages++;
+                       bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
+               }
+               sdio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+               sdio->curr_user_address = user_addr;
+
+               retval = do_direct_IO(dio, sdio, map_bh);
+
+               dio->result += iov[seg].iov_len -
+                       ((sdio->final_block_in_request - sdio->block_in_file) <<
+                                       blkbits);
+
+               if (retval) {
+                       dio_cleanup(dio, sdio);
+                       break;
+               }
+       } /* end iovec loop */
+
+       return retval;
+}
+
+static ssize_t direct_IO_bvec(struct bio_vec *bvec, unsigned long nr_segs,
+                             struct dio *dio, struct dio_submit *sdio,
+                             unsigned blkbits, struct buffer_head *map_bh)
+{
+       ssize_t retval = 0;
+       int seg;
+
+       sdio->pages_in_io += nr_segs;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               sdio->size += bvec[seg].bv_len;
+
+               /* Index into the first page of the first block */
+               sdio->first_block_in_page = bvec[seg].bv_offset >> blkbits;
+               sdio->final_block_in_request = sdio->block_in_file +
+                                               (bvec[seg].bv_len  >> blkbits);
+               /* Page fetching state */
+               sdio->curr_page = 0;
+               page_cache_get(bvec[seg].bv_page);
+               dio->pages[0] = bvec[seg].bv_page;
+               sdio->head = 0;
+               sdio->tail = 1;
+
+               sdio->total_pages = 1;
+               sdio->curr_user_address = 0;
+
+               retval = do_direct_IO(dio, sdio, map_bh);
+
+               dio->result += bvec[seg].bv_len -
+                       ((sdio->final_block_in_request - sdio->block_in_file) <<
+                                       blkbits);
+
+               if (retval) {
+                       dio_cleanup(dio, sdio);
+                       break;
+               }
+       }
+
+       return retval;
+}
+
 /*
  * This is a library function for use by filesystem drivers.
  *
@@ -1108,9 +1205,9 @@ static inline int drop_refcount(struct dio *dio)
  */
 static inline ssize_t
 do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, const struct iovec *iov, loff_t offset, 
-       unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags)
+       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+       get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+       int flags)
 {
        int seg;
        size_t size;
@@ -1122,10 +1219,9 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        loff_t end = offset;
        struct dio *dio;
        struct dio_submit sdio = { 0, };
-       unsigned long user_addr;
-       size_t bytes;
        struct buffer_head map_bh = { 0, };
        struct blk_plug plug;
+       unsigned long nr_segs = iter->nr_segs;
 
        if (rw & WRITE)
                rw = WRITE_ODIRECT;
@@ -1144,20 +1240,49 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        }
 
        /* Check the memory alignment.  Blocks cannot straddle pages */
-       for (seg = 0; seg < nr_segs; seg++) {
-               addr = (unsigned long)iov[seg].iov_base;
-               size = iov[seg].iov_len;
-               end += size;
-               if (unlikely((addr & blocksize_mask) ||
-                            (size & blocksize_mask))) {
-                       if (bdev)
-                               blkbits = blksize_bits(
-                                        bdev_logical_block_size(bdev));
-                       blocksize_mask = (1 << blkbits) - 1;
-                       if ((addr & blocksize_mask) || (size & blocksize_mask))
-                               goto out;
+       if (iov_iter_has_iovec(iter)) {
+               const struct iovec *iov = iov_iter_iovec(iter);
+
+               for (seg = 0; seg < nr_segs; seg++) {
+                       addr = (unsigned long)iov[seg].iov_base;
+                       size = iov[seg].iov_len;
+                       end += size;
+                       if (unlikely((addr & blocksize_mask) ||
+                                    (size & blocksize_mask))) {
+                               if (bdev)
+                                       blkbits = blksize_bits(
+                                                bdev_logical_block_size(bdev));
+                               blocksize_mask = (1 << blkbits) - 1;
+                               if ((addr & blocksize_mask) ||
+                                   (size & blocksize_mask))
+                                       goto out;
+                       }
                }
-       }
+       } else if (iov_iter_has_bvec(iter)) {
+               /*
+                * Is this necessary, or can we trust the in-kernel
+                * caller? Can we replace this with
+                *      end += iov_iter_count(iter); ?
+                */
+               struct bio_vec *bvec = iov_iter_bvec(iter);
+
+               for (seg = 0; seg < nr_segs; seg++) {
+                       addr = bvec[seg].bv_offset;
+                       size = bvec[seg].bv_len;
+                       end += size;
+                       if (unlikely((addr & blocksize_mask) ||
+                                    (size & blocksize_mask))) {
+                               if (bdev)
+                                       blkbits = blksize_bits(
+                                                bdev_logical_block_size(bdev));
+                               blocksize_mask = (1 << blkbits) - 1;
+                               if ((addr & blocksize_mask) ||
+                                   (size & blocksize_mask))
+                                       goto out;
+                       }
+               }
+       } else
+               BUG();
 
        /* watch out for a 0 len io from a tricksy fs */
        if (rw == READ && end == offset)
@@ -1251,47 +1376,14 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        if (unlikely(sdio.blkfactor))
                sdio.pages_in_io = 2;
 
-       for (seg = 0; seg < nr_segs; seg++) {
-               user_addr = (unsigned long)iov[seg].iov_base;
-               sdio.pages_in_io +=
-                       ((user_addr + iov[seg].iov_len + PAGE_SIZE-1) /
-                               PAGE_SIZE - user_addr / PAGE_SIZE);
-       }
-
        blk_start_plug(&plug);
 
-       for (seg = 0; seg < nr_segs; seg++) {
-               user_addr = (unsigned long)iov[seg].iov_base;
-               sdio.size += bytes = iov[seg].iov_len;
-
-               /* Index into the first page of the first block */
-               sdio.first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
-               sdio.final_block_in_request = sdio.block_in_file +
-                                               (bytes >> blkbits);
-               /* Page fetching state */
-               sdio.head = 0;
-               sdio.tail = 0;
-               sdio.curr_page = 0;
-
-               sdio.total_pages = 0;
-               if (user_addr & (PAGE_SIZE-1)) {
-                       sdio.total_pages++;
-                       bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
-               }
-               sdio.total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
-               sdio.curr_user_address = user_addr;
-
-               retval = do_direct_IO(dio, &sdio, &map_bh);
-
-               dio->result += iov[seg].iov_len -
-                       ((sdio.final_block_in_request - sdio.block_in_file) <<
-                                       blkbits);
-
-               if (retval) {
-                       dio_cleanup(dio, &sdio);
-                       break;
-               }
-       } /* end iovec loop */
+       if (iov_iter_has_iovec(iter))
+               retval = direct_IO_iovec(iov_iter_iovec(iter), nr_segs, dio,
+                                        &sdio, blkbits, &map_bh);
+       else
+               retval = direct_IO_bvec(iov_iter_bvec(iter), nr_segs, dio,
+                                       &sdio, blkbits, &map_bh);
 
        if (retval == -ENOTBLK) {
                /*
@@ -1360,9 +1452,9 @@ out:
 
 ssize_t
 __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, const struct iovec *iov, loff_t offset,
-       unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags)
+       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+       get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+       int flags)
 {
        /*
         * The block device state is needed in the end to finally
@@ -1376,9 +1468,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        prefetch(bdev->bd_queue);
        prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
 
-       return do_blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-                                    nr_segs, get_block, end_io,
-                                    submit_io, flags);
+       return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
+                                    get_block, end_io, submit_io, flags);
 }
 
 EXPORT_SYMBOL(__blockdev_direct_IO);
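Illustrative only: the per-segment page accounting factored into direct_IO_iovec() above counts how many pages each user buffer touches. The same arithmetic in stand-alone form (hypothetical addresses, 4 KiB pages):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Number of pages spanned by [user_addr, user_addr + len). */
static unsigned long pages_spanned(unsigned long user_addr, unsigned long len)
{
	return (user_addr + len + PAGE_SIZE - 1) / PAGE_SIZE
		- user_addr / PAGE_SIZE;
}

int main(void)
{
	/* Hypothetical segments: one page-aligned, one straddling a boundary. */
	printf("%lu pages\n", pages_spanned(0x10000, 8192));   /* 2 pages */
	printf("%lu pages\n", pages_spanned(0x10ff0, 32));      /* 2 pages */
	return 0;
}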
index 88556dc0458ee045659ed5a91875fb51d7ac2264..d5abafd56a6d69e53242f61fd487b6c04798abb5 100644 (file)
@@ -706,9 +706,7 @@ static int lkb_idr_is_local(int id, void *p, void *data)
 {
        struct dlm_lkb *lkb = p;
 
-       if (!lkb->lkb_nodeid)
-               return 1;
-       return 0;
+       return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
 }
 
 static int lkb_idr_is_any(int id, void *p, void *data)
index bf12ba5dd223befe93ce786c3a3848cdf5e38fcd..4000f6b3a7504505bb46d3d5ef2946b53e3d5cc9 100644 (file)
  */
 static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
-       struct dentry *lower_dentry;
-       int rc = 1;
+       struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+       int rc;
+
+       if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE))
+               return 1;
 
        if (flags & LOOKUP_RCU)
                return -ECHILD;
 
-       lower_dentry = ecryptfs_dentry_to_lower(dentry);
-       if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
-               goto out;
        rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
        if (dentry->d_inode) {
                struct inode *lower_inode =
@@ -60,12 +60,17 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
 
                fsstack_copy_attr_all(dentry->d_inode, lower_inode);
        }
-out:
        return rc;
 }
 
 struct kmem_cache *ecryptfs_dentry_info_cache;
 
+static void ecryptfs_dentry_free_rcu(struct rcu_head *head)
+{
+       kmem_cache_free(ecryptfs_dentry_info_cache,
+               container_of(head, struct ecryptfs_dentry_info, rcu));
+}
+
 /**
  * ecryptfs_d_release
  * @dentry: The ecryptfs dentry
@@ -74,15 +79,11 @@ struct kmem_cache *ecryptfs_dentry_info_cache;
  */
 static void ecryptfs_d_release(struct dentry *dentry)
 {
-       if (ecryptfs_dentry_to_private(dentry)) {
-               if (ecryptfs_dentry_to_lower(dentry)) {
-                       dput(ecryptfs_dentry_to_lower(dentry));
-                       mntput(ecryptfs_dentry_to_lower_mnt(dentry));
-               }
-               kmem_cache_free(ecryptfs_dentry_info_cache,
-                               ecryptfs_dentry_to_private(dentry));
+       struct ecryptfs_dentry_info *p = dentry->d_fsdata;
+       if (p) {
+               path_put(&p->lower_path);
+               call_rcu(&p->rcu, ecryptfs_dentry_free_rcu);
        }
-       return;
 }
 
 const struct dentry_operations ecryptfs_dops = {
index df19d34a033b9521de8623bb33ff2de6c0d9361a..90d1882b306face4c53d10ed08c44e3ec77b726a 100644 (file)
@@ -261,7 +261,10 @@ struct ecryptfs_inode_info {
  * vfsmount too. */
 struct ecryptfs_dentry_info {
        struct path lower_path;
-       struct ecryptfs_crypt_stat *crypt_stat;
+       union {
+               struct ecryptfs_crypt_stat *crypt_stat;
+               struct rcu_head rcu;
+       };
 };
 
 /**
@@ -512,13 +515,6 @@ ecryptfs_dentry_to_lower(struct dentry *dentry)
        return ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry;
 }
 
-static inline void
-ecryptfs_set_dentry_lower(struct dentry *dentry, struct dentry *lower_dentry)
-{
-       ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.dentry =
-               lower_dentry;
-}
-
 static inline struct vfsmount *
 ecryptfs_dentry_to_lower_mnt(struct dentry *dentry)
 {
@@ -531,13 +527,6 @@ ecryptfs_dentry_to_lower_path(struct dentry *dentry)
        return &((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path;
 }
 
-static inline void
-ecryptfs_set_dentry_lower_mnt(struct dentry *dentry, struct vfsmount *lower_mnt)
-{
-       ((struct ecryptfs_dentry_info *)dentry->d_fsdata)->lower_path.mnt =
-               lower_mnt;
-}
-
 #define ecryptfs_printk(type, fmt, arg...) \
         __ecryptfs_printk(type "%s: " fmt, __func__, ## arg);
 __printf(1, 2)
index 992cf95830b5792a5d510f7d588049d27c139a94..3ed6e5f5bb4b73711792494b2a6edf4c415dd5a1 100644 (file)
 /**
  * ecryptfs_read_update_atime
  *
- * generic_file_read updates the atime of upper layer inode.  But, it
+ * generic_file_read_iter updates the atime of upper layer inode.  But, it
  * doesn't give us a chance to update the atime of the lower layer
- * inode.  This function is a wrapper to generic_file_read.  It
- * updates the atime of the lower level inode if generic_file_read
+ * inode.  This function is a wrapper to generic_file_read_iter.  It
+ * updates the atime of the lower level inode if generic_file_read_iter
  * returns without any errors. This is to be used only for file reads.
  * The function to be used for directory reads is ecryptfs_read.
  */
 static ssize_t ecryptfs_read_update_atime(struct kiocb *iocb,
-                               const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos)
+                               struct iov_iter *iter, loff_t pos)
 {
        ssize_t rc;
        struct path *path;
        struct file *file = iocb->ki_filp;
 
-       rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
+       rc = generic_file_read_iter(iocb, iter, pos);
        /*
         * Even though this is a async interface, we need to wait
         * for IO to finish to update atime
@@ -357,9 +356,9 @@ const struct file_operations ecryptfs_dir_fops = {
 const struct file_operations ecryptfs_main_fops = {
        .llseek = generic_file_llseek,
        .read = do_sync_read,
-       .aio_read = ecryptfs_read_update_atime,
+       .read_iter = ecryptfs_read_update_atime,
        .write = do_sync_write,
-       .aio_write = generic_file_aio_write,
+       .write_iter = generic_file_write_iter,
        .iterate = ecryptfs_readdir,
        .unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
index 67e9b6339691f9cc01a378564fa5aa0527da1977..0f9b66eaa7677ce920d8488e5afd49fc684c4ac7 100644 (file)
@@ -361,8 +361,8 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry,
        BUG_ON(!d_count(lower_dentry));
 
        ecryptfs_set_dentry_private(dentry, dentry_info);
-       ecryptfs_set_dentry_lower(dentry, lower_dentry);
-       ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt);
+       dentry_info->lower_path.mnt = lower_mnt;
+       dentry_info->lower_path.dentry = lower_dentry;
 
        if (!lower_dentry->d_inode) {
                /* We want to add because we couldn't find in lower */
@@ -703,16 +703,6 @@ out:
        return NULL;
 }
 
-static void
-ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
-{
-       char *buf = nd_get_link(nd);
-       if (!IS_ERR(buf)) {
-               /* Free the char* */
-               kfree(buf);
-       }
-}
-
 /**
  * upper_size_to_lower_size
  * @crypt_stat: Crypt_stat associated with file
@@ -1121,7 +1111,7 @@ out:
 const struct inode_operations ecryptfs_symlink_iops = {
        .readlink = generic_readlink,
        .follow_link = ecryptfs_follow_link,
-       .put_link = ecryptfs_put_link,
+       .put_link = kfree_put_link,
        .permission = ecryptfs_permission,
        .setattr = ecryptfs_setattr,
        .getattr = ecryptfs_getattr_link,
index 7d52806c21197206a5932b08a68e7dc9d6899253..4725a07f003cf3279fa81813748442afc24a053c 100644 (file)
@@ -1149,7 +1149,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
        struct ecryptfs_msg_ctx *msg_ctx;
        struct ecryptfs_message *msg = NULL;
        char *auth_tok_sig;
-       char *payload;
+       char *payload = NULL;
        size_t payload_len = 0;
        int rc;
 
@@ -1203,6 +1203,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
        }
 out:
        kfree(msg);
+       kfree(payload);
        return rc;
 }
 
index eb1c5979ecaf673d7e984b6b78219217e63c9bc4..1b119d3bf924d16eea7f91f52449b122ecb5dcb1 100644 (file)
@@ -585,8 +585,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
 
        /* ->kill_sb() will take care of root_info */
        ecryptfs_set_dentry_private(s->s_root, root_info);
-       ecryptfs_set_dentry_lower(s->s_root, path.dentry);
-       ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt);
+       root_info->lower_path = path;
 
        s->s_flags |= MS_ACTIVE;
        return dget(s->s_root);
index 491c6c078e7f5e0ac420646288452d93cca86ce2..20564f8a358a8e9809f5cb086c1900edeea9ffcf 100644 (file)
@@ -69,8 +69,8 @@ const struct file_operations exofs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = generic_file_open,
        .release        = exofs_release_file,
index a5b3a5db31206f8f3c84781362f7c45e919ddf0e..6af043bab460b0225a2f08a3f9b9b6513c97d9f6 100644 (file)
@@ -64,8 +64,8 @@ const struct file_operations ext2_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = ext2_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext2_compat_ioctl,
index c260de6d7b6df9e5350ecfc019f41ed6def75470..cf91b336e3dfc7e953747d1f84f5c8d3caff36c4 100644 (file)
@@ -848,18 +848,16 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
 }
 
 static ssize_t
-ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs)
+ext2_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                ext2_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext2_get_block);
        if (ret < 0 && (rw & WRITE))
-               ext2_write_failed(mapping, offset + iov_length(iov, nr_segs));
+               ext2_write_failed(mapping, offset + iov_iter_count(iter));
        return ret;
 }
 
index 25cb413277e906edb1f037ba625ece7aa92903bb..a79677188b54128121a04b4bf671f48b7dd90901 100644 (file)
@@ -52,8 +52,8 @@ const struct file_operations ext3_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = ext3_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext3_compat_ioctl,
index 2bd85486b87974b390892a2280676a49d8eb274e..85bd13b8b7589b8522921ff7bd10bc89fcdfb270 100644 (file)
@@ -1862,8 +1862,7 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
  * VFS code falls back into buffered path in that case so we are safe.
  */
 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+                       struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -1871,10 +1870,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        handle_t *handle;
        ssize_t ret;
        int orphan = 0;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        int retries = 0;
 
-       trace_ext3_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+       trace_ext3_direct_IO_enter(inode, offset, count, rw);
 
        if (rw == WRITE) {
                loff_t final_size = offset + count;
@@ -1898,15 +1897,14 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        }
 
 retry:
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                ext3_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block);
        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + count;
 
                if (end > isize)
                        ext3_truncate_failed_direct_write(inode);
@@ -1949,8 +1947,7 @@ retry:
                        ret = err;
        }
 out:
-       trace_ext3_direct_IO_exit(inode, offset,
-                               iov_length(iov, nr_segs), rw, ret);
+       trace_ext3_direct_IO_exit(inode, offset, count, rw, ret);
        return ret;
 }
 
index 1194b1f0f8396c934ab6a382deadab6c0a6c4c2f..f8cde46de9cd77c3047182e94164bdfdac4a317d 100644 (file)
@@ -1783,7 +1783,7 @@ retry:
                d_tmpfile(dentry, inode);
                err = ext3_orphan_add(handle, inode);
                if (err)
-                       goto err_drop_inode;
+                       goto err_unlock_inode;
                mark_inode_dirty(inode);
                unlock_new_inode(inode);
        }
@@ -1791,10 +1791,9 @@ retry:
        if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
-err_drop_inode:
+err_unlock_inode:
        ext3_journal_stop(handle);
        unlock_new_inode(inode);
-       iput(inode);
        return err;
 }
 
index af815ea9d7cc4b6e209e28eea6f9bc97a6f247ce..850bf979beb00ab6917893cbe10bcc8b89afd9b7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/wait.h>
 #include <linux/blockgroup_lock.h>
 #include <linux/percpu_counter.h>
+#include <linux/ratelimit.h>
 #include <crypto/hash.h>
 #ifdef __KERNEL__
 #include <linux/compat.h>
@@ -1314,6 +1315,11 @@ struct ext4_sb_info {
        unsigned long s_es_last_sorted;
        struct percpu_counter s_extent_cache_cnt;
        spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
+
+       /* Ratelimit ext4 messages. */
+       struct ratelimit_state s_err_ratelimit_state;
+       struct ratelimit_state s_warning_ratelimit_state;
+       struct ratelimit_state s_msg_ratelimit_state;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -2117,8 +2123,7 @@ extern void ext4_da_update_reserve_space(struct inode *inode,
 extern int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
                                struct ext4_map_blocks *map, int flags);
 extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                               const struct iovec *iov, loff_t offset,
-                               unsigned long nr_segs);
+                               struct iov_iter *iter, loff_t offset);
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
index 3da21945ff1fff3e16b8b70b332a0a6ee330c93e..2ab3dcb741df6f5b758565401534fc018f3f1119 100644 (file)
@@ -74,12 +74,11 @@ void ext4_unwritten_wait(struct inode *inode)
  * or one thread will zero the other's data, causing corruption.
  */
 static int
-ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
-                  unsigned long nr_segs, loff_t pos)
+ext4_unaligned_aio(struct inode *inode, struct iov_iter *iter, loff_t pos)
 {
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        loff_t final_size = pos + count;
 
        if (pos >= inode->i_size)
@@ -92,8 +91,8 @@ ext4_unaligned_aio(struct inode *inode, const struct iovec *iov,
 }
 
 static ssize_t
-ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
-                   unsigned long nr_segs, loff_t pos)
+ext4_file_dio_write(struct kiocb *iocb, struct iov_iter *iter,
+                   loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -101,11 +100,11 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
        int unaligned_aio = 0;
        ssize_t ret;
        int overwrite = 0;
-       size_t length = iov_length(iov, nr_segs);
+       size_t length = iov_iter_count(iter);
 
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb))
-               unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos);
+               unaligned_aio = ext4_unaligned_aio(inode, iter, pos);
 
        /* Unaligned direct AIO must be serialized; see comment above */
        if (unaligned_aio) {
@@ -146,7 +145,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
                        overwrite = 1;
        }
 
-       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);
 
        if (ret > 0) {
@@ -165,8 +164,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
 }
 
 static ssize_t
-ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
+ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;
@@ -178,22 +176,24 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
 
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-               size_t length = iov_length(iov, nr_segs);
+               size_t length = iov_iter_count(iter);
 
                if ((pos > sbi->s_bitmap_maxbytes ||
                    (pos == sbi->s_bitmap_maxbytes && length > 0)))
                        return -EFBIG;
 
                if (pos + length > sbi->s_bitmap_maxbytes) {
-                       nr_segs = iov_shorten((struct iovec *)iov, nr_segs,
-                                             sbi->s_bitmap_maxbytes - pos);
+                       ret = iov_iter_shorten(iter,
+                                              sbi->s_bitmap_maxbytes - pos);
+                       if (ret)
+                               return ret;
                }
        }
 
        if (unlikely(iocb->ki_filp->f_flags & O_DIRECT))
-               ret = ext4_file_dio_write(iocb, iov, nr_segs, pos);
+               ret = ext4_file_dio_write(iocb, iter, pos);
        else
-               ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+               ret = generic_file_write_iter(iocb, iter, pos);
 
        return ret;
 }
@@ -594,8 +594,8 @@ const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = ext4_file_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
index 594009f5f523f0fd2228a72f1aa593a6f3ebf66f..8026469aa1fb400cc1a2e1dfda89b4cacceb8978 100644 (file)
@@ -639,8 +639,7 @@ out:
  * VFS code falls back into buffered path in that case so we are safe.
  */
 ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
-                          const struct iovec *iov, loff_t offset,
-                          unsigned long nr_segs)
+                          struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -648,7 +647,7 @@ ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
        handle_t *handle;
        ssize_t ret;
        int orphan = 0;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        int retries = 0;
 
        if (rw == WRITE) {
@@ -687,18 +686,17 @@ retry:
                        goto locked;
                }
                ret = __blockdev_direct_IO(rw, iocb, inode,
-                                inode->i_sb->s_bdev, iov,
-                                offset, nr_segs,
-                                ext4_get_block, NULL, NULL, 0);
+                                inode->i_sb->s_bdev, iter,
+                                offset, ext4_get_block, NULL, NULL, 0);
                inode_dio_done(inode);
        } else {
 locked:
-               ret = blockdev_direct_IO(rw, iocb, inode, iov,
-                                offset, nr_segs, ext4_get_block);
+               ret = blockdev_direct_IO(rw, iocb, inode, iter,
+                                offset, ext4_get_block);
 
                if (unlikely((rw & WRITE) && ret < 0)) {
                        loff_t isize = i_size_read(inode);
-                       loff_t end = offset + iov_length(iov, nr_segs);
+                       loff_t end = offset + iov_iter_count(iter);
 
                        if (end > isize)
                                ext4_truncate_failed_write(inode);
index 0d424d7ac02b0a30f98e713bb10403e90ced51b5..05599cd23a1bd7cd054d72cbb8f53163f68ac7d9 100644 (file)
@@ -2178,6 +2178,9 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
  *
  * @handle - handle for journal operations
  * @mpd - extent to map
+ * @give_up_on_write - we set this to true iff there is a fatal error and there
+ *                     is no hope of writing the data. The caller should discard
+ *                     dirty pages to avoid infinite loops.
  *
  * The function maps extent starting at mpd->lblk of length mpd->len. If it is
  * delayed, blocks are allocated, if it is unwritten, we may need to convert
@@ -2295,6 +2298,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
        struct address_space *mapping = mpd->inode->i_mapping;
        struct pagevec pvec;
        unsigned int nr_pages;
+       long left = mpd->wbc->nr_to_write;
        pgoff_t index = mpd->first_page;
        pgoff_t end = mpd->last_page;
        int tag;
@@ -2330,6 +2334,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                        if (page->index > end)
                                goto out;
 
+                       /*
+                        * Accumulated enough dirty pages? This doesn't apply
+                        * to WB_SYNC_ALL mode. For integrity sync we have to
+                        * keep going because someone may be concurrently
+                        * dirtying pages, and we might have synced a lot of
+                        * newly appeared dirty pages, but have not synced all
+                        * of the old dirty pages.
+                        */
+                       if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
+                               goto out;
+
                        /* If we can't merge this page, we are done. */
                        if (mpd->map.m_len > 0 && mpd->next_page != page->index)
                                goto out;
@@ -2364,19 +2379,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
                        if (err <= 0)
                                goto out;
                        err = 0;
-
-                       /*
-                        * Accumulated enough dirty pages? This doesn't apply
-                        * to WB_SYNC_ALL mode. For integrity sync we have to
-                        * keep going because someone may be concurrently
-                        * dirtying pages, and we might have synced a lot of
-                        * newly appeared dirty pages, but have not synced all
-                        * of the old dirty pages.
-                        */
-                       if (mpd->wbc->sync_mode == WB_SYNC_NONE &&
-                           mpd->next_page - mpd->first_page >=
-                                                       mpd->wbc->nr_to_write)
-                               goto out;
+                       left--;
                }
                pagevec_release(&pvec);
                cond_resched();
@@ -2563,7 +2566,7 @@ retry:
                        break;
        }
        blk_finish_plug(&plug);
-       if (!ret && !cycled) {
+       if (!ret && !cycled && wbc->nr_to_write > 0) {
                cycled = 1;
                mpd.last_page = writeback_index - 1;
                mpd.first_page = 0;
@@ -3067,13 +3070,12 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
  *
  */
 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
-                             const struct iovec *iov, loff_t offset,
-                             unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
        int overwrite = 0;
        get_block_t *get_block_func = NULL;
        int dio_flags = 0;
@@ -3082,7 +3084,7 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
 
        /* Use the old path for reads and writes beyond i_size. */
        if (rw != WRITE || final_size > inode->i_size)
-               return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+               return ext4_ind_direct_IO(rw, iocb, iter, offset);
 
        BUG_ON(iocb->private == NULL);
 
@@ -3149,8 +3151,8 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
                dio_flags = DIO_LOCKING;
        }
        ret = __blockdev_direct_IO(rw, iocb, inode,
-                                  inode->i_sb->s_bdev, iov,
-                                  offset, nr_segs,
+                                  inode->i_sb->s_bdev, iter,
+                                  offset,
                                   get_block_func,
                                   ext4_end_io_dio,
                                   NULL,
@@ -3204,8 +3206,7 @@ retake_lock:
 }
 
 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
-                             const struct iovec *iov, loff_t offset,
-                             unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -3221,13 +3222,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        if (ext4_has_inline_data(inode))
                return 0;
 
-       trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
+       trace_ext4_direct_IO_enter(inode, offset, iov_iter_count(iter), rw);
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-               ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+               ret = ext4_ext_direct_IO(rw, iocb, iter, offset);
        else
-               ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
-       trace_ext4_direct_IO_exit(inode, offset,
-                               iov_length(iov, nr_segs), rw, ret);
+               ret = ext4_ind_direct_IO(rw, iocb, iter, offset);
+       trace_ext4_direct_IO_exit(inode, offset, iov_iter_count(iter), rw, ret);
        return ret;
 }
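
The writeback part of the inode.c changes replaces the old "pages handled so far vs. nr_to_write" comparison with a local countdown: left starts at wbc->nr_to_write, the budget is checked before another page is pulled into the extent, and it is enforced only for WB_SYNC_NONE, so integrity sync still visits every old dirty page. A compact sketch of that control flow follows; scan_for_extent(), take_page() and the SYNC_* enum are hypothetical stand-ins for the pagevec loop and the "merge page into mpd" step.

    #include <stdbool.h>

    enum sync_mode { SYNC_NONE, SYNC_ALL };	/* stands in for wbc->sync_mode */

    /* Returns how many pages were accepted into the extent being mapped. */
    static long scan_for_extent(enum sync_mode mode, long nr_to_write,
				bool (*take_page)(unsigned long index),
				unsigned long first, unsigned long last)
    {
	long left = nr_to_write;
	unsigned long index;

	for (index = first; index <= last; index++) {
		/* For background writeback, stop once the budget is spent.
		 * Integrity sync keeps going so pages dirtied concurrently
		 * cannot crowd out older dirty pages. */
		if (mode == SYNC_NONE && left <= 0)
			break;
		if (!take_page(index))
			break;		/* page can't be merged; extent is done */
		left--;			/* charge one page against the budget */
	}
	return index - first;
    }
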
 
index 1bec5a5c1e45a29e9ead318987ec03d803104c5a..5a0408d7b1147094c3e82b6d11750b33396b7732 100644 (file)
@@ -2319,7 +2319,7 @@ retry:
                d_tmpfile(dentry, inode);
                err = ext4_orphan_add(handle, inode);
                if (err)
-                       goto err_drop_inode;
+                       goto err_unlock_inode;
                mark_inode_dirty(inode);
                unlock_new_inode(inode);
        }
@@ -2328,10 +2328,9 @@ retry:
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
-err_drop_inode:
+err_unlock_inode:
        ext4_journal_stop(handle);
        unlock_new_inode(inode);
-       iput(inode);
        return err;
 }
 
index d7d0c7b46ed40feec818701641fce792569dcec9..d488f80ee32df1137e91df0aed72bef2f61b49ac 100644 (file)
@@ -197,14 +197,15 @@ static void dump_completed_IO(struct inode *inode, struct list_head *head)
 static void ext4_add_complete_io(ext4_io_end_t *io_end)
 {
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
+       struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
        struct workqueue_struct *wq;
        unsigned long flags;
 
        /* Only reserved conversions from writeback should enter here */
        WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
-       WARN_ON(!io_end->handle);
+       WARN_ON(!io_end->handle && sbi->s_journal);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
-       wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
+       wq = sbi->rsv_conversion_wq;
        if (list_empty(&ei->i_rsv_conversion_list))
                queue_work(wq, &ei->i_rsv_conversion_work);
        list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
index 2c2e6cbc6bedc549938262e50be8e118d1cb6de7..d3a857bfae47631db0ab56f765f1028a244261ea 100644 (file)
@@ -411,20 +411,26 @@ static void ext4_handle_error(struct super_block *sb)
                        sb->s_id);
 }
 
+#define ext4_error_ratelimit(sb)                                       \
+               ___ratelimit(&(EXT4_SB(sb)->s_err_ratelimit_state),     \
+                            "EXT4-fs error")
+
 void __ext4_error(struct super_block *sb, const char *function,
                  unsigned int line, const char *fmt, ...)
 {
        struct va_format vaf;
        va_list args;
 
-       va_start(args, fmt);
-       vaf.fmt = fmt;
-       vaf.va = &args;
-       printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
-              sb->s_id, function, line, current->comm, &vaf);
-       va_end(args);
+       if (ext4_error_ratelimit(sb)) {
+               va_start(args, fmt);
+               vaf.fmt = fmt;
+               vaf.va = &args;
+               printk(KERN_CRIT
+                      "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+                      sb->s_id, function, line, current->comm, &vaf);
+               va_end(args);
+       }
        save_error_info(sb, function, line);
-
        ext4_handle_error(sb);
 }
 
@@ -438,22 +444,23 @@ void __ext4_error_inode(struct inode *inode, const char *function,
 
        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
        es->s_last_error_block = cpu_to_le64(block);
+       if (ext4_error_ratelimit(inode->i_sb)) {
+               va_start(args, fmt);
+               vaf.fmt = fmt;
+               vaf.va = &args;
+               if (block)
+                       printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+                              "inode #%lu: block %llu: comm %s: %pV\n",
+                              inode->i_sb->s_id, function, line, inode->i_ino,
+                              block, current->comm, &vaf);
+               else
+                       printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
+                              "inode #%lu: comm %s: %pV\n",
+                              inode->i_sb->s_id, function, line, inode->i_ino,
+                              current->comm, &vaf);
+               va_end(args);
+       }
        save_error_info(inode->i_sb, function, line);
-       va_start(args, fmt);
-       vaf.fmt = fmt;
-       vaf.va = &args;
-       if (block)
-               printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
-                      "inode #%lu: block %llu: comm %s: %pV\n",
-                      inode->i_sb->s_id, function, line, inode->i_ino,
-                      block, current->comm, &vaf);
-       else
-               printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: "
-                      "inode #%lu: comm %s: %pV\n",
-                      inode->i_sb->s_id, function, line, inode->i_ino,
-                      current->comm, &vaf);
-       va_end(args);
-
        ext4_handle_error(inode->i_sb);
 }
 
@@ -469,27 +476,28 @@ void __ext4_error_file(struct file *file, const char *function,
 
        es = EXT4_SB(inode->i_sb)->s_es;
        es->s_last_error_ino = cpu_to_le32(inode->i_ino);
+       if (ext4_error_ratelimit(inode->i_sb)) {
+               path = d_path(&(file->f_path), pathname, sizeof(pathname));
+               if (IS_ERR(path))
+                       path = "(unknown)";
+               va_start(args, fmt);
+               vaf.fmt = fmt;
+               vaf.va = &args;
+               if (block)
+                       printk(KERN_CRIT
+                              "EXT4-fs error (device %s): %s:%d: inode #%lu: "
+                              "block %llu: comm %s: path %s: %pV\n",
+                              inode->i_sb->s_id, function, line, inode->i_ino,
+                              block, current->comm, path, &vaf);
+               else
+                       printk(KERN_CRIT
+                              "EXT4-fs error (device %s): %s:%d: inode #%lu: "
+                              "comm %s: path %s: %pV\n",
+                              inode->i_sb->s_id, function, line, inode->i_ino,
+                              current->comm, path, &vaf);
+               va_end(args);
+       }
        save_error_info(inode->i_sb, function, line);
-       path = d_path(&(file->f_path), pathname, sizeof(pathname));
-       if (IS_ERR(path))
-               path = "(unknown)";
-       va_start(args, fmt);
-       vaf.fmt = fmt;
-       vaf.va = &args;
-       if (block)
-               printk(KERN_CRIT
-                      "EXT4-fs error (device %s): %s:%d: inode #%lu: "
-                      "block %llu: comm %s: path %s: %pV\n",
-                      inode->i_sb->s_id, function, line, inode->i_ino,
-                      block, current->comm, path, &vaf);
-       else
-               printk(KERN_CRIT
-                      "EXT4-fs error (device %s): %s:%d: inode #%lu: "
-                      "comm %s: path %s: %pV\n",
-                      inode->i_sb->s_id, function, line, inode->i_ino,
-                      current->comm, path, &vaf);
-       va_end(args);
-
        ext4_handle_error(inode->i_sb);
 }
 
@@ -543,11 +551,13 @@ void __ext4_std_error(struct super_block *sb, const char *function,
            (sb->s_flags & MS_RDONLY))
                return;
 
-       errstr = ext4_decode_error(sb, errno, nbuf);
-       printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
-              sb->s_id, function, line, errstr);
-       save_error_info(sb, function, line);
+       if (ext4_error_ratelimit(sb)) {
+               errstr = ext4_decode_error(sb, errno, nbuf);
+               printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
+                      sb->s_id, function, line, errstr);
+       }
 
+       save_error_info(sb, function, line);
        ext4_handle_error(sb);
 }
 
@@ -597,6 +607,9 @@ void __ext4_msg(struct super_block *sb,
        struct va_format vaf;
        va_list args;
 
+       if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs"))
+               return;
+
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
@@ -610,6 +623,10 @@ void __ext4_warning(struct super_block *sb, const char *function,
        struct va_format vaf;
        va_list args;
 
+       if (!___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state),
+                         "EXT4-fs warning"))
+               return;
+
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
@@ -633,18 +650,20 @@ __acquires(bitlock)
        es->s_last_error_block = cpu_to_le64(block);
        __save_error_info(sb, function, line);
 
-       va_start(args, fmt);
-
-       vaf.fmt = fmt;
-       vaf.va = &args;
-       printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
-              sb->s_id, function, line, grp);
-       if (ino)
-               printk(KERN_CONT "inode %lu: ", ino);
-       if (block)
-               printk(KERN_CONT "block %llu:", (unsigned long long) block);
-       printk(KERN_CONT "%pV\n", &vaf);
-       va_end(args);
+       if (ext4_error_ratelimit(sb)) {
+               va_start(args, fmt);
+               vaf.fmt = fmt;
+               vaf.va = &args;
+               printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
+                      sb->s_id, function, line, grp);
+               if (ino)
+                       printk(KERN_CONT "inode %lu: ", ino);
+               if (block)
+                       printk(KERN_CONT "block %llu:",
+                              (unsigned long long) block);
+               printk(KERN_CONT "%pV\n", &vaf);
+               va_end(args);
+       }
 
        if (test_opt(sb, ERRORS_CONT)) {
                ext4_commit_super(sb, 0);
@@ -2606,6 +2625,12 @@ EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
 EXT4_DEPRECATED_ATTR(max_writeback_mb_bump, 128);
 EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb);
 EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
+EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(err_ratelimit_burst, s_err_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
+EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
+EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
 
 static struct attribute *ext4_attrs[] = {
        ATTR_LIST(delayed_allocation_blocks),
@@ -2623,6 +2648,12 @@ static struct attribute *ext4_attrs[] = {
        ATTR_LIST(max_writeback_mb_bump),
        ATTR_LIST(extent_max_zeroout_kb),
        ATTR_LIST(trigger_fs_error),
+       ATTR_LIST(err_ratelimit_interval_ms),
+       ATTR_LIST(err_ratelimit_burst),
+       ATTR_LIST(warning_ratelimit_interval_ms),
+       ATTR_LIST(warning_ratelimit_burst),
+       ATTR_LIST(msg_ratelimit_interval_ms),
+       ATTR_LIST(msg_ratelimit_burst),
        NULL,
 };
 
@@ -4118,6 +4149,11 @@ no_journal:
        if (es->s_error_count)
                mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
 
+       /* Enable message ratelimiting. Default is 10 messages per 5 secs. */
+       ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10);
+       ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10);
+       ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
+
        kfree(orig_data);
        return 0;
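
The super.c hunks above add per-superblock ratelimiting to ext4's error, warning and message printks: three struct ratelimit_state fields, sysfs knobs for their interval and burst, and a default of 10 messages per 5 seconds set at mount time. Note that save_error_info() stays outside the ratelimit check, so error accounting is never skipped, only the console output. Below is a minimal sketch of the same pattern using only the <linux/ratelimit.h> interfaces the patch relies on; demo_sb_info, demo_ratelimit_setup and demo_error are illustrative names.

    #include <linux/ratelimit.h>
    #include <linux/printk.h>

    struct demo_sb_info {
	struct ratelimit_state err_rs;	/* like s_err_ratelimit_state above */
    };

    static void demo_ratelimit_setup(struct demo_sb_info *sbi)
    {
	/* Same default as the patch: at most 10 messages every 5 seconds. */
	ratelimit_state_init(&sbi->err_rs, 5 * HZ, 10);
    }

    static void demo_error(struct demo_sb_info *sbi, const char *msg)
    {
	/* ___ratelimit() returns nonzero while the burst allows printing and
	 * reports how many callbacks it suppressed once it reopens. */
	if (___ratelimit(&sbi->err_rs, "demo-fs error"))
		printk(KERN_CRIT "demo-fs error: %s\n", msg);

	/* Bookkeeping equivalent to save_error_info() would go here,
	 * deliberately outside the ratelimited branch. */
    }
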
 
index c081e34f717f6903492acd3c4bc92d26dc888e7e..03e9bebba1989ef20263fbd1656959b764927b03 100644 (file)
@@ -1350,6 +1350,8 @@ retry:
                                    s_min_extra_isize) {
                                        tried_min_extra_isize++;
                                        new_extra_isize = s_min_extra_isize;
+                                       kfree(is); is = NULL;
+                                       kfree(bs); bs = NULL;
                                        goto retry;
                                }
                                error = -1;
index bb312201ca950114782f6a811725102da3baa718..5649a9d8e94225bf0330b30b84e23819465e0903 100644 (file)
@@ -81,7 +81,7 @@ static int f2fs_write_meta_page(struct page *page,
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
        /* Should not write any meta pages if any IO error occurred */
-       if (wbc->for_reclaim ||
+       if (wbc->for_reclaim || sbi->por_doing ||
                        is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
                dec_page_count(sbi, F2FS_DIRTY_META);
                wbc->pages_skipped++;
@@ -206,6 +206,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
 void release_orphan_inode(struct f2fs_sb_info *sbi)
 {
        mutex_lock(&sbi->orphan_inode_mutex);
+       BUG_ON(sbi->n_orphans == 0);
        sbi->n_orphans--;
        mutex_unlock(&sbi->orphan_inode_mutex);
 }
@@ -225,12 +226,8 @@ void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
                        break;
                orphan = NULL;
        }
-retry:
-       new = kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
-       if (!new) {
-               cond_resched();
-               goto retry;
-       }
+
+       new = f2fs_kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
        new->ino = ino;
 
        /* add new_oentry into list which is sorted by inode number */
@@ -253,6 +250,7 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
                if (orphan->ino == ino) {
                        list_del(&orphan->list);
                        kmem_cache_free(orphan_entry_slab, orphan);
+                       BUG_ON(sbi->n_orphans == 0);
                        sbi->n_orphans--;
                        break;
                }
@@ -277,7 +275,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
        if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
                return 0;
 
-       sbi->por_doing = 1;
+       sbi->por_doing = true;
        start_blk = __start_cp_addr(sbi) + 1;
        orphan_blkaddr = __start_sum_addr(sbi) - 1;
 
@@ -294,7 +292,7 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
        }
        /* clear Orphan Flag */
        clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
-       sbi->por_doing = 0;
+       sbi->por_doing = false;
        return 0;
 }
 
@@ -469,9 +467,7 @@ static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
                        return -EEXIST;
        }
        list_add_tail(&new->list, head);
-#ifdef CONFIG_F2FS_STAT_FS
-       sbi->n_dirty_dirs++;
-#endif
+       stat_inc_dirty_dir(sbi);
        return 0;
 }
 
@@ -482,12 +478,8 @@ void set_dirty_dir_page(struct inode *inode, struct page *page)
 
        if (!S_ISDIR(inode->i_mode))
                return;
-retry:
-       new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
-       if (!new) {
-               cond_resched();
-               goto retry;
-       }
+
+       new = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new->inode = inode;
        INIT_LIST_HEAD(&new->list);
 
@@ -504,13 +496,9 @@ retry:
 void add_dirty_dir_inode(struct inode *inode)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       struct dir_inode_entry *new;
-retry:
-       new = kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
-       if (!new) {
-               cond_resched();
-               goto retry;
-       }
+       struct dir_inode_entry *new =
+                       f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
+
        new->inode = inode;
        INIT_LIST_HEAD(&new->list);
 
@@ -541,9 +529,7 @@ void remove_dirty_dir_inode(struct inode *inode)
                if (entry->inode == inode) {
                        list_del(&entry->list);
                        kmem_cache_free(inode_entry_slab, entry);
-#ifdef CONFIG_F2FS_STAT_FS
-                       sbi->n_dirty_dirs--;
-#endif
+                       stat_dec_dirty_dir(sbi);
                        break;
                }
        }
@@ -617,11 +603,10 @@ static void block_operations(struct f2fs_sb_info *sbi)
        blk_start_plug(&plug);
 
 retry_flush_dents:
-       mutex_lock_all(sbi);
-
+       f2fs_lock_all(sbi);
        /* write all the dirty dentry pages */
        if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
-               mutex_unlock_all(sbi);
+               f2fs_unlock_all(sbi);
                sync_dirty_dir_inodes(sbi);
                goto retry_flush_dents;
        }
@@ -644,7 +629,7 @@ retry_flush_nodes:
 static void unblock_operations(struct f2fs_sb_info *sbi)
 {
        mutex_unlock(&sbi->node_write);
-       mutex_unlock_all(sbi);
+       f2fs_unlock_all(sbi);
 }
 
 static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
@@ -756,8 +741,15 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
        f2fs_put_page(cp_page, 1);
 
        /* wait for previous submitted node/meta pages writeback */
-       while (get_pages(sbi, F2FS_WRITEBACK))
-               congestion_wait(BLK_RW_ASYNC, HZ / 50);
+       sbi->cp_task = current;
+       while (get_pages(sbi, F2FS_WRITEBACK)) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               if (!get_pages(sbi, F2FS_WRITEBACK))
+                       break;
+               io_schedule();
+       }
+       __set_current_state(TASK_RUNNING);
+       sbi->cp_task = NULL;
 
        filemap_fdatawait_range(sbi->node_inode->i_mapping, 0, LONG_MAX);
        filemap_fdatawait_range(sbi->meta_inode->i_mapping, 0, LONG_MAX);
index 941f9b9ca3a5b41fa9208f1d929b20212e08dfcb..d42a1bf993a84921f645ff04a70fdb90a6a43689 100644 (file)
@@ -68,9 +68,6 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
                                        struct buffer_head *bh_result)
 {
        struct f2fs_inode_info *fi = F2FS_I(inode);
-#ifdef CONFIG_F2FS_STAT_FS
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-#endif
        pgoff_t start_fofs, end_fofs;
        block_t start_blkaddr;
 
@@ -80,9 +77,8 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
                return 0;
        }
 
-#ifdef CONFIG_F2FS_STAT_FS
-       sbi->total_hit_ext++;
-#endif
+       stat_inc_hit_ext(inode->i_sb);
+
        start_fofs = fi->ext.fofs;
        end_fofs = fi->ext.fofs + fi->ext.len - 1;
        start_blkaddr = fi->ext.blk_addr;
@@ -100,9 +96,7 @@ static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
                else
                        bh_result->b_size = UINT_MAX;
 
-#ifdef CONFIG_F2FS_STAT_FS
-               sbi->read_hit_ext++;
-#endif
+               stat_inc_hit_ext(inode->i_sb);
                read_unlock(&fi->ext.ext_lock);
                return 1;
        }
@@ -560,9 +554,9 @@ write:
                inode_dec_dirty_dents(inode);
                err = do_write_data_page(page);
        } else {
-               int ilock = mutex_lock_op(sbi);
+               f2fs_lock_op(sbi);
                err = do_write_data_page(page);
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                need_balance_fs = true;
        }
        if (err == -ENOENT)
@@ -641,7 +635,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
        pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
        struct dnode_of_data dn;
        int err = 0;
-       int ilock;
 
        f2fs_balance_fs(sbi);
 repeat:
@@ -650,7 +643,7 @@ repeat:
                return -ENOMEM;
        *pagep = page;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
 
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, index, ALLOC_NODE);
@@ -664,7 +657,7 @@ repeat:
        if (err)
                goto err;
 
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
                return 0;
@@ -700,7 +693,7 @@ out:
        return 0;
 
 err:
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        f2fs_put_page(page, 1);
        return err;
 }
@@ -727,7 +720,7 @@ static int f2fs_write_end(struct file *file,
 }
 
 static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
-               const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+               struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -736,7 +729,7 @@ static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
                return 0;
 
        /* Needs synchronization with the cleaner */
-       return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       return blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                                  get_data_block_ro);
 }
 
index 608f0df5b9190f8e8b301dd61e085fa70c66c5db..590a09efce4a3a8fde298f6d9999212616ac4eb5 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/crc32.h>
 #include <linux/magic.h>
 #include <linux/kobject.h>
+#include <linux/sched.h>
 
 /*
  * For mount options
@@ -317,14 +318,6 @@ enum count_type {
        NR_COUNT_TYPE,
 };
 
-/*
- * Uses as sbi->fs_lock[NR_GLOBAL_LOCKS].
- * The checkpoint procedure blocks all the locks in this fs_lock array.
- * Some FS operations grab free locks, and if there is no free lock,
- * then wait to grab a lock in a round-robin manner.
- */
-#define NR_GLOBAL_LOCKS        8
-
 /*
  * The below are the page types of bios used in submit_bio().
  * The available types are:
@@ -365,12 +358,12 @@ struct f2fs_sb_info {
        struct f2fs_checkpoint *ckpt;           /* raw checkpoint pointer */
        struct inode *meta_inode;               /* cache meta blocks */
        struct mutex cp_mutex;                  /* checkpoint procedure lock */
-       struct mutex fs_lock[NR_GLOBAL_LOCKS];  /* blocking FS operations */
+       struct rw_semaphore cp_rwsem;           /* blocking FS operations */
        struct mutex node_write;                /* locking node writes */
        struct mutex writepages;                /* mutex for writepages() */
-       unsigned char next_lock_num;            /* round-robin global locks */
-       int por_doing;                          /* recovery is doing or not */
-       int on_build_free_nids;                 /* build_free_nids is doing */
+       bool por_doing;                         /* recovery is doing or not */
+       bool on_build_free_nids;                /* build_free_nids is doing */
+       struct task_struct *cp_task;            /* checkpoint task */
 
        /* for orphan inode management */
        struct list_head orphan_inode_list;     /* orphan inode list */
@@ -520,48 +513,24 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
        cp->ckpt_flags = cpu_to_le32(ckpt_flags);
 }
 
-static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
+static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
 {
-       int i;
-
-       for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
-               /*
-                * This is the only time we take multiple fs_lock[]
-                * instances; the order is immaterial since we
-                * always hold cp_mutex, which serializes multiple
-                * such operations.
-                */
-               mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
-       }
+       down_read(&sbi->cp_rwsem);
 }
 
-static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
+static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
 {
-       int i = 0;
-       for (; i < NR_GLOBAL_LOCKS; i++)
-               mutex_unlock(&sbi->fs_lock[i]);
+       up_read(&sbi->cp_rwsem);
 }
 
-static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
+static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
 {
-       unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
-       int i = 0;
-
-       for (; i < NR_GLOBAL_LOCKS; i++)
-               if (mutex_trylock(&sbi->fs_lock[i]))
-                       return i;
-
-       mutex_lock(&sbi->fs_lock[next_lock]);
-       sbi->next_lock_num++;
-       return next_lock;
+       down_write_nest_lock(&sbi->cp_rwsem, &sbi->cp_mutex);
 }
 
-static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
+static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 {
-       if (ilock < 0)
-               return;
-       BUG_ON(ilock >= NR_GLOBAL_LOCKS);
-       mutex_unlock(&sbi->fs_lock[ilock]);
+       up_write(&sbi->cp_rwsem);
 }
 
 /*
@@ -819,6 +788,20 @@ static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
        return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
 }
 
+static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
+                                               gfp_t flags)
+{
+       void *entry;
+retry:
+       entry = kmem_cache_alloc(cachep, flags);
+       if (!entry) {
+               cond_resched();
+               goto retry;
+       }
+
+       return entry;
+}
+
 #define RAW_IS_INODE(p)        ((p)->footer.nid == (p)->footer.ino)
 
 static inline bool IS_INODE(struct page *page)
@@ -1172,7 +1155,13 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
        return (struct f2fs_stat_info*)sbi->stat_info;
 }
 
-#define stat_inc_call_count(si)        ((si)->call_count++)
+#define stat_inc_call_count(si)                ((si)->call_count++)
+#define stat_inc_bggc_count(sbi)       ((sbi)->bg_gc++)
+#define stat_inc_dirty_dir(sbi)                ((sbi)->n_dirty_dirs++)
+#define stat_dec_dirty_dir(sbi)                ((sbi)->n_dirty_dirs--)
+#define stat_inc_hit_ext(sb)           ((F2FS_SB(sb))->total_hit_ext++)
+#define stat_inc_alloc_type(sbi, curseg)                               \
+               ((sbi)->segment_count[(curseg)->alloc_type]++)
 
 #define stat_inc_seg_count(sbi, type)                                  \
        do {                                                            \
@@ -1201,12 +1190,18 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
                si->node_blks += (blks);                                \
        } while (0)
 
+
 int f2fs_build_stats(struct f2fs_sb_info *);
 void f2fs_destroy_stats(struct f2fs_sb_info *);
 void __init f2fs_create_root_stats(void);
 void f2fs_destroy_root_stats(void);
 #else
 #define stat_inc_call_count(si)
+#define stat_inc_bggc_count(si)
+#define stat_inc_dirty_dir(sbi)
+#define stat_dec_dirty_dir(sbi)
+#define stat_inc_hit_ext(sb)
+#define stat_inc_alloc_type(sbi, curseg)
 #define stat_inc_seg_count(si, type)
 #define stat_inc_tot_blk_count(si, blks)
 #define stat_inc_data_blk_count(si, blks)
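
The f2fs.h hunks replace the round-robin array of eight fs_lock mutexes with a single rw_semaphore, cp_rwsem: regular filesystem operations take it shared via f2fs_lock_op()/f2fs_unlock_op() (no more ilock token to carry around), while the checkpoint takes it exclusive via f2fs_lock_all()/f2fs_unlock_all(), nested under cp_mutex so lockdep knows checkpoints themselves are already serialized (the removed fs_lock comment made the same point). The hunks also add f2fs_kmem_cache_alloc(), which folds the open-coded "retry until kmem_cache_alloc() succeeds" loops removed elsewhere in this merge. A small sketch of how the two lock sides are meant to pair up; the demo_* function names are illustrative.

    /* Regular path (write_begin, truncate, namei, ...): shared. */
    static void demo_fs_operation(struct f2fs_sb_info *sbi)
    {
	f2fs_lock_op(sbi);		/* down_read(&sbi->cp_rwsem) */
	/* ... allocate blocks, dirty node pages ... */
	f2fs_unlock_op(sbi);
    }

    /* Checkpoint: exclusive, excludes every f2fs_lock_op() holder. */
    static void demo_checkpoint(struct f2fs_sb_info *sbi)
    {
	mutex_lock(&sbi->cp_mutex);	/* checkpoints serialize on cp_mutex */
	f2fs_lock_all(sbi);		/* down_write_nest_lock(&cp_rwsem, &cp_mutex) */
	/* ... flush dirty dentries/nodes, write the checkpoint ... */
	f2fs_unlock_all(sbi);
	mutex_unlock(&sbi->cp_mutex);
    }
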
index 02c906971cc6311f43527a6adfb9f75ee77eae9e..f27eb0b725375696f16cbf2085a012172df0558e 100644 (file)
@@ -35,18 +35,18 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        block_t old_blk_addr;
        struct dnode_of_data dn;
-       int err, ilock;
+       int err;
 
        f2fs_balance_fs(sbi);
 
        sb_start_pagefault(inode->i_sb);
 
        /* block allocation */
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, page->index, ALLOC_NODE);
        if (err) {
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                goto out;
        }
 
@@ -56,12 +56,12 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
                err = reserve_new_block(&dn);
                if (err) {
                        f2fs_put_dnode(&dn);
-                       mutex_unlock_op(sbi, ilock);
+                       f2fs_unlock_op(sbi);
                        goto out;
                }
        }
        f2fs_put_dnode(&dn);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        file_update_time(vma->vm_file);
        lock_page(page);
@@ -270,7 +270,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
        unsigned int blocksize = inode->i_sb->s_blocksize;
        struct dnode_of_data dn;
        pgoff_t free_from;
-       int count = 0, ilock = -1;
+       int count = 0;
        int err;
 
        trace_f2fs_truncate_blocks_enter(inode, from);
@@ -278,13 +278,13 @@ static int truncate_blocks(struct inode *inode, u64 from)
        free_from = (pgoff_t)
                        ((from + blocksize - 1) >> (sbi->log_blocksize));
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
        if (err) {
                if (err == -ENOENT)
                        goto free_next;
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                trace_f2fs_truncate_blocks_exit(inode, err);
                return err;
        }
@@ -305,7 +305,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
        f2fs_put_dnode(&dn);
 free_next:
        err = truncate_inode_blocks(inode, free_from);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        /* lastly zero out the first data page */
        truncate_partial_data_page(inode, from);
@@ -416,16 +416,15 @@ static void fill_zero(struct inode *inode, pgoff_t index,
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct page *page;
-       int ilock;
 
        if (!len)
                return;
 
        f2fs_balance_fs(sbi);
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        page = get_new_data_page(inode, NULL, index, false);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        if (!IS_ERR(page)) {
                wait_on_page_writeback(page);
@@ -484,7 +483,6 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
                        struct address_space *mapping = inode->i_mapping;
                        loff_t blk_start, blk_end;
                        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-                       int ilock;
 
                        f2fs_balance_fs(sbi);
 
@@ -493,9 +491,9 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len, int mode)
                        truncate_inode_pages_range(mapping, blk_start,
                                        blk_end - 1);
 
-                       ilock = mutex_lock_op(sbi);
+                       f2fs_lock_op(sbi);
                        ret = truncate_hole(inode, pg_start, pg_end);
-                       mutex_unlock_op(sbi, ilock);
+                       f2fs_unlock_op(sbi);
                }
        }
 
@@ -529,13 +527,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 
        for (index = pg_start; index <= pg_end; index++) {
                struct dnode_of_data dn;
-               int ilock;
 
-               ilock = mutex_lock_op(sbi);
+               f2fs_lock_op(sbi);
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
                if (ret) {
-                       mutex_unlock_op(sbi, ilock);
+                       f2fs_unlock_op(sbi);
                        break;
                }
 
@@ -543,12 +540,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
                        ret = reserve_new_block(&dn);
                        if (ret) {
                                f2fs_put_dnode(&dn);
-                               mutex_unlock_op(sbi, ilock);
+                               f2fs_unlock_op(sbi);
                                break;
                        }
                }
                f2fs_put_dnode(&dn);
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
 
                if (pg_start == pg_end)
                        new_size = offset + len;
@@ -685,8 +682,8 @@ const struct file_operations f2fs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .open           = generic_file_open,
        .mmap           = f2fs_file_mmap,
        .fsync          = f2fs_sync_file,
index 2f157e883687d5edb07f7cd53745d0ab6571cb6a..cb286d7b02b284a9cc6d89ca942f087db8f8be9b 100644 (file)
@@ -77,9 +77,7 @@ static int gc_thread_func(void *data)
                else
                        wait_ms = increase_sleep_time(gc_th, wait_ms);
 
-#ifdef CONFIG_F2FS_STAT_FS
-               sbi->bg_gc++;
-#endif
+               stat_inc_bggc_count(sbi);
 
                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi))
@@ -236,8 +234,8 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
 }
 
-static unsigned int get_gc_cost(struct f2fs_sb_info *sbi, unsigned int segno,
-                                       struct victim_sel_policy *p)
+static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
+                       unsigned int segno, struct victim_sel_policy *p)
 {
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
@@ -293,7 +291,11 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                        }
                        break;
                }
-               p.offset = ((segno / p.ofs_unit) * p.ofs_unit) + p.ofs_unit;
+
+               p.offset = segno + p.ofs_unit;
+               if (p.ofs_unit > 1)
+                       p.offset -= segno % p.ofs_unit;
+
                secno = GET_SECNO(sbi, segno);
 
                if (sec_usage_check(sbi, secno))
@@ -306,10 +308,9 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
-               }
-
-               if (cost == max_cost)
+               } else if (unlikely(cost == max_cost)) {
                        continue;
+               }
 
                if (nsearched++ >= p.max_search) {
                        sbi->last_victim[p.gc_mode] = segno;
@@ -358,12 +359,8 @@ static void add_gc_inode(struct inode *inode, struct list_head *ilist)
                iput(inode);
                return;
        }
-repeat:
-       new_ie = kmem_cache_alloc(winode_slab, GFP_NOFS);
-       if (!new_ie) {
-               cond_resched();
-               goto repeat;
-       }
+
+       new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
        new_ie->inode = inode;
        list_add_tail(&new_ie->list, ilist);
 }
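
The rewritten victim-offset step above is arithmetically identical to the expression it replaces: with segno = 13 and ofs_unit = 4, the old form gives (13 / 4) * 4 + 4 = 16 and the new form gives 13 + 4 - (13 % 4) = 16, the first segment of the next section in both cases; with ofs_unit = 1 both reduce to segno + 1. The restructured version just spells out the intent of skipping the rest of the current section, and the cost == max_cost skip is now taken only when the segment did not also record a new minimum cost.
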
index 9339cd292047b897bbc7bb0c5ef5ff337f9190ab..7377ca3ce5c5b1a1a401b3f2a4e6f47492ab6433 100644 (file)
@@ -37,6 +37,31 @@ void f2fs_set_inode_flags(struct inode *inode)
                inode->i_flags |= S_DIRSYNC;
 }
 
+static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+{
+       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+                       S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
+               if (ri->i_addr[0])
+                       inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
+               else
+                       inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
+       }
+}
+
+static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
+{
+       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+               if (old_valid_dev(inode->i_rdev)) {
+                       ri->i_addr[0] = cpu_to_le32(old_encode_dev(inode->i_rdev));
+                       ri->i_addr[1] = 0;
+               } else {
+                       ri->i_addr[0] = 0;
+                       ri->i_addr[1] = cpu_to_le32(new_encode_dev(inode->i_rdev));
+                       ri->i_addr[2] = 0;
+               }
+       }
+}
+
 static int do_read_inode(struct inode *inode)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
@@ -73,10 +98,6 @@ static int do_read_inode(struct inode *inode)
        inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
        inode->i_generation = le32_to_cpu(ri->i_generation);
-       if (ri->i_addr[0])
-               inode->i_rdev = old_decode_dev(le32_to_cpu(ri->i_addr[0]));
-       else
-               inode->i_rdev = new_decode_dev(le32_to_cpu(ri->i_addr[1]));
 
        fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
        fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
@@ -84,8 +105,13 @@ static int do_read_inode(struct inode *inode)
        fi->flags = 0;
        fi->i_advise = ri->i_advise;
        fi->i_pino = le32_to_cpu(ri->i_pino);
+
        get_extent_info(&fi->ext, ri->i_ext);
        get_inline_info(fi, ri);
+
+       /* get rdev by using inline_info */
+       __get_inode_rdev(inode, ri);
+
        f2fs_put_page(node_page, 1);
        return 0;
 }
@@ -179,21 +205,10 @@ void update_inode(struct inode *inode, struct page *node_page)
        ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
        ri->i_generation = cpu_to_le32(inode->i_generation);
 
-       if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
-               if (old_valid_dev(inode->i_rdev)) {
-                       ri->i_addr[0] =
-                               cpu_to_le32(old_encode_dev(inode->i_rdev));
-                       ri->i_addr[1] = 0;
-               } else {
-                       ri->i_addr[0] = 0;
-                       ri->i_addr[1] =
-                               cpu_to_le32(new_encode_dev(inode->i_rdev));
-                       ri->i_addr[2] = 0;
-               }
-       }
-
+       __set_inode_rdev(inode, ri);
        set_cold_node(inode, node_page);
        set_page_dirty(node_page);
+
        clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
 }
 
@@ -214,7 +229,7 @@ int update_inode_page(struct inode *inode)
 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       int ret, ilock;
+       int ret;
 
        if (inode->i_ino == F2FS_NODE_INO(sbi) ||
                        inode->i_ino == F2FS_META_INO(sbi))
@@ -227,9 +242,9 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
         * We need to lock here to prevent producing dirty node pages
         * during the urgent cleaning time when running out of free sections.
         */
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        ret = update_inode_page(inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        if (wbc)
                f2fs_balance_fs(sbi);
@@ -243,7 +258,6 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
 void f2fs_evict_inode(struct inode *inode)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
-       int ilock;
 
        trace_f2fs_evict_inode(inode);
        truncate_inode_pages(&inode->i_data, 0);
@@ -265,9 +279,9 @@ void f2fs_evict_inode(struct inode *inode)
        if (F2FS_HAS_BLOCKS(inode))
                f2fs_truncate(inode);
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        remove_inode_page(inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        sb_end_intwrite(inode->i_sb);
 no_delete:
index 2a5359c990fc09b0141982a2e26b345033848245..575adac17f8be2b55935498b0a49a15b78daddcf 100644 (file)
@@ -27,19 +27,19 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
        nid_t ino;
        struct inode *inode;
        bool nid_free = false;
-       int err, ilock;
+       int err;
 
        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        if (!alloc_nid(sbi, &ino)) {
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                err = -ENOSPC;
                goto fail;
        }
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        inode->i_uid = current_fsuid();
 
@@ -115,7 +115,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
        nid_t ino = 0;
-       int err, ilock;
+       int err;
 
        f2fs_balance_fs(sbi);
 
@@ -131,9 +131,9 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
        ino = inode->i_ino;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        if (err)
                goto out;
 
@@ -157,7 +157,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
        struct inode *inode = old_dentry->d_inode;
        struct super_block *sb = dir->i_sb;
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
-       int err, ilock;
+       int err;
 
        f2fs_balance_fs(sbi);
 
@@ -165,9 +165,9 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
        ihold(inode);
 
        set_inode_flag(F2FS_I(inode), FI_INC_LINK);
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        if (err)
                goto out;
 
@@ -220,7 +220,6 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
        struct f2fs_dir_entry *de;
        struct page *page;
        int err = -ENOENT;
-       int ilock;
 
        trace_f2fs_unlink_enter(dir, dentry);
        f2fs_balance_fs(sbi);
@@ -229,16 +228,16 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
        if (!de)
                goto fail;
 
+       f2fs_lock_op(sbi);
        err = acquire_orphan_inode(sbi);
        if (err) {
+               f2fs_unlock_op(sbi);
                kunmap(page);
                f2fs_put_page(page, 0);
                goto fail;
        }
-
-       ilock = mutex_lock_op(sbi);
        f2fs_delete_entry(de, page, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        /* In order to evict this inode,  we set it dirty */
        mark_inode_dirty(inode);
@@ -254,7 +253,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
        size_t symlen = strlen(symname) + 1;
-       int err, ilock;
+       int err;
 
        f2fs_balance_fs(sbi);
 
@@ -265,9 +264,9 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
        inode->i_op = &f2fs_symlink_inode_operations;
        inode->i_mapping->a_ops = &f2fs_dblock_aops;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        if (err)
                goto out;
 
@@ -290,7 +289,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
        struct inode *inode;
-       int err, ilock;
+       int err;
 
        f2fs_balance_fs(sbi);
 
@@ -304,9 +303,9 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
 
        set_inode_flag(F2FS_I(inode), FI_INC_LINK);
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        if (err)
                goto out_fail;
 
@@ -342,7 +341,6 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
        int err = 0;
-       int ilock;
 
        if (!new_valid_dev(rdev))
                return -EINVAL;
@@ -356,9 +354,9 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
        init_special_inode(inode, inode->i_mode, rdev);
        inode->i_op = &f2fs_special_inode_operations;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        err = f2fs_add_link(dentry, inode);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        if (err)
                goto out;
 
@@ -387,7 +385,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct f2fs_dir_entry *old_dir_entry = NULL;
        struct f2fs_dir_entry *old_entry;
        struct f2fs_dir_entry *new_entry;
-       int err = -ENOENT, ilock = -1;
+       int err = -ENOENT;
 
        f2fs_balance_fs(sbi);
 
@@ -402,7 +400,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        goto out_old;
        }
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
 
        if (new_inode) {
 
@@ -467,7 +465,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                update_inode_page(old_dir);
        }
 
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
        return 0;
 
 put_out_dir:
@@ -477,7 +475,7 @@ out_dir:
                kunmap(old_dir_page);
                f2fs_put_page(old_dir_page, 0);
        }
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 out_old:
        kunmap(old_page);
        f2fs_put_page(old_page, 0);
index 51ef2789443322ea0e3a2d683d305b753997aeae..cc119b65a0d3ff9dc6b96283a703497c591e7cce 100644 (file)
@@ -1156,6 +1156,9 @@ static int f2fs_write_node_page(struct page *page,
        block_t new_addr;
        struct node_info ni;
 
+       if (sbi->por_doing)
+               goto redirty_out;
+
        wait_on_page_writeback(page);
 
        /* get old block addr of this node page */
@@ -1171,12 +1174,8 @@ static int f2fs_write_node_page(struct page *page,
                return 0;
        }
 
-       if (wbc->for_reclaim) {
-               dec_page_count(sbi, F2FS_DIRTY_NODES);
-               wbc->pages_skipped++;
-               set_page_dirty(page);
-               return AOP_WRITEPAGE_ACTIVATE;
-       }
+       if (wbc->for_reclaim)
+               goto redirty_out;
 
        mutex_lock(&sbi->node_write);
        set_page_writeback(page);
@@ -1186,6 +1185,12 @@ static int f2fs_write_node_page(struct page *page,
        mutex_unlock(&sbi->node_write);
        unlock_page(page);
        return 0;
+
+redirty_out:
+       dec_page_count(sbi, F2FS_DIRTY_NODES);
+       wbc->pages_skipped++;
+       set_page_dirty(page);
+       return AOP_WRITEPAGE_ACTIVATE;
 }
 
 /*
@@ -1291,23 +1296,18 @@ static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
        if (nid == 0)
                return 0;
 
-       if (!build)
-               goto retry;
-
-       /* do not add allocated nids */
-       read_lock(&nm_i->nat_tree_lock);
-       ne = __lookup_nat_cache(nm_i, nid);
-       if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
-               allocated = true;
-       read_unlock(&nm_i->nat_tree_lock);
-       if (allocated)
-               return 0;
-retry:
-       i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
-       if (!i) {
-               cond_resched();
-               goto retry;
+       if (build) {
+               /* do not add allocated nids */
+               read_lock(&nm_i->nat_tree_lock);
+               ne = __lookup_nat_cache(nm_i, nid);
+               if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
+                       allocated = true;
+               read_unlock(&nm_i->nat_tree_lock);
+               if (allocated)
+                       return 0;
        }
+
+       i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
        i->nid = nid;
        i->state = NID_NEW;
 
@@ -1439,9 +1439,9 @@ retry:
 
        /* Let's scan nat pages and its caches to get free nids */
        mutex_lock(&nm_i->build_lock);
-       sbi->on_build_free_nids = 1;
+       sbi->on_build_free_nids = true;
        build_free_nids(sbi);
-       sbi->on_build_free_nids = 0;
+       sbi->on_build_free_nids = false;
        mutex_unlock(&nm_i->build_lock);
        goto retry;
 }
index 51ef5eec33d7fec07503e6eb9c90b86256b1dc3a..b278c68b3e0801d0a14454f7e96b74076507ba62 100644 (file)
@@ -64,24 +64,31 @@ static int recover_dentry(struct page *ipage, struct inode *inode)
        name.name = raw_inode->i_name;
 retry:
        de = f2fs_find_entry(dir, &name, &page);
-       if (de && inode->i_ino == le32_to_cpu(de->ino)) {
-               kunmap(page);
-               f2fs_put_page(page, 0);
-               goto out;
-       }
+       if (de && inode->i_ino == le32_to_cpu(de->ino))
+               goto out_unmap_put;
        if (de) {
                einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
                if (IS_ERR(einode)) {
                        WARN_ON(1);
                        if (PTR_ERR(einode) == -ENOENT)
                                err = -EEXIST;
-                       goto out;
+                       goto out_unmap_put;
+               }
+               err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
+               if (err) {
+                       iput(einode);
+                       goto out_unmap_put;
                }
                f2fs_delete_entry(de, page, einode);
                iput(einode);
                goto retry;
        }
        err = __f2fs_add_link(dir, &name, inode);
+       goto out;
+
+out_unmap_put:
+       kunmap(page);
+       f2fs_put_page(page, 0);
 out:
        f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode and its dentry: "
                        "ino = %x, name = %s, dir = %lx, err = %d",
@@ -285,7 +292,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        struct f2fs_summary sum;
        struct node_info ni;
        int err = 0, recovered = 0;
-       int ilock;
 
        start = start_bidx_of_node(ofs_of_node(page), fi);
        if (IS_INODE(page))
@@ -293,12 +299,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        else
                end = start + ADDRS_PER_BLOCK;
 
-       ilock = mutex_lock_op(sbi);
+       f2fs_lock_op(sbi);
        set_new_dnode(&dn, inode, NULL, NULL, 0);
 
        err = get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
-               mutex_unlock_op(sbi, ilock);
+               f2fs_unlock_op(sbi);
                return err;
        }
 
@@ -349,7 +355,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
 err:
        f2fs_put_dnode(&dn);
-       mutex_unlock_op(sbi, ilock);
+       f2fs_unlock_op(sbi);
 
        f2fs_msg(sbi->sb, KERN_NOTICE, "recover_data: ino = %lx, "
                        "recovered_data = %d blocks, err = %d",
@@ -419,6 +425,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
 {
        struct list_head inode_list;
        int err;
+       bool need_writecp = false;
 
        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
                        sizeof(struct fsync_inode_entry), NULL);
@@ -428,7 +435,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
        INIT_LIST_HEAD(&inode_list);
 
        /* step #1: find fsynced inode numbers */
-       sbi->por_doing = 1;
+       sbi->por_doing = true;
        err = find_fsync_dnodes(sbi, &inode_list);
        if (err)
                goto out;
@@ -436,14 +443,16 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
        if (list_empty(&inode_list))
                goto out;
 
+       need_writecp = true;
+
        /* step #2: recover data */
        err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
        BUG_ON(!list_empty(&inode_list));
 out:
        destroy_fsync_dnodes(&inode_list);
        kmem_cache_destroy(fsync_entry_slab);
-       sbi->por_doing = 0;
-       if (!err)
+       sbi->por_doing = false;
+       if (!err && need_writecp)
                write_checkpoint(sbi, false);
        return err;
 }
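
The mutex_lock_op()/mutex_unlock_op() pairs removed throughout these hunks are replaced by f2fs_lock_op()/f2fs_unlock_op(); given the cp_rwsem that the super.c hunk below initialises, these are presumably thin wrappers over a reader/writer semaphore, with ordinary operations taking it shared and the checkpoint path taking it exclusive. A sketch under that assumption (the "block operations" helper name is illustrative only):

/* Sketch, not the verbatim f2fs helpers. */
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	down_read(&sbi->cp_rwsem);	/* many operations may run concurrently */
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	up_read(&sbi->cp_rwsem);
}

/* The checkpoint writer would take the same rwsem exclusively to drain them. */
static inline void f2fs_block_operations_sketch(struct f2fs_sb_info *sbi)
{
	down_write(&sbi->cp_rwsem);
}
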
index 09af9c7b0f52673fff92f00be5a37a6030b72668..c9c276e5316931eb461f0c79ff49e4d91b5f6782 100644 (file)
@@ -78,10 +78,14 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
        if (dirty_type == DIRTY) {
                enum dirty_type t = DIRTY_HOT_DATA;
 
-               /* clear all the bitmaps */
-               for (; t <= DIRTY_COLD_NODE; t++)
-                       if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
+               /* clear its dirty bitmap */
+               for (; t <= DIRTY_COLD_NODE; t++) {
+                       if (test_and_clear_bit(segno,
+                                               dirty_i->dirty_segmap[t])) {
                                dirty_i->nr_dirty[t]--;
+                               break;
+                       }
+               }
 
                if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
                        clear_bit(GET_SECNO(sbi, segno),
@@ -550,9 +554,8 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
                change_curseg(sbi, type, true);
        else
                new_curseg(sbi, type, false);
-#ifdef CONFIG_F2FS_STAT_FS
-       sbi->segment_count[curseg->alloc_type]++;
-#endif
+
+       stat_inc_alloc_type(sbi, curseg);
 }
 
 void allocate_new_segments(struct f2fs_sb_info *sbi)
@@ -597,6 +600,10 @@ static void f2fs_end_io_write(struct bio *bio, int err)
 
        if (p->is_sync)
                complete(p->wait);
+
+       if (!get_pages(p->sbi, F2FS_WRITEBACK) && p->sbi->cp_task)
+               wake_up_process(p->sbi->cp_task);
+
        kfree(p);
        bio_put(bio);
 }
@@ -657,6 +664,7 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
                                block_t blk_addr, enum page_type type)
 {
        struct block_device *bdev = sbi->sb->s_bdev;
+       int bio_blocks;
 
        verify_block_addr(sbi, blk_addr);
 
@@ -676,7 +684,8 @@ retry:
                        goto retry;
                }
 
-               sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
+               bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+               sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
                sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
                sbi->bio[type]->bi_private = priv;
                /*
@@ -801,9 +810,8 @@ static void do_write_page(struct f2fs_sb_info *sbi, struct page *page,
 
        mutex_lock(&sit_i->sentry_lock);
        __refresh_next_blkoff(sbi, curseg);
-#ifdef CONFIG_F2FS_STAT_FS
-       sbi->block_count[curseg->alloc_type]++;
-#endif
+
+       stat_inc_alloc_type(sbi, curseg);
 
        /*
         * SIT information should be updated before segment allocation,
@@ -1271,9 +1279,9 @@ static bool flush_sits_in_journal(struct f2fs_sb_info *sbi)
                        __mark_sit_entry_dirty(sbi, segno);
                }
                update_sits_in_cursum(sum, -sits_in_cursum(sum));
-               return 1;
+               return true;
        }
-       return 0;
+       return false;
 }
 
 /*
index bdd10eab8c40d7c25fc24eebbeed2a0f705e902b..7f94d78cda3dea186b142970223f0a3863781cab 100644 (file)
@@ -90,6 +90,8 @@
        (blk_addr << ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
 #define SECTOR_TO_BLOCK(sbi, sectors)                                  \
        (sectors >> ((sbi)->log_blocksize - F2FS_LOG_SECTOR_SIZE))
+#define MAX_BIO_BLOCKS(max_hw_blocks)                                  \
+       (min((int)max_hw_blocks, BIO_MAX_PAGES))
 
 /* during checkpoint, bio_private is used to synchronize the last bio */
 struct bio_private {
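
A quick worked example of the new cap, assuming the usual BIO_MAX_PAGES value of 256: a device whose max_hw_blocks(sbi) evaluates to 512 now gets bios of at most 256 blocks, while one limited to 64 blocks is unaffected:

	bio_blocks = MAX_BIO_BLOCKS(512);	/* -> 256, clamped to BIO_MAX_PAGES */
	bio_blocks = MAX_BIO_BLOCKS(64);	/* ->  64, already under the limit  */
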
index 13d0a0fe49dd413ed70d3a10dc864d27bea5c79a..9a094596fd21a53961e66ef7670f10833079ceda 100644 (file)
@@ -43,7 +43,9 @@ enum {
        Opt_disable_roll_forward,
        Opt_discard,
        Opt_noheap,
+       Opt_user_xattr,
        Opt_nouser_xattr,
+       Opt_acl,
        Opt_noacl,
        Opt_active_logs,
        Opt_disable_ext_identify,
@@ -56,7 +58,9 @@ static match_table_t f2fs_tokens = {
        {Opt_disable_roll_forward, "disable_roll_forward"},
        {Opt_discard, "discard"},
        {Opt_noheap, "no_heap"},
+       {Opt_user_xattr, "user_xattr"},
        {Opt_nouser_xattr, "nouser_xattr"},
+       {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_active_logs, "active_logs=%u"},
        {Opt_disable_ext_identify, "disable_ext_identify"},
@@ -237,6 +241,9 @@ static int parse_options(struct super_block *sb, char *options)
                        set_opt(sbi, NOHEAP);
                        break;
 #ifdef CONFIG_F2FS_FS_XATTR
+               case Opt_user_xattr:
+                       set_opt(sbi, XATTR_USER);
+                       break;
                case Opt_nouser_xattr:
                        clear_opt(sbi, XATTR_USER);
                        break;
@@ -244,6 +251,10 @@ static int parse_options(struct super_block *sb, char *options)
                        set_opt(sbi, INLINE_XATTR);
                        break;
 #else
+               case Opt_user_xattr:
+                       f2fs_msg(sb, KERN_INFO,
+                               "user_xattr options not supported");
+                       break;
                case Opt_nouser_xattr:
                        f2fs_msg(sb, KERN_INFO,
                                "nouser_xattr options not supported");
@@ -254,10 +265,16 @@ static int parse_options(struct super_block *sb, char *options)
                        break;
 #endif
 #ifdef CONFIG_F2FS_FS_POSIX_ACL
+               case Opt_acl:
+                       set_opt(sbi, POSIX_ACL);
+                       break;
                case Opt_noacl:
                        clear_opt(sbi, POSIX_ACL);
                        break;
 #else
+               case Opt_acl:
+                       f2fs_msg(sb, KERN_INFO, "acl options not supported");
+                       break;
                case Opt_noacl:
                        f2fs_msg(sb, KERN_INFO, "noacl options not supported");
                        break;
@@ -355,7 +372,9 @@ static void f2fs_put_super(struct super_block *sb)
        f2fs_destroy_stats(sbi);
        stop_gc_thread(sbi);
 
-       write_checkpoint(sbi, true);
+       /* We don't need to do checkpoint when it's clean */
+       if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES))
+               write_checkpoint(sbi, true);
 
        iput(sbi->node_inode);
        iput(sbi->meta_inode);
@@ -727,30 +746,47 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
                atomic_set(&sbi->nr_pages[i], 0);
 }
 
-static int validate_superblock(struct super_block *sb,
-               struct f2fs_super_block **raw_super,
-               struct buffer_head **raw_super_buf, sector_t block)
+/*
+ * Read f2fs raw super block.
+ * Because the super block is stored in two copies, read the first copy
+ * first; if it turns out to be invalid, fall back to reading the second one.
+ */
+static int read_raw_super_block(struct super_block *sb,
+                       struct f2fs_super_block **raw_super,
+                       struct buffer_head **raw_super_buf)
 {
-       const char *super = (block == 0 ? "first" : "second");
+       int block = 0;
 
-       /* read f2fs raw super block */
+retry:
        *raw_super_buf = sb_bread(sb, block);
        if (!*raw_super_buf) {
-               f2fs_msg(sb, KERN_ERR, "unable to read %s superblock",
-                               super);
-               return -EIO;
+               f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
+                               block + 1);
+               if (block == 0) {
+                       block++;
+                       goto retry;
+               } else {
+                       return -EIO;
+               }
        }
 
        *raw_super = (struct f2fs_super_block *)
                ((char *)(*raw_super_buf)->b_data + F2FS_SUPER_OFFSET);
 
        /* sanity checking of raw super */
-       if (!sanity_check_raw_super(sb, *raw_super))
-               return 0;
+       if (sanity_check_raw_super(sb, *raw_super)) {
+               brelse(*raw_super_buf);
+               f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
+                               "in %dth superblock", block + 1);
+               if (block == 0) {
+                       block++;
+                       goto retry;
+               } else {
+                       return -EINVAL;
+               }
+       }
 
-       f2fs_msg(sb, KERN_ERR, "Can't find a valid F2FS filesystem "
-                               "in %s superblock", super);
-       return -EINVAL;
+       return 0;
 }
 
 static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -760,7 +796,6 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        struct buffer_head *raw_super_buf;
        struct inode *root;
        long err = -EINVAL;
-       int i;
 
        /* allocate memory for f2fs-specific super block info */
        sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
@@ -773,14 +808,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
                goto free_sbi;
        }
 
-       err = validate_superblock(sb, &raw_super, &raw_super_buf, 0);
-       if (err) {
-               brelse(raw_super_buf);
-               /* check secondary superblock when primary failed */
-               err = validate_superblock(sb, &raw_super, &raw_super_buf, 1);
-               if (err)
-                       goto free_sb_buf;
-       }
+       err = read_raw_super_block(sb, &raw_super, &raw_super_buf);
+       if (err)
+               goto free_sbi;
+
        sb->s_fs_info = sbi;
        /* init some FS parameters */
        sbi->active_logs = NR_CURSEG_TYPE;
@@ -818,12 +849,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        mutex_init(&sbi->gc_mutex);
        mutex_init(&sbi->writepages);
        mutex_init(&sbi->cp_mutex);
-       for (i = 0; i < NR_GLOBAL_LOCKS; i++)
-               mutex_init(&sbi->fs_lock[i]);
        mutex_init(&sbi->node_write);
-       sbi->por_doing = 0;
+       sbi->por_doing = false;
        spin_lock_init(&sbi->stat_lock);
        init_rwsem(&sbi->bio_sem);
+       init_rwsem(&sbi->cp_rwsem);
        init_sb_info(sbi);
 
        /* get an inode for meta space */
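
With the user_xattr and acl tokens now recognised, the positive spellings can be passed at mount time (or used to re-enable a feature on remount) instead of being rejected as unknown options; for example, with a hypothetical device and mount point:

	mount -t f2fs -o user_xattr,acl /dev/sdX /mnt/f2fs
	mount -o remount,noacl /mnt/f2fs

On kernels built without CONFIG_F2FS_FS_XATTR or CONFIG_F2FS_FS_POSIX_ACL, the new cases above only log that the option is not supported, mirroring the existing negative variants.
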
index 1ac8a5f6e38096a702c62d2520b5b001a87f9fa2..f685138dd49607fce91e259c5d09872aece53bf5 100644 (file)
@@ -154,6 +154,9 @@ static int f2fs_xattr_advise_set(struct dentry *dentry, const char *name,
 }
 
 #ifdef CONFIG_F2FS_FS_SECURITY
+static int __f2fs_setxattr(struct inode *inode, int name_index,
+                       const char *name, const void *value, size_t value_len,
+                       struct page *ipage);
 static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
                void *page)
 {
@@ -161,7 +164,7 @@ static int f2fs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
        int err = 0;
 
        for (xattr = xattr_array; xattr->name != NULL; xattr++) {
-               err = f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
+               err = __f2fs_setxattr(inode, F2FS_XATTR_INDEX_SECURITY,
                                xattr->name, xattr->value,
                                xattr->value_len, (struct page *)page);
                if (err < 0)
@@ -469,16 +472,15 @@ cleanup:
        return error;
 }
 
-int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
-                       const void *value, size_t value_len, struct page *ipage)
+static int __f2fs_setxattr(struct inode *inode, int name_index,
+                       const char *name, const void *value, size_t value_len,
+                       struct page *ipage)
 {
-       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct f2fs_xattr_entry *here, *last;
        void *base_addr;
        int found, newsize;
        size_t name_len;
-       int ilock;
        __u32 new_hsize;
        int error = -ENOMEM;
 
@@ -493,10 +495,6 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
        if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN(inode))
                return -ERANGE;
 
-       f2fs_balance_fs(sbi);
-
-       ilock = mutex_lock_op(sbi);
-
        base_addr = read_all_xattrs(inode, ipage);
        if (!base_addr)
                goto exit;
@@ -578,7 +576,21 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
        else
                update_inode_page(inode);
 exit:
-       mutex_unlock_op(sbi, ilock);
        kzfree(base_addr);
        return error;
 }
+
+int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
+                       const void *value, size_t value_len, struct page *ipage)
+{
+       struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+       int err;
+
+       f2fs_balance_fs(sbi);
+
+       f2fs_lock_op(sbi);
+       err = __f2fs_setxattr(inode, name_index, name, value, value_len, ipage);
+       f2fs_unlock_op(sbi);
+
+       return err;
+}
index 9b104f543056238016c683ef822046a784169f50..33711ff2b4a3e495a886ef09c229431fbead4598 100644 (file)
@@ -172,8 +172,8 @@ const struct file_operations fat_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .release        = fat_file_release,
        .unlocked_ioctl = fat_generic_ioctl,
index 0062da21dd8b7995aa1764bfd943cde9d2e22364..3134d1ede2925830fc16a6ae8d4bd3c9c5f59fae 100644 (file)
@@ -185,8 +185,7 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
-                            const struct iovec *iov,
-                            loff_t offset, unsigned long nr_segs)
+                            struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -203,7 +202,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
                 *
                 * Return 0, and fallback to normal buffered write.
                 */
-               loff_t size = offset + iov_length(iov, nr_segs);
+               loff_t size = offset + iov_iter_count(iter);
                if (MSDOS_I(inode)->mmu_private < size)
                        return 0;
        }
@@ -212,10 +211,9 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
         * FAT need to use the DIO_LOCKING for avoiding the race
         * condition of fat_get_block() and ->truncate().
         */
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                fat_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block);
        if (ret < 0 && (rw & WRITE))
-               fat_write_failed(mapping, offset + iov_length(iov, nr_segs));
+               fat_write_failed(mapping, offset + iov_iter_count(iter));
 
        return ret;
 }
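
The fat and fuse conversions in this merge follow the same caller-side recipe: build an iov_iter over the old iovec array with the (then) five-argument iov_iter_init() and query the remaining bytes with iov_iter_count() instead of iov_length(). A minimal sketch of that pattern; the callee name is hypothetical:

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = count };
	struct iov_iter ii;

	iov_iter_init(&ii, &iov, 1, count, 0);	/* nr_segs = 1, no offset */

	/* hypothetical helper taking the iterator, as fuse_direct_io() now does */
	return example_do_io(file, &ii, ppos);
}
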
index b2a86e324aac05f7bf64b7ce0d0e2c30d91f3d68..29d7feb62cf7a5784a3828d9e6b19db4c30ceb01 100644 (file)
@@ -58,15 +58,16 @@ void fscache_cookie_init_once(void *_cookie)
 struct fscache_cookie *__fscache_acquire_cookie(
        struct fscache_cookie *parent,
        const struct fscache_cookie_def *def,
-       void *netfs_data)
+       void *netfs_data,
+       bool enable)
 {
        struct fscache_cookie *cookie;
 
        BUG_ON(!def);
 
-       _enter("{%s},{%s},%p",
+       _enter("{%s},{%s},%p,%u",
               parent ? (char *) parent->def->name : "<no-parent>",
-              def->name, netfs_data);
+              def->name, netfs_data, enable);
 
        fscache_stat(&fscache_n_acquires);
 
@@ -106,7 +107,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
        cookie->def             = def;
        cookie->parent          = parent;
        cookie->netfs_data      = netfs_data;
-       cookie->flags           = 0;
+       cookie->flags           = (1 << FSCACHE_COOKIE_NO_DATA_YET);
 
        /* radix tree insertion won't use the preallocation pool unless it's
         * told it may not wait */
@@ -124,16 +125,22 @@ struct fscache_cookie *__fscache_acquire_cookie(
                break;
        }
 
-       /* if the object is an index then we need do nothing more here - we
-        * create indices on disk when we need them as an index may exist in
-        * multiple caches */
-       if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
-               if (fscache_acquire_non_index_cookie(cookie) < 0) {
-                       atomic_dec(&parent->n_children);
-                       __fscache_cookie_put(cookie);
-                       fscache_stat(&fscache_n_acquires_nobufs);
-                       _leave(" = NULL");
-                       return NULL;
+       if (enable) {
+               /* if the object is an index then we need do nothing more here
+                * - we create indices on disk when we need them as an index
+                * may exist in multiple caches */
+               if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
+                       if (fscache_acquire_non_index_cookie(cookie) == 0) {
+                               set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+                       } else {
+                               atomic_dec(&parent->n_children);
+                               __fscache_cookie_put(cookie);
+                               fscache_stat(&fscache_n_acquires_nobufs);
+                               _leave(" = NULL");
+                               return NULL;
+                       }
+               } else {
+                       set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
                }
        }
 
@@ -143,6 +150,39 @@ struct fscache_cookie *__fscache_acquire_cookie(
 }
 EXPORT_SYMBOL(__fscache_acquire_cookie);
 
+/*
+ * Enable a cookie to permit it to accept new operations.
+ */
+void __fscache_enable_cookie(struct fscache_cookie *cookie,
+                            bool (*can_enable)(void *data),
+                            void *data)
+{
+       _enter("%p", cookie);
+
+       wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
+                        fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+
+       if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
+               goto out_unlock;
+
+       if (can_enable && !can_enable(data)) {
+               /* The netfs decided it didn't want to enable after all */
+       } else if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
+               /* Wait for outstanding disablement to complete */
+               __fscache_wait_on_invalidate(cookie);
+
+               if (fscache_acquire_non_index_cookie(cookie) == 0)
+                       set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+       } else {
+               set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+       }
+
+out_unlock:
+       clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
+       wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
+}
+EXPORT_SYMBOL(__fscache_enable_cookie);
+
 /*
  * acquire a non-index cookie
  * - this must make sure the index chain is instantiated and instantiate the
@@ -157,7 +197,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
 
        _enter("");
 
-       cookie->flags = 1 << FSCACHE_COOKIE_UNAVAILABLE;
+       set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
 
        /* now we need to see whether the backing objects for this cookie yet
         * exist, if not there'll be nothing to search */
@@ -180,9 +220,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
 
        _debug("cache %s", cache->tag->name);
 
-       cookie->flags =
-               (1 << FSCACHE_COOKIE_LOOKING_UP) |
-               (1 << FSCACHE_COOKIE_NO_DATA_YET);
+       set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
 
        /* ask the cache to allocate objects for this cookie and its parent
         * chain */
@@ -398,7 +436,8 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
        if (!hlist_empty(&cookie->backing_objects)) {
                spin_lock(&cookie->lock);
 
-               if (!hlist_empty(&cookie->backing_objects) &&
+               if (fscache_cookie_enabled(cookie) &&
+                   !hlist_empty(&cookie->backing_objects) &&
                    !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
                                      &cookie->flags)) {
                        object = hlist_entry(cookie->backing_objects.first,
@@ -452,10 +491,14 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
 
        spin_lock(&cookie->lock);
 
-       /* update the index entry on disk in each cache backing this cookie */
-       hlist_for_each_entry(object,
-                            &cookie->backing_objects, cookie_link) {
-               fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
+       if (fscache_cookie_enabled(cookie)) {
+               /* update the index entry on disk in each cache backing this
+                * cookie.
+                */
+               hlist_for_each_entry(object,
+                                    &cookie->backing_objects, cookie_link) {
+                       fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
+               }
        }
 
        spin_unlock(&cookie->lock);
@@ -464,28 +507,14 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
 EXPORT_SYMBOL(__fscache_update_cookie);
 
 /*
- * release a cookie back to the cache
- * - the object will be marked as recyclable on disk if retire is true
- * - all dependents of this cookie must have already been unregistered
- *   (indices/files/pages)
+ * Disable a cookie to stop it from accepting new requests from the netfs.
  */
-void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
 {
        struct fscache_object *object;
+       bool awaken = false;
 
-       fscache_stat(&fscache_n_relinquishes);
-       if (retire)
-               fscache_stat(&fscache_n_relinquishes_retire);
-
-       if (!cookie) {
-               fscache_stat(&fscache_n_relinquishes_null);
-               _leave(" [no cookie]");
-               return;
-       }
-
-       _enter("%p{%s,%p,%d},%d",
-              cookie, cookie->def->name, cookie->netfs_data,
-              atomic_read(&cookie->n_active), retire);
+       _enter("%p,%u", cookie, invalidate);
 
        ASSERTCMP(atomic_read(&cookie->n_active), >, 0);
 
@@ -495,24 +524,82 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
                BUG();
        }
 
-       /* No further netfs-accessing operations on this cookie permitted */
-       set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
-       if (retire)
-               set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
+       wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
+                        fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+       if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
+               goto out_unlock_enable;
+
+       /* If the cookie is being invalidated, wait for that to complete first
+        * so that we can reuse the flag.
+        */
+       __fscache_wait_on_invalidate(cookie);
+
+       /* Dispose of the backing objects */
+       set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags);
 
        spin_lock(&cookie->lock);
-       hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
-               fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
+       if (!hlist_empty(&cookie->backing_objects)) {
+               hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
+                       if (invalidate)
+                               set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+                       fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
+               }
+       } else {
+               if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
+                       awaken = true;
        }
        spin_unlock(&cookie->lock);
+       if (awaken)
+               wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
 
        /* Wait for cessation of activity requiring access to the netfs (when
-        * n_active reaches 0).
+        * n_active reaches 0).  This makes sure outstanding reads and writes
+        * have completed.
         */
        if (!atomic_dec_and_test(&cookie->n_active))
                wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
                                 TASK_UNINTERRUPTIBLE);
 
+       /* Reset the cookie state if it wasn't relinquished */
+       if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
+               atomic_inc(&cookie->n_active);
+               set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+       }
+
+out_unlock_enable:
+       clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
+       wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
+       _leave("");
+}
+EXPORT_SYMBOL(__fscache_disable_cookie);
+
+/*
+ * release a cookie back to the cache
+ * - the object will be marked as recyclable on disk if retire is true
+ * - all dependents of this cookie must have already been unregistered
+ *   (indices/files/pages)
+ */
+void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
+{
+       fscache_stat(&fscache_n_relinquishes);
+       if (retire)
+               fscache_stat(&fscache_n_relinquishes_retire);
+
+       if (!cookie) {
+               fscache_stat(&fscache_n_relinquishes_null);
+               _leave(" [no cookie]");
+               return;
+       }
+
+       _enter("%p{%s,%p,%d},%d",
+              cookie, cookie->def->name, cookie->netfs_data,
+              atomic_read(&cookie->n_active), retire);
+
+       /* No further netfs-accessing operations on this cookie permitted */
+       set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags);
+
+       __fscache_disable_cookie(cookie, retire);
+
        /* Clear pointers back to the netfs */
        cookie->netfs_data      = NULL;
        cookie->def             = NULL;
@@ -568,6 +655,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
 {
        struct fscache_operation *op;
        struct fscache_object *object;
+       bool wake_cookie = false;
        int ret;
 
        _enter("%p,", cookie);
@@ -591,7 +679,8 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
 
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto inconsistent;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
@@ -600,7 +689,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
 
        op->debug_id = atomic_inc_return(&fscache_op_debug_id);
 
-       atomic_inc(&cookie->n_active);
+       __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, op) < 0)
                goto submit_failed;
 
@@ -622,9 +711,11 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
        return ret;
 
 submit_failed:
-       atomic_dec(&cookie->n_active);
+       wake_cookie = __fscache_unuse_cookie(cookie);
 inconsistent:
        spin_unlock(&cookie->lock);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
        kfree(op);
        _leave(" = -ESTALE");
        return -ESTALE;
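
Seen from a netfs, the enablement state added here is driven through wrapper calls whose shape follows the exported __fscache_* symbols above; a hedged usage sketch (the wrapper names and signatures are assumed to mirror those exports and may differ from the final API):

/* Usage sketch only: acquire a cookie disabled and toggle caching later. */
static void example_netfs_cache_toggle(struct fscache_cookie *parent_cookie,
				       const struct fscache_cookie_def *def,
				       void *netfs_data, bool want_caching)
{
	struct fscache_cookie *cookie;

	/* Acquire in the disabled state (new 4th argument). */
	cookie = fscache_acquire_cookie(parent_cookie, def, netfs_data, false);
	if (!cookie)
		return;

	if (want_caching)
		fscache_enable_cookie(cookie, NULL, NULL);	/* no can_enable() check */

	/* ... read/write-page calls only do real work while enabled ... */

	fscache_disable_cookie(cookie, false);	/* stop, keep on-disk data */
	fscache_relinquish_cookie(cookie, false);
}
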
index 10a2ade0bdf8c1ff390be0e82c5dd2e161732054..5a117df2a9ef7e51f89a55d1bc40bdac81b79393 100644 (file)
@@ -59,6 +59,7 @@ struct fscache_cookie fscache_fsdef_index = {
        .lock           = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
        .backing_objects = HLIST_HEAD_INIT,
        .def            = &fscache_fsdef_index_def,
+       .flags          = 1 << FSCACHE_COOKIE_ENABLED,
 };
 EXPORT_SYMBOL(fscache_fsdef_index);
 
index b1bb6117473a25a292ccf326dcd607fcf4a4f354..989f394015472bafe7758c19e006a16c57a59e4c 100644 (file)
@@ -45,6 +45,7 @@ int __fscache_register_netfs(struct fscache_netfs *netfs)
        netfs->primary_index->def               = &fscache_fsdef_netfs_def;
        netfs->primary_index->parent            = &fscache_fsdef_index;
        netfs->primary_index->netfs_data        = netfs;
+       netfs->primary_index->flags             = 1 << FSCACHE_COOKIE_ENABLED;
 
        atomic_inc(&netfs->primary_index->parent->usage);
        atomic_inc(&netfs->primary_index->parent->n_children);
index 86d75a60b20c85543bd445869d9a57f37d42e920..dcb8216177747d033731bfd6b4a42ff903af44c5 100644 (file)
@@ -495,6 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
                 * returning ENODATA.
                 */
                set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+               clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
 
                _debug("wake up lookup %p", &cookie->flags);
                clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
@@ -527,6 +528,7 @@ void fscache_obtained_object(struct fscache_object *object)
 
                /* We do (presumably) have data */
                clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+               clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
 
                /* Allow write requests to begin stacking up and read requests
                 * to begin shovelling data.
@@ -679,7 +681,8 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
         */
        spin_lock(&cookie->lock);
        hlist_del_init(&object->cookie_link);
-       if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
+       if (hlist_empty(&cookie->backing_objects) &&
+           test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
                awaken = true;
        spin_unlock(&cookie->lock);
 
@@ -927,7 +930,7 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj
         */
        if (!fscache_use_cookie(object)) {
                ASSERT(object->cookie->stores.rnode == NULL);
-               set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
+               set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
                _leave(" [no cookie]");
                return transit_to(KILL_OBJECT);
        }
index 73899c1c34494555d73dd5714ecb21eb74c0296d..7f5c658af755f9b43296c1dacfab5def788c4a1c 100644 (file)
@@ -163,12 +163,10 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
 
        fscache_stat(&fscache_n_attr_changed_calls);
 
-       if (fscache_object_is_active(object) &&
-           fscache_use_cookie(object)) {
+       if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
-               fscache_unuse_cookie(object);
                if (ret < 0)
                        fscache_abort_object(object);
        }
@@ -184,6 +182,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 {
        struct fscache_operation *op;
        struct fscache_object *object;
+       bool wake_cookie;
 
        _enter("%p", cookie);
 
@@ -199,15 +198,19 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
        }
 
        fscache_operation_init(op, fscache_attr_changed_op, NULL);
-       op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
+       op->flags = FSCACHE_OP_ASYNC |
+               (1 << FSCACHE_OP_EXCLUSIVE) |
+               (1 << FSCACHE_OP_UNUSE_COOKIE);
 
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
 
+       __fscache_use_cookie(cookie);
        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
@@ -217,8 +220,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
        return 0;
 
 nobufs:
+       wake_cookie = __fscache_unuse_cookie(cookie);
        spin_unlock(&cookie->lock);
        kfree(op);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
@@ -263,7 +269,6 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
        }
 
        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
-       atomic_inc(&cookie->n_active);
        op->op.flags    = FSCACHE_OP_MYTHREAD |
                (1UL << FSCACHE_OP_WAITING) |
                (1UL << FSCACHE_OP_UNUSE_COOKIE);
@@ -384,6 +389,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 {
        struct fscache_retrieval *op;
        struct fscache_object *object;
+       bool wake_cookie = false;
        int ret;
 
        _enter("%p,%p,,,", cookie, page);
@@ -405,7 +411,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                return -ERESTARTSYS;
 
        op = fscache_alloc_retrieval(cookie, page->mapping,
-                                    end_io_func,context);
+                                    end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
@@ -414,13 +420,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
 
        ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
 
+       __fscache_use_cookie(cookie);
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
 
@@ -475,9 +483,11 @@ error:
 
 nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
+       wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs_unlock:
        spin_unlock(&cookie->lock);
-       atomic_dec(&cookie->n_active);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
        kfree(op);
 nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
@@ -514,6 +524,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 {
        struct fscache_retrieval *op;
        struct fscache_object *object;
+       bool wake_cookie = false;
        int ret;
 
        _enter("%p,,%d,,,", cookie, *nr_pages);
@@ -542,11 +553,13 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
 
+       __fscache_use_cookie(cookie);
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
 
@@ -601,10 +614,12 @@ error:
 
 nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
+       wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs_unlock:
        spin_unlock(&cookie->lock);
-       atomic_dec(&cookie->n_active);
        kfree(op);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
 nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
@@ -626,6 +641,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 {
        struct fscache_retrieval *op;
        struct fscache_object *object;
+       bool wake_cookie = false;
        int ret;
 
        _enter("%p,%p,,,", cookie, page);
@@ -653,13 +669,15 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
 
+       __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, &op->op) < 0)
-               goto nobufs_unlock;
+               goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);
 
        fscache_stat(&fscache_n_alloc_ops);
@@ -689,10 +707,13 @@ error:
        _leave(" = %d", ret);
        return ret;
 
+nobufs_unlock_dec:
+       wake_cookie = __fscache_unuse_cookie(cookie);
 nobufs_unlock:
        spin_unlock(&cookie->lock);
-       atomic_dec(&cookie->n_active);
        kfree(op);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
 nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
@@ -889,6 +910,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 {
        struct fscache_storage *op;
        struct fscache_object *object;
+       bool wake_cookie = false;
        int ret;
 
        _enter("%p,%x,", cookie, (u32) page->flags);
@@ -920,7 +942,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        ret = -ENOBUFS;
        spin_lock(&cookie->lock);
 
-       if (hlist_empty(&cookie->backing_objects))
+       if (!fscache_cookie_enabled(cookie) ||
+           hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
@@ -957,7 +980,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;
 
-       atomic_inc(&cookie->n_active);
+       __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;
 
@@ -984,10 +1007,10 @@ already_pending:
        return 0;
 
 submit_failed:
-       atomic_dec(&cookie->n_active);
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
+       wake_cookie = __fscache_unuse_cookie(cookie);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;
@@ -999,6 +1022,8 @@ nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
+       if (wake_cookie)
+               __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
index adbfd66b380f6bbc3d275234851746ba0c33c6d4..242fe3eb1ae8206b3e66ad00a891887851a7121f 100644 (file)
@@ -94,8 +94,11 @@ static ssize_t cuse_read(struct file *file, char __user *buf, size_t count,
        loff_t pos = 0;
        struct iovec iov = { .iov_base = buf, .iov_len = count };
        struct fuse_io_priv io = { .async = 0, .file = file };
+       struct iov_iter ii;
 
-       return fuse_direct_io(&io, &iov, 1, count, &pos, 0);
+       iov_iter_init(&ii, &iov, 1, count, 0);
+
+       return fuse_direct_io(&io, &ii, count, &pos, 0);
 }
 
 static ssize_t cuse_write(struct file *file, const char __user *buf,
@@ -104,12 +107,15 @@ static ssize_t cuse_write(struct file *file, const char __user *buf,
        loff_t pos = 0;
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
        struct fuse_io_priv io = { .async = 0, .file = file };
+       struct iov_iter ii;
+
+       iov_iter_init(&ii, &iov, 1, count, 0);
 
        /*
         * No locking or generic_write_checks(), the server is
         * responsible for locking and sanity checks.
         */
-       return fuse_direct_io(&io, &iov, 1, count, &pos, 1);
+       return fuse_direct_io(&io, &ii, count, &pos, 1);
 }
 
 static int cuse_open(struct inode *inode, struct file *file)
@@ -589,11 +595,14 @@ static struct attribute *cuse_class_dev_attrs[] = {
 ATTRIBUTE_GROUPS(cuse_class_dev);
 
 static struct miscdevice cuse_miscdev = {
-       .minor          = MISC_DYNAMIC_MINOR,
+       .minor          = CUSE_MINOR,
        .name           = "cuse",
        .fops           = &cuse_channel_fops,
 };
 
+MODULE_ALIAS_MISCDEV(CUSE_MINOR);
+MODULE_ALIAS("devname:cuse");
+
 static int __init cuse_init(void)
 {
        int i, rc;
index b7989f2ab4c471f92238fa29d50e7b6d357d8c87..0747f6eed59836954a23743cddde06fef4d8c05c 100644 (file)
@@ -342,24 +342,6 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
        return err;
 }
 
-static struct dentry *fuse_materialise_dentry(struct dentry *dentry,
-                                             struct inode *inode)
-{
-       struct dentry *newent;
-
-       if (inode && S_ISDIR(inode->i_mode)) {
-               struct fuse_conn *fc = get_fuse_conn(inode);
-
-               mutex_lock(&fc->inst_mutex);
-               newent = d_materialise_unique(dentry, inode);
-               mutex_unlock(&fc->inst_mutex);
-       } else {
-               newent = d_materialise_unique(dentry, inode);
-       }
-
-       return newent;
-}
-
 static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
                                  unsigned int flags)
 {
@@ -382,7 +364,7 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
        if (inode && get_node_id(inode) == FUSE_ROOT_ID)
                goto out_iput;
 
-       newent = fuse_materialise_dentry(entry, inode);
+       newent = d_materialise_unique(entry, inode);
        err = PTR_ERR(newent);
        if (IS_ERR(newent))
                goto out_err;
@@ -601,21 +583,11 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req,
        }
        kfree(forget);
 
-       if (S_ISDIR(inode->i_mode)) {
-               struct dentry *alias;
-               mutex_lock(&fc->inst_mutex);
-               alias = d_find_alias(inode);
-               if (alias) {
-                       /* New directory must have moved since mkdir */
-                       mutex_unlock(&fc->inst_mutex);
-                       dput(alias);
-                       iput(inode);
-                       return -EBUSY;
-               }
-               d_instantiate(entry, inode);
-               mutex_unlock(&fc->inst_mutex);
-       } else
-               d_instantiate(entry, inode);
+       err = d_instantiate_no_diralias(entry, inode);
+       if (err) {
+               iput(inode);
+               return err;
+       }
 
        fuse_change_entry_timeout(entry, &outarg);
        fuse_invalidate_attr(dir);
@@ -1284,7 +1256,7 @@ static int fuse_direntplus_link(struct file *file,
        if (!inode)
                goto out;
 
-       alias = fuse_materialise_dentry(dentry, inode);
+       alias = d_materialise_unique(dentry, inode);
        err = PTR_ERR(alias);
        if (IS_ERR(alias))
                goto out;
index 4598345ab87d683dba75022a4a07981dbfb458ed..26c33f36179af2061fd57641da00fdf0f60a0a92 100644 (file)
@@ -334,7 +334,8 @@ static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
 
                BUG_ON(req->inode != inode);
                curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
-               if (curr_index == index) {
+               if (curr_index <= index &&
+                   index < curr_index + req->num_pages) {
                        found = true;
                        break;
                }
@@ -1178,9 +1179,10 @@ static inline void fuse_page_descs_length_init(struct fuse_req *req,
                        req->page_descs[i].offset;
 }
 
-static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
+static inline unsigned long fuse_get_user_addr(struct iov_iter *ii)
 {
-       return (unsigned long)ii->iov->iov_base + ii->iov_offset;
+       struct iovec *iov = iov_iter_iovec(ii);
+       return (unsigned long)iov->iov_base + ii->iov_offset;
 }
 
 static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
@@ -1269,9 +1271,8 @@ static inline int fuse_iter_npages(const struct iov_iter *ii_p)
        return min(npages, FUSE_MAX_PAGES_PER_REQ);
 }
 
-ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
-                      unsigned long nr_segs, size_t count, loff_t *ppos,
-                      int write)
+ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *ii,
+                      size_t count, loff_t *ppos, int write)
 {
        struct file *file = io->file;
        struct fuse_file *ff = file->private_data;
@@ -1280,14 +1281,11 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
        loff_t pos = *ppos;
        ssize_t res = 0;
        struct fuse_req *req;
-       struct iov_iter ii;
-
-       iov_iter_init(&ii, iov, nr_segs, count, 0);
 
        if (io->async)
-               req = fuse_get_req_for_background(fc, fuse_iter_npages(&ii));
+               req = fuse_get_req_for_background(fc, fuse_iter_npages(ii));
        else
-               req = fuse_get_req(fc, fuse_iter_npages(&ii));
+               req = fuse_get_req(fc, fuse_iter_npages(ii));
        if (IS_ERR(req))
                return PTR_ERR(req);
 
@@ -1295,7 +1293,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
                size_t nres;
                fl_owner_t owner = current->files;
                size_t nbytes = min(count, nmax);
-               int err = fuse_get_user_pages(req, &ii, &nbytes, write);
+               int err = fuse_get_user_pages(req, ii, &nbytes, write);
                if (err) {
                        res = err;
                        break;
@@ -1325,9 +1323,9 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
                        fuse_put_request(fc, req);
                        if (io->async)
                                req = fuse_get_req_for_background(fc,
-                                       fuse_iter_npages(&ii));
+                                       fuse_iter_npages(ii));
                        else
-                               req = fuse_get_req(fc, fuse_iter_npages(&ii));
+                               req = fuse_get_req(fc, fuse_iter_npages(ii));
                        if (IS_ERR(req))
                                break;
                }
@@ -1341,10 +1339,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
 }
 EXPORT_SYMBOL_GPL(fuse_direct_io);
 
-static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
-                                 const struct iovec *iov,
-                                 unsigned long nr_segs, loff_t *ppos,
-                                 size_t count)
+static ssize_t __fuse_direct_read(struct fuse_io_priv *io, struct iov_iter *ii,
+                                 loff_t *ppos, size_t count)
 {
        ssize_t res;
        struct file *file = io->file;
@@ -1353,7 +1349,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
        if (is_bad_inode(inode))
                return -EIO;
 
-       res = fuse_direct_io(io, iov, nr_segs, count, ppos, 0);
+       res = fuse_direct_io(io, ii, count, ppos, 0);
 
        fuse_invalidate_attr(inode);
 
@@ -1365,21 +1361,24 @@ static ssize_t fuse_direct_read(struct file *file, char __user *buf,
 {
        struct fuse_io_priv io = { .async = 0, .file = file };
        struct iovec iov = { .iov_base = buf, .iov_len = count };
-       return __fuse_direct_read(&io, &iov, 1, ppos, count);
+       struct iov_iter ii;
+
+       iov_iter_init(&ii, &iov, 1, count, 0);
+
+       return __fuse_direct_read(&io, &ii, ppos, count);
 }
 
-static ssize_t __fuse_direct_write(struct fuse_io_priv *io,
-                                  const struct iovec *iov,
-                                  unsigned long nr_segs, loff_t *ppos)
+static ssize_t __fuse_direct_write(struct fuse_io_priv *io, struct iov_iter *ii,
+                                  loff_t *ppos)
 {
        struct file *file = io->file;
        struct inode *inode = file_inode(file);
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(ii);
        ssize_t res;
 
        res = generic_write_checks(file, ppos, &count, 0);
        if (!res)
-               res = fuse_direct_io(io, iov, nr_segs, count, ppos, 1);
+               res = fuse_direct_io(io, ii, count, ppos, 1);
 
        fuse_invalidate_attr(inode);
 
@@ -1390,6 +1389,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = count };
+       struct iov_iter ii;
        struct inode *inode = file_inode(file);
        ssize_t res;
        struct fuse_io_priv io = { .async = 0, .file = file };
@@ -1397,9 +1397,11 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
        if (is_bad_inode(inode))
                return -EIO;
 
+       iov_iter_init(&ii, &iov, 1, count, 0);
+
        /* Don't allow parallel writes to the same file */
        mutex_lock(&inode->i_mutex);
-       res = __fuse_direct_write(&io, &iov, 1, ppos);
+       res = __fuse_direct_write(&io, &ii, ppos);
        if (res > 0)
                fuse_write_update_size(inode, *ppos);
        mutex_unlock(&inode->i_mutex);
@@ -1409,8 +1411,13 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
 
 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 {
-       __free_page(req->pages[0]);
-       fuse_file_put(req->ff, false);
+       int i;
+
+       for (i = 0; i < req->num_pages; i++)
+               __free_page(req->pages[i]);
+
+       if (req->ff)
+               fuse_file_put(req->ff, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@@ -1418,30 +1425,34 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
        struct inode *inode = req->inode;
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+       int i;
 
        list_del(&req->writepages_entry);
-       dec_bdi_stat(bdi, BDI_WRITEBACK);
-       dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
-       bdi_writeout_inc(bdi);
+       for (i = 0; i < req->num_pages; i++) {
+               dec_bdi_stat(bdi, BDI_WRITEBACK);
+               dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
+               bdi_writeout_inc(bdi);
+       }
        wake_up(&fi->page_waitq);
 }
 
 /* Called under fc->lock, may release and reacquire it */
-static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
+                               loff_t size)
 __releases(fc->lock)
 __acquires(fc->lock)
 {
        struct fuse_inode *fi = get_fuse_inode(req->inode);
-       loff_t size = i_size_read(req->inode);
        struct fuse_write_in *inarg = &req->misc.write.in;
+       __u64 data_size = req->num_pages * PAGE_CACHE_SIZE;
 
        if (!fc->connected)
                goto out_free;
 
-       if (inarg->offset + PAGE_CACHE_SIZE <= size) {
-               inarg->size = PAGE_CACHE_SIZE;
+       if (inarg->offset + data_size <= size) {
+               inarg->size = data_size;
        } else if (inarg->offset < size) {
-               inarg->size = size & (PAGE_CACHE_SIZE - 1);
+               inarg->size = size - inarg->offset;
        } else {
                /* Got truncated off completely */
                goto out_free;
@@ -1472,12 +1483,13 @@ __acquires(fc->lock)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
+       size_t crop = i_size_read(inode);
        struct fuse_req *req;
 
        while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
                req = list_entry(fi->queued_writes.next, struct fuse_req, list);
                list_del_init(&req->list);
-               fuse_send_writepage(fc, req);
+               fuse_send_writepage(fc, req, crop);
        }
 }
 
@@ -1488,12 +1500,62 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
 
        mapping_set_error(inode->i_mapping, req->out.h.error);
        spin_lock(&fc->lock);
+       while (req->misc.write.next) {
+               struct fuse_conn *fc = get_fuse_conn(inode);
+               struct fuse_write_in *inarg = &req->misc.write.in;
+               struct fuse_req *next = req->misc.write.next;
+               req->misc.write.next = next->misc.write.next;
+               next->misc.write.next = NULL;
+               next->ff = fuse_file_get(req->ff);
+               list_add(&next->writepages_entry, &fi->writepages);
+
+               /*
+                * Skip fuse_flush_writepages() to make it easy to crop requests
+                * based on primary request size.
+                *
+                * 1st case (trivial): there are no concurrent activities using
+                * fuse_set/release_nowrite.  Then we're on the safe side because
+                * fuse_flush_writepages() would call fuse_send_writepage()
+                * anyway.
+                *
+                * 2nd case: someone called fuse_set_nowrite and it is waiting
+                * now for completion of all in-flight requests.  This happens
+                * rarely and no more than once per page, so this should be
+                * okay.
+                *
+                * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
+                * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
+                * that fuse_set_nowrite returned implies that all in-flight
+                * requests were completed along with all of their secondary
+                * requests.  Further primary requests are blocked by negative
+                * writectr.  Hence there cannot be any in-flight requests and
+                * no invocations of fuse_writepage_end() while we're in
+                * fuse_set_nowrite..fuse_release_nowrite section.
+                */
+               fuse_send_writepage(fc, next, inarg->offset + inarg->size);
+       }
        fi->writectr--;
        fuse_writepage_finish(fc, req);
        spin_unlock(&fc->lock);
        fuse_writepage_free(fc, req);
 }
 
+static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
+                                            struct fuse_inode *fi)
+{
+       struct fuse_file *ff = NULL;
+
+       spin_lock(&fc->lock);
+       if (!WARN_ON(list_empty(&fi->write_files))) {
+               ff = list_entry(fi->write_files.next, struct fuse_file,
+                               write_entry);
+               fuse_file_get(ff);
+       }
+       spin_unlock(&fc->lock);
+
+       return ff;
+}
+
 static int fuse_writepage_locked(struct page *page)
 {
        struct address_space *mapping = page->mapping;
@@ -1501,8 +1563,8 @@ static int fuse_writepage_locked(struct page *page)
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
        struct fuse_req *req;
-       struct fuse_file *ff;
        struct page *tmp_page;
+       int error = -ENOMEM;
 
        set_page_writeback(page);
 
@@ -1515,16 +1577,16 @@ static int fuse_writepage_locked(struct page *page)
        if (!tmp_page)
                goto err_free;
 
-       spin_lock(&fc->lock);
-       BUG_ON(list_empty(&fi->write_files));
-       ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
-       req->ff = fuse_file_get(ff);
-       spin_unlock(&fc->lock);
+       error = -EIO;
+       req->ff = fuse_write_file_get(fc, fi);
+       if (!req->ff)
+               goto err_free;
 
-       fuse_write_fill(req, ff, page_offset(page), 0);
+       fuse_write_fill(req, req->ff, page_offset(page), 0);
 
        copy_highpage(tmp_page, page);
        req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+       req->misc.write.next = NULL;
        req->in.argpages = 1;
        req->num_pages = 1;
        req->pages[0] = tmp_page;
@@ -1550,19 +1612,263 @@ err_free:
        fuse_request_free(req);
 err:
        end_page_writeback(page);
-       return -ENOMEM;
+       return error;
 }
 
 static int fuse_writepage(struct page *page, struct writeback_control *wbc)
 {
        int err;
 
+       if (fuse_page_is_writeback(page->mapping->host, page->index)) {
+               /*
+                * ->writepages() should be called for sync() and friends.  We
+                * should only get here on direct reclaim and then we are
+                * allowed to skip a page which is already in flight
+                */
+               WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
+
+               redirty_page_for_writepage(wbc, page);
+               return 0;
+       }
+
        err = fuse_writepage_locked(page);
        unlock_page(page);
 
        return err;
 }
 
+struct fuse_fill_wb_data {
+       struct fuse_req *req;
+       struct fuse_file *ff;
+       struct inode *inode;
+       struct page **orig_pages;
+};
+
+static void fuse_writepages_send(struct fuse_fill_wb_data *data)
+{
+       struct fuse_req *req = data->req;
+       struct inode *inode = data->inode;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_inode *fi = get_fuse_inode(inode);
+       int num_pages = req->num_pages;
+       int i;
+
+       req->ff = fuse_file_get(data->ff);
+       spin_lock(&fc->lock);
+       list_add_tail(&req->list, &fi->queued_writes);
+       fuse_flush_writepages(inode);
+       spin_unlock(&fc->lock);
+
+       for (i = 0; i < num_pages; i++)
+               end_page_writeback(data->orig_pages[i]);
+}
+
+static bool fuse_writepage_in_flight(struct fuse_req *new_req,
+                                    struct page *page)
+{
+       struct fuse_conn *fc = get_fuse_conn(new_req->inode);
+       struct fuse_inode *fi = get_fuse_inode(new_req->inode);
+       struct fuse_req *tmp;
+       struct fuse_req *old_req;
+       bool found = false;
+       pgoff_t curr_index;
+
+       BUG_ON(new_req->num_pages != 0);
+
+       spin_lock(&fc->lock);
+       list_del(&new_req->writepages_entry);
+       list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
+               BUG_ON(old_req->inode != new_req->inode);
+               curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+               if (curr_index <= page->index &&
+                   page->index < curr_index + old_req->num_pages) {
+                       found = true;
+                       break;
+               }
+       }
+       if (!found) {
+               list_add(&new_req->writepages_entry, &fi->writepages);
+               goto out_unlock;
+       }
+
+       new_req->num_pages = 1;
+       for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
+               BUG_ON(tmp->inode != new_req->inode);
+               curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+               if (tmp->num_pages == 1 &&
+                   curr_index == page->index) {
+                       old_req = tmp;
+               }
+       }
+
+       if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
+                                       old_req->state == FUSE_REQ_PENDING)) {
+               struct backing_dev_info *bdi = page->mapping->backing_dev_info;
+
+               copy_highpage(old_req->pages[0], page);
+               spin_unlock(&fc->lock);
+
+               dec_bdi_stat(bdi, BDI_WRITEBACK);
+               dec_zone_page_state(page, NR_WRITEBACK_TEMP);
+               bdi_writeout_inc(bdi);
+               fuse_writepage_free(fc, new_req);
+               fuse_request_free(new_req);
+               goto out;
+       } else {
+               new_req->misc.write.next = old_req->misc.write.next;
+               old_req->misc.write.next = new_req;
+       }
+out_unlock:
+       spin_unlock(&fc->lock);
+out:
+       return found;
+}
+
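As a side note, the misc.write.next handling above splices a new temporary-page request in directly behind the matching in-flight request. A tiny stand-alone model of just that list operation, with invented types that only stand in for struct fuse_req, could look like this:

#include <assert.h>
#include <stddef.h>

struct wb_req {			/* stand-in for struct fuse_req */
	struct wb_req *next;	/* stand-in for misc.write.next */
	unsigned long index;	/* page index this request rewrites */
};

/* Queue new_req right after old_req, as fuse_writepage_in_flight() does. */
static void chain_after(struct wb_req *old_req, struct wb_req *new_req)
{
	new_req->next = old_req->next;
	old_req->next = new_req;
}

int main(void)
{
	struct wb_req primary  = { .next = NULL, .index = 5 };
	struct wb_req rewrite1 = { .next = NULL, .index = 5 };
	struct wb_req rewrite2 = { .next = NULL, .index = 5 };

	chain_after(&primary, &rewrite1);
	chain_after(&primary, &rewrite2);	/* newest rewrite sits first */
	assert(primary.next == &rewrite2 && rewrite2.next == &rewrite1);
	return 0;
}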
+static int fuse_writepages_fill(struct page *page,
+               struct writeback_control *wbc, void *_data)
+{
+       struct fuse_fill_wb_data *data = _data;
+       struct fuse_req *req = data->req;
+       struct inode *inode = data->inode;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct page *tmp_page;
+       bool is_writeback;
+       int err;
+
+       if (!data->ff) {
+               err = -EIO;
+               data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+               if (!data->ff)
+                       goto out_unlock;
+       }
+
+       /*
+        * Being under writeback is unlikely but possible.  For example direct
+        * read to an mmaped fuse file will set the page dirty twice; once when
+        * the pages are faulted with get_user_pages(), and then after the read
+        * completed.
+        */
+       is_writeback = fuse_page_is_writeback(inode, page->index);
+
+       if (req && req->num_pages &&
+           (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
+            (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
+            data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
+               fuse_writepages_send(data);
+               data->req = NULL;
+       }
+       err = -ENOMEM;
+       tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+       if (!tmp_page)
+               goto out_unlock;
+
+       /*
+        * The page must not be redirtied until the writeout is completed
+        * (i.e. userspace has sent a reply to the write request).  Otherwise
+        * there could be more than one temporary page instance for each real
+        * page.
+        *
+        * This is ensured by holding the page lock in page_mkwrite() while
+        * checking fuse_page_is_writeback().  We already hold the page lock
+        * since clear_page_dirty_for_io() and keep it held until we add the
+        * request to the fi->writepages list and increment req->num_pages.
+        * After this fuse_page_is_writeback() will indicate that the page is
+        * under writeback, so we can release the page lock.
+        */
+       if (data->req == NULL) {
+               struct fuse_inode *fi = get_fuse_inode(inode);
+
+               err = -ENOMEM;
+               req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
+               if (!req) {
+                       __free_page(tmp_page);
+                       goto out_unlock;
+               }
+
+               fuse_write_fill(req, data->ff, page_offset(page), 0);
+               req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+               req->misc.write.next = NULL;
+               req->in.argpages = 1;
+               req->background = 1;
+               req->num_pages = 0;
+               req->end = fuse_writepage_end;
+               req->inode = inode;
+
+               spin_lock(&fc->lock);
+               list_add(&req->writepages_entry, &fi->writepages);
+               spin_unlock(&fc->lock);
+
+               data->req = req;
+       }
+       set_page_writeback(page);
+
+       copy_highpage(tmp_page, page);
+       req->pages[req->num_pages] = tmp_page;
+       req->page_descs[req->num_pages].offset = 0;
+       req->page_descs[req->num_pages].length = PAGE_SIZE;
+
+       inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
+       inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+
+       err = 0;
+       if (is_writeback && fuse_writepage_in_flight(req, page)) {
+               end_page_writeback(page);
+               data->req = NULL;
+               goto out_unlock;
+       }
+       data->orig_pages[req->num_pages] = page;
+
+       /*
+        * Protected by fc->lock against concurrent access by
+        * fuse_page_is_writeback().
+        */
+       spin_lock(&fc->lock);
+       req->num_pages++;
+       spin_unlock(&fc->lock);
+
+out_unlock:
+       unlock_page(page);
+
+       return err;
+}
+
+static int fuse_writepages(struct address_space *mapping,
+                          struct writeback_control *wbc)
+{
+       struct inode *inode = mapping->host;
+       struct fuse_fill_wb_data data;
+       int err;
+
+       err = -EIO;
+       if (is_bad_inode(inode))
+               goto out;
+
+       data.inode = inode;
+       data.req = NULL;
+       data.ff = NULL;
+
+       err = -ENOMEM;
+       data.orig_pages = kzalloc(sizeof(struct page *) *
+                                 FUSE_MAX_PAGES_PER_REQ,
+                                 GFP_NOFS);
+       if (!data.orig_pages)
+               goto out;
+
+       err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
+       if (data.req) {
+               /* Ignore errors if we can write at least one page */
+               BUG_ON(!data.req->num_pages);
+               fuse_writepages_send(&data);
+               err = 0;
+       }
+       if (data.ff)
+               fuse_file_put(data.ff, false);
+
+       kfree(data.orig_pages);
+out:
+       return err;
+}
+
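For reference, fuse_writepages_fill() above batches contiguous dirty pages into one request and sends the batch when the next page is already under writeback, the request is full, it would exceed fc->max_write, or the page is not contiguous with the previous one. A rough user-space model of only that flush decision; the constants and names here are assumptions of the sketch, not the kernel's values:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PAGES_PER_REQ 32			/* stand-in for FUSE_MAX_PAGES_PER_REQ */
#define PAGE_SIZE_BYTES   4096UL
#define MAX_WRITE_BYTES   (128UL * 1024)	/* stand-in for fc->max_write */

/* Returns true if the current batch must be sent before adding 'index'. */
static bool must_flush(unsigned num_pages, unsigned long last_index,
		       unsigned long index, bool page_is_writeback)
{
	if (num_pages == 0)
		return false;
	return page_is_writeback ||
	       num_pages == MAX_PAGES_PER_REQ ||
	       (num_pages + 1) * PAGE_SIZE_BYTES > MAX_WRITE_BYTES ||
	       last_index + 1 != index;
}

int main(void)
{
	/* A gap between page 9 and page 12 forces a flush. */
	printf("%d\n", must_flush(3, 9, 12, false));	/* prints 1 */
	return 0;
}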
 static int fuse_launder_page(struct page *page)
 {
        int err = 0;
@@ -1602,14 +1908,17 @@ static void fuse_vma_close(struct vm_area_struct *vma)
 static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
-       /*
-        * Don't use page->mapping as it may become NULL from a
-        * concurrent truncate.
-        */
-       struct inode *inode = vma->vm_file->f_mapping->host;
+       struct inode *inode = file_inode(vma->vm_file);
+
+       file_update_time(vma->vm_file);
+       lock_page(page);
+       if (page->mapping != inode->i_mapping) {
+               unlock_page(page);
+               return VM_FAULT_NOPAGE;
+       }
 
        fuse_wait_on_page_writeback(inode, page->index);
-       return 0;
+       return VM_FAULT_LOCKED;
 }
 
 static const struct vm_operations_struct fuse_file_vm_ops = {
@@ -1868,30 +2177,17 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
        while (iov_iter_count(&ii)) {
                struct page *page = pages[page_idx++];
                size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
-               void *kaddr;
-
-               kaddr = kmap(page);
+               size_t left;
 
-               while (todo) {
-                       char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
-                       size_t iov_len = ii.iov->iov_len - ii.iov_offset;
-                       size_t copy = min(todo, iov_len);
-                       size_t left;
-
-                       if (!to_user)
-                               left = copy_from_user(kaddr, uaddr, copy);
-                       else
-                               left = copy_to_user(uaddr, kaddr, copy);
-
-                       if (unlikely(left))
-                               return -EFAULT;
+               if (!to_user)
+                       left = iov_iter_copy_from_user(page, &ii, 0, todo);
+               else
+                       left = iov_iter_copy_to_user(page, &ii, 0, todo);
 
-                       iov_iter_advance(&ii, copy);
-                       todo -= copy;
-                       kaddr += copy;
-               }
+               if (unlikely(left))
+                       return -EFAULT;
 
-               kunmap(page);
+               iov_iter_advance(&ii, todo);
        }
 
        return 0;
@@ -2385,8 +2681,8 @@ static inline loff_t fuse_round_up(loff_t off)
 }
 
 static ssize_t
-fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs)
+fuse_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *ii,
+                       loff_t offset)
 {
        ssize_t ret = 0;
        struct file *file = iocb->ki_filp;
@@ -2395,7 +2691,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
        loff_t pos = 0;
        struct inode *inode;
        loff_t i_size;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(ii);
        struct fuse_io_priv *io;
 
        pos = offset;
@@ -2436,9 +2732,9 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                io->async = false;
 
        if (rw == WRITE)
-               ret = __fuse_direct_write(io, iov, nr_segs, &pos);
+               ret = __fuse_direct_write(io, ii, &pos);
        else
-               ret = __fuse_direct_read(io, iov, nr_segs, &pos, count);
+               ret = __fuse_direct_read(io, ii, &pos, count);
 
        if (io->async) {
                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
@@ -2581,6 +2877,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
 static const struct address_space_operations fuse_file_aops  = {
        .readpage       = fuse_readpage,
        .writepage      = fuse_writepage,
+       .writepages     = fuse_writepages,
        .launder_page   = fuse_launder_page,
        .readpages      = fuse_readpages,
        .set_page_dirty = __set_page_dirty_nobuffers,
index 5b9e6f3b6aef4fc55aaaaaa94666dd532c4fbffc..04c6084b0cf98496822a24b293b7ff815de00531 100644 (file)
@@ -321,6 +321,7 @@ struct fuse_req {
                struct {
                        struct fuse_write_in in;
                        struct fuse_write_out out;
+                       struct fuse_req *next;
                } write;
                struct fuse_notify_retrieve_in retrieve_in;
                struct fuse_lk_in lk_in;
@@ -374,9 +375,6 @@ struct fuse_conn {
        /** Lock protecting accesses to members of this structure */
        spinlock_t lock;
 
-       /** Mutex protecting against directory alias creation */
-       struct mutex inst_mutex;
-
        /** Refcount */
        atomic_t count;
 
@@ -858,9 +856,8 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
 
 int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
                 bool isdir);
-ssize_t fuse_direct_io(struct fuse_io_priv *io, const struct iovec *iov,
-                      unsigned long nr_segs, size_t count, loff_t *ppos,
-                      int write);
+ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *ii,
+                      size_t count, loff_t *ppos, int write);
 long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
                   unsigned int flags);
 long fuse_ioctl_common(struct file *file, unsigned int cmd,
index a8ce6dab60a0b0e3279b6996c8462610c0cbef14..1c15613c64f8d0e768596a2e35ee81f3d486d7cc 100644 (file)
@@ -565,7 +565,6 @@ void fuse_conn_init(struct fuse_conn *fc)
 {
        memset(fc, 0, sizeof(*fc));
        spin_lock_init(&fc->lock);
-       mutex_init(&fc->inst_mutex);
        init_rwsem(&fc->killsb);
        atomic_set(&fc->count, 1);
        init_waitqueue_head(&fc->waitq);
@@ -596,7 +595,6 @@ void fuse_conn_put(struct fuse_conn *fc)
        if (atomic_dec_and_test(&fc->count)) {
                if (fc->destroy_req)
                        fuse_request_free(fc->destroy_req);
-               mutex_destroy(&fc->inst_mutex);
                fc->release(fc);
        }
 }
index 1f7d8057ea68d1c7214d3db0a6446aa248888eea..01a2aa5f25a149c312f87d4ad6525ac0c1d808a6 100644 (file)
@@ -611,12 +611,14 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
 
        if (alloc_required) {
+               struct gfs2_alloc_parms ap = { .aflags = 0, };
                error = gfs2_quota_lock_check(ip);
                if (error)
                        goto out_unlock;
 
                requested = data_blocks + ind_blocks;
-               error = gfs2_inplace_reserve(ip, requested, 0);
+               ap.target = requested;
+               error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto out_qunlock;
        }
@@ -979,8 +981,7 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
 
 
 static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
-                             const struct iovec *iov, loff_t offset,
-                             unsigned long nr_segs)
+                             struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -1004,8 +1005,8 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */
 
-       rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-                                 offset, nr_segs, gfs2_get_block_direct,
+       rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+                                 offset, gfs2_get_block_direct,
                                  NULL, NULL, 0);
 out:
        gfs2_glock_dq(&gh);
index 62a65fc448dcedf5009f85a233cb43ad7cd4b442..fe0500c0af7aab88ca4276234df6894cca7290b3 100644 (file)
@@ -1216,6 +1216,7 @@ static int do_grow(struct inode *inode, u64 size)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
+       struct gfs2_alloc_parms ap = { .target = 1, };
        struct buffer_head *dibh;
        int error;
        int unstuff = 0;
@@ -1226,7 +1227,7 @@ static int do_grow(struct inode *inode, u64 size)
                if (error)
                        return error;
 
-               error = gfs2_inplace_reserve(ip, 1, 0);
+               error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto do_grow_qunlock;
                unstuff = 1;
@@ -1279,6 +1280,7 @@ do_grow_qunlock:
 
 int gfs2_setattr_size(struct inode *inode, u64 newsize)
 {
+       struct gfs2_inode *ip = GFS2_I(inode);
        int ret;
        u64 oldsize;
 
@@ -1294,7 +1296,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
 
        inode_dio_wait(inode);
 
-       ret = gfs2_rs_alloc(GFS2_I(inode));
+       ret = gfs2_rs_alloc(ip);
        if (ret)
                goto out;
 
@@ -1304,6 +1306,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
                goto out;
        }
 
+       gfs2_rs_deltree(ip->i_res);
        ret = do_shrink(inode, oldsize, newsize);
 out:
        put_write_access(inode);
index 0621b46d474d0e6d82157e6701b3e49449839908..0838913ca5687dc0e2abaaeb6ae4159a8458cce7 100644 (file)
@@ -383,6 +383,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct inode *inode = file_inode(vma->vm_file);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
+       struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned long last_index;
        u64 pos = page->index << PAGE_CACHE_SHIFT;
        unsigned int data_blocks, ind_blocks, rblocks;
@@ -430,7 +431,8 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (ret)
                goto out_unlock;
        gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
-       ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
+       ap.target = data_blocks + ind_blocks;
+       ret = gfs2_inplace_reserve(ip, &ap);
        if (ret)
                goto out_quota_unlock;
 
@@ -620,7 +622,7 @@ static int gfs2_release(struct inode *inode, struct file *file)
        if (!(file->f_mode & FMODE_WRITE))
                return 0;
 
-       gfs2_rs_delete(ip);
+       gfs2_rs_delete(ip, &inode->i_writecount);
        return 0;
 }
 
@@ -681,10 +683,9 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 }
 
 /**
- * gfs2_file_aio_write - Perform a write to a file
+ * gfs2_file_write_iter - Perform a write to a file
  * @iocb: The io context
- * @iov: The data to write
- * @nr_segs: Number of @iov segments
+ * @iter: The data to write
  * @pos: The file position
  *
  * We have to do a lock/unlock here to refresh the inode size for
@@ -694,11 +695,11 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
  *
  */
 
-static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                                  unsigned long nr_segs, loff_t pos)
+static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                   loff_t pos)
 {
        struct file *file = iocb->ki_filp;
-       size_t writesize = iov_length(iov, nr_segs);
+       size_t writesize = iov_iter_count(iter);
        struct gfs2_inode *ip = GFS2_I(file_inode(file));
        int ret;
 
@@ -717,7 +718,7 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                gfs2_glock_dq_uninit(&gh);
        }
 
-       return generic_file_aio_write(iocb, iov, nr_segs, pos);
+       return generic_file_write_iter(iocb, iter, pos);
 }
 
 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
@@ -800,6 +801,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
        struct inode *inode = file_inode(file);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        loff_t bytes, max_bytes;
        int error;
@@ -850,7 +852,8 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 retry:
                gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
 
-               error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
+               ap.target = data_blocks + ind_blocks;
+               error = gfs2_inplace_reserve(ip, &ap);
                if (error) {
                        if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
                                bytes >>= 1;
@@ -1049,9 +1052,9 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 const struct file_operations gfs2_file_fops = {
        .llseek         = gfs2_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = gfs2_file_aio_write,
+       .write_iter     = gfs2_file_write_iter,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
@@ -1081,9 +1084,9 @@ const struct file_operations gfs2_dir_fops = {
 const struct file_operations gfs2_file_fops_nolock = {
        .llseek         = gfs2_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = gfs2_file_aio_write,
+       .write_iter     = gfs2_file_write_iter,
        .unlocked_ioctl = gfs2_ioctl,
        .mmap           = gfs2_mmap,
        .open           = gfs2_open,
index c2f41b4d00b9872ccfb4f5f23f988d46f154df32..e66a8009aff16d66b1179bba0ffc3a266ff923f6 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/bit_spinlock.h>
 #include <linux/percpu.h>
 #include <linux/list_sort.h>
+#include <linux/lockref.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -129,10 +130,10 @@ void gfs2_glock_free(struct gfs2_glock *gl)
  *
  */
 
-void gfs2_glock_hold(struct gfs2_glock *gl)
+static void gfs2_glock_hold(struct gfs2_glock *gl)
 {
-       GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
-       atomic_inc(&gl->gl_ref);
+       GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
+       lockref_get(&gl->gl_lockref);
 }
 
 /**
@@ -186,20 +187,6 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
        spin_unlock(&lru_lock);
 }
 
-/**
- * gfs2_glock_put_nolock() - Decrement reference count on glock
- * @gl: The glock to put
- *
- * This function should only be used if the caller has its own reference
- * to the glock, in addition to the one it is dropping.
- */
-
-void gfs2_glock_put_nolock(struct gfs2_glock *gl)
-{
-       if (atomic_dec_and_test(&gl->gl_ref))
-               GLOCK_BUG_ON(gl, 1);
-}
-
 /**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
@@ -211,17 +198,22 @@ void gfs2_glock_put(struct gfs2_glock *gl)
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct address_space *mapping = gfs2_glock2aspace(gl);
 
-       if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
-               __gfs2_glock_remove_from_lru(gl);
-               spin_unlock(&lru_lock);
-               spin_lock_bucket(gl->gl_hash);
-               hlist_bl_del_rcu(&gl->gl_list);
-               spin_unlock_bucket(gl->gl_hash);
-               GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
-               GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
-               trace_gfs2_glock_put(gl);
-               sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
-       }
+       if (lockref_put_or_lock(&gl->gl_lockref))
+               return;
+
+       lockref_mark_dead(&gl->gl_lockref);
+
+       spin_lock(&lru_lock);
+       __gfs2_glock_remove_from_lru(gl);
+       spin_unlock(&lru_lock);
+       spin_unlock(&gl->gl_lockref.lock);
+       spin_lock_bucket(gl->gl_hash);
+       hlist_bl_del_rcu(&gl->gl_list);
+       spin_unlock_bucket(gl->gl_hash);
+       GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
+       GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
+       trace_gfs2_glock_put(gl);
+       sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
 /**
@@ -244,7 +236,7 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
                        continue;
                if (gl->gl_sbd != sdp)
                        continue;
-               if (atomic_inc_not_zero(&gl->gl_ref))
+               if (lockref_get_not_dead(&gl->gl_lockref))
                        return gl;
        }
 
@@ -396,10 +388,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
        held2 = (new_state != LM_ST_UNLOCKED);
 
        if (held1 != held2) {
+               GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
                if (held2)
-                       gfs2_glock_hold(gl);
+                       gl->gl_lockref.count++;
                else
-                       gfs2_glock_put_nolock(gl);
+                       gl->gl_lockref.count--;
        }
        if (held1 && held2 && list_empty(&gl->gl_holders))
                clear_bit(GLF_QUEUED, &gl->gl_flags);
@@ -626,9 +619,9 @@ out:
 out_sched:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        smp_mb__after_clear_bit();
-       gfs2_glock_hold(gl);
+       gl->gl_lockref.count++;
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-               gfs2_glock_put_nolock(gl);
+               gl->gl_lockref.count--;
        return;
 
 out_unlock:
@@ -754,7 +747,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
        gl->gl_sbd = sdp;
        gl->gl_flags = 0;
        gl->gl_name = name;
-       atomic_set(&gl->gl_ref, 1);
+       gl->gl_lockref.count = 1;
        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_target = LM_ST_UNLOCKED;
        gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -1356,10 +1349,10 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
                }
        }
 
-       spin_unlock(&gl->gl_spin);
+       gl->gl_lockref.count++;
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-       smp_wmb();
-       gfs2_glock_hold(gl);
+       spin_unlock(&gl->gl_spin);
+
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
 }
@@ -1404,15 +1397,19 @@ __acquires(&lru_lock)
        while(!list_empty(list)) {
                gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
+               if (!spin_trylock(&gl->gl_spin)) {
+                       list_add(&gl->gl_lru, &lru_list);
+                       atomic_inc(&lru_count);
+                       continue;
+               }
                clear_bit(GLF_LRU, &gl->gl_flags);
-               gfs2_glock_hold(gl);
                spin_unlock(&lru_lock);
-               spin_lock(&gl->gl_spin);
+               gl->gl_lockref.count++;
                if (demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
                WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
-                       gfs2_glock_put_nolock(gl);
+                       gl->gl_lockref.count--;
                spin_unlock(&gl->gl_spin);
                spin_lock(&lru_lock);
        }
@@ -1493,7 +1490,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
 
        rcu_read_lock();
        hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
-               if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
+               if ((gl->gl_sbd == sdp) && lockref_get_not_dead(&gl->gl_lockref))
                        examiner(gl);
        }
        rcu_read_unlock();
@@ -1746,7 +1743,7 @@ int gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
                  state2str(gl->gl_demote_state), dtime,
                  atomic_read(&gl->gl_ail_count),
                  atomic_read(&gl->gl_revokes),
-                 atomic_read(&gl->gl_ref), gl->gl_hold_time);
+                 (int)gl->gl_lockref.count, gl->gl_hold_time);
 
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder(seq, gh);
@@ -1902,7 +1899,7 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
                        gi->nhash = 0;
                }
        /* Skip entries for other sb and dead entries */
-       } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
+       } while (gi->sdp != gi->gl->gl_sbd || __lockref_is_dead(&gi->gl->gl_lockref));
 
        return 0;
 }
index 69f66e3d22bf512787b3b9db403e0c8c91babd59..6647d77366ba097c4c98f482bcef1ab980704baa 100644 (file)
@@ -181,8 +181,6 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
 extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                          const struct gfs2_glock_operations *glops,
                          int create, struct gfs2_glock **glp);
-extern void gfs2_glock_hold(struct gfs2_glock *gl);
-extern void gfs2_glock_put_nolock(struct gfs2_glock *gl);
 extern void gfs2_glock_put(struct gfs2_glock *gl);
 extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                             unsigned flags, struct gfs2_holder *gh);
index e2e0a90396e7823da9aa47c44a727d5e75751cc6..db908f697139cfffbca462d3a7528e13139576b5 100644 (file)
@@ -525,9 +525,9 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 
        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
-               gfs2_glock_hold(gl);
+               gl->gl_lockref.count++;
                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
-                       gfs2_glock_put_nolock(gl);
+                       gl->gl_lockref.count--;
        }
 }
 
index 26aabd7caba7edfd56c9d611df1732ac6b16a6a1..bb88e417231f88f95c4801d582af0cf254cf8f7e 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/rbtree.h>
 #include <linux/ktime.h>
 #include <linux/percpu.h>
+#include <linux/lockref.h>
 
 #define DIO_WAIT       0x00000010
 #define DIO_METADATA   0x00000020
@@ -71,6 +72,7 @@ struct gfs2_bitmap {
        u32 bi_offset;
        u32 bi_start;
        u32 bi_len;
+       u32 bi_blocks;
 };
 
 struct gfs2_rgrpd {
@@ -101,19 +103,25 @@ struct gfs2_rgrpd {
 
 struct gfs2_rbm {
        struct gfs2_rgrpd *rgd;
-       struct gfs2_bitmap *bi; /* Bitmap must belong to the rgd */
        u32 offset;             /* The offset is bitmap relative */
+       int bii;                /* Bitmap index */
 };
 
+static inline struct gfs2_bitmap *rbm_bi(const struct gfs2_rbm *rbm)
+{
+       return rbm->rgd->rd_bits + rbm->bii;
+}
+
 static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm)
 {
-       return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset;
+       return rbm->rgd->rd_data0 + (rbm_bi(rbm)->bi_start * GFS2_NBBY) +
+               rbm->offset;
 }
 
 static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1,
                               const struct gfs2_rbm *rbm2)
 {
-       return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) && 
+       return (rbm1->rgd == rbm2->rgd) && (rbm1->bii == rbm2->bii) &&
               (rbm1->offset == rbm2->offset);
 }
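With the bitmap pointer replaced by an index, the helpers above compute the disk block as rd_data0 plus the bitmap's starting byte scaled by GFS2_NBBY (blocks described per bitmap byte, 4 in GFS2) plus the in-bitmap offset. A small stand-alone check of that arithmetic; the ex_* types and the numbers are invented for the example:

#include <stdio.h>

#define GFS2_NBBY 4	/* four blocks are described per bitmap byte */

struct ex_bitmap { unsigned bi_start; };	/* byte offset of this bitmap */
struct ex_rbm {
	unsigned long long rd_data0;	/* first data block of the rgrp */
	struct ex_bitmap *bits;		/* array of bitmaps */
	int bii;			/* bitmap index */
	unsigned offset;		/* block offset within that bitmap */
};

static unsigned long long ex_rbm_to_block(const struct ex_rbm *rbm)
{
	return rbm->rd_data0 +
	       (unsigned long long)rbm->bits[rbm->bii].bi_start * GFS2_NBBY +
	       rbm->offset;
}

int main(void)
{
	struct ex_bitmap bits[2] = { { 0 }, { 500 } };
	struct ex_rbm rbm = { .rd_data0 = 100000, .bits = bits,
			      .bii = 1, .offset = 7 };
	/* 100000 + 500 * 4 + 7 = 102007 */
	printf("%llu\n", ex_rbm_to_block(&rbm));
	return 0;
}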
 
@@ -278,6 +286,20 @@ struct gfs2_blkreserv {
        unsigned int rs_qa_qd_num;
 };
 
+/*
+ * Allocation parameters
+ * @target: The number of blocks we'd ideally like to allocate
+ * @aflags: The flags (e.g. Orlov flag)
+ *
+ * The intent is to gradually expand this structure over time in
+ * order to give more information, e.g. alignment, min extent size
+ * to the allocation code.
+ */
+struct gfs2_alloc_parms {
+       u32 target;
+       u32 aflags;
+};
+
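Callers elsewhere in this patch switch from passing (requested, flags) to filling one of these parameter blocks on the stack and handing it to gfs2_inplace_reserve(). A throwaway user-space model of that calling convention; the stub reserve function, the struct copy and the values are invented for the sketch:

#include <stdio.h>

/* Mirrors struct gfs2_alloc_parms: a parameter block that can grow later. */
struct alloc_parms {
	unsigned target;	/* blocks we'd ideally like to allocate */
	unsigned aflags;	/* e.g. an Orlov-style flag */
};

/* Stub standing in for gfs2_inplace_reserve(ip, &ap). */
static int inplace_reserve(const struct alloc_parms *ap)
{
	printf("reserve %u blocks (flags 0x%x)\n", ap->target, ap->aflags);
	return 0;
}

int main(void)
{
	unsigned data_blocks = 8, ind_blocks = 1;
	struct alloc_parms ap = { .aflags = 0 };

	ap.target = data_blocks + ind_blocks;
	return inplace_reserve(&ap);
}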
 enum {
        GLF_LOCK                        = 1,
        GLF_DEMOTE                      = 3,
@@ -300,9 +322,9 @@ struct gfs2_glock {
        struct gfs2_sbd *gl_sbd;
        unsigned long gl_flags;         /* GLF_... */
        struct lm_lockname gl_name;
-       atomic_t gl_ref;
 
-       spinlock_t gl_spin;
+       struct lockref gl_lockref;
+#define gl_spin gl_lockref.lock
 
        /* State fields protected by gl_spin */
        unsigned int gl_state:2,        /* Current state */
@@ -516,7 +538,6 @@ struct gfs2_tune {
 
        unsigned int gt_logd_secs;
 
-       unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
        unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
        unsigned int gt_quota_scale_num; /* Numerator */
        unsigned int gt_quota_scale_den; /* Denominator */
@@ -694,6 +715,7 @@ struct gfs2_sbd {
        struct list_head sd_quota_list;
        atomic_t sd_quota_count;
        struct mutex sd_quota_mutex;
+       struct mutex sd_quota_sync_mutex;
        wait_queue_head_t sd_quota_wait;
        struct list_head sd_trunc_list;
        spinlock_t sd_trunc_lock;
index ced3257f06e84bd24b6d96063a4f88f5e2b81525..1615df16cf4eb9ed5c5c4f20ee46c56bc00d3143 100644 (file)
@@ -379,6 +379,7 @@ static void munge_mode_uid_gid(const struct gfs2_inode *dip,
 static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+       struct gfs2_alloc_parms ap = { .target = RES_DINODE, .aflags = flags, };
        int error;
        int dblocks = 1;
 
@@ -386,7 +387,7 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags)
        if (error)
                goto out;
 
-       error = gfs2_inplace_reserve(ip, RES_DINODE, flags);
+       error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_quota;
 
@@ -472,6 +473,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
                       struct gfs2_inode *ip, int arq)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
+       struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
        int error;
 
        if (arq) {
@@ -479,7 +481,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
                if (error)
                        goto fail_quota_locks;
 
-               error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
+               error = gfs2_inplace_reserve(dip, &ap);
                if (error)
                        goto fail_quota_locks;
 
@@ -584,17 +586,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (!IS_ERR(inode)) {
                d = d_splice_alias(inode, dentry);
                error = 0;
-               if (file && !IS_ERR(d)) {
-                       if (d == NULL)
-                               d = dentry;
-                       if (S_ISREG(inode->i_mode))
-                               error = finish_open(file, d, gfs2_open_common, opened);
-                       else
+               if (file) {
+                       if (S_ISREG(inode->i_mode)) {
+                               WARN_ON(d != NULL);
+                               error = finish_open(file, dentry, gfs2_open_common, opened);
+                       } else {
                                error = finish_no_open(file, d);
+                       }
+               } else {
+                       dput(d);
                }
                gfs2_glock_dq_uninit(ghs);
-               if (IS_ERR(d))
-                       return PTR_ERR(d);
                return error;
        } else if (error != -ENOENT) {
                goto fail_gunlock;
@@ -713,7 +715,7 @@ fail_gunlock2:
 fail_free_inode:
        if (ip->i_gl)
                gfs2_glock_put(ip->i_gl);
-       gfs2_rs_delete(ip);
+       gfs2_rs_delete(ip, NULL);
        free_inode_nonrcu(inode);
        inode = NULL;
 fail_gunlock:
@@ -781,8 +783,10 @@ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry,
                error = finish_open(file, dentry, gfs2_open_common, opened);
 
        gfs2_glock_dq_uninit(&gh);
-       if (error)
+       if (error) {
+               dput(d);
                return ERR_PTR(error);
+       }
        return d;
 }
 
@@ -874,11 +878,12 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
        error = 0;
 
        if (alloc_required) {
+               struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
                error = gfs2_quota_lock_check(dip);
                if (error)
                        goto out_gunlock;
 
-               error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres, 0);
+               error = gfs2_inplace_reserve(dip, &ap);
                if (error)
                        goto out_gunlock_q;
 
@@ -1163,14 +1168,16 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
        d = __gfs2_lookup(dir, dentry, file, opened);
        if (IS_ERR(d))
                return PTR_ERR(d);
-       if (d == NULL)
-               d = dentry;
-       if (d->d_inode) {
+       if (d != NULL)
+               dentry = d;
+       if (dentry->d_inode) {
                if (!(*opened & FILE_OPENED))
-                       return finish_no_open(file, d);
+                       return finish_no_open(file, dentry);
+               dput(d);
                return 0;
        }
 
+       BUG_ON(d != NULL);
        if (!(flags & O_CREAT))
                return -ENOENT;
 
@@ -1385,11 +1392,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
                goto out_gunlock;
 
        if (alloc_required) {
+               struct gfs2_alloc_parms ap = { .target = sdp->sd_max_dirres, };
                error = gfs2_quota_lock_check(ndip);
                if (error)
                        goto out_gunlock;
 
-               error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres, 0);
+               error = gfs2_inplace_reserve(ndip, &ap);
                if (error)
                        goto out_gunlock_q;
 
@@ -1506,13 +1514,6 @@ out:
        return NULL;
 }
 
-static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
-{
-       char *s = nd_get_link(nd);
-       if (!IS_ERR(s))
-               kfree(s);
-}
-
 /**
  * gfs2_permission -
  * @inode: The inode
@@ -1864,7 +1865,7 @@ const struct inode_operations gfs2_dir_iops = {
 const struct inode_operations gfs2_symlink_iops = {
        .readlink = generic_readlink,
        .follow_link = gfs2_follow_link,
-       .put_link = gfs2_put_link,
+       .put_link = kfree_put_link,
        .permission = gfs2_permission,
        .setattr = gfs2_setattr,
        .getattr = gfs2_getattr,
index 19ff5e8c285c4c0764d402a719146f1416d9f35a..82303b4749582cd3c00d402a9f42b9972b1de677 100644 (file)
@@ -51,7 +51,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
 {
        spin_lock_init(&gt->gt_spin);
 
-       gt->gt_quota_simul_sync = 64;
        gt->gt_quota_warn_period = 10;
        gt->gt_quota_scale_num = 1;
        gt->gt_quota_scale_den = 1;
@@ -94,6 +93,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 
        INIT_LIST_HEAD(&sdp->sd_quota_list);
        mutex_init(&sdp->sd_quota_mutex);
+       mutex_init(&sdp->sd_quota_sync_mutex);
        init_waitqueue_head(&sdp->sd_quota_wait);
        INIT_LIST_HEAD(&sdp->sd_trunc_list);
        spin_lock_init(&sdp->sd_trunc_lock);
index db441359ee8cd2f31fa4a980afe8c54a3f715447..4a9726aa191f59d1a01a7cff7241ad4a2285d687 100644 (file)
@@ -289,6 +289,26 @@ static void slot_hold(struct gfs2_quota_data *qd)
        spin_unlock(&qd_lru_lock);
 }
 
+static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
+                            unsigned int bit, int new_value)
+{
+       unsigned int c, o, b = bit;
+       int old_value;
+
+       c = b / (8 * PAGE_SIZE);
+       b %= 8 * PAGE_SIZE;
+       o = b / 8;
+       b %= 8;
+
+       old_value = (bitmap[c][o] & (1 << b));
+       gfs2_assert_withdraw(sdp, !old_value != !new_value);
+
+       if (new_value)
+               bitmap[c][o] |= 1 << b;
+       else
+               bitmap[c][o] &= ~(1 << b);
+}
+
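gfs2_icbit_munge() above addresses one bit inside an array of PAGE_SIZE-sized bitmap chunks: chunk = bit / (8 * PAGE_SIZE), then byte = remainder / 8 and bit-in-byte = remainder % 8. A quick stand-alone check of that decomposition, assuming 4096-byte pages for the example:

#include <stdio.h>

#define EX_PAGE_SIZE 4096U	/* assumed page size for the example */

int main(void)
{
	unsigned int bit = 70000;	/* arbitrary slot number */
	unsigned int c, o, b = bit;

	c = b / (8 * EX_PAGE_SIZE);	/* which PAGE_SIZE chunk */
	b %= 8 * EX_PAGE_SIZE;
	o = b / 8;			/* which byte in that chunk */
	b %= 8;				/* which bit in that byte */

	/* 70000 = 2 * 32768 + 4464 -> chunk 2, byte 558, bit 0 */
	printf("chunk=%u byte=%u bit=%u\n", c, o, b);
	return 0;
}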
 static void slot_put(struct gfs2_quota_data *qd)
 {
        struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
@@ -363,6 +383,25 @@ static void bh_put(struct gfs2_quota_data *qd)
        mutex_unlock(&sdp->sd_quota_mutex);
 }
 
+static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
+                        u64 *sync_gen)
+{
+       if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
+           !test_bit(QDF_CHANGE, &qd->qd_flags) ||
+           (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
+               return 0;
+
+       list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
+
+       set_bit(QDF_LOCKED, &qd->qd_flags);
+       gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+       atomic_inc(&qd->qd_count);
+       qd->qd_change_sync = qd->qd_change;
+       gfs2_assert_warn(sdp, qd->qd_slot_count);
+       qd->qd_slot_count++;
+       return 1;
+}
+
 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 {
        struct gfs2_quota_data *qd = NULL;
@@ -377,22 +416,9 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
        spin_lock(&qd_lru_lock);
 
        list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
-               if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
-                   !test_bit(QDF_CHANGE, &qd->qd_flags) ||
-                   qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
-                       continue;
-
-               list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
-               set_bit(QDF_LOCKED, &qd->qd_flags);
-               gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
-               atomic_inc(&qd->qd_count);
-               qd->qd_change_sync = qd->qd_change;
-               gfs2_assert_warn(sdp, qd->qd_slot_count);
-               qd->qd_slot_count++;
-               found = 1;
-
-               break;
+               found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
+               if (found)
+                       break;
        }
 
        if (!found)
@@ -416,43 +442,6 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
        return 0;
 }
 
-static int qd_trylock(struct gfs2_quota_data *qd)
-{
-       struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-
-       if (sdp->sd_vfs->s_flags & MS_RDONLY)
-               return 0;
-
-       spin_lock(&qd_lru_lock);
-
-       if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
-           !test_bit(QDF_CHANGE, &qd->qd_flags)) {
-               spin_unlock(&qd_lru_lock);
-               return 0;
-       }
-
-       list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
-
-       set_bit(QDF_LOCKED, &qd->qd_flags);
-       gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
-       atomic_inc(&qd->qd_count);
-       qd->qd_change_sync = qd->qd_change;
-       gfs2_assert_warn(sdp, qd->qd_slot_count);
-       qd->qd_slot_count++;
-
-       spin_unlock(&qd_lru_lock);
-
-       gfs2_assert_warn(sdp, qd->qd_change_sync);
-       if (bh_get(qd)) {
-               clear_bit(QDF_LOCKED, &qd->qd_flags);
-               slot_put(qd);
-               qd_put(qd);
-               return 0;
-       }
-
-       return 1;
-}
-
 static void qd_unlock(struct gfs2_quota_data *qd)
 {
        gfs2_assert_warn(qd->qd_gl->gl_sbd,
@@ -763,6 +752,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 {
        struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+       struct gfs2_alloc_parms ap = { .aflags = 0, };
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
@@ -815,7 +805,8 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
        blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
 
        reserved = 1 + (nalloc * (data_blocks + ind_blocks));
-       error = gfs2_inplace_reserve(ip, reserved, 0);
+       ap.target = reserved;
+       error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_alloc;
 
@@ -1001,9 +992,11 @@ static int need_sync(struct gfs2_quota_data *qd)
 
 void gfs2_quota_unlock(struct gfs2_inode *ip)
 {
+       struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_quota_data *qda[4];
        unsigned int count = 0;
        unsigned int x;
+       int found;
 
        if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
                goto out;
@@ -1016,9 +1009,25 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
                sync = need_sync(qd);
 
                gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
+               if (!sync)
+                       continue;
+
+               spin_lock(&qd_lru_lock);
+               found = qd_check_sync(sdp, qd, NULL);
+               spin_unlock(&qd_lru_lock);
 
-               if (sync && qd_trylock(qd))
-                       qda[count++] = qd;
+               if (!found)
+                       continue;
+
+               gfs2_assert_warn(sdp, qd->qd_change_sync);
+               if (bh_get(qd)) {
+                       clear_bit(QDF_LOCKED, &qd->qd_flags);
+                       slot_put(qd);
+                       qd_put(qd);
+                       continue;
+               }
+
+               qda[count++] = qd;
        }
 
        if (count) {
@@ -1118,17 +1127,18 @@ int gfs2_quota_sync(struct super_block *sb, int type)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_data **qda;
-       unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
+       unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
        unsigned int num_qd;
        unsigned int x;
        int error = 0;
 
-       sdp->sd_quota_sync_gen++;
-
        qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
        if (!qda)
                return -ENOMEM;
 
+       mutex_lock(&sdp->sd_quota_sync_mutex);
+       sdp->sd_quota_sync_gen++;
+
        do {
                num_qd = 0;
 
@@ -1153,6 +1163,7 @@ int gfs2_quota_sync(struct super_block *sb, int type)
                }
        } while (!error && num_qd == max_qd);
 
+       mutex_unlock(&sdp->sd_quota_sync_mutex);
        kfree(qda);
 
        return error;
@@ -1573,10 +1584,12 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
        if (gfs2_is_stuffed(ip))
                alloc_required = 1;
        if (alloc_required) {
+               struct gfs2_alloc_parms ap = { .aflags = 0, };
                gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                                       &data_blocks, &ind_blocks);
                blocks = 1 + data_blocks + ind_blocks;
-               error = gfs2_inplace_reserve(ip, blocks, 0);
+               ap.target = blocks;
+               error = gfs2_inplace_reserve(ip, &ap);
                if (error)
                        goto out_i;
                blocks += gfs2_rg_blocks(ip, blocks);
index 69317435faa723c9288d390407d9343fb6bf1096..4d83abdd5635273b3e0af9589eec83226be249ff 100644 (file)
@@ -81,11 +81,12 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
                               unsigned char new_state)
 {
        unsigned char *byte1, *byte2, *end, cur_state;
-       unsigned int buflen = rbm->bi->bi_len;
+       struct gfs2_bitmap *bi = rbm_bi(rbm);
+       unsigned int buflen = bi->bi_len;
        const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE;
 
-       byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
-       end = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen;
+       byte1 = bi->bi_bh->b_data + bi->bi_offset + (rbm->offset / GFS2_NBBY);
+       end = bi->bi_bh->b_data + bi->bi_offset + buflen;
 
        BUG_ON(byte1 >= end);
 
@@ -95,18 +96,17 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
                printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, "
                       "new_state=%d\n", rbm->offset, cur_state, new_state);
                printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n",
-                      (unsigned long long)rbm->rgd->rd_addr,
-                      rbm->bi->bi_start);
+                      (unsigned long long)rbm->rgd->rd_addr, bi->bi_start);
                printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n",
-                      rbm->bi->bi_offset, rbm->bi->bi_len);
+                      bi->bi_offset, bi->bi_len);
                dump_stack();
                gfs2_consist_rgrpd(rbm->rgd);
                return;
        }
        *byte1 ^= (cur_state ^ new_state) << bit;
 
-       if (do_clone && rbm->bi->bi_clone) {
-               byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY);
+       if (do_clone && bi->bi_clone) {
+               byte2 = bi->bi_clone + bi->bi_offset + (rbm->offset / GFS2_NBBY);
                cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
                *byte2 ^= (cur_state ^ new_state) << bit;
        }
@@ -121,7 +121,8 @@ static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone,
 
 static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm)
 {
-       const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset;
+       struct gfs2_bitmap *bi = rbm_bi(rbm);
+       const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
        const u8 *byte;
        unsigned int bit;
 
@@ -252,28 +253,52 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
 static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 {
        u64 rblock = block - rbm->rgd->rd_data0;
-       u32 x;
 
        if (WARN_ON_ONCE(rblock > UINT_MAX))
                return -EINVAL;
        if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data)
                return -E2BIG;
 
-       rbm->bi = rbm->rgd->rd_bits;
+       rbm->bii = 0;
        rbm->offset = (u32)(rblock);
        /* Check if the block is within the first block */
-       if (rbm->offset < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY)
+       if (rbm->offset < rbm_bi(rbm)->bi_blocks)
                return 0;
 
        /* Adjust for the size diff between gfs2_meta_header and gfs2_rgrp */
        rbm->offset += (sizeof(struct gfs2_rgrp) -
                        sizeof(struct gfs2_meta_header)) * GFS2_NBBY;
-       x = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
-       rbm->offset -= x * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
-       rbm->bi += x;
+       rbm->bii = rbm->offset / rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
+       rbm->offset -= rbm->bii * rbm->rgd->rd_sbd->sd_blocks_per_bitmap;
        return 0;
 }
 
+/**
+ * gfs2_rbm_incr - increment an rbm structure
+ * @rbm: The rbm with rgd already set correctly
+ *
+ * This function takes an existing rbm structure and increments it to the next
+ * viable block offset.
+ *
+ * Returns: If incrementing the offset would cause the rbm to go past the
+ *          end of the rgrp, true is returned, otherwise false.
+ *
+ */
+
+static bool gfs2_rbm_incr(struct gfs2_rbm *rbm)
+{
+       if (rbm->offset + 1 < rbm_bi(rbm)->bi_blocks) { /* in the same bitmap */
+               rbm->offset++;
+               return false;
+       }
+       if (rbm->bii == rbm->rgd->rd_length - 1) /* at the last bitmap */
+               return true;
+
+       rbm->offset = 0;
+       rbm->bii++;
+       return false;
+}
+
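A compact user-space model of the increment logic in gfs2_rbm_incr() above, stepping a (bitmap index, offset) pair across per-bitmap block counts; the ex_* types and the block counts are invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

struct ex_rbm {
	const unsigned *bi_blocks;	/* blocks per bitmap */
	unsigned nbitmaps;		/* stand-in for rgd->rd_length */
	unsigned bii;			/* bitmap index */
	unsigned offset;		/* offset within the bitmap */
};

/* Returns true when stepping past the end of the resource group. */
static bool ex_rbm_incr(struct ex_rbm *rbm)
{
	if (rbm->offset + 1 < rbm->bi_blocks[rbm->bii]) {
		rbm->offset++;			/* stay in the same bitmap */
		return false;
	}
	if (rbm->bii == rbm->nbitmaps - 1)	/* already at the last bitmap */
		return true;
	rbm->offset = 0;			/* first block of next bitmap */
	rbm->bii++;
	return false;
}

int main(void)
{
	const unsigned blocks[2] = { 3, 2 };
	struct ex_rbm rbm = { .bi_blocks = blocks, .nbitmaps = 2 };
	int steps = 0;

	while (!ex_rbm_incr(&rbm))
		steps++;
	printf("%d\n", steps);	/* 4 increments walk the 5 blocks */
	return 0;
}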
 /**
  * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned
  * @rbm: Position to search (value/result)
@@ -285,7 +310,6 @@ static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
 
 static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len)
 {
-       u64 block;
        u32 n;
        u8 res;
 
@@ -296,8 +320,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
                (*len)--;
                if (*len == 0)
                        return true;
-               block = gfs2_rbm_to_block(rbm);
-               if (gfs2_rbm_from_block(rbm, block + 1))
+               if (gfs2_rbm_incr(rbm))
                        return true;
        }
 
@@ -328,6 +351,7 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
        u32 chunk_size;
        u8 *ptr, *start, *end;
        u64 block;
+       struct gfs2_bitmap *bi;
 
        if (n_unaligned &&
            gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len))
@@ -336,11 +360,12 @@ static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len)
        n_unaligned = len & 3;
        /* Start is now byte aligned */
        while (len > 3) {
-               start = rbm.bi->bi_bh->b_data;
-               if (rbm.bi->bi_clone)
-                       start = rbm.bi->bi_clone;
-               end = start + rbm.bi->bi_bh->b_size;
-               start += rbm.bi->bi_offset;
+               bi = rbm_bi(&rbm);
+               start = bi->bi_bh->b_data;
+               if (bi->bi_clone)
+                       start = bi->bi_clone;
+               end = start + bi->bi_bh->b_size;
+               start += bi->bi_offset;
                BUG_ON(rbm.offset & 3);
                start += (rbm.offset / GFS2_NBBY);
                bytes = min_t(u32, len / GFS2_NBBY, (end - start));
@@ -605,11 +630,13 @@ static void __rs_deltree(struct gfs2_blkreserv *rs)
        RB_CLEAR_NODE(&rs->rs_node);
 
        if (rs->rs_free) {
+               struct gfs2_bitmap *bi = rbm_bi(&rs->rs_rbm);
+
                /* return reserved blocks to the rgrp */
                BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
                rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
                rs->rs_free = 0;
-               clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
+               clear_bit(GBF_FULL, &bi->bi_flags);
                smp_mb__after_clear_bit();
        }
 }
@@ -634,14 +661,13 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 /**
  * gfs2_rs_delete - delete a multi-block reservation
  * @ip: The inode for this reservation
+ * @wcount: The inode's write count, or NULL
  *
  */
-void gfs2_rs_delete(struct gfs2_inode *ip)
+void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
 {
-       struct inode *inode = &ip->i_inode;
-
        down_write(&ip->i_rw_mutex);
-       if (ip->i_res && atomic_read(&inode->i_writecount) <= 1) {
+       if (ip->i_res && ((wcount == NULL) || (atomic_read(wcount) <= 1))) {
                gfs2_rs_deltree(ip->i_res);
                BUG_ON(ip->i_res->rs_free);
                kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
@@ -743,18 +769,21 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
                        bi->bi_offset = sizeof(struct gfs2_rgrp);
                        bi->bi_start = 0;
                        bi->bi_len = bytes;
+                       bi->bi_blocks = bytes * GFS2_NBBY;
                /* header block */
                } else if (x == 0) {
                        bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
                        bi->bi_offset = sizeof(struct gfs2_rgrp);
                        bi->bi_start = 0;
                        bi->bi_len = bytes;
+                       bi->bi_blocks = bytes * GFS2_NBBY;
                /* last block */
                } else if (x + 1 == length) {
                        bytes = bytes_left;
                        bi->bi_offset = sizeof(struct gfs2_meta_header);
                        bi->bi_start = rgd->rd_bitbytes - bytes_left;
                        bi->bi_len = bytes;
+                       bi->bi_blocks = bytes * GFS2_NBBY;
                /* other blocks */
                } else {
                        bytes = sdp->sd_sb.sb_bsize -
@@ -762,6 +791,7 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
                        bi->bi_offset = sizeof(struct gfs2_meta_header);
                        bi->bi_start = rgd->rd_bitbytes - bytes_left;
                        bi->bi_len = bytes;
+                       bi->bi_blocks = bytes * GFS2_NBBY;
                }
 
                bytes_left -= bytes;
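Each branch of compute_bitstructs() now also caches bi_blocks, the number of filesystem blocks a bitmap fragment covers, so the offset comparisons in gfs2_rbm_from_block() and gfs2_rbm_incr() above need no per-call arithmetic. GFS2 stores one two-bit state per block, hence GFS2_NBBY (four) blocks per bitmap byte; a minimal sketch of the relation:

/* Sketch: blocks covered by a bitmap fragment of bi_len bytes
 * (two bits of state per block => GFS2_NBBY == 4 blocks per byte). */
static u32 example_bi_blocks(u32 bi_len)
{
	return bi_len * GFS2_NBBY;
}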
@@ -1392,12 +1422,12 @@ static void rs_insert(struct gfs2_inode *ip)
  * rg_mblk_search - find a group of multiple free blocks to form a reservation
  * @rgd: the resource group descriptor
  * @ip: pointer to the inode for which we're reserving blocks
- * @requested: number of blocks required for this allocation
+ * @ap: the allocation parameters
  *
  */
 
 static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
-                          unsigned requested)
+                          const struct gfs2_alloc_parms *ap)
 {
        struct gfs2_rbm rbm = { .rgd = rgd, };
        u64 goal;
@@ -1410,7 +1440,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
        if (S_ISDIR(inode->i_mode))
                extlen = 1;
        else {
-               extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
+               extlen = max_t(u32, atomic_read(&rs->rs_sizehint), ap->target);
                extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
        }
        if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
@@ -1554,14 +1584,14 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
                         const struct gfs2_inode *ip, bool nowrap)
 {
        struct buffer_head *bh;
-       struct gfs2_bitmap *initial_bi;
+       int initial_bii;
        u32 initial_offset;
        u32 offset;
        u8 *buffer;
-       int index;
        int n = 0;
        int iters = rbm->rgd->rd_length;
        int ret;
+       struct gfs2_bitmap *bi;
 
        /* If we are not starting at the beginning of a bitmap, then we
         * need to add one to the bitmap count to ensure that we search
@@ -1571,52 +1601,53 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext,
                iters++;
 
        while(1) {
-               if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
+               bi = rbm_bi(rbm);
+               if (test_bit(GBF_FULL, &bi->bi_flags) &&
                    (state == GFS2_BLKST_FREE))
                        goto next_bitmap;
 
-               bh = rbm->bi->bi_bh;
-               buffer = bh->b_data + rbm->bi->bi_offset;
+               bh = bi->bi_bh;
+               buffer = bh->b_data + bi->bi_offset;
                WARN_ON(!buffer_uptodate(bh));
-               if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
-                       buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
+               if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
+                       buffer = bi->bi_clone + bi->bi_offset;
                initial_offset = rbm->offset;
-               offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
+               offset = gfs2_bitfit(buffer, bi->bi_len, rbm->offset, state);
                if (offset == BFITNOENT)
                        goto bitmap_full;
                rbm->offset = offset;
                if (ip == NULL)
                        return 0;
 
-               initial_bi = rbm->bi;
+               initial_bii = rbm->bii;
                ret = gfs2_reservation_check_and_update(rbm, ip, minext);
                if (ret == 0)
                        return 0;
                if (ret > 0) {
-                       n += (rbm->bi - initial_bi);
+                       n += (rbm->bii - initial_bii);
                        goto next_iter;
                }
                if (ret == -E2BIG) {
-                       index = 0;
+                       rbm->bii = 0;
                        rbm->offset = 0;
-                       n += (rbm->bi - initial_bi);
+                       n += (rbm->bii - initial_bii);
                        goto res_covered_end_of_rgrp;
                }
                return ret;
 
 bitmap_full:   /* Mark bitmap as full and fall through */
-               if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
-                       set_bit(GBF_FULL, &rbm->bi->bi_flags);
+               if ((state == GFS2_BLKST_FREE) && initial_offset == 0) {
+                       struct gfs2_bitmap *bi = rbm_bi(rbm);
+                       set_bit(GBF_FULL, &bi->bi_flags);
+               }
 
 next_bitmap:   /* Find next bitmap in the rgrp */
                rbm->offset = 0;
-               index = rbm->bi - rbm->rgd->rd_bits;
-               index++;
-               if (index == rbm->rgd->rd_length)
-                       index = 0;
+               rbm->bii++;
+               if (rbm->bii == rbm->rgd->rd_length)
+                       rbm->bii = 0;
 res_covered_end_of_rgrp:
-               rbm->bi = &rbm->rgd->rd_bits[index];
-               if ((index == 0) && nowrap)
+               if ((rbm->bii == 0) && nowrap)
                        break;
                n++;
 next_iter:
@@ -1645,7 +1676,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
        struct gfs2_inode *ip;
        int error;
        int found = 0;
-       struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
+       struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
 
        while (1) {
                down_write(&sdp->sd_log_flush_lock);
@@ -1800,12 +1831,12 @@ static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *b
 /**
  * gfs2_inplace_reserve - Reserve space in the filesystem
  * @ip: the inode to reserve space for
- * @requested: the number of blocks to be reserved
+ * @ap: the allocation parameters
  *
  * Returns: errno
  */
 
-int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *begin = NULL;
@@ -1817,17 +1848,16 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
 
        if (sdp->sd_args.ar_rgrplvb)
                flags |= GL_SKIP;
-       if (gfs2_assert_warn(sdp, requested))
+       if (gfs2_assert_warn(sdp, ap->target))
                return -EINVAL;
        if (gfs2_rs_active(rs)) {
                begin = rs->rs_rbm.rgd;
-               flags = 0; /* Yoda: Do or do not. There is no try */
        } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
                rs->rs_rbm.rgd = begin = ip->i_rgd;
        } else {
                rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
        }
-       if (S_ISDIR(ip->i_inode.i_mode) && (aflags & GFS2_AF_ORLOV))
+       if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV))
                skip = gfs2_orlov_skip(ip);
        if (rs->rs_rbm.rgd == NULL)
                return -EBADSLT;
@@ -1869,14 +1899,14 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 aflags)
 
                /* Get a reservation if we don't already have one */
                if (!gfs2_rs_active(rs))
-                       rg_mblk_search(rs->rs_rbm.rgd, ip, requested);
+                       rg_mblk_search(rs->rs_rbm.rgd, ip, ap);
 
                /* Skip rgrps when we can't get a reservation on first pass */
                if (!gfs2_rs_active(rs) && (loops < 1))
                        goto check_rgrp;
 
                /* If rgrp has enough free space, use it */
-               if (rs->rs_rbm.rgd->rd_free_clone >= requested) {
+               if (rs->rs_rbm.rgd->rd_free_clone >= ap->target) {
                        ip->i_rgd = rs->rs_rbm.rgd;
                        return 0;
                }
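gfs2_inplace_reserve() and rg_mblk_search() now take a const struct gfs2_alloc_parms * instead of separate target and flags arguments. The structure is defined elsewhere in this merge; inferred from the fields referenced here (ap->target, ap->aflags), it is roughly:

/* Rough shape inferred from usage in these hunks; not the committed definition */
struct gfs2_alloc_parms {
	u64 target;	/* blocks requested for this allocation */
	u32 aflags;	/* allocation flags, e.g. GFS2_AF_ORLOV */
};

Call sites build it on the stack, as in the ea_alloc_skeleton() hunk further down: struct gfs2_alloc_parms ap = { .target = blks };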
@@ -1973,14 +2003,14 @@ static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
 
        *n = 1;
        block = gfs2_rbm_to_block(rbm);
-       gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm->bi->bi_bh);
+       gfs2_trans_add_meta(rbm->rgd->rd_gl, rbm_bi(rbm)->bi_bh);
        gfs2_setbit(rbm, true, dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
        block++;
        while (*n < elen) {
                ret = gfs2_rbm_from_block(&pos, block);
                if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE)
                        break;
-               gfs2_trans_add_meta(pos.rgd->rd_gl, pos.bi->bi_bh);
+               gfs2_trans_add_meta(pos.rgd->rd_gl, rbm_bi(&pos)->bi_bh);
                gfs2_setbit(&pos, true, GFS2_BLKST_USED);
                (*n)++;
                block++;
@@ -2001,6 +2031,7 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
                                     u32 blen, unsigned char new_state)
 {
        struct gfs2_rbm rbm;
+       struct gfs2_bitmap *bi;
 
        rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
        if (!rbm.rgd) {
@@ -2011,15 +2042,15 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
 
        while (blen--) {
                gfs2_rbm_from_block(&rbm, bstart);
+               bi = rbm_bi(&rbm);
                bstart++;
-               if (!rbm.bi->bi_clone) {
-                       rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size,
-                                                  GFP_NOFS | __GFP_NOFAIL);
-                       memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset,
-                              rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
-                              rbm.bi->bi_len);
+               if (!bi->bi_clone) {
+                       bi->bi_clone = kmalloc(bi->bi_bh->b_size,
+                                              GFP_NOFS | __GFP_NOFAIL);
+                       memcpy(bi->bi_clone + bi->bi_offset,
+                              bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
                }
-               gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.bi->bi_bh);
+               gfs2_trans_add_meta(rbm.rgd->rd_gl, bi->bi_bh);
                gfs2_setbit(&rbm, false, new_state);
        }
 
@@ -2102,6 +2133,35 @@ out:
        spin_unlock(&rgd->rd_rsspin);
 }
 
+/**
+ * gfs2_set_alloc_start - Set starting point for block allocation
+ * @rbm: The rbm which will be set to the required location
+ * @ip: The gfs2 inode
+ * @dinode: Flag to say if allocation includes a new inode
+ *
+ * This sets the starting point from the reservation if one is active
+ * otherwise it falls back to guessing a start point based on the
+ * inode's goal block or the last allocation point in the rgrp.
+ */
+
+static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
+                                const struct gfs2_inode *ip, bool dinode)
+{
+       u64 goal;
+
+       if (gfs2_rs_active(ip->i_res)) {
+               *rbm = ip->i_res->rs_rbm;
+               return;
+       }
+
+       if (!dinode && rgrp_contains_block(rbm->rgd, ip->i_goal))
+               goal = ip->i_goal;
+       else
+               goal = rbm->rgd->rd_last_alloc + rbm->rgd->rd_data0;
+
+       gfs2_rbm_from_block(rbm, goal);
+}
+
 /**
  * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
  * @ip: the inode to allocate the block for
@@ -2120,22 +2180,14 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
        struct buffer_head *dibh;
        struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
        unsigned int ndata;
-       u64 goal;
        u64 block; /* block, within the file system scope */
        int error;
 
-       if (gfs2_rs_active(ip->i_res))
-               goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
-       else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
-               goal = ip->i_goal;
-       else
-               goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
-
-       gfs2_rbm_from_block(&rbm, goal);
+       gfs2_set_alloc_start(&rbm, ip, dinode);
        error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false);
 
        if (error == -ENOSPC) {
-               gfs2_rbm_from_block(&rbm, goal);
+               gfs2_set_alloc_start(&rbm, ip, dinode);
                error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false);
        }
 
index 5b3f4a896e6ca305c7708478d73286b23ea3e3e5..3a10d2ffbbe7b34e93fd37a8dbc68028b8bfa041 100644 (file)
@@ -40,7 +40,7 @@ extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
 extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
 
 #define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested, u32 flags);
+extern int gfs2_inplace_reserve(struct gfs2_inode *ip, const struct gfs2_alloc_parms *ap);
 extern void gfs2_inplace_release(struct gfs2_inode *ip);
 
 extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
@@ -48,7 +48,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
 
 extern int gfs2_rs_alloc(struct gfs2_inode *ip);
 extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip);
+extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
 extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
 extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
 extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
index e5639dec66c49dc7361ff0ed79828301031b675d..35da5b19c0deb62b59e4124ab4566f4dc9279ef9 100644 (file)
@@ -1526,7 +1526,7 @@ out_unlock:
 out:
        /* Case 3 starts here */
        truncate_inode_pages(&inode->i_data, 0);
-       gfs2_rs_delete(ip);
+       gfs2_rs_delete(ip, NULL);
        gfs2_ordered_del_inode(ip);
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
index aa5c48044966697c2365712ab53a20d7bc5d8958..d09f6edda0ff8d55f31ef04dba7f4720852f7a8d 100644 (file)
@@ -587,7 +587,6 @@ TUNE_ATTR(max_readahead, 0);
 TUNE_ATTR(complain_secs, 0);
 TUNE_ATTR(statfs_slow, 0);
 TUNE_ATTR(new_files_jdata, 0);
-TUNE_ATTR(quota_simul_sync, 1);
 TUNE_ATTR(statfs_quantum, 1);
 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
 
@@ -597,7 +596,6 @@ static struct attribute *tune_attrs[] = {
        &tune_attr_max_readahead.attr,
        &tune_attr_complain_secs.attr,
        &tune_attr_statfs_slow.attr,
-       &tune_attr_quota_simul_sync.attr,
        &tune_attr_statfs_quantum.attr,
        &tune_attr_quota_scale.attr,
        &tune_attr_new_files_jdata.attr,
index 6402fb69d71bd838d943c4d60d1a8879d15f6217..f7109f689e6132d08b7fcfc6a0c69db63dc0c24f 100644 (file)
@@ -268,23 +268,3 @@ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
        return rv;
 }
 
-void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
-                     unsigned int bit, int new_value)
-{
-       unsigned int c, o, b = bit;
-       int old_value;
-
-       c = b / (8 * PAGE_SIZE);
-       b %= 8 * PAGE_SIZE;
-       o = b / 8;
-       b %= 8;
-
-       old_value = (bitmap[c][o] & (1 << b));
-       gfs2_assert_withdraw(sdp, !old_value != !new_value);
-
-       if (new_value)
-               bitmap[c][o] |= 1 << b;
-       else
-               bitmap[c][o] &= ~(1 << b);
-}
-
index 80535739ac7b2c5c1f98ac46e9c2cb4faf5d0eb5..b7ffb09b99ea2d231011b92c21fa64a3e0bcfa6d 100644 (file)
@@ -164,8 +164,6 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
 #define gfs2_tune_get(sdp, field) \
 gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
 
-void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
-                     unsigned int bit, int new_value);
 int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...);
 
 #endif /* __UTIL_DOT_H__ */
index ecd37f30ab91986923c5fa8c0cdf02f1bd3d330b..8c6a6f6bdba978f9c3b37703a79d4c409516557b 100644 (file)
@@ -723,6 +723,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                             unsigned int blks,
                             ea_skeleton_call_t skeleton_call, void *private)
 {
+       struct gfs2_alloc_parms ap = { .target = blks };
        struct buffer_head *dibh;
        int error;
 
@@ -734,7 +735,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
        if (error)
                return error;
 
-       error = gfs2_inplace_reserve(ip, blks, 0);
+       error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_gunlock_q;
 
index 380ab31b5e0f4870ee966cbcfd5af1e805035f58..3fe7b8e53290f94649865421aa9f458451599f07 100644 (file)
@@ -125,15 +125,14 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
 }
 
 static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
-               const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+               struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file_inode(file)->i_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                hfs_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
@@ -141,7 +140,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        hfs_write_failed(mapping, end);
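hfsplus and jfs below receive the same ->direct_IO conversion: the (iov, nr_segs) pair becomes a struct iov_iter, and the failed-write cleanup derives the end offset from iov_iter_count() rather than iov_length(). A hedged sketch of the converted shape, with example_get_block and example_write_failed standing in for the per-filesystem helpers:

static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
				 struct iov_iter *iter, loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp)->i_mapping->host;
	ssize_t ret;

	ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, example_get_block);

	/* a failed extending write may have instantiated blocks past i_size */
	if (unlikely((rw & WRITE) && ret < 0)) {
		loff_t end = offset + iov_iter_count(iter);	/* was iov_length(iov, nr_segs) */

		if (end > i_size_read(inode))
			example_write_failed(inode->i_mapping, end);
	}
	return ret;
}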
@@ -675,9 +674,9 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
 static const struct file_operations hfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = hfs_file_fsync,
index 37213d075f3c5c9f29029b280b093781ddb526ca..96d7a2ccded28f82ccdadde9bbe0820b4a86c7cb 100644 (file)
@@ -123,14 +123,14 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
 }
 
 static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
-               const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+               struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file_inode(file)->i_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                 hfsplus_get_block);
 
        /*
@@ -139,7 +139,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        hfsplus_write_failed(mapping, end);
@@ -399,9 +399,9 @@ static const struct inode_operations hfsplus_file_inode_operations = {
 static const struct file_operations hfsplus_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = hfsplus_file_fsync,
index 25437280a2071b8970efe6e394edb97a4433acd8..111a9916bcf518339335369592dcefe1280a29f4 100644 (file)
@@ -388,8 +388,8 @@ static const struct file_operations hostfs_file_fops = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .splice_read    = generic_file_splice_read,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .write          = do_sync_write,
        .mmap           = generic_file_mmap,
        .open           = hostfs_file_open,
index 67c1a61e09558e0bb632638b0f65d8316ab9b5f2..1ff95c19a4695e5777e2a5749c61b8bec9e889f4 100644 (file)
@@ -198,9 +198,9 @@ const struct file_operations hpfs_file_ops =
 {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .release        = hpfs_file_release,
        .fsync          = hpfs_file_fsync,
index 513e0d859a6c18d7b274a9ebe16afcc9e7f8eca4..6964003cfef80eda8d8b927cac3dffb812729567 100644 (file)
@@ -140,6 +140,10 @@ extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
  */
 extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
 extern int rw_verify_area(int, struct file *, const loff_t *, size_t);
+extern ssize_t do_aio_read(struct kiocb *kiocb, const struct iovec *iov,
+                          unsigned long nr_segs, loff_t pos);
+extern ssize_t do_aio_write(struct kiocb *kiocb, const struct iovec *iov,
+                           unsigned long nr_segs, loff_t pos);
 
 /*
  * splice.c
diff --git a/fs/iov-iter.c b/fs/iov-iter.c
new file mode 100644 (file)
index 0000000..ec461c8
--- /dev/null
@@ -0,0 +1,411 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/hardirq.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+
+static size_t __iovec_copy_to_user(char *vaddr, const struct iovec *iov,
+                                  size_t base, size_t bytes, int atomic)
+{
+       size_t copied = 0, left = 0;
+
+       while (bytes) {
+               char __user *buf = iov->iov_base + base;
+               int copy = min(bytes, iov->iov_len - base);
+
+               base = 0;
+               if (atomic)
+                       left = __copy_to_user_inatomic(buf, vaddr, copy);
+               else
+                       left = __copy_to_user(buf, vaddr, copy);
+               copied += copy;
+               bytes -= copy;
+               vaddr += copy;
+               iov++;
+
+               if (unlikely(left))
+                       break;
+       }
+       return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number of
+ * bytes which were copied.
+ */
+static size_t ii_iovec_copy_to_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       BUG_ON(!in_atomic());
+       kaddr = kmap_atomic(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               left = __copy_to_user_inatomic(buf, kaddr + offset, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_to_user(kaddr + offset, iov,
+                                             i->iov_offset, bytes, 1);
+       }
+       kunmap_atomic(kaddr);
+
+       return copied;
+}
+
+/*
+ * This has the same side effects and return value as
+ * ii_iovec_copy_to_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+static size_t ii_iovec_copy_to_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes,
+               int check_access)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       if (check_access) {
+               might_sleep();
+               if (generic_segment_checks(iov, &i->nr_segs, &bytes,
+                                          VERIFY_WRITE))
+                       return 0;
+       }
+
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               /*
+                * Faults on the destination of a read are common, so do it
+                * before taking the kmap.
+                */
+               if (!fault_in_pages_writeable(buf, bytes)) {
+                       kaddr = kmap_atomic(page);
+                       left = __copy_to_user_inatomic(buf, kaddr + offset,
+                                                    bytes);
+                       kunmap_atomic(kaddr);
+                       if (left == 0)
+                               goto success;
+               }
+               kaddr = kmap(page);
+               left = copy_to_user(buf, kaddr + offset, bytes);
+               kunmap(page);
+success:
+               copied = bytes - left;
+       } else {
+               kaddr = kmap(page);
+               copied = __iovec_copy_to_user(kaddr + offset, iov,
+                                             i->iov_offset, bytes, 0);
+               kunmap(page);
+       }
+       return copied;
+}
+
+#ifdef CONFIG_BLOCK
+/*
+ * As an easily verifiable first pass, we implement all the methods that
+ * copy data to and from bvec pages with one function.  We implement it
+ * all with kmap_atomic().
+ */
+static size_t bvec_copy_tofrom_page(struct iov_iter *iter, struct page *page,
+                                   unsigned long page_offset, size_t bytes,
+                                   int topage)
+{
+       struct bio_vec *bvec = (struct bio_vec *)iter->data;
+       size_t bvec_offset = iter->iov_offset;
+       size_t remaining = bytes;
+       void *bvec_map;
+       void *page_map;
+       size_t copy;
+
+       page_map = kmap_atomic(page);
+
+       BUG_ON(bytes > iter->count);
+       while (remaining) {
+               BUG_ON(bvec->bv_len == 0);
+               BUG_ON(bvec_offset >= bvec->bv_len);
+               copy = min(remaining, bvec->bv_len - bvec_offset);
+               bvec_map = kmap_atomic(bvec->bv_page);
+               if (topage)
+                       memcpy(page_map + page_offset,
+                              bvec_map + bvec->bv_offset + bvec_offset,
+                              copy);
+               else
+                       memcpy(bvec_map + bvec->bv_offset + bvec_offset,
+                              page_map + page_offset,
+                              copy);
+               kunmap_atomic(bvec_map);
+               remaining -= copy;
+               bvec_offset += copy;
+               page_offset += copy;
+               if (bvec_offset == bvec->bv_len) {
+                       bvec_offset = 0;
+                       bvec++;
+               }
+       }
+
+       kunmap_atomic(page_map);
+
+       return bytes;
+}
+
+static size_t ii_bvec_copy_to_user_atomic(struct page *page, struct iov_iter *i,
+                                         unsigned long offset, size_t bytes)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
+}
+static size_t ii_bvec_copy_to_user(struct page *page, struct iov_iter *i,
+                                  unsigned long offset, size_t bytes,
+                                  int check_access)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 0);
+}
+static size_t ii_bvec_copy_from_user_atomic(struct page *page,
+                                           struct iov_iter *i,
+                                           unsigned long offset, size_t bytes)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
+}
+static size_t ii_bvec_copy_from_user(struct page *page, struct iov_iter *i,
+                                    unsigned long offset, size_t bytes)
+{
+       return bvec_copy_tofrom_page(i, page, offset, bytes, 1);
+}
+
+/*
+ * bio_vecs have a stricter structure than iovecs that might have
+ * come from userspace.  There are no zero length bio_vec elements.
+ */
+static void ii_bvec_advance(struct iov_iter *i, size_t bytes)
+{
+       struct bio_vec *bvec = (struct bio_vec *)i->data;
+       size_t offset = i->iov_offset;
+       size_t delta;
+
+       BUG_ON(i->count < bytes);
+       while (bytes) {
+               BUG_ON(bvec->bv_len == 0);
+               BUG_ON(bvec->bv_len <= offset);
+               delta = min(bytes, bvec->bv_len - offset);
+               offset += delta;
+               i->count -= delta;
+               bytes -= delta;
+               if (offset == bvec->bv_len) {
+                       bvec++;
+                       offset = 0;
+               }
+       }
+
+       i->data = (unsigned long)bvec;
+       i->iov_offset = offset;
+}
+
+/*
+ * pages pointed to by bio_vecs are always pinned.
+ */
+static int ii_bvec_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       return 0;
+}
+
+static size_t ii_bvec_single_seg_count(const struct iov_iter *i)
+{
+       const struct bio_vec *bvec = (struct bio_vec *)i->data;
+       if (i->nr_segs == 1)
+               return i->count;
+       else
+               return min(i->count, bvec->bv_len - i->iov_offset);
+}
+
+static int ii_bvec_shorten(struct iov_iter *i, size_t count)
+{
+       return -EINVAL;
+}
+
+struct iov_iter_ops ii_bvec_ops = {
+       .ii_copy_to_user_atomic = ii_bvec_copy_to_user_atomic,
+       .ii_copy_to_user = ii_bvec_copy_to_user,
+       .ii_copy_from_user_atomic = ii_bvec_copy_from_user_atomic,
+       .ii_copy_from_user = ii_bvec_copy_from_user,
+       .ii_advance = ii_bvec_advance,
+       .ii_fault_in_readable = ii_bvec_fault_in_readable,
+       .ii_single_seg_count = ii_bvec_single_seg_count,
+       .ii_shorten = ii_bvec_shorten,
+};
+EXPORT_SYMBOL(ii_bvec_ops);
+#endif /* CONFIG_BLOCK */
+
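All four ii_bvec_* copy methods above funnel into bvec_copy_tofrom_page(), differing only in the direction flag, so callers never map bvec pages themselves. An illustrative in-file helper, assuming the iter was set up over a pinned bio_vec array with ops == ii_bvec_ops:

/* Illustrative only: copy bytes from 'page' into the bvec-backed iter, then advance it */
static size_t example_copy_page_to_bvec_iter(struct page *page, unsigned long offset,
					     size_t bytes, struct iov_iter *iter)
{
	size_t copied = bvec_copy_tofrom_page(iter, page, offset, bytes, 0); /* page -> bvecs */

	ii_bvec_advance(iter, copied);	/* step past what was consumed */
	return copied;
}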
+static size_t __iovec_copy_from_user(char *vaddr, const struct iovec *iov,
+                                    size_t base, size_t bytes, int atomic)
+{
+       size_t copied = 0, left = 0;
+
+       while (bytes) {
+               char __user *buf = iov->iov_base + base;
+               int copy = min(bytes, iov->iov_len - base);
+
+               base = 0;
+               if (atomic)
+                       left = __copy_from_user_inatomic(vaddr, buf, copy);
+               else
+                       left = __copy_from_user(vaddr, buf, copy);
+               copied += copy;
+               bytes -= copy;
+               vaddr += copy;
+               iov++;
+
+               if (unlikely(left))
+                       break;
+       }
+       return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number
+ * of bytes which were copied.
+ */
+static size_t ii_iovec_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       BUG_ON(!in_atomic());
+       kaddr = kmap_atomic(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user(kaddr + offset, iov,
+                                               i->iov_offset, bytes, 1);
+       }
+       kunmap_atomic(kaddr);
+
+       return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+/*
+ * This has the same side effects and return value as
+ * ii_iovec_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+static size_t ii_iovec_copy_from_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char *kaddr;
+       size_t copied;
+
+       kaddr = kmap(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + i->iov_offset;
+               left = __copy_from_user(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user(kaddr + offset, iov,
+                                               i->iov_offset, bytes, 0);
+       }
+       kunmap(page);
+       return copied;
+}
+
+static void ii_iovec_advance(struct iov_iter *i, size_t bytes)
+{
+       BUG_ON(i->count < bytes);
+
+       if (likely(i->nr_segs == 1)) {
+               i->iov_offset += bytes;
+               i->count -= bytes;
+       } else {
+               struct iovec *iov = (struct iovec *)i->data;
+               size_t base = i->iov_offset;
+               unsigned long nr_segs = i->nr_segs;
+
+               /*
+                * The !iov->iov_len check ensures we skip over unlikely
+                * zero-length segments (without overrunning the iovec).
+                */
+               while (bytes || unlikely(i->count && !iov->iov_len)) {
+                       int copy;
+
+                       copy = min(bytes, iov->iov_len - base);
+                       BUG_ON(!i->count || i->count < copy);
+                       i->count -= copy;
+                       bytes -= copy;
+                       base += copy;
+                       if (iov->iov_len == base) {
+                               iov++;
+                               nr_segs--;
+                               base = 0;
+                       }
+               }
+               i->data = (unsigned long)iov;
+               i->iov_offset = base;
+               i->nr_segs = nr_segs;
+       }
+}
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (ie. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+static int ii_iovec_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       char __user *buf = iov->iov_base + i->iov_offset;
+       bytes = min(bytes, iov->iov_len - i->iov_offset);
+       return fault_in_pages_readable(buf, bytes);
+}
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+static size_t ii_iovec_single_seg_count(const struct iov_iter *i)
+{
+       const struct iovec *iov = (struct iovec *)i->data;
+       if (i->nr_segs == 1)
+               return i->count;
+       else
+               return min(i->count, iov->iov_len - i->iov_offset);
+}
+
+static int ii_iovec_shorten(struct iov_iter *i, size_t count)
+{
+       struct iovec *iov = (struct iovec *)i->data;
+       i->nr_segs = iov_shorten(iov, i->nr_segs, count);
+       i->count = min(i->count, count);
+       return 0;
+}
+
+struct iov_iter_ops ii_iovec_ops = {
+       .ii_copy_to_user_atomic = ii_iovec_copy_to_user_atomic,
+       .ii_copy_to_user = ii_iovec_copy_to_user,
+       .ii_copy_from_user_atomic = ii_iovec_copy_from_user_atomic,
+       .ii_copy_from_user = ii_iovec_copy_from_user,
+       .ii_advance = ii_iovec_advance,
+       .ii_fault_in_readable = ii_iovec_fault_in_readable,
+       .ii_single_seg_count = ii_iovec_single_seg_count,
+       .ii_shorten = ii_iovec_shorten,
+};
+EXPORT_SYMBOL(ii_iovec_ops);
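Together with ii_bvec_ops above, this table lets generic code copy to and from an iov_iter without knowing whether it is backed by user iovecs or by kernel bio_vec pages. Assuming the struct iov_iter in this series carries an ops pointer (implied by the two exported tables; the field name is an assumption), the dispatch would look like:

/* Hypothetical dispatch through the per-backing ops table */
static size_t example_iter_copy_from_user(struct page *page, struct iov_iter *i,
					   unsigned long offset, size_t bytes)
{
	return i->ops->ii_copy_from_user(page, i, offset, bytes);
}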
index 1506673c087e11ae820245baadc8ae74cb9f01b2..1d7ab8b7d41e4e19c2053ee5be00375a5d9ca3c4 100644 (file)
@@ -51,10 +51,10 @@ const struct file_operations jffs2_file_operations =
 {
        .llseek =       generic_file_llseek,
        .open =         generic_file_open,
-       .read =         do_sync_read,
-       .aio_read =     generic_file_aio_read,
-       .write =        do_sync_write,
-       .aio_write =    generic_file_aio_write,
+       .read =         do_sync_read,
+       .read_iter =    generic_file_read_iter,
+       .write =        do_sync_write,
+       .write_iter =   generic_file_write_iter,
        .unlocked_ioctl=jffs2_ioctl,
        .mmap =         generic_file_readonly_mmap,
        .fsync =        jffs2_fsync,
index fe3c0527545f3b96d495f422a220339b0373e620..09b3ed45572475feb68b117fcf5b420a1a5e4d8f 100644 (file)
@@ -515,6 +515,10 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
 
        c = JFFS2_SB_INFO(sb);
 
+       /* Do not support the MLC nand */
+       if (c->mtd->type == MTD_MLCNANDFLASH)
+               return -EINVAL;
+
 #ifndef CONFIG_JFFS2_FS_WRITEBUFFER
        if (c->mtd->type == MTD_NANDFLASH) {
                pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
index dd7442c5835864b0e04bdff5befbd0f020232bfb..040b6c7725ad878d5347274ae1ca4cb76003a83c 100644 (file)
@@ -151,8 +151,8 @@ const struct file_operations jfs_file_operations = {
        .llseek         = generic_file_llseek,
        .write          = do_sync_write,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
index f4aab719add57bf354f90536486d88182be0b1b3..51652aaa3dc8c6eb22a7c576b7248b8b66b8c1a5 100644 (file)
@@ -331,15 +331,14 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
-       const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+                            struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                jfs_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block);
 
        /*
         * In case of error extending write may have instantiated a few
@@ -347,7 +346,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        jfs_write_failed(mapping, end);
index c1a3e603279c9cbe4fb141fc2b9bdcfa1d76a033..7f464c513ba0a85a2fd4fe7923a9799bb319e92b 100644 (file)
@@ -95,7 +95,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
 
        if (insert_inode_locked(inode) < 0) {
                rc = -EINVAL;
-               goto fail_unlock;
+               goto fail_put;
        }
 
        inode_init_owner(inode, parent, mode);
@@ -156,7 +156,6 @@ struct inode *ialloc(struct inode *parent, umode_t mode)
 fail_drop:
        dquot_drop(inode);
        inode->i_flags |= S_NOQUOTA;
-fail_unlock:
        clear_nlink(inode);
        unlock_new_inode(inode);
 fail_put:
index 3a3a9b53bf5a974d3c206f31ae402aaf752dd39c..8c50184931547791f0caa3e22d24cb138395409e 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/vfs.h>
 #include <linux/quotaops.h>
 #include <linux/mutex.h>
+#include <linux/namei.h>
 #include <linux/exportfs.h>
 #include <linux/writeback.h>
 #include <linux/buffer_head.h> /* sync_mapping_buffers */
@@ -31,6 +32,7 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry,
        stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9);
        return 0;
 }
+EXPORT_SYMBOL(simple_getattr);
 
 int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
@@ -39,6 +41,7 @@ int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_namelen = NAME_MAX;
        return 0;
 }
+EXPORT_SYMBOL(simple_statfs);
 
 /*
  * Retaining negative dentries for an in-memory filesystem just wastes
@@ -66,6 +69,7 @@ struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned
        d_add(dentry, NULL);
        return NULL;
 }
+EXPORT_SYMBOL(simple_lookup);
 
 int dcache_dir_open(struct inode *inode, struct file *file)
 {
@@ -75,12 +79,14 @@ int dcache_dir_open(struct inode *inode, struct file *file)
 
        return file->private_data ? 0 : -ENOMEM;
 }
+EXPORT_SYMBOL(dcache_dir_open);
 
 int dcache_dir_close(struct inode *inode, struct file *file)
 {
        dput(file->private_data);
        return 0;
 }
+EXPORT_SYMBOL(dcache_dir_close);
 
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
 {
@@ -123,6 +129,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
        mutex_unlock(&dentry->d_inode->i_mutex);
        return offset;
 }
+EXPORT_SYMBOL(dcache_dir_lseek);
 
 /* Relationship between i_mode and the DT_xxx types */
 static inline unsigned char dt_type(struct inode *inode)
@@ -172,11 +179,13 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
        spin_unlock(&dentry->d_lock);
        return 0;
 }
+EXPORT_SYMBOL(dcache_readdir);
 
 ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
 {
        return -EISDIR;
 }
+EXPORT_SYMBOL(generic_read_dir);
 
 const struct file_operations simple_dir_operations = {
        .open           = dcache_dir_open,
@@ -186,10 +195,12 @@ const struct file_operations simple_dir_operations = {
        .iterate        = dcache_readdir,
        .fsync          = noop_fsync,
 };
+EXPORT_SYMBOL(simple_dir_operations);
 
 const struct inode_operations simple_dir_inode_operations = {
        .lookup         = simple_lookup,
 };
+EXPORT_SYMBOL(simple_dir_inode_operations);
 
 static const struct super_operations simple_super_operations = {
        .statfs         = simple_statfs,
@@ -244,6 +255,7 @@ Enomem:
        deactivate_locked_super(s);
        return ERR_PTR(-ENOMEM);
 }
+EXPORT_SYMBOL(mount_pseudo);
 
 int simple_open(struct inode *inode, struct file *file)
 {
@@ -251,6 +263,7 @@ int simple_open(struct inode *inode, struct file *file)
                file->private_data = inode->i_private;
        return 0;
 }
+EXPORT_SYMBOL(simple_open);
 
 int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 {
@@ -263,6 +276,7 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den
        d_instantiate(dentry, inode);
        return 0;
 }
+EXPORT_SYMBOL(simple_link);
 
 int simple_empty(struct dentry *dentry)
 {
@@ -283,6 +297,7 @@ out:
        spin_unlock(&dentry->d_lock);
        return ret;
 }
+EXPORT_SYMBOL(simple_empty);
 
 int simple_unlink(struct inode *dir, struct dentry *dentry)
 {
@@ -293,6 +308,7 @@ int simple_unlink(struct inode *dir, struct dentry *dentry)
        dput(dentry);
        return 0;
 }
+EXPORT_SYMBOL(simple_unlink);
 
 int simple_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -304,6 +320,7 @@ int simple_rmdir(struct inode *dir, struct dentry *dentry)
        drop_nlink(dir);
        return 0;
 }
+EXPORT_SYMBOL(simple_rmdir);
 
 int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
                struct inode *new_dir, struct dentry *new_dentry)
@@ -330,6 +347,7 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        return 0;
 }
+EXPORT_SYMBOL(simple_rename);
 
 /**
  * simple_setattr - setattr for simple filesystem
@@ -370,6 +388,7 @@ int simple_readpage(struct file *file, struct page *page)
        unlock_page(page);
        return 0;
 }
+EXPORT_SYMBOL(simple_readpage);
 
 int simple_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
@@ -393,6 +412,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping,
        }
        return 0;
 }
+EXPORT_SYMBOL(simple_write_begin);
 
 /**
  * simple_write_end - .write_end helper for non-block-device FSes
@@ -444,6 +464,7 @@ int simple_write_end(struct file *file, struct address_space *mapping,
 
        return copied;
 }
+EXPORT_SYMBOL(simple_write_end);
 
 /*
  * the inodes created here are not hashed. If you use iunique to generate
@@ -512,6 +533,7 @@ out:
        dput(root);
        return -ENOMEM;
 }
+EXPORT_SYMBOL(simple_fill_super);
 
 static DEFINE_SPINLOCK(pin_fs_lock);
 
@@ -534,6 +556,7 @@ int simple_pin_fs(struct file_system_type *type, struct vfsmount **mount, int *c
        mntput(mnt);
        return 0;
 }
+EXPORT_SYMBOL(simple_pin_fs);
 
 void simple_release_fs(struct vfsmount **mount, int *count)
 {
@@ -545,6 +568,7 @@ void simple_release_fs(struct vfsmount **mount, int *count)
        spin_unlock(&pin_fs_lock);
        mntput(mnt);
 }
+EXPORT_SYMBOL(simple_release_fs);
 
 /**
  * simple_read_from_buffer - copy data from the buffer to user space
@@ -579,6 +603,7 @@ ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
        *ppos = pos + count;
        return count;
 }
+EXPORT_SYMBOL(simple_read_from_buffer);
 
 /**
  * simple_write_to_buffer - copy data from user space to the buffer
@@ -613,6 +638,7 @@ ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
        *ppos = pos + count;
        return count;
 }
+EXPORT_SYMBOL(simple_write_to_buffer);
 
 /**
  * memory_read_from_buffer - copy data from the buffer
@@ -644,6 +670,7 @@ ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
 
        return count;
 }
+EXPORT_SYMBOL(memory_read_from_buffer);
 
 /*
  * Transaction based IO.
@@ -665,6 +692,7 @@ void simple_transaction_set(struct file *file, size_t n)
        smp_mb();
        ar->size = n;
 }
+EXPORT_SYMBOL(simple_transaction_set);
 
 char *simple_transaction_get(struct file *file, const char __user *buf, size_t size)
 {
@@ -696,6 +724,7 @@ char *simple_transaction_get(struct file *file, const char __user *buf, size_t s
 
        return ar->data;
 }
+EXPORT_SYMBOL(simple_transaction_get);
 
 ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size, loff_t *pos)
 {
@@ -705,12 +734,14 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf, size_t size
                return 0;
        return simple_read_from_buffer(buf, size, pos, ar->data, ar->size);
 }
+EXPORT_SYMBOL(simple_transaction_read);
 
 int simple_transaction_release(struct inode *inode, struct file *file)
 {
        free_page((unsigned long)file->private_data);
        return 0;
 }
+EXPORT_SYMBOL(simple_transaction_release);
 
 /* Simple attribute files */
 
@@ -746,12 +777,14 @@ int simple_attr_open(struct inode *inode, struct file *file,
 
        return nonseekable_open(inode, file);
 }
+EXPORT_SYMBOL_GPL(simple_attr_open);
 
 int simple_attr_release(struct inode *inode, struct file *file)
 {
        kfree(file->private_data);
        return 0;
 }
+EXPORT_SYMBOL_GPL(simple_attr_release);        /* GPL-only?  This?  Really? */
 
 /* read from the buffer that is filled with the get function */
 ssize_t simple_attr_read(struct file *file, char __user *buf,
@@ -787,6 +820,7 @@ out:
        mutex_unlock(&attr->mutex);
        return ret;
 }
+EXPORT_SYMBOL_GPL(simple_attr_read);
 
 /* interpret the buffer as a number to call the set function with */
 ssize_t simple_attr_write(struct file *file, const char __user *buf,
@@ -819,6 +853,7 @@ out:
        mutex_unlock(&attr->mutex);
        return ret;
 }
+EXPORT_SYMBOL_GPL(simple_attr_write);
 
 /**
  * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
@@ -957,39 +992,13 @@ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        return 0;
 }
-
-EXPORT_SYMBOL(dcache_dir_close);
-EXPORT_SYMBOL(dcache_dir_lseek);
-EXPORT_SYMBOL(dcache_dir_open);
-EXPORT_SYMBOL(dcache_readdir);
-EXPORT_SYMBOL(generic_read_dir);
-EXPORT_SYMBOL(mount_pseudo);
-EXPORT_SYMBOL(simple_write_begin);
-EXPORT_SYMBOL(simple_write_end);
-EXPORT_SYMBOL(simple_dir_inode_operations);
-EXPORT_SYMBOL(simple_dir_operations);
-EXPORT_SYMBOL(simple_empty);
-EXPORT_SYMBOL(simple_fill_super);
-EXPORT_SYMBOL(simple_getattr);
-EXPORT_SYMBOL(simple_open);
-EXPORT_SYMBOL(simple_link);
-EXPORT_SYMBOL(simple_lookup);
-EXPORT_SYMBOL(simple_pin_fs);
-EXPORT_SYMBOL(simple_readpage);
-EXPORT_SYMBOL(simple_release_fs);
-EXPORT_SYMBOL(simple_rename);
-EXPORT_SYMBOL(simple_rmdir);
-EXPORT_SYMBOL(simple_statfs);
 EXPORT_SYMBOL(noop_fsync);
-EXPORT_SYMBOL(simple_unlink);
-EXPORT_SYMBOL(simple_read_from_buffer);
-EXPORT_SYMBOL(simple_write_to_buffer);
-EXPORT_SYMBOL(memory_read_from_buffer);
-EXPORT_SYMBOL(simple_transaction_set);
-EXPORT_SYMBOL(simple_transaction_get);
-EXPORT_SYMBOL(simple_transaction_read);
-EXPORT_SYMBOL(simple_transaction_release);
-EXPORT_SYMBOL_GPL(simple_attr_open);
-EXPORT_SYMBOL_GPL(simple_attr_release);
-EXPORT_SYMBOL_GPL(simple_attr_read);
-EXPORT_SYMBOL_GPL(simple_attr_write);
+
+void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
+                               void *cookie)
+{
+       char *s = nd_get_link(nd);
+       if (!IS_ERR(s))
+               kfree(s);
+}
+EXPORT_SYMBOL(kfree_put_link);
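kfree_put_link() gives any filesystem whose ->follow_link() hands the walk a kmalloc()'d string a shared ->put_link(). A hypothetical wiring (the examplefs_* names are stand-ins, not part of this commit):

/* Hypothetical ->follow_link that passes a kmalloc()'d target to the walk */
static void *examplefs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	char *target = kstrdup("placeholder-target", GFP_KERNEL);

	nd_set_link(nd, target ? target : ERR_PTR(-ENOMEM));
	return NULL;
}

static const struct inode_operations examplefs_symlink_iops = {
	.readlink	= generic_readlink,
	.follow_link	= examplefs_follow_link,
	.put_link	= kfree_put_link,	/* kfree()s whatever nd_get_link() returns */
};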
index 9c501449450dc9be6891e5d9c1a035ca31b5687b..427bb73e298f197d4cfc73b17baeffdd1c848c5d 100644 (file)
@@ -245,8 +245,8 @@ static int logfs_mtd_can_write_buf(struct super_block *sb, u64 ofs)
                goto out;
        if (memchr_inv(buf, 0xff, super->s_writesize))
                err = -EIO;
-       kfree(buf);
 out:
+       kfree(buf);
        return err;
 }
 
index 57914fc32b62538f43909d35ffc031742b98a881..57f994e887b5344daa7a47f279c233d41188fba1 100644 (file)
@@ -264,8 +264,8 @@ const struct inode_operations logfs_reg_iops = {
 };
 
 const struct file_operations logfs_reg_fops = {
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .fsync          = logfs_fsync,
        .unlocked_ioctl = logfs_ioctl,
        .llseek         = generic_file_llseek,
index 54360293bcb5cd0680c3042e6f0b9b87e342c649..b256c0690e5b66f5b16a2b81be033cc3b345dec6 100644 (file)
@@ -287,14 +287,14 @@ static int logfs_make_writeable(struct super_block *sb)
        if (err)
                return err;
 
+       /* Do one GC pass before any data gets dirtied */
+       logfs_gc_pass(sb);
+
        /* Check areas for trailing unaccounted data */
        err = logfs_check_areas(sb);
        if (err)
                return err;
 
-       /* Do one GC pass before any data gets dirtied */
-       logfs_gc_pass(sb);
-
        /* after all initializations are done, replay the journal
         * for rw-mounts, if necessary */
        err = logfs_replay_journal(sb);
index 6624684dd5decbd55845113000ad1b783f029ebb..f2a0cfcef11dc6e82044d2bf06013104063ee4a3 100644 (file)
@@ -18,7 +18,7 @@ config MINIX_FS
 
 config MINIX_FS_NATIVE_ENDIAN
        def_bool MINIX_FS
-       depends on H8300 || M32R || MICROBLAZE || MIPS || S390 || SUPERH || SPARC || XTENSA || (M68K && !MMU)
+       depends on M32R || MICROBLAZE || MIPS || S390 || SUPERH || SPARC || XTENSA || (M68K && !MMU)
 
 config MINIX_FS_BIG_ENDIAN_16BIT_INDEXED
        def_bool MINIX_FS
index adc6f5494231bc947f45d8a3c526db0b36f39bf3..346d8f37d342df53f5a7d9736431283e33eea431 100644 (file)
@@ -15,9 +15,9 @@
 const struct file_operations minix_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .splice_read    = generic_file_splice_read,
index 645268f23eb64cb8c2931ce33391beb2d0080c36..caa28051e197e898e3c2bc52afce37bcbb284853 100644 (file)
@@ -2294,10 +2294,11 @@ out:
  * path_mountpoint - look up a path to be umounted
  * @dfd:       directory file descriptor to start walk from
  * @name:      full pathname to walk
+ * @path:      pointer to container for result
  * @flags:     lookup flags
  *
  * Look up the given name, but don't attempt to revalidate the last component.
- * Returns 0 and "path" will be valid on success; Retuns error otherwise.
+ * Returns 0 and "path" will be valid on success; Returns error otherwise.
  */
 static int
 path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags)
index da5c494834306178dc9efd3b3826d1b7f0e0d17b..3ee6e59ead55a9ca2398a30b86f347315532a8b5 100644 (file)
@@ -39,7 +39,7 @@ static int mnt_group_start = 1;
 static struct list_head *mount_hashtable __read_mostly;
 static struct list_head *mountpoint_hashtable __read_mostly;
 static struct kmem_cache *mnt_cache __read_mostly;
-static struct rw_semaphore namespace_sem;
+static DECLARE_RWSEM(namespace_sem);
 
 /* /sys/fs */
 struct kobject *fs_kobj;
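Declaring namespace_sem with DECLARE_RWSEM() makes the init_rwsem() call removed from mnt_init() below unnecessary; the lock is valid from the first reference. The two forms are equivalent for a file-scope semaphore:

/* Compile-time initialization */
static DECLARE_RWSEM(example_sem);

/* versus run-time initialization, which must happen before first use */
static struct rw_semaphore example_sem2;

static int __init example_init(void)
{
	init_rwsem(&example_sem2);
	return 0;
}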
@@ -1849,14 +1849,10 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
                br_write_lock(&vfsmount_lock);
                mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
                mnt->mnt.mnt_flags = mnt_flags;
-               br_write_unlock(&vfsmount_lock);
-       }
-       up_write(&sb->s_umount);
-       if (!err) {
-               br_write_lock(&vfsmount_lock);
                touch_mnt_namespace(mnt->mnt_ns);
                br_write_unlock(&vfsmount_lock);
        }
+       up_write(&sb->s_umount);
        return err;
 }
 
@@ -2444,9 +2440,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
                return ERR_CAST(new);
        }
        new_ns->root = new;
-       br_write_lock(&vfsmount_lock);
        list_add_tail(&new_ns->list, &new->mnt_list);
-       br_write_unlock(&vfsmount_lock);
 
        /*
         * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2767,8 +2761,6 @@ void __init mnt_init(void)
        unsigned u;
        int err;
 
-       init_rwsem(&namespace_sem);
-
        mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
                        0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 
@@ -2802,11 +2794,7 @@ void put_mnt_ns(struct mnt_namespace *ns)
 {
        if (!atomic_dec_and_test(&ns->count))
                return;
-       namespace_lock();
-       br_write_lock(&vfsmount_lock);
-       umount_tree(ns->root, 0);
-       br_write_unlock(&vfsmount_lock);
-       namespace_unlock();
+       drop_collected_mounts(&ns->root->mnt);
        free_mnt_ns(ns);
 }
 
@@ -2875,7 +2863,7 @@ bool fs_fully_visible(struct file_system_type *type)
        if (unlikely(!ns))
                return false;
 
-       namespace_lock();
+       down_read(&namespace_sem);
        list_for_each_entry(mnt, &ns->list, mnt_list) {
                struct mount *child;
                if (mnt->mnt.mnt_sb->s_type != type)
@@ -2896,7 +2884,7 @@ bool fs_fully_visible(struct file_system_type *type)
        next:   ;
        }
 found:
-       namespace_unlock();
+       up_read(&namespace_sem);
        return visible;
 }
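
The mount-namespace hunks above combine two locking simplifications that go together: namespace_sem is now statically initialised with DECLARE_RWSEM(), so the init_rwsem() call in the init path goes away, and the read-only visibility walk takes it with down_read()/up_read() instead of the write-side helper. A minimal sketch of the same pattern (all names below are invented for illustration):

#include <linux/init.h>
#include <linux/rwsem.h>

/* Statically initialised: no init_rwsem() call is needed at boot. */
static DECLARE_RWSEM(example_sem);

static int example_read_only_walk(void)
{
	int found = 0;

	down_read(&example_sem);
	/* ... walk a list protected by example_sem ... */
	up_read(&example_sem);
	return found;
}

static int __init example_init(void)
{
	/* nothing to do for example_sem: DECLARE_RWSEM() covered it */
	return 0;
}
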
 
index 3be047474bfc355731c08e2a6c27b40e73ce9e8b..c320ac52353e458723f46613893bcc0423ccc463 100644 (file)
@@ -339,9 +339,8 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags)
        if (val)
                goto finished;
 
-       DDPRINTK("ncp_lookup_validate: %s/%s not valid, age=%ld, server lookup\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               NCP_GET_AGE(dentry));
+       DDPRINTK("ncp_lookup_validate: %pd2 not valid, age=%ld, server lookup\n",
+               dentry, NCP_GET_AGE(dentry));
 
        len = sizeof(__name);
        if (ncp_is_server_root(dir)) {
@@ -359,8 +358,8 @@ ncp_lookup_validate(struct dentry *dentry, unsigned int flags)
                        res = ncp_obtain_info(server, dir, __name, &(finfo.i));
        }
        finfo.volume = finfo.i.volNumber;
-       DDPRINTK("ncp_lookup_validate: looked for %s/%s, res=%d\n",
-               dentry->d_parent->d_name.name, __name, res);
+       DDPRINTK("ncp_lookup_validate: looked for %pd/%s, res=%d\n",
+               dentry->d_parent, __name, res);
        /*
         * If we didn't find it, or if it has a different dirEntNum to
         * what we remember, it's not valid any more.
@@ -454,8 +453,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx)
        ctl.page  = NULL;
        ctl.cache = NULL;
 
-       DDPRINTK("ncp_readdir: reading %s/%s, pos=%d\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
+       DDPRINTK("ncp_readdir: reading %pD2, pos=%d\n", file,
                (int) ctx->pos);
 
        result = -EIO;
@@ -740,12 +738,10 @@ ncp_do_readdir(struct file *file, struct dir_context *ctx,
        int more;
        size_t bufsize;
 
-       DPRINTK("ncp_do_readdir: %s/%s, fpos=%ld\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
+       DPRINTK("ncp_do_readdir: %pD2, fpos=%ld\n", file,
                (unsigned long) ctx->pos);
-       PPRINTK("ncp_do_readdir: init %s, volnum=%d, dirent=%u\n",
-               dentry->d_name.name, NCP_FINFO(dir)->volNumber,
-               NCP_FINFO(dir)->dirEntNum);
+       PPRINTK("ncp_do_readdir: init %pD, volnum=%d, dirent=%u\n",
+               file, NCP_FINFO(dir)->volNumber, NCP_FINFO(dir)->dirEntNum);
 
        err = ncp_initialize_search(server, dir, &seq);
        if (err) {
@@ -850,8 +846,7 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, unsig
        if (!ncp_conn_valid(server))
                goto finished;
 
-       PPRINTK("ncp_lookup: server lookup for %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       PPRINTK("ncp_lookup: server lookup for %pd2\n", dentry);
 
        len = sizeof(__name);
        if (ncp_is_server_root(dir)) {
@@ -867,8 +862,7 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, unsig
                if (!res)
                        res = ncp_obtain_info(server, dir, __name, &(finfo.i));
        }
-       PPRINTK("ncp_lookup: looked for %s/%s, res=%d\n",
-               dentry->d_parent->d_name.name, __name, res);
+       PPRINTK("ncp_lookup: looked for %pd2, res=%d\n", dentry, res);
        /*
         * If we didn't find an entry, make a negative dentry.
         */
@@ -915,8 +909,7 @@ out:
        return error;
 
 out_close:
-       PPRINTK("ncp_instantiate: %s/%s failed, closing file\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       PPRINTK("ncp_instantiate: %pd2 failed, closing file\n", dentry);
        ncp_close_file(NCP_SERVER(dir), finfo->file_handle);
        goto out;
 }
@@ -930,8 +923,7 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode,
        int opmode;
        __u8 __name[NCP_MAXPATHLEN + 1];
        
-       PPRINTK("ncp_create_new: creating %s/%s, mode=%hx\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name, mode);
+       PPRINTK("ncp_create_new: creating %pd2, mode=%hx\n", dentry, mode);
 
        ncp_age_dentry(server, dentry);
        len = sizeof(__name);
@@ -960,8 +952,7 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, umode_t mode,
                                error = -ENAMETOOLONG;
                        else if (result < 0)
                                error = result;
-                       DPRINTK("ncp_create: %s/%s failed\n",
-                               dentry->d_parent->d_name.name, dentry->d_name.name);
+                       DPRINTK("ncp_create: %pd2 failed\n", dentry);
                        goto out;
                }
                opmode = O_WRONLY;
@@ -994,8 +985,7 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        int error, len;
        __u8 __name[NCP_MAXPATHLEN + 1];
 
-       DPRINTK("ncp_mkdir: making %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_mkdir: making %pd2\n", dentry);
 
        ncp_age_dentry(server, dentry);
        len = sizeof(__name);
@@ -1032,8 +1022,7 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
        int error, result, len;
        __u8 __name[NCP_MAXPATHLEN + 1];
 
-       DPRINTK("ncp_rmdir: removing %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_rmdir: removing %pd2\n", dentry);
 
        len = sizeof(__name);
        error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
@@ -1078,8 +1067,7 @@ static int ncp_unlink(struct inode *dir, struct dentry *dentry)
        int error;
 
        server = NCP_SERVER(dir);
-       DPRINTK("ncp_unlink: unlinking %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_unlink: unlinking %pd2\n", dentry);
        
        /*
         * Check whether to close the file ...
@@ -1099,8 +1087,7 @@ static int ncp_unlink(struct inode *dir, struct dentry *dentry)
 #endif
        switch (error) {
                case 0x00:
-                       DPRINTK("ncp: removed %s/%s\n",
-                               dentry->d_parent->d_name.name, dentry->d_name.name);
+                       DPRINTK("ncp: removed %pd2\n", dentry);
                        break;
                case 0x85:
                case 0x8A:
@@ -1133,9 +1120,7 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
        int old_len, new_len;
        __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
 
-       DPRINTK("ncp_rename: %s/%s to %s/%s\n",
-               old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
-               new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
+       DPRINTK("ncp_rename: %pd2 to %pd2\n", old_dentry, new_dentry);
 
        ncp_age_dentry(server, old_dentry);
        ncp_age_dentry(server, new_dentry);
@@ -1165,8 +1150,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
 #endif
        switch (error) {
                case 0x00:
-                               DPRINTK("ncp renamed %s -> %s.\n",
-                                old_dentry->d_name.name,new_dentry->d_name.name);
+                               DPRINTK("ncp renamed %pd -> %pd.\n",
+                                old_dentry, new_dentry);
                        break;
                case 0x9E:
                        error = -ENAMETOOLONG;
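
The ncpfs debug-message hunks above all apply the same conversion: hand-rolled dentry->d_parent->d_name.name / dentry->d_name.name pairs become the "%pd"/"%pD" printk extensions, where "%pd2" prints parent/name for a struct dentry * and "%pD2" does the same for the dentry behind a struct file *. A small sketch with an invented function name:

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/printk.h>

/* Illustrative only: print "parent/name" without poking at d_name by hand. */
static void example_debug_names(struct dentry *dentry, struct file *file)
{
	pr_debug("dentry %pd2\n", dentry);
	pr_debug("file   %pD2\n", file);
}
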
index 122e260247f53c663550073fda567a4342b0ba63..8f5074e1ecb9eaf2b39b0064edc17f5528516ca2 100644 (file)
@@ -107,8 +107,7 @@ ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        void* freepage;
        size_t freelen;
 
-       DPRINTK("ncp_file_read: enter %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_file_read: enter %pd2\n", dentry);
 
        pos = *ppos;
 
@@ -166,8 +165,7 @@ ncp_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 
        file_accessed(file);
 
-       DPRINTK("ncp_file_read: exit %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_file_read: exit %pd2\n", dentry);
 outrel:
        ncp_inode_close(inode);         
        return already_read ? already_read : error;
@@ -184,8 +182,7 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
        int errno;
        void* bouncebuffer;
 
-       DPRINTK("ncp_file_write: enter %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_file_write: enter %pd2\n", dentry);
        if ((ssize_t) count < 0)
                return -EINVAL;
        pos = *ppos;
@@ -264,8 +261,7 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
                        i_size_write(inode, pos);
                mutex_unlock(&inode->i_mutex);
        }
-       DPRINTK("ncp_file_write: exit %s/%s\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       DPRINTK("ncp_file_write: exit %pd2\n", dentry);
 outrel:
        ncp_inode_close(inode);         
        return already_written ? already_written : errno;
index 2dceee4db07652fd449ef713037a8a268d864f00..af0325864df667e91ef7e03faae6323b7f99532b 100644 (file)
@@ -590,6 +590,8 @@ int nfs_create_rpc_client(struct nfs_client *clp,
 
        if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
                args.flags |= RPC_CLNT_CREATE_DISCRTRY;
+       if (test_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags))
+               args.flags |= RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT;
        if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
                args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
        if (test_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags))
index 02b0df769e2db23d18d7787e27066ebaa684bbd0..76548d81f926c8843a291350a080f09938c0d0fd 100644 (file)
@@ -98,9 +98,7 @@ nfs_opendir(struct inode *inode, struct file *filp)
        struct nfs_open_dir_context *ctx;
        struct rpc_cred *cred;
 
-       dfprintk(FILE, "NFS: open dir(%s/%s)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name);
+       dfprintk(FILE, "NFS: open dir(%pD2)\n", filp);
 
        nfs_inc_stats(inode, NFSIOS_VFSOPEN);
 
@@ -297,11 +295,10 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
                                if (ctx->duped > 0
                                    && ctx->dup_cookie == *desc->dir_cookie) {
                                        if (printk_ratelimit()) {
-                                               pr_notice("NFS: directory %s/%s contains a readdir loop."
+                                               pr_notice("NFS: directory %pD2 contains a readdir loop."
                                                                "Please contact your server vendor.  "
                                                                "The file: %s has duplicate cookie %llu\n",
-                                                               desc->file->f_dentry->d_parent->d_name.name,
-                                                               desc->file->f_dentry->d_name.name,
+                                                               desc->file,
                                                                array->array[i].string.name,
                                                                *desc->dir_cookie);
                                        }
@@ -822,9 +819,8 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
        struct nfs_open_dir_context *dir_ctx = file->private_data;
        int res = 0;
 
-       dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
-                       dentry->d_parent->d_name.name, dentry->d_name.name,
-                       (long long)ctx->pos);
+       dfprintk(FILE, "NFS: readdir(%pD2) starting at cookie %llu\n",
+                       file, (long long)ctx->pos);
        nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);
 
        /*
@@ -880,22 +876,17 @@ out:
        nfs_unblock_sillyrename(dentry);
        if (res > 0)
                res = 0;
-       dfprintk(FILE, "NFS: readdir(%s/%s) returns %d\n",
-                       dentry->d_parent->d_name.name, dentry->d_name.name,
-                       res);
+       dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res);
        return res;
 }
 
 static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        struct nfs_open_dir_context *dir_ctx = filp->private_data;
 
-       dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
-                       dentry->d_parent->d_name.name,
-                       dentry->d_name.name,
-                       offset, whence);
+       dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n",
+                       filp, offset, whence);
 
        mutex_lock(&inode->i_mutex);
        switch (whence) {
@@ -925,15 +916,12 @@ out:
 static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
                         int datasync)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(filp);
 
-       dfprintk(FILE, "NFS: fsync dir(%s/%s) datasync %d\n",
-                       dentry->d_parent->d_name.name, dentry->d_name.name,
-                       datasync);
+       dfprintk(FILE, "NFS: fsync dir(%pD2) datasync %d\n", filp, datasync);
 
        mutex_lock(&inode->i_mutex);
-       nfs_inc_stats(dentry->d_inode, NFSIOS_VFSFSYNC);
+       nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
        mutex_unlock(&inode->i_mutex);
        return 0;
 }
@@ -1073,9 +1061,8 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
        }
 
        if (is_bad_inode(inode)) {
-               dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
-                               __func__, dentry->d_parent->d_name.name,
-                               dentry->d_name.name);
+               dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
+                               __func__, dentry);
                goto out_bad;
        }
 
@@ -1125,9 +1112,8 @@ out_set_verifier:
        nfs_advise_use_readdirplus(dir);
  out_valid_noent:
        dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
-                       __func__, dentry->d_parent->d_name.name,
-                       dentry->d_name.name);
+       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
+                       __func__, dentry);
        return 1;
 out_zap_parent:
        nfs_zap_caches(dir);
@@ -1147,18 +1133,16 @@ out_zap_parent:
                goto out_valid;
 
        dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
-                       __func__, dentry->d_parent->d_name.name,
-                       dentry->d_name.name);
+       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
+                       __func__, dentry);
        return 0;
 out_error:
        nfs_free_fattr(fattr);
        nfs_free_fhandle(fhandle);
        nfs4_label_free(label);
        dput(parent);
-       dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) lookup returned error %d\n",
-                       __func__, dentry->d_parent->d_name.name,
-                       dentry->d_name.name, error);
+       dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
+                       __func__, dentry, error);
        return error;
 }
 
@@ -1182,16 +1166,14 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
         * eventually need to do something more here.
         */
        if (!inode) {
-               dfprintk(LOOKUPCACHE, "%s: %s/%s has negative inode\n",
-                               __func__, dentry->d_parent->d_name.name,
-                               dentry->d_name.name);
+               dfprintk(LOOKUPCACHE, "%s: %pd2 has negative inode\n",
+                               __func__, dentry);
                return 1;
        }
 
        if (is_bad_inode(inode)) {
-               dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
-                               __func__, dentry->d_parent->d_name.name,
-                               dentry->d_name.name);
+               dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
+                               __func__, dentry);
                return 0;
        }
 
@@ -1206,9 +1188,8 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
  */
 static int nfs_dentry_delete(const struct dentry *dentry)
 {
-       dfprintk(VFS, "NFS: dentry_delete(%s/%s, %x)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               dentry->d_flags);
+       dfprintk(VFS, "NFS: dentry_delete(%pd2, %x)\n",
+               dentry, dentry->d_flags);
 
        /* Unhash any dentry with a stale inode */
        if (dentry->d_inode != NULL && NFS_STALE(dentry->d_inode))
@@ -1286,8 +1267,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
        struct nfs4_label *label = NULL;
        int error;
 
-       dfprintk(VFS, "NFS: lookup(%s/%s)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry);
        nfs_inc_stats(dir, NFSIOS_VFSLOOKUP);
 
        res = ERR_PTR(-ENAMETOOLONG);
@@ -1381,7 +1361,7 @@ static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, i
 
 static int do_open(struct inode *inode, struct file *filp)
 {
-       nfs_fscache_set_inode_cookie(inode, filp);
+       nfs_fscache_open_file(inode, filp);
        return 0;
 }
 
@@ -1418,8 +1398,8 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
        /* Expect a negative dentry */
        BUG_ON(dentry->d_inode);
 
-       dfprintk(VFS, "NFS: atomic_open(%s/%ld), %s\n",
-                       dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: atomic_open(%s/%ld), %pd\n",
+                       dir->i_sb->s_id, dir->i_ino, dentry);
 
        err = nfs_check_flags(open_flags);
        if (err)
@@ -1608,8 +1588,8 @@ int nfs_create(struct inode *dir, struct dentry *dentry,
        int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT;
        int error;
 
-       dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
-                       dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: create(%s/%ld), %pd\n",
+                       dir->i_sb->s_id, dir->i_ino, dentry);
 
        attr.ia_mode = mode;
        attr.ia_valid = ATTR_MODE;
@@ -1635,8 +1615,8 @@ nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
        struct iattr attr;
        int status;
 
-       dfprintk(VFS, "NFS: mknod(%s/%ld), %s\n",
-                       dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: mknod(%s/%ld), %pd\n",
+                       dir->i_sb->s_id, dir->i_ino, dentry);
 
        if (!new_valid_dev(rdev))
                return -EINVAL;
@@ -1664,8 +1644,8 @@ int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        struct iattr attr;
        int error;
 
-       dfprintk(VFS, "NFS: mkdir(%s/%ld), %s\n",
-                       dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: mkdir(%s/%ld), %pd\n",
+                       dir->i_sb->s_id, dir->i_ino, dentry);
 
        attr.ia_valid = ATTR_MODE;
        attr.ia_mode = mode | S_IFDIR;
@@ -1692,8 +1672,8 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
        int error;
 
-       dfprintk(VFS, "NFS: rmdir(%s/%ld), %s\n",
-                       dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: rmdir(%s/%ld), %pd\n",
+                       dir->i_sb->s_id, dir->i_ino, dentry);
 
        trace_nfs_rmdir_enter(dir, dentry);
        if (dentry->d_inode) {
@@ -1728,8 +1708,7 @@ static int nfs_safe_remove(struct dentry *dentry)
        struct inode *inode = dentry->d_inode;
        int error = -EBUSY;
                
-       dfprintk(VFS, "NFS: safe_remove(%s/%s)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       dfprintk(VFS, "NFS: safe_remove(%pd2)\n", dentry);
 
        /* If the dentry was sillyrenamed, we simply call d_delete() */
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
@@ -1762,8 +1741,8 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry)
        int error;
        int need_rehash = 0;
 
-       dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
-               dir->i_ino, dentry->d_name.name);
+       dfprintk(VFS, "NFS: unlink(%s/%ld, %pd)\n", dir->i_sb->s_id,
+               dir->i_ino, dentry);
 
        trace_nfs_unlink_enter(dir, dentry);
        spin_lock(&dentry->d_lock);
@@ -1813,8 +1792,8 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
        unsigned int pathlen = strlen(symname);
        int error;
 
-       dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s)\n", dir->i_sb->s_id,
-               dir->i_ino, dentry->d_name.name, symname);
+       dfprintk(VFS, "NFS: symlink(%s/%ld, %pd, %s)\n", dir->i_sb->s_id,
+               dir->i_ino, dentry, symname);
 
        if (pathlen > PAGE_SIZE)
                return -ENAMETOOLONG;
@@ -1836,9 +1815,9 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
        error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
        trace_nfs_symlink_exit(dir, dentry, error);
        if (error != 0) {
-               dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s) error %d\n",
+               dfprintk(VFS, "NFS: symlink(%s/%ld, %pd, %s) error %d\n",
                        dir->i_sb->s_id, dir->i_ino,
-                       dentry->d_name.name, symname, error);
+                       dentry, symname, error);
                d_drop(dentry);
                __free_page(page);
                return error;
@@ -1865,9 +1844,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
        struct inode *inode = old_dentry->d_inode;
        int error;
 
-       dfprintk(VFS, "NFS: link(%s/%s -> %s/%s)\n",
-               old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       dfprintk(VFS, "NFS: link(%pd2 -> %pd2)\n",
+               old_dentry, dentry);
 
        trace_nfs_link_enter(inode, dir, dentry);
        NFS_PROTO(inode)->return_delegation(inode);
@@ -1915,9 +1893,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct dentry *dentry = NULL, *rehash = NULL;
        int error = -EBUSY;
 
-       dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
-                old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
-                new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
+       dfprintk(VFS, "NFS: rename(%pd2 -> %pd2, ct=%d)\n",
+                old_dentry, new_dentry,
                 d_count(new_dentry));
 
        trace_nfs_rename_enter(old_dir, old_dentry, new_dir, new_dentry);
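
Besides the same "%pd2"/"%pD2" conversion, the NFS directory hunks above repeatedly replace the open-coded filp->f_path.dentry->d_inode chain with the file_inode() helper. A sketch of the combined idiom (the function name is invented):

#include <linux/fs.h>
#include <linux/printk.h>

/* Illustrative only: fetch the inode and log the name in one step. */
static void example_trace_open(struct file *filp)
{
	struct inode *inode = file_inode(filp);	/* was f_path.dentry->d_inode */

	pr_debug("open %pD2 (ino %lu)\n", filp, inode->i_ino);
}
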
index 91ff089d34126d8ae5d8d4a96bca232fbd921958..87a6475eb170996972969b30779949742b1c4a57 100644 (file)
@@ -90,6 +90,7 @@ struct nfs_direct_req {
        int                     flags;
 #define NFS_ODIRECT_DO_COMMIT          (1)     /* an unstable reply was received */
 #define NFS_ODIRECT_RESCHED_WRITES     (2)     /* write verification failed */
+#define NFS_ODIRECT_MARK_DIRTY         (4)     /* mark read pages dirty */
        struct nfs_writeverf    verf;           /* unstable write verifier */
 };
 
@@ -112,32 +113,22 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
  * @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
+ * @iter: array of vectors that define I/O buffer
  * @pos: offset in file to begin the operation
  * @nr_segs: size of iovec array
  *
  * The presence of this routine in the address space ops vector means
- * the NFS client supports direct I/O. However, for most direct IO, we
- * shunt off direct read and write requests before the VFS gets them,
- * so this method is only ever called for swap.
+ * the NFS client supports direct I/O. However, we shunt off direct
+ * read and write requests before the VFS gets them, so this method
+ * should never be called.
  */
-ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+                     loff_t pos)
 {
-#ifndef CONFIG_NFS_SWAP
-       dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
-                       iocb->ki_filp->f_path.dentry->d_name.name,
-                       (long long) pos, nr_segs);
+       dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
+                       iocb->ki_filp, (long long) pos, iter->nr_segs);
 
        return -EINVAL;
-#else
-       VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE);
-
-       if (rw == READ || rw == KERNEL_READ)
-               return nfs_file_direct_read(iocb, iov, nr_segs, pos,
-                               rw == READ ? true : false);
-       return nfs_file_direct_write(iocb, iov, nr_segs, pos,
-                               rw == WRITE ? true : false);
-#endif /* CONFIG_NFS_SWAP */
 }
 
 static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
@@ -265,7 +256,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;
 
-               if (!PageCompound(page) && bytes < hdr->good_bytes)
+               if ((dreq->flags & NFS_ODIRECT_MARK_DIRTY) &&
+                   !PageCompound(page) && bytes < hdr->good_bytes)
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
@@ -308,7 +300,7 @@ static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
  */
 static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
                                                const struct iovec *iov,
-                                               loff_t pos, bool uio)
+                                               loff_t pos)
 {
        struct nfs_direct_req *dreq = desc->pg_dreq;
        struct nfs_open_context *ctx = dreq->ctx;
@@ -336,20 +328,12 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
                                          GFP_KERNEL);
                if (!pagevec)
                        break;
-               if (uio) {
-                       down_read(&current->mm->mmap_sem);
-                       result = get_user_pages(current, current->mm, user_addr,
+               down_read(&current->mm->mmap_sem);
+               result = get_user_pages(current, current->mm, user_addr,
                                        npages, 1, 0, pagevec, NULL);
-                       up_read(&current->mm->mmap_sem);
-                       if (result < 0)
-                               break;
-               } else {
-                       WARN_ON(npages != 1);
-                       result = get_kernel_page(user_addr, 1, pagevec);
-                       if (WARN_ON(result != 1))
-                               break;
-               }
-
+               up_read(&current->mm->mmap_sem);
+               if (result < 0)
+                       break;
                if ((unsigned)result < npages) {
                        bytes = result * PAGE_SIZE;
                        if (bytes <= pgbase) {
@@ -397,24 +381,17 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *de
        return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
-                                             const struct iovec *iov,
-                                             unsigned long nr_segs,
-                                             loff_t pos, bool uio)
+static ssize_t nfs_direct_do_schedule_read_iovec(
+               struct nfs_pageio_descriptor *desc, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
 {
-       struct nfs_pageio_descriptor desc;
        ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        unsigned long seg;
 
-       NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
-                            &nfs_direct_read_completion_ops);
-       get_dreq(dreq);
-       desc.pg_dreq = dreq;
-
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
-               result = nfs_direct_read_schedule_segment(&desc, vec, pos, uio);
+               result = nfs_direct_read_schedule_segment(desc, vec, pos);
                if (result < 0)
                        break;
                requested_bytes += result;
@@ -422,6 +399,75 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
                        break;
                pos += vec->iov_len;
        }
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+
+#ifdef CONFIG_BLOCK
+static ssize_t nfs_direct_do_schedule_read_bvec(
+               struct nfs_pageio_descriptor *desc,
+               struct bio_vec *bvec, unsigned long nr_segs, loff_t pos)
+{
+       struct nfs_direct_req *dreq = desc->pg_dreq;
+       struct nfs_open_context *ctx = dreq->ctx;
+       struct inode *inode = ctx->dentry->d_inode;
+       ssize_t result = -EINVAL;
+       size_t requested_bytes = 0;
+       unsigned long seg;
+       struct nfs_page *req;
+       unsigned int req_len;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               result = -EIO;
+               req_len = bvec[seg].bv_len;
+               req = nfs_create_request(ctx, inode,
+                                        bvec[seg].bv_page,
+                                        bvec[seg].bv_offset, req_len);
+               if (IS_ERR(req)) {
+                       result = PTR_ERR(req);
+                       break;
+               }
+               req->wb_index = pos >> PAGE_SHIFT;
+               req->wb_offset = pos & ~PAGE_MASK;
+               if (!nfs_pageio_add_request(desc, req)) {
+                       result = desc->pg_error;
+                       nfs_release_request(req);
+                       break;
+               }
+               requested_bytes += req_len;
+               pos += req_len;
+       }
+
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
+static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq,
+                                       struct iov_iter *iter, loff_t pos)
+{
+       struct nfs_pageio_descriptor desc;
+       ssize_t result;
+
+       NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
+                            &nfs_direct_read_completion_ops);
+       get_dreq(dreq);
+       desc.pg_dreq = dreq;
+
+       if (iov_iter_has_iovec(iter)) {
+               result = nfs_direct_do_schedule_read_iovec(&desc,
+                               iov_iter_iovec(iter), iter->nr_segs, pos);
+#ifdef CONFIG_BLOCK
+       } else if (iov_iter_has_bvec(iter)) {
+               result = nfs_direct_do_schedule_read_bvec(&desc,
+                               iov_iter_bvec(iter), iter->nr_segs, pos);
+#endif
+       } else
+               BUG();
 
        nfs_pageio_complete(&desc);
 
@@ -429,9 +475,9 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
-       if (requested_bytes == 0) {
+       if (result < 0) {
                nfs_direct_req_release(dreq);
-               return result < 0 ? result : -EIO;
+               return result;
        }
 
        if (put_dreq(dreq))
@@ -439,8 +485,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
        return 0;
 }
 
-static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos, bool uio)
+static ssize_t nfs_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                              loff_t pos)
 {
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -452,7 +498,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
                goto out;
 
        dreq->inode = inode;
-       dreq->bytes_left = iov_length(iov, nr_segs);
+       dreq->bytes_left = iov_iter_count(iter);
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
@@ -463,8 +509,8 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
-       NFS_I(inode)->read_io += iov_length(iov, nr_segs);
-       result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+       NFS_I(inode)->read_io += iov_iter_count(iter);
+       result = nfs_direct_read_schedule(dreq, iter, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
 out_release:
@@ -629,7 +675,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
  */
 static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
                                                 const struct iovec *iov,
-                                                loff_t pos, bool uio)
+                                                loff_t pos)
 {
        struct nfs_direct_req *dreq = desc->pg_dreq;
        struct nfs_open_context *ctx = dreq->ctx;
@@ -657,19 +703,12 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *d
                if (!pagevec)
                        break;
 
-               if (uio) {
-                       down_read(&current->mm->mmap_sem);
-                       result = get_user_pages(current, current->mm, user_addr,
-                                               npages, 0, 0, pagevec, NULL);
-                       up_read(&current->mm->mmap_sem);
-                       if (result < 0)
-                               break;
-               } else {
-                       WARN_ON(npages != 1);
-                       result = get_kernel_page(user_addr, 0, pagevec);
-                       if (WARN_ON(result != 1))
-                               break;
-               }
+               down_read(&current->mm->mmap_sem);
+               result = get_user_pages(current, current->mm, user_addr,
+                                       npages, 0, 0, pagevec, NULL);
+               up_read(&current->mm->mmap_sem);
+               if (result < 0)
+                       break;
 
                if ((unsigned)result < npages) {
                        bytes = result * PAGE_SIZE;
@@ -798,27 +837,18 @@ static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
        .completion = nfs_direct_write_completion,
 };
 
-static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
-                                              const struct iovec *iov,
-                                              unsigned long nr_segs,
-                                              loff_t pos, bool uio)
+static ssize_t nfs_direct_do_schedule_write_iovec(
+               struct nfs_pageio_descriptor *desc, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
 {
-       struct nfs_pageio_descriptor desc;
-       struct inode *inode = dreq->inode;
-       ssize_t result = 0;
+       ssize_t result = -EINVAL;
        size_t requested_bytes = 0;
        unsigned long seg;
 
-       NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
-                             &nfs_direct_write_completion_ops);
-       desc.pg_dreq = dreq;
-       get_dreq(dreq);
-       atomic_inc(&inode->i_dio_count);
-
-       NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
        for (seg = 0; seg < nr_segs; seg++) {
                const struct iovec *vec = &iov[seg];
-               result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
+               result = nfs_direct_write_schedule_segment(desc, vec,
+                                                          pos);
                if (result < 0)
                        break;
                requested_bytes += result;
@@ -826,16 +856,91 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                        break;
                pos += vec->iov_len;
        }
+
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+
+#ifdef CONFIG_BLOCK
+static ssize_t nfs_direct_do_schedule_write_bvec(
+               struct nfs_pageio_descriptor *desc,
+               struct bio_vec *bvec, unsigned long nr_segs, loff_t pos)
+{
+       struct nfs_direct_req *dreq = desc->pg_dreq;
+       struct nfs_open_context *ctx = dreq->ctx;
+       struct inode *inode = dreq->inode;
+       ssize_t result = 0;
+       size_t requested_bytes = 0;
+       unsigned long seg;
+       struct nfs_page *req;
+       unsigned int req_len;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               req_len = bvec[seg].bv_len;
+
+               req = nfs_create_request(ctx, inode, bvec[seg].bv_page,
+                                        bvec[seg].bv_offset, req_len);
+               if (IS_ERR(req)) {
+                       result = PTR_ERR(req);
+                       break;
+               }
+               nfs_lock_request(req);
+               req->wb_index = pos >> PAGE_SHIFT;
+               req->wb_offset = pos & ~PAGE_MASK;
+               if (!nfs_pageio_add_request(desc, req)) {
+                       result = desc->pg_error;
+                       nfs_unlock_and_release_request(req);
+                       break;
+               }
+               requested_bytes += req_len;
+               pos += req_len;
+       }
+
+       if (requested_bytes)
+               return requested_bytes;
+
+       return result < 0 ? result : -EIO;
+}
+#endif /* CONFIG_BLOCK */
+
+static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq,
+                                        struct iov_iter *iter, loff_t pos)
+{
+       struct nfs_pageio_descriptor desc;
+       struct inode *inode = dreq->inode;
+       ssize_t result = 0;
+
+       NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
+                             &nfs_direct_write_completion_ops);
+       desc.pg_dreq = dreq;
+       get_dreq(dreq);
+       atomic_inc(&inode->i_dio_count);
+
+       NFS_I(dreq->inode)->write_io += iov_iter_count(iter);
+
+       if (iov_iter_has_iovec(iter)) {
+               result = nfs_direct_do_schedule_write_iovec(&desc,
+                               iov_iter_iovec(iter), iter->nr_segs, pos);
+#ifdef CONFIG_BLOCK
+       } else if (iov_iter_has_bvec(iter)) {
+               result = nfs_direct_do_schedule_write_bvec(&desc,
+                               iov_iter_bvec(iter), iter->nr_segs, pos);
+#endif
+       } else
+               BUG();
+
        nfs_pageio_complete(&desc);
 
        /*
         * If no bytes were started, return the error, and let the
         * generic layer handle the completion.
         */
-       if (requested_bytes == 0) {
+       if (result < 0) {
                inode_dio_done(inode);
                nfs_direct_req_release(dreq);
-               return result < 0 ? result : -EIO;
+               return result;
        }
 
        if (put_dreq(dreq))
@@ -843,9 +948,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
        return 0;
 }
 
-static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos,
-                               size_t count, bool uio)
+static ssize_t nfs_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                               loff_t pos)
 {
        ssize_t result = -ENOMEM;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -857,7 +961,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
                goto out;
 
        dreq->inode = inode;
-       dreq->bytes_left = count;
+       dreq->bytes_left = iov_iter_count(iter);
        dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
        l_ctx = nfs_get_lock_context(dreq->ctx);
        if (IS_ERR(l_ctx)) {
@@ -868,7 +972,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
-       result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio);
+       result = nfs_direct_write_schedule(dreq, iter, pos);
        if (!result)
                result = nfs_direct_wait(dreq);
 out_release:
@@ -880,12 +984,11 @@ out:
 /**
  * nfs_file_direct_read - file direct read operation for NFS files
  * @iocb: target I/O control block
- * @iov: vector of user buffers into which to read data
- * @nr_segs: size of iov vector
+ * @iter: vector of buffers into which to read data
  * @pos: byte offset in file where reading starts
  *
  * We use this function for direct reads instead of calling
- * generic_file_aio_read() in order to avoid gfar's check to see if
+ * generic_file_read_iter() in order to avoid gfar's check to see if
  * the request starts before the end of the file.  For that check
  * to work, we must generate a GETATTR before each direct read, and
  * even then there is a window between the GETATTR and the subsequent
@@ -898,21 +1001,19 @@ out:
  * client must read the updated atime from the server back into its
  * cache.
  */
-ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos, bool uio)
+ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                            loff_t pos)
 {
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count;
 
-       count = iov_length(iov, nr_segs);
+       count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 
-       dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
-               file->f_path.dentry->d_parent->d_name.name,
-               file->f_path.dentry->d_name.name,
-               count, (long long) pos);
+       dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
+               file, count, (long long) pos);
 
        retval = 0;
        if (!count)
@@ -924,7 +1025,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
 
        task_io_account_read(count);
 
-       retval = nfs_direct_read(iocb, iov, nr_segs, pos, uio);
+       retval = nfs_direct_read(iocb, iter, pos);
        if (retval > 0)
                iocb->ki_pos = pos + retval;
 
@@ -935,12 +1036,11 @@ out:
 /**
  * nfs_file_direct_write - file direct write operation for NFS files
  * @iocb: target I/O control block
- * @iov: vector of user buffers from which to write data
- * @nr_segs: size of iov vector
+ * @iter: vector of buffers from which to write data
  * @pos: byte offset in file where writing starts
  *
  * We use this function for direct writes instead of calling
- * generic_file_aio_write() in order to avoid taking the inode
+ * generic_file_write_iter() in order to avoid taking the inode
  * semaphore and updating the i_size.  The NFS server will set
  * the new i_size and this client must read the updated size
  * back into its cache.  We let the server do generic write
@@ -954,21 +1054,19 @@ out:
  * Note that O_APPEND is not supported for NFS direct writes, as there
  * is no atomic O_APPEND write facility in the NFS protocol.
  */
-ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos, bool uio)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                             loff_t pos)
 {
        ssize_t retval = -EINVAL;
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        size_t count;
 
-       count = iov_length(iov, nr_segs);
+       count = iov_iter_count(iter);
        nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
 
-       dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
-               file->f_path.dentry->d_parent->d_name.name,
-               file->f_path.dentry->d_name.name,
-               count, (long long) pos);
+       dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
+               file, count, (long long) pos);
 
        retval = generic_write_checks(file, &pos, &count, 0);
        if (retval)
@@ -987,7 +1085,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 
        task_io_account_write(count);
 
-       retval = nfs_direct_write(iocb, iov, nr_segs, pos, count, uio);
+       retval = nfs_direct_write(iocb, iter, pos);
        if (retval > 0) {
                struct inode *inode = mapping->host;
 
index 1e6bfdbc1aff4403a194798d3c3928993948710d..e022fe909ded953ae0403e1252d6fb8597d7c4c7 100644 (file)
@@ -65,9 +65,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
 {
        int res;
 
-       dprintk("NFS: open file(%s/%s)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name);
+       dprintk("NFS: open file(%pD2)\n", filp);
 
        nfs_inc_stats(inode, NFSIOS_VFSOPEN);
        res = nfs_check_flags(filp->f_flags);
@@ -81,9 +79,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
 int
 nfs_file_release(struct inode *inode, struct file *filp)
 {
-       dprintk("NFS: release(%s/%s)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name);
+       dprintk("NFS: release(%pD2)\n", filp);
 
        nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
        return nfs_release(inode, filp);
@@ -123,10 +119,8 @@ force_reval:
 
 loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
 {
-       dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name,
-                       offset, whence);
+       dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
+                       filp, offset, whence);
 
        /*
         * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
@@ -150,12 +144,9 @@ EXPORT_SYMBOL_GPL(nfs_file_llseek);
 int
 nfs_file_flush(struct file *file, fl_owner_t id)
 {
-       struct dentry   *dentry = file->f_path.dentry;
-       struct inode    *inode = dentry->d_inode;
+       struct inode    *inode = file_inode(file);
 
-       dprintk("NFS: flush(%s/%s)\n",
-                       dentry->d_parent->d_name.name,
-                       dentry->d_name.name);
+       dprintk("NFS: flush(%pD2)\n", file);
 
        nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
        if ((file->f_mode & FMODE_WRITE) == 0)
@@ -174,42 +165,38 @@ nfs_file_flush(struct file *file, fl_owner_t id)
 EXPORT_SYMBOL_GPL(nfs_file_flush);
 
 ssize_t
-nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
+nfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
-       struct dentry * dentry = iocb->ki_filp->f_path.dentry;
-       struct inode * inode = dentry->d_inode;
+       struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t result;
 
        if (iocb->ki_filp->f_flags & O_DIRECT)
-               return nfs_file_direct_read(iocb, iov, nr_segs, pos, true);
+               return nfs_file_direct_read(iocb, iter, pos);
 
-       dprintk("NFS: read(%s/%s, %lu@%lu)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               (unsigned long) iov_length(iov, nr_segs), (unsigned long) pos);
+       dprintk("NFS: read_iter(%pD2, %lu@%lu)\n",
+               iocb->ki_filp,
+               (unsigned long) iov_iter_count(iter), (unsigned long) pos);
 
        result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
        if (!result) {
-               result = generic_file_aio_read(iocb, iov, nr_segs, pos);
+               result = generic_file_read_iter(iocb, iter, pos);
                if (result > 0)
                        nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
        }
        return result;
 }
-EXPORT_SYMBOL_GPL(nfs_file_read);
+EXPORT_SYMBOL_GPL(nfs_file_read_iter);
 
 ssize_t
 nfs_file_splice_read(struct file *filp, loff_t *ppos,
                     struct pipe_inode_info *pipe, size_t count,
                     unsigned int flags)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        ssize_t res;
 
-       dprintk("NFS: splice_read(%s/%s, %lu@%Lu)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               (unsigned long) count, (unsigned long long) *ppos);
+       dprintk("NFS: splice_read(%pD2, %lu@%Lu)\n",
+               filp, (unsigned long) count, (unsigned long long) *ppos);
 
        res = nfs_revalidate_mapping(inode, filp->f_mapping);
        if (!res) {
@@ -224,12 +211,10 @@ EXPORT_SYMBOL_GPL(nfs_file_splice_read);
 int
 nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
-       struct dentry *dentry = file->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int     status;
 
-       dprintk("NFS: mmap(%s/%s)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       dprintk("NFS: mmap(%pD2)\n", file);
 
        /* Note: generic_file_mmap() returns ENOSYS on nommu systems
         *       so we call that before revalidating the mapping
@@ -252,21 +237,18 @@ EXPORT_SYMBOL_GPL(nfs_file_mmap);
  * disk, but it retrieves and clears ctx->error after synching, despite
  * the two being set at the same time in nfs_context_set_write_error().
  * This is because the former is used to notify the _next_ call to
- * nfs_file_write() that a write error occurred, and hence cause it to
+ * nfs_file_write_iter() that a write error occurred, and hence cause it to
  * fall back to doing a synchronous write.
  */
 int
 nfs_file_fsync_commit(struct file *file, loff_t start, loff_t end, int datasync)
 {
-       struct dentry *dentry = file->f_path.dentry;
        struct nfs_open_context *ctx = nfs_file_open_context(file);
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(file);
        int have_error, do_resend, status;
        int ret = 0;
 
-       dprintk("NFS: fsync file(%s/%s) datasync %d\n",
-                       dentry->d_parent->d_name.name, dentry->d_name.name,
-                       datasync);
+       dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);
 
        nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
        do_resend = test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags);
@@ -371,10 +353,8 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
        struct page *page;
        int once_thru = 0;
 
-       dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
-               file->f_path.dentry->d_parent->d_name.name,
-               file->f_path.dentry->d_name.name,
-               mapping->host->i_ino, len, (long long) pos);
+       dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%ld), %u@%lld)\n",
+               file, mapping->host->i_ino, len, (long long) pos);
 
 start:
        /*
@@ -414,10 +394,8 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
        struct nfs_open_context *ctx = nfs_file_open_context(file);
        int status;
 
-       dfprintk(PAGECACHE, "NFS: write_end(%s/%s(%ld), %u@%lld)\n",
-               file->f_path.dentry->d_parent->d_name.name,
-               file->f_path.dentry->d_name.name,
-               mapping->host->i_ino, len, (long long) pos);
+       dfprintk(PAGECACHE, "NFS: write_end(%pD2(%ld), %u@%lld)\n",
+               file, mapping->host->i_ino, len, (long long) pos);
 
        /*
         * Zero any uninitialised parts of the page, and then mark the page
@@ -601,22 +579,21 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct page *page = vmf->page;
        struct file *filp = vma->vm_file;
-       struct dentry *dentry = filp->f_path.dentry;
+       struct inode *inode = file_inode(filp);
        unsigned pagelen;
        int ret = VM_FAULT_NOPAGE;
        struct address_space *mapping;
 
-       dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               filp->f_mapping->host->i_ino,
+       dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%ld), offset %lld)\n",
+               filp, filp->f_mapping->host->i_ino,
                (long long)page_offset(page));
 
        /* make sure the cache has finished storing the page */
-       nfs_fscache_wait_on_page_write(NFS_I(dentry->d_inode), page);
+       nfs_fscache_wait_on_page_write(NFS_I(inode), page);
 
        lock_page(page);
        mapping = page_file_mapping(page);
-       if (mapping != dentry->d_inode->i_mapping)
+       if (mapping != inode->i_mapping)
                goto out_unlock;
 
        wait_on_page_writeback(page);
@@ -656,25 +633,24 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
        return 0;
 }
 
-ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
-                      unsigned long nr_segs, loff_t pos)
+ssize_t nfs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                           loff_t pos)
 {
-       struct dentry * dentry = iocb->ki_filp->f_path.dentry;
-       struct inode * inode = dentry->d_inode;
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
        unsigned long written = 0;
        ssize_t result;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(iter);
 
-       result = nfs_key_timeout_notify(iocb->ki_filp, inode);
+       result = nfs_key_timeout_notify(file, inode);
        if (result)
                return result;
 
-       if (iocb->ki_filp->f_flags & O_DIRECT)
-               return nfs_file_direct_write(iocb, iov, nr_segs, pos, true);
+       if (file->f_flags & O_DIRECT)
+               return nfs_file_direct_write(iocb, iter, pos);
 
-       dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               (unsigned long) count, (long long) pos);
+       dprintk("NFS: write_iter(%pD2, %lu@%Ld)\n",
+               file, (unsigned long) count, (long long) pos);
 
        result = -EBUSY;
        if (IS_SWAPFILE(inode))
@@ -682,8 +658,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
        /*
         * O_APPEND implies that we must revalidate the file length.
         */
-       if (iocb->ki_filp->f_flags & O_APPEND) {
-               result = nfs_revalidate_file_size(inode, iocb->ki_filp);
+       if (file->f_flags & O_APPEND) {
+               result = nfs_revalidate_file_size(inode, file);
                if (result)
                        goto out;
        }
@@ -692,13 +668,13 @@ ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
        if (!count)
                goto out;
 
-       result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+       result = generic_file_write_iter(iocb, iter, pos);
        if (result > 0)
                written = result;
 
        /* Return error values for O_DSYNC and IS_SYNC() */
-       if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
-               int err = vfs_fsync(iocb->ki_filp, 0);
+       if (result >= 0 && nfs_need_sync_write(file, inode)) {
+               int err = vfs_fsync(file, 0);
                if (err < 0)
                        result = err;
        }
@@ -711,20 +687,18 @@ out_swapfile:
        printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
        goto out;
 }
-EXPORT_SYMBOL_GPL(nfs_file_write);
+EXPORT_SYMBOL_GPL(nfs_file_write_iter);
 
 ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
                              struct file *filp, loff_t *ppos,
                              size_t count, unsigned int flags)
 {
-       struct dentry *dentry = filp->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = file_inode(filp);
        unsigned long written = 0;
        ssize_t ret;
 
-       dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               (unsigned long) count, (unsigned long long) *ppos);
+       dprintk("NFS splice_write(%pD2, %lu@%llu)\n",
+               filp, (unsigned long) count, (unsigned long long) *ppos);
 
        /*
         * The combination of splice and an O_APPEND destination is disallowed.
@@ -883,10 +857,8 @@ int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
        int ret = -ENOLCK;
        int is_local = 0;
 
-       dprintk("NFS: lock(%s/%s, t=%x, fl=%x, r=%lld:%lld)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name,
-                       fl->fl_type, fl->fl_flags,
+       dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
+                       filp, fl->fl_type, fl->fl_flags,
                        (long long)fl->fl_start, (long long)fl->fl_end);
 
        nfs_inc_stats(inode, NFSIOS_VFSLOCK);
@@ -923,10 +895,8 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
        struct inode *inode = filp->f_mapping->host;
        int is_local = 0;
 
-       dprintk("NFS: flock(%s/%s, t=%x, fl=%x)\n",
-                       filp->f_path.dentry->d_parent->d_name.name,
-                       filp->f_path.dentry->d_name.name,
-                       fl->fl_type, fl->fl_flags);
+       dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
+                       filp, fl->fl_type, fl->fl_flags);
 
        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
@@ -960,9 +930,7 @@ EXPORT_SYMBOL_GPL(nfs_flock);
  */
 int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
 {
-       dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
-                       file->f_path.dentry->d_parent->d_name.name,
-                       file->f_path.dentry->d_name.name, arg);
+       dprintk("NFS: setlease(%pD2, arg=%ld)\n", file, arg);
        return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(nfs_setlease);
@@ -971,8 +939,8 @@ const struct file_operations nfs_file_operations = {
        .llseek         = nfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = nfs_file_read,
-       .aio_write      = nfs_file_write,
+       .read_iter      = nfs_file_read_iter,
+       .write_iter     = nfs_file_write_iter,
        .mmap           = nfs_file_mmap,
        .open           = nfs_file_open,
        .flush          = nfs_file_flush,
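
The hunks above are largely mechanical: open-coded dentry->d_parent->d_name.name / dentry->d_name.name pairs become the %pd / %pD vsprintf specifiers, file->f_path.dentry->d_inode becomes file_inode(), and the aio_read/aio_write methods are re-registered as read_iter/write_iter. A minimal, hedged sketch of the printing conventions the new dprintk strings rely on follows; demo_print_names and its pr_debug calls are illustrative only and not part of the patch.

/* %pd prints a dentry's name and %pD a file's; a trailing digit asks for
 * that many trailing path components, so %pd2 / %pD2 come out as
 * "parent/name".  file_inode() replaces the f_path.dentry->d_inode chain. */
static void demo_print_names(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;

	pr_debug("dentry %pd, as parent/name %pd2\n", dentry, dentry);
	pr_debug("file %pD2, inode %lu\n", file, file_inode(file)->i_ino);
}
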
index 24d1d1c5fcaf9e50b7528d7ef2a9eff6fcf9f018..3ef01f0ba0bcafa2bdfe0c6088e367c12dd76668 100644 (file)
@@ -39,7 +39,7 @@ void nfs_fscache_get_client_cookie(struct nfs_client *clp)
        /* create a cache index for looking up filehandles */
        clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
                                              &nfs_fscache_server_index_def,
-                                             clp);
+                                             clp, true);
        dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
                 clp, clp->fscache);
 }
@@ -139,7 +139,7 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
        /* create a cache index for looking up filehandles */
        nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
                                               &nfs_fscache_super_index_def,
-                                              nfss);
+                                              nfss, true);
        dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
                 nfss, nfss->fscache);
        return;
@@ -178,163 +178,79 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
 /*
  * Initialise the per-inode cache cookie pointer for an NFS inode.
  */
-void nfs_fscache_init_inode_cookie(struct inode *inode)
+void nfs_fscache_init_inode(struct inode *inode)
 {
-       NFS_I(inode)->fscache = NULL;
-       if (S_ISREG(inode->i_mode))
-               set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
-}
-
-/*
- * Get the per-inode cache cookie for an NFS inode.
- */
-static void nfs_fscache_enable_inode_cookie(struct inode *inode)
-{
-       struct super_block *sb = inode->i_sb;
        struct nfs_inode *nfsi = NFS_I(inode);
 
-       if (nfsi->fscache || !NFS_FSCACHE(inode))
+       nfsi->fscache = NULL;
+       if (!S_ISREG(inode->i_mode))
                return;
-
-       if ((NFS_SB(sb)->options & NFS_OPTION_FSCACHE)) {
-               nfsi->fscache = fscache_acquire_cookie(
-                       NFS_SB(sb)->fscache,
-                       &nfs_fscache_inode_object_def,
-                       nfsi);
-
-               dfprintk(FSCACHE, "NFS: get FH cookie (0x%p/0x%p/0x%p)\n",
-                        sb, nfsi, nfsi->fscache);
-       }
+       nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
+                                              &nfs_fscache_inode_object_def,
+                                              nfsi, false);
 }
 
 /*
  * Release a per-inode cookie.
  */
-void nfs_fscache_release_inode_cookie(struct inode *inode)
+void nfs_fscache_clear_inode(struct inode *inode)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
+       struct fscache_cookie *cookie = nfs_i_fscache(inode);
 
-       dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n",
-                nfsi, nfsi->fscache);
+       dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
 
-       fscache_relinquish_cookie(nfsi->fscache, 0);
+       fscache_relinquish_cookie(cookie, false);
        nfsi->fscache = NULL;
 }
 
-/*
- * Retire a per-inode cookie, destroying the data attached to it.
- */
-void nfs_fscache_zap_inode_cookie(struct inode *inode)
+static bool nfs_fscache_can_enable(void *data)
 {
-       struct nfs_inode *nfsi = NFS_I(inode);
+       struct inode *inode = data;
 
-       dfprintk(FSCACHE, "NFS: zapping cookie (0x%p/0x%p)\n",
-                nfsi, nfsi->fscache);
-
-       fscache_relinquish_cookie(nfsi->fscache, 1);
-       nfsi->fscache = NULL;
+       return !inode_is_open_for_write(inode);
 }
 
 /*
- * Turn off the cache with regard to a per-inode cookie if opened for writing,
- * invalidating all the pages in the page cache relating to the associated
- * inode to clear the per-page caching.
- */
-static void nfs_fscache_disable_inode_cookie(struct inode *inode)
-{
-       clear_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
-
-       if (NFS_I(inode)->fscache) {
-               dfprintk(FSCACHE,
-                        "NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));
-
-               /* Need to uncache any pages attached to this inode that
-                * fscache knows about before turning off the cache.
-                */
-               fscache_uncache_all_inode_pages(NFS_I(inode)->fscache, inode);
-               nfs_fscache_zap_inode_cookie(inode);
-       }
-}
-
-/*
- * wait_on_bit() sleep function for uninterruptible waiting
- */
-static int nfs_fscache_wait_bit(void *flags)
-{
-       schedule();
-       return 0;
-}
-
-/*
- * Lock against someone else trying to also acquire or relinquish a cookie
- */
-static inline void nfs_fscache_inode_lock(struct inode *inode)
-{
-       struct nfs_inode *nfsi = NFS_I(inode);
-
-       while (test_and_set_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags))
-               wait_on_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK,
-                           nfs_fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-}
-
-/*
- * Unlock cookie management lock
- */
-static inline void nfs_fscache_inode_unlock(struct inode *inode)
-{
-       struct nfs_inode *nfsi = NFS_I(inode);
-
-       smp_mb__before_clear_bit();
-       clear_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags);
-       smp_mb__after_clear_bit();
-       wake_up_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK);
-}
-
-/*
- * Decide if we should enable or disable local caching for this inode.
- * - For now, with NFS, only regular files that are open read-only will be able
- *   to use the cache.
- * - May be invoked multiple times in parallel by parallel nfs_open() functions.
- */
-void nfs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
-{
-       if (NFS_FSCACHE(inode)) {
-               nfs_fscache_inode_lock(inode);
-               if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
-                       nfs_fscache_disable_inode_cookie(inode);
-               else
-                       nfs_fscache_enable_inode_cookie(inode);
-               nfs_fscache_inode_unlock(inode);
-       }
-}
-EXPORT_SYMBOL_GPL(nfs_fscache_set_inode_cookie);
-
-/*
- * Replace a per-inode cookie due to revalidation detecting a file having
- * changed on the server.
+ * Enable or disable caching for a file that is being opened as appropriate.
+ * The cookie is allocated when the inode is initialised, but is not enabled at
+ * that time.  Enablement is deferred to file-open time to avoid stat() and
+ * access() thrashing the cache.
+ *
+ * For now, with NFS, only regular files that are open read-only will be able
+ * to use the cache.
+ *
+ * We enable the cache for an inode if we open it read-only and it isn't
+ * currently open for writing.  We disable the cache if the inode is open
+ * write-only.
+ *
+ * The caller uses the file struct to pin i_writecount on the inode before
+ * calling us when a file is opened for writing, so we can make use of that.
+ *
+ * Note that this may be invoked multiple times in parallel by parallel
+ * nfs_open() functions.
  */
-void nfs_fscache_reset_inode_cookie(struct inode *inode)
+void nfs_fscache_open_file(struct inode *inode, struct file *filp)
 {
        struct nfs_inode *nfsi = NFS_I(inode);
-       struct nfs_server *nfss = NFS_SERVER(inode);
-       NFS_IFDEBUG(struct fscache_cookie *old = nfsi->fscache);
+       struct fscache_cookie *cookie = nfs_i_fscache(inode);
 
-       nfs_fscache_inode_lock(inode);
-       if (nfsi->fscache) {
-               /* retire the current fscache cache and get a new one */
-               fscache_relinquish_cookie(nfsi->fscache, 1);
-
-               nfsi->fscache = fscache_acquire_cookie(
-                       nfss->nfs_client->fscache,
-                       &nfs_fscache_inode_object_def,
-                       nfsi);
+       if (!fscache_cookie_valid(cookie))
+               return;
 
-               dfprintk(FSCACHE,
-                        "NFS: revalidation new cookie (0x%p/0x%p/0x%p/0x%p)\n",
-                        nfss, nfsi, old, nfsi->fscache);
+       if (inode_is_open_for_write(inode)) {
+               dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
+               clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
+               fscache_disable_cookie(cookie, true);
+               fscache_uncache_all_inode_pages(cookie, inode);
+       } else {
+               dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
+               fscache_enable_cookie(cookie, nfs_fscache_can_enable, inode);
+               if (fscache_cookie_enabled(cookie))
+                       set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
        }
-       nfs_fscache_inode_unlock(inode);
 }
+EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
 
 /*
  * Release the caching state associated with a page, if the page isn't busy
@@ -344,12 +260,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode)
 int nfs_fscache_release_page(struct page *page, gfp_t gfp)
 {
        if (PageFsCache(page)) {
-               struct nfs_inode *nfsi = NFS_I(page->mapping->host);
-               struct fscache_cookie *cookie = nfsi->fscache;
+               struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);
 
                BUG_ON(!cookie);
                dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
-                        cookie, page, nfsi);
+                        cookie, page, NFS_I(page->mapping->host));
 
                if (!fscache_maybe_release_page(cookie, page, gfp))
                        return 0;
@@ -367,13 +282,12 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
  */
 void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
 {
-       struct nfs_inode *nfsi = NFS_I(inode);
-       struct fscache_cookie *cookie = nfsi->fscache;
+       struct fscache_cookie *cookie = nfs_i_fscache(inode);
 
        BUG_ON(!cookie);
 
        dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
-                cookie, page, nfsi);
+                cookie, page, NFS_I(inode));
 
        fscache_wait_on_page_write(cookie, page);
 
@@ -417,9 +331,9 @@ int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
 
        dfprintk(FSCACHE,
                 "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
-                NFS_I(inode)->fscache, page, page->index, page->flags, inode);
+                nfs_i_fscache(inode), page, page->index, page->flags, inode);
 
-       ret = fscache_read_or_alloc_page(NFS_I(inode)->fscache,
+       ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
                                         page,
                                         nfs_readpage_from_fscache_complete,
                                         ctx,
@@ -459,9 +373,9 @@ int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
        int ret;
 
        dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
-                NFS_I(inode)->fscache, npages, inode);
+                nfs_i_fscache(inode), npages, inode);
 
-       ret = fscache_read_or_alloc_pages(NFS_I(inode)->fscache,
+       ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
                                          mapping, pages, nr_pages,
                                          nfs_readpage_from_fscache_complete,
                                          ctx,
@@ -506,15 +420,15 @@ void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
 
        dfprintk(FSCACHE,
                 "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
-                NFS_I(inode)->fscache, page, page->index, page->flags, sync);
+                nfs_i_fscache(inode), page, page->index, page->flags, sync);
 
-       ret = fscache_write_page(NFS_I(inode)->fscache, page, GFP_KERNEL);
+       ret = fscache_write_page(nfs_i_fscache(inode), page, GFP_KERNEL);
        dfprintk(FSCACHE,
                 "NFS:     readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
                 page, page->index, page->flags, ret);
 
        if (ret != 0) {
-               fscache_uncache_page(NFS_I(inode)->fscache, page);
+               fscache_uncache_page(nfs_i_fscache(inode), page);
                nfs_add_fscache_stats(inode,
                                      NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, 1);
                nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
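
The NFS FS-Cache hunks above rework the cookie lifecycle: a cookie is now acquired in a disabled state when the inode is initialised (the new bool argument to fscache_acquire_cookie()) and is only enabled, or disabled and flushed, once the file is actually opened, so stat() and access() traffic no longer thrashes the cache. A condensed, hedged sketch of that lifecycle using the same calls as the hunks; the demo_* wrappers are hypothetical and the boolean arguments simply follow the usage shown above.

static bool demo_can_enable(void *data)
{
	/* only cache inodes that nobody currently has open for writing */
	return !inode_is_open_for_write((struct inode *)data);
}

static void demo_init_cookie(struct nfs_inode *nfsi, struct fscache_cookie *parent)
{
	/* final argument false: acquire the cookie but leave it disabled */
	nfsi->fscache = fscache_acquire_cookie(parent,
					       &nfs_fscache_inode_object_def,
					       nfsi, false);
}

static void demo_open(struct inode *inode, struct fscache_cookie *cookie)
{
	if (inode_is_open_for_write(inode))
		fscache_disable_cookie(cookie, true);
	else
		fscache_enable_cookie(cookie, demo_can_enable, inode);
}
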
index 4ecb76652eba6059402c0d9e7320e4cdc1603b82..d7fe3e799f2fc0ee4c83f226f5dee1cc72ca0cc7 100644 (file)
@@ -76,11 +76,9 @@ extern void nfs_fscache_release_client_cookie(struct nfs_client *);
 extern void nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
 extern void nfs_fscache_release_super_cookie(struct super_block *);
 
-extern void nfs_fscache_init_inode_cookie(struct inode *);
-extern void nfs_fscache_release_inode_cookie(struct inode *);
-extern void nfs_fscache_zap_inode_cookie(struct inode *);
-extern void nfs_fscache_set_inode_cookie(struct inode *, struct file *);
-extern void nfs_fscache_reset_inode_cookie(struct inode *);
+extern void nfs_fscache_init_inode(struct inode *);
+extern void nfs_fscache_clear_inode(struct inode *);
+extern void nfs_fscache_open_file(struct inode *, struct file *);
 
 extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
 extern int nfs_fscache_release_page(struct page *, gfp_t);
@@ -187,12 +185,10 @@ static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
 
 static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
 
-static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {}
-static inline void nfs_fscache_release_inode_cookie(struct inode *inode) {}
-static inline void nfs_fscache_zap_inode_cookie(struct inode *inode) {}
-static inline void nfs_fscache_set_inode_cookie(struct inode *inode,
-                                               struct file *filp) {}
-static inline void nfs_fscache_reset_inode_cookie(struct inode *inode) {}
+static inline void nfs_fscache_init_inode(struct inode *inode) {}
+static inline void nfs_fscache_clear_inode(struct inode *inode) {}
+static inline void nfs_fscache_open_file(struct inode *inode,
+                                        struct file *filp) {}
 
 static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
 {
index eda8879171c47e3225ee6891a77aac70e1531af2..bb90bff0cb7a296f4b50a40b78974484fb221a9d 100644 (file)
@@ -122,7 +122,7 @@ void nfs_clear_inode(struct inode *inode)
        WARN_ON_ONCE(!list_empty(&NFS_I(inode)->open_files));
        nfs_zap_acl_cache(inode);
        nfs_access_zap_cache(inode);
-       nfs_fscache_release_inode_cookie(inode);
+       nfs_fscache_clear_inode(inode);
 }
 EXPORT_SYMBOL_GPL(nfs_clear_inode);
 
@@ -459,7 +459,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                nfsi->attrtimeo_timestamp = now;
                nfsi->access_cache = RB_ROOT;
 
-               nfs_fscache_init_inode_cookie(inode);
+               nfs_fscache_init_inode(inode);
 
                unlock_new_inode(inode);
        } else
@@ -854,7 +854,7 @@ int nfs_open(struct inode *inode, struct file *filp)
                return PTR_ERR(ctx);
        nfs_file_set_open_context(filp, ctx);
        put_nfs_open_context(ctx);
-       nfs_fscache_set_inode_cookie(inode, filp);
+       nfs_fscache_open_file(inode, filp);
        return 0;
 }
 
index 38da8c2b81ac09d526b3b402d1964e0c7c17c2dc..32f7a4f415a1559e5b7b5de5e7627a860dae1938 100644 (file)
@@ -291,11 +291,11 @@ int nfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *)
 int nfs_file_fsync_commit(struct file *, loff_t, loff_t, int);
 loff_t nfs_file_llseek(struct file *, loff_t, int);
 int nfs_file_flush(struct file *, fl_owner_t);
-ssize_t nfs_file_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ssize_t nfs_file_read_iter(struct kiocb *, struct iov_iter *, loff_t);
 ssize_t nfs_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *,
                             size_t, unsigned int);
 int nfs_file_mmap(struct file *, struct vm_area_struct *);
-ssize_t nfs_file_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+ssize_t nfs_file_write_iter(struct kiocb *, struct iov_iter *, loff_t);
 int nfs_file_release(struct inode *, struct file *);
 int nfs_lock(struct file *, int, struct file_lock *);
 int nfs_flock(struct file *, int, struct file_lock *);
index 348b535cd7866d9e18cfa6f9412b650266244f0a..b5a0afc3ee101b988fc18df2b009c20e56812534 100644 (file)
@@ -253,9 +253,8 @@ struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
 
        dprintk("--> nfs_do_submount()\n");
 
-       dprintk("%s: submounting on %s/%s\n", __func__,
-                       dentry->d_parent->d_name.name,
-                       dentry->d_name.name);
+       dprintk("%s: submounting on %pd2\n", __func__,
+                       dentry);
        if (page == NULL)
                goto out;
        devname = nfs_devname(dentry, page, PAGE_SIZE);
index 90cb10d7b6936d1fc478f46572728e65e3874f29..01b6f6a49d162ef0ea8720786e259fd5f66aa9d3 100644 (file)
@@ -321,7 +321,7 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        umode_t mode = sattr->ia_mode;
        int status = -ENOMEM;
 
-       dprintk("NFS call  create %s\n", dentry->d_name.name);
+       dprintk("NFS call  create %pd\n", dentry);
 
        data = nfs3_alloc_createdata();
        if (data == NULL)
@@ -548,7 +548,7 @@ nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
        if (len > NFS3_MAXPATHLEN)
                return -ENAMETOOLONG;
 
-       dprintk("NFS call  symlink %s\n", dentry->d_name.name);
+       dprintk("NFS call  symlink %pd\n", dentry);
 
        data = nfs3_alloc_createdata();
        if (data == NULL)
@@ -576,7 +576,7 @@ nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
        umode_t mode = sattr->ia_mode;
        int status = -ENOMEM;
 
-       dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
+       dprintk("NFS call  mkdir %pd\n", dentry);
 
        sattr->ia_mode &= ~current_umask();
 
@@ -695,7 +695,7 @@ nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        umode_t mode = sattr->ia_mode;
        int status = -ENOMEM;
 
-       dprintk("NFS call  mknod %s %u:%u\n", dentry->d_name.name,
+       dprintk("NFS call  mknod %pd %u:%u\n", dentry,
                        MAJOR(rdev), MINOR(rdev));
 
        sattr->ia_mode &= ~current_umask();
index a860ab566d6e98e5ce2f2f1c42686a15837254bc..511cdce6ecf2f067b4effb1026cf1d236cbb0ae9 100644 (file)
@@ -368,6 +368,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
        if (clp->cl_minorversion != 0)
                __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags);
        __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
+       __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
        error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
        if (error == -EINVAL)
                error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
index 77efaf15ec9019a2a6ed4527b214a166796497c4..c34007ae921af48bbf9c463478f5b093b4713b99 100644 (file)
@@ -31,9 +31,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
         * -EOPENSTALE.  The VFS will retry the lookup/create/open.
         */
 
-       dprintk("NFS: open file(%s/%s)\n",
-               dentry->d_parent->d_name.name,
-               dentry->d_name.name);
+       dprintk("NFS: open file(%pd2)\n", dentry);
 
        if ((openflags & O_ACCMODE) == 3)
                openflags--;
@@ -75,7 +73,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
 
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
        nfs_file_set_open_context(filp, ctx);
-       nfs_fscache_set_inode_cookie(inode, filp);
+       nfs_fscache_open_file(inode, filp);
        err = 0;
 
 out_put_ctx:
@@ -122,8 +120,8 @@ const struct file_operations nfs4_file_operations = {
        .llseek         = nfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = nfs_file_read,
-       .aio_write      = nfs_file_write,
+       .read_iter      = nfs_file_read_iter,
+       .write_iter     = nfs_file_write_iter,
        .mmap           = nfs_file_mmap,
        .open           = nfs4_file_open,
        .flush          = nfs_file_flush,
index 2288cd3c92784305c9669fe5e87682ad3293c843..049b9fb0d2c9c613e16eaa1fce5179acaa7d2ff1 100644 (file)
@@ -283,8 +283,7 @@ static struct vfsmount *nfs_follow_referral(struct dentry *dentry,
        if (locations == NULL || locations->nlocations <= 0)
                goto out;
 
-       dprintk("%s: referral at %s/%s\n", __func__,
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       dprintk("%s: referral at %pd2\n", __func__, dentry);
 
        page = (char *) __get_free_page(GFP_USER);
        if (!page)
@@ -348,8 +347,8 @@ static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *
        mnt = ERR_PTR(-ENOENT);
 
        parent = dget_parent(dentry);
-       dprintk("%s: getting locations for %s/%s\n",
-               __func__, parent->d_name.name, dentry->d_name.name);
+       dprintk("%s: getting locations for %pd2\n",
+               __func__, dentry);
 
        err = nfs4_proc_fs_locations(client, parent->d_inode, &dentry->d_name, fs_locations, page);
        dput(parent);
index d53d6785cba27f5c6442831e51eded9d04b054f3..30ffc4a3e42b4e3d75c6dc31a87b0820a8b8d350 100644 (file)
@@ -3738,9 +3738,8 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
        };
        int                     status;
 
-       dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
-                       dentry->d_parent->d_name.name,
-                       dentry->d_name.name,
+       dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
+                       dentry,
                        (unsigned long long)cookie);
        nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
        res.pgbase = args.pgbase;
@@ -5106,6 +5105,7 @@ static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock
                        status = 0;
        }
        request->fl_ops->fl_release_private(request);
+       request->fl_ops = NULL;
 out:
        return status;
 }
index a8f57c728df561ac58e158c9fb46ca3b7c77004e..fddbba2d9eff028fa5c15e12b799aedf35f40e4d 100644 (file)
@@ -235,7 +235,7 @@ nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        };
        int status = -ENOMEM;
 
-       dprintk("NFS call  create %s\n", dentry->d_name.name);
+       dprintk("NFS call  create %pd\n", dentry);
        data = nfs_alloc_createdata(dir, dentry, sattr);
        if (data == NULL)
                goto out;
@@ -265,7 +265,7 @@ nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
        umode_t mode;
        int status = -ENOMEM;
 
-       dprintk("NFS call  mknod %s\n", dentry->d_name.name);
+       dprintk("NFS call  mknod %pd\n", dentry);
 
        mode = sattr->ia_mode;
        if (S_ISFIFO(mode)) {
@@ -423,7 +423,7 @@ nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
        };
        int status = -ENAMETOOLONG;
 
-       dprintk("NFS call  symlink %s\n", dentry->d_name.name);
+       dprintk("NFS call  symlink %pd\n", dentry);
 
        if (len > NFS2_MAXPATHLEN)
                goto out;
@@ -462,7 +462,7 @@ nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
        };
        int status = -ENOMEM;
 
-       dprintk("NFS call  mkdir %s\n", dentry->d_name.name);
+       dprintk("NFS call  mkdir %pd\n", dentry);
        data = nfs_alloc_createdata(dir, dentry, sattr);
        if (data == NULL)
                goto out;
index bb939edd4c998cb98b7aeb56ae1aa308e4d9009d..8285de9eaad24eefc3aa7325438e21e98c625d8a 100644 (file)
@@ -495,9 +495,8 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
        struct rpc_task *task;
        int            error = -EIO;
 
-       dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name,
-               d_count(dentry));
+       dfprintk(VFS, "NFS: silly-rename(%pd2, ct=%d)\n",
+               dentry, d_count(dentry));
        nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
 
        /*
@@ -522,8 +521,8 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
                                SILLYNAME_FILEID_LEN, fileid,
                                SILLYNAME_COUNTER_LEN, sillycounter);
 
-               dfprintk(VFS, "NFS: trying to rename %s to %s\n",
-                               dentry->d_name.name, silly);
+               dfprintk(VFS, "NFS: trying to rename %pd to %s\n",
+                               dentry, silly);
 
                sdentry = lookup_one_len(silly, dentry->d_parent, slen);
                /*
index ac1dc331ba31212108cd5c93352ecdb620122690..c1d548211c31dfab94dec9252cc3d929cfe42daa 100644 (file)
@@ -954,10 +954,8 @@ int nfs_updatepage(struct file *file, struct page *page,
 
        nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
 
-       dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
-               file->f_path.dentry->d_parent->d_name.name,
-               file->f_path.dentry->d_name.name, count,
-               (long long)(page_file_offset(page) + offset));
+       dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
+               file, count, (long long)(page_file_offset(page) + offset));
 
        if (nfs_can_extend_write(file, page, inode)) {
                count = max(count + offset, nfs_page_length(page));
index e0a65a9e37e97ac1a8702a48487349d599be4aab..9c271f42604a3644e9e826af6f252348e1429249 100644 (file)
@@ -385,8 +385,8 @@ purge_old(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
 
        status = vfs_rmdir(parent->d_inode, child);
        if (status)
-               printk("failed to remove client recovery directory %s\n",
-                               child->d_name.name);
+               printk("failed to remove client recovery directory %pd\n",
+                               child);
        /* Keep trying, success or failure: */
        return 0;
 }
@@ -410,15 +410,15 @@ out:
        nfs4_release_reclaim(nn);
        if (status)
                printk("nfsd4: failed to purge old clients from recovery"
-                       " directory %s\n", nn->rec_file->f_path.dentry->d_name.name);
+                       " directory %pD\n", nn->rec_file);
 }
 
 static int
 load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn)
 {
        if (child->d_name.len != HEXDIR_LEN - 1) {
-               printk("nfsd4: illegal name %s in recovery directory\n",
-                               child->d_name.name);
+               printk("nfsd4: illegal name %pd in recovery directory\n",
+                               child);
                /* Keep trying; maybe the others are OK: */
                return 0;
        }
@@ -437,7 +437,7 @@ nfsd4_recdir_load(struct net *net) {
        status = nfsd4_list_rec_dir(load_recdir, nn);
        if (status)
                printk("nfsd4: failed loading clients from recovery"
-                       " directory %s\n", nn->rec_file->f_path.dentry->d_name.name);
+                       " directory %pD\n", nn->rec_file);
        return status;
 }
 
index 0874998a49cd40081dc34483bb8f400a64ad2528..a601fd49f997bc441397cb432f2fe0d41dacf8ec 100644 (file)
@@ -3843,9 +3843,8 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        struct nfs4_ol_stateid *stp;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
-       dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
-                       (int)cstate->current_fh.fh_dentry->d_name.len,
-                       cstate->current_fh.fh_dentry->d_name.name);
+       dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
+                       cstate->current_fh.fh_dentry);
 
        status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
        if (status)
@@ -3922,9 +3921,8 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
        struct nfs4_ol_stateid *stp;
        struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
 
-       dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 
-                       (int)cstate->current_fh.fh_dentry->d_name.len,
-                       cstate->current_fh.fh_dentry->d_name.name);
+       dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 
+                       cstate->current_fh.fh_dentry);
 
        /* We don't yet support WANT bits: */
        if (od->od_deleg_want)
@@ -3980,9 +3978,8 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        struct net *net = SVC_NET(rqstp);
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       dprintk("NFSD: nfsd4_close on file %.*s\n", 
-                       (int)cstate->current_fh.fh_dentry->d_name.len,
-                       cstate->current_fh.fh_dentry->d_name.name);
+       dprintk("NFSD: nfsd4_close on file %pd\n", 
+                       cstate->current_fh.fh_dentry);
 
        nfs4_lock_state();
        status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
index 814afaa4458a9285fc3168c47d1df6a1f8164c12..3d0e15ae6f726311ef82b337e66bea91a26792e4 100644 (file)
@@ -47,7 +47,7 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
                tdentry = parent;
        }
        if (tdentry != exp->ex_path.dentry)
-               dprintk("nfsd_acceptable failed at %p %s\n", tdentry, tdentry->d_name.name);
+               dprintk("nfsd_acceptable failed at %p %pd\n", tdentry, tdentry);
        rv = (tdentry == exp->ex_path.dentry);
        dput(tdentry);
        return rv;
@@ -253,8 +253,8 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
 
        if (S_ISDIR(dentry->d_inode->i_mode) &&
                        (dentry->d_flags & DCACHE_DISCONNECTED)) {
-               printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %s/%s\n",
-                               dentry->d_parent->d_name.name, dentry->d_name.name);
+               printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n",
+                               dentry);
        }
 
        fhp->fh_dentry = dentry;
@@ -361,10 +361,9 @@ skip_pseudoflavor_check:
        error = nfsd_permission(rqstp, exp, dentry, access);
 
        if (error) {
-               dprintk("fh_verify: %s/%s permission failure, "
+               dprintk("fh_verify: %pd2 permission failure, "
                        "acc=%x, error=%d\n",
-                       dentry->d_parent->d_name.name,
-                       dentry->d_name.name,
+                       dentry,
                        access, ntohl(error));
        }
 out:
@@ -514,14 +513,13 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
         */
 
        struct inode * inode = dentry->d_inode;
-       struct dentry *parent = dentry->d_parent;
        __u32 *datap;
        dev_t ex_dev = exp_sb(exp)->s_dev;
 
-       dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %s/%s, ino=%ld)\n",
+       dprintk("nfsd: fh_compose(exp %02x:%02x/%ld %pd2, ino=%ld)\n",
                MAJOR(ex_dev), MINOR(ex_dev),
                (long) exp->ex_path.dentry->d_inode->i_ino,
-               parent->d_name.name, dentry->d_name.name,
+               dentry,
                (inode ? inode->i_ino : 0));
 
        /* Choose filehandle version and fsid type based on
@@ -534,13 +532,13 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
                fh_put(ref_fh);
 
        if (fhp->fh_locked || fhp->fh_dentry) {
-               printk(KERN_ERR "fh_compose: fh %s/%s not initialized!\n",
-                      parent->d_name.name, dentry->d_name.name);
+               printk(KERN_ERR "fh_compose: fh %pd2 not initialized!\n",
+                      dentry);
        }
        if (fhp->fh_maxsize < NFS_FHSIZE)
-               printk(KERN_ERR "fh_compose: called with maxsize %d! %s/%s\n",
+               printk(KERN_ERR "fh_compose: called with maxsize %d! %pd2\n",
                       fhp->fh_maxsize,
-                      parent->d_name.name, dentry->d_name.name);
+                      dentry);
 
        fhp->fh_dentry = dget(dentry); /* our internal copy */
        fhp->fh_export = exp;
@@ -613,8 +611,8 @@ out_bad:
        printk(KERN_ERR "fh_update: fh not verified!\n");
        goto out;
 out_negative:
-       printk(KERN_ERR "fh_update: %s/%s still negative!\n",
-               dentry->d_parent->d_name.name, dentry->d_name.name);
+       printk(KERN_ERR "fh_update: %pd2 still negative!\n",
+               dentry);
        goto out;
 }
 
index e5e6707ba687715809ef510b4167f011fb40b172..4775bc4896c8b363123f852d1fd517f59fe7fd6e 100644 (file)
@@ -173,8 +173,8 @@ fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
        BUG_ON(!dentry);
 
        if (fhp->fh_locked) {
-               printk(KERN_WARNING "fh_lock: %s/%s already locked!\n",
-                       dentry->d_parent->d_name.name, dentry->d_name.name);
+               printk(KERN_WARNING "fh_lock: %pd2 already locked!\n",
+                       dentry);
                return;
        }
 
index c827acb0e943bdab08fd077e56228b170a6a9549..13886f7f40d5e8a868182936d3cda837492b7b61 100644 (file)
@@ -1317,9 +1317,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
                if (!fhp->fh_locked) {
                        /* not actually possible */
                        printk(KERN_ERR
-                               "nfsd_create: parent %s/%s not locked!\n",
-                               dentry->d_parent->d_name.name,
-                               dentry->d_name.name);
+                               "nfsd_create: parent %pd2 not locked!\n",
+                               dentry);
                        err = nfserr_io;
                        goto out;
                }
@@ -1329,8 +1328,8 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
         */
        err = nfserr_exist;
        if (dchild->d_inode) {
-               dprintk("nfsd_create: dentry %s/%s not negative!\n",
-                       dentry->d_name.name, dchild->d_name.name);
+               dprintk("nfsd_create: dentry %pd/%pd not negative!\n",
+                       dentry, dchild);
                goto out; 
        }
 
index 08fdb77852acd4f2ca692c5c8eb010e55b00aaf5..7aeb8ee013050453d136fc86e80022616118a07f 100644 (file)
@@ -153,8 +153,8 @@ const struct file_operations nilfs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = generic_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .unlocked_ioctl = nilfs_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = nilfs_compat_ioctl,
index 7e350c562e0ea1dd491a8ee6c72371b770666e1f..4a99a24b54a276108d584b4f540936e8e16c4699 100644 (file)
@@ -298,8 +298,8 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t
-nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-               loff_t offset, unsigned long nr_segs)
+nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+               loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -310,7 +310,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                return 0;
 
        /* Needs synchronization with the cleaner */
-       size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                  nilfs_get_block);
 
        /*
@@ -319,7 +319,7 @@ nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
         */
        if (unlikely((rw & WRITE) && size < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if (end > isize)
                        nilfs_write_failed(mapping, end);
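
The nilfs2 ->direct_IO conversion above swaps the (iov, nr_segs) pair for a struct iov_iter, so total sizes come from iov_iter_count() instead of iov_length(). A small hedged illustration of those accessors as these hunks use them; demo_iter_bytes is a hypothetical helper.

static size_t demo_iter_bytes(const struct iovec *vec, unsigned long nr_segs)
{
	struct iov_iter iter;
	size_t total = iov_length(vec, nr_segs);	/* old-style total */

	/* wrap the iovec array; the trailing 0 is the starting offset */
	iov_iter_init(&iter, vec, nr_segs, total, 0);
	return iov_iter_count(&iter);			/* same total, new accessor */
}
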
index f37d3c0e20535eacce542c7184f48d8b509884a2..2921dcf300d36c62fc2403fcfbd066c7c0ed1df6 100644 (file)
@@ -616,9 +616,8 @@ static int ocfs2_releasepage(struct page *page, gfp_t wait)
 
 static ssize_t ocfs2_direct_IO(int rw,
                               struct kiocb *iocb,
-                              const struct iovec *iov,
-                              loff_t offset,
-                              unsigned long nr_segs)
+                              struct iov_iter *iter,
+                              loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file)->i_mapping->host;
@@ -635,8 +634,7 @@ static ssize_t ocfs2_direct_IO(int rw,
                return 0;
 
        return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
-                                   iov, offset, nr_segs,
-                                   ocfs2_direct_IO_get_blocks,
+                                   iter, offset, ocfs2_direct_IO_get_blocks,
                                    ocfs2_dio_end_io, NULL, 0);
 }
 
index f671e49beb348b5c33dfba3e3c4b5f322c73e43b..573f41d1e45935ff67ea600d6c54edb3df0c0829 100644 (file)
@@ -74,7 +74,7 @@ static inline void ocfs2_iocb_set_rw_locked(struct kiocb *iocb, int level)
 /*
  * Using a named enum representing lock types in terms of #N bit stored in
  * iocb->private, which is going to be used for communication between
- * ocfs2_dio_end_io() and ocfs2_file_aio_write/read().
+ * ocfs2_dio_end_io() and ocfs2_file_write/read_iter().
  */
 enum ocfs2_iocb_lock_bits {
        OCFS2_IOCB_RW_LOCK = 0,
index d71903c6068b94f37836854762691a7a1c9d9f12..1d85492684ac02932617d107d969c00c9092bb64 100644 (file)
@@ -2217,15 +2217,13 @@ out:
        return ret;
 }
 
-static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
-                                   const struct iovec *iov,
-                                   unsigned long nr_segs,
-                                   loff_t pos)
+static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
+                                    struct iov_iter *iter,
+                                    loff_t pos)
 {
        int ret, direct_io, appending, rw_level, have_alloc_sem  = 0;
        int can_do_direct, has_refcount = 0;
        ssize_t written = 0;
-       size_t ocount;          /* original count */
        size_t count;           /* after file limit checks */
        loff_t old_size, *ppos = &iocb->ki_pos;
        u32 old_clusters;
@@ -2236,11 +2234,11 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
                               OCFS2_MOUNT_COHERENCY_BUFFERED);
        int unaligned_dio = 0;
 
-       trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
+       trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
                file->f_path.dentry->d_name.len,
                file->f_path.dentry->d_name.name,
-               (unsigned int)nr_segs);
+               (unsigned long)pos);
 
        if (iocb->ki_nbytes == 0)
                return 0;
@@ -2340,28 +2338,24 @@ relock:
        /* communicate with ocfs2_dio_end_io */
        ocfs2_iocb_set_rw_locked(iocb, rw_level);
 
-       ret = generic_segment_checks(iov, &nr_segs, &ocount,
-                                    VERIFY_READ);
-       if (ret)
-               goto out_dio;
 
-       count = ocount;
+       count = iov_iter_count(iter);
        ret = generic_write_checks(file, ppos, &count,
                                   S_ISBLK(inode->i_mode));
        if (ret)
                goto out_dio;
 
        if (direct_io) {
-               written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
-                                                   ppos, count, ocount);
+               written = generic_file_direct_write_iter(iocb, iter, *ppos,
+                                                   ppos, count);
                if (written < 0) {
                        ret = written;
                        goto out_dio;
                }
        } else {
                current->backing_dev_info = file->f_mapping->backing_dev_info;
-               written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
-                                                     ppos, count, 0);
+               written = generic_file_buffered_write_iter(iocb, iter, *ppos,
+                                                          ppos, count, 0);
                current->backing_dev_info = NULL;
        }
 
@@ -2517,7 +2511,7 @@ static ssize_t ocfs2_file_splice_read(struct file *in,
                        in->f_path.dentry->d_name.name, len);
 
        /*
-        * See the comment in ocfs2_file_aio_read()
+        * See the comment in ocfs2_file_read_iter()
         */
        ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level);
        if (ret < 0) {
@@ -2532,19 +2526,18 @@ bail:
        return ret;
 }
 
-static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
-                                  const struct iovec *iov,
-                                  unsigned long nr_segs,
-                                  loff_t pos)
+static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
+                                   struct iov_iter *iter,
+                                   loff_t pos)
 {
        int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
        struct file *filp = iocb->ki_filp;
        struct inode *inode = file_inode(filp);
 
-       trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
+       trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        filp->f_path.dentry->d_name.len,
-                       filp->f_path.dentry->d_name.name, nr_segs);
+                       filp->f_path.dentry->d_name.name, pos);
 
 
        if (!inode) {
@@ -2580,7 +2573,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
         *
         * Take and drop the meta data lock to update inode fields
         * like i_size. This allows the checks down below
-        * generic_file_aio_read() a chance of actually working.
+        * generic_file_read_iter() a chance of actually working.
         */
        ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
        if (ret < 0) {
@@ -2589,13 +2582,13 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
        }
        ocfs2_inode_unlock(inode, lock_level);
 
-       ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
-       trace_generic_file_aio_read_ret(ret);
+       ret = generic_file_read_iter(iocb, iter, iocb->ki_pos);
+       trace_generic_file_read_iter_ret(ret);
 
        /* buffered aio wouldn't have proper lock coverage today */
        BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
 
-       /* see ocfs2_file_aio_write */
+       /* see ocfs2_file_write_iter */
        if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
                rw_level = -1;
                have_alloc_sem = 0;
@@ -2683,8 +2676,8 @@ const struct file_operations ocfs2_fops = {
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
        .open           = ocfs2_file_open,
-       .aio_read       = ocfs2_file_aio_read,
-       .aio_write      = ocfs2_file_aio_write,
+       .read_iter      = ocfs2_file_read_iter,
+       .write_iter     = ocfs2_file_write_iter,
        .unlocked_ioctl = ocfs2_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ocfs2_compat_ioctl,
@@ -2731,8 +2724,8 @@ const struct file_operations ocfs2_fops_no_plocks = {
        .fsync          = ocfs2_sync_file,
        .release        = ocfs2_file_release,
        .open           = ocfs2_file_open,
-       .aio_read       = ocfs2_file_aio_read,
-       .aio_write      = ocfs2_file_aio_write,
+       .read_iter      = ocfs2_file_read_iter,
+       .write_iter     = ocfs2_file_write_iter,
        .unlocked_ioctl = ocfs2_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ocfs2_compat_ioctl,
index 1b60c62aa9d6fa62940c0a9b8f9889943604170e..67f08ba77260674c690e1b9121262a72a3327f6a 100644 (file)
@@ -1310,13 +1310,13 @@ DEFINE_OCFS2_FILE_OPS(ocfs2_file_release);
 
 DEFINE_OCFS2_FILE_OPS(ocfs2_sync_file);
 
-DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_write);
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_write_iter);
 
 DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_write);
 
 DEFINE_OCFS2_FILE_OPS(ocfs2_file_splice_read);
 
-DEFINE_OCFS2_FILE_OPS(ocfs2_file_aio_read);
+DEFINE_OCFS2_FILE_OPS(ocfs2_file_read_iter);
 
 DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_truncate_file);
 
@@ -1474,7 +1474,7 @@ TRACE_EVENT(ocfs2_prepare_inode_for_write,
                  __entry->direct_io, __entry->has_refcount)
 );
 
-DEFINE_OCFS2_INT_EVENT(generic_file_aio_read_ret);
+DEFINE_OCFS2_INT_EVENT(generic_file_read_iter_ret);
 
 /* End of trace events for fs/ocfs2/file.c. */
 
index 54d57d6ba68dd5b91df6cbc9269e1ccf3c05a950..0fe505b2cded1e899e59a33cb20001127cafb36e 100644 (file)
@@ -339,8 +339,8 @@ const struct file_operations omfs_file_operations = {
        .llseek = generic_file_llseek,
        .read = do_sync_read,
        .write = do_sync_write,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .mmap = generic_file_mmap,
        .fsync = generic_file_fsync,
        .splice_read = generic_file_splice_read,
index 9f8ef9b7674db1ca1004b682b40a6e5500dcfc24..8eaa1ba793fc188879d405e768a81aedbaa4bf76 100644 (file)
@@ -288,10 +288,14 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
 static unsigned long proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
 {
        struct proc_dir_entry *pde = PDE(file_inode(file));
-       int rv = -EIO;
-       unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+       unsigned long rv = -EIO;
+       unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) = NULL;
        if (use_pde(pde)) {
-               get_unmapped_area = pde->proc_fops->get_unmapped_area;
+#ifdef CONFIG_MMU
+               get_unmapped_area = current->mm->get_unmapped_area;
+#endif
+               if (pde->proc_fops->get_unmapped_area)
+                       get_unmapped_area = pde->proc_fops->get_unmapped_area;
                if (get_unmapped_area)
                        rv = get_unmapped_area(file, orig_addr, len, pgoff, flags);
                unuse_pde(pde);
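
The proc_reg_get_unmapped_area() fix above does two things: the return value becomes unsigned long (a get_unmapped_area handler returns either a mapping address or a negative errno packed into the same unsigned long, so truncating it through int corrupts both), and when the underlying proc_fops supplies no handler the code now falls back to current->mm->get_unmapped_area under CONFIG_MMU instead of failing with -EIO. A hedged sketch of how such a return value is consumed; demo_map_at is hypothetical.

static int demo_map_at(struct file *file, unsigned long len,
		       unsigned long pgoff, unsigned long flags)
{
	unsigned long addr;

	addr = current->mm->get_unmapped_area(file, 0, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))		/* a negative errno in disguise */
		return (int)addr;
	pr_debug("would map at %#lx\n", addr);
	return 0;
}
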
index 6b6a993b5c25a0ccb277bcf53b0c19489582f316..ffeb202ec942d3f3f83594d517e329bafa89ce98 100644 (file)
@@ -36,18 +36,10 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
        return NULL;
 }
 
-static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
-                               void *cookie)
-{
-       char *s = nd_get_link(nd);
-       if (!IS_ERR(s))
-               kfree(s);
-}
-
 static const struct inode_operations proc_self_inode_operations = {
        .readlink       = proc_self_readlink,
        .follow_link    = proc_self_follow_link,
-       .put_link       = proc_self_put_link,
+       .put_link       = kfree_put_link,
 };
 
 static unsigned self_inum;
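
The /proc/self change above drops the open-coded put_link helper in favour of the generic kfree_put_link(). Presumably that helper is equivalent to the code being removed; the sketch below is an assumption about its body, not quoted from the tree, though its signature is fixed by the .put_link slot it is assigned to.

/* Assumed to free the link text stashed in the nameidata by ->follow_link(),
 * exactly as the removed proc_self_put_link() did. */
void kfree_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	char *s = nd_get_link(nd);

	if (!IS_ERR(s))
		kfree(s);
}
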
index 7366e9d63cee7d8658e9fed949a2fe7b22519c6f..390bdab01c3c782cc14b55c6d1ef35ebddcc4197 100644 (file)
@@ -941,6 +941,8 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
                frame = pte_pfn(pte);
                flags = PM_PRESENT;
                page = vm_normal_page(vma, addr, pte);
+               if (pte_soft_dirty(pte))
+                       flags2 |= __PM_SOFT_DIRTY;
        } else if (is_swap_pte(pte)) {
                swp_entry_t entry;
                if (pte_swp_soft_dirty(pte))
@@ -960,7 +962,7 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
 
        if (page && !PageAnon(page))
                flags |= PM_FILE;
-       if ((vma->vm_flags & VM_SOFTDIRTY) || pte_soft_dirty(pte))
+       if ((vma->vm_flags & VM_SOFTDIRTY))
                flags2 |= __PM_SOFT_DIRTY;
 
        *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
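
The pagemap hunk above narrows the soft-dirty reporting: pte_soft_dirty() is only meaningful for present ptes, swap ptes carry the bit via pte_swp_soft_dirty() (tested earlier in the same function), and VM_SOFTDIRTY on the vma marks the whole mapping regardless. A condensed restatement of the resulting predicate; demo_soft_dirty is a hypothetical helper, not part of the patch.

static int demo_soft_dirty(struct vm_area_struct *vma, pte_t pte)
{
	if (vma->vm_flags & VM_SOFTDIRTY)
		return 1;
	if (pte_present(pte))
		return pte_soft_dirty(pte);
	if (is_swap_pte(pte))
		return pte_swp_soft_dirty(pte);
	return 0;
}
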
index 4884ac5ae9bea224517e384588847a44f4f8e462..c4d8572a37dfcf89be3e0f1d981a2186b96a5fc4 100644 (file)
@@ -39,9 +39,9 @@ const struct address_space_operations ramfs_aops = {
 
 const struct file_operations ramfs_file_operations = {
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = noop_fsync,
        .splice_read    = generic_file_splice_read,
index 8d5b438cc18859868fc0cf2206fa51d147ba9eb4..f2487c3cc3f33bd004075b633a4204d11a839a5b 100644 (file)
@@ -39,9 +39,9 @@ const struct file_operations ramfs_file_operations = {
        .mmap                   = ramfs_nommu_mmap,
        .get_unmapped_area      = ramfs_nommu_get_unmapped_area,
        .read                   = do_sync_read,
-       .aio_read               = generic_file_aio_read,
+       .read_iter              = generic_file_read_iter,
        .write                  = do_sync_write,
-       .aio_write              = generic_file_aio_write,
+       .write_iter             = generic_file_write_iter,
        .fsync                  = noop_fsync,
        .splice_read            = generic_file_splice_read,
        .splice_write           = generic_file_splice_write,
index e3cd280b158c1132a98c1b53ab2ab8bcdb14de02..296b5711a78b616523215d6e0b8e621433a9b32e 100644 (file)
@@ -29,7 +29,7 @@ typedef ssize_t (*iov_fn_t)(struct kiocb *, const struct iovec *,
 const struct file_operations generic_ro_fops = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .mmap           = generic_file_readonly_mmap,
        .splice_read    = generic_file_splice_read,
 };
@@ -359,6 +359,29 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
        return count > MAX_RW_COUNT ? MAX_RW_COUNT : count;
 }
 
+ssize_t do_aio_read(struct kiocb *kiocb, const struct iovec *iov,
+                   unsigned long nr_segs, loff_t pos)
+{
+       struct file *file = kiocb->ki_filp;
+
+       if (file->f_op->read_iter) {
+               size_t count;
+               struct iov_iter iter;
+               int ret;
+
+               count = 0;
+               ret = generic_segment_checks(iov, &nr_segs, &count,
+                                            VERIFY_WRITE);
+               if (ret)
+                       return ret;
+
+               iov_iter_init(&iter, iov, nr_segs, count, 0);
+               return file->f_op->read_iter(kiocb, &iter, pos);
+       }
+
+       return file->f_op->aio_read(kiocb, iov, nr_segs, pos);
+}
+
 ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = buf, .iov_len = len };
@@ -369,7 +392,7 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp
        kiocb.ki_pos = *ppos;
        kiocb.ki_nbytes = len;
 
-       ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
+       ret = do_aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&kiocb);
        *ppos = kiocb.ki_pos;
@@ -384,7 +407,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 
        if (!(file->f_mode & FMODE_READ))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->read && !file->f_op->aio_read))
+       if (!file_readable(file))
                return -EINVAL;
        if (unlikely(!access_ok(VERIFY_WRITE, buf, count)))
                return -EFAULT;
@@ -408,6 +431,29 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
 
 EXPORT_SYMBOL(vfs_read);
 
+ssize_t do_aio_write(struct kiocb *kiocb, const struct iovec *iov,
+                    unsigned long nr_segs, loff_t pos)
+{
+       struct file *file = kiocb->ki_filp;
+
+       if (file->f_op->write_iter) {
+               size_t count;
+               struct iov_iter iter;
+               int ret;
+
+               count = 0;
+               ret = generic_segment_checks(iov, &nr_segs, &count,
+                                            VERIFY_READ);
+               if (ret)
+                       return ret;
+
+               iov_iter_init(&iter, iov, nr_segs, count, 0);
+               return file->f_op->write_iter(kiocb, &iter, pos);
+       }
+
+       return file->f_op->aio_write(kiocb, iov, nr_segs, pos);
+}
+
 ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
 {
        struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
@@ -418,7 +464,7 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof
        kiocb.ki_pos = *ppos;
        kiocb.ki_nbytes = len;
 
-       ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
+       ret = do_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
        if (-EIOCBQUEUED == ret)
                ret = wait_on_sync_kiocb(&kiocb);
        *ppos = kiocb.ki_pos;
@@ -433,7 +479,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
        const char __user *p;
        ssize_t ret;
 
-       if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+       if (!file_writable(file))
                return -EINVAL;
 
        old_fs = get_fs();
@@ -460,7 +506,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
 
        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write))
+       if (!file_writable(file))
                return -EINVAL;
        if (unlikely(!access_ok(VERIFY_READ, buf, count)))
                return -EFAULT;
@@ -745,10 +791,12 @@ static ssize_t do_readv_writev(int type, struct file *file,
        fnv = NULL;
        if (type == READ) {
                fn = file->f_op->read;
-               fnv = file->f_op->aio_read;
+               if (file->f_op->aio_read || file->f_op->read_iter)
+                       fnv = do_aio_read;
        } else {
                fn = (io_fn_t)file->f_op->write;
-               fnv = file->f_op->aio_write;
+               if (file->f_op->aio_write || file->f_op->write_iter)
+                       fnv = do_aio_write;
                file_start_write(file);
        }
 
@@ -778,7 +826,7 @@ ssize_t vfs_readv(struct file *file, const struct iovec __user *vec,
 {
        if (!(file->f_mode & FMODE_READ))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
+       if (!file_readable(file))
                return -EINVAL;
 
        return do_readv_writev(READ, file, vec, vlen, pos);
@@ -791,7 +839,7 @@ ssize_t vfs_writev(struct file *file, const struct iovec __user *vec,
 {
        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;
-       if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
+       if (!file_writable(file))
                return -EINVAL;
 
        return do_readv_writev(WRITE, file, vec, vlen, pos);
@@ -927,10 +975,12 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
        fnv = NULL;
        if (type == READ) {
                fn = file->f_op->read;
-               fnv = file->f_op->aio_read;
+               if (file->f_op->aio_read || file->f_op->read_iter)
+                       fnv = do_aio_read;
        } else {
                fn = (io_fn_t)file->f_op->write;
-               fnv = file->f_op->aio_write;
+               if (file->f_op->aio_write || file->f_op->write_iter)
+                       fnv = do_aio_write;
                file_start_write(file);
        }
 
@@ -965,7 +1015,7 @@ static size_t compat_readv(struct file *file,
                goto out;
 
        ret = -EINVAL;
-       if (!file->f_op || (!file->f_op->aio_read && !file->f_op->read))
+       if (!file_readable(file))
                goto out;
 
        ret = compat_do_readv_writev(READ, file, vec, vlen, pos);
@@ -1032,7 +1082,7 @@ static size_t compat_writev(struct file *file,
                goto out;
 
        ret = -EINVAL;
-       if (!file->f_op || (!file->f_op->aio_write && !file->f_op->write))
+       if (!file_writable(file))
                goto out;
 
        ret = compat_do_readv_writev(WRITE, file, vec, vlen, pos);
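The converted checks above rely on two helpers, file_readable() and file_writable(), whose definitions do not appear in this excerpt. Judging from the call sites and the new do_aio_read()/do_aio_write() fallbacks, they presumably accept a file if any of the synchronous, aio, or iter entry points is present; the bodies below are a sketch under that assumption, not the patch's actual definitions:

static inline bool file_readable(struct file *file)
{
        return file->f_op && (file->f_op->read ||
                              file->f_op->aio_read ||
                              file->f_op->read_iter);
}

static inline bool file_writable(struct file *file)
{
        return file->f_op && (file->f_op->write ||
                              file->f_op->aio_write ||
                              file->f_op->write_iter);
}

Helpers of this shape keep do_aio_read()/do_aio_write() safe: vfs_read()/vfs_write() only fall through to the synchronous wrappers when the plain ->read/->write methods are absent, so at least one of ->aio_* or ->*_iter is guaranteed to be set.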
index dcaafcfc23b007c845f490a4f293cc485c324b51..f98feb229ec4646613463a305f0a3db275ad1e8d 100644 (file)
@@ -245,8 +245,8 @@ const struct file_operations reiserfs_file_operations = {
        .open = reiserfs_file_open,
        .release = reiserfs_file_release,
        .fsync = reiserfs_sync_file,
-       .aio_read = generic_file_aio_read,
-       .aio_write = generic_file_aio_write,
+       .read_iter = generic_file_read_iter,
+       .write_iter = generic_file_write_iter,
        .splice_read = generic_file_splice_read,
        .splice_write = generic_file_splice_write,
        .llseek = generic_file_llseek,
index ad62bdbb451ee77f1cfcee9be8cae70537168667..6d652af02c5b2c7f8bfc449451c909e0ae1ec6cf 100644 (file)
@@ -3083,14 +3083,13 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 /* We thank Mingming Cao for helping us understand in great detail what
    to do in this section of the code. */
 static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
-                                 const struct iovec *iov, loff_t offset,
-                                 unsigned long nr_segs)
+                                 struct iov_iter *iter, loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                  reiserfs_get_blocks_direct_io);
 
        /*
@@ -3099,7 +3098,7 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
         */
        if (unlikely((rw & WRITE) && ret < 0)) {
                loff_t isize = i_size_read(inode);
-               loff_t end = offset + iov_length(iov, nr_segs);
+               loff_t end = offset + iov_iter_count(iter);
 
                if ((end > isize) && inode_newsize_ok(inode, isize) == 0) {
                        truncate_setsize(inode, isize);
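This is the first of several ->direct_IO conversions in this section: the (const struct iovec *, unsigned long nr_segs) pair is replaced by a single struct iov_iter *, and iov_length() queries become iov_iter_count(). A sketch of the converted shape with hypothetical myfs_* names (myfs_get_block stands in for the filesystem's real get_block callback and is not part of the patch):

static ssize_t myfs_direct_IO(int rw, struct kiocb *iocb,
                              struct iov_iter *iter, loff_t offset)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;

        /* iov_iter_count() replaces the old iov_length(iov, nr_segs) */
        pr_debug("%s: direct %s of %zu bytes at %lld\n", __func__,
                 (rw & WRITE) ? "write" : "read",
                 iov_iter_count(iter), (long long)offset);

        return blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                  myfs_get_block);
}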
index f373bde8f545da481ba0a7caa873271dd599b30d..f8a9e2bf8d8bf3b04b9a68133cbbfb62e4d2f49e 100644 (file)
@@ -73,7 +73,7 @@ static int romfs_mmap(struct file *file, struct vm_area_struct *vma)
 const struct file_operations romfs_ro_fops = {
        .llseek                 = generic_file_llseek,
        .read                   = do_sync_read,
-       .aio_read               = generic_file_aio_read,
+       .read_iter              = generic_file_read_iter,
        .splice_read            = generic_file_splice_read,
        .mmap                   = romfs_mmap,
        .get_unmapped_area      = romfs_get_unmapped_area,
index c219e733f55330741962173e994ac8d4e894a70f..083dc0ac91408870254cac60ed4b06580deba610 100644 (file)
@@ -94,7 +94,7 @@ retry:
 
 int fd_statfs(int fd, struct kstatfs *st)
 {
-       struct fd f = fdget(fd);
+       struct fd f = fdget_raw(fd);
        int error = -EBADF;
        if (f.file) {
                error = vfs_statfs(&f.file->f_path, st);
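The only change here is fdget() to fdget_raw(). fdget() refuses files opened with O_PATH (FMODE_PATH), while fdget_raw() accepts them, so the apparent effect is that fstatfs() now works on O_PATH descriptors. A small user-space illustration of that behaviour, assuming Linux with glibc:

#define _GNU_SOURCE             /* for O_PATH */
#include <fcntl.h>
#include <stdio.h>
#include <sys/statfs.h>

int main(void)
{
        struct statfs st;
        int fd = open("/", O_PATH | O_DIRECTORY);

        if (fd < 0 || fstatfs(fd, &st) != 0) {
                perror("open/fstatfs");
                return 1;
        }
        printf("f_type=0x%lx f_bsize=%ld\n",
               (unsigned long)st.f_type, (long)st.f_bsize);
        return 0;
}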
index 9d4dc6831792a23270148c2d26b0423f656b505c..ff4b363ba5c95c78bbe92a4203dbd86e338c28de 100644 (file)
@@ -22,9 +22,9 @@
 const struct file_operations sysv_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .splice_read    = generic_file_splice_read,
index 6b4947f75af7a00599c129b2d4e5a97a27222f4a..ea41649e4ca55e299853bb2dff7592c5ad171f2c 100644 (file)
@@ -192,8 +192,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
        struct ubifs_dent_node *dent;
        struct ubifs_info *c = dir->i_sb->s_fs_info;
 
-       dbg_gen("'%.*s' in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, dir->i_ino);
+       dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
 
        if (dentry->d_name.len > UBIFS_MAX_NLEN)
                return ERR_PTR(-ENAMETOOLONG);
@@ -225,8 +224,8 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
                 * checking.
                 */
                err = PTR_ERR(inode);
-               ubifs_err("dead directory entry '%.*s', error %d",
-                         dentry->d_name.len, dentry->d_name.name, err);
+               ubifs_err("dead directory entry '%pd', error %d",
+                         dentry, err);
                ubifs_ro_mode(c, err);
                goto out;
        }
@@ -260,8 +259,8 @@ static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
         * parent directory inode.
         */
 
-       dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
+       dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+               dentry, mode, dir->i_ino);
 
        err = ubifs_budget_space(c, &req);
        if (err)
@@ -509,8 +508,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
         * changing the parent inode.
         */
 
-       dbg_gen("dent '%.*s' to ino %lu (nlink %d) in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, inode->i_ino,
+       dbg_gen("dent '%pd' to ino %lu (nlink %d) in dir ino %lu",
+               dentry, inode->i_ino,
                inode->i_nlink, dir->i_ino);
        ubifs_assert(mutex_is_locked(&dir->i_mutex));
        ubifs_assert(mutex_is_locked(&inode->i_mutex));
@@ -566,8 +565,8 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
         * deletions.
         */
 
-       dbg_gen("dent '%.*s' from ino %lu (nlink %d) in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, inode->i_ino,
+       dbg_gen("dent '%pd' from ino %lu (nlink %d) in dir ino %lu",
+               dentry, inode->i_ino,
                inode->i_nlink, dir->i_ino);
        ubifs_assert(mutex_is_locked(&dir->i_mutex));
        ubifs_assert(mutex_is_locked(&inode->i_mutex));
@@ -656,8 +655,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
         * because we have extra space reserved for deletions.
         */
 
-       dbg_gen("directory '%.*s', ino %lu in dir ino %lu", dentry->d_name.len,
-               dentry->d_name.name, inode->i_ino, dir->i_ino);
+       dbg_gen("directory '%pd', ino %lu in dir ino %lu", dentry,
+               inode->i_ino, dir->i_ino);
        ubifs_assert(mutex_is_locked(&dir->i_mutex));
        ubifs_assert(mutex_is_locked(&inode->i_mutex));
        err = check_dir_empty(c, dentry->d_inode);
@@ -716,8 +715,8 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
         * directory inode.
         */
 
-       dbg_gen("dent '%.*s', mode %#hx in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, mode, dir->i_ino);
+       dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
+               dentry, mode, dir->i_ino);
 
        err = ubifs_budget_space(c, &req);
        if (err)
@@ -778,8 +777,7 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
         * directory inode.
         */
 
-       dbg_gen("dent '%.*s' in dir ino %lu",
-               dentry->d_name.len, dentry->d_name.name, dir->i_ino);
+       dbg_gen("dent '%pd' in dir ino %lu", dentry, dir->i_ino);
 
        if (!new_valid_dev(rdev))
                return -EINVAL;
@@ -853,8 +851,8 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
         * directory inode.
         */
 
-       dbg_gen("dent '%.*s', target '%s' in dir ino %lu", dentry->d_name.len,
-               dentry->d_name.name, symname, dir->i_ino);
+       dbg_gen("dent '%pd', target '%s' in dir ino %lu", dentry,
+               symname, dir->i_ino);
 
        if (len > UBIFS_MAX_INO_DATA)
                return -ENAMETOOLONG;
@@ -979,10 +977,9 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
         * separately.
         */
 
-       dbg_gen("dent '%.*s' ino %lu in dir ino %lu to dent '%.*s' in dir ino %lu",
-               old_dentry->d_name.len, old_dentry->d_name.name,
-               old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len,
-               new_dentry->d_name.name, new_dir->i_ino);
+       dbg_gen("dent '%pd' ino %lu in dir ino %lu to dent '%pd' in dir ino %lu",
+               old_dentry, old_inode->i_ino, old_dir->i_ino,
+               new_dentry, new_dir->i_ino);
        ubifs_assert(mutex_is_locked(&old_dir->i_mutex));
        ubifs_assert(mutex_is_locked(&new_dir->i_mutex));
        if (unlink)
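Every dbg_gen()/ubifs_err() change in this file is the same mechanical substitution: the '%.*s' format with the d_name.len/d_name.name pair becomes the '%pd' vsprintf extension, which prints the dentry's name from the pointer alone. A minimal kernel-context comparison of the two styles (the helper is illustrative only, not part of the patch):

static void show_dentry_name(struct dentry *dentry)
{
        /* old style: length and name passed as separate arguments */
        pr_debug("old: '%.*s'\n", dentry->d_name.len, dentry->d_name.name);

        /* new style: %pd takes the dentry itself */
        pr_debug("new: '%pd'\n", dentry);
}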
index 123c79b7261ef8092e57477bd1141d365a0a2a3f..22924e048ac04aab77dba789e096a093f118e50f 100644 (file)
@@ -44,7 +44,7 @@
  * 'ubifs_writepage()' we are only guaranteed that the page is locked.
  *
  * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
- * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
+ * read-ahead path does not lock it ("sys_read -> generic_file_read_iter ->
  * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
  * set as well. However, UBIFS disables readahead.
  */
@@ -1396,8 +1396,8 @@ static int update_mctime(struct ubifs_info *c, struct inode *inode)
        return 0;
 }
 
-static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos)
+static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                               loff_t pos)
 {
        int err;
        struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -1407,7 +1407,7 @@ static ssize_t ubifs_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (err)
                return err;
 
-       return generic_file_aio_write(iocb, iov, nr_segs, pos);
+       return generic_file_write_iter(iocb, iter, pos);
 }
 
 static int ubifs_set_page_dirty(struct page *page)
@@ -1583,8 +1583,8 @@ const struct file_operations ubifs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = generic_file_aio_read,
-       .aio_write      = ubifs_aio_write,
+       .read_iter       = generic_file_read_iter,
+       .write_iter      = ubifs_write_iter,
        .mmap           = ubifs_file_mmap,
        .fsync          = ubifs_fsync,
        .unlocked_ioctl = ubifs_ioctl,
index 76ca53cd3eeec52c53ab064221a6519d8c70c80c..9718da86ad01a804db1e3c7e2a2ca6923f6299a3 100644 (file)
@@ -668,8 +668,7 @@ int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
        ubifs_assert(!wbuf->used);
 
        for (i = 0; ; i++) {
-               int space_before = c->leb_size - wbuf->offs - wbuf->used;
-               int space_after;
+               int space_before, space_after;
 
                cond_resched();
 
index afaad07f3b29f4465ba2b3e0702a947f7b60c071..0e045e75abd8ab7ac1a17cd502c9ff0c22d691e3 100644 (file)
@@ -933,10 +933,8 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        int move = (old_dir != new_dir);
        struct ubifs_inode *uninitialized_var(new_ui);
 
-       dbg_jnl("dent '%.*s' in dir ino %lu to dent '%.*s' in dir ino %lu",
-               old_dentry->d_name.len, old_dentry->d_name.name,
-               old_dir->i_ino, new_dentry->d_name.len,
-               new_dentry->d_name.name, new_dir->i_ino);
+       dbg_jnl("dent '%pd' in dir ino %lu to dent '%pd' in dir ino %lu",
+               old_dentry, old_dir->i_ino, new_dentry, new_dir->i_ino);
        ubifs_assert(ubifs_inode(old_dir)->data_len == 0);
        ubifs_assert(ubifs_inode(new_dir)->data_len == 0);
        ubifs_assert(mutex_is_locked(&ubifs_inode(old_dir)->ui_mutex));
index 0f7139bdb2c20370f7f81489b14db155128c2f62..5e0a63b1b0d54a24d407c08cf1f3fef25008e0f9 100644 (file)
@@ -303,8 +303,8 @@ int ubifs_setxattr(struct dentry *dentry, const char *name,
        union ubifs_key key;
        int err, type;
 
-       dbg_gen("xattr '%s', host ino %lu ('%.*s'), size %zd", name,
-               host->i_ino, dentry->d_name.len, dentry->d_name.name, size);
+       dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", name,
+               host->i_ino, dentry, size);
        ubifs_assert(mutex_is_locked(&host->i_mutex));
 
        if (size > UBIFS_MAX_INO_DATA)
@@ -367,8 +367,8 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf,
        union ubifs_key key;
        int err;
 
-       dbg_gen("xattr '%s', ino %lu ('%.*s'), buf size %zd", name,
-               host->i_ino, dentry->d_name.len, dentry->d_name.name, size);
+       dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
+               host->i_ino, dentry, size);
 
        err = check_namespace(&nm);
        if (err < 0)
@@ -426,8 +426,8 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size)
        int err, len, written = 0;
        struct qstr nm = { .name = NULL };
 
-       dbg_gen("ino %lu ('%.*s'), buffer size %zd", host->i_ino,
-               dentry->d_name.len, dentry->d_name.name, size);
+       dbg_gen("ino %lu ('%pd'), buffer size %zd", host->i_ino,
+               dentry, size);
 
        len = host_ui->xattr_names + host_ui->xattr_cnt;
        if (!buffer)
@@ -529,8 +529,8 @@ int ubifs_removexattr(struct dentry *dentry, const char *name)
        union ubifs_key key;
        int err;
 
-       dbg_gen("xattr '%s', ino %lu ('%.*s')", name,
-               host->i_ino, dentry->d_name.len, dentry->d_name.name);
+       dbg_gen("xattr '%s', ino %lu ('%pd')", name,
+               host->i_ino, dentry);
        ubifs_assert(mutex_is_locked(&host->i_mutex));
 
        err = check_namespace(&nm);
index c02a27a19c6df0984eb3ea13fc5a06c5e251aaa5..9985beecffcabbe5fd97680ef1902e55f3d02ee7 100644 (file)
@@ -119,8 +119,7 @@ static int udf_adinicb_write_end(struct file *file,
 }
 
 static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
-                                    const struct iovec *iov,
-                                    loff_t offset, unsigned long nr_segs)
+                                    struct iov_iter *iter, loff_t offset)
 {
        /* Fallback to buffered I/O. */
        return 0;
@@ -134,8 +133,8 @@ const struct address_space_operations udf_adinicb_aops = {
        .direct_IO      = udf_adinicb_direct_IO,
 };
 
-static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                                 unsigned long nr_segs, loff_t ppos)
+static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                  loff_t ppos)
 {
        ssize_t retval;
        struct file *file = iocb->ki_filp;
@@ -169,7 +168,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        } else
                up_write(&iinfo->i_data_sem);
 
-       retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
+       retval = generic_file_write_iter(iocb, iter, ppos);
        if (retval > 0)
                mark_inode_dirty(inode);
 
@@ -243,12 +242,12 @@ static int udf_release_file(struct inode *inode, struct file *filp)
 
 const struct file_operations udf_file_operations = {
        .read                   = do_sync_read,
-       .aio_read               = generic_file_aio_read,
+       .read_iter              = generic_file_read_iter,
        .unlocked_ioctl         = udf_ioctl,
        .open                   = generic_file_open,
        .mmap                   = generic_file_mmap,
        .write                  = do_sync_write,
-       .aio_write              = udf_file_aio_write,
+       .write_iter             = udf_file_write_iter,
        .release                = udf_release_file,
        .fsync                  = generic_file_fsync,
        .splice_read            = generic_file_splice_read,
index 062b7925bca04c02949919ac37aab790d7b982a2..986e11ad176b2040f8b61c1ba318a69ecf736aa2 100644 (file)
@@ -216,19 +216,17 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
        return ret;
 }
 
-static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
-                            const struct iovec *iov,
-                            loff_t offset, unsigned long nr_segs)
+static ssize_t udf_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
+                            loff_t offset)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;
 
-       ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
-                                 udf_get_block);
+       ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
        if (unlikely(ret < 0 && (rw & WRITE)))
-               udf_write_failed(mapping, offset + iov_length(iov, nr_segs));
+               udf_write_failed(mapping, offset + iov_iter_count(iter));
        return ret;
 }
 
index 33afa20d450982eafb4e1bcc77193cce152270d1..e155e4c4af879c46b3bbb682366b1cc99f981aed 100644 (file)
@@ -36,9 +36,9 @@
 const struct file_operations ufs_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
-       .aio_read       = generic_file_aio_read,
+       .read_iter      = generic_file_read_iter,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .open           = generic_file_open,
        .fsync          = generic_file_fsync,
index 0719e4db93f274de9af844f3ef66ce387b02a716..33a69fabfd83306b8f6b7fb7f48d328f3e81fa75 100644 (file)
@@ -72,6 +72,7 @@ xfs-y                         += xfs_alloc.o \
                                   xfs_dir2_leaf.o \
                                   xfs_dir2_node.o \
                                   xfs_dir2_sf.o \
+                                  xfs_dquot_buf.o \
                                   xfs_ialloc.o \
                                   xfs_ialloc_btree.o \
                                   xfs_icreate_item.o \
@@ -103,7 +104,11 @@ xfs-$(CONFIG_XFS_QUOTA)            += xfs_dquot.o \
                                   xfs_qm_bhv.o \
                                   xfs_qm.o \
                                   xfs_quotaops.o
-xfs-$(CONFIG_XFS_RT)           += xfs_rtalloc.o
+
+# xfs_rtbitmap is shared with libxfs
+xfs-$(CONFIG_XFS_RT)           += xfs_rtalloc.o \
+                                  xfs_rtbitmap.o
+
 xfs-$(CONFIG_XFS_POSIX_ACL)    += xfs_acl.o
 xfs-$(CONFIG_PROC_FS)          += xfs_stats.o
 xfs-$(CONFIG_SYSCTL)           += xfs_sysctl.o
index 0e2f37efedd0547a05b5bec4c0109db83192bb72..370eb3e121d1b2f292c65ddabd660ad08ae65537 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_acl.h"
-#include "xfs_attr.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inode.h"
 #include "xfs_ag.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_acl.h"
+#include "xfs_attr.h"
 #include "xfs_trace.h"
 #include <linux/slab.h>
 #include <linux/xattr.h>
index 1cb740afd674e8fd3f5ce0a3f47bfc00f8b36471..3fc109819c34eb7a42973d7640525fefe66cc08e 100644 (file)
@@ -128,8 +128,6 @@ typedef struct xfs_agf {
 extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
                        xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
 
-extern const struct xfs_buf_ops xfs_agf_buf_ops;
-
 /*
  * Size of the unlinked inode hash table in the agi.
  */
@@ -191,8 +189,6 @@ typedef struct xfs_agi {
 extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
                                xfs_agnumber_t agno, struct xfs_buf **bpp);
 
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-
 /*
  * The third a.g. block contains the a.g. freelist, an array
  * of block pointers to blocks owned by the allocation btree code.
index 5a1393f5e020739a648612002f7ae9a3f704d032..bcf16528bac5dc87302294e0008557827978269a 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
 #include "xfs_error.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
+#include "xfs_log.h"
 
 struct workqueue_struct *xfs_alloc_wq;
 
index 99d0a61015587f7b8400bb8519bec6717b91c1cf..feacb061bab78bb3492ee0ff37eed5cc0fb95894 100644 (file)
@@ -231,7 +231,4 @@ xfs_alloc_get_rec(
        xfs_extlen_t            *len,   /* output: length of extent */
        int                     *stat); /* output: success/failure */
 
-extern const struct xfs_buf_ops xfs_agf_buf_ops;
-extern const struct xfs_buf_ops xfs_agfl_buf_ops;
-
 #endif /* __XFS_ALLOC_H__ */
index cafc90251d1993e76c10d0b6c6dae9d875dcd21e..698587f6c60a68f84969acb74b6f70f6bce9c006 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
 #include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 
 
 STATIC struct xfs_btree_cur *
index e3a3f7424192ecd15a21169d94817e507cc1e6af..45e189e7e81c31b18a0de124cd4d256407d2a120 100644 (file)
@@ -26,39 +26,6 @@ struct xfs_buf;
 struct xfs_btree_cur;
 struct xfs_mount;
 
-/*
- * There are two on-disk btrees, one sorted by blockno and one sorted
- * by blockcount and blockno.  All blocks look the same to make the code
- * simpler; if we have time later, we'll make the optimizations.
- */
-#define        XFS_ABTB_MAGIC          0x41425442      /* 'ABTB' for bno tree */
-#define        XFS_ABTB_CRC_MAGIC      0x41423342      /* 'AB3B' */
-#define        XFS_ABTC_MAGIC          0x41425443      /* 'ABTC' for cnt tree */
-#define        XFS_ABTC_CRC_MAGIC      0x41423343      /* 'AB3C' */
-
-/*
- * Data record/key structure
- */
-typedef struct xfs_alloc_rec {
-       __be32          ar_startblock;  /* starting block number */
-       __be32          ar_blockcount;  /* count of free blocks */
-} xfs_alloc_rec_t, xfs_alloc_key_t;
-
-typedef struct xfs_alloc_rec_incore {
-       xfs_agblock_t   ar_startblock;  /* starting block number */
-       xfs_extlen_t    ar_blockcount;  /* count of free blocks */
-} xfs_alloc_rec_incore_t;
-
-/* btree pointer type */
-typedef __be32 xfs_alloc_ptr_t;
-
-/*
- * Block numbers in the AG:
- * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
- */
-#define        XFS_BNO_BLOCK(mp)       ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1))
-#define        XFS_CNT_BLOCK(mp)       ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
-
 /*
  * Btree block header size depends on a superblock flag.
  */
@@ -95,6 +62,4 @@ extern struct xfs_btree_cur *xfs_allocbt_init_cursor(struct xfs_mount *,
                xfs_agnumber_t, xfs_btnum_t);
 extern int xfs_allocbt_maxrecs(struct xfs_mount *, int, int);
 
-extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
-
 #endif /* __XFS_ALLOC_BTREE_H__ */
index e51e581454e93113c7ead8d81136ab82bd29da94..20ba95e6096689e0f8c1dcf9b938696f53ab2b53 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_trans.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
@@ -31,6 +32,8 @@
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
 #include <linux/aio.h>
 #include <linux/gfp.h>
 #include <linux/mpage.h>
@@ -333,7 +336,7 @@ xfs_map_blocks(
 
        if (type == XFS_IO_DELALLOC &&
            (!nimaps || isnullstartblock(imap->br_startblock))) {
-               error = xfs_iomap_write_allocate(ip, offset, count, imap);
+               error = xfs_iomap_write_allocate(ip, offset, imap);
                if (!error)
                        trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
                return -XFS_ERROR(error);
@@ -1413,9 +1416,8 @@ STATIC ssize_t
 xfs_vm_direct_IO(
        int                     rw,
        struct kiocb            *iocb,
-       const struct iovec      *iov,
-       loff_t                  offset,
-       unsigned long           nr_segs)
+       struct iov_iter         *iter,
+       loff_t                  offset)
 {
        struct inode            *inode = iocb->ki_filp->f_mapping->host;
        struct block_device     *bdev = xfs_find_bdev_for_inode(inode);
@@ -1423,7 +1425,7 @@ xfs_vm_direct_IO(
        ssize_t                 ret;
 
        if (rw & WRITE) {
-               size_t size = iov_length(iov, nr_segs);
+               size_t size = iov_iter_count(iter);
 
                /*
                 * We cannot preallocate a size update transaction here as we
@@ -1435,15 +1437,13 @@ xfs_vm_direct_IO(
                if (offset + size > XFS_I(inode)->i_d.di_size)
                        ioend->io_isdirect = 1;
 
-               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
-                                           offset, nr_segs,
+               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
                                            xfs_get_blocks_direct,
                                            xfs_end_io_direct_write, NULL, 0);
                if (ret != -EIOCBQUEUED && iocb->private)
                        goto out_destroy_ioend;
        } else {
-               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
-                                           offset, nr_segs,
+               ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
                                            xfs_get_blocks_direct,
                                            NULL, NULL, 0);
        }
index ddcf2267ffa6fdf1bcf33cf7439c6b379472c2b4..b86127072ac3c2b9bd201b34cf208c3f1735cd68 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_alloc.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_attr_remote.h"
@@ -41,6 +42,7 @@
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
+#include "xfs_dinode.h"
 
 /*
  * xfs_attr.c
index bb24b07cbedb3d46a6286e8e169a734ad9fa3baa..f33fb62b7f17d36c3cba87eac0cedaae8475fa49 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
+#include "xfs_inode.h"
 #include "xfs_alloc.h"
-#include "xfs_btree.h"
 #include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
@@ -41,7 +39,7 @@
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
-#include "xfs_trans_priv.h"
+#include "xfs_dinode.h"
 
 /*
  * Look at all the extents for this logical region,
index 86db20a9cc02b5df2dd7ecb3c44eca39d7498dd7..a0f90193a247bfce514fdc5b13b2aafa6bc98ca4 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_remote.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 
 /*
index c1022138c7e6f3261819a0c52618b6bb1a9cf34f..3ec5ec0b86789004428591b824d982a16262a829 100644 (file)
 #ifndef __XFS_ATTR_LEAF_H__
 #define        __XFS_ATTR_LEAF_H__
 
-/*
- * Attribute storage layout, internal structure, access macros, etc.
- *
- * Attribute lists are structured around Btrees where all the data
- * elements are in the leaf nodes.  Attribute names are hashed into an int,
- * then that int is used as the index into the Btree.  Since the hashval
- * of an attribute name may not be unique, we may have duplicate keys.  The
- * internal links in the Btree are logical block offsets into the file.
- */
-
 struct attrlist;
 struct attrlist_cursor_kern;
 struct xfs_attr_list_context;
@@ -38,226 +28,6 @@ struct xfs_da_state_blk;
 struct xfs_inode;
 struct xfs_trans;
 
-/*========================================================================
- * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
- *========================================================================*/
-
-/*
- * This is the structure of the leaf nodes in the Btree.
- *
- * Struct leaf_entry's are packed from the top.  Name/values grow from the
- * bottom but are not packed.  The freemap contains run-length-encoded entries
- * for the free bytes after the leaf_entry's, but only the N largest such,
- * smaller runs are dropped.  When the freemap doesn't show enough space
- * for an allocation, we compact the name/value area and try again.  If we
- * still don't have enough space, then we have to split the block.  The
- * name/value structs (both local and remote versions) must be 32bit aligned.
- *
- * Since we have duplicate hash keys, for each key that matches, compare
- * the actual name string.  The root and intermediate node search always
- * takes the first-in-the-block key match found, so we should only have
- * to work "forw"ard.  If none matches, continue with the "forw"ard leaf
- * nodes until the hash key changes or the attribute name is found.
- *
- * We store the fact that an attribute is a ROOT/USER/SECURE attribute in
- * the leaf_entry.  The namespaces are independent only because we also look
- * at the namespace bit when we are looking for a matching attribute name.
- *
- * We also store an "incomplete" bit in the leaf_entry.  It shows that an
- * attribute is in the middle of being created and should not be shown to
- * the user if we crash during the time that the bit is set.  We clear the
- * bit when we have finished setting up the attribute.  We do this because
- * we cannot create some large attributes inside a single transaction, and we
- * need some indication that we weren't finished if we crash in the middle.
- */
-#define XFS_ATTR_LEAF_MAPSIZE  3       /* how many freespace slots */
-
-typedef struct xfs_attr_leaf_map {     /* RLE map of free bytes */
-       __be16  base;                     /* base of free region */
-       __be16  size;                     /* length of free region */
-} xfs_attr_leaf_map_t;
-
-typedef struct xfs_attr_leaf_hdr {     /* constant-structure header block */
-       xfs_da_blkinfo_t info;          /* block type, links, etc. */
-       __be16  count;                  /* count of active leaf_entry's */
-       __be16  usedbytes;              /* num bytes of names/values stored */
-       __be16  firstused;              /* first used byte in name area */
-       __u8    holes;                  /* != 0 if blk needs compaction */
-       __u8    pad1;
-       xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
-                                       /* N largest free regions */
-} xfs_attr_leaf_hdr_t;
-
-typedef struct xfs_attr_leaf_entry {   /* sorted on key, not name */
-       __be32  hashval;                /* hash value of name */
-       __be16  nameidx;                /* index into buffer of name/value */
-       __u8    flags;                  /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
-       __u8    pad2;                   /* unused pad byte */
-} xfs_attr_leaf_entry_t;
-
-typedef struct xfs_attr_leaf_name_local {
-       __be16  valuelen;               /* number of bytes in value */
-       __u8    namelen;                /* length of name bytes */
-       __u8    nameval[1];             /* name/value bytes */
-} xfs_attr_leaf_name_local_t;
-
-typedef struct xfs_attr_leaf_name_remote {
-       __be32  valueblk;               /* block number of value bytes */
-       __be32  valuelen;               /* number of bytes in value */
-       __u8    namelen;                /* length of name bytes */
-       __u8    name[1];                /* name bytes */
-} xfs_attr_leaf_name_remote_t;
-
-typedef struct xfs_attr_leafblock {
-       xfs_attr_leaf_hdr_t     hdr;    /* constant-structure header block */
-       xfs_attr_leaf_entry_t   entries[1];     /* sorted on key, not name */
-       xfs_attr_leaf_name_local_t namelist;    /* grows from bottom of buf */
-       xfs_attr_leaf_name_remote_t valuelist;  /* grows from bottom of buf */
-} xfs_attr_leafblock_t;
-
-/*
- * CRC enabled leaf structures. Called "version 3" structures to match the
- * version number of the directory and dablk structures for this feature, and
- * attr2 is already taken by the variable inode attribute fork size feature.
- */
-struct xfs_attr3_leaf_hdr {
-       struct xfs_da3_blkinfo  info;
-       __be16                  count;
-       __be16                  usedbytes;
-       __be16                  firstused;
-       __u8                    holes;
-       __u8                    pad1;
-       struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
-       __be32                  pad2;           /* 64 bit alignment */
-};
-
-#define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc))
-
-struct xfs_attr3_leafblock {
-       struct xfs_attr3_leaf_hdr       hdr;
-       struct xfs_attr_leaf_entry      entries[1];
-
-       /*
-        * The rest of the block contains the following structures after the
-        * leaf entries, growing from the bottom up. The variables are never
-        * referenced, the locations accessed purely from helper functions.
-        *
-        * struct xfs_attr_leaf_name_local
-        * struct xfs_attr_leaf_name_remote
-        */
-};
-
-/*
- * incore, neutral version of the attribute leaf header
- */
-struct xfs_attr3_icleaf_hdr {
-       __uint32_t      forw;
-       __uint32_t      back;
-       __uint16_t      magic;
-       __uint16_t      count;
-       __uint16_t      usedbytes;
-       __uint16_t      firstused;
-       __u8            holes;
-       struct {
-               __uint16_t      base;
-               __uint16_t      size;
-       } freemap[XFS_ATTR_LEAF_MAPSIZE];
-};
-
-/*
- * Flags used in the leaf_entry[i].flags field.
- * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
- * on the system call, they are "or"ed together for various operations.
- */
-#define        XFS_ATTR_LOCAL_BIT      0       /* attr is stored locally */
-#define        XFS_ATTR_ROOT_BIT       1       /* limit access to trusted attrs */
-#define        XFS_ATTR_SECURE_BIT     2       /* limit access to secure attrs */
-#define        XFS_ATTR_INCOMPLETE_BIT 7       /* attr in middle of create/delete */
-#define XFS_ATTR_LOCAL         (1 << XFS_ATTR_LOCAL_BIT)
-#define XFS_ATTR_ROOT          (1 << XFS_ATTR_ROOT_BIT)
-#define XFS_ATTR_SECURE                (1 << XFS_ATTR_SECURE_BIT)
-#define XFS_ATTR_INCOMPLETE    (1 << XFS_ATTR_INCOMPLETE_BIT)
-
-/*
- * Conversion macros for converting namespace bits from argument flags
- * to ondisk flags.
- */
-#define XFS_ATTR_NSP_ARGS_MASK         (ATTR_ROOT | ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK_MASK       (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
-#define XFS_ATTR_NSP_ONDISK(flags)     ((flags) & XFS_ATTR_NSP_ONDISK_MASK)
-#define XFS_ATTR_NSP_ARGS(flags)       ((flags) & XFS_ATTR_NSP_ARGS_MASK)
-#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x) (((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
-                                        ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
-#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x) (((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
-                                        ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
-
-/*
- * Alignment for namelist and valuelist entries (since they are mixed
- * there can be only one alignment value)
- */
-#define        XFS_ATTR_LEAF_NAME_ALIGN        ((uint)sizeof(xfs_dablk_t))
-
-static inline int
-xfs_attr3_leaf_hdr_size(struct xfs_attr_leafblock *leafp)
-{
-       if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
-               return sizeof(struct xfs_attr3_leaf_hdr);
-       return sizeof(struct xfs_attr_leaf_hdr);
-}
-
-static inline struct xfs_attr_leaf_entry *
-xfs_attr3_leaf_entryp(xfs_attr_leafblock_t *leafp)
-{
-       if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
-               return &((struct xfs_attr3_leafblock *)leafp)->entries[0];
-       return &leafp->entries[0];
-}
-
-/*
- * Cast typed pointers for "local" and "remote" name/value structs.
- */
-static inline char *
-xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
-{
-       struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp);
-
-       return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];
-}
-
-static inline xfs_attr_leaf_name_remote_t *
-xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
-{
-       return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx);
-}
-
-static inline xfs_attr_leaf_name_local_t *
-xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
-{
-       return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx);
-}
-
-/*
- * Calculate total bytes used (including trailing pad for alignment) for
- * a "local" name/value structure, a "remote" name/value structure, and
- * a pointer which might be either.
- */
-static inline int xfs_attr_leaf_entsize_remote(int nlen)
-{
-       return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
-               XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
-}
-
-static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
-{
-       return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
-               XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
-}
-
-static inline int xfs_attr_leaf_entsize_local_max(int bsize)
-{
-       return (((bsize) >> 1) + ((bsize) >> 2));
-}
-
 /*
  * Used to keep a list of "remote value" extents when unlinking an inode.
  */
@@ -336,6 +106,4 @@ void        xfs_attr3_leaf_hdr_from_disk(struct xfs_attr3_icleaf_hdr *to,
 void   xfs_attr3_leaf_hdr_to_disk(struct xfs_attr_leafblock *to,
                                   struct xfs_attr3_icleaf_hdr *from);
 
-extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
-
 #endif /* __XFS_ATTR_LEAF_H__ */
index cbc80d4851772a9df2cb51abbd83e5ef799c6870..46c4ce148a432662c6038376b9ce70497a5c720a 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_attr_remote.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
+#include "xfs_attr_sf.h"
+#include "xfs_attr_remote.h"
 #include "xfs_attr_leaf.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 STATIC int
 xfs_attr_shortform_compare(const void *a, const void *b)
index 712a502de619b097df202f744ba481bd3471847d..2e5530467f2d3fb28a4a1e871c87994994c6c02f 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_alloc.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
@@ -42,6 +40,7 @@
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
 #include "xfs_buf_item.h"
+#include "xfs_error.h"
 
 #define ATTR_RMTVALUE_MAPSIZE  1       /* # of map entries at once */
 
index 92a8fd7977cc2b48649ed3e5c22344fe83fcd8c1..5a9acfa156d7af59855fde90fb5e6d8721ea289b 100644 (file)
 #ifndef __XFS_ATTR_REMOTE_H__
 #define        __XFS_ATTR_REMOTE_H__
 
-#define XFS_ATTR3_RMT_MAGIC    0x5841524d      /* XARM */
-
-/*
- * There is one of these headers per filesystem block in a remote attribute.
- * This is done to ensure there is a 1:1 mapping between the attribute value
- * length and the number of blocks needed to store the attribute. This makes the
- * verification of a buffer a little more complex, but greatly simplifies the
- * allocation, reading and writing of these attributes as we don't have to guess
- * the number of blocks needed to store the attribute data.
- */
-struct xfs_attr3_rmt_hdr {
-       __be32  rm_magic;
-       __be32  rm_offset;
-       __be32  rm_bytes;
-       __be32  rm_crc;
-       uuid_t  rm_uuid;
-       __be64  rm_owner;
-       __be64  rm_blkno;
-       __be64  rm_lsn;
-};
-
-#define XFS_ATTR3_RMT_CRC_OFF  offsetof(struct xfs_attr3_rmt_hdr, rm_crc)
-
-#define XFS_ATTR3_RMT_BUF_SPACE(mp, bufsize)   \
-       ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
-                       sizeof(struct xfs_attr3_rmt_hdr) : 0))
-
-extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
-
 int xfs_attr3_rmt_blocks(struct xfs_mount *mp, int attrlen);
 
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
index 48228848f5ae3e2e6f52900b6d0ea7215274a36d..16ce44a2b43eaac5730be0e3084ed44fd93e86e1 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_buf_item.h"
+#include "xfs_log_format.h"
 
 /*
  * XFS bit manipulation routines, used in non-realtime code.
index f47e65c30be6ddde5caa276d3262540d603d07dc..1c02da8bb7df5a0bf5729cd5375a0266731e545c 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_mount.h"
-#include "xfs_itable.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_extfree_item.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_attr_leaf.h"
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_buf_item.h"
-#include "xfs_filestream.h"
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
+#include "xfs_attr_leaf.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
 
 
 kmem_zone_t            *xfs_bmap_free_item_zone;
@@ -1482,7 +1480,7 @@ xfs_bmap_search_extents(
                xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
                                "Access to block zero in inode %llu "
                                "start_block: %llx start_off: %llx "
-                               "blkcnt: %llx extent-state: %x lastx: %x\n",
+                               "blkcnt: %llx extent-state: %x lastx: %x",
                        (unsigned long long)ip->i_ino,
                        (unsigned long long)gotp->br_startblock,
                        (unsigned long long)gotp->br_startoff,
index bb8de8e399c4b351effa08e6669e000438751d37..2fb4a2202e1731d153a5250dca593985f0191094 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
 #include "xfs_btree.h"
-#include "xfs_itable.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 /*
  * Determine the extent state.
index e367461a638e5b65ffdedc77bed6121dc1150161..6e42e1e50b89394e2c0a0a003c339ac03d127445 100644 (file)
 #ifndef __XFS_BMAP_BTREE_H__
 #define __XFS_BMAP_BTREE_H__
 
-#define XFS_BMAP_MAGIC         0x424d4150      /* 'BMAP' */
-#define XFS_BMAP_CRC_MAGIC     0x424d4133      /* 'BMA3' */
-
 struct xfs_btree_cur;
 struct xfs_btree_block;
 struct xfs_mount;
 struct xfs_inode;
 struct xfs_trans;
 
-/*
- * Bmap root header, on-disk form only.
- */
-typedef struct xfs_bmdr_block {
-       __be16          bb_level;       /* 0 is a leaf */
-       __be16          bb_numrecs;     /* current # of data records */
-} xfs_bmdr_block_t;
-
-/*
- * Bmap btree record and extent descriptor.
- *  l0:63 is an extent flag (value 1 indicates non-normal).
- *  l0:9-62 are startoff.
- *  l0:0-8 and l1:21-63 are startblock.
- *  l1:0-20 are blockcount.
- */
-#define BMBT_EXNTFLAG_BITLEN   1
-#define BMBT_STARTOFF_BITLEN   54
-#define BMBT_STARTBLOCK_BITLEN 52
-#define BMBT_BLOCKCOUNT_BITLEN 21
-
-typedef struct xfs_bmbt_rec {
-       __be64                  l0, l1;
-} xfs_bmbt_rec_t;
-
-typedef __uint64_t     xfs_bmbt_rec_base_t;    /* use this for casts */
-typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
-
-typedef struct xfs_bmbt_rec_host {
-       __uint64_t              l0, l1;
-} xfs_bmbt_rec_host_t;
-
-/*
- * Values and macros for delayed-allocation startblock fields.
- */
-#define STARTBLOCKVALBITS      17
-#define STARTBLOCKMASKBITS     (15 + XFS_BIG_BLKNOS * 20)
-#define DSTARTBLOCKMASKBITS    (15 + 20)
-#define STARTBLOCKMASK         \
-       (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-#define DSTARTBLOCKMASK                \
-       (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-
-static inline int isnullstartblock(xfs_fsblock_t x)
-{
-       return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK;
-}
-
-static inline int isnulldstartblock(xfs_dfsbno_t x)
-{
-       return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK;
-}
-
-static inline xfs_fsblock_t nullstartblock(int k)
-{
-       ASSERT(k < (1 << STARTBLOCKVALBITS));
-       return STARTBLOCKMASK | (k);
-}
-
-static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
-{
-       return (xfs_filblks_t)((x) & ~STARTBLOCKMASK);
-}
-
-/*
- * Possible extent formats.
- */
-typedef enum {
-       XFS_EXTFMT_NOSTATE = 0,
-       XFS_EXTFMT_HASSTATE
-} xfs_exntfmt_t;
-
-/*
- * Possible extent states.
- */
-typedef enum {
-       XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
-       XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
-} xfs_exntst_t;
-
 /*
  * Extent state and extent format macros.
  */
@@ -114,27 +32,6 @@ typedef enum {
                XFS_EXTFMT_HASSTATE : XFS_EXTFMT_NOSTATE)
 #define ISUNWRITTEN(x) ((x)->br_state == XFS_EXT_UNWRITTEN)
 
-/*
- * Incore version of above.
- */
-typedef struct xfs_bmbt_irec
-{
-       xfs_fileoff_t   br_startoff;    /* starting file offset */
-       xfs_fsblock_t   br_startblock;  /* starting block number */
-       xfs_filblks_t   br_blockcount;  /* number of blocks */
-       xfs_exntst_t    br_state;       /* extent state */
-} xfs_bmbt_irec_t;
-
-/*
- * Key structure for non-leaf levels of the tree.
- */
-typedef struct xfs_bmbt_key {
-       __be64          br_startoff;    /* starting file offset */
-} xfs_bmbt_key_t, xfs_bmdr_key_t;
-
-/* btree pointer type */
-typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
-
 /*
  * Btree block header size depends on a superblock flag.
  */
@@ -243,6 +140,4 @@ extern int xfs_bmbt_change_owner(struct xfs_trans *tp, struct xfs_inode *ip,
 extern struct xfs_btree_cur *xfs_bmbt_init_cursor(struct xfs_mount *,
                struct xfs_trans *, struct xfs_inode *, int);
 
-extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
-
 #endif /* __XFS_BMAP_BTREE_H__ */
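
As an aside on the definitions removed here (they are relocated elsewhere by this header reorganisation), the packed bmbt record can be decoded with plain shift-and-mask arithmetic. Below is a minimal, userspace-only sketch, assuming the l0/l1 words have already been converted from their on-disk big-endian form; the struct and helper names are hypothetical stand-ins for the kernel's own accessors.

/*
 * Illustrative sketch only, not part of this patch.  Bit widths follow the
 * removed comment: l0:63 extent flag, l0:9-62 startoff (54 bits),
 * l0:0-8 + l1:21-63 startblock (52 bits), l1:0-20 blockcount (21 bits).
 */
#include <stdint.h>

struct bmbt_words { uint64_t l0, l1; };         /* already byte-swapped */

static inline uint64_t lo_mask(int bits) { return (1ULL << bits) - 1; }

static inline uint64_t rec_startoff(const struct bmbt_words *r)
{
        return (r->l0 & lo_mask(63)) >> 9;                   /* 54 bits */
}

static inline uint64_t rec_startblock(const struct bmbt_words *r)
{
        return ((r->l0 & lo_mask(9)) << 43) | (r->l1 >> 21); /* 9 + 43 bits */
}

static inline uint64_t rec_blockcount(const struct bmbt_words *r)
{
        return r->l1 & lo_mask(21);                          /* 21 bits */
}

static inline int rec_is_unwritten(const struct bmbt_words *r)
{
        return (int)(r->l0 >> 63);                           /* extent state flag */
}
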
index 97f952caea74bd8311a3269ca7520e87b01fcb18..5887e41c0323ae85f867cc9bc83bbdf8c1e41cd1 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
+#include "xfs_trans.h"
 #include "xfs_extfree_item.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
 
 /* Kernel only BMAP related definitions and functions */
 
@@ -965,32 +965,12 @@ xfs_free_eofblocks(
        return error;
 }
 
-/*
- * xfs_alloc_file_space()
- *      This routine allocates disk space for the given file.
- *
- *     If alloc_type == 0, this request is for an ALLOCSP type
- *     request which will change the file size.  In this case, no
- *     DMAPI event will be generated by the call.  A TRUNCATE event
- *     will be generated later by xfs_setattr.
- *
- *     If alloc_type != 0, this request is for a RESVSP type
- *     request, and a DMAPI DM_EVENT_WRITE will be generated if the
- *     lower block boundary byte address is less than the file's
- *     length.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-STATIC int
+int
 xfs_alloc_file_space(
-       xfs_inode_t             *ip,
+       struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
-       int                     alloc_type,
-       int                     attr_flags)
+       int                     alloc_type)
 {
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
@@ -1232,24 +1212,11 @@ xfs_zero_remaining_bytes(
        return error;
 }
 
-/*
- * xfs_free_file_space()
- *      This routine frees disk space for the given file.
- *
- *     This routine is only called by xfs_change_file_space
- *     for an UNRESVSP type call.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-STATIC int
+int
 xfs_free_file_space(
-       xfs_inode_t             *ip,
+       struct xfs_inode        *ip,
        xfs_off_t               offset,
-       xfs_off_t               len,
-       int                     attr_flags)
+       xfs_off_t               len)
 {
        int                     committed;
        int                     done;
@@ -1267,7 +1234,6 @@ xfs_free_file_space(
        int                     rt;
        xfs_fileoff_t           startoffset_fsb;
        xfs_trans_t             *tp;
-       int                     need_iolock = 1;
 
        mp = ip->i_mount;
 
@@ -1284,20 +1250,15 @@ xfs_free_file_space(
        startoffset_fsb = XFS_B_TO_FSB(mp, offset);
        endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
 
-       if (attr_flags & XFS_ATTR_NOLOCK)
-               need_iolock = 0;
-       if (need_iolock) {
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-               /* wait for the completion of any pending DIOs */
-               inode_dio_wait(VFS_I(ip));
-       }
+       /* wait for the completion of any pending DIOs */
+       inode_dio_wait(VFS_I(ip));
 
        rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
        ioffset = offset & ~(rounding - 1);
        error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                              ioffset, -1);
        if (error)
-               goto out_unlock_iolock;
+               goto out;
        truncate_pagecache_range(VFS_I(ip), ioffset, -1);
 
        /*
@@ -1311,7 +1272,7 @@ xfs_free_file_space(
                error = xfs_bmapi_read(ip, startoffset_fsb, 1,
                                        &imap, &nimap, 0);
                if (error)
-                       goto out_unlock_iolock;
+                       goto out;
                ASSERT(nimap == 0 || nimap == 1);
                if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                        xfs_daddr_t     block;
@@ -1326,7 +1287,7 @@ xfs_free_file_space(
                error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
                                        &imap, &nimap, 0);
                if (error)
-                       goto out_unlock_iolock;
+                       goto out;
                ASSERT(nimap == 0 || nimap == 1);
                if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                        ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
@@ -1412,27 +1373,23 @@ xfs_free_file_space(
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }
 
- out_unlock_iolock:
-       if (need_iolock)
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ out:
        return error;
 
  error0:
        xfs_bmap_cancel(&free_list);
  error1:
        xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-       xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
-                   XFS_ILOCK_EXCL);
-       return error;
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       goto out;
 }
 
 
-STATIC int
+int
 xfs_zero_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
-       xfs_off_t               len,
-       int                     attr_flags)
+       xfs_off_t               len)
 {
        struct xfs_mount        *mp = ip->i_mount;
        uint                    granularity;
@@ -1453,9 +1410,6 @@ xfs_zero_file_space(
        ASSERT(start_boundary >= offset);
        ASSERT(end_boundary <= offset + len);
 
-       if (!(attr_flags & XFS_ATTR_NOLOCK))
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-
        if (start_boundary < end_boundary - 1) {
                /* punch out the page cache over the conversion range */
                truncate_pagecache_range(VFS_I(ip), start_boundary,
@@ -1463,16 +1417,16 @@ xfs_zero_file_space(
                /* convert the blocks */
                error = xfs_alloc_file_space(ip, start_boundary,
                                        end_boundary - start_boundary - 1,
-                                       XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT,
-                                       attr_flags);
+                                       XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
                if (error)
-                       goto out_unlock;
+                       goto out;
 
                /* We've handled the interior of the range, now for the edges */
-               if (start_boundary != offset)
+               if (start_boundary != offset) {
                        error = xfs_iozero(ip, offset, start_boundary - offset);
-               if (error)
-                       goto out_unlock;
+                       if (error)
+                               goto out;
+               }
 
                if (end_boundary != offset + len)
                        error = xfs_iozero(ip, end_boundary,
@@ -1486,196 +1440,11 @@ xfs_zero_file_space(
                error = xfs_iozero(ip, offset, len);
        }
 
-out_unlock:
-       if (!(attr_flags & XFS_ATTR_NOLOCK))
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+out:
        return error;
 
 }
 
-/*
- * xfs_change_file_space()
- *      This routine allocates or frees disk space for the given file.
- *      The user specified parameters are checked for alignment and size
- *      limitations.
- *
- * RETURNS:
- *       0 on success
- *      errno on error
- *
- */
-int
-xfs_change_file_space(
-       xfs_inode_t     *ip,
-       int             cmd,
-       xfs_flock64_t   *bf,
-       xfs_off_t       offset,
-       int             attr_flags)
-{
-       xfs_mount_t     *mp = ip->i_mount;
-       int             clrprealloc;
-       int             error;
-       xfs_fsize_t     fsize;
-       int             setprealloc;
-       xfs_off_t       startoffset;
-       xfs_trans_t     *tp;
-       struct iattr    iattr;
-
-       if (!S_ISREG(ip->i_d.di_mode))
-               return XFS_ERROR(EINVAL);
-
-       switch (bf->l_whence) {
-       case 0: /*SEEK_SET*/
-               break;
-       case 1: /*SEEK_CUR*/
-               bf->l_start += offset;
-               break;
-       case 2: /*SEEK_END*/
-               bf->l_start += XFS_ISIZE(ip);
-               break;
-       default:
-               return XFS_ERROR(EINVAL);
-       }
-
-       /*
-        * length of <= 0 for resv/unresv/zero is invalid.  length for
-        * alloc/free is ignored completely and we have no idea what userspace
-        * might have set it to, so set it to zero to allow range
-        * checks to pass.
-        */
-       switch (cmd) {
-       case XFS_IOC_ZERO_RANGE:
-       case XFS_IOC_RESVSP:
-       case XFS_IOC_RESVSP64:
-       case XFS_IOC_UNRESVSP:
-       case XFS_IOC_UNRESVSP64:
-               if (bf->l_len <= 0)
-                       return XFS_ERROR(EINVAL);
-               break;
-       default:
-               bf->l_len = 0;
-               break;
-       }
-
-       if (bf->l_start < 0 ||
-           bf->l_start > mp->m_super->s_maxbytes ||
-           bf->l_start + bf->l_len < 0 ||
-           bf->l_start + bf->l_len >= mp->m_super->s_maxbytes)
-               return XFS_ERROR(EINVAL);
-
-       bf->l_whence = 0;
-
-       startoffset = bf->l_start;
-       fsize = XFS_ISIZE(ip);
-
-       setprealloc = clrprealloc = 0;
-       switch (cmd) {
-       case XFS_IOC_ZERO_RANGE:
-               error = xfs_zero_file_space(ip, startoffset, bf->l_len,
-                                               attr_flags);
-               if (error)
-                       return error;
-               setprealloc = 1;
-               break;
-
-       case XFS_IOC_RESVSP:
-       case XFS_IOC_RESVSP64:
-               error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
-                                               XFS_BMAPI_PREALLOC, attr_flags);
-               if (error)
-                       return error;
-               setprealloc = 1;
-               break;
-
-       case XFS_IOC_UNRESVSP:
-       case XFS_IOC_UNRESVSP64:
-               if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
-                                                               attr_flags)))
-                       return error;
-               break;
-
-       case XFS_IOC_ALLOCSP:
-       case XFS_IOC_ALLOCSP64:
-       case XFS_IOC_FREESP:
-       case XFS_IOC_FREESP64:
-               /*
-                * These operations actually do IO when extending the file, but
-                * the allocation is done separately to the zeroing that is
-                * done. This set of operations need to be serialised against
-                * other IO operations, such as truncate and buffered IO. We
-                * need to take the IOLOCK here to serialise the allocation and
-                * zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
-                * truncate, direct IO) from racing against the transient
-                * allocated but not written state we can have here.
-                */
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-               if (startoffset > fsize) {
-                       error = xfs_alloc_file_space(ip, fsize,
-                                       startoffset - fsize, 0,
-                                       attr_flags | XFS_ATTR_NOLOCK);
-                       if (error) {
-                               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-                               break;
-                       }
-               }
-
-               iattr.ia_valid = ATTR_SIZE;
-               iattr.ia_size = startoffset;
-
-               error = xfs_setattr_size(ip, &iattr,
-                                        attr_flags | XFS_ATTR_NOLOCK);
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-
-               if (error)
-                       return error;
-
-               clrprealloc = 1;
-               break;
-
-       default:
-               ASSERT(0);
-               return XFS_ERROR(EINVAL);
-       }
-
-       /*
-        * update the inode timestamp, mode, and prealloc flag bits
-        */
-       tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
-       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-       if ((attr_flags & XFS_ATTR_DMI) == 0) {
-               ip->i_d.di_mode &= ~S_ISUID;
-
-               /*
-                * Note that we don't have to worry about mandatory
-                * file locking being disabled here because we only
-                * clear the S_ISGID bit if the Group execute bit is
-                * on, but if it was on then mandatory locking wouldn't
-                * have been enabled.
-                */
-               if (ip->i_d.di_mode & S_IXGRP)
-                       ip->i_d.di_mode &= ~S_ISGID;
-
-               xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-       }
-       if (setprealloc)
-               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
-       else if (clrprealloc)
-               ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
-
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       if (attr_flags & XFS_ATTR_SYNC)
-               xfs_trans_set_sync(tp);
-       return xfs_trans_commit(tp, 0);
-}
-
 /*
  * We need to check that the format of the data fork in the temporary inode is
  * valid for the target inode before doing the swap. This is not a problem with
index 061260946f7a85ae7d972ea11f0142daca9ba482..900747b25772c2b1a41821fba8e99c3cde2b3ffc 100644 (file)
@@ -93,9 +93,12 @@ int  xfs_bmap_last_extent(struct xfs_trans *tp, struct xfs_inode *ip,
                             int *is_empty);
 
 /* preallocation and hole punch interface */
-int    xfs_change_file_space(struct xfs_inode *ip, int cmd,
-                             xfs_flock64_t *bf, xfs_off_t offset,
-                             int attr_flags);
+int    xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset,
+                            xfs_off_t len, int alloc_type);
+int    xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
+                           xfs_off_t len);
+int    xfs_zero_file_space(struct xfs_inode *ip, xfs_off_t offset,
+                           xfs_off_t len);
 
 /* EOF block manipulation functions */
 bool   xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
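
With the attr_flags parameter dropped from these prototypes, the IOLOCK handling that it used to switch on and off now belongs to the callers. A rough caller sketch under that assumption (prealloc_range is a hypothetical name, not a function from this patch):

/*
 * Illustrative sketch: the caller serialises against other I/O itself and
 * then calls the reworked preallocation helper declared above.
 */
static int prealloc_range(struct xfs_inode *ip, xfs_off_t off, xfs_off_t len)
{
        int             error;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);
        error = xfs_alloc_file_space(ip, off, len, XFS_BMAPI_PREALLOC);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return error;
}
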
index 5690e102243d70e7b87876f0ef9bc07be058da86..9adaae4f3e2fd21c647c9e7fa023a120e5012272 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_buf_item.h"
 #include "xfs_btree.h"
index 06729b67ad58ec2c394a3ecdb1104938ced672c2..91e34f21bacea773eaadecfc0a3c29595e912601 100644 (file)
@@ -26,73 +26,6 @@ struct xfs_trans;
 
 extern kmem_zone_t     *xfs_btree_cur_zone;
 
-/*
- * This nonsense is to make -wlint happy.
- */
-#define        XFS_LOOKUP_EQ   ((xfs_lookup_t)XFS_LOOKUP_EQi)
-#define        XFS_LOOKUP_LE   ((xfs_lookup_t)XFS_LOOKUP_LEi)
-#define        XFS_LOOKUP_GE   ((xfs_lookup_t)XFS_LOOKUP_GEi)
-
-#define        XFS_BTNUM_BNO   ((xfs_btnum_t)XFS_BTNUM_BNOi)
-#define        XFS_BTNUM_CNT   ((xfs_btnum_t)XFS_BTNUM_CNTi)
-#define        XFS_BTNUM_BMAP  ((xfs_btnum_t)XFS_BTNUM_BMAPi)
-#define        XFS_BTNUM_INO   ((xfs_btnum_t)XFS_BTNUM_INOi)
-
-/*
- * Generic btree header.
- *
- * This is a combination of the actual format used on disk for short and long
- * format btrees.  The first three fields are shared by both format, but the
- * pointers are different and should be used with care.
- *
- * To get the size of the actual short or long form headers please use the size
- * macros below.  Never use sizeof(xfs_btree_block).
- *
- * The blkno, crc, lsn, owner and uuid fields are only available in filesystems
- * with the crc feature bit, and all accesses to them must be conditional on
- * that flag.
- */
-struct xfs_btree_block {
-       __be32          bb_magic;       /* magic number for block type */
-       __be16          bb_level;       /* 0 is a leaf */
-       __be16          bb_numrecs;     /* current # of data records */
-       union {
-               struct {
-                       __be32          bb_leftsib;
-                       __be32          bb_rightsib;
-
-                       __be64          bb_blkno;
-                       __be64          bb_lsn;
-                       uuid_t          bb_uuid;
-                       __be32          bb_owner;
-                       __le32          bb_crc;
-               } s;                    /* short form pointers */
-               struct  {
-                       __be64          bb_leftsib;
-                       __be64          bb_rightsib;
-
-                       __be64          bb_blkno;
-                       __be64          bb_lsn;
-                       uuid_t          bb_uuid;
-                       __be64          bb_owner;
-                       __le32          bb_crc;
-                       __be32          bb_pad; /* padding for alignment */
-               } l;                    /* long form pointers */
-       } bb_u;                         /* rest */
-};
-
-#define XFS_BTREE_SBLOCK_LEN   16      /* size of a short form block */
-#define XFS_BTREE_LBLOCK_LEN   24      /* size of a long form block */
-
-/* sizes of CRC enabled btree blocks */
-#define XFS_BTREE_SBLOCK_CRC_LEN       (XFS_BTREE_SBLOCK_LEN + 40)
-#define XFS_BTREE_LBLOCK_CRC_LEN       (XFS_BTREE_LBLOCK_LEN + 48)
-
-#define XFS_BTREE_SBLOCK_CRC_OFF \
-       offsetof(struct xfs_btree_block, bb_u.s.bb_crc)
-#define XFS_BTREE_LBLOCK_CRC_OFF \
-       offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
-
 /*
  * Generic key, ptr and record wrapper structures.
  *
@@ -118,6 +51,18 @@ union xfs_btree_rec {
        xfs_inobt_rec_t         inobt;
 };
 
+/*
+ * This nonsense is to make -wlint happy.
+ */
+#define        XFS_LOOKUP_EQ   ((xfs_lookup_t)XFS_LOOKUP_EQi)
+#define        XFS_LOOKUP_LE   ((xfs_lookup_t)XFS_LOOKUP_LEi)
+#define        XFS_LOOKUP_GE   ((xfs_lookup_t)XFS_LOOKUP_GEi)
+
+#define        XFS_BTNUM_BNO   ((xfs_btnum_t)XFS_BTNUM_BNOi)
+#define        XFS_BTNUM_CNT   ((xfs_btnum_t)XFS_BTNUM_CNTi)
+#define        XFS_BTNUM_BMAP  ((xfs_btnum_t)XFS_BTNUM_BMAPi)
+#define        XFS_BTNUM_INO   ((xfs_btnum_t)XFS_BTNUM_INOi)
+
 /*
  * For logging record fields.
  */
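
The block-header size macros removed from this header encode the rule spelled out alongside them: never take sizeof(struct xfs_btree_block), because short versus long form and the optional CRC fields change the real header length. A small sketch of that selection, using the literal sizes from the removed macros and a hypothetical helper name:

/*
 * Illustrative only: header length as chosen by the removed
 * XFS_BTREE_[SL]BLOCK(_CRC)_LEN macros.
 */
static inline int btree_block_hdr_len(int long_form, int crc_enabled)
{
        if (long_form)                          /* 64-bit sibling pointers */
                return crc_enabled ? 24 + 48 : 24;
        return crc_enabled ? 16 + 40 : 16;      /* 32-bit sibling pointers */
}
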
index 263470075ea2ce11dd39a83603e0b9fa1f0f5a60..c7f0b77dcb0090046b84eda27c68d870af25d45a 100644 (file)
 #include <linux/backing-dev.h>
 #include <linux/freezer.h>
 
-#include "xfs_sb.h"
+#include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_log.h"
+#include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
 
 static kmem_zone_t *xfs_buf_zone;
 
@@ -590,7 +591,7 @@ found:
                error = _xfs_buf_map_pages(bp, flags);
                if (unlikely(error)) {
                        xfs_warn(target->bt_mount,
-                               "%s: failed to map pages\n", __func__);
+                               "%s: failed to map pages", __func__);
                        xfs_buf_relse(bp);
                        return NULL;
                }
@@ -809,7 +810,7 @@ xfs_buf_get_uncached(
        error = _xfs_buf_map_pages(bp, 0);
        if (unlikely(error)) {
                xfs_warn(target->bt_mount,
-                       "%s: failed to map pages\n", __func__);
+                       "%s: failed to map pages", __func__);
                goto fail_free_mem;
        }
 
@@ -1618,7 +1619,7 @@ xfs_setsize_buftarg_flags(
                bdevname(btp->bt_bdev, name);
 
                xfs_warn(btp->bt_mount,
-                       "Cannot set_blocksize to %u on device %s\n",
+                       "Cannot set_blocksize to %u on device %s",
                        sectorsize, name);
                return EINVAL;
        }
index f1d85cfc0a54d1cb54ebe95937872127f2e63142..b6d20c55282b9f20a95eae2d10294863b4dd05cd 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
 
 
 kmem_zone_t    *xfs_buf_item_zone;
index db6371087fe8ea9b786f82189b387c55979a2460..3f3455a415102de167271a7467725da410a21f24 100644 (file)
@@ -71,10 +71,6 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
 void   xfs_buf_iodone_callbacks(struct xfs_buf *);
 void   xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
 
-void   xfs_trans_buf_set_type(struct xfs_trans *, struct xfs_buf *,
-                              enum xfs_blft);
-void   xfs_trans_buf_copy_type(struct xfs_buf *dst_bp, struct xfs_buf *src_bp);
-
 extern kmem_zone_t     *xfs_buf_item_zone;
 
 #endif /* __XFS_BUF_ITEM_H__ */
index 20bf8e8002d6fd2733782af97f373ddaf2bcd8d6..eb65c546ffd851e134eb27479b0629b827669945 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
index b1f267995dea32e97dc041c7dd14af77e1630dc8..e492dcadd0322dc00c4c156b4556811bef200890 100644 (file)
@@ -24,146 +24,6 @@ struct xfs_inode;
 struct xfs_trans;
 struct zone;
 
-/*========================================================================
- * Directory Structure when greater than XFS_LBSIZE(mp) bytes.
- *========================================================================*/
-
-/*
- * This structure is common to both leaf nodes and non-leaf nodes in the Btree.
- *
- * It is used to manage a doubly linked list of all blocks at the same
- * level in the Btree, and to identify which type of block this is.
- */
-#define XFS_DA_NODE_MAGIC      0xfebe  /* magic number: non-leaf blocks */
-#define XFS_ATTR_LEAF_MAGIC    0xfbee  /* magic number: attribute leaf blks */
-#define        XFS_DIR2_LEAF1_MAGIC    0xd2f1  /* magic number: v2 dirlf single blks */
-#define        XFS_DIR2_LEAFN_MAGIC    0xd2ff  /* magic number: v2 dirlf multi blks */
-
-typedef struct xfs_da_blkinfo {
-       __be32          forw;                   /* previous block in list */
-       __be32          back;                   /* following block in list */
-       __be16          magic;                  /* validity check on block */
-       __be16          pad;                    /* unused */
-} xfs_da_blkinfo_t;
-
-/*
- * CRC enabled directory structure types
- *
- * The headers change size for the additional verification information, but
- * otherwise the tree layouts and contents are unchanged. Hence the da btree
- * code can use the struct xfs_da_blkinfo for manipulating the tree links and
- * magic numbers without modification for both v2 and v3 nodes.
- */
-#define XFS_DA3_NODE_MAGIC     0x3ebe  /* magic number: non-leaf blocks */
-#define XFS_ATTR3_LEAF_MAGIC   0x3bee  /* magic number: attribute leaf blks */
-#define        XFS_DIR3_LEAF1_MAGIC    0x3df1  /* magic number: v2 dirlf single blks */
-#define        XFS_DIR3_LEAFN_MAGIC    0x3dff  /* magic number: v2 dirlf multi blks */
-
-struct xfs_da3_blkinfo {
-       /*
-        * the node link manipulation code relies on the fact that the first
-        * element of this structure is the struct xfs_da_blkinfo so it can
-        * ignore the differences in the rest of the structures.
-        */
-       struct xfs_da_blkinfo   hdr;
-       __be32                  crc;    /* CRC of block */
-       __be64                  blkno;  /* first block of the buffer */
-       __be64                  lsn;    /* sequence number of last write */
-       uuid_t                  uuid;   /* filesystem we belong to */
-       __be64                  owner;  /* inode that owns the block */
-};
-
-/*
- * This is the structure of the root and intermediate nodes in the Btree.
- * The leaf nodes are defined above.
- *
- * Entries are not packed.
- *
- * Since we have duplicate keys, use a binary search but always follow
- * all match in the block, not just the first match found.
- */
-#define        XFS_DA_NODE_MAXDEPTH    5       /* max depth of Btree */
-
-typedef struct xfs_da_node_hdr {
-       struct xfs_da_blkinfo   info;   /* block type, links, etc. */
-       __be16                  __count; /* count of active entries */
-       __be16                  __level; /* level above leaves (leaf == 0) */
-} xfs_da_node_hdr_t;
-
-struct xfs_da3_node_hdr {
-       struct xfs_da3_blkinfo  info;   /* block type, links, etc. */
-       __be16                  __count; /* count of active entries */
-       __be16                  __level; /* level above leaves (leaf == 0) */
-       __be32                  __pad32;
-};
-
-#define XFS_DA3_NODE_CRC_OFF   (offsetof(struct xfs_da3_node_hdr, info.crc))
-
-typedef struct xfs_da_node_entry {
-       __be32  hashval;        /* hash value for this descendant */
-       __be32  before;         /* Btree block before this key */
-} xfs_da_node_entry_t;
-
-typedef struct xfs_da_intnode {
-       struct xfs_da_node_hdr  hdr;
-       struct xfs_da_node_entry __btree[];
-} xfs_da_intnode_t;
-
-struct xfs_da3_intnode {
-       struct xfs_da3_node_hdr hdr;
-       struct xfs_da_node_entry __btree[];
-};
-
-/*
- * In-core version of the node header to abstract the differences in the v2 and
- * v3 disk format of the headers. Callers need to convert to/from disk format as
- * appropriate.
- */
-struct xfs_da3_icnode_hdr {
-       __uint32_t      forw;
-       __uint32_t      back;
-       __uint16_t      magic;
-       __uint16_t      count;
-       __uint16_t      level;
-};
-
-extern void xfs_da3_node_hdr_from_disk(struct xfs_da3_icnode_hdr *to,
-                                      struct xfs_da_intnode *from);
-extern void xfs_da3_node_hdr_to_disk(struct xfs_da_intnode *to,
-                                    struct xfs_da3_icnode_hdr *from);
-
-static inline int
-__xfs_da3_node_hdr_size(bool v3)
-{
-       if (v3)
-               return sizeof(struct xfs_da3_node_hdr);
-       return sizeof(struct xfs_da_node_hdr);
-}
-static inline int
-xfs_da3_node_hdr_size(struct xfs_da_intnode *dap)
-{
-       bool    v3 = dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC);
-
-       return __xfs_da3_node_hdr_size(v3);
-}
-
-static inline struct xfs_da_node_entry *
-xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
-{
-       if (dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
-               struct xfs_da3_intnode *dap3 = (struct xfs_da3_intnode *)dap;
-               return dap3->__btree;
-       }
-       return dap->__btree;
-}
-
-extern void xfs_da3_intnode_from_disk(struct xfs_da3_icnode_hdr *to,
-                                     struct xfs_da_intnode *from);
-extern void xfs_da3_intnode_to_disk(struct xfs_da_intnode *to,
-                                   struct xfs_da3_icnode_hdr *from);
-
-#define        XFS_LBSIZE(mp)  (mp)->m_sb.sb_blocksize
-
 /*========================================================================
  * Btree searching and modification structure definitions.
  *========================================================================*/
@@ -309,8 +169,6 @@ int xfs_da3_node_read(struct xfs_trans *tp, struct xfs_inode *dp,
                         xfs_dablk_t bno, xfs_daddr_t mappedbno,
                         struct xfs_buf **bpp, int which_fork);
 
-extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
-
 /*
  * Utility routines.
  */
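
The node-header helpers moved out of this header (xfs_da3_node_hdr_from_disk and xfs_da3_node_tree_p) hide the v2/v3 on-disk differences from callers. The sketch below, which is not taken from the patch, shows how an intermediate node might be scanned for the child block covering a hash value; the function name and the linear scan are illustrative, since the real lookup binary-searches the same entries.

/*
 * Illustrative sketch: walk a da btree intermediate node through the
 * format-neutral helpers and return the "before" block of the first
 * entry whose hashval covers the search value.
 */
static uint32_t node_child_for_hash(struct xfs_da_intnode *node, uint32_t hashval)
{
        struct xfs_da3_icnode_hdr ichdr;
        struct xfs_da_node_entry *btree;
        int                     i;

        xfs_da3_node_hdr_from_disk(&ichdr, node);       /* v2/v3 neutral copy */
        btree = xfs_da3_node_tree_p(node);              /* entry array start */

        for (i = 0; i < ichdr.count; i++)
                if (be32_to_cpu(btree[i].hashval) >= hashval)
                        return be32_to_cpu(btree[i].before);

        /* nothing covers it; fall back to the last child, if any */
        return ichdr.count ? be32_to_cpu(btree[ichdr.count - 1].before) : 0;
}
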
similarity index 65%
rename from fs/xfs/xfs_dir2_format.h
rename to fs/xfs/xfs_da_format.h
index 9cf67381adf6769d0b6fc984b6fe5ecaf8cef5b4..89a1a219c8ff57cc35a6e8c10f2309fff8ee183f 100644 (file)
  * along with this program; if not, write the Free Software Foundation,
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
-#ifndef __XFS_DIR2_FORMAT_H__
-#define __XFS_DIR2_FORMAT_H__
+#ifndef __XFS_DA_FORMAT_H__
+#define __XFS_DA_FORMAT_H__
+
+/*========================================================================
+ * Directory Structure when greater than XFS_LBSIZE(mp) bytes.
+ *========================================================================*/
+
+/*
+ * This structure is common to both leaf nodes and non-leaf nodes in the Btree.
+ *
+ * It is used to manage a doubly linked list of all blocks at the same
+ * level in the Btree, and to identify which type of block this is.
+ */
+#define XFS_DA_NODE_MAGIC      0xfebe  /* magic number: non-leaf blocks */
+#define XFS_ATTR_LEAF_MAGIC    0xfbee  /* magic number: attribute leaf blks */
+#define        XFS_DIR2_LEAF1_MAGIC    0xd2f1  /* magic number: v2 dirlf single blks */
+#define        XFS_DIR2_LEAFN_MAGIC    0xd2ff  /* magic number: v2 dirlf multi blks */
+
+typedef struct xfs_da_blkinfo {
+       __be32          forw;                   /* previous block in list */
+       __be32          back;                   /* following block in list */
+       __be16          magic;                  /* validity check on block */
+       __be16          pad;                    /* unused */
+} xfs_da_blkinfo_t;
+
+/*
+ * CRC enabled directory structure types
+ *
+ * The headers change size for the additional verification information, but
+ * otherwise the tree layouts and contents are unchanged. Hence the da btree
+ * code can use the struct xfs_da_blkinfo for manipulating the tree links and
+ * magic numbers without modification for both v2 and v3 nodes.
+ */
+#define XFS_DA3_NODE_MAGIC     0x3ebe  /* magic number: non-leaf blocks */
+#define XFS_ATTR3_LEAF_MAGIC   0x3bee  /* magic number: attribute leaf blks */
+#define        XFS_DIR3_LEAF1_MAGIC    0x3df1  /* magic number: v2 dirlf single blks */
+#define        XFS_DIR3_LEAFN_MAGIC    0x3dff  /* magic number: v2 dirlf multi blks */
+
+struct xfs_da3_blkinfo {
+       /*
+        * the node link manipulation code relies on the fact that the first
+        * element of this structure is the struct xfs_da_blkinfo so it can
+        * ignore the differences in the rest of the structures.
+        */
+       struct xfs_da_blkinfo   hdr;
+       __be32                  crc;    /* CRC of block */
+       __be64                  blkno;  /* first block of the buffer */
+       __be64                  lsn;    /* sequence number of last write */
+       uuid_t                  uuid;   /* filesystem we belong to */
+       __be64                  owner;  /* inode that owns the block */
+};
+
+/*
+ * This is the structure of the root and intermediate nodes in the Btree.
+ * The leaf nodes are defined above.
+ *
+ * Entries are not packed.
+ *
+ * Since we have duplicate keys, use a binary search but always follow
+ * all match in the block, not just the first match found.
+ */
+#define        XFS_DA_NODE_MAXDEPTH    5       /* max depth of Btree */
+
+typedef struct xfs_da_node_hdr {
+       struct xfs_da_blkinfo   info;   /* block type, links, etc. */
+       __be16                  __count; /* count of active entries */
+       __be16                  __level; /* level above leaves (leaf == 0) */
+} xfs_da_node_hdr_t;
+
+struct xfs_da3_node_hdr {
+       struct xfs_da3_blkinfo  info;   /* block type, links, etc. */
+       __be16                  __count; /* count of active entries */
+       __be16                  __level; /* level above leaves (leaf == 0) */
+       __be32                  __pad32;
+};
+
+#define XFS_DA3_NODE_CRC_OFF   (offsetof(struct xfs_da3_node_hdr, info.crc))
+
+typedef struct xfs_da_node_entry {
+       __be32  hashval;        /* hash value for this descendant */
+       __be32  before;         /* Btree block before this key */
+} xfs_da_node_entry_t;
+
+typedef struct xfs_da_intnode {
+       struct xfs_da_node_hdr  hdr;
+       struct xfs_da_node_entry __btree[];
+} xfs_da_intnode_t;
+
+struct xfs_da3_intnode {
+       struct xfs_da3_node_hdr hdr;
+       struct xfs_da_node_entry __btree[];
+};
+
+/*
+ * In-core version of the node header to abstract the differences in the v2 and
+ * v3 disk format of the headers. Callers need to convert to/from disk format as
+ * appropriate.
+ */
+struct xfs_da3_icnode_hdr {
+       __uint32_t      forw;
+       __uint32_t      back;
+       __uint16_t      magic;
+       __uint16_t      count;
+       __uint16_t      level;
+};
+
+extern void xfs_da3_node_hdr_from_disk(struct xfs_da3_icnode_hdr *to,
+                                      struct xfs_da_intnode *from);
+extern void xfs_da3_node_hdr_to_disk(struct xfs_da_intnode *to,
+                                    struct xfs_da3_icnode_hdr *from);
+
+static inline int
+__xfs_da3_node_hdr_size(bool v3)
+{
+       if (v3)
+               return sizeof(struct xfs_da3_node_hdr);
+       return sizeof(struct xfs_da_node_hdr);
+}
+static inline int
+xfs_da3_node_hdr_size(struct xfs_da_intnode *dap)
+{
+       bool    v3 = dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC);
+
+       return __xfs_da3_node_hdr_size(v3);
+}
+
+static inline struct xfs_da_node_entry *
+xfs_da3_node_tree_p(struct xfs_da_intnode *dap)
+{
+       if (dap->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
+               struct xfs_da3_intnode *dap3 = (struct xfs_da3_intnode *)dap;
+               return dap3->__btree;
+       }
+       return dap->__btree;
+}
+
+extern void xfs_da3_intnode_from_disk(struct xfs_da3_icnode_hdr *to,
+                                     struct xfs_da_intnode *from);
+extern void xfs_da3_intnode_to_disk(struct xfs_da_intnode *to,
+                                   struct xfs_da3_icnode_hdr *from);
+
+#define        XFS_LBSIZE(mp)  (mp)->m_sb.sb_blocksize
 
 /*
  * Directory version 2.
@@ -961,4 +1101,262 @@ xfs_dir2_block_leaf_p(struct xfs_dir2_block_tail *btp)
        return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count);
 }
 
-#endif /* __XFS_DIR2_FORMAT_H__ */
+
+/*
+ * Attribute storage layout
+ *
+ * Attribute lists are structured around Btrees where all the data
+ * elements are in the leaf nodes.  Attribute names are hashed into an int,
+ * then that int is used as the index into the Btree.  Since the hashval
+ * of an attribute name may not be unique, we may have duplicate keys.  The
+ * internal links in the Btree are logical block offsets into the file.
+ *
+ *========================================================================
+ * Attribute structure when equal to XFS_LBSIZE(mp) bytes.
+ *========================================================================
+ *
+ * Struct leaf_entry's are packed from the top.  Name/values grow from the
+ * bottom but are not packed.  The freemap contains run-length-encoded entries
+ * for the free bytes after the leaf_entry's, but only the N largest such,
+ * smaller runs are dropped.  When the freemap doesn't show enough space
+ * for an allocation, we compact the name/value area and try again.  If we
+ * still don't have enough space, then we have to split the block.  The
+ * name/value structs (both local and remote versions) must be 32bit aligned.
+ *
+ * Since we have duplicate hash keys, for each key that matches, compare
+ * the actual name string.  The root and intermediate node search always
+ * takes the first-in-the-block key match found, so we should only have
+ * to work "forw"ard.  If none matches, continue with the "forw"ard leaf
+ * nodes until the hash key changes or the attribute name is found.
+ *
+ * We store the fact that an attribute is a ROOT/USER/SECURE attribute in
+ * the leaf_entry.  The namespaces are independent only because we also look
+ * at the namespace bit when we are looking for a matching attribute name.
+ *
+ * We also store an "incomplete" bit in the leaf_entry.  It shows that an
+ * attribute is in the middle of being created and should not be shown to
+ * the user if we crash during the time that the bit is set.  We clear the
+ * bit when we have finished setting up the attribute.  We do this because
+ * we cannot create some large attributes inside a single transaction, and we
+ * need some indication that we weren't finished if we crash in the middle.
+ */
+#define XFS_ATTR_LEAF_MAPSIZE  3       /* how many freespace slots */
+
+typedef struct xfs_attr_leaf_map {     /* RLE map of free bytes */
+       __be16  base;                     /* base of free region */
+       __be16  size;                     /* length of free region */
+} xfs_attr_leaf_map_t;
+
+typedef struct xfs_attr_leaf_hdr {     /* constant-structure header block */
+       xfs_da_blkinfo_t info;          /* block type, links, etc. */
+       __be16  count;                  /* count of active leaf_entry's */
+       __be16  usedbytes;              /* num bytes of names/values stored */
+       __be16  firstused;              /* first used byte in name area */
+       __u8    holes;                  /* != 0 if blk needs compaction */
+       __u8    pad1;
+       xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
+                                       /* N largest free regions */
+} xfs_attr_leaf_hdr_t;
+
+typedef struct xfs_attr_leaf_entry {   /* sorted on key, not name */
+       __be32  hashval;                /* hash value of name */
+       __be16  nameidx;                /* index into buffer of name/value */
+       __u8    flags;                  /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
+       __u8    pad2;                   /* unused pad byte */
+} xfs_attr_leaf_entry_t;
+
+typedef struct xfs_attr_leaf_name_local {
+       __be16  valuelen;               /* number of bytes in value */
+       __u8    namelen;                /* length of name bytes */
+       __u8    nameval[1];             /* name/value bytes */
+} xfs_attr_leaf_name_local_t;
+
+typedef struct xfs_attr_leaf_name_remote {
+       __be32  valueblk;               /* block number of value bytes */
+       __be32  valuelen;               /* number of bytes in value */
+       __u8    namelen;                /* length of name bytes */
+       __u8    name[1];                /* name bytes */
+} xfs_attr_leaf_name_remote_t;
+
+typedef struct xfs_attr_leafblock {
+       xfs_attr_leaf_hdr_t     hdr;    /* constant-structure header block */
+       xfs_attr_leaf_entry_t   entries[1];     /* sorted on key, not name */
+       xfs_attr_leaf_name_local_t namelist;    /* grows from bottom of buf */
+       xfs_attr_leaf_name_remote_t valuelist;  /* grows from bottom of buf */
+} xfs_attr_leafblock_t;
+
+/*
+ * CRC enabled leaf structures. Called "version 3" structures to match the
+ * version number of the directory and dablk structures for this feature, and
+ * attr2 is already taken by the variable inode attribute fork size feature.
+ */
+struct xfs_attr3_leaf_hdr {
+       struct xfs_da3_blkinfo  info;
+       __be16                  count;
+       __be16                  usedbytes;
+       __be16                  firstused;
+       __u8                    holes;
+       __u8                    pad1;
+       struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
+       __be32                  pad2;           /* 64 bit alignment */
+};
+
+#define XFS_ATTR3_LEAF_CRC_OFF (offsetof(struct xfs_attr3_leaf_hdr, info.crc))
+
+struct xfs_attr3_leafblock {
+       struct xfs_attr3_leaf_hdr       hdr;
+       struct xfs_attr_leaf_entry      entries[1];
+
+       /*
+        * The rest of the block contains the following structures after the
+        * leaf entries, growing from the bottom up. The variables are never
+        * referenced, the locations accessed purely from helper functions.
+        *
+        * struct xfs_attr_leaf_name_local
+        * struct xfs_attr_leaf_name_remote
+        */
+};
+
+/*
+ * incore, neutral version of the attribute leaf header
+ */
+struct xfs_attr3_icleaf_hdr {
+       __uint32_t      forw;
+       __uint32_t      back;
+       __uint16_t      magic;
+       __uint16_t      count;
+       __uint16_t      usedbytes;
+       __uint16_t      firstused;
+       __u8            holes;
+       struct {
+               __uint16_t      base;
+               __uint16_t      size;
+       } freemap[XFS_ATTR_LEAF_MAPSIZE];
+};
+
+/*
+ * Flags used in the leaf_entry[i].flags field.
+ * NOTE: the INCOMPLETE bit must not collide with the flags bits specified
+ * on the system call, they are "or"ed together for various operations.
+ */
+#define        XFS_ATTR_LOCAL_BIT      0       /* attr is stored locally */
+#define        XFS_ATTR_ROOT_BIT       1       /* limit access to trusted attrs */
+#define        XFS_ATTR_SECURE_BIT     2       /* limit access to secure attrs */
+#define        XFS_ATTR_INCOMPLETE_BIT 7       /* attr in middle of create/delete */
+#define XFS_ATTR_LOCAL         (1 << XFS_ATTR_LOCAL_BIT)
+#define XFS_ATTR_ROOT          (1 << XFS_ATTR_ROOT_BIT)
+#define XFS_ATTR_SECURE                (1 << XFS_ATTR_SECURE_BIT)
+#define XFS_ATTR_INCOMPLETE    (1 << XFS_ATTR_INCOMPLETE_BIT)
+
+/*
+ * Conversion macros for converting namespace bits from argument flags
+ * to ondisk flags.
+ */
+#define XFS_ATTR_NSP_ARGS_MASK         (ATTR_ROOT | ATTR_SECURE)
+#define XFS_ATTR_NSP_ONDISK_MASK       (XFS_ATTR_ROOT | XFS_ATTR_SECURE)
+#define XFS_ATTR_NSP_ONDISK(flags)     ((flags) & XFS_ATTR_NSP_ONDISK_MASK)
+#define XFS_ATTR_NSP_ARGS(flags)       ((flags) & XFS_ATTR_NSP_ARGS_MASK)
+#define XFS_ATTR_NSP_ARGS_TO_ONDISK(x) (((x) & ATTR_ROOT ? XFS_ATTR_ROOT : 0) |\
+                                        ((x) & ATTR_SECURE ? XFS_ATTR_SECURE : 0))
+#define XFS_ATTR_NSP_ONDISK_TO_ARGS(x) (((x) & XFS_ATTR_ROOT ? ATTR_ROOT : 0) |\
+                                        ((x) & XFS_ATTR_SECURE ? ATTR_SECURE : 0))
+
+/*
+ * Alignment for namelist and valuelist entries (since they are mixed
+ * there can be only one alignment value)
+ */
+#define        XFS_ATTR_LEAF_NAME_ALIGN        ((uint)sizeof(xfs_dablk_t))
+
+static inline int
+xfs_attr3_leaf_hdr_size(struct xfs_attr_leafblock *leafp)
+{
+       if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
+               return sizeof(struct xfs_attr3_leaf_hdr);
+       return sizeof(struct xfs_attr_leaf_hdr);
+}
+
+static inline struct xfs_attr_leaf_entry *
+xfs_attr3_leaf_entryp(xfs_attr_leafblock_t *leafp)
+{
+       if (leafp->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC))
+               return &((struct xfs_attr3_leafblock *)leafp)->entries[0];
+       return &leafp->entries[0];
+}
+
+/*
+ * Cast typed pointers for "local" and "remote" name/value structs.
+ */
+static inline char *
+xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
+{
+       struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp);
+
+       return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];
+}
+
+static inline xfs_attr_leaf_name_remote_t *
+xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)
+{
+       return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx);
+}
+
+static inline xfs_attr_leaf_name_local_t *
+xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
+{
+       return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx);
+}
+
+/*
+ * Calculate total bytes used (including trailing pad for alignment) for
+ * a "local" name/value structure, a "remote" name/value structure, and
+ * a pointer which might be either.
+ */
+static inline int xfs_attr_leaf_entsize_remote(int nlen)
+{
+       return ((uint)sizeof(xfs_attr_leaf_name_remote_t) - 1 + (nlen) + \
+               XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
+}
+
+static inline int xfs_attr_leaf_entsize_local(int nlen, int vlen)
+{
+       return ((uint)sizeof(xfs_attr_leaf_name_local_t) - 1 + (nlen) + (vlen) +
+               XFS_ATTR_LEAF_NAME_ALIGN - 1) & ~(XFS_ATTR_LEAF_NAME_ALIGN - 1);
+}
+
+static inline int xfs_attr_leaf_entsize_local_max(int bsize)
+{
+       return (((bsize) >> 1) + ((bsize) >> 2));
+}
+
+
+
+/*
+ * Remote attribute block format definition
+ *
+ * There is one of these headers per filesystem block in a remote attribute.
+ * This is done to ensure there is a 1:1 mapping between the attribute value
+ * length and the number of blocks needed to store the attribute. This makes the
+ * verification of a buffer a little more complex, but greatly simplifies the
+ * allocation, reading and writing of these attributes as we don't have to guess
+ * the number of blocks needed to store the attribute data.
+ */
+#define XFS_ATTR3_RMT_MAGIC    0x5841524d      /* XARM */
+
+struct xfs_attr3_rmt_hdr {
+       __be32  rm_magic;
+       __be32  rm_offset;
+       __be32  rm_bytes;
+       __be32  rm_crc;
+       uuid_t  rm_uuid;
+       __be64  rm_owner;
+       __be64  rm_blkno;
+       __be64  rm_lsn;
+};
+
+#define XFS_ATTR3_RMT_CRC_OFF  offsetof(struct xfs_attr3_rmt_hdr, rm_crc)
+
+#define XFS_ATTR3_RMT_BUF_SPACE(mp, bufsize)   \
+       ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
+                       sizeof(struct xfs_attr3_rmt_hdr) : 0))
+
+#endif /* __XFS_DA_FORMAT_H__ */
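
The xfs_attr_leaf_entsize_local() helper added above rounds the header-plus-name-plus-value size up to XFS_ATTR_LEAF_NAME_ALIGN. A self-contained worked example of that arithmetic follows; the two constants are assumptions (a 4-byte local header with no compiler padding, and 4-byte alignment), not values taken from this patch.

#include <stdio.h>

/* Assumed stand-ins for sizeof(xfs_attr_leaf_name_local_t) and
 * XFS_ATTR_LEAF_NAME_ALIGN; illustrative only. */
#define LOCAL_HDR_SIZE  4
#define NAME_ALIGN      4

static int entsize_local(int nlen, int vlen)
{
        return (LOCAL_HDR_SIZE - 1 + nlen + vlen + NAME_ALIGN - 1)
                        & ~(NAME_ALIGN - 1);
}

int main(void)
{
        /* an 8-byte name with a 20-byte value: 3 + 8 + 20 = 31, rounded to 32 */
        printf("%d\n", entsize_local(8, 20));
        return 0;
}
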
index edf203ab50afa734a74faaace8e626721e7fc1bb..38bf9324302c497aadf73d22524c1271b80f18a5 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_dinode.h"
 
 struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
 
index 12dad188939df2e29c4420574bd20bfb319aae35..9f3f83a5e2da9db832e2dc22b6e74a7eb8655350 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_buf_item.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 /*
  * Local function prototypes.
index 47e1326c169a08c71d8d21ac51e8d113444d3295..ccfeb4d8376a6aa91985ce30c19285bc221640f4 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
 
index 1021c8356d0836318e300adefc4a35f170d120c3..51fdc11a1e2c1e4455ac8aa5c8649e21bf4076eb 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
 
index 4c3dba7ffb7439d250b0af44e4de237a9359a795..b8381646b8af6d9c6715781a84f704b558d8c25c 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_cksum.h"
 
@@ -1101,7 +1100,7 @@ xfs_dir2_leafn_rebalance(
                state->inleaf = 1;
                blk2->index = 0;
                xfs_alert(args->dp->i_mount,
-       "%s: picked the wrong leaf? reverting original leaf: blk1->index %d\n",
+       "%s: picked the wrong leaf? reverting original leaf: blk1->index %d",
                        __func__, blk1->index);
        }
 }
index 8f84153e98a8c0f52e27aa9a0a1352779e29e61e..45c9ce8cdb28110a4e1436d5b48ad7d30807c650 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
+#include "xfs_trans.h"
+#include "xfs_dinode.h"
 
 /*
  * Directory file type support functions
index 3ef6d402084ccf9dabc622ee5db22b41170e3e54..8811ee5eaec65e7762d261781a6f0c2f7303c64b 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_error.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_trace.h"
+#include "xfs_dinode.h"
 
 /*
  * Prototypes for internal functions.
index 45560ee1a4ba8b1ccfdc36f9616cc5355e03558d..8367d6dc18c9df7f51cd565e11395cba24929a53 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_quota.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
 #include "xfs_inode.h"
+#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
 #include "xfs_extent_busy.h"
 #include "xfs_discard.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
 
 STATIC int
 xfs_trim_extents(
index 1ee776d477c3d1b240378993f946e9716f62c83d..6b1e695caf0ebf8bf256150d2efc9483667b837d 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
+#include "xfs_alloc.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_space.h"
 #include "xfs_trans_priv.h"
 #include "xfs_qm.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_bmap_btree.h"
 
 /*
  * Lock order:
@@ -292,118 +292,6 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
        dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
 }
 
-STATIC bool
-xfs_dquot_buf_verify_crc(
-       struct xfs_mount        *mp,
-       struct xfs_buf          *bp)
-{
-       struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
-       int                     ndquots;
-       int                     i;
-
-       if (!xfs_sb_version_hascrc(&mp->m_sb))
-               return true;
-
-       /*
-        * if we are in log recovery, the quota subsystem has not been
-        * initialised so we have no quotainfo structure. In that case, we need
-        * to manually calculate the number of dquots in the buffer.
-        */
-       if (mp->m_quotainfo)
-               ndquots = mp->m_quotainfo->qi_dqperchunk;
-       else
-               ndquots = xfs_qm_calc_dquots_per_chunk(mp,
-                                       XFS_BB_TO_FSB(mp, bp->b_length));
-
-       for (i = 0; i < ndquots; i++, d++) {
-               if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
-                                XFS_DQUOT_CRC_OFF))
-                       return false;
-               if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
-                       return false;
-       }
-       return true;
-}
-
-STATIC bool
-xfs_dquot_buf_verify(
-       struct xfs_mount        *mp,
-       struct xfs_buf          *bp)
-{
-       struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
-       xfs_dqid_t              id = 0;
-       int                     ndquots;
-       int                     i;
-
-       /*
-        * if we are in log recovery, the quota subsystem has not been
-        * initialised so we have no quotainfo structure. In that case, we need
-        * to manually calculate the number of dquots in the buffer.
-        */
-       if (mp->m_quotainfo)
-               ndquots = mp->m_quotainfo->qi_dqperchunk;
-       else
-               ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);
-
-       /*
-        * On the first read of the buffer, verify that each dquot is valid.
-        * We don't know what the id of the dquot is supposed to be, just that
-        * they should be increasing monotonically within the buffer. If the
-        * first id is corrupt, then it will fail on the second dquot in the
-        * buffer so corruptions could point to the wrong dquot in this case.
-        */
-       for (i = 0; i < ndquots; i++) {
-               struct xfs_disk_dquot   *ddq;
-               int                     error;
-
-               ddq = &d[i].dd_diskdq;
-
-               if (i == 0)
-                       id = be32_to_cpu(ddq->d_id);
-
-               error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
-                                      "xfs_dquot_buf_verify");
-               if (error)
-                       return false;
-       }
-       return true;
-}
-
-static void
-xfs_dquot_buf_read_verify(
-       struct xfs_buf  *bp)
-{
-       struct xfs_mount        *mp = bp->b_target->bt_mount;
-
-       if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-               xfs_buf_ioerror(bp, EFSCORRUPTED);
-       }
-}
-
-/*
- * we don't calculate the CRC here as that is done when the dquot is flushed to
- * the buffer after the update is done. This ensures that the dquot in the
- * buffer always has an up-to-date CRC value.
- */
-void
-xfs_dquot_buf_write_verify(
-       struct xfs_buf  *bp)
-{
-       struct xfs_mount        *mp = bp->b_target->bt_mount;
-
-       if (!xfs_dquot_buf_verify(mp, bp)) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
-               xfs_buf_ioerror(bp, EFSCORRUPTED);
-               return;
-       }
-}
-
-const struct xfs_buf_ops xfs_dquot_buf_ops = {
-       .verify_read = xfs_dquot_buf_read_verify,
-       .verify_write = xfs_dquot_buf_write_verify,
-};
-
 /*
  * Allocate a block and fill it with dquots.
  * This is called when the bmapi finds a hole.
@@ -514,6 +402,7 @@ xfs_qm_dqalloc(
 
        return (error);
 }
+
 STATIC int
 xfs_qm_dqrepair(
        struct xfs_mount        *mp,
@@ -547,7 +436,7 @@ xfs_qm_dqrepair(
        /* Do the actual repair of dquots in this buffer */
        for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
                ddq = &d[i].dd_diskdq;
-               error = xfs_qm_dqcheck(mp, ddq, firstid + i,
+               error = xfs_dqcheck(mp, ddq, firstid + i,
                                       dqp->dq_flags & XFS_DQ_ALLTYPES,
                                       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
                if (error) {
@@ -1133,7 +1022,7 @@ xfs_qm_dqflush(
        /*
         * A simple sanity check in case we got a corrupted dquot..
         */
-       error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
+       error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
                           XFS_QMOPT_DOWARN, "dqflush (incore copy)");
        if (error) {
                xfs_buf_relse(bp);
index 55abbca2883d84231e7ca61a722f2b394b41376d..d22ed0053c32ea0dd687f7e2ab439b8181d4cbc8 100644 (file)
@@ -172,6 +172,4 @@ static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
        return dqp;
 }
 
-extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-
 #endif /* __XFS_DQUOT_H__ */
diff --git a/fs/xfs/xfs_dquot_buf.c b/fs/xfs/xfs_dquot_buf.c
new file mode 100644 (file)
index 0000000..aaaf41b
--- /dev/null
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_quota.h"
+#include "xfs_trans.h"
+#include "xfs_qm.h"
+#include "xfs_error.h"
+#include "xfs_cksum.h"
+#include "xfs_trace.h"
+
+int
+xfs_calc_dquots_per_chunk(
+       struct xfs_mount        *mp,
+       unsigned int            nbblks) /* basic block units */
+{
+       unsigned int    ndquots;
+
+       ASSERT(nbblks > 0);
+       ndquots = BBTOB(nbblks);
+       do_div(ndquots, sizeof(xfs_dqblk_t));
+
+       return ndquots;
+}
+
+/*
+ * Do some primitive error checking on ondisk dquot data structures.
+ */
+int
+xfs_dqcheck(
+       struct xfs_mount *mp,
+       xfs_disk_dquot_t *ddq,
+       xfs_dqid_t       id,
+       uint             type,    /* used only when IO_dorepair is true */
+       uint             flags,
+       char             *str)
+{
+       xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
+       int             errs = 0;
+
+       /*
+        * We can encounter an uninitialized dquot buffer for 2 reasons:
+        * 1. If we crash while deleting the quotainode(s), and those blks got
+        *    used for user data. This is because we take the path of regular
+        *    file deletion; however, the size field of quotainodes is never
+        *    updated, so all the tricks that we play in itruncate_finish
+        *    don't quite matter.
+        *
+        * 2. We don't play the quota buffers when there's a quotaoff logitem.
+        *    But the allocation will be replayed so we'll end up with an
+        *    uninitialized quota block.
+        *
+        * This is all fine; things are still consistent, and we haven't lost
+        * any quota information. Just don't complain about bad dquot blks.
+        */
+       if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
+               if (flags & XFS_QMOPT_DOWARN)
+                       xfs_alert(mp,
+                       "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
+                       str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
+               errs++;
+       }
+       if (ddq->d_version != XFS_DQUOT_VERSION) {
+               if (flags & XFS_QMOPT_DOWARN)
+                       xfs_alert(mp,
+                       "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
+                       str, id, ddq->d_version, XFS_DQUOT_VERSION);
+               errs++;
+       }
+
+       if (ddq->d_flags != XFS_DQ_USER &&
+           ddq->d_flags != XFS_DQ_PROJ &&
+           ddq->d_flags != XFS_DQ_GROUP) {
+               if (flags & XFS_QMOPT_DOWARN)
+                       xfs_alert(mp,
+                       "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
+                       str, id, ddq->d_flags);
+               errs++;
+       }
+
+       if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
+               if (flags & XFS_QMOPT_DOWARN)
+                       xfs_alert(mp,
+                       "%s : ondisk-dquot 0x%p, ID mismatch: "
+                       "0x%x expected, found id 0x%x",
+                       str, ddq, id, be32_to_cpu(ddq->d_id));
+               errs++;
+       }
+
+       if (!errs && ddq->d_id) {
+               if (ddq->d_blk_softlimit &&
+                   be64_to_cpu(ddq->d_bcount) >
+                               be64_to_cpu(ddq->d_blk_softlimit)) {
+                       if (!ddq->d_btimer) {
+                               if (flags & XFS_QMOPT_DOWARN)
+                                       xfs_alert(mp,
+                       "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
+                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
+                               errs++;
+                       }
+               }
+               if (ddq->d_ino_softlimit &&
+                   be64_to_cpu(ddq->d_icount) >
+                               be64_to_cpu(ddq->d_ino_softlimit)) {
+                       if (!ddq->d_itimer) {
+                               if (flags & XFS_QMOPT_DOWARN)
+                                       xfs_alert(mp,
+                       "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
+                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
+                               errs++;
+                       }
+               }
+               if (ddq->d_rtb_softlimit &&
+                   be64_to_cpu(ddq->d_rtbcount) >
+                               be64_to_cpu(ddq->d_rtb_softlimit)) {
+                       if (!ddq->d_rtbtimer) {
+                               if (flags & XFS_QMOPT_DOWARN)
+                                       xfs_alert(mp,
+                       "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
+                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
+                               errs++;
+                       }
+               }
+       }
+
+       if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
+               return errs;
+
+       if (flags & XFS_QMOPT_DOWARN)
+               xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
+
+       /*
+        * Typically, a repair is only requested by quotacheck.
+        */
+       ASSERT(id != -1);
+       ASSERT(flags & XFS_QMOPT_DQREPAIR);
+       memset(d, 0, sizeof(xfs_dqblk_t));
+
+       d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
+       d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
+       d->dd_diskdq.d_flags = type;
+       d->dd_diskdq.d_id = cpu_to_be32(id);
+
+       if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
+               xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF);
+       }
+
+       return errs;
+}
+
+STATIC bool
+xfs_dquot_buf_verify_crc(
+       struct xfs_mount        *mp,
+       struct xfs_buf          *bp)
+{
+       struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
+       int                     ndquots;
+       int                     i;
+
+       if (!xfs_sb_version_hascrc(&mp->m_sb))
+               return true;
+
+       /*
+        * if we are in log recovery, the quota subsystem has not been
+        * initialised so we have no quotainfo structure. In that case, we need
+        * to manually calculate the number of dquots in the buffer.
+        */
+       if (mp->m_quotainfo)
+               ndquots = mp->m_quotainfo->qi_dqperchunk;
+       else
+               ndquots = xfs_calc_dquots_per_chunk(mp,
+                                       XFS_BB_TO_FSB(mp, bp->b_length));
+
+       for (i = 0; i < ndquots; i++, d++) {
+               if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
+                                XFS_DQUOT_CRC_OFF))
+                       return false;
+               if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
+                       return false;
+       }
+       return true;
+}
+
+STATIC bool
+xfs_dquot_buf_verify(
+       struct xfs_mount        *mp,
+       struct xfs_buf          *bp)
+{
+       struct xfs_dqblk        *d = (struct xfs_dqblk *)bp->b_addr;
+       xfs_dqid_t              id = 0;
+       int                     ndquots;
+       int                     i;
+
+       /*
+        * if we are in log recovery, the quota subsystem has not been
+        * initialised so we have no quotainfo structure. In that case, we need
+        * to manually calculate the number of dquots in the buffer.
+        */
+       if (mp->m_quotainfo)
+               ndquots = mp->m_quotainfo->qi_dqperchunk;
+       else
+               ndquots = xfs_calc_dquots_per_chunk(mp, bp->b_length);
+
+       /*
+        * On the first read of the buffer, verify that each dquot is valid.
+        * We don't know what the id of the dquot is supposed to be, just that
+        * they should be increasing monotonically within the buffer. If the
+        * first id is corrupt, then it will fail on the second dquot in the
+        * buffer so corruptions could point to the wrong dquot in this case.
+        */
+       for (i = 0; i < ndquots; i++) {
+               struct xfs_disk_dquot   *ddq;
+               int                     error;
+
+               ddq = &d[i].dd_diskdq;
+
+               if (i == 0)
+                       id = be32_to_cpu(ddq->d_id);
+
+               error = xfs_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
+                                      "xfs_dquot_buf_verify");
+               if (error)
+                       return false;
+       }
+       return true;
+}
+
+static void
+xfs_dquot_buf_read_verify(
+       struct xfs_buf  *bp)
+{
+       struct xfs_mount        *mp = bp->b_target->bt_mount;
+
+       if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
+               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+               xfs_buf_ioerror(bp, EFSCORRUPTED);
+       }
+}
+
+/*
+ * we don't calculate the CRC here as that is done when the dquot is flushed to
+ * the buffer after the update is done. This ensures that the dquot in the
+ * buffer always has an up-to-date CRC value.
+ */
+void
+xfs_dquot_buf_write_verify(
+       struct xfs_buf  *bp)
+{
+       struct xfs_mount        *mp = bp->b_target->bt_mount;
+
+       if (!xfs_dquot_buf_verify(mp, bp)) {
+               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
+               xfs_buf_ioerror(bp, EFSCORRUPTED);
+               return;
+       }
+}
+
+const struct xfs_buf_ops xfs_dquot_buf_ops = {
+       .verify_read = xfs_dquot_buf_read_verify,
+       .verify_write = xfs_dquot_buf_write_verify,
+};
+
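The new fs/xfs/xfs_dquot_buf.c above pulls xfs_calc_dquots_per_chunk(), xfs_dqcheck() and the dquot buffer verifiers into one shared file. As a rough illustration of the per-chunk calculation — basic blocks are 512 bytes (BBTOB) and the buffer holds as many on-disk dquot records as fit — here is a standalone, userspace-style sketch; the 136-byte record size is an assumed placeholder for sizeof(struct xfs_dqblk), not a value taken from this patch.

/*
 * Standalone sketch of the xfs_calc_dquots_per_chunk() arithmetic shown
 * above.  BBSIZE is the XFS basic-block size; DQBLK_SIZE is an assumed
 * placeholder for the on-disk dquot record size, not from this patch.
 */
#include <stdio.h>

#define BBSIZE          512u    /* bytes per basic block (BBTOB) */
#define DQBLK_SIZE      136u    /* assumed sizeof(struct xfs_dqblk) */

static unsigned int calc_dquots_per_chunk(unsigned int nbblks)
{
        /* mirrors BBTOB(nbblks) followed by do_div() by the record size */
        return (nbblks * BBSIZE) / DQBLK_SIZE;
}

int main(void)
{
        /* e.g. an 8 basic-block (4 KiB) quota buffer */
        printf("%u dquots per chunk\n", calc_dquots_per_chunk(8));
        return 0;
}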
index e838d84b4e85697917c8623418c06ae28140a8b7..92e5f62eefc6612fb5890631edd42629af1e768b 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_qm.h"
+#include "xfs_log.h"
 
 static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
 {
index 1123d93ff79546efe3a9d962460ab1e44e2e1bac..9995b807d627eb564da0747124359caa6141ee57 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_format.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
 #include "xfs_error.h"
 
 #ifdef DEBUG
@@ -159,7 +156,7 @@ xfs_error_report(
 {
        if (level <= xfs_error_level) {
                xfs_alert_tag(mp, XFS_PTAG_ERROR_REPORT,
-               "Internal error %s at line %d of file %s.  Caller 0x%p\n",
+               "Internal error %s at line %d of file %s.  Caller 0x%p",
                            tag, linenum, filename, ra);
 
                xfs_stack_trace();
index 066df425c14ffca5b4dacb20f7b3c6fcdd139acb..1399e187d425dc7af0f8b5062afaf312fe5a0927 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_da_format.h"
 #include "xfs_dir2.h"
 #include "xfs_export.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_log.h"
 
 /*
  * Note that we only accept fileids which are long enough rather than allow
index e43708e2f0806d4daae241ee32e18abcac1a017f..fd22f69049d49861ff1f853ec11a2d23e3eef03f 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_alloc.h"
-#include "xfs_inode.h"
 #include "xfs_extent_busy.h"
 #include "xfs_trace.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
 
 void
 xfs_extent_busy_insert(
index 985412d65ba5119519300a5b414d7d2ea8ba1707..bfff284d2dcce434a16c72df62713098a812dfd4 100644 (file)
 #ifndef __XFS_EXTENT_BUSY_H__
 #define        __XFS_EXTENT_BUSY_H__
 
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_alloc_arg;
+
 /*
  * Busy block/extent entry.  Indexed by a rbtree in perag to mark blocks that
  * have been freed but whose transactions aren't committed to disk yet.
index dc53e8febbbeaa54812b4e72dc25938718da69c1..3680d04f973fa1c9de4390079a5c5ba3d8e69eae 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_buf_item.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
+#include "xfs_buf_item.h"
 #include "xfs_extfree_item.h"
 
 
index 4c749ab543d0de17646993a282f925ebd0314ccf..e6035bd58294d1a03c50933ec113c46b0465d4aa 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_trans.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_ioctl.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
 
 #include <linux/aio.h>
 #include <linux/dcache.h>
@@ -227,10 +229,9 @@ xfs_file_fsync(
 }
 
 STATIC ssize_t
-xfs_file_aio_read(
+xfs_file_read_iter(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos)
 {
        struct file             *file = iocb->ki_filp;
@@ -251,9 +252,7 @@ xfs_file_aio_read(
        if (file->f_mode & FMODE_NOCMTIME)
                ioflags |= IO_INVIS;
 
-       ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
-       if (ret < 0)
-               return ret;
+       size = iov_iter_count(iter);
 
        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
@@ -306,7 +305,7 @@ xfs_file_aio_read(
 
        trace_xfs_file_read(ip, size, pos, ioflags);
 
-       ret = generic_file_aio_read(iocb, iovp, nr_segs, pos);
+       ret = generic_file_read_iter(iocb, iter, pos);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);
 
@@ -622,10 +621,9 @@ restart:
 STATIC ssize_t
 xfs_file_dio_aio_write(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos,
-       size_t                  ocount)
+       size_t                  count)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -633,7 +631,6 @@ xfs_file_dio_aio_write(
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 ret = 0;
-       size_t                  count = ocount;
        int                     unaligned_io = 0;
        int                     iolock;
        struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
@@ -693,8 +690,8 @@ xfs_file_dio_aio_write(
        }
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
-       ret = generic_file_direct_write(iocb, iovp,
-                       &nr_segs, pos, &iocb->ki_pos, count, ocount);
+       ret = generic_file_direct_write_iter(iocb, iter,
+                       pos, &iocb->ki_pos, count);
 
 out:
        xfs_rw_iunlock(ip, iolock);
@@ -707,10 +704,9 @@ out:
 STATIC ssize_t
 xfs_file_buffered_aio_write(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos,
-       size_t                  ocount)
+       size_t                  count)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -719,7 +715,6 @@ xfs_file_buffered_aio_write(
        ssize_t                 ret;
        int                     enospc = 0;
        int                     iolock = XFS_IOLOCK_EXCL;
-       size_t                  count = ocount;
 
        xfs_rw_ilock(ip, iolock);
 
@@ -732,7 +727,7 @@ xfs_file_buffered_aio_write(
 
 write_retry:
        trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
-       ret = generic_file_buffered_write(iocb, iovp, nr_segs,
+       ret = generic_file_buffered_write_iter(iocb, iter,
                        pos, &iocb->ki_pos, count, 0);
 
        /*
@@ -753,10 +748,9 @@ out:
 }
 
 STATIC ssize_t
-xfs_file_aio_write(
+xfs_file_write_iter(
        struct kiocb            *iocb,
-       const struct iovec      *iovp,
-       unsigned long           nr_segs,
+       struct iov_iter         *iter,
        loff_t                  pos)
 {
        struct file             *file = iocb->ki_filp;
@@ -764,17 +758,15 @@ xfs_file_aio_write(
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
-       size_t                  ocount = 0;
+       size_t                  count = 0;
 
        XFS_STATS_INC(xs_write_calls);
 
        BUG_ON(iocb->ki_pos != pos);
 
-       ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
-       if (ret)
-               return ret;
+       count = iov_iter_count(iter);
 
-       if (ocount == 0)
+       if (count == 0)
                return 0;
 
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
@@ -783,10 +775,9 @@ xfs_file_aio_write(
        }
 
        if (unlikely(file->f_flags & O_DIRECT))
-               ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
+               ret = xfs_file_dio_aio_write(iocb, iter, pos, count);
        else
-               ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
-                                                 ocount);
+               ret = xfs_file_buffered_aio_write(iocb, iter, pos, count);
 
        if (ret > 0) {
                ssize_t err;
@@ -805,44 +796,64 @@ out:
 
 STATIC long
 xfs_file_fallocate(
-       struct file     *file,
-       int             mode,
-       loff_t          offset,
-       loff_t          len)
+       struct file             *file,
+       int                     mode,
+       loff_t                  offset,
+       loff_t                  len)
 {
-       struct inode    *inode = file_inode(file);
-       long            error;
-       loff_t          new_size = 0;
-       xfs_flock64_t   bf;
-       xfs_inode_t     *ip = XFS_I(inode);
-       int             cmd = XFS_IOC_RESVSP;
-       int             attr_flags = XFS_ATTR_NOLOCK;
+       struct inode            *inode = file_inode(file);
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_trans        *tp;
+       long                    error;
+       loff_t                  new_size = 0;
 
+       if (!S_ISREG(inode->i_mode))
+               return -EINVAL;
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;
 
-       bf.l_whence = 0;
-       bf.l_start = offset;
-       bf.l_len = len;
-
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               error = xfs_free_file_space(ip, offset, len);
+               if (error)
+                       goto out_unlock;
+       } else {
+               if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+                   offset + len > i_size_read(inode)) {
+                       new_size = offset + len;
+                       error = -inode_newsize_ok(inode, new_size);
+                       if (error)
+                               goto out_unlock;
+               }
 
-       if (mode & FALLOC_FL_PUNCH_HOLE)
-               cmd = XFS_IOC_UNRESVSP;
-
-       /* check the new inode size is valid before allocating */
-       if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-           offset + len > i_size_read(inode)) {
-               new_size = offset + len;
-               error = inode_newsize_ok(inode, new_size);
+               error = xfs_alloc_file_space(ip, offset, len,
+                                            XFS_BMAPI_PREALLOC);
                if (error)
                        goto out_unlock;
        }
 
-       if (file->f_flags & O_DSYNC)
-               attr_flags |= XFS_ATTR_SYNC;
+       tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
+       error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               goto out_unlock;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+       ip->i_d.di_mode &= ~S_ISUID;
+       if (ip->i_d.di_mode & S_IXGRP)
+               ip->i_d.di_mode &= ~S_ISGID;
+
+       if (!(mode & FALLOC_FL_PUNCH_HOLE))
+               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
 
-       error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
+       xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       if (file->f_flags & O_DSYNC)
+               xfs_trans_set_sync(tp);
+       error = xfs_trans_commit(tp, 0);
        if (error)
                goto out_unlock;
 
@@ -852,12 +863,12 @@ xfs_file_fallocate(
 
                iattr.ia_valid = ATTR_SIZE;
                iattr.ia_size = new_size;
-               error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
+               error = xfs_setattr_size(ip, &iattr);
        }
 
 out_unlock:
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-       return error;
+       return -error;
 }
 
 
@@ -1411,8 +1422,8 @@ const struct file_operations xfs_file_operations = {
        .llseek         = xfs_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = xfs_file_aio_read,
-       .aio_write      = xfs_file_aio_write,
+       .read_iter      = xfs_file_read_iter,
+       .write_iter     = xfs_file_write_iter,
        .splice_read    = xfs_file_splice_read,
        .splice_write   = xfs_file_splice_write,
        .unlocked_ioctl = xfs_file_ioctl,
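The xfs_file.c hunks above switch the read/write paths from iovec arrays plus generic_segment_checks() to iov_iter, where the iterator already carries the total byte count. A loose userspace analogue of that change follows; the struct and function names are illustrative only, not the kernel API.

/*
 * Userspace analogue of replacing generic_segment_checks() with
 * iov_iter_count(): the iterator tracks its total byte count, so the
 * caller no longer re-walks and validates the iovec array.
 */
#include <stdio.h>
#include <sys/uio.h>

struct iter_sketch {
        const struct iovec      *iov;
        unsigned long           nr_segs;
        size_t                  count;  /* total bytes left to transfer */
};

static size_t iter_count(const struct iter_sketch *it)
{
        return it->count;               /* O(1), no per-segment walk */
}

int main(void)
{
        char a[16], b[32];
        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        struct iter_sketch it = { iov, 2, sizeof(a) + sizeof(b) };

        printf("bytes to transfer: %zu\n", iter_count(&it));
        return 0;
}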
index ce78e654d37b73693aa4c637e021dda9154ad5d6..12b6e7701985378e56f619dfd58f79be0d45c94c 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
-#include "xfs_log.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inum.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_ag.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_inum.h"
+#include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_alloc.h"
 #include "xfs_mru_cache.h"
+#include "xfs_dinode.h"
 #include "xfs_filestream.h"
 #include "xfs_trace.h"
 
index 35c08ff54ca079dcf6c718d6b2d6b75bf24f1721..b6ab5a3cfa125d2204d19760dccac917d0ad957f 100644 (file)
@@ -156,14 +156,259 @@ struct xfs_dsymlink_hdr {
        ((bufsize) - (xfs_sb_version_hascrc(&(mp)->m_sb) ? \
                        sizeof(struct xfs_dsymlink_hdr) : 0))
 
-int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen);
-int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
-                       uint32_t size, struct xfs_buf *bp);
-bool xfs_symlink_hdr_ok(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
-                       uint32_t size, struct xfs_buf *bp);
-void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
-                                struct xfs_inode *ip, struct xfs_ifork *ifp);
-
-extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+
+/*
+ * Allocation Btree format definitions
+ *
+ * There are two on-disk btrees, one sorted by blockno and one sorted
+ * by blockcount and blockno.  All blocks look the same to make the code
+ * simpler; if we have time later, we'll make the optimizations.
+ */
+#define        XFS_ABTB_MAGIC          0x41425442      /* 'ABTB' for bno tree */
+#define        XFS_ABTB_CRC_MAGIC      0x41423342      /* 'AB3B' */
+#define        XFS_ABTC_MAGIC          0x41425443      /* 'ABTC' for cnt tree */
+#define        XFS_ABTC_CRC_MAGIC      0x41423343      /* 'AB3C' */
+
+/*
+ * Data record/key structure
+ */
+typedef struct xfs_alloc_rec {
+       __be32          ar_startblock;  /* starting block number */
+       __be32          ar_blockcount;  /* count of free blocks */
+} xfs_alloc_rec_t, xfs_alloc_key_t;
+
+typedef struct xfs_alloc_rec_incore {
+       xfs_agblock_t   ar_startblock;  /* starting block number */
+       xfs_extlen_t    ar_blockcount;  /* count of free blocks */
+} xfs_alloc_rec_incore_t;
+
+/* btree pointer type */
+typedef __be32 xfs_alloc_ptr_t;
+
+/*
+ * Block numbers in the AG:
+ * SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
+ */
+#define        XFS_BNO_BLOCK(mp)       ((xfs_agblock_t)(XFS_AGFL_BLOCK(mp) + 1))
+#define        XFS_CNT_BLOCK(mp)       ((xfs_agblock_t)(XFS_BNO_BLOCK(mp) + 1))
+
+
+/*
+ * Inode Allocation Btree format definitions
+ *
+ * There is a btree for the inode map per allocation group.
+ */
+#define        XFS_IBT_MAGIC           0x49414254      /* 'IABT' */
+#define        XFS_IBT_CRC_MAGIC       0x49414233      /* 'IAB3' */
+
+typedef        __uint64_t      xfs_inofree_t;
+#define        XFS_INODES_PER_CHUNK            (NBBY * sizeof(xfs_inofree_t))
+#define        XFS_INODES_PER_CHUNK_LOG        (XFS_NBBYLOG + 3)
+#define        XFS_INOBT_ALL_FREE              ((xfs_inofree_t)-1)
+#define        XFS_INOBT_MASK(i)               ((xfs_inofree_t)1 << (i))
+
+static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
+{
+       return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
+}
+
+/*
+ * Data record structure
+ */
+typedef struct xfs_inobt_rec {
+       __be32          ir_startino;    /* starting inode number */
+       __be32          ir_freecount;   /* count of free inodes (set bits) */
+       __be64          ir_free;        /* free inode mask */
+} xfs_inobt_rec_t;
+
+typedef struct xfs_inobt_rec_incore {
+       xfs_agino_t     ir_startino;    /* starting inode number */
+       __int32_t       ir_freecount;   /* count of free inodes (set bits) */
+       xfs_inofree_t   ir_free;        /* free inode mask */
+} xfs_inobt_rec_incore_t;
+
+
+/*
+ * Key structure
+ */
+typedef struct xfs_inobt_key {
+       __be32          ir_startino;    /* starting inode number */
+} xfs_inobt_key_t;
+
+/* btree pointer type */
+typedef __be32 xfs_inobt_ptr_t;
+
+/*
+ * block numbers in the AG.
+ */
+#define        XFS_IBT_BLOCK(mp)               ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1))
+#define        XFS_PREALLOC_BLOCKS(mp)         ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
+
+
+
+/*
+ * BMAP Btree format definitions
+ *
+ * This includes both the root block definition that sits inside an inode fork
+ * and the record/pointer formats for the leaf/node in the blocks.
+ */
+#define XFS_BMAP_MAGIC         0x424d4150      /* 'BMAP' */
+#define XFS_BMAP_CRC_MAGIC     0x424d4133      /* 'BMA3' */
+
+/*
+ * Bmap root header, on-disk form only.
+ */
+typedef struct xfs_bmdr_block {
+       __be16          bb_level;       /* 0 is a leaf */
+       __be16          bb_numrecs;     /* current # of data records */
+} xfs_bmdr_block_t;
+
+/*
+ * Bmap btree record and extent descriptor.
+ *  l0:63 is an extent flag (value 1 indicates non-normal).
+ *  l0:9-62 are startoff.
+ *  l0:0-8 and l1:21-63 are startblock.
+ *  l1:0-20 are blockcount.
+ */
+#define BMBT_EXNTFLAG_BITLEN   1
+#define BMBT_STARTOFF_BITLEN   54
+#define BMBT_STARTBLOCK_BITLEN 52
+#define BMBT_BLOCKCOUNT_BITLEN 21
+
+typedef struct xfs_bmbt_rec {
+       __be64                  l0, l1;
+} xfs_bmbt_rec_t;
+
+typedef __uint64_t     xfs_bmbt_rec_base_t;    /* use this for casts */
+typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
+
+typedef struct xfs_bmbt_rec_host {
+       __uint64_t              l0, l1;
+} xfs_bmbt_rec_host_t;
+
+/*
+ * Values and macros for delayed-allocation startblock fields.
+ */
+#define STARTBLOCKVALBITS      17
+#define STARTBLOCKMASKBITS     (15 + XFS_BIG_BLKNOS * 20)
+#define DSTARTBLOCKMASKBITS    (15 + 20)
+#define STARTBLOCKMASK         \
+       (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
+#define DSTARTBLOCKMASK                \
+       (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
+
+static inline int isnullstartblock(xfs_fsblock_t x)
+{
+       return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK;
+}
+
+static inline int isnulldstartblock(xfs_dfsbno_t x)
+{
+       return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK;
+}
+
+static inline xfs_fsblock_t nullstartblock(int k)
+{
+       ASSERT(k < (1 << STARTBLOCKVALBITS));
+       return STARTBLOCKMASK | (k);
+}
+
+static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
+{
+       return (xfs_filblks_t)((x) & ~STARTBLOCKMASK);
+}
+
+/*
+ * Possible extent formats.
+ */
+typedef enum {
+       XFS_EXTFMT_NOSTATE = 0,
+       XFS_EXTFMT_HASSTATE
+} xfs_exntfmt_t;
+
+/*
+ * Possible extent states.
+ */
+typedef enum {
+       XFS_EXT_NORM, XFS_EXT_UNWRITTEN,
+       XFS_EXT_DMAPI_OFFLINE, XFS_EXT_INVALID
+} xfs_exntst_t;
+
+/*
+ * Incore version of above.
+ */
+typedef struct xfs_bmbt_irec
+{
+       xfs_fileoff_t   br_startoff;    /* starting file offset */
+       xfs_fsblock_t   br_startblock;  /* starting block number */
+       xfs_filblks_t   br_blockcount;  /* number of blocks */
+       xfs_exntst_t    br_state;       /* extent state */
+} xfs_bmbt_irec_t;
+
+/*
+ * Key structure for non-leaf levels of the tree.
+ */
+typedef struct xfs_bmbt_key {
+       __be64          br_startoff;    /* starting file offset */
+} xfs_bmbt_key_t, xfs_bmdr_key_t;
+
+/* btree pointer type */
+typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
+
+
+/*
+ * Generic Btree block format definitions
+ *
+ * This is a combination of the actual format used on disk for short and long
+ * format btrees.  The first three fields are shared by both format, but the
+ * pointers are different and should be used with care.
+ *
+ * To get the size of the actual short or long form headers please use the size
+ * macros below.  Never use sizeof(xfs_btree_block).
+ *
+ * The blkno, crc, lsn, owner and uuid fields are only available in filesystems
+ * with the crc feature bit, and all accesses to them must be conditional on
+ * that flag.
+ */
+struct xfs_btree_block {
+       __be32          bb_magic;       /* magic number for block type */
+       __be16          bb_level;       /* 0 is a leaf */
+       __be16          bb_numrecs;     /* current # of data records */
+       union {
+               struct {
+                       __be32          bb_leftsib;
+                       __be32          bb_rightsib;
+
+                       __be64          bb_blkno;
+                       __be64          bb_lsn;
+                       uuid_t          bb_uuid;
+                       __be32          bb_owner;
+                       __le32          bb_crc;
+               } s;                    /* short form pointers */
+               struct  {
+                       __be64          bb_leftsib;
+                       __be64          bb_rightsib;
+
+                       __be64          bb_blkno;
+                       __be64          bb_lsn;
+                       uuid_t          bb_uuid;
+                       __be64          bb_owner;
+                       __le32          bb_crc;
+                       __be32          bb_pad; /* padding for alignment */
+               } l;                    /* long form pointers */
+       } bb_u;                         /* rest */
+};
+
+#define XFS_BTREE_SBLOCK_LEN   16      /* size of a short form block */
+#define XFS_BTREE_LBLOCK_LEN   24      /* size of a long form block */
+
+/* sizes of CRC enabled btree blocks */
+#define XFS_BTREE_SBLOCK_CRC_LEN       (XFS_BTREE_SBLOCK_LEN + 40)
+#define XFS_BTREE_LBLOCK_CRC_LEN       (XFS_BTREE_LBLOCK_LEN + 48)
+
+#define XFS_BTREE_SBLOCK_CRC_OFF \
+       offsetof(struct xfs_btree_block, bb_u.s.bb_crc)
+#define XFS_BTREE_LBLOCK_CRC_OFF \
+       offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
 
 #endif /* __XFS_FORMAT_H__ */
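Among the definitions moved into xfs_format.h above is the bmbt record packing (l0 bit 63 extent flag, l0 bits 9-62 startoff, l0 bits 0-8 plus l1 bits 21-63 startblock, l1 bits 0-20 blockcount). The sketch below decodes that documented layout, assuming l0/l1 have already been byte-swapped to host order; it is an illustration of the packing, not the kernel's accessor helpers.

/*
 * Standalone decode of the documented bmbt record layout.  l0/l1 are
 * assumed already converted to host endianness; illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct bmbt_irec_sketch {
        uint64_t        startoff;       /* 54-bit file offset */
        uint64_t        startblock;     /* 52-bit block number */
        uint64_t        blockcount;     /* 21-bit length */
        int             unwritten;      /* extent flag, l0 bit 63 */
};

static struct bmbt_irec_sketch decode_bmbt_rec(uint64_t l0, uint64_t l1)
{
        struct bmbt_irec_sketch r;

        r.unwritten  = (int)(l0 >> 63);
        r.startoff   = (l0 >> 9) & ((1ULL << 54) - 1);
        r.startblock = ((l0 & ((1ULL << 9) - 1)) << 43) | (l1 >> 21);
        r.blockcount = l1 & ((1ULL << 21) - 1);
        return r;
}

int main(void)
{
        /* startoff 1, startblock 1, blockcount 1, normal extent */
        struct bmbt_irec_sketch r = decode_bmbt_rec(1ULL << 9,
                                                    (1ULL << 21) | 1ULL);
        printf("off %llu block %llu count %llu unwritten %d\n",
               (unsigned long long)r.startoff,
               (unsigned long long)r.startblock,
               (unsigned long long)r.blockcount, r.unwritten);
        return 0;
}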
index 18272c766a508ab53deb5465448ca26bf02110d6..c5fc116dfaa307b156e0b652bfffd4f80c173b89 100644 (file)
@@ -233,11 +233,11 @@ typedef struct xfs_fsop_resblks {
 #define XFS_FSOP_GEOM_FLAGS_LOGV2      0x0100  /* log format version 2 */
 #define XFS_FSOP_GEOM_FLAGS_SECTOR     0x0200  /* sector sizes >1BB    */
 #define XFS_FSOP_GEOM_FLAGS_ATTR2      0x0400  /* inline attributes rework */
-#define XFS_FSOP_GEOM_FLAGS_PROJID32   0x0800  /* 32-bit project IDs   */
+#define XFS_FSOP_GEOM_FLAGS_PROJID32   0x0800  /* 32-bit project IDs   */
 #define XFS_FSOP_GEOM_FLAGS_DIRV2CI    0x1000  /* ASCII only CI names  */
 #define XFS_FSOP_GEOM_FLAGS_LAZYSB     0x4000  /* lazy superblock counters */
 #define XFS_FSOP_GEOM_FLAGS_V5SB       0x8000  /* version 5 superblock */
-
+#define XFS_FSOP_GEOM_FLAGS_FTYPE      0x10000 /* inode directory types */
 
 /*
  * Minimum and maximum sizes need for growth checks.
index e64ee5288b86be2d0c0267b383d3f6f9297e60a1..a6e54b3319bd0f5deb573623f332486590fbe165 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
-#include "xfs_btree.h"
 #include "xfs_error.h"
+#include "xfs_btree.h"
+#include "xfs_alloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_ialloc.h"
 #include "xfs_fsops.h"
 #include "xfs_itable.h"
 #include "xfs_trans_space.h"
 #include "xfs_rtalloc.h"
-#include "xfs_filestream.h"
 #include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
 
 /*
  * File system operations
@@ -101,7 +102,9 @@ xfs_fs_geometry(
                        (xfs_sb_version_hasprojid32bit(&mp->m_sb) ?
                                XFS_FSOP_GEOM_FLAGS_PROJID32 : 0) |
                        (xfs_sb_version_hascrc(&mp->m_sb) ?
-                               XFS_FSOP_GEOM_FLAGS_V5SB : 0);
+                               XFS_FSOP_GEOM_FLAGS_V5SB : 0) |
+                       (xfs_sb_version_hasftype(&mp->m_sb) ?
+                               XFS_FSOP_GEOM_FLAGS_FTYPE : 0);
                geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
                                mp->m_sb.sb_logsectsize : BBSIZE;
                geo->rtsectsize = mp->m_sb.sb_blocksize;
@@ -153,7 +156,7 @@ xfs_growfs_data_private(
        xfs_buf_t               *bp;
        int                     bucket;
        int                     dpct;
-       int                     error;
+       int                     error, saved_error = 0;
        xfs_agnumber_t          nagcount;
        xfs_agnumber_t          nagimax = 0;
        xfs_rfsblock_t          nb, nb_mod;
@@ -496,29 +499,33 @@ xfs_growfs_data_private(
                                error = ENOMEM;
                }
 
+               /*
+                * If we get an error reading or writing alternate superblocks,
+                * continue.  xfs_repair chooses the "best" superblock based
+                * on most matches; if we break early, we'll leave more
+                * superblocks un-updated than updated, and xfs_repair may
+                * pick them over the properly-updated primary.
+                */
                if (error) {
                        xfs_warn(mp,
                "error %d reading secondary superblock for ag %d",
                                error, agno);
-                       break;
+                       saved_error = error;
+                       continue;
                }
                xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, XFS_SB_ALL_BITS);
 
-               /*
-                * If we get an error writing out the alternate superblocks,
-                * just issue a warning and continue.  The real work is
-                * already done and committed.
-                */
                error = xfs_bwrite(bp);
                xfs_buf_relse(bp);
                if (error) {
                        xfs_warn(mp,
                "write error %d updating secondary superblock for ag %d",
                                error, agno);
-                       break; /* no point in continuing */
+                       saved_error = error;
+                       continue;
                }
        }
-       return error;
+       return saved_error ? saved_error : error;
 
  error0:
        xfs_trans_cancel(tp, XFS_TRANS_ABORT);
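The xfs_growfs_data_private() change above stops bailing out on the first secondary-superblock I/O error: it remembers the error, keeps updating the remaining AGs, and reports the failure only after the loop, so xfs_repair sees mostly-updated secondaries. A minimal sketch of that record-and-continue pattern; the helper name and the simulated failure are illustrative, not from this patch.

/*
 * Minimal sketch of the record-error-and-continue pattern adopted above:
 * every secondary superblock is still written, and the error is reported
 * only once the loop has finished.
 */
#include <stdio.h>

static int update_secondary_sb(int agno)
{
        return (agno == 2) ? -5 : 0;    /* pretend AG 2 hits an I/O error */
}

static int update_all_secondaries(int agcount)
{
        int     agno;
        int     saved_error = 0;

        for (agno = 1; agno < agcount; agno++) {
                int error = update_secondary_sb(agno);

                if (error) {
                        fprintf(stderr, "ag %d: error %d\n", agno, error);
                        saved_error = error;
                        continue;       /* keep going, don't break */
                }
        }
        return saved_error;
}

int main(void)
{
        printf("result: %d\n", update_all_secondaries(5));
        return 0;
}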
index ccf2fb1439629fae273a239625f97f6879192878..14d732f61a410317442004f234a2e642399dd62b 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
 #include "xfs_bmap.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_icreate_item.h"
 #include "xfs_icache.h"
+#include "xfs_dinode.h"
 
 
 /*
index 68c07320f096e53d31f3050850414e1038c79c4a..a8f76a5ff4184b316c53b78cb22445006926e90f 100644 (file)
@@ -23,6 +23,7 @@ struct xfs_dinode;
 struct xfs_imap;
 struct xfs_mount;
 struct xfs_trans;
+struct xfs_btree_cur;
 
 /*
  * Allocation parameters for inode allocation.
@@ -42,7 +43,7 @@ struct xfs_trans;
 static inline struct xfs_dinode *
 xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
 {
-       return (xfs_dinode_t *)
+       return (struct xfs_dinode *)
                (xfs_buf_offset(b, o << (mp)->m_sb.sb_inodelog));
 }
 
@@ -158,6 +159,4 @@ int xfs_ialloc_inode_init(struct xfs_mount *mp, struct xfs_trans *tp,
                          xfs_agnumber_t agno, xfs_agblock_t agbno,
                          xfs_agblock_t length, unsigned int gen);
 
-extern const struct xfs_buf_ops xfs_agi_buf_ops;
-
 #endif /* __XFS_IALLOC_H__ */
index 5448eb6b8c12ad1acdf9d621750ec39e6c280a80..1fa142dc86cbd6397dc52e0b7bf9191d12f74eeb 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
 #include "xfs_alloc.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 
 
 STATIC int
index 3ac36b7642e9a09960ead79d38583b08e1be1213..f38b22011c4e4604a5e9e74ec9f4afe344dccfdb 100644 (file)
@@ -26,55 +26,6 @@ struct xfs_buf;
 struct xfs_btree_cur;
 struct xfs_mount;
 
-/*
- * There is a btree for the inode map per allocation group.
- */
-#define        XFS_IBT_MAGIC           0x49414254      /* 'IABT' */
-#define        XFS_IBT_CRC_MAGIC       0x49414233      /* 'IAB3' */
-
-typedef        __uint64_t      xfs_inofree_t;
-#define        XFS_INODES_PER_CHUNK            (NBBY * sizeof(xfs_inofree_t))
-#define        XFS_INODES_PER_CHUNK_LOG        (XFS_NBBYLOG + 3)
-#define        XFS_INOBT_ALL_FREE              ((xfs_inofree_t)-1)
-#define        XFS_INOBT_MASK(i)               ((xfs_inofree_t)1 << (i))
-
-static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
-{
-       return ((n >= XFS_INODES_PER_CHUNK ? 0 : XFS_INOBT_MASK(n)) - 1) << i;
-}
-
-/*
- * Data record structure
- */
-typedef struct xfs_inobt_rec {
-       __be32          ir_startino;    /* starting inode number */
-       __be32          ir_freecount;   /* count of free inodes (set bits) */
-       __be64          ir_free;        /* free inode mask */
-} xfs_inobt_rec_t;
-
-typedef struct xfs_inobt_rec_incore {
-       xfs_agino_t     ir_startino;    /* starting inode number */
-       __int32_t       ir_freecount;   /* count of free inodes (set bits) */
-       xfs_inofree_t   ir_free;        /* free inode mask */
-} xfs_inobt_rec_incore_t;
-
-
-/*
- * Key structure
- */
-typedef struct xfs_inobt_key {
-       __be32          ir_startino;    /* starting inode number */
-} xfs_inobt_key_t;
-
-/* btree pointer type */
-typedef __be32 xfs_inobt_ptr_t;
-
-/*
- * block numbers in the AG.
- */
-#define        XFS_IBT_BLOCK(mp)               ((xfs_agblock_t)(XFS_CNT_BLOCK(mp) + 1))
-#define        XFS_PREALLOC_BLOCKS(mp)         ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
-
 /*
  * Btree block header size depends on a superblock flag.
  */
@@ -110,6 +61,4 @@ extern struct xfs_btree_cur *xfs_inobt_init_cursor(struct xfs_mount *,
                struct xfs_trans *, struct xfs_buf *, xfs_agnumber_t);
 extern int xfs_inobt_maxrecs(struct xfs_mount *, int, int);
 
-extern const struct xfs_buf_ops xfs_inobt_buf_ops;
-
 #endif /* __XFS_IALLOC_BTREE_H__ */
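The inobt definitions removed from this header above (and re-added to xfs_format.h earlier in the merge) track 64 inodes per chunk in a single 64-bit free mask, with xfs_inobt_maskn(i, n) setting n bits starting at bit i. A plain-C restatement of that mask arithmetic, for illustration only:

/*
 * Standalone illustration of the inode-chunk free mask arithmetic:
 * maskn(i, n) sets n bits starting at bit i, with n == 64 covering the
 * whole chunk.  Re-stated here for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define INODES_PER_CHUNK        64      /* NBBY * sizeof(uint64_t) */

static uint64_t inobt_maskn(int i, int n)
{
        /* all ones when n covers the whole chunk, else n low bits, shifted */
        return ((n >= INODES_PER_CHUNK ? 0 : (1ULL << n)) - 1) << i;
}

int main(void)
{
        printf("maskn(0, 64) = %#llx\n",
               (unsigned long long)inobt_maskn(0, 64));  /* whole chunk free */
        printf("maskn(4, 3)  = %#llx\n",
               (unsigned long long)inobt_maskn(4, 3));   /* bits 4-6 set */
        return 0;
}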
index 474807a401c864e7681d3f8d0f111358c45536fc..98d35244eecc936bdb0a9702ae70a9de1d980de8 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_log_priv.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_dinode.h"
 #include "xfs_error.h"
-#include "xfs_filestream.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
 #include "xfs_inode_item.h"
 #include "xfs_quota.h"
 #include "xfs_trace.h"
-#include "xfs_fsops.h"
 #include "xfs_icache.h"
 #include "xfs_bmap_util.h"
 
@@ -500,11 +495,6 @@ xfs_inode_ag_walk_grab(
        if (!igrab(inode))
                return ENOENT;
 
-       if (is_bad_inode(inode)) {
-               IRELE(ip);
-               return ENOENT;
-       }
-
        /* inode is valid */
        return 0;
 
@@ -918,8 +908,6 @@ restart:
                xfs_iflock(ip);
        }
 
-       if (is_bad_inode(VFS_I(ip)))
-               goto reclaim;
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                xfs_iunpin_wait(ip);
                xfs_iflush_abort(ip, false);
index 5a5a593994d4196d3b18c2e959df90dc28c7588f..d2eaccfa73f4b14c622e66b39787396433e87604 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 #include "xfs_icreate_item.h"
index e3d75385aa76a6e45b7711a65bb39c268c9f689b..326b94dbe159b99debd7b78dc4acca2136ab0aab 100644 (file)
 
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_space.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_da_format.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
 #include "xfs_attr_sf.h"
 #include "xfs_attr.h"
-#include "xfs_dinode.h"
-#include "xfs_inode.h"
+#include "xfs_trans_space.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_inode_item.h"
-#include "xfs_btree.h"
-#include "xfs_alloc.h"
 #include "xfs_ialloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
+#include "xfs_dinode.h"
 #include "xfs_filestream.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
+#include "xfs_bmap_btree.h"
 
 kmem_zone_t *xfs_inode_zone;
 
@@ -1662,6 +1661,126 @@ xfs_release(
        return 0;
 }
 
+/*
+ * xfs_inactive_truncate
+ *
+ * Called to perform a truncate when an inode becomes unlinked.
+ */
+STATIC int
+xfs_inactive_truncate(
+       struct xfs_inode *ip)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       int                     error;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+       if (error) {
+               ASSERT(XFS_FORCED_SHUTDOWN(mp));
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, 0);
+
+       /*
+        * Log the inode size first to prevent stale data exposure in the event
+        * of a system crash before the truncate completes. See the related
+        * comment in xfs_setattr_size() for details.
+        */
+       ip->i_d.di_size = 0;
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
+       if (error)
+               goto error_trans_cancel;
+
+       ASSERT(ip->i_d.di_nextents == 0);
+
+       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+       if (error)
+               goto error_unlock;
+
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       return 0;
+
+error_trans_cancel:
+       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error_unlock:
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       return error;
+}
+
+/*
+ * xfs_inactive_ifree()
+ *
+ * Perform the inode free when an inode is unlinked.
+ */
+STATIC int
+xfs_inactive_ifree(
+       struct xfs_inode *ip)
+{
+       xfs_bmap_free_t         free_list;
+       xfs_fsblock_t           first_block;
+       int                     committed;
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       int                     error;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
+       if (error) {
+               ASSERT(XFS_FORCED_SHUTDOWN(mp));
+               xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, 0);
+
+       xfs_bmap_init(&free_list, &first_block);
+       error = xfs_ifree(tp, ip, &free_list);
+       if (error) {
+               /*
+                * If we fail to free the inode, shut down.  The cancel
+                * might do that, we need to make sure.  Otherwise the
+                * inode might be lost for a long time or forever.
+                */
+               if (!XFS_FORCED_SHUTDOWN(mp)) {
+                       xfs_notice(mp, "%s: xfs_ifree returned error %d",
+                               __func__, error);
+                       xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+               }
+               xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
+               return error;
+       }
+
+       /*
+        * Credit the quota account(s). The inode is gone.
+        */
+       xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
+
+       /*
+        * Just ignore errors at this point.  There is nothing we can
+        * do except to try to keep going. Make sure it's not a silent
+        * error.
+        */
+       error = xfs_bmap_finish(&tp,  &free_list, &committed);
+       if (error)
+               xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
+                       __func__, error);
+       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+       if (error)
+               xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
+                       __func__, error);
+
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       return 0;
+}
+
 /*
  * xfs_inactive
  *
@@ -1670,16 +1789,11 @@ xfs_release(
  * now be truncated.  Also, we clear all of the read-ahead state
  * kept for the inode here since the file is now closed.
  */
-int
+void
 xfs_inactive(
        xfs_inode_t     *ip)
 {
-       xfs_bmap_free_t         free_list;
-       xfs_fsblock_t           first_block;
-       int                     committed;
-       struct xfs_trans        *tp;
        struct xfs_mount        *mp;
-       struct xfs_trans_res    *resp;
        int                     error;
        int                     truncate = 0;
 
@@ -1687,19 +1801,17 @@ xfs_inactive(
         * If the inode is already free, then there can be nothing
         * to clean up here.
         */
-       if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
+       if (ip->i_d.di_mode == 0) {
                ASSERT(ip->i_df.if_real_bytes == 0);
                ASSERT(ip->i_df.if_broot_bytes == 0);
-               return VN_INACTIVE_CACHE;
+               return;
        }
 
        mp = ip->i_mount;
 
-       error = 0;
-
        /* If this is a read-only mount, don't do this (would generate I/O) */
        if (mp->m_flags & XFS_MOUNT_RDONLY)
-               goto out;
+               return;
 
        if (ip->i_d.di_nlink != 0) {
                /*
@@ -1707,12 +1819,10 @@ xfs_inactive(
                 * cache. Post-eof blocks must be freed, lest we end up with
                 * broken free space accounting.
                 */
-               if (xfs_can_free_eofblocks(ip, true)) {
-                       error = xfs_free_eofblocks(mp, ip, false);
-                       if (error)
-                               return VN_INACTIVE_CACHE;
-               }
-               goto out;
+               if (xfs_can_free_eofblocks(ip, true))
+                       xfs_free_eofblocks(mp, ip, false);
+
+               return;
        }
 
        if (S_ISREG(ip->i_d.di_mode) &&
@@ -1722,36 +1832,14 @@ xfs_inactive(
 
        error = xfs_qm_dqattach(ip, 0);
        if (error)
-               return VN_INACTIVE_CACHE;
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-       resp = (truncate || S_ISLNK(ip->i_d.di_mode)) ?
-               &M_RES(mp)->tr_itruncate : &M_RES(mp)->tr_ifree;
+               return;
 
-       error = xfs_trans_reserve(tp, resp, 0, 0);
-       if (error) {
-               ASSERT(XFS_FORCED_SHUTDOWN(mp));
-               xfs_trans_cancel(tp, 0);
-               return VN_INACTIVE_CACHE;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, 0);
-
-       if (S_ISLNK(ip->i_d.di_mode)) {
-               error = xfs_inactive_symlink(ip, &tp);
-               if (error)
-                       goto out_cancel;
-       } else if (truncate) {
-               ip->i_d.di_size = 0;
-               xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
-               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
-               if (error)
-                       goto out_cancel;
-
-               ASSERT(ip->i_d.di_nextents == 0);
-       }
+       if (S_ISLNK(ip->i_d.di_mode))
+               error = xfs_inactive_symlink(ip);
+       else if (truncate)
+               error = xfs_inactive_truncate(ip);
+       if (error)
+               return;
 
        /*
         * If there are attributes associated with the file then blow them away
@@ -1762,25 +1850,9 @@ xfs_inactive(
        if (ip->i_d.di_anextents > 0) {
                ASSERT(ip->i_d.di_forkoff != 0);
 
-               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-               if (error)
-                       goto out_unlock;
-
-               xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
                error = xfs_attr_inactive(ip);
                if (error)
-                       goto out;
-
-               tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 0, 0);
-               if (error) {
-                       xfs_trans_cancel(tp, 0);
-                       goto out;
-               }
-
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
-               xfs_trans_ijoin(tp, ip, 0);
+                       return;
        }
 
        if (ip->i_afp)
@@ -1791,52 +1863,14 @@ xfs_inactive(
        /*
         * Free the inode.
         */
-       xfs_bmap_init(&free_list, &first_block);
-       error = xfs_ifree(tp, ip, &free_list);
-       if (error) {
-               /*
-                * If we fail to free the inode, shut down.  The cancel
-                * might do that, we need to make sure.  Otherwise the
-                * inode might be lost for a long time or forever.
-                */
-               if (!XFS_FORCED_SHUTDOWN(mp)) {
-                       xfs_notice(mp, "%s: xfs_ifree returned error %d",
-                               __func__, error);
-                       xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
-               }
-               xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-       } else {
-               /*
-                * Credit the quota account(s). The inode is gone.
-                */
-               xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
-
-               /*
-                * Just ignore errors at this point.  There is nothing we can
-                * do except to try to keep going. Make sure it's not a silent
-                * error.
-                */
-               error = xfs_bmap_finish(&tp,  &free_list, &committed);
-               if (error)
-                       xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
-                               __func__, error);
-               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-               if (error)
-                       xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
-                               __func__, error);
-       }
+       error = xfs_inactive_ifree(ip);
+       if (error)
+               return;
 
        /*
         * Release the dquots held by inode, if any.
         */
        xfs_qm_dqdetach(ip);
-out_unlock:
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-out:
-       return VN_INACTIVE_CACHE;
-out_cancel:
-       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-       goto out_unlock;
 }
 
 /*
index 4a91358c1470b9ac029d451dd38c3fa77daf6fc0..66675877f38cd273d7b9fe51e10d93980b9f8422 100644 (file)
@@ -24,7 +24,6 @@
 /*
  * Kernel only inode definitions
  */
-
 struct xfs_dinode;
 struct xfs_inode;
 struct xfs_buf;
@@ -316,7 +315,7 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
 
 
 int            xfs_release(struct xfs_inode *ip);
-int            xfs_inactive(struct xfs_inode *ip);
+void           xfs_inactive(struct xfs_inode *ip);
 int            xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
                           struct xfs_inode **ipp, struct xfs_name *ci_name);
 int            xfs_create(struct xfs_inode *dp, struct xfs_name *name,
index 63382d37f5658c8ee774668df7a50a87eeec5834..4fc9f39dd89e7b8ed64e271ca6ced6bc43a191f6 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_error.h"
 #include "xfs_cksum.h"
 #include "xfs_icache.h"
+#include "xfs_trans.h"
 #include "xfs_ialloc.h"
+#include "xfs_dinode.h"
 
 /*
  * Check that none of the inodes in the buffer have a next
index abba0ae8cf2da2b4012445bc2cc5cbf63b8c9a66..9308c47f2a527dc08b75b66de5d064e0b13e0cfe 100644 (file)
@@ -47,7 +47,4 @@ void  xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
 #define        xfs_inobp_check(mp, bp)
 #endif /* DEBUG */
 
-extern const struct xfs_buf_ops xfs_inode_buf_ops;
-extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
-
 #endif /* __XFS_INODE_BUF_H__ */
index 02f1083955bb1dfa19c295adea18b663b25f93f7..22c9837c5d4beb680514213369aa0722d189503f 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_attr_sf.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
-#include "xfs_btree.h"
-#include "xfs_alloc.h"
-#include "xfs_ialloc.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_filestream.h"
-#include "xfs_cksum.h"
 #include "xfs_trace.h"
-#include "xfs_icache.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dinode.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -1359,7 +1349,7 @@ xfs_iext_remove_indirect(
 void
 xfs_iext_realloc_direct(
        xfs_ifork_t     *ifp,           /* inode fork pointer */
-       int             new_size)       /* new size of extents */
+       int             new_size)       /* new size of extents after adding */
 {
        int             rnew_size;      /* real new size of extents */
 
@@ -1397,13 +1387,8 @@ xfs_iext_realloc_direct(
                                rnew_size - ifp->if_real_bytes);
                }
        }
-       /*
-        * Switch from the inline extent buffer to a direct
-        * extent list. Be sure to include the inline extent
-        * bytes in new_size.
-        */
+       /* Switch from the inline extent buffer to a direct extent list */
        else {
-               new_size += ifp->if_bytes;
                if (!is_power_of_2(new_size)) {
                        rnew_size = roundup_pow_of_two(new_size);
                }
index 28661a0d90583bc2d20ab37089b79f921eaf135c..eb329a1ea8886a3d878489765bc5d13dbbf26068 100644 (file)
@@ -19,6 +19,7 @@
 #define        __XFS_INODE_FORK_H__
 
 struct xfs_inode_log_item;
+struct xfs_dinode;
 
 /*
  * The following xfs_ext_irec_t struct introduces a second (top) level
index 378081109844b09b2bbfd07dcb4214027fefe2c2..7c0d391f9a6e0bd3f4f6a72cc6f0e65323cc7183 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_trans_priv.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_inode_item.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
+#include "xfs_trans_priv.h"
+#include "xfs_dinode.h"
 
 
 kmem_zone_t    *xfs_ili_zone;          /* inode log item zone */
index 668e8f4ccf5e7201e8e6a360cd26668484f6d515..4d613401a5e056a08dd2f8b21b77833bfe14cbc6 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ioctl.h"
+#include "xfs_alloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
 #include "xfs_attr.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_buf_item.h"
 #include "xfs_fsops.h"
 #include "xfs_discard.h"
 #include "xfs_quota.h"
-#include "xfs_inode_item.h"
 #include "xfs_export.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
+#include "xfs_dinode.h"
+#include "xfs_trans.h"
 
 #include <linux/capability.h>
 #include <linux/dcache.h>
@@ -641,7 +640,11 @@ xfs_ioc_space(
        unsigned int            cmd,
        xfs_flock64_t           *bf)
 {
-       int                     attr_flags = 0;
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       struct iattr            iattr;
+       bool                    setprealloc = false;
+       bool                    clrprealloc = false;
        int                     error;
 
        /*
@@ -661,19 +664,128 @@ xfs_ioc_space(
        if (!S_ISREG(inode->i_mode))
                return -XFS_ERROR(EINVAL);
 
-       if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
-               attr_flags |= XFS_ATTR_NONBLOCK;
+       error = mnt_want_write_file(filp);
+       if (error)
+               return error;
 
-       if (filp->f_flags & O_DSYNC)
-               attr_flags |= XFS_ATTR_SYNC;
+       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+       switch (bf->l_whence) {
+       case 0: /*SEEK_SET*/
+               break;
+       case 1: /*SEEK_CUR*/
+               bf->l_start += filp->f_pos;
+               break;
+       case 2: /*SEEK_END*/
+               bf->l_start += XFS_ISIZE(ip);
+               break;
+       default:
+               error = XFS_ERROR(EINVAL);
+               goto out_unlock;
+       }
 
-       if (ioflags & IO_INVIS)
-               attr_flags |= XFS_ATTR_DMI;
+       /*
+        * A length of <= 0 for resv/unresv/zero is invalid.  The length for
+        * alloc/free is ignored completely and we have no idea what userspace
+        * might have set it to, so set it to zero to allow range
+        * checks to pass.
+        */
+       switch (cmd) {
+       case XFS_IOC_ZERO_RANGE:
+       case XFS_IOC_RESVSP:
+       case XFS_IOC_RESVSP64:
+       case XFS_IOC_UNRESVSP:
+       case XFS_IOC_UNRESVSP64:
+               if (bf->l_len <= 0) {
+                       error = XFS_ERROR(EINVAL);
+                       goto out_unlock;
+               }
+               break;
+       default:
+               bf->l_len = 0;
+               break;
+       }
+
+       if (bf->l_start < 0 ||
+           bf->l_start > mp->m_super->s_maxbytes ||
+           bf->l_start + bf->l_len < 0 ||
+           bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) {
+               error = XFS_ERROR(EINVAL);
+               goto out_unlock;
+       }
+
+       switch (cmd) {
+       case XFS_IOC_ZERO_RANGE:
+               error = xfs_zero_file_space(ip, bf->l_start, bf->l_len);
+               if (!error)
+                       setprealloc = true;
+               break;
+       case XFS_IOC_RESVSP:
+       case XFS_IOC_RESVSP64:
+               error = xfs_alloc_file_space(ip, bf->l_start, bf->l_len,
+                                               XFS_BMAPI_PREALLOC);
+               if (!error)
+                       setprealloc = true;
+               break;
+       case XFS_IOC_UNRESVSP:
+       case XFS_IOC_UNRESVSP64:
+               error = xfs_free_file_space(ip, bf->l_start, bf->l_len);
+               break;
+       case XFS_IOC_ALLOCSP:
+       case XFS_IOC_ALLOCSP64:
+       case XFS_IOC_FREESP:
+       case XFS_IOC_FREESP64:
+               if (bf->l_start > XFS_ISIZE(ip)) {
+                       error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
+                                       bf->l_start - XFS_ISIZE(ip), 0);
+                       if (error)
+                               goto out_unlock;
+               }
+
+               iattr.ia_valid = ATTR_SIZE;
+               iattr.ia_size = bf->l_start;
+
+               error = xfs_setattr_size(ip, &iattr);
+               if (!error)
+                       clrprealloc = true;
+               break;
+       default:
+               ASSERT(0);
+               error = XFS_ERROR(EINVAL);
+       }
 
-       error = mnt_want_write_file(filp);
        if (error)
-               return error;
-       error = xfs_change_file_space(ip, cmd, bf, filp->f_pos, attr_flags);
+               goto out_unlock;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_writeid, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               goto out_unlock;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+       if (!(ioflags & IO_INVIS)) {
+               ip->i_d.di_mode &= ~S_ISUID;
+               if (ip->i_d.di_mode & S_IXGRP)
+                       ip->i_d.di_mode &= ~S_ISGID;
+               xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+       }
+
+       if (setprealloc)
+               ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
+       else if (clrprealloc)
+               ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
+
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       if (filp->f_flags & O_DSYNC)
+               xfs_trans_set_sync(tp);
+       error = xfs_trans_commit(tp, 0);
+
+out_unlock:
+       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        mnt_drop_write_file(filp);
        return -error;
 }
index f671f7e472ac008511ca4df2068c9d4fb91d169c..e8fb1231db8124dc08b2ffbcc551d6bfa1bc21f5 100644 (file)
 #include <asm/uaccess.h>
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_vnode.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
index 8d4d49b6fbf347b3add01ed4675b489a653a6dfb..22d1cbea283d4734515218ef65b23ec78bfdeff6 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_inode_item.h"
 #include "xfs_btree.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_trans_space.h"
 #include "xfs_iomap.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_quota.h"
 #include "xfs_dquot_item.h"
 #include "xfs_dquot.h"
+#include "xfs_dinode.h"
 
 
 #define XFS_WRITEIO_ALIGN(mp,off)      (((off) >> mp->m_writeio_log) \
@@ -110,7 +104,7 @@ xfs_alert_fsblock_zero(
        xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
                        "Access to block zero in inode %llu "
                        "start_block: %llx start_off: %llx "
-                       "blkcnt: %llx extent-state: %x\n",
+                       "blkcnt: %llx extent-state: %x",
                (unsigned long long)ip->i_ino,
                (unsigned long long)imap->br_startblock,
                (unsigned long long)imap->br_startoff,
@@ -655,7 +649,6 @@ int
 xfs_iomap_write_allocate(
        xfs_inode_t     *ip,
        xfs_off_t       offset,
-       size_t          count,
        xfs_bmbt_irec_t *imap)
 {
        xfs_mount_t     *mp = ip->i_mount;
index 80615760959ae169dddb471af1964c4ee03f448e..411fbb8919ef02456e82bb39c7b97443bf1e4da0 100644 (file)
 struct xfs_inode;
 struct xfs_bmbt_irec;
 
-extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
                        struct xfs_bmbt_irec *, int);
-extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
                        struct xfs_bmbt_irec *);
-extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
                        struct xfs_bmbt_irec *);
-extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
+int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
 
 #endif /* __XFS_IOMAP_H__*/
index 2b8952d9199bbd145473a48b326120b8d43ed9b6..718b62b0fe05165d3749249869dddc0eebede87e 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_acl.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
+#include "xfs_acl.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
 #include "xfs_attr.h"
-#include "xfs_buf_item.h"
-#include "xfs_inode_item.h"
+#include "xfs_trans.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2_priv.h"
+#include "xfs_dinode.h"
 
 #include <linux/capability.h>
 #include <linux/xattr.h>
@@ -709,8 +705,7 @@ out_dqrele:
 int
 xfs_setattr_size(
        struct xfs_inode        *ip,
-       struct iattr            *iattr,
-       int                     flags)
+       struct iattr            *iattr)
 {
        struct xfs_mount        *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
@@ -733,15 +728,11 @@ xfs_setattr_size(
        if (error)
                return XFS_ERROR(error);
 
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(S_ISREG(ip->i_d.di_mode));
        ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
                        ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
 
-       if (!(flags & XFS_ATTR_NOLOCK)) {
-               lock_flags |= XFS_IOLOCK_EXCL;
-               xfs_ilock(ip, lock_flags);
-       }
-
        oldsize = inode->i_size;
        newsize = iattr->ia_size;
 
@@ -750,12 +741,11 @@ xfs_setattr_size(
         */
        if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
                if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
-                       goto out_unlock;
+                       return 0;
 
                /*
                 * Use the regular setattr path to update the timestamps.
                 */
-               xfs_iunlock(ip, lock_flags);
                iattr->ia_valid &= ~ATTR_SIZE;
                return xfs_setattr_nonsize(ip, iattr, 0);
        }
@@ -765,7 +755,7 @@ xfs_setattr_size(
         */
        error = xfs_qm_dqattach(ip, 0);
        if (error)
-               goto out_unlock;
+               return error;
 
        /*
         * Now we can make the changes.  Before we join the inode to the
@@ -783,7 +773,7 @@ xfs_setattr_size(
                 */
                error = xfs_zero_eof(ip, newsize, oldsize);
                if (error)
-                       goto out_unlock;
+                       return error;
        }
 
        /*
@@ -802,7 +792,7 @@ xfs_setattr_size(
                error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                                      ip->i_d.di_size, newsize);
                if (error)
-                       goto out_unlock;
+                       return error;
        }
 
        /*
@@ -812,7 +802,7 @@ xfs_setattr_size(
 
        error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks);
        if (error)
-               goto out_unlock;
+               return error;
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
@@ -916,12 +906,21 @@ out_trans_cancel:
 
 STATIC int
 xfs_vn_setattr(
-       struct dentry   *dentry,
-       struct iattr    *iattr)
+       struct dentry           *dentry,
+       struct iattr            *iattr)
 {
-       if (iattr->ia_valid & ATTR_SIZE)
-               return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0);
-       return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0);
+       struct xfs_inode        *ip = XFS_I(dentry->d_inode);
+       int                     error;
+
+       if (iattr->ia_valid & ATTR_SIZE) {
+               xfs_ilock(ip, XFS_IOLOCK_EXCL);
+               error = xfs_setattr_size(ip, iattr);
+               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       } else {
+               error = xfs_setattr_nonsize(ip, iattr, 0);
+       }
+
+       return -error;
 }
 
 STATIC int
index d81fb41205ec97b9a00ecc0789e99763adc5af71..d2c5057b5cc4b4b701d05478e045cbf02a3ed408 100644 (file)
@@ -30,14 +30,10 @@ extern void xfs_setup_inode(struct xfs_inode *);
 /*
  * Internal setattr interfaces.
  */
-#define        XFS_ATTR_DMI            0x01    /* invocation from a DMI function */
-#define        XFS_ATTR_NONBLOCK       0x02    /* return EAGAIN if op would block */
-#define XFS_ATTR_NOLOCK                0x04    /* Don't grab any conflicting locks */
-#define XFS_ATTR_NOACL         0x08    /* Don't call xfs_acl_chmod */
-#define XFS_ATTR_SYNC          0x10    /* synchronous operation required */
+#define XFS_ATTR_NOACL         0x01    /* Don't call xfs_acl_chmod */
 
 extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
                               int flags);
-extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap, int flags);
+extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
 
 #endif /* __XFS_IOPS_H__ */
index 084b3e1741fd0346cac1d8279ed8085553ee7486..c237ad15d500f767b81014a428720a7351431077 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_btree.h"
 #include "xfs_ialloc.h"
+#include "xfs_ialloc_btree.h"
 #include "xfs_itable.h"
 #include "xfs_error.h"
-#include "xfs_btree.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
+#include "xfs_dinode.h"
 
 STATIC int
 xfs_internal_inum(
index a2dea108071ae6e81d0e683a98a4a011b74f23ee..e523396753c5068cebe7ee8dd3b889623bf1c8f0 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_error.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
 #include "xfs_log_recover.h"
-#include "xfs_trans_priv.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_trace.h"
 #include "xfs_fsops.h"
@@ -1000,27 +998,34 @@ xfs_log_space_wake(
 }
 
 /*
- * Determine if we have a transaction that has gone to disk
- * that needs to be covered. To begin the transition to the idle state
- * firstly the log needs to be idle (no AIL and nothing in the iclogs).
- * If we are then in a state where covering is needed, the caller is informed
- * that dummy transactions are required to move the log into the idle state.
+ * Determine if we have a transaction that has gone to disk that needs to be
+ * covered. To begin the transition to the idle state, the log first needs to
+ * be idle. That means the CIL, the AIL and the iclogs need to be empty before
+ * we start attempting to cover the log.
+ *
+ * Only if we are then in a state where covering is needed is the caller
+ * informed that dummy transactions are required to move the log into the
+ * idle state.
  *
- * Because this is called as part of the sync process, we should also indicate
- * that dummy transactions should be issued in anything but the covered or
- * idle states. This ensures that the log tail is accurately reflected in
- * the log at the end of the sync, hence if a crash occurrs avoids replay
- * of transactions where the metadata is already on disk.
+ * If there are any items in the AIL or CIL, then we do not want to attempt to
+ * cover the log as we may be in a situation where there isn't log space
+ * available to run a dummy transaction and this can lead to deadlocks when the
+ * tail of the log is pinned by an item that is modified in the CIL.  Hence
+ * there's no point in running a dummy transaction at this point because we
+ * can't start trying to idle the log until both the CIL and AIL are empty.
  */
 int
 xfs_log_need_covered(xfs_mount_t *mp)
 {
-       int             needed = 0;
        struct xlog     *log = mp->m_log;
+       int             needed = 0;
 
        if (!xfs_fs_writable(mp))
                return 0;
 
+       if (!xlog_cil_empty(log))
+               return 0;
+
        spin_lock(&log->l_icloglock);
        switch (log->l_covered_state) {
        case XLOG_STATE_COVER_DONE:
@@ -1029,14 +1034,17 @@ xfs_log_need_covered(xfs_mount_t *mp)
                break;
        case XLOG_STATE_COVER_NEED:
        case XLOG_STATE_COVER_NEED2:
-               if (!xfs_ail_min_lsn(log->l_ailp) &&
-                   xlog_iclogs_empty(log)) {
-                       if (log->l_covered_state == XLOG_STATE_COVER_NEED)
-                               log->l_covered_state = XLOG_STATE_COVER_DONE;
-                       else
-                               log->l_covered_state = XLOG_STATE_COVER_DONE2;
-               }
-               /* FALLTHRU */
+               if (xfs_ail_min_lsn(log->l_ailp))
+                       break;
+               if (!xlog_iclogs_empty(log))
+                       break;
+
+               needed = 1;
+               if (log->l_covered_state == XLOG_STATE_COVER_NEED)
+                       log->l_covered_state = XLOG_STATE_COVER_DONE;
+               else
+                       log->l_covered_state = XLOG_STATE_COVER_DONE2;
+               break;
        default:
                needed = 1;
                break;
@@ -1979,7 +1987,7 @@ xlog_print_tic_res(
 
        for (i = 0; i < ticket->t_res_num; i++) {
                uint r_type = ticket->t_res_arr[i].r_type;
-               xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
+               xfs_warn(mp, "region[%u]: %s - %u bytes", i,
                            ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
                            "bad-rtype" : res_type_str[r_type-1]),
                            ticket->t_res_arr[i].r_len);
index 1c458487f000a42306cb44f14509d80fea0c2f02..e148719e0a5d59dba022034cd5e988ae47552bde 100644 (file)
@@ -18,8 +18,6 @@
 #ifndef        __XFS_LOG_H__
 #define __XFS_LOG_H__
 
-#include "xfs_log_format.h"
-
 struct xfs_log_vec {
        struct xfs_log_vec      *lv_next;       /* next lv in build list */
        int                     lv_niovecs;     /* number of iovecs in lv */
@@ -82,11 +80,7 @@ struct xlog_ticket;
 struct xfs_log_item;
 struct xfs_item_ops;
 struct xfs_trans;
-
-void   xfs_log_item_init(struct xfs_mount      *mp,
-                       struct xfs_log_item     *item,
-                       int                     type,
-                       const struct xfs_item_ops *ops);
+struct xfs_log_callback;
 
 xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
                       struct xlog_ticket *ticket,
@@ -114,7 +108,7 @@ xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
 void     xfs_log_space_wake(struct xfs_mount *mp);
 int      xfs_log_notify(struct xfs_mount       *mp,
                         struct xlog_in_core    *iclog,
-                        xfs_log_callback_t     *callback_entry);
+                        struct xfs_log_callback *callback_entry);
 int      xfs_log_release_iclog(struct xfs_mount *mp,
                         struct xlog_in_core     *iclog);
 int      xfs_log_reserve(struct xfs_mount *mp,
index cfe97973ba36d1d586c3704b536aebce2e391af1..5eb51fc5eb844f2553e5a2d4711c9a32a74bdd18 100644 (file)
 
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
-#include "xfs_log_priv.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
 #include "xfs_discard.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
+#include "xfs_log_priv.h"
 
 /*
  * Allocate a new ticket. Failing to get a new ticket makes it really hard to
@@ -711,6 +713,20 @@ xlog_cil_push_foreground(
        xlog_cil_push(log);
 }
 
+bool
+xlog_cil_empty(
+       struct xlog     *log)
+{
+       struct xfs_cil  *cil = log->l_cilp;
+       bool            empty = false;
+
+       spin_lock(&cil->xc_push_lock);
+       if (list_empty(&cil->xc_cil))
+               empty = true;
+       spin_unlock(&cil->xc_push_lock);
+       return empty;
+}
+
 /*
  * Commit a transaction with the given vector to the Committed Item List.
  *
index ca7e28a8ed31d996f7e56862e38af43b449235cc..f0969c77bdbe1ea7d87e732ba27a8c4d3fbd54c4 100644 (file)
@@ -233,178 +233,6 @@ typedef struct xfs_trans_header {
        { XFS_LI_QUOTAOFF,      "XFS_LI_QUOTAOFF" }, \
        { XFS_LI_ICREATE,       "XFS_LI_ICREATE" }
 
-/*
- * Transaction types.  Used to distinguish types of buffers.
- */
-#define XFS_TRANS_SETATTR_NOT_SIZE     1
-#define XFS_TRANS_SETATTR_SIZE         2
-#define XFS_TRANS_INACTIVE             3
-#define XFS_TRANS_CREATE               4
-#define XFS_TRANS_CREATE_TRUNC         5
-#define XFS_TRANS_TRUNCATE_FILE                6
-#define XFS_TRANS_REMOVE               7
-#define XFS_TRANS_LINK                 8
-#define XFS_TRANS_RENAME               9
-#define XFS_TRANS_MKDIR                        10
-#define XFS_TRANS_RMDIR                        11
-#define XFS_TRANS_SYMLINK              12
-#define XFS_TRANS_SET_DMATTRS          13
-#define XFS_TRANS_GROWFS               14
-#define XFS_TRANS_STRAT_WRITE          15
-#define XFS_TRANS_DIOSTRAT             16
-/* 17 was XFS_TRANS_WRITE_SYNC */
-#define        XFS_TRANS_WRITEID               18
-#define        XFS_TRANS_ADDAFORK              19
-#define        XFS_TRANS_ATTRINVAL             20
-#define        XFS_TRANS_ATRUNCATE             21
-#define        XFS_TRANS_ATTR_SET              22
-#define        XFS_TRANS_ATTR_RM               23
-#define        XFS_TRANS_ATTR_FLAG             24
-#define        XFS_TRANS_CLEAR_AGI_BUCKET      25
-#define XFS_TRANS_QM_SBCHANGE          26
-/*
- * Dummy entries since we use the transaction type to index into the
- * trans_type[] in xlog_recover_print_trans_head()
- */
-#define XFS_TRANS_DUMMY1               27
-#define XFS_TRANS_DUMMY2               28
-#define XFS_TRANS_QM_QUOTAOFF          29
-#define XFS_TRANS_QM_DQALLOC           30
-#define XFS_TRANS_QM_SETQLIM           31
-#define XFS_TRANS_QM_DQCLUSTER         32
-#define XFS_TRANS_QM_QINOCREATE                33
-#define XFS_TRANS_QM_QUOTAOFF_END      34
-#define XFS_TRANS_SB_UNIT              35
-#define XFS_TRANS_FSYNC_TS             36
-#define        XFS_TRANS_GROWFSRT_ALLOC        37
-#define        XFS_TRANS_GROWFSRT_ZERO         38
-#define        XFS_TRANS_GROWFSRT_FREE         39
-#define        XFS_TRANS_SWAPEXT               40
-#define        XFS_TRANS_SB_COUNT              41
-#define        XFS_TRANS_CHECKPOINT            42
-#define        XFS_TRANS_ICREATE               43
-#define        XFS_TRANS_TYPE_MAX              43
-/* new transaction types need to be reflected in xfs_logprint(8) */
-
-#define XFS_TRANS_TYPES \
-       { XFS_TRANS_SETATTR_NOT_SIZE,   "SETATTR_NOT_SIZE" }, \
-       { XFS_TRANS_SETATTR_SIZE,       "SETATTR_SIZE" }, \
-       { XFS_TRANS_INACTIVE,           "INACTIVE" }, \
-       { XFS_TRANS_CREATE,             "CREATE" }, \
-       { XFS_TRANS_CREATE_TRUNC,       "CREATE_TRUNC" }, \
-       { XFS_TRANS_TRUNCATE_FILE,      "TRUNCATE_FILE" }, \
-       { XFS_TRANS_REMOVE,             "REMOVE" }, \
-       { XFS_TRANS_LINK,               "LINK" }, \
-       { XFS_TRANS_RENAME,             "RENAME" }, \
-       { XFS_TRANS_MKDIR,              "MKDIR" }, \
-       { XFS_TRANS_RMDIR,              "RMDIR" }, \
-       { XFS_TRANS_SYMLINK,            "SYMLINK" }, \
-       { XFS_TRANS_SET_DMATTRS,        "SET_DMATTRS" }, \
-       { XFS_TRANS_GROWFS,             "GROWFS" }, \
-       { XFS_TRANS_STRAT_WRITE,        "STRAT_WRITE" }, \
-       { XFS_TRANS_DIOSTRAT,           "DIOSTRAT" }, \
-       { XFS_TRANS_WRITEID,            "WRITEID" }, \
-       { XFS_TRANS_ADDAFORK,           "ADDAFORK" }, \
-       { XFS_TRANS_ATTRINVAL,          "ATTRINVAL" }, \
-       { XFS_TRANS_ATRUNCATE,          "ATRUNCATE" }, \
-       { XFS_TRANS_ATTR_SET,           "ATTR_SET" }, \
-       { XFS_TRANS_ATTR_RM,            "ATTR_RM" }, \
-       { XFS_TRANS_ATTR_FLAG,          "ATTR_FLAG" }, \
-       { XFS_TRANS_CLEAR_AGI_BUCKET,   "CLEAR_AGI_BUCKET" }, \
-       { XFS_TRANS_QM_SBCHANGE,        "QM_SBCHANGE" }, \
-       { XFS_TRANS_QM_QUOTAOFF,        "QM_QUOTAOFF" }, \
-       { XFS_TRANS_QM_DQALLOC,         "QM_DQALLOC" }, \
-       { XFS_TRANS_QM_SETQLIM,         "QM_SETQLIM" }, \
-       { XFS_TRANS_QM_DQCLUSTER,       "QM_DQCLUSTER" }, \
-       { XFS_TRANS_QM_QINOCREATE,      "QM_QINOCREATE" }, \
-       { XFS_TRANS_QM_QUOTAOFF_END,    "QM_QOFF_END" }, \
-       { XFS_TRANS_SB_UNIT,            "SB_UNIT" }, \
-       { XFS_TRANS_FSYNC_TS,           "FSYNC_TS" }, \
-       { XFS_TRANS_GROWFSRT_ALLOC,     "GROWFSRT_ALLOC" }, \
-       { XFS_TRANS_GROWFSRT_ZERO,      "GROWFSRT_ZERO" }, \
-       { XFS_TRANS_GROWFSRT_FREE,      "GROWFSRT_FREE" }, \
-       { XFS_TRANS_SWAPEXT,            "SWAPEXT" }, \
-       { XFS_TRANS_SB_COUNT,           "SB_COUNT" }, \
-       { XFS_TRANS_CHECKPOINT,         "CHECKPOINT" }, \
-       { XFS_TRANS_DUMMY1,             "DUMMY1" }, \
-       { XFS_TRANS_DUMMY2,             "DUMMY2" }, \
-       { XLOG_UNMOUNT_REC_TYPE,        "UNMOUNT" }
-
-/*
- * This structure is used to track log items associated with
- * a transaction.  It points to the log item and keeps some
- * flags to track the state of the log item.  It also tracks
- * the amount of space needed to log the item it describes
- * once we get to commit processing (see xfs_trans_commit()).
- */
-struct xfs_log_item_desc {
-       struct xfs_log_item     *lid_item;
-       struct list_head        lid_trans;
-       unsigned char           lid_flags;
-};
-
-#define XFS_LID_DIRTY          0x1
-
-/*
- * Values for t_flags.
- */
-#define        XFS_TRANS_DIRTY         0x01    /* something needs to be logged */
-#define        XFS_TRANS_SB_DIRTY      0x02    /* superblock is modified */
-#define        XFS_TRANS_PERM_LOG_RES  0x04    /* xact took a permanent log res */
-#define        XFS_TRANS_SYNC          0x08    /* make commit synchronous */
-#define XFS_TRANS_DQ_DIRTY     0x10    /* at least one dquot in trx dirty */
-#define XFS_TRANS_RESERVE      0x20    /* OK to use reserved data blocks */
-#define XFS_TRANS_FREEZE_PROT  0x40    /* Transaction has elevated writer
-                                          count in superblock */
-
-/*
- * Values for call flags parameter.
- */
-#define        XFS_TRANS_RELEASE_LOG_RES       0x4
-#define        XFS_TRANS_ABORT                 0x8
-
-/*
- * Field values for xfs_trans_mod_sb.
- */
-#define        XFS_TRANS_SB_ICOUNT             0x00000001
-#define        XFS_TRANS_SB_IFREE              0x00000002
-#define        XFS_TRANS_SB_FDBLOCKS           0x00000004
-#define        XFS_TRANS_SB_RES_FDBLOCKS       0x00000008
-#define        XFS_TRANS_SB_FREXTENTS          0x00000010
-#define        XFS_TRANS_SB_RES_FREXTENTS      0x00000020
-#define        XFS_TRANS_SB_DBLOCKS            0x00000040
-#define        XFS_TRANS_SB_AGCOUNT            0x00000080
-#define        XFS_TRANS_SB_IMAXPCT            0x00000100
-#define        XFS_TRANS_SB_REXTSIZE           0x00000200
-#define        XFS_TRANS_SB_RBMBLOCKS          0x00000400
-#define        XFS_TRANS_SB_RBLOCKS            0x00000800
-#define        XFS_TRANS_SB_REXTENTS           0x00001000
-#define        XFS_TRANS_SB_REXTSLOG           0x00002000
-
-/*
- * Here we centralize the specification of XFS meta-data buffer
- * reference count values.  This determine how hard the buffer
- * cache tries to hold onto the buffer.
- */
-#define        XFS_AGF_REF             4
-#define        XFS_AGI_REF             4
-#define        XFS_AGFL_REF            3
-#define        XFS_INO_BTREE_REF       3
-#define        XFS_ALLOC_BTREE_REF     2
-#define        XFS_BMAP_BTREE_REF      2
-#define        XFS_DIR_BTREE_REF       2
-#define        XFS_INO_REF             2
-#define        XFS_ATTR_BTREE_REF      1
-#define        XFS_DQUOT_REF           1
-
-/*
- * Flags for xfs_trans_ichgtime().
- */
-#define        XFS_ICHGTIME_MOD        0x1     /* data fork modification timestamp */
-#define        XFS_ICHGTIME_CHG        0x2     /* inode field change timestamp */
-#define        XFS_ICHGTIME_CREATE     0x4     /* inode create timestamp */
-
-
 /*
  * Inode Log Item Format definitions.
  *
@@ -797,7 +625,6 @@ typedef struct xfs_qoff_logformat {
        char                    qf_pad[12];     /* padding for future */
 } xfs_qoff_logformat_t;
 
-
 /*
  * Disk quotas status in m_qflags, and also sb_qflags. 16 bits.
  */
@@ -849,8 +676,4 @@ struct xfs_icreate_log {
        __be32          icl_gen;        /* inode generation number to use */
 };
 
-int    xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
-int    xfs_log_calc_minimum_size(struct xfs_mount *);
-
-
 #endif /* __XFS_LOG_FORMAT_H__ */
index 136654b9400df9b28a40415b1fbca3c9b7d6a958..9bc403a9e54f300f570e6e200574c36fb46db9b1 100644 (file)
@@ -22,6 +22,7 @@ struct xfs_buf;
 struct xlog;
 struct xlog_ticket;
 struct xfs_mount;
+struct xfs_log_callback;
 
 /*
  * Flags for log structure
@@ -227,8 +228,8 @@ typedef struct xlog_in_core {
 
        /* Callback structures need their own cacheline */
        spinlock_t              ic_callback_lock ____cacheline_aligned_in_smp;
-       xfs_log_callback_t      *ic_callback;
-       xfs_log_callback_t      **ic_callback_tail;
+       struct xfs_log_callback *ic_callback;
+       struct xfs_log_callback **ic_callback_tail;
 
        /* reference counts need their own cacheline */
        atomic_t                ic_refcnt ____cacheline_aligned_in_smp;
@@ -254,7 +255,7 @@ struct xfs_cil_ctx {
        int                     space_used;     /* aggregate size of regions */
        struct list_head        busy_extents;   /* busy extents in chkpt */
        struct xfs_log_vec      *lv_chain;      /* logvecs being pushed */
-       xfs_log_callback_t      log_cb;         /* completion callback hook. */
+       struct xfs_log_callback log_cb;         /* completion callback hook. */
        struct list_head        committing;     /* ctx committing list */
 };
 
@@ -514,12 +515,10 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
 /*
  * Committed Item List interfaces
  */
-int
-xlog_cil_init(struct xlog *log);
-void
-xlog_cil_init_post_recovery(struct xlog *log);
-void
-xlog_cil_destroy(struct xlog *log);
+int    xlog_cil_init(struct xlog *log);
+void   xlog_cil_init_post_recovery(struct xlog *log);
+void   xlog_cil_destroy(struct xlog *log);
+bool   xlog_cil_empty(struct xlog *log);
 
 /*
  * CIL force routines
index 39797490a1f1996e3f92f51efb532f15a75da8fa..b6b669df40f3ab335e75cd3a67903601be0128a4 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_alloc.h"
-#include "xfs_ialloc.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
 #include "xfs_log_recover.h"
+#include "xfs_inode_item.h"
 #include "xfs_extfree_item.h"
 #include "xfs_trans_priv.h"
+#include "xfs_alloc.h"
+#include "xfs_ialloc.h"
 #include "xfs_quota.h"
 #include "xfs_cksum.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
-#include "xfs_icreate_item.h"
-
-/* Need all the magic numbers and buffer ops structures from these headers */
-#include "xfs_symlink.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_dinode.h"
+#include "xfs_error.h"
 #include "xfs_dir2.h"
-#include "xfs_attr_leaf.h"
-#include "xfs_attr_remote.h"
 
 #define BLK_AVG(blk1, blk2)    ((blk1+blk2) >> 1)
 
@@ -305,9 +297,9 @@ xlog_header_check_dump(
        xfs_mount_t             *mp,
        xlog_rec_header_t       *head)
 {
-       xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
+       xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
                __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
-       xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
+       xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
                &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
 }
 #else
@@ -2362,7 +2354,7 @@ xlog_recover_do_reg_buffer(
                                        item->ri_buf[i].i_len, __func__);
                                goto next;
                        }
-                       error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
+                       error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
                                               -1, 0, XFS_QMOPT_DOWARN,
                                               "dquot_buf_recover");
                        if (error)
@@ -2393,133 +2385,6 @@ xlog_recover_do_reg_buffer(
                xlog_recover_validate_buf_type(mp, bp, buf_f);
 }
 
-/*
- * Do some primitive error checking on ondisk dquot data structures.
- */
-int
-xfs_qm_dqcheck(
-       struct xfs_mount *mp,
-       xfs_disk_dquot_t *ddq,
-       xfs_dqid_t       id,
-       uint             type,    /* used only when IO_dorepair is true */
-       uint             flags,
-       char             *str)
-{
-       xfs_dqblk_t      *d = (xfs_dqblk_t *)ddq;
-       int             errs = 0;
-
-       /*
-        * We can encounter an uninitialized dquot buffer for 2 reasons:
-        * 1. If we crash while deleting the quotainode(s), and those blks got
-        *    used for user data. This is because we take the path of regular
-        *    file deletion; however, the size field of quotainodes is never
-        *    updated, so all the tricks that we play in itruncate_finish
-        *    don't quite matter.
-        *
-        * 2. We don't play the quota buffers when there's a quotaoff logitem.
-        *    But the allocation will be replayed so we'll end up with an
-        *    uninitialized quota block.
-        *
-        * This is all fine; things are still consistent, and we haven't lost
-        * any quota information. Just don't complain about bad dquot blks.
-        */
-       if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
-               if (flags & XFS_QMOPT_DOWARN)
-                       xfs_alert(mp,
-                       "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
-                       str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
-               errs++;
-       }
-       if (ddq->d_version != XFS_DQUOT_VERSION) {
-               if (flags & XFS_QMOPT_DOWARN)
-                       xfs_alert(mp,
-                       "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
-                       str, id, ddq->d_version, XFS_DQUOT_VERSION);
-               errs++;
-       }
-
-       if (ddq->d_flags != XFS_DQ_USER &&
-           ddq->d_flags != XFS_DQ_PROJ &&
-           ddq->d_flags != XFS_DQ_GROUP) {
-               if (flags & XFS_QMOPT_DOWARN)
-                       xfs_alert(mp,
-                       "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
-                       str, id, ddq->d_flags);
-               errs++;
-       }
-
-       if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
-               if (flags & XFS_QMOPT_DOWARN)
-                       xfs_alert(mp,
-                       "%s : ondisk-dquot 0x%p, ID mismatch: "
-                       "0x%x expected, found id 0x%x",
-                       str, ddq, id, be32_to_cpu(ddq->d_id));
-               errs++;
-       }
-
-       if (!errs && ddq->d_id) {
-               if (ddq->d_blk_softlimit &&
-                   be64_to_cpu(ddq->d_bcount) >
-                               be64_to_cpu(ddq->d_blk_softlimit)) {
-                       if (!ddq->d_btimer) {
-                               if (flags & XFS_QMOPT_DOWARN)
-                                       xfs_alert(mp,
-                       "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
-                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
-                               errs++;
-                       }
-               }
-               if (ddq->d_ino_softlimit &&
-                   be64_to_cpu(ddq->d_icount) >
-                               be64_to_cpu(ddq->d_ino_softlimit)) {
-                       if (!ddq->d_itimer) {
-                               if (flags & XFS_QMOPT_DOWARN)
-                                       xfs_alert(mp,
-                       "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
-                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
-                               errs++;
-                       }
-               }
-               if (ddq->d_rtb_softlimit &&
-                   be64_to_cpu(ddq->d_rtbcount) >
-                               be64_to_cpu(ddq->d_rtb_softlimit)) {
-                       if (!ddq->d_rtbtimer) {
-                               if (flags & XFS_QMOPT_DOWARN)
-                                       xfs_alert(mp,
-                       "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
-                                       str, (int)be32_to_cpu(ddq->d_id), ddq);
-                               errs++;
-                       }
-               }
-       }
-
-       if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
-               return errs;
-
-       if (flags & XFS_QMOPT_DOWARN)
-               xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
-
-       /*
-        * Typically, a repair is only requested by quotacheck.
-        */
-       ASSERT(id != -1);
-       ASSERT(flags & XFS_QMOPT_DQREPAIR);
-       memset(d, 0, sizeof(xfs_dqblk_t));
-
-       d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
-       d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
-       d->dd_diskdq.d_flags = type;
-       d->dd_diskdq.d_id = cpu_to_be32(id);
-
-       if (xfs_sb_version_hascrc(&mp->m_sb)) {
-               uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
-               xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
-                                XFS_DQUOT_CRC_OFF);
-       }
-
-       return errs;
-}
-
 /*
  * Perform a dquot buffer recovery.
  * Simple algorithm: if we have found a QUOTAOFF log item of the same type
@@ -3125,7 +2990,7 @@ xlog_recover_dquot_pass2(
         */
        dq_f = item->ri_buf[0].i_addr;
        ASSERT(dq_f);
-       error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+       error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
                           "xlog_recover_dquot_pass2 (log copy)");
        if (error)
                return XFS_ERROR(EIO);
@@ -3145,7 +3010,7 @@ xlog_recover_dquot_pass2(
         * was among a chunk of dquots created earlier, and we did some
         * minimal initialization then.
         */
-       error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
+       error = xfs_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
                           "xlog_recover_dquot_pass2");
        if (error) {
                xfs_buf_relse(bp);
@@ -4077,7 +3942,7 @@ xlog_unpack_data_crc(
        if (crc != rhead->h_crc) {
                if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
                        xfs_alert(log->l_mp,
-               "log record CRC mismatch: found 0x%x, expected 0x%x.\n",
+               "log record CRC mismatch: found 0x%x, expected 0x%x.",
                                        le32_to_cpu(rhead->h_crc),
                                        le32_to_cpu(crc));
                        xfs_hex_dump(dp, 32);
index bbcec0bbc12da830f4a16479bfc8748589c4ddbc..2af1a0a4d0f17109ca2b5d1cdf12903c74451100 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_ag.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_trans_space.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
 #include "xfs_da_btree.h"
 #include "xfs_attr_leaf.h"
+#include "xfs_bmap_btree.h"
 
 /*
  * Calculate the maximum length in bytes that would be required for a local
index 9163dc14053244c7ff6a70f60e91271995bbe87c..63ca2f0420b108206ed1d610e50fe11f64bf00c4 100644 (file)
@@ -17,9 +17,8 @@
 
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
index 5dcc68019d1bc8c49a799695436045d69d49167f..da88f167af78dbf04df4eb0c1cdcfcad7a5bd699 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
-#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_dir2.h"
 #include "xfs_ialloc.h"
 #include "xfs_alloc.h"
 #include "xfs_rtalloc.h"
 #include "xfs_bmap.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_log.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_fsops.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
-#include "xfs_cksum.h"
-#include "xfs_buf_item.h"
 
 
 #ifdef HAVE_PERCPU_SB
index 3e6c2e6c9cd24d145514c95b7357ce91acc219f0..14a4996cfec6cb4fbeaa1d3f8432548ebb57d48b 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ialloc.h"
 #include "xfs_itable.h"
-#include "xfs_rtalloc.h"
+#include "xfs_quota.h"
 #include "xfs_error.h"
 #include "xfs_bmap.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_trans.h"
 #include "xfs_trans_space.h"
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_cksum.h"
+#include "xfs_dinode.h"
 
 /*
  * The global quota manager. There is only one of these for the entire
@@ -664,20 +661,6 @@ xfs_qm_dqdetach(
        }
 }
 
-int
-xfs_qm_calc_dquots_per_chunk(
-       struct xfs_mount        *mp,
-       unsigned int            nbblks) /* basic block units */
-{
-       unsigned int    ndquots;
-
-       ASSERT(nbblks > 0);
-       ndquots = BBTOB(nbblks);
-       do_div(ndquots, sizeof(xfs_dqblk_t));
-
-       return ndquots;
-}
-
 struct xfs_qm_isolate {
        struct list_head        buffers;
        struct list_head        dispose;
@@ -858,7 +841,7 @@ xfs_qm_init_quotainfo(
 
        /* Precalc some constants */
        qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
-       qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
+       qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(mp,
                                                        qinf->qi_dqchunklen);
 
        mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
@@ -1092,10 +1075,10 @@ xfs_qm_reset_dqcounts(
                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
-                * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
+                * find uninitialised dquot blks. See comment in xfs_dqcheck.
                 */
-               (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
-                                     "xfs_quotacheck");
+               xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
+                           "xfs_quotacheck");
                ddq->d_bcount = 0;
                ddq->d_icount = 0;
                ddq->d_rtbcount = 0;
index 2b602df9c242d3c3efe7b6841abe5fdbee28d00e..a788b66a5cb1c7e73898c877b0c3629eeb8e17a7 100644 (file)
@@ -103,8 +103,6 @@ xfs_dq_to_quota_inode(struct xfs_dquot *dqp)
        return NULL;
 }
 
-extern int     xfs_qm_calc_dquots_per_chunk(struct xfs_mount *mp,
-                                            unsigned int nbblks);
 extern void    xfs_trans_mod_dquot(struct xfs_trans *,
                                        struct xfs_dquot *, uint, long);
 extern int     xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
index 3af50ccdfac1a10da858ef26e80b040ad4a474f1..e9be63abd8d29f9003521ce5efa0b2aeaae2f467 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
 #include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_qm.h"
 
 
index 8174aad0b38813ec836ea044d9a1b7b4dc06f300..437c9198031a49a940aecaa441dc305de0e9a145 100644 (file)
 
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_inode_item.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_trans.h"
 #include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_quota.h"
 #include "xfs_qm.h"
 #include "xfs_trace.h"
 #include "xfs_icache.h"
@@ -287,7 +281,7 @@ xfs_qm_scall_trunc_qfiles(
        int             error = 0, error2 = 0;
 
        if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
-               xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
+               xfs_debug(mp, "%s: flags=%x m_qflags=%x",
                        __func__, flags, mp->m_qflags);
                return XFS_ERROR(EINVAL);
        }
@@ -325,7 +319,7 @@ xfs_qm_scall_quotaon(
        sbflags = 0;
 
        if (flags == 0) {
-               xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
+               xfs_debug(mp, "%s: zero flags, m_qflags=%x",
                        __func__, mp->m_qflags);
                return XFS_ERROR(EINVAL);
        }
@@ -348,7 +342,7 @@ xfs_qm_scall_quotaon(
             (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
             (flags & XFS_PQUOTA_ENFD))) {
                xfs_debug(mp,
-                       "%s: Can't enforce without acct, flags=%x sbflags=%x\n",
+                       "%s: Can't enforce without acct, flags=%x sbflags=%x",
                        __func__, flags, mp->m_sb.sb_qflags);
                return XFS_ERROR(EINVAL);
        }
@@ -648,7 +642,7 @@ xfs_qm_scall_setqlim(
                        q->qi_bsoftlimit = soft;
                }
        } else {
-               xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
+               xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
        }
        hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
                (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
@@ -664,7 +658,7 @@ xfs_qm_scall_setqlim(
                        q->qi_rtbsoftlimit = soft;
                }
        } else {
-               xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
+               xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
        }
 
        hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
@@ -681,7 +675,7 @@ xfs_qm_scall_setqlim(
                        q->qi_isoftlimit = soft;
                }
        } else {
-               xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
+               xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
        }
 
        /*
index e7d84d2d86830a25a5cd9778521766d4a592c45b..5376dd406ba2c099e23230014b1b23cbb832f6ad 100644
@@ -150,10 +150,6 @@ static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
        xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, pd, nb, ni, \
                                f | XFS_QMOPT_RES_REGBLKS)
 
-extern int xfs_qm_dqcheck(struct xfs_mount *, xfs_disk_dquot_t *,
-                               xfs_dqid_t, uint, uint, char *);
 extern int xfs_mount_reset_sbqflags(struct xfs_mount *);
 
-extern const struct xfs_buf_ops xfs_dquot_buf_ops;
-
 #endif /* __XFS_QUOTA_H__ */
index e6b0d6e1f4f2b0560a486e1e14e179193325b71f..b3b2b1065c0f4db8a6c972880b2dc8601e189e03 100644
@@ -154,4 +154,8 @@ typedef __uint16_t  xfs_qwarncnt_t;
                (XFS_QMOPT_UQUOTA | XFS_QMOPT_PQUOTA | XFS_QMOPT_GQUOTA)
 #define XFS_QMOPT_RESBLK_MASK  (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
 
+extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq,
+                      xfs_dqid_t id, uint type, uint flags, char *str);
+extern int xfs_calc_dquots_per_chunk(struct xfs_mount *mp, unsigned int nbblks);
+
 #endif /* __XFS_QUOTA_H__ */
index 1326d81596c2920b27f45021b67b76979e5ef387..af33cafe69b6417c201c90175a8f3e9bb4417814 100644
  */
 #include "xfs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_log.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_inode.h"
 #include "xfs_quota.h"
 #include "xfs_trans.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_inode.h"
 #include "xfs_qm.h"
 #include <linux/quota.h>
 
index 6f9e63c9fc2617ab89966447083527f1d0c94257..a6a76b2b6a85db9ece8acb0565e82e310319ec9d 100644
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_alloc.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_rtalloc.h"
-#include "xfs_fsops.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc.h"
 #include "xfs_error.h"
-#include "xfs_inode_item.h"
+#include "xfs_trans.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
 #include "xfs_buf.h"
 #include "xfs_icache.h"
+#include "xfs_dinode.h"
+#include "xfs_rtalloc.h"
 
 
 /*
- * Prototypes for internal functions.
- */
-
-
-STATIC int xfs_rtallocate_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-               xfs_extlen_t, xfs_buf_t **, xfs_fsblock_t *);
-STATIC int xfs_rtany_summary(xfs_mount_t *, xfs_trans_t *, int, int,
-               xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, int *);
-STATIC int xfs_rtcheck_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-               xfs_extlen_t, int, xfs_rtblock_t *, int *);
-STATIC int xfs_rtfind_back(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-               xfs_rtblock_t, xfs_rtblock_t *);
-STATIC int xfs_rtfind_forw(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-               xfs_rtblock_t, xfs_rtblock_t *);
-STATIC int xfs_rtget_summary( xfs_mount_t *, xfs_trans_t *, int,
-               xfs_rtblock_t, xfs_buf_t **, xfs_fsblock_t *, xfs_suminfo_t *);
-STATIC int xfs_rtmodify_range(xfs_mount_t *, xfs_trans_t *, xfs_rtblock_t,
-               xfs_extlen_t, int);
-STATIC int xfs_rtmodify_summary(xfs_mount_t *, xfs_trans_t *, int,
-               xfs_rtblock_t, int, xfs_buf_t **, xfs_fsblock_t *);
-
-/*
- * Internal functions.
- */
-
-/*
- * Allocate space to the bitmap or summary file, and zero it, for growfs.
+ * Read and return the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
  */
 STATIC int                             /* error */
-xfs_growfs_rt_alloc(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_extlen_t    oblocks,        /* old count of blocks */
-       xfs_extlen_t    nblocks,        /* new count of blocks */
-       xfs_inode_t     *ip)            /* inode (bitmap/summary) */
+xfs_rtget_summary(
+       xfs_mount_t     *mp,            /* file system mount structure */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       int             log,            /* log2 of extent size */
+       xfs_rtblock_t   bbno,           /* bitmap block number */
+       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
+       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
+       xfs_suminfo_t   *sum)           /* out: summary info for this block */
 {
-       xfs_fileoff_t   bno;            /* block number in file */
-       xfs_buf_t       *bp;            /* temporary buffer for zeroing */
-       int             committed;      /* transaction committed flag */
-       xfs_daddr_t     d;              /* disk block address */
-       int             error;          /* error return value */
-       xfs_fsblock_t   firstblock;     /* first block allocated in xaction */
-       xfs_bmap_free_t flist;          /* list of freed blocks */
-       xfs_fsblock_t   fsbno;          /* filesystem block for bno */
-       xfs_bmbt_irec_t map;            /* block map output */
-       int             nmap;           /* number of block maps */
-       int             resblks;        /* space reservation */
+       xfs_buf_t       *bp;            /* buffer for summary block */
+       int             error;          /* error value */
+       xfs_fsblock_t   sb;             /* summary fsblock */
+       int             so;             /* index into the summary file */
+       xfs_suminfo_t   *sp;            /* pointer to returned data */
 
        /*
-        * Allocate space to the file, as necessary.
+        * Compute entry number in the summary file.
         */
-       while (oblocks < nblocks) {
-               int             cancelflags = 0;
-               xfs_trans_t     *tp;
-
-               tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
-               resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
-               /*
-                * Reserve space & log for one extent added to the file.
-                */
-               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
-                                         resblks, 0);
-               if (error)
-                       goto error_cancel;
-               cancelflags = XFS_TRANS_RELEASE_LOG_RES;
-               /*
-                * Lock the inode.
-                */
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
-               xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
-               xfs_bmap_init(&flist, &firstblock);
-               /*
-                * Allocate blocks to the bitmap file.
-                */
-               nmap = 1;
-               cancelflags |= XFS_TRANS_ABORT;
-               error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
-                                       XFS_BMAPI_METADATA, &firstblock,
-                                       resblks, &map, &nmap, &flist);
-               if (!error && nmap < 1)
-                       error = XFS_ERROR(ENOSPC);
-               if (error)
-                       goto error_cancel;
-               /*
-                * Free any blocks freed up in the transaction, then commit.
-                */
-               error = xfs_bmap_finish(&tp, &flist, &committed);
-               if (error)
-                       goto error_cancel;
-               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-               if (error)
-                       goto error;
+       so = XFS_SUMOFFS(mp, log, bbno);
+       /*
+        * Compute the block number in the summary file.
+        */
+       sb = XFS_SUMOFFSTOBLOCK(mp, so);
+       /*
+        * If we have an old buffer, and the block number matches, use that.
+        */
+       if (rbpp && *rbpp && *rsb == sb)
+               bp = *rbpp;
+       /*
+        * Otherwise we have to get the buffer.
+        */
+       else {
                /*
-                * Now we need to clear the allocated blocks.
-                * Do this one block per transaction, to keep it simple.
+                * If there was an old one, get rid of it first.
                 */
-               cancelflags = 0;
-               for (bno = map.br_startoff, fsbno = map.br_startblock;
-                    bno < map.br_startoff + map.br_blockcount;
-                    bno++, fsbno++) {
-                       tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
-                       /*
-                        * Reserve log for one block zeroing.
-                        */
-                       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
-                                                 0, 0);
-                       if (error)
-                               goto error_cancel;
-                       /*
-                        * Lock the bitmap inode.
-                        */
-                       xfs_ilock(ip, XFS_ILOCK_EXCL);
-                       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-                       /*
-                        * Get a buffer for the block.
-                        */
-                       d = XFS_FSB_TO_DADDR(mp, fsbno);
-                       bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
-                               mp->m_bsize, 0);
-                       if (bp == NULL) {
-                               error = XFS_ERROR(EIO);
-error_cancel:
-                               xfs_trans_cancel(tp, cancelflags);
-                               goto error;
-                       }
-                       memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
-                       xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
-                       /*
-                        * Commit the transaction.
-                        */
-                       error = xfs_trans_commit(tp, 0);
-                       if (error)
-                               goto error;
+               if (rbpp && *rbpp)
+                       xfs_trans_brelse(tp, *rbpp);
+               error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+               if (error) {
+                       return error;
                }
                /*
-                * Go on to the next extent, if any.
+                * Remember this buffer and block for the next call.
                 */
-               oblocks = map.br_startoff + map.br_blockcount;
+               if (rbpp) {
+                       *rbpp = bp;
+                       *rsb = sb;
+               }
        }
+       /*
+        * Point to the summary information & copy it out.
+        */
+       sp = XFS_SUMPTR(mp, bp, so);
+       *sum = *sp;
+       /*
+        * Drop the buffer if we're not asked to remember it.
+        */
+       if (!rbpp)
+               xfs_trans_brelse(tp, bp);
        return 0;
-
-error:
-       return error;
 }
 
+
 /*
- * Attempt to allocate an extent minlen<=len<=maxlen starting from
- * bitmap block bbno.  If we don't get maxlen then use prod to trim
- * the length, if given.  Returns error; returns starting block in *rtblock.
- * The lengths are all in rtextents.
+ * Return whether there are any free extents in the size range given
+ * by low and high, for the bitmap block bbno.
  */
 STATIC int                             /* error */
-xfs_rtallocate_extent_block(
-       xfs_mount_t     *mp,            /* file system mount point */
+xfs_rtany_summary(
+       xfs_mount_t     *mp,            /* file system mount structure */
        xfs_trans_t     *tp,            /* transaction pointer */
+       int             low,            /* low log2 extent size */
+       int             high,           /* high log2 extent size */
        xfs_rtblock_t   bbno,           /* bitmap block number */
-       xfs_extlen_t    minlen,         /* minimum length to allocate */
-       xfs_extlen_t    maxlen,         /* maximum length to allocate */
-       xfs_extlen_t    *len,           /* out: actual length allocated */
-       xfs_rtblock_t   *nextp,         /* out: next block to try */
        xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
        xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       xfs_extlen_t    prod,           /* extent product factor */
-       xfs_rtblock_t   *rtblock)       /* out: start block allocated */
+       int             *stat)          /* out: any good extents here? */
 {
-       xfs_rtblock_t   besti;          /* best rtblock found so far */
-       xfs_rtblock_t   bestlen;        /* best length found so far */
-       xfs_rtblock_t   end;            /* last rtblock in chunk */
        int             error;          /* error value */
-       xfs_rtblock_t   i;              /* current rtblock trying */
-       xfs_rtblock_t   next;           /* next rtblock to try */
-       int             stat;           /* status from internal calls */
+       int             log;            /* loop counter, log2 of ext. size */
+       xfs_suminfo_t   sum;            /* summary data */
 
        /*
-        * Loop over all the extents starting in this bitmap block,
-        * looking for one that's long enough.
+        * Loop over logs of extent sizes.  Order is irrelevant.
         */
-       for (i = XFS_BLOCKTOBIT(mp, bbno), besti = -1, bestlen = 0,
-               end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
-            i <= end;
-            i++) {
+       for (log = low; log <= high; log++) {
                /*
-                * See if there's a free extent of maxlen starting at i.
-                * If it's not so then next will contain the first non-free.
+                * Get one summary datum.
                 */
-               error = xfs_rtcheck_range(mp, tp, i, maxlen, 1, &next, &stat);
+               error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
                if (error) {
                        return error;
                }
-               if (stat) {
-                       /*
-                        * i for maxlen is all free, allocate and return that.
-                        */
-                       error = xfs_rtallocate_range(mp, tp, i, maxlen, rbpp,
-                               rsb);
-                       if (error) {
-                               return error;
-                       }
-                       *len = maxlen;
-                       *rtblock = i;
-                       return 0;
-               }
                /*
-                * In the case where we have a variable-sized allocation
-                * request, figure out how big this free piece is,
-                * and if it's big enough for the minimum, and the best
-                * so far, remember it.
+                * If there are any, return success.
                 */
-               if (minlen < maxlen) {
-                       xfs_rtblock_t   thislen;        /* this extent size */
-
-                       thislen = next - i;
-                       if (thislen >= minlen && thislen > bestlen) {
-                               besti = i;
-                               bestlen = thislen;
-                       }
+               if (sum) {
+                       *stat = 1;
+                       return 0;
                }
-               /*
-                * If not done yet, find the start of the next free space.
-                */
-               if (next < end) {
-                       error = xfs_rtfind_forw(mp, tp, next, end, &i);
-                       if (error) {
-                               return error;
-                       }
-               } else
-                       break;
        }
        /*
-        * Searched the whole thing & didn't find a maxlen free extent.
+        * Found nothing, return failure.
+        */
+       *stat = 0;
+       return 0;
+}
+
+
+/*
+ * Copy and transform the summary file, given the old and new
+ * parameters in the mount structures.
+ */
+STATIC int                             /* error */
+xfs_rtcopy_summary(
+       xfs_mount_t     *omp,           /* old file system mount point */
+       xfs_mount_t     *nmp,           /* new file system mount point */
+       xfs_trans_t     *tp)            /* transaction pointer */
+{
+       xfs_rtblock_t   bbno;           /* bitmap block number */
+       xfs_buf_t       *bp;            /* summary buffer */
+       int             error;          /* error return value */
+       int             log;            /* summary level number (log length) */
+       xfs_suminfo_t   sum;            /* summary data */
+       xfs_fsblock_t   sumbno;         /* summary block number */
+
+       bp = NULL;
+       for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
+               for (bbno = omp->m_sb.sb_rbmblocks - 1;
+                    (xfs_srtblock_t)bbno >= 0;
+                    bbno--) {
+                       error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
+                               &sumbno, &sum);
+                       if (error)
+                               return error;
+                       if (sum == 0)
+                               continue;
+                       error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
+                               &bp, &sumbno);
+                       if (error)
+                               return error;
+                       error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
+                               &bp, &sumbno);
+                       if (error)
+                               return error;
+                       ASSERT(sum > 0);
+               }
+       }
+       return 0;
+}
+/*
+ * Mark an extent specified by start and len allocated.
+ * Updates all the summary information as well as the bitmap.
+ */
+STATIC int                             /* error */
+xfs_rtallocate_range(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* start block to allocate */
+       xfs_extlen_t    len,            /* length to allocate */
+       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
+       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+{
+       xfs_rtblock_t   end;            /* end of the allocated extent */
+       int             error;          /* error value */
+       xfs_rtblock_t   postblock = 0;  /* first block allocated > end */
+       xfs_rtblock_t   preblock = 0;   /* first block allocated < start */
+
+       end = start + len - 1;
+       /*
+        * Assume we're allocating out of the middle of a free extent.
+        * We need to find the beginning and end of the extent so we can
+        * properly update the summary.
+        */
+       error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+       if (error) {
+               return error;
+       }
+       /*
+        * Find the next allocated block (end of free extent).
+        */
+       error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+               &postblock);
+       if (error) {
+               return error;
+       }
+       /*
+        * Decrement the summary information corresponding to the entire
+        * (old) free extent.
+        */
+       error = xfs_rtmodify_summary(mp, tp,
+               XFS_RTBLOCKLOG(postblock + 1 - preblock),
+               XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+       if (error) {
+               return error;
+       }
+       /*
+        * If there are blocks not being allocated at the front of the
+        * old extent, add summary data for them to be free.
+        */
+       if (preblock < start) {
+               error = xfs_rtmodify_summary(mp, tp,
+                       XFS_RTBLOCKLOG(start - preblock),
+                       XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+               if (error) {
+                       return error;
+               }
+       }
+       /*
+        * If there are blocks not being allocated at the end of the
+        * old extent, add summary data for them to be free.
+        */
+       if (postblock > end) {
+               error = xfs_rtmodify_summary(mp, tp,
+                       XFS_RTBLOCKLOG(postblock - end),
+                       XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
+               if (error) {
+                       return error;
+               }
+       }
+       /*
+        * Modify the bitmap to mark this extent allocated.
+        */
+       error = xfs_rtmodify_range(mp, tp, start, len, 0);
+       return error;
+}
+
+/*
+ * Attempt to allocate an extent minlen<=len<=maxlen starting from
+ * bitmap block bbno.  If we don't get maxlen then use prod to trim
+ * the length, if given.  Returns error; returns starting block in *rtblock.
+ * The lengths are all in rtextents.
+ */
+STATIC int                             /* error */
+xfs_rtallocate_extent_block(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   bbno,           /* bitmap block number */
+       xfs_extlen_t    minlen,         /* minimum length to allocate */
+       xfs_extlen_t    maxlen,         /* maximum length to allocate */
+       xfs_extlen_t    *len,           /* out: actual length allocated */
+       xfs_rtblock_t   *nextp,         /* out: next block to try */
+       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
+       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
+       xfs_extlen_t    prod,           /* extent product factor */
+       xfs_rtblock_t   *rtblock)       /* out: start block allocated */
+{
+       xfs_rtblock_t   besti;          /* best rtblock found so far */
+       xfs_rtblock_t   bestlen;        /* best length found so far */
+       xfs_rtblock_t   end;            /* last rtblock in chunk */
+       int             error;          /* error value */
+       xfs_rtblock_t   i;              /* current rtblock trying */
+       xfs_rtblock_t   next;           /* next rtblock to try */
+       int             stat;           /* status from internal calls */
+
+       /*
+        * Loop over all the extents starting in this bitmap block,
+        * looking for one that's long enough.
+        */
+       for (i = XFS_BLOCKTOBIT(mp, bbno), besti = -1, bestlen = 0,
+               end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
+            i <= end;
+            i++) {
+               /*
+                * See if there's a free extent of maxlen starting at i.
+                * If it's not so then next will contain the first non-free.
+                */
+               error = xfs_rtcheck_range(mp, tp, i, maxlen, 1, &next, &stat);
+               if (error) {
+                       return error;
+               }
+               if (stat) {
+                       /*
+                        * i for maxlen is all free, allocate and return that.
+                        */
+                       error = xfs_rtallocate_range(mp, tp, i, maxlen, rbpp,
+                               rsb);
+                       if (error) {
+                               return error;
+                       }
+                       *len = maxlen;
+                       *rtblock = i;
+                       return 0;
+               }
+               /*
+                * In the case where we have a variable-sized allocation
+                * request, figure out how big this free piece is,
+                * and if it's big enough for the minimum, and the best
+                * so far, remember it.
+                */
+               if (minlen < maxlen) {
+                       xfs_rtblock_t   thislen;        /* this extent size */
+
+                       thislen = next - i;
+                       if (thislen >= minlen && thislen > bestlen) {
+                               besti = i;
+                               bestlen = thislen;
+                       }
+               }
+               /*
+                * If not done yet, find the start of the next free space.
+                */
+               if (next < end) {
+                       error = xfs_rtfind_forw(mp, tp, next, end, &i);
+                       if (error) {
+                               return error;
+                       }
+               } else
+                       break;
+       }
+       /*
+        * Searched the whole thing & didn't find a maxlen free extent.
         */
        if (minlen < maxlen && besti != -1) {
                xfs_extlen_t    p;      /* amount to trim length by */
@@ -639,1191 +727,205 @@ xfs_rtallocate_extent_size(
                         */
                        if (r != NULLRTBLOCK) {
                                *rtblock = r;
-                               return 0;
-                       }
-                       /*
-                        * If the "next block to try" returned from the
-                        * allocator is beyond the next bitmap block,
-                        * skip to that bitmap block.
-                        */
-                       if (XFS_BITTOBLOCK(mp, n) > i + 1)
-                               i = XFS_BITTOBLOCK(mp, n) - 1;
-               }
-       }
-       /*
-        * Didn't find any maxlen blocks.  Try smaller ones, unless
-        * we're asking for a fixed size extent.
-        */
-       if (minlen > --maxlen) {
-               *rtblock = NULLRTBLOCK;
-               return 0;
-       }
-       ASSERT(minlen != 0);
-       ASSERT(maxlen != 0);
-
-       /*
-        * Loop over sizes, from maxlen down to minlen.
-        * This time, when we do the allocations, allow smaller ones
-        * to succeed.
-        */
-       for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) {
-               /*
-                * Loop over all the bitmap blocks, try an allocation
-                * starting in that block.
-                */
-               for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) {
-                       /*
-                        * Get the summary information for this level/block.
-                        */
-                       error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb,
-                                                 &sum);
-                       if (error) {
-                               return error;
-                       }
-                       /*
-                        * If nothing there, go on to next.
-                        */
-                       if (!sum)
-                               continue;
-                       /*
-                        * Try the allocation.  Make sure the specified
-                        * minlen/maxlen are in the possible range for
-                        * this summary level.
-                        */
-                       error = xfs_rtallocate_extent_block(mp, tp, i,
-                                       XFS_RTMAX(minlen, 1 << l),
-                                       XFS_RTMIN(maxlen, (1 << (l + 1)) - 1),
-                                       len, &n, rbpp, rsb, prod, &r);
-                       if (error) {
-                               return error;
-                       }
-                       /*
-                        * If it worked, return that extent.
-                        */
-                       if (r != NULLRTBLOCK) {
-                               *rtblock = r;
-                               return 0;
-                       }
-                       /*
-                        * If the "next block to try" returned from the
-                        * allocator is beyond the next bitmap block,
-                        * skip to that bitmap block.
-                        */
-                       if (XFS_BITTOBLOCK(mp, n) > i + 1)
-                               i = XFS_BITTOBLOCK(mp, n) - 1;
-               }
-       }
-       /*
-        * Got nothing, return failure.
-        */
-       *rtblock = NULLRTBLOCK;
-       return 0;
-}
-
-/*
- * Mark an extent specified by start and len allocated.
- * Updates all the summary information as well as the bitmap.
- */
-STATIC int                             /* error */
-xfs_rtallocate_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* start block to allocate */
-       xfs_extlen_t    len,            /* length to allocate */
-       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
-{
-       xfs_rtblock_t   end;            /* end of the allocated extent */
-       int             error;          /* error value */
-       xfs_rtblock_t   postblock = 0;  /* first block allocated > end */
-       xfs_rtblock_t   preblock = 0;   /* first block allocated < start */
-
-       end = start + len - 1;
-       /*
-        * Assume we're allocating out of the middle of a free extent.
-        * We need to find the beginning and end of the extent so we can
-        * properly update the summary.
-        */
-       error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
-       if (error) {
-               return error;
-       }
-       /*
-        * Find the next allocated block (end of free extent).
-        */
-       error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
-               &postblock);
-       if (error) {
-               return error;
-       }
-       /*
-        * Decrement the summary information corresponding to the entire
-        * (old) free extent.
-        */
-       error = xfs_rtmodify_summary(mp, tp,
-               XFS_RTBLOCKLOG(postblock + 1 - preblock),
-               XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
-       if (error) {
-               return error;
-       }
-       /*
-        * If there are blocks not being allocated at the front of the
-        * old extent, add summary data for them to be free.
-        */
-       if (preblock < start) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(start - preblock),
-                       XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
-               if (error) {
-                       return error;
-               }
-       }
-       /*
-        * If there are blocks not being allocated at the end of the
-        * old extent, add summary data for them to be free.
-        */
-       if (postblock > end) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(postblock - end),
-                       XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
-               if (error) {
-                       return error;
-               }
-       }
-       /*
-        * Modify the bitmap to mark this extent allocated.
-        */
-       error = xfs_rtmodify_range(mp, tp, start, len, 0);
-       return error;
-}
-
-/*
- * Return whether there are any free extents in the size range given
- * by low and high, for the bitmap block bbno.
- */
-STATIC int                             /* error */
-xfs_rtany_summary(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       int             low,            /* low log2 extent size */
-       int             high,           /* high log2 extent size */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       int             *stat)          /* out: any good extents here? */
-{
-       int             error;          /* error value */
-       int             log;            /* loop counter, log2 of ext. size */
-       xfs_suminfo_t   sum;            /* summary data */
-
-       /*
-        * Loop over logs of extent sizes.  Order is irrelevant.
-        */
-       for (log = low; log <= high; log++) {
-               /*
-                * Get one summary datum.
-                */
-               error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
-               if (error) {
-                       return error;
-               }
-               /*
-                * If there are any, return success.
-                */
-               if (sum) {
-                       *stat = 1;
-                       return 0;
-               }
-       }
-       /*
-        * Found nothing, return failure.
-        */
-       *stat = 0;
-       return 0;
-}
-
-/*
- * Get a buffer for the bitmap or summary file block specified.
- * The buffer is returned read and locked.
- */
-STATIC int                             /* error */
-xfs_rtbuf_get(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   block,          /* block number in bitmap or summary */
-       int             issum,          /* is summary not bitmap */
-       xfs_buf_t       **bpp)          /* output: buffer for the block */
-{
-       xfs_buf_t       *bp;            /* block buffer, result */
-       xfs_inode_t     *ip;            /* bitmap or summary inode */
-       xfs_bmbt_irec_t map;
-       int             nmap = 1;
-       int             error;          /* error value */
-
-       ip = issum ? mp->m_rsumip : mp->m_rbmip;
-
-       error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
-       if (error)
-               return error;
-
-       ASSERT(map.br_startblock != NULLFSBLOCK);
-       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
-                                  XFS_FSB_TO_DADDR(mp, map.br_startblock),
-                                  mp->m_bsize, 0, &bp, NULL);
-       if (error)
-               return error;
-       ASSERT(!xfs_buf_geterror(bp));
-       *bpp = bp;
-       return 0;
-}
-
-#ifdef DEBUG
-/*
- * Check that the given extent (block range) is allocated already.
- */
-STATIC int                             /* error */
-xfs_rtcheck_alloc_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   bno,            /* starting block number of extent */
-       xfs_extlen_t    len,            /* length of extent */
-       int             *stat)          /* out: 1 for allocated, 0 for not */
-{
-       xfs_rtblock_t   new;            /* dummy for xfs_rtcheck_range */
-
-       return xfs_rtcheck_range(mp, tp, bno, len, 0, &new, stat);
-}
-#endif
-
-/*
- * Check that the given range is either all allocated (val = 0) or
- * all free (val = 1).
- */
-STATIC int                             /* error */
-xfs_rtcheck_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block number of extent */
-       xfs_extlen_t    len,            /* length of extent */
-       int             val,            /* 1 for free, 0 for allocated */
-       xfs_rtblock_t   *new,           /* out: first block not matching */
-       int             *stat)          /* out: 1 for matches, 0 for not */
-{
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       xfs_buf_t       *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtblock_t   i;              /* current bit number rel. to start */
-       xfs_rtblock_t   lastbit;        /* last useful bit in word */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       xfs_rtword_t    wdiff;          /* difference from wanted value */
-       int             word;           /* word number in the buffer */
-
-       /*
-        * Compute starting bitmap block number
-        */
-       block = XFS_BITTOBLOCK(mp, start);
-       /*
-        * Read the bitmap block.
-        */
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
-               return error;
-       }
-       bufp = bp->b_addr;
-       /*
-        * Compute the starting word's address, and starting bit.
-        */
-       word = XFS_BITTOWORD(mp, start);
-       b = &bufp[word];
-       bit = (int)(start & (XFS_NBWORD - 1));
-       /*
-        * 0 (allocated) => all zero's; 1 (free) => all one's.
-        */
-       val = -val;
-       /*
-        * If not starting on a word boundary, deal with the first
-        * (partial) word.
-        */
-       if (bit) {
-               /*
-                * Compute first bit not examined.
-                */
-               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
-               /*
-                * Mask of relevant bits.
-                */
-               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = (*b ^ val) & mask)) {
-                       /*
-                        * Different, compute first wrong bit and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i = XFS_RTLOBIT(wdiff) - bit;
-                       *new = start + i;
-                       *stat = 0;
-                       return 0;
-               }
-               i = lastbit - bit;
-               /*
-                * Go on to next block if that's where the next word is
-                * and we need the next word.
-                */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-                       /*
-                        * If done with this block, get the next one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer.
-                        */
-                       b++;
-               }
-       } else {
-               /*
-                * Starting on a word boundary, no partial word.
-                */
-               i = 0;
-       }
-       /*
-        * Loop over whole words in buffers.  When we use up one buffer
-        * we move on to the next one.
-        */
-       while (len - i >= XFS_NBWORD) {
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = *b ^ val)) {
-                       /*
-                        * Different, compute first wrong bit and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_RTLOBIT(wdiff);
-                       *new = start + i;
-                       *stat = 0;
-                       return 0;
-               }
-               i += XFS_NBWORD;
-               /*
-                * Go on to next block if that's where the next word is
-                * and we need the next word.
-                */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-                       /*
-                        * If done with this block, get the next one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer.
-                        */
-                       b++;
-               }
-       }
-       /*
-        * If not ending on a word boundary, deal with the last
-        * (partial) word.
-        */
-       if ((lastbit = len - i)) {
-               /*
-                * Mask of relevant bits.
-                */
-               mask = ((xfs_rtword_t)1 << lastbit) - 1;
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = (*b ^ val) & mask)) {
-                       /*
-                        * Different, compute first wrong bit and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_RTLOBIT(wdiff);
-                       *new = start + i;
-                       *stat = 0;
-                       return 0;
-               } else
-                       i = len;
-       }
-       /*
-        * Successful, return.
-        */
-       xfs_trans_brelse(tp, bp);
-       *new = start + i;
-       *stat = 1;
-       return 0;
-}
-
-/*
- * Copy and transform the summary file, given the old and new
- * parameters in the mount structures.
- */
-STATIC int                             /* error */
-xfs_rtcopy_summary(
-       xfs_mount_t     *omp,           /* old file system mount point */
-       xfs_mount_t     *nmp,           /* new file system mount point */
-       xfs_trans_t     *tp)            /* transaction pointer */
-{
-       xfs_rtblock_t   bbno;           /* bitmap block number */
-       xfs_buf_t       *bp;            /* summary buffer */
-       int             error;          /* error return value */
-       int             log;            /* summary level number (log length) */
-       xfs_suminfo_t   sum;            /* summary data */
-       xfs_fsblock_t   sumbno;         /* summary block number */
-
-       bp = NULL;
-       for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
-               for (bbno = omp->m_sb.sb_rbmblocks - 1;
-                    (xfs_srtblock_t)bbno >= 0;
-                    bbno--) {
-                       error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
-                               &sumbno, &sum);
-                       if (error)
-                               return error;
-                       if (sum == 0)
-                               continue;
-                       error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
-                               &bp, &sumbno);
-                       if (error)
-                               return error;
-                       error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
-                               &bp, &sumbno);
-                       if (error)
-                               return error;
-                       ASSERT(sum > 0);
-               }
-       }
-       return 0;
-}
-
-/*
- * Searching backward from start to limit, find the first block whose
- * allocated/free state is different from start's.
- */
-STATIC int                             /* error */
-xfs_rtfind_back(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to look at */
-       xfs_rtblock_t   limit,          /* last block to look at */
-       xfs_rtblock_t   *rtblock)       /* out: start block found */
-{
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       xfs_buf_t       *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtblock_t   firstbit;       /* first useful bit in the word */
-       xfs_rtblock_t   i;              /* current bit number rel. to start */
-       xfs_rtblock_t   len;            /* length of inspected area */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       xfs_rtword_t    want;           /* mask for "good" values */
-       xfs_rtword_t    wdiff;          /* difference from wanted value */
-       int             word;           /* word number in the buffer */
-
-       /*
-        * Compute and read in starting bitmap block for starting block.
-        */
-       block = XFS_BITTOBLOCK(mp, start);
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
-               return error;
-       }
-       bufp = bp->b_addr;
-       /*
-        * Get the first word's index & point to it.
-        */
-       word = XFS_BITTOWORD(mp, start);
-       b = &bufp[word];
-       bit = (int)(start & (XFS_NBWORD - 1));
-       len = start - limit + 1;
-       /*
-        * Compute match value, based on the bit at start: if 1 (free)
-        * then all-ones, else all-zeroes.
-        */
-       want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
-       /*
-        * If the starting position is not word-aligned, deal with the
-        * partial word.
-        */
-       if (bit < XFS_NBWORD - 1) {
-               /*
-                * Calculate first (leftmost) bit number to look at,
-                * and mask for all the relevant bits in this word.
-                */
-               firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0);
-               mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) <<
-                       firstbit;
-               /*
-                * Calculate the difference between the value there
-                * and what we're looking for.
-                */
-               if ((wdiff = (*b ^ want) & mask)) {
-                       /*
-                        * Different.  Mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i = bit - XFS_RTHIBIT(wdiff);
-                       *rtblock = start - i + 1;
-                       return 0;
-               }
-               i = bit - firstbit + 1;
-               /*
-                * Go on to previous block if that's where the previous word is
-                * and we need the previous word.
-                */
-               if (--word == -1 && i < len) {
-                       /*
-                        * If done with this block, get the previous one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       bufp = bp->b_addr;
-                       word = XFS_BLOCKWMASK(mp);
-                       b = &bufp[word];
-               } else {
-                       /*
-                        * Go on to the previous word in the buffer.
-                        */
-                       b--;
-               }
-       } else {
-               /*
-                * Starting on a word boundary, no partial word.
-                */
-               i = 0;
-       }
-       /*
-        * Loop over whole words in buffers.  When we use up one buffer
-        * we move on to the previous one.
-        */
-       while (len - i >= XFS_NBWORD) {
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = *b ^ want)) {
-                       /*
-                        * Different, mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
-                       *rtblock = start - i + 1;
-                       return 0;
-               }
-               i += XFS_NBWORD;
-               /*
-                * Go on to previous block if that's where the previous word is
-                * and we need the previous word.
-                */
-               if (--word == -1 && i < len) {
-                       /*
-                        * If done with this block, get the previous one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       bufp = bp->b_addr;
-                       word = XFS_BLOCKWMASK(mp);
-                       b = &bufp[word];
-               } else {
-                       /*
-                        * Go on to the previous word in the buffer.
-                        */
-                       b--;
-               }
-       }
-       /*
-        * If not ending on a word boundary, deal with the last
-        * (partial) word.
-        */
-       if (len - i) {
-               /*
-                * Calculate first (leftmost) bit number to look at,
-                * and mask for all the relevant bits in this word.
-                */
-               firstbit = XFS_NBWORD - (len - i);
-               mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit;
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = (*b ^ want) & mask)) {
-                       /*
-                        * Different, mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
-                       *rtblock = start - i + 1;
-                       return 0;
-               } else
-                       i = len;
-       }
-       /*
-        * No match, return that we scanned the whole area.
-        */
-       xfs_trans_brelse(tp, bp);
-       *rtblock = start - i + 1;
-       return 0;
-}
-
-/*
- * Searching forward from start to limit, find the first block whose
- * allocated/free state is different from start's.
- */
-STATIC int                             /* error */
-xfs_rtfind_forw(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to look at */
-       xfs_rtblock_t   limit,          /* last block to look at */
-       xfs_rtblock_t   *rtblock)       /* out: start block found */
-{
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       xfs_buf_t       *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtblock_t   i;              /* current bit number rel. to start */
-       xfs_rtblock_t   lastbit;        /* last useful bit in the word */
-       xfs_rtblock_t   len;            /* length of inspected area */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       xfs_rtword_t    want;           /* mask for "good" values */
-       xfs_rtword_t    wdiff;          /* difference from wanted value */
-       int             word;           /* word number in the buffer */
-
-       /*
-        * Compute and read in starting bitmap block for starting block.
-        */
-       block = XFS_BITTOBLOCK(mp, start);
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
-               return error;
-       }
-       bufp = bp->b_addr;
-       /*
-        * Get the first word's index & point to it.
-        */
-       word = XFS_BITTOWORD(mp, start);
-       b = &bufp[word];
-       bit = (int)(start & (XFS_NBWORD - 1));
-       len = limit - start + 1;
-       /*
-        * Compute match value, based on the bit at start: if 1 (free)
-        * then all-ones, else all-zeroes.
-        */
-       want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
-       /*
-        * If the starting position is not word-aligned, deal with the
-        * partial word.
-        */
-       if (bit) {
-               /*
-                * Calculate last (rightmost) bit number to look at,
-                * and mask for all the relevant bits in this word.
-                */
-               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
-               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
-               /*
-                * Calculate the difference between the value there
-                * and what we're looking for.
-                */
-               if ((wdiff = (*b ^ want) & mask)) {
-                       /*
-                        * Different.  Mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i = XFS_RTLOBIT(wdiff) - bit;
-                       *rtblock = start + i - 1;
-                       return 0;
-               }
-               i = lastbit - bit;
-               /*
-                * Go on to next block if that's where the next word is
-                * and we need the next word.
-                */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-                       /*
-                        * If done with this block, get the previous one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
-                       /*
-                        * Go on to the previous word in the buffer.
-                        */
-                       b++;
-               }
-       } else {
-               /*
-                * Starting on a word boundary, no partial word.
-                */
-               i = 0;
-       }
-       /*
-        * Loop over whole words in buffers.  When we use up one buffer
-        * we move on to the next one.
-        */
-       while (len - i >= XFS_NBWORD) {
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = *b ^ want)) {
-                       /*
-                        * Different, mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_RTLOBIT(wdiff);
-                       *rtblock = start + i - 1;
-                       return 0;
-               }
-               i += XFS_NBWORD;
-               /*
-                * Go on to next block if that's where the next word is
-                * and we need the next word.
-                */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
-                       /*
-                        * If done with this block, get the next one.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
-                               return error;
-                       }
-                       b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer.
-                        */
-                       b++;
-               }
-       }
-       /*
-        * If not ending on a word boundary, deal with the last
-        * (partial) word.
-        */
-       if ((lastbit = len - i)) {
-               /*
-                * Calculate mask for all the relevant bits in this word.
-                */
-               mask = ((xfs_rtword_t)1 << lastbit) - 1;
-               /*
-                * Compute difference between actual and desired value.
-                */
-               if ((wdiff = (*b ^ want) & mask)) {
-                       /*
-                        * Different, mark where we are and return.
-                        */
-                       xfs_trans_brelse(tp, bp);
-                       i += XFS_RTLOBIT(wdiff);
-                       *rtblock = start + i - 1;
-                       return 0;
-               } else
-                       i = len;
-       }
-       /*
-        * No match, return that we scanned the whole area.
-        */
-       xfs_trans_brelse(tp, bp);
-       *rtblock = start + i - 1;
-       return 0;
-}
-
-/*
- * Mark an extent specified by start and len freed.
- * Updates all the summary information as well as the bitmap.
- */
-STATIC int                             /* error */
-xfs_rtfree_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to free */
-       xfs_extlen_t    len,            /* length to free */
-       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
-{
-       xfs_rtblock_t   end;            /* end of the freed extent */
-       int             error;          /* error value */
-       xfs_rtblock_t   postblock;      /* first block freed > end */
-       xfs_rtblock_t   preblock;       /* first block freed < start */
-
-       end = start + len - 1;
-       /*
-        * Modify the bitmap to mark this extent freed.
-        */
-       error = xfs_rtmodify_range(mp, tp, start, len, 1);
-       if (error) {
-               return error;
-       }
-       /*
-        * Assume we're freeing out of the middle of an allocated extent.
-        * We need to find the beginning and end of the extent so we can
-        * properly update the summary.
-        */
-       error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
-       if (error) {
-               return error;
-       }
-       /*
-        * Find the next allocated block (end of allocated extent).
-        */
-       error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
-               &postblock);
-       if (error)
-               return error;
-       /*
-        * If there are blocks not being freed at the front of the
-        * old extent, add summary data for them to be allocated.
-        */
-       if (preblock < start) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(start - preblock),
-                       XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
-               if (error) {
-                       return error;
-               }
-       }
-       /*
-        * If there are blocks not being freed at the end of the
-        * old extent, add summary data for them to be allocated.
-        */
-       if (postblock > end) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(postblock - end),
-                       XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
-               if (error) {
-                       return error;
-               }
-       }
-       /*
-        * Increment the summary information corresponding to the entire
-        * (new) free extent.
-        */
-       error = xfs_rtmodify_summary(mp, tp,
-               XFS_RTBLOCKLOG(postblock + 1 - preblock),
-               XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
-       return error;
-}
-
-/*
- * Read and return the summary information for a given extent size,
- * bitmap block combination.
- * Keeps track of a current summary block, so we don't keep reading
- * it from the buffer cache.
- */
-STATIC int                             /* error */
-xfs_rtget_summary(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       int             log,            /* log2 of extent size */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       xfs_suminfo_t   *sum)           /* out: summary info for this block */
-{
-       xfs_buf_t       *bp;            /* buffer for summary block */
-       int             error;          /* error value */
-       xfs_fsblock_t   sb;             /* summary fsblock */
-       int             so;             /* index into the summary file */
-       xfs_suminfo_t   *sp;            /* pointer to returned data */
-
-       /*
-        * Compute entry number in the summary file.
-        */
-       so = XFS_SUMOFFS(mp, log, bbno);
-       /*
-        * Compute the block number in the summary file.
-        */
-       sb = XFS_SUMOFFSTOBLOCK(mp, so);
-       /*
-        * If we have an old buffer, and the block number matches, use that.
-        */
-       if (rbpp && *rbpp && *rsb == sb)
-               bp = *rbpp;
-       /*
-        * Otherwise we have to get the buffer.
-        */
-       else {
-               /*
-                * If there was an old one, get rid of it first.
-                */
-               if (rbpp && *rbpp)
-                       xfs_trans_brelse(tp, *rbpp);
-               error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
-               if (error) {
-                       return error;
-               }
-               /*
-                * Remember this buffer and block for the next call.
-                */
-               if (rbpp) {
-                       *rbpp = bp;
-                       *rsb = sb;
-               }
-       }
-       /*
-        * Point to the summary information & copy it out.
-        */
-       sp = XFS_SUMPTR(mp, bp, so);
-       *sum = *sp;
-       /*
-        * Drop the buffer if we're not asked to remember it.
-        */
-       if (!rbpp)
-               xfs_trans_brelse(tp, bp);
-       return 0;
-}
-
-/*
- * Set the given range of bitmap bits to the given value.
- * Do whatever I/O and logging is required.
- */
-STATIC int                             /* error */
-xfs_rtmodify_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to modify */
-       xfs_extlen_t    len,            /* length of extent to modify */
-       int             val)            /* 1 for free, 0 for allocated */
-{
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       xfs_buf_t       *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtword_t    *first;         /* first used word in the buffer */
-       int             i;              /* current bit number rel. to start */
-       int             lastbit;        /* last useful bit in word */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       int             word;           /* word number in the buffer */
-
-       /*
-        * Compute starting bitmap block number.
-        */
-       block = XFS_BITTOBLOCK(mp, start);
-       /*
-        * Read the bitmap block, and point to its data.
-        */
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
-               return error;
+                               return 0;
+                       }
+                       /*
+                        * If the "next block to try" returned from the
+                        * allocator is beyond the next bitmap block,
+                        * skip to that bitmap block.
+                        */
+                       if (XFS_BITTOBLOCK(mp, n) > i + 1)
+                               i = XFS_BITTOBLOCK(mp, n) - 1;
+               }
        }
-       bufp = bp->b_addr;
-       /*
-        * Compute the starting word's address, and starting bit.
-        */
-       word = XFS_BITTOWORD(mp, start);
-       first = b = &bufp[word];
-       bit = (int)(start & (XFS_NBWORD - 1));
        /*
-        * 0 (allocated) => all zeroes; 1 (free) => all ones.
+        * Didn't find any maxlen blocks.  Try smaller ones, unless
+        * we're asking for a fixed size extent.
         */
-       val = -val;
+       if (minlen > --maxlen) {
+               *rtblock = NULLRTBLOCK;
+               return 0;
+       }
+       ASSERT(minlen != 0);
+       ASSERT(maxlen != 0);
+
        /*
-        * If not starting on a word boundary, deal with the first
-        * (partial) word.
+        * Loop over sizes, from maxlen down to minlen.
+        * This time, when we do the allocations, allow smaller ones
+        * to succeed.
         */
-       if (bit) {
-               /*
-                * Compute first bit not changed and mask of relevant bits.
-                */
-               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
-               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
-               /*
-                * Set/clear the active bits.
-                */
-               if (val)
-                       *b |= mask;
-               else
-                       *b &= ~mask;
-               i = lastbit - bit;
+       for (l = xfs_highbit32(maxlen); l >= xfs_highbit32(minlen); l--) {
                /*
-                * Go on to the next block if that's where the next word is
-                * and we need the next word.
+                * Loop over all the bitmap blocks, try an allocation
+                * starting in that block.
                 */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+               for (i = 0; i < mp->m_sb.sb_rbmblocks; i++) {
                        /*
-                        * Log the changed part of this block.
-                        * Get the next one.
+                        * Get the summary information for this level/block.
                         */
-                       xfs_trans_log_buf(tp, bp,
-                               (uint)((char *)first - (char *)bufp),
-                               (uint)((char *)b - (char *)bufp));
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb,
+                                                 &sum);
                        if (error) {
                                return error;
                        }
-                       first = b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
                        /*
-                        * Go on to the next word in the buffer
+                        * If nothing there, go on to next.
                         */
-                       b++;
-               }
-       } else {
-               /*
-                * Starting on a word boundary, no partial word.
-                */
-               i = 0;
-       }
-       /*
-        * Loop over whole words in buffers.  When we use up one buffer
-        * we move on to the next one.
-        */
-       while (len - i >= XFS_NBWORD) {
-               /*
-                * Set the word value correctly.
-                */
-               *b = val;
-               i += XFS_NBWORD;
-               /*
-                * Go on to the next block if that's where the next word is
-                * and we need the next word.
-                */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       if (!sum)
+                               continue;
                        /*
-                        * Log the changed part of this block.
-                        * Get the next one.
+                        * Try the allocation.  Make sure the specified
+                        * minlen/maxlen are in the possible range for
+                        * this summary level.
                         */
-                       xfs_trans_log_buf(tp, bp,
-                               (uint)((char *)first - (char *)bufp),
-                               (uint)((char *)b - (char *)bufp));
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       error = xfs_rtallocate_extent_block(mp, tp, i,
+                                       XFS_RTMAX(minlen, 1 << l),
+                                       XFS_RTMIN(maxlen, (1 << (l + 1)) - 1),
+                                       len, &n, rbpp, rsb, prod, &r);
                        if (error) {
                                return error;
                        }
-                       first = b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
                        /*
-                        * Go on to the next word in the buffer
+                        * If it worked, return that extent.
+                        */
+                       if (r != NULLRTBLOCK) {
+                               *rtblock = r;
+                               return 0;
+                       }
+                       /*
+                        * If the "next block to try" returned from the
+                        * allocator is beyond the next bitmap block,
+                        * skip to that bitmap block.
                         */
-                       b++;
+                       if (XFS_BITTOBLOCK(mp, n) > i + 1)
+                               i = XFS_BITTOBLOCK(mp, n) - 1;
                }
        }
        /*
-        * If not ending on a word boundary, deal with the last
-        * (partial) word.
-        */
-       if ((lastbit = len - i)) {
-               /*
-                * Compute a mask of relevant bits.
-                */
-               bit = 0;
-               mask = ((xfs_rtword_t)1 << lastbit) - 1;
-               /*
-                * Set/clear the active bits.
-                */
-               if (val)
-                       *b |= mask;
-               else
-                       *b &= ~mask;
-               b++;
-       }
-       /*
-        * Log any remaining changed bytes.
+        * Got nothing, return failure.
         */
-       if (b > first)
-               xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
-                       (uint)((char *)b - (char *)bufp - 1));
+       *rtblock = NULLRTBLOCK;
        return 0;
 }
 
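
The retry loop above walks the summary levels from the largest power of two not exceeding maxlen down to the level covering minlen, clamping the request to [XFS_RTMAX(minlen, 1 << l), XFS_RTMIN(maxlen, (1 << (l + 1)) - 1)] at each level. A minimal userspace sketch of that clamping, assuming 32-bit lengths; highbit32_demo() and the example sizes are illustrative only, not kernel code:

#include <stdio.h>

/* Illustrative stand-in for xfs_highbit32(): index of the highest set bit. */
static int highbit32_demo(unsigned int v)
{
        int b = -1;

        while (v) {
                v >>= 1;
                b++;
        }
        return b;
}

int main(void)
{
        unsigned int minlen = 3, maxlen = 37;   /* example request sizes */
        int l;

        for (l = highbit32_demo(maxlen); l >= highbit32_demo(minlen); l--) {
                unsigned int lo = minlen > (1u << l) ? minlen : (1u << l);
                unsigned int hi = maxlen < ((1u << (l + 1)) - 1) ?
                                  maxlen : ((1u << (l + 1)) - 1);

                /* Each level only tries extents whose size falls in [lo, hi]. */
                printf("level %d: try sizes %u..%u\n", l, lo, hi);
        }
        return 0;
}
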
 /*
- * Read and modify the summary information for a given extent size,
- * bitmap block combination.
- * Keeps track of a current summary block, so we don't keep reading
- * it from the buffer cache.
+ * Allocate space to the bitmap or summary file, and zero it, for growfs.
  */
 STATIC int                             /* error */
-xfs_rtmodify_summary(
+xfs_growfs_rt_alloc(
        xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       int             log,            /* log2 of extent size */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       int             delta,          /* change to make to summary info */
-       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+       xfs_extlen_t    oblocks,        /* old count of blocks */
+       xfs_extlen_t    nblocks,        /* new count of blocks */
+       xfs_inode_t     *ip)            /* inode (bitmap/summary) */
 {
-       xfs_buf_t       *bp;            /* buffer for the summary block */
-       int             error;          /* error value */
-       xfs_fsblock_t   sb;             /* summary fsblock */
-       int             so;             /* index into the summary file */
-       xfs_suminfo_t   *sp;            /* pointer to returned data */
+       xfs_fileoff_t   bno;            /* block number in file */
+       xfs_buf_t       *bp;            /* temporary buffer for zeroing */
+       int             committed;      /* transaction committed flag */
+       xfs_daddr_t     d;              /* disk block address */
+       int             error;          /* error return value */
+       xfs_fsblock_t   firstblock;     /* first block allocated in xaction */
+       xfs_bmap_free_t flist;          /* list of freed blocks */
+       xfs_fsblock_t   fsbno;          /* filesystem block for bno */
+       xfs_bmbt_irec_t map;            /* block map output */
+       int             nmap;           /* number of block maps */
+       int             resblks;        /* space reservation */
 
        /*
-        * Compute entry number in the summary file.
-        */
-       so = XFS_SUMOFFS(mp, log, bbno);
-       /*
-        * Compute the block number in the summary file.
-        */
-       sb = XFS_SUMOFFSTOBLOCK(mp, so);
-       /*
-        * If we have an old buffer, and the block number matches, use that.
-        */
-       if (rbpp && *rbpp && *rsb == sb)
-               bp = *rbpp;
-       /*
-        * Otherwise we have to get the buffer.
+        * Allocate space to the file, as necessary.
         */
-       else {
+       while (oblocks < nblocks) {
+               int             cancelflags = 0;
+               xfs_trans_t     *tp;
+
+               tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
+               resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
                /*
-                * If there was an old one, get rid of it first.
+                * Reserve space & log for one extent added to the file.
                 */
-               if (rbpp && *rbpp)
-                       xfs_trans_brelse(tp, *rbpp);
-               error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
-               if (error) {
-                       return error;
-               }
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
+                                         resblks, 0);
+               if (error)
+                       goto error_cancel;
+               cancelflags = XFS_TRANS_RELEASE_LOG_RES;
                /*
-                * Remember this buffer and block for the next call.
+                * Lock the inode.
                 */
-               if (rbpp) {
-                       *rbpp = bp;
-                       *rsb = sb;
+               xfs_ilock(ip, XFS_ILOCK_EXCL);
+               xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+               xfs_bmap_init(&flist, &firstblock);
+               /*
+                * Allocate blocks to the bitmap file.
+                */
+               nmap = 1;
+               cancelflags |= XFS_TRANS_ABORT;
+               error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
+                                       XFS_BMAPI_METADATA, &firstblock,
+                                       resblks, &map, &nmap, &flist);
+               if (!error && nmap < 1)
+                       error = XFS_ERROR(ENOSPC);
+               if (error)
+                       goto error_cancel;
+               /*
+                * Free any blocks freed up in the transaction, then commit.
+                */
+               error = xfs_bmap_finish(&tp, &flist, &committed);
+               if (error)
+                       goto error_cancel;
+               error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+               if (error)
+                       goto error;
+               /*
+                * Now we need to clear the allocated blocks.
+                * Do this one block per transaction, to keep it simple.
+                */
+               cancelflags = 0;
+               for (bno = map.br_startoff, fsbno = map.br_startblock;
+                    bno < map.br_startoff + map.br_blockcount;
+                    bno++, fsbno++) {
+                       tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
+                       /*
+                        * Reserve log for one block zeroing.
+                        */
+                       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
+                                                 0, 0);
+                       if (error)
+                               goto error_cancel;
+                       /*
+                        * Lock the bitmap inode.
+                        */
+                       xfs_ilock(ip, XFS_ILOCK_EXCL);
+                       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+                       /*
+                        * Get a buffer for the block.
+                        */
+                       d = XFS_FSB_TO_DADDR(mp, fsbno);
+                       bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
+                               mp->m_bsize, 0);
+                       if (bp == NULL) {
+                               error = XFS_ERROR(EIO);
+error_cancel:
+                               xfs_trans_cancel(tp, cancelflags);
+                               goto error;
+                       }
+                       memset(bp->b_addr, 0, mp->m_sb.sb_blocksize);
+                       xfs_trans_log_buf(tp, bp, 0, mp->m_sb.sb_blocksize - 1);
+                       /*
+                        * Commit the transaction.
+                        */
+                       error = xfs_trans_commit(tp, 0);
+                       if (error)
+                               goto error;
                }
+               /*
+                * Go on to the next extent, if any.
+                */
+               oblocks = map.br_startoff + map.br_blockcount;
        }
-       /*
-        * Point to the summary information, modify and log it.
-        */
-       sp = XFS_SUMPTR(mp, bp, so);
-       *sp += delta;
-       xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr),
-               (uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1));
        return 0;
+
+error:
+       return error;
 }
 
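
The growfs helper above works in two phases: one transaction maps a new extent into the bitmap or summary file with xfs_bmapi_write(), then each newly mapped block is zeroed and logged in its own small transaction, repeating until the file reaches nblocks. A toy userspace sketch of that control flow only; the 3-block extent size is made up and the printf calls stand in for transactions:

#include <stdio.h>

int main(void)
{
        unsigned long oblocks = 2, nblocks = 10;        /* grow from 2 to 10 blocks */

        /* Outer loop: one allocating transaction per extent actually mapped. */
        while (oblocks < nblocks) {
                /* Pretend the allocator handed back at most 3 contiguous blocks. */
                unsigned long got = nblocks - oblocks > 3 ? 3 : nblocks - oblocks;
                unsigned long bno;

                printf("tx: allocate %lu blocks at offset %lu\n", got, oblocks);
                /* Inner loop: zero each new block in its own small transaction. */
                for (bno = oblocks; bno < oblocks + got; bno++)
                        printf("tx: zero block %lu\n", bno);
                oblocks += got;
        }
        return 0;
}
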
 /*
@@ -2128,66 +1230,6 @@ xfs_rtallocate_extent(
        return 0;
 }
 
-/*
- * Free an extent in the realtime subvolume.  Length is expressed in
- * realtime extents, as is the block number.
- */
-int                                    /* error */
-xfs_rtfree_extent(
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   bno,            /* starting block number to free */
-       xfs_extlen_t    len)            /* length of extent freed */
-{
-       int             error;          /* error value */
-       xfs_mount_t     *mp;            /* file system mount structure */
-       xfs_fsblock_t   sb;             /* summary file block number */
-       xfs_buf_t       *sumbp;         /* summary file block buffer */
-
-       mp = tp->t_mountp;
-
-       ASSERT(mp->m_rbmip->i_itemp != NULL);
-       ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
-
-#ifdef DEBUG
-       /*
-        * Check to see that this whole range is currently allocated.
-        */
-       {
-               int     stat;           /* result from checking range */
-
-               error = xfs_rtcheck_alloc_range(mp, tp, bno, len, &stat);
-               if (error) {
-                       return error;
-               }
-               ASSERT(stat);
-       }
-#endif
-       sumbp = NULL;
-       /*
-        * Free the range of realtime blocks.
-        */
-       error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
-       if (error) {
-               return error;
-       }
-       /*
-        * Mark more blocks free in the superblock.
-        */
-       xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len);
-       /*
-        * If we've now freed all the blocks, reset the file sequence
-        * number to 0.
-        */
-       if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
-           mp->m_sb.sb_rextents) {
-               if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
-                       mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
-               *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
-               xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
-       }
-       return 0;
-}
-
 /*
  * Initialize realtime fields in the mount structure.
  */
index b2a1a24c0e2f3d8037cdd03f2b8deffc298d38c9..752b63d103003288d48c463571cc59279f8c531d 100644 (file)
@@ -95,6 +95,30 @@ xfs_growfs_rt(
        struct xfs_mount        *mp,    /* file system mount structure */
        xfs_growfs_rt_t         *in);   /* user supplied growfs struct */
 
+/*
+ * From xfs_rtbitmap.c
+ */
+int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
+                 xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
+int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
+                     xfs_rtblock_t start, xfs_extlen_t len, int val,
+                     xfs_rtblock_t *new, int *stat);
+int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp,
+                   xfs_rtblock_t start, xfs_rtblock_t limit,
+                   xfs_rtblock_t *rtblock);
+int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp,
+                   xfs_rtblock_t start, xfs_rtblock_t limit,
+                   xfs_rtblock_t *rtblock);
+int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp,
+                      xfs_rtblock_t start, xfs_extlen_t len, int val);
+int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
+                        xfs_rtblock_t bbno, int delta, xfs_buf_t **rbpp,
+                        xfs_fsblock_t *rsb);
+int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
+                    xfs_rtblock_t start, xfs_extlen_t len,
+                    struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
+
+
 #else
 # define xfs_rtallocate_extent(t,b,min,max,l,a,f,p,rb)  (ENOSYS)
 # define xfs_rtfree_extent(t,b,l)                       (ENOSYS)
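
The declarations above export the bitmap routines that xfs_rtbitmap.c now provides. As an illustration only, a caller that wants to check that a realtime range is fully allocated could wrap xfs_rtcheck_range() like this; the helper name is hypothetical and the usual xfs headers, context, and locking are assumed:

/*
 * Hypothetical helper: return 0 and set *allocated if [bno, bno + len)
 * is entirely allocated in the realtime bitmap, using the exported
 * xfs_rtcheck_range() declared above.
 */
static int
xfs_rtcheck_alloc_demo(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        xfs_rtblock_t           bno,
        xfs_extlen_t            len,
        int                     *allocated)
{
        xfs_rtblock_t           mismatch;       /* first block not matching */

        /* val == 0 asks for "all allocated"; *allocated is the stat output. */
        return xfs_rtcheck_range(mp, tp, bno, len, 0, &mismatch, allocated);
}
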
diff --git a/fs/xfs/xfs_rtbitmap.c b/fs/xfs/xfs_rtbitmap.c
new file mode 100644 (file)
index 0000000..e30efe8
--- /dev/null
@@ -0,0 +1,973 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc.h"
+#include "xfs_error.h"
+#include "xfs_trans.h"
+#include "xfs_trans_space.h"
+#include "xfs_trace.h"
+#include "xfs_buf.h"
+#include "xfs_icache.h"
+#include "xfs_dinode.h"
+
+
+/*
+ * Realtime allocator bitmap functions shared with userspace.
+ */
+
+/*
+ * Get a buffer for the bitmap or summary file block specified.
+ * The buffer is returned read and locked.
+ */
+int
+xfs_rtbuf_get(
+       xfs_mount_t     *mp,            /* file system mount structure */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   block,          /* block number in bitmap or summary */
+       int             issum,          /* is summary not bitmap */
+       xfs_buf_t       **bpp)          /* output: buffer for the block */
+{
+       xfs_buf_t       *bp;            /* block buffer, result */
+       xfs_inode_t     *ip;            /* bitmap or summary inode */
+       xfs_bmbt_irec_t map;
+       int             nmap = 1;
+       int             error;          /* error value */
+
+       ip = issum ? mp->m_rsumip : mp->m_rbmip;
+
+       error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
+       if (error)
+               return error;
+
+       ASSERT(map.br_startblock != NULLFSBLOCK);
+       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+                                  XFS_FSB_TO_DADDR(mp, map.br_startblock),
+                                  mp->m_bsize, 0, &bp, NULL);
+       if (error)
+               return error;
+       ASSERT(!xfs_buf_geterror(bp));
+       *bpp = bp;
+       return 0;
+}
+
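
All of the routines below follow the same pattern around xfs_rtbuf_get(): map the bitmap (issum == 0) or summary (issum == 1) block, work on the xfs_rtword_t array behind bp->b_addr, and release the buffer with xfs_trans_brelse(). A hypothetical helper showing just that shape, assuming the usual xfs headers and that the caller holds the bitmap inode lock as the real callers do:

/*
 * Hypothetical helper (illustration only): count how many words of one
 * bitmap block match a given pattern, using xfs_rtbuf_get() above.
 */
static int
xfs_rtbuf_demo(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_rtblock_t   block,          /* bitmap block number */
        xfs_rtword_t    pattern,        /* word value to count */
        int             *count)         /* out: number of matching words */
{
        xfs_buf_t       *bp;
        xfs_rtword_t    *bufp;
        int             error;
        int             word;

        error = xfs_rtbuf_get(mp, tp, block, 0, &bp);   /* 0 = bitmap file */
        if (error)
                return error;
        bufp = bp->b_addr;
        *count = 0;
        for (word = 0; word < XFS_BLOCKWSIZE(mp); word++)
                if (bufp[word] == pattern)
                        (*count)++;
        xfs_trans_brelse(tp, bp);       /* release the buffer when done */
        return 0;
}
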
+/*
+ * Searching backward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+int
+xfs_rtfind_back(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* starting block to look at */
+       xfs_rtblock_t   limit,          /* last block to look at */
+       xfs_rtblock_t   *rtblock)       /* out: start block found */
+{
+       xfs_rtword_t    *b;             /* current word in buffer */
+       int             bit;            /* bit number in the word */
+       xfs_rtblock_t   block;          /* bitmap block number */
+       xfs_buf_t       *bp;            /* buf for the block */
+       xfs_rtword_t    *bufp;          /* starting word in buffer */
+       int             error;          /* error value */
+       xfs_rtblock_t   firstbit;       /* first useful bit in the word */
+       xfs_rtblock_t   i;              /* current bit number rel. to start */
+       xfs_rtblock_t   len;            /* length of inspected area */
+       xfs_rtword_t    mask;           /* mask of relevant bits for value */
+       xfs_rtword_t    want;           /* mask for "good" values */
+       xfs_rtword_t    wdiff;          /* difference from wanted value */
+       int             word;           /* word number in the buffer */
+
+       /*
+        * Compute and read in starting bitmap block for starting block.
+        */
+       block = XFS_BITTOBLOCK(mp, start);
+       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+       if (error) {
+               return error;
+       }
+       bufp = bp->b_addr;
+       /*
+        * Get the first word's index & point to it.
+        */
+       word = XFS_BITTOWORD(mp, start);
+       b = &bufp[word];
+       bit = (int)(start & (XFS_NBWORD - 1));
+       len = start - limit + 1;
+       /*
+        * Compute match value, based on the bit at start: if 1 (free)
+        * then all-ones, else all-zeroes.
+        */
+       want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+       /*
+        * If the starting position is not word-aligned, deal with the
+        * partial word.
+        */
+       if (bit < XFS_NBWORD - 1) {
+               /*
+                * Calculate first (leftmost) bit number to look at,
+                * and mask for all the relevant bits in this word.
+                */
+               firstbit = XFS_RTMAX((xfs_srtblock_t)(bit - len + 1), 0);
+               mask = (((xfs_rtword_t)1 << (bit - firstbit + 1)) - 1) <<
+                       firstbit;
+               /*
+                * Calculate the difference between the value there
+                * and what we're looking for.
+                */
+               if ((wdiff = (*b ^ want) & mask)) {
+                       /*
+                        * Different.  Mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i = bit - XFS_RTHIBIT(wdiff);
+                       *rtblock = start - i + 1;
+                       return 0;
+               }
+               i = bit - firstbit + 1;
+               /*
+                * Go on to previous block if that's where the previous word is
+                * and we need the previous word.
+                */
+               if (--word == -1 && i < len) {
+                       /*
+                        * If done with this block, get the previous one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       bufp = bp->b_addr;
+                       word = XFS_BLOCKWMASK(mp);
+                       b = &bufp[word];
+               } else {
+                       /*
+                        * Go on to the previous word in the buffer.
+                        */
+                       b--;
+               }
+       } else {
+               /*
+                * Starting on a word boundary, no partial word.
+                */
+               i = 0;
+       }
+       /*
+        * Loop over whole words in buffers.  When we use up one buffer
+        * we move on to the previous one.
+        */
+       while (len - i >= XFS_NBWORD) {
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = *b ^ want)) {
+                       /*
+                        * Different, mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+                       *rtblock = start - i + 1;
+                       return 0;
+               }
+               i += XFS_NBWORD;
+               /*
+                * Go on to previous block if that's where the previous word is
+                * and we need the previous word.
+                */
+               if (--word == -1 && i < len) {
+                       /*
+                        * If done with this block, get the previous one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       bufp = bp->b_addr;
+                       word = XFS_BLOCKWMASK(mp);
+                       b = &bufp[word];
+               } else {
+                       /*
+                        * Go on to the previous word in the buffer.
+                        */
+                       b--;
+               }
+       }
+       /*
+        * If not ending on a word boundary, deal with the last
+        * (partial) word.
+        */
+       if (len - i) {
+               /*
+                * Calculate first (leftmost) bit number to look at,
+                * and mask for all the relevant bits in this word.
+                */
+               firstbit = XFS_NBWORD - (len - i);
+               mask = (((xfs_rtword_t)1 << (len - i)) - 1) << firstbit;
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = (*b ^ want) & mask)) {
+                       /*
+                        * Different, mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
+                       *rtblock = start - i + 1;
+                       return 0;
+               } else
+                       i = len;
+       }
+       /*
+        * No match, return that we scanned the whole area.
+        */
+       xfs_trans_brelse(tp, bp);
+       *rtblock = start - i + 1;
+       return 0;
+}
+
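
The word-level trick used above: build a mask covering the bits between firstbit and the starting bit, XOR the word against the wanted all-ones or all-zeroes pattern, and the highest set bit of the masked difference marks the first block whose state differs. A single-word userspace sketch, assuming 32-bit bitmap words; hibit_demo() stands in for XFS_RTHIBIT() and the example values are made up:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for XFS_RTHIBIT(): index of the highest set bit (v != 0). */
static int hibit_demo(uint32_t v)
{
        int b = 0;

        while (v >>= 1)
                b++;
        return b;
}

int main(void)
{
        uint32_t word = 0x00ff0f00;     /* example bitmap word: 1 = free */
        int bit = 11;                   /* start bit inside the word */
        int len = 10;                   /* how far back we may look */
        /* Wanted pattern: all ones if the start bit is free, else all zeroes. */
        uint32_t want = (word & (1u << bit)) ? ~0u : 0;
        int firstbit = bit - len + 1 > 0 ? bit - len + 1 : 0;
        uint32_t mask = ((1u << (bit - firstbit + 1)) - 1) << firstbit;
        uint32_t wdiff = (word ^ want) & mask;

        if (wdiff)
                printf("state changes %d bits back from the start bit\n",
                       bit - hibit_demo(wdiff));
        else
                printf("no change within the inspected bits\n");
        return 0;
}
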
+/*
+ * Searching forward from start to limit, find the first block whose
+ * allocated/free state is different from start's.
+ */
+int
+xfs_rtfind_forw(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* starting block to look at */
+       xfs_rtblock_t   limit,          /* last block to look at */
+       xfs_rtblock_t   *rtblock)       /* out: start block found */
+{
+       xfs_rtword_t    *b;             /* current word in buffer */
+       int             bit;            /* bit number in the word */
+       xfs_rtblock_t   block;          /* bitmap block number */
+       xfs_buf_t       *bp;            /* buf for the block */
+       xfs_rtword_t    *bufp;          /* starting word in buffer */
+       int             error;          /* error value */
+       xfs_rtblock_t   i;              /* current bit number rel. to start */
+       xfs_rtblock_t   lastbit;        /* last useful bit in the word */
+       xfs_rtblock_t   len;            /* length of inspected area */
+       xfs_rtword_t    mask;           /* mask of relevant bits for value */
+       xfs_rtword_t    want;           /* mask for "good" values */
+       xfs_rtword_t    wdiff;          /* difference from wanted value */
+       int             word;           /* word number in the buffer */
+
+       /*
+        * Compute and read in starting bitmap block for starting block.
+        */
+       block = XFS_BITTOBLOCK(mp, start);
+       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+       if (error) {
+               return error;
+       }
+       bufp = bp->b_addr;
+       /*
+        * Get the first word's index & point to it.
+        */
+       word = XFS_BITTOWORD(mp, start);
+       b = &bufp[word];
+       bit = (int)(start & (XFS_NBWORD - 1));
+       len = limit - start + 1;
+       /*
+        * Compute match value, based on the bit at start: if 1 (free)
+        * then all-ones, else all-zeroes.
+        */
+       want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+       /*
+        * If the starting position is not word-aligned, deal with the
+        * partial word.
+        */
+       if (bit) {
+               /*
+                * Calculate last (rightmost) bit number to look at,
+                * and mask for all the relevant bits in this word.
+                */
+               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+               /*
+                * Calculate the difference between the value there
+                * and what we're looking for.
+                */
+               if ((wdiff = (*b ^ want) & mask)) {
+                       /*
+                        * Different.  Mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i = XFS_RTLOBIT(wdiff) - bit;
+                       *rtblock = start + i - 1;
+                       return 0;
+               }
+               i = lastbit - bit;
+               /*
+                * Go on to next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * If done with this block, get the next one.

+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer.
+                        */
+                       b++;
+               }
+       } else {
+               /*
+                * Starting on a word boundary, no partial word.
+                */
+               i = 0;
+       }
+       /*
+        * Loop over whole words in buffers.  When we use up one buffer
+        * we move on to the next one.
+        */
+       while (len - i >= XFS_NBWORD) {
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = *b ^ want)) {
+                       /*
+                        * Different, mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_RTLOBIT(wdiff);
+                       *rtblock = start + i - 1;
+                       return 0;
+               }
+               i += XFS_NBWORD;
+               /*
+                * Go on to next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * If done with this block, get the next one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer.
+                        */
+                       b++;
+               }
+       }
+       /*
+        * If not ending on a word boundary, deal with the last
+        * (partial) word.
+        */
+       if ((lastbit = len - i)) {
+               /*
+                * Calculate mask for all the relevant bits in this word.
+                */
+               mask = ((xfs_rtword_t)1 << lastbit) - 1;
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = (*b ^ want) & mask)) {
+                       /*
+                        * Different, mark where we are and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_RTLOBIT(wdiff);
+                       *rtblock = start + i - 1;
+                       return 0;
+               } else
+                       i = len;
+       }
+       /*
+        * No match, return that we scanned the whole area.
+        */
+       xfs_trans_brelse(tp, bp);
+       *rtblock = start + i - 1;
+       return 0;
+}
+
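
Forward search uses the mirror-image trick: mask the bits from the starting bit up to lastbit, XOR against the wanted pattern, and the lowest set bit of the difference locates the first change. A single-word userspace sketch, again assuming 32-bit words, with lobit_demo() standing in for XFS_RTLOBIT() and made-up example values:

#include <stdio.h>
#include <stdint.h>

#define NBWORD 32

/* Stand-in for XFS_RTLOBIT(): index of the lowest set bit (v != 0). */
static int lobit_demo(uint32_t v)
{
        int b = 0;

        while (!(v & 1)) {
                v >>= 1;
                b++;
        }
        return b;
}

int main(void)
{
        uint32_t word = 0x000f0f30;     /* example bitmap word: 1 = free */
        int bit = 4;                    /* start bit inside the word */
        int len = 20;                   /* how far forward we may look */
        uint32_t want = (word & (1u << bit)) ? ~0u : 0;
        int lastbit = bit + len < NBWORD ? bit + len : NBWORD;
        uint32_t mask = ((1u << (lastbit - bit)) - 1) << bit;
        uint32_t wdiff = (word ^ want) & mask;

        if (wdiff)
                printf("state changes %d bits after the start bit\n",
                       lobit_demo(wdiff) - bit);
        else
                printf("no change within the inspected bits\n");
        return 0;
}
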
+/*
+ * Read and modify the summary information for a given extent size,
+ * bitmap block combination.
+ * Keeps track of a current summary block, so we don't keep reading
+ * it from the buffer cache.
+ */
+int
+xfs_rtmodify_summary(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       int             log,            /* log2 of extent size */
+       xfs_rtblock_t   bbno,           /* bitmap block number */
+       int             delta,          /* change to make to summary info */
+       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
+       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+{
+       xfs_buf_t       *bp;            /* buffer for the summary block */
+       int             error;          /* error value */
+       xfs_fsblock_t   sb;             /* summary fsblock */
+       int             so;             /* index into the summary file */
+       xfs_suminfo_t   *sp;            /* pointer to returned data */
+
+       /*
+        * Compute entry number in the summary file.
+        */
+       so = XFS_SUMOFFS(mp, log, bbno);
+       /*
+        * Compute the block number in the summary file.
+        */
+       sb = XFS_SUMOFFSTOBLOCK(mp, so);
+       /*
+        * If we have an old buffer, and the block number matches, use that.
+        */
+       if (rbpp && *rbpp && *rsb == sb)
+               bp = *rbpp;
+       /*
+        * Otherwise we have to get the buffer.
+        */
+       else {
+               /*
+                * If there was an old one, get rid of it first.
+                */
+               if (rbpp && *rbpp)
+                       xfs_trans_brelse(tp, *rbpp);
+               error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
+               if (error) {
+                       return error;
+               }
+               /*
+                * Remember this buffer and block for the next call.
+                */
+               if (rbpp) {
+                       *rbpp = bp;
+                       *rsb = sb;
+               }
+       }
+       /*
+        * Point to the summary information, modify and log it.
+        */
+       sp = XFS_SUMPTR(mp, bp, so);
+       *sp += delta;
+       xfs_trans_log_buf(tp, bp, (uint)((char *)sp - (char *)bp->b_addr),
+               (uint)((char *)sp - (char *)bp->b_addr + sizeof(*sp) - 1));
+       return 0;
+}
+
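
The rbpp/rsb in/out parameters let a sequence of summary updates reuse one buffer: the caller keeps the last (buffer, block number) pair, and the routine only re-reads when the wanted block changes. A userspace sketch of just that caching idea, with illustrative names and a read counter in place of real buffer I/O:

#include <stdio.h>

static int      cached_block = -1;      /* stand-in for *rsb */
static int      reads;                  /* how often we had to "read" */

static void touch_summary_block(int sb)
{
        if (sb != cached_block) {       /* not the block we already hold */
                cached_block = sb;      /* remember it for the next call */
                reads++;
        }
        /* ... modify the summary counter inside the cached block ... */
}

int main(void)
{
        int blocks[] = { 0, 0, 0, 1, 1, 0 };
        int i;

        for (i = 0; i < 6; i++)
                touch_summary_block(blocks[i]);
        printf("6 updates, %d block reads\n", reads);
        return 0;
}
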
+/*
+ * Set the given range of bitmap bits to the given value.
+ * Do whatever I/O and logging is required.
+ */
+int
+xfs_rtmodify_range(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* starting block to modify */
+       xfs_extlen_t    len,            /* length of extent to modify */
+       int             val)            /* 1 for free, 0 for allocated */
+{
+       xfs_rtword_t    *b;             /* current word in buffer */
+       int             bit;            /* bit number in the word */
+       xfs_rtblock_t   block;          /* bitmap block number */
+       xfs_buf_t       *bp;            /* buf for the block */
+       xfs_rtword_t    *bufp;          /* starting word in buffer */
+       int             error;          /* error value */
+       xfs_rtword_t    *first;         /* first used word in the buffer */
+       int             i;              /* current bit number rel. to start */
+       int             lastbit;        /* last useful bit in word */
+       xfs_rtword_t    mask;           /* mask of relevant bits for value */
+       int             word;           /* word number in the buffer */
+
+       /*
+        * Compute starting bitmap block number.
+        */
+       block = XFS_BITTOBLOCK(mp, start);
+       /*
+        * Read the bitmap block, and point to its data.
+        */
+       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+       if (error) {
+               return error;
+       }
+       bufp = bp->b_addr;
+       /*
+        * Compute the starting word's address, and starting bit.
+        */
+       word = XFS_BITTOWORD(mp, start);
+       first = b = &bufp[word];
+       bit = (int)(start & (XFS_NBWORD - 1));
+       /*
+        * 0 (allocated) => all zeroes; 1 (free) => all ones.
+        */
+       val = -val;
+       /*
+        * If not starting on a word boundary, deal with the first
+        * (partial) word.
+        */
+       if (bit) {
+               /*
+                * Compute first bit not changed and mask of relevant bits.
+                */
+               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+               /*
+                * Set/clear the active bits.
+                */
+               if (val)
+                       *b |= mask;
+               else
+                       *b &= ~mask;
+               i = lastbit - bit;
+               /*
+                * Go on to the next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * Log the changed part of this block.
+                        * Get the next one.
+                        */
+                       xfs_trans_log_buf(tp, bp,
+                               (uint)((char *)first - (char *)bufp),
+                               (uint)((char *)b - (char *)bufp));
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       first = b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer
+                        */
+                       b++;
+               }
+       } else {
+               /*
+                * Starting on a word boundary, no partial word.
+                */
+               i = 0;
+       }
+       /*
+        * Loop over whole words in buffers.  When we use up one buffer
+        * we move on to the next one.
+        */
+       while (len - i >= XFS_NBWORD) {
+               /*
+                * Set the word value correctly.
+                */
+               *b = val;
+               i += XFS_NBWORD;
+               /*
+                * Go on to the next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * Log the changed part of this block.
+                        * Get the next one.
+                        */
+                       xfs_trans_log_buf(tp, bp,
+                               (uint)((char *)first - (char *)bufp),
+                               (uint)((char *)b - (char *)bufp));
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       first = b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer
+                        */
+                       b++;
+               }
+       }
+       /*
+        * If not ending on a word boundary, deal with the last
+        * (partial) word.
+        */
+       if ((lastbit = len - i)) {
+               /*
+                * Compute a mask of relevant bits.
+                */
+               bit = 0;
+               mask = ((xfs_rtword_t)1 << lastbit) - 1;
+               /*
+                * Set/clear the active bits.
+                */
+               if (val)
+                       *b |= mask;
+               else
+                       *b &= ~mask;
+               b++;
+       }
+       /*
+        * Log any remaining changed bytes.
+        */
+       if (b > first)
+               xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
+                       (uint)((char *)b - (char *)bufp - 1));
+       return 0;
+}
+
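
Setting or clearing a partial word above is pure mask arithmetic: compute the mask covering bits bit..lastbit-1, then OR it in to mark blocks free, or AND its complement to mark them allocated. A one-word userspace sketch with 32-bit words and made-up example values:

#include <stdio.h>
#include <stdint.h>

#define NBWORD 32

int main(void)
{
        uint32_t word = 0xffff0000;     /* example bitmap word: 1 = free */
        int bit = 12;                   /* first bit to modify */
        int len = 8;                    /* number of bits to modify */
        int val = 1;                    /* 1 = mark free, 0 = mark allocated */
        int lastbit = bit + len < NBWORD ? bit + len : NBWORD;
        uint32_t mask = ((1u << (lastbit - bit)) - 1) << bit;

        if (val)                        /* free => set the bits */
                word |= mask;
        else                            /* allocated => clear the bits */
                word &= ~mask;
        printf("word is now 0x%08x\n", word);
        return 0;
}
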
+/*
+ * Mark an extent specified by start and len freed.
+ * Updates all the summary information as well as the bitmap.
+ */
+int
+xfs_rtfree_range(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* starting block to free */
+       xfs_extlen_t    len,            /* length to free */
+       xfs_buf_t       **rbpp,         /* in/out: summary block buffer */
+       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+{
+       xfs_rtblock_t   end;            /* end of the freed extent */
+       int             error;          /* error value */
+       xfs_rtblock_t   postblock;      /* first block freed > end */
+       xfs_rtblock_t   preblock;       /* first block freed < start */
+
+       end = start + len - 1;
+       /*
+        * Modify the bitmap to mark this extent freed.
+        */
+       error = xfs_rtmodify_range(mp, tp, start, len, 1);
+       if (error) {
+               return error;
+       }
+       /*
+        * Assume we're freeing out of the middle of an allocated extent.
+        * We need to find the beginning and end of the extent so we can
+        * properly update the summary.
+        */
+       error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+       if (error) {
+               return error;
+       }
+       /*
+        * Find the next allocated block (end of allocated extent).
+        */
+       error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
+               &postblock);
+       if (error)
+               return error;
+       /*
+        * If there are blocks not being freed at the front of the
+        * old extent, add summary data for them to be allocated.
+        */
+       if (preblock < start) {
+               error = xfs_rtmodify_summary(mp, tp,
+                       XFS_RTBLOCKLOG(start - preblock),
+                       XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+               if (error) {
+                       return error;
+               }
+       }
+       /*
+        * If there are blocks not being freed at the end of the
+        * old extent, add summary data for them to be allocated.
+        */
+       if (postblock > end) {
+               error = xfs_rtmodify_summary(mp, tp,
+                       XFS_RTBLOCKLOG(postblock - end),
+                       XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
+               if (error) {
+                       return error;
+               }
+       }
+       /*
+        * Increment the summary information corresponding to the entire
+        * (new) free extent.
+        */
+       error = xfs_rtmodify_summary(mp, tp,
+               XFS_RTBLOCKLOG(postblock + 1 - preblock),
+               XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+       return error;
+}
+
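
The summary bookkeeping above can be read as three adjustments, each keyed by the log2 of an extent length (and by the bitmap block it starts in, which this sketch leaves out): remove the entry for any pre-existing free run in front, remove the entry for any pre-existing free run behind, and add one entry for the merged free extent. A small sketch of those adjustments, assuming XFS_RTBLOCKLOG() is the integer log2 of a length; rtblocklog_demo() and the example block numbers are made up:

#include <stdio.h>

/* Stand-in for XFS_RTBLOCKLOG(): integer log2 of an extent length. */
static int rtblocklog_demo(long len)
{
        int l = 0;

        while (len >>= 1)
                l++;
        return l;
}

int main(void)
{
        /* Example: freeing [start, end] merges with free space on both sides. */
        long preblock = 96, start = 100, end = 109, postblock = 119;

        if (preblock < start)           /* front piece was already free */
                printf("summary[log %d] -= 1\n",
                       rtblocklog_demo(start - preblock));
        if (postblock > end)            /* back piece was already free */
                printf("summary[log %d] -= 1\n",
                       rtblocklog_demo(postblock - end));
        /* The whole merged extent [preblock, postblock] becomes one entry. */
        printf("summary[log %d] += 1\n",
               rtblocklog_demo(postblock + 1 - preblock));
        return 0;
}
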
+/*
+ * Check that the given range is either all allocated (val = 0) or
+ * all free (val = 1).
+ */
+int
+xfs_rtcheck_range(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   start,          /* starting block number of extent */
+       xfs_extlen_t    len,            /* length of extent */
+       int             val,            /* 1 for free, 0 for allocated */
+       xfs_rtblock_t   *new,           /* out: first block not matching */
+       int             *stat)          /* out: 1 for matches, 0 for not */
+{
+       xfs_rtword_t    *b;             /* current word in buffer */
+       int             bit;            /* bit number in the word */
+       xfs_rtblock_t   block;          /* bitmap block number */
+       xfs_buf_t       *bp;            /* buf for the block */
+       xfs_rtword_t    *bufp;          /* starting word in buffer */
+       int             error;          /* error value */
+       xfs_rtblock_t   i;              /* current bit number rel. to start */
+       xfs_rtblock_t   lastbit;        /* last useful bit in word */
+       xfs_rtword_t    mask;           /* mask of relevant bits for value */
+       xfs_rtword_t    wdiff;          /* difference from wanted value */
+       int             word;           /* word number in the buffer */
+
+       /*
+        * Compute starting bitmap block number
+        */
+       block = XFS_BITTOBLOCK(mp, start);
+       /*
+        * Read the bitmap block.
+        */
+       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
+       if (error) {
+               return error;
+       }
+       bufp = bp->b_addr;
+       /*
+        * Compute the starting word's address, and starting bit.
+        */
+       word = XFS_BITTOWORD(mp, start);
+       b = &bufp[word];
+       bit = (int)(start & (XFS_NBWORD - 1));
+       /*
+        * 0 (allocated) => all zeros; 1 (free) => all ones.
+        */
+       val = -val;
+       /*
+        * If not starting on a word boundary, deal with the first
+        * (partial) word.
+        */
+       if (bit) {
+               /*
+                * Compute first bit not examined.
+                */
+               lastbit = XFS_RTMIN(bit + len, XFS_NBWORD);
+               /*
+                * Mask of relevant bits.
+                */
+               mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit;
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = (*b ^ val) & mask)) {
+                       /*
+                        * Different, compute first wrong bit and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i = XFS_RTLOBIT(wdiff) - bit;
+                       *new = start + i;
+                       *stat = 0;
+                       return 0;
+               }
+               i = lastbit - bit;
+               /*
+                * Go on to next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * If done with this block, get the next one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer.
+                        */
+                       b++;
+               }
+       } else {
+               /*
+                * Starting on a word boundary, no partial word.
+                */
+               i = 0;
+       }
+       /*
+        * Loop over whole words in buffers.  When we use up one buffer
+        * we move on to the next one.
+        */
+       while (len - i >= XFS_NBWORD) {
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = *b ^ val)) {
+                       /*
+                        * Different, compute first wrong bit and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_RTLOBIT(wdiff);
+                       *new = start + i;
+                       *stat = 0;
+                       return 0;
+               }
+               i += XFS_NBWORD;
+               /*
+                * Go on to next block if that's where the next word is
+                * and we need the next word.
+                */
+               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+                       /*
+                        * If done with this block, get the next one.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
+                       if (error) {
+                               return error;
+                       }
+                       b = bufp = bp->b_addr;
+                       word = 0;
+               } else {
+                       /*
+                        * Go on to the next word in the buffer.
+                        */
+                       b++;
+               }
+       }
+       /*
+        * If not ending on a word boundary, deal with the last
+        * (partial) word.
+        */
+       if ((lastbit = len - i)) {
+               /*
+                * Mask of relevant bits.
+                */
+               mask = ((xfs_rtword_t)1 << lastbit) - 1;
+               /*
+                * Compute difference between actual and desired value.
+                */
+               if ((wdiff = (*b ^ val) & mask)) {
+                       /*
+                        * Different, compute first wrong bit and return.
+                        */
+                       xfs_trans_brelse(tp, bp);
+                       i += XFS_RTLOBIT(wdiff);
+                       *new = start + i;
+                       *stat = 0;
+                       return 0;
+               } else
+                       i = len;
+       }
+       /*
+        * Successful, return.
+        */
+       xfs_trans_brelse(tp, bp);
+       *new = start + i;
+       *stat = 1;
+       return 0;
+}
+
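As a standalone illustration of the word-at-a-time scan xfs_rtcheck_range performs, here is a minimal userspace sketch; check_range, the flat uint64_t bitmap and __builtin_ctzll are illustrative stand-ins for the kernel's buffer walking, XFS_NBWORD and XFS_RTLOBIT machinery:

#include <stdint.h>

/*
 * Return 1 if bits [start, start + len) of 'map' all equal 'val'
 * (0 = allocated, 1 = free); otherwise return 0 and report the first
 * mismatching bit in *bad, mirroring the *new/*stat outputs above.
 */
static int check_range(const uint64_t *map, uint64_t start, uint64_t len,
		       int val, uint64_t *bad)
{
	uint64_t want = val ? ~(uint64_t)0 : 0;	/* expected word pattern */
	uint64_t bit = start, end = start + len;

	while (bit < end) {
		unsigned int off = bit % 64;	/* bit offset within word */
		uint64_t n = 64 - off;		/* candidate bits in this word */
		uint64_t mask, diff;

		if (n > end - bit)
			n = end - bit;
		mask = (n == 64) ? ~(uint64_t)0
				 : ((((uint64_t)1 << n) - 1) << off);
		diff = (map[bit / 64] ^ want) & mask;
		if (diff) {
			/* first differing bit, counted from bit 0 of the map */
			*bad = bit - off + __builtin_ctzll(diff);
			return 0;
		}
		bit += n;
	}
	*bad = end;
	return 1;
}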
+#ifdef DEBUG
+/*
+ * Check that the given extent (block range) is allocated already.
+ */
+STATIC int                             /* error */
+xfs_rtcheck_alloc_range(
+       xfs_mount_t     *mp,            /* file system mount point */
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   bno,            /* starting block number of extent */
+       xfs_extlen_t    len)            /* length of extent */
+{
+       xfs_rtblock_t   new;            /* dummy for xfs_rtcheck_range */
+       int             stat;
+       int             error;
+
+       error = xfs_rtcheck_range(mp, tp, bno, len, 0, &new, &stat);
+       if (error)
+               return error;
+       ASSERT(stat);
+       return 0;
+}
+#else
+#define xfs_rtcheck_alloc_range(m,t,b,l)       (0)
+#endif
+/*
+ * Free an extent in the realtime subvolume.  Length is expressed in
+ * realtime extents, as is the block number.
+ */
+int                                    /* error */
+xfs_rtfree_extent(
+       xfs_trans_t     *tp,            /* transaction pointer */
+       xfs_rtblock_t   bno,            /* starting block number to free */
+       xfs_extlen_t    len)            /* length of extent freed */
+{
+       int             error;          /* error value */
+       xfs_mount_t     *mp;            /* file system mount structure */
+       xfs_fsblock_t   sb;             /* summary file block number */
+       xfs_buf_t       *sumbp = NULL;  /* summary file block buffer */
+
+       mp = tp->t_mountp;
+
+       ASSERT(mp->m_rbmip->i_itemp != NULL);
+       ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
+
+       error = xfs_rtcheck_alloc_range(mp, tp, bno, len);
+       if (error)
+               return error;
+
+       /*
+        * Free the range of realtime blocks.
+        */
+       error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
+       if (error) {
+               return error;
+       }
+       /*
+        * Mark more blocks free in the superblock.
+        */
+       xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS, (long)len);
+       /*
+        * If we've now freed all the blocks, reset the file sequence
+        * number to 0.
+        */
+       if (tp->t_frextents_delta + mp->m_sb.sb_frextents ==
+           mp->m_sb.sb_rextents) {
+               if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
+                       mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
+               *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
+               xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
+       }
+       return 0;
+}
+
index a5b59d92eb7095cca4039dbfb2ba5c398ea8fbe2..05b5493d2baa0ab44d86e54b84f678a209801020 100644 (file)
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_inum.h"
-#include "xfs_trans.h"
-#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
-#include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
 #include "xfs_ialloc.h"
 #include "xfs_alloc.h"
-#include "xfs_rtalloc.h"
-#include "xfs_bmap.h"
 #include "xfs_error.h"
-#include "xfs_quota.h"
-#include "xfs_fsops.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
+#include "xfs_dinode.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_alloc_btree.h"
+#include "xfs_ialloc_btree.h"
 
 /*
  * Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -249,13 +240,13 @@ xfs_mount_validate_sb(
        if (xfs_sb_version_has_pquotino(sbp)) {
                if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) {
                        xfs_notice(mp,
-                          "Version 5 of Super block has XFS_OQUOTA bits.\n");
+                          "Version 5 of Super block has XFS_OQUOTA bits.");
                        return XFS_ERROR(EFSCORRUPTED);
                }
        } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
                                XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
                        xfs_notice(mp,
-"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.\n");
+"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.");
                        return XFS_ERROR(EFSCORRUPTED);
        }
 
@@ -624,8 +615,9 @@ xfs_sb_read_verify(
 
 out_error:
        if (error) {
-               XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
-                                    mp, bp->b_addr);
+               if (error != EWRONGFS)
+                       XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
+                                            mp, bp->b_addr);
                xfs_buf_ioerror(bp, error);
        }
 }
index 6835b44f850e58e780c2712e24530dbb3e57f5f4..35061d4b614c7ab9fabb80e1b93ffb6bc8b586d2 100644 (file)
@@ -699,7 +699,4 @@ extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
 extern void    xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
 extern void    xfs_sb_quota_from_disk(struct xfs_sb *sbp);
 
-extern const struct xfs_buf_ops xfs_sb_buf_ops;
-extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
-
 #endif /* __XFS_SB_H__ */
diff --git a/fs/xfs/xfs_shared.h b/fs/xfs/xfs_shared.h
new file mode 100644 (file)
index 0000000..8c5035a
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2013 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef __XFS_SHARED_H__
+#define __XFS_SHARED_H__
+
+/*
+ * Definitions shared between kernel and userspace that don't fit into any
+ * other shared header file.
+ */
+struct xfs_ifork;
+struct xfs_buf;
+struct xfs_buf_ops;
+struct xfs_mount;
+struct xfs_trans;
+struct xfs_inode;
+
+/*
+ * Buffer verifier operations are widely used, including userspace tools
+ */
+extern const struct xfs_buf_ops xfs_agf_buf_ops;
+extern const struct xfs_buf_ops xfs_agi_buf_ops;
+extern const struct xfs_buf_ops xfs_agfl_buf_ops;
+extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
+extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
+extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
+extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
+extern const struct xfs_buf_ops xfs_da3_node_buf_ops;
+extern const struct xfs_buf_ops xfs_dquot_buf_ops;
+extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+extern const struct xfs_buf_ops xfs_inobt_buf_ops;
+extern const struct xfs_buf_ops xfs_inode_buf_ops;
+extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
+extern const struct xfs_buf_ops xfs_sb_buf_ops;
+extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
+
+/*
+ * Transaction types.  Used to distinguish types of buffers. These never reach
+ * the log.
+ */
+#define XFS_TRANS_SETATTR_NOT_SIZE     1
+#define XFS_TRANS_SETATTR_SIZE         2
+#define XFS_TRANS_INACTIVE             3
+#define XFS_TRANS_CREATE               4
+#define XFS_TRANS_CREATE_TRUNC         5
+#define XFS_TRANS_TRUNCATE_FILE                6
+#define XFS_TRANS_REMOVE               7
+#define XFS_TRANS_LINK                 8
+#define XFS_TRANS_RENAME               9
+#define XFS_TRANS_MKDIR                        10
+#define XFS_TRANS_RMDIR                        11
+#define XFS_TRANS_SYMLINK              12
+#define XFS_TRANS_SET_DMATTRS          13
+#define XFS_TRANS_GROWFS               14
+#define XFS_TRANS_STRAT_WRITE          15
+#define XFS_TRANS_DIOSTRAT             16
+/* 17 was XFS_TRANS_WRITE_SYNC */
+#define        XFS_TRANS_WRITEID               18
+#define        XFS_TRANS_ADDAFORK              19
+#define        XFS_TRANS_ATTRINVAL             20
+#define        XFS_TRANS_ATRUNCATE             21
+#define        XFS_TRANS_ATTR_SET              22
+#define        XFS_TRANS_ATTR_RM               23
+#define        XFS_TRANS_ATTR_FLAG             24
+#define        XFS_TRANS_CLEAR_AGI_BUCKET      25
+#define XFS_TRANS_QM_SBCHANGE          26
+/*
+ * Dummy entries since we use the transaction type to index into the
+ * trans_type[] in xlog_recover_print_trans_head()
+ */
+#define XFS_TRANS_DUMMY1               27
+#define XFS_TRANS_DUMMY2               28
+#define XFS_TRANS_QM_QUOTAOFF          29
+#define XFS_TRANS_QM_DQALLOC           30
+#define XFS_TRANS_QM_SETQLIM           31
+#define XFS_TRANS_QM_DQCLUSTER         32
+#define XFS_TRANS_QM_QINOCREATE                33
+#define XFS_TRANS_QM_QUOTAOFF_END      34
+#define XFS_TRANS_SB_UNIT              35
+#define XFS_TRANS_FSYNC_TS             36
+#define        XFS_TRANS_GROWFSRT_ALLOC        37
+#define        XFS_TRANS_GROWFSRT_ZERO         38
+#define        XFS_TRANS_GROWFSRT_FREE         39
+#define        XFS_TRANS_SWAPEXT               40
+#define        XFS_TRANS_SB_COUNT              41
+#define        XFS_TRANS_CHECKPOINT            42
+#define        XFS_TRANS_ICREATE               43
+#define        XFS_TRANS_TYPE_MAX              43
+/* new transaction types need to be reflected in xfs_logprint(8) */
+
+#define XFS_TRANS_TYPES \
+       { XFS_TRANS_SETATTR_NOT_SIZE,   "SETATTR_NOT_SIZE" }, \
+       { XFS_TRANS_SETATTR_SIZE,       "SETATTR_SIZE" }, \
+       { XFS_TRANS_INACTIVE,           "INACTIVE" }, \
+       { XFS_TRANS_CREATE,             "CREATE" }, \
+       { XFS_TRANS_CREATE_TRUNC,       "CREATE_TRUNC" }, \
+       { XFS_TRANS_TRUNCATE_FILE,      "TRUNCATE_FILE" }, \
+       { XFS_TRANS_REMOVE,             "REMOVE" }, \
+       { XFS_TRANS_LINK,               "LINK" }, \
+       { XFS_TRANS_RENAME,             "RENAME" }, \
+       { XFS_TRANS_MKDIR,              "MKDIR" }, \
+       { XFS_TRANS_RMDIR,              "RMDIR" }, \
+       { XFS_TRANS_SYMLINK,            "SYMLINK" }, \
+       { XFS_TRANS_SET_DMATTRS,        "SET_DMATTRS" }, \
+       { XFS_TRANS_GROWFS,             "GROWFS" }, \
+       { XFS_TRANS_STRAT_WRITE,        "STRAT_WRITE" }, \
+       { XFS_TRANS_DIOSTRAT,           "DIOSTRAT" }, \
+       { XFS_TRANS_WRITEID,            "WRITEID" }, \
+       { XFS_TRANS_ADDAFORK,           "ADDAFORK" }, \
+       { XFS_TRANS_ATTRINVAL,          "ATTRINVAL" }, \
+       { XFS_TRANS_ATRUNCATE,          "ATRUNCATE" }, \
+       { XFS_TRANS_ATTR_SET,           "ATTR_SET" }, \
+       { XFS_TRANS_ATTR_RM,            "ATTR_RM" }, \
+       { XFS_TRANS_ATTR_FLAG,          "ATTR_FLAG" }, \
+       { XFS_TRANS_CLEAR_AGI_BUCKET,   "CLEAR_AGI_BUCKET" }, \
+       { XFS_TRANS_QM_SBCHANGE,        "QM_SBCHANGE" }, \
+       { XFS_TRANS_QM_QUOTAOFF,        "QM_QUOTAOFF" }, \
+       { XFS_TRANS_QM_DQALLOC,         "QM_DQALLOC" }, \
+       { XFS_TRANS_QM_SETQLIM,         "QM_SETQLIM" }, \
+       { XFS_TRANS_QM_DQCLUSTER,       "QM_DQCLUSTER" }, \
+       { XFS_TRANS_QM_QINOCREATE,      "QM_QINOCREATE" }, \
+       { XFS_TRANS_QM_QUOTAOFF_END,    "QM_QOFF_END" }, \
+       { XFS_TRANS_SB_UNIT,            "SB_UNIT" }, \
+       { XFS_TRANS_FSYNC_TS,           "FSYNC_TS" }, \
+       { XFS_TRANS_GROWFSRT_ALLOC,     "GROWFSRT_ALLOC" }, \
+       { XFS_TRANS_GROWFSRT_ZERO,      "GROWFSRT_ZERO" }, \
+       { XFS_TRANS_GROWFSRT_FREE,      "GROWFSRT_FREE" }, \
+       { XFS_TRANS_SWAPEXT,            "SWAPEXT" }, \
+       { XFS_TRANS_SB_COUNT,           "SB_COUNT" }, \
+       { XFS_TRANS_CHECKPOINT,         "CHECKPOINT" }, \
+       { XFS_TRANS_DUMMY1,             "DUMMY1" }, \
+       { XFS_TRANS_DUMMY2,             "DUMMY2" }, \
+       { XLOG_UNMOUNT_REC_TYPE,        "UNMOUNT" }
+
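These { value, "name" } pairs follow the form consumed by __print_symbolic() in tracepoints; a minimal userspace analogue of that lookup (sym_pair and sym_name are illustrative names, not kernel interfaces) would be:

#include <stddef.h>

struct sym_pair { unsigned int val; const char *name; };

/* Linear lookup of a transaction type's printable name. */
static const char *sym_name(const struct sym_pair *tab, size_t n,
			    unsigned int val)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tab[i].val == val)
			return tab[i].name;
	return "UNKNOWN";
}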
+/*
+ * This structure is used to track log items associated with
+ * a transaction.  It points to the log item and keeps some
+ * flags to track the state of the log item.  It also tracks
+ * the amount of space needed to log the item it describes
+ * once we get to commit processing (see xfs_trans_commit()).
+ */
+struct xfs_log_item_desc {
+       struct xfs_log_item     *lid_item;
+       struct list_head        lid_trans;
+       unsigned char           lid_flags;
+};
+
+#define XFS_LID_DIRTY          0x1
+
+/* log size calculation functions */
+int    xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes);
+int    xfs_log_calc_minimum_size(struct xfs_mount *);
+
+
+/*
+ * Values for t_flags.
+ */
+#define        XFS_TRANS_DIRTY         0x01    /* something needs to be logged */
+#define        XFS_TRANS_SB_DIRTY      0x02    /* superblock is modified */
+#define        XFS_TRANS_PERM_LOG_RES  0x04    /* xact took a permanent log res */
+#define        XFS_TRANS_SYNC          0x08    /* make commit synchronous */
+#define XFS_TRANS_DQ_DIRTY     0x10    /* at least one dquot in trx dirty */
+#define XFS_TRANS_RESERVE      0x20    /* OK to use reserved data blocks */
+#define XFS_TRANS_FREEZE_PROT  0x40    /* Transaction has elevated writer
+                                          count in superblock */
+/*
+ * Values for call flags parameter.
+ */
+#define        XFS_TRANS_RELEASE_LOG_RES       0x4
+#define        XFS_TRANS_ABORT                 0x8
+
+/*
+ * Field values for xfs_trans_mod_sb.
+ */
+#define        XFS_TRANS_SB_ICOUNT             0x00000001
+#define        XFS_TRANS_SB_IFREE              0x00000002
+#define        XFS_TRANS_SB_FDBLOCKS           0x00000004
+#define        XFS_TRANS_SB_RES_FDBLOCKS       0x00000008
+#define        XFS_TRANS_SB_FREXTENTS          0x00000010
+#define        XFS_TRANS_SB_RES_FREXTENTS      0x00000020
+#define        XFS_TRANS_SB_DBLOCKS            0x00000040
+#define        XFS_TRANS_SB_AGCOUNT            0x00000080
+#define        XFS_TRANS_SB_IMAXPCT            0x00000100
+#define        XFS_TRANS_SB_REXTSIZE           0x00000200
+#define        XFS_TRANS_SB_RBMBLOCKS          0x00000400
+#define        XFS_TRANS_SB_RBLOCKS            0x00000800
+#define        XFS_TRANS_SB_REXTENTS           0x00001000
+#define        XFS_TRANS_SB_REXTSLOG           0x00002000
+
+/*
+ * Here we centralize the specification of XFS meta-data buffer reference count
+ * values.  This determines how hard the buffer cache tries to hold onto the
+ * buffer.
+ */
+#define        XFS_AGF_REF             4
+#define        XFS_AGI_REF             4
+#define        XFS_AGFL_REF            3
+#define        XFS_INO_BTREE_REF       3
+#define        XFS_ALLOC_BTREE_REF     2
+#define        XFS_BMAP_BTREE_REF      2
+#define        XFS_DIR_BTREE_REF       2
+#define        XFS_INO_REF             2
+#define        XFS_ATTR_BTREE_REF      1
+#define        XFS_DQUOT_REF           1
+
+/*
+ * Flags for xfs_trans_ichgtime().
+ */
+#define        XFS_ICHGTIME_MOD        0x1     /* data fork modification timestamp */
+#define        XFS_ICHGTIME_CHG        0x2     /* inode field change timestamp */
+#define        XFS_ICHGTIME_CREATE     0x4     /* inode create timestamp */
+
+
+/*
+ * Symlink decoding/encoding functions
+ */
+int xfs_symlink_blocks(struct xfs_mount *mp, int pathlen);
+int xfs_symlink_hdr_set(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
+                       uint32_t size, struct xfs_buf *bp);
+bool xfs_symlink_hdr_ok(struct xfs_mount *mp, xfs_ino_t ino, uint32_t offset,
+                       uint32_t size, struct xfs_buf *bp);
+void xfs_symlink_local_to_remote(struct xfs_trans *tp, struct xfs_buf *bp,
+                                struct xfs_inode *ip, struct xfs_ifork *ifp);
+
+#endif /* __XFS_SHARED_H__ */
index 15188cc9944919e275e43e4978056efa91801343..4eb63ad87d7dbe88c59971222a7aa82697231a0d 100644 (file)
  */
 
 #include "xfs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_inum.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_ialloc.h"
 #include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
+#include "xfs_alloc.h"
 #include "xfs_error.h"
-#include "xfs_itable.h"
 #include "xfs_fsops.h"
-#include "xfs_attr.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
-#include "xfs_trans_priv.h"
-#include "xfs_filestream.h"
 #include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
 #include "xfs_dir2.h"
 #include "xfs_extfree_item.h"
 #include "xfs_mru_cache.h"
@@ -52,6 +44,9 @@
 #include "xfs_icache.h"
 #include "xfs_trace.h"
 #include "xfs_icreate_item.h"
+#include "xfs_dinode.h"
+#include "xfs_filestream.h"
+#include "xfs_quota.h"
 
 #include <linux/namei.h>
 #include <linux/init.h>
@@ -946,10 +941,6 @@ xfs_fs_destroy_inode(
 
        XFS_STATS_INC(vn_reclaim);
 
-       /* bad inode, get out here ASAP */
-       if (is_bad_inode(inode))
-               goto out_reclaim;
-
        ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
 
        /*
@@ -965,7 +956,6 @@ xfs_fs_destroy_inode(
         * this more efficiently than we can here, so simply let background
         * reclaim tear down all inodes.
         */
-out_reclaim:
        xfs_inode_set_reclaim_tag(ip);
 }
 
@@ -1246,7 +1236,7 @@ xfs_fs_remount(
                         */
 #if 0
                        xfs_info(mp,
-               "mount option \"%s\" not supported for remount\n", p);
+               "mount option \"%s\" not supported for remount", p);
                        return -EINVAL;
 #else
                        break;
@@ -1491,10 +1481,6 @@ xfs_fs_fill_super(
                error = ENOENT;
                goto out_unmount;
        }
-       if (is_bad_inode(root)) {
-               error = EINVAL;
-               goto out_unmount;
-       }
        sb->s_root = d_make_root(root);
        if (!sb->s_root) {
                error = ENOMEM;
index f622a97a7e3383d287d85a3787c2feecc8a36b3f..14e58f2c96bd71708f1c608536b835b25f05e795 100644 (file)
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include "xfs.h"
+#include "xfs_shared.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_da_btree.h"
-#include "xfs_dir2_format.h"
+#include "xfs_da_format.h"
 #include "xfs_dir2.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
 #include "xfs_ialloc.h"
 #include "xfs_alloc.h"
 #include "xfs_bmap.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_bmap_util.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
+#include "xfs_dinode.h"
 
 /* ----- Kernel only functions below ----- */
 STATIC int
@@ -424,8 +424,7 @@ xfs_symlink(
  */
 STATIC int
 xfs_inactive_symlink_rmt(
-       xfs_inode_t     *ip,
-       xfs_trans_t     **tpp)
+       struct xfs_inode *ip)
 {
        xfs_buf_t       *bp;
        int             committed;
@@ -437,11 +436,9 @@ xfs_inactive_symlink_rmt(
        xfs_mount_t     *mp;
        xfs_bmbt_irec_t mval[XFS_SYMLINK_MAPS];
        int             nmaps;
-       xfs_trans_t     *ntp;
        int             size;
        xfs_trans_t     *tp;
 
-       tp = *tpp;
        mp = ip->i_mount;
        ASSERT(ip->i_df.if_flags & XFS_IFEXTENTS);
        /*
@@ -453,6 +450,16 @@ xfs_inactive_symlink_rmt(
         */
        ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
 
+       tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, 0);
+
        /*
         * Lock the inode, fix the size, and join it to the transaction.
         * Hold it so in the normal path, we still have it locked for
@@ -471,7 +478,7 @@ xfs_inactive_symlink_rmt(
        error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
                                mval, &nmaps, 0);
        if (error)
-               goto error0;
+               goto error_trans_cancel;
        /*
         * Invalidate the block(s). No validation is done.
         */
@@ -481,22 +488,24 @@ xfs_inactive_symlink_rmt(
                        XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
                if (!bp) {
                        error = ENOMEM;
-                       goto error1;
+                       goto error_bmap_cancel;
                }
                xfs_trans_binval(tp, bp);
        }
        /*
         * Unmap the dead block(s) to the free_list.
         */
-       if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
-                       &first_block, &free_list, &done)))
-               goto error1;
+       error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
+                           &first_block, &free_list, &done);
+       if (error)
+               goto error_bmap_cancel;
        ASSERT(done);
        /*
         * Commit the first transaction.  This logs the EFI and the inode.
         */
-       if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
-               goto error1;
+       error = xfs_bmap_finish(&tp, &free_list, &committed);
+       if (error)
+               goto error_bmap_cancel;
        /*
         * The transaction must have been committed, since there were
         * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
@@ -510,27 +519,14 @@ xfs_inactive_symlink_rmt(
         */
        xfs_trans_ijoin(tp, ip, 0);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       /*
-        * Get a new, empty transaction to return to our caller.
-        */
-       ntp = xfs_trans_dup(tp);
        /*
         * Commit the transaction containing extent freeing and EFDs.
-        * If we get an error on the commit here or on the reserve below,
-        * we need to unlock the inode since the new transaction doesn't
-        * have the inode attached.
         */
-       error = xfs_trans_commit(tp, 0);
-       tp = ntp;
+       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
        if (error) {
                ASSERT(XFS_FORCED_SHUTDOWN(mp));
-               goto error0;
+               goto error_unlock;
        }
-       /*
-        * transaction commit worked ok so we can drop the extra ticket
-        * reference that we gained in xfs_trans_dup()
-        */
-       xfs_log_ticket_put(tp->t_ticket);
 
        /*
         * Remove the memory for extent descriptions (just bookkeeping).
@@ -538,23 +534,16 @@ xfs_inactive_symlink_rmt(
        if (ip->i_df.if_bytes)
                xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
        ASSERT(ip->i_df.if_bytes == 0);
-       /*
-        * Put an itruncate log reservation in the new transaction
-        * for our caller.
-        */
-       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
-       if (error) {
-               ASSERT(XFS_FORCED_SHUTDOWN(mp));
-               goto error0;
-       }
 
-       xfs_trans_ijoin(tp, ip, 0);
-       *tpp = tp;
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return 0;
 
- error1:
+error_bmap_cancel:
        xfs_bmap_cancel(&free_list);
- error0:
+error_trans_cancel:
+       xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
+error_unlock:
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
 }
 
@@ -563,41 +552,46 @@ xfs_inactive_symlink_rmt(
  */
 int
 xfs_inactive_symlink(
-       struct xfs_inode        *ip,
-       struct xfs_trans        **tp)
+       struct xfs_inode        *ip)
 {
        struct xfs_mount        *mp = ip->i_mount;
        int                     pathlen;
 
        trace_xfs_inactive_symlink(ip);
 
-       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-
        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);
 
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+
        /*
         * Zero length symlinks _can_ exist.
         */
        pathlen = (int)ip->i_d.di_size;
-       if (!pathlen)
+       if (!pathlen) {
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
                return 0;
+       }
 
        if (pathlen < 0 || pathlen > MAXPATHLEN) {
                xfs_alert(mp, "%s: inode (0x%llx) bad symlink length (%d)",
                         __func__, (unsigned long long)ip->i_ino, pathlen);
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
                ASSERT(0);
                return XFS_ERROR(EFSCORRUPTED);
        }
 
        if (ip->i_df.if_flags & XFS_IFINLINE) {
-               if (ip->i_df.if_bytes > 0)
+               if (ip->i_df.if_bytes > 0) 
                        xfs_idata_realloc(ip, -(ip->i_df.if_bytes),
                                          XFS_DATA_FORK);
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
                ASSERT(ip->i_df.if_bytes == 0);
                return 0;
        }
 
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
        /* remove the remote symlink */
-       return xfs_inactive_symlink_rmt(ip, tp);
+       return xfs_inactive_symlink_rmt(ip);
 }
index 99338ba666ac68c11350fb1b24906364c7a6de01..e75245d0911611aeca2e78dfeae633c03bd34c3e 100644 (file)
@@ -22,6 +22,6 @@
 int xfs_symlink(struct xfs_inode *dp, struct xfs_name *link_name,
                const char *target_path, umode_t mode, struct xfs_inode **ipp);
 int xfs_readlink(struct xfs_inode *ip, char *link);
-int xfs_inactive_symlink(struct xfs_inode *ip, struct xfs_trans **tpp);
+int xfs_inactive_symlink(struct xfs_inode *ip);
 
 #endif /* __XFS_SYMLINK_H */
index 01c85e3f64703e6b7a5c170c9e21421fe36d4e4e..bf59a2b45f8c40c431de3e8f52f3131d80d68a1c 100644 (file)
@@ -19,8 +19,9 @@
 #include "xfs.h"
 #include "xfs_fs.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_shared.h"
+#include "xfs_trans_resv.h"
 #include "xfs_ag.h"
 #include "xfs_sb.h"
 #include "xfs_mount.h"
@@ -30,6 +31,7 @@
 #include "xfs_trace.h"
 #include "xfs_symlink.h"
 #include "xfs_cksum.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 
 
index 5d7b3e40705ffe4a96c75493d09ac74d9ae265cd..dee3279c095e6a32dcf460ca58acda0aa924bb2d 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_btree.h"
-#include "xfs_mount.h"
 #include "xfs_da_btree.h"
 #include "xfs_ialloc.h"
 #include "xfs_itable.h"
@@ -37,6 +34,8 @@
 #include "xfs_bmap.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
 #include "xfs_log_priv.h"
 #include "xfs_buf_item.h"
 #include "xfs_quota.h"
@@ -46,6 +45,7 @@
 #include "xfs_dquot.h"
 #include "xfs_log_recover.h"
 #include "xfs_inode_item.h"
+#include "xfs_bmap_btree.h"
 
 /*
  * We include this last to have the helpers above available for the trace
index 5411e01ab4527318b187846fa3e7a53ad6300eb3..c812c5c060de1caa7532f1cdcc63766a4a227207 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_ialloc.h"
-#include "xfs_alloc.h"
 #include "xfs_extent_busy.h"
-#include "xfs_bmap.h"
 #include "xfs_quota.h"
-#include "xfs_qm.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
-#include "xfs_trans_space.h"
-#include "xfs_inode_item.h"
-#include "xfs_log_priv.h"
-#include "xfs_buf_item.h"
+#include "xfs_log.h"
 #include "xfs_trace.h"
+#include "xfs_error.h"
 
 kmem_zone_t    *xfs_trans_zone;
 kmem_zone_t    *xfs_log_item_desc_zone;
index 09cf40b89e8c1d85817cc649b22a12c3ba97aa78..9b96d35e483de4cb075170aef47db2a1f06a307e 100644 (file)
 #ifndef        __XFS_TRANS_H__
 #define        __XFS_TRANS_H__
 
-struct xfs_log_item;
-
-#include "xfs_trans_resv.h"
-
 /* kernel only transaction subsystem defines */
 
 struct xfs_buf;
@@ -77,6 +73,9 @@ struct xfs_item_ops {
        void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
 };
 
+void   xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
+                         int type, const struct xfs_item_ops *ops);
+
 /*
  * Return values for the iop_push() routines.
  */
@@ -85,18 +84,12 @@ struct xfs_item_ops {
 #define XFS_ITEM_LOCKED                2
 #define XFS_ITEM_FLUSHING      3
 
-/*
- * This is the type of function which can be given to xfs_trans_callback()
- * to be called upon the transaction's commit to disk.
- */
-typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *);
 
 /*
  * This is the structure maintained for every active transaction.
  */
 typedef struct xfs_trans {
        unsigned int            t_magic;        /* magic number */
-       xfs_log_callback_t      t_logcb;        /* log callback struct */
        unsigned int            t_type;         /* transaction type */
        unsigned int            t_log_res;      /* amt of log space resvd */
        unsigned int            t_log_count;    /* count for perm log res */
@@ -132,7 +125,6 @@ typedef struct xfs_trans {
        int64_t                 t_rextents_delta;/* superblocks rextents chg */
        int64_t                 t_rextslog_delta;/* superblocks rextslog chg */
        struct list_head        t_items;        /* log item descriptors */
-       xfs_trans_header_t      t_header;       /* header for in-log trans */
        struct list_head        t_busy;         /* list of busy extents */
        unsigned long           t_pflags;       /* saved process flags state */
 } xfs_trans_t;
@@ -237,10 +229,16 @@ void              xfs_trans_log_efd_extent(xfs_trans_t *,
                                         xfs_fsblock_t,
                                         xfs_extlen_t);
 int            xfs_trans_commit(xfs_trans_t *, uint flags);
+int            xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
 void           xfs_trans_cancel(xfs_trans_t *, int);
 int            xfs_trans_ail_init(struct xfs_mount *);
 void           xfs_trans_ail_destroy(struct xfs_mount *);
 
+void           xfs_trans_buf_set_type(struct xfs_trans *, struct xfs_buf *,
+                                      enum xfs_blft);
+void           xfs_trans_buf_copy_type(struct xfs_buf *dst_bp,
+                                       struct xfs_buf *src_bp);
+
 extern kmem_zone_t     *xfs_trans_zone;
 extern kmem_zone_t     *xfs_log_item_desc_zone;
 
index 21c6d7ddbc06b474e102b30cb6a13c7690d1a2a5..4b47cfebd25b8ad34f353469281484d4a8df1295 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_trace.h"
 #include "xfs_error.h"
+#include "xfs_log.h"
 
 #ifdef DEBUG
 /*
index 8c75b8f672702419beede8e54363ec259616039a..c035d11b7734196c4fd689121d945e598a8d6d7a 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
+#include "xfs_trans.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
index 54ee3c5dee76093b6a6136a5fde759a8be309ccd..cd2a10e15d3ac5520661ed2877f2e5c9cdac48d9 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_alloc.h"
-#include "xfs_quota.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
-#include "xfs_itable.h"
-#include "xfs_bmap.h"
-#include "xfs_rtalloc.h"
 #include "xfs_error.h"
-#include "xfs_attr.h"
-#include "xfs_buf_item.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
+#include "xfs_quota.h"
 #include "xfs_qm.h"
 
 STATIC void    xfs_trans_alloc_dqinfo(xfs_trans_t *);
index 8d71b16eccaeaca46a0a7e92a7bb69ac3d1a4b4e..47978ba89dae35307c5341d7b50bc3dc3728d140 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_extfree_item.h"
 
index 53dfe46f3680791a8eab2e72c1a92db3cb17c091..1bba7f60d94cab1fe153b073b8ca42f24fbd4bfc 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
-#include "xfs_types.h"
-#include "xfs_log.h"
-#include "xfs_trans.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_trans.h"
 #include "xfs_trans_priv.h"
 #include "xfs_inode_item.h"
 #include "xfs_trace.h"
index c52def0b441cd89f2cb207e1b7ba5a9f22cc915c..12e86af9d9b94327ea13384fe4a6c2018bf84e1d 100644 (file)
@@ -27,7 +27,6 @@ struct xfs_log_vec;
 
 
 void   xfs_trans_init(struct xfs_mount *);
-int    xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
 void   xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
 void   xfs_trans_del_item(struct xfs_log_item *);
 void   xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
index a65a3cc40610abe92b0b158a83329190d6cbdf91..d53d9f0627a779cacab8adaa5aea71be36f8e41e 100644 (file)
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_shared.h"
 #include "xfs_format.h"
-#include "xfs_log.h"
+#include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
-#include "xfs_error.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_dinode.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
-#include "xfs_btree.h"
+#include "xfs_bmap_btree.h"
 #include "xfs_ialloc.h"
-#include "xfs_alloc.h"
-#include "xfs_extent_busy.h"
-#include "xfs_bmap.h"
-#include "xfs_bmap_util.h"
 #include "xfs_quota.h"
+#include "xfs_trans.h"
 #include "xfs_qm.h"
 #include "xfs_trans_space.h"
 #include "xfs_trace.h"
index db14d0c08682b90949f031532069f715ca628804..3e8e797c6d110ce01b0964a2e3845d8a59defd0b 100644 (file)
@@ -24,14 +24,6 @@ struct file;
 struct xfs_inode;
 struct attrlist_cursor_kern;
 
-/*
- * Return values for xfs_inactive.  A return value of
- * VN_INACTIVE_NOCACHE implies that the file system behavior
- * has disassociated its state and bhv_desc_t from the vnode.
- */
-#define        VN_INACTIVE_CACHE       0
-#define        VN_INACTIVE_NOCACHE     1
-
 /*
  * Flags for read/write calls - same values as IRIX
  */
index e01f35ea76ba436310f11d82b9fdf78322d8f6fe..9d479073ba415d6b482fbecbf160b6b99e4e859c 100644 (file)
  */
 
 #include "xfs.h"
+#include "xfs_format.h"
 #include "xfs_log_format.h"
-#include "xfs_da_btree.h"
-#include "xfs_bmap_btree.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_mount.h"
+#include "xfs_da_format.h"
 #include "xfs_inode.h"
 #include "xfs_attr.h"
 #include "xfs_attr_leaf.h"
index cf051e05a8fe682894251ec5944299a8ead8277f..4e280bd226ddaa107a3d19f08b07389279b53922 100644 (file)
@@ -125,8 +125,9 @@ struct acpi_exception_info {
 #define AE_NO_HANDLER                   EXCEP_ENV (0x001A)
 #define AE_OWNER_ID_LIMIT               EXCEP_ENV (0x001B)
 #define AE_NOT_CONFIGURED               EXCEP_ENV (0x001C)
+#define AE_ACCESS                       EXCEP_ENV (0x001D)
 
-#define AE_CODE_ENV_MAX                 0x001C
+#define AE_CODE_ENV_MAX                 0x001D
 
 /*
  * Programmer exceptions
@@ -227,7 +228,7 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
        EXCEP_TXT("AE_NO_ACPI_TABLES", "ACPI tables could not be found"),
        EXCEP_TXT("AE_NO_NAMESPACE", "A namespace has not been loaded"),
        EXCEP_TXT("AE_NO_MEMORY", "Insufficient dynamic memory"),
-       EXCEP_TXT("AE_NOT_FOUND", "The name was not found in the namespace"),
+       EXCEP_TXT("AE_NOT_FOUND", "A requested entity is not found"),
        EXCEP_TXT("AE_NOT_EXIST", "A required entity does not exist"),
        EXCEP_TXT("AE_ALREADY_EXISTS", "An entity already exists"),
        EXCEP_TXT("AE_TYPE", "The object type is incorrect"),
@@ -259,7 +260,8 @@ static const struct acpi_exception_info acpi_gbl_exception_names_env[] = {
        EXCEP_TXT("AE_OWNER_ID_LIMIT",
                  "There are no more Owner IDs available for ACPI tables or control methods"),
        EXCEP_TXT("AE_NOT_CONFIGURED",
-                 "The interface is not part of the current subsystem configuration")
+                 "The interface is not part of the current subsystem configuration"),
+       EXCEP_TXT("AE_ACCESS", "Permission denied for the requested operation")
 };
 
 static const struct acpi_exception_info acpi_gbl_exception_names_pgm[] = {
index 02e113bb8b7d5c777fdc435a5285208238ce66e9..15100f625e6550a94bc2afe721ccf3804a2360d5 100644 (file)
@@ -222,7 +222,8 @@ struct acpi_device_power_flags {
        u32 power_resources:1;  /* Power resources */
        u32 inrush_current:1;   /* Serialize Dx->D0 */
        u32 power_removed:1;    /* Optimize Dx->D0 */
-       u32 reserved:28;
+       u32 ignore_parent:1;    /* Power is independent of parent power state */
+       u32 reserved:27;
 };
 
 struct acpi_device_power_state {
@@ -311,7 +312,6 @@ struct acpi_device {
        unsigned int physical_node_count;
        struct list_head physical_node_list;
        struct mutex physical_node_lock;
-       struct list_head power_dependent;
        void (*remove)(struct acpi_device *);
 };
 
@@ -456,8 +456,6 @@ acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
 acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
                                    acpi_notify_handler handler);
 int acpi_pm_device_sleep_state(struct device *, int *, int);
-void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev);
-void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev);
 #else
 static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
                                               acpi_notify_handler handler,
@@ -478,10 +476,6 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
        return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ?
                m : ACPI_STATE_D0;
 }
-static inline void acpi_dev_pm_add_dependent(acpi_handle handle,
-                                            struct device *depdev) {}
-static inline void acpi_dev_pm_remove_dependent(acpi_handle handle,
-                                               struct device *depdev) {}
 #endif
 
 #ifdef CONFIG_PM_RUNTIME
index 85bfdbe178052bd2231dce137b1463d174b2bffe..c7b1475422b3f3ed140e852f9a3407719c8f68b2 100644 (file)
@@ -46,7 +46,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20130725
+#define ACPI_CA_VERSION                 0x20130823
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
@@ -280,9 +280,16 @@ acpi_status
 acpi_install_initialization_handler(acpi_init_handler handler, u32 function);
 
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
-                               acpi_install_global_event_handler
-                               (acpi_gbl_event_handler handler, void *context))
-
+                               acpi_install_sci_handler(acpi_sci_handler
+                                                        address,
+                                                        void *context))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+                                acpi_remove_sci_handler(acpi_sci_handler
+                                                        address))
+ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
+                                acpi_install_global_event_handler
+                                (acpi_gbl_event_handler handler,
+                                 void *context))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
                                 acpi_install_fixed_event_handler(u32
                                                                  acpi_event,
index b748aefce929983cf94d3729db782e62c2843768..f6abf23ad0a71d51df5c86806c1b504b678a4bc1 100644 (file)
@@ -474,6 +474,11 @@ typedef u64 acpi_integer;
 #define ACPI_MOVE_NAME(dest,src)        (ACPI_STRNCPY (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
 #endif
 
+/* Support for the special RSDP signature (8 characters) */
+
+#define ACPI_VALIDATE_RSDP_SIG(a)       (!ACPI_STRNCMP (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
+#define ACPI_MAKE_RSDP_SIG(dest)        (ACPI_MEMCPY (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+
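A hypothetical kernel-style use of the new validation macro, scanning a candidate region in the 16-byte steps the ACPI specification prescribes for the RSDP; find_rsdp is illustrative, and checksum verification of a hit is omitted:

static u8 *find_rsdp(u8 *start, u8 *end)
{
	u8 *p;

	/* The RSDP signature is 8 bytes and sits on a 16-byte boundary. */
	for (p = start; p + 8 <= end; p += 16)
		if (ACPI_VALIDATE_RSDP_SIG(p))
			return p;
	return NULL;
}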
 /*******************************************************************************
  *
  * Miscellaneous constants
@@ -945,6 +950,9 @@ typedef void
 /*
  * Various handlers and callback procedures
  */
+typedef
+u32 (*acpi_sci_handler) (void *context);
+
 typedef
 void (*acpi_gbl_event_handler) (u32 event_type,
                               acpi_handle device,
index 68534ef86ec81a903e09ccd93169baf69b4bb73e..fda0f3e35c03fc40043cbfdba951cbe1852d9ca6 100644 (file)
@@ -87,7 +87,7 @@
 #define ACPI_FLUSH_CPU_CACHE()
 #define ACPI_CAST_PTHREAD_T(pthread) ((acpi_thread_id) (pthread))
 
-#if defined(__ia64__) || defined(__x86_64__)
+#if defined(__ia64__) || defined(__x86_64__) || defined(__aarch64__)
 #define ACPI_MACHINE_WIDTH          64
 #define COMPILER_DEPENDENT_INT64    long
 #define COMPILER_DEPENDENT_UINT64   unsigned long
index 66096d06925e417d70d6b8974a00a035d05ef1d8..7816e45f5d5a0177b7aac0b4b40fdd9e125a97bb 100644 (file)
@@ -199,6 +199,7 @@ struct acpi_processor_flags {
 struct acpi_processor {
        acpi_handle handle;
        u32 acpi_id;
+       u32 apic_id;
        u32 id;
        u32 pblk;
        int performance_platform_limit;
@@ -314,6 +315,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 
 /* in processor_core.c */
 void acpi_processor_set_pdc(acpi_handle handle);
+int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int apic_id, u32 acpi_id);
 int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
 
 /* in processor_throttling.c */
diff --git a/include/asm-generic/simd.h b/include/asm-generic/simd.h
new file mode 100644 (file)
index 0000000..f57eb7b
--- /dev/null
@@ -0,0 +1,14 @@
+
+#include <linux/hardirq.h>
+
+/*
+ * may_use_simd - whether it is allowable at this time to issue SIMD
+ *                instructions or access the SIMD register file
+ *
+ * As architectures typically don't preserve the SIMD register file when
+ * taking an interrupt, !in_interrupt() should be a reasonable default.
+ */
+static __must_check inline bool may_use_simd(void)
+{
+       return !in_interrupt();
+}
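A hypothetical caller of may_use_simd(); xor_bytes and xor_bytes_simd are illustrative, and kernel_fpu_begin()/kernel_fpu_end() stand in for whatever arch-specific guard surrounds SIMD register use:

#include <linux/types.h>
#include <asm/simd.h>

void xor_bytes_simd(u8 *dst, const u8 *src, unsigned int len);	/* hypothetical */

static void xor_bytes(u8 *dst, const u8 *src, unsigned int len)
{
	unsigned int i;

	if (may_use_simd()) {
		kernel_fpu_begin();		/* arch-specific SIMD guard */
		xor_bytes_simd(dst, src, len);
		kernel_fpu_end();
		return;
	}
	/* Scalar fallback, e.g. when called from interrupt context. */
	for (i = 0; i < len; i++)
		dst[i] ^= src[i];
}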
index 83e2c31e8b007528f67f726ec47bc648f24bdda8..bc2121fa9132cc9cad2209cacee3896fdbeb5370 100644 (file)
 #define KERNEL_CTORS() . = ALIGN(8);                      \
                        VMLINUX_SYMBOL(__ctors_start) = .; \
                        *(.ctors)                          \
+                       *(.init_array)                     \
                        VMLINUX_SYMBOL(__ctors_end) = .;
 #else
 #define KERNEL_CTORS()
index 418d270e18063517750f39c68490f56fb7cd24c3..e73c19e90e38f49e9ebdff10bfed3cdca90a9f9a 100644 (file)
@@ -386,5 +386,21 @@ static inline int crypto_requires_sync(u32 type, u32 mask)
        return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
 }
 
-#endif /* _CRYPTO_ALGAPI_H */
+noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);
+
+/**
+ * crypto_memneq - Compare two areas of memory without leaking
+ *                timing information.
+ *
+ * @a: One area of memory
+ * @b: Another area of memory
+ * @size: The size of the area.
+ *
+ * Returns 0 when data is equal, 1 otherwise.
+ */
+static inline int crypto_memneq(const void *a, const void *b, size_t size)
+{
+       return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
+}
 
+#endif /* _CRYPTO_ALGAPI_H */
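For instance, an authentication-tag check would use crypto_memneq() rather than memcmp() so the comparison time does not depend on how many leading bytes happen to match; check_tag below is a hypothetical helper, not part of this patch:

#include <linux/errno.h>
#include <crypto/algapi.h>

static int check_tag(const u8 *calculated, const u8 *received,
		     unsigned int len)
{
	/* crypto_memneq() examines every byte regardless of where they differ. */
	return crypto_memneq(calculated, received, len) ? -EBADMSG : 0;
}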
index e47b044929a84b7cd1e54fb17b8e87de3020d7b6..6775059539b56f2ffe870d28c4d2ab35821c013a 100644 (file)
@@ -23,5 +23,15 @@ struct crypto_authenc_key_param {
        __be32 enckeylen;
 };
 
-#endif /* _CRYPTO_AUTHENC_H */
+struct crypto_authenc_keys {
+       const u8 *authkey;
+       const u8 *enckey;
+
+       unsigned int authkeylen;
+       unsigned int enckeylen;
+};
 
+int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
+                              unsigned int keylen);
+
+#endif /* _CRYPTO_AUTHENC_H */
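A hypothetical aead setkey implementation would use the new helper to split the combined authenc() key blob before validating or programming the two halves (my_authenc_setkey is illustrative):

#include <linux/crypto.h>
#include <linux/errno.h>
#include <crypto/authenc.h>

static int my_authenc_setkey(struct crypto_aead *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		return -EINVAL;

	/*
	 * keys.authkey/keys.authkeylen and keys.enckey/keys.enckeylen now
	 * describe the two sub-keys; a real driver would check the lengths
	 * and program its hash and cipher engines here.
	 */
	return 0;
}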
index b46fb45f2cca4a5881b64d93577e6ae03a221670..e6d0cd9f518efa36733f5769e9836dced310e628 100644 (file)
@@ -150,6 +150,7 @@ int drm_err(const char *func, const char *format, ...);
 #define DRIVER_BUS_PCI 0x1
 #define DRIVER_BUS_PLATFORM 0x2
 #define DRIVER_BUS_USB 0x3
+#define DRIVER_BUS_HOST1X 0x4
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -433,6 +434,9 @@ struct drm_file {
        struct drm_master *master; /* master this node is currently associated with
                                      N.B. not always minor->master */
 
+       /* true when the client has asked us to expose stereo 3D mode flags */
+       bool stereo_allowed;
+
        /**
         * fbs - List of framebuffers associated with this file.
         *
@@ -667,8 +671,6 @@ struct drm_gem_object {
        uint32_t pending_read_domains;
        uint32_t pending_write_domain;
 
-       void *driver_private;
-
        /**
         * dma_buf - dma buf associated with this GEM object
         *
@@ -922,7 +924,6 @@ struct drm_driver {
         *
         * Returns 0 on success.
         */
-       int (*gem_init_object) (struct drm_gem_object *obj);
        void (*gem_free_object) (struct drm_gem_object *obj);
        int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
        void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
@@ -1046,7 +1047,7 @@ struct drm_minor {
        int index;                      /**< Minor device number */
        int type;                       /**< Control or render */
        dev_t device;                   /**< Device number for mknod */
-       struct device kdev;             /**< Linux device */
+       struct device *kdev;            /**< Linux device */
        struct drm_device *dev;
 
        struct dentry *debugfs_root;
@@ -1081,6 +1082,19 @@ struct drm_pending_vblank_event {
        struct drm_event_vblank event;
 };
 
+struct drm_vblank_crtc {
+       wait_queue_head_t queue;        /**< VBLANK wait queue */
+       struct timeval time[DRM_VBLANKTIME_RBSIZE];     /**< timestamp of current count */
+       atomic_t count;                 /**< number of VBLANK interrupts */
+       atomic_t refcount;              /* number of users of vblank interrupts per crtc */
+       u32 last;                       /* protected by dev->vbl_lock, used */
+                                       /* for wraparound handling */
+       u32 last_wait;                  /* Last vblank seqno waited per CRTC */
+       unsigned int inmodeset;         /* Display driver is setting mode */
+       bool enabled;                   /* so we don't call enable more than
+                                          once per disable */
+};
+
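With the per-CRTC vblank state gathered into this structure (see the dev->vblank array further down), a counter read becomes one indexed access; read_vblank_count is a hypothetical helper:

#include <drm/drmP.h>

static u32 read_vblank_count(struct drm_device *dev, int crtc)
{
	/* One struct per CRTC replaces the old parallel per-field arrays. */
	return atomic_read(&dev->vblank[crtc].count);
}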
 /**
  * DRM device structure. This structure represent a complete card that
  * may contain multiple heads.
@@ -1105,25 +1119,16 @@ struct drm_device {
        atomic_t buf_alloc;             /**< Buffer allocation in progress */
        /*@} */
 
-       /** \name Performance counters */
-       /*@{ */
-       unsigned long counters;
-       enum drm_stat_type types[15];
-       atomic_t counts[15];
-       /*@} */
-
        struct list_head filelist;
 
        /** \name Memory management */
        /*@{ */
        struct list_head maplist;       /**< Linked list of regions */
-       int map_count;                  /**< Number of mappable regions */
        struct drm_open_hash map_hash;  /**< User token hash table for maps */
 
        /** \name Context handle management */
        /*@{ */
        struct list_head ctxlist;       /**< Linked list of context handles */
-       int ctx_count;                  /**< Number of context handles */
        struct mutex ctxlist_mutex;     /**< For ctxlist */
 
        struct idr ctx_idr;
@@ -1139,12 +1144,11 @@ struct drm_device {
 
        /** \name Context support */
        /*@{ */
-       int irq_enabled;                /**< True if irq handler is enabled */
+       bool irq_enabled;               /**< True if irq handler is enabled */
        __volatile__ long context_flag; /**< Context swapping flag */
        int last_context;               /**< Last current context */
        /*@} */
 
-       struct work_struct work;
        /** \name VBLANK IRQ support */
        /*@{ */
 
@@ -1154,20 +1158,13 @@ struct drm_device {
         * Once the modeset ioctl *has* been called though, we can safely
         * disable them when unused.
         */
-       int vblank_disable_allowed;
+       bool vblank_disable_allowed;
+
+       /* array of size num_crtcs */
+       struct drm_vblank_crtc *vblank;
 
-       wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
-       atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
-       struct timeval *_vblank_time;   /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
        spinlock_t vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
        spinlock_t vbl_lock;
-       atomic_t *vblank_refcount;      /* number of users of vblank interruptsper crtc */
-       u32 *last_vblank;               /* protected by dev->vbl_lock, used */
-                                       /* for wraparound handling */
-       int *vblank_enabled;            /* so we don't call enable more than
-                                          once per disable */
-       int *vblank_inmodeset;          /* Display driver is setting mode */
-       u32 *last_vblank_wait;          /* Last vblank seqno waited per CRTC */
        struct timer_list vblank_disable_timer;
 
        u32 max_vblank_count;           /**< size of vblank counter register */
@@ -1184,8 +1181,6 @@ struct drm_device {
 
        struct device *dev;             /**< Device structure */
        struct pci_dev *pdev;           /**< PCI device structure */
-       int pci_vendor;                 /**< PCI vendor id */
-       int pci_device;                 /**< PCI device id */
 #ifdef __alpha__
        struct pci_controller *hose;
 #endif
@@ -1303,6 +1298,8 @@ extern int drm_getstats(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 extern int drm_getcap(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
+extern int drm_setclientcap(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv);
 extern int drm_setversion(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
 extern int drm_noop(struct drm_device *dev, void *data,
@@ -1556,8 +1553,6 @@ int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
-struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
-                                           size_t size);
 int drm_gem_object_init(struct drm_device *dev,
                        struct drm_gem_object *obj, size_t size);
 void drm_gem_private_object_init(struct drm_device *dev,
@@ -1645,9 +1640,11 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
 
 #include <drm/drm_mem_util.h>
 
-extern int drm_fill_in_dev(struct drm_device *dev,
-                          const struct pci_device_id *ent,
-                          struct drm_driver *driver);
+struct drm_device *drm_dev_alloc(struct drm_driver *driver,
+                                struct device *parent);
+void drm_dev_free(struct drm_device *dev);
+int drm_dev_register(struct drm_device *dev, unsigned long flags);
+void drm_dev_unregister(struct drm_device *dev);
 int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
 /*@}*/
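
Illustrative sketch (not part of this patch): the DRM core header changes above replace drm_fill_in_dev() with drm_dev_alloc()/drm_dev_free() plus drm_dev_register()/drm_dev_unregister(). A platform driver's probe/remove path might use the new entry points roughly as below; example_driver, the flags value 0 and the error handling are assumptions for illustration only.

#include <linux/platform_device.h>
#include <drm/drmP.h>

static struct drm_driver example_driver;       /* hypothetical; fields omitted */

static int example_probe(struct platform_device *pdev)
{
        struct drm_device *drm;
        int ret;

        /* Allocate a drm_device bound to this platform device. */
        drm = drm_dev_alloc(&example_driver, &pdev->dev);
        if (!drm)
                return -ENOMEM;

        /* Make the device visible to userspace (minors, sysfs). */
        ret = drm_dev_register(drm, 0);
        if (ret < 0) {
                drm_dev_free(drm);
                return ret;
        }

        platform_set_drvdata(pdev, drm);
        return 0;
}

static int example_remove(struct platform_device *pdev)
{
        struct drm_device *drm = platform_get_drvdata(pdev);

        drm_dev_unregister(drm);
        drm_dev_free(drm);
        return 0;
}
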
 
index 24f499569a2f70480d87dca321c222c50755190d..d3a91ade1d37bebf8a0931a0d30942dc1e7a82bf 100644 (file)
@@ -108,6 +108,7 @@ enum drm_mode_status {
     MODE_ONE_HEIGHT,    /* only one height is supported */
     MODE_ONE_SIZE,      /* only one resolution is supported */
     MODE_NO_REDUCED,    /* monitor doesn't accept reduced blanking */
+    MODE_NO_STEREO,    /* stereo modes not supported */
     MODE_UNVERIFIED = -3, /* mode needs to reverified */
     MODE_BAD = -2,     /* unspecified reason */
     MODE_ERROR = -1    /* error condition */
@@ -124,7 +125,10 @@ enum drm_mode_status {
        .vscan = (vs), .flags = (f), \
        .base.type = DRM_MODE_OBJECT_MODE
 
-#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
+#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
+#define CRTC_STEREO_DOUBLE     (1 << 1) /* adjust timings for stereo modes */
+
+#define DRM_MODE_FLAG_3D_MAX   DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF
 
 struct drm_display_mode {
        /* Header */
@@ -155,8 +159,7 @@ struct drm_display_mode {
        int height_mm;
 
        /* Actual mode we give to hw */
-       int clock_index;
-       int synth_clock;
+       int crtc_clock;         /* in KHz */
        int crtc_hdisplay;
        int crtc_hblank_start;
        int crtc_hblank_end;
@@ -180,6 +183,11 @@ struct drm_display_mode {
        int hsync;              /* in kHz */
 };
 
+static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode)
+{
+       return mode->flags & DRM_MODE_FLAG_3D_MASK;
+}
+
 enum drm_connector_status {
        connector_status_connected = 1,
        connector_status_disconnected = 2,
@@ -587,7 +595,7 @@ enum drm_connector_force {
  */
 struct drm_connector {
        struct drm_device *dev;
-       struct device kdev;
+       struct device *kdev;
        struct device_attribute *attr;
        struct list_head head;
 
@@ -597,6 +605,7 @@ struct drm_connector {
        int connector_type_id;
        bool interlace_allowed;
        bool doublescan_allowed;
+       bool stereo_allowed;
        struct list_head modes; /* list of modes on this connector */
 
        enum drm_connector_status status;
@@ -964,6 +973,7 @@ extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_m
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
                                 struct i2c_adapter *adapter);
+extern struct edid *drm_edid_duplicate(const struct edid *edid);
 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
 extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
@@ -975,7 +985,7 @@ extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
 extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
-extern bool drm_mode_equal_no_clocks(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
 extern int drm_mode_width(const struct drm_display_mode *mode);
 extern int drm_mode_height(const struct drm_display_mode *mode);
 
@@ -1135,4 +1145,21 @@ extern int drm_format_horz_chroma_subsampling(uint32_t format);
 extern int drm_format_vert_chroma_subsampling(uint32_t format);
 extern const char *drm_get_format_name(uint32_t format);
 
+/* Helpers */
+static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev,
+       uint32_t id)
+{
+       struct drm_mode_object *mo;
+       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
+       return mo ? obj_to_crtc(mo) : NULL;
+}
+
+static inline struct drm_encoder *drm_encoder_find(struct drm_device *dev,
+       uint32_t id)
+{
+       struct drm_mode_object *mo;
+       mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
+       return mo ? obj_to_encoder(mo) : NULL;
+}
+
 #endif /* __DRM_CRTC_H__ */
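
Illustrative sketch (not part of this patch): the drm_crtc.h hunks add stereo-3D plumbing (MODE_NO_STEREO, drm_mode_is_stereo(), the per-connector stereo_allowed flag) and the drm_crtc_find()/drm_encoder_find() lookup helpers. A driver-side check might reject stereo modes on hardware that cannot scan them out, and id lookups no longer need open-coded drm_mode_object_find() casts; the function names below are made up.

#include <drm/drm_crtc.h>

static enum drm_mode_status
example_check_stereo(struct drm_connector *connector,
                     struct drm_display_mode *mode)
{
        /* Refuse 3D modes unless the connector advertises support. */
        if (drm_mode_is_stereo(mode) && !connector->stereo_allowed)
                return MODE_NO_STEREO;

        return MODE_OK;
}

static struct drm_crtc *example_lookup_crtc(struct drm_device *dev, uint32_t id)
{
        /* Replaces drm_mode_object_find() + obj_to_crtc() by hand. */
        return drm_crtc_find(dev, id);
}
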
index ae8dbfb1207c71a6cbe44dc166180988083c00e6..a92c3754e3bbffe56c286d85bd2212f753ebe2c3 100644 (file)
 #define DP_DOWNSTREAMPORT_PRESENT           0x005
 # define DP_DWN_STRM_PORT_PRESENT           (1 << 0)
 # define DP_DWN_STRM_PORT_TYPE_MASK         0x06
-/* 00b = DisplayPort */
-/* 01b = Analog */
-/* 10b = TMDS or HDMI */
-/* 11b = Other */
+# define DP_DWN_STRM_PORT_TYPE_DP           (0 << 1)
+# define DP_DWN_STRM_PORT_TYPE_ANALOG       (1 << 1)
+# define DP_DWN_STRM_PORT_TYPE_TMDS         (2 << 1)
+# define DP_DWN_STRM_PORT_TYPE_OTHER        (3 << 1)
 # define DP_FORMAT_CONVERSION               (1 << 3)
 # define DP_DETAILED_CAP_INFO_AVAILABLE            (1 << 4) /* DPI */
 
@@ -333,20 +333,20 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
 
 
 #define DP_LINK_STATUS_SIZE       6
-bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane_count);
-bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                              int lane_count);
-u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
                                     int lane);
-u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
                                          int lane);
 
 #define DP_RECEIVER_CAP_SIZE           0xf
 #define EDP_PSR_RECEIVER_CAP_SIZE      2
 
-void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 
 u8 drm_dp_link_rate_to_bw_code(int link_rate);
 int drm_dp_bw_code_to_link_rate(u8 link_bw);
@@ -379,15 +379,22 @@ struct edp_vsc_psr {
 #define EDP_VSC_PSR_CRC_VALUES_VALID   (1<<2)
 
 static inline int
-drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+drm_dp_max_link_rate(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
        return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
 }
 
 static inline u8
-drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+drm_dp_max_lane_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
        return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
 }
 
+static inline bool
+drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+       return dpcd[DP_DPCD_REV] >= 0x11 &&
+               (dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP);
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
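
Illustrative sketch (not part of this patch): the DP helper changes constify the DPCD/link-status parameters and add drm_dp_enhanced_frame_cap(). Once a driver has filled the receiver-capability buffer over AUX (not shown, and not provided by this header), it might interpret it as below.

#include <drm/drm_dp_helper.h>

static void example_parse_dpcd(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                               int *max_lanes, int *max_link_rate,
                               bool *enhanced_framing)
{
        *max_lanes     = drm_dp_max_lane_count(dpcd);
        /* Value as returned by drm_dp_bw_code_to_link_rate(). */
        *max_link_rate = drm_dp_max_link_rate(dpcd);
        /* Needs DPCD revision 1.1+ plus the capability bit. */
        *enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
}
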
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
new file mode 100644 (file)
index 0000000..4c8760b
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation.  All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __DRM_PANEL_H__
+#define __DRM_PANEL_H__
+
+#include <linux/list.h>
+
+struct drm_connector;
+struct drm_device;
+struct drm_panel;
+
+struct drm_panel_funcs {
+       void (*disable)(struct drm_panel *panel);
+       void (*enable)(struct drm_panel *panel);
+       int (*get_modes)(struct drm_panel *panel);
+};
+
+struct drm_panel {
+       struct drm_device *drm;
+       struct drm_connector *connector;
+       struct device *dev;
+
+       const struct drm_panel_funcs *funcs;
+
+       struct list_head list;
+};
+
+static inline void drm_panel_disable(struct drm_panel *panel)
+{
+       if (panel && panel->funcs && panel->funcs->disable)
+               panel->funcs->disable(panel);
+}
+
+static inline void drm_panel_enable(struct drm_panel *panel)
+{
+       if (panel && panel->funcs && panel->funcs->enable)
+               panel->funcs->enable(panel);
+}
+
+void drm_panel_init(struct drm_panel *panel);
+
+int drm_panel_add(struct drm_panel *panel);
+void drm_panel_remove(struct drm_panel *panel);
+
+int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
+int drm_panel_detach(struct drm_panel *panel);
+
+#ifdef CONFIG_OF
+struct drm_panel *of_drm_find_panel(struct device_node *np);
+#else
+static inline struct drm_panel *of_drm_find_panel(struct device_node *np)
+{
+       return NULL;
+}
+#endif
+
+#endif
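
Illustrative sketch (not part of this patch): the new drm_panel.h declares a minimal panel abstraction in which a panel driver fills in drm_panel_funcs, registers with drm_panel_init()/drm_panel_add(), and a display driver later binds the panel to a connector via drm_panel_attach(). A trivial panel registration using only what this header declares might look as follows; the regulator/GPIO handling a real panel needs is omitted and the callback bodies are placeholders.

#include <drm/drm_panel.h>

static void example_panel_disable(struct drm_panel *panel)
{
        /* Power the panel down here (backlight, regulators, ...). */
}

static void example_panel_enable(struct drm_panel *panel)
{
        /* Power the panel up here. */
}

static int example_panel_get_modes(struct drm_panel *panel)
{
        /* Add fixed modes to panel->connector, return their count. */
        return 0;
}

static const struct drm_panel_funcs example_panel_funcs = {
        .disable   = example_panel_disable,
        .enable    = example_panel_enable,
        .get_modes = example_panel_get_modes,
};

static struct drm_panel example_panel;

static int example_panel_register(struct device *dev)
{
        drm_panel_init(&example_panel);
        example_panel.dev   = dev;
        example_panel.funcs = &example_panel_funcs;

        return drm_panel_add(&example_panel);
}
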
index edbd250809cb6d415e893a2d26a9d6db302925fd..bed35e36fd2748515ed75f47c9d642462223c852 100644 (file)
@@ -23,7 +23,7 @@
 #define PULL_UP                        (1 << 4)
 #define ALTELECTRICALSEL       (1 << 5)
 
-/* 34xx specific mux bit defines */
+/* omap3/4/5 specific mux bit defines */
 #define INPUT_EN               (1 << 8)
 #define OFF_EN                 (1 << 9)
 #define OFFOUT_EN              (1 << 10)
@@ -31,8 +31,6 @@
 #define OFF_PULL_EN            (1 << 12)
 #define OFF_PULL_UP            (1 << 13)
 #define WAKEUP_EN              (1 << 14)
-
-/* 44xx specific mux bit defines */
 #define WAKEUP_EVENT           (1 << 15)
 
 /* Active pin states */
index a5db4aeefa3642107e28b431c21a89c195569993..35e68358ad066e8f34b18e31e548228d4706db5f 100644 (file)
@@ -116,7 +116,7 @@ void acpi_numa_arch_fixup(void);
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int *pcpu);
+int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
 int acpi_unmap_lsapic(int cpu);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
@@ -294,58 +294,51 @@ void __init acpi_nvs_nosave_s3(void);
 #endif /* CONFIG_PM_SLEEP */
 
 struct acpi_osc_context {
-       char *uuid_str; /* uuid string */
+       char *uuid_str;                 /* UUID string */
        int rev;
-       struct acpi_buffer cap; /* arg2/arg3 */
-       struct acpi_buffer ret; /* free by caller if success */
+       struct acpi_buffer cap;         /* list of DWORD capabilities */
+       struct acpi_buffer ret;         /* free by caller if success */
 };
 
-#define OSC_QUERY_TYPE                 0
-#define OSC_SUPPORT_TYPE               1
-#define OSC_CONTROL_TYPE               2
-
-/* _OSC DW0 Definition */
-#define OSC_QUERY_ENABLE               1
-#define OSC_REQUEST_ERROR              2
-#define OSC_INVALID_UUID_ERROR         4
-#define OSC_INVALID_REVISION_ERROR     8
-#define OSC_CAPABILITIES_MASK_ERROR    16
-
 acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
 
-/* platform-wide _OSC bits */
-#define OSC_SB_PAD_SUPPORT             1
-#define OSC_SB_PPC_OST_SUPPORT         2
-#define OSC_SB_PR3_SUPPORT             4
-#define OSC_SB_HOTPLUG_OST_SUPPORT     8
-#define OSC_SB_APEI_SUPPORT            16
+/* Indexes into _OSC Capabilities Buffer (DWORDs 2 & 3 are device-specific) */
+#define OSC_QUERY_DWORD                                0       /* DWORD 1 */
+#define OSC_SUPPORT_DWORD                      1       /* DWORD 2 */
+#define OSC_CONTROL_DWORD                      2       /* DWORD 3 */
+
+/* _OSC Capabilities DWORD 1: Query/Control and Error Returns (generic) */
+#define OSC_QUERY_ENABLE                       0x00000001  /* input */
+#define OSC_REQUEST_ERROR                      0x00000002  /* return */
+#define OSC_INVALID_UUID_ERROR                 0x00000004  /* return */
+#define OSC_INVALID_REVISION_ERROR             0x00000008  /* return */
+#define OSC_CAPABILITIES_MASK_ERROR            0x00000010  /* return */
+
+/* Platform-Wide Capabilities _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_SB_PAD_SUPPORT                     0x00000001
+#define OSC_SB_PPC_OST_SUPPORT                 0x00000002
+#define OSC_SB_PR3_SUPPORT                     0x00000004
+#define OSC_SB_HOTPLUG_OST_SUPPORT             0x00000008
+#define OSC_SB_APEI_SUPPORT                    0x00000010
+#define OSC_SB_CPC_SUPPORT                     0x00000020
 
 extern bool osc_sb_apei_support_acked;
 
-/* PCI defined _OSC bits */
-/* _OSC DW1 Definition (OS Support Fields) */
-#define OSC_EXT_PCI_CONFIG_SUPPORT             1
-#define OSC_ACTIVE_STATE_PWR_SUPPORT           2
-#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT       4
-#define OSC_PCI_SEGMENT_GROUPS_SUPPORT         8
-#define OSC_MSI_SUPPORT                                16
-#define OSC_PCI_SUPPORT_MASKS                  0x1f
-
-/* _OSC DW1 Definition (OS Control Fields) */
-#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL      1
-#define OSC_SHPC_NATIVE_HP_CONTROL             2
-#define OSC_PCI_EXPRESS_PME_CONTROL            4
-#define OSC_PCI_EXPRESS_AER_CONTROL            8
-#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL  16
-
-#define OSC_PCI_CONTROL_MASKS  (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |    \
-                               OSC_SHPC_NATIVE_HP_CONTROL |            \
-                               OSC_PCI_EXPRESS_PME_CONTROL |           \
-                               OSC_PCI_EXPRESS_AER_CONTROL |           \
-                               OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
-
-#define OSC_PCI_NATIVE_HOTPLUG (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |    \
-                               OSC_SHPC_NATIVE_HP_CONTROL)
+/* PCI Host Bridge _OSC: Capabilities DWORD 2: Support Field */
+#define OSC_PCI_EXT_CONFIG_SUPPORT             0x00000001
+#define OSC_PCI_ASPM_SUPPORT                   0x00000002
+#define OSC_PCI_CLOCK_PM_SUPPORT               0x00000004
+#define OSC_PCI_SEGMENT_GROUPS_SUPPORT         0x00000008
+#define OSC_PCI_MSI_SUPPORT                    0x00000010
+#define OSC_PCI_SUPPORT_MASKS                  0x0000001f
+
+/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
+#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL      0x00000001
+#define OSC_PCI_SHPC_NATIVE_HP_CONTROL         0x00000002
+#define OSC_PCI_EXPRESS_PME_CONTROL            0x00000004
+#define OSC_PCI_EXPRESS_AER_CONTROL            0x00000008
+#define OSC_PCI_EXPRESS_CAPABILITY_CONTROL     0x00000010
+#define OSC_PCI_CONTROL_MASKS                  0x0000001f
 
 extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
                                             u32 *mask, u32 req);
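
Illustrative sketch (not part of this patch): the acpi.h rework replaces the OSC_*_TYPE names with DWORD indexes into the _OSC capabilities buffer and renumbers the support/control bits as explicit hex masks. A caller might build the three-DWORD buffer for a platform-wide _OSC query as below; the UUID string is the conventional platform-wide _OSC UUID and is an assumption here, as is the chosen support bit.

#include <linux/acpi.h>
#include <linux/slab.h>

static acpi_status example_osc_query(acpi_handle handle)
{
        u32 capbuf[3];
        struct acpi_osc_context context = {
                .uuid_str    = "0811b06e-4a27-44f9-8d60-3cbbc22e7b48", /* assumed */
                .rev         = 1,
                .cap.length  = sizeof(capbuf),
                .cap.pointer = capbuf,
        };
        acpi_status status;

        capbuf[OSC_QUERY_DWORD]   = OSC_QUERY_ENABLE;   /* query only */
        capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* example capability */
        capbuf[OSC_CONTROL_DWORD] = 0;                  /* nothing requested */

        status = acpi_run_osc(handle, &context);
        if (ACPI_SUCCESS(status))
                kfree(context.ret.pointer);     /* "free by caller if success" */

        return status;
}
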
index d9c92daa3944e43a13f285a7baaa1443868cce3d..f01e7e370691ee5ca840d4e011bb4a1545de1f12 100644 (file)
@@ -14,6 +14,14 @@ struct kiocb;
 
 #define KIOCB_KEY              0
 
+/*
+ * opcode values not exposed to user space
+ */
+enum {
+       IOCB_CMD_READ_ITER = 0x10000,
+       IOCB_CMD_WRITE_ITER = 0x10001,
+};
+
 /*
  * We use ki_cancel == KIOCB_CANCELLED to indicate that a kiocb has been either
  * cancelled or completed (this makes a certain amount of sense because
@@ -31,13 +39,15 @@ typedef int (kiocb_cancel_fn)(struct kiocb *);
 
 struct kiocb {
        struct file             *ki_filp;
-       struct kioctx           *ki_ctx;        /* NULL for sync ops */
+       struct kioctx           *ki_ctx;        /* NULL for sync ops,
+                                                * -1 for kernel caller */
        kiocb_cancel_fn         *ki_cancel;
        void                    *private;
 
        union {
                void __user             *user;
                struct task_struct      *tsk;
+               void                    (*complete)(u64 user_data, long res);
        } ki_obj;
 
        __u64                   ki_user_data;   /* user's data for completion */
@@ -59,6 +69,11 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
        return kiocb->ki_ctx == NULL;
 }
 
+static inline bool is_kernel_kiocb(struct kiocb *kiocb)
+{
+       return kiocb->ki_ctx == (void *)-1;
+}
+
 static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
 {
        *kiocb = (struct kiocb) {
@@ -77,6 +92,14 @@ extern void exit_aio(struct mm_struct *mm);
 extern long do_io_submit(aio_context_t ctx_id, long nr,
                         struct iocb __user *__user *iocbpp, bool compat);
 void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel);
+struct kiocb *aio_kernel_alloc(gfp_t gfp);
+void aio_kernel_free(struct kiocb *iocb);
+void aio_kernel_init_rw(struct kiocb *iocb, struct file *filp, size_t nr,
+                       loff_t off);
+void aio_kernel_init_callback(struct kiocb *iocb,
+                             void (*complete)(u64 user_data, long res),
+                             u64 user_data);
+int aio_kernel_submit(struct kiocb *iocb, unsigned op, void *ptr);
 #else
 static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
 static inline void aio_complete(struct kiocb *iocb, long res, long res2) { }
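
Illustrative sketch (not part of this patch, and based only on the prototypes in the aio.h hunk above, which come from a patch series rather than a settled mainline API): a kernel-side caller might allocate a kiocb, attach a completion callback and submit an in-kernel read with the IOCB_CMD_READ_ITER opcode. The iov_iter setup, error-path ownership of the iocb and the exact completion semantics are assumptions.

#include <linux/aio.h>
#include <linux/fs.h>
#include <linux/slab.h>

static void example_aio_done(u64 user_data, long res)
{
        /* Runs on completion; user_data is the tag passed below. */
}

static int example_kernel_read(struct file *filp, struct iov_iter *iter,
                               size_t nr, loff_t off, u64 tag)
{
        struct kiocb *iocb = aio_kernel_alloc(GFP_KERNEL);

        if (!iocb)
                return -ENOMEM;

        aio_kernel_init_rw(iocb, filp, nr, off);
        aio_kernel_init_callback(iocb, example_aio_done, tag);

        /* Presumably queues the read and completes via the callback. */
        return aio_kernel_submit(iocb, IOCB_CMD_READ_ITER, iter);
}
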
index 43ec7e247a8086972ae7ef0e87efde66dd4ac0e7..682df0e1954a96718ae1e439db73441681101145 100644 (file)
@@ -30,7 +30,6 @@ struct amba_device {
        struct device           dev;
        struct resource         res;
        struct clk              *pclk;
-       u64                     dma_mask;
        unsigned int            periphid;
        unsigned int            irq[AMBA_NR_IRQS];
 };
@@ -131,7 +130,6 @@ struct amba_device name##_device = {                                \
 struct amba_device name##_device = {                           \
        .dev = __AMBA_DEV(busid, data, ~0ULL),                  \
        .res = DEFINE_RES_MEM(base, SZ_4K),                     \
-       .dma_mask = ~0ULL,                                      \
        .irq = irqs,                                            \
        .periphid = id,                                         \
 }
index 53b77949c79d4c84b6595614206857e2b6796b1f..5f9cd963213dd3a3150abd3e34f62a6925096053 100644 (file)
@@ -100,6 +100,9 @@ struct backlight_device {
        /* The framebuffer notifier block */
        struct notifier_block fb_notif;
 
+       /* list entry of all registered backlight devices */
+       struct list_head entry;
+
        struct device dev;
 };
 
@@ -123,6 +126,7 @@ extern void devm_backlight_device_unregister(struct device *dev,
                                        struct backlight_device *bd);
 extern void backlight_force_update(struct backlight_device *bd,
                                   enum backlight_update_reason reason);
+extern bool backlight_device_registered(enum backlight_type type);
 
 #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev)
 
index ec48bac5b039d25cd74829b1ba4c6a1506412dec..4fd52534259697abc49023fe28a7b30cbaf1cc23 100644 (file)
@@ -307,6 +307,14 @@ extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
 extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
+static inline ssize_t bvec_length(const struct bio_vec *bvec, unsigned long nr)
+{
+       ssize_t bytes = 0;
+       while (nr--)
+               bytes += (bvec++)->bv_len;
+       return bytes;
+}
+
 #ifdef CONFIG_BLK_CGROUP
 int bio_associate_current(struct bio *bio);
 void bio_disassociate_task(struct bio *bio);
index a3b6b82108b9ad239dd1dda1abe6c8e3f585d3b3..5a1c8b71ccd84616b0471b833ee909c9d794be5f 100644 (file)
@@ -4,8 +4,11 @@
 
 #ifdef __KERNEL__
 #define BIT(nr)                        (1UL << (nr))
+#define BIT_ULL(nr)            (1ULL << (nr))
 #define BIT_MASK(nr)           (1UL << ((nr) % BITS_PER_LONG))
 #define BIT_WORD(nr)           ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr)       (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr)       ((nr) / BITS_PER_LONG_LONG)
 #define BITS_PER_BYTE          8
 #define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
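
Illustrative sketch (not part of this patch): bitops.h gains 64-bit counterparts of BIT()/BIT_MASK()/BIT_WORD(). The ULL variants matter for bit numbers of 32 and above, which would overflow the plain unsigned-long forms on 32-bit architectures; the register and field names below are made up.

#include <linux/bitops.h>

#define EXAMPLE_STATUS_FAULT    BIT_ULL(40)     /* bit 40 of a 64-bit register */

static bool example_fault_set(u64 status)
{
        return (status & EXAMPLE_STATUS_FAULT) != 0;
}
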
index fa1abeb45b7602a4f0c1a4098f05f63d7a075281..1bea25f14796f8fabed2b310adde5316fe893af1 100644 (file)
@@ -176,7 +176,6 @@ enum rq_flag_bits {
        __REQ_FLUSH_SEQ,        /* request for flush sequence */
        __REQ_IO_STAT,          /* account I/O stat */
        __REQ_MIXED_MERGE,      /* merge of different types, fail separately */
-       __REQ_KERNEL,           /* direct IO to kernel pages */
        __REQ_PM,               /* runtime pm request */
        __REQ_NR_BITS,          /* stops here */
 };
@@ -227,7 +226,6 @@ enum rq_flag_bits {
 #define REQ_IO_STAT            (1 << __REQ_IO_STAT)
 #define REQ_MIXED_MERGE                (1 << __REQ_MIXED_MERGE)
 #define REQ_SECURE             (1 << __REQ_SECURE)
-#define REQ_KERNEL             (1 << __REQ_KERNEL)
 #define REQ_PM                 (1 << __REQ_PM)
 
 #endif /* __LINUX_BLK_TYPES_H */
index 842de225055fc5b8c769c59ff37e5afa81574f38..ded429966c1f447db9106359b174fb742fd3fe54 100644 (file)
 #define __visible __attribute__((externally_visible))
 #endif
 
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ * Fixed in GCC 4.8.2 and later versions.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#if GCC_VERSION <= 40801
+# define asm_volatile_goto(x...)       do { asm goto(x); asm (""); } while (0)
+#else
+# define asm_volatile_goto(x...)       do { asm goto(x); } while (0)
+#endif
 
 #ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
 #if GCC_VERSION >= 40400
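
Illustrative sketch (not part of this patch): the compiler-gcc change wraps "asm goto" so that a compiler barrier follows it on GCC releases affected by bug 58670 (fixed in 4.8.2 and later, per the comment above). A caller simply uses the macro in place of a bare "asm goto"; the empty asm template and the label name below are placeholders, not code from this merge.

static inline bool example_asm_goto(void)
{
        /* Using the wrapper means the barrier workaround above is
         * applied automatically on affected compilers. */
        asm_volatile_goto("" : : : : t_yes);
        return false;
t_yes:
        return true;
}
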
index 801ff9e73679a00d7f945d82253454a236319966..3434ef7de017de0d5a33e7dd5dbfb173678c58c0 100644 (file)
@@ -185,19 +185,6 @@ extern void cpu_hotplug_enable(void);
 void clear_tasks_mm_cpumask(int cpu);
 int cpu_down(unsigned int cpu);
 
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-extern void cpu_hotplug_driver_lock(void);
-extern void cpu_hotplug_driver_unlock(void);
-#else
-static inline void cpu_hotplug_driver_lock(void)
-{
-}
-
-static inline void cpu_hotplug_driver_unlock(void)
-{
-}
-#endif
-
 #else          /* CONFIG_HOTPLUG_CPU */
 
 static inline void cpu_hotplug_begin(void) {}
index fcabc42d66ab413a38730cb78d8eedc9bda91084..fb949acf5a07edd5243c0a7603b74cda8545b615 100644 (file)
@@ -93,8 +93,16 @@ struct cpufreq_policy {
 #define CPUFREQ_SHARED_TYPE_ALL         (2) /* All dependent CPUs should set freq */
 #define CPUFREQ_SHARED_TYPE_ANY         (3) /* Freq can be set from any dependent CPU*/
 
+#ifdef CONFIG_CPU_FREQ
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
 void cpufreq_cpu_put(struct cpufreq_policy *policy);
+#else
+static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+       return NULL;
+}
+static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
+#endif
 
 static inline bool policy_is_shared(struct cpufreq_policy *policy)
 {
@@ -180,13 +188,6 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 struct cpufreq_driver {
        char                    name[CPUFREQ_NAME_LEN];
        u8                      flags;
-       /*
-        * This should be set by platforms having multiple clock-domains, i.e.
-        * supporting multiple policies. With this sysfs directories of governor
-        * would be created in cpu/cpu<num>/cpufreq/ directory and so they can
-        * use the same governor with different tunables for different clusters.
-        */
-       bool                    have_governor_per_policy;
 
        /* needed by all drivers */
        int     (*init)         (struct cpufreq_policy *policy);
@@ -211,13 +212,22 @@ struct cpufreq_driver {
 };
 
 /* flags */
-#define CPUFREQ_STICKY         0x01    /* the driver isn't removed even if
-                                        * all ->init() calls failed */
-#define CPUFREQ_CONST_LOOPS    0x02    /* loops_per_jiffy or other kernel
-                                        * "constants" aren't affected by
-                                        * frequency transitions */
-#define CPUFREQ_PM_NO_WARN     0x04    /* don't warn on suspend/resume speed
-                                        * mismatches */
+#define CPUFREQ_STICKY         (1 << 0)        /* driver isn't removed even if
+                                                  all ->init() calls failed */
+#define CPUFREQ_CONST_LOOPS    (1 << 1)        /* loops_per_jiffy or other
+                                                  kernel "constants" aren't
+                                                  affected by frequency
+                                                  transitions */
+#define CPUFREQ_PM_NO_WARN     (1 << 2)        /* don't warn on suspend/resume
+                                                  speed mismatches */
+
+/*
+ * This should be set by platforms having multiple clock-domains, i.e.
+ * supporting multiple policies. With this sysfs directories of governor would
+ * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same
+ * governor with different tunables for different clusters.
+ */
+#define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3)
 
 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
@@ -240,6 +250,13 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
        return;
 }
 
+static inline void
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+{
+       cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+                       policy->cpuinfo.max_freq);
+}
+
 /*********************************************************************
  *                     CPUFREQ NOTIFIER INTERFACE                    *
  *********************************************************************/
@@ -392,6 +409,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
 
 int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table);
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
 
 int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                                   struct cpufreq_frequency_table *table,
@@ -407,8 +425,20 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
 
 /* the following are really really optional */
 extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
+extern struct freq_attr *cpufreq_generic_attr[];
 void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
                                      unsigned int cpu);
 void cpufreq_frequency_table_put_attr(unsigned int cpu);
+int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
+                                     struct cpufreq_frequency_table *table);
+
+int cpufreq_generic_init(struct cpufreq_policy *policy,
+               struct cpufreq_frequency_table *table,
+               unsigned int transition_latency);
+static inline int cpufreq_generic_exit(struct cpufreq_policy *policy)
+{
+       cpufreq_frequency_table_put_attr(policy->cpu);
+       return 0;
+}
 
 #endif /* _LINUX_CPUFREQ_H */
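
Illustrative sketch (not part of this patch): the cpufreq.h changes turn the driver flags into (1 << n) values, fold have_governor_per_policy into CPUFREQ_HAVE_GOVERNOR_PER_POLICY, and add generic helpers (cpufreq_generic_init(), cpufreq_generic_exit(), cpufreq_generic_frequency_table_verify(), cpufreq_generic_attr). A table-based driver might wire itself to those helpers as below; the frequencies, the driver name and the struct fields not shown in this diff (->verify, ->exit, ->attr) are taken from the contemporary cpufreq API as assumptions, and ->target/->get are omitted.

#include <linux/cpufreq.h>

static struct cpufreq_frequency_table example_freq_table[] = {
        { .frequency = 500000 },                /* kHz */
        { .frequency = 1000000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        /* Registers the table and sets limits plus transition latency. */
        return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
}

static struct cpufreq_driver example_cpufreq_driver = {
        .name   = "example",
        .flags  = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
        .init   = example_cpufreq_init,
        .exit   = cpufreq_generic_exit,
        .verify = cpufreq_generic_frequency_table_verify,
        .attr   = cpufreq_generic_attr,
        /* ->target / ->get omitted from this sketch */
};
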
index 59066e0b4ff134fde5679d582246e942df9f86fc..716c3760ee3970908577ccdb4ea32fa0275d7e33 100644 (file)
@@ -224,6 +224,7 @@ static inline int dname_external(const struct dentry *dentry)
 extern void d_instantiate(struct dentry *, struct inode *);
 extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
 extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
+extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
 extern void __d_drop(struct dentry *dentry);
 extern void d_drop(struct dentry *dentry);
 extern void d_delete(struct dentry *);
index 5f1ab92107e63b1889da7e544189f8b73cc740fb..7a7cc74d7f27e9901e59d2a61b850c0ec6acbe9e 100644 (file)
@@ -15,7 +15,7 @@
 
 #include <linux/device.h>
 #include <linux/notifier.h>
-#include <linux/opp.h>
+#include <linux/pm_opp.h>
 
 #define DEVFREQ_NAME_LEN 16
 
@@ -187,7 +187,7 @@ extern int devfreq_suspend_device(struct devfreq *devfreq);
 extern int devfreq_resume_device(struct devfreq *devfreq);
 
 /* Helper functions for devfreq user device driver with OPP. */
-extern struct opp *devfreq_recommended_opp(struct device *dev,
+extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
                                           unsigned long *freq, u32 flags);
 extern int devfreq_register_opp_notifier(struct device *dev,
                                         struct devfreq *devfreq);
@@ -238,7 +238,7 @@ static inline int devfreq_resume_device(struct devfreq *devfreq)
        return 0;
 }
 
-static inline struct opp *devfreq_recommended_opp(struct device *dev,
+static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
                                           unsigned long *freq, u32 flags)
 {
        return ERR_PTR(-EINVAL);
index 3a8d0a2af6077b45acc0126f7f6c57a3661caf7f..fd4aee29ad10caa5bd8073a31c7827baa85d11ae 100644 (file)
@@ -97,6 +97,30 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
 }
 #endif
 
+/*
+ * Set both the DMA mask and the coherent DMA mask to the same thing.
+ * Note that we don't check the return value from dma_set_coherent_mask()
+ * as the DMA API guarantees that the coherent DMA mask can be set to
+ * the same or smaller than the streaming DMA mask.
+ */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+       int rc = dma_set_mask(dev, mask);
+       if (rc == 0)
+               dma_set_coherent_mask(dev, mask);
+       return rc;
+}
+
+/*
+ * Similar to the above, except it deals with the case where the device
+ * does not have dev->dma_mask appropriately setup.
+ */
+static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
+{
+       dev->dma_mask = &dev->coherent_dma_mask;
+       return dma_set_mask_and_coherent(dev, mask);
+}
+
 extern u64 dma_get_required_mask(struct device *dev);
 
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
@@ -129,6 +153,13 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
                return -EIO;
 }
 
+#ifndef dma_max_pfn
+static inline unsigned long dma_max_pfn(struct device *dev)
+{
+       return *dev->dma_mask >> PAGE_SHIFT;
+}
+#endif
+
 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
 {
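
Illustrative sketch (not part of this patch): dma-mapping.h gains dma_set_mask_and_coherent() and dma_coerce_mask_and_coherent() so drivers no longer open-code the dma_set_mask()/dma_set_coherent_mask() pair. A typical probe-time pattern the new helper enables is trying a 64-bit mask first and falling back to 32-bit; the device pointer is whatever the bus handed the driver.

#include <linux/dma-mapping.h>

static int example_set_dma_masks(struct device *dev)
{
        int ret;

        /* Prefer a 64-bit mask, fall back to 32-bit if unsupported. */
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret)
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

        return ret;
}
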
index 0bc727534108d5a2d5d527e75eaa8020a3ccd239..4b460a683968670f5eaf836fde0ff16e88e39d9a 100644 (file)
@@ -45,13 +45,13 @@ static inline int dma_submit_error(dma_cookie_t cookie)
 
 /**
  * enum dma_status - DMA transaction status
- * @DMA_SUCCESS: transaction completed successfully
+ * @DMA_COMPLETE: transaction completed
  * @DMA_IN_PROGRESS: transaction not yet processed
  * @DMA_PAUSED: transaction is paused
  * @DMA_ERROR: transaction failed
  */
 enum dma_status {
-       DMA_SUCCESS,
+       DMA_COMPLETE,
        DMA_IN_PROGRESS,
        DMA_PAUSED,
        DMA_ERROR,
@@ -979,10 +979,10 @@ static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
 {
        if (last_complete <= last_used) {
                if ((cookie <= last_complete) || (cookie > last_used))
-                       return DMA_SUCCESS;
+                       return DMA_COMPLETE;
        } else {
                if ((cookie <= last_complete) && (cookie > last_used))
-                       return DMA_SUCCESS;
+                       return DMA_COMPLETE;
        }
        return DMA_IN_PROGRESS;
 }
@@ -1013,11 +1013,11 @@ static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_typ
 }
 static inline enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 {
-       return DMA_SUCCESS;
+       return DMA_COMPLETE;
 }
 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 {
-       return DMA_SUCCESS;
+       return DMA_COMPLETE;
 }
 static inline void dma_issue_pending_all(void)
 {
index d8b512496e50c155e99712f5226e115159beabc1..fc4a9aa7dd82c7a26e69ac21cce27bdc399dc9b1 100644 (file)
 #include <asm/unaligned.h>
 
 #ifdef __KERNEL__
-extern __be16          eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 extern const struct header_ops eth_header_ops;
 
-extern int eth_header(struct sk_buff *skb, struct net_device *dev,
-                     unsigned short type,
-                     const void *daddr, const void *saddr, unsigned len);
-extern int eth_rebuild_header(struct sk_buff *skb);
-extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
-extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
-extern void eth_header_cache_update(struct hh_cache *hh,
-                                   const struct net_device *dev,
-                                   const unsigned char *haddr);
-extern int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
-extern void eth_commit_mac_addr_change(struct net_device *dev, void *p);
-extern int eth_mac_addr(struct net_device *dev, void *p);
-extern int eth_change_mtu(struct net_device *dev, int new_mtu);
-extern int eth_validate_addr(struct net_device *dev);
-
-
-
-extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
+              const void *daddr, const void *saddr, unsigned len);
+int eth_rebuild_header(struct sk_buff *skb);
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
+                    __be16 type);
+void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
+                            const unsigned char *haddr);
+int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
+void eth_commit_mac_addr_change(struct net_device *dev, void *p);
+int eth_mac_addr(struct net_device *dev, void *p);
+int eth_change_mtu(struct net_device *dev, int new_mtu);
+int eth_validate_addr(struct net_device *dev);
+
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
                                            unsigned int rxqs);
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
index e460ef8319841dd83d6f6f1eb8653dfd5452d6e0..5009fa16b5d8f08369ccdcb6537bdeb426598d0a 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/if_fc.h>
 
 #ifdef __KERNEL__
-extern struct net_device *alloc_fcdev(int sizeof_priv);
+struct net_device *alloc_fcdev(int sizeof_priv);
 #endif
 
 #endif /* _LINUX_FCDEVICE_H */
index 155bafd9e886607f5ee6b9bd824c2a1fa8ec6c32..9a79f0106da1a66ea0f7f647468fc3fd5c351108 100644 (file)
 #include <linux/if_fddi.h>
 
 #ifdef __KERNEL__
-extern __be16  fddi_type_trans(struct sk_buff *skb,
-                               struct net_device *dev);
-extern int fddi_change_mtu(struct net_device *dev, int new_mtu);
-extern struct net_device *alloc_fddidev(int sizeof_priv);
+__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int fddi_change_mtu(struct net_device *dev, int new_mtu);
+struct net_device *alloc_fddidev(int sizeof_priv);
 #endif
 
 #endif /* _LINUX_FDDIDEVICE_H */
index a6ac84871d6d415eb0e671b45daeddacbc7ad4a3..ff4e40cd45b1dcb15c66f1e178b655df7c40c0eb 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/atomic.h>
 #include <linux/compat.h>
+#include <linux/workqueue.h>
 #include <uapi/linux/filter.h>
 
 #ifdef CONFIG_COMPAT
@@ -25,15 +26,19 @@ struct sk_filter
 {
        atomic_t                refcnt;
        unsigned int            len;    /* Number of filter blocks */
+       struct rcu_head         rcu;
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
                                            const struct sock_filter *filter);
-       struct rcu_head         rcu;
-       struct sock_filter      insns[0];
+       union {
+               struct sock_filter      insns[0];
+               struct work_struct      work;
+       };
 };
 
-static inline unsigned int sk_filter_len(const struct sk_filter *fp)
+static inline unsigned int sk_filter_size(unsigned int proglen)
 {
-       return fp->len * sizeof(struct sock_filter) + sizeof(*fp);
+       return max(sizeof(struct sk_filter),
+                  offsetof(struct sk_filter, insns[proglen]));
 }
 
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
@@ -67,11 +72,13 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 }
 #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
 #else
+#include <linux/slab.h>
 static inline void bpf_jit_compile(struct sk_filter *fp)
 {
 }
 static inline void bpf_jit_free(struct sk_filter *fp)
 {
+       kfree(fp);
 }
 #define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
 #endif
index 3f40547ba1917cd038f085bcdb6c6e577a9d4538..4c743ed2e46e5bb771911fb6b28a9400b7c47d5b 100644 (file)
@@ -182,8 +182,6 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 #define READ                   0
 #define WRITE                  RW_MASK
 #define READA                  RWA_MASK
-#define KERNEL_READ            (READ|REQ_KERNEL)
-#define KERNEL_WRITE           (WRITE|REQ_KERNEL)
 
 #define READ_SYNC              (READ | REQ_SYNC)
 #define WRITE_SYNC             (WRITE | REQ_SYNC | REQ_NOIDLE)
@@ -291,25 +289,108 @@ struct address_space;
 struct writeback_control;
 
 struct iov_iter {
-       const struct iovec *iov;
+       struct iov_iter_ops *ops;
+       unsigned long data;
        unsigned long nr_segs;
        size_t iov_offset;
        size_t count;
 };
 
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes);
-void iov_iter_advance(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-size_t iov_iter_single_seg_count(const struct iov_iter *i);
+struct iov_iter_ops {
+       size_t (*ii_copy_to_user_atomic)(struct page *, struct iov_iter *,
+                                        unsigned long, size_t);
+       size_t (*ii_copy_to_user)(struct page *, struct iov_iter *,
+                                 unsigned long, size_t, int);
+       size_t (*ii_copy_from_user_atomic)(struct page *, struct iov_iter *,
+                                          unsigned long, size_t);
+       size_t (*ii_copy_from_user)(struct page *, struct iov_iter *,
+                                         unsigned long, size_t);
+       void (*ii_advance)(struct iov_iter *, size_t);
+       int (*ii_fault_in_readable)(struct iov_iter *, size_t);
+       size_t (*ii_single_seg_count)(const struct iov_iter *);
+       int (*ii_shorten)(struct iov_iter *, size_t);
+};
+
+static inline size_t iov_iter_copy_to_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_to_user_atomic(page, i, offset, bytes);
+}
+static inline size_t __iov_iter_copy_to_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_to_user(page, i, offset, bytes, 0);
+}
+static inline size_t iov_iter_copy_to_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_to_user(page, i, offset, bytes, 1);
+}
+static inline size_t iov_iter_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_from_user_atomic(page, i, offset, bytes);
+}
+static inline size_t iov_iter_copy_from_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       return i->ops->ii_copy_from_user(page, i, offset, bytes);
+}
+static inline void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+       return i->ops->ii_advance(i, bytes);
+}
+static inline int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       return i->ops->ii_fault_in_readable(i, bytes);
+}
+static inline size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+       return i->ops->ii_single_seg_count(i);
+}
+static inline int iov_iter_shorten(struct iov_iter *i, size_t count)
+{
+       return i->ops->ii_shorten(i, count);
+}
+
+#ifdef CONFIG_BLOCK
+extern struct iov_iter_ops ii_bvec_ops;
+
+struct bio_vec;
+static inline void iov_iter_init_bvec(struct iov_iter *i,
+                                     struct bio_vec *bvec,
+                                     unsigned long nr_segs,
+                                     size_t count, size_t written)
+{
+       i->ops = &ii_bvec_ops;
+       i->data = (unsigned long)bvec;
+       i->nr_segs = nr_segs;
+       i->iov_offset = 0;
+       i->count = count + written;
+
+       iov_iter_advance(i, written);
+}
+
+static inline int iov_iter_has_bvec(struct iov_iter *i)
+{
+       return i->ops == &ii_bvec_ops;
+}
+
+static inline struct bio_vec *iov_iter_bvec(struct iov_iter *i)
+{
+       BUG_ON(!iov_iter_has_bvec(i));
+       return (struct bio_vec *)i->data;
+}
+#endif
+
+extern struct iov_iter_ops ii_iovec_ops;
 
 static inline void iov_iter_init(struct iov_iter *i,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count, size_t written)
 {
-       i->iov = iov;
+       i->ops = &ii_iovec_ops;
+       i->data = (unsigned long)iov;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count + written;
@@ -317,6 +398,17 @@ static inline void iov_iter_init(struct iov_iter *i,
        iov_iter_advance(i, written);
 }
 
+static inline int iov_iter_has_iovec(struct iov_iter *i)
+{
+       return i->ops == &ii_iovec_ops;
+}
+
+static inline struct iovec *iov_iter_iovec(struct iov_iter *i)
+{
+       BUG_ON(!iov_iter_has_iovec(i));
+       return (struct iovec *)i->data;
+}
+
 static inline size_t iov_iter_count(struct iov_iter *i)
 {
        return i->count;
@@ -369,8 +461,8 @@ struct address_space_operations {
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, gfp_t);
        void (*freepage)(struct page *);
-       ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-                       loff_t offset, unsigned long nr_segs);
+       ssize_t (*direct_IO)(int, struct kiocb *, struct iov_iter *iter,
+                       loff_t offset);
        int (*get_xip_mem)(struct address_space *, pgoff_t, int,
                                                void **, unsigned long *);
        /*
@@ -1529,7 +1621,9 @@ struct file_operations {
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*read_iter) (struct kiocb *, struct iov_iter *, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
+       ssize_t (*write_iter) (struct kiocb *, struct iov_iter *, loff_t);
        int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
@@ -1554,6 +1648,18 @@ struct file_operations {
        int (*show_fdinfo)(struct seq_file *m, struct file *f);
 };
 
+static inline int file_readable(struct file *filp)
+{
+       return filp && (filp->f_op->read || filp->f_op->aio_read ||
+                       filp->f_op->read_iter);
+}
+
+static inline int file_writable(struct file *filp)
+{
+       return filp && (filp->f_op->write || filp->f_op->aio_write ||
+                       filp->f_op->write_iter);
+}
+
 struct inode_operations {
        struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
        void * (*follow_link) (struct dentry *, struct nameidata *);
@@ -2292,6 +2398,11 @@ static inline void allow_write_access(struct file *file)
        if (file)
                atomic_inc(&file_inode(file)->i_writecount);
 }
+static inline bool inode_is_open_for_write(const struct inode *inode)
+{
+       return atomic_read(&inode->i_writecount) > 0;
+}
+
 #ifdef CONFIG_IMA
 static inline void i_readcount_dec(struct inode *inode)
 {
@@ -2398,25 +2509,36 @@ extern int sb_min_blocksize(struct super_block *, int);
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
-               unsigned long size, pgoff_t pgoff);
-extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
+                               unsigned long size, pgoff_t pgoff);
+extern int file_read_iter_actor(read_descriptor_t *desc, struct page *page,
+                               unsigned long offset, unsigned long size);
 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
 extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *,
+               loff_t);
 extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
                loff_t *);
+extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *,
+               loff_t *);
 extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
+extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *,
+               loff_t);
 extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
                unsigned long *, loff_t, loff_t *, size_t, size_t);
+extern ssize_t generic_file_direct_write_iter(struct kiocb *, struct iov_iter *,
+               loff_t, loff_t *, size_t);
 extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
                unsigned long, loff_t, loff_t *, size_t, ssize_t);
+extern ssize_t generic_file_buffered_write_iter(struct kiocb *,
+               struct iov_iter *, loff_t, loff_t *, size_t, ssize_t);
 extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
 extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
 extern int generic_segment_checks(const struct iovec *iov,
                unsigned long *nr_segs, size_t *count, int access_flags);
 
 /* fs/block_dev.c */
-extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t pos);
+extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                loff_t pos);
 extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                        int datasync);
 extern void block_sync_page(struct page *page);
@@ -2473,16 +2595,16 @@ enum {
 void dio_end_io(struct bio *bio, int error);
 
 ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-       struct block_device *bdev, const struct iovec *iov, loff_t offset,
-       unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-       dio_submit_t submit_io, int flags);
+       struct block_device *bdev, struct iov_iter *iter, loff_t offset,
+       get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io,
+       int flags);
 
 static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
-               struct inode *inode, const struct iovec *iov, loff_t offset,
-               unsigned long nr_segs, get_block_t get_block)
+               struct inode *inode, struct iov_iter *iter, loff_t offset,
+               get_block_t get_block)
 {
-       return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-                                   offset, nr_segs, get_block, NULL, NULL,
+       return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+                                   offset, get_block, NULL, NULL,
                                    DIO_LOCKING | DIO_SKIP_HOLES);
 }
 #endif
@@ -2502,6 +2624,7 @@ extern int __page_symlink(struct inode *inode, const char *symname, int len,
                int nofs);
 extern int page_symlink(struct inode *inode, const char *symname, int len);
 extern const struct inode_operations page_symlink_inode_operations;
+extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
 extern int generic_readlink(struct dentry *, char __user *, int);
 extern void generic_fillattr(struct inode *, struct kstat *);
 extern int vfs_getattr(struct path *, struct kstat *);
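
Illustrative sketch (not part of this patch): the fs.h changes route I/O through struct iov_iter; struct file_operations gains ->read_iter()/->write_iter(), generic_file_read_iter()/generic_file_write_iter() are added alongside the aio variants, and iov_iter becomes ops-based so a bio_vec-backed iterator (iov_iter_init_bvec()) can travel the same paths. A filesystem might wire its file operations to the new generic helpers as below; the fops name is made up and methods a real filesystem needs are omitted.

#include <linux/fs.h>

static const struct file_operations example_file_ops = {
        .llseek     = generic_file_llseek,
        /* legacy iovec-based entry points */
        .aio_read   = generic_file_aio_read,
        .aio_write  = generic_file_aio_write,
        /* iov_iter-based entry points added above */
        .read_iter  = generic_file_read_iter,
        .write_iter = generic_file_write_iter,
        .mmap       = generic_file_mmap,
};

static void example_iter_over_bvec(struct iov_iter *iter, struct bio_vec *bvec,
                                   unsigned long nr_segs, size_t count)
{
        /* Build an iov_iter that walks kernel pages instead of user iovecs. */
        iov_iter_init_bvec(iter, bvec, nr_segs, count, 0);
}
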
index 7823e9ef995e2beaaa2b8e7a26a3c9619ee2e370..771484993ca7c662e6dc07c115e421050459256a 100644 (file)
@@ -308,36 +308,6 @@ struct fscache_cache_ops {
        void (*dissociate_pages)(struct fscache_cache *cache);
 };
 
-/*
- * data file or index object cookie
- * - a file will only appear in one cache
- * - a request to cache a file may or may not be honoured, subject to
- *   constraints such as disk space
- * - indices are created on disk just-in-time
- */
-struct fscache_cookie {
-       atomic_t                        usage;          /* number of users of this cookie */
-       atomic_t                        n_children;     /* number of children of this cookie */
-       atomic_t                        n_active;       /* number of active users of netfs ptrs */
-       spinlock_t                      lock;
-       spinlock_t                      stores_lock;    /* lock on page store tree */
-       struct hlist_head               backing_objects; /* object(s) backing this file/index */
-       const struct fscache_cookie_def *def;           /* definition */
-       struct fscache_cookie           *parent;        /* parent of this entry */
-       void                            *netfs_data;    /* back pointer to netfs */
-       struct radix_tree_root          stores;         /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG     0               /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG     1               /* pages tag: writing to cache */
-
-       unsigned long                   flags;
-#define FSCACHE_COOKIE_LOOKING_UP      0       /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_NO_DATA_YET     1       /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_UNAVAILABLE     2       /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_INVALIDATING    3       /* T if cookie is being invalidated */
-#define FSCACHE_COOKIE_RELINQUISHED    4       /* T if cookie has been relinquished */
-#define FSCACHE_COOKIE_RETIRED         5       /* T if cookie was retired */
-};
-
 extern struct fscache_cookie fscache_fsdef_index;
 
 /*
@@ -400,6 +370,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_IS_LIVE         3       /* T if object is not withdrawn or relinquished */
 #define FSCACHE_OBJECT_IS_LOOKED_UP    4       /* T if object has been looked up */
 #define FSCACHE_OBJECT_IS_AVAILABLE    5       /* T if object has become active */
+#define FSCACHE_OBJECT_RETIRED         6       /* T if object was retired on relinquishment */
 
        struct list_head        cache_link;     /* link in cache->object_list */
        struct hlist_node       cookie_link;    /* link in cookie->backing_objects */
@@ -511,6 +482,11 @@ static inline void fscache_end_io(struct fscache_retrieval *op,
        op->end_io_func(page, op->context, error);
 }
 
+static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
+{
+       atomic_inc(&cookie->n_active);
+}
+
 /**
  * fscache_use_cookie - Request usage of cookie attached to an object
  * @object: Object description
@@ -524,6 +500,16 @@ static inline bool fscache_use_cookie(struct fscache_object *object)
        return atomic_inc_not_zero(&cookie->n_active) != 0;
 }
 
+static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
+{
+       return atomic_dec_and_test(&cookie->n_active);
+}
+
+static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
+{
+       wake_up_atomic_t(&cookie->n_active);
+}
+
 /**
  * fscache_unuse_cookie - Cease usage of cookie attached to an object
  * @object: Object description
@@ -534,8 +520,8 @@ static inline bool fscache_use_cookie(struct fscache_object *object)
 static inline void fscache_unuse_cookie(struct fscache_object *object)
 {
        struct fscache_cookie *cookie = object->cookie;
-       if (atomic_dec_and_test(&cookie->n_active))
-               wake_up_atomic_t(&cookie->n_active);
+       if (__fscache_unuse_cookie(cookie))
+               __fscache_wake_unused_cookie(cookie);
 }
 
 /*
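
A minimal sketch (not part of this patch) of how a cache backend might bracket access to the netfs-owned parts of a cookie with the use/unuse helpers above; example_cache_work() is hypothetical:

static void example_cache_work(struct fscache_object *object)
{
        if (!fscache_use_cookie(object))
                return;         /* the netfs already relinquished the cookie */

        /* ... object->cookie->def and ->netfs_data are safe to use here ... */

        fscache_unuse_cookie(object);
}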
index 19b46458e4e88e3e25e55692a00ace4987897441..115bb81912ccc8759a54d206487cc8936e2a90c7 100644 (file)
@@ -166,6 +166,42 @@ struct fscache_netfs {
        struct list_head                link;           /* internal link */
 };
 
+/*
+ * data file or index object cookie
+ * - a file will only appear in one cache
+ * - a request to cache a file may or may not be honoured, subject to
+ *   constraints such as disk space
+ * - indices are created on disk just-in-time
+ */
+struct fscache_cookie {
+       atomic_t                        usage;          /* number of users of this cookie */
+       atomic_t                        n_children;     /* number of children of this cookie */
+       atomic_t                        n_active;       /* number of active users of netfs ptrs */
+       spinlock_t                      lock;
+       spinlock_t                      stores_lock;    /* lock on page store tree */
+       struct hlist_head               backing_objects; /* object(s) backing this file/index */
+       const struct fscache_cookie_def *def;           /* definition */
+       struct fscache_cookie           *parent;        /* parent of this entry */
+       void                            *netfs_data;    /* back pointer to netfs */
+       struct radix_tree_root          stores;         /* pages to be stored on this cookie */
+#define FSCACHE_COOKIE_PENDING_TAG     0               /* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG     1               /* pages tag: writing to cache */
+
+       unsigned long                   flags;
+#define FSCACHE_COOKIE_LOOKING_UP      0       /* T if non-index cookie being looked up still */
+#define FSCACHE_COOKIE_NO_DATA_YET     1       /* T if new object with no cached data yet */
+#define FSCACHE_COOKIE_UNAVAILABLE     2       /* T if cookie is unavailable (error, etc) */
+#define FSCACHE_COOKIE_INVALIDATING    3       /* T if cookie is being invalidated */
+#define FSCACHE_COOKIE_RELINQUISHED    4       /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_ENABLED         5       /* T if cookie is enabled */
+#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6       /* T if cookie is being en/disabled */
+};
+
+static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
+{
+       return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
+}
+
 /*
  * slow-path functions for when there is actually caching available, and the
  * netfs does actually have a valid token
@@ -181,8 +217,8 @@ extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
 extern struct fscache_cookie *__fscache_acquire_cookie(
        struct fscache_cookie *,
        const struct fscache_cookie_def *,
-       void *);
-extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
+       void *, bool);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
 extern int __fscache_check_consistency(struct fscache_cookie *);
 extern void __fscache_update_cookie(struct fscache_cookie *);
 extern int __fscache_attr_changed(struct fscache_cookie *);
@@ -211,6 +247,9 @@ extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
                                              struct inode *);
 extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
                                       struct list_head *pages);
+extern void __fscache_disable_cookie(struct fscache_cookie *, bool);
+extern void __fscache_enable_cookie(struct fscache_cookie *,
+                                   bool (*)(void *), void *);
 
 /**
  * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -289,6 +328,7 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag)
  * @def: A description of the cache object, including callback operations
  * @netfs_data: An arbitrary piece of data to be kept in the cookie to
  * represent the cache object to the netfs
+ * @enable: Whether or not to enable a data cookie immediately
  *
  * This function is used to inform FS-Cache about part of an index hierarchy
  * that can be used to locate files.  This is done by requesting a cookie for
@@ -301,10 +341,12 @@ static inline
 struct fscache_cookie *fscache_acquire_cookie(
        struct fscache_cookie *parent,
        const struct fscache_cookie_def *def,
-       void *netfs_data)
+       void *netfs_data,
+       bool enable)
 {
-       if (fscache_cookie_valid(parent))
-               return __fscache_acquire_cookie(parent, def, netfs_data);
+       if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
+               return __fscache_acquire_cookie(parent, def, netfs_data,
+                                               enable);
        else
                return NULL;
 }
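
A minimal sketch of a netfs acquiring a data cookie under the new signature; example_super_cookie, example_inode_cookie_def and struct example_inode are hypothetical:

static void example_init_cookie(struct example_inode *ei)
{
        ei->cookie = fscache_acquire_cookie(example_super_cookie,
                                            &example_inode_cookie_def,
                                            ei,
                                            true);      /* enable immediately */
        /* NULL simply means "uncached"; the netfs must cope either way */
}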
@@ -322,7 +364,7 @@ struct fscache_cookie *fscache_acquire_cookie(
  * description.
  */
 static inline
-void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
 {
        if (fscache_cookie_valid(cookie))
                __fscache_relinquish_cookie(cookie, retire);
@@ -341,7 +383,7 @@ void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
 static inline
 int fscache_check_consistency(struct fscache_cookie *cookie)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_check_consistency(cookie);
        else
                return 0;
@@ -360,7 +402,7 @@ int fscache_check_consistency(struct fscache_cookie *cookie)
 static inline
 void fscache_update_cookie(struct fscache_cookie *cookie)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                __fscache_update_cookie(cookie);
 }
 
@@ -407,7 +449,7 @@ void fscache_unpin_cookie(struct fscache_cookie *cookie)
 static inline
 int fscache_attr_changed(struct fscache_cookie *cookie)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_attr_changed(cookie);
        else
                return -ENOBUFS;
@@ -429,7 +471,7 @@ int fscache_attr_changed(struct fscache_cookie *cookie)
 static inline
 void fscache_invalidate(struct fscache_cookie *cookie)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                __fscache_invalidate(cookie);
 }
 
@@ -503,7 +545,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                               void *context,
                               gfp_t gfp)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_read_or_alloc_page(cookie, page, end_io_func,
                                                    context, gfp);
        else
@@ -554,7 +596,7 @@ int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                void *context,
                                gfp_t gfp)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_read_or_alloc_pages(cookie, mapping, pages,
                                                     nr_pages, end_io_func,
                                                     context, gfp);
@@ -585,7 +627,7 @@ int fscache_alloc_page(struct fscache_cookie *cookie,
                       struct page *page,
                       gfp_t gfp)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_alloc_page(cookie, page, gfp);
        else
                return -ENOBUFS;
@@ -634,7 +676,7 @@ int fscache_write_page(struct fscache_cookie *cookie,
                       struct page *page,
                       gfp_t gfp)
 {
-       if (fscache_cookie_valid(cookie))
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
                return __fscache_write_page(cookie, page, gfp);
        else
                return -ENOBUFS;
@@ -744,4 +786,47 @@ void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                __fscache_uncache_all_inode_pages(cookie, inode);
 }
 
+/**
+ * fscache_disable_cookie - Disable a cookie
+ * @cookie: The cookie representing the cache object
+ * @invalidate: Invalidate the backing object
+ *
+ * Disable a cookie from accepting further alloc, read, write, invalidate,
+ * update or acquire operations.  Outstanding operations can still be waited
+ * upon, pages can still be uncached and the cookie can still be relinquished.
+ *
+ * This will not return until all outstanding operations have completed.
+ *
+ * If @invalidate is set, then the backing object will be invalidated and
+ * detached, otherwise it will just be detached.
+ */
+static inline
+void fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
+{
+       if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
+               __fscache_disable_cookie(cookie, invalidate);
+}
+
+/**
+ * fscache_enable_cookie - Reenable a cookie
+ * @cookie: The cookie representing the cache object
+ * @can_enable: A function to permit enablement once lock is held
+ * @data: Data for can_enable()
+ *
+ * Reenable a previously disabled cookie, allowing it to accept further alloc,
+ * read, write, invalidate, update or acquire operations.  An attempt will be
+ * made to immediately reattach the cookie to a backing object.
+ *
+ * The can_enable() function is called (if not NULL) once the enablement lock
+ * is held to rule on whether enablement is still permitted to go ahead.
+ */
+static inline
+void fscache_enable_cookie(struct fscache_cookie *cookie,
+                          bool (*can_enable)(void *data),
+                          void *data)
+{
+       if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
+               __fscache_enable_cookie(cookie, can_enable, data);
+}
+
 #endif /* _LINUX_FSCACHE_H */
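
A hedged sketch of how a netfs might use the new enable/disable pair, for instance one that refuses to cache files open for writing; all example_* names are hypothetical:

static void example_open_for_write(struct example_inode *ei)
{
        /* stop caching and throw away any stale backing data */
        fscache_disable_cookie(ei->cookie, true);
}

static bool example_can_enable(void *data)
{
        struct example_inode *ei = data;

        return !example_open_for_write_count(ei);       /* hypothetical check */
}

static void example_last_writer_gone(struct example_inode *ei)
{
        fscache_enable_cookie(ei->cookie, example_can_enable, ei);
}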
index a9df51f5d54c7b01ff0fc8cac9aa0afe803e8727..519b6e2d769ede04d120f6ea3bc5f848e4978148 100644 (file)
@@ -173,6 +173,21 @@ static inline void hash_del_rcu(struct hlist_node *node)
        hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
                member)
 
+/**
+ * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
+ * to the same bucket in an rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ *
+ * This is the same as hash_for_each_possible_rcu() except that it does
+ * not do any RCU debugging or tracing.
+ */
+#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
+       hlist_for_each_entry_rcu_notrace(obj, \
+               &name[hash_min(key, HASH_BITS(name))], member)
+
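
A minimal lookup sketch using the new _notrace iterator; struct example_entry and example_table are hypothetical, and the caller is assumed to hold rcu_read_lock() (or be in a context where RCU tracing must be avoided):

struct example_entry {
        unsigned long key;
        struct hlist_node node;
};

static DEFINE_HASHTABLE(example_table, 6);

static struct example_entry *example_lookup(unsigned long key)
{
        struct example_entry *e;

        hash_for_each_possible_rcu_notrace(example_table, e, node, key)
                if (e->key == key)
                        return e;
        return NULL;
}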
 /**
  * hash_for_each_possible_safe - iterate over all possible objects hashing to the
  * same bucket safe against removals
index f148e49084106fca84ae61b2880420dc0bc2798d..8ec23fb0b412290c767ab444b564029911ba7786 100644 (file)
@@ -31,11 +31,11 @@ struct hippi_cb {
        __u32   ifield;
 };
 
-extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-extern int hippi_change_mtu(struct net_device *dev, int new_mtu);
-extern int hippi_mac_addr(struct net_device *dev, void *p);
-extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
-extern struct net_device *alloc_hippi_dev(int sizeof_priv);
+__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int hippi_change_mtu(struct net_device *dev, int new_mtu);
+int hippi_mac_addr(struct net_device *dev, void *p);
+int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
+struct net_device *alloc_hippi_dev(int sizeof_priv);
 #endif
 
 #endif /* _LINUX_HIPPIDEVICE_H */
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
new file mode 100644 (file)
index 0000000..1198002
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __LINUX_HOST1X_H
+#define __LINUX_HOST1X_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+enum host1x_class {
+       HOST1X_CLASS_HOST1X = 0x1,
+       HOST1X_CLASS_GR2D = 0x51,
+       HOST1X_CLASS_GR2D_SB = 0x52,
+       HOST1X_CLASS_GR3D = 0x60,
+};
+
+struct host1x_client;
+
+struct host1x_client_ops {
+       int (*init)(struct host1x_client *client);
+       int (*exit)(struct host1x_client *client);
+};
+
+struct host1x_client {
+       struct list_head list;
+       struct device *parent;
+       struct device *dev;
+
+       const struct host1x_client_ops *ops;
+
+       enum host1x_class class;
+       struct host1x_channel *channel;
+
+       struct host1x_syncpt **syncpts;
+       unsigned int num_syncpts;
+};
+
+/*
+ * host1x buffer objects
+ */
+
+struct host1x_bo;
+struct sg_table;
+
+struct host1x_bo_ops {
+       struct host1x_bo *(*get)(struct host1x_bo *bo);
+       void (*put)(struct host1x_bo *bo);
+       dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
+       void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+       void *(*mmap)(struct host1x_bo *bo);
+       void (*munmap)(struct host1x_bo *bo, void *addr);
+       void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
+       void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
+};
+
+struct host1x_bo {
+       const struct host1x_bo_ops *ops;
+};
+
+static inline void host1x_bo_init(struct host1x_bo *bo,
+                                 const struct host1x_bo_ops *ops)
+{
+       bo->ops = ops;
+}
+
+static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
+{
+       return bo->ops->get(bo);
+}
+
+static inline void host1x_bo_put(struct host1x_bo *bo)
+{
+       bo->ops->put(bo);
+}
+
+static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
+                                      struct sg_table **sgt)
+{
+       return bo->ops->pin(bo, sgt);
+}
+
+static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+{
+       bo->ops->unpin(bo, sgt);
+}
+
+static inline void *host1x_bo_mmap(struct host1x_bo *bo)
+{
+       return bo->ops->mmap(bo);
+}
+
+static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
+{
+       bo->ops->munmap(bo, addr);
+}
+
+static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
+{
+       return bo->ops->kmap(bo, pagenum);
+}
+
+static inline void host1x_bo_kunmap(struct host1x_bo *bo,
+                                   unsigned int pagenum, void *addr)
+{
+       bo->ops->kunmap(bo, pagenum, addr);
+}
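
A rough sketch of a caller pinning a buffer object around a hardware operation; how a failed pin is signalled is up to the particular host1x_bo_ops implementation, so treat this as illustrative only:

static int example_map_bo(struct host1x_bo *bo)
{
        struct sg_table *sgt;
        dma_addr_t phys;

        phys = host1x_bo_pin(bo, &sgt);
        /* error reporting depends on the bo implementation's pin() semantics */

        /* ... hand "phys" to the hardware ... */

        host1x_bo_unpin(bo, sgt);
        return 0;
}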
+
+/*
+ * host1x syncpoints
+ */
+
+struct host1x_syncpt;
+struct host1x;
+
+struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
+u32 host1x_syncpt_id(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
+u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
+int host1x_syncpt_incr(struct host1x_syncpt *sp);
+int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
+                      u32 *value);
+struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
+                                           bool client_managed);
+void host1x_syncpt_free(struct host1x_syncpt *sp);
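
A hedged sketch of requesting a syncpoint, bumping it and waiting for the new value; the timeout units follow whatever convention the host1x driver uses, so this is illustrative rather than definitive:

static int example_syncpt_roundtrip(struct device *dev)
{
        struct host1x_syncpt *sp;
        u32 value;
        int err;

        sp = host1x_syncpt_request(dev, false); /* host-managed syncpoint */
        if (!sp)
                return -ENOMEM;

        err = host1x_syncpt_incr(sp);
        if (!err)
                err = host1x_syncpt_wait(sp, host1x_syncpt_read_max(sp),
                                         100 /* timeout, driver-defined units */,
                                         &value);

        host1x_syncpt_free(sp);
        return err;
}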
+
+/*
+ * host1x channel
+ */
+
+struct host1x_channel;
+struct host1x_job;
+
+struct host1x_channel *host1x_channel_request(struct device *dev);
+void host1x_channel_free(struct host1x_channel *channel);
+struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
+void host1x_channel_put(struct host1x_channel *channel);
+int host1x_job_submit(struct host1x_job *job);
+
+/*
+ * host1x job
+ */
+
+struct host1x_reloc {
+       struct host1x_bo *cmdbuf;
+       u32 cmdbuf_offset;
+       struct host1x_bo *target;
+       u32 target_offset;
+       u32 shift;
+       u32 pad;
+};
+
+struct host1x_job {
+       /* When refcount goes to zero, job can be freed */
+       struct kref ref;
+
+       /* List entry */
+       struct list_head list;
+
+       /* Channel where job is submitted to */
+       struct host1x_channel *channel;
+
+       u32 client;
+
+       /* Gathers and their memory */
+       struct host1x_job_gather *gathers;
+       unsigned int num_gathers;
+
+       /* Wait checks to be processed at submit time */
+       struct host1x_waitchk *waitchk;
+       unsigned int num_waitchk;
+       u32 waitchk_mask;
+
+       /* Array of handles to be pinned & unpinned */
+       struct host1x_reloc *relocarray;
+       unsigned int num_relocs;
+       struct host1x_job_unpin_data *unpins;
+       unsigned int num_unpins;
+
+       dma_addr_t *addr_phys;
+       dma_addr_t *gather_addr_phys;
+       dma_addr_t *reloc_addr_phys;
+
+       /* Sync point id, number of increments and end related to the submit */
+       u32 syncpt_id;
+       u32 syncpt_incrs;
+       u32 syncpt_end;
+
+       /* Maximum time to wait for this job */
+       unsigned int timeout;
+
+       /* Index and number of slots used in the push buffer */
+       unsigned int first_get;
+       unsigned int num_slots;
+
+       /* Copy of gathers */
+       size_t gather_copy_size;
+       dma_addr_t gather_copy;
+       u8 *gather_copy_mapped;
+
+       /* Check if register is marked as an address reg */
+       int (*is_addr_reg)(struct device *dev, u32 reg, u32 class);
+
+       /* Request a SETCLASS to this class */
+       u32 class;
+
+       /* Add a channel wait for previous ops to complete */
+       bool serialize;
+};
+
+struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
+                                   u32 num_cmdbufs, u32 num_relocs,
+                                   u32 num_waitchks);
+void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *mem_id,
+                          u32 words, u32 offset);
+struct host1x_job *host1x_job_get(struct host1x_job *job);
+void host1x_job_put(struct host1x_job *job);
+int host1x_job_pin(struct host1x_job *job, struct device *dev);
+void host1x_job_unpin(struct host1x_job *job);
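
A rough sketch of the job flow these declarations imply (allocate, attach a gather, pin, submit); in a real driver the syncpoint fields are filled in before submission and unpinning is deferred until the job completes:

static int example_submit(struct device *dev, struct host1x_channel *channel,
                          struct host1x_bo *cmdbuf, u32 words)
{
        struct host1x_job *job;
        int err;

        job = host1x_job_alloc(channel, 1, 0, 0);
        if (!job)
                return -ENOMEM;

        host1x_job_add_gather(job, cmdbuf, words, 0);
        /* job->syncpt_id, job->syncpt_incrs, job->class, ... set by the caller */

        err = host1x_job_pin(job, dev);
        if (!err) {
                err = host1x_job_submit(job);
                if (err)
                        host1x_job_unpin(job);
                /* on success, unpinning happens once the job has completed */
        }

        host1x_job_put(job);
        return err;
}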
+
+/*
+ * subdevice probe infrastructure
+ */
+
+struct host1x_device;
+
+struct host1x_driver {
+       const struct of_device_id *subdevs;
+       struct list_head list;
+       const char *name;
+
+       int (*probe)(struct host1x_device *device);
+       int (*remove)(struct host1x_device *device);
+};
+
+int host1x_driver_register(struct host1x_driver *driver);
+void host1x_driver_unregister(struct host1x_driver *driver);
+
+struct host1x_device {
+       struct host1x_driver *driver;
+       struct list_head list;
+       struct device dev;
+
+       struct mutex subdevs_lock;
+       struct list_head subdevs;
+       struct list_head active;
+
+       struct mutex clients_lock;
+       struct list_head clients;
+};
+
+static inline struct host1x_device *to_host1x_device(struct device *dev)
+{
+       return container_of(dev, struct host1x_device, dev);
+}
+
+int host1x_device_init(struct host1x_device *device);
+int host1x_device_exit(struct host1x_device *device);
+
+int host1x_client_register(struct host1x_client *client);
+int host1x_client_unregister(struct host1x_client *client);
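
A hedged sketch of an engine driver registering itself as a host1x client from a platform probe routine; all example_* names are hypothetical and <linux/platform_device.h> plus <linux/slab.h> are assumed:

static int example_client_init(struct host1x_client *client) { return 0; }
static int example_client_exit(struct host1x_client *client) { return 0; }

static const struct host1x_client_ops example_client_ops = {
        .init = example_client_init,
        .exit = example_client_exit,
};

static int example_probe(struct platform_device *pdev)
{
        struct host1x_client *client;

        client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
        if (!client)
                return -ENOMEM;

        client->ops = &example_client_ops;
        client->dev = &pdev->dev;
        client->class = HOST1X_CLASS_GR2D;

        return host1x_client_register(client);
}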
+
+int tegra_mipi_calibrate(struct device *device);
+
+#endif
index f346e4d5381ca40ccf3bc19e1374c7fdf25fc77c..da0a680e2f6d759d44c3e6e770e60db1dd8b8bc6 100644 (file)
@@ -38,7 +38,7 @@ static inline int vid_to_reg(int val, u8 vrm)
                return ((val >= 1100) && (val <= 1850) ?
                        ((18499 - val * 10) / 25 + 5) / 10 : -1);
        default:
-               return -1;
+               return -EINVAL;
        }
 }
 
index b2514f70d591d799197cdb2c5050bd131641f925..09354f6c1d63de2b5cb9d5dd9547ffc03a145af2 100644 (file)
 #define _HWMON_H_
 
 struct device;
+struct attribute_group;
 
 struct device *hwmon_device_register(struct device *dev);
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+                                 void *drvdata,
+                                 const struct attribute_group **groups);
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+                                      void *drvdata,
+                                      const struct attribute_group **groups);
 
 void hwmon_device_unregister(struct device *dev);
+void devm_hwmon_device_unregister(struct device *dev);
 
 #endif
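
A minimal sketch of the new groups-based registration from a driver probe path; the attribute and the value it reports are dummies, and <linux/platform_device.h> and <linux/sysfs.h> are assumed:

static ssize_t example_temp_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42000);     /* dummy value, millidegrees C */
}
static DEVICE_ATTR(temp1_input, S_IRUGO, example_temp_show, NULL);

static struct attribute *example_attrs[] = {
        &dev_attr_temp1_input.attr,
        NULL
};

static const struct attribute_group example_group = {
        .attrs = example_attrs,
};
static const struct attribute_group *example_groups[] = {
        &example_group,
        NULL
};

static int example_probe(struct platform_device *pdev)
{
        struct device *hwmon;

        hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, "example",
                                                       NULL, example_groups);
        return IS_ERR(hwmon) ? PTR_ERR(hwmon) : 0;
}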
index 2ab11dc38077c89f7c60d767ed1096b909ab6039..eff50e062be850189ed9e78a9b6058a5eea49b74 100644 (file)
@@ -205,7 +205,6 @@ struct i2c_driver {
  * @name: Indicates the type of the device, usually a chip name that's
  *     generic enough to hide second-sourcing and compatible revisions.
  * @adapter: manages the bus segment hosting this I2C device
- * @driver: device's driver, hence pointer to access routines
  * @dev: Driver model device node for the slave.
  * @irq: indicates the IRQ generated by this device (if any)
  * @detected: member of an i2c_driver.clients list or i2c-core's
@@ -222,7 +221,6 @@ struct i2c_client {
                                        /* _LOWER_ 7 bits               */
        char name[I2C_NAME_SIZE];
        struct i2c_adapter *adapter;    /* the adapter we sit on        */
-       struct i2c_driver *driver;      /* and our access routines      */
        struct device dev;              /* the device structure         */
        int irq;                        /* irq issued by device         */
        struct list_head detected;
index a5b598a79becb99eb3554886cbec7708ee426309..7c1e1ebc0e2396cc7697bccc31d18be5566dc944 100644 (file)
@@ -1391,8 +1391,8 @@ struct ieee80211_vht_operation {
 #define IEEE80211_VHT_CAP_RXSTBC_MASK                          0x00000700
 #define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE                        0x00000800
 #define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE                        0x00001000
-#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX              0x00006000
-#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX              0x00030000
+#define IEEE80211_VHT_CAP_BEAMFORMEE_STS_MAX                   0x0000e000
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX              0x00070000
 #define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE                        0x00080000
 #define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE                        0x00100000
 #define IEEE80211_VHT_CAP_VHT_TXOP_PS                          0x00200000
index 79640e015a86c6eb4a8bab2c0d2ee78eaf241558..0d678aefe69df6155b683ce039a42e4b7c4a1cab 100644 (file)
@@ -147,25 +147,27 @@ struct in_ifaddr {
        unsigned long           ifa_tstamp; /* updated timestamp */
 };
 
-extern int register_inetaddr_notifier(struct notifier_block *nb);
-extern int unregister_inetaddr_notifier(struct notifier_block *nb);
+int register_inetaddr_notifier(struct notifier_block *nb);
+int unregister_inetaddr_notifier(struct notifier_block *nb);
 
-extern void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
-                                       struct ipv4_devconf *devconf);
+void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
+                                struct ipv4_devconf *devconf);
 
-extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
 static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
 {
        return __ip_dev_find(net, addr, true);
 }
 
-extern int             inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-extern int             devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern void            devinet_init(void);
-extern struct in_device        *inetdev_by_index(struct net *, int);
-extern __be32          inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
-extern __be32          inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope);
-extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask);
+int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
+int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+void devinet_init(void);
+struct in_device *inetdev_by_index(struct net *, int);
+__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
+__be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local,
+                        int scope);
+struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+                                   __be32 mask);
 
 static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
 {
@@ -218,7 +220,7 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
        return rtnl_dereference(dev->ip_ptr);
 }
 
-extern void in_dev_finish_destroy(struct in_device *idev);
+void in_dev_finish_destroy(struct in_device *idev);
 
 static inline void in_dev_put(struct in_device *idev)
 {
index 28ea38439313226861c5c6df902b7b92c3cb5969..a80a63cfb70c51cb911ff54293ac447d551c45e5 100644 (file)
@@ -115,16 +115,8 @@ static inline int inet6_iif(const struct sk_buff *skb)
        return IP6CB(skb)->iif;
 }
 
-struct inet6_request_sock {
-       struct in6_addr         loc_addr;
-       struct in6_addr         rmt_addr;
-       struct sk_buff          *pktopts;
-       int                     iif;
-};
-
 struct tcp6_request_sock {
        struct tcp_request_sock   tcp6rsk_tcp;
-       struct inet6_request_sock tcp6rsk_inet6;
 };
 
 struct ipv6_mc_socklist;
@@ -141,8 +133,6 @@ struct ipv6_fl_socklist;
  */
 struct ipv6_pinfo {
        struct in6_addr         saddr;
-       struct in6_addr         rcv_saddr;
-       struct in6_addr         daddr;
        struct in6_pktinfo      sticky_pktinfo;
        const struct in6_addr           *daddr_cache;
 #ifdef CONFIG_IPV6_SUBTREES
@@ -256,48 +246,22 @@ struct tcp6_sock {
 
 extern int inet6_sk_rebuild_header(struct sock *sk);
 
-struct inet6_timewait_sock {
-       struct in6_addr tw_v6_daddr;
-       struct in6_addr tw_v6_rcv_saddr;
-};
-
 struct tcp6_timewait_sock {
        struct tcp_timewait_sock   tcp6tw_tcp;
-       struct inet6_timewait_sock tcp6tw_inet6;
 };
 
-static inline struct inet6_timewait_sock *inet6_twsk(const struct sock *sk)
-{
-       return (struct inet6_timewait_sock *)(((u8 *)sk) +
-                                             inet_twsk(sk)->tw_ipv6_offset);
-}
-
 #if IS_ENABLED(CONFIG_IPV6)
 static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
 {
        return inet_sk(__sk)->pinet6;
 }
 
-static inline struct inet6_request_sock *
-                       inet6_rsk(const struct request_sock *rsk)
-{
-       return (struct inet6_request_sock *)(((u8 *)rsk) +
-                                            inet_rsk(rsk)->inet6_rsk_offset);
-}
-
-static inline u32 inet6_rsk_offset(struct request_sock *rsk)
-{
-       return rsk->rsk_ops->obj_size - sizeof(struct inet6_request_sock);
-}
-
 static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
 {
        struct request_sock *req = reqsk_alloc(ops);
 
-       if (req != NULL) {
-               inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req);
-               inet6_rsk(req)->pktopts = NULL;
-       }
+       if (req)
+               inet_rsk(req)->pktopts = NULL;
 
        return req;
 }
@@ -321,21 +285,11 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 #define __ipv6_only_sock(sk)   (inet6_sk(sk)->ipv6only)
 #define ipv6_only_sock(sk)     ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
 
-static inline u16 inet6_tw_offset(const struct proto *prot)
+static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
 {
-       return prot->twsk_prot->twsk_obj_size -
-                       sizeof(struct inet6_timewait_sock);
-}
-
-static inline struct in6_addr *__inet6_rcv_saddr(const struct sock *sk)
-{
-       return likely(sk->sk_state != TCP_TIME_WAIT) ?
-               &inet6_sk(sk)->rcv_saddr : &inet6_twsk(sk)->tw_v6_rcv_saddr;
-}
-
-static inline struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
-{
-       return sk->sk_family == AF_INET6 ? __inet6_rcv_saddr(sk) : NULL;
+       if (sk->sk_family == AF_INET6)
+               return &sk->sk_v6_rcv_saddr;
+       return NULL;
 }
 
 static inline int inet_v6_ipv6only(const struct sock *sk)
@@ -363,28 +317,18 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
        return NULL;
 }
 
-#define __inet6_rcv_saddr(__sk)        NULL
 #define inet6_rcv_saddr(__sk)  NULL
 #define tcp_twsk_ipv6only(__sk)                0
 #define inet_v6_ipv6only(__sk)         0
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 
 #define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif)     \
-       ((inet_sk(__sk)->inet_portpair == (__ports))            &&      \
+       (((__sk)->sk_portpair == (__ports))                     &&      \
         ((__sk)->sk_family == AF_INET6)                        &&      \
-        ipv6_addr_equal(&inet6_sk(__sk)->daddr, (__saddr))     &&      \
-        ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) &&      \
+        ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr))               &&      \
+        ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr))   &&      \
         (!(__sk)->sk_bound_dev_if      ||                              \
           ((__sk)->sk_bound_dev_if == (__dif)))                &&      \
         net_eq(sock_net(__sk), (__net)))
 
-#define INET6_TW_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif)     \
-       ((inet_twsk(__sk)->tw_portpair == (__ports))                    && \
-        ((__sk)->sk_family == AF_INET6)                                && \
-        ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_daddr, (__saddr))     && \
-        ipv6_addr_equal(&inet6_twsk(__sk)->tw_v6_rcv_saddr, (__daddr)) && \
-        (!(__sk)->sk_bound_dev_if      ||                                 \
-         ((__sk)->sk_bound_dev_if == (__dif)))                         && \
-        net_eq(sock_net(__sk), (__net)))
-
 #endif /* _IPV6_H */
index 0e5d9ecdb2b672d901b47f184a4b720e604317e2..cac496b1e279293164066ea0204c87309169bd49 100644 (file)
@@ -31,6 +31,8 @@
 #define GIC_DIST_TARGET                        0x800
 #define GIC_DIST_CONFIG                        0xc00
 #define GIC_DIST_SOFTINT               0xf00
+#define GIC_DIST_SGI_PENDING_CLEAR     0xf10
+#define GIC_DIST_SGI_PENDING_SET       0xf20
 
 #define GICH_HCR                       0x0
 #define GICH_VTR                       0x4
@@ -74,6 +76,11 @@ static inline void gic_init(unsigned int nr, int start,
        gic_init_bases(nr, start, dist, cpu, 0, NULL);
 }
 
+void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
+int gic_get_cpu_id(unsigned int cpu);
+void gic_migrate_target(unsigned int new_cpu_id);
+unsigned long gic_get_sgir_physaddr(void);
+
 #endif /* __ASSEMBLY */
 
 #endif
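
A small illustrative use of the new SGI helpers; the assumption that gic_get_cpu_id() reports failure with a negative value is mine, not the header's:

static void example_kick_cpu(unsigned int cpu, unsigned int sgi)
{
        int id = gic_get_cpu_id(cpu);   /* assumed negative on failure */

        if (id >= 0)
                gic_send_sgi(id, sgi);
}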
index a5079072da663e6b1e426504c3dfa39438e42127..e96be7245717f723486b8ad1bf6d2795dd4bed73 100644 (file)
 
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/bug.h>
+
+extern bool static_key_initialized;
+
+#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized,                 \
+                                   "%s used before call to jump_label_init", \
+                                   __func__)
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
@@ -128,6 +135,7 @@ struct static_key {
 
 static __always_inline void jump_label_init(void)
 {
+       static_key_initialized = true;
 }
 
 static __always_inline bool static_key_false(struct static_key *key)
@@ -146,11 +154,13 @@ static __always_inline bool static_key_true(struct static_key *key)
 
 static inline void static_key_slow_inc(struct static_key *key)
 {
+       STATIC_KEY_CHECK_USE();
        atomic_inc(&key->enabled);
 }
 
 static inline void static_key_slow_dec(struct static_key *key)
 {
+       STATIC_KEY_CHECK_USE();
        atomic_dec(&key->enabled);
 }
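
For context, a minimal sketch of the static-key pattern this check protects; with the new WARN, static_key_slow_inc()/dec() must not run before jump_label_init():

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

static void example_fast_path(void)
{
        if (static_key_false(&example_key))
                example_rarely_taken_branch();  /* hypothetical */
}

static void example_enable_feature(void)
{
        static_key_slow_inc(&example_key);      /* must run after jump_label_init() */
}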
 
index 113788389b3dc25c2de62c32268da828719c7d68..089f70f83e97c9a1adf1087f10d095a02bd3e152 100644 (file)
@@ -23,12 +23,14 @@ struct static_key_deferred {
 };
 static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
+       STATIC_KEY_CHECK_USE();
        static_key_slow_dec(&key->key);
 }
 static inline void
 jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
 {
+       STATIC_KEY_CHECK_USE();
 }
 #endif /* HAVE_JUMP_LABEL */
 #endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
index f279ed9a91631cca7f236d20ef5a29d152332932..13dfd36a329474df228102dd49bafbdf1b5c687d 100644 (file)
@@ -36,4 +36,10 @@ extern int lockref_put_or_lock(struct lockref *);
 extern void lockref_mark_dead(struct lockref *);
 extern int lockref_get_not_dead(struct lockref *);
 
+/* Must be called under spinlock for reliable results */
+static inline int __lockref_is_dead(const struct lockref *l)
+{
+       return ((int)l->count < 0);
+}
+
 #endif /* __LINUX_LOCKREF_H */
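
A small sketch of the intended usage: the lockref's spinlock must be held, as the comment above says; the -ESTALE return is an arbitrary example choice:

static int example_get_ref(struct lockref *ref)
{
        spin_lock(&ref->lock);
        if (__lockref_is_dead(ref)) {
                spin_unlock(&ref->lock);
                return -ESTALE;
        }
        ref->count++;                   /* safe: lock held and not dead */
        spin_unlock(&ref->lock);
        return 0;
}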
index ecc82b37c4ccf00fb863ecf6981de19bbccec52a..b3e7a667e03c24ca5c3d1c54c0db5c7b0bdde052 100644 (file)
@@ -137,47 +137,24 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);
 
-/**
- * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
- * @new: true to enable, false to disable
- *
- * Toggle whether a failed memcg charge should invoke the OOM killer
- * or just return -ENOMEM.  Returns the previous toggle state.
- *
- * NOTE: Any path that enables the OOM killer before charging must
- *       call mem_cgroup_oom_synchronize() afterward to finalize the
- *       OOM handling and clean up.
- */
-static inline bool mem_cgroup_toggle_oom(bool new)
+static inline void mem_cgroup_oom_enable(void)
 {
-       bool old;
-
-       old = current->memcg_oom.may_oom;
-       current->memcg_oom.may_oom = new;
-
-       return old;
+       WARN_ON(current->memcg_oom.may_oom);
+       current->memcg_oom.may_oom = 1;
 }
 
-static inline void mem_cgroup_enable_oom(void)
+static inline void mem_cgroup_oom_disable(void)
 {
-       bool old = mem_cgroup_toggle_oom(true);
-
-       WARN_ON(old == true);
-}
-
-static inline void mem_cgroup_disable_oom(void)
-{
-       bool old = mem_cgroup_toggle_oom(false);
-
-       WARN_ON(old == false);
+       WARN_ON(!current->memcg_oom.may_oom);
+       current->memcg_oom.may_oom = 0;
 }
 
 static inline bool task_in_memcg_oom(struct task_struct *p)
 {
-       return p->memcg_oom.in_memcg_oom;
+       return p->memcg_oom.memcg;
 }
 
-bool mem_cgroup_oom_synchronize(void);
+bool mem_cgroup_oom_synchronize(bool wait);
 
 #ifdef CONFIG_MEMCG_SWAP
 extern int do_swap_account;
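
A rough sketch of the page-fault pattern these helpers are designed for (the real code lives in the arch fault handlers and mm/memory.c; details vary, so treat this as illustrative):

static int example_handle_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long address, unsigned int flags)
{
        int ret;

        if (flags & FAULT_FLAG_USER)
                mem_cgroup_oom_enable();

        ret = handle_mm_fault(mm, vma, address, flags);

        if (flags & FAULT_FLAG_USER) {
                mem_cgroup_oom_disable();
                /* the charge OOMed but the fault was handled: just clean up */
                if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
                        mem_cgroup_oom_synchronize(false);
        }
        return ret;
}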
@@ -402,16 +379,11 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
 {
 }
 
-static inline bool mem_cgroup_toggle_oom(bool new)
-{
-       return false;
-}
-
-static inline void mem_cgroup_enable_oom(void)
+static inline void mem_cgroup_oom_enable(void)
 {
 }
 
-static inline void mem_cgroup_disable_oom(void)
+static inline void mem_cgroup_oom_disable(void)
 {
 }
 
@@ -420,7 +392,7 @@ static inline bool task_in_memcg_oom(struct task_struct *p)
        return false;
 }
 
-static inline bool mem_cgroup_oom_synchronize(void)
+static inline bool mem_cgroup_oom_synchronize(bool wait)
 {
        return false;
 }
index 41ed59276c002fc77b598ed60fb5fc68501e72c6..67c17b5a6f449cc760a28a621daf5f5ef0529586 100644 (file)
@@ -41,6 +41,13 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx,
                unsigned int mode, unsigned int channel,
                u8 ato, bool atox, unsigned int *sample);
 
+#define MC13783_AUDIO_RX0      36
+#define MC13783_AUDIO_RX1      37
+#define MC13783_AUDIO_TX       38
+#define MC13783_SSI_NETWORK    39
+#define MC13783_AUDIO_CODEC    40
+#define MC13783_AUDIO_DAC      41
+
 #define MC13XXX_IRQ_ADCDONE    0
 #define MC13XXX_IRQ_ADCBISDONE 1
 #define MC13XXX_IRQ_TS         2
index b6bdcd66c07d2e758ba677f9c9c1796ada542bc8..e00e9f362fd50641e3dbf79722e06cfa9a752fb5 100644 (file)
 
 #define IMX6Q_GPR5_L2_CLK_STOP                 BIT(8)
 
+#define IMX6Q_GPR8_TX_SWING_LOW                        (0x7f << 25)
+#define IMX6Q_GPR8_TX_SWING_FULL               (0x7f << 18)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB          (0x3f << 12)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB                (0x3f << 6)
+#define IMX6Q_GPR8_TX_DEEMPH_GEN1              (0x3f << 0)
+
 #define IMX6Q_GPR9_TZASC2_BYP                  BIT(1)
 #define IMX6Q_GPR9_TZASC1_BYP                  BIT(0)
 
 #define IMX6Q_GPR12_ARMP_AHB_CLK_EN            BIT(26)
 #define IMX6Q_GPR12_ARMP_ATB_CLK_EN            BIT(25)
 #define IMX6Q_GPR12_ARMP_APB_CLK_EN            BIT(24)
+#define IMX6Q_GPR12_DEVICE_TYPE                        (0xf << 12)
 #define IMX6Q_GPR12_PCIE_CTL_2                 BIT(10)
+#define IMX6Q_GPR12_LOS_LEVEL                  (0x1f << 4)
 
 #define IMX6Q_GPR13_SDMA_STOP_REQ              BIT(30)
 #define IMX6Q_GPR13_CAN2_STOP_REQ              BIT(29)
index 09c2300ddb3723188636867fa9b5c21bddab47df..f7eaf2d60083a5c7d216456e631b1f2b56ea7892 100644 (file)
@@ -31,6 +31,7 @@
 #define I2O_MINOR              166
 #define MICROCODE_MINOR                184
 #define TUN_MINOR              200
+#define CUSE_MINOR             203
 #define MWAVE_MINOR            219     /* ACP/Mwave Modem */
 #define MPT_MINOR              220
 #define MPT2SAS_MINOR          221
@@ -45,6 +46,7 @@
 #define MAPPER_CTRL_MINOR      236
 #define LOOP_CTRL_MINOR                237
 #define VHOST_NET_MINOR                238
+#define UHID_MINOR             239
 #define MISC_DYNAMIC_MINOR     255
 
 struct device;
index cd1fdf75103b7bd26350490cffc7de8e054bc366..8df61bc5da00ff9d751727c42ee93b2c6ad7de9f 100644 (file)
@@ -154,10 +154,6 @@ enum {
        MLX4_CMD_QUERY_IF_STAT   = 0X54,
        MLX4_CMD_SET_IF_STAT     = 0X55,
 
-       /* set port opcode modifiers */
-       MLX4_SET_PORT_PRIO2TC = 0x8,
-       MLX4_SET_PORT_SCHEDULER  = 0x9,
-
        /* register/delete flow steering network rules */
        MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
        MLX4_QP_FLOW_STEERING_DETACH = 0x66,
@@ -182,6 +178,8 @@ enum {
        MLX4_SET_PORT_VLAN_TABLE = 0x3,
        MLX4_SET_PORT_PRIO_MAP  = 0x4,
        MLX4_SET_PORT_GID_TABLE = 0x5,
+       MLX4_SET_PORT_PRIO2TC   = 0x8,
+       MLX4_SET_PORT_SCHEDULER = 0x9,
 };
 
 enum {
index 24ce6bdd540ef65f1a8560ae6f089a0df1e5aeb1..9ad0c18495ad059e57ad77bef5a9579bb581a066 100644 (file)
@@ -155,7 +155,7 @@ enum {
        MLX4_DEV_CAP_FLAG2_RSS_TOP              = 1LL <<  1,
        MLX4_DEV_CAP_FLAG2_RSS_XOR              = 1LL <<  2,
        MLX4_DEV_CAP_FLAG2_FS_EN                = 1LL <<  3,
-       MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN     = 1LL <<  4,
+       MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN      = 1LL <<  4,
        MLX4_DEV_CAP_FLAG2_TS                   = 1LL <<  5,
        MLX4_DEV_CAP_FLAG2_VLAN_CONTROL         = 1LL <<  6,
        MLX4_DEV_CAP_FLAG2_FSM                  = 1LL <<  7,
index 68029b30c3dc89e2d2620fd41101b2ab05069344..5eb4e31af22b8e05356dfef793c1b96497af8cb3 100644 (file)
@@ -181,7 +181,7 @@ enum {
        MLX5_DEV_CAP_FLAG_TLP_HINTS     = 1LL << 39,
        MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
        MLX5_DEV_CAP_FLAG_DCT           = 1LL << 41,
-       MLX5_DEV_CAP_FLAG_CMDIF_CSUM    = 1LL << 46,
+       MLX5_DEV_CAP_FLAG_CMDIF_CSUM    = 3LL << 46,
 };
 
 enum {
@@ -417,7 +417,7 @@ struct mlx5_init_seg {
        struct health_buffer    health;
        __be32                  rsvd2[884];
        __be32                  health_counter;
-       __be32                  rsvd3[1023];
+       __be32                  rsvd3[1019];
        __be64                  ieee1588_clk;
        __be32                  ieee1588_clk_type;
        __be32                  clr_intx;
index 8888381fc150b8f3f852077407ee8187a54cb7aa..6b8c496572c841d8ec8be70740557f1caf50b79a 100644 (file)
@@ -82,7 +82,7 @@ enum {
 };
 
 enum {
-       MLX5_MAX_EQ_NAME        = 20
+       MLX5_MAX_EQ_NAME        = 32
 };
 
 enum {
@@ -747,8 +747,7 @@ static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
 
 enum {
        MLX5_PROF_MASK_QP_SIZE          = (u64)1 << 0,
-       MLX5_PROF_MASK_CMDIF_CSUM       = (u64)1 << 1,
-       MLX5_PROF_MASK_MR_CACHE         = (u64)1 << 2,
+       MLX5_PROF_MASK_MR_CACHE         = (u64)1 << 1,
 };
 
 enum {
@@ -758,7 +757,6 @@ enum {
 struct mlx5_profile {
        u64     mask;
        u32     log_max_qp;
-       int     cmdif_csum;
        struct {
                int     size;
                int     limit;
index 8b6e55ee885576d3dc3613a23a990cd20da4bfed..1a0668e5a4eef0377b708b9aded0e11f8da67a80 100644 (file)
@@ -297,12 +297,26 @@ static inline int put_page_testzero(struct page *page)
 /*
  * Try to grab a ref unless the page has a refcount of zero, return false if
  * that is the case.
+ * This can be called when the MMU is off, so it must not access
+ * any of the virtual mappings.
  */
 static inline int get_page_unless_zero(struct page *page)
 {
        return atomic_inc_not_zero(&page->_count);
 }
 
+/*
+ * Try to drop a ref unless the page has a refcount of one, return false if
+ * that is the case.
+ * This is to make sure that the refcount won't become zero after this drop.
+ * This can be called when the MMU is off, so it must not access
+ * any of the virtual mappings.
+ */
+static inline int put_page_unless_one(struct page *page)
+{
+       return atomic_add_unless(&page->_count, -1, 1);
+}
+
 extern int page_is_ram(unsigned long pfn);
 
 /* Support for virtually mapped pages */
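
A small sketch of the speculative get/put pattern the comments above describe; example_try_grab(), example_try_release() and example_release_last_ref() are hypothetical:

static struct page *example_try_grab(struct page *page)
{
        if (!get_page_unless_zero(page))
                return NULL;    /* the page was already headed for the free lists */
        return page;            /* drop the reference with put_page() later */
}

static void example_try_release(struct page *page)
{
        if (!put_page_unless_one(page))
                example_release_last_ref(page);  /* hypothetical slow path */
}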
index 05f2447f8c15ba5c13b2ec329baf6feaea0b031e..15cd6b1b211e731c5c133dabd0d26e890a56e135 100644 (file)
@@ -367,9 +367,6 @@ struct module
        /* What modules do I depend on? */
        struct list_head target_list;
 
-       /* Who is waiting for us to be unloaded */
-       struct task_struct *waiter;
-
        /* Destruction function. */
        void (*exit)(void);
 
index 4b02512e421c6601db2cd1902094193a7aa7b647..5f487d77641174626c14c0857d2cad96aea92ffd 100644 (file)
@@ -365,7 +365,7 @@ static inline map_word map_word_load_partial(struct map_info *map, map_word orig
                        bitpos = (map_bankwidth(map)-1-i)*8;
 #endif
                        orig.x[0] &= ~(0xff << bitpos);
-                       orig.x[0] |= buf[i-start] << bitpos;
+                       orig.x[0] |= (unsigned long)buf[i-start] << bitpos;
                }
        }
        return orig;
@@ -384,7 +384,7 @@ static inline map_word map_word_ff(struct map_info *map)
 
        if (map_bankwidth(map) < MAP_FF_LIMIT) {
                int bw = 8 * map_bankwidth(map);
-               r.x[0] = (1 << bw) - 1;
+               r.x[0] = (1UL << bw) - 1;
        } else {
                for (i=0; i<map_words(map); i++)
                        r.x[i] = ~0UL;
index f9bfe526d3102175ab3d32149966f61aab35da62..8cc0e2fb68941f5169593076ad50411bb8939c85 100644 (file)
@@ -29,9 +29,6 @@
 
 #include <asm/div64.h>
 
-#define MTD_CHAR_MAJOR 90
-#define MTD_BLOCK_MAJOR 31
-
 #define MTD_ERASE_PENDING      0x01
 #define MTD_ERASING            0x02
 #define MTD_ERASE_SUSPEND      0x04
@@ -354,6 +351,11 @@ static inline int mtd_has_oob(const struct mtd_info *mtd)
        return mtd->_read_oob && mtd->_write_oob;
 }
 
+static inline int mtd_type_is_nand(const struct mtd_info *mtd)
+{
+       return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
+}
+
 static inline int mtd_can_have_bb(const struct mtd_info *mtd)
 {
        return !!mtd->_block_isbad;
index ac8e89d5a7929b6bdd40c424928a7d0d9872a73e..9e6c8f9f306e016923a0c32d3ae0e2d9f0ce0d1f 100644 (file)
@@ -198,6 +198,7 @@ typedef enum {
 /* Cell info constants */
 #define NAND_CI_CHIPNR_MSK     0x03
 #define NAND_CI_CELLTYPE_MSK   0x0C
+#define NAND_CI_CELLTYPE_SHIFT 2
 
 /* Keep gcc happy */
 struct nand_chip;
@@ -477,7 +478,7 @@ struct nand_buffers {
  * @badblockbits:      [INTERN] minimum number of set bits in a good block's
  *                     bad block marker position; i.e., BBM == 11110111b is
  *                     not bad when badblockbits == 7
- * @cellinfo:          [INTERN] MLC/multichip data from chip ident
+ * @bits_per_cell:     [INTERN] number of bits per cell. i.e., 1 means SLC.
  * @ecc_strength_ds:   [INTERN] ECC correctability from the datasheet.
  *                     Minimum amount of bit errors per @ecc_step_ds guaranteed
  *                     to be correctable. If unknown, set to zero.
@@ -498,7 +499,6 @@ struct nand_buffers {
  *                     supported, 0 otherwise.
  * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
  * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
- * @ecclayout:         [REPLACEABLE] the default ECC placement scheme
  * @bbt:               [INTERN] bad block table pointer
  * @bbt_td:            [REPLACEABLE] bad block table descriptor for flash
  *                     lookup.
@@ -559,7 +559,7 @@ struct nand_chip {
        int pagebuf;
        unsigned int pagebuf_bitflips;
        int subpagesize;
-       uint8_t cellinfo;
+       uint8_t bits_per_cell;
        uint16_t ecc_strength_ds;
        uint16_t ecc_step_ds;
        int badblockpos;
@@ -572,7 +572,6 @@ struct nand_chip {
 
        uint8_t *oob_poi;
        struct nand_hw_control *controller;
-       struct nand_ecclayout *ecclayout;
 
        struct nand_ecc_ctrl ecc;
        struct nand_buffers *buffers;
@@ -797,4 +796,13 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
        return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
 }
 
+/*
+ * Check whether this is an SLC NAND chip.
+ * !nand_is_slc() can be used to check for MLC/TLC NAND chips.
+ * We do not distinguish between MLC and TLC for now.
+ */
+static inline bool nand_is_slc(struct nand_chip *chip)
+{
+       return chip->bits_per_cell == 1;
+}
 #endif /* __LINUX_MTD_NAND_H */
index 4f27575ce1d67ebe74d1bba4f40a70f6edbdea11..aca446b46754fe681dcb0fb99028ac62c90a68fe 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/fcntl.h>       /* For O_CLOEXEC and O_NONBLOCK */
 #include <linux/kmemcheck.h>
 #include <linux/rcupdate.h>
+#include <linux/jump_label.h>
 #include <uapi/linux/net.h>
 
 struct poll_table_struct;
@@ -195,27 +196,23 @@ enum {
        SOCK_WAKE_URG,
 };
 
-extern int          sock_wake_async(struct socket *sk, int how, int band);
-extern int          sock_register(const struct net_proto_family *fam);
-extern void         sock_unregister(int family);
-extern int          __sock_create(struct net *net, int family, int type, int proto,
-                                struct socket **res, int kern);
-extern int          sock_create(int family, int type, int proto,
-                                struct socket **res);
-extern int          sock_create_kern(int family, int type, int proto,
-                                     struct socket **res);
-extern int          sock_create_lite(int family, int type, int proto,
-                                     struct socket **res); 
-extern void         sock_release(struct socket *sock);
-extern int          sock_sendmsg(struct socket *sock, struct msghdr *msg,
-                                 size_t len);
-extern int          sock_recvmsg(struct socket *sock, struct msghdr *msg,
-                                 size_t size, int flags);
-extern struct file  *sock_alloc_file(struct socket *sock, int flags, const char *dname);
-extern struct socket *sockfd_lookup(int fd, int *err);
-extern struct socket *sock_from_file(struct file *file, int *err);
+int sock_wake_async(struct socket *sk, int how, int band);
+int sock_register(const struct net_proto_family *fam);
+void sock_unregister(int family);
+int __sock_create(struct net *net, int family, int type, int proto,
+                 struct socket **res, int kern);
+int sock_create(int family, int type, int proto, struct socket **res);
+int sock_create_kern(int family, int type, int proto, struct socket **res);
+int sock_create_lite(int family, int type, int proto, struct socket **res);
+void sock_release(struct socket *sock);
+int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t len);
+int sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+                int flags);
+struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
+struct socket *sockfd_lookup(int fd, int *err);
+struct socket *sock_from_file(struct file *file, int *err);
 #define                     sockfd_put(sock) fput(sock->file)
-extern int          net_ratelimit(void);
+int net_ratelimit(void);
 
 #define net_ratelimited_function(function, ...)                        \
 do {                                                           \
@@ -243,32 +240,53 @@ do {                                                              \
 #define net_random()           prandom_u32()
 #define net_srandom(seed)      prandom_seed((__force u32)(seed))
 
-extern int          kernel_sendmsg(struct socket *sock, struct msghdr *msg,
-                                   struct kvec *vec, size_t num, size_t len);
-extern int          kernel_recvmsg(struct socket *sock, struct msghdr *msg,
-                                   struct kvec *vec, size_t num,
-                                   size_t len, int flags);
-
-extern int kernel_bind(struct socket *sock, struct sockaddr *addr,
-                      int addrlen);
-extern int kernel_listen(struct socket *sock, int backlog);
-extern int kernel_accept(struct socket *sock, struct socket **newsock,
-                        int flags);
-extern int kernel_connect(struct socket *sock, struct sockaddr *addr,
-                         int addrlen, int flags);
-extern int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
-                             int *addrlen);
-extern int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
-                             int *addrlen);
-extern int kernel_getsockopt(struct socket *sock, int level, int optname,
-                            char *optval, int *optlen);
-extern int kernel_setsockopt(struct socket *sock, int level, int optname,
-                            char *optval, unsigned int optlen);
-extern int kernel_sendpage(struct socket *sock, struct page *page, int offset,
-                          size_t size, int flags);
-extern int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
-extern int kernel_sock_shutdown(struct socket *sock,
-                               enum sock_shutdown_cmd how);
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+                          struct static_key *done_key);
+
+#ifdef HAVE_JUMP_LABEL
+#define ___NET_RANDOM_STATIC_KEY_INIT ((struct static_key) \
+               { .enabled = ATOMIC_INIT(0), .entries = (void *)1 })
+#else /* !HAVE_JUMP_LABEL */
+#define ___NET_RANDOM_STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#endif /* HAVE_JUMP_LABEL */
+
+/* BE CAREFUL: this function is not interrupt safe */
+#define net_get_random_once(buf, nbytes)                               \
+       ({                                                              \
+               bool ___ret = false;                                    \
+               static bool ___done = false;                            \
+               static struct static_key ___done_key =                  \
+                       ___NET_RANDOM_STATIC_KEY_INIT;                  \
+               if (!static_key_true(&___done_key))                     \
+                       ___ret = __net_get_random_once(buf,             \
+                                                      nbytes,          \
+                                                      &___done,        \
+                                                      &___done_key);   \
+               ___ret;                                                 \
+       })
+
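
A hedged sketch of the intended net_get_random_once() pattern: lazily seed a per-boot secret the first time a hash function runs; the example names are hypothetical and <linux/jhash.h> is assumed:

static u32 example_hash_secret __read_mostly;

static u32 example_hash(const void *data, unsigned int len)
{
        net_get_random_once(&example_hash_secret, sizeof(example_hash_secret));
        return jhash(data, len, example_hash_secret);
}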
+int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+                  size_t num, size_t len);
+int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
+                  size_t num, size_t len, int flags);
+
+int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen);
+int kernel_listen(struct socket *sock, int backlog);
+int kernel_accept(struct socket *sock, struct socket **newsock, int flags);
+int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+                  int flags);
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
+                      int *addrlen);
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
+                      int *addrlen);
+int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
+                     int *optlen);
+int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
+                     unsigned int optlen);
+int kernel_sendpage(struct socket *sock, struct page *page, int offset,
+                   size_t size, int flags);
+int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
 
 #define MODULE_ALIAS_NETPROTO(proto) \
        MODULE_ALIAS("net-pf-" __stringify(proto))
index a2a89a5c7be55b15baec6271a8cc4329b6f642bd..b05a4b501ab50f54fbd5d30e54a8086fcb964392 100644 (file)
@@ -42,6 +42,8 @@ enum {
        NETIF_F_TSO6_BIT,               /* ... TCPv6 segmentation */
        NETIF_F_FSO_BIT,                /* ... FCoE segmentation */
        NETIF_F_GSO_GRE_BIT,            /* ... GRE with TSO */
+       NETIF_F_GSO_IPIP_BIT,           /* ... IPIP tunnel with TSO */
+       NETIF_F_GSO_SIT_BIT,            /* ... SIT tunnel with TSO */
        NETIF_F_GSO_UDP_TUNNEL_BIT,     /* ... UDP TUNNEL with TSO */
        NETIF_F_GSO_MPLS_BIT,           /* ... MPLS segmentation */
        /**/NETIF_F_GSO_LAST =          /* last bit, see GSO_MASK */
@@ -107,6 +109,8 @@ enum {
 #define NETIF_F_RXFCS          __NETIF_F(RXFCS)
 #define NETIF_F_RXALL          __NETIF_F(RXALL)
 #define NETIF_F_GSO_GRE                __NETIF_F(GSO_GRE)
+#define NETIF_F_GSO_IPIP       __NETIF_F(GSO_IPIP)
+#define NETIF_F_GSO_SIT                __NETIF_F(GSO_SIT)
 #define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
 #define NETIF_F_GSO_MPLS       __NETIF_F(GSO_MPLS)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
index 3de49aca451970a738b5ae55cfd74656248ac9ea..27f62f746621a8758b7beb60b8b530604aa0fb74 100644 (file)
@@ -60,8 +60,8 @@ struct wireless_dev;
 #define SET_ETHTOOL_OPS(netdev,ops) \
        ( (netdev)->ethtool_ops = (ops) )
 
-extern void netdev_set_default_ethtool_ops(struct net_device *dev,
-                                          const struct ethtool_ops *ops);
+void netdev_set_default_ethtool_ops(struct net_device *dev,
+                                   const struct ethtool_ops *ops);
 
 /* hardware address assignment types */
 #define NET_ADDR_PERM          0       /* address is permanent (default) */
@@ -298,7 +298,7 @@ struct netdev_boot_setup {
 };
 #define NETDEV_BOOT_SETUP_MAX 8
 
-extern int __init netdev_boot_setup(char *str);
+int __init netdev_boot_setup(char *str);
 
 /*
  * Structure for NAPI scheduling similar to tasklet but with weighting
@@ -394,7 +394,7 @@ enum rx_handler_result {
 typedef enum rx_handler_result rx_handler_result_t;
 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
-extern void __napi_schedule(struct napi_struct *n);
+void __napi_schedule(struct napi_struct *n);
 
 static inline bool napi_disable_pending(struct napi_struct *n)
 {
@@ -445,8 +445,8 @@ static inline bool napi_reschedule(struct napi_struct *napi)
  *
  * Mark NAPI processing as complete.
  */
-extern void __napi_complete(struct napi_struct *n);
-extern void napi_complete(struct napi_struct *n);
+void __napi_complete(struct napi_struct *n);
+void napi_complete(struct napi_struct *n);
 
 /**
  *     napi_by_id - lookup a NAPI by napi_id
@@ -455,7 +455,7 @@ extern void napi_complete(struct napi_struct *n);
  * lookup @napi_id in napi_hash table
  * must be called under rcu_read_lock()
  */
-extern struct napi_struct *napi_by_id(unsigned int napi_id);
+struct napi_struct *napi_by_id(unsigned int napi_id);
 
 /**
  *     napi_hash_add - add a NAPI to global hashtable
@@ -463,7 +463,7 @@ extern struct napi_struct *napi_by_id(unsigned int napi_id);
  *
  * generate a new napi_id and store a @napi under it in napi_hash
  */
-extern void napi_hash_add(struct napi_struct *napi);
+void napi_hash_add(struct napi_struct *napi);
 
 /**
  *     napi_hash_del - remove a NAPI from global table
@@ -472,7 +472,7 @@ extern void napi_hash_add(struct napi_struct *napi);
  * Warning: caller must observe rcu grace period
  * before freeing memory containing @napi
  */
-extern void napi_hash_del(struct napi_struct *napi);
+void napi_hash_del(struct napi_struct *napi);
 
 /**
  *     napi_disable - prevent NAPI from scheduling
@@ -664,8 +664,8 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
 #ifdef CONFIG_RFS_ACCEL
-extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
-                               u32 flow_id, u16 filter_id);
+bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
+                        u16 filter_id);
 #endif
 
 /* This structure contains an instance of an RX queue. */
@@ -1143,8 +1143,19 @@ struct net_device {
        struct list_head        dev_list;
        struct list_head        napi_list;
        struct list_head        unreg_list;
-       struct list_head        upper_dev_list; /* List of upper devices */
-       struct list_head        lower_dev_list;
+       struct list_head        close_list;
+
+       /* directly linked devices, like slaves for bonding */
+       struct {
+               struct list_head upper;
+               struct list_head lower;
+       } adj_list;
+
+       /* all linked devices, *including* neighbours */
+       struct {
+               struct list_head upper;
+               struct list_head lower;
+       } all_adj_list;
 
 
        /* currently active device features */
@@ -1487,9 +1498,9 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
                f(dev, &dev->_tx[i], arg);
 }
 
-extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-                                          struct sk_buff *skb);
-extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+                                   struct sk_buff *skb);
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
  * Net namespace inlines
@@ -1673,8 +1684,8 @@ struct packet_offload {
 #define NETDEV_CHANGEUPPER     0x0015
 #define NETDEV_RESEND_IGMP     0x0016
 
-extern int register_netdevice_notifier(struct notifier_block *nb);
-extern int unregister_netdevice_notifier(struct notifier_block *nb);
+int register_netdevice_notifier(struct notifier_block *nb);
+int unregister_netdevice_notifier(struct notifier_block *nb);
 
 struct netdev_notifier_info {
        struct net_device *dev;
@@ -1697,9 +1708,9 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
        return info->dev;
 }
 
-extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
-                                        struct netdev_notifier_info *info);
-extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
+int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
+                                 struct netdev_notifier_info *info);
+int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
 
 
 extern rwlock_t                                dev_base_lock;          /* Device list lock */
@@ -1754,54 +1765,52 @@ static inline struct net_device *first_net_device_rcu(struct net *net)
        return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
 }
 
-extern int                     netdev_boot_setup_check(struct net_device *dev);
-extern unsigned long           netdev_boot_base(const char *prefix, int unit);
-extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
-                                             const char *hwaddr);
-extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
-extern void            dev_add_pack(struct packet_type *pt);
-extern void            dev_remove_pack(struct packet_type *pt);
-extern void            __dev_remove_pack(struct packet_type *pt);
-extern void            dev_add_offload(struct packet_offload *po);
-extern void            dev_remove_offload(struct packet_offload *po);
-extern void            __dev_remove_offload(struct packet_offload *po);
-
-extern struct net_device       *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
-                                                     unsigned short mask);
-extern struct net_device       *dev_get_by_name(struct net *net, const char *name);
-extern struct net_device       *dev_get_by_name_rcu(struct net *net, const char *name);
-extern struct net_device       *__dev_get_by_name(struct net *net, const char *name);
-extern int             dev_alloc_name(struct net_device *dev, const char *name);
-extern int             dev_open(struct net_device *dev);
-extern int             dev_close(struct net_device *dev);
-extern void            dev_disable_lro(struct net_device *dev);
-extern int             dev_loopback_xmit(struct sk_buff *newskb);
-extern int             dev_queue_xmit(struct sk_buff *skb);
-extern int             register_netdevice(struct net_device *dev);
-extern void            unregister_netdevice_queue(struct net_device *dev,
-                                                  struct list_head *head);
-extern void            unregister_netdevice_many(struct list_head *head);
+int netdev_boot_setup_check(struct net_device *dev);
+unsigned long netdev_boot_base(const char *prefix, int unit);
+struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+                                      const char *hwaddr);
+struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
+struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
+void dev_add_pack(struct packet_type *pt);
+void dev_remove_pack(struct packet_type *pt);
+void __dev_remove_pack(struct packet_type *pt);
+void dev_add_offload(struct packet_offload *po);
+void dev_remove_offload(struct packet_offload *po);
+void __dev_remove_offload(struct packet_offload *po);
+
+struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
+                                       unsigned short mask);
+struct net_device *dev_get_by_name(struct net *net, const char *name);
+struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
+struct net_device *__dev_get_by_name(struct net *net, const char *name);
+int dev_alloc_name(struct net_device *dev, const char *name);
+int dev_open(struct net_device *dev);
+int dev_close(struct net_device *dev);
+void dev_disable_lro(struct net_device *dev);
+int dev_loopback_xmit(struct sk_buff *newskb);
+int dev_queue_xmit(struct sk_buff *skb);
+int register_netdevice(struct net_device *dev);
+void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
+void unregister_netdevice_many(struct list_head *head);
 static inline void unregister_netdevice(struct net_device *dev)
 {
        unregister_netdevice_queue(dev, NULL);
 }
 
-extern int             netdev_refcnt_read(const struct net_device *dev);
-extern void            free_netdev(struct net_device *dev);
-extern void            synchronize_net(void);
-extern int             init_dummy_netdev(struct net_device *dev);
+int netdev_refcnt_read(const struct net_device *dev);
+void free_netdev(struct net_device *dev);
+void synchronize_net(void);
+int init_dummy_netdev(struct net_device *dev);
 
-extern struct net_device       *dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device       *__dev_get_by_index(struct net *net, int ifindex);
-extern struct net_device       *dev_get_by_index_rcu(struct net *net, int ifindex);
-extern int             netdev_get_name(struct net *net, char *name, int ifindex);
-extern int             dev_restart(struct net_device *dev);
+struct net_device *dev_get_by_index(struct net *net, int ifindex);
+struct net_device *__dev_get_by_index(struct net *net, int ifindex);
+struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
+int netdev_get_name(struct net *net, char *name, int ifindex);
+int dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
-extern int             netpoll_trap(void);
+int netpoll_trap(void);
 #endif
-extern int            skb_gro_receive(struct sk_buff **head,
-                                      struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -1873,7 +1882,7 @@ static inline int dev_parse_header(const struct sk_buff *skb,
 }
 
 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
-extern int             register_gifconf(unsigned int family, gifconf_func_t * gifconf);
+int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
 static inline int unregister_gifconf(unsigned int family)
 {
        return register_gifconf(family, NULL);
@@ -1944,7 +1953,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
 
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
-extern void __netif_schedule(struct Qdisc *q);
+void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
@@ -2264,11 +2273,11 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 }
 
 #ifdef CONFIG_XPS
-extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
-                              u16 index);
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+                       u16 index);
 #else
 static inline int netif_set_xps_queue(struct net_device *dev,
-                                     struct cpumask *mask,
+                                     const struct cpumask *mask,
                                      u16 index)
 {
        return 0;
@@ -2296,12 +2305,10 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
        return dev->num_tx_queues > 1;
 }
 
-extern int netif_set_real_num_tx_queues(struct net_device *dev,
-                                       unsigned int txq);
+int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
 
 #ifdef CONFIG_RPS
-extern int netif_set_real_num_rx_queues(struct net_device *dev,
-                                       unsigned int rxq);
+int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
 #else
 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
                                                unsigned int rxq)
@@ -2328,28 +2335,27 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 }
 
 #define DEFAULT_MAX_NUM_RSS_QUEUES     (8)
-extern int netif_get_num_default_rss_queues(void);
+int netif_get_num_default_rss_queues(void);
 
 /* Use this variant when it is known for sure that it
  * is executing from hardware interrupt context or with hardware interrupts
  * disabled.
  */
-extern void dev_kfree_skb_irq(struct sk_buff *skb);
+void dev_kfree_skb_irq(struct sk_buff *skb);
 
 /* Use this variant in places where it could be invoked
  * from either hardware interrupt or other context, with hardware interrupts
  * either disabled or enabled.
  */
-extern void dev_kfree_skb_any(struct sk_buff *skb);
+void dev_kfree_skb_any(struct sk_buff *skb);
 
-extern int             netif_rx(struct sk_buff *skb);
-extern int             netif_rx_ni(struct sk_buff *skb);
-extern int             netif_receive_skb(struct sk_buff *skb);
-extern gro_result_t    napi_gro_receive(struct napi_struct *napi,
-                                        struct sk_buff *skb);
-extern void            napi_gro_flush(struct napi_struct *napi, bool flush_old);
-extern struct sk_buff *        napi_get_frags(struct napi_struct *napi);
-extern gro_result_t    napi_gro_frags(struct napi_struct *napi);
+int netif_rx(struct sk_buff *skb);
+int netif_rx_ni(struct sk_buff *skb);
+int netif_receive_skb(struct sk_buff *skb);
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
+void napi_gro_flush(struct napi_struct *napi, bool flush_old);
+struct sk_buff *napi_get_frags(struct napi_struct *napi);
+gro_result_t napi_gro_frags(struct napi_struct *napi);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
@@ -2357,40 +2363,36 @@ static inline void napi_free_frags(struct napi_struct *napi)
        napi->skb = NULL;
 }
 
-extern int netdev_rx_handler_register(struct net_device *dev,
-                                     rx_handler_func_t *rx_handler,
-                                     void *rx_handler_data);
-extern void netdev_rx_handler_unregister(struct net_device *dev);
-
-extern bool            dev_valid_name(const char *name);
-extern int             dev_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern int             dev_ethtool(struct net *net, struct ifreq *);
-extern unsigned int    dev_get_flags(const struct net_device *);
-extern int             __dev_change_flags(struct net_device *, unsigned int flags);
-extern int             dev_change_flags(struct net_device *, unsigned int);
-extern void            __dev_notify_flags(struct net_device *, unsigned int old_flags);
-extern int             dev_change_name(struct net_device *, const char *);
-extern int             dev_set_alias(struct net_device *, const char *, size_t);
-extern int             dev_change_net_namespace(struct net_device *,
-                                                struct net *, const char *);
-extern int             dev_set_mtu(struct net_device *, int);
-extern void            dev_set_group(struct net_device *, int);
-extern int             dev_set_mac_address(struct net_device *,
-                                           struct sockaddr *);
-extern int             dev_change_carrier(struct net_device *,
-                                          bool new_carrier);
-extern int             dev_get_phys_port_id(struct net_device *dev,
-                                            struct netdev_phys_port_id *ppid);
-extern int             dev_hard_start_xmit(struct sk_buff *skb,
-                                           struct net_device *dev,
-                                           struct netdev_queue *txq);
-extern int             dev_forward_skb(struct net_device *dev,
-                                       struct sk_buff *skb);
+int netdev_rx_handler_register(struct net_device *dev,
+                              rx_handler_func_t *rx_handler,
+                              void *rx_handler_data);
+void netdev_rx_handler_unregister(struct net_device *dev);
+
+bool dev_valid_name(const char *name);
+int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
+int dev_ethtool(struct net *net, struct ifreq *);
+unsigned int dev_get_flags(const struct net_device *);
+int __dev_change_flags(struct net_device *, unsigned int flags);
+int dev_change_flags(struct net_device *, unsigned int);
+void __dev_notify_flags(struct net_device *, unsigned int old_flags,
+                       unsigned int gchanges);
+int dev_change_name(struct net_device *, const char *);
+int dev_set_alias(struct net_device *, const char *, size_t);
+int dev_change_net_namespace(struct net_device *, struct net *, const char *);
+int dev_set_mtu(struct net_device *, int);
+void dev_set_group(struct net_device *, int);
+int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_change_carrier(struct net_device *, bool new_carrier);
+int dev_get_phys_port_id(struct net_device *dev,
+                        struct netdev_phys_port_id *ppid);
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+                       struct netdev_queue *txq);
+int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 
 extern int             netdev_budget;
 
 /* Called by rtnetlink.c:rtnl_unlock() */
-extern void netdev_run_todo(void);
+void netdev_run_todo(void);
 
 /**
  *     dev_put - release reference to device
@@ -2423,9 +2425,9 @@ static inline void dev_hold(struct net_device *dev)
  * kind of lower layer not just hardware media.
  */
 
-extern void linkwatch_init_dev(struct net_device *dev);
-extern void linkwatch_fire_event(struct net_device *dev);
-extern void linkwatch_forget_dev(struct net_device *dev);
+void linkwatch_init_dev(struct net_device *dev);
+void linkwatch_fire_event(struct net_device *dev);
+void linkwatch_forget_dev(struct net_device *dev);
 
 /**
  *     netif_carrier_ok - test if carrier present
@@ -2438,13 +2440,13 @@ static inline bool netif_carrier_ok(const struct net_device *dev)
        return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
 }
 
-extern unsigned long dev_trans_start(struct net_device *dev);
+unsigned long dev_trans_start(struct net_device *dev);
 
-extern void __netdev_watchdog_up(struct net_device *dev);
+void __netdev_watchdog_up(struct net_device *dev);
 
-extern void netif_carrier_on(struct net_device *dev);
+void netif_carrier_on(struct net_device *dev);
 
-extern void netif_carrier_off(struct net_device *dev);
+void netif_carrier_off(struct net_device *dev);
 
 /**
  *     netif_dormant_on - mark device as dormant.
@@ -2512,9 +2514,9 @@ static inline bool netif_device_present(struct net_device *dev)
        return test_bit(__LINK_STATE_PRESENT, &dev->state);
 }
 
-extern void netif_device_detach(struct net_device *dev);
+void netif_device_detach(struct net_device *dev);
 
-extern void netif_device_attach(struct net_device *dev);
+void netif_device_attach(struct net_device *dev);
 
 /*
  * Network interface message level settings
@@ -2723,119 +2725,138 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
 
-extern void            ether_setup(struct net_device *dev);
+void ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
-                                      void (*setup)(struct net_device *),
-                                      unsigned int txqs, unsigned int rxqs);
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+                                   void (*setup)(struct net_device *),
+                                   unsigned int txqs, unsigned int rxqs);
 #define alloc_netdev(sizeof_priv, name, setup) \
        alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
 
 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
        alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
 
-extern int             register_netdev(struct net_device *dev);
-extern void            unregister_netdev(struct net_device *dev);
+int register_netdev(struct net_device *dev);
+void unregister_netdev(struct net_device *dev);
 
 /* General hardware address lists handling functions */
-extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
-                                 struct netdev_hw_addr_list *from_list,
-                                 int addr_len, unsigned char addr_type);
-extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
-                                  struct netdev_hw_addr_list *from_list,
-                                  int addr_len, unsigned char addr_type);
-extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
-                         struct netdev_hw_addr_list *from_list,
-                         int addr_len);
-extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
-                            struct netdev_hw_addr_list *from_list,
-                            int addr_len);
-extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
-extern void __hw_addr_init(struct netdev_hw_addr_list *list);
+int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+                          struct netdev_hw_addr_list *from_list,
+                          int addr_len, unsigned char addr_type);
+void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+                           struct netdev_hw_addr_list *from_list,
+                           int addr_len, unsigned char addr_type);
+int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+                  struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+                     struct netdev_hw_addr_list *from_list, int addr_len);
+void __hw_addr_flush(struct netdev_hw_addr_list *list);
+void __hw_addr_init(struct netdev_hw_addr_list *list);
 
 /* Functions used for device addresses handling */
-extern int dev_addr_add(struct net_device *dev, const unsigned char *addr,
-                       unsigned char addr_type);
-extern int dev_addr_del(struct net_device *dev, const unsigned char *addr,
-                       unsigned char addr_type);
-extern int dev_addr_add_multiple(struct net_device *to_dev,
-                                struct net_device *from_dev,
-                                unsigned char addr_type);
-extern int dev_addr_del_multiple(struct net_device *to_dev,
-                                struct net_device *from_dev,
-                                unsigned char addr_type);
-extern void dev_addr_flush(struct net_device *dev);
-extern int dev_addr_init(struct net_device *dev);
+int dev_addr_add(struct net_device *dev, const unsigned char *addr,
+                unsigned char addr_type);
+int dev_addr_del(struct net_device *dev, const unsigned char *addr,
+                unsigned char addr_type);
+int dev_addr_add_multiple(struct net_device *to_dev,
+                         struct net_device *from_dev, unsigned char addr_type);
+int dev_addr_del_multiple(struct net_device *to_dev,
+                         struct net_device *from_dev, unsigned char addr_type);
+void dev_addr_flush(struct net_device *dev);
+int dev_addr_init(struct net_device *dev);
 
 /* Functions used for unicast addresses handling */
-extern int dev_uc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_uc_sync(struct net_device *to, struct net_device *from);
-extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_uc_flush(struct net_device *dev);
-extern void dev_uc_init(struct net_device *dev);
+int dev_uc_add(struct net_device *dev, const unsigned char *addr);
+int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_uc_del(struct net_device *dev, const unsigned char *addr);
+int dev_uc_sync(struct net_device *to, struct net_device *from);
+int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_uc_unsync(struct net_device *to, struct net_device *from);
+void dev_uc_flush(struct net_device *dev);
+void dev_uc_init(struct net_device *dev);
 
 /* Functions used for multicast addresses handling */
-extern int dev_mc_add(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
-extern int dev_mc_sync(struct net_device *to, struct net_device *from);
-extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
-extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
-extern void dev_mc_flush(struct net_device *dev);
-extern void dev_mc_init(struct net_device *dev);
+int dev_mc_add(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del(struct net_device *dev, const unsigned char *addr);
+int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
+int dev_mc_sync(struct net_device *to, struct net_device *from);
+int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
+void dev_mc_unsync(struct net_device *to, struct net_device *from);
+void dev_mc_flush(struct net_device *dev);
+void dev_mc_init(struct net_device *dev);
 
 /* Functions used for secondary unicast and multicast support */
-extern void            dev_set_rx_mode(struct net_device *dev);
-extern void            __dev_set_rx_mode(struct net_device *dev);
-extern int             dev_set_promiscuity(struct net_device *dev, int inc);
-extern int             dev_set_allmulti(struct net_device *dev, int inc);
-extern void            netdev_state_change(struct net_device *dev);
-extern void            netdev_notify_peers(struct net_device *dev);
-extern void            netdev_features_change(struct net_device *dev);
+void dev_set_rx_mode(struct net_device *dev);
+void __dev_set_rx_mode(struct net_device *dev);
+int dev_set_promiscuity(struct net_device *dev, int inc);
+int dev_set_allmulti(struct net_device *dev, int inc);
+void netdev_state_change(struct net_device *dev);
+void netdev_notify_peers(struct net_device *dev);
+void netdev_features_change(struct net_device *dev);
 /* Load a device via the kmod */
-extern void            dev_load(struct net *net, const char *name);
-extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
-                                              struct rtnl_link_stats64 *storage);
-extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
-                                   const struct net_device_stats *netdev_stats);
+void dev_load(struct net *net, const char *name);
+struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+                                       struct rtnl_link_stats64 *storage);
+void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+                            const struct net_device_stats *netdev_stats);
 
 extern int             netdev_max_backlog;
 extern int             netdev_tstamp_prequeue;
 extern int             weight_p;
 extern int             bpf_jit_enable;
 
-extern bool netdev_has_upper_dev(struct net_device *dev,
-                                struct net_device *upper_dev);
-extern bool netdev_has_any_upper_dev(struct net_device *dev);
-extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
-                                                       struct list_head **iter);
+bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
+bool netdev_has_any_upper_dev(struct net_device *dev);
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter);
 
 /* iterate through upper list, must be called under RCU read lock */
-#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \
-       for (iter = &(dev)->upper_dev_list, \
-            upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
-            upper; \
-            upper = netdev_upper_get_next_dev_rcu(dev, &(iter)))
-
-extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
-extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
-extern int netdev_upper_dev_link(struct net_device *dev,
+#define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
+       for (iter = &(dev)->all_adj_list.upper, \
+            updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
+            updev; \
+            updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
+
+void *netdev_lower_get_next_private(struct net_device *dev,
+                                   struct list_head **iter);
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+                                       struct list_head **iter);
+
+#define netdev_for_each_lower_private(dev, priv, iter) \
+       for (iter = (dev)->adj_list.lower.next, \
+            priv = netdev_lower_get_next_private(dev, &(iter)); \
+            priv; \
+            priv = netdev_lower_get_next_private(dev, &(iter)))
+
+#define netdev_for_each_lower_private_rcu(dev, priv, iter) \
+       for (iter = &(dev)->adj_list.lower, \
+            priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
+            priv; \
+            priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
+
+void *netdev_adjacent_get_private(struct list_head *adj_list);
+struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
+struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
+int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
+int netdev_master_upper_dev_link(struct net_device *dev,
                                 struct net_device *upper_dev);
-extern int netdev_master_upper_dev_link(struct net_device *dev,
-                                       struct net_device *upper_dev);
-extern void netdev_upper_dev_unlink(struct net_device *dev,
-                                   struct net_device *upper_dev);
-extern int skb_checksum_help(struct sk_buff *skb);
-extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
-       netdev_features_t features, bool tx_path);
-extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
-                                         netdev_features_t features);
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+                                        struct net_device *upper_dev,
+                                        void *private);
+void netdev_upper_dev_unlink(struct net_device *dev,
+                            struct net_device *upper_dev);
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+                                      struct net_device *lower_dev);
+void *netdev_lower_dev_get_private(struct net_device *dev,
+                                  struct net_device *lower_dev);
+int skb_checksum_help(struct sk_buff *skb);
+struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+                                 netdev_features_t features, bool tx_path);
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+                                   netdev_features_t features);
 
 static inline
 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
@@ -2857,30 +2878,30 @@ static inline bool can_checksum_protocol(netdev_features_t features,
 }
 
 #ifdef CONFIG_BUG
-extern void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev);
 #else
 static inline void netdev_rx_csum_fault(struct net_device *dev)
 {
 }
 #endif
 /* rx skb timestamps */
-extern void            net_enable_timestamp(void);
-extern void            net_disable_timestamp(void);
+void net_enable_timestamp(void);
+void net_disable_timestamp(void);
 
 #ifdef CONFIG_PROC_FS
-extern int __init dev_proc_init(void);
+int __init dev_proc_init(void);
 #else
 #define dev_proc_init() 0
 #endif
 
-extern int netdev_class_create_file(struct class_attribute *class_attr);
-extern void netdev_class_remove_file(struct class_attribute *class_attr);
+int netdev_class_create_file(struct class_attribute *class_attr);
+void netdev_class_remove_file(struct class_attribute *class_attr);
 
 extern struct kobj_ns_type_operations net_ns_type_operations;
 
-extern const char *netdev_drivername(const struct net_device *dev);
+const char *netdev_drivername(const struct net_device *dev);
 
-extern void linkwatch_run_queue(void);
+void linkwatch_run_queue(void);
 
 static inline netdev_features_t netdev_get_wanted_features(
        struct net_device *dev)
@@ -2972,22 +2993,22 @@ static inline const char *netdev_name(const struct net_device *dev)
        return dev->name;
 }
 
-extern __printf(3, 4)
+__printf(3, 4)
 int netdev_printk(const char *level, const struct net_device *dev,
                  const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_emerg(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_alert(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_crit(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_err(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_warn(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_notice(const struct net_device *dev, const char *format, ...);
-extern __printf(2, 3)
+__printf(2, 3)
 int netdev_info(const struct net_device *dev, const char *format, ...);
 
 #define MODULE_ALIAS_NETDEV(device) \
@@ -3028,7 +3049,7 @@ do {                                                              \
  * file/line information and a backtrace.
  */
 #define netdev_WARN(dev, format, args...)                      \
-       WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
+       WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
 
 /* netif printk helpers, similar to netdev_printk */
 
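Besides the mechanical extern removal, the netdevice.h hunks replace the flat upper_dev_list/lower_dev_list with the adj_list/all_adj_list pairs and add iterators over them. A sketch of walking every linked upper device with the new macro, using only what the header above declares; the dump function itself is illustrative:

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static void example_dump_all_uppers(struct net_device *dev)
{
	struct net_device *updev;
	struct list_head *iter;

	/* The *_rcu iterator must run inside an RCU read-side section. */
	rcu_read_lock();
	netdev_for_each_all_upper_dev_rcu(dev, updev, iter)
		netdev_info(dev, "linked upper device: %s\n", updev->name);
	rcu_read_unlock();
}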
index 708fe72ab913ba71e5421b080d0c9a8c90c7a3d9..2077489f98873bcbe4ca083cd1f0eb1b99eeab5e 100644 (file)
@@ -35,14 +35,15 @@ static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
        result->all[3] = a1->all[3] & mask->all[3];
 }
 
-extern int netfilter_init(void);
+int netfilter_init(void);
 
 /* Largest hook number + 1 */
 #define NF_MAX_HOOKS 8
 
 struct sk_buff;
 
-typedef unsigned int nf_hookfn(unsigned int hooknum,
+struct nf_hook_ops;
+typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops,
                               struct sk_buff *skb,
                               const struct net_device *in,
                               const struct net_device *out,
@@ -52,12 +53,13 @@ struct nf_hook_ops {
        struct list_head list;
 
        /* User fills in from here down. */
-       nf_hookfn *hook;
-       struct module *owner;
-       u_int8_t pf;
-       unsigned int hooknum;
+       nf_hookfn       *hook;
+       struct module   *owner;
+       void            *priv;
+       u_int8_t        pf;
+       unsigned int    hooknum;
        /* Hooks are ordered in ascending priority. */
-       int priority;
+       int             priority;
 };
 
 struct nf_sockopt_ops {
@@ -208,7 +210,7 @@ int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
 /* Call this before modifying an existing packet: ensures it is
    modifiable and linear to the point you care about (writable_len).
    Returns true or false. */
-extern int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
+int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);
 
 struct flowi;
 struct nf_queue_entry;
@@ -269,8 +271,8 @@ nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
        return csum;
 }
 
-extern int nf_register_afinfo(const struct nf_afinfo *afinfo);
-extern void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
+int nf_register_afinfo(const struct nf_afinfo *afinfo);
+void nf_unregister_afinfo(const struct nf_afinfo *afinfo);
 
 #include <net/flow.h>
 extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
@@ -315,7 +317,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
-extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
 extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
 
 struct nf_conn;
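The netfilter.h change hands the registered nf_hook_ops to the hook function and adds a priv field for per-hook state. A hedged sketch of a hook written against the new prototype (the trailing okfn argument is kept from the existing typedef, which this hunk does not change); the private structure and counter are illustrative:

#include <linux/netfilter.h>
#include <linux/skbuff.h>

struct example_hook_priv {
	unsigned long packets;		/* hypothetical per-hook counter */
};

static unsigned int example_hook(const struct nf_hook_ops *ops,
				 struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	struct example_hook_priv *priv = ops->priv;

	if (priv)
		priv->packets++;	/* state travels with the ops, not in a global */
	return NF_ACCEPT;
}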
index 9ac9fbde7b61a38795d631ab545b9bd3140fe6f6..7967516adc0d1ba2335111e863cc8c1899050bc3 100644 (file)
@@ -49,31 +49,68 @@ enum ip_set_feature {
 
 /* Set extensions */
 enum ip_set_extension {
-       IPSET_EXT_NONE = 0,
-       IPSET_EXT_BIT_TIMEOUT = 1,
+       IPSET_EXT_BIT_TIMEOUT = 0,
        IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
-       IPSET_EXT_BIT_COUNTER = 2,
+       IPSET_EXT_BIT_COUNTER = 1,
        IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
-};
-
-/* Extension offsets */
-enum ip_set_offset {
-       IPSET_OFFSET_TIMEOUT = 0,
-       IPSET_OFFSET_COUNTER,
-       IPSET_OFFSET_MAX,
+       IPSET_EXT_BIT_COMMENT = 2,
+       IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
+       /* Mark set with an extension which needs to call destroy */
+       IPSET_EXT_BIT_DESTROY = 7,
+       IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
 };
 
 #define SET_WITH_TIMEOUT(s)    ((s)->extensions & IPSET_EXT_TIMEOUT)
 #define SET_WITH_COUNTER(s)    ((s)->extensions & IPSET_EXT_COUNTER)
+#define SET_WITH_COMMENT(s)    ((s)->extensions & IPSET_EXT_COMMENT)
+
+/* Extension id, in size order */
+enum ip_set_ext_id {
+       IPSET_EXT_ID_COUNTER = 0,
+       IPSET_EXT_ID_TIMEOUT,
+       IPSET_EXT_ID_COMMENT,
+       IPSET_EXT_ID_MAX,
+};
+
+/* Extension type */
+struct ip_set_ext_type {
+       /* Destroy extension private data (can be NULL) */
+       void (*destroy)(void *ext);
+       enum ip_set_extension type;
+       enum ipset_cadt_flags flag;
+       /* Size and minimal alignment */
+       u8 len;
+       u8 align;
+};
+
+extern const struct ip_set_ext_type ip_set_extensions[];
 
 struct ip_set_ext {
-       unsigned long timeout;
        u64 packets;
        u64 bytes;
+       u32 timeout;
+       char *comment;
+};
+
+struct ip_set_counter {
+       atomic64_t bytes;
+       atomic64_t packets;
+};
+
+struct ip_set_comment {
+       char *str;
 };
 
 struct ip_set;
 
+#define ext_timeout(e, s)      \
+(unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT])
+#define ext_counter(e, s)      \
+(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
+#define ext_comment(e, s)      \
+(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
+
+
 typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
                           const struct ip_set_ext *ext,
                           struct ip_set_ext *mext, u32 cmdflags);
@@ -147,7 +184,8 @@ struct ip_set_type {
        u8 revision_min, revision_max;
 
        /* Create set */
-       int (*create)(struct ip_set *set, struct nlattr *tb[], u32 flags);
+       int (*create)(struct net *net, struct ip_set *set,
+                     struct nlattr *tb[], u32 flags);
 
        /* Attribute policies */
        const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
@@ -179,14 +217,45 @@ struct ip_set {
        u8 revision;
        /* Extensions */
        u8 extensions;
+       /* Default timeout value, if enabled */
+       u32 timeout;
+       /* Element data size */
+       size_t dsize;
+       /* Offsets to extensions in elements */
+       size_t offset[IPSET_EXT_ID_MAX];
        /* The type specific data */
        void *data;
 };
 
-struct ip_set_counter {
-       atomic64_t bytes;
-       atomic64_t packets;
-};
+static inline void
+ip_set_ext_destroy(struct ip_set *set, void *data)
+{
+       /* Check that the extension is enabled for the set and
+        * call it's destroy function for its extension part in data.
+        * call its destroy function for its extension part in data.
+       if (SET_WITH_COMMENT(set))
+               ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
+                       ext_comment(data, set));
+}
+
+static inline int
+ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
+{
+       u32 cadt_flags = 0;
+
+       if (SET_WITH_TIMEOUT(set))
+               if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                                          htonl(set->timeout))))
+                       return -EMSGSIZE;
+       if (SET_WITH_COUNTER(set))
+               cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
+       if (SET_WITH_COMMENT(set))
+               cadt_flags |= IPSET_FLAG_WITH_COMMENT;
+
+       if (!cadt_flags)
+               return 0;
+       return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
+}
 
 static inline void
 ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
@@ -248,12 +317,13 @@ ip_set_init_counter(struct ip_set_counter *counter,
 }
 
 /* register and unregister set references */
-extern ip_set_id_t ip_set_get_byname(const char *name, struct ip_set **set);
-extern void ip_set_put_byindex(ip_set_id_t index);
-extern const char *ip_set_name_byindex(ip_set_id_t index);
-extern ip_set_id_t ip_set_nfnl_get(const char *name);
-extern ip_set_id_t ip_set_nfnl_get_byindex(ip_set_id_t index);
-extern void ip_set_nfnl_put(ip_set_id_t index);
+extern ip_set_id_t ip_set_get_byname(struct net *net,
+                                    const char *name, struct ip_set **set);
+extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
+extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern ip_set_id_t ip_set_nfnl_get(struct net *net, const char *name);
+extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
 
 /* API for iptables set match, and SET target */
 
@@ -272,6 +342,8 @@ extern void *ip_set_alloc(size_t size);
 extern void ip_set_free(void *members);
 extern int ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr);
 extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
+extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
+                             size_t len);
 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
                                 struct ip_set_ext *ext);
 
@@ -389,13 +461,40 @@ bitmap_bytes(u32 a, u32 b)
 }
 
 #include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_comment.h>
 
-#define IP_SET_INIT_KEXT(skb, opt, map)                        \
+static inline int
+ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
+                     const void *e, bool active)
+{
+       if (SET_WITH_TIMEOUT(set)) {
+               unsigned long *timeout = ext_timeout(e, set);
+
+               if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
+                       htonl(active ? ip_set_timeout_get(timeout)
+                               : *timeout)))
+                       return -EMSGSIZE;
+       }
+       if (SET_WITH_COUNTER(set) &&
+           ip_set_put_counter(skb, ext_counter(e, set)))
+               return -EMSGSIZE;
+       if (SET_WITH_COMMENT(set) &&
+           ip_set_put_comment(skb, ext_comment(e, set)))
+               return -EMSGSIZE;
+       return 0;
+}
+
+#define IP_SET_INIT_KEXT(skb, opt, set)                        \
        { .bytes = (skb)->len, .packets = 1,            \
-         .timeout = ip_set_adt_opt_timeout(opt, map) }
+         .timeout = ip_set_adt_opt_timeout(opt, set) }
 
-#define IP_SET_INIT_UEXT(map)                          \
+#define IP_SET_INIT_UEXT(set)                          \
        { .bytes = ULLONG_MAX, .packets = ULLONG_MAX,   \
-         .timeout = (map)->timeout }
+         .timeout = (set)->timeout }
+
+#define IP_SET_INIT_CIDR(a, b) ((a) ? (a) : (b))
+
+#define IPSET_CONCAT(a, b)             a##b
+#define IPSET_TOKEN(a, b)              IPSET_CONCAT(a, b)
 
 #endif /*_IP_SET_H */
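With the per-set dsize and offset[] fields plus the ext_timeout/ext_counter/ext_comment accessors added above, set types no longer hard-code extension members in their element structs. A hedged sketch of initialising an element's extensions through those offsets; the helper name is illustrative, and ip_set_timeout_set/ip_set_init_counter/ip_set_init_comment are assumed from the companion ipset headers pulled in by ip_set.h:

#include <linux/netfilter/ipset/ip_set.h>

static void example_init_element_ext(struct ip_set *set, void *elem,
				     const struct ip_set_ext *ext)
{
	/* Each accessor turns the per-set byte offset into a typed pointer
	 * inside the otherwise opaque element.
	 */
	if (SET_WITH_TIMEOUT(set))
		ip_set_timeout_set(ext_timeout(elem, set), ext->timeout);
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(elem, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(ext_comment(elem, set), ext);
}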
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
new file mode 100644 (file)
index 0000000..21217ea
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef _IP_SET_COMMENT_H
+#define _IP_SET_COMMENT_H
+
+/* Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifdef __KERNEL__
+
+static inline char*
+ip_set_comment_uget(struct nlattr *tb)
+{
+       return nla_data(tb);
+}
+
+static inline void
+ip_set_init_comment(struct ip_set_comment *comment,
+                   const struct ip_set_ext *ext)
+{
+       size_t len = ext->comment ? strlen(ext->comment) : 0;
+
+       if (unlikely(comment->str)) {
+               kfree(comment->str);
+               comment->str = NULL;
+       }
+       if (!len)
+               return;
+       if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
+               len = IPSET_MAX_COMMENT_SIZE;
+       comment->str = kzalloc(len + 1, GFP_ATOMIC);
+       if (unlikely(!comment->str))
+               return;
+       strlcpy(comment->str, ext->comment, len + 1);
+}
+
+static inline int
+ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
+{
+       if (!comment->str)
+               return 0;
+       return nla_put_string(skb, IPSET_ATTR_COMMENT, comment->str);
+}
+
+static inline void
+ip_set_comment_free(struct ip_set_comment *comment)
+{
+       if (unlikely(!comment->str))
+               return;
+       kfree(comment->str);
+       comment->str = NULL;
+}
+
+#endif
+#endif
index 3aac04167ca70699a60d63c50486eff9d0cc910a..83c2f9e0886cc537f57bf1db5cbfc035fbe93596 100644 (file)
@@ -23,8 +23,8 @@
 /* Set is defined with timeout support: timeout value may be 0 */
 #define IPSET_NO_TIMEOUT       UINT_MAX
 
-#define ip_set_adt_opt_timeout(opt, map)       \
-((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (map)->timeout)
+#define ip_set_adt_opt_timeout(opt, set)       \
+((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
 
 static inline unsigned int
 ip_set_timeout_uget(struct nlattr *tb)
index 127d0b90604fa08486a88437fbd9a8e09b315148..275505792664ae4e835401a0503d5d3d9fa66808 100644 (file)
@@ -23,6 +23,6 @@ struct ip_conntrack_stat {
 };
 
 /* call to create an explicit dependency on nf_conntrack. */
-extern void need_conntrack(void);
+void need_conntrack(void);
 
 #endif /* _NF_CONNTRACK_COMMON_H */
index f381020eee92835fa68ae19858e1a4ca0437138c..858d9b214053ff141e5e856ce98edbf50412bc00 100644 (file)
@@ -29,13 +29,13 @@ struct nf_ct_h323_master {
 
 struct nf_conn;
 
-extern int get_h225_addr(struct nf_conn *ct, unsigned char *data,
-                        TransportAddress *taddr,
-                        union nf_inet_addr *addr, __be16 *port);
-extern void nf_conntrack_h245_expect(struct nf_conn *new,
-                                    struct nf_conntrack_expect *this);
-extern void nf_conntrack_q931_expect(struct nf_conn *new,
-                                    struct nf_conntrack_expect *this);
+int get_h225_addr(struct nf_conn *ct, unsigned char *data,
+                 TransportAddress *taddr, union nf_inet_addr *addr,
+                 __be16 *port);
+void nf_conntrack_h245_expect(struct nf_conn *new,
+                             struct nf_conntrack_expect *this);
+void nf_conntrack_q931_expect(struct nf_conn *new,
+                             struct nf_conntrack_expect *this);
 extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff,
                                  unsigned char **data, int dataoff,
                                  H245_TransportAddress *taddr,
index 6a0664c0c45197a7abece81ecb35fae6e638d8ae..ec2ffaf418c8e8d31e1cca95fea02a85aa48089f 100644 (file)
@@ -87,8 +87,8 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
 /* delete keymap entries */
 void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
 
-extern void nf_ct_gre_keymap_flush(struct net *net);
-extern void nf_nat_need_gre(void);
+void nf_ct_gre_keymap_flush(struct net *net);
+void nf_nat_need_gre(void);
 
 #endif /* __KERNEL__ */
 #endif /* _CONNTRACK_PROTO_GRE_H */
index ba7f571a2b1cba1f2381ee50c7030946543f31ac..d5af3c27fb7de0385b11396ac241991bacb3684e 100644 (file)
@@ -107,85 +107,93 @@ enum sdp_header_types {
        SDP_HDR_MEDIA,
 };
 
-extern unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb,
-                                      unsigned int protoff,
-                                      unsigned int dataoff,
-                                      const char **dptr,
-                                      unsigned int *datalen);
-extern void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb,
-                                         unsigned int protoff, s16 off);
-extern unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
-                                             unsigned int protoff,
-                                             unsigned int dataoff,
-                                             const char **dptr,
-                                             unsigned int *datalen,
-                                             struct nf_conntrack_expect *exp,
-                                             unsigned int matchoff,
-                                             unsigned int matchlen);
-extern unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb,
-                                           unsigned int protoff,
-                                           unsigned int dataoff,
-                                           const char **dptr,
-                                           unsigned int *datalen,
-                                           unsigned int sdpoff,
-                                           enum sdp_header_types type,
-                                           enum sdp_header_types term,
-                                           const union nf_inet_addr *addr);
-extern unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb,
-                                           unsigned int protoff,
-                                           unsigned int dataoff,
-                                           const char **dptr,
-                                           unsigned int *datalen,
-                                           unsigned int matchoff,
-                                           unsigned int matchlen,
-                                           u_int16_t port);
-extern unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
-                                              unsigned int protoff,
-                                              unsigned int dataoff,
-                                              const char **dptr,
-                                              unsigned int *datalen,
-                                              unsigned int sdpoff,
-                                              const union nf_inet_addr *addr);
-extern unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb,
-                                            unsigned int protoff,
-                                            unsigned int dataoff,
-                                            const char **dptr,
-                                            unsigned int *datalen,
-                                            struct nf_conntrack_expect *rtp_exp,
-                                            struct nf_conntrack_expect *rtcp_exp,
-                                            unsigned int mediaoff,
-                                            unsigned int medialen,
-                                            union nf_inet_addr *rtp_addr);
-
-extern int ct_sip_parse_request(const struct nf_conn *ct,
-                               const char *dptr, unsigned int datalen,
-                               unsigned int *matchoff, unsigned int *matchlen,
-                               union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
-                            unsigned int dataoff, unsigned int datalen,
-                            enum sip_header_types type,
-                            unsigned int *matchoff, unsigned int *matchlen);
-extern int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
-                                  unsigned int *dataoff, unsigned int datalen,
-                                  enum sip_header_types type, int *in_header,
-                                  unsigned int *matchoff, unsigned int *matchlen,
-                                  union nf_inet_addr *addr, __be16 *port);
-extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
-                                     unsigned int dataoff, unsigned int datalen,
-                                     const char *name,
-                                     unsigned int *matchoff, unsigned int *matchlen,
-                                     union nf_inet_addr *addr, bool delim);
-extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
-                                       unsigned int off, unsigned int datalen,
-                                       const char *name,
-                                       unsigned int *matchoff, unsigned int *matchen,
-                                       unsigned int *val);
-
-extern int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
-                                unsigned int dataoff, unsigned int datalen,
+struct nf_nat_sip_hooks {
+       unsigned int (*msg)(struct sk_buff *skb,
+                           unsigned int protoff,
+                           unsigned int dataoff,
+                           const char **dptr,
+                           unsigned int *datalen);
+
+       void (*seq_adjust)(struct sk_buff *skb,
+                          unsigned int protoff, s16 off);
+
+       unsigned int (*expect)(struct sk_buff *skb,
+                              unsigned int protoff,
+                              unsigned int dataoff,
+                              const char **dptr,
+                              unsigned int *datalen,
+                              struct nf_conntrack_expect *exp,
+                              unsigned int matchoff,
+                              unsigned int matchlen);
+
+       unsigned int (*sdp_addr)(struct sk_buff *skb,
+                                unsigned int protoff,
+                                unsigned int dataoff,
+                                const char **dptr,
+                                unsigned int *datalen,
+                                unsigned int sdpoff,
                                 enum sdp_header_types type,
                                 enum sdp_header_types term,
-                                unsigned int *matchoff, unsigned int *matchlen);
+                                const union nf_inet_addr *addr);
+
+       unsigned int (*sdp_port)(struct sk_buff *skb,
+                                unsigned int protoff,
+                                unsigned int dataoff,
+                                const char **dptr,
+                                unsigned int *datalen,
+                                unsigned int matchoff,
+                                unsigned int matchlen,
+                                u_int16_t port);
+
+       unsigned int (*sdp_session)(struct sk_buff *skb,
+                                   unsigned int protoff,
+                                   unsigned int dataoff,
+                                   const char **dptr,
+                                   unsigned int *datalen,
+                                   unsigned int sdpoff,
+                                   const union nf_inet_addr *addr);
+
+       unsigned int (*sdp_media)(struct sk_buff *skb,
+                                 unsigned int protoff,
+                                 unsigned int dataoff,
+                                 const char **dptr,
+                                 unsigned int *datalen,
+                                 struct nf_conntrack_expect *rtp_exp,
+                                 struct nf_conntrack_expect *rtcp_exp,
+                                 unsigned int mediaoff,
+                                 unsigned int medialen,
+                                 union nf_inet_addr *rtp_addr);
+};
+extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+
+int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr,
+                        unsigned int datalen, unsigned int *matchoff,
+                        unsigned int *matchlen, union nf_inet_addr *addr,
+                        __be16 *port);
+int ct_sip_get_header(const struct nf_conn *ct, const char *dptr,
+                     unsigned int dataoff, unsigned int datalen,
+                     enum sip_header_types type, unsigned int *matchoff,
+                     unsigned int *matchlen);
+int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr,
+                           unsigned int *dataoff, unsigned int datalen,
+                           enum sip_header_types type, int *in_header,
+                           unsigned int *matchoff, unsigned int *matchlen,
+                           union nf_inet_addr *addr, __be16 *port);
+int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr,
+                              unsigned int dataoff, unsigned int datalen,
+                              const char *name, unsigned int *matchoff,
+                              unsigned int *matchlen, union nf_inet_addr *addr,
+                              bool delim);
+int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr,
+                                unsigned int off, unsigned int datalen,
+                                const char *name, unsigned int *matchoff,
+                                unsigned int *matchen, unsigned int *val);
+
+int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
+                         unsigned int dataoff, unsigned int datalen,
+                         enum sdp_header_types type,
+                         enum sdp_header_types term,
+                         unsigned int *matchoff, unsigned int *matchlen);
 
 #endif /* __KERNEL__ */
 #endif /* __NF_CONNTRACK_SIP_H__ */
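For context, the individual nf_nat_sip_* hook pointers are folded into the single nf_nat_sip_hooks structure published above as an RCU-protected pointer. A minimal sketch of how a NAT SIP helper might publish its callbacks follows; the callback names are hypothetical and only the .msg member is filled in, so treat it as an illustration of the registration pattern rather than the patch's own code.

/* Sketch: hypothetical NAT helper publishing an nf_nat_sip_hooks table. */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rcupdate.h>
#include <net/netfilter/nf_conntrack_sip.h>

static unsigned int my_nat_sip_msg(struct sk_buff *skb, unsigned int protoff,
				   unsigned int dataoff, const char **dptr,
				   unsigned int *datalen)
{
	/* mangle addresses/ports inside the SIP payload here */
	return NF_ACCEPT;
}

static const struct nf_nat_sip_hooks my_sip_hooks = {
	.msg = my_nat_sip_msg,
	/* .seq_adjust, .expect and the .sdp_* callbacks left NULL in this sketch */
};

static int __init my_nat_sip_init(void)
{
	RCU_INIT_POINTER(nf_nat_sip_hooks, &my_sip_hooks);
	return 0;
}

static void __exit my_nat_sip_exit(void)
{
	RCU_INIT_POINTER(nf_nat_sip_hooks, NULL);
	synchronize_rcu();
}

module_init(my_nat_sip_init);
module_exit(my_nat_sip_exit);
MODULE_LICENSE("GPL");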
index cadb7402d7a713fdcedbe4b3cb77c6095854f3ff..28c74367e900ac679aa5feba98b7629ea6338af3 100644 (file)
@@ -14,6 +14,9 @@ struct nfnl_callback {
        int (*call_rcu)(struct sock *nl, struct sk_buff *skb, 
                    const struct nlmsghdr *nlh,
                    const struct nlattr * const cda[]);
+       int (*call_batch)(struct sock *nl, struct sk_buff *skb,
+                         const struct nlmsghdr *nlh,
+                         const struct nlattr * const cda[]);
        const struct nla_policy *policy;        /* netlink attribute policy */
        const u_int16_t attr_count;             /* number of nlattr's */
 };
@@ -23,22 +26,24 @@ struct nfnetlink_subsystem {
        __u8 subsys_id;                 /* nfnetlink subsystem ID */
        __u8 cb_count;                  /* number of callbacks */
        const struct nfnl_callback *cb; /* callback for individual types */
+       int (*commit)(struct sk_buff *skb);
+       int (*abort)(struct sk_buff *skb);
 };
 
-extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
-extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
+int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
 
-extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
-extern struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
-                                          u32 dst_portid, gfp_t gfp_mask);
-extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
-                         unsigned int group, int echo, gfp_t flags);
-extern int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net,
-                            u32 portid, int flags);
+int nfnetlink_has_listeners(struct net *net, unsigned int group);
+struct sk_buff *nfnetlink_alloc_skb(struct net *net, unsigned int size,
+                                   u32 dst_portid, gfp_t gfp_mask);
+int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
+                  unsigned int group, int echo, gfp_t flags);
+int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
+                     int flags);
 
-extern void nfnl_lock(__u8 subsys_id);
-extern void nfnl_unlock(__u8 subsys_id);
+void nfnl_lock(__u8 subsys_id);
+void nfnl_unlock(__u8 subsys_id);
 
 #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
        MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
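The new call_batch, commit and abort members let a subsystem collect several netlink messages and apply them as one transaction. A rough sketch of a subsystem registration that uses them is shown below; the name, subsystem id and callback bodies are made up for illustration and are not part of this patch.

/* Sketch: hypothetical nfnetlink subsystem with transactional (batched) messages. */
static int my_obj_new(struct sock *nl, struct sk_buff *skb,
		      const struct nlmsghdr *nlh,
		      const struct nlattr * const cda[])
{
	/* queue the requested change; it only takes effect in ->commit() */
	return 0;
}

static int my_commit(struct sk_buff *skb)
{
	return 0;	/* apply every queued change atomically */
}

static int my_abort(struct sk_buff *skb)
{
	return 0;	/* discard the queued changes */
}

static const struct nfnl_callback my_cb[] = {
	[0] = { .call_batch = my_obj_new, .attr_count = 0, },
};

static const struct nfnetlink_subsystem my_subsys = {
	.name		= "mysubsys",		/* hypothetical */
	.subsys_id	= 12,			/* hypothetical id */
	.cb_count	= ARRAY_SIZE(my_cb),
	.cb		= my_cb,
	.commit		= my_commit,
	.abort		= my_abort,
};

/* registered from module init with nfnetlink_subsys_register(&my_subsys) */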
index bb4bbc9b7a18c4834724eabe574b76670806c8c4..b2e85e59f76085cb0e56294c78b63d3a04a0762a 100644 (file)
@@ -6,8 +6,8 @@
 
 struct nf_acct;
 
-extern struct nf_acct *nfnl_acct_find_get(const char *filter_name);
-extern void nfnl_acct_put(struct nf_acct *acct);
-extern void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
+struct nf_acct *nfnl_acct_find_get(const char *filter_name);
+void nfnl_acct_put(struct nf_acct *acct);
+void nfnl_acct_update(const struct sk_buff *skb, struct nf_acct *nfacct);
 
 #endif /* _NFNL_ACCT_H */
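As a usage note, nfnl_acct_find_get() returns a referenced object (or NULL), so every successful lookup must be balanced by nfnl_acct_put(). A minimal sketch, assuming an nfacct object named "my-counter" already exists and skb is the packet being accounted:

/* Sketch: account one packet against a named nfacct counter. */
struct nf_acct *acct;

acct = nfnl_acct_find_get("my-counter");	/* takes a reference, or returns NULL */
if (acct) {
	nfnl_acct_update(skb, acct);		/* add the packet and its bytes */
	nfnl_acct_put(acct);			/* drop the reference */
}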
index dd49566315c616f1d4db4cba47ee1dc9ecaefa26..a3e215bb0241d47379bce4ff6e81ba2b4995d3c3 100644 (file)
@@ -229,50 +229,48 @@ struct xt_table_info {
 
 #define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
                          + nr_cpu_ids * sizeof(char *))
-extern int xt_register_target(struct xt_target *target);
-extern void xt_unregister_target(struct xt_target *target);
-extern int xt_register_targets(struct xt_target *target, unsigned int n);
-extern void xt_unregister_targets(struct xt_target *target, unsigned int n);
-
-extern int xt_register_match(struct xt_match *target);
-extern void xt_unregister_match(struct xt_match *target);
-extern int xt_register_matches(struct xt_match *match, unsigned int n);
-extern void xt_unregister_matches(struct xt_match *match, unsigned int n);
-
-extern int xt_check_match(struct xt_mtchk_param *,
-                         unsigned int size, u_int8_t proto, bool inv_proto);
-extern int xt_check_target(struct xt_tgchk_param *,
-                          unsigned int size, u_int8_t proto, bool inv_proto);
-
-extern struct xt_table *xt_register_table(struct net *net,
-                                         const struct xt_table *table,
-                                         struct xt_table_info *bootstrap,
-                                         struct xt_table_info *newinfo);
-extern void *xt_unregister_table(struct xt_table *table);
-
-extern struct xt_table_info *xt_replace_table(struct xt_table *table,
-                                             unsigned int num_counters,
-                                             struct xt_table_info *newinfo,
-                                             int *error);
-
-extern struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
-extern struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
-extern struct xt_match *xt_request_find_match(u8 af, const char *name,
-                                             u8 revision);
-extern struct xt_target *xt_request_find_target(u8 af, const char *name,
-                                               u8 revision);
-extern int xt_find_revision(u8 af, const char *name, u8 revision,
-                           int target, int *err);
-
-extern struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
-                                          const char *name);
-extern void xt_table_unlock(struct xt_table *t);
-
-extern int xt_proto_init(struct net *net, u_int8_t af);
-extern void xt_proto_fini(struct net *net, u_int8_t af);
-
-extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
-extern void xt_free_table_info(struct xt_table_info *info);
+int xt_register_target(struct xt_target *target);
+void xt_unregister_target(struct xt_target *target);
+int xt_register_targets(struct xt_target *target, unsigned int n);
+void xt_unregister_targets(struct xt_target *target, unsigned int n);
+
+int xt_register_match(struct xt_match *target);
+void xt_unregister_match(struct xt_match *target);
+int xt_register_matches(struct xt_match *match, unsigned int n);
+void xt_unregister_matches(struct xt_match *match, unsigned int n);
+
+int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+                  bool inv_proto);
+int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+                   bool inv_proto);
+
+struct xt_table *xt_register_table(struct net *net,
+                                  const struct xt_table *table,
+                                  struct xt_table_info *bootstrap,
+                                  struct xt_table_info *newinfo);
+void *xt_unregister_table(struct xt_table *table);
+
+struct xt_table_info *xt_replace_table(struct xt_table *table,
+                                      unsigned int num_counters,
+                                      struct xt_table_info *newinfo,
+                                      int *error);
+
+struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
+struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
+struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
+int xt_find_revision(u8 af, const char *name, u8 revision, int target,
+                    int *err);
+
+struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
+                                   const char *name);
+void xt_table_unlock(struct xt_table *t);
+
+int xt_proto_init(struct net *net, u_int8_t af);
+void xt_proto_fini(struct net *net, u_int8_t af);
+
+struct xt_table_info *xt_alloc_table_info(unsigned int size);
+void xt_free_table_info(struct xt_table_info *info);
 
 /**
  * xt_recseq - recursive seqcount for netfilter use
@@ -353,8 +351,8 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
        return ret;
 }
 
-extern struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
-extern void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
+struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
+void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);
 
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
@@ -414,25 +412,25 @@ struct _compat_xt_align {
 
 #define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))
 
-extern void xt_compat_lock(u_int8_t af);
-extern void xt_compat_unlock(u_int8_t af);
-
-extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
-extern void xt_compat_flush_offsets(u_int8_t af);
-extern void xt_compat_init_offsets(u_int8_t af, unsigned int number);
-extern int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
-
-extern int xt_compat_match_offset(const struct xt_match *match);
-extern int xt_compat_match_from_user(struct xt_entry_match *m,
-                                    void **dstptr, unsigned int *size);
-extern int xt_compat_match_to_user(const struct xt_entry_match *m,
-                                  void __user **dstptr, unsigned int *size);
-
-extern int xt_compat_target_offset(const struct xt_target *target);
-extern void xt_compat_target_from_user(struct xt_entry_target *t,
-                                      void **dstptr, unsigned int *size);
-extern int xt_compat_target_to_user(const struct xt_entry_target *t,
-                                   void __user **dstptr, unsigned int *size);
+void xt_compat_lock(u_int8_t af);
+void xt_compat_unlock(u_int8_t af);
+
+int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
+void xt_compat_flush_offsets(u_int8_t af);
+void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+
+int xt_compat_match_offset(const struct xt_match *match);
+int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+                             unsigned int *size);
+int xt_compat_match_to_user(const struct xt_entry_match *m,
+                           void __user **dstptr, unsigned int *size);
+
+int xt_compat_target_offset(const struct xt_target *target);
+void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+                               unsigned int *size);
+int xt_compat_target_to_user(const struct xt_entry_target *t,
+                            void __user **dstptr, unsigned int *size);
 
 #endif /* CONFIG_COMPAT */
 #endif /* _X_TABLES_H */
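The API itself is unchanged by the extern cleanup above; for reference, a target module still pairs xt_register_target() with xt_unregister_target() as in the sketch below (target name and behaviour are hypothetical).

/* Sketch: hypothetical xtables target module. */
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>

static unsigned int my_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	return XT_CONTINUE;	/* no-op target: keep traversing the table */
}

static struct xt_target my_tg_reg __read_mostly = {
	.name		= "MYTARGET",
	.revision	= 0,
	.family		= NFPROTO_UNSPEC,
	.target		= my_tg,
	.targetsize	= 0,
	.me		= THIS_MODULE,
};

static int __init my_tg_init(void)
{
	return xt_register_target(&my_tg_reg);
}

static void __exit my_tg_exit(void)
{
	xt_unregister_target(&my_tg_reg);
}

module_init(my_tg_init);
module_exit(my_tg_exit);
MODULE_LICENSE("GPL");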
index dfb4d9e52bcb3a1775eebd627d1d758aaaed6c4b..8ab1c278b66da77229647e08ef8e0d948d4e6122 100644 (file)
@@ -25,7 +25,7 @@ enum nf_br_hook_priorities {
 #define BRNF_PPPoE                     0x20
 
 /* Only used in br_forward.c */
-extern int nf_bridge_copy_header(struct sk_buff *skb);
+int nf_bridge_copy_header(struct sk_buff *skb);
 static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
 {
        if (skb->nf_bridge &&
@@ -53,7 +53,7 @@ static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
        return 0;
 }
 
-extern int br_handle_frame_finish(struct sk_buff *skb);
+int br_handle_frame_finish(struct sk_buff *skb);
 /* Only used in br_device.c */
 static inline int br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
 {
index dfaf116b3e8125a3f36511bfe35b64fc81d463e3..6e4591bb54d495d2f4ee3f82058305be7539d025 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <uapi/linux/netfilter_ipv4.h>
 
-extern int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
-extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
-                                  unsigned int dataoff, u_int8_t protocol);
+int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type);
+__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
+                      unsigned int dataoff, u_int8_t protocol);
 #endif /*__LINUX_IP_NETFILTER_H*/
index 2d4df6ce043efab2f9017bc5359b90846f085fb7..64dad1cc1a4bc86d391e8e9cbffea5091b9bfa53 100644 (file)
 
 
 #ifdef CONFIG_NETFILTER
-extern int ip6_route_me_harder(struct sk_buff *skb);
-extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
-                                   unsigned int dataoff, u_int8_t protocol);
+int ip6_route_me_harder(struct sk_buff *skb);
+__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
+                       unsigned int dataoff, u_int8_t protocol);
 
-extern int ipv6_netfilter_init(void);
-extern void ipv6_netfilter_fini(void);
+int ipv6_netfilter_init(void);
+void ipv6_netfilter_fini(void);
 
 /*
  * Hook functions for ipv6 to allow xt_* modules to be built-in even
index 3ea4cde8701ce1e97d6a39a4a1264452468a5254..96235b53a3fddfa840d56461e1c48ec0fd491548 100644 (file)
@@ -269,9 +269,13 @@ static inline int NFS_STALE(const struct inode *inode)
        return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
 }
 
-static inline int NFS_FSCACHE(const struct inode *inode)
+static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode)
 {
-       return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+#ifdef CONFIG_NFS_FSCACHE
+       return NFS_I(inode)->fscache;
+#else
+       return NULL;
+#endif
 }
 
 static inline __u64 NFS_FILEID(const struct inode *inode)
@@ -457,14 +461,11 @@ extern int nfs3_removexattr (struct dentry *, const char *name);
 /*
  * linux/fs/nfs/direct.c
  */
-extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
-                       unsigned long);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
-                       const struct iovec *iov, unsigned long nr_segs,
-                       loff_t pos, bool uio);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
-                       const struct iovec *iov, unsigned long nr_segs,
-                       loff_t pos, bool uio);
+extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
+extern ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos);
+extern ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                       loff_t pos);
 
 /*
  * linux/fs/nfs/dir.c
index b8cedced50c9c70dd00c680d42a424bcdb90a8a9..f9c0a6cb41e945e148db3d4c927f84e25e76c33b 100644 (file)
@@ -41,6 +41,7 @@ struct nfs_client {
 #define NFS_CS_DISCRTRY                1               /* - disconnect on RPC retry */
 #define NFS_CS_MIGRATION       2               /* - transparent state migr */
 #define NFS_CS_INFINITE_SLOTS  3               /* - don't limit TCP slots */
+#define NFS_CS_NO_RETRANS_TIMEOUT      4       /* - Disable retransmit timeouts */
        struct sockaddr_storage cl_addr;        /* server identifier */
        size_t                  cl_addrlen;
        char *                  cl_hostname;    /* hostname of server */
index ed7f267e63897794773ee201e32ad01d89d7bd7d..6f10e938ff7e74d4db19b991a55193f5073946e7 100644 (file)
 #define __LINUX_OF_NET_H
 
 #ifdef CONFIG_OF_MTD
+
 #include <linux/of.h>
 int of_get_nand_ecc_mode(struct device_node *np);
 int of_get_nand_bus_width(struct device_node *np);
 bool of_get_nand_on_flash_bbt(struct device_node *np);
-#endif
+
+#else /* CONFIG_OF_MTD */
+
+static inline int of_get_nand_ecc_mode(struct device_node *np)
+{
+       return -ENOSYS;
+}
+
+static inline int of_get_nand_bus_width(struct device_node *np)
+{
+       return -ENOSYS;
+}
+
+static inline bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+       return false;
+}
+
+#endif /* CONFIG_OF_MTD */
 
 #endif /* __LINUX_OF_MTD_H */
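With the stubs above, a NAND driver can call the of_mtd helpers unconditionally and simply fall back when no device-tree data is available (or CONFIG_OF_MTD is off). A probe-time fragment, assuming the usual struct device_node *np and struct nand_chip *chip from <linux/mtd/nand.h>:

/* Sketch: unconditional use of the of_mtd helpers; the stubs cover !CONFIG_OF_MTD. */
int ecc_mode = of_get_nand_ecc_mode(np);

if (ecc_mode >= 0)
	chip->ecc.mode = ecc_mode;		/* parsed from the "nand-ecc-mode" property */
else
	chip->ecc.mode = NAND_ECC_SOFT;		/* no DT data, or OF_MTD disabled */

if (of_get_nand_bus_width(np) == 16)
	chip->options |= NAND_BUSWIDTH_16;

if (of_get_nand_on_flash_bbt(np))
	chip->bbt_options |= NAND_BBT_USE_FLASH;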
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
deleted file mode 100644 (file)
index c841282..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __OF_RESERVED_MEM_H
-#define __OF_RESERVED_MEM_H
-
-#ifdef CONFIG_OF_RESERVED_MEM
-void of_reserved_mem_device_init(struct device *dev);
-void of_reserved_mem_device_release(struct device *dev);
-void early_init_dt_scan_reserved_mem(void);
-#else
-static inline void of_reserved_mem_device_init(struct device *dev) { }
-static inline void of_reserved_mem_device_release(struct device *dev) { }
-static inline void early_init_dt_scan_reserved_mem(void) { }
-#endif
-
-#endif /* __OF_RESERVED_MEM_H */
diff --git a/include/linux/opp.h b/include/linux/opp.h
deleted file mode 100644 (file)
index 3aca2b8..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Generic OPP Interface
- *
- * Copyright (C) 2009-2010 Texas Instruments Incorporated.
- *     Nishanth Menon
- *     Romit Dasgupta
- *     Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_OPP_H__
-#define __LINUX_OPP_H__
-
-#include <linux/err.h>
-#include <linux/cpufreq.h>
-#include <linux/notifier.h>
-
-struct opp;
-struct device;
-
-enum opp_event {
-       OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
-};
-
-#if defined(CONFIG_PM_OPP)
-
-unsigned long opp_get_voltage(struct opp *opp);
-
-unsigned long opp_get_freq(struct opp *opp);
-
-int opp_get_opp_count(struct device *dev);
-
-struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
-                               bool available);
-
-struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq);
-
-struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq);
-
-int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt);
-
-int opp_enable(struct device *dev, unsigned long freq);
-
-int opp_disable(struct device *dev, unsigned long freq);
-
-struct srcu_notifier_head *opp_get_notifier(struct device *dev);
-#else
-static inline unsigned long opp_get_voltage(struct opp *opp)
-{
-       return 0;
-}
-
-static inline unsigned long opp_get_freq(struct opp *opp)
-{
-       return 0;
-}
-
-static inline int opp_get_opp_count(struct device *dev)
-{
-       return 0;
-}
-
-static inline struct opp *opp_find_freq_exact(struct device *dev,
-                                       unsigned long freq, bool available)
-{
-       return ERR_PTR(-EINVAL);
-}
-
-static inline struct opp *opp_find_freq_floor(struct device *dev,
-                                       unsigned long *freq)
-{
-       return ERR_PTR(-EINVAL);
-}
-
-static inline struct opp *opp_find_freq_ceil(struct device *dev,
-                                       unsigned long *freq)
-{
-       return ERR_PTR(-EINVAL);
-}
-
-static inline int opp_add(struct device *dev, unsigned long freq,
-                                       unsigned long u_volt)
-{
-       return -EINVAL;
-}
-
-static inline int opp_enable(struct device *dev, unsigned long freq)
-{
-       return 0;
-}
-
-static inline int opp_disable(struct device *dev, unsigned long freq)
-{
-       return 0;
-}
-
-static inline struct srcu_notifier_head *opp_get_notifier(struct device *dev)
-{
-       return ERR_PTR(-EINVAL);
-}
-#endif         /* CONFIG_PM_OPP */
-
-#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
-int of_init_opp_table(struct device *dev);
-#else
-static inline int of_init_opp_table(struct device *dev)
-{
-       return -EINVAL;
-}
-#endif
-
-#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
-int opp_init_cpufreq_table(struct device *dev,
-                           struct cpufreq_frequency_table **table);
-void opp_free_cpufreq_table(struct device *dev,
-                               struct cpufreq_frequency_table **table);
-#else
-static inline int opp_init_cpufreq_table(struct device *dev,
-                           struct cpufreq_frequency_table **table)
-{
-       return -EINVAL;
-}
-
-static inline
-void opp_free_cpufreq_table(struct device *dev,
-                               struct cpufreq_frequency_table **table)
-{
-}
-#endif         /* CONFIG_CPU_FREQ */
-
-#endif         /* __LINUX_OPP_H__ */
index 6d53675c2b54691225b12f3f23c914aca86c35ac..98ada58f9942855b90583de9c5ed324993d39101 100644 (file)
@@ -329,7 +329,9 @@ static inline void set_page_writeback(struct page *page)
  * System with lots of page flags available. This allows separate
  * flags for PageHead() and PageTail() checks of compound pages so that bit
  * tests can be used in performance sensitive paths. PageCompound is
- * generally not used in hot code paths.
+ * generally not used in hot code paths except arch/powerpc/mm/init_64.c
+ * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
+ * and avoid handling those in real mode.
  */
 __PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
 __PAGEFLAG(Tail, tail)
index da172f956ad6f0a6ff883d125ca70e9f97510612..d3a888ae4b2e3ad030bdb5f89354eea2f2567d0b 100644 (file)
@@ -330,8 +330,6 @@ struct pci_dev {
        unsigned int    msix_enabled:1;
        unsigned int    ari_enabled:1;  /* ARI forwarding */
        unsigned int    is_managed:1;
-       unsigned int    is_pcie:1;      /* Obsolete. Will be removed.
-                                          Use pci_is_pcie() instead */
        unsigned int    needs_freset:1; /* Dev requires fundamental reset */
        unsigned int    state_saved:1;
        unsigned int    is_physfn:1;
@@ -472,6 +470,10 @@ struct pci_bus {
 /*
  * Returns true if the pci bus is root (behind host-pci bridge),
  * false otherwise
+ *
+ * Some code assumes that "bus->self == NULL" means that bus is a root bus.
+ * This is incorrect because "virtual" buses added for SR-IOV (via
+ * virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
  */
 static inline bool pci_is_root_bus(struct pci_bus *pbus)
 {
@@ -1749,11 +1751,11 @@ static inline int pci_pcie_cap(struct pci_dev *dev)
  * pci_is_pcie - check if the PCI device is PCI Express capable
  * @dev: PCI device
  *
- * Retrun true if the PCI device is PCI Express capable, false otherwise.
+ * Returns: true if the PCI device is PCI Express capable, false otherwise.
  */
 static inline bool pci_is_pcie(struct pci_dev *dev)
 {
-       return !!pci_pcie_cap(dev);
+       return pci_pcie_cap(dev);
 }
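Given the note above about SR-IOV "virtual" buses, the helpers are the reliable way to test for a root bus or for PCIe capability. A small illustrative fragment (my_walk is hypothetical):

/* Sketch: prefer the helpers over open-coded pointer checks. */
static void my_walk(struct pci_dev *dev)
{
	if (pci_is_root_bus(dev->bus))		/* not "dev->bus->self == NULL" */
		pr_info("%s sits on a root bus\n", pci_name(dev));

	if (pci_is_pcie(dev))			/* capability-based check */
		pr_info("%s is PCI Express\n", pci_name(dev));
}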
 
 /**
index 866e85c5eb94517fc87ca1d49bc9acbde74dcc74..c8ba627c1d608733b8480bb929d9e81d97e57fa0 100644 (file)
@@ -294,9 +294,31 @@ struct ring_buffer;
  */
 struct perf_event {
 #ifdef CONFIG_PERF_EVENTS
-       struct list_head                group_entry;
+       /*
+        * entry onto perf_event_context::event_list;
+        *   modifications require ctx->lock
+        *   RCU safe iterations.
+        */
        struct list_head                event_entry;
+
+       /*
+        * XXX: group_entry and sibling_list should be mutually exclusive;
+        * either you're a sibling on a group, or you're the group leader.
+        * Rework the code to always use the same list element.
+        *
+        * Locked for modification by both ctx->mutex and ctx->lock; holding
+        * either suffices for read.
+        */
+       struct list_head                group_entry;
        struct list_head                sibling_list;
+
+       /*
+        * We need storage to track the entries in perf_pmu_migrate_context; we
+        * cannot use the event_entry because of RCU and we want to keep the
+        * group intact, which avoids using the other two entries.
+        */
+       struct list_head                migrate_entry;
+
        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
similarity index 97%
rename from include/linux/i2c/at24.h
rename to include/linux/platform_data/at24.h
index 285025a9cdc9a0533e2fb61abbdc71b1808224e8..c42aa89d34eeb46ff0ec5f4e68229ad1fa0fad2a 100644 (file)
@@ -28,7 +28,7 @@
  *
  * void get_mac_addr(struct memory_accessor *mem_acc, void *context)
  * {
- *     u8 *mac_addr = ethernet_pdata->mac_addr;
+ *     u8 *mac_addr = ethernet_pdata->mac_addr;
  *     off_t offset = context;
  *
  *     // Read MAC addr from EEPROM
index 8db5ae03b6e3f679c16988e492e8241f1567b2f5..689a856b86f90e1329515cf1ad9b80d3e2e7cdf5 100644 (file)
@@ -84,6 +84,8 @@ struct snd_platform_data {
        u8 version;
        u8 txnumevt;
        u8 rxnumevt;
+       int tx_dma_channel;
+       int rx_dma_channel;
 };
 
 enum {
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
new file mode 100644 (file)
index 0000000..5151b00
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ *     Nishanth Menon
+ *     Romit Dasgupta
+ *     Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_OPP_H__
+#define __LINUX_OPP_H__
+
+#include <linux/err.h>
+#include <linux/cpufreq.h>
+#include <linux/notifier.h>
+
+struct dev_pm_opp;
+struct device;
+
+enum dev_pm_opp_event {
+       OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+};
+
+#if defined(CONFIG_PM_OPP)
+
+unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
+
+unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+
+int dev_pm_opp_get_opp_count(struct device *dev);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+                                             unsigned long freq,
+                                             bool available);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+                                             unsigned long *freq);
+
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+                                            unsigned long *freq);
+
+int dev_pm_opp_add(struct device *dev, unsigned long freq,
+                  unsigned long u_volt);
+
+int dev_pm_opp_enable(struct device *dev, unsigned long freq);
+
+int dev_pm_opp_disable(struct device *dev, unsigned long freq);
+
+struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev);
+#else
+static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
+{
+       return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
+{
+       return 0;
+}
+
+static inline int dev_pm_opp_get_opp_count(struct device *dev)
+{
+       return 0;
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
+                                       unsigned long freq, bool available)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
+                                       unsigned long *freq)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
+                                       unsigned long *freq)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+                                       unsigned long u_volt)
+{
+       return -EINVAL;
+}
+
+static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
+{
+       return 0;
+}
+
+static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
+{
+       return 0;
+}
+
+static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
+                                                       struct device *dev)
+{
+       return ERR_PTR(-EINVAL);
+}
+#endif         /* CONFIG_PM_OPP */
+
+#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
+int of_init_opp_table(struct device *dev);
+#else
+static inline int of_init_opp_table(struct device *dev)
+{
+       return -EINVAL;
+}
+#endif
+
+#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+                           struct cpufreq_frequency_table **table);
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+                               struct cpufreq_frequency_table **table);
+#else
+static inline int dev_pm_opp_init_cpufreq_table(struct device *dev,
+                           struct cpufreq_frequency_table **table)
+{
+       return -EINVAL;
+}
+
+static inline
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+                               struct cpufreq_frequency_table **table)
+{
+}
+#endif         /* CONFIG_CPU_FREQ */
+
+#endif         /* __LINUX_OPP_H__ */
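This header only renames the interface (opp_* becomes dev_pm_opp_*, struct opp becomes struct dev_pm_opp); the calling convention is the same as before. A typical consumer fragment, assuming a struct device *dev and a target_freq in Hz, and keeping the usual requirement that the lookup and dev_pm_opp_get_voltage() run under rcu_read_lock():

/* Sketch: pick the OPP at or above target_freq and read its voltage. */
struct dev_pm_opp *opp;
unsigned long freq = target_freq;
unsigned long volt;

rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(dev, &freq);	/* freq is rounded up in place */
if (IS_ERR(opp)) {
	rcu_read_unlock();
	return PTR_ERR(opp);
}
volt = dev_pm_opp_get_voltage(opp);		/* microvolts */
rcu_read_unlock();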
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
new file mode 100644 (file)
index 0000000..4e25041
--- /dev/null
@@ -0,0 +1,325 @@
+/*
+ * powercap.h: Data types and headers for sysfs power capping interface
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.
+ *
+ */
+
+#ifndef __POWERCAP_H__
+#define __POWERCAP_H__
+
+#include <linux/device.h>
+#include <linux/idr.h>
+
+/*
+ * A power cap class device can contain multiple powercap control_types.
+ * Each control_type can have multiple power zones, which can be independently
+ * controlled. Each power zone can have one or more constraints.
+ */
+
+struct powercap_control_type;
+struct powercap_zone;
+struct powercap_zone_constraint;
+
+/**
+ * struct powercap_control_type_ops - Define control type callbacks
+ * @set_enable:                Enable/Disable whole control type.
+ *                     Default is enabled. But this callback allows all zones
+ *                     to be put into a disabled state, removing any
+ *                     applied power limits. If disabled, a power zone can
+ *                     only be monitored, not controlled.
+ * @get_enable:                get Enable/Disable status.
+ * @release:           Callback to inform that last reference to this
+ *                     control type is closed. So it is safe to free data
+ *                     structure associated with this control type.
+ *                     This callback is mandatory if the client owns the
+ *                     memory for the control type.
+ *
+ * This structure defines control type callbacks to be implemented by client
+ * drivers
+ */
+struct powercap_control_type_ops {
+       int (*set_enable) (struct powercap_control_type *, bool mode);
+       int (*get_enable) (struct powercap_control_type *, bool *mode);
+       int (*release) (struct powercap_control_type *);
+};
+
+/**
+ * struct powercap_control_type- Defines a powercap control_type
+ * @name:              name of control_type
+ * @dev:               device for this control_type
+ * @idr:               idr to have unique id for its child
+ * @root_node:         Root holding power zones for this control_type
+ * @ops:               Pointer to callback struct
+ * @node_lock:         mutex for control type
+ * @allocated:         It is possible that the client owns the memory
+ *                     used by this structure. In this case the framework
+ *                     sets this flag to false to prevent deallocation
+ *                     during the release process; otherwise the flag is
+ *                     set to true.
+ * @ctrl_inst:         link to the control_type list
+ *
+ * Defines powercap control_type. This acts as a container for power
+ * zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc.
+ * All fields are private and should not be used by client drivers.
+ */
+struct powercap_control_type {
+       struct device dev;
+       struct idr idr;
+       int nr_zones;
+       const struct powercap_control_type_ops *ops;
+       struct mutex lock;
+       bool allocated;
+       struct list_head node;
+};
+
+/**
+ * struct powercap_zone_ops - Define power zone callbacks
+ * @get_max_energy_range_uj:   Get maximum range of energy counter in
+ *                             micro-joules.
+ * @get_energy_uj:             Get current energy counter in micro-joules.
+ * @reset_energy_uj:           Reset micro-joules energy counter.
+ * @get_max_power_range_uw:    Get maximum range of power counter in
+ *                             micro-watts.
+ * @get_power_uw:              Get current power counter in micro-watts.
+ * @set_enable:                        Enable/Disable power zone controls.
+ *                             Default is enabled.
+ * @get_enable:                        get Enable/Disable status.
+ * @release:                   Callback to inform that last reference to this
+ *                             control type is closed. So it is safe to free
+ *                             data structure associated with this
+ *                             control type. Mandatory, if client driver owns
+ *                             the power_zone memory.
+ *
+ * This structure defines zone callbacks to be implemented by client drivers.
+ * Client drivers can define both energy and power related callbacks, but at
+ * least one type (either power or energy) is mandatory. Client drivers
+ * should handle mutual exclusion, if required in callbacks.
+ */
+struct powercap_zone_ops {
+       int (*get_max_energy_range_uj) (struct powercap_zone *, u64 *);
+       int (*get_energy_uj) (struct powercap_zone *, u64 *);
+       int (*reset_energy_uj) (struct powercap_zone *);
+       int (*get_max_power_range_uw) (struct powercap_zone *, u64 *);
+       int (*get_power_uw) (struct powercap_zone *, u64 *);
+       int (*set_enable) (struct powercap_zone *, bool mode);
+       int (*get_enable) (struct powercap_zone *, bool *mode);
+       int (*release) (struct powercap_zone *);
+};
+
+#define        POWERCAP_ZONE_MAX_ATTRS         6
+#define        POWERCAP_CONSTRAINTS_ATTRS      8
+#define MAX_CONSTRAINTS_PER_ZONE       10
+/**
+ * struct powercap_zone- Defines instance of a power cap zone
+ * @id:                        Unique id
+ * @name:              Power zone name.
+ * @control_type_inst: Control type instance for this zone.
+ * @ops:               Pointer to the zone operation structure.
+ * @dev:               Instance of a device.
+ * @const_id_cnt:      Number of constraint defined.
+ * @idr:               Instance to an idr entry for children zones.
+ * @parent_idr:                To remove reference from the parent idr.
+ * @private_data:      Private data pointer if any for this zone.
+ * @zone_dev_attrs:    Attributes associated with this device.
+ * @zone_attr_count:   Attribute count.
+ * @dev_zone_attr_group: Attribute group for attributes.
+ * @dev_attr_groups:   Attribute group store to register with device.
+ * @allocated:         It is possible that the client owns the memory
+ *                     used by this structure. In this case the framework
+ *                     sets this flag to false to prevent deallocation
+ *                     during the release process; otherwise the flag is
+ *                     set to true.
+ * @constraint_ptr:    List of constraints for this zone.
+ *
+ * This defines a power zone instance. The fields of this structure are
+ * private, and should not be used by client drivers.
+ */
+struct powercap_zone {
+       int id;
+       char *name;
+       void *control_type_inst;
+       const struct powercap_zone_ops *ops;
+       struct device dev;
+       int const_id_cnt;
+       struct idr idr;
+       struct idr *parent_idr;
+       void *private_data;
+       struct attribute **zone_dev_attrs;
+       int zone_attr_count;
+       struct attribute_group dev_zone_attr_group;
+       const struct attribute_group *dev_attr_groups[2]; /* 1 group + NULL */
+       bool allocated;
+       struct powercap_zone_constraint *constraints;
+};
+
+/**
+ * struct powercap_zone_constraint_ops - Define constraint callbacks
+ * @set_power_limit_uw:                Set power limit in micro-watts.
+ * @get_power_limit_uw:                Get power limit in micro-watts.
+ * @set_time_window_us:                Set time window in micro-seconds.
+ * @get_time_window_us:                Get time window in micro-seconds.
+ * @get_max_power_uw:          Get max power allowed in micro-watts.
+ * @get_min_power_uw:          Get min power allowed in micro-watts.
+ * @get_max_time_window_us:    Get max time window allowed in micro-seconds.
+ * @get_min_time_window_us:    Get min time window allowed in micro-seconds.
+ * @get_name:                  Get the name of constraint
+ *
+ * This structure is used to define the constraint callbacks for the client
+ * drivers. The following callbacks are mandatory and can't be NULL:
+ *  set_power_limit_uw
+ *  get_power_limit_uw
+ *  set_time_window_us
+ *  get_time_window_us
+ *  get_name
+ *  Client drivers should handle mutual exclusion, if required in callbacks.
+ */
+struct powercap_zone_constraint_ops {
+       int (*set_power_limit_uw) (struct powercap_zone *, int, u64);
+       int (*get_power_limit_uw) (struct powercap_zone *, int, u64 *);
+       int (*set_time_window_us) (struct powercap_zone *, int, u64);
+       int (*get_time_window_us) (struct powercap_zone *, int, u64 *);
+       int (*get_max_power_uw) (struct powercap_zone *, int, u64 *);
+       int (*get_min_power_uw) (struct powercap_zone *, int, u64 *);
+       int (*get_max_time_window_us) (struct powercap_zone *, int, u64 *);
+       int (*get_min_time_window_us) (struct powercap_zone *, int, u64 *);
+       const char *(*get_name) (struct powercap_zone *, int);
+};
+
+/**
+ * struct powercap_zone_constraint- Defines instance of a constraint
+ * @id:                        Instance Id of this constraint.
+ * @power_zone:                Pointer to the power zone for this constraint.
+ * @ops:               Pointer to the constraint callbacks.
+ *
+ * This defines a constraint instance.
+ */
+struct powercap_zone_constraint {
+       int id;
+       struct powercap_zone *power_zone;
+       struct powercap_zone_constraint_ops *ops;
+};
+
+
+/* For clients to get their device pointer, may be used for dev_dbgs */
+#define POWERCAP_GET_DEV(power_zone)   (&power_zone->dev)
+
+/**
+* powercap_set_zone_data() - Set private data for a zone
+* @power_zone: A pointer to the valid zone instance.
+* @pdata:      A pointer to the user private data.
+*
+* Allows client drivers to associate some private data to zone instance.
+*/
+static inline void powercap_set_zone_data(struct powercap_zone *power_zone,
+                                               void *pdata)
+{
+       if (power_zone)
+               power_zone->private_data = pdata;
+}
+
+/**
+* powercap_get_zone_data() - Get private data for a zone
+* @power_zone: A pointer to the valid zone instance.
+*
+* Allows client drivers to get the private data associated with a zone,
+* previously set by a call to powercap_set_zone_data().
+*/
+static inline void *powercap_get_zone_data(struct powercap_zone *power_zone)
+{
+       if (power_zone)
+               return power_zone->private_data;
+       return NULL;
+}
+
+/**
+* powercap_register_control_type() - Register a control_type with framework
+* @control_type:       Pointer to client allocated memory for the control type
+*                      structure storage. If this is NULL, powercap framework
+*                      will allocate memory and own it.
+*                      Advantage of this parameter is that client can embed
+*                      this data in its data structures and allocate in a
+*                      single call, preventing multiple allocations.
+* @control_type_name:  The Name of this control_type, which will be shown
+*                      in the sysfs Interface.
+* @ops:                        Callbacks for control type. This parameter is optional.
+*
+* Used to create a control_type with the power capping class. Here control_type
+* can represent a type of technology, which can control a range of power zones.
+* For example, a control_type can be RAPL (Running Average Power Limit) on
+* Intel® 64 and IA-32 processor architectures. The name can be any string
+* which must be unique, otherwise this function returns NULL.
+* A pointer to the control_type instance is returned on success.
+*/
+struct powercap_control_type *powercap_register_control_type(
+                               struct powercap_control_type *control_type,
+                               const char *name,
+                               const struct powercap_control_type_ops *ops);
+
+/**
+* powercap_unregister_control_type() - Unregister a control_type from framework
+* @instance:   A pointer to the valid control_type instance.
+*
+* Used to unregister a control_type with the power capping class.
+* All power zones registered under this control type have to be unregistered
+* before calling this function, or it will fail with an error code.
+*/
+int powercap_unregister_control_type(struct powercap_control_type *instance);
+
+/* Zone register/unregister API */
+
+/**
+* powercap_register_zone() - Register a power zone
+* @power_zone: Pointer to client allocated memory for the power zone structure
+*              storage. If this is NULL, powercap framework will allocate
+*              memory and own it. Advantage of this parameter is that client
+*              can embed this data in its data structures and allocate in a
+*              single call, preventing multiple allocations.
+* @control_type: A control_type instance under which this zone operates.
+* @name:       A name for this zone.
+* @parent:     A pointer to the parent power zone instance if any or NULL
+* @ops:                Pointer to zone operation callback structure.
+* @no_constraints: Number of constraints for this zone
+* @const_ops:  Pointer to constraint callback structure
+*
+* Register a power zone under a given control type. A power zone must register
+* a pointer to a structure representing zone callbacks.
+* A power zone can be located under a parent power zone, in which case @parent
+* should point to it.  Otherwise, if @parent is NULL, the new power zone will
+* be located directly under the given control type.
+* For each power zone there may be a number of constraints that appear in the
+* sysfs under that zone as attributes with unique numeric IDs.
+* Returns pointer to the power_zone on success.
+*/
+struct powercap_zone *powercap_register_zone(
+                       struct powercap_zone *power_zone,
+                       struct powercap_control_type *control_type,
+                       const char *name,
+                       struct powercap_zone *parent,
+                       const struct powercap_zone_ops *ops,
+                       int nr_constraints,
+                       struct powercap_zone_constraint_ops *const_ops);
+
+/**
+* powercap_unregister_zone() - Unregister a zone device
+* @control_type:       A pointer to the valid instance of a control_type.
+* @power_zone: A pointer to the valid zone instance for a control_type
+*
+* Used to unregister a zone device for a control_type.  Caller should
+* make sure that children for this zone are unregistered first.
+*/
+int powercap_unregister_zone(struct powercap_control_type *control_type,
+                               struct powercap_zone *power_zone);
+
+#endif
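Putting the registration calls together, a client driver creates one control type and then registers zones beneath it. The sketch below follows that order; the names are invented, error handling is trimmed, and my_zone_ops/my_constraint_ops stand for fully populated callback tables defined elsewhere. Treating the return values as ERR_PTR() values is an assumption about the framework implementation, not something stated in this header.

/* Sketch: register a control type and one zone beneath it. */
static struct powercap_control_type *ct;
static struct powercap_zone *pkg_zone;

static int __init my_powercap_init(void)
{
	/* NULL first argument: let the framework allocate and own the structure */
	ct = powercap_register_control_type(NULL, "my-control", NULL);
	if (IS_ERR(ct))
		return PTR_ERR(ct);

	pkg_zone = powercap_register_zone(NULL, ct, "zone-0",
					  NULL,			/* no parent zone */
					  &my_zone_ops,		/* hypothetical callbacks */
					  1, &my_constraint_ops);
	if (IS_ERR(pkg_zone)) {
		powercap_unregister_control_type(ct);
		return PTR_ERR(pkg_zone);
	}
	return 0;
}

static void __exit my_powercap_exit(void)
{
	powercap_unregister_zone(ct, pkg_zone);
	powercap_unregister_control_type(ct);
}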
index 3b9377d6b7a5fd63b13d02fc238d7da99fbef026..6312dd9ba449b4d65f5bb6bcdc01d606fc2fdfa8 100644 (file)
@@ -17,6 +17,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags);
 extern void get_random_bytes(void *buf, int nbytes);
 extern void get_random_bytes_arch(void *buf, int nbytes);
 void generate_random_uuid(unsigned char uuid_out[16]);
+extern int random_int_secret_init(void);
 
 #ifndef MODULE
 extern const struct file_operations random_fops, urandom_fops;
index a10380bfbeac9b4c4d018c99a5c914d672833f04..dc90b8c134a12a9d5b87755a3e27fcfa7b01e0c8 100644 (file)
@@ -374,6 +374,7 @@ int regmap_reinit_cache(struct regmap *map,
                        const struct regmap_config *config);
 struct regmap *dev_get_regmap(struct device *dev, const char *name);
 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val);
+int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val);
 int regmap_raw_write(struct regmap *map, unsigned int reg,
                     const void *val, size_t val_len);
 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
@@ -387,9 +388,14 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
                     size_t val_count);
 int regmap_update_bits(struct regmap *map, unsigned int reg,
                       unsigned int mask, unsigned int val);
+int regmap_update_bits_async(struct regmap *map, unsigned int reg,
+                            unsigned int mask, unsigned int val);
 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
                             unsigned int mask, unsigned int val,
                             bool *change);
+int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
+                                  unsigned int mask, unsigned int val,
+                                  bool *change);
 int regmap_get_val_bytes(struct regmap *map);
 int regmap_async_complete(struct regmap *map);
 bool regmap_can_raw_write(struct regmap *map);
@@ -425,11 +431,15 @@ bool regmap_reg_in_ranges(unsigned int reg,
  * @reg: Offset of the register within the regmap bank
  * @lsb: lsb of the register field.
  * @msb: msb of the register field.
+ * @id_size: number of port instances covered by the field, if any
+ * @id_offset: register address offset between successive ports
  */
 struct reg_field {
        unsigned int reg;
        unsigned int lsb;
        unsigned int msb;
+       unsigned int id_size;
+       unsigned int id_offset;
 };
 
 #define REG_FIELD(_reg, _lsb, _msb) {          \
@@ -448,6 +458,15 @@ void devm_regmap_field_free(struct device *dev,    struct regmap_field *field);
 
 int regmap_field_read(struct regmap_field *field, unsigned int *val);
 int regmap_field_write(struct regmap_field *field, unsigned int val);
+int regmap_field_update_bits(struct regmap_field *field,
+                            unsigned int mask, unsigned int val);
+
+int regmap_fields_write(struct regmap_field *field, unsigned int id,
+                       unsigned int val);
+int regmap_fields_read(struct regmap_field *field, unsigned int id,
+                      unsigned int *val);
+int regmap_fields_update_bits(struct regmap_field *field,  unsigned int id,
+                             unsigned int mask, unsigned int val);
 
 /**
  * Description of an IRQ for the generic regmap irq_chip.
@@ -527,6 +546,13 @@ static inline int regmap_write(struct regmap *map, unsigned int reg,
        return -EINVAL;
 }
 
+static inline int regmap_write_async(struct regmap *map, unsigned int reg,
+                                    unsigned int val)
+{
+       WARN_ONCE(1, "regmap API is disabled");
+       return -EINVAL;
+}
+
 static inline int regmap_raw_write(struct regmap *map, unsigned int reg,
                                   const void *val, size_t val_len)
 {
@@ -576,6 +602,14 @@ static inline int regmap_update_bits(struct regmap *map, unsigned int reg,
        return -EINVAL;
 }
 
+static inline int regmap_update_bits_async(struct regmap *map,
+                                          unsigned int reg,
+                                          unsigned int mask, unsigned int val)
+{
+       WARN_ONCE(1, "regmap API is disabled");
+       return -EINVAL;
+}
+
 static inline int regmap_update_bits_check(struct regmap *map,
                                           unsigned int reg,
                                           unsigned int mask, unsigned int val,
@@ -585,6 +619,16 @@ static inline int regmap_update_bits_check(struct regmap *map,
        return -EINVAL;
 }
 
+static inline int regmap_update_bits_check_async(struct regmap *map,
+                                                unsigned int reg,
+                                                unsigned int mask,
+                                                unsigned int val,
+                                                bool *change)
+{
+       WARN_ONCE(1, "regmap API is disabled");
+       return -EINVAL;
+}
+
 static inline int regmap_get_val_bytes(struct regmap *map)
 {
        WARN_ONCE(1, "regmap API is disabled");
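The *_async variants added in this file pair with the existing regmap_async_complete(): writes are queued without blocking and the caller waits once at the end. A fragment with hypothetical register addresses, assuming a bus that supports async I/O:

/* Sketch: queue several non-blocking writes, then wait for all of them. */
int ret;

ret = regmap_write_async(map, 0x10, 0x01);
if (!ret)
	ret = regmap_update_bits_async(map, 0x14, 0x0f, 0x03);
if (!ret)
	ret = regmap_async_complete(map);	/* block until everything has landed */
return ret;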
index 6682da36b293cfad598a0c5803e9e32038f72aa3..e27baeeda3f470ed99ae899d8de22da062856bf8 100644 (file)
@@ -1394,11 +1394,10 @@ struct task_struct {
        } memcg_batch;
        unsigned int memcg_kmem_skip_account;
        struct memcg_oom_info {
+               struct mem_cgroup *memcg;
+               gfp_t gfp_mask;
+               int order;
                unsigned int may_oom:1;
-               unsigned int in_memcg_oom:1;
-               unsigned int oom_locked:1;
-               int wakeups;
-               struct mem_cgroup *wait_on_memcg;
        } memcg_oom;
 #endif
 #ifdef CONFIG_UPROBES
index d34049712a4d7cee24958816840ca1c843e0541f..3dbdf7e53dcc0942153be0a59cdeebd73caa0973 100644 (file)
@@ -5,18 +5,22 @@
 #include <linux/sh_dma.h>
 
 /*
- * Generic header for SuperH (H)SCI(F) (used by sh/sh64/h8300 and related parts)
+ * Generic header for SuperH (H)SCI(F) (used by sh/sh64 and related parts)
  */
 
 #define SCIx_NOT_SUPPORTED     (-1)
 
 enum {
+       SCBRR_ALGO_INVALID,
+
        SCBRR_ALGO_1,           /* ((clk + 16 * bps) / (16 * bps) - 1) */
        SCBRR_ALGO_2,           /* ((clk + 16 * bps) / (32 * bps) - 1) */
        SCBRR_ALGO_3,           /* (((clk * 2) + 16 * bps) / (16 * bps) - 1) */
        SCBRR_ALGO_4,           /* (((clk * 2) + 16 * bps) / (32 * bps) - 1) */
        SCBRR_ALGO_5,           /* (((clk * 1000 / 32) / bps) - 1) */
        SCBRR_ALGO_6,           /* HSCIF variable sample rate algorithm */
+
+       SCBRR_NR_ALGOS,
 };
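As a worked example of the formulas in the comments (figures are illustrative only): with SCBRR_ALGO_2, a 48 MHz peripheral clock and 115200 baud give (48000000 + 16 * 115200) / (32 * 115200) - 1 = 12 after integer division, which is the value programmed into the bit-rate register.

/* Worked example for SCBRR_ALGO_2 (illustrative clock and baud rate). */
unsigned int scbrr = (48000000 + 16 * 115200) / (32 * 115200) - 1;	/* = 12 */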
 
 #define SCSCR_TIE      (1 << 7)
index c2d89335f6370d6e72a06470290a2a6e8a4cac7b..2c154976394b8a50baec6b4f17486dfb32fdac28 100644 (file)
@@ -318,9 +318,13 @@ enum {
 
        SKB_GSO_GRE = 1 << 6,
 
-       SKB_GSO_UDP_TUNNEL = 1 << 7,
+       SKB_GSO_IPIP = 1 << 7,
 
-       SKB_GSO_MPLS = 1 << 8,
+       SKB_GSO_SIT = 1 << 8,
+
+       SKB_GSO_UDP_TUNNEL = 1 << 9,
+
+       SKB_GSO_MPLS = 1 << 10,
 };
 
 #if BITS_PER_LONG > 32
@@ -585,8 +589,8 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
        skb->_skb_refdst = (unsigned long)dst;
 }
 
-extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
-                               bool force);
+void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
+                        bool force);
 
 /**
  * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
@@ -634,20 +638,20 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
        return (struct rtable *)skb_dst(skb);
 }
 
-extern void kfree_skb(struct sk_buff *skb);
-extern void kfree_skb_list(struct sk_buff *segs);
-extern void skb_tx_error(struct sk_buff *skb);
-extern void consume_skb(struct sk_buff *skb);
-extern void           __kfree_skb(struct sk_buff *skb);
+void kfree_skb(struct sk_buff *skb);
+void kfree_skb_list(struct sk_buff *segs);
+void skb_tx_error(struct sk_buff *skb);
+void consume_skb(struct sk_buff *skb);
+void  __kfree_skb(struct sk_buff *skb);
 extern struct kmem_cache *skbuff_head_cache;
 
-extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
-extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
-                            bool *fragstolen, int *delta_truesize);
+void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
+bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
+                     bool *fragstolen, int *delta_truesize);
 
-extern struct sk_buff *__alloc_skb(unsigned int size,
-                                  gfp_t priority, int flags, int node);
-extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
+struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
+                           int node);
+struct sk_buff *build_skb(void *data, unsigned int frag_size);
 static inline struct sk_buff *alloc_skb(unsigned int size,
                                        gfp_t priority)
 {
@@ -660,41 +664,33 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
        return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 }
 
-extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
+struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
 static inline struct sk_buff *alloc_skb_head(gfp_t priority)
 {
        return __alloc_skb_head(priority, -1);
 }
 
-extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
-extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
-extern struct sk_buff *skb_clone(struct sk_buff *skb,
-                                gfp_t priority);
-extern struct sk_buff *skb_copy(const struct sk_buff *skb,
-                               gfp_t priority);
-extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
-                                int headroom, gfp_t gfp_mask);
-
-extern int            pskb_expand_head(struct sk_buff *skb,
-                                       int nhead, int ntail,
-                                       gfp_t gfp_mask);
-extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
-                                           unsigned int headroom);
-extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-                                      int newheadroom, int newtailroom,
-                                      gfp_t priority);
-extern int            skb_to_sgvec(struct sk_buff *skb,
-                                   struct scatterlist *sg, int offset,
-                                   int len);
-extern int            skb_cow_data(struct sk_buff *skb, int tailbits,
-                                   struct sk_buff **trailer);
-extern int            skb_pad(struct sk_buff *skb, int pad);
+struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
+struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
+struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t priority);
+struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask);
+
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask);
+struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+                                    unsigned int headroom);
+struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
+                               int newtailroom, gfp_t priority);
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
+                int len);
+int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
+int skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a)       consume_skb(a)
 
-extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
-                       int getfrag(void *from, char *to, int offset,
-                       int len,int odd, struct sk_buff *skb),
-                       void *from, int length);
+int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
+                           int getfrag(void *from, char *to, int offset,
+                                       int len, int odd, struct sk_buff *skb),
+                           void *from, int length);
 
 struct skb_seq_state {
        __u32           lower_offset;
@@ -706,18 +702,17 @@ struct skb_seq_state {
        __u8            *frag_data;
 };
 
-extern void          skb_prepare_seq_read(struct sk_buff *skb,
-                                          unsigned int from, unsigned int to,
-                                          struct skb_seq_state *st);
-extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
-                                  struct skb_seq_state *st);
-extern void          skb_abort_seq_read(struct skb_seq_state *st);
+void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
+                         unsigned int to, struct skb_seq_state *st);
+unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
+                         struct skb_seq_state *st);
+void skb_abort_seq_read(struct skb_seq_state *st);
 
-extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
-                                   unsigned int to, struct ts_config *config,
-                                   struct ts_state *state);
+unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+                          unsigned int to, struct ts_config *config,
+                          struct ts_state *state);
 
-extern void __skb_get_rxhash(struct sk_buff *skb);
+void __skb_get_rxhash(struct sk_buff *skb);
 static inline __u32 skb_get_rxhash(struct sk_buff *skb)
 {
        if (!skb->l4_rxhash)
@@ -1095,7 +1090,8 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
  *     The "__skb_xxxx()" functions are the non-atomic ones that
  *     can only be called with interrupts disabled.
  */
-extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
+void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
+               struct sk_buff_head *list);
 static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
@@ -1201,8 +1197,8 @@ static inline void __skb_queue_after(struct sk_buff_head *list,
        __skb_insert(newsk, prev, prev->next, list);
 }
 
-extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
-                      struct sk_buff_head *list);
+void skb_append(struct sk_buff *old, struct sk_buff *newsk,
+               struct sk_buff_head *list);
 
 static inline void __skb_queue_before(struct sk_buff_head *list,
                                      struct sk_buff *next,
@@ -1221,7 +1217,7 @@ static inline void __skb_queue_before(struct sk_buff_head *list,
  *
  *     A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_head(struct sk_buff_head *list,
                                    struct sk_buff *newsk)
 {
@@ -1238,7 +1234,7 @@ static inline void __skb_queue_head(struct sk_buff_head *list,
  *
  *     A buffer cannot be placed on two lists at the same time.
  */
-extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
 static inline void __skb_queue_tail(struct sk_buff_head *list,
                                   struct sk_buff *newsk)
 {
@@ -1249,7 +1245,7 @@ static inline void __skb_queue_tail(struct sk_buff_head *list,
  * remove sk_buff from list. _Must_ be called atomically, and with
  * the list known..
  */
-extern void       skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
+void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 {
        struct sk_buff *next, *prev;
@@ -1270,7 +1266,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
  *     so must be used with appropriate locks held only. The head item is
  *     returned or %NULL if the list is empty.
  */
-extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
 {
        struct sk_buff *skb = skb_peek(list);
@@ -1287,7 +1283,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
  *     so must be used with appropriate locks held only. The tail item is
  *     returned or %NULL if the list is empty.
  */
-extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
 {
        struct sk_buff *skb = skb_peek_tail(list);
@@ -1373,8 +1369,8 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
        skb_shinfo(skb)->nr_frags = i + 1;
 }
 
-extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
-                           int off, int size, unsigned int truesize);
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+                    int size, unsigned int truesize);
 
 #define SKB_PAGE_ASSERT(skb)   BUG_ON(skb_shinfo(skb)->nr_frags)
 #define SKB_FRAG_ASSERT(skb)   BUG_ON(skb_has_frag_list(skb))
@@ -1418,7 +1414,7 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
 /*
  *     Add data to an sk_buff
  */
-extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
 {
        unsigned char *tmp = skb_tail_pointer(skb);
@@ -1428,7 +1424,7 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
        return tmp;
 }
 
-extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
 {
        skb->data -= len;
@@ -1436,7 +1432,7 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
        return skb->data;
 }
 
-extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
+unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
        skb->len -= len;
@@ -1449,7 +1445,7 @@ static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int l
        return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
 }
 
-extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
 
 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
 {
@@ -1753,7 +1749,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 #define NET_SKB_PAD    max(32, L1_CACHE_BYTES)
 #endif
 
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+int ___pskb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1765,7 +1761,7 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
        skb_set_tail_pointer(skb, len);
 }
 
-extern void skb_trim(struct sk_buff *skb, unsigned int len);
+void skb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
 {
@@ -1838,7 +1834,7 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
  *     the list and one reference dropped. This function does not take the
  *     list lock and the caller must hold the relevant locks to use it.
  */
-extern void skb_queue_purge(struct sk_buff_head *list);
+void skb_queue_purge(struct sk_buff_head *list);
 static inline void __skb_queue_purge(struct sk_buff_head *list)
 {
        struct sk_buff *skb;
@@ -1850,11 +1846,10 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 #define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
 #define NETDEV_PAGECNT_MAX_BIAS           NETDEV_FRAG_PAGE_MAX_SIZE
 
-extern void *netdev_alloc_frag(unsigned int fragsz);
+void *netdev_alloc_frag(unsigned int fragsz);
 
-extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
-                                         unsigned int length,
-                                         gfp_t gfp_mask);
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
+                                  gfp_t gfp_mask);
 
 /**
  *     netdev_alloc_skb - allocate an skbuff for rx on a specific device
@@ -2071,6 +2066,8 @@ static inline void skb_frag_set_page(struct sk_buff *skb, int f,
        __skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
 }
 
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio);
+
 /**
  * skb_frag_dma_map - maps a paged fragment via the DMA API
  * @dev: the device to map the fragment to
@@ -2342,60 +2339,42 @@ static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
 #define skb_walk_frags(skb, iter)      \
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
-extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
-                                          int *peeked, int *off, int *err);
-extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
-                                        int noblock, int *err);
-extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
-                                    struct poll_table_struct *wait);
-extern int            skb_copy_datagram_iovec(const struct sk_buff *from,
-                                              int offset, struct iovec *to,
-                                              int size);
-extern int            skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
-                                                       int hlen,
-                                                       struct iovec *iov);
-extern int            skb_copy_datagram_from_iovec(struct sk_buff *skb,
-                                                   int offset,
-                                                   const struct iovec *from,
-                                                   int from_offset,
-                                                   int len);
-extern int            zerocopy_sg_from_iovec(struct sk_buff *skb,
-                                             const struct iovec *frm,
-                                             int offset,
-                                             size_t count);
-extern int            skb_copy_datagram_const_iovec(const struct sk_buff *from,
-                                                    int offset,
-                                                    const struct iovec *to,
-                                                    int to_offset,
-                                                    int size);
-extern void           skb_free_datagram(struct sock *sk, struct sk_buff *skb);
-extern void           skb_free_datagram_locked(struct sock *sk,
-                                               struct sk_buff *skb);
-extern int            skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
-                                        unsigned int flags);
-extern __wsum         skb_checksum(const struct sk_buff *skb, int offset,
-                                   int len, __wsum csum);
-extern int            skb_copy_bits(const struct sk_buff *skb, int offset,
-                                    void *to, int len);
-extern int            skb_store_bits(struct sk_buff *skb, int offset,
-                                     const void *from, int len);
-extern __wsum         skb_copy_and_csum_bits(const struct sk_buff *skb,
-                                             int offset, u8 *to, int len,
-                                             __wsum csum);
-extern int             skb_splice_bits(struct sk_buff *skb,
-                                               unsigned int offset,
-                                               struct pipe_inode_info *pipe,
-                                               unsigned int len,
-                                               unsigned int flags);
-extern void           skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
-extern void           skb_split(struct sk_buff *skb,
-                                struct sk_buff *skb1, const u32 len);
-extern int            skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
-                                int shiftlen);
-extern void           skb_scrub_packet(struct sk_buff *skb, bool xnet);
-
-extern struct sk_buff *skb_segment(struct sk_buff *skb,
-                                  netdev_features_t features);
+struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+                                   int *peeked, int *off, int *err);
+struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
+                                 int *err);
+unsigned int datagram_poll(struct file *file, struct socket *sock,
+                          struct poll_table_struct *wait);
+int skb_copy_datagram_iovec(const struct sk_buff *from, int offset,
+                           struct iovec *to, int size);
+int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen,
+                                    struct iovec *iov);
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+                                const struct iovec *from, int from_offset,
+                                int len);
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm,
+                          int offset, size_t count);
+int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset,
+                                 const struct iovec *to, int to_offset,
+                                 int size);
+void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
+int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
+__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
+                   __wsum csum);
+int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
+int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len);
+__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to,
+                             int len, __wsum csum);
+int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
+                   struct pipe_inode_info *pipe, unsigned int len,
+                   unsigned int flags);
+void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
+int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
+void skb_scrub_packet(struct sk_buff *skb, bool xnet);
+
+struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
 
 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
                                       int len, void *buffer)
@@ -2440,7 +2419,7 @@ static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
        memcpy(skb->data + offset, from, len);
 }
 
-extern void skb_init(void);
+void skb_init(void);
 
 static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
 {
@@ -2483,12 +2462,12 @@ static inline ktime_t net_invalid_timestamp(void)
        return ktime_set(0, 0);
 }
 
-extern void skb_timestamping_init(void);
+void skb_timestamping_init(void);
 
 #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
 
-extern void skb_clone_tx_timestamp(struct sk_buff *skb);
-extern bool skb_defer_rx_timestamp(struct sk_buff *skb);
+void skb_clone_tx_timestamp(struct sk_buff *skb);
+bool skb_defer_rx_timestamp(struct sk_buff *skb);
 
 #else /* CONFIG_NETWORK_PHY_TIMESTAMPING */
 
@@ -2529,8 +2508,8 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
  * generates a software time stamp (otherwise), then queues the clone
  * to the error queue of the socket.  Errors are silently ignored.
  */
-extern void skb_tstamp_tx(struct sk_buff *orig_skb,
-                       struct skb_shared_hwtstamps *hwtstamps);
+void skb_tstamp_tx(struct sk_buff *orig_skb,
+                  struct skb_shared_hwtstamps *hwtstamps);
 
 static inline void sw_tx_timestamp(struct sk_buff *skb)
 {
@@ -2562,8 +2541,8 @@ static inline void skb_tx_timestamp(struct sk_buff *skb)
  */
 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);
 
-extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
-extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
+__sum16 __skb_checksum_complete(struct sk_buff *skb);
 
 static inline int skb_csum_unnecessary(const struct sk_buff *skb)
 {
@@ -2593,7 +2572,7 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
 }
 
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
+void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 {
        if (nfct && atomic_dec_and_test(&nfct->use))
@@ -2732,28 +2711,27 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
        return skb->queue_mapping != 0;
 }
 
-extern u16 __skb_tx_hash(const struct net_device *dev,
-                        const struct sk_buff *skb,
-                        unsigned int num_tx_queues);
+u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+                 unsigned int num_tx_queues);
 
-#ifdef CONFIG_XFRM
 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 {
+#ifdef CONFIG_XFRM
        return skb->sp;
-}
 #else
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
-{
        return NULL;
-}
 #endif
+}
 
 /* Keeps track of mac header offset relative to skb->head.
  * It is useful for TSO of Tunneling protocol. e.g. GRE.
  * For non-tunnel skb it points to skb_mac_header() and for
- * tunnel skb it points to outer mac header. */
+ * tunnel skb it points to outer mac header.
+ * Keeps track of level of encapsulation of network headers.
+ */
 struct skb_gso_cb {
-       int mac_offset;
+       int     mac_offset;
+       int     encap_level;
 };
 #define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
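With the CONFIG_XFRM conditional folded into skb_sec_path() above, callers no longer need an #ifdef of their own. A minimal sketch, assuming a hypothetical caller:

static bool foo_skb_has_sec_path(struct sk_buff *skb)
{
        /* skb_sec_path() now simply returns NULL when CONFIG_XFRM is off */
        return skb_sec_path(skb) != NULL;
}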
 
@@ -2783,12 +2761,13 @@ static inline bool skb_is_gso(const struct sk_buff *skb)
        return skb_shinfo(skb)->gso_size;
 }
 
+/* Note: Should be called only if skb_is_gso(skb) is true */
 static inline bool skb_is_gso_v6(const struct sk_buff *skb)
 {
        return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
 }
 
-extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
+void __skb_warn_lro_forwarding(const struct sk_buff *skb);
 
 static inline bool skb_warn_if_lro(const struct sk_buff *skb)
 {
index 86a12b0cb239850d903e53b52091ad53fd179115..0688472500bbabb1274a55c1ab0720d39b10922b 100644 (file)
@@ -108,6 +108,16 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
        return 0;
 }
 
+/* Get the device phy address */
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+       struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
+       if (!dev)
+               return -ENODEV;
+
+       return dev->dev->bus->sprom.et0phyaddr;
+}
+
 extern int ssb_gige_pcibios_plat_dev_init(struct ssb_device *sdev,
                                          struct pci_dev *pdev);
 extern int ssb_gige_map_irq(struct ssb_device *sdev,
@@ -174,6 +184,10 @@ static inline int ssb_gige_get_macaddr(struct pci_dev *pdev, u8 *macaddr)
 {
        return -ENODEV;
 }
+static inline int ssb_gige_get_phyaddr(struct pci_dev *pdev)
+{
+       return -ENODEV;
+}
 
 #endif /* CONFIG_SSB_DRIVER_GIGE */
 #endif /* LINUX_SSB_DRIVER_GIGE_H_ */
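A minimal sketch of how an Ethernet driver might consume the new ssb_gige_get_phyaddr() helper; the probe helper itself is hypothetical:

static int foo_get_phyaddr(struct pci_dev *pdev)
{
        int phyaddr = ssb_gige_get_phyaddr(pdev);

        if (phyaddr < 0)        /* -ENODEV when there is no SSB GigE core */
                return phyaddr;

        dev_info(&pdev->dev, "using PHY address %d from SPROM\n", phyaddr);
        return 0;
}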
index 6740801aa71ab519c59bd21a7e50ee5793cc242a..943ee895f2d1d02b9cda88875f3765af30c917a6 100644 (file)
@@ -49,6 +49,7 @@ struct rpc_clnt {
 
        unsigned int            cl_softrtry : 1,/* soft timeouts */
                                cl_discrtry : 1,/* disconnect before retry */
+                               cl_noretranstimeo: 1,/* No retransmit timeouts */
                                cl_autobind : 1,/* use getport() */
                                cl_chatty   : 1;/* be verbose */
 
@@ -126,6 +127,7 @@ struct rpc_create_args {
 #define RPC_CLNT_CREATE_QUIET          (1UL << 6)
 #define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7)
 #define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT        (1UL << 8)
+#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT     (1UL << 9)
 
 struct rpc_clnt *rpc_create(struct rpc_create_args *args);
 struct rpc_clnt        *rpc_bind_new_program(struct rpc_clnt *,
index 096ee58be11a83f2fc05107639a2273cc0c677e6..3a847de83fabd5757ddcee00910a6ab991f79a98 100644 (file)
@@ -122,6 +122,7 @@ struct rpc_task_setup {
 #define RPC_TASK_SENT          0x0800          /* message was sent */
 #define RPC_TASK_TIMEOUT       0x1000          /* fail with ETIMEDOUT on timeout */
 #define RPC_TASK_NOCONNECT     0x2000          /* return ENOTCONN if not connected */
+#define RPC_TASK_NO_RETRANS_TIMEOUT    0x4000          /* wait forever for a reply */
 
 #define RPC_IS_ASYNC(t)                ((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)      ((t)->tk_flags & RPC_TASK_SWAPPER)
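A minimal sketch of how a caller could opt into the new behaviour, assuming (as the hunks above suggest) that RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT sets cl_noretranstimeo and tasks then run with RPC_TASK_NO_RETRANS_TIMEOUT; the wrapper is hypothetical:

static struct rpc_clnt *foo_create_client(struct rpc_create_args *args)
{
        /* Wait indefinitely for replies instead of retransmitting on
         * timeout. */
        args->flags |= RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT;
        return rpc_create(args);
}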
index cec7b9b5e1bfb6591ad10997485b05ddc7421120..8097b9df677326717517f256b66dbaa17e27df93 100644 (file)
@@ -288,7 +288,7 @@ int                 xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
 int                    xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
 void                   xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
 void                   xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
-int                    xprt_prepare_transmit(struct rpc_task *task);
+bool                   xprt_prepare_transmit(struct rpc_task *task);
 void                   xprt_transmit(struct rpc_task *task);
 void                   xprt_end_transmit(struct rpc_task *task);
 int                    xprt_adjust_timeout(struct rpc_rqst *req);
diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h
new file mode 100644 (file)
index 0000000..361de59
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef __THINKPAD_ACPI_H__
+#define __THINKPAD_ACPI_H__
+
+/* This function returns 0 on success, or a negative error code
+   (e.g. -ENODEV if no LED is present) */
+
+enum {
+       TPACPI_LED_MUTE,
+       TPACPI_LED_MICMUTE,
+       TPACPI_LED_MAX,
+};
+
+int tpacpi_led_set(int whichled, bool on);
+
+#endif
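A minimal usage sketch for the new LED interface; the caller is hypothetical:

static void foo_sync_mute_leds(bool muted, bool mic_muted)
{
        if (tpacpi_led_set(TPACPI_LED_MUTE, muted) < 0)
                pr_debug("no mute LED on this ThinkPad\n");
        if (tpacpi_led_set(TPACPI_LED_MICMUTE, mic_muted) < 0)
                pr_debug("no mic-mute LED on this ThinkPad\n");
}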
index dd3edd7dfc94dc73de9389e55ed7f35bdcadfcbd..9d3f1a5b6178a9dd1aa3914b35051b96f5ceb279 100644 (file)
 
 #include <asm/timex.h>
 
+#ifndef random_get_entropy
+/*
+ * The random_get_entropy() function is used by the /dev/random driver
+ * in order to extract entropy via the relative unpredictability of
+ * when an interrupt takes place versus a high speed, fine-grained
+ * timing source or cycle counter.  Since it is called on every
+ * single interrupt, it must have a very low cost/overhead.
+ *
+ * By default we use get_cycles() for this purpose, but individual
+ * architectures may override this in their asm/timex.h header file.
+ */
+#define random_get_entropy()   get_cycles()
+#endif
+
 /*
  * SHIFT_PLL is used as a dampening factor to define how much we
  * adjust the frequency correction for a given offset in PLL mode.
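An architecture can provide its own random_get_entropy() from asm/timex.h before this fallback is seen; a purely illustrative sketch in which the timer read is hypothetical:

static inline unsigned long foo_arch_entropy_timer(void)
{
        return foo_read_free_running_counter();  /* hypothetical HW timer */
}
#define random_get_entropy()    foo_arch_entropy_timer()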
index f9a7e7bc925be61322a24304076503bd918a520a..11d85b9c1b081af6a9f4f30ac3c1458766ed4238 100644 (file)
@@ -12,7 +12,7 @@ struct usb_phy_gen_xceiv_platform_data {
        unsigned int needs_reset:1;
 };
 
-#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
+#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
 /* sometimes transceivers are accessed only through e.g. ULPI */
 extern void usb_nop_xceiv_register(void);
 extern void usb_nop_xceiv_unregister(void);
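The guard matters because IS_ENABLED(CONFIG_NOP_USB_XCEIV) is also true when the transceiver is modular while the caller is built in, which can leave the externs above unresolved at link time; the defined()-based form only exposes the real declarations when the caller can actually reach them. A reduced sketch of the idiom with a made-up symbol:

#if defined(CONFIG_FOO) || (defined(CONFIG_FOO_MODULE) && defined(MODULE))
extern void foo_register(void);
#else
static inline void foo_register(void)
{
}
#endif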
index bf99cd01be206ebeb170d826e176c3ac22e1493e..630356866030d88a355390a932105ff9c86b98bb 100644 (file)
@@ -66,7 +66,9 @@
        US_FLAG(INITIAL_READ10, 0x00100000)                     \
                /* Initial READ(10) (and others) must be retried */     \
        US_FLAG(WRITE_CACHE,    0x00200000)                     \
-               /* Write Cache status is not available */
+               /* Write Cache status is not available */       \
+       US_FLAG(NEEDS_CAP16,    0x00400000)
+               /* cannot handle READ_CAPACITY_10 */
 
 #define US_FLAG(name, value)   US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
index 80cf8173a65b1491bd7e1da05978024acd7889b1..2c02f3a8d2ba3f4ba079a51f537be25742b17162 100644 (file)
@@ -65,15 +65,8 @@ struct pci_dev;
  *     out of the arbitration process (and can safely take
  *     interrupts at any time).
  */
-#if defined(CONFIG_VGA_ARB)
 extern void vga_set_legacy_decoding(struct pci_dev *pdev,
                                    unsigned int decodes);
-#else
-static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
-                                          unsigned int decodes)
-{
-}
-#endif
 
 /**
  *     vga_get         - acquire & locks VGA resources
index 29b9104232b40c038bf29969d9c63795e8575fce..e8f8f71e843c96a1a8b7473e3a0f0b3b0334a458 100644 (file)
@@ -96,33 +96,6 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
        return test_bit(fbit, vdev->features);
 }
 
-/**
- * virtio_config_val - look for a feature and get a virtio config entry.
- * @vdev: the virtio device
- * @fbit: the feature bit
- * @offset: the type to search for.
- * @v: a pointer to the value to fill in.
- *
- * The return value is -ENOENT if the feature doesn't exist.  Otherwise
- * the config value is copied into whatever is pointed to by v. */
-#define virtio_config_val(vdev, fbit, offset, v) \
-       virtio_config_buf((vdev), (fbit), (offset), (v), sizeof(*v))
-
-#define virtio_config_val_len(vdev, fbit, offset, v, len) \
-       virtio_config_buf((vdev), (fbit), (offset), (v), (len))
-
-static inline int virtio_config_buf(struct virtio_device *vdev,
-                                   unsigned int fbit,
-                                   unsigned int offset,
-                                   void *buf, unsigned len)
-{
-       if (!virtio_has_feature(vdev, fbit))
-               return -ENOENT;
-
-       vdev->config->get(vdev, offset, buf, len);
-       return 0;
-}
-
 static inline
 struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
                                        vq_callback_t *c, const char *n)
@@ -162,5 +135,139 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
        return 0;
 }
 
+/* Config space accessors. */
+#define virtio_cread(vdev, structname, member, ptr)                    \
+       do {                                                            \
+               /* Must match the member's type, and be integer */      \
+               if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
+                       (*ptr) = 1;                                     \
+                                                                       \
+               switch (sizeof(*ptr)) {                                 \
+               case 1:                                                 \
+                       *(ptr) = virtio_cread8(vdev,                    \
+                                              offsetof(structname, member)); \
+                       break;                                          \
+               case 2:                                                 \
+                       *(ptr) = virtio_cread16(vdev,                   \
+                                               offsetof(structname, member)); \
+                       break;                                          \
+               case 4:                                                 \
+                       *(ptr) = virtio_cread32(vdev,                   \
+                                               offsetof(structname, member)); \
+                       break;                                          \
+               case 8:                                                 \
+                       *(ptr) = virtio_cread64(vdev,                   \
+                                               offsetof(structname, member)); \
+                       break;                                          \
+               default:                                                \
+                       BUG();                                          \
+               }                                                       \
+       } while(0)
+
+/* Config space accessors. */
+#define virtio_cwrite(vdev, structname, member, ptr)                   \
+       do {                                                            \
+               /* Must match the member's type, and be integer */      \
+               if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \
+                       BUG_ON((*ptr) == 1);                            \
+                                                                       \
+               switch (sizeof(*ptr)) {                                 \
+               case 1:                                                 \
+                       virtio_cwrite8(vdev,                            \
+                                      offsetof(structname, member),    \
+                                      *(ptr));                         \
+                       break;                                          \
+               case 2:                                                 \
+                       virtio_cwrite16(vdev,                           \
+                                       offsetof(structname, member),   \
+                                       *(ptr));                        \
+                       break;                                          \
+               case 4:                                                 \
+                       virtio_cwrite32(vdev,                           \
+                                       offsetof(structname, member),   \
+                                       *(ptr));                        \
+                       break;                                          \
+               case 8:                                                 \
+                       virtio_cwrite64(vdev,                           \
+                                       offsetof(structname, member),   \
+                                       *(ptr));                        \
+                       break;                                          \
+               default:                                                \
+                       BUG();                                          \
+               }                                                       \
+       } while(0)
+
+static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
+{
+       u8 ret;
+       vdev->config->get(vdev, offset, &ret, sizeof(ret));
+       return ret;
+}
+
+static inline void virtio_cread_bytes(struct virtio_device *vdev,
+                                     unsigned int offset,
+                                     void *buf, size_t len)
+{
+       vdev->config->get(vdev, offset, buf, len);
+}
+
+static inline void virtio_cwrite8(struct virtio_device *vdev,
+                                 unsigned int offset, u8 val)
+{
+       vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u16 virtio_cread16(struct virtio_device *vdev,
+                                unsigned int offset)
+{
+       u16 ret;
+       vdev->config->get(vdev, offset, &ret, sizeof(ret));
+       return ret;
+}
+
+static inline void virtio_cwrite16(struct virtio_device *vdev,
+                                  unsigned int offset, u16 val)
+{
+       vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u32 virtio_cread32(struct virtio_device *vdev,
+                                unsigned int offset)
+{
+       u32 ret;
+       vdev->config->get(vdev, offset, &ret, sizeof(ret));
+       return ret;
+}
+
+static inline void virtio_cwrite32(struct virtio_device *vdev,
+                                  unsigned int offset, u32 val)
+{
+       vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+static inline u64 virtio_cread64(struct virtio_device *vdev,
+                                unsigned int offset)
+{
+       u64 ret;
+       vdev->config->get(vdev, offset, &ret, sizeof(ret));
+       return ret;
+}
+
+static inline void virtio_cwrite64(struct virtio_device *vdev,
+                                  unsigned int offset, u64 val)
+{
+       vdev->config->set(vdev, offset, &val, sizeof(val));
+}
+
+/* Conditional config space accessors. */
+#define virtio_cread_feature(vdev, fbit, structname, member, ptr)      \
+       ({                                                              \
+               int _r = 0;                                             \
+               if (!virtio_has_feature(vdev, fbit))                    \
+                       _r = -ENOENT;                                   \
+               else                                                    \
+                       virtio_cread((vdev), structname, member, ptr);  \
+               _r;                                                     \
+       })
 
 #endif /* _LINUX_VIRTIO_CONFIG_H */
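For orientation, a minimal sketch of the new config space accessors in use; struct virtio_foo_config and VIRTIO_FOO_F_CAPACITY are made-up names for illustration, not part of this patch:

struct virtio_foo_config {
        __u16 max_queues;
        __u64 capacity;
} __packed;

static void foo_read_config(struct virtio_device *vdev)
{
        u16 max_queues;
        u64 capacity;

        /* Unconditional field: offset and type come from the struct member */
        virtio_cread(vdev, struct virtio_foo_config, max_queues, &max_queues);

        /* Field that is only valid behind a feature bit; -ENOENT otherwise */
        if (virtio_cread_feature(vdev, VIRTIO_FOO_F_CAPACITY,
                                 struct virtio_foo_config, capacity,
                                 &capacity) < 0)
                capacity = 0;

        pr_debug("foo: %u queues, capacity %llu\n",
                 max_queues, (unsigned long long)capacity);
}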
index 7fe28228b2742efc0c9eb69c7bbc866f0194ef19..512cdc2fb80f40f99bb1b09fe1d0d4dcfeab345c 100644 (file)
@@ -77,6 +77,6 @@ struct yamdrv_ioctl_cfg {
 
 struct yamdrv_ioctl_mcs {
        int cmd;
-       int bitrate;
+       unsigned int bitrate;
        unsigned char bits[YAM_FPGA_SIZE];
 };
index 16550c4390081a03a8f2d4c8d3fdc51de54cc006..a707529841e27d92e2bfe1abfda2b5f0b81b1560 100644 (file)
@@ -35,7 +35,7 @@
        printk(level "%s %d-%04x: " fmt, name, i2c_adapter_id(adapter), addr , ## arg)
 
 #define v4l_client_printk(level, client, fmt, arg...)                      \
-       v4l_printk(level, (client)->driver->driver.name, (client)->adapter, \
+       v4l_printk(level, (client)->dev.driver->name, (client)->adapter, \
                   (client)->addr, fmt , ## arg)
 
 #define v4l_err(client, fmt, arg...) \
index 6781258d0b677f4a7092b4d29d0c1c7094e8a8a0..bd8218b15009a810af8abd5df5a4bcad31dfdb20 100644 (file)
@@ -391,7 +391,7 @@ unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
 unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait);
 size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
                loff_t *ppos, int nonblock);
-size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
                loff_t *ppos, int nonblock);
 
 /**
@@ -491,7 +491,7 @@ int vb2_ioctl_expbuf(struct file *file, void *priv,
 
 int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma);
 int vb2_fop_release(struct file *file);
-ssize_t vb2_fop_write(struct file *file, char __user *buf,
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
                size_t count, loff_t *ppos);
 ssize_t vb2_fop_read(struct file *file, char __user *buf,
                size_t count, loff_t *ppos);
index 0038526b8ef7c0495bf12784a6c3c7a855c0b144..7b89852779af77b0f156f635f7b8e5287d9a9be1 100644 (file)
 
 #include <media/videobuf2-core.h>
 
-struct vb2_dma_sg_desc {
-       unsigned long           size;
-       unsigned int            num_pages;
-       struct scatterlist      *sglist;
-};
-
-static inline struct vb2_dma_sg_desc *vb2_dma_sg_plane_desc(
+static inline struct sg_table *vb2_dma_sg_plane_desc(
                struct vb2_buffer *vb, unsigned int plane_no)
 {
-       return (struct vb2_dma_sg_desc *)vb2_plane_cookie(vb, plane_no);
+       return (struct sg_table *)vb2_plane_cookie(vb, plane_no);
 }
 
 extern const struct vb2_mem_ops vb2_dma_sg_memops;
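With the cookie now being a plain struct sg_table, a driver can walk the buffer with the standard scatterlist helpers; a minimal sketch with a hypothetical queue callback:

static void foo_buf_queue(struct vb2_buffer *vb)
{
        struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
        struct scatterlist *sg;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i)
                pr_debug("plane 0 chunk %d: %u bytes\n", i, sg->length);
}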
index 10d43d8c7037ac58bea44788af5c7086dbf40f90..2a628b28249ffd00f8b4bd36d4e024583599ad67 100644 (file)
@@ -197,8 +197,8 @@ static inline bool bdaddr_type_is_le(__u8 type)
        return false;
 }
 
-#define BDADDR_ANY   (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} })
-#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
+#define BDADDR_ANY  (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
+#define BDADDR_NONE (&(bdaddr_t) {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}})
 
 /* Copy, swap, convert BD Address */
 static inline int bacmp(const bdaddr_t *ba1, const bdaddr_t *ba2)
@@ -218,11 +218,10 @@ void baswap(bdaddr_t *dst, bdaddr_t *src);
 
 struct bt_sock {
        struct sock sk;
-       bdaddr_t    src;
-       bdaddr_t    dst;
        struct list_head accept_q;
        struct sock *parent;
        unsigned long flags;
+       void (*skb_msg_name)(struct sk_buff *, void *, int *);
 };
 
 enum {
@@ -249,6 +248,7 @@ int  bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
 int  bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
+int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
 
 void bt_accept_enqueue(struct sock *parent, struct sock *sk);
 void bt_accept_unlink(struct sock *sk);
@@ -282,8 +282,11 @@ struct bt_skb_cb {
        __u8 incoming;
        __u16 expect;
        __u8 force_active;
+       struct l2cap_chan *chan;
        struct l2cap_ctrl control;
        struct hci_req_ctrl req;
+       bdaddr_t bdaddr;
+       __le16 psm;
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)((skb)->cb))
 
@@ -331,16 +334,16 @@ out:
 
 int bt_to_errno(__u16 code);
 
-extern int hci_sock_init(void);
-extern void hci_sock_cleanup(void);
+int hci_sock_init(void);
+void hci_sock_cleanup(void);
 
-extern int bt_sysfs_init(void);
-extern void bt_sysfs_cleanup(void);
+int bt_sysfs_init(void);
+void bt_sysfs_cleanup(void);
 
-extern int  bt_procfs_init(struct net *net, const char *name,
-                          struct bt_sock_list* sk_list,
-                          int (* seq_show)(struct seq_file *, void *));
-extern void bt_procfs_cleanup(struct net *net, const char *name);
+int bt_procfs_init(struct net *net, const char *name,
+                  struct bt_sock_list *sk_list,
+                  int (*seq_show)(struct seq_file *, void *));
+void bt_procfs_cleanup(struct net *net, const char *name);
 
 extern struct dentry *bt_debugfs;
 
index 15f10841e2b5ddedb94dadfe166e2fe19b09cfee..1784c48699f04dd425027d2e51d0aabc3fd87f4a 100644 (file)
@@ -35,6 +35,8 @@
 
 #define HCI_MAX_AMP_ASSOC_SIZE 672
 
+#define HCI_MAX_CSB_DATA_SIZE  252
+
 /* HCI dev events */
 #define HCI_DEV_REG                    1
 #define HCI_DEV_UNREG                  2
 #define HCI_AMP                0x01
 
 /* First BR/EDR Controller shall have ID = 0 */
-#define HCI_BREDR_ID   0
+#define AMP_ID_BREDR   0x00
+
+/* AMP controller types */
+#define AMP_TYPE_BREDR 0x00
+#define AMP_TYPE_80211 0x01
 
 /* AMP controller status */
-#define AMP_CTRL_POWERED_DOWN                  0x00
-#define AMP_CTRL_BLUETOOTH_ONLY                        0x01
-#define AMP_CTRL_NO_CAPACITY                   0x02
-#define AMP_CTRL_LOW_CAPACITY                  0x03
-#define AMP_CTRL_MEDIUM_CAPACITY               0x04
-#define AMP_CTRL_HIGH_CAPACITY                 0x05
-#define AMP_CTRL_FULL_CAPACITY                 0x06
+#define AMP_STATUS_POWERED_DOWN                        0x00
+#define AMP_STATUS_BLUETOOTH_ONLY              0x01
+#define AMP_STATUS_NO_CAPACITY                 0x02
+#define AMP_STATUS_LOW_CAPACITY                        0x03
+#define AMP_STATUS_MEDIUM_CAPACITY             0x04
+#define AMP_STATUS_HIGH_CAPACITY               0x05
+#define AMP_STATUS_FULL_CAPACITY               0x06
 
 /* HCI device quirks */
 enum {
@@ -109,18 +115,22 @@ enum {
        HCI_PAIRABLE,
        HCI_SERVICE_CACHE,
        HCI_DEBUG_KEYS,
+       HCI_DUT_MODE,
        HCI_UNREGISTER,
+       HCI_USER_CHANNEL,
 
        HCI_LE_SCAN,
        HCI_SSP_ENABLED,
        HCI_HS_ENABLED,
        HCI_LE_ENABLED,
-       HCI_LE_PERIPHERAL,
+       HCI_ADVERTISING,
        HCI_CONNECTABLE,
        HCI_DISCOVERABLE,
+       HCI_LIMITED_DISCOVERABLE,
        HCI_LINK_SECURITY,
        HCI_PERIODIC_INQ,
        HCI_FAST_CONNECTABLE,
+       HCI_BREDR_ENABLED,
 };
 
 /* A mask for the flags that are supposed to remain when a reset happens
@@ -624,6 +634,24 @@ struct hci_rp_logical_link_cancel {
        __u8     flow_spec_id;
 } __packed;
 
+#define HCI_OP_SET_CSB                 0x0441
+struct hci_cp_set_csb {
+       __u8    enable;
+       __u8    lt_addr;
+       __u8    lpo_allowed;
+       __le16  packet_type;
+       __le16  interval_min;
+       __le16  interval_max;
+       __le16  csb_sv_tout;
+} __packed;
+struct hci_rp_set_csb {
+       __u8    status;
+       __u8    lt_addr;
+       __le16  interval;
+} __packed;
+
+#define HCI_OP_START_SYNC_TRAIN                0x0443
+
 #define HCI_OP_SNIFF_MODE              0x0803
 struct hci_cp_sniff_mode {
        __le16   handle;
@@ -694,9 +722,6 @@ struct hci_cp_sniff_subrate {
 } __packed;
 
 #define HCI_OP_SET_EVENT_MASK          0x0c01
-struct hci_cp_set_event_mask {
-       __u8     mask[8];
-} __packed;
 
 #define HCI_OP_RESET                   0x0c03
 
@@ -792,6 +817,20 @@ struct hci_cp_host_buffer_size {
        __le16   sco_max_pkt;
 } __packed;
 
+#define HCI_OP_READ_NUM_SUPPORTED_IAC  0x0c38
+struct hci_rp_read_num_supported_iac {
+       __u8    status;
+       __u8    num_iac;
+} __packed;
+
+#define HCI_OP_READ_CURRENT_IAC_LAP    0x0c39
+
+#define HCI_OP_WRITE_CURRENT_IAC_LAP   0x0c3a
+struct hci_cp_write_current_iac_lap {
+       __u8    num_iac;
+       __u8    iac_lap[6];
+} __packed;
+
 #define HCI_OP_WRITE_INQUIRY_MODE      0x0c45
 
 #define HCI_MAX_EIR_LENGTH             240
@@ -826,6 +865,10 @@ struct hci_rp_read_inq_rsp_tx_power {
        __s8     tx_power;
 } __packed;
 
+#define HCI_OP_SET_EVENT_MASK_PAGE_2   0x0c63
+
+#define HCI_OP_READ_LOCATION_DATA      0x0c64
+
 #define HCI_OP_READ_FLOW_CONTROL_MODE  0x0c66
 struct hci_rp_read_flow_control_mode {
        __u8     status;
@@ -838,6 +881,50 @@ struct hci_cp_write_le_host_supported {
        __u8    simul;
 } __packed;
 
+#define HCI_OP_SET_RESERVED_LT_ADDR    0x0c74
+struct hci_cp_set_reserved_lt_addr {
+       __u8    lt_addr;
+} __packed;
+struct hci_rp_set_reserved_lt_addr {
+       __u8    status;
+       __u8    lt_addr;
+} __packed;
+
+#define HCI_OP_DELETE_RESERVED_LT_ADDR 0x0c75
+struct hci_cp_delete_reserved_lt_addr {
+       __u8    lt_addr;
+} __packed;
+struct hci_rp_delete_reserved_lt_addr {
+       __u8    status;
+       __u8    lt_addr;
+} __packed;
+
+#define HCI_OP_SET_CSB_DATA            0x0c76
+struct hci_cp_set_csb_data {
+       __u8    lt_addr;
+       __u8    fragment;
+       __u8    data_length;
+       __u8    data[HCI_MAX_CSB_DATA_SIZE];
+} __packed;
+struct hci_rp_set_csb_data {
+       __u8    status;
+       __u8    lt_addr;
+} __packed;
+
+#define HCI_OP_READ_SYNC_TRAIN_PARAMS  0x0c77
+
+#define HCI_OP_WRITE_SYNC_TRAIN_PARAMS 0x0c78
+struct hci_cp_write_sync_train_params {
+       __le16  interval_min;
+       __le16  interval_max;
+       __le32  sync_train_tout;
+       __u8    service_data;
+} __packed;
+struct hci_rp_write_sync_train_params {
+       __u8    status;
+       __le16  sync_train_int;
+} __packed;
+
 #define HCI_OP_READ_LOCAL_VERSION      0x1001
 struct hci_rp_read_local_version {
        __u8     status;
@@ -957,6 +1044,10 @@ struct hci_rp_write_remote_amp_assoc {
        __u8     phy_handle;
 } __packed;
 
+#define HCI_OP_ENABLE_DUT_MODE         0x1803
+
+#define HCI_OP_WRITE_SSP_DEBUG_MODE    0x1804
+
 #define HCI_OP_LE_SET_EVENT_MASK       0x2001
 struct hci_cp_le_set_event_mask {
        __u8     mask[8];
@@ -975,6 +1066,20 @@ struct hci_rp_le_read_local_features {
        __u8     features[8];
 } __packed;
 
+#define HCI_OP_LE_SET_RANDOM_ADDR      0x2005
+
+#define HCI_OP_LE_SET_ADV_PARAM                0x2006
+struct hci_cp_le_set_adv_param {
+       __le16   min_interval;
+       __le16   max_interval;
+       __u8     type;
+       __u8     own_address_type;
+       __u8     direct_addr_type;
+       bdaddr_t direct_addr;
+       __u8     channel_map;
+       __u8     filter_policy;
+} __packed;
+
 #define HCI_OP_LE_READ_ADV_TX_POWER    0x2007
 struct hci_rp_le_read_adv_tx_power {
        __u8    status;
@@ -989,6 +1094,12 @@ struct hci_cp_le_set_adv_data {
        __u8    data[HCI_MAX_AD_LENGTH];
 } __packed;
 
+#define HCI_OP_LE_SET_SCAN_RSP_DATA    0x2009
+struct hci_cp_le_set_scan_rsp_data {
+       __u8    length;
+       __u8    data[HCI_MAX_AD_LENGTH];
+} __packed;
+
 #define HCI_OP_LE_SET_ADV_ENABLE       0x200a
 
 #define LE_SCAN_PASSIVE                        0x00
@@ -1438,6 +1549,13 @@ struct hci_ev_num_comp_blocks {
        struct hci_comp_blocks_info handles[0];
 } __packed;
 
+#define HCI_EV_SYNC_TRAIN_COMPLETE     0x4F
+struct hci_ev_sync_train_complete {
+       __u8    status;
+} __packed;
+
+#define HCI_EV_SLAVE_PAGE_RESP_TIMEOUT 0x54
+
 /* Low energy meta events */
 #define LE_CONN_ROLE_MASTER    0x00
 
@@ -1462,11 +1580,11 @@ struct hci_ev_le_ltk_req {
 } __packed;
 
 /* Advertising report event types */
-#define ADV_IND                0x00
-#define ADV_DIRECT_IND 0x01
-#define ADV_SCAN_IND   0x02
-#define ADV_NONCONN_IND        0x03
-#define ADV_SCAN_RSP   0x04
+#define LE_ADV_IND             0x00
+#define LE_ADV_DIRECT_IND      0x01
+#define LE_ADV_SCAN_IND                0x02
+#define LE_ADV_NONCONN_IND     0x03
+#define LE_ADV_SCAN_RSP                0x04
 
 #define ADDR_LE_DEV_PUBLIC     0x00
 #define ADDR_LE_DEV_RANDOM     0x01
@@ -1571,6 +1689,7 @@ struct sockaddr_hci {
 #define HCI_DEV_NONE   0xffff
 
 #define HCI_CHANNEL_RAW                0
+#define HCI_CHANNEL_USER       1
 #define HCI_CHANNEL_MONITOR    2
 #define HCI_CHANNEL_CONTROL    3
 
@@ -1673,6 +1792,4 @@ struct hci_inquiry_req {
 };
 #define IREQ_CACHE_FLUSH 0x0001
 
-extern bool enable_hs;
-
 #endif /* __HCI_H */
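As an illustration of the new LE advertising parameter command, a sketch of building it with the existing hci_request helpers; the interval and type values are placeholders, not what the management interface actually programs:

static void foo_add_adv_param(struct hci_request *req)
{
        struct hci_cp_le_set_adv_param cp;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(0x0800);
        cp.max_interval = cpu_to_le16(0x0800);
        cp.type = 0x00;                         /* connectable undirected */
        cp.own_address_type = ADDR_LE_DEV_PUBLIC;
        cp.channel_map = 0x07;                  /* all advertising channels */

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
}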
index 3ede820d328f9798a12e43ef77db50d0a572d29f..f8555ad7b10485925b07393019579c1ee706b2b0 100644 (file)
@@ -81,6 +81,7 @@ struct hci_conn_hash {
 struct bdaddr_list {
        struct list_head list;
        bdaddr_t bdaddr;
+       u8 bdaddr_type;
 };
 
 struct bt_uuid {
@@ -140,6 +141,8 @@ struct hci_dev {
        __u8            bus;
        __u8            dev_type;
        bdaddr_t        bdaddr;
+       bdaddr_t        static_addr;
+       __u8            own_addr_type;
        __u8            dev_name[HCI_MAX_NAME_LENGTH];
        __u8            short_name[HCI_MAX_SHORT_NAME_LENGTH];
        __u8            eir[HCI_MAX_EIR_LENGTH];
@@ -158,11 +161,17 @@ struct hci_dev {
        __u16           manufacturer;
        __u16           lmp_subver;
        __u16           voice_setting;
+       __u8            num_iac;
        __u8            io_capability;
        __s8            inq_tx_power;
        __u16           page_scan_interval;
        __u16           page_scan_window;
        __u8            page_scan_type;
+       __u16           le_scan_interval;
+       __u16           le_scan_window;
+       __u16           le_conn_min_interval;
+       __u16           le_conn_max_interval;
+       __u8            ssp_debug_mode;
 
        __u16           devid_source;
        __u16           devid_vendor;
@@ -279,14 +288,15 @@ struct hci_dev {
        __s8                    adv_tx_power;
        __u8                    adv_data[HCI_MAX_AD_LENGTH];
        __u8                    adv_data_len;
+       __u8                    scan_rsp_data[HCI_MAX_AD_LENGTH];
+       __u8                    scan_rsp_data_len;
 
        int (*open)(struct hci_dev *hdev);
        int (*close)(struct hci_dev *hdev);
        int (*flush)(struct hci_dev *hdev);
        int (*setup)(struct hci_dev *hdev);
-       int (*send)(struct sk_buff *skb);
+       int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
        void (*notify)(struct hci_dev *hdev, unsigned int evt);
-       int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
 };
 
 #define HCI_PHY_HANDLE(handle) (handle & 0xff)
@@ -298,6 +308,8 @@ struct hci_conn {
 
        bdaddr_t        dst;
        __u8            dst_type;
+       bdaddr_t        src;
+       __u8            src_type;
        __u16           handle;
        __u16           state;
        __u8            mode;
@@ -306,7 +318,6 @@ struct hci_conn {
        __u8            attempt;
        __u8            dev_class[3];
        __u8            features[HCI_MAX_PAGES][8];
-       __u16           interval;
        __u16           pkt_type;
        __u16           link_policy;
        __u32           link_mode;
@@ -334,8 +345,8 @@ struct hci_conn {
        struct list_head chan_list;
 
        struct delayed_work disc_work;
-       struct timer_list idle_timer;
-       struct timer_list auto_accept_timer;
+       struct delayed_work auto_accept_work;
+       struct delayed_work idle_work;
 
        struct device   dev;
 
@@ -367,18 +378,17 @@ extern rwlock_t hci_dev_list_lock;
 extern rwlock_t hci_cb_list_lock;
 
 /* ----- HCI interface to upper protocols ----- */
-extern int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
-extern void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
-extern int l2cap_disconn_ind(struct hci_conn *hcon);
-extern void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
-extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
-extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
-                             u16 flags);
-
-extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
-extern void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
-extern void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
-extern int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
+int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
+void l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
+int l2cap_disconn_ind(struct hci_conn *hcon);
+void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
+int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
+int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+
+int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
+void sco_connect_cfm(struct hci_conn *hcon, __u8 status);
+void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason);
+int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);
 
 /* ----- Inquiry cache ----- */
 #define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
@@ -644,7 +654,7 @@ static inline void hci_conn_drop(struct hci_conn *conn)
                switch (conn->type) {
                case ACL_LINK:
                case LE_LINK:
-                       del_timer(&conn->idle_timer);
+                       cancel_delayed_work(&conn->idle_work);
                        if (conn->state == BT_CONNECTED) {
                                timeo = conn->disc_timeout;
                                if (!conn->out)
@@ -703,19 +713,6 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
        dev_set_drvdata(&hdev->dev, data);
 }
 
-/* hci_dev_list shall be locked */
-static inline uint8_t __hci_num_ctrl(void)
-{
-       uint8_t count = 0;
-       struct list_head *p;
-
-       list_for_each(p, &hci_dev_list) {
-               count++;
-       }
-
-       return count;
-}
-
 struct hci_dev *hci_dev_get(int index);
 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);
 
@@ -738,7 +735,7 @@ int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
 int hci_inquiry(void __user *arg);
 
 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
-                                        bdaddr_t *bdaddr);
+                                        bdaddr_t *bdaddr, u8 type);
 int hci_blacklist_clear(struct hci_dev *hdev);
 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
@@ -768,13 +765,11 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr);
 
 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
 
-int hci_recv_frame(struct sk_buff *skb);
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);
 
 void hci_init_sysfs(struct hci_dev *hdev);
-int hci_add_sysfs(struct hci_dev *hdev);
-void hci_del_sysfs(struct hci_dev *hdev);
 void hci_conn_init_sysfs(struct hci_conn *conn);
 void hci_conn_add_sysfs(struct hci_conn *conn);
 void hci_conn_del_sysfs(struct hci_conn *conn);
@@ -807,22 +802,6 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 #define lmp_host_le_capable(dev)   (!!((dev)->features[1][0] & LMP_HOST_LE))
 #define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))
 
-/* returns true if at least one AMP active */
-static inline bool hci_amp_capable(void)
-{
-       struct hci_dev *hdev;
-       bool ret = false;
-
-       read_lock(&hci_dev_list_lock);
-       list_for_each_entry(hdev, &hci_dev_list, list)
-               if (hdev->amp_type == HCI_AMP &&
-                   test_bit(HCI_UP, &hdev->flags))
-                       ret = true;
-       read_unlock(&hci_dev_list_lock);
-
-       return ret;
-}
-
 /* ----- HCI protocols ----- */
 #define HCI_PROTO_DEFER             0x01
 
@@ -1033,34 +1012,6 @@ static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
        return false;
 }
 
-static inline size_t eir_get_length(u8 *eir, size_t eir_len)
-{
-       size_t parsed = 0;
-
-       while (parsed < eir_len) {
-               u8 field_len = eir[0];
-
-               if (field_len == 0)
-                       return parsed;
-
-               parsed += field_len + 1;
-               eir += field_len + 1;
-       }
-
-       return eir_len;
-}
-
-static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
-                                 u8 data_len)
-{
-       eir[eir_len++] = sizeof(type) + data_len;
-       eir[eir_len++] = type;
-       memcpy(&eir[eir_len], data, data_len);
-       eir_len += data_len;
-
-       return eir_len;
-}
-
 int hci_register_cb(struct hci_cb *hcb);
 int hci_unregister_cb(struct hci_cb *hcb);
 
@@ -1120,29 +1071,30 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event);
 #define DISCOV_BREDR_INQUIRY_LEN       0x08
 
 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
-int mgmt_index_added(struct hci_dev *hdev);
-int mgmt_index_removed(struct hci_dev *hdev);
-int mgmt_set_powered_failed(struct hci_dev *hdev, int err);
+void mgmt_index_added(struct hci_dev *hdev);
+void mgmt_index_removed(struct hci_dev *hdev);
+void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
 int mgmt_powered(struct hci_dev *hdev, u8 powered);
-int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
-int mgmt_connectable(struct hci_dev *hdev, u8 connectable);
-int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
-                     bool persistent);
-int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                         u8 addr_type, u32 flags, u8 *name, u8 name_len,
-                         u8 *dev_class);
-int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                            u8 link_type, u8 addr_type, u8 reason);
-int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                          u8 link_type, u8 addr_type, u8 status);
-int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                       u8 addr_type, u8 status);
-int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
-int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                                u8 status);
-int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                                    u8 status);
+void mgmt_discoverable_timeout(struct hci_dev *hdev);
+void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable);
+void mgmt_connectable(struct hci_dev *hdev, u8 connectable);
+void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status);
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+                      bool persistent);
+void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                          u8 addr_type, u32 flags, u8 *name, u8 name_len,
+                          u8 *dev_class);
+void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                             u8 link_type, u8 addr_type, u8 reason);
+void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                           u8 link_type, u8 addr_type, u8 status);
+void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                        u8 addr_type, u8 status);
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                 u8 status);
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                     u8 status);
 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
                              u8 link_type, u8 addr_type, __le32 value,
                              u8 confirm_hint);
@@ -1159,26 +1111,25 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 link_type, u8 addr_type, u32 passkey,
                             u8 entered);
-int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                    u8 addr_type, u8 status);
-int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
-int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
-int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
-                                  u8 status);
-int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
-int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
-                                           u8 *randomizer, u8 status);
-int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
-int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                     u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
-                     u8 ssp, u8 *eir, u16 eir_len);
-int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                    u8 addr_type, s8 rssi, u8 *name, u8 name_len);
-int mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                     u8 addr_type, u8 status);
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
+void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+                                   u8 status);
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
+void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+                                            u8 *randomizer, u8 status);
+void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
+                      u8 ssp, u8 *eir, u16 eir_len);
+void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                     u8 addr_type, s8 rssi, u8 *name, u8 name_len);
+void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
-bool mgmt_valid_hdev(struct hci_dev *hdev);
-int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent);
+void mgmt_reenable_advertising(struct hci_dev *hdev);
 
 /* HCI info for socket */
 #define hci_pi(sk) ((struct hci_pinfo *) sk)
@@ -1208,15 +1159,11 @@ struct hci_sec_filter {
 #define hci_req_lock(d)                mutex_lock(&d->req_lock)
 #define hci_req_unlock(d)      mutex_unlock(&d->req_lock)
 
-void hci_update_ad(struct hci_request *req);
-
 void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
                                        u16 latency, u16 to_multiplier);
 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
                                                        __u8 ltk[16]);
 
-u8 bdaddr_to_le(u8 bdaddr_type);
-
 #define SCO_AIRMODE_MASK       0x0003
 #define SCO_AIRMODE_CVSD       0x0000
 #define SCO_AIRMODE_TRANSP     0x0003
index 1a966afbbfa8408d159503f711f00855e0957d4b..51329905bfaafda1c290a26103914d5879144a01 100644 (file)
@@ -131,6 +131,7 @@ struct l2cap_conninfo {
 
 /* L2CAP fixed channels */
 #define L2CAP_FC_L2CAP         0x02
+#define L2CAP_FC_CONNLESS      0x04
 #define L2CAP_FC_A2MP          0x08
 
 /* L2CAP Control Field bit masks */
@@ -237,6 +238,7 @@ struct l2cap_conn_rsp {
 /* protocol/service multiplexer (PSM) */
 #define L2CAP_PSM_SDP          0x0001
 #define L2CAP_PSM_RFCOMM       0x0003
+#define L2CAP_PSM_3DSP         0x0021
 
 /* channel indentifier */
 /* channel identifier */
 #define L2CAP_CID_SIGNALING    0x0001
@@ -433,8 +435,6 @@ struct l2cap_seq_list {
 #define L2CAP_SEQ_LIST_TAIL    0x8000
 
 struct l2cap_chan {
-       struct sock *sk;
-
        struct l2cap_conn       *conn;
        struct hci_conn         *hs_hcon;
        struct hci_chan         *hs_hchan;
@@ -442,7 +442,12 @@ struct l2cap_chan {
 
        __u8            state;
 
+       bdaddr_t        dst;
+       __u8            dst_type;
+       bdaddr_t        src;
+       __u8            src_type;
        __le16          psm;
+       __le16          sport;
        __u16           dcid;
        __u16           scid;
 
@@ -453,8 +458,6 @@ struct l2cap_chan {
        __u8            chan_type;
        __u8            chan_policy;
 
-       __le16          sport;
-
        __u8            sec_level;
 
        __u8            ident;
@@ -546,9 +549,12 @@ struct l2cap_ops {
        void                    (*teardown) (struct l2cap_chan *chan, int err);
        void                    (*close) (struct l2cap_chan *chan);
        void                    (*state_change) (struct l2cap_chan *chan,
-                                                int state);
+                                                int state, int err);
        void                    (*ready) (struct l2cap_chan *chan);
        void                    (*defer) (struct l2cap_chan *chan);
+       void                    (*resume) (struct l2cap_chan *chan);
+       void                    (*set_shutdown) (struct l2cap_chan *chan);
+       long                    (*get_sndtimeo) (struct l2cap_chan *chan);
        struct sk_buff          *(*alloc_skb) (struct l2cap_chan *chan,
                                               unsigned long len, int nb);
 };
@@ -557,13 +563,11 @@ struct l2cap_conn {
        struct hci_conn         *hcon;
        struct hci_chan         *hchan;
 
-       bdaddr_t                *dst;
-       bdaddr_t                *src;
-
        unsigned int            mtu;
 
        __u32                   feat_mask;
        __u8                    fixed_chan_mask;
+       bool                    hs_enabled;
 
        __u8                    info_state;
        __u8                    info_ident;
@@ -649,6 +653,7 @@ enum {
        FLAG_FLUSHABLE,
        FLAG_EXT_CTRL,
        FLAG_EFS_ENABLE,
+       FLAG_DEFER_SETUP,
 };
 
 enum {
@@ -790,6 +795,19 @@ static inline void l2cap_chan_no_defer(struct l2cap_chan *chan)
 {
 }
 
+static inline void l2cap_chan_no_resume(struct l2cap_chan *chan)
+{
+}
+
+static inline void l2cap_chan_no_set_shutdown(struct l2cap_chan *chan)
+{
+}
+
+static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
+{
+       return 0;
+}
+
 extern bool disable_ertm;
 
 int l2cap_init_sockets(void);
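
A channel owner that only cares about data delivery can plug the three new callbacks with the no-op helpers added above; a sketch under that assumption (example_chan_recv and the ops table are hypothetical, and a complete user would also fill close/teardown/state_change/alloc_skb and friends):

#include <linux/skbuff.h>
#include <net/bluetooth/l2cap.h>

static int example_chan_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	/* consume the payload; a real user hands it to its own protocol */
	kfree_skb(skb);
	return 0;
}

static const struct l2cap_ops example_chan_ops = {
	.name		= "example",
	.recv		= example_chan_recv,
	.resume		= l2cap_chan_no_resume,
	.set_shutdown	= l2cap_chan_no_set_shutdown,
	.get_sndtimeo	= l2cap_chan_no_get_sndtimeo,
};
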
@@ -797,7 +815,6 @@ void l2cap_cleanup_sockets(void);
 bool l2cap_is_socket(struct socket *sock);
 
 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
-int __l2cap_wait_ack(struct sock *sk);
 
 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid);
index 9944c3e68c5d1dd57f40f0b72012a0fbfcdf1a4b..518c5c84e39a67ef4c9789eacd8d88117bf6ccbd 100644 (file)
@@ -93,6 +93,7 @@ struct mgmt_rp_read_index_list {
 #define MGMT_SETTING_BREDR             0x00000080
 #define MGMT_SETTING_HS                        0x00000100
 #define MGMT_SETTING_LE                        0x00000200
+#define MGMT_SETTING_ADVERTISING       0x00000400
 
 #define MGMT_OP_READ_INFO              0x0004
 #define MGMT_READ_INFO_SIZE            0
@@ -351,6 +352,23 @@ struct mgmt_cp_set_device_id {
 } __packed;
 #define MGMT_SET_DEVICE_ID_SIZE                8
 
+#define MGMT_OP_SET_ADVERTISING                0x0029
+
+#define MGMT_OP_SET_BREDR              0x002A
+
+#define MGMT_OP_SET_STATIC_ADDRESS     0x002B
+struct mgmt_cp_set_static_address {
+       bdaddr_t bdaddr;
+} __packed;
+#define MGMT_SET_STATIC_ADDRESS_SIZE   6
+
+#define MGMT_OP_SET_SCAN_PARAMS                0x002C
+struct mgmt_cp_set_scan_params {
+       __le16  interval;
+       __le16  window;
+} __packed;
+#define MGMT_SET_SCAN_PARAMS_SIZE      4
+
 #define MGMT_EV_CMD_COMPLETE           0x0001
 struct mgmt_ev_cmd_complete {
        __le16  opcode;
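
Filling the new Set Scan Parameters payload is a straight little-endian pack of the two fields; a sketch with arbitrary example values (build_scan_params is a hypothetical helper, not from this commit):

#include <linux/bug.h>
#include <net/bluetooth/mgmt.h>

static void build_scan_params(struct mgmt_cp_set_scan_params *cp)
{
	BUILD_BUG_ON(sizeof(*cp) != MGMT_SET_SCAN_PARAMS_SIZE);

	cp->interval = cpu_to_le16(0x0060);	/* example interval */
	cp->window   = cpu_to_le16(0x0030);	/* example window   */
}
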
index 7afd4199d6b6cfab22c74fb6bf0544a887bbd5af..486213a1aed8d07ad63aaba57957e7a651657696 100644 (file)
@@ -256,8 +256,8 @@ static inline void rfcomm_dlc_put(struct rfcomm_dlc *d)
                rfcomm_dlc_free(d);
 }
 
-extern void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
-extern void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_throttle(struct rfcomm_dlc *d);
+void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d);
 
 static inline void rfcomm_dlc_throttle(struct rfcomm_dlc *d)
 {
@@ -300,6 +300,8 @@ struct rfcomm_conninfo {
 
 struct rfcomm_pinfo {
        struct bt_sock bt;
+       bdaddr_t src;
+       bdaddr_t dst;
        struct rfcomm_dlc   *dlc;
        u8     channel;
        u8     sec_level;
index e252a31ee6b6389f54d1a2483a5f143d6032d57f..2019d1a0996a80d9d2fc9ef29fb33e102f2864bd 100644 (file)
@@ -55,9 +55,6 @@ struct sco_conninfo {
 struct sco_conn {
        struct hci_conn *hcon;
 
-       bdaddr_t        *dst;
-       bdaddr_t        *src;
-
        spinlock_t      lock;
        struct sock     *sk;
 
@@ -72,6 +69,8 @@ struct sco_conn {
 
 struct sco_pinfo {
        struct bt_sock  bt;
+       bdaddr_t        src;
+       bdaddr_t        dst;
        __u32           flags;
        __u16           setting;
        struct sco_conn *conn;
index 4795e817afe508b6f404aebbd71e6ebe4659cb62..097f69cfaa75fbe2ce56c2ae33f4c96351b3add6 100644 (file)
@@ -195,6 +195,6 @@ enum ifla_caif_hsi {
        __IFLA_CAIF_HSI_MAX
 };
 
-extern struct cfhsi_ops *cfhsi_get_ops(void);
+struct cfhsi_ops *cfhsi_get_ops(void);
 
 #endif         /* CAIF_HSI_H_ */
index cb710913d5c8db68e20bd3cca10f72710d1ef1ae..419202ce3f958b4a98b5faf9c3bab145e420dbac 100644 (file)
@@ -436,6 +436,15 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
                             const struct cfg80211_chan_def *chandef,
                             u32 prohibited_flags);
 
+/**
+ * cfg80211_chandef_dfs_required - checks if radar detection is required
+ * @wiphy: the wiphy to validate against
+ * @chandef: the channel definition to check
+ * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
+ */
+int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
+                                 const struct cfg80211_chan_def *chandef);
+
 /**
  * ieee80211_chandef_rate_flags - returns rate flags for a channel
  *
@@ -3474,6 +3483,15 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
 const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
                                               u32 center_freq);
 
+/**
+ * reg_initiator_name - map regulatory request initiator enum to name
+ * @initiator: the regulatory request initiator
+ *
+ * You can use this to map the regulatory request initiator enum to a
+ * proper string representation.
+ */
+const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
+
 /*
  * callbacks for asynchronous cfg80211 methods, notification
  * functions and BSS handling helpers
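
Both additions are small driver-facing helpers; a hedged sketch of a caller (the function and its logging are illustrative, not from this commit):

#include <net/cfg80211.h>

static int example_check_channel(struct wiphy *wiphy,
				 const struct cfg80211_chan_def *chandef,
				 enum nl80211_reg_initiator initiator)
{
	int ret = cfg80211_chandef_dfs_required(wiphy, chandef);

	if (ret < 0)
		return ret;	/* malformed channel definition */
	if (ret)
		pr_info("DFS needed on this channel (regulatory source: %s)\n",
			reg_initiator_name(initiator));
	return 0;
}
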
index a7a683e30b64e6beb2bc87907c85576d85385007..a8c2ef6d3b932abbb42e28be158d472effd02513 100644 (file)
@@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
        unsigned char err_offset = 0;
        u8 opt_len = opt[1];
        u8 opt_iter;
+       u8 tag_len;
 
        if (opt_len < 8) {
                err_offset = 1;
@@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
        }
 
        for (opt_iter = 6; opt_iter < opt_len;) {
-               if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+               tag_len = opt[opt_iter + 1];
+               if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
                        err_offset = opt_iter + 1;
                        goto out;
                }
-               opt_iter += opt[opt_iter + 1];
+               opt_iter += tag_len;
        }
 
 out:
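
The new tag_len check closes a loop-termination hole: a zero-length tag left opt_iter unchanged, so the validation loop never advanced. A self-contained sketch of that failure mode and of how the check rejects it (plain userspace C, not kernel code):

#include <stdio.h>

static int validate(const unsigned char *opt, unsigned char opt_len)
{
	unsigned char opt_iter;

	for (opt_iter = 6; opt_iter < opt_len;) {
		unsigned char tag_len = opt[opt_iter + 1];

		/* without the tag_len == 0 test this loop would spin forever */
		if (tag_len == 0 || tag_len > opt_len - opt_iter)
			return -1;	/* malformed option */
		opt_iter += tag_len;
	}
	return 0;
}

int main(void)
{
	unsigned char bad[8] = { 0, 0, 0, 0, 0, 0, 0x01, 0x00 };

	printf("%d\n", validate(bad, sizeof(bad)));	/* prints -1 */
	return 0;
}
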
index 6e9565324989dd0ad4eb92809af943f61055a047..3b603b199c01c5d554202323dde4253e01f7a4ab 100644 (file)
@@ -29,8 +29,8 @@ struct compat_cmsghdr {
        compat_int_t    cmsg_type;
 };
 
-extern int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
+int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
+int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 
 #else /* defined(CONFIG_COMPAT) */
 /*
@@ -40,24 +40,30 @@ extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
 #define compat_mmsghdr mmsghdr
 #endif /* defined(CONFIG_COMPAT) */
 
-extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
-extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int);
-extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned int);
-extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
-                                          unsigned int, unsigned int);
-extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned int);
-extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
-                                          unsigned int, unsigned int,
-                                          struct compat_timespec __user *);
-extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
-extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
-
-extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
-
-extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
-       int (*)(struct sock *, int, int, char __user *, unsigned int));
-extern int compat_mc_getsockopt(struct sock *, int, int, char __user *,
-       int __user *, int (*)(struct sock *, int, int, char __user *,
-                               int __user *));
+int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
+int verify_compat_iovec(struct msghdr *, struct iovec *,
+                       struct sockaddr_storage *, int);
+asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *,
+                                  unsigned int);
+asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
+                                   unsigned int, unsigned int);
+asmlinkage long compat_sys_recvmsg(int, struct compat_msghdr __user *,
+                                  unsigned int);
+asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
+                                   unsigned int, unsigned int,
+                                   struct compat_timespec __user *);
+asmlinkage long compat_sys_getsockopt(int, int, int, char __user *,
+                                     int __user *);
+int put_cmsg_compat(struct msghdr*, int, int, int, void *);
+
+int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *,
+                                    unsigned char *, int);
+
+int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int,
+                        int (*)(struct sock *, int, int, char __user *,
+                                unsigned int));
+int compat_mc_getsockopt(struct sock *, int, int, char __user *, int __user *,
+                        int (*)(struct sock *, int, int, char __user *,
+                                int __user *));
 
 #endif /* NET_COMPAT_H */
index 443626ed4cbc4daba803c58dfc76128c5c0f5b67..d2f3041c0dfaa685b2f32a633ee0931bad036329 100644 (file)
@@ -25,9 +25,9 @@ enum dcbevent_notif_type {
 };
 
 #ifdef CONFIG_DCB
-extern int register_dcbevent_notifier(struct notifier_block *nb);
-extern int unregister_dcbevent_notifier(struct notifier_block *nb);
-extern int call_dcbevent_notifiers(unsigned long val, void *v);
+int register_dcbevent_notifier(struct notifier_block *nb);
+int unregister_dcbevent_notifier(struct notifier_block *nb);
+int call_dcbevent_notifiers(unsigned long val, void *v);
 #else
 static inline int
 register_dcbevent_notifier(struct notifier_block *nb)
index c88bf4ebd330723de80a330b11baf65507e94d24..ccc15588d108ecc2a33d5b2544b8e0e1feb33069 100644 (file)
@@ -199,24 +199,26 @@ static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
        fld->fld_dport = scp->addrrem;
 }
 
-extern unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
+unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
 
 #define DN_MENUVER_ACC 0x01
 #define DN_MENUVER_USR 0x02
 #define DN_MENUVER_PRX 0x04
 #define DN_MENUVER_UIC 0x08
 
-extern struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
-extern struct sock *dn_find_by_skb(struct sk_buff *skb);
+struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr);
+struct sock *dn_find_by_skb(struct sk_buff *skb);
 #define DN_ASCBUF_LEN 9
-extern char *dn_addr2asc(__u16, char *);
-extern int dn_destroy_timer(struct sock *sk);
+char *dn_addr2asc(__u16, char *);
+int dn_destroy_timer(struct sock *sk);
 
-extern int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf, unsigned char type);
-extern int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr, unsigned char *type);
+int dn_sockaddr2username(struct sockaddr_dn *addr, unsigned char *buf,
+                        unsigned char type);
+int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *addr,
+                        unsigned char *type);
 
-extern void dn_start_slow_timer(struct sock *sk);
-extern void dn_stop_slow_timer(struct sock *sk);
+void dn_start_slow_timer(struct sock *sk);
+void dn_stop_slow_timer(struct sock *sk);
 
 extern __le16 decnet_address;
 extern int decnet_debug_level;
index b9e32db03f2040e76d7d73295406772de0029930..20b5ab06032d35dc1f5f3175d9b88d59db2e2232 100644 (file)
@@ -148,27 +148,27 @@ struct rtnode_hello_message {
 } __packed;
 
 
-extern void dn_dev_init(void);
-extern void dn_dev_cleanup(void);
+void dn_dev_init(void);
+void dn_dev_cleanup(void);
 
-extern int dn_dev_ioctl(unsigned int cmd, void __user *arg);
+int dn_dev_ioctl(unsigned int cmd, void __user *arg);
 
-extern void dn_dev_devices_off(void);
-extern void dn_dev_devices_on(void);
+void dn_dev_devices_off(void);
+void dn_dev_devices_on(void);
 
-extern void dn_dev_init_pkt(struct sk_buff *skb);
-extern void dn_dev_veri_pkt(struct sk_buff *skb);
-extern void dn_dev_hello(struct sk_buff *skb);
+void dn_dev_init_pkt(struct sk_buff *skb);
+void dn_dev_veri_pkt(struct sk_buff *skb);
+void dn_dev_hello(struct sk_buff *skb);
 
-extern void dn_dev_up(struct net_device *);
-extern void dn_dev_down(struct net_device *);
+void dn_dev_up(struct net_device *);
+void dn_dev_down(struct net_device *);
 
-extern int dn_dev_set_default(struct net_device *dev, int force);
-extern struct net_device *dn_dev_get_default(void);
-extern int dn_dev_bind_default(__le16 *addr);
+int dn_dev_set_default(struct net_device *dev, int force);
+struct net_device *dn_dev_get_default(void);
+int dn_dev_bind_default(__le16 *addr);
 
-extern int register_dnaddr_notifier(struct notifier_block *nb);
-extern int unregister_dnaddr_notifier(struct notifier_block *nb);
+int register_dnaddr_notifier(struct notifier_block *nb);
+int unregister_dnaddr_notifier(struct notifier_block *nb);
 
 static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
 {
index 74004af31c482c2bef8c9240b47e82c2f4a9bfbd..f2ca135ddcc979b07260c5d5996dc79bf7659536 100644 (file)
@@ -95,41 +95,38 @@ struct dn_fib_table {
 /*
  * dn_fib.c
  */
-extern void dn_fib_init(void);
-extern void dn_fib_cleanup(void);
-
-extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd, 
-                       unsigned long arg);
-extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, 
-                               struct nlattr *attrs[],
-                               const struct nlmsghdr *nlh, int *errp);
-extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi, 
-                       const struct flowidn *fld,
-                       struct dn_fib_res *res);
-extern void dn_fib_release_info(struct dn_fib_info *fi);
-extern void dn_fib_flush(void);
-extern void dn_fib_select_multipath(const struct flowidn *fld,
-                                       struct dn_fib_res *res);
+void dn_fib_init(void);
+void dn_fib_cleanup(void);
+
+int dn_fib_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
+                                      struct nlattr *attrs[],
+                                      const struct nlmsghdr *nlh, int *errp);
+int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
+                         const struct flowidn *fld, struct dn_fib_res *res);
+void dn_fib_release_info(struct dn_fib_info *fi);
+void dn_fib_flush(void);
+void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res);
 
 /*
  * dn_tables.c
  */
-extern struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
-extern struct dn_fib_table *dn_fib_empty_table(void);
-extern void dn_fib_table_init(void);
-extern void dn_fib_table_cleanup(void);
+struct dn_fib_table *dn_fib_get_table(u32 n, int creat);
+struct dn_fib_table *dn_fib_empty_table(void);
+void dn_fib_table_init(void);
+void dn_fib_table_cleanup(void);
 
 /*
  * dn_rules.c
  */
-extern void dn_fib_rules_init(void);
-extern void dn_fib_rules_cleanup(void);
-extern unsigned int dnet_addr_type(__le16 addr);
-extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
+void dn_fib_rules_init(void);
+void dn_fib_rules_cleanup(void);
+unsigned int dnet_addr_type(__le16 addr);
+int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
 
-extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
-extern void dn_fib_free_info(struct dn_fib_info *fi);
+void dn_fib_free_info(struct dn_fib_info *fi);
 
 static inline void dn_fib_info_put(struct dn_fib_info *fi)
 {
index 4cb4ae7fb81fddc3915402e26b90a4ba8ac28c78..fac4e3f4a6d3c0ede92af3d4df4c24319a94c0a0 100644 (file)
@@ -16,12 +16,12 @@ struct dn_neigh {
        __u8 priority;
 };
 
-extern void dn_neigh_init(void);
-extern void dn_neigh_cleanup(void);
-extern int dn_neigh_router_hello(struct sk_buff *skb);
-extern int dn_neigh_endnode_hello(struct sk_buff *skb);
-extern void dn_neigh_pointopoint_hello(struct sk_buff *skb);
-extern int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
+void dn_neigh_init(void);
+void dn_neigh_cleanup(void);
+int dn_neigh_router_hello(struct sk_buff *skb);
+int dn_neigh_endnode_hello(struct sk_buff *skb);
+void dn_neigh_pointopoint_hello(struct sk_buff *skb);
+int dn_neigh_elist(struct net_device *dev, unsigned char *ptr, int n);
 
 extern struct neigh_table dn_neigh_table;
 
index e43a2893f132f0767849271f3eac0f625dbd8828..3a3e33d184562ae778db49b0cabb1e524c2ee548 100644 (file)
 *******************************************************************************/
 /* dn_nsp.c functions prototyping */
 
-extern void dn_nsp_send_data_ack(struct sock *sk);
-extern void dn_nsp_send_oth_ack(struct sock *sk);
-extern void dn_nsp_delayed_ack(struct sock *sk);
-extern void dn_send_conn_ack(struct sock *sk);
-extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
-extern void dn_nsp_send_disc(struct sock *sk, unsigned char type, 
-                       unsigned short reason, gfp_t gfp);
-extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
-                               unsigned short reason);
-extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
-extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
-
-extern void dn_nsp_output(struct sock *sk);
-extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum);
-extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob);
-extern unsigned long dn_nsp_persist(struct sock *sk);
-extern int dn_nsp_xmit_timeout(struct sock *sk);
-
-extern int dn_nsp_rx(struct sk_buff *);
-extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
-
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err);
+void dn_nsp_send_data_ack(struct sock *sk);
+void dn_nsp_send_oth_ack(struct sock *sk);
+void dn_nsp_delayed_ack(struct sock *sk);
+void dn_send_conn_ack(struct sock *sk);
+void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
+void dn_nsp_send_disc(struct sock *sk, unsigned char type,
+                     unsigned short reason, gfp_t gfp);
+void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
+                       unsigned short reason);
+void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
+void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
+
+void dn_nsp_output(struct sock *sk);
+int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb,
+                           struct sk_buff_head *q, unsigned short acknum);
+void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp,
+                      int oob);
+unsigned long dn_nsp_persist(struct sock *sk);
+int dn_nsp_xmit_timeout(struct sock *sk);
+
+int dn_nsp_rx(struct sk_buff *);
+int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
+struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock,
+                                 long timeo, int *err);
 
 #define NSP_REASON_OK 0                /* No error */
 #define NSP_REASON_NR 1                /* No resources */
index 2e9d317c82dcc7164ef9c3cfbfc8429c6364031a..b409ad6b8d7adbb5bd25ad3b71cbea5c8723b70e 100644 (file)
     GNU General Public License for more details.
 *******************************************************************************/
 
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
-extern int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *, struct sock *sk, int flags);
-extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
-extern void dn_rt_cache_flush(int delay);
+struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
+int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
+                        struct sock *sk, int flags);
+int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
+void dn_rt_cache_flush(int delay);
 
 /* Masks for flags field */
 #define DN_RT_F_PID 0x07 /* Mask for packet type                      */
@@ -92,8 +93,8 @@ static inline bool dn_is_output_route(struct dn_route *rt)
        return rt->fld.flowidn_iif == 0;
 }
 
-extern void dn_route_init(void);
-extern void dn_route_cleanup(void);
+void dn_route_init(void);
+void dn_route_cleanup(void);
 
 #include <net/sock.h>
 #include <linux/if_arp.h>
index 3bc4865f82679137077db4dd82c177bd39b51515..44995c13e941df814db2433819d2934869df4d0f 100644 (file)
@@ -106,7 +106,7 @@ struct dst_entry {
        };
 };
 
-extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
+u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
 extern const u32 dst_default_metrics[];
 
 #define DST_METRICS_READ_ONLY  0x1UL
@@ -119,7 +119,7 @@ static inline bool dst_metrics_read_only(const struct dst_entry *dst)
        return dst->_metrics & DST_METRICS_READ_ONLY;
 }
 
-extern void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
+void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old);
 
 static inline void dst_destroy_metrics_generic(struct dst_entry *dst)
 {
@@ -262,7 +262,7 @@ static inline struct dst_entry *dst_clone(struct dst_entry *dst)
        return dst;
 }
 
-extern void dst_release(struct dst_entry *dst);
+void dst_release(struct dst_entry *dst);
 
 static inline void refdst_drop(unsigned long refdst)
 {
@@ -362,12 +362,11 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
        return child;
 }
 
-extern int dst_discard(struct sk_buff *skb);
-extern void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
-                      int initial_ref, int initial_obsolete,
-                      unsigned short flags);
-extern void __dst_free(struct dst_entry *dst);
-extern struct dst_entry *dst_destroy(struct dst_entry *dst);
+int dst_discard(struct sk_buff *skb);
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
+               int initial_obsolete, unsigned short flags);
+void __dst_free(struct dst_entry *dst);
+struct dst_entry *dst_destroy(struct dst_entry *dst);
 
 static inline void dst_free(struct dst_entry *dst)
 {
@@ -463,7 +462,7 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
        return dst;
 }
 
-extern void            dst_init(void);
+void dst_init(void);
 
 /* Flags for xfrm_lookup flags argument. */
 enum {
@@ -479,10 +478,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
 {
        return dst_orig;
 } 
+
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+       return NULL;
+}
+
 #else
-extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
-                                    const struct flowi *fl, struct sock *sk,
-                                    int flags);
+struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
+                             const struct flowi *fl, struct sock *sk,
+                             int flags);
+
+/* skb attached with this dst needs transformation if dst->xfrm is valid */
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+       return dst->xfrm;
+}
 #endif
 
 #endif /* _NET_DST_H */
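
The new dst_xfrm() accessor gives callers an #ifdef-free way to ask whether a route carries an IPsec state; a minimal sketch, assuming a hypothetical caller:

#include <net/dst.h>

static bool example_skb_needs_xfrm(const struct sk_buff *skb)
{
	const struct dst_entry *dst = skb_dst(skb);

	/* NULL both when there is no dst and when CONFIG_XFRM is disabled */
	return dst && dst_xfrm(dst);
}
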
index d58451331dbde8a0d9b4231a61bd4957f62cdeec..1356dda00d22f9cd3fad3ea0eb625b667f76c017 100644 (file)
@@ -13,7 +13,7 @@ struct esp_data {
        struct crypto_aead *aead;
 };
 
-extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
+void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len);
 
 struct ip_esp_hdr;
 
index 4b2b557fb0e8c2d709e1733a13e4ca3dbc02ff0e..e584de16e4c3629ccebef06d000b47166e5cc65d 100644 (file)
@@ -115,14 +115,13 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla)
        return frh->table;
 }
 
-extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *);
-extern void fib_rules_unregister(struct fib_rules_ops *);
+struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *,
+                                        struct net *);
+void fib_rules_unregister(struct fib_rules_ops *);
 
-extern int                     fib_rules_lookup(struct fib_rules_ops *,
-                                                struct flowi *, int flags,
-                                                struct fib_lookup_arg *);
-extern int                     fib_default_rule_add(struct fib_rules_ops *,
-                                                    u32 pref, u32 table,
-                                                    u32 flags);
-extern u32                     fib_default_rule_pref(struct fib_rules_ops *ops);
+int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
+                    struct fib_lookup_arg *);
+int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
+                        u32 flags);
+u32 fib_default_rule_pref(struct fib_rules_ops *ops);
 #endif
index 628e11b98c580d7f66ace375f5a4e0598d03b471..65ce471d2ab577f81cc62040018aff980ea82f65 100644 (file)
@@ -215,12 +215,13 @@ typedef struct flow_cache_object *(*flow_resolve_t)(
                struct net *net, const struct flowi *key, u16 family,
                u8 dir, struct flow_cache_object *oldobj, void *ctx);
 
-extern struct flow_cache_object *flow_cache_lookup(
-               struct net *net, const struct flowi *key, u16 family,
-               u8 dir, flow_resolve_t resolver, void *ctx);
+struct flow_cache_object *flow_cache_lookup(struct net *net,
+                                           const struct flowi *key, u16 family,
+                                           u8 dir, flow_resolve_t resolver,
+                                           void *ctx);
 
-extern void flow_cache_flush(void);
-extern void flow_cache_flush_deferred(void);
+void flow_cache_flush(void);
+void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
index bb8271d487b7bea10861c6548dde50d0964c4a20..7e64bd8bbda941319b494bcb7b66e79b7f52370a 100644 (file)
@@ -13,5 +13,6 @@ struct flow_keys {
        u8 ip_proto;
 };
 
-extern bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
 #endif
index 834d8add9e5fd51112f08cde966e0044604049cb..abf33bbd2e6a0413193769d353bb9ce82a10ebec 100644 (file)
@@ -112,19 +112,18 @@ struct garp_port {
        struct rcu_head                 rcu;
 };
 
-extern int     garp_register_application(struct garp_application *app);
-extern void    garp_unregister_application(struct garp_application *app);
-
-extern int     garp_init_applicant(struct net_device *dev,
-                                   struct garp_application *app);
-extern void    garp_uninit_applicant(struct net_device *dev,
-                                     struct garp_application *app);
-
-extern int     garp_request_join(const struct net_device *dev,
-                                 const struct garp_application *app,
-                                 const void *data, u8 len, u8 type);
-extern void    garp_request_leave(const struct net_device *dev,
-                                  const struct garp_application *app,
-                                  const void *data, u8 len, u8 type);
+int garp_register_application(struct garp_application *app);
+void garp_unregister_application(struct garp_application *app);
+
+int garp_init_applicant(struct net_device *dev, struct garp_application *app);
+void garp_uninit_applicant(struct net_device *dev,
+                          struct garp_application *app);
+
+int garp_request_join(const struct net_device *dev,
+                     const struct garp_application *app, const void *data,
+                     u8 len, u8 type);
+void garp_request_leave(const struct net_device *dev,
+                       const struct garp_application *app,
+                       const void *data, u8 len, u8 type);
 
 #endif /* _NET_GARP_H */
index cf8439ba4d11062a326919fa885c3ae5a24e62c5..ea4271dceff0da2df04497e424e49c18b9a6d6f5 100644 (file)
@@ -19,32 +19,31 @@ struct gnet_dump {
        struct tc_stats   tc_stats;
 };
 
-extern int gnet_stats_start_copy(struct sk_buff *skb, int type,
+int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
+                         struct gnet_dump *d);
+
+int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
+                                int tc_stats_type, int xstats_type,
                                 spinlock_t *lock, struct gnet_dump *d);
 
-extern int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
-                                       int tc_stats_type,int xstats_type,
-                                       spinlock_t *lock, struct gnet_dump *d);
-
-extern int gnet_stats_copy_basic(struct gnet_dump *d,
-                                struct gnet_stats_basic_packed *b);
-extern int gnet_stats_copy_rate_est(struct gnet_dump *d,
-                                   const struct gnet_stats_basic_packed *b,
-                                   struct gnet_stats_rate_est64 *r);
-extern int gnet_stats_copy_queue(struct gnet_dump *d,
-                                struct gnet_stats_queue *q);
-extern int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
-
-extern int gnet_stats_finish_copy(struct gnet_dump *d);
-
-extern int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
-                            struct gnet_stats_rate_est64 *rate_est,
-                            spinlock_t *stats_lock, struct nlattr *opt);
-extern void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
-                              struct gnet_stats_rate_est64 *rate_est);
-extern int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
-                                struct gnet_stats_rate_est64 *rate_est,
-                                spinlock_t *stats_lock, struct nlattr *opt);
-extern bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
-                                const struct gnet_stats_rate_est64 *rate_est);
+int gnet_stats_copy_basic(struct gnet_dump *d,
+                         struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_rate_est(struct gnet_dump *d,
+                            const struct gnet_stats_basic_packed *b,
+                            struct gnet_stats_rate_est64 *r);
+int gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q);
+int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
+
+int gnet_stats_finish_copy(struct gnet_dump *d);
+
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+                     struct gnet_stats_rate_est64 *rate_est,
+                     spinlock_t *stats_lock, struct nlattr *opt);
+void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
+                       struct gnet_stats_rate_est64 *rate_est);
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+                         struct gnet_stats_rate_est64 *rate_est,
+                         spinlock_t *stats_lock, struct nlattr *opt);
+bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
+                         const struct gnet_stats_rate_est64 *rate_est);
 #endif
index 8e0b6c856a1302227812acbc10cb5e6b1f230bf7..9b787b62cf16066e6f2335301865849fa674f7a1 100644 (file)
@@ -122,7 +122,7 @@ struct genl_ops {
        struct list_head        ops_list;
 };
 
-extern int __genl_register_family(struct genl_family *family);
+int __genl_register_family(struct genl_family *family);
 
 static inline int genl_register_family(struct genl_family *family)
 {
@@ -130,8 +130,8 @@ static inline int genl_register_family(struct genl_family *family)
        return __genl_register_family(family);
 }
 
-extern int __genl_register_family_with_ops(struct genl_family *family,
-       struct genl_ops *ops, size_t n_ops);
+int __genl_register_family_with_ops(struct genl_family *family,
+                                   struct genl_ops *ops, size_t n_ops);
 
 static inline int genl_register_family_with_ops(struct genl_family *family,
        struct genl_ops *ops, size_t n_ops)
@@ -140,18 +140,18 @@ static inline int genl_register_family_with_ops(struct genl_family *family,
        return __genl_register_family_with_ops(family, ops, n_ops);
 }
 
-extern int genl_unregister_family(struct genl_family *family);
-extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
-extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
-extern int genl_register_mc_group(struct genl_family *family,
-                                 struct genl_multicast_group *grp);
-extern void genl_unregister_mc_group(struct genl_family *family,
-                                    struct genl_multicast_group *grp);
-extern void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
-                       u32 group, struct nlmsghdr *nlh, gfp_t flags);
+int genl_unregister_family(struct genl_family *family);
+int genl_register_ops(struct genl_family *, struct genl_ops *ops);
+int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
+int genl_register_mc_group(struct genl_family *family,
+                          struct genl_multicast_group *grp);
+void genl_unregister_mc_group(struct genl_family *family,
+                             struct genl_multicast_group *grp);
+void genl_notify(struct sk_buff *skb, struct net *net, u32 portid,
+                u32 group, struct nlmsghdr *nlh, gfp_t flags);
 
 void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
-                               struct genl_family *family, int flags, u8 cmd);
+                 struct genl_family *family, int flags, u8 cmd);
 
 /**
  * genlmsg_nlhdr - Obtain netlink header from user specified header
index 57e4afdf7879ca9436fa139c31e3f162a407e502..dcd9ae3270d386b06d7565cfb8b7c4bb59dde740 100644 (file)
@@ -38,7 +38,13 @@ void gre_offload_exit(void);
 
 void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
                      int hdr_len);
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum);
+
+static inline struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
+                                                 bool gre_csum)
+{
+       return iptunnel_handle_offloads(skb, gre_csum, SKB_GSO_GRE);
+}
+
 
 static inline int ip_gre_calc_hlen(__be16 o_flags)
 {
index 081439fd070ed7ce932ff1e1877398d1d119207c..970028e13382807108764f8784d9b1f11d7078d7 100644 (file)
@@ -39,10 +39,10 @@ struct net_proto_family;
 struct sk_buff;
 struct net;
 
-extern void    icmp_send(struct sk_buff *skb_in,  int type, int code, __be32 info);
-extern int     icmp_rcv(struct sk_buff *skb);
-extern void    icmp_err(struct sk_buff *, u32 info);
-extern int     icmp_init(void);
-extern void    icmp_out_count(struct net *net, unsigned char type);
+void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+int icmp_rcv(struct sk_buff *skb);
+void icmp_err(struct sk_buff *skb, u32 info);
+int icmp_init(void);
+void icmp_out_count(struct net *net, unsigned char type);
 
 #endif /* _ICMP_H */
index 04642c920431151fd09f00bc1400c164e0a2f9c0..f981ba7adeed4c5dbaba8d5e662768d3503bfec7 100644 (file)
@@ -22,27 +22,25 @@ struct sk_buff;
 struct sock;
 struct sockaddr;
 
-extern int inet6_csk_bind_conflict(const struct sock *sk,
-                                  const struct inet_bind_bucket *tb, bool relax);
+int inet6_csk_bind_conflict(const struct sock *sk,
+                           const struct inet_bind_bucket *tb, bool relax);
 
-extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
-                                            struct flowi6 *fl6,
-                                            const struct request_sock *req);
+struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
+                                     const struct request_sock *req);
 
-extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
-                                                struct request_sock ***prevp,
-                                                const __be16 rport,
-                                                const struct in6_addr *raddr,
-                                                const struct in6_addr *laddr,
-                                                const int iif);
+struct request_sock *inet6_csk_search_req(const struct sock *sk,
+                                         struct request_sock ***prevp,
+                                         const __be16 rport,
+                                         const struct in6_addr *raddr,
+                                         const struct in6_addr *laddr,
+                                         const int iif);
 
-extern void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
-                                          struct request_sock *req,
-                                          const unsigned long timeout);
+void inet6_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+                                   const unsigned long timeout);
 
-extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
-extern int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl);
+int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl);
 
-extern struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
+struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
 #endif /* _INET6_CONNECTION_SOCK_H */
index fd4ee016ba5cd5e4dc87b08cf06aef17a6b9cc27..ae061354430836687225be9ae1a8c1f09a35ed8a 100644 (file)
 
 struct inet_hashinfo;
 
-static inline unsigned int inet6_ehashfn(struct net *net,
-                               const struct in6_addr *laddr, const u16 lport,
-                               const struct in6_addr *faddr, const __be16 fport)
+static inline unsigned int __inet6_ehashfn(const u32 lhash,
+                                   const u16 lport,
+                                   const u32 fhash,
+                                   const __be16 fport,
+                                   const u32 initval)
 {
-       u32 ports = (((u32)lport) << 16) | (__force u32)fport;
-
-       return jhash_3words((__force u32)laddr->s6_addr32[3],
-                           ipv6_addr_jhash(faddr),
-                           ports,
-                           inet_ehash_secret + net_hash_mix(net));
-}
-
-static inline int inet6_sk_ehashfn(const struct sock *sk)
-{
-       const struct inet_sock *inet = inet_sk(sk);
-       const struct ipv6_pinfo *np = inet6_sk(sk);
-       const struct in6_addr *laddr = &np->rcv_saddr;
-       const struct in6_addr *faddr = &np->daddr;
-       const __u16 lport = inet->inet_num;
-       const __be16 fport = inet->inet_dport;
-       struct net *net = sock_net(sk);
-
-       return inet6_ehashfn(net, laddr, lport, faddr, fport);
+       const u32 ports = (((u32)lport) << 16) | (__force u32)fport;
+       return jhash_3words(lhash, fhash, ports, initval);
 }
 
-extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
+int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
 
 /*
  * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
@@ -61,21 +46,19 @@ extern int __inet6_hash(struct sock *sk, struct inet_timewait_sock *twp);
  *
  * The sockhash lock must be held as a reader here.
  */
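
With the net- and address-specific pieces now passed in as parameters, the caller combines the address hashes and a secret itself; a sketch of that wiring using the same helpers the removed inline used (the secret handling here is illustrative only):

static unsigned int example_ehashfn(struct net *net,
				    const struct in6_addr *laddr, u16 lport,
				    const struct in6_addr *faddr, __be16 fport,
				    u32 secret)
{
	u32 lhash = (__force u32)laddr->s6_addr32[3];
	u32 fhash = ipv6_addr_jhash(faddr);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       secret + net_hash_mix(net));
}
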
-extern struct sock *__inet6_lookup_established(struct net *net,
-                                          struct inet_hashinfo *hashinfo,
-                                          const struct in6_addr *saddr,
-                                          const __be16 sport,
-                                          const struct in6_addr *daddr,
-                                          const u16 hnum,
-                                          const int dif);
-
-extern struct sock *inet6_lookup_listener(struct net *net,
-                                         struct inet_hashinfo *hashinfo,
-                                         const struct in6_addr *saddr,
-                                         const __be16 sport,
-                                         const struct in6_addr *daddr,
-                                         const unsigned short hnum,
-                                         const int dif);
+struct sock *__inet6_lookup_established(struct net *net,
+                                       struct inet_hashinfo *hashinfo,
+                                       const struct in6_addr *saddr,
+                                       const __be16 sport,
+                                       const struct in6_addr *daddr,
+                                       const u16 hnum, const int dif);
+
+struct sock *inet6_lookup_listener(struct net *net,
+                                  struct inet_hashinfo *hashinfo,
+                                  const struct in6_addr *saddr,
+                                  const __be16 sport,
+                                  const struct in6_addr *daddr,
+                                  const unsigned short hnum, const int dif);
 
 static inline struct sock *__inet6_lookup(struct net *net,
                                          struct inet_hashinfo *hashinfo,
@@ -110,9 +93,9 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo,
                              inet6_iif(skb));
 }
 
-extern struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
-                                const struct in6_addr *saddr, const __be16 sport,
-                                const struct in6_addr *daddr, const __be16 dport,
-                                const int dif);
+struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
+                         const struct in6_addr *saddr, const __be16 sport,
+                         const struct in6_addr *daddr, const __be16 dport,
+                         const int dif);
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 #endif /* _INET6_HASHTABLES_H */
index 234008782c8cca00e2af6394c17391189abd974a..fe7994c48b75685174134e7817bbb20ef375f890 100644 (file)
@@ -13,30 +13,30 @@ struct sock;
 struct sockaddr;
 struct socket;
 
-extern int inet_release(struct socket *sock);
-extern int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-                              int addr_len, int flags);
-extern int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-                                int addr_len, int flags);
-extern int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
-                             int addr_len, int flags);
-extern int inet_accept(struct socket *sock, struct socket *newsock, int flags);
-extern int inet_sendmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *msg, size_t size);
-extern ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
-                            size_t size, int flags);
-extern int inet_recvmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *msg, size_t size, int flags);
-extern int inet_shutdown(struct socket *sock, int how);
-extern int inet_listen(struct socket *sock, int backlog);
-extern void inet_sock_destruct(struct sock *sk);
-extern int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
-extern int inet_getname(struct socket *sock, struct sockaddr *uaddr,
-                       int *uaddr_len, int peer);
-extern int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
-extern int inet_ctl_sock_create(struct sock **sk, unsigned short family,
-                               unsigned short type, unsigned char protocol,
-                               struct net *net);
+int inet_release(struct socket *sock);
+int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+                       int addr_len, int flags);
+int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+                         int addr_len, int flags);
+int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
+                      int addr_len, int flags);
+int inet_accept(struct socket *sock, struct socket *newsock, int flags);
+int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+                size_t size);
+ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
+                     size_t size, int flags);
+int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
+                size_t size, int flags);
+int inet_shutdown(struct socket *sock, int how);
+int inet_listen(struct socket *sock, int backlog);
+void inet_sock_destruct(struct sock *sk);
+int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+                int peer);
+int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+int inet_ctl_sock_create(struct sock **sk, unsigned short family,
+                        unsigned short type, unsigned char protocol,
+                        struct net *net);
 
 static inline void inet_ctl_sock_destroy(struct sock *sk)
 {
index de2c78529afaf81f00abff62bb29708f7f6941a2..c55aeed41acea4ce12b49b2d77862a4cf609363a 100644 (file)
@@ -146,9 +146,9 @@ static inline void *inet_csk_ca(const struct sock *sk)
        return (void *)inet_csk(sk)->icsk_ca_priv;
 }
 
-extern struct sock *inet_csk_clone_lock(const struct sock *sk,
-                                       const struct request_sock *req,
-                                       const gfp_t priority);
+struct sock *inet_csk_clone_lock(const struct sock *sk,
+                                const struct request_sock *req,
+                                const gfp_t priority);
 
 enum inet_csk_ack_state_t {
        ICSK_ACK_SCHED  = 1,
@@ -157,11 +157,11 @@ enum inet_csk_ack_state_t {
        ICSK_ACK_PUSHED2 = 8
 };
 
-extern void inet_csk_init_xmit_timers(struct sock *sk,
-                                     void (*retransmit_handler)(unsigned long),
-                                     void (*delack_handler)(unsigned long),
-                                     void (*keepalive_handler)(unsigned long));
-extern void inet_csk_clear_xmit_timers(struct sock *sk);
+void inet_csk_init_xmit_timers(struct sock *sk,
+                              void (*retransmit_handler)(unsigned long),
+                              void (*delack_handler)(unsigned long),
+                              void (*keepalive_handler)(unsigned long));
+void inet_csk_clear_xmit_timers(struct sock *sk);
 
 static inline void inet_csk_schedule_ack(struct sock *sk)
 {
@@ -178,8 +178,8 @@ static inline void inet_csk_delack_init(struct sock *sk)
        memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));
 }
 
-extern void inet_csk_delete_keepalive_timer(struct sock *sk);
-extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
+void inet_csk_delete_keepalive_timer(struct sock *sk);
+void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
 
 #ifdef INET_CSK_DEBUG
 extern const char inet_csk_timer_bug_msg[];
@@ -241,23 +241,21 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
 #endif
 }
 
-extern struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
+struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 
-extern struct request_sock *inet_csk_search_req(const struct sock *sk,
-                                               struct request_sock ***prevp,
-                                               const __be16 rport,
-                                               const __be32 raddr,
-                                               const __be32 laddr);
-extern int inet_csk_bind_conflict(const struct sock *sk,
-                                 const struct inet_bind_bucket *tb, bool relax);
-extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
+struct request_sock *inet_csk_search_req(const struct sock *sk,
+                                        struct request_sock ***prevp,
+                                        const __be16 rport,
+                                        const __be32 raddr,
+                                        const __be32 laddr);
+int inet_csk_bind_conflict(const struct sock *sk,
+                          const struct inet_bind_bucket *tb, bool relax);
+int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
-extern struct dst_entry* inet_csk_route_req(struct sock *sk,
-                                           struct flowi4 *fl4,
+struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4,
+                                    const struct request_sock *req);
+struct dst_entry *inet_csk_route_child_sock(struct sock *sk, struct sock *newsk,
                                            const struct request_sock *req);
-extern struct dst_entry* inet_csk_route_child_sock(struct sock *sk,
-                                                  struct sock *newsk,
-                                                  const struct request_sock *req);
 
 static inline void inet_csk_reqsk_queue_add(struct sock *sk,
                                            struct request_sock *req,
@@ -266,9 +264,8 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
        reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
 }
 
-extern void inet_csk_reqsk_queue_hash_add(struct sock *sk,
-                                         struct request_sock *req,
-                                         unsigned long timeout);
+void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
+                                  unsigned long timeout);
 
 static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
                                                struct request_sock *req)
@@ -315,13 +312,13 @@ static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
        reqsk_free(req);
 }
 
-extern void inet_csk_reqsk_queue_prune(struct sock *parent,
-                                      const unsigned long interval,
-                                      const unsigned long timeout,
-                                      const unsigned long max_rto);
+void inet_csk_reqsk_queue_prune(struct sock *parent,
+                               const unsigned long interval,
+                               const unsigned long timeout,
+                               const unsigned long max_rto);
 
-extern void inet_csk_destroy_sock(struct sock *sk);
-extern void inet_csk_prepare_forced_close(struct sock *sk);
+void inet_csk_destroy_sock(struct sock *sk);
+void inet_csk_prepare_forced_close(struct sock *sk);
 
 /*
  * LISTEN is a special case for poll..
@@ -332,15 +329,15 @@ static inline unsigned int inet_csk_listen_poll(const struct sock *sk)
                        (POLLIN | POLLRDNORM) : 0;
 }
 
-extern int  inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
-extern void inet_csk_listen_stop(struct sock *sk);
+int inet_csk_listen_start(struct sock *sk, const int nr_table_entries);
+void inet_csk_listen_stop(struct sock *sk);
 
-extern void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
+void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
-extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
-                                     char __user *optval, int __user *optlen);
-extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
-                                     char __user *optval, unsigned int optlen);
+int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
+                              char __user *optval, int __user *optlen);
+int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
+                              char __user *optval, unsigned int optlen);
 
-extern struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
+struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 #endif /* _INET_CONNECTION_SOCK_H */
include/net/inet_frag.h
index bfcbc0017950372ddd39dd540ff3f6da99f2bee6..6f59de98dabde3be0daa202f8be4528d49676f7f 100644 (file)
@@ -64,6 +64,10 @@ struct inet_frags {
        rwlock_t                lock ____cacheline_aligned_in_smp;
        int                     secret_interval;
        struct timer_list       secret_timer;
+
+       /* The first call to hashfn is responsible to initialize
+        * rnd. This is best done with net_get_random_once.
+        */
        u32                     rnd;
        int                     qsize;
 
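
The new comment documents a lazy-seeding scheme: the per-table secret rnd is
expected to be filled in by the first hashfn call rather than at init time. A
minimal sketch of that pattern, assuming a hypothetical hash callback (the
function name, key argument and jhash_1word() choice are illustrative, not
taken from this commit):

#include <linux/jhash.h>
#include <linux/net.h>          /* net_get_random_once() */
#include <net/inet_frag.h>

/* Hypothetical hashfn: seed f->rnd once, on first use, then hash. */
static unsigned int example_frag_hashfn(struct inet_frags *f, u32 key)
{
        net_get_random_once(&f->rnd, sizeof(f->rnd));
        return jhash_1word(key, f->rnd);
}
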
include/net/inet_hashtables.h
index ef83d9e844b54f0468034ce54da3d4552868f7e9..1bdb47715def0e21496ae89a59d3d9bd5f1f2c81 100644 (file)
 #include <asm/byteorder.h>
 
 /* This is for all connections with a full identity, no wildcards.
- * One chain is dedicated to TIME_WAIT sockets.
- * I'll experiment with dynamic table growth later.
+ * The 'e' prefix stands for Establish, but we really put all sockets
+ * but LISTEN ones.
  */
 struct inet_ehash_bucket {
        struct hlist_nulls_head chain;
-       struct hlist_nulls_head twchain;
 };
 
 /* There are a few simple rules, which allow for local port reuse by
@@ -123,7 +122,6 @@ struct inet_hashinfo {
         *
         *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
         *
-        * TIME_WAIT sockets use a separate chain (twchain).
         */
        struct inet_ehash_bucket        *ehash;
        spinlock_t                      *ehash_locks;
@@ -218,22 +216,21 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
        }
 }
 
-extern struct inet_bind_bucket *
-                   inet_bind_bucket_create(struct kmem_cache *cachep,
-                                           struct net *net,
-                                           struct inet_bind_hashbucket *head,
-                                           const unsigned short snum);
-extern void inet_bind_bucket_destroy(struct kmem_cache *cachep,
-                                    struct inet_bind_bucket *tb);
+struct inet_bind_bucket *
+inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
+                       struct inet_bind_hashbucket *head,
+                       const unsigned short snum);
+void inet_bind_bucket_destroy(struct kmem_cache *cachep,
+                             struct inet_bind_bucket *tb);
 
-static inline int inet_bhashfn(struct net *net,
-               const __u16 lport, const int bhash_size)
+static inline int inet_bhashfn(struct net *net, const __u16 lport,
+                              const int bhash_size)
 {
        return (lport + net_hash_mix(net)) & (bhash_size - 1);
 }
 
-extern void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
-                          const unsigned short snum);
+void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+                   const unsigned short snum);
 
 /* These can have wildcards, don't try too hard. */
 static inline int inet_lhashfn(struct net *net, const unsigned short num)
@@ -247,23 +244,22 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk)
 }
 
 /* Caller must disable local BH processing. */
-extern int __inet_inherit_port(struct sock *sk, struct sock *child);
+int __inet_inherit_port(struct sock *sk, struct sock *child);
 
-extern void inet_put_port(struct sock *sk);
+void inet_put_port(struct sock *sk);
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
-extern int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-extern void inet_hash(struct sock *sk);
-extern void inet_unhash(struct sock *sk);
+int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
+void inet_hash(struct sock *sk);
+void inet_unhash(struct sock *sk);
 
-extern struct sock *__inet_lookup_listener(struct net *net,
-                                          struct inet_hashinfo *hashinfo,
-                                          const __be32 saddr,
-                                          const __be16 sport,
-                                          const __be32 daddr,
-                                          const unsigned short hnum,
-                                          const int dif);
+struct sock *__inet_lookup_listener(struct net *net,
+                                   struct inet_hashinfo *hashinfo,
+                                   const __be32 saddr, const __be16 sport,
+                                   const __be32 daddr,
+                                   const unsigned short hnum,
+                                   const int dif);
 
 static inline struct sock *inet_lookup_listener(struct net *net,
                struct inet_hashinfo *hashinfo,
@@ -304,30 +300,17 @@ static inline struct sock *inet_lookup_listener(struct net *net,
                                   ((__force __u64)(__be32)(__saddr)));
 #endif /* __BIG_ENDIAN */
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)    \
-       ((inet_sk(__sk)->inet_portpair == (__ports))            &&      \
-        (inet_sk(__sk)->inet_addrpair == (__cookie))           &&      \
+       (((__sk)->sk_portpair == (__ports))                     &&      \
+        ((__sk)->sk_addrpair == (__cookie))                    &&      \
         (!(__sk)->sk_bound_dev_if      ||                              \
           ((__sk)->sk_bound_dev_if == (__dif)))                &&      \
         net_eq(sock_net(__sk), (__net)))
-#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)\
-       ((inet_twsk(__sk)->tw_portpair == (__ports))    &&              \
-        (inet_twsk(__sk)->tw_addrpair == (__cookie))   &&              \
-        (!(__sk)->sk_bound_dev_if      ||                              \
-          ((__sk)->sk_bound_dev_if == (__dif)))        &&              \
-        net_eq(sock_net(__sk), (__net)))
 #else /* 32-bit arch */
 #define INET_ADDR_COOKIE(__name, __saddr, __daddr)
 #define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
-       ((inet_sk(__sk)->inet_portpair == (__ports))    &&              \
-        (inet_sk(__sk)->inet_daddr     == (__saddr))   &&              \
-        (inet_sk(__sk)->inet_rcv_saddr == (__daddr))   &&              \
-        (!(__sk)->sk_bound_dev_if      ||                              \
-          ((__sk)->sk_bound_dev_if == (__dif)))        &&              \
-        net_eq(sock_net(__sk), (__net)))
-#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \
-       ((inet_twsk(__sk)->tw_portpair == (__ports))    &&              \
-        (inet_twsk(__sk)->tw_daddr     == (__saddr))   &&              \
-        (inet_twsk(__sk)->tw_rcv_saddr == (__daddr))   &&              \
+       (((__sk)->sk_portpair == (__ports))             &&              \
+        ((__sk)->sk_daddr      == (__saddr))           &&              \
+        ((__sk)->sk_rcv_saddr  == (__daddr))           &&              \
         (!(__sk)->sk_bound_dev_if      ||                              \
           ((__sk)->sk_bound_dev_if == (__dif)))        &&              \
         net_eq(sock_net(__sk), (__net)))
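
With the dedicated twchain gone, ESTABLISHED and TIME_WAIT sockets hash onto
the same nulls chain, and INET_MATCH() now compares the skc_* fields that both
socket flavours share, which is what allows INET_TW_MATCH() to be deleted. A
hedged sketch of a lookup written against the new layout (illustrative only,
not the in-tree __inet_lookup_established()):

#include <net/inet_hashtables.h>

static struct sock *example_ehash_find(struct net *net,
                                       struct inet_ehash_bucket *head,
                                       unsigned int hash,
                                       __be32 saddr, __be16 sport,
                                       __be32 daddr, unsigned short hnum,
                                       int dif)
{
        INET_ADDR_COOKIE(acookie, saddr, daddr)
        const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
        struct hlist_nulls_node *node;
        struct sock *sk;

        sk_nulls_for_each_rcu(sk, node, &head->chain) {
                if (sk->sk_hash != hash)
                        continue;
                if (INET_MATCH(sk, net, acookie, saddr, daddr, ports, dif))
                        return sk;      /* ESTABLISHED or TIME_WAIT */
        }
        return NULL;
}
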
@@ -339,10 +322,11 @@ static inline struct sock *inet_lookup_listener(struct net *net,
  *
  * Local BH must be disabled here.
  */
-extern struct sock * __inet_lookup_established(struct net *net,
-               struct inet_hashinfo *hashinfo,
-               const __be32 saddr, const __be16 sport,
-               const __be32 daddr, const u16 hnum, const int dif);
+struct sock *__inet_lookup_established(struct net *net,
+                                      struct inet_hashinfo *hashinfo,
+                                      const __be32 saddr, const __be16 sport,
+                                      const __be32 daddr, const u16 hnum,
+                                      const int dif);
 
 static inline struct sock *
        inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
@@ -399,13 +383,14 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
                                     iph->daddr, dport, inet_iif(skb));
 }
 
-extern int __inet_hash_connect(struct inet_timewait_death_row *death_row,
-               struct sock *sk,
-               u32 port_offset,
-               int (*check_established)(struct inet_timewait_death_row *,
-                       struct sock *, __u16, struct inet_timewait_sock **),
-               int (*hash)(struct sock *sk, struct inet_timewait_sock *twp));
+int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+                       struct sock *sk, u32 port_offset,
+                       int (*check_established)(struct inet_timewait_death_row *,
+                                                struct sock *, __u16,
+                                                struct inet_timewait_sock **),
+                       int (*hash)(struct sock *sk,
+                                   struct inet_timewait_sock *twp));
 
-extern int inet_hash_connect(struct inet_timewait_death_row *death_row,
-                            struct sock *sk);
+int inet_hash_connect(struct inet_timewait_death_row *death_row,
+                     struct sock *sk);
 #endif /* _INET_HASHTABLES_H */
include/net/inet_sock.h
index b21a7f06d6a4955910b577a23b04eb904207265a..1833c3f389ee64a0c6b3862d4f2fbc6db0984b0a 100644 (file)
@@ -70,13 +70,14 @@ struct ip_options_data {
 
 struct inet_request_sock {
        struct request_sock     req;
-#if IS_ENABLED(CONFIG_IPV6)
-       u16                     inet6_rsk_offset;
-#endif
-       __be16                  loc_port;
-       __be32                  loc_addr;
-       __be32                  rmt_addr;
-       __be16                  rmt_port;
+#define ir_loc_addr            req.__req_common.skc_rcv_saddr
+#define ir_rmt_addr            req.__req_common.skc_daddr
+#define ir_num                 req.__req_common.skc_num
+#define ir_rmt_port            req.__req_common.skc_dport
+#define ir_v6_rmt_addr         req.__req_common.skc_v6_daddr
+#define ir_v6_loc_addr         req.__req_common.skc_v6_rcv_saddr
+#define ir_iif                 req.__req_common.skc_bound_dev_if
+
        kmemcheck_bitfield_begin(flags);
        u16                     snd_wscale : 4,
                                rcv_wscale : 4,
@@ -88,6 +89,7 @@ struct inet_request_sock {
                                no_srccheck: 1;
        kmemcheck_bitfield_end(flags);
        struct ip_options_rcu   *opt;
+       struct sk_buff          *pktopts;
 };
 
 static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
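
The per-request address and port members are replaced by ir_* aliases into
req.__req_common, so IPv4 and IPv6 request sockets share the same skc_*
storage. A hypothetical snippet in the resulting style (the helper itself is
illustrative; only the ir_* names come from the hunk above):

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/inet_sock.h>

/* Illustrative only: fill a request sock from an incoming IPv4 SYN. */
static void example_init_req(struct inet_request_sock *ireq,
                             const struct iphdr *iph,
                             const struct tcphdr *th)
{
        ireq->ir_loc_addr = iph->daddr;         /* was ireq->loc_addr */
        ireq->ir_rmt_addr = iph->saddr;         /* was ireq->rmt_addr */
        ireq->ir_rmt_port = th->source;         /* was ireq->rmt_port */
}
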
@@ -103,6 +105,9 @@ struct inet_cork {
        int                     length; /* Total length of all frames */
        struct dst_entry        *dst;
        u8                      tx_flags;
+       __u8                    ttl;
+       __s16                   tos;
+       char                    priority;
 };
 
 struct inet_cork_full {
@@ -143,10 +148,8 @@ struct inet_sock {
        /* Socket demultiplex comparisons on incoming packets. */
 #define inet_daddr             sk.__sk_common.skc_daddr
 #define inet_rcv_saddr         sk.__sk_common.skc_rcv_saddr
-#define inet_addrpair          sk.__sk_common.skc_addrpair
 #define inet_dport             sk.__sk_common.skc_dport
 #define inet_num               sk.__sk_common.skc_num
-#define inet_portpair          sk.__sk_common.skc_portpair
 
        __be32                  inet_saddr;
        __s16                   uc_ttl;
@@ -199,32 +202,18 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
 }
 #endif
 
-extern int inet_sk_rebuild_header(struct sock *sk);
-
-extern u32 inet_ehash_secret;
-extern u32 ipv6_hash_secret;
-extern void build_ehash_secret(void);
+int inet_sk_rebuild_header(struct sock *sk);
 
-static inline unsigned int inet_ehashfn(struct net *net,
-                                       const __be32 laddr, const __u16 lport,
-                                       const __be32 faddr, const __be16 fport)
+static inline unsigned int __inet_ehashfn(const __be32 laddr,
+                                         const __u16 lport,
+                                         const __be32 faddr,
+                                         const __be16 fport,
+                                         u32 initval)
 {
        return jhash_3words((__force __u32) laddr,
                            (__force __u32) faddr,
                            ((__u32) lport) << 16 | (__force __u32)fport,
-                           inet_ehash_secret + net_hash_mix(net));
-}
-
-static inline int inet_sk_ehashfn(const struct sock *sk)
-{
-       const struct inet_sock *inet = inet_sk(sk);
-       const __be32 laddr = inet->inet_rcv_saddr;
-       const __u16 lport = inet->inet_num;
-       const __be32 faddr = inet->inet_daddr;
-       const __be16 fport = inet->inet_dport;
-       struct net *net = sock_net(sk);
-
-       return inet_ehashfn(net, laddr, lport, faddr, fport);
+                           initval);
 }
 
 static inline struct request_sock *inet_reqsk_alloc(struct request_sock_ops *ops)
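
inet_ehashfn() and the global inet_ehash_secret/build_ehash_secret() are gone
from the header; __inet_ehashfn() now takes the secret as an initval argument,
so each user keeps and seeds its own. A minimal sketch of the expected pairing
with net_get_random_once() (the wrapper name and static secret below are
assumptions, not part of this diff):

#include <linux/cache.h>
#include <linux/net.h>          /* net_get_random_once() */
#include <net/inet_sock.h>      /* __inet_ehashfn() */
#include <net/netns/hash.h>     /* net_hash_mix() */

static u32 example_ehash_secret __read_mostly;

static unsigned int example_ehashfn(struct net *net,
                                    __be32 laddr, __u16 lport,
                                    __be32 faddr, __be16 fport)
{
        /* Seed the secret once, on first use. */
        net_get_random_once(&example_ehash_secret,
                            sizeof(example_ehash_secret));
        return __inet_ehashfn(laddr, lport, faddr, fport,
                              example_ehash_secret + net_hash_mix(net));
}
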
include/net/inet_timewait_sock.h
index f908dfc06505850b54e6868c53da957ff71c3527..71c6e264e5b521bfa43033b13a0524ac2556d002 100644 (file)
@@ -58,6 +58,11 @@ struct inet_hashinfo;
 # define INET_TWDR_RECYCLE_TICK (12 + 2 - INET_TWDR_RECYCLE_SLOTS_LOG)
 #endif
 
+static inline u32 inet_tw_time_stamp(void)
+{
+       return jiffies;
+}
+
 /* TIME_WAIT reaping mechanism. */
 #define INET_TWDR_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
 
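
tw_ttd shrinks from unsigned long to u32 further down in this file, and
inet_tw_time_stamp() becomes the single place where jiffies is narrowed to
that width. Assumed usage, for illustration only:

#include <net/inet_timewait_sock.h>

/* Sketch: record a 32-bit deadline when arming a timewait socket. */
static inline void example_twsk_arm(struct inet_timewait_sock *tw, int timeo)
{
        tw->tw_ttd = inet_tw_time_stamp() + timeo;
}
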
@@ -83,9 +88,9 @@ struct inet_timewait_death_row {
        int                     sysctl_max_tw_buckets;
 };
 
-extern void inet_twdr_hangman(unsigned long data);
-extern void inet_twdr_twkill_work(struct work_struct *work);
-extern void inet_twdr_twcal_tick(unsigned long data);
+void inet_twdr_hangman(unsigned long data);
+void inet_twdr_twkill_work(struct work_struct *work);
+void inet_twdr_twcal_tick(unsigned long data);
 
 struct inet_bind_bucket;
 
@@ -111,11 +116,11 @@ struct inet_timewait_sock {
 #define tw_prot                        __tw_common.skc_prot
 #define tw_net                 __tw_common.skc_net
 #define tw_daddr               __tw_common.skc_daddr
+#define tw_v6_daddr            __tw_common.skc_v6_daddr
 #define tw_rcv_saddr           __tw_common.skc_rcv_saddr
-#define tw_addrpair            __tw_common.skc_addrpair
+#define tw_v6_rcv_saddr        __tw_common.skc_v6_rcv_saddr
 #define tw_dport               __tw_common.skc_dport
 #define tw_num                 __tw_common.skc_num
-#define tw_portpair            __tw_common.skc_portpair
 
        int                     tw_timeout;
        volatile unsigned char  tw_substate;
@@ -130,26 +135,14 @@ struct inet_timewait_sock {
                                tw_transparent  : 1,
                                tw_pad          : 6,    /* 6 bits hole */
                                tw_tos          : 8,
-                               tw_ipv6_offset  : 16;
+                               tw_pad2         : 16;   /* 16 bits hole */
        kmemcheck_bitfield_end(flags);
-       unsigned long           tw_ttd;
+       u32                     tw_ttd;
        struct inet_bind_bucket *tw_tb;
        struct hlist_node       tw_death_node;
 };
 #define tw_tclass tw_tos
 
-static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
-                                     struct hlist_nulls_head *list)
-{
-       hlist_nulls_add_head_rcu(&tw->tw_node, list);
-}
-
-static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
-                                          struct hlist_head *list)
-{
-       hlist_add_head(&tw->tw_bind_node, list);
-}
-
 static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
 {
        return !hlist_unhashed(&tw->tw_death_node);
@@ -189,34 +182,28 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
        return (struct inet_timewait_sock *)sk;
 }
 
-static inline __be32 sk_rcv_saddr(const struct sock *sk)
-{
-/* both inet_sk() and inet_twsk() store rcv_saddr in skc_rcv_saddr */
-       return sk->__sk_common.skc_rcv_saddr;
-}
-
-extern void inet_twsk_put(struct inet_timewait_sock *tw);
+void inet_twsk_free(struct inet_timewait_sock *tw);
+void inet_twsk_put(struct inet_timewait_sock *tw);
 
-extern int inet_twsk_unhash(struct inet_timewait_sock *tw);
+int inet_twsk_unhash(struct inet_timewait_sock *tw);
 
-extern int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
-                                struct inet_hashinfo *hashinfo);
+int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+                         struct inet_hashinfo *hashinfo);
 
-extern struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
-                                                 const int state);
+struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+                                          const int state);
 
-extern void __inet_twsk_hashdance(struct inet_timewait_sock *tw,
-                                 struct sock *sk,
-                                 struct inet_hashinfo *hashinfo);
+void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+                          struct inet_hashinfo *hashinfo);
 
-extern void inet_twsk_schedule(struct inet_timewait_sock *tw,
-                              struct inet_timewait_death_row *twdr,
-                              const int timeo, const int timewait_len);
-extern void inet_twsk_deschedule(struct inet_timewait_sock *tw,
-                                struct inet_timewait_death_row *twdr);
+void inet_twsk_schedule(struct inet_timewait_sock *tw,
+                       struct inet_timewait_death_row *twdr,
+                       const int timeo, const int timewait_len);
+void inet_twsk_deschedule(struct inet_timewait_sock *tw,
+                         struct inet_timewait_death_row *twdr);
 
-extern void inet_twsk_purge(struct inet_hashinfo *hashinfo,
-                           struct inet_timewait_death_row *twdr, int family);
+void inet_twsk_purge(struct inet_hashinfo *hashinfo,
+                    struct inet_timewait_death_row *twdr, int family);
 
 static inline
 struct net *twsk_net(const struct inet_timewait_sock *twsk)
include/net/inetpeer.h
index 53f464d7cddcd6ce18925c521b99bfaefac66f2f..f4e127af4e179dd022007ed4a416ab32c9146533 100644 (file)
@@ -120,9 +120,9 @@ static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from
        }
 }
 
-extern void inet_peer_base_init(struct inet_peer_base *);
+void inet_peer_base_init(struct inet_peer_base *);
 
-void                   inet_initpeers(void) __init;
+void inet_initpeers(void) __init;
 
 #define INETPEER_METRICS_NEW   (~(u32) 0)
 
@@ -159,11 +159,11 @@ static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
 }
 
 /* can be called from BH context or outside */
-extern void inet_putpeer(struct inet_peer *p);
-extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+void inet_putpeer(struct inet_peer *p);
+bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
 
-extern void inetpeer_invalidate_tree(struct inet_peer_base *);
-extern void inetpeer_invalidate_family(int family);
+void inetpeer_invalidate_tree(struct inet_peer_base *);
+void inetpeer_invalidate_family(int family);
 
 /*
  * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
include/net/ip.h
index 5e5268807a1ceb521f673ca05ec3233389a97f61..217bc5bfc6c6e99cf52117312ceda95c98f005a6 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/skbuff.h>
 
 #include <net/inet_sock.h>
+#include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
 
@@ -56,6 +57,9 @@ struct ipcm_cookie {
        int                     oif;
        struct ip_options_rcu   *opt;
        __u8                    tx_flags;
+       __u8                    ttl;
+       __s16                   tos;
+       char                    priority;
 };
 
 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
@@ -86,64 +90,71 @@ struct packet_type;
 struct rtable;
 struct sockaddr;
 
-extern int             igmp_mc_proc_init(void);
+int igmp_mc_proc_init(void);
 
 /*
  *     Functions provided by ip.c
  */
 
-extern int             ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
-                                             __be32 saddr, __be32 daddr,
-                                             struct ip_options_rcu *opt);
-extern int             ip_rcv(struct sk_buff *skb, struct net_device *dev,
-                              struct packet_type *pt, struct net_device *orig_dev);
-extern int             ip_local_deliver(struct sk_buff *skb);
-extern int             ip_mr_input(struct sk_buff *skb);
-extern int             ip_output(struct sk_buff *skb);
-extern int             ip_mc_output(struct sk_buff *skb);
-extern int             ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-extern int             ip_do_nat(struct sk_buff *skb);
-extern void            ip_send_check(struct iphdr *ip);
-extern int             __ip_local_out(struct sk_buff *skb);
-extern int             ip_local_out(struct sk_buff *skb);
-extern int             ip_queue_xmit(struct sk_buff *skb, struct flowi *fl);
-extern void            ip_init(void);
-extern int             ip_append_data(struct sock *sk, struct flowi4 *fl4,
-                                      int getfrag(void *from, char *to, int offset, int len,
-                                                  int odd, struct sk_buff *skb),
-                               void *from, int len, int protolen,
-                               struct ipcm_cookie *ipc,
-                               struct rtable **rt,
-                               unsigned int flags);
-extern int             ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
-extern ssize_t         ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
-                               int offset, size_t size, int flags);
-extern struct sk_buff  *__ip_make_skb(struct sock *sk,
-                                     struct flowi4 *fl4,
-                                     struct sk_buff_head *queue,
-                                     struct inet_cork *cork);
-extern int             ip_send_skb(struct net *net, struct sk_buff *skb);
-extern int             ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
-extern void            ip_flush_pending_frames(struct sock *sk);
-extern struct sk_buff  *ip_make_skb(struct sock *sk,
-                                   struct flowi4 *fl4,
-                                   int getfrag(void *from, char *to, int offset, int len,
-                                               int odd, struct sk_buff *skb),
-                                   void *from, int length, int transhdrlen,
-                                   struct ipcm_cookie *ipc,
-                                   struct rtable **rtp,
-                                   unsigned int flags);
+int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
+                         __be32 saddr, __be32 daddr,
+                         struct ip_options_rcu *opt);
+int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+          struct net_device *orig_dev);
+int ip_local_deliver(struct sk_buff *skb);
+int ip_mr_input(struct sk_buff *skb);
+int ip_output(struct sk_buff *skb);
+int ip_mc_output(struct sk_buff *skb);
+int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
+int ip_do_nat(struct sk_buff *skb);
+void ip_send_check(struct iphdr *ip);
+int __ip_local_out(struct sk_buff *skb);
+int ip_local_out(struct sk_buff *skb);
+int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl);
+void ip_init(void);
+int ip_append_data(struct sock *sk, struct flowi4 *fl4,
+                  int getfrag(void *from, char *to, int offset, int len,
+                              int odd, struct sk_buff *skb),
+                  void *from, int len, int protolen,
+                  struct ipcm_cookie *ipc,
+                  struct rtable **rt,
+                  unsigned int flags);
+int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
+                      struct sk_buff *skb);
+ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
+                      int offset, size_t size, int flags);
+struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+                             struct sk_buff_head *queue,
+                             struct inet_cork *cork);
+int ip_send_skb(struct net *net, struct sk_buff *skb);
+int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
+void ip_flush_pending_frames(struct sock *sk);
+struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
+                           int getfrag(void *from, char *to, int offset,
+                                       int len, int odd, struct sk_buff *skb),
+                           void *from, int length, int transhdrlen,
+                           struct ipcm_cookie *ipc, struct rtable **rtp,
+                           unsigned int flags);
 
 static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
 {
        return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
 }
 
+static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
+{
+       return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
+}
+
+static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
+{
+       return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
+}
+
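
Together with the ttl/tos/priority fields added to struct ipcm_cookie (and
struct inet_cork) above, these helpers let a single sendmsg() carry per-call
IP_TOS/IP_TTL overrides; get_rttos() falls back to the socket's TOS when no
control message was seen. A minimal caller sketch, assuming a UDP-style
sendmsg path (helper name and error handling are illustrative):

#include <linux/socket.h>
#include <net/ip.h>

/* Illustrative: route TOS = per-call cmsg value if set, else socket TOS. */
static int example_pick_tos(struct sock *sk, struct msghdr *msg, __u8 *rtos)
{
        struct ipcm_cookie ipc = { .tos = -1 }; /* -1: no IP_TOS cmsg yet */
        int err;

        if (msg->msg_controllen) {
                err = ip_cmsg_send(sock_net(sk), msg, &ipc);
                if (err)
                        return err;
                /* A real caller would also release ipc.opt when done. */
        }
        *rtos = get_rttos(&ipc, inet_sk(sk));
        return 0;
}
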
 /* datagram.c */
-extern int             ip4_datagram_connect(struct sock *sk, 
-                                            struct sockaddr *uaddr, int addr_len);
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 
-extern void ip4_datagram_release_cb(struct sock *sk);
+void ip4_datagram_release_cb(struct sock *sk);
 
 struct ip_reply_arg {
        struct kvec iov[1];   
@@ -184,16 +195,16 @@ extern struct ipv4_config ipv4_config;
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
-extern unsigned long snmp_fold_field(void __percpu *mib[], int offt);
+unsigned long snmp_fold_field(void __percpu *mib[], int offt);
 #if BITS_PER_LONG==32
-extern u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
 #else
 static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
 {
        return snmp_fold_field(mib, offt);
 }
 #endif
-extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
+int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
 
 static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
 {
@@ -206,11 +217,7 @@ static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
        }
 }
 
-extern struct local_ports {
-       seqlock_t       lock;
-       int             range[2];
-} sysctl_local_ports;
-extern void inet_get_local_port_range(int *low, int *high);
+void inet_get_local_port_range(struct net *net, int *low, int *high);
 
 extern unsigned long *sysctl_local_reserved_ports;
 static inline int inet_is_reserved_local_port(int port)
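
The sysctl_local_ports global disappears and the ephemeral port range becomes
per network namespace, so inet_get_local_port_range() now takes a struct net.
A small caller-side sketch (the helper is illustrative):

#include <net/ip.h>
#include <net/sock.h>

static bool example_port_in_range(struct sock *sk, int port)
{
        int low, high;

        /* The range is now read from the socket's own namespace. */
        inet_get_local_port_range(sock_net(sk), &low, &high);
        return port >= low && port <= high;
}
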
@@ -231,9 +238,9 @@ extern int sysctl_ip_early_demux;
 /* From ip_output.c */
 extern int sysctl_ip_dynaddr;
 
-extern void ipfrag_init(void);
+void ipfrag_init(void);
 
-extern void ip_static_sysctl_init(void);
+void ip_static_sysctl_init(void);
 
 static inline bool ip_is_fragment(const struct iphdr *iph)
 {
@@ -262,7 +269,7 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
                 !(dst_metric_locked(dst, RTAX_MTU)));
 }
 
-extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
+void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
 
 static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
 {
@@ -367,7 +374,7 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
                struct ipv6_pinfo *np = inet6_sk(sk);
 
                memset(&np->saddr, 0, sizeof(np->saddr));
-               memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+               memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
        }
 #endif
 }
@@ -390,7 +397,7 @@ static inline int sk_mc_loop(struct sock *sk)
        return 1;
 }
 
-extern bool ip_call_ra_chain(struct sk_buff *skb);
+bool ip_call_ra_chain(struct sk_buff *skb);
 
 /*
  *     Functions provided by ip_fragment.c
@@ -428,50 +435,52 @@ int ip_frag_nqueues(struct net *net);
  *     Functions provided by ip_forward.c
  */
  
-extern int ip_forward(struct sk_buff *skb);
+int ip_forward(struct sk_buff *skb);
  
 /*
  *     Functions provided by ip_options.c
  */
  
-extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
-                            __be32 daddr, struct rtable *rt, int is_frag);
-extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
-extern void ip_options_fragment(struct sk_buff *skb);
-extern int ip_options_compile(struct net *net,
-                             struct ip_options *opt, struct sk_buff *skb);
-extern int ip_options_get(struct net *net, struct ip_options_rcu **optp,
-                         unsigned char *data, int optlen);
-extern int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
-                                   unsigned char __user *data, int optlen);
-extern void ip_options_undo(struct ip_options * opt);
-extern void ip_forward_options(struct sk_buff *skb);
-extern int ip_options_rcv_srr(struct sk_buff *skb);
+void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
+                     __be32 daddr, struct rtable *rt, int is_frag);
+int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
+void ip_options_fragment(struct sk_buff *skb);
+int ip_options_compile(struct net *net, struct ip_options *opt,
+                      struct sk_buff *skb);
+int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+                  unsigned char *data, int optlen);
+int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
+                            unsigned char __user *data, int optlen);
+void ip_options_undo(struct ip_options *opt);
+void ip_forward_options(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb);
 
 /*
  *     Functions provided by ip_sockglue.c
  */
 
-extern void    ipv4_pktinfo_prepare(struct sk_buff *skb);
-extern void    ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
-extern int     ip_cmsg_send(struct net *net,
-                            struct msghdr *msg, struct ipcm_cookie *ipc);
-extern int     ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen);
-extern int     ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
-extern int     compat_ip_setsockopt(struct sock *sk, int level,
-                       int optname, char __user *optval, unsigned int optlen);
-extern int     compat_ip_getsockopt(struct sock *sk, int level,
-                       int optname, char __user *optval, int __user *optlen);
-extern int     ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
-
-extern int     ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern void    ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 
-                             __be16 port, u32 info, u8 *payload);
-extern void    ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
-                              u32 info);
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
+void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
+int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc);
+int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+                 unsigned int optlen);
+int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
+                 int __user *optlen);
+int compat_ip_setsockopt(struct sock *sk, int level, int optname,
+                        char __user *optval, unsigned int optlen);
+int compat_ip_getsockopt(struct sock *sk, int level, int optname,
+                        char __user *optval, int __user *optlen);
+int ip_ra_control(struct sock *sk, unsigned char on,
+                 void (*destructor)(struct sock *));
+
+int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
+void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+                  u32 info, u8 *payload);
+void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
+                   u32 info);
 
 #ifdef CONFIG_PROC_FS
-extern int ip_misc_proc_init(void);
+int ip_misc_proc_init(void);
 #endif
 
 #endif /* _IP_H */
include/net/ip6_checksum.h
index 7686e3f5033d8364da7aad69be58b86018af7589..9e3c540c1b110c71b65003a6aac22cc6c333be5a 100644 (file)
@@ -66,12 +66,14 @@ static inline void __tcp_v6_send_check(struct sk_buff *skb,
        }
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
 
-       __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
+       __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
 }
+#endif
 
 int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto);
 #endif
include/net/ip6_fib.h
index 48ec25a7fcb60a09a2c323c70221a31853a82261..6738f3409a6f701e82f431a760750f749863a73c 100644 (file)
@@ -267,48 +267,39 @@ typedef struct rt6_info *(*pol_lookup_t)(struct net *,
  *     exported functions
  */
 
-extern struct fib6_table        *fib6_get_table(struct net *net, u32 id);
-extern struct fib6_table        *fib6_new_table(struct net *net, u32 id);
-extern struct dst_entry         *fib6_rule_lookup(struct net *net,
-                                                 struct flowi6 *fl6, int flags,
-                                                 pol_lookup_t lookup);
+struct fib6_table *fib6_get_table(struct net *net, u32 id);
+struct fib6_table *fib6_new_table(struct net *net, u32 id);
+struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
+                                  int flags, pol_lookup_t lookup);
 
-extern struct fib6_node                *fib6_lookup(struct fib6_node *root,
-                                            const struct in6_addr *daddr,
-                                            const struct in6_addr *saddr);
+struct fib6_node *fib6_lookup(struct fib6_node *root,
+                             const struct in6_addr *daddr,
+                             const struct in6_addr *saddr);
 
-struct fib6_node               *fib6_locate(struct fib6_node *root,
-                                            const struct in6_addr *daddr, int dst_len,
-                                            const struct in6_addr *saddr, int src_len);
+struct fib6_node *fib6_locate(struct fib6_node *root,
+                             const struct in6_addr *daddr, int dst_len,
+                             const struct in6_addr *saddr, int src_len);
 
-extern void                    fib6_clean_all_ro(struct net *net,
-                                              int (*func)(struct rt6_info *, void *arg),
-                                              int prune, void *arg);
+void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
+                   int prune, void *arg);
 
-extern void                    fib6_clean_all(struct net *net,
-                                              int (*func)(struct rt6_info *, void *arg),
-                                              int prune, void *arg);
+int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info);
 
-extern int                     fib6_add(struct fib6_node *root,
-                                        struct rt6_info *rt,
-                                        struct nl_info *info);
+int fib6_del(struct rt6_info *rt, struct nl_info *info);
 
-extern int                     fib6_del(struct rt6_info *rt,
-                                        struct nl_info *info);
+void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info);
 
-extern void                    inet6_rt_notify(int event, struct rt6_info *rt,
-                                               struct nl_info *info);
+void fib6_run_gc(unsigned long expires, struct net *net, bool force);
 
-extern void                    fib6_run_gc(unsigned long expires,
-                                           struct net *net, bool force);
+void fib6_gc_cleanup(void);
 
-extern void                    fib6_gc_cleanup(void);
+int fib6_init(void);
 
-extern int                     fib6_init(void);
+int ipv6_route_open(struct inode *inode, struct file *file);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
-extern int                     fib6_rules_init(void);
-extern void                    fib6_rules_cleanup(void);
+int fib6_rules_init(void);
+void fib6_rules_cleanup(void);
 #else
 static inline int               fib6_rules_init(void)
 {
include/net/ip6_route.h
index f525e7038cca4eaae4fd7b011ae1f97072ddffc9..733747ce163c1a08a91a24eb5277832f5c04d6ee 100644 (file)
@@ -51,7 +51,7 @@ static inline unsigned int rt6_flags2srcprefs(int flags)
        return (flags >> 3) & 7;
 }
 
-extern void rt6_bind_peer(struct rt6_info *rt, int create);
+void rt6_bind_peer(struct rt6_info *rt, int create);
 
 static inline struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
 {
@@ -72,70 +72,58 @@ static inline struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
        return __rt6_get_peer(rt, 1);
 }
 
-extern void                    ip6_route_input(struct sk_buff *skb);
+void ip6_route_input(struct sk_buff *skb);
 
-extern struct dst_entry *      ip6_route_output(struct net *net,
-                                                const struct sock *sk,
-                                                struct flowi6 *fl6);
-extern struct dst_entry *      ip6_route_lookup(struct net *net,
-                                                struct flowi6 *fl6, int flags);
+struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+                                  struct flowi6 *fl6);
+struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+                                  int flags);
 
-extern int                     ip6_route_init(void);
-extern void                    ip6_route_cleanup(void);
+int ip6_route_init(void);
+void ip6_route_cleanup(void);
 
-extern int                     ipv6_route_ioctl(struct net *net,
-                                                unsigned int cmd,
-                                                void __user *arg);
+int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg);
 
-extern int                     ip6_route_add(struct fib6_config *cfg);
-extern int                     ip6_ins_rt(struct rt6_info *);
-extern int                     ip6_del_rt(struct rt6_info *);
+int ip6_route_add(struct fib6_config *cfg);
+int ip6_ins_rt(struct rt6_info *);
+int ip6_del_rt(struct rt6_info *);
 
-extern int                     ip6_route_get_saddr(struct net *net,
-                                                   struct rt6_info *rt,
-                                                   const struct in6_addr *daddr,
-                                                   unsigned int prefs,
-                                                   struct in6_addr *saddr);
+int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
+                       const struct in6_addr *daddr, unsigned int prefs,
+                       struct in6_addr *saddr);
 
-extern struct rt6_info         *rt6_lookup(struct net *net,
-                                           const struct in6_addr *daddr,
-                                           const struct in6_addr *saddr,
-                                           int oif, int flags);
+struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
+                           const struct in6_addr *saddr, int oif, int flags);
 
-extern struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
-                                        struct flowi6 *fl6);
-extern int icmp6_dst_gc(void);
+struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);
+int icmp6_dst_gc(void);
 
-extern void fib6_force_start_gc(struct net *net);
+void fib6_force_start_gc(struct net *net);
 
-extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
-                                          const struct in6_addr *addr,
-                                          bool anycast);
+struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
+                                   const struct in6_addr *addr, bool anycast);
 
 /*
  *     support functions for ND
  *
  */
-extern struct rt6_info *       rt6_get_dflt_router(const struct in6_addr *addr,
-                                                   struct net_device *dev);
-extern struct rt6_info *       rt6_add_dflt_router(const struct in6_addr *gwaddr,
-                                                   struct net_device *dev,
-                                                   unsigned int pref);
-
-extern void                    rt6_purge_dflt_routers(struct net *net);
-
-extern int                     rt6_route_rcv(struct net_device *dev,
-                                             u8 *opt, int len,
-                                             const struct in6_addr *gwaddr);
-
-extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
-                           int oif, u32 mark);
-extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
-                              __be32 mtu);
-extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
-extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
-                                  u32 mark);
-extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
+struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr,
+                                    struct net_device *dev);
+struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
+                                    struct net_device *dev, unsigned int pref);
+
+void rt6_purge_dflt_routers(struct net *net);
+
+int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
+                 const struct in6_addr *gwaddr);
+
+void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
+                    u32 mark);
+void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+                           u32 mark);
+void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
 
 struct netlink_callback;
 
@@ -145,10 +133,10 @@ struct rt6_rtnl_dump_arg {
        struct net *net;
 };
 
-extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
-extern void rt6_ifdown(struct net *net, struct net_device *dev);
-extern void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
-extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
+int rt6_dump_route(struct rt6_info *rt, void *p_arg);
+void rt6_ifdown(struct net *net, struct net_device *dev);
+void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
+void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
 
 
 /*
@@ -194,11 +182,9 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
               skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
 }
 
-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *dest)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
 {
-       if (rt->rt6i_flags & RTF_GATEWAY)
-               return &rt->rt6i_gateway;
-       return dest;
+       return &rt->rt6i_gateway;
 }
 
 #endif
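
rt6_nexthop() drops its destination-fallback argument and simply assumes
rt->rt6i_gateway is always valid. Caller-side, the change looks like the
sketch below (the wrapper is illustrative, not from this commit):

#include <net/ip6_route.h>

/* Illustrative only: callers drop the old second argument. */
static struct in6_addr *example_nexthop(struct rt6_info *rt)
{
        return rt6_nexthop(rt);         /* was: rt6_nexthop(rt, &fl6->daddr) */
}
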
include/net/ip_fib.h
index cbf2be37c91ac603dc883fa3890e62ec1b5c7a80..9922093f575e20fa2e53a903a68da7773469c600 100644 (file)
@@ -165,7 +165,7 @@ struct fib_result_nl {
 #define FIB_TABLE_HASHSZ 2
 #endif
 
-extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
 
 #define FIB_RES_SADDR(net, res)                                \
        ((FIB_RES_NH(res).nh_saddr_genid ==             \
@@ -187,14 +187,14 @@ struct fib_table {
        unsigned long           tb_data[0];
 };
 
-extern int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
-                           struct fib_result *res, int fib_flags);
-extern int fib_table_insert(struct fib_table *, struct fib_config *);
-extern int fib_table_delete(struct fib_table *, struct fib_config *);
-extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
-                         struct netlink_callback *cb);
-extern int fib_table_flush(struct fib_table *table);
-extern void fib_free_table(struct fib_table *tb);
+int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
+                    struct fib_result *res, int fib_flags);
+int fib_table_insert(struct fib_table *, struct fib_config *);
+int fib_table_delete(struct fib_table *, struct fib_config *);
+int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
+                  struct netlink_callback *cb);
+int fib_table_flush(struct fib_table *table);
+void fib_free_table(struct fib_table *tb);
 
 
 
@@ -234,14 +234,13 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
 }
 
 #else /* CONFIG_IP_MULTIPLE_TABLES */
-extern int __net_init fib4_rules_init(struct net *net);
-extern void __net_exit fib4_rules_exit(struct net *net);
+int __net_init fib4_rules_init(struct net *net);
+void __net_exit fib4_rules_exit(struct net *net);
 
-extern struct fib_table *fib_new_table(struct net *net, u32 id);
-extern struct fib_table *fib_get_table(struct net *net, u32 id);
+struct fib_table *fib_new_table(struct net *net, u32 id);
+struct fib_table *fib_get_table(struct net *net, u32 id);
 
-extern int __fib_lookup(struct net *net, struct flowi4 *flp,
-                       struct fib_result *res);
+int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res);
 
 static inline int fib_lookup(struct net *net, struct flowi4 *flp,
                             struct fib_result *res)
@@ -269,12 +268,12 @@ static inline int fib_lookup(struct net *net, struct flowi4 *flp,
 
 /* Exported by fib_frontend.c */
 extern const struct nla_policy rtm_ipv4_policy[];
-extern void            ip_fib_init(void);
-extern __be32 fib_compute_spec_dst(struct sk_buff *skb);
-extern int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
-                              u8 tos, int oif, struct net_device *dev,
-                              struct in_device *idev, u32 *itag);
-extern void fib_select_default(struct fib_result *res);
+void ip_fib_init(void);
+__be32 fib_compute_spec_dst(struct sk_buff *skb);
+int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+                       u8 tos, int oif, struct net_device *dev,
+                       struct in_device *idev, u32 *itag);
+void fib_select_default(struct fib_result *res);
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static inline int fib_num_tclassid_users(struct net *net)
 {
@@ -288,15 +287,15 @@ static inline int fib_num_tclassid_users(struct net *net)
 #endif
 
 /* Exported by fib_semantics.c */
-extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
-extern int fib_sync_down_dev(struct net_device *dev, int force);
-extern int fib_sync_down_addr(struct net *net, __be32 local);
-extern int fib_sync_up(struct net_device *dev);
-extern void fib_select_multipath(struct fib_result *res);
+int ip_fib_check_default(__be32 gw, struct net_device *dev);
+int fib_sync_down_dev(struct net_device *dev, int force);
+int fib_sync_down_addr(struct net *net, __be32 local);
+int fib_sync_up(struct net_device *dev);
+void fib_select_multipath(struct fib_result *res);
 
 /* Exported by fib_trie.c */
-extern void fib_trie_init(void);
-extern struct fib_table *fib_trie_table(u32 id);
+void fib_trie_init(void);
+struct fib_table *fib_trie_table(u32 id);
 
 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 {
@@ -314,7 +313,7 @@ static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
 #endif
 }
 
-extern void free_fib_info(struct fib_info *fi);
+void free_fib_info(struct fib_info *fi);
 
 static inline void fib_info_put(struct fib_info *fi)
 {
@@ -323,8 +322,8 @@ static inline void fib_info_put(struct fib_info *fi)
 }
 
 #ifdef CONFIG_PROC_FS
-extern int __net_init  fib_proc_init(struct net *net);
-extern void __net_exit fib_proc_exit(struct net *net);
+int __net_init fib_proc_init(struct net *net);
+void __net_exit fib_proc_exit(struct net *net);
 #else
 static inline int fib_proc_init(struct net *net)
 {
include/net/ip_tunnels.h
index a0a4a100f5c9a74ce544097dee5f3d25e68e8b9b..732f8c6ae975877ccd9c5873369f9be6490f06e9 100644 (file)
@@ -150,6 +150,9 @@ int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
                  __be32 src, __be32 dst, __u8 proto,
                  __u8 tos, __u8 ttl, __be16 df, bool xnet);
 
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
+                                        int gso_type_mask);
+
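
The new iptunnel_handle_offloads() declaration centralizes the GSO and
checksum preparation that tunnel drivers used to open-code before pushing
their encapsulation header. A hedged usage sketch for a GRE-like transmit
path (the error-pointer return convention is an assumption here, not
something this hunk shows):

#include <linux/err.h>
#include <net/ip_tunnels.h>

/* Sketch: prepare skb offload state before building the tunnel header. */
static struct sk_buff *example_prepare_gre(struct sk_buff *skb, bool csum)
{
        skb = iptunnel_handle_offloads(skb, csum, SKB_GSO_GRE);
        if (IS_ERR(skb))
                return NULL;    /* assumed: ERR_PTR() on failure */
        return skb;
}
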
 static inline void iptunnel_xmit_stats(int err,
                                       struct net_device_stats *err_stats,
                                       struct pcpu_tstats __percpu *stats)
include/net/ip_vs.h
index 9c4d37ec45a1e5dd0536dcbef7fdfa8eac684401..1c2e1b9f6b8603aecdf14452a6b424b227cd504b 100644 (file)
@@ -236,7 +236,7 @@ static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
 #ifdef CONFIG_IP_VS_DEBUG
 #include <linux/net.h>
 
-extern int ip_vs_get_debug_level(void);
+int ip_vs_get_debug_level(void);
 
 static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
                                         const union nf_inet_addr *addr,
@@ -532,9 +532,9 @@ struct ip_vs_proto_data {
        struct tcp_states_t     *tcp_state_table;
 };
 
-extern struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
-extern struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
-                                                    unsigned short proto);
+struct ip_vs_protocol   *ip_vs_proto_get(unsigned short proto);
+struct ip_vs_proto_data *ip_vs_proto_data_get(struct net *net,
+                                             unsigned short proto);
 
 struct ip_vs_conn_param {
        struct net                      *net;
@@ -1173,8 +1173,8 @@ static inline int sysctl_backup_only(struct netns_ipvs *ipvs)
  *      IPVS core functions
  *      (from ip_vs_core.c)
  */
-extern const char *ip_vs_proto_name(unsigned int proto);
-extern void ip_vs_init_hash_table(struct list_head *table, int rows);
+const char *ip_vs_proto_name(unsigned int proto);
+void ip_vs_init_hash_table(struct list_head *table, int rows);
 #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
 
 #define IP_VS_APP_TYPE_FTP     1
@@ -1237,22 +1237,22 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
        smp_mb__before_atomic_dec();
        atomic_dec(&cp->refcnt);
 }
-extern void ip_vs_conn_put(struct ip_vs_conn *cp);
-extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
+void ip_vs_conn_put(struct ip_vs_conn *cp);
+void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
 
 struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
                                  const union nf_inet_addr *daddr,
                                  __be16 dport, unsigned int flags,
                                  struct ip_vs_dest *dest, __u32 fwmark);
-extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
+void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
 
-extern const char * ip_vs_state_name(__u16 proto, int state);
+const char *ip_vs_state_name(__u16 proto, int state);
 
-extern void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
-extern int ip_vs_check_template(struct ip_vs_conn *ct);
-extern void ip_vs_random_dropentry(struct net *net);
-extern int ip_vs_conn_init(void);
-extern void ip_vs_conn_cleanup(void);
+void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp);
+int ip_vs_check_template(struct ip_vs_conn *ct);
+void ip_vs_random_dropentry(struct net *net);
+int ip_vs_conn_init(void);
+void ip_vs_conn_cleanup(void);
 
 static inline void ip_vs_control_del(struct ip_vs_conn *cp)
 {
@@ -1317,37 +1317,36 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
 /*
  * IPVS netns init & cleanup functions
  */
-extern int ip_vs_estimator_net_init(struct net *net);
-extern int ip_vs_control_net_init(struct net *net);
-extern int ip_vs_protocol_net_init(struct net *net);
-extern int ip_vs_app_net_init(struct net *net);
-extern int ip_vs_conn_net_init(struct net *net);
-extern int ip_vs_sync_net_init(struct net *net);
-extern void ip_vs_conn_net_cleanup(struct net *net);
-extern void ip_vs_app_net_cleanup(struct net *net);
-extern void ip_vs_protocol_net_cleanup(struct net *net);
-extern void ip_vs_control_net_cleanup(struct net *net);
-extern void ip_vs_estimator_net_cleanup(struct net *net);
-extern void ip_vs_sync_net_cleanup(struct net *net);
-extern void ip_vs_service_net_cleanup(struct net *net);
+int ip_vs_estimator_net_init(struct net *net);
+int ip_vs_control_net_init(struct net *net);
+int ip_vs_protocol_net_init(struct net *net);
+int ip_vs_app_net_init(struct net *net);
+int ip_vs_conn_net_init(struct net *net);
+int ip_vs_sync_net_init(struct net *net);
+void ip_vs_conn_net_cleanup(struct net *net);
+void ip_vs_app_net_cleanup(struct net *net);
+void ip_vs_protocol_net_cleanup(struct net *net);
+void ip_vs_control_net_cleanup(struct net *net);
+void ip_vs_estimator_net_cleanup(struct net *net);
+void ip_vs_sync_net_cleanup(struct net *net);
+void ip_vs_service_net_cleanup(struct net *net);
 
 /*
  *      IPVS application functions
  *      (from ip_vs_app.c)
  */
 #define IP_VS_APP_MAX_PORTS  8
-extern struct ip_vs_app *register_ip_vs_app(struct net *net,
-                                           struct ip_vs_app *app);
-extern void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
-extern int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
-extern void ip_vs_unbind_app(struct ip_vs_conn *cp);
-extern int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app,
-                                 __u16 proto, __u16 port);
-extern int ip_vs_app_inc_get(struct ip_vs_app *inc);
-extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
-
-extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
-extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
+struct ip_vs_app *register_ip_vs_app(struct net *net, struct ip_vs_app *app);
+void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app);
+int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
+void ip_vs_unbind_app(struct ip_vs_conn *cp);
+int register_ip_vs_app_inc(struct net *net, struct ip_vs_app *app, __u16 proto,
+                          __u16 port);
+int ip_vs_app_inc_get(struct ip_vs_app *inc);
+void ip_vs_app_inc_put(struct ip_vs_app *inc);
+
+int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
+int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
 
 int register_ip_vs_pe(struct ip_vs_pe *pe);
 int unregister_ip_vs_pe(struct ip_vs_pe *pe);
@@ -1368,17 +1367,15 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name);
 /*
  *     IPVS protocol functions (from ip_vs_proto.c)
  */
-extern int ip_vs_protocol_init(void);
-extern void ip_vs_protocol_cleanup(void);
-extern void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
-extern int *ip_vs_create_timeout_table(int *table, int size);
-extern int
-ip_vs_set_state_timeout(int *table, int num, const char *const *names,
-                       const char *name, int to);
-extern void
-ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
-                         const struct sk_buff *skb,
-                         int offset, const char *msg);
+int ip_vs_protocol_init(void);
+void ip_vs_protocol_cleanup(void);
+void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
+int *ip_vs_create_timeout_table(int *table, int size);
+int ip_vs_set_state_timeout(int *table, int num, const char *const *names,
+                           const char *name, int to);
+void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
+                              const struct sk_buff *skb, int offset,
+                              const char *msg);
 
 extern struct ip_vs_protocol ip_vs_protocol_tcp;
 extern struct ip_vs_protocol ip_vs_protocol_udp;
@@ -1391,22 +1388,22 @@ extern struct ip_vs_protocol ip_vs_protocol_sctp;
  *      Registering/unregistering scheduler functions
  *      (from ip_vs_sched.c)
  */
-extern int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
-extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
-extern int ip_vs_bind_scheduler(struct ip_vs_service *svc,
-                               struct ip_vs_scheduler *scheduler);
-extern void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
-                                  struct ip_vs_scheduler *sched);
-extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
-extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
-extern struct ip_vs_conn *
+int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
+int ip_vs_bind_scheduler(struct ip_vs_service *svc,
+                        struct ip_vs_scheduler *scheduler);
+void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
+                           struct ip_vs_scheduler *sched);
+struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
+void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
+struct ip_vs_conn *
 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
               struct ip_vs_proto_data *pd, int *ignored,
               struct ip_vs_iphdr *iph);
-extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
-                       struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
+int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
+               struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph);
 
-extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
 
 
 /*
@@ -1415,25 +1412,24 @@ extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
 extern struct ip_vs_stats ip_vs_stats;
 extern int sysctl_ip_vs_sync_ver;
 
-extern struct ip_vs_service *
+struct ip_vs_service *
 ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
                  const union nf_inet_addr *vaddr, __be16 vport);
 
-extern bool
-ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
-                      const union nf_inet_addr *daddr, __be16 dport);
-
-extern int ip_vs_use_count_inc(void);
-extern void ip_vs_use_count_dec(void);
-extern int ip_vs_register_nl_ioctl(void);
-extern void ip_vs_unregister_nl_ioctl(void);
-extern int ip_vs_control_init(void);
-extern void ip_vs_control_cleanup(void);
-extern struct ip_vs_dest *
+bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
+                           const union nf_inet_addr *daddr, __be16 dport);
+
+int ip_vs_use_count_inc(void);
+void ip_vs_use_count_dec(void);
+int ip_vs_register_nl_ioctl(void);
+void ip_vs_unregister_nl_ioctl(void);
+int ip_vs_control_init(void);
+void ip_vs_control_cleanup(void);
+struct ip_vs_dest *
 ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
                __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
                __u16 protocol, __u32 fwmark, __u32 flags);
-extern void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
+void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
 {
@@ -1450,56 +1446,49 @@ static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
  *      IPVS sync daemon data and function prototypes
  *      (from ip_vs_sync.c)
  */
-extern int start_sync_thread(struct net *net, int state, char *mcast_ifn,
-                            __u8 syncid);
-extern int stop_sync_thread(struct net *net, int state);
-extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
-
+int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid);
+int stop_sync_thread(struct net *net, int state);
+void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts);
 
 /*
  *      IPVS rate estimator prototypes (from ip_vs_est.c)
  */
-extern void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
-extern void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
-extern void ip_vs_zero_estimator(struct ip_vs_stats *stats);
-extern void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
-                                struct ip_vs_stats *stats);
+void ip_vs_start_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_stop_estimator(struct net *net, struct ip_vs_stats *stats);
+void ip_vs_zero_estimator(struct ip_vs_stats *stats);
+void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
+                         struct ip_vs_stats *stats);
 
 /*
  *     Various IPVS packet transmitters (from ip_vs_xmit.c)
  */
-extern int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                          struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                            struct ip_vs_protocol *pp,
-                            struct ip_vs_iphdr *iph);
-extern int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                         struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                            struct ip_vs_protocol *pp,
-                            struct ip_vs_iphdr *iph);
-extern int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                        struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-                          struct ip_vs_protocol *pp, int offset,
-                          unsigned int hooknum, struct ip_vs_iphdr *iph);
-extern void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
+int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                   struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                  struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
+                   struct ip_vs_protocol *pp, int offset,
+                   unsigned int hooknum, struct ip_vs_iphdr *iph);
+void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
 
 #ifdef CONFIG_IP_VS_IPV6
-extern int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-                               struct ip_vs_protocol *pp,
-                               struct ip_vs_iphdr *iph);
-extern int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-                            struct ip_vs_protocol *pp,
-                            struct ip_vs_iphdr *iph);
-extern int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-                               struct ip_vs_protocol *pp,
-                               struct ip_vs_iphdr *iph);
-extern int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-                           struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
-extern int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-                             struct ip_vs_protocol *pp, int offset,
-                             unsigned int hooknum, struct ip_vs_iphdr *iph);
+int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+                        struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+                     struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+                        struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+                    struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
+int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
+                      struct ip_vs_protocol *pp, int offset,
+                      unsigned int hooknum, struct ip_vs_iphdr *iph);
 #endif
 
 #ifdef CONFIG_SYSCTL
@@ -1548,15 +1537,15 @@ static inline char ip_vs_fwd_tag(struct ip_vs_conn *cp)
        return fwd;
 }
 
-extern void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
-                          struct ip_vs_conn *cp, int dir);
+void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
+                   struct ip_vs_conn *cp, int dir);
 
 #ifdef CONFIG_IP_VS_IPV6
-extern void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
-                             struct ip_vs_conn *cp, int dir);
+void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
+                      struct ip_vs_conn *cp, int dir);
 #endif
 
-extern __sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
+__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset);
 
 static inline __wsum ip_vs_check_diff4(__be32 old, __be32 new, __wsum oldsum)
 {
@@ -1615,13 +1604,13 @@ static inline int ip_vs_conntrack_enabled(struct netns_ipvs *ipvs)
 #endif
 }
 
-extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
-                                  int outin);
-extern int ip_vs_confirm_conntrack(struct sk_buff *skb);
-extern void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
-                                     struct ip_vs_conn *cp, u_int8_t proto,
-                                     const __be16 port, int from_rs);
-extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
+void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp,
+                           int outin);
+int ip_vs_confirm_conntrack(struct sk_buff *skb);
+void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct,
+                              struct ip_vs_conn *cp, u_int8_t proto,
+                              const __be16 port, int from_rs);
+void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp);
 
 #else
 
index bbf1c8fb8511c70220c4a2b7c046f6007cf525b5..dd96638ab8ff6390b498aa3c8729eb16d46496a4 100644 (file)
@@ -244,14 +244,14 @@ struct ipv6_fl_socklist {
        struct rcu_head                 rcu;
 };
 
-extern struct ip6_flowlabel    *fl6_sock_lookup(struct sock *sk, __be32 label);
-extern struct ipv6_txoptions   *fl6_merge_options(struct ipv6_txoptions * opt_space,
-                                                  struct ip6_flowlabel * fl,
-                                                  struct ipv6_txoptions * fopt);
-extern void                    fl6_free_socklist(struct sock *sk);
-extern int                     ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
-extern int                     ip6_flowlabel_init(void);
-extern void                    ip6_flowlabel_cleanup(void);
+struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
+struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+                                        struct ip6_flowlabel *fl,
+                                        struct ipv6_txoptions *fopt);
+void fl6_free_socklist(struct sock *sk);
+int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
+int ip6_flowlabel_init(void);
+void ip6_flowlabel_cleanup(void);
 
 static inline void fl6_sock_release(struct ip6_flowlabel *fl)
 {
@@ -259,7 +259,7 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
                atomic_dec(&fl->users);
 }
 
-extern void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
 
 int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                               struct icmp6hdr *thdr, int len);
@@ -267,19 +267,21 @@ int icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
 struct dst_entry *icmpv6_route_lookup(struct net *net, struct sk_buff *skb,
                                      struct sock *sk, struct flowi6 *fl6);
 
-extern int                     ip6_ra_control(struct sock *sk, int sel);
+int ip6_ra_control(struct sock *sk, int sel);
 
-extern int                     ipv6_parse_hopopts(struct sk_buff *skb);
+int ipv6_parse_hopopts(struct sk_buff *skb);
 
-extern struct ipv6_txoptions *  ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt);
-extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
-                                                  int newtype,
-                                                  struct ipv6_opt_hdr __user *newopt,
-                                                  int newoptlen);
+struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
+                                       struct ipv6_txoptions *opt);
+struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
+                                         struct ipv6_txoptions *opt,
+                                         int newtype,
+                                         struct ipv6_opt_hdr __user *newopt,
+                                         int newoptlen);
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt);
 
-extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb);
 
 static inline bool ipv6_accept_ra(struct inet6_dev *idev)
 {
@@ -306,7 +308,7 @@ static inline int ip6_frag_mem(struct net *net)
 #define IPV6_FRAG_LOW_THRESH   (3 * 1024*1024) /* 3145728 */
 #define IPV6_FRAG_TIMEOUT      (60 * HZ)       /* 60 seconds */
 
-extern int __ipv6_addr_type(const struct in6_addr *addr);
+int __ipv6_addr_type(const struct in6_addr *addr);
 static inline int ipv6_addr_type(const struct in6_addr *addr)
 {
        return __ipv6_addr_type(addr) & 0xffff;
@@ -537,14 +539,14 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a)
 }
 
 /* more secured version of ipv6_addr_hash() */
-static inline u32 ipv6_addr_jhash(const struct in6_addr *a)
+static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
 {
        u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];
 
        return jhash_3words(v,
                            (__force u32)a->s6_addr32[2],
                            (__force u32)a->s6_addr32[3],
-                           ipv6_hash_secret);
+                           initval);
 }
 
 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
@@ -656,9 +658,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
        return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
 }
 
-extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
 
-extern int ip6_dst_hoplimit(struct dst_entry *dst);
+int ip6_dst_hoplimit(struct dst_entry *dst);
 
 /*
  *     Header manipulation
@@ -682,83 +684,65 @@ static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
  *     rcv function (called from netdevice level)
  */
 
-extern int                     ipv6_rcv(struct sk_buff *skb, 
-                                        struct net_device *dev, 
-                                        struct packet_type *pt,
-                                        struct net_device *orig_dev);
+int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
+            struct packet_type *pt, struct net_device *orig_dev);
 
-extern int                     ip6_rcv_finish(struct sk_buff *skb);
+int ip6_rcv_finish(struct sk_buff *skb);
 
 /*
  *     upper-layer output functions
  */
-extern int                     ip6_xmit(struct sock *sk,
-                                        struct sk_buff *skb,
-                                        struct flowi6 *fl6,
-                                        struct ipv6_txoptions *opt,
-                                        int tclass);
-
-extern int                     ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
-
-extern int                     ip6_append_data(struct sock *sk,
-                                               int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb),
-                                               void *from,
-                                               int length,
-                                               int transhdrlen,
-                                               int hlimit,
-                                               int tclass,
-                                               struct ipv6_txoptions *opt,
-                                               struct flowi6 *fl6,
-                                               struct rt6_info *rt,
-                                               unsigned int flags,
-                                               int dontfrag);
-
-extern int                     ip6_push_pending_frames(struct sock *sk);
-
-extern void                    ip6_flush_pending_frames(struct sock *sk);
-
-extern int                     ip6_dst_lookup(struct sock *sk,
-                                              struct dst_entry **dst,
-                                              struct flowi6 *fl6);
-extern struct dst_entry *      ip6_dst_lookup_flow(struct sock *sk,
-                                                   struct flowi6 *fl6,
-                                                   const struct in6_addr *final_dst,
-                                                   bool can_sleep);
-extern struct dst_entry *      ip6_sk_dst_lookup_flow(struct sock *sk,
-                                                      struct flowi6 *fl6,
-                                                      const struct in6_addr *final_dst,
-                                                      bool can_sleep);
-extern struct dst_entry *      ip6_blackhole_route(struct net *net,
-                                                   struct dst_entry *orig_dst);
+int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
+            struct ipv6_txoptions *opt, int tclass);
+
+int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
+
+int ip6_append_data(struct sock *sk,
+                   int getfrag(void *from, char *to, int offset, int len,
+                               int odd, struct sk_buff *skb),
+                   void *from, int length, int transhdrlen, int hlimit,
+                   int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
+                   struct rt6_info *rt, unsigned int flags, int dontfrag);
+
+int ip6_push_pending_frames(struct sock *sk);
+
+void ip6_flush_pending_frames(struct sock *sk);
+
+int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+                                     const struct in6_addr *final_dst,
+                                     bool can_sleep);
+struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
+                                        const struct in6_addr *final_dst,
+                                        bool can_sleep);
+struct dst_entry *ip6_blackhole_route(struct net *net,
+                                     struct dst_entry *orig_dst);
 
 /*
  *     skb processing functions
  */
 
-extern int                     ip6_output(struct sk_buff *skb);
-extern int                     ip6_forward(struct sk_buff *skb);
-extern int                     ip6_input(struct sk_buff *skb);
-extern int                     ip6_mc_input(struct sk_buff *skb);
+int ip6_output(struct sk_buff *skb);
+int ip6_forward(struct sk_buff *skb);
+int ip6_input(struct sk_buff *skb);
+int ip6_mc_input(struct sk_buff *skb);
 
-extern int                     __ip6_local_out(struct sk_buff *skb);
-extern int                     ip6_local_out(struct sk_buff *skb);
+int __ip6_local_out(struct sk_buff *skb);
+int ip6_local_out(struct sk_buff *skb);
 
 /*
  *     Extension header (options) processing
  */
 
-extern void                    ipv6_push_nfrag_opts(struct sk_buff *skb,
-                                                    struct ipv6_txoptions *opt,
-                                                    u8 *proto,
-                                                    struct in6_addr **daddr_p);
-extern void                    ipv6_push_frag_opts(struct sk_buff *skb,
-                                                   struct ipv6_txoptions *opt,
-                                                   u8 *proto);
+void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+                         u8 *proto, struct in6_addr **daddr_p);
+void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
+                        u8 *proto);
 
-extern int                     ipv6_skip_exthdr(const struct sk_buff *, int start,
-                                                u8 *nexthdrp, __be16 *frag_offp);
+int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp,
+                    __be16 *frag_offp);
 
-extern bool                    ipv6_ext_hdr(u8 nexthdr);
+bool ipv6_ext_hdr(u8 nexthdr);
 
 enum {
        IP6_FH_F_FRAG           = (1 << 0),
@@ -767,57 +751,44 @@ enum {
 };
 
 /* find specified header and get offset to it */
-extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
-                        int target, unsigned short *fragoff, int *fragflg);
+int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target,
+                 unsigned short *fragoff, int *fragflg);
 
-extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
+int ipv6_find_tlv(struct sk_buff *skb, int offset, int type);
 
-extern struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
-                                      const struct ipv6_txoptions *opt,
-                                      struct in6_addr *orig);
+struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
+                               const struct ipv6_txoptions *opt,
+                               struct in6_addr *orig);
 
 /*
  *     socket options (ipv6_sockglue.c)
  */
 
-extern int                     ipv6_setsockopt(struct sock *sk, int level, 
-                                               int optname,
-                                               char __user *optval, 
-                                               unsigned int optlen);
-extern int                     ipv6_getsockopt(struct sock *sk, int level, 
-                                               int optname,
-                                               char __user *optval, 
-                                               int __user *optlen);
-extern int                     compat_ipv6_setsockopt(struct sock *sk,
-                                               int level,
-                                               int optname,
-                                               char __user *optval,
-                                               unsigned int optlen);
-extern int                     compat_ipv6_getsockopt(struct sock *sk,
-                                               int level,
-                                               int optname,
-                                               char __user *optval,
-                                               int __user *optlen);
-
-extern int                     ip6_datagram_connect(struct sock *sk, 
-                                                    struct sockaddr *addr, int addr_len);
-
-extern int                     ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
-extern int                     ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
-extern void                    ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
-                                               u32 info, u8 *payload);
-extern void                    ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
-extern void                    ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
-
-extern int inet6_release(struct socket *sock);
-extern int inet6_bind(struct socket *sock, struct sockaddr *uaddr, 
-                     int addr_len);
-extern int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
-                        int *uaddr_len, int peer);
-extern int inet6_ioctl(struct socket *sock, unsigned int cmd, 
-                      unsigned long arg);
-
-extern int inet6_hash_connect(struct inet_timewait_death_row *death_row,
+int ipv6_setsockopt(struct sock *sk, int level, int optname,
+                   char __user *optval, unsigned int optlen);
+int ipv6_getsockopt(struct sock *sk, int level, int optname,
+                   char __user *optval, int __user *optlen);
+int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
+                          char __user *optval, unsigned int optlen);
+int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
+                          char __user *optval, int __user *optlen);
+
+int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len);
+
+int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
+int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
+void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
+                    u32 info, u8 *payload);
+void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
+void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu);
+
+int inet6_release(struct socket *sock);
+int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
+int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len,
+                 int peer);
+int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+int inet6_hash_connect(struct inet_timewait_death_row *death_row,
                              struct sock *sk);
 
 /*
@@ -829,30 +800,27 @@ extern const struct proto_ops inet6_dgram_ops;
 struct group_source_req;
 struct group_filter;
 
-extern int ip6_mc_source(int add, int omode, struct sock *sk,
-                        struct group_source_req *pgsr);
-extern int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
-extern int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
-                        struct group_filter __user *optval,
-                        int __user *optlen);
-extern unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
-                                   const struct in6_addr *daddr, u32 rnd);
+int ip6_mc_source(int add, int omode, struct sock *sk,
+                 struct group_source_req *pgsr);
+int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf);
+int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
+                 struct group_filter __user *optval, int __user *optlen);
 
 #ifdef CONFIG_PROC_FS
-extern int  ac6_proc_init(struct net *net);
-extern void ac6_proc_exit(struct net *net);
-extern int  raw6_proc_init(void);
-extern void raw6_proc_exit(void);
-extern int  tcp6_proc_init(struct net *net);
-extern void tcp6_proc_exit(struct net *net);
-extern int  udp6_proc_init(struct net *net);
-extern void udp6_proc_exit(struct net *net);
-extern int  udplite6_proc_init(void);
-extern void udplite6_proc_exit(void);
-extern int  ipv6_misc_proc_init(void);
-extern void ipv6_misc_proc_exit(void);
-extern int snmp6_register_dev(struct inet6_dev *idev);
-extern int snmp6_unregister_dev(struct inet6_dev *idev);
+int ac6_proc_init(struct net *net);
+void ac6_proc_exit(struct net *net);
+int raw6_proc_init(void);
+void raw6_proc_exit(void);
+int tcp6_proc_init(struct net *net);
+void tcp6_proc_exit(struct net *net);
+int udp6_proc_init(struct net *net);
+void udp6_proc_exit(struct net *net);
+int udplite6_proc_init(void);
+void udplite6_proc_exit(void);
+int ipv6_misc_proc_init(void);
+void ipv6_misc_proc_exit(void);
+int snmp6_register_dev(struct inet6_dev *idev);
+int snmp6_unregister_dev(struct inet6_dev *idev);
 
 #else
 static inline int ac6_proc_init(struct net *net) { return 0; }
@@ -865,10 +833,10 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
 extern struct ctl_table ipv6_route_table_template[];
 extern struct ctl_table ipv6_icmp_table_template[];
 
-extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
-extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
-extern int ipv6_sysctl_register(void);
-extern void ipv6_sysctl_unregister(void);
+struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
+struct ctl_table *ipv6_route_sysctl_init(struct net *net);
+int ipv6_sysctl_register(void);
+void ipv6_sysctl_unregister(void);
 #endif
 
 #endif /* _NET_IPV6_H */
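One hunk above is more than an extern cleanup: ipv6_addr_jhash() becomes __ipv6_addr_jhash() and takes the hash secret as an explicit initval instead of reading the global ipv6_hash_secret. A hedged sketch of the resulting calling pattern; my_hash_secret and the helpers around it are illustrative, not code from this commit:

#include <linux/jhash.h>
#include <linux/random.h>
#include <net/ipv6.h>

/* Hypothetical per-subsystem secret. */
static u32 my_hash_secret;

/* call once from the subsystem's init path */
static void my_hash_init(void)
{
	get_random_bytes(&my_hash_secret, sizeof(my_hash_secret));
}

static u32 my_addr_hash(const struct in6_addr *addr)
{
	/* the caller now supplies the initval explicitly */
	return __ipv6_addr_jhash(addr, my_hash_secret);
}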
index c1fec6b464cc7b3926bfa12e55893994d772cc32..9e9e35465bafbaff131c893814abd44d99be712f 100644 (file)
@@ -123,23 +123,23 @@ extern struct list_head ipx_routes;
 extern rwlock_t ipx_routes_lock;
 
 extern struct list_head ipx_interfaces;
-extern struct ipx_interface *ipx_interfaces_head(void);
+struct ipx_interface *ipx_interfaces_head(void);
 extern spinlock_t ipx_interfaces_lock;
 
 extern struct ipx_interface *ipx_primary_net;
 
-extern int ipx_proc_init(void);
-extern void ipx_proc_exit(void);
+int ipx_proc_init(void);
+void ipx_proc_exit(void);
 
-extern const char *ipx_frame_name(__be16);
-extern const char *ipx_device_name(struct ipx_interface *intrfc);
+const char *ipx_frame_name(__be16);
+const char *ipx_device_name(struct ipx_interface *intrfc);
 
 static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
 {
        atomic_inc(&intrfc->refcnt);
 }
 
-extern void ipxitf_down(struct ipx_interface *intrfc);
+void ipxitf_down(struct ipx_interface *intrfc);
 
 static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
 {
index 80ffde3bb164e2a3a856838f0c8c4c2906bfeaf6..0224402260a77a3653a276e0c33f2e9281601f80 100644 (file)
@@ -105,13 +105,13 @@ struct ircomm_tty_cb {
 void ircomm_tty_start(struct tty_struct *tty);
 void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self);
 
-extern int ircomm_tty_tiocmget(struct tty_struct *tty);
-extern int ircomm_tty_tiocmset(struct tty_struct *tty,
-                              unsigned int set, unsigned int clear);
-extern int ircomm_tty_ioctl(struct tty_struct *tty, 
-                           unsigned int cmd, unsigned long arg);
-extern void ircomm_tty_set_termios(struct tty_struct *tty, 
-                                  struct ktermios *old_termios);
+int ircomm_tty_tiocmget(struct tty_struct *tty);
+int ircomm_tty_tiocmset(struct tty_struct *tty, unsigned int set,
+                       unsigned int clear);
+int ircomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+                    unsigned long arg);
+void ircomm_tty_set_termios(struct tty_struct *tty,
+                           struct ktermios *old_termios);
 
 #endif
 
index 3bed61d379a8aa1de70fe25935810c830cd70ac2..a059465101ff3b5a35120d09782b6f10db9c98fa 100644 (file)
@@ -112,20 +112,19 @@ do { if(!(expr)) { \
 struct net_device;
 struct packet_type;
 
-extern void irda_proc_register(void);
-extern void irda_proc_unregister(void);
+void irda_proc_register(void);
+void irda_proc_unregister(void);
 
-extern int irda_sysctl_register(void);
-extern void irda_sysctl_unregister(void);
+int irda_sysctl_register(void);
+void irda_sysctl_unregister(void);
 
-extern int irsock_init(void);
-extern void irsock_cleanup(void);
+int irsock_init(void);
+void irsock_cleanup(void);
 
-extern int irda_nl_register(void);
-extern void irda_nl_unregister(void);
+int irda_nl_register(void);
+void irda_nl_unregister(void);
 
-extern int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
-                           struct packet_type *ptype,
-                           struct net_device *orig_dev);
+int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev,
+                    struct packet_type *ptype, struct net_device *orig_dev);
 
 #endif /* NET_IRDA_H */
index 94c852d47d0f8d5c7d033279bf2f67ca3fd861dd..11417475a6c31059aa28d5db55ce90c2a536a703 100644 (file)
@@ -162,7 +162,7 @@ typedef struct {
         int irq, irq2;        /* Interrupts used */
         int dma, dma2;        /* DMA channel(s) used */
         int fifo_size;        /* FIFO size */
-        int irqflags;         /* interrupt flags (ie, IRQF_SHARED|IRQF_DISABLED) */
+        int irqflags;         /* interrupt flags (ie, IRQF_SHARED) */
        int direction;        /* Link direction, used by some FIR drivers */
        int enabled;          /* Powered on? */
        int suspended;        /* Suspended by APM */
index 4c90824c50fb1ee002aa6e82f50f32fe0c2b940a..f9d88da97af2c15844caf5b9ba875cffb8a5f8c3 100644 (file)
@@ -126,6 +126,6 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event,
                    struct sk_buff *skb, struct irlap_info *info);
 void irlap_print_event(IRLAP_EVENT event);
 
-extern int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
+int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb);
 
 #endif
index 6b1dc4f8eca58a231ff481ee6fd5be8ac1a8f15c..57173ae398aed4096dd8b2cc7374bd4645edc3e7 100644 (file)
@@ -163,7 +163,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command);
 void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb,
                         __u8 caddr, int command);
 
-extern int irlap_insert_qos_negotiation_params(struct irlap_cb *self, 
-                                              struct sk_buff *skb);
+int irlap_insert_qos_negotiation_params(struct irlap_cb *self,
+                                       struct sk_buff *skb);
 
 #endif
index 5d5a6a4732effd8865918fe31c390c5067505534..a830b01baba4b08a1d5054557c540312084b8f50 100644 (file)
@@ -432,44 +432,32 @@ struct iw_public_data {
 /* First : function strictly used inside the kernel */
 
 /* Handle /proc/net/wireless, called in net/code/dev.c */
-extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
-                                int length);
+int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
 
 /* Second : functions that may be called by driver modules */
 
 /* Send a single event to user space */
-extern void wireless_send_event(struct net_device *    dev,
-                               unsigned int            cmd,
-                               union iwreq_data *      wrqu,
-                               const char *            extra);
+void wireless_send_event(struct net_device *dev, unsigned int cmd,
+                        union iwreq_data *wrqu, const char *extra);
 
 /* We may need a function to send a stream of events to user space.
  * More on that later... */
 
 /* Standard handler for SIOCSIWSPY */
-extern int iw_handler_set_spy(struct net_device *      dev,
-                             struct iw_request_info *  info,
-                             union iwreq_data *        wrqu,
-                             char *                    extra);
+int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info,
+                      union iwreq_data *wrqu, char *extra);
 /* Standard handler for SIOCGIWSPY */
-extern int iw_handler_get_spy(struct net_device *      dev,
-                             struct iw_request_info *  info,
-                             union iwreq_data *        wrqu,
-                             char *                    extra);
+int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info,
+                      union iwreq_data *wrqu, char *extra);
 /* Standard handler for SIOCSIWTHRSPY */
-extern int iw_handler_set_thrspy(struct net_device *   dev,
-                                struct iw_request_info *info,
-                                union iwreq_data *     wrqu,
-                                char *                 extra);
+int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info,
+                         union iwreq_data *wrqu, char *extra);
 /* Standard handler for SIOCGIWTHRSPY */
-extern int iw_handler_get_thrspy(struct net_device *   dev,
-                                struct iw_request_info *info,
-                                union iwreq_data *     wrqu,
-                                char *                 extra);
+int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info,
+                         union iwreq_data *wrqu, char *extra);
 /* Driver call to update spy records */
-extern void wireless_spy_update(struct net_device *    dev,
-                               unsigned char *         address,
-                               struct iw_quality *     wstats);
+void wireless_spy_update(struct net_device *dev, unsigned char *address,
+                        struct iw_quality *wstats);
 
 /************************* INLINE FUNTIONS *************************/
 /*
index df892a94f2c6570b642a33391fb9301ee99bcedb..9510f8725f03c316ab4eb61e9ee503218d2056c8 100644 (file)
@@ -105,40 +105,40 @@ struct lapb_cb {
 };
 
 /* lapb_iface.c */
-extern void lapb_connect_confirmation(struct lapb_cb *lapb, int);
-extern void lapb_connect_indication(struct lapb_cb *lapb, int);
-extern void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
-extern void lapb_disconnect_indication(struct lapb_cb *lapb, int);
-extern int  lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
-extern int  lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
+void lapb_connect_confirmation(struct lapb_cb *lapb, int);
+void lapb_connect_indication(struct lapb_cb *lapb, int);
+void lapb_disconnect_confirmation(struct lapb_cb *lapb, int);
+void lapb_disconnect_indication(struct lapb_cb *lapb, int);
+int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *);
+int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *);
 
 /* lapb_in.c */
-extern void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
+void lapb_data_input(struct lapb_cb *lapb, struct sk_buff *);
 
 /* lapb_out.c */
-extern void lapb_kick(struct lapb_cb *lapb);
-extern void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
-extern void lapb_establish_data_link(struct lapb_cb *lapb);
-extern void lapb_enquiry_response(struct lapb_cb *lapb);
-extern void lapb_timeout_response(struct lapb_cb *lapb);
-extern void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
-extern void lapb_check_need_response(struct lapb_cb *lapb, int, int);
+void lapb_kick(struct lapb_cb *lapb);
+void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *, int);
+void lapb_establish_data_link(struct lapb_cb *lapb);
+void lapb_enquiry_response(struct lapb_cb *lapb);
+void lapb_timeout_response(struct lapb_cb *lapb);
+void lapb_check_iframes_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_check_need_response(struct lapb_cb *lapb, int, int);
 
 /* lapb_subr.c */
-extern void lapb_clear_queues(struct lapb_cb *lapb);
-extern void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
-extern void lapb_requeue_frames(struct lapb_cb *lapb);
-extern int  lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
-extern int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
-extern void lapb_send_control(struct lapb_cb *lapb, int, int, int);
-extern void lapb_transmit_frmr(struct lapb_cb *lapb);
+void lapb_clear_queues(struct lapb_cb *lapb);
+void lapb_frames_acked(struct lapb_cb *lapb, unsigned short);
+void lapb_requeue_frames(struct lapb_cb *lapb);
+int lapb_validate_nr(struct lapb_cb *lapb, unsigned short);
+int lapb_decode(struct lapb_cb *lapb, struct sk_buff *, struct lapb_frame *);
+void lapb_send_control(struct lapb_cb *lapb, int, int, int);
+void lapb_transmit_frmr(struct lapb_cb *lapb);
 
 /* lapb_timer.c */
-extern void lapb_start_t1timer(struct lapb_cb *lapb);
-extern void lapb_start_t2timer(struct lapb_cb *lapb);
-extern void lapb_stop_t1timer(struct lapb_cb *lapb);
-extern void lapb_stop_t2timer(struct lapb_cb *lapb);
-extern int  lapb_t1timer_running(struct lapb_cb *lapb);
+void lapb_start_t1timer(struct lapb_cb *lapb);
+void lapb_start_t2timer(struct lapb_cb *lapb);
+void lapb_stop_t1timer(struct lapb_cb *lapb);
+void lapb_stop_t2timer(struct lapb_cb *lapb);
+int lapb_t1timer_running(struct lapb_cb *lapb);
 
 /*
  * Debug levels.
index 9e7d7f08ef77c5539ea887b483b274fc0ddb91fd..68490cbc8a659de87fefbfe1d80f8b29bed85dbc 100644 (file)
@@ -95,29 +95,29 @@ struct hlist_nulls_head *llc_sk_laddr_hash(struct llc_sap *sap,
 extern struct list_head llc_sap_list;
 extern spinlock_t llc_sap_list_lock;
 
-extern int llc_rcv(struct sk_buff *skb, struct net_device *dev,
-                  struct packet_type *pt, struct net_device *orig_dev);
+int llc_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
+           struct net_device *orig_dev);
 
-extern int llc_mac_hdr_init(struct sk_buff *skb,
-                           const unsigned char *sa, const unsigned char *da);
+int llc_mac_hdr_init(struct sk_buff *skb, const unsigned char *sa,
+                    const unsigned char *da);
 
-extern void llc_add_pack(int type, void (*handler)(struct llc_sap *sap,
-                                                  struct sk_buff *skb));
-extern void llc_remove_pack(int type);
+void llc_add_pack(int type,
+                 void (*handler)(struct llc_sap *sap, struct sk_buff *skb));
+void llc_remove_pack(int type);
 
-extern void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
+void llc_set_station_handler(void (*handler)(struct sk_buff *skb));
 
-extern struct llc_sap *llc_sap_open(unsigned char lsap,
-                                   int (*rcv)(struct sk_buff *skb,
-                                              struct net_device *dev,
-                                              struct packet_type *pt,
-                                              struct net_device *orig_dev));
+struct llc_sap *llc_sap_open(unsigned char lsap,
+                            int (*rcv)(struct sk_buff *skb,
+                                       struct net_device *dev,
+                                       struct packet_type *pt,
+                                       struct net_device *orig_dev));
 static inline void llc_sap_hold(struct llc_sap *sap)
 {
        atomic_inc(&sap->refcnt);
 }
 
-extern void llc_sap_close(struct llc_sap *sap);
+void llc_sap_close(struct llc_sap *sap);
 
 static inline void llc_sap_put(struct llc_sap *sap)
 {
@@ -125,27 +125,27 @@ static inline void llc_sap_put(struct llc_sap *sap)
                llc_sap_close(sap);
 }
 
-extern struct llc_sap *llc_sap_find(unsigned char sap_value);
+struct llc_sap *llc_sap_find(unsigned char sap_value);
 
-extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
-                                    unsigned char *dmac, unsigned char dsap);
+int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb,
+                             unsigned char *dmac, unsigned char dsap);
 
-extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb);
+void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb);
 
-extern void llc_station_init(void);
-extern void llc_station_exit(void);
+void llc_station_init(void);
+void llc_station_exit(void);
 
 #ifdef CONFIG_PROC_FS
-extern int llc_proc_init(void);
-extern void llc_proc_exit(void);
+int llc_proc_init(void);
+void llc_proc_exit(void);
 #else
 #define llc_proc_init()        (0)
 #define llc_proc_exit()        do { } while(0)
 #endif /* CONFIG_PROC_FS */
 #ifdef CONFIG_SYSCTL
-extern int llc_sysctl_init(void);
-extern void llc_sysctl_exit(void);
+int llc_sysctl_init(void);
+void llc_sysctl_exit(void);
 
 extern int sysctl_llc2_ack_timeout;
 extern int sysctl_llc2_busy_timeout;
index df83f69d2de41903bc6b21467475853322828bdc..f3be818e73c1fbe1034d63bf6f2d132848060908 100644 (file)
 
 typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
 
-extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ac_conn_confirm(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_data_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_disc_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_ind(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_confirm(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock* sk,
-                                                  struct sk_buff *skb);
-extern int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock* sk,
-                                                     struct sk_buff *skb);
-extern int llc_conn_ac_send_disc_cmd_p_set_x(struct sock* sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ac_send_dm_rsp_f_set_p(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_send_dm_rsp_f_set_1(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock* sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock* sk,
-                                              struct sk_buff *skb);
-extern int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock* sk,
-                                              struct sk_buff *skb);
-extern int llc_conn_ac_send_i_cmd_p_set_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_xxx_x_set_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_xxx_x_set_0(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock* sk,
-                                                      struct sk_buff *skb);
-extern int llc_conn_ac_resend_i_rsp_f_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_cmd_p_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_rsp_f_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rej_xxx_x_set_0(struct sock* sk,
+int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk,
                                            struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_set_remote_busy(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock* sk,
+int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk,
+                                              struct sk_buff *skb);
+int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk,
                                                struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_cmd_p_set_1(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_rsp_f_set_1(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_send_ack_rsp_f_set_1(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_rr_xxx_x_set_0(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_send_ack_xxx_x_set_0(struct sock* sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock* sk,
-                                             struct sk_buff *skb);
-extern int llc_conn_ac_send_ua_rsp_f_set_p(struct sock* sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ac_set_s_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_s_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_p_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_ack_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_rej_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_start_ack_tmr_if_not_running(struct sock* sk,
-                                                   struct sk_buff *skb);
-extern int llc_conn_ac_stop_ack_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_p_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_rej_timer(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_all_timers(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_stop_other_timers(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_nr_received(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_tx_win_size(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_dec_tx_win_size(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_p_flag(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_2(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock* sk,
-                                                        struct sk_buff *skb);
-extern int llc_conn_ac_set_p_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_remote_busy_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_retry_cnt_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_cause_flag_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_cause_flag_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_retry_cnt_by_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vr_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_inc_vr_by_1(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vs_0(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_set_vs_nr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_vs(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_upd_vs(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_disc(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_reset(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_disc_confirm(struct sock* sk, struct sk_buff *skb);
-extern u8 llc_circular_between(u8 a, u8 b, u8 c);
-extern int llc_conn_ac_send_ack_if_needed(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_adjust_npta_by_rr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_adjust_npta_by_rnr(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_rst_sendack_flag(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_rsp_as_ack(struct sock* sk, struct sk_buff *skb);
-extern int llc_conn_ac_send_i_as_ack(struct sock* sk, struct sk_buff *skb);
+int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk,
+                                            struct sk_buff *skb);
+int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk,
+                                                 struct sk_buff *skb);
+int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb);
+int llc_conn_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_reset(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb);
+u8 llc_circular_between(u8 a, u8 b, u8 c);
+int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_rsp_as_ack(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb);
 
-extern void llc_conn_busy_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_ack_tmr_cb(unsigned long timeout_data);
-extern void llc_conn_rej_tmr_cb(unsigned long timeout_data);
+void llc_conn_busy_tmr_cb(unsigned long timeout_data);
+void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data);
+void llc_conn_ack_tmr_cb(unsigned long timeout_data);
+void llc_conn_rej_tmr_cb(unsigned long timeout_data);
 
-extern void llc_conn_set_p_flag(struct sock *sk, u8 value);
+void llc_conn_set_p_flag(struct sock *sk, u8 value);
 #endif /* LLC_C_AC_H */
index 6ca3113df39eed10eb2032a11c6b4a7c43abe003..3948cf111dd0d6d4948ed3b7c40740b82bfe41bc 100644 (file)
@@ -128,138 +128,93 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
 typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
 typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
 
-extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_local_busy_detected(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk,
+int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_disc_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rst_req(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_detected(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_local_busy_cleared(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_bad_pdu(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_disc_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
+                                            struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
                                              struct sk_buff *skb);
-extern int llc_conn_ev_rx_dm_rsp_fbit_set_x(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_frmr_rsp_fbit_set_x(struct sock *sk,
-                                             struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns(struct sock *sk,
-                                                   struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk,
+int llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns(struct sock *sk,
+                                            struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_sabme_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
                                               struct sk_buff *skb);
-extern int llc_conn_ev_rx_ua_rsp_fbit_set_x(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_cmd_pbit_set_x(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_rsp_fbit_set_x(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr(struct sock *sk,
-                                                     struct sk_buff *skb);
-extern int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
-                                                     struct sk_buff *skb);
-extern int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr(struct sock *sk,
+                                              struct sk_buff *skb);
+int llc_conn_ev_p_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_ack_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rej_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_busy_tmr_exp(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_sendack_tmr_exp(struct sock *sk, struct sk_buff *skb);
 /* NOT_USED functions and their variations */
-extern int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_xxx_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns(struct sock *sk,
+                                             struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns(struct sock *sk,
+                                             struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns(struct sock *sk,
+                                             struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns(struct sock *sk,
+                                             struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_i_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rnr_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_cmd_pbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_rej_rsp_fbit_set_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_rx_any_frame(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_tx_buffer_full(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_init_p_f_cycle(struct sock *sk, struct sk_buff *skb);
 
 /* Available connection action qualifiers */
-extern int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk,
-                                            struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk,
+int llc_conn_ev_qlfy_data_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_data_flag_eq_2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_last_frame_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_p_flag_eq_f(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_remote_busy_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_lt_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_retry_cnt_gte_n2(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_s_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_1(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_cause_flag_eq_0(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conn(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_disc(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_failed(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
                                            struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_conn(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_disc(struct sock *sk,
-                                           struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_failed(struct sock *sk,
-                                             struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_remote_busy(struct sock *sk,
-                                                 struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk,
-                                             struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk,
-                                               struct sk_buff *skb);
-extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk,
-                                               struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_refuse(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_conflict(struct sock *sk, struct sk_buff *skb);
+int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, struct sk_buff *skb);
 
 static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
 {
index 2f97d8ddce924dbfc61f473896cacda3cfb70456..0134681acc4cfe354c6546be1a1ffefa5c078cde 100644 (file)
@@ -95,28 +95,24 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
        return skb->cb[sizeof(skb->cb) - 1];
 }
 
-extern struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
-                                struct proto *prot);
-extern void llc_sk_free(struct sock *sk);
+struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
+                         struct proto *prot);
+void llc_sk_free(struct sock *sk);
 
-extern void llc_sk_reset(struct sock *sk);
+void llc_sk_reset(struct sock *sk);
 
 /* Access to a connection */
-extern int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
-extern void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr,
-                                        u8 first_p_bit);
-extern void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr,
-                                        u8 first_f_bit);
-extern int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr,
-                                     u16 *how_many_unacked);
-extern struct sock *llc_lookup_established(struct llc_sap *sap,
-                                          struct llc_addr *daddr,
-                                          struct llc_addr *laddr);
-extern void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
-extern void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
+int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
+void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
+int llc_conn_remove_acked_pdus(struct sock *conn, u8 nr, u16 *how_many_unacked);
+struct sock *llc_lookup_established(struct llc_sap *sap, struct llc_addr *daddr,
+                                   struct llc_addr *laddr);
+void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk);
+void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk);
 
-extern u8 llc_data_accept_state(u8 state);
-extern void llc_build_offset_table(void);
+u8 llc_data_accept_state(u8 state);
+void llc_build_offset_table(void);
 #endif /* LLC_CONN_H */
index f0cb909b60eb8c73670cf969a5bf544267dd7730..8d5c543cd620037590b1ee823548a9c091936de3 100644 (file)
@@ -62,8 +62,7 @@
 #define LLC_STATUS_CONFLICT    7 /* disconnect conn */
 #define LLC_STATUS_RESET_DONE  8 /*  */
 
-extern int llc_establish_connection(struct sock *sk, u8 *lmac,
-                                   u8 *dmac, u8 dsap);
-extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
-extern int llc_send_disc(struct sock *sk);
+int llc_establish_connection(struct sock *sk, u8 *lmac, u8 *dmac, u8 dsap);
+int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
+int llc_send_disc(struct sock *sk);
 #endif /* LLC_IF_H */
index 5a93d13ac95c8177542a3f2b327ae60b26065cea..31e2de7d57c5d6c210e1b445b19551407fd31b6c 100644 (file)
@@ -410,21 +410,20 @@ struct llc_frmr_info {
        u8  ind_bits;           /* indicator bits set with macro */
 } __packed;
 
-extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
-extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
-extern void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
-extern void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
-extern void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
-extern void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
-extern void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
-extern void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
-extern void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb,
-                                    struct llc_pdu_sn *prev_pdu,
-                                    u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
-extern void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
-extern void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
+void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
+void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
+void llc_pdu_decode_pf_bit(struct sk_buff *skb, u8 *pf_bit);
+void llc_pdu_init_as_disc_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_i_cmd(struct sk_buff *skb, u8 p_bit, u8 ns, u8 nr);
+void llc_pdu_init_as_rej_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rnr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_rr_cmd(struct sk_buff *skb, u8 p_bit, u8 nr);
+void llc_pdu_init_as_sabme_cmd(struct sk_buff *skb, u8 p_bit);
+void llc_pdu_init_as_dm_rsp(struct sk_buff *skb, u8 f_bit);
+void llc_pdu_init_as_frmr_rsp(struct sk_buff *skb, struct llc_pdu_sn *prev_pdu,
+                             u8 f_bit, u8 vs, u8 vr, u8 vzyxw);
+void llc_pdu_init_as_rr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rej_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_rnr_rsp(struct sk_buff *skb, u8 f_bit, u8 nr);
+void llc_pdu_init_as_ua_rsp(struct sk_buff *skb, u8 f_bit);
 #endif /* LLC_PDU_H */
index 37a3bbd02394b3497e91316275babc01214f5d08..a61b98c108ee2c4df241a7d2318249427b686951 100644 (file)
 /* All action functions must look like this */
 typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
 
-extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
-                                      struct sk_buff *skb);
-extern int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_report_status(struct llc_sap *sap,
-                                       struct sk_buff *skb);
-extern int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb);
 #endif /* LLC_S_AC_H */
index e3acb9329e4af4106bc221af99e94670b2393e93..84db3a59ed2880bb4b797fd64ea012beb9136ea1 100644 (file)
@@ -53,15 +53,14 @@ struct llc_sap;
 
 typedef int (*llc_sap_ev_t)(struct llc_sap *sap, struct sk_buff *skb);
 
-extern int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
-extern int llc_sap_ev_deactivation_req(struct llc_sap *sap,
-                                      struct sk_buff *skb);
+int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb);
+int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb);
 #endif /* LLC_S_EV_H */
index ed25bec2f6483aa7f2b6bda6a6324eb2206ee383..1e4df9fd9fb2cf6b06acdf2b923ed41fdfad142c 100644 (file)
@@ -19,18 +19,14 @@ struct net_device;
 struct sk_buff;
 struct sock;
 
-extern void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
-extern void llc_save_primitive(struct sock *sk, struct sk_buff* skb,
-                              unsigned char prim);
-extern struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
-                                      u8 type, u32 data_size);
+void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb);
+void llc_save_primitive(struct sock *sk, struct sk_buff *skb,
+                       unsigned char prim);
+struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
+                               u8 type, u32 data_size);
 
-extern void llc_build_and_send_test_pkt(struct llc_sap *sap,
-                                       struct sk_buff *skb,
-                                       unsigned char *dmac,
-                                       unsigned char dsap);
-extern void llc_build_and_send_xid_pkt(struct llc_sap *sap,
-                                      struct sk_buff *skb,
-                                      unsigned char *dmac,
-                                      unsigned char dsap);
+void llc_build_and_send_test_pkt(struct llc_sap *sap, struct sk_buff *skb,
+                                unsigned char *dmac, unsigned char dsap);
+void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
+                               unsigned char *dmac, unsigned char dsap);
 #endif /* LLC_SAP_H */
index cc6035f1a2f11c1b68f5a82235d4698441e8cddd..f386c480e1341dd70308d02300dc26f60af275f0 100644 (file)
@@ -829,6 +829,15 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
  * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
  * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
+ * @RX_FLAG_AMSDU_MORE: Some drivers may prefer to report separate A-MSDU
+ *     subframes instead of one huge frame for performance reasons.
+ *     All but the last MSDU of an A-MSDU should have this flag set; e.g.
+ *     if an A-MSDU has 3 frames, the first 2 must have the flag set and
+ *     the 3rd (last) one must not. The flag is used to handle
+ *     retransmission/duplication recovery properly, since A-MSDU
+ *     subframes share the same sequence number. Reported subframes can
+ *     be either regular MSDUs or single A-MSDUs. Subframes must not be
+ *     interleaved with other frames.
  */
 enum mac80211_rx_flags {
        RX_FLAG_MMIC_ERROR              = BIT(0),
@@ -859,6 +868,7 @@ enum mac80211_rx_flags {
        RX_FLAG_STBC_MASK               = BIT(26) | BIT(27),
        RX_FLAG_10MHZ                   = BIT(28),
        RX_FLAG_5MHZ                    = BIT(29),
+       RX_FLAG_AMSDU_MORE              = BIT(30),
 };
 
 #define RX_FLAG_STBC_SHIFT             26
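
A minimal sketch, outside the diff, of how a driver might use the new
RX_FLAG_AMSDU_MORE flag when handing already-split A-MSDU subframes to
mac80211. The helper name and the subframe queue are hypothetical; only
IEEE80211_SKB_RXCB(), the flag itself and ieee80211_rx_irqsafe() come
from the mac80211 API touched here:

#include <net/mac80211.h>

/* Hypothetical driver helper: deliver the subframes of one A-MSDU,
 * flagging all but the last one as "more subframes follow". */
static void drv_rx_amsdu(struct ieee80211_hw *hw,
			 struct sk_buff_head *subframes)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(subframes)) != NULL) {
		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

		if (!skb_queue_empty(subframes))
			status->flag |= RX_FLAG_AMSDU_MORE;

		ieee80211_rx_irqsafe(hw, skb);
	}
}
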
@@ -1492,6 +1502,11 @@ struct ieee80211_tx_control {
  *
  * @IEEE80211_HW_TIMING_BEACON_ONLY: Use sync timing from beacon frames
  *     only, to allow getting TBTT of a DTIM beacon.
+ *
+ * @IEEE80211_HW_CHANCTX_STA_CSA: Support 802.11h-based channel switching (CSA)
+ *     for a single active channel while using channel contexts. When this
+ *     support is not enabled, the default action is to disconnect when
+ *     receiving the CSA frame.
  */
 enum ieee80211_hw_flags {
        IEEE80211_HW_HAS_RATE_CONTROL                   = 1<<0,
@@ -1522,6 +1537,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF              = 1<<25,
        IEEE80211_HW_TIMING_BEACON_ONLY                 = 1<<26,
        IEEE80211_HW_SUPPORTS_HT_CCK_RATES              = 1<<27,
+       IEEE80211_HW_CHANCTX_STA_CSA                    = 1<<28,
 };
 
 /**
@@ -2666,6 +2682,10 @@ enum ieee80211_roc_type {
  *     zero using ieee80211_csa_is_complete() after the beacon has been
  *     transmitted and then call ieee80211_csa_finish().
  *
+ * @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
+ *     information in bss_conf is set up and the beacon can be retrieved. A
+ *     channel context is bound before this is called.
+ * @leave_ibss: Leave the IBSS again.
  */
 struct ieee80211_ops {
        void (*tx)(struct ieee80211_hw *hw,
@@ -2857,6 +2877,9 @@ struct ieee80211_ops {
        void (*channel_switch_beacon)(struct ieee80211_hw *hw,
                                      struct ieee80211_vif *vif,
                                      struct cfg80211_chan_def *chandef);
+
+       int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+       void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
 };
 
 /**
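
A hedged sketch of how a driver could wire up the new IBSS callbacks in
its ieee80211_ops; the driver functions and the ops variable are
hypothetical, only the two new members and their calling convention come
from this change:

#include <net/mac80211.h>

/* Hypothetical callback; mac80211 calls it after bss_conf and the beacon
 * are set up and a channel context has been bound. */
static int drv_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	/* Program the hardware for the IBSS described in vif->bss_conf. */
	return 0;
}

static void drv_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	/* Undo whatever drv_join_ibss() configured. */
}

static const struct ieee80211_ops drv_ops = {
	/* ...existing mandatory callbacks such as .tx, .start, .stop... */
	.join_ibss	= drv_join_ibss,
	.leave_ibss	= drv_leave_ibss,
};
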
@@ -3919,6 +3942,25 @@ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
                                                    struct ieee80211_vif *vif),
                                                void *data);
 
+/**
+ * ieee80211_iterate_active_interfaces_rtnl - iterate active interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware that are currently active and calls the callback for them.
+ * This version can only be used while holding the RTNL.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iter_flags: iteration flags, see &enum ieee80211_interface_iteration_flags
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+void ieee80211_iterate_active_interfaces_rtnl(struct ieee80211_hw *hw,
+                                             u32 iter_flags,
+                                             void (*iterator)(void *data,
+                                               u8 *mac,
+                                               struct ieee80211_vif *vif),
+                                             void *data);
+
 /**
  * ieee80211_queue_work - add work onto the mac80211 workqueue
  *
index d0d11df9cba1ca3b4f8196071f8a0beed8b32f25..807d6b7a943fecab78db1afd6b57ee33b70ded72 100644 (file)
@@ -133,7 +133,7 @@ struct ieee802154_ops {
 
 /* Basic interface to register ieee802154 device */
 struct ieee802154_dev *
-ieee802154_alloc_device(size_t priv_data_lex, struct ieee802154_ops *ops);
+ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops);
 void ieee802154_free_device(struct ieee802154_dev *dev);
 int ieee802154_register_device(struct ieee802154_dev *dev);
 void ieee802154_unregister_device(struct ieee802154_dev *dev);
index 0f7558b638ae6ff9d0cde2b9e45e13990269005b..31912c3be772bfa5971e3f31a11f982cdd22ad2d 100644 (file)
@@ -126,19 +126,17 @@ struct mrp_port {
        struct rcu_head                 rcu;
 };
 
-extern int     mrp_register_application(struct mrp_application *app);
-extern void    mrp_unregister_application(struct mrp_application *app);
-
-extern int     mrp_init_applicant(struct net_device *dev,
-                                   struct mrp_application *app);
-extern void    mrp_uninit_applicant(struct net_device *dev,
-                                     struct mrp_application *app);
-
-extern int     mrp_request_join(const struct net_device *dev,
-                                 const struct mrp_application *app,
-                                 const void *value, u8 len, u8 type);
-extern void    mrp_request_leave(const struct net_device *dev,
-                                  const struct mrp_application *app,
-                                  const void *value, u8 len, u8 type);
+int mrp_register_application(struct mrp_application *app);
+void mrp_unregister_application(struct mrp_application *app);
+
+int mrp_init_applicant(struct net_device *dev, struct mrp_application *app);
+void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *app);
+
+int mrp_request_join(const struct net_device *dev,
+                    const struct mrp_application *app,
+                    const void *value, u8 len, u8 type);
+void mrp_request_leave(const struct net_device *dev,
+                      const struct mrp_application *app,
+                      const void *value, u8 len, u8 type);
 
 #endif /* _NET_MRP_H */
index ea0cc26ab70e1b447a0e834992c746a9f33c9938..6bbda34d5e59d030a1e2d69e93cda754d06ae5c0 100644 (file)
@@ -110,8 +110,8 @@ struct ndisc_options {
 
 #define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
 
-extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
-                                                struct ndisc_options *ndopts);
+struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
+                                         struct ndisc_options *ndopts);
 
 /*
  * Return the padding between the option length and the start of the
@@ -189,60 +189,51 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
        return n;
 }
 
-extern int                     ndisc_init(void);
-extern int                     ndisc_late_init(void);
+int ndisc_init(void);
+int ndisc_late_init(void);
 
-extern void                    ndisc_late_cleanup(void);
-extern void                    ndisc_cleanup(void);
+void ndisc_late_cleanup(void);
+void ndisc_cleanup(void);
 
-extern int                     ndisc_rcv(struct sk_buff *skb);
+int ndisc_rcv(struct sk_buff *skb);
 
-extern void                    ndisc_send_ns(struct net_device *dev,
-                                             struct neighbour *neigh,
-                                             const struct in6_addr *solicit,
-                                             const struct in6_addr *daddr,
-                                             const struct in6_addr *saddr);
+void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
+                  const struct in6_addr *solicit,
+                  const struct in6_addr *daddr, const struct in6_addr *saddr);
 
-extern void                    ndisc_send_rs(struct net_device *dev,
-                                             const struct in6_addr *saddr,
-                                             const struct in6_addr *daddr);
-extern void                    ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
-                                             const struct in6_addr *daddr,
-                                             const struct in6_addr *solicited_addr,
-                                             bool router, bool solicited, bool override,
-                                             bool inc_opt);
+void ndisc_send_rs(struct net_device *dev,
+                  const struct in6_addr *saddr, const struct in6_addr *daddr);
+void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+                  const struct in6_addr *daddr,
+                  const struct in6_addr *solicited_addr,
+                  bool router, bool solicited, bool override, bool inc_opt);
 
-extern void                    ndisc_send_redirect(struct sk_buff *skb,
-                                                   const struct in6_addr *target);
+void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target);
 
-extern int                     ndisc_mc_map(const struct in6_addr *addr, char *buf,
-                                            struct net_device *dev, int dir);
+int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
+                int dir);
 
 
 /*
  *     IGMP
  */
-extern int                     igmp6_init(void);
+int igmp6_init(void);
 
-extern void                    igmp6_cleanup(void);
+void igmp6_cleanup(void);
 
-extern int                     igmp6_event_query(struct sk_buff *skb);
+int igmp6_event_query(struct sk_buff *skb);
 
-extern int                     igmp6_event_report(struct sk_buff *skb);
+int igmp6_event_report(struct sk_buff *skb);
 
 
 #ifdef CONFIG_SYSCTL
-extern int                     ndisc_ifinfo_sysctl_change(struct ctl_table *ctl,
-                                                          int write,
-                                                          void __user *buffer,
-                                                          size_t *lenp,
-                                                          loff_t *ppos);
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
+                              void __user *buffer, size_t *lenp, loff_t *ppos);
 int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
                                 void __user *oldval, size_t __user *oldlenp,
                                 void __user *newval, size_t newlen);
 #endif
 
-extern void                    inet6_ifinfo_notify(int event,
-                                                   struct inet6_dev *idev);
+void inet6_ifinfo_notify(int event, struct inet6_dev *idev);
 
 #endif
index 9d22f08896c6c16d5a1c26fad0ef65461980d02a..da68c9a90ac56932638feca9cdbba6a61b75ba64 100644 (file)
@@ -22,6 +22,7 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 #include <net/netns/conntrack.h>
 #endif
+#include <net/netns/nftables.h>
 #include <net/netns/xfrm.h>
 
 struct user_namespace;
@@ -101,6 +102,9 @@ struct net {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        struct netns_ct         ct;
 #endif
+#if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE)
+       struct netns_nftables   nft;
+#endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        struct netns_nf_frag    nf_frag;
 #endif
@@ -137,8 +141,8 @@ struct net {
 extern struct net init_net;
 
 #ifdef CONFIG_NET_NS
-extern struct net *copy_net_ns(unsigned long flags,
-       struct user_namespace *user_ns, struct net *old_net);
+struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
+                       struct net *old_net);
 
 #else /* CONFIG_NET_NS */
 #include <linux/sched.h>
@@ -155,11 +159,11 @@ static inline struct net *copy_net_ns(unsigned long flags,
 
 extern struct list_head net_namespace_list;
 
-extern struct net *get_net_ns_by_pid(pid_t pid);
-extern struct net *get_net_ns_by_fd(int pid);
+struct net *get_net_ns_by_pid(pid_t pid);
+struct net *get_net_ns_by_fd(int pid);
 
 #ifdef CONFIG_NET_NS
-extern void __put_net(struct net *net);
+void __put_net(struct net *net);
 
 static inline struct net *get_net(struct net *net)
 {
@@ -191,7 +195,7 @@ int net_eq(const struct net *net1, const struct net *net2)
        return net1 == net2;
 }
 
-extern void net_drop_ns(void *);
+void net_drop_ns(void *);
 
 #else
 
@@ -308,19 +312,19 @@ struct pernet_operations {
  * device which caused kernel oops, and panics during network
  * namespace cleanup.   So please don't get this wrong.
  */
-extern int register_pernet_subsys(struct pernet_operations *);
-extern void unregister_pernet_subsys(struct pernet_operations *);
-extern int register_pernet_device(struct pernet_operations *);
-extern void unregister_pernet_device(struct pernet_operations *);
+int register_pernet_subsys(struct pernet_operations *);
+void unregister_pernet_subsys(struct pernet_operations *);
+int register_pernet_device(struct pernet_operations *);
+void unregister_pernet_device(struct pernet_operations *);
 
 struct ctl_table;
 struct ctl_table_header;
 
 #ifdef CONFIG_SYSCTL
-extern int net_sysctl_init(void);
-extern struct ctl_table_header *register_net_sysctl(struct net *net,
-       const char *path, struct ctl_table *table);
-extern void unregister_net_sysctl_table(struct ctl_table_header *header);
+int net_sysctl_init(void);
+struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
+                                            struct ctl_table *table);
+void unregister_net_sysctl_table(struct ctl_table_header *header);
 #else
 static inline int net_sysctl_init(void) { return 0; }
 static inline struct ctl_table_header *register_net_sysctl(struct net *net,
index fe630dde35c3f3688d30e1e33a5fb5d06393bba6..d8bbb38584b603ac5cab30d3f86049565b855106 100644 (file)
@@ -26,8 +26,8 @@ enum netevent_notif_type {
        NETEVENT_REDIRECT,         /* arg is struct netevent_redirect ptr */
 };
 
-extern int register_netevent_notifier(struct notifier_block *nb);
-extern int unregister_netevent_notifier(struct notifier_block *nb);
-extern int call_netevent_notifiers(unsigned long val, void *v);
+int register_netevent_notifier(struct notifier_block *nb);
+int unregister_netevent_notifier(struct notifier_block *nb);
+int call_netevent_notifiers(unsigned long val, void *v);
 
 #endif
index 7573d52a43469a9dbc3feeb6555810ceb9e1a740..6c3d12e2949f908c3927cb4bfe29ac30c49e1e4a 100644 (file)
@@ -16,9 +16,9 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
 
-extern int nf_conntrack_ipv4_compat_init(void);
-extern void nf_conntrack_ipv4_compat_fini(void);
+int nf_conntrack_ipv4_compat_init(void);
+void nf_conntrack_ipv4_compat_fini(void);
 
-extern void need_ipv4_conntrack(void);
+void need_ipv4_conntrack(void);
 
 #endif /*_NF_CONNTRACK_IPV4_H*/
index 6b00ea38546b8e829f5b7a97bc5c33b35b0a030a..f01ef208dff6ec151f82eb2a33626fdae7edea42 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef _NF_DEFRAG_IPV4_H
 #define _NF_DEFRAG_IPV4_H
 
-extern void nf_defrag_ipv4_enable(void);
+void nf_defrag_ipv4_enable(void);
 
 #endif /* _NF_DEFRAG_IPV4_H */
index fd79c9a1779d19d6a5dd54ef7380b990763a00ea..5613412e7dc29a996cb5991aa809526e3f440875 100644 (file)
@@ -1,15 +1,14 @@
 #ifndef _NF_DEFRAG_IPV6_H
 #define _NF_DEFRAG_IPV6_H
 
-extern void nf_defrag_ipv6_enable(void);
-
-extern int nf_ct_frag6_init(void);
-extern void nf_ct_frag6_cleanup(void);
-extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
-extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
-                              struct net_device *in,
-                              struct net_device *out,
-                              int (*okfn)(struct sk_buff *));
+void nf_defrag_ipv6_enable(void);
+
+int nf_ct_frag6_init(void);
+void nf_ct_frag6_cleanup(void);
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
+void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+                       struct net_device *in, struct net_device *out,
+                       int (*okfn)(struct sk_buff *));
 
 struct inet_frags_ctl;
 
index 0c1288a50e8bde05d896ebf243d0b7c3a01deb11..01ea6eed1bb1ddcc9b8c9001286ae85730bea416 100644 (file)
@@ -139,15 +139,13 @@ static inline struct net *nf_ct_net(const struct nf_conn *ct)
 }
 
 /* Alter reply tuple (maybe alter helper). */
-extern void
-nf_conntrack_alter_reply(struct nf_conn *ct,
-                        const struct nf_conntrack_tuple *newreply);
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+                             const struct nf_conntrack_tuple *newreply);
 
 /* Is this tuple taken? (ignoring any belonging to the given
    conntrack). */
-extern int
-nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
-                        const struct nf_conn *ignored_conntrack);
+int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+                            const struct nf_conn *ignored_conntrack);
 
 /* Return conntrack_info and tuple hash for given skb. */
 static inline struct nf_conn *
@@ -165,37 +163,34 @@ static inline void nf_ct_put(struct nf_conn *ct)
 }
 
 /* Protocol module loading */
-extern int nf_ct_l3proto_try_module_get(unsigned short l3proto);
-extern void nf_ct_l3proto_module_put(unsigned short l3proto);
+int nf_ct_l3proto_try_module_get(unsigned short l3proto);
+void nf_ct_l3proto_module_put(unsigned short l3proto);
 
 /*
  * Allocate a hashtable of hlist_head (if nulls == 0),
  * or hlist_nulls_head (if nulls == 1)
  */
-extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
+void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
 
-extern void nf_ct_free_hashtable(void *hash, unsigned int size);
+void nf_ct_free_hashtable(void *hash, unsigned int size);
 
-extern struct nf_conntrack_tuple_hash *
+struct nf_conntrack_tuple_hash *
 __nf_conntrack_find(struct net *net, u16 zone,
                    const struct nf_conntrack_tuple *tuple);
 
-extern int nf_conntrack_hash_check_insert(struct nf_conn *ct);
+int nf_conntrack_hash_check_insert(struct nf_conn *ct);
 bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
 
-extern void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
+void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
 
-extern bool nf_ct_get_tuplepr(const struct sk_buff *skb,
-                             unsigned int nhoff, u_int16_t l3num,
-                             struct nf_conntrack_tuple *tuple);
-extern bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
-                                const struct nf_conntrack_tuple *orig);
+bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
+                      u_int16_t l3num, struct nf_conntrack_tuple *tuple);
+bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
+                         const struct nf_conntrack_tuple *orig);
 
-extern void __nf_ct_refresh_acct(struct nf_conn *ct,
-                                enum ip_conntrack_info ctinfo,
-                                const struct sk_buff *skb,
-                                unsigned long extra_jiffies,
-                                int do_acct);
+void __nf_ct_refresh_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                         const struct sk_buff *skb,
+                         unsigned long extra_jiffies, int do_acct);
 
 /* Refresh conntrack for this many jiffies and do accounting */
 static inline void nf_ct_refresh_acct(struct nf_conn *ct,
@@ -214,10 +209,8 @@ static inline void nf_ct_refresh(struct nf_conn *ct,
        __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
 }
 
-extern bool __nf_ct_kill_acct(struct nf_conn *ct,
-                             enum ip_conntrack_info ctinfo,
-                             const struct sk_buff *skb,
-                             int do_acct);
+bool __nf_ct_kill_acct(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                      const struct sk_buff *skb, int do_acct);
 
 /* kill conntrack and do accounting */
 static inline bool nf_ct_kill_acct(struct nf_conn *ct,
@@ -244,19 +237,17 @@ static inline struct nf_conn *nf_ct_untracked_get(void)
 {
        return &__raw_get_cpu_var(nf_conntrack_untracked);
 }
-extern void nf_ct_untracked_status_or(unsigned long bits);
+void nf_ct_untracked_status_or(unsigned long bits);
 
 /* Iterate over all conntracks: if iter returns true, it's deleted. */
-extern void
-nf_ct_iterate_cleanup(struct net *net,
-                     int (*iter)(struct nf_conn *i, void *data),
-                     void *data, u32 portid, int report);
-extern void nf_conntrack_free(struct nf_conn *ct);
-extern struct nf_conn *
-nf_conntrack_alloc(struct net *net, u16 zone,
-                  const struct nf_conntrack_tuple *orig,
-                  const struct nf_conntrack_tuple *repl,
-                  gfp_t gfp);
+void nf_ct_iterate_cleanup(struct net *net,
+                          int (*iter)(struct nf_conn *i, void *data),
+                          void *data, u32 portid, int report);
+void nf_conntrack_free(struct nf_conn *ct);
+struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
+                                  const struct nf_conntrack_tuple *orig,
+                                  const struct nf_conntrack_tuple *repl,
+                                  gfp_t gfp);
 
 static inline int nf_ct_is_template(const struct nf_conn *ct)
 {
@@ -287,7 +278,7 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
 
 struct kernel_param;
 
-extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
+int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
 extern unsigned int nf_conntrack_htable_size;
 extern unsigned int nf_conntrack_max;
 extern unsigned int nf_conntrack_hash_rnd;
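As a hedged usage sketch of the iterator contract noted above ("if iter returns true, it's deleted"), a caller flushing a namespace might look like the following; the names kill_all and flush_all_conntracks are illustrative only, not part of this patch:

#include <net/netfilter/nf_conntrack.h>

/* Returning non-zero tells nf_ct_iterate_cleanup() to delete the entry
 * the callback was invoked for. */
static int kill_all(struct nf_conn *i, void *data)
{
	return 1;
}

static void flush_all_conntracks(struct net *net)
{
	/* portid and report are only used for the destroy notifications */
	nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
}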
index 2bdb7a15fe06b102a5fc88c2cc3da5662ff53a95..fef44edf49c1221fcfd2ce006c18af0c67c761e8 100644 (file)
@@ -42,8 +42,8 @@ struct nf_conn_counter *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
        return acct;
 };
 
-extern unsigned int
-seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir);
+unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct,
+                           int dir);
 
 /* Check if connection tracking accounting is enabled */
 static inline bool nf_ct_acct_enabled(struct net *net)
@@ -57,9 +57,9 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
        net->ct.sysctl_acct = enable;
 }
 
-extern int nf_conntrack_acct_pernet_init(struct net *net);
-extern void nf_conntrack_acct_pernet_fini(struct net *net);
+int nf_conntrack_acct_pernet_init(struct net *net);
+void nf_conntrack_acct_pernet_fini(struct net *net);
 
-extern int nf_conntrack_acct_init(void);
-extern void nf_conntrack_acct_fini(void);
+int nf_conntrack_acct_init(void);
+void nf_conntrack_acct_fini(void);
 #endif /* _NF_CONNTRACK_ACCT_H */
index fb2b6234e9375846bdb37727717c7dbd432550d9..15308b8eb5b5218330c3043c46a02537d00779a2 100644 (file)
 /* This header is used to share core functionality between the
    standalone connection tracking module, and the compatibility layer's use
    of connection tracking. */
-extern unsigned int nf_conntrack_in(struct net *net,
-                                   u_int8_t pf,
-                                   unsigned int hooknum,
-                                   struct sk_buff *skb);
-
-extern int nf_conntrack_init_net(struct net *net);
-extern void nf_conntrack_cleanup_net(struct net *net);
-extern void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
-
-extern int nf_conntrack_proto_pernet_init(struct net *net);
-extern void nf_conntrack_proto_pernet_fini(struct net *net);
-
-extern int nf_conntrack_proto_init(void);
-extern void nf_conntrack_proto_fini(void);
-
-extern int nf_conntrack_init_start(void);
-extern void nf_conntrack_cleanup_start(void);
-
-extern void nf_conntrack_init_end(void);
-extern void nf_conntrack_cleanup_end(void);
-
-extern bool
-nf_ct_get_tuple(const struct sk_buff *skb,
-               unsigned int nhoff,
-               unsigned int dataoff,
-               u_int16_t l3num,
-               u_int8_t protonum,
-               struct nf_conntrack_tuple *tuple,
-               const struct nf_conntrack_l3proto *l3proto,
-               const struct nf_conntrack_l4proto *l4proto);
-
-extern bool
-nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
-                  const struct nf_conntrack_tuple *orig,
-                  const struct nf_conntrack_l3proto *l3proto,
-                  const struct nf_conntrack_l4proto *l4proto);
+unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
+                            struct sk_buff *skb);
+
+int nf_conntrack_init_net(struct net *net);
+void nf_conntrack_cleanup_net(struct net *net);
+void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
+
+int nf_conntrack_proto_pernet_init(struct net *net);
+void nf_conntrack_proto_pernet_fini(struct net *net);
+
+int nf_conntrack_proto_init(void);
+void nf_conntrack_proto_fini(void);
+
+int nf_conntrack_init_start(void);
+void nf_conntrack_cleanup_start(void);
+
+void nf_conntrack_init_end(void);
+void nf_conntrack_cleanup_end(void);
+
+bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
+                    unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
+                    struct nf_conntrack_tuple *tuple,
+                    const struct nf_conntrack_l3proto *l3proto,
+                    const struct nf_conntrack_l4proto *l4proto);
+
+bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+                       const struct nf_conntrack_tuple *orig,
+                       const struct nf_conntrack_l3proto *l3proto,
+                       const struct nf_conntrack_l4proto *l4proto);
 
 /* Find a connection corresponding to a tuple. */
-extern struct nf_conntrack_tuple_hash *
+struct nf_conntrack_tuple_hash *
 nf_conntrack_find_get(struct net *net, u16 zone,
                      const struct nf_conntrack_tuple *tuple);
 
-extern int __nf_conntrack_confirm(struct sk_buff *skb);
+int __nf_conntrack_confirm(struct sk_buff *skb);
 
 /* Confirm a connection: returns NF_DROP if packet must be dropped. */
 static inline int nf_conntrack_confirm(struct sk_buff *skb)
index 092dc651689f81d85a6ae686c5d761aee006918d..0e3d08e4b1d3e59fa101607d88e4c66f7664566d 100644 (file)
@@ -68,10 +68,12 @@ struct nf_ct_event_notifier {
        int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
 
-extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+int nf_conntrack_register_notifier(struct net *net,
+                                  struct nf_ct_event_notifier *nb);
+void nf_conntrack_unregister_notifier(struct net *net,
+                                     struct nf_ct_event_notifier *nb);
 
-extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
+void nf_ct_deliver_cached_events(struct nf_conn *ct);
 
 static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
@@ -166,8 +168,10 @@ struct nf_exp_event_notifier {
        int (*fcn)(unsigned int events, struct nf_exp_event *item);
 };
 
-extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+int nf_ct_expect_register_notifier(struct net *net,
+                                  struct nf_exp_event_notifier *nb);
+void nf_ct_expect_unregister_notifier(struct net *net,
+                                     struct nf_exp_event_notifier *nb);
 
 static inline void
 nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -207,11 +211,11 @@ nf_ct_expect_event(enum ip_conntrack_expect_events event,
        nf_ct_expect_event_report(event, exp, 0, 0);
 }
 
-extern int nf_conntrack_ecache_pernet_init(struct net *net);
-extern void nf_conntrack_ecache_pernet_fini(struct net *net);
+int nf_conntrack_ecache_pernet_init(struct net *net);
+void nf_conntrack_ecache_pernet_fini(struct net *net);
 
-extern int nf_conntrack_ecache_init(void);
-extern void nf_conntrack_ecache_fini(void);
+int nf_conntrack_ecache_init(void);
+void nf_conntrack_ecache_fini(void);
 #else /* CONFIG_NF_CONNTRACK_EVENTS */
 
 static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
index 88a1d4060d5260f7ac51e7b2d74f87ab409cd4de..86372ae0ee840a7eb7bfefe6c705ed748e0582ba 100644 (file)
@@ -73,7 +73,7 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
        ((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
 
 /* Destroy all relationships */
-extern void __nf_ct_ext_destroy(struct nf_conn *ct);
+void __nf_ct_ext_destroy(struct nf_conn *ct);
 static inline void nf_ct_ext_destroy(struct nf_conn *ct)
 {
        if (ct->ext)
index 26c4ae5bfbb8df49abbdcf19dc67de9273575601..6cf614bc0029326da74bdb6145d009f95f3422e5 100644 (file)
@@ -52,21 +52,24 @@ struct nf_conntrack_helper {
        unsigned int queue_num;         /* For user-space helpers. */
 };
 
-extern struct nf_conntrack_helper *
-__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum);
+struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
+                                                      u16 l3num, u8 protonum);
 
-extern struct nf_conntrack_helper *
-nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum);
+struct nf_conntrack_helper *nf_conntrack_helper_try_module_get(const char *name,
+                                                              u16 l3num,
+                                                              u8 protonum);
 
-extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
-extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
+int nf_conntrack_helper_register(struct nf_conntrack_helper *);
+void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
 
-extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, struct nf_conntrack_helper *helper, gfp_t gfp);
+struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
+                                         struct nf_conntrack_helper *helper,
+                                         gfp_t gfp);
 
-extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
-                                    gfp_t flags);
+int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
+                             gfp_t flags);
 
-extern void nf_ct_helper_destroy(struct nf_conn *ct);
+void nf_ct_helper_destroy(struct nf_conn *ct);
 
 static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
 {
@@ -82,17 +85,16 @@ static inline void *nfct_help_data(const struct nf_conn *ct)
        return (void *)help->data;
 }
 
-extern int nf_conntrack_helper_pernet_init(struct net *net);
-extern void nf_conntrack_helper_pernet_fini(struct net *net);
+int nf_conntrack_helper_pernet_init(struct net *net);
+void nf_conntrack_helper_pernet_fini(struct net *net);
 
-extern int nf_conntrack_helper_init(void);
-extern void nf_conntrack_helper_fini(void);
+int nf_conntrack_helper_init(void);
+void nf_conntrack_helper_fini(void);
 
-extern int nf_conntrack_broadcast_help(struct sk_buff *skb,
-                                      unsigned int protoff,
-                                      struct nf_conn *ct,
-                                      enum ip_conntrack_info ctinfo,
-                                      unsigned int timeout);
+int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff,
+                               struct nf_conn *ct,
+                               enum ip_conntrack_info ctinfo,
+                               unsigned int timeout);
 
 struct nf_ct_helper_expectfn {
        struct list_head head;
index 3bb89eac3fa130a477b925d87c441189d27836e1..3efab704b7eb9b303b433549bb342d923e43f67f 100644 (file)
@@ -77,17 +77,17 @@ struct nf_conntrack_l3proto {
 extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
 
 /* Protocol pernet registration. */
-extern int nf_ct_l3proto_pernet_register(struct net *net,
-                                        struct nf_conntrack_l3proto *proto);
-extern void nf_ct_l3proto_pernet_unregister(struct net *net,
-                                           struct nf_conntrack_l3proto *proto);
+int nf_ct_l3proto_pernet_register(struct net *net,
+                                 struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_pernet_unregister(struct net *net,
+                                    struct nf_conntrack_l3proto *proto);
 
 /* Protocol global registration. */
-extern int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
-extern void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
+int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto);
+void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto);
 
-extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
-extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
+struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
+void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
 
 /* Existing built-in protocols */
 extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
index b411d7b17dec40a64cc41df315f6188004ac65fa..4c8d573830b7e606098fbd8fa2b5e483111601b0 100644 (file)
@@ -114,22 +114,22 @@ extern struct nf_conntrack_l4proto nf_conntrack_l4proto_generic;
 
 #define MAX_NF_CT_PROTO 256
 
-extern struct nf_conntrack_l4proto *
-__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto);
+struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto,
+                                                 u_int8_t l4proto);
 
-extern struct nf_conntrack_l4proto *
-nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto);
-extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
+struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto,
+                                                   u_int8_t l4proto);
+void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
 
 /* Protocol pernet registration. */
-extern int nf_ct_l4proto_pernet_register(struct net *net,
-                                        struct nf_conntrack_l4proto *proto);
-extern void nf_ct_l4proto_pernet_unregister(struct net *net,
-                                           struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_pernet_register(struct net *net,
+                                 struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_pernet_unregister(struct net *net,
+                                    struct nf_conntrack_l4proto *proto);
 
 /* Protocol global registration. */
-extern int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
-extern void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
+int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto);
+void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto);
 
 static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
 {
@@ -140,11 +140,11 @@ static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
 }
 
 /* Generic netlink helpers */
-extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
-                                     const struct nf_conntrack_tuple *tuple);
-extern int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
-                                     struct nf_conntrack_tuple *t);
-extern int nf_ct_port_nlattr_tuple_size(void);
+int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
+                              const struct nf_conntrack_tuple *tuple);
+int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
+                              struct nf_conntrack_tuple *t);
+int nf_ct_port_nlattr_tuple_size(void);
 extern const struct nla_policy nf_ct_port_nla_policy[];
 
 #ifdef CONFIG_SYSCTL
index f6177a5fe0cafe6de0e00ac6d2abd1ce9856d897..4b3362991a25f9c9f09b9794c96a0d0e15e89be9 100644 (file)
@@ -30,22 +30,18 @@ static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct)
        return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC);
 }
 
-extern int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                            s32 off);
-extern int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                           __be32 seq, s32 off);
-extern void nf_ct_tcp_seqadj_set(struct sk_buff *skb,
-                                struct nf_conn *ct,
-                                enum ip_conntrack_info ctinfo,
-                                s32 off);
-
-extern int nf_ct_seq_adjust(struct sk_buff *skb,
-                           struct nf_conn *ct, enum ip_conntrack_info ctinfo,
-                           unsigned int protoff);
-extern s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir,
-                           u32 seq);
-
-extern int nf_conntrack_seqadj_init(void);
-extern void nf_conntrack_seqadj_fini(void);
+int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                     s32 off);
+int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                    __be32 seq, s32 off);
+void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct,
+                         enum ip_conntrack_info ctinfo, s32 off);
+
+int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+                    enum ip_conntrack_info ctinfo, unsigned int protoff);
+s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq);
+
+int nf_conntrack_seqadj_init(void);
+void nf_conntrack_seqadj_fini(void);
 
 #endif /* _NF_CONNTRACK_SEQADJ_H */
index f572f313d6f1e0d7ae61f345b9aeb15a7dfea0a2..6793614e6502a0eb51ca7efabc593e7bb6866576 100644 (file)
@@ -56,22 +56,20 @@ struct synproxy_options {
 
 struct tcphdr;
 struct xt_synproxy_info;
-extern bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
-                                  const struct tcphdr *th,
-                                  struct synproxy_options *opts);
-extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
-extern void synproxy_build_options(struct tcphdr *th,
-                                  const struct synproxy_options *opts);
+bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+                           const struct tcphdr *th,
+                           struct synproxy_options *opts);
+unsigned int synproxy_options_size(const struct synproxy_options *opts);
+void synproxy_build_options(struct tcphdr *th,
+                           const struct synproxy_options *opts);
 
-extern void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
-                                          struct synproxy_options *opts);
-extern void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
+void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
+                                   struct synproxy_options *opts);
+void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
 
-extern unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
-                                          unsigned int protoff,
-                                          struct tcphdr *th,
-                                          struct nf_conn *ct,
-                                          enum ip_conntrack_info ctinfo,
-                                          const struct nf_conn_synproxy *synproxy);
+unsigned int synproxy_tstamp_adjust(struct sk_buff *skb, unsigned int protoff,
+                                   struct tcphdr *th, struct nf_conn *ct,
+                                   enum ip_conntrack_info ctinfo,
+                                   const struct nf_conn_synproxy *synproxy);
 
 #endif /* _NF_CONNTRACK_SYNPROXY_H */
index d23aceb16d9443a865bc0e789a60089b0d9c40bc..62308713dd7fa1704409e7db22fca725cc0200b9 100644 (file)
@@ -76,8 +76,8 @@ nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
 }
 
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-extern int nf_conntrack_timeout_init(void);
-extern void nf_conntrack_timeout_fini(void);
+int nf_conntrack_timeout_init(void);
+void nf_conntrack_timeout_fini(void);
 #else
 static inline int nf_conntrack_timeout_init(void)
 {
index b00461413efd4d49ec3e11ba4a3e0786594a32f2..300ae2209f251e7e669c8c353b8de7fb50b16f55 100644 (file)
@@ -48,11 +48,11 @@ static inline void nf_ct_set_tstamp(struct net *net, bool enable)
 }
 
 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
-extern int nf_conntrack_tstamp_pernet_init(struct net *net);
-extern void nf_conntrack_tstamp_pernet_fini(struct net *net);
+int nf_conntrack_tstamp_pernet_init(struct net *net);
+void nf_conntrack_tstamp_pernet_fini(struct net *net);
 
-extern int nf_conntrack_tstamp_init(void);
-extern void nf_conntrack_tstamp_fini(void);
+int nf_conntrack_tstamp_init(void);
+void nf_conntrack_tstamp_fini(void);
 #else
 static inline int nf_conntrack_tstamp_pernet_init(struct net *net)
 {
index 59a1924200536b9fc5847998b7129c8aefeb7060..07eaaf60409215198961cea9834c2d770a90f02e 100644 (file)
@@ -41,13 +41,16 @@ struct nf_conn_nat {
 };
 
 /* Set up the info structure to map into this range. */
-extern unsigned int nf_nat_setup_info(struct nf_conn *ct,
-                                     const struct nf_nat_range *range,
-                                     enum nf_nat_manip_type maniptype);
+unsigned int nf_nat_setup_info(struct nf_conn *ct,
+                              const struct nf_nat_range *range,
+                              enum nf_nat_manip_type maniptype);
+
+extern unsigned int nf_nat_alloc_null_binding(struct nf_conn *ct,
+                                             unsigned int hooknum);
 
 /* Is this tuple already taken? (not by us)*/
-extern int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
-                            const struct nf_conn *ignored_conntrack);
+int nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
+                     const struct nf_conn *ignored_conntrack);
 
 static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
 {
index 972e1e47ec79819f610cf9081be732b9131aa46a..fbfd1ba4254e0c356ca382626cae9d7653e8295e 100644 (file)
@@ -7,12 +7,10 @@
 /* This header is used to share core functionality between the standalone
    NAT module, and the compatibility layer's use of NAT for masquerading. */
 
-extern unsigned int nf_nat_packet(struct nf_conn *ct,
-                                 enum ip_conntrack_info ctinfo,
-                                 unsigned int hooknum,
-                                 struct sk_buff *skb);
+unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+                          unsigned int hooknum, struct sk_buff *skb);
 
-extern int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
+int nf_xfrm_me_harder(struct sk_buff *skb, unsigned int family);
 
 static inline int nf_nat_initialized(struct nf_conn *ct,
                                     enum nf_nat_manip_type manip)
index 404324d1d0c4c2a07cb14352ec8623a53b08b541..01bcc6bfbcc9034e399a938a6a6dd83b2bed6ef7 100644 (file)
@@ -7,14 +7,11 @@
 struct sk_buff;
 
 /* These return true or false. */
-extern int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
-                                     struct nf_conn *ct,
-                                     enum ip_conntrack_info ctinfo,
-                                     unsigned int protoff,
-                                     unsigned int match_offset,
-                                     unsigned int match_len,
-                                     const char *rep_buffer,
-                                     unsigned int rep_len, bool adjust);
+int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct,
+                              enum ip_conntrack_info ctinfo,
+                              unsigned int protoff, unsigned int match_offset,
+                              unsigned int match_len, const char *rep_buffer,
+                              unsigned int rep_len, bool adjust);
 
 static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                                           struct nf_conn *ct,
@@ -30,18 +27,14 @@ static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
                                          rep_buffer, rep_len, true);
 }
 
-extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
-                                   struct nf_conn *ct,
-                                   enum ip_conntrack_info ctinfo,
-                                   unsigned int protoff,
-                                   unsigned int match_offset,
-                                   unsigned int match_len,
-                                   const char *rep_buffer,
-                                   unsigned int rep_len);
+int nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
+                            enum ip_conntrack_info ctinfo,
+                            unsigned int protoff, unsigned int match_offset,
+                            unsigned int match_len, const char *rep_buffer,
+                            unsigned int rep_len);
 
 /* Setup NAT on this expected conntrack so it follows master, but goes
  * to port ct->master->saved_proto. */
-extern void nf_nat_follow_master(struct nf_conn *ct,
-                                struct nf_conntrack_expect *this);
+void nf_nat_follow_master(struct nf_conn *ct, struct nf_conntrack_expect *this);
 
 #endif
index bd3b97e02c82a86eaf8aafc20a3a94b71830fa1f..5a2919b2e09af396540348bad01f12b7deab5ef9 100644 (file)
@@ -35,18 +35,15 @@ struct nf_nat_l3proto {
                                   struct nf_nat_range *range);
 };
 
-extern int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
-extern void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
-extern const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
-
-extern int nf_nat_icmp_reply_translation(struct sk_buff *skb,
-                                        struct nf_conn *ct,
-                                        enum ip_conntrack_info ctinfo,
-                                        unsigned int hooknum);
-extern int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
-                                          struct nf_conn *ct,
-                                          enum ip_conntrack_info ctinfo,
-                                          unsigned int hooknum,
-                                          unsigned int hdrlen);
+int nf_nat_l3proto_register(const struct nf_nat_l3proto *);
+void nf_nat_l3proto_unregister(const struct nf_nat_l3proto *);
+const struct nf_nat_l3proto *__nf_nat_l3proto_find(u8 l3proto);
+
+int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+                                 enum ip_conntrack_info ctinfo,
+                                 unsigned int hooknum);
+int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
+                                   enum ip_conntrack_info ctinfo,
+                                   unsigned int hooknum, unsigned int hdrlen);
 
 #endif /* _NF_NAT_L3PROTO_H */
index 24feb68d1bccd6698be75deaa15f8bb91a7bf0d8..12f4cc841b6eddba6bdfc4132e448c31781b4e84 100644 (file)
@@ -42,10 +42,11 @@ struct nf_nat_l4proto {
 };
 
 /* Protocol registration. */
-extern int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
-extern void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
+void nf_nat_l4proto_unregister(u8 l3proto,
+                              const struct nf_nat_l4proto *l4proto);
 
-extern const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
+const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
 
 /* Built-in protocols. */
 extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
@@ -54,19 +55,18 @@ extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
 extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
 extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
 
-extern bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
-                                   enum nf_nat_manip_type maniptype,
-                                   const union nf_conntrack_man_proto *min,
-                                   const union nf_conntrack_man_proto *max);
+bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
+                            enum nf_nat_manip_type maniptype,
+                            const union nf_conntrack_man_proto *min,
+                            const union nf_conntrack_man_proto *max);
 
-extern void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
-                                       struct nf_conntrack_tuple *tuple,
-                                       const struct nf_nat_range *range,
-                                       enum nf_nat_manip_type maniptype,
-                                       const struct nf_conn *ct,
-                                       u16 *rover);
+void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                                struct nf_conntrack_tuple *tuple,
+                                const struct nf_nat_range *range,
+                                enum nf_nat_manip_type maniptype,
+                                const struct nf_conn *ct, u16 *rover);
 
-extern int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
-                                         struct nf_nat_range *range);
+int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
+                                  struct nf_nat_range *range);
 
 #endif /*_NF_NAT_L4PROTO_H*/
index aaba4bbcdda0e9e7b4b7adfcb4c19e3555bb4219..c1d5b3e34a211a8df6fd4eba749b4fcd9e62131f 100644 (file)
@@ -28,7 +28,7 @@ struct nf_queue_handler {
 
 void nf_register_queue_handler(const struct nf_queue_handler *qh);
 void nf_unregister_queue_handler(void);
-extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
 
 bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
 void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
new file mode 100644 (file)
index 0000000..5a91abf
--- /dev/null
@@ -0,0 +1,519 @@
+#ifndef _NET_NF_TABLES_H
+#define _NET_NF_TABLES_H
+
+#include <linux/list.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netlink.h>
+
+#define NFT_JUMP_STACK_SIZE    16
+
+struct nft_pktinfo {
+       struct sk_buff                  *skb;
+       const struct net_device         *in;
+       const struct net_device         *out;
+       u8                              hooknum;
+       u8                              nhoff;
+       u8                              thoff;
+       /* for x_tables compatibility */
+       struct xt_action_param          xt;
+};
+
+static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
+                                  const struct nf_hook_ops *ops,
+                                  struct sk_buff *skb,
+                                  const struct net_device *in,
+                                  const struct net_device *out)
+{
+       pkt->skb = skb;
+       pkt->in = pkt->xt.in = in;
+       pkt->out = pkt->xt.out = out;
+       pkt->hooknum = pkt->xt.hooknum = ops->hooknum;
+       pkt->xt.family = ops->pf;
+}
+
+struct nft_data {
+       union {
+               u32                             data[4];
+               struct {
+                       u32                     verdict;
+                       struct nft_chain        *chain;
+               };
+       };
+} __attribute__((aligned(__alignof__(u64))));
+
+static inline int nft_data_cmp(const struct nft_data *d1,
+                              const struct nft_data *d2,
+                              unsigned int len)
+{
+       return memcmp(d1->data, d2->data, len);
+}
+
+static inline void nft_data_copy(struct nft_data *dst,
+                                const struct nft_data *src)
+{
+       BUILD_BUG_ON(__alignof__(*dst) != __alignof__(u64));
+       *(u64 *)&dst->data[0] = *(u64 *)&src->data[0];
+       *(u64 *)&dst->data[2] = *(u64 *)&src->data[2];
+}
+
+static inline void nft_data_debug(const struct nft_data *data)
+{
+       pr_debug("data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n",
+                data->data[0], data->data[1],
+                data->data[2], data->data[3]);
+}
+
+/**
+ *     struct nft_ctx - nf_tables rule/set context
+ *
+ *     @net: net namespace
+ *     @skb: netlink skb
+ *     @nlh: netlink message header
+ *     @afi: address family info
+ *     @table: the table the chain is contained in
+ *     @chain: the chain the rule is contained in
+ *     @nla: netlink attributes
+ */
+struct nft_ctx {
+       struct net                      *net;
+       const struct sk_buff            *skb;
+       const struct nlmsghdr           *nlh;
+       const struct nft_af_info        *afi;
+       const struct nft_table          *table;
+       const struct nft_chain          *chain;
+       const struct nlattr * const     *nla;
+};
+
+struct nft_data_desc {
+       enum nft_data_types             type;
+       unsigned int                    len;
+};
+
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+                 struct nft_data_desc *desc, const struct nlattr *nla);
+void nft_data_uninit(const struct nft_data *data, enum nft_data_types type);
+int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+                 enum nft_data_types type, unsigned int len);
+
+static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
+{
+       return reg == NFT_REG_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE;
+}
+
+static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
+{
+       return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
+}
+
+int nft_validate_input_register(enum nft_registers reg);
+int nft_validate_output_register(enum nft_registers reg);
+int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
+                          const struct nft_data *data,
+                          enum nft_data_types type);
+
+/**
+ *     struct nft_set_elem - generic representation of set elements
+ *
+ *     @cookie: implementation specific element cookie
+ *     @key: element key
+ *     @data: element data (maps only)
+ *     @flags: element flags (end of interval)
+ *
+ *     The cookie can be used to store a handle to the element for subsequent
+ *     removal.
+ */
+struct nft_set_elem {
+       void                    *cookie;
+       struct nft_data         key;
+       struct nft_data         data;
+       u32                     flags;
+};
+
+struct nft_set;
+struct nft_set_iter {
+       unsigned int    count;
+       unsigned int    skip;
+       int             err;
+       int             (*fn)(const struct nft_ctx *ctx,
+                             const struct nft_set *set,
+                             const struct nft_set_iter *iter,
+                             const struct nft_set_elem *elem);
+};
+
+/**
+ *     struct nft_set_ops - nf_tables set operations
+ *
+ *     @lookup: look up an element within the set
+ *     @insert: insert new element into set
+ *     @remove: remove element from set
+ *	@walk: iterate over all set elements
+ *     @privsize: function to return size of set private data
+ *     @init: initialize private data of new set instance
+ *     @destroy: destroy private data of set instance
+ *     @list: nf_tables_set_ops list node
+ *     @owner: module reference
+ *     @features: features supported by the implementation
+ */
+struct nft_set_ops {
+       bool                            (*lookup)(const struct nft_set *set,
+                                                 const struct nft_data *key,
+                                                 struct nft_data *data);
+       int                             (*get)(const struct nft_set *set,
+                                              struct nft_set_elem *elem);
+       int                             (*insert)(const struct nft_set *set,
+                                                 const struct nft_set_elem *elem);
+       void                            (*remove)(const struct nft_set *set,
+                                                 const struct nft_set_elem *elem);
+       void                            (*walk)(const struct nft_ctx *ctx,
+                                               const struct nft_set *set,
+                                               struct nft_set_iter *iter);
+
+       unsigned int                    (*privsize)(const struct nlattr * const nla[]);
+       int                             (*init)(const struct nft_set *set,
+                                               const struct nlattr * const nla[]);
+       void                            (*destroy)(const struct nft_set *set);
+
+       struct list_head                list;
+       struct module                   *owner;
+       u32                             features;
+};
+
+int nft_register_set(struct nft_set_ops *ops);
+void nft_unregister_set(struct nft_set_ops *ops);
+
+/**
+ *     struct nft_set - nf_tables set instance
+ *
+ *     @list: table set list node
+ *     @bindings: list of set bindings
+ *     @name: name of the set
+ *     @ktype: key type (numeric type defined by userspace, not used in the kernel)
+ *     @dtype: data type (verdict or numeric type defined by userspace)
+ *     @ops: set ops
+ *     @flags: set flags
+ *     @klen: key length
+ *     @dlen: data length
+ *     @data: private set data
+ */
+struct nft_set {
+       struct list_head                list;
+       struct list_head                bindings;
+       char                            name[IFNAMSIZ];
+       u32                             ktype;
+       u32                             dtype;
+       /* runtime data below here */
+       const struct nft_set_ops        *ops ____cacheline_aligned;
+       u16                             flags;
+       u8                              klen;
+       u8                              dlen;
+       unsigned char                   data[]
+               __attribute__((aligned(__alignof__(u64))));
+};
+
+static inline void *nft_set_priv(const struct nft_set *set)
+{
+       return (void *)set->data;
+}
+
+struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+                                    const struct nlattr *nla);
+
+/**
+ *     struct nft_set_binding - nf_tables set binding
+ *
+ *     @list: set bindings list node
+ *     @chain: chain containing the rule bound to the set
+ *
+ *     A set binding contains all information necessary for validation
+ *     of new elements added to a bound set.
+ */
+struct nft_set_binding {
+       struct list_head                list;
+       const struct nft_chain          *chain;
+};
+
+int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                      struct nft_set_binding *binding);
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                         struct nft_set_binding *binding);
+
+
+/**
+ *     struct nft_expr_type - nf_tables expression type
+ *
+ *     @select_ops: function to select nft_expr_ops
+ *	@ops: default ops, used when no select_ops function is present
+ *     @list: used internally
+ *     @name: Identifier
+ *     @owner: module reference
+ *     @policy: netlink attribute policy
+ *     @maxattr: highest netlink attribute number
+ */
+struct nft_expr_type {
+       const struct nft_expr_ops       *(*select_ops)(const struct nft_ctx *,
+                                                      const struct nlattr * const tb[]);
+       const struct nft_expr_ops       *ops;
+       struct list_head                list;
+       const char                      *name;
+       struct module                   *owner;
+       const struct nla_policy         *policy;
+       unsigned int                    maxattr;
+};
+
+/**
+ *     struct nft_expr_ops - nf_tables expression operations
+ *
+ *     @eval: Expression evaluation function
+ *     @size: full expression size, including private data size
+ *     @init: initialization function
+ *     @destroy: destruction function
+ *     @dump: function to dump parameters
+ *     @type: expression type
+ *     @validate: validate expression, called during loop detection
+ *     @data: extra data to attach to this expression operation
+ */
+struct nft_expr;
+struct nft_expr_ops {
+       void                            (*eval)(const struct nft_expr *expr,
+                                               struct nft_data data[NFT_REG_MAX + 1],
+                                               const struct nft_pktinfo *pkt);
+       unsigned int                    size;
+
+       int                             (*init)(const struct nft_ctx *ctx,
+                                               const struct nft_expr *expr,
+                                               const struct nlattr * const tb[]);
+       void                            (*destroy)(const struct nft_expr *expr);
+       int                             (*dump)(struct sk_buff *skb,
+                                               const struct nft_expr *expr);
+       int                             (*validate)(const struct nft_ctx *ctx,
+                                                   const struct nft_expr *expr,
+                                                   const struct nft_data **data);
+       const struct nft_expr_type      *type;
+       void                            *data;
+};
+
+#define NFT_EXPR_MAXATTR               16
+#define NFT_EXPR_SIZE(size)            (sizeof(struct nft_expr) + \
+                                        ALIGN(size, __alignof__(struct nft_expr)))
+
+/**
+ *     struct nft_expr - nf_tables expression
+ *
+ *     @ops: expression ops
+ *     @data: expression private data
+ */
+struct nft_expr {
+       const struct nft_expr_ops       *ops;
+       unsigned char                   data[];
+};
+
+static inline void *nft_expr_priv(const struct nft_expr *expr)
+{
+       return (void *)expr->data;
+}
+
+/**
+ *     struct nft_rule - nf_tables rule
+ *
+ *     @list: used internally
+ *     @rcu_head: used internally for rcu
+ *     @handle: rule handle
+ *     @genmask: generation mask
+ *     @dlen: length of expression data
+ *     @data: expression data
+ */
+struct nft_rule {
+       struct list_head                list;
+       struct rcu_head                 rcu_head;
+       u64                             handle:46,
+                                       genmask:2,
+                                       dlen:16;
+       unsigned char                   data[]
+               __attribute__((aligned(__alignof__(struct nft_expr))));
+};
+
+/**
+ *     struct nft_rule_trans - nf_tables rule update in transaction
+ *
+ *     @list: used internally
+ *     @rule: rule that needs to be updated
+ *     @chain: chain that this rule belongs to
+ *     @table: table for which this chain applies
+ *	@nlh: netlink header of the message that contains this update
+ *	@family: family expressed as AF_*
+ */
+struct nft_rule_trans {
+       struct list_head                list;
+       struct nft_rule                 *rule;
+       const struct nft_chain          *chain;
+       const struct nft_table          *table;
+       const struct nlmsghdr           *nlh;
+       u8                              family;
+};
+
+static inline struct nft_expr *nft_expr_first(const struct nft_rule *rule)
+{
+       return (struct nft_expr *)&rule->data[0];
+}
+
+static inline struct nft_expr *nft_expr_next(const struct nft_expr *expr)
+{
+       return ((void *)expr) + expr->ops->size;
+}
+
+static inline struct nft_expr *nft_expr_last(const struct nft_rule *rule)
+{
+       return (struct nft_expr *)&rule->data[rule->dlen];
+}
+
+/*
+ * The last pointer isn't really necessary, but the compiler isn't able to
+ * determine that the result of nft_expr_last() is always the same since it
+ * can't assume that the dlen value wasn't changed within calls in the loop.
+ */
+#define nft_rule_for_each_expr(expr, last, rule) \
+       for ((expr) = nft_expr_first(rule), (last) = nft_expr_last(rule); \
+            (expr) != (last); \
+            (expr) = nft_expr_next(expr))
+
+enum nft_chain_flags {
+       NFT_BASE_CHAIN                  = 0x1,
+};
+
+/**
+ *     struct nft_chain - nf_tables chain
+ *
+ *     @rules: list of rules in the chain
+ *     @list: used internally
+ *     @rcu_head: used internally
+ *     @net: net namespace that this chain belongs to
+ *     @table: table that this chain belongs to
+ *     @handle: chain handle
+ *     @flags: bitmask of enum nft_chain_flags
+ *     @use: number of jump references to this chain
+ *     @level: length of longest path to this chain
+ *     @name: name of the chain
+ */
+struct nft_chain {
+       struct list_head                rules;
+       struct list_head                list;
+       struct rcu_head                 rcu_head;
+       struct net                      *net;
+       struct nft_table                *table;
+       u64                             handle;
+       u8                              flags;
+       u16                             use;
+       u16                             level;
+       char                            name[NFT_CHAIN_MAXNAMELEN];
+};
+
+enum nft_chain_type {
+       NFT_CHAIN_T_DEFAULT = 0,
+       NFT_CHAIN_T_ROUTE,
+       NFT_CHAIN_T_NAT,
+       NFT_CHAIN_T_MAX
+};
+
+struct nft_stats {
+       u64 bytes;
+       u64 pkts;
+};
+
+/**
+ *     struct nft_base_chain - nf_tables base chain
+ *
+ *     @ops: netfilter hook ops
+ *     @type: chain type
+ *     @policy: default policy
+ *     @stats: per-cpu chain stats
+ *     @chain: the chain
+ */
+struct nft_base_chain {
+       struct nf_hook_ops              ops;
+       enum nft_chain_type             type;
+       u8                              policy;
+       struct nft_stats __percpu       *stats;
+       struct nft_chain                chain;
+};
+
+static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chain)
+{
+       return container_of(chain, struct nft_base_chain, chain);
+}
+
+unsigned int nft_do_chain_pktinfo(struct nft_pktinfo *pkt,
+                                 const struct nf_hook_ops *ops);
+
+/**
+ *     struct nft_table - nf_tables table
+ *
+ *     @list: used internally
+ *     @chains: chains in the table
+ *     @sets: sets in the table
+ *     @hgenerator: handle generator state
+ *     @use: number of chain references to this table
+ *     @flags: table flag (see enum nft_table_flags)
+ *     @name: name of the table
+ */
+struct nft_table {
+       struct list_head                list;
+       struct list_head                chains;
+       struct list_head                sets;
+       u64                             hgenerator;
+       u32                             use;
+       u16                             flags;
+       char                            name[];
+};
+
+/**
+ *     struct nft_af_info - nf_tables address family info
+ *
+ *     @list: used internally
+ *     @family: address family
+ *     @nhooks: number of hooks in this family
+ *     @owner: module owner
+ *     @tables: used internally
+ *     @hooks: hookfn overrides for packet validation
+ */
+struct nft_af_info {
+       struct list_head                list;
+       int                             family;
+       unsigned int                    nhooks;
+       struct module                   *owner;
+       struct list_head                tables;
+       nf_hookfn                       *hooks[NF_MAX_HOOKS];
+};
+
+int nft_register_afinfo(struct net *, struct nft_af_info *);
+void nft_unregister_afinfo(struct nft_af_info *);
+
+struct nf_chain_type {
+       unsigned int            hook_mask;
+       const char              *name;
+       enum nft_chain_type     type;
+       nf_hookfn               *fn[NF_MAX_HOOKS];
+       struct module           *me;
+       int                     family;
+};
+
+int nft_register_chain_type(struct nf_chain_type *);
+void nft_unregister_chain_type(struct nf_chain_type *);
+
+int nft_register_expr(struct nft_expr_type *);
+void nft_unregister_expr(struct nft_expr_type *);
+
+#define MODULE_ALIAS_NFT_FAMILY(family)        \
+       MODULE_ALIAS("nft-afinfo-" __stringify(family))
+
+#define MODULE_ALIAS_NFT_CHAIN(family, name) \
+       MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
+
+#define MODULE_ALIAS_NFT_EXPR(name) \
+       MODULE_ALIAS("nft-expr-" name)
+
+#define MODULE_ALIAS_NFT_SET() \
+       MODULE_ALIAS("nft-set")
+
+#endif /* _NET_NF_TABLES_H */
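The nft_expr_type/nft_expr_ops pair documented above is this header's main extension point. A minimal, hedged sketch of how a module could plausibly register a do-nothing expression follows; the name "noop", the absence of netlink attributes and the module boilerplate are assumptions for illustration, not code from this patch:

#include <linux/module.h>
#include <net/netfilter/nf_tables.h>

/* Hypothetical expression with no per-rule attributes and no private data. */
static void nft_noop_eval(const struct nft_expr *expr,
			  struct nft_data data[NFT_REG_MAX + 1],
			  const struct nft_pktinfo *pkt)
{
	/* A real expression would inspect pkt and/or write result registers. */
}

static struct nft_expr_type nft_noop_type;
static const struct nft_expr_ops nft_noop_ops = {
	.type	= &nft_noop_type,
	.size	= NFT_EXPR_SIZE(0),	/* no private data */
	.eval	= nft_noop_eval,
};

static struct nft_expr_type nft_noop_type = {
	.name		= "noop",
	.ops		= &nft_noop_ops,	/* static ops, no select_ops() */
	.owner		= THIS_MODULE,
	.maxattr	= 0,
};

static int __init nft_noop_module_init(void)
{
	return nft_register_expr(&nft_noop_type);
}

static void __exit nft_noop_module_exit(void)
{
	nft_unregister_expr(&nft_noop_type);
}

module_init(nft_noop_module_init);
module_exit(nft_noop_module_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFT_EXPR("noop");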
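Likewise, the comment above nft_rule_for_each_expr() explains why the end pointer is computed once; an illustrative (non-upstream) helper shows the intended call pattern:

#include <linux/kernel.h>
#include <net/netfilter/nf_tables.h>

/* Walk every expression packed into rule->data[] and print its type name. */
static void example_walk_rule(const struct nft_rule *rule)
{
	const struct nft_expr *expr, *last;

	nft_rule_for_each_expr(expr, last, rule)
		pr_debug("expr %s, size %u\n",
			 expr->ops->type->name, expr->ops->size);
}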
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
new file mode 100644 (file)
index 0000000..cf2b7ae
--- /dev/null
@@ -0,0 +1,42 @@
+#ifndef _NET_NF_TABLES_CORE_H
+#define _NET_NF_TABLES_CORE_H
+
+int nf_tables_core_module_init(void);
+void nf_tables_core_module_exit(void);
+
+int nft_immediate_module_init(void);
+void nft_immediate_module_exit(void);
+
+struct nft_cmp_fast_expr {
+       u32                     data;
+       enum nft_registers      sreg:8;
+       u8                      len;
+};
+
+extern const struct nft_expr_ops nft_cmp_fast_ops;
+
+int nft_cmp_module_init(void);
+void nft_cmp_module_exit(void);
+
+int nft_lookup_module_init(void);
+void nft_lookup_module_exit(void);
+
+int nft_bitwise_module_init(void);
+void nft_bitwise_module_exit(void);
+
+int nft_byteorder_module_init(void);
+void nft_byteorder_module_exit(void);
+
+struct nft_payload {
+       enum nft_payload_bases  base:8;
+       u8                      offset;
+       u8                      len;
+       enum nft_registers      dreg:8;
+};
+
+extern const struct nft_expr_ops nft_payload_fast_ops;
+
+int nft_payload_module_init(void);
+void nft_payload_module_exit(void);
+
+#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
new file mode 100644 (file)
index 0000000..1be1c2c
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _NF_TABLES_IPV4_H_
+#define _NF_TABLES_IPV4_H_
+
+#include <net/netfilter/nf_tables.h>
+#include <net/ip.h>
+
+static inline void
+nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
+                    const struct nf_hook_ops *ops,
+                    struct sk_buff *skb,
+                    const struct net_device *in,
+                    const struct net_device *out)
+{
+       struct iphdr *ip;
+
+       nft_set_pktinfo(pkt, ops, skb, in, out);
+
+       pkt->xt.thoff = ip_hdrlen(pkt->skb);
+       ip = ip_hdr(pkt->skb);
+       pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+}
+
+#endif
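For context, here is a hedged sketch of how an IPv4 base-chain hook would plausibly use this helper together with nft_do_chain_pktinfo() from nf_tables.h; the function name is hypothetical:

#include <net/netfilter/nf_tables_ipv4.h>

static unsigned int nft_ipv4_example_hook(const struct nf_hook_ops *ops,
					  struct sk_buff *skb,
					  const struct net_device *in,
					  const struct net_device *out,
					  int (*okfn)(struct sk_buff *))
{
	struct nft_pktinfo pkt;

	/* Fill in the generic fields plus the IPv4 transport/fragment offsets. */
	nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);

	return nft_do_chain_pktinfo(&pkt, ops);
}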
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
new file mode 100644 (file)
index 0000000..4a9b88a
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef _NF_TABLES_IPV6_H_
+#define _NF_TABLES_IPV6_H_
+
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/ipv6.h>
+
+static inline int
+nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
+                    const struct nf_hook_ops *ops,
+                    struct sk_buff *skb,
+                    const struct net_device *in,
+                    const struct net_device *out)
+{
+       int protohdr, thoff = 0;
+       unsigned short frag_off;
+
+       nft_set_pktinfo(pkt, ops, skb, in, out);
+
+       protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
+       /* If malformed, drop it */
+       if (protohdr < 0)
+               return -1;
+
+       pkt->xt.thoff = thoff;
+       pkt->xt.fragoff = frag_off;
+
+       return 0;
+}
+
+#endif
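Unlike the IPv4 variant, this helper can fail, which is why it returns int. A hedged sketch of the expected caller (hypothetical name) honours the "drop if malformed" comment:

#include <net/netfilter/nf_tables_ipv6.h>

static unsigned int nft_ipv6_example_hook(const struct nf_hook_ops *ops,
					  struct sk_buff *skb,
					  const struct net_device *in,
					  const struct net_device *out,
					  int (*okfn)(struct sk_buff *))
{
	struct nft_pktinfo pkt;

	/* ipv6_find_hdr() failed on a malformed extension header chain:
	 * drop the packet, as the comment in the helper advises. */
	if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
		return NF_DROP;

	return nft_do_chain_pktinfo(&pkt, ops);
}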
index 495c71f66e7e9334b3506174dc7e5352399938f4..79f45e19f31eaa54deb8437ef4b883bec78def84 100644 (file)
@@ -16,7 +16,7 @@ struct xt_rateest {
        struct rcu_head                 rcu;
 };
 
-extern struct xt_rateest *xt_rateest_lookup(const char *name);
-extern void xt_rateest_put(struct xt_rateest *est);
+struct xt_rateest *xt_rateest_lookup(const char *name);
+void xt_rateest_put(struct xt_rateest *est);
 
 #endif /* _XT_RATEEST_H */
index 9690b0f6698a1d1b433ea572bdc4efb938f501c5..2b47eaadba8fad95b9bde8aeeace37fa03d1fc31 100644 (file)
@@ -225,44 +225,31 @@ struct nl_info {
        u32                     portid;
 };
 
-extern int             netlink_rcv_skb(struct sk_buff *skb,
-                                       int (*cb)(struct sk_buff *,
-                                                 struct nlmsghdr *));
-extern int             nlmsg_notify(struct sock *sk, struct sk_buff *skb,
-                                    u32 portid, unsigned int group, int report,
-                                    gfp_t flags);
-
-extern int             nla_validate(const struct nlattr *head,
-                                    int len, int maxtype,
-                                    const struct nla_policy *policy);
-extern int             nla_parse(struct nlattr **tb, int maxtype,
-                                 const struct nlattr *head, int len,
-                                 const struct nla_policy *policy);
-extern int             nla_policy_len(const struct nla_policy *, int);
-extern struct nlattr * nla_find(const struct nlattr *head,
-                                int len, int attrtype);
-extern size_t          nla_strlcpy(char *dst, const struct nlattr *nla,
-                                   size_t dstsize);
-extern int             nla_memcpy(void *dest, const struct nlattr *src, int count);
-extern int             nla_memcmp(const struct nlattr *nla, const void *data,
-                                  size_t size);
-extern int             nla_strcmp(const struct nlattr *nla, const char *str);
-extern struct nlattr * __nla_reserve(struct sk_buff *skb, int attrtype,
-                                     int attrlen);
-extern void *          __nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
-extern struct nlattr * nla_reserve(struct sk_buff *skb, int attrtype,
-                                   int attrlen);
-extern void *          nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
-extern void            __nla_put(struct sk_buff *skb, int attrtype,
-                                 int attrlen, const void *data);
-extern void            __nla_put_nohdr(struct sk_buff *skb, int attrlen,
-                                       const void *data);
-extern int             nla_put(struct sk_buff *skb, int attrtype,
-                               int attrlen, const void *data);
-extern int             nla_put_nohdr(struct sk_buff *skb, int attrlen,
-                                     const void *data);
-extern int             nla_append(struct sk_buff *skb, int attrlen,
-                                  const void *data);
+int netlink_rcv_skb(struct sk_buff *skb,
+                   int (*cb)(struct sk_buff *, struct nlmsghdr *));
+int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
+                unsigned int group, int report, gfp_t flags);
+
+int nla_validate(const struct nlattr *head, int len, int maxtype,
+                const struct nla_policy *policy);
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+             int len, const struct nla_policy *policy);
+int nla_policy_len(const struct nla_policy *, int);
+struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
+size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
+int nla_memcpy(void *dest, const struct nlattr *src, int count);
+int nla_memcmp(const struct nlattr *nla, const void *data, size_t size);
+int nla_strcmp(const struct nlattr *nla, const char *str);
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen);
+void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen);
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+              const void *data);
+void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
+int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data);
+int nla_append(struct sk_buff *skb, int attrlen, const void *data);
 
 /**************************************************************************
  * Netlink Messages
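As a brief, hedged reminder of how the attribute helpers above fit together (the attribute enum, policy and function below are invented for illustration):

#include <net/netlink.h>

enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_VALUE,
	__EXAMPLE_ATTR_MAX
};
#define EXAMPLE_ATTR_MAX	(__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	[EXAMPLE_ATTR_VALUE]	= { .type = NLA_U32 },
};

/* Validate and parse a flat attribute stream, then pull out one u32. */
static int example_parse(const struct nlattr *head, int len, u32 *value)
{
	struct nlattr *tb[EXAMPLE_ATTR_MAX + 1];
	int err;

	err = nla_parse(tb, EXAMPLE_ATTR_MAX, head, len, example_policy);
	if (err < 0)
		return err;
	if (tb[EXAMPLE_ATTR_VALUE] == NULL)
		return -EINVAL;

	*value = nla_get_u32(tb[EXAMPLE_ATTR_VALUE]);
	return 0;
}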
index bf2ec2202c5698b1bb73e22df60b4a3bc9d95c01..ee520cba2ec2f9f1ce1f011b985524946085fb60 100644 (file)
@@ -15,6 +15,10 @@ struct fib_rules_ops;
 struct hlist_head;
 struct fib_table;
 struct sock;
+struct local_ports {
+       seqlock_t       lock;
+       int             range[2];
+};
 
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
@@ -62,10 +66,11 @@ struct netns_ipv4 {
        int sysctl_icmp_ratemask;
        int sysctl_icmp_errors_use_inbound_ifaddr;
 
+       struct local_ports sysctl_local_ports;
+
        int sysctl_tcp_ecn;
 
        kgid_t sysctl_ping_group_range[2];
-       long sysctl_tcp_mem[3];
 
        atomic_t dev_addr_genid;
 
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
new file mode 100644 (file)
index 0000000..15d056d
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _NETNS_NFTABLES_H_
+#define _NETNS_NFTABLES_H_
+
+#include <linux/list.h>
+
+struct nft_af_info;
+
+struct netns_nftables {
+       struct list_head        af_info;
+       struct list_head        commit_list;
+       struct nft_af_info      *ipv4;
+       struct nft_af_info      *ipv6;
+       struct nft_af_info      *arp;
+       struct nft_af_info      *bridge;
+       u8                      gencursor;
+       u8                      genctr;
+};
+
+#endif
index 121dcf854db54ea418aff35a05677013d05e53bc..110350aca3dfe1c846e05f3f9a61e5cc8afe64d4 100644 (file)
@@ -183,51 +183,50 @@ extern int  sysctl_netrom_routing_control;
 extern int  sysctl_netrom_link_fails_count;
 extern int  sysctl_netrom_reset_circuit;
 
-extern int  nr_rx_frame(struct sk_buff *, struct net_device *);
-extern void nr_destroy_socket(struct sock *);
+int nr_rx_frame(struct sk_buff *, struct net_device *);
+void nr_destroy_socket(struct sock *);
 
 /* nr_dev.c */
-extern int  nr_rx_ip(struct sk_buff *, struct net_device *);
-extern void nr_setup(struct net_device *);
+int nr_rx_ip(struct sk_buff *, struct net_device *);
+void nr_setup(struct net_device *);
 
 /* nr_in.c */
-extern int  nr_process_rx_frame(struct sock *, struct sk_buff *);
+int nr_process_rx_frame(struct sock *, struct sk_buff *);
 
 /* nr_loopback.c */
-extern void nr_loopback_init(void);
-extern void nr_loopback_clear(void);
-extern int  nr_loopback_queue(struct sk_buff *);
+void nr_loopback_init(void);
+void nr_loopback_clear(void);
+int nr_loopback_queue(struct sk_buff *);
 
 /* nr_out.c */
-extern void nr_output(struct sock *, struct sk_buff *);
-extern void nr_send_nak_frame(struct sock *);
-extern void nr_kick(struct sock *);
-extern void nr_transmit_buffer(struct sock *, struct sk_buff *);
-extern void nr_establish_data_link(struct sock *);
-extern void nr_enquiry_response(struct sock *);
-extern void nr_check_iframes_acked(struct sock *, unsigned short);
+void nr_output(struct sock *, struct sk_buff *);
+void nr_send_nak_frame(struct sock *);
+void nr_kick(struct sock *);
+void nr_transmit_buffer(struct sock *, struct sk_buff *);
+void nr_establish_data_link(struct sock *);
+void nr_enquiry_response(struct sock *);
+void nr_check_iframes_acked(struct sock *, unsigned short);
 
 /* nr_route.c */
-extern void nr_rt_device_down(struct net_device *);
-extern struct net_device *nr_dev_first(void);
-extern struct net_device *nr_dev_get(ax25_address *);
-extern int  nr_rt_ioctl(unsigned int, void __user *);
-extern void nr_link_failed(ax25_cb *, int);
-extern int  nr_route_frame(struct sk_buff *, ax25_cb *);
+void nr_rt_device_down(struct net_device *);
+struct net_device *nr_dev_first(void);
+struct net_device *nr_dev_get(ax25_address *);
+int nr_rt_ioctl(unsigned int, void __user *);
+void nr_link_failed(ax25_cb *, int);
+int nr_route_frame(struct sk_buff *, ax25_cb *);
 extern const struct file_operations nr_nodes_fops;
 extern const struct file_operations nr_neigh_fops;
-extern void nr_rt_free(void);
+void nr_rt_free(void);
 
 /* nr_subr.c */
-extern void nr_clear_queues(struct sock *);
-extern void nr_frames_acked(struct sock *, unsigned short);
-extern void nr_requeue_frames(struct sock *);
-extern int  nr_validate_nr(struct sock *, unsigned short);
-extern int  nr_in_rx_window(struct sock *, unsigned short);
-extern void nr_write_internal(struct sock *, int);
+void nr_clear_queues(struct sock *);
+void nr_frames_acked(struct sock *, unsigned short);
+void nr_requeue_frames(struct sock *);
+int nr_validate_nr(struct sock *, unsigned short);
+int nr_in_rx_window(struct sock *, unsigned short);
+void nr_write_internal(struct sock *, int);
 
-extern void __nr_transmit_reply(struct sk_buff *skb, int mine,
-       unsigned char cmdflags);
+void __nr_transmit_reply(struct sk_buff *skb, int mine, unsigned char cmdflags);
 
 /*
  * This routine is called when a Connect Acknowledge with the Choke Flag
@@ -247,24 +246,24 @@ do {                                                                      \
        __nr_transmit_reply((skb), (mine), NR_RESET);                   \
 } while (0)
 
-extern void nr_disconnect(struct sock *, int);
+void nr_disconnect(struct sock *, int);
 
 /* nr_timer.c */
-extern void nr_init_timers(struct sock *sk);
-extern void nr_start_heartbeat(struct sock *);
-extern void nr_start_t1timer(struct sock *);
-extern void nr_start_t2timer(struct sock *);
-extern void nr_start_t4timer(struct sock *);
-extern void nr_start_idletimer(struct sock *);
-extern void nr_stop_heartbeat(struct sock *);
-extern void nr_stop_t1timer(struct sock *);
-extern void nr_stop_t2timer(struct sock *);
-extern void nr_stop_t4timer(struct sock *);
-extern void nr_stop_idletimer(struct sock *);
-extern int  nr_t1timer_running(struct sock *);
+void nr_init_timers(struct sock *sk);
+void nr_start_heartbeat(struct sock *);
+void nr_start_t1timer(struct sock *);
+void nr_start_t2timer(struct sock *);
+void nr_start_t4timer(struct sock *);
+void nr_start_idletimer(struct sock *);
+void nr_stop_heartbeat(struct sock *);
+void nr_stop_t1timer(struct sock *);
+void nr_stop_t2timer(struct sock *);
+void nr_stop_t4timer(struct sock *);
+void nr_stop_idletimer(struct sock *);
+int nr_t1timer_running(struct sock *);
 
 /* sysctl_net_netrom.c */
-extern void nr_register_sysctl(void);
-extern void nr_unregister_sysctl(void);
+void nr_register_sysctl(void);
+void nr_unregister_sysctl(void);
 
 #endif
diff --git a/include/net/nfc/digital.h b/include/net/nfc/digital.h
new file mode 100644 (file)
index 0000000..36acecd
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __NFC_DIGITAL_H
+#define __NFC_DIGITAL_H
+
+#include <linux/skbuff.h>
+#include <net/nfc/nfc.h>
+
+/**
+ * Configuration types for in_configure_hw and tg_configure_hw.
+ */
+enum {
+       NFC_DIGITAL_CONFIG_RF_TECH = 0,
+       NFC_DIGITAL_CONFIG_FRAMING,
+};
+
+/**
+ * RF technology values passed as param argument to in_configure_hw and
+ * tg_configure_hw for NFC_DIGITAL_CONFIG_RF_TECH configuration type.
+ */
+enum {
+       NFC_DIGITAL_RF_TECH_106A = 0,
+       NFC_DIGITAL_RF_TECH_212F,
+       NFC_DIGITAL_RF_TECH_424F,
+
+       NFC_DIGITAL_RF_TECH_LAST,
+};
+
+/**
+ * Framing configuration passed as param argument to in_configure_hw and
+ * tg_configure_hw for NFC_DIGITAL_CONFIG_FRAMING configuration type.
+ */
+enum {
+       NFC_DIGITAL_FRAMING_NFCA_SHORT = 0,
+       NFC_DIGITAL_FRAMING_NFCA_STANDARD,
+       NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A,
+
+       NFC_DIGITAL_FRAMING_NFCA_T1T,
+       NFC_DIGITAL_FRAMING_NFCA_T2T,
+       NFC_DIGITAL_FRAMING_NFCA_NFC_DEP,
+
+       NFC_DIGITAL_FRAMING_NFCF,
+       NFC_DIGITAL_FRAMING_NFCF_T3T,
+       NFC_DIGITAL_FRAMING_NFCF_NFC_DEP,
+       NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED,
+
+       NFC_DIGITAL_FRAMING_LAST,
+};
+
+#define DIGITAL_MDAA_NFCID1_SIZE 3
+
+struct digital_tg_mdaa_params {
+       u16 sens_res;
+       u8 nfcid1[DIGITAL_MDAA_NFCID1_SIZE];
+       u8 sel_res;
+
+       u8 nfcid2[NFC_NFCID2_MAXSIZE];
+       u16 sc;
+};
+
+struct nfc_digital_dev;
+
+/**
+ * nfc_digital_cmd_complete_t - Definition of command result callback
+ *
+ * @ddev: nfc_digital_device ref
+ * @arg: user data
+ * @resp: response data
+ *
+ * The resp pointer can be an error code and should be checked with the
+ * IS_ERR() macro. The callback is responsible for freeing the resp sk_buff.
+ */
+typedef void (*nfc_digital_cmd_complete_t)(struct nfc_digital_dev *ddev,
+                                          void *arg, struct sk_buff *resp);
+
+/**
+ * Device side NFC Digital operations
+ *
+ * Initiator mode:
+ * @in_configure_hw: Hardware configuration for RF technology and communication
+ *     framing in initiator mode. This is a synchronous function.
+ * @in_send_cmd: Initiator mode data exchange using RF technology and framing
+ *     previously set with in_configure_hw. The peer response is returned
+ *     through callback cb. If an I/O error occurs or the peer does not reply
+ *     within the specified timeout (in ms), the error code is passed back
+ *     through the resp pointer. This is an asynchronous function.
+ *
+ * Target mode: Only NFC-DEP protocol is supported in target mode.
+ * @tg_configure_hw: Hardware configuration for RF technology and communication
+ *     framing in target mode. This is a synchronous function.
+ * @tg_send_cmd: Target mode data exchange using RF technology and framing
+ *     previously set with tg_configure_hw. The peer's next command is returned
+ *     through callback cb. If an I/O error occurs or the peer does not reply
+ *     within the specified timeout (in ms), the error code is passed back
+ *     through the resp pointer. This is an asynchronous function.
+ * @tg_listen: Put the device in listen mode waiting for data from the peer
+ *     device. This is an asynchronous function.
+ * @tg_listen_mdaa: If supported, put the device in automatic listen mode with
+ *     mode detection and automatic anti-collision. In this mode, the device
+ *     automatically detects the RF technology and executes the anti-collision
+ *     detection using the command responses specified in mdaa_params. The
+ *     mdaa_params structure contains SENS_RES, NFCID1, and SEL_RES for the
+ *     106A RF technology, and NFCID2 and system code (sc) for 212F and 424F.
+ *     The driver returns the NFC-DEP ATR_REQ command through cb. The digital
+ *     stack deduces the RF technology from the SoD of the ATR_REQ frame.
+ *     This is an asynchronous function.
+ *
+ * @switch_rf: Turn the device radio on or off. The stack does not explicitly
+ *     call switch_rf to turn the radio on; a call to in|tg_configure_hw must
+ *     turn the device radio on.
+ * @abort_cmd: Discard the last sent command.
+ */
+struct nfc_digital_ops {
+       int (*in_configure_hw)(struct nfc_digital_dev *ddev, int type,
+                              int param);
+       int (*in_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+                          u16 timeout, nfc_digital_cmd_complete_t cb,
+                          void *arg);
+
+       int (*tg_configure_hw)(struct nfc_digital_dev *ddev, int type,
+                              int param);
+       int (*tg_send_cmd)(struct nfc_digital_dev *ddev, struct sk_buff *skb,
+                          u16 timeout, nfc_digital_cmd_complete_t cb,
+                          void *arg);
+       int (*tg_listen)(struct nfc_digital_dev *ddev, u16 timeout,
+                        nfc_digital_cmd_complete_t cb, void *arg);
+       int (*tg_listen_mdaa)(struct nfc_digital_dev *ddev,
+                             struct digital_tg_mdaa_params *mdaa_params,
+                             u16 timeout, nfc_digital_cmd_complete_t cb,
+                             void *arg);
+
+       int (*switch_rf)(struct nfc_digital_dev *ddev, bool on);
+       void (*abort_cmd)(struct nfc_digital_dev *ddev);
+};
+
+#define NFC_DIGITAL_POLL_MODE_COUNT_MAX        6 /* 106A, 212F, and 424F in & tg */
+
+typedef int (*digital_poll_t)(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+struct digital_poll_tech {
+       u8 rf_tech;
+       digital_poll_t poll_func;
+};
+
+/**
+ * Driver capabilities - bit mask made of the following values
+ *
+ * @NFC_DIGITAL_DRV_CAPS_IN_CRC: The driver handles CRC calculation in initiator
+ *     mode.
+ * @NFC_DIGITAL_DRV_CAPS_TG_CRC: The driver handles CRC calculation in target
+ *     mode.
+ */
+#define NFC_DIGITAL_DRV_CAPS_IN_CRC    0x0001
+#define NFC_DIGITAL_DRV_CAPS_TG_CRC    0x0002
+
+struct nfc_digital_dev {
+       struct nfc_dev *nfc_dev;
+       struct nfc_digital_ops *ops;
+
+       u32 protocols;
+
+       int tx_headroom;
+       int tx_tailroom;
+
+       u32 driver_capabilities;
+       void *driver_data;
+
+       struct digital_poll_tech poll_techs[NFC_DIGITAL_POLL_MODE_COUNT_MAX];
+       u8 poll_tech_count;
+       u8 poll_tech_index;
+       struct mutex poll_lock;
+
+       struct work_struct cmd_work;
+       struct work_struct cmd_complete_work;
+       struct list_head cmd_queue;
+       struct mutex cmd_lock;
+
+       struct work_struct poll_work;
+
+       u8 curr_protocol;
+       u8 curr_rf_tech;
+       u8 curr_nfc_dep_pni;
+
+       int (*skb_check_crc)(struct sk_buff *skb);
+       void (*skb_add_crc)(struct sk_buff *skb);
+};
+
+struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
+                                                   __u32 supported_protocols,
+                                                   __u32 driver_capabilities,
+                                                   int tx_headroom,
+                                                   int tx_tailroom);
+void nfc_digital_free_device(struct nfc_digital_dev *ndev);
+int nfc_digital_register_device(struct nfc_digital_dev *ndev);
+void nfc_digital_unregister_device(struct nfc_digital_dev *ndev);
+
+static inline void nfc_digital_set_parent_dev(struct nfc_digital_dev *ndev,
+                                             struct device *dev)
+{
+       nfc_set_parent_dev(ndev->nfc_dev, dev);
+}
+
+static inline void nfc_digital_set_drvdata(struct nfc_digital_dev *dev,
+                                          void *data)
+{
+       dev->driver_data = data;
+}
+
+static inline void *nfc_digital_get_drvdata(struct nfc_digital_dev *dev)
+{
+       return dev->driver_data;
+}
+
+#endif /* __NFC_DIGITAL_H */
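The nfc_digital_ops and nfc_digital_dev declarations above are the whole contract between a driver and the new digital stack. A condensed sketch of how a driver could hook in, using only what this header declares; the sample_* names are hypothetical, the NFC_PROTO_NFC_DEP_MASK protocol mask is assumed to come from <uapi/linux/nfc.h>, and a real driver must provide all mandatory callbacks rather than the two stubbed here:

static int sample_in_configure_hw(struct nfc_digital_dev *ddev, int type,
				  int param)
{
	/* program the hardware for the requested RF technology or framing */
	return 0;
}

static int sample_in_send_cmd(struct nfc_digital_dev *ddev,
			      struct sk_buff *skb, u16 timeout,
			      nfc_digital_cmd_complete_t cb, void *arg)
{
	/*
	 * Queue skb to the hardware. When the exchange finishes, call
	 * cb(ddev, arg, resp) with the response skb, or with an ERR_PTR()
	 * value on I/O error or timeout; the callback then owns resp.
	 */
	return 0;
}

static struct nfc_digital_ops sample_digital_ops = {
	.in_configure_hw	= sample_in_configure_hw,
	.in_send_cmd		= sample_in_send_cmd,
	/* a real driver also fills in the tg_*, switch_rf and abort_cmd ops */
};

static int sample_register(struct device *parent)
{
	struct nfc_digital_dev *ddev;
	int r;

	/* this driver computes CRCs itself in initiator mode */
	ddev = nfc_digital_allocate_device(&sample_digital_ops,
					   NFC_PROTO_NFC_DEP_MASK,
					   NFC_DIGITAL_DRV_CAPS_IN_CRC,
					   0 /* tx_headroom */,
					   0 /* tx_tailroom */);
	if (!ddev)
		return -ENOMEM;

	nfc_digital_set_parent_dev(ddev, parent);

	r = nfc_digital_register_device(ddev);
	if (r)
		nfc_digital_free_device(ddev);

	return r;
}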
index b64b7bce4b94302a8edb923bc54bc1e4b2c8b997..2eca2960ca9c54116bd187b6e1e18b70060f0282 100644 (file)
 
 #include <net/nfc/nfc.h>
 
-struct nfc_phy_ops {
-       int (*write)(void *dev_id, struct sk_buff *skb);
-       int (*enable)(void *dev_id);
-       void (*disable)(void *dev_id);
-};
-
 struct nfc_hci_dev;
 
 struct nfc_hci_ops {
index 88785e5c6b2cf6d2c32a8af553259a9d21678025..e5aa5acafea0e0e176e83ce50d58d33529be4669 100644 (file)
 #define NCI_GID_NFCEE_MGMT                                     0x2
 #define NCI_GID_PROPRIETARY                                    0xf
 
+/* ----- NCI over SPI head/crc(tail) room needed for outgoing frames ----- */
+#define NCI_SPI_HDR_LEN                                                4
+#define NCI_SPI_CRC_LEN                                                2
+
 /* ---- NCI Packet structures ---- */
 #define NCI_CTRL_HDR_SIZE                                      3
 #define NCI_DATA_HDR_SIZE                                      3
index 99fc1f3a392af172b2c3d5b5b499898f5cfbae39..6126f1f992b40068923771cf0a7967395f88017e 100644 (file)
@@ -207,19 +207,9 @@ int nci_to_errno(__u8 code);
 #define NCI_SPI_CRC_ENABLED    0x01
 
 /* ----- NCI SPI structures ----- */
-struct nci_spi_dev;
-
-struct nci_spi_ops {
-       int (*open)(struct nci_spi_dev *ndev);
-       int (*close)(struct nci_spi_dev *ndev);
-       void (*assert_int)(struct nci_spi_dev *ndev);
-       void (*deassert_int)(struct nci_spi_dev *ndev);
-};
-
-struct nci_spi_dev {
-       struct nci_dev          *nci_dev;
+struct nci_spi {
+       struct nci_dev          *ndev;
        struct spi_device       *spi;
-       struct nci_spi_ops      *ops;
 
        unsigned int            xfer_udelay;    /* microseconds delay between
                                                  transactions */
@@ -227,31 +217,15 @@ struct nci_spi_dev {
 
        struct completion       req_completion;
        u8                      req_result;
-
-       void                    *driver_data;
 };
 
-/* ----- NCI SPI Devices ----- */
-struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi,
-                                               struct nci_spi_ops *ops,
-                                               u32 supported_protocols,
-                                               u32 supported_se,
-                                               u8 acknowledge_mode,
-                                               unsigned int delay);
-void nci_spi_free_device(struct nci_spi_dev *ndev);
-int nci_spi_register_device(struct nci_spi_dev *ndev);
-void nci_spi_unregister_device(struct nci_spi_dev *ndev);
-int nci_spi_recv_frame(struct nci_spi_dev *ndev);
-
-static inline void nci_spi_set_drvdata(struct nci_spi_dev *ndev,
-                                           void *data)
-{
-       ndev->driver_data = data;
-}
-
-static inline void *nci_spi_get_drvdata(struct nci_spi_dev *ndev)
-{
-       return ndev->driver_data;
-}
+/* ----- NCI SPI ----- */
+struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi,
+                                    u8 acknowledge_mode, unsigned int delay,
+                                    struct nci_dev *ndev);
+int nci_spi_send(struct nci_spi *nspi,
+                struct completion *write_handshake_completion,
+                struct sk_buff *skb);
+struct sk_buff *nci_spi_read(struct nci_spi *nspi);
 
 #endif /* __NCI_CORE_H */
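With this rework the SPI glue stops being a standalone NCI device with its own ops table and becomes a thin framing helper bound to an existing nci_dev. A rough usage sketch for a hypothetical SPI transport driver; only nci_spi_allocate_spi(), nci_spi_read() and the struct nci_spi layout come from the header above, while the hand-off via nci_recv_frame() is an assumption about how the owning NCI driver would consume the frame:

static int sample_spi_setup(struct spi_device *spi, struct nci_dev *ndev)
{
	struct nci_spi *nspi;

	/* no write-handshake acknowledgement, 50 us between SPI transfers */
	nspi = nci_spi_allocate_spi(spi, 0, 50, ndev);
	if (!nspi)
		return -ENOMEM;

	spi_set_drvdata(spi, nspi);
	return 0;
}

static irqreturn_t sample_spi_irq(int irq, void *data)
{
	struct nci_spi *nspi = data;
	struct sk_buff *skb;

	skb = nci_spi_read(nspi);		/* one NCI frame, or NULL */
	if (!skb)
		return IRQ_NONE;

	nci_recv_frame(nspi->ndev, skb);	/* pass it up to the NCI core */
	return IRQ_HANDLED;
}

Outgoing frames go the other way through nci_spi_send(), with an optional completion for drivers that use the write handshake.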
index f68ee68e4e3e97cd055bc8cc760ab6bba9a11a4b..82fc4e43fc6e9ec156e19e18dddc4d3ec1fbe037 100644 (file)
 #include <linux/device.h>
 #include <linux/skbuff.h>
 
-#define nfc_dev_info(dev, fmt, arg...) dev_info((dev), "NFC: " fmt "\n", ## arg)
-#define nfc_dev_err(dev, fmt, arg...) dev_err((dev), "NFC: " fmt "\n", ## arg)
-#define nfc_dev_dbg(dev, fmt, arg...) dev_dbg((dev), fmt "\n", ## arg)
+#define nfc_info(dev, fmt, ...) dev_info((dev), "NFC: " fmt, ##__VA_ARGS__)
+#define nfc_err(dev, fmt, ...) dev_err((dev), "NFC: " fmt, ##__VA_ARGS__)
+
+struct nfc_phy_ops {
+       int (*write)(void *dev_id, struct sk_buff *skb);
+       int (*enable)(void *dev_id);
+       void (*disable)(void *dev_id);
+};
 
 struct nfc_dev;
 
@@ -48,6 +53,8 @@ struct nfc_dev;
 typedef void (*data_exchange_cb_t)(void *context, struct sk_buff *skb,
                                                                int err);
 
+typedef void (*se_io_cb_t)(void *context, u8 *apdu, size_t apdu_len, int err);
+
 struct nfc_target;
 
 struct nfc_ops {
@@ -74,12 +81,23 @@ struct nfc_ops {
        int (*discover_se)(struct nfc_dev *dev);
        int (*enable_se)(struct nfc_dev *dev, u32 se_idx);
        int (*disable_se)(struct nfc_dev *dev, u32 se_idx);
+       int (*se_io) (struct nfc_dev *dev, u32 se_idx,
+                     u8 *apdu, size_t apdu_length,
+                     se_io_cb_t cb, void *cb_context);
 };
 
 #define NFC_TARGET_IDX_ANY -1
 #define NFC_MAX_GT_LEN 48
 #define NFC_ATR_RES_GT_OFFSET 15
 
+/**
+ * struct nfc_target - NFC target description
+ *
+ * @sens_res: 2 bytes describing the target SENS_RES response, if the target
+ *     is a Type A target. The %sens_res most significant byte must be byte 2
+ *     as described by the NFC Forum Digital specification (i.e. the platform
+ *     configuration byte), while the %sens_res least significant byte is byte 1.
+ */
 struct nfc_target {
        u32 idx;
        u32 supported_protocols;
@@ -243,5 +261,6 @@ void nfc_driver_failure(struct nfc_dev *dev, int err);
 
 int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type);
 int nfc_remove_se(struct nfc_dev *dev, u32 se_idx);
+struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx);
 
 #endif /* __NET_NFC_H */
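One behavioural detail in the logging change above: the removed nfc_dev_info()/nfc_dev_err() macros appended a trailing newline for the caller, whereas the new nfc_info()/nfc_err() do not, so the format string now has to carry its own "\n". A small illustrative caller (function and values are hypothetical):

static void sample_report_fw(struct device *dev, int err, u32 version)
{
	if (err)
		nfc_err(dev, "firmware download failed: %d\n", err);
	else
		nfc_info(dev, "firmware %u.%u loaded\n",
			 version >> 16, version & 0xffff);
}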
index 42e9fac51b3115f86d0a766d8d19c90cc812ac0e..05e41383856bdb0a10db5606c5fbd3040a297a21 100644 (file)
@@ -1,13 +1,13 @@
 #ifndef _NET_P8022_H
 #define _NET_P8022_H
-extern struct datalink_proto *
-       register_8022_client(unsigned char type,
-                            int (*func)(struct sk_buff *skb,
-                                        struct net_device *dev,
-                                        struct packet_type *pt,
-                                        struct net_device *orig_dev));
-extern void unregister_8022_client(struct datalink_proto *proto);
+struct datalink_proto *
+register_8022_client(unsigned char type,
+                    int (*func)(struct sk_buff *skb,
+                                struct net_device *dev,
+                                struct packet_type *pt,
+                                struct net_device *orig_dev));
+void unregister_8022_client(struct datalink_proto *proto);
 
-extern struct datalink_proto *make_8023_client(void);
-extern void destroy_8023_client(struct datalink_proto *dl);
+struct datalink_proto *make_8023_client(void);
+void destroy_8023_client(struct datalink_proto *dl);
 #endif
index 5db0224b73ac47b1513ffb9361e48aa0167bb24d..3f67704f3747281b3ad0852fd5d69dcd49c9b01c 100644 (file)
@@ -103,8 +103,8 @@ void ping_seq_stop(struct seq_file *seq, void *v);
 int ping_proc_register(struct net *net, struct ping_seq_afinfo *afinfo);
 void ping_proc_unregister(struct net *net, struct ping_seq_afinfo *afinfo);
 
-extern int __init ping_proc_init(void);
-extern void ping_proc_exit(void);
+int __init ping_proc_init(void);
+void ping_proc_exit(void);
 #endif
 
 void __init ping_init(void);
index 047c0476c0a095c2b271713ca232f7099564558b..fbf7676c9a02e352890b66d0bc3caf1775dbb199 100644 (file)
@@ -96,20 +96,20 @@ extern const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS];
 extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
 #endif
 
-extern int     inet_add_protocol(const struct net_protocol *prot, unsigned char num);
-extern int     inet_del_protocol(const struct net_protocol *prot, unsigned char num);
-extern int     inet_add_offload(const struct net_offload *prot, unsigned char num);
-extern int     inet_del_offload(const struct net_offload *prot, unsigned char num);
-extern void    inet_register_protosw(struct inet_protosw *p);
-extern void    inet_unregister_protosw(struct inet_protosw *p);
+int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_del_protocol(const struct net_protocol *prot, unsigned char num);
+int inet_add_offload(const struct net_offload *prot, unsigned char num);
+int inet_del_offload(const struct net_offload *prot, unsigned char num);
+void inet_register_protosw(struct inet_protosw *p);
+void inet_unregister_protosw(struct inet_protosw *p);
 
 #if IS_ENABLED(CONFIG_IPV6)
-extern int     inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
-extern int     inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
-extern int     inet6_register_protosw(struct inet_protosw *p);
-extern void    inet6_unregister_protosw(struct inet_protosw *p);
+int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char num);
+int inet6_register_protosw(struct inet_protosw *p);
+void inet6_unregister_protosw(struct inet_protosw *p);
 #endif
-extern int     inet6_add_offload(const struct net_offload *prot, unsigned char num);
-extern int     inet6_del_offload(const struct net_offload *prot, unsigned char num);
+int inet6_add_offload(const struct net_offload *prot, unsigned char num);
+int inet6_del_offload(const struct net_offload *prot, unsigned char num);
 
 #endif /* _PROTOCOL_H */
index fe456c295b04400e3e05d489ea7d22349ec486b4..78db4cc1306a147ccb178265f265b9fd8e864ed5 100644 (file)
@@ -1,11 +1,11 @@
 #ifndef _NET_PSNAP_H
 #define _NET_PSNAP_H
 
-extern struct datalink_proto *
+struct datalink_proto *
 register_snap_client(const unsigned char *desc,
                     int (*rcvfunc)(struct sk_buff *, struct net_device *,
                                    struct packet_type *,
                                    struct net_device *orig_dev));
-extern void unregister_snap_client(struct datalink_proto *proto);
+void unregister_snap_client(struct datalink_proto *proto);
 
 #endif
index 42ce6fe7a2d519c84c0f171cebaad87f537b23b9..6a40c6562dd2a39ac5b4f6ee4a1331b0559bfb02 100644 (file)
@@ -26,7 +26,7 @@ extern struct proto raw_prot;
 void raw_icmp_error(struct sk_buff *, int, u32);
 int raw_local_deliver(struct sk_buff *, int);
 
-extern int     raw_rcv(struct sock *, struct sk_buff *);
+int raw_rcv(struct sock *, struct sk_buff *);
 
 #define RAW_HTABLE_SIZE        MAX_INET_PROTOS
 
@@ -36,8 +36,8 @@ struct raw_hashinfo {
 };
 
 #ifdef CONFIG_PROC_FS
-extern int  raw_proc_init(void);
-extern void raw_proc_exit(void);
+int raw_proc_init(void);
+void raw_proc_exit(void);
 
 struct raw_iter_state {
        struct seq_net_private p;
index e7ea660e4db606139e54567d2ed7d14b93eec4f4..87783dea0791c616a9bac5a164e4421dc3ef2363 100644 (file)
@@ -7,8 +7,7 @@ void raw6_icmp_error(struct sk_buff *, int nexthdr,
                u8 type, u8 code, int inner_offset, __be32);
 bool raw6_local_deliver(struct sk_buff *, int);
 
-extern int                     rawv6_rcv(struct sock *sk,
-                                         struct sk_buff *skb);
+int rawv6_rcv(struct sock *sk, struct sk_buff *skb);
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 int rawv6_mh_filter_register(int (*filter)(struct sock *sock,
index 59795e42c8b61464512184990c3a12698c9a184f..7f830ff67f08c318717e809221d1586f17948185 100644 (file)
@@ -43,11 +43,12 @@ struct request_sock_ops {
                                           struct request_sock *req);
 };
 
-extern int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
+int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
 
 /* struct request_sock - mini sock to represent a connection request
  */
 struct request_sock {
+       struct sock_common              __req_common;
        struct request_sock             *dl_next;
        u16                             mss;
        u8                              num_retrans; /* number of retransmits */
@@ -162,13 +163,13 @@ struct request_sock_queue {
                                             */
 };
 
-extern int reqsk_queue_alloc(struct request_sock_queue *queue,
-                            unsigned int nr_table_entries);
+int reqsk_queue_alloc(struct request_sock_queue *queue,
+                     unsigned int nr_table_entries);
 
-extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_queue_destroy(struct request_sock_queue *queue);
-extern void reqsk_fastopen_remove(struct sock *sk,
-                                 struct request_sock *req, bool reset);
+void __reqsk_queue_destroy(struct request_sock_queue *queue);
+void reqsk_queue_destroy(struct request_sock_queue *queue);
+void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+                          bool reset);
 
 static inline struct request_sock *
        reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
index 555dd198aab755c9e9ef21a751992ccf653b64c7..50811fe2c585b0e9fd8ffb365971229872eb353e 100644 (file)
@@ -160,38 +160,42 @@ extern int  sysctl_rose_routing_control;
 extern int  sysctl_rose_link_fail_timeout;
 extern int  sysctl_rose_maximum_vcs;
 extern int  sysctl_rose_window_size;
-extern int  rosecmp(rose_address *, rose_address *);
-extern int  rosecmpm(rose_address *, rose_address *, unsigned short);
-extern char *rose2asc(char *buf, const rose_address *);
-extern struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
-extern void rose_kill_by_neigh(struct rose_neigh *);
-extern unsigned int rose_new_lci(struct rose_neigh *);
-extern int  rose_rx_call_request(struct sk_buff *, struct net_device *, struct rose_neigh *, unsigned int);
-extern void rose_destroy_socket(struct sock *);
+
+int rosecmp(rose_address *, rose_address *);
+int rosecmpm(rose_address *, rose_address *, unsigned short);
+char *rose2asc(char *buf, const rose_address *);
+struct sock *rose_find_socket(unsigned int, struct rose_neigh *);
+void rose_kill_by_neigh(struct rose_neigh *);
+unsigned int rose_new_lci(struct rose_neigh *);
+int rose_rx_call_request(struct sk_buff *, struct net_device *,
+                        struct rose_neigh *, unsigned int);
+void rose_destroy_socket(struct sock *);
 
 /* rose_dev.c */
-extern void  rose_setup(struct net_device *);
+void rose_setup(struct net_device *);
 
 /* rose_in.c */
-extern int  rose_process_rx_frame(struct sock *, struct sk_buff *);
+int rose_process_rx_frame(struct sock *, struct sk_buff *);
 
 /* rose_link.c */
-extern void rose_start_ftimer(struct rose_neigh *);
-extern void rose_stop_ftimer(struct rose_neigh *);
-extern void rose_stop_t0timer(struct rose_neigh *);
-extern int  rose_ftimer_running(struct rose_neigh *);
-extern void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *, unsigned short);
-extern void rose_transmit_clear_request(struct rose_neigh *, unsigned int, unsigned char, unsigned char);
-extern void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
+void rose_start_ftimer(struct rose_neigh *);
+void rose_stop_ftimer(struct rose_neigh *);
+void rose_stop_t0timer(struct rose_neigh *);
+int rose_ftimer_running(struct rose_neigh *);
+void rose_link_rx_restart(struct sk_buff *, struct rose_neigh *,
+                         unsigned short);
+void rose_transmit_clear_request(struct rose_neigh *, unsigned int,
+                                unsigned char, unsigned char);
+void rose_transmit_link(struct sk_buff *, struct rose_neigh *);
 
 /* rose_loopback.c */
-extern void rose_loopback_init(void);
-extern void rose_loopback_clear(void);
-extern int  rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
+void rose_loopback_init(void);
+void rose_loopback_clear(void);
+int rose_loopback_queue(struct sk_buff *, struct rose_neigh *);
 
 /* rose_out.c */
-extern void rose_kick(struct sock *);
-extern void rose_enquiry_response(struct sock *);
+void rose_kick(struct sock *);
+void rose_enquiry_response(struct sock *);
 
 /* rose_route.c */
 extern struct rose_neigh *rose_loopback_neigh;
@@ -199,43 +203,45 @@ extern const struct file_operations rose_neigh_fops;
 extern const struct file_operations rose_nodes_fops;
 extern const struct file_operations rose_routes_fops;
 
-extern void rose_add_loopback_neigh(void);
-extern int __must_check rose_add_loopback_node(rose_address *);
-extern void rose_del_loopback_node(rose_address *);
-extern void rose_rt_device_down(struct net_device *);
-extern void rose_link_device_down(struct net_device *);
-extern struct net_device *rose_dev_first(void);
-extern struct net_device *rose_dev_get(rose_address *);
-extern struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
-extern struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *, unsigned char *, int);
-extern int  rose_rt_ioctl(unsigned int, void __user *);
-extern void rose_link_failed(ax25_cb *, int);
-extern int  rose_route_frame(struct sk_buff *, ax25_cb *);
-extern void rose_rt_free(void);
+void rose_add_loopback_neigh(void);
+int __must_check rose_add_loopback_node(rose_address *);
+void rose_del_loopback_node(rose_address *);
+void rose_rt_device_down(struct net_device *);
+void rose_link_device_down(struct net_device *);
+struct net_device *rose_dev_first(void);
+struct net_device *rose_dev_get(rose_address *);
+struct rose_route *rose_route_free_lci(unsigned int, struct rose_neigh *);
+struct rose_neigh *rose_get_neigh(rose_address *, unsigned char *,
+                                 unsigned char *, int);
+int rose_rt_ioctl(unsigned int, void __user *);
+void rose_link_failed(ax25_cb *, int);
+int rose_route_frame(struct sk_buff *, ax25_cb *);
+void rose_rt_free(void);
 
 /* rose_subr.c */
-extern void rose_clear_queues(struct sock *);
-extern void rose_frames_acked(struct sock *, unsigned short);
-extern void rose_requeue_frames(struct sock *);
-extern int  rose_validate_nr(struct sock *, unsigned short);
-extern void rose_write_internal(struct sock *, int);
-extern int  rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
-extern int  rose_parse_facilities(unsigned char *, unsigned int, struct rose_facilities_struct *);
-extern void rose_disconnect(struct sock *, int, int, int);
+void rose_clear_queues(struct sock *);
+void rose_frames_acked(struct sock *, unsigned short);
+void rose_requeue_frames(struct sock *);
+int rose_validate_nr(struct sock *, unsigned short);
+void rose_write_internal(struct sock *, int);
+int rose_decode(struct sk_buff *, int *, int *, int *, int *, int *);
+int rose_parse_facilities(unsigned char *, unsigned int,
+                         struct rose_facilities_struct *);
+void rose_disconnect(struct sock *, int, int, int);
 
 /* rose_timer.c */
-extern void rose_start_heartbeat(struct sock *);
-extern void rose_start_t1timer(struct sock *);
-extern void rose_start_t2timer(struct sock *);
-extern void rose_start_t3timer(struct sock *);
-extern void rose_start_hbtimer(struct sock *);
-extern void rose_start_idletimer(struct sock *);
-extern void rose_stop_heartbeat(struct sock *);
-extern void rose_stop_timer(struct sock *);
-extern void rose_stop_idletimer(struct sock *);
+void rose_start_heartbeat(struct sock *);
+void rose_start_t1timer(struct sock *);
+void rose_start_t2timer(struct sock *);
+void rose_start_t3timer(struct sock *);
+void rose_start_hbtimer(struct sock *);
+void rose_start_idletimer(struct sock *);
+void rose_stop_heartbeat(struct sock *);
+void rose_stop_timer(struct sock *);
+void rose_stop_idletimer(struct sock *);
 
 /* sysctl_net_rose.c */
-extern void rose_register_sysctl(void);
-extern void rose_unregister_sysctl(void);
+void rose_register_sysctl(void);
+void rose_unregister_sysctl(void);
 
 #endif
index afdeeb5bec251ac773f03d8c7f0b830c9fa137e2..dd4ae0029fd802936b6628af9544cb51f77616ff 100644 (file)
@@ -39,6 +39,7 @@
 #define RTO_ONLINK     0x01
 
 #define RT_CONN_FLAGS(sk)   (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
+#define RT_CONN_FLAGS_TOS(sk,tos)   (RT_TOS(tos) | sock_flag(sk, SOCK_LOCALROUTE))
 
 struct fib_nh;
 struct fib_info;
@@ -87,34 +88,28 @@ struct ip_rt_acct {
 };
 
 struct rt_cache_stat {
-        unsigned int in_hit;
         unsigned int in_slow_tot;
         unsigned int in_slow_mc;
         unsigned int in_no_route;
         unsigned int in_brd;
         unsigned int in_martian_dst;
         unsigned int in_martian_src;
-        unsigned int out_hit;
         unsigned int out_slow_tot;
         unsigned int out_slow_mc;
-        unsigned int gc_total;
-        unsigned int gc_ignored;
-        unsigned int gc_goal_miss;
-        unsigned int gc_dst_overflow;
-        unsigned int in_hlist_search;
-        unsigned int out_hlist_search;
 };
 
 extern struct ip_rt_acct __percpu *ip_rt_acct;
 
 struct in_device;
-extern int             ip_rt_init(void);
-extern void            rt_cache_flush(struct net *net);
-extern void            rt_flush_dev(struct net_device *dev);
-extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
-extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
-                                          struct sock *sk);
-extern struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig);
+
+int ip_rt_init(void);
+void rt_cache_flush(struct net *net);
+void rt_flush_dev(struct net_device *dev);
+struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
+struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
+                                   struct sock *sk);
+struct dst_entry *ipv4_blackhole_route(struct net *net,
+                                      struct dst_entry *dst_orig);
 
 static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 *flp)
 {
@@ -162,8 +157,8 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
        return ip_route_output_key(net, fl4);
 }
 
-extern int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
-                               u8 tos, struct net_device *devin);
+int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
+                        u8 tos, struct net_device *devin);
 
 static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
                                 u8 tos, struct net_device *devin)
@@ -179,24 +174,25 @@ static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
        return err;
 }
 
-extern void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
-                            int oif, u32 mark, u8 protocol, int flow_flags);
-extern void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
-extern void ipv4_redirect(struct sk_buff *skb, struct net *net,
-                         int oif, u32 mark, u8 protocol, int flow_flags);
-extern void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
-extern void ip_rt_send_redirect(struct sk_buff *skb);
-
-extern unsigned int            inet_addr_type(struct net *net, __be32 addr);
-extern unsigned int            inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
-extern void            ip_rt_multicast_event(struct in_device *);
-extern int             ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
-extern void            ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
-extern int             ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb);
+void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif,
+                     u32 mark, u8 protocol, int flow_flags);
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
+void ipv4_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
+                  u8 protocol, int flow_flags);
+void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
+void ip_rt_send_redirect(struct sk_buff *skb);
+
+unsigned int inet_addr_type(struct net *net, __be32 addr);
+unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
+                               __be32 addr);
+void ip_rt_multicast_event(struct in_device *);
+int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
+void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
+int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb);
 
 struct in_ifaddr;
-extern void fib_add_ifaddr(struct in_ifaddr *);
-extern void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
+void fib_add_ifaddr(struct in_ifaddr *);
+void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
 
 static inline void ip_rt_put(struct rtable *rt)
 {
index 702664833a53d65c24e3b60316bbf37804e013c3..bb13a182fba6ed076851f58590ab662e429bf01a 100644 (file)
@@ -8,14 +8,12 @@ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *);
 typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
 typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *);
 
-extern int     __rtnl_register(int protocol, int msgtype,
-                               rtnl_doit_func, rtnl_dumpit_func,
-                               rtnl_calcit_func);
-extern void    rtnl_register(int protocol, int msgtype,
-                             rtnl_doit_func, rtnl_dumpit_func,
-                             rtnl_calcit_func);
-extern int     rtnl_unregister(int protocol, int msgtype);
-extern void    rtnl_unregister_all(int protocol);
+int __rtnl_register(int protocol, int msgtype,
+                   rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+void rtnl_register(int protocol, int msgtype,
+                  rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func);
+int rtnl_unregister(int protocol, int msgtype);
+void rtnl_unregister_all(int protocol);
 
 static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
 {
@@ -83,11 +81,11 @@ struct rtnl_link_ops {
        unsigned int            (*get_num_rx_queues)(void);
 };
 
-extern int     __rtnl_link_register(struct rtnl_link_ops *ops);
-extern void    __rtnl_link_unregister(struct rtnl_link_ops *ops);
+int __rtnl_link_register(struct rtnl_link_ops *ops);
+void __rtnl_link_unregister(struct rtnl_link_ops *ops);
 
-extern int     rtnl_link_register(struct rtnl_link_ops *ops);
-extern void    rtnl_link_unregister(struct rtnl_link_ops *ops);
+int rtnl_link_register(struct rtnl_link_ops *ops);
+void rtnl_link_unregister(struct rtnl_link_ops *ops);
 
 /**
  *     struct rtnl_af_ops - rtnetlink address family operations
@@ -117,18 +115,18 @@ struct rtnl_af_ops {
                                               const struct nlattr *attr);
 };
 
-extern int     __rtnl_af_register(struct rtnl_af_ops *ops);
-extern void    __rtnl_af_unregister(struct rtnl_af_ops *ops);
+int __rtnl_af_register(struct rtnl_af_ops *ops);
+void __rtnl_af_unregister(struct rtnl_af_ops *ops);
 
-extern int     rtnl_af_register(struct rtnl_af_ops *ops);
-extern void    rtnl_af_unregister(struct rtnl_af_ops *ops);
+int rtnl_af_register(struct rtnl_af_ops *ops);
+void rtnl_af_unregister(struct rtnl_af_ops *ops);
 
+struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
+struct net_device *rtnl_create_link(struct net *net, char *ifname,
+                                   const struct rtnl_link_ops *ops,
+                                   struct nlattr *tb[]);
+int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
 
-extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
-extern struct net_device *rtnl_create_link(struct net *net,
-       char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]);
-extern int rtnl_configure_link(struct net_device *dev,
-                              const struct ifinfomsg *ifm);
 extern const struct nla_policy ifla_policy[IFLA_MAX+1];
 
 #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
index f4eb365f7dcdadbab70e60940c519e3f83ea3253..d0a6321c302ef2316628b747da27f37cada0b920 100644 (file)
@@ -702,13 +702,20 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
 }
 
 void psched_ratecfg_precompute(struct psched_ratecfg *r,
-                              const struct tc_ratespec *conf);
+                              const struct tc_ratespec *conf,
+                              u64 rate64);
 
 static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
 {
        memset(res, 0, sizeof(*res));
-       res->rate = r->rate_bytes_ps;
+
+       /* The legacy struct tc_ratespec has a 32-bit @rate field;
+        * qdiscs using 64-bit rates should add new attributes
+        * in order to maintain compatibility.
+        */
+       res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
+
        res->overhead = r->overhead;
        res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
 }
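The comment added to psched_ratecfg_getrate() above states the compatibility rule: the legacy 32-bit tc_ratespec.rate field cannot represent rates of 2^32 bytes per second or more, so the legacy field is clamped and a qdisc wanting full precision must export the 64-bit value through an extra netlink attribute. A sketch of a dump path following that rule; TCA_SAMPLE_RATE and TCA_SAMPLE_RATE64 are made-up attribute names standing in for a qdisc's own:

static int sample_dump_rate(struct sk_buff *skb,
			    const struct psched_ratecfg *r)
{
	struct tc_ratespec res;

	psched_ratecfg_getrate(&res, r);	/* res.rate is capped at ~0U */

	if (nla_put(skb, TCA_SAMPLE_RATE, sizeof(res), &res))
		return -EMSGSIZE;

	/* rates >= 2^32 bytes/s only fit in a dedicated 64-bit attribute */
	if (r->rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64(skb, TCA_SAMPLE_RATE64, r->rate_bytes_ps))
		return -EMSGSIZE;

	return 0;
}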
index 8de2d37d2077f2df685baec887b1f836a8fa6514..262532d111f51e3a91a06af785b4721e8fab9d56 100644 (file)
@@ -33,11 +33,11 @@ struct scm_cookie {
 #endif
 };
 
-extern void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
-extern void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
-extern int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
-extern void __scm_destroy(struct scm_cookie *scm);
-extern struct scm_fp_list * scm_fp_dup(struct scm_fp_list *fpl);
+void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm);
+void scm_detach_fds_compat(struct msghdr *msg, struct scm_cookie *scm);
+int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm);
+void __scm_destroy(struct scm_cookie *scm);
+struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl);
 
 #ifdef CONFIG_SECURITY_NETWORK
 static __inline__ void unix_get_peersec_dgram(struct socket *sock, struct scm_cookie *scm)
index 3794c5ad20fef72a960c34323dab275d6f26bac7..c5fe80697f8d442d9c52cb4aabc9b4edfff85223 100644 (file)
 /*
  * sctp/protocol.c
  */
-extern int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
-                                    sctp_scope_t, gfp_t gfp,
-                                    int flags);
-extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
-extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
-extern void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
+int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *,
+                             sctp_scope_t, gfp_t gfp, int flags);
+struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
+int sctp_register_pf(struct sctp_pf *, sa_family_t);
+void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
 
 /*
  * sctp/socket.c
@@ -110,7 +109,7 @@ void sctp_sock_rfree(struct sk_buff *skb);
 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
                    struct sctp_association *asoc);
 extern struct percpu_counter sctp_sockets_allocated;
-extern int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
+int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
 
 /*
  * sctp/primitive.c
index c2e542b27a5a8316f3b105b2338bc96d5698690e..f257486f17be4bed528826544359c569e6a4b2dd 100644 (file)
@@ -3,18 +3,18 @@
 
 #include <linux/types.h>
 
-extern __u32 secure_ip_id(__be32 daddr);
-extern __u32 secure_ipv6_id(const __be32 daddr[4]);
-extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
-extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
-                                     __be16 dport);
-extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
-                                       __be16 sport, __be16 dport);
-extern __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
-                                         __be16 sport, __be16 dport);
-extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
-                                      __be16 sport, __be16 dport);
-extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
-                                        __be16 sport, __be16 dport);
+__u32 secure_ip_id(__be32 daddr);
+__u32 secure_ipv6_id(const __be32 daddr[4]);
+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+                              __be16 dport);
+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+                                __be16 sport, __be16 dport);
+__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
+                                  __be16 sport, __be16 dport);
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+                               __be16 sport, __be16 dport);
+u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+                                 __be16 sport, __be16 dport);
 
 #endif /* _NET_SECURE_SEQ */
index 1d37a8086bed53575fedc30b9c1db750d4cd2822..e3a18ff0c38b58aec20413489bf7237c54a14a77 100644 (file)
@@ -156,7 +156,7 @@ typedef __u64 __bitwise __addrpair;
  */
 struct sock_common {
        /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
-        * address on 64bit arches : cf INET_MATCH() and INET_TW_MATCH()
+        * address on 64bit arches : cf INET_MATCH()
         */
        union {
                __addrpair      skc_addrpair;
@@ -191,6 +191,12 @@ struct sock_common {
 #ifdef CONFIG_NET_NS
        struct net              *skc_net;
 #endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+       struct in6_addr         skc_v6_daddr;
+       struct in6_addr         skc_v6_rcv_saddr;
+#endif
+
        /*
         * fields between dontcopy_begin/dontcopy_end
         * are not copied in sock_copy()
@@ -218,7 +224,7 @@ struct cg_proto;
   *    @sk_lock:       synchronizer
   *    @sk_rcvbuf: size of receive buffer in bytes
   *    @sk_wq: sock wait queue and async head
-  *    @sk_rx_dst: receive input route used by early tcp demux
+  *    @sk_rx_dst: receive input route used by early demux
   *    @sk_dst_cache: destination cache
   *    @sk_dst_lock: destination cache lock
   *    @sk_policy: flow policy
@@ -233,6 +239,7 @@ struct cg_proto;
   *    @sk_ll_usec: usecs to busypoll when there is no data
   *    @sk_allocation: allocation mode
   *    @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
+  *    @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
   *    @sk_sndbuf: size of send buffer in bytes
   *    @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
   *               %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -299,6 +306,12 @@ struct sock {
 #define sk_dontcopy_begin      __sk_common.skc_dontcopy_begin
 #define sk_dontcopy_end                __sk_common.skc_dontcopy_end
 #define sk_hash                        __sk_common.skc_hash
+#define sk_portpair            __sk_common.skc_portpair
+#define sk_num                 __sk_common.skc_num
+#define sk_dport               __sk_common.skc_dport
+#define sk_addrpair            __sk_common.skc_addrpair
+#define sk_daddr               __sk_common.skc_daddr
+#define sk_rcv_saddr           __sk_common.skc_rcv_saddr
 #define sk_family              __sk_common.skc_family
 #define sk_state               __sk_common.skc_state
 #define sk_reuse               __sk_common.skc_reuse
@@ -307,6 +320,9 @@ struct sock {
 #define sk_bind_node           __sk_common.skc_bind_node
 #define sk_prot                        __sk_common.skc_prot
 #define sk_net                 __sk_common.skc_net
+#define sk_v6_daddr            __sk_common.skc_v6_daddr
+#define sk_v6_rcv_saddr        __sk_common.skc_v6_rcv_saddr
+
        socket_lock_t           sk_lock;
        struct sk_buff_head     sk_receive_queue;
        /*
@@ -363,6 +379,7 @@ struct sock {
        int                     sk_wmem_queued;
        gfp_t                   sk_allocation;
        u32                     sk_pacing_rate; /* bytes per second */
+       u32                     sk_max_pacing_rate;
        netdev_features_t       sk_route_caps;
        netdev_features_t       sk_route_nocaps;
        int                     sk_gso_type;
@@ -751,7 +768,7 @@ static inline int sk_stream_wspace(const struct sock *sk)
        return sk->sk_sndbuf - sk->sk_wmem_queued;
 }
 
-extern void sk_stream_write_space(struct sock *sk);
+void sk_stream_write_space(struct sock *sk);
 
 /* OOB backlog add */
 static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
@@ -793,7 +810,7 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
        return 0;
 }
 
-extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 
 static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
@@ -858,15 +875,15 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
                __rc;                                                   \
        })
 
-extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
-extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
-extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
-extern int sk_stream_error(struct sock *sk, int flags, int err);
-extern void sk_stream_kill_queues(struct sock *sk);
-extern void sk_set_memalloc(struct sock *sk);
-extern void sk_clear_memalloc(struct sock *sk);
+int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+void sk_stream_wait_close(struct sock *sk, long timeo_p);
+int sk_stream_error(struct sock *sk, int flags, int err);
+void sk_stream_kill_queues(struct sock *sk);
+void sk_set_memalloc(struct sock *sk);
+void sk_clear_memalloc(struct sock *sk);
 
-extern int sk_wait_data(struct sock *sk, long *timeo);
+int sk_wait_data(struct sock *sk, long *timeo);
 
 struct request_sock_ops;
 struct timewait_sock_ops;
@@ -1019,10 +1036,10 @@ enum cg_proto_flags {
 
 struct cg_proto {
        void                    (*enter_memory_pressure)(struct sock *sk);
-       struct res_counter      *memory_allocated;      /* Current allocated memory. */
-       struct percpu_counter   *sockets_allocated;     /* Current number of sockets. */
-       int                     *memory_pressure;
-       long                    *sysctl_mem;
+       struct res_counter      memory_allocated;       /* Current allocated memory. */
+       struct percpu_counter   sockets_allocated;      /* Current number of sockets. */
+       int                     memory_pressure;
+       long                    sysctl_mem[3];
        unsigned long           flags;
        /*
         * memcg field is used to find which memcg we belong directly
@@ -1036,8 +1053,8 @@ struct cg_proto {
        struct mem_cgroup       *memcg;
 };
 
-extern int proto_register(struct proto *prot, int alloc_slab);
-extern void proto_unregister(struct proto *prot);
+int proto_register(struct proto *prot, int alloc_slab);
+void proto_unregister(struct proto *prot);
 
 static inline bool memcg_proto_active(struct cg_proto *cg_proto)
 {
@@ -1118,7 +1135,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
                return false;
 
        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-               return !!*sk->sk_cgrp->memory_pressure;
+               return !!sk->sk_cgrp->memory_pressure;
 
        return !!*sk->sk_prot->memory_pressure;
 }
@@ -1138,8 +1155,8 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
                struct proto *prot = sk->sk_prot;
 
                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-                       if (*cg_proto->memory_pressure)
-                               *cg_proto->memory_pressure = 0;
+                       if (cg_proto->memory_pressure)
+                               cg_proto->memory_pressure = 0;
        }
 
 }
@@ -1175,7 +1192,7 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
        struct res_counter *fail;
        int ret;
 
-       ret = res_counter_charge_nofail(prot->memory_allocated,
+       ret = res_counter_charge_nofail(&prot->memory_allocated,
                                        amt << PAGE_SHIFT, &fail);
        if (ret < 0)
                *parent_status = OVER_LIMIT;
@@ -1184,13 +1201,13 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
 static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
                                              unsigned long amt)
 {
-       res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+       res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT);
 }
 
 static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
 {
        u64 ret;
-       ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+       ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE);
        return ret >> PAGE_SHIFT;
 }
 
@@ -1238,7 +1255,7 @@ static inline void sk_sockets_allocated_dec(struct sock *sk)
                struct cg_proto *cg_proto = sk->sk_cgrp;
 
                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-                       percpu_counter_dec(cg_proto->sockets_allocated);
+                       percpu_counter_dec(&cg_proto->sockets_allocated);
        }
 
        percpu_counter_dec(prot->sockets_allocated);
@@ -1252,7 +1269,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
                struct cg_proto *cg_proto = sk->sk_cgrp;
 
                for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
-                       percpu_counter_inc(cg_proto->sockets_allocated);
+                       percpu_counter_inc(&cg_proto->sockets_allocated);
        }
 
        percpu_counter_inc(prot->sockets_allocated);
@@ -1264,7 +1281,7 @@ sk_sockets_allocated_read_positive(struct sock *sk)
        struct proto *prot = sk->sk_prot;
 
        if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
-               return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
+               return percpu_counter_read_positive(&sk->sk_cgrp->sockets_allocated);
 
        return percpu_counter_read_positive(prot->sockets_allocated);
 }
@@ -1292,8 +1309,8 @@ proto_memory_pressure(struct proto *prot)
 
 #ifdef CONFIG_PROC_FS
 /* Called with local bh disabled */
-extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
-extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+int sock_prot_inuse_get(struct net *net, struct proto *proto);
 #else
 static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
                int inc)
@@ -1369,8 +1386,8 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
 /*
  * Functions for memory accounting
  */
-extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
-extern void __sk_mem_reclaim(struct sock *sk);
+int __sk_mem_schedule(struct sock *sk, int size, int kind);
+void __sk_mem_reclaim(struct sock *sk);
 
 #define SK_MEM_QUANTUM ((int)PAGE_SIZE)
 #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
@@ -1478,14 +1495,14 @@ do {                                                                    \
        lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);     \
 } while (0)
 
-extern void lock_sock_nested(struct sock *sk, int subclass);
+void lock_sock_nested(struct sock *sk, int subclass);
 
 static inline void lock_sock(struct sock *sk)
 {
        lock_sock_nested(sk, 0);
 }
 
-extern void release_sock(struct sock *sk);
+void release_sock(struct sock *sk);
 
 /* BH context may only use the following locking interface. */
 #define bh_lock_sock(__sk)     spin_lock(&((__sk)->sk_lock.slock))
@@ -1494,7 +1511,7 @@ extern void release_sock(struct sock *sk);
                                SINGLE_DEPTH_NESTING)
 #define bh_unlock_sock(__sk)   spin_unlock(&((__sk)->sk_lock.slock))
 
-extern bool lock_sock_fast(struct sock *sk);
+bool lock_sock_fast(struct sock *sk);
 /**
  * unlock_sock_fast - complement of lock_sock_fast
  * @sk: socket
@@ -1512,108 +1529,84 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
 }
 
 
-extern struct sock             *sk_alloc(struct net *net, int family,
-                                         gfp_t priority,
-                                         struct proto *prot);
-extern void                    sk_free(struct sock *sk);
-extern void                    sk_release_kernel(struct sock *sk);
-extern struct sock             *sk_clone_lock(const struct sock *sk,
-                                              const gfp_t priority);
-
-extern struct sk_buff          *sock_wmalloc(struct sock *sk,
-                                             unsigned long size, int force,
-                                             gfp_t priority);
-extern struct sk_buff          *sock_rmalloc(struct sock *sk,
-                                             unsigned long size, int force,
-                                             gfp_t priority);
-extern void                    sock_wfree(struct sk_buff *skb);
-extern void                    skb_orphan_partial(struct sk_buff *skb);
-extern void                    sock_rfree(struct sk_buff *skb);
-extern void                    sock_edemux(struct sk_buff *skb);
-
-extern int                     sock_setsockopt(struct socket *sock, int level,
-                                               int op, char __user *optval,
-                                               unsigned int optlen);
-
-extern int                     sock_getsockopt(struct socket *sock, int level,
-                                               int op, char __user *optval,
-                                               int __user *optlen);
-extern struct sk_buff          *sock_alloc_send_skb(struct sock *sk,
-                                                    unsigned long size,
-                                                    int noblock,
-                                                    int *errcode);
-extern struct sk_buff          *sock_alloc_send_pskb(struct sock *sk,
-                                                     unsigned long header_len,
-                                                     unsigned long data_len,
-                                                     int noblock,
-                                                     int *errcode,
-                                                     int max_page_order);
-extern void *sock_kmalloc(struct sock *sk, int size,
-                         gfp_t priority);
-extern void sock_kfree_s(struct sock *sk, void *mem, int size);
-extern void sk_send_sigurg(struct sock *sk);
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+                     struct proto *prot);
+void sk_free(struct sock *sk);
+void sk_release_kernel(struct sock *sk);
+struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority);
+
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+                            gfp_t priority);
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+                            gfp_t priority);
+void sock_wfree(struct sk_buff *skb);
+void skb_orphan_partial(struct sk_buff *skb);
+void sock_rfree(struct sk_buff *skb);
+void sock_edemux(struct sk_buff *skb);
+
+int sock_setsockopt(struct socket *sock, int level, int op,
+                   char __user *optval, unsigned int optlen);
+
+int sock_getsockopt(struct socket *sock, int level, int op,
+                   char __user *optval, int __user *optlen);
+struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
+                                   int noblock, int *errcode);
+struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
+                                    unsigned long data_len, int noblock,
+                                    int *errcode, int max_page_order);
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
+void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sk_send_sigurg(struct sock *sk);
 
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * does not implement a particular function.
  */
-extern int                      sock_no_bind(struct socket *,
-                                            struct sockaddr *, int);
-extern int                      sock_no_connect(struct socket *,
-                                               struct sockaddr *, int, int);
-extern int                      sock_no_socketpair(struct socket *,
-                                                  struct socket *);
-extern int                      sock_no_accept(struct socket *,
-                                              struct socket *, int);
-extern int                      sock_no_getname(struct socket *,
-                                               struct sockaddr *, int *, int);
-extern unsigned int             sock_no_poll(struct file *, struct socket *,
-                                            struct poll_table_struct *);
-extern int                      sock_no_ioctl(struct socket *, unsigned int,
-                                             unsigned long);
-extern int                     sock_no_listen(struct socket *, int);
-extern int                      sock_no_shutdown(struct socket *, int);
-extern int                     sock_no_getsockopt(struct socket *, int , int,
-                                                  char __user *, int __user *);
-extern int                     sock_no_setsockopt(struct socket *, int, int,
-                                                  char __user *, unsigned int);
-extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
-                                               struct msghdr *, size_t);
-extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
-                                               struct msghdr *, size_t, int);
-extern int                     sock_no_mmap(struct file *file,
-                                            struct socket *sock,
-                                            struct vm_area_struct *vma);
-extern ssize_t                 sock_no_sendpage(struct socket *sock,
-                                               struct page *page,
-                                               int offset, size_t size,
-                                               int flags);
+int sock_no_bind(struct socket *, struct sockaddr *, int);
+int sock_no_connect(struct socket *, struct sockaddr *, int, int);
+int sock_no_socketpair(struct socket *, struct socket *);
+int sock_no_accept(struct socket *, struct socket *, int);
+int sock_no_getname(struct socket *, struct sockaddr *, int *, int);
+unsigned int sock_no_poll(struct file *, struct socket *,
+                         struct poll_table_struct *);
+int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
+int sock_no_listen(struct socket *, int);
+int sock_no_shutdown(struct socket *, int);
+int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
+int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
+int sock_no_sendmsg(struct kiocb *, struct socket *, struct msghdr *, size_t);
+int sock_no_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
+                   int);
+int sock_no_mmap(struct file *file, struct socket *sock,
+                struct vm_area_struct *vma);
+ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset,
+                        size_t size, int flags);
 
 /*
  * Functions to fill in entries in struct proto_ops when a protocol
  * uses the inet style.
  */
-extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int __user *optlen);
-extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
                               struct msghdr *msg, size_t size, int flags);
-extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, unsigned int optlen);
-extern int compat_sock_common_getsockopt(struct socket *sock, int level,
+int compat_sock_common_getsockopt(struct socket *sock, int level,
                int optname, char __user *optval, int __user *optlen);
-extern int compat_sock_common_setsockopt(struct socket *sock, int level,
+int compat_sock_common_setsockopt(struct socket *sock, int level,
                int optname, char __user *optval, unsigned int optlen);
 
-extern void sk_common_release(struct sock *sk);
+void sk_common_release(struct sock *sk);
 
 /*
  *     Default socket callbacks and setup code
  */
 
 /* Initialise core socket variables */
-extern void sock_init_data(struct socket *sock, struct sock *sk);
+void sock_init_data(struct socket *sock, struct sock *sk);
 
-extern void sk_filter_release_rcu(struct rcu_head *rcu);
+void sk_filter_release_rcu(struct rcu_head *rcu);
 
 /**
  *     sk_filter_release - release a socket filter
@@ -1630,16 +1623,14 @@ static inline void sk_filter_release(struct sk_filter *fp)
 
 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 {
-       unsigned int size = sk_filter_len(fp);
-
-       atomic_sub(size, &sk->sk_omem_alloc);
+       atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
        sk_filter_release(fp);
 }
 
 static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
        atomic_inc(&fp->refcnt);
-       atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
+       atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
 }
 
 /*
@@ -1673,9 +1664,12 @@ static inline void sock_put(struct sock *sk)
        if (atomic_dec_and_test(&sk->sk_refcnt))
                sk_free(sk);
 }
+/* Generic version of sock_put(), dealing with all sockets
+ * (TCP_TIMEWAIT, ESTABLISHED...)
+ */
+void sock_gen_put(struct sock *sk);
 
-extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
-                         const int nested);
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
 
 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
 {
@@ -1729,8 +1723,8 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
        write_unlock_bh(&sk->sk_callback_lock);
 }
 
-extern kuid_t sock_i_uid(struct sock *sk);
-extern unsigned long sock_i_ino(struct sock *sk);
+kuid_t sock_i_uid(struct sock *sk);
+unsigned long sock_i_ino(struct sock *sk);
 
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
@@ -1752,8 +1746,6 @@ sk_dst_get(struct sock *sk)
        return dst;
 }
 
-extern void sk_reset_txq(struct sock *sk);
-
 static inline void dst_negative_advice(struct sock *sk)
 {
        struct dst_entry *ndst, *dst = __sk_dst_get(sk);
@@ -1763,7 +1755,7 @@ static inline void dst_negative_advice(struct sock *sk)
 
                if (ndst != dst) {
                        rcu_assign_pointer(sk->sk_dst_cache, ndst);
-                       sk_reset_txq(sk);
+                       sk_tx_queue_clear(sk);
                }
        }
 }
@@ -1805,16 +1797,16 @@ sk_dst_reset(struct sock *sk)
        spin_unlock(&sk->sk_dst_lock);
 }
 
-extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
-extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
 static inline bool sk_can_gso(const struct sock *sk)
 {
        return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
 }
 
-extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
 
 static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 {
@@ -2027,14 +2019,14 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
        sk_mem_charge(sk, skb->truesize);
 }
 
-extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
-                          unsigned long expires);
+void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+                   unsigned long expires);
 
-extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
-extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
-extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
 
 /*
  *     Recover an error report and clear atomically
@@ -2102,7 +2094,7 @@ static inline struct page_frag *sk_page_frag(struct sock *sk)
        return &sk->sk_frag;
 }
 
-extern bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
 
 /*
  *     Default write policy as shown to user space via poll/select/SIGIO
@@ -2140,10 +2132,10 @@ static inline int sock_intr_errno(long timeo)
        return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
 }
 
-extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
-       struct sk_buff *skb);
-extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
-       struct sk_buff *skb);
+void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+                          struct sk_buff *skb);
+void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+                            struct sk_buff *skb);
 
 static inline void
 sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -2176,8 +2168,8 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
                __sock_recv_wifi_status(msg, sk, skb);
 }
 
-extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
-                                    struct sk_buff *skb);
+void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+                             struct sk_buff *skb);
 
 static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
                                          struct sk_buff *skb)
@@ -2202,7 +2194,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
  *
  * Currently only depends on SOCK_TIMESTAMPING* flags.
  */
-extern void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
+void sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
 
 /**
  * sk_eat_skb - Release a skb if it is no longer needed
@@ -2266,11 +2258,11 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
        return NULL;
 }
 
-extern void sock_enable_timestamp(struct sock *sk, int flag);
-extern int sock_get_timestamp(struct sock *, struct timeval __user *);
-extern int sock_get_timestampns(struct sock *, struct timespec __user *);
-extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
-                             int level, int type);
+void sock_enable_timestamp(struct sock *sk, int flag);
+int sock_get_timestamp(struct sock *, struct timeval __user *);
+int sock_get_timestampns(struct sock *, struct timespec __user *);
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
+                      int type);
 
 /*
  *     Enable debug/info messages
index ad447f105417def47daff3e65f93de65d26f0917..3af174d70d9e8ca988122b75358703b99c0b4d4a 100644 (file)
@@ -8,7 +8,7 @@ struct stp_proto {
        void            *data;
 };
 
-extern int stp_proto_register(const struct stp_proto *proto);
-extern void stp_proto_unregister(const struct stp_proto *proto);
+int stp_proto_register(const struct stp_proto *proto);
+void stp_proto_unregister(const struct stp_proto *proto);
 
 #endif /* _NET_STP_H */
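
Editorial note: nearly every hunk in this commit makes the same change that the stp.h diff above shows in isolation — the redundant `extern` keyword is dropped from function prototypes. In C, a function declaration at file scope has external linkage whether or not `extern` is written, so the shorter form declares exactly the same symbol. The snippet below is a minimal, self-contained illustration of that equivalence; it is not taken from the kernel tree, and example_register is a made-up name.

/* Illustration only: both prototypes declare the same external function;
 * dropping `extern` changes nothing about linkage or ABI.
 */
extern int example_register(const void *proto);	/* old style */
int example_register(const void *proto);		/* new style, equivalent */

int example_register(const void *proto)
{
	(void)proto;
	return 0;
}
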
index b1aa324c5e6512791cbf3340aca34dacc73bdfdb..2d7b4bdc972ff38ac5a0a0f357c8c081c7d09014 100644 (file)
@@ -50,7 +50,7 @@
 extern struct inet_hashinfo tcp_hashinfo;
 
 extern struct percpu_counter tcp_orphan_count;
-extern void tcp_time_wait(struct sock *sk, int state, int timeo);
+void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 #define MAX_TCP_HEADER (128 + MAX_HEADER)
 #define MAX_TCP_OPTION_SPACE 40
@@ -259,6 +259,7 @@ extern int sysctl_tcp_max_orphans;
 extern int sysctl_tcp_fack;
 extern int sysctl_tcp_reordering;
 extern int sysctl_tcp_dsack;
+extern long sysctl_tcp_mem[3];
 extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
@@ -325,7 +326,7 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
        return false;
 }
 
-extern bool tcp_check_oom(struct sock *sk, int shift);
+bool tcp_check_oom(struct sock *sk, int shift);
 
 /* syncookies: remember time of last synqueue overflow */
 static inline void tcp_synq_overflow(struct sock *sk)
@@ -348,38 +349,36 @@ extern struct proto tcp_prot;
 #define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
-extern void tcp_init_mem(struct net *net);
-
-extern void tcp_tasklet_init(void);
-
-extern void tcp_v4_err(struct sk_buff *skb, u32);
-
-extern void tcp_shutdown (struct sock *sk, int how);
-
-extern void tcp_v4_early_demux(struct sk_buff *skb);
-extern int tcp_v4_rcv(struct sk_buff *skb);
-
-extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t size);
-extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
-                       size_t size, int flags);
-extern void tcp_release_cb(struct sock *sk);
-extern void tcp_wfree(struct sk_buff *skb);
-extern void tcp_write_timer_handler(struct sock *sk);
-extern void tcp_delack_timer_handler(struct sock *sk);
-extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-                                const struct tcphdr *th, unsigned int len);
-extern void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-                               const struct tcphdr *th, unsigned int len);
-extern void tcp_rcv_space_adjust(struct sock *sk);
-extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
-extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
-extern void tcp_twsk_destructor(struct sock *sk);
-extern ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
-                              struct pipe_inode_info *pipe, size_t len,
-                              unsigned int flags);
+void tcp_tasklet_init(void);
+
+void tcp_v4_err(struct sk_buff *skb, u32);
+
+void tcp_shutdown(struct sock *sk, int how);
+
+void tcp_v4_early_demux(struct sk_buff *skb);
+int tcp_v4_rcv(struct sk_buff *skb);
+
+int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
+int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+               size_t size);
+int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+                int flags);
+void tcp_release_cb(struct sock *sk);
+void tcp_wfree(struct sk_buff *skb);
+void tcp_write_timer_handler(struct sock *sk);
+void tcp_delack_timer_handler(struct sock *sk);
+int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+                         const struct tcphdr *th, unsigned int len);
+void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+                        const struct tcphdr *th, unsigned int len);
+void tcp_rcv_space_adjust(struct sock *sk);
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
+void tcp_twsk_destructor(struct sock *sk);
+ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
+                       struct pipe_inode_info *pipe, size_t len,
+                       unsigned int flags);
 
 static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
@@ -409,66 +408,65 @@ enum tcp_tw_status {
 };
 
 
-extern enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
-                                                    struct sk_buff *skb,
-                                                    const struct tcphdr *th);
-extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
-                                  struct request_sock *req,
-                                  struct request_sock **prev,
-                                  bool fastopen);
-extern int tcp_child_process(struct sock *parent, struct sock *child,
-                            struct sk_buff *skb);
-extern void tcp_enter_loss(struct sock *sk, int how);
-extern void tcp_clear_retrans(struct tcp_sock *tp);
-extern void tcp_update_metrics(struct sock *sk);
-extern void tcp_init_metrics(struct sock *sk);
-extern void tcp_metrics_init(void);
-extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
-extern bool tcp_remember_stamp(struct sock *sk);
-extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
-extern void tcp_disable_fack(struct tcp_sock *tp);
-extern void tcp_close(struct sock *sk, long timeout);
-extern void tcp_init_sock(struct sock *sk);
-extern unsigned int tcp_poll(struct file * file, struct socket *sock,
-                            struct poll_table_struct *wait);
-extern int tcp_getsockopt(struct sock *sk, int level, int optname,
+enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
+                                             struct sk_buff *skb,
+                                             const struct tcphdr *th);
+struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
+                          struct request_sock *req, struct request_sock **prev,
+                          bool fastopen);
+int tcp_child_process(struct sock *parent, struct sock *child,
+                     struct sk_buff *skb);
+void tcp_enter_loss(struct sock *sk, int how);
+void tcp_clear_retrans(struct tcp_sock *tp);
+void tcp_update_metrics(struct sock *sk);
+void tcp_init_metrics(struct sock *sk);
+void tcp_metrics_init(void);
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
+                       bool paws_check);
+bool tcp_remember_stamp(struct sock *sk);
+bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
+void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
+void tcp_disable_fack(struct tcp_sock *tp);
+void tcp_close(struct sock *sk, long timeout);
+void tcp_init_sock(struct sock *sk);
+unsigned int tcp_poll(struct file *file, struct socket *sock,
+                     struct poll_table_struct *wait);
+int tcp_getsockopt(struct sock *sk, int level, int optname,
+                  char __user *optval, int __user *optlen);
+int tcp_setsockopt(struct sock *sk, int level, int optname,
+                  char __user *optval, unsigned int optlen);
+int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen);
-extern int tcp_setsockopt(struct sock *sk, int level, int optname,
+int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen);
-extern int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, int __user *optlen);
-extern int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, unsigned int optlen);
-extern void tcp_set_keepalive(struct sock *sk, int val);
-extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
-extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t len, int nonblock, int flags, int *addr_len);
-extern void tcp_parse_options(const struct sk_buff *skb,
-                             struct tcp_options_received *opt_rx,
-                             int estab, struct tcp_fastopen_cookie *foc);
-extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
+void tcp_set_keepalive(struct sock *sk, int val);
+void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
+int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+               size_t len, int nonblock, int flags, int *addr_len);
+void tcp_parse_options(const struct sk_buff *skb,
+                      struct tcp_options_received *opt_rx,
+                      int estab, struct tcp_fastopen_cookie *foc);
+const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 /*
  *     TCP v4 functions exported for the inet6 API
  */
 
-extern void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
-extern int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
-extern struct sock * tcp_create_openreq_child(struct sock *sk,
-                                             struct request_sock *req,
-                                             struct sk_buff *skb);
-extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
-                                         struct request_sock *req,
-                                         struct dst_entry *dst);
-extern int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
-extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
-                         int addr_len);
-extern int tcp_connect(struct sock *sk);
-extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
-                                       struct request_sock *req,
-                                       struct tcp_fastopen_cookie *foc);
-extern int tcp_disconnect(struct sock *sk, int flags);
+void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+struct sock *tcp_create_openreq_child(struct sock *sk,
+                                     struct request_sock *req,
+                                     struct sk_buff *skb);
+struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+                                 struct request_sock *req,
+                                 struct dst_entry *dst);
+int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
+int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+int tcp_connect(struct sock *sk);
+struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+                               struct request_sock *req,
+                               struct tcp_fastopen_cookie *foc);
+int tcp_disconnect(struct sock *sk, int flags);
 
 void tcp_connect_init(struct sock *sk);
 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -476,16 +474,32 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
 
 /* From syncookies.c */
-extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
-                            u32 cookie);
-extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, 
-                                   struct ip_options *opt);
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+                     u32 cookie);
+struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+                            struct ip_options *opt);
 #ifdef CONFIG_SYN_COOKIES
-extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
-                                    const struct tcphdr *th, u16 *mssp);
-extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, 
-                                    __u16 *mss);
+#include <linux/ktime.h>
+
+/* Syncookies use a monotonic timer which increments every 64 seconds.
+ * This counter is used both as a hash input and partially encoded into
+ * the cookie value.  A cookie is only validated further if the delta
+ * between the current counter value and the encoded one is less than this,
+ * i.e. a sent cookie is valid only at most for 128 seconds (or less if
+ * the counter advances immediately after a cookie is generated).
+ */
+#define MAX_SYNCOOKIE_AGE 2
+
+static inline u32 tcp_cookie_time(void)
+{
+       struct timespec now;
+       getnstimeofday(&now);
+       return now.tv_sec >> 6; /* 64 seconds granularity */
+}
+
+u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+                             u16 *mssp);
+__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
 #else
 static inline __u32 cookie_v4_init_sequence(struct sock *sk,
                                            struct sk_buff *skb,
@@ -495,19 +509,19 @@ static inline __u32 cookie_v4_init_sequence(struct sock *sk,
 }
 #endif
 
-extern __u32 cookie_init_timestamp(struct request_sock *req);
-extern bool cookie_check_timestamp(struct tcp_options_received *opt,
-                               struct net *net, bool *ecn_ok);
+__u32 cookie_init_timestamp(struct request_sock *req);
+bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
+                           bool *ecn_ok);
 
 /* From net/ipv6/syncookies.c */
-extern int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
-                            u32 cookie);
-extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+                     u32 cookie);
+struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
 #ifdef CONFIG_SYN_COOKIES
-extern u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
-                                    const struct tcphdr *th, u16 *mssp);
-extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
-                                    __u16 *mss);
+u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+                             const struct tcphdr *th, u16 *mssp);
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
+                             __u16 *mss);
 #else
 static inline __u32 cookie_v6_init_sequence(struct sock *sk,
                                            struct sk_buff *skb,
@@ -518,47 +532,46 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk,
 #endif
 /* tcp_output.c */
 
-extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
-                                     int nonagle);
-extern bool tcp_may_send_now(struct sock *sk);
-extern int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
-extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
-extern void tcp_retransmit_timer(struct sock *sk);
-extern void tcp_xmit_retransmit_queue(struct sock *);
-extern void tcp_simple_retransmit(struct sock *);
-extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
-
-extern void tcp_send_probe0(struct sock *);
-extern void tcp_send_partial(struct sock *);
-extern int tcp_write_wakeup(struct sock *);
-extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
-extern int tcp_send_synack(struct sock *);
-extern bool tcp_syn_flood_action(struct sock *sk,
-                                const struct sk_buff *skb,
-                                const char *proto);
-extern void tcp_push_one(struct sock *, unsigned int mss_now);
-extern void tcp_send_ack(struct sock *sk);
-extern void tcp_send_delayed_ack(struct sock *sk);
-extern void tcp_send_loss_probe(struct sock *sk);
-extern bool tcp_schedule_loss_probe(struct sock *sk);
+void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
+                              int nonagle);
+bool tcp_may_send_now(struct sock *sk);
+int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
+int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+void tcp_retransmit_timer(struct sock *sk);
+void tcp_xmit_retransmit_queue(struct sock *);
+void tcp_simple_retransmit(struct sock *);
+int tcp_trim_head(struct sock *, struct sk_buff *, u32);
+int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
+
+void tcp_send_probe0(struct sock *);
+void tcp_send_partial(struct sock *);
+int tcp_write_wakeup(struct sock *);
+void tcp_send_fin(struct sock *sk);
+void tcp_send_active_reset(struct sock *sk, gfp_t priority);
+int tcp_send_synack(struct sock *);
+bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
+                         const char *proto);
+void tcp_push_one(struct sock *, unsigned int mss_now);
+void tcp_send_ack(struct sock *sk);
+void tcp_send_delayed_ack(struct sock *sk);
+void tcp_send_loss_probe(struct sock *sk);
+bool tcp_schedule_loss_probe(struct sock *sk);
 
 /* tcp_input.c */
-extern void tcp_cwnd_application_limited(struct sock *sk);
-extern void tcp_resume_early_retransmit(struct sock *sk);
-extern void tcp_rearm_rto(struct sock *sk);
-extern void tcp_reset(struct sock *sk);
+void tcp_cwnd_application_limited(struct sock *sk);
+void tcp_resume_early_retransmit(struct sock *sk);
+void tcp_rearm_rto(struct sock *sk);
+void tcp_reset(struct sock *sk);
 
 /* tcp_timer.c */
-extern void tcp_init_xmit_timers(struct sock *);
+void tcp_init_xmit_timers(struct sock *);
 static inline void tcp_clear_xmit_timers(struct sock *sk)
 {
        inet_csk_clear_xmit_timers(sk);
 }
 
-extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
-extern unsigned int tcp_current_mss(struct sock *sk);
+unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
+unsigned int tcp_current_mss(struct sock *sk);
 
 /* Bound MSS / TSO packet size with the half of the window */
 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
@@ -584,20 +597,20 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 }
 
 /* tcp.c */
-extern void tcp_get_info(const struct sock *, struct tcp_info *);
+void tcp_get_info(const struct sock *, struct tcp_info *);
 
 /* Read 'sendfile()'-style from a TCP socket */
 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
                                unsigned int, size_t);
-extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
-                        sk_read_actor_t recv_actor);
+int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
+                 sk_read_actor_t recv_actor);
 
-extern void tcp_initialize_rcv_mss(struct sock *sk);
+void tcp_initialize_rcv_mss(struct sock *sk);
 
-extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
-extern int tcp_mss_to_mtu(struct sock *sk, int mss);
-extern void tcp_mtup_init(struct sock *sk);
-extern void tcp_init_buffer_space(struct sock *sk);
+int tcp_mtu_to_mss(struct sock *sk, int pmtu);
+int tcp_mss_to_mtu(struct sock *sk, int mss);
+void tcp_mtup_init(struct sock *sk);
+void tcp_init_buffer_space(struct sock *sk);
 
 static inline void tcp_bound_rto(const struct sock *sk)
 {
@@ -610,7 +623,7 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
        return (tp->srtt >> 3) + tp->rttvar;
 }
 
-extern void tcp_set_rto(struct sock *sk);
+void tcp_set_rto(struct sock *sk);
 
 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
 {
@@ -663,7 +676,7 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp)
  * scaling applied to the result.  The caller does these things
  * if necessary.  This is a "raw" window selection.
  */
-extern u32 __tcp_select_window(struct sock *sk);
+u32 __tcp_select_window(struct sock *sk);
 
 void tcp_send_window_probe(struct sock *sk);
 
@@ -800,24 +813,24 @@ struct tcp_congestion_ops {
        struct module   *owner;
 };
 
-extern int tcp_register_congestion_control(struct tcp_congestion_ops *type);
-extern void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
+int tcp_register_congestion_control(struct tcp_congestion_ops *type);
+void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
 
-extern void tcp_init_congestion_control(struct sock *sk);
-extern void tcp_cleanup_congestion_control(struct sock *sk);
-extern int tcp_set_default_congestion_control(const char *name);
-extern void tcp_get_default_congestion_control(char *name);
-extern void tcp_get_available_congestion_control(char *buf, size_t len);
-extern void tcp_get_allowed_congestion_control(char *buf, size_t len);
-extern int tcp_set_allowed_congestion_control(char *allowed);
-extern int tcp_set_congestion_control(struct sock *sk, const char *name);
-extern void tcp_slow_start(struct tcp_sock *tp);
-extern void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
+void tcp_init_congestion_control(struct sock *sk);
+void tcp_cleanup_congestion_control(struct sock *sk);
+int tcp_set_default_congestion_control(const char *name);
+void tcp_get_default_congestion_control(char *name);
+void tcp_get_available_congestion_control(char *buf, size_t len);
+void tcp_get_allowed_congestion_control(char *buf, size_t len);
+int tcp_set_allowed_congestion_control(char *allowed);
+int tcp_set_congestion_control(struct sock *sk, const char *name);
+void tcp_slow_start(struct tcp_sock *tp);
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
-extern u32 tcp_reno_ssthresh(struct sock *sk);
-extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
-extern u32 tcp_reno_min_cwnd(const struct sock *sk);
+u32 tcp_reno_ssthresh(struct sock *sk);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
+u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -936,8 +949,8 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 /* Use define here intentionally to get WARN_ON location shown at the caller */
 #define tcp_verify_left_out(tp)        WARN_ON(tcp_left_out(tp) > tp->packets_out)
 
-extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
-extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
+__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 
 /* The maximum number of MSS of available cwnd for which TSO defers
  * sending if not using sysctl_tcp_tso_win_divisor.
@@ -963,7 +976,7 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
        return tp->snd_una + tp->snd_wnd;
 }
-extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
 
 static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
                                       const struct sk_buff *skb)
@@ -1028,7 +1041,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
 #endif
 }
 
-extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
+bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
 
 #undef STATE_TRACE
 
@@ -1039,9 +1052,9 @@ static const char *statename[]={
        "Close Wait","Last ACK","Listen","Closing"
 };
 #endif
-extern void tcp_set_state(struct sock *sk, int state);
+void tcp_set_state(struct sock *sk, int state);
 
-extern void tcp_done(struct sock *sk);
+void tcp_done(struct sock *sk);
 
 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
 {
@@ -1049,13 +1062,12 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
        rx_opt->num_sacks = 0;
 }
 
-extern u32 tcp_default_init_rwnd(u32 mss);
+u32 tcp_default_init_rwnd(u32 mss);
 
 /* Determine a window scaling and initial window to offer. */
-extern void tcp_select_initial_window(int __space, __u32 mss,
-                                     __u32 *rcv_wnd, __u32 *window_clamp,
-                                     int wscale_ok, __u8 *rcv_wscale,
-                                     __u32 init_rcv_wnd);
+void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
+                              __u32 *window_clamp, int wscale_ok,
+                              __u8 *rcv_wscale, __u32 init_rcv_wnd);
 
 static inline int tcp_win_from_space(int space)
 {
@@ -1095,11 +1107,11 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->wscale_ok = rx_opt->wscale_ok;
        ireq->acked = 0;
        ireq->ecn_ok = 0;
-       ireq->rmt_port = tcp_hdr(skb)->source;
-       ireq->loc_port = tcp_hdr(skb)->dest;
+       ireq->ir_rmt_port = tcp_hdr(skb)->source;
+       ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
 }
 
-extern void tcp_enter_memory_pressure(struct sock *sk);
+void tcp_enter_memory_pressure(struct sock *sk);
 
 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
 {
@@ -1252,21 +1264,20 @@ struct tcp_md5sig_pool {
 };
 
 /* - functions */
-extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-                              const struct sock *sk,
-                              const struct request_sock *req,
-                              const struct sk_buff *skb);
-extern int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
-                         int family, const u8 *newkey,
-                         u8 newkeylen, gfp_t gfp);
-extern int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
-                         int family);
-extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
+                       const struct sock *sk, const struct request_sock *req,
+                       const struct sk_buff *skb);
+int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+                  int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
+int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
+                  int family);
+struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
                                         struct sock *addr_sk);
 
 #ifdef CONFIG_TCP_MD5SIG
-extern struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
-                       const union tcp_md5_addr *addr, int family);
+struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
+                                        const union tcp_md5_addr *addr,
+                                        int family);
 #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
 #else
 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
@@ -1278,27 +1289,26 @@ static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
 #define tcp_twsk_md5_key(twsk) NULL
 #endif
 
-extern bool tcp_alloc_md5sig_pool(void);
+bool tcp_alloc_md5sig_pool(void);
 
-extern struct tcp_md5sig_pool  *tcp_get_md5sig_pool(void);
+struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
 static inline void tcp_put_md5sig_pool(void)
 {
        local_bh_enable();
 }
 
-extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
-extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
-                                unsigned int header_len);
-extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
-                           const struct tcp_md5sig_key *key);
+int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
+int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
+                         unsigned int header_len);
+int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+                    const struct tcp_md5sig_key *key);
 
 /* From tcp_fastopen.c */
-extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-                                  struct tcp_fastopen_cookie *cookie,
-                                  int *syn_loss, unsigned long *last_syn_loss);
-extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
-                                  struct tcp_fastopen_cookie *cookie,
-                                  bool syn_lost);
+void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+                           struct tcp_fastopen_cookie *cookie, int *syn_loss,
+                           unsigned long *last_syn_loss);
+void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+                           struct tcp_fastopen_cookie *cookie, bool syn_lost);
 struct tcp_fastopen_request {
        /* Fast Open cookie. Size 0 means a cookie request */
        struct tcp_fastopen_cookie      cookie;
@@ -1309,9 +1319,9 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
 
 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-extern void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
-                                   struct tcp_fastopen_cookie *foc);
-
+void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+                            struct tcp_fastopen_cookie *foc);
+void tcp_fastopen_init_key_once(bool publish);
 #define TCP_FASTOPEN_KEY_LENGTH 16
 
 /* Fastopen key context */
@@ -1507,7 +1517,6 @@ enum tcp_seq_states {
        TCP_SEQ_STATE_LISTENING,
        TCP_SEQ_STATE_OPENREQ,
        TCP_SEQ_STATE_ESTABLISHED,
-       TCP_SEQ_STATE_TIME_WAIT,
 };
 
 int tcp_seq_open(struct inode *inode, struct file *file);
@@ -1529,22 +1538,20 @@ struct tcp_iter_state {
        loff_t                  last_pos;
 };
 
-extern int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
-extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
+int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
+void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
 
 extern struct request_sock_ops tcp_request_sock_ops;
 extern struct request_sock_ops tcp6_request_sock_ops;
 
-extern void tcp_v4_destroy_sock(struct sock *sk);
+void tcp_v4_destroy_sock(struct sock *sk);
 
-extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
-                                      netdev_features_t features);
-extern struct sk_buff **tcp_gro_receive(struct sk_buff **head,
-                                       struct sk_buff *skb);
-extern int tcp_gro_complete(struct sk_buff *skb);
+struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
+                               netdev_features_t features);
+struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int tcp_gro_complete(struct sk_buff *skb);
 
-extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
-                               __be32 daddr);
+void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
 
 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 {
@@ -1560,8 +1567,8 @@ static inline bool tcp_stream_memory_free(const struct sock *sk)
 }
 
 #ifdef CONFIG_PROC_FS
-extern int tcp4_proc_init(void);
-extern void tcp4_proc_exit(void);
+int tcp4_proc_init(void);
+void tcp4_proc_exit(void);
 #endif
 
 /* TCP af-specific functions */
@@ -1592,9 +1599,9 @@ struct tcp_request_sock_ops {
 #endif
 };
 
-extern int tcpv4_offload_init(void);
+int tcpv4_offload_init(void);
 
-extern void tcp_v4_init(void);
-extern void tcp_init(void);
+void tcp_v4_init(void);
+void tcp_init(void);
 
 #endif /* _TCP_H */
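
Editorial note: the CONFIG_SYN_COOKIES block added to tcp.h above replaces the old syncookie_secret[] declaration with a coarse timestamp helper — tcp_cookie_time() returns wall-clock seconds shifted right by 6 (64-second granularity), and the accompanying comment says a cookie is honoured only while the current counter minus the one encoded in the cookie stays below MAX_SYNCOOKIE_AGE (2), i.e. for at most roughly 128 seconds. The standalone sketch below reproduces only that validity-window arithmetic in user space; it is not the kernel's validation path, and cookie_counter_valid() is a made-up helper.

/* User-space sketch of the 64-second-counter validity check described in
 * the comment above; unsigned arithmetic handles counter wrap-around.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MAX_SYNCOOKIE_AGE 2

static uint32_t cookie_time_now(void)
{
	return (uint32_t)(time(NULL) >> 6);	/* 64-second granularity */
}

static bool cookie_counter_valid(uint32_t encoded)
{
	uint32_t diff = cookie_time_now() - encoded;	/* wraps safely */

	return diff < MAX_SYNCOOKIE_AGE;	/* valid for at most ~128 s */
}

int main(void)
{
	uint32_t stamp = cookie_time_now();

	printf("cookie stamped with counter %u is %svalid now\n",
	       (unsigned)stamp, cookie_counter_valid(stamp) ? "" : "not ");
	return 0;
}
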
index 7df18bc43a97fccef8478436f8f43f3a5553b997..05b94d9453de8b036f1c275caa268b6f8113eac7 100644 (file)
@@ -1,19 +1,7 @@
 #ifndef _TCP_MEMCG_H
 #define _TCP_MEMCG_H
 
-struct tcp_memcontrol {
-       struct cg_proto cg_proto;
-       /* per-cgroup tcp memory pressure knobs */
-       struct res_counter tcp_memory_allocated;
-       struct percpu_counter tcp_sockets_allocated;
-       /* those two are read-mostly, leave them at the end */
-       long tcp_prot_mem[3];
-       int tcp_memory_pressure;
-};
-
 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg);
 int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
 void tcp_destroy_cgroup(struct mem_cgroup *memcg);
-unsigned long long tcp_max_memory(const struct mem_cgroup *memcg);
-void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx);
 #endif /* _TCP_MEMCG_H */
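
Editorial note: removing struct tcp_memcontrol here is the counterpart of the sock.h hunks at the top of this diff — the res_counter and percpu_counter that used to be reached through pointers in cg_proto are now embedded members, which is why memcg_memory_allocated_add() and friends pass &prot->memory_allocated and &cg_proto->sockets_allocated instead of the bare member. The sketch below uses made-up types (counter, proto_old, proto_new) purely to show why the address-of operator appears; it is not the real cg_proto layout.

/* Pointer member vs. embedded member: the callee always wants a pointer,
 * so an embedded member must have its address taken at the call site.
 */
struct counter { long usage; };

struct proto_old { struct counter *memory_allocated; };	/* pointer member */
struct proto_new { struct counter  memory_allocated; };	/* embedded member */

void counter_add(struct counter *c, long amt) { c->usage += amt; }

void charge_old(struct proto_old *p, long amt)
{
	counter_add(p->memory_allocated, amt);	/* member is already a pointer */
}

void charge_new(struct proto_new *p, long amt)
{
	counter_add(&p->memory_allocated, amt);	/* embedded: take its address */
}
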
index ef2e0b7843a0036c1b31d8e67e78b624bf05f44f..fe4ba9f32429ab7c2fc327cafd29bafadeb3561b 100644 (file)
@@ -79,7 +79,7 @@ struct udp_table {
        unsigned int            log;
 };
 extern struct udp_table udp_table;
-extern void udp_table_init(struct udp_table *, const char *);
+void udp_table_init(struct udp_table *, const char *);
 static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
                                             struct net *net, unsigned int num)
 {
@@ -162,52 +162,53 @@ static inline void udp_lib_hash(struct sock *sk)
        BUG();
 }
 
-extern void udp_lib_unhash(struct sock *sk);
-extern void udp_lib_rehash(struct sock *sk, u16 new_hash);
+void udp_lib_unhash(struct sock *sk);
+void udp_lib_rehash(struct sock *sk, u16 new_hash);
 
 static inline void udp_lib_close(struct sock *sk, long timeout)
 {
        sk_common_release(sk);
 }
 
-extern int udp_lib_get_port(struct sock *sk, unsigned short snum,
-                           int (*)(const struct sock *,const struct sock *),
-                           unsigned int hash2_nulladdr);
+int udp_lib_get_port(struct sock *sk, unsigned short snum,
+                    int (*)(const struct sock *, const struct sock *),
+                    unsigned int hash2_nulladdr);
 
 /* net/ipv4/udp.c */
-extern int udp_get_port(struct sock *sk, unsigned short snum,
-                       int (*saddr_cmp)(const struct sock *,
-                                        const struct sock *));
-extern void udp_err(struct sk_buff *, u32);
-extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
-                           struct msghdr *msg, size_t len);
-extern int udp_push_pending_frames(struct sock *sk);
-extern void udp_flush_pending_frames(struct sock *sk);
-extern void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
-extern int udp_rcv(struct sk_buff *skb);
-extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int udp_disconnect(struct sock *sk, int flags);
-extern unsigned int udp_poll(struct file *file, struct socket *sock,
-                            poll_table *wait);
-extern struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
-                                             netdev_features_t features);
-extern int udp_lib_getsockopt(struct sock *sk, int level, int optname,
-                             char __user *optval, int __user *optlen);
-extern int udp_lib_setsockopt(struct sock *sk, int level, int optname,
-                             char __user *optval, unsigned int optlen,
-                             int (*push_pending_frames)(struct sock *));
-extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
-                                   __be32 daddr, __be16 dport,
-                                   int dif);
-extern struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
-                                   __be32 daddr, __be16 dport,
-                                   int dif, struct udp_table *tbl);
-extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
-                                   const struct in6_addr *daddr, __be16 dport,
-                                   int dif);
-extern struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
-                                   const struct in6_addr *daddr, __be16 dport,
-                                   int dif, struct udp_table *tbl);
+void udp_v4_early_demux(struct sk_buff *skb);
+int udp_get_port(struct sock *sk, unsigned short snum,
+                int (*saddr_cmp)(const struct sock *,
+                                 const struct sock *));
+void udp_err(struct sk_buff *, u32);
+int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+               size_t len);
+int udp_push_pending_frames(struct sock *sk);
+void udp_flush_pending_frames(struct sock *sk);
+void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
+int udp_rcv(struct sk_buff *skb);
+int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int udp_disconnect(struct sock *sk, int flags);
+unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait);
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+                                      netdev_features_t features);
+int udp_lib_getsockopt(struct sock *sk, int level, int optname,
+                      char __user *optval, int __user *optlen);
+int udp_lib_setsockopt(struct sock *sk, int level, int optname,
+                      char __user *optval, unsigned int optlen,
+                      int (*push_pending_frames)(struct sock *));
+struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+                            __be32 daddr, __be16 dport, int dif);
+struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
+                              __be32 daddr, __be16 dport, int dif,
+                              struct udp_table *tbl);
+struct sock *udp6_lib_lookup(struct net *net,
+                            const struct in6_addr *saddr, __be16 sport,
+                            const struct in6_addr *daddr, __be16 dport,
+                            int dif);
+struct sock *__udp6_lib_lookup(struct net *net,
+                              const struct in6_addr *saddr, __be16 sport,
+                              const struct in6_addr *daddr, __be16 dport,
+                              int dif, struct udp_table *tbl);
 
 /*
  *     SNMP statistics for UDP and UDP-Lite
@@ -259,19 +260,19 @@ struct udp_iter_state {
 };
 
 #ifdef CONFIG_PROC_FS
-extern int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
-extern void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
+int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo);
+void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo);
 
-extern int udp4_proc_init(void);
-extern void udp4_proc_exit(void);
+int udp4_proc_init(void);
+void udp4_proc_exit(void);
 #endif
 
-extern int udpv4_offload_init(void);
+int udpv4_offload_init(void);
 
-extern void udp_init(void);
+void udp_init(void);
 
-extern void udp_encap_enable(void);
+void udp_encap_enable(void);
 #if IS_ENABLED(CONFIG_IPV6)
-extern void udpv6_encap_enable(void);
+void udpv6_encap_enable(void);
 #endif
 #endif /* _UDP_H */
index 71375459a8843da6cbbf7062a0e71bf90a6ee716..2caadabcd07baf6552098082fcb78672f772dcb5 100644 (file)
@@ -126,7 +126,7 @@ static inline __wsum udplite_csum(struct sk_buff *skb)
        return skb_checksum(skb, off, len, 0);
 }
 
-extern void    udplite4_register(void);
-extern int     udplite_get_port(struct sock *sk, unsigned short snum,
-                       int (*scmp)(const struct sock *, const struct sock *));
+void udplite4_register(void);
+int udplite_get_port(struct sock *sk, unsigned short snum,
+                    int (*scmp)(const struct sock *, const struct sock *));
 #endif /* _UDPLITE_H */
index 4f6e7423174cd55890d7845326c8352f66146f04..345911965dbb8f53289d63535828969454ae3333 100644 (file)
@@ -6,13 +6,13 @@
 struct net;
 
 #ifdef CONFIG_WEXT_CORE
-extern int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
-                            void __user *arg);
-extern int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
-                                   unsigned long arg);
+int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
+                     void __user *arg);
+int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
+                            unsigned long arg);
 
-extern struct iw_statistics *get_wireless_stats(struct net_device *dev);
-extern int call_commit_handler(struct net_device *dev);
+struct iw_statistics *get_wireless_stats(struct net_device *dev);
+int call_commit_handler(struct net_device *dev);
 #else
 static inline int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
                                    void __user *arg)
@@ -27,8 +27,8 @@ static inline int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
 #endif
 
 #ifdef CONFIG_WEXT_PROC
-extern int wext_proc_init(struct net *net);
-extern void wext_proc_exit(struct net *net);
+int wext_proc_init(struct net *net);
+void wext_proc_exit(struct net *net);
 #else
 static inline int wext_proc_init(struct net *net)
 {
index bbb74f990cab784a5e9ffb4dc6b391977538cf49..98498e1daa060524af05c639fe74a951f2d307e8 100644 (file)
@@ -438,9 +438,9 @@ struct wimax_dev {
  *
  * These functions are not exported to user space.
  */
-extern void wimax_dev_init(struct wimax_dev *);
-extern int wimax_dev_add(struct wimax_dev *, struct net_device *);
-extern void wimax_dev_rm(struct wimax_dev *);
+void wimax_dev_init(struct wimax_dev *);
+int wimax_dev_add(struct wimax_dev *, struct net_device *);
+void wimax_dev_rm(struct wimax_dev *);
 
 static inline
 struct wimax_dev *net_dev_to_wimax(struct net_device *net_dev)
@@ -454,8 +454,8 @@ struct device *wimax_dev_to_dev(struct wimax_dev *wimax_dev)
        return wimax_dev->net_dev->dev.parent;
 }
 
-extern void wimax_state_change(struct wimax_dev *, enum wimax_st);
-extern enum wimax_st wimax_state_get(struct wimax_dev *);
+void wimax_state_change(struct wimax_dev *, enum wimax_st);
+enum wimax_st wimax_state_get(struct wimax_dev *);
 
 /*
  * Radio Switch state reporting.
@@ -463,8 +463,8 @@ extern enum wimax_st wimax_state_get(struct wimax_dev *);
  * enum wimax_rf_state is declared in linux/wimax.h so the exports
  * to user space can use it.
  */
-extern void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
-extern void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_hw(struct wimax_dev *, enum wimax_rf_state);
+void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
 
 
 /*
@@ -490,15 +490,14 @@ extern void wimax_report_rfkill_sw(struct wimax_dev *, enum wimax_rf_state);
  * send diagnostics information that a device-specific diagnostics
  * tool would be interested in.
  */
-extern struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *,
-                                      const void *, size_t, gfp_t);
-extern int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
-extern int wimax_msg(struct wimax_dev *, const char *,
-                    const void *, size_t, gfp_t);
+struct sk_buff *wimax_msg_alloc(struct wimax_dev *, const char *, const void *,
+                               size_t, gfp_t);
+int wimax_msg_send(struct wimax_dev *, struct sk_buff *);
+int wimax_msg(struct wimax_dev *, const char *, const void *, size_t, gfp_t);
 
-extern const void *wimax_msg_data_len(struct sk_buff *, size_t *);
-extern const void *wimax_msg_data(struct sk_buff *);
-extern ssize_t wimax_msg_len(struct sk_buff *);
+const void *wimax_msg_data_len(struct sk_buff *, size_t *);
+const void *wimax_msg_data(struct sk_buff *);
+ssize_t wimax_msg_len(struct sk_buff *);
 
 
 /*
@@ -513,7 +512,7 @@ extern ssize_t wimax_msg_len(struct sk_buff *);
  * device's control structure and (as such) the 'struct wimax_dev' is
  * referenced by the caller.
  */
-extern int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
-extern int wimax_reset(struct wimax_dev *);
+int wimax_rfkill(struct wimax_dev *, enum wimax_rf_state);
+int wimax_reset(struct wimax_dev *);
 
 #endif /* #ifndef __NET__WIMAX_H__ */
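A hedged sketch (not part of this commit) of how a WiMAX driver might use the wimax_msg() helper declared above to push a diagnostics report to user space; the function and payload names are hypothetical, and passing a NULL pipe name to select the default message pipe is an assumption about the messaging API.

static int my_send_diag_report(struct wimax_dev *wimax_dev,
                               const void *report, size_t len)
{
        /* NULL pipe name: deliver on the default message pipe (assumption) */
        return wimax_msg(wimax_dev, NULL, report, len, GFP_KERNEL);
}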
index b4a8a8923128fbc68a9aa0d6dd2fb5dd5524de32..c383aa4edbf0c27980ef7aad22ff160c72b41bbd 100644 (file)
@@ -187,57 +187,57 @@ extern int  sysctl_x25_clear_request_timeout;
 extern int  sysctl_x25_ack_holdback_timeout;
 extern int  sysctl_x25_forward;
 
-extern int x25_parse_address_block(struct sk_buff *skb,
-               struct x25_address *called_addr,
-               struct x25_address *calling_addr);
-
-extern int  x25_addr_ntoa(unsigned char *, struct x25_address *,
-                         struct x25_address *);
-extern int  x25_addr_aton(unsigned char *, struct x25_address *,
-                         struct x25_address *);
-extern struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
-extern void x25_destroy_socket_from_timer(struct sock *);
-extern int  x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
-extern void x25_kill_by_neigh(struct x25_neigh *);
+int x25_parse_address_block(struct sk_buff *skb,
+                           struct x25_address *called_addr,
+                           struct x25_address *calling_addr);
+
+int x25_addr_ntoa(unsigned char *, struct x25_address *, struct x25_address *);
+int x25_addr_aton(unsigned char *, struct x25_address *, struct x25_address *);
+struct sock *x25_find_socket(unsigned int, struct x25_neigh *);
+void x25_destroy_socket_from_timer(struct sock *);
+int x25_rx_call_request(struct sk_buff *, struct x25_neigh *, unsigned int);
+void x25_kill_by_neigh(struct x25_neigh *);
 
 /* x25_dev.c */
-extern void x25_send_frame(struct sk_buff *, struct x25_neigh *);
-extern int  x25_lapb_receive_frame(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
-extern void x25_establish_link(struct x25_neigh *);
-extern void x25_terminate_link(struct x25_neigh *);
+void x25_send_frame(struct sk_buff *, struct x25_neigh *);
+int x25_lapb_receive_frame(struct sk_buff *, struct net_device *,
+                          struct packet_type *, struct net_device *);
+void x25_establish_link(struct x25_neigh *);
+void x25_terminate_link(struct x25_neigh *);
 
 /* x25_facilities.c */
-extern int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
-                               struct x25_dte_facilities *, unsigned long *);
-extern int x25_create_facilities(unsigned char *, struct x25_facilities *,
-                               struct x25_dte_facilities *, unsigned long);
-extern int x25_negotiate_facilities(struct sk_buff *, struct sock *,
-                               struct x25_facilities *,
-                               struct x25_dte_facilities *);
-extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
+int x25_parse_facilities(struct sk_buff *, struct x25_facilities *,
+                        struct x25_dte_facilities *, unsigned long *);
+int x25_create_facilities(unsigned char *, struct x25_facilities *,
+                         struct x25_dte_facilities *, unsigned long);
+int x25_negotiate_facilities(struct sk_buff *, struct sock *,
+                            struct x25_facilities *,
+                            struct x25_dte_facilities *);
+void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
 
 /* x25_forward.c */
-extern void x25_clear_forward_by_lci(unsigned int lci);
-extern void x25_clear_forward_by_dev(struct net_device *);
-extern int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
-extern int x25_forward_call(struct x25_address *, struct x25_neigh *,
-                               struct sk_buff *, int);
+void x25_clear_forward_by_lci(unsigned int lci);
+void x25_clear_forward_by_dev(struct net_device *);
+int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
+int x25_forward_call(struct x25_address *, struct x25_neigh *, struct sk_buff *,
+                    int);
 
 /* x25_in.c */
-extern int  x25_process_rx_frame(struct sock *, struct sk_buff *);
-extern int  x25_backlog_rcv(struct sock *, struct sk_buff *);
+int x25_process_rx_frame(struct sock *, struct sk_buff *);
+int x25_backlog_rcv(struct sock *, struct sk_buff *);
 
 /* x25_link.c */
-extern void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
-extern void x25_link_device_up(struct net_device *);
-extern void x25_link_device_down(struct net_device *);
-extern void x25_link_established(struct x25_neigh *);
-extern void x25_link_terminated(struct x25_neigh *);
-extern void x25_transmit_clear_request(struct x25_neigh *, unsigned int, unsigned char);
-extern void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
-extern int  x25_subscr_ioctl(unsigned int, void __user *);
-extern struct x25_neigh *x25_get_neigh(struct net_device *);
-extern void x25_link_free(void);
+void x25_link_control(struct sk_buff *, struct x25_neigh *, unsigned short);
+void x25_link_device_up(struct net_device *);
+void x25_link_device_down(struct net_device *);
+void x25_link_established(struct x25_neigh *);
+void x25_link_terminated(struct x25_neigh *);
+void x25_transmit_clear_request(struct x25_neigh *, unsigned int,
+                               unsigned char);
+void x25_transmit_link(struct sk_buff *, struct x25_neigh *);
+int x25_subscr_ioctl(unsigned int, void __user *);
+struct x25_neigh *x25_get_neigh(struct net_device *);
+void x25_link_free(void);
 
 /* x25_neigh.c */
 static __inline__ void x25_neigh_hold(struct x25_neigh *nb)
@@ -252,16 +252,16 @@ static __inline__ void x25_neigh_put(struct x25_neigh *nb)
 }
 
 /* x25_out.c */
-extern  int x25_output(struct sock *, struct sk_buff *);
-extern void x25_kick(struct sock *);
-extern void x25_enquiry_response(struct sock *);
+int x25_output(struct sock *, struct sk_buff *);
+void x25_kick(struct sock *);
+void x25_enquiry_response(struct sock *);
 
 /* x25_route.c */
-extern struct x25_route *x25_get_route(struct x25_address *addr);
-extern struct net_device *x25_dev_get(char *);
-extern void x25_route_device_down(struct net_device *dev);
-extern int  x25_route_ioctl(unsigned int, void __user *);
-extern void x25_route_free(void);
+struct x25_route *x25_get_route(struct x25_address *addr);
+struct net_device *x25_dev_get(char *);
+void x25_route_device_down(struct net_device *dev);
+int x25_route_ioctl(unsigned int, void __user *);
+void x25_route_free(void);
 
 static __inline__ void x25_route_hold(struct x25_route *rt)
 {
@@ -275,30 +275,31 @@ static __inline__ void x25_route_put(struct x25_route *rt)
 }
 
 /* x25_subr.c */
-extern void x25_clear_queues(struct sock *);
-extern void x25_frames_acked(struct sock *, unsigned short);
-extern void x25_requeue_frames(struct sock *);
-extern int  x25_validate_nr(struct sock *, unsigned short);
-extern void x25_write_internal(struct sock *, int);
-extern int  x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *, int *);
-extern void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
+void x25_clear_queues(struct sock *);
+void x25_frames_acked(struct sock *, unsigned short);
+void x25_requeue_frames(struct sock *);
+int x25_validate_nr(struct sock *, unsigned short);
+void x25_write_internal(struct sock *, int);
+int x25_decode(struct sock *, struct sk_buff *, int *, int *, int *, int *,
+              int *);
+void x25_disconnect(struct sock *, int, unsigned char, unsigned char);
 
 /* x25_timer.c */
-extern void x25_init_timers(struct sock *sk);
-extern void x25_start_heartbeat(struct sock *);
-extern void x25_start_t2timer(struct sock *);
-extern void x25_start_t21timer(struct sock *);
-extern void x25_start_t22timer(struct sock *);
-extern void x25_start_t23timer(struct sock *);
-extern void x25_stop_heartbeat(struct sock *);
-extern void x25_stop_timer(struct sock *);
-extern unsigned long x25_display_timer(struct sock *);
-extern void x25_check_rbuf(struct sock *);
+void x25_init_timers(struct sock *sk);
+void x25_start_heartbeat(struct sock *);
+void x25_start_t2timer(struct sock *);
+void x25_start_t21timer(struct sock *);
+void x25_start_t22timer(struct sock *);
+void x25_start_t23timer(struct sock *);
+void x25_stop_heartbeat(struct sock *);
+void x25_stop_timer(struct sock *);
+unsigned long x25_display_timer(struct sock *);
+void x25_check_rbuf(struct sock *);
 
 /* sysctl_net_x25.c */
 #ifdef CONFIG_SYSCTL
-extern void x25_register_sysctl(void);
-extern void x25_unregister_sysctl(void);
+void x25_register_sysctl(void);
+void x25_unregister_sysctl(void);
 #else
 static inline void x25_register_sysctl(void) {};
 static inline void x25_unregister_sysctl(void) {};
@@ -318,6 +319,6 @@ extern rwlock_t x25_forward_list_lock;
 extern struct list_head x25_neigh_list;
 extern rwlock_t x25_neigh_list_lock;
 
-extern int x25_proc_init(void);
-extern void x25_proc_exit(void);
+int x25_proc_init(void);
+void x25_proc_exit(void);
 #endif
index e253bf0cc7ef005721de3b7e8729cfabce52854f..6b82fdf4ba716898ea53fb0cf2e66690a66d1479 100644 (file)
@@ -307,15 +307,17 @@ struct xfrm_policy_afinfo {
        struct dst_entry        *(*blackhole_route)(struct net *net, struct dst_entry *orig);
 };
 
-extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
-extern void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c);
-extern void km_state_notify(struct xfrm_state *x, const struct km_event *c);
+int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
+int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+void km_policy_notify(struct xfrm_policy *xp, int dir,
+                     const struct km_event *c);
+void km_state_notify(struct xfrm_state *x, const struct km_event *c);
 
 struct xfrm_tmpl;
-extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
-extern void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
-extern int __xfrm_state_delete(struct xfrm_state *x);
+int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
+            struct xfrm_policy *pol);
+void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
+int __xfrm_state_delete(struct xfrm_state *x);
 
 struct xfrm_state_afinfo {
        unsigned int            family;
@@ -344,12 +346,12 @@ struct xfrm_state_afinfo {
        void                    (*local_error)(struct sk_buff *skb, u32 mtu);
 };
 
-extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
-extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
-extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
-extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
+int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
-extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
+void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
 struct xfrm_type {
        char                    *description;
@@ -372,8 +374,8 @@ struct xfrm_type {
        u32                     (*get_mtu)(struct xfrm_state *, int size);
 };
 
-extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
-extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
+int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
+int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
 
 struct xfrm_mode {
        /*
@@ -434,8 +436,8 @@ enum {
        XFRM_MODE_FLAG_TUNNEL = 1,
 };
 
-extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
-extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
+int xfrm_register_mode(struct xfrm_mode *mode, int family);
+int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
 
 static inline int xfrm_af2proto(unsigned int family)
 {
@@ -595,8 +597,8 @@ struct xfrm_mgr {
                                           const struct xfrm_kmaddress *k);
 };
 
-extern int xfrm_register_km(struct xfrm_mgr *km);
-extern int xfrm_unregister_km(struct xfrm_mgr *km);
+int xfrm_register_km(struct xfrm_mgr *km);
+int xfrm_unregister_km(struct xfrm_mgr *km);
 
 /*
  * This structure is used for the duration where packets are being
@@ -713,23 +715,23 @@ static inline void xfrm_audit_helper_usrinfo(kuid_t auid, u32 ses, u32 secid,
                audit_log_task_context(audit_buf);
 }
 
-extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                                 kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                                 kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                                kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                                   kuid_t auid, u32 ses, u32 secid);
-extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
-                                            struct sk_buff *skb);
-extern void xfrm_audit_state_replay(struct xfrm_state *x,
-                                   struct sk_buff *skb, __be32 net_seq);
-extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
-extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
-                                     __be32 net_spi, __be32 net_seq);
-extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
-                                    struct sk_buff *skb, u8 proto);
+void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, kuid_t auid,
+                          u32 ses, u32 secid);
+void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, kuid_t auid,
+                             u32 ses, u32 secid);
+void xfrm_audit_state_add(struct xfrm_state *x, int result, kuid_t auid,
+                         u32 ses, u32 secid);
+void xfrm_audit_state_delete(struct xfrm_state *x, int result, kuid_t auid,
+                            u32 ses, u32 secid);
+void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
+                                     struct sk_buff *skb);
+void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
+                            __be32 net_seq);
+void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
+void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
+                              __be32 net_seq);
+void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
+                             u8 proto);
 #else
 
 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
@@ -784,7 +786,7 @@ static inline void xfrm_pol_hold(struct xfrm_policy *policy)
                atomic_inc(&policy->refcnt);
 }
 
-extern void xfrm_policy_destroy(struct xfrm_policy *policy);
+void xfrm_policy_destroy(struct xfrm_policy *policy);
 
 static inline void xfrm_pol_put(struct xfrm_policy *policy)
 {
@@ -799,7 +801,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
                xfrm_pol_put(pols[i]);
 }
 
-extern void __xfrm_state_destroy(struct xfrm_state *);
+void __xfrm_state_destroy(struct xfrm_state *);
 
 static inline void __xfrm_state_put(struct xfrm_state *x)
 {
@@ -903,9 +905,8 @@ __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
        return port;
 }
 
-extern bool xfrm_selector_match(const struct xfrm_selector *sel,
-                               const struct flowi *fl,
-                               unsigned short family);
+bool xfrm_selector_match(const struct xfrm_selector *sel,
+                        const struct flowi *fl, unsigned short family);
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
 /*     If neither has a context --> match
@@ -975,7 +976,7 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
 }
 #endif
 
-extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
 
 struct sec_path {
        atomic_t                refcnt;
@@ -1000,7 +1001,7 @@ secpath_get(struct sec_path *sp)
        return sp;
 }
 
-extern void __secpath_destroy(struct sec_path *sp);
+void __secpath_destroy(struct sec_path *sp);
 
 static inline void
 secpath_put(struct sec_path *sp)
@@ -1009,7 +1010,7 @@ secpath_put(struct sec_path *sp)
                __secpath_destroy(sp);
 }
 
-extern struct sec_path *secpath_dup(struct sec_path *src);
+struct sec_path *secpath_dup(struct sec_path *src);
 
 static inline void
 secpath_reset(struct sk_buff *skb)
@@ -1059,7 +1060,8 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
 }
 
 #ifdef CONFIG_XFRM
-extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
+int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
+                       unsigned short family);
 
 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
                                       struct sk_buff *skb,
@@ -1103,8 +1105,8 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
        return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
 }
 
-extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
-                                unsigned int family, int reverse);
+int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
+                         unsigned int family, int reverse);
 
 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
                                      unsigned int family)
@@ -1119,7 +1121,7 @@ static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
        return __xfrm_decode_session(skb, fl, family, 1);
 }
 
-extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
+int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
 
 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
 {
@@ -1140,7 +1142,7 @@ static inline int xfrm6_route_forward(struct sk_buff *skb)
        return xfrm_route_forward(skb, AF_INET6);
 }
 
-extern int __xfrm_sk_clone_policy(struct sock *sk);
+int __xfrm_sk_clone_policy(struct sock *sk);
 
 static inline int xfrm_sk_clone_policy(struct sock *sk)
 {
@@ -1149,7 +1151,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk)
        return 0;
 }
 
-extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
 
 static inline void xfrm_sk_free_policy(struct sock *sk)
 {
@@ -1163,7 +1165,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
        }
 }
 
-extern void xfrm_garbage_collect(struct net *net);
+void xfrm_garbage_collect(struct net *net);
 
 #else
 
@@ -1355,6 +1357,12 @@ struct xfrm_tunnel {
        int priority;
 };
 
+struct xfrm_tunnel_notifier {
+       int (*handler)(struct sk_buff *skb);
+       struct xfrm_tunnel_notifier __rcu *next;
+       int priority;
+};
+
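A hedged sketch (not part of this commit) of how a client might hook into the new xfrm_tunnel_notifier interface; the handler is presumably invoked for tunnel-mode input packets once registered via xfrm4_mode_tunnel_input_register(), declared further below, and all names here are illustrative.

static int my_tunnel_input_handler(struct sk_buff *skb)
{
        /* inspect the decapsulated packet here; semantics of the return
         * value are an assumption, 0 is used as "continue" */
        return 0;
}

static struct xfrm_tunnel_notifier my_tunnel_notifier = {
        .handler  = my_tunnel_input_handler,
        .priority = 1,
};

/* in module init:  xfrm4_mode_tunnel_input_register(&my_tunnel_notifier); */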
 struct xfrm6_tunnel {
        int (*handler)(struct sk_buff *skb);
        int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -1363,16 +1371,16 @@ struct xfrm6_tunnel {
        int priority;
 };
 
-extern void xfrm_init(void);
-extern void xfrm4_init(void);
-extern int xfrm_state_init(struct net *net);
-extern void xfrm_state_fini(struct net *net);
-extern void xfrm4_state_init(void);
+void xfrm_init(void);
+void xfrm4_init(void);
+int xfrm_state_init(struct net *net);
+void xfrm_state_fini(struct net *net);
+void xfrm4_state_init(void);
 #ifdef CONFIG_XFRM
-extern int xfrm6_init(void);
-extern void xfrm6_fini(void);
-extern int xfrm6_state_init(void);
-extern void xfrm6_state_fini(void);
+int xfrm6_init(void);
+void xfrm6_fini(void);
+int xfrm6_state_init(void);
+void xfrm6_state_fini(void);
 #else
 static inline int xfrm6_init(void)
 {
@@ -1385,52 +1393,52 @@ static inline void xfrm6_fini(void)
 #endif
 
 #ifdef CONFIG_XFRM_STATISTICS
-extern int xfrm_proc_init(struct net *net);
-extern void xfrm_proc_fini(struct net *net);
+int xfrm_proc_init(struct net *net);
+void xfrm_proc_fini(struct net *net);
 #endif
 
-extern int xfrm_sysctl_init(struct net *net);
+int xfrm_sysctl_init(struct net *net);
 #ifdef CONFIG_SYSCTL
-extern void xfrm_sysctl_fini(struct net *net);
+void xfrm_sysctl_fini(struct net *net);
 #else
 static inline void xfrm_sysctl_fini(struct net *net)
 {
 }
 #endif
 
-extern void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
-extern int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
-                          int (*func)(struct xfrm_state *, int, void*), void *);
-extern void xfrm_state_walk_done(struct xfrm_state_walk *walk);
-extern struct xfrm_state *xfrm_state_alloc(struct net *net);
-extern struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
-                                         const xfrm_address_t *saddr,
-                                         const struct flowi *fl,
-                                         struct xfrm_tmpl *tmpl,
-                                         struct xfrm_policy *pol, int *err,
-                                         unsigned short family);
-extern struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
-                                              xfrm_address_t *daddr,
-                                              xfrm_address_t *saddr,
-                                              unsigned short family,
-                                              u8 mode, u8 proto, u32 reqid);
-extern int xfrm_state_check_expire(struct xfrm_state *x);
-extern void xfrm_state_insert(struct xfrm_state *x);
-extern int xfrm_state_add(struct xfrm_state *x);
-extern int xfrm_state_update(struct xfrm_state *x);
-extern struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
-                                           const xfrm_address_t *daddr, __be32 spi,
-                                           u8 proto, unsigned short family);
-extern struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
-                                                  const xfrm_address_t *daddr,
-                                                  const xfrm_address_t *saddr,
-                                                  u8 proto,
-                                                  unsigned short family);
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
+int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
+                   int (*func)(struct xfrm_state *, int, void*), void *);
+void xfrm_state_walk_done(struct xfrm_state_walk *walk);
+struct xfrm_state *xfrm_state_alloc(struct net *net);
+struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
+                                  const xfrm_address_t *saddr,
+                                  const struct flowi *fl,
+                                  struct xfrm_tmpl *tmpl,
+                                  struct xfrm_policy *pol, int *err,
+                                  unsigned short family);
+struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
+                                      xfrm_address_t *daddr,
+                                      xfrm_address_t *saddr,
+                                      unsigned short family,
+                                      u8 mode, u8 proto, u32 reqid);
+int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_insert(struct xfrm_state *x);
+int xfrm_state_add(struct xfrm_state *x);
+int xfrm_state_update(struct xfrm_state *x);
+struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
+                                    const xfrm_address_t *daddr, __be32 spi,
+                                    u8 proto, unsigned short family);
+struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
+                                           const xfrm_address_t *daddr,
+                                           const xfrm_address_t *saddr,
+                                           u8 proto,
+                                           unsigned short family);
 #ifdef CONFIG_XFRM_SUB_POLICY
-extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
-                         int n, unsigned short family);
-extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
-                          int n, unsigned short family);
+int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
+                  unsigned short family);
+int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
+                   unsigned short family);
 #else
 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
                                 int n, unsigned short family)
@@ -1462,68 +1470,69 @@ struct xfrmk_spdinfo {
        u32 spdhmcnt;
 };
 
-extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark,
-                                             u32 seq);
-extern int xfrm_state_delete(struct xfrm_state *x);
-extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
-extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
-extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
-extern u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
-extern int xfrm_init_replay(struct xfrm_state *x);
-extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
-extern int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
-extern int xfrm_init_state(struct xfrm_state *x);
-extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
-                     int encap_type);
-extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
-extern int xfrm_output_resume(struct sk_buff *skb, int err);
-extern int xfrm_output(struct sk_buff *skb);
-extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern void xfrm_local_error(struct sk_buff *skb, int mtu);
-extern int xfrm4_extract_header(struct sk_buff *skb);
-extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
-                          int encap_type);
-extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
-extern int xfrm4_rcv(struct sk_buff *skb);
+struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
+int xfrm_state_delete(struct xfrm_state *x);
+int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
+void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
+u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
+int xfrm_init_replay(struct xfrm_state *x);
+int xfrm_state_mtu(struct xfrm_state *x, int mtu);
+int __xfrm_init_state(struct xfrm_state *x, bool init_replay);
+int xfrm_init_state(struct xfrm_state *x);
+int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
+int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_output_resume(struct sk_buff *skb, int err);
+int xfrm_output(struct sk_buff *skb);
+int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+void xfrm_local_error(struct sk_buff *skb, int mtu);
+int xfrm4_extract_header(struct sk_buff *skb);
+int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+                   int encap_type);
+int xfrm4_transport_finish(struct sk_buff *skb, int async);
+int xfrm4_rcv(struct sk_buff *skb);
 
 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 {
        return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
 }
 
-extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm4_output(struct sk_buff *skb);
-extern int xfrm4_output_finish(struct sk_buff *skb);
-extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
-extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
-extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
-extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
-extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
-extern int xfrm6_extract_header(struct sk_buff *skb);
-extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
-extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
-extern int xfrm6_rcv(struct sk_buff *skb);
-extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
-                           xfrm_address_t *saddr, u8 proto);
-extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
-extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
-extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
-extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
-extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
-extern int xfrm6_output(struct sk_buff *skb);
-extern int xfrm6_output_finish(struct sk_buff *skb);
-extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
-                                u8 **prevhdr);
-extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm4_output(struct sk_buff *skb);
+int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
+int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
+int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
+int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
+int xfrm6_extract_header(struct sk_buff *skb);
+int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
+int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv(struct sk_buff *skb);
+int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
+                    xfrm_address_t *saddr, u8 proto);
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
+int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
+int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
+__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
+__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
+int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
+int xfrm6_output(struct sk_buff *skb);
+int xfrm6_output_finish(struct sk_buff *skb);
+int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
+                         u8 **prevhdr);
 
 #ifdef CONFIG_XFRM
-extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
-extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
+int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
+int xfrm_user_policy(struct sock *sk, int optname,
+                    u8 __user *optval, int optlen);
 #else
 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
 {
@@ -1540,59 +1549,62 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
 
 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
 
-extern void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
-extern int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
-       int (*func)(struct xfrm_policy *, int, int, void*), void *);
-extern void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
+void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
+int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
+                    int (*func)(struct xfrm_policy *, int, int, void*),
+                    void *);
+void xfrm_policy_walk_done(struct xfrm_policy_walk *walk);
 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
                                          u8 type, int dir,
                                          struct xfrm_selector *sel,
                                          struct xfrm_sec_ctx *ctx, int delete,
                                          int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
+                                    u32 id, int delete, int *err);
 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
 u32 xfrm_get_acqseq(void);
-extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
+int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
                                 u8 mode, u32 reqid, u8 proto,
                                 const xfrm_address_t *daddr,
                                 const xfrm_address_t *saddr, int create,
                                 unsigned short family);
-extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
+int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
 
 #ifdef CONFIG_XFRM_MIGRATE
-extern int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
-                     const struct xfrm_migrate *m, int num_bundles,
-                     const struct xfrm_kmaddress *k);
-extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
-extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
-                                             struct xfrm_migrate *m);
-extern int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
-                       struct xfrm_migrate *m, int num_bundles,
-                       struct xfrm_kmaddress *k);
+int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+              const struct xfrm_migrate *m, int num_bundles,
+              const struct xfrm_kmaddress *k);
+struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m);
+struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
+                                     struct xfrm_migrate *m);
+int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+                struct xfrm_migrate *m, int num_bundles,
+                struct xfrm_kmaddress *k);
 #endif
 
-extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
-extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
-extern int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
-
-extern void xfrm_input_init(void);
-extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
-
-extern void xfrm_probe_algs(void);
-extern int xfrm_count_pfkey_auth_supported(void);
-extern int xfrm_count_pfkey_enc_supported(void);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
-extern struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
-extern struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
-                                                  int probe);
+int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
+void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
+int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
+             xfrm_address_t *addr);
+
+void xfrm_input_init(void);
+int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
+
+void xfrm_probe_algs(void);
+int xfrm_count_pfkey_auth_supported(void);
+int xfrm_count_pfkey_enc_supported(void);
+struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
+struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
+struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
+struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
+                                           int probe);
 
 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
                                    const xfrm_address_t *b)
diff --git a/include/sound/cs42l73.h b/include/sound/cs42l73.h
new file mode 100644 (file)
index 0000000..f354be4
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * linux/sound/cs42l73.h -- Platform data for CS42L73
+ *
+ * Copyright (c) 2012 Cirrus Logic Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CS42L73_H
+#define __CS42L73_H
+
+struct cs42l73_platform_data {
+       /* RST GPIO */
+       unsigned int reset_gpio;
+       unsigned int chgfreq;
+       int jack_detection;
+       unsigned int mclk_freq;
+};
+
+#endif /* __CS42L73_H */
index f11c35cd5532df9d549bee84fa4d414837ccc2b3..83b2c3e95a1a1267bcd36da04b65aacc8db461b1 100644 (file)
@@ -61,6 +61,7 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
  * @slave_id: Slave requester id for the DMA channel.
  * @filter_data: Custom DMA channel filter data, this will usually be used when
  * requesting the DMA channel.
+ * @fifo_size: FIFO size of the DAI controller in bytes
  */
 struct snd_dmaengine_dai_dma_data {
        dma_addr_t addr;
@@ -68,6 +69,7 @@ struct snd_dmaengine_dai_dma_data {
        u32 maxburst;
        unsigned int slave_id;
        void *filter_data;
+       unsigned int fifo_size;
 };
 
 void snd_dmaengine_pcm_set_config_from_dai_data(
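A hedged sketch (illustrative, not from this commit) of a DAI driver filling in its playback DMA parameters with the new fifo_size field documented above; the FIFO register address and sizes are made up, and the dmaengine headers are assumed to be included.

static struct snd_dmaengine_dai_dma_data my_playback_dma_data = {
        .addr       = 0x4803c000 + 0x46c,       /* hypothetical TX FIFO address */
        .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .maxburst   = 16,
        .fifo_size  = 64 * 4,                   /* 64 words of 32 bits, in bytes */
};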
index fe66533e9b7a51ef143acd33ff398bee47f71398..12afab18945d056709df7495b75b0da6c0155426 100644 (file)
@@ -36,7 +36,6 @@
 #define RSND_SSI_CLK_PIN_SHARE         (1 << 31)
 #define RSND_SSI_CLK_FROM_ADG          (1 << 30) /* clock parent is master */
 #define RSND_SSI_SYNC                  (1 << 29) /* SSI34_sync etc */
-#define RSND_SSI_DEPENDENT             (1 << 28) /* SSI needs SRU/SCU */
 
 #define RSND_SSI_PLAY                  (1 << 24)
 
@@ -68,6 +67,7 @@ struct rsnd_scu_platform_info {
  *
  * A : generation
  */
+#define RSND_GEN_MASK  (0xF << 0)
 #define RSND_GEN1      (1 << 0) /* fixme */
 #define RSND_GEN2      (2 << 0) /* fixme */
 
index ae9a227d35d3538c233b2b1cb777afcbcb4e639d..800c101bb096fd6f5c1170ebbdce1ab06cead062 100644 (file)
@@ -105,6 +105,8 @@ int snd_soc_dai_set_clkdiv(struct snd_soc_dai *dai,
 int snd_soc_dai_set_pll(struct snd_soc_dai *dai,
        int pll_id, int source, unsigned int freq_in, unsigned int freq_out);
 
+int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio);
+
 /* Digital Audio interface formatting */
 int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt);
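A hedged usage sketch (not from this commit): a machine driver's hw_params hook calling the new snd_soc_dai_set_bclk_ratio() helper declared above. The 64-fs ratio is an assumption for a typical I2S codec, and the function name is hypothetical.

static int my_card_hw_params(struct snd_pcm_substream *substream,
                             struct snd_pcm_hw_params *params)
{
        struct snd_soc_pcm_runtime *rtd = substream->private_data;

        /* ask the CPU DAI for 64 bit clocks per frame */
        return snd_soc_dai_set_bclk_ratio(rtd->cpu_dai, 64);
}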
 
@@ -131,6 +133,7 @@ struct snd_soc_dai_ops {
        int (*set_pll)(struct snd_soc_dai *dai, int pll_id, int source,
                unsigned int freq_in, unsigned int freq_out);
        int (*set_clkdiv)(struct snd_soc_dai *dai, int div_id, int div);
+       int (*set_bclk_ratio)(struct snd_soc_dai *dai, unsigned int ratio);
 
        /*
         * DAI format configuration
@@ -166,6 +169,13 @@ struct snd_soc_dai_ops {
                struct snd_soc_dai *);
        int (*prepare)(struct snd_pcm_substream *,
                struct snd_soc_dai *);
+       /*
+        * NOTE: Commands passed to the trigger function are not necessarily
+        * compatible with the current state of the dai. For example this
+        * sequence of commands is possible: START STOP STOP.
+        * So do not unconditionally use refcounting functions in the trigger
+        * function, e.g. clk_enable/disable.
+        */
        int (*trigger)(struct snd_pcm_substream *, int,
                struct snd_soc_dai *);
        int (*bespoke_trigger)(struct snd_pcm_substream *, int,
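A hedged illustration of the NOTE above (not part of this commit): a trigger callback that keeps its own running flag so a START STOP STOP sequence cannot unbalance clk_enable()/clk_disable(). The driver-private structure and names are hypothetical.

struct my_dai_priv {            /* hypothetical driver state */
        struct clk *clk;
        bool running;
};

static int my_dai_trigger(struct snd_pcm_substream *substream, int cmd,
                          struct snd_soc_dai *dai)
{
        struct my_dai_priv *priv = snd_soc_dai_get_drvdata(dai);

        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
                if (!priv->running) {
                        clk_enable(priv->clk);
                        priv->running = true;
                }
                break;
        case SNDRV_PCM_TRIGGER_STOP:
                if (priv->running) {
                        clk_disable(priv->clk);
                        priv->running = false;
                }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}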
@@ -276,6 +286,13 @@ static inline void snd_soc_dai_set_dma_data(struct snd_soc_dai *dai,
                dai->capture_dma_data = data;
 }
 
+static inline void snd_soc_dai_init_dma_data(struct snd_soc_dai *dai,
+                                            void *playback, void *capture)
+{
+       dai->playback_dma_data = playback;
+       dai->capture_dma_data = capture;
+}
+
 static inline void snd_soc_dai_set_drvdata(struct snd_soc_dai *dai,
                void *data)
 {
index 27a72d5d4b00ff36133fb19a2a1580044ac27d11..2037c45adfe648e907fa86290f7dfea847989c20 100644 (file)
@@ -286,6 +286,8 @@ struct device;
        .info = snd_soc_info_volsw, \
        .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
        .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
+#define SOC_DAPM_SINGLE_VIRT(xname, max) \
+       SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0)
 #define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .info = snd_soc_info_volsw, \
@@ -300,6 +302,8 @@ struct device;
        .tlv.p = (tlv_array), \
        .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
        .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+#define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
+       SOC_DAPM_SINGLE_TLV(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
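A hedged one-line usage sketch (not part of this commit) of the new register-less SOC_DAPM_SINGLE_VIRT() macro in a hypothetical codec driver:

static const struct snd_kcontrol_new my_output_mixer_controls[] = {
        SOC_DAPM_SINGLE_VIRT("DAC Switch", 1),
};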
 #define SOC_DAPM_ENUM(xname, xenum) \
 {      .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
        .info = snd_soc_info_enum_double, \
index d22cb0a06feb36949476ac8c761e3f1c0efa671c..1f741cb24f337c3e4662c2ff2cd45f78985c54c1 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef __LINUX_SND_SOC_H
 #define __LINUX_SND_SOC_H
 
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/types.h>
 #include <linux/notifier.h>
@@ -330,7 +331,6 @@ struct soc_enum;
 struct snd_soc_jack;
 struct snd_soc_jack_zone;
 struct snd_soc_jack_pin;
-struct snd_soc_cache_ops;
 #include <sound/soc-dapm.h>
 #include <sound/soc-dpcm.h>
 
@@ -348,10 +348,6 @@ enum snd_soc_control_type {
        SND_SOC_REGMAP,
 };
 
-enum snd_soc_compress_type {
-       SND_SOC_FLAT_COMPRESSION = 1,
-};
-
 enum snd_soc_pcm_subclass {
        SND_SOC_PCM_CLASS_PCM   = 0,
        SND_SOC_PCM_CLASS_BE    = 1,
@@ -369,6 +365,7 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
 
 int snd_soc_register_card(struct snd_soc_card *card);
 int snd_soc_unregister_card(struct snd_soc_card *card);
+int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card);
 int snd_soc_suspend(struct device *dev);
 int snd_soc_resume(struct device *dev);
 int snd_soc_poweroff(struct device *dev);
@@ -386,6 +383,9 @@ void snd_soc_unregister_codec(struct device *dev);
 int snd_soc_register_component(struct device *dev,
                         const struct snd_soc_component_driver *cmpnt_drv,
                         struct snd_soc_dai_driver *dai_drv, int num_dai);
+int devm_snd_soc_register_component(struct device *dev,
+                        const struct snd_soc_component_driver *cmpnt_drv,
+                        struct snd_soc_dai_driver *dai_drv, int num_dai);
 void snd_soc_unregister_component(struct device *dev);
 int snd_soc_codec_volatile_register(struct snd_soc_codec *codec,
                                    unsigned int reg);
@@ -403,12 +403,6 @@ int snd_soc_cache_write(struct snd_soc_codec *codec,
                        unsigned int reg, unsigned int value);
 int snd_soc_cache_read(struct snd_soc_codec *codec,
                       unsigned int reg, unsigned int *value);
-int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
-                                     unsigned int reg);
-int snd_soc_default_readable_register(struct snd_soc_codec *codec,
-                                     unsigned int reg);
-int snd_soc_default_writable_register(struct snd_soc_codec *codec,
-                                     unsigned int reg);
 int snd_soc_platform_read(struct snd_soc_platform *platform,
                                        unsigned int reg);
 int snd_soc_platform_write(struct snd_soc_platform *platform,
@@ -541,22 +535,6 @@ int snd_soc_get_strobe(struct snd_kcontrol *kcontrol,
 int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol);
 
-/**
- * struct snd_soc_reg_access - Describes whether a given register is
- * readable, writable or volatile.
- *
- * @reg: the register number
- * @read: whether this register is readable
- * @write: whether this register is writable
- * @vol: whether this register is volatile
- */
-struct snd_soc_reg_access {
-       u16 reg;
-       u16 read;
-       u16 write;
-       u16 vol;
-};
-
 /**
  * struct snd_soc_jack_pin - Describes a pin to update based on jack detection
  *
@@ -657,17 +635,26 @@ struct snd_soc_compr_ops {
        int (*trigger)(struct snd_compr_stream *);
 };
 
-/* SoC cache ops */
-struct snd_soc_cache_ops {
+/* component interface */
+struct snd_soc_component_driver {
+       const char *name;
+
+       /* DT */
+       int (*of_xlate_dai_name)(struct snd_soc_component *component,
+                                struct of_phandle_args *args,
+                                const char **dai_name);
+};
+
+struct snd_soc_component {
        const char *name;
-       enum snd_soc_compress_type id;
-       int (*init)(struct snd_soc_codec *codec);
-       int (*exit)(struct snd_soc_codec *codec);
-       int (*read)(struct snd_soc_codec *codec, unsigned int reg,
-               unsigned int *value);
-       int (*write)(struct snd_soc_codec *codec, unsigned int reg,
-               unsigned int value);
-       int (*sync)(struct snd_soc_codec *codec);
+       int id;
+       struct device *dev;
+       struct list_head list;
+
+       struct snd_soc_dai_driver *dai_drv;
+       int num_dai;
+
+       const struct snd_soc_component_driver *driver;
 };
 
 /* SoC Audio Codec device */
@@ -683,8 +670,6 @@ struct snd_soc_codec {
        struct list_head list;
        struct list_head card_list;
        int num_dai;
-       enum snd_soc_compress_type compress_type;
-       size_t reg_size;        /* reg_cache_size * reg_word_size */
        int (*volatile_register)(struct snd_soc_codec *, unsigned int);
        int (*readable_register)(struct snd_soc_codec *, unsigned int);
        int (*writable_register)(struct snd_soc_codec *, unsigned int);
@@ -708,13 +693,13 @@ struct snd_soc_codec {
        unsigned int (*hw_read)(struct snd_soc_codec *, unsigned int);
        unsigned int (*read)(struct snd_soc_codec *, unsigned int);
        int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
-       int (*bulk_write_raw)(struct snd_soc_codec *, unsigned int, const void *, size_t);
        void *reg_cache;
-       const void *reg_def_copy;
-       const struct snd_soc_cache_ops *cache_ops;
        struct mutex cache_rw_mutex;
        int val_bytes;
 
+       /* component */
+       struct snd_soc_component component;
+
        /* dapm */
        struct snd_soc_dapm_context dapm;
        unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
@@ -733,6 +718,7 @@ struct snd_soc_codec_driver {
        int (*remove)(struct snd_soc_codec *);
        int (*suspend)(struct snd_soc_codec *);
        int (*resume)(struct snd_soc_codec *);
+       struct snd_soc_component_driver component_driver;
 
        /* Default control and setup, added after probe() is run */
        const struct snd_kcontrol_new *controls;
@@ -760,9 +746,6 @@ struct snd_soc_codec_driver {
        short reg_cache_step;
        short reg_word_size;
        const void *reg_cache_default;
-       short reg_access_size;
-       const struct snd_soc_reg_access *reg_access_default;
-       enum snd_soc_compress_type compress_type;
 
        /* codec bias level */
        int (*set_bias_level)(struct snd_soc_codec *,
@@ -849,20 +832,6 @@ struct snd_soc_platform {
 #endif
 };
 
-struct snd_soc_component_driver {
-       const char *name;
-};
-
-struct snd_soc_component {
-       const char *name;
-       int id;
-       int num_dai;
-       struct device *dev;
-       struct list_head list;
-
-       const struct snd_soc_component_driver *driver;
-};
-
 struct snd_soc_dai_link {
        /* config - must be set by machine driver */
        const char *name;                       /* Codec name */
@@ -944,12 +913,6 @@ struct snd_soc_codec_conf {
         * associated per device
         */
        const char *name_prefix;
-
-       /*
-        * set this to the desired compression type if you want to
-        * override the one supplied in codec->driver->compress_type
-        */
-       enum snd_soc_compress_type compress_type;
 };
 
 struct snd_soc_aux_dev {
@@ -1088,7 +1051,8 @@ struct snd_soc_pcm_runtime {
 /* mixer control */
 struct soc_mixer_control {
        int min, max, platform_max;
-       unsigned int reg, rreg, shift, rshift;
+       int reg, rreg;
+       unsigned int shift, rshift;
        unsigned int invert:1;
        unsigned int autodisable:1;
 };
@@ -1121,8 +1085,6 @@ struct soc_enum {
 unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
 unsigned int snd_soc_write(struct snd_soc_codec *codec,
                           unsigned int reg, unsigned int val);
-unsigned int snd_soc_bulk_write_raw(struct snd_soc_codec *codec,
-                                   unsigned int reg, const void *data, size_t len);
 
 /* device driver data */
 
@@ -1201,6 +1163,8 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
                                   const char *propname);
 unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
                                     const char *prefix);
+int snd_soc_of_get_dai_name(struct device_node *of_node,
+                           const char **dai_name);
 
 #include <sound/soc-dai.h>
 
index 5fc2dcdd21cddcadc15daa9dc3e62dde7b41b5d2..03996b2bb04f86bcb4cea7c1aab4971b2d4c310d 100644 (file)
@@ -14,6 +14,7 @@ struct snd_soc_codec;
 struct snd_soc_platform;
 struct snd_soc_card;
 struct snd_soc_dapm_widget;
+struct snd_soc_dapm_path;
 
 /*
  * Log register events
diff --git a/include/trace/events/power_cpu_migrate.h b/include/trace/events/power_cpu_migrate.h
new file mode 100644 (file)
index 0000000..f76dd4d
--- /dev/null
@@ -0,0 +1,67 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+
+#if !defined(_TRACE_POWER_CPU_MIGRATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_POWER_CPU_MIGRATE_H
+
+#include <linux/tracepoint.h>
+
+#define __cpu_migrate_proto                    \
+       TP_PROTO(u64 timestamp,                 \
+                u32 cpu_hwid)
+#define __cpu_migrate_args                     \
+       TP_ARGS(timestamp,                      \
+               cpu_hwid)
+
+DECLARE_EVENT_CLASS(cpu_migrate,
+
+       __cpu_migrate_proto,
+       __cpu_migrate_args,
+
+       TP_STRUCT__entry(
+               __field(u64,    timestamp               )
+               __field(u32,    cpu_hwid                )
+       ),
+
+       TP_fast_assign(
+               __entry->timestamp = timestamp;
+               __entry->cpu_hwid = cpu_hwid;
+       ),
+
+       TP_printk("timestamp=%llu cpu_hwid=0x%08lX",
+               (unsigned long long)__entry->timestamp,
+               (unsigned long)__entry->cpu_hwid
+       )
+);
+
+#define __define_cpu_migrate_event(name)               \
+       DEFINE_EVENT(cpu_migrate, cpu_migrate_##name,   \
+               __cpu_migrate_proto,                    \
+               __cpu_migrate_args                      \
+       )
+
+__define_cpu_migrate_event(begin);
+__define_cpu_migrate_event(finish);
+__define_cpu_migrate_event(current);
+
+#undef __define_cpu_migrate
+#undef __cpu_migrate_proto
+#undef __cpu_migrate_args
+
+/* This file can get included multiple times; see TRACE_HEADER_MULTI_READ at the top */
+#ifndef _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+#define _PWR_CPU_MIGRATE_EVENT_AVOID_DOUBLE_DEFINING
+
+/*
+ * Set from_phys_cpu and to_phys_cpu to CPU_MIGRATE_ALL_CPUS to indicate
+ * a whole-cluster migration:
+ */
+#define CPU_MIGRATE_ALL_CPUS 0x80000000U
+#endif
+
+#endif /* _TRACE_POWER_CPU_MIGRATE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE power_cpu_migrate
+#include <trace/define_trace.h>
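
The header above only declares the event class and its three events; a single compilation unit still has to instantiate and fire them. A hedged sketch of that usage (the timestamp source and the hardware-ID argument are assumptions for illustration):

/* In exactly one .c file, instantiate the tracepoints declared above. */
#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>
#include <linux/ktime.h>

static void example_switch_cluster(u32 cpu_hwid)
{
	trace_cpu_migrate_begin(ktime_to_ns(ktime_get_real()), cpu_hwid);
	/* ... hand the logical CPU over to the other physical CPU ... */
	trace_cpu_migrate_finish(ktime_to_ns(ktime_get_real()), cpu_hwid);
}
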
index f04b69b6abf251d6f0ba720db25822b11b953489..38f14d0264c3aa78f3b62b93cba405c655e23ae6 100644 (file)
@@ -78,4 +78,6 @@
 
 #define SO_BUSY_POLL           46
 
+#define SO_MAX_PACING_RATE     47
+
 #endif /* __ASM_GENERIC_SOCKET_H */
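
SO_MAX_PACING_RATE caps the pacing rate the kernel applies to a socket's transmissions, in bytes per second. A small userspace sketch; the rate value is illustrative, and on older toolchains the constant may need defining by hand (47, per the hunk above):

#include <stdio.h>
#include <sys/socket.h>

/* Limit a connected socket to roughly 1 MB/s of paced output. */
static int limit_pacing(int fd)
{
	unsigned int rate = 1000 * 1000;	/* bytes per second */

	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE, &rate, sizeof(rate)) < 0) {
		perror("setsockopt(SO_MAX_PACING_RATE)");
		return -1;
	}
	return 0;
}
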
diff --git a/include/uapi/drm/armada_drm.h b/include/uapi/drm/armada_drm.h
new file mode 100644 (file)
index 0000000..8dec3fd
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2012 Russell King
+ *  With inspiration from the i915 driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef DRM_ARMADA_IOCTL_H
+#define DRM_ARMADA_IOCTL_H
+
+#define DRM_ARMADA_GEM_CREATE          0x00
+#define DRM_ARMADA_GEM_MMAP            0x02
+#define DRM_ARMADA_GEM_PWRITE          0x03
+
+#define ARMADA_IOCTL(dir, name, str) \
+       DRM_##dir(DRM_COMMAND_BASE + DRM_ARMADA_##name, struct drm_armada_##str)
+
+struct drm_armada_gem_create {
+       uint32_t handle;
+       uint32_t size;
+};
+#define DRM_IOCTL_ARMADA_GEM_CREATE \
+       ARMADA_IOCTL(IOWR, GEM_CREATE, gem_create)
+
+struct drm_armada_gem_mmap {
+       uint32_t handle;
+       uint32_t pad;
+       uint64_t offset;
+       uint64_t size;
+       uint64_t addr;
+};
+#define DRM_IOCTL_ARMADA_GEM_MMAP \
+       ARMADA_IOCTL(IOWR, GEM_MMAP, gem_mmap)
+
+struct drm_armada_gem_pwrite {
+       uint64_t ptr;
+       uint32_t handle;
+       uint32_t offset;
+       uint32_t size;
+};
+#define DRM_IOCTL_ARMADA_GEM_PWRITE \
+       ARMADA_IOCTL(IOW, GEM_PWRITE, gem_pwrite)
+
+#endif
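
A hedged userspace sketch of exercising the new GEM create ioctl through libdrm's drmIoctl(); the object size is arbitrary and the <drm/armada_drm.h> install path is an assumption:

#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/armada_drm.h>

/* Create a 64 KiB Armada GEM object and print its handle. */
static int armada_create_bo(int drm_fd)
{
	struct drm_armada_gem_create req;

	memset(&req, 0, sizeof(req));
	req.size = 64 * 1024;

	if (drmIoctl(drm_fd, DRM_IOCTL_ARMADA_GEM_CREATE, &req) < 0) {
		perror("DRM_IOCTL_ARMADA_GEM_CREATE");
		return -1;
	}
	printf("GEM handle %u, size %u\n", req.handle, req.size);
	return 0;
}
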
index ece867889cc789ef3d6a467e535723911ac5d1d4..9b24d65fed72b9d06843cba89ebeeabff07fb71f 100644 (file)
@@ -611,12 +611,37 @@ struct drm_gem_open {
        __u64 size;
 };
 
+#define DRM_CAP_DUMB_BUFFER            0x1
+#define DRM_CAP_VBLANK_HIGH_CRTC       0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH   0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW     0x4
+#define DRM_CAP_PRIME                  0x5
+#define  DRM_PRIME_CAP_IMPORT          0x1
+#define  DRM_PRIME_CAP_EXPORT          0x2
+#define DRM_CAP_TIMESTAMP_MONOTONIC    0x6
+#define DRM_CAP_ASYNC_PAGE_FLIP                0x7
+
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {
        __u64 capability;
        __u64 value;
 };
 
+/**
+ * DRM_CLIENT_CAP_STEREO_3D
+ *
+ * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * monitor by advertising the supported 3D layouts in the flags of struct
+ * drm_mode_modeinfo.
+ */
+#define DRM_CLIENT_CAP_STEREO_3D       1
+
+/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+struct drm_set_client_cap {
+       __u64 capability;
+       __u64 value;
+};
+
 #define DRM_CLOEXEC O_CLOEXEC
 struct drm_prime_handle {
        __u32 handle;
@@ -649,6 +674,7 @@ struct drm_prime_handle {
 #define DRM_IOCTL_GEM_FLINK            DRM_IOWR(0x0a, struct drm_gem_flink)
 #define DRM_IOCTL_GEM_OPEN             DRM_IOWR(0x0b, struct drm_gem_open)
 #define DRM_IOCTL_GET_CAP              DRM_IOWR(0x0c, struct drm_get_cap)
+#define DRM_IOCTL_SET_CLIENT_CAP       DRM_IOW( 0x0d, struct drm_set_client_cap)
 
 #define DRM_IOCTL_SET_UNIQUE           DRM_IOW( 0x10, struct drm_unique)
 #define DRM_IOCTL_AUTH_MAGIC           DRM_IOW( 0x11, struct drm_auth)
@@ -774,17 +800,6 @@ struct drm_event_vblank {
        __u32 reserved;
 };
 
-#define DRM_CAP_DUMB_BUFFER 0x1
-#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
-#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
-#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
-#define DRM_CAP_PRIME 0x5
-#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
-#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
-
-#define DRM_PRIME_CAP_IMPORT 0x1
-#define DRM_PRIME_CAP_EXPORT 0x2
-
 /* typedef area */
 #ifndef __KERNEL__
 typedef struct drm_clip_rect drm_clip_rect_t;
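
The relocated DRM_CAP_* defines and the new DRM_IOCTL_SET_CLIENT_CAP work as a pair: a client opts in to stereo 3D mode reporting before probing connectors. A minimal sketch using the raw ioctl (newer libdrm also wraps this as drmSetClientCap()):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Ask the DRM core to expose 3D layouts in drm_mode_modeinfo.flags. */
static int enable_stereo_3d(int drm_fd)
{
	struct drm_set_client_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.capability = DRM_CLIENT_CAP_STEREO_3D;
	cap.value = 1;

	return ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
}
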
index 550811712f78c71aad43b4a531b07e1df536b64f..f104c2603ebe44397d62a45c50e39e18c5ae2b2e 100644 (file)
 
 /* Video mode flags */
 /* bit compatible with the xorg definitions. */
-#define DRM_MODE_FLAG_PHSYNC   (1<<0)
-#define DRM_MODE_FLAG_NHSYNC   (1<<1)
-#define DRM_MODE_FLAG_PVSYNC   (1<<2)
-#define DRM_MODE_FLAG_NVSYNC   (1<<3)
-#define DRM_MODE_FLAG_INTERLACE        (1<<4)
-#define DRM_MODE_FLAG_DBLSCAN  (1<<5)
-#define DRM_MODE_FLAG_CSYNC    (1<<6)
-#define DRM_MODE_FLAG_PCSYNC   (1<<7)
-#define DRM_MODE_FLAG_NCSYNC   (1<<8)
-#define DRM_MODE_FLAG_HSKEW    (1<<9) /* hskew provided */
-#define DRM_MODE_FLAG_BCAST    (1<<10)
-#define DRM_MODE_FLAG_PIXMUX   (1<<11)
-#define DRM_MODE_FLAG_DBLCLK   (1<<12)
-#define DRM_MODE_FLAG_CLKDIV2  (1<<13)
+#define DRM_MODE_FLAG_PHSYNC                   (1<<0)
+#define DRM_MODE_FLAG_NHSYNC                   (1<<1)
+#define DRM_MODE_FLAG_PVSYNC                   (1<<2)
+#define DRM_MODE_FLAG_NVSYNC                   (1<<3)
+#define DRM_MODE_FLAG_INTERLACE                        (1<<4)
+#define DRM_MODE_FLAG_DBLSCAN                  (1<<5)
+#define DRM_MODE_FLAG_CSYNC                    (1<<6)
+#define DRM_MODE_FLAG_PCSYNC                   (1<<7)
+#define DRM_MODE_FLAG_NCSYNC                   (1<<8)
+#define DRM_MODE_FLAG_HSKEW                    (1<<9) /* hskew provided */
+#define DRM_MODE_FLAG_BCAST                    (1<<10)
+#define DRM_MODE_FLAG_PIXMUX                   (1<<11)
+#define DRM_MODE_FLAG_DBLCLK                   (1<<12)
+#define DRM_MODE_FLAG_CLKDIV2                  (1<<13)
+ /*
+  * When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
+  * (define not exposed to user space).
+  */
+#define DRM_MODE_FLAG_3D_MASK                  (0x1f<<14)
+#define  DRM_MODE_FLAG_3D_NONE                 (0<<14)
+#define  DRM_MODE_FLAG_3D_FRAME_PACKING                (1<<14)
+#define  DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE    (2<<14)
+#define  DRM_MODE_FLAG_3D_LINE_ALTERNATIVE     (3<<14)
+#define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL    (4<<14)
+#define  DRM_MODE_FLAG_3D_L_DEPTH              (5<<14)
+#define  DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH        (6<<14)
+#define  DRM_MODE_FLAG_3D_TOP_AND_BOTTOM       (7<<14)
+#define  DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF    (8<<14)
+
 
 /* DPMS flags */
 /* bit compatible with the xorg definitions. */
@@ -165,6 +180,7 @@ struct drm_mode_get_plane_res {
 #define DRM_MODE_ENCODER_LVDS  3
 #define DRM_MODE_ENCODER_TVDAC 4
 #define DRM_MODE_ENCODER_VIRTUAL 5
+#define DRM_MODE_ENCODER_DSI   6
 
 struct drm_mode_get_encoder {
        __u32 encoder_id;
@@ -203,6 +219,7 @@ struct drm_mode_get_encoder {
 #define DRM_MODE_CONNECTOR_TV          13
 #define DRM_MODE_CONNECTOR_eDP         14
 #define DRM_MODE_CONNECTOR_VIRTUAL      15
+#define DRM_MODE_CONNECTOR_DSI         16
 
 struct drm_mode_get_connector {
 
@@ -223,6 +240,8 @@ struct drm_mode_get_connector {
        __u32 connection;
        __u32 mm_width, mm_height; /**< HxW in millimeters */
        __u32 subpixel;
+
+       __u32 pad;
 };
 
 #define DRM_MODE_PROP_PENDING  (1<<0)
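
Because the stereo layout is a multi-bit field rather than a single flag, clients are expected to mask with DRM_MODE_FLAG_3D_MASK before comparing; a short sketch under that assumption:

#include <stdbool.h>
#include <drm/drm_mode.h>

/* True when a probed mode advertises top-and-bottom frame packing. */
static bool mode_is_top_and_bottom(const struct drm_mode_modeinfo *mode)
{
	return (mode->flags & DRM_MODE_FLAG_3D_MASK) ==
	       DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
}
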
index 55bb5729bd78a534da094f07fe74714e162a2c9c..3a4e97bd860771b6ad8e3b5d026a35200fcad4d3 100644 (file)
  *
  * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
  *     event from the gpu l3 cache. Additional information supplied is ROW,
- *     BANK, SUBBANK of the affected cacheline. Userspace should keep track of
- *     these events and if a specific cache-line seems to have a persistent
- *     error remap it with the l3 remapping tool supplied in intel-gpu-tools.
- *     The value supplied with the event is always 1.
+ *     BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
+ *     track of these events and if a specific cache-line seems to have a
+ *     persistent error remap it with the l3 remapping tool supplied in
+ *     intel-gpu-tools.  The value supplied with the event is always 1.
  *
  * I915_ERROR_UEVENT - Generated upon error detection, currently only via
  *     hangcheck. The error detection event is a good indicator of when things
index 73bde4eaf16c2042038ce6435072ab8d39632e41..0f8575f58db8aa343e4c24eb107d4322fbf9798a 100644 (file)
@@ -19,6 +19,9 @@
 
 #include <drm/drm.h>
 
+#define DRM_TEGRA_GEM_CREATE_TILED     (1 << 0)
+#define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
+
 struct drm_tegra_gem_create {
        __u64 size;
        __u32 flags;
index 75cef3fd97add201693b6bf21f4e2f1754c96f43..db0b825b48109f2e8cc011f211749043757a33e5 100644 (file)
@@ -329,7 +329,6 @@ enum {
 #define AUDIT_ARCH_ARMEB       (EM_ARM)
 #define AUDIT_ARCH_CRIS                (EM_CRIS|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_FRV         (EM_FRV)
-#define AUDIT_ARCH_H8300       (EM_H8_300)
 #define AUDIT_ARCH_I386                (EM_386|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_IA64                (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_M32R                (EM_M32R)
index 3ebe387fea4d809790a26bbfc9aa3cea02f20cfb..382251a1d21403acd817577d83c21f47d0389865 100644 (file)
@@ -7,6 +7,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_BCM_H
index 7b7148bded711b1346a2274a09af5b505e7af72e..b632045453202074ada263866052bc2a806e85bc 100644 (file)
@@ -7,6 +7,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_ERROR_H
index 4e27c82b564a13a6a4b55860c3cb78b51f86dc7e..844c8964bdfee3a3f4a7308bf0fd832e82754a89 100644 (file)
@@ -7,6 +7,38 @@
  * Copyright (c) 2011 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_GW_H
index 14966ddb7df1c5b056578ff622c77cd6400e8094..df944ed206a8e4bd23d9f9a3ec9e08c9e0ad6e3b 100644 (file)
@@ -5,6 +5,14 @@
  *
  * Copyright (c) 2009 Wolfgang Grandegger <wg@grandegger.com>
  *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
  */
 
 #ifndef CAN_NETLINK_H
index a814062b07191819c80812b843a46bd80870344b..c7d8c334e0ce26838c7cc611bd3ad1eb5a31a6c4 100644 (file)
@@ -8,6 +8,38 @@
  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
  * All rights reserved.
  *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
  */
 
 #ifndef CAN_RAW_H
index 59c17a2d38ad4ce95be09055423907beed49b9a2..01529bd964387b4d571bbf689bf2bbf98328692d 100644 (file)
@@ -31,7 +31,6 @@
 #define EM_CRIS                76      /* Axis Communications 32-bit embedded processor */
 #define EM_V850                87      /* NEC v850 */
 #define EM_M32R                88      /* Renesas M32R */
-#define EM_H8_300      46      /* Renesas H8/300,300H,H8S */
 #define EM_MN10300     89      /* Panasonic/MEI MN10300, AM33 */
 #define EM_BLACKFIN     106     /* ADI Blackfin Processor */
 #define EM_TI_C6000    140     /* TI C6X DSPs */
index a17edda8a7816c2ba92e7c1b7aee3cce23b93f8f..9635a62f6f89c781a9086af47df6382c59fea161 100644 (file)
@@ -91,6 +91,8 @@
 #define BOND_XMIT_POLICY_LAYER2                0 /* layer 2 (MAC only), default */
 #define BOND_XMIT_POLICY_LAYER34       1 /* layer 3+4 (IP ^ (TCP || UDP)) */
 #define BOND_XMIT_POLICY_LAYER23       2 /* layer 2+3 (IP ^ MAC) */
+#define BOND_XMIT_POLICY_ENCAP23       3 /* encapsulated layer 2+3 */
+#define BOND_XMIT_POLICY_ENCAP34       4 /* encapsulated layer 3+4 */
 
 typedef struct ifbond {
        __s32 bond_mode;
index 80394e8dc3a348ed6ac7779a410c0bcda80ac284..8a1e346243b7cdcb0e9ab676e3ddcf086284d90f 100644 (file)
@@ -325,6 +325,17 @@ struct ifla_vxlan_port_range {
        __be16  high;
 };
 
+/* Bonding section */
+
+enum {
+       IFLA_BOND_UNSPEC,
+       IFLA_BOND_MODE,
+       IFLA_BOND_ACTIVE_SLAVE,
+       __IFLA_BOND_MAX,
+};
+
+#define IFLA_BOND_MAX  (__IFLA_BOND_MAX - 1)
+
 /* SR-IOV virtual function management section */
 
 enum {
index e0cecd2eabdc5f2289a1940bf3d00ad8ebc70ee8..6edc6b68badd5385d2fdea6e5f524714c4f3cb90 100644 (file)
@@ -21,6 +21,7 @@ enum {
        LO_FLAGS_READ_ONLY      = 1,
        LO_FLAGS_AUTOCLEAR      = 4,
        LO_FLAGS_PARTSCAN       = 8,
+       LO_FLAGS_USE_AIO        = 16,
 };
 
 #include <asm/posix_types.h>   /* for __kernel_old_dev_t */
index 6a8ca98c9a962ee67fbcf3da94fe0222f6885c2c..620252e69b44a525b3fae1419f66f631d67e2de3 100644 (file)
@@ -54,6 +54,7 @@
 #define ACSI_MAJOR             28
 #define AZTECH_CDROM_MAJOR     29
 #define FB_MAJOR               29   /* /dev/fb* framebuffers */
+#define MTD_BLOCK_MAJOR                31
 #define CM206_CDROM_MAJOR      32
 #define IDE2_MAJOR             33
 #define IDE3_MAJOR             34
 #define IDE6_MAJOR             88
 #define IDE7_MAJOR             89
 #define IDE8_MAJOR             90
+#define MTD_CHAR_MAJOR         90
 #define IDE9_MAJOR             91
 
 #define DASD_MAJOR             94
index 174915420d3fe8231dc151519ceac551aceb148e..17c3af2c4bb958b636ec8553056709769dd9d73b 100644 (file)
@@ -5,6 +5,8 @@ header-y += nf_conntrack_ftp.h
 header-y += nf_conntrack_sctp.h
 header-y += nf_conntrack_tcp.h
 header-y += nf_conntrack_tuple_common.h
+header-y += nf_tables.h
+header-y += nf_tables_compat.h
 header-y += nf_nat.h
 header-y += nfnetlink.h
 header-y += nfnetlink_acct.h
index 8024cdf13b700560e9bd0c1f3df1bacbdd90b900..25d3b2f79c022e92cfd8f851e0f9ae8a2f7b731a 100644 (file)
 #ifndef _UAPI_IP_SET_H
 #define _UAPI_IP_SET_H
 
-
 #include <linux/types.h>
 
 /* The protocol version */
 #define IPSET_PROTOCOL         6
 
+/* The maximum permissible comment length we will accept over netlink */
+#define IPSET_MAX_COMMENT_SIZE 255
+
 /* The max length of strings including NUL: set and type identifiers */
 #define IPSET_MAXNAMELEN       32
 
@@ -110,6 +112,7 @@ enum {
        IPSET_ATTR_IFACE,
        IPSET_ATTR_BYTES,
        IPSET_ATTR_PACKETS,
+       IPSET_ATTR_COMMENT,
        __IPSET_ATTR_ADT_MAX,
 };
 #define IPSET_ATTR_ADT_MAX     (__IPSET_ATTR_ADT_MAX - 1)
@@ -140,6 +143,7 @@ enum ipset_errno {
        IPSET_ERR_IPADDR_IPV4,
        IPSET_ERR_IPADDR_IPV6,
        IPSET_ERR_COUNTER,
+       IPSET_ERR_COMMENT,
 
        /* Type specific error codes */
        IPSET_ERR_TYPE_SPECIFIC = 4352,
@@ -176,6 +180,8 @@ enum ipset_cadt_flags {
        IPSET_FLAG_NOMATCH      = (1 << IPSET_FLAG_BIT_NOMATCH),
        IPSET_FLAG_BIT_WITH_COUNTERS = 3,
        IPSET_FLAG_WITH_COUNTERS = (1 << IPSET_FLAG_BIT_WITH_COUNTERS),
+       IPSET_FLAG_BIT_WITH_COMMENT = 4,
+       IPSET_FLAG_WITH_COMMENT = (1 << IPSET_FLAG_BIT_WITH_COMMENT),
        IPSET_FLAG_CADT_MAX     = 15,
 };
 
@@ -250,6 +256,14 @@ struct ip_set_req_get_set {
 #define IP_SET_OP_GET_BYINDEX  0x00000007      /* Get set name by index */
 /* Uses ip_set_req_get_set */
 
+#define IP_SET_OP_GET_FNAME    0x00000008      /* Get set index and family */
+struct ip_set_req_get_set_family {
+       unsigned int op;
+       unsigned int version;
+       unsigned int family;
+       union ip_set_name_index set;
+};
+
 #define IP_SET_OP_VERSION      0x00000100      /* Ask kernel version */
 struct ip_set_req_version {
        unsigned int op;
index 8dd803818ebe34c63db7792dadf4614632bdd93e..319f47128db8c117563efa26d4cd54e1c3ac2cc2 100644 (file)
@@ -25,6 +25,10 @@ enum ip_conntrack_info {
        IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
 };
 
+#define NF_CT_STATE_INVALID_BIT                        (1 << 0)
+#define NF_CT_STATE_BIT(ctinfo)                        (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
+#define NF_CT_STATE_UNTRACKED_BIT              (1 << (IP_CT_NUMBER + 1))
+
 /* Bitset representing status of connection. */
 enum ip_conntrack_status {
        /* It's an expected connection: bit 0 set.  This bit never changed */
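
The new NF_CT_STATE_* macros turn an ip_conntrack_info value into the bit position used when matching conntrack state (the +1 keeps bit 0 free for the invalid state). A small illustrative sketch of building a match mask:

#include <stdint.h>
#include <linux/netfilter/nf_conntrack_common.h>

/* State mask matching NEW or ESTABLISHED connections. */
static uint32_t example_state_mask(void)
{
	return NF_CT_STATE_BIT(IP_CT_NEW) | NF_CT_STATE_BIT(IP_CT_ESTABLISHED);
}
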
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
new file mode 100644 (file)
index 0000000..fbfd229
--- /dev/null
@@ -0,0 +1,718 @@
+#ifndef _LINUX_NF_TABLES_H
+#define _LINUX_NF_TABLES_H
+
+#define NFT_CHAIN_MAXNAMELEN 32
+
+enum nft_registers {
+       NFT_REG_VERDICT,
+       NFT_REG_1,
+       NFT_REG_2,
+       NFT_REG_3,
+       NFT_REG_4,
+       __NFT_REG_MAX
+};
+#define NFT_REG_MAX    (__NFT_REG_MAX - 1)
+
+/**
+ * enum nft_verdicts - nf_tables internal verdicts
+ *
+ * @NFT_CONTINUE: continue evaluation of the current rule
+ * @NFT_BREAK: terminate evaluation of the current rule
+ * @NFT_JUMP: push the current chain on the jump stack and jump to a chain
+ * @NFT_GOTO: jump to a chain without pushing the current chain on the jump stack
+ * @NFT_RETURN: return to the topmost chain on the jump stack
+ *
+ * The nf_tables verdicts share their numeric space with the netfilter verdicts.
+ */
+enum nft_verdicts {
+       NFT_CONTINUE    = -1,
+       NFT_BREAK       = -2,
+       NFT_JUMP        = -3,
+       NFT_GOTO        = -4,
+       NFT_RETURN      = -5,
+};
+
+/**
+ * enum nf_tables_msg_types - nf_tables netlink message types
+ *
+ * @NFT_MSG_NEWTABLE: create a new table (enum nft_table_attributes)
+ * @NFT_MSG_GETTABLE: get a table (enum nft_table_attributes)
+ * @NFT_MSG_DELTABLE: delete a table (enum nft_table_attributes)
+ * @NFT_MSG_NEWCHAIN: create a new chain (enum nft_chain_attributes)
+ * @NFT_MSG_GETCHAIN: get a chain (enum nft_chain_attributes)
+ * @NFT_MSG_DELCHAIN: delete a chain (enum nft_chain_attributes)
+ * @NFT_MSG_NEWRULE: create a new rule (enum nft_rule_attributes)
+ * @NFT_MSG_GETRULE: get a rule (enum nft_rule_attributes)
+ * @NFT_MSG_DELRULE: delete a rule (enum nft_rule_attributes)
+ * @NFT_MSG_NEWSET: create a new set (enum nft_set_attributes)
+ * @NFT_MSG_GETSET: get a set (enum nft_set_attributes)
+ * @NFT_MSG_DELSET: delete a set (enum nft_set_attributes)
+ * @NFT_MSG_NEWSETELEM: create a new set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_GETSETELEM: get a set element (enum nft_set_elem_attributes)
+ * @NFT_MSG_DELSETELEM: delete a set element (enum nft_set_elem_attributes)
+ */
+enum nf_tables_msg_types {
+       NFT_MSG_NEWTABLE,
+       NFT_MSG_GETTABLE,
+       NFT_MSG_DELTABLE,
+       NFT_MSG_NEWCHAIN,
+       NFT_MSG_GETCHAIN,
+       NFT_MSG_DELCHAIN,
+       NFT_MSG_NEWRULE,
+       NFT_MSG_GETRULE,
+       NFT_MSG_DELRULE,
+       NFT_MSG_NEWSET,
+       NFT_MSG_GETSET,
+       NFT_MSG_DELSET,
+       NFT_MSG_NEWSETELEM,
+       NFT_MSG_GETSETELEM,
+       NFT_MSG_DELSETELEM,
+       NFT_MSG_MAX,
+};
+
+/**
+ * enum nft_list_attributes - nf_tables generic list netlink attributes
+ *
+ * @NFTA_LIST_ELEM: list element (NLA_NESTED)
+ */
+enum nft_list_attributes {
+       NFTA_LIST_UNPEC,
+       NFTA_LIST_ELEM,
+       __NFTA_LIST_MAX
+};
+#define NFTA_LIST_MAX          (__NFTA_LIST_MAX - 1)
+
+/**
+ * enum nft_hook_attributes - nf_tables netfilter hook netlink attributes
+ *
+ * @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
+ * @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
+ */
+enum nft_hook_attributes {
+       NFTA_HOOK_UNSPEC,
+       NFTA_HOOK_HOOKNUM,
+       NFTA_HOOK_PRIORITY,
+       __NFTA_HOOK_MAX
+};
+#define NFTA_HOOK_MAX          (__NFTA_HOOK_MAX - 1)
+
+/**
+ * enum nft_table_flags - nf_tables table flags
+ *
+ * @NFT_TABLE_F_DORMANT: this table is not active
+ */
+enum nft_table_flags {
+       NFT_TABLE_F_DORMANT     = 0x1,
+};
+
+/**
+ * enum nft_table_attributes - nf_tables table netlink attributes
+ *
+ * @NFTA_TABLE_NAME: name of the table (NLA_STRING)
+ * @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
+ */
+enum nft_table_attributes {
+       NFTA_TABLE_UNSPEC,
+       NFTA_TABLE_NAME,
+       NFTA_TABLE_FLAGS,
+       __NFTA_TABLE_MAX
+};
+#define NFTA_TABLE_MAX         (__NFTA_TABLE_MAX - 1)
+
+/**
+ * enum nft_chain_attributes - nf_tables chain netlink attributes
+ *
+ * @NFTA_CHAIN_TABLE: name of the table containing the chain (NLA_STRING)
+ * @NFTA_CHAIN_HANDLE: numeric handle of the chain (NLA_U64)
+ * @NFTA_CHAIN_NAME: name of the chain (NLA_STRING)
+ * @NFTA_CHAIN_HOOK: hook specification for basechains (NLA_NESTED: nft_hook_attributes)
+ * @NFTA_CHAIN_POLICY: numeric policy of the chain (NLA_U32)
+ * @NFTA_CHAIN_USE: number of references to this chain (NLA_U32)
+ * @NFTA_CHAIN_TYPE: type name of the chain (NLA_NUL_STRING)
+ * @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes)
+ */
+enum nft_chain_attributes {
+       NFTA_CHAIN_UNSPEC,
+       NFTA_CHAIN_TABLE,
+       NFTA_CHAIN_HANDLE,
+       NFTA_CHAIN_NAME,
+       NFTA_CHAIN_HOOK,
+       NFTA_CHAIN_POLICY,
+       NFTA_CHAIN_USE,
+       NFTA_CHAIN_TYPE,
+       NFTA_CHAIN_COUNTERS,
+       __NFTA_CHAIN_MAX
+};
+#define NFTA_CHAIN_MAX         (__NFTA_CHAIN_MAX - 1)
+
+/**
+ * enum nft_rule_attributes - nf_tables rule netlink attributes
+ *
+ * @NFTA_RULE_TABLE: name of the table containing the rule (NLA_STRING)
+ * @NFTA_RULE_CHAIN: name of the chain containing the rule (NLA_STRING)
+ * @NFTA_RULE_HANDLE: numeric handle of the rule (NLA_U64)
+ * @NFTA_RULE_EXPRESSIONS: list of expressions (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_RULE_COMPAT: compatibility specifications of the rule (NLA_NESTED: nft_rule_compat_attributes)
+ * @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64)
+ */
+enum nft_rule_attributes {
+       NFTA_RULE_UNSPEC,
+       NFTA_RULE_TABLE,
+       NFTA_RULE_CHAIN,
+       NFTA_RULE_HANDLE,
+       NFTA_RULE_EXPRESSIONS,
+       NFTA_RULE_COMPAT,
+       NFTA_RULE_POSITION,
+       __NFTA_RULE_MAX
+};
+#define NFTA_RULE_MAX          (__NFTA_RULE_MAX - 1)
+
+/**
+ * enum nft_rule_compat_flags - nf_tables rule compat flags
+ *
+ * @NFT_RULE_COMPAT_F_INV: invert the check result
+ */
+enum nft_rule_compat_flags {
+       NFT_RULE_COMPAT_F_INV   = (1 << 1),
+       NFT_RULE_COMPAT_F_MASK  = NFT_RULE_COMPAT_F_INV,
+};
+
+/**
+ * enum nft_rule_compat_attributes - nf_tables rule compat attributes
+ *
+ * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
+ * @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
+ */
+enum nft_rule_compat_attributes {
+       NFTA_RULE_COMPAT_UNSPEC,
+       NFTA_RULE_COMPAT_PROTO,
+       NFTA_RULE_COMPAT_FLAGS,
+       __NFTA_RULE_COMPAT_MAX
+};
+#define NFTA_RULE_COMPAT_MAX   (__NFTA_RULE_COMPAT_MAX - 1)
+
+/**
+ * enum nft_set_flags - nf_tables set flags
+ *
+ * @NFT_SET_ANONYMOUS: name allocation, automatic cleanup on unlink
+ * @NFT_SET_CONSTANT: set contents may not change while bound
+ * @NFT_SET_INTERVAL: set contains intervals
+ * @NFT_SET_MAP: set is used as a dictionary
+ */
+enum nft_set_flags {
+       NFT_SET_ANONYMOUS               = 0x1,
+       NFT_SET_CONSTANT                = 0x2,
+       NFT_SET_INTERVAL                = 0x4,
+       NFT_SET_MAP                     = 0x8,
+};
+
+/**
+ * enum nft_set_attributes - nf_tables set netlink attributes
+ *
+ * @NFTA_SET_TABLE: table name (NLA_STRING)
+ * @NFTA_SET_NAME: set name (NLA_STRING)
+ * @NFTA_SET_FLAGS: bitmask of enum nft_set_flags (NLA_U32)
+ * @NFTA_SET_KEY_TYPE: key data type, informational purpose only (NLA_U32)
+ * @NFTA_SET_KEY_LEN: key data length (NLA_U32)
+ * @NFTA_SET_DATA_TYPE: mapping data type (NLA_U32)
+ * @NFTA_SET_DATA_LEN: mapping data length (NLA_U32)
+ */
+enum nft_set_attributes {
+       NFTA_SET_UNSPEC,
+       NFTA_SET_TABLE,
+       NFTA_SET_NAME,
+       NFTA_SET_FLAGS,
+       NFTA_SET_KEY_TYPE,
+       NFTA_SET_KEY_LEN,
+       NFTA_SET_DATA_TYPE,
+       NFTA_SET_DATA_LEN,
+       __NFTA_SET_MAX
+};
+#define NFTA_SET_MAX           (__NFTA_SET_MAX - 1)
+
+/**
+ * enum nft_set_elem_flags - nf_tables set element flags
+ *
+ * @NFT_SET_ELEM_INTERVAL_END: element ends the previous interval
+ */
+enum nft_set_elem_flags {
+       NFT_SET_ELEM_INTERVAL_END       = 0x1,
+};
+
+/**
+ * enum nft_set_elem_attributes - nf_tables set element netlink attributes
+ *
+ * @NFTA_SET_ELEM_KEY: key value (NLA_NESTED: nft_data)
+ * @NFTA_SET_ELEM_DATA: data value of mapping (NLA_NESTED: nft_data_attributes)
+ * @NFTA_SET_ELEM_FLAGS: bitmask of nft_set_elem_flags (NLA_U32)
+ */
+enum nft_set_elem_attributes {
+       NFTA_SET_ELEM_UNSPEC,
+       NFTA_SET_ELEM_KEY,
+       NFTA_SET_ELEM_DATA,
+       NFTA_SET_ELEM_FLAGS,
+       __NFTA_SET_ELEM_MAX
+};
+#define NFTA_SET_ELEM_MAX      (__NFTA_SET_ELEM_MAX - 1)
+
+/**
+ * enum nft_set_elem_list_attributes - nf_tables set element list netlink attributes
+ *
+ * @NFTA_SET_ELEM_LIST_TABLE: table of the set to be changed (NLA_STRING)
+ * @NFTA_SET_ELEM_LIST_SET: name of the set to be changed (NLA_STRING)
+ * @NFTA_SET_ELEM_LIST_ELEMENTS: list of set elements (NLA_NESTED: nft_set_elem_attributes)
+ */
+enum nft_set_elem_list_attributes {
+       NFTA_SET_ELEM_LIST_UNSPEC,
+       NFTA_SET_ELEM_LIST_TABLE,
+       NFTA_SET_ELEM_LIST_SET,
+       NFTA_SET_ELEM_LIST_ELEMENTS,
+       __NFTA_SET_ELEM_LIST_MAX
+};
+#define NFTA_SET_ELEM_LIST_MAX (__NFTA_SET_ELEM_LIST_MAX - 1)
+
+/**
+ * enum nft_data_types - nf_tables data types
+ *
+ * @NFT_DATA_VALUE: generic data
+ * @NFT_DATA_VERDICT: netfilter verdict
+ *
+ * The type of data is usually determined by the kernel directly and is not
+ * explicitly specified by userspace. The only exception is sets, where
+ * userspace specifies the key and mapping data types.
+ *
+ * The values 0xffffff00-0xffffffff are reserved for internally used types.
+ * The remaining range can be freely used by userspace to encode types, all
+ * values are equivalent to NFT_DATA_VALUE.
+ */
+enum nft_data_types {
+       NFT_DATA_VALUE,
+       NFT_DATA_VERDICT        = 0xffffff00U,
+};
+
+#define NFT_DATA_RESERVED_MASK 0xffffff00U
+
+/**
+ * enum nft_data_attributes - nf_tables data netlink attributes
+ *
+ * @NFTA_DATA_VALUE: generic data (NLA_BINARY)
+ * @NFTA_DATA_VERDICT: nf_tables verdict (NLA_NESTED: nft_verdict_attributes)
+ */
+enum nft_data_attributes {
+       NFTA_DATA_UNSPEC,
+       NFTA_DATA_VALUE,
+       NFTA_DATA_VERDICT,
+       __NFTA_DATA_MAX
+};
+#define NFTA_DATA_MAX          (__NFTA_DATA_MAX - 1)
+
+/**
+ * enum nft_verdict_attributes - nf_tables verdict netlink attributes
+ *
+ * @NFTA_VERDICT_CODE: nf_tables verdict (NLA_U32: enum nft_verdicts)
+ * @NFTA_VERDICT_CHAIN: jump target chain name (NLA_STRING)
+ */
+enum nft_verdict_attributes {
+       NFTA_VERDICT_UNSPEC,
+       NFTA_VERDICT_CODE,
+       NFTA_VERDICT_CHAIN,
+       __NFTA_VERDICT_MAX
+};
+#define NFTA_VERDICT_MAX       (__NFTA_VERDICT_MAX - 1)
+
+/**
+ * enum nft_expr_attributes - nf_tables expression netlink attributes
+ *
+ * @NFTA_EXPR_NAME: name of the expression type (NLA_STRING)
+ * @NFTA_EXPR_DATA: type specific data (NLA_NESTED)
+ */
+enum nft_expr_attributes {
+       NFTA_EXPR_UNSPEC,
+       NFTA_EXPR_NAME,
+       NFTA_EXPR_DATA,
+       __NFTA_EXPR_MAX
+};
+#define NFTA_EXPR_MAX          (__NFTA_EXPR_MAX - 1)
+
+/**
+ * enum nft_immediate_attributes - nf_tables immediate expression netlink attributes
+ *
+ * @NFTA_IMMEDIATE_DREG: destination register to load data into (NLA_U32)
+ * @NFTA_IMMEDIATE_DATA: data to load (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_immediate_attributes {
+       NFTA_IMMEDIATE_UNSPEC,
+       NFTA_IMMEDIATE_DREG,
+       NFTA_IMMEDIATE_DATA,
+       __NFTA_IMMEDIATE_MAX
+};
+#define NFTA_IMMEDIATE_MAX     (__NFTA_IMMEDIATE_MAX - 1)
+
+/**
+ * enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes
+ *
+ * @NFTA_BITWISE_SREG: source register (NLA_U32: nft_registers)
+ * @NFTA_BITWISE_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_BITWISE_LEN: length of operands (NLA_U32)
+ * @NFTA_BITWISE_MASK: mask value (NLA_NESTED: nft_data_attributes)
+ * @NFTA_BITWISE_XOR: xor value (NLA_NESTED: nft_data_attributes)
+ *
+ * The bitwise expression performs the following operation:
+ *
+ * dreg = (sreg & mask) ^ xor
+ *
+ * which allows expressing all bitwise operations:
+ *
+ *             mask    xor
+ * NOT:                1       1
+ * OR:         0       x
+ * XOR:                1       x
+ * AND:                x       0
+ */
+enum nft_bitwise_attributes {
+       NFTA_BITWISE_UNSPEC,
+       NFTA_BITWISE_SREG,
+       NFTA_BITWISE_DREG,
+       NFTA_BITWISE_LEN,
+       NFTA_BITWISE_MASK,
+       NFTA_BITWISE_XOR,
+       __NFTA_BITWISE_MAX
+};
+#define NFTA_BITWISE_MAX       (__NFTA_BITWISE_MAX - 1)
+
+/**
+ * enum nft_byteorder_ops - nf_tables byteorder operators
+ *
+ * @NFT_BYTEORDER_NTOH: network to host operator
+ * @NFT_BYTEORDER_HTON: host to network operator
+ */
+enum nft_byteorder_ops {
+       NFT_BYTEORDER_NTOH,
+       NFT_BYTEORDER_HTON,
+};
+
+/**
+ * enum nft_byteorder_attributes - nf_tables byteorder expression netlink attributes
+ *
+ * @NFTA_BYTEORDER_SREG: source register (NLA_U32: nft_registers)
+ * @NFTA_BYTEORDER_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_BYTEORDER_OP: operator (NLA_U32: enum nft_byteorder_ops)
+ * @NFTA_BYTEORDER_LEN: length of the data (NLA_U32)
+ * @NFTA_BYTEORDER_SIZE: data size in bytes (NLA_U32: 2 or 4)
+ */
+enum nft_byteorder_attributes {
+       NFTA_BYTEORDER_UNSPEC,
+       NFTA_BYTEORDER_SREG,
+       NFTA_BYTEORDER_DREG,
+       NFTA_BYTEORDER_OP,
+       NFTA_BYTEORDER_LEN,
+       NFTA_BYTEORDER_SIZE,
+       __NFTA_BYTEORDER_MAX
+};
+#define NFTA_BYTEORDER_MAX     (__NFTA_BYTEORDER_MAX - 1)
+
+/**
+ * enum nft_cmp_ops - nf_tables relational operator
+ *
+ * @NFT_CMP_EQ: equal
+ * @NFT_CMP_NEQ: not equal
+ * @NFT_CMP_LT: less than
+ * @NFT_CMP_LTE: less than or equal to
+ * @NFT_CMP_GT: greater than
+ * @NFT_CMP_GTE: greater than or equal to
+ */
+enum nft_cmp_ops {
+       NFT_CMP_EQ,
+       NFT_CMP_NEQ,
+       NFT_CMP_LT,
+       NFT_CMP_LTE,
+       NFT_CMP_GT,
+       NFT_CMP_GTE,
+};
+
+/**
+ * enum nft_cmp_attributes - nf_tables cmp expression netlink attributes
+ *
+ * @NFTA_CMP_SREG: source register of data to compare (NLA_U32: nft_registers)
+ * @NFTA_CMP_OP: cmp operation (NLA_U32: nft_cmp_ops)
+ * @NFTA_CMP_DATA: data to compare against (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_cmp_attributes {
+       NFTA_CMP_UNSPEC,
+       NFTA_CMP_SREG,
+       NFTA_CMP_OP,
+       NFTA_CMP_DATA,
+       __NFTA_CMP_MAX
+};
+#define NFTA_CMP_MAX           (__NFTA_CMP_MAX - 1)
+
+/**
+ * enum nft_lookup_attributes - nf_tables set lookup expression netlink attributes
+ *
+ * @NFTA_LOOKUP_SET: name of the set to look up in (NLA_STRING)
+ * @NFTA_LOOKUP_SREG: source register of the data to look for (NLA_U32: nft_registers)
+ * @NFTA_LOOKUP_DREG: destination register (NLA_U32: nft_registers)
+ */
+enum nft_lookup_attributes {
+       NFTA_LOOKUP_UNSPEC,
+       NFTA_LOOKUP_SET,
+       NFTA_LOOKUP_SREG,
+       NFTA_LOOKUP_DREG,
+       __NFTA_LOOKUP_MAX
+};
+#define NFTA_LOOKUP_MAX                (__NFTA_LOOKUP_MAX - 1)
+
+/**
+ * enum nft_payload_bases - nf_tables payload expression offset bases
+ *
+ * @NFT_PAYLOAD_LL_HEADER: link layer header
+ * @NFT_PAYLOAD_NETWORK_HEADER: network header
+ * @NFT_PAYLOAD_TRANSPORT_HEADER: transport header
+ */
+enum nft_payload_bases {
+       NFT_PAYLOAD_LL_HEADER,
+       NFT_PAYLOAD_NETWORK_HEADER,
+       NFT_PAYLOAD_TRANSPORT_HEADER,
+};
+
+/**
+ * enum nft_payload_attributes - nf_tables payload expression netlink attributes
+ *
+ * @NFTA_PAYLOAD_DREG: destination register to load data into (NLA_U32: nft_registers)
+ * @NFTA_PAYLOAD_BASE: payload base (NLA_U32: nft_payload_bases)
+ * @NFTA_PAYLOAD_OFFSET: payload offset relative to base (NLA_U32)
+ * @NFTA_PAYLOAD_LEN: payload length (NLA_U32)
+ */
+enum nft_payload_attributes {
+       NFTA_PAYLOAD_UNSPEC,
+       NFTA_PAYLOAD_DREG,
+       NFTA_PAYLOAD_BASE,
+       NFTA_PAYLOAD_OFFSET,
+       NFTA_PAYLOAD_LEN,
+       __NFTA_PAYLOAD_MAX
+};
+#define NFTA_PAYLOAD_MAX       (__NFTA_PAYLOAD_MAX - 1)
+
+/**
+ * enum nft_exthdr_attributes - nf_tables IPv6 extension header expression netlink attributes
+ *
+ * @NFTA_EXTHDR_DREG: destination register (NLA_U32: nft_registers)
+ * @NFTA_EXTHDR_TYPE: extension header type (NLA_U8)
+ * @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32)
+ * @NFTA_EXTHDR_LEN: extension header length (NLA_U32)
+ */
+enum nft_exthdr_attributes {
+       NFTA_EXTHDR_UNSPEC,
+       NFTA_EXTHDR_DREG,
+       NFTA_EXTHDR_TYPE,
+       NFTA_EXTHDR_OFFSET,
+       NFTA_EXTHDR_LEN,
+       __NFTA_EXTHDR_MAX
+};
+#define NFTA_EXTHDR_MAX                (__NFTA_EXTHDR_MAX - 1)
+
+/**
+ * enum nft_meta_keys - nf_tables meta expression keys
+ *
+ * @NFT_META_LEN: packet length (skb->len)
+ * @NFT_META_PROTOCOL: packet ethertype protocol (skb->protocol), invalid in OUTPUT
+ * @NFT_META_PRIORITY: packet priority (skb->priority)
+ * @NFT_META_MARK: packet mark (skb->mark)
+ * @NFT_META_IIF: packet input interface index (dev->ifindex)
+ * @NFT_META_OIF: packet output interface index (dev->ifindex)
+ * @NFT_META_IIFNAME: packet input interface name (dev->name)
+ * @NFT_META_OIFNAME: packet output interface name (dev->name)
+ * @NFT_META_IIFTYPE: packet input interface type (dev->type)
+ * @NFT_META_OIFTYPE: packet output interface type (dev->type)
+ * @NFT_META_SKUID: originating socket UID (fsuid)
+ * @NFT_META_SKGID: originating socket GID (fsgid)
+ * @NFT_META_NFTRACE: packet nftrace bit
+ * @NFT_META_RTCLASSID: realm value of packet's route (skb->dst->tclassid)
+ * @NFT_META_SECMARK: packet secmark (skb->secmark)
+ */
+enum nft_meta_keys {
+       NFT_META_LEN,
+       NFT_META_PROTOCOL,
+       NFT_META_PRIORITY,
+       NFT_META_MARK,
+       NFT_META_IIF,
+       NFT_META_OIF,
+       NFT_META_IIFNAME,
+       NFT_META_OIFNAME,
+       NFT_META_IIFTYPE,
+       NFT_META_OIFTYPE,
+       NFT_META_SKUID,
+       NFT_META_SKGID,
+       NFT_META_NFTRACE,
+       NFT_META_RTCLASSID,
+       NFT_META_SECMARK,
+};
+
+/**
+ * enum nft_meta_attributes - nf_tables meta expression netlink attributes
+ *
+ * @NFTA_META_DREG: destination register (NLA_U32)
+ * @NFTA_META_KEY: meta data item to load (NLA_U32: nft_meta_keys)
+ */
+enum nft_meta_attributes {
+       NFTA_META_UNSPEC,
+       NFTA_META_DREG,
+       NFTA_META_KEY,
+       __NFTA_META_MAX
+};
+#define NFTA_META_MAX          (__NFTA_META_MAX - 1)
+
+/**
+ * enum nft_ct_keys - nf_tables ct expression keys
+ *
+ * @NFT_CT_STATE: conntrack state (bitmask of enum ip_conntrack_info)
+ * @NFT_CT_DIRECTION: conntrack direction (enum ip_conntrack_dir)
+ * @NFT_CT_STATUS: conntrack status (bitmask of enum ip_conntrack_status)
+ * @NFT_CT_MARK: conntrack mark value
+ * @NFT_CT_SECMARK: conntrack secmark value
+ * @NFT_CT_EXPIRATION: relative conntrack expiration time in ms
+ * @NFT_CT_HELPER: connection tracking helper assigned to conntrack
+ * @NFT_CT_L3PROTOCOL: conntrack layer 3 protocol
+ * @NFT_CT_SRC: conntrack layer 3 protocol source (IPv4/IPv6 address)
+ * @NFT_CT_DST: conntrack layer 3 protocol destination (IPv4/IPv6 address)
+ * @NFT_CT_PROTOCOL: conntrack layer 4 protocol
+ * @NFT_CT_PROTO_SRC: conntrack layer 4 protocol source
+ * @NFT_CT_PROTO_DST: conntrack layer 4 protocol destination
+ */
+enum nft_ct_keys {
+       NFT_CT_STATE,
+       NFT_CT_DIRECTION,
+       NFT_CT_STATUS,
+       NFT_CT_MARK,
+       NFT_CT_SECMARK,
+       NFT_CT_EXPIRATION,
+       NFT_CT_HELPER,
+       NFT_CT_L3PROTOCOL,
+       NFT_CT_SRC,
+       NFT_CT_DST,
+       NFT_CT_PROTOCOL,
+       NFT_CT_PROTO_SRC,
+       NFT_CT_PROTO_DST,
+};
+
+/**
+ * enum nft_ct_attributes - nf_tables ct expression netlink attributes
+ *
+ * @NFTA_CT_DREG: destination register (NLA_U32)
+ * @NFTA_CT_KEY: conntrack data item to load (NLA_U32: nft_ct_keys)
+ * @NFTA_CT_DIRECTION: direction in case of directional keys (NLA_U8)
+ */
+enum nft_ct_attributes {
+       NFTA_CT_UNSPEC,
+       NFTA_CT_DREG,
+       NFTA_CT_KEY,
+       NFTA_CT_DIRECTION,
+       __NFTA_CT_MAX
+};
+#define NFTA_CT_MAX            (__NFTA_CT_MAX - 1)
+
+/**
+ * enum nft_limit_attributes - nf_tables limit expression netlink attributes
+ *
+ * @NFTA_LIMIT_RATE: refill rate (NLA_U64)
+ * @NFTA_LIMIT_UNIT: refill unit (NLA_U64)
+ */
+enum nft_limit_attributes {
+       NFTA_LIMIT_UNSPEC,
+       NFTA_LIMIT_RATE,
+       NFTA_LIMIT_UNIT,
+       __NFTA_LIMIT_MAX
+};
+#define NFTA_LIMIT_MAX         (__NFTA_LIMIT_MAX - 1)
+
+/**
+ * enum nft_counter_attributes - nf_tables counter expression netlink attributes
+ *
+ * @NFTA_COUNTER_BYTES: number of bytes (NLA_U64)
+ * @NFTA_COUNTER_PACKETS: number of packets (NLA_U64)
+ */
+enum nft_counter_attributes {
+       NFTA_COUNTER_UNSPEC,
+       NFTA_COUNTER_BYTES,
+       NFTA_COUNTER_PACKETS,
+       __NFTA_COUNTER_MAX
+};
+#define NFTA_COUNTER_MAX       (__NFTA_COUNTER_MAX - 1)
+
+/**
+ * enum nft_log_attributes - nf_tables log expression netlink attributes
+ *
+ * @NFTA_LOG_GROUP: netlink group to send messages to (NLA_U32)
+ * @NFTA_LOG_PREFIX: prefix to prepend to log messages (NLA_STRING)
+ * @NFTA_LOG_SNAPLEN: length of payload to include in netlink message (NLA_U32)
+ * @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U32)
+ */
+enum nft_log_attributes {
+       NFTA_LOG_UNSPEC,
+       NFTA_LOG_GROUP,
+       NFTA_LOG_PREFIX,
+       NFTA_LOG_SNAPLEN,
+       NFTA_LOG_QTHRESHOLD,
+       __NFTA_LOG_MAX
+};
+#define NFTA_LOG_MAX           (__NFTA_LOG_MAX - 1)
+
+/**
+ * enum nft_reject_types - nf_tables reject expression reject types
+ *
+ * @NFT_REJECT_ICMP_UNREACH: reject using ICMP unreachable
+ * @NFT_REJECT_TCP_RST: reject using TCP RST
+ */
+enum nft_reject_types {
+       NFT_REJECT_ICMP_UNREACH,
+       NFT_REJECT_TCP_RST,
+};
+
+/**
+ * enum nft_reject_attributes - nf_tables reject expression netlink attributes
+ *
+ * @NFTA_REJECT_TYPE: packet type to use (NLA_U32: nft_reject_types)
+ * @NFTA_REJECT_ICMP_CODE: ICMP code to use (NLA_U8)
+ */
+enum nft_reject_attributes {
+       NFTA_REJECT_UNSPEC,
+       NFTA_REJECT_TYPE,
+       NFTA_REJECT_ICMP_CODE,
+       __NFTA_REJECT_MAX
+};
+#define NFTA_REJECT_MAX                (__NFTA_REJECT_MAX - 1)
+
+/**
+ * enum nft_nat_types - nf_tables nat expression NAT types
+ *
+ * @NFT_NAT_SNAT: source NAT
+ * @NFT_NAT_DNAT: destination NAT
+ */
+enum nft_nat_types {
+       NFT_NAT_SNAT,
+       NFT_NAT_DNAT,
+};
+
+/**
+ * enum nft_nat_attributes - nf_tables nat expression netlink attributes
+ *
+ * @NFTA_NAT_TYPE: NAT type (NLA_U32: nft_nat_types)
+ * @NFTA_NAT_FAMILY: NAT family (NLA_U32)
+ * @NFTA_NAT_REG_ADDR_MIN: source register of address range start (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_ADDR_MAX: source register of address range end (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_PROTO_MIN: source register of proto range start (NLA_U32: nft_registers)
+ * @NFTA_NAT_REG_PROTO_MAX: source register of proto range end (NLA_U32: nft_registers)
+ */
+enum nft_nat_attributes {
+       NFTA_NAT_UNSPEC,
+       NFTA_NAT_TYPE,
+       NFTA_NAT_FAMILY,
+       NFTA_NAT_REG_ADDR_MIN,
+       NFTA_NAT_REG_ADDR_MAX,
+       NFTA_NAT_REG_PROTO_MIN,
+       NFTA_NAT_REG_PROTO_MAX,
+       __NFTA_NAT_MAX
+};
+#define NFTA_NAT_MAX           (__NFTA_NAT_MAX - 1)
+
+#endif /* _LINUX_NF_TABLES_H */
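
Of everything above, the bitwise expression's semantics are the easiest to mis-read: a single dreg = (sreg & mask) ^ xor covers NOT, AND, OR and XOR, as the kernel-doc table states. A plain C restatement, purely illustrative:

#include <stdint.h>

/* Emulate the nft bitwise expression: dreg = (sreg & mask) ^ xor. */
static uint32_t nft_bitwise_emul(uint32_t sreg, uint32_t mask, uint32_t xor_val)
{
	return (sreg & mask) ^ xor_val;
}

/*
 * NOT x    -> mask = ~0u, xor = ~0u  : (x & ~0) ^ ~0 == ~x
 * x OR  c  -> mask = ~c,  xor = c    : clears the c bits, then sets them
 * x XOR c  -> mask = ~0u, xor = c
 * x AND c  -> mask = c,   xor = 0
 */
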
diff --git a/include/uapi/linux/netfilter/nf_tables_compat.h b/include/uapi/linux/netfilter/nf_tables_compat.h
new file mode 100644 (file)
index 0000000..8310f5f
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef _NFT_COMPAT_NFNETLINK_H_
+#define _NFT_COMPAT_NFNETLINK_H_
+
+enum nft_target_attributes {
+       NFTA_TARGET_UNSPEC,
+       NFTA_TARGET_NAME,
+       NFTA_TARGET_REV,
+       NFTA_TARGET_INFO,
+       __NFTA_TARGET_MAX
+};
+#define NFTA_TARGET_MAX                (__NFTA_TARGET_MAX - 1)
+
+enum nft_match_attributes {
+       NFTA_MATCH_UNSPEC,
+       NFTA_MATCH_NAME,
+       NFTA_MATCH_REV,
+       NFTA_MATCH_INFO,
+       __NFTA_MATCH_MAX
+};
+#define NFTA_MATCH_MAX         (__NFTA_MATCH_MAX - 1)
+
+#define NFT_COMPAT_NAME_MAX    32
+
+enum {
+       NFNL_MSG_COMPAT_GET,
+       NFNL_MSG_COMPAT_MAX
+};
+
+enum {
+       NFTA_COMPAT_UNSPEC = 0,
+       NFTA_COMPAT_NAME,
+       NFTA_COMPAT_REV,
+       NFTA_COMPAT_TYPE,
+       __NFTA_COMPAT_MAX,
+};
+#define NFTA_COMPAT_MAX (__NFTA_COMPAT_MAX - 1)
+
+#endif
index 4a4efafad5f46664e1f60914eb52d8eb00805b84..596ddd45253c02b1f611c0655b649e651109485a 100644 (file)
@@ -18,6 +18,8 @@ enum nfnetlink_groups {
 #define NFNLGRP_CONNTRACK_EXP_UPDATE   NFNLGRP_CONNTRACK_EXP_UPDATE
        NFNLGRP_CONNTRACK_EXP_DESTROY,
 #define NFNLGRP_CONNTRACK_EXP_DESTROY  NFNLGRP_CONNTRACK_EXP_DESTROY
+       NFNLGRP_NFTABLES,
+#define NFNLGRP_NFTABLES                NFNLGRP_NFTABLES
        __NFNLGRP_MAX,
 };
 #define NFNLGRP_MAX    (__NFNLGRP_MAX - 1)
@@ -51,6 +53,12 @@ struct nfgenmsg {
 #define NFNL_SUBSYS_ACCT               7
 #define NFNL_SUBSYS_CTNETLINK_TIMEOUT  8
 #define NFNL_SUBSYS_CTHELPER           9
-#define NFNL_SUBSYS_COUNT              10
+#define NFNL_SUBSYS_NFTABLES           10
+#define NFNL_SUBSYS_NFT_COMPAT         11
+#define NFNL_SUBSYS_COUNT              12
+
+/* Reserved control nfnetlink messages */
+#define NFNL_MSG_BATCH_BEGIN           NLMSG_MIN_TYPE
+#define NFNL_MSG_BATCH_END             NLMSG_MIN_TYPE+1
 
 #endif /* _UAPI_NFNETLINK_H */
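
NFNL_MSG_BATCH_BEGIN and NFNL_MSG_BATCH_END bracket an nf_tables transaction on the nfnetlink socket. A heavily hedged sketch of filling in just the batch-begin message; the buffer handling and the use of the subsystem ID as res_id are assumptions based on how nftables userspace drives this interface:

#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>

/* Write an NFNL_MSG_BATCH_BEGIN header targeting the nf_tables subsystem. */
static size_t put_batch_begin(void *buf, uint32_t seq)
{
	struct nlmsghdr *nlh = buf;
	struct nfgenmsg *nfg;

	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*nfg));
	nlh->nlmsg_type = NFNL_MSG_BATCH_BEGIN;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	nlh->nlmsg_seq = seq;
	nlh->nlmsg_pid = 0;

	nfg = NLMSG_DATA(nlh);
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = htons(NFNL_SUBSYS_NFTABLES);

	return nlh->nlmsg_len;
}
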
index a2810a7c5e3002d63c9651e25830431a39d51abb..1ab0b97b3a1e68336485b2679045024e86ff5444 100644 (file)
@@ -6,6 +6,8 @@ enum ctnl_timeout_msg_types {
        IPCTNL_MSG_TIMEOUT_NEW,
        IPCTNL_MSG_TIMEOUT_GET,
        IPCTNL_MSG_TIMEOUT_DELETE,
+       IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+       IPCTNL_MSG_TIMEOUT_DEFAULT_GET,
 
        IPCTNL_MSG_TIMEOUT_MAX
 };
index 29bed72a4ac43e4bd141b87a8b5e9d410ab21cff..6ad6cc03ccd3aeec4074f187836efac62ed31a0f 100644 (file)
@@ -85,6 +85,7 @@
  *     a specific SE notifies us about the end of a transaction. The parameter
  *     for this event is the application ID (AID).
  * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
+ * @NFC_CMD_SE_IO: Send/Receive APDUs to/from the selected secure element.
  */
 enum nfc_commands {
        NFC_CMD_UNSPEC,
@@ -114,6 +115,7 @@ enum nfc_commands {
        NFC_EVENT_SE_CONNECTIVITY,
        NFC_EVENT_SE_TRANSACTION,
        NFC_CMD_GET_SE,
+       NFC_CMD_SE_IO,
 /* private: internal use only */
        __NFC_CMD_AFTER_LAST
 };
@@ -147,6 +149,7 @@ enum nfc_commands {
  * @NFC_ATTR_SE_INDEX: Secure element index
  * @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED)
  * @NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS: Firmware download operation status
+ * @NFC_ATTR_SE_APDU: Secure element APDU
  */
 enum nfc_attrs {
        NFC_ATTR_UNSPEC,
@@ -174,6 +177,7 @@ enum nfc_attrs {
        NFC_ATTR_SE_TYPE,
        NFC_ATTR_SE_AID,
        NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS,
+       NFC_ATTR_SE_APDU,
 /* private: internal use only */
        __NFC_ATTR_AFTER_LAST
 };
index baa7852468ef6eced239446f81b7b88b765e083e..0890556f779e489c16aa57490f3d4fef97e7e86e 100644 (file)
 #define PCI_MSIX_PBA           8       /* Pending Bit Array offset */
 #define  PCI_MSIX_PBA_BIR      0x00000007 /* BAR index */
 #define  PCI_MSIX_PBA_OFFSET   0xfffffff8 /* Offset into specified BAR */
-#define  PCI_MSIX_FLAGS_BIRMASK        (7 << 0)   /* deprecated */
 #define PCI_CAP_MSIX_SIZEOF    12      /* size of MSIX registers */
 
 /* MSI-X entry's format */
 #define  PCI_EXP_DEVCAP2_OBFF_MSG      0x00040000 /* New message signaling */
 #define  PCI_EXP_DEVCAP2_OBFF_WAKE     0x00080000 /* Re-use WAKE# for OBFF */
 #define PCI_EXP_DEVCTL2                40      /* Device Control 2 */
-#define  PCI_EXP_DEVCTL2_ARI           0x20    /* Alternative Routing-ID */
+#define  PCI_EXP_DEVCTL2_COMP_TIMEOUT  0x000f  /* Completion Timeout Value */
+#define  PCI_EXP_DEVCTL2_ARI           0x0020  /* Alternative Routing-ID */
 #define  PCI_EXP_DEVCTL2_IDO_REQ_EN    0x0100  /* Allow IDO for requests */
 #define  PCI_EXP_DEVCTL2_IDO_CMP_EN    0x0200  /* Allow IDO for completions */
 #define  PCI_EXP_DEVCTL2_LTR_EN                0x0400  /* Enable LTR mechanism */
index 9b829134d42243bdfc6816f9272b75ea82c9b934..f2624b549e6187155c2f6677d308b2bfc3e60acd 100644 (file)
@@ -357,6 +357,8 @@ enum {
        TCA_HTB_CTAB,
        TCA_HTB_RTAB,
        TCA_HTB_DIRECT_QLEN,
+       TCA_HTB_RATE64,
+       TCA_HTB_CEIL64,
        __TCA_HTB_MAX,
 };
 
index 0623ec4e728f09550fd8c5e0e464f4a771265bdb..56f121605c998c28e8a173067e94e24ec7d9c653 100644 (file)
@@ -1,5 +1,6 @@
 # UAPI Header export list
 header-y += tc_csum.h
+header-y += tc_defact.h
 header-y += tc_gact.h
 header-y += tc_ipt.h
 header-y += tc_mirred.h
similarity index 75%
rename from include/linux/tc_act/tc_defact.h
rename to include/uapi/linux/tc_act/tc_defact.h
index 6f65d07c7ce25cf343a2c80ecd6ba70562331bfc..17dddb40f74043e40ac09e36e15c925c5aa13df0 100644 (file)
@@ -6,7 +6,7 @@
 struct tc_defact {
        tc_gen;
 };
-                                                                                
+
 enum {
        TCA_DEF_UNSPEC,
        TCA_DEF_TM,
index 36eace03b2ac79229984b09254db9aed3e3b8d5f..e272ea060e3851959878932ef2dd61c87e9d185e 100644 (file)
@@ -94,10 +94,10 @@ struct mtd_write_req {
 #define MTD_RAM                        1
 #define MTD_ROM                        2
 #define MTD_NORFLASH           3
-#define MTD_NANDFLASH          4
+#define MTD_NANDFLASH          4       /* SLC NAND */
 #define MTD_DATAFLASH          6
 #define MTD_UBIVOLUME          7
-#define MTD_MLCNANDFLASH       8
+#define MTD_MLCNANDFLASH       8       /* MLC NAND (including TLC) */
 
 #define MTD_WRITEABLE          0x400   /* Device is writeable */
 #define MTD_BIT_WRITEABLE      0x800   /* Single bits can be flipped */
@@ -275,4 +275,9 @@ enum mtd_file_modes {
        MTD_FILE_MODE_RAW,
 };
 
+static inline int mtd_type_is_nand_user(const struct mtd_info_user *mtd)
+{
+       return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
+}
+
 #endif /* __MTD_ABI_H__ */
index 0b233c56b0e402ef75f691b5dcf6c6f1cc87a01b..e3ddd86c90a68610a20625729a99cf8effea8f31 100644 (file)
@@ -87,8 +87,10 @@ enum {
        IB_USER_VERBS_CMD_CLOSE_XRCD,
        IB_USER_VERBS_CMD_CREATE_XSRQ,
        IB_USER_VERBS_CMD_OPEN_QP,
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
        IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_CMD_DESTROY_FLOW
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 };
 
 /*
@@ -126,6 +128,7 @@ struct ib_uverbs_cmd_hdr {
        __u16 out_words;
 };
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 struct ib_uverbs_cmd_hdr_ex {
        __u32 command;
        __u16 in_words;
@@ -134,6 +137,7 @@ struct ib_uverbs_cmd_hdr_ex {
        __u16 provider_out_words;
        __u32 cmd_hdr_reserved;
 };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 struct ib_uverbs_get_context {
        __u64 response;
@@ -696,6 +700,7 @@ struct ib_uverbs_detach_mcast {
        __u64 driver_data[0];
 };
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 struct ib_kern_eth_filter {
        __u8  dst_mac[6];
        __u8  src_mac[6];
@@ -780,6 +785,7 @@ struct ib_uverbs_destroy_flow  {
        __u32 comp_mask;
        __u32 flow_handle;
 };
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 
 struct ib_uverbs_create_srq {
        __u64 response;
index 0f7d279ebde35fd1be21cc922def324eaacb240f..a7f27704f9807e7b14eadb9b08cf09c2c183a6a7 100644 (file)
@@ -5,6 +5,7 @@ header-y += asound_fm.h
 header-y += compress_offload.h
 header-y += compress_params.h
 header-y += emu10k1.h
+header-y += firewire.h
 header-y += hdsp.h
 header-y += hdspm.h
 header-y += sb16_csp.h
index 041203f20f6d1cb3cac42df78723356550729054..9fc6219d38482cd3dc8a34f8653d24dfeee2a792 100644 (file)
@@ -93,9 +93,10 @@ enum {
        SNDRV_HWDEP_IFACE_SB_RC,        /* SB Extigy/Audigy2NX remote control */
        SNDRV_HWDEP_IFACE_HDA,          /* HD-audio */
        SNDRV_HWDEP_IFACE_USB_STREAM,   /* direct access to usb stream */
+       SNDRV_HWDEP_IFACE_FW_DICE,      /* TC DICE FireWire device */
 
        /* Don't forget to change the following: */
-       SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_USB_STREAM
+       SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_DICE
 };
 
 struct snd_hwdep_info {
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
new file mode 100644 (file)
index 0000000..e86131c
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef UAPI_SOUND_FIREWIRE_H_INCLUDED
+#define UAPI_SOUND_FIREWIRE_H_INCLUDED
+
+#include <linux/ioctl.h>
+
+/* events can be read() from the hwdep device */
+
+#define SNDRV_FIREWIRE_EVENT_LOCK_STATUS       0x000010cc
+#define SNDRV_FIREWIRE_EVENT_DICE_NOTIFICATION 0xd1ce004e
+
+struct snd_firewire_event_common {
+       unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */
+};
+
+struct snd_firewire_event_lock_status {
+       unsigned int type;
+       unsigned int status; /* 0/1 = unlocked/locked */
+};
+
+struct snd_firewire_event_dice_notification {
+       unsigned int type;
+       unsigned int notification; /* DICE-specific bits */
+};
+
+union snd_firewire_event {
+       struct snd_firewire_event_common            common;
+       struct snd_firewire_event_lock_status       lock_status;
+       struct snd_firewire_event_dice_notification dice_notification;
+};
+
+
+#define SNDRV_FIREWIRE_IOCTL_GET_INFO _IOR('H', 0xf8, struct snd_firewire_get_info)
+#define SNDRV_FIREWIRE_IOCTL_LOCK      _IO('H', 0xf9)
+#define SNDRV_FIREWIRE_IOCTL_UNLOCK    _IO('H', 0xfa)
+
+#define SNDRV_FIREWIRE_TYPE_DICE       1
+/* Fireworks, AV/C, RME, MOTU, ... */
+
+struct snd_firewire_get_info {
+       unsigned int type; /* SNDRV_FIREWIRE_TYPE_xxx */
+       unsigned int card; /* same as fw_cdev_get_info.card */
+       unsigned char guid[8];
+       char device_name[16]; /* device node in /dev */
+};
+
+/*
+ * SNDRV_FIREWIRE_IOCTL_LOCK prevents the driver from streaming.
+ * Returns -EBUSY if the driver is already streaming.
+ */
+
+#endif
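
A minimal user-space sketch against this new header; the hwdep node path /dev/snd/hwC0D0 is an assumption, and only the ioctl and event layout come from the header above.

/* Sketch only: query the FireWire hwdep interface and read one event. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/firewire.h>

int main(void)
{
        struct snd_firewire_get_info info;
        union snd_firewire_event event;
        int fd = open("/dev/snd/hwC0D0", O_RDONLY);     /* assumed node */

        if (fd < 0 || ioctl(fd, SNDRV_FIREWIRE_IOCTL_GET_INFO, &info) < 0) {
                perror("hwdep");
                return 1;
        }
        printf("type %u on card %u (%s)\n", info.type, info.card, info.device_name);

        if (read(fd, &event, sizeof(event)) >= (ssize_t)sizeof(event.common) &&
            event.common.type == SNDRV_FIREWIRE_EVENT_LOCK_STATUS)
                printf("lock status: %u\n", event.lock_status.status);

        close(fd);
        return 0;
}
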
index eb262e3324d2fca44da8c88bf3127fd4da65b118..c50061db609893a924160556203f7dcf73e24700 100644 (file)
  * node as before.
  */
 
+/*
+ * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum
+ * offload off or on. If it is missing then the feature is assumed to be on.
+ * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum
+ * offload on or off. If it is missing then the feature is assumed to be off.
+ */
+
+/*
+ * "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to
+ * handle large TCP packets (in IPv4 or IPv6 form respectively). Neither
+ * frontends nor backends are assumed to be capable unless the flags are
+ * present.
+ */
+
 /*
  * This is the 'wire' format for packets:
  *  Request 1: xen_netif_tx_request  -- XEN_NETTXF_* (any flags)
@@ -95,8 +109,10 @@ struct xen_netif_tx_request {
 #define _XEN_NETIF_EXTRA_FLAG_MORE     (0)
 #define  XEN_NETIF_EXTRA_FLAG_MORE     (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
 
-/* GSO types - only TCPv4 currently supported. */
+/* GSO types */
+#define XEN_NETIF_GSO_TYPE_NONE                (0)
 #define XEN_NETIF_GSO_TYPE_TCPV4       (1)
+#define XEN_NETIF_GSO_TYPE_TCPV6       (2)
 
 /*
  * This structure needs to fit within both netif_tx_request and
index 3ecd8a1178f102d832cdf3b4af0a908997ea648b..d9887456007a83b212eb41dabf95f7d8c781c6b4 100644 (file)
@@ -284,7 +284,7 @@ config AUDIT
 
 config AUDITSYSCALL
        bool "Enable system-call auditing support"
-       depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
+       depends on AUDIT && (X86 || PARISC || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT))
        default y if SECURITY_SELINUX
        help
          Enable low-overhead system-call auditing infrastructure that
index af310afbef2867e987973cc341a7b418f7413068..edee99f735746f4bb3b6a8fb17794919469f4888 100644 (file)
@@ -76,6 +76,7 @@
 #include <linux/elevator.h>
 #include <linux/sched_clock.h>
 #include <linux/context_tracking.h>
+#include <linux/random.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -135,6 +136,13 @@ static char *static_command_line;
 static char *execute_command;
 static char *ramdisk_execute_command;
 
+/*
+ * Used to generate warnings if static_key manipulation functions are used
+ * before jump_label_init is called.
+ */
+bool static_key_initialized __read_mostly = false;
+EXPORT_SYMBOL_GPL(static_key_initialized);
+
 /*
  * If set, this is an indication to the drivers that reset the underlying
  * device before going ahead with the initialization otherwise driver might
@@ -780,6 +788,7 @@ static void __init do_basic_setup(void)
        do_ctors();
        usermodehelper_enable();
        do_initcalls();
+       random_int_secret_init();
 }
 
 static void __init do_pre_smp_initcalls(void)
index 8c4f59b0204a2821811a4b28fd48e0ce6163d987..db9d241af133d770cb0a95a22cacf257f79b1215 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1282,6 +1282,12 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 
        sem_lock(sma, NULL, -1);
 
+       if (sma->sem_perm.deleted) {
+               sem_unlock(sma, -1);
+               rcu_read_unlock();
+               return -EIDRM;
+       }
+
        curr = &sma->sem_base[semnum];
 
        ipc_assert_locked_object(&sma->sem_perm);
@@ -1336,12 +1342,14 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                int i;
 
                sem_lock(sma, NULL, -1);
+               if (sma->sem_perm.deleted) {
+                       err = -EIDRM;
+                       goto out_unlock;
+               }
                if(nsems > SEMMSL_FAST) {
                        if (!ipc_rcu_getref(sma)) {
-                               sem_unlock(sma, -1);
-                               rcu_read_unlock();
                                err = -EIDRM;
-                               goto out_free;
+                               goto out_unlock;
                        }
                        sem_unlock(sma, -1);
                        rcu_read_unlock();
@@ -1354,10 +1362,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                        rcu_read_lock();
                        sem_lock_and_putref(sma);
                        if (sma->sem_perm.deleted) {
-                               sem_unlock(sma, -1);
-                               rcu_read_unlock();
                                err = -EIDRM;
-                               goto out_free;
+                               goto out_unlock;
                        }
                }
                for (i = 0; i < sma->sem_nsems; i++)
@@ -1375,8 +1381,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                struct sem_undo *un;
 
                if (!ipc_rcu_getref(sma)) {
-                       rcu_read_unlock();
-                       return -EIDRM;
+                       err = -EIDRM;
+                       goto out_rcu_wakeup;
                }
                rcu_read_unlock();
 
@@ -1404,10 +1410,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                rcu_read_lock();
                sem_lock_and_putref(sma);
                if (sma->sem_perm.deleted) {
-                       sem_unlock(sma, -1);
-                       rcu_read_unlock();
                        err = -EIDRM;
-                       goto out_free;
+                       goto out_unlock;
                }
 
                for (i = 0; i < nsems; i++)
@@ -1431,6 +1435,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                goto out_rcu_wakeup;
 
        sem_lock(sma, NULL, -1);
+       if (sma->sem_perm.deleted) {
+               err = -EIDRM;
+               goto out_unlock;
+       }
        curr = &sma->sem_base[semnum];
 
        switch (cmd) {
@@ -1836,6 +1844,10 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
        if (error)
                goto out_rcu_wakeup;
 
+       error = -EIDRM;
+       locknum = sem_lock(sma, sops, nsops);
+       if (sma->sem_perm.deleted)
+               goto out_unlock_free;
        /*
         * semid identifiers are not unique - find_alloc_undo may have
         * allocated an undo structure, it was invalidated by an RMID
@@ -1843,8 +1855,6 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
         * This case can be detected checking un->semid. The existence of
         * "un" itself is guaranteed by rcu.
         */
-       error = -EIDRM;
-       locknum = sem_lock(sma, sops, nsops);
        if (un && un->semid == -1)
                goto out_unlock_free;
 
@@ -2057,6 +2067,12 @@ void exit_sem(struct task_struct *tsk)
                }
 
                sem_lock(sma, NULL, -1);
+               /* exit_sem raced with IPC_RMID, nothing to do */
+               if (sma->sem_perm.deleted) {
+                       sem_unlock(sma, -1);
+                       rcu_read_unlock();
+                       continue;
+               }
                un = __lookup_undo(ulp, semid);
                if (un == NULL) {
                        /* exit_sem raced with IPC_RMID+semget() that created
index fdb8ae7407755f9b1c3725e2a7fd38a8a31c7f54..7684f41bce76a9b430f04b7e4e77810b48798a42 100644 (file)
  *            Pavel Emelianov <xemul@openvz.org>
  *
  * General sysv ipc locking scheme:
- *  when doing ipc id lookups, take the ids->rwsem
- *      rcu_read_lock()
- *          obtain the ipc object (kern_ipc_perm)
- *          perform security, capabilities, auditing and permission checks, etc.
- *          acquire the ipc lock (kern_ipc_perm.lock) throught ipc_lock_object()
- *             perform data updates (ie: SET, RMID, LOCK/UNLOCK commands)
+ *     rcu_read_lock()
+ *          obtain the ipc object (kern_ipc_perm) by looking up the id in an idr
+ *         tree.
+ *         - perform initial checks (capabilities, auditing and permission,
+ *           etc).
+ *         - perform read-only operations, such as STAT, INFO commands.
+ *           acquire the ipc lock (kern_ipc_perm.lock) through
+ *           ipc_lock_object()
+ *             - perform data updates, such as SET, RMID commands and
+ *               mechanism-specific operations (semop/semtimedop,
+ *               msgsnd/msgrcv, shmat/shmdt).
+ *         drop the ipc lock, through ipc_unlock_object().
+ *     rcu_read_unlock()
+ *
+ *  The ids->rwsem must be taken when:
+ *     - creating, removing and iterating the existing entries in ipc
+ *       identifier sets.
+ *     - iterating through files under /proc/sysvipc/
+ *
+ *  Note that sems have a special fast path that avoids kern_ipc_perm.lock -
+ *  see sem_lock().
  */
 
 #include <linux/mm.h>
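
As a compressed illustration of the ordering spelled out in the comment above, a sketch written with the ipc/util.h helpers; it is not a verbatim call site from this tree and elides the mechanism-specific details.

/* Illustrative only: the lookup/lock order described above, assuming the
 * ipc/util.h environment (ipc_obtain_object_check(), ipcperms(), ...). */
static int example_ipc_update(struct ipc_namespace *ns, struct ipc_ids *ids, int id)
{
        struct kern_ipc_perm *ipcp;
        int err = 0;

        rcu_read_lock();
        ipcp = ipc_obtain_object_check(ids, id);        /* idr lookup + seq check */
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out;
        }
        if (ipcperms(ns, ipcp, S_IRUGO | S_IWUGO)) {    /* permission checks */
                err = -EACCES;
                goto out;
        }
        /* read-only operations (STAT, INFO) can run right here */
        ipc_lock_object(ipcp);                          /* kern_ipc_perm.lock */
        /* data updates: SET, RMID, semop/msgsnd/shmat style work */
        ipc_unlock_object(ipcp);
out:
        rcu_read_unlock();
        return err;
}
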
index 2418b6e71a854e187573ec12746481ce863e721a..8bd9cfdc70d7bc020dbc19a05813e2e49443b8e8 100644 (file)
@@ -2039,7 +2039,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
 
                /* @tsk either already exited or can't exit until the end */
                if (tsk->flags & PF_EXITING)
-                       continue;
+                       goto next;
 
                /* as per above, nr_threads may decrease, but not increase. */
                BUG_ON(i >= group_size);
@@ -2047,7 +2047,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                ent.cgrp = task_cgroup_from_root(tsk, root);
                /* nothing to do if this task is already in the cgroup */
                if (ent.cgrp == cgrp)
-                       continue;
+                       goto next;
                /*
                 * saying GFP_ATOMIC has no effect here because we did prealloc
                 * earlier, but it's good form to communicate our expectations.
@@ -2055,7 +2055,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
                retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
                BUG_ON(retval != 0);
                i++;
-
+       next:
                if (!threadgroup)
                        break;
        } while_each_thread(leader, tsk);
@@ -3188,11 +3188,9 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
-       /* if first iteration, visit the leftmost descendant */
-       if (!pos) {
-               next = css_leftmost_descendant(root);
-               return next != root ? next : NULL;
-       }
+       /* if first iteration, visit leftmost descendant which may be @root */
+       if (!pos)
+               return css_leftmost_descendant(root);
 
        /* if we visited @root, we're done */
        if (pos == root)
index cb4238e85b38e37886b27dd76351d2590a835924..d49a9d29334cc4d67c24bad9814221a0371a6350 100644 (file)
@@ -7234,15 +7234,15 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
                perf_remove_from_context(event);
                unaccount_event_cpu(event, src_cpu);
                put_ctx(src_ctx);
-               list_add(&event->event_entry, &events);
+               list_add(&event->migrate_entry, &events);
        }
        mutex_unlock(&src_ctx->mutex);
 
        synchronize_rcu();
 
        mutex_lock(&dst_ctx->mutex);
-       list_for_each_entry_safe(event, tmp, &events, event_entry) {
-               list_del(&event->event_entry);
+       list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
+               list_del(&event->migrate_entry);
                if (event->state >= PERF_EVENT_STATE_OFF)
                        event->state = PERF_EVENT_STATE_INACTIVE;
                account_event_cpu(event, dst_cpu);
index 297a9247a3b394f8538db111817e8a88d3c345ab..9019f15deab201127065e4d6987677fdcdd63676 100644 (file)
@@ -58,6 +58,7 @@ static void jump_label_update(struct static_key *key, int enable);
 
 void static_key_slow_inc(struct static_key *key)
 {
+       STATIC_KEY_CHECK_USE();
        if (atomic_inc_not_zero(&key->enabled))
                return;
 
@@ -103,12 +104,14 @@ static void jump_label_update_timeout(struct work_struct *work)
 
 void static_key_slow_dec(struct static_key *key)
 {
+       STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(key, 0, NULL);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
 void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
+       STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
@@ -116,6 +119,7 @@ EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
 {
+       STATIC_KEY_CHECK_USE();
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
 }
@@ -212,6 +216,7 @@ void __init jump_label_init(void)
                key->next = NULL;
 #endif
        }
+       static_key_initialized = true;
        jump_label_unlock();
 }
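
For context, a minimal sketch of the static_key API these checks now guard; calling the slow-path helpers before jump_label_init() runs triggers the new warning. The key and functions below are illustrative, not taken from this patch.

/* Illustrative sketch of the static_key API guarded by STATIC_KEY_CHECK_USE(). */
#include <linux/jump_label.h>
#include <linux/printk.h>

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

static void example_fast_path(void)
{
        if (static_key_false(&example_key))     /* patched to a jump when enabled */
                pr_info("slow branch enabled\n");
}

static void example_toggle(bool on)
{
        if (on)
                static_key_slow_inc(&example_key);      /* warns if called too early */
        else
                static_key_slow_dec(&example_key);
}
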
 
index dc582749fa1386db25af23929db4fc20b6c7acce..5c9cf84017d52c046f4c9e8b3521171c8da7e402 100644 (file)
@@ -644,8 +644,6 @@ static int module_unload_init(struct module *mod)
 
        /* Hold reference count during initialization. */
        __this_cpu_write(mod->refptr->incs, 1);
-       /* Backwards compatibility macros put refcount during init. */
-       mod->waiter = current;
 
        return 0;
 }
@@ -771,16 +769,9 @@ static int __try_stop_module(void *_sref)
 
 static int try_stop_module(struct module *mod, int flags, int *forced)
 {
-       if (flags & O_NONBLOCK) {
-               struct stopref sref = { mod, flags, forced };
+       struct stopref sref = { mod, flags, forced };
 
-               return stop_machine(__try_stop_module, &sref, NULL);
-       } else {
-               /* We don't need to stop the machine for this. */
-               mod->state = MODULE_STATE_GOING;
-               synchronize_sched();
-               return 0;
-       }
+       return stop_machine(__try_stop_module, &sref, NULL);
 }
 
 unsigned long module_refcount(struct module *mod)
@@ -813,21 +804,6 @@ EXPORT_SYMBOL(module_refcount);
 /* This exists whether we can unload or not */
 static void free_module(struct module *mod);
 
-static void wait_for_zero_refcount(struct module *mod)
-{
-       /* Since we might sleep for some time, release the mutex first */
-       mutex_unlock(&module_mutex);
-       for (;;) {
-               pr_debug("Looking at refcount...\n");
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               if (module_refcount(mod) == 0)
-                       break;
-               schedule();
-       }
-       current->state = TASK_RUNNING;
-       mutex_lock(&module_mutex);
-}
-
 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                unsigned int, flags)
 {
@@ -842,6 +818,11 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                return -EFAULT;
        name[MODULE_NAME_LEN-1] = '\0';
 
+       if (!(flags & O_NONBLOCK)) {
+               printk(KERN_WARNING
+                      "waiting for module removal is not supported: please upgrade\n");
+       }
+
        if (mutex_lock_interruptible(&module_mutex) != 0)
                return -EINTR;
 
@@ -859,8 +840,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 
        /* Doing init or already dying? */
        if (mod->state != MODULE_STATE_LIVE) {
-               /* FIXME: if (force), slam module count and wake up
-                   waiter --RR */
+               /* FIXME: if (force), slam module count damn the torpedoes */
                pr_debug("%s already dying\n", mod->name);
                ret = -EBUSY;
                goto out;
@@ -876,18 +856,11 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                }
        }
 
-       /* Set this up before setting mod->state */
-       mod->waiter = current;
-
        /* Stop the machine so refcounts can't move and disable module. */
        ret = try_stop_module(mod, flags, &forced);
        if (ret != 0)
                goto out;
 
-       /* Never wait if forced. */
-       if (!forced && module_refcount(mod) != 0)
-               wait_for_zero_refcount(mod);
-
        mutex_unlock(&module_mutex);
        /* Final destruction now no one is using it. */
        if (mod->exit != NULL)
@@ -1005,9 +978,6 @@ void module_put(struct module *module)
                __this_cpu_inc(module->refptr->decs);
 
                trace_module_put(module, _RET_IP_);
-               /* Maybe they're waiting for us to drop reference? */
-               if (unlikely(!module_is_live(module)))
-                       wake_up_process(module->waiter);
                preempt_enable();
        }
 }
@@ -2738,7 +2708,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
        return 0;
 }
 
-static void find_module_sections(struct module *mod, struct load_info *info)
+static int find_module_sections(struct module *mod, struct load_info *info)
 {
        mod->kp = section_objs(info, "__param",
                               sizeof(*mod->kp), &mod->num_kp);
@@ -2768,6 +2738,18 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 #ifdef CONFIG_CONSTRUCTORS
        mod->ctors = section_objs(info, ".ctors",
                                  sizeof(*mod->ctors), &mod->num_ctors);
+       if (!mod->ctors)
+               mod->ctors = section_objs(info, ".init_array",
+                               sizeof(*mod->ctors), &mod->num_ctors);
+       else if (find_sec(info, ".init_array")) {
+               /*
+                * This shouldn't happen with same compiler and binutils
+                * building all parts of the module.
+                */
+               printk(KERN_WARNING "%s: has both .ctors and .init_array.\n",
+                      mod->name);
+               return -EINVAL;
+       }
 #endif
 
 #ifdef CONFIG_TRACEPOINTS
@@ -2806,6 +2788,8 @@ static void find_module_sections(struct module *mod, struct load_info *info)
 
        info->debug = section_objs(info, "__verbose",
                                   sizeof(*info->debug), &info->num_debug);
+
+       return 0;
 }
 
 static int move_module(struct module *mod, struct load_info *info)
@@ -3263,7 +3247,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
        /* Now we've got everything in the final locations, we can
         * find optional sections. */
-       find_module_sections(mod, info);
+       err = find_module_sections(mod, info);
+       if (err)
+               goto free_unload;
 
        err = check_module_license_and_versions(mod);
        if (err)
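
With the blocking path removed, user space is expected to pass O_NONBLOCK, as current kmod/rmmod already does. A minimal sketch of the call; the module name is a placeholder.

/* Sketch: request non-blocking module removal, as the kernel now expects. */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        if (syscall(SYS_delete_module, "example_mod", O_NONBLOCK) != 0) {
                perror("delete_module");        /* e.g. EBUSY while still in use */
                return 1;
        }
        return 0;
}
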
index d444c4e834f4526ec318f71b1076a02c909e34a8..2fac9cc79b3da6183dd56f5e3469a7de54d4167e 100644 (file)
@@ -178,6 +178,22 @@ config PM_SLEEP_DEBUG
        def_bool y
        depends on PM_DEBUG && PM_SLEEP
 
+config DPM_WATCHDOG
+       bool "Device suspend/resume watchdog"
+       depends on PM_DEBUG && PSTORE
+       ---help---
+         Sets up a watchdog timer to detect drivers that lock up
+         while attempting to suspend or resume a device.
+         A detected lockup causes a system panic, with the message
+         captured in the pstore device for inspection during the
+         subsequent boot session.
+
+config DPM_WATCHDOG_TIMEOUT
+       int "Watchdog timeout in seconds"
+       range 1 120
+       default 12
+       depends on DPM_WATCHDOG
+
 config PM_TRACE
        bool
        help
index a394297f8b2f94ea806a545c6e92e4b7221d2f7e..8dff9b48075af3f61eeab3531b3ad706b88718e4 100644 (file)
@@ -558,30 +558,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
        if (count == sizeof(s32)) {
                if (copy_from_user(&value, buf, sizeof(s32)))
                        return -EFAULT;
-       } else if (count <= 11) { /* ASCII perhaps? */
-               char ascii_value[11];
-               unsigned long int ulval;
+       } else {
                int ret;
 
-               if (copy_from_user(ascii_value, buf, count))
-                       return -EFAULT;
-
-               if (count > 10) {
-                       if (ascii_value[10] == '\n')
-                               ascii_value[10] = '\0';
-                       else
-                               return -EINVAL;
-               } else {
-                       ascii_value[count] = '\0';
-               }
-               ret = kstrtoul(ascii_value, 16, &ulval);
-               if (ret) {
-                       pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
-                       return -EINVAL;
-               }
-               value = (s32)lower_32_bits(ulval);
-       } else {
-               return -EINVAL;
+               ret = kstrtos32_from_user(buf, count, 16, &value);
+               if (ret)
+                       return ret;
        }
 
        req = filp->private_data;
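
The rewritten write path accepts either a raw 32-bit value or an ASCII string parsed as hex. A user-space sketch against the cpu_dma_latency misc device follows; the request stays in effect only while the file descriptor is open.

/* Sketch: add a PM QoS request by writing an ASCII hex value ("1f4" = 500 usec). */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/cpu_dma_latency", O_WRONLY);
        const char *val = "1f4";                /* parsed with base 16 by the kernel */

        if (fd < 0 || write(fd, val, strlen(val)) < 0) {
                perror("cpu_dma_latency");
                return 1;
        }
        pause();        /* closing the fd drops the request */
        return 0;
}
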
index 957f06164ad1004b0050dbdcb7563c91c3755e55..24850270c8024948d60c0f827457b18d2de2d9a7 100644 (file)
@@ -36,9 +36,9 @@ static struct snapshot_data {
        struct snapshot_handle handle;
        int swap;
        int mode;
-       char frozen;
-       char ready;
-       char platform_support;
+       bool frozen;
+       bool ready;
+       bool platform_support;
        bool free_bitmaps;
 } snapshot_state;
 
@@ -93,9 +93,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
        if (error)
                atomic_inc(&snapshot_device_available);
 
-       data->frozen = 0;
-       data->ready = 0;
-       data->platform_support = 0;
+       data->frozen = false;
+       data->ready = false;
+       data->platform_support = false;
 
  Unlock:
        unlock_system_sleep();
@@ -229,7 +229,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                if (error)
                        thaw_processes();
                else
-                       data->frozen = 1;
+                       data->frozen = true;
 
                break;
 
@@ -240,7 +240,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                free_basic_memory_bitmaps();
                data->free_bitmaps = false;
                thaw_processes();
-               data->frozen = 0;
+               data->frozen = false;
                break;
 
        case SNAPSHOT_CREATE_IMAGE:
@@ -270,7 +270,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
        case SNAPSHOT_FREE:
                swsusp_free();
                memset(&data->handle, 0, sizeof(struct snapshot_handle));
-               data->ready = 0;
+               data->ready = false;
                /*
                 * It is necessary to thaw kernel threads here, because
                 * SNAPSHOT_CREATE_IMAGE may be invoked directly after
@@ -334,7 +334,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 * PM_HIBERNATION_PREPARE
                 */
                error = suspend_devices_and_enter(PM_SUSPEND_MEM);
-               data->ready = 0;
+               data->ready = false;
                break;
 
        case SNAPSHOT_PLATFORM_SUPPORT:
index 669bf190d4fb91770f8912277278a6528c4065c3..084f7b18d0c0a722e215dce8d14dcda0e6ba78d8 100644 (file)
@@ -592,7 +592,7 @@ static void kobject_release(struct kref *kref)
 {
        struct kobject *kobj = container_of(kref, struct kobject, kref);
 #ifdef CONFIG_DEBUG_KOBJECT_RELEASE
-       pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n",
+       pr_info("kobject: '%s' (%p): %s, parent %p (delayed)\n",
                 kobject_name(kobj), kobj, __func__, kobj->parent);
        INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
        schedule_delayed_work(&kobj->release, HZ);
index 6f9d434c1521eab9ca0b2821d10936af55f2b703..af6e95d0bed6122bf9fd1e4af2bd76a4e2be62b3 100644 (file)
@@ -153,6 +153,7 @@ void lockref_mark_dead(struct lockref *lockref)
        assert_spin_locked(&lockref->lock);
        lockref->count = -128;
 }
+EXPORT_SYMBOL(lockref_mark_dead);
 
 /**
  * lockref_get_not_dead - Increments count unless the ref is dead
index 7deeb6297a483d7272acb30c4f3b0c0d6b0e76b3..1a53d497a8c53ae460686af6107531c4161eae53 100644 (file)
@@ -53,6 +53,7 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
        ref->release = release;
        return 0;
 }
+EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
  * percpu_ref_cancel_init - cancel percpu_ref_init()
@@ -84,6 +85,7 @@ void percpu_ref_cancel_init(struct percpu_ref *ref)
                free_percpu(ref->pcpu_count);
        }
 }
+EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
@@ -156,3 +158,4 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 
        call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
 }
+EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
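
With these exports in place, modular code can use the percpu_ref API directly. A compressed, illustrative sketch; the release callback and call sites are assumptions, not from this patch.

/* Illustrative module-side sketch of the now-exported percpu_ref API. */
#include <linux/percpu-refcount.h>
#include <linux/printk.h>

static struct percpu_ref example_ref;

static void example_release(struct percpu_ref *ref)
{
        pr_info("last reference dropped\n");
}

static int example_start(void)
{
        int err = percpu_ref_init(&example_ref, example_release);

        if (err)
                return err;
        percpu_ref_get(&example_ref);   /* per-cpu fast path */
        percpu_ref_put(&example_ref);
        return 0;
}

static void example_stop(void)
{
        /* drops the base reference; example_release() runs once all
         * remaining references are gone */
        percpu_ref_kill(&example_ref);
}
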
index 1e6aec4a2d2ebae29c0fea0405a7e81f422beeb4..ccb87cc8f07c08a5891c7cd40414c54da47449c1 100644 (file)
@@ -1315,44 +1315,6 @@ out:
        file_accessed(filp);
 }
 
-int file_read_actor(read_descriptor_t *desc, struct page *page,
-                       unsigned long offset, unsigned long size)
-{
-       char *kaddr;
-       unsigned long left, count = desc->count;
-
-       if (size > count)
-               size = count;
-
-       /*
-        * Faults on the destination of a read are common, so do it before
-        * taking the kmap.
-        */
-       if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-               kaddr = kmap_atomic(page);
-               left = __copy_to_user_inatomic(desc->arg.buf,
-                                               kaddr + offset, size);
-               kunmap_atomic(kaddr);
-               if (left == 0)
-                       goto success;
-       }
-
-       /* Do it the slow way */
-       kaddr = kmap(page);
-       left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
-       kunmap(page);
-
-       if (left) {
-               size -= left;
-               desc->error = -EFAULT;
-       }
-success:
-       desc->count = count - size;
-       desc->written += size;
-       desc->arg.buf += size;
-       return size;
-}
-
 /*
  * Performs necessary checks before doing a write
  * @iov:       io vector request
@@ -1392,31 +1354,41 @@ int generic_segment_checks(const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_segment_checks);
 
+int file_read_iter_actor(read_descriptor_t *desc, struct page *page,
+                        unsigned long offset, unsigned long size)
+{
+       struct iov_iter *iter = desc->arg.data;
+       unsigned long copied = 0;
+
+       if (size > desc->count)
+               size = desc->count;
+
+       copied = __iov_iter_copy_to_user(page, iter, offset, size);
+       if (copied < size)
+               desc->error = -EFAULT;
+
+       iov_iter_advance(iter, copied);
+       desc->count -= copied;
+       desc->written += copied;
+
+       return copied;
+}
+
 /**
- * generic_file_aio_read - generic filesystem read routine
+ * generic_file_read_iter - generic filesystem read routine
  * @iocb:      kernel I/O control block
- * @iov:       io vector request
- * @nr_segs:   number of segments in the iovec
+ * @iter:      memory vector
  * @pos:       current file position
- *
- * This is the "read()" routine for all filesystems
- * that can use the page cache directly.
  */
 ssize_t
-generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos)
+generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
        struct file *filp = iocb->ki_filp;
-       ssize_t retval;
-       unsigned long seg = 0;
-       size_t count;
+       read_descriptor_t desc;
+       ssize_t retval = 0;
+       size_t count = iov_iter_count(iter);
        loff_t *ppos = &iocb->ki_pos;
 
-       count = 0;
-       retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-       if (retval)
-               return retval;
-
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (filp->f_flags & O_DIRECT) {
                loff_t size;
@@ -1430,11 +1402,10 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                size = i_size_read(inode);
                if (pos < size) {
                        retval = filemap_write_and_wait_range(mapping, pos,
-                                       pos + iov_length(iov, nr_segs) - 1);
-                       if (!retval) {
+                                       pos + count - 1);
+                       if (!retval)
                                retval = mapping->a_ops->direct_IO(READ, iocb,
-                                                       iov, pos, nr_segs);
-                       }
+                                                                  iter, pos);
                        if (retval > 0) {
                                *ppos = pos + retval;
                                count -= retval;
@@ -1455,42 +1426,47 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                }
        }
 
-       count = retval;
-       for (seg = 0; seg < nr_segs; seg++) {
-               read_descriptor_t desc;
-               loff_t offset = 0;
-
-               /*
-                * If we did a short DIO read we need to skip the section of the
-                * iov that we've already read data into.
-                */
-               if (count) {
-                       if (count > iov[seg].iov_len) {
-                               count -= iov[seg].iov_len;
-                               continue;
-                       }
-                       offset = count;
-                       count = 0;
-               }
-
-               desc.written = 0;
-               desc.arg.buf = iov[seg].iov_base + offset;
-               desc.count = iov[seg].iov_len - offset;
-               if (desc.count == 0)
-                       continue;
-               desc.error = 0;
-               do_generic_file_read(filp, ppos, &desc, file_read_actor);
-               retval += desc.written;
-               if (desc.error) {
-                       retval = retval ?: desc.error;
-                       break;
-               }
-               if (desc.count > 0)
-                       break;
-       }
+       desc.written = 0;
+       desc.arg.data = iter;
+       desc.count = count;
+       desc.error = 0;
+       do_generic_file_read(filp, ppos, &desc, file_read_iter_actor);
+       if (desc.written)
+               retval = desc.written;
+       else
+               retval = desc.error;
 out:
        return retval;
 }
+EXPORT_SYMBOL(generic_file_read_iter);
+
+/**
+ * generic_file_aio_read - generic filesystem read routine
+ * @iocb:      kernel I/O control block
+ * @iov:       io vector request
+ * @nr_segs:   number of segments in the iovec
+ * @pos:       current file position
+ *
+ * This is the "read()" routine for all filesystems
+ * that can use the page cache directly.
+ */
+ssize_t
+generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
+{
+       struct iov_iter iter;
+       int ret;
+       size_t count;
+
+       count = 0;
+       ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+       if (ret)
+               return ret;
+
+       iov_iter_init(&iter, iov, nr_segs, count, 0);
+
+       return generic_file_read_iter(iocb, &iter, pos);
+}
 EXPORT_SYMBOL(generic_file_aio_read);
 
 #ifdef CONFIG_MMU
@@ -1616,7 +1592,6 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct inode *inode = mapping->host;
        pgoff_t offset = vmf->pgoff;
        struct page *page;
-       bool memcg_oom;
        pgoff_t size;
        int ret = 0;
 
@@ -1625,11 +1600,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                return VM_FAULT_SIGBUS;
 
        /*
-        * Do we have something in the page cache already?  Either
-        * way, try readahead, but disable the memcg OOM killer for it
-        * as readahead is optional and no errors are propagated up
-        * the fault stack.  The OOM killer is enabled while trying to
-        * instantiate the faulting page individually below.
+        * Do we have something in the page cache already?
         */
        page = find_get_page(mapping, offset);
        if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
@@ -1637,14 +1608,10 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                 * We found the page, so try async readahead before
                 * waiting for the lock.
                 */
-               memcg_oom = mem_cgroup_toggle_oom(false);
                do_async_mmap_readahead(vma, ra, file, page, offset);
-               mem_cgroup_toggle_oom(memcg_oom);
        } else if (!page) {
                /* No page in the page cache at all */
-               memcg_oom = mem_cgroup_toggle_oom(false);
                do_sync_mmap_readahead(vma, ra, file, offset);
-               mem_cgroup_toggle_oom(memcg_oom);
                count_vm_event(PGMAJFAULT);
                mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
@@ -1952,150 +1919,6 @@ struct page *read_cache_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page);
 
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
-                       const struct iovec *iov, size_t base, size_t bytes)
-{
-       size_t copied = 0, left = 0;
-
-       while (bytes) {
-               char __user *buf = iov->iov_base + base;
-               int copy = min(bytes, iov->iov_len - base);
-
-               base = 0;
-               left = __copy_from_user_inatomic(vaddr, buf, copy);
-               copied += copy;
-               bytes -= copy;
-               vaddr += copy;
-               iov++;
-
-               if (unlikely(left))
-                       break;
-       }
-       return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied.  If a fault is encountered then return the number of
- * bytes which were copied.
- */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       BUG_ON(!in_atomic());
-       kaddr = kmap_atomic(page);
-       if (likely(i->nr_segs == 1)) {
-               int left;
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
-               copied = bytes - left;
-       } else {
-               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-                                               i->iov, i->iov_offset, bytes);
-       }
-       kunmap_atomic(kaddr);
-
-       return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       kaddr = kmap(page);
-       if (likely(i->nr_segs == 1)) {
-               int left;
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user(kaddr + offset, buf, bytes);
-               copied = bytes - left;
-       } else {
-               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-                                               i->iov, i->iov_offset, bytes);
-       }
-       kunmap(page);
-       return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-       BUG_ON(i->count < bytes);
-
-       if (likely(i->nr_segs == 1)) {
-               i->iov_offset += bytes;
-               i->count -= bytes;
-       } else {
-               const struct iovec *iov = i->iov;
-               size_t base = i->iov_offset;
-               unsigned long nr_segs = i->nr_segs;
-
-               /*
-                * The !iov->iov_len check ensures we skip over unlikely
-                * zero-length segments (without overruning the iovec).
-                */
-               while (bytes || unlikely(i->count && !iov->iov_len)) {
-                       int copy;
-
-                       copy = min(bytes, iov->iov_len - base);
-                       BUG_ON(!i->count || i->count < copy);
-                       i->count -= copy;
-                       bytes -= copy;
-                       base += copy;
-                       if (iov->iov_len == base) {
-                               iov++;
-                               nr_segs--;
-                               base = 0;
-                       }
-               }
-               i->iov = iov;
-               i->iov_offset = base;
-               i->nr_segs = nr_segs;
-       }
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-       char __user *buf = i->iov->iov_base + i->iov_offset;
-       bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-       return fault_in_pages_readable(buf, bytes);
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
-       const struct iovec *iov = i->iov;
-       if (i->nr_segs == 1)
-               return i->count;
-       else
-               return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
 /*
  * Performs necessary checks before doing a write
  *
@@ -2201,9 +2024,8 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
 EXPORT_SYMBOL(pagecache_write_end);
 
 ssize_t
-generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long *nr_segs, loff_t pos, loff_t *ppos,
-               size_t count, size_t ocount)
+generic_file_direct_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+               loff_t pos, loff_t *ppos, size_t count)
 {
        struct file     *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
@@ -2212,10 +2034,13 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
        size_t          write_len;
        pgoff_t         end;
 
-       if (count != ocount)
-               *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
+       if (count != iov_iter_count(iter)) {
+               written = iov_iter_shorten(iter, count);
+               if (written)
+                       goto out;
+       }
 
-       write_len = iov_length(iov, *nr_segs);
+       write_len = count;
        end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
 
        written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
@@ -2242,7 +2067,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                }
        }
 
-       written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+       written = mapping->a_ops->direct_IO(WRITE, iocb, iter, pos);
 
        /*
         * Finally, try again to invalidate clean pages which might have been
@@ -2268,6 +2093,23 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 out:
        return written;
 }
+EXPORT_SYMBOL(generic_file_direct_write_iter);
+
+ssize_t
+generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long *nr_segs, loff_t pos, loff_t *ppos,
+               size_t count, size_t ocount)
+{
+       struct iov_iter iter;
+       ssize_t ret;
+
+       iov_iter_init(&iter, iov, *nr_segs, ocount, 0);
+       ret = generic_file_direct_write_iter(iocb, &iter, pos, ppos, count);
+       /* generic_file_direct_write_iter() might have shortened the vec */
+       if (*nr_segs != iter.nr_segs)
+               *nr_segs = iter.nr_segs;
+       return ret;
+}
 EXPORT_SYMBOL(generic_file_direct_write);
 
 /*
@@ -2401,16 +2243,19 @@ again:
 }
 
 ssize_t
-generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos, loff_t *ppos,
-               size_t count, ssize_t written)
+generic_file_buffered_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+               loff_t pos, loff_t *ppos, size_t count, ssize_t written)
 {
        struct file *file = iocb->ki_filp;
        ssize_t status;
-       struct iov_iter i;
 
-       iov_iter_init(&i, iov, nr_segs, count, written);
-       status = generic_perform_write(file, &i, pos);
+       if ((count + written) != iov_iter_count(iter)) {
+               int rc = iov_iter_shorten(iter, count + written);
+               if (rc)
+                       return rc;
+       }
+
+       status = generic_perform_write(file, iter, pos);
 
        if (likely(status >= 0)) {
                written += status;
@@ -2419,13 +2264,24 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
        
        return written ? written : status;
 }
+EXPORT_SYMBOL(generic_file_buffered_write_iter);
+
+ssize_t
+generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos, loff_t *ppos,
+               size_t count, ssize_t written)
+{
+       struct iov_iter iter;
+       iov_iter_init(&iter, iov, nr_segs, count, written);
+       return generic_file_buffered_write_iter(iocb, &iter, pos, ppos,
+                                               count, written);
+}
 EXPORT_SYMBOL(generic_file_buffered_write);
 
 /**
  * __generic_file_aio_write - write data to a file
  * @iocb:      IO state structure (file, offset, etc.)
- * @iov:       vector with data to write
- * @nr_segs:   number of segments in the vector
+ * @iter:      iov_iter specifying memory to write
  * @ppos:      position where to write
  *
  * This function does all the work needed for actually writing data to a
@@ -2440,24 +2296,18 @@ EXPORT_SYMBOL(generic_file_buffered_write);
  * A caller has to handle it. This is mainly due to the fact that we want to
  * avoid syncing under i_mutex.
  */
-ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                                unsigned long nr_segs, loff_t *ppos)
+ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                                 loff_t *ppos)
 {
        struct file *file = iocb->ki_filp;
        struct address_space * mapping = file->f_mapping;
-       size_t ocount;          /* original count */
        size_t count;           /* after file limit checks */
        struct inode    *inode = mapping->host;
        loff_t          pos;
        ssize_t         written;
        ssize_t         err;
 
-       ocount = 0;
-       err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
-       if (err)
-               return err;
-
-       count = ocount;
+       count = iov_iter_count(iter);
        pos = *ppos;
 
        /* We can write back this queue in page reclaim */
@@ -2484,8 +2334,8 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                loff_t endbyte;
                ssize_t written_buffered;
 
-               written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
-                                                       ppos, count, ocount);
+               written = generic_file_direct_write_iter(iocb, iter, pos,
+                                                        ppos, count);
                if (written < 0 || written == count)
                        goto out;
                /*
@@ -2494,9 +2344,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                 */
                pos += written;
                count -= written;
-               written_buffered = generic_file_buffered_write(iocb, iov,
-                                               nr_segs, pos, ppos, count,
-                                               written);
+               iov_iter_advance(iter, written);
+               written_buffered = generic_file_buffered_write_iter(iocb, iter,
+                                               pos, ppos, count, written);
                /*
                 * If generic_file_buffered_write() retuned a synchronous error
                 * then we want to return the number of bytes which were
@@ -2528,13 +2378,57 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                         */
                }
        } else {
-               written = generic_file_buffered_write(iocb, iov, nr_segs,
+               iter->count = count;
+               written = generic_file_buffered_write_iter(iocb, iter,
                                pos, ppos, count, written);
        }
 out:
        current->backing_dev_info = NULL;
        return written ? written : err;
 }
+EXPORT_SYMBOL(__generic_file_write_iter);
+
+ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *iter,
+                               loff_t pos)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       ssize_t ret;
+
+       mutex_lock(&inode->i_mutex);
+       ret = __generic_file_write_iter(iocb, iter, &iocb->ki_pos);
+       mutex_unlock(&inode->i_mutex);
+
+       if (ret > 0 || ret == -EIOCBQUEUED) {
+               ssize_t err;
+
+               err = generic_write_sync(file, pos, ret);
+               if (err < 0 && ret > 0)
+                       ret = err;
+       }
+       return ret;
+}
+EXPORT_SYMBOL(generic_file_write_iter);
+
+ssize_t
+__generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+                        unsigned long nr_segs, loff_t *ppos)
+{
+       struct iov_iter iter;
+       size_t count;
+       int ret;
+
+       count = 0;
+       ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+       if (ret)
+               goto out;
+
+       iov_iter_init(&iter, iov, nr_segs, count, 0);
+
+       ret = __generic_file_write_iter(iocb, &iter, ppos);
+out:
+       return ret;
+}
 EXPORT_SYMBOL(__generic_file_aio_write);
 
 /**
index 7489884682d84a6b5840fef19e90234076fd374e..610e3df2768a6a5b2ec1e293da4c96dafbbe2d30 100644 (file)
@@ -2697,6 +2697,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
 
        mmun_start = haddr;
        mmun_end   = haddr + HPAGE_PMD_SIZE;
+again:
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_trans_huge(*pmd))) {
@@ -2719,7 +2720,14 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
        split_huge_page(page);
 
        put_page(page);
-       BUG_ON(pmd_trans_huge(*pmd));
+
+       /*
+        * We don't always have down_write of mmap_sem here: a racing
+        * do_huge_pmd_wp_page() might have copied-on-write to another
+        * huge page before our split_huge_page() got the anon_vma lock.
+        */
+       if (unlikely(pmd_trans_huge(*pmd)))
+               goto again;
 }
 
 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
index b49579c7f2a550462c334f97907f2fc131a65e00..0b7656e804d126cf0fcfa04a8b427396af86deb1 100644 (file)
@@ -653,6 +653,7 @@ static void free_huge_page(struct page *page)
        BUG_ON(page_count(page));
        BUG_ON(page_mapcount(page));
        restore_reserve = PagePrivate(page);
+       ClearPagePrivate(page);
 
        spin_lock(&hugetlb_lock);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
@@ -695,8 +696,22 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
        __SetPageHead(page);
+       __ClearPageReserved(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               /*
+                * For gigantic hugepages allocated through bootmem at
+                * boot, it's safer to be consistent with the not-gigantic
+                * hugepages and clear the PG_reserved bit from all tail pages
+                * too.  Otherwise drivers using get_user_pages() to access tail
+                * pages may get the reference counting wrong if they see
+                * PG_reserved set on a tail page (despite the head page not
+                * having PG_reserved set).  Enforcing this consistency between
+                * head and tail pages allows drivers to optimize away a check
+                * on the head page when they need to know if put_page() is needed
+                * after get_user_pages().
+                */
+               __ClearPageReserved(p);
                set_page_count(p, 0);
                p->first_page = page;
        }
@@ -1329,9 +1344,9 @@ static void __init gather_bootmem_prealloc(void)
 #else
                page = virt_to_page(m);
 #endif
-               __ClearPageReserved(page);
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
+               WARN_ON(PageReserved(page));
                prep_new_huge_page(h, page, page_to_nid(page));
                /*
                 * If we had gigantic hugepages allocated at boot time, we need
index 1c52ddbc839ba1f8f42e940c51bc321ba6b2abfe..9c9c685e4ddca665c11b878bd5a19ed1199f8eca 100644 (file)
@@ -311,7 +311,7 @@ struct mem_cgroup {
 
        atomic_t        dead_count;
 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
-       struct tcp_memcontrol tcp_mem;
+       struct cg_proto tcp_mem;
 #endif
 #if defined(CONFIG_MEMCG_KMEM)
        /* analogous to slab_common's slab_caches list. per-memcg */
@@ -550,13 +550,13 @@ struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
        if (!memcg || mem_cgroup_is_root(memcg))
                return NULL;
 
-       return &memcg->tcp_mem.cg_proto;
+       return &memcg->tcp_mem;
 }
 EXPORT_SYMBOL(tcp_proto_cgroup);
 
 static void disarm_sock_keys(struct mem_cgroup *memcg)
 {
-       if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
+       if (!memcg_proto_activated(&memcg->tcp_mem))
                return;
        static_key_slow_dec(&memcg_socket_limit_enabled);
 }
@@ -866,6 +866,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
        unsigned long val = 0;
        int cpu;
 
+       get_online_cpus();
        for_each_online_cpu(cpu)
                val += per_cpu(memcg->stat->events[idx], cpu);
 #ifdef CONFIG_HOTPLUG_CPU
@@ -873,6 +874,7 @@ static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
        val += memcg->nocpu_base.events[idx];
        spin_unlock(&memcg->pcp_counter_lock);
 #endif
+       put_online_cpus();
        return val;
 }
 
@@ -2159,110 +2161,59 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
                memcg_wakeup_oom(memcg);
 }
 
-/*
- * try to call OOM killer
- */
 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
 {
-       bool locked;
-       int wakeups;
-
        if (!current->memcg_oom.may_oom)
                return;
-
-       current->memcg_oom.in_memcg_oom = 1;
-
        /*
-        * As with any blocking lock, a contender needs to start
-        * listening for wakeups before attempting the trylock,
-        * otherwise it can miss the wakeup from the unlock and sleep
-        * indefinitely.  This is just open-coded because our locking
-        * is so particular to memcg hierarchies.
+        * We are in the middle of the charge context here, so we
+        * don't want to block when potentially sitting on a callstack
+        * that holds all kinds of filesystem and mm locks.
+        *
+        * Also, the caller may handle a failed allocation gracefully
+        * (like optional page cache readahead) and so an OOM killer
+        * invocation might not even be necessary.
+        *
+        * That's why we don't do anything here except remember the
+        * OOM context and then deal with it at the end of the page
+        * fault when the stack is unwound, the locks are released,
+        * and when we know whether the fault was overall successful.
         */
-       wakeups = atomic_read(&memcg->oom_wakeups);
-       mem_cgroup_mark_under_oom(memcg);
-
-       locked = mem_cgroup_oom_trylock(memcg);
-
-       if (locked)
-               mem_cgroup_oom_notify(memcg);
-
-       if (locked && !memcg->oom_kill_disable) {
-               mem_cgroup_unmark_under_oom(memcg);
-               mem_cgroup_out_of_memory(memcg, mask, order);
-               mem_cgroup_oom_unlock(memcg);
-               /*
-                * There is no guarantee that an OOM-lock contender
-                * sees the wakeups triggered by the OOM kill
-                * uncharges.  Wake any sleepers explicitely.
-                */
-               memcg_oom_recover(memcg);
-       } else {
-               /*
-                * A system call can just return -ENOMEM, but if this
-                * is a page fault and somebody else is handling the
-                * OOM already, we need to sleep on the OOM waitqueue
-                * for this memcg until the situation is resolved.
-                * Which can take some time because it might be
-                * handled by a userspace task.
-                *
-                * However, this is the charge context, which means
-                * that we may sit on a large call stack and hold
-                * various filesystem locks, the mmap_sem etc. and we
-                * don't want the OOM handler to deadlock on them
-                * while we sit here and wait.  Store the current OOM
-                * context in the task_struct, then return -ENOMEM.
-                * At the end of the page fault handler, with the
-                * stack unwound, pagefault_out_of_memory() will check
-                * back with us by calling
-                * mem_cgroup_oom_synchronize(), possibly putting the
-                * task to sleep.
-                */
-               current->memcg_oom.oom_locked = locked;
-               current->memcg_oom.wakeups = wakeups;
-               css_get(&memcg->css);
-               current->memcg_oom.wait_on_memcg = memcg;
-       }
+       css_get(&memcg->css);
+       current->memcg_oom.memcg = memcg;
+       current->memcg_oom.gfp_mask = mask;
+       current->memcg_oom.order = order;
 }
 
 /**
  * mem_cgroup_oom_synchronize - complete memcg OOM handling
+ * @handle: actually kill/wait or just clean up the OOM state
  *
- * This has to be called at the end of a page fault if the the memcg
- * OOM handler was enabled and the fault is returning %VM_FAULT_OOM.
+ * This has to be called at the end of a page fault if the memcg OOM
+ * handler was enabled.
  *
- * Memcg supports userspace OOM handling, so failed allocations must
+ * Memcg supports userspace OOM handling where failed allocations must
  * sleep on a waitqueue until the userspace task resolves the
  * situation.  Sleeping directly in the charge context with all kinds
  * of locks held is not a good idea, instead we remember an OOM state
  * in the task and mem_cgroup_oom_synchronize() has to be called at
- * the end of the page fault to put the task to sleep and clean up the
- * OOM state.
+ * the end of the page fault to complete the OOM handling.
  *
  * Returns %true if an ongoing memcg OOM situation was detected and
- * finalized, %false otherwise.
+ * completed, %false otherwise.
  */
-bool mem_cgroup_oom_synchronize(void)
+bool mem_cgroup_oom_synchronize(bool handle)
 {
+       struct mem_cgroup *memcg = current->memcg_oom.memcg;
        struct oom_wait_info owait;
-       struct mem_cgroup *memcg;
+       bool locked;
 
        /* OOM is global, do not handle */
-       if (!current->memcg_oom.in_memcg_oom)
-               return false;
-
-       /*
-        * We invoked the OOM killer but there is a chance that a kill
-        * did not free up any charges.  Everybody else might already
-        * be sleeping, so restart the fault and keep the rampage
-        * going until some charges are released.
-        */
-       memcg = current->memcg_oom.wait_on_memcg;
        if (!memcg)
-               goto out;
+               return false;
 
-       if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
-               goto out_memcg;
+       if (!handle)
+               goto cleanup;
 
        owait.memcg = memcg;
        owait.wait.flags = 0;
@@ -2271,13 +2222,25 @@ bool mem_cgroup_oom_synchronize(void)
        INIT_LIST_HEAD(&owait.wait.task_list);
 
        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
-       /* Only sleep if we didn't miss any wakeups since OOM */
-       if (atomic_read(&memcg->oom_wakeups) == current->memcg_oom.wakeups)
+       mem_cgroup_mark_under_oom(memcg);
+
+       locked = mem_cgroup_oom_trylock(memcg);
+
+       if (locked)
+               mem_cgroup_oom_notify(memcg);
+
+       if (locked && !memcg->oom_kill_disable) {
+               mem_cgroup_unmark_under_oom(memcg);
+               finish_wait(&memcg_oom_waitq, &owait.wait);
+               mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
+                                        current->memcg_oom.order);
+       } else {
                schedule();
-       finish_wait(&memcg_oom_waitq, &owait.wait);
-out_memcg:
-       mem_cgroup_unmark_under_oom(memcg);
-       if (current->memcg_oom.oom_locked) {
+               mem_cgroup_unmark_under_oom(memcg);
+               finish_wait(&memcg_oom_waitq, &owait.wait);
+       }
+
+       if (locked) {
                mem_cgroup_oom_unlock(memcg);
                /*
                 * There is no guarantee that an OOM-lock contender
@@ -2286,10 +2249,9 @@ out_memcg:
                 */
                memcg_oom_recover(memcg);
        }
+cleanup:
+       current->memcg_oom.memcg = NULL;
        css_put(&memcg->css);
-       current->memcg_oom.wait_on_memcg = NULL;
-out:
-       current->memcg_oom.in_memcg_oom = 0;
        return true;
 }
 
@@ -2703,6 +2665,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                     || fatal_signal_pending(current)))
                goto bypass;
 
+       if (unlikely(task_in_memcg_oom(current)))
+               goto bypass;
+
        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
@@ -2801,6 +2766,8 @@ done:
        return 0;
 nomem:
        *ptr = NULL;
+       if (gfp_mask & __GFP_NOFAIL)
+               return 0;
        return -ENOMEM;
 bypass:
        *ptr = root_mem_cgroup;
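
The comments in the memcontrol.c hunks above describe the reworked flow: the charge path no longer blocks or kills anything, it only records the OOM context in the task, and mem_cgroup_oom_synchronize() decides at the end of the page fault whether to invoke the killer or merely clear the state. Below is a minimal userspace C sketch of that record-now-act-later pattern; the names deferred_oom, oom_record() and oom_synchronize() and the flag value are invented for illustration and are not the kernel API.

    /* Minimal userspace analogue of the deferred-OOM pattern above: the
     * "charge" path only records the failure context, and a later
     * synchronize step, run once the locks are dropped, decides whether
     * to act on it.  Build with: gcc -Wall -o oom_sketch oom_sketch.c
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct deferred_oom {
            bool pending;           /* was an OOM condition recorded? */
            unsigned int gfp;       /* illustrative allocation flags */
            int order;              /* illustrative allocation order */
    };

    /* charge path: never blocks, only remembers the OOM context */
    static void oom_record(struct deferred_oom *s, unsigned int gfp, int order)
    {
            s->pending = true;
            s->gfp = gfp;
            s->order = order;
    }

    /* end of the "page fault": act on the recorded state or just clear it */
    static bool oom_synchronize(struct deferred_oom *s, bool handle)
    {
            if (!s->pending)
                    return false;   /* nothing recorded for this task */
            if (handle)
                    printf("would invoke the OOM killer (gfp=%#x, order=%d)\n",
                           s->gfp, s->order);
            s->pending = false;     /* always clear the per-task state */
            return true;
    }

    int main(void)
    {
            struct deferred_oom state = { 0 };

            oom_record(&state, 0xd0, 0);    /* allocation failed under locks */
            oom_synchronize(&state, true);  /* fault returned VM_FAULT_OOM */
            return 0;
    }

The property mirrored here is that the recording step never sleeps, so it stays safe while filesystem and mm locks are held, as the comment in the hunk explains.
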
index ca00039471152eae75bb8321a129a4edfc6f580e..1311f26497e6a0f776682ed8a7e23b8620a5b9ad 100644 (file)
@@ -837,6 +837,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                                         */
                                        make_migration_entry_read(&entry);
                                        pte = swp_entry_to_pte(entry);
+                                       if (pte_swp_soft_dirty(*src_pte))
+                                               pte = pte_swp_mksoft_dirty(pte);
                                        set_pte_at(src_mm, addr, src_pte, pte);
                                }
                        }
@@ -3863,15 +3865,21 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * space.  Kernel faults are handled more gracefully.
         */
        if (flags & FAULT_FLAG_USER)
-               mem_cgroup_enable_oom();
+               mem_cgroup_oom_enable();
 
        ret = __handle_mm_fault(mm, vma, address, flags);
 
-       if (flags & FAULT_FLAG_USER)
-               mem_cgroup_disable_oom();
-
-       if (WARN_ON(task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM)))
-               mem_cgroup_oom_synchronize();
+       if (flags & FAULT_FLAG_USER) {
+               mem_cgroup_oom_disable();
+                /*
+                 * The task may have entered a memcg OOM situation but
+                 * if the allocation error was handled gracefully (no
+                 * VM_FAULT_OOM), there is no need to kill anything.
+                 * Just clean up the OOM state peacefully.
+                 */
+                if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+                        mem_cgroup_oom_synchronize(false);
+       }
 
        return ret;
 }
index a26bccd44ccb0a907662c08135399462bd816b9c..7a7325ee1d089696a8073a84d2f748f326124805 100644 (file)
@@ -161,6 +161,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 
        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+       if (pte_swp_soft_dirty(*ptep))
+               pte = pte_mksoft_dirty(pte);
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
 #ifdef CONFIG_HUGETLB_PAGE
index 94722a4d6b438311de1d1690d81a0d598a907339..a3af058f68e4d9f434337d0dcd6127d5d8c6d039 100644 (file)
@@ -94,13 +94,16 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                        swp_entry_t entry = pte_to_swp_entry(oldpte);
 
                        if (is_write_migration_entry(entry)) {
+                               pte_t newpte;
                                /*
                                 * A protection check is difficult so
                                 * just be safe and disable write
                                 */
                                make_migration_entry_read(&entry);
-                               set_pte_at(mm, addr, pte,
-                                       swp_entry_to_pte(entry));
+                               newpte = swp_entry_to_pte(entry);
+                               if (pte_swp_soft_dirty(oldpte))
+                                       newpte = pte_swp_mksoft_dirty(newpte);
+                               set_pte_at(mm, addr, pte, newpte);
                        }
                        pages++;
                }
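
The copy_one_pte(), remove_migration_pte() and change_pte_range() hunks above all apply the same fix: rebuilding a PTE from a migration entry produces a fresh encoding, so the soft-dirty bit of the old entry has to be copied over explicitly or it is silently lost. A tiny userspace sketch of that preserve-a-bit-across-re-encoding pattern, with an invented entry layout (entry_t, ENTRY_SOFT_DIRTY, rebuild_entry()) that only mirrors the structure of the kernel helpers:

    /* Sketch of the pattern in the hunks above: rebuilding an entry from
     * its type/offset pair drops the flag bits, so the soft-dirty bit of
     * the old entry must be carried over by hand.  The encoding and the
     * names are invented for illustration.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t entry_t;

    #define ENTRY_SOFT_DIRTY (1ull << 62)   /* illustrative tracking bit */

    /* build a fresh entry: note that no flag bits survive this */
    static entry_t make_entry(uint32_t type, uint32_t offset)
    {
            return ((entry_t)type << 32) | offset;
    }

    /* re-encode an entry, explicitly preserving its soft-dirty bit */
    static entry_t rebuild_entry(entry_t old)
    {
            uint32_t type = (old >> 32) & 0x3fffffff;
            entry_t new = make_entry(type, (uint32_t)old);

            if (old & ENTRY_SOFT_DIRTY)     /* the step the fixes above add */
                    new |= ENTRY_SOFT_DIRTY;
            return new;
    }

    int main(void)
    {
            entry_t e = make_entry(1, 42) | ENTRY_SOFT_DIRTY;

            printf("old %#llx new %#llx\n", (unsigned long long)e,
                   (unsigned long long)rebuild_entry(e));
            return 0;
    }
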
index 91b13d6a16d453b50894e6028800b92399bf8f14..0843feb66f3d0236abd4386b5bfd0170c24ae0ef 100644 (file)
@@ -25,7 +25,6 @@
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
-#include <asm/pgalloc.h>
 
 #include "internal.h"
 
@@ -63,10 +62,8 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                return NULL;
 
        pmd = pmd_alloc(mm, pud, addr);
-       if (!pmd) {
-               pud_free(mm, pud);
+       if (!pmd)
                return NULL;
-       }
 
        VM_BUG_ON(pmd_trans_huge(*pmd));
 
index 314e9d2743813ea5e52c08929b8a4cbb4621dfa9..6738c47f1f7280edc5f3fe610b2658195a0a77e0 100644 (file)
@@ -680,7 +680,7 @@ void pagefault_out_of_memory(void)
 {
        struct zonelist *zonelist;
 
-       if (mem_cgroup_oom_synchronize())
+       if (mem_cgroup_oom_synchronize(true))
                return;
 
        zonelist = node_zonelist(first_online_node, GFP_KERNEL);
index f5236f804aa6cdf9800f445c31950a52cdd6b88f..63807583d8e89f1c96f8b05bcf5fe422ed200c26 100644 (file)
@@ -1210,11 +1210,11 @@ static unsigned long dirty_poll_interval(unsigned long dirty,
        return 1;
 }
 
-static long bdi_max_pause(struct backing_dev_info *bdi,
-                         unsigned long bdi_dirty)
+static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
+                                  unsigned long bdi_dirty)
 {
-       long bw = bdi->avg_write_bandwidth;
-       long t;
+       unsigned long bw = bdi->avg_write_bandwidth;
+       unsigned long t;
 
        /*
         * Limit pause time for small memory systems. If sleeping for too long
@@ -1226,7 +1226,7 @@ static long bdi_max_pause(struct backing_dev_info *bdi,
        t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
        t++;
 
-       return min_t(long, t, MAX_PAUSE);
+       return min_t(unsigned long, t, MAX_PAUSE);
 }
 
 static long bdi_min_pause(struct backing_dev_info *bdi,
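
bdi_max_pause() above caps the writeback pause by roughly how long the currently dirty pages would take to drain at the measured bandwidth. Here is the same expression worked through as a standalone C snippet; HZ = 100, MAX_PAUSE = HZ / 5 and the unit interpretation (bdi_dirty in pages, bw in pages per second, result in jiffies) are assumptions made for this sketch only.

    /* Worked example of the pause cap computed above.  HZ, MAX_PAUSE and
     * the units are assumptions for this sketch, not taken from the tree.
     */
    #include <stdio.h>

    #define HZ        100
    #define MAX_PAUSE (HZ / 5)

    static unsigned long roundup_pow_of_two(unsigned long n)
    {
            unsigned long p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    static unsigned long max_pause(unsigned long bdi_dirty, unsigned long bw)
    {
            unsigned long t;

            t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
            t++;
            return t < MAX_PAUSE ? t : MAX_PAUSE;
    }

    int main(void)
    {
            /* 1000 dirty pages against ~25600 pages/s (100 MB/s, 4K pages) */
            printf("max pause: %lu jiffies\n", max_pause(1000, 25600));
            return 0;
    }

With these sample numbers the computed cap is a single jiffy, illustrating how a fast device with few dirty pages keeps the allowed pause short.
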
index 8c79a4764be0c99a1d573d174e2d247e90079519..e4e6a4f57b09f2c304683e026a8dab05f98a500d 100644 (file)
@@ -258,11 +258,14 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
        if (sis->flags & SWP_FILE) {
                struct kiocb kiocb;
                struct file *swap_file = sis->swap_file;
-               struct address_space *mapping = swap_file->f_mapping;
-               struct iovec iov = {
-                       .iov_base = kmap(page),
-                       .iov_len  = PAGE_SIZE,
+               struct bio_vec bvec = {
+                       .bv_page = kmap(page),
+                       .bv_len = PAGE_SIZE,
+                       .bv_offset = 0,
                };
+               struct iov_iter iter;
+
+               iov_iter_init_bvec(&iter, &bvec, 1, PAGE_SIZE, 0);
 
                init_sync_kiocb(&kiocb, swap_file);
                kiocb.ki_pos = page_file_offset(page);
@@ -270,9 +273,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
 
                set_page_writeback(page);
                unlock_page(page);
-               ret = mapping->a_ops->direct_IO(KERNEL_WRITE,
-                                               &kiocb, &iov,
-                                               kiocb.ki_pos, 1);
+               ret = swap_file->f_op->write_iter(&kiocb, &iter, kiocb.ki_pos);
                kunmap(page);
                if (ret == PAGE_SIZE) {
                        count_vm_event(PSWPOUT);
index 8297623fcaedec21b37b5080d376967e673afe92..8612a95d7d7e2f7ddba00076b3db0e13b2f8c7c5 100644 (file)
@@ -1464,14 +1464,23 @@ shmem_write_end(struct file *file, struct address_space *mapping,
        return copied;
 }
 
-static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
+static ssize_t shmem_file_read_iter(struct kiocb *iocb,
+                                   struct iov_iter *iter, loff_t pos)
 {
+       read_descriptor_t desc;
+       loff_t *ppos = &iocb->ki_pos;
+       struct file *filp = iocb->ki_filp;
        struct inode *inode = file_inode(filp);
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index;
        unsigned long offset;
        enum sgp_type sgp = SGP_READ;
 
+       desc.written = 0;
+       desc.count = iov_iter_count(iter);
+       desc.arg.data = iter;
+       desc.error = 0;
+
        /*
         * Might this read be for a stacking filesystem?  Then when reading
         * holes of a sparse file, we actually need to allocate those pages,
@@ -1498,10 +1507,10 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                                break;
                }
 
-               desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
-               if (desc->error) {
-                       if (desc->error == -EINVAL)
-                               desc->error = 0;
+               desc.error = shmem_getpage(inode, index, &page, sgp, NULL);
+               if (desc.error) {
+                       if (desc.error == -EINVAL)
+                               desc.error = 0;
                        break;
                }
                if (page)
@@ -1552,13 +1561,13 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
-               ret = actor(desc, page, offset, nr);
+               ret = file_read_iter_actor(&desc, page, offset, nr);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
 
                page_cache_release(page);
-               if (ret != nr || !desc->count)
+               if (ret != nr || !desc.count)
                        break;
 
                cond_resched();
@@ -1566,40 +1575,8 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 
        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
        file_accessed(filp);
-}
-
-static ssize_t shmem_file_aio_read(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs, loff_t pos)
-{
-       struct file *filp = iocb->ki_filp;
-       ssize_t retval;
-       unsigned long seg;
-       size_t count;
-       loff_t *ppos = &iocb->ki_pos;
 
-       retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-       if (retval)
-               return retval;
-
-       for (seg = 0; seg < nr_segs; seg++) {
-               read_descriptor_t desc;
-
-               desc.written = 0;
-               desc.arg.buf = iov[seg].iov_base;
-               desc.count = iov[seg].iov_len;
-               if (desc.count == 0)
-                       continue;
-               desc.error = 0;
-               do_shmem_file_read(filp, ppos, &desc, file_read_actor);
-               retval += desc.written;
-               if (desc.error) {
-                       retval = retval ?: desc.error;
-                       break;
-               }
-               if (desc.count > 0)
-                       break;
-       }
-       return retval;
+       return desc.written ? desc.written : desc.error;
 }
 
 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
@@ -2724,8 +2701,8 @@ static const struct file_operations shmem_file_operations = {
        .llseek         = shmem_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
-       .aio_read       = shmem_file_aio_read,
-       .aio_write      = generic_file_aio_write,
+       .read_iter      = shmem_file_read_iter,
+       .write_iter     = generic_file_write_iter,
        .fsync          = noop_fsync,
        .splice_read    = shmem_file_splice_read,
        .splice_write   = generic_file_splice_write,
index a3443278ce3a693b4c2625df3752a59e975abca3..e2e98af703ea9fdcbab4bcd034bc82a1482c9ca6 100644 (file)
@@ -56,6 +56,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                        continue;
                }
 
+#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
                /*
                 * For simplicity, we won't check this in the list of memcg
                 * caches. We have control over memcg naming, and if there
@@ -69,6 +70,7 @@ static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
                        s = NULL;
                        return -EINVAL;
                }
+#endif
        }
 
        WARN_ON(strchr(name, ' '));     /* It confuses parsers */
index 3963fc24fcc1b6f8c4d365de4d99bda8993311b7..de7c904e52e507079f5bae7a2b7854a9d7cf80b0 100644 (file)
@@ -1824,6 +1824,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        struct filename *pathname;
        int i, type, prev;
        int err;
+       unsigned int old_block_size;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -1914,6 +1915,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        }
 
        swap_file = p->swap_file;
+       old_block_size = p->old_block_size;
        p->swap_file = NULL;
        p->max = 0;
        swap_map = p->swap_map;
@@ -1938,7 +1940,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
        inode = mapping->host;
        if (S_ISBLK(inode->i_mode)) {
                struct block_device *bdev = I_BDEV(inode);
-               set_blocksize(bdev, p->old_block_size);
+               set_blocksize(bdev, old_block_size);
                blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
        } else {
                mutex_lock(&inode->i_mutex);
index 53f2f82f83ae0d16bf19646cdb5b3bce5fc4e4cf..eea668d9cff6c578ada0cf6c02eca5e22de5598d 100644 (file)
@@ -211,6 +211,7 @@ void unregister_shrinker(struct shrinker *shrinker)
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);
+       kfree(shrinker->nr_deferred);
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
index 841e35f1db22caff3afd03c104403f0c744776cb..d93510c6aa2da860d779570cbf0a4c7cee278ad5 100644 (file)
@@ -804,6 +804,10 @@ static void zswap_frontswap_invalidate_area(unsigned type)
        }
        tree->rbroot = RB_ROOT;
        spin_unlock(&tree->lock);
+
+       zbud_destroy_pool(tree->pool);
+       kfree(tree);
+       zswap_trees[type] = NULL;
 }
 
 static struct zbud_ops zswap_zbud_ops = {
index 61fc573f1142f707fee7d886e67153a62c91c14a..b3d17d1c49c3fbecdd5701da18c14c9767f7d969 100644 (file)
@@ -98,14 +98,14 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
                vlan_gvrp_request_leave(dev);
 
        vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);
+
+       netdev_upper_dev_unlink(real_dev, dev);
        /* Because unregister_netdevice_queue() makes sure at least one rcu
         * grace period is respected before device freeing,
         * we dont need to call synchronize_net() here.
         */
        unregister_netdevice_queue(dev, head);
 
-       netdev_upper_dev_unlink(real_dev, dev);
-
        if (grp->nr_vlan_devs == 0) {
                vlan_mvrp_uninit_applicant(real_dev);
                vlan_gvrp_uninit_applicant(real_dev);
@@ -169,13 +169,13 @@ int register_vlan_dev(struct net_device *dev)
        if (err < 0)
                goto out_uninit_mvrp;
 
-       err = netdev_upper_dev_link(real_dev, dev);
-       if (err)
-               goto out_uninit_mvrp;
-
        err = register_netdevice(dev);
        if (err < 0)
-               goto out_upper_dev_unlink;
+               goto out_uninit_mvrp;
+
+       err = netdev_upper_dev_link(real_dev, dev);
+       if (err)
+               goto out_unregister_netdev;
 
        /* Account for reference in struct vlan_dev_priv */
        dev_hold(real_dev);
@@ -191,8 +191,8 @@ int register_vlan_dev(struct net_device *dev)
 
        return 0;
 
-out_upper_dev_unlink:
-       netdev_upper_dev_unlink(real_dev, dev);
+out_unregister_netdev:
+       unregister_netdevice(dev);
 out_uninit_mvrp:
        if (grp->nr_vlan_devs == 0)
                vlan_mvrp_uninit_applicant(real_dev);
index ba5983f34c42f8076f4416af164f229678ffc4e1..a2caf00b82cc7bcf23526305917691d96855f761 100644 (file)
@@ -196,12 +196,12 @@ static inline u32 vlan_get_ingress_priority(struct net_device *dev,
 }
 
 #ifdef CONFIG_VLAN_8021Q_GVRP
-extern int vlan_gvrp_request_join(const struct net_device *dev);
-extern void vlan_gvrp_request_leave(const struct net_device *dev);
-extern int vlan_gvrp_init_applicant(struct net_device *dev);
-extern void vlan_gvrp_uninit_applicant(struct net_device *dev);
-extern int vlan_gvrp_init(void);
-extern void vlan_gvrp_uninit(void);
+int vlan_gvrp_request_join(const struct net_device *dev);
+void vlan_gvrp_request_leave(const struct net_device *dev);
+int vlan_gvrp_init_applicant(struct net_device *dev);
+void vlan_gvrp_uninit_applicant(struct net_device *dev);
+int vlan_gvrp_init(void);
+void vlan_gvrp_uninit(void);
 #else
 static inline int vlan_gvrp_request_join(const struct net_device *dev) { return 0; }
 static inline void vlan_gvrp_request_leave(const struct net_device *dev) {}
@@ -212,12 +212,12 @@ static inline void vlan_gvrp_uninit(void) {}
 #endif
 
 #ifdef CONFIG_VLAN_8021Q_MVRP
-extern int vlan_mvrp_request_join(const struct net_device *dev);
-extern void vlan_mvrp_request_leave(const struct net_device *dev);
-extern int vlan_mvrp_init_applicant(struct net_device *dev);
-extern void vlan_mvrp_uninit_applicant(struct net_device *dev);
-extern int vlan_mvrp_init(void);
-extern void vlan_mvrp_uninit(void);
+int vlan_mvrp_request_join(const struct net_device *dev);
+void vlan_mvrp_request_leave(const struct net_device *dev);
+int vlan_mvrp_init_applicant(struct net_device *dev);
+void vlan_mvrp_uninit_applicant(struct net_device *dev);
+int vlan_mvrp_init(void);
+void vlan_mvrp_uninit(void);
 #else
 static inline int vlan_mvrp_request_join(const struct net_device *dev) { return 0; }
 static inline void vlan_mvrp_request_leave(const struct net_device *dev) {}
@@ -229,8 +229,8 @@ static inline void vlan_mvrp_uninit(void) {}
 
 extern const char vlan_fullname[];
 extern const char vlan_version[];
-extern int vlan_netlink_init(void);
-extern void vlan_netlink_fini(void);
+int vlan_netlink_init(void);
+void vlan_netlink_fini(void);
 
 extern struct rtnl_link_ops vlan_link_ops;
 
index 309129732285fd610159446bade5ede27c835d1c..c7e634af85165613822074b28ceeca4af7153ae7 100644 (file)
@@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev)
 
        return nla_total_size(2) +      /* IFLA_VLAN_PROTOCOL */
               nla_total_size(2) +      /* IFLA_VLAN_ID */
-              sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+              nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
               vlan_qos_map_size(vlan->nr_ingress_mappings) +
               vlan_qos_map_size(vlan->nr_egress_mappings);
 }
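
The vlan_get_size() fix above accounts for IFLA_VLAN_FLAGS with nla_total_size() instead of the bare struct size, so the attribute header and alignment padding become part of the message-size estimate. A small userspace sketch of that accounting, assuming a Linux system where <linux/netlink.h> provides NLA_HDRLEN/NLA_ALIGN and <linux/if_link.h> provides struct ifla_vlan_flags; the helper total_size() is mine, not the kernel's nla_total_size():

    /* Sketch: a netlink attribute occupies header + payload, rounded up
     * to the netlink alignment -- the bare payload size underestimates it.
     */
    #include <linux/if_link.h>      /* struct ifla_vlan_flags */
    #include <linux/netlink.h>      /* NLA_HDRLEN, NLA_ALIGN */
    #include <stdio.h>

    static int total_size(int payload)
    {
            return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
            int payload = sizeof(struct ifla_vlan_flags);

            printf("payload only: %d bytes, with header and padding: %d bytes\n",
                   payload, total_size(payload));
            return 0;
    }
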
index 990afab2be1bcc1732167def7eb0336e38509588..9c5a1aa34d1253c725af29889d83759946af37c3 100644 (file)
@@ -544,9 +544,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
 
        chan->inuse = false;
        if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
-               vdev->config->get(vdev,
-                               offsetof(struct virtio_9p_config, tag_len),
-                               &tag_len, sizeof(tag_len));
+               virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
        } else {
                err = -EINVAL;
                goto out_free_vq;
@@ -556,8 +554,9 @@ static int p9_virtio_probe(struct virtio_device *vdev)
                err = -ENOMEM;
                goto out_free_vq;
        }
-       vdev->config->get(vdev, offsetof(struct virtio_9p_config, tag),
-                       tag, tag_len);
+
+       virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag),
+                          tag, tag_len);
        chan->tag = tag;
        chan->tag_len = tag_len;
        err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
index 4b4d2b779ec1a08202303863fea1b74c67f04383..a00123ebb0ae0705c8e45e8433a07e96a2a06fe5 100644 (file)
@@ -1735,7 +1735,7 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                        res = -EFAULT;
                        break;
                }
-               if (amount > AX25_NOUID_BLOCK) {
+               if (amount < 0 || amount > AX25_NOUID_BLOCK) {
                        res = -EINVAL;
                        break;
                }
index 489bb36f1b9464381d2aa83c78740977e3899b77..4f4aabbd8eab24c4d12983ccf656966f4f7084b0 100644 (file)
@@ -24,6 +24,7 @@ batman-adv-y += bitarray.o
 batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
 batman-adv-y += debugfs.o
 batman-adv-$(CONFIG_BATMAN_ADV_DAT) += distributed-arp-table.o
+batman-adv-y += fragmentation.o
 batman-adv-y += gateway_client.o
 batman-adv-y += gateway_common.o
 batman-adv-y += hard-interface.o
@@ -37,5 +38,3 @@ batman-adv-y += send.o
 batman-adv-y += soft-interface.o
 batman-adv-y += sysfs.o
 batman-adv-y += translation-table.o
-batman-adv-y += unicast.o
-batman-adv-y += vis.o
index 0a8a80cd4bf19819f797169a9b5c6122f3dc47b3..a2b480a908723a37d47d14bd76e63628518e6561 100644 (file)
@@ -87,22 +87,198 @@ static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
        return (uint8_t)(sum / count);
 }
 
+/**
+ * batadv_iv_ogm_orig_free - free the private resources allocated for this
+ *  orig_node
+ * @orig_node: the orig_node for which the resources have to be free'd
+ */
+static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
+{
+       kfree(orig_node->bat_iv.bcast_own);
+       kfree(orig_node->bat_iv.bcast_own_sum);
+}
+
+/**
+ * batadv_iv_ogm_orig_add_if - change the private structures of the orig_node to
+ *  include the new hard-interface
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current amount of interfaces
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
+                                    int max_if_num)
+{
+       void *data_ptr;
+       size_t data_size, old_size;
+       int ret = -ENOMEM;
+
+       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+       data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
+       old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
+       data_ptr = kmalloc(data_size, GFP_ATOMIC);
+       if (!data_ptr)
+               goto unlock;
+
+       memcpy(data_ptr, orig_node->bat_iv.bcast_own, old_size);
+       kfree(orig_node->bat_iv.bcast_own);
+       orig_node->bat_iv.bcast_own = data_ptr;
+
+       data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+       if (!data_ptr) {
+               kfree(orig_node->bat_iv.bcast_own);
+               goto unlock;
+       }
+
+       memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
+              (max_if_num - 1) * sizeof(uint8_t));
+       kfree(orig_node->bat_iv.bcast_own_sum);
+       orig_node->bat_iv.bcast_own_sum = data_ptr;
+
+       ret = 0;
+
+unlock:
+       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+       return ret;
+}
+
+/**
+ * batadv_iv_ogm_orig_del_if - change the private structures of the orig_node to
+ *  exclude the removed interface
+ * @orig_node: the orig_node that has to be changed
+ * @max_if_num: the current amount of interfaces
+ * @del_if_num: the index of the interface being removed
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
+                                    int max_if_num, int del_if_num)
+{
+       int chunk_size,  ret = -ENOMEM, if_offset;
+       void *data_ptr = NULL;
+
+       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+       /* last interface was removed */
+       if (max_if_num == 0)
+               goto free_bcast_own;
+
+       chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
+       data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
+       if (!data_ptr)
+               goto unlock;
+
+       /* copy first part */
+       memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
+
+       /* copy second part */
+       memcpy((char *)data_ptr + del_if_num * chunk_size,
+              orig_node->bat_iv.bcast_own + ((del_if_num + 1) * chunk_size),
+              (max_if_num - del_if_num) * chunk_size);
+
+free_bcast_own:
+       kfree(orig_node->bat_iv.bcast_own);
+       orig_node->bat_iv.bcast_own = data_ptr;
+
+       if (max_if_num == 0)
+               goto free_own_sum;
+
+       data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
+       if (!data_ptr) {
+               kfree(orig_node->bat_iv.bcast_own);
+               goto unlock;
+       }
+
+       memcpy(data_ptr, orig_node->bat_iv.bcast_own_sum,
+              del_if_num * sizeof(uint8_t));
+
+       if_offset = (del_if_num + 1) * sizeof(uint8_t);
+       memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
+              orig_node->bat_iv.bcast_own_sum + if_offset,
+              (max_if_num - del_if_num) * sizeof(uint8_t));
+
+free_own_sum:
+       kfree(orig_node->bat_iv.bcast_own_sum);
+       orig_node->bat_iv.bcast_own_sum = data_ptr;
+
+       ret = 0;
+unlock:
+       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+
+       return ret;
+}
+
+/**
+ * batadv_iv_ogm_orig_get - retrieve or create (if it does not exist) an originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: mac address of the originator
+ *
+ * Returns the originator object corresponding to the passed mac address or NULL
+ * on failure.
+ * If the object does not exist, it is created and initialised.
+ */
+static struct batadv_orig_node *
+batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
+{
+       struct batadv_orig_node *orig_node;
+       int size, hash_added;
+
+       orig_node = batadv_orig_hash_find(bat_priv, addr);
+       if (orig_node)
+               return orig_node;
+
+       orig_node = batadv_orig_node_new(bat_priv, addr);
+       if (!orig_node)
+               return NULL;
+
+       spin_lock_init(&orig_node->bat_iv.ogm_cnt_lock);
+
+       size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
+       orig_node->bat_iv.bcast_own = kzalloc(size, GFP_ATOMIC);
+       if (!orig_node->bat_iv.bcast_own)
+               goto free_orig_node;
+
+       size = bat_priv->num_ifaces * sizeof(uint8_t);
+       orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
+       if (!orig_node->bat_iv.bcast_own_sum)
+               goto free_bcast_own;
+
+       hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
+                                    batadv_choose_orig, orig_node,
+                                    &orig_node->hash_entry);
+       if (hash_added != 0)
+               goto free_bcast_own;
+
+       return orig_node;
+
+free_bcast_own:
+       kfree(orig_node->bat_iv.bcast_own);
+free_orig_node:
+       batadv_orig_node_free_ref(orig_node);
+
+       return NULL;
+}
+
 static struct batadv_neigh_node *
 batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
                        const uint8_t *neigh_addr,
                        struct batadv_orig_node *orig_node,
                        struct batadv_orig_node *orig_neigh)
 {
+       struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_neigh_node *neigh_node;
 
-       neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr);
+       neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
        if (!neigh_node)
                goto out;
 
-       INIT_LIST_HEAD(&neigh_node->bonding_list);
+       spin_lock_init(&neigh_node->bat_iv.lq_update_lock);
 
-       neigh_node->orig_node = orig_neigh;
-       neigh_node->if_incoming = hard_iface;
+       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                  "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+                  neigh_addr, orig_node->orig, hard_iface->net_dev->name);
 
        spin_lock_bh(&orig_node->neigh_list_lock);
        hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
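
batadv_iv_ogm_orig_add_if() and batadv_iv_ogm_orig_del_if() above grow and shrink per-interface arrays while keeping the entries for the remaining interfaces intact, copying the parts before and after the removed slot. A compact userspace sketch of the same idea follows; grow_counters()/shrink_counters() and the uint8_t counters are illustrative only, and shrink_counters() assumes at least two slots remain before the removal.

    /* Sketch: resize a per-interface counter array.  Growing appends one
     * zeroed slot; shrinking removes slot del_idx and copies the parts
     * before and after it, much like the hunk above does for
     * bcast_own_sum.  Error handling is reduced to returning NULL.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static uint8_t *grow_counters(uint8_t *old, int old_num)
    {
            uint8_t *new = calloc(old_num + 1, sizeof(*new));

            if (!new)
                    return NULL;
            memcpy(new, old, old_num * sizeof(*new));
            free(old);
            return new;
    }

    /* remove slot del_idx; assumes old_num >= 2 */
    static uint8_t *shrink_counters(uint8_t *old, int old_num, int del_idx)
    {
            uint8_t *new = malloc((old_num - 1) * sizeof(*new));

            if (!new)
                    return NULL;
            memcpy(new, old, del_idx * sizeof(*new));
            memcpy(new + del_idx, old + del_idx + 1,
                   (old_num - 1 - del_idx) * sizeof(*new));
            free(old);
            return new;
    }

    int main(void)
    {
            uint8_t *c = calloc(2, sizeof(*c));

            if (!c)
                    return 1;
            c[0] = 10;
            c[1] = 20;

            c = grow_counters(c, 2);        /* now { 10, 20, 0 } */
            if (!c)
                    return 1;
            c = shrink_counters(c, 3, 1);   /* now { 10, 0 } */
            if (!c)
                    return 1;
            printf("%u %u\n", c[0], c[1]);
            free(c);
            return 0;
    }
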
@@ -135,9 +311,8 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
        batadv_ogm_packet->header.ttl = 2;
        batadv_ogm_packet->flags = BATADV_NO_FLAGS;
+       batadv_ogm_packet->reserved = 0;
        batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
-       batadv_ogm_packet->tt_num_changes = 0;
-       batadv_ogm_packet->ttvn = 0;
 
        res = 0;
 
@@ -207,12 +382,12 @@ static uint8_t batadv_hop_penalty(uint8_t tq,
 
 /* is there another aggregated packet here? */
 static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
-                                    int tt_num_changes)
+                                    __be16 tvlv_len)
 {
        int next_buff_pos = 0;
 
        next_buff_pos += buff_pos + BATADV_OGM_HLEN;
-       next_buff_pos += batadv_tt_len(tt_num_changes);
+       next_buff_pos += ntohs(tvlv_len);
 
        return (next_buff_pos <= packet_len) &&
               (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
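
batadv_iv_ogm_aggr_packet() above checks whether another aggregated OGM, i.e. a fixed header plus its TVLV payload, still fits within the received buffer and below the aggregation limit; the send and receive loops use it to walk the buffer record by record. A small standalone sketch of such a walk; the record layout, HDR_LEN and MAX_AGGR_BYTES are stand-ins, not the real batman-adv packet format:

    /* Sketch: walk back-to-back (header + variable-length payload) records
     * in one buffer, stopping when the next record would overrun the buffer
     * or the aggregation limit.  Layout and limits are illustrative only.
     */
    #include <arpa/inet.h>  /* htons(), ntohs() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define HDR_LEN        4
    #define MAX_AGGR_BYTES 512

    struct rec_hdr {
            uint16_t payload_len;   /* big endian, like tvlv_len on the wire */
            uint16_t reserved;
    };

    /* does the record starting at buff_pos fit entirely within the limits? */
    static int another_record(int buff_pos, int packet_len, uint16_t be_len)
    {
            int next = buff_pos + HDR_LEN + ntohs(be_len);

            return next <= packet_len && next <= MAX_AGGR_BYTES;
    }

    int main(void)
    {
            uint8_t buf[64] = { 0 };        /* larger than packet_len, so the
                                             * trailing peek stays in bounds */
            struct rec_hdr h = { htons(8), 0 }, cur;
            int pos = 0, packet_len = 2 * (HDR_LEN + 8);

            memcpy(buf, &h, sizeof(h));                /* record 0, 8 byte payload */
            memcpy(buf + HDR_LEN + 8, &h, sizeof(h));  /* record 1, 8 byte payload */

            memcpy(&cur, buf + pos, sizeof(cur));
            while (another_record(pos, packet_len, cur.payload_len)) {
                    printf("record at %d, payload %d bytes\n",
                           pos, (int)ntohs(cur.payload_len));
                    pos += HDR_LEN + ntohs(cur.payload_len);
                    memcpy(&cur, buf + pos, sizeof(cur));
            }
            return 0;
    }
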
@@ -240,7 +415,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
 
        /* adjust all flags and log packets */
        while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
-                                        batadv_ogm_packet->tt_num_changes)) {
+                                        batadv_ogm_packet->tvlv_len)) {
                /* we might have aggregated direct link packets with an
                 * ordinary base packet
                 */
@@ -256,18 +431,18 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
                        fwd_str = "Sending own";
 
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
+                          "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s) on interface %s [%pM]\n",
                           fwd_str, (packet_num > 0 ? "aggregated " : ""),
                           batadv_ogm_packet->orig,
                           ntohl(batadv_ogm_packet->seqno),
                           batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
                           (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
                            "on" : "off"),
-                          batadv_ogm_packet->ttvn, hard_iface->net_dev->name,
+                          hard_iface->net_dev->name,
                           hard_iface->net_dev->dev_addr);
 
                buff_pos += BATADV_OGM_HLEN;
-               buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
+               buff_pos += ntohs(batadv_ogm_packet->tvlv_len);
                packet_num++;
                packet_pos = forw_packet->skb->data + buff_pos;
                batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
@@ -601,7 +776,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
                                  struct batadv_hard_iface *if_incoming)
 {
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
-       uint8_t tt_num_changes;
+       uint16_t tvlv_len;
 
        if (batadv_ogm_packet->header.ttl <= 1) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -621,7 +796,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
                        return;
        }
 
-       tt_num_changes = batadv_ogm_packet->tt_num_changes;
+       tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
 
        batadv_ogm_packet->header.ttl--;
        memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
@@ -642,7 +817,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
                batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
 
        batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
-                               BATADV_OGM_HLEN + batadv_tt_len(tt_num_changes),
+                               BATADV_OGM_HLEN + tvlv_len,
                                if_incoming, 0, batadv_iv_ogm_fwd_send_time());
 }
 
@@ -662,20 +837,22 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
        uint32_t i;
        size_t word_index;
        uint8_t *w;
+       int if_num;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       spin_lock_bh(&orig_node->ogm_cnt_lock);
+                       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
                        word_index = hard_iface->if_num * BATADV_NUM_WORDS;
-                       word = &(orig_node->bcast_own[word_index]);
+                       word = &(orig_node->bat_iv.bcast_own[word_index]);
 
                        batadv_bit_get_packet(bat_priv, word, 1, 0);
-                       w = &orig_node->bcast_own_sum[hard_iface->if_num];
+                       if_num = hard_iface->if_num;
+                       w = &orig_node->bat_iv.bcast_own_sum[if_num];
                        *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
-                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+                       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
                }
                rcu_read_unlock();
        }
@@ -688,43 +865,29 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct batadv_hard_iface *primary_if;
        int *ogm_buff_len = &hard_iface->bat_iv.ogm_buff_len;
-       int vis_server, tt_num_changes = 0;
        uint32_t seqno;
-       uint8_t bandwidth;
+       uint16_t tvlv_len = 0;
 
-       vis_server = atomic_read(&bat_priv->vis_mode);
        primary_if = batadv_primary_if_get_selected(bat_priv);
 
-       if (hard_iface == primary_if)
-               tt_num_changes = batadv_tt_append_diff(bat_priv, ogm_buff,
-                                                      ogm_buff_len,
-                                                      BATADV_OGM_HLEN);
+       if (hard_iface == primary_if) {
+               /* tt changes have to be committed before the tvlv data is
+                * appended as it may alter the tt tvlv container
+                */
+               batadv_tt_local_commit_changes(bat_priv);
+               tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff,
+                                                           ogm_buff_len,
+                                                           BATADV_OGM_HLEN);
+       }
 
        batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
+       batadv_ogm_packet->tvlv_len = htons(tvlv_len);
 
        /* change sequence number to network order */
        seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
        batadv_ogm_packet->seqno = htonl(seqno);
        atomic_inc(&hard_iface->bat_iv.ogm_seqno);
 
-       batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
-       batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
-       if (tt_num_changes >= 0)
-               batadv_ogm_packet->tt_num_changes = tt_num_changes;
-
-       if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC)
-               batadv_ogm_packet->flags |= BATADV_VIS_SERVER;
-       else
-               batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
-
-       if (hard_iface == primary_if &&
-           atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER) {
-               bandwidth = (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
-               batadv_ogm_packet->gw_flags = bandwidth;
-       } else {
-               batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
-       }
-
        batadv_iv_ogm_slide_own_bcast_window(hard_iface);
        batadv_iv_ogm_queue_add(bat_priv, hard_iface->bat_iv.ogm_buff,
                                hard_iface->bat_iv.ogm_buff_len, hard_iface, 1,
@@ -770,18 +933,18 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
                if (dup_status != BATADV_NO_DUP)
                        continue;
 
-               spin_lock_bh(&tmp_neigh_node->lq_update_lock);
-               batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
-                                      &tmp_neigh_node->tq_index, 0);
-               tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
-               tmp_neigh_node->tq_avg = tq_avg;
-               spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
+               spin_lock_bh(&tmp_neigh_node->bat_iv.lq_update_lock);
+               batadv_ring_buffer_set(tmp_neigh_node->bat_iv.tq_recv,
+                                      &tmp_neigh_node->bat_iv.tq_index, 0);
+               tq_avg = batadv_ring_buffer_avg(tmp_neigh_node->bat_iv.tq_recv);
+               tmp_neigh_node->bat_iv.tq_avg = tq_avg;
+               spin_unlock_bh(&tmp_neigh_node->bat_iv.lq_update_lock);
        }
 
        if (!neigh_node) {
                struct batadv_orig_node *orig_tmp;
 
-               orig_tmp = batadv_get_orig_node(bat_priv, ethhdr->h_source);
+               orig_tmp = batadv_iv_ogm_orig_get(bat_priv, ethhdr->h_source);
                if (!orig_tmp)
                        goto unlock;
 
@@ -798,80 +961,55 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
 
        rcu_read_unlock();
 
-       orig_node->flags = batadv_ogm_packet->flags;
        neigh_node->last_seen = jiffies;
 
-       spin_lock_bh(&neigh_node->lq_update_lock);
-       batadv_ring_buffer_set(neigh_node->tq_recv,
-                              &neigh_node->tq_index,
+       spin_lock_bh(&neigh_node->bat_iv.lq_update_lock);
+       batadv_ring_buffer_set(neigh_node->bat_iv.tq_recv,
+                              &neigh_node->bat_iv.tq_index,
                               batadv_ogm_packet->tq);
-       neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
-       spin_unlock_bh(&neigh_node->lq_update_lock);
+       tq_avg = batadv_ring_buffer_avg(neigh_node->bat_iv.tq_recv);
+       neigh_node->bat_iv.tq_avg = tq_avg;
+       spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
 
        if (dup_status == BATADV_NO_DUP) {
                orig_node->last_ttl = batadv_ogm_packet->header.ttl;
                neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
        }
 
-       batadv_bonding_candidate_add(orig_node, neigh_node);
+       batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
 
        /* if this neighbor already is our next hop there is nothing
         * to change
         */
        router = batadv_orig_node_get_router(orig_node);
        if (router == neigh_node)
-               goto update_tt;
+               goto out;
 
        /* if this neighbor does not offer a better TQ we won't consider it */
-       if (router && (router->tq_avg > neigh_node->tq_avg))
-               goto update_tt;
+       if (router && (router->bat_iv.tq_avg > neigh_node->bat_iv.tq_avg))
+               goto out;
 
        /* if the TQ is the same and the link not more symmetric we
         * won't consider it either
         */
-       if (router && (neigh_node->tq_avg == router->tq_avg)) {
+       if (router && (neigh_node->bat_iv.tq_avg == router->bat_iv.tq_avg)) {
                orig_node_tmp = router->orig_node;
-               spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+               spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
                if_num = router->if_incoming->if_num;
-               sum_orig = orig_node_tmp->bcast_own_sum[if_num];
-               spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+               sum_orig = orig_node_tmp->bat_iv.bcast_own_sum[if_num];
+               spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
 
                orig_node_tmp = neigh_node->orig_node;
-               spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
+               spin_lock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
                if_num = neigh_node->if_incoming->if_num;
-               sum_neigh = orig_node_tmp->bcast_own_sum[if_num];
-               spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
+               sum_neigh = orig_node_tmp->bat_iv.bcast_own_sum[if_num];
+               spin_unlock_bh(&orig_node_tmp->bat_iv.ogm_cnt_lock);
 
                if (sum_orig >= sum_neigh)
-                       goto update_tt;
+                       goto out;
        }
 
        batadv_update_route(bat_priv, orig_node, neigh_node);
-
-update_tt:
-       /* I have to check for transtable changes only if the OGM has been
-        * sent through a primary interface
-        */
-       if (((batadv_ogm_packet->orig != ethhdr->h_source) &&
-            (batadv_ogm_packet->header.ttl > 2)) ||
-           (batadv_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
-               batadv_tt_update_orig(bat_priv, orig_node, tt_buff,
-                                     batadv_ogm_packet->tt_num_changes,
-                                     batadv_ogm_packet->ttvn,
-                                     ntohs(batadv_ogm_packet->tt_crc));
-
-       if (orig_node->gw_flags != batadv_ogm_packet->gw_flags)
-               batadv_gw_node_update(bat_priv, orig_node,
-                                     batadv_ogm_packet->gw_flags);
-
-       orig_node->gw_flags = batadv_ogm_packet->gw_flags;
-
-       /* restart gateway selection if fast or late switching was enabled */
-       if ((orig_node->gw_flags) &&
-           (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
-           (atomic_read(&bat_priv->gw_sel_class) > 2))
-               batadv_gw_check_election(bat_priv, orig_node);
-
        goto out;
 
 unlock:
@@ -893,7 +1031,7 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        uint8_t total_count;
        uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
        unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
-       int tq_asym_penalty, inv_asym_penalty, ret = 0;
+       int tq_asym_penalty, inv_asym_penalty, if_num, ret = 0;
        unsigned int combined_tq;
 
        /* find corresponding one hop neighbor */
@@ -931,10 +1069,11 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        orig_node->last_seen = jiffies;
 
        /* find packet count of corresponding one hop neighbor */
-       spin_lock_bh(&orig_node->ogm_cnt_lock);
-       orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
-       neigh_rq_count = neigh_node->real_packet_count;
-       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
+       if_num = if_incoming->if_num;
+       orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
+       neigh_rq_count = neigh_node->bat_iv.real_packet_count;
+       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
 
        /* pay attention to not get a value bigger than 100 % */
        if (orig_eq_count > neigh_rq_count)
@@ -1016,12 +1155,13 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
        uint8_t *neigh_addr;
        uint8_t packet_count;
+       unsigned long *bitmap;
 
-       orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
+       orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
        if (!orig_node)
                return BATADV_NO_DUP;
 
-       spin_lock_bh(&orig_node->ogm_cnt_lock);
+       spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
        seq_diff = seqno - orig_node->last_real_seqno;
 
        /* signalize caller that the packet is to be dropped. */
@@ -1036,7 +1176,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        hlist_for_each_entry_rcu(tmp_neigh_node,
                                 &orig_node->neigh_list, list) {
                neigh_addr = tmp_neigh_node->addr;
-               is_dup = batadv_test_bit(tmp_neigh_node->real_bits,
+               is_dup = batadv_test_bit(tmp_neigh_node->bat_iv.real_bits,
                                         orig_node->last_real_seqno,
                                         seqno);
 
@@ -1052,13 +1192,13 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
                }
 
                /* if the window moved, set the update flag. */
-               need_update |= batadv_bit_get_packet(bat_priv,
-                                                    tmp_neigh_node->real_bits,
+               bitmap = tmp_neigh_node->bat_iv.real_bits;
+               need_update |= batadv_bit_get_packet(bat_priv, bitmap,
                                                     seq_diff, set_mark);
 
-               packet_count = bitmap_weight(tmp_neigh_node->real_bits,
+               packet_count = bitmap_weight(tmp_neigh_node->bat_iv.real_bits,
                                             BATADV_TQ_LOCAL_WINDOW_SIZE);
-               tmp_neigh_node->real_packet_count = packet_count;
+               tmp_neigh_node->bat_iv.real_packet_count = packet_count;
        }
        rcu_read_unlock();
 
@@ -1070,7 +1210,7 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
        }
 
 out:
-       spin_unlock_bh(&orig_node->ogm_cnt_lock);
+       spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
        batadv_orig_node_free_ref(orig_node);
        return ret;
 }
@@ -1082,7 +1222,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
 {
        struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct batadv_hard_iface *hard_iface;
-       struct batadv_orig_node *orig_neigh_node, *orig_node;
+       struct batadv_orig_node *orig_neigh_node, *orig_node, *orig_node_tmp;
        struct batadv_neigh_node *router = NULL, *router_router = NULL;
        struct batadv_neigh_node *orig_neigh_router = NULL;
        int has_directlink_flag;
@@ -1122,13 +1262,11 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                is_single_hop_neigh = true;
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                  "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %#.4x, changes %u, tq %d, TTL %d, V %d, IDF %d)\n",
+                  "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, tq %d, TTL %d, V %d, IDF %d)\n",
                   ethhdr->h_source, if_incoming->net_dev->name,
                   if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
                   batadv_ogm_packet->prev_sender,
-                  ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->ttvn,
-                  ntohs(batadv_ogm_packet->tt_crc),
-                  batadv_ogm_packet->tt_num_changes, batadv_ogm_packet->tq,
+                  ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
                   batadv_ogm_packet->header.ttl,
                   batadv_ogm_packet->header.version, has_directlink_flag);
 
@@ -1168,8 +1306,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                int16_t if_num;
                uint8_t *weight;
 
-               orig_neigh_node = batadv_get_orig_node(bat_priv,
-                                                      ethhdr->h_source);
+               orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+                                                        ethhdr->h_source);
                if (!orig_neigh_node)
                        return;
 
@@ -1183,15 +1321,15 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                        if_num = if_incoming->if_num;
                        offset = if_num * BATADV_NUM_WORDS;
 
-                       spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
-                       word = &(orig_neigh_node->bcast_own[offset]);
+                       spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
+                       word = &(orig_neigh_node->bat_iv.bcast_own[offset]);
                        bit_pos = if_incoming_seqno - 2;
                        bit_pos -= ntohl(batadv_ogm_packet->seqno);
                        batadv_set_bit(word, bit_pos);
-                       weight = &orig_neigh_node->bcast_own_sum[if_num];
+                       weight = &orig_neigh_node->bat_iv.bcast_own_sum[if_num];
                        *weight = bitmap_weight(word,
                                                BATADV_TQ_LOCAL_WINDOW_SIZE);
-                       spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
+                       spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
                }
 
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -1214,7 +1352,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                return;
        }
 
-       orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
+       orig_node = batadv_iv_ogm_orig_get(bat_priv, batadv_ogm_packet->orig);
        if (!orig_node)
                return;
 
@@ -1235,10 +1373,12 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
        }
 
        router = batadv_orig_node_get_router(orig_node);
-       if (router)
-               router_router = batadv_orig_node_get_router(router->orig_node);
+       if (router) {
+               orig_node_tmp = router->orig_node;
+               router_router = batadv_orig_node_get_router(orig_node_tmp);
+       }
 
-       if ((router && router->tq_avg != 0) &&
+       if ((router && router->bat_iv.tq_avg != 0) &&
            (batadv_compare_eth(router->addr, ethhdr->h_source)))
                is_from_best_next_hop = true;
 
@@ -1254,14 +1394,16 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
                goto out;
        }
 
+       batadv_tvlv_ogm_receive(bat_priv, batadv_ogm_packet, orig_node);
+
        /* if sender is a direct neighbor the sender mac equals
         * originator mac
         */
        if (is_single_hop_neigh)
                orig_neigh_node = orig_node;
        else
-               orig_neigh_node = batadv_get_orig_node(bat_priv,
-                                                      ethhdr->h_source);
+               orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
+                                                        ethhdr->h_source);
 
        if (!orig_neigh_node)
                goto out;
@@ -1350,9 +1492,9 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct ethhdr *ethhdr;
        int buff_pos = 0, packet_len;
-       unsigned char *tt_buff, *packet_buff;
-       bool ret;
+       unsigned char *tvlv_buff, *packet_buff;
        uint8_t *packet_pos;
+       bool ret;
 
        ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
        if (!ret)
@@ -1375,14 +1517,14 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
 
        /* unpack the aggregated packets and process them one by one */
        while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
-                                        batadv_ogm_packet->tt_num_changes)) {
-               tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
+                                        batadv_ogm_packet->tvlv_len)) {
+               tvlv_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
 
-               batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
-                                     if_incoming);
+               batadv_iv_ogm_process(ethhdr, batadv_ogm_packet,
+                                     tvlv_buff, if_incoming);
 
                buff_pos += BATADV_OGM_HLEN;
-               buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
+               buff_pos += ntohs(batadv_ogm_packet->tvlv_len);
 
                packet_pos = packet_buff + buff_pos;
                batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos;
@@ -1392,6 +1534,106 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
        return NET_RX_SUCCESS;
 }
 
+/**
+ * batadv_iv_ogm_orig_print - print the originator table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @seq: debugfs table seq_file struct
+ */
+static void batadv_iv_ogm_orig_print(struct batadv_priv *bat_priv,
+                                    struct seq_file *seq)
+{
+       struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
+       struct batadv_hashtable *hash = bat_priv->orig_hash;
+       int last_seen_msecs, last_seen_secs;
+       struct batadv_orig_node *orig_node;
+       unsigned long last_seen_jiffies;
+       struct hlist_head *head;
+       int batman_count = 0;
+       uint32_t i;
+
+       seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
+                  "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
+                  "Nexthop", "outgoingIF", "Potential nexthops");
+
+       for (i = 0; i < hash->size; i++) {
+               head = &hash->table[i];
+
+               rcu_read_lock();
+               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+                       neigh_node = batadv_orig_node_get_router(orig_node);
+                       if (!neigh_node)
+                               continue;
+
+                       if (neigh_node->bat_iv.tq_avg == 0)
+                               goto next;
+
+                       last_seen_jiffies = jiffies - orig_node->last_seen;
+                       last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
+                       last_seen_secs = last_seen_msecs / 1000;
+                       last_seen_msecs = last_seen_msecs % 1000;
+
+                       seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
+                                  orig_node->orig, last_seen_secs,
+                                  last_seen_msecs, neigh_node->bat_iv.tq_avg,
+                                  neigh_node->addr,
+                                  neigh_node->if_incoming->net_dev->name);
+
+                       hlist_for_each_entry_rcu(neigh_node_tmp,
+                                                &orig_node->neigh_list, list) {
+                               seq_printf(seq, " %pM (%3i)",
+                                          neigh_node_tmp->addr,
+                                          neigh_node_tmp->bat_iv.tq_avg);
+                       }
+
+                       seq_puts(seq, "\n");
+                       batman_count++;
+
+next:
+                       batadv_neigh_node_free_ref(neigh_node);
+               }
+               rcu_read_unlock();
+       }
+
+       if (batman_count == 0)
+               seq_puts(seq, "No batman nodes in range ...\n");
+}
+
+/**
+ * batadv_iv_ogm_neigh_cmp - compare the metrics of two neighbors
+ * @neigh1: the first neighbor object of the comparison
+ * @neigh2: the second neighbor object of the comparison
+ *
+ * Returns a value less than, equal to or greater than 0 if the metric via
+ * neigh1 is lower than, the same as or higher than the metric via neigh2
+ */
+static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
+                                  struct batadv_neigh_node *neigh2)
+{
+       uint8_t tq1, tq2;
+
+       tq1 = neigh1->bat_iv.tq_avg;
+       tq2 = neigh2->bat_iv.tq_avg;
+
+       return tq1 - tq2;
+}
+
+/**
+ * batadv_iv_ogm_neigh_is_eob - check if neigh1 is equally good or better than
+ *  neigh2 from the metric perspective
+ * @neigh1: the first neighbor object of the comparison
+ * @neigh2: the second neighbor object of the comparison
+ *
+ * Returns true if the metric via neigh1 is equally good or better than the
+ * metric via neigh2, false otherwise.
+ */
+static bool batadv_iv_ogm_neigh_is_eob(struct batadv_neigh_node *neigh1,
+                                      struct batadv_neigh_node *neigh2)
+{
+       int diff = batadv_iv_ogm_neigh_cmp(neigh1, neigh2);
+
+       return diff > -BATADV_TQ_SIMILARITY_THRESHOLD;
+}
+
 static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
        .name = "BATMAN_IV",
        .bat_iface_enable = batadv_iv_ogm_iface_enable,
@@ -1400,6 +1642,12 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
        .bat_primary_iface_set = batadv_iv_ogm_primary_iface_set,
        .bat_ogm_schedule = batadv_iv_ogm_schedule,
        .bat_ogm_emit = batadv_iv_ogm_emit,
+       .bat_neigh_cmp = batadv_iv_ogm_neigh_cmp,
+       .bat_neigh_is_equiv_or_better = batadv_iv_ogm_neigh_is_eob,
+       .bat_orig_print = batadv_iv_ogm_orig_print,
+       .bat_orig_free = batadv_iv_ogm_orig_free,
+       .bat_orig_add_if = batadv_iv_ogm_orig_add_if,
+       .bat_orig_del_if = batadv_iv_ogm_orig_del_if,
 };
 
 int __init batadv_iv_init(void)
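
The new bat_neigh_cmp / bat_neigh_is_equiv_or_better hooks above boil down to a comparison of the two neighbors' averaged TQ values. The following standalone userspace sketch is not part of this commit; types are simplified and the threshold value of 50 is an assumption taken from BATADV_TQ_SIMILARITY_THRESHOLD in main.h. It only illustrates how the "equally good or better" test tolerates a small TQ deficit.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* assumed value of BATADV_TQ_SIMILARITY_THRESHOLD (see main.h) */
    #define TQ_SIMILARITY_THRESHOLD 50

    /* mirrors batadv_iv_ogm_neigh_cmp(): <0 worse, 0 equal, >0 better */
    static int neigh_cmp(uint8_t tq1, uint8_t tq2)
    {
            return tq1 - tq2;
    }

    /* mirrors batadv_iv_ogm_neigh_is_eob(): a deficit smaller than the
     * threshold still counts as "equally good"
     */
    static bool neigh_is_equiv_or_better(uint8_t tq1, uint8_t tq2)
    {
            return neigh_cmp(tq1, tq2) > -TQ_SIMILARITY_THRESHOLD;
    }

    int main(void)
    {
            printf("%d\n", neigh_is_equiv_or_better(200, 230)); /* 1: within threshold */
            printf("%d\n", neigh_is_equiv_or_better(100, 230)); /* 0: clearly worse */
            return 0;
    }
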
index 264de88db3208290b940cc27b09e44f79e1616c9..28eb5e6d0a02510a0a7b4e268f7dc34a26bc182a 100644 (file)
@@ -411,10 +411,10 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
                return NULL;
        }
 
-       /* this is a gateway now, remove any tt entries */
+       /* this is a gateway now, remove any TT entry on this VLAN */
        orig_node = batadv_orig_hash_find(bat_priv, orig);
        if (orig_node) {
-               batadv_tt_global_del_orig(bat_priv, orig_node,
+               batadv_tt_global_del_orig(bat_priv, orig_node, vid,
                                          "became a backbone gateway");
                batadv_orig_node_free_ref(orig_node);
        }
@@ -858,30 +858,28 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    struct sk_buff *skb)
 {
-       struct ethhdr *ethhdr;
+       struct batadv_bla_claim_dst *bla_dst;
+       uint8_t *hw_src, *hw_dst;
        struct vlan_ethhdr *vhdr;
+       struct ethhdr *ethhdr;
        struct arphdr *arphdr;
-       uint8_t *hw_src, *hw_dst;
-       struct batadv_bla_claim_dst *bla_dst;
-       uint16_t proto;
+       unsigned short vid;
+       __be16 proto;
        int headlen;
-       unsigned short vid = BATADV_NO_FLAGS;
        int ret;
 
+       vid = batadv_get_vid(skb, 0);
        ethhdr = eth_hdr(skb);
 
-       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+       proto = ethhdr->h_proto;
+       headlen = ETH_HLEN;
+       if (vid & BATADV_VLAN_HAS_TAG) {
                vhdr = (struct vlan_ethhdr *)ethhdr;
-               vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-               vid |= BATADV_VLAN_HAS_TAG;
-               proto = ntohs(vhdr->h_vlan_encapsulated_proto);
-               headlen = sizeof(*vhdr);
-       } else {
-               proto = ntohs(ethhdr->h_proto);
-               headlen = ETH_HLEN;
+               proto = vhdr->h_vlan_encapsulated_proto;
+               headlen += VLAN_HLEN;
        }
 
-       if (proto != ETH_P_ARP)
+       if (proto != htons(ETH_P_ARP))
                return 0; /* not a claim frame */
 
        /* this must be an ARP frame. check if it is a claim. */
@@ -1317,12 +1315,14 @@ out:
 
 /* @bat_priv: the bat priv with all the soft interface information
  * @orig: originator mac address
+ * @vid: VLAN identifier
  *
- * check if the originator is a gateway for any VLAN ID.
+ * Check if the originator is a gateway for the VLAN identified by vid.
  *
- * returns 1 if it is found, 0 otherwise
+ * Returns true if orig is a backbone for this vid, false otherwise.
  */
-int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+                                   unsigned short vid)
 {
        struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
        struct hlist_head *head;
@@ -1330,25 +1330,26 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
        int i;
 
        if (!atomic_read(&bat_priv->bridge_loop_avoidance))
-               return 0;
+               return false;
 
        if (!hash)
-               return 0;
+               return false;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
-                       if (batadv_compare_eth(backbone_gw->orig, orig)) {
+                       if (batadv_compare_eth(backbone_gw->orig, orig) &&
+                           backbone_gw->vid == vid) {
                                rcu_read_unlock();
-                               return 1;
+                               return true;
                        }
                }
                rcu_read_unlock();
        }
 
-       return 0;
+       return false;
 }
 
 
@@ -1365,10 +1366,8 @@ int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                              struct batadv_orig_node *orig_node, int hdr_size)
 {
-       struct ethhdr *ethhdr;
-       struct vlan_ethhdr *vhdr;
        struct batadv_bla_backbone_gw *backbone_gw;
-       unsigned short vid = BATADV_NO_FLAGS;
+       unsigned short vid;
 
        if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
                return 0;
@@ -1377,16 +1376,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
        if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
                return 0;
 
-       ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
-
-       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
-               if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
-                       return 0;
-
-               vhdr = (struct vlan_ethhdr *)(skb->data + hdr_size);
-               vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-               vid |= BATADV_VLAN_HAS_TAG;
-       }
+       vid = batadv_get_vid(skb, hdr_size);
 
        /* see if this originator is a backbone gw for this VLAN */
        backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
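
Both hunks above replace the open-coded 802.1Q parsing with a call to batadv_get_vid(). As a rough userspace model (not part of this commit; constants and struct layout are simplified, and BATADV_VLAN_HAS_TAG is assumed to be bit 15), the helper can be pictured as:

    #include <stdint.h>
    #include <arpa/inet.h>

    #define ETH_P_8021Q    0x8100
    #define VLAN_VID_MASK  0x0fff
    #define VLAN_HAS_TAG   (1 << 15)   /* stands in for BATADV_VLAN_HAS_TAG (assumed) */
    #define NO_FLAGS       0           /* stands in for BATADV_NO_FLAGS */

    struct eth_vlan_hdr {
            uint8_t  h_dest[6];
            uint8_t  h_source[6];
            uint16_t h_proto;      /* ETH_P_8021Q when a VLAN tag follows */
            uint16_t h_vlan_tci;   /* only valid for tagged frames */
    };

    /* rough model of batadv_get_vid(): return the 12-bit VID plus a
     * "has tag" flag for 802.1Q frames, or a no-flags value otherwise
     */
    unsigned short get_vid(const uint8_t *frame, int hdr_size)
    {
            const struct eth_vlan_hdr *hdr;

            hdr = (const struct eth_vlan_hdr *)(frame + hdr_size);
            if (ntohs(hdr->h_proto) != ETH_P_8021Q)
                    return NO_FLAGS;

            return (ntohs(hdr->h_vlan_tci) & VLAN_VID_MASK) | VLAN_HAS_TAG;
    }
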
index 4b102e71e5bd63c2bee71f14bcdb0b799398ef5e..da173e760e775caab684d63d2a1749cb55e0b930 100644 (file)
@@ -30,7 +30,8 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
                                             void *offset);
-int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
+bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig,
+                                   unsigned short vid);
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb);
 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
@@ -74,10 +75,11 @@ static inline int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq,
        return 0;
 }
 
-static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
-                                                uint8_t *orig)
+static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
+                                                 uint8_t *orig,
+                                                 unsigned short vid)
 {
-       return 0;
+       return false;
 }
 
 static inline int
index f186a55b23c3fbd1eda3a2d5877b5b781fd647eb..049a7a2ac5b69b83422a3292ee7ebb2183e36114 100644 (file)
@@ -28,7 +28,6 @@
 #include "gateway_common.h"
 #include "gateway_client.h"
 #include "soft-interface.h"
-#include "vis.h"
 #include "icmp_socket.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
@@ -300,12 +299,6 @@ static int batadv_transtable_local_open(struct inode *inode, struct file *file)
        return single_open(file, batadv_tt_local_seq_print_text, net_dev);
 }
 
-static int batadv_vis_data_open(struct inode *inode, struct file *file)
-{
-       struct net_device *net_dev = (struct net_device *)inode->i_private;
-       return single_open(file, batadv_vis_seq_print_text, net_dev);
-}
-
 struct batadv_debuginfo {
        struct attribute attr;
        const struct file_operations fops;
@@ -356,7 +349,6 @@ static BATADV_DEBUGINFO(dat_cache, S_IRUGO, batadv_dat_cache_open);
 #endif
 static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
                        batadv_transtable_local_open);
-static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
 #ifdef CONFIG_BATMAN_ADV_NC
 static BATADV_DEBUGINFO(nc_nodes, S_IRUGO, batadv_nc_nodes_open);
 #endif
@@ -373,7 +365,6 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
        &batadv_debuginfo_dat_cache,
 #endif
        &batadv_debuginfo_transtable_local,
-       &batadv_debuginfo_vis_data,
 #ifdef CONFIG_BATMAN_ADV_NC
        &batadv_debuginfo_nc_nodes,
 #endif
index 06345d401588c949762edfa14aaeb3f018a8e294..6c8c3934bd7b44bac8643683683c494e003f1ed2 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <linux/if_ether.h>
 #include <linux/if_arp.h>
+#include <linux/if_vlan.h>
 #include <net/arp.h>
 
 #include "main.h"
@@ -29,7 +30,6 @@
 #include "send.h"
 #include "types.h"
 #include "translation-table.h"
-#include "unicast.h"
 
 static void batadv_dat_purge(struct work_struct *work);
 
@@ -206,15 +206,11 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
  */
 static uint32_t batadv_hash_dat(const void *data, uint32_t size)
 {
-       const unsigned char *key = data;
        uint32_t hash = 0;
-       size_t i;
+       const struct batadv_dat_entry *dat = data;
 
-       for (i = 0; i < 4; i++) {
-               hash += key[i];
-               hash += (hash << 10);
-               hash ^= (hash >> 6);
-       }
+       hash = batadv_hash_bytes(hash, &dat->ip, sizeof(dat->ip));
+       hash = batadv_hash_bytes(hash, &dat->vid, sizeof(dat->vid));
 
        hash += (hash << 3);
        hash ^= (hash >> 11);
@@ -228,21 +224,26 @@ static uint32_t batadv_hash_dat(const void *data, uint32_t size)
  * table
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: search key
+ * @vid: VLAN identifier
  *
  * Returns the dat_entry if found, NULL otherwise.
  */
 static struct batadv_dat_entry *
-batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
+batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
+                          unsigned short vid)
 {
        struct hlist_head *head;
-       struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
+       struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
        struct batadv_hashtable *hash = bat_priv->dat.hash;
        uint32_t index;
 
        if (!hash)
                return NULL;
 
-       index = batadv_hash_dat(&ip, hash->size);
+       to_find.ip = ip;
+       to_find.vid = vid;
+
+       index = batadv_hash_dat(&to_find, hash->size);
        head = &hash->table[index];
 
        rcu_read_lock();
@@ -266,22 +267,24 @@ batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
  * @bat_priv: the bat priv with all the soft interface information
  * @ip: ipv4 to add/edit
  * @mac_addr: mac address to assign to the given ipv4
+ * @vid: VLAN identifier
  */
 static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
-                                uint8_t *mac_addr)
+                                uint8_t *mac_addr, unsigned short vid)
 {
        struct batadv_dat_entry *dat_entry;
        int hash_added;
 
-       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip);
+       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip, vid);
        /* if this entry is already known, just update it */
        if (dat_entry) {
                if (!batadv_compare_eth(dat_entry->mac_addr, mac_addr))
                        memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
                dat_entry->last_update = jiffies;
                batadv_dbg(BATADV_DBG_DAT, bat_priv,
-                          "Entry updated: %pI4 %pM\n", &dat_entry->ip,
-                          dat_entry->mac_addr);
+                          "Entry updated: %pI4 %pM (vid: %d)\n",
+                          &dat_entry->ip, dat_entry->mac_addr,
+                          BATADV_PRINT_VID(vid));
                goto out;
        }
 
@@ -290,12 +293,13 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
                goto out;
 
        dat_entry->ip = ip;
+       dat_entry->vid = vid;
        memcpy(dat_entry->mac_addr, mac_addr, ETH_ALEN);
        dat_entry->last_update = jiffies;
        atomic_set(&dat_entry->refcount, 2);
 
        hash_added = batadv_hash_add(bat_priv->dat.hash, batadv_compare_dat,
-                                    batadv_hash_dat, &dat_entry->ip,
+                                    batadv_hash_dat, dat_entry,
                                     &dat_entry->hash_entry);
 
        if (unlikely(hash_added != 0)) {
@@ -304,8 +308,8 @@ static void batadv_dat_entry_add(struct batadv_priv *bat_priv, __be32 ip,
                goto out;
        }
 
-       batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM\n",
-                  &dat_entry->ip, dat_entry->mac_addr);
+       batadv_dbg(BATADV_DBG_DAT, bat_priv, "New entry added: %pI4 %pM (vid: %d)\n",
+                  &dat_entry->ip, dat_entry->mac_addr, BATADV_PRINT_VID(vid));
 
 out:
        if (dat_entry)
@@ -419,6 +423,10 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
        bool ret = false;
        int j;
 
+       /* check if orig node candidate is running DAT */
+       if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
+               goto out;
+
        /* Check if this node has already been selected... */
        for (j = 0; j < select; j++)
                if (res[j].orig_node == candidate)
@@ -588,9 +596,9 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
                        goto free_orig;
 
                tmp_skb = pskb_copy(skb, GFP_ATOMIC);
-               if (!batadv_unicast_4addr_prepare_skb(bat_priv, tmp_skb,
-                                                     cand[i].orig_node,
-                                                     packet_subtype)) {
+               if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, tmp_skb,
+                                                          cand[i].orig_node,
+                                                          packet_subtype)) {
                        kfree_skb(tmp_skb);
                        goto free_neigh;
                }
@@ -625,6 +633,59 @@ out:
        return ret;
 }
 
+/**
+ * batadv_dat_tvlv_container_update - update the dat tvlv container after dat
+ *  setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_dat_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+       char dat_mode;
+
+       dat_mode = atomic_read(&bat_priv->distributed_arp_table);
+
+       switch (dat_mode) {
+       case 0:
+               batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+               break;
+       case 1:
+               batadv_tvlv_container_register(bat_priv, BATADV_TVLV_DAT, 1,
+                                              NULL, 0);
+               break;
+       }
+}
+
+/**
+ * batadv_dat_status_update - update the dat tvlv container after dat
+ *  setting change
+ * @net_dev: the soft interface net device
+ */
+void batadv_dat_status_update(struct net_device *net_dev)
+{
+       struct batadv_priv *bat_priv = netdev_priv(net_dev);
+       batadv_dat_tvlv_container_update(bat_priv);
+}
+
+/**
+ * batadv_dat_tvlv_ogm_handler_v1 - process incoming dat tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the dat data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+                                          struct batadv_orig_node *orig,
+                                          uint8_t flags,
+                                          void *tvlv_value,
+                                          uint16_t tvlv_value_len)
+{
+       if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
+       else
+               orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
+}
+
 /**
  * batadv_dat_hash_free - free the local DAT hash table
  * @bat_priv: the bat priv with all the soft interface information
@@ -657,6 +718,10 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
 
        batadv_dat_start_timer(bat_priv);
 
+       batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
+                                    NULL, BATADV_TVLV_DAT, 1,
+                                    BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+       batadv_dat_tvlv_container_update(bat_priv);
        return 0;
 }
 
@@ -666,6 +731,9 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
  */
 void batadv_dat_free(struct batadv_priv *bat_priv)
 {
+       batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_DAT, 1);
+
        cancel_delayed_work_sync(&bat_priv->dat.work);
 
        batadv_dat_hash_free(bat_priv);
@@ -693,8 +761,8 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
                goto out;
 
        seq_printf(seq, "Distributed ARP Table (%s):\n", net_dev->name);
-       seq_printf(seq, "          %-7s          %-13s %5s\n", "IPv4", "MAC",
-                  "last-seen");
+       seq_printf(seq, "          %-7s          %-9s %4s %11s\n", "IPv4",
+                  "MAC", "VID", "last-seen");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -707,8 +775,9 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
                        last_seen_msecs = last_seen_msecs % 60000;
                        last_seen_secs = last_seen_msecs / 1000;
 
-                       seq_printf(seq, " * %15pI4 %14pM %6i:%02i\n",
+                       seq_printf(seq, " * %15pI4 %14pM %4i %6i:%02i\n",
                                   &dat_entry->ip, dat_entry->mac_addr,
+                                  BATADV_PRINT_VID(dat_entry->vid),
                                   last_seen_mins, last_seen_secs);
                }
                rcu_read_unlock();
@@ -794,6 +863,31 @@ out:
        return type;
 }
 
+/**
+ * batadv_dat_get_vid - extract the VLAN identifier from skb if any
+ * @skb: the buffer containing the packet to extract the VID from
+ * @hdr_size: the size of the batman-adv header encapsulating the packet
+ *
+ * If the packet embedded in the skb is vlan tagged this function returns the
+ * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
+ */
+static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
+{
+       unsigned short vid;
+
+       vid = batadv_get_vid(skb, *hdr_size);
+
+       /* ARP parsing functions skip forward by hdr_size + ETH_HLEN.
+        * If the header contained in the packet is a VLAN one (which is
+        * longer), hdr_size is updated so that these functions still skip
+        * the correct number of bytes.
+        */
+       if (vid & BATADV_VLAN_HAS_TAG)
+               *hdr_size += VLAN_HLEN;
+
+       return vid;
+}
+
 /**
  * batadv_dat_snoop_outgoing_arp_request - snoop the ARP request and try to
  * answer using DAT
@@ -813,26 +907,31 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
        bool ret = false;
        struct batadv_dat_entry *dat_entry = NULL;
        struct sk_buff *skb_new;
+       int hdr_size = 0;
+       unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
                goto out;
 
-       type = batadv_arp_get_type(bat_priv, skb, 0);
+       vid = batadv_dat_get_vid(skb, &hdr_size);
+
+       type = batadv_arp_get_type(bat_priv, skb, hdr_size);
        /* If the node gets an ARP_REQUEST it has to send a DHT_GET unicast
         * message to the selected DHT candidates
         */
        if (type != ARPOP_REQUEST)
                goto out;
 
-       batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REQUEST");
+       batadv_dbg_arp(bat_priv, skb, type, hdr_size,
+                      "Parsing outgoing ARP REQUEST");
 
-       ip_src = batadv_arp_ip_src(skb, 0);
-       hw_src = batadv_arp_hw_src(skb, 0);
-       ip_dst = batadv_arp_ip_dst(skb, 0);
+       ip_src = batadv_arp_ip_src(skb, hdr_size);
+       hw_src = batadv_arp_hw_src(skb, hdr_size);
+       ip_dst = batadv_arp_ip_dst(skb, hdr_size);
 
-       batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+       batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
 
-       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
        if (dat_entry) {
                /* If the ARP request is destined for a local client the local
                 * client will answer itself. DAT would only generate a
@@ -842,7 +941,8 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                 * additional DAT answer may trigger kernel warnings about
                 * a packet coming from the wrong port.
                 */
-               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) {
+               if (batadv_is_my_client(bat_priv, dat_entry->mac_addr,
+                                       BATADV_NO_FLAGS)) {
                        ret = true;
                        goto out;
                }
@@ -853,11 +953,15 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                if (!skb_new)
                        goto out;
 
+               if (vid & BATADV_VLAN_HAS_TAG)
+                       skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
+                                                 vid & VLAN_VID_MASK);
+
                skb_reset_mac_header(skb_new);
                skb_new->protocol = eth_type_trans(skb_new,
                                                   bat_priv->soft_iface);
                bat_priv->stats.rx_packets++;
-               bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
+               bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
                bat_priv->soft_iface->last_rx = jiffies;
 
                netif_rx(skb_new);
@@ -892,11 +996,14 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
        struct sk_buff *skb_new;
        struct batadv_dat_entry *dat_entry = NULL;
        bool ret = false;
+       unsigned short vid;
        int err;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
                goto out;
 
+       vid = batadv_dat_get_vid(skb, &hdr_size);
+
        type = batadv_arp_get_type(bat_priv, skb, hdr_size);
        if (type != ARPOP_REQUEST)
                goto out;
@@ -908,9 +1015,9 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
        batadv_dbg_arp(bat_priv, skb, type, hdr_size,
                       "Parsing incoming ARP REQUEST");
 
-       batadv_dat_entry_add(bat_priv, ip_src, hw_src);
+       batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
 
-       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
        if (!dat_entry)
                goto out;
 
@@ -921,17 +1028,22 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
        if (!skb_new)
                goto out;
 
+       if (vid & BATADV_VLAN_HAS_TAG)
+               skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
+                                         vid & VLAN_VID_MASK);
+
        /* To preserve backwards compatibility, the node has to choose the outgoing
         * format based on the incoming request packet type. The assumption is
         * that a node not using the 4addr packet format doesn't support it.
         */
        if (hdr_size == sizeof(struct batadv_unicast_4addr_packet))
-               err = batadv_unicast_4addr_send_skb(bat_priv, skb_new,
-                                                   BATADV_P_DAT_CACHE_REPLY);
+               err = batadv_send_skb_via_tt_4addr(bat_priv, skb_new,
+                                                  BATADV_P_DAT_CACHE_REPLY,
+                                                  vid);
        else
-               err = batadv_unicast_send_skb(bat_priv, skb_new);
+               err = batadv_send_skb_via_tt(bat_priv, skb_new, vid);
 
-       if (!err) {
+       if (err != NET_XMIT_DROP) {
                batadv_inc_counter(bat_priv, BATADV_CNT_DAT_CACHED_REPLY_TX);
                ret = true;
        }
@@ -954,23 +1066,28 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
        uint16_t type;
        __be32 ip_src, ip_dst;
        uint8_t *hw_src, *hw_dst;
+       int hdr_size = 0;
+       unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
                return;
 
-       type = batadv_arp_get_type(bat_priv, skb, 0);
+       vid = batadv_dat_get_vid(skb, &hdr_size);
+
+       type = batadv_arp_get_type(bat_priv, skb, hdr_size);
        if (type != ARPOP_REPLY)
                return;
 
-       batadv_dbg_arp(bat_priv, skb, type, 0, "Parsing outgoing ARP REPLY");
+       batadv_dbg_arp(bat_priv, skb, type, hdr_size,
+                      "Parsing outgoing ARP REPLY");
 
-       hw_src = batadv_arp_hw_src(skb, 0);
-       ip_src = batadv_arp_ip_src(skb, 0);
-       hw_dst = batadv_arp_hw_dst(skb, 0);
-       ip_dst = batadv_arp_ip_dst(skb, 0);
+       hw_src = batadv_arp_hw_src(skb, hdr_size);
+       ip_src = batadv_arp_ip_src(skb, hdr_size);
+       hw_dst = batadv_arp_hw_dst(skb, hdr_size);
+       ip_dst = batadv_arp_ip_dst(skb, hdr_size);
 
-       batadv_dat_entry_add(bat_priv, ip_src, hw_src);
-       batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
+       batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+       batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
 
        /* Send the ARP reply to the candidates for both the IP addresses that
         * the node obtained from the ARP reply
@@ -992,10 +1109,13 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        __be32 ip_src, ip_dst;
        uint8_t *hw_src, *hw_dst;
        bool ret = false;
+       unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
                goto out;
 
+       vid = batadv_dat_get_vid(skb, &hdr_size);
+
        type = batadv_arp_get_type(bat_priv, skb, hdr_size);
        if (type != ARPOP_REPLY)
                goto out;
@@ -1011,13 +1131,13 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
        /* Update our internal cache with both the IP addresses the node got
         * within the ARP reply
         */
-       batadv_dat_entry_add(bat_priv, ip_src, hw_src);
-       batadv_dat_entry_add(bat_priv, ip_dst, hw_dst);
+       batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
+       batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
 
        /* if this REPLY is directed to a client of mine, let's deliver the
         * packet to the interface
         */
-       ret = !batadv_is_my_client(bat_priv, hw_dst);
+       ret = !batadv_is_my_client(bat_priv, hw_dst, vid);
 out:
        if (ret)
                kfree_skb(skb);
@@ -1040,7 +1160,8 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
        __be32 ip_dst;
        struct batadv_dat_entry *dat_entry = NULL;
        bool ret = false;
-       const size_t bcast_len = sizeof(struct batadv_bcast_packet);
+       int hdr_size = sizeof(struct batadv_bcast_packet);
+       unsigned short vid;
 
        if (!atomic_read(&bat_priv->distributed_arp_table))
                goto out;
@@ -1051,12 +1172,14 @@ bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
        if (forw_packet->num_packets)
                goto out;
 
-       type = batadv_arp_get_type(bat_priv, forw_packet->skb, bcast_len);
+       vid = batadv_dat_get_vid(forw_packet->skb, &hdr_size);
+
+       type = batadv_arp_get_type(bat_priv, forw_packet->skb, hdr_size);
        if (type != ARPOP_REQUEST)
                goto out;
 
-       ip_dst = batadv_arp_ip_dst(forw_packet->skb, bcast_len);
-       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
+       ip_dst = batadv_arp_ip_dst(forw_packet->skb, hdr_size);
+       dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst, vid);
        /* check if the node already got this entry */
        if (!dat_entry) {
                batadv_dbg(BATADV_DBG_DAT, bat_priv,
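
The reworked batadv_hash_dat() above derives the bucket from the (IPv4, VID) pair, so the same address learned on two VLANs gets independent DAT cache entries. The following standalone sketch is not part of this commit: it mirrors the byte-wise one-at-a-time mixing of batadv_hash_bytes(), and the final avalanche steps are assumed to be unchanged from the original hash.

    #include <stddef.h>
    #include <stdint.h>

    /* byte-wise one-at-a-time mixing, as in batadv_hash_bytes() */
    uint32_t hash_bytes(uint32_t hash, const void *data, size_t size)
    {
            const uint8_t *key = data;
            size_t i;

            for (i = 0; i < size; i++) {
                    hash += key[i];
                    hash += (hash << 10);
                    hash ^= (hash >> 6);
            }
            return hash;
    }

    /* bucket selection over the (IP, VID) pair, mirroring the new
     * batadv_hash_dat(); final mixing steps assumed unchanged
     */
    uint32_t hash_dat(uint32_t ip_be, uint16_t vid, uint32_t table_size)
    {
            uint32_t hash = 0;

            hash = hash_bytes(hash, &ip_be, sizeof(ip_be));
            hash = hash_bytes(hash, &vid, sizeof(vid));

            hash += (hash << 3);
            hash ^= (hash >> 11);
            hash += (hash << 15);

            return hash % table_size;
    }
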
index 125c8c6fcfadfed4d8b388d3f7775877a5b6ff93..60d853beb8d8214c78d25f191aff77eafaabaefd 100644 (file)
@@ -29,6 +29,7 @@
 
 #define BATADV_DAT_ADDR_MAX ((batadv_dat_addr_t)~(batadv_dat_addr_t)0)
 
+void batadv_dat_status_update(struct net_device *net_dev);
 bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb);
 bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
@@ -98,6 +99,10 @@ static inline void batadv_dat_inc_counter(struct batadv_priv *bat_priv,
 
 #else
 
+static inline void batadv_dat_status_update(struct net_device *net_dev)
+{
+}
+
 static inline bool
 batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                                      struct sk_buff *skb)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
new file mode 100644 (file)
index 0000000..271d321
--- /dev/null
@@ -0,0 +1,491 @@
+/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll <martin@hundeboll.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "main.h"
+#include "fragmentation.h"
+#include "send.h"
+#include "originator.h"
+#include "routing.h"
+#include "hard-interface.h"
+#include "soft-interface.h"
+
+
+/**
+ * batadv_frag_clear_chain - delete entries in the fragment buffer chain
+ * @head: head of chain with entries.
+ *
+ * Free fragments in the passed hlist. Should be called with appropriate lock.
+ */
+static void batadv_frag_clear_chain(struct hlist_head *head)
+{
+       struct batadv_frag_list_entry *entry;
+       struct hlist_node *node;
+
+       hlist_for_each_entry_safe(entry, node, head, list) {
+               hlist_del(&entry->list);
+               kfree_skb(entry->skb);
+               kfree(entry);
+       }
+}
+
+/**
+ * batadv_frag_purge_orig - free fragments associated to an orig
+ * @orig_node: originator to free fragments from
+ * @check_cb: optional function to tell if an entry should be purged
+ */
+void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
+                           bool (*check_cb)(struct batadv_frag_table_entry *))
+{
+       struct batadv_frag_table_entry *chain;
+       uint8_t i;
+
+       for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+               chain = &orig_node->fragments[i];
+               spin_lock_bh(&orig_node->fragments[i].lock);
+
+               if (!check_cb || check_cb(chain)) {
+                       batadv_frag_clear_chain(&orig_node->fragments[i].head);
+                       orig_node->fragments[i].size = 0;
+               }
+
+               spin_unlock_bh(&orig_node->fragments[i].lock);
+       }
+}
+
+/**
+ * batadv_frag_size_limit - maximum possible size of packet to be fragmented
+ *
+ * Returns the maximum size of payload that can be fragmented.
+ */
+static int batadv_frag_size_limit(void)
+{
+       int limit = BATADV_FRAG_MAX_FRAG_SIZE;
+
+       limit -= sizeof(struct batadv_frag_packet);
+       limit *= BATADV_FRAG_MAX_FRAGMENTS;
+
+       return limit;
+}
+
+/**
+ * batadv_frag_init_chain - check and prepare fragment chain for new fragment
+ * @chain: chain in fragments table to init
+ * @seqno: sequence number of the received fragment
+ *
+ * Make chain ready for a fragment with sequence number "seqno". Delete existing
+ * entries if they have an "old" sequence number.
+ *
+ * Caller must hold chain->lock.
+ *
+ * Returns true if chain is empty and caller can just insert the new fragment
+ * without searching for the right position.
+ */
+static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
+                                  uint16_t seqno)
+{
+       if (chain->seqno == seqno)
+               return false;
+
+       if (!hlist_empty(&chain->head))
+               batadv_frag_clear_chain(&chain->head);
+
+       chain->size = 0;
+       chain->seqno = seqno;
+
+       return true;
+}
+
+/**
+ * batadv_frag_insert_packet - insert a fragment into a fragment chain
+ * @orig_node: originator that the fragment was received from
+ * @skb: skb to insert
+ * @chain_out: list head to attach complete chains of fragments to
+ *
+ * Insert a new fragment into the reverse ordered chain in the right table
+ * entry. The hash table entry is cleared if "old" fragments exist in it.
+ *
+ * Returns true if skb is buffered, false on error. If the chain has all the
+ * fragments needed to merge the packet, the chain is moved to the passed head
+ * to avoid locking the chain in the table.
+ */
+static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
+                                     struct sk_buff *skb,
+                                     struct hlist_head *chain_out)
+{
+       struct batadv_frag_table_entry *chain;
+       struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
+       struct batadv_frag_packet *frag_packet;
+       uint8_t bucket;
+       uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
+       bool ret = false;
+
+       /* Linearize packet to avoid linearizing 16 packets in a row when doing
+        * the later merge. Non-linear merge should be added to remove this
+        * linearization.
+        */
+       if (skb_linearize(skb) < 0)
+               goto err;
+
+       frag_packet = (struct batadv_frag_packet *)skb->data;
+       seqno = ntohs(frag_packet->seqno);
+       bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
+
+       frag_entry_new = kmalloc(sizeof(*frag_entry_new), GFP_ATOMIC);
+       if (!frag_entry_new)
+               goto err;
+
+       frag_entry_new->skb = skb;
+       frag_entry_new->no = frag_packet->no;
+
+       /* Select entry in the "chain table" and delete any prior fragments
+        * with another sequence number. batadv_frag_init_chain() returns true
+        * if the chain is empty when it returns.
+        */
+       chain = &orig_node->fragments[bucket];
+       spin_lock_bh(&chain->lock);
+       if (batadv_frag_init_chain(chain, seqno)) {
+               hlist_add_head(&frag_entry_new->list, &chain->head);
+               chain->size = skb->len - hdr_size;
+               chain->timestamp = jiffies;
+               ret = true;
+               goto out;
+       }
+
+       /* Find the position for the new fragment. */
+       hlist_for_each_entry(frag_entry_curr, &chain->head, list) {
+               /* Drop packet if fragment already exists. */
+               if (frag_entry_curr->no == frag_entry_new->no)
+                       goto err_unlock;
+
+               /* Order fragments from highest to lowest. */
+               if (frag_entry_curr->no < frag_entry_new->no) {
+                       hlist_add_before(&frag_entry_new->list,
+                                        &frag_entry_curr->list);
+                       chain->size += skb->len - hdr_size;
+                       chain->timestamp = jiffies;
+                       ret = true;
+                       goto out;
+               }
+       }
+
+       /* Reached the end of the list, so insert after 'frag_entry_curr'. */
+       if (likely(frag_entry_curr)) {
+               hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
+               chain->size += skb->len - hdr_size;
+               chain->timestamp = jiffies;
+               ret = true;
+       }
+
+out:
+       if (chain->size > batadv_frag_size_limit() ||
+           ntohs(frag_packet->total_size) > batadv_frag_size_limit()) {
+               /* Clear chain if total size of either the list or the packet
+                * exceeds the maximum size of one merged packet.
+                */
+               batadv_frag_clear_chain(&chain->head);
+               chain->size = 0;
+       } else if (ntohs(frag_packet->total_size) == chain->size) {
+               /* All fragments received. Hand over chain to caller. */
+               hlist_move_list(&chain->head, chain_out);
+               chain->size = 0;
+       }
+
+err_unlock:
+       spin_unlock_bh(&chain->lock);
+
+err:
+       if (!ret)
+               kfree(frag_entry_new);
+
+       return ret;
+}
+
+/**
+ * batadv_frag_merge_packets - merge a chain of fragments
+ * @chain: head of chain with fragments
+ * @skb: packet with total size of skb after merging
+ *
+ * Expand the first skb in the chain and copy the content of the remaining
+ * skb's into the expanded one. After doing so, clear the chain.
+ *
+ * Returns the merged skb or NULL on error.
+ */
+static struct sk_buff *
+batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
+{
+       struct batadv_frag_packet *packet;
+       struct batadv_frag_list_entry *entry;
+       struct sk_buff *skb_out = NULL;
+       int size, hdr_size = sizeof(struct batadv_frag_packet);
+
+       /* Make sure incoming skb has non-bogus data. */
+       packet = (struct batadv_frag_packet *)skb->data;
+       size = ntohs(packet->total_size);
+       if (size > batadv_frag_size_limit())
+               goto free;
+
+       /* Remove first entry, as this is the destination for the rest of the
+        * fragments.
+        */
+       entry = hlist_entry(chain->first, struct batadv_frag_list_entry, list);
+       hlist_del(&entry->list);
+       skb_out = entry->skb;
+       kfree(entry);
+
+       /* Make room for the rest of the fragments. */
+       if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
+               kfree_skb(skb_out);
+               skb_out = NULL;
+               goto free;
+       }
+
+       /* Move the existing MAC header to just before the payload. (Overwrite
+        * the fragment header.)
+        */
+       skb_pull_rcsum(skb_out, hdr_size);
+       memmove(skb_out->data - ETH_HLEN, skb_mac_header(skb_out), ETH_HLEN);
+       skb_set_mac_header(skb_out, -ETH_HLEN);
+       skb_reset_network_header(skb_out);
+       skb_reset_transport_header(skb_out);
+
+       /* Copy the payload of each fragment into the last skb */
+       hlist_for_each_entry(entry, chain, list) {
+               size = entry->skb->len - hdr_size;
+               memcpy(skb_put(skb_out, size), entry->skb->data + hdr_size,
+                      size);
+       }
+
+free:
+       /* Locking is not needed, because 'chain' is not part of any orig. */
+       batadv_frag_clear_chain(chain);
+       return skb_out;
+}
+
+/**
+ * batadv_frag_skb_buffer - buffer fragment for later merge
+ * @skb: skb to buffer
+ * @orig_node_src: originator that the skb is received from
+ *
+ * Add fragment to buffer and merge fragments if possible.
+ *
+ * There are three possible outcomes: 1) Packet is merged: Return true and
+ * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
+ * to NULL; 3) Error: Return false and leave skb as is.
+ */
+bool batadv_frag_skb_buffer(struct sk_buff **skb,
+                           struct batadv_orig_node *orig_node_src)
+{
+       struct sk_buff *skb_out = NULL;
+       struct hlist_head head = HLIST_HEAD_INIT;
+       bool ret = false;
+
+       /* Add packet to buffer and table entry if merge is possible. */
+       if (!batadv_frag_insert_packet(orig_node_src, *skb, &head))
+               goto out_err;
+
+       /* Leave if more fragments are needed to merge. */
+       if (hlist_empty(&head))
+               goto out;
+
+       skb_out = batadv_frag_merge_packets(&head, *skb);
+       if (!skb_out)
+               goto out_err;
+
+out:
+       *skb = skb_out;
+       ret = true;
+out_err:
+       return ret;
+}
+
+/**
+ * batadv_frag_skb_fwd - forward fragments that would exceed MTU when merged
+ * @skb: skb to forward
+ * @recv_if: interface that the skb is received on
+ * @orig_node_src: originator that the skb is received from
+ *
+ * Look up the next-hop of the fragment's payload and check if the merged packet
+ * will exceed the MTU towards the next-hop. If so, the fragment is forwarded
+ * without merging it.
+ *
+ * Returns true if the fragment is consumed/forwarded, false otherwise.
+ */
+bool batadv_frag_skb_fwd(struct sk_buff *skb,
+                        struct batadv_hard_iface *recv_if,
+                        struct batadv_orig_node *orig_node_src)
+{
+       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+       struct batadv_orig_node *orig_node_dst = NULL;
+       struct batadv_neigh_node *neigh_node = NULL;
+       struct batadv_frag_packet *packet;
+       uint16_t total_size;
+       bool ret = false;
+
+       packet = (struct batadv_frag_packet *)skb->data;
+       orig_node_dst = batadv_orig_hash_find(bat_priv, packet->dest);
+       if (!orig_node_dst)
+               goto out;
+
+       neigh_node = batadv_find_router(bat_priv, orig_node_dst, recv_if);
+       if (!neigh_node)
+               goto out;
+
+       /* Forward the fragment, if the merged packet would be too big to
+        * be assembled.
+        */
+       total_size = ntohs(packet->total_size);
+       if (total_size > neigh_node->if_incoming->net_dev->mtu) {
+               batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_FWD);
+               batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
+                                  skb->len + ETH_HLEN);
+
+               packet->header.ttl--;
+               batadv_send_skb_packet(skb, neigh_node->if_incoming,
+                                      neigh_node->addr);
+               ret = true;
+       }
+
+out:
+       if (orig_node_dst)
+               batadv_orig_node_free_ref(orig_node_dst);
+       if (neigh_node)
+               batadv_neigh_node_free_ref(neigh_node);
+       return ret;
+}
+
+/**
+ * batadv_frag_create - create a fragment from skb
+ * @skb: skb to create fragment from
+ * @frag_head: header to use in new fragment
+ * @mtu: size of new fragment
+ *
+ * Split the passed skb into two fragments: A new one with size matching the
+ * passed mtu and the old one with the rest. The new skb contains data from the
+ * tail of the old skb.
+ *
+ * Returns the new fragment, NULL on error.
+ */
+static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
+                                         struct batadv_frag_packet *frag_head,
+                                         unsigned int mtu)
+{
+       struct sk_buff *skb_fragment;
+       unsigned header_size = sizeof(*frag_head);
+       unsigned fragment_size = mtu - header_size;
+
+       skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN);
+       if (!skb_fragment)
+               goto err;
+
+       skb->priority = TC_PRIO_CONTROL;
+
+       /* Eat the last mtu-bytes of the skb */
+       skb_reserve(skb_fragment, header_size + ETH_HLEN);
+       skb_split(skb, skb_fragment, skb->len - fragment_size);
+
+       /* Add the header */
+       skb_push(skb_fragment, header_size);
+       memcpy(skb_fragment->data, frag_head, header_size);
+
+err:
+       return skb_fragment;
+}
+
+/**
+ * batadv_frag_send_packet - create up to 16 fragments from the passed skb
+ * @skb: skb to create fragments from
+ * @orig_node: final destination of the created fragments
+ * @neigh_node: next-hop of the created fragments
+ *
+ * Returns true on success, false otherwise.
+ */
+bool batadv_frag_send_packet(struct sk_buff *skb,
+                            struct batadv_orig_node *orig_node,
+                            struct batadv_neigh_node *neigh_node)
+{
+       struct batadv_priv *bat_priv;
+       struct batadv_hard_iface *primary_if;
+       struct batadv_frag_packet frag_header;
+       struct sk_buff *skb_fragment;
+       unsigned mtu = neigh_node->if_incoming->net_dev->mtu;
+       unsigned header_size = sizeof(frag_header);
+       unsigned max_fragment_size, max_packet_size;
+
+       /* To avoid merge and refragmentation at next-hops we never send
+        * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
+        */
+       mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+       max_fragment_size = (mtu - header_size - ETH_HLEN);
+       max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+
+       /* Don't even try to fragment if we need more than 16 fragments */
+       if (skb->len > max_packet_size)
+               goto out_err;
+
+       bat_priv = orig_node->bat_priv;
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               goto out_err;
+
+       /* Create one header to be copied to all fragments */
+       frag_header.header.packet_type = BATADV_UNICAST_FRAG;
+       frag_header.header.version = BATADV_COMPAT_VERSION;
+       frag_header.header.ttl = BATADV_TTL;
+       frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
+       frag_header.reserved = 0;
+       frag_header.no = 0;
+       frag_header.total_size = htons(skb->len);
+       memcpy(frag_header.orig, primary_if->net_dev->dev_addr, ETH_ALEN);
+       memcpy(frag_header.dest, orig_node->orig, ETH_ALEN);
+
+       /* Eat and send fragments from the tail of skb */
+       while (skb->len > max_fragment_size) {
+               skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
+               if (!skb_fragment)
+                       goto out_err;
+
+               batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
+               batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
+                                  skb_fragment->len + ETH_HLEN);
+               batadv_send_skb_packet(skb_fragment, neigh_node->if_incoming,
+                                      neigh_node->addr);
+               frag_header.no++;
+
+               /* The initial check in this function should cover this case */
+               if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1)
+                       goto out_err;
+       }
+
+       /* Make room for the fragment header. */
+       if (batadv_skb_head_push(skb, header_size) < 0 ||
+           pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0)
+               goto out_err;
+
+       memcpy(skb->data, &frag_header, header_size);
+
+       /* Send the last fragment */
+       batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
+       batadv_add_counter(bat_priv, BATADV_CNT_FRAG_TX_BYTES,
+                          skb->len + ETH_HLEN);
+       batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+
+       return true;
+out_err:
+       return false;
+}
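
batadv_frag_send_packet() above carves fragments from the tail of the skb until the remainder fits into a single fragment, with a hard cap of 16 fragments. The back-of-the-envelope sketch below is not part of this commit; the 20-byte fragment header size and the MTU are assumed purely for illustration of how a payload ends up being sized.

    #include <stdio.h>

    #define ETH_HLEN           14
    #define FRAG_HDR_SIZE      20  /* assumed sizeof(struct batadv_frag_packet) */
    #define FRAG_MAX_FRAGMENTS 16

    int main(void)
    {
            unsigned int mtu = 1400, skb_len = 4000;   /* illustrative values */
            unsigned int max_fragment = mtu - FRAG_HDR_SIZE - ETH_HLEN;
            unsigned int no = 0;

            /* carve full-size fragments from the tail until the rest fits */
            while (skb_len > max_fragment && no < FRAG_MAX_FRAGMENTS - 1) {
                    printf("fragment %u: %u payload bytes\n", no, max_fragment);
                    skb_len -= max_fragment;
                    no++;
            }
            /* the remaining head of the skb is sent as the last fragment */
            printf("fragment %u: %u payload bytes\n", no, skb_len);
            return 0;
    }
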
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
new file mode 100644 (file)
index 0000000..ca029e2
--- /dev/null
@@ -0,0 +1,50 @@
+/* Copyright (C) 2013 B.A.T.M.A.N. contributors:
+ *
+ * Martin Hundebøll <martin@hundeboll.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#ifndef _NET_BATMAN_ADV_FRAGMENTATION_H_
+#define _NET_BATMAN_ADV_FRAGMENTATION_H_
+
+void batadv_frag_purge_orig(struct batadv_orig_node *orig,
+                           bool (*check_cb)(struct batadv_frag_table_entry *));
+bool batadv_frag_skb_fwd(struct sk_buff *skb,
+                        struct batadv_hard_iface *recv_if,
+                        struct batadv_orig_node *orig_node_src);
+bool batadv_frag_skb_buffer(struct sk_buff **skb,
+                           struct batadv_orig_node *orig_node);
+bool batadv_frag_send_packet(struct sk_buff *skb,
+                            struct batadv_orig_node *orig_node,
+                            struct batadv_neigh_node *neigh_node);
+
+/**
+ * batadv_frag_check_entry - check if a list of fragments has timed out
+ * @frags_entry: table entry to check
+ *
+ * Returns true if the frags entry has timed out, false otherwise.
+ */
+static inline bool
+batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
+{
+       if (!hlist_empty(&frags_entry->head) &&
+           batadv_has_timed_out(frags_entry->timestamp, BATADV_FRAG_TIMEOUT))
+               return true;
+       else
+               return false;
+}
+
+#endif /* _NET_BATMAN_ADV_FRAGMENTATION_H_ */
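
batadv_frag_check_entry() above combines an emptiness test with batadv_has_timed_out(). A minimal model of that timeout test follows; it is not part of this commit, and the 10000 ms value is an assumption taken from BATADV_FRAG_TIMEOUT in main.h.

    #include <stdbool.h>
    #include <stdio.h>

    /* assumed value of BATADV_FRAG_TIMEOUT in milliseconds (see main.h) */
    #define FRAG_TIMEOUT_MS 10000

    /* rough model of batadv_has_timed_out(): an entry is stale once its
     * age exceeds the timeout
     */
    static bool has_timed_out(unsigned long timestamp_ms, unsigned long now_ms)
    {
            return now_ms - timestamp_ms > FRAG_TIMEOUT_MS;
    }

    int main(void)
    {
            printf("%d\n", has_timed_out(1000, 5000));   /* 0: 4 s old, kept */
            printf("%d\n", has_timed_out(1000, 20000));  /* 1: 19 s old, purged */
            return 0;
    }
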
index 1ce4b8763ef289f3679177c5a1fdc96df6e6126f..2449afaa7638358500c350af3bc39fb5ccc2d72e 100644 (file)
@@ -118,7 +118,6 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
        uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
        uint32_t gw_divisor;
        uint8_t max_tq = 0;
-       int down, up;
        uint8_t tq_avg;
        struct batadv_orig_node *orig_node;
 
@@ -138,14 +137,13 @@ batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
                if (!atomic_inc_not_zero(&gw_node->refcount))
                        goto next;
 
-               tq_avg = router->tq_avg;
+               tq_avg = router->bat_iv.tq_avg;
 
                switch (atomic_read(&bat_priv->gw_sel_class)) {
                case 1: /* fast connection */
-                       batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
-                                                   &down, &up);
-
-                       tmp_gw_factor = tq_avg * tq_avg * down * 100 * 100;
+                       tmp_gw_factor = tq_avg * tq_avg;
+                       tmp_gw_factor *= gw_node->bandwidth_down;
+                       tmp_gw_factor *= 100 * 100;
                        tmp_gw_factor /= gw_divisor;
 
                        if ((tmp_gw_factor > max_gw_factor) ||
@@ -223,11 +221,6 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
        struct batadv_neigh_node *router = NULL;
        char gw_addr[18] = { '\0' };
 
-       /* The batman daemon checks here if we already passed a full originator
-        * cycle in order to make sure we don't choose the first gateway we
-        * hear about. This check is based on the daemon's uptime which we
-        * don't have.
-        */
        if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
                goto out;
 
@@ -258,16 +251,22 @@ void batadv_gw_election(struct batadv_priv *bat_priv)
                                    NULL);
        } else if ((!curr_gw) && (next_gw)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
+                          "Adding route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
                           next_gw->orig_node->orig,
-                          next_gw->orig_node->gw_flags, router->tq_avg);
+                          next_gw->bandwidth_down / 10,
+                          next_gw->bandwidth_down % 10,
+                          next_gw->bandwidth_up / 10,
+                          next_gw->bandwidth_up % 10, router->bat_iv.tq_avg);
                batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
                                    gw_addr);
        } else {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
+                          "Changing route to gateway %pM (bandwidth: %u.%u/%u.%u MBit, tq: %i)\n",
                           next_gw->orig_node->orig,
-                          next_gw->orig_node->gw_flags, router->tq_avg);
+                          next_gw->bandwidth_down / 10,
+                          next_gw->bandwidth_down % 10,
+                          next_gw->bandwidth_up / 10,
+                          next_gw->bandwidth_up % 10, router->bat_iv.tq_avg);
                batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
                                    gw_addr);
        }
@@ -306,8 +305,8 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
        if (!router_orig)
                goto out;
 
-       gw_tq_avg = router_gw->tq_avg;
-       orig_tq_avg = router_orig->tq_avg;
+       gw_tq_avg = router_gw->bat_iv.tq_avg;
+       orig_tq_avg = router_orig->bat_iv.tq_avg;
 
        /* the TQ value has to be better */
        if (orig_tq_avg < gw_tq_avg)
@@ -337,12 +336,20 @@ out:
        return;
 }
 
+/**
+ * batadv_gw_node_add - add gateway node to list of available gateways
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
+ */
 static void batadv_gw_node_add(struct batadv_priv *bat_priv,
                               struct batadv_orig_node *orig_node,
-                              uint8_t new_gwflags)
+                              struct batadv_tvlv_gateway_data *gateway)
 {
        struct batadv_gw_node *gw_node;
-       int down, up;
+
+       if (gateway->bandwidth_down == 0)
+               return;
 
        gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
        if (!gw_node)
@@ -356,73 +363,116 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
        spin_unlock_bh(&bat_priv->gw.list_lock);
 
-       batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                  "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
-                  orig_node->orig, new_gwflags,
-                  (down > 2048 ? down / 1024 : down),
-                  (down > 2048 ? "MBit" : "KBit"),
-                  (up > 2048 ? up / 1024 : up),
-                  (up > 2048 ? "MBit" : "KBit"));
+                  "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
+                  orig_node->orig,
+                  ntohl(gateway->bandwidth_down) / 10,
+                  ntohl(gateway->bandwidth_down) % 10,
+                  ntohl(gateway->bandwidth_up) / 10,
+                  ntohl(gateway->bandwidth_up) % 10);
 }
 
-void batadv_gw_node_update(struct batadv_priv *bat_priv,
-                          struct batadv_orig_node *orig_node,
-                          uint8_t new_gwflags)
+/**
+ * batadv_gw_node_get - retrieve gateway node from list of available gateways
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ *
+ * Returns gateway node if found or NULL otherwise.
+ */
+static struct batadv_gw_node *
+batadv_gw_node_get(struct batadv_priv *bat_priv,
+                  struct batadv_orig_node *orig_node)
 {
-       struct batadv_gw_node *gw_node, *curr_gw;
-
-       /* Note: We don't need a NULL check here, since curr_gw never gets
-        * dereferenced. If curr_gw is NULL we also should not exit as we may
-        * have this gateway in our list (duplication check!) even though we
-        * have no currently selected gateway.
-        */
-       curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+       struct batadv_gw_node *gw_node_tmp, *gw_node = NULL;
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.list, list) {
-               if (gw_node->orig_node != orig_node)
+       hlist_for_each_entry_rcu(gw_node_tmp, &bat_priv->gw.list, list) {
+               if (gw_node_tmp->orig_node != orig_node)
                        continue;
 
-               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "Gateway class of originator %pM changed from %i to %i\n",
-                          orig_node->orig, gw_node->orig_node->gw_flags,
-                          new_gwflags);
+               if (gw_node_tmp->deleted)
+                       continue;
 
-               gw_node->deleted = 0;
+               if (!atomic_inc_not_zero(&gw_node_tmp->refcount))
+                       continue;
 
-               if (new_gwflags == BATADV_NO_FLAGS) {
-                       gw_node->deleted = jiffies;
-                       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                                  "Gateway %pM removed from gateway list\n",
-                                  orig_node->orig);
+               gw_node = gw_node_tmp;
+               break;
+       }
+       rcu_read_unlock();
 
-                       if (gw_node == curr_gw)
-                               goto deselect;
-               }
+       return gw_node;
+}
 
-               goto unlock;
+/**
+ * batadv_gw_node_update - update list of available gateways with changed
+ *  bandwidth information
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator announcing gateway capabilities
+ * @gateway: announced bandwidth information
+ */
+void batadv_gw_node_update(struct batadv_priv *bat_priv,
+                          struct batadv_orig_node *orig_node,
+                          struct batadv_tvlv_gateway_data *gateway)
+{
+       struct batadv_gw_node *gw_node, *curr_gw = NULL;
+
+       gw_node = batadv_gw_node_get(bat_priv, orig_node);
+       if (!gw_node) {
+               batadv_gw_node_add(bat_priv, orig_node, gateway);
+               goto out;
        }
 
-       if (new_gwflags == BATADV_NO_FLAGS)
-               goto unlock;
+       if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
+           (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
+               goto out;
 
-       batadv_gw_node_add(bat_priv, orig_node, new_gwflags);
-       goto unlock;
+       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                  "Gateway bandwidth of originator %pM changed from %u.%u/%u.%u MBit to %u.%u/%u.%u MBit\n",
+                  orig_node->orig,
+                  gw_node->bandwidth_down / 10,
+                  gw_node->bandwidth_down % 10,
+                  gw_node->bandwidth_up / 10,
+                  gw_node->bandwidth_up % 10,
+                  ntohl(gateway->bandwidth_down) / 10,
+                  ntohl(gateway->bandwidth_down) % 10,
+                  ntohl(gateway->bandwidth_up) / 10,
+                  ntohl(gateway->bandwidth_up) % 10);
+
+       gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
+       gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
+
+       gw_node->deleted = 0;
+       if (ntohl(gateway->bandwidth_down) == 0) {
+               gw_node->deleted = jiffies;
+               batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                          "Gateway %pM removed from gateway list\n",
+                          orig_node->orig);
 
-deselect:
-       batadv_gw_deselect(bat_priv);
-unlock:
-       rcu_read_unlock();
+               /* Note: We don't need a NULL check here, since curr_gw never
+                * gets dereferenced.
+                */
+               curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+               if (gw_node == curr_gw)
+                       batadv_gw_deselect(bat_priv);
+       }
 
+out:
        if (curr_gw)
                batadv_gw_node_free_ref(curr_gw);
+       if (gw_node)
+               batadv_gw_node_free_ref(gw_node);
 }
 
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
                           struct batadv_orig_node *orig_node)
 {
-       batadv_gw_node_update(bat_priv, orig_node, 0);
+       struct batadv_tvlv_gateway_data gateway;
+
+       gateway.bandwidth_down = 0;
+       gateway.bandwidth_up = 0;
+
+       batadv_gw_node_update(bat_priv, orig_node, &gateway);
 }
 
 void batadv_gw_node_purge(struct batadv_priv *bat_priv)
@@ -467,9 +517,7 @@ static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
 {
        struct batadv_gw_node *curr_gw;
        struct batadv_neigh_node *router;
-       int down, up, ret = -1;
-
-       batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
+       int ret = -1;
 
        router = batadv_orig_node_get_router(gw_node->orig_node);
        if (!router)
@@ -477,16 +525,15 @@ static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
 
        curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
 
-       ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+       ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %u.%u/%u.%u MBit\n",
                         (curr_gw == gw_node ? "=>" : "  "),
                         gw_node->orig_node->orig,
-                        router->tq_avg, router->addr,
+                        router->bat_iv.tq_avg, router->addr,
                         router->if_incoming->net_dev->name,
-                        gw_node->orig_node->gw_flags,
-                        (down > 2048 ? down / 1024 : down),
-                        (down > 2048 ? "MBit" : "KBit"),
-                        (up > 2048 ? up / 1024 : up),
-                        (up > 2048 ? "MBit" : "KBit"));
+                        gw_node->bandwidth_down / 10,
+                        gw_node->bandwidth_down % 10,
+                        gw_node->bandwidth_up / 10,
+                        gw_node->bandwidth_up % 10);
 
        batadv_neigh_node_free_ref(router);
        if (curr_gw)
@@ -508,7 +555,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
                goto out;
 
        seq_printf(seq,
-                  "      %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+                  "      %-12s (%s/%i) %17s [%10s]: advertised uplink bandwidth ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
                   "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
                   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
                   primary_if->net_dev->dev_addr, net_dev->name);
@@ -603,24 +650,29 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
        struct iphdr *iphdr;
        struct ipv6hdr *ipv6hdr;
        struct udphdr *udphdr;
+       struct vlan_ethhdr *vhdr;
+       __be16 proto;
 
        /* check for ethernet header */
        if (!pskb_may_pull(skb, *header_len + ETH_HLEN))
                return false;
        ethhdr = (struct ethhdr *)skb->data;
+       proto = ethhdr->h_proto;
        *header_len += ETH_HLEN;
 
        /* check for initial vlan header */
-       if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
+       if (proto == htons(ETH_P_8021Q)) {
                if (!pskb_may_pull(skb, *header_len + VLAN_HLEN))
                        return false;
-               ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
+               vhdr = (struct vlan_ethhdr *)skb->data;
+               proto = vhdr->h_vlan_encapsulated_proto;
                *header_len += VLAN_HLEN;
        }
 
        /* check for ip header */
-       switch (ntohs(ethhdr->h_proto)) {
-       case ETH_P_IP:
+       switch (proto) {
+       case htons(ETH_P_IP):
                if (!pskb_may_pull(skb, *header_len + sizeof(*iphdr)))
                        return false;
                iphdr = (struct iphdr *)(skb->data + *header_len);
@@ -631,7 +683,7 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
                        return false;
 
                break;
-       case ETH_P_IPV6:
+       case htons(ETH_P_IPV6):
                if (!pskb_may_pull(skb, *header_len + sizeof(*ipv6hdr)))
                        return false;
                ipv6hdr = (struct ipv6hdr *)(skb->data + *header_len);
@@ -658,28 +710,44 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
        *header_len += sizeof(*udphdr);
 
        /* check for bootp port */
-       if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
-           (ntohs(udphdr->dest) != 67))
+       if ((proto == htons(ETH_P_IP)) &&
+           (udphdr->dest != htons(67)))
                return false;
 
-       if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
-           (ntohs(udphdr->dest) != 547))
+       if ((proto == htons(ETH_P_IPV6)) &&
+           (udphdr->dest != htons(547)))
                return false;
 
        return true;
 }
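The protocol and port checks in this function now compare the network-order fields directly against htons() of a constant, so the byte swap is folded into the constant at build time instead of being applied to every received field; the same trick makes switch (proto) work with case htons(ETH_P_IP). A minimal illustration of the idiom (not taken from the patch):

#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/udp.h>

/* the constant 67 is swapped once at compile time; udph->dest is used as-is */
static bool example_is_bootp_port(const struct udphdr *udph)
{
	return udph->dest == htons(67);
}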
 
-/* this call might reallocate skb data */
+/**
+ * batadv_gw_out_of_range - check if the dhcp request destination is the best gw
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the outgoing packet
+ *
+ * Check if the skb is a DHCP request and if it is sent to the current best GW
+ * server. Due to topology changes it may be the case that the GW server
+ * previously selected is not the best one anymore.
+ *
+ * Returns true if the packet destination is unicast and it is not the best gw,
+ * false otherwise.
+ *
+ * This call might reallocate skb data.
+ */
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
                            struct sk_buff *skb)
 {
        struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
        struct batadv_orig_node *orig_dst_node = NULL;
-       struct batadv_gw_node *curr_gw = NULL;
+       struct batadv_gw_node *gw_node = NULL, *curr_gw = NULL;
        struct ethhdr *ethhdr;
        bool ret, out_of_range = false;
        unsigned int header_len = 0;
        uint8_t curr_tq_avg;
+       unsigned short vid;
+
+       vid = batadv_get_vid(skb, 0);
 
        ret = batadv_gw_is_dhcp_target(skb, &header_len);
        if (!ret)
@@ -687,11 +755,12 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
 
        ethhdr = (struct ethhdr *)skb->data;
        orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
-                                                ethhdr->h_dest);
+                                                ethhdr->h_dest, vid);
        if (!orig_dst_node)
                goto out;
 
-       if (!orig_dst_node->gw_flags)
+       gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
+       if (!gw_node || gw_node->bandwidth_down == 0)
                goto out;
 
        ret = batadv_is_type_dhcprequest(skb, header_len);
@@ -723,7 +792,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
                if (!neigh_curr)
                        goto out;
 
-               curr_tq_avg = neigh_curr->tq_avg;
+               curr_tq_avg = neigh_curr->bat_iv.tq_avg;
                break;
        case BATADV_GW_MODE_OFF:
        default:
@@ -734,7 +803,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
        if (!neigh_old)
                goto out;
 
-       if (curr_tq_avg - neigh_old->tq_avg > BATADV_GW_THRESHOLD)
+       if (curr_tq_avg - neigh_old->bat_iv.tq_avg > BATADV_GW_THRESHOLD)
                out_of_range = true;
 
 out:
@@ -742,6 +811,8 @@ out:
                batadv_orig_node_free_ref(orig_dst_node);
        if (curr_gw)
                batadv_gw_node_free_ref(curr_gw);
+       if (gw_node)
+               batadv_gw_node_free_ref(gw_node);
        if (neigh_old)
                batadv_neigh_node_free_ref(neigh_old);
        if (neigh_curr)
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index ceef4ebe8bcd6a89711000c0d52b20e536c5375a..d95c2d23195ee962496667d449a25d7b58b2741c 100644 (file)
@@ -29,7 +29,7 @@ void batadv_gw_check_election(struct batadv_priv *bat_priv,
                              struct batadv_orig_node *orig_node);
 void batadv_gw_node_update(struct batadv_priv *bat_priv,
                           struct batadv_orig_node *orig_node,
-                          uint8_t new_gwflags);
+                          struct batadv_tvlv_gateway_data *gateway);
 void batadv_gw_node_delete(struct batadv_priv *bat_priv,
                           struct batadv_orig_node *orig_node);
 void batadv_gw_node_purge(struct batadv_priv *bat_priv);
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 84bb2b18d7110a4597dc71c7b2f68c032d7750a9..b211b0f9cb788efa2b476616dd534675fb746b66 100644 (file)
 #include "gateway_common.h"
 #include "gateway_client.h"
 
-/* calculates the gateway class from kbit */
-static void batadv_kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
-{
-       int mdown = 0, tdown, tup, difference;
-       uint8_t sbit, part;
-
-       *gw_srv_class = 0;
-       difference = 0x0FFFFFFF;
-
-       /* test all downspeeds */
-       for (sbit = 0; sbit < 2; sbit++) {
-               for (part = 0; part < 16; part++) {
-                       tdown = 32 * (sbit + 2) * (1 << part);
-
-                       if (abs(tdown - down) < difference) {
-                               *gw_srv_class = (sbit << 7) + (part << 3);
-                               difference = abs(tdown - down);
-                               mdown = tdown;
-                       }
-               }
-       }
-
-       /* test all upspeeds */
-       difference = 0x0FFFFFFF;
-
-       for (part = 0; part < 8; part++) {
-               tup = ((part + 1) * (mdown)) / 8;
-
-               if (abs(tup - up) < difference) {
-                       *gw_srv_class = (*gw_srv_class & 0xF8) | part;
-                       difference = abs(tup - up);
-               }
-       }
-}
-
-/* returns the up and downspeeds in kbit, calculated from the class */
-void batadv_gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
-{
-       int sbit = (gw_srv_class & 0x80) >> 7;
-       int dpart = (gw_srv_class & 0x78) >> 3;
-       int upart = (gw_srv_class & 0x07);
-
-       if (!gw_srv_class) {
-               *down = 0;
-               *up = 0;
-               return;
-       }
-
-       *down = 32 * (sbit + 2) * (1 << dpart);
-       *up = ((upart + 1) * (*down)) / 8;
-}
-
+/**
+ * batadv_parse_gw_bandwidth - parse supplied string buffer to extract download
+ *  and upload bandwidth information
+ * @net_dev: the soft interface net device
+ * @buff: string buffer to parse
+ * @down: pointer holding the returned download bandwidth information
+ * @up: pointer holding the returned upload bandwidth information
+ *
+ * Returns false on parse error and true otherwise.
+ */
 static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
-                                     int *up, int *down)
+                                     uint32_t *down, uint32_t *up)
 {
-       int ret, multi = 1;
+       enum batadv_bandwidth_units bw_unit_type = BATADV_BW_UNIT_KBIT;
        char *slash_ptr, *tmp_ptr;
        long ldown, lup;
+       int ret;
 
        slash_ptr = strchr(buff, '/');
        if (slash_ptr)
@@ -88,10 +47,10 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
                tmp_ptr = buff + strlen(buff) - 4;
 
                if (strnicmp(tmp_ptr, "mbit", 4) == 0)
-                       multi = 1024;
+                       bw_unit_type = BATADV_BW_UNIT_MBIT;
 
                if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
-                   (multi > 1))
+                   (bw_unit_type == BATADV_BW_UNIT_MBIT))
                        *tmp_ptr = '\0';
        }
 
@@ -103,20 +62,28 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
                return false;
        }
 
-       *down = ldown * multi;
+       switch (bw_unit_type) {
+       case BATADV_BW_UNIT_MBIT:
+               *down = ldown * 10;
+               break;
+       case BATADV_BW_UNIT_KBIT:
+       default:
+               *down = ldown / 100;
+               break;
+       }
 
        /* we also got some upload info */
        if (slash_ptr) {
-               multi = 1;
+               bw_unit_type = BATADV_BW_UNIT_KBIT;
 
                if (strlen(slash_ptr + 1) > 4) {
                        tmp_ptr = slash_ptr + 1 - 4 + strlen(slash_ptr + 1);
 
                        if (strnicmp(tmp_ptr, "mbit", 4) == 0)
-                               multi = 1024;
+                               bw_unit_type = BATADV_BW_UNIT_MBIT;
 
                        if ((strnicmp(tmp_ptr, "kbit", 4) == 0) ||
-                           (multi > 1))
+                           (bw_unit_type == BATADV_BW_UNIT_MBIT))
                                *tmp_ptr = '\0';
                }
 
@@ -128,52 +95,149 @@ static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
                        return false;
                }
 
-               *up = lup * multi;
+               switch (bw_unit_type) {
+               case BATADV_BW_UNIT_MBIT:
+                       *up = lup * 10;
+                       break;
+               case BATADV_BW_UNIT_KBIT:
+               default:
+                       *up = lup / 100;
+                       break;
+               }
        }
 
        return true;
 }
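Worked example (not part of the patch): writing "10mbit/2048kbit" yields ldown = 10 and lup = 2048, hence *down = 10 * 10 = 100 and *up = 2048 / 100 = 20, which later code prints as 10.0 and 2.0 MBit.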
 
+/**
+ * batadv_gw_tvlv_container_update - update the gw tvlv container after gateway
+ *  setting change
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+       struct batadv_tvlv_gateway_data gw;
+       uint32_t down, up;
+       char gw_mode;
+
+       gw_mode = atomic_read(&bat_priv->gw_mode);
+
+       switch (gw_mode) {
+       case BATADV_GW_MODE_OFF:
+       case BATADV_GW_MODE_CLIENT:
+               batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+               break;
+       case BATADV_GW_MODE_SERVER:
+               down = atomic_read(&bat_priv->gw.bandwidth_down);
+               up = atomic_read(&bat_priv->gw.bandwidth_up);
+               gw.bandwidth_down = htonl(down);
+               gw.bandwidth_up = htonl(up);
+               batadv_tvlv_container_register(bat_priv, BATADV_TVLV_GW, 1,
+                                              &gw, sizeof(gw));
+               break;
+       }
+}
+
 ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
                                size_t count)
 {
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       long gw_bandwidth_tmp = 0;
-       int up = 0, down = 0;
+       uint32_t down_curr, up_curr, down_new = 0, up_new = 0;
        bool ret;
 
-       ret = batadv_parse_gw_bandwidth(net_dev, buff, &up, &down);
+       down_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_down);
+       up_curr = (unsigned int)atomic_read(&bat_priv->gw.bandwidth_up);
+
+       ret = batadv_parse_gw_bandwidth(net_dev, buff, &down_new, &up_new);
        if (!ret)
                goto end;
 
-       if ((!down) || (down < 256))
-               down = 2000;
-
-       if (!up)
-               up = down / 5;
+       if (!down_new)
+               down_new = 1;
 
-       batadv_kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);
+       if (!up_new)
+               up_new = down_new / 5;
 
-       /* the gw bandwidth we guessed above might not match the given
-        * speeds, hence we need to calculate it back to show the number
-        * that is going to be propagated
-        */
-       batadv_gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
+       if (!up_new)
+               up_new = 1;
 
-       if (atomic_read(&bat_priv->gw_bandwidth) == gw_bandwidth_tmp)
+       if ((down_curr == down_new) && (up_curr == up_new))
                return count;
 
        batadv_gw_deselect(bat_priv);
        batadv_info(net_dev,
-                   "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
-                   atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
-                   (down > 2048 ? down / 1024 : down),
-                   (down > 2048 ? "MBit" : "KBit"),
-                   (up > 2048 ? up / 1024 : up),
-                   (up > 2048 ? "MBit" : "KBit"));
+                   "Changing gateway bandwidth from: '%u.%u/%u.%u MBit' to: '%u.%u/%u.%u MBit'\n",
+                   down_curr / 10, down_curr % 10, up_curr / 10, up_curr % 10,
+                   down_new / 10, down_new % 10, up_new / 10, up_new % 10);
 
-       atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp);
+       atomic_set(&bat_priv->gw.bandwidth_down, down_new);
+       atomic_set(&bat_priv->gw.bandwidth_up, up_new);
+       batadv_gw_tvlv_container_update(bat_priv);
 
 end:
        return count;
 }
+
+/**
+ * batadv_gw_tvlv_ogm_handler_v1 - process incoming gateway tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the gateway data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig,
+                                         uint8_t flags,
+                                         void *tvlv_value,
+                                         uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_gateway_data gateway, *gateway_ptr;
+
+       /* only fetch the tvlv value if the handler wasn't called via the
+        * CIFNOTFND flag and if there is data to fetch
+        */
+       if ((flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) ||
+           (tvlv_value_len < sizeof(gateway))) {
+               gateway.bandwidth_down = 0;
+               gateway.bandwidth_up = 0;
+       } else {
+               gateway_ptr = tvlv_value;
+               gateway.bandwidth_down = gateway_ptr->bandwidth_down;
+               gateway.bandwidth_up = gateway_ptr->bandwidth_up;
+               if ((gateway.bandwidth_down == 0) ||
+                   (gateway.bandwidth_up == 0)) {
+                       gateway.bandwidth_down = 0;
+                       gateway.bandwidth_up = 0;
+               }
+       }
+
+       batadv_gw_node_update(bat_priv, orig, &gateway);
+
+       /* restart gateway selection if fast or late switching was enabled */
+       if ((gateway.bandwidth_down != 0) &&
+           (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
+           (atomic_read(&bat_priv->gw_sel_class) > 2))
+               batadv_gw_check_election(bat_priv, orig);
+}
+
+/**
+ * batadv_gw_init - initialise the gateway handling internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_init(struct batadv_priv *bat_priv)
+{
+       batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
+                                    NULL, BATADV_TVLV_GW, 1,
+                                    BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+}
+
+/**
+ * batadv_gw_free - free the gateway handling internals
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_gw_free(struct batadv_priv *bat_priv)
+{
+       batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_GW, 1);
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_GW, 1);
+}
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 509b2bf8c2f4fa6388e095c998dd0006ec3d9b6e..56384a4cd18c98a2ac9010ea14bb89acf3a2d06d 100644 (file)
@@ -26,12 +26,24 @@ enum batadv_gw_modes {
        BATADV_GW_MODE_SERVER,
 };
 
+/**
+ * enum batadv_bandwidth_units - bandwidth unit types
+ * @BATADV_BW_UNIT_KBIT: unit type kbit
+ * @BATADV_BW_UNIT_MBIT: unit type mbit
+ */
+enum batadv_bandwidth_units {
+       BATADV_BW_UNIT_KBIT,
+       BATADV_BW_UNIT_MBIT,
+};
+
 #define BATADV_GW_MODE_OFF_NAME        "off"
 #define BATADV_GW_MODE_CLIENT_NAME     "client"
 #define BATADV_GW_MODE_SERVER_NAME     "server"
 
-void batadv_gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up);
 ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
                                size_t count);
+void batadv_gw_tvlv_container_update(struct batadv_priv *bat_priv);
+void batadv_gw_init(struct batadv_priv *bat_priv);
+void batadv_gw_free(struct batadv_priv *bat_priv);
 
 #endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index c478e6bcf89b8bd570a54a9a811b4309c7a2e5f0..57c2a19dcb5c8e19c410b27528f22eb671ed528a 100644 (file)
@@ -28,6 +28,7 @@
 #include "originator.h"
 #include "hash.h"
 #include "bridge_loop_avoidance.h"
+#include "gateway_client.h"
 
 #include <linux/if_arp.h>
 #include <linux/if_ether.h>
@@ -124,8 +125,11 @@ static int batadv_is_valid_iface(const struct net_device *net_dev)
  *
  * Returns true if the net device is a 802.11 wireless device, false otherwise.
  */
-static bool batadv_is_wifi_netdev(struct net_device *net_device)
+bool batadv_is_wifi_netdev(struct net_device *net_device)
 {
+       if (!net_device)
+               return false;
+
 #ifdef CONFIG_WIRELESS_EXT
        /* pre-cfg80211 drivers have to implement WEXT, so it is possible to
         * check for wireless_handlers != NULL
@@ -141,34 +145,6 @@ static bool batadv_is_wifi_netdev(struct net_device *net_device)
        return false;
 }
 
-/**
- * batadv_is_wifi_iface - check if the given interface represented by ifindex
- *  is a wifi interface
- * @ifindex: interface index to check
- *
- * Returns true if the interface represented by ifindex is a 802.11 wireless
- * device, false otherwise.
- */
-bool batadv_is_wifi_iface(int ifindex)
-{
-       struct net_device *net_device = NULL;
-       bool ret = false;
-
-       if (ifindex == BATADV_NULL_IFINDEX)
-               goto out;
-
-       net_device = dev_get_by_index(&init_net, ifindex);
-       if (!net_device)
-               goto out;
-
-       ret = batadv_is_wifi_netdev(net_device);
-
-out:
-       if (net_device)
-               dev_put(net_device);
-       return ret;
-}
-
 static struct batadv_hard_iface *
 batadv_hardif_get_active(const struct net_device *soft_iface)
 {
@@ -194,22 +170,13 @@ out:
 static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
                                          struct batadv_hard_iface *oldif)
 {
-       struct batadv_vis_packet *vis_packet;
        struct batadv_hard_iface *primary_if;
-       struct sk_buff *skb;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;
 
        batadv_dat_init_own_addr(bat_priv, primary_if);
-
-       skb = bat_priv->vis.my_info->skb_packet;
-       vis_packet = (struct batadv_vis_packet *)skb->data;
-       memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(vis_packet->sender_orig,
-              primary_if->net_dev->dev_addr, ETH_ALEN);
-
        batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
 out:
        if (primary_if)
@@ -275,16 +242,10 @@ static void batadv_check_known_mac_addr(const struct net_device *net_dev)
 
 int batadv_hardif_min_mtu(struct net_device *soft_iface)
 {
-       const struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+       struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        const struct batadv_hard_iface *hard_iface;
-       /* allow big frames if all devices are capable to do so
-        * (have MTU > 1500 + BAT_HEADER_LEN)
-        */
        int min_mtu = ETH_DATA_LEN;
 
-       if (atomic_read(&bat_priv->fragmentation))
-               goto out;
-
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
@@ -294,23 +255,40 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
                if (hard_iface->soft_iface != soft_iface)
                        continue;
 
-               min_mtu = min_t(int,
-                               hard_iface->net_dev->mtu - BATADV_HEADER_LEN,
-                               min_mtu);
+               min_mtu = min_t(int, hard_iface->net_dev->mtu, min_mtu);
        }
        rcu_read_unlock();
+
+       atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+       if (atomic_read(&bat_priv->fragmentation) == 0)
+               goto out;
+
+       /* with fragmentation enabled the maximum size of internally generated
+        * packets such as translation table exchanges or tvlv containers, etc
+        * has to be calculated
+        */
+       min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+       min_mtu -= sizeof(struct batadv_frag_packet);
+       min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
+       atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+       /* with fragmentation enabled we can fragment external packets easily */
+       min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
+
 out:
-       return min_mtu;
+       return min_mtu - batadv_max_header_len();
 }
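To make the arithmetic concrete (the constants here are assumptions for illustration only; the real values come from the fragmentation code introduced by this merge): with fragmentation enabled on 1500-byte hard interfaces, a hypothetical BATADV_FRAG_MAX_FRAG_SIZE of 1400 bytes, a 20-byte struct batadv_frag_packet and 16 allowed fragments would give packet_size_max = (1400 - 20) * 16 = 22080 bytes for internally generated payloads, while the returned soft-interface MTU stays capped at ETH_DATA_LEN minus batadv_max_header_len().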
 
 /* adjusts the MTU if a new interface with a smaller MTU appeared. */
 void batadv_update_min_mtu(struct net_device *soft_iface)
 {
-       int min_mtu;
+       soft_iface->mtu = batadv_hardif_min_mtu(soft_iface);
 
-       min_mtu = batadv_hardif_min_mtu(soft_iface);
-       if (soft_iface->mtu != min_mtu)
-               soft_iface->mtu = min_mtu;
+       /* Check if the local translation table should be cleaned up to match a
+        * new (and smaller) MTU.
+        */
+       batadv_tt_local_resize_to_mtu(soft_iface);
 }
 
 static void
@@ -388,7 +366,8 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
 {
        struct batadv_priv *bat_priv;
        struct net_device *soft_iface, *master;
-       __be16 ethertype = __constant_htons(ETH_P_BATMAN);
+       __be16 ethertype = htons(ETH_P_BATMAN);
+       int max_header_len = batadv_max_header_len();
        int ret;
 
        if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
@@ -453,23 +432,22 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
        hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
        dev_add_pack(&hard_iface->batman_adv_ptype);
 
-       atomic_set(&hard_iface->frag_seqno, 1);
        batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
                    hard_iface->net_dev->name);
 
        if (atomic_read(&bat_priv->fragmentation) &&
-           hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+           hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len)
                batadv_info(hard_iface->soft_iface,
-                           "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
+                           "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %i would solve the problem.\n",
                            hard_iface->net_dev->name, hard_iface->net_dev->mtu,
-                           ETH_DATA_LEN + BATADV_HEADER_LEN);
+                           ETH_DATA_LEN + max_header_len);
 
        if (!atomic_read(&bat_priv->fragmentation) &&
-           hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+           hard_iface->net_dev->mtu < ETH_DATA_LEN + max_header_len)
                batadv_info(hard_iface->soft_iface,
-                           "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
+                           "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %i.\n",
                            hard_iface->net_dev->name, hard_iface->net_dev->mtu,
-                           ETH_DATA_LEN + BATADV_HEADER_LEN);
+                           ETH_DATA_LEN + max_header_len);
 
        if (batadv_hardif_is_iface_up(hard_iface))
                batadv_hardif_activate_interface(hard_iface);
@@ -533,8 +511,12 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
        dev_put(hard_iface->soft_iface);
 
        /* nobody uses this interface anymore */
-       if (!bat_priv->num_ifaces && autodel == BATADV_IF_CLEANUP_AUTO)
-               batadv_softif_destroy_sysfs(hard_iface->soft_iface);
+       if (!bat_priv->num_ifaces) {
+               batadv_gw_check_client_stop(bat_priv);
+
+               if (autodel == BATADV_IF_CLEANUP_AUTO)
+                       batadv_softif_destroy_sysfs(hard_iface->soft_iface);
+       }
 
        netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
        hard_iface->soft_iface = NULL;
@@ -652,6 +634,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
 
        if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
                batadv_sysfs_add_meshif(net_dev);
+               bat_priv = netdev_priv(net_dev);
+               batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
                return NOTIFY_DONE;
        }
 
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 49892881a7c5bb581fc92837617380ad5b62752f..df4c8bd45c40748a5442999012ab581adf1d1d71 100644 (file)
@@ -41,6 +41,7 @@ enum batadv_hard_if_cleanup {
 
 extern struct notifier_block batadv_hard_if_notifier;
 
+bool batadv_is_wifi_netdev(struct net_device *net_device);
 struct batadv_hard_iface*
 batadv_hardif_get_by_netdev(const struct net_device *net_dev);
 int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
@@ -51,7 +52,6 @@ void batadv_hardif_remove_interfaces(void);
 int batadv_hardif_min_mtu(struct net_device *soft_iface);
 void batadv_update_min_mtu(struct net_device *soft_iface);
 void batadv_hardif_free_rcu(struct rcu_head *rcu);
-bool batadv_is_wifi_iface(int ifindex);
 
 static inline void
 batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 5a99bb4b6b823c8d2b042aec32cb2ff70ee3a8b9..29ae4efe3543e4bfc6ff013b9c97628b9e395209 100644 (file)
@@ -29,7 +29,7 @@
 static struct batadv_socket_client *batadv_socket_client_hash[256];
 
 static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
-                                    struct batadv_icmp_packet_rr *icmp_packet,
+                                    struct batadv_icmp_header *icmph,
                                     size_t icmp_len);
 
 void batadv_socket_init(void)
@@ -155,13 +155,13 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
        struct batadv_priv *bat_priv = socket_client->bat_priv;
        struct batadv_hard_iface *primary_if = NULL;
        struct sk_buff *skb;
-       struct batadv_icmp_packet_rr *icmp_packet;
-
+       struct batadv_icmp_packet_rr *icmp_packet_rr;
+       struct batadv_icmp_header *icmp_header;
        struct batadv_orig_node *orig_node = NULL;
        struct batadv_neigh_node *neigh_node = NULL;
        size_t packet_len = sizeof(struct batadv_icmp_packet);
 
-       if (len < sizeof(struct batadv_icmp_packet)) {
+       if (len < sizeof(struct batadv_icmp_header)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Error - can't send packet from char device: invalid packet size\n");
                return -EINVAL;
@@ -174,8 +174,10 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
                goto out;
        }
 
-       if (len >= sizeof(struct batadv_icmp_packet_rr))
-               packet_len = sizeof(struct batadv_icmp_packet_rr);
+       if (len >= BATADV_ICMP_MAX_PACKET_SIZE)
+               packet_len = BATADV_ICMP_MAX_PACKET_SIZE;
+       else
+               packet_len = len;
 
        skb = netdev_alloc_skb_ip_align(NULL, packet_len + ETH_HLEN);
        if (!skb) {
@@ -185,67 +187,78 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
 
        skb->priority = TC_PRIO_CONTROL;
        skb_reserve(skb, ETH_HLEN);
-       icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
+       icmp_header = (struct batadv_icmp_header *)skb_put(skb, packet_len);
 
-       if (copy_from_user(icmp_packet, buff, packet_len)) {
+       if (copy_from_user(icmp_header, buff, packet_len)) {
                len = -EFAULT;
                goto free_skb;
        }
 
-       if (icmp_packet->header.packet_type != BATADV_ICMP) {
+       if (icmp_header->header.packet_type != BATADV_ICMP) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
                len = -EINVAL;
                goto free_skb;
        }
 
-       if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+       switch (icmp_header->msg_type) {
+       case BATADV_ECHO_REQUEST:
+               if (len < sizeof(struct batadv_icmp_packet)) {
+                       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+                                  "Error - can't send packet from char device: invalid packet size\n");
+                       len = -EINVAL;
+                       goto free_skb;
+               }
+
+               if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+                       goto dst_unreach;
+
+               orig_node = batadv_orig_hash_find(bat_priv, icmp_header->dst);
+               if (!orig_node)
+                       goto dst_unreach;
+
+               neigh_node = batadv_orig_node_get_router(orig_node);
+               if (!neigh_node)
+                       goto dst_unreach;
+
+               if (!neigh_node->if_incoming)
+                       goto dst_unreach;
+
+               if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
+                       goto dst_unreach;
+
+               icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmp_header;
+               if (packet_len == sizeof(*icmp_packet_rr))
+                       memcpy(icmp_packet_rr->rr,
+                              neigh_node->if_incoming->net_dev->dev_addr,
+                              ETH_ALEN);
+
+               break;
+       default:
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                          "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
+                          "Error - can't send packet from char device: got unknown message type\n");
                len = -EINVAL;
                goto free_skb;
        }
 
-       icmp_packet->uid = socket_client->index;
+       icmp_header->uid = socket_client->index;
 
-       if (icmp_packet->header.version != BATADV_COMPAT_VERSION) {
-               icmp_packet->msg_type = BATADV_PARAMETER_PROBLEM;
-               icmp_packet->header.version = BATADV_COMPAT_VERSION;
-               batadv_socket_add_packet(socket_client, icmp_packet,
+       if (icmp_header->header.version != BATADV_COMPAT_VERSION) {
+               icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
+               icmp_header->header.version = BATADV_COMPAT_VERSION;
+               batadv_socket_add_packet(socket_client, icmp_header,
                                         packet_len);
                goto free_skb;
        }
 
-       if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
-               goto dst_unreach;
-
-       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
-       if (!orig_node)
-               goto dst_unreach;
-
-       neigh_node = batadv_orig_node_get_router(orig_node);
-       if (!neigh_node)
-               goto dst_unreach;
-
-       if (!neigh_node->if_incoming)
-               goto dst_unreach;
-
-       if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
-               goto dst_unreach;
-
-       memcpy(icmp_packet->orig,
-              primary_if->net_dev->dev_addr, ETH_ALEN);
-
-       if (packet_len == sizeof(struct batadv_icmp_packet_rr))
-               memcpy(icmp_packet->rr,
-                      neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
+       memcpy(icmp_header->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 
        batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
        goto out;
 
 dst_unreach:
-       icmp_packet->msg_type = BATADV_DESTINATION_UNREACHABLE;
-       batadv_socket_add_packet(socket_client, icmp_packet, packet_len);
+       icmp_header->msg_type = BATADV_DESTINATION_UNREACHABLE;
+       batadv_socket_add_packet(socket_client, icmp_header, packet_len);
 free_skb:
        kfree_skb(skb);
 out:
@@ -298,27 +311,40 @@ err:
        return -ENOMEM;
 }
 
+/**
+ * batadv_socket_add_packet - schedule an icmp packet to be sent to userspace
+ *  on an icmp socket.
+ * @socket_client: the socket this packet belongs to
+ * @icmph: pointer to the header of the icmp packet
+ * @icmp_len: total length of the icmp packet
+ */
 static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
-                                    struct batadv_icmp_packet_rr *icmp_packet,
+                                    struct batadv_icmp_header *icmph,
                                     size_t icmp_len)
 {
        struct batadv_socket_packet *socket_packet;
+       size_t len;
 
        socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
 
        if (!socket_packet)
                return;
 
+       len = icmp_len;
+       /* check the maximum length before filling the buffer */
+       if (len > sizeof(socket_packet->icmp_packet))
+               len = sizeof(socket_packet->icmp_packet);
+
        INIT_LIST_HEAD(&socket_packet->list);
-       memcpy(&socket_packet->icmp_packet, icmp_packet, icmp_len);
-       socket_packet->icmp_len = icmp_len;
+       memcpy(&socket_packet->icmp_packet, icmph, len);
+       socket_packet->icmp_len = len;
 
        spin_lock_bh(&socket_client->lock);
 
        /* while waiting for the lock the socket_client could have been
         * deleted
         */
-       if (!batadv_socket_client_hash[icmp_packet->uid]) {
+       if (!batadv_socket_client_hash[icmph->uid]) {
                spin_unlock_bh(&socket_client->lock);
                kfree(socket_packet);
                return;
@@ -342,12 +368,18 @@ static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
        wake_up(&socket_client->queue_wait);
 }
 
-void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+/**
+ * batadv_socket_receive_packet - schedule an icmp packet to be received
+ *  locally and sent to userspace.
+ * @icmph: pointer to the header of the icmp packet
+ * @icmp_len: total length of the icmp packet
+ */
+void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
                                  size_t icmp_len)
 {
        struct batadv_socket_client *hash;
 
-       hash = batadv_socket_client_hash[icmp_packet->uid];
+       hash = batadv_socket_client_hash[icmph->uid];
        if (hash)
-               batadv_socket_add_packet(hash, icmp_packet, icmp_len);
+               batadv_socket_add_packet(hash, icmph, icmp_len);
 }
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 1fcca37b62234d92b2e6092dcdc4ccfed4d76ba2..6665080dff7950bc904c54fef35f91dd3135946a 100644 (file)
@@ -24,7 +24,7 @@
 
 void batadv_socket_init(void);
 int batadv_socket_setup(struct batadv_priv *bat_priv);
-void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+void batadv_socket_receive_packet(struct batadv_icmp_header *icmph,
                                  size_t icmp_len);
 
 #endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c72d1bcdcf4906a1a23808cab2a464804a3fcda6..c51a5e568f0a80c08beb4ded5b34b1aff8d5cfdf 100644 (file)
 #include "gateway_client.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
-#include "vis.h"
+#include "gateway_common.h"
 #include "hash.h"
 #include "bat_algo.h"
 #include "network-coding.h"
+#include "fragmentation.h"
 
 
 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
@@ -65,6 +66,7 @@ static int __init batadv_init(void)
        batadv_recv_handler_init();
 
        batadv_iv_init();
+       batadv_nc_init();
 
        batadv_event_workqueue = create_singlethread_workqueue("bat_events");
 
@@ -108,9 +110,11 @@ int batadv_mesh_init(struct net_device *soft_iface)
        spin_lock_init(&bat_priv->tt.req_list_lock);
        spin_lock_init(&bat_priv->tt.roam_list_lock);
        spin_lock_init(&bat_priv->tt.last_changeset_lock);
+       spin_lock_init(&bat_priv->tt.commit_lock);
        spin_lock_init(&bat_priv->gw.list_lock);
-       spin_lock_init(&bat_priv->vis.hash_lock);
-       spin_lock_init(&bat_priv->vis.list_lock);
+       spin_lock_init(&bat_priv->tvlv.container_list_lock);
+       spin_lock_init(&bat_priv->tvlv.handler_list_lock);
+       spin_lock_init(&bat_priv->softif_vlan_list_lock);
 
        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
@@ -118,6 +122,9 @@ int batadv_mesh_init(struct net_device *soft_iface)
        INIT_LIST_HEAD(&bat_priv->tt.changes_list);
        INIT_LIST_HEAD(&bat_priv->tt.req_list);
        INIT_LIST_HEAD(&bat_priv->tt.roam_list);
+       INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
+       INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
+       INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
 
        ret = batadv_originator_init(bat_priv);
        if (ret < 0)
@@ -127,13 +134,6 @@ int batadv_mesh_init(struct net_device *soft_iface)
        if (ret < 0)
                goto err;
 
-       batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
-                           BATADV_NULL_IFINDEX);
-
-       ret = batadv_vis_init(bat_priv);
-       if (ret < 0)
-               goto err;
-
        ret = batadv_bla_init(bat_priv);
        if (ret < 0)
                goto err;
@@ -142,10 +142,12 @@ int batadv_mesh_init(struct net_device *soft_iface)
        if (ret < 0)
                goto err;
 
-       ret = batadv_nc_init(bat_priv);
+       ret = batadv_nc_mesh_init(bat_priv);
        if (ret < 0)
                goto err;
 
+       batadv_gw_init(bat_priv);
+
        atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
 
@@ -164,10 +166,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
 
        batadv_purge_outstanding_packets(bat_priv, NULL);
 
-       batadv_vis_quit(bat_priv);
-
        batadv_gw_node_purge(bat_priv);
-       batadv_nc_free(bat_priv);
+       batadv_nc_mesh_free(bat_priv);
        batadv_dat_free(bat_priv);
        batadv_bla_free(bat_priv);
 
@@ -184,6 +184,8 @@ void batadv_mesh_free(struct net_device *soft_iface)
         */
        batadv_originator_free(bat_priv);
 
+       batadv_gw_free(bat_priv);
+
        free_percpu(bat_priv->bat_counters);
        bat_priv->bat_counters = NULL;
 
@@ -253,6 +255,31 @@ out:
        return primary_if;
 }
 
+/**
+ * batadv_max_header_len - calculate maximum encapsulation overhead for a
+ *  payload packet
+ *
+ * Return the maximum encapsulation overhead in bytes.
+ */
+int batadv_max_header_len(void)
+{
+       int header_len = 0;
+
+       header_len = max_t(int, header_len,
+                          sizeof(struct batadv_unicast_packet));
+       header_len = max_t(int, header_len,
+                          sizeof(struct batadv_unicast_4addr_packet));
+       header_len = max_t(int, header_len,
+                          sizeof(struct batadv_bcast_packet));
+
+#ifdef CONFIG_BATMAN_ADV_NC
+       header_len = max_t(int, header_len,
+                          sizeof(struct batadv_coded_packet));
+#endif
+
+       return header_len;
+}
+
 /**
  * batadv_skb_set_priority - sets skb priority according to packet content
  * @skb: the packet to be sent
@@ -391,22 +418,31 @@ static void batadv_recv_handler_init(void)
        for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_packet;
 
-       /* batman icmp packet */
-       batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+       for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
+               batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;
+
+       /* compile time checks for struct member offsets */
+       BUILD_BUG_ON(offsetof(struct batadv_unicast_4addr_packet, src) != 10);
+       BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
+       BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
+
+       /* broadcast packet */
+       batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
+
+       /* unicast packets ... */
        /* unicast with 4 addresses packet */
        batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
        /* unicast packet */
        batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
-       /* fragmented unicast packet */
-       batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
-       /* broadcast packet */
-       batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
-       /* vis packet */
-       batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
-       /* Translation table query (request or response) */
-       batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
-       /* Roaming advertisement */
-       batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
+       /* unicast tvlv packet */
+       batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
+       /* batman icmp packet */
+       batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
+       /* Fragmented packets */
+       batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
 }
 
 int
@@ -414,7 +450,12 @@ batadv_recv_handler_register(uint8_t packet_type,
                             int (*recv_handler)(struct sk_buff *,
                                                 struct batadv_hard_iface *))
 {
-       if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
+       int (*curr)(struct sk_buff *,
+                   struct batadv_hard_iface *);
+       curr = batadv_rx_handler[packet_type];
+
+       if ((curr != batadv_recv_unhandled_packet) &&
+           (curr != batadv_recv_unhandled_unicast_packet))
                return -EBUSY;
 
        batadv_rx_handler[packet_type] = recv_handler;
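A packet type whose slot still holds one of the two placeholder handlers can be claimed; any other slot is rejected with -EBUSY so existing handlers cannot be silently replaced. Hypothetical usage sketch (packet type and callback are placeholders, not defined by this commit):

int ret;

ret = batadv_recv_handler_register(BATADV_MY_PACKET_TYPE, my_recv_packet);
if (ret == -EBUSY)
	pr_warn("packet type already claimed by another handler\n");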
@@ -460,7 +501,9 @@ int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
            !bat_algo_ops->bat_iface_update_mac ||
            !bat_algo_ops->bat_primary_iface_set ||
            !bat_algo_ops->bat_ogm_schedule ||
-           !bat_algo_ops->bat_ogm_emit) {
+           !bat_algo_ops->bat_ogm_emit ||
+           !bat_algo_ops->bat_neigh_cmp ||
+           !bat_algo_ops->bat_neigh_is_equiv_or_better) {
                pr_info("Routing algo '%s' does not implement required ops\n",
                        bat_algo_ops->name);
                ret = -EINVAL;
@@ -535,6 +578,601 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
        return htonl(crc);
 }
 
+/**
+ * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
+ *  possibly free it
+ * @tvlv_handler: the tvlv handler to free
+ */
+static void
+batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
+{
+       if (atomic_dec_and_test(&tvlv_handler->refcount))
+               kfree_rcu(tvlv_handler, rcu);
+}
+
+/**
+ * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
+ *  based on the provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to look for
+ * @version: tvlv handler version to look for
+ *
+ * Returns tvlv handler if found or NULL otherwise.
+ */
+static struct batadv_tvlv_handler
+*batadv_tvlv_handler_get(struct batadv_priv *bat_priv,
+                        uint8_t type, uint8_t version)
+{
+       struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tvlv_handler_tmp,
+                                &bat_priv->tvlv.handler_list, list) {
+               if (tvlv_handler_tmp->type != type)
+                       continue;
+
+               if (tvlv_handler_tmp->version != version)
+                       continue;
+
+               if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
+                       continue;
+
+               tvlv_handler = tvlv_handler_tmp;
+               break;
+       }
+       rcu_read_unlock();
+
+       return tvlv_handler;
+}
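The lookup above returns the handler with its reference count already raised, following the get/free_ref pattern used throughout batman-adv; the caller must drop that reference when done. Minimal caller sketch (illustrative only):

struct batadv_tvlv_handler *handler;

handler = batadv_tvlv_handler_get(bat_priv, type, version);
if (handler) {
	/* ... dispatch to the handler ... */
	batadv_tvlv_handler_free_ref(handler);
}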
+
+/**
+ * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
+ *  possibly free it
+ * @tvlv: the tvlv container to free
+ */
+static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
+{
+       if (atomic_dec_and_test(&tvlv->refcount))
+               kfree(tvlv);
+}
+
+/**
+ * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
+ *  list based on the provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type to look for
+ * @version: tvlv container version to look for
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ *
+ * Returns tvlv container if found or NULL otherwise.
+ */
+static struct batadv_tvlv_container
+*batadv_tvlv_container_get(struct batadv_priv *bat_priv,
+                          uint8_t type, uint8_t version)
+{
+       struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;
+
+       hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
+               if (tvlv_tmp->tvlv_hdr.type != type)
+                       continue;
+
+               if (tvlv_tmp->tvlv_hdr.version != version)
+                       continue;
+
+               if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
+                       continue;
+
+               tvlv = tvlv_tmp;
+               break;
+       }
+
+       return tvlv;
+}
+
+/**
+ * batadv_tvlv_container_list_size - calculate the size of the tvlv container
+ *  list entries
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ *
+ * Returns size of all currently registered tvlv containers in bytes.
+ */
+static uint16_t batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
+{
+       struct batadv_tvlv_container *tvlv;
+       uint16_t tvlv_len = 0;
+
+       hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
+               tvlv_len += sizeof(struct batadv_tvlv_hdr);
+               tvlv_len += ntohs(tvlv->tvlv_hdr.len);
+       }
+
+       return tvlv_len;
+}
+
+/**
+ * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
+ *  list
+ * @tvlv: the tvlv container to be removed
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (tvlv.container_list_lock).
+ */
+static void batadv_tvlv_container_remove(struct batadv_tvlv_container *tvlv)
+{
+       if (!tvlv)
+               return;
+
+       hlist_del(&tvlv->list);
+
+       /* first call to decrement the counter, second call to free */
+       batadv_tvlv_container_free_ref(tvlv);
+       batadv_tvlv_container_free_ref(tvlv);
+}
+
+/**
+ * batadv_tvlv_container_unregister - unregister tvlv container based on the
+ *  provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type to unregister
+ * @version: tvlv container version to unregister
+ */
+void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
+                                     uint8_t type, uint8_t version)
+{
+       struct batadv_tvlv_container *tvlv;
+
+       spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+       tvlv = batadv_tvlv_container_get(bat_priv, type, version);
+       batadv_tvlv_container_remove(tvlv);
+       spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+}
+
+/**
+ * batadv_tvlv_container_register - register tvlv type, version and content
+ *  to be propagated with each (primary interface) OGM
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv container type
+ * @version: tvlv container version
+ * @tvlv_value: tvlv container content
+ * @tvlv_value_len: tvlv container content length
+ *
+ * If a container of the same type and version was already registered, the
+ * new content replaces the old one.
+ */
+void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
+                                   uint8_t type, uint8_t version,
+                                   void *tvlv_value, uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_container *tvlv_old, *tvlv_new;
+
+       if (!tvlv_value)
+               tvlv_value_len = 0;
+
+       tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
+       if (!tvlv_new)
+               return;
+
+       tvlv_new->tvlv_hdr.version = version;
+       tvlv_new->tvlv_hdr.type = type;
+       tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);
+
+       memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
+       INIT_HLIST_NODE(&tvlv_new->list);
+       atomic_set(&tvlv_new->refcount, 1);
+
+       spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+       tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
+       batadv_tvlv_container_remove(tvlv_old);
+       hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
+       spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+}
+
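A minimal sketch of how a feature could publish its own TVLV container with this helper; the payload struct, the type value 0x42 and the helper name are placeholders invented for the example, not identifiers defined by batman-adv.

struct my_tvlv_data {                   /* hypothetical payload layout */
        __be16 flags;
} __packed;

static void my_feature_announce(struct batadv_priv *bat_priv, bool enable)
{
        struct my_tvlv_data data = { .flags = htons(0x0001) };

        /* the register call copies the value, so a stack object is fine */
        if (enable)
                batadv_tvlv_container_register(bat_priv, 0x42, 1,
                                               &data, sizeof(data));
        else
                batadv_tvlv_container_unregister(bat_priv, 0x42, 1);
}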
+/**
+ * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
+ *  requested packet size
+ * @packet_buff: packet buffer
+ * @packet_buff_len: packet buffer size
+ * @min_packet_len: requested packet minimum size
+ * @additional_packet_len: requested additional packet size on top of minimum
+ *  size
+ *
+ * Returns true if the packet buffer could be changed to the requested size,
+ * false otherwise.
+ */
+static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
+                                           int *packet_buff_len,
+                                           int min_packet_len,
+                                           int additional_packet_len)
+{
+       unsigned char *new_buff;
+
+       new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);
+
+       /* keep old buffer if kmalloc should fail */
+       if (new_buff) {
+               memcpy(new_buff, *packet_buff, min_packet_len);
+               kfree(*packet_buff);
+               *packet_buff = new_buff;
+               *packet_buff_len = min_packet_len + additional_packet_len;
+               return true;
+       }
+
+       return false;
+}
+
+/**
+ * batadv_tvlv_container_ogm_append - append tvlv container content to given
+ *  OGM packet buffer
+ * @bat_priv: the bat priv with all the soft interface information
+ * @packet_buff: ogm packet buffer
+ * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
+ *  content
+ * @packet_min_len: ogm header size to be preserved for the OGM itself
+ *
+ * The ogm packet might be enlarged or shrunk depending on the current size
+ * and the size of the to-be-appended tvlv containers.
+ *
+ * Returns size of all appended tvlv containers in bytes.
+ */
+uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+                                         unsigned char **packet_buff,
+                                         int *packet_buff_len,
+                                         int packet_min_len)
+{
+       struct batadv_tvlv_container *tvlv;
+       struct batadv_tvlv_hdr *tvlv_hdr;
+       uint16_t tvlv_value_len;
+       void *tvlv_value;
+       bool ret;
+
+       spin_lock_bh(&bat_priv->tvlv.container_list_lock);
+       tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);
+
+       ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
+                                             packet_min_len, tvlv_value_len);
+
+       if (!ret)
+               goto end;
+
+       if (!tvlv_value_len)
+               goto end;
+
+       tvlv_value = (*packet_buff) + packet_min_len;
+
+       hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
+               tvlv_hdr = tvlv_value;
+               tvlv_hdr->type = tvlv->tvlv_hdr.type;
+               tvlv_hdr->version = tvlv->tvlv_hdr.version;
+               tvlv_hdr->len = tvlv->tvlv_hdr.len;
+               tvlv_value = tvlv_hdr + 1;
+               memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
+               tvlv_value = (uint8_t *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
+       }
+
+end:
+       spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
+       return tvlv_value_len;
+}
+
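A sketch of the call site an OGM scheduler would use; ogm_buff/ogm_buff_len stand in for the routing algorithm's own buffer bookkeeping and BATADV_OGM_HLEN for the plain OGM header size, both of which live outside this hunk.

static void my_ogm_append_tvlv(struct batadv_priv *bat_priv,
                               unsigned char **ogm_buff, int *ogm_buff_len)
{
        uint16_t tvlv_len;

        tvlv_len = batadv_tvlv_container_ogm_append(bat_priv, ogm_buff,
                                                    ogm_buff_len,
                                                    BATADV_OGM_HLEN);

        /* the appended length has to end up in the OGM header (network
         * byte order) so receivers know how much TVLV data follows
         */
        ((struct batadv_ogm_packet *)*ogm_buff)->tvlv_len = htons(tvlv_len);
}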
+/**
+ * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
+ *  appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tvlv_handler: tvlv callback function handling the tvlv content
+ * @ogm_source: flag indicating whether the tvlv is carried by an ogm or a
+ *  unicast packet
+ * @orig_node: orig node emitting the ogm packet
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ *
+ * Returns NET_RX_SUCCESS if no matching handler was found, otherwise the
+ * return value of the handler callback.
+ */
+static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
+                                   struct batadv_tvlv_handler *tvlv_handler,
+                                   bool ogm_source,
+                                   struct batadv_orig_node *orig_node,
+                                   uint8_t *src, uint8_t *dst,
+                                   void *tvlv_value, uint16_t tvlv_value_len)
+{
+       if (!tvlv_handler)
+               return NET_RX_SUCCESS;
+
+       if (ogm_source) {
+               if (!tvlv_handler->ogm_handler)
+                       return NET_RX_SUCCESS;
+
+               if (!orig_node)
+                       return NET_RX_SUCCESS;
+
+               tvlv_handler->ogm_handler(bat_priv, orig_node,
+                                         BATADV_NO_FLAGS,
+                                         tvlv_value, tvlv_value_len);
+               tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
+       } else {
+               if (!src)
+                       return NET_RX_SUCCESS;
+
+               if (!dst)
+                       return NET_RX_SUCCESS;
+
+               if (!tvlv_handler->unicast_handler)
+                       return NET_RX_SUCCESS;
+
+               return tvlv_handler->unicast_handler(bat_priv, src,
+                                                    dst, tvlv_value,
+                                                    tvlv_value_len);
+       }
+
+       return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
+ *  appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ogm_source: flag indicating whether the tvlv is carried by an ogm or a
+ *  unicast packet
+ * @orig_node: orig node emitting the ogm packet
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ *
+ * Returns NET_RX_SUCCESS when processing an OGM, otherwise the OR-ed return
+ * values of all called handler callbacks.
+ */
+int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
+                                  bool ogm_source,
+                                  struct batadv_orig_node *orig_node,
+                                  uint8_t *src, uint8_t *dst,
+                                  void *tvlv_value, uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_handler *tvlv_handler;
+       struct batadv_tvlv_hdr *tvlv_hdr;
+       uint16_t tvlv_value_cont_len;
+       uint8_t cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
+       int ret = NET_RX_SUCCESS;
+
+       while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
+               tvlv_hdr = tvlv_value;
+               tvlv_value_cont_len = ntohs(tvlv_hdr->len);
+               tvlv_value = tvlv_hdr + 1;
+               tvlv_value_len -= sizeof(*tvlv_hdr);
+
+               if (tvlv_value_cont_len > tvlv_value_len)
+                       break;
+
+               tvlv_handler = batadv_tvlv_handler_get(bat_priv,
+                                                      tvlv_hdr->type,
+                                                      tvlv_hdr->version);
+
+               ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
+                                               ogm_source, orig_node,
+                                               src, dst, tvlv_value,
+                                               tvlv_value_cont_len);
+               if (tvlv_handler)
+                       batadv_tvlv_handler_free_ref(tvlv_handler);
+               tvlv_value = (uint8_t *)tvlv_value + tvlv_value_cont_len;
+               tvlv_value_len -= tvlv_value_cont_len;
+       }
+
+       if (!ogm_source)
+               return ret;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(tvlv_handler,
+                                &bat_priv->tvlv.handler_list, list) {
+               if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
+                   !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
+                       tvlv_handler->ogm_handler(bat_priv, orig_node,
+                                                 cifnotfound, NULL, 0);
+
+               tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
+       }
+       rcu_read_unlock();
+
+       return NET_RX_SUCCESS;
+}
+
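For the unicast side, a sketch of how a receive path could feed a packet into this parser; the struct batadv_unicast_tvlv_packet fields used here match the packet built by batadv_tvlv_unicast_send() below, while the wrapper function itself is hypothetical.

static int my_recv_unicast_tvlv(struct batadv_priv *bat_priv,
                                struct batadv_unicast_tvlv_packet *pkt)
{
        void *tvlv_buff = pkt + 1;      /* TVLV data follows the header */

        return batadv_tvlv_containers_process(bat_priv, false, NULL,
                                               pkt->src, pkt->dst, tvlv_buff,
                                               ntohs(pkt->tvlv_len));
}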
+/**
+ * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
+ *  handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @batadv_ogm_packet: ogm packet containing the tvlv containers
+ * @orig_node: orig node emitting the ogm packet
+ */
+void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
+                            struct batadv_ogm_packet *batadv_ogm_packet,
+                            struct batadv_orig_node *orig_node)
+{
+       void *tvlv_value;
+       uint16_t tvlv_value_len;
+
+       if (!batadv_ogm_packet)
+               return;
+
+       tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
+       if (!tvlv_value_len)
+               return;
+
+       tvlv_value = batadv_ogm_packet + 1;
+
+       batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
+                                      tvlv_value, tvlv_value_len);
+}
+
+/**
+ * batadv_tvlv_handler_register - register tvlv handler based on the provided
+ *  type and version (both need to match) for ogm tvlv payload and/or unicast
+ *  payload
+ * @bat_priv: the bat priv with all the soft interface information
+ * @optr: ogm tvlv handler callback function. This function receives the orig
+ *  node, flags and the tvlv content as arguments to process.
+ * @uptr: unicast tvlv handler callback function. This function receives the
+ *  source & destination of the unicast packet as well as the tvlv content
+ *  to process.
+ * @type: tvlv handler type to be registered
+ * @version: tvlv handler version to be registered
+ * @flags: flags to enable or disable TVLV API behavior
+ */
+void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+                                 void (*optr)(struct batadv_priv *bat_priv,
+                                              struct batadv_orig_node *orig,
+                                              uint8_t flags,
+                                              void *tvlv_value,
+                                              uint16_t tvlv_value_len),
+                                 int (*uptr)(struct batadv_priv *bat_priv,
+                                             uint8_t *src, uint8_t *dst,
+                                             void *tvlv_value,
+                                             uint16_t tvlv_value_len),
+                                 uint8_t type, uint8_t version, uint8_t flags)
+{
+       struct batadv_tvlv_handler *tvlv_handler;
+
+       tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+       if (tvlv_handler) {
+               batadv_tvlv_handler_free_ref(tvlv_handler);
+               return;
+       }
+
+       tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
+       if (!tvlv_handler)
+               return;
+
+       tvlv_handler->ogm_handler = optr;
+       tvlv_handler->unicast_handler = uptr;
+       tvlv_handler->type = type;
+       tvlv_handler->version = version;
+       tvlv_handler->flags = flags;
+       atomic_set(&tvlv_handler->refcount, 1);
+       INIT_HLIST_NODE(&tvlv_handler->list);
+
+       spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+       hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
+       spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+}
+
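A sketch of a caller registering both callbacks; the two handler bodies and the type value 0x42 are placeholders, while BATADV_TVLV_HANDLER_OGM_CIFNOTFND is the flag introduced by this patch (with it set, the OGM handler is also invoked, flagged accordingly, when an OGM arrives without the expected container).

static void my_ogm_tvlv_handler(struct batadv_priv *bat_priv,
                                struct batadv_orig_node *orig, uint8_t flags,
                                void *tvlv_value, uint16_t tvlv_value_len)
{
        if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
                return; /* originator did not announce the container */

        /* ... parse tvlv_value ... */
}

static int my_unicast_tvlv_handler(struct batadv_priv *bat_priv,
                                   uint8_t *src, uint8_t *dst,
                                   void *tvlv_value, uint16_t tvlv_value_len)
{
        /* ... parse tvlv_value ... */
        return NET_RX_SUCCESS;
}

static void my_feature_init(struct batadv_priv *bat_priv)
{
        batadv_tvlv_handler_register(bat_priv, my_ogm_tvlv_handler,
                                     my_unicast_tvlv_handler, 0x42, 1,
                                     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
}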
+/**
+ * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
+ *  provided type and version (both need to match)
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to be unregistered
+ * @version: tvlv handler version to be unregistered
+ */
+void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+                                   uint8_t type, uint8_t version)
+{
+       struct batadv_tvlv_handler *tvlv_handler;
+
+       tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
+       if (!tvlv_handler)
+               return;
+
+       batadv_tvlv_handler_free_ref(tvlv_handler);
+       spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+       hlist_del_rcu(&tvlv_handler->list);
+       spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
+       batadv_tvlv_handler_free_ref(tvlv_handler);
+}
+
+/**
+ * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
+ *  specified host
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: source mac address of the unicast packet
+ * @dst: destination mac address of the unicast packet
+ * @type: tvlv type
+ * @version: tvlv version
+ * @tvlv_value: tvlv content
+ * @tvlv_value_len: tvlv content length
+ */
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
+                             uint8_t *dst, uint8_t type, uint8_t version,
+                             void *tvlv_value, uint16_t tvlv_value_len)
+{
+       struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
+       struct batadv_tvlv_hdr *tvlv_hdr;
+       struct batadv_orig_node *orig_node;
+       struct sk_buff *skb = NULL;
+       unsigned char *tvlv_buff;
+       unsigned int tvlv_len;
+       ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
+       bool ret = false;
+
+       orig_node = batadv_orig_hash_find(bat_priv, dst);
+       if (!orig_node)
+               goto out;
+
+       tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;
+
+       skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
+       if (!skb)
+               goto out;
+
+       skb->priority = TC_PRIO_CONTROL;
+       skb_reserve(skb, ETH_HLEN);
+       tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
+       unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
+       unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
+       unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
+       unicast_tvlv_packet->header.ttl = BATADV_TTL;
+       unicast_tvlv_packet->reserved = 0;
+       unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
+       unicast_tvlv_packet->align = 0;
+       memcpy(unicast_tvlv_packet->src, src, ETH_ALEN);
+       memcpy(unicast_tvlv_packet->dst, dst, ETH_ALEN);
+
+       tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
+       tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
+       tvlv_hdr->version = version;
+       tvlv_hdr->type = type;
+       tvlv_hdr->len = htons(tvlv_value_len);
+       tvlv_buff += sizeof(*tvlv_hdr);
+       memcpy(tvlv_buff, tvlv_value, tvlv_value_len);
+
+       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
+               ret = true;
+
+out:
+       if (skb && !ret)
+               kfree_skb(skb);
+       if (orig_node)
+               batadv_orig_node_free_ref(orig_node);
+}
+
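A sketch of a sender using the helper; my_mac/peer_mac, the one-byte payload and the type/version values are invented for the example.

static void my_send_request(struct batadv_priv *bat_priv,
                            uint8_t *my_mac, uint8_t *peer_mac)
{
        uint8_t request = 1;    /* arbitrary one-byte payload */

        batadv_tvlv_unicast_send(bat_priv, my_mac, peer_mac,
                                 0x42, 1, &request, sizeof(request));
}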
+/**
+ * batadv_get_vid - extract the VLAN identifier from skb if any
+ * @skb: the buffer containing the packet
+ * @header_len: length of the batman header preceding the ethernet header
+ *
+ * If the packet embedded in the skb is vlan tagged, this function returns the
+ * VID with the BATADV_VLAN_HAS_TAG flag set. Otherwise BATADV_NO_FLAGS is
+ * returned.
+ */
+unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
+{
+       struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
+       struct vlan_ethhdr *vhdr;
+       unsigned short vid;
+
+       if (ethhdr->h_proto != htons(ETH_P_8021Q))
+               return BATADV_NO_FLAGS;
+
+       if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
+               return BATADV_NO_FLAGS;
+
+       vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
+       vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
+       vid |= BATADV_VLAN_HAS_TAG;
+
+       return vid;
+}
+
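A sketch of how a receive path interprets the result; hdr_size stands for the batman-adv header length already accounted for, and the wrapper and printouts are only for illustration.

static void my_inspect_vid(struct sk_buff *skb, size_t hdr_size)
{
        unsigned short vid = batadv_get_vid(skb, hdr_size);

        if (vid & BATADV_VLAN_HAS_TAG)
                pr_debug("tagged frame, VID %d\n", vid & VLAN_VID_MASK);
        else
                pr_debug("untagged frame\n");
}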
 static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
 {
        struct batadv_algo_ops *bat_algo_ops;
index 24675523930fe6b17b7ee15e2b1eece635ff87fe..f94f287b867083e755bd62dcc56af88f30837b33 100644 (file)
 #ifndef _NET_BATMAN_ADV_MAIN_H_
 #define _NET_BATMAN_ADV_MAIN_H_
 
-#define BATADV_DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
-                            "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
+#define BATADV_DRIVER_AUTHOR "Marek Lindner <mareklindner@neomailbox.ch>, " \
+                            "Simon Wunderlich <sw@simonwunderlich.de>"
 #define BATADV_DRIVER_DESC   "B.A.T.M.A.N. advanced"
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2013.4.0"
+#define BATADV_SOURCE_VERSION "2013.5.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
 /* numbers of originator to contact for any PUT/GET DHT operation */
 #define BATADV_DAT_CANDIDATES_NUM 3
 
-#define BATADV_VIS_INTERVAL 5000       /* 5 seconds */
+/**
+ * BATADV_TQ_SIMILARITY_THRESHOLD - TQ points that a secondary metric can differ
+ *  at most from the primary one in order to be still considered acceptable
+ */
+#define BATADV_TQ_SIMILARITY_THRESHOLD 50
 
 /* how much worse secondary interfaces may be to be considered as bonding
  * candidates
@@ -133,6 +137,15 @@ enum batadv_uev_type {
 
 #define BATADV_GW_THRESHOLD    50
 
+/* Number of fragment chains for each orig_node */
+#define BATADV_FRAG_BUFFER_COUNT 8
+/* Maximum number of fragments for one packet */
+#define BATADV_FRAG_MAX_FRAGMENTS 16
+/* Maximum size of each fragment */
+#define BATADV_FRAG_MAX_FRAG_SIZE 1400
+/* Time to keep fragments while waiting for the rest of the fragments */
+#define BATADV_FRAG_TIMEOUT 10000
+
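Taken together, these limits mean a single fragmented packet can span at most 16 fragments of up to 1400 bytes each, i.e. roughly 16 * 1400 = 22400 bytes before per-fragment headers are subtracted, buffered in 8 chains per originator and discarded after 10 seconds if the remaining fragments never arrive.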
 #define BATADV_DAT_CANDIDATE_NOT_FOUND 0
 #define BATADV_DAT_CANDIDATE_ORIG      1
 
@@ -160,15 +173,9 @@ enum batadv_uev_type {
 #include <net/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/seq_file.h>
-#include "types.h"
+#include <linux/if_vlan.h>
 
-/**
- * batadv_vlan_flags - flags for the four MSB of any vlan ID field
- * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
- */
-enum batadv_vlan_flags {
-       BATADV_VLAN_HAS_TAG     = BIT(15),
-};
+#include "types.h"
 
 #define BATADV_PRINT_VID(vid) (vid & BATADV_VLAN_HAS_TAG ? \
                               (int)(vid & VLAN_VID_MASK) : -1)
@@ -184,6 +191,7 @@ void batadv_mesh_free(struct net_device *soft_iface);
 int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
 struct batadv_hard_iface *
 batadv_seq_print_text_primary_if_get(struct seq_file *seq);
+int batadv_max_header_len(void);
 void batadv_skb_set_priority(struct sk_buff *skb, int offset);
 int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype,
@@ -326,4 +334,40 @@ static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
  */
 #define BATADV_SKB_CB(__skb)       ((struct batadv_skb_cb *)&((__skb)->cb[0]))
 
+void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
+                                   uint8_t type, uint8_t version,
+                                   void *tvlv_value, uint16_t tvlv_value_len);
+uint16_t batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
+                                         unsigned char **packet_buff,
+                                         int *packet_buff_len,
+                                         int packet_min_len);
+void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
+                            struct batadv_ogm_packet *batadv_ogm_packet,
+                            struct batadv_orig_node *orig_node);
+void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
+                                     uint8_t type, uint8_t version);
+
+void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
+                                 void (*optr)(struct batadv_priv *bat_priv,
+                                              struct batadv_orig_node *orig,
+                                              uint8_t flags,
+                                              void *tvlv_value,
+                                              uint16_t tvlv_value_len),
+                                 int (*uptr)(struct batadv_priv *bat_priv,
+                                             uint8_t *src, uint8_t *dst,
+                                             void *tvlv_value,
+                                             uint16_t tvlv_value_len),
+                                 uint8_t type, uint8_t version, uint8_t flags);
+void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+                                   uint8_t type, uint8_t version);
+int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
+                                  bool ogm_source,
+                                  struct batadv_orig_node *orig_node,
+                                  uint8_t *src, uint8_t *dst,
+                                  void *tvlv_buff, uint16_t tvlv_buff_len);
+void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
+                             uint8_t *dst, uint8_t type, uint8_t version,
+                             void *tvlv_value, uint16_t tvlv_value_len);
+unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len);
+
 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
index a487d46e0aeccdb72ab4ad6e361dd0b4b87a7c05..351e199bc0afff37aa9c1a4b3f40201956dfde91 100644 (file)
@@ -34,6 +34,20 @@ static void batadv_nc_worker(struct work_struct *work);
 static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
 
+/**
+ * batadv_nc_init - one-time initialization for network coding
+ */
+int __init batadv_nc_init(void)
+{
+       int ret;
+
+       /* Register our packet type */
+       ret = batadv_recv_handler_register(BATADV_CODED,
+                                          batadv_nc_recv_coded_packet);
+
+       return ret;
+}
+
 /**
  * batadv_nc_start_timer - initialise the nc periodic worker
  * @bat_priv: the bat priv with all the soft interface information
@@ -45,10 +59,63 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_nc_init - initialise coding hash table and start house keeping
+ * batadv_nc_tvlv_container_update - update the network coding tvlv container
+ *  after network coding setting change
  * @bat_priv: the bat priv with all the soft interface information
  */
-int batadv_nc_init(struct batadv_priv *bat_priv)
+static void batadv_nc_tvlv_container_update(struct batadv_priv *bat_priv)
+{
+       char nc_mode;
+
+       nc_mode = atomic_read(&bat_priv->network_coding);
+
+       switch (nc_mode) {
+       case 0:
+               batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
+               break;
+       case 1:
+               batadv_tvlv_container_register(bat_priv, BATADV_TVLV_NC, 1,
+                                              NULL, 0);
+               break;
+       }
+}
+
+/**
+ * batadv_nc_status_update - update the network coding tvlv container after
+ *  network coding setting change
+ * @net_dev: the soft interface net device
+ */
+void batadv_nc_status_update(struct net_device *net_dev)
+{
+       struct batadv_priv *bat_priv = netdev_priv(net_dev);
+       batadv_nc_tvlv_container_update(bat_priv);
+}
+
+/**
+ * batadv_nc_tvlv_ogm_handler_v1 - process incoming nc tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the network coding data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig,
+                                         uint8_t flags,
+                                         void *tvlv_value,
+                                         uint16_t tvlv_value_len)
+{
+       if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+               orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
+       else
+               orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
+}
+
+/**
+ * batadv_nc_mesh_init - initialise coding hash table and start house keeping
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 {
        bat_priv->nc.timestamp_fwd_flush = jiffies;
        bat_priv->nc.timestamp_sniffed_purge = jiffies;
@@ -70,14 +137,13 @@ int batadv_nc_init(struct batadv_priv *bat_priv)
        batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
                                   &batadv_nc_decoding_hash_lock_class_key);
 
-       /* Register our packet type */
-       if (batadv_recv_handler_register(BATADV_CODED,
-                                        batadv_nc_recv_coded_packet) < 0)
-               goto err;
-
        INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
        batadv_nc_start_timer(bat_priv);
 
+       batadv_tvlv_handler_register(bat_priv, batadv_nc_tvlv_ogm_handler_v1,
+                                    NULL, BATADV_TVLV_NC, 1,
+                                    BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
+       batadv_nc_tvlv_container_update(bat_priv);
        return 0;
 
 err:
@@ -793,6 +859,10 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
        if (!atomic_read(&bat_priv->network_coding))
                goto out;
 
+       /* check if orig node is network coding enabled */
+       if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
+               goto out;
+
        /* accept ogms from 'good' neighbors and single hop neighbors */
        if (!batadv_can_nc_with_orig(bat_priv, orig_node, ogm_packet) &&
            !is_single_hop_neigh)
@@ -933,7 +1003,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
                                   struct batadv_nc_packet *nc_packet,
                                   struct batadv_neigh_node *neigh_node)
 {
-       uint8_t tq_weighted_neigh, tq_weighted_coding;
+       uint8_t tq_weighted_neigh, tq_weighted_coding, tq_tmp;
        struct sk_buff *skb_dest, *skb_src;
        struct batadv_unicast_packet *packet1;
        struct batadv_unicast_packet *packet2;
@@ -958,8 +1028,10 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
        if (!router_coding)
                goto out;
 
-       tq_weighted_neigh = batadv_nc_random_weight_tq(router_neigh->tq_avg);
-       tq_weighted_coding = batadv_nc_random_weight_tq(router_coding->tq_avg);
+       tq_tmp = batadv_nc_random_weight_tq(router_neigh->bat_iv.tq_avg);
+       tq_weighted_neigh = tq_tmp;
+       tq_tmp = batadv_nc_random_weight_tq(router_coding->bat_iv.tq_avg);
+       tq_weighted_coding = tq_tmp;
 
        /* Select one destination for the MAC-header dst-field based on
         * weighted TQ-values.
@@ -1721,12 +1793,13 @@ free_nc_packet:
 }
 
 /**
- * batadv_nc_free - clean up network coding memory
+ * batadv_nc_mesh_free - clean up network coding memory
  * @bat_priv: the bat priv with all the soft interface information
  */
-void batadv_nc_free(struct batadv_priv *bat_priv)
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
-       batadv_recv_handler_unregister(BATADV_CODED);
+       batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_NC, 1);
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_NC, 1);
        cancel_delayed_work_sync(&bat_priv->nc.work);
 
        batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
index 85a4ec81ad50bda26449cfdadbcaf28e62391b8b..d4fd315b5261904c1ca990578fc562b0bae41f89 100644 (file)
 
 #ifdef CONFIG_BATMAN_ADV_NC
 
-int batadv_nc_init(struct batadv_priv *bat_priv);
-void batadv_nc_free(struct batadv_priv *bat_priv);
+void batadv_nc_status_update(struct net_device *net_dev);
+int batadv_nc_init(void);
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
 void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
                              struct batadv_orig_node *orig_node,
                              struct batadv_orig_node *orig_neigh_node,
@@ -46,12 +48,21 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv);
 
 #else /* ifdef CONFIG_BATMAN_ADV_NC */
 
-static inline int batadv_nc_init(struct batadv_priv *bat_priv)
+static inline void batadv_nc_status_update(struct net_device *net_dev)
+{
+}
+
+static inline int batadv_nc_init(void)
+{
+       return 0;
+}
+
+static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 {
        return 0;
 }
 
-static inline void batadv_nc_free(struct batadv_priv *bat_priv)
+static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
        return;
 }
index f50553a7de629a411d94d307ba2ea54327e7b28f..8ab14340d10f6433c849face4852e4de1b5b9547 100644 (file)
 #include "routing.h"
 #include "gateway_client.h"
 #include "hard-interface.h"
-#include "unicast.h"
 #include "soft-interface.h"
 #include "bridge_loop_avoidance.h"
 #include "network-coding.h"
+#include "fragmentation.h"
 
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -36,7 +36,7 @@ static struct lock_class_key batadv_orig_hash_lock_class_key;
 static void batadv_purge_orig(struct work_struct *work);
 
 /* returns 1 if they are the same originator */
-static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
+int batadv_compare_orig(const struct hlist_node *node, const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_orig_node,
                                         hash_entry);
@@ -44,6 +44,88 @@ static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
+/**
+ * batadv_orig_node_vlan_get - get an orig_node_vlan object
+ * @orig_node: the originator serving the VLAN
+ * @vid: the VLAN identifier
+ *
+ * Returns the vlan object identified by vid and belonging to orig_node or NULL
+ * if it does not exist.
+ */
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+                         unsigned short vid)
+{
+       struct batadv_orig_node_vlan *vlan = NULL, *tmp;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
+               if (tmp->vid != vid)
+                       continue;
+
+               if (!atomic_inc_not_zero(&tmp->refcount))
+                       continue;
+
+               vlan = tmp;
+
+               break;
+       }
+       rcu_read_unlock();
+
+       return vlan;
+}
+
+/**
+ * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
+ *  object
+ * @orig_node: the originator serving the VLAN
+ * @vid: the VLAN identifier
+ *
+ * Returns NULL in case of failure or the vlan object identified by vid and
+ * belonging to orig_node otherwise. The object is created and added to the list
+ * if it does not exist.
+ *
+ * The object is returned with refcounter increased by 1.
+ */
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+                         unsigned short vid)
+{
+       struct batadv_orig_node_vlan *vlan;
+
+       spin_lock_bh(&orig_node->vlan_list_lock);
+
+       /* first look if an object for this vid already exists */
+       vlan = batadv_orig_node_vlan_get(orig_node, vid);
+       if (vlan)
+               goto out;
+
+       vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+       if (!vlan)
+               goto out;
+
+       atomic_set(&vlan->refcount, 2);
+       vlan->vid = vid;
+
+       list_add_rcu(&vlan->list, &orig_node->vlan_list);
+
+out:
+       spin_unlock_bh(&orig_node->vlan_list_lock);
+
+       return vlan;
+}
+
+/**
+ * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
+ *  the originator-vlan object
+ * @orig_vlan: the originator-vlan object to release
+ */
+void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
+{
+       if (atomic_dec_and_test(&orig_vlan->refcount))
+               kfree_rcu(orig_vlan, rcu);
+}
+
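A sketch of the intended get/put discipline for these per-originator vlan objects; both lookup helpers return the object with its refcount raised, so every successful call has to be balanced by a free_ref (the wrapper shown here is hypothetical).

static void my_touch_orig_vlan(struct batadv_orig_node *orig_node,
                               unsigned short vid)
{
        struct batadv_orig_node_vlan *vlan;

        vlan = batadv_orig_node_vlan_new(orig_node, vid);
        if (!vlan)
                return;

        /* ... use vlan->vid and the per-vlan state ... */

        batadv_orig_node_vlan_free_ref(vlan);
}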
 int batadv_originator_init(struct batadv_priv *bat_priv)
 {
        if (bat_priv->orig_hash)
@@ -90,11 +172,20 @@ batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
        return router;
 }
 
+/**
+ * batadv_neigh_node_new - create and init a new neigh_node object
+ * @hard_iface: the interface where the neighbour is connected to
+ * @neigh_addr: the mac address of the neighbour interface
+ * @orig_node: originator object representing the neighbour
+ *
+ * Allocates a new neigh_node object and initialises all the generic fields.
+ * Returns the new object or NULL on failure.
+ */
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-                     const uint8_t *neigh_addr)
+                     const uint8_t *neigh_addr,
+                     struct batadv_orig_node *orig_node)
 {
-       struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_neigh_node *neigh_node;
 
        neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
@@ -104,15 +195,14 @@ batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
        INIT_HLIST_NODE(&neigh_node->list);
 
        memcpy(neigh_node->addr, neigh_addr, ETH_ALEN);
-       spin_lock_init(&neigh_node->lq_update_lock);
+       neigh_node->if_incoming = hard_iface;
+       neigh_node->orig_node = orig_node;
+
+       INIT_LIST_HEAD(&neigh_node->bonding_list);
 
        /* extra reference for return */
        atomic_set(&neigh_node->refcount, 2);
 
-       batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                  "Creating new neighbor %pM on interface %s\n", neigh_addr,
-                  hard_iface->net_dev->name);
-
 out:
        return neigh_node;
 }
@@ -146,13 +236,15 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
 
-       batadv_frag_list_free(&orig_node->frag_list);
-       batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
+       batadv_frag_purge_orig(orig_node, NULL);
+
+       batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
                                  "originator timed out");
 
+       if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
+               orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
+
        kfree(orig_node->tt_buff);
-       kfree(orig_node->bcast_own);
-       kfree(orig_node->bcast_own_sum);
        kfree(orig_node);
 }
 
@@ -210,20 +302,22 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
        batadv_hash_destroy(hash);
 }
 
-/* this function finds or creates an originator entry for the given
- * address if it does not exits
+/**
+ * batadv_orig_node_new - creates a new orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the originator
+ *
+ * Creates a new originator object and initialises all the generic fields.
+ * The new object is not added to the originator list.
+ * Returns the newly created object or NULL on failure.
  */
-struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                              const uint8_t *addr)
 {
        struct batadv_orig_node *orig_node;
-       int size;
-       int hash_added;
+       struct batadv_orig_node_vlan *vlan;
        unsigned long reset_time;
-
-       orig_node = batadv_orig_hash_find(bat_priv, addr);
-       if (orig_node)
-               return orig_node;
+       int i;
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Creating new originator: %pM\n", addr);
@@ -234,10 +328,12 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
 
        INIT_HLIST_HEAD(&orig_node->neigh_list);
        INIT_LIST_HEAD(&orig_node->bond_list);
-       spin_lock_init(&orig_node->ogm_cnt_lock);
+       INIT_LIST_HEAD(&orig_node->vlan_list);
        spin_lock_init(&orig_node->bcast_seqno_lock);
        spin_lock_init(&orig_node->neigh_list_lock);
        spin_lock_init(&orig_node->tt_buff_lock);
+       spin_lock_init(&orig_node->tt_lock);
+       spin_lock_init(&orig_node->vlan_list_lock);
 
        batadv_nc_init_orig(orig_node);
 
@@ -249,43 +345,32 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
        memcpy(orig_node->orig, addr, ETH_ALEN);
        batadv_dat_init_orig_node_addr(orig_node);
        orig_node->router = NULL;
-       orig_node->tt_crc = 0;
        atomic_set(&orig_node->last_ttvn, 0);
        orig_node->tt_buff = NULL;
        orig_node->tt_buff_len = 0;
-       atomic_set(&orig_node->tt_size, 0);
        reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
        orig_node->bcast_seqno_reset = reset_time;
        orig_node->batman_seqno_reset = reset_time;
 
        atomic_set(&orig_node->bond_candidates, 0);
 
-       size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
-
-       orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
-       if (!orig_node->bcast_own)
+       /* create a vlan object for the "untagged" LAN */
+       vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
+       if (!vlan)
                goto free_orig_node;
+       /* batadv_orig_node_vlan_new() increases the refcounter.
+        * Immediately release vlan since it is not needed anymore in this
+        * context
+        */
+       batadv_orig_node_vlan_free_ref(vlan);
 
-       size = bat_priv->num_ifaces * sizeof(uint8_t);
-       orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);
-
-       INIT_LIST_HEAD(&orig_node->frag_list);
-       orig_node->last_frag_packet = 0;
-
-       if (!orig_node->bcast_own_sum)
-               goto free_bcast_own;
-
-       hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
-                                    batadv_choose_orig, orig_node,
-                                    &orig_node->hash_entry);
-       if (hash_added != 0)
-               goto free_bcast_own_sum;
+       for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
+               INIT_HLIST_HEAD(&orig_node->fragments[i].head);
+               spin_lock_init(&orig_node->fragments[i].lock);
+               orig_node->fragments[i].size = 0;
+       }
 
        return orig_node;
-free_bcast_own_sum:
-       kfree(orig_node->bcast_own_sum);
-free_bcast_own:
-       kfree(orig_node->bcast_own);
 free_orig_node:
        kfree(orig_node);
        return NULL;
@@ -294,15 +379,16 @@ free_orig_node:
 static bool
 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
                            struct batadv_orig_node *orig_node,
-                           struct batadv_neigh_node **best_neigh_node)
+                           struct batadv_neigh_node **best_neigh)
 {
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
        bool neigh_purged = false;
        unsigned long last_seen;
        struct batadv_hard_iface *if_incoming;
 
-       *best_neigh_node = NULL;
+       *best_neigh = NULL;
 
        spin_lock_bh(&orig_node->neigh_list_lock);
 
@@ -335,9 +421,12 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
                        batadv_bonding_candidate_del(orig_node, neigh_node);
                        batadv_neigh_node_free_ref(neigh_node);
                } else {
-                       if ((!*best_neigh_node) ||
-                           (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
-                               *best_neigh_node = neigh_node;
+                       /* store the best neighbor if this is the first
+                        * iteration or if a better neighbor has been found
+                        */
+                       if (!*best_neigh ||
+                           bao->bat_neigh_cmp(neigh_node, *best_neigh) > 0)
+                               *best_neigh = neigh_node;
                }
        }
 
@@ -388,17 +477,14 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
                hlist_for_each_entry_safe(orig_node, node_tmp,
                                          head, hash_entry) {
                        if (batadv_purge_orig_node(bat_priv, orig_node)) {
-                               if (orig_node->gw_flags)
-                                       batadv_gw_node_delete(bat_priv,
-                                                             orig_node);
+                               batadv_gw_node_delete(bat_priv, orig_node);
                                hlist_del_rcu(&orig_node->hash_entry);
                                batadv_orig_node_free_ref(orig_node);
                                continue;
                        }
 
-                       if (batadv_has_timed_out(orig_node->last_frag_packet,
-                                                BATADV_FRAG_TIMEOUT))
-                               batadv_frag_list_free(&orig_node->frag_list);
+                       batadv_frag_purge_orig(orig_node,
+                                              batadv_frag_check_entry);
                }
                spin_unlock_bh(list_lock);
        }
@@ -429,100 +515,26 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_head *head;
        struct batadv_hard_iface *primary_if;
-       struct batadv_orig_node *orig_node;
-       struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
-       int batman_count = 0;
-       int last_seen_secs;
-       int last_seen_msecs;
-       unsigned long last_seen_jiffies;
-       uint32_t i;
 
        primary_if = batadv_seq_print_text_primary_if_get(seq);
        if (!primary_if)
-               goto out;
+               return 0;
 
-       seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+       seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
                   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
-                  primary_if->net_dev->dev_addr, net_dev->name);
-       seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
-                  "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
-                  "Nexthop", "outgoingIF", "Potential nexthops");
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       neigh_node = batadv_orig_node_get_router(orig_node);
-                       if (!neigh_node)
-                               continue;
-
-                       if (neigh_node->tq_avg == 0)
-                               goto next;
-
-                       last_seen_jiffies = jiffies - orig_node->last_seen;
-                       last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
-                       last_seen_secs = last_seen_msecs / 1000;
-                       last_seen_msecs = last_seen_msecs % 1000;
-
-                       seq_printf(seq, "%pM %4i.%03is   (%3i) %pM [%10s]:",
-                                  orig_node->orig, last_seen_secs,
-                                  last_seen_msecs, neigh_node->tq_avg,
-                                  neigh_node->addr,
-                                  neigh_node->if_incoming->net_dev->name);
-
-                       hlist_for_each_entry_rcu(neigh_node_tmp,
-                                                &orig_node->neigh_list, list) {
-                               seq_printf(seq, " %pM (%3i)",
-                                          neigh_node_tmp->addr,
-                                          neigh_node_tmp->tq_avg);
-                       }
+                  primary_if->net_dev->dev_addr, net_dev->name,
+                  bat_priv->bat_algo_ops->name);
 
-                       seq_puts(seq, "\n");
-                       batman_count++;
+       batadv_hardif_free_ref(primary_if);
 
-next:
-                       batadv_neigh_node_free_ref(neigh_node);
-               }
-               rcu_read_unlock();
+       if (!bat_priv->bat_algo_ops->bat_orig_print) {
+               seq_puts(seq,
+                        "No printing function for this routing protocol\n");
+               return 0;
        }
 
-       if (batman_count == 0)
-               seq_puts(seq, "No batman nodes in range ...\n");
-
-out:
-       if (primary_if)
-               batadv_hardif_free_ref(primary_if);
-       return 0;
-}
-
-static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
-                                  int max_if_num)
-{
-       void *data_ptr;
-       size_t data_size, old_size;
-
-       data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
-       old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
-       data_ptr = kmalloc(data_size, GFP_ATOMIC);
-       if (!data_ptr)
-               return -ENOMEM;
-
-       memcpy(data_ptr, orig_node->bcast_own, old_size);
-       kfree(orig_node->bcast_own);
-       orig_node->bcast_own = data_ptr;
-
-       data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
-       if (!data_ptr)
-               return -ENOMEM;
-
-       memcpy(data_ptr, orig_node->bcast_own_sum,
-              (max_if_num - 1) * sizeof(uint8_t));
-       kfree(orig_node->bcast_own_sum);
-       orig_node->bcast_own_sum = data_ptr;
+       bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq);
 
        return 0;
 }
@@ -531,6 +543,7 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
@@ -545,10 +558,10 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       spin_lock_bh(&orig_node->ogm_cnt_lock);
-                       ret = batadv_orig_node_add_if(orig_node, max_if_num);
-                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
-
+                       ret = 0;
+                       if (bao->bat_orig_add_if)
+                               ret = bao->bat_orig_add_if(orig_node,
+                                                          max_if_num);
                        if (ret == -ENOMEM)
                                goto err;
                }
@@ -562,54 +575,6 @@ err:
        return -ENOMEM;
 }
 
-static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
-                                  int max_if_num, int del_if_num)
-{
-       void *data_ptr = NULL;
-       int chunk_size;
-
-       /* last interface was removed */
-       if (max_if_num == 0)
-               goto free_bcast_own;
-
-       chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
-       data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
-       if (!data_ptr)
-               return -ENOMEM;
-
-       /* copy first part */
-       memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
-
-       /* copy second part */
-       memcpy((char *)data_ptr + del_if_num * chunk_size,
-              orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
-              (max_if_num - del_if_num) * chunk_size);
-
-free_bcast_own:
-       kfree(orig_node->bcast_own);
-       orig_node->bcast_own = data_ptr;
-
-       if (max_if_num == 0)
-               goto free_own_sum;
-
-       data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
-       if (!data_ptr)
-               return -ENOMEM;
-
-       memcpy(data_ptr, orig_node->bcast_own_sum,
-              del_if_num * sizeof(uint8_t));
-
-       memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
-              orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
-              (max_if_num - del_if_num) * sizeof(uint8_t));
-
-free_own_sum:
-       kfree(orig_node->bcast_own_sum);
-       orig_node->bcast_own_sum = data_ptr;
-
-       return 0;
-}
-
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
 {
@@ -618,6 +583,7 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
        struct hlist_head *head;
        struct batadv_hard_iface *hard_iface_tmp;
        struct batadv_orig_node *orig_node;
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        uint32_t i;
        int ret;
 
@@ -629,11 +595,11 @@ int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       spin_lock_bh(&orig_node->ogm_cnt_lock);
-                       ret = batadv_orig_node_del_if(orig_node, max_if_num,
-                                                     hard_iface->if_num);
-                       spin_unlock_bh(&orig_node->ogm_cnt_lock);
-
+                       ret = 0;
+                       if (bao->bat_orig_del_if)
+                               ret = bao->bat_orig_del_if(orig_node,
+                                                          max_if_num,
+                                                          hard_iface->if_num);
                        if (ret == -ENOMEM)
                                goto err;
                }
index 7887b84a9af43adbff91cb8e3695b7f29c36a399..6f77d808a91672d77a48426cd5d391abf366bfb4 100644 (file)
 
 #include "hash.h"
 
+int batadv_compare_orig(const struct hlist_node *node, const void *data2);
 int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
 void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
-struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                              const uint8_t *addr);
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
-                     const uint8_t *neigh_addr);
+                     const uint8_t *neigh_addr,
+                     struct batadv_orig_node *orig_node);
 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
 struct batadv_neigh_node *
 batadv_orig_node_get_router(struct batadv_orig_node *orig_node);
@@ -40,6 +42,13 @@ int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num);
 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num);
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
+                         unsigned short vid);
+struct batadv_orig_node_vlan *
+batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
+                         unsigned short vid);
+void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan);
 
 
 /* hashfunction to choose an entry in a hash table of given size
index a51ccfc39da4144fa2ccee27b0e0afca741034dd..207459b62966d0975bca70894fcf5ce266a5da29 100644 (file)
 #ifndef _NET_BATMAN_ADV_PACKET_H_
 #define _NET_BATMAN_ADV_PACKET_H_
 
+/**
+ * enum batadv_packettype - types for batman-adv encapsulated packets
+ * @BATADV_IV_OGM: originator messages for B.A.T.M.A.N. IV
+ * @BATADV_BCAST: broadcast packets carrying broadcast payload
+ * @BATADV_CODED: network coded packets
+ *
+ * @BATADV_UNICAST: unicast packets carrying unicast payload traffic
+ * @BATADV_UNICAST_FRAG: unicast packets carrying a fragment of the original
+ *     payload packet
+ * @BATADV_UNICAST_4ADDR: unicast packet including the originator address of
+ *     the sender
+ * @BATADV_ICMP: unicast packet like IP ICMP used for ping or traceroute
+ * @BATADV_UNICAST_TVLV: unicast packet carrying TVLV containers
+ */
 enum batadv_packettype {
-       BATADV_IV_OGM           = 0x01,
-       BATADV_ICMP             = 0x02,
-       BATADV_UNICAST          = 0x03,
-       BATADV_BCAST            = 0x04,
-       BATADV_VIS              = 0x05,
-       BATADV_UNICAST_FRAG     = 0x06,
-       BATADV_TT_QUERY         = 0x07,
-       BATADV_ROAM_ADV         = 0x08,
-       BATADV_UNICAST_4ADDR    = 0x09,
-       BATADV_CODED            = 0x0a,
+       /* 0x00 - 0x3f: local packets or special rules for handling */
+       BATADV_IV_OGM           = 0x00,
+       BATADV_BCAST            = 0x01,
+       BATADV_CODED            = 0x02,
+       /* 0x40 - 0x7f: unicast */
+#define BATADV_UNICAST_MIN     0x40
+       BATADV_UNICAST          = 0x40,
+       BATADV_UNICAST_FRAG     = 0x41,
+       BATADV_UNICAST_4ADDR    = 0x42,
+       BATADV_ICMP             = 0x43,
+       BATADV_UNICAST_TVLV     = 0x44,
+#define BATADV_UNICAST_MAX     0x7f
+       /* 0x80 - 0xff: reserved */
 };
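The number spaces above let the receive path pick a handler by range instead of by exact value. A minimal sketch of such a check, using only the BATADV_UNICAST_MIN/MAX markers defined in the enum (the helper name is hypothetical and not part of this patch):

static inline bool batadv_packet_type_is_unicast(uint8_t packet_type)
{
	/* 0x40 - 0x7f covers every unicast type, including ones added later */
	return packet_type >= BATADV_UNICAST_MIN &&
	       packet_type <= BATADV_UNICAST_MAX;
}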
 
 /**
@@ -48,13 +65,21 @@ enum batadv_subtype {
 };
 
 /* this file is included by batctl which needs these defines */
-#define BATADV_COMPAT_VERSION 14
+#define BATADV_COMPAT_VERSION 15
 
+/**
+ * enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
+ * @BATADV_NOT_BEST_NEXT_HOP: flag is set when an ogm packet is forwarded and was
+ *     previously received from someone other than the best neighbor.
+ * @BATADV_PRIMARIES_FIRST_HOP: flag is set when the primary interface address
+ *     is used, and the packet travels its first hop.
+ * @BATADV_DIRECTLINK: flag is set for the first hop or if rebroadcast from a
+ *     one-hop neighbor on the interface where it was originally received.
+ */
 enum batadv_iv_flags {
-       BATADV_NOT_BEST_NEXT_HOP   = BIT(3),
-       BATADV_PRIMARIES_FIRST_HOP = BIT(4),
-       BATADV_VIS_SERVER          = BIT(5),
-       BATADV_DIRECTLINK          = BIT(6),
+       BATADV_NOT_BEST_NEXT_HOP   = BIT(0),
+       BATADV_PRIMARIES_FIRST_HOP = BIT(1),
+       BATADV_DIRECTLINK          = BIT(2),
 };
 
 /* ICMP message types */
@@ -66,43 +91,44 @@ enum batadv_icmp_packettype {
        BATADV_PARAMETER_PROBLEM       = 12,
 };
 
-/* vis defines */
-enum batadv_vis_packettype {
-       BATADV_VIS_TYPE_SERVER_SYNC   = 0,
-       BATADV_VIS_TYPE_CLIENT_UPDATE = 1,
-};
-
-/* fragmentation defines */
-enum batadv_unicast_frag_flags {
-       BATADV_UNI_FRAG_HEAD      = BIT(0),
-       BATADV_UNI_FRAG_LARGETAIL = BIT(1),
-};
+/* tt data subtypes */
+#define BATADV_TT_DATA_TYPE_MASK 0x0F
 
-/* TT_QUERY subtypes */
-#define BATADV_TT_QUERY_TYPE_MASK 0x3
-
-enum batadv_tt_query_packettype {
-       BATADV_TT_REQUEST  = 0,
-       BATADV_TT_RESPONSE = 1,
-};
-
-/* TT_QUERY flags */
-enum batadv_tt_query_flags {
-       BATADV_TT_FULL_TABLE = BIT(2),
+/**
+ * enum batadv_tt_data_flags - flags for tt data tvlv
+ * @BATADV_TT_OGM_DIFF: TT diff propagated through OGM
+ * @BATADV_TT_REQUEST: TT request message
+ * @BATADV_TT_RESPONSE: TT response message
+ * @BATADV_TT_FULL_TABLE: contains full table to replace existing table
+ */
+enum batadv_tt_data_flags {
+       BATADV_TT_OGM_DIFF   = BIT(0),
+       BATADV_TT_REQUEST    = BIT(1),
+       BATADV_TT_RESPONSE   = BIT(2),
+       BATADV_TT_FULL_TABLE = BIT(4),
 };
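Because BATADV_TT_DATA_TYPE_MASK is 0x0F, the message-type bits (OGM diff, request, response) can be combined with BATADV_TT_FULL_TABLE and separated again by masking. A short illustration as it might appear inside a hypothetical TT handler (not part of this patch):

	uint8_t flags = BATADV_TT_RESPONSE | BATADV_TT_FULL_TABLE; /* 0x04 | 0x10 */
	uint8_t type = flags & BATADV_TT_DATA_TYPE_MASK;    /* == BATADV_TT_RESPONSE */
	bool full_table = !!(flags & BATADV_TT_FULL_TABLE);  /* == true */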
 
 /* BATADV_TT_CLIENT flags.
  * Flags from BIT(0) to BIT(7) are sent on the wire, while flags from BIT(8) to
- * BIT(15) are used for local computation only
+ * BIT(15) are used for local computation only.
+ * Flags from BIT(4) to BIT(7) are kept in sync with the rest of the network.
  */
 enum batadv_tt_client_flags {
        BATADV_TT_CLIENT_DEL     = BIT(0),
        BATADV_TT_CLIENT_ROAM    = BIT(1),
-       BATADV_TT_CLIENT_WIFI    = BIT(2),
-       BATADV_TT_CLIENT_TEMP    = BIT(3),
+       BATADV_TT_CLIENT_WIFI    = BIT(4),
        BATADV_TT_CLIENT_NOPURGE = BIT(8),
        BATADV_TT_CLIENT_NEW     = BIT(9),
        BATADV_TT_CLIENT_PENDING = BIT(10),
+       BATADV_TT_CLIENT_TEMP    = BIT(11),
+};
+
+/**
+ * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field
+ * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
+ */
+enum batadv_vlan_flags {
+       BATADV_VLAN_HAS_TAG     = BIT(15),
 };
 
 /* claim frame types for the bridge loop avoidance */
@@ -113,6 +139,22 @@ enum batadv_bla_claimframe {
        BATADV_CLAIM_TYPE_REQUEST       = 0x03,
 };
 
+/**
+ * enum batadv_tvlv_type - tvlv type definitions
+ * @BATADV_TVLV_GW: gateway tvlv
+ * @BATADV_TVLV_DAT: distributed arp table tvlv
+ * @BATADV_TVLV_NC: network coding tvlv
+ * @BATADV_TVLV_TT: translation table tvlv
+ * @BATADV_TVLV_ROAM: roaming advertisement tvlv
+ */
+enum batadv_tvlv_type {
+       BATADV_TVLV_GW          = 0x01,
+       BATADV_TVLV_DAT         = 0x02,
+       BATADV_TVLV_NC          = 0x03,
+       BATADV_TVLV_TT          = 0x04,
+       BATADV_TVLV_ROAM        = 0x05,
+};
+
 /* the destination hardware field in the ARP frame is used to
  * transport the claim type and the group id
  */
@@ -131,47 +173,74 @@ struct batadv_header {
         */
 };
 
+/**
+ * struct batadv_ogm_packet - ogm (routing protocol) packet
+ * @header: common batman packet header
+ * @flags: contains routing relevant flags - see enum batadv_iv_flags
+ * @tvlv_len: length of tvlv data following the ogm header
+ */
 struct batadv_ogm_packet {
        struct batadv_header header;
-       uint8_t  flags;    /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
+       uint8_t  flags;
        __be32   seqno;
        uint8_t  orig[ETH_ALEN];
        uint8_t  prev_sender[ETH_ALEN];
-       uint8_t  gw_flags;  /* flags related to gateway class */
+       uint8_t  reserved;
        uint8_t  tq;
-       uint8_t  tt_num_changes;
-       uint8_t  ttvn; /* translation table version number */
-       __be16   tt_crc;
-} __packed;
+       __be16   tvlv_len;
+       /* __packed is not needed as the struct size is divisible by 4,
+        * and the largest data type in this struct has a size of 4.
+        */
+};
 
 #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
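The comment in the struct above argues that __packed can be dropped because the layout is naturally padding-free. That argument can also be asserted at build time; a sketch, assuming BUILD_BUG_ON from <linux/bug.h> and a call site in some init path (not part of this patch):

static void batadv_ogm_packet_size_check(void)
{
	/* 4 (header + flags) + 4 (seqno) + 6 + 6 (orig, prev_sender)
	 * + 1 + 1 (reserved, tq) + 2 (tvlv_len) = 24 bytes, no padding
	 */
	BUILD_BUG_ON(BATADV_OGM_HLEN != 24);
}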
 
-struct batadv_icmp_packet {
+/**
+ * struct batadv_icmp_header - common ICMP header
+ * @header: common batman header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
+ */
+struct batadv_icmp_header {
        struct batadv_header header;
        uint8_t  msg_type; /* see ICMP message types above */
        uint8_t  dst[ETH_ALEN];
        uint8_t  orig[ETH_ALEN];
-       __be16   seqno;
        uint8_t  uid;
+};
+
+/**
+ * struct batadv_icmp_packet - ICMP packet
+ * @icmph: common ICMP header
+ * @reserved: not used - useful for alignment
+ * @seqno: ICMP sequence number
+ */
+struct batadv_icmp_packet {
+       struct batadv_icmp_header icmph;
        uint8_t  reserved;
+       __be16   seqno;
 };
 
 #define BATADV_RR_LEN 16
 
-/* icmp_packet_rr must start with all fields from imcp_packet
- * as this is assumed by code that handles ICMP packets
+/**
+ * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
+ * @icmph: common ICMP header
+ * @rr_cur: number of entries in the rr array
+ * @seqno: ICMP sequence number
+ * @rr: route record array
  */
 struct batadv_icmp_packet_rr {
-       struct batadv_header header;
-       uint8_t  msg_type; /* see ICMP message types above */
-       uint8_t  dst[ETH_ALEN];
-       uint8_t  orig[ETH_ALEN];
-       __be16   seqno;
-       uint8_t  uid;
+       struct batadv_icmp_header icmph;
        uint8_t  rr_cur;
+       __be16   seqno;
        uint8_t  rr[BATADV_RR_LEN][ETH_ALEN];
 };
 
+#define BATADV_ICMP_MAX_PACKET_SIZE    sizeof(struct batadv_icmp_packet_rr)
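Since both ICMP layouts above embed struct batadv_icmp_header as their first member, a receive path can inspect msg_type before committing to the larger record-route layout; the routing.c changes later in this merge rely on exactly that. A hypothetical helper sketching the idea (not part of this patch):

static inline bool batadv_icmp_may_have_rr(const struct sk_buff *skb)
{
	const struct batadv_icmp_header *icmph;

	icmph = (const struct batadv_icmp_header *)skb->data;

	/* only echo request/reply packets carry the route record array */
	return (icmph->msg_type == BATADV_ECHO_REQUEST ||
		icmph->msg_type == BATADV_ECHO_REPLY) &&
	       skb->len >= BATADV_ICMP_MAX_PACKET_SIZE;
}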
+
 /* All packet headers in front of an ethernet header have to be completely
  * divisible by 2 but not by 4 to make the payload after the ethernet
  * header again 4 bytes boundary aligned.
@@ -209,15 +278,32 @@ struct batadv_unicast_4addr_packet {
         */
 };
 
-struct batadv_unicast_frag_packet {
-       struct batadv_header header;
-       uint8_t  ttvn; /* destination translation table version number */
-       uint8_t  dest[ETH_ALEN];
-       uint8_t  flags;
-       uint8_t  align;
-       uint8_t  orig[ETH_ALEN];
-       __be16   seqno;
-} __packed;
+/**
+ * struct batadv_frag_packet - fragmented packet
+ * @header: common batman packet header with type, compatversion, and ttl
+ * @dest: final destination used when routing fragments
+ * @orig: originator of the fragment used when merging the packet
+ * @no: fragment number within this sequence
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @total_size: size of the merged packet
+ */
+struct batadv_frag_packet {
+       struct  batadv_header header;
+#if defined(__BIG_ENDIAN_BITFIELD)
+       uint8_t no:4;
+       uint8_t reserved:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       uint8_t reserved:4;
+       uint8_t no:4;
+#else
+#error "unknown bitfield endianness"
+#endif
+       uint8_t dest[ETH_ALEN];
+       uint8_t orig[ETH_ALEN];
+       __be16  seqno;
+       __be16  total_size;
+};
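Two properties of the on-wire format above are worth spelling out: the 4-bit @no field limits a fragment chain to 16 pieces, and @seqno/@total_size travel in network byte order. A trivial accessor as a sketch (hypothetical helper, not part of this patch):

static inline uint16_t
batadv_frag_packet_total_size(const struct batadv_frag_packet *frag)
{
	/* size of the fully merged packet, converted from network order */
	return ntohs(frag->total_size);
}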
 
 struct batadv_bcast_packet {
        struct batadv_header header;
@@ -231,54 +317,6 @@ struct batadv_bcast_packet {
 
 #pragma pack()
 
-struct batadv_vis_packet {
-       struct batadv_header header;
-       uint8_t  vis_type;       /* which type of vis-participant sent this? */
-       __be32   seqno;          /* sequence number */
-       uint8_t  entries;        /* number of entries behind this struct */
-       uint8_t  reserved;
-       uint8_t  vis_orig[ETH_ALEN];    /* originator reporting its neighbors */
-       uint8_t  target_orig[ETH_ALEN]; /* who should receive this packet */
-       uint8_t  sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
-};
-
-struct batadv_tt_query_packet {
-       struct batadv_header header;
-       /* the flag field is a combination of:
-        * - TT_REQUEST or TT_RESPONSE
-        * - TT_FULL_TABLE
-        */
-       uint8_t  flags;
-       uint8_t  dst[ETH_ALEN];
-       uint8_t  src[ETH_ALEN];
-       /* the ttvn field is:
-        * if TT_REQUEST: ttvn that triggered the
-        *                request
-        * if TT_RESPONSE: new ttvn for the src
-        *                 orig_node
-        */
-       uint8_t  ttvn;
-       /* tt_data field is:
-        * if TT_REQUEST: crc associated with the
-        *                ttvn
-        * if TT_RESPONSE: table_size
-        */
-       __be16 tt_data;
-} __packed;
-
-struct batadv_roam_adv_packet {
-       struct batadv_header header;
-       uint8_t  reserved;
-       uint8_t  dst[ETH_ALEN];
-       uint8_t  src[ETH_ALEN];
-       uint8_t  client[ETH_ALEN];
-} __packed;
-
-struct batadv_tt_change {
-       uint8_t flags;
-       uint8_t addr[ETH_ALEN];
-} __packed;
-
 /**
  * struct batadv_coded_packet - network coded packet
  * @header: common batman packet header and ttl of first included packet
@@ -311,4 +349,96 @@ struct batadv_coded_packet {
        __be16   coded_len;
 };
 
+/**
+ * struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
+ * @header: common batman packet header
+ * @reserved: reserved field (for packet alignment)
+ * @src: address of the source
+ * @dst: address of the destination
+ * @tvlv_len: length of tvlv data following the unicast tvlv header
+ * @align: 2 bytes to align the header to a 4 byte boundary
+ */
+struct batadv_unicast_tvlv_packet {
+       struct batadv_header header;
+       uint8_t  reserved;
+       uint8_t  dst[ETH_ALEN];
+       uint8_t  src[ETH_ALEN];
+       __be16   tvlv_len;
+       uint16_t align;
+};
+
+/**
+ * struct batadv_tvlv_hdr - base tvlv header struct
+ * @type: tvlv container type (see batadv_tvlv_type)
+ * @version: tvlv container version
+ * @len: tvlv container length
+ */
+struct batadv_tvlv_hdr {
+       uint8_t type;
+       uint8_t version;
+       __be16  len;
+};
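TVLV containers with this header are laid out back to back in a buffer (see the tvlv_buff handling in routing.c further down in this merge). A minimal sketch of walking such a buffer, assuming tvlv_buff and tvlv_len come from the caller (not part of this patch):

static void batadv_tvlv_walk_example(unsigned char *tvlv_buff,
				     uint16_t tvlv_len)
{
	struct batadv_tvlv_hdr *tvlv_hdr;
	uint16_t len;

	while (tvlv_len >= sizeof(*tvlv_hdr)) {
		tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
		len = ntohs(tvlv_hdr->len);

		if (sizeof(*tvlv_hdr) + len > tvlv_len)
			break;	/* malformed container, stop parsing */

		/* tvlv_hdr->type selects the handler, e.g. BATADV_TVLV_TT */

		tvlv_buff += sizeof(*tvlv_hdr) + len;
		tvlv_len -= sizeof(*tvlv_hdr) + len;
	}
}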
+
+/**
+ * struct batadv_tvlv_gateway_data - gateway data propagated through gw tvlv
+ *  container
+ * @bandwidth_down: advertised uplink download bandwidth
+ * @bandwidth_up: advertised uplink upload bandwidth
+ */
+struct batadv_tvlv_gateway_data {
+       __be32 bandwidth_down;
+       __be32 bandwidth_up;
+};
+
+/**
+ * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
+ * @flags: translation table flags (see batadv_tt_data_flags)
+ * @ttvn: translation table version number
+ * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
+ *  one batadv_tvlv_tt_vlan_data object per announced vlan
+ */
+struct batadv_tvlv_tt_data {
+       uint8_t flags;
+       uint8_t ttvn;
+       __be16  num_vlan;
+};
+
+/**
+ * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through
+ *  the tt tvlv container
+ * @crc: crc32 checksum of the entries belonging to this vlan
+ * @vid: vlan identifier
+ * @reserved: unused, useful for alignment purposes
+ */
+struct batadv_tvlv_tt_vlan_data {
+       __be32  crc;
+       __be16  vid;
+       uint16_t reserved;
+};
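As documented above, a TT TVLV container is one batadv_tvlv_tt_data header followed by num_vlan batadv_tvlv_tt_vlan_data entries, so its expected length can be computed directly. A hypothetical helper (not part of this patch):

static inline size_t
batadv_tt_tvlv_len(const struct batadv_tvlv_tt_data *tt_data)
{
	/* num_vlan is carried in network byte order */
	return sizeof(*tt_data) +
	       ntohs(tt_data->num_vlan) *
	       sizeof(struct batadv_tvlv_tt_vlan_data);
}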
+
+/**
+ * struct batadv_tvlv_tt_change - translation table diff data
+ * @flags: status indicators concerning the non-mesh client (see
+ *  batadv_tt_client_flags)
+ * @reserved: reserved field
+ * @addr: mac address of non-mesh client that triggered this tt change
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_tt_change {
+       uint8_t flags;
+       uint8_t reserved;
+       uint8_t addr[ETH_ALEN];
+       __be16 vid;
+};
+
+/**
+ * struct batadv_tvlv_roam_adv - roaming advertisement
+ * @client: mac address of roaming client
+ * @vid: VLAN identifier
+ */
+struct batadv_tvlv_roam_adv {
+       uint8_t  client[ETH_ALEN];
+       __be16 vid;
+};
+
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
index 0439395d7ba5f3f11eea51ebc53e9760ff175111..d4114d775ad61f659b59437c67aa7054d9a88f92 100644 (file)
 #include "icmp_socket.h"
 #include "translation-table.h"
 #include "originator.h"
-#include "vis.h"
-#include "unicast.h"
 #include "bridge_loop_avoidance.h"
 #include "distributed-arp-table.h"
 #include "network-coding.h"
+#include "fragmentation.h"
+
+#include <linux/if_vlan.h>
 
 static int batadv_route_unicast_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if);
@@ -46,7 +47,7 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
        if ((curr_router) && (!neigh_node)) {
                batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
                           "Deleting route towards: %pM\n", orig_node->orig);
-               batadv_tt_global_del_orig(bat_priv, orig_node,
+               batadv_tt_global_del_orig(bat_priv, orig_node, -1,
                                          "Deleted route towards originator");
 
        /* route added */
@@ -114,9 +115,19 @@ out:
        return;
 }
 
-void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+/**
+ * batadv_bonding_candidate_add - consider a new link for bonding mode towards
+ *  the given originator
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the target node
+ * @neigh_node: the neighbor representing the new link to consider for bonding
+ *  mode
+ */
+void batadv_bonding_candidate_add(struct batadv_priv *bat_priv,
+                                 struct batadv_orig_node *orig_node,
                                  struct batadv_neigh_node *neigh_node)
 {
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
        uint8_t interference_candidate = 0;
 
@@ -131,8 +142,9 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
        if (!router)
                goto candidate_del;
 
+
        /* ... and is good enough to be considered */
-       if (neigh_node->tq_avg < router->tq_avg - BATADV_BONDING_TQ_THRESHOLD)
+       if (bao->bat_neigh_is_equiv_or_better(neigh_node, router))
                goto candidate_del;
 
        /* check if we have another candidate with the same mac address or
@@ -248,46 +260,65 @@ bool batadv_check_management_packet(struct sk_buff *skb,
        return true;
 }
 
+/**
+ * batadv_recv_my_icmp_packet - receive an icmp packet locally
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: icmp packet to process
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
 static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
-                                     struct sk_buff *skb, size_t icmp_len)
+                                     struct sk_buff *skb)
 {
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_orig_node *orig_node = NULL;
-       struct batadv_icmp_packet_rr *icmp_packet;
-       int ret = NET_RX_DROP;
+       struct batadv_icmp_header *icmph;
+       int res, ret = NET_RX_DROP;
 
-       icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+       icmph = (struct batadv_icmp_header *)skb->data;
 
-       /* add data to device queue */
-       if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
-               batadv_socket_receive_packet(icmp_packet, icmp_len);
-               goto out;
-       }
+       switch (icmph->msg_type) {
+       case BATADV_ECHO_REPLY:
+       case BATADV_DESTINATION_UNREACHABLE:
+       case BATADV_TTL_EXCEEDED:
+               /* receive the packet */
+               if (skb_linearize(skb) < 0)
+                       break;
 
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
+               batadv_socket_receive_packet(icmph, skb->len);
+               break;
+       case BATADV_ECHO_REQUEST:
+               /* answer echo request (ping) */
+               primary_if = batadv_primary_if_get_selected(bat_priv);
+               if (!primary_if)
+                       goto out;
 
-       /* answer echo request (ping) */
-       /* get routing information */
-       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
-       if (!orig_node)
-               goto out;
+               /* get routing information */
+               orig_node = batadv_orig_hash_find(bat_priv, icmph->orig);
+               if (!orig_node)
+                       goto out;
 
-       /* create a copy of the skb, if needed, to modify it. */
-       if (skb_cow(skb, ETH_HLEN) < 0)
-               goto out;
+               /* create a copy of the skb, if needed, to modify it. */
+               if (skb_cow(skb, ETH_HLEN) < 0)
+                       goto out;
 
-       icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+               icmph = (struct batadv_icmp_header *)skb->data;
 
-       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-       memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       icmp_packet->msg_type = BATADV_ECHO_REPLY;
-       icmp_packet->header.ttl = BATADV_TTL;
+               memcpy(icmph->dst, icmph->orig, ETH_ALEN);
+               memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
+               icmph->msg_type = BATADV_ECHO_REPLY;
+               icmph->header.ttl = BATADV_TTL;
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-               ret = NET_RX_SUCCESS;
+               res = batadv_send_skb_to_orig(skb, orig_node, NULL);
+               if (res != NET_XMIT_DROP)
+                       ret = NET_RX_SUCCESS;
 
+               break;
+       default:
+               /* drop unknown type */
+               goto out;
+       }
 out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
@@ -307,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
        icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
        /* send TTL exceeded if packet is an echo request (traceroute) */
-       if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+       if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
                pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
-                        icmp_packet->orig, icmp_packet->dst);
+                        icmp_packet->icmph.orig, icmp_packet->icmph.dst);
                goto out;
        }
 
@@ -318,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
                goto out;
 
        /* get routing information */
-       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
+       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
        if (!orig_node)
                goto out;
 
@@ -328,10 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
 
        icmp_packet = (struct batadv_icmp_packet *)skb->data;
 
-       memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-       memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
-       icmp_packet->header.ttl = BATADV_TTL;
+       memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
+       memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+              ETH_ALEN);
+       icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
+       icmp_packet->icmph.header.ttl = BATADV_TTL;
 
        if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = NET_RX_SUCCESS;
@@ -349,16 +381,13 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
                            struct batadv_hard_iface *recv_if)
 {
        struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_icmp_packet_rr *icmp_packet;
+       struct batadv_icmp_header *icmph;
+       struct batadv_icmp_packet_rr *icmp_packet_rr;
        struct ethhdr *ethhdr;
        struct batadv_orig_node *orig_node = NULL;
-       int hdr_size = sizeof(struct batadv_icmp_packet);
+       int hdr_size = sizeof(struct batadv_icmp_header);
        int ret = NET_RX_DROP;
 
-       /* we truncate all incoming icmp packets if they don't match our size */
-       if (skb->len >= sizeof(struct batadv_icmp_packet_rr))
-               hdr_size = sizeof(struct batadv_icmp_packet_rr);
-
        /* drop packet if it has not necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                goto out;
@@ -377,26 +406,39 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
        if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
                goto out;
 
-       icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+       icmph = (struct batadv_icmp_header *)skb->data;
 
        /* add record route information if not full */
-       if ((hdr_size == sizeof(struct batadv_icmp_packet_rr)) &&
-           (icmp_packet->rr_cur < BATADV_RR_LEN)) {
-               memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
+       if ((icmph->msg_type == BATADV_ECHO_REPLY ||
+            icmph->msg_type == BATADV_ECHO_REQUEST) &&
+           (skb->len >= sizeof(struct batadv_icmp_packet_rr))) {
+               if (skb_linearize(skb) < 0)
+                       goto out;
+
+               /* create a copy of the skb, if needed, to modify it. */
+               if (skb_cow(skb, ETH_HLEN) < 0)
+                       goto out;
+
+               icmph = (struct batadv_icmp_header *)skb->data;
+               icmp_packet_rr = (struct batadv_icmp_packet_rr *)icmph;
+               if (icmp_packet_rr->rr_cur >= BATADV_RR_LEN)
+                       goto out;
+
+               memcpy(&(icmp_packet_rr->rr[icmp_packet_rr->rr_cur]),
                       ethhdr->h_dest, ETH_ALEN);
-               icmp_packet->rr_cur++;
+               icmp_packet_rr->rr_cur++;
        }
 
        /* packet for me */
-       if (batadv_is_my_mac(bat_priv, icmp_packet->dst))
-               return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
+       if (batadv_is_my_mac(bat_priv, icmph->dst))
+               return batadv_recv_my_icmp_packet(bat_priv, skb);
 
        /* TTL exceeded */
-       if (icmp_packet->header.ttl < 2)
+       if (icmph->header.ttl < 2)
                return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
 
        /* get routing information */
-       orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
+       orig_node = batadv_orig_hash_find(bat_priv, icmph->dst);
        if (!orig_node)
                goto out;
 
@@ -404,10 +446,10 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
        if (skb_cow(skb, ETH_HLEN) < 0)
                goto out;
 
-       icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
+       icmph = (struct batadv_icmp_header *)skb->data;
 
        /* decrement ttl */
-       icmp_packet->header.ttl--;
+       icmph->header.ttl--;
 
        /* route it */
        if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -474,18 +516,25 @@ out:
        return router;
 }
 
-/* Interface Alternating: Use the best of the
- * remaining candidates which are not using
- * this interface.
+/**
+ * batadv_find_ifalter_router - find the best of the remaining candidates which
+ *  are not using this interface
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_orig: the destination
+ * @recv_if: the interface that the returned router must not use
  *
- * Increases the returned router's refcount
+ * Returns the best candidate towards primary_orig that is not using recv_if.
+ * Increases the returned neighbor's refcount
  */
 static struct batadv_neigh_node *
-batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
+batadv_find_ifalter_router(struct batadv_priv *bat_priv,
+                          struct batadv_orig_node *primary_orig,
                           const struct batadv_hard_iface *recv_if)
 {
-       struct batadv_neigh_node *tmp_neigh_node;
        struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
+       struct batadv_neigh_node *tmp_neigh_node;
 
        rcu_read_lock();
        list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -497,7 +546,7 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
                if (tmp_neigh_node->if_incoming == recv_if)
                        continue;
 
-               if (router && tmp_neigh_node->tq_avg <= router->tq_avg)
+               if (router && bao->bat_neigh_cmp(tmp_neigh_node, router))
                        continue;
 
                if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
@@ -557,126 +606,6 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
        return 0;
 }
 
-int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
-{
-       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_tt_query_packet *tt_query;
-       uint16_t tt_size;
-       int hdr_size = sizeof(*tt_query);
-       char tt_flag;
-       size_t packet_size;
-
-       if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
-               return NET_RX_DROP;
-
-       /* I could need to modify it */
-       if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
-               goto out;
-
-       tt_query = (struct batadv_tt_query_packet *)skb->data;
-
-       switch (tt_query->flags & BATADV_TT_QUERY_TYPE_MASK) {
-       case BATADV_TT_REQUEST:
-               batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
-
-               /* If we cannot provide an answer the tt_request is
-                * forwarded
-                */
-               if (!batadv_send_tt_response(bat_priv, tt_query)) {
-                       if (tt_query->flags & BATADV_TT_FULL_TABLE)
-                               tt_flag = 'F';
-                       else
-                               tt_flag = '.';
-
-                       batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Routing TT_REQUEST to %pM [%c]\n",
-                                  tt_query->dst,
-                                  tt_flag);
-                       return batadv_route_unicast_packet(skb, recv_if);
-               }
-               break;
-       case BATADV_TT_RESPONSE:
-               batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
-
-               if (batadv_is_my_mac(bat_priv, tt_query->dst)) {
-                       /* packet needs to be linearized to access the TT
-                        * changes
-                        */
-                       if (skb_linearize(skb) < 0)
-                               goto out;
-                       /* skb_linearize() possibly changed skb->data */
-                       tt_query = (struct batadv_tt_query_packet *)skb->data;
-
-                       tt_size = batadv_tt_len(ntohs(tt_query->tt_data));
-
-                       /* Ensure we have all the claimed data */
-                       packet_size = sizeof(struct batadv_tt_query_packet);
-                       packet_size += tt_size;
-                       if (unlikely(skb_headlen(skb) < packet_size))
-                               goto out;
-
-                       batadv_handle_tt_response(bat_priv, tt_query);
-               } else {
-                       if (tt_query->flags & BATADV_TT_FULL_TABLE)
-                               tt_flag =  'F';
-                       else
-                               tt_flag = '.';
-                       batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Routing TT_RESPONSE to %pM [%c]\n",
-                                  tt_query->dst,
-                                  tt_flag);
-                       return batadv_route_unicast_packet(skb, recv_if);
-               }
-               break;
-       }
-
-out:
-       /* returning NET_RX_DROP will make the caller function kfree the skb */
-       return NET_RX_DROP;
-}
-
-int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
-{
-       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_roam_adv_packet *roam_adv_packet;
-       struct batadv_orig_node *orig_node;
-
-       if (batadv_check_unicast_packet(bat_priv, skb,
-                                       sizeof(*roam_adv_packet)) < 0)
-               goto out;
-
-       batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
-
-       roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
-
-       if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst))
-               return batadv_route_unicast_packet(skb, recv_if);
-
-       /* check if it is a backbone gateway. we don't accept
-        * roaming advertisement from it, as it has the same
-        * entries as we have.
-        */
-       if (batadv_bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
-               goto out;
-
-       orig_node = batadv_orig_hash_find(bat_priv, roam_adv_packet->src);
-       if (!orig_node)
-               goto out;
-
-       batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Received ROAMING_ADV from %pM (client %pM)\n",
-                  roam_adv_packet->src, roam_adv_packet->client);
-
-       batadv_tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
-                            BATADV_TT_CLIENT_ROAM,
-                            atomic_read(&orig_node->last_ttvn) + 1);
-
-       batadv_orig_node_free_ref(orig_node);
-out:
-       /* returning NET_RX_DROP will make the caller function kfree the skb */
-       return NET_RX_DROP;
-}
-
 /* find a suitable router for this originator, and use
  * bonding if possible. increases the found neighbors
  * refcount.
@@ -751,7 +680,8 @@ batadv_find_router(struct batadv_priv *bat_priv,
        if (bonding_enabled)
                router = batadv_find_bond_router(primary_orig_node, recv_if);
        else
-               router = batadv_find_ifalter_router(primary_orig_node, recv_if);
+               router = batadv_find_ifalter_router(bat_priv, primary_orig_node,
+                                                   recv_if);
 
 return_router:
        if (router && router->if_incoming->if_status != BATADV_IF_ACTIVE)
@@ -772,11 +702,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
 {
        struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct batadv_orig_node *orig_node = NULL;
-       struct batadv_neigh_node *neigh_node = NULL;
        struct batadv_unicast_packet *unicast_packet;
        struct ethhdr *ethhdr = eth_hdr(skb);
        int res, hdr_len, ret = NET_RX_DROP;
-       struct sk_buff *new_skb;
 
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
@@ -793,46 +721,12 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
        if (!orig_node)
                goto out;
 
-       /* find_router() increases neigh_nodes refcount if found. */
-       neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
-
-       if (!neigh_node)
-               goto out;
-
        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, ETH_HLEN) < 0)
                goto out;
 
-       unicast_packet = (struct batadv_unicast_packet *)skb->data;
-
-       if (unicast_packet->header.packet_type == BATADV_UNICAST &&
-           atomic_read(&bat_priv->fragmentation) &&
-           skb->len > neigh_node->if_incoming->net_dev->mtu) {
-               ret = batadv_frag_send_skb(skb, bat_priv,
-                                          neigh_node->if_incoming,
-                                          neigh_node->addr);
-               goto out;
-       }
-
-       if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
-           batadv_frag_can_reassemble(skb,
-                                      neigh_node->if_incoming->net_dev->mtu)) {
-               ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
-
-               if (ret == NET_RX_DROP)
-                       goto out;
-
-               /* packet was buffered for late merge */
-               if (!new_skb) {
-                       ret = NET_RX_SUCCESS;
-                       goto out;
-               }
-
-               skb = new_skb;
-               unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       }
-
        /* decrement ttl */
+       unicast_packet = (struct batadv_unicast_packet *)skb->data;
        unicast_packet->header.ttl--;
 
        switch (unicast_packet->header.packet_type) {
@@ -867,8 +761,6 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
        }
 
 out:
-       if (neigh_node)
-               batadv_neigh_node_free_ref(neigh_node);
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
        return ret;
@@ -879,6 +771,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @unicast_packet: the unicast header to be updated
  * @dst_addr: the payload destination
+ * @vid: VLAN identifier
  *
  * Search the translation table for dst_addr and update the unicast header with
  * the new corresponding information (originator address where the destination
@@ -889,21 +782,22 @@ out:
 static bool
 batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
                              struct batadv_unicast_packet *unicast_packet,
-                             uint8_t *dst_addr)
+                             uint8_t *dst_addr, unsigned short vid)
 {
        struct batadv_orig_node *orig_node = NULL;
        struct batadv_hard_iface *primary_if = NULL;
        bool ret = false;
        uint8_t *orig_addr, orig_ttvn;
 
-       if (batadv_is_my_client(bat_priv, dst_addr)) {
+       if (batadv_is_my_client(bat_priv, dst_addr, vid)) {
                primary_if = batadv_primary_if_get_selected(bat_priv);
                if (!primary_if)
                        goto out;
                orig_addr = primary_if->net_dev->dev_addr;
                orig_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
        } else {
-               orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr);
+               orig_node = batadv_transtable_search(bat_priv, NULL, dst_addr,
+                                                    vid);
                if (!orig_node)
                        goto out;
 
@@ -930,11 +824,12 @@ out:
 
 static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
                                     struct sk_buff *skb, int hdr_len) {
-       uint8_t curr_ttvn, old_ttvn;
+       struct batadv_unicast_packet *unicast_packet;
+       struct batadv_hard_iface *primary_if;
        struct batadv_orig_node *orig_node;
+       uint8_t curr_ttvn, old_ttvn;
        struct ethhdr *ethhdr;
-       struct batadv_hard_iface *primary_if;
-       struct batadv_unicast_packet *unicast_packet;
+       unsigned short vid;
        int is_old_ttvn;
 
        /* check if there is enough data before accessing it */
@@ -946,6 +841,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
                return 0;
 
        unicast_packet = (struct batadv_unicast_packet *)skb->data;
+       vid = batadv_get_vid(skb, hdr_len);
        ethhdr = (struct ethhdr *)(skb->data + hdr_len);
 
        /* check if the destination client was served by this node and it is now
@@ -953,9 +849,9 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         * message and that it knows the new destination in the mesh to re-route
         * the packet to
         */
-       if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest)) {
+       if (batadv_tt_local_client_is_roaming(bat_priv, ethhdr->h_dest, vid)) {
                if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
-                                                 ethhdr->h_dest))
+                                                 ethhdr->h_dest, vid))
                        net_ratelimited_function(batadv_dbg, BATADV_DBG_TT,
                                                 bat_priv,
                                                 "Rerouting unicast packet to %pM (dst=%pM): Local Roaming\n",
@@ -1001,7 +897,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         * target host
         */
        if (batadv_reroute_unicast_packet(bat_priv, unicast_packet,
-                                         ethhdr->h_dest)) {
+                                         ethhdr->h_dest, vid)) {
                net_ratelimited_function(batadv_dbg, BATADV_DBG_TT, bat_priv,
                                         "Rerouting unicast packet to %pM (dst=%pM): TTVN mismatch old_ttvn=%u new_ttvn=%u\n",
                                         unicast_packet->dest, ethhdr->h_dest,
@@ -1013,7 +909,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
         * currently served by this node or there is no destination at all and
         * it is possible to drop the packet
         */
-       if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
+       if (!batadv_is_my_client(bat_priv, ethhdr->h_dest, vid))
                return 0;
 
        /* update the header in order to let the packet be delivered to this
@@ -1032,6 +928,34 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
        return 1;
 }
 
+/**
+ * batadv_recv_unhandled_unicast_packet - receive and process packets which
+ *     are in the unicast number space but not yet known to the implementation
+ * @skb: unicast packet to process
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
+                                        struct batadv_hard_iface *recv_if)
+{
+       struct batadv_unicast_packet *unicast_packet;
+       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+       int check, hdr_size = sizeof(*unicast_packet);
+
+       check = batadv_check_unicast_packet(bat_priv, skb, hdr_size);
+       if (check < 0)
+               return NET_RX_DROP;
+
+       /* we don't know about this type, drop it. */
+       unicast_packet = (struct batadv_unicast_packet *)skb->data;
+       if (batadv_is_my_mac(bat_priv, unicast_packet->dest))
+               return NET_RX_DROP;
+
+       return batadv_route_unicast_packet(skb, recv_if);
+}
+
 int batadv_recv_unicast_packet(struct sk_buff *skb,
                               struct batadv_hard_iface *recv_if)
 {
@@ -1094,51 +1018,112 @@ rx_success:
        return batadv_route_unicast_packet(skb, recv_if);
 }
 
-int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
-                                 struct batadv_hard_iface *recv_if)
+/**
+ * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
+ * @skb: unicast tvlv packet to process
+ * @recv_if: pointer to interface this packet was received on
+ *
+ * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
+ */
+int batadv_recv_unicast_tvlv(struct sk_buff *skb,
+                            struct batadv_hard_iface *recv_if)
 {
        struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       struct batadv_unicast_frag_packet *unicast_packet;
-       int hdr_size = sizeof(*unicast_packet);
-       struct sk_buff *new_skb = NULL;
-       int ret;
+       struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
+       unsigned char *tvlv_buff;
+       uint16_t tvlv_buff_len;
+       int hdr_size = sizeof(*unicast_tvlv_packet);
+       int ret = NET_RX_DROP;
 
        if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)
                return NET_RX_DROP;
 
-       if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
+       /* the header is likely to be modified while forwarding */
+       if (skb_cow(skb, hdr_size) < 0)
                return NET_RX_DROP;
 
-       unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
+       /* packet needs to be linearized to access the tvlv content */
+       if (skb_linearize(skb) < 0)
+               return NET_RX_DROP;
 
-       /* packet for me */
-       if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
-               ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
+       unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)skb->data;
 
-               if (ret == NET_RX_DROP)
-                       return NET_RX_DROP;
+       tvlv_buff = (unsigned char *)(skb->data + hdr_size);
+       tvlv_buff_len = ntohs(unicast_tvlv_packet->tvlv_len);
 
-               /* packet was buffered for late merge */
-               if (!new_skb)
-                       return NET_RX_SUCCESS;
+       if (tvlv_buff_len > skb->len - hdr_size)
+               return NET_RX_DROP;
 
-               if (batadv_dat_snoop_incoming_arp_request(bat_priv, new_skb,
-                                                         hdr_size))
-                       goto rx_success;
-               if (batadv_dat_snoop_incoming_arp_reply(bat_priv, new_skb,
-                                                       hdr_size))
-                       goto rx_success;
+       ret = batadv_tvlv_containers_process(bat_priv, false, NULL,
+                                            unicast_tvlv_packet->src,
+                                            unicast_tvlv_packet->dst,
+                                            tvlv_buff, tvlv_buff_len);
 
-               batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
-                                   sizeof(struct batadv_unicast_packet), NULL);
+       if (ret != NET_RX_SUCCESS)
+               ret = batadv_route_unicast_packet(skb, recv_if);
 
-rx_success:
-               return NET_RX_SUCCESS;
+       return ret;
+}
+
+/**
+ * batadv_recv_frag_packet - process received fragment
+ * @skb: the received fragment
+ * @recv_if: interface that the skb is received on
+ *
+ * This function does one of three things: 1) forward the fragment, if the
+ * assembled packet would exceed our MTU; 2) buffer the fragment, if we still
+ * lack further fragments; 3) merge the fragments, if we have all needed parts.
+ *
+ * Returns NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
+ */
+int batadv_recv_frag_packet(struct sk_buff *skb,
+                           struct batadv_hard_iface *recv_if)
+{
+       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+       struct batadv_orig_node *orig_node_src = NULL;
+       struct batadv_frag_packet *frag_packet;
+       int ret = NET_RX_DROP;
+
+       if (batadv_check_unicast_packet(bat_priv, skb,
+                                       sizeof(*frag_packet)) < 0)
+               goto out;
+
+       frag_packet = (struct batadv_frag_packet *)skb->data;
+       orig_node_src = batadv_orig_hash_find(bat_priv, frag_packet->orig);
+       if (!orig_node_src)
+               goto out;
+
+       /* Route the fragment if it is not for us and too big to be merged. */
+       if (!batadv_is_my_mac(bat_priv, frag_packet->dest) &&
+           batadv_frag_skb_fwd(skb, recv_if, orig_node_src)) {
+               ret = NET_RX_SUCCESS;
+               goto out;
        }
 
-       return batadv_route_unicast_packet(skb, recv_if);
-}
+       batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
+       batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
+
+       /* Add fragment to buffer and merge if possible. */
+       if (!batadv_frag_skb_buffer(&skb, orig_node_src))
+               goto out;
 
+       /* Deliver merged packet to the appropriate handler, if it was
+        * merged
+        */
+       if (skb)
+               batadv_batman_skb_recv(skb, recv_if->net_dev,
+                                      &recv_if->batman_adv_ptype, NULL);
+
+       ret = NET_RX_SUCCESS;
+
+out:
+       if (orig_node_src)
+               batadv_orig_node_free_ref(orig_node_src);
+
+       return ret;
+}
 
 int batadv_recv_bcast_packet(struct sk_buff *skb,
                             struct batadv_hard_iface *recv_if)
@@ -1240,53 +1225,3 @@ out:
                batadv_orig_node_free_ref(orig_node);
        return ret;
 }
-
-int batadv_recv_vis_packet(struct sk_buff *skb,
-                          struct batadv_hard_iface *recv_if)
-{
-       struct batadv_vis_packet *vis_packet;
-       struct ethhdr *ethhdr;
-       struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
-       int hdr_size = sizeof(*vis_packet);
-
-       /* keep skb linear */
-       if (skb_linearize(skb) < 0)
-               return NET_RX_DROP;
-
-       if (unlikely(!pskb_may_pull(skb, hdr_size)))
-               return NET_RX_DROP;
-
-       vis_packet = (struct batadv_vis_packet *)skb->data;
-       ethhdr = eth_hdr(skb);
-
-       /* not for me */
-       if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))
-               return NET_RX_DROP;
-
-       /* ignore own packets */
-       if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig))
-               return NET_RX_DROP;
-
-       if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig))
-               return NET_RX_DROP;
-
-       switch (vis_packet->vis_type) {
-       case BATADV_VIS_TYPE_SERVER_SYNC:
-               batadv_receive_server_sync_packet(bat_priv, vis_packet,
-                                                 skb_headlen(skb));
-               break;
-
-       case BATADV_VIS_TYPE_CLIENT_UPDATE:
-               batadv_receive_client_update_packet(bat_priv, vis_packet,
-                                                   skb_headlen(skb));
-               break;
-
-       default:        /* ignore unknown packet */
-               break;
-       }
-
-       /* We take a copy of the data in the packet, so we should
-        * always free the skbuf.
-        */
-       return NET_RX_DROP;
-}
index 72a29bde201022300194422a9caff389ab409731..19544ddb81b5abf417b69484c096aff239989a10 100644 (file)
@@ -30,23 +30,26 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
                            struct batadv_hard_iface *recv_if);
 int batadv_recv_unicast_packet(struct sk_buff *skb,
                               struct batadv_hard_iface *recv_if);
-int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
-                                 struct batadv_hard_iface *recv_if);
+int batadv_recv_frag_packet(struct sk_buff *skb,
+                           struct batadv_hard_iface *iface);
 int batadv_recv_bcast_packet(struct sk_buff *skb,
                             struct batadv_hard_iface *recv_if);
-int batadv_recv_vis_packet(struct sk_buff *skb,
-                          struct batadv_hard_iface *recv_if);
 int batadv_recv_tt_query(struct sk_buff *skb,
                         struct batadv_hard_iface *recv_if);
 int batadv_recv_roam_adv(struct sk_buff *skb,
                         struct batadv_hard_iface *recv_if);
+int batadv_recv_unicast_tvlv(struct sk_buff *skb,
+                            struct batadv_hard_iface *recv_if);
+int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
+                                        struct batadv_hard_iface *recv_if);
 struct batadv_neigh_node *
 batadv_find_router(struct batadv_priv *bat_priv,
                   struct batadv_orig_node *orig_node,
                   const struct batadv_hard_iface *recv_if);
 void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
                                  struct batadv_neigh_node *neigh_node);
-void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+void batadv_bonding_candidate_add(struct batadv_priv *bat_priv,
+                                 struct batadv_orig_node *orig_node,
                                  struct batadv_neigh_node *neigh_node);
 void batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
                                 struct batadv_orig_node *orig_neigh_node,
index 0266edd0fa7f7acb0be56f896de95ba7775108f6..c83be5ebaa285dffe64d051850e87f2fca02624b 100644 (file)
 #include "translation-table.h"
 #include "soft-interface.h"
 #include "hard-interface.h"
-#include "vis.h"
 #include "gateway_common.h"
+#include "gateway_client.h"
 #include "originator.h"
 #include "network-coding.h"
-
-#include <linux/if_ether.h>
+#include "fragmentation.h"
 
 static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
 
@@ -64,10 +63,10 @@ int batadv_send_skb_packet(struct sk_buff *skb,
        ethhdr = eth_hdr(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
-       ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
+       ethhdr->h_proto = htons(ETH_P_BATMAN);
 
        skb_set_network_header(skb, ETH_HLEN);
-       skb->protocol = __constant_htons(ETH_P_BATMAN);
+       skb->protocol = htons(ETH_P_BATMAN);
 
        skb->dev = hard_iface->net_dev;
 
@@ -109,7 +108,19 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
        /* batadv_find_router() increases neigh_nodes refcount if found. */
        neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
        if (!neigh_node)
-               return ret;
+               goto out;
+
+       /* Check if the skb is too large to send in one piece and fragment
+        * it if needed.
+        */
+       if (atomic_read(&bat_priv->fragmentation) &&
+           skb->len > neigh_node->if_incoming->net_dev->mtu) {
+               /* Fragment and send packet. */
+               if (batadv_frag_send_packet(skb, orig_node, neigh_node))
+                       ret = NET_XMIT_SUCCESS;
+
+               goto out;
+       }
 
        /* try to network code the packet, if it is received on an interface
         * (i.e. being forwarded). If the packet originates from this node or if
@@ -123,11 +134,225 @@ int batadv_send_skb_to_orig(struct sk_buff *skb,
                ret = NET_XMIT_SUCCESS;
        }
 
-       batadv_neigh_node_free_ref(neigh_node);
+out:
+       if (neigh_node)
+               batadv_neigh_node_free_ref(neigh_node);
+
+       return ret;
+}
+
+/**
+ * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
+ *  common fields for unicast packets
+ * @skb: the skb carrying the unicast header to initialize
+ * @hdr_size: amount of bytes to push at the beginning of the skb
+ * @orig_node: the destination node
+ *
+ * Returns false if the buffer extension was not possible or true otherwise.
+ */
+static bool
+batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
+                                 struct batadv_orig_node *orig_node)
+{
+       struct batadv_unicast_packet *unicast_packet;
+       uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+
+       if (batadv_skb_head_push(skb, hdr_size) < 0)
+               return false;
+
+       unicast_packet = (struct batadv_unicast_packet *)skb->data;
+       unicast_packet->header.version = BATADV_COMPAT_VERSION;
+       /* batman packet type: unicast */
+       unicast_packet->header.packet_type = BATADV_UNICAST;
+       /* set unicast ttl */
+       unicast_packet->header.ttl = BATADV_TTL;
+       /* copy the destination for faster routing */
+       memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
+       /* set the destination tt version number */
+       unicast_packet->ttvn = ttvn;
+
+       return true;
+}
+
+/**
+ * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
+ * @skb: the skb containing the payload to encapsulate
+ * @orig_node: the destination node
+ *
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ */
+static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
+                                           struct batadv_orig_node *orig_node)
+{
+       size_t uni_size = sizeof(struct batadv_unicast_packet);
+
+       return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
+}
+
+/**
+ * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
+ *  unicast 4addr header
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the skb containing the payload to encapsulate
+ * @orig: the destination node
+ * @packet_subtype: the unicast 4addr packet subtype to use
+ *
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ */
+bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
+                                          struct sk_buff *skb,
+                                          struct batadv_orig_node *orig,
+                                          int packet_subtype)
+{
+       struct batadv_hard_iface *primary_if;
+       struct batadv_unicast_4addr_packet *uc_4addr_packet;
+       bool ret = false;
+
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               goto out;
+
+       /* Pull the header space and fill the unicast_packet substructure.
+        * We can do that because the first member of the uc_4addr_packet
+        * is of type struct unicast_packet
+        */
+       if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
+                                              orig))
+               goto out;
+
+       uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
+       uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+       memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
+       uc_4addr_packet->subtype = packet_subtype;
+       uc_4addr_packet->reserved = 0;
+
+       ret = true;
+out:
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
+       return ret;
+}
+
+/**
+ * batadv_send_skb_unicast - encapsulate and send an skb via unicast
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
+ *  4addr packets)
+ * @orig_node: the originator to send the packet to
+ * @vid: the vid to be used to search the translation table
+ *
+ * Wrap the given skb into a batman-adv unicast or unicast-4addr header
+ * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
+ * as packet_type. Then send this frame to the given orig_node and release a
+ * reference to this orig_node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
+                                  struct sk_buff *skb, int packet_type,
+                                  int packet_subtype,
+                                  struct batadv_orig_node *orig_node,
+                                  unsigned short vid)
+{
+       struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+       struct batadv_unicast_packet *unicast_packet;
+       int ret = NET_XMIT_DROP;
+
+       if (!orig_node)
+               goto out;
+
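+       /* push and fill the batman-adv header that matches the requested type */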
+       switch (packet_type) {
+       case BATADV_UNICAST:
+               if (!batadv_send_skb_prepare_unicast(skb, orig_node))
+                       goto out;
+               break;
+       case BATADV_UNICAST_4ADDR:
+               if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
+                                                          orig_node,
+                                                          packet_subtype))
+                       goto out;
+               break;
+       default:
+               /* this function supports UNICAST and UNICAST_4ADDR only. It
+                * should never be invoked with any other packet type
+                */
+               goto out;
+       }
+
+       unicast_packet = (struct batadv_unicast_packet *)skb->data;
+
+       /* inform the destination node that we are still missing a correct route
+        * for this client. The destination will receive this packet and will
+        * try to reroute it because the ttvn contained in the header is less
+        * than the current one
+        */
+       if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
+               unicast_packet->ttvn = unicast_packet->ttvn - 1;
 
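+       /* hand the encapsulated frame to the routing code for transmission */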
+       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
+               ret = NET_XMIT_SUCCESS;
+
+out:
+       if (orig_node)
+               batadv_orig_node_free_ref(orig_node);
+       if (ret == NET_XMIT_DROP)
+               kfree_skb(skb);
        return ret;
 }
 
+/**
+ * batadv_send_skb_via_tt_generic - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @packet_type: the batman unicast packet type to use
+ * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
+ *  4addr packets)
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
+ * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
+ * to the corresponding destination node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
+                                  struct sk_buff *skb, int packet_type,
+                                  int packet_subtype, unsigned short vid)
+{
+       struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+       struct batadv_orig_node *orig_node;
+
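+       /* resolve the destination mac address to an originator via the TT */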
+       orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+                                            ethhdr->h_dest, vid);
+       return batadv_send_skb_unicast(bat_priv, skb, packet_type,
+                                      packet_subtype, orig_node, vid);
+}
+
+/**
+ * batadv_send_skb_via_gw - send an skb via gateway lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: payload to send
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the currently selected gateway. Wrap the given skb into a batman-adv
+ * unicast header and send this frame to this gateway node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                          unsigned short vid)
+{
+       struct batadv_orig_node *orig_node;
+
+       orig_node = batadv_gw_get_selected_orig(bat_priv);
+       return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
+                                      orig_node, vid);
+}
+
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
index e7b17880fca4f46ef77783034e58ae1eeb00b641..aa2e2537a739297f76afa54b30d87d69976d35d2 100644 (file)
@@ -34,5 +34,58 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work);
 void
 batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                                 const struct batadv_hard_iface *hard_iface);
+bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
+                                          struct sk_buff *skb,
+                                          struct batadv_orig_node *orig_node,
+                                          int packet_subtype);
+int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
+                                  struct sk_buff *skb, int packet_type,
+                                  int packet_subtype, unsigned short vid);
+int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
+                          unsigned short vid);
+
+/**
+ * batadv_send_skb_via_tt - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast header. Then send this frame to the corresponding destination node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
+                                        struct sk_buff *skb,
+                                        unsigned short vid)
+{
+       return batadv_send_skb_via_tt_generic(bat_priv, skb, BATADV_UNICAST, 0,
+                                             vid);
+}
+
+/**
+ * batadv_send_skb_via_tt_4addr - send an skb via TT lookup
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: the payload to send
+ * @packet_subtype: the unicast 4addr packet subtype to use
+ * @vid: the vid to be used to search the translation table
+ *
+ * Look up the recipient node for the destination address in the ethernet
+ * header via the translation table. Wrap the given skb into a batman-adv
+ * unicast-4addr header. Then send this frame to the corresponding destination
+ * node.
+ *
+ * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ */
+static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv,
+                                              struct sk_buff *skb,
+                                              int packet_subtype,
+                                              unsigned short vid)
+{
+       return batadv_send_skb_via_tt_generic(bat_priv, skb,
+                                             BATADV_UNICAST_4ADDR,
+                                             packet_subtype, vid);
+}
 
 #endif /* _NET_BATMAN_ADV_SEND_H_ */
index 813db4e646021dea4c089d53ea65504a63ff0d2d..36f050876f8260245a008de079cf0974fa97b76e 100644 (file)
@@ -34,8 +34,6 @@
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
-#include <linux/if_ether.h>
-#include "unicast.h"
 #include "bridge_loop_avoidance.h"
 #include "network-coding.h"
 
@@ -120,9 +118,10 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
 
        /* only modify transtable if it has been initialized before */
        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
-               batadv_tt_local_remove(bat_priv, old_addr,
+               batadv_tt_local_remove(bat_priv, old_addr, BATADV_NO_FLAGS,
                                       "mac address changed", false);
-               batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
+               batadv_tt_local_add(dev, addr->sa_data, BATADV_NO_FLAGS,
+                                   BATADV_NULL_IFINDEX);
        }
 
        return 0;
@@ -139,36 +138,48 @@ static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
+/**
+ * batadv_interface_set_rx_mode - set the rx mode of a device
+ * @dev: registered network device to modify
+ *
+ * We do not actually need to set any rx filters for the virtual batman
+ * soft interface. However, a dummy handler enables a user to set static
+ * multicast listeners, for instance.
+ */
+static void batadv_interface_set_rx_mode(struct net_device *dev)
+{
+}
+
 static int batadv_interface_tx(struct sk_buff *skb,
                               struct net_device *soft_iface)
 {
-       struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+       struct ethhdr *ethhdr;
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_bcast_packet *bcast_packet;
-       struct vlan_ethhdr *vhdr;
-       __be16 ethertype = __constant_htons(ETH_P_BATMAN);
+       __be16 ethertype = htons(ETH_P_BATMAN);
        static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
                                                   0x00, 0x00};
        static const uint8_t ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
                                                    0x00, 0x00};
+       struct vlan_ethhdr *vhdr;
        unsigned int header_len = 0;
        int data_len = skb->len, ret;
-       unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
-       bool do_bcast = false;
-       uint32_t seqno;
        unsigned long brd_delay = 1;
+       bool do_bcast = false, client_added;
+       unsigned short vid;
+       uint32_t seqno;
 
        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto dropped;
 
        soft_iface->trans_start = jiffies;
+       vid = batadv_get_vid(skb, 0);
+       ethhdr = (struct ethhdr *)skb->data;
 
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_8021Q:
                vhdr = (struct vlan_ethhdr *)skb->data;
-               vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-               vid |= BATADV_VLAN_HAS_TAG;
 
                if (vhdr->h_vlan_encapsulated_proto != ethertype)
                        break;
@@ -185,8 +196,12 @@ static int batadv_interface_tx(struct sk_buff *skb,
        ethhdr = (struct ethhdr *)skb->data;
 
        /* Register the client MAC in the transtable */
-       if (!is_multicast_ether_addr(ethhdr->h_source))
-               batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+       if (!is_multicast_ether_addr(ethhdr->h_source)) {
+               client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
+                                                  vid, skb->skb_iif);
+               if (!client_added)
+                       goto dropped;
+       }
 
        /* don't accept stp packets. STP does not help in meshes.
         * better use the bridge loop avoidance ...
@@ -286,8 +301,12 @@ static int batadv_interface_tx(struct sk_buff *skb,
 
                batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
 
-               ret = batadv_unicast_send_skb(bat_priv, skb);
-               if (ret != 0)
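+               /* multicast frames are handed to the selected gateway while
+                * unicast frames are resolved via a translation table lookup
+                */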
+               if (is_multicast_ether_addr(ethhdr->h_dest))
+                       ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
+               else
+                       ret = batadv_send_skb_via_tt(bat_priv, skb, vid);
+
+               if (ret == NET_XMIT_DROP)
                        goto dropped_freed;
        }
 
@@ -309,12 +328,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
                         struct sk_buff *skb, struct batadv_hard_iface *recv_if,
                         int hdr_size, struct batadv_orig_node *orig_node)
 {
+       struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
-       struct ethhdr *ethhdr;
+       __be16 ethertype = htons(ETH_P_BATMAN);
        struct vlan_ethhdr *vhdr;
-       struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
-       unsigned short vid __maybe_unused = BATADV_NO_FLAGS;
-       __be16 ethertype = __constant_htons(ETH_P_BATMAN);
+       struct ethhdr *ethhdr;
+       unsigned short vid;
        bool is_bcast;
 
        is_bcast = (batadv_header->packet_type == BATADV_BCAST);
@@ -326,13 +345,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
        skb_pull_rcsum(skb, hdr_size);
        skb_reset_mac_header(skb);
 
+       vid = batadv_get_vid(skb, hdr_size);
        ethhdr = eth_hdr(skb);
 
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_8021Q:
                vhdr = (struct vlan_ethhdr *)skb->data;
-               vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
-               vid |= BATADV_VLAN_HAS_TAG;
 
                if (vhdr->h_vlan_encapsulated_proto != ethertype)
                        break;
@@ -368,9 +386,10 @@ void batadv_interface_rx(struct net_device *soft_iface,
 
        if (orig_node)
                batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
-                                                    ethhdr->h_source);
+                                                    ethhdr->h_source, vid);
 
-       if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
+       if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest,
+                                 vid))
                goto dropped;
 
        netif_rx(skb);
@@ -382,6 +401,177 @@ out:
        return;
 }
 
+/**
+ * batadv_softif_vlan_free_ref - decrease the vlan object refcounter and
+ *  possibly free it
+ * @softif_vlan: the vlan object to release
+ */
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+{
+       if (atomic_dec_and_test(&softif_vlan->refcount))
+               kfree_rcu(softif_vlan, rcu);
+}
+
+/**
+ * batadv_softif_vlan_get - get the vlan object for a specific vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the identifier of the vlan object to retrieve
+ *
+ * Returns the private data of the vlan matching the vid passed as argument or
+ * NULL otherwise. The refcounter of the returned object is incremented by 1.
+ */
+struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
+                                                 unsigned short vid)
+{
+       struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
+               if (vlan_tmp->vid != vid)
+                       continue;
+
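+               /* skip the vlan if its refcount already dropped to zero */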
+               if (!atomic_inc_not_zero(&vlan_tmp->refcount))
+                       continue;
+
+               vlan = vlan_tmp;
+               break;
+       }
+       rcu_read_unlock();
+
+       return vlan;
+}
+
+/**
+ * batadv_softif_create_vlan - allocate the needed resources for a new vlan
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ *
+ * Returns 0 on success, a negative error otherwise.
+ */
+int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
+{
+       struct batadv_softif_vlan *vlan;
+       int err;
+
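+       /* bail out if a vlan object for this vid already exists */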
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (vlan) {
+               batadv_softif_vlan_free_ref(vlan);
+               return -EEXIST;
+       }
+
+       vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+       if (!vlan)
+               return -ENOMEM;
+
+       vlan->vid = vid;
+       atomic_set(&vlan->refcount, 1);
+
+       atomic_set(&vlan->ap_isolation, 0);
+
+       err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+       if (err) {
+               kfree(vlan);
+               return err;
+       }
+
+       /* add a new TT local entry. This one will be marked with the NOPURGE
+        * flag
+        */
+       batadv_tt_local_add(bat_priv->soft_iface,
+                           bat_priv->soft_iface->dev_addr, vid,
+                           BATADV_NULL_IFINDEX);
+
+       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+       return 0;
+}
+
+/**
+ * batadv_softif_destroy_vlan - remove and destroy a softif_vlan object
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: the object to remove
+ */
+static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
+                                      struct batadv_softif_vlan *vlan)
+{
+       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+       hlist_del_rcu(&vlan->list);
+       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+       batadv_sysfs_del_vlan(bat_priv, vlan);
+
+       /* explicitly remove the associated TT local entry because it is marked
+        * with the NOPURGE flag
+        */
+       batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
+                              vlan->vid, "vlan interface destroyed", false);
+
+       batadv_softif_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_interface_add_vid - ndo_vlan_rx_add_vid API implementation
+ * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan id (only ETH_P_8021Q is supported)
+ * @vid: identifier of the new vlan
+ *
+ * Set up all the internal structures for handling the new vlan on top of the
+ * mesh interface
+ *
+ * Returns 0 on success or a negative error code in case of failure.
+ */
+static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
+                                   unsigned short vid)
+{
+       struct batadv_priv *bat_priv = netdev_priv(dev);
+
+       /* only 802.1Q vlans are supported.
+        * batman-adv does not know how to handle other types
+        */
+       if (proto != htons(ETH_P_8021Q))
+               return -EINVAL;
+
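+       /* mark the vid as tagged so it cannot clash with the untagged vlan */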
+       vid |= BATADV_VLAN_HAS_TAG;
+
+       return batadv_softif_create_vlan(bat_priv, vid);
+}
+
+/**
+ * batadv_interface_kill_vid - ndo_vlan_rx_kill_vid API implementation
+ * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan id (only ETH_P_8021Q is supported)
+ * @vid: identifier of the deleted vlan
+ *
+ * Destroy all the internal structures used to handle the vlan identified by vid
+ * on top of the mesh interface
+ *
+ * Returns 0 on success, -EINVAL if the specified protocol is not ETH_P_8021Q
+ * or -ENOENT if the specified vlan id wasn't registered.
+ */
+static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
+                                    unsigned short vid)
+{
+       struct batadv_priv *bat_priv = netdev_priv(dev);
+       struct batadv_softif_vlan *vlan;
+
+       /* only 802.1Q vlans are supported. batman-adv does not know how to
+        * handle other types
+        */
+       if (proto != htons(ETH_P_8021Q))
+               return -EINVAL;
+
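+       /* batadv_softif_vlan_get() takes a reference which is released below */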
+       vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
+       if (!vlan)
+               return -ENOENT;
+
+       batadv_softif_destroy_vlan(bat_priv, vlan);
+
+       /* finally free the vlan object */
+       batadv_softif_vlan_free_ref(vlan);
+
+       return 0;
+}
+
 /* batman-adv network devices have devices nesting below it and are a special
  * "super class" of normal network devices; split their locks off into a
  * separate class since they always nest.
@@ -421,6 +611,7 @@ static void batadv_set_lockdep_class(struct net_device *dev)
  */
 static void batadv_softif_destroy_finish(struct work_struct *work)
 {
+       struct batadv_softif_vlan *vlan;
        struct batadv_priv *bat_priv;
        struct net_device *soft_iface;
 
@@ -428,6 +619,13 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
                                cleanup_work);
        soft_iface = bat_priv->soft_iface;
 
+       /* destroy the "untagged" VLAN */
+       vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
+       if (vlan) {
+               batadv_softif_destroy_vlan(bat_priv, vlan);
+               batadv_softif_vlan_free_ref(vlan);
+       }
+
        batadv_sysfs_del_meshif(soft_iface);
 
        rtnl_lock();
@@ -444,6 +642,7 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
 static int batadv_softif_init_late(struct net_device *dev)
 {
        struct batadv_priv *bat_priv;
+       uint32_t random_seqno;
        int ret;
        size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
 
@@ -468,17 +667,17 @@ static int batadv_softif_init_late(struct net_device *dev)
 #ifdef CONFIG_BATMAN_ADV_DAT
        atomic_set(&bat_priv->distributed_arp_table, 1);
 #endif
-       atomic_set(&bat_priv->ap_isolation, 0);
-       atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
        atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
        atomic_set(&bat_priv->gw_sel_class, 20);
-       atomic_set(&bat_priv->gw_bandwidth, 41);
+       atomic_set(&bat_priv->gw.bandwidth_down, 100);
+       atomic_set(&bat_priv->gw.bandwidth_up, 20);
        atomic_set(&bat_priv->orig_interval, 1000);
        atomic_set(&bat_priv->hop_penalty, 30);
 #ifdef CONFIG_BATMAN_ADV_DEBUG
        atomic_set(&bat_priv->log_level, 0);
 #endif
        atomic_set(&bat_priv->fragmentation, 1);
+       atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
        atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
        atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
 
@@ -493,6 +692,10 @@ static int batadv_softif_init_late(struct net_device *dev)
        bat_priv->tt.last_changeset = NULL;
        bat_priv->tt.last_changeset_len = 0;
 
+       /* randomize initial seqno to avoid collision */
+       get_random_bytes(&random_seqno, sizeof(random_seqno));
+       atomic_set(&bat_priv->frag_seqno, random_seqno);
+
        bat_priv->primary_if = NULL;
        bat_priv->num_ifaces = 0;
 
@@ -578,8 +781,11 @@ static const struct net_device_ops batadv_netdev_ops = {
        .ndo_open = batadv_interface_open,
        .ndo_stop = batadv_interface_release,
        .ndo_get_stats = batadv_interface_stats,
+       .ndo_vlan_rx_add_vid = batadv_interface_add_vid,
+       .ndo_vlan_rx_kill_vid = batadv_interface_kill_vid,
        .ndo_set_mac_address = batadv_interface_set_mac_addr,
        .ndo_change_mtu = batadv_interface_change_mtu,
+       .ndo_set_rx_mode = batadv_interface_set_rx_mode,
        .ndo_start_xmit = batadv_interface_tx,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_add_slave = batadv_softif_slave_add,
@@ -616,6 +822,7 @@ static void batadv_softif_init_early(struct net_device *dev)
 
        dev->netdev_ops = &batadv_netdev_ops;
        dev->destructor = batadv_softif_free;
+       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
        dev->tx_queue_len = 0;
 
        /* can't call min_mtu, because the needed variables
@@ -623,7 +830,7 @@ static void batadv_softif_init_early(struct net_device *dev)
         */
        dev->mtu = ETH_DATA_LEN;
        /* reserve more space in the skbuff for our header */
-       dev->hard_header_len = BATADV_HEADER_LEN;
+       dev->hard_header_len = batadv_max_header_len();
 
        /* generate random address */
        eth_hw_addr_random(dev);
@@ -760,6 +967,12 @@ static const struct {
        { "mgmt_tx_bytes" },
        { "mgmt_rx" },
        { "mgmt_rx_bytes" },
+       { "frag_tx" },
+       { "frag_tx_bytes" },
+       { "frag_rx" },
+       { "frag_rx_bytes" },
+       { "frag_fwd" },
+       { "frag_fwd_bytes" },
        { "tt_request_tx" },
        { "tt_request_rx" },
        { "tt_response_tx" },
index 2f2472c2ea0d91aae4b50314ee1171928c5024ee..06fc91ff5a021bf584db73e75624e7d13d2560f7 100644 (file)
@@ -28,5 +28,9 @@ struct net_device *batadv_softif_create(const char *name);
 void batadv_softif_destroy_sysfs(struct net_device *soft_iface);
 int batadv_softif_is_valid(const struct net_device *net_dev);
 extern struct rtnl_link_ops batadv_link_ops;
+int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid);
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan);
+struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
+                                                 unsigned short vid);
 
 #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
index 4114b961bc2c8400acc6bd776e2732313674b0e4..6335433310aff37a0438bc61367ce142d96cb07f 100644 (file)
 #include "sysfs.h"
 #include "translation-table.h"
 #include "distributed-arp-table.h"
+#include "network-coding.h"
 #include "originator.h"
 #include "hard-interface.h"
+#include "soft-interface.h"
 #include "gateway_common.h"
 #include "gateway_client.h"
-#include "vis.h"
 
 static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
 {
@@ -39,6 +40,53 @@ static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
        return netdev_priv(net_dev);
 }
 
+/**
+ * batadv_vlan_kobj_to_batpriv - convert a vlan kobj into the associated batpriv
+ * @obj: kobject to convert
+ *
+ * Returns the associated batadv_priv struct.
+ */
+static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
+{
+       /* VLAN specific attributes are located in the root sysfs folder if they
+        * refer to the untagged VLAN..
+        */
+       if (!strcmp(BATADV_SYSFS_IF_MESH_SUBDIR, obj->name))
+               return batadv_kobj_to_batpriv(obj);
+
+       /* ..while the attributes for the tagged vlans are located in
+        * the corresponding "vlan%VID" subfolder
+        */
+       return batadv_kobj_to_batpriv(obj->parent);
+}
+
+/**
+ * batadv_kobj_to_vlan - convert a kobj into the associated softif_vlan struct
+ * @bat_priv: the bat priv with all the soft interface information
+ * @obj: kobject to convert
+ *
+ * Returns the associated softif_vlan struct if found, NULL otherwise.
+ */
+static struct batadv_softif_vlan *
+batadv_kobj_to_vlan(struct batadv_priv *bat_priv, struct kobject *obj)
+{
+       struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
+               if (vlan_tmp->kobj != obj)
+                       continue;
+
+               if (!atomic_inc_not_zero(&vlan_tmp->refcount))
+                       continue;
+
+               vlan = vlan_tmp;
+               break;
+       }
+       rcu_read_unlock();
+
+       return vlan;
+}
+
 #define BATADV_UEV_TYPE_VAR    "BATTYPE="
 #define BATADV_UEV_ACTION_VAR  "BATACTION="
 #define BATADV_UEV_DATA_VAR    "BATDATA="
@@ -53,6 +101,15 @@ static char *batadv_uev_type_str[] = {
        "gw"
 };
 
+/* Use this, if you have customized show and store functions for vlan attrs */
+#define BATADV_ATTR_VLAN(_name, _mode, _show, _store)  \
+struct batadv_attribute batadv_attr_vlan_##_name = {   \
+       .attr = {.name = __stringify(_name),            \
+                .mode = _mode },                       \
+       .show   = _show,                                \
+       .store  = _store,                               \
+};
+
 /* Use this, if you have customized show and store functions */
 #define BATADV_ATTR(_name, _mode, _show, _store)       \
 struct batadv_attribute batadv_attr_##_name = {                \
@@ -122,6 +179,41 @@ ssize_t batadv_show_##_name(struct kobject *kobj,                  \
        static BATADV_ATTR(_name, _mode, batadv_show_##_name,           \
                           batadv_store_##_name)
 
+#define BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func)                 \
+ssize_t batadv_store_vlan_##_name(struct kobject *kobj,                        \
+                                 struct attribute *attr, char *buff,   \
+                                 size_t count)                         \
+{                                                                      \
+       struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
+       struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv, \
+                                                             kobj);    \
+       size_t res = __batadv_store_bool_attr(buff, count, _post_func,  \
+                                             attr, &vlan->_name,       \
+                                             bat_priv->soft_iface);    \
+       batadv_softif_vlan_free_ref(vlan);                              \
+       return res;                                                     \
+}
+
+#define BATADV_ATTR_VLAN_SHOW_BOOL(_name)                              \
+ssize_t batadv_show_vlan_##_name(struct kobject *kobj,                 \
+                                struct attribute *attr, char *buff)    \
+{                                                                      \
+       struct batadv_priv *bat_priv = batadv_vlan_kobj_to_batpriv(kobj);\
+       struct batadv_softif_vlan *vlan = batadv_kobj_to_vlan(bat_priv, \
+                                                             kobj);    \
+       size_t res = sprintf(buff, "%s\n",                              \
+                            atomic_read(&vlan->_name) == 0 ?           \
+                            "disabled" : "enabled");                   \
+       batadv_softif_vlan_free_ref(vlan);                              \
+       return res;                                                     \
+}
+
+/* Use this, if you are going to turn a [name] in the vlan struct on or off */
+#define BATADV_ATTR_VLAN_BOOL(_name, _mode, _post_func)                        \
+       static BATADV_ATTR_VLAN_STORE_BOOL(_name, _post_func)           \
+       static BATADV_ATTR_VLAN_SHOW_BOOL(_name)                        \
+       static BATADV_ATTR_VLAN(_name, _mode, batadv_show_vlan_##_name, \
+                               batadv_store_vlan_##_name)
 
 static int batadv_store_bool_attr(char *buff, size_t count,
                                  struct net_device *net_dev,
@@ -230,74 +322,6 @@ __batadv_store_uint_attr(const char *buff, size_t count,
        return ret;
 }
 
-static ssize_t batadv_show_vis_mode(struct kobject *kobj,
-                                   struct attribute *attr, char *buff)
-{
-       struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-       int vis_mode = atomic_read(&bat_priv->vis_mode);
-       const char *mode;
-
-       if (vis_mode == BATADV_VIS_TYPE_CLIENT_UPDATE)
-               mode = "client";
-       else
-               mode = "server";
-
-       return sprintf(buff, "%s\n", mode);
-}
-
-static ssize_t batadv_store_vis_mode(struct kobject *kobj,
-                                    struct attribute *attr, char *buff,
-                                    size_t count)
-{
-       struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
-       struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       unsigned long val;
-       int ret, vis_mode_tmp = -1;
-       const char *old_mode, *new_mode;
-
-       ret = kstrtoul(buff, 10, &val);
-
-       if (((count == 2) && (!ret) &&
-            (val == BATADV_VIS_TYPE_CLIENT_UPDATE)) ||
-           (strncmp(buff, "client", 6) == 0) ||
-           (strncmp(buff, "off", 3) == 0))
-               vis_mode_tmp = BATADV_VIS_TYPE_CLIENT_UPDATE;
-
-       if (((count == 2) && (!ret) &&
-            (val == BATADV_VIS_TYPE_SERVER_SYNC)) ||
-           (strncmp(buff, "server", 6) == 0))
-               vis_mode_tmp = BATADV_VIS_TYPE_SERVER_SYNC;
-
-       if (vis_mode_tmp < 0) {
-               if (buff[count - 1] == '\n')
-                       buff[count - 1] = '\0';
-
-               batadv_info(net_dev,
-                           "Invalid parameter for 'vis mode' setting received: %s\n",
-                           buff);
-               return -EINVAL;
-       }
-
-       if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
-               return count;
-
-       if (atomic_read(&bat_priv->vis_mode) == BATADV_VIS_TYPE_CLIENT_UPDATE)
-               old_mode =  "client";
-       else
-               old_mode = "server";
-
-       if (vis_mode_tmp == BATADV_VIS_TYPE_CLIENT_UPDATE)
-               new_mode =  "client";
-       else
-               new_mode = "server";
-
-       batadv_info(net_dev, "Changing vis mode from: %s to: %s\n", old_mode,
-                   new_mode);
-
-       atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
-       return count;
-}
-
 static ssize_t batadv_show_bat_algo(struct kobject *kobj,
                                    struct attribute *attr, char *buff)
 {
@@ -390,6 +414,7 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
         */
        batadv_gw_check_client_stop(bat_priv);
        atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
+       batadv_gw_tvlv_container_update(bat_priv);
        return count;
 }
 
@@ -397,15 +422,13 @@ static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
                                     struct attribute *attr, char *buff)
 {
        struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
-       int down, up;
-       int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
-
-       batadv_gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
-       return sprintf(buff, "%i%s/%i%s\n",
-                      (down > 2048 ? down / 1024 : down),
-                      (down > 2048 ? "MBit" : "KBit"),
-                      (up > 2048 ? up / 1024 : up),
-                      (up > 2048 ? "MBit" : "KBit"));
+       uint32_t down, up;
+
+       down = atomic_read(&bat_priv->gw.bandwidth_down);
+       up = atomic_read(&bat_priv->gw.bandwidth_up);
+
+       return sprintf(buff, "%u.%u/%u.%u MBit\n", down / 10,
+                      down % 10, up / 10, up % 10);
 }
 
 static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
@@ -426,12 +449,10 @@ BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
 BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
-BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR, NULL);
+BATADV_ATTR_SIF_BOOL(distributed_arp_table, S_IRUGO | S_IWUSR,
+                    batadv_dat_status_update);
 #endif
 BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
-BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
-static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode,
-                  batadv_store_vis_mode);
 static BATADV_ATTR(routing_algo, S_IRUGO, batadv_show_bat_algo, NULL);
 static BATADV_ATTR(gw_mode, S_IRUGO | S_IWUSR, batadv_show_gw_mode,
                   batadv_store_gw_mode);
@@ -447,7 +468,8 @@ static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
 BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
 #endif
 #ifdef CONFIG_BATMAN_ADV_NC
-BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR, NULL);
+BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR,
+                    batadv_nc_status_update);
 #endif
 
 static struct batadv_attribute *batadv_mesh_attrs[] = {
@@ -460,8 +482,6 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
        &batadv_attr_distributed_arp_table,
 #endif
        &batadv_attr_fragmentation,
-       &batadv_attr_ap_isolation,
-       &batadv_attr_vis_mode,
        &batadv_attr_routing_algo,
        &batadv_attr_gw_mode,
        &batadv_attr_orig_interval,
@@ -477,6 +497,16 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
        NULL,
 };
 
+BATADV_ATTR_VLAN_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
+
+/**
+ * batadv_vlan_attrs - array of vlan specific sysfs attributes
+ */
+static struct batadv_attribute *batadv_vlan_attrs[] = {
+       &batadv_attr_vlan_ap_isolation,
+       NULL,
+};
+
 int batadv_sysfs_add_meshif(struct net_device *dev)
 {
        struct kobject *batif_kobject = &dev->dev.kobj;
@@ -527,6 +557,80 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
        bat_priv->mesh_obj = NULL;
 }
 
+/**
+ * batadv_sysfs_add_vlan - add all the needed sysfs objects for the new vlan
+ * @dev: netdev of the mesh interface
+ * @vlan: private data of the newly added VLAN interface
+ *
+ * Returns 0 on success and -ENOMEM if any of the structure allocations fails.
+ */
+int batadv_sysfs_add_vlan(struct net_device *dev,
+                         struct batadv_softif_vlan *vlan)
+{
+       char vlan_subdir[sizeof(BATADV_SYSFS_VLAN_SUBDIR_PREFIX) + 5];
+       struct batadv_priv *bat_priv = netdev_priv(dev);
+       struct batadv_attribute **bat_attr;
+       int err;
+
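+       /* tagged vlans get their own "vlan%VID" subfolder while the untagged
+        * vlan reuses the mesh root folder
+        */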
+       if (vlan->vid & BATADV_VLAN_HAS_TAG) {
+               sprintf(vlan_subdir, BATADV_SYSFS_VLAN_SUBDIR_PREFIX "%hu",
+                       vlan->vid & VLAN_VID_MASK);
+
+               vlan->kobj = kobject_create_and_add(vlan_subdir,
+                                                   bat_priv->mesh_obj);
+               if (!vlan->kobj) {
+                       batadv_err(dev, "Can't add sysfs directory: %s/%s\n",
+                                  dev->name, vlan_subdir);
+                       goto out;
+               }
+       } else {
+               /* the untagged VLAN uses the root folder to store its "VLAN
+                * specific attributes"
+                */
+               vlan->kobj = bat_priv->mesh_obj;
+               kobject_get(bat_priv->mesh_obj);
+       }
+
+       for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr) {
+               err = sysfs_create_file(vlan->kobj,
+                                       &((*bat_attr)->attr));
+               if (err) {
+                       batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
+                                  dev->name, vlan_subdir,
+                                  ((*bat_attr)->attr).name);
+                       goto rem_attr;
+               }
+       }
+
+       return 0;
+
+rem_attr:
+       for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
+               sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
+
+       kobject_put(vlan->kobj);
+       vlan->kobj = NULL;
+out:
+       return -ENOMEM;
+}
+
+/**
+ * batadv_sysfs_del_vlan - remove all the sysfs objects for a given VLAN
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vlan: the private data of the VLAN to destroy
+ */
+void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
+                          struct batadv_softif_vlan *vlan)
+{
+       struct batadv_attribute **bat_attr;
+
+       for (bat_attr = batadv_vlan_attrs; *bat_attr; ++bat_attr)
+               sysfs_remove_file(vlan->kobj, &((*bat_attr)->attr));
+
+       kobject_put(vlan->kobj);
+       vlan->kobj = NULL;
+}
+
 static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
                                      struct attribute *attr, char *buff)
 {
index 479acf4c16f47d1068698d15362099459544af79..c7d725de50ade81593c0913ab5d868b23383058c 100644 (file)
 
 #define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
 #define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
+/**
+ * BATADV_SYSFS_VLAN_SUBDIR_PREFIX - prefix of the subfolder that will be
+ *  created in the sysfs hierarchy for each VLAN interface. The subfolder will
+ *  be named "BATADV_SYSFS_VLAN_SUBDIR_PREFIX%vid".
+ */
+#define BATADV_SYSFS_VLAN_SUBDIR_PREFIX "vlan"
 
 struct batadv_attribute {
        struct attribute attr;
@@ -36,6 +42,10 @@ void batadv_sysfs_del_meshif(struct net_device *dev);
 int batadv_sysfs_add_hardif(struct kobject **hardif_obj,
                            struct net_device *dev);
 void batadv_sysfs_del_hardif(struct kobject **hardif_obj);
+int batadv_sysfs_add_vlan(struct net_device *dev,
+                         struct batadv_softif_vlan *vlan);
+void batadv_sysfs_del_vlan(struct batadv_priv *bat_priv,
+                          struct batadv_softif_vlan *vlan);
 int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
                        enum batadv_uev_action action, const char *data);
 
index 34510f38708fbd192ac5be8d4911e456c5d8ab50..4add57d4857f11e5ea9edfa13aae1f46dab3e4f9 100644 (file)
 #include "routing.h"
 #include "bridge_loop_avoidance.h"
 
-#include <linux/crc16.h>
+#include <linux/crc32c.h>
 
 /* hash class keys */
 static struct lock_class_key batadv_tt_local_hash_lock_class_key;
 static struct lock_class_key batadv_tt_global_hash_lock_class_key;
 
 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+                                unsigned short vid,
                                 struct batadv_orig_node *orig_node);
 static void batadv_tt_purge(struct work_struct *work);
 static void
@@ -41,7 +42,8 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                                 struct batadv_orig_node *orig_node,
                                 const unsigned char *addr,
-                                const char *message, bool roaming);
+                                unsigned short vid, const char *message,
+                                bool roaming);
 
 /* returns 1 if they are the same mac addr */
 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
@@ -52,43 +54,93 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
        return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
+/**
+ * batadv_choose_tt - return the index of the tt entry in the hash table
+ * @data: pointer to the tt_common_entry object to map
+ * @size: the size of the hash table
+ *
+ * Returns the hash index at which the object represented by 'data' should
+ * be stored.
+ */
+static inline uint32_t batadv_choose_tt(const void *data, uint32_t size)
+{
+       struct batadv_tt_common_entry *tt;
+       uint32_t hash = 0;
+
+       tt = (struct batadv_tt_common_entry *)data;
+       hash = batadv_hash_bytes(hash, &tt->addr, ETH_ALEN);
+       hash = batadv_hash_bytes(hash, &tt->vid, sizeof(tt->vid));
+
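+       /* mix the bits further before folding the hash into the table size */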
+       hash += (hash << 3);
+       hash ^= (hash >> 11);
+       hash += (hash << 15);
+
+       return hash % size;
+}
+
+/**
+ * batadv_tt_hash_find - look for a client in the given hash table
+ * @hash: the hash table to search
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the tt_common struct belonging to the searched client if
+ * found, NULL otherwise.
+ */
 static struct batadv_tt_common_entry *
-batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
+batadv_tt_hash_find(struct batadv_hashtable *hash, const uint8_t *addr,
+                   unsigned short vid)
 {
        struct hlist_head *head;
-       struct batadv_tt_common_entry *tt_common_entry;
-       struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
+       struct batadv_tt_common_entry to_search, *tt, *tt_tmp = NULL;
        uint32_t index;
 
        if (!hash)
                return NULL;
 
-       index = batadv_choose_orig(data, hash->size);
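+       /* build a temporary entry carrying addr and vid to pick the hash bucket */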
+       memcpy(to_search.addr, addr, ETH_ALEN);
+       to_search.vid = vid;
+
+       index = batadv_choose_tt(&to_search, hash->size);
        head = &hash->table[index];
 
        rcu_read_lock();
-       hlist_for_each_entry_rcu(tt_common_entry, head, hash_entry) {
-               if (!batadv_compare_eth(tt_common_entry, data))
+       hlist_for_each_entry_rcu(tt, head, hash_entry) {
+               if (!batadv_compare_eth(tt, addr))
+                       continue;
+
+               if (tt->vid != vid)
                        continue;
 
-               if (!atomic_inc_not_zero(&tt_common_entry->refcount))
+               if (!atomic_inc_not_zero(&tt->refcount))
                        continue;
 
-               tt_common_entry_tmp = tt_common_entry;
+               tt_tmp = tt;
                break;
        }
        rcu_read_unlock();
 
-       return tt_common_entry_tmp;
+       return tt_tmp;
 }
 
+/**
+ * batadv_tt_local_hash_find - search the local table for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the corresponding tt_local_entry struct if the client is
+ * found, NULL otherwise.
+ */
 static struct batadv_tt_local_entry *
-batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+                         unsigned short vid)
 {
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local_entry = NULL;
 
-       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
+       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, addr,
+                                             vid);
        if (tt_common_entry)
                tt_local_entry = container_of(tt_common_entry,
                                              struct batadv_tt_local_entry,
@@ -96,13 +148,24 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
        return tt_local_entry;
 }
 
+/**
+ * batadv_tt_global_hash_find - search the global table for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to look for
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the corresponding tt_global_entry struct if the client
+ * is found, NULL otherwise.
+ */
 static struct batadv_tt_global_entry *
-batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const uint8_t *addr,
+                          unsigned short vid)
 {
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_global_entry *tt_global_entry = NULL;
 
-       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
+       tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, addr,
+                                             vid);
        if (tt_common_entry)
                tt_global_entry = container_of(tt_common_entry,
                                               struct batadv_tt_global_entry,
@@ -117,25 +180,17 @@ batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
                kfree_rcu(tt_local_entry, common.rcu);
 }
 
-static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
-{
-       struct batadv_tt_common_entry *tt_common_entry;
-       struct batadv_tt_global_entry *tt_global_entry;
-
-       tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
-       tt_global_entry = container_of(tt_common_entry,
-                                      struct batadv_tt_global_entry, common);
-
-       kfree(tt_global_entry);
-}
-
+/**
+ * batadv_tt_global_entry_free_ref - decrement the refcounter for a
+ *  tt_global_entry and possibly free it
+ * @tt_global_entry: the object to free
+ */
 static void
 batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
 {
        if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
                batadv_tt_global_del_orig_list(tt_global_entry);
-               call_rcu(&tt_global_entry->common.rcu,
-                        batadv_tt_global_entry_free_rcu);
+               kfree_rcu(tt_global_entry, common.rcu);
        }
 }
 
@@ -153,13 +208,107 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
        kfree(orig_entry);
 }
 
+/**
+ * batadv_tt_local_size_mod - change the size by v of the local table identified
+ *  by vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier of the sub-table to change
+ * @v: the amount to add to the local table size
+ */
+static void batadv_tt_local_size_mod(struct batadv_priv *bat_priv,
+                                    unsigned short vid, int v)
+{
+       struct batadv_softif_vlan *vlan;
+
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan)
+               return;
+
+       atomic_add(v, &vlan->tt.num_entries);
+
+       batadv_softif_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_tt_local_size_inc - increase by one the local table size for the given
+ *  vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ */
+static void batadv_tt_local_size_inc(struct batadv_priv *bat_priv,
+                                    unsigned short vid)
+{
+       batadv_tt_local_size_mod(bat_priv, vid, 1);
+}
+
+/**
+ * batadv_tt_local_size_dec - decrease by one the local table size for the given
+ *  vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ */
+static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
+                                    unsigned short vid)
+{
+       batadv_tt_local_size_mod(bat_priv, vid, -1);
+}
+
+/**
+ * batadv_tt_global_size_mod - change the size by v of the global table
+ *  identified by vid
+ * @orig_node: the originator whose global table size has to be modified
+ * @vid: the VLAN identifier
+ * @v: the amount to add to the global table size
+ */
+static void batadv_tt_global_size_mod(struct batadv_orig_node *orig_node,
+                                     unsigned short vid, int v)
+{
+       struct batadv_orig_node_vlan *vlan;
+
+       vlan = batadv_orig_node_vlan_new(orig_node, vid);
+       if (!vlan)
+               return;
+
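+       /* drop the per-originator vlan object once its tt counter hits zero */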
+       if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
+               spin_lock_bh(&orig_node->vlan_list_lock);
+               list_del_rcu(&vlan->list);
+               spin_unlock_bh(&orig_node->vlan_list_lock);
+               batadv_orig_node_vlan_free_ref(vlan);
+       }
+
+       batadv_orig_node_vlan_free_ref(vlan);
+}
+
+/**
+ * batadv_tt_global_size_inc - increase by one the global table size for the
+ *  given vid
+ * @orig_node: the originator whose global table size has to be increased
+ * @vid: the vlan identifier
+ */
+static void batadv_tt_global_size_inc(struct batadv_orig_node *orig_node,
+                                     unsigned short vid)
+{
+       batadv_tt_global_size_mod(orig_node, vid, 1);
+}
+
+/**
+ * batadv_tt_global_size_dec - decrease by one the global table size for the
+ *  given vid
+ * @orig_node: the originator whose global table size has to be decreased
+ * @vid: the vlan identifier
+ */
+static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
+                                     unsigned short vid)
+{
+       batadv_tt_global_size_mod(orig_node, vid, -1);
+}
+
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
        if (!atomic_dec_and_test(&orig_entry->refcount))
                return;
-       /* to avoid race conditions, immediately decrease the tt counter */
-       atomic_dec(&orig_entry->orig_node->tt_size);
+
        call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
 }
 
@@ -180,12 +329,13 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
        bool del_op_requested, del_op_entry;
 
        tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
-
        if (!tt_change_node)
                return;
 
        tt_change_node->change.flags = flags;
+       tt_change_node->change.reserved = 0;
        memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
+       tt_change_node->change.vid = htons(common->vid);
 
        del_op_requested = flags & BATADV_TT_CLIENT_DEL;
 
@@ -208,6 +358,13 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
                        goto del;
                if (del_op_requested && !del_op_entry)
                        goto del;
+
+               /* this is a second add in the same originator interval. It
+                * means that flags have been changed: update them!
+                */
+               if (!del_op_requested && !del_op_entry)
+                       entry->change.flags = flags;
+
                continue;
 del:
                list_del(&entry->list);
@@ -229,9 +386,55 @@ unlock:
                atomic_inc(&bat_priv->tt.local_changes);
 }
 
-int batadv_tt_len(int changes_num)
+/**
+ * batadv_tt_len - compute length in bytes of given number of tt changes
+ * @changes_num: number of tt changes
+ *
+ * Returns computed length in bytes.
+ */
+static int batadv_tt_len(int changes_num)
+{
+       return changes_num * sizeof(struct batadv_tvlv_tt_change);
+}
+
+/**
+ * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
+ * @tt_len: available space
+ *
+ * Returns the number of entries.
+ */
+static uint16_t batadv_tt_entries(uint16_t tt_len)
+{
+       return tt_len / batadv_tt_len(1);
+}
+
+/**
+ * batadv_tt_local_table_transmit_size - calculates the local translation table
+ *  size when transmitted over the air
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Returns local translation table size in bytes.
+ */
+static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
 {
-       return changes_num * sizeof(struct batadv_tt_change);
+       uint16_t num_vlan = 0, tt_local_entries = 0;
+       struct batadv_softif_vlan *vlan;
+       int hdr_size;
+
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+               num_vlan++;
+               tt_local_entries += atomic_read(&vlan->tt.num_entries);
+       }
+       rcu_read_unlock();
+
+       /* header size of tvlv encapsulated tt response payload */
+       hdr_size = sizeof(struct batadv_unicast_tvlv_packet);
+       hdr_size += sizeof(struct batadv_tvlv_hdr);
+       hdr_size += sizeof(struct batadv_tvlv_tt_data);
+       hdr_size += num_vlan * sizeof(struct batadv_tvlv_tt_vlan_data);
+
+       return hdr_size + batadv_tt_len(tt_local_entries);
 }
 
 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
@@ -255,33 +458,51 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
                                  const char *message)
 {
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Deleting global tt entry %pM: %s\n",
-                  tt_global->common.addr, message);
+                  "Deleting global tt entry %pM (vid: %d): %s\n",
+                  tt_global->common.addr,
+                  BATADV_PRINT_VID(tt_global->common.vid), message);
 
        batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
-                          batadv_choose_orig, tt_global->common.addr);
+                          batadv_choose_tt, &tt_global->common);
        batadv_tt_global_entry_free_ref(tt_global);
 }
 
-void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-                        int ifindex)
+/**
+ * batadv_tt_local_add - add a new client to the local table or update an
+ *  existing client
+ * @soft_iface: netdev struct of the mesh interface
+ * @addr: the mac address of the client to add
+ * @vid: VLAN identifier
+ * @ifindex: index of the interface where the client is connected to (useful to
+ *  identify wireless clients)
+ *
+ * Returns true if the client was successfully added, false otherwise.
+ */
+bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+                        unsigned short vid, int ifindex)
 {
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_tt_local_entry *tt_local;
        struct batadv_tt_global_entry *tt_global;
+       struct net_device *in_dev = NULL;
        struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry;
-       int hash_added;
-       bool roamed_back = false;
+       int hash_added, table_size, packet_size_max;
+       bool ret = false, roamed_back = false;
+       uint8_t remote_flags;
 
-       tt_local = batadv_tt_local_hash_find(bat_priv, addr);
-       tt_global = batadv_tt_global_hash_find(bat_priv, addr);
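+       /* fetch the netdev behind ifindex so wifi clients can be flagged below */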
+       if (ifindex != BATADV_NULL_IFINDEX)
+               in_dev = dev_get_by_index(&init_net, ifindex);
+
+       tt_local = batadv_tt_local_hash_find(bat_priv, addr, vid);
+       tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
 
        if (tt_local) {
                tt_local->last_seen = jiffies;
                if (tt_local->common.flags & BATADV_TT_CLIENT_PENDING) {
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Re-adding pending client %pM\n", addr);
+                                  "Re-adding pending client %pM (vid: %d)\n",
+                                  addr, BATADV_PRINT_VID(vid));
                        /* whatever the reason why the PENDING flag was set,
                         * this is a client which was enqueued to be removed in
                         * this orig_interval. Since it popped up again, the
@@ -293,8 +514,8 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
 
                if (tt_local->common.flags & BATADV_TT_CLIENT_ROAM) {
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Roaming client %pM came back to its original location\n",
-                                  addr);
+                                  "Roaming client %pM (vid: %d) came back to its original location\n",
+                                  addr, BATADV_PRINT_VID(vid));
                        /* the ROAM flag is set because this client roamed away
                         * and the node got a roaming_advertisement message. Now
                         * that the client popped up again at its original
@@ -306,12 +527,24 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                goto check_roaming;
        }
 
+       /* Ignore the client if we cannot send it in a full table response. */
+       table_size = batadv_tt_local_table_transmit_size(bat_priv);
+       table_size += batadv_tt_len(1);
+       packet_size_max = atomic_read(&bat_priv->packet_size_max);
+       if (table_size > packet_size_max) {
+               net_ratelimited_function(batadv_info, soft_iface,
+                                        "Local translation table size (%i) exceeds maximum packet size (%i); Ignoring new local tt entry: %pM\n",
+                                        table_size, packet_size_max, addr);
+               goto out;
+       }
+
        tt_local = kmalloc(sizeof(*tt_local), GFP_ATOMIC);
        if (!tt_local)
                goto out;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
+                  "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
+                  addr, BATADV_PRINT_VID(vid),
                   (uint8_t)atomic_read(&bat_priv->tt.vn));
 
        memcpy(tt_local->common.addr, addr, ETH_ALEN);
@@ -320,7 +553,8 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
         * (consistency check)
         */
        tt_local->common.flags = BATADV_TT_CLIENT_NEW;
-       if (batadv_is_wifi_iface(ifindex))
+       tt_local->common.vid = vid;
+       if (batadv_is_wifi_netdev(in_dev))
                tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
        atomic_set(&tt_local->common.refcount, 2);
        tt_local->last_seen = jiffies;
@@ -331,7 +565,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
                tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
 
        hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
-                                    batadv_choose_orig, &tt_local->common,
+                                    batadv_choose_tt, &tt_local->common,
                                     &tt_local->common.hash_entry);
 
        if (unlikely(hash_added != 0)) {
@@ -353,6 +587,7 @@ check_roaming:
                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_entry, head, list) {
                        batadv_send_roam_adv(bat_priv, tt_global->common.addr,
+                                            tt_global->common.vid,
                                             orig_entry->orig_node);
                }
                rcu_read_unlock();
@@ -369,78 +604,219 @@ check_roaming:
                }
        }
 
+       /* store the current remote flags before altering them. This helps
+        * understand whether the flags are changing or not
+        */
+       remote_flags = tt_local->common.flags & BATADV_TT_REMOTE_MASK;
+
+       if (batadv_is_wifi_netdev(in_dev))
+               tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
+       else
+               tt_local->common.flags &= ~BATADV_TT_CLIENT_WIFI;
+
+       /* if any "dynamic" flag has been modified, resend an ADD event for this
+        * entry so that all the nodes can get the new flags
+        */
+       if (remote_flags ^ (tt_local->common.flags & BATADV_TT_REMOTE_MASK))
+               batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
+
+       ret = true;
 out:
+       if (in_dev)
+               dev_put(in_dev);
        if (tt_local)
                batadv_tt_local_entry_free_ref(tt_local);
        if (tt_global)
                batadv_tt_global_entry_free_ref(tt_global);
+       return ret;
 }
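
The flag re-synchronisation at the end of batadv_tt_local_add() boils down to a masked XOR: keep only the bits shared with other nodes, remember them, update the entry, and re-announce when the masked bits differ. A minimal standalone sketch of that test; the mask and flag values below are invented for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumed values, for illustration only */
#define TT_CLIENT_WIFI  0x10
#define TT_REMOTE_MASK  0xf0    /* bits other nodes must be told about */

/* true if any remotely relevant flag bit changed */
static bool remote_flags_changed(uint16_t old_flags, uint16_t new_flags)
{
        return ((old_flags ^ new_flags) & TT_REMOTE_MASK) != 0;
}

int main(void)
{
        uint16_t flags = 0x01;                  /* only local-scope bits set */
        uint16_t remote = flags & TT_REMOTE_MASK;

        flags |= TT_CLIENT_WIFI;                /* client now seen over wifi */

        if (remote_flags_changed(remote, flags & TT_REMOTE_MASK))
                printf("re-announce entry with flags 0x%02x\n",
                       (unsigned int)flags);
        return 0;
}
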
 
-static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
-                                         int *packet_buff_len,
-                                         int min_packet_len,
-                                         int new_packet_len)
+/**
+ * batadv_tt_prepare_tvlv_global_data - prepare the TVLV TT header to send
+ *  within a TT Response directed to another node
+ * @orig_node: originator for which the TT data has to be prepared
+ * @tt_data: uninitialised pointer to the address of the TVLV buffer
+ * @tt_change: uninitialised pointer to the address of the area where the TT
+ *  changes can be stored
+ * @tt_len: pointer to the length to reserve for the tt_change. If -1, this
+ *  function reserves the amount of space needed to send the entire global TT
+ *  table. In case of success the value is updated with the real amount of
+ *  reserved bytes
+ *
+ * Allocate the needed amount of memory for the entire TT TVLV and write its
+ * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * objects, one per active VLAN served by the originator node.
+ *
+ * Return the size of the allocated buffer or 0 in case of failure.
+ */
+static uint16_t
+batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
+                                  struct batadv_tvlv_tt_data **tt_data,
+                                  struct batadv_tvlv_tt_change **tt_change,
+                                  int32_t *tt_len)
 {
-       unsigned char *new_buff;
+       uint16_t num_vlan = 0, num_entries = 0, change_offset, tvlv_len;
+       struct batadv_tvlv_tt_vlan_data *tt_vlan;
+       struct batadv_orig_node_vlan *vlan;
+       uint8_t *tt_change_ptr;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+               num_vlan++;
+               num_entries += atomic_read(&vlan->tt.num_entries);
+       }
+
+       change_offset = sizeof(**tt_data);
+       change_offset += num_vlan * sizeof(*tt_vlan);
 
-       new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
+       /* if tt_len is negative, allocate the space needed by the full table */
+       if (*tt_len < 0)
+               *tt_len = batadv_tt_len(num_entries);
 
-       /* keep old buffer if kmalloc should fail */
-       if (new_buff) {
-               memcpy(new_buff, *packet_buff, min_packet_len);
-               kfree(*packet_buff);
-               *packet_buff = new_buff;
-               *packet_buff_len = new_packet_len;
+       tvlv_len = *tt_len;
+       tvlv_len += change_offset;
+
+       *tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
+       if (!*tt_data) {
+               *tt_len = 0;
+               goto out;
+       }
+
+       (*tt_data)->flags = BATADV_NO_FLAGS;
+       (*tt_data)->ttvn = atomic_read(&orig_node->last_ttvn);
+       (*tt_data)->num_vlan = htons(num_vlan);
+
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+       list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+               tt_vlan->vid = htons(vlan->vid);
+               tt_vlan->crc = htonl(vlan->tt.crc);
+
+               tt_vlan++;
        }
+
+       tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+       *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+out:
+       rcu_read_unlock();
+       return tvlv_len;
 }
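
The buffer prepared here is laid out as a fixed TT data header, followed by one record per VLAN, followed by the change area at change_offset. A hedged userspace sketch of that pointer arithmetic, using stand-in structures rather than the real batman-adv wire formats:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-ins for the real TVLV structures; field sizes are illustrative */
struct tt_data   { uint8_t flags; uint8_t ttvn; uint16_t num_vlan; };
struct tt_vlan   { uint16_t vid; uint32_t crc; };
struct tt_change { uint8_t addr[6]; uint16_t vid; };

int main(void)
{
        unsigned int num_vlan = 2, num_changes = 3;
        size_t change_offset = sizeof(struct tt_data) +
                               num_vlan * sizeof(struct tt_vlan);
        size_t tvlv_len = change_offset +
                          num_changes * sizeof(struct tt_change);
        uint8_t *buff = calloc(1, tvlv_len);

        if (!buff)
                return 1;

        struct tt_data *hdr = (struct tt_data *)buff;
        struct tt_vlan *vlan = (struct tt_vlan *)(hdr + 1); /* after header */
        struct tt_change *change = (struct tt_change *)(buff + change_offset);

        hdr->num_vlan = (uint16_t)num_vlan;
        vlan[0].vid = 1;        /* fill one record per VLAN ...            */
        change[0].vid = 1;      /* ... then append the change list         */

        printf("header %zu B, per-VLAN %zu B, changes start at offset %zu\n",
               sizeof(*hdr), sizeof(*vlan), change_offset);
        free(buff);
        return 0;
}
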
 
-static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
-                                         unsigned char **packet_buff,
-                                         int *packet_buff_len,
-                                         int min_packet_len)
-{
-       int req_len;
+/**
+ * batadv_tt_prepare_tvlv_local_data - allocate and prepare the TT TVLV for this
+ *  node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: uninitialised pointer to the address of the TVLV buffer
+ * @tt_change: uninitialised pointer to the address of the area where the TT
+ *  changes can be stored
+ * @tt_len: pointer to the length to reserve for the tt_change. If -1, this
+ *  function reserves the amount of space needed to send the entire local TT
+ *  table. In case of success the value is updated with the real amount of
+ *  reserved bytes
+ *
+ * Allocate the needed amount of memory for the entire TT TVLV and write its
+ * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * objects, one per active VLAN.
+ *
+ * Return the size of the allocated buffer or 0 in case of failure.
+ */
+static uint16_t
+batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
+                                 struct batadv_tvlv_tt_data **tt_data,
+                                 struct batadv_tvlv_tt_change **tt_change,
+                                 int32_t *tt_len)
+{
+       struct batadv_tvlv_tt_vlan_data *tt_vlan;
+       struct batadv_softif_vlan *vlan;
+       uint16_t num_vlan = 0, num_entries = 0, tvlv_len;
+       uint8_t *tt_change_ptr;
+       int change_offset;
 
-       req_len = min_packet_len;
-       req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+               num_vlan++;
+               num_entries += atomic_read(&vlan->tt.num_entries);
+       }
 
-       /* if we have too many changes for one packet don't send any
-        * and wait for the tt table request which will be fragmented
-        */
-       if (req_len > bat_priv->soft_iface->mtu)
-               req_len = min_packet_len;
+       change_offset = sizeof(**tt_data);
+       change_offset += num_vlan * sizeof(*tt_vlan);
+
+       /* if tt_len is negative, allocate the space needed by the full table */
+       if (*tt_len < 0)
+               *tt_len = batadv_tt_len(num_entries);
+
+       tvlv_len = *tt_len;
+       tvlv_len += change_offset;
+
+       *tt_data = kmalloc(tvlv_len, GFP_ATOMIC);
+       if (!*tt_data) {
+               tvlv_len = 0;
+               goto out;
+       }
+
+       (*tt_data)->flags = BATADV_NO_FLAGS;
+       (*tt_data)->ttvn = atomic_read(&bat_priv->tt.vn);
+       (*tt_data)->num_vlan = htons(num_vlan);
+
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
+       hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+               tt_vlan->vid = htons(vlan->vid);
+               tt_vlan->crc = htonl(vlan->tt.crc);
 
-       batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
-                                     min_packet_len, req_len);
+               tt_vlan++;
+       }
+
+       tt_change_ptr = (uint8_t *)*tt_data + change_offset;
+       *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
+
+out:
+       rcu_read_unlock();
+       return tvlv_len;
 }
 
-static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
-                                      unsigned char **packet_buff,
-                                      int *packet_buff_len,
-                                      int min_packet_len)
+/**
+ * batadv_tt_tvlv_container_update - update the translation table tvlv container
+ *  after local tt changes have been committed
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_change_node *entry, *safe;
-       int count = 0, tot_changes = 0, new_len;
-       unsigned char *tt_buff;
+       struct batadv_tvlv_tt_data *tt_data;
+       struct batadv_tvlv_tt_change *tt_change;
+       int tt_diff_len, tt_change_len = 0;
+       int tt_diff_entries_num = 0, tt_diff_entries_count = 0;
+       uint16_t tvlv_len;
+
+       tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+       tt_diff_len = batadv_tt_len(tt_diff_entries_num);
+
+       /* if we have too many changes for one packet don't send any
+        * and wait for the tt table request which will be fragmented
+        */
+       if (tt_diff_len > bat_priv->soft_iface->mtu)
+               tt_diff_len = 0;
 
-       batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
-                                     packet_buff_len, min_packet_len);
+       tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
+                                                    &tt_change, &tt_diff_len);
+       if (!tvlv_len)
+               return;
 
-       new_len = *packet_buff_len - min_packet_len;
-       tt_buff = *packet_buff + min_packet_len;
+       tt_data->flags = BATADV_TT_OGM_DIFF;
 
-       if (new_len > 0)
-               tot_changes = new_len / batadv_tt_len(1);
+       if (tt_diff_len == 0)
+               goto container_register;
 
        spin_lock_bh(&bat_priv->tt.changes_list_lock);
        atomic_set(&bat_priv->tt.local_changes, 0);
 
        list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
                                 list) {
-               if (count < tot_changes) {
-                       memcpy(tt_buff + batadv_tt_len(count),
-                              &entry->change, sizeof(struct batadv_tt_change));
-                       count++;
+               if (tt_diff_entries_count < tt_diff_entries_num) {
+                       memcpy(tt_change + tt_diff_entries_count,
+                              &entry->change,
+                              sizeof(struct batadv_tvlv_tt_change));
+                       tt_diff_entries_count++;
                }
                list_del(&entry->list);
                kfree(entry);
@@ -452,20 +828,25 @@ static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
        kfree(bat_priv->tt.last_changeset);
        bat_priv->tt.last_changeset_len = 0;
        bat_priv->tt.last_changeset = NULL;
+       tt_change_len = batadv_tt_len(tt_diff_entries_count);
        /* check whether this new OGM has no changes due to size problems */
-       if (new_len > 0) {
+       if (tt_diff_entries_count > 0) {
                /* if kmalloc() fails we will reply with the full table
                 * instead of providing the diff
                 */
-               bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
+               bat_priv->tt.last_changeset = kzalloc(tt_diff_len, GFP_ATOMIC);
                if (bat_priv->tt.last_changeset) {
-                       memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
-                       bat_priv->tt.last_changeset_len = new_len;
+                       memcpy(bat_priv->tt.last_changeset,
+                              tt_change, tt_change_len);
+                       bat_priv->tt.last_changeset_len = tt_diff_len;
                }
        }
        spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 
-       return count;
+container_register:
+       batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
+                                      tvlv_len);
+       kfree(tt_data);
 }
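
The policy at the top of batadv_tt_tvlv_container_update() is: if the pending diff would not fit into one packet, publish no changes at all and let neighbours fall back to a full-table TT request. A compact sketch of that decision, with an assumed per-change size:

#include <stdio.h>

#define TT_CHANGE_LEN 12        /* assumed size of one change record */

/* bytes of diff to put into the next container: the whole diff, or 0
 * when it would exceed the packet budget (peers then request the full
 * table instead)
 */
static int diff_len_for_container(int pending_changes, int packet_budget)
{
        int diff_len = pending_changes * TT_CHANGE_LEN;

        if (diff_len > packet_budget)
                return 0;
        return diff_len;
}

int main(void)
{
        printf("%d\n", diff_len_for_container(10, 1500));   /* 120 */
        printf("%d\n", diff_len_for_container(200, 1500));  /* 0   */
        return 0;
}
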
 
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
@@ -476,7 +857,9 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
        struct batadv_hard_iface *primary_if;
+       struct batadv_softif_vlan *vlan;
        struct hlist_head *head;
+       unsigned short vid;
        uint32_t i;
        int last_seen_secs;
        int last_seen_msecs;
@@ -489,11 +872,10 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                goto out;
 
        seq_printf(seq,
-                  "Locally retrieved addresses (from %s) announced via TT (TTVN: %u CRC: %#.4x):\n",
-                  net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn),
-                  bat_priv->tt.local_crc);
-       seq_printf(seq, "       %-13s %-7s %-10s\n", "Client", "Flags",
-                  "Last seen");
+                  "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
+                  net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
+       seq_printf(seq, "       %-13s  %s %-7s %-9s (%-10s)\n", "Client", "VID",
+                  "Flags", "Last seen", "CRC");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -504,6 +886,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                        tt_local = container_of(tt_common_entry,
                                                struct batadv_tt_local_entry,
                                                common);
+                       vid = tt_common_entry->vid;
                        last_seen_jiffies = jiffies - tt_local->last_seen;
                        last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
                        last_seen_secs = last_seen_msecs / 1000;
@@ -511,8 +894,17 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
 
                        no_purge = tt_common_entry->flags & np_flag;
 
-                       seq_printf(seq, " * %pM [%c%c%c%c%c] %3u.%03u\n",
+                       vlan = batadv_softif_vlan_get(bat_priv, vid);
+                       if (!vlan) {
+                               seq_printf(seq, "Cannot retrieve VLAN %d\n",
+                                          BATADV_PRINT_VID(vid));
+                               continue;
+                       }
+
+                       seq_printf(seq,
+                                  " * %pM %4i [%c%c%c%c%c] %3u.%03u   (%#.8x)\n",
                                   tt_common_entry->addr,
+                                  BATADV_PRINT_VID(tt_common_entry->vid),
                                   (tt_common_entry->flags &
                                    BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
                                   no_purge ? 'P' : '.',
@@ -523,7 +915,10 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                                   (tt_common_entry->flags &
                                    BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
                                   no_purge ? 0 : last_seen_secs,
-                                  no_purge ? 0 : last_seen_msecs);
+                                  no_purge ? 0 : last_seen_msecs,
+                                  vlan->tt.crc);
+
+                       batadv_softif_vlan_free_ref(vlan);
                }
                rcu_read_unlock();
        }
@@ -547,27 +942,29 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
        tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Local tt entry (%pM) pending to be removed: %s\n",
-                  tt_local_entry->common.addr, message);
+                  "Local tt entry (%pM, vid: %d) pending to be removed: %s\n",
+                  tt_local_entry->common.addr,
+                  BATADV_PRINT_VID(tt_local_entry->common.vid), message);
 }
 
 /**
  * batadv_tt_local_remove - logically remove an entry from the local table
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the MAC address of the client to remove
+ * @vid: VLAN identifier
  * @message: message to append to the log on deletion
  * @roaming: true if the deletion is due to a roaming event
  *
  * Returns the flags assigned to the local entry before being deleted
  */
 uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
-                               const uint8_t *addr, const char *message,
-                               bool roaming)
+                               const uint8_t *addr, unsigned short vid,
+                               const char *message, bool roaming)
 {
        struct batadv_tt_local_entry *tt_local_entry;
        uint16_t flags, curr_flags = BATADV_NO_FLAGS;
 
-       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
                goto out;
 
@@ -603,8 +1000,16 @@ out:
        return curr_flags;
 }
 
+/**
+ * batadv_tt_local_purge_list - purge inactive tt local entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @head: pointer to the list containing the local tt entries
+ * @timeout: parameter deciding whether a given tt local entry is considered
+ *  inactive or not
+ */
 static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
-                                      struct hlist_head *head)
+                                      struct hlist_head *head,
+                                      int timeout)
 {
        struct batadv_tt_local_entry *tt_local_entry;
        struct batadv_tt_common_entry *tt_common_entry;
@@ -622,8 +1027,7 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
                if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
                        continue;
 
-               if (!batadv_has_timed_out(tt_local_entry->last_seen,
-                                         BATADV_TT_LOCAL_TIMEOUT))
+               if (!batadv_has_timed_out(tt_local_entry->last_seen, timeout))
                        continue;
 
                batadv_tt_local_set_pending(bat_priv, tt_local_entry,
@@ -631,7 +1035,14 @@ static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
        }
 }
 
-static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
+/**
+ * batadv_tt_local_purge - purge inactive tt local entries
+ * @bat_priv: the bat priv with all the soft interface information
+ * @timeout: parameter deciding whether a given tt local entry is considered
+ *  inactive or not
+ */
+static void batadv_tt_local_purge(struct batadv_priv *bat_priv,
+                                 int timeout)
 {
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct hlist_head *head;
@@ -643,7 +1054,7 @@ static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
                list_lock = &hash->list_locks[i];
 
                spin_lock_bh(list_lock);
-               batadv_tt_local_purge_list(bat_priv, head);
+               batadv_tt_local_purge_list(bat_priv, head, timeout);
                spin_unlock_bh(list_lock);
        }
 }
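
The purge path now receives the timeout as a parameter instead of hard-coding BATADV_TT_LOCAL_TIMEOUT. The sketch below shows the same "last seen plus timeout" test using monotonic wall-clock milliseconds instead of jiffies, purely to keep the example self-contained:

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* monotonic time in milliseconds, standing in for jiffies */
static uint64_t now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

static bool has_timed_out(uint64_t last_seen_ms, uint64_t timeout_ms)
{
        return now_ms() - last_seen_ms > timeout_ms;
}

int main(void)
{
        uint64_t last_seen = now_ms() - 700000; /* seen ~11.5 minutes ago */

        /* 600000 ms roughly mirrors the default local TT timeout */
        if (has_timed_out(last_seen, 600000))
                printf("entry would be marked as pending for removal\n");
        return 0;
}
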
@@ -784,7 +1195,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 
        INIT_HLIST_NODE(&orig_entry->list);
        atomic_inc(&orig_node->refcount);
-       atomic_inc(&orig_node->tt_size);
+       batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
        orig_entry->orig_node = orig_node;
        orig_entry->ttvn = ttvn;
        atomic_set(&orig_entry->refcount, 2);
@@ -803,6 +1214,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: the originator announcing the client
  * @tt_addr: the mac address of the non-mesh client
+ * @vid: VLAN identifier
  * @flags: TT flags that have to be set for this non-mesh client
  * @ttvn: the tt version number ever announcing this non-mesh client
  *
@@ -813,21 +1225,28 @@ out:
  * If a TT local entry exists for this non-mesh client remove it.
  *
  * The caller must hold orig_node refcount.
+ *
+ * Return true if the new entry has been added, false otherwise
  */
-int batadv_tt_global_add(struct batadv_priv *bat_priv,
-                        struct batadv_orig_node *orig_node,
-                        const unsigned char *tt_addr, uint16_t flags,
-                        uint8_t ttvn)
+static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+                                struct batadv_orig_node *orig_node,
+                                const unsigned char *tt_addr,
+                                unsigned short vid, uint16_t flags,
+                                uint8_t ttvn)
 {
        struct batadv_tt_global_entry *tt_global_entry;
        struct batadv_tt_local_entry *tt_local_entry;
-       int ret = 0;
+       bool ret = false;
        int hash_added;
        struct batadv_tt_common_entry *common;
        uint16_t local_flags;
 
-       tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
-       tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr);
+       /* ignore global entries from backbone nodes */
+       if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid))
+               return true;
+
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr, vid);
+       tt_local_entry = batadv_tt_local_hash_find(bat_priv, tt_addr, vid);
 
        /* if the node already has a local client for this entry, it has to wait
         * for a roaming advertisement instead of manually messing up the global
@@ -844,6 +1263,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
 
                common = &tt_global_entry->common;
                memcpy(common->addr, tt_addr, ETH_ALEN);
+               common->vid = vid;
 
                common->flags = flags;
                tt_global_entry->roam_at = 0;
@@ -861,7 +1281,7 @@ int batadv_tt_global_add(struct batadv_priv *bat_priv,
 
                hash_added = batadv_hash_add(bat_priv->tt.global_hash,
                                             batadv_compare_tt,
-                                            batadv_choose_orig, common,
+                                            batadv_choose_tt, common,
                                             &common->hash_entry);
 
                if (unlikely(hash_added != 0)) {
@@ -920,14 +1340,15 @@ add_orig_entry:
        batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Creating new global tt entry: %pM (via %pM)\n",
-                  common->addr, orig_node->orig);
-       ret = 1;
+                  "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
+                  common->addr, BATADV_PRINT_VID(common->vid),
+                  orig_node->orig);
+       ret = true;
 
 out_remove:
 
        /* remove address from local hash if present */
-       local_flags = batadv_tt_local_remove(bat_priv, tt_addr,
+       local_flags = batadv_tt_local_remove(bat_priv, tt_addr, vid,
                                             "global tt received",
                                             flags & BATADV_TT_CLIENT_ROAM);
        tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
@@ -947,18 +1368,20 @@ out:
 }
 
 /* batadv_transtable_best_orig - Get best originator list entry from tt entry
+ * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be analyzed
  *
 * This function assumes the caller holds rcu_read_lock().
  * Returns best originator list entry or NULL on errors.
  */
 static struct batadv_tt_orig_list_entry *
-batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
+batadv_transtable_best_orig(struct batadv_priv *bat_priv,
+                           struct batadv_tt_global_entry *tt_global_entry)
 {
-       struct batadv_neigh_node *router = NULL;
+       struct batadv_neigh_node *router, *best_router = NULL;
+       struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry, *best_entry = NULL;
-       int best_tq = 0;
 
        head = &tt_global_entry->orig_list;
        hlist_for_each_entry_rcu(orig_entry, head, list) {
@@ -966,64 +1389,104 @@ batadv_transtable_best_orig(struct batadv_tt_global_entry *tt_global_entry)
                if (!router)
                        continue;
 
-               if (router->tq_avg > best_tq) {
-                       best_entry = orig_entry;
-                       best_tq = router->tq_avg;
+               if (best_router &&
+                   bao->bat_neigh_cmp(router, best_router) <= 0) {
+                       batadv_neigh_node_free_ref(router);
+                       continue;
                }
 
-               batadv_neigh_node_free_ref(router);
+               /* release the refcount for the "old" best */
+               if (best_router)
+                       batadv_neigh_node_free_ref(best_router);
+
+               best_entry = orig_entry;
+               best_router = router;
        }
 
+       if (best_router)
+               batadv_neigh_node_free_ref(best_router);
+
        return best_entry;
 }
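
Best-originator selection is now delegated to the routing algorithm's comparator (bat_neigh_cmp) instead of comparing TQ values directly. A simplified sketch of the "keep the current best under a pluggable comparator" loop, without the RCU and refcounting of the real code:

#include <stdio.h>

struct neigh { const char *name; int metric; };

/* pluggable comparator: > 0 means a is better than b, like bat_neigh_cmp */
typedef int (*neigh_cmp_t)(const struct neigh *a, const struct neigh *b);

static int cmp_by_metric(const struct neigh *a, const struct neigh *b)
{
        return a->metric - b->metric;
}

static const struct neigh *best_neigh(const struct neigh *list, int n,
                                      neigh_cmp_t cmp)
{
        const struct neigh *best = NULL;

        for (int i = 0; i < n; i++) {
                if (best && cmp(&list[i], best) <= 0)
                        continue;       /* not better than the current best */
                best = &list[i];
        }
        return best;
}

int main(void)
{
        struct neigh neighs[] = { { "A", 120 }, { "B", 200 }, { "C", 180 } };
        const struct neigh *best = best_neigh(neighs, 3, cmp_by_metric);

        printf("best originator reached via %s\n", best->name);
        return 0;
}
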
 
 /* batadv_tt_global_print_entry - print all orig nodes who announce the address
  * for this global entry
+ * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be printed
  * @seq: debugfs table seq_file struct
  *
 * This function assumes the caller holds rcu_read_lock().
  */
 static void
-batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
+batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
+                            struct batadv_tt_global_entry *tt_global_entry,
                             struct seq_file *seq)
 {
-       struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry, *best_entry;
        struct batadv_tt_common_entry *tt_common_entry;
-       uint16_t flags;
+       struct batadv_orig_node_vlan *vlan;
+       struct hlist_head *head;
        uint8_t last_ttvn;
+       uint16_t flags;
 
        tt_common_entry = &tt_global_entry->common;
        flags = tt_common_entry->flags;
 
-       best_entry = batadv_transtable_best_orig(tt_global_entry);
+       best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
        if (best_entry) {
+               vlan = batadv_orig_node_vlan_get(best_entry->orig_node,
+                                                tt_common_entry->vid);
+               if (!vlan) {
+                       seq_printf(seq,
+                                  " * Cannot retrieve VLAN %d for originator %pM\n",
+                                  BATADV_PRINT_VID(tt_common_entry->vid),
+                                  best_entry->orig_node->orig);
+                       goto print_list;
+               }
+
                last_ttvn = atomic_read(&best_entry->orig_node->last_ttvn);
                seq_printf(seq,
-                          " %c %pM  (%3u) via %pM     (%3u)   (%#.4x) [%c%c%c]\n",
+                          " %c %pM %4i   (%3u) via %pM     (%3u)   (%#.8x) [%c%c%c]\n",
                           '*', tt_global_entry->common.addr,
+                          BATADV_PRINT_VID(tt_global_entry->common.vid),
                           best_entry->ttvn, best_entry->orig_node->orig,
-                          last_ttvn, best_entry->orig_node->tt_crc,
+                          last_ttvn, vlan->tt.crc,
                           (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
                           (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
                           (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+
+               batadv_orig_node_vlan_free_ref(vlan);
        }
 
+print_list:
        head = &tt_global_entry->orig_list;
 
        hlist_for_each_entry_rcu(orig_entry, head, list) {
                if (best_entry == orig_entry)
                        continue;
 
+               vlan = batadv_orig_node_vlan_get(orig_entry->orig_node,
+                                                tt_common_entry->vid);
+               if (!vlan) {
+                       seq_printf(seq,
+                                  " + Cannot retrieve VLAN %d for originator %pM\n",
+                                  BATADV_PRINT_VID(tt_common_entry->vid),
+                                  orig_entry->orig_node->orig);
+                       continue;
+               }
+
                last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
-               seq_printf(seq, " %c %pM  (%3u) via %pM     (%3u)   [%c%c%c]\n",
+               seq_printf(seq,
+                          " %c %pM %4d   (%3u) via %pM     (%3u)   (%#.8x) [%c%c%c]\n",
                           '+', tt_global_entry->common.addr,
+                          BATADV_PRINT_VID(tt_global_entry->common.vid),
                           orig_entry->ttvn, orig_entry->orig_node->orig,
-                          last_ttvn,
+                          last_ttvn, vlan->tt.crc,
                           (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
                           (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
                           (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
+
+               batadv_orig_node_vlan_free_ref(vlan);
        }
 }
 
@@ -1045,9 +1508,9 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
        seq_printf(seq,
                   "Globally announced TT entries received via the mesh %s\n",
                   net_dev->name);
-       seq_printf(seq, "       %-13s %s       %-15s %s (%-6s) %s\n",
-                  "Client", "(TTVN)", "Originator", "(Curr TTVN)", "CRC",
-                  "Flags");
+       seq_printf(seq, "       %-13s  %s  %s       %-15s %s (%-10s) %s\n",
+                  "Client", "VID", "(TTVN)", "Originator", "(Curr TTVN)",
+                  "CRC", "Flags");
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1058,7 +1521,7 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
                        tt_global = container_of(tt_common_entry,
                                                 struct batadv_tt_global_entry,
                                                 common);
-                       batadv_tt_global_print_entry(tt_global, seq);
+                       batadv_tt_global_print_entry(bat_priv, tt_global, seq);
                }
                rcu_read_unlock();
        }
@@ -1080,6 +1543,8 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
        head = &tt_global_entry->orig_list;
        hlist_for_each_entry_safe(orig_entry, safe, head, list) {
                hlist_del_rcu(&orig_entry->list);
+               batadv_tt_global_size_dec(orig_entry->orig_node,
+                                         tt_global_entry->common.vid);
                batadv_tt_orig_list_entry_free_ref(orig_entry);
        }
        spin_unlock_bh(&tt_global_entry->list_lock);
@@ -1094,16 +1559,21 @@ batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
        struct hlist_head *head;
        struct hlist_node *safe;
        struct batadv_tt_orig_list_entry *orig_entry;
+       unsigned short vid;
 
        spin_lock_bh(&tt_global_entry->list_lock);
        head = &tt_global_entry->orig_list;
        hlist_for_each_entry_safe(orig_entry, safe, head, list) {
                if (orig_entry->orig_node == orig_node) {
+                       vid = tt_global_entry->common.vid;
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Deleting %pM from global tt entry %pM: %s\n",
+                                  "Deleting %pM from global tt entry %pM (vid: %d): %s\n",
                                   orig_node->orig,
-                                  tt_global_entry->common.addr, message);
+                                  tt_global_entry->common.addr,
+                                  BATADV_PRINT_VID(vid), message);
                        hlist_del_rcu(&orig_entry->list);
+                       batadv_tt_global_size_dec(orig_node,
+                                                 tt_global_entry->common.vid);
                        batadv_tt_orig_list_entry_free_ref(orig_entry);
                }
        }
@@ -1150,17 +1620,25 @@ batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
                                                orig_node, message);
 }
 
-
-
+/**
+ * batadv_tt_global_del - remove a client from the global table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: an originator serving this client
+ * @addr: the mac address of the client
+ * @vid: VLAN identifier
+ * @message: a message explaining the reason for deleting the client to print
+ *  for debugging purpose
+ * @roaming: true if the deletion has been triggered by a roaming event
+ */
 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                                 struct batadv_orig_node *orig_node,
-                                const unsigned char *addr,
+                                const unsigned char *addr, unsigned short vid,
                                 const char *message, bool roaming)
 {
        struct batadv_tt_global_entry *tt_global_entry;
        struct batadv_tt_local_entry *local_entry = NULL;
 
-       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
        if (!tt_global_entry)
                goto out;
 
@@ -1189,7 +1667,8 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
         *    the global entry, since it is useless now.
         */
        local_entry = batadv_tt_local_hash_find(bat_priv,
-                                               tt_global_entry->common.addr);
+                                               tt_global_entry->common.addr,
+                                               vid);
        if (local_entry) {
                /* local entry exists, case 2: client roamed to us. */
                batadv_tt_global_del_orig_list(tt_global_entry);
@@ -1207,8 +1686,18 @@ out:
                batadv_tt_local_entry_free_ref(local_entry);
 }
 
+/**
+ * batadv_tt_global_del_orig - remove all the TT global entries belonging to the
+ *  given originator matching the provided vid
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the originator owning the entries to remove
+ * @match_vid: the VLAN identifier to match. If negative, all the entries will be
+ *  removed
+ * @message: debug message to print as "reason"
+ */
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                               struct batadv_orig_node *orig_node,
+                              int32_t match_vid,
                               const char *message)
 {
        struct batadv_tt_global_entry *tt_global;
@@ -1218,6 +1707,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
        struct hlist_node *safe;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
+       unsigned short vid;
 
        if (!hash)
                return;
@@ -1229,6 +1719,10 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                spin_lock_bh(list_lock);
                hlist_for_each_entry_safe(tt_common_entry, safe,
                                          head, hash_entry) {
+                       /* remove only matching entries */
+                       if (match_vid >= 0 && tt_common_entry->vid != match_vid)
+                               continue;
+
                        tt_global = container_of(tt_common_entry,
                                                 struct batadv_tt_global_entry,
                                                 common);
@@ -1237,9 +1731,11 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                                                        orig_node, message);
 
                        if (hlist_empty(&tt_global->orig_list)) {
+                               vid = tt_global->common.vid;
                                batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                          "Deleting global tt entry %pM: %s\n",
-                                          tt_global->common.addr, message);
+                                          "Deleting global tt entry %pM (vid: %d): %s\n",
+                                          tt_global->common.addr,
+                                          BATADV_PRINT_VID(vid), message);
                                hlist_del_rcu(&tt_common_entry->hash_entry);
                                batadv_tt_global_entry_free_ref(tt_global);
                        }
@@ -1297,8 +1793,10 @@ static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
                                continue;
 
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Deleting global tt entry (%pM): %s\n",
-                                  tt_global->common.addr, msg);
+                                  "Deleting global tt entry %pM (vid: %d): %s\n",
+                                  tt_global->common.addr,
+                                  BATADV_PRINT_VID(tt_global->common.vid),
+                                  msg);
 
                        hlist_del_rcu(&tt_common->hash_entry);
 
@@ -1357,23 +1855,49 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
        return ret;
 }
 
+/**
+ * batadv_transtable_search - get the mesh destination for a given client
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of the source client
+ * @addr: mac address of the destination client
+ * @vid: VLAN identifier
+ *
+ * Returns a pointer to the originator that was selected as destination in the
+ * mesh for contacting the client 'addr', NULL otherwise.
+ * In case of multiple originators serving the same client, the function returns
+ * the best one (best in terms of metric towards the destination node).
+ *
+ * If the two clients are AP isolated the function returns NULL.
+ */
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
                                                  const uint8_t *src,
-                                                 const uint8_t *addr)
+                                                 const uint8_t *addr,
+                                                 unsigned short vid)
 {
        struct batadv_tt_local_entry *tt_local_entry = NULL;
        struct batadv_tt_global_entry *tt_global_entry = NULL;
        struct batadv_orig_node *orig_node = NULL;
        struct batadv_tt_orig_list_entry *best_entry;
+       bool ap_isolation_enabled = false;
+       struct batadv_softif_vlan *vlan;
+
+       /* if the AP isolation is requested on a VLAN, then check for its
+        * setting in the proper VLAN private data structure
+        */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (vlan) {
+               ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
+               batadv_softif_vlan_free_ref(vlan);
+       }
 
-       if (src && atomic_read(&bat_priv->ap_isolation)) {
-               tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
+       if (src && ap_isolation_enabled) {
+               tt_local_entry = batadv_tt_local_hash_find(bat_priv, src, vid);
                if (!tt_local_entry ||
                    (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING))
                        goto out;
        }
 
-       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
        if (!tt_global_entry)
                goto out;
 
@@ -1385,7 +1909,7 @@ struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
                goto out;
 
        rcu_read_lock();
-       best_entry = batadv_transtable_best_orig(tt_global_entry);
+       best_entry = batadv_transtable_best_orig(bat_priv, tt_global_entry);
        /* found anything? */
        if (best_entry)
                orig_node = best_entry->orig_node;
@@ -1402,17 +1926,40 @@ out:
        return orig_node;
 }
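
AP isolation is now looked up per VLAN: the soft-interface VLAN object is fetched first and its own ap_isolation switch decides whether the source and destination clients may talk. A small sketch of that per-VLAN policy lookup; the data layout and helper names are invented for the example:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct softif_vlan { unsigned short vid; bool ap_isolation; };

/* tiny stand-in for a per-VLAN settings lookup */
static const struct softif_vlan *vlan_get(const struct softif_vlan *vlans,
                                          size_t n, unsigned short vid)
{
        for (size_t i = 0; i < n; i++)
                if (vlans[i].vid == vid)
                        return &vlans[i];
        return NULL;
}

/* may two wireless clients on this VLAN talk to each other? */
static bool forwarding_allowed(const struct softif_vlan *vlans, size_t n,
                               unsigned short vid, bool src_is_wifi,
                               bool dst_is_wifi)
{
        const struct softif_vlan *vlan = vlan_get(vlans, n, vid);
        bool isolate = vlan ? vlan->ap_isolation : false;

        return !(isolate && src_is_wifi && dst_is_wifi);
}

int main(void)
{
        struct softif_vlan vlans[] = { { 1, true }, { 2, false } };

        printf("vid 1: %d\n", forwarding_allowed(vlans, 2, 1, true, true));
        printf("vid 2: %d\n", forwarding_allowed(vlans, 2, 2, true, true));
        return 0;
}
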
 
-/* Calculates the checksum of the local table of a given orig_node */
-static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
-                                    struct batadv_orig_node *orig_node)
+/**
+ * batadv_tt_global_crc - calculates the checksum of the local table belonging
+ *  to the given orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: originator for which the CRC should be computed
+ * @vid: VLAN identifier for which the CRC32 has to be computed
+ *
+ * This function computes the checksum for the global table corresponding to a
+ * specific originator. In particular, the checksum is computed as follows: For
+ * each client connected to the originator the CRC32C of the MAC address and the
+ * VID is computed and then all the CRC32Cs of the various clients are xor'ed
+ * together.
+ *
+ * The idea behind this is that CRC32C should be used as much as possible in order to
+ * produce a unique hash of the table, but since the order which is used to feed
+ * the CRC32C function affects the result and since every node in the network
+ * probably sorts the clients differently, the hash function cannot be directly
+ * computed over the entire table. Hence the CRC32C is used only on
+ * the single client entry, while all the results are then xor'ed together
+ * because the XOR operation can combine them all while trying to reduce the
+ * noise as much as possible.
+ *
+ * Returns the checksum of the global table of a given originator.
+ */
+static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
+                                    struct batadv_orig_node *orig_node,
+                                    unsigned short vid)
 {
-       uint16_t total = 0, total_one;
        struct batadv_hashtable *hash = bat_priv->tt.global_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_global_entry *tt_global;
        struct hlist_head *head;
-       uint32_t i;
-       int j;
+       uint32_t i, crc_tmp, crc = 0;
+       uint8_t flags;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -1422,6 +1969,12 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                        tt_global = container_of(tt_common,
                                                 struct batadv_tt_global_entry,
                                                 common);
+                       /* compute the CRC only for entries belonging to the
+                        * VLAN identified by the vid passed as parameter
+                        */
+                       if (tt_common->vid != vid)
+                               continue;
+
                        /* Roaming clients are in the global table for
                         * consistency only. They don't have to be
                         * taken into account while computing the
@@ -1443,48 +1996,74 @@ static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
                                                             orig_node))
                                continue;
 
-                       total_one = 0;
-                       for (j = 0; j < ETH_ALEN; j++)
-                               total_one = crc16_byte(total_one,
-                                                      tt_common->addr[j]);
-                       total ^= total_one;
+                       crc_tmp = crc32c(0, &tt_common->vid,
+                                        sizeof(tt_common->vid));
+
+                       /* compute the CRC on flags that have to be kept in sync
+                        * among nodes
+                        */
+                       flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+                       crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+                       crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
                }
                rcu_read_unlock();
        }
 
-       return total;
+       return crc;
 }
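
The order-independent checksum described above can be reproduced in isolation: compute a CRC32C over (vid, synced flags, MAC) for each client and XOR the per-client results, so the outcome does not depend on iteration order. The bitwise CRC-32C below is a simplified stand-in for the kernel's crc32c() (seed and inversion conventions are not modelled), so absolute values differ; only the technique is illustrated:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* plain bitwise CRC-32C update (Castagnoli, reflected poly 0x82F63B78) */
static uint32_t crc32c_update(uint32_t crc, const void *data, size_t len)
{
        const uint8_t *p = data;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78u : crc >> 1;
        }
        return crc;
}

struct client { uint16_t vid; uint8_t flags; uint8_t mac[6]; };

/* XOR of per-client CRCs: the result is independent of iteration order */
static uint32_t table_crc(const struct client *c, size_t n, uint8_t sync_mask)
{
        uint32_t crc = 0;

        for (size_t i = 0; i < n; i++) {
                uint8_t flags = c[i].flags & sync_mask;
                uint32_t tmp = crc32c_update(0, &c[i].vid, sizeof(c[i].vid));

                tmp = crc32c_update(tmp, &flags, sizeof(flags));
                crc ^= crc32c_update(tmp, c[i].mac, sizeof(c[i].mac));
        }
        return crc;
}

int main(void)
{
        struct client a[] = {
                { 1, 0x10, { 0, 1, 2, 3, 4, 5 } },
                { 1, 0x00, { 6, 7, 8, 9, 10, 11 } },
        };
        struct client b[] = { a[1], a[0] };     /* same clients, other order */

        printf("%08x == %08x\n", (unsigned int)table_crc(a, 2, 0xf0),
               (unsigned int)table_crc(b, 2, 0xf0));
        return 0;
}
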
 
-/* Calculates the checksum of the local table */
-static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
+/**
+ * batadv_tt_local_crc - calculates the checksum of the local table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: VLAN identifier for which the CRC32 has to be computed
+ *
+ * For details about the computation, please refer to the documentation for
+ * batadv_tt_global_crc().
+ *
+ * Returns the checksum of the local table
+ */
+static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
+                                   unsigned short vid)
 {
-       uint16_t total = 0, total_one;
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct hlist_head *head;
-       uint32_t i;
-       int j;
+       uint32_t i, crc_tmp, crc = 0;
+       uint8_t flags;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(tt_common, head, hash_entry) {
+                       /* compute the CRC only for entries belonging to the
+                        * VLAN identified by vid
+                        */
+                       if (tt_common->vid != vid)
+                               continue;
+
                        /* not yet committed clients have not to be taken into
                         * account while computing the CRC
                         */
                        if (tt_common->flags & BATADV_TT_CLIENT_NEW)
                                continue;
-                       total_one = 0;
-                       for (j = 0; j < ETH_ALEN; j++)
-                               total_one = crc16_byte(total_one,
-                                                      tt_common->addr[j]);
-                       total ^= total_one;
+
+                       crc_tmp = crc32c(0, &tt_common->vid,
+                                        sizeof(tt_common->vid));
+
+                       /* compute the CRC on flags that have to be kept in sync
+                        * among nodes
+                        */
+                       flags = tt_common->flags & BATADV_TT_SYNC_MASK;
+                       crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
+
+                       crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
                }
                rcu_read_unlock();
        }
 
-       return total;
+       return crc;
 }
 
 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
@@ -1503,11 +2082,9 @@ static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
 
 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
                                       struct batadv_orig_node *orig_node,
-                                      const unsigned char *tt_buff,
-                                      uint8_t tt_num_changes)
+                                      const void *tt_buff,
+                                      uint16_t tt_buff_len)
 {
-       uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
-
        /* Replace the old buffer only if I received something in the
         * last OGM (the OGM could carry no changes)
         */
@@ -1569,9 +2146,14 @@ unlock:
        return tt_req_node;
 }
 
-/* data_ptr is useless here, but has to be kept to respect the prototype */
-static int batadv_tt_local_valid_entry(const void *entry_ptr,
-                                      const void *data_ptr)
+/**
+ * batadv_tt_local_valid - verify that the given tt entry is a valid one
+ * @entry_ptr: to be checked local tt entry
+ * @data_ptr: not used but definition required to satisfy the callback prototype
+ *
+ * Returns 1 if the entry is valid, 0 otherwise.
+ */
+static int batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
 {
        const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
 
@@ -1598,41 +2180,30 @@ static int batadv_tt_global_valid(const void *entry_ptr,
        return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
 }
 
-static struct sk_buff *
-batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
-                             struct batadv_hashtable *hash,
-                             struct batadv_priv *bat_priv,
-                             int (*valid_cb)(const void *, const void *),
-                             void *cb_data)
+/**
+ * batadv_tt_tvlv_generate - fill the tvlv buff with the tt entries from the
+ *  specified tt hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @hash: hash table containing the tt entries
+ * @tt_len: expected tvlv tt data buffer length in number of bytes
+ * @tvlv_buff: pointer to the buffer to fill with the TT data
+ * @valid_cb: function to filter tt change entries
+ * @cb_data: data passed to the filter function as argument
+ */
+static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
+                                   struct batadv_hashtable *hash,
+                                   void *tvlv_buff, uint16_t tt_len,
+                                   int (*valid_cb)(const void *, const void *),
+                                   void *cb_data)
 {
        struct batadv_tt_common_entry *tt_common_entry;
-       struct batadv_tt_query_packet *tt_response;
-       struct batadv_tt_change *tt_change;
+       struct batadv_tvlv_tt_change *tt_change;
        struct hlist_head *head;
-       struct sk_buff *skb = NULL;
-       uint16_t tt_tot, tt_count;
-       ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
+       uint16_t tt_tot, tt_num_entries = 0;
        uint32_t i;
-       size_t len;
 
-       if (tt_query_size + tt_len > bat_priv->soft_iface->mtu) {
-               tt_len = bat_priv->soft_iface->mtu - tt_query_size;
-               tt_len -= tt_len % sizeof(struct batadv_tt_change);
-       }
-       tt_tot = tt_len / sizeof(struct batadv_tt_change);
-
-       len = tt_query_size + tt_len;
-       skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-       if (!skb)
-               goto out;
-
-       skb->priority = TC_PRIO_CONTROL;
-       skb_reserve(skb, ETH_HLEN);
-       tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
-       tt_response->ttvn = ttvn;
-
-       tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
-       tt_count = 0;
+       tt_tot = batadv_tt_entries(tt_len);
+       tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
 
        rcu_read_lock();
        for (i = 0; i < hash->size; i++) {
@@ -1640,7 +2211,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
 
                hlist_for_each_entry_rcu(tt_common_entry,
                                         head, hash_entry) {
-                       if (tt_count == tt_tot)
+                       if (tt_tot == tt_num_entries)
                                break;
 
                        if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
@@ -1649,33 +2220,123 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
                        memcpy(tt_change->addr, tt_common_entry->addr,
                               ETH_ALEN);
                        tt_change->flags = tt_common_entry->flags;
+                       tt_change->vid = htons(tt_common_entry->vid);
+                       tt_change->reserved = 0;
 
-                       tt_count++;
+                       tt_num_entries++;
                        tt_change++;
                }
        }
        rcu_read_unlock();
+}
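
batadv_tt_tvlv_generate() walks the hash and copies only the entries accepted by the caller-supplied valid_cb, stopping once the reserved space is used up. A standalone sketch of that filter-and-copy pattern; the types and the callback below are illustrative only:

#include <stddef.h>
#include <stdio.h>

struct entry { int id; unsigned int flags; };

#define FLAG_NEW 0x1    /* assumed: not yet committed, must be skipped */

/* filter callback, in the spirit of the valid_cb argument above */
static int entry_is_valid(const struct entry *e, const void *cb_data)
{
        (void)cb_data;
        return !(e->flags & FLAG_NEW);
}

/* copy at most 'max' accepted entries from src into dst, return the count */
static size_t fill_buffer(struct entry *dst, size_t max,
                          const struct entry *src, size_t n,
                          int (*valid)(const struct entry *, const void *),
                          const void *cb_data)
{
        size_t copied = 0;

        for (size_t i = 0; i < n && copied < max; i++) {
                if (valid && !valid(&src[i], cb_data))
                        continue;
                dst[copied++] = src[i];
        }
        return copied;
}

int main(void)
{
        struct entry table[] = { { 1, 0 }, { 2, FLAG_NEW }, { 3, 0 }, { 4, 0 } };
        struct entry out[2];
        size_t n = fill_buffer(out, 2, table, 4, entry_is_valid, NULL);

        printf("copied %zu entries, first id %d\n", n, out[0].id);
        return 0;
}
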
 
-       /* store in the message the number of entries we have successfully
-        * copied
-        */
-       tt_response->tt_data = htons(tt_count);
+/**
+ * batadv_tt_global_check_crc - check if all the CRCs are correct
+ * @orig_node: originator for which the CRCs have to be checked
+ * @tt_vlan: pointer to the first tvlv VLAN entry
+ * @num_vlan: number of tvlv VLAN entries
+ *
+ * Return true if all the received CRCs match the locally stored ones, false
+ * otherwise
+ */
+static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
+                                      struct batadv_tvlv_tt_vlan_data *tt_vlan,
+                                      uint16_t num_vlan)
+{
+       struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
+       struct batadv_orig_node_vlan *vlan;
+       int i;
 
-out:
-       return skb;
+       /* check if each received CRC matches the locally stored one */
+       for (i = 0; i < num_vlan; i++) {
+               tt_vlan_tmp = tt_vlan + i;
+
+               /* if orig_node is a backbone node for this VLAN, don't check
+                * the CRC as we ignore all the global entries over it
+                */
+               if (batadv_bla_is_backbone_gw_orig(orig_node->bat_priv,
+                                                  orig_node->orig,
+                                                  ntohs(tt_vlan_tmp->vid)))
+                       continue;
+
+               vlan = batadv_orig_node_vlan_get(orig_node,
+                                                ntohs(tt_vlan_tmp->vid));
+               if (!vlan)
+                       return false;
+
+               if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc))
+                       return false;
+       }
+
+       return true;
+}
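
batadv_tt_global_check_crc() walks the list of advertised (vid, crc) pairs and declares the tables consistent only if every pair matches a locally known VLAN. The snippet below is a minimal userspace sketch of that comparison over plain arrays; the demo_* types and helpers are hypothetical stand-ins, not kernel API.

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

struct demo_vlan_crc {			/* advertised by the originator */
	uint16_t vid;			/* network byte order */
	uint32_t crc;			/* network byte order */
};

struct demo_local_vlan {		/* locally computed state */
	unsigned short vid;		/* host byte order */
	uint32_t crc;			/* host byte order */
};

static const struct demo_local_vlan *
demo_vlan_get(const struct demo_local_vlan *vlans, int n, unsigned short vid)
{
	int i;

	for (i = 0; i < n; i++)
		if (vlans[i].vid == vid)
			return &vlans[i];

	return NULL;
}

/* Return true only if every advertised (vid, crc) pair matches a locally
 * known VLAN with the same CRC.
 */
static bool demo_check_crc(const struct demo_vlan_crc *recv, int num_recv,
			   const struct demo_local_vlan *local, int num_local)
{
	const struct demo_local_vlan *vlan;
	int i;

	for (i = 0; i < num_recv; i++) {
		vlan = demo_vlan_get(local, num_local, ntohs(recv[i].vid));
		if (!vlan || vlan->crc != ntohl(recv[i].crc))
			return false;
	}

	return true;
}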
+
+/**
+ * batadv_tt_local_update_crc - update all the local CRCs
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_tt_local_update_crc(struct batadv_priv *bat_priv)
+{
+       struct batadv_softif_vlan *vlan;
+
+       /* recompute the global CRC for each VLAN */
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+               vlan->tt.crc = batadv_tt_local_crc(bat_priv, vlan->vid);
+       }
+       rcu_read_unlock();
+}
+
+/**
+ * batadv_tt_global_update_crc - update all the global CRCs for this orig_node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the orig_node for which the CRCs have to be updated
+ */
+static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
+                                       struct batadv_orig_node *orig_node)
+{
+       struct batadv_orig_node_vlan *vlan;
+       uint32_t crc;
+
+       /* recompute the global CRC for each VLAN */
+       rcu_read_lock();
+       list_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
+               /* if orig_node is a backbone node for this VLAN, don't compute
+                * the CRC as we ignore all the global entries over it
+                */
+               if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig,
+                                                  vlan->vid))
+                       continue;
+
+               crc = batadv_tt_global_crc(bat_priv, orig_node, vlan->vid);
+               vlan->tt.crc = crc;
+       }
+       rcu_read_unlock();
 }
 
+/**
+ * batadv_send_tt_request - send a TT Request message to a given node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @dst_orig_node: the destination of the message
+ * @ttvn: the version number that the source of the message is looking for
+ * @tt_vlan: pointer to the first tvlv VLAN object to request
+ * @num_vlan: number of tvlv VLAN entries
+ * @full_table: ask for the entire translation table if true, otherwise only
+ *  for the last TT diff
+ */
 static int batadv_send_tt_request(struct batadv_priv *bat_priv,
                                  struct batadv_orig_node *dst_orig_node,
-                                 uint8_t ttvn, uint16_t tt_crc,
-                                 bool full_table)
+                                 uint8_t ttvn,
+                                 struct batadv_tvlv_tt_vlan_data *tt_vlan,
+                                 uint16_t num_vlan, bool full_table)
 {
-       struct sk_buff *skb = NULL;
-       struct batadv_tt_query_packet *tt_request;
-       struct batadv_hard_iface *primary_if;
+       struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
        struct batadv_tt_req_node *tt_req_node = NULL;
-       int ret = 1;
-       size_t tt_req_len;
+       struct batadv_tvlv_tt_vlan_data *tt_vlan_req;
+       struct batadv_hard_iface *primary_if;
+       bool ret = false;
+       int i, size;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
@@ -1688,157 +2349,171 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
        if (!tt_req_node)
                goto out;
 
-       skb = netdev_alloc_skb_ip_align(NULL, sizeof(*tt_request) + ETH_HLEN);
-       if (!skb)
+       size = sizeof(*tvlv_tt_data) + sizeof(*tt_vlan_req) * num_vlan;
+       tvlv_tt_data = kzalloc(size, GFP_ATOMIC);
+       if (!tvlv_tt_data)
                goto out;
 
-       skb->priority = TC_PRIO_CONTROL;
-       skb_reserve(skb, ETH_HLEN);
+       tvlv_tt_data->flags = BATADV_TT_REQUEST;
+       tvlv_tt_data->ttvn = ttvn;
+       tvlv_tt_data->num_vlan = htons(num_vlan);
 
-       tt_req_len = sizeof(*tt_request);
-       tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
+       /* send all the CRCs within the request. This is needed by intermediate
+        * nodes to ensure they have the correct table before replying
+        */
+       tt_vlan_req = (struct batadv_tvlv_tt_vlan_data *)(tvlv_tt_data + 1);
+       for (i = 0; i < num_vlan; i++) {
+               tt_vlan_req->vid = tt_vlan->vid;
+               tt_vlan_req->crc = tt_vlan->crc;
 
-       tt_request->header.packet_type = BATADV_TT_QUERY;
-       tt_request->header.version = BATADV_COMPAT_VERSION;
-       memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
-       tt_request->header.ttl = BATADV_TTL;
-       tt_request->ttvn = ttvn;
-       tt_request->tt_data = htons(tt_crc);
-       tt_request->flags = BATADV_TT_REQUEST;
+               tt_vlan_req++;
+               tt_vlan++;
+       }
 
        if (full_table)
-               tt_request->flags |= BATADV_TT_FULL_TABLE;
+               tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv, "Sending TT_REQUEST to %pM [%c]\n",
-                  dst_orig_node->orig, (full_table ? 'F' : '.'));
+                  dst_orig_node->orig, full_table ? 'F' : '.');
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
-
-       if (batadv_send_skb_to_orig(skb, dst_orig_node, NULL) != NET_XMIT_DROP)
-               ret = 0;
+       batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+                                dst_orig_node->orig, BATADV_TVLV_TT, 1,
+                                tvlv_tt_data, size);
+       ret = true;
 
 out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
-       if (ret)
-               kfree_skb(skb);
        if (ret && tt_req_node) {
                spin_lock_bh(&bat_priv->tt.req_list_lock);
                list_del(&tt_req_node->list);
                spin_unlock_bh(&bat_priv->tt.req_list_lock);
                kfree(tt_req_node);
        }
+       kfree(tvlv_tt_data);
        return ret;
 }
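
The request payload built here is a small fixed header (flags, ttvn, num_vlan) followed by one CRC record per VLAN, all handed to the tvlv unicast API as one buffer. The sketch below shows how such a buffer could be sized and filled in plain C, assuming simplified demo_* layouts rather than the real batadv_tvlv_* structures.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct demo_tt_data {			/* simplified request header */
	uint8_t flags;
	uint8_t ttvn;
	uint16_t num_vlan;		/* network byte order */
};

struct demo_tt_vlan {			/* simplified per-VLAN record */
	uint32_t crc;			/* network byte order */
	uint16_t vid;			/* network byte order */
	uint16_t reserved;
};

/* Allocate and fill a request buffer: header first, then the VLAN records
 * back to back. Returns NULL on allocation failure.
 */
static void *demo_build_tt_request(uint8_t ttvn, uint8_t flags,
				   const struct demo_tt_vlan *vlans,
				   uint16_t num_vlan, size_t *len)
{
	struct demo_tt_data *hdr;
	void *buf;

	*len = sizeof(*hdr) + (size_t)num_vlan * sizeof(*vlans);
	buf = calloc(1, *len);
	if (!buf)
		return NULL;

	hdr = buf;
	hdr->flags = flags;
	hdr->ttvn = ttvn;
	hdr->num_vlan = htons(num_vlan);

	memcpy(hdr + 1, vlans, (size_t)num_vlan * sizeof(*vlans));

	return buf;
}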
 
-static bool
-batadv_send_other_tt_response(struct batadv_priv *bat_priv,
-                             struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_other_tt_response - send reply to tt request concerning another
+ *  node's translation table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ * @req_dst: mac address of tt request recipient
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+                                         struct batadv_tvlv_tt_data *tt_data,
+                                         uint8_t *req_src, uint8_t *req_dst)
 {
        struct batadv_orig_node *req_dst_orig_node;
        struct batadv_orig_node *res_dst_orig_node = NULL;
-       uint8_t orig_ttvn, req_ttvn, ttvn;
-       int res, ret = false;
-       unsigned char *tt_buff;
-       bool full_table;
-       uint16_t tt_len, tt_tot;
-       struct sk_buff *skb = NULL;
-       struct batadv_tt_query_packet *tt_response;
-       uint8_t *packet_pos;
-       size_t len;
+       struct batadv_tvlv_tt_change *tt_change;
+       struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
+       struct batadv_tvlv_tt_vlan_data *tt_vlan;
+       bool ret = false, full_table;
+       uint8_t orig_ttvn, req_ttvn;
+       uint16_t tvlv_len;
+       int32_t tt_len;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
-                  tt_request->src, tt_request->ttvn, tt_request->dst,
-                  (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+                  req_src, tt_data->ttvn, req_dst,
+                  (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
        /* Let's get the orig node of the REAL destination */
-       req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
+       req_dst_orig_node = batadv_orig_hash_find(bat_priv, req_dst);
        if (!req_dst_orig_node)
                goto out;
 
-       res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
+       res_dst_orig_node = batadv_orig_hash_find(bat_priv, req_src);
        if (!res_dst_orig_node)
                goto out;
 
        orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
-       req_ttvn = tt_request->ttvn;
+       req_ttvn = tt_data->ttvn;
 
-       /* I don't have the requested data */
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
+       /* this node doesn't have the requested data */
        if (orig_ttvn != req_ttvn ||
-           tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
+           !batadv_tt_global_check_crc(req_dst_orig_node, tt_vlan,
+                                       ntohs(tt_data->num_vlan)))
                goto out;
 
        /* If the full table has been explicitly requested */
-       if (tt_request->flags & BATADV_TT_FULL_TABLE ||
+       if (tt_data->flags & BATADV_TT_FULL_TABLE ||
            !req_dst_orig_node->tt_buff)
                full_table = true;
        else
                full_table = false;
 
-       /* In this version, fragmentation is not implemented, then
-        * I'll send only one packet with as much TT entries as I can
+       /* TT fragmentation hasn't been implemented yet, so send only as many
+        * TT entries as fit into a single packet
         */
        if (!full_table) {
                spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
                tt_len = req_dst_orig_node->tt_buff_len;
-               tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
-               len = sizeof(*tt_response) + tt_len;
-               skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-               if (!skb)
+               tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
+                                                             &tvlv_tt_data,
+                                                             &tt_change,
+                                                             &tt_len);
+               if (!tt_len)
                        goto unlock;
 
-               skb->priority = TC_PRIO_CONTROL;
-               skb_reserve(skb, ETH_HLEN);
-               packet_pos = skb_put(skb, len);
-               tt_response = (struct batadv_tt_query_packet *)packet_pos;
-               tt_response->ttvn = req_ttvn;
-               tt_response->tt_data = htons(tt_tot);
-
-               tt_buff = skb->data + sizeof(*tt_response);
                /* Copy the last orig_node's OGM buffer */
-               memcpy(tt_buff, req_dst_orig_node->tt_buff,
+               memcpy(tt_change, req_dst_orig_node->tt_buff,
                       req_dst_orig_node->tt_buff_len);
-
                spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
        } else {
-               tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
-               tt_len *= sizeof(struct batadv_tt_change);
-               ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
-
-               skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                   bat_priv->tt.global_hash,
-                                                   bat_priv,
-                                                   batadv_tt_global_valid,
-                                                   req_dst_orig_node);
-               if (!skb)
+               /* allocate the tvlv, put the tt_data and all the tt_vlan_data
+                * in the initial part
+                */
+               tt_len = -1;
+               tvlv_len = batadv_tt_prepare_tvlv_global_data(req_dst_orig_node,
+                                                             &tvlv_tt_data,
+                                                             &tt_change,
+                                                             &tt_len);
+               if (!tt_len)
                        goto out;
 
-               tt_response = (struct batadv_tt_query_packet *)skb->data;
+               /* fill the rest of the tvlv with the real TT entries */
+               batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
+                                       tt_change, tt_len,
+                                       batadv_tt_global_valid,
+                                       req_dst_orig_node);
+       }
+
+       /* Don't send the response if it exceeds the maximum packet size. */
+       tt_len = sizeof(struct batadv_unicast_tvlv_packet) + tvlv_len;
+       if (tt_len > atomic_read(&bat_priv->packet_size_max)) {
+               net_ratelimited_function(batadv_info, bat_priv->soft_iface,
+                                        "Ignoring TT_REQUEST from %pM; Response size exceeds max packet size.\n",
+                                        res_dst_orig_node->orig);
+               goto out;
        }
 
-       tt_response->header.packet_type = BATADV_TT_QUERY;
-       tt_response->header.version = BATADV_COMPAT_VERSION;
-       tt_response->header.ttl = BATADV_TTL;
-       memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
-       memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
-       tt_response->flags = BATADV_TT_RESPONSE;
+       tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+       tvlv_tt_data->ttvn = req_ttvn;
 
        if (full_table)
-               tt_response->flags |= BATADV_TT_FULL_TABLE;
+               tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Sending TT_RESPONSE %pM for %pM (ttvn: %u)\n",
-                  res_dst_orig_node->orig, req_dst_orig_node->orig, req_ttvn);
+                  "Sending TT_RESPONSE %pM for %pM [%c] (ttvn: %u)\n",
+                  res_dst_orig_node->orig, req_dst_orig_node->orig,
+                  full_table ? 'F' : '.', req_ttvn);
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-       res = batadv_send_skb_to_orig(skb, res_dst_orig_node, NULL);
-       if (res != NET_XMIT_DROP)
-               ret = true;
+       batadv_tvlv_unicast_send(bat_priv, req_dst_orig_node->orig,
+                                req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
+                                tvlv_len);
 
+       ret = true;
        goto out;
 
 unlock:
@@ -1849,37 +2524,43 @@ out:
                batadv_orig_node_free_ref(res_dst_orig_node);
        if (req_dst_orig_node)
                batadv_orig_node_free_ref(req_dst_orig_node);
-       if (!ret)
-               kfree_skb(skb);
+       kfree(tvlv_tt_data);
        return ret;
 }
 
-static bool
-batadv_send_my_tt_response(struct batadv_priv *bat_priv,
-                          struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_my_tt_response - send reply to tt request concerning this node's
+ *  translation table
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+                                      struct batadv_tvlv_tt_data *tt_data,
+                                      uint8_t *req_src)
 {
-       struct batadv_orig_node *orig_node;
+       struct batadv_tvlv_tt_data *tvlv_tt_data = NULL;
        struct batadv_hard_iface *primary_if = NULL;
-       uint8_t my_ttvn, req_ttvn, ttvn;
-       int ret = false;
-       unsigned char *tt_buff;
+       struct batadv_tvlv_tt_change *tt_change;
+       struct batadv_orig_node *orig_node;
+       uint8_t my_ttvn, req_ttvn;
+       uint16_t tvlv_len;
        bool full_table;
-       uint16_t tt_len, tt_tot;
-       struct sk_buff *skb = NULL;
-       struct batadv_tt_query_packet *tt_response;
-       uint8_t *packet_pos;
-       size_t len;
+       int32_t tt_len;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
-                  tt_request->src, tt_request->ttvn,
-                  (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+                  req_src, tt_data->ttvn,
+                  (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
+       spin_lock_bh(&bat_priv->tt.commit_lock);
 
        my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
-       req_ttvn = tt_request->ttvn;
+       req_ttvn = tt_data->ttvn;
 
-       orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
+       orig_node = batadv_orig_hash_find(bat_priv, req_src);
        if (!orig_node)
                goto out;
 
@@ -1890,103 +2571,104 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
        /* If the full table has been explicitly requested or the gap
         * is too big send the whole local translation table
         */
-       if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
+       if (tt_data->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
            !bat_priv->tt.last_changeset)
                full_table = true;
        else
                full_table = false;
 
-       /* In this version, fragmentation is not implemented, then
-        * I'll send only one packet with as much TT entries as I can
+       /* TT fragmentation hasn't been implemented yet, so send only as many
+        * TT entries as fit into a single packet
         */
        if (!full_table) {
                spin_lock_bh(&bat_priv->tt.last_changeset_lock);
-               tt_len = bat_priv->tt.last_changeset_len;
-               tt_tot = tt_len / sizeof(struct batadv_tt_change);
 
-               len = sizeof(*tt_response) + tt_len;
-               skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-               if (!skb)
+               tt_len = bat_priv->tt.last_changeset_len;
+               tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
+                                                            &tvlv_tt_data,
+                                                            &tt_change,
+                                                            &tt_len);
+               if (!tt_len)
                        goto unlock;
 
-               skb->priority = TC_PRIO_CONTROL;
-               skb_reserve(skb, ETH_HLEN);
-               packet_pos = skb_put(skb, len);
-               tt_response = (struct batadv_tt_query_packet *)packet_pos;
-               tt_response->ttvn = req_ttvn;
-               tt_response->tt_data = htons(tt_tot);
-
-               tt_buff = skb->data + sizeof(*tt_response);
-               memcpy(tt_buff, bat_priv->tt.last_changeset,
+               /* Copy the last local changeset */
+               memcpy(tt_change, bat_priv->tt.last_changeset,
                       bat_priv->tt.last_changeset_len);
                spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
        } else {
-               tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
-               tt_len *= sizeof(struct batadv_tt_change);
-               ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
-
-               skb = batadv_tt_response_fill_table(tt_len, ttvn,
-                                                   bat_priv->tt.local_hash,
-                                                   bat_priv,
-                                                   batadv_tt_local_valid_entry,
-                                                   NULL);
-               if (!skb)
+               req_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
+
+               /* allocate the tvlv, put the tt_data and all the tt_vlan_data
+                * in the initial part
+                */
+               tt_len = -1;
+               tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv,
+                                                            &tvlv_tt_data,
+                                                            &tt_change,
+                                                            &tt_len);
+               if (!tt_len)
                        goto out;
 
-               tt_response = (struct batadv_tt_query_packet *)skb->data;
+               /* fill the rest of the tvlv with the real TT entries */
+               batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
+                                       tt_change, tt_len,
+                                       batadv_tt_local_valid, NULL);
        }
 
-       tt_response->header.packet_type = BATADV_TT_QUERY;
-       tt_response->header.version = BATADV_COMPAT_VERSION;
-       tt_response->header.ttl = BATADV_TTL;
-       memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
-       tt_response->flags = BATADV_TT_RESPONSE;
+       tvlv_tt_data->flags = BATADV_TT_RESPONSE;
+       tvlv_tt_data->ttvn = req_ttvn;
 
        if (full_table)
-               tt_response->flags |= BATADV_TT_FULL_TABLE;
+               tvlv_tt_data->flags |= BATADV_TT_FULL_TABLE;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Sending TT_RESPONSE to %pM [%c]\n",
-                  orig_node->orig,
-                  (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
+                  "Sending TT_RESPONSE to %pM [%c] (ttvn: %u)\n",
+                  orig_node->orig, full_table ? 'F' : '.', req_ttvn);
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-               ret = true;
+       batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+                                req_src, BATADV_TVLV_TT, 1, tvlv_tt_data,
+                                tvlv_len);
+
        goto out;
 
 unlock:
        spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
 out:
+       spin_unlock_bh(&bat_priv->tt.commit_lock);
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
-       if (!ret)
-               kfree_skb(skb);
-       /* This packet was for me, so it doesn't need to be re-routed */
+       kfree(tvlv_tt_data);
+       /* The packet was for this host, so it doesn't need to be re-routed */
        return true;
 }
 
-bool batadv_send_tt_response(struct batadv_priv *bat_priv,
-                            struct batadv_tt_query_packet *tt_request)
+/**
+ * batadv_send_tt_response - send reply to tt request
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @req_src: mac address of tt request sender
+ * @req_dst: mac address of tt request recipient
+ *
+ * Returns true if tt request reply was sent, false otherwise.
+ */
+static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
+                                   struct batadv_tvlv_tt_data *tt_data,
+                                   uint8_t *req_src, uint8_t *req_dst)
 {
-       if (batadv_is_my_mac(bat_priv, tt_request->dst)) {
-               /* don't answer backbone gws! */
-               if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
-                       return true;
-
-               return batadv_send_my_tt_response(bat_priv, tt_request);
-       } else {
-               return batadv_send_other_tt_response(bat_priv, tt_request);
-       }
+       if (batadv_is_my_mac(bat_priv, req_dst))
+               return batadv_send_my_tt_response(bat_priv, tt_data, req_src);
+       else
+               return batadv_send_other_tt_response(bat_priv, tt_data,
+                                                    req_src, req_dst);
 }
 
 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
                                      struct batadv_orig_node *orig_node,
-                                     struct batadv_tt_change *tt_change,
+                                     struct batadv_tvlv_tt_change *tt_change,
                                      uint16_t tt_num_changes, uint8_t ttvn)
 {
        int i;
@@ -1997,11 +2679,13 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
                        roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
                        batadv_tt_global_del(bat_priv, orig_node,
                                             (tt_change + i)->addr,
+                                            ntohs((tt_change + i)->vid),
                                             "tt removed by changes",
                                             roams);
                } else {
                        if (!batadv_tt_global_add(bat_priv, orig_node,
                                                  (tt_change + i)->addr,
+                                                 ntohs((tt_change + i)->vid),
                                                  (tt_change + i)->flags, ttvn))
                                /* In case of problem while storing a
                                 * global_entry, we stop the updating
@@ -2016,21 +2700,22 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
 }
 
 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
-                                 struct batadv_tt_query_packet *tt_response)
+                                 struct batadv_tvlv_tt_change *tt_change,
+                                 uint8_t ttvn, uint8_t *resp_src,
+                                 uint16_t num_entries)
 {
        struct batadv_orig_node *orig_node;
 
-       orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
+       orig_node = batadv_orig_hash_find(bat_priv, resp_src);
        if (!orig_node)
                goto out;
 
        /* Purge the old table first.. */
-       batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
+       batadv_tt_global_del_orig(bat_priv, orig_node, -1,
+                                 "Received full table");
 
-       _batadv_tt_update_changes(bat_priv, orig_node,
-                                 (struct batadv_tt_change *)(tt_response + 1),
-                                 ntohs(tt_response->tt_data),
-                                 tt_response->ttvn);
+       _batadv_tt_update_changes(bat_priv, orig_node, tt_change, num_entries,
+                                 ttvn);
 
        spin_lock_bh(&orig_node->tt_buff_lock);
        kfree(orig_node->tt_buff);
@@ -2038,7 +2723,7 @@ static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
        orig_node->tt_buff = NULL;
        spin_unlock_bh(&orig_node->tt_buff_lock);
 
-       atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
+       atomic_set(&orig_node->last_ttvn, ttvn);
 
 out:
        if (orig_node)
@@ -2048,22 +2733,31 @@ out:
 static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
                                     struct batadv_orig_node *orig_node,
                                     uint16_t tt_num_changes, uint8_t ttvn,
-                                    struct batadv_tt_change *tt_change)
+                                    struct batadv_tvlv_tt_change *tt_change)
 {
        _batadv_tt_update_changes(bat_priv, orig_node, tt_change,
                                  tt_num_changes, ttvn);
 
-       batadv_tt_save_orig_buffer(bat_priv, orig_node,
-                                  (unsigned char *)tt_change, tt_num_changes);
+       batadv_tt_save_orig_buffer(bat_priv, orig_node, tt_change,
+                                  batadv_tt_len(tt_num_changes));
        atomic_set(&orig_node->last_ttvn, ttvn);
 }
 
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
+/**
+ * batadv_is_my_client - check if a client is served by the local node
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to check
+ * @vid: VLAN identifier
+ *
+ * Returns true if the client is served by this node, false otherwise.
+ */
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+                        unsigned short vid)
 {
        struct batadv_tt_local_entry *tt_local_entry;
        bool ret = false;
 
-       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
                goto out;
        /* Check if the client has been logically deleted (but is kept for
@@ -2079,72 +2773,68 @@ out:
        return ret;
 }
 
-void batadv_handle_tt_response(struct batadv_priv *bat_priv,
-                              struct batadv_tt_query_packet *tt_response)
+/**
+ * batadv_handle_tt_response - process incoming tt reply
+ * @bat_priv: the bat priv with all the soft interface information
+ * @tt_data: tt data containing the tt request information
+ * @resp_src: mac address of tt reply sender
+ * @num_entries: number of tt change entries appended to the tt data
+ */
+static void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+                                     struct batadv_tvlv_tt_data *tt_data,
+                                     uint8_t *resp_src, uint16_t num_entries)
 {
        struct batadv_tt_req_node *node, *safe;
        struct batadv_orig_node *orig_node = NULL;
-       struct batadv_tt_change *tt_change;
+       struct batadv_tvlv_tt_change *tt_change;
+       uint8_t *tvlv_ptr = (uint8_t *)tt_data;
+       uint16_t change_offset;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
-                  tt_response->src, tt_response->ttvn,
-                  ntohs(tt_response->tt_data),
-                  (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
-
-       /* we should have never asked a backbone gw */
-       if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
-               goto out;
+                  resp_src, tt_data->ttvn, num_entries,
+                  (tt_data->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
 
-       orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
+       orig_node = batadv_orig_hash_find(bat_priv, resp_src);
        if (!orig_node)
                goto out;
 
-       if (tt_response->flags & BATADV_TT_FULL_TABLE) {
-               batadv_tt_fill_gtable(bat_priv, tt_response);
+       spin_lock_bh(&orig_node->tt_lock);
+
+       change_offset = sizeof(struct batadv_tvlv_tt_vlan_data);
+       change_offset *= ntohs(tt_data->num_vlan);
+       change_offset += sizeof(*tt_data);
+       tvlv_ptr += change_offset;
+
+       tt_change = (struct batadv_tvlv_tt_change *)tvlv_ptr;
+       if (tt_data->flags & BATADV_TT_FULL_TABLE) {
+               batadv_tt_fill_gtable(bat_priv, tt_change, tt_data->ttvn,
+                                     resp_src, num_entries);
        } else {
-               tt_change = (struct batadv_tt_change *)(tt_response + 1);
-               batadv_tt_update_changes(bat_priv, orig_node,
-                                        ntohs(tt_response->tt_data),
-                                        tt_response->ttvn, tt_change);
+               batadv_tt_update_changes(bat_priv, orig_node, num_entries,
+                                        tt_data->ttvn, tt_change);
        }
 
+       /* Recalculate the CRC for this orig_node and store it */
+       batadv_tt_global_update_crc(bat_priv, orig_node);
+
+       spin_unlock_bh(&orig_node->tt_lock);
+
        /* Delete the tt_req_node from pending tt_requests list */
        spin_lock_bh(&bat_priv->tt.req_list_lock);
        list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
-               if (!batadv_compare_eth(node->addr, tt_response->src))
+               if (!batadv_compare_eth(node->addr, resp_src))
                        continue;
                list_del(&node->list);
                kfree(node);
        }
-       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 
-       /* Recalculate the CRC for this orig_node and store it */
-       orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
+       spin_unlock_bh(&bat_priv->tt.req_list_lock);
 out:
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
 }
 
-int batadv_tt_init(struct batadv_priv *bat_priv)
-{
-       int ret;
-
-       ret = batadv_tt_local_init(bat_priv);
-       if (ret < 0)
-               return ret;
-
-       ret = batadv_tt_global_init(bat_priv);
-       if (ret < 0)
-               return ret;
-
-       INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
-                          msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
-
-       return 1;
-}
-
 static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
 {
        struct batadv_tt_roam_node *node, *safe;
@@ -2225,14 +2915,28 @@ unlock:
        return ret;
 }
 
+/**
+ * batadv_send_roam_adv - send a roaming advertisement message
+ * @bat_priv: the bat priv with all the soft interface information
+ * @client: mac address of the roaming client
+ * @vid: VLAN identifier
+ * @orig_node: message destination
+ *
+ * Send a ROAMING_ADV message to the node which was previously serving this
+ * client. This is done to inform the node that from now on all traffic destined
+ * for this particular roamed client has to be forwarded to the sender of the
+ * roaming message.
+ */
 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+                                unsigned short vid,
                                 struct batadv_orig_node *orig_node)
 {
-       struct sk_buff *skb = NULL;
-       struct batadv_roam_adv_packet *roam_adv_packet;
-       int ret = 1;
        struct batadv_hard_iface *primary_if;
-       size_t len = sizeof(*roam_adv_packet);
+       struct batadv_tvlv_roam_adv tvlv_roam;
+
+       primary_if = batadv_primary_if_get_selected(bat_priv);
+       if (!primary_if)
+               goto out;
 
        /* before going on we have to check whether the client has
         * already roamed to us too many times
@@ -2240,40 +2944,22 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
        if (!batadv_tt_check_roam_count(bat_priv, client))
                goto out;
 
-       skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-       if (!skb)
-               goto out;
-
-       skb->priority = TC_PRIO_CONTROL;
-       skb_reserve(skb, ETH_HLEN);
-
-       roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
-
-       roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
-       roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
-       roam_adv_packet->header.ttl = BATADV_TTL;
-       roam_adv_packet->reserved = 0;
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
-       memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
-       batadv_hardif_free_ref(primary_if);
-       memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
-       memcpy(roam_adv_packet->client, client, ETH_ALEN);
-
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Sending ROAMING_ADV to %pM (client %pM)\n",
-                  orig_node->orig, client);
+                  "Sending ROAMING_ADV to %pM (client %pM, vid: %d)\n",
+                  orig_node->orig, client, BATADV_PRINT_VID(vid));
 
        batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
 
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-               ret = 0;
+       memcpy(tvlv_roam.client, client, sizeof(tvlv_roam.client));
+       tvlv_roam.vid = htons(vid);
+
+       batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
+                                orig_node->orig, BATADV_TVLV_ROAM, 1,
+                                &tvlv_roam, sizeof(tvlv_roam));
 
 out:
-       if (ret && skb)
-               kfree_skb(skb);
-       return;
+       if (primary_if)
+               batadv_hardif_free_ref(primary_if);
 }
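
A roaming advertisement carries nothing more than the roamed client's MAC address and its VLAN id, the latter in network byte order. A trivial sketch of filling such a record, with a hypothetical demo_roam_adv type standing in for the kernel structure:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct demo_roam_adv {
	uint8_t client[6];	/* MAC of the roamed client */
	uint16_t vid;		/* network byte order */
};

static void demo_fill_roam_adv(struct demo_roam_adv *adv,
			       const uint8_t client[6], unsigned short vid)
{
	memcpy(adv->client, client, sizeof(adv->client));
	adv->vid = htons(vid);
}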
 
 static void batadv_tt_purge(struct work_struct *work)
@@ -2286,7 +2972,7 @@ static void batadv_tt_purge(struct work_struct *work)
        priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
        bat_priv = container_of(priv_tt, struct batadv_priv, tt);
 
-       batadv_tt_local_purge(bat_priv);
+       batadv_tt_local_purge(bat_priv, BATADV_TT_LOCAL_TIMEOUT);
        batadv_tt_global_purge(bat_priv);
        batadv_tt_req_purge(bat_priv);
        batadv_tt_roam_purge(bat_priv);
@@ -2297,6 +2983,9 @@ static void batadv_tt_purge(struct work_struct *work)
 
 void batadv_tt_free(struct batadv_priv *bat_priv)
 {
+       batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
+       batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
+
        cancel_delayed_work_sync(&bat_priv->tt.work);
 
        batadv_tt_local_table_free(bat_priv);
@@ -2308,19 +2997,25 @@ void batadv_tt_free(struct batadv_priv *bat_priv)
        kfree(bat_priv->tt.last_changeset);
 }
 
-/* This function will enable or disable the specified flags for all the entries
- * in the given hash table and returns the number of modified entries
+/**
+ * batadv_tt_local_set_flags - set or unset the specified flags on the local
+ *  table and possibly count them in the TT size
+ * @bat_priv: the bat priv with all the soft interface information
+ * @flags: the flags to set or unset
+ * @enable: whether to set or unset the flags
+ * @count: whether to increase the TT size by the number of changed entries
  */
-static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
-                                   uint16_t flags, bool enable)
+static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv,
+                                     uint16_t flags, bool enable, bool count)
 {
-       uint32_t i;
+       struct batadv_hashtable *hash = bat_priv->tt.local_hash;
+       struct batadv_tt_common_entry *tt_common_entry;
        uint16_t changed_num = 0;
        struct hlist_head *head;
-       struct batadv_tt_common_entry *tt_common_entry;
+       uint32_t i;
 
        if (!hash)
-               goto out;
+               return;
 
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];
@@ -2338,11 +3033,15 @@ static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
                                tt_common_entry->flags &= ~flags;
                        }
                        changed_num++;
+
+                       if (!count)
+                               continue;
+
+                       batadv_tt_local_size_inc(bat_priv,
+                                                tt_common_entry->vid);
                }
                rcu_read_unlock();
        }
-out:
-       return changed_num;
 }
 
 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
@@ -2370,10 +3069,11 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                                continue;
 
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "Deleting local tt entry (%pM): pending\n",
-                                  tt_common->addr);
+                                  "Deleting local tt entry (%pM, vid: %d): pending\n",
+                                  tt_common->addr,
+                                  BATADV_PRINT_VID(tt_common->vid));
 
-                       atomic_dec(&bat_priv->tt.local_entry_num);
+                       batadv_tt_local_size_dec(bat_priv, tt_common->vid);
                        hlist_del_rcu(&tt_common->hash_entry);
                        tt_local = container_of(tt_common,
                                                struct batadv_tt_local_entry,
@@ -2384,22 +3084,25 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        }
 }
 
-static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
-                                   unsigned char **packet_buff,
-                                   int *packet_buff_len, int packet_min_len)
+/**
+ * batadv_tt_local_commit_changes_nolock - commit all pending local tt changes
+ *  which have been queued in the time since the last commit
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Caller must hold tt->commit_lock.
+ */
+static void batadv_tt_local_commit_changes_nolock(struct batadv_priv *bat_priv)
 {
-       uint16_t changed_num = 0;
-
-       if (atomic_read(&bat_priv->tt.local_changes) < 1)
-               return -ENOENT;
+       if (atomic_read(&bat_priv->tt.local_changes) < 1) {
+               if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
+                       batadv_tt_tvlv_container_update(bat_priv);
+               return;
+       }
 
-       changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
-                                         BATADV_TT_CLIENT_NEW, false);
+       batadv_tt_local_set_flags(bat_priv, BATADV_TT_CLIENT_NEW, false, true);
 
-       /* all reset entries have to be counted as local entries */
-       atomic_add(changed_num, &bat_priv->tt.local_entry_num);
        batadv_tt_local_purge_pending_clients(bat_priv);
-       bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
+       batadv_tt_local_update_crc(bat_priv);
 
        /* Increment the TTVN only once per OGM interval */
        atomic_inc(&bat_priv->tt.vn);
@@ -2409,49 +3112,38 @@ static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
 
        /* reset the sending counter */
        atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
-
-       return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
-                                          packet_buff_len, packet_min_len);
+       batadv_tt_tvlv_container_update(bat_priv);
 }
 
-/* when calling this function (hard_iface == primary_if) has to be true */
-int batadv_tt_append_diff(struct batadv_priv *bat_priv,
-                         unsigned char **packet_buff, int *packet_buff_len,
-                         int packet_min_len)
+/**
+ * batadv_tt_local_commit_changes - commit all pending local tt changes which
+ *  have been queued in the time since the last commit
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv)
 {
-       int tt_num_changes;
-
-       /* if at least one change happened */
-       tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
-                                                 packet_buff_len,
-                                                 packet_min_len);
-
-       /* if the changes have been sent often enough */
-       if ((tt_num_changes < 0) &&
-           (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
-               batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
-                                             packet_min_len, packet_min_len);
-               tt_num_changes = 0;
-       }
-
-       return tt_num_changes;
+       spin_lock_bh(&bat_priv->tt.commit_lock);
+       batadv_tt_local_commit_changes_nolock(bat_priv);
+       spin_unlock_bh(&bat_priv->tt.commit_lock);
 }
 
 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
-                          uint8_t *dst)
+                          uint8_t *dst, unsigned short vid)
 {
        struct batadv_tt_local_entry *tt_local_entry = NULL;
        struct batadv_tt_global_entry *tt_global_entry = NULL;
+       struct batadv_softif_vlan *vlan;
        bool ret = false;
 
-       if (!atomic_read(&bat_priv->ap_isolation))
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan || !atomic_read(&vlan->ap_isolation))
                goto out;
 
-       tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
+       tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst, vid);
        if (!tt_local_entry)
                goto out;
 
-       tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, src, vid);
        if (!tt_global_entry)
                goto out;
 
@@ -2461,6 +3153,8 @@ bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
        ret = true;
 
 out:
+       if (vlan)
+               batadv_softif_vlan_free_ref(vlan);
        if (tt_global_entry)
                batadv_tt_global_entry_free_ref(tt_global_entry);
        if (tt_local_entry)
@@ -2468,19 +3162,29 @@ out:
        return ret;
 }
 
-void batadv_tt_update_orig(struct batadv_priv *bat_priv,
-                          struct batadv_orig_node *orig_node,
-                          const unsigned char *tt_buff, uint8_t tt_num_changes,
-                          uint8_t ttvn, uint16_t tt_crc)
+/**
+ * batadv_tt_update_orig - update global translation table with new tt
+ *  information received via ogms
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: the orig_node of the ogm
+ * @tt_buff: pointer to the first tvlv VLAN entry
+ * @tt_num_vlan: number of tvlv VLAN entries
+ * @tt_change: pointer to the first entry in the TT buffer
+ * @tt_num_changes: number of tt changes inside the tt buffer
+ * @ttvn: translation table version number of this changeset
+ */
+static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
+                                 struct batadv_orig_node *orig_node,
+                                 const void *tt_buff, uint16_t tt_num_vlan,
+                                 struct batadv_tvlv_tt_change *tt_change,
+                                 uint16_t tt_num_changes, uint8_t ttvn)
 {
        uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+       struct batadv_tvlv_tt_vlan_data *tt_vlan;
        bool full_table = true;
-       struct batadv_tt_change *tt_change;
-
-       /* don't care about a backbone gateways updates. */
-       if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
-               return;
 
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
        /* orig table not initialised AND first diff is in the OGM OR the ttvn
         * increased by one -> we can apply the attached changes
         */
@@ -2496,7 +3200,9 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
                        goto request_table;
                }
 
-               tt_change = (struct batadv_tt_change *)tt_buff;
+               spin_lock_bh(&orig_node->tt_lock);
+
+               tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
                batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
                                         ttvn, tt_change);
 
@@ -2504,7 +3210,9 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
                 * prefer to recompute it to spot any possible inconsistency
                 * in the global table
                 */
-               orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
+               batadv_tt_global_update_crc(bat_priv, orig_node);
+
+               spin_unlock_bh(&orig_node->tt_lock);
 
                /* The ttvn alone is not enough to guarantee consistency
                 * because a single value could represent different states
@@ -2515,37 +3223,46 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
                 * checking the CRC value is mandatory to detect the
                 * inconsistency
                 */
-               if (orig_node->tt_crc != tt_crc)
+               if (!batadv_tt_global_check_crc(orig_node, tt_vlan,
+                                               tt_num_vlan))
                        goto request_table;
        } else {
                /* if we missed more than one change or our tables are not
                 * in sync anymore -> request fresh tt data
                 */
                if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
-                   orig_node->tt_crc != tt_crc) {
+                   !batadv_tt_global_check_crc(orig_node, tt_vlan,
+                                               tt_num_vlan)) {
 request_table:
                        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                                  "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %#.4x last_crc: %#.4x num_changes: %u)\n",
-                                  orig_node->orig, ttvn, orig_ttvn, tt_crc,
-                                  orig_node->tt_crc, tt_num_changes);
+                                  "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u num_changes: %u)\n",
+                                  orig_node->orig, ttvn, orig_ttvn,
+                                  tt_num_changes);
                        batadv_send_tt_request(bat_priv, orig_node, ttvn,
-                                              tt_crc, full_table);
+                                              tt_vlan, tt_num_vlan,
+                                              full_table);
                        return;
                }
        }
 }
 
-/* returns true whether we know that the client has moved from its old
- * originator to another one. This entry is kept is still kept for consistency
- * purposes
+/**
+ * batadv_tt_global_client_is_roaming - check if a client is marked as roaming
+ * @bat_priv: the bat priv with all the soft interface information
+ * @addr: the mac address of the client to check
+ * @vid: VLAN identifier
+ *
+ * Returns true if we know that the client has moved from its old originator
+ * to another one. This entry is still kept for consistency purposes and will be
+ * deleted later by a DEL or because of timeout
  */
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
-                                       uint8_t *addr)
+                                       uint8_t *addr, unsigned short vid)
 {
        struct batadv_tt_global_entry *tt_global_entry;
        bool ret = false;
 
-       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
+       tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr, vid);
        if (!tt_global_entry)
                goto out;
 
@@ -2558,19 +3275,20 @@ out:
 /**
  * batadv_tt_local_client_is_roaming - tells whether the client is roaming
  * @bat_priv: the bat priv with all the soft interface information
- * @addr: the MAC address of the local client to query
+ * @addr: the mac address of the local client to query
+ * @vid: VLAN identifier
  *
  * Returns true if the local client is known to be roaming (it is not served by
  * this node anymore) or not. If yes, the client is still present in the table
  * to keep the latter consistent with the node TTVN
  */
 bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
-                                      uint8_t *addr)
+                                      uint8_t *addr, unsigned short vid)
 {
        struct batadv_tt_local_entry *tt_local_entry;
        bool ret = false;
 
-       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
+       tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
                goto out;
 
@@ -2582,26 +3300,268 @@ out:
 
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig_node,
-                                         const unsigned char *addr)
+                                         const unsigned char *addr,
+                                         unsigned short vid)
 {
        bool ret = false;
 
-       /* if the originator is a backbone node (meaning it belongs to the same
-        * LAN of this node) the temporary client must not be added because to
-        * reach such destination the node must use the LAN instead of the mesh
-        */
-       if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
-               goto out;
-
-       if (!batadv_tt_global_add(bat_priv, orig_node, addr,
+       if (!batadv_tt_global_add(bat_priv, orig_node, addr, vid,
                                  BATADV_TT_CLIENT_TEMP,
                                  atomic_read(&orig_node->last_ttvn)))
                goto out;
 
        batadv_dbg(BATADV_DBG_TT, bat_priv,
-                  "Added temporary global client (addr: %pM orig: %pM)\n",
-                  addr, orig_node->orig);
+                  "Added temporary global client (addr: %pM, vid: %d, orig: %pM)\n",
+                  addr, BATADV_PRINT_VID(vid), orig_node->orig);
        ret = true;
 out:
        return ret;
 }
+
+/**
+ * batadv_tt_local_resize_to_mtu - resize the local translation table to fit the
+ *  maximum packet size that can be transported through the mesh
+ * @soft_iface: netdev struct of the mesh interface
+ *
+ * Remove entries older than 'timeout' and, as long as the table still does
+ * not fit, keep halving the timeout and purging again.
+ */
+void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
+{
+       struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+       int packet_size_max = atomic_read(&bat_priv->packet_size_max);
+       int table_size, timeout = BATADV_TT_LOCAL_TIMEOUT / 2;
+       bool reduced = false;
+
+       spin_lock_bh(&bat_priv->tt.commit_lock);
+
+       while (true) {
+               table_size = batadv_tt_local_table_transmit_size(bat_priv);
+               if (packet_size_max >= table_size)
+                       break;
+
+               batadv_tt_local_purge(bat_priv, timeout);
+               batadv_tt_local_purge_pending_clients(bat_priv);
+
+               timeout /= 2;
+               reduced = true;
+               net_ratelimited_function(batadv_info, soft_iface,
+                                        "Forced to purge local tt entries to fit new maximum fragment MTU (%i)\n",
+                                        packet_size_max);
+       }
+
+       /* commit these changes immediately, to avoid synchronization problem
+        * with the TTVN
+        */
+       if (reduced)
+               batadv_tt_local_commit_changes_nolock(bat_priv);
+
+       spin_unlock_bh(&bat_priv->tt.commit_lock);
+}
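
The resize loop keeps purging with a progressively halved timeout until the serialized table fits below the maximum packet size, and only then commits the changes. An abstract sketch of that control flow, with hypothetical callbacks standing in for the purge and size helpers:

#include <stdbool.h>

struct demo_table;

/* Purge with a halved timeout until the table's transmit size fits the
 * limit; return true if anything was purged so the caller can commit.
 */
static bool demo_resize_to_limit(struct demo_table *table, int limit,
				 int (*transmit_size)(struct demo_table *),
				 void (*purge_older_than)(struct demo_table *,
							  int timeout_ms),
				 int start_timeout_ms)
{
	int timeout = start_timeout_ms / 2;
	bool reduced = false;

	while (transmit_size(table) > limit) {
		purge_older_than(table, timeout);
		timeout /= 2;
		reduced = true;
	}

	return reduced;
}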
+
+/**
+ * batadv_tt_tvlv_ogm_handler_v1 - process incoming tt tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: the orig_node of the ogm
+ * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ */
+static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+                                         struct batadv_orig_node *orig,
+                                         uint8_t flags, void *tvlv_value,
+                                         uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_tt_vlan_data *tt_vlan;
+       struct batadv_tvlv_tt_change *tt_change;
+       struct batadv_tvlv_tt_data *tt_data;
+       uint16_t num_entries, num_vlan;
+
+       if (tvlv_value_len < sizeof(*tt_data))
+               return;
+
+       tt_data = (struct batadv_tvlv_tt_data *)tvlv_value;
+       tvlv_value_len -= sizeof(*tt_data);
+
+       num_vlan = ntohs(tt_data->num_vlan);
+
+       if (tvlv_value_len < sizeof(*tt_vlan) * num_vlan)
+               return;
+
+       tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(tt_data + 1);
+       tt_change = (struct batadv_tvlv_tt_change *)(tt_vlan + num_vlan);
+       tvlv_value_len -= sizeof(*tt_vlan) * num_vlan;
+
+       num_entries = batadv_tt_entries(tvlv_value_len);
+
+       batadv_tt_update_orig(bat_priv, orig, tt_vlan, num_vlan, tt_change,
+                             num_entries, tt_data->ttvn);
+}
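
Parsing a TT tvlv container boils down to three length checks: the fixed header, the per-VLAN block of num_vlan records, and the remainder, which is interpreted as a whole number of change entries. The standalone sketch below reproduces that arithmetic with simplified demo_* struct layouts that only approximate the on-wire format:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_tt_data {
	uint8_t flags;
	uint8_t ttvn;
	uint16_t num_vlan;		/* network byte order */
};

struct demo_tt_vlan {
	uint32_t crc;
	uint16_t vid;
	uint16_t reserved;
};

struct demo_tt_change {
	uint8_t flags;
	uint8_t reserved;
	uint8_t addr[6];
	uint16_t vid;
};

/* Validate the buffer length and locate the VLAN and change blocks.
 * Returns false if the buffer is too short for its own header fields.
 */
static bool demo_parse_tt_tvlv(const uint8_t *buf, size_t len,
			       const struct demo_tt_vlan **vlan,
			       const struct demo_tt_change **change,
			       uint16_t *num_vlan, uint16_t *num_entries)
{
	const struct demo_tt_data *hdr = (const struct demo_tt_data *)buf;

	if (len < sizeof(*hdr))
		return false;
	len -= sizeof(*hdr);

	*num_vlan = ntohs(hdr->num_vlan);
	if (len < (size_t)*num_vlan * sizeof(**vlan))
		return false;

	*vlan = (const struct demo_tt_vlan *)(hdr + 1);
	*change = (const struct demo_tt_change *)(*vlan + *num_vlan);
	len -= (size_t)*num_vlan * sizeof(**vlan);

	/* whatever is left is a whole number of change records */
	*num_entries = (uint16_t)(len / sizeof(**change));

	return true;
}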
+
+/**
+ * batadv_tt_tvlv_unicast_handler_v1 - process incoming (unicast) tt tvlv
+ *  container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of tt tvlv sender
+ * @dst: mac address of tt tvlv recipient
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Returns NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS
+ * otherwise.
+ */
+static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
+                                            uint8_t *src, uint8_t *dst,
+                                            void *tvlv_value,
+                                            uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_tt_data *tt_data;
+       uint16_t tt_vlan_len, tt_num_entries;
+       char tt_flag;
+       bool ret;
+
+       if (tvlv_value_len < sizeof(*tt_data))
+               return NET_RX_SUCCESS;
+
+       tt_data = (struct batadv_tvlv_tt_data *)tvlv_value;
+       tvlv_value_len -= sizeof(*tt_data);
+
+       tt_vlan_len = sizeof(struct batadv_tvlv_tt_vlan_data);
+       tt_vlan_len *= ntohs(tt_data->num_vlan);
+
+       if (tvlv_value_len < tt_vlan_len)
+               return NET_RX_SUCCESS;
+
+       tvlv_value_len -= tt_vlan_len;
+       tt_num_entries = batadv_tt_entries(tvlv_value_len);
+
+       switch (tt_data->flags & BATADV_TT_DATA_TYPE_MASK) {
+       case BATADV_TT_REQUEST:
+               batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
+
+               /* If this node cannot provide a TT response the tt_request is
+                * forwarded
+                */
+               ret = batadv_send_tt_response(bat_priv, tt_data, src, dst);
+               if (!ret) {
+                       if (tt_data->flags & BATADV_TT_FULL_TABLE)
+                               tt_flag = 'F';
+                       else
+                               tt_flag = '.';
+
+                       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                                  "Routing TT_REQUEST to %pM [%c]\n",
+                                  dst, tt_flag);
+                       /* tvlv API will re-route the packet */
+                       return NET_RX_DROP;
+               }
+               break;
+       case BATADV_TT_RESPONSE:
+               batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
+
+               if (batadv_is_my_mac(bat_priv, dst)) {
+                       batadv_handle_tt_response(bat_priv, tt_data,
+                                                 src, tt_num_entries);
+                       return NET_RX_SUCCESS;
+               }
+
+               if (tt_data->flags & BATADV_TT_FULL_TABLE)
+                       tt_flag =  'F';
+               else
+                       tt_flag = '.';
+
+               batadv_dbg(BATADV_DBG_TT, bat_priv,
+                          "Routing TT_RESPONSE to %pM [%c]\n", dst, tt_flag);
+
+               /* tvlv API will re-route the packet */
+               return NET_RX_DROP;
+       }
+
+       return NET_RX_SUCCESS;
+}
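
Both handlers derive the number of trailing change records with batadv_tt_entries(); the helper is defined elsewhere in this series, so the body below is an assumption about its semantics, shown only to make the arithmetic explicit:

/* Assumed semantics of batadv_tt_entries(): how many whole change records
 * fit into the bytes left after the header and the per-VLAN descriptors.
 */
static inline uint16_t example_tt_entries(uint16_t remaining_len)
{
	return remaining_len / sizeof(struct batadv_tvlv_tt_change);
}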
+
+/**
+ * batadv_roam_tvlv_unicast_handler_v1 - process incoming tt roam tvlv container
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: mac address of tt tvlv sender
+ * @dst: mac address of tt tvlv recipient
+ * @tvlv_value: tvlv buffer containing the tt data
+ * @tvlv_value_len: tvlv buffer length
+ *
+ * Returns NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS
+ * otherwise.
+ */
+static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
+                                              uint8_t *src, uint8_t *dst,
+                                              void *tvlv_value,
+                                              uint16_t tvlv_value_len)
+{
+       struct batadv_tvlv_roam_adv *roaming_adv;
+       struct batadv_orig_node *orig_node = NULL;
+
+       /* If this node is not the intended recipient of the
+        * roaming advertisement the packet is forwarded
+        * (the tvlv API will re-route the packet).
+        */
+       if (!batadv_is_my_mac(bat_priv, dst))
+               return NET_RX_DROP;
+
+       if (tvlv_value_len < sizeof(*roaming_adv))
+               goto out;
+
+       orig_node = batadv_orig_hash_find(bat_priv, src);
+       if (!orig_node)
+               goto out;
+
+       batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
+       roaming_adv = (struct batadv_tvlv_roam_adv *)tvlv_value;
+
+       batadv_dbg(BATADV_DBG_TT, bat_priv,
+                  "Received ROAMING_ADV from %pM (client %pM)\n",
+                  src, roaming_adv->client);
+
+       batadv_tt_global_add(bat_priv, orig_node, roaming_adv->client,
+                            ntohs(roaming_adv->vid), BATADV_TT_CLIENT_ROAM,
+                            atomic_read(&orig_node->last_ttvn) + 1);
+
+out:
+       if (orig_node)
+               batadv_orig_node_free_ref(orig_node);
+       return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_tt_init - initialise the translation table internals
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return 1 on success or a negative error number in case of failure.
+ */
+int batadv_tt_init(struct batadv_priv *bat_priv)
+{
+       int ret;
+
+       /* synchronized flags must be remote */
+       BUILD_BUG_ON(!(BATADV_TT_SYNC_MASK & BATADV_TT_REMOTE_MASK));
+
+       ret = batadv_tt_local_init(bat_priv);
+       if (ret < 0)
+               return ret;
+
+       ret = batadv_tt_global_init(bat_priv);
+       if (ret < 0)
+               return ret;
+
+       batadv_tvlv_handler_register(bat_priv, batadv_tt_tvlv_ogm_handler_v1,
+                                    batadv_tt_tvlv_unicast_handler_v1,
+                                    BATADV_TVLV_TT, 1, BATADV_NO_FLAGS);
+
+       batadv_tvlv_handler_register(bat_priv, NULL,
+                                    batadv_roam_tvlv_unicast_handler_v1,
+                                    BATADV_TVLV_ROAM, 1, BATADV_NO_FLAGS);
+
+       INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
+       queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
+                          msecs_to_jiffies(BATADV_TT_WORK_PERIOD));
+
+       return 1;
+}
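
Since the function returns 1 on success (see the return statement above), a caller sketch; the wrapper is illustrative and only the "< 1" check mirrors how the return value is meant to be consumed:

/* Illustrative caller: anything below 1 is treated as failure. */
static int example_tt_setup(struct batadv_priv *bat_priv)
{
	int ret;

	ret = batadv_tt_init(bat_priv);
	if (ret < 1)		/* 1 on success, negative errno on failure */
		return ret;

	return 0;
}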
index 659a3bb759ce87087c4d5697239f4724c815971c..026b1ffa674699ea7561df5456dd0c91f15d3521 100644 (file)
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 
-int batadv_tt_len(int changes_num);
 int batadv_tt_init(struct batadv_priv *bat_priv);
-void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
-                        int ifindex);
+bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+                        unsigned short vid, int ifindex);
 uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
-                               const uint8_t *addr, const char *message,
-                               bool roaming);
+                               const uint8_t *addr, unsigned short vid,
+                               const char *message, bool roaming);
 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
-void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
-                              struct batadv_orig_node *orig_node,
-                              const unsigned char *tt_buff, int tt_buff_len);
-int batadv_tt_global_add(struct batadv_priv *bat_priv,
-                        struct batadv_orig_node *orig_node,
-                        const unsigned char *addr, uint16_t flags,
-                        uint8_t ttvn);
 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
                               struct batadv_orig_node *orig_node,
-                              const char *message);
+                              int32_t match_vid, const char *message);
 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
                                                  const uint8_t *src,
-                                                 const uint8_t *addr);
+                                                 const uint8_t *addr,
+                                                 unsigned short vid);
 void batadv_tt_free(struct batadv_priv *bat_priv);
-bool batadv_send_tt_response(struct batadv_priv *bat_priv,
-                            struct batadv_tt_query_packet *tt_request);
-bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr);
-void batadv_handle_tt_response(struct batadv_priv *bat_priv,
-                              struct batadv_tt_query_packet *tt_response);
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr,
+                        unsigned short vid);
 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
-                          uint8_t *dst);
-void batadv_tt_update_orig(struct batadv_priv *bat_priv,
-                          struct batadv_orig_node *orig_node,
-                          const unsigned char *tt_buff, uint8_t tt_num_changes,
-                          uint8_t ttvn, uint16_t tt_crc);
-int batadv_tt_append_diff(struct batadv_priv *bat_priv,
-                         unsigned char **packet_buff, int *packet_buff_len,
-                         int packet_min_len);
+                          uint8_t *dst, unsigned short vid);
+void batadv_tt_local_commit_changes(struct batadv_priv *bat_priv);
 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
-                                       uint8_t *addr);
+                                       uint8_t *addr, unsigned short vid);
 bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
-                                      uint8_t *addr);
+                                      uint8_t *addr, unsigned short vid);
+void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface);
 bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
                                          struct batadv_orig_node *orig_node,
-                                         const unsigned char *addr);
+                                         const unsigned char *addr,
+                                         unsigned short vid);
 
 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
index b2c94e1393191e00165e42e117ded3dd2374f70f..91dd369b0ff21790b9fae41b88005be95bf0ffe4 100644 (file)
 #include "bitarray.h"
 #include <linux/kernel.h>
 
-/**
- * Maximum overhead for the encapsulation for a payload packet
- */
-#define BATADV_HEADER_LEN \
-       (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
-                       sizeof(struct batadv_bcast_packet)))
-
 #ifdef CONFIG_BATMAN_ADV_DAT
 
 /* batadv_dat_addr_t is the type used for all DHT addresses. If it is changed,
 
 #endif /* CONFIG_BATMAN_ADV_DAT */
 
+/**
+ * BATADV_TT_REMOTE_MASK - bitmask selecting the flags that are sent over the
+ *  wire only
+ */
+#define BATADV_TT_REMOTE_MASK  0x00FF
+
+/**
+ * BATADV_TT_SYNC_MASK - bitmask of the flags that need to be kept in sync
+ *  among the nodes. These flags are used to compute the global/local CRC
+ */
+#define BATADV_TT_SYNC_MASK    0x00F0
+
 /**
  * struct batadv_hard_iface_bat_iv - per hard interface B.A.T.M.A.N. IV data
  * @ogm_buff: buffer holding the OGM packet
@@ -60,7 +65,6 @@ struct batadv_hard_iface_bat_iv {
  * @if_num: identifier of the interface
  * @if_status: status of the interface for batman-adv
  * @net_dev: pointer to the net_device
- * @frag_seqno: last fragment sequence number sent by this interface
  * @num_bcasts: number of payload re-broadcasts on this interface (ARQ)
  * @hardif_obj: kobject of the per interface sysfs "mesh" directory
  * @refcount: number of contexts the object is used
@@ -76,7 +80,6 @@ struct batadv_hard_iface {
        int16_t if_num;
        char if_status;
        struct net_device *net_dev;
-       atomic_t frag_seqno;
        uint8_t num_bcasts;
        struct kobject *hardif_obj;
        atomic_t refcount;
@@ -87,29 +90,98 @@ struct batadv_hard_iface {
        struct work_struct cleanup_work;
 };
 
+/**
+ * struct batadv_frag_table_entry - head in the fragment buffer table
+ * @head: head of list with fragments
+ * @lock: lock to protect the list of fragments
+ * @timestamp: time (in jiffies) of the last received fragment
+ * @seqno: sequence number of the fragments in the list
+ * @size: accumulated size of packets in list
+ */
+struct batadv_frag_table_entry {
+       struct hlist_head head;
+       spinlock_t lock; /* protects head */
+       unsigned long timestamp;
+       uint16_t seqno;
+       uint16_t size;
+};
+
+/**
+ * struct batadv_frag_list_entry - entry in a list of fragments
+ * @list: list node information
+ * @skb: fragment
+ * @no: fragment number in the set
+ */
+struct batadv_frag_list_entry {
+       struct hlist_node list;
+       struct sk_buff *skb;
+       uint8_t no;
+};
+
+/**
+ * struct batadv_vlan_tt - VLAN specific TT attributes
+ * @crc: CRC32 checksum of the entries belonging to this vlan
+ * @num_entries: number of TT entries for this VLAN
+ */
+struct batadv_vlan_tt {
+       uint32_t crc;
+       atomic_t num_entries;
+};
+
+/**
+ * struct batadv_orig_node_vlan - VLAN specific data per orig_node
+ * @vid: the VLAN identifier
+ * @tt: VLAN specific TT attributes
+ * @list: list node for orig_node::vlan_list
+ * @refcount: number of contexts in which this object is currently in use
+ * @rcu: struct used for freeing in a RCU-safe manner
+ */
+struct batadv_orig_node_vlan {
+       unsigned short vid;
+       struct batadv_vlan_tt tt;
+       struct list_head list;
+       atomic_t refcount;
+       struct rcu_head rcu;
+};
+
+/**
+ * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
+ * @bcast_own: bitfield containing the number of our OGMs this orig_node
+ *  rebroadcasted "back" to us (relative to last_real_seqno)
+ * @bcast_own_sum: counted result of bcast_own
+ * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
+ *  neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
+ */
+struct batadv_orig_bat_iv {
+       unsigned long *bcast_own;
+       uint8_t *bcast_own_sum;
+       /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+        * neigh_node->bat_iv.real_bits & neigh_node->bat_iv.real_packet_count
+        */
+       spinlock_t ogm_cnt_lock;
+};
+
 /**
  * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
  * @orig: originator ethernet address
  * @primary_addr: hosts primary interface address
  * @router: router that should be used to reach this originator
  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
- * @bcast_own: bitfield containing the number of our OGMs this orig_node
- *  rebroadcasted "back" to us (relative to last_real_seqno)
- * @bcast_own_sum: counted result of bcast_own
  * @last_seen: time when last packet from this node was received
  * @bcast_seqno_reset: time when the broadcast seqno window was reset
  * @batman_seqno_reset: time when the batman seqno window was reset
- * @gw_flags: flags related to gateway class
- * @flags: for now only VIS_SERVER flag
+ * @capabilities: announced capabilities of this originator
  * @last_ttvn: last seen translation table version number
- * @tt_crc: CRC of the translation table
  * @tt_buff: last tt changeset this node received from the orig node
  * @tt_buff_len: length of the last tt changeset this node received from the
  *  orig node
  * @tt_buff_lock: lock that protects tt_buff and tt_buff_len
- * @tt_size: number of global TT entries announced by the orig node
  * @tt_initialised: bool keeping track of whether or not this node has received
  *  any translation table information from the orig node yet
+ * @tt_lock: prevents the table from being updated while it is read. A table
+ *  update consists of two operations (data structure update and metadata
+ *  -CRC/TTVN- recalculation) which have to be executed atomically so that no
+ *  other thread can read the table/metadata in between.
  * @last_real_seqno: last and best known sequence number
  * @last_ttl: ttl of last received packet
  * @bcast_bits: bitfield containing the info which payload broadcast originated
@@ -117,14 +189,9 @@ struct batadv_hard_iface {
  *  last_bcast_seqno)
  * @last_bcast_seqno: last broadcast sequence number received by this host
  * @neigh_list: list of potential next hop neighbor towards this orig node
- * @frag_list: fragmentation buffer list for fragment re-assembly
- * @last_frag_packet: time when last fragmented packet from this node was
- *  received
  * @neigh_list_lock: lock protecting neigh_list, router and bonding_list
  * @hash_entry: hlist node for batadv_priv::orig_hash
  * @bat_priv: pointer to soft_iface this orig node belongs to
- * @ogm_cnt_lock: lock protecting bcast_own, bcast_own_sum,
- *  neigh_node->real_bits & neigh_node->real_packet_count
  * @bcast_seqno_lock: lock protecting bcast_bits & last_bcast_seqno
  * @bond_candidates: how many candidates are available
  * @bond_list: list of bonding candidates
@@ -134,6 +201,11 @@ struct batadv_hard_iface {
  * @out_coding_list: list of nodes that can hear this orig
  * @in_coding_list_lock: protects in_coding_list
  * @out_coding_list_lock: protects out_coding_list
+ * @fragments: array with heads for fragment chains
+ * @vlan_list: a list of orig_node_vlan structs, one per VLAN served by the
+ *  originator represented by this object
+ * @vlan_list_lock: lock protecting vlan_list
+ * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_orig_node {
        uint8_t orig[ETH_ALEN];
@@ -142,35 +214,26 @@ struct batadv_orig_node {
 #ifdef CONFIG_BATMAN_ADV_DAT
        batadv_dat_addr_t dat_addr;
 #endif
-       unsigned long *bcast_own;
-       uint8_t *bcast_own_sum;
        unsigned long last_seen;
        unsigned long bcast_seqno_reset;
        unsigned long batman_seqno_reset;
-       uint8_t gw_flags;
-       uint8_t flags;
+       uint8_t capabilities;
        atomic_t last_ttvn;
-       uint16_t tt_crc;
        unsigned char *tt_buff;
        int16_t tt_buff_len;
        spinlock_t tt_buff_lock; /* protects tt_buff & tt_buff_len */
-       atomic_t tt_size;
        bool tt_initialised;
+       /* prevents from changing the table while reading it */
+       spinlock_t tt_lock;
        uint32_t last_real_seqno;
        uint8_t last_ttl;
        DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
        uint32_t last_bcast_seqno;
        struct hlist_head neigh_list;
-       struct list_head frag_list;
-       unsigned long last_frag_packet;
        /* neigh_list_lock protects: neigh_list, router & bonding_list */
        spinlock_t neigh_list_lock;
        struct hlist_node hash_entry;
        struct batadv_priv *bat_priv;
-       /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
-        * neigh_node->real_bits & neigh_node->real_packet_count
-        */
-       spinlock_t ogm_cnt_lock;
        /* bcast_seqno_lock protects: bcast_bits & last_bcast_seqno */
        spinlock_t bcast_seqno_lock;
        atomic_t bond_candidates;
@@ -183,12 +246,28 @@ struct batadv_orig_node {
        spinlock_t in_coding_list_lock; /* Protects in_coding_list */
        spinlock_t out_coding_list_lock; /* Protects out_coding_list */
 #endif
+       struct batadv_frag_table_entry fragments[BATADV_FRAG_BUFFER_COUNT];
+       struct list_head vlan_list;
+       spinlock_t vlan_list_lock; /* protects vlan_list */
+       struct batadv_orig_bat_iv bat_iv;
+};
+
+/**
+ * enum batadv_orig_capabilities - orig node capabilities
+ * @BATADV_ORIG_CAPA_HAS_DAT: orig node has distributed arp table enabled
+ * @BATADV_ORIG_CAPA_HAS_NC: orig node has network coding enabled
+ */
+enum batadv_orig_capabilities {
+       BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
+       BATADV_ORIG_CAPA_HAS_NC = BIT(1),
 };
 
 /**
  * struct batadv_gw_node - structure for orig nodes announcing gw capabilities
  * @list: list node for batadv_priv_gw::list
  * @orig_node: pointer to corresponding orig node
+ * @bandwidth_down: advertised uplink download bandwidth
+ * @bandwidth_up: advertised uplink upload bandwidth
  * @deleted: this struct is scheduled for deletion
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
@@ -196,46 +275,57 @@ struct batadv_orig_node {
 struct batadv_gw_node {
        struct hlist_node list;
        struct batadv_orig_node *orig_node;
+       uint32_t bandwidth_down;
+       uint32_t bandwidth_up;
        unsigned long deleted;
        atomic_t refcount;
        struct rcu_head rcu;
 };
 
 /**
- * struct batadv_neigh_node - structure for single hop neighbors
- * @list: list node for batadv_orig_node::neigh_list
- * @addr: mac address of neigh node
+ * struct batadv_neigh_bat_iv - B.A.T.M.A.N. IV specific structure for single
+ *  hop neighbors
  * @tq_recv: ring buffer of received TQ values from this neigh node
  * @tq_index: ring buffer index
  * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
- * @last_ttl: last received ttl from this neigh node
- * @bonding_list: list node for batadv_orig_node::bond_list
- * @last_seen: when last packet via this neighbor was received
  * @real_bits: bitfield containing the number of OGMs received from this neigh
  *  node (relative to orig_node->last_real_seqno)
  * @real_packet_count: counted result of real_bits
+ * @lq_update_lock: lock protecting tq_recv & tq_index
+ */
+struct batadv_neigh_bat_iv {
+       uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
+       uint8_t tq_index;
+       uint8_t tq_avg;
+       DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
+       uint8_t real_packet_count;
+       spinlock_t lq_update_lock; /* protects tq_recv & tq_index */
+};
+
+/**
+ * struct batadv_neigh_node - structure for single hop neighbors
+ * @list: list node for batadv_orig_node::neigh_list
  * @orig_node: pointer to corresponding orig_node
+ * @addr: the MAC address of the neighboring interface
  * @if_incoming: pointer to incoming hard interface
- * @lq_update_lock: lock protecting tq_recv & tq_index
+ * @last_seen: when last packet via this neighbor was received
+ * @last_ttl: last received ttl from this neigh node
+ * @bonding_list: list node for batadv_orig_node::bond_list
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
+ * @bat_iv: B.A.T.M.A.N. IV private structure
  */
 struct batadv_neigh_node {
        struct hlist_node list;
+       struct batadv_orig_node *orig_node;
        uint8_t addr[ETH_ALEN];
-       uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
-       uint8_t tq_index;
-       uint8_t tq_avg;
+       struct batadv_hard_iface *if_incoming;
+       unsigned long last_seen;
        uint8_t last_ttl;
        struct list_head bonding_list;
-       unsigned long last_seen;
-       DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
-       uint8_t real_packet_count;
-       struct batadv_orig_node *orig_node;
-       struct batadv_hard_iface *if_incoming;
-       spinlock_t lq_update_lock; /* protects tq_recv & tq_index */
        atomic_t refcount;
        struct rcu_head rcu;
+       struct batadv_neigh_bat_iv bat_iv;
 };
 
 /**
@@ -265,6 +355,12 @@ struct batadv_bcast_duplist_entry {
  * @BATADV_CNT_MGMT_TX_BYTES: transmitted routing protocol traffic bytes counter
  * @BATADV_CNT_MGMT_RX: received routing protocol traffic packet counter
  * @BATADV_CNT_MGMT_RX_BYTES: received routing protocol traffic bytes counter
+ * @BATADV_CNT_FRAG_TX: transmitted fragment traffic packet counter
+ * @BATADV_CNT_FRAG_TX_BYTES: transmitted fragment traffic bytes counter
+ * @BATADV_CNT_FRAG_RX: received fragment traffic packet counter
+ * @BATADV_CNT_FRAG_RX_BYTES: received fragment traffic bytes counter
+ * @BATADV_CNT_FRAG_FWD: forwarded fragment traffic packet counter
+ * @BATADV_CNT_FRAG_FWD_BYTES: forwarded fragment traffic bytes counter
  * @BATADV_CNT_TT_REQUEST_TX: transmitted tt req traffic packet counter
  * @BATADV_CNT_TT_REQUEST_RX: received tt req traffic packet counter
  * @BATADV_CNT_TT_RESPONSE_TX: transmitted tt resp traffic packet counter
@@ -302,6 +398,12 @@ enum batadv_counters {
        BATADV_CNT_MGMT_TX_BYTES,
        BATADV_CNT_MGMT_RX,
        BATADV_CNT_MGMT_RX_BYTES,
+       BATADV_CNT_FRAG_TX,
+       BATADV_CNT_FRAG_TX_BYTES,
+       BATADV_CNT_FRAG_RX,
+       BATADV_CNT_FRAG_RX_BYTES,
+       BATADV_CNT_FRAG_FWD,
+       BATADV_CNT_FRAG_FWD_BYTES,
        BATADV_CNT_TT_REQUEST_TX,
        BATADV_CNT_TT_REQUEST_RX,
        BATADV_CNT_TT_RESPONSE_TX,
@@ -343,11 +445,14 @@ enum batadv_counters {
  * @changes_list_lock: lock protecting changes_list
  * @req_list_lock: lock protecting req_list
  * @roam_list_lock: lock protecting roam_list
- * @local_entry_num: number of entries in the local hash table
- * @local_crc: Checksum of the local table, recomputed before sending a new OGM
  * @last_changeset: last tt changeset this host has generated
  * @last_changeset_len: length of last tt changeset this host has generated
  * @last_changeset_lock: lock protecting last_changeset & last_changeset_len
+ * @commit_lock: prevents a local TT commit from running while the local table
+ *  is read. The commit consists of two operations (data structure update and
+ *  metadata -CRC/TTVN- recalculation) which have to be executed atomically so
+ *  that no other thread can read the table/metadata in between.
  * @work: work queue callback item for translation table purging
  */
 struct batadv_priv_tt {
@@ -362,12 +467,12 @@ struct batadv_priv_tt {
        spinlock_t changes_list_lock; /* protects changes */
        spinlock_t req_list_lock; /* protects req_list */
        spinlock_t roam_list_lock; /* protects roam_list */
-       atomic_t local_entry_num;
-       uint16_t local_crc;
        unsigned char *last_changeset;
        int16_t last_changeset_len;
        /* protects last_changeset & last_changeset_len */
        spinlock_t last_changeset_lock;
+       /* prevents from executing a commit while reading the table */
+       spinlock_t commit_lock;
        struct delayed_work work;
 };
 
@@ -420,31 +525,31 @@ struct batadv_priv_debug_log {
  * @list: list of available gateway nodes
  * @list_lock: lock protecting gw_list & curr_gw
  * @curr_gw: pointer to currently selected gateway node
+ * @bandwidth_down: advertised uplink download bandwidth (if gw_mode server)
+ * @bandwidth_up: advertised uplink upload bandwidth (if gw_mode server)
  * @reselect: bool indicating a gateway re-selection is in progress
  */
 struct batadv_priv_gw {
        struct hlist_head list;
        spinlock_t list_lock; /* protects gw_list & curr_gw */
        struct batadv_gw_node __rcu *curr_gw;  /* rcu protected pointer */
+       atomic_t bandwidth_down;
+       atomic_t bandwidth_up;
        atomic_t reselect;
 };
 
 /**
- * struct batadv_priv_vis - per mesh interface vis data
- * @send_list: list of batadv_vis_info packets to sent
- * @hash: hash table containing vis data from other nodes in the network
- * @hash_lock: lock protecting the hash table
- * @list_lock: lock protecting my_info::recv_list
- * @work: work queue callback item for vis packet sending
- * @my_info: holds this node's vis data sent on a regular basis
+ * struct batadv_priv_tvlv - per mesh interface tvlv data
+ * @container_list: list of registered tvlv containers to be sent with each OGM
+ * @handler_list: list of the various tvlv content handlers
+ * @container_list_lock: protects tvlv container list access
+ * @handler_list_lock: protects handler list access
  */
-struct batadv_priv_vis {
-       struct list_head send_list;
-       struct batadv_hashtable *hash;
-       spinlock_t hash_lock; /* protects hash */
-       spinlock_t list_lock; /* protects my_info::recv_list */
-       struct delayed_work work;
-       struct batadv_vis_info *my_info;
+struct batadv_priv_tvlv {
+       struct hlist_head container_list;
+       struct hlist_head handler_list;
+       spinlock_t container_list_lock; /* protects container_list */
+       spinlock_t handler_list_lock; /* protects handler_list */
 };
 
 /**
@@ -490,6 +595,26 @@ struct batadv_priv_nc {
        struct batadv_hashtable *decoding_hash;
 };
 
+/**
+ * struct batadv_softif_vlan - per VLAN attributes set
+ * @vid: VLAN identifier
+ * @kobj: kobject for sysfs vlan subdirectory
+ * @ap_isolation: AP isolation state
+ * @tt: TT private attributes (VLAN specific)
+ * @list: list node for bat_priv::softif_vlan_list
+ * @refcount: number of contexts in which this object is currently in use
+ * @rcu: struct used for freeing in a RCU-safe manner
+ */
+struct batadv_softif_vlan {
+       unsigned short vid;
+       struct kobject *kobj;
+       atomic_t ap_isolation;          /* boolean */
+       struct batadv_vlan_tt tt;
+       struct hlist_node list;
+       atomic_t refcount;
+       struct rcu_head rcu;
+};
+
 /**
  * struct batadv_priv - per mesh interface data
  * @mesh_state: current status of the mesh (inactive/active/deactivating)
@@ -499,15 +624,15 @@ struct batadv_priv_nc {
  * @aggregated_ogms: bool indicating whether OGM aggregation is enabled
  * @bonding: bool indicating whether traffic bonding is enabled
  * @fragmentation: bool indicating whether traffic fragmentation is enabled
- * @ap_isolation: bool indicating whether ap isolation is enabled
+ * @packet_size_max: max packet size that can be transmitted via
+ *  multiple fragmented skbs or a single frame if fragmentation is disabled
+ * @frag_seqno: incremental counter to identify chains of egress fragments
  * @bridge_loop_avoidance: bool indicating whether bridge loop avoidance is
  *  enabled
  * @distributed_arp_table: bool indicating whether distributed ARP table is
  *  enabled
- * @vis_mode: vis operation: client or server (see batadv_vis_packettype)
  * @gw_mode: gateway operation: off, client or server (see batadv_gw_modes)
  * @gw_sel_class: gateway selection class (applies if gw_mode client)
- * @gw_bandwidth: gateway announced bandwidth (applies if gw_mode server)
  * @orig_interval: OGM broadcast interval in milliseconds
  * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
  * @log_level: configured log level (see batadv_dbg_level)
@@ -527,11 +652,14 @@ struct batadv_priv_nc {
  * @primary_if: one of the hard interfaces assigned to this mesh interface
  *  becomes the primary interface
  * @bat_algo_ops: routing algorithm used by this mesh interface
+ * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
+ *  of the mesh interface represented by this object
+ * @softif_vlan_list_lock: lock protecting softif_vlan_list
  * @bla: bridge loop avoidance data
  * @debug_log: holding debug logging relevant data
  * @gw: gateway data
  * @tt: translation table data
- * @vis: vis data
+ * @tvlv: type-version-length-value data
  * @dat: distributed arp table data
  * @network_coding: bool indicating whether network coding is enabled
  * @batadv_priv_nc: network coding data
@@ -544,17 +672,16 @@ struct batadv_priv {
        atomic_t aggregated_ogms;
        atomic_t bonding;
        atomic_t fragmentation;
-       atomic_t ap_isolation;
+       atomic_t packet_size_max;
+       atomic_t frag_seqno;
 #ifdef CONFIG_BATMAN_ADV_BLA
        atomic_t bridge_loop_avoidance;
 #endif
 #ifdef CONFIG_BATMAN_ADV_DAT
        atomic_t distributed_arp_table;
 #endif
-       atomic_t vis_mode;
        atomic_t gw_mode;
        atomic_t gw_sel_class;
-       atomic_t gw_bandwidth;
        atomic_t orig_interval;
        atomic_t hop_penalty;
 #ifdef CONFIG_BATMAN_ADV_DEBUG
@@ -575,6 +702,8 @@ struct batadv_priv {
        struct work_struct cleanup_work;
        struct batadv_hard_iface __rcu *primary_if;  /* rcu protected pointer */
        struct batadv_algo_ops *bat_algo_ops;
+       struct hlist_head softif_vlan_list;
+       spinlock_t softif_vlan_list_lock; /* protects softif_vlan_list */
 #ifdef CONFIG_BATMAN_ADV_BLA
        struct batadv_priv_bla bla;
 #endif
@@ -583,7 +712,7 @@ struct batadv_priv {
 #endif
        struct batadv_priv_gw gw;
        struct batadv_priv_tt tt;
-       struct batadv_priv_vis vis;
+       struct batadv_priv_tvlv tvlv;
 #ifdef CONFIG_BATMAN_ADV_DAT
        struct batadv_priv_dat dat;
 #endif
@@ -620,7 +749,7 @@ struct batadv_socket_client {
 struct batadv_socket_packet {
        struct list_head list;
        size_t icmp_len;
-       struct batadv_icmp_packet_rr icmp_packet;
+       uint8_t icmp_packet[BATADV_ICMP_MAX_PACKET_SIZE];
 };
 
 /**
@@ -677,6 +806,7 @@ struct batadv_bla_claim {
 /**
  * struct batadv_tt_common_entry - tt local & tt global common data
  * @addr: mac address of non-mesh client
+ * @vid: VLAN identifier
  * @hash_entry: hlist node for batadv_priv_tt::local_hash or for
  *  batadv_priv_tt::global_hash
  * @flags: various state handling flags (see batadv_tt_client_flags)
@@ -686,6 +816,7 @@ struct batadv_bla_claim {
  */
 struct batadv_tt_common_entry {
        uint8_t addr[ETH_ALEN];
+       unsigned short vid;
        struct hlist_node hash_entry;
        uint16_t flags;
        unsigned long added_at;
@@ -740,7 +871,7 @@ struct batadv_tt_orig_list_entry {
  */
 struct batadv_tt_change_node {
        struct list_head list;
-       struct batadv_tt_change change;
+       struct batadv_tvlv_tt_change change;
 };
 
 /**
@@ -865,78 +996,6 @@ struct batadv_forw_packet {
        struct batadv_hard_iface *if_incoming;
 };
 
-/**
- * struct batadv_frag_packet_list_entry - storage for fragment packet
- * @list: list node for orig_node::frag_list
- * @seqno: sequence number of the fragment
- * @skb: fragment's skb buffer
- */
-struct batadv_frag_packet_list_entry {
-       struct list_head list;
-       uint16_t seqno;
-       struct sk_buff *skb;
-};
-
-/**
- * struct batadv_vis_info - local data for vis information
- * @first_seen: timestamp used for purging stale vis info entries
- * @recv_list: List of server-neighbors we have received this packet from. This
- *  packet should not be re-forward to them again. List elements are struct
- *  batadv_vis_recvlist_node
- * @send_list: list of packets to be forwarded
- * @refcount: number of contexts the object is used
- * @hash_entry: hlist node for batadv_priv_vis::hash
- * @bat_priv: pointer to soft_iface this orig node belongs to
- * @skb_packet: contains the vis packet
- */
-struct batadv_vis_info {
-       unsigned long first_seen;
-       struct list_head recv_list;
-       struct list_head send_list;
-       struct kref refcount;
-       struct hlist_node hash_entry;
-       struct batadv_priv *bat_priv;
-       struct sk_buff *skb_packet;
-} __packed;
-
-/**
- * struct batadv_vis_info_entry - contains link information for vis
- * @src: source MAC of the link, all zero for local TT entry
- * @dst: destination MAC of the link, client mac address for local TT entry
- * @quality: transmission quality of the link, or 0 for local TT entry
- */
-struct batadv_vis_info_entry {
-       uint8_t  src[ETH_ALEN];
-       uint8_t  dest[ETH_ALEN];
-       uint8_t  quality;
-} __packed;
-
-/**
- * struct batadv_vis_recvlist_node - list entry for batadv_vis_info::recv_list
- * @list: list node for batadv_vis_info::recv_list
- * @mac: MAC address of the originator from where the vis_info was received
- */
-struct batadv_vis_recvlist_node {
-       struct list_head list;
-       uint8_t mac[ETH_ALEN];
-};
-
-/**
- * struct batadv_vis_if_list_entry - auxiliary data for vis data generation
- * @addr: MAC address of the interface
- * @primary: true if this interface is the primary interface
- * @list: list node the interface list
- *
- * While scanning for vis-entries of a particular vis-originator
- * this list collects its interfaces to create a subgraph/cluster
- * out of them later
- */
-struct batadv_vis_if_list_entry {
-       uint8_t addr[ETH_ALEN];
-       bool primary;
-       struct hlist_node list;
-};
-
 /**
  * struct batadv_algo_ops - mesh algorithm callbacks
  * @list: list node for the batadv_algo_list
@@ -948,6 +1007,16 @@ struct batadv_vis_if_list_entry {
  * @bat_primary_iface_set: called when primary interface is selected / changed
  * @bat_ogm_schedule: prepare a new outgoing OGM for the send queue
  * @bat_ogm_emit: send scheduled OGM
+ * @bat_neigh_cmp: compare the metrics of two neighbors
+ * @bat_neigh_is_equiv_or_better: check if neigh1 is equally good or
+ *  better than neigh2 from the metric perspective
+ * @bat_orig_print: print the originator table (optional)
+ * @bat_orig_free: free the resources allocated by the routing algorithm for an
+ *  orig_node object
+ * @bat_orig_add_if: ask the routing algorithm to apply the needed changes to
+ *  the orig_node due to a new hard-interface being added into the mesh
+ * @bat_orig_del_if: ask the routing algorithm to apply the needed changes to
+ *  the orig_node due to a hard-interface being removed from the mesh
  */
 struct batadv_algo_ops {
        struct hlist_node list;
@@ -958,6 +1027,17 @@ struct batadv_algo_ops {
        void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
        void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
        void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
+       int (*bat_neigh_cmp)(struct batadv_neigh_node *neigh1,
+                            struct batadv_neigh_node *neigh2);
+       bool (*bat_neigh_is_equiv_or_better)(struct batadv_neigh_node *neigh1,
+                                            struct batadv_neigh_node *neigh2);
+       /* orig_node handling API */
+       void (*bat_orig_print)(struct batadv_priv *priv, struct seq_file *seq);
+       void (*bat_orig_free)(struct batadv_orig_node *orig_node);
+       int (*bat_orig_add_if)(struct batadv_orig_node *orig_node,
+                              int max_if_num);
+       int (*bat_orig_del_if)(struct batadv_orig_node *orig_node,
+                              int max_if_num, int del_if_num);
 };
 
 /**
@@ -965,6 +1045,7 @@ struct batadv_algo_ops {
  * is used to store ARP entries needed for the global DAT cache
  * @ip: the IPv4 corresponding to this DAT/ARP entry
  * @mac_addr: the MAC address associated to the stored IPv4
+ * @vid: the vlan ID associated to this entry
  * @last_update: time in jiffies when this entry was refreshed last time
  * @hash_entry: hlist node for batadv_priv_dat::hash
  * @refcount: number of contexts the object is used
@@ -973,6 +1054,7 @@ struct batadv_algo_ops {
 struct batadv_dat_entry {
        __be32 ip;
        uint8_t mac_addr[ETH_ALEN];
+       unsigned short vid;
        unsigned long last_update;
        struct hlist_node hash_entry;
        atomic_t refcount;
@@ -992,4 +1074,60 @@ struct batadv_dat_candidate {
        struct batadv_orig_node *orig_node;
 };
 
+/**
+ * struct batadv_tvlv_container - container for tvlv appended to OGMs
+ * @list: hlist node for batadv_priv_tvlv::container_list
+ * @tvlv_hdr: tvlv header information needed to construct the tvlv
+ * @value_len: length of the buffer following this struct which contains
+ *  the actual tvlv payload
+ * @refcount: number of contexts the object is used
+ */
+struct batadv_tvlv_container {
+       struct hlist_node list;
+       struct batadv_tvlv_hdr tvlv_hdr;
+       atomic_t refcount;
+};
+
+/**
+ * struct batadv_tvlv_handler - handler for specific tvlv type and version
+ * @list: hlist node for batadv_priv_tvlv::handler_list
+ * @ogm_handler: handler callback which is given the tvlv payload to process on
+ *  incoming OGM packets
+ * @unicast_handler: handler callback which is given the tvlv payload to process
+ *  on incoming unicast tvlv packets
+ * @type: tvlv type this handler feels responsible for
+ * @version: tvlv version this handler feels responsible for
+ * @flags: tvlv handler flags
+ * @refcount: number of contexts the object is used
+ * @rcu: struct used for freeing in an RCU-safe manner
+ */
+struct batadv_tvlv_handler {
+       struct hlist_node list;
+       void (*ogm_handler)(struct batadv_priv *bat_priv,
+                           struct batadv_orig_node *orig,
+                           uint8_t flags,
+                           void *tvlv_value, uint16_t tvlv_value_len);
+       int (*unicast_handler)(struct batadv_priv *bat_priv,
+                              uint8_t *src, uint8_t *dst,
+                              void *tvlv_value, uint16_t tvlv_value_len);
+       uint8_t type;
+       uint8_t version;
+       uint8_t flags;
+       atomic_t refcount;
+       struct rcu_head rcu;
+};
+
+/**
+ * enum batadv_tvlv_handler_flags - tvlv handler flags definitions
+ * @BATADV_TVLV_HANDLER_OGM_CIFNOTFND: tvlv ogm processing function will call
+ *  this handler even if its type was not found (with no data)
+ * @BATADV_TVLV_HANDLER_OGM_CALLED: internal tvlv handling flag - the API marks
+ *  a handler as being called, so it won't be called if the
+ *  BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag was set
+ */
+enum batadv_tvlv_handler_flags {
+       BATADV_TVLV_HANDLER_OGM_CIFNOTFND = BIT(1),
+       BATADV_TVLV_HANDLER_OGM_CALLED = BIT(2),
+};
+
 #endif /* _NET_BATMAN_ADV_TYPES_H_ */
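
A hedged sketch of how a new TVLV consumer would plug into the handler structure and flags above, mirroring the batadv_tvlv_handler_register() calls made in batadv_tt_init() earlier in this diff; the callback body and the type value 0x42 are placeholders:

/* Placeholder handler: only the registration call and the
 * BATADV_TVLV_HANDLER_OGM_CIFNOTFND flag usage follow the API in this series.
 */
static void example_ogm_handler_v1(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig,
				   uint8_t flags,
				   void *tvlv_value, uint16_t tvlv_value_len)
{
	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
		return;	/* invoked although the sender announced no container */

	/* parse tvlv_value / tvlv_value_len here */
}

static void example_register(struct batadv_priv *bat_priv)
{
	batadv_tvlv_handler_register(bat_priv, example_ogm_handler_v1, NULL,
				     0x42 /* type */, 1 /* version */,
				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
}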
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
deleted file mode 100644 (file)
index 48b31d3..0000000
+++ /dev/null
@@ -1,491 +0,0 @@
-/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#include "main.h"
-#include "unicast.h"
-#include "send.h"
-#include "soft-interface.h"
-#include "gateway_client.h"
-#include "originator.h"
-#include "hash.h"
-#include "translation-table.h"
-#include "routing.h"
-#include "hard-interface.h"
-
-
-static struct sk_buff *
-batadv_frag_merge_packet(struct list_head *head,
-                        struct batadv_frag_packet_list_entry *tfp,
-                        struct sk_buff *skb)
-{
-       struct batadv_unicast_frag_packet *up;
-       struct sk_buff *tmp_skb;
-       struct batadv_unicast_packet *unicast_packet;
-       int hdr_len = sizeof(*unicast_packet);
-       int uni_diff = sizeof(*up) - hdr_len;
-       uint8_t *packet_pos;
-
-       up = (struct batadv_unicast_frag_packet *)skb->data;
-       /* set skb to the first part and tmp_skb to the second part */
-       if (up->flags & BATADV_UNI_FRAG_HEAD) {
-               tmp_skb = tfp->skb;
-       } else {
-               tmp_skb = skb;
-               skb = tfp->skb;
-       }
-
-       if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
-               goto err;
-
-       skb_pull(tmp_skb, sizeof(*up));
-       if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
-               goto err;
-
-       /* move free entry to end */
-       tfp->skb = NULL;
-       tfp->seqno = 0;
-       list_move_tail(&tfp->list, head);
-
-       memcpy(skb_put(skb, tmp_skb->len), tmp_skb->data, tmp_skb->len);
-       kfree_skb(tmp_skb);
-
-       memmove(skb->data + uni_diff, skb->data, hdr_len);
-       packet_pos = skb_pull(skb, uni_diff);
-       unicast_packet = (struct batadv_unicast_packet *)packet_pos;
-       unicast_packet->header.packet_type = BATADV_UNICAST;
-
-       return skb;
-
-err:
-       /* free buffered skb, skb will be freed later */
-       kfree_skb(tfp->skb);
-       return NULL;
-}
-
-static void batadv_frag_create_entry(struct list_head *head,
-                                    struct sk_buff *skb)
-{
-       struct batadv_frag_packet_list_entry *tfp;
-       struct batadv_unicast_frag_packet *up;
-
-       up = (struct batadv_unicast_frag_packet *)skb->data;
-
-       /* free and oldest packets stand at the end */
-       tfp = list_entry((head)->prev, typeof(*tfp), list);
-       kfree_skb(tfp->skb);
-
-       tfp->seqno = ntohs(up->seqno);
-       tfp->skb = skb;
-       list_move(&tfp->list, head);
-       return;
-}
-
-static int batadv_frag_create_buffer(struct list_head *head)
-{
-       int i;
-       struct batadv_frag_packet_list_entry *tfp;
-
-       for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
-               tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
-               if (!tfp) {
-                       batadv_frag_list_free(head);
-                       return -ENOMEM;
-               }
-               tfp->skb = NULL;
-               tfp->seqno = 0;
-               INIT_LIST_HEAD(&tfp->list);
-               list_add(&tfp->list, head);
-       }
-
-       return 0;
-}
-
-static struct batadv_frag_packet_list_entry *
-batadv_frag_search_packet(struct list_head *head,
-                         const struct batadv_unicast_frag_packet *up)
-{
-       struct batadv_frag_packet_list_entry *tfp;
-       struct batadv_unicast_frag_packet *tmp_up = NULL;
-       bool is_head_tmp, is_head;
-       uint16_t search_seqno;
-
-       if (up->flags & BATADV_UNI_FRAG_HEAD)
-               search_seqno = ntohs(up->seqno)+1;
-       else
-               search_seqno = ntohs(up->seqno)-1;
-
-       is_head = up->flags & BATADV_UNI_FRAG_HEAD;
-
-       list_for_each_entry(tfp, head, list) {
-               if (!tfp->skb)
-                       continue;
-
-               if (tfp->seqno == ntohs(up->seqno))
-                       goto mov_tail;
-
-               tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
-
-               if (tfp->seqno == search_seqno) {
-                       is_head_tmp = tmp_up->flags & BATADV_UNI_FRAG_HEAD;
-                       if (is_head_tmp != is_head)
-                               return tfp;
-                       else
-                               goto mov_tail;
-               }
-       }
-       return NULL;
-
-mov_tail:
-       list_move_tail(&tfp->list, head);
-       return NULL;
-}
-
-void batadv_frag_list_free(struct list_head *head)
-{
-       struct batadv_frag_packet_list_entry *pf, *tmp_pf;
-
-       if (!list_empty(head)) {
-               list_for_each_entry_safe(pf, tmp_pf, head, list) {
-                       kfree_skb(pf->skb);
-                       list_del(&pf->list);
-                       kfree(pf);
-               }
-       }
-       return;
-}
-
-/* frag_reassemble_skb():
- * returns NET_RX_DROP if the operation failed - skb is left intact
- * returns NET_RX_SUCCESS if the fragment was buffered (skb_new will be NULL)
- * or the skb could be reassembled (skb_new will point to the new packet and
- * skb was freed)
- */
-int batadv_frag_reassemble_skb(struct sk_buff *skb,
-                              struct batadv_priv *bat_priv,
-                              struct sk_buff **new_skb)
-{
-       struct batadv_orig_node *orig_node;
-       struct batadv_frag_packet_list_entry *tmp_frag_entry;
-       int ret = NET_RX_DROP;
-       struct batadv_unicast_frag_packet *unicast_packet;
-
-       unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
-       *new_skb = NULL;
-
-       orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
-       if (!orig_node)
-               goto out;
-
-       orig_node->last_frag_packet = jiffies;
-
-       if (list_empty(&orig_node->frag_list) &&
-           batadv_frag_create_buffer(&orig_node->frag_list)) {
-               pr_debug("couldn't create frag buffer\n");
-               goto out;
-       }
-
-       tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
-                                                  unicast_packet);
-
-       if (!tmp_frag_entry) {
-               batadv_frag_create_entry(&orig_node->frag_list, skb);
-               ret = NET_RX_SUCCESS;
-               goto out;
-       }
-
-       *new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
-                                           tmp_frag_entry, skb);
-       /* if not, merge failed */
-       if (*new_skb)
-               ret = NET_RX_SUCCESS;
-
-out:
-       if (orig_node)
-               batadv_orig_node_free_ref(orig_node);
-       return ret;
-}
-
-int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
-                        struct batadv_hard_iface *hard_iface,
-                        const uint8_t dstaddr[])
-{
-       struct batadv_unicast_packet tmp_uc, *unicast_packet;
-       struct batadv_hard_iface *primary_if;
-       struct sk_buff *frag_skb;
-       struct batadv_unicast_frag_packet *frag1, *frag2;
-       int uc_hdr_len = sizeof(*unicast_packet);
-       int ucf_hdr_len = sizeof(*frag1);
-       int data_len = skb->len - uc_hdr_len;
-       int large_tail = 0, ret = NET_RX_DROP;
-       uint16_t seqno;
-
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto dropped;
-
-       frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
-       if (!frag_skb)
-               goto dropped;
-
-       skb->priority = TC_PRIO_CONTROL;
-       skb_reserve(frag_skb, ucf_hdr_len);
-
-       unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
-       skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
-
-       if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
-           batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
-               goto drop_frag;
-
-       frag1 = (struct batadv_unicast_frag_packet *)skb->data;
-       frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;
-
-       memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
-
-       frag1->header.ttl--;
-       frag1->header.version = BATADV_COMPAT_VERSION;
-       frag1->header.packet_type = BATADV_UNICAST_FRAG;
-
-       memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       memcpy(frag2, frag1, sizeof(*frag2));
-
-       if (data_len & 1)
-               large_tail = BATADV_UNI_FRAG_LARGETAIL;
-
-       frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
-       frag2->flags = large_tail;
-
-       seqno = atomic_add_return(2, &hard_iface->frag_seqno);
-       frag1->seqno = htons(seqno - 1);
-       frag2->seqno = htons(seqno);
-
-       batadv_send_skb_packet(skb, hard_iface, dstaddr);
-       batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
-       ret = NET_RX_SUCCESS;
-       goto out;
-
-drop_frag:
-       kfree_skb(frag_skb);
-dropped:
-       kfree_skb(skb);
-out:
-       if (primary_if)
-               batadv_hardif_free_ref(primary_if);
-       return ret;
-}
-
-/**
- * batadv_unicast_push_and_fill_skb - extends the buffer and initializes the
- * common fields for unicast packets
- * @skb: packet
- * @hdr_size: amount of bytes to push at the beginning of the skb
- * @orig_node: the destination node
- *
- * Returns false if the buffer extension was not possible or true otherwise
- */
-static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
-                                            struct batadv_orig_node *orig_node)
-{
-       struct batadv_unicast_packet *unicast_packet;
-       uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
-
-       if (batadv_skb_head_push(skb, hdr_size) < 0)
-               return false;
-
-       unicast_packet = (struct batadv_unicast_packet *)skb->data;
-       unicast_packet->header.version = BATADV_COMPAT_VERSION;
-       /* batman packet type: unicast */
-       unicast_packet->header.packet_type = BATADV_UNICAST;
-       /* set unicast ttl */
-       unicast_packet->header.ttl = BATADV_TTL;
-       /* copy the destination for faster routing */
-       memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
-       /* set the destination tt version number */
-       unicast_packet->ttvn = ttvn;
-
-       return true;
-}
-
-/**
- * batadv_unicast_prepare_skb - encapsulate an skb with a unicast header
- * @skb: the skb containing the payload to encapsulate
- * @orig_node: the destination node
- *
- * Returns false if the payload could not be encapsulated or true otherwise.
- *
- * This call might reallocate skb data.
- */
-static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
-                                      struct batadv_orig_node *orig_node)
-{
-       size_t uni_size = sizeof(struct batadv_unicast_packet);
-       return batadv_unicast_push_and_fill_skb(skb, uni_size, orig_node);
-}
-
-/**
- * batadv_unicast_4addr_prepare_skb - encapsulate an skb with a unicast4addr
- * header
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the skb containing the payload to encapsulate
- * @orig_node: the destination node
- * @packet_subtype: the batman 4addr packet subtype to use
- *
- * Returns false if the payload could not be encapsulated or true otherwise.
- *
- * This call might reallocate skb data.
- */
-bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
-                                     struct sk_buff *skb,
-                                     struct batadv_orig_node *orig,
-                                     int packet_subtype)
-{
-       struct batadv_hard_iface *primary_if;
-       struct batadv_unicast_4addr_packet *unicast_4addr_packet;
-       bool ret = false;
-
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
-
-       /* pull the header space and fill the unicast_packet substructure.
-        * We can do that because the first member of the unicast_4addr_packet
-        * is of type struct unicast_packet
-        */
-       if (!batadv_unicast_push_and_fill_skb(skb,
-                                             sizeof(*unicast_4addr_packet),
-                                             orig))
-               goto out;
-
-       unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
-       unicast_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
-       memcpy(unicast_4addr_packet->src, primary_if->net_dev->dev_addr,
-              ETH_ALEN);
-       unicast_4addr_packet->subtype = packet_subtype;
-       unicast_4addr_packet->reserved = 0;
-
-       ret = true;
-out:
-       if (primary_if)
-               batadv_hardif_free_ref(primary_if);
-       return ret;
-}
-
-/**
- * batadv_unicast_generic_send_skb - send an skb as unicast
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: payload to send
- * @packet_type: the batman unicast packet type to use
- * @packet_subtype: the batman packet subtype. It is ignored if packet_type is
- *                 not BATADV_UNICAT_4ADDR
- *
- * Returns 1 in case of error or 0 otherwise
- */
-int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
-                                   struct sk_buff *skb, int packet_type,
-                                   int packet_subtype)
-{
-       struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
-       struct batadv_unicast_packet *unicast_packet;
-       struct batadv_orig_node *orig_node;
-       struct batadv_neigh_node *neigh_node;
-       int data_len = skb->len;
-       int ret = NET_RX_DROP;
-       unsigned int dev_mtu, header_len;
-
-       /* get routing information */
-       if (is_multicast_ether_addr(ethhdr->h_dest)) {
-               orig_node = batadv_gw_get_selected_orig(bat_priv);
-               if (orig_node)
-                       goto find_router;
-       }
-
-       /* check for tt host - increases orig_node refcount.
-        * returns NULL in case of AP isolation
-        */
-       orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
-                                            ethhdr->h_dest);
-
-find_router:
-       /* find_router():
-        *  - if orig_node is NULL it returns NULL
-        *  - increases neigh_nodes refcount if found.
-        */
-       neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
-
-       if (!neigh_node)
-               goto out;
-
-       switch (packet_type) {
-       case BATADV_UNICAST:
-               if (!batadv_unicast_prepare_skb(skb, orig_node))
-                       goto out;
-
-               header_len = sizeof(struct batadv_unicast_packet);
-               break;
-       case BATADV_UNICAST_4ADDR:
-               if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
-                                                     packet_subtype))
-                       goto out;
-
-               header_len = sizeof(struct batadv_unicast_4addr_packet);
-               break;
-       default:
-               /* this function supports UNICAST and UNICAST_4ADDR only. It
-                * should never be invoked with any other packet type
-                */
-               goto out;
-       }
-
-       ethhdr = (struct ethhdr *)(skb->data + header_len);
-       unicast_packet = (struct batadv_unicast_packet *)skb->data;
-
-       /* inform the destination node that we are still missing a correct route
-        * for this client. The destination will receive this packet and will
-        * try to reroute it because the ttvn contained in the header is less
-        * than the current one
-        */
-       if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
-               unicast_packet->ttvn = unicast_packet->ttvn - 1;
-
-       dev_mtu = neigh_node->if_incoming->net_dev->mtu;
-       /* fragmentation mechanism only works for UNICAST (now) */
-       if (packet_type == BATADV_UNICAST &&
-           atomic_read(&bat_priv->fragmentation) &&
-           data_len + sizeof(*unicast_packet) > dev_mtu) {
-               /* send frag skb decreases ttl */
-               unicast_packet->header.ttl++;
-               ret = batadv_frag_send_skb(skb, bat_priv,
-                                          neigh_node->if_incoming,
-                                          neigh_node->addr);
-               goto out;
-       }
-
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
-               ret = 0;
-
-out:
-       if (neigh_node)
-               batadv_neigh_node_free_ref(neigh_node);
-       if (orig_node)
-               batadv_orig_node_free_ref(orig_node);
-       if (ret == NET_RX_DROP)
-               kfree_skb(skb);
-       return ret;
-}
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
deleted file mode 100644 (file)
index 429cf8a..0000000
--- a/net/batman-adv/unicast.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (C) 2010-2013 B.A.T.M.A.N. contributors:
- *
- * Andreas Langer
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#ifndef _NET_BATMAN_ADV_UNICAST_H_
-#define _NET_BATMAN_ADV_UNICAST_H_
-
-#include "packet.h"
-
-#define BATADV_FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
-#define BATADV_FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
-
-int batadv_frag_reassemble_skb(struct sk_buff *skb,
-                              struct batadv_priv *bat_priv,
-                              struct sk_buff **new_skb);
-void batadv_frag_list_free(struct list_head *head);
-int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
-                        struct batadv_hard_iface *hard_iface,
-                        const uint8_t dstaddr[]);
-bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
-                                     struct sk_buff *skb,
-                                     struct batadv_orig_node *orig_node,
-                                     int packet_subtype);
-int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
-                                   struct sk_buff *skb, int packet_type,
-                                   int packet_subtype);
-
-
-/**
- * batadv_unicast_send_skb - send the skb encapsulated in a unicast packet
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the payload to send
- */
-static inline int batadv_unicast_send_skb(struct batadv_priv *bat_priv,
-                                         struct sk_buff *skb)
-{
-       return batadv_unicast_generic_send_skb(bat_priv, skb, BATADV_UNICAST,
-                                              0);
-}
-
-/**
- * batadv_unicast_4addr_send_skb - send the skb encapsulated in a unicast4addr packet
- * @bat_priv: the bat priv with all the soft interface information
- * @skb: the payload to send
- * @packet_subtype: the batman 4addr packet subtype to use
- */
-static inline int batadv_unicast_4addr_send_skb(struct batadv_priv *bat_priv,
-                                               struct sk_buff *skb,
-                                               int packet_subtype)
-{
-       return batadv_unicast_generic_send_skb(bat_priv, skb,
-                                              BATADV_UNICAST_4ADDR,
-                                              packet_subtype);
-}
-
-static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
-{
-       const struct batadv_unicast_frag_packet *unicast_packet;
-       int uneven_correction = 0;
-       unsigned int merged_size;
-
-       unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
-
-       if (unicast_packet->flags & BATADV_UNI_FRAG_LARGETAIL) {
-               if (unicast_packet->flags & BATADV_UNI_FRAG_HEAD)
-                       uneven_correction = 1;
-               else
-                       uneven_correction = -1;
-       }
-
-       merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
-       merged_size += sizeof(struct batadv_unicast_packet) + uneven_correction;
-
-       return merged_size <= mtu;
-}
-
-#endif /* _NET_BATMAN_ADV_UNICAST_H_ */
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
deleted file mode 100644 (file)
index d8ea31a..0000000
--- a/net/batman-adv/vis.c
+++ /dev/null
@@ -1,938 +0,0 @@
-/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#include "main.h"
-#include "send.h"
-#include "translation-table.h"
-#include "vis.h"
-#include "soft-interface.h"
-#include "hard-interface.h"
-#include "hash.h"
-#include "originator.h"
-
-#define BATADV_MAX_VIS_PACKET_SIZE 1000
-
-/* hash class keys */
-static struct lock_class_key batadv_vis_hash_lock_class_key;
-
-/* free the info */
-static void batadv_free_info(struct kref *ref)
-{
-       struct batadv_vis_info *info;
-       struct batadv_priv *bat_priv;
-       struct batadv_vis_recvlist_node *entry, *tmp;
-
-       info = container_of(ref, struct batadv_vis_info, refcount);
-       bat_priv = info->bat_priv;
-
-       list_del_init(&info->send_list);
-       spin_lock_bh(&bat_priv->vis.list_lock);
-       list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
-               list_del(&entry->list);
-               kfree(entry);
-       }
-
-       spin_unlock_bh(&bat_priv->vis.list_lock);
-       kfree_skb(info->skb_packet);
-       kfree(info);
-}
-
-/* Compare two vis packets, used by the hashing algorithm */
-static int batadv_vis_info_cmp(const struct hlist_node *node, const void *data2)
-{
-       const struct batadv_vis_info *d1, *d2;
-       const struct batadv_vis_packet *p1, *p2;
-
-       d1 = container_of(node, struct batadv_vis_info, hash_entry);
-       d2 = data2;
-       p1 = (struct batadv_vis_packet *)d1->skb_packet->data;
-       p2 = (struct batadv_vis_packet *)d2->skb_packet->data;
-       return batadv_compare_eth(p1->vis_orig, p2->vis_orig);
-}
-
-/* hash function to choose an entry in a hash table of given size
- * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
- */
-static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
-{
-       const struct batadv_vis_info *vis_info = data;
-       const struct batadv_vis_packet *packet;
-       const unsigned char *key;
-       uint32_t hash = 0;
-       size_t i;
-
-       packet = (struct batadv_vis_packet *)vis_info->skb_packet->data;
-       key = packet->vis_orig;
-       for (i = 0; i < ETH_ALEN; i++) {
-               hash += key[i];
-               hash += (hash << 10);
-               hash ^= (hash >> 6);
-       }
-
-       hash += (hash << 3);
-       hash ^= (hash >> 11);
-       hash += (hash << 15);
-
-       return hash % size;
-}
-
-static struct batadv_vis_info *
-batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
-{
-       struct batadv_hashtable *hash = bat_priv->vis.hash;
-       struct hlist_head *head;
-       struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
-       uint32_t index;
-
-       if (!hash)
-               return NULL;
-
-       index = batadv_vis_info_choose(data, hash->size);
-       head = &hash->table[index];
-
-       rcu_read_lock();
-       hlist_for_each_entry_rcu(vis_info, head, hash_entry) {
-               if (!batadv_vis_info_cmp(&vis_info->hash_entry, data))
-                       continue;
-
-               vis_info_tmp = vis_info;
-               break;
-       }
-       rcu_read_unlock();
-
-       return vis_info_tmp;
-}
-
-/* insert interface to the list of interfaces of one originator, if it
- * does not already exist in the list
- */
-static void batadv_vis_data_insert_interface(const uint8_t *interface,
-                                            struct hlist_head *if_list,
-                                            bool primary)
-{
-       struct batadv_vis_if_list_entry *entry;
-
-       hlist_for_each_entry(entry, if_list, list) {
-               if (batadv_compare_eth(entry->addr, interface))
-                       return;
-       }
-
-       /* it's a new address, add it to the list */
-       entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-       if (!entry)
-               return;
-       memcpy(entry->addr, interface, ETH_ALEN);
-       entry->primary = primary;
-       hlist_add_head(&entry->list, if_list);
-}
-
-static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
-                                         const struct hlist_head *if_list)
-{
-       struct batadv_vis_if_list_entry *entry;
-
-       hlist_for_each_entry(entry, if_list, list) {
-               if (entry->primary)
-                       seq_puts(seq, "PRIMARY, ");
-               else
-                       seq_printf(seq,  "SEC %pM, ", entry->addr);
-       }
-}
-
-/* read an entry  */
-static ssize_t
-batadv_vis_data_read_entry(struct seq_file *seq,
-                          const struct batadv_vis_info_entry *entry,
-                          const uint8_t *src, bool primary)
-{
-       if (primary && entry->quality == 0)
-               return seq_printf(seq, "TT %pM, ", entry->dest);
-       else if (batadv_compare_eth(entry->src, src))
-               return seq_printf(seq, "TQ %pM %d, ", entry->dest,
-                                 entry->quality);
-
-       return 0;
-}
-
-static void
-batadv_vis_data_insert_interfaces(struct hlist_head *list,
-                                 struct batadv_vis_packet *packet,
-                                 struct batadv_vis_info_entry *entries)
-{
-       int i;
-
-       for (i = 0; i < packet->entries; i++) {
-               if (entries[i].quality == 0)
-                       continue;
-
-               if (batadv_compare_eth(entries[i].src, packet->vis_orig))
-                       continue;
-
-               batadv_vis_data_insert_interface(entries[i].src, list, false);
-       }
-}
-
-static void batadv_vis_data_read_entries(struct seq_file *seq,
-                                        struct hlist_head *list,
-                                        struct batadv_vis_packet *packet,
-                                        struct batadv_vis_info_entry *entries)
-{
-       int i;
-       struct batadv_vis_if_list_entry *entry;
-
-       hlist_for_each_entry(entry, list, list) {
-               seq_printf(seq, "%pM,", entry->addr);
-
-               for (i = 0; i < packet->entries; i++)
-                       batadv_vis_data_read_entry(seq, &entries[i],
-                                                  entry->addr, entry->primary);
-
-               /* add primary/secondary records */
-               if (batadv_compare_eth(entry->addr, packet->vis_orig))
-                       batadv_vis_data_read_prim_sec(seq, list);
-
-               seq_puts(seq, "\n");
-       }
-}
-
-static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
-                                            const struct hlist_head *head)
-{
-       struct batadv_vis_info *info;
-       struct batadv_vis_packet *packet;
-       uint8_t *entries_pos;
-       struct batadv_vis_info_entry *entries;
-       struct batadv_vis_if_list_entry *entry;
-       struct hlist_node *n;
-
-       HLIST_HEAD(vis_if_list);
-
-       hlist_for_each_entry_rcu(info, head, hash_entry) {
-               packet = (struct batadv_vis_packet *)info->skb_packet->data;
-               entries_pos = (uint8_t *)packet + sizeof(*packet);
-               entries = (struct batadv_vis_info_entry *)entries_pos;
-
-               batadv_vis_data_insert_interface(packet->vis_orig, &vis_if_list,
-                                                true);
-               batadv_vis_data_insert_interfaces(&vis_if_list, packet,
-                                                 entries);
-               batadv_vis_data_read_entries(seq, &vis_if_list, packet,
-                                            entries);
-
-               hlist_for_each_entry_safe(entry, n, &vis_if_list, list) {
-                       hlist_del(&entry->list);
-                       kfree(entry);
-               }
-       }
-}
-
-int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
-{
-       struct batadv_hard_iface *primary_if;
-       struct hlist_head *head;
-       struct net_device *net_dev = (struct net_device *)seq->private;
-       struct batadv_priv *bat_priv = netdev_priv(net_dev);
-       struct batadv_hashtable *hash = bat_priv->vis.hash;
-       uint32_t i;
-       int ret = 0;
-       int vis_server = atomic_read(&bat_priv->vis_mode);
-
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
-
-       if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
-               goto out;
-
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-               batadv_vis_seq_print_text_bucket(seq, head);
-       }
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-out:
-       if (primary_if)
-               batadv_hardif_free_ref(primary_if);
-       return ret;
-}
-
-/* add the info packet to the send list, if it was not
- * already linked in.
- */
-static void batadv_send_list_add(struct batadv_priv *bat_priv,
-                                struct batadv_vis_info *info)
-{
-       if (list_empty(&info->send_list)) {
-               kref_get(&info->refcount);
-               list_add_tail(&info->send_list, &bat_priv->vis.send_list);
-       }
-}
-
-/* delete the info packet from the send list, if it was
- * linked in.
- */
-static void batadv_send_list_del(struct batadv_vis_info *info)
-{
-       if (!list_empty(&info->send_list)) {
-               list_del_init(&info->send_list);
-               kref_put(&info->refcount, batadv_free_info);
-       }
-}
-
-/* tries to add one entry to the receive list. */
-static void batadv_recv_list_add(struct batadv_priv *bat_priv,
-                                struct list_head *recv_list, const char *mac)
-{
-       struct batadv_vis_recvlist_node *entry;
-
-       entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
-       if (!entry)
-               return;
-
-       memcpy(entry->mac, mac, ETH_ALEN);
-       spin_lock_bh(&bat_priv->vis.list_lock);
-       list_add_tail(&entry->list, recv_list);
-       spin_unlock_bh(&bat_priv->vis.list_lock);
-}
-
-/* returns 1 if this mac is in the recv_list */
-static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
-                                 const struct list_head *recv_list,
-                                 const char *mac)
-{
-       const struct batadv_vis_recvlist_node *entry;
-
-       spin_lock_bh(&bat_priv->vis.list_lock);
-       list_for_each_entry(entry, recv_list, list) {
-               if (batadv_compare_eth(entry->mac, mac)) {
-                       spin_unlock_bh(&bat_priv->vis.list_lock);
-                       return 1;
-               }
-       }
-       spin_unlock_bh(&bat_priv->vis.list_lock);
-       return 0;
-}
-
-/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
- * broken.. ). vis hash must be locked outside.  is_new is set when the packet
- * is newer than old entries in the hash.
- */
-static struct batadv_vis_info *
-batadv_add_packet(struct batadv_priv *bat_priv,
-                 struct batadv_vis_packet *vis_packet, int vis_info_len,
-                 int *is_new, int make_broadcast)
-{
-       struct batadv_vis_info *info, *old_info;
-       struct batadv_vis_packet *search_packet, *old_packet;
-       struct batadv_vis_info search_elem;
-       struct batadv_vis_packet *packet;
-       struct sk_buff *tmp_skb;
-       int hash_added;
-       size_t len;
-       size_t max_entries;
-
-       *is_new = 0;
-       /* sanity check */
-       if (!bat_priv->vis.hash)
-               return NULL;
-
-       /* see if the packet is already in vis_hash */
-       search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
-       if (!search_elem.skb_packet)
-               return NULL;
-       len = sizeof(*search_packet);
-       tmp_skb = search_elem.skb_packet;
-       search_packet = (struct batadv_vis_packet *)skb_put(tmp_skb, len);
-
-       memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
-       old_info = batadv_vis_hash_find(bat_priv, &search_elem);
-       kfree_skb(search_elem.skb_packet);
-
-       if (old_info) {
-               tmp_skb = old_info->skb_packet;
-               old_packet = (struct batadv_vis_packet *)tmp_skb->data;
-               if (!batadv_seq_after(ntohl(vis_packet->seqno),
-                                     ntohl(old_packet->seqno))) {
-                       if (old_packet->seqno == vis_packet->seqno) {
-                               batadv_recv_list_add(bat_priv,
-                                                    &old_info->recv_list,
-                                                    vis_packet->sender_orig);
-                               return old_info;
-                       } else {
-                               /* newer packet is already in hash. */
-                               return NULL;
-                       }
-               }
-               /* remove old entry */
-               batadv_hash_remove(bat_priv->vis.hash, batadv_vis_info_cmp,
-                                  batadv_vis_info_choose, old_info);
-               batadv_send_list_del(old_info);
-               kref_put(&old_info->refcount, batadv_free_info);
-       }
-
-       info = kmalloc(sizeof(*info), GFP_ATOMIC);
-       if (!info)
-               return NULL;
-
-       len = sizeof(*packet) + vis_info_len;
-       info->skb_packet = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
-       if (!info->skb_packet) {
-               kfree(info);
-               return NULL;
-       }
-       info->skb_packet->priority = TC_PRIO_CONTROL;
-       skb_reserve(info->skb_packet, ETH_HLEN);
-       packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
-
-       kref_init(&info->refcount);
-       INIT_LIST_HEAD(&info->send_list);
-       INIT_LIST_HEAD(&info->recv_list);
-       info->first_seen = jiffies;
-       info->bat_priv = bat_priv;
-       memcpy(packet, vis_packet, len);
-
-       /* initialize and add new packet. */
-       *is_new = 1;
-
-       /* Make it a broadcast packet, if required */
-       if (make_broadcast)
-               memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
-
-       /* repair if entries is longer than packet. */
-       max_entries = vis_info_len / sizeof(struct batadv_vis_info_entry);
-       if (packet->entries > max_entries)
-               packet->entries = max_entries;
-
-       batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
-
-       /* try to add it */
-       hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
-                                    batadv_vis_info_choose, info,
-                                    &info->hash_entry);
-       if (hash_added != 0) {
-               /* did not work (for some reason) */
-               kref_put(&info->refcount, batadv_free_info);
-               info = NULL;
-       }
-
-       return info;
-}
-
-/* handle the server sync packet, forward if needed. */
-void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
-                                      struct batadv_vis_packet *vis_packet,
-                                      int vis_info_len)
-{
-       struct batadv_vis_info *info;
-       int is_new, make_broadcast;
-       int vis_server = atomic_read(&bat_priv->vis_mode);
-
-       make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
-
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-       info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
-                                &is_new, make_broadcast);
-       if (!info)
-               goto end;
-
-       /* only if we are server ourselves and packet is newer than the one in
-        * hash.
-        */
-       if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
-               batadv_send_list_add(bat_priv, info);
-end:
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
-
-/* handle an incoming client update packet and schedule forward if needed. */
-void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
-                                        struct batadv_vis_packet *vis_packet,
-                                        int vis_info_len)
-{
-       struct batadv_vis_info *info;
-       struct batadv_vis_packet *packet;
-       int is_new;
-       int vis_server = atomic_read(&bat_priv->vis_mode);
-       int are_target = 0;
-
-       /* clients shall not broadcast. */
-       if (is_broadcast_ether_addr(vis_packet->target_orig))
-               return;
-
-       /* Are we the target for this VIS packet? */
-       if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC   &&
-           batadv_is_my_mac(bat_priv, vis_packet->target_orig))
-               are_target = 1;
-
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-       info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
-                                &is_new, are_target);
-
-       if (!info)
-               goto end;
-       /* note that outdated packets will be dropped at this point. */
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-       /* send only if we're the target server or ... */
-       if (are_target && is_new) {
-               packet->vis_type = BATADV_VIS_TYPE_SERVER_SYNC; /* upgrade! */
-               batadv_send_list_add(bat_priv, info);
-
-               /* ... we're not the recipient (and thus need to forward). */
-       } else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) {
-               batadv_send_list_add(bat_priv, info);
-       }
-
-end:
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
-
-/* Walk the originators and find the VIS server with the best tq. Set the packet
- * address to its address and return the best_tq.
- *
- * Must be called with the originator hash locked
- */
-static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
-                                      struct batadv_vis_info *info)
-{
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct batadv_neigh_node *router;
-       struct hlist_head *head;
-       struct batadv_orig_node *orig_node;
-       struct batadv_vis_packet *packet;
-       int best_tq = -1;
-       uint32_t i;
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       router = batadv_orig_node_get_router(orig_node);
-                       if (!router)
-                               continue;
-
-                       if ((orig_node->flags & BATADV_VIS_SERVER) &&
-                           (router->tq_avg > best_tq)) {
-                               best_tq = router->tq_avg;
-                               memcpy(packet->target_orig, orig_node->orig,
-                                      ETH_ALEN);
-                       }
-                       batadv_neigh_node_free_ref(router);
-               }
-               rcu_read_unlock();
-       }
-
-       return best_tq;
-}
-
-/* Return true if the vis packet is full. */
-static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
-{
-       const struct batadv_vis_packet *packet;
-       size_t num;
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-       num = BATADV_MAX_VIS_PACKET_SIZE / sizeof(struct batadv_vis_info_entry);
-
-       if (num < packet->entries + 1)
-               return true;
-       return false;
-}
-
-/* generates a packet of own vis data,
- * returns 0 on success, -1 if no packet could be generated
- */
-static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
-{
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_head *head;
-       struct batadv_orig_node *orig_node;
-       struct batadv_neigh_node *router;
-       struct batadv_vis_info *info = bat_priv->vis.my_info;
-       struct batadv_vis_packet *packet;
-       struct batadv_vis_info_entry *entry;
-       struct batadv_tt_common_entry *tt_common_entry;
-       uint8_t *packet_pos;
-       int best_tq = -1;
-       uint32_t i;
-
-       info->first_seen = jiffies;
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-       packet->vis_type = atomic_read(&bat_priv->vis_mode);
-
-       memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
-       packet->header.ttl = BATADV_TTL;
-       packet->seqno = htonl(ntohl(packet->seqno) + 1);
-       packet->entries = 0;
-       packet->reserved = 0;
-       skb_trim(info->skb_packet, sizeof(*packet));
-
-       if (packet->vis_type == BATADV_VIS_TYPE_CLIENT_UPDATE) {
-               best_tq = batadv_find_best_vis_server(bat_priv, info);
-
-               if (best_tq < 0)
-                       return best_tq;
-       }
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       router = batadv_orig_node_get_router(orig_node);
-                       if (!router)
-                               continue;
-
-                       if (!batadv_compare_eth(router->addr, orig_node->orig))
-                               goto next;
-
-                       if (router->if_incoming->if_status != BATADV_IF_ACTIVE)
-                               goto next;
-
-                       if (router->tq_avg < 1)
-                               goto next;
-
-                       /* fill one entry into buffer. */
-                       packet_pos = skb_put(info->skb_packet, sizeof(*entry));
-                       entry = (struct batadv_vis_info_entry *)packet_pos;
-                       memcpy(entry->src,
-                              router->if_incoming->net_dev->dev_addr,
-                              ETH_ALEN);
-                       memcpy(entry->dest, orig_node->orig, ETH_ALEN);
-                       entry->quality = router->tq_avg;
-                       packet->entries++;
-
-next:
-                       batadv_neigh_node_free_ref(router);
-
-                       if (batadv_vis_packet_full(info))
-                               goto unlock;
-               }
-               rcu_read_unlock();
-       }
-
-       hash = bat_priv->tt.local_hash;
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(tt_common_entry, head,
-                                        hash_entry) {
-                       packet_pos = skb_put(info->skb_packet, sizeof(*entry));
-                       entry = (struct batadv_vis_info_entry *)packet_pos;
-                       memset(entry->src, 0, ETH_ALEN);
-                       memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN);
-                       entry->quality = 0; /* 0 means TT */
-                       packet->entries++;
-
-                       if (batadv_vis_packet_full(info))
-                               goto unlock;
-               }
-               rcu_read_unlock();
-       }
-
-       return 0;
-
-unlock:
-       rcu_read_unlock();
-       return 0;
-}
-
-/* free old vis packets. Must be called with this vis_hash_lock
- * held
- */
-static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
-{
-       uint32_t i;
-       struct batadv_hashtable *hash = bat_priv->vis.hash;
-       struct hlist_node *node_tmp;
-       struct hlist_head *head;
-       struct batadv_vis_info *info;
-
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               hlist_for_each_entry_safe(info, node_tmp,
-                                         head, hash_entry) {
-                       /* never purge own data. */
-                       if (info == bat_priv->vis.my_info)
-                               continue;
-
-                       if (batadv_has_timed_out(info->first_seen,
-                                                BATADV_VIS_TIMEOUT)) {
-                               hlist_del(&info->hash_entry);
-                               batadv_send_list_del(info);
-                               kref_put(&info->refcount, batadv_free_info);
-                       }
-               }
-       }
-}
-
-static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
-                                       struct batadv_vis_info *info)
-{
-       struct batadv_hashtable *hash = bat_priv->orig_hash;
-       struct hlist_head *head;
-       struct batadv_orig_node *orig_node;
-       struct batadv_vis_packet *packet;
-       struct sk_buff *skb;
-       uint32_t i, res;
-
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-       /* send to all routers in range. */
-       for (i = 0; i < hash->size; i++) {
-               head = &hash->table[i];
-
-               rcu_read_lock();
-               hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
-                       /* if it's a vis server and reachable, send it. */
-                       if (!(orig_node->flags & BATADV_VIS_SERVER))
-                               continue;
-
-                       /* don't send it if we already received the packet from
-                        * this node.
-                        */
-                       if (batadv_recv_list_is_in(bat_priv, &info->recv_list,
-                                                  orig_node->orig))
-                               continue;
-
-                       memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
-                       skb = skb_clone(info->skb_packet, GFP_ATOMIC);
-                       if (!skb)
-                               continue;
-
-                       res = batadv_send_skb_to_orig(skb, orig_node, NULL);
-                       if (res == NET_XMIT_DROP)
-                               kfree_skb(skb);
-               }
-               rcu_read_unlock();
-       }
-}
-
-static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
-                                     struct batadv_vis_info *info)
-{
-       struct batadv_orig_node *orig_node;
-       struct sk_buff *skb;
-       struct batadv_vis_packet *packet;
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-
-       orig_node = batadv_orig_hash_find(bat_priv, packet->target_orig);
-       if (!orig_node)
-               goto out;
-
-       skb = skb_clone(info->skb_packet, GFP_ATOMIC);
-       if (!skb)
-               goto out;
-
-       if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
-               kfree_skb(skb);
-
-out:
-       if (orig_node)
-               batadv_orig_node_free_ref(orig_node);
-}
-
-/* only send one vis packet. called from batadv_send_vis_packets() */
-static void batadv_send_vis_packet(struct batadv_priv *bat_priv,
-                                  struct batadv_vis_info *info)
-{
-       struct batadv_hard_iface *primary_if;
-       struct batadv_vis_packet *packet;
-
-       primary_if = batadv_primary_if_get_selected(bat_priv);
-       if (!primary_if)
-               goto out;
-
-       packet = (struct batadv_vis_packet *)info->skb_packet->data;
-       if (packet->header.ttl < 2) {
-               pr_debug("Error - can't send vis packet: ttl exceeded\n");
-               goto out;
-       }
-
-       memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
-       packet->header.ttl--;
-
-       if (is_broadcast_ether_addr(packet->target_orig))
-               batadv_broadcast_vis_packet(bat_priv, info);
-       else
-               batadv_unicast_vis_packet(bat_priv, info);
-       packet->header.ttl++; /* restore TTL */
-
-out:
-       if (primary_if)
-               batadv_hardif_free_ref(primary_if);
-}
-
-/* called from timer; send (and maybe generate) vis packet. */
-static void batadv_send_vis_packets(struct work_struct *work)
-{
-       struct delayed_work *delayed_work;
-       struct batadv_priv *bat_priv;
-       struct batadv_priv_vis *priv_vis;
-       struct batadv_vis_info *info;
-
-       delayed_work = container_of(work, struct delayed_work, work);
-       priv_vis = container_of(delayed_work, struct batadv_priv_vis, work);
-       bat_priv = container_of(priv_vis, struct batadv_priv, vis);
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-       batadv_purge_vis_packets(bat_priv);
-
-       if (batadv_generate_vis_packet(bat_priv) == 0) {
-               /* schedule if generation was successful */
-               batadv_send_list_add(bat_priv, bat_priv->vis.my_info);
-       }
-
-       while (!list_empty(&bat_priv->vis.send_list)) {
-               info = list_first_entry(&bat_priv->vis.send_list,
-                                       typeof(*info), send_list);
-
-               kref_get(&info->refcount);
-               spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-               batadv_send_vis_packet(bat_priv, info);
-
-               spin_lock_bh(&bat_priv->vis.hash_lock);
-               batadv_send_list_del(info);
-               kref_put(&info->refcount, batadv_free_info);
-       }
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
-                          msecs_to_jiffies(BATADV_VIS_INTERVAL));
-}
-
-/* init the vis server. this may only be called when if_list is already
- * initialized (e.g. bat0 is initialized, interfaces have been added)
- */
-int batadv_vis_init(struct batadv_priv *bat_priv)
-{
-       struct batadv_vis_packet *packet;
-       int hash_added;
-       unsigned int len;
-       unsigned long first_seen;
-       struct sk_buff *tmp_skb;
-
-       if (bat_priv->vis.hash)
-               return 0;
-
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-
-       bat_priv->vis.hash = batadv_hash_new(256);
-       if (!bat_priv->vis.hash) {
-               pr_err("Can't initialize vis_hash\n");
-               goto err;
-       }
-
-       batadv_hash_set_lock_class(bat_priv->vis.hash,
-                                  &batadv_vis_hash_lock_class_key);
-
-       bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
-       if (!bat_priv->vis.my_info)
-               goto err;
-
-       len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
-       bat_priv->vis.my_info->skb_packet = netdev_alloc_skb_ip_align(NULL,
-                                                                     len);
-       if (!bat_priv->vis.my_info->skb_packet)
-               goto free_info;
-
-       bat_priv->vis.my_info->skb_packet->priority = TC_PRIO_CONTROL;
-       skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
-       tmp_skb = bat_priv->vis.my_info->skb_packet;
-       packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
-
-       /* prefill the vis info */
-       first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
-       bat_priv->vis.my_info->first_seen = first_seen;
-       INIT_LIST_HEAD(&bat_priv->vis.my_info->recv_list);
-       INIT_LIST_HEAD(&bat_priv->vis.my_info->send_list);
-       kref_init(&bat_priv->vis.my_info->refcount);
-       bat_priv->vis.my_info->bat_priv = bat_priv;
-       packet->header.version = BATADV_COMPAT_VERSION;
-       packet->header.packet_type = BATADV_VIS;
-       packet->header.ttl = BATADV_TTL;
-       packet->seqno = 0;
-       packet->reserved = 0;
-       packet->entries = 0;
-
-       INIT_LIST_HEAD(&bat_priv->vis.send_list);
-
-       hash_added = batadv_hash_add(bat_priv->vis.hash, batadv_vis_info_cmp,
-                                    batadv_vis_info_choose,
-                                    bat_priv->vis.my_info,
-                                    &bat_priv->vis.my_info->hash_entry);
-       if (hash_added != 0) {
-               pr_err("Can't add own vis packet into hash\n");
-               /* not in hash, need to remove it manually. */
-               kref_put(&bat_priv->vis.my_info->refcount, batadv_free_info);
-               goto err;
-       }
-
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-
-       INIT_DELAYED_WORK(&bat_priv->vis.work, batadv_send_vis_packets);
-       queue_delayed_work(batadv_event_workqueue, &bat_priv->vis.work,
-                          msecs_to_jiffies(BATADV_VIS_INTERVAL));
-
-       return 0;
-
-free_info:
-       kfree(bat_priv->vis.my_info);
-       bat_priv->vis.my_info = NULL;
-err:
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-       batadv_vis_quit(bat_priv);
-       return -ENOMEM;
-}
-
-/* Decrease the reference count on a hash item info */
-static void batadv_free_info_ref(struct hlist_node *node, void *arg)
-{
-       struct batadv_vis_info *info;
-
-       info = container_of(node, struct batadv_vis_info, hash_entry);
-       batadv_send_list_del(info);
-       kref_put(&info->refcount, batadv_free_info);
-}
-
-/* shutdown vis-server */
-void batadv_vis_quit(struct batadv_priv *bat_priv)
-{
-       if (!bat_priv->vis.hash)
-               return;
-
-       cancel_delayed_work_sync(&bat_priv->vis.work);
-
-       spin_lock_bh(&bat_priv->vis.hash_lock);
-       /* properly remove, kill timers ... */
-       batadv_hash_delete(bat_priv->vis.hash, batadv_free_info_ref, NULL);
-       bat_priv->vis.hash = NULL;
-       bat_priv->vis.my_info = NULL;
-       spin_unlock_bh(&bat_priv->vis.hash_lock);
-}
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
deleted file mode 100644 (file)
index ad92b0e..0000000
--- a/net/batman-adv/vis.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (C) 2008-2013 B.A.T.M.A.N. contributors:
- *
- * Simon Wunderlich, Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- */
-
-#ifndef _NET_BATMAN_ADV_VIS_H_
-#define _NET_BATMAN_ADV_VIS_H_
-
-/* timeout of vis packets in milliseconds */
-#define BATADV_VIS_TIMEOUT             200000
-
-int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
-void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
-                                      struct batadv_vis_packet *vis_packet,
-                                      int vis_info_len);
-void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
-                                        struct batadv_vis_packet *vis_packet,
-                                        int vis_info_len);
-int batadv_vis_init(struct batadv_priv *bat_priv);
-void batadv_vis_quit(struct batadv_priv *bat_priv);
-
-#endif /* _NET_BATMAN_ADV_VIS_H_ */
index dea6a287daca88b666eaee999043a3abf6dd853a..6a791e73e39d936f0d9958d506c540c5a2805727 100644 (file)
@@ -11,3 +11,5 @@ obj-$(CONFIG_BT_HIDP) += hidp/
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
        hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
        a2mp.o amp.o
+
+subdir-ccflags-y += -D__CHECK_ENDIAN__
index 17f33a62f6db559824d9dc1b3973acb779fa9162..efcd108822c43134e3d64755e24c16410f0598d5 100644 (file)
@@ -15,8 +15,9 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
+
+#include "a2mp.h"
+#include "amp.h"
 
 /* Global AMP Manager list */
 LIST_HEAD(amp_mgr_list);
@@ -75,33 +76,26 @@ u8 __next_ident(struct amp_mgr *mgr)
        return mgr->ident;
 }
 
-static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
-{
-       cl->id = 0;
-       cl->type = 0;
-       cl->status = 1;
-}
-
 /* hci_dev_list shall be locked */
-static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
+static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl)
 {
-       int i = 0;
        struct hci_dev *hdev;
+       int i = 1;
 
-       __a2mp_cl_bredr(cl);
+       cl[0].id = AMP_ID_BREDR;
+       cl[0].type = AMP_TYPE_BREDR;
+       cl[0].status = AMP_STATUS_BLUETOOTH_ONLY;
 
        list_for_each_entry(hdev, &hci_dev_list, list) {
-               /* Iterate through AMP controllers */
-               if (hdev->id == HCI_BREDR_ID)
-                       continue;
-
-               /* Starting from second entry */
-               if (++i >= num_ctrl)
-                       return;
-
-               cl[i].id = hdev->id;
-               cl[i].type = hdev->amp_type;
-               cl[i].status = hdev->amp_status;
+               if (hdev->dev_type == HCI_AMP) {
+                       cl[i].id = hdev->id;
+                       cl[i].type = hdev->amp_type;
+                       if (test_bit(HCI_UP, &hdev->flags))
+                               cl[i].status = hdev->amp_status;
+                       else
+                               cl[i].status = AMP_STATUS_POWERED_DOWN;
+                       i++;
+               }
        }
 }
 
@@ -129,6 +123,7 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
        struct a2mp_discov_rsp *rsp;
        u16 ext_feat;
        u8 num_ctrl;
+       struct hci_dev *hdev;
 
        if (len < sizeof(*req))
                return -EINVAL;
@@ -152,7 +147,14 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
 
        read_lock(&hci_dev_list_lock);
 
-       num_ctrl = __hci_num_ctrl();
+       /* at minimum the BR/EDR needs to be listed */
+       num_ctrl = 1;
+
+       list_for_each_entry(hdev, &hci_dev_list, list) {
+               if (hdev->dev_type == HCI_AMP)
+                       num_ctrl++;
+       }
+
        len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
        rsp = kmalloc(len, GFP_ATOMIC);
        if (!rsp) {
@@ -163,7 +165,7 @@ static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
        rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
        rsp->ext_feat = 0;
 
-       __a2mp_add_cl(mgr, rsp->cl, num_ctrl);
+       __a2mp_add_cl(mgr, rsp->cl);
 
        read_unlock(&hci_dev_list_lock);
 
@@ -208,7 +210,7 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
                BT_DBG("Remote AMP id %d type %d status %d", cl->id, cl->type,
                       cl->status);
 
-               if (cl->id != HCI_BREDR_ID && cl->type == HCI_AMP) {
+               if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) {
                        struct a2mp_info_req req;
 
                        found = true;
@@ -344,7 +346,7 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
        tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC);
 
        hdev = hci_dev_get(req->id);
-       if (!hdev || hdev->amp_type == HCI_BREDR || tmp) {
+       if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
                struct a2mp_amp_assoc_rsp rsp;
                rsp.id = req->id;
 
@@ -451,7 +453,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
        rsp.remote_id = req->local_id;
 
        hdev = hci_dev_get(req->remote_id);
-       if (!hdev || hdev->amp_type != HCI_AMP) {
+       if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) {
                rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
                goto send_rsp;
        }
@@ -535,7 +537,8 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
                goto send_rsp;
        }
 
-       hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, mgr->l2cap_conn->dst);
+       hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
+                                      &mgr->l2cap_conn->hcon->dst);
        if (!hcon) {
                BT_ERR("No phys link exist");
                rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS;
@@ -669,7 +672,8 @@ static void a2mp_chan_close_cb(struct l2cap_chan *chan)
        l2cap_chan_put(chan);
 }
 
-static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
+static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state,
+                                     int err)
 {
        struct amp_mgr *mgr = chan->data;
 
@@ -706,6 +710,9 @@ static struct l2cap_ops a2mp_chan_ops = {
        .teardown = l2cap_chan_no_teardown,
        .ready = l2cap_chan_no_ready,
        .defer = l2cap_chan_no_defer,
+       .resume = l2cap_chan_no_resume,
+       .set_shutdown = l2cap_chan_no_set_shutdown,
+       .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
 };
 
 static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked)
@@ -829,6 +836,9 @@ struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
 {
        struct amp_mgr *mgr;
 
+       if (conn->hcon->type != ACL_LINK)
+               return NULL;
+
        mgr = amp_mgr_create(conn, false);
        if (!mgr) {
                BT_ERR("Could not create AMP manager");
@@ -871,7 +881,7 @@ void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
        rsp.id = hdev->id;
        rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
 
-       if (hdev->amp_type != HCI_BREDR) {
+       if (hdev->amp_type != AMP_TYPE_BREDR) {
                rsp.status = 0;
                rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
                rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
index 9096137c889c4857507fcb60828b8d7ad581f424..f6a1671ea2ff793bfca0efdd1541765803fb4d03 100644 (file)
 /* Bluetooth address family and sockets. */
 
 #include <linux/module.h>
+#include <linux/debugfs.h>
 #include <asm/ioctls.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <linux/proc_fs.h>
 
-#define VERSION "2.16"
+#define VERSION "2.17"
 
 /* Bluetooth sockets */
 #define BT_MAX_PROTO   8
@@ -221,12 +222,12 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (flags & (MSG_OOB))
                return -EOPNOTSUPP;
 
-       msg->msg_namelen = 0;
-
        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (!skb) {
-               if (sk->sk_shutdown & RCV_SHUTDOWN)
+               if (sk->sk_shutdown & RCV_SHUTDOWN) {
+                       msg->msg_namelen = 0;
                        return 0;
+               }
                return err;
        }
 
@@ -238,9 +239,16 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 
        skb_reset_transport_header(skb);
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
-       if (err == 0)
+       if (err == 0) {
                sock_recv_ts_and_drops(msg, sk, skb);
 
+               if (bt_sk(sk)->skb_msg_name)
+                       bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
+                                               &msg->msg_namelen);
+               else
+                       msg->msg_namelen = 0;
+       }
+
        skb_free_datagram(sk, skb);
 
        return err ? : copied;
@@ -490,6 +498,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 }
 EXPORT_SYMBOL(bt_sock_ioctl);
 
+/* This function expects the sk lock to be held when called */
 int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 {
        DECLARE_WAITQUEUE(wait, current);
@@ -525,6 +534,46 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
 }
 EXPORT_SYMBOL(bt_sock_wait_state);
 
+/* This function expects the sk lock to be held when called */
+int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
+{
+       DECLARE_WAITQUEUE(wait, current);
+       unsigned long timeo;
+       int err = 0;
+
+       BT_DBG("sk %p", sk);
+
+       timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+
+       add_wait_queue(sk_sleep(sk), &wait);
+       set_current_state(TASK_INTERRUPTIBLE);
+       while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
+               if (!timeo) {
+                       err = -EAGAIN;
+                       break;
+               }
+
+               if (signal_pending(current)) {
+                       err = sock_intr_errno(timeo);
+                       break;
+               }
+
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               lock_sock(sk);
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               err = sock_error(sk);
+               if (err)
+                       break;
+       }
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+
+       return err;
+}
+EXPORT_SYMBOL(bt_sock_wait_ready);
+
 #ifdef CONFIG_PROC_FS
 struct bt_seq_state {
        struct bt_sock_list *l;
@@ -563,7 +612,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
        struct bt_sock_list *l = s->l;
 
        if (v == SEQ_START_TOKEN) {
-               seq_puts(seq ,"sk               RefCnt Rmem   Wmem   User   Inode  Src Dst Parent");
+               seq_puts(seq ,"sk               RefCnt Rmem   Wmem   User   Inode  Parent");
 
                if (l->custom_seq_show) {
                        seq_putc(seq, ' ');
@@ -576,15 +625,13 @@ static int bt_seq_show(struct seq_file *seq, void *v)
                struct bt_sock *bt = bt_sk(sk);
 
                seq_printf(seq,
-                          "%pK %-6d %-6u %-6u %-6u %-6lu %pMR %pMR %-6lu",
+                          "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
                           sk,
                           atomic_read(&sk->sk_refcnt),
                           sk_rmem_alloc_get(sk),
                           sk_wmem_alloc_get(sk),
                           from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
                           sock_i_ino(sk),
-                          &bt->src,
-                          &bt->dst,
                           bt->parent? sock_i_ino(bt->parent): 0LU);
 
                if (l->custom_seq_show) {
@@ -662,12 +709,17 @@ static struct net_proto_family bt_sock_family_ops = {
        .create = bt_sock_create,
 };
 
+struct dentry *bt_debugfs;
+EXPORT_SYMBOL_GPL(bt_debugfs);
+
 static int __init bt_init(void)
 {
        int err;
 
        BT_INFO("Core ver %s", VERSION);
 
+       bt_debugfs = debugfs_create_dir("bluetooth", NULL);
+
        err = bt_sysfs_init();
        if (err < 0)
                return err;
@@ -708,7 +760,6 @@ error:
 
 static void __exit bt_exit(void)
 {
-
        sco_exit();
 
        l2cap_exit();
@@ -718,6 +769,8 @@ static void __exit bt_exit(void)
        sock_unregister(PF_BLUETOOTH);
 
        bt_sysfs_cleanup();
+
+       debugfs_remove_recursive(bt_debugfs);
 }
 
 subsys_initcall(bt_init);
index d459ed43c779d776e453634db290d08f48d5a7c7..bb39509b3f065e2a0d18e1a53cfcfabc8bfe779e 100644 (file)
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci.h>
 #include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
 #include <crypto/hash.h>
 
+#include "a2mp.h"
+#include "amp.h"
+
 /* Remote AMP Controllers interface */
 void amp_ctrl_get(struct amp_ctrl *ctrl)
 {
@@ -110,7 +111,7 @@ static u8 __next_handle(struct amp_mgr *mgr)
 struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr,
                             u8 remote_id, bool out)
 {
-       bdaddr_t *dst = mgr->l2cap_conn->dst;
+       bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst;
        struct hci_conn *hcon;
 
        hcon = hci_conn_add(hdev, AMP_LINK, dst);
@@ -409,7 +410,8 @@ void amp_create_logical_link(struct l2cap_chan *chan)
        struct hci_cp_create_accept_logical_link cp;
        struct hci_dev *hdev;
 
-       BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, chan->conn->dst);
+       BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon,
+              &chan->conn->hcon->dst);
 
        if (!hs_hcon)
                return;
index e430b1abcd2fabf102d15cd4f99399c983fcf9f2..a841d3e776c5e091c19efea8423750edd018e946 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/hci_core.h>
 
 #include "bnep.h"
@@ -510,20 +511,13 @@ static int bnep_session(void *arg)
 
 static struct device *bnep_get_device(struct bnep_session *session)
 {
-       bdaddr_t *src = &bt_sk(session->sock->sk)->src;
-       bdaddr_t *dst = &bt_sk(session->sock->sk)->dst;
-       struct hci_dev *hdev;
        struct hci_conn *conn;
 
-       hdev = hci_get_route(dst, src);
-       if (!hdev)
+       conn = l2cap_pi(session->sock->sk)->chan->conn->hcon;
+       if (!conn)
                return NULL;
 
-       conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
-
-       hci_dev_put(hdev);
-
-       return conn ? &conn->dev : NULL;
+       return &conn->dev;
 }
 
 static struct device_type bnep_type = {
@@ -539,8 +533,8 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 
        BT_DBG("");
 
-       baswap((void *) dst, &bt_sk(sock->sk)->dst);
-       baswap((void *) src, &bt_sk(sock->sk)->src);
+       baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
+       baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
 
        /* session struct allocated as private part of net_device */
        dev = alloc_netdev(sizeof(struct bnep_session),
index e0a6ebf2baa6fecd58a0cc13ea6f2906c6614a46..67fe5e84e68f0bffb166bbcfb6cdb736f15ab05e 100644 (file)
@@ -340,20 +340,20 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 
        down_write(&cmtp_session_sem);
 
-       s = __cmtp_get_session(&bt_sk(sock->sk)->dst);
+       s = __cmtp_get_session(&l2cap_pi(sock->sk)->chan->dst);
        if (s && s->state == BT_CONNECTED) {
                err = -EEXIST;
                goto failed;
        }
 
-       bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
+       bacpy(&session->bdaddr, &l2cap_pi(sock->sk)->chan->dst);
 
        session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
                                        l2cap_pi(sock->sk)->chan->imtu);
 
        BT_DBG("mtu %d", session->mtu);
 
-       sprintf(session->name, "%pMR", &bt_sk(sock->sk)->dst);
+       sprintf(session->name, "%pMR", &session->bdaddr);
 
        session->sock  = sock;
        session->state = BT_CONFIG;
index f0817121ec5e6b0c5f50e82493f9033376908f61..ba5366c320dacc7d4db4659aa144051baf1b035a 100644 (file)
@@ -28,8 +28,9 @@
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/smp.h>
+
+#include "smp.h"
+#include "a2mp.h"
 
 struct sco_param {
        u16 pkt_type;
@@ -49,30 +50,6 @@ static const struct sco_param sco_param_wideband[] = {
        { EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
 };
 
-static void hci_le_create_connection(struct hci_conn *conn)
-{
-       struct hci_dev *hdev = conn->hdev;
-       struct hci_cp_le_create_conn cp;
-
-       conn->state = BT_CONNECT;
-       conn->out = true;
-       conn->link_mode |= HCI_LM_MASTER;
-       conn->sec_level = BT_SECURITY_LOW;
-
-       memset(&cp, 0, sizeof(cp));
-       cp.scan_interval = __constant_cpu_to_le16(0x0060);
-       cp.scan_window = __constant_cpu_to_le16(0x0030);
-       bacpy(&cp.peer_addr, &conn->dst);
-       cp.peer_addr_type = conn->dst_type;
-       cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
-       cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
-       cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
-       cp.min_ce_len = __constant_cpu_to_le16(0x0000);
-       cp.max_ce_len = __constant_cpu_to_le16(0x0000);
-
-       hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
-}
-
 static void hci_le_create_connection_cancel(struct hci_conn *conn)
 {
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
@@ -340,8 +317,10 @@ static void hci_conn_timeout(struct work_struct *work)
 }
 
 /* Enter sniff mode */
-static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
+static void hci_conn_idle(struct work_struct *work)
 {
+       struct hci_conn *conn = container_of(work, struct hci_conn,
+                                            idle_work.work);
        struct hci_dev *hdev = conn->hdev;
 
        BT_DBG("hcon %p mode %d", conn, conn->mode);
@@ -375,21 +354,12 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
        }
 }
 
-static void hci_conn_idle(unsigned long arg)
-{
-       struct hci_conn *conn = (void *) arg;
-
-       BT_DBG("hcon %p mode %d", conn, conn->mode);
-
-       hci_conn_enter_sniff_mode(conn);
-}
-
-static void hci_conn_auto_accept(unsigned long arg)
+static void hci_conn_auto_accept(struct work_struct *work)
 {
-       struct hci_conn *conn = (void *) arg;
-       struct hci_dev *hdev = conn->hdev;
+       struct hci_conn *conn = container_of(work, struct hci_conn,
+                                            auto_accept_work.work);
 
-       hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
+       hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
                     &conn->dst);
 }
 
@@ -404,6 +374,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
                return NULL;
 
        bacpy(&conn->dst, dst);
+       bacpy(&conn->src, &hdev->bdaddr);
        conn->hdev  = hdev;
        conn->type  = type;
        conn->mode  = HCI_CM_ACTIVE;
@@ -437,9 +408,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
        INIT_LIST_HEAD(&conn->chan_list);
 
        INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
-       setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
-       setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
-                   (unsigned long) conn);
+       INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
+       INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
 
        atomic_set(&conn->refcnt, 0);
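
As a side note (not part of the commit), the timer-to-delayed-work conversion applied to idle_work and auto_accept_work above follows the usual kernel pattern; a minimal sketch with hypothetical example_* names:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct example_conn {
        struct delayed_work idle_work;
        /* ... */
};

static void example_idle(struct work_struct *work)
{
        /* delayed_work embeds a work_struct, so container_of() on the
         * ".work" member recovers the owning object; the old timer
         * callback received it as an unsigned long cast instead.
         */
        struct example_conn *conn = container_of(work, struct example_conn,
                                                 idle_work.work);
        (void)conn;     /* ... enter sniff mode, etc. ... */
}

static void example_init(struct example_conn *conn)
{
        INIT_DELAYED_WORK(&conn->idle_work, example_idle);
}

/* (Re)arming replaces mod_timer(); the handler then runs on the given
 * workqueue rather than in timer (softirq) context.
 */
static void example_touch(struct workqueue_struct *wq,
                          struct example_conn *conn, unsigned long msec)
{
        queue_delayed_work(wq, &conn->idle_work, msecs_to_jiffies(msec));
}

/* Teardown replaces del_timer() and also waits for a running handler. */
static void example_teardown(struct example_conn *conn)
{
        cancel_delayed_work_sync(&conn->idle_work);
}
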
 
@@ -460,11 +430,9 @@ int hci_conn_del(struct hci_conn *conn)
 
        BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
 
-       del_timer(&conn->idle_timer);
-
        cancel_delayed_work_sync(&conn->disc_work);
-
-       del_timer(&conn->auto_accept_timer);
+       cancel_delayed_work_sync(&conn->auto_accept_work);
+       cancel_delayed_work_sync(&conn->idle_work);
 
        if (conn->type == ACL_LINK) {
                struct hci_conn *sco = conn->link;
@@ -518,6 +486,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
        list_for_each_entry(d, &hci_dev_list, list) {
                if (!test_bit(HCI_UP, &d->flags) ||
                    test_bit(HCI_RAW, &d->flags) ||
+                   test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
                    d->dev_type != HCI_BREDR)
                        continue;
 
@@ -545,34 +514,124 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
 }
 EXPORT_SYMBOL(hci_get_route);
 
+static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
+{
+       struct hci_conn *conn;
+
+       if (status == 0)
+               return;
+
+       BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
+              status);
+
+       hci_dev_lock(hdev);
+
+       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+       if (!conn)
+               goto done;
+
+       conn->state = BT_CLOSED;
+
+       mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
+                           status);
+
+       hci_proto_connect_cfm(conn, status);
+
+       hci_conn_del(conn);
+
+done:
+       hci_dev_unlock(hdev);
+}
+
+static int hci_create_le_conn(struct hci_conn *conn)
+{
+       struct hci_dev *hdev = conn->hdev;
+       struct hci_cp_le_create_conn cp;
+       struct hci_request req;
+       int err;
+
+       hci_req_init(&req, hdev);
+
+       memset(&cp, 0, sizeof(cp));
+       cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
+       cp.scan_window = cpu_to_le16(hdev->le_scan_window);
+       bacpy(&cp.peer_addr, &conn->dst);
+       cp.peer_addr_type = conn->dst_type;
+       cp.own_address_type = conn->src_type;
+       cp.conn_interval_min = cpu_to_le16(hdev->le_conn_min_interval);
+       cp.conn_interval_max = cpu_to_le16(hdev->le_conn_max_interval);
+       cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
+       cp.min_ce_len = __constant_cpu_to_le16(0x0000);
+       cp.max_ce_len = __constant_cpu_to_le16(0x0000);
+
+       hci_req_add(&req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
+
+       err = hci_req_run(&req, create_le_conn_complete);
+       if (err) {
+               hci_conn_del(conn);
+               return err;
+       }
+
+       return 0;
+}
+
 static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                    u8 dst_type, u8 sec_level, u8 auth_type)
 {
-       struct hci_conn *le;
+       struct hci_conn *conn;
+       int err;
 
-       if (test_bit(HCI_LE_PERIPHERAL, &hdev->flags))
+       if (test_bit(HCI_ADVERTISING, &hdev->flags))
                return ERR_PTR(-ENOTSUPP);
 
-       le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
-       if (!le) {
-               le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-               if (le)
-                       return ERR_PTR(-EBUSY);
+       /* Some devices send ATT messages as soon as the physical link is
+        * established. To be able to handle these ATT messages, the user-
+        * space first establishes the connection and then starts the pairing
+        * process.
+        *
+        * So if a hci_conn object already exists for the following connection
+        * attempt, we simply update pending_sec_level and auth_type fields
+        * and return the object found.
+        */
+       conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
+       if (conn) {
+               conn->pending_sec_level = sec_level;
+               conn->auth_type = auth_type;
+               goto done;
+       }
 
-               le = hci_conn_add(hdev, LE_LINK, dst);
-               if (!le)
-                       return ERR_PTR(-ENOMEM);
+       /* Since the controller supports only one LE connection attempt at a
+        * time, we return -EBUSY if there is any connection attempt running.
+        */
+       conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+       if (conn)
+               return ERR_PTR(-EBUSY);
 
-               le->dst_type = bdaddr_to_le(dst_type);
-               hci_le_create_connection(le);
-       }
+       conn = hci_conn_add(hdev, LE_LINK, dst);
+       if (!conn)
+               return ERR_PTR(-ENOMEM);
 
-       le->pending_sec_level = sec_level;
-       le->auth_type = auth_type;
+       if (dst_type == BDADDR_LE_PUBLIC)
+               conn->dst_type = ADDR_LE_DEV_PUBLIC;
+       else
+               conn->dst_type = ADDR_LE_DEV_RANDOM;
 
-       hci_conn_hold(le);
+       conn->src_type = hdev->own_addr_type;
 
-       return le;
+       conn->state = BT_CONNECT;
+       conn->out = true;
+       conn->link_mode |= HCI_LM_MASTER;
+       conn->sec_level = BT_SECURITY_LOW;
+       conn->pending_sec_level = sec_level;
+       conn->auth_type = auth_type;
+
+       err = hci_create_le_conn(conn);
+       if (err)
+               return ERR_PTR(err);
+
+done:
+       hci_conn_hold(conn);
+       return conn;
 }
 
 static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
@@ -580,6 +639,9 @@ static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
 {
        struct hci_conn *acl;
 
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+               return ERR_PTR(-ENOTSUPP);
+
        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
                acl = hci_conn_add(hdev, ACL_LINK, dst);
@@ -846,8 +908,8 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
 
 timer:
        if (hdev->idle_timeout > 0)
-               mod_timer(&conn->idle_timer,
-                         jiffies + msecs_to_jiffies(hdev->idle_timeout));
+               queue_delayed_work(hdev->workqueue, &conn->idle_work,
+                                  msecs_to_jiffies(hdev->idle_timeout));
 }
 
 /* Drop all connection on the device */
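
For orientation (not part of the commit), the asynchronous hci_request pattern that hci_create_le_conn() above is built on: queue one or more HCI commands, then run the request with a completion callback that receives the final status. The example_* names are hypothetical and the command parameters are left as placeholders.

#include <linux/string.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void example_req_complete(struct hci_dev *hdev, u8 status)
{
        /* Runs once the controller has answered all queued commands
         * (or the request failed); a non-zero status means failure.
         */
        if (status)
                BT_ERR("%s example request failed: 0x%2.2x",
                       hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
        struct hci_request req;
        struct hci_cp_le_create_conn cp;

        hci_req_init(&req, hdev);

        memset(&cp, 0, sizeof(cp));
        /* ... fill in parameters as hci_create_le_conn() does above ... */
        hci_req_add(&req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);

        /* Returns immediately; the completion callback fires later. */
        return hci_req_run(&req, example_req_complete);
}
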
index fb7356fcfe51e03664d7aed6458eafd3015634e5..6ccc4eb9e55e4958f3eb070894a7668b05c98b5b 100644 (file)
 
 #include <linux/export.h>
 #include <linux/idr.h>
+#include <linux/rfkill.h>
+#include <linux/debugfs.h>
+#include <asm/unaligned.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+static void hci_rx_work(struct work_struct *work);
+static void hci_cmd_work(struct work_struct *work);
+static void hci_tx_work(struct work_struct *work);
+
+/* HCI device list */
+LIST_HEAD(hci_dev_list);
+DEFINE_RWLOCK(hci_dev_list_lock);
+
+/* HCI callback list */
+LIST_HEAD(hci_cb_list);
+DEFINE_RWLOCK(hci_cb_list_lock);
+
+/* HCI ID Numbering */
+static DEFINE_IDA(hci_index_ida);
+
+/* ---- HCI notifications ---- */
+
+static void hci_notify(struct hci_dev *hdev, int event)
+{
+       hci_sock_dev_event(hdev, event);
+}
+
+/* ---- HCI debugfs entries ---- */
+
+static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
+                            size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
+                             size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       struct sk_buff *skb;
+       char buf[32];
+       size_t buf_size = min(count, (sizeof(buf)-1));
+       bool enable;
+       int err;
+
+       if (!test_bit(HCI_UP, &hdev->flags))
+               return -ENETDOWN;
+
+       if (copy_from_user(buf, user_buf, buf_size))
+               return -EFAULT;
+
+       buf[buf_size] = '\0';
+       if (strtobool(buf, &enable))
+               return -EINVAL;
+
+       if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
+               return -EALREADY;
+
+       hci_req_lock(hdev);
+       if (enable)
+               skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
+                                    HCI_CMD_TIMEOUT);
+       else
+               skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
+                                    HCI_CMD_TIMEOUT);
+       hci_req_unlock(hdev);
+
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       err = -bt_to_errno(skb->data[0]);
+       kfree_skb(skb);
+
+       if (err < 0)
+               return err;
+
+       change_bit(HCI_DUT_MODE, &hdev->dev_flags);
+
+       return count;
+}
+
+static const struct file_operations dut_mode_fops = {
+       .open           = simple_open,
+       .read           = dut_mode_read,
+       .write          = dut_mode_write,
+       .llseek         = default_llseek,
+};
+
+static int features_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       u8 p;
+
+       hci_dev_lock(hdev);
+       for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
+               seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+                          "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
+                          hdev->features[p][0], hdev->features[p][1],
+                          hdev->features[p][2], hdev->features[p][3],
+                          hdev->features[p][4], hdev->features[p][5],
+                          hdev->features[p][6], hdev->features[p][7]);
+       }
+       if (lmp_le_capable(hdev))
+               seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
+                          "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
+                          hdev->le_features[0], hdev->le_features[1],
+                          hdev->le_features[2], hdev->le_features[3],
+                          hdev->le_features[4], hdev->le_features[5],
+                          hdev->le_features[6], hdev->le_features[7]);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int features_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, features_show, inode->i_private);
+}
+
+static const struct file_operations features_fops = {
+       .open           = features_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int blacklist_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       struct bdaddr_list *b;
+
+       hci_dev_lock(hdev);
+       list_for_each_entry(b, &hdev->blacklist, list)
+               seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int blacklist_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, blacklist_show, inode->i_private);
+}
+
+static const struct file_operations blacklist_fops = {
+       .open           = blacklist_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int uuids_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       struct bt_uuid *uuid;
+
+       hci_dev_lock(hdev);
+       list_for_each_entry(uuid, &hdev->uuids, list) {
+               u8 i, val[16];
+
+               /* The Bluetooth UUID values are stored in big endian,
+                * but with reversed byte order. So convert them into
+                * the right order for the %pUb modifier.
+                */
+               for (i = 0; i < 16; i++)
+                       val[i] = uuid->uuid[15 - i];
+
+               seq_printf(f, "%pUb\n", val);
+       }
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int uuids_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, uuids_show, inode->i_private);
+}
+
+static const struct file_operations uuids_fops = {
+       .open           = uuids_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int inquiry_cache_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+       struct discovery_state *cache = &hdev->discovery;
+       struct inquiry_entry *e;
+
+       hci_dev_lock(hdev);
+
+       list_for_each_entry(e, &cache->all, all) {
+               struct inquiry_data *data = &e->data;
+               seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
+                          &data->bdaddr,
+                          data->pscan_rep_mode, data->pscan_period_mode,
+                          data->pscan_mode, data->dev_class[2],
+                          data->dev_class[1], data->dev_class[0],
+                          __le16_to_cpu(data->clock_offset),
+                          data->rssi, data->ssp_mode, e->timestamp);
+       }
+
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int inquiry_cache_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, inquiry_cache_show, inode->i_private);
+}
+
+static const struct file_operations inquiry_cache_fops = {
+       .open           = inquiry_cache_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int link_keys_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct list_head *p, *n;
+
+       hci_dev_lock(hdev);
+       list_for_each_safe(p, n, &hdev->link_keys) {
+               struct link_key *key = list_entry(p, struct link_key, list);
+               seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
+                          HCI_LINK_KEY_SIZE, key->val, key->pin_len);
+       }
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int link_keys_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, link_keys_show, inode->i_private);
+}
+
+static const struct file_operations link_keys_fops = {
+       .open           = link_keys_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
+                                  size_t count, loff_t *ppos)
+{
+       struct hci_dev *hdev = file->private_data;
+       char buf[3];
+
+       buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
+       buf[1] = '\n';
+       buf[2] = '\0';
+       return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static const struct file_operations use_debug_keys_fops = {
+       .open           = simple_open,
+       .read           = use_debug_keys_read,
+       .llseek         = default_llseek,
+};
+
+static int dev_class_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+
+       hci_dev_lock(hdev);
+       seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+                  hdev->dev_class[1], hdev->dev_class[0]);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int dev_class_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, dev_class_show, inode->i_private);
+}
+
+static const struct file_operations dev_class_fops = {
+       .open           = dev_class_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int voice_setting_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->voice_setting;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
+                       NULL, "0x%4.4llx\n");
+
+static int auto_accept_delay_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       hdev->auto_accept_delay = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int auto_accept_delay_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->auto_accept_delay;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
+                       auto_accept_delay_set, "%llu\n");
+
+static int ssp_debug_mode_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+       struct sk_buff *skb;
+       __u8 mode;
+       int err;
+
+       if (val != 0 && val != 1)
+               return -EINVAL;
+
+       if (!test_bit(HCI_UP, &hdev->flags))
+               return -ENETDOWN;
+
+       hci_req_lock(hdev);
+       mode = val;
+       skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
+                            &mode, HCI_CMD_TIMEOUT);
+       hci_req_unlock(hdev);
+
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       err = -bt_to_errno(skb->data[0]);
+       kfree_skb(skb);
+
+       if (err < 0)
+               return err;
+
+       hci_dev_lock(hdev);
+       hdev->ssp_debug_mode = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int ssp_debug_mode_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->ssp_debug_mode;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
+                       ssp_debug_mode_set, "%llu\n");
+
+static int idle_timeout_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val != 0 && (val < 500 || val > 3600000))
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->idle_timeout = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int idle_timeout_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->idle_timeout;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
+                       idle_timeout_set, "%llu\n");
+
+static int sniff_min_interval_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->sniff_min_interval = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int sniff_min_interval_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->sniff_min_interval;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
+                       sniff_min_interval_set, "%llu\n");
+
+static int sniff_max_interval_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->sniff_max_interval = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int sniff_max_interval_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->sniff_max_interval;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
+                       sniff_max_interval_set, "%llu\n");
+
+static int static_address_show(struct seq_file *f, void *p)
+{
+       struct hci_dev *hdev = f->private;
+
+       hci_dev_lock(hdev);
+       seq_printf(f, "%pMR\n", &hdev->static_addr);
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int static_address_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, static_address_show, inode->i_private);
+}
+
+static const struct file_operations static_address_fops = {
+       .open           = static_address_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int own_address_type_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val != 0 && val != 1)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->own_addr_type = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int own_address_type_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->own_addr_type;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
+                       own_address_type_set, "%llu\n");
+
+static int long_term_keys_show(struct seq_file *f, void *ptr)
+{
+       struct hci_dev *hdev = f->private;
+       struct list_head *p, *n;
+
+       hci_dev_lock(hdev);
+       list_for_each_safe(p, n, &hdev->long_term_keys) {
+               struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
+               seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
+                          &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
+                          ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
+                          8, ltk->rand, 16, ltk->val);
+       }
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int long_term_keys_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, long_term_keys_show, inode->i_private);
+}
+
+static const struct file_operations long_term_keys_fops = {
+       .open           = long_term_keys_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int conn_min_interval_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
+
+       if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
+               return -EINVAL;
 
-#include <linux/rfkill.h>
+       hci_dev_lock(hdev);
+       hdev->le_conn_min_interval = val;
+       hci_dev_unlock(hdev);
 
-#include <net/bluetooth/bluetooth.h>
-#include <net/bluetooth/hci_core.h>
+       return 0;
+}
 
-static void hci_rx_work(struct work_struct *work);
-static void hci_cmd_work(struct work_struct *work);
-static void hci_tx_work(struct work_struct *work);
+static int conn_min_interval_get(void *data, u64 *val)
+{
+       struct hci_dev *hdev = data;
 
-/* HCI device list */
-LIST_HEAD(hci_dev_list);
-DEFINE_RWLOCK(hci_dev_list_lock);
+       hci_dev_lock(hdev);
+       *val = hdev->le_conn_min_interval;
+       hci_dev_unlock(hdev);
 
-/* HCI callback list */
-LIST_HEAD(hci_cb_list);
-DEFINE_RWLOCK(hci_cb_list_lock);
+       return 0;
+}
 
-/* HCI ID Numbering */
-static DEFINE_IDA(hci_index_ida);
+DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
+                       conn_min_interval_set, "%llu\n");
 
-/* ---- HCI notifications ---- */
+static int conn_max_interval_set(void *data, u64 val)
+{
+       struct hci_dev *hdev = data;
 
-static void hci_notify(struct hci_dev *hdev, int event)
+       if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
+               return -EINVAL;
+
+       hci_dev_lock(hdev);
+       hdev->le_conn_max_interval = val;
+       hci_dev_unlock(hdev);
+
+       return 0;
+}
+
+static int conn_max_interval_get(void *data, u64 *val)
 {
-       hci_sock_dev_event(hdev, event);
+       struct hci_dev *hdev = data;
+
+       hci_dev_lock(hdev);
+       *val = hdev->le_conn_max_interval;
+       hci_dev_unlock(hdev);
+
+       return 0;
 }
 
+DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
+                       conn_max_interval_set, "%llu\n");
+
 /* ---- HCI requests ---- */
 
 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
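
As background (not part of the commit), the shape shared by the debugfs entries added above: a get/set pair wrapped by DEFINE_SIMPLE_ATTRIBUTE() and registered with debugfs_create_file(). The example_* names and the 0x0c80 bound are placeholders.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/debugfs.h>

static u64 example_value;

static int example_get(void *data, u64 *val)
{
        *val = example_value;
        return 0;
}

static int example_set(void *data, u64 val)
{
        if (val > 0x0c80)       /* range check, as in conn_*_interval_set() */
                return -EINVAL;

        example_value = val;
        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

static void example_register(struct dentry *parent)
{
        /* 0644 (world-readable, root-writable) matches the writable
         * entries created in __hci_init() below; the "data" pointer
         * would normally carry the hci_dev, here it is unused.
         */
        debugfs_create_file("example_value", 0644, parent, NULL,
                            &example_fops);
}
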
@@ -307,11 +888,23 @@ static void amp_init(struct hci_request *req)
        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 
+       /* Read Local Supported Commands */
+       hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
+
+       /* Read Local Supported Features */
+       hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
+
        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
 
        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
+
+       /* Read Flow Control Mode */
+       hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
+
+       /* Read Location Data */
+       hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
 }
 
 static void hci_init1_req(struct hci_request *req, unsigned long opt)
@@ -341,6 +934,8 @@ static void hci_init1_req(struct hci_request *req, unsigned long opt)
 
 static void bredr_setup(struct hci_request *req)
 {
+       struct hci_dev *hdev = req->hdev;
+
        __le16 param;
        __u8 flt_type;
 
@@ -356,6 +951,12 @@ static void bredr_setup(struct hci_request *req)
        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
 
+       /* Read Number of Supported IAC */
+       hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
+
+       /* Read Current IAC LAP */
+       hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
+
        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
@@ -364,8 +965,10 @@ static void bredr_setup(struct hci_request *req)
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
 
-       /* Read page scan parameters */
-       if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
+       /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
+        * but it does not support page scan related HCI commands.
+        */
+       if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
@@ -519,6 +1122,8 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
 
        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
+       else
+               clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
 
        if (lmp_le_capable(hdev))
                le_setup(req);
@@ -532,6 +1137,14 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
 
        if (lmp_ssp_capable(hdev)) {
+               /* When SSP is available, then the host features page
+                * should also be available as well. However some
+                * controllers list the max_page as 0 as long as SSP
+                * has not been enabled. To achieve proper debugging
+                * output, force the minimum max_page to 1 at least.
+                */
+               hdev->max_page = 0x01;
+
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
@@ -607,6 +1220,34 @@ static void hci_set_le_support(struct hci_request *req)
                            &cp);
 }
 
+static void hci_set_event_mask_page_2(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+       /* If Connectionless Slave Broadcast master role is supported
+        * enable all necessary events for it.
+        */
+       if (hdev->features[2][0] & 0x01) {
+               events[1] |= 0x40;      /* Triggered Clock Capture */
+               events[1] |= 0x80;      /* Synchronization Train Complete */
+               events[2] |= 0x10;      /* Slave Page Response Timeout */
+               events[2] |= 0x20;      /* CSB Channel Map Change */
+       }
+
+       /* If Connectionless Slave Broadcast slave role is supported
+        * enable all necessary events for it.
+        */
+       if (hdev->features[2][0] & 0x02) {
+               events[2] |= 0x01;      /* Synchronization Train Received */
+               events[2] |= 0x02;      /* CSB Receive */
+               events[2] |= 0x04;      /* CSB Timeout */
+               events[2] |= 0x08;      /* Truncated Page Complete */
+       }
+
+       hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
+}
+
 static void hci_init3_req(struct hci_request *req, unsigned long opt)
 {
        struct hci_dev *hdev = req->hdev;
@@ -634,8 +1275,17 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
                hci_setup_link_policy(req);
 
        if (lmp_le_capable(hdev)) {
+               /* If the controller has a public BD_ADDR, then by
+                * default use that one. If this is a LE only
+                * controller without one, default to the random
+                * address.
+                */
+               if (bacmp(&hdev->bdaddr, BDADDR_ANY))
+                       hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
+               else
+                       hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
+
                hci_set_le_support(req);
-               hci_update_ad(req);
        }
 
        /* Read features beyond page 1 if available */
@@ -648,6 +1298,19 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
        }
 }
 
+static void hci_init4_req(struct hci_request *req, unsigned long opt)
+{
+       struct hci_dev *hdev = req->hdev;
+
+       /* Set event mask page 2 if the HCI command for it is supported */
+       if (hdev->commands[22] & 0x04)
+               hci_set_event_mask_page_2(req);
+
+       /* Check for Synchronization Train support */
+       if (hdev->features[2][0] & 0x04)
+               hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
+}
+
 static int __hci_init(struct hci_dev *hdev)
 {
        int err;
@@ -656,6 +1319,14 @@ static int __hci_init(struct hci_dev *hdev)
        if (err < 0)
                return err;
 
+       /* The Device Under Test (DUT) mode is special and available for
+        * all controller types. So just create it early on.
+        */
+       if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+               debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
+                                   &dut_mode_fops);
+       }
+
        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
@@ -667,7 +1338,75 @@ static int __hci_init(struct hci_dev *hdev)
        if (err < 0)
                return err;
 
-       return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
+       err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
+       if (err < 0)
+               return err;
+
+       err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
+       if (err < 0)
+               return err;
+
+       /* Only create debugfs entries during the initial setup
+        * phase and not every time the controller gets powered on.
+        */
+       if (!test_bit(HCI_SETUP, &hdev->dev_flags))
+               return 0;
+
+       debugfs_create_file("features", 0444, hdev->debugfs, hdev,
+                           &features_fops);
+       debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
+                          &hdev->manufacturer);
+       debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
+       debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
+       debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
+                           &blacklist_fops);
+       debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
+
+       if (lmp_bredr_capable(hdev)) {
+               debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
+                                   hdev, &inquiry_cache_fops);
+               debugfs_create_file("link_keys", 0400, hdev->debugfs,
+                                   hdev, &link_keys_fops);
+               debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
+                                   hdev, &use_debug_keys_fops);
+               debugfs_create_file("dev_class", 0444, hdev->debugfs,
+                                   hdev, &dev_class_fops);
+               debugfs_create_file("voice_setting", 0444, hdev->debugfs,
+                                   hdev, &voice_setting_fops);
+       }
+
+       if (lmp_ssp_capable(hdev)) {
+               debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
+                                   hdev, &auto_accept_delay_fops);
+               debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
+                                   hdev, &ssp_debug_mode_fops);
+       }
+
+       if (lmp_sniff_capable(hdev)) {
+               debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
+                                   hdev, &idle_timeout_fops);
+               debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
+                                   hdev, &sniff_min_interval_fops);
+               debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
+                                   hdev, &sniff_max_interval_fops);
+       }
+
+       if (lmp_le_capable(hdev)) {
+               debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
+                                 &hdev->le_white_list_size);
+               debugfs_create_file("static_address", 0444, hdev->debugfs,
+                                  hdev, &static_address_fops);
+               debugfs_create_file("own_address_type", 0644, hdev->debugfs,
+                                   hdev, &own_address_type_fops);
+               debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
+                                   hdev, &long_term_keys_fops);
+               debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
+                                   hdev, &conn_min_interval_fops);
+               debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
+                                   hdev, &conn_max_interval_fops);
+       }
+
+       return 0;
 }
 
 static void hci_scan_req(struct hci_request *req, unsigned long opt)
@@ -984,6 +1723,21 @@ int hci_inquiry(void __user *arg)
        if (!hdev)
                return -ENODEV;
 
+       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               err = -EBUSY;
+               goto done;
+       }
+
+       if (hdev->dev_type != HCI_BREDR) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+
        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
@@ -1043,100 +1797,10 @@ done:
        return err;
 }
 
-static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
-{
-       u8 ad_len = 0, flags = 0;
-       size_t name_len;
-
-       if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
-               flags |= LE_AD_GENERAL;
-
-       if (!lmp_bredr_capable(hdev))
-               flags |= LE_AD_NO_BREDR;
-
-       if (lmp_le_br_capable(hdev))
-               flags |= LE_AD_SIM_LE_BREDR_CTRL;
-
-       if (lmp_host_le_br_capable(hdev))
-               flags |= LE_AD_SIM_LE_BREDR_HOST;
-
-       if (flags) {
-               BT_DBG("adv flags 0x%02x", flags);
-
-               ptr[0] = 2;
-               ptr[1] = EIR_FLAGS;
-               ptr[2] = flags;
-
-               ad_len += 3;
-               ptr += 3;
-       }
-
-       if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
-               ptr[0] = 2;
-               ptr[1] = EIR_TX_POWER;
-               ptr[2] = (u8) hdev->adv_tx_power;
-
-               ad_len += 3;
-               ptr += 3;
-       }
-
-       name_len = strlen(hdev->dev_name);
-       if (name_len > 0) {
-               size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
-
-               if (name_len > max_len) {
-                       name_len = max_len;
-                       ptr[1] = EIR_NAME_SHORT;
-               } else
-                       ptr[1] = EIR_NAME_COMPLETE;
-
-               ptr[0] = name_len + 1;
-
-               memcpy(ptr + 2, hdev->dev_name, name_len);
-
-               ad_len += (name_len + 2);
-               ptr += (name_len + 2);
-       }
-
-       return ad_len;
-}
-
-void hci_update_ad(struct hci_request *req)
-{
-       struct hci_dev *hdev = req->hdev;
-       struct hci_cp_le_set_adv_data cp;
-       u8 len;
-
-       if (!lmp_le_capable(hdev))
-               return;
-
-       memset(&cp, 0, sizeof(cp));
-
-       len = create_ad(hdev, cp.data);
-
-       if (hdev->adv_data_len == len &&
-           memcmp(cp.data, hdev->adv_data, len) == 0)
-               return;
-
-       memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
-       hdev->adv_data_len = len;
-
-       cp.length = len;
-
-       hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
-}
-
-/* ---- HCI ioctl helpers ---- */
-
-int hci_dev_open(__u16 dev)
+static int hci_dev_do_open(struct hci_dev *hdev)
 {
-       struct hci_dev *hdev;
        int ret = 0;
 
-       hdev = hci_dev_get(dev);
-       if (!hdev)
-               return -ENODEV;
-
        BT_DBG("%s %p", hdev->name, hdev);
 
        hci_req_lock(hdev);
@@ -1146,13 +1810,29 @@ int hci_dev_open(__u16 dev)
                goto done;
        }
 
-       /* Check for rfkill but allow the HCI setup stage to proceed
-        * (which in itself doesn't cause any RF activity).
-        */
-       if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
-           !test_bit(HCI_SETUP, &hdev->dev_flags)) {
-               ret = -ERFKILL;
-               goto done;
+       if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
+               /* Check for rfkill but allow the HCI setup stage to
+                * proceed (which in itself doesn't cause any RF activity).
+                */
+               if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+                       ret = -ERFKILL;
+                       goto done;
+               }
+
+               /* Check for valid public address or a configured static
+                * random address, but let the HCI setup proceed to
+                * be able to determine if there is a public address
+                * or not.
+                *
+                * This check is only valid for BR/EDR controllers
+                * since AMP controllers do not have an address.
+                */
+               if (hdev->dev_type == HCI_BREDR &&
+                   !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+                   !bacmp(&hdev->static_addr, BDADDR_ANY)) {
+                       ret = -EADDRNOTAVAIL;
+                       goto done;
+               }
        }
 
        if (test_bit(HCI_UP, &hdev->flags)) {
@@ -1172,16 +1852,11 @@ int hci_dev_open(__u16 dev)
                ret = hdev->setup(hdev);
 
        if (!ret) {
-               /* Treat all non BR/EDR controllers as raw devices if
-                * enable_hs is not set.
-                */
-               if (hdev->dev_type != HCI_BREDR && !enable_hs)
-                       set_bit(HCI_RAW, &hdev->flags);
-
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);
 
-               if (!test_bit(HCI_RAW, &hdev->flags))
+               if (!test_bit(HCI_RAW, &hdev->flags) &&
+                   !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }
 
@@ -1192,7 +1867,8 @@ int hci_dev_open(__u16 dev)
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
-                   mgmt_valid_hdev(hdev)) {
+                   !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
+                   hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
@@ -1220,10 +1896,41 @@ int hci_dev_open(__u16 dev)
 
 done:
        hci_req_unlock(hdev);
-       hci_dev_put(hdev);
        return ret;
 }
 
+/* ---- HCI ioctl helpers ---- */
+
+int hci_dev_open(__u16 dev)
+{
+       struct hci_dev *hdev;
+       int err;
+
+       hdev = hci_dev_get(dev);
+       if (!hdev)
+               return -ENODEV;
+
+       /* We need to ensure that no other power on/off work is pending
+        * before proceeding to call hci_dev_do_open. This is
+        * particularly important if the setup procedure has not yet
+        * completed.
+        */
+       if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
+               cancel_delayed_work(&hdev->power_off);
+
+       /* After this call it is guaranteed that the setup procedure
+        * has finished. This means that error conditions like RFKILL
+        * or no valid public or static random address apply.
+        */
+       flush_workqueue(hdev->req_workqueue);
+
+       err = hci_dev_do_open(hdev);
+
+       hci_dev_put(hdev);
+
+       return err;
+}
+
 static int hci_dev_do_close(struct hci_dev *hdev)
 {
        BT_DBG("%s %p", hdev->name, hdev);
@@ -1247,6 +1954,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+               clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
        }
 
        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
@@ -1268,6 +1976,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
+           !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
@@ -1300,15 +2009,16 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
 
-       if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
-           mgmt_valid_hdev(hdev)) {
-               hci_dev_lock(hdev);
-               mgmt_powered(hdev, 0);
-               hci_dev_unlock(hdev);
+       if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
+               if (hdev->dev_type == HCI_BREDR) {
+                       hci_dev_lock(hdev);
+                       mgmt_powered(hdev, 0);
+                       hci_dev_unlock(hdev);
+               }
        }
 
        /* Controller radio is available but is currently powered down */
-       hdev->amp_status = 0;
+       hdev->amp_status = AMP_STATUS_POWERED_DOWN;
 
        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
@@ -1328,11 +2038,17 @@ int hci_dev_close(__u16 dev)
        if (!hdev)
                return -ENODEV;
 
+       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               err = -EBUSY;
+               goto done;
+       }
+
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);
 
        err = hci_dev_do_close(hdev);
 
+done:
        hci_dev_put(hdev);
        return err;
 }
@@ -1348,8 +2064,15 @@ int hci_dev_reset(__u16 dev)
 
        hci_req_lock(hdev);
 
-       if (!test_bit(HCI_UP, &hdev->flags))
+       if (!test_bit(HCI_UP, &hdev->flags)) {
+               ret = -ENETDOWN;
                goto done;
+       }
+
+       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               ret = -EBUSY;
+               goto done;
+       }
 
        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
@@ -1384,10 +2107,15 @@ int hci_dev_reset_stat(__u16 dev)
        if (!hdev)
                return -ENODEV;
 
+       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               ret = -EBUSY;
+               goto done;
+       }
+
        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
 
+done:
        hci_dev_put(hdev);
-
        return ret;
 }
 
@@ -1404,6 +2132,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
        if (!hdev)
                return -ENODEV;
 
+       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               err = -EBUSY;
+               goto done;
+       }
+
+       if (hdev->dev_type != HCI_BREDR) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
+
        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
@@ -1462,6 +2205,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
                break;
        }
 
+done:
        hci_dev_put(hdev);
        return err;
 }
@@ -1534,7 +2278,7 @@ int hci_get_dev_info(void __user *arg)
 
        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
-       di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
+       di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
@@ -1570,6 +2314,9 @@ static int hci_rfkill_set_block(void *data, bool blocked)
 
        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
 
+       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+               return -EBUSY;
+
        if (blocked) {
                set_bit(HCI_RFKILLED, &hdev->dev_flags);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags))
@@ -1592,13 +2339,20 @@ static void hci_power_on(struct work_struct *work)
 
        BT_DBG("%s", hdev->name);
 
-       err = hci_dev_open(hdev->id);
+       err = hci_dev_do_open(hdev);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }
 
-       if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
+       /* During the HCI setup phase, a few error conditions are
+        * ignored and they need to be checked now. If they are still
+        * valid, it is important to turn the device back off.
+        */
+       if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
+           (hdev->dev_type == HCI_BREDR &&
+            !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+            !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
                hci_dev_do_close(hdev);
        } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
@@ -1623,19 +2377,12 @@ static void hci_power_off(struct work_struct *work)
 static void hci_discov_off(struct work_struct *work)
 {
        struct hci_dev *hdev;
-       u8 scan = SCAN_PAGE;
 
        hdev = container_of(work, struct hci_dev, discov_off.work);
 
        BT_DBG("%s", hdev->name);
 
-       hci_dev_lock(hdev);
-
-       hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
-
-       hdev->discov_timeout = 0;
-
-       hci_dev_unlock(hdev);
+       mgmt_discoverable_timeout(hdev);
 }
 
 int hci_uuids_clear(struct hci_dev *hdev)
@@ -1958,13 +2705,15 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
        return 0;
 }
 
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+                                        bdaddr_t *bdaddr, u8 type)
 {
        struct bdaddr_list *b;
 
-       list_for_each_entry(b, &hdev->blacklist, list)
-               if (bacmp(bdaddr, &b->bdaddr) == 0)
+       list_for_each_entry(b, &hdev->blacklist, list) {
+               if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
                        return b;
+       }
 
        return NULL;
 }
@@ -1974,9 +2723,7 @@ int hci_blacklist_clear(struct hci_dev *hdev)
        struct list_head *p, *n;
 
        list_for_each_safe(p, n, &hdev->blacklist) {
-               struct bdaddr_list *b;
-
-               b = list_entry(p, struct bdaddr_list, list);
+               struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
 
                list_del(p);
                kfree(b);
@@ -1989,10 +2736,10 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
 {
        struct bdaddr_list *entry;
 
-       if (bacmp(bdaddr, BDADDR_ANY) == 0)
+       if (!bacmp(bdaddr, BDADDR_ANY))
                return -EBADF;
 
-       if (hci_blacklist_lookup(hdev, bdaddr))
+       if (hci_blacklist_lookup(hdev, bdaddr, type))
                return -EEXIST;
 
        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
@@ -2000,6 +2747,7 @@ int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
                return -ENOMEM;
 
        bacpy(&entry->bdaddr, bdaddr);
+       entry->bdaddr_type = type;
 
        list_add(&entry->list, &hdev->blacklist);
 
@@ -2010,10 +2758,10 @@ int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
 {
        struct bdaddr_list *entry;
 
-       if (bacmp(bdaddr, BDADDR_ANY) == 0)
+       if (!bacmp(bdaddr, BDADDR_ANY))
                return hci_blacklist_clear(hdev);
 
-       entry = hci_blacklist_lookup(hdev, bdaddr);
+       entry = hci_blacklist_lookup(hdev, bdaddr, type);
        if (!entry)
                return -ENOENT;
 
@@ -2111,13 +2859,19 @@ struct hci_dev *hci_alloc_dev(void)
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
-       hdev->io_capability = 0x03; /* No Input No Output */
+       hdev->num_iac = 0x01;           /* One IAC support is mandatory */
+       hdev->io_capability = 0x03;     /* No Input No Output */
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;
 
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;
 
+       hdev->le_scan_interval = 0x0060;
+       hdev->le_scan_window = 0x0030;
+       hdev->le_conn_min_interval = 0x0028;
+       hdev->le_conn_max_interval = 0x0038;
+
        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);
 
@@ -2206,7 +2960,12 @@ int hci_register_dev(struct hci_dev *hdev)
                goto err;
        }
 
-       error = hci_add_sysfs(hdev);
+       if (!IS_ERR_OR_NULL(bt_debugfs))
+               hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
+
+       dev_set_name(&hdev->dev, "%s", hdev->name);
+
+       error = device_add(&hdev->dev);
        if (error < 0)
                goto err_wqueue;
 
@@ -2224,9 +2983,14 @@ int hci_register_dev(struct hci_dev *hdev)
                set_bit(HCI_RFKILLED, &hdev->dev_flags);
 
        set_bit(HCI_SETUP, &hdev->dev_flags);
+       set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
 
-       if (hdev->dev_type != HCI_AMP)
-               set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+       if (hdev->dev_type == HCI_BREDR) {
+               /* Assume BR/EDR support until proven otherwise (such as
+                * through reading supported features during init.
+                * through reading supported features during init).
+               set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+       }
 
        write_lock(&hci_dev_list_lock);
        list_add(&hdev->list, &hci_dev_list);
@@ -2289,7 +3053,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
                rfkill_destroy(hdev->rfkill);
        }
 
-       hci_del_sysfs(hdev);
+       device_del(&hdev->dev);
+
+       debugfs_remove_recursive(hdev->debugfs);
 
        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);
@@ -2325,9 +3091,8 @@ int hci_resume_dev(struct hci_dev *hdev)
 EXPORT_SYMBOL(hci_resume_dev);
 
 /* Receive frame from HCI drivers */
-int hci_recv_frame(struct sk_buff *skb)
+int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                      && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
@@ -2386,7 +3151,6 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                scb->expect = hlen;
                scb->pkt_type = type;
 
-               skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }
 
@@ -2446,7 +3210,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                        /* Complete frame */
 
                        bt_cb(skb)->pkt_type = type;
-                       hci_recv_frame(skb);
+                       hci_recv_frame(hdev, skb);
 
                        hdev->reassembly[index] = NULL;
                        return remain;
@@ -2537,15 +3301,8 @@ int hci_unregister_cb(struct hci_cb *cb)
 }
 EXPORT_SYMBOL(hci_unregister_cb);
 
-static int hci_send_frame(struct sk_buff *skb)
+static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 {
-       struct hci_dev *hdev = (struct hci_dev *) skb->dev;
-
-       if (!hdev) {
-               kfree_skb(skb);
-               return -ENODEV;
-       }
-
        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
 
        /* Time stamp */
@@ -2562,7 +3319,8 @@ static int hci_send_frame(struct sk_buff *skb)
        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);
 
-       return hdev->send(skb);
+       if (hdev->send(hdev, skb) < 0)
+               BT_ERR("%s sending frame failed", hdev->name);
 }
 
 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
@@ -2625,7 +3383,6 @@ static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
        BT_DBG("skb len %d", skb->len);
 
        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
-       skb->dev = (void *) hdev;
 
        return skb;
 }
@@ -2769,7 +3526,6 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
                do {
                        skb = list; list = list->next;
 
-                       skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);
 
@@ -2788,8 +3544,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
 
        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
 
-       skb->dev = (void *) hdev;
-
        hci_queue_acl(chan, &chan->data_q, skb, flags);
 
        queue_work(hdev->workqueue, &hdev->tx_work);
@@ -2810,7 +3564,6 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
 
-       skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
 
        skb_queue_tail(&conn->data_q, skb);
@@ -3075,7 +3828,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);
 
-                       hci_send_frame(skb);
+                       hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;
 
                        hdev->acl_cnt--;
@@ -3127,7 +3880,7 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);
 
-                       hci_send_frame(skb);
+                       hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;
 
                        hdev->block_cnt -= blocks;
@@ -3180,7 +3933,7 @@ static void hci_sched_sco(struct hci_dev *hdev)
        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
-                       hci_send_frame(skb);
+                       hci_send_frame(hdev, skb);
 
                        conn->sent++;
                        if (conn->sent == ~0)
@@ -3204,7 +3957,7 @@ static void hci_sched_esco(struct hci_dev *hdev)
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
-                       hci_send_frame(skb);
+                       hci_send_frame(hdev, skb);
 
                        conn->sent++;
                        if (conn->sent == ~0)
@@ -3246,7 +3999,7 @@ static void hci_sched_le(struct hci_dev *hdev)
 
                        skb = skb_dequeue(&chan->data_q);
 
-                       hci_send_frame(skb);
+                       hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;
 
                        cnt--;
@@ -3272,19 +4025,17 @@ static void hci_tx_work(struct work_struct *work)
        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);
 
-       /* Schedule queues and send stuff to HCI driver */
-
-       hci_sched_acl(hdev);
-
-       hci_sched_sco(hdev);
-
-       hci_sched_esco(hdev);
-
-       hci_sched_le(hdev);
+       if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+               /* Schedule queues and send stuff to HCI driver */
+               hci_sched_acl(hdev);
+               hci_sched_sco(hdev);
+               hci_sched_esco(hdev);
+               hci_sched_le(hdev);
+       }
 
        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
-               hci_send_frame(skb);
+               hci_send_frame(hdev, skb);
 }
 
 /* ----- HCI RX task (incoming data processing) ----- */
@@ -3471,7 +4222,8 @@ static void hci_rx_work(struct work_struct *work)
                        hci_send_to_sock(hdev, skb);
                }
 
-               if (test_bit(HCI_RAW, &hdev->flags)) {
+               if (test_bit(HCI_RAW, &hdev->flags) ||
+                   test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }
@@ -3526,10 +4278,10 @@ static void hci_cmd_work(struct work_struct *work)
 
                kfree_skb(hdev->sent_cmd);
 
-               hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
+               hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
-                       hci_send_frame(skb);
+                       hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
@@ -3541,15 +4293,3 @@ static void hci_cmd_work(struct work_struct *work)
                }
        }
 }
-
-u8 bdaddr_to_le(u8 bdaddr_type)
-{
-       switch (bdaddr_type) {
-       case BDADDR_LE_PUBLIC:
-               return ADDR_LE_DEV_PUBLIC;
-
-       default:
-               /* Fallback to LE Random address type */
-               return ADDR_LE_DEV_RANDOM;
-       }
-}
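The hci_core.c hunks above rework the driver interface: frames no longer smuggle the hci_dev pointer through skb->dev, hci_recv_frame() takes the device explicitly, and the send callback is invoked as hdev->send(hdev, skb). Below is a minimal sketch of a transport driver against the new signatures; my_hw_write(), my_rx_packet() and the probe wiring are assumptions for illustration, not code from this merge:

static int my_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* the device now comes in as an argument; no skb->dev casting */
        return my_hw_write(hdev, skb->data, skb->len, bt_cb(skb)->pkt_type);
}

/* called from the transport's RX path with one complete HCI packet */
static void my_rx_packet(struct hci_dev *hdev, u8 pkt_type,
                         const void *data, unsigned int len)
{
        struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

        if (!skb)
                return;

        memcpy(skb_put(skb, len), data, len);
        bt_cb(skb)->pkt_type = pkt_type;

        /* hand the device to the core explicitly */
        hci_recv_frame(hdev, skb);
}

/* in the probe path: hdev->send = my_send_frame; */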
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 8db3e89fae354aebb67c6ea7172a3e2e1926b66d..5935f748c0f9a6fe71cb3c0fe8baeaedb7019c37 100644 (file)
@@ -29,8 +29,9 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/mgmt.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
+
+#include "a2mp.h"
+#include "amp.h"
 
 /* Handle HCI Event packets */
 
@@ -194,6 +195,11 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
 
        memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
        hdev->adv_data_len = 0;
+
+       memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
+       hdev->scan_rsp_data_len = 0;
+
+       hdev->ssp_debug_mode = 0;
 }
 
 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -297,6 +303,11 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
                goto done;
        }
 
+       /* We need to ensure that we set this back on if someone changed
+        * the scan mode through a raw HCI socket.
+        */
+       set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
        old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
        old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
 
@@ -304,11 +315,6 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
                set_bit(HCI_ISCAN, &hdev->flags);
                if (!old_iscan)
                        mgmt_discoverable(hdev, 1);
-               if (hdev->discov_timeout > 0) {
-                       int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
-                       queue_delayed_work(hdev->workqueue, &hdev->discov_off,
-                                          to);
-               }
        } else if (old_iscan)
                mgmt_discoverable(hdev, 0);
 
@@ -412,6 +418,21 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev,
                hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
 }
 
+static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
+                                         struct sk_buff *skb)
+{
+       struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+       if (rp->status)
+               return;
+
+       hdev->num_iac = rp->num_iac;
+
+       BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
+}
+
 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
 {
        __u8 status = *((__u8 *) skb->data);
@@ -449,14 +470,13 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
        if (rp->status)
                return;
 
-       hdev->hci_ver = rp->hci_ver;
-       hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
-       hdev->lmp_ver = rp->lmp_ver;
-       hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
-       hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
-
-       BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
-              hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
+       if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+               hdev->hci_ver = rp->hci_ver;
+               hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
+               hdev->lmp_ver = rp->lmp_ver;
+               hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
+               hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
+       }
 }
 
 static void hci_cc_read_local_commands(struct hci_dev *hdev,
@@ -536,7 +556,8 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
        if (rp->status)
                return;
 
-       hdev->max_page = rp->max_page;
+       if (hdev->max_page < rp->max_page)
+               hdev->max_page = rp->max_page;
 
        if (rp->page < HCI_MAX_PAGES)
                memcpy(hdev->features[rp->page], rp->features, 8);
@@ -913,17 +934,9 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
 
        if (!status) {
                if (*sent)
-                       set_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
+                       set_bit(HCI_ADVERTISING, &hdev->dev_flags);
                else
-                       clear_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags);
-       }
-
-       if (!test_bit(HCI_INIT, &hdev->flags)) {
-               struct hci_request req;
-
-               hci_req_init(&req, hdev);
-               hci_update_ad(&req);
-               hci_req_run(&req, NULL);
+                       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
        }
 
        hci_dev_unlock(hdev);
@@ -994,20 +1007,20 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
                return;
 
        if (!status) {
-               if (sent->le)
+               if (sent->le) {
                        hdev->features[1][0] |= LMP_HOST_LE;
-               else
+                       set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+               } else {
                        hdev->features[1][0] &= ~LMP_HOST_LE;
+                       clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
+                       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+               }
 
                if (sent->simul)
                        hdev->features[1][0] |= LMP_HOST_LE_BREDR;
                else
                        hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
        }
-
-       if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
-           !test_bit(HCI_INIT, &hdev->flags))
-               mgmt_le_enable_complete(hdev, sent->le, status);
 }
 
 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
@@ -1291,9 +1304,11 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
                goto unlock;
 
        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
-               struct hci_cp_auth_requested cp;
-               cp.handle = __cpu_to_le16(conn->handle);
-               hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
+               struct hci_cp_auth_requested auth_cp;
+
+               auth_cp.handle = __cpu_to_le16(conn->handle);
+               hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
+                            sizeof(auth_cp), &auth_cp);
        }
 
 unlock:
@@ -1465,33 +1480,6 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
        hci_dev_unlock(hdev);
 }
 
-static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
-{
-       struct hci_conn *conn;
-
-       BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-       if (status) {
-               hci_dev_lock(hdev);
-
-               conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
-               if (!conn) {
-                       hci_dev_unlock(hdev);
-                       return;
-               }
-
-               BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
-
-               conn->state = BT_CLOSED;
-               mgmt_connect_failed(hdev, &conn->dst, conn->type,
-                                   conn->dst_type, status);
-               hci_proto_connect_cfm(conn, status);
-               hci_conn_del(conn);
-
-               hci_dev_unlock(hdev);
-       }
-}
-
 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
 {
        struct hci_cp_create_phy_link *cp;
@@ -1706,7 +1694,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                                      &flags);
 
        if ((mask & HCI_LM_ACCEPT) &&
-           !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+           !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
                /* Connection accepted */
                struct inquiry_entry *ie;
                struct hci_conn *conn;
@@ -1821,10 +1809,25 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
        }
 
        if (ev->status == 0) {
-               if (conn->type == ACL_LINK && conn->flush_key)
+               u8 type = conn->type;
+
+               if (type == ACL_LINK && conn->flush_key)
                        hci_remove_link_key(hdev, &conn->dst);
                hci_proto_disconn_cfm(conn, ev->reason);
                hci_conn_del(conn);
+
+               /* Re-enable advertising if necessary, since it might
+                * have been disabled by the connection. From the
+                * HCI_LE_Set_Advertise_Enable command description in
+                * the core specification (v4.0):
+                * "The Controller shall continue advertising until the Host
+                * issues an LE_Set_Advertise_Enable command with
+                * Advertising_Enable set to 0x00 (Advertising is disabled)
+                * or until a connection is created or until the Advertising
+                * is timed out due to Directed Advertising."
+                */
+               if (type == LE_LINK)
+                       mgmt_reenable_advertising(hdev);
        }
 
 unlock:
@@ -2139,6 +2142,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cc_write_voice_setting(hdev, skb);
                break;
 
+       case HCI_OP_READ_NUM_SUPPORTED_IAC:
+               hci_cc_read_num_supported_iac(hdev, skb);
+               break;
+
        case HCI_OP_WRITE_SSP_MODE:
                hci_cc_write_ssp_mode(hdev, skb);
                break;
@@ -2342,10 +2349,6 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
                hci_cs_disconnect(hdev, ev->status);
                break;
 
-       case HCI_OP_LE_CREATE_CONN:
-               hci_cs_le_create_conn(hdev, ev->status);
-               break;
-
        case HCI_OP_CREATE_PHY_LINK:
                hci_cs_create_phylink(hdev, ev->status);
                break;
@@ -2548,7 +2551,6 @@ static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
        if (conn) {
                conn->mode = ev->mode;
-               conn->interval = __le16_to_cpu(ev->interval);
 
                if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
                                        &conn->flags)) {
@@ -2930,6 +2932,23 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
+static inline size_t eir_get_length(u8 *eir, size_t eir_len)
+{
+       size_t parsed = 0;
+
+       while (parsed < eir_len) {
+               u8 field_len = eir[0];
+
+               if (field_len == 0)
+                       return parsed;
+
+               parsed += field_len + 1;
+               eir += field_len + 1;
+       }
+
+       return eir_len;
+}
+
 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
                                            struct sk_buff *skb)
 {
@@ -3170,7 +3189,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
 
                if (hdev->auto_accept_delay > 0) {
                        int delay = msecs_to_jiffies(hdev->auto_accept_delay);
-                       mod_timer(&conn->auto_accept_timer, jiffies + delay);
+                       queue_delayed_work(conn->hdev->workqueue,
+                                          &conn->auto_accept_work, delay);
                        goto unlock;
                }
 
@@ -3485,6 +3505,17 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 
                conn->dst_type = ev->bdaddr_type;
 
+               /* The advertising parameters for own address type
+                * define which source address and source address
+                * type this connections has.
+                * type this connection has.
+               if (bacmp(&conn->src, BDADDR_ANY)) {
+                       conn->src_type = ADDR_LE_DEV_PUBLIC;
+               } else {
+                       bacpy(&conn->src, &hdev->static_addr);
+                       conn->src_type = ADDR_LE_DEV_RANDOM;
+               }
+
                if (ev->role == LE_CONN_ROLE_MASTER) {
                        conn->out = true;
                        conn->link_mode |= HCI_LM_MASTER;
@@ -3640,8 +3671,8 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
        skb_pull(skb, HCI_EVENT_HDR_SIZE);
 
        if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
-               struct hci_command_hdr *hdr = (void *) hdev->sent_cmd->data;
-               u16 opcode = __le16_to_cpu(hdr->opcode);
+               struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
+               u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
 
                hci_req_cmd_complete(hdev, opcode, 0);
        }
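The eir_get_length() helper introduced above walks the standard EIR/advertising data layout: each field is one length byte followed by that many bytes, the first of which is the field type, and a zero length byte terminates the significant part. A worked example with made-up data (illustrative only, not from the patch):

static const u8 eir_example[] = {
        0x02, 0x01, 0x06,                /* len 2, type 0x01 (Flags), data 0x06 */
        0x05, 0x09, 't', 'e', 's', 't',  /* len 5, type 0x09 (Complete Local Name) */
        0x00, 0x00, 0x00,                /* zero padding */
};

/* eir_get_length(eir_example, sizeof(eir_example)) returns 9: the two
 * real fields account for 3 + 6 bytes and the first zero length byte
 * stops the walk, so the trailing padding is not counted.
 */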
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 9bd7d959e384c74504a06ed27e0eef0d61dbf70e..71f0be1730801a615191a9badc2bd2f588bd4fbb 100644 (file)
@@ -66,6 +66,46 @@ static struct bt_sock_list hci_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
 };
 
+static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
+{
+       struct hci_filter *flt;
+       int flt_type, flt_event;
+
+       /* Apply filter */
+       flt = &hci_pi(sk)->filter;
+
+       if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
+               flt_type = 0;
+       else
+               flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
+
+       if (!test_bit(flt_type, &flt->type_mask))
+               return true;
+
+       /* Extra filter for event packets only */
+       if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
+               return false;
+
+       flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
+
+       if (!hci_test_bit(flt_event, &flt->event_mask))
+               return true;
+
+       /* Check filter only when opcode is set */
+       if (!flt->opcode)
+               return false;
+
+       if (flt_event == HCI_EV_CMD_COMPLETE &&
+           flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
+               return true;
+
+       if (flt_event == HCI_EV_CMD_STATUS &&
+           flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
+               return true;
+
+       return false;
+}
+
 /* Send frame to RAW socket */
 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
 {
@@ -77,7 +117,6 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
        read_lock(&hci_sk_list.lock);
 
        sk_for_each(sk, &hci_sk_list.head) {
-               struct hci_filter *flt;
                struct sk_buff *nskb;
 
                if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
@@ -87,31 +126,19 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
                if (skb->sk == sk)
                        continue;
 
-               if (hci_pi(sk)->channel != HCI_CHANNEL_RAW)
-                       continue;
-
-               /* Apply filter */
-               flt = &hci_pi(sk)->filter;
-
-               if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
-                             0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
-                             &flt->type_mask))
-                       continue;
-
-               if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
-                       int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
-
-                       if (!hci_test_bit(evt, &flt->event_mask))
+               if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
+                       if (is_filtered_packet(sk, skb))
                                continue;
-
-                       if (flt->opcode &&
-                           ((evt == HCI_EV_CMD_COMPLETE &&
-                             flt->opcode !=
-                             get_unaligned((__le16 *)(skb->data + 3))) ||
-                            (evt == HCI_EV_CMD_STATUS &&
-                             flt->opcode !=
-                             get_unaligned((__le16 *)(skb->data + 4)))))
+               } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+                       if (!bt_cb(skb)->incoming)
+                               continue;
+                       if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
+                           bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+                           bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
                                continue;
+               } else {
+                       /* Don't send frame to other channel types */
+                       continue;
                }
 
                if (!skb_copy) {
@@ -360,7 +387,6 @@ static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
        __net_timestamp(skb);
 
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
-       skb->dev = (void *) hdev;
        hci_send_to_sock(hdev, skb);
        kfree_skb(skb);
 }
@@ -426,6 +452,12 @@ static int hci_sock_release(struct socket *sock)
        bt_sock_unlink(&hci_sk_list, sk);
 
        if (hdev) {
+               if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+                       mgmt_index_added(hdev);
+                       clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+                       hci_dev_close(hdev->id);
+               }
+
                atomic_dec(&hdev->promisc);
                hci_dev_put(hdev);
        }
@@ -449,7 +481,7 @@ static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 
        hci_dev_lock(hdev);
 
-       err = hci_blacklist_add(hdev, &bdaddr, 0);
+       err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 
        hci_dev_unlock(hdev);
 
@@ -466,7 +498,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 
        hci_dev_lock(hdev);
 
-       err = hci_blacklist_del(hdev, &bdaddr, 0);
+       err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 
        hci_dev_unlock(hdev);
 
@@ -482,6 +514,12 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
        if (!hdev)
                return -EBADFD;
 
+       if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
+               return -EBUSY;
+
+       if (hdev->dev_type != HCI_BREDR)
+               return -EOPNOTSUPP;
+
        switch (cmd) {
        case HCISETRAW:
                if (!capable(CAP_NET_ADMIN))
@@ -512,23 +550,29 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                return hci_sock_blacklist_del(hdev, (void __user *) arg);
-
-       default:
-               if (hdev->ioctl)
-                       return hdev->ioctl(hdev, cmd, arg);
-               return -EINVAL;
        }
+
+       return -ENOIOCTLCMD;
 }
 
 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
                          unsigned long arg)
 {
-       struct sock *sk = sock->sk;
        void __user *argp = (void __user *) arg;
+       struct sock *sk = sock->sk;
        int err;
 
        BT_DBG("cmd %x arg %lx", cmd, arg);
 
+       lock_sock(sk);
+
+       if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
+               err = -EBADFD;
+               goto done;
+       }
+
+       release_sock(sk);
+
        switch (cmd) {
        case HCIGETDEVLIST:
                return hci_get_dev_list(argp);
@@ -573,13 +617,15 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
 
        case HCIINQUIRY:
                return hci_inquiry(argp);
-
-       default:
-               lock_sock(sk);
-               err = hci_sock_bound_ioctl(sk, cmd, arg);
-               release_sock(sk);
-               return err;
        }
+
+       lock_sock(sk);
+
+       err = hci_sock_bound_ioctl(sk, cmd, arg);
+
+done:
+       release_sock(sk);
+       return err;
 }
 
 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
@@ -629,6 +675,56 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
                hci_pi(sk)->hdev = hdev;
                break;
 
+       case HCI_CHANNEL_USER:
+               if (hci_pi(sk)->hdev) {
+                       err = -EALREADY;
+                       goto done;
+               }
+
+               if (haddr.hci_dev == HCI_DEV_NONE) {
+                       err = -EINVAL;
+                       goto done;
+               }
+
+               if (!capable(CAP_NET_ADMIN)) {
+                       err = -EPERM;
+                       goto done;
+               }
+
+               hdev = hci_dev_get(haddr.hci_dev);
+               if (!hdev) {
+                       err = -ENODEV;
+                       goto done;
+               }
+
+               if (test_bit(HCI_UP, &hdev->flags) ||
+                   test_bit(HCI_INIT, &hdev->flags) ||
+                   test_bit(HCI_SETUP, &hdev->dev_flags)) {
+                       err = -EBUSY;
+                       hci_dev_put(hdev);
+                       goto done;
+               }
+
+               if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+                       err = -EUSERS;
+                       hci_dev_put(hdev);
+                       goto done;
+               }
+
+               mgmt_index_removed(hdev);
+
+               err = hci_dev_open(hdev->id);
+               if (err) {
+                       clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
+                       hci_dev_put(hdev);
+                       goto done;
+               }
+
+               atomic_inc(&hdev->promisc);
+
+               hci_pi(sk)->hdev = hdev;
+               break;
+
        case HCI_CHANNEL_CONTROL:
                if (haddr.hci_dev != HCI_DEV_NONE) {
                        err = -EINVAL;
@@ -677,22 +773,30 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
 {
        struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
        struct sock *sk = sock->sk;
-       struct hci_dev *hdev = hci_pi(sk)->hdev;
+       struct hci_dev *hdev;
+       int err = 0;
 
        BT_DBG("sock %p sk %p", sock, sk);
 
-       if (!hdev)
-               return -EBADFD;
+       if (peer)
+               return -EOPNOTSUPP;
 
        lock_sock(sk);
 
+       hdev = hci_pi(sk)->hdev;
+       if (!hdev) {
+               err = -EBADFD;
+               goto done;
+       }
+
        *addr_len = sizeof(*haddr);
        haddr->hci_family = AF_BLUETOOTH;
        haddr->hci_dev    = hdev->id;
-       haddr->hci_channel= 0;
+       haddr->hci_channel= hci_pi(sk)->channel;
 
+done:
        release_sock(sk);
-       return 0;
+       return err;
 }
 
 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
@@ -767,6 +871,7 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
        case HCI_CHANNEL_RAW:
                hci_sock_cmsg(sk, msg, skb);
                break;
+       case HCI_CHANNEL_USER:
        case HCI_CHANNEL_CONTROL:
        case HCI_CHANNEL_MONITOR:
                sock_recv_timestamp(msg, sk, skb);
@@ -801,6 +906,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        switch (hci_pi(sk)->channel) {
        case HCI_CHANNEL_RAW:
+       case HCI_CHANNEL_USER:
                break;
        case HCI_CHANNEL_CONTROL:
                err = mgmt_control(sk, msg, len);
@@ -835,9 +941,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
        skb_pull(skb, 1);
-       skb->dev = (void *) hdev;
 
-       if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+       if (hci_pi(sk)->channel == HCI_CHANNEL_RAW &&
+           bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
                u16 opcode = get_unaligned_le16(skb->data);
                u16 ogf = hci_opcode_ogf(opcode);
                u16 ocf = hci_opcode_ocf(opcode);
@@ -868,6 +974,14 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                        goto drop;
                }
 
+               if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
+                   bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+                   bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+                   bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+                       err = -EINVAL;
+                       goto drop;
+               }
+
                skb_queue_tail(&hdev->raw_q, skb);
                queue_work(hdev->workqueue, &hdev->tx_work);
        }
@@ -895,7 +1009,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
        lock_sock(sk);
 
        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
-               err = -EINVAL;
+               err = -EBADFD;
                goto done;
        }
 
@@ -981,7 +1095,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
        lock_sock(sk);
 
        if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
-               err = -EINVAL;
+               err = -EBADFD;
                goto done;
        }
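The hci_sock.c changes above add HCI_CHANNEL_USER, which gives a single CAP_NET_ADMIN process exclusive raw access to a controller that is currently powered off. A userspace sketch of claiming a device this way; illustrative only, and older libbluetooth headers may lack the HCI_CHANNEL_USER constant, hence the fallback define:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

#ifndef HCI_CHANNEL_USER
#define HCI_CHANNEL_USER 1
#endif

static int open_user_channel(int dev_id)
{
        struct sockaddr_hci addr;
        int fd;

        fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
        if (fd < 0)
                return -1;

        memset(&addr, 0, sizeof(addr));
        addr.hci_family = AF_BLUETOOTH;
        addr.hci_dev = dev_id;
        addr.hci_channel = HCI_CHANNEL_USER;

        /* per the diff: EPERM without CAP_NET_ADMIN, EBUSY if the device
         * is already up or still in setup, EUSERS if someone else holds it
         */
        if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
                close(fd);
                return -1;
        }

        /* the kernel has opened the device; this socket now carries raw
         * HCI commands, events and ACL/SCO data while the in-kernel stack
         * stays out of the way
         */
        return fd;
}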
 
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index edf623a29043117c4be9cf255b37af3ddf22d988..0b61250cfdf90c9e3a488c9ca3cce41ac79d6a84 100644 (file)
@@ -1,17 +1,12 @@
 /* Bluetooth HCI driver model support. */
 
-#include <linux/debugfs.h>
 #include <linux/module.h>
-#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
 static struct class *bt_class;
 
-struct dentry *bt_debugfs;
-EXPORT_SYMBOL_GPL(bt_debugfs);
-
 static inline char *link_typetostr(int type)
 {
        switch (type) {
@@ -42,29 +37,15 @@ static ssize_t show_link_address(struct device *dev,
        return sprintf(buf, "%pMR\n", &conn->dst);
 }
 
-static ssize_t show_link_features(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
-{
-       struct hci_conn *conn = to_hci_conn(dev);
-
-       return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-                      conn->features[0][0], conn->features[0][1],
-                      conn->features[0][2], conn->features[0][3],
-                      conn->features[0][4], conn->features[0][5],
-                      conn->features[0][6], conn->features[0][7]);
-}
-
 #define LINK_ATTR(_name, _mode, _show, _store) \
 struct device_attribute link_attr_##_name = __ATTR(_name, _mode, _show, _store)
 
 static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
 static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
-static LINK_ATTR(features, S_IRUGO, show_link_features, NULL);
 
 static struct attribute *bt_link_attrs[] = {
        &link_attr_type.attr,
        &link_attr_address.attr,
-       &link_attr_features.attr,
        NULL
 };
 
@@ -150,28 +131,6 @@ void hci_conn_del_sysfs(struct hci_conn *conn)
        hci_dev_put(hdev);
 }
 
-static inline char *host_bustostr(int bus)
-{
-       switch (bus) {
-       case HCI_VIRTUAL:
-               return "VIRTUAL";
-       case HCI_USB:
-               return "USB";
-       case HCI_PCCARD:
-               return "PCCARD";
-       case HCI_UART:
-               return "UART";
-       case HCI_RS232:
-               return "RS232";
-       case HCI_PCI:
-               return "PCI";
-       case HCI_SDIO:
-               return "SDIO";
-       default:
-               return "UNKNOWN";
-       }
-}
-
 static inline char *host_typetostr(int type)
 {
        switch (type) {
@@ -184,13 +143,6 @@ static inline char *host_typetostr(int type)
        }
 }
 
-static ssize_t show_bus(struct device *dev,
-                       struct device_attribute *attr, char *buf)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
-}
-
 static ssize_t show_type(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
@@ -212,14 +164,6 @@ static ssize_t show_name(struct device *dev,
        return sprintf(buf, "%s\n", name);
 }
 
-static ssize_t show_class(struct device *dev,
-                         struct device_attribute *attr, char *buf)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
-                      hdev->dev_class[1], hdev->dev_class[0]);
-}
-
 static ssize_t show_address(struct device *dev,
                            struct device_attribute *attr, char *buf)
 {
@@ -227,150 +171,14 @@ static ssize_t show_address(struct device *dev,
        return sprintf(buf, "%pMR\n", &hdev->bdaddr);
 }
 
-static ssize_t show_features(struct device *dev,
-                            struct device_attribute *attr, char *buf)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-
-       return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-                      hdev->features[0][0], hdev->features[0][1],
-                      hdev->features[0][2], hdev->features[0][3],
-                      hdev->features[0][4], hdev->features[0][5],
-                      hdev->features[0][6], hdev->features[0][7]);
-}
-
-static ssize_t show_manufacturer(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       return sprintf(buf, "%d\n", hdev->manufacturer);
-}
-
-static ssize_t show_hci_version(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       return sprintf(buf, "%d\n", hdev->hci_ver);
-}
-
-static ssize_t show_hci_revision(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       return sprintf(buf, "%d\n", hdev->hci_rev);
-}
-
-static ssize_t show_idle_timeout(struct device *dev,
-                                struct device_attribute *attr, char *buf)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       return sprintf(buf, "%d\n", hdev->idle_timeout);
-}
-
-static ssize_t store_idle_timeout(struct device *dev,
-                                 struct device_attribute *attr,
-                                 const char *buf, size_t count)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       unsigned int val;
-       int rv;
-
-       rv = kstrtouint(buf, 0, &val);
-       if (rv < 0)
-               return rv;
-
-       if (val != 0 && (val < 500 || val > 3600000))
-               return -EINVAL;
-
-       hdev->idle_timeout = val;
-
-       return count;
-}
-
-static ssize_t show_sniff_max_interval(struct device *dev,
-                                      struct device_attribute *attr, char *buf)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       return sprintf(buf, "%d\n", hdev->sniff_max_interval);
-}
-
-static ssize_t store_sniff_max_interval(struct device *dev,
-                                       struct device_attribute *attr,
-                                       const char *buf, size_t count)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       u16 val;
-       int rv;
-
-       rv = kstrtou16(buf, 0, &val);
-       if (rv < 0)
-               return rv;
-
-       if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
-               return -EINVAL;
-
-       hdev->sniff_max_interval = val;
-
-       return count;
-}
-
-static ssize_t show_sniff_min_interval(struct device *dev,
-                                      struct device_attribute *attr, char *buf)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       return sprintf(buf, "%d\n", hdev->sniff_min_interval);
-}
-
-static ssize_t store_sniff_min_interval(struct device *dev,
-                                       struct device_attribute *attr,
-                                       const char *buf, size_t count)
-{
-       struct hci_dev *hdev = to_hci_dev(dev);
-       u16 val;
-       int rv;
-
-       rv = kstrtou16(buf, 0, &val);
-       if (rv < 0)
-               return rv;
-
-       if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
-               return -EINVAL;
-
-       hdev->sniff_min_interval = val;
-
-       return count;
-}
-
-static DEVICE_ATTR(bus, S_IRUGO, show_bus, NULL);
 static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
 static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static DEVICE_ATTR(class, S_IRUGO, show_class, NULL);
 static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
-static DEVICE_ATTR(features, S_IRUGO, show_features, NULL);
-static DEVICE_ATTR(manufacturer, S_IRUGO, show_manufacturer, NULL);
-static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
-static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
-
-static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
-                  show_idle_timeout, store_idle_timeout);
-static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
-                  show_sniff_max_interval, store_sniff_max_interval);
-static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
-                  show_sniff_min_interval, store_sniff_min_interval);
 
 static struct attribute *bt_host_attrs[] = {
-       &dev_attr_bus.attr,
        &dev_attr_type.attr,
        &dev_attr_name.attr,
-       &dev_attr_class.attr,
        &dev_attr_address.attr,
-       &dev_attr_features.attr,
-       &dev_attr_manufacturer.attr,
-       &dev_attr_hci_version.attr,
-       &dev_attr_hci_revision.attr,
-       &dev_attr_idle_timeout.attr,
-       &dev_attr_sniff_max_interval.attr,
-       &dev_attr_sniff_min_interval.attr,
        NULL
 };
 
@@ -396,141 +204,6 @@ static struct device_type bt_host = {
        .release = bt_host_release,
 };
 
-static int inquiry_cache_show(struct seq_file *f, void *p)
-{
-       struct hci_dev *hdev = f->private;
-       struct discovery_state *cache = &hdev->discovery;
-       struct inquiry_entry *e;
-
-       hci_dev_lock(hdev);
-
-       list_for_each_entry(e, &cache->all, all) {
-               struct inquiry_data *data = &e->data;
-               seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
-                          &data->bdaddr,
-                          data->pscan_rep_mode, data->pscan_period_mode,
-                          data->pscan_mode, data->dev_class[2],
-                          data->dev_class[1], data->dev_class[0],
-                          __le16_to_cpu(data->clock_offset),
-                          data->rssi, data->ssp_mode, e->timestamp);
-       }
-
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
-       .open           = inquiry_cache_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int blacklist_show(struct seq_file *f, void *p)
-{
-       struct hci_dev *hdev = f->private;
-       struct bdaddr_list *b;
-
-       hci_dev_lock(hdev);
-
-       list_for_each_entry(b, &hdev->blacklist, list)
-               seq_printf(f, "%pMR\n", &b->bdaddr);
-
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int blacklist_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
-       .open           = blacklist_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static void print_bt_uuid(struct seq_file *f, u8 *uuid)
-{
-       u32 data0, data5;
-       u16 data1, data2, data3, data4;
-
-       data5 = get_unaligned_le32(uuid);
-       data4 = get_unaligned_le16(uuid + 4);
-       data3 = get_unaligned_le16(uuid + 6);
-       data2 = get_unaligned_le16(uuid + 8);
-       data1 = get_unaligned_le16(uuid + 10);
-       data0 = get_unaligned_le32(uuid + 12);
-
-       seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
-                  data0, data1, data2, data3, data4, data5);
-}
-
-static int uuids_show(struct seq_file *f, void *p)
-{
-       struct hci_dev *hdev = f->private;
-       struct bt_uuid *uuid;
-
-       hci_dev_lock(hdev);
-
-       list_for_each_entry(uuid, &hdev->uuids, list)
-               print_bt_uuid(f, uuid->uuid);
-
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int uuids_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, uuids_show, inode->i_private);
-}
-
-static const struct file_operations uuids_fops = {
-       .open           = uuids_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-static int auto_accept_delay_set(void *data, u64 val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-
-       hdev->auto_accept_delay = val;
-
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-static int auto_accept_delay_get(void *data, u64 *val)
-{
-       struct hci_dev *hdev = data;
-
-       hci_dev_lock(hdev);
-
-       *val = hdev->auto_accept_delay;
-
-       hci_dev_unlock(hdev);
-
-       return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
-                       auto_accept_delay_set, "%llu\n");
-
 void hci_init_sysfs(struct hci_dev *hdev)
 {
        struct device *dev = &hdev->dev;
@@ -542,52 +215,8 @@ void hci_init_sysfs(struct hci_dev *hdev)
        device_initialize(dev);
 }
 
-int hci_add_sysfs(struct hci_dev *hdev)
-{
-       struct device *dev = &hdev->dev;
-       int err;
-
-       BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
-       dev_set_name(dev, "%s", hdev->name);
-
-       err = device_add(dev);
-       if (err < 0)
-               return err;
-
-       if (!bt_debugfs)
-               return 0;
-
-       hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
-       if (!hdev->debugfs)
-               return 0;
-
-       debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
-                           hdev, &inquiry_cache_fops);
-
-       debugfs_create_file("blacklist", 0444, hdev->debugfs,
-                           hdev, &blacklist_fops);
-
-       debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
-
-       debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
-                           &auto_accept_delay_fops);
-       return 0;
-}
-
-void hci_del_sysfs(struct hci_dev *hdev)
-{
-       BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
-
-       debugfs_remove_recursive(hdev->debugfs);
-
-       device_del(&hdev->dev);
-}
-
 int __init bt_sysfs_init(void)
 {
-       bt_debugfs = debugfs_create_dir("bluetooth", NULL);
-
        bt_class = class_create(THIS_MODULE, "bluetooth");
 
        return PTR_ERR_OR_ZERO(bt_class);
@@ -596,6 +225,4 @@ int __init bt_sysfs_init(void)
 void bt_sysfs_cleanup(void)
 {
        class_destroy(bt_class);
-
-       debugfs_remove_recursive(bt_debugfs);
 }
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index bdc35a7a7feeaf4ac7a918547e82d5d19a7ef537..292e619db8961c82e7c3aa7f3280cb4236176ab8 100644 (file)
@@ -767,10 +767,10 @@ static int hidp_setup_hid(struct hidp_session *session,
        strncpy(hid->name, req->name, sizeof(req->name) - 1);
 
        snprintf(hid->phys, sizeof(hid->phys), "%pMR",
-                &bt_sk(session->ctrl_sock->sk)->src);
+                &l2cap_pi(session->ctrl_sock->sk)->chan->src);
 
        snprintf(hid->uniq, sizeof(hid->uniq), "%pMR",
-                &bt_sk(session->ctrl_sock->sk)->dst);
+                &l2cap_pi(session->ctrl_sock->sk)->chan->dst);
 
        hid->dev.parent = &session->conn->hcon->dev;
        hid->ll_driver = &hidp_hid_driver;
@@ -1283,23 +1283,29 @@ static int hidp_session_thread(void *arg)
 static int hidp_verify_sockets(struct socket *ctrl_sock,
                               struct socket *intr_sock)
 {
+       struct l2cap_chan *ctrl_chan, *intr_chan;
        struct bt_sock *ctrl, *intr;
        struct hidp_session *session;
 
        if (!l2cap_is_socket(ctrl_sock) || !l2cap_is_socket(intr_sock))
                return -EINVAL;
 
+       ctrl_chan = l2cap_pi(ctrl_sock->sk)->chan;
+       intr_chan = l2cap_pi(intr_sock->sk)->chan;
+
+       if (bacmp(&ctrl_chan->src, &intr_chan->src) ||
+           bacmp(&ctrl_chan->dst, &intr_chan->dst))
+               return -ENOTUNIQ;
+
        ctrl = bt_sk(ctrl_sock->sk);
        intr = bt_sk(intr_sock->sk);
 
-       if (bacmp(&ctrl->src, &intr->src) || bacmp(&ctrl->dst, &intr->dst))
-               return -ENOTUNIQ;
        if (ctrl->sk.sk_state != BT_CONNECTED ||
            intr->sk.sk_state != BT_CONNECTED)
                return -EBADFD;
 
        /* early session check, we check again during session registration */
-       session = hidp_session_find(&ctrl->dst);
+       session = hidp_session_find(&ctrl_chan->dst);
        if (session) {
                hidp_session_put(session);
                return -EEXIST;
@@ -1332,7 +1338,7 @@ int hidp_connection_add(struct hidp_connadd_req *req,
        if (!conn)
                return -EBADFD;
 
-       ret = hidp_session_new(&session, &bt_sk(ctrl_sock->sk)->dst, ctrl_sock,
+       ret = hidp_session_new(&session, &chan->dst, ctrl_sock,
                               intr_sock, req, conn);
        if (ret)
                goto out_conn;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 9e6cc355310504ae965035e033bdac7d5008527b..ab5241400cf78a9d7371d7a866d58aeba0d8043a 100644 (file)
@@ -182,7 +182,7 @@ struct hidp_session {
 };
 
 /* HIDP init defines */
-extern int __init hidp_init_sockets(void);
-extern void __exit hidp_cleanup_sockets(void);
+int __init hidp_init_sockets(void);
+void __exit hidp_cleanup_sockets(void);
 
 #endif /* __HIDP_H */
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 63fa11109a1c391725d5efec2075c1524d9f1a1a..0cef677078381315c7ce3e58abb6573136bc227b 100644 (file)
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
-#include <net/bluetooth/smp.h>
-#include <net/bluetooth/a2mp.h>
-#include <net/bluetooth/amp.h>
+
+#include "smp.h"
+#include "a2mp.h"
+#include "amp.h"
 
 bool disable_ertm;
 
-static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
-static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
+static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
+static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_CONNLESS, };
 
 static LIST_HEAD(chan_list);
 static DEFINE_RWLOCK(chan_list_lock);
@@ -58,6 +59,18 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
                     struct sk_buff_head *skbs, u8 event);
 
+static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
+{
+       if (hcon->type == LE_LINK) {
+               if (type == ADDR_LE_DEV_PUBLIC)
+                       return BDADDR_LE_PUBLIC;
+               else
+                       return BDADDR_LE_RANDOM;
+       }
+
+       return BDADDR_BREDR;
+}
+
 /* ---- L2CAP channels ---- */
 
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
@@ -148,7 +161,7 @@ static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
        struct l2cap_chan *c;
 
        list_for_each_entry(c, &chan_list, global_l) {
-               if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
+               if (c->sport == psm && !bacmp(&c->src, src))
                        return c;
        }
        return NULL;
@@ -210,38 +223,25 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
        return 0;
 }
 
-static void __l2cap_state_change(struct l2cap_chan *chan, int state)
+static void l2cap_state_change(struct l2cap_chan *chan, int state)
 {
        BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
               state_to_string(state));
 
        chan->state = state;
-       chan->ops->state_change(chan, state);
-}
-
-static void l2cap_state_change(struct l2cap_chan *chan, int state)
-{
-       struct sock *sk = chan->sk;
-
-       lock_sock(sk);
-       __l2cap_state_change(chan, state);
-       release_sock(sk);
+       chan->ops->state_change(chan, state, 0);
 }
 
-static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
+static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
+                                               int state, int err)
 {
-       struct sock *sk = chan->sk;
-
-       sk->sk_err = err;
+       chan->state = state;
+       chan->ops->state_change(chan, chan->state, err);
 }
 
 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
 {
-       struct sock *sk = chan->sk;
-
-       lock_sock(sk);
-       __l2cap_chan_set_err(chan, err);
-       release_sock(sk);
+       chan->ops->state_change(chan, chan->state, err);
 }
 
 static void __set_retrans_timer(struct l2cap_chan *chan)
@@ -620,10 +620,8 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 {
        struct l2cap_conn *conn = chan->conn;
-       struct sock *sk = chan->sk;
 
-       BT_DBG("chan %p state %s sk %p", chan, state_to_string(chan->state),
-              sk);
+       BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
 
        switch (chan->state) {
        case BT_LISTEN:
@@ -634,7 +632,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
        case BT_CONFIG:
                if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
                    conn->hcon->type == ACL_LINK) {
-                       __set_chan_timer(chan, sk->sk_sndtimeo);
+                       __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
                        l2cap_send_disconn_req(chan, reason);
                } else
                        l2cap_chan_del(chan, reason);
@@ -646,10 +644,11 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
                        struct l2cap_conn_rsp rsp;
                        __u16 result;
 
-                       if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
+                       if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
                                result = L2CAP_CR_SEC_BLOCK;
                        else
                                result = L2CAP_CR_BAD_PSM;
+
                        l2cap_state_change(chan, BT_DISCONN);
 
                        rsp.scid   = cpu_to_le16(chan->dcid);
@@ -676,7 +675,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 
 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
 {
-       if (chan->chan_type == L2CAP_CHAN_RAW) {
+       switch (chan->chan_type) {
+       case L2CAP_CHAN_RAW:
                switch (chan->sec_level) {
                case BT_SECURITY_HIGH:
                        return HCI_AT_DEDICATED_BONDING_MITM;
@@ -685,15 +685,29 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
                default:
                        return HCI_AT_NO_BONDING;
                }
-       } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
-               if (chan->sec_level == BT_SECURITY_LOW)
-                       chan->sec_level = BT_SECURITY_SDP;
-
+               break;
+       case L2CAP_CHAN_CONN_LESS:
+               if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_3DSP)) {
+                       if (chan->sec_level == BT_SECURITY_LOW)
+                               chan->sec_level = BT_SECURITY_SDP;
+               }
                if (chan->sec_level == BT_SECURITY_HIGH)
                        return HCI_AT_NO_BONDING_MITM;
                else
                        return HCI_AT_NO_BONDING;
-       } else {
+               break;
+       case L2CAP_CHAN_CONN_ORIENTED:
+               if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
+                       if (chan->sec_level == BT_SECURITY_LOW)
+                               chan->sec_level = BT_SECURITY_SDP;
+
+                       if (chan->sec_level == BT_SECURITY_HIGH)
+                               return HCI_AT_NO_BONDING_MITM;
+                       else
+                               return HCI_AT_NO_BONDING;
+               }
+               /* fall through */
+       default:
                switch (chan->sec_level) {
                case BT_SECURITY_HIGH:
                        return HCI_AT_GENERAL_BONDING_MITM;
@@ -702,6 +716,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
                default:
                        return HCI_AT_NO_BONDING;
                }
+               break;
        }
 }
 
@@ -1015,14 +1030,29 @@ static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
 static bool __amp_capable(struct l2cap_chan *chan)
 {
        struct l2cap_conn *conn = chan->conn;
+       struct hci_dev *hdev;
+       bool amp_available = false;
 
-       if (enable_hs &&
-           hci_amp_capable() &&
-           chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED &&
-           conn->fixed_chan_mask & L2CAP_FC_A2MP)
-               return true;
-       else
+       if (!conn->hs_enabled)
                return false;
+
+       if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
+               return false;
+
+       read_lock(&hci_dev_list_lock);
+       list_for_each_entry(hdev, &hci_dev_list, list) {
+               if (hdev->amp_type != AMP_TYPE_BREDR &&
+                   test_bit(HCI_UP, &hdev->flags)) {
+                       amp_available = true;
+                       break;
+               }
+       }
+       read_unlock(&hci_dev_list_lock);
+
+       if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
+               return amp_available;
+
+       return false;
 }
 
 static bool l2cap_check_efs(struct l2cap_chan *chan)
@@ -1186,7 +1216,6 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
 
 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
 {
-       struct sock *sk = chan->sk;
        struct l2cap_conn *conn = chan->conn;
        struct l2cap_disconn_req req;
 
@@ -1209,10 +1238,7 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
        l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
                       sizeof(req), &req);
 
-       lock_sock(sk);
-       __l2cap_state_change(chan, BT_DISCONN);
-       __l2cap_chan_set_err(chan, err);
-       release_sock(sk);
+       l2cap_state_change_and_error(chan, BT_DISCONN, err);
 }
 
 /* ---- L2CAP connections ---- */
@@ -1225,8 +1251,6 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
        mutex_lock(&conn->chan_lock);
 
        list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
-               struct sock *sk = chan->sk;
-
                l2cap_chan_lock(chan);
 
                if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -1258,19 +1282,16 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
                        rsp.dcid = cpu_to_le16(chan->scid);
 
                        if (l2cap_chan_check_security(chan)) {
-                               lock_sock(sk);
-                               if (test_bit(BT_SK_DEFER_SETUP,
-                                            &bt_sk(sk)->flags)) {
+                               if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
                                        rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
                                        rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
                                        chan->ops->defer(chan);
 
                                } else {
-                                       __l2cap_state_change(chan, BT_CONFIG);
+                                       l2cap_state_change(chan, BT_CONFIG);
                                        rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
                                        rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
                                }
-                               release_sock(sk);
                        } else {
                                rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
                                rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
@@ -1309,8 +1330,6 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
        read_lock(&chan_list_lock);
 
        list_for_each_entry(c, &chan_list, global_l) {
-               struct sock *sk = c->sk;
-
                if (state && c->state != state)
                        continue;
 
@@ -1319,16 +1338,16 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
                        int src_any, dst_any;
 
                        /* Exact match. */
-                       src_match = !bacmp(&bt_sk(sk)->src, src);
-                       dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+                       src_match = !bacmp(&c->src, src);
+                       dst_match = !bacmp(&c->dst, dst);
                        if (src_match && dst_match) {
                                read_unlock(&chan_list_lock);
                                return c;
                        }
 
                        /* Closest match */
-                       src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
-                       dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+                       src_any = !bacmp(&c->src, BDADDR_ANY);
+                       dst_any = !bacmp(&c->dst, BDADDR_ANY);
                        if ((src_match && dst_any) || (src_any && dst_match) ||
                            (src_any && dst_any))
                                c1 = c;
@@ -1342,14 +1361,15 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
 
 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 {
-       struct sock *parent;
+       struct hci_conn *hcon = conn->hcon;
        struct l2cap_chan *chan, *pchan;
+       u8 dst_type;
 
        BT_DBG("");
 
        /* Check if we have socket listening on cid */
        pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
-                                         conn->src, conn->dst);
+                                         &hcon->src, &hcon->dst);
        if (!pchan)
                return;
 
@@ -1357,9 +1377,13 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
        if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
                return;
 
-       parent = pchan->sk;
+       dst_type = bdaddr_type(hcon, hcon->dst_type);
+
+       /* If device is blocked, do not create a channel for it */
+       if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, dst_type))
+               return;
 
-       lock_sock(parent);
+       l2cap_chan_lock(pchan);
 
        chan = pchan->ops->new_connection(pchan);
        if (!chan)
@@ -1367,13 +1391,15 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 
        chan->dcid = L2CAP_CID_ATT;
 
-       bacpy(&bt_sk(chan->sk)->src, conn->src);
-       bacpy(&bt_sk(chan->sk)->dst, conn->dst);
+       bacpy(&chan->src, &hcon->src);
+       bacpy(&chan->dst, &hcon->dst);
+       chan->src_type = bdaddr_type(hcon, hcon->src_type);
+       chan->dst_type = dst_type;
 
        __l2cap_chan_add(conn, chan);
 
 clean:
-       release_sock(parent);
+       l2cap_chan_unlock(pchan);
 }
 
 static void l2cap_conn_ready(struct l2cap_conn *conn)
@@ -1408,12 +1434,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
                                l2cap_chan_ready(chan);
 
                } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
-                       struct sock *sk = chan->sk;
-                       __clear_chan_timer(chan);
-                       lock_sock(sk);
-                       __l2cap_state_change(chan, BT_CONNECTED);
-                       sk->sk_state_change(sk);
-                       release_sock(sk);
+                       l2cap_chan_ready(chan);
 
                } else if (chan->state == BT_CONNECT) {
                        l2cap_do_start(chan);
@@ -1633,11 +1654,12 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
                break;
        }
 
-       conn->src = &hcon->hdev->bdaddr;
-       conn->dst = &hcon->dst;
-
        conn->feat_mask = 0;
 
+       if (hcon->type == ACL_LINK)
+               conn->hs_enabled = test_bit(HCI_HS_ENABLED,
+                                           &hcon->hdev->dev_flags);
+
        spin_lock_init(&conn->lock);
        mutex_init(&conn->chan_lock);
 
@@ -1688,8 +1710,6 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
        read_lock(&chan_list_lock);
 
        list_for_each_entry(c, &chan_list, global_l) {
-               struct sock *sk = c->sk;
-
                if (state && c->state != state)
                        continue;
 
@@ -1698,16 +1718,16 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
                        int src_any, dst_any;
 
                        /* Exact match. */
-                       src_match = !bacmp(&bt_sk(sk)->src, src);
-                       dst_match = !bacmp(&bt_sk(sk)->dst, dst);
+                       src_match = !bacmp(&c->src, src);
+                       dst_match = !bacmp(&c->dst, dst);
                        if (src_match && dst_match) {
                                read_unlock(&chan_list_lock);
                                return c;
                        }
 
                        /* Closest match */
-                       src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
-                       dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
+                       src_any = !bacmp(&c->src, BDADDR_ANY);
+                       dst_any = !bacmp(&c->dst, BDADDR_ANY);
                        if ((src_match && dst_any) || (src_any && dst_match) ||
                            (src_any && dst_any))
                                c1 = c;
@@ -1722,18 +1742,16 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                       bdaddr_t *dst, u8 dst_type)
 {
-       struct sock *sk = chan->sk;
-       bdaddr_t *src = &bt_sk(sk)->src;
        struct l2cap_conn *conn;
        struct hci_conn *hcon;
        struct hci_dev *hdev;
        __u8 auth_type;
        int err;
 
-       BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src, dst,
+       BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
               dst_type, __le16_to_cpu(psm));
 
-       hdev = hci_get_route(dst, src);
+       hdev = hci_get_route(dst, &chan->src);
        if (!hdev)
                return -EHOSTUNREACH;
 
@@ -1790,9 +1808,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
        }
 
        /* Set destination address and psm */
-       lock_sock(sk);
-       bacpy(&bt_sk(sk)->dst, dst);
-       release_sock(sk);
+       bacpy(&chan->dst, dst);
+       chan->dst_type = dst_type;
 
        chan->psm = psm;
        chan->dcid = cid;
@@ -1825,7 +1842,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
        }
 
        /* Update source addr of the socket */
-       bacpy(src, conn->src);
+       bacpy(&chan->src, &hcon->src);
+       chan->src_type = bdaddr_type(hcon, hcon->src_type);
 
        l2cap_chan_unlock(chan);
        l2cap_chan_add(conn, chan);
@@ -1835,7 +1853,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
        hci_conn_drop(hcon);
 
        l2cap_state_change(chan, BT_CONNECT);
-       __set_chan_timer(chan, sk->sk_sndtimeo);
+       __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
 
        if (hcon->state == BT_CONNECTED) {
                if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
@@ -1855,38 +1873,6 @@ done:
        return err;
 }
 
-int __l2cap_wait_ack(struct sock *sk)
-{
-       struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-       DECLARE_WAITQUEUE(wait, current);
-       int err = 0;
-       int timeo = HZ/5;
-
-       add_wait_queue(sk_sleep(sk), &wait);
-       set_current_state(TASK_INTERRUPTIBLE);
-       while (chan->unacked_frames > 0 && chan->conn) {
-               if (!timeo)
-                       timeo = HZ/5;
-
-               if (signal_pending(current)) {
-                       err = sock_intr_errno(timeo);
-                       break;
-               }
-
-               release_sock(sk);
-               timeo = schedule_timeout(timeo);
-               lock_sock(sk);
-               set_current_state(TASK_INTERRUPTIBLE);
-
-               err = sock_error(sk);
-               if (err)
-                       break;
-       }
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(sk_sleep(sk), &wait);
-       return err;
-}
-
 static void l2cap_monitor_timeout(struct work_struct *work)
 {
        struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
@@ -2263,7 +2249,8 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
        int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
        struct l2cap_hdr *lh;
 
-       BT_DBG("chan %p len %zu priority %u", chan, len, priority);
+       BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan,
+              __le16_to_cpu(chan->psm), len, priority);
 
        count = min_t(unsigned int, (conn->mtu - hlen), len);
 
@@ -2278,7 +2265,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->cid = cpu_to_le16(chan->dcid);
        lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
-       put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
+       put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
 
        err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
        if (unlikely(err < 0)) {
@@ -2826,17 +2813,16 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
        mutex_lock(&conn->chan_lock);
 
        list_for_each_entry(chan, &conn->chan_l, list) {
-               struct sock *sk = chan->sk;
                if (chan->chan_type != L2CAP_CHAN_RAW)
                        continue;
 
-               /* Don't send frame to the socket it came from */
-               if (skb->sk == sk)
+               /* Don't send frame to the channel it came from */
+               if (bt_cb(skb)->chan == chan)
                        continue;
+
                nskb = skb_clone(skb, GFP_KERNEL);
                if (!nskb)
                        continue;
-
                if (chan->ops->recv(chan, nskb))
                        kfree_skb(nskb);
        }
@@ -3043,8 +3029,8 @@ int l2cap_ertm_init(struct l2cap_chan *chan)
 
        skb_queue_head_init(&chan->tx_q);
 
-       chan->local_amp_id = 0;
-       chan->move_id = 0;
+       chan->local_amp_id = AMP_ID_BREDR;
+       chan->move_id = AMP_ID_BREDR;
        chan->move_state = L2CAP_MOVE_STABLE;
        chan->move_role = L2CAP_MOVE_ROLE_NONE;
 
@@ -3084,20 +3070,20 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
        }
 }
 
-static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
+static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
 {
-       return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
+       return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
 }
 
-static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
+static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
 {
-       return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
+       return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
 }
 
 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
                                      struct l2cap_conf_rfc *rfc)
 {
-       if (chan->local_amp_id && chan->hs_hcon) {
+       if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
                u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
 
                /* Class 1 devices must have ERTM timeouts
@@ -3135,7 +3121,7 @@ static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
 {
        if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
-           __l2cap_ews_supported(chan)) {
+           __l2cap_ews_supported(chan->conn)) {
                /* use extended control field */
                set_bit(FLAG_EXT_CTRL, &chan->flags);
                chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
@@ -3165,7 +3151,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
                if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
                        break;
 
-               if (__l2cap_efs_supported(chan))
+               if (__l2cap_efs_supported(chan->conn))
                        set_bit(FLAG_EFS_ENABLE, &chan->flags);
 
                /* fall through */
@@ -3317,7 +3303,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
                        break;
 
                case L2CAP_CONF_EWS:
-                       if (!enable_hs)
+                       if (!chan->conn->hs_enabled)
                                return -ECONNREFUSED;
 
                        set_bit(FLAG_EXT_CTRL, &chan->flags);
@@ -3349,7 +3335,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
                }
 
                if (remote_efs) {
-                       if (__l2cap_efs_supported(chan))
+                       if (__l2cap_efs_supported(chan->conn))
                                set_bit(FLAG_EFS_ENABLE, &chan->flags);
                        else
                                return -ECONNREFUSED;
@@ -3715,7 +3701,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
        struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
        struct l2cap_conn_rsp rsp;
        struct l2cap_chan *chan = NULL, *pchan;
-       struct sock *parent, *sk = NULL;
        int result, status = L2CAP_CS_NO_INFO;
 
        u16 dcid = 0, scid = __le16_to_cpu(req->scid);
@@ -3724,16 +3709,15 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
        BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
 
        /* Check if we have socket listening on psm */
-       pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
+       pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
+                                        &conn->hcon->dst);
        if (!pchan) {
                result = L2CAP_CR_BAD_PSM;
                goto sendresp;
        }
 
-       parent = pchan->sk;
-
        mutex_lock(&conn->chan_lock);
-       lock_sock(parent);
+       l2cap_chan_lock(pchan);
 
        /* Check if the ACL is secure enough (if not SDP) */
        if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
@@ -3753,8 +3737,6 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
        if (!chan)
                goto response;
 
-       sk = chan->sk;
-
        /* For certain devices (ex: HID mouse), support for authentication,
         * pairing and bonding is optional. For such devices, in order to avoid
         * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
@@ -3762,8 +3744,10 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
         */
        conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
 
-       bacpy(&bt_sk(sk)->src, conn->src);
-       bacpy(&bt_sk(sk)->dst, conn->dst);
+       bacpy(&chan->src, &conn->hcon->src);
+       bacpy(&chan->dst, &conn->hcon->dst);
+       chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
+       chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
        chan->psm  = psm;
        chan->dcid = scid;
        chan->local_amp_id = amp_id;
@@ -3772,14 +3756,14 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
 
        dcid = chan->scid;
 
-       __set_chan_timer(chan, sk->sk_sndtimeo);
+       __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
 
        chan->ident = cmd->ident;
 
        if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
                if (l2cap_chan_check_security(chan)) {
-                       if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
-                               __l2cap_state_change(chan, BT_CONNECT2);
+                       if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
+                               l2cap_state_change(chan, BT_CONNECT2);
                                result = L2CAP_CR_PEND;
                                status = L2CAP_CS_AUTHOR_PEND;
                                chan->ops->defer(chan);
@@ -3788,28 +3772,28 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
                                 * The connection will succeed after the
                                 * physical link is up.
                                 */
-                               if (amp_id) {
-                                       __l2cap_state_change(chan, BT_CONNECT2);
-                                       result = L2CAP_CR_PEND;
-                               } else {
-                                       __l2cap_state_change(chan, BT_CONFIG);
+                               if (amp_id == AMP_ID_BREDR) {
+                                       l2cap_state_change(chan, BT_CONFIG);
                                        result = L2CAP_CR_SUCCESS;
+                               } else {
+                                       l2cap_state_change(chan, BT_CONNECT2);
+                                       result = L2CAP_CR_PEND;
                                }
                                status = L2CAP_CS_NO_INFO;
                        }
                } else {
-                       __l2cap_state_change(chan, BT_CONNECT2);
+                       l2cap_state_change(chan, BT_CONNECT2);
                        result = L2CAP_CR_PEND;
                        status = L2CAP_CS_AUTHEN_PEND;
                }
        } else {
-               __l2cap_state_change(chan, BT_CONNECT2);
+               l2cap_state_change(chan, BT_CONNECT2);
                result = L2CAP_CR_PEND;
                status = L2CAP_CS_NO_INFO;
        }
 
 response:
-       release_sock(parent);
+       l2cap_chan_unlock(pchan);
        mutex_unlock(&conn->chan_lock);
 
 sendresp:
@@ -3891,13 +3875,13 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
        if (scid) {
                chan = __l2cap_get_chan_by_scid(conn, scid);
                if (!chan) {
-                       err = -EFAULT;
+                       err = -EBADSLT;
                        goto unlock;
                }
        } else {
                chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
                if (!chan) {
-                       err = -EFAULT;
+                       err = -EBADSLT;
                        goto unlock;
                }
        }
@@ -3965,6 +3949,18 @@ static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
                                            L2CAP_CONF_SUCCESS, flags), data);
 }
 
+static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
+                                  u16 scid, u16 dcid)
+{
+       struct l2cap_cmd_rej_cid rej;
+
+       rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
+       rej.scid = __cpu_to_le16(scid);
+       rej.dcid = __cpu_to_le16(dcid);
+
+       l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
+}
+
 static inline int l2cap_config_req(struct l2cap_conn *conn,
                                   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
                                   u8 *data)
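
cmd_reject_invalid_cid() above factors out building and sending an L2CAP command reject with the invalid-CID reason. The sketch below shows the same pack-and-convert shape in plain userspace C; the struct layout and the 0x0002 reason code follow the usual L2CAP convention, but the code is only an illustration and uses <endian.h> helpers in place of the kernel's cpu_to_le16.

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical wire layout of an "invalid CID" command reject. */
    struct cmd_rej_cid {
            uint16_t reason;
            uint16_t scid;
            uint16_t dcid;
    } __attribute__((packed));

    static size_t build_cmd_reject(uint8_t *buf, uint16_t scid, uint16_t dcid)
    {
            struct cmd_rej_cid rej = {
                    .reason = htole16(0x0002),      /* invalid CID in request */
                    .scid   = htole16(scid),
                    .dcid   = htole16(dcid),
            };

            memcpy(buf, &rej, sizeof(rej));
            return sizeof(rej);
    }

    int main(void)
    {
            uint8_t buf[6];
            size_t len = build_cmd_reject(buf, 0x0040, 0x0041);

            for (size_t i = 0; i < len; i++)
                    printf("%02x ", buf[i]);
            printf("\n");
            return 0;
    }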
@@ -3984,18 +3980,14 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
        BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
 
        chan = l2cap_get_chan_by_scid(conn, dcid);
-       if (!chan)
-               return -ENOENT;
+       if (!chan) {
+               cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
+               return 0;
+       }
 
        if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
-               struct l2cap_cmd_rej_cid rej;
-
-               rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
-               rej.scid = cpu_to_le16(chan->scid);
-               rej.dcid = cpu_to_le16(chan->dcid);
-
-               l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
-                              sizeof(rej), &rej);
+               cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+                                      chan->dcid);
                goto unlock;
        }
 
@@ -4198,7 +4190,6 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
        struct l2cap_disconn_rsp rsp;
        u16 dcid, scid;
        struct l2cap_chan *chan;
-       struct sock *sk;
 
        if (cmd_len != sizeof(*req))
                return -EPROTO;
@@ -4213,20 +4204,17 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
        chan = __l2cap_get_chan_by_scid(conn, dcid);
        if (!chan) {
                mutex_unlock(&conn->chan_lock);
+               cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
                return 0;
        }
 
        l2cap_chan_lock(chan);
 
-       sk = chan->sk;
-
        rsp.dcid = cpu_to_le16(chan->scid);
        rsp.scid = cpu_to_le16(chan->dcid);
        l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
 
-       lock_sock(sk);
-       sk->sk_shutdown = SHUTDOWN_MASK;
-       release_sock(sk);
+       chan->ops->set_shutdown(chan);
 
        l2cap_chan_hold(chan);
        l2cap_chan_del(chan, ECONNRESET);
@@ -4303,7 +4291,7 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
                if (!disable_ertm)
                        feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
                                | L2CAP_FEAT_FCS;
-               if (enable_hs)
+               if (conn->hs_enabled)
                        feat_mask |= L2CAP_FEAT_EXT_FLOW
                                | L2CAP_FEAT_EXT_WINDOW;
 
@@ -4314,7 +4302,7 @@ static inline int l2cap_information_req(struct l2cap_conn *conn,
                u8 buf[12];
                struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
 
-               if (enable_hs)
+               if (conn->hs_enabled)
                        l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
                else
                        l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
@@ -4411,7 +4399,7 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,
        if (cmd_len != sizeof(*req))
                return -EPROTO;
 
-       if (!enable_hs)
+       if (!conn->hs_enabled)
                return -EINVAL;
 
        psm = le16_to_cpu(req->psm);
@@ -4420,7 +4408,7 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,
        BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
 
        /* For controller id 0 make BR/EDR connection */
-       if (req->amp_id == HCI_BREDR_ID) {
+       if (req->amp_id == AMP_ID_BREDR) {
                l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
                              req->amp_id);
                return 0;
@@ -4442,10 +4430,13 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,
                struct amp_mgr *mgr = conn->hcon->amp_mgr;
                struct hci_conn *hs_hcon;
 
-               hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, conn->dst);
+               hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
+                                                 &conn->hcon->dst);
                if (!hs_hcon) {
                        hci_dev_put(hdev);
-                       return -EFAULT;
+                       cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+                                              chan->dcid);
+                       return 0;
                }
 
                BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
@@ -4469,7 +4460,7 @@ error:
        l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
                       sizeof(rsp), &rsp);
 
-       return -EFAULT;
+       return 0;
 }
 
 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
@@ -4655,7 +4646,7 @@ void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
 
        if (chan->state != BT_CONNECTED) {
                /* Ignore logical link if channel is on BR/EDR */
-               if (chan->local_amp_id)
+               if (chan->local_amp_id != AMP_ID_BREDR)
                        l2cap_logical_finish_create(chan, hchan);
        } else {
                l2cap_logical_finish_move(chan, hchan);
@@ -4666,7 +4657,7 @@ void l2cap_move_start(struct l2cap_chan *chan)
 {
        BT_DBG("chan %p", chan);
 
-       if (chan->local_amp_id == HCI_BREDR_ID) {
+       if (chan->local_amp_id == AMP_ID_BREDR) {
                if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
                        return;
                chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
@@ -4723,7 +4714,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
                               sizeof(rsp), &rsp);
 
                if (result == L2CAP_CR_SUCCESS) {
-                       __l2cap_state_change(chan, BT_CONFIG);
+                       l2cap_state_change(chan, BT_CONFIG);
                        set_bit(CONF_REQ_SENT, &chan->conf_state);
                        l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
                                       L2CAP_CONF_REQ,
@@ -4838,7 +4829,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
 
        BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
 
-       if (!enable_hs)
+       if (!conn->hs_enabled)
                return -EINVAL;
 
        chan = l2cap_get_chan_by_dcid(conn, icid);
@@ -4865,7 +4856,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
                goto send_move_response;
        }
 
-       if (req->dest_amp_id) {
+       if (req->dest_amp_id != AMP_ID_BREDR) {
                struct hci_dev *hdev;
                hdev = hci_dev_get(req->dest_amp_id);
                if (!hdev || hdev->dev_type != HCI_AMP ||
@@ -4885,7 +4876,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
         */
        if ((__chan_is_moving(chan) ||
             chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
-           bacmp(conn->src, conn->dst) > 0) {
+           bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
                result = L2CAP_MR_COLLISION;
                goto send_move_response;
        }
@@ -4895,7 +4886,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
        chan->move_id = req->dest_amp_id;
        icid = chan->dcid;
 
-       if (!req->dest_amp_id) {
+       if (req->dest_amp_id == AMP_ID_BREDR) {
                /* Moving to BR/EDR */
                if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
                        chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
@@ -5087,7 +5078,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
        if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
                if (result == L2CAP_MC_CONFIRMED) {
                        chan->local_amp_id = chan->move_id;
-                       if (!chan->local_amp_id)
+                       if (chan->local_amp_id == AMP_ID_BREDR)
                                __release_logical_link(chan);
                } else {
                        chan->move_id = chan->local_amp_id;
@@ -5127,7 +5118,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
        if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
                chan->local_amp_id = chan->move_id;
 
-               if (!chan->local_amp_id && chan->hs_hchan)
+               if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
                        __release_logical_link(chan);
 
                l2cap_move_done(chan);
@@ -5219,7 +5210,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
 
        case L2CAP_CONN_RSP:
        case L2CAP_CREATE_CHAN_RSP:
-               err = l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
+               l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
                break;
 
        case L2CAP_CONF_REQ:
@@ -5227,7 +5218,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
                break;
 
        case L2CAP_CONF_RSP:
-               err = l2cap_config_rsp(conn, cmd, cmd_len, data);
+               l2cap_config_rsp(conn, cmd, cmd_len, data);
                break;
 
        case L2CAP_DISCONN_REQ:
@@ -5235,7 +5226,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
                break;
 
        case L2CAP_DISCONN_RSP:
-               err = l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
+               l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
                break;
 
        case L2CAP_ECHO_REQ:
@@ -5250,7 +5241,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
                break;
 
        case L2CAP_INFO_RSP:
-               err = l2cap_information_rsp(conn, cmd, cmd_len, data);
+               l2cap_information_rsp(conn, cmd, cmd_len, data);
                break;
 
        case L2CAP_CREATE_CHAN_REQ:
@@ -5262,7 +5253,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
                break;
 
        case L2CAP_MOVE_CHAN_RSP:
-               err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
+               l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
                break;
 
        case L2CAP_MOVE_CHAN_CFM:
@@ -5270,7 +5261,7 @@ static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
                break;
 
        case L2CAP_MOVE_CHAN_CFM_RSP:
-               err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
+               l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
                break;
 
        default:
@@ -5304,51 +5295,48 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
                                        struct sk_buff *skb)
 {
-       u8 *data = skb->data;
-       int len = skb->len;
-       struct l2cap_cmd_hdr cmd;
+       struct hci_conn *hcon = conn->hcon;
+       struct l2cap_cmd_hdr *cmd;
+       u16 len;
        int err;
 
-       l2cap_raw_recv(conn, skb);
+       if (hcon->type != LE_LINK)
+               goto drop;
 
-       while (len >= L2CAP_CMD_HDR_SIZE) {
-               u16 cmd_len;
-               memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
-               data += L2CAP_CMD_HDR_SIZE;
-               len  -= L2CAP_CMD_HDR_SIZE;
+       if (skb->len < L2CAP_CMD_HDR_SIZE)
+               goto drop;
 
-               cmd_len = le16_to_cpu(cmd.len);
+       cmd = (void *) skb->data;
+       skb_pull(skb, L2CAP_CMD_HDR_SIZE);
 
-               BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
-                      cmd.ident);
+       len = le16_to_cpu(cmd->len);
 
-               if (cmd_len > len || !cmd.ident) {
-                       BT_DBG("corrupted command");
-                       break;
-               }
+       BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
 
-               err = l2cap_le_sig_cmd(conn, &cmd, data);
-               if (err) {
-                       struct l2cap_cmd_rej_unk rej;
+       if (len != skb->len || !cmd->ident) {
+               BT_DBG("corrupted command");
+               goto drop;
+       }
 
-                       BT_ERR("Wrong link type (%d)", err);
+       err = l2cap_le_sig_cmd(conn, cmd, skb->data);
+       if (err) {
+               struct l2cap_cmd_rej_unk rej;
 
-                       /* FIXME: Map err to a valid reason */
-                       rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
-                       l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
-                                      sizeof(rej), &rej);
-               }
+               BT_ERR("Wrong link type (%d)", err);
 
-               data += cmd_len;
-               len  -= cmd_len;
+               rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+               l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
+                              sizeof(rej), &rej);
        }
 
+drop:
        kfree_skb(skb);
 }
 
 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
                                     struct sk_buff *skb)
 {
+       struct hci_conn *hcon = conn->hcon;
        u8 *data = skb->data;
        int len = skb->len;
        struct l2cap_cmd_hdr cmd;
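
The LE signaling path above is rewritten to expect exactly one command per frame: it drops frames that arrive on the wrong link type, frames that are too short, frames whose header length disagrees with the remaining payload, and commands with a zero identifier. A small userspace parser with the same validation order is sketched below; the header struct and the sample frames are made up for the example.

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Minimal stand-in for the signaling command header: code, identifier
     * and little-endian payload length. */
    struct cmd_hdr {
            uint8_t  code;
            uint8_t  ident;
            uint16_t len;
    } __attribute__((packed));

    /* Parse a frame that may carry exactly one signaling command.
     * Returns 0 on success, -1 when the frame should be dropped. */
    static int parse_single_command(const uint8_t *frame, size_t frame_len)
    {
            struct cmd_hdr hdr;
            uint16_t payload_len;

            if (frame_len < sizeof(hdr))
                    return -1;                      /* drop: truncated header */

            memcpy(&hdr, frame, sizeof(hdr));
            payload_len = le16toh(hdr.len);

            if (payload_len != frame_len - sizeof(hdr) || !hdr.ident)
                    return -1;                      /* drop: corrupted command */

            printf("code 0x%02x ident 0x%02x len %u\n",
                   hdr.code, hdr.ident, payload_len);
            return 0;
    }

    int main(void)
    {
            const uint8_t good[] = { 0x12, 0x01, 0x02, 0x00, 0xaa, 0xbb };
            const uint8_t bad[]  = { 0x12, 0x00, 0x05, 0x00, 0xaa };

            printf("good: %d\n", parse_single_command(good, sizeof(good)));
            printf("bad:  %d\n", parse_single_command(bad, sizeof(bad)));
            return 0;
    }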
@@ -5356,6 +5344,9 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
 
        l2cap_raw_recv(conn, skb);
 
+       if (hcon->type != ACL_LINK)
+               goto drop;
+
        while (len >= L2CAP_CMD_HDR_SIZE) {
                u16 cmd_len;
                memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
@@ -5378,7 +5369,6 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
 
                        BT_ERR("Wrong link type (%d)", err);
 
-                       /* FIXME: Map err to a valid reason */
                        rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
                        l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
                                       sizeof(rej), &rej);
@@ -5388,6 +5378,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
                len  -= cmd_len;
        }
 
+drop:
        kfree_skb(skb);
 }
 
@@ -5784,7 +5775,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
                               struct sk_buff *skb, u8 event)
 {
        int err = 0;
-       bool skb_in_use = 0;
+       bool skb_in_use = false;
 
        BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
               event);
@@ -5805,7 +5796,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
                                                           control->txseq);
 
                        chan->buffer_seq = chan->expected_tx_seq;
-                       skb_in_use = 1;
+                       skb_in_use = true;
 
                        err = l2cap_reassemble_sdu(chan, skb, control);
                        if (err)
@@ -5841,7 +5832,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
                         * current frame is stored for later use.
                         */
                        skb_queue_tail(&chan->srej_q, skb);
-                       skb_in_use = 1;
+                       skb_in_use = true;
                        BT_DBG("Queued %p (queue len %d)", skb,
                               skb_queue_len(&chan->srej_q));
 
@@ -5919,7 +5910,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
 {
        int err = 0;
        u16 txseq = control->txseq;
-       bool skb_in_use = 0;
+       bool skb_in_use = false;
 
        BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
               event);
@@ -5931,7 +5922,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
                        /* Keep frame for reassembly later */
                        l2cap_pass_to_tx(chan, control);
                        skb_queue_tail(&chan->srej_q, skb);
-                       skb_in_use = 1;
+                       skb_in_use = true;
                        BT_DBG("Queued %p (queue len %d)", skb,
                               skb_queue_len(&chan->srej_q));
 
@@ -5942,7 +5933,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
 
                        l2cap_pass_to_tx(chan, control);
                        skb_queue_tail(&chan->srej_q, skb);
-                       skb_in_use = 1;
+                       skb_in_use = true;
                        BT_DBG("Queued %p (queue len %d)", skb,
                               skb_queue_len(&chan->srej_q));
 
@@ -5957,7 +5948,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
                         * the missing frames.
                         */
                        skb_queue_tail(&chan->srej_q, skb);
-                       skb_in_use = 1;
+                       skb_in_use = true;
                        BT_DBG("Queued %p (queue len %d)", skb,
                               skb_queue_len(&chan->srej_q));
 
@@ -5971,7 +5962,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
                         * SREJ'd frames.
                         */
                        skb_queue_tail(&chan->srej_q, skb);
-                       skb_in_use = 1;
+                       skb_in_use = true;
                        BT_DBG("Queued %p (queue len %d)", skb,
                               skb_queue_len(&chan->srej_q));
 
@@ -6380,9 +6371,13 @@ done:
 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
                                  struct sk_buff *skb)
 {
+       struct hci_conn *hcon = conn->hcon;
        struct l2cap_chan *chan;
 
-       chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
+       if (hcon->type != ACL_LINK)
+               goto drop;
+
+       chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst);
        if (!chan)
                goto drop;
 
@@ -6394,6 +6389,10 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
        if (chan->imtu < skb->len)
                goto drop;
 
+       /* Store remote BD_ADDR and PSM for msg_name */
+       bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
+       bt_cb(skb)->psm = psm;
+
        if (!chan->ops->recv(chan, skb))
                return;
 
@@ -6404,15 +6403,22 @@ drop:
 static void l2cap_att_channel(struct l2cap_conn *conn,
                              struct sk_buff *skb)
 {
+       struct hci_conn *hcon = conn->hcon;
        struct l2cap_chan *chan;
 
+       if (hcon->type != LE_LINK)
+               goto drop;
+
        chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
-                                        conn->src, conn->dst);
+                                        &hcon->src, &hcon->dst);
        if (!chan)
                goto drop;
 
        BT_DBG("chan %p, len %d", chan, skb->len);
 
+       if (hci_blacklist_lookup(hcon->hdev, &hcon->dst, hcon->dst_type))
+               goto drop;
+
        if (chan->imtu < skb->len)
                goto drop;
 
@@ -6441,9 +6447,6 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
        BT_DBG("len %d, cid 0x%4.4x", len, cid);
 
        switch (cid) {
-       case L2CAP_CID_LE_SIGNALING:
-               l2cap_le_sig_channel(conn, skb);
-               break;
        case L2CAP_CID_SIGNALING:
                l2cap_sig_channel(conn, skb);
                break;
@@ -6458,6 +6461,10 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
                l2cap_att_channel(conn, skb);
                break;
 
+       case L2CAP_CID_LE_SIGNALING:
+               l2cap_le_sig_channel(conn, skb);
+               break;
+
        case L2CAP_CID_SMP:
                if (smp_sig_channel(conn, skb))
                        l2cap_conn_del(conn->hcon, EACCES);
@@ -6481,17 +6488,15 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
        /* Find listening sockets and check their link_mode */
        read_lock(&chan_list_lock);
        list_for_each_entry(c, &chan_list, global_l) {
-               struct sock *sk = c->sk;
-
                if (c->state != BT_LISTEN)
                        continue;
 
-               if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
+               if (!bacmp(&c->src, &hdev->bdaddr)) {
                        lm1 |= HCI_LM_ACCEPT;
                        if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
                                lm1 |= HCI_LM_MASTER;
                        exact++;
-               } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+               } else if (!bacmp(&c->src, BDADDR_ANY)) {
                        lm2 |= HCI_LM_ACCEPT;
                        if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
                                lm2 |= HCI_LM_MASTER;
@@ -6597,11 +6602,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 
                if (!status && (chan->state == BT_CONNECTED ||
                                chan->state == BT_CONFIG)) {
-                       struct sock *sk = chan->sk;
-
-                       clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
-                       sk->sk_state_change(sk);
-
+                       chan->ops->resume(chan);
                        l2cap_check_encryption(chan, encrypt);
                        l2cap_chan_unlock(chan);
                        continue;
@@ -6614,32 +6615,26 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
                                __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
                        }
                } else if (chan->state == BT_CONNECT2) {
-                       struct sock *sk = chan->sk;
                        struct l2cap_conn_rsp rsp;
                        __u16 res, stat;
 
-                       lock_sock(sk);
-
                        if (!status) {
-                               if (test_bit(BT_SK_DEFER_SETUP,
-                                            &bt_sk(sk)->flags)) {
+                               if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
                                        res = L2CAP_CR_PEND;
                                        stat = L2CAP_CS_AUTHOR_PEND;
                                        chan->ops->defer(chan);
                                } else {
-                                       __l2cap_state_change(chan, BT_CONFIG);
+                                       l2cap_state_change(chan, BT_CONFIG);
                                        res = L2CAP_CR_SUCCESS;
                                        stat = L2CAP_CS_NO_INFO;
                                }
                        } else {
-                               __l2cap_state_change(chan, BT_DISCONN);
+                               l2cap_state_change(chan, BT_DISCONN);
                                __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
                                res = L2CAP_CR_SEC_BLOCK;
                                stat = L2CAP_CS_NO_INFO;
                        }
 
-                       release_sock(sk);
-
                        rsp.scid   = cpu_to_le16(chan->dcid);
                        rsp.dcid   = cpu_to_le16(chan->scid);
                        rsp.result = cpu_to_le16(res);
@@ -6756,9 +6751,13 @@ int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
                conn->rx_len -= skb->len;
 
                if (!conn->rx_len) {
-                       /* Complete frame received */
-                       l2cap_recv_frame(conn, conn->rx_skb);
+                       /* Complete frame received. l2cap_recv_frame
+                        * takes ownership of the skb so set the connection's
+                        * rx_skb pointer to NULL first.
+                        */
+                       struct sk_buff *rx_skb = conn->rx_skb;
                        conn->rx_skb = NULL;
+                       l2cap_recv_frame(conn, rx_skb);
                }
                break;
        }
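
The change above detaches conn->rx_skb before calling l2cap_recv_frame(), which takes ownership of the buffer. The general pattern (clear the stored pointer first, then hand the buffer to the consumer) is shown below as a plain userspace sketch, with malloc/free standing in for skb ownership.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct conn {
            char *rx_buf;           /* partially reassembled frame, if any */
    };

    /* The consumer takes ownership of buf and eventually frees it (in the
     * kernel it might equally requeue the skb somewhere else). */
    static void deliver_frame(char *buf)
    {
            printf("delivering: %s\n", buf);
            free(buf);
    }

    /* Mirror of the reworked completion path: detach the buffer from the
     * connection *before* handing it to the consumer, so no other path
     * can see a pointer to memory the connection no longer owns. */
    static void frame_complete(struct conn *conn)
    {
            char *buf = conn->rx_buf;

            conn->rx_buf = NULL;
            deliver_frame(buf);
    }

    int main(void)
    {
            struct conn c = { .rx_buf = strdup("hello") };

            frame_complete(&c);
            return 0;
    }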
@@ -6775,10 +6774,8 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
        read_lock(&chan_list_lock);
 
        list_for_each_entry(c, &chan_list, global_l) {
-               struct sock *sk = c->sk;
-
                seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
-                          &bt_sk(sk)->src, &bt_sk(sk)->dst,
+                          &c->src, &c->dst,
                           c->state, __le16_to_cpu(c->psm),
                           c->scid, c->dcid, c->imtu, c->omtu,
                           c->sec_level, c->mode);
@@ -6811,12 +6808,11 @@ int __init l2cap_init(void)
        if (err < 0)
                return err;
 
-       if (bt_debugfs) {
-               l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
-                                                   NULL, &l2cap_debugfs_fops);
-               if (!l2cap_debugfs)
-                       BT_ERR("Failed to create L2CAP debug file");
-       }
+       if (IS_ERR_OR_NULL(bt_debugfs))
+               return 0;
+
+       l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
+                                           NULL, &l2cap_debugfs_fops);
 
        return 0;
 }
index 0098af80b21327045c99a38771ed8c95da2544c2..7cc24d263caaab45af9d8bbe5be09ebda20e87a4 100644 (file)
@@ -32,7 +32,8 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
-#include <net/bluetooth/smp.h>
+
+#include "smp.h"
 
 static struct bt_sock_list l2cap_sk_list = {
        .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
@@ -68,6 +69,18 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
        if (la.l2_cid && la.l2_psm)
                return -EINVAL;
 
+       if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
+               return -EINVAL;
+
+       if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
+               /* Connection oriented channels are not supported on LE */
+               if (la.l2_psm)
+                       return -EINVAL;
+               /* We only allow ATT user space socket */
+               if (la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+                       return -EINVAL;
+       }
+
        lock_sock(sk);
 
        if (sk->sk_state != BT_OPEN) {
@@ -99,11 +112,20 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
        if (err < 0)
                goto done;
 
-       if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
-           __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
-               chan->sec_level = BT_SECURITY_SDP;
+       switch (chan->chan_type) {
+       case L2CAP_CHAN_CONN_LESS:
+               if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_3DSP)
+                       chan->sec_level = BT_SECURITY_SDP;
+               break;
+       case L2CAP_CHAN_CONN_ORIENTED:
+               if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
+                   __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
+                       chan->sec_level = BT_SECURITY_SDP;
+               break;
+       }
 
-       bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+       bacpy(&chan->src, &la.l2_bdaddr);
+       chan->src_type = la.l2_bdaddr_type;
 
        chan->state = BT_BOUND;
        sk->sk_state = BT_BOUND;
@@ -134,6 +156,47 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
        if (la.l2_cid && la.l2_psm)
                return -EINVAL;
 
+       if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
+               return -EINVAL;
+
+       /* Check that the socket wasn't bound to something that
+        * conflicts with the address given to connect(). If chan->src
+        * is BDADDR_ANY it means bind() was never used, in which case
+        * chan->src_type and la.l2_bdaddr_type do not need to match.
+        */
+       if (chan->src_type == BDADDR_BREDR && bacmp(&chan->src, BDADDR_ANY) &&
+           bdaddr_type_is_le(la.l2_bdaddr_type)) {
+               /* Old user space versions will try to incorrectly bind
+                * the ATT socket using BDADDR_BREDR. We need to accept
+                * this and fix up the source address type only when
+                * both the source CID and destination CID indicate
+                * ATT. Anything else is an invalid combination.
+                */
+               if (chan->scid != L2CAP_CID_ATT ||
+                   la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+                       return -EINVAL;
+
+               /* We don't have the hdev available here to make a
+                * better decision on random vs public, but since all
+                * user space versions that exhibit this issue do not
+                * support random local addresses anyway, assuming
+                * public here is good enough.
+                */
+               chan->src_type = BDADDR_LE_PUBLIC;
+       }
+
+       if (chan->src_type != BDADDR_BREDR && la.l2_bdaddr_type == BDADDR_BREDR)
+               return -EINVAL;
+
+       if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
+               /* Connection oriented channels are not supported on LE */
+               if (la.l2_psm)
+                       return -EINVAL;
+               /* We only allow ATT user space socket */
+               if (la.l2_cid != __constant_cpu_to_le16(L2CAP_CID_ATT))
+                       return -EINVAL;
+       }
+
        err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
                                 &la.l2_bdaddr, la.l2_bdaddr_type);
        if (err)
@@ -265,12 +328,14 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
 
        if (peer) {
                la->l2_psm = chan->psm;
-               bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
+               bacpy(&la->l2_bdaddr, &chan->dst);
                la->l2_cid = cpu_to_le16(chan->dcid);
+               la->l2_bdaddr_type = chan->dst_type;
        } else {
                la->l2_psm = chan->sport;
-               bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
+               bacpy(&la->l2_bdaddr, &chan->src);
                la->l2_cid = cpu_to_le16(chan->scid);
+               la->l2_bdaddr_type = chan->src_type;
        }
 
        return 0;
@@ -445,11 +510,6 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 
        case BT_CHANNEL_POLICY:
-               if (!enable_hs) {
-                       err = -ENOPROTOOPT;
-                       break;
-               }
-
                if (put_user(chan->chan_policy, (u32 __user *) optval))
                        err = -EFAULT;
                break;
@@ -665,10 +725,13 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                        break;
                }
 
-               if (opt)
+               if (opt) {
                        set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
-               else
+                       set_bit(FLAG_DEFER_SETUP, &chan->flags);
+               } else {
                        clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+                       clear_bit(FLAG_DEFER_SETUP, &chan->flags);
+               }
                break;
 
        case BT_FLUSHABLE:
@@ -683,7 +746,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                }
 
                if (opt == BT_FLUSHABLE_OFF) {
-                       struct l2cap_conn *conn = chan->conn;
+                       conn = chan->conn;
                        /* proceed further only when we have l2cap_conn and
                           No Flush support in the LM */
                        if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
@@ -720,11 +783,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case BT_CHANNEL_POLICY:
-               if (!enable_hs) {
-                       err = -ENOPROTOOPT;
-                       break;
-               }
-
                if (get_user(opt, (u32 __user *) optval)) {
                        err = -EFAULT;
                        break;
@@ -777,6 +835,12 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        if (sk->sk_state != BT_CONNECTED)
                return -ENOTCONN;
 
+       lock_sock(sk);
+       err = bt_sock_wait_ready(sk, msg->msg_flags);
+       release_sock(sk);
+       if (err)
+               return err;
+
        l2cap_chan_lock(chan);
        err = l2cap_chan_send(chan, msg, len, sk->sk_priority);
        l2cap_chan_unlock(chan);
@@ -799,8 +863,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                pi->chan->state = BT_CONFIG;
 
                __l2cap_connect_rsp_defer(pi->chan);
-               release_sock(sk);
-               return 0;
+               err = 0;
+               goto done;
        }
 
        release_sock(sk);
@@ -856,6 +920,38 @@ static void l2cap_sock_kill(struct sock *sk)
        sock_put(sk);
 }
 
+static int __l2cap_wait_ack(struct sock *sk)
+{
+       struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+       DECLARE_WAITQUEUE(wait, current);
+       int err = 0;
+       int timeo = HZ/5;
+
+       add_wait_queue(sk_sleep(sk), &wait);
+       set_current_state(TASK_INTERRUPTIBLE);
+       while (chan->unacked_frames > 0 && chan->conn) {
+               if (!timeo)
+                       timeo = HZ/5;
+
+               if (signal_pending(current)) {
+                       err = sock_intr_errno(timeo);
+                       break;
+               }
+
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               lock_sock(sk);
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               err = sock_error(sk);
+               if (err)
+                       break;
+       }
+       set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+       return err;
+}
+
 static int l2cap_sock_shutdown(struct socket *sock, int how)
 {
        struct sock *sk = sock->sk;
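
__l2cap_wait_ack() above moves from l2cap_core.c into the socket layer: it loops until all ERTM frames are acknowledged, re-arming a short timeout on every wakeup and bailing out on signals or socket errors. The userspace sketch below reproduces only the wait-loop shape with a pthreads condition variable and a roughly 200 ms timeout; the socket locking and signal handling are deliberately left out.

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  acked = PTHREAD_COND_INITIALIZER;
    static int unacked_frames = 3;

    /* Called when a peer acknowledgement arrives; wakes up any waiter. */
    static void frame_acked(void)
    {
            pthread_mutex_lock(&lock);
            if (unacked_frames > 0)
                    unacked_frames--;
            pthread_cond_broadcast(&acked);
            pthread_mutex_unlock(&lock);
    }

    /* Loop until everything is acknowledged, re-arming a short timeout on
     * every wakeup. Returns 0 on success, an errno value otherwise. */
    static int wait_for_acks(void)
    {
            int err = 0;

            pthread_mutex_lock(&lock);
            while (unacked_frames > 0) {
                    struct timespec deadline;

                    clock_gettime(CLOCK_REALTIME, &deadline);
                    deadline.tv_nsec += 200 * 1000 * 1000;
                    if (deadline.tv_nsec >= 1000000000L) {
                            deadline.tv_sec++;
                            deadline.tv_nsec -= 1000000000L;
                    }

                    err = pthread_cond_timedwait(&acked, &lock, &deadline);
                    if (err && err != ETIMEDOUT)
                            break;
                    err = 0;
            }
            pthread_mutex_unlock(&lock);

            return err;
    }

    static void *acker(void *arg)
    {
            (void)arg;
            for (int i = 0; i < 3; i++)
                    frame_acked();
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, acker, NULL);
            printf("wait_for_acks() = %d\n", wait_for_acks());
            pthread_join(t, NULL);
            return 0;
    }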
@@ -946,6 +1042,8 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
 {
        struct sock *sk, *parent = chan->data;
 
+       lock_sock(parent);
+
        /* Check for backlog size */
        if (sk_acceptq_is_full(parent)) {
                BT_DBG("backlog full %d", parent->sk_ack_backlog);
@@ -963,18 +1061,19 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
 
        bt_accept_enqueue(parent, sk);
 
+       release_sock(parent);
+
        return l2cap_pi(sk)->chan;
 }
 
 static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-       int err;
        struct sock *sk = chan->data;
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       int err;
 
        lock_sock(sk);
 
-       if (pi->rx_busy_skb) {
+       if (l2cap_pi(sk)->rx_busy_skb) {
                err = -ENOMEM;
                goto done;
        }
@@ -990,9 +1089,9 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
         * acked and reassembled until there is buffer space
         * available.
         */
-       if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
-               pi->rx_busy_skb = skb;
-               l2cap_chan_busy(pi->chan, 1);
+       if (err < 0 && chan->mode == L2CAP_MODE_ERTM) {
+               l2cap_pi(sk)->rx_busy_skb = skb;
+               l2cap_chan_busy(chan, 1);
                err = 0;
        }
 
@@ -1050,26 +1149,33 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
        release_sock(sk);
 }
 
-static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
+static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
+                                      int err)
 {
        struct sock *sk = chan->data;
 
        sk->sk_state = state;
+
+       if (err)
+               sk->sk_err = err;
 }
 
 static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
                                               unsigned long len, int nb)
 {
+       struct sock *sk = chan->data;
        struct sk_buff *skb;
        int err;
 
        l2cap_chan_unlock(chan);
-       skb = bt_skb_send_alloc(chan->sk, len, nb, &err);
+       skb = bt_skb_send_alloc(sk, len, nb, &err);
        l2cap_chan_lock(chan);
 
        if (!skb)
                return ERR_PTR(err);
 
+       bt_cb(skb)->chan = chan;
+
        return skb;
 }
 
@@ -1095,11 +1201,39 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
 
 static void l2cap_sock_defer_cb(struct l2cap_chan *chan)
 {
-       struct sock *sk = chan->data;
-       struct sock *parent = bt_sk(sk)->parent;
+       struct sock *parent, *sk = chan->data;
+
+       lock_sock(sk);
 
+       parent = bt_sk(sk)->parent;
        if (parent)
                parent->sk_data_ready(parent, 0);
+
+       release_sock(sk);
+}
+
+static void l2cap_sock_resume_cb(struct l2cap_chan *chan)
+{
+       struct sock *sk = chan->data;
+
+       clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
+       sk->sk_state_change(sk);
+}
+
+static void l2cap_sock_set_shutdown_cb(struct l2cap_chan *chan)
+{
+       struct sock *sk = chan->data;
+
+       lock_sock(sk);
+       sk->sk_shutdown = SHUTDOWN_MASK;
+       release_sock(sk);
+}
+
+static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan)
+{
+       struct sock *sk = chan->data;
+
+       return sk->sk_sndtimeo;
 }
 
 static struct l2cap_ops l2cap_chan_ops = {
@@ -1111,6 +1245,9 @@ static struct l2cap_ops l2cap_chan_ops = {
        .state_change   = l2cap_sock_state_change_cb,
        .ready          = l2cap_sock_ready_cb,
        .defer          = l2cap_sock_defer_cb,
+       .resume         = l2cap_sock_resume_cb,
+       .set_shutdown   = l2cap_sock_set_shutdown_cb,
+       .get_sndtimeo   = l2cap_sock_get_sndtimeo_cb,
        .alloc_skb      = l2cap_sock_alloc_skb_cb,
 };
 
@@ -1120,6 +1257,7 @@ static void l2cap_sock_destruct(struct sock *sk)
 
        if (l2cap_pi(sk)->chan)
                l2cap_chan_put(l2cap_pi(sk)->chan);
+
        if (l2cap_pi(sk)->rx_busy_skb) {
                kfree_skb(l2cap_pi(sk)->rx_busy_skb);
                l2cap_pi(sk)->rx_busy_skb = NULL;
@@ -1129,10 +1267,22 @@ static void l2cap_sock_destruct(struct sock *sk)
        skb_queue_purge(&sk->sk_write_queue);
 }
 
+static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name,
+                              int *msg_namelen)
+{
+       struct sockaddr_l2 *la = (struct sockaddr_l2 *) msg_name;
+
+       memset(la, 0, sizeof(struct sockaddr_l2));
+       la->l2_family = AF_BLUETOOTH;
+       la->l2_psm = bt_cb(skb)->psm;
+       bacpy(&la->l2_bdaddr, &bt_cb(skb)->bdaddr);
+
+       *msg_namelen = sizeof(struct sockaddr_l2);
+}
+
 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
 {
-       struct l2cap_pinfo *pi = l2cap_pi(sk);
-       struct l2cap_chan *chan = pi->chan;
+       struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 
        BT_DBG("sk %p", sk);
 
@@ -1156,13 +1306,13 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
 
                security_sk_clone(parent, sk);
        } else {
-
                switch (sk->sk_type) {
                case SOCK_RAW:
                        chan->chan_type = L2CAP_CHAN_RAW;
                        break;
                case SOCK_DGRAM:
                        chan->chan_type = L2CAP_CHAN_CONN_LESS;
+                       bt_sk(sk)->skb_msg_name = l2cap_skb_msg_name;
                        break;
                case SOCK_SEQPACKET:
                case SOCK_STREAM:
@@ -1224,8 +1374,6 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
 
        l2cap_chan_hold(chan);
 
-       chan->sk = sk;
-
        l2cap_pi(sk)->chan = chan;
 
        return sk;
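
The skb_msg_name hook and the SOCK_DGRAM assignment above are what let recvfrom() on a connectionless L2CAP socket report the sender's PSM and address. A standalone user-space sketch of that behaviour, not taken from this commit, assuming the BlueZ <bluetooth/bluetooth.h> and <bluetooth/l2cap.h> headers (link with -lbluetooth) and an arbitrary example PSM of 0x1001:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

int main(void)
{
        struct sockaddr_l2 local, remote;
        socklen_t alen = sizeof(remote);
        char buf[672], addr[18];
        ssize_t n;
        int sk;

        sk = socket(PF_BLUETOOTH, SOCK_DGRAM, BTPROTO_L2CAP);
        if (sk < 0)
                return 1;

        memset(&local, 0, sizeof(local));
        local.l2_family = AF_BLUETOOTH;
        local.l2_psm = htobs(0x1001);           /* example PSM, not from the patch */
        bacpy(&local.l2_bdaddr, BDADDR_ANY);

        if (bind(sk, (struct sockaddr *) &local, sizeof(local)) < 0)
                return 1;

        /* Blocks until a datagram arrives; the kernel fills 'remote'
         * through the skb_msg_name callback added above.
         */
        n = recvfrom(sk, buf, sizeof(buf), 0,
                     (struct sockaddr *) &remote, &alen);
        if (n >= 0) {
                ba2str(&remote.l2_bdaddr, addr);
                printf("%zd bytes from %s, PSM 0x%04x\n",
                       n, addr, btohs(remote.l2_psm));
        }

        close(sk);
        return 0;
}
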
index fedc5399d4658687e7646f3768d8dd47f2017e33..22cf54710744803506b266ab4d9af75611492ecb 100644 (file)
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/mgmt.h>
-#include <net/bluetooth/smp.h>
 
-bool enable_hs;
+#include "smp.h"
 
 #define MGMT_VERSION   1
-#define MGMT_REVISION  3
+#define MGMT_REVISION  4
 
 static const u16 mgmt_commands[] = {
        MGMT_OP_READ_INDEX_LIST,
@@ -76,6 +75,10 @@ static const u16 mgmt_commands[] = {
        MGMT_OP_BLOCK_DEVICE,
        MGMT_OP_UNBLOCK_DEVICE,
        MGMT_OP_SET_DEVICE_ID,
+       MGMT_OP_SET_ADVERTISING,
+       MGMT_OP_SET_BREDR,
+       MGMT_OP_SET_STATIC_ADDRESS,
+       MGMT_OP_SET_SCAN_PARAMS,
 };
 
 static const u16 mgmt_events[] = {
@@ -181,11 +184,6 @@ static u8 mgmt_status_table[] = {
        MGMT_STATUS_CONNECT_FAILED,     /* MAC Connection Failed */
 };
 
-bool mgmt_valid_hdev(struct hci_dev *hdev)
-{
-       return hdev->dev_type == HCI_BREDR;
-}
-
 static u8 mgmt_status(u8 hci_status)
 {
        if (hci_status < ARRAY_SIZE(mgmt_status_table))
@@ -321,10 +319,8 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
 
        count = 0;
        list_for_each_entry(d, &hci_dev_list, list) {
-               if (!mgmt_valid_hdev(d))
-                       continue;
-
-               count++;
+               if (d->dev_type == HCI_BREDR)
+                       count++;
        }
 
        rp_len = sizeof(*rp) + (2 * count);
@@ -339,11 +335,13 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
                if (test_bit(HCI_SETUP, &d->dev_flags))
                        continue;
 
-               if (!mgmt_valid_hdev(d))
+               if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
                        continue;
 
-               rp->index[count++] = cpu_to_le16(d->id);
-               BT_DBG("Added hci%u", d->id);
+               if (d->dev_type == HCI_BREDR) {
+                       rp->index[count++] = cpu_to_le16(d->id);
+                       BT_DBG("Added hci%u", d->id);
+               }
        }
 
        rp->num_controllers = cpu_to_le16(count);
@@ -366,9 +364,6 @@ static u32 get_supported_settings(struct hci_dev *hdev)
        settings |= MGMT_SETTING_POWERED;
        settings |= MGMT_SETTING_PAIRABLE;
 
-       if (lmp_ssp_capable(hdev))
-               settings |= MGMT_SETTING_SSP;
-
        if (lmp_bredr_capable(hdev)) {
                settings |= MGMT_SETTING_CONNECTABLE;
                if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
@@ -376,13 +371,17 @@ static u32 get_supported_settings(struct hci_dev *hdev)
                settings |= MGMT_SETTING_DISCOVERABLE;
                settings |= MGMT_SETTING_BREDR;
                settings |= MGMT_SETTING_LINK_SECURITY;
-       }
 
-       if (enable_hs)
-               settings |= MGMT_SETTING_HS;
+               if (lmp_ssp_capable(hdev)) {
+                       settings |= MGMT_SETTING_SSP;
+                       settings |= MGMT_SETTING_HS;
+               }
+       }
 
-       if (lmp_le_capable(hdev))
+       if (lmp_le_capable(hdev)) {
                settings |= MGMT_SETTING_LE;
+               settings |= MGMT_SETTING_ADVERTISING;
+       }
 
        return settings;
 }
@@ -406,7 +405,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
        if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
                settings |= MGMT_SETTING_PAIRABLE;
 
-       if (lmp_bredr_capable(hdev))
+       if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
                settings |= MGMT_SETTING_BREDR;
 
        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
@@ -421,6 +420,9 @@ static u32 get_current_settings(struct hci_dev *hdev)
        if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
                settings |= MGMT_SETTING_HS;
 
+       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+               settings |= MGMT_SETTING_ADVERTISING;
+
        return settings;
 }
 
@@ -534,6 +536,156 @@ static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
        return ptr;
 }
 
+static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
+{
+       struct pending_cmd *cmd;
+
+       list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
+               if (cmd->opcode == opcode)
+                       return cmd;
+       }
+
+       return NULL;
+}
+
+static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
+{
+       u8 ad_len = 0;
+       size_t name_len;
+
+       name_len = strlen(hdev->dev_name);
+       if (name_len > 0) {
+               size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
+
+               if (name_len > max_len) {
+                       name_len = max_len;
+                       ptr[1] = EIR_NAME_SHORT;
+               } else
+                       ptr[1] = EIR_NAME_COMPLETE;
+
+               ptr[0] = name_len + 1;
+
+               memcpy(ptr + 2, hdev->dev_name, name_len);
+
+               ad_len += (name_len + 2);
+               ptr += (name_len + 2);
+       }
+
+       return ad_len;
+}
+
+static void update_scan_rsp_data(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_le_set_scan_rsp_data cp;
+       u8 len;
+
+       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+               return;
+
+       memset(&cp, 0, sizeof(cp));
+
+       len = create_scan_rsp_data(hdev, cp.data);
+
+       if (hdev->scan_rsp_data_len == len &&
+           memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
+               return;
+
+       memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+       hdev->scan_rsp_data_len = len;
+
+       cp.length = len;
+
+       hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
+}
+
+static u8 get_adv_discov_flags(struct hci_dev *hdev)
+{
+       struct pending_cmd *cmd;
+
+       /* If there's a pending mgmt command the flags will not yet have
+        * their final values, so check for this first.
+        */
+       cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+       if (cmd) {
+               struct mgmt_mode *cp = cmd->param;
+               if (cp->val == 0x01)
+                       return LE_AD_GENERAL;
+               else if (cp->val == 0x02)
+                       return LE_AD_LIMITED;
+       } else {
+               if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+                       return LE_AD_LIMITED;
+               else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+                       return LE_AD_GENERAL;
+       }
+
+       return 0;
+}
+
+static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
+{
+       u8 ad_len = 0, flags = 0;
+
+       flags |= get_adv_discov_flags(hdev);
+
+       if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+               if (lmp_le_br_capable(hdev))
+                       flags |= LE_AD_SIM_LE_BREDR_CTRL;
+               if (lmp_host_le_br_capable(hdev))
+                       flags |= LE_AD_SIM_LE_BREDR_HOST;
+       } else {
+               flags |= LE_AD_NO_BREDR;
+       }
+
+       if (flags) {
+               BT_DBG("adv flags 0x%02x", flags);
+
+               ptr[0] = 2;
+               ptr[1] = EIR_FLAGS;
+               ptr[2] = flags;
+
+               ad_len += 3;
+               ptr += 3;
+       }
+
+       if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
+               ptr[0] = 2;
+               ptr[1] = EIR_TX_POWER;
+               ptr[2] = (u8) hdev->adv_tx_power;
+
+               ad_len += 3;
+               ptr += 3;
+       }
+
+       return ad_len;
+}
+
+static void update_adv_data(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_le_set_adv_data cp;
+       u8 len;
+
+       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+               return;
+
+       memset(&cp, 0, sizeof(cp));
+
+       len = create_adv_data(hdev, cp.data);
+
+       if (hdev->adv_data_len == len &&
+           memcmp(cp.data, hdev->adv_data, len) == 0)
+               return;
+
+       memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+       hdev->adv_data_len = len;
+
+       cp.length = len;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
+}
+
 static void create_eir(struct hci_dev *hdev, u8 *data)
 {
        u8 *ptr = data;
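
create_adv_data() and create_scan_rsp_data() above both emit the standard length/type/value advertising elements, where the length byte counts the type byte plus the payload. A standalone sketch of that layout, not from this commit; the AD_* constants use the usual assigned numbers that the kernel's EIR_* macros stand for, and the flags, TX power and device name values are made up for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AD_FLAGS          0x01
#define AD_NAME_SHORT     0x08
#define AD_NAME_COMPLETE  0x09
#define AD_TX_POWER       0x0a
#define MAX_AD_LENGTH     31    /* same limit as HCI_MAX_AD_LENGTH */

/* Append one length/type/value element, the way create_adv_data() and
 * create_scan_rsp_data() do, and return the new write position.
 */
static uint8_t *ad_put(uint8_t *ptr, uint8_t type,
                       const void *data, uint8_t len)
{
        ptr[0] = len + 1;               /* length covers type + data */
        ptr[1] = type;
        memcpy(ptr + 2, data, len);
        return ptr + 2 + len;
}

int main(void)
{
        uint8_t adv[MAX_AD_LENGTH], rsp[MAX_AD_LENGTH];
        uint8_t flags = 0x06;           /* example: general discoverable, no BR/EDR */
        int8_t tx_power = 4;            /* example value, dBm */
        const char *name = "example";   /* stands in for hdev->dev_name */
        uint8_t *p;

        p = ad_put(adv, AD_FLAGS, &flags, 1);
        p = ad_put(p, AD_TX_POWER, &tx_power, 1);
        printf("adv data length %ld\n", (long)(p - adv));

        p = ad_put(rsp, AD_NAME_COMPLETE, name, strlen(name));
        printf("scan rsp length %ld\n", (long)(p - rsp));

        return 0;
}
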
@@ -632,6 +784,9 @@ static void update_class(struct hci_request *req)
        if (!hdev_is_powered(hdev))
                return;
 
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+               return;
+
        if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                return;
 
@@ -639,6 +794,9 @@ static void update_class(struct hci_request *req)
        cod[1] = hdev->major_class;
        cod[2] = get_service_classes(hdev);
 
+       if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
+               cod[1] |= 0x20;
+
        if (memcmp(cod, hdev->dev_class, 3) == 0)
                return;
 
@@ -763,18 +921,6 @@ static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
        }
 }
 
-static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
-{
-       struct pending_cmd *cmd;
-
-       list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
-               if (cmd->opcode == opcode)
-                       return cmd;
-       }
-
-       return NULL;
-}
-
 static void mgmt_pending_remove(struct pending_cmd *cmd)
 {
        list_del(&cmd->list);
@@ -804,6 +950,12 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
 
        hci_dev_lock(hdev);
 
+       if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
+               err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
+                                MGMT_STATUS_BUSY);
+               goto failed;
+       }
+
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                cancel_delayed_work(&hdev->power_off);
 
@@ -820,12 +972,6 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
-                                MGMT_STATUS_BUSY);
-               goto failed;
-       }
-
        cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
@@ -883,27 +1029,141 @@ static int new_settings(struct hci_dev *hdev, struct sock *skip)
        return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
 }
 
+struct cmd_lookup {
+       struct sock *sk;
+       struct hci_dev *hdev;
+       u8 mgmt_status;
+};
+
+static void settings_rsp(struct pending_cmd *cmd, void *data)
+{
+       struct cmd_lookup *match = data;
+
+       send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
+
+       list_del(&cmd->list);
+
+       if (match->sk == NULL) {
+               match->sk = cmd->sk;
+               sock_hold(match->sk);
+       }
+
+       mgmt_pending_free(cmd);
+}
+
+static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
+{
+       u8 *status = data;
+
+       cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
+       mgmt_pending_remove(cmd);
+}
+
+static u8 mgmt_bredr_support(struct hci_dev *hdev)
+{
+       if (!lmp_bredr_capable(hdev))
+               return MGMT_STATUS_NOT_SUPPORTED;
+       else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+               return MGMT_STATUS_REJECTED;
+       else
+               return MGMT_STATUS_SUCCESS;
+}
+
+static u8 mgmt_le_support(struct hci_dev *hdev)
+{
+       if (!lmp_le_capable(hdev))
+               return MGMT_STATUS_NOT_SUPPORTED;
+       else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+               return MGMT_STATUS_REJECTED;
+       else
+               return MGMT_STATUS_SUCCESS;
+}
+
+static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
+{
+       struct pending_cmd *cmd;
+       struct mgmt_mode *cp;
+       struct hci_request req;
+       bool changed;
+
+       BT_DBG("status 0x%02x", status);
+
+       hci_dev_lock(hdev);
+
+       cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
+       if (!cmd)
+               goto unlock;
+
+       if (status) {
+               u8 mgmt_err = mgmt_status(status);
+               cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+               clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+               goto remove_cmd;
+       }
+
+       cp = cmd->param;
+       if (cp->val) {
+               changed = !test_and_set_bit(HCI_DISCOVERABLE,
+                                           &hdev->dev_flags);
+
+               if (hdev->discov_timeout > 0) {
+                       int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
+                       queue_delayed_work(hdev->workqueue, &hdev->discov_off,
+                                          to);
+               }
+       } else {
+               changed = test_and_clear_bit(HCI_DISCOVERABLE,
+                                            &hdev->dev_flags);
+       }
+
+       send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
+
+       if (changed)
+               new_settings(hdev, cmd->sk);
+
+       /* When the discoverable mode gets changed, make sure
+        * that the class of device has the limited discoverable
+        * bit correctly set.
+        */
+       hci_req_init(&req, hdev);
+       update_class(&req);
+       hci_req_run(&req, NULL);
+
+remove_cmd:
+       mgmt_pending_remove(cmd);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
                            u16 len)
 {
        struct mgmt_cp_set_discoverable *cp = data;
        struct pending_cmd *cmd;
+       struct hci_request req;
        u16 timeout;
        u8 scan;
        int err;
 
        BT_DBG("request for %s", hdev->name);
 
-       if (!lmp_bredr_capable(hdev))
+       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+           !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
-                                MGMT_STATUS_NOT_SUPPORTED);
+                                 MGMT_STATUS_REJECTED);
 
-       if (cp->val != 0x00 && cp->val != 0x01)
+       if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
                                  MGMT_STATUS_INVALID_PARAMS);
 
        timeout = __le16_to_cpu(cp->timeout);
-       if (!cp->val && timeout > 0)
+
+       /* Disabling discoverable requires that no timeout is set,
+        * and enabling limited discoverable requires a timeout.
+        */
+       if ((cp->val == 0x00 && timeout > 0) ||
+           (cp->val == 0x02 && timeout == 0))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
                                  MGMT_STATUS_INVALID_PARAMS);
 
@@ -931,6 +1191,10 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
        if (!hdev_is_powered(hdev)) {
                bool changed = false;
 
+               /* Setting limited discoverable when powered off is
+                * not a valid operation since it requires a timeout
+                * and so no need to check HCI_LIMITED_DISCOVERABLE.
+                */
                if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
                        change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
                        changed = true;
@@ -946,16 +1210,20 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
                goto failed;
        }
 
-       if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
-               if (hdev->discov_timeout > 0) {
-                       cancel_delayed_work(&hdev->discov_off);
-                       hdev->discov_timeout = 0;
-               }
+       /* If the current mode is the same, then just update the timeout
+        * value with the new value. And if only the timeout gets updated,
+        * then no need for any HCI transactions.
+        */
+       if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
+           (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
+                                         &hdev->dev_flags)) {
+               cancel_delayed_work(&hdev->discov_off);
+               hdev->discov_timeout = timeout;
 
-               if (cp->val && timeout > 0) {
-                       hdev->discov_timeout = timeout;
+               if (cp->val && hdev->discov_timeout > 0) {
+                       int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
                        queue_delayed_work(hdev->workqueue, &hdev->discov_off,
-                               msecs_to_jiffies(hdev->discov_timeout * 1000));
+                                          to);
                }
 
                err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
@@ -968,20 +1236,66 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
                goto failed;
        }
 
+       /* Cancel any potential discoverable timeout that might be
+        * still active and store new timeout value. The arming of
+        * the timeout happens in the complete handler.
+        */
+       cancel_delayed_work(&hdev->discov_off);
+       hdev->discov_timeout = timeout;
+
+       /* Limited discoverable mode */
+       if (cp->val == 0x02)
+               set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+       else
+               clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+
+       hci_req_init(&req, hdev);
+
+       /* The procedure for LE-only controllers is much simpler - just
+        * update the advertising data.
+        */
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+               goto update_ad;
+
        scan = SCAN_PAGE;
 
-       if (cp->val)
+       if (cp->val) {
+               struct hci_cp_write_current_iac_lap hci_cp;
+
+               if (cp->val == 0x02) {
+                       /* Limited discoverable mode */
+                       hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
+                       hci_cp.iac_lap[0] = 0x00;       /* LIAC */
+                       hci_cp.iac_lap[1] = 0x8b;
+                       hci_cp.iac_lap[2] = 0x9e;
+                       hci_cp.iac_lap[3] = 0x33;       /* GIAC */
+                       hci_cp.iac_lap[4] = 0x8b;
+                       hci_cp.iac_lap[5] = 0x9e;
+               } else {
+                       /* General discoverable mode */
+                       hci_cp.num_iac = 1;
+                       hci_cp.iac_lap[0] = 0x33;       /* GIAC */
+                       hci_cp.iac_lap[1] = 0x8b;
+                       hci_cp.iac_lap[2] = 0x9e;
+               }
+
+               hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
+                           (hci_cp.num_iac * 3) + 1, &hci_cp);
+
                scan |= SCAN_INQUIRY;
-       else
-               cancel_delayed_work(&hdev->discov_off);
+       } else {
+               clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+       }
+
+       hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
+
+update_ad:
+       update_adv_data(&req);
 
-       err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+       err = hci_req_run(&req, set_discoverable_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);
 
-       if (cp->val)
-               hdev->discov_timeout = timeout;
-
 failed:
        hci_dev_unlock(hdev);
        return err;
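
The raw bytes written into iac_lap[] above are the little-endian encodings of the two inquiry access code LAPs, LIAC 0x9e8b00 and GIAC 0x9e8b33. A small standalone sketch, not from this commit, that reproduces the same layout:

#include <stdint.h>
#include <stdio.h>

/* Each LAP goes on the wire as three little-endian bytes, which is why
 * limited discoverable mode fills iac_lap[] with 00 8b 9e (LIAC) followed
 * by 33 8b 9e (GIAC).
 */
static void put_lap(uint8_t *dst, uint32_t lap)
{
        dst[0] = lap & 0xff;
        dst[1] = (lap >> 8) & 0xff;
        dst[2] = (lap >> 16) & 0xff;
}

int main(void)
{
        uint8_t iac_lap[6];
        int i;

        put_lap(&iac_lap[0], 0x9e8b00);  /* LIAC */
        put_lap(&iac_lap[3], 0x9e8b33);  /* GIAC */

        for (i = 0; i < 6; i++)
                printf("%02x ", iac_lap[i]);
        printf("\n");                    /* prints: 00 8b 9e 33 8b 9e */

        return 0;
}
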
@@ -993,6 +1307,9 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
        struct hci_cp_write_page_scan_activity acp;
        u8 type;
 
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+               return;
+
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;
 
@@ -1019,9 +1336,55 @@ static void write_fast_connectable(struct hci_request *req, bool enable)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
 }
 
+static u8 get_adv_type(struct hci_dev *hdev)
+{
+       struct pending_cmd *cmd;
+       bool connectable;
+
+       /* If there's a pending mgmt command the flag will not yet have
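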
+        * its final value, so check for this first.
+        */
+       cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+       if (cmd) {
+               struct mgmt_mode *cp = cmd->param;
+               connectable = !!cp->val;
+       } else {
+               connectable = test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+       }
+
+       return connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
+}
+
+static void enable_advertising(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_le_set_adv_param cp;
+       u8 enable = 0x01;
+
+       memset(&cp, 0, sizeof(cp));
+       cp.min_interval = __constant_cpu_to_le16(0x0800);
+       cp.max_interval = __constant_cpu_to_le16(0x0800);
+       cp.type = get_adv_type(hdev);
+       cp.own_address_type = hdev->own_addr_type;
+       cp.channel_map = 0x07;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
+static void disable_advertising(struct hci_request *req)
+{
+       u8 enable = 0x00;
+
+       hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
+}
+
 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
 {
        struct pending_cmd *cmd;
+       struct mgmt_mode *cp;
+       bool changed;
 
        BT_DBG("status 0x%02x", status);
 
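The advertising parameters chosen by enable_advertising() above are in the controller's native units of 0.625 ms, so min_interval = max_interval = 0x0800 amounts to 1.28 s, and channel_map 0x07 enables all three advertising channels (37, 38 and 39). A standalone sketch of that arithmetic, not from this commit:

#include <stdio.h>

int main(void)
{
        unsigned int interval = 0x0800;         /* value used above */
        unsigned int channel_map = 0x07;

        printf("advertising interval: %u units = %.3f ms\n",
               interval, interval * 0.625);
        printf("channel 37: %s, 38: %s, 39: %s\n",
               (channel_map & 0x01) ? "on" : "off",
               (channel_map & 0x02) ? "on" : "off",
               (channel_map & 0x04) ? "on" : "off");
        return 0;
}
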
@@ -1031,14 +1394,56 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
        if (!cmd)
                goto unlock;
 
+       if (status) {
+               u8 mgmt_err = mgmt_status(status);
+               cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+               goto remove_cmd;
+       }
+
+       cp = cmd->param;
+       if (cp->val)
+               changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+       else
+               changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+
        send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
 
+       if (changed)
+               new_settings(hdev, cmd->sk);
+
+remove_cmd:
        mgmt_pending_remove(cmd);
 
 unlock:
        hci_dev_unlock(hdev);
 }
 
+static int set_connectable_update_settings(struct hci_dev *hdev,
+                                          struct sock *sk, u8 val)
+{
+       bool changed = false;
+       int err;
+
+       if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+               changed = true;
+
+       if (val) {
+               set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+       } else {
+               clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+               clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+       }
+
+       err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
+       if (err < 0)
+               return err;
+
+       if (changed)
+               return new_settings(hdev, sk);
+
+       return 0;
+}
+
 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
                           u16 len)
 {
@@ -1050,9 +1455,10 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
 
        BT_DBG("request for %s", hdev->name);
 
-       if (!lmp_bredr_capable(hdev))
+       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
+           !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+                                 MGMT_STATUS_REJECTED);
 
        if (cp->val != 0x00 && cp->val != 0x01)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
@@ -1061,25 +1467,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        if (!hdev_is_powered(hdev)) {
-               bool changed = false;
-
-               if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-                       changed = true;
-
-               if (cp->val) {
-                       set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-               } else {
-                       clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
-                       clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
-               }
-
-               err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
-               if (err < 0)
-                       goto failed;
-
-               if (changed)
-                       err = new_settings(hdev, sk);
-
+               err = set_connectable_update_settings(hdev, sk, cp->val);
                goto failed;
        }
 
@@ -1090,30 +1478,37 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
                goto failed;
        }
 
-       if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
-               err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
-               goto failed;
-       }
-
        cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto failed;
        }
 
-       if (cp->val) {
-               scan = SCAN_PAGE;
-       } else {
-               scan = 0;
+       hci_req_init(&req, hdev);
 
-               if (test_bit(HCI_ISCAN, &hdev->flags) &&
-                   hdev->discov_timeout > 0)
-                       cancel_delayed_work(&hdev->discov_off);
-       }
+       /* If BR/EDR is not enabled and we disable advertising as a
+        * by-product of disabling connectable, we need to update the
+        * advertising flags.
+        */
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+               if (!cp->val) {
+                       clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+                       clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+               }
+               update_adv_data(&req);
+       } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
+               if (cp->val) {
+                       scan = SCAN_PAGE;
+               } else {
+                       scan = 0;
 
-       hci_req_init(&req, hdev);
+                       if (test_bit(HCI_ISCAN, &hdev->flags) &&
+                           hdev->discov_timeout > 0)
+                               cancel_delayed_work(&hdev->discov_off);
+               }
 
-       hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+               hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+       }
 
        /* If we're going from non-connectable to connectable or
         * vice-versa when fast connectable is enabled ensure that fast
@@ -1124,9 +1519,20 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
        if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
                write_fast_connectable(&req, false);
 
+       if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
+           hci_conn_num(hdev, LE_LINK) == 0) {
+               disable_advertising(&req);
+               enable_advertising(&req);
+       }
+
        err = hci_req_run(&req, set_connectable_complete);
-       if (err < 0)
+       if (err < 0) {
                mgmt_pending_remove(cmd);
+               if (err == -ENODATA)
+                       err = set_connectable_update_settings(hdev, sk,
+                                                             cp->val);
+               goto failed;
+       }
 
 failed:
        hci_dev_unlock(hdev);
@@ -1137,6 +1543,7 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
                        u16 len)
 {
        struct mgmt_mode *cp = data;
+       bool changed;
        int err;
 
        BT_DBG("request for %s", hdev->name);
@@ -1148,17 +1555,18 @@ static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        if (cp->val)
-               set_bit(HCI_PAIRABLE, &hdev->dev_flags);
+               changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
        else
-               clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
+               changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
 
        err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
        if (err < 0)
-               goto failed;
+               goto unlock;
 
-       err = new_settings(hdev, sk);
+       if (changed)
+               err = new_settings(hdev, sk);
 
-failed:
+unlock:
        hci_dev_unlock(hdev);
        return err;
 }
@@ -1168,14 +1576,15 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
 {
        struct mgmt_mode *cp = data;
        struct pending_cmd *cmd;
-       u8 val;
+       u8 val, status;
        int err;
 
        BT_DBG("request for %s", hdev->name);
 
-       if (!lmp_bredr_capable(hdev))
+       status = mgmt_bredr_support(hdev);
+       if (status)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
-                                 MGMT_STATUS_NOT_SUPPORTED);
+                                 status);
 
        if (cp->val != 0x00 && cp->val != 0x01)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
@@ -1236,11 +1645,15 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 {
        struct mgmt_mode *cp = data;
        struct pending_cmd *cmd;
-       u8 val;
+       u8 status;
        int err;
 
        BT_DBG("request for %s", hdev->name);
 
+       status = mgmt_bredr_support(hdev);
+       if (status)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
+
        if (!lmp_ssp_capable(hdev))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
                                  MGMT_STATUS_NOT_SUPPORTED);
@@ -1251,14 +1664,20 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 
        hci_dev_lock(hdev);
 
-       val = !!cp->val;
-
        if (!hdev_is_powered(hdev)) {
-               bool changed = false;
+               bool changed;
 
-               if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
-                       change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
-                       changed = true;
+               if (cp->val) {
+                       changed = !test_and_set_bit(HCI_SSP_ENABLED,
+                                                   &hdev->dev_flags);
+               } else {
+                       changed = test_and_clear_bit(HCI_SSP_ENABLED,
+                                                    &hdev->dev_flags);
+                       if (!changed)
+                               changed = test_and_clear_bit(HCI_HS_ENABLED,
+                                                            &hdev->dev_flags);
+                       else
+                               clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
                }
 
                err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
@@ -1271,13 +1690,14 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                goto failed;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
+       if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
+           mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
                                 MGMT_STATUS_BUSY);
                goto failed;
        }
 
-       if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
+       if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
                goto failed;
        }
@@ -1288,7 +1708,7 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                goto failed;
        }
 
-       err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
+       err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
        if (err < 0) {
                mgmt_pending_remove(cmd);
                goto failed;
@@ -1302,23 +1722,90 @@ failed:
 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
 {
        struct mgmt_mode *cp = data;
+       bool changed;
+       u8 status;
+       int err;
 
        BT_DBG("request for %s", hdev->name);
 
-       if (!enable_hs)
+       status = mgmt_bredr_support(hdev);
+       if (status)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
+
+       if (!lmp_ssp_capable(hdev))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
+       if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+                                 MGMT_STATUS_REJECTED);
+
        if (cp->val != 0x00 && cp->val != 0x01)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
                                  MGMT_STATUS_INVALID_PARAMS);
 
-       if (cp->val)
-               set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
-       else
-               clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+       hci_dev_lock(hdev);
+
+       if (cp->val) {
+               changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+       } else {
+               if (hdev_is_powered(hdev)) {
+                       err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+                                        MGMT_STATUS_REJECTED);
+                       goto unlock;
+               }
+
+               changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+       }
+
+       err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
+       if (err < 0)
+               goto unlock;
+
+       if (changed)
+               err = new_settings(hdev, sk);
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
 
-       return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
+static void le_enable_complete(struct hci_dev *hdev, u8 status)
+{
+       struct cmd_lookup match = { NULL, hdev };
+
+       if (status) {
+               u8 mgmt_err = mgmt_status(status);
+
+               mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
+                                    &mgmt_err);
+               return;
+       }
+
+       mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
+
+       new_settings(hdev, match.sk);
+
+       if (match.sk)
+               sock_put(match.sk);
+
+       /* Make sure the controller has a good default for
+        * advertising data. Restrict the update to when LE
+        * has actually been enabled. During power on, the
+        * update in powered_update_hci will take care of it.
+        */
+       if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+               struct hci_request req;
+
+               hci_dev_lock(hdev);
+
+               hci_req_init(&req, hdev);
+               update_adv_data(&req);
+               update_scan_rsp_data(&req);
+               hci_req_run(&req, NULL);
+
+               hci_dev_unlock(hdev);
+       }
 }
 
 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
@@ -1326,6 +1813,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
        struct mgmt_mode *cp = data;
        struct hci_cp_write_le_host_supported hci_cp;
        struct pending_cmd *cmd;
+       struct hci_request req;
        int err;
        u8 val, enabled;
 
@@ -1340,7 +1828,7 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                                  MGMT_STATUS_INVALID_PARAMS);
 
        /* LE-only devices do not allow toggling LE on/off */
-       if (!lmp_bredr_capable(hdev))
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
                return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
                                  MGMT_STATUS_REJECTED);
 
@@ -1357,6 +1845,11 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                        changed = true;
                }
 
+               if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+                       clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+                       changed = true;
+               }
+
                err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
                if (err < 0)
                        goto unlock;
@@ -1367,7 +1860,8 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                goto unlock;
        }
 
-       if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
+       if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
+           mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
                err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
                                 MGMT_STATUS_BUSY);
                goto unlock;
@@ -1379,15 +1873,22 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                goto unlock;
        }
 
+       hci_req_init(&req, hdev);
+
        memset(&hci_cp, 0, sizeof(hci_cp));
 
        if (val) {
                hci_cp.le = val;
                hci_cp.simul = lmp_le_br_capable(hdev);
+       } else {
+               if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+                       disable_advertising(&req);
        }
 
-       err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
-                          &hci_cp);
+       hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
+                   &hci_cp);
+
+       err = hci_req_run(&req, le_enable_complete);
        if (err < 0)
                mgmt_pending_remove(cmd);
 
@@ -1706,6 +2207,12 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
        u16 key_count, expected_len;
        int i;
 
+       BT_DBG("request for %s", hdev->name);
+
+       if (!lmp_bredr_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
        key_count = __le16_to_cpu(cp->key_count);
 
        expected_len = sizeof(*cp) + key_count *
@@ -2515,8 +3022,11 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
                update_eir(&req);
        }
 
+       /* The name is stored in the scan response data and so
+        * no need to update the advertising data here.
+        */
        if (lmp_le_capable(hdev))
-               hci_update_ad(&req);
+               update_scan_rsp_data(&req);
 
        err = hci_req_run(&req, set_name_complete);
        if (err < 0)
@@ -2685,6 +3195,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
        struct hci_request req;
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
+       u8 status;
        int err;
 
        BT_DBG("%s", hdev->name);
@@ -2721,9 +3232,10 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
 
        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
-               if (!lmp_bredr_capable(hdev)) {
+               status = mgmt_bredr_support(hdev);
+               if (status) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
-                                        MGMT_STATUS_NOT_SUPPORTED);
+                                        status);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }
@@ -2745,22 +3257,23 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
 
        case DISCOV_TYPE_LE:
        case DISCOV_TYPE_INTERLEAVED:
-               if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+               status = mgmt_le_support(hdev);
+               if (status) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
-                                        MGMT_STATUS_NOT_SUPPORTED);
+                                        status);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }
 
                if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
-                   !lmp_bredr_capable(hdev)) {
+                   !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                         MGMT_STATUS_NOT_SUPPORTED);
                        mgmt_pending_remove(cmd);
                        goto failed;
                }
 
-               if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
+               if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
                        err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
                                         MGMT_STATUS_REJECTED);
                        mgmt_pending_remove(cmd);
@@ -2778,6 +3291,7 @@ static int start_discovery(struct sock *sk, struct hci_dev *hdev,
                param_cp.type = LE_SCAN_ACTIVE;
                param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
                param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
+               param_cp.own_address_type = hdev->own_addr_type;
                hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);
 
@@ -3065,6 +3579,186 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
        return err;
 }
 
+static void set_advertising_complete(struct hci_dev *hdev, u8 status)
+{
+       struct cmd_lookup match = { NULL, hdev };
+
+       if (status) {
+               u8 mgmt_err = mgmt_status(status);
+
+               mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
+                                    cmd_status_rsp, &mgmt_err);
+               return;
+       }
+
+       mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
+                            &match);
+
+       new_settings(hdev, match.sk);
+
+       if (match.sk)
+               sock_put(match.sk);
+}
+
+static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
+                          u16 len)
+{
+       struct mgmt_mode *cp = data;
+       struct pending_cmd *cmd;
+       struct hci_request req;
+       u8 val, enabled, status;
+       int err;
+
+       BT_DBG("request for %s", hdev->name);
+
+       status = mgmt_le_support(hdev);
+       if (status)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+                                 status);
+
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       hci_dev_lock(hdev);
+
+       val = !!cp->val;
+       enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
+
+       /* The following conditions are ones which mean that we should
+        * not do any HCI communication but directly send a mgmt
+        * response to user space (after toggling the flag if
+        * necessary).
+        */
+       if (!hdev_is_powered(hdev) || val == enabled ||
+           hci_conn_num(hdev, LE_LINK) > 0) {
+               bool changed = false;
+
+               if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
+                       change_bit(HCI_ADVERTISING, &hdev->dev_flags);
+                       changed = true;
+               }
+
+               err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
+               if (err < 0)
+                       goto unlock;
+
+               if (changed)
+                       err = new_settings(hdev, sk);
+
+               goto unlock;
+       }
+
+       if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
+           mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
+               err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
+                                MGMT_STATUS_BUSY);
+               goto unlock;
+       }
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       hci_req_init(&req, hdev);
+
+       if (val)
+               enable_advertising(&req);
+       else
+               disable_advertising(&req);
+
+       err = hci_req_run(&req, set_advertising_complete);
+       if (err < 0)
+               mgmt_pending_remove(cmd);
+
+unlock:
+       hci_dev_unlock(hdev);
+       return err;
+}
+
+static int set_static_address(struct sock *sk, struct hci_dev *hdev,
+                             void *data, u16 len)
+{
+       struct mgmt_cp_set_static_address *cp = data;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       if (!lmp_le_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
+       if (hdev_is_powered(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
+                                 MGMT_STATUS_REJECTED);
+
+       if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
+               if (!bacmp(&cp->bdaddr, BDADDR_NONE))
+                       return cmd_status(sk, hdev->id,
+                                         MGMT_OP_SET_STATIC_ADDRESS,
+                                         MGMT_STATUS_INVALID_PARAMS);
+
+               /* Two most significant bits shall be set */
+               if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
+                       return cmd_status(sk, hdev->id,
+                                         MGMT_OP_SET_STATIC_ADDRESS,
+                                         MGMT_STATUS_INVALID_PARAMS);
+       }
+
+       hci_dev_lock(hdev);
+
+       bacpy(&hdev->static_addr, &cp->bdaddr);
+
+       err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
+
+       hci_dev_unlock(hdev);
+
+       return err;
+}
+
+static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
+                          void *data, u16 len)
+{
+       struct mgmt_cp_set_scan_params *cp = data;
+       __u16 interval, window;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       if (!lmp_le_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
+       interval = __le16_to_cpu(cp->interval);
+
+       if (interval < 0x0004 || interval > 0x4000)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       window = __le16_to_cpu(cp->window);
+
+       if (window < 0x0004 || window > 0x4000)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       if (window > interval)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       hci_dev_lock(hdev);
+
+       hdev->le_scan_interval = interval;
+       hdev->le_scan_window = window;
+
+       err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
+
+       hci_dev_unlock(hdev);
+
+       return err;
+}
+
 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
 {
        struct pending_cmd *cmd;
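
The new set_scan_params() and set_static_address() handlers above validate their inputs against the LE parameter rules: scan interval and window are in 0.625 ms units within 0x0004..0x4000, the window may not exceed the interval, and a static random address must have its two most significant bits set (byte 5, since bdaddr_t is stored little-endian). A standalone sketch of those checks, not from this commit, with made-up example values:

#include <stdint.h>
#include <stdio.h>

static int scan_params_valid(uint16_t interval, uint16_t window)
{
        if (interval < 0x0004 || interval > 0x4000)
                return 0;
        if (window < 0x0004 || window > 0x4000)
                return 0;
        return window <= interval;      /* window larger than interval is rejected */
}

static int static_addr_valid(const uint8_t b[6])
{
        return (b[5] & 0xc0) == 0xc0;   /* two most significant address bits set */
}

int main(void)
{
        uint16_t interval = 0x0010, window = 0x0010;    /* 10 ms each, example only */
        uint8_t addr[6] = { 0x34, 0x12, 0x78, 0x56, 0xbc, 0xfa }; /* fa:bc:56:78:12:34 */

        printf("scan params valid: %d (interval %.3f ms, window %.3f ms)\n",
               scan_params_valid(interval, window),
               interval * 0.625, window * 0.625);
        printf("static address valid: %d\n", static_addr_valid(addr));
        return 0;
}
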
@@ -3108,7 +3802,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
 
        BT_DBG("%s", hdev->name);
 
-       if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
+       if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
+           hdev->hci_ver < BLUETOOTH_VER_1_2)
                return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                  MGMT_STATUS_NOT_SUPPORTED);
 
@@ -3124,41 +3819,183 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
                return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
                                  MGMT_STATUS_REJECTED);
 
-       hci_dev_lock(hdev);
+       hci_dev_lock(hdev);
+
+       if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
+               err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+                                MGMT_STATUS_BUSY);
+               goto unlock;
+       }
+
+       if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
+               err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
+                                       hdev);
+               goto unlock;
+       }
+
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
+                              data, len);
+       if (!cmd) {
+               err = -ENOMEM;
+               goto unlock;
+       }
+
+       hci_req_init(&req, hdev);
+
+       write_fast_connectable(&req, cp->val);
+
+       err = hci_req_run(&req, fast_connectable_complete);
+       if (err < 0) {
+               err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+                                MGMT_STATUS_FAILED);
+               mgmt_pending_remove(cmd);
+       }
+
+unlock:
+       hci_dev_unlock(hdev);
+
+       return err;
+}
+
+static void set_bredr_scan(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       u8 scan = 0;
+
+       /* Ensure that fast connectable is disabled. This function will
+        * not do anything if the page scan parameters are already what
+        * they should be.
+        */
+       write_fast_connectable(req, false);
+
+       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+               scan |= SCAN_PAGE;
+       if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+               scan |= SCAN_INQUIRY;
+
+       if (scan)
+               hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
+
+static void set_bredr_complete(struct hci_dev *hdev, u8 status)
+{
+       struct pending_cmd *cmd;
+
+       BT_DBG("status 0x%02x", status);
+
+       hci_dev_lock(hdev);
+
+       cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
+       if (!cmd)
+               goto unlock;
+
+       if (status) {
+               u8 mgmt_err = mgmt_status(status);
+
+               /* We need to restore the flag if related HCI commands
+                * failed.
+                */
+               clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+               cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+       } else {
+               send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
+               new_settings(hdev, cmd->sk);
+       }
+
+       mgmt_pending_remove(cmd);
+
+unlock:
+       hci_dev_unlock(hdev);
+}
+
+static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+{
+       struct mgmt_mode *cp = data;
+       struct pending_cmd *cmd;
+       struct hci_request req;
+       int err;
+
+       BT_DBG("request for %s", hdev->name);
+
+       if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
+       if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                 MGMT_STATUS_REJECTED);
+
+       if (cp->val != 0x00 && cp->val != 0x01)
+               return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                 MGMT_STATUS_INVALID_PARAMS);
+
+       hci_dev_lock(hdev);
+
+       if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+               err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
+               goto unlock;
+       }
+
+       if (!hdev_is_powered(hdev)) {
+               if (!cp->val) {
+                       clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+                       clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+                       clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
+                       clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
+                       clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+               }
+
+               change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
+               err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
+               if (err < 0)
+                       goto unlock;
+
+               err = new_settings(hdev, sk);
+               goto unlock;
+       }
 
-       if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
-                                MGMT_STATUS_BUSY);
+       /* Reject disabling when powered on */
+       if (!cp->val) {
+               err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                MGMT_STATUS_REJECTED);
                goto unlock;
        }
 
-       if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
-               err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
-                                       hdev);
+       if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
+               err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+                                MGMT_STATUS_BUSY);
                goto unlock;
        }
 
-       cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
-                              data, len);
+       cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
        if (!cmd) {
                err = -ENOMEM;
                goto unlock;
        }
 
+       /* We need to flip the bit already at this point so that
+        * update_adv_data generates the correct flags.
+        */
+       set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
+
        hci_req_init(&req, hdev);
 
-       write_fast_connectable(&req, cp->val);
+       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
+               set_bredr_scan(&req);
 
-       err = hci_req_run(&req, fast_connectable_complete);
-       if (err < 0) {
-               err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
-                                MGMT_STATUS_FAILED);
+       /* Since only the advertising data flags will change, there
+        * is no need to update the scan response data.
+        */
+       update_adv_data(&req);
+
+       err = hci_req_run(&req, set_bredr_complete);
+       if (err < 0)
                mgmt_pending_remove(cmd);
-       }
 
 unlock:
        hci_dev_unlock(hdev);
-
        return err;
 }
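
The set_bredr path above ends up issuing HCI Write Scan Enable, whose single
parameter is a bitmask combining page scan (connectable) and inquiry scan
(discoverable), exactly as set_bredr_scan() builds it. Below is a minimal
standalone sketch of that mapping; the helper name and the printed cases are
illustrative, and the SCAN_INQUIRY/SCAN_PAGE values (0x01/0x02) follow the
usual HCI definition.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit values of the HCI Write Scan Enable parameter; the kernel's
 * SCAN_INQUIRY/SCAN_PAGE constants carry the same values.
 */
#define SCAN_DISABLED   0x00
#define SCAN_INQUIRY    0x01    /* inquiry scan on -> discoverable */
#define SCAN_PAGE       0x02    /* page scan on    -> connectable  */

/* Mirrors the flag-to-bitmask logic of set_bredr_scan() above. */
static uint8_t scan_enable(bool connectable, bool discoverable)
{
        uint8_t scan = SCAN_DISABLED;

        if (connectable)
                scan |= SCAN_PAGE;
        if (discoverable)
                scan |= SCAN_INQUIRY;

        return scan;
}

int main(void)
{
        printf("connectable only:           0x%02x\n",
               (unsigned int)scan_enable(true, false));
        printf("connectable + discoverable: 0x%02x\n",
               (unsigned int)scan_enable(true, true));
        return 0;
}
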
 
@@ -3180,6 +4017,12 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
        u16 key_count, expected_len;
        int i, err;
 
+       BT_DBG("request for %s", hdev->name);
+
+       if (!lmp_le_capable(hdev))
+               return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
+                                 MGMT_STATUS_NOT_SUPPORTED);
+
        key_count = __le16_to_cpu(cp->key_count);
 
        expected_len = sizeof(*cp) + key_count *
@@ -3208,15 +4051,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
 
        for (i = 0; i < key_count; i++) {
                struct mgmt_ltk_info *key = &cp->keys[i];
-               u8 type;
+               u8 type, addr_type;
+
+               if (key->addr.type == BDADDR_LE_PUBLIC)
+                       addr_type = ADDR_LE_DEV_PUBLIC;
+               else
+                       addr_type = ADDR_LE_DEV_RANDOM;
 
                if (key->master)
                        type = HCI_SMP_LTK;
                else
                        type = HCI_SMP_LTK_SLAVE;
 
-               hci_add_ltk(hdev, &key->addr.bdaddr,
-                           bdaddr_to_le(key->addr.type),
+               hci_add_ltk(hdev, &key->addr.bdaddr, addr_type,
                            type, 0, key->authenticated, key->val,
                            key->enc_size, key->ediv, key->rand);
        }
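
The hunk above reworks the body of a loop that is guarded, a few lines
earlier, by the key_count/expected_len check: a userspace buffer is rejected
when its declared key count does not match the actual payload length. A small
standalone model of that validation follows; the record size is an assumption
for the sketch, not the size of the real struct mgmt_ltk_info, and endianness
handling is omitted.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed fixed size of one key record in this model. */
#define REC_SIZE 36

/* Returns 1 when a buffer of 'len' bytes can really hold a 2-byte count
 * followed by 'key_count' fixed-size records, 0 otherwise.
 */
static int payload_valid(size_t len, uint16_t key_count)
{
        size_t expected = sizeof(uint16_t) + (size_t)key_count * REC_SIZE;

        return len >= sizeof(uint16_t) && len == expected;
}

int main(void)
{
        printf("%d\n", payload_valid(2 + 2 * REC_SIZE, 2)); /* 1: count matches payload      */
        printf("%d\n", payload_valid(2 + 2 * REC_SIZE, 5)); /* 0: claims more keys than sent */
        return 0;
}
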
@@ -3276,6 +4123,10 @@ static const struct mgmt_handler {
        { block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
        { unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
        { set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
+       { set_advertising,        false, MGMT_SETTING_SIZE },
+       { set_bredr,              false, MGMT_SETTING_SIZE },
+       { set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
+       { set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
 };
 
 
@@ -3320,6 +4171,13 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
                                         MGMT_STATUS_INVALID_INDEX);
                        goto done;
                }
+
+               if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+                   test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
+                       err = cmd_status(sk, index, opcode,
+                                        MGMT_STATUS_INVALID_INDEX);
+                       goto done;
+               }
        }
 
        if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
@@ -3365,74 +4223,24 @@ done:
        return err;
 }
 
-static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
-{
-       u8 *status = data;
-
-       cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
-       mgmt_pending_remove(cmd);
-}
-
-int mgmt_index_added(struct hci_dev *hdev)
+void mgmt_index_added(struct hci_dev *hdev)
 {
-       if (!mgmt_valid_hdev(hdev))
-               return -ENOTSUPP;
+       if (hdev->dev_type != HCI_BREDR)
+               return;
 
-       return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
+       mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
 }
 
-int mgmt_index_removed(struct hci_dev *hdev)
+void mgmt_index_removed(struct hci_dev *hdev)
 {
        u8 status = MGMT_STATUS_INVALID_INDEX;
 
-       if (!mgmt_valid_hdev(hdev))
-               return -ENOTSUPP;
+       if (hdev->dev_type != HCI_BREDR)
+               return;
 
        mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
 
-       return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
-}
-
-struct cmd_lookup {
-       struct sock *sk;
-       struct hci_dev *hdev;
-       u8 mgmt_status;
-};
-
-static void settings_rsp(struct pending_cmd *cmd, void *data)
-{
-       struct cmd_lookup *match = data;
-
-       send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
-
-       list_del(&cmd->list);
-
-       if (match->sk == NULL) {
-               match->sk = cmd->sk;
-               sock_hold(match->sk);
-       }
-
-       mgmt_pending_free(cmd);
-}
-
-static void set_bredr_scan(struct hci_request *req)
-{
-       struct hci_dev *hdev = req->hdev;
-       u8 scan = 0;
-
-       /* Ensure that fast connectable is disabled. This function will
-        * not do anything if the page scan parameters are already what
-        * they should be.
-        */
-       write_fast_connectable(req, false);
-
-       if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-               scan |= SCAN_PAGE;
-       if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
-               scan |= SCAN_INQUIRY;
-
-       if (scan)
-               hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+       mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
 }
 
 static void powered_complete(struct hci_dev *hdev, u8 status)
@@ -3483,13 +4291,33 @@ static int powered_update_hci(struct hci_dev *hdev)
                                    sizeof(cp), &cp);
        }
 
+       if (lmp_le_capable(hdev)) {
+               /* Set random address to static address if configured */
+               if (bacmp(&hdev->static_addr, BDADDR_ANY))
+                       hci_req_add(&req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+                                   &hdev->static_addr);
+
+               /* Make sure the controller has a good default for
+                * advertising data. This also applies to the case
+                * where BR/EDR was toggled during the AUTO_OFF phase.
+                */
+               if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+                       update_adv_data(&req);
+                       update_scan_rsp_data(&req);
+               }
+
+               if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+                       enable_advertising(&req);
+       }
+
        link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
        if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
                hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
                            sizeof(link_sec), &link_sec);
 
        if (lmp_bredr_capable(hdev)) {
-               set_bredr_scan(&req);
+               if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+                       set_bredr_scan(&req);
                update_class(&req);
                update_name(&req);
                update_eir(&req);
@@ -3533,76 +4361,110 @@ new_settings:
        return err;
 }
 
-int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
+void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
 {
        struct pending_cmd *cmd;
        u8 status;
 
        cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
        if (!cmd)
-               return -ENOENT;
+               return;
 
        if (err == -ERFKILL)
                status = MGMT_STATUS_RFKILLED;
        else
                status = MGMT_STATUS_FAILED;
 
-       err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
+       cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
 
        mgmt_pending_remove(cmd);
+}
 
-       return err;
+void mgmt_discoverable_timeout(struct hci_dev *hdev)
+{
+       struct hci_request req;
+
+       hci_dev_lock(hdev);
+
+       /* When the discoverable timeout triggers, just make sure the
+        * limited discoverable flag is cleared. Even in the case of a
+        * timeout triggered from general discoverable, it is safe to
+        * unconditionally clear the flag.
+        */
+       clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+       clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
+
+       hci_req_init(&req, hdev);
+       if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
+               u8 scan = SCAN_PAGE;
+               hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
+                           sizeof(scan), &scan);
+       }
+       update_class(&req);
+       update_adv_data(&req);
+       hci_req_run(&req, NULL);
+
+       hdev->discov_timeout = 0;
+
+       new_settings(hdev, NULL);
+
+       hci_dev_unlock(hdev);
 }
 
-int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
+void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
 {
-       struct cmd_lookup match = { NULL, hdev };
-       bool changed = false;
-       int err = 0;
+       bool changed;
+
+       /* Nothing needed here if there's a pending command, since that
+        * command's request completion callback takes care of everything
+        * necessary.
+        */
+       if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
+               return;
 
        if (discoverable) {
-               if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
-                       changed = true;
+               changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        } else {
-               if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
-                       changed = true;
+               clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
+               changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }
 
-       mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
-                            &match);
-
-       if (changed)
-               err = new_settings(hdev, match.sk);
+       if (changed) {
+               struct hci_request req;
 
-       if (match.sk)
-               sock_put(match.sk);
+               /* In case this change in discoverable was triggered by
+                * a disabling of connectable, there could be a need to
+                * update the advertising flags.
+                */
+               hci_req_init(&req, hdev);
+               update_adv_data(&req);
+               hci_req_run(&req, NULL);
 
-       return err;
+               new_settings(hdev, NULL);
+       }
 }
 
-int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
+void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
 {
-       struct pending_cmd *cmd;
-       bool changed = false;
-       int err = 0;
+       bool changed;
 
-       if (connectable) {
-               if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-                       changed = true;
-       } else {
-               if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
-                       changed = true;
-       }
+       /* Nothing needed here if there's a pending command, since that
+        * command's request completion callback takes care of everything
+        * necessary.
+        */
+       if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
+               return;
 
-       cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
+       if (connectable)
+               changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
+       else
+               changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
 
        if (changed)
-               err = new_settings(hdev, cmd ? cmd->sk : NULL);
-
-       return err;
+               new_settings(hdev, NULL);
 }
 
-int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
+void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
 {
        u8 mgmt_err = mgmt_status(status);
 
@@ -3613,12 +4475,10 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
        if (scan & SCAN_INQUIRY)
                mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
                                     cmd_status_rsp, &mgmt_err);
-
-       return 0;
 }
 
-int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
-                     bool persistent)
+void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
+                      bool persistent)
 {
        struct mgmt_ev_new_link_key ev;
 
@@ -3631,10 +4491,10 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
        memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
        ev.key.pin_len = key->pin_len;
 
-       return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
+       mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
+void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
 {
        struct mgmt_ev_new_long_term_key ev;
 
@@ -3653,13 +4513,23 @@ int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
        memcpy(ev.key.rand, key->rand, sizeof(key->rand));
        memcpy(ev.key.val, key->val, sizeof(key->val));
 
-       return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
-                         NULL);
+       mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
+}
+
+static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
+                                 u8 data_len)
+{
+       eir[eir_len++] = sizeof(type) + data_len;
+       eir[eir_len++] = type;
+       memcpy(&eir[eir_len], data, data_len);
+       eir_len += data_len;
+
+       return eir_len;
 }
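
eir_append_data() packs one length/type/payload element of EIR or advertising
data; the length byte covers the type byte plus the payload. The following
self-contained sketch shows the resulting byte layout using the standard
Complete Local Name type (0x09); the buffer size and the example name are only
for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EIR_NAME_COMPLETE 0x09  /* "Complete Local Name" data type */

static uint16_t append_field(uint8_t *eir, uint16_t eir_len, uint8_t type,
                             const uint8_t *data, uint8_t data_len)
{
        eir[eir_len++] = sizeof(type) + data_len;  /* length = type + payload */
        eir[eir_len++] = type;
        memcpy(&eir[eir_len], data, data_len);
        eir_len += data_len;

        return eir_len;
}

int main(void)
{
        uint8_t eir[31] = { 0 };                   /* LE advertising data is 31 bytes */
        const char *name = "demo";
        uint16_t len, i;

        len = append_field(eir, 0, EIR_NAME_COMPLETE,
                           (const uint8_t *)name, (uint8_t)strlen(name));

        for (i = 0; i < len; i++)
                printf("%02x ", eir[i]);           /* prints: 05 09 64 65 6d 6f */
        printf("\n");
        return 0;
}
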
 
-int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                         u8 addr_type, u32 flags, u8 *name, u8 name_len,
-                         u8 *dev_class)
+void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                          u8 addr_type, u32 flags, u8 *name, u8 name_len,
+                          u8 *dev_class)
 {
        char buf[512];
        struct mgmt_ev_device_connected *ev = (void *) buf;
@@ -3680,8 +4550,8 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 
        ev->eir_len = cpu_to_le16(eir_len);
 
-       return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
-                         sizeof(*ev) + eir_len, NULL);
+       mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
+                   sizeof(*ev) + eir_len, NULL);
 }
 
 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
@@ -3719,12 +4589,11 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
        mgmt_pending_remove(cmd);
 }
 
-int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                            u8 link_type, u8 addr_type, u8 reason)
+void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                             u8 link_type, u8 addr_type, u8 reason)
 {
        struct mgmt_ev_device_disconnected ev;
        struct sock *sk = NULL;
-       int err;
 
        mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
 
@@ -3732,45 +4601,39 @@ int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
        ev.addr.type = link_to_bdaddr(link_type, addr_type);
        ev.reason = reason;
 
-       err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
-                        sk);
+       mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
 
        if (sk)
                sock_put(sk);
 
        mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
                             hdev);
-
-       return err;
 }
 
-int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                          u8 link_type, u8 addr_type, u8 status)
+void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                           u8 link_type, u8 addr_type, u8 status)
 {
        struct mgmt_rp_disconnect rp;
        struct pending_cmd *cmd;
-       int err;
 
        mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
                             hdev);
 
        cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
        if (!cmd)
-               return -ENOENT;
+               return;
 
        bacpy(&rp.addr.bdaddr, bdaddr);
        rp.addr.type = link_to_bdaddr(link_type, addr_type);
 
-       err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
-                          mgmt_status(status), &rp, sizeof(rp));
+       cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
+                    mgmt_status(status), &rp, sizeof(rp));
 
        mgmt_pending_remove(cmd);
-
-       return err;
 }
 
-int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                       u8 addr_type, u8 status)
+void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                        u8 addr_type, u8 status)
 {
        struct mgmt_ev_connect_failed ev;
 
@@ -3778,10 +4641,10 @@ int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
        ev.addr.type = link_to_bdaddr(link_type, addr_type);
        ev.status = mgmt_status(status);
 
-       return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
+       mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
+void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
 {
        struct mgmt_ev_pin_code_request ev;
 
@@ -3789,52 +4652,45 @@ int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
        ev.addr.type = BDADDR_BREDR;
        ev.secure = secure;
 
-       return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
-                         NULL);
+       mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                                u8 status)
+void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                 u8 status)
 {
        struct pending_cmd *cmd;
        struct mgmt_rp_pin_code_reply rp;
-       int err;
 
        cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
        if (!cmd)
-               return -ENOENT;
+               return;
 
        bacpy(&rp.addr.bdaddr, bdaddr);
        rp.addr.type = BDADDR_BREDR;
 
-       err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
-                          mgmt_status(status), &rp, sizeof(rp));
+       cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
+                    mgmt_status(status), &rp, sizeof(rp));
 
        mgmt_pending_remove(cmd);
-
-       return err;
 }
 
-int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-                                    u8 status)
+void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                     u8 status)
 {
        struct pending_cmd *cmd;
        struct mgmt_rp_pin_code_reply rp;
-       int err;
 
        cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
        if (!cmd)
-               return -ENOENT;
+               return;
 
        bacpy(&rp.addr.bdaddr, bdaddr);
        rp.addr.type = BDADDR_BREDR;
 
-       err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
-                          mgmt_status(status), &rp, sizeof(rp));
+       cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
+                    mgmt_status(status), &rp, sizeof(rp));
 
        mgmt_pending_remove(cmd);
-
-       return err;
 }
 
 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3936,8 +4792,8 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
        return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                    u8 addr_type, u8 status)
+void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                     u8 addr_type, u8 status)
 {
        struct mgmt_ev_auth_failed ev;
 
@@ -3945,40 +4801,36 @@ int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
        ev.addr.type = link_to_bdaddr(link_type, addr_type);
        ev.status = mgmt_status(status);
 
-       return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
+       mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
 }
 
-int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
+void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
 {
        struct cmd_lookup match = { NULL, hdev };
-       bool changed = false;
-       int err = 0;
+       bool changed;
 
        if (status) {
                u8 mgmt_err = mgmt_status(status);
                mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
                                     cmd_status_rsp, &mgmt_err);
-               return 0;
+               return;
        }
 
-       if (test_bit(HCI_AUTH, &hdev->flags)) {
-               if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
-                       changed = true;
-       } else {
-               if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
-                       changed = true;
-       }
+       if (test_bit(HCI_AUTH, &hdev->flags))
+               changed = !test_and_set_bit(HCI_LINK_SECURITY,
+                                           &hdev->dev_flags);
+       else
+               changed = test_and_clear_bit(HCI_LINK_SECURITY,
+                                            &hdev->dev_flags);
 
        mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
                             &match);
 
        if (changed)
-               err = new_settings(hdev, match.sk);
+               new_settings(hdev, match.sk);
 
        if (match.sk)
                sock_put(match.sk);
-
-       return err;
 }
 
 static void clear_eir(struct hci_request *req)
@@ -3996,38 +4848,41 @@ static void clear_eir(struct hci_request *req)
        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
 }
 
-int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
+void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
 {
        struct cmd_lookup match = { NULL, hdev };
        struct hci_request req;
        bool changed = false;
-       int err = 0;
 
        if (status) {
                u8 mgmt_err = mgmt_status(status);
 
                if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
-                                                &hdev->dev_flags))
-                       err = new_settings(hdev, NULL);
+                                                &hdev->dev_flags)) {
+                       clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
+                       new_settings(hdev, NULL);
+               }
 
                mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
                                     &mgmt_err);
-
-               return err;
+               return;
        }
 
        if (enable) {
-               if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
-                       changed = true;
+               changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
        } else {
-               if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
-                       changed = true;
+               changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
+               if (!changed)
+                       changed = test_and_clear_bit(HCI_HS_ENABLED,
+                                                    &hdev->dev_flags);
+               else
+                       clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
        }
 
        mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
 
        if (changed)
-               err = new_settings(hdev, match.sk);
+               new_settings(hdev, match.sk);
 
        if (match.sk)
                sock_put(match.sk);
@@ -4040,8 +4895,6 @@ int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
                clear_eir(&req);
 
        hci_req_run(&req, NULL);
-
-       return err;
 }
 
 static void sk_lookup(struct pending_cmd *cmd, void *data)
@@ -4054,33 +4907,30 @@ static void sk_lookup(struct pending_cmd *cmd, void *data)
        }
 }
 
-int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
-                                  u8 status)
+void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
+                                   u8 status)
 {
        struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
-       int err = 0;
 
        mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
        mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
        mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
 
        if (!status)
-               err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
-                                3, NULL);
+               mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
+                          NULL);
 
        if (match.sk)
                sock_put(match.sk);
-
-       return err;
 }
 
-int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
+void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
 {
        struct mgmt_cp_set_local_name ev;
        struct pending_cmd *cmd;
 
        if (status)
-               return 0;
+               return;
 
        memset(&ev, 0, sizeof(ev));
        memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
@@ -4094,96 +4944,54 @@ int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
                 * HCI dev don't send any mgmt signals.
                 */
                if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
-                       return 0;
+                       return;
        }
 
-       return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
-                         cmd ? cmd->sk : NULL);
+       mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
+                  cmd ? cmd->sk : NULL);
 }
 
-int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
-                                           u8 *randomizer, u8 status)
+void mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
+                                            u8 *randomizer, u8 status)
 {
        struct pending_cmd *cmd;
-       int err;
 
        BT_DBG("%s status %u", hdev->name, status);
 
        cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
        if (!cmd)
-               return -ENOENT;
+               return;
 
        if (status) {
-               err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
-                                mgmt_status(status));
+               cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+                          mgmt_status(status));
        } else {
                struct mgmt_rp_read_local_oob_data rp;
 
                memcpy(rp.hash, hash, sizeof(rp.hash));
                memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
 
-               err = cmd_complete(cmd->sk, hdev->id,
-                                  MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
-                                  sizeof(rp));
+               cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+                            0, &rp, sizeof(rp));
        }
 
        mgmt_pending_remove(cmd);
-
-       return err;
-}
-
-int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
-{
-       struct cmd_lookup match = { NULL, hdev };
-       bool changed = false;
-       int err = 0;
-
-       if (status) {
-               u8 mgmt_err = mgmt_status(status);
-
-               if (enable && test_and_clear_bit(HCI_LE_ENABLED,
-                                                &hdev->dev_flags))
-                       err = new_settings(hdev, NULL);
-
-               mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
-                                    &mgmt_err);
-
-               return err;
-       }
-
-       if (enable) {
-               if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
-                       changed = true;
-       } else {
-               if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
-                       changed = true;
-       }
-
-       mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
-
-       if (changed)
-               err = new_settings(hdev, match.sk);
-
-       if (match.sk)
-               sock_put(match.sk);
-
-       return err;
 }
 
-int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                     u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
-                     ssp, u8 *eir, u16 eir_len)
+void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                      u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
+                      ssp, u8 *eir, u16 eir_len)
 {
        char buf[512];
        struct mgmt_ev_device_found *ev = (void *) buf;
        size_t ev_size;
 
        if (!hci_discovery_active(hdev))
-               return -EPERM;
+               return;
 
        /* Leave 5 bytes for a potential CoD field */
        if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
-               return -EINVAL;
+               return;
 
        memset(buf, 0, sizeof(buf));
 
@@ -4205,11 +5013,11 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
        ev->eir_len = cpu_to_le16(eir_len);
        ev_size = sizeof(*ev) + eir_len;
 
-       return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
+       mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
 }
 
-int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
-                    u8 addr_type, s8 rssi, u8 *name, u8 name_len)
+void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
+                     u8 addr_type, s8 rssi, u8 *name, u8 name_len)
 {
        struct mgmt_ev_device_found *ev;
        char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
@@ -4228,11 +5036,10 @@ int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
 
        ev->eir_len = cpu_to_le16(eir_len);
 
-       return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
-                         sizeof(*ev) + eir_len, NULL);
+       mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
 }
 
-int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
+void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
 {
        struct mgmt_ev_discovering ev;
        struct pending_cmd *cmd;
@@ -4256,7 +5063,7 @@ int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
        ev.type = hdev->discovery.type;
        ev.discovering = discovering;
 
-       return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
+       mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
 }
 
 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
@@ -4287,5 +5094,35 @@ int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
                          cmd ? cmd->sk : NULL);
 }
 
-module_param(enable_hs, bool, 0644);
-MODULE_PARM_DESC(enable_hs, "Enable High Speed support");
+static void adv_enable_complete(struct hci_dev *hdev, u8 status)
+{
+       BT_DBG("%s status %u", hdev->name, status);
+
+       /* Clear the advertising mgmt setting if we failed to re-enable it */
+       if (status) {
+               clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+               new_settings(hdev, NULL);
+       }
+}
+
+void mgmt_reenable_advertising(struct hci_dev *hdev)
+{
+       struct hci_request req;
+
+       if (hci_conn_num(hdev, LE_LINK) > 0)
+               return;
+
+       if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
+               return;
+
+       hci_req_init(&req, hdev);
+       enable_advertising(&req);
+
+       /* If this fails, we have no option but to let user space know
+        * that we've disabled advertising.
+        */
+       if (hci_req_run(&req, adv_enable_complete) < 0) {
+               clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
+               new_settings(hdev, NULL);
+       }
+}
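
Nearly every hunk in this file batches HCI commands through the same pattern:
hci_req_init(), one or more hci_req_add() calls, then hci_req_run() with a
completion callback that later receives the status of the batch. The userspace
model below sketches that flow only; the struct and function names, the queue
size and the example opcodes are illustrative, not the kernel API.

#include <stdint.h>
#include <stdio.h>

#define MAX_CMDS 8

struct cmd {
        uint16_t opcode;
};

struct request {
        struct cmd cmds[MAX_CMDS];
        int count;
};

typedef void (*complete_fn)(uint8_t status);

static void req_init(struct request *req)
{
        req->count = 0;
}

static void req_add(struct request *req, uint16_t opcode)
{
        if (req->count < MAX_CMDS)
                req->cmds[req->count++].opcode = opcode;
}

static int req_run(struct request *req, complete_fn complete)
{
        int i;

        if (!req->count)
                return -1;                      /* nothing queued */

        for (i = 0; i < req->count; i++)
                printf("sending opcode 0x%04x\n", req->cmds[i].opcode);

        if (complete)
                complete(0);                    /* 0 == success status */
        return 0;
}

static void batch_done(uint8_t status)
{
        printf("batch complete, status 0x%02x\n", status);
}

int main(void)
{
        struct request req;

        req_init(&req);
        req_add(&req, 0x0c1a);                  /* e.g. Write Scan Enable       */
        req_add(&req, 0x2008);                  /* e.g. LE Set Advertising Data */
        return req_run(&req, batch_done);
}
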
index ca957d34b0c89fa29341a179ee16e325ac226e55..94d06cbfbc184a6e827aa5c3cbd15d4693eca49a 100644 (file)
@@ -641,13 +641,13 @@ static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst)
 {
        struct rfcomm_session *s;
        struct list_head *p, *n;
-       struct bt_sock *sk;
+       struct l2cap_chan *chan;
        list_for_each_safe(p, n, &session_list) {
                s = list_entry(p, struct rfcomm_session, list);
-               sk = bt_sk(s->sock->sk);
+               chan = l2cap_pi(s->sock->sk)->chan;
 
-               if ((!bacmp(src, BDADDR_ANY) || !bacmp(&sk->src, src)) &&
-                               !bacmp(&sk->dst, dst))
+               if ((!bacmp(src, BDADDR_ANY) || !bacmp(&chan->src, src)) &&
+                   !bacmp(&chan->dst, dst))
                        return s;
        }
        return NULL;
@@ -732,11 +732,11 @@ failed:
 
 void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst)
 {
-       struct sock *sk = s->sock->sk;
+       struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;
        if (src)
-               bacpy(src, &bt_sk(sk)->src);
+               bacpy(src, &chan->src);
        if (dst)
-               bacpy(dst, &bt_sk(sk)->dst);
+               bacpy(dst, &chan->dst);
 }
 
 /* ---- RFCOMM frame sending ---- */
@@ -2112,12 +2112,11 @@ static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
        rfcomm_lock();
 
        list_for_each_entry(s, &session_list, list) {
+               struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan;
                struct rfcomm_dlc *d;
                list_for_each_entry(d, &s->dlcs, list) {
-                       struct sock *sk = s->sock->sk;
-
                        seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n",
-                                  &bt_sk(sk)->src, &bt_sk(sk)->dst,
+                                  &chan->src, &chan->dst,
                                   d->state, d->dlci, d->mtu,
                                   d->rx_credits, d->tx_credits);
                }
@@ -2155,13 +2154,6 @@ static int __init rfcomm_init(void)
                goto unregister;
        }
 
-       if (bt_debugfs) {
-               rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
-                               bt_debugfs, NULL, &rfcomm_dlc_debugfs_fops);
-               if (!rfcomm_dlc_debugfs)
-                       BT_ERR("Failed to create RFCOMM debug file");
-       }
-
        err = rfcomm_init_ttys();
        if (err < 0)
                goto stop;
@@ -2172,6 +2164,13 @@ static int __init rfcomm_init(void)
 
        BT_INFO("RFCOMM ver %s", VERSION);
 
+       if (IS_ERR_OR_NULL(bt_debugfs))
+               return 0;
+
+       rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444,
+                                                bt_debugfs, NULL,
+                                                &rfcomm_dlc_debugfs_fops);
+
        return 0;
 
 cleanup:
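
This hunk, and the matching ones in rfcomm/sock.c and sco.c below, switch to a
common debugfs idiom: return early when the Bluetooth debugfs root is unusable,
create the file only after the rest of initialisation has succeeded, and do not
treat a failed file creation as an error. A hypothetical minimal module showing
that idiom is sketched below; the directory, file and symbol names are invented
for the example.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/err.h>

static struct dentry *demo_dir;

static int demo_show(struct seq_file *f, void *p)
{
        seq_printf(f, "demo state\n");
        return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, inode->i_private);
}

static const struct file_operations demo_fops = {
        .owner          = THIS_MODULE,
        .open           = demo_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init demo_init(void)
{
        demo_dir = debugfs_create_dir("demo", NULL);

        /* debugfs being absent is not a reason to fail init */
        if (IS_ERR_OR_NULL(demo_dir))
                return 0;

        /* return value deliberately not checked, as in the hunks above */
        debugfs_create_file("state", 0444, demo_dir, NULL, &demo_fops);
        return 0;
}

static void __exit demo_exit(void)
{
        debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
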
index 30b3721dc6d77d96be4bdb412ee90c49b8976dc9..c4d3d423f89b84d41b9ff0e6919e9f37786f4960 100644 (file)
@@ -87,7 +87,8 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
                parent->sk_data_ready(parent, 0);
        } else {
                if (d->state == BT_CONNECTED)
-                       rfcomm_session_getaddr(d->session, &bt_sk(sk)->src, NULL);
+                       rfcomm_session_getaddr(d->session,
+                                              &rfcomm_pi(sk)->src, NULL);
                sk->sk_state_change(sk);
        }
 
@@ -110,7 +111,7 @@ static struct sock *__rfcomm_get_sock_by_addr(u8 channel, bdaddr_t *src)
 
        sk_for_each(sk, &rfcomm_sk_list.head) {
                if (rfcomm_pi(sk)->channel == channel &&
-                               !bacmp(&bt_sk(sk)->src, src))
+                               !bacmp(&rfcomm_pi(sk)->src, src))
                        break;
        }
 
@@ -132,11 +133,11 @@ static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *
 
                if (rfcomm_pi(sk)->channel == channel) {
                        /* Exact match. */
-                       if (!bacmp(&bt_sk(sk)->src, src))
+                       if (!bacmp(&rfcomm_pi(sk)->src, src))
                                break;
 
                        /* Closest match */
-                       if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+                       if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY))
                                sk1 = sk;
                }
        }
@@ -355,7 +356,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
                err = -EADDRINUSE;
        } else {
                /* Save source address */
-               bacpy(&bt_sk(sk)->src, &sa->rc_bdaddr);
+               bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
                rfcomm_pi(sk)->channel = sa->rc_channel;
                sk->sk_state = BT_BOUND;
        }
@@ -393,13 +394,14 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
        }
 
        sk->sk_state = BT_CONNECT;
-       bacpy(&bt_sk(sk)->dst, &sa->rc_bdaddr);
+       bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr);
        rfcomm_pi(sk)->channel = sa->rc_channel;
 
        d->sec_level = rfcomm_pi(sk)->sec_level;
        d->role_switch = rfcomm_pi(sk)->role_switch;
 
-       err = rfcomm_dlc_open(d, &bt_sk(sk)->src, &sa->rc_bdaddr, sa->rc_channel);
+       err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
+                             sa->rc_channel);
        if (!err)
                err = bt_sock_wait_state(sk, BT_CONNECTED,
                                sock_sndtimeo(sk, flags & O_NONBLOCK));
@@ -429,7 +431,7 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)
        }
 
        if (!rfcomm_pi(sk)->channel) {
-               bdaddr_t *src = &bt_sk(sk)->src;
+               bdaddr_t *src = &rfcomm_pi(sk)->src;
                u8 channel;
 
                err = -EINVAL;
@@ -530,9 +532,9 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
        sa->rc_family  = AF_BLUETOOTH;
        sa->rc_channel = rfcomm_pi(sk)->channel;
        if (peer)
-               bacpy(&sa->rc_bdaddr, &bt_sk(sk)->dst);
+               bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst);
        else
-               bacpy(&sa->rc_bdaddr, &bt_sk(sk)->src);
+               bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src);
 
        *len = sizeof(struct sockaddr_rc);
        return 0;
@@ -544,7 +546,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
        struct sock *sk = sock->sk;
        struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
        struct sk_buff *skb;
-       int sent = 0;
+       int sent;
 
        if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
                return -ENOTCONN;
@@ -559,6 +561,10 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 
        lock_sock(sk);
 
+       sent = bt_sock_wait_ready(sk, msg->msg_flags);
+       if (sent)
+               goto done;
+
        while (len) {
                size_t size = min_t(size_t, len, d->mtu);
                int err;
@@ -594,6 +600,7 @@ static int rfcomm_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                len  -= size;
        }
 
+done:
        release_sock(sk);
 
        return sent;
@@ -946,8 +953,8 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
        bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);
 
        rfcomm_sock_init(sk, parent);
-       bacpy(&bt_sk(sk)->src, &src);
-       bacpy(&bt_sk(sk)->dst, &dst);
+       bacpy(&rfcomm_pi(sk)->src, &src);
+       bacpy(&rfcomm_pi(sk)->dst, &dst);
        rfcomm_pi(sk)->channel = channel;
 
        sk->sk_state = BT_CONFIG;
@@ -974,7 +981,7 @@ static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
 
        sk_for_each(sk, &rfcomm_sk_list.head) {
                seq_printf(f, "%pMR %pMR %d %d\n",
-                          &bt_sk(sk)->src, &bt_sk(sk)->dst,
+                          &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst,
                           sk->sk_state, rfcomm_pi(sk)->channel);
        }
 
@@ -1044,15 +1051,15 @@ int __init rfcomm_init_sockets(void)
                goto error;
        }
 
-       if (bt_debugfs) {
-               rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
-                               bt_debugfs, NULL, &rfcomm_sock_debugfs_fops);
-               if (!rfcomm_sock_debugfs)
-                       BT_ERR("Failed to create RFCOMM debug file");
-       }
-
        BT_INFO("RFCOMM socket layer initialized");
 
+       if (IS_ERR_OR_NULL(bt_debugfs))
+               return 0;
+
+       rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
+                                                 bt_debugfs, NULL,
+                                                 &rfcomm_sock_debugfs_fops);
+
        return 0;
 
 error:
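
The conversions in this file move the source and destination addresses from
the generic bt_sk(sk) fields into rfcomm_pi(sk), the RFCOMM-specific socket
info that wraps the common Bluetooth socket. The userspace model below sketches
that "protocol info embeds the generic socket, accessed via a cast" layout; all
struct, macro and field names here are invented for the illustration.

#include <stdio.h>
#include <string.h>

typedef struct {
        unsigned char b[6];
} bdaddr_t;                             /* models the kernel's bdaddr_t */

struct base_sock {                      /* stands in for struct bt_sock */
        int state;
};

struct proto_pinfo {                    /* stands in for struct rfcomm_pinfo */
        struct base_sock bt;            /* must be the first member          */
        bdaddr_t src;
        bdaddr_t dst;
        unsigned char channel;
};

/* Same idea as the rfcomm_pi() accessor: cast the generic handle back to
 * the protocol-specific structure it is embedded in.
 */
#define proto_pi(sk) ((struct proto_pinfo *)(sk))

int main(void)
{
        struct proto_pinfo pi;
        struct base_sock *sk = &pi.bt;  /* callers only pass the generic pointer around */

        memset(&pi, 0, sizeof(pi));
        proto_pi(sk)->channel = 3;
        proto_pi(sk)->src.b[5] = 0xaa;

        printf("channel %u, src[5] 0x%02x\n",
               proto_pi(sk)->channel, proto_pi(sk)->src.b[5]);
        return 0;
}
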
index 96bd388d93a4aae145bfb207d26fbc3a90b9e0a8..12a0e51e21e13631beeec14d97e64aa137fd0e99 100644 (file)
@@ -92,9 +92,6 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
        hcon->sco_data = conn;
        conn->hcon = hcon;
 
-       conn->src = &hdev->bdaddr;
-       conn->dst = &hcon->dst;
-
        if (hdev->sco_mtu > 0)
                conn->mtu = hdev->sco_mtu;
        else
@@ -156,16 +153,14 @@ static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
 
 static int sco_connect(struct sock *sk)
 {
-       bdaddr_t *src = &bt_sk(sk)->src;
-       bdaddr_t *dst = &bt_sk(sk)->dst;
        struct sco_conn *conn;
        struct hci_conn *hcon;
        struct hci_dev  *hdev;
        int err, type;
 
-       BT_DBG("%pMR -> %pMR", src, dst);
+       BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst);
 
-       hdev = hci_get_route(dst, src);
+       hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src);
        if (!hdev)
                return -EHOSTUNREACH;
 
@@ -182,7 +177,8 @@ static int sco_connect(struct sock *sk)
                goto done;
        }
 
-       hcon = hci_connect_sco(hdev, type, dst, sco_pi(sk)->setting);
+       hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst,
+                              sco_pi(sk)->setting);
        if (IS_ERR(hcon)) {
                err = PTR_ERR(hcon);
                goto done;
@@ -196,7 +192,7 @@ static int sco_connect(struct sock *sk)
        }
 
        /* Update source addr of the socket */
-       bacpy(src, conn->src);
+       bacpy(&sco_pi(sk)->src, &hcon->src);
 
        err = sco_chan_add(conn, sk, NULL);
        if (err)
@@ -270,7 +266,7 @@ static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba)
                if (sk->sk_state != BT_LISTEN)
                        continue;
 
-               if (!bacmp(&bt_sk(sk)->src, ba))
+               if (!bacmp(&sco_pi(sk)->src, ba))
                        return sk;
        }
 
@@ -291,11 +287,11 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src)
                        continue;
 
                /* Exact match. */
-               if (!bacmp(&bt_sk(sk)->src, src))
+               if (!bacmp(&sco_pi(sk)->src, src))
                        break;
 
                /* Closest match */
-               if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
+               if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY))
                        sk1 = sk;
        }
 
@@ -475,7 +471,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
                goto done;
        }
 
-       bacpy(&bt_sk(sk)->src, &sa->sco_bdaddr);
+       bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr);
 
        sk->sk_state = BT_BOUND;
 
@@ -505,7 +501,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
        lock_sock(sk);
 
        /* Set destination address and psm */
-       bacpy(&bt_sk(sk)->dst, &sa->sco_bdaddr);
+       bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
 
        err = sco_connect(sk);
        if (err)
@@ -522,7 +518,7 @@ done:
 static int sco_sock_listen(struct socket *sock, int backlog)
 {
        struct sock *sk = sock->sk;
-       bdaddr_t *src = &bt_sk(sk)->src;
+       bdaddr_t *src = &sco_pi(sk)->src;
        int err = 0;
 
        BT_DBG("sk %p backlog %d", sk, backlog);
@@ -626,9 +622,9 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, int *len
        *len = sizeof(struct sockaddr_sco);
 
        if (peer)
-               bacpy(&sa->sco_bdaddr, &bt_sk(sk)->dst);
+               bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst);
        else
-               bacpy(&sa->sco_bdaddr, &bt_sk(sk)->src);
+               bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src);
 
        return 0;
 }
@@ -999,7 +995,7 @@ static void sco_conn_ready(struct sco_conn *conn)
        } else {
                sco_conn_lock(conn);
 
-               parent = sco_get_sock_listen(conn->src);
+               parent = sco_get_sock_listen(&conn->hcon->src);
                if (!parent) {
                        sco_conn_unlock(conn);
                        return;
@@ -1017,8 +1013,8 @@ static void sco_conn_ready(struct sco_conn *conn)
 
                sco_sock_init(sk, parent);
 
-               bacpy(&bt_sk(sk)->src, conn->src);
-               bacpy(&bt_sk(sk)->dst, conn->dst);
+               bacpy(&sco_pi(sk)->src, &conn->hcon->src);
+               bacpy(&sco_pi(sk)->dst, &conn->hcon->dst);
 
                hci_conn_hold(conn->hcon);
                __sco_chan_add(conn, sk, parent);
@@ -1051,8 +1047,8 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
                if (sk->sk_state != BT_LISTEN)
                        continue;
 
-               if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
-                   !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+               if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) ||
+                   !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) {
                        lm |= HCI_LM_ACCEPT;
 
                        if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
@@ -1111,8 +1107,8 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
        read_lock(&sco_sk_list.lock);
 
        sk_for_each(sk, &sco_sk_list.head) {
-               seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
-                          &bt_sk(sk)->dst, sk->sk_state);
+               seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src,
+                          &sco_pi(sk)->dst, sk->sk_state);
        }
 
        read_unlock(&sco_sk_list.lock);
@@ -1181,15 +1177,14 @@ int __init sco_init(void)
                goto error;
        }
 
-       if (bt_debugfs) {
-               sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
-                                                 NULL, &sco_debugfs_fops);
-               if (!sco_debugfs)
-                       BT_ERR("Failed to create SCO debug file");
-       }
-
        BT_INFO("SCO socket layer initialized");
 
+       if (IS_ERR_OR_NULL(bt_debugfs))
+               return 0;
+
+       sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
+                                         NULL, &sco_debugfs_fops);
+
        return 0;
 
 error:
index b5562abdd6e0c84e4a2f52f3175c8046ce70751c..85a2796cac61bcc423ef724f1911f2a794667aff 100644 (file)
@@ -28,7 +28,8 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 #include <net/bluetooth/mgmt.h>
-#include <net/bluetooth/smp.h>
+
+#include "smp.h"
 
 #define SMP_TIMEOUT    msecs_to_jiffies(30000)
 
@@ -85,8 +86,8 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
 }
 
 static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
-               u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
-               u8 _rat, bdaddr_t *ra, u8 res[16])
+                 u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
+                 u8 _rat, bdaddr_t *ra, u8 res[16])
 {
        u8 p1[16], p2[16];
        int err;
@@ -126,8 +127,8 @@ static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
        return err;
 }
 
-static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16],
-                       u8 r1[16], u8 r2[16], u8 _r[16])
+static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16], u8 r1[16],
+                 u8 r2[16], u8 _r[16])
 {
        int err;
 
@@ -150,7 +151,7 @@ static int smp_rand(u8 *buf)
 }
 
 static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
-                                               u16 dlen, void *data)
+                                    u16 dlen, void *data)
 {
        struct sk_buff *skb;
        struct l2cap_hdr *lh;
@@ -213,9 +214,8 @@ static __u8 seclevel_to_authreq(__u8 sec_level)
 }
 
 static void build_pairing_cmd(struct l2cap_conn *conn,
-                               struct smp_cmd_pairing *req,
-                               struct smp_cmd_pairing *rsp,
-                               __u8 authreq)
+                             struct smp_cmd_pairing *req,
+                             struct smp_cmd_pairing *rsp, __u8 authreq)
 {
        u8 dist_keys = 0;
 
@@ -249,7 +249,7 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
        struct smp_chan *smp = conn->smp_chan;
 
        if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
-                       (max_key_size < SMP_MIN_ENC_KEY_SIZE))
+           (max_key_size < SMP_MIN_ENC_KEY_SIZE))
                return SMP_ENC_KEY_SIZE;
 
        smp->enc_key_size = max_key_size;
@@ -263,15 +263,15 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send)
 
        if (send)
                smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
-                                                               &reason);
+                            &reason);
 
-       clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->hcon->flags);
-       mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type,
-                        hcon->dst_type, HCI_ERROR_AUTH_FAILURE);
+       clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags);
+       mgmt_auth_failed(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type,
+                        HCI_ERROR_AUTH_FAILURE);
 
        cancel_delayed_work_sync(&conn->security_timer);
 
-       if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
+       if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
                smp_chan_destroy(conn);
 }
 
@@ -309,8 +309,8 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
        /* If either side has unknown io_caps, use JUST WORKS */
        /* Otherwise, look up method from the table */
        if (!(auth & SMP_AUTH_MITM) ||
-                       local_io > SMP_IO_KEYBOARD_DISPLAY ||
-                       remote_io > SMP_IO_KEYBOARD_DISPLAY)
+           local_io > SMP_IO_KEYBOARD_DISPLAY ||
+           remote_io > SMP_IO_KEYBOARD_DISPLAY)
                method = JUST_WORKS;
        else
                method = gen_method[remote_io][local_io];
@@ -354,10 +354,10 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
        hci_dev_lock(hcon->hdev);
 
        if (method == REQ_PASSKEY)
-               ret = mgmt_user_passkey_request(hcon->hdev, conn->dst,
+               ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst,
                                                hcon->type, hcon->dst_type);
        else
-               ret = mgmt_user_confirm_request(hcon->hdev, conn->dst,
+               ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst,
                                                hcon->type, hcon->dst_type,
                                                cpu_to_le32(passkey), 0);
 
@@ -386,12 +386,13 @@ static void confirm_work(struct work_struct *work)
        smp->tfm = tfm;
 
        if (conn->hcon->out)
-               ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp, 0,
-                            conn->src, conn->hcon->dst_type, conn->dst, res);
+               ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
+                            conn->hcon->src_type, &conn->hcon->src,
+                            conn->hcon->dst_type, &conn->hcon->dst, res);
        else
                ret = smp_c1(tfm, smp->tk, smp->prnd, smp->preq, smp->prsp,
-                            conn->hcon->dst_type, conn->dst, 0, conn->src,
-                            res);
+                            conn->hcon->dst_type, &conn->hcon->dst,
+                            conn->hcon->src_type, &conn->hcon->src, res);
        if (ret) {
                reason = SMP_UNSPECIFIED;
                goto error;
@@ -425,11 +426,13 @@ static void random_work(struct work_struct *work)
        BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
 
        if (hcon->out)
-               ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp, 0,
-                            conn->src, hcon->dst_type, conn->dst, res);
+               ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
+                            hcon->src_type, &hcon->src,
+                            hcon->dst_type, &hcon->dst, res);
        else
                ret = smp_c1(tfm, smp->tk, smp->rrnd, smp->preq, smp->prsp,
-                            hcon->dst_type, conn->dst, 0, conn->src, res);
+                            hcon->dst_type, &hcon->dst,
+                            hcon->src_type, &hcon->src, res);
        if (ret) {
                reason = SMP_UNSPECIFIED;
                goto error;
@@ -477,9 +480,9 @@ static void random_work(struct work_struct *work)
                swap128(key, stk);
 
                memset(stk + smp->enc_key_size, 0,
-                               SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
+                      SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
 
-               hci_add_ltk(hcon->hdev, conn->dst, hcon->dst_type,
+               hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
                            HCI_SMP_STK_SLAVE, 0, 0, stk, smp->enc_key_size,
                            ediv, rand);
        }
@@ -494,7 +497,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
 {
        struct smp_chan *smp;
 
-       smp = kzalloc(sizeof(struct smp_chan), GFP_ATOMIC);
+       smp = kzalloc(sizeof(*smp), GFP_ATOMIC);
        if (!smp)
                return NULL;
 
@@ -649,7 +652,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
        memcpy(&smp->prsp[1], rsp, sizeof(*rsp));
 
        if ((req->auth_req & SMP_AUTH_BONDING) &&
-                       (rsp->auth_req & SMP_AUTH_BONDING))
+           (rsp->auth_req & SMP_AUTH_BONDING))
                auth = SMP_AUTH_BONDING;
 
        auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
@@ -684,7 +687,7 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
 
                swap128(smp->prnd, random);
                smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
-                                                               random);
+                            random);
        } else if (test_bit(SMP_FLAG_TK_VALID, &smp->smp_flags)) {
                queue_work(hdev->workqueue, &smp->confirm);
        } else {
@@ -714,7 +717,7 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
        struct smp_ltk *key;
        struct hci_conn *hcon = conn->hcon;
 
-       key = hci_find_ltk_by_addr(hcon->hdev, conn->dst, hcon->dst_type);
+       key = hci_find_ltk_by_addr(hcon->hdev, &hcon->dst, hcon->dst_type);
        if (!key)
                return 0;
 
@@ -728,8 +731,8 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
        hcon->enc_key_size = key->enc_size;
 
        return 1;
-
 }
+
 static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
 {
        struct smp_cmd_security_req *rp = (void *) skb->data;
@@ -835,9 +838,9 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
        skb_pull(skb, sizeof(*rp));
 
        hci_dev_lock(hdev);
-       authenticated = (conn->hcon->sec_level == BT_SECURITY_HIGH);
-       hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
-                   HCI_SMP_LTK, 1, authenticated, smp->tk, smp->enc_key_size,
+       authenticated = (hcon->sec_level == BT_SECURITY_HIGH);
+       hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, HCI_SMP_LTK, 1,
+                   authenticated, smp->tk, smp->enc_key_size,
                    rp->ediv, rp->rand);
        smp_distribute_keys(conn, 1);
        hci_dev_unlock(hdev);
@@ -847,16 +850,27 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
 
 int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
 {
-       __u8 code = skb->data[0];
-       __u8 reason;
+       struct hci_conn *hcon = conn->hcon;
+       __u8 code, reason;
        int err = 0;
 
-       if (!test_bit(HCI_LE_ENABLED, &conn->hcon->hdev->dev_flags)) {
+       if (hcon->type != LE_LINK) {
+               kfree_skb(skb);
+               return 0;
+       }
+
+       if (skb->len < 1) {
+               kfree_skb(skb);
+               return -EILSEQ;
+       }
+
+       if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
                err = -ENOTSUPP;
                reason = SMP_PAIRING_NOTSUPP;
                goto done;
        }
 
+       code = skb->data[0];
        skb_pull(skb, sizeof(code));
 
        /*
@@ -974,7 +988,7 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
                smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
 
                authenticated = hcon->sec_level == BT_SECURITY_HIGH;
-               hci_add_ltk(conn->hcon->hdev, conn->dst, hcon->dst_type,
+               hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
                            HCI_SMP_LTK_SLAVE, 1, authenticated,
                            enc.ltk, smp->enc_key_size, ediv, ident.rand);
 
@@ -996,10 +1010,10 @@ int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
 
                /* Just public address */
                memset(&addrinfo, 0, sizeof(addrinfo));
-               bacpy(&addrinfo.bdaddr, conn->src);
+               bacpy(&addrinfo.bdaddr, &conn->hcon->src);
 
                smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
-                                                               &addrinfo);
+                            &addrinfo);
 
                *keydist &= ~SMP_DIST_ID_KEY;
        }
index ffd5874f25920a94c74f5d97ebf4a0e2aa77f48d..33e8f23acddd9ca2c817913753142ce063c1fb9e 100644 (file)
@@ -700,7 +700,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 
                vid = nla_get_u16(tb[NDA_VLAN]);
 
-               if (vid >= VLAN_N_VID) {
+               if (!vid || vid >= VLAN_VID_MASK) {
                        pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
                                vid);
                        return -EINVAL;
@@ -794,7 +794,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 
                vid = nla_get_u16(tb[NDA_VLAN]);
 
-               if (vid >= VLAN_N_VID) {
+               if (!vid || vid >= VLAN_VID_MASK) {
                        pr_info("bridge: RTM_NEWNEIGH with invalid vlan id %d\n",
                                vid);
                        return -EINVAL;
index 85a09bb5ca51b864cd671c63746d65dfd531ed79..b7b1914dfa252a3731a26e2bcf2be3afc5171661 100644 (file)
@@ -453,7 +453,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
                err = 0;
 
-               if (!mp->ports && !mp->mglist && mp->timer_armed &&
+               if (!mp->ports && !mp->mglist &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
                break;
index d1c5786306784a7353b845cad60e2d39abec0f72..0513ef3ce6673dbc62ab4ff8dd8d28af6a0f6478 100644 (file)
@@ -272,7 +272,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
                del_timer(&p->timer);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-               if (!mp->ports && !mp->mglist && mp->timer_armed &&
+               if (!mp->ports && !mp->mglist &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);
 
@@ -363,7 +363,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
-       memcpy(eth->h_source, br->dev->dev_addr, 6);
+       memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
        eth->h_dest[0] = 1;
        eth->h_dest[1] = 0;
        eth->h_dest[2] = 0x5e;
@@ -433,7 +433,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
-       memcpy(eth->h_source, br->dev->dev_addr, 6);
+       memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
        eth->h_proto = htons(ETH_P_IPV6);
        skb_put(skb, sizeof(*eth));
 
@@ -620,7 +620,6 @@ rehash:
 
        mp->br = br;
        mp->addr = *group;
-
        setup_timer(&mp->timer, br_multicast_group_expired,
                    (unsigned long)mp);
 
@@ -660,6 +659,7 @@ static int br_multicast_add_group(struct net_bridge *br,
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;
+       unsigned long now = jiffies;
        int err;
 
        spin_lock(&br->multicast_lock);
@@ -674,6 +674,7 @@ static int br_multicast_add_group(struct net_bridge *br,
 
        if (!port) {
                mp->mglist = true;
+               mod_timer(&mp->timer, now + br->multicast_membership_interval);
                goto out;
        }
 
@@ -681,7 +682,7 @@ static int br_multicast_add_group(struct net_bridge *br,
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p->port == port)
-                       goto out;
+                       goto found;
                if ((unsigned long)p->port < (unsigned long)port)
                        break;
        }
@@ -692,6 +693,8 @@ static int br_multicast_add_group(struct net_bridge *br,
        rcu_assign_pointer(*pp, p);
        br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
 
+found:
+       mod_timer(&p->timer, now + br->multicast_membership_interval);
 out:
        err = 0;
 
@@ -1191,9 +1194,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
        if (!mp)
                goto out;
 
-       mod_timer(&mp->timer, now + br->multicast_membership_interval);
-       mp->timer_armed = true;
-
        max_delay *= br->multicast_last_member_count;
 
        if (mp->mglist &&
@@ -1270,9 +1270,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
        if (!mp)
                goto out;
 
-       mod_timer(&mp->timer, now + br->multicast_membership_interval);
-       mp->timer_armed = true;
-
        max_delay *= br->multicast_last_member_count;
        if (mp->mglist &&
            (timer_pending(&mp->timer) ?
@@ -1358,7 +1355,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
                        call_rcu_bh(&p->rcu, br_multicast_free_pg);
                        br_mdb_notify(br->dev, port, group, RTM_DELMDB);
 
-                       if (!mp->ports && !mp->mglist && mp->timer_armed &&
+                       if (!mp->ports && !mp->mglist &&
                            netif_running(br->dev))
                                mod_timer(&mp->timer, jiffies);
                }
@@ -1370,12 +1367,30 @@ static void br_multicast_leave_group(struct net_bridge *br,
                     br->multicast_last_member_interval;
 
        if (!port) {
-               if (mp->mglist && mp->timer_armed &&
+               if (mp->mglist &&
                    (timer_pending(&mp->timer) ?
                     time_after(mp->timer.expires, time) :
                     try_to_del_timer_sync(&mp->timer) >= 0)) {
                        mod_timer(&mp->timer, time);
                }
+
+               goto out;
+       }
+
+       for (p = mlock_dereference(mp->ports, br);
+            p != NULL;
+            p = mlock_dereference(p->next, br)) {
+               if (p->port != port)
+                       continue;
+
+               if (!hlist_unhashed(&p->mglist) &&
+                   (timer_pending(&p->timer) ?
+                    time_after(p->timer.expires, time) :
+                    try_to_del_timer_sync(&p->timer) >= 0)) {
+                       mod_timer(&p->timer, time);
+               }
+
+               break;
        }
 out:
        spin_unlock(&br->multicast_lock);
@@ -1798,7 +1813,6 @@ void br_multicast_stop(struct net_bridge *br)
                hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
                                          hlist[ver]) {
                        del_timer(&mp->timer);
-                       mp->timer_armed = false;
                        call_rcu_bh(&mp->rcu, br_multicast_free_group);
                }
        }
index f87736270eaa875ba0060048a38cfabacfd29444..878f008afefac63bd102f948986d2f89e3fd446a 100644 (file)
@@ -619,7 +619,7 @@ bad:
 
 /* Replicate the checks that IPv6 does on packet reception and pass the packet
  * to ip6tables, which doesn't support NAT, so things are fairly simple. */
-static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
+static unsigned int br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
                                           const struct net_device *in,
                                           const struct net_device *out,
@@ -669,7 +669,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
  * receiving device) to make netfilter happy, the REDIRECT
  * target in particular.  Save the original destination IP
  * address to be able to detect DNAT afterwards. */
-static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
+                                     struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
@@ -691,7 +692,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
                        return NF_ACCEPT;
 
                nf_bridge_pull_encap_header_rcsum(skb);
-               return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
+               return br_nf_pre_routing_ipv6(ops, skb, in, out, okfn);
        }
 
        if (!brnf_call_iptables && !br->nf_call_iptables)
@@ -727,7 +728,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
  * took place when the packet entered the bridge), but we
  * register an IPv4 PRE_ROUTING 'sabotage' hook that will
  * prevent this from happening. */
-static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_local_in(const struct nf_hook_ops *ops,
+                                  struct sk_buff *skb,
                                   const struct net_device *in,
                                   const struct net_device *out,
                                   int (*okfn)(struct sk_buff *))
@@ -765,7 +767,8 @@ static int br_nf_forward_finish(struct sk_buff *skb)
  * but we are still able to filter on the 'real' indev/outdev
  * because of the physdev module. For ARP, indev and outdev are the
  * bridge ports. */
-static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
+                                    struct sk_buff *skb,
                                     const struct net_device *in,
                                     const struct net_device *out,
                                     int (*okfn)(struct sk_buff *))
@@ -818,7 +821,8 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
        return NF_STOLEN;
 }
 
-static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_forward_arp(const struct nf_hook_ops *ops,
+                                     struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
@@ -878,7 +882,8 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb)
 #endif
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
-static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
+static unsigned int br_nf_post_routing(const struct nf_hook_ops *ops,
+                                      struct sk_buff *skb,
                                       const struct net_device *in,
                                       const struct net_device *out,
                                       int (*okfn)(struct sk_buff *))
@@ -923,7 +928,8 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
 /* IP/SABOTAGE *****************************************************/
 /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
  * for the second time. */
-static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
+static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
+                                  struct sk_buff *skb,
                                   const struct net_device *in,
                                   const struct net_device *out,
                                   int (*okfn)(struct sk_buff *))
index e74ddc1c29a8be20f1a093fcc8e27b11bca6f03e..f75d92e4f96b33cec6fd46fc37151224f86ed018 100644 (file)
@@ -243,7 +243,7 @@ static int br_afspec(struct net_bridge *br,
 
                vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
 
-               if (vinfo->vid >= VLAN_N_VID)
+               if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
                        return -EINVAL;
 
                switch (cmd) {
index efb57d91156975b3b43a2afdc588128a2f6e1f79..d1ca6d9566332055591b96979805748a4c76e6c5 100644 (file)
@@ -126,7 +126,6 @@ struct net_bridge_mdb_entry
        struct timer_list               timer;
        struct br_ip                    addr;
        bool                            mglist;
-       bool                            timer_armed;
 };
 
 struct net_bridge_mdb_htable
@@ -344,10 +343,9 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
 }
 
 /* br_device.c */
-extern void br_dev_setup(struct net_device *dev);
-extern void br_dev_delete(struct net_device *dev, struct list_head *list);
-extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
-                              struct net_device *dev);
+void br_dev_setup(struct net_device *dev);
+void br_dev_delete(struct net_device *dev, struct list_head *list);
+netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                                       struct sk_buff *skb)
@@ -358,8 +356,8 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                netpoll_send_skb(np, skb);
 }
 
-extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
-extern void br_netpoll_disable(struct net_bridge_port *p);
+int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
+void br_netpoll_disable(struct net_bridge_port *p);
 #else
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
                                       struct sk_buff *skb)
@@ -377,116 +375,99 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
 #endif
 
 /* br_fdb.c */
-extern int br_fdb_init(void);
-extern void br_fdb_fini(void);
-extern void br_fdb_flush(struct net_bridge *br);
-extern void br_fdb_changeaddr(struct net_bridge_port *p,
-                             const unsigned char *newaddr);
-extern void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
-extern void br_fdb_cleanup(unsigned long arg);
-extern void br_fdb_delete_by_port(struct net_bridge *br,
-                                 const struct net_bridge_port *p, int do_all);
-extern struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
-                                                const unsigned char *addr,
-                                                __u16 vid);
-extern int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
-extern int br_fdb_fillbuf(struct net_bridge *br, void *buf,
-                         unsigned long count, unsigned long off);
-extern int br_fdb_insert(struct net_bridge *br,
-                        struct net_bridge_port *source,
-                        const unsigned char *addr,
-                        u16 vid);
-extern void br_fdb_update(struct net_bridge *br,
-                         struct net_bridge_port *source,
-                         const unsigned char *addr,
-                         u16 vid);
-extern int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
-
-extern int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
-                        struct net_device *dev,
-                        const unsigned char *addr);
-extern int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[],
-                     struct net_device *dev,
-                     const unsigned char *addr,
-                     u16 nlh_flags);
-extern int br_fdb_dump(struct sk_buff *skb,
-                      struct netlink_callback *cb,
-                      struct net_device *dev,
-                      int idx);
+int br_fdb_init(void);
+void br_fdb_fini(void);
+void br_fdb_flush(struct net_bridge *br);
+void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
+void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
+void br_fdb_cleanup(unsigned long arg);
+void br_fdb_delete_by_port(struct net_bridge *br,
+                          const struct net_bridge_port *p, int do_all);
+struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
+                                         const unsigned char *addr, __u16 vid);
+int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
+int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
+                  unsigned long off);
+int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
+                 const unsigned char *addr, u16 vid);
+void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
+                  const unsigned char *addr, u16 vid);
+int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
+
+int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+                 struct net_device *dev, const unsigned char *addr);
+int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
+              const unsigned char *addr, u16 nlh_flags);
+int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+               struct net_device *dev, int idx);
 
 /* br_forward.c */
-extern void br_deliver(const struct net_bridge_port *to,
-               struct sk_buff *skb);
-extern int br_dev_queue_push_xmit(struct sk_buff *skb);
-extern void br_forward(const struct net_bridge_port *to,
+void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb);
+int br_dev_queue_push_xmit(struct sk_buff *skb);
+void br_forward(const struct net_bridge_port *to,
                struct sk_buff *skb, struct sk_buff *skb0);
-extern int br_forward_finish(struct sk_buff *skb);
-extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb,
-                            bool unicast);
-extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
-                            struct sk_buff *skb2, bool unicast);
+int br_forward_finish(struct sk_buff *skb);
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast);
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+                     struct sk_buff *skb2, bool unicast);
 
 /* br_if.c */
-extern void br_port_carrier_check(struct net_bridge_port *p);
-extern int br_add_bridge(struct net *net, const char *name);
-extern int br_del_bridge(struct net *net, const char *name);
-extern void br_net_exit(struct net *net);
-extern int br_add_if(struct net_bridge *br,
-             struct net_device *dev);
-extern int br_del_if(struct net_bridge *br,
-             struct net_device *dev);
-extern int br_min_mtu(const struct net_bridge *br);
-extern netdev_features_t br_features_recompute(struct net_bridge *br,
-       netdev_features_t features);
+void br_port_carrier_check(struct net_bridge_port *p);
+int br_add_bridge(struct net *net, const char *name);
+int br_del_bridge(struct net *net, const char *name);
+void br_net_exit(struct net *net);
+int br_add_if(struct net_bridge *br, struct net_device *dev);
+int br_del_if(struct net_bridge *br, struct net_device *dev);
+int br_min_mtu(const struct net_bridge *br);
+netdev_features_t br_features_recompute(struct net_bridge *br,
+                                       netdev_features_t features);
 
 /* br_input.c */
-extern int br_handle_frame_finish(struct sk_buff *skb);
-extern rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
+int br_handle_frame_finish(struct sk_buff *skb);
+rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
 
 /* br_ioctl.c */
-extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
+int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
+                            void __user *arg);
 
 /* br_multicast.c */
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 extern unsigned int br_mdb_rehash_seq;
-extern int br_multicast_rcv(struct net_bridge *br,
-                           struct net_bridge_port *port,
-                           struct sk_buff *skb);
-extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
-                                              struct sk_buff *skb, u16 vid);
-extern void br_multicast_add_port(struct net_bridge_port *port);
-extern void br_multicast_del_port(struct net_bridge_port *port);
-extern void br_multicast_enable_port(struct net_bridge_port *port);
-extern void br_multicast_disable_port(struct net_bridge_port *port);
-extern void br_multicast_init(struct net_bridge *br);
-extern void br_multicast_open(struct net_bridge *br);
-extern void br_multicast_stop(struct net_bridge *br);
-extern void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
-                                struct sk_buff *skb);
-extern void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
-                                struct sk_buff *skb, struct sk_buff *skb2);
-extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_port_router(struct net_bridge_port *p,
-                                       unsigned long val);
-extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
-extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
-extern struct net_bridge_mdb_entry *br_mdb_ip_get(
-                               struct net_bridge_mdb_htable *mdb,
-                               struct br_ip *dst);
-extern struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
-                               struct net_bridge_port *port, struct br_ip *group);
-extern void br_multicast_free_pg(struct rcu_head *head);
-extern struct net_bridge_port_group *br_multicast_new_port_group(
-                               struct net_bridge_port *port,
-                               struct br_ip *group,
-                               struct net_bridge_port_group __rcu *next,
-                               unsigned char state);
-extern void br_mdb_init(void);
-extern void br_mdb_uninit(void);
-extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-                         struct br_ip *group, int type);
+int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
+                    struct sk_buff *skb);
+struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
+                                       struct sk_buff *skb, u16 vid);
+void br_multicast_add_port(struct net_bridge_port *port);
+void br_multicast_del_port(struct net_bridge_port *port);
+void br_multicast_enable_port(struct net_bridge_port *port);
+void br_multicast_disable_port(struct net_bridge_port *port);
+void br_multicast_init(struct net_bridge *br);
+void br_multicast_open(struct net_bridge *br);
+void br_multicast_stop(struct net_bridge *br);
+void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+                         struct sk_buff *skb);
+void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+                         struct sk_buff *skb, struct sk_buff *skb2);
+int br_multicast_set_router(struct net_bridge *br, unsigned long val);
+int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val);
+int br_multicast_toggle(struct net_bridge *br, unsigned long val);
+int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
+int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
+struct net_bridge_mdb_entry *
+br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, struct br_ip *dst);
+struct net_bridge_mdb_entry *
+br_multicast_new_group(struct net_bridge *br, struct net_bridge_port *port,
+                      struct br_ip *group);
+void br_multicast_free_pg(struct rcu_head *head);
+struct net_bridge_port_group *
+br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
+                           struct net_bridge_port_group __rcu *next,
+                           unsigned char state);
+void br_mdb_init(void);
+void br_mdb_uninit(void);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+                  struct br_ip *group, int type);
 
 #define mlock_dereference(X, br) \
        rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -591,22 +572,21 @@ static inline void br_mdb_uninit(void)
 
 /* br_vlan.c */
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
-extern bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
-                              struct sk_buff *skb, u16 *vid);
-extern bool br_allowed_egress(struct net_bridge *br,
-                             const struct net_port_vlans *v,
-                             const struct sk_buff *skb);
-extern struct sk_buff *br_handle_vlan(struct net_bridge *br,
-                                     const struct net_port_vlans *v,
-                                     struct sk_buff *skb);
-extern int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
-extern int br_vlan_delete(struct net_bridge *br, u16 vid);
-extern void br_vlan_flush(struct net_bridge *br);
-extern int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
-extern int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
-extern int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
-extern void nbp_vlan_flush(struct net_bridge_port *port);
-extern bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
+bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
+                       struct sk_buff *skb, u16 *vid);
+bool br_allowed_egress(struct net_bridge *br, const struct net_port_vlans *v,
+                      const struct sk_buff *skb);
+struct sk_buff *br_handle_vlan(struct net_bridge *br,
+                              const struct net_port_vlans *v,
+                              struct sk_buff *skb);
+int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
+int br_vlan_delete(struct net_bridge *br, u16 vid);
+void br_vlan_flush(struct net_bridge *br);
+int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
+int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
+int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
+void nbp_vlan_flush(struct net_bridge_port *port);
+bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
 
 static inline struct net_port_vlans *br_get_vlan_info(
                                                const struct net_bridge *br)
@@ -643,9 +623,7 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
         * vid wasn't set
         */
        smp_rmb();
-       return (v->pvid & VLAN_TAG_PRESENT) ?
-                       (v->pvid & ~VLAN_TAG_PRESENT) :
-                       VLAN_N_VID;
+       return v->pvid ?: VLAN_N_VID;
 }
 
 #else
@@ -727,9 +705,9 @@ static inline u16 br_get_pvid(const struct net_port_vlans *v)
 
 /* br_netfilter.c */
 #ifdef CONFIG_BRIDGE_NETFILTER
-extern int br_netfilter_init(void);
-extern void br_netfilter_fini(void);
-extern void br_netfilter_rtable_init(struct net_bridge *);
+int br_netfilter_init(void);
+void br_netfilter_fini(void);
+void br_netfilter_rtable_init(struct net_bridge *);
 #else
 #define br_netfilter_init()    (0)
 #define br_netfilter_fini()    do { } while(0)
@@ -737,43 +715,39 @@ extern void br_netfilter_rtable_init(struct net_bridge *);
 #endif
 
 /* br_stp.c */
-extern void br_log_state(const struct net_bridge_port *p);
-extern struct net_bridge_port *br_get_port(struct net_bridge *br,
-                                          u16 port_no);
-extern void br_init_port(struct net_bridge_port *p);
-extern void br_become_designated_port(struct net_bridge_port *p);
+void br_log_state(const struct net_bridge_port *p);
+struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no);
+void br_init_port(struct net_bridge_port *p);
+void br_become_designated_port(struct net_bridge_port *p);
 
-extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
-extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
-extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
-extern int br_set_max_age(struct net_bridge *br, unsigned long x);
+void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
+int br_set_forward_delay(struct net_bridge *br, unsigned long x);
+int br_set_hello_time(struct net_bridge *br, unsigned long x);
+int br_set_max_age(struct net_bridge *br, unsigned long x);
 
 
 /* br_stp_if.c */
-extern void br_stp_enable_bridge(struct net_bridge *br);
-extern void br_stp_disable_bridge(struct net_bridge *br);
-extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
-extern void br_stp_enable_port(struct net_bridge_port *p);
-extern void br_stp_disable_port(struct net_bridge_port *p);
-extern bool br_stp_recalculate_bridge_id(struct net_bridge *br);
-extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
-extern void br_stp_set_bridge_priority(struct net_bridge *br,
-                                      u16 newprio);
-extern int br_stp_set_port_priority(struct net_bridge_port *p,
-                                   unsigned long newprio);
-extern int br_stp_set_path_cost(struct net_bridge_port *p,
-                               unsigned long path_cost);
-extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
+void br_stp_enable_bridge(struct net_bridge *br);
+void br_stp_disable_bridge(struct net_bridge *br);
+void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
+void br_stp_enable_port(struct net_bridge_port *p);
+void br_stp_disable_port(struct net_bridge_port *p);
+bool br_stp_recalculate_bridge_id(struct net_bridge *br);
+void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
+void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio);
+int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio);
+int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost);
+ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
 
 /* br_stp_bpdu.c */
 struct stp_proto;
-extern void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
-                      struct net_device *dev);
+void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
+               struct net_device *dev);
 
 /* br_stp_timer.c */
-extern void br_stp_timer_init(struct net_bridge *br);
-extern void br_stp_port_timer_init(struct net_bridge_port *p);
-extern unsigned long br_timer_value(const struct timer_list *timer);
+void br_stp_timer_init(struct net_bridge *br);
+void br_stp_port_timer_init(struct net_bridge_port *p);
+unsigned long br_timer_value(const struct timer_list *timer);
 
 /* br.c */
 #if IS_ENABLED(CONFIG_ATM_LANE)
@@ -782,23 +756,23 @@ extern int (*br_fdb_test_addr_hook)(struct net_device *dev, unsigned char *addr)
 
 /* br_netlink.c */
 extern struct rtnl_link_ops br_link_ops;
-extern int br_netlink_init(void);
-extern void br_netlink_fini(void);
-extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
-extern int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
-extern int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
-extern int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                     struct net_device *dev, u32 filter_mask);
+int br_netlink_init(void);
+void br_netlink_fini(void);
+void br_ifinfo_notify(int event, struct net_bridge_port *port);
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg);
+int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg);
+int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
+              u32 filter_mask);
 
 #ifdef CONFIG_SYSFS
 /* br_sysfs_if.c */
 extern const struct sysfs_ops brport_sysfs_ops;
-extern int br_sysfs_addif(struct net_bridge_port *p);
-extern int br_sysfs_renameif(struct net_bridge_port *p);
+int br_sysfs_addif(struct net_bridge_port *p);
+int br_sysfs_renameif(struct net_bridge_port *p);
 
 /* br_sysfs_br.c */
-extern int br_sysfs_addbr(struct net_device *dev);
-extern void br_sysfs_delbr(struct net_device *dev);
+int br_sysfs_addbr(struct net_device *dev);
+void br_sysfs_delbr(struct net_device *dev);
 
 #else
 
index 0c0fe36e7aa92eebd67e3e91d07c440a5310090b..2fe910c4e1700776c33275f0ae277d354e348f6c 100644 (file)
@@ -51,19 +51,19 @@ static inline int br_is_designated_port(const struct net_bridge_port *p)
 
 
 /* br_stp.c */
-extern void br_become_root_bridge(struct net_bridge *br);
-extern void br_config_bpdu_generation(struct net_bridge *);
-extern void br_configuration_update(struct net_bridge *);
-extern void br_port_state_selection(struct net_bridge *);
-extern void br_received_config_bpdu(struct net_bridge_port *p,
-                                   const struct br_config_bpdu *bpdu);
-extern void br_received_tcn_bpdu(struct net_bridge_port *p);
-extern void br_transmit_config(struct net_bridge_port *p);
-extern void br_transmit_tcn(struct net_bridge *br);
-extern void br_topology_change_detection(struct net_bridge *br);
+void br_become_root_bridge(struct net_bridge *br);
+void br_config_bpdu_generation(struct net_bridge *);
+void br_configuration_update(struct net_bridge *);
+void br_port_state_selection(struct net_bridge *);
+void br_received_config_bpdu(struct net_bridge_port *p,
+                            const struct br_config_bpdu *bpdu);
+void br_received_tcn_bpdu(struct net_bridge_port *p);
+void br_transmit_config(struct net_bridge_port *p);
+void br_transmit_tcn(struct net_bridge *br);
+void br_topology_change_detection(struct net_bridge *br);
 
 /* br_stp_bpdu.c */
-extern void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *);
-extern void br_send_tcn_bpdu(struct net_bridge_port *);
+void br_send_config_bpdu(struct net_bridge_port *, struct br_config_bpdu *);
+void br_send_tcn_bpdu(struct net_bridge_port *);
 
 #endif
index 108084a0467160e30eb001cd2dbb326fdc4dadd3..656a6f3e40de1b13b9ea7a89373da5d5615e5bb2 100644 (file)
@@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br)
 
        if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
                __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
-       else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY)
+       else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
                __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
 
        if (r == 0) {
index 9a9ffe7e4019741d75456e3b9afdba21c44785b3..53f0990eab58e08a3da9160a27ee0834bd0f0272 100644 (file)
@@ -45,37 +45,34 @@ static int __vlan_add(struct net_port_vlans *v, u16 vid, u16 flags)
                return 0;
        }
 
-       if (vid) {
-               if (v->port_idx) {
-                       p = v->parent.port;
-                       br = p->br;
-                       dev = p->dev;
-               } else {
-                       br = v->parent.br;
-                       dev = br->dev;
-               }
-               ops = dev->netdev_ops;
-
-               if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
-                       /* Add VLAN to the device filter if it is supported.
-                        * Stricly speaking, this is not necessary now, since
-                        * devices are made promiscuous by the bridge, but if
-                        * that ever changes this code will allow tagged
-                        * traffic to enter the bridge.
-                        */
-                       err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
-                                                      vid);
-                       if (err)
-                               return err;
-               }
-
-               err = br_fdb_insert(br, p, dev->dev_addr, vid);
-               if (err) {
-                       br_err(br, "failed insert local address into bridge "
-                              "forwarding table\n");
-                       goto out_filt;
-               }
+       if (v->port_idx) {
+               p = v->parent.port;
+               br = p->br;
+               dev = p->dev;
+       } else {
+               br = v->parent.br;
+               dev = br->dev;
+       }
+       ops = dev->netdev_ops;
+
+       if (p && (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+               /* Add VLAN to the device filter if it is supported.
+                * Stricly speaking, this is not necessary now, since
+                * devices are made promiscuous by the bridge, but if
+                * that ever changes this code will allow tagged
+                * traffic to enter the bridge.
+                */
+               err = ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q),
+                                              vid);
+               if (err)
+                       return err;
+       }
 
+       err = br_fdb_insert(br, p, dev->dev_addr, vid);
+       if (err) {
+               br_err(br, "failed insert local address into bridge "
+                      "forwarding table\n");
+               goto out_filt;
        }
 
        set_bit(vid, v->vlan_bitmap);
@@ -98,7 +95,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
        __vlan_delete_pvid(v, vid);
        clear_bit(vid, v->untagged_bitmap);
 
-       if (v->port_idx && vid) {
+       if (v->port_idx) {
                struct net_device *dev = v->parent.port->dev;
                const struct net_device_ops *ops = dev->netdev_ops;
 
@@ -192,6 +189,8 @@ out:
 bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
                        struct sk_buff *skb, u16 *vid)
 {
+       int err;
+
        /* If VLAN filtering is disabled on the bridge, all packets are
         * permitted.
         */
@@ -204,20 +203,32 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
        if (!v)
                return false;
 
-       if (br_vlan_get_tag(skb, vid)) {
+       err = br_vlan_get_tag(skb, vid);
+       if (!*vid) {
                u16 pvid = br_get_pvid(v);
 
-               /* Frame did not have a tag.  See if pvid is set
-                * on this port.  That tells us which vlan untagged
-                * traffic belongs to.
+               /* Frame had a tag with VID 0 or did not have a tag.
+                * See if pvid is set on this port.  That tells us which
+                * vlan untagged or priority-tagged traffic belongs to.
                 */
                if (pvid == VLAN_N_VID)
                        return false;
 
-               /* PVID is set on this port.  Any untagged ingress
-                * frame is considered to belong to this vlan.
+               /* PVID is set on this port.  Any untagged or priority-tagged
+                * ingress frame is considered to belong to this vlan.
                 */
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
+               *vid = pvid;
+               if (likely(err))
+                       /* Untagged Frame. */
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), pvid);
+               else
+                       /* Priority-tagged Frame.
+                        * At this point, We know that skb->vlan_tci had
+                        * VLAN_TAG_PRESENT bit and its VID field was 0x000.
+                        * We update only VID field and preserve PCP field.
+                        */
+                       skb->vlan_tci |= pvid;
+
                return true;
        }
 
@@ -248,7 +259,9 @@ bool br_allowed_egress(struct net_bridge *br,
        return false;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
 {
        struct net_port_vlans *pv = NULL;
@@ -278,7 +291,9 @@ out:
        return err;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int br_vlan_delete(struct net_bridge *br, u16 vid)
 {
        struct net_port_vlans *pv;
@@ -289,14 +304,9 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
        if (!pv)
                return -EINVAL;
 
-       if (vid) {
-               /* If the VID !=0 remove fdb for this vid. VID 0 is special
-                * in that it's the default and is always there in the fdb.
-                */
-               spin_lock_bh(&br->hash_lock);
-               fdb_delete_by_addr(br, br->dev->dev_addr, vid);
-               spin_unlock_bh(&br->hash_lock);
-       }
+       spin_lock_bh(&br->hash_lock);
+       fdb_delete_by_addr(br, br->dev->dev_addr, vid);
+       spin_unlock_bh(&br->hash_lock);
 
        __vlan_del(pv, vid);
        return 0;
@@ -329,7 +339,9 @@ unlock:
        return 0;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
 {
        struct net_port_vlans *pv = NULL;
@@ -363,7 +375,9 @@ clean_up:
        return err;
 }
 
-/* Must be protected by RTNL */
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
 {
        struct net_port_vlans *pv;
@@ -374,14 +388,9 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
        if (!pv)
                return -EINVAL;
 
-       if (vid) {
-               /* If the VID !=0 remove fdb for this vid. VID 0 is special
-                * in that it's the default and is always there in the fdb.
-                */
-               spin_lock_bh(&port->br->hash_lock);
-               fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
-               spin_unlock_bh(&port->br->hash_lock);
-       }
+       spin_lock_bh(&port->br->hash_lock);
+       fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
+       spin_unlock_bh(&port->br->hash_lock);
 
        return __vlan_del(pv, vid);
 }
index a9aff9c7d0273b2a41ef9d122ac38e769956a640..68f8128147be7c9eb2ae6ab9593d999308eea755 100644 (file)
@@ -1,6 +1,9 @@
 #
 # Bridge netfilter configuration
 #
+#
+config NF_TABLES_BRIDGE
+       tristate "Ethernet Bridge nf_tables support"
 
 menuconfig BRIDGE_NF_EBTABLES
        tristate "Ethernet Bridge tables (ebtables) support"
index 0718699540b023fd6d95be217b79e4bfe5029821..ea7629f58b3d1c44e28524df8a0937de3a18546b 100644 (file)
@@ -2,6 +2,8 @@
 # Makefile for the netfilter modules for Link Layer filtering on a bridge.
 #
 
+obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
+
 obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
 
 # tables
index 8b84c581be3082ea4c8a6a21a362a3077ab17751..3fb3c848affef74249a1fd9ed610aea7d1db5764 100644 (file)
@@ -28,7 +28,7 @@ static bool ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
        uint32_t cmp[2] = { 0, 0 };
        int key = ((const unsigned char *)mac)[5];
 
-       memcpy(((char *) cmp) + 2, mac, 6);
+       memcpy(((char *) cmp) + 2, mac, ETH_ALEN);
        start = wh->table[key];
        limit = wh->table[key + 1];
        if (ip) {
index 518093802d1d640f3642e997b1481279fad9b68b..7c470c371e14f82a9734d3da4d0bb0a177169251 100644 (file)
@@ -181,6 +181,7 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
        ub->qlen++;
 
        pm = nlmsg_data(nlh);
+       memset(pm, 0, sizeof(*pm));
 
        /* Fill in the ulog data */
        pm->version = EBT_ULOG_VERSION;
@@ -193,8 +194,6 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
        pm->hook = hooknr;
        if (uloginfo->prefix != NULL)
                strcpy(pm->prefix, uloginfo->prefix);
-       else
-               *(pm->prefix) = '\0';
 
        if (in) {
                strcpy(pm->physindev, in->name);
@@ -204,16 +203,14 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
                        strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
                else
                        strcpy(pm->indev, in->name);
-       } else
-               pm->indev[0] = pm->physindev[0] = '\0';
+       }
 
        if (out) {
                /* If out exists, then out is a bridge port */
                strcpy(pm->physoutdev, out->name);
                /* rcu_read_lock()ed by nf_hook_slow */
                strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
-       } else
-               pm->outdev[0] = pm->physoutdev[0] = '\0';
+       }
 
        if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
                BUG();
index 94b2b700cff8d0de28058497f7b863ce685633a2..bb2da7b706e7214f05fb7d56f87142e5ed5a5667 100644 (file)
@@ -60,17 +60,21 @@ static const struct ebt_table frame_filter =
 };
 
 static unsigned int
-ebt_in_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in,
-   const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_in_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+           const struct net_device *in, const struct net_device *out,
+           int (*okfn)(struct sk_buff *))
 {
-       return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_filter);
+       return ebt_do_table(ops->hooknum, skb, in, out,
+                           dev_net(in)->xt.frame_filter);
 }
 
 static unsigned int
-ebt_out_hook(unsigned int hook, struct sk_buff *skb, const struct net_device *in,
-   const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_out_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
+            const struct net_device *in, const struct net_device *out,
+            int (*okfn)(struct sk_buff *))
 {
-       return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_filter);
+       return ebt_do_table(ops->hooknum, skb, in, out,
+                           dev_net(out)->xt.frame_filter);
 }
 
 static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
index 322555acdd4009aaa677147f7a33e558d82a67cc..bd238f1f105b94ac5f8db9982b7738e255f42630 100644 (file)
@@ -60,17 +60,21 @@ static struct ebt_table frame_nat =
 };
 
 static unsigned int
-ebt_nat_in(unsigned int hook, struct sk_buff *skb, const struct net_device *in
-   , const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_nat_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+          const struct net_device *in, const struct net_device *out,
+          int (*okfn)(struct sk_buff *))
 {
-       return ebt_do_table(hook, skb, in, out, dev_net(in)->xt.frame_nat);
+       return ebt_do_table(ops->hooknum, skb, in, out,
+                           dev_net(in)->xt.frame_nat);
 }
 
 static unsigned int
-ebt_nat_out(unsigned int hook, struct sk_buff *skb, const struct net_device *in
-   , const struct net_device *out, int (*okfn)(struct sk_buff *))
+ebt_nat_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+           const struct net_device *in, const struct net_device *out,
+           int (*okfn)(struct sk_buff *))
 {
-       return ebt_do_table(hook, skb, in, out, dev_net(out)->xt.frame_nat);
+       return ebt_do_table(ops->hooknum, skb, in, out,
+                           dev_net(out)->xt.frame_nat);
 }
 
 static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
new file mode 100644 (file)
index 0000000..e8cb016
--- /dev/null
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter_bridge.h>
+#include <net/netfilter/nf_tables.h>
+
+static struct nft_af_info nft_af_bridge __read_mostly = {
+       .family         = NFPROTO_BRIDGE,
+       .nhooks         = NF_BR_NUMHOOKS,
+       .owner          = THIS_MODULE,
+};
+
+static int nf_tables_bridge_init_net(struct net *net)
+{
+       net->nft.bridge = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+       if (net->nft.bridge == NULL)
+               return -ENOMEM;
+
+       memcpy(net->nft.bridge, &nft_af_bridge, sizeof(nft_af_bridge));
+
+       if (nft_register_afinfo(net, net->nft.bridge) < 0)
+               goto err;
+
+       return 0;
+err:
+       kfree(net->nft.bridge);
+       return -ENOMEM;
+}
+
+static void nf_tables_bridge_exit_net(struct net *net)
+{
+       nft_unregister_afinfo(net->nft.bridge);
+       kfree(net->nft.bridge);
+}
+
+static struct pernet_operations nf_tables_bridge_net_ops = {
+       .init   = nf_tables_bridge_init_net,
+       .exit   = nf_tables_bridge_exit_net,
+};
+
+static int __init nf_tables_bridge_init(void)
+{
+       return register_pernet_subsys(&nf_tables_bridge_net_ops);
+}
+
+static void __exit nf_tables_bridge_exit(void)
+{
+       return unregister_pernet_subsys(&nf_tables_bridge_net_ops);
+}
+
+module_init(nf_tables_bridge_init);
+module_exit(nf_tables_bridge_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_BRIDGE);
index 1dccb4c33894af7074ee85e320e32c035dbfa368..6de58b40535cc309e59f1a61ed624c930b7c7ffe 100644 (file)
@@ -108,9 +108,9 @@ struct s_pstats {
 extern struct dev_rcv_lists can_rx_alldev_list;
 
 /* function prototypes for the CAN networklayer procfs (proc.c) */
-extern void can_init_proc(void);
-extern void can_remove_proc(void);
-extern void can_stat_update(unsigned long data);
+void can_init_proc(void);
+void can_remove_proc(void);
+void can_stat_update(unsigned long data);
 
 /* structures and variables from af_can.c needed in proc.c for reading */
 extern struct timer_list can_stattimer;    /* timer for statistics update */
index ed7d088b1bc92e14372e2f24c8f5016750e07014..059a3ce4b53f48425336bf4b792d2ac6a5c6e849 100644 (file)
@@ -23,7 +23,7 @@ struct ceph_auth_none_info {
        struct ceph_none_authorizer au;   /* we only need one; it's static */
 };
 
-extern int ceph_auth_none_init(struct ceph_auth_client *ac);
+int ceph_auth_none_init(struct ceph_auth_client *ac);
 
 #endif
 
index c5a058da7ac8e7ae6a51f695b2a9d1b9362cdab3..65ee72082d99866d0f98057f15c4f181b21d971c 100644 (file)
@@ -45,7 +45,7 @@ struct ceph_x_info {
        struct ceph_x_authorizer auth_authorizer;
 };
 
-extern int ceph_x_init(struct ceph_auth_client *ac);
+int ceph_x_init(struct ceph_auth_client *ac);
 
 #endif
 
index 3572dc518bc984aece38fb8bd9f8cbe7b1b5c239..d1498224c49d4c6c18e082ea908cb3e5a8fed30c 100644 (file)
@@ -20,34 +20,32 @@ static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
                kfree(key->key);
 }
 
-extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
-                                const struct ceph_crypto_key *src);
-extern int ceph_crypto_key_encode(struct ceph_crypto_key *key,
-                                 void **p, void *end);
-extern int ceph_crypto_key_decode(struct ceph_crypto_key *key,
-                                 void **p, void *end);
-extern int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
+int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
+                         const struct ceph_crypto_key *src);
+int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
+int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
+int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
 
 /* crypto.c */
-extern int ceph_decrypt(struct ceph_crypto_key *secret,
-                       void *dst, size_t *dst_len,
-                       const void *src, size_t src_len);
-extern int ceph_encrypt(struct ceph_crypto_key *secret,
-                       void *dst, size_t *dst_len,
-                       const void *src, size_t src_len);
-extern int ceph_decrypt2(struct ceph_crypto_key *secret,
-                       void *dst1, size_t *dst1_len,
-                       void *dst2, size_t *dst2_len,
-                       const void *src, size_t src_len);
-extern int ceph_encrypt2(struct ceph_crypto_key *secret,
-                        void *dst, size_t *dst_len,
-                        const void *src1, size_t src1_len,
-                        const void *src2, size_t src2_len);
-extern int ceph_crypto_init(void);
-extern void ceph_crypto_shutdown(void);
+int ceph_decrypt(struct ceph_crypto_key *secret,
+                void *dst, size_t *dst_len,
+                const void *src, size_t src_len);
+int ceph_encrypt(struct ceph_crypto_key *secret,
+                void *dst, size_t *dst_len,
+                const void *src, size_t src_len);
+int ceph_decrypt2(struct ceph_crypto_key *secret,
+                 void *dst1, size_t *dst1_len,
+                 void *dst2, size_t *dst2_len,
+                 const void *src, size_t src_len);
+int ceph_encrypt2(struct ceph_crypto_key *secret,
+                 void *dst, size_t *dst_len,
+                 const void *src1, size_t src1_len,
+                 const void *src2, size_t src2_len);
+int ceph_crypto_init(void);
+void ceph_crypto_shutdown(void);
 
 /* armor.c */
-extern int ceph_armor(char *dst, const char *src, const char *end);
-extern int ceph_unarmor(char *dst, const char *src, const char *end);
+int ceph_armor(char *dst, const char *src, const char *end);
+int ceph_unarmor(char *dst, const char *src, const char *end);
 
 #endif
index f0a1ba6c8086acc65a87e519fca48853a3bd091e..89032580bd1d8aa5ef29d1395661bdd0bcfe95f1 100644 (file)
@@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
            __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
            __get_user(kmsg->msg_flags, &umsg->msg_flags))
                return -EFAULT;
+       if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+               return -EINVAL;
        kmsg->msg_name = compat_ptr(tmp1);
        kmsg->msg_iov = compat_ptr(tmp2);
        kmsg->msg_control = compat_ptr(tmp3);
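/*
 * Editorial note: the get_compat_msghdr() hunk above rejects a user-supplied
 * msg_namelen larger than sizeof(struct sockaddr_storage), the buffer the
 * kernel later copies the address into. A hedged userspace-style sketch
 * (hypothetical helper name) of the same bound:
 */
#include <string.h>
#include <sys/socket.h>

static int copy_sockaddr_checked(struct sockaddr_storage *dst,
				 const void *src, size_t namelen)
{
	if (namelen > sizeof(*dst))
		return -1;	/* mirrors the new -EINVAL check above */
	memcpy(dst, src, namelen);
	return 0;
}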
index 65f829cfd928b3bda157b0848e13edfa5b118bae..0918aadc20fd2ce528ef284da6d6c87f5254f6a0 100644 (file)
@@ -1307,7 +1307,7 @@ static int __dev_close_many(struct list_head *head)
        ASSERT_RTNL();
        might_sleep();
 
-       list_for_each_entry(dev, head, unreg_list) {
+       list_for_each_entry(dev, head, close_list) {
                call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
                clear_bit(__LINK_STATE_START, &dev->state);
@@ -1323,7 +1323,7 @@ static int __dev_close_many(struct list_head *head)
 
        dev_deactivate_many(head);
 
-       list_for_each_entry(dev, head, unreg_list) {
+       list_for_each_entry(dev, head, close_list) {
                const struct net_device_ops *ops = dev->netdev_ops;
 
                /*
@@ -1351,7 +1351,7 @@ static int __dev_close(struct net_device *dev)
        /* Temporarily disable netpoll until the interface is down */
        netpoll_rx_disable(dev);
 
-       list_add(&dev->unreg_list, &single);
+       list_add(&dev->close_list, &single);
        retval = __dev_close_many(&single);
        list_del(&single);
 
@@ -1362,21 +1362,20 @@ static int __dev_close(struct net_device *dev)
 static int dev_close_many(struct list_head *head)
 {
        struct net_device *dev, *tmp;
-       LIST_HEAD(tmp_list);
 
-       list_for_each_entry_safe(dev, tmp, head, unreg_list)
+       /* Remove the devices that don't need to be closed */
+       list_for_each_entry_safe(dev, tmp, head, close_list)
                if (!(dev->flags & IFF_UP))
-                       list_move(&dev->unreg_list, &tmp_list);
+                       list_del_init(&dev->close_list);
 
        __dev_close_many(head);
 
-       list_for_each_entry(dev, head, unreg_list) {
+       list_for_each_entry_safe(dev, tmp, head, close_list) {
                rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
                call_netdevice_notifiers(NETDEV_DOWN, dev);
+               list_del_init(&dev->close_list);
        }
 
-       /* rollback_registered_many needs the complete original list */
-       list_splice(&tmp_list, head);
        return 0;
 }
 
@@ -1397,7 +1396,7 @@ int dev_close(struct net_device *dev)
                /* Block netpoll rx while the interface is going down */
                netpoll_rx_disable(dev);
 
-               list_add(&dev->unreg_list, &single);
+               list_add(&dev->close_list, &single);
                dev_close_many(&single);
                list_del(&single);
 
@@ -1917,7 +1916,8 @@ static struct xps_map *expand_xps_map(struct xps_map *map,
        return new_map;
 }
 
-int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
+int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
+                       u16 index)
 {
        struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
        struct xps_map *map, *new_map;
@@ -2377,6 +2377,8 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
        }
 
        SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
+       SKB_GSO_CB(skb)->encap_level = 0;
+
        skb_reset_mac_header(skb);
        skb_reset_mac_len(skb);
 
@@ -4373,42 +4375,40 @@ struct netdev_adjacent {
        /* upper master flag, there can only be one master device per list */
        bool master;
 
-       /* indicates that this dev is our first-level lower/upper device */
-       bool neighbour;
-
        /* counter for the number of times this device was added to us */
        u16 ref_nr;
 
+       /* private field for the users */
+       void *private;
+
        struct list_head list;
        struct rcu_head rcu;
 };
 
-static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
-                                                struct net_device *adj_dev,
-                                                bool upper)
+static struct netdev_adjacent *__netdev_find_adj_rcu(struct net_device *dev,
+                                                    struct net_device *adj_dev,
+                                                    struct list_head *adj_list)
 {
        struct netdev_adjacent *adj;
-       struct list_head *dev_list;
-
-       dev_list = upper ? &dev->upper_dev_list : &dev->lower_dev_list;
 
-       list_for_each_entry(adj, dev_list, list) {
+       list_for_each_entry_rcu(adj, adj_list, list) {
                if (adj->dev == adj_dev)
                        return adj;
        }
        return NULL;
 }
 
-static inline struct netdev_adjacent *__netdev_find_upper(struct net_device *dev,
-                                                         struct net_device *udev)
+static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
+                                                struct net_device *adj_dev,
+                                                struct list_head *adj_list)
 {
-       return __netdev_find_adj(dev, udev, true);
-}
+       struct netdev_adjacent *adj;
 
-static inline struct netdev_adjacent *__netdev_find_lower(struct net_device *dev,
-                                                         struct net_device *ldev)
-{
-       return __netdev_find_adj(dev, ldev, false);
+       list_for_each_entry(adj, adj_list, list) {
+               if (adj->dev == adj_dev)
+                       return adj;
+       }
+       return NULL;
 }
 
 /**
@@ -4425,7 +4425,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
 {
        ASSERT_RTNL();
 
-       return __netdev_find_upper(dev, upper_dev);
+       return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
 
@@ -4440,7 +4440,7 @@ bool netdev_has_any_upper_dev(struct net_device *dev)
 {
        ASSERT_RTNL();
 
-       return !list_empty(&dev->upper_dev_list);
+       return !list_empty(&dev->all_adj_list.upper);
 }
 EXPORT_SYMBOL(netdev_has_any_upper_dev);
 
@@ -4457,10 +4457,10 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 
        ASSERT_RTNL();
 
-       if (list_empty(&dev->upper_dev_list))
+       if (list_empty(&dev->adj_list.upper))
                return NULL;
 
-       upper = list_first_entry(&dev->upper_dev_list,
+       upper = list_first_entry(&dev->adj_list.upper,
                                 struct netdev_adjacent, list);
        if (likely(upper->master))
                return upper->dev;
@@ -4468,15 +4468,26 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get);
 
-/* netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+void *netdev_adjacent_get_private(struct list_head *adj_list)
+{
+       struct netdev_adjacent *adj;
+
+       adj = list_entry(adj_list, struct netdev_adjacent, list);
+
+       return adj->private;
+}
+EXPORT_SYMBOL(netdev_adjacent_get_private);
+
+/**
+ * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
  * @dev: device
  * @iter: list_head ** of the current position
  *
  * Gets the next device from the dev's upper list, starting from iter
  * position. The caller must hold RCU read lock.
  */
-struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
-                                                struct list_head **iter)
+struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
+                                                    struct list_head **iter)
 {
        struct netdev_adjacent *upper;
 
@@ -4484,14 +4495,71 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 
        upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
 
-       if (&upper->list == &dev->upper_dev_list)
+       if (&upper->list == &dev->all_adj_list.upper)
                return NULL;
 
        *iter = &upper->list;
 
        return upper->dev;
 }
-EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
+
+/**
+ * netdev_lower_get_next_private - Get the next ->private from the
+ *                                lower neighbour list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent->private from the dev's lower neighbour
+ * list, starting from iter position. The caller must either hold the
+ * RTNL lock or its own locking that guarantees that the neighbour lower
+ * list will remain unchanged.
+ */
+void *netdev_lower_get_next_private(struct net_device *dev,
+                                   struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry(*iter, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       if (iter)
+               *iter = lower->list.next;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_get_next_private);
+
+/**
+ * netdev_lower_get_next_private_rcu - Get the next ->private from the
+ *                                    lower neighbour list, RCU
+ *                                    variant
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent->private from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RCU read lock.
+ */
+void *netdev_lower_get_next_private_rcu(struct net_device *dev,
+                                       struct list_head **iter)
+{
+       struct netdev_adjacent *lower;
+
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
+       lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       if (iter)
+               *iter = &lower->list;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
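/*
 * Editorial note: a minimal sketch (hypothetical caller, assuming the new
 * iterator is declared in <linux/netdevice.h>) of walking the lower
 * neighbours' private pointers under RCU, as the helper added above expects:
 * start with iter pointing at the list head and loop while a private is
 * returned.
 */
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>

static void example_walk_lower_privates(struct net_device *dev)
{
	struct list_head *iter = &dev->adj_list.lower;	/* start position */
	void *priv;

	rcu_read_lock();
	while ((priv = netdev_lower_get_next_private_rcu(dev, &iter))) {
		/* priv is whatever was attached when the link was created,
		 * e.g. via netdev_master_upper_dev_link_private() below */
		pr_debug("lower private %p\n", priv);
	}
	rcu_read_unlock();
}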
 
 /**
  * netdev_master_upper_dev_get_rcu - Get master upper device
@@ -4504,7 +4572,7 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
 {
        struct netdev_adjacent *upper;
 
-       upper = list_first_or_null_rcu(&dev->upper_dev_list,
+       upper = list_first_or_null_rcu(&dev->adj_list.upper,
                                       struct netdev_adjacent, list);
        if (upper && likely(upper->master))
                return upper->dev;
@@ -4514,15 +4582,16 @@ EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
 
 static int __netdev_adjacent_dev_insert(struct net_device *dev,
                                        struct net_device *adj_dev,
-                                       bool neighbour, bool master,
-                                       bool upper)
+                                       struct list_head *dev_list,
+                                       void *private, bool master)
 {
        struct netdev_adjacent *adj;
+       char linkname[IFNAMSIZ+7];
+       int ret;
 
-       adj = __netdev_find_adj(dev, adj_dev, upper);
+       adj = __netdev_find_adj(dev, adj_dev, dev_list);
 
        if (adj) {
-               BUG_ON(neighbour);
                adj->ref_nr++;
                return 0;
        }
@@ -4533,124 +4602,178 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 
        adj->dev = adj_dev;
        adj->master = master;
-       adj->neighbour = neighbour;
        adj->ref_nr = 1;
-
+       adj->private = private;
        dev_hold(adj_dev);
-       pr_debug("dev_hold for %s, because of %s link added from %s to %s\n",
-                adj_dev->name, upper ? "upper" : "lower", dev->name,
-                adj_dev->name);
 
-       if (!upper) {
-               list_add_tail_rcu(&adj->list, &dev->lower_dev_list);
-               return 0;
+       pr_debug("dev_hold for %s, because of link added from %s to %s\n",
+                adj_dev->name, dev->name, adj_dev->name);
+
+       if (dev_list == &dev->adj_list.lower) {
+               sprintf(linkname, "lower_%s", adj_dev->name);
+               ret = sysfs_create_link(&(dev->dev.kobj),
+                                       &(adj_dev->dev.kobj), linkname);
+               if (ret)
+                       goto free_adj;
+       } else if (dev_list == &dev->adj_list.upper) {
+               sprintf(linkname, "upper_%s", adj_dev->name);
+               ret = sysfs_create_link(&(dev->dev.kobj),
+                                       &(adj_dev->dev.kobj), linkname);
+               if (ret)
+                       goto free_adj;
        }
 
-       /* Ensure that master upper link is always the first item in list. */
-       if (master)
-               list_add_rcu(&adj->list, &dev->upper_dev_list);
-       else
-               list_add_tail_rcu(&adj->list, &dev->upper_dev_list);
+       /* Ensure that master link is always the first item in list. */
+       if (master) {
+               ret = sysfs_create_link(&(dev->dev.kobj),
+                                       &(adj_dev->dev.kobj), "master");
+               if (ret)
+                       goto remove_symlinks;
+
+               list_add_rcu(&adj->list, dev_list);
+       } else {
+               list_add_tail_rcu(&adj->list, dev_list);
+       }
 
        return 0;
-}
 
-static inline int __netdev_upper_dev_insert(struct net_device *dev,
-                                           struct net_device *udev,
-                                           bool master, bool neighbour)
-{
-       return __netdev_adjacent_dev_insert(dev, udev, neighbour, master,
-                                           true);
-}
+remove_symlinks:
+       if (dev_list == &dev->adj_list.lower) {
+               sprintf(linkname, "lower_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       } else if (dev_list == &dev->adj_list.upper) {
+               sprintf(linkname, "upper_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       }
 
-static inline int __netdev_lower_dev_insert(struct net_device *dev,
-                                           struct net_device *ldev,
-                                           bool neighbour)
-{
-       return __netdev_adjacent_dev_insert(dev, ldev, neighbour, false,
-                                           false);
+free_adj:
+       kfree(adj);
+
+       return ret;
 }
 
 void __netdev_adjacent_dev_remove(struct net_device *dev,
-                                 struct net_device *adj_dev, bool upper)
+                                 struct net_device *adj_dev,
+                                 struct list_head *dev_list)
 {
        struct netdev_adjacent *adj;
+       char linkname[IFNAMSIZ+7];
 
-       if (upper)
-               adj = __netdev_find_upper(dev, adj_dev);
-       else
-               adj = __netdev_find_lower(dev, adj_dev);
+       adj = __netdev_find_adj(dev, adj_dev, dev_list);
 
-       if (!adj)
+       if (!adj) {
+               pr_err("tried to remove device %s from %s\n",
+                      dev->name, adj_dev->name);
                BUG();
+       }
 
        if (adj->ref_nr > 1) {
+               pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
+                        adj->ref_nr-1);
                adj->ref_nr--;
                return;
        }
 
+       if (adj->master)
+               sysfs_remove_link(&(dev->dev.kobj), "master");
+
+       if (dev_list == &dev->adj_list.lower) {
+               sprintf(linkname, "lower_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       } else if (dev_list == &dev->adj_list.upper) {
+               sprintf(linkname, "upper_%s", adj_dev->name);
+               sysfs_remove_link(&(dev->dev.kobj), linkname);
+       }
+
        list_del_rcu(&adj->list);
-       pr_debug("dev_put for %s, because of %s link removed from %s to %s\n",
-                adj_dev->name, upper ? "upper" : "lower", dev->name,
-                adj_dev->name);
+       pr_debug("dev_put for %s, because link removed from %s to %s\n",
+                adj_dev->name, dev->name, adj_dev->name);
        dev_put(adj_dev);
        kfree_rcu(adj, rcu);
 }
 
-static inline void __netdev_upper_dev_remove(struct net_device *dev,
-                                            struct net_device *udev)
-{
-       return __netdev_adjacent_dev_remove(dev, udev, true);
-}
-
-static inline void __netdev_lower_dev_remove(struct net_device *dev,
-                                            struct net_device *ldev)
-{
-       return __netdev_adjacent_dev_remove(dev, ldev, false);
-}
-
-int __netdev_adjacent_dev_insert_link(struct net_device *dev,
-                                     struct net_device *upper_dev,
-                                     bool master, bool neighbour)
+int __netdev_adjacent_dev_link_lists(struct net_device *dev,
+                                    struct net_device *upper_dev,
+                                    struct list_head *up_list,
+                                    struct list_head *down_list,
+                                    void *private, bool master)
 {
        int ret;
 
-       ret = __netdev_upper_dev_insert(dev, upper_dev, master, neighbour);
+       ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
+                                          master);
        if (ret)
                return ret;
 
-       ret = __netdev_lower_dev_insert(upper_dev, dev, neighbour);
+       ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
+                                          false);
        if (ret) {
-               __netdev_upper_dev_remove(dev, upper_dev);
+               __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
                return ret;
        }
 
        return 0;
 }
 
-static inline int __netdev_adjacent_dev_link(struct net_device *dev,
-                                            struct net_device *udev)
+int __netdev_adjacent_dev_link(struct net_device *dev,
+                              struct net_device *upper_dev)
 {
-       return __netdev_adjacent_dev_insert_link(dev, udev, false, false);
+       return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+                                               &dev->all_adj_list.upper,
+                                               &upper_dev->all_adj_list.lower,
+                                               NULL, false);
 }
 
-static inline int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
-                                                      struct net_device *udev,
-                                                      bool master)
+void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
+                                       struct net_device *upper_dev,
+                                       struct list_head *up_list,
+                                       struct list_head *down_list)
 {
-       return __netdev_adjacent_dev_insert_link(dev, udev, master, true);
+       __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+       __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
 }
 
 void __netdev_adjacent_dev_unlink(struct net_device *dev,
                                  struct net_device *upper_dev)
 {
-       __netdev_upper_dev_remove(dev, upper_dev);
-       __netdev_lower_dev_remove(upper_dev, dev);
+       __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+                                          &dev->all_adj_list.upper,
+                                          &upper_dev->all_adj_list.lower);
 }
 
+int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+                                        struct net_device *upper_dev,
+                                        void *private, bool master)
+{
+       int ret = __netdev_adjacent_dev_link(dev, upper_dev);
+
+       if (ret)
+               return ret;
+
+       ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
+                                              &dev->adj_list.upper,
+                                              &upper_dev->adj_list.lower,
+                                              private, master);
+       if (ret) {
+               __netdev_adjacent_dev_unlink(dev, upper_dev);
+               return ret;
+       }
+
+       return 0;
+}
+
+void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
+                                           struct net_device *upper_dev)
+{
+       __netdev_adjacent_dev_unlink(dev, upper_dev);
+       __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+                                          &dev->adj_list.upper,
+                                          &upper_dev->adj_list.lower);
+}
 
 static int __netdev_upper_dev_link(struct net_device *dev,
-                                  struct net_device *upper_dev, bool master)
+                                  struct net_device *upper_dev, bool master,
+                                  void *private)
 {
        struct netdev_adjacent *i, *j, *to_i, *to_j;
        int ret = 0;
@@ -4661,26 +4784,29 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                return -EBUSY;
 
        /* To prevent loops, check if dev is not upper device to upper_dev. */
-       if (__netdev_find_upper(upper_dev, dev))
+       if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
                return -EBUSY;
 
-       if (__netdev_find_upper(dev, upper_dev))
+       if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
                return -EEXIST;
 
        if (master && netdev_master_upper_dev_get(dev))
                return -EBUSY;
 
-       ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, master);
+       ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
+                                                  master);
        if (ret)
                return ret;
 
        /* Now that we linked these devs, make all the upper_dev's
-        * upper_dev_list visible to every dev's lower_dev_list and vice
+        * all_adj_list.upper visible to every dev's all_adj_list.lower and vice
         * versa, and don't forget the devices itself. All of these
         * links are non-neighbours.
         */
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
-               list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+               list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
+                       pr_debug("Interlinking %s with %s, non-neighbour\n",
+                                i->dev->name, j->dev->name);
                        ret = __netdev_adjacent_dev_link(i->dev, j->dev);
                        if (ret)
                                goto rollback_mesh;
@@ -4688,14 +4814,18 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        }
 
        /* add dev to every upper_dev's upper device */
-       list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
+               pr_debug("linking %s's upper device %s with %s\n",
+                        upper_dev->name, i->dev->name, dev->name);
                ret = __netdev_adjacent_dev_link(dev, i->dev);
                if (ret)
                        goto rollback_upper_mesh;
        }
 
        /* add upper_dev to every dev's lower device */
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+               pr_debug("linking %s's lower device %s with %s\n", dev->name,
+                        i->dev->name, upper_dev->name);
                ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
                if (ret)
                        goto rollback_lower_mesh;
@@ -4706,7 +4836,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
 
 rollback_lower_mesh:
        to_i = i;
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
                if (i == to_i)
                        break;
                __netdev_adjacent_dev_unlink(i->dev, upper_dev);
@@ -4716,7 +4846,7 @@ rollback_lower_mesh:
 
 rollback_upper_mesh:
        to_i = i;
-       list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
                if (i == to_i)
                        break;
                __netdev_adjacent_dev_unlink(dev, i->dev);
@@ -4727,8 +4857,8 @@ rollback_upper_mesh:
 rollback_mesh:
        to_i = i;
        to_j = j;
-       list_for_each_entry(i, &dev->lower_dev_list, list) {
-               list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+       list_for_each_entry(i, &dev->all_adj_list.lower, list) {
+               list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
                        if (i == to_i && j == to_j)
                                break;
                        __netdev_adjacent_dev_unlink(i->dev, j->dev);
@@ -4737,7 +4867,7 @@ rollback_mesh:
                        break;
        }
 
-       __netdev_adjacent_dev_unlink(dev, upper_dev);
+       __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
        return ret;
 }
@@ -4755,7 +4885,7 @@ rollback_mesh:
 int netdev_upper_dev_link(struct net_device *dev,
                          struct net_device *upper_dev)
 {
-       return __netdev_upper_dev_link(dev, upper_dev, false);
+       return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_link);
 
@@ -4773,10 +4903,18 @@ EXPORT_SYMBOL(netdev_upper_dev_link);
 int netdev_master_upper_dev_link(struct net_device *dev,
                                 struct net_device *upper_dev)
 {
-       return __netdev_upper_dev_link(dev, upper_dev, true);
+       return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_link);
 
+int netdev_master_upper_dev_link_private(struct net_device *dev,
+                                        struct net_device *upper_dev,
+                                        void *private)
+{
+       return __netdev_upper_dev_link(dev, upper_dev, true, private);
+}
+EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
+
 /**
  * netdev_upper_dev_unlink - Removes a link to upper device
  * @dev: device
@@ -4791,29 +4929,59 @@ void netdev_upper_dev_unlink(struct net_device *dev,
        struct netdev_adjacent *i, *j;
        ASSERT_RTNL();
 
-       __netdev_adjacent_dev_unlink(dev, upper_dev);
+       __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
 
        /* Here is the tricky part. We must remove all dev's lower
         * devices from all upper_dev's upper devices and vice
         * versa, to maintain the graph relationship.
         */
-       list_for_each_entry(i, &dev->lower_dev_list, list)
-               list_for_each_entry(j, &upper_dev->upper_dev_list, list)
+       list_for_each_entry(i, &dev->all_adj_list.lower, list)
+               list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
                        __netdev_adjacent_dev_unlink(i->dev, j->dev);
 
        /* remove also the devices itself from lower/upper device
         * list
         */
-       list_for_each_entry(i, &dev->lower_dev_list, list)
+       list_for_each_entry(i, &dev->all_adj_list.lower, list)
                __netdev_adjacent_dev_unlink(i->dev, upper_dev);
 
-       list_for_each_entry(i, &upper_dev->upper_dev_list, list)
+       list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
                __netdev_adjacent_dev_unlink(dev, i->dev);
 
        call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
+                                      struct net_device *lower_dev)
+{
+       struct netdev_adjacent *lower;
+
+       if (!lower_dev)
+               return NULL;
+       lower = __netdev_find_adj_rcu(dev, lower_dev, &dev->adj_list.lower);
+       if (!lower)
+               return NULL;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_dev_get_private_rcu);
+
+void *netdev_lower_dev_get_private(struct net_device *dev,
+                                  struct net_device *lower_dev)
+{
+       struct netdev_adjacent *lower;
+
+       if (!lower_dev)
+               return NULL;
+       lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
+       if (!lower)
+               return NULL;
+
+       return lower->private;
+}
+EXPORT_SYMBOL(netdev_lower_dev_get_private);
+
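/*
 * Editorial note: a hedged sketch (hypothetical aggregation driver, names
 * invented) of the new private-data plumbing added above: attach per-slave
 * state when linking the master, read it back later. Both calls run under
 * RTNL.
 */
#include <linux/netdevice.h>
#include <linux/slab.h>

struct example_slave_state {
	bool link_up;
};

static int example_enslave(struct net_device *master, struct net_device *slave)
{
	struct example_slave_state *state;
	int err;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	/* slave is the lower device; master becomes its single master upper */
	err = netdev_master_upper_dev_link_private(slave, master, state);
	if (err)
		kfree(state);
	return err;
}

static struct example_slave_state *example_get_state(struct net_device *master,
						     struct net_device *slave)
{
	/* RTNL variant; netdev_lower_dev_get_private_rcu() serves RCU readers */
	return netdev_lower_dev_get_private(master, slave);
}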
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -4822,7 +4990,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
                ops->ndo_change_rx_flags(dev, flags);
 }
 
-static int __dev_set_promiscuity(struct net_device *dev, int inc)
+static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
 {
        unsigned int old_flags = dev->flags;
        kuid_t uid;
@@ -4865,6 +5033,8 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
 
                dev_change_rx_flags(dev, IFF_PROMISC);
        }
+       if (notify)
+               __dev_notify_flags(dev, old_flags, IFF_PROMISC);
        return 0;
 }
 
@@ -4884,7 +5054,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
        unsigned int old_flags = dev->flags;
        int err;
 
-       err = __dev_set_promiscuity(dev, inc);
+       err = __dev_set_promiscuity(dev, inc, true);
        if (err < 0)
                return err;
        if (dev->flags != old_flags)
@@ -4893,22 +5063,9 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
 }
 EXPORT_SYMBOL(dev_set_promiscuity);
 
-/**
- *     dev_set_allmulti        - update allmulti count on a device
- *     @dev: device
- *     @inc: modifier
- *
- *     Add or remove reception of all multicast frames to a device. While the
- *     count in the device remains above zero the interface remains listening
- *     to all interfaces. Once it hits zero the device reverts back to normal
- *     filtering operation. A negative @inc value is used to drop the counter
- *     when releasing a resource needing all multicasts.
- *     Return 0 if successful or a negative errno code on error.
- */
-
-int dev_set_allmulti(struct net_device *dev, int inc)
+static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
 {
-       unsigned int old_flags = dev->flags;
+       unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
 
        ASSERT_RTNL();
 
@@ -4931,9 +5088,30 @@ int dev_set_allmulti(struct net_device *dev, int inc)
        if (dev->flags ^ old_flags) {
                dev_change_rx_flags(dev, IFF_ALLMULTI);
                dev_set_rx_mode(dev);
+               if (notify)
+                       __dev_notify_flags(dev, old_flags,
+                                          dev->gflags ^ old_gflags);
        }
        return 0;
 }
+
+/**
+ *     dev_set_allmulti        - update allmulti count on a device
+ *     @dev: device
+ *     @inc: modifier
+ *
+ *     Add or remove reception of all multicast frames to a device. While the
+ *     count in the device remains above zero the interface remains listening
+ *     to all interfaces. Once it hits zero the device reverts back to normal
+ *     filtering operation. A negative @inc value is used to drop the counter
+ *     when releasing a resource needing all multicasts.
+ *     Return 0 if successful or a negative errno code on error.
+ */
+
+int dev_set_allmulti(struct net_device *dev, int inc)
+{
+       return __dev_set_allmulti(dev, inc, true);
+}
 EXPORT_SYMBOL(dev_set_allmulti);
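/*
 * Editorial note: a brief sketch (hypothetical driver, caller holds RTNL) of
 * the counted promiscuity/allmulti API whose notification behaviour the hunks
 * above rework: every +1 must eventually be balanced by a -1.
 */
#include <linux/netdevice.h>

static int example_enable_sniffing(struct net_device *dev)
{
	int err;

	err = dev_set_promiscuity(dev, 1);	/* counted; notifies by itself now */
	if (err)
		return err;

	err = dev_set_allmulti(dev, 1);
	if (err)
		dev_set_promiscuity(dev, -1);
	return err;
}

static void example_disable_sniffing(struct net_device *dev)
{
	dev_set_allmulti(dev, -1);
	dev_set_promiscuity(dev, -1);
}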
 
 /*
@@ -4958,10 +5136,10 @@ void __dev_set_rx_mode(struct net_device *dev)
                 * therefore calling __dev_set_promiscuity here is safe.
                 */
                if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
-                       __dev_set_promiscuity(dev, 1);
+                       __dev_set_promiscuity(dev, 1, false);
                        dev->uc_promisc = true;
                } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
-                       __dev_set_promiscuity(dev, -1);
+                       __dev_set_promiscuity(dev, -1, false);
                        dev->uc_promisc = false;
                }
        }
@@ -5050,9 +5228,13 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
 
        if ((flags ^ dev->gflags) & IFF_PROMISC) {
                int inc = (flags & IFF_PROMISC) ? 1 : -1;
+               unsigned int old_flags = dev->flags;
 
                dev->gflags ^= IFF_PROMISC;
-               dev_set_promiscuity(dev, inc);
+
+               if (__dev_set_promiscuity(dev, inc, false) >= 0)
+                       if (dev->flags != old_flags)
+                               dev_set_rx_mode(dev);
        }
 
        /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
@@ -5063,16 +5245,20 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
                int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
 
                dev->gflags ^= IFF_ALLMULTI;
-               dev_set_allmulti(dev, inc);
+               __dev_set_allmulti(dev, inc, false);
        }
 
        return ret;
 }
 
-void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
+void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
+                       unsigned int gchanges)
 {
        unsigned int changes = dev->flags ^ old_flags;
 
+       if (gchanges)
+               rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges);
+
        if (changes & IFF_UP) {
                if (dev->flags & IFF_UP)
                        call_netdevice_notifiers(NETDEV_UP, dev);
@@ -5101,17 +5287,14 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
 int dev_change_flags(struct net_device *dev, unsigned int flags)
 {
        int ret;
-       unsigned int changes, old_flags = dev->flags;
+       unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
 
        ret = __dev_change_flags(dev, flags);
        if (ret < 0)
                return ret;
 
-       changes = old_flags ^ dev->flags;
-       if (changes)
-               rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
-
-       __dev_notify_flags(dev, old_flags);
+       changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
+       __dev_notify_flags(dev, old_flags, changes);
        return ret;
 }
 EXPORT_SYMBOL(dev_change_flags);
@@ -5258,6 +5441,7 @@ static void net_set_todo(struct net_device *dev)
 static void rollback_registered_many(struct list_head *head)
 {
        struct net_device *dev, *tmp;
+       LIST_HEAD(close_head);
 
        BUG_ON(dev_boot_phase);
        ASSERT_RTNL();
@@ -5280,7 +5464,9 @@ static void rollback_registered_many(struct list_head *head)
        }
 
        /* If device is running, close it first. */
-       dev_close_many(head);
+       list_for_each_entry(dev, head, unreg_list)
+               list_add_tail(&dev->close_list, &close_head);
+       dev_close_many(&close_head);
 
        list_for_each_entry(dev, head, unreg_list) {
                /* And unlink it from device chain. */
@@ -6076,9 +6262,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
+       INIT_LIST_HEAD(&dev->close_list);
        INIT_LIST_HEAD(&dev->link_watch_list);
-       INIT_LIST_HEAD(&dev->upper_dev_list);
-       INIT_LIST_HEAD(&dev->lower_dev_list);
+       INIT_LIST_HEAD(&dev->adj_list.upper);
+       INIT_LIST_HEAD(&dev->adj_list.lower);
+       INIT_LIST_HEAD(&dev->all_adj_list.upper);
+       INIT_LIST_HEAD(&dev->all_adj_list.lower);
        dev->priv_flags = IFF_XMIT_DST_RELEASE;
        setup(dev);
 
index 78e9d9223e40dfe5bc9956170619e0a8e3335e9d..862989898f611e8f9f2dea28b273b551eeb2b0bb 100644 (file)
@@ -81,6 +81,8 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_TSO6_BIT] =             "tx-tcp6-segmentation",
        [NETIF_F_FSO_BIT] =              "tx-fcoe-segmentation",
        [NETIF_F_GSO_GRE_BIT] =          "tx-gre-segmentation",
+       [NETIF_F_GSO_IPIP_BIT] =         "tx-ipip-segmentation",
+       [NETIF_F_GSO_SIT_BIT] =          "tx-sit-segmentation",
        [NETIF_F_GSO_UDP_TUNNEL_BIT] =   "tx-udp_tnl-segmentation",
        [NETIF_F_GSO_MPLS_BIT] =         "tx-mpls-segmentation",
 
index 6438f29ff26650b240be40d7d80953dc28f13cb0..01b780856db29e5a2a7ebbc36aa7062340b28bb8 100644 (file)
@@ -644,7 +644,6 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
        bpf_jit_free(fp);
-       kfree(fp);
 }
 EXPORT_SYMBOL(sk_filter_release_rcu);
 
@@ -683,7 +682,7 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
        if (fprog->filter == NULL)
                return -EINVAL;
 
-       fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
+       fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        memcpy(fp->insns, fprog->filter, fsize);
@@ -723,6 +722,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
        struct sk_filter *fp, *old_fp;
        unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+       unsigned int sk_fsize = sk_filter_size(fprog->len);
        int err;
 
        if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -732,11 +732,11 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        if (fprog->filter == NULL)
                return -EINVAL;
 
-       fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+       fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
        if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-               sock_kfree_s(sk, fp, fsize+sizeof(*fp));
+               sock_kfree_s(sk, fp, sk_fsize);
                return -EFAULT;
        }
 
index 8d7d0dd72db211e23b5bcffd16f7600841841598..f8e25ac41c6c1b7b2eebadc6ec8d8310c458bfee 100644 (file)
@@ -25,9 +25,35 @@ static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *i
        memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
 }
 
+/**
+ * skb_flow_get_ports - extract the upper layer ports and return them
+ * @skb: buffer to extract the ports from
+ * @thoff: transport header offset
+ * @ip_proto: protocol for which to get port offset
+ *
+ * The function will try to retrieve the ports at offset thoff + poff where poff
+ * is the protocol port offset returned from proto_ports_offset
+ */
+__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+{
+       int poff = proto_ports_offset(ip_proto);
+
+       if (poff >= 0) {
+               __be32 *ports, _ports;
+
+               ports = skb_header_pointer(skb, thoff + poff,
+                                          sizeof(_ports), &_ports);
+               if (ports)
+                       return *ports;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(skb_flow_get_ports);
+
 bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
 {
-       int poff, nhoff = skb_network_offset(skb);
+       int nhoff = skb_network_offset(skb);
        u8 ip_proto;
        __be16 proto = skb->protocol;
 
@@ -150,16 +176,7 @@ ipv6:
        }
 
        flow->ip_proto = ip_proto;
-       poff = proto_ports_offset(ip_proto);
-       if (poff >= 0) {
-               __be32 *ports, _ports;
-
-               ports = skb_header_pointer(skb, nhoff + poff,
-                                          sizeof(_ports), &_ports);
-               if (ports)
-                       flow->ports = *ports;
-       }
-
+       flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
        flow->thoff = (u16) nhoff;
 
        return true;
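/*
 * Editorial note: a minimal sketch (hypothetical caller, assuming the new
 * helper is declared alongside skb_flow_dissect() in <net/flow_keys.h>) that
 * pairs the dissector with the skb_flow_get_ports() helper exported above:
 */
#include <linux/skbuff.h>
#include <net/flow_keys.h>

static __be32 example_flow_ports(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	/* skb_flow_dissect() already fills keys.ports; calling the helper
	 * directly is useful when only thoff/ip_proto are at hand. */
	return skb_flow_get_ports(skb, keys.thoff, keys.ip_proto);
}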
index 6072610a8672d1a54a0e7618214f70610fa2a6fa..ca15f32821fb8d536586354108488050b7cc6a0e 100644 (file)
@@ -867,7 +867,7 @@ static void neigh_invalidate(struct neighbour *neigh)
 static void neigh_probe(struct neighbour *neigh)
        __releases(neigh->lock)
 {
-       struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+       struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
        /* keep skb alive even if arp_queue overflows */
        if (skb)
                skb = skb_copy(skb, GFP_ATOMIC);
index d9cd627e6a16a55bb9b6e8ec1619a0291e1621ab..9b7cf6c85f82447d3e03f33cd2c599de13ed3644 100644 (file)
@@ -222,11 +222,10 @@ static void net_prio_attach(struct cgroup_subsys_state *css,
                            struct cgroup_taskset *tset)
 {
        struct task_struct *p;
-       void *v;
+       void *v = (void *)(unsigned long)css->cgroup->id;
 
        cgroup_taskset_for_each(p, css, tset) {
                task_lock(p);
-               v = (void *)(unsigned long)task_netprioidx(p);
                iterate_fd(p->files, 0, update_netprio, v);
                task_unlock(p);
        }
index 2a0e21de3060cddbc9cd657acb24ee84b748c4d1..4aedf03da0521433f16b470f3e675adcd4efdf25 100644 (file)
@@ -1647,9 +1647,8 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
        }
 
        dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-       rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
 
-       __dev_notify_flags(dev, old_flags);
+       __dev_notify_flags(dev, old_flags, ~0U);
        return 0;
 }
 EXPORT_SYMBOL(rtnl_configure_link);
index 3f1ec1586ae174d9bea18de2bcada31c3c88a5bb..897da56f3affae13161d4610989667a82244c308 100644 (file)
@@ -7,28 +7,20 @@
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
 #include <linux/string.h>
+#include <linux/net.h>
 
 #include <net/secure_seq.h>
 
+#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
 #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
 
 static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
 
-static void net_secret_init(void)
+static __always_inline void net_secret_init(void)
 {
-       u32 tmp;
-       int i;
-
-       if (likely(net_secret[0]))
-               return;
-
-       for (i = NET_SECRET_SIZE; i > 0;) {
-               do {
-                       get_random_bytes(&tmp, sizeof(tmp));
-               } while (!tmp);
-               cmpxchg(&net_secret[--i], 0, tmp);
-       }
+       net_get_random_once(net_secret, sizeof(net_secret));
 }
+#endif
 
 #ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
index d81cff119f734a5c8f405fed01f4047b9169f5d1..0ab32faa520f30bb16dfce78d6c17a27e61c1c59 100644 (file)
@@ -903,6 +903,9 @@ EXPORT_SYMBOL(skb_clone);
 
 static void skb_headers_offset_update(struct sk_buff *skb, int off)
 {
+       /* Only adjust this if it actually is csum_start rather than csum */
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb->csum_start += off;
        /* {transport,network,mac}_header and tail are relative to skb->head */
        skb->transport_header += off;
        skb->network_header   += off;
@@ -1109,9 +1112,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 #endif
        skb->tail             += off;
        skb_headers_offset_update(skb, nhead);
-       /* Only adjust this if it actually is csum_start rather than csum */
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               skb->csum_start += nhead;
        skb->cloned   = 0;
        skb->hdr_len  = 0;
        skb->nohdr    = 0;
@@ -1176,7 +1176,6 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                                        NUMA_NO_NODE);
        int oldheadroom = skb_headroom(skb);
        int head_copy_len, head_copy_off;
-       int off;
 
        if (!n)
                return NULL;
@@ -1200,11 +1199,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 
        copy_skb_header(n, skb);
 
-       off                  = newheadroom - oldheadroom;
-       if (n->ip_summed == CHECKSUM_PARTIAL)
-               n->csum_start += off;
-
-       skb_headers_offset_update(n, off);
+       skb_headers_offset_update(n, newheadroom - oldheadroom);
 
        return n;
 }
@@ -2837,14 +2832,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                __copy_skb_header(nskb, skb);
                nskb->mac_len = skb->mac_len;
 
-               /* nskb and skb might have different headroom */
-               if (nskb->ip_summed == CHECKSUM_PARTIAL)
-                       nskb->csum_start += skb_headroom(nskb) - headroom;
-
-               skb_reset_mac_header(nskb);
-               skb_set_network_header(nskb, skb->mac_len);
-               nskb->transport_header = (nskb->network_header +
-                                         skb_network_header_len(skb));
+               skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
 
                skb_copy_from_linear_data_offset(skb, -tnl_hlen,
                                                 nskb->data - tnl_hlen,
@@ -2936,32 +2924,30 @@ EXPORT_SYMBOL_GPL(skb_segment);
 
 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
-       struct sk_buff *p = *head;
-       struct sk_buff *nskb;
-       struct skb_shared_info *skbinfo = skb_shinfo(skb);
-       struct skb_shared_info *pinfo = skb_shinfo(p);
-       unsigned int headroom;
-       unsigned int len = skb_gro_len(skb);
+       struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
        unsigned int offset = skb_gro_offset(skb);
        unsigned int headlen = skb_headlen(skb);
+       struct sk_buff *nskb, *lp, *p = *head;
+       unsigned int len = skb_gro_len(skb);
        unsigned int delta_truesize;
+       unsigned int headroom;
 
-       if (p->len + len >= 65536)
+       if (unlikely(p->len + len >= 65536))
                return -E2BIG;
 
-       if (pinfo->frag_list)
-               goto merge;
-       else if (headlen <= offset) {
+       lp = NAPI_GRO_CB(p)->last ?: p;
+       pinfo = skb_shinfo(lp);
+
+       if (headlen <= offset) {
                skb_frag_t *frag;
                skb_frag_t *frag2;
                int i = skbinfo->nr_frags;
                int nr_frags = pinfo->nr_frags + i;
 
-               offset -= headlen;
-
                if (nr_frags > MAX_SKB_FRAGS)
-                       return -E2BIG;
+                       goto merge;
 
+               offset -= headlen;
                pinfo->nr_frags = nr_frags;
                skbinfo->nr_frags = 0;
 
@@ -2992,7 +2978,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                unsigned int first_offset;
 
                if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
-                       return -E2BIG;
+                       goto merge;
 
                first_offset = skb->data -
                               (unsigned char *)page_address(page) +
@@ -3010,7 +2996,10 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
                delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
                NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
                goto done;
-       } else if (skb_gro_len(p) != pinfo->gso_size)
+       }
+       if (pinfo->frag_list)
+               goto merge;
+       if (skb_gro_len(p) != pinfo->gso_size)
                return -E2BIG;
 
        headroom = skb_headroom(p);
@@ -3062,16 +3051,24 @@ merge:
 
        __skb_pull(skb, offset);
 
-       NAPI_GRO_CB(p)->last->next = skb;
+       if (!NAPI_GRO_CB(p)->last)
+               skb_shinfo(p)->frag_list = skb;
+       else
+               NAPI_GRO_CB(p)->last->next = skb;
        NAPI_GRO_CB(p)->last = skb;
        skb_header_release(skb);
+       lp = p;
 
 done:
        NAPI_GRO_CB(p)->count++;
        p->data_len += len;
        p->truesize += delta_truesize;
        p->len += len;
-
+       if (lp != p) {
+               lp->data_len += len;
+               lp->truesize += delta_truesize;
+               lp->len += len;
+       }
        NAPI_GRO_CB(skb)->same_flow = 1;
        return 0;
 }
index 5b6beba494a350cb28adfc7724487f1a13e6c011..ab20ed9b0f31da64cb118cf645fa88f5787b5571 100644 (file)
@@ -475,12 +475,6 @@ discard_and_relse:
 }
 EXPORT_SYMBOL(sk_receive_skb);
 
-void sk_reset_txq(struct sock *sk)
-{
-       sk_tx_queue_clear(sk);
-}
-EXPORT_SYMBOL(sk_reset_txq);
-
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
        struct dst_entry *dst = __sk_dst_get(sk);
@@ -914,6 +908,13 @@ set_rcvbuf:
                }
                break;
 #endif
+
+       case SO_MAX_PACING_RATE:
+               sk->sk_max_pacing_rate = val;
+               sk->sk_pacing_rate = min(sk->sk_pacing_rate,
+                                        sk->sk_max_pacing_rate);
+               break;
+
        default:
                ret = -ENOPROTOOPT;
                break;
@@ -1177,6 +1178,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 #endif
 
+       case SO_MAX_PACING_RATE:
+               v.val = sk->sk_max_pacing_rate;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
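/*
 * Editorial note: the SO_MAX_PACING_RATE hunks above let userspace cap
 * sk_pacing_rate in bytes per second. A userspace-side sketch, assuming the
 * option number below (47 in the generic socket headers of this series; some
 * architectures use a different value):
 */
#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47
#endif

static int example_cap_pacing(int fd, unsigned int bytes_per_sec)
{
	if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
		       &bytes_per_sec, sizeof(bytes_per_sec)) < 0) {
		perror("setsockopt(SO_MAX_PACING_RATE)");
		return -1;
	}
	return 0;
}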
@@ -1836,7 +1841,17 @@ EXPORT_SYMBOL(sock_alloc_send_skb);
 /* On 32bit arches, an skb frag is limited to 2^15 */
 #define SKB_FRAG_PAGE_ORDER    get_order(32768)
 
-bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+/**
+ * skb_page_frag_refill - check that a page_frag contains enough room
+ * @sz: minimum size of the fragment we want to get
+ * @pfrag: pointer to page_frag
+ * @prio: priority for memory allocation
+ *
+ * Note: While this allocator tries to use high order pages, there is
+ * no guarantee that allocations succeed. Therefore, @sz MUST be
+ * less than or equal to PAGE_SIZE.
+ */
+bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
 {
        int order;
 
@@ -1845,16 +1860,16 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
                        pfrag->offset = 0;
                        return true;
                }
-               if (pfrag->offset < pfrag->size)
+               if (pfrag->offset + sz <= pfrag->size)
                        return true;
                put_page(pfrag->page);
        }
 
        /* We restrict high order allocations to users that can afford to wait */
-       order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
+       order = (prio & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;
 
        do {
-               gfp_t gfp = sk->sk_allocation;
+               gfp_t gfp = prio;
 
                if (order)
                        gfp |= __GFP_COMP | __GFP_NOWARN;
@@ -1866,6 +1881,15 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
                }
        } while (--order >= 0);
 
+       return false;
+}
+EXPORT_SYMBOL(skb_page_frag_refill);
+
+bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
+{
+       if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
+               return true;
+
        sk_enter_memory_pressure(sk);
        sk_stream_moderate_sndbuf(sk);
        return false;
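/*
 * Editorial note: a hedged sketch (hypothetical kernel caller, assuming the
 * generic helper is declared in <linux/skbuff.h>) of skb_page_frag_refill():
 * reserve len bytes in a caller-owned page_frag, then copy into it.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static int example_frag_copy(struct page_frag *pfrag, const void *data,
			     unsigned int len, gfp_t gfp)
{
	if (len > PAGE_SIZE)			/* documented limit above */
		return -EINVAL;
	if (!skb_page_frag_refill(len, pfrag, gfp))
		return -ENOMEM;

	memcpy(page_address(pfrag->page) + pfrag->offset, data, len);
	pfrag->offset += len;			/* consume the reserved room */
	return 0;
}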
@@ -2319,6 +2343,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
        sk->sk_ll_usec          =       sysctl_net_busy_read;
 #endif
 
+       sk->sk_max_pacing_rate = ~0U;
+       sk->sk_pacing_rate = ~0U;
        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
index aa88e23fc87aa44f95f8794cf234846d8d06d21d..bf09371e19b146369a6c580e52700588440adf49 100644 (file)
@@ -338,3 +338,51 @@ void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
                                  csum_unfold(*sum)));
 }
 EXPORT_SYMBOL(inet_proto_csum_replace16);
+
+struct __net_random_once_work {
+       struct work_struct work;
+       struct static_key *key;
+};
+
+static void __net_random_once_deferred(struct work_struct *w)
+{
+       struct __net_random_once_work *work =
+               container_of(w, struct __net_random_once_work, work);
+       if (!static_key_enabled(work->key))
+               static_key_slow_inc(work->key);
+       kfree(work);
+}
+
+static void __net_random_once_disable_jump(struct static_key *key)
+{
+       struct __net_random_once_work *w;
+
+       w = kmalloc(sizeof(*w), GFP_ATOMIC);
+       if (!w)
+               return;
+
+       INIT_WORK(&w->work, __net_random_once_deferred);
+       w->key = key;
+       schedule_work(&w->work);
+}
+
+bool __net_get_random_once(void *buf, int nbytes, bool *done,
+                          struct static_key *done_key)
+{
+       static DEFINE_SPINLOCK(lock);
+
+       spin_lock_bh(&lock);
+       if (*done) {
+               spin_unlock_bh(&lock);
+               return false;
+       }
+
+       get_random_bytes(buf, nbytes);
+       *done = true;
+       spin_unlock_bh(&lock);
+
+       __net_random_once_disable_jump(done_key);
+
+       return true;
+}
+EXPORT_SYMBOL(__net_get_random_once);
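/*
 * Editorial note: the secure_seq.c hunk earlier in this diff shows the
 * intended entry point, net_get_random_once() from <linux/net.h>, which wraps
 * __net_get_random_once() above with a static key so later calls are nearly
 * free. A hedged sketch of another caller (hypothetical names):
 */
#include <linux/jhash.h>
#include <linux/net.h>

static u32 example_hash_secret[4];

static u32 example_hash(const void *key, u32 len)
{
	/* fills the secret exactly once across all callers */
	net_get_random_once(example_hash_secret, sizeof(example_hash_secret));
	return jhash(key, len, example_hash_secret[0]);
}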
index a269aa7f7923bf2b7f7b9d1dd3b26030456fc47c..3284bfa988c0ea45c0c46c4b5c7e9a80528bd0b0 100644 (file)
@@ -101,16 +101,16 @@ struct dccp_ackvec_record {
        u8               avr_ack_nonce:1;
 };
 
-extern int dccp_ackvec_init(void);
-extern void dccp_ackvec_exit(void);
+int dccp_ackvec_init(void);
+void dccp_ackvec_exit(void);
 
-extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
-extern void dccp_ackvec_free(struct dccp_ackvec *av);
+struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
+void dccp_ackvec_free(struct dccp_ackvec *av);
 
-extern void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
-extern int  dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
-extern void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
-extern u16  dccp_ackvec_buflen(const struct dccp_ackvec *av);
+void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
+int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
+void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
+u16 dccp_ackvec_buflen(const struct dccp_ackvec *av);
 
 static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
 {
@@ -133,7 +133,6 @@ struct dccp_ackvec_parsed {
        struct list_head node;
 };
 
-extern int dccp_ackvec_parsed_add(struct list_head *head,
-                                 u8 *vec, u8 len, u8 nonce);
-extern void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
+int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce);
+void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
 #endif /* _ACKVEC_H */
index fb85d371a8dec875a01205b22ef6ddef5e99d511..6eb837a47b5c42f8c73c498525fe4d269fc5b97d 100644 (file)
@@ -93,8 +93,8 @@ extern struct ccid_operations ccid2_ops;
 extern struct ccid_operations ccid3_ops;
 #endif
 
-extern int  ccid_initialize_builtins(void);
-extern void ccid_cleanup_builtins(void);
+int ccid_initialize_builtins(void);
+void ccid_cleanup_builtins(void);
 
 struct ccid {
        struct ccid_operations *ccid_ops;
@@ -106,12 +106,12 @@ static inline void *ccid_priv(const struct ccid *ccid)
        return (void *)ccid->ccid_priv;
 }
 
-extern bool ccid_support_check(u8 const *ccid_array, u8 array_len);
-extern int  ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len);
-extern int  ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
-                                         char __user *, int __user *);
+bool ccid_support_check(u8 const *ccid_array, u8 array_len);
+int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len);
+int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
+                                 char __user *, int __user *);
 
-extern struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
+struct ccid *ccid_new(const u8 id, struct sock *sk, bool rx);
 
 static inline int ccid_get_current_rx_ccid(struct dccp_sock *dp)
 {
@@ -131,8 +131,8 @@ static inline int ccid_get_current_tx_ccid(struct dccp_sock *dp)
        return ccid->ccid_ops->ccid_id;
 }
 
-extern void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
-extern void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
+void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk);
+void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk);
 
 /*
  * Congestion control of queued data packets via CCID decision.
index d1d2f5383b7d3d7cec533be0d7b8d939ebe492dd..57f631a86ccd35c9a7a949003091909330971cb3 100644 (file)
@@ -65,9 +65,9 @@ static inline u8 tfrc_lh_length(struct tfrc_loss_hist *lh)
 
 struct tfrc_rx_hist;
 
-extern int  tfrc_lh_interval_add(struct tfrc_loss_hist *, struct tfrc_rx_hist *,
-                                u32 (*first_li)(struct sock *), struct sock *);
-extern u8   tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *);
-extern void tfrc_lh_cleanup(struct tfrc_loss_hist *lh);
+int tfrc_lh_interval_add(struct tfrc_loss_hist *, struct tfrc_rx_hist *,
+                        u32 (*first_li)(struct sock *), struct sock *);
+u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *);
+void tfrc_lh_cleanup(struct tfrc_loss_hist *lh);
 
 #endif /* _DCCP_LI_HIST_ */
index 7ee4a9d9d3352337bb5e899d214a754b740ac8c0..ee362b0b630ddde5a46242dc4b86c2213efe581c 100644 (file)
@@ -60,8 +60,8 @@ static inline struct tfrc_tx_hist_entry *
        return head;
 }
 
-extern int  tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
-extern void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
+int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
+void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
 
 /* Subtraction a-b modulo-16, respects circular wrap-around */
 #define SUB16(a, b) (((a) + 16 - (b)) & 0xF)
@@ -139,20 +139,17 @@ static inline bool tfrc_rx_hist_loss_pending(const struct tfrc_rx_hist *h)
        return h->loss_count > 0;
 }
 
-extern void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
-                                   const struct sk_buff *skb, const u64 ndp);
+void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h, const struct sk_buff *skb,
+                            const u64 ndp);
 
-extern int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
+int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb);
 
 struct tfrc_loss_hist;
-extern int  tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
-                               struct tfrc_loss_hist *lh,
-                               struct sk_buff *skb, const u64 ndp,
-                               u32 (*first_li)(struct sock *sk),
-                               struct sock *sk);
-extern u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h,
-                                  const struct sk_buff *skb);
-extern int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h);
-extern void tfrc_rx_hist_purge(struct tfrc_rx_hist *h);
+int tfrc_rx_handle_loss(struct tfrc_rx_hist *h, struct tfrc_loss_hist *lh,
+                       struct sk_buff *skb, const u64 ndp,
+                       u32 (*first_li)(struct sock *sk), struct sock *sk);
+u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb);
+int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h);
+void tfrc_rx_hist_purge(struct tfrc_rx_hist *h);
 
 #endif /* _DCCP_PKT_HIST_ */
index ed698c42a5fbef17b34a9a4daf6fc34464d914f5..40ee7d62b6520d7daf2a1a8f3b54bda9cfbedfc2 100644 (file)
@@ -55,21 +55,21 @@ static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight)
        return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
 }
 
-extern u32  tfrc_calc_x(u16 s, u32 R, u32 p);
-extern u32  tfrc_calc_x_reverse_lookup(u32 fvalue);
-extern u32  tfrc_invert_loss_event_rate(u32 loss_event_rate);
+u32 tfrc_calc_x(u16 s, u32 R, u32 p);
+u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
+u32 tfrc_invert_loss_event_rate(u32 loss_event_rate);
 
-extern int  tfrc_tx_packet_history_init(void);
-extern void tfrc_tx_packet_history_exit(void);
-extern int  tfrc_rx_packet_history_init(void);
-extern void tfrc_rx_packet_history_exit(void);
+int tfrc_tx_packet_history_init(void);
+void tfrc_tx_packet_history_exit(void);
+int tfrc_rx_packet_history_init(void);
+void tfrc_rx_packet_history_exit(void);
 
-extern int  tfrc_li_init(void);
-extern void tfrc_li_exit(void);
+int tfrc_li_init(void);
+void tfrc_li_exit(void);
 
 #ifdef CONFIG_IP_DCCP_TFRC_LIB
-extern int  tfrc_lib_init(void);
-extern void tfrc_lib_exit(void);
+int tfrc_lib_init(void);
+void tfrc_lib_exit(void);
 #else
 #define tfrc_lib_init() (0)
 #define tfrc_lib_exit()
index 708e75bf623dea1f78dcd844a1b1af5cd6928d09..30948784dd58ff0a35df38b075c3bebd53f90cbe 100644 (file)
@@ -53,7 +53,7 @@ extern struct inet_hashinfo dccp_hashinfo;
 
 extern struct percpu_counter dccp_orphan_count;
 
-extern void dccp_time_wait(struct sock *sk, int state, int timeo);
+void dccp_time_wait(struct sock *sk, int state, int timeo);
 
 /*
  *  Set safe upper bounds for header and option length. Since Data Offset is 8
@@ -224,114 +224,108 @@ static inline void dccp_csum_outgoing(struct sk_buff *skb)
        skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
 }
 
-extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
+void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 
-extern int  dccp_retransmit_skb(struct sock *sk);
+int dccp_retransmit_skb(struct sock *sk);
 
-extern void dccp_send_ack(struct sock *sk);
-extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
-                               struct request_sock *rsk);
+void dccp_send_ack(struct sock *sk);
+void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+                        struct request_sock *rsk);
 
-extern void dccp_send_sync(struct sock *sk, const u64 seq,
-                          const enum dccp_pkt_type pkt_type);
+void dccp_send_sync(struct sock *sk, const u64 seq,
+                   const enum dccp_pkt_type pkt_type);
 
 /*
  * TX Packet Dequeueing Interface
  */
-extern void            dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
-extern bool            dccp_qpolicy_full(struct sock *sk);
-extern void            dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
-extern struct sk_buff  *dccp_qpolicy_top(struct sock *sk);
-extern struct sk_buff  *dccp_qpolicy_pop(struct sock *sk);
-extern bool            dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
+void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
+bool dccp_qpolicy_full(struct sock *sk);
+void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
+struct sk_buff *dccp_qpolicy_top(struct sock *sk);
+struct sk_buff *dccp_qpolicy_pop(struct sock *sk);
+bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
 
 /*
  * TX Packet Output and TX Timers
  */
-extern void   dccp_write_xmit(struct sock *sk);
-extern void   dccp_write_space(struct sock *sk);
-extern void   dccp_flush_write_queue(struct sock *sk, long *time_budget);
+void dccp_write_xmit(struct sock *sk);
+void dccp_write_space(struct sock *sk);
+void dccp_flush_write_queue(struct sock *sk, long *time_budget);
 
-extern void dccp_init_xmit_timers(struct sock *sk);
+void dccp_init_xmit_timers(struct sock *sk);
 static inline void dccp_clear_xmit_timers(struct sock *sk)
 {
        inet_csk_clear_xmit_timers(sk);
 }
 
-extern unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
+unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu);
 
-extern const char *dccp_packet_name(const int type);
+const char *dccp_packet_name(const int type);
 
-extern void dccp_set_state(struct sock *sk, const int state);
-extern void dccp_done(struct sock *sk);
+void dccp_set_state(struct sock *sk, const int state);
+void dccp_done(struct sock *sk);
 
-extern int  dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
-                           struct sk_buff const *skb);
+int dccp_reqsk_init(struct request_sock *rq, struct dccp_sock const *dp,
+                   struct sk_buff const *skb);
 
-extern int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
+int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 
-extern struct sock *dccp_create_openreq_child(struct sock *sk,
-                                             const struct request_sock *req,
-                                             const struct sk_buff *skb);
+struct sock *dccp_create_openreq_child(struct sock *sk,
+                                      const struct request_sock *req,
+                                      const struct sk_buff *skb);
 
-extern int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
+int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
 
-extern struct sock *dccp_v4_request_recv_sock(struct sock *sk,
-                                             struct sk_buff *skb,
-                                             struct request_sock *req,
-                                             struct dst_entry *dst);
-extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
-                                  struct request_sock *req,
-                                  struct request_sock **prev);
+struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
+                                      struct request_sock *req,
+                                      struct dst_entry *dst);
+struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
+                           struct request_sock *req,
+                           struct request_sock **prev);
 
-extern int dccp_child_process(struct sock *parent, struct sock *child,
-                             struct sk_buff *skb);
-extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-                                 struct dccp_hdr *dh, unsigned int len);
-extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
-                               const struct dccp_hdr *dh, const unsigned int len);
+int dccp_child_process(struct sock *parent, struct sock *child,
+                      struct sk_buff *skb);
+int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+                          struct dccp_hdr *dh, unsigned int len);
+int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
+                        const struct dccp_hdr *dh, const unsigned int len);
 
-extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
-extern void dccp_destroy_sock(struct sock *sk);
+int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
+void dccp_destroy_sock(struct sock *sk);
 
-extern void            dccp_close(struct sock *sk, long timeout);
-extern struct sk_buff  *dccp_make_response(struct sock *sk,
-                                           struct dst_entry *dst,
-                                           struct request_sock *req);
+void dccp_close(struct sock *sk, long timeout);
+struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
+                                  struct request_sock *req);
 
-extern int        dccp_connect(struct sock *sk);
-extern int        dccp_disconnect(struct sock *sk, int flags);
-extern int        dccp_getsockopt(struct sock *sk, int level, int optname,
-                                  char __user *optval, int __user *optlen);
-extern int        dccp_setsockopt(struct sock *sk, int level, int optname,
-                                  char __user *optval, unsigned int optlen);
+int dccp_connect(struct sock *sk);
+int dccp_disconnect(struct sock *sk, int flags);
+int dccp_getsockopt(struct sock *sk, int level, int optname,
+                   char __user *optval, int __user *optlen);
+int dccp_setsockopt(struct sock *sk, int level, int optname,
+                   char __user *optval, unsigned int optlen);
 #ifdef CONFIG_COMPAT
-extern int        compat_dccp_getsockopt(struct sock *sk,
-                               int level, int optname,
-                               char __user *optval, int __user *optlen);
-extern int        compat_dccp_setsockopt(struct sock *sk,
-                               int level, int optname,
-                               char __user *optval, unsigned int optlen);
+int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
+                          char __user *optval, int __user *optlen);
+int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
+                          char __user *optval, unsigned int optlen);
 #endif
-extern int        dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
-extern int        dccp_sendmsg(struct kiocb *iocb, struct sock *sk,
-                               struct msghdr *msg, size_t size);
-extern int        dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
-                               struct msghdr *msg, size_t len, int nonblock,
-                               int flags, int *addr_len);
-extern void       dccp_shutdown(struct sock *sk, int how);
-extern int        inet_dccp_listen(struct socket *sock, int backlog);
-extern unsigned int dccp_poll(struct file *file, struct socket *sock,
-                            poll_table *wait);
-extern int        dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
-                                  int addr_len);
-
-extern struct sk_buff *dccp_ctl_make_reset(struct sock *sk,
-                                          struct sk_buff *skb);
-extern int        dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
-extern void       dccp_send_close(struct sock *sk, const int active);
-extern int        dccp_invalid_packet(struct sk_buff *skb);
-extern u32        dccp_sample_rtt(struct sock *sk, long delta);
+int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                size_t size);
+int dccp_recvmsg(struct kiocb *iocb, struct sock *sk,
+                struct msghdr *msg, size_t len, int nonblock, int flags,
+                int *addr_len);
+void dccp_shutdown(struct sock *sk, int how);
+int inet_dccp_listen(struct socket *sock, int backlog);
+unsigned int dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait);
+int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+
+struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *skb);
+int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code);
+void dccp_send_close(struct sock *sk, const int active);
+int dccp_invalid_packet(struct sk_buff *skb);
+u32 dccp_sample_rtt(struct sock *sk, long delta);
 
 static inline int dccp_bad_service_code(const struct sock *sk,
                                        const __be32 service)
@@ -475,25 +469,25 @@ static inline int dccp_ack_pending(const struct sock *sk)
        return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
 }
 
-extern int  dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
-extern int  dccp_feat_finalise_settings(struct dccp_sock *dp);
-extern int  dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
-extern int  dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
-                                 struct sk_buff *skb);
-extern int  dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
-extern void dccp_feat_list_purge(struct list_head *fn_list);
-
-extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
-extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*);
-extern int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
-extern u32 dccp_timestamp(void);
-extern void dccp_timestamping_init(void);
-extern int dccp_insert_option(struct sk_buff *skb, unsigned char option,
-                             const void *value, unsigned char len);
+int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val);
+int dccp_feat_finalise_settings(struct dccp_sock *dp);
+int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
+int dccp_feat_insert_opts(struct dccp_sock*, struct dccp_request_sock*,
+                         struct sk_buff *skb);
+int dccp_feat_activate_values(struct sock *sk, struct list_head *fn);
+void dccp_feat_list_purge(struct list_head *fn_list);
+
+int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
+int dccp_insert_options_rsk(struct dccp_request_sock *, struct sk_buff *);
+int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
+u32 dccp_timestamp(void);
+void dccp_timestamping_init(void);
+int dccp_insert_option(struct sk_buff *skb, unsigned char option,
+                      const void *value, unsigned char len);
 
 #ifdef CONFIG_SYSCTL
-extern int dccp_sysctl_init(void);
-extern void dccp_sysctl_exit(void);
+int dccp_sysctl_init(void);
+void dccp_sysctl_exit(void);
 #else
 static inline int dccp_sysctl_init(void)
 {
index 90b957d34d2602987d49175ef483af3463f9c523..0e75cebb2187ef41c5e06c0e18e9c89b73a27cd2 100644 (file)
@@ -107,13 +107,13 @@ extern unsigned long sysctl_dccp_sequence_window;
 extern int          sysctl_dccp_rx_ccid;
 extern int          sysctl_dccp_tx_ccid;
 
-extern int  dccp_feat_init(struct sock *sk);
-extern void dccp_feat_initialise_sysctls(void);
-extern int  dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
-                                 u8 const *list, u8 len);
-extern int  dccp_feat_parse_options(struct sock *, struct dccp_request_sock *,
-                                   u8 mand, u8 opt, u8 feat, u8 *val, u8 len);
-extern int  dccp_feat_clone_list(struct list_head const *, struct list_head *);
+int dccp_feat_init(struct sock *sk);
+void dccp_feat_initialise_sysctls(void);
+int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
+                         u8 const *list, u8 len);
+int dccp_feat_parse_options(struct sock *, struct dccp_request_sock *,
+                           u8 mand, u8 opt, u8 feat, u8 *val, u8 len);
+int dccp_feat_clone_list(struct list_head const *, struct list_head *);
 
 /*
  * Encoding variable-length options and their maximum length.
@@ -127,11 +127,11 @@ extern int  dccp_feat_clone_list(struct list_head const *, struct list_head *);
  */
 #define DCCP_OPTVAL_MAXLEN     6
 
-extern void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
-extern u64  dccp_decode_value_var(const u8 *bf, const u8 len);
-extern u64  dccp_feat_nn_get(struct sock *sk, u8 feat);
+void dccp_encode_value_var(const u64 value, u8 *to, const u8 len);
+u64 dccp_decode_value_var(const u8 *bf, const u8 len);
+u64 dccp_feat_nn_get(struct sock *sk, u8 feat);
 
-extern int  dccp_insert_option_mandatory(struct sk_buff *skb);
-extern int  dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat,
-                              u8 *val, u8 len, bool repeat_first);
+int dccp_insert_option_mandatory(struct sk_buff *skb);
+int dccp_insert_fn_opt(struct sk_buff *skb, u8 type, u8 feat, u8 *val, u8 len,
+                      bool repeat_first);
 #endif /* _DCCP_FEAT_H */
index ebc54fef85a5ed15110fe39002defd42d6a0b8f7..720c36225ed9b3219a5b5929e509fb9f660e0d08 100644 (file)
@@ -409,9 +409,9 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        newinet            = inet_sk(newsk);
        ireq               = inet_rsk(req);
-       newinet->inet_daddr     = ireq->rmt_addr;
-       newinet->inet_rcv_saddr = ireq->loc_addr;
-       newinet->inet_saddr     = ireq->loc_addr;
+       newinet->inet_daddr     = ireq->ir_rmt_addr;
+       newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+       newinet->inet_saddr     = ireq->ir_loc_addr;
        newinet->inet_opt       = ireq->opt;
        ireq->opt          = NULL;
        newinet->mc_index  = inet_iif(skb);
@@ -516,10 +516,10 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct dccp_hdr *dh = dccp_hdr(skb);
 
-               dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr,
-                                                             ireq->rmt_addr);
-               err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-                                           ireq->rmt_addr,
+               dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
+                                                             ireq->ir_rmt_addr);
+               err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+                                           ireq->ir_rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
        }
@@ -641,8 +641,8 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                goto drop_and_free;
 
        ireq = inet_rsk(req);
-       ireq->loc_addr = ip_hdr(skb)->daddr;
-       ireq->rmt_addr = ip_hdr(skb)->saddr;
+       ireq->ir_loc_addr = ip_hdr(skb)->daddr;
+       ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
 
        /*
         * Step 3: Process LISTEN state
index 6cf9f7782ad4238208173f390369b5fc4cc75e2b..4ac71ff7c2e47c4a3bcec19206845c8afd7ce9e8 100644 (file)
@@ -67,7 +67,7 @@ static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
        struct dccp_hdr *dh = dccp_hdr(skb);
 
        dccp_csum_outgoing(skb);
-       dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
+       dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
 }
 
 static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
@@ -216,7 +216,7 @@ out:
 
 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
 {
-       struct inet6_request_sock *ireq6 = inet6_rsk(req);
+       struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff *skb;
        struct in6_addr *final_p, final;
@@ -226,12 +226,12 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
 
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_proto = IPPROTO_DCCP;
-       fl6.daddr = ireq6->rmt_addr;
-       fl6.saddr = ireq6->loc_addr;
+       fl6.daddr = ireq->ir_v6_rmt_addr;
+       fl6.saddr = ireq->ir_v6_loc_addr;
        fl6.flowlabel = 0;
-       fl6.flowi6_oif = ireq6->iif;
-       fl6.fl6_dport = inet_rsk(req)->rmt_port;
-       fl6.fl6_sport = inet_rsk(req)->loc_port;
+       fl6.flowi6_oif = ireq->ir_iif;
+       fl6.fl6_dport = ireq->ir_rmt_port;
+       fl6.fl6_sport = htons(ireq->ir_num);
        security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
 
@@ -249,9 +249,9 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
                struct dccp_hdr *dh = dccp_hdr(skb);
 
                dh->dccph_checksum = dccp_v6_csum_finish(skb,
-                                                        &ireq6->loc_addr,
-                                                        &ireq6->rmt_addr);
-               fl6.daddr = ireq6->rmt_addr;
+                                                        &ireq->ir_v6_loc_addr,
+                                                        &ireq->ir_v6_rmt_addr);
+               fl6.daddr = ireq->ir_v6_rmt_addr;
                err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
                err = net_xmit_eval(err);
        }
@@ -264,8 +264,7 @@ done:
 static void dccp_v6_reqsk_destructor(struct request_sock *req)
 {
        dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
-       if (inet6_rsk(req)->pktopts != NULL)
-               kfree_skb(inet6_rsk(req)->pktopts);
+       kfree_skb(inet_rsk(req)->pktopts);
 }
 
 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
@@ -359,7 +358,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
        struct request_sock *req;
        struct dccp_request_sock *dreq;
-       struct inet6_request_sock *ireq6;
+       struct inet_request_sock *ireq;
        struct ipv6_pinfo *np = inet6_sk(sk);
        const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
        struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
@@ -398,22 +397,22 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (security_inet_conn_request(sk, skb, req))
                goto drop_and_free;
 
-       ireq6 = inet6_rsk(req);
-       ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
-       ireq6->loc_addr = ipv6_hdr(skb)->daddr;
+       ireq = inet_rsk(req);
+       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                atomic_inc(&skb->users);
-               ireq6->pktopts = skb;
+               ireq->pktopts = skb;
        }
-       ireq6->iif = sk->sk_bound_dev_if;
+       ireq->ir_iif = sk->sk_bound_dev_if;
 
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
-           ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-               ireq6->iif = inet6_iif(skb);
+           ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               ireq->ir_iif = inet6_iif(skb);
 
        /*
         * Step 3: Process LISTEN state
@@ -446,7 +445,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
                                              struct request_sock *req,
                                              struct dst_entry *dst)
 {
-       struct inet6_request_sock *ireq6 = inet6_rsk(req);
+       struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct inet_sock *newinet;
        struct dccp6_sock *newdp6;
@@ -467,11 +466,11 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
                memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
+               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
 
                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-               newnp->rcv_saddr = newnp->saddr;
+               newsk->sk_v6_rcv_saddr = newnp->saddr;
 
                inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
                newsk->sk_backlog_rcv = dccp_v4_do_rcv;
@@ -505,12 +504,12 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = IPPROTO_DCCP;
-               fl6.daddr = ireq6->rmt_addr;
+               fl6.daddr = ireq->ir_v6_rmt_addr;
                final_p = fl6_update_dst(&fl6, np->opt, &final);
-               fl6.saddr = ireq6->loc_addr;
+               fl6.saddr = ireq->ir_v6_loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
-               fl6.fl6_dport = inet_rsk(req)->rmt_port;
-               fl6.fl6_sport = inet_rsk(req)->loc_port;
+               fl6.fl6_dport = ireq->ir_rmt_port;
+               fl6.fl6_sport = htons(ireq->ir_num);
                security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
                dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
@@ -538,10 +537,10 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-       newnp->daddr = ireq6->rmt_addr;
-       newnp->saddr = ireq6->loc_addr;
-       newnp->rcv_saddr = ireq6->loc_addr;
-       newsk->sk_bound_dev_if = ireq6->iif;
+       newsk->sk_v6_daddr      = ireq->ir_v6_rmt_addr;
+       newnp->saddr            = ireq->ir_v6_loc_addr;
+       newsk->sk_v6_rcv_saddr  = ireq->ir_v6_loc_addr;
+       newsk->sk_bound_dev_if  = ireq->ir_iif;
 
        /* Now IPv6 options...
 
@@ -554,10 +553,10 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
-       if (ireq6->pktopts != NULL) {
-               newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
-               consume_skb(ireq6->pktopts);
-               ireq6->pktopts = NULL;
+       if (ireq->pktopts != NULL) {
+               newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
+               consume_skb(ireq->pktopts);
+               ireq->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
@@ -885,7 +884,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        return -EINVAL;
        }
 
-       np->daddr = usin->sin6_addr;
+       sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
        /*
@@ -915,16 +914,16 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                        goto failure;
                }
                ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
-               ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr);
+               ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &sk->sk_v6_rcv_saddr);
 
                return err;
        }
 
-       if (!ipv6_addr_any(&np->rcv_saddr))
-               saddr = &np->rcv_saddr;
+       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+               saddr = &sk->sk_v6_rcv_saddr;
 
        fl6.flowi6_proto = IPPROTO_DCCP;
-       fl6.daddr = np->daddr;
+       fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.fl6_dport = usin->sin6_port;
@@ -941,7 +940,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (saddr == NULL) {
                saddr = &fl6.saddr;
-               np->rcv_saddr = *saddr;
+               sk->sk_v6_rcv_saddr = *saddr;
        }
 
        /* set the source address */
@@ -963,7 +962,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                goto late_failure;
 
        dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
-                                                     np->daddr.s6_addr32,
+                                                     sk->sk_v6_daddr.s6_addr32,
                                                      inet->inet_sport,
                                                      inet->inet_dport);
        err = dccp_connect(sk);
index 6eef81fdbe566f5623fc0816370ac15b4c01dc5a..af259e15e7f0b8c4cb09381bba5423a59c7e5540 100644 (file)
@@ -25,12 +25,10 @@ struct dccp6_sock {
 
 struct dccp6_request_sock {
        struct dccp_request_sock  dccp;
-       struct inet6_request_sock inet6;
 };
 
 struct dccp6_timewait_sock {
        struct inet_timewait_sock   inet;
-       struct inet6_timewait_sock  tw6;
 };
 
 #endif /* _DCCP_IPV6_H */
index 662071b249cc627130cf90eeaa8e5b7bcd8b829d..9e2f78bc1553e38ed45c66e3c3aba4db302fa0b0 100644 (file)
@@ -56,12 +56,9 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
 #if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        const struct ipv6_pinfo *np = inet6_sk(sk);
-                       struct inet6_timewait_sock *tw6;
 
-                       tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
-                       tw6 = inet6_twsk((struct sock *)tw);
-                       tw6->tw_v6_daddr = np->daddr;
-                       tw6->tw_v6_rcv_saddr = np->rcv_saddr;
+                       tw->tw_v6_daddr = sk->sk_v6_daddr;
+                       tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_ipv6only = np->ipv6only;
                }
 #endif
@@ -269,10 +266,10 @@ int dccp_reqsk_init(struct request_sock *req,
 {
        struct dccp_request_sock *dreq = dccp_rsk(req);
 
-       inet_rsk(req)->rmt_port   = dccp_hdr(skb)->dccph_sport;
-       inet_rsk(req)->loc_port   = dccp_hdr(skb)->dccph_dport;
-       inet_rsk(req)->acked      = 0;
-       dreq->dreq_timestamp_echo = 0;
+       inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport;
+       inet_rsk(req)->ir_num      = ntohs(dccp_hdr(skb)->dccph_dport);
+       inet_rsk(req)->acked       = 0;
+       dreq->dreq_timestamp_echo  = 0;
 
        /* inherit feature negotiation options from listening socket */
        return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg);
index d17fc90a74b63822bb6de1f647c34903a855b197..8876078859dac20ef543043ae5b98ec42f8731cd 100644 (file)
@@ -424,8 +424,8 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
        /* Build and checksum header */
        dh = dccp_zeroed_hdr(skb, dccp_header_size);
 
-       dh->dccph_sport = inet_rsk(req)->loc_port;
-       dh->dccph_dport = inet_rsk(req)->rmt_port;
+       dh->dccph_sport = htons(inet_rsk(req)->ir_num);
+       dh->dccph_dport = inet_rsk(req)->ir_rmt_port;
        dh->dccph_doff  = (dccp_header_size +
                           DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
        dh->dccph_type  = DCCP_PKT_RESPONSE;
index ba64750f038726a990caa71d90e45f77bad93230..eb892b4f48144966e47f386108942f51b8b85e50 100644 (file)
@@ -1158,10 +1158,8 @@ static int __init dccp_init(void)
                goto out_free_bind_bucket_cachep;
        }
 
-       for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) {
+       for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
                INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
-               INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
-       }
 
        if (inet_ehash_locks_alloc(&dccp_hashinfo))
                        goto out_free_dccp_ehash;
index 2a7efe388344fdd37b10487c50ba3d712eedbf8a..e83015cecfa7507d551bd19e4b4121ad0f25eeaf 100644 (file)
@@ -87,7 +87,7 @@ static void dnrmg_send_peer(struct sk_buff *skb)
 }
 
 
-static unsigned int dnrmg_hook(unsigned int hook,
+static unsigned int dnrmg_hook(const struct nf_hook_ops *ops,
                        struct sk_buff *skb,
                        const struct net_device *in,
                        const struct net_device *out,
index be1f64d35358fbfbf61d84683c52e4d6fb423f34..8f032bae60ad8fdc8e284c5cd5ae7501660fcf50 100644 (file)
@@ -58,7 +58,7 @@
 #include <net/ipv6.h>
 #include <net/ip.h>
 #include <net/dsa.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 __setup("ether=", netdev_boot_setup);
 
@@ -133,7 +133,7 @@ int eth_rebuild_header(struct sk_buff *skb)
                return arp_find(eth->h_dest, skb);
 #endif
        default:
-               printk(KERN_DEBUG
+               netdev_dbg(dev,
                       "%s: unable to resolve type %X addresses.\n",
                       dev->name, ntohs(eth->h_proto));
 
@@ -169,20 +169,9 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
                else
                        skb->pkt_type = PACKET_MULTICAST;
        }
-
-       /*
-        *      This ALLMULTI check should be redundant by 1.4
-        *      so don't forget to remove it.
-        *
-        *      Seems, you forgot to remove it. All silly devices
-        *      seems to set IFF_PROMISC.
-        */
-
-       else if (1 /*dev->flags&IFF_PROMISC */ ) {
-               if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
-                                                     dev->dev_addr)))
-                       skb->pkt_type = PACKET_OTHERHOST;
-       }
+       else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+                                                  dev->dev_addr)))
+               skb->pkt_type = PACKET_OTHERHOST;
 
        /*
         * Some variants of DSA tagging don't have an ethertype field
@@ -190,12 +179,13 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
         * variants has been configured on the receiving interface,
         * and if so, set skb->protocol without looking at the packet.
         */
-       if (netdev_uses_dsa_tags(dev))
+       if (unlikely(netdev_uses_dsa_tags(dev)))
                return htons(ETH_P_DSA);
-       if (netdev_uses_trailer_tags(dev))
+
+       if (unlikely(netdev_uses_trailer_tags(dev)))
                return htons(ETH_P_TRAILER);
 
-       if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
+       if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN))
                return eth->h_proto;
 
        /*
@@ -204,7 +194,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
         *      layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
         *      won't work for fault tolerant netware but does for the rest.
         */
-       if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF)
+       if (unlikely(skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF))
                return htons(ETH_P_802_3);
 
        /*
index c85e71e0c7ffc640bd9592ce52b03dbde6cad926..ff41b4d60d302e18a1ebcf1bdfc950970fdd3aae 100644 (file)
@@ -1372,6 +1372,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
        real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
        if (!real_dev)
                return -ENODEV;
+       if (real_dev->type != ARPHRD_IEEE802154)
+               return -EINVAL;
 
        lowpan_dev_info(dev)->real_dev = real_dev;
        lowpan_dev_info(dev)->fragment_tag = 0;
@@ -1386,6 +1388,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
 
        entry->ldev = dev;
 
+       /* Set the lowpan hardware address to the wpan hardware address. */
+       memcpy(dev->dev_addr, real_dev->dev_addr, IEEE802154_ADDR_LEN);
+
        mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
        INIT_LIST_HEAD(&entry->list);
        list_add_tail(&entry->list, &lowpan_devices);
index cfeb85cff4f02abc28570b267ba6e64784595fab..f4a159e705c0c57af71d6efb99a3608f2a304f5a 100644 (file)
@@ -245,29 +245,6 @@ out:
 }
 EXPORT_SYMBOL(inet_listen);
 
-u32 inet_ehash_secret __read_mostly;
-EXPORT_SYMBOL(inet_ehash_secret);
-
-u32 ipv6_hash_secret __read_mostly;
-EXPORT_SYMBOL(ipv6_hash_secret);
-
-/*
- * inet_ehash_secret must be set exactly once, and to a non nul value
- * ipv6_hash_secret must be set exactly once.
- */
-void build_ehash_secret(void)
-{
-       u32 rnd;
-
-       do {
-               get_random_bytes(&rnd, sizeof(rnd));
-       } while (rnd == 0);
-
-       if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
-               get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
-}
-EXPORT_SYMBOL(build_ehash_secret);
-
 /*
  *     Create an inet socket.
  */
@@ -284,10 +261,6 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
        int try_loading_module = 0;
        int err;
 
-       if (unlikely(!inet_ehash_secret))
-               if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
-                       build_ehash_secret();
-
        sock->state = SS_UNCONNECTED;
 
        /* Look for the requested type/protocol pair. */
@@ -1254,36 +1227,36 @@ static int inet_gso_send_check(struct sk_buff *skb)
        if (ihl < sizeof(*iph))
                goto out;
 
+       proto = iph->protocol;
+
+       /* Warning: after this point, iph might no longer be valid */
        if (unlikely(!pskb_may_pull(skb, ihl)))
                goto out;
-
        __skb_pull(skb, ihl);
+
        skb_reset_transport_header(skb);
-       iph = ip_hdr(skb);
-       proto = iph->protocol;
        err = -EPROTONOSUPPORT;
 
-       rcu_read_lock();
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_send_check))
                err = ops->callbacks.gso_send_check(skb);
-       rcu_read_unlock();
 
 out:
        return err;
 }
 
 static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
-       netdev_features_t features)
+                                       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
+       unsigned int offset = 0;
        struct iphdr *iph;
+       bool tunnel;
        int proto;
+       int nhoff;
        int ihl;
        int id;
-       unsigned int offset = 0;
-       bool tunnel;
 
        if (unlikely(skb_shinfo(skb)->gso_type &
                     ~(SKB_GSO_TCPV4 |
@@ -1291,12 +1264,16 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
                       SKB_GSO_GRE |
+                      SKB_GSO_IPIP |
+                      SKB_GSO_SIT |
                       SKB_GSO_TCPV6 |
                       SKB_GSO_UDP_TUNNEL |
                       SKB_GSO_MPLS |
                       0)))
                goto out;
 
+       skb_reset_network_header(skb);
+       nhoff = skb_network_header(skb) - skb_mac_header(skb);
        if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
                goto out;
 
@@ -1305,42 +1282,49 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
        if (ihl < sizeof(*iph))
                goto out;
 
+       id = ntohs(iph->id);
+       proto = iph->protocol;
+
+       /* Warning: after this point, iph might no longer be valid */
        if (unlikely(!pskb_may_pull(skb, ihl)))
                goto out;
+       __skb_pull(skb, ihl);
 
-       tunnel = !!skb->encapsulation;
+       tunnel = SKB_GSO_CB(skb)->encap_level > 0;
+       if (tunnel)
+               features = skb->dev->hw_enc_features & netif_skb_features(skb);
+       SKB_GSO_CB(skb)->encap_level += ihl;
 
-       __skb_pull(skb, ihl);
        skb_reset_transport_header(skb);
-       iph = ip_hdr(skb);
-       id = ntohs(iph->id);
-       proto = iph->protocol;
+
        segs = ERR_PTR(-EPROTONOSUPPORT);
 
-       rcu_read_lock();
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);
-       rcu_read_unlock();
 
        if (IS_ERR_OR_NULL(segs))
                goto out;
 
        skb = segs;
        do {
-               iph = ip_hdr(skb);
+               iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
                if (!tunnel && proto == IPPROTO_UDP) {
                        iph->id = htons(id);
                        iph->frag_off = htons(offset >> 3);
                        if (skb->next != NULL)
                                iph->frag_off |= htons(IP_MF);
-                       offset += (skb->len - skb->mac_len - iph->ihl * 4);
+                       offset += skb->len - nhoff - ihl;
                } else  {
                        iph->id = htons(id++);
                }
-               iph->tot_len = htons(skb->len - skb->mac_len);
-               iph->check = 0;
-               iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
+               iph->tot_len = htons(skb->len - nhoff);
+               ip_send_check(iph);
+               if (tunnel) {
+                       skb_reset_inner_headers(skb);
+                       skb->encapsulation = 1;
+               }
+               skb->network_header = (u8 *)iph - skb->head;
        } while ((skb = skb->next));
 
 out:
@@ -1546,6 +1530,7 @@ static const struct net_protocol tcp_protocol = {
 };
 
 static const struct net_protocol udp_protocol = {
+       .early_demux =  udp_v4_early_demux,
        .handler =      udp_rcv,
        .err_handler =  udp_err,
        .no_policy =    1,
@@ -1646,6 +1631,13 @@ static struct packet_offload ip_packet_offload __read_mostly = {
        },
 };
 
+static const struct net_offload ipip_offload = {
+       .callbacks = {
+               .gso_send_check = inet_gso_send_check,
+               .gso_segment    = inet_gso_segment,
+       },
+};
+
 static int __init ipv4_offload_init(void)
 {
        /*
@@ -1657,6 +1649,7 @@ static int __init ipv4_offload_init(void)
                pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
 
        dev_add_offload(&ip_packet_offload);
+       inet_add_offload(&ipip_offload, IPPROTO_IPIP);
        return 0;
 }
 
@@ -1705,8 +1698,6 @@ static int __init inet_init(void)
        ip_static_sysctl_init();
 #endif
 
-       tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
-
        /*
         *      Add all the base protocols.
         */
index b3f627ac4ed8ae5031d559a5648612b46f015e25..d846304b7b89d619d53609c5e435b3ddcc3e2d84 100644 (file)
@@ -933,7 +933,6 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
                local_bh_disable();
 
                frn->tb_id = tb->tb_id;
-               rcu_read_lock();
                frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
 
                if (!frn->err) {
@@ -942,7 +941,6 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
                        frn->type = res.type;
                        frn->scope = res.scope;
                }
-               rcu_read_unlock();
                local_bh_enable();
        }
 }
index af0f14aba169a2fa11e03eb0fb1bde4b50261568..388d113fd289b9134cf34e930a26c6c99b7ea721 100644 (file)
@@ -24,21 +24,17 @@ static inline void fib_alias_accessed(struct fib_alias *fa)
 }
 
 /* Exported by fib_semantics.c */
-extern void fib_release_info(struct fib_info *);
-extern struct fib_info *fib_create_info(struct fib_config *cfg);
-extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
-extern int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
-                        u32 tb_id, u8 type, __be32 dst,
-                        int dst_len, u8 tos, struct fib_info *fi,
-                        unsigned int);
-extern void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
-                     int dst_len, u32 tb_id, struct nl_info *info,
-                     unsigned int nlm_flags);
-extern struct fib_alias *fib_find_alias(struct list_head *fah,
-                                       u8 tos, u32 prio);
-extern int fib_detect_death(struct fib_info *fi, int order,
-                           struct fib_info **last_resort,
-                           int *last_idx, int dflt);
+void fib_release_info(struct fib_info *);
+struct fib_info *fib_create_info(struct fib_config *cfg);
+int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
+int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id,
+                 u8 type, __be32 dst, int dst_len, u8 tos, struct fib_info *fi,
+                 unsigned int);
+void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, int dst_len,
+              u32 tb_id, const struct nl_info *info, unsigned int nlm_flags);
+struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio);
+int fib_detect_death(struct fib_info *fi, int order,
+                    struct fib_info **last_resort, int *last_idx, int dflt);
 
 static inline void fib_result_assign(struct fib_result *res,
                                     struct fib_info *fi)
index d5dbca5ecf628c4dd5a52daf306047fb971de33e..e63f47a4e651f2c5587723f7796e2fd9ec43b4f8 100644 (file)
@@ -380,7 +380,7 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
 }
 
 void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
-              int dst_len, u32 tb_id, struct nl_info *info,
+              int dst_len, u32 tb_id, const struct nl_info *info,
               unsigned int nlm_flags)
 {
        struct sk_buff *skb;
index 3df6d3edb2a15a98cb0e90a4e5ed935f42f15f1c..ec9a9ef4ce50851639cf4cbe8e497390bea371f6 100644 (file)
@@ -762,12 +762,9 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 
                if (IS_LEAF(node) || ((struct tnode *) node)->pos >
                   tn->pos + tn->bits - 1) {
-                       if (tkey_extract_bits(node->key,
-                                             oldtnode->pos + oldtnode->bits,
-                                             1) == 0)
-                               put_child(tn, 2*i, node);
-                       else
-                               put_child(tn, 2*i+1, node);
+                       put_child(tn,
+                               tkey_extract_bits(node->key, oldtnode->pos, oldtnode->bits + 1),
+                               node);
                        continue;
                }
 
@@ -1120,12 +1117,8 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
                 *  first tnode need some special handling
                 */
 
-               if (tp)
-                       pos = tp->pos+tp->bits;
-               else
-                       pos = 0;
-
                if (n) {
+                       pos = tp ? tp->pos+tp->bits : 0;
                        newpos = tkey_mismatch(key, pos, n->key);
                        tn = tnode_new(n->key, newpos, 1);
                } else {
index 736c9fc3ef93c0f2de02658997782f7af011c122..5893e99e82990ae75f4221249e3e5fcbceb819c4 100644 (file)
@@ -93,35 +93,6 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 }
 EXPORT_SYMBOL_GPL(gre_build_header);
 
-struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool gre_csum)
-{
-       int err;
-
-       if (likely(!skb->encapsulation)) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
-
-       if (skb_is_gso(skb)) {
-               err = skb_unclone(skb, GFP_ATOMIC);
-               if (unlikely(err))
-                       goto error;
-               skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
-               return skb;
-       } else if (skb->ip_summed == CHECKSUM_PARTIAL && gre_csum) {
-               err = skb_checksum_help(skb);
-               if (unlikely(err))
-                       goto error;
-       } else if (skb->ip_summed != CHECKSUM_PARTIAL)
-               skb->ip_summed = CHECKSUM_NONE;
-
-       return skb;
-error:
-       kfree_skb(skb);
-       return ERR_PTR(err);
-}
-EXPORT_SYMBOL_GPL(gre_handle_offloads);
-
 static __sum16 check_checksum(struct sk_buff *skb)
 {
        __sum16 csum = 0;
index 55e6bfb3a28921c26b0be3170f10c1245a9a74ae..e5d436188464eba55eca773f4fb88fd0f0634e97 100644 (file)
@@ -39,7 +39,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
                                  SKB_GSO_UDP |
                                  SKB_GSO_DODGY |
                                  SKB_GSO_TCP_ECN |
-                                 SKB_GSO_GRE)))
+                                 SKB_GSO_GRE |
+                                 SKB_GSO_IPIP)))
                goto out;
 
        if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
index 5f7d11a458713f9c755dd1a1a40289b180a3e041..5c0e8bc6e5ba275d2469336533b5848de94ae6c1 100644 (file)
@@ -353,6 +353,9 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        saddr = fib_compute_spec_dst(skb);
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
+
        if (icmp_param->replyopts.opt.opt.optlen) {
                ipc.opt = &icmp_param->replyopts.opt;
                if (ipc.opt->opt.srr)
@@ -608,6 +611,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        ipc.addr = iph->saddr;
        ipc.opt = &icmp_param->replyopts.opt;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
                               type, code, icmp_param);
index 6acb541c90910204f02449e7500138362da6998a..fc0e649cc002beb0631c390e6aed5f1ea3fe7390 100644 (file)
@@ -29,27 +29,19 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
-/*
- * This struct holds the first and last local port number.
- */
-struct local_ports sysctl_local_ports __read_mostly = {
-       .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
-       .range = { 32768, 61000 },
-};
-
 unsigned long *sysctl_local_reserved_ports;
 EXPORT_SYMBOL(sysctl_local_reserved_ports);
 
-void inet_get_local_port_range(int *low, int *high)
+void inet_get_local_port_range(struct net *net, int *low, int *high)
 {
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
-               *low = sysctl_local_ports.range[0];
-               *high = sysctl_local_ports.range[1];
-       } while (read_seqretry(&sysctl_local_ports.lock, seq));
+               *low = net->ipv4.sysctl_local_ports.range[0];
+               *high = net->ipv4.sysctl_local_ports.range[1];
+       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
@@ -79,17 +71,16 @@ int inet_csk_bind_conflict(const struct sock *sk,
                            (!reuseport || !sk2->sk_reuseport ||
                            (sk2->sk_state != TCP_TIME_WAIT &&
                             !uid_eq(uid, sock_i_uid(sk2))))) {
-                               const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
-                               if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
-                                   sk2_rcv_saddr == sk_rcv_saddr(sk))
+
+                               if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
+                                   sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
                                        break;
                        }
                        if (!relax && reuse && sk2->sk_reuse &&
                            sk2->sk_state != TCP_LISTEN) {
-                               const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 
-                               if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
-                                   sk2_rcv_saddr == sk_rcv_saddr(sk))
+                               if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
+                                   sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
                                        break;
                        }
                }
@@ -116,7 +107,7 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
                int remaining, rover, low, high;
 
 again:
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
                smallest_rover = rover = net_random() % remaining + low;
 
@@ -421,8 +412,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol,
                           flags,
-                          (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-                          ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+                          (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
+                          ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -457,8 +448,8 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
        flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
-                          (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
-                          ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+                          (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
+                          ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
@@ -504,9 +495,9 @@ struct request_sock *inet_csk_search_req(const struct sock *sk,
             prev = &req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);
 
-               if (ireq->rmt_port == rport &&
-                   ireq->rmt_addr == raddr &&
-                   ireq->loc_addr == laddr &&
+               if (ireq->ir_rmt_port == rport &&
+                   ireq->ir_rmt_addr == raddr &&
+                   ireq->ir_loc_addr == laddr &&
                    AF_INET_FAMILY(req->rsk_ops->family)) {
                        WARN_ON(req->sk);
                        *prevp = prev;
@@ -523,7 +514,8 @@ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-       const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
+       const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
+                                    inet_rsk(req)->ir_rmt_port,
                                     lopt->hash_rnd, lopt->nr_table_entries);
 
        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
@@ -683,9 +675,9 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
                newsk->sk_state = TCP_SYN_RECV;
                newicsk->icsk_bind_hash = NULL;
 
-               inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
-               inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
-               inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
+               inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
+               inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
+               inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
                newsk->sk_write_space = sk_stream_write_space;
 
                newicsk->icsk_retransmits = 0;
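
These hunks track the inet_request_sock field rename (rmt_addr, loc_addr, rmt_port become ir_rmt_addr, ir_loc_addr, ir_rmt_port), with the local port now kept as ir_num in host byte order: inet_num takes it directly and inet_sport needs a single htons(). A small user-space sketch of that byte-order convention (the variables are illustrative, not kernel symbols):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ir_num = 8080;              /* local port, host byte order */
	uint16_t inet_sport = htons(ir_num); /* network (wire) byte order */

	printf("host: %u  network: 0x%04x  round trip: %u\n",
	       ir_num, inet_sport, ntohs(inet_sport));
	return 0;
}
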
index 5f648751fce2d03418f4a6425c7ba0dbfb3ddfc0..56a964a553d2c739a03b880acd7158a4c9714b66 100644 (file)
@@ -121,13 +121,13 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (r->idiag_family == AF_INET6) {
-               const struct ipv6_pinfo *np = inet6_sk(sk);
 
-               *(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
-               *(struct in6_addr *)r->id.idiag_dst = np->daddr;
+               *(struct in6_addr *)r->id.idiag_src = sk->sk_v6_rcv_saddr;
+               *(struct in6_addr *)r->id.idiag_dst = sk->sk_v6_daddr;
 
                if (ext & (1 << (INET_DIAG_TCLASS - 1)))
-                       if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
+                       if (nla_put_u8(skb, INET_DIAG_TCLASS,
+                                      inet6_sk(sk)->tclass) < 0)
                                goto errout;
        }
 #endif
@@ -222,7 +222,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
                               u32 portid, u32 seq, u16 nlmsg_flags,
                               const struct nlmsghdr *unlh)
 {
-       long tmo;
+       s32 tmo;
        struct inet_diag_msg *r;
        struct nlmsghdr *nlh;
 
@@ -234,7 +234,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r = nlmsg_data(nlh);
        BUG_ON(tw->tw_state != TCP_TIME_WAIT);
 
-       tmo = tw->tw_ttd - jiffies;
+       tmo = tw->tw_ttd - inet_tw_time_stamp();
        if (tmo < 0)
                tmo = 0;
 
@@ -248,18 +248,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
        r->id.idiag_dst[0]    = tw->tw_daddr;
        r->idiag_state        = tw->tw_substate;
        r->idiag_timer        = 3;
-       r->idiag_expires      = DIV_ROUND_UP(tmo * 1000, HZ);
+       r->idiag_expires      = jiffies_to_msecs(tmo);
        r->idiag_rqueue       = 0;
        r->idiag_wqueue       = 0;
        r->idiag_uid          = 0;
        r->idiag_inode        = 0;
 #if IS_ENABLED(CONFIG_IPV6)
        if (tw->tw_family == AF_INET6) {
-               const struct inet6_timewait_sock *tw6 =
-                                               inet6_twsk((struct sock *)tw);
-
-               *(struct in6_addr *)r->id.idiag_src = tw6->tw_v6_rcv_saddr;
-               *(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
+               *(struct in6_addr *)r->id.idiag_src = tw->tw_v6_rcv_saddr;
+               *(struct in6_addr *)r->id.idiag_dst = tw->tw_v6_daddr;
        }
 #endif
 
@@ -273,10 +270,11 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        const struct nlmsghdr *unlh)
 {
        if (sk->sk_state == TCP_TIME_WAIT)
-               return inet_twsk_diag_fill((struct inet_timewait_sock *)sk,
-                                          skb, r, portid, seq, nlmsg_flags,
-                                          unlh);
-       return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq, nlmsg_flags, unlh);
+               return inet_twsk_diag_fill(inet_twsk(sk), skb, r, portid, seq,
+                                          nlmsg_flags, unlh);
+
+       return inet_csk_diag_fill(sk, skb, r, user_ns, portid, seq,
+                                 nlmsg_flags, unlh);
 }
 
 int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_skb,
@@ -338,12 +336,9 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
                err = 0;
 
 out:
-       if (sk) {
-               if (sk->sk_state == TCP_TIME_WAIT)
-                       inet_twsk_put((struct inet_timewait_sock *)sk);
-               else
-                       sock_put(sk);
-       }
+       if (sk)
+               sock_gen_put(sk);
+
 out_nosk:
        return err;
 }
@@ -489,10 +484,9 @@ int inet_diag_bc_sk(const struct nlattr *bc, struct sock *sk)
        entry.family = sk->sk_family;
 #if IS_ENABLED(CONFIG_IPV6)
        if (entry.family == AF_INET6) {
-               struct ipv6_pinfo *np = inet6_sk(sk);
 
-               entry.saddr = np->rcv_saddr.s6_addr32;
-               entry.daddr = np->daddr.s6_addr32;
+               entry.saddr = sk->sk_v6_rcv_saddr.s6_addr32;
+               entry.daddr = sk->sk_v6_daddr.s6_addr32;
        } else
 #endif
        {
@@ -635,22 +629,22 @@ static int inet_csk_diag_dump(struct sock *sk,
                                  cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
 }
 
-static int inet_twsk_diag_dump(struct inet_timewait_sock *tw,
+static int inet_twsk_diag_dump(struct sock *sk,
                               struct sk_buff *skb,
                               struct netlink_callback *cb,
                               struct inet_diag_req_v2 *r,
                               const struct nlattr *bc)
 {
+       struct inet_timewait_sock *tw = inet_twsk(sk);
+
        if (bc != NULL) {
                struct inet_diag_entry entry;
 
                entry.family = tw->tw_family;
 #if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == AF_INET6) {
-                       struct inet6_timewait_sock *tw6 =
-                                               inet6_twsk((struct sock *)tw);
-                       entry.saddr = tw6->tw_v6_rcv_saddr.s6_addr32;
-                       entry.daddr = tw6->tw_v6_daddr.s6_addr32;
+                       entry.saddr = tw->tw_v6_rcv_saddr.s6_addr32;
+                       entry.daddr = tw->tw_v6_daddr.s6_addr32;
                } else
 #endif
                {
@@ -682,12 +676,12 @@ static inline void inet_diag_req_addrs(const struct sock *sk,
 #if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6) {
                if (req->rsk_ops->family == AF_INET6) {
-                       entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32;
-                       entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32;
+                       entry->saddr = ireq->ir_v6_loc_addr.s6_addr32;
+                       entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32;
                } else if (req->rsk_ops->family == AF_INET) {
-                       ipv6_addr_set_v4mapped(ireq->loc_addr,
+                       ipv6_addr_set_v4mapped(ireq->ir_loc_addr,
                                               &entry->saddr_storage);
-                       ipv6_addr_set_v4mapped(ireq->rmt_addr,
+                       ipv6_addr_set_v4mapped(ireq->ir_rmt_addr,
                                               &entry->daddr_storage);
                        entry->saddr = entry->saddr_storage.s6_addr32;
                        entry->daddr = entry->daddr_storage.s6_addr32;
@@ -695,8 +689,8 @@ static inline void inet_diag_req_addrs(const struct sock *sk,
        } else
 #endif
        {
-               entry->saddr = &ireq->loc_addr;
-               entry->daddr = &ireq->rmt_addr;
+               entry->saddr = &ireq->ir_loc_addr;
+               entry->daddr = &ireq->ir_rmt_addr;
        }
 }
 
@@ -731,9 +725,9 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
                tmo = 0;
 
        r->id.idiag_sport = inet->inet_sport;
-       r->id.idiag_dport = ireq->rmt_port;
-       r->id.idiag_src[0] = ireq->loc_addr;
-       r->id.idiag_dst[0] = ireq->rmt_addr;
+       r->id.idiag_dport = ireq->ir_rmt_port;
+       r->id.idiag_src[0] = ireq->ir_loc_addr;
+       r->id.idiag_dst[0] = ireq->ir_rmt_addr;
        r->idiag_expires = jiffies_to_msecs(tmo);
        r->idiag_rqueue = 0;
        r->idiag_wqueue = 0;
@@ -792,13 +786,13 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
                        if (reqnum < s_reqnum)
                                continue;
-                       if (r->id.idiag_dport != ireq->rmt_port &&
+                       if (r->id.idiag_dport != ireq->ir_rmt_port &&
                            r->id.idiag_dport)
                                continue;
 
                        if (bc) {
                                inet_diag_req_addrs(sk, req, &entry);
-                               entry.dport = ntohs(ireq->rmt_port);
+                               entry.dport = ntohs(ireq->ir_rmt_port);
 
                                if (!inet_diag_bc_run(bc, &entry))
                                        continue;
@@ -911,8 +905,7 @@ skip_listen_ht:
 
                num = 0;
 
-               if (hlist_nulls_empty(&head->chain) &&
-                       hlist_nulls_empty(&head->twchain))
+               if (hlist_nulls_empty(&head->chain))
                        continue;
 
                if (i > s_i)
@@ -920,7 +913,7 @@ skip_listen_ht:
 
                spin_lock_bh(lock);
                sk_nulls_for_each(sk, node, &head->chain) {
-                       struct inet_sock *inet = inet_sk(sk);
+                       int res;
 
                        if (!net_eq(sock_net(sk), net))
                                continue;
@@ -929,15 +922,19 @@ skip_listen_ht:
                        if (!(r->idiag_states & (1 << sk->sk_state)))
                                goto next_normal;
                        if (r->sdiag_family != AF_UNSPEC &&
-                                       sk->sk_family != r->sdiag_family)
+                           sk->sk_family != r->sdiag_family)
                                goto next_normal;
-                       if (r->id.idiag_sport != inet->inet_sport &&
+                       if (r->id.idiag_sport != htons(sk->sk_num) &&
                            r->id.idiag_sport)
                                goto next_normal;
-                       if (r->id.idiag_dport != inet->inet_dport &&
+                       if (r->id.idiag_dport != sk->sk_dport &&
                            r->id.idiag_dport)
                                goto next_normal;
-                       if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) {
+                       if (sk->sk_state == TCP_TIME_WAIT)
+                               res = inet_twsk_diag_dump(sk, skb, cb, r, bc);
+                       else
+                               res = inet_csk_diag_dump(sk, skb, cb, r, bc);
+                       if (res < 0) {
                                spin_unlock_bh(lock);
                                goto done;
                        }
@@ -945,33 +942,6 @@ next_normal:
                        ++num;
                }
 
-               if (r->idiag_states & TCPF_TIME_WAIT) {
-                       struct inet_timewait_sock *tw;
-
-                       inet_twsk_for_each(tw, node,
-                                   &head->twchain) {
-                               if (!net_eq(twsk_net(tw), net))
-                                       continue;
-
-                               if (num < s_num)
-                                       goto next_dying;
-                               if (r->sdiag_family != AF_UNSPEC &&
-                                               tw->tw_family != r->sdiag_family)
-                                       goto next_dying;
-                               if (r->id.idiag_sport != tw->tw_sport &&
-                                   r->id.idiag_sport)
-                                       goto next_dying;
-                               if (r->id.idiag_dport != tw->tw_dport &&
-                                   r->id.idiag_dport)
-                                       goto next_dying;
-                               if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
-                                       spin_unlock_bh(lock);
-                                       goto done;
-                               }
-next_dying:
-                               ++num;
-                       }
-               }
                spin_unlock_bh(lock);
        }
 
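
The inet_diag changes above follow the removal of the separate time-wait chain: one walk over head->chain now dispatches per entry on sk_state instead of running a second loop over twchain, and the remaining time-wait lifetime is reported via jiffies_to_msecs(). A compilable sketch of that dispatch shape, with stand-in types:

#include <stdio.h>

enum state { ESTABLISHED, TIME_WAIT };

struct entry { enum state state; int id; };

static int dump_established(const struct entry *e) { printf("est %d\n", e->id); return 0; }
static int dump_timewait(const struct entry *e)    { printf("tw  %d\n", e->id); return 0; }

/* One chain, one loop: pick the dump helper per entry, stop on error. */
static int dump_chain(const struct entry *chain, int n)
{
	for (int i = 0; i < n; i++) {
		int res = (chain[i].state == TIME_WAIT)
			? dump_timewait(&chain[i])
			: dump_established(&chain[i]);
		if (res < 0)
			return res;
	}
	return 0;
}

int main(void)
{
	struct entry chain[] = { { ESTABLISHED, 1 }, { TIME_WAIT, 2 } };

	return dump_chain(chain, 2);
}
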
index c5313a9c019b9b94aca569aba011b63fc452e1c5..bb075fc9a14f25169c175c7fcdcb86d56c709627 100644 (file)
@@ -93,9 +93,6 @@ void inet_frags_init(struct inet_frags *f)
        }
        rwlock_init(&f->lock);
 
-       f->rnd = (u32) ((totalram_pages ^ (totalram_pages >> 7)) ^
-                                  (jiffies ^ (jiffies >> 6)));
-
        setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
                        (unsigned long)f);
        f->secret_timer.expires = jiffies + f->secret_interval;
index 7bd8983dbfcf308e61dc55bb9491b3dd6866fa35..8b9cf279450d6cf0c24e64a20fb0d05b9fb89a82 100644 (file)
 #include <net/secure_seq.h>
 #include <net/ip.h>
 
+static unsigned int inet_ehashfn(struct net *net, const __be32 laddr,
+                                const __u16 lport, const __be32 faddr,
+                                const __be16 fport)
+{
+       static u32 inet_ehash_secret __read_mostly;
+
+       net_get_random_once(&inet_ehash_secret, sizeof(inet_ehash_secret));
+
+       return __inet_ehashfn(laddr, lport, faddr, fport,
+                             inet_ehash_secret + net_hash_mix(net));
+}
+
+static unsigned int inet_sk_ehashfn(const struct sock *sk)
+{
+       const struct inet_sock *inet = inet_sk(sk);
+       const __be32 laddr = inet->inet_rcv_saddr;
+       const __u16 lport = inet->inet_num;
+       const __be32 faddr = inet->inet_daddr;
+       const __be16 fport = inet->inet_dport;
+       struct net *net = sock_net(sk);
+
+       return inet_ehashfn(net, laddr, lport, faddr, fport);
+}
+
 /*
  * Allocate and initialize a new local port bind bucket.
  * The bindhash mutex for snum's hash chain must be held here.
@@ -230,6 +255,19 @@ begin:
 }
 EXPORT_SYMBOL_GPL(__inet_lookup_listener);
 
+/* All sockets share common refcount, but have different destructors */
+void sock_gen_put(struct sock *sk)
+{
+       if (!atomic_dec_and_test(&sk->sk_refcnt))
+               return;
+
+       if (sk->sk_state == TCP_TIME_WAIT)
+               inet_twsk_free(inet_twsk(sk));
+       else
+               sk_free(sk);
+}
+EXPORT_SYMBOL_GPL(sock_gen_put);
+
 struct sock *__inet_lookup_established(struct net *net,
                                  struct inet_hashinfo *hashinfo,
                                  const __be32 saddr, const __be16 sport,
@@ -255,13 +293,13 @@ begin:
                if (likely(INET_MATCH(sk, net, acookie,
                                      saddr, daddr, ports, dif))) {
                        if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
-                               goto begintw;
+                               goto out;
                        if (unlikely(!INET_MATCH(sk, net, acookie,
                                                 saddr, daddr, ports, dif))) {
-                               sock_put(sk);
+                               sock_gen_put(sk);
                                goto begin;
                        }
-                       goto out;
+                       goto found;
                }
        }
        /*
@@ -271,37 +309,9 @@ begin:
         */
        if (get_nulls_value(node) != slot)
                goto begin;
-
-begintw:
-       /* Must check for a TIME_WAIT'er before going to listener hash. */
-       sk_nulls_for_each_rcu(sk, node, &head->twchain) {
-               if (sk->sk_hash != hash)
-                       continue;
-               if (likely(INET_TW_MATCH(sk, net, acookie,
-                                        saddr, daddr, ports,
-                                        dif))) {
-                       if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
-                               sk = NULL;
-                               goto out;
-                       }
-                       if (unlikely(!INET_TW_MATCH(sk, net, acookie,
-                                                   saddr, daddr, ports,
-                                                   dif))) {
-                               sock_put(sk);
-                               goto begintw;
-                       }
-                       goto out;
-               }
-       }
-       /*
-        * if the nulls value we got at the end of this lookup is
-        * not the expected one, we must restart lookup.
-        * We probably met an item that was moved to another chain.
-        */
-       if (get_nulls_value(node) != slot)
-               goto begintw;
-       sk = NULL;
 out:
+       sk = NULL;
+found:
        rcu_read_unlock();
        return sk;
 }
@@ -326,39 +336,29 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
        spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
        struct sock *sk2;
        const struct hlist_nulls_node *node;
-       struct inet_timewait_sock *tw;
+       struct inet_timewait_sock *tw = NULL;
        int twrefcnt = 0;
 
        spin_lock(lock);
 
-       /* Check TIME-WAIT sockets first. */
-       sk_nulls_for_each(sk2, node, &head->twchain) {
-               if (sk2->sk_hash != hash)
-                       continue;
-
-               if (likely(INET_TW_MATCH(sk2, net, acookie,
-                                        saddr, daddr, ports, dif))) {
-                       tw = inet_twsk(sk2);
-                       if (twsk_unique(sk, sk2, twp))
-                               goto unique;
-                       else
-                               goto not_unique;
-               }
-       }
-       tw = NULL;
-
-       /* And established part... */
        sk_nulls_for_each(sk2, node, &head->chain) {
                if (sk2->sk_hash != hash)
                        continue;
+
                if (likely(INET_MATCH(sk2, net, acookie,
-                                     saddr, daddr, ports, dif)))
+                                        saddr, daddr, ports, dif))) {
+                       if (sk2->sk_state == TCP_TIME_WAIT) {
+                               tw = inet_twsk(sk2);
+                               if (twsk_unique(sk, sk2, twp))
+                                       break;
+                       }
                        goto not_unique;
+               }
        }
 
-unique:
        /* Must record num and sport now. Otherwise we will see
-        * in hash table socket with a funny identity. */
+        * in hash table socket with a funny identity.
+        */
        inet->inet_num = lport;
        inet->inet_sport = htons(lport);
        sk->sk_hash = hash;
@@ -494,7 +494,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
                u32 offset = hint + port_offset;
                struct inet_timewait_sock *tw = NULL;
 
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
 
                local_bh_disable();
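
With time-wait sockets sharing the established hash, sock_gen_put() picks the destructor from sk_state, and the rewritten lookup keeps the usual lockless discipline: take a reference only if the entry is still live, re-check the key, and back off if the slot was recycled. A rough user-space sketch of that take-and-revalidate pattern, using C11 atomics and illustrative types:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	atomic_int refcnt;
	unsigned long key;
};

/* analogue of atomic_inc_not_zero(): grab a reference unless the
 * object is already on its way to being freed */
static bool get_if_live(struct node *n)
{
	int c = atomic_load(&n->refcnt);

	while (c != 0)
		if (atomic_compare_exchange_weak(&n->refcnt, &c, c + 1))
			return true;
	return false;
}

static struct node *lookup_one(struct node *n, unsigned long key)
{
	if (n->key != key)
		return NULL;
	if (!get_if_live(n))
		return NULL;                   /* dead entry: keep scanning */
	if (n->key != key) {                   /* slot reused under us: undo */
		atomic_fetch_sub(&n->refcnt, 1);
		return NULL;
	}
	return n;                              /* caller now owns a reference */
}

int main(void)
{
	struct node n = { .refcnt = 1, .key = 42 };

	printf("%s\n", lookup_one(&n, 42) ? "found" : "miss");
	return 0;
}
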
index 1f27c9f4afd07fbf55589ed4e6e730375dcdc0a3..6d592f8555fb8bf15506d828b53c6582386f7bec 100644 (file)
@@ -87,19 +87,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
        refcnt += inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead->lock);
 
-#ifdef SOCK_REFCNT_DEBUG
-       if (atomic_read(&tw->tw_refcnt) != 1) {
-               pr_debug("%s timewait_sock %p refcnt=%d\n",
-                        tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
-       }
-#endif
-       while (refcnt) {
-               inet_twsk_put(tw);
-               refcnt--;
-       }
+       BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
+       atomic_sub(refcnt, &tw->tw_refcnt);
 }
 
-static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
+void inet_twsk_free(struct inet_timewait_sock *tw)
 {
        struct module *owner = tw->tw_prot->owner;
        twsk_destructor((struct sock *)tw);
@@ -118,6 +110,18 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
 
+static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
+                                  struct hlist_nulls_head *list)
+{
+       hlist_nulls_add_head_rcu(&tw->tw_node, list);
+}
+
+static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
+                                   struct hlist_head *list)
+{
+       hlist_add_head(&tw->tw_bind_node, list);
+}
+
 /*
  * Enter the time wait state. This is called with locally disabled BH.
  * Essentially we whip up a timewait bucket, copy the relevant info into it
@@ -146,26 +150,21 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
        spin_lock(lock);
 
        /*
-        * Step 2: Hash TW into TIMEWAIT chain.
-        * Should be done before removing sk from established chain
-        * because readers are lockless and search established first.
+        * Step 2: Hash TW into tcp ehash chain.
+        * Notes :
+        * - tw_refcnt is set to 3 because :
+        * - We have one reference from bhash chain.
+        * - We have one reference from ehash chain.
+        * - One more reference is still held by the caller, and is dropped
+        *   once the timewait socket has been scheduled.
+        * We can use atomic_set() because prior spin_lock()/spin_unlock()
+        * committed into memory all tw fields.
         */
-       inet_twsk_add_node_rcu(tw, &ehead->twchain);
+       atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
+       inet_twsk_add_node_rcu(tw, &ehead->chain);
 
-       /* Step 3: Remove SK from established hash. */
+       /* Step 3: Remove SK from hash chain */
        if (__sk_nulls_del_node_init_rcu(sk))
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
-       /*
-        * Notes :
-        * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
-        * - We add one reference for the bhash link
-        * - We add one reference for the ehash link
-        * - We want this refcnt update done before allowing other
-        *   threads to find this tw in ehash chain.
-        */
-       atomic_add(1 + 1 + 1, &tw->tw_refcnt);
-
        spin_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
@@ -387,11 +386,11 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw,
                        if (slot >= INET_TWDR_TWKILL_SLOTS)
                                slot = INET_TWDR_TWKILL_SLOTS - 1;
                }
-               tw->tw_ttd = jiffies + timeo;
+               tw->tw_ttd = inet_tw_time_stamp() + timeo;
                slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
                list = &twdr->cells[slot];
        } else {
-               tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);
+               tw->tw_ttd = inet_tw_time_stamp() + (slot << INET_TWDR_RECYCLE_TICK);
 
                if (twdr->twcal_hand < 0) {
                        twdr->twcal_hand = 0;
@@ -490,7 +489,9 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo,
 restart_rcu:
                rcu_read_lock();
 restart:
-               sk_nulls_for_each_rcu(sk, node, &head->twchain) {
+               sk_nulls_for_each_rcu(sk, node, &head->chain) {
+                       if (sk->sk_state != TCP_TIME_WAIT)
+                               continue;
                        tw = inet_twsk(sk);
                        if ((tw->tw_family != family) ||
                                atomic_read(&twsk_net(tw)->count))
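
Two details in the time-wait hunks are easy to miss: __inet_twsk_kill() now returns the unhash references with one atomic_sub() instead of a loop of puts (the BUG_ON checks that at least one reference survives), and tw_ttd is stored in inet_tw_time_stamp() units rather than raw jiffies. A minimal sketch of the batched release, with made-up numbers:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_int tw_refcnt = 3;   /* e.g. ehash + bhash + one held elsewhere */
	int unhashed = 2;           /* references returned by unhashing */

	/* must leave at least one reference for the final put */
	assert(unhashed < atomic_load(&tw_refcnt));
	atomic_fetch_sub(&tw_refcnt, unhashed);

	printf("remaining references: %d\n", atomic_load(&tw_refcnt));
	return 0;
}
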
index b66910aaef4d633977d2e983f2c63b419da38f77..2481993a49708337af890fad97c70f8d6c85f43d 100644 (file)
@@ -106,6 +106,7 @@ struct ip4_create_arg {
 
 static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
 {
+       net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
        return jhash_3words((__force u32)id << 16 | prot,
                            (__force u32)saddr, (__force u32)daddr,
                            ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
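
ipqhashfn() now seeds ip4_frags.rnd on first use via net_get_random_once() instead of at inet_frags_init() time (the hunk removed a few files above). The pattern is "initialize once, lazily, safely against concurrent callers"; a rough user-space analogue using C11 call_once (needs <threads.h>, glibc 2.28+), with a toy mix standing in for jhash and rand() standing in for real entropy:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <threads.h>
#include <time.h>

static uint32_t frag_secret;
static once_flag frag_secret_once = ONCE_FLAG_INIT;

static void init_frag_secret(void)
{
	srand((unsigned)time(NULL));      /* stand-in for real entropy */
	frag_secret = (uint32_t)rand();
}

static uint32_t frag_hash(uint32_t id, uint32_t saddr, uint32_t daddr)
{
	/* first caller initializes the secret; everyone else just reads it */
	call_once(&frag_secret_once, init_frag_secret);
	return (id * 2654435761u) ^ saddr ^ daddr ^ frag_secret;
}

int main(void)
{
	printf("0x%08x\n", frag_hash(1, 0x0a000001u, 0x0a000002u));
	return 0;
}
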
index a04d872c54f919c7133e7830773301cdf070f3ed..8fbac7de1e1b731e14aa485e75c1212618cace55 100644 (file)
@@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
                /* initialize protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;
 
-               skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
 
-               /* specify the length of each IP datagram fragment */
-               skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
                __skb_queue_tail(queue, skb);
+       } else if (skb_is_gso(skb)) {
+               goto append;
        }
 
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       /* specify the length of each IP datagram fragment */
+       skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+       skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
 }
@@ -1060,6 +1065,9 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
                         rt->dst.dev->mtu : dst_mtu(&rt->dst);
        cork->dst = &rt->dst;
        cork->length = 0;
+       cork->ttl = ipc->ttl;
+       cork->tos = ipc->tos;
+       cork->priority = ipc->priority;
        cork->tx_flags = ipc->tx_flags;
 
        return 0;
@@ -1311,7 +1319,9 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        if (cork->flags & IPCORK_OPT)
                opt = cork->opt;
 
-       if (rt->rt_type == RTN_MULTICAST)
+       if (cork->ttl != 0)
+               ttl = cork->ttl;
+       else if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->dst);
@@ -1319,7 +1329,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
        iph = ip_hdr(skb);
        iph->version = 4;
        iph->ihl = 5;
-       iph->tos = inet->tos;
+       iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
        iph->frag_off = df;
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
@@ -1331,7 +1341,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
                ip_options_build(skb, opt, cork->addr, rt, 0);
        }
 
-       skb->priority = sk->sk_priority;
+       skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
        skb->mark = sk->sk_mark;
        /*
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
@@ -1481,6 +1491,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
        ipc.addr = daddr;
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        if (replyopts.opt.opt.optlen) {
                ipc.opt = &replyopts.opt;
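
The ip_output.c hunks let a per-request (cork) TTL, TOS and priority override the socket defaults when the header is built, with 0 and -1 meaning "not set". The selection logic boils down to the stand-alone sketch below (not kernel code):

#include <stdio.h>

static unsigned char pick_ttl(int cork_ttl, int is_multicast,
			      unsigned char mc_ttl, unsigned char route_ttl)
{
	if (cork_ttl != 0)
		return (unsigned char)cork_ttl;
	return is_multicast ? mc_ttl : route_ttl;
}

static unsigned char pick_tos(int cork_tos, unsigned char sk_tos)
{
	return (cork_tos != -1) ? (unsigned char)cork_tos : sk_tos;
}

int main(void)
{
	printf("ttl=%u tos=0x%02x\n", pick_ttl(0, 0, 1, 64), pick_tos(-1, 0x10));
	printf("ttl=%u tos=0x%02x\n", pick_ttl(7, 0, 1, 64), pick_tos(0x2e, 0x10));
	return 0;
}
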
index d9c4f113d7093bba7eba2beefc31cd0af4b9bb95..0626f2cb192e69ecb3fdbcf6d4d12884c7bc5c6d 100644 (file)
@@ -189,7 +189,7 @@ EXPORT_SYMBOL(ip_cmsg_recv);
 
 int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
 {
-       int err;
+       int err, val;
        struct cmsghdr *cmsg;
 
        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
@@ -215,6 +215,24 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
                        ipc->addr = info->ipi_spec_dst.s_addr;
                        break;
                }
+               case IP_TTL:
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+                               return -EINVAL;
+                       val = *(int *)CMSG_DATA(cmsg);
+                       if (val < 1 || val > 255)
+                               return -EINVAL;
+                       ipc->ttl = val;
+                       break;
+               case IP_TOS:
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+                               return -EINVAL;
+                       val = *(int *)CMSG_DATA(cmsg);
+                       if (val < 0 || val > 255)
+                               return -EINVAL;
+                       ipc->tos = val;
+                       ipc->priority = rt_tos2priority(ipc->tos);
+                       break;
+
                default:
                        return -EINVAL;
                }
@@ -1034,11 +1052,12 @@ e_inval:
  * destination in skb->cb[] before dst drop.
  * This way, receiver doesnt make cache line misses to read rtable.
  */
-void ipv4_pktinfo_prepare(struct sk_buff *skb)
+void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 {
        struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
 
-       if (skb_rtable(skb)) {
+       if ((inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) &&
+           skb_rtable(skb)) {
                pktinfo->ipi_ifindex = inet_iif(skb);
                pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
        } else {
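
The ip_sockglue.c hunk is the user-visible half of the per-packet TTL/TOS support: ip_cmsg_send() now accepts IP_TTL and IP_TOS ancillary data carrying an int, range-checked to 1..255 and 0..255 respectively. A sketch of how a sender could use it, error handling trimmed and assuming an already created UDP or raw IPv4 socket fd:

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

ssize_t send_with_ttl_tos(int fd, const struct sockaddr_in *dst,
			  const void *buf, size_t len, int ttl, int tos)
{
	char cbuf[CMSG_SPACE(sizeof(int)) * 2];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name = (void *)dst, .msg_namelen = sizeof(*dst),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;

	memset(cbuf, 0, sizeof(cbuf));

	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_level = SOL_IP;            /* level matched by ip_cmsg_send() */
	cm->cmsg_type = IP_TTL;             /* int, 1..255 */
	cm->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cm), &ttl, sizeof(int));

	cm = CMSG_NXTHDR(&msg, cm);
	cm->cmsg_level = SOL_IP;
	cm->cmsg_type = IP_TOS;             /* int, 0..255 */
	cm->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cm), &tos, sizeof(int));

	return sendmsg(fd, &msg, 0);
}

Per the hunk, the values affect only that one datagram; the socket-wide IP_TTL/IP_TOS defaults set with setsockopt() are untouched.
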
index c31e3ad98ef28e91eff2679d6976d4c20c055410..42ffbc8d65c65fda2b288008b4aed7670b43ee3f 100644 (file)
@@ -116,3 +116,36 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
        return 0;
 }
 EXPORT_SYMBOL_GPL(iptunnel_pull_header);
+
+struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
+                                        bool csum_help,
+                                        int gso_type_mask)
+{
+       int err;
+
+       if (likely(!skb->encapsulation)) {
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+       }
+
+       if (skb_is_gso(skb)) {
+               err = skb_unclone(skb, GFP_ATOMIC);
+               if (unlikely(err))
+                       goto error;
+               skb_shinfo(skb)->gso_type |= gso_type_mask;
+               return skb;
+       }
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) {
+               err = skb_checksum_help(skb);
+               if (unlikely(err))
+                       goto error;
+       } else if (skb->ip_summed != CHECKSUM_PARTIAL)
+               skb->ip_summed = CHECKSUM_NONE;
+
+       return skb;
+error:
+       kfree_skb(skb);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
index e805e7b3030e3dad2f8fd83d140f0bb7f100d69c..5d9c845d288a3d8cce3eb96aeb65384e4a56edbc 100644 (file)
@@ -49,70 +49,6 @@ static struct rtnl_link_ops vti_link_ops __read_mostly;
 static int vti_net_id __read_mostly;
 static int vti_tunnel_init(struct net_device *dev);
 
-static int vti_err(struct sk_buff *skb, u32 info)
-{
-
-       /* All the routers (except for Linux) return only
-        * 8 bytes of packet payload. It means, that precise relaying of
-        * ICMP in the real Internet is absolutely infeasible.
-        */
-       struct net *net = dev_net(skb->dev);
-       struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
-       struct iphdr *iph = (struct iphdr *)skb->data;
-       const int type = icmp_hdr(skb)->type;
-       const int code = icmp_hdr(skb)->code;
-       struct ip_tunnel *t;
-       int err;
-
-       switch (type) {
-       default:
-       case ICMP_PARAMETERPROB:
-               return 0;
-
-       case ICMP_DEST_UNREACH:
-               switch (code) {
-               case ICMP_SR_FAILED:
-               case ICMP_PORT_UNREACH:
-                       /* Impossible event. */
-                       return 0;
-               default:
-                       /* All others are translated to HOST_UNREACH. */
-                       break;
-               }
-               break;
-       case ICMP_TIME_EXCEEDED:
-               if (code != ICMP_EXC_TTL)
-                       return 0;
-               break;
-       }
-
-       err = -ENOENT;
-
-       t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
-                            iph->daddr, iph->saddr, 0);
-       if (t == NULL)
-               goto out;
-
-       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
-               ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->parms.link, 0, IPPROTO_IPIP, 0);
-               err = 0;
-               goto out;
-       }
-
-       err = 0;
-       if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
-               goto out;
-
-       if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
-               t->err_count++;
-       else
-               t->err_count = 1;
-       t->err_time = jiffies;
-out:
-       return err;
-}
-
 /* We dont digest the packet therefore let the packet pass */
 static int vti_rcv(struct sk_buff *skb)
 {
@@ -125,8 +61,17 @@ static int vti_rcv(struct sk_buff *skb)
                                  iph->saddr, iph->daddr, 0);
        if (tunnel != NULL) {
                struct pcpu_tstats *tstats;
+               u32 oldmark = skb->mark;
+               int ret;
+
 
-               if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+               /* temporarily mark the skb with the tunnel o_key, to
+                * only match policies with this mark.
+                */
+               skb->mark = be32_to_cpu(tunnel->parms.o_key);
+               ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
+               skb->mark = oldmark;
+               if (!ret)
                        return -1;
 
                tstats = this_cpu_ptr(tunnel->dev->tstats);
@@ -135,7 +80,6 @@ static int vti_rcv(struct sk_buff *skb)
                tstats->rx_bytes += skb->len;
                u64_stats_update_end(&tstats->syncp);
 
-               skb->mark = 0;
                secpath_reset(skb);
                skb->dev = tunnel->dev;
                return 1;
@@ -167,7 +111,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
        memset(&fl4, 0, sizeof(fl4));
        flowi4_init_output(&fl4, tunnel->parms.link,
-                          be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
+                          be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
                           RT_SCOPE_UNIVERSE,
                           IPPROTO_IPIP, 0,
                           dst, tiph->saddr, 0, 0);
@@ -296,9 +240,8 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev)
        iph->ihl                = 5;
 }
 
-static struct xfrm_tunnel vti_handler __read_mostly = {
+static struct xfrm_tunnel_notifier vti_handler __read_mostly = {
        .handler        =       vti_rcv,
-       .err_handler    =       vti_err,
        .priority       =       1,
 };
 
index 7f80fb4b82d3716ad79044a43ed43545f6183957..fe3e9f7f1f0beaf0ba6a8b7b4c788c8a021ba54d 100644 (file)
@@ -220,17 +220,17 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(skb->protocol != htons(ETH_P_IP)))
                goto tx_error;
 
-       if (likely(!skb->encapsulation)) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
+       skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+       if (IS_ERR(skb))
+               goto out;
 
        ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
        return NETDEV_TX_OK;
 
 tx_error:
-       dev->stats.tx_errors++;
        dev_kfree_skb(skb);
+out:
+       dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 
@@ -275,6 +275,7 @@ static const struct net_device_ops ipip_netdev_ops = {
 #define IPIP_FEATURES (NETIF_F_SG |            \
                       NETIF_F_FRAGLIST |       \
                       NETIF_F_HIGHDMA |        \
+                      NETIF_F_GSO_SOFTWARE |   \
                       NETIF_F_HW_CSUM)
 
 static void ipip_tunnel_setup(struct net_device *dev)
index 1657e39b291f2ae8747e21e944627def23ebfcbb..40d56073cd19d3b3c27372b7dca255469d7e0068 100644 (file)
@@ -36,6 +36,27 @@ config NF_CONNTRACK_PROC_COMPAT
 
          If unsure, say Y.
 
+config NF_TABLES_IPV4
+       depends on NF_TABLES
+       tristate "IPv4 nf_tables support"
+
+config NFT_REJECT_IPV4
+       depends on NF_TABLES_IPV4
+       tristate "nf_tables IPv4 reject support"
+
+config NFT_CHAIN_ROUTE_IPV4
+       depends on NF_TABLES_IPV4
+       tristate "IPv4 nf_tables route chain support"
+
+config NFT_CHAIN_NAT_IPV4
+       depends on NF_TABLES_IPV4
+       depends on NF_NAT_IPV4 && NFT_NAT
+       tristate "IPv4 nf_tables nat chain support"
+
+config NF_TABLES_ARP
+       depends on NF_TABLES
+       tristate "ARP nf_tables support"
+
 config IP_NF_IPTABLES
        tristate "IP tables support (required for filtering/masq/NAT)"
        default m if NETFILTER_ADVANCED=n
index 3622b248b6dd7ad78aa4411243b4b507731d7e2b..19df72b7ba8810e698bf6d50e8eee5c23033b34a 100644 (file)
@@ -27,6 +27,12 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
 # NAT protocols (nf_nat)
 obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
 
+obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
+obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
+obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
+obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
+obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
+
 # generic IP tables 
 obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
 
index 85a4f21aac1ad117f740d40da7123a663fdfefa8..59da7cde072447c2331422659adb7dadad4f3fae 100644 (file)
@@ -271,6 +271,11 @@ unsigned int arpt_do_table(struct sk_buff *skb,
        local_bh_disable();
        addend = xt_write_recseq_begin();
        private = table->private;
+       /*
+        * Ensure we load private-> members after we've fetched the base
+        * pointer.
+        */
+       smp_read_barrier_depends();
        table_base = private->entries[smp_processor_id()];
 
        e = get_entry(table_base, private->hook_entry[hook]);
index a865f6f9401318663a2e973ac34cf707be4e1452..802ddecb30b8110474da0e0a34c134aceaece43b 100644 (file)
@@ -27,13 +27,14 @@ static const struct xt_table packet_filter = {
 
 /* The work comes in here from netfilter.c */
 static unsigned int
-arptable_filter_hook(unsigned int hook, struct sk_buff *skb,
+arptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
        const struct net *net = dev_net((in != NULL) ? in : out);
 
-       return arpt_do_table(skb, hook, in, out, net->ipv4.arptable_filter);
+       return arpt_do_table(skb, ops->hooknum, in, out,
+                            net->ipv4.arptable_filter);
 }
 
 static struct nf_hook_ops *arpfilter_ops __read_mostly;
index d23118d95ff9291401396953d094391db0e2e44e..718dfbd30cbe09560c1d545525b446fb5695f6af 100644 (file)
@@ -327,6 +327,11 @@ ipt_do_table(struct sk_buff *skb,
        addend = xt_write_recseq_begin();
        private = table->private;
        cpu        = smp_processor_id();
+       /*
+        * Ensure we load private-> members after we've fetched the base
+        * pointer.
+        */
+       smp_read_barrier_depends();
        table_base = private->entries[cpu];
        jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
        stackptr   = per_cpu_ptr(private->stackptr, cpu);
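
The arp_tables and ip_tables hunks insert smp_read_barrier_depends() so the per-CPU entries array is read only after the table->private pointer itself has been observed, pairing with the write side that publishes a freshly initialised private block. In portable C11 terms this is the acquire (or consume) half of a release/acquire publication; a sketch with illustrative names:

#include <stdatomic.h>
#include <stddef.h>

struct xt_private {
	void **entries;                 /* per-cpu rule blobs */
	unsigned int number;
};

static _Atomic(struct xt_private *) table_private;

/* writer: publish a fully initialised private block */
static void publish(struct xt_private *p)
{
	atomic_store_explicit(&table_private, p, memory_order_release);
}

/* reader: the acquire load orders the later ->entries[] dereference,
 * which is what the dependent-load barrier guarantees in the hunks above */
static void *get_cpu_entries(unsigned int cpu)
{
	struct xt_private *p =
		atomic_load_explicit(&table_private, memory_order_acquire);

	return p ? p->entries[cpu] : NULL;
}

int main(void)
{
	static int cpu0_blob;
	static void *entries[1] = { &cpu0_blob };
	static struct xt_private priv = { .entries = entries, .number = 1 };

	publish(&priv);
	return get_cpu_entries(0) == &cpu0_blob ? 0 : 1;
}
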
index 0b732efd32e20aa8e22e6c9bd5aa4edfb7e5516d..a2e2b61cd7da230475f8ce7b9818eba67090bf05 100644 (file)
@@ -483,7 +483,7 @@ static void arp_print(struct arp_payload *payload)
 #endif
 
 static unsigned int
-arp_mangle(unsigned int hook,
+arp_mangle(const struct nf_hook_ops *ops,
           struct sk_buff *skb,
           const struct net_device *in,
           const struct net_device *out,
index b6346bf2fde3bc16f0655e9c9f7c6ade5ffeae8a..01cffeaa0085ede5bb802999fc1f29c9e6ec4b7f 100644 (file)
@@ -297,7 +297,7 @@ synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
+static unsigned int ipv4_synproxy_hook(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
                                       const struct net_device *in,
                                       const struct net_device *out,
index cbc22158af490589833a4918f3610050a60d5be4..9cb993cd224bf702ca48382a0656163837433c54 100644 (file)
@@ -220,6 +220,7 @@ static void ipt_ulog_packet(struct net *net,
        ub->qlen++;
 
        pm = nlmsg_data(nlh);
+       memset(pm, 0, sizeof(*pm));
 
        /* We might not have a timestamp, get one */
        if (skb->tstamp.tv64 == 0)
@@ -238,8 +239,6 @@ static void ipt_ulog_packet(struct net *net,
        }
        else if (loginfo->prefix[0] != '\0')
                strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
-       else
-               *(pm->prefix) = '\0';
 
        if (in && in->hard_header_len > 0 &&
            skb->mac_header != skb->network_header &&
@@ -251,13 +250,9 @@ static void ipt_ulog_packet(struct net *net,
 
        if (in)
                strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
-       else
-               pm->indev_name[0] = '\0';
 
        if (out)
                strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
-       else
-               pm->outdev_name[0] = '\0';
 
        /* copy_len <= skb->len, so can't fail. */
        if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
index 50af5b45c050c22773db181353e2aee9e02137e3..e08a74a243a85d125ccbd043314f06c6b528b368 100644 (file)
@@ -33,20 +33,21 @@ static const struct xt_table packet_filter = {
 };
 
 static unsigned int
-iptable_filter_hook(unsigned int hook, struct sk_buff *skb,
+iptable_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                    const struct net_device *in, const struct net_device *out,
                    int (*okfn)(struct sk_buff *))
 {
        const struct net *net;
 
-       if (hook == NF_INET_LOCAL_OUT &&
+       if (ops->hooknum == NF_INET_LOCAL_OUT &&
            (skb->len < sizeof(struct iphdr) ||
             ip_hdrlen(skb) < sizeof(struct iphdr)))
                /* root is playing with raw sockets. */
                return NF_ACCEPT;
 
        net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_filter);
+       return ipt_do_table(skb, ops->hooknum, in, out,
+                           net->ipv4.iptable_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
index 0d8cd82e0fadf676bebd1a3cb3b5b559159b3491..6a5079c34bb363c34135e9bed5700a645f15b249 100644 (file)
@@ -79,19 +79,19 @@ ipt_mangle_out(struct sk_buff *skb, const struct net_device *out)
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-iptable_mangle_hook(unsigned int hook,
+iptable_mangle_hook(const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
                     const struct net_device *in,
                     const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
-       if (hook == NF_INET_LOCAL_OUT)
+       if (ops->hooknum == NF_INET_LOCAL_OUT)
                return ipt_mangle_out(skb, out);
-       if (hook == NF_INET_POST_ROUTING)
-               return ipt_do_table(skb, hook, in, out,
+       if (ops->hooknum == NF_INET_POST_ROUTING)
+               return ipt_do_table(skb, ops->hooknum, in, out,
                                    dev_net(out)->ipv4.iptable_mangle);
        /* PREROUTING/INPUT/FORWARD: */
-       return ipt_do_table(skb, hook, in, out,
+       return ipt_do_table(skb, ops->hooknum, in, out,
                            dev_net(in)->ipv4.iptable_mangle);
 }
 
index 683bfaffed65da561cb3a1c2eed598fb8cbe32a1..ee2886126e3dfad44e7c801d82ad2ec4bad621d3 100644 (file)
@@ -61,7 +61,7 @@ static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv4_fn(unsigned int hooknum,
+nf_nat_ipv4_fn(const struct nf_hook_ops *ops,
               struct sk_buff *skb,
               const struct net_device *in,
               const struct net_device *out,
@@ -71,7 +71,7 @@ nf_nat_ipv4_fn(unsigned int hooknum,
        enum ip_conntrack_info ctinfo;
        struct nf_conn_nat *nat;
        /* maniptype == SRC for postrouting. */
-       enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
 
        /* We never see fragments: conntrack defrags on pre-routing
         * and local-out, and nf_nat_out protects post-routing.
@@ -108,7 +108,7 @@ nf_nat_ipv4_fn(unsigned int hooknum,
        case IP_CT_RELATED_REPLY:
                if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
-                                                          hooknum))
+                                                          ops->hooknum))
                                return NF_DROP;
                        else
                                return NF_ACCEPT;
@@ -121,14 +121,14 @@ nf_nat_ipv4_fn(unsigned int hooknum,
                if (!nf_nat_initialized(ct, maniptype)) {
                        unsigned int ret;
 
-                       ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+                       ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
                        if (ret != NF_ACCEPT)
                                return ret;
                } else {
                        pr_debug("Already setup manip %s for ct %p\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct);
-                       if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
                                goto oif_changed;
                }
                break;
@@ -137,11 +137,11 @@ nf_nat_ipv4_fn(unsigned int hooknum,
                /* ESTABLISHED */
                NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
                             ctinfo == IP_CT_ESTABLISHED_REPLY);
-               if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
                        goto oif_changed;
        }
 
-       return nf_nat_packet(ct, ctinfo, hooknum, skb);
+       return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
 
 oif_changed:
        nf_ct_kill_acct(ct, ctinfo, skb);
@@ -149,7 +149,7 @@ oif_changed:
 }
 
 static unsigned int
-nf_nat_ipv4_in(unsigned int hooknum,
+nf_nat_ipv4_in(const struct nf_hook_ops *ops,
               struct sk_buff *skb,
               const struct net_device *in,
               const struct net_device *out,
@@ -158,7 +158,7 @@ nf_nat_ipv4_in(unsigned int hooknum,
        unsigned int ret;
        __be32 daddr = ip_hdr(skb)->daddr;
 
-       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            daddr != ip_hdr(skb)->daddr)
                skb_dst_drop(skb);
@@ -167,7 +167,7 @@ nf_nat_ipv4_in(unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv4_out(unsigned int hooknum,
+nf_nat_ipv4_out(const struct nf_hook_ops *ops,
                struct sk_buff *skb,
                const struct net_device *in,
                const struct net_device *out,
@@ -185,7 +185,7 @@ nf_nat_ipv4_out(unsigned int hooknum,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
 #ifdef CONFIG_XFRM
        if (ret != NF_DROP && ret != NF_STOLEN &&
            !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
@@ -207,7 +207,7 @@ nf_nat_ipv4_out(unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv4_local_fn(unsigned int hooknum,
+nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
                     const struct net_device *in,
                     const struct net_device *out,
@@ -223,7 +223,7 @@ nf_nat_ipv4_local_fn(unsigned int hooknum,
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv4_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
index 1f82aea11df63e0683ba6e2c0e85d270534a6d74..b2f7e8f98316d2733e3936ead8dec20da2978f73 100644 (file)
@@ -20,20 +20,20 @@ static const struct xt_table packet_raw = {
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-iptable_raw_hook(unsigned int hook, struct sk_buff *skb,
+iptable_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                 const struct net_device *in, const struct net_device *out,
                 int (*okfn)(struct sk_buff *))
 {
        const struct net *net;
 
-       if (hook == NF_INET_LOCAL_OUT && 
+       if (ops->hooknum == NF_INET_LOCAL_OUT &&
            (skb->len < sizeof(struct iphdr) ||
             ip_hdrlen(skb) < sizeof(struct iphdr)))
                /* root is playing with raw sockets. */
                return NF_ACCEPT;
 
        net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_raw);
+       return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.iptable_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
index f867a8d38bf7513d38d3da645fb21c2096f5e2a5..c86647ed2078f660cf0e9f8b69957a6f4c79b1f6 100644 (file)
@@ -37,21 +37,22 @@ static const struct xt_table security_table = {
 };
 
 static unsigned int
-iptable_security_hook(unsigned int hook, struct sk_buff *skb,
+iptable_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                      const struct net_device *in,
                      const struct net_device *out,
                      int (*okfn)(struct sk_buff *))
 {
        const struct net *net;
 
-       if (hook == NF_INET_LOCAL_OUT &&
+       if (ops->hooknum == NF_INET_LOCAL_OUT &&
            (skb->len < sizeof(struct iphdr) ||
             ip_hdrlen(skb) < sizeof(struct iphdr)))
                /* Somebody is playing with raw sockets. */
                return NF_ACCEPT;
 
        net = dev_net((in != NULL) ? in : out);
-       return ipt_do_table(skb, hook, in, out, net->ipv4.iptable_security);
+       return ipt_do_table(skb, ops->hooknum, in, out,
+                           net->ipv4.iptable_security);
 }
 
 static struct nf_hook_ops *sectbl_ops __read_mostly;
index 86f5b34a4ed1865cd1438817bfb46f6eb589f61e..ecd8bec411c9719c5ef083d9781e90d0377b88e9 100644 (file)
@@ -92,7 +92,7 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
        return NF_ACCEPT;
 }
 
-static unsigned int ipv4_helper(unsigned int hooknum,
+static unsigned int ipv4_helper(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
                                const struct net_device *in,
                                const struct net_device *out,
@@ -121,7 +121,7 @@ static unsigned int ipv4_helper(unsigned int hooknum,
                            ct, ctinfo);
 }
 
-static unsigned int ipv4_confirm(unsigned int hooknum,
+static unsigned int ipv4_confirm(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
                                 const struct net_device *in,
                                 const struct net_device *out,
@@ -147,16 +147,16 @@ out:
        return nf_conntrack_confirm(skb);
 }
 
-static unsigned int ipv4_conntrack_in(unsigned int hooknum,
+static unsigned int ipv4_conntrack_in(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
 {
-       return nf_conntrack_in(dev_net(in), PF_INET, hooknum, skb);
+       return nf_conntrack_in(dev_net(in), PF_INET, ops->hooknum, skb);
 }
 
-static unsigned int ipv4_conntrack_local(unsigned int hooknum,
+static unsigned int ipv4_conntrack_local(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
                                         const struct net_device *in,
                                         const struct net_device *out,
@@ -166,7 +166,7 @@ static unsigned int ipv4_conntrack_local(unsigned int hooknum,
        if (skb->len < sizeof(struct iphdr) ||
            ip_hdrlen(skb) < sizeof(struct iphdr))
                return NF_ACCEPT;
-       return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
+       return nf_conntrack_in(dev_net(out), PF_INET, ops->hooknum, skb);
 }
 
 /* Connection tracking may drop packets, but never alters them, so
index 742815518b0fd5ea78ec4205bfee76a8a804c88d..12e13bd82b5bba4fdd183d5ba2cda098a1c0c683 100644 (file)
@@ -60,7 +60,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
                return IP_DEFRAG_CONNTRACK_OUT + zone;
 }
 
-static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
                                          struct sk_buff *skb,
                                          const struct net_device *in,
                                          const struct net_device *out,
@@ -83,7 +83,9 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
 #endif
        /* Gather fragments. */
        if (ip_is_fragment(ip_hdr(skb))) {
-               enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
+               enum ip_defrag_users user =
+                       nf_ct_defrag_user(ops->hooknum, skb);
+
                if (nf_ct_ipv4_gather_frags(skb, user))
                        return NF_STOLEN;
        }
diff --git a/net/ipv4/netfilter/nf_tables_arp.c b/net/ipv4/netfilter/nf_tables_arp.c
new file mode 100644 (file)
index 0000000..3e67ef1
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2008-2010 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netfilter_arp.h>
+#include <net/netfilter/nf_tables.h>
+
+static struct nft_af_info nft_af_arp __read_mostly = {
+       .family         = NFPROTO_ARP,
+       .nhooks         = NF_ARP_NUMHOOKS,
+       .owner          = THIS_MODULE,
+};
+
+static int nf_tables_arp_init_net(struct net *net)
+{
+       net->nft.arp = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+       if (net->nft.arp == NULL)
+               return -ENOMEM;
+
+       memcpy(net->nft.arp, &nft_af_arp, sizeof(nft_af_arp));
+
+       if (nft_register_afinfo(net, net->nft.arp) < 0)
+               goto err;
+
+       return 0;
+err:
+       kfree(net->nft.arp);
+       return -ENOMEM;
+}
+
+static void nf_tables_arp_exit_net(struct net *net)
+{
+       nft_unregister_afinfo(net->nft.arp);
+       kfree(net->nft.arp);
+}
+
+static struct pernet_operations nf_tables_arp_net_ops = {
+       .init   = nf_tables_arp_init_net,
+       .exit   = nf_tables_arp_exit_net,
+};
+
+static unsigned int
+nft_do_chain_arp(const struct nf_hook_ops *ops,
+                 struct sk_buff *skb,
+                 const struct net_device *in,
+                 const struct net_device *out,
+                 int (*okfn)(struct sk_buff *))
+{
+       struct nft_pktinfo pkt;
+
+       nft_set_pktinfo(&pkt, ops, skb, in, out);
+
+       return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_arp = {
+       .family         = NFPROTO_ARP,
+       .name           = "filter",
+       .type           = NFT_CHAIN_T_DEFAULT,
+       .hook_mask      = (1 << NF_ARP_IN) |
+                         (1 << NF_ARP_OUT) |
+                         (1 << NF_ARP_FORWARD),
+       .fn             = {
+               [NF_ARP_IN]             = nft_do_chain_arp,
+               [NF_ARP_OUT]            = nft_do_chain_arp,
+               [NF_ARP_FORWARD]        = nft_do_chain_arp,
+       },
+};
+
+static int __init nf_tables_arp_init(void)
+{
+       int ret;
+
+       nft_register_chain_type(&filter_arp);
+       ret = register_pernet_subsys(&nf_tables_arp_net_ops);
+       if (ret < 0)
+               nft_unregister_chain_type(&filter_arp);
+
+       return ret;
+}
+
+static void __exit nf_tables_arp_exit(void)
+{
+       unregister_pernet_subsys(&nf_tables_arp_net_ops);
+       nft_unregister_chain_type(&filter_arp);
+}
+
+module_init(nf_tables_arp_init);
+module_exit(nf_tables_arp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(3); /* NFPROTO_ARP */
diff --git a/net/ipv4/netfilter/nf_tables_ipv4.c b/net/ipv4/netfilter/nf_tables_ipv4.c
new file mode 100644 (file)
index 0000000..8f7536b
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012-2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/net_namespace.h>
+#include <net/ip.h>
+#include <net/net_namespace.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+
+static unsigned int nft_ipv4_output(const struct nf_hook_ops *ops,
+                                   struct sk_buff *skb,
+                                   const struct net_device *in,
+                                   const struct net_device *out,
+                                   int (*okfn)(struct sk_buff *))
+{
+       struct nft_pktinfo pkt;
+
+       if (unlikely(skb->len < sizeof(struct iphdr) ||
+                    ip_hdr(skb)->ihl < sizeof(struct iphdr) / 4)) {
+               if (net_ratelimit())
+                       pr_info("nf_tables_ipv4: ignoring short SOCK_RAW "
+                               "packet\n");
+               return NF_ACCEPT;
+       }
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+       return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_ipv4 __read_mostly = {
+       .family         = NFPROTO_IPV4,
+       .nhooks         = NF_INET_NUMHOOKS,
+       .owner          = THIS_MODULE,
+       .hooks          = {
+               [NF_INET_LOCAL_OUT]     = nft_ipv4_output,
+       },
+};
+
+static int nf_tables_ipv4_init_net(struct net *net)
+{
+       net->nft.ipv4 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+       if (net->nft.ipv4 == NULL)
+               return -ENOMEM;
+
+       memcpy(net->nft.ipv4, &nft_af_ipv4, sizeof(nft_af_ipv4));
+
+       if (nft_register_afinfo(net, net->nft.ipv4) < 0)
+               goto err;
+
+       return 0;
+err:
+       kfree(net->nft.ipv4);
+       return -ENOMEM;
+}
+
+static void nf_tables_ipv4_exit_net(struct net *net)
+{
+       nft_unregister_afinfo(net->nft.ipv4);
+       kfree(net->nft.ipv4);
+}
+
+static struct pernet_operations nf_tables_ipv4_net_ops = {
+       .init   = nf_tables_ipv4_init_net,
+       .exit   = nf_tables_ipv4_exit_net,
+};
+
+static unsigned int
+nft_do_chain_ipv4(const struct nf_hook_ops *ops,
+                 struct sk_buff *skb,
+                 const struct net_device *in,
+                 const struct net_device *out,
+                 int (*okfn)(struct sk_buff *))
+{
+       struct nft_pktinfo pkt;
+
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+       return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_ipv4 = {
+       .family         = NFPROTO_IPV4,
+       .name           = "filter",
+       .type           = NFT_CHAIN_T_DEFAULT,
+       .hook_mask      = (1 << NF_INET_LOCAL_IN) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_FORWARD) |
+                         (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING),
+       .fn             = {
+               [NF_INET_LOCAL_IN]      = nft_do_chain_ipv4,
+               [NF_INET_LOCAL_OUT]     = nft_ipv4_output,
+               [NF_INET_FORWARD]       = nft_do_chain_ipv4,
+               [NF_INET_PRE_ROUTING]   = nft_do_chain_ipv4,
+               [NF_INET_POST_ROUTING]  = nft_do_chain_ipv4,
+       },
+};
+
+static int __init nf_tables_ipv4_init(void)
+{
+       nft_register_chain_type(&filter_ipv4);
+       return register_pernet_subsys(&nf_tables_ipv4_net_ops);
+}
+
+static void __exit nf_tables_ipv4_exit(void)
+{
+       unregister_pernet_subsys(&nf_tables_ipv4_net_ops);
+       nft_unregister_chain_type(&filter_ipv4);
+}
+
+module_init(nf_tables_ipv4_init);
+module_exit(nf_tables_ipv4_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_INET);
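
The length check in nft_ipv4_output() above guards against malformed packets injected through raw sockets: iph->ihl counts the header length in 32-bit words, so any value below sizeof(struct iphdr) / 4 == 5 cannot describe even the 20-byte base IPv4 header. A minimal user-space sketch of the same test (the helper and its callers are illustrative, not part of the commit):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/ip.h>           /* struct iphdr: 20-byte base header */

/* Mirrors the guard in nft_ipv4_output(): enough bytes for a base header,
 * and an ihl field (in 32-bit words) that covers it. */
static bool ipv4_header_sane(size_t len, unsigned int ihl)
{
        return len >= sizeof(struct iphdr) && ihl >= sizeof(struct iphdr) / 4;
}

int main(void)
{
        printf("len=20 ihl=5 -> %d\n", ipv4_header_sane(20, 5));  /* 1: valid       */
        printf("len=20 ihl=4 -> %d\n", ipv4_header_sane(20, 4));  /* 0: bogus ihl   */
        printf("len=12 ihl=5 -> %d\n", ipv4_header_sane(12, 5));  /* 0: short frame */
        return 0;
}
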
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
new file mode 100644 (file)
index 0000000..cf2c792
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ip.h>
+
+/*
+ * NAT chains
+ */
+
+static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
+                             struct sk_buff *skb,
+                             const struct net_device *in,
+                             const struct net_device *out,
+                             int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       struct nf_conn_nat *nat;
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+       struct nft_pktinfo pkt;
+       unsigned int ret;
+
+       if (ct == NULL || nf_ct_is_untracked(ct))
+               return NF_ACCEPT;
+
+       NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));
+
+       nat = nfct_nat(ct);
+       if (nat == NULL) {
+               /* Conntrack module was loaded late, can't add extension. */
+               if (nf_ct_is_confirmed(ct))
+                       return NF_ACCEPT;
+               nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+               if (nat == NULL)
+                       return NF_ACCEPT;
+       }
+
+       switch (ctinfo) {
+       case IP_CT_RELATED:
+       case IP_CT_RELATED + IP_CT_IS_REPLY:
+               if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
+                       if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
+                                                          ops->hooknum))
+                               return NF_DROP;
+                       else
+                               return NF_ACCEPT;
+               }
+               /* Fall through */
+       case IP_CT_NEW:
+               if (nf_nat_initialized(ct, maniptype))
+                       break;
+
+               nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+               ret = nft_do_chain_pktinfo(&pkt, ops);
+               if (ret != NF_ACCEPT)
+                       return ret;
+               if (!nf_nat_initialized(ct, maniptype)) {
+                       ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+                       if (ret != NF_ACCEPT)
+                               return ret;
+               }
+       default:
+               break;
+       }
+
+       return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+}
+
+static unsigned int nf_nat_prerouting(const struct nf_hook_ops *ops,
+                                     struct sk_buff *skb,
+                                     const struct net_device *in,
+                                     const struct net_device *out,
+                                     int (*okfn)(struct sk_buff *))
+{
+       __be32 daddr = ip_hdr(skb)->daddr;
+       unsigned int ret;
+
+       ret = nf_nat_fn(ops, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           ip_hdr(skb)->daddr != daddr) {
+               skb_dst_drop(skb);
+       }
+       return ret;
+}
+
+static unsigned int nf_nat_postrouting(const struct nf_hook_ops *ops,
+                                      struct sk_buff *skb,
+                                      const struct net_device *in,
+                                      const struct net_device *out,
+                                      int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo __maybe_unused;
+       const struct nf_conn *ct __maybe_unused;
+       unsigned int ret;
+
+       ret = nf_nat_fn(ops, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (ct->tuplehash[dir].tuple.src.u3.ip !=
+                   ct->tuplehash[!dir].tuple.dst.u3.ip ||
+                   ct->tuplehash[dir].tuple.src.u.all !=
+                   ct->tuplehash[!dir].tuple.dst.u.all)
+                       return nf_xfrm_me_harder(skb, AF_INET) == 0 ?
+                                                               ret : NF_DROP;
+       }
+#endif
+       return ret;
+}
+
+static unsigned int nf_nat_output(const struct nf_hook_ops *ops,
+                                 struct sk_buff *skb,
+                                 const struct net_device *in,
+                                 const struct net_device *out,
+                                 int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo;
+       const struct nf_conn *ct;
+       unsigned int ret;
+
+       ret = nf_nat_fn(ops, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (ct->tuplehash[dir].tuple.dst.u3.ip !=
+                   ct->tuplehash[!dir].tuple.src.u3.ip) {
+                       if (ip_route_me_harder(skb, RTN_UNSPEC))
+                               ret = NF_DROP;
+               }
+#ifdef CONFIG_XFRM
+               else if (ct->tuplehash[dir].tuple.dst.u.all !=
+                        ct->tuplehash[!dir].tuple.src.u.all)
+                       if (nf_xfrm_me_harder(skb, AF_INET))
+                               ret = NF_DROP;
+#endif
+       }
+       return ret;
+}
+
+static struct nf_chain_type nft_chain_nat_ipv4 = {
+       .family         = NFPROTO_IPV4,
+       .name           = "nat",
+       .type           = NFT_CHAIN_T_NAT,
+       .hook_mask      = (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_LOCAL_IN),
+       .fn             = {
+               [NF_INET_PRE_ROUTING]   = nf_nat_prerouting,
+               [NF_INET_POST_ROUTING]  = nf_nat_postrouting,
+               [NF_INET_LOCAL_OUT]     = nf_nat_output,
+               [NF_INET_LOCAL_IN]      = nf_nat_fn,
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init nft_chain_nat_init(void)
+{
+       int err;
+
+       err = nft_register_chain_type(&nft_chain_nat_ipv4);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static void __exit nft_chain_nat_exit(void)
+{
+       nft_unregister_chain_type(&nft_chain_nat_ipv4);
+}
+
+module_init(nft_chain_nat_init);
+module_exit(nft_chain_nat_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
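
nf_nat_fn() above picks the manipulation type from the hook it runs in (HOOK2MANIP(ops->hooknum)). The usual convention is that chains attached at prerouting and local-out rewrite the destination (DNAT/redirect), while chains at postrouting and local-in rewrite the source (SNAT/masquerade). The sketch below restates that mapping in plain C; the enum names and helper are illustrative and do not reproduce the kernel's own macro:

#include <stdio.h>

enum hook  { HOOK_PRE_ROUTING, HOOK_LOCAL_IN, HOOK_FORWARD, HOOK_LOCAL_OUT, HOOK_POST_ROUTING };
enum manip { MANIP_SRC, MANIP_DST };

/* Packets entering the stack or leaving a local socket get their destination
 * rewritten; packets about to hit the wire or be delivered locally get their
 * source rewritten. */
static enum manip hook_to_manip(enum hook h)
{
        return (h == HOOK_POST_ROUTING || h == HOOK_LOCAL_IN) ? MANIP_SRC : MANIP_DST;
}

int main(void)
{
        printf("prerouting  -> %s\n", hook_to_manip(HOOK_PRE_ROUTING)  == MANIP_DST ? "DNAT" : "SNAT");
        printf("local out   -> %s\n", hook_to_manip(HOOK_LOCAL_OUT)    == MANIP_DST ? "DNAT" : "SNAT");
        printf("postrouting -> %s\n", hook_to_manip(HOOK_POST_ROUTING) == MANIP_DST ? "DNAT" : "SNAT");
        printf("local in    -> %s\n", hook_to_manip(HOOK_LOCAL_IN)     == MANIP_DST ? "DNAT" : "SNAT");
        return 0;
}
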
diff --git a/net/ipv4/netfilter/nft_chain_route_ipv4.c b/net/ipv4/netfilter/nft_chain_route_ipv4.c
new file mode 100644 (file)
index 0000000..4e6bf9a
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv4.h>
+#include <net/route.h>
+#include <net/ip.h>
+
+static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
+                                       struct sk_buff *skb,
+                                       const struct net_device *in,
+                                       const struct net_device *out,
+                                       int (*okfn)(struct sk_buff *))
+{
+       unsigned int ret;
+       struct nft_pktinfo pkt;
+       u32 mark;
+       __be32 saddr, daddr;
+       u_int8_t tos;
+       const struct iphdr *iph;
+
+       /* root is playing with raw sockets. */
+       if (skb->len < sizeof(struct iphdr) ||
+           ip_hdrlen(skb) < sizeof(struct iphdr))
+               return NF_ACCEPT;
+
+       nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+
+       mark = skb->mark;
+       iph = ip_hdr(skb);
+       saddr = iph->saddr;
+       daddr = iph->daddr;
+       tos = iph->tos;
+
+       ret = nft_do_chain_pktinfo(&pkt, ops);
+       if (ret != NF_DROP && ret != NF_QUEUE) {
+               iph = ip_hdr(skb);
+
+               if (iph->saddr != saddr ||
+                   iph->daddr != daddr ||
+                   skb->mark != mark ||
+                   iph->tos != tos)
+                       if (ip_route_me_harder(skb, RTN_UNSPEC))
+                               ret = NF_DROP;
+       }
+       return ret;
+}
+
+static struct nf_chain_type nft_chain_route_ipv4 = {
+       .family         = NFPROTO_IPV4,
+       .name           = "route",
+       .type           = NFT_CHAIN_T_ROUTE,
+       .hook_mask      = (1 << NF_INET_LOCAL_OUT),
+       .fn             = {
+               [NF_INET_LOCAL_OUT]     = nf_route_table_hook,
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init nft_chain_route_init(void)
+{
+       return nft_register_chain_type(&nft_chain_route_ipv4);
+}
+
+static void __exit nft_chain_route_exit(void)
+{
+       nft_unregister_chain_type(&nft_chain_route_ipv4);
+}
+
+module_init(nft_chain_route_init);
+module_exit(nft_chain_route_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET, "route");
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
new file mode 100644 (file)
index 0000000..fff5ba1
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/icmp.h>
+
+struct nft_reject {
+       enum nft_reject_types   type:8;
+       u8                      icmp_code;
+};
+
+static void nft_reject_eval(const struct nft_expr *expr,
+                             struct nft_data data[NFT_REG_MAX + 1],
+                             const struct nft_pktinfo *pkt)
+{
+       struct nft_reject *priv = nft_expr_priv(expr);
+
+       switch (priv->type) {
+       case NFT_REJECT_ICMP_UNREACH:
+               icmp_send(pkt->skb, ICMP_DEST_UNREACH, priv->icmp_code, 0);
+               break;
+       case NFT_REJECT_TCP_RST:
+               break;
+       }
+
+       data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+
+static const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
+       [NFTA_REJECT_TYPE]              = { .type = NLA_U32 },
+       [NFTA_REJECT_ICMP_CODE]         = { .type = NLA_U8 },
+};
+
+static int nft_reject_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
+{
+       struct nft_reject *priv = nft_expr_priv(expr);
+
+       if (tb[NFTA_REJECT_TYPE] == NULL)
+               return -EINVAL;
+
+       priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
+       switch (priv->type) {
+       case NFT_REJECT_ICMP_UNREACH:
+               if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
+                       return -EINVAL;
+               priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+       case NFT_REJECT_TCP_RST:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_reject *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type))
+               goto nla_put_failure;
+
+       switch (priv->type) {
+       case NFT_REJECT_ICMP_UNREACH:
+               if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
+                       goto nla_put_failure;
+               break;
+       }
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_reject_type;
+static const struct nft_expr_ops nft_reject_ops = {
+       .type           = &nft_reject_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+       .eval           = nft_reject_eval,
+       .init           = nft_reject_init,
+       .dump           = nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_type __read_mostly = {
+       .name           = "reject",
+       .ops            = &nft_reject_ops,
+       .policy         = nft_reject_policy,
+       .maxattr        = NFTA_REJECT_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_reject_module_init(void)
+{
+       return nft_register_expr(&nft_reject_type);
+}
+
+static void __exit nft_reject_module_exit(void)
+{
+       nft_unregister_expr(&nft_reject_type);
+}
+
+module_init(nft_reject_module_init);
+module_exit(nft_reject_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("reject");
index d7d9882d4caea169964a58e294ffe6c73a99d36c..9afbdb19f4a2f5dc1bccb6cf5d73db795fbf5f03 100644 (file)
@@ -202,15 +202,14 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
 #if IS_ENABLED(CONFIG_IPV6)
                } else if (skb->protocol == htons(ETH_P_IPV6) &&
                           sk->sk_family == AF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(sk);
 
                        pr_debug("found: %p: num=%d, daddr=%pI6c, dif=%d\n", sk,
                                 (int) isk->inet_num,
-                                &inet6_sk(sk)->rcv_saddr,
+                                &sk->sk_v6_rcv_saddr,
                                 sk->sk_bound_dev_if);
 
-                       if (!ipv6_addr_any(&np->rcv_saddr) &&
-                           !ipv6_addr_equal(&np->rcv_saddr,
+                       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
+                           !ipv6_addr_equal(&sk->sk_v6_rcv_saddr,
                                             &ipv6_hdr(skb)->daddr))
                                continue;
 #endif
@@ -237,11 +236,11 @@ static void inet_get_ping_group_range_net(struct net *net, kgid_t *low,
        unsigned int seq;
 
        do {
-               seq = read_seqbegin(&sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 
 
@@ -362,7 +361,7 @@ static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
        } else if (saddr->sa_family == AF_INET6) {
                struct sockaddr_in6 *addr = (struct sockaddr_in6 *) saddr;
                struct ipv6_pinfo *np = inet6_sk(sk);
-               np->rcv_saddr = np->saddr = addr->sin6_addr;
+               sk->sk_v6_rcv_saddr = np->saddr = addr->sin6_addr;
 #endif
        }
 }
@@ -376,7 +375,7 @@ static void ping_clear_saddr(struct sock *sk, int dif)
 #if IS_ENABLED(CONFIG_IPV6)
        } else if (sk->sk_family == AF_INET6) {
                struct ipv6_pinfo *np = inet6_sk(sk);
-               memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
+               memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
                memset(&np->saddr, 0, sizeof(np->saddr));
 #endif
        }
@@ -416,10 +415,12 @@ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                 (int)sk->sk_bound_dev_if);
 
        err = 0;
-       if ((sk->sk_family == AF_INET && isk->inet_rcv_saddr) ||
-           (sk->sk_family == AF_INET6 &&
-            !ipv6_addr_any(&inet6_sk(sk)->rcv_saddr)))
+       if (sk->sk_family == AF_INET && isk->inet_rcv_saddr)
                sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6 && !ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+               sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
+#endif
 
        if (snum)
                sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
@@ -429,7 +430,7 @@ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6)
-               memset(&inet6_sk(sk)->daddr, 0, sizeof(inet6_sk(sk)->daddr));
+               memset(&sk->sk_v6_daddr, 0, sizeof(sk->sk_v6_daddr));
 #endif
 
        sk_dst_reset(sk);
@@ -713,6 +714,8 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        ipc.opt = NULL;
        ipc.oif = sk->sk_bound_dev_if;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        sock_tx_timestamp(sk, &ipc.tx_flags);
 
@@ -744,7 +747,7 @@ int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        return -EINVAL;
                faddr = ipc.opt->opt.faddr;
        }
-       tos = RT_TOS(inet->tos);
+       tos = get_rttos(&ipc, inet);
        if (sock_flag(sk, SOCK_LOCALROUTE) ||
            (msg->msg_flags & MSG_DONTROUTE) ||
            (ipc.opt && ipc.opt->opt.is_strictroute)) {
index 193db03540ad7c8dabd2f88be81fb5fe78eb8eab..41e1d2845c8f6690b5588888134bd80e49fc5dcf 100644 (file)
@@ -299,7 +299,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        /* Charge it to the socket. */
 
-       ipv4_pktinfo_prepare(skb);
+       ipv4_pktinfo_prepare(sk, skb);
        if (sock_queue_rcv_skb(sk, skb) < 0) {
                kfree_skb(skb);
                return NET_RX_DROP;
@@ -519,6 +519,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        ipc.addr = inet->inet_saddr;
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
        ipc.oif = sk->sk_bound_dev_if;
 
        if (msg->msg_controllen) {
@@ -558,7 +560,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        daddr = ipc.opt->opt.faddr;
                }
        }
-       tos = RT_CONN_FLAGS(sk);
+       tos = get_rtconn_flags(&ipc, sk);
        if (msg->msg_flags & MSG_DONTROUTE)
                tos |= RTO_ONLINK;
 
index 727f4365bcdff3acdb415fe75fd878a3bc5050af..d2d325382b13f4343d70974c327af3527ef1fcae 100644 (file)
@@ -295,7 +295,7 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
        seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
                   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
                   dst_entries_get_slow(&ipv4_dst_ops),
-                  st->in_hit,
+                  0, /* st->in_hit */
                   st->in_slow_tot,
                   st->in_slow_mc,
                   st->in_no_route,
@@ -303,16 +303,16 @@ static int rt_cpu_seq_show(struct seq_file *seq, void *v)
                   st->in_martian_dst,
                   st->in_martian_src,
 
-                  st->out_hit,
+                  0, /* st->out_hit */
                   st->out_slow_tot,
                   st->out_slow_mc,
 
-                  st->gc_total,
-                  st->gc_ignored,
-                  st->gc_goal_miss,
-                  st->gc_dst_overflow,
-                  st->in_hlist_search,
-                  st->out_hlist_search
+                  0, /* st->gc_total */
+                  0, /* st->gc_ignored */
+                  0, /* st->gc_goal_miss */
+                  0, /* st->gc_dst_overflow */
+                  0, /* st->in_hlist_search */
+                  0  /* st->out_hlist_search */
                );
        return 0;
 }
@@ -2072,7 +2072,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
                                                              RT_SCOPE_LINK);
                        goto make_route;
                }
-               if (fl4->saddr) {
+               if (!fl4->saddr) {
                        if (ipv4_is_multicast(fl4->daddr))
                                fl4->saddr = inet_select_addr(dev_out, 0,
                                                              fl4->flowi4_scope);
index 14a15c49129df8984076c05c4dd757012fed03b3..b95331e6c077cea0ff215702087fa14acd00abaf 100644 (file)
 
 extern int sysctl_tcp_syncookies;
 
-__u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-EXPORT_SYMBOL(syncookie_secret);
-
-static __init int init_syncookies(void)
-{
-       get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
-       return 0;
-}
-__initcall(init_syncookies);
+static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
 
 #define COOKIEBITS 24  /* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
@@ -44,8 +36,11 @@ static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
                       u32 count, int c)
 {
-       __u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
+       __u32 *tmp;
+
+       net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
 
+       tmp  = __get_cpu_var(ipv4_cookie_scratch);
        memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
        tmp[0] = (__force u32)saddr;
        tmp[1] = (__force u32)daddr;
@@ -89,8 +84,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
 
 
 static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
-                                  __be16 dport, __u32 sseq, __u32 count,
-                                  __u32 data)
+                                  __be16 dport, __u32 sseq, __u32 data)
 {
        /*
         * Compute the secure sequence number.
@@ -102,7 +96,7 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
         * As an extra hack, we add a small "data" value that encodes the
         * MSS into the second hash value.
         */
-
+       u32 count = tcp_cookie_time();
        return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
                sseq + (count << COOKIEBITS) +
                ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
@@ -114,22 +108,21 @@ static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
  * If the syncookie is bad, the data returned will be out of
  * range.  This must be checked by the caller.
  *
- * The count value used to generate the cookie must be within
- * "maxdiff" if the current (passed-in) "count".  The return value
- * is (__u32)-1 if this test fails.
+ * The count value used to generate the cookie must be less than
+ * MAX_SYNCOOKIE_AGE minutes in the past.
+ * The return value is (__u32)-1 if this test fails.
  */
 static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
-                                 __be16 sport, __be16 dport, __u32 sseq,
-                                 __u32 count, __u32 maxdiff)
+                                 __be16 sport, __be16 dport, __u32 sseq)
 {
-       __u32 diff;
+       u32 diff, count = tcp_cookie_time();
 
        /* Strip away the layers from the cookie */
        cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
 
        /* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
        diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
-       if (diff >= maxdiff)
+       if (diff >= MAX_SYNCOOKIE_AGE)
                return (__u32)-1;
 
        return (cookie -
@@ -138,22 +131,22 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
 }
 
 /*
- * MSS Values are taken from the 2009 paper
- * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
- *  - values 1440 to 1460 accounted for 80% of observed mss values
- *  - values outside the 536-1460 range are rare (<0.2%).
+ * MSS Values are chosen based on the 2011 paper
+ * 'An Analysis of TCP Maximum Segment Sizes' by S. Alcock and R. Nelson.
+ * Values ..
+ *  .. lower than 536 are rare (< 0.2%)
+ *  .. between 537 and 1299 account for less than 1.5% of observed values
+ *  .. in the 1300-1349 range account for about 15 to 20% of observed mss values
+ *  .. exceeding 1460 are very rare (< 0.04%)
  *
- * Table must be sorted.
+ *  1460 is the single most frequently announced mss value (30 to 46% depending
+ *  on monitor location).  Table must be sorted.
  */
 static __u16 const msstab[] = {
-       64,
-       512,
        536,
-       1024,
-       1440,
+       1300,
+       1440,   /* 1440, 1452: PPPoE */
        1460,
-       4312,
-       8960,
 };
 
 /*
@@ -173,7 +166,7 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 
        return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
                                     th->source, th->dest, ntohl(th->seq),
-                                    jiffies / (HZ * 60), mssind);
+                                    mssind);
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
 
@@ -188,13 +181,6 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
        return __cookie_v4_init_sequence(iph, th, mssp);
 }
 
-/*
- * This (misnamed) value is the age of syncookie which is permitted.
- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
- * backoff) to compute at runtime so it's currently hardcoded here.
- */
-#define COUNTER_TRIES 4
 /*
  * Check if a ack sequence number is a valid syncookie.
  * Return the decoded mss if it is, or 0 if not.
@@ -204,9 +190,7 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
 {
        __u32 seq = ntohl(th->seq) - 1;
        __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
-                                           th->source, th->dest, seq,
-                                           jiffies / (HZ * 60),
-                                           COUNTER_TRIES);
+                                           th->source, th->dest, seq);
 
        return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
 }
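
Taken together, secure_tcp_syn_cookie() and check_tcp_syn_cookie() pack two values into the 32-bit ISN: a coarse timestamp in the bits above COOKIEBITS (now taken from tcp_cookie_time() instead of being passed in), and the msstab index hidden behind a second hash in the low 24 bits. A self-contained sketch of that arithmetic, with cookie_hash() replaced by a toy stand-in (the specific hash is irrelevant to the layout) and an illustrative age limit:

#include <stdio.h>
#include <stdint.h>

#define COOKIEBITS 24
#define COOKIEMASK (((uint32_t)1 << COOKIEBITS) - 1)
#define MAX_SYNCOOKIE_AGE 2             /* illustrative, in units of "count" ticks */

/* Stand-in for cookie_hash(): any keyed hash of the 4-tuple and count
 * demonstrates the layout. */
static uint32_t toy_hash(uint32_t saddr, uint32_t daddr, uint16_t sport,
                         uint16_t dport, uint32_t count, int c)
{
        uint32_t x = saddr ^ (daddr * 2654435761u) ^ ((uint32_t)sport << 16) ^
                     dport ^ (count * 40503u) ^ (uint32_t)c;
        x ^= x >> 15; x *= 2246822519u; x ^= x >> 13;
        return x;
}

/* Same layout as secure_tcp_syn_cookie(). */
static uint32_t encode(uint32_t s, uint32_t d, uint16_t sp, uint16_t dp,
                       uint32_t sseq, uint32_t count, uint32_t data)
{
        return toy_hash(s, d, sp, dp, 0, 0) + sseq + (count << COOKIEBITS) +
               ((toy_hash(s, d, sp, dp, count, 1) + data) & COOKIEMASK);
}

/* Same recovery as check_tcp_syn_cookie(): strip hash1 + sseq, reject stale
 * counts, then strip hash2 to get the data (msstab index) back. */
static uint32_t decode(uint32_t cookie, uint32_t s, uint32_t d, uint16_t sp,
                       uint16_t dp, uint32_t sseq, uint32_t now)
{
        uint32_t diff;

        cookie -= toy_hash(s, d, sp, dp, 0, 0) + sseq;
        diff = (now - (cookie >> COOKIEBITS)) & ((uint32_t)-1 >> COOKIEBITS);
        if (diff >= MAX_SYNCOOKIE_AGE)
                return (uint32_t)-1;
        return (cookie - toy_hash(s, d, sp, dp, now - diff, 1)) & COOKIEMASK;
}

int main(void)
{
        uint32_t cookie = encode(0x0a000001, 0x0a000002, 12345, 80,
                                 1000 /* sseq */, 7 /* count */, 2 /* mssind */);

        /* one tick later the index 2 comes back; much later the check fails */
        printf("fresh: %u\n", decode(cookie, 0x0a000001, 0x0a000002, 12345, 80, 1000, 8));
        printf("stale: %u\n", decode(cookie, 0x0a000001, 0x0a000002, 12345, 80, 1000, 50));
        return 0;
}
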
@@ -315,10 +299,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        treq->rcv_isn           = ntohl(th->seq) - 1;
        treq->snt_isn           = cookie;
        req->mss                = mss;
-       ireq->loc_port          = th->dest;
-       ireq->rmt_port          = th->source;
-       ireq->loc_addr          = ip_hdr(skb)->daddr;
-       ireq->rmt_addr          = ip_hdr(skb)->saddr;
+       ireq->ir_num            = ntohs(th->dest);
+       ireq->ir_rmt_port       = th->source;
+       ireq->ir_loc_addr       = ip_hdr(skb)->daddr;
+       ireq->ir_rmt_addr       = ip_hdr(skb)->saddr;
        ireq->ecn_ok            = ecn_ok;
        ireq->snd_wscale        = tcp_opt.snd_wscale;
        ireq->sack_ok           = tcp_opt.sack_ok;
@@ -358,8 +342,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
        flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
                           inet_sk_flowi_flags(sk),
-                          (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
-                          ireq->loc_addr, th->source, th->dest);
+                          (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
+                          ireq->ir_loc_addr, th->source, th->dest);
        security_req_classify_flow(req, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(sock_net(sk), &fl4);
        if (IS_ERR(rt)) {
index 540279f4c531be079e45c027aa11d37920c14a88..d5b1390eebbee19715fcbbdbea742bbf0eddcc71 100644 (file)
@@ -43,12 +43,12 @@ static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 
 /* Update system visible IP port range */
-static void set_local_port_range(int range[2])
+static void set_local_port_range(struct net *net, int range[2])
 {
-       write_seqlock(&sysctl_local_ports.lock);
-       sysctl_local_ports.range[0] = range[0];
-       sysctl_local_ports.range[1] = range[1];
-       write_sequnlock(&sysctl_local_ports.lock);
+       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
+       net->ipv4.sysctl_local_ports.range[0] = range[0];
+       net->ipv4.sysctl_local_ports.range[1] = range[1];
+       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -56,6 +56,8 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                                 void __user *buffer,
                                 size_t *lenp, loff_t *ppos)
 {
+       struct net *net =
+               container_of(table->data, struct net, ipv4.sysctl_local_ports.range);
        int ret;
        int range[2];
        struct ctl_table tmp = {
@@ -66,14 +68,15 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
                .extra2 = &ip_local_port_range_max,
        };
 
-       inet_get_local_port_range(range, range + 1);
+       inet_get_local_port_range(net, &range[0], &range[1]);
+
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 
        if (write && ret == 0) {
                if (range[1] < range[0])
                        ret = -EINVAL;
                else
-                       set_local_port_range(range);
+                       set_local_port_range(net, range);
        }
 
        return ret;
@@ -83,23 +86,27 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
 static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
 {
        kgid_t *data = table->data;
+       struct net *net =
+               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&sysctl_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.sysctl_local_ports.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&sysctl_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.sysctl_local_ports.lock, seq));
 }
 
 /* Update system visible IP port range */
 static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
 {
        kgid_t *data = table->data;
-       write_seqlock(&sysctl_local_ports.lock);
+       struct net *net =
+               container_of(table->data, struct net, ipv4.sysctl_ping_group_range);
+       write_seqlock(&net->ipv4.sysctl_local_ports.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&sysctl_local_ports.lock);
+       write_sequnlock(&net->ipv4.sysctl_local_ports.lock);
 }
 
 /* Validate changes from /proc interface. */
@@ -193,49 +200,6 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
        return ret;
 }
 
-static int ipv4_tcp_mem(struct ctl_table *ctl, int write,
-                          void __user *buffer, size_t *lenp,
-                          loff_t *ppos)
-{
-       int ret;
-       unsigned long vec[3];
-       struct net *net = current->nsproxy->net_ns;
-#ifdef CONFIG_MEMCG_KMEM
-       struct mem_cgroup *memcg;
-#endif
-
-       struct ctl_table tmp = {
-               .data = &vec,
-               .maxlen = sizeof(vec),
-               .mode = ctl->mode,
-       };
-
-       if (!write) {
-               ctl->data = &net->ipv4.sysctl_tcp_mem;
-               return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
-       }
-
-       ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
-       if (ret)
-               return ret;
-
-#ifdef CONFIG_MEMCG_KMEM
-       rcu_read_lock();
-       memcg = mem_cgroup_from_task(current);
-
-       tcp_prot_mem(memcg, vec[0], 0);
-       tcp_prot_mem(memcg, vec[1], 1);
-       tcp_prot_mem(memcg, vec[2], 2);
-       rcu_read_unlock();
-#endif
-
-       net->ipv4.sysctl_tcp_mem[0] = vec[0];
-       net->ipv4.sysctl_tcp_mem[1] = vec[1];
-       net->ipv4.sysctl_tcp_mem[2] = vec[2];
-
-       return 0;
-}
-
 static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *ppos)
@@ -267,6 +231,11 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
                        ret = -EINVAL;
                        goto bad_key;
                }
+               /* Generate a dummy secret but don't publish it. This
+                * is needed so we don't regenerate a new key on the
+                * first invocation of tcp_fastopen_cookie_gen
+                */
+               tcp_fastopen_init_key_once(false);
                tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
        }
 
@@ -474,13 +443,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "ip_local_port_range",
-               .data           = &sysctl_local_ports.range,
-               .maxlen         = sizeof(sysctl_local_ports.range),
-               .mode           = 0644,
-               .proc_handler   = ipv4_local_port_range,
-       },
        {
                .procname       = "ip_local_reserved_ports",
                .data           = NULL, /* initialized in sysctl_ipv4_init */
@@ -551,6 +513,13 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "tcp_mem",
+               .maxlen         = sizeof(sysctl_tcp_mem),
+               .data           = &sysctl_tcp_mem,
+               .mode           = 0644,
+               .proc_handler   = proc_doulongvec_minmax,
+       },
        {
                .procname       = "tcp_wmem",
                .data           = &sysctl_tcp_wmem,
@@ -854,10 +823,11 @@ static struct ctl_table ipv4_net_table[] = {
                .proc_handler   = proc_dointvec
        },
        {
-               .procname       = "tcp_mem",
-               .maxlen         = sizeof(init_net.ipv4.sysctl_tcp_mem),
+               .procname       = "ip_local_port_range",
+               .maxlen         = sizeof(init_net.ipv4.sysctl_local_ports.range),
+               .data           = &init_net.ipv4.sysctl_local_ports.range,
                .mode           = 0644,
-               .proc_handler   = ipv4_tcp_mem,
+               .proc_handler   = ipv4_local_port_range,
        },
        { }
 };
@@ -868,30 +838,15 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 
        table = ipv4_net_table;
        if (!net_eq(net, &init_net)) {
+               int i;
+
                table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;
 
-               table[0].data =
-                       &net->ipv4.sysctl_icmp_echo_ignore_all;
-               table[1].data =
-                       &net->ipv4.sysctl_icmp_echo_ignore_broadcasts;
-               table[2].data =
-                       &net->ipv4.sysctl_icmp_ignore_bogus_error_responses;
-               table[3].data =
-                       &net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr;
-               table[4].data =
-                       &net->ipv4.sysctl_icmp_ratelimit;
-               table[5].data =
-                       &net->ipv4.sysctl_icmp_ratemask;
-               table[6].data =
-                       &net->ipv4.sysctl_ping_group_range;
-               table[7].data =
-                       &net->ipv4.sysctl_tcp_ecn;
-
-               /* Don't export sysctls to unprivileged users */
-               if (net->user_ns != &init_user_ns)
-                       table[0].procname = NULL;
+               /* Update the variables to point into the current struct net */
+               for (i = 0; i < ARRAY_SIZE(ipv4_net_table) - 1; i++)
+                       table[i].data += (void *)net - (void *)&init_net;
        }
 
        /*
@@ -901,7 +856,12 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
        net->ipv4.sysctl_ping_group_range[0] = make_kgid(&init_user_ns, 1);
        net->ipv4.sysctl_ping_group_range[1] = make_kgid(&init_user_ns, 0);
 
-       tcp_init_mem(net);
+       /*
+        * Set defaults for local port range
+        */
+       seqlock_init(&net->ipv4.sysctl_local_ports.lock);
+       net->ipv4.sysctl_local_ports.range[0] =  32768;
+       net->ipv4.sysctl_local_ports.range[1] =  61000;
 
        net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
        if (net->ipv4.ipv4_hdr == NULL)
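
The sysctl conversion above replaces the hand-written table[N].data assignments with two pieces of pointer arithmetic: ipv4_sysctl_init_net() rebases every .data pointer by the byte offset between the new struct net and init_net, and handlers such as ipv4_local_port_range() recover their struct net from table->data with container_of(). The same trick outside the kernel (struct and field names are hypothetical; like the kernel, it relies on pointer subtraction between distinct objects, which is not strictly portable C):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_net {                        /* stand-in for struct net */
        int icmp_ratelimit;
        int local_port_range[2];
};

struct toy_ctl_table {
        const char *procname;
        void *data;
};

static struct toy_net init_net = { 1000, { 32768, 61000 } };

/* Template table pointing into init_net, like ipv4_net_table[]. */
static struct toy_ctl_table template[] = {
        { "icmp_ratelimit",      &init_net.icmp_ratelimit },
        { "ip_local_port_range", &init_net.local_port_range },
};

int main(void)
{
        struct toy_net other = { 2000, { 10000, 20000 } };
        struct toy_ctl_table table[2];
        size_t i;

        memcpy(table, template, sizeof(template));

        /* The rebasing loop from ipv4_sysctl_init_net(). */
        for (i = 0; i < 2; i++)
                table[i].data = (char *)table[i].data +
                                ((char *)&other - (char *)&init_net);

        /* The container_of() step from the handlers. */
        struct toy_net *owner = container_of(table[1].data, struct toy_net,
                                             local_port_range);

        printf("rebased ratelimit = %d\n", *(int *)table[0].data);  /* 2000 */
        printf("owner port range  = %d-%d\n",
               owner->local_port_range[0], owner->local_port_range[1]);
        return 0;
}
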
index 6e5617b9f9db6f3c2aeacc7923ea763660761d99..4f328544c07517a6544d5893039aec10829f6959 100644 (file)
@@ -288,9 +288,11 @@ int sysctl_tcp_min_tso_segs __read_mostly = 2;
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
+long sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;
 
+EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
@@ -1429,7 +1431,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
        do {
                if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
                                              last_issued, &done,
-                                             &used) == DMA_SUCCESS) {
+                                             &used) == DMA_COMPLETE) {
                        /* Safe to free early-copied skbs now */
                        __skb_queue_purge(&sk->sk_async_wait_queue);
                        break;
@@ -1437,7 +1439,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
                        struct sk_buff *skb;
                        while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
                               (dma_async_is_complete(skb->dma_cookie, done,
-                                                     used) == DMA_SUCCESS)) {
+                                                     used) == DMA_COMPLETE)) {
                                __skb_dequeue(&sk->sk_async_wait_queue);
                                kfree_skb(skb);
                        }
@@ -3097,13 +3099,13 @@ static int __init set_thash_entries(char *str)
 }
 __setup("thash_entries=", set_thash_entries);
 
-void tcp_init_mem(struct net *net)
+static void tcp_init_mem(void)
 {
        unsigned long limit = nr_free_buffer_pages() / 8;
        limit = max(limit, 128UL);
-       net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
-       net->ipv4.sysctl_tcp_mem[1] = limit;
-       net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+       sysctl_tcp_mem[0] = limit / 4 * 3;
+       sysctl_tcp_mem[1] = limit;
+       sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
 }
 
 void __init tcp_init(void)
@@ -3137,10 +3139,9 @@ void __init tcp_init(void)
                                        &tcp_hashinfo.ehash_mask,
                                        0,
                                        thash_entries ? 0 : 512 * 1024);
-       for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
+       for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
                INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
-               INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
-       }
+
        if (inet_ehash_locks_alloc(&tcp_hashinfo))
                panic("TCP: failed to alloc ehash_locks");
        tcp_hashinfo.bhash =
@@ -3166,7 +3167,7 @@ void __init tcp_init(void)
        sysctl_tcp_max_orphans = cnt / 2;
        sysctl_max_syn_backlog = max(128, cnt / 256);
 
-       tcp_init_mem(&init_net);
+       tcp_init_mem();
        /* Set per-socket limits to no more than 1/128 the pressure threshold */
        limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
        max_wshare = min(4UL*1024*1024, limit);
index ab7bd35bb312c6e9e07aa2950d75ec4bbc982eac..766032b4a6c39b9894c95b5eeef1689e355605ff 100644 (file)
@@ -14,6 +14,20 @@ struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 
 static DEFINE_SPINLOCK(tcp_fastopen_ctx_lock);
 
+void tcp_fastopen_init_key_once(bool publish)
+{
+       static u8 key[TCP_FASTOPEN_KEY_LENGTH];
+
+       /* tcp_fastopen_reset_cipher publishes the new context
+        * atomically, so we allow this race to happen here.
+        *
+        * All call sites of tcp_fastopen_cookie_gen also check
+        * for a valid cookie, so this is an acceptable risk.
+        */
+       if (net_get_random_once(key, sizeof(key)) && publish)
+               tcp_fastopen_reset_cipher(key, sizeof(key));
+}
+
 static void tcp_fastopen_ctx_free(struct rcu_head *head)
 {
        struct tcp_fastopen_context *ctx =
@@ -70,6 +84,8 @@ void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
        __be32 path[4] = { src, dst, 0, 0 };
        struct tcp_fastopen_context *ctx;
 
+       tcp_fastopen_init_key_once(true);
+
        rcu_read_lock();
        ctx = rcu_dereference(tcp_fastopen_ctx);
        if (ctx) {
@@ -78,14 +94,3 @@ void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
        }
        rcu_read_unlock();
 }
-
-static int __init tcp_fastopen_init(void)
-{
-       __u8 key[TCP_FASTOPEN_KEY_LENGTH];
-
-       get_random_bytes(key, sizeof(key));
-       tcp_fastopen_reset_cipher(key, sizeof(key));
-       return 0;
-}
-
-late_initcall(tcp_fastopen_init);
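
The initcall removed above is replaced by lazy, once-only key generation: the first caller of tcp_fastopen_init_key_once() creates the key, and only a caller that both generated it and passed publish=true installs it as the cipher context, which is why the sysctl write handler earlier in this commit calls it with false before installing the administrator's key. A rough user-space analogue of that pattern (names and the mutex-based once helper are illustrative; net_get_random_once() is the kernel's mechanism):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KEY_LEN 16

static unsigned char active_key[KEY_LEN];       /* the "published" context */
static unsigned char key[KEY_LEN];              /* the generated secret    */
static pthread_mutex_t once_lock = PTHREAD_MUTEX_INITIALIZER;
static bool generated;

/* Fill buf with random bytes the first time only; return true only to the
 * caller that actually did the generation. */
static bool get_random_once(unsigned char *buf, size_t len)
{
        bool did_init = false;

        pthread_mutex_lock(&once_lock);
        if (!generated) {
                for (size_t i = 0; i < len; i++)
                        buf[i] = (unsigned char)rand();
                generated = true;
                did_init = true;
        }
        pthread_mutex_unlock(&once_lock);
        return did_init;
}

static void reset_cipher(const unsigned char *k)  /* ~ tcp_fastopen_reset_cipher() */
{
        memcpy(active_key, k, KEY_LEN);
        printf("cipher context replaced\n");
}

static void fastopen_init_key_once(bool publish)
{
        if (get_random_once(key, sizeof(key)) && publish)
                reset_cipher(key);
}

int main(void)
{
        unsigned char user_key[KEY_LEN] = "0123456789abcde";

        /* sysctl write path: burn the one-shot generation without publishing,
         * then install the user-supplied key ...                            */
        fastopen_init_key_once(false);
        reset_cipher(user_key);

        /* ... so the first cookie generation no longer overwrites it.       */
        fastopen_init_key_once(true);   /* already generated: does nothing   */
        return 0;
}
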
index 25a89eaa669de9240036abc04f1385eb168f995e..b935397c703c569bf01e6ebfd85e6bfd1b474227 100644 (file)
@@ -267,11 +267,31 @@ static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr
  * 1. Tuning sk->sk_sndbuf, when connection enters established state.
  */
 
-static void tcp_fixup_sndbuf(struct sock *sk)
+static void tcp_sndbuf_expand(struct sock *sk)
 {
-       int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);
+       const struct tcp_sock *tp = tcp_sk(sk);
+       int sndmem, per_mss;
+       u32 nr_segs;
+
+       /* Worst case is non GSO/TSO : each frame consumes one skb
+        * and skb->head is kmalloced using power of two area of memory
+        */
+       per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
+                 MAX_TCP_HEADER +
+                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       per_mss = roundup_pow_of_two(per_mss) +
+                 SKB_DATA_ALIGN(sizeof(struct sk_buff));
+
+       nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
+       nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
+
+       /* Fast Recovery (RFC 5681 3.2) :
+        * Cubic needs 1.7 factor, rounded to 2 to include
+        * extra cushion (application might react slowly to POLLOUT)
+        */
+       sndmem = 2 * nr_segs * per_mss;
 
-       sndmem *= TCP_INIT_CWND;
        if (sk->sk_sndbuf < sndmem)
                sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
 }
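
The new tcp_sndbuf_expand() above sizes the send buffer from a worst-case per-segment memory cost (no GSO/TSO: one skb per MSS, head rounded up to a power of two, plus struct sk_buff) multiplied by the expected number of in-flight segments and by 2 for the fast-recovery cushion. The same arithmetic as a standalone calculation, with assumed sizes (the real MAX_TCP_HEADER, skb_shared_info and sk_buff sizes are build-dependent):

#include <stdio.h>
#include <stdint.h>

#define MAX_TCP_HEADER          320     /* assumed for illustration */
#define SKB_SHARED_INFO_SIZE    320     /* assumed for illustration */
#define SK_BUFF_SIZE            256     /* assumed for illustration */
#define TCP_INIT_CWND           10

static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

static uint32_t roundup_pow_of_two(uint32_t x)
{
        uint32_t r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

/* Same steps as tcp_sndbuf_expand(). */
static uint32_t sndbuf_target(uint32_t mss_clamp, uint32_t mss_cache,
                              uint32_t snd_cwnd, uint32_t reordering)
{
        uint32_t per_mss, nr_segs;

        per_mss = max_u32(mss_clamp, mss_cache) + MAX_TCP_HEADER +
                  SKB_SHARED_INFO_SIZE;
        per_mss = roundup_pow_of_two(per_mss) + SK_BUFF_SIZE;

        nr_segs = max_u32(TCP_INIT_CWND, snd_cwnd);
        nr_segs = max_u32(nr_segs, reordering + 1);

        return 2 * nr_segs * per_mss;   /* 2x fast-recovery / slow-POLLOUT cushion */
}

int main(void)
{
        /* mss 1460, cwnd 10: 1460+320+320 = 2100 -> 4096, +256 = 4352 bytes
         * per segment; 2 * 10 * 4352 = 87040 bytes.                        */
        printf("target sndbuf = %u bytes\n", sndbuf_target(1460, 1460, 10, 3));
        return 0;
}
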
@@ -355,6 +375,12 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
        rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) *
                 tcp_default_init_rwnd(mss);
 
+       /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency
+        * Allow enough cushion so that sender is not limited by our window
+        */
+       if (sysctl_tcp_moderate_rcvbuf)
+               rcvmem <<= 2;
+
        if (sk->sk_rcvbuf < rcvmem)
                sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
 }
@@ -370,9 +396,11 @@ void tcp_init_buffer_space(struct sock *sk)
        if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
                tcp_fixup_rcvbuf(sk);
        if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
-               tcp_fixup_sndbuf(sk);
+               tcp_sndbuf_expand(sk);
 
        tp->rcvq_space.space = tp->rcv_wnd;
+       tp->rcvq_space.time = tcp_time_stamp;
+       tp->rcvq_space.seq = tp->copied_seq;
 
        maxwin = tcp_full_space(sk);
 
@@ -512,48 +540,62 @@ void tcp_rcv_space_adjust(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int time;
-       int space;
-
-       if (tp->rcvq_space.time == 0)
-               goto new_measure;
+       int copied;
 
        time = tcp_time_stamp - tp->rcvq_space.time;
        if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
                return;
 
-       space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
+       /* Number of bytes copied to user in last RTT */
+       copied = tp->copied_seq - tp->rcvq_space.seq;
+       if (copied <= tp->rcvq_space.space)
+               goto new_measure;
 
-       space = max(tp->rcvq_space.space, space);
+       /* A bit of theory :
+        * copied = bytes received in previous RTT, our base window
+        * To cope with packet losses, we need a 2x factor
+        * To cope with slow start, and sender growing its cwin by 100 %
+        * every RTT, we need a 4x factor, because the ACK we are sending
+        * now is for the next RTT, not the current one :
+        * <prev RTT . ><current RTT .. ><next RTT .... >
+        */
 
-       if (tp->rcvq_space.space != space) {
-               int rcvmem;
+       if (sysctl_tcp_moderate_rcvbuf &&
+           !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+               int rcvwin, rcvmem, rcvbuf;
 
-               tp->rcvq_space.space = space;
+               /* minimal window to cope with packet losses, assuming
+                * steady state. Add some cushion because of small variations.
+                */
+               rcvwin = (copied << 1) + 16 * tp->advmss;
 
-               if (sysctl_tcp_moderate_rcvbuf &&
-                   !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-                       int new_clamp = space;
+               /* If rate increased by 25%,
+                *      assume slow start, rcvwin = 3 * copied
+                * If rate increased by 50%,
+                *      assume sender can use 2x growth, rcvwin = 4 * copied
+                */
+               if (copied >=
+                   tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) {
+                       if (copied >=
+                           tp->rcvq_space.space + (tp->rcvq_space.space >> 1))
+                               rcvwin <<= 1;
+                       else
+                               rcvwin += (rcvwin >> 1);
+               }
 
-                       /* Receive space grows, normalize in order to
-                        * take into account packet headers and sk_buff
-                        * structure overhead.
-                        */
-                       space /= tp->advmss;
-                       if (!space)
-                               space = 1;
-                       rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
-                       while (tcp_win_from_space(rcvmem) < tp->advmss)
-                               rcvmem += 128;
-                       space *= rcvmem;
-                       space = min(space, sysctl_tcp_rmem[2]);
-                       if (space > sk->sk_rcvbuf) {
-                               sk->sk_rcvbuf = space;
-
-                               /* Make the window clamp follow along.  */
-                               tp->window_clamp = new_clamp;
-                       }
+               rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
+               while (tcp_win_from_space(rcvmem) < tp->advmss)
+                       rcvmem += 128;
+
+               rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
+               if (rcvbuf > sk->sk_rcvbuf) {
+                       sk->sk_rcvbuf = rcvbuf;
+
+                       /* Make the window clamp follow along.  */
+                       tp->window_clamp = rcvwin;
                }
        }
+       tp->rcvq_space.space = copied;
 
 new_measure:
        tp->rcvq_space.seq = tp->copied_seq;
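
The rewritten tcp_rcv_space_adjust() above derives the receive-buffer target from the bytes actually copied to user space in the last RTT, and only runs this path when that amount grew past the previous estimate. The target is at least 2x copied plus a 16*advmss cushion, bumped further when throughput rose by 25% or 50%, then converted to an sk_rcvbuf value via the per-segment truesize (the rcvmem <<= 2 added to tcp_fixup_rcvbuf() in an earlier hunk gives DRS similar headroom at connection start). The growth rule as a standalone calculation, with the per-segment overhead and rmem limit assumed:

#include <stdio.h>
#include <stdint.h>

#define ADVMSS          1460
#define SKB_OVERHEAD     768            /* assumed truesize overhead per segment */
#define TCP_RMEM_MAX 6291456            /* assumed sysctl_tcp_rmem[2]            */

/* Same growth rule as tcp_rcv_space_adjust(); 'copied' is assumed to exceed
 * 'prev_space', as in the kernel path. */
static uint32_t rcvbuf_target(uint32_t copied, uint32_t prev_space)
{
        uint32_t rcvwin, rcvmem, rcvbuf;

        /* base: 2x what the application consumed last RTT, plus cushion */
        rcvwin = (copied << 1) + 16 * ADVMSS;

        /* +25% rate: assume slow start (~3x copied);
         * +50% rate: assume the sender can double again (~4x copied) */
        if (copied >= prev_space + (prev_space >> 2)) {
                if (copied >= prev_space + (prev_space >> 1))
                        rcvwin <<= 1;
                else
                        rcvwin += rcvwin >> 1;
        }

        /* convert payload window into socket-buffer bytes (truesize) */
        rcvmem = ADVMSS + SKB_OVERHEAD;
        rcvbuf = rcvwin / ADVMSS * rcvmem;
        return rcvbuf < TCP_RMEM_MAX ? rcvbuf : TCP_RMEM_MAX;
}

int main(void)
{
        printf("steady state: %u\n", rcvbuf_target(1000000, 990000));
        printf("+30%% rate   : %u\n", rcvbuf_target(1300000, 1000000));
        printf("+60%% rate   : %u\n", rcvbuf_target(1600000, 1000000));
        return 0;
}
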
@@ -713,7 +755,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
        if (tp->srtt > 8 + 2)
                do_div(rate, tp->srtt);
 
-       sk->sk_pacing_rate = min_t(u64, rate, ~0U);
+       /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
+        * without any lock. We want to make sure the compiler won't store
+        * intermediate values in this location.
+        */
+       ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
+                                               sk->sk_max_pacing_rate);
 }
 
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
@@ -1284,7 +1331,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                tp->lost_cnt_hint -= tcp_skb_pcount(prev);
        }
 
-       TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
+       TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+               TCP_SKB_CB(prev)->end_seq++;
+
        if (skb == tcp_highest_sack(sk))
                tcp_advance_highest_sack(sk, skb);
 
@@ -2970,7 +3020,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb;
        u32 now = tcp_time_stamp;
-       int fully_acked = true;
+       bool fully_acked = true;
        int flag = 0;
        u32 pkts_acked = 0;
        u32 reord = tp->packets_out;
@@ -3288,7 +3338,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
                        tcp_init_cwnd_reduction(sk, true);
                        tcp_set_ca_state(sk, TCP_CA_CWR);
                        tcp_end_cwnd_reduction(sk);
-                       tcp_set_ca_state(sk, TCP_CA_Open);
+                       tcp_try_keep_open(sk);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPLOSSPROBERECOVERY);
                }
@@ -4701,15 +4751,7 @@ static void tcp_new_space(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (tcp_should_expand_sndbuf(sk)) {
-               int sndmem = SKB_TRUESIZE(max_t(u32,
-                                               tp->rx_opt.mss_clamp,
-                                               tp->mss_cache) +
-                                         MAX_TCP_HEADER);
-               int demanded = max_t(unsigned int, tp->snd_cwnd,
-                                    tp->reordering + 1);
-               sndmem *= 2 * demanded;
-               if (sndmem > sk->sk_sndbuf)
-                       sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
+               tcp_sndbuf_expand(sk);
                tp->snd_cwnd_stamp = tcp_time_stamp;
        }
 
@@ -5674,8 +5716,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                        tcp_init_congestion_control(sk);
 
                        tcp_mtup_init(sk);
-                       tcp_init_buffer_space(sk);
                        tp->copied_seq = tp->rcv_nxt;
+                       tcp_init_buffer_space(sk);
                }
                smp_mb();
                tcp_set_state(sk, TCP_ESTABLISHED);
@@ -5709,6 +5751,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                } else
                        tcp_init_metrics(sk);
 
+               tcp_update_pacing_rate(sk);
+
                /* Prevent spurious tcp_cwnd_restart() on first data packet */
                tp->lsndtime = tcp_time_stamp;
 
index b14266bb91eb5e3b1f43f3329dc2510c8be26a19..300ab2c93f29270d4dbdd2d1b65b64d8dc82200a 100644 (file)
@@ -835,11 +835,11 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
        skb = tcp_make_synack(sk, dst, req, NULL);
 
        if (skb) {
-               __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
+               __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
                skb_set_queue_mapping(skb, queue_mapping);
-               err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
-                                           ireq->rmt_addr,
+               err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
+                                           ireq->ir_rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
                if (!tcp_rsk(req)->snt_synack && !err)
@@ -972,7 +972,7 @@ static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
 {
        union tcp_md5_addr *addr;
 
-       addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
+       addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
        return tcp_md5_do_lookup(sk, addr, AF_INET);
 }
 
@@ -1149,8 +1149,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
                saddr = inet_sk(sk)->inet_saddr;
                daddr = inet_sk(sk)->inet_daddr;
        } else if (req) {
-               saddr = inet_rsk(req)->loc_addr;
-               daddr = inet_rsk(req)->rmt_addr;
+               saddr = inet_rsk(req)->ir_loc_addr;
+               daddr = inet_rsk(req)->ir_rmt_addr;
        } else {
                const struct iphdr *iph = ip_hdr(skb);
                saddr = iph->saddr;
@@ -1366,8 +1366,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
                kfree_skb(skb_synack);
                return -1;
        }
-       err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
-                                   ireq->rmt_addr, ireq->opt);
+       err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+                                   ireq->ir_rmt_addr, ireq->opt);
        err = net_xmit_eval(err);
        if (!err)
                tcp_rsk(req)->snt_synack = tcp_time_stamp;
@@ -1410,8 +1410,8 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
        inet_csk(child)->icsk_af_ops->rebuild_header(child);
        tcp_init_congestion_control(child);
        tcp_mtup_init(child);
-       tcp_init_buffer_space(child);
        tcp_init_metrics(child);
+       tcp_init_buffer_space(child);
 
        /* Queue the data carried in the SYN packet. We need to first
         * bump skb's refcnt because the caller will attempt to free it.
@@ -1502,8 +1502,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        tcp_openreq_init(req, &tmp_opt, skb);
 
        ireq = inet_rsk(req);
-       ireq->loc_addr = daddr;
-       ireq->rmt_addr = saddr;
+       ireq->ir_loc_addr = daddr;
+       ireq->ir_rmt_addr = saddr;
        ireq->no_srccheck = inet_sk(sk)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
 
@@ -1578,15 +1578,15 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
            fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
 
        if (skb_synack) {
-               __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
+               __tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr);
                skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
        } else
                goto drop_and_free;
 
        if (likely(!do_fastopen)) {
                int err;
-               err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
-                    ireq->rmt_addr, ireq->opt);
+               err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
+                    ireq->ir_rmt_addr, ireq->opt);
                err = net_xmit_eval(err);
                if (err || want_cookie)
                        goto drop_and_free;
@@ -1644,9 +1644,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newtp                 = tcp_sk(newsk);
        newinet               = inet_sk(newsk);
        ireq                  = inet_rsk(req);
-       newinet->inet_daddr   = ireq->rmt_addr;
-       newinet->inet_rcv_saddr = ireq->loc_addr;
-       newinet->inet_saddr           = ireq->loc_addr;
+       newinet->inet_daddr   = ireq->ir_rmt_addr;
+       newinet->inet_rcv_saddr = ireq->ir_loc_addr;
+       newinet->inet_saddr           = ireq->ir_loc_addr;
        inet_opt              = ireq->opt;
        rcu_assign_pointer(newinet->inet_opt, inet_opt);
        ireq->opt             = NULL;
@@ -2194,18 +2194,6 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
 #ifdef CONFIG_PROC_FS
 /* Proc filesystem TCP sock list dumping. */
 
-static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
-{
-       return hlist_nulls_empty(head) ? NULL :
-               list_entry(head->first, struct inet_timewait_sock, tw_node);
-}
-
-static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
-{
-       return !is_a_nulls(tw->tw_node.next) ?
-               hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
-}
-
 /*
  * Get next listener socket follow cur.  If cur is NULL, get first socket
  * starting from bucket given in st->bucket; when st->bucket is zero the
@@ -2309,10 +2297,9 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
        return rc;
 }
 
-static inline bool empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(const struct tcp_iter_state *st)
 {
-       return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
-               hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
+       return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
 }
 
 /*
@@ -2329,7 +2316,6 @@ static void *established_get_first(struct seq_file *seq)
        for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
                struct sock *sk;
                struct hlist_nulls_node *node;
-               struct inet_timewait_sock *tw;
                spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
 
                /* Lockless fast path for the common case of empty buckets */
@@ -2345,18 +2331,7 @@ static void *established_get_first(struct seq_file *seq)
                        rc = sk;
                        goto out;
                }
-               st->state = TCP_SEQ_STATE_TIME_WAIT;
-               inet_twsk_for_each(tw, node,
-                                  &tcp_hashinfo.ehash[st->bucket].twchain) {
-                       if (tw->tw_family != st->family ||
-                           !net_eq(twsk_net(tw), net)) {
-                               continue;
-                       }
-                       rc = tw;
-                       goto out;
-               }
                spin_unlock_bh(lock);
-               st->state = TCP_SEQ_STATE_ESTABLISHED;
        }
 out:
        return rc;
@@ -2365,7 +2340,6 @@ out:
 static void *established_get_next(struct seq_file *seq, void *cur)
 {
        struct sock *sk = cur;
-       struct inet_timewait_sock *tw;
        struct hlist_nulls_node *node;
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
@@ -2373,45 +2347,16 @@ static void *established_get_next(struct seq_file *seq, void *cur)
        ++st->num;
        ++st->offset;
 
-       if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
-               tw = cur;
-               tw = tw_next(tw);
-get_tw:
-               while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
-                       tw = tw_next(tw);
-               }
-               if (tw) {
-                       cur = tw;
-                       goto out;
-               }
-               spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
-               st->state = TCP_SEQ_STATE_ESTABLISHED;
-
-               /* Look for next non empty bucket */
-               st->offset = 0;
-               while (++st->bucket <= tcp_hashinfo.ehash_mask &&
-                               empty_bucket(st))
-                       ;
-               if (st->bucket > tcp_hashinfo.ehash_mask)
-                       return NULL;
-
-               spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
-               sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
-       } else
-               sk = sk_nulls_next(sk);
+       sk = sk_nulls_next(sk);
 
        sk_nulls_for_each_from(sk, node) {
                if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
-                       goto found;
+                       return sk;
        }
 
-       st->state = TCP_SEQ_STATE_TIME_WAIT;
-       tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
-       goto get_tw;
-found:
-       cur = sk;
-out:
-       return cur;
+       spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
+       ++st->bucket;
+       return established_get_first(seq);
 }
 
 static void *established_get_idx(struct seq_file *seq, loff_t pos)
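With timewait sockets folded into the regular established hash, the /proc iterator no longer needs a separate TIME_WAIT state: when a chain is exhausted, established_get_next() simply drops the bucket lock, advances the bucket and re-enters established_get_first(). The same two-level iteration pattern in miniature, with plain singly linked lists standing in for the ehash chains and their locks.

#include <stddef.h>
#include <stdio.h>

struct node {
        int value;
        struct node *next;
};

#define NBUCKETS 4

static struct node *table[NBUCKETS];

/* First entry at or after bucket *b. */
static struct node *get_first(int *b)
{
        for (; *b < NBUCKETS; (*b)++)
                if (table[*b])
                        return table[*b];
        return NULL;
}

/* Successor of cur: the rest of its chain, else fall through to the next
 * non-empty bucket, which is the shape established_get_next() now has.
 */
static struct node *get_next(struct node *cur, int *b)
{
        if (cur->next)
                return cur->next;
        (*b)++;
        return get_first(b);
}

int main(void)
{
        struct node a = { 1, NULL }, c = { 3, NULL }, d = { 4, &c };
        int bucket = 0;

        table[0] = &a;
        table[2] = &d;
        for (struct node *n = get_first(&bucket); n; n = get_next(n, &bucket))
                printf("%d\n", n->value);
        return 0;
}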
@@ -2464,10 +2409,9 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
                if (rc)
                        break;
                st->bucket = 0;
+               st->state = TCP_SEQ_STATE_ESTABLISHED;
                /* Fallthrough */
        case TCP_SEQ_STATE_ESTABLISHED:
-       case TCP_SEQ_STATE_TIME_WAIT:
-               st->state = TCP_SEQ_STATE_ESTABLISHED;
                if (st->bucket > tcp_hashinfo.ehash_mask)
                        break;
                rc = established_get_first(seq);
@@ -2524,7 +2468,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                }
                break;
        case TCP_SEQ_STATE_ESTABLISHED:
-       case TCP_SEQ_STATE_TIME_WAIT:
                rc = established_get_next(seq, v);
                break;
        }
@@ -2548,7 +2491,6 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
                if (v != SEQ_START_TOKEN)
                        spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
                break;
-       case TCP_SEQ_STATE_TIME_WAIT:
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
                        spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
@@ -2606,10 +2548,10 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
                i,
-               ireq->loc_addr,
+               ireq->ir_loc_addr,
                ntohs(inet_sk(sk)->inet_sport),
-               ireq->rmt_addr,
-               ntohs(ireq->rmt_port),
+               ireq->ir_rmt_addr,
+               ntohs(ireq->ir_rmt_port),
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
@@ -2707,6 +2649,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
 static int tcp4_seq_show(struct seq_file *seq, void *v)
 {
        struct tcp_iter_state *st;
+       struct sock *sk = v;
        int len;
 
        if (v == SEQ_START_TOKEN) {
@@ -2721,14 +2664,14 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
        case TCP_SEQ_STATE_ESTABLISHED:
-               get_tcp4_sock(v, seq, st->num, &len);
+               if (sk->sk_state == TCP_TIME_WAIT)
+                       get_timewait4_sock(v, seq, st->num, &len);
+               else
+                       get_tcp4_sock(v, seq, st->num, &len);
                break;
        case TCP_SEQ_STATE_OPENREQ:
                get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
                break;
-       case TCP_SEQ_STATE_TIME_WAIT:
-               get_timewait4_sock(v, seq, st->num, &len);
-               break;
        }
        seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
 out:
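Since timewait minisockets now come out of the same iterator, tcp4_seq_show() distinguishes them purely by sk->sk_state, and consumers of /proc/net/tcp keep seeing one merged listing. A small reader that tallies entries by the hexadecimal state column (state 06 is TIME_WAIT); the file path and column layout are the long-standing procfs format, assumed unchanged here.

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/net/tcp", "r");
        char line[512];
        int timewait = 0, other = 0;

        if (!f) {
                perror("/proc/net/tcp");
                return 1;
        }
        if (!fgets(line, sizeof(line), f)) {    /* skip the header row */
                fclose(f);
                return 0;
        }
        while (fgets(line, sizeof(line), f)) {
                unsigned int st;

                /* columns: sl, local, remote, st; only the state is needed */
                if (sscanf(line, "%*d: %*64[0-9A-Fa-f:] %*64[0-9A-Fa-f:] %x", &st) == 1) {
                        if (st == 0x06)         /* TCP_TIME_WAIT */
                                timewait++;
                        else
                                other++;
                }
        }
        fclose(f);
        printf("%d timewait, %d other sockets\n", timewait, other);
        return 0;
}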
@@ -2806,6 +2749,7 @@ struct proto tcp_prot = {
        .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
+       .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
index 559d4ae6ebf4ed32ff80605e3d1b5c3792b1752c..03e9154f7e687efef63c91878e33427672bc4036 100644 (file)
@@ -6,15 +6,10 @@
 #include <linux/memcontrol.h>
 #include <linux/module.h>
 
-static inline struct tcp_memcontrol *tcp_from_cgproto(struct cg_proto *cg_proto)
-{
-       return container_of(cg_proto, struct tcp_memcontrol, cg_proto);
-}
-
 static void memcg_tcp_enter_memory_pressure(struct sock *sk)
 {
        if (sk->sk_cgrp->memory_pressure)
-               *sk->sk_cgrp->memory_pressure = 1;
+               sk->sk_cgrp->memory_pressure = 1;
 }
 EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
 
@@ -27,34 +22,24 @@ int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
         */
        struct res_counter *res_parent = NULL;
        struct cg_proto *cg_proto, *parent_cg;
-       struct tcp_memcontrol *tcp;
        struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-       struct net *net = current->nsproxy->net_ns;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return 0;
 
-       tcp = tcp_from_cgproto(cg_proto);
-
-       tcp->tcp_prot_mem[0] = net->ipv4.sysctl_tcp_mem[0];
-       tcp->tcp_prot_mem[1] = net->ipv4.sysctl_tcp_mem[1];
-       tcp->tcp_prot_mem[2] = net->ipv4.sysctl_tcp_mem[2];
-       tcp->tcp_memory_pressure = 0;
+       cg_proto->sysctl_mem[0] = sysctl_tcp_mem[0];
+       cg_proto->sysctl_mem[1] = sysctl_tcp_mem[1];
+       cg_proto->sysctl_mem[2] = sysctl_tcp_mem[2];
+       cg_proto->memory_pressure = 0;
+       cg_proto->memcg = memcg;
 
        parent_cg = tcp_prot.proto_cgroup(parent);
        if (parent_cg)
-               res_parent = parent_cg->memory_allocated;
-
-       res_counter_init(&tcp->tcp_memory_allocated, res_parent);
-       percpu_counter_init(&tcp->tcp_sockets_allocated, 0);
+               res_parent = &parent_cg->memory_allocated;
 
-       cg_proto->enter_memory_pressure = memcg_tcp_enter_memory_pressure;
-       cg_proto->memory_pressure = &tcp->tcp_memory_pressure;
-       cg_proto->sysctl_mem = tcp->tcp_prot_mem;
-       cg_proto->memory_allocated = &tcp->tcp_memory_allocated;
-       cg_proto->sockets_allocated = &tcp->tcp_sockets_allocated;
-       cg_proto->memcg = memcg;
+       res_counter_init(&cg_proto->memory_allocated, res_parent);
+       percpu_counter_init(&cg_proto->sockets_allocated, 0);
 
        return 0;
 }
@@ -63,21 +48,17 @@ EXPORT_SYMBOL(tcp_init_cgroup);
 void tcp_destroy_cgroup(struct mem_cgroup *memcg)
 {
        struct cg_proto *cg_proto;
-       struct tcp_memcontrol *tcp;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return;
 
-       tcp = tcp_from_cgproto(cg_proto);
-       percpu_counter_destroy(&tcp->tcp_sockets_allocated);
+       percpu_counter_destroy(&cg_proto->sockets_allocated);
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
 static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 {
-       struct net *net = current->nsproxy->net_ns;
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
        u64 old_lim;
        int i;
@@ -90,16 +71,14 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
        if (val > RES_COUNTER_MAX)
                val = RES_COUNTER_MAX;
 
-       tcp = tcp_from_cgproto(cg_proto);
-
-       old_lim = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-       ret = res_counter_set_limit(&tcp->tcp_memory_allocated, val);
+       old_lim = res_counter_read_u64(&cg_proto->memory_allocated, RES_LIMIT);
+       ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
        if (ret)
                return ret;
 
        for (i = 0; i < 3; i++)
-               tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
-                                            net->ipv4.sysctl_tcp_mem[i]);
+               cg_proto->sysctl_mem[i] = min_t(long, val >> PAGE_SHIFT,
+                                               sysctl_tcp_mem[i]);
 
        if (val == RES_COUNTER_MAX)
                clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
@@ -156,28 +135,24 @@ static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
 
 static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
 {
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return default_val;
 
-       tcp = tcp_from_cgproto(cg_proto);
-       return res_counter_read_u64(&tcp->tcp_memory_allocated, type);
+       return res_counter_read_u64(&cg_proto->memory_allocated, type);
 }
 
 static u64 tcp_read_usage(struct mem_cgroup *memcg)
 {
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
 
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;
 
-       tcp = tcp_from_cgproto(cg_proto);
-       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
+       return res_counter_read_u64(&cg_proto->memory_allocated, RES_USAGE);
 }
 
 static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
@@ -205,54 +180,25 @@ static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
 static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
 {
        struct mem_cgroup *memcg;
-       struct tcp_memcontrol *tcp;
        struct cg_proto *cg_proto;
 
        memcg = mem_cgroup_from_css(css);
        cg_proto = tcp_prot.proto_cgroup(memcg);
        if (!cg_proto)
                return 0;
-       tcp = tcp_from_cgproto(cg_proto);
 
        switch (event) {
        case RES_MAX_USAGE:
-               res_counter_reset_max(&tcp->tcp_memory_allocated);
+               res_counter_reset_max(&cg_proto->memory_allocated);
                break;
        case RES_FAILCNT:
-               res_counter_reset_failcnt(&tcp->tcp_memory_allocated);
+               res_counter_reset_failcnt(&cg_proto->memory_allocated);
                break;
        }
 
        return 0;
 }
 
-unsigned long long tcp_max_memory(const struct mem_cgroup *memcg)
-{
-       struct tcp_memcontrol *tcp;
-       struct cg_proto *cg_proto;
-
-       cg_proto = tcp_prot.proto_cgroup((struct mem_cgroup *)memcg);
-       if (!cg_proto)
-               return 0;
-
-       tcp = tcp_from_cgproto(cg_proto);
-       return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
-}
-
-void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx)
-{
-       struct tcp_memcontrol *tcp;
-       struct cg_proto *cg_proto;
-
-       cg_proto = tcp_prot.proto_cgroup(memcg);
-       if (!cg_proto)
-               return;
-
-       tcp = tcp_from_cgproto(cg_proto);
-
-       tcp->tcp_prot_mem[idx] = val;
-}
-
 static struct cftype tcp_files[] = {
        {
                .name = "kmem.tcp.limit_in_bytes",
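The memcg hunks above delete the tcp_memcontrol wrapper outright: the res_counter, percpu counter, pressure flag and three-element limit vector move straight into struct cg_proto, so every container_of() hop and per-field pointer goes away. The before/after shape, reduced to a toy example with illustrative field names rather than the kernel definitions.

#include <stdio.h>

/* Before: the protocol-agnostic part held pointers into a TCP-specific
 * wrapper, recovered with container_of() at every use.
 */
struct cg_proto_old {
        long *sysctl_mem;               /* -> tcp_prot_mem[] */
        int  *memory_pressure;          /* -> tcp_memory_pressure */
};

struct tcp_memcontrol {
        struct cg_proto_old cg_proto;
        long tcp_prot_mem[3];
        int  tcp_memory_pressure;
};

/* After: one structure, no indirection. */
struct cg_proto_new {
        long sysctl_mem[3];
        int  memory_pressure;
};

int main(void)
{
        struct tcp_memcontrol old = { .tcp_prot_mem = { 10, 20, 30 } };
        struct cg_proto_new   new_cg = { .sysctl_mem = { 10, 20, 30 } };

        old.cg_proto.sysctl_mem = old.tcp_prot_mem;
        old.cg_proto.memory_pressure = &old.tcp_memory_pressure;

        printf("old limit: %ld, new limit: %ld\n",
               old.cg_proto.sysctl_mem[2], new_cg.sysctl_mem[2]);
        return 0;
}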
index 52f3c6b971d2def6854cf7d489022ad499ee0d24..4a2a84110dfb2ef68f8154d23958fbddae772775 100644 (file)
@@ -215,13 +215,15 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
        addr.family = req->rsk_ops->family;
        switch (addr.family) {
        case AF_INET:
-               addr.addr.a4 = inet_rsk(req)->rmt_addr;
+               addr.addr.a4 = inet_rsk(req)->ir_rmt_addr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
-               hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
+               *(struct in6_addr *)addr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr;
+               hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr);
                break;
+#endif
        default:
                return NULL;
        }
@@ -240,7 +242,6 @@ static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
 
 static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
 {
-       struct inet6_timewait_sock *tw6;
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
@@ -252,11 +253,12 @@ static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock
                addr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               tw6 = inet6_twsk((struct sock *)tw);
-               *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
-               hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
+               *(struct in6_addr *)addr.addr.a6 = tw->tw_v6_daddr;
+               hash = ipv6_addr_hash(&tw->tw_v6_daddr);
                break;
+#endif
        default:
                return NULL;
        }
@@ -288,10 +290,12 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                addr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
+#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
-               hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
+               *(struct in6_addr *)addr.addr.a6 = sk->sk_v6_daddr;
+               hash = ipv6_addr_hash(&sk->sk_v6_daddr);
                break;
+#endif
        default:
                return NULL;
        }
index 58a3e69aef6440d36061ea9e6291152442954239..97b684159861bc451219bf66359afa288049a63e 100644 (file)
@@ -293,12 +293,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 #if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);
-                       struct inet6_timewait_sock *tw6;
 
-                       tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
-                       tw6 = inet6_twsk((struct sock *)tw);
-                       tw6->tw_v6_daddr = np->daddr;
-                       tw6->tw_v6_rcv_saddr = np->rcv_saddr;
+                       tw->tw_v6_daddr = sk->sk_v6_daddr;
+                       tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_ipv6only = np->ipv6only;
                }
index 3a7525e6c08633dad9424bc1a9cb13bf26dfec1e..a7a5583eab04aff966521bcc6de4075fffd27e70 100644 (file)
@@ -14,7 +14,7 @@
 #include <net/tcp.h>
 #include <net/protocol.h>
 
-struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
+struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
@@ -56,6 +56,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
                               SKB_GSO_TCP_ECN |
                               SKB_GSO_TCPV6 |
                               SKB_GSO_GRE |
+                              SKB_GSO_IPIP |
+                              SKB_GSO_SIT |
                               SKB_GSO_MPLS |
                               SKB_GSO_UDP_TUNNEL |
                               0) ||
@@ -139,7 +141,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
 out:
        return segs;
 }
-EXPORT_SYMBOL(tcp_tso_segment);
+EXPORT_SYMBOL(tcp_gso_segment);
 
 struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 {
@@ -320,7 +322,7 @@ static int tcp4_gro_complete(struct sk_buff *skb)
 static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_send_check =       tcp_v4_gso_send_check,
-               .gso_segment    =       tcp_tso_segment,
+               .gso_segment    =       tcp_gso_segment,
                .gro_receive    =       tcp4_gro_receive,
                .gro_complete   =       tcp4_gro_complete,
        },
index e6bb8256e59f3738280a022f250f6fefd621cb38..672854664ff5c1d36783ac1bfb72fea481648ca1 100644 (file)
@@ -637,6 +637,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
        unsigned int size = 0;
        unsigned int eff_sacks;
 
+       opts->options = 0;
+
 #ifdef CONFIG_TCP_MD5SIG
        *md5 = tp->af_specific->md5_lookup(sk, sk);
        if (unlikely(*md5)) {
@@ -848,15 +850,15 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
        BUG_ON(!skb || !tcp_skb_pcount(skb));
 
-       /* If congestion control is doing timestamping, we must
-        * take such a timestamp before we potentially clone/copy.
-        */
-       if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
-               __net_timestamp(skb);
-
-       if (likely(clone_it)) {
+       if (clone_it) {
                const struct sk_buff *fclone = skb + 1;
 
+               /* If congestion control is doing timestamping, we must
+                * take such a timestamp before we potentially clone/copy.
+                */
+               if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
+                       __net_timestamp(skb);
+
                if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
                             fclone->fclone == SKB_FCLONE_CLONE))
                        NET_INC_STATS_BH(sock_net(sk),
@@ -984,8 +986,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
                                 unsigned int mss_now)
 {
-       if (skb->len <= mss_now || !sk_can_gso(sk) ||
-           skb->ip_summed == CHECKSUM_NONE) {
+       /* Make sure we own this skb before messing with gso_size/gso_segs */
+       WARN_ON_ONCE(skb_cloned(skb));
+
+       if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
                /* Avoid the costly divide in the normal
                 * non-TSO case.
                 */
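tcp_set_skb_tso_segs() now warns if handed a cloned skb, and tcp_fragment() plus the retransmit path call skb_unclone() first: a clone shares its data area (and the skb_shared_info holding gso_size/gso_segs) with its sibling, so the buffer must be made private before those fields are written. The general copy-before-write pattern, modeled here with a plain refcounted buffer rather than real skbs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A shared payload with a reference count, standing in for skb->data plus
 * skb_shared_info.
 */
struct shared_buf {
        int refcnt;
        size_t len;
        unsigned char *data;
};

struct buf_handle {
        struct shared_buf *shared;
};

/* "Unclone": if anyone else holds the payload, switch this handle to a
 * private copy before the caller writes to it.
 */
static int buf_unclone(struct buf_handle *h)
{
        struct shared_buf *old = h->shared;
        struct shared_buf *priv;

        if (old->refcnt == 1)
                return 0;                       /* already exclusive */

        priv = malloc(sizeof(*priv));
        if (!priv)
                return -1;
        priv->refcnt = 1;
        priv->len = old->len;
        priv->data = malloc(old->len);
        if (!priv->data) {
                free(priv);
                return -1;
        }
        memcpy(priv->data, old->data, old->len);
        old->refcnt--;
        h->shared = priv;
        return 0;
}

int main(void)
{
        struct shared_buf sb = { .refcnt = 2, .len = 4,
                                 .data = (unsigned char *)strdup("gso") };
        struct buf_handle h = { &sb };

        if (buf_unclone(&h) == 0)
                h.shared->data[0] = 'G';        /* safe: private copy */
        printf("%s vs %s\n", (char *)h.shared->data, (char *)sb.data);
        return 0;
}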
@@ -1065,9 +1069,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
        if (nsize < 0)
                nsize = 0;
 
-       if (skb_cloned(skb) &&
-           skb_is_nonlinear(skb) &&
-           pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+       if (skb_unclone(skb, GFP_ATOMIC))
                return -ENOMEM;
 
        /* Get a new skb... force flag on. */
@@ -2342,6 +2344,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                int oldpcount = tcp_skb_pcount(skb);
 
                if (unlikely(oldpcount > 1)) {
+                       if (skb_unclone(skb, GFP_ATOMIC))
+                               return -ENOMEM;
                        tcp_init_tso_segs(sk, skb, cur_mss);
                        tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
                }
@@ -2349,21 +2353,6 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 
        tcp_retrans_try_collapse(sk, skb, cur_mss);
 
-       /* Some Solaris stacks overoptimize and ignore the FIN on a
-        * retransmit when old data is attached.  So strip it off
-        * since it is cheap to do so and saves bytes on the network.
-        */
-       if (skb->len > 0 &&
-           (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
-           tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
-               if (!pskb_trim(skb, 0)) {
-                       /* Reuse, even though it does some unnecessary work */
-                       tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
-                                            TCP_SKB_CB(skb)->tcp_flags);
-                       skb->ip_summed = CHECKSUM_NONE;
-               }
-       }
-
        /* Make a copy, if the first transmission SKB clone we made
         * is still in somebody's hands, else make a clone.
         */
@@ -2732,8 +2721,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
        th->syn = 1;
        th->ack = 1;
        TCP_ECN_make_synack(req, th);
-       th->source = ireq->loc_port;
-       th->dest = ireq->rmt_port;
+       th->source = htons(ireq->ir_num);
+       th->dest = ireq->ir_rmt_port;
        /* Setting of flags are superfluous here for callers (and ECE is
         * not even correctly set)
         */
index 611beab38a004aae36730d18bffb62928a2321b1..8b97d71e193b97cf18e424f01be911e34a614b32 100644 (file)
@@ -101,22 +101,6 @@ static inline int tcp_probe_avail(void)
                si4.sin_addr.s_addr = inet->inet_##mem##addr;   \
        } while (0)                                             \
 
-#if IS_ENABLED(CONFIG_IPV6)
-#define tcp_probe_copy_fl_to_si6(inet, si6, mem)               \
-       do {                                                    \
-               struct ipv6_pinfo *pi6 = inet->pinet6;          \
-               si6.sin6_family = AF_INET6;                     \
-               si6.sin6_port = inet->inet_##mem##port;         \
-               si6.sin6_addr = pi6->mem##addr;                 \
-               si6.sin6_flowinfo = 0; /* No need here. */      \
-               si6.sin6_scope_id = 0;  /* No need here. */     \
-       } while (0)
-#else
-#define tcp_probe_copy_fl_to_si6(fl, si6, mem)                 \
-       do {                                                    \
-               memset(&si6, 0, sizeof(si6));                   \
-       } while (0)
-#endif
 
 /*
  * Hook inserted to be called before each receive packet.
@@ -147,8 +131,17 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
                                break;
                        case AF_INET6:
-                               tcp_probe_copy_fl_to_si6(inet, p->src.v6, s);
-                               tcp_probe_copy_fl_to_si6(inet, p->dst.v6, d);
+                               memset(&p->src.v6, 0, sizeof(p->src.v6));
+                               memset(&p->dst.v6, 0, sizeof(p->dst.v6));
+#if IS_ENABLED(CONFIG_IPV6)
+                               p->src.v6.sin6_family = AF_INET6;
+                               p->src.v6.sin6_port = inet->inet_sport;
+                               p->src.v6.sin6_addr = inet6_sk(sk)->saddr;
+
+                               p->dst.v6.sin6_family = AF_INET6;
+                               p->dst.v6.sin6_port = inet->inet_dport;
+                               p->dst.v6.sin6_addr = sk->sk_v6_daddr;
+#endif
                                break;
                        default:
                                BUG();
index 4b85e6f636c9e66dbb1cc0f4acd197c33cb6cfb6..af07b5b23ebf11e4df7cf3383f6508f985a5cf94 100644 (file)
@@ -374,9 +374,8 @@ void tcp_retransmit_timer(struct sock *sk)
                }
 #if IS_ENABLED(CONFIG_IPV6)
                else if (sk->sk_family == AF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(sk);
                        LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"),
-                                      &np->daddr,
+                                      &sk->sk_v6_daddr,
                                       ntohs(inet->inet_dport), inet->inet_num,
                                       tp->snd_una, tp->snd_nxt);
                }
index 6c0eea2f8249bf6b08c85395c0ad4b4d8ef52b39..0531b99d8637bac1c4f19537a351f060fffa56ed 100644 (file)
@@ -15,10 +15,10 @@ struct vegas {
        u32     baseRTT;        /* the min of all Vegas RTT measurements seen (in usec) */
 };
 
-extern void tcp_vegas_init(struct sock *sk);
-extern void tcp_vegas_state(struct sock *sk, u8 ca_state);
-extern void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
-extern void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
-extern void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
+void tcp_vegas_init(struct sock *sk);
+void tcp_vegas_state(struct sock *sk, u8 ca_state);
+void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
+void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
+void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
 
 #endif /* __TCP_VEGAS_H */
index 0ca44df51ee94a2875427e5c9d3b1c77434f5f40..89909dd730ddd65a4eac4f18e8a38925962b8534 100644 (file)
 #include <linux/seq_file.h>
 #include <net/net_namespace.h>
 #include <net/icmp.h>
+#include <net/inet_hashtables.h>
 #include <net/route.h>
 #include <net/checksum.h>
 #include <net/xfrm.h>
@@ -219,7 +220,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                unsigned short first, last;
                DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
 
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(net, &low, &high);
                remaining = (high - low) + 1;
 
                rand = net_random();
@@ -406,6 +407,18 @@ static inline int compute_score2(struct sock *sk, struct net *net,
        return score;
 }
 
+static unsigned int udp_ehashfn(struct net *net, const __be32 laddr,
+                                const __u16 lport, const __be32 faddr,
+                                const __be16 fport)
+{
+       static u32 udp_ehash_secret __read_mostly;
+
+       net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret));
+
+       return __inet_ehashfn(laddr, lport, faddr, fport,
+                             udp_ehash_secret + net_hash_mix(net));
+}
+
 
 /* called with read_rcu_lock() */
 static struct sock *udp4_lib_lookup2(struct net *net,
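udp_ehashfn() now keeps its own secret, generated on first use by net_get_random_once(), rather than relying on the globally initialized inet_ehashfn() secret. The lazy-secret idiom translated to userspace; pthread_once() and the toy mix function stand in for the kernel's static-key machinery and jhash.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t udp_ehash_secret;
static pthread_once_t secret_once = PTHREAD_ONCE_INIT;

static void init_secret(void)
{
        /* The kernel uses get_random_bytes(); rand() is only for the sketch. */
        srand((unsigned)time(NULL));
        udp_ehash_secret = (uint32_t)rand();
}

/* Hash the 4-tuple with a per-run secret so bucket placement is not
 * predictable from addresses and ports alone.
 */
static uint32_t udp_ehashfn(uint32_t laddr, uint16_t lport,
                            uint32_t faddr, uint16_t fport)
{
        pthread_once(&secret_once, init_secret);

        /* stand-in mix; the kernel uses __inet_ehashfn()/jhash */
        return (laddr ^ faddr ^ ((uint32_t)lport << 16 | fport)) * 2654435761u
                + udp_ehash_secret;
}

int main(void)
{
        printf("%u\n", (unsigned)udp_ehashfn(0x7f000001, 5353, 0x7f000001, 40000));
        return 0;
}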
@@ -429,8 +442,8 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               hash = inet_ehashfn(net, daddr, hnum,
-                                                   saddr, sport);
+                               hash = udp_ehashfn(net, daddr, hnum,
+                                                  saddr, sport);
                                matches = 1;
                        }
                } else if (score == badness && reuseport) {
@@ -510,8 +523,8 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               hash = inet_ehashfn(net, daddr, hnum,
-                                                   saddr, sport);
+                               hash = udp_ehashfn(net, daddr, hnum,
+                                                  saddr, sport);
                                matches = 1;
                        }
                } else if (score == badness && reuseport) {
@@ -565,6 +578,26 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
 }
 EXPORT_SYMBOL_GPL(udp4_lib_lookup);
 
+static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
+                                      __be16 loc_port, __be32 loc_addr,
+                                      __be16 rmt_port, __be32 rmt_addr,
+                                      int dif, unsigned short hnum)
+{
+       struct inet_sock *inet = inet_sk(sk);
+
+       if (!net_eq(sock_net(sk), net) ||
+           udp_sk(sk)->udp_port_hash != hnum ||
+           (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
+           (inet->inet_dport != rmt_port && inet->inet_dport) ||
+           (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
+           ipv6_only_sock(sk) ||
+           (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+               return false;
+       if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif))
+               return false;
+       return true;
+}
+
 static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
                                             __be16 loc_port, __be32 loc_addr,
                                             __be16 rmt_port, __be32 rmt_addr,
@@ -575,20 +608,11 @@ static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
        unsigned short hnum = ntohs(loc_port);
 
        sk_nulls_for_each_from(s, node) {
-               struct inet_sock *inet = inet_sk(s);
-
-               if (!net_eq(sock_net(s), net) ||
-                   udp_sk(s)->udp_port_hash != hnum ||
-                   (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
-                   (inet->inet_dport != rmt_port && inet->inet_dport) ||
-                   (inet->inet_rcv_saddr &&
-                    inet->inet_rcv_saddr != loc_addr) ||
-                   ipv6_only_sock(s) ||
-                   (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
-                       continue;
-               if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
-                       continue;
-               goto found;
+               if (__udp_is_mcast_sock(net, s,
+                                       loc_port, loc_addr,
+                                       rmt_port, rmt_addr,
+                                       dif, hnum))
+                       goto found;
        }
        s = NULL;
 found:
@@ -855,6 +879,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
        ipc.opt = NULL;
        ipc.tx_flags = 0;
+       ipc.ttl = 0;
+       ipc.tos = -1;
 
        getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
 
@@ -938,7 +964,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                faddr = ipc.opt->opt.faddr;
                connected = 0;
        }
-       tos = RT_TOS(inet->tos);
+       tos = get_rttos(&ipc, inet);
        if (sock_flag(sk, SOCK_LOCALROUTE) ||
            (msg->msg_flags & MSG_DONTROUTE) ||
            (ipc.opt && ipc.opt->opt.is_strictroute)) {
@@ -1403,8 +1429,10 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        int rc;
 
-       if (inet_sk(sk)->inet_daddr)
+       if (inet_sk(sk)->inet_daddr) {
                sock_rps_save_rxhash(sk, skb);
+               sk_mark_napi_id(sk, skb);
+       }
 
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0) {
@@ -1528,7 +1556,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        rc = 0;
 
-       ipv4_pktinfo_prepare(skb);
+       ipv4_pktinfo_prepare(sk, skb);
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
@@ -1577,6 +1605,14 @@ static void flush_stack(struct sock **stack, unsigned int count,
                kfree_skb(skb1);
 }
 
+static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       dst_hold(dst);
+       sk->sk_rx_dst = dst;
+}
+
 /*
  *     Multicasts and broadcasts go to each listener.
  *
@@ -1705,16 +1741,32 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (udp4_csum_init(skb, uh, proto))
                goto csum_error;
 
-       if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
-               return __udp4_lib_mcast_deliver(net, skb, uh,
-                               saddr, daddr, udptable);
+       if (skb->sk) {
+               int ret;
+               sk = skb->sk;
+
+               if (unlikely(sk->sk_rx_dst == NULL))
+                       udp_sk_rx_dst_set(sk, skb);
 
-       sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+               ret = udp_queue_rcv_skb(sk, skb);
+
+               /* a return value > 0 means to resubmit the input, but
+                * it wants the return to be -protocol, or 0
+                */
+               if (ret > 0)
+                       return -ret;
+               return 0;
+       } else {
+               if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
+                       return __udp4_lib_mcast_deliver(net, skb, uh,
+                                       saddr, daddr, udptable);
+
+               sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
+       }
 
        if (sk != NULL) {
                int ret;
 
-               sk_mark_napi_id(sk, skb);
                ret = udp_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
@@ -1768,6 +1820,135 @@ drop:
        return 0;
 }
 
+/* We can only early demux multicast if there is a single matching socket.
+ * If more than one socket is found, NULL is returned.
+ */
+static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
+                                                 __be16 loc_port, __be32 loc_addr,
+                                                 __be16 rmt_port, __be32 rmt_addr,
+                                                 int dif)
+{
+       struct sock *sk, *result;
+       struct hlist_nulls_node *node;
+       unsigned short hnum = ntohs(loc_port);
+       unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
+       struct udp_hslot *hslot = &udp_table.hash[slot];
+
+       rcu_read_lock();
+begin:
+       count = 0;
+       result = NULL;
+       sk_nulls_for_each_rcu(sk, node, &hslot->head) {
+               if (__udp_is_mcast_sock(net, sk,
+                                       loc_port, loc_addr,
+                                       rmt_port, rmt_addr,
+                                       dif, hnum)) {
+                       result = sk;
+                       ++count;
+               }
+       }
+       /*
+        * if the nulls value we got at the end of this lookup is
+        * not the expected one, we must restart lookup.
+        * We probably met an item that was moved to another chain.
+        */
+       if (get_nulls_value(node) != slot)
+               goto begin;
+
+       if (result) {
+               if (count != 1 ||
+                   unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
+                       result = NULL;
+               else if (unlikely(!__udp_is_mcast_sock(net, result,
+                                                      loc_port, loc_addr,
+                                                      rmt_port, rmt_addr,
+                                                      dif, hnum))) {
+                       sock_put(result);
+                       result = NULL;
+               }
+       }
+       rcu_read_unlock();
+       return result;
+}
+
+/* For unicast we should only early demux connected sockets or we can
+ * break forwarding setups.  The chains here can be long so only check
+ * if the first socket is an exact match and if not move on.
+ */
+static struct sock *__udp4_lib_demux_lookup(struct net *net,
+                                           __be16 loc_port, __be32 loc_addr,
+                                           __be16 rmt_port, __be32 rmt_addr,
+                                           int dif)
+{
+       struct sock *sk, *result;
+       struct hlist_nulls_node *node;
+       unsigned short hnum = ntohs(loc_port);
+       unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum);
+       unsigned int slot2 = hash2 & udp_table.mask;
+       struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
+       INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr)
+       const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
+
+       rcu_read_lock();
+       result = NULL;
+       udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
+               if (INET_MATCH(sk, net, acookie,
+                              rmt_addr, loc_addr, ports, dif))
+                       result = sk;
+               /* Only check first socket in chain */
+               break;
+       }
+
+       if (result) {
+               if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
+                       result = NULL;
+               else if (unlikely(!INET_MATCH(sk, net, acookie,
+                                             rmt_addr, loc_addr,
+                                             ports, dif))) {
+                       sock_put(result);
+                       result = NULL;
+               }
+       }
+       rcu_read_unlock();
+       return result;
+}
+
+void udp_v4_early_demux(struct sk_buff *skb)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       const struct udphdr *uh = udp_hdr(skb);
+       struct sock *sk;
+       struct dst_entry *dst;
+       struct net *net = dev_net(skb->dev);
+       int dif = skb->dev->ifindex;
+
+       /* validate the packet */
+       if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
+               return;
+
+       if (skb->pkt_type == PACKET_BROADCAST ||
+           skb->pkt_type == PACKET_MULTICAST)
+               sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
+                                                  uh->source, iph->saddr, dif);
+       else if (skb->pkt_type == PACKET_HOST)
+               sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
+                                            uh->source, iph->saddr, dif);
+       else
+               return;
+
+       if (!sk)
+               return;
+
+       skb->sk = sk;
+       skb->destructor = sock_edemux;
+       dst = sk->sk_rx_dst;
+
+       if (dst)
+               dst = dst_check(dst, 0);
+       if (dst)
+               skb_dst_set_noref(skb, dst);
+}
+
 int udp_rcv(struct sk_buff *skb)
 {
        return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
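The two demux helpers added above are deliberately conservative: multicast packets are only bound early when exactly one socket matches (otherwise the normal per-listener delivery loop must run), and unicast packets only when the first entry on the exact-match chain is a fully connected 4-tuple match, so forwarding setups are never captured by a local socket. That policy, stripped to its essentials; the counting and matching helpers are placeholders for the RCU nulls-list walks.

#include <stdbool.h>
#include <stdio.h>

enum pkt_type { PACKET_HOST, PACKET_MULTICAST, PACKET_BROADCAST };

struct flow { unsigned int daddr, saddr; unsigned short dport, sport; };

/* Placeholders for the real hash-table walks. */
static int count_mcast_matches(const struct flow *fl) { (void)fl; return 1; }
static bool first_is_connected_match(const struct flow *fl) { (void)fl; return true; }

/* Returns true when it is safe to bind the packet to a socket before
 * routing: a unique multicast listener, or an exactly connected unicast
 * socket.  Anything else falls back to the normal lookup after routing.
 */
static bool can_early_demux(enum pkt_type type, const struct flow *fl)
{
        switch (type) {
        case PACKET_MULTICAST:
        case PACKET_BROADCAST:
                return count_mcast_matches(fl) == 1;
        case PACKET_HOST:
                return first_is_connected_match(fl);
        default:
                return false;
        }
}

int main(void)
{
        struct flow fl = { 0x0a000001, 0x0a000002, 53, 40000 };

        printf("early demux: %s\n", can_early_demux(PACKET_HOST, &fl) ? "yes" : "no");
        return 0;
}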
index 5a681e298b905127cdd5f0655d136754ff6eaabb..f3c27899f62b914f7b406094472a47e2fe2df613 100644 (file)
@@ -5,30 +5,30 @@
 #include <net/protocol.h>
 #include <net/inet_common.h>
 
-extern int     __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int );
-extern void    __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
+int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
+void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
 
-extern int     udp_v4_get_port(struct sock *sk, unsigned short snum);
+int udp_v4_get_port(struct sock *sk, unsigned short snum);
 
-extern int     udp_setsockopt(struct sock *sk, int level, int optname,
-                              char __user *optval, unsigned int optlen);
-extern int     udp_getsockopt(struct sock *sk, int level, int optname,
-                              char __user *optval, int __user *optlen);
+int udp_setsockopt(struct sock *sk, int level, int optname,
+                  char __user *optval, unsigned int optlen);
+int udp_getsockopt(struct sock *sk, int level, int optname,
+                  char __user *optval, int __user *optlen);
 
 #ifdef CONFIG_COMPAT
-extern int     compat_udp_setsockopt(struct sock *sk, int level, int optname,
-                                     char __user *optval, unsigned int optlen);
-extern int     compat_udp_getsockopt(struct sock *sk, int level, int optname,
-                                     char __user *optval, int __user *optlen);
+int compat_udp_setsockopt(struct sock *sk, int level, int optname,
+                         char __user *optval, unsigned int optlen);
+int compat_udp_getsockopt(struct sock *sk, int level, int optname,
+                         char __user *optval, int __user *optlen);
 #endif
-extern int     udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                           size_t len, int noblock, int flags, int *addr_len);
-extern int     udp_sendpage(struct sock *sk, struct page *page, int offset,
-                            size_t size, int flags);
-extern int     udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
-extern void    udp_destroy_sock(struct sock *sk);
+int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+               size_t len, int noblock, int flags, int *addr_len);
+int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
+                int flags);
+int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void udp_destroy_sock(struct sock *sk);
 
 #ifdef CONFIG_PROC_FS
-extern int     udp4_seq_show(struct seq_file *seq, void *v);
+int udp4_seq_show(struct seq_file *seq, void *v);
 #endif
 #endif /* _UDP4_IMPL_H */
index f35eccaa855ebfbac508503c9111311d6728f8ff..83206de2bc7679dc20e4fdcf34f8b1f5cd7831ef 100644 (file)
@@ -52,6 +52,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 
                if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
                                      SKB_GSO_UDP_TUNNEL |
+                                     SKB_GSO_IPIP |
                                      SKB_GSO_GRE | SKB_GSO_MPLS) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;
index b5663c37f089ed0afe33115bcbfad2555b8d0f48..31b18152528fe4dbf9e500ae0c9a2a1a5a3a2adf 100644 (file)
 #include <net/xfrm.h>
 
 /* Informational hook. The decap is still done here. */
-static struct xfrm_tunnel __rcu *rcv_notify_handlers __read_mostly;
+static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
 static DEFINE_MUTEX(xfrm4_mode_tunnel_input_mutex);
 
-int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler)
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
 {
-       struct xfrm_tunnel __rcu **pprev;
-       struct xfrm_tunnel *t;
+       struct xfrm_tunnel_notifier __rcu **pprev;
+       struct xfrm_tunnel_notifier *t;
        int ret = -EEXIST;
        int priority = handler->priority;
 
@@ -50,10 +50,10 @@ err:
 }
 EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_register);
 
-int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler)
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
 {
-       struct xfrm_tunnel __rcu **pprev;
-       struct xfrm_tunnel *t;
+       struct xfrm_tunnel_notifier __rcu **pprev;
+       struct xfrm_tunnel_notifier *t;
        int ret = -ENOENT;
 
        mutex_lock(&xfrm4_mode_tunnel_input_mutex);
@@ -134,7 +134,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 
 static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-       struct xfrm_tunnel *handler;
+       struct xfrm_tunnel_notifier *handler;
        int err = -EINVAL;
 
        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
index 9a459be24af762b42e2d667618f7149c055e5ef6..ccde54248c8ca77d3efc173ce457aa5f0153337a 100644 (file)
@@ -107,6 +107,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 
        memset(fl4, 0, sizeof(struct flowi4));
        fl4->flowi4_mark = skb->mark;
+       fl4->flowi4_oif = skb_dst(skb)->dev->ifindex;
 
        if (!ip_is_fragment(iph)) {
                switch (iph->protocol) {
index 11b13ea69db4e6c35c8b992de9055bf6a312d802..e1a8d903e3662cb1aa36def1ffa5208cd6d515a8 100644 (file)
@@ -153,6 +153,17 @@ config INET6_XFRM_MODE_ROUTEOPTIMIZATION
        ---help---
          Support for MIPv6 route optimization mode.
 
+config IPV6_VTI
+       tristate "Virtual (secure) IPv6: tunneling"
+       select IPV6_TUNNEL
+       depends on INET6_XFRM_MODE_TUNNEL
+       ---help---
+         Tunneling means encapsulating data of one protocol type within
+         another protocol and sending it over a channel that understands the
+         encapsulating protocol. This can be used with xfrm mode tunnel to give
+         the notion of a secure tunnel for IPSEC and then use routing protocol
+         on top.
+
 config IPV6_SIT
        tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)"
        select INET_TUNNEL
index 470a9c008e9b9e921045bb5b11b77569e53ac5c9..17bb830872db21e07080ed9f6a0ef93cbccee9b7 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_INET6_XFRM_MODE_BEET) += xfrm6_mode_beet.o
 obj-$(CONFIG_IPV6_MIP6) += mip6.o
 obj-$(CONFIG_NETFILTER)        += netfilter/
 
+obj-$(CONFIG_IPV6_VTI) += ip6_vti.o
 obj-$(CONFIG_IPV6_SIT) += sit.o
 obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
 obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
index 7c96100b021ef0558a739b6da719246548153b11..6468bda1f2b94c382fe7212135ed1245f71e221f 100644 (file)
@@ -110,11 +110,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
        int try_loading_module = 0;
        int err;
 
-       if (sock->type != SOCK_RAW &&
-           sock->type != SOCK_DGRAM &&
-           !inet_ehash_secret)
-               build_ehash_secret();
-
        /* Look for the requested type/protocol pair. */
 lookup_protocol:
        err = -ESOCKTNOSUPPORT;
@@ -364,7 +359,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        inet->inet_rcv_saddr = v4addr;
        inet->inet_saddr = v4addr;
 
-       np->rcv_saddr = addr->sin6_addr;
+       sk->sk_v6_rcv_saddr = addr->sin6_addr;
 
        if (!(addr_type & IPV6_ADDR_MULTICAST))
                np->saddr = addr->sin6_addr;
@@ -461,14 +456,14 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
                    peer == 1)
                        return -ENOTCONN;
                sin->sin6_port = inet->inet_dport;
-               sin->sin6_addr = np->daddr;
+               sin->sin6_addr = sk->sk_v6_daddr;
                if (np->sndflow)
                        sin->sin6_flowinfo = np->flow_label;
        } else {
-               if (ipv6_addr_any(&np->rcv_saddr))
+               if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
                        sin->sin6_addr = np->saddr;
                else
-                       sin->sin6_addr = np->rcv_saddr;
+                       sin->sin6_addr = sk->sk_v6_rcv_saddr;
 
                sin->sin6_port = inet->inet_sport;
        }
@@ -655,7 +650,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
 
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = sk->sk_protocol;
-               fl6.daddr = np->daddr;
+               fl6.daddr = sk->sk_v6_daddr;
                fl6.saddr = np->saddr;
                fl6.flowlabel = np->flow_label;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
@@ -870,8 +865,6 @@ static int __init inet6_init(void)
        if (err)
                goto out_sock_register_fail;
 
-       tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
-
        /*
         *      ipngwg API draft makes clear that the correct semantics
         *      for TCP and UDP is to consider one TCP and UDP instance
@@ -1028,52 +1021,4 @@ out_unregister_tcp_proto:
 }
 module_init(inet6_init);
 
-static void __exit inet6_exit(void)
-{
-       if (disable_ipv6_mod)
-               return;
-
-       /* First of all disallow new sockets creation. */
-       sock_unregister(PF_INET6);
-       /* Disallow any further netlink messages */
-       rtnl_unregister_all(PF_INET6);
-
-       udpv6_exit();
-       udplitev6_exit();
-       tcpv6_exit();
-
-       /* Cleanup code parts. */
-       ipv6_packet_cleanup();
-       ipv6_frag_exit();
-       ipv6_exthdrs_exit();
-       addrconf_cleanup();
-       ip6_flowlabel_cleanup();
-       ndisc_late_cleanup();
-       ip6_route_cleanup();
-#ifdef CONFIG_PROC_FS
-
-       /* Cleanup code parts. */
-       if6_proc_exit();
-       ipv6_misc_proc_exit();
-       udplite6_proc_exit();
-       raw6_proc_exit();
-#endif
-       ipv6_netfilter_fini();
-       ipv6_stub = NULL;
-       igmp6_cleanup();
-       ndisc_cleanup();
-       ip6_mr_cleanup();
-       icmpv6_cleanup();
-       rawv6_exit();
-
-       unregister_pernet_subsys(&inet6_net_ops);
-       proto_unregister(&rawv6_prot);
-       proto_unregister(&udplitev6_prot);
-       proto_unregister(&udpv6_prot);
-       proto_unregister(&tcpv6_prot);
-
-       rcu_barrier(); /* Wait for completion of call_rcu()'s */
-}
-module_exit(inet6_exit);
-
 MODULE_ALIAS_NETPROTO(PF_INET6);
index 73784c3d4642e09f0f9f92d1f699769ec08258d9..82e1da3a40b915e65c2ecf15662415511cc91286 100644 (file)
@@ -618,8 +618,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
        struct xfrm_state *x;
 
-       if (type != ICMPV6_DEST_UNREACH &&
-           type != ICMPV6_PKT_TOOBIG &&
+       if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return;
 
index 48b6bd2a9a1451b7adf9a678ccce859003d57a79..a454b0ff57c7c67a91e2e5c887609e7a65e5a910 100644 (file)
@@ -107,16 +107,16 @@ ipv4_connected:
                if (err)
                        goto out;
 
-               ipv6_addr_set_v4mapped(inet->inet_daddr, &np->daddr);
+               ipv6_addr_set_v4mapped(inet->inet_daddr, &sk->sk_v6_daddr);
 
                if (ipv6_addr_any(&np->saddr) ||
                    ipv6_mapped_addr_any(&np->saddr))
                        ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
 
-               if (ipv6_addr_any(&np->rcv_saddr) ||
-                   ipv6_mapped_addr_any(&np->rcv_saddr)) {
+               if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) ||
+                   ipv6_mapped_addr_any(&sk->sk_v6_rcv_saddr)) {
                        ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
-                                              &np->rcv_saddr);
+                                              &sk->sk_v6_rcv_saddr);
                        if (sk->sk_prot->rehash)
                                sk->sk_prot->rehash(sk);
                }
@@ -145,7 +145,7 @@ ipv4_connected:
                }
        }
 
-       np->daddr = *daddr;
+       sk->sk_v6_daddr = *daddr;
        np->flow_label = fl6.flowlabel;
 
        inet->inet_dport = usin->sin6_port;
@@ -156,7 +156,7 @@ ipv4_connected:
         */
 
        fl6.flowi6_proto = sk->sk_protocol;
-       fl6.daddr = np->daddr;
+       fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
@@ -183,16 +183,16 @@ ipv4_connected:
        if (ipv6_addr_any(&np->saddr))
                np->saddr = fl6.saddr;
 
-       if (ipv6_addr_any(&np->rcv_saddr)) {
-               np->rcv_saddr = fl6.saddr;
+       if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+               sk->sk_v6_rcv_saddr = fl6.saddr;
                inet->inet_rcv_saddr = LOOPBACK4_IPV6;
                if (sk->sk_prot->rehash)
                        sk->sk_prot->rehash(sk);
        }
 
        ip6_dst_store(sk, dst,
-                     ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
-                     &np->daddr : NULL,
+                     ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+                     &sk->sk_v6_daddr : NULL,
 #ifdef CONFIG_IPV6_SUBTREES
                      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
                      &np->saddr :
@@ -883,11 +883,10 @@ EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
 void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
                             __u16 srcp, __u16 destp, int bucket)
 {
-       struct ipv6_pinfo *np = inet6_sk(sp);
        const struct in6_addr *dest, *src;
 
-       dest  = &np->daddr;
-       src   = &np->rcv_saddr;
+       dest  = &sp->sk_v6_daddr;
+       src   = &sp->sk_v6_rcv_saddr;
        seq_printf(seq,
                   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
                   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
index d3618a78fcac4b1f6e606a904196de79235833b1..e67e63f9858d7feae9e6fba4de667edb8f81875d 100644 (file)
@@ -436,8 +436,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
        struct xfrm_state *x;
 
-       if (type != ICMPV6_DEST_UNREACH &&
-           type != ICMPV6_PKT_TOOBIG &&
+       if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return;
 
index e4311cbc8b4ecbf70ea1fb2e2f2415342be382cc..77bb8afb141d6349e3de622e0f20452552d70344 100644 (file)
@@ -70,20 +70,20 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
                                      struct flowi6 *fl6,
                                      const struct request_sock *req)
 {
-       struct inet6_request_sock *treq = inet6_rsk(req);
+       struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *final_p, final;
        struct dst_entry *dst;
 
        memset(fl6, 0, sizeof(*fl6));
        fl6->flowi6_proto = IPPROTO_TCP;
-       fl6->daddr = treq->rmt_addr;
+       fl6->daddr = ireq->ir_v6_rmt_addr;
        final_p = fl6_update_dst(fl6, np->opt, &final);
-       fl6->saddr = treq->loc_addr;
-       fl6->flowi6_oif = treq->iif;
+       fl6->saddr = ireq->ir_v6_loc_addr;
+       fl6->flowi6_oif = ireq->ir_iif;
        fl6->flowi6_mark = sk->sk_mark;
-       fl6->fl6_dport = inet_rsk(req)->rmt_port;
-       fl6->fl6_sport = inet_rsk(req)->loc_port;
+       fl6->fl6_dport = ireq->ir_rmt_port;
+       fl6->fl6_sport = htons(ireq->ir_num);
        security_req_classify_flow(req, flowi6_to_flowi(fl6));
 
        dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
@@ -129,13 +129,13 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
                                                     lopt->nr_table_entries)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
-               const struct inet6_request_sock *treq = inet6_rsk(req);
+               const struct inet_request_sock *ireq = inet_rsk(req);
 
-               if (inet_rsk(req)->rmt_port == rport &&
+               if (ireq->ir_rmt_port == rport &&
                    req->rsk_ops->family == AF_INET6 &&
-                   ipv6_addr_equal(&treq->rmt_addr, raddr) &&
-                   ipv6_addr_equal(&treq->loc_addr, laddr) &&
-                   (!treq->iif || treq->iif == iif)) {
+                   ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
+                   ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
+                   (!ireq->ir_iif || ireq->ir_iif == iif)) {
                        WARN_ON(req->sk != NULL);
                        *prevp = prev;
                        return req;
@@ -153,8 +153,8 @@ void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
-       const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
-                                     inet_rsk(req)->rmt_port,
+       const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
+                                     inet_rsk(req)->ir_rmt_port,
                                      lopt->hash_rnd, lopt->nr_table_entries);
 
        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
@@ -165,11 +165,10 @@ EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);
 
 void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
 {
-       struct ipv6_pinfo *np = inet6_sk(sk);
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
 
        sin6->sin6_family = AF_INET6;
-       sin6->sin6_addr = np->daddr;
+       sin6->sin6_addr = sk->sk_v6_daddr;
        sin6->sin6_port = inet_sk(sk)->inet_dport;
        /* We do not store received flowlabel for TCP */
        sin6->sin6_flowinfo = 0;
@@ -203,7 +202,7 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
 
        memset(fl6, 0, sizeof(*fl6));
        fl6->flowi6_proto = sk->sk_protocol;
-       fl6->daddr = np->daddr;
+       fl6->daddr = sk->sk_v6_daddr;
        fl6->saddr = np->saddr;
        fl6->flowlabel = np->flow_label;
        IP6_ECN_flow_xmit(sk, fl6->flowlabel);
@@ -245,7 +244,7 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
        skb_dst_set_noref(skb, dst);
 
        /* Restore final destination back after routing done */
-       fl6.daddr = np->daddr;
+       fl6.daddr = sk->sk_v6_daddr;
 
        res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
        rcu_read_unlock();
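
The request-socket hunks above fold the old inet6_request_sock fields into inet_request_sock (ir_v6_rmt_addr, ir_rmt_port, ir_iif, ir_num) and keep hashing pending requests by remote address, remote port and a per-listener random salt. The userspace sketch below shows only that bucketing shape; the mixing function is a toy stand-in for the jhash-based inet6_synq_hash(), and nr_entries is assumed to be a power of two.

/* Toy bucketing of a pending connection request by (remote addr, remote port,
 * per-listener random salt). Only the structure mirrors the code above.
 */
#include <stdint.h>
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static uint32_t toy_mix(const struct in6_addr *raddr, uint16_t rport, uint32_t rnd)
{
        uint32_t h = rnd;

        for (int i = 0; i < 16; i++)
                h = h * 31 + raddr->s6_addr[i];
        return h * 31 + rport;
}

static unsigned int synq_bucket(const struct in6_addr *raddr, uint16_t rport,
                                uint32_t rnd, unsigned int nr_entries)
{
        /* nr_entries is assumed to be a power of two, as in the listen queue. */
        return toy_mix(raddr, rport, rnd) & (nr_entries - 1);
}

int main(void)
{
        struct in6_addr a;

        inet_pton(AF_INET6, "2001:db8::2", &a);
        printf("bucket %u\n", synq_bucket(&a, 443, 0x5eedu, 512));
        return 0;
}
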
index 32b4a1675d826d8ce50e36dbf314456075e1660e..262e13c02ec27dea15154d9b4af5fd413dd1c504 100644 (file)
 #include <net/secure_seq.h>
 #include <net/ip.h>
 
+static unsigned int inet6_ehashfn(struct net *net,
+                                 const struct in6_addr *laddr,
+                                 const u16 lport,
+                                 const struct in6_addr *faddr,
+                                 const __be16 fport)
+{
+       static u32 inet6_ehash_secret __read_mostly;
+       static u32 ipv6_hash_secret __read_mostly;
+
+       u32 lhash, fhash;
+
+       net_get_random_once(&inet6_ehash_secret, sizeof(inet6_ehash_secret));
+       net_get_random_once(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
+
+       lhash = (__force u32)laddr->s6_addr32[3];
+       fhash = __ipv6_addr_jhash(faddr, ipv6_hash_secret);
+
+       return __inet6_ehashfn(lhash, lport, fhash, fport,
+                              inet6_ehash_secret + net_hash_mix(net));
+}
+
+static int inet6_sk_ehashfn(const struct sock *sk)
+{
+       const struct inet_sock *inet = inet_sk(sk);
+       const struct in6_addr *laddr = &sk->sk_v6_rcv_saddr;
+       const struct in6_addr *faddr = &sk->sk_v6_daddr;
+       const __u16 lport = inet->inet_num;
+       const __be16 fport = inet->inet_dport;
+       struct net *net = sock_net(sk);
+
+       return inet6_ehashfn(net, laddr, lport, faddr, fport);
+}
+
 int __inet6_hash(struct sock *sk, struct inet_timewait_sock *tw)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
@@ -89,43 +122,22 @@ begin:
        sk_nulls_for_each_rcu(sk, node, &head->chain) {
                if (sk->sk_hash != hash)
                        continue;
-               if (likely(INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
-                       if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
-                               goto begintw;
-                       if (unlikely(!INET6_MATCH(sk, net, saddr, daddr,
-                                                 ports, dif))) {
-                               sock_put(sk);
-                               goto begin;
-                       }
-               goto out;
-               }
-       }
-       if (get_nulls_value(node) != slot)
-               goto begin;
-
-begintw:
-       /* Must check for a TIME_WAIT'er before going to listener hash. */
-       sk_nulls_for_each_rcu(sk, node, &head->twchain) {
-               if (sk->sk_hash != hash)
+               if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif))
                        continue;
-               if (likely(INET6_TW_MATCH(sk, net, saddr, daddr,
-                                         ports, dif))) {
-                       if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
-                               sk = NULL;
-                               goto out;
-                       }
-                       if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
-                                                    ports, dif))) {
-                               sock_put(sk);
-                               goto begintw;
-                       }
+               if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
                        goto out;
+
+               if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
+                       sock_gen_put(sk);
+                       goto begin;
                }
+               goto found;
        }
        if (get_nulls_value(node) != slot)
-               goto begintw;
-       sk = NULL;
+               goto begin;
 out:
+       sk = NULL;
+found:
        rcu_read_unlock();
        return sk;
 }
@@ -140,11 +152,10 @@ static inline int compute_score(struct sock *sk, struct net *net,
 
        if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
            sk->sk_family == PF_INET6) {
-               const struct ipv6_pinfo *np = inet6_sk(sk);
 
                score = 1;
-               if (!ipv6_addr_any(&np->rcv_saddr)) {
-                       if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+               if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                       if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
                                return -1;
                        score++;
                }
@@ -236,9 +247,8 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 {
        struct inet_hashinfo *hinfo = death_row->hashinfo;
        struct inet_sock *inet = inet_sk(sk);
-       const struct ipv6_pinfo *np = inet6_sk(sk);
-       const struct in6_addr *daddr = &np->rcv_saddr;
-       const struct in6_addr *saddr = &np->daddr;
+       const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr;
+       const struct in6_addr *saddr = &sk->sk_v6_daddr;
        const int dif = sk->sk_bound_dev_if;
        const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport);
        struct net *net = sock_net(sk);
@@ -248,38 +258,28 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
        spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
        struct sock *sk2;
        const struct hlist_nulls_node *node;
-       struct inet_timewait_sock *tw;
+       struct inet_timewait_sock *tw = NULL;
        int twrefcnt = 0;
 
        spin_lock(lock);
 
-       /* Check TIME-WAIT sockets first. */
-       sk_nulls_for_each(sk2, node, &head->twchain) {
-               if (sk2->sk_hash != hash)
-                       continue;
-
-               if (likely(INET6_TW_MATCH(sk2, net, saddr, daddr,
-                                         ports, dif))) {
-                       tw = inet_twsk(sk2);
-                       if (twsk_unique(sk, sk2, twp))
-                               goto unique;
-                       else
-                               goto not_unique;
-               }
-       }
-       tw = NULL;
-
-       /* And established part... */
        sk_nulls_for_each(sk2, node, &head->chain) {
                if (sk2->sk_hash != hash)
                        continue;
-               if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif)))
+
+               if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif))) {
+                       if (sk2->sk_state == TCP_TIME_WAIT) {
+                               tw = inet_twsk(sk2);
+                               if (twsk_unique(sk, sk2, twp))
+                                       break;
+                       }
                        goto not_unique;
+               }
        }
 
-unique:
        /* Must record num and sport now. Otherwise we will see
-        * in hash table socket with a funny identity. */
+        * in hash table socket with a funny identity.
+        */
        inet->inet_num = lport;
        inet->inet_sport = htons(lport);
        sk->sk_hash = hash;
@@ -312,9 +312,9 @@ not_unique:
 static inline u32 inet6_sk_port_offset(const struct sock *sk)
 {
        const struct inet_sock *inet = inet_sk(sk);
-       const struct ipv6_pinfo *np = inet6_sk(sk);
-       return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32,
-                                         np->daddr.s6_addr32,
+
+       return secure_ipv6_port_ephemeral(sk->sk_v6_rcv_saddr.s6_addr32,
+                                         sk->sk_v6_daddr.s6_addr32,
                                          inet->inet_dport);
 }
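
Two things happen in the hashtable hunks above: TIME_WAIT sockets move out of the separate twchain into the main ehash chain, and the ehash secret is now seeded lazily through net_get_random_once() on the first lookup instead of at boot. The sketch below models only the lazy-secret part in userspace; pthread_once stands in for the kernel's once machinery and the combining step is a toy, not __inet6_ehashfn().

/* Userspace model of the net_get_random_once() pattern: a hashing secret that
 * is filled in exactly once, on first use, rather than at init time.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t ehash_secret;
static pthread_once_t secret_once = PTHREAD_ONCE_INIT;

static void init_secret(void)
{
        FILE *f = fopen("/dev/urandom", "rb");

        if (!f || fread(&ehash_secret, sizeof(ehash_secret), 1, f) != 1) {
                perror("urandom");
                exit(1);
        }
        fclose(f);
}

static uint32_t ehashfn(uint32_t laddr_hash, uint16_t lport,
                        uint32_t faddr_hash, uint16_t fport)
{
        pthread_once(&secret_once, init_secret);
        /* Toy combining step; the kernel uses the jhash-based __inet6_ehashfn(). */
        return (laddr_hash ^ faddr_hash ^ ((uint32_t)lport << 16 | fport)) + ehash_secret;
}

int main(void)
{
        printf("%08x\n", ehashfn(0x1u, 8080, 0x2u, 443));
        return 0;
}
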
 
index 5bec666aba61d464fab4e77684eedd4265143cf9..5550a8113a6dc5f202c0ccda2c7166e98e81a7e9 100644 (file)
@@ -1529,25 +1529,6 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
        fib6_walk(&c.w);
 }
 
-void fib6_clean_all_ro(struct net *net, int (*func)(struct rt6_info *, void *arg),
-                   int prune, void *arg)
-{
-       struct fib6_table *table;
-       struct hlist_head *head;
-       unsigned int h;
-
-       rcu_read_lock();
-       for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
-               head = &net->ipv6.fib_table_hash[h];
-               hlist_for_each_entry_rcu(table, head, tb6_hlist) {
-                       read_lock_bh(&table->tb6_lock);
-                       fib6_clean_tree(net, &table->tb6_root,
-                                       func, prune, arg);
-                       read_unlock_bh(&table->tb6_lock);
-               }
-       }
-       rcu_read_unlock();
-}
 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
                    int prune, void *arg)
 {
@@ -1782,3 +1763,189 @@ void fib6_gc_cleanup(void)
        unregister_pernet_subsys(&fib6_net_ops);
        kmem_cache_destroy(fib6_node_kmem);
 }
+
+#ifdef CONFIG_PROC_FS
+
+struct ipv6_route_iter {
+       struct seq_net_private p;
+       struct fib6_walker_t w;
+       loff_t skip;
+       struct fib6_table *tbl;
+       __u32 sernum;
+};
+
+static int ipv6_route_seq_show(struct seq_file *seq, void *v)
+{
+       struct rt6_info *rt = v;
+       struct ipv6_route_iter *iter = seq->private;
+
+       seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
+
+#ifdef CONFIG_IPV6_SUBTREES
+       seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
+#else
+       seq_puts(seq, "00000000000000000000000000000000 00 ");
+#endif
+       if (rt->rt6i_flags & RTF_GATEWAY)
+               seq_printf(seq, "%pi6", &rt->rt6i_gateway);
+       else
+               seq_puts(seq, "00000000000000000000000000000000");
+
+       seq_printf(seq, " %08x %08x %08x %08x %8s\n",
+                  rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
+                  rt->dst.__use, rt->rt6i_flags,
+                  rt->dst.dev ? rt->dst.dev->name : "");
+       iter->w.leaf = NULL;
+       return 0;
+}
+
+static int ipv6_route_yield(struct fib6_walker_t *w)
+{
+       struct ipv6_route_iter *iter = w->args;
+
+       if (!iter->skip)
+               return 1;
+
+       do {
+               iter->w.leaf = iter->w.leaf->dst.rt6_next;
+               iter->skip--;
+               if (!iter->skip && iter->w.leaf)
+                       return 1;
+       } while (iter->w.leaf);
+
+       return 0;
+}
+
+static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter)
+{
+       memset(&iter->w, 0, sizeof(iter->w));
+       iter->w.func = ipv6_route_yield;
+       iter->w.root = &iter->tbl->tb6_root;
+       iter->w.state = FWS_INIT;
+       iter->w.node = iter->w.root;
+       iter->w.args = iter;
+       iter->sernum = iter->w.root->fn_sernum;
+       INIT_LIST_HEAD(&iter->w.lh);
+       fib6_walker_link(&iter->w);
+}
+
+static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
+                                                   struct net *net)
+{
+       unsigned int h;
+       struct hlist_node *node;
+
+       if (tbl) {
+               h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
+               node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
+       } else {
+               h = 0;
+               node = NULL;
+       }
+
+       while (!node && h < FIB6_TABLE_HASHSZ) {
+               node = rcu_dereference_bh(
+                       hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
+       }
+       return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
+}
+
+static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
+{
+       if (iter->sernum != iter->w.root->fn_sernum) {
+               iter->sernum = iter->w.root->fn_sernum;
+               iter->w.state = FWS_INIT;
+               iter->w.node = iter->w.root;
+               WARN_ON(iter->w.skip);
+               iter->w.skip = iter->w.count;
+       }
+}
+
+static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       int r;
+       struct rt6_info *n;
+       struct net *net = seq_file_net(seq);
+       struct ipv6_route_iter *iter = seq->private;
+
+       if (!v)
+               goto iter_table;
+
+       n = ((struct rt6_info *)v)->dst.rt6_next;
+       if (n) {
+               ++*pos;
+               return n;
+       }
+
+iter_table:
+       ipv6_route_check_sernum(iter);
+       read_lock(&iter->tbl->tb6_lock);
+       r = fib6_walk_continue(&iter->w);
+       read_unlock(&iter->tbl->tb6_lock);
+       if (r > 0) {
+               if (v)
+                       ++*pos;
+               return iter->w.leaf;
+       } else if (r < 0) {
+               fib6_walker_unlink(&iter->w);
+               return NULL;
+       }
+       fib6_walker_unlink(&iter->w);
+
+       iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
+       if (!iter->tbl)
+               return NULL;
+
+       ipv6_route_seq_setup_walk(iter);
+       goto iter_table;
+}
+
+static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
+       __acquires(RCU_BH)
+{
+       struct net *net = seq_file_net(seq);
+       struct ipv6_route_iter *iter = seq->private;
+
+       rcu_read_lock_bh();
+       iter->tbl = ipv6_route_seq_next_table(NULL, net);
+       iter->skip = *pos;
+
+       if (iter->tbl) {
+               ipv6_route_seq_setup_walk(iter);
+               return ipv6_route_seq_next(seq, NULL, pos);
+       } else {
+               return NULL;
+       }
+}
+
+static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
+{
+       struct fib6_walker_t *w = &iter->w;
+       return w->node && !(w->state == FWS_U && w->node == w->root);
+}
+
+static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
+       __releases(RCU_BH)
+{
+       struct ipv6_route_iter *iter = seq->private;
+
+       if (ipv6_route_iter_active(iter))
+               fib6_walker_unlink(&iter->w);
+
+       rcu_read_unlock_bh();
+}
+
+static const struct seq_operations ipv6_route_seq_ops = {
+       .start  = ipv6_route_seq_start,
+       .next   = ipv6_route_seq_next,
+       .stop   = ipv6_route_seq_stop,
+       .show   = ipv6_route_seq_show
+};
+
+int ipv6_route_open(struct inode *inode, struct file *file)
+{
+       return seq_open_net(inode, file, &ipv6_route_seq_ops,
+                           sizeof(struct ipv6_route_iter));
+}
+
+#endif /* CONFIG_PROC_FS */
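
The new /proc/net/ipv6_route iterator above survives restarts of the FIB walk: ipv6_route_yield() burns iter->skip entries before yielding, so a later read() can resume at the saved *pos even after the walker is re-linked. Below is a small userspace model of that resume-by-skipping idea, over a plain linked list rather than the FIB tree.

/* Resume-by-skipping: consume 'skip' entries before yielding the next one. */
#include <stdio.h>
#include <stddef.h>

struct node {
        int value;
        struct node *next;
};

struct iter {
        struct node *pos;
        long skip;      /* entries to consume before yielding */
};

static struct node *iter_yield(struct iter *it)
{
        while (it->skip && it->pos) {
                it->pos = it->pos->next;
                it->skip--;
        }
        return it->pos;
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct iter it = { .pos = &a, .skip = 2 };
        struct node *n = iter_yield(&it);

        printf("resumed at %d\n", n ? n->value : -1);  /* prints 3 */
        return 0;
}
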
index 7bb5446b9d73c16a7f4096f2705ce60f997c7c2a..bf4a9a084de5aa8f733318276d6e84cc37d5e249 100644 (file)
@@ -976,6 +976,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
                if (t->parms.o_flags&GRE_SEQ)
                        addend += 4;
        }
+       t->hlen = addend;
 
        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                int strict = (ipv6_addr_type(&p->raddr) &
@@ -1002,8 +1003,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
                }
                ip6_rt_put(rt);
        }
-
-       t->hlen = addend;
 }
 
 static int ip6gre_tnl_change(struct ip6_tnl *t,
@@ -1173,9 +1172,8 @@ done:
 
 static int ip6gre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
 {
-       struct ip6_tnl *tunnel = netdev_priv(dev);
        if (new_mtu < 68 ||
-           new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
+           new_mtu > 0xFFF8 - dev->hard_header_len)
                return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
index d82de72281009c1b93d1cbde1bac91c023ecd930..4b851692b1f6bed3fbe476c65672c45e28570b3e 100644 (file)
@@ -66,7 +66,6 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
        __skb_pull(skb, sizeof(*ipv6h));
        err = -EPROTONOSUPPORT;
 
-       rcu_read_lock();
        ops = rcu_dereference(inet6_offloads[
                ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
 
@@ -74,7 +73,6 @@ static int ipv6_gso_send_check(struct sk_buff *skb)
                skb_reset_transport_header(skb);
                err = ops->callbacks.gso_send_check(skb);
        }
-       rcu_read_unlock();
 
 out:
        return err;
@@ -92,46 +90,58 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
        u8 *prevhdr;
        int offset = 0;
        bool tunnel;
+       int nhoff;
 
        if (unlikely(skb_shinfo(skb)->gso_type &
                     ~(SKB_GSO_UDP |
                       SKB_GSO_DODGY |
                       SKB_GSO_TCP_ECN |
                       SKB_GSO_GRE |
+                      SKB_GSO_IPIP |
+                      SKB_GSO_SIT |
                       SKB_GSO_UDP_TUNNEL |
                       SKB_GSO_MPLS |
                       SKB_GSO_TCPV6 |
                       0)))
                goto out;
 
+       skb_reset_network_header(skb);
+       nhoff = skb_network_header(skb) - skb_mac_header(skb);
        if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
                goto out;
 
-       tunnel = skb->encapsulation;
+       tunnel = SKB_GSO_CB(skb)->encap_level > 0;
+       if (tunnel)
+               features = skb->dev->hw_enc_features & netif_skb_features(skb);
+       SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
+
        ipv6h = ipv6_hdr(skb);
        __skb_pull(skb, sizeof(*ipv6h));
        segs = ERR_PTR(-EPROTONOSUPPORT);
 
        proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
-       rcu_read_lock();
+
        ops = rcu_dereference(inet6_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment)) {
                skb_reset_transport_header(skb);
                segs = ops->callbacks.gso_segment(skb, features);
        }
-       rcu_read_unlock();
 
        if (IS_ERR(segs))
                goto out;
 
        for (skb = segs; skb; skb = skb->next) {
-               ipv6h = ipv6_hdr(skb);
-               ipv6h->payload_len = htons(skb->len - skb->mac_len -
-                                          sizeof(*ipv6h));
+               ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
+               ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
+               if (tunnel) {
+                       skb_reset_inner_headers(skb);
+                       skb->encapsulation = 1;
+               }
+               skb->network_header = (u8 *)ipv6h - skb->head;
+
                if (!tunnel && proto == IPPROTO_UDP) {
                        unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
-                       fptr = (struct frag_hdr *)(skb_network_header(skb) +
-                               unfrag_ip6hlen);
+                       fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
                        fptr->frag_off = htons(offset);
                        if (skb->next != NULL)
                                fptr->frag_off |= htons(IP6_MF);
@@ -267,6 +277,13 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
        },
 };
 
+static const struct net_offload sit_offload = {
+       .callbacks = {
+               .gso_send_check = ipv6_gso_send_check,
+               .gso_segment    = ipv6_gso_segment,
+       },
+};
+
 static int __init ipv6_offload_init(void)
 {
 
@@ -278,6 +295,9 @@ static int __init ipv6_offload_init(void)
                pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);
 
        dev_add_offload(&ipv6_packet_offload);
+
+       inet_add_offload(&sit_offload, IPPROTO_IPV6);
+
        return 0;
 }
 
index a54c45ce4a48f0d3a65f6c54ac77bb73a6a41280..91fb4e8212f52d434e6d072103f70dbb8d219326 100644 (file)
@@ -105,7 +105,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
        }
 
        rcu_read_lock_bh();
-       nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+       nexthop = rt6_nexthop((struct rt6_info *)dst);
        neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -874,7 +874,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
         */
        rt = (struct rt6_info *) *dst;
        rcu_read_lock_bh();
-       n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr));
+       n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
        err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
        rcu_read_unlock_bh();
 
@@ -1008,6 +1008,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 
 {
        struct sk_buff *skb;
+       struct frag_hdr fhdr;
        int err;
 
        /* There is support for UDP large send offload by network
@@ -1015,8 +1016,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
         * udp datagram
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
-               struct frag_hdr fhdr;
-
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);
@@ -1036,20 +1035,24 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                skb->transport_header = skb->network_header + fragheaderlen;
 
                skb->protocol = htons(ETH_P_IPV6);
-               skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
 
-               /* Specify the length of each IPv6 datagram fragment.
-                * It has to be a multiple of 8.
-                */
-               skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
-                                            sizeof(struct frag_hdr)) & ~7;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-               ipv6_select_ident(&fhdr, rt);
-               skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
                __skb_queue_tail(&sk->sk_write_queue, skb);
+       } else if (skb_is_gso(skb)) {
+               goto append;
        }
 
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       /* Specify the length of each IPv6 datagram fragment.
+        * It has to be a multiple of 8.
+        */
+       skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
+                                    sizeof(struct frag_hdr)) & ~7;
+       skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+       ipv6_select_ident(&fhdr, rt);
+       skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
+
+append:
        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
 }
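
The ip6_ufo_append_data() change above moves the GSO sizing out of the first-skb branch; the per-fragment payload is still the MTU minus the IPv6 and fragment headers, rounded down to a multiple of 8 because fragment offsets count 8-byte units. A quick userspace check of that arithmetic:

/* Fragment sizing rule: (mtu - fragheaderlen - frag_hdr) rounded down to 8. */
#include <stdio.h>
#include <assert.h>

#define FRAG_HDR_LEN 8          /* sizeof(struct frag_hdr) */

static unsigned int ufo_gso_size(unsigned int mtu, unsigned int fragheaderlen)
{
        return (mtu - fragheaderlen - FRAG_HDR_LEN) & ~7u;
}

int main(void)
{
        unsigned int gso = ufo_gso_size(1500, 40);   /* plain IPv6 header */

        assert(gso % 8 == 0);
        printf("per-fragment payload: %u bytes\n", gso);  /* 1448 */
        return 0;
}
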
index a791552e042212d866b2cd96c35e2f0b5d289ab5..583b77e2f69be1d1499da479e2f8da1435d22a97 100644 (file)
@@ -1430,9 +1430,17 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 static int
 ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 {
-       if (new_mtu < IPV6_MIN_MTU) {
-               return -EINVAL;
+       struct ip6_tnl *tnl = netdev_priv(dev);
+
+       if (tnl->parms.proto == IPPROTO_IPIP) {
+               if (new_mtu < 68)
+                       return -EINVAL;
+       } else {
+               if (new_mtu < IPV6_MIN_MTU)
+                       return -EINVAL;
        }
+       if (new_mtu > 0xFFF8 - dev->hard_header_len)
+               return -EINVAL;
        dev->mtu = new_mtu;
        return 0;
 }
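
ip6_tnl_change_mtu() above now picks the lower MTU bound from the encapsulated protocol (68 for IPv4 payloads, IPV6_MIN_MTU otherwise) and caps both at 0xFFF8 minus the link header. A userspace sketch of the same bounds check, with the logic collapsed into one predicate:

/* Per-payload MTU bounds, mirroring the check above. */
#include <stdio.h>
#include <stdbool.h>

#define IPV6_MIN_MTU 1280

static bool tnl_mtu_ok(int new_mtu, bool payload_is_ipv4, int hard_header_len)
{
        int min_mtu = payload_is_ipv4 ? 68 : IPV6_MIN_MTU;

        return new_mtu >= min_mtu && new_mtu <= 0xFFF8 - hard_header_len;
}

int main(void)
{
        printf("%d %d\n", tnl_mtu_ok(576, true, 14), tnl_mtu_ok(576, false, 14));
        /* prints "1 0": 576 is fine for an IPv4 payload, too small for IPv6 */
        return 0;
}
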
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
new file mode 100644 (file)
index 0000000..ed94ba6
--- /dev/null
@@ -0,0 +1,1056 @@
+/*
+ *     IPv6 virtual tunneling interface
+ *
+ *     Copyright (C) 2013 secunet Security Networks AG
+ *
+ *     Author:
+ *     Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ *     Based on:
+ *     net/ipv6/ip6_tunnel.c
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/sockios.h>
+#include <linux/icmp.h>
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/icmpv6.h>
+#include <linux/init.h>
+#include <linux/route.h>
+#include <linux/rtnetlink.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/slab.h>
+#include <linux/hash.h>
+
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/ip_tunnels.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+#include <net/ip6_tunnel.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define HASH_SIZE_SHIFT  5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
+
+static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+{
+       u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
+
+       return hash_32(hash, HASH_SIZE_SHIFT);
+}
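
HASH() above folds the two endpoint addresses into 32 bits with ipv6_addr_hash(), XORs them and squeezes the result into HASH_SIZE_SHIFT bits with hash_32(). The userspace model below keeps that shape; the per-address fold and the multiplier are illustrative, not the kernel's exact constants.

/* Fold each address to 32 bits, XOR, then take the top HASH_SIZE_SHIFT bits
 * of a multiplicative hash.
 */
#include <stdint.h>
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#define HASH_SIZE_SHIFT 5

static uint32_t addr_fold(const struct in6_addr *a)
{
        uint32_t h = 0;

        for (int i = 0; i < 16; i += 4)
                h ^= (uint32_t)a->s6_addr[i] << 24 | (uint32_t)a->s6_addr[i + 1] << 16 |
                     (uint32_t)a->s6_addr[i + 2] << 8 | a->s6_addr[i + 3];
        return h;
}

static unsigned int tunnel_hash(const struct in6_addr *r, const struct in6_addr *l)
{
        uint32_t h = addr_fold(r) ^ addr_fold(l);

        return (h * 0x61C88647u) >> (32 - HASH_SIZE_SHIFT);    /* 0..31 */
}

int main(void)
{
        struct in6_addr r, l;

        inet_pton(AF_INET6, "2001:db8::1", &r);
        inet_pton(AF_INET6, "2001:db8::2", &l);
        printf("bucket %u\n", tunnel_hash(&r, &l));
        return 0;
}
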
+
+static int vti6_dev_init(struct net_device *dev);
+static void vti6_dev_setup(struct net_device *dev);
+static struct rtnl_link_ops vti6_link_ops __read_mostly;
+
+static int vti6_net_id __read_mostly;
+struct vti6_net {
+       /* the vti6 tunnel fallback device */
+       struct net_device *fb_tnl_dev;
+       /* lists for storing tunnels in use */
+       struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
+       struct ip6_tnl __rcu *tnls_wc[1];
+       struct ip6_tnl __rcu **tnls[2];
+};
+
+static struct net_device_stats *vti6_get_stats(struct net_device *dev)
+{
+       struct pcpu_tstats sum = { 0 };
+       int i;
+
+       for_each_possible_cpu(i) {
+               const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+               sum.rx_packets += tstats->rx_packets;
+               sum.rx_bytes   += tstats->rx_bytes;
+               sum.tx_packets += tstats->tx_packets;
+               sum.tx_bytes   += tstats->tx_bytes;
+       }
+       dev->stats.rx_packets = sum.rx_packets;
+       dev->stats.rx_bytes   = sum.rx_bytes;
+       dev->stats.tx_packets = sum.tx_packets;
+       dev->stats.tx_bytes   = sum.tx_bytes;
+       return &dev->stats;
+}
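
vti6_get_stats() above sums lock-free per-CPU counters into the device totals on demand. A userspace sketch of the same aggregation, with a fixed array standing in for the kernel's per-CPU allocation:

/* Each CPU keeps its own counters; a reader sums them when asked. */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 4

struct tstats {
        uint64_t rx_packets, rx_bytes, tx_packets, tx_bytes;
};

static struct tstats percpu[NR_CPUS];

static struct tstats sum_stats(void)
{
        struct tstats sum = { 0 };

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                sum.rx_packets += percpu[cpu].rx_packets;
                sum.rx_bytes   += percpu[cpu].rx_bytes;
                sum.tx_packets += percpu[cpu].tx_packets;
                sum.tx_bytes   += percpu[cpu].tx_bytes;
        }
        return sum;
}

int main(void)
{
        percpu[0].rx_packets = 3;
        percpu[2].rx_packets = 5;
        printf("rx_packets = %llu\n", (unsigned long long)sum_stats().rx_packets);
        return 0;
}
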
+
+#define for_each_vti6_tunnel_rcu(start) \
+       for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/**
+ * vti6_tnl_lookup - fetch tunnel matching the end-point addresses
+ *   @net: network namespace
+ *   @remote: the address of the tunnel exit-point
+ *   @local: the address of the tunnel entry-point
+ *
+ * Return:
+ *   tunnel matching given end-points if found,
+ *   else fallback tunnel if its device is up,
+ *   else %NULL
+ **/
+static struct ip6_tnl *
+vti6_tnl_lookup(struct net *net, const struct in6_addr *remote,
+               const struct in6_addr *local)
+{
+       unsigned int hash = HASH(remote, local);
+       struct ip6_tnl *t;
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       for_each_vti6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
+               if (ipv6_addr_equal(local, &t->parms.laddr) &&
+                   ipv6_addr_equal(remote, &t->parms.raddr) &&
+                   (t->dev->flags & IFF_UP))
+                       return t;
+       }
+       t = rcu_dereference(ip6n->tnls_wc[0]);
+       if (t && (t->dev->flags & IFF_UP))
+               return t;
+
+       return NULL;
+}
+
+/**
+ * vti6_tnl_bucket - get head of list matching given tunnel parameters
+ *   @p: parameters containing tunnel end-points
+ *
+ * Description:
+ *   vti6_tnl_bucket() returns the head of the list matching the
+ *   &struct in6_addr entries laddr and raddr in @p.
+ *
+ * Return: head of IPv6 tunnel list
+ **/
+static struct ip6_tnl __rcu **
+vti6_tnl_bucket(struct vti6_net *ip6n, const struct __ip6_tnl_parm *p)
+{
+       const struct in6_addr *remote = &p->raddr;
+       const struct in6_addr *local = &p->laddr;
+       unsigned int h = 0;
+       int prio = 0;
+
+       if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
+               prio = 1;
+               h = HASH(remote, local);
+       }
+       return &ip6n->tnls[prio][h];
+}
+
+static void
+vti6_tnl_link(struct vti6_net *ip6n, struct ip6_tnl *t)
+{
+       struct ip6_tnl __rcu **tp = vti6_tnl_bucket(ip6n, &t->parms);
+
+       rcu_assign_pointer(t->next , rtnl_dereference(*tp));
+       rcu_assign_pointer(*tp, t);
+}
+
+static void
+vti6_tnl_unlink(struct vti6_net *ip6n, struct ip6_tnl *t)
+{
+       struct ip6_tnl __rcu **tp;
+       struct ip6_tnl *iter;
+
+       for (tp = vti6_tnl_bucket(ip6n, &t->parms);
+            (iter = rtnl_dereference(*tp)) != NULL;
+            tp = &iter->next) {
+               if (t == iter) {
+                       rcu_assign_pointer(*tp, t->next);
+                       break;
+               }
+       }
+}
+
+static void vti6_dev_free(struct net_device *dev)
+{
+       free_percpu(dev->tstats);
+       free_netdev(dev);
+}
+
+static int vti6_tnl_create2(struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+       int err;
+
+       err = vti6_dev_init(dev);
+       if (err < 0)
+               goto out;
+
+       err = register_netdevice(dev);
+       if (err < 0)
+               goto out;
+
+       strcpy(t->parms.name, dev->name);
+       dev->rtnl_link_ops = &vti6_link_ops;
+
+       dev_hold(dev);
+       vti6_tnl_link(ip6n, t);
+
+       return 0;
+
+out:
+       return err;
+}
+
+static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
+{
+       struct net_device *dev;
+       struct ip6_tnl *t;
+       char name[IFNAMSIZ];
+       int err;
+
+       if (p->name[0])
+               strlcpy(name, p->name, IFNAMSIZ);
+       else
+               sprintf(name, "ip6_vti%%d");
+
+       dev = alloc_netdev(sizeof(*t), name, vti6_dev_setup);
+       if (dev == NULL)
+               goto failed;
+
+       dev_net_set(dev, net);
+
+       t = netdev_priv(dev);
+       t->parms = *p;
+       t->net = dev_net(dev);
+
+       err = vti6_tnl_create2(dev);
+       if (err < 0)
+               goto failed_free;
+
+       return t;
+
+failed_free:
+       vti6_dev_free(dev);
+failed:
+       return NULL;
+}
+
+/**
+ * vti6_locate - find or create tunnel matching given parameters
+ *   @net: network namespace
+ *   @p: tunnel parameters
+ *   @create: != 0 if allowed to create new tunnel if no match found
+ *
+ * Description:
+ *   vti6_locate() first tries to locate an existing tunnel
+ *   based on @parms. If this is unsuccessful, but @create is set a new
+ *   tunnel device is created and registered for use.
+ *
+ * Return:
+ *   matching tunnel or NULL
+ **/
+static struct ip6_tnl *vti6_locate(struct net *net, struct __ip6_tnl_parm *p,
+                                  int create)
+{
+       const struct in6_addr *remote = &p->raddr;
+       const struct in6_addr *local = &p->laddr;
+       struct ip6_tnl __rcu **tp;
+       struct ip6_tnl *t;
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       for (tp = vti6_tnl_bucket(ip6n, p);
+            (t = rtnl_dereference(*tp)) != NULL;
+            tp = &t->next) {
+               if (ipv6_addr_equal(local, &t->parms.laddr) &&
+                   ipv6_addr_equal(remote, &t->parms.raddr))
+                       return t;
+       }
+       if (!create)
+               return NULL;
+       return vti6_tnl_create(net, p);
+}
+
+/**
+ * vti6_dev_uninit - tunnel device uninitializer
+ *   @dev: the device to be destroyed
+ *
+ * Description:
+ *   vti6_dev_uninit() removes tunnel from its list
+ **/
+static void vti6_dev_uninit(struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       if (dev == ip6n->fb_tnl_dev)
+               RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
+       else
+               vti6_tnl_unlink(ip6n, t);
+       ip6_tnl_dst_reset(t);
+       dev_put(dev);
+}
+
+static int vti6_rcv(struct sk_buff *skb)
+{
+       struct ip6_tnl *t;
+       const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+       rcu_read_lock();
+
+       if ((t = vti6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
+                                &ipv6h->daddr)) != NULL) {
+               struct pcpu_tstats *tstats;
+
+               if (t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) {
+                       rcu_read_unlock();
+                       goto discard;
+               }
+
+               if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+                       rcu_read_unlock();
+                       return 0;
+               }
+
+               if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
+                       t->dev->stats.rx_dropped++;
+                       rcu_read_unlock();
+                       goto discard;
+               }
+
+               tstats = this_cpu_ptr(t->dev->tstats);
+               tstats->rx_packets++;
+               tstats->rx_bytes += skb->len;
+
+               skb->mark = 0;
+               secpath_reset(skb);
+               skb->dev = t->dev;
+
+               rcu_read_unlock();
+               return 0;
+       }
+       rcu_read_unlock();
+       return 1;
+
+discard:
+       kfree_skb(skb);
+       return 0;
+}
+
+/**
+ * vti6_addr_conflict - compare packet addresses to tunnel's own
+ *   @t: the outgoing tunnel device
+ *   @hdr: IPv6 header from the incoming packet
+ *
+ * Description:
+ *   Avoid trivial tunneling loop by checking that tunnel exit-point
+ *   doesn't match source of incoming packet.
+ *
+ * Return:
+ *   1 if conflict,
+ *   0 else
+ **/
+static inline bool
+vti6_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
+{
+       return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
+}
+
+/**
+ * vti6_xmit - send a packet
+ *   @skb: the outgoing socket buffer
+ *   @dev: the outgoing tunnel device
+ **/
+static int vti6_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct net *net = dev_net(dev);
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net_device_stats *stats = &t->dev->stats;
+       struct dst_entry *dst = NULL, *ndst = NULL;
+       struct flowi6 fl6;
+       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct net_device *tdev;
+       int err = -1;
+
+       if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
+           !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h))
+               return err;
+
+       dst = ip6_tnl_dst_check(t);
+       if (!dst) {
+               memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+
+               ndst = ip6_route_output(net, NULL, &fl6);
+
+               if (ndst->error)
+                       goto tx_err_link_failure;
+               ndst = xfrm_lookup(net, ndst, flowi6_to_flowi(&fl6), NULL, 0);
+               if (IS_ERR(ndst)) {
+                       err = PTR_ERR(ndst);
+                       ndst = NULL;
+                       goto tx_err_link_failure;
+               }
+               dst = ndst;
+       }
+
+       if (!dst->xfrm || dst->xfrm->props.mode != XFRM_MODE_TUNNEL)
+               goto tx_err_link_failure;
+
+       tdev = dst->dev;
+
+       if (tdev == dev) {
+               stats->collisions++;
+               net_warn_ratelimited("%s: Local routing loop detected!\n",
+                                    t->parms.name);
+               goto tx_err_dst_release;
+       }
+
+
+       skb_dst_drop(skb);
+       skb_dst_set_noref(skb, dst);
+
+       ip6tunnel_xmit(skb, dev);
+       if (ndst) {
+               dev->mtu = dst_mtu(ndst);
+               ip6_tnl_dst_store(t, ndst);
+       }
+
+       return 0;
+tx_err_link_failure:
+       stats->tx_carrier_errors++;
+       dst_link_failure(skb);
+tx_err_dst_release:
+       dst_release(ndst);
+       return err;
+}
+
+static netdev_tx_t
+vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net_device_stats *stats = &t->dev->stats;
+       int ret;
+
+       switch (skb->protocol) {
+       case htons(ETH_P_IPV6):
+               ret = vti6_xmit(skb, dev);
+               break;
+       default:
+               goto tx_err;
+       }
+
+       if (ret < 0)
+               goto tx_err;
+
+       return NETDEV_TX_OK;
+
+tx_err:
+       stats->tx_errors++;
+       stats->tx_dropped++;
+       kfree_skb(skb);
+       return NETDEV_TX_OK;
+}
+
+static void vti6_link_config(struct ip6_tnl *t)
+{
+       struct dst_entry *dst;
+       struct net_device *dev = t->dev;
+       struct __ip6_tnl_parm *p = &t->parms;
+       struct flowi6 *fl6 = &t->fl.u.ip6;
+
+       memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+       memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+
+       /* Set up flowi template */
+       fl6->saddr = p->laddr;
+       fl6->daddr = p->raddr;
+       fl6->flowi6_oif = p->link;
+       fl6->flowi6_mark = be32_to_cpu(p->i_key);
+       fl6->flowi6_proto = p->proto;
+       fl6->flowlabel = 0;
+
+       p->flags &= ~(IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV |
+                     IP6_TNL_F_CAP_PER_PACKET);
+       p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
+
+       if (p->flags & IP6_TNL_F_CAP_XMIT && p->flags & IP6_TNL_F_CAP_RCV)
+               dev->flags |= IFF_POINTOPOINT;
+       else
+               dev->flags &= ~IFF_POINTOPOINT;
+
+       dev->iflink = p->link;
+
+       if (p->flags & IP6_TNL_F_CAP_XMIT) {
+
+               dst = ip6_route_output(dev_net(dev), NULL, fl6);
+               if (dst->error)
+                       return;
+
+               dst = xfrm_lookup(dev_net(dev), dst, flowi6_to_flowi(fl6),
+                                 NULL, 0);
+               if (IS_ERR(dst))
+                       return;
+
+               if (dst->dev) {
+                       dev->hard_header_len = dst->dev->hard_header_len;
+
+                       dev->mtu = dst_mtu(dst);
+
+                       if (dev->mtu < IPV6_MIN_MTU)
+                               dev->mtu = IPV6_MIN_MTU;
+               }
+               dst_release(dst);
+       }
+}
+
+/**
+ * vti6_tnl_change - update the tunnel parameters
+ *   @t: tunnel to be changed
+ *   @p: tunnel configuration parameters
+ *
+ * Description:
+ *   vti6_tnl_change() updates the tunnel parameters
+ **/
+static int
+vti6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
+{
+       t->parms.laddr = p->laddr;
+       t->parms.raddr = p->raddr;
+       t->parms.link = p->link;
+       t->parms.i_key = p->i_key;
+       t->parms.o_key = p->o_key;
+       t->parms.proto = p->proto;
+       ip6_tnl_dst_reset(t);
+       vti6_link_config(t);
+       return 0;
+}
+
+static int vti6_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
+{
+       struct net *net = dev_net(t->dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+       int err;
+
+       vti6_tnl_unlink(ip6n, t);
+       synchronize_net();
+       err = vti6_tnl_change(t, p);
+       vti6_tnl_link(ip6n, t);
+       netdev_state_change(t->dev);
+       return err;
+}
+
+static void
+vti6_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm2 *u)
+{
+       p->laddr = u->laddr;
+       p->raddr = u->raddr;
+       p->link = u->link;
+       p->i_key = u->i_key;
+       p->o_key = u->o_key;
+       p->proto = u->proto;
+
+       memcpy(p->name, u->name, sizeof(u->name));
+}
+
+static void
+vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
+{
+       u->laddr = p->laddr;
+       u->raddr = p->raddr;
+       u->link = p->link;
+       u->i_key = p->i_key;
+       u->o_key = p->o_key;
+       u->proto = p->proto;
+
+       memcpy(u->name, p->name, sizeof(u->name));
+}
+
+/**
+ * vti6_tnl_ioctl - configure vti6 tunnels from userspace
+ *   @dev: virtual device associated with tunnel
+ *   @ifr: parameters passed from userspace
+ *   @cmd: command to be performed
+ *
+ * Description:
+ *   vti6_ioctl() is used for managing vti6 tunnels
+ *   from userspace.
+ *
+ *   The possible commands are the following:
+ *     %SIOCGETTUNNEL: get tunnel parameters for device
+ *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
+ *     %SIOCCHGTUNNEL: change tunnel parameters to those given
+ *     %SIOCDELTUNNEL: delete tunnel
+ *
+ *   The fallback device "ip6_vti0", created during module
+ *   initialization, can be used for creating other tunnel devices.
+ *
+ * Return:
+ *   0 on success,
+ *   %-EFAULT if unable to copy data to or from userspace,
+ *   %-EPERM if current process hasn't %CAP_NET_ADMIN set
+ *   %-EINVAL if passed tunnel parameters are invalid,
+ *   %-EEXIST if changing a tunnel's parameters would cause a conflict
+ *   %-ENODEV if attempting to change or delete a nonexisting device
+ **/
+static int
+vti6_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+       int err = 0;
+       struct ip6_tnl_parm2 p;
+       struct __ip6_tnl_parm p1;
+       struct ip6_tnl *t = NULL;
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       switch (cmd) {
+       case SIOCGETTUNNEL:
+               if (dev == ip6n->fb_tnl_dev) {
+                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
+                               err = -EFAULT;
+                               break;
+                       }
+                       vti6_parm_from_user(&p1, &p);
+                       t = vti6_locate(net, &p1, 0);
+               } else {
+                       memset(&p, 0, sizeof(p));
+               }
+               if (t == NULL)
+                       t = netdev_priv(dev);
+               vti6_parm_to_user(&p, &t->parms);
+               if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+                       err = -EFAULT;
+               break;
+       case SIOCADDTUNNEL:
+       case SIOCCHGTUNNEL:
+               err = -EPERM;
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+                       break;
+               err = -EFAULT;
+               if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+                       break;
+               err = -EINVAL;
+               if (p.proto != IPPROTO_IPV6  && p.proto != 0)
+                       break;
+               vti6_parm_from_user(&p1, &p);
+               t = vti6_locate(net, &p1, cmd == SIOCADDTUNNEL);
+               if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
+                       if (t != NULL) {
+                               if (t->dev != dev) {
+                                       err = -EEXIST;
+                                       break;
+                               }
+                       } else
+                               t = netdev_priv(dev);
+
+                       err = vti6_update(t, &p1);
+               }
+               if (t) {
+                       err = 0;
+                       vti6_parm_to_user(&p, &t->parms);
+                       if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+                               err = -EFAULT;
+
+               } else
+                       err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+               break;
+       case SIOCDELTUNNEL:
+               err = -EPERM;
+               if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+                       break;
+
+               if (dev == ip6n->fb_tnl_dev) {
+                       err = -EFAULT;
+                       if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+                               break;
+                       err = -ENOENT;
+                       vti6_parm_from_user(&p1, &p);
+                       t = vti6_locate(net, &p1, 0);
+                       if (t == NULL)
+                               break;
+                       err = -EPERM;
+                       if (t->dev == ip6n->fb_tnl_dev)
+                               break;
+                       dev = t->dev;
+               }
+               err = 0;
+               unregister_netdevice(dev);
+               break;
+       default:
+               err = -EINVAL;
+       }
+       return err;
+}
+
+/**
+ * vti6_tnl_change_mtu - change mtu manually for tunnel device
+ *   @dev: virtual device associated with tunnel
+ *   @new_mtu: the new mtu
+ *
+ * Return:
+ *   0 on success,
+ *   %-EINVAL if mtu too small
+ **/
+static int vti6_change_mtu(struct net_device *dev, int new_mtu)
+{
+       if (new_mtu < IPV6_MIN_MTU)
+               return -EINVAL;
+
+       dev->mtu = new_mtu;
+       return 0;
+}
+
+static const struct net_device_ops vti6_netdev_ops = {
+       .ndo_uninit     = vti6_dev_uninit,
+       .ndo_start_xmit = vti6_tnl_xmit,
+       .ndo_do_ioctl   = vti6_ioctl,
+       .ndo_change_mtu = vti6_change_mtu,
+       .ndo_get_stats  = vti6_get_stats,
+};
+
+/**
+ * vti6_dev_setup - setup virtual tunnel device
+ *   @dev: virtual device associated with tunnel
+ *
+ * Description:
+ *   Initialize function pointers and device parameters
+ **/
+static void vti6_dev_setup(struct net_device *dev)
+{
+       struct ip6_tnl *t;
+
+       dev->netdev_ops = &vti6_netdev_ops;
+       dev->destructor = vti6_dev_free;
+
+       dev->type = ARPHRD_TUNNEL6;
+       dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
+       dev->mtu = ETH_DATA_LEN;
+       t = netdev_priv(dev);
+       dev->flags |= IFF_NOARP;
+       dev->addr_len = sizeof(struct in6_addr);
+       dev->features |= NETIF_F_NETNS_LOCAL;
+       dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+/**
+ * vti6_dev_init_gen - general initializer for all tunnel devices
+ *   @dev: virtual device associated with tunnel
+ **/
+static inline int vti6_dev_init_gen(struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+
+       t->dev = dev;
+       t->net = dev_net(dev);
+       dev->tstats = alloc_percpu(struct pcpu_tstats);
+       if (!dev->tstats)
+               return -ENOMEM;
+       return 0;
+}
+
+/**
+ * vti6_dev_init - initializer for all non fallback tunnel devices
+ *   @dev: virtual device associated with tunnel
+ **/
+static int vti6_dev_init(struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       int err = vti6_dev_init_gen(dev);
+
+       if (err)
+               return err;
+       vti6_link_config(t);
+       return 0;
+}
+
+/**
+ * vti6_fb_tnl_dev_init - initializer for fallback tunnel device
+ *   @dev: fallback device
+ *
+ * Return: 0
+ **/
+static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
+{
+       struct ip6_tnl *t = netdev_priv(dev);
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+       int err = vti6_dev_init_gen(dev);
+
+       if (err)
+               return err;
+
+       t->parms.proto = IPPROTO_IPV6;
+       dev_hold(dev);
+
+       vti6_link_config(t);
+
+       rcu_assign_pointer(ip6n->tnls_wc[0], t);
+       return 0;
+}
+
+static int vti6_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+       return 0;
+}
+
+static void vti6_netlink_parms(struct nlattr *data[],
+                              struct __ip6_tnl_parm *parms)
+{
+       memset(parms, 0, sizeof(*parms));
+
+       if (!data)
+               return;
+
+       if (data[IFLA_VTI_LINK])
+               parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
+
+       if (data[IFLA_VTI_LOCAL])
+               nla_memcpy(&parms->laddr, data[IFLA_VTI_LOCAL],
+                          sizeof(struct in6_addr));
+
+       if (data[IFLA_VTI_REMOTE])
+               nla_memcpy(&parms->raddr, data[IFLA_VTI_REMOTE],
+                          sizeof(struct in6_addr));
+
+       if (data[IFLA_VTI_IKEY])
+               parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);
+
+       if (data[IFLA_VTI_OKEY])
+               parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
+}
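
vti6_netlink_parms() above zeroes the parameter block and then lets only the attributes the caller actually supplied override the defaults. A userspace sketch of that optional-attribute pattern; the attribute representation is a toy, not the kernel's nlattr API:

/* Zero the parameter block, then apply only the attributes that are present. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

enum { ATTR_LINK, ATTR_IKEY, ATTR_OKEY, ATTR_MAX };

struct attr {
        int present;
        uint32_t value;
};

struct tnl_parms {
        uint32_t link, i_key, o_key;
};

static void parms_from_attrs(struct tnl_parms *p, const struct attr attrs[ATTR_MAX])
{
        memset(p, 0, sizeof(*p));       /* unsupplied attributes keep their defaults */

        if (attrs[ATTR_LINK].present)
                p->link = attrs[ATTR_LINK].value;
        if (attrs[ATTR_IKEY].present)
                p->i_key = attrs[ATTR_IKEY].value;
        if (attrs[ATTR_OKEY].present)
                p->o_key = attrs[ATTR_OKEY].value;
}

int main(void)
{
        struct attr attrs[ATTR_MAX] = { [ATTR_IKEY] = { 1, 42 } };
        struct tnl_parms p;

        parms_from_attrs(&p, attrs);
        printf("link=%u ikey=%u okey=%u\n", p.link, p.i_key, p.o_key);
        return 0;
}
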
+
+static int vti6_newlink(struct net *src_net, struct net_device *dev,
+                       struct nlattr *tb[], struct nlattr *data[])
+{
+       struct net *net = dev_net(dev);
+       struct ip6_tnl *nt;
+
+       nt = netdev_priv(dev);
+       vti6_netlink_parms(data, &nt->parms);
+
+       nt->parms.proto = IPPROTO_IPV6;
+
+       if (vti6_locate(net, &nt->parms, 0))
+               return -EEXIST;
+
+       return vti6_tnl_create2(dev);
+}
+
+static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
+                          struct nlattr *data[])
+{
+       struct ip6_tnl *t;
+       struct __ip6_tnl_parm p;
+       struct net *net = dev_net(dev);
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       if (dev == ip6n->fb_tnl_dev)
+               return -EINVAL;
+
+       vti6_netlink_parms(data, &p);
+
+       t = vti6_locate(net, &p, 0);
+
+       if (t) {
+               if (t->dev != dev)
+                       return -EEXIST;
+       } else
+               t = netdev_priv(dev);
+
+       return vti6_update(t, &p);
+}
+
+static size_t vti6_get_size(const struct net_device *dev)
+{
+       return
+               /* IFLA_VTI_LINK */
+               nla_total_size(4) +
+               /* IFLA_VTI_LOCAL */
+               nla_total_size(sizeof(struct in6_addr)) +
+               /* IFLA_VTI_REMOTE */
+               nla_total_size(sizeof(struct in6_addr)) +
+               /* IFLA_VTI_IKEY */
+               nla_total_size(4) +
+               /* IFLA_VTI_OKEY */
+               nla_total_size(4) +
+               0;
+}
+
+static int vti6_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+       struct ip6_tnl *tunnel = netdev_priv(dev);
+       struct __ip6_tnl_parm *parm = &tunnel->parms;
+
+       if (nla_put_u32(skb, IFLA_VTI_LINK, parm->link) ||
+           nla_put(skb, IFLA_VTI_LOCAL, sizeof(struct in6_addr),
+                   &parm->laddr) ||
+           nla_put(skb, IFLA_VTI_REMOTE, sizeof(struct in6_addr),
+                   &parm->raddr) ||
+           nla_put_be32(skb, IFLA_VTI_IKEY, parm->i_key) ||
+           nla_put_be32(skb, IFLA_VTI_OKEY, parm->o_key))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static const struct nla_policy vti6_policy[IFLA_VTI_MAX + 1] = {
+       [IFLA_VTI_LINK]         = { .type = NLA_U32 },
+       [IFLA_VTI_LOCAL]        = { .len = sizeof(struct in6_addr) },
+       [IFLA_VTI_REMOTE]       = { .len = sizeof(struct in6_addr) },
+       [IFLA_VTI_IKEY]         = { .type = NLA_U32 },
+       [IFLA_VTI_OKEY]         = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops vti6_link_ops __read_mostly = {
+       .kind           = "vti6",
+       .maxtype        = IFLA_VTI_MAX,
+       .policy         = vti6_policy,
+       .priv_size      = sizeof(struct ip6_tnl),
+       .setup          = vti6_dev_setup,
+       .validate       = vti6_validate,
+       .newlink        = vti6_newlink,
+       .changelink     = vti6_changelink,
+       .get_size       = vti6_get_size,
+       .fill_info      = vti6_fill_info,
+};
+
+static struct xfrm_tunnel_notifier vti6_handler __read_mostly = {
+       .handler        = vti6_rcv,
+       .priority       = 1,
+};
+
+static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n)
+{
+       int h;
+       struct ip6_tnl *t;
+       LIST_HEAD(list);
+
+       for (h = 0; h < HASH_SIZE; h++) {
+               t = rtnl_dereference(ip6n->tnls_r_l[h]);
+               while (t != NULL) {
+                       unregister_netdevice_queue(t->dev, &list);
+                       t = rtnl_dereference(t->next);
+               }
+       }
+
+       t = rtnl_dereference(ip6n->tnls_wc[0]);
+       unregister_netdevice_queue(t->dev, &list);
+       unregister_netdevice_many(&list);
+}
+
+static int __net_init vti6_init_net(struct net *net)
+{
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+       struct ip6_tnl *t = NULL;
+       int err;
+
+       ip6n->tnls[0] = ip6n->tnls_wc;
+       ip6n->tnls[1] = ip6n->tnls_r_l;
+
+       err = -ENOMEM;
+       ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
+                                       vti6_dev_setup);
+
+       if (!ip6n->fb_tnl_dev)
+               goto err_alloc_dev;
+       dev_net_set(ip6n->fb_tnl_dev, net);
+
+       err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
+       if (err < 0)
+               goto err_register;
+
+       err = register_netdev(ip6n->fb_tnl_dev);
+       if (err < 0)
+               goto err_register;
+
+       t = netdev_priv(ip6n->fb_tnl_dev);
+
+       strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
+       return 0;
+
+err_register:
+       vti6_dev_free(ip6n->fb_tnl_dev);
+err_alloc_dev:
+       return err;
+}
+
+static void __net_exit vti6_exit_net(struct net *net)
+{
+       struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+
+       rtnl_lock();
+       vti6_destroy_tunnels(ip6n);
+       rtnl_unlock();
+}
+
+static struct pernet_operations vti6_net_ops = {
+       .init = vti6_init_net,
+       .exit = vti6_exit_net,
+       .id   = &vti6_net_id,
+       .size = sizeof(struct vti6_net),
+};
+
+/**
+ * vti6_tunnel_init - register protocol and reserve needed resources
+ *
+ * Return: 0 on success, or a negative error code on failure
+ **/
+static int __init vti6_tunnel_init(void)
+{
+       int  err;
+
+       err = register_pernet_device(&vti6_net_ops);
+       if (err < 0)
+               goto out_pernet;
+
+       err = xfrm6_mode_tunnel_input_register(&vti6_handler);
+       if (err < 0) {
+               pr_err("%s: can't register vti6\n", __func__);
+               goto out;
+       }
+       err = rtnl_link_register(&vti6_link_ops);
+       if (err < 0)
+               goto rtnl_link_failed;
+
+       return 0;
+
+rtnl_link_failed:
+       xfrm6_mode_tunnel_input_deregister(&vti6_handler);
+out:
+       unregister_pernet_device(&vti6_net_ops);
+out_pernet:
+       return err;
+}
+
+/**
+ * vti6_tunnel_cleanup - free resources and unregister protocol
+ **/
+static void __exit vti6_tunnel_cleanup(void)
+{
+       rtnl_link_unregister(&vti6_link_ops);
+       if (xfrm6_mode_tunnel_input_deregister(&vti6_handler))
+               pr_info("%s: can't deregister vti6\n", __func__);
+
+       unregister_pernet_device(&vti6_net_ops);
+}
+
+module_init(vti6_tunnel_init);
+module_exit(vti6_tunnel_cleanup);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("vti6");
+MODULE_ALIAS_NETDEV("ip6_vti0");
+MODULE_AUTHOR("Steffen Klassert");
+MODULE_DESCRIPTION("IPv6 virtual tunnel interface");
index 5636a912074acb8ecf445d9d6df753ff8e3eba95..ce507d9e1c900d3990e6025b37087309f850eaca 100644 (file)
@@ -64,8 +64,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                (struct ip_comp_hdr *)(skb->data + offset);
        struct xfrm_state *x;
 
-       if (type != ICMPV6_DEST_UNREACH &&
-           type != ICMPV6_PKT_TOOBIG &&
+       if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return;
 
index d1e2e8ef29c54abd86064728ec145f0478f360af..4919a8e6063ed81b608bfe497a3d7d96f7fb3ca8 100644 (file)
@@ -174,7 +174,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                        }
 
                        if (ipv6_only_sock(sk) ||
-                           !ipv6_addr_v4mapped(&np->daddr)) {
+                           !ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
                                retv = -EADDRNOTAVAIL;
                                break;
                        }
@@ -1011,7 +1011,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
-                               src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
+                               src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr : np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxhlim) {
@@ -1026,7 +1026,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                                struct in6_pktinfo src_info;
                                src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif :
                                        np->sticky_pktinfo.ipi6_ifindex;
-                               src_info.ipi6_addr = np->mcast_oif ? np->daddr : np->sticky_pktinfo.ipi6_addr;
+                               src_info.ipi6_addr = np->mcast_oif ? sk->sk_v6_daddr :
+                                                                    np->sticky_pktinfo.ipi6_addr;
                                put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
                        }
                        if (np->rxopt.bits.rxohlim) {
index a7f842b29b67b2ca5ff2c2cbe13588e3098ce44b..7702f9e90a043ac458b63a2e0a07ed3975d37aa6 100644 (file)
@@ -25,6 +25,19 @@ config NF_CONNTRACK_IPV6
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_TABLES_IPV6
+       depends on NF_TABLES
+       tristate "IPv6 nf_tables support"
+
+config NFT_CHAIN_ROUTE_IPV6
+       depends on NF_TABLES_IPV6
+       tristate "IPv6 nf_tables route chain support"
+
+config NFT_CHAIN_NAT_IPV6
+       depends on NF_TABLES_IPV6
+       depends on NF_NAT_IPV6 && NFT_NAT
+       tristate "IPv6 nf_tables nat chain support"
+
 config IP6_NF_IPTABLES
        tristate "IP6 tables support (required for filtering)"
        depends on INET && IPV6
index 2b53738f798cd3a9898d89a2a1a76b48ffe983b8..d1b4928f34f7ba3f80200ec0b9db67c12d47715b 100644 (file)
@@ -23,6 +23,11 @@ obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
 nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
 
+# nf_tables
+obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
+obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
+obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
+
 # matches
 obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
index 44400c216dc6361e4607fe9af3e2999639766a11..710238f58aa93c0319cf90adb0c203c35f8bc7e4 100644 (file)
@@ -349,6 +349,11 @@ ip6t_do_table(struct sk_buff *skb,
        local_bh_disable();
        addend = xt_write_recseq_begin();
        private = table->private;
+       /*
+        * Ensure we load private-> members after we've fetched the base
+        * pointer.
+        */
+       smp_read_barrier_depends();
        cpu        = smp_processor_id();
        table_base = private->entries[cpu];
        jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
index 2748b042da72eceb4002cf4183a781282c5e84d4..bf9f612c1bc24eb9eb0b26fbf7d0a62f74932735 100644 (file)
@@ -312,7 +312,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        return XT_CONTINUE;
 }
 
-static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
+static unsigned int ipv6_synproxy_hook(const struct nf_hook_ops *ops,
                                       struct sk_buff *skb,
                                       const struct net_device *in,
                                       const struct net_device *out,
index 29b44b14c5ea84bfa50f96d9e6f4f9914dc1e828..ca7f6c1280861b2977dce643fdea349eb3ec5078 100644 (file)
@@ -32,13 +32,14 @@ static const struct xt_table packet_filter = {
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_filter_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_filter_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
        const struct net *net = dev_net((in != NULL) ? in : out);
 
-       return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_filter);
+       return ip6t_do_table(skb, ops->hooknum, in, out,
+                            net->ipv6.ip6table_filter);
 }
 
 static struct nf_hook_ops *filter_ops __read_mostly;
index c705907ae6ab4178f683c0432b45aff0630ad2f5..307bbb782d147011d689f04c92e0ba5ac7c13074 100644 (file)
@@ -76,17 +76,17 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_mangle_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_mangle_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
-       if (hook == NF_INET_LOCAL_OUT)
+       if (ops->hooknum == NF_INET_LOCAL_OUT)
                return ip6t_mangle_out(skb, out);
-       if (hook == NF_INET_POST_ROUTING)
-               return ip6t_do_table(skb, hook, in, out,
+       if (ops->hooknum == NF_INET_POST_ROUTING)
+               return ip6t_do_table(skb, ops->hooknum, in, out,
                                     dev_net(out)->ipv6.ip6table_mangle);
        /* INPUT/FORWARD */
-       return ip6t_do_table(skb, hook, in, out,
+       return ip6t_do_table(skb, ops->hooknum, in, out,
                             dev_net(in)->ipv6.ip6table_mangle);
 }
 
index 9b076d2d3a7b6ec6a495302efd97324e9bc95b14..84c7f33d0cf858115abdb5f359a658b574637d57 100644 (file)
@@ -63,7 +63,7 @@ static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv6_fn(unsigned int hooknum,
+nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
               struct sk_buff *skb,
               const struct net_device *in,
               const struct net_device *out,
@@ -72,7 +72,7 @@ nf_nat_ipv6_fn(unsigned int hooknum,
        struct nf_conn *ct;
        enum ip_conntrack_info ctinfo;
        struct nf_conn_nat *nat;
-       enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
        __be16 frag_off;
        int hdrlen;
        u8 nexthdr;
@@ -111,7 +111,8 @@ nf_nat_ipv6_fn(unsigned int hooknum,
 
                if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
                        if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
-                                                            hooknum, hdrlen))
+                                                            ops->hooknum,
+                                                            hdrlen))
                                return NF_DROP;
                        else
                                return NF_ACCEPT;
@@ -124,14 +125,14 @@ nf_nat_ipv6_fn(unsigned int hooknum,
                if (!nf_nat_initialized(ct, maniptype)) {
                        unsigned int ret;
 
-                       ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
+                       ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
                        if (ret != NF_ACCEPT)
                                return ret;
                } else {
                        pr_debug("Already setup manip %s for ct %p\n",
                                 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
                                 ct);
-                       if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+                       if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
                                goto oif_changed;
                }
                break;
@@ -140,11 +141,11 @@ nf_nat_ipv6_fn(unsigned int hooknum,
                /* ESTABLISHED */
                NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
                             ctinfo == IP_CT_ESTABLISHED_REPLY);
-               if (nf_nat_oif_changed(hooknum, ctinfo, nat, out))
+               if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
                        goto oif_changed;
        }
 
-       return nf_nat_packet(ct, ctinfo, hooknum, skb);
+       return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
 
 oif_changed:
        nf_ct_kill_acct(ct, ctinfo, skb);
@@ -152,7 +153,7 @@ oif_changed:
 }
 
 static unsigned int
-nf_nat_ipv6_in(unsigned int hooknum,
+nf_nat_ipv6_in(const struct nf_hook_ops *ops,
               struct sk_buff *skb,
               const struct net_device *in,
               const struct net_device *out,
@@ -161,7 +162,7 @@ nf_nat_ipv6_in(unsigned int hooknum,
        unsigned int ret;
        struct in6_addr daddr = ipv6_hdr(skb)->daddr;
 
-       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
                skb_dst_drop(skb);
@@ -170,7 +171,7 @@ nf_nat_ipv6_in(unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv6_out(unsigned int hooknum,
+nf_nat_ipv6_out(const struct nf_hook_ops *ops,
                struct sk_buff *skb,
                const struct net_device *in,
                const struct net_device *out,
@@ -187,7 +188,7 @@ nf_nat_ipv6_out(unsigned int hooknum,
        if (skb->len < sizeof(struct ipv6hdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
 #ifdef CONFIG_XFRM
        if (ret != NF_DROP && ret != NF_STOLEN &&
            !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
@@ -209,7 +210,7 @@ nf_nat_ipv6_out(unsigned int hooknum,
 }
 
 static unsigned int
-nf_nat_ipv6_local_fn(unsigned int hooknum,
+nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
                     struct sk_buff *skb,
                     const struct net_device *in,
                     const struct net_device *out,
@@ -224,7 +225,7 @@ nf_nat_ipv6_local_fn(unsigned int hooknum,
        if (skb->len < sizeof(struct ipv6hdr))
                return NF_ACCEPT;
 
-       ret = nf_nat_ipv6_fn(hooknum, skb, in, out, okfn);
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
        if (ret != NF_DROP && ret != NF_STOLEN &&
            (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
                enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
index 9a626d86720fc6a0d1b4eeffa79a85b47a0dec2f..5274740acecc93b4550dabdd7f48fac3c04f67ac 100644 (file)
@@ -19,13 +19,14 @@ static const struct xt_table packet_raw = {
 
 /* The work comes in here from netfilter.c. */
 static unsigned int
-ip6table_raw_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_raw_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                  const struct net_device *in, const struct net_device *out,
                  int (*okfn)(struct sk_buff *))
 {
        const struct net *net = dev_net((in != NULL) ? in : out);
 
-       return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_raw);
+       return ip6t_do_table(skb, ops->hooknum, in, out,
+                            net->ipv6.ip6table_raw);
 }
 
 static struct nf_hook_ops *rawtable_ops __read_mostly;
index ce88d1d7e5255d69ee102229e463f12701bdac99..ab3b0219ecfa436c07eb5cb86af36bd04efbdfb7 100644 (file)
@@ -36,14 +36,15 @@ static const struct xt_table security_table = {
 };
 
 static unsigned int
-ip6table_security_hook(unsigned int hook, struct sk_buff *skb,
+ip6table_security_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
                       const struct net_device *in,
                       const struct net_device *out,
                       int (*okfn)(struct sk_buff *))
 {
        const struct net *net = dev_net((in != NULL) ? in : out);
 
-       return ip6t_do_table(skb, hook, in, out, net->ipv6.ip6table_security);
+       return ip6t_do_table(skb, ops->hooknum, in, out,
+                            net->ipv6.ip6table_security);
 }
 
 static struct nf_hook_ops *sectbl_ops __read_mostly;
index d6e4dd8b58dfaf67a9f64fe8bfafcf2f9f6b24ea..486545eb42ce5c1a64ad55385e1bca31f2557687 100644 (file)
@@ -95,7 +95,7 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
        return NF_ACCEPT;
 }
 
-static unsigned int ipv6_helper(unsigned int hooknum,
+static unsigned int ipv6_helper(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
                                const struct net_device *in,
                                const struct net_device *out,
@@ -133,7 +133,7 @@ static unsigned int ipv6_helper(unsigned int hooknum,
        return helper->help(skb, protoff, ct, ctinfo);
 }
 
-static unsigned int ipv6_confirm(unsigned int hooknum,
+static unsigned int ipv6_confirm(const struct nf_hook_ops *ops,
                                 struct sk_buff *skb,
                                 const struct net_device *in,
                                 const struct net_device *out,
@@ -219,16 +219,17 @@ static unsigned int __ipv6_conntrack_in(struct net *net,
        return nf_conntrack_in(net, PF_INET6, hooknum, skb);
 }
 
-static unsigned int ipv6_conntrack_in(unsigned int hooknum,
+static unsigned int ipv6_conntrack_in(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
 {
-       return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn);
+       return __ipv6_conntrack_in(dev_net(in), ops->hooknum, skb, in, out,
+                                  okfn);
 }
 
-static unsigned int ipv6_conntrack_local(unsigned int hooknum,
+static unsigned int ipv6_conntrack_local(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
                                         const struct net_device *in,
                                         const struct net_device *out,
@@ -239,7 +240,8 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
                net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
                return NF_ACCEPT;
        }
-       return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn);
+       return __ipv6_conntrack_in(dev_net(out), ops->hooknum, skb, in, out,
+                                  okfn);
 }
 
 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
@@ -297,9 +299,9 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
        struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
        struct nf_conn *ct;
 
-       tuple.src.u3.in6 = inet6->rcv_saddr;
+       tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
        tuple.src.u.tcp.port = inet->inet_sport;
-       tuple.dst.u3.in6 = inet6->daddr;
+       tuple.dst.u3.in6 = sk->sk_v6_daddr;
        tuple.dst.u.tcp.port = inet->inet_dport;
        tuple.dst.protonum = sk->sk_protocol;
 
index dffdc1a389c57da1d16ebeda9b9aff44d6cd961a..4a258263d8ecbca612a008a390962fe0b9b68833 100644 (file)
@@ -144,12 +144,24 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
        return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
 }
 
+static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
+                                const struct in6_addr *daddr)
+{
+       u32 c;
+
+       net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
+       c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+                        (__force u32)id, nf_frags.rnd);
+       return c & (INETFRAGS_HASHSZ - 1);
+}
+
+
 static unsigned int nf_hashfn(struct inet_frag_queue *q)
 {
        const struct frag_queue *nq;
 
        nq = container_of(q, struct frag_queue, q);
-       return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd);
+       return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
 }
 
 static void nf_skb_free(struct sk_buff *skb)
@@ -185,7 +197,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
        arg.ecn = ecn;
 
        read_lock_bh(&nf_frags.lock);
-       hash = inet6_hash_frag(id, src, dst, nf_frags.rnd);
+       hash = nf_hash_frag(id, src, dst);
 
        q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
        local_bh_enable();
index aacd121fe8c54365607f49e40679d7fd08dcd8e8..ec483aa3f60f82d7e66b0b121296774d0e56565f 100644 (file)
@@ -52,7 +52,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
 
 }
 
-static unsigned int ipv6_defrag(unsigned int hooknum,
+static unsigned int ipv6_defrag(const struct nf_hook_ops *ops,
                                struct sk_buff *skb,
                                const struct net_device *in,
                                const struct net_device *out,
@@ -66,7 +66,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
                return NF_ACCEPT;
 #endif
 
-       reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(hooknum, skb));
+       reasm = nf_ct_frag6_gather(skb, nf_ct6_defrag_user(ops->hooknum, skb));
        /* queued */
        if (reasm == NULL)
                return NF_STOLEN;
@@ -75,7 +75,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
        if (reasm == skb)
                return NF_ACCEPT;
 
-       nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
+       nf_ct_frag6_output(ops->hooknum, reasm, (struct net_device *)in,
                           (struct net_device *)out, okfn);
 
        return NF_STOLEN;
diff --git a/net/ipv6/netfilter/nf_tables_ipv6.c b/net/ipv6/netfilter/nf_tables_ipv6.c
new file mode 100644 (file)
index 0000000..d77db8a
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012-2013 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+
+static unsigned int nft_ipv6_output(const struct nf_hook_ops *ops,
+                                   struct sk_buff *skb,
+                                   const struct net_device *in,
+                                   const struct net_device *out,
+                                   int (*okfn)(struct sk_buff *))
+{
+       struct nft_pktinfo pkt;
+
+       if (unlikely(skb->len < sizeof(struct ipv6hdr))) {
+               if (net_ratelimit())
+                       pr_info("nf_tables_ipv6: ignoring short SOCK_RAW "
+                               "packet\n");
+               return NF_ACCEPT;
+       }
+       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+               return NF_DROP;
+
+       return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nft_af_info nft_af_ipv6 __read_mostly = {
+       .family         = NFPROTO_IPV6,
+       .nhooks         = NF_INET_NUMHOOKS,
+       .owner          = THIS_MODULE,
+       .hooks          = {
+               [NF_INET_LOCAL_OUT]     = nft_ipv6_output,
+       },
+};
+
+static int nf_tables_ipv6_init_net(struct net *net)
+{
+       net->nft.ipv6 = kmalloc(sizeof(struct nft_af_info), GFP_KERNEL);
+       if (net->nft.ipv6 == NULL)
+               return -ENOMEM;
+
+       memcpy(net->nft.ipv6, &nft_af_ipv6, sizeof(nft_af_ipv6));
+
+       if (nft_register_afinfo(net, net->nft.ipv6) < 0)
+               goto err;
+
+       return 0;
+err:
+       kfree(net->nft.ipv6);
+       return -ENOMEM;
+}
+
+static void nf_tables_ipv6_exit_net(struct net *net)
+{
+       nft_unregister_afinfo(net->nft.ipv6);
+       kfree(net->nft.ipv6);
+}
+
+static struct pernet_operations nf_tables_ipv6_net_ops = {
+       .init   = nf_tables_ipv6_init_net,
+       .exit   = nf_tables_ipv6_exit_net,
+};
+
+static unsigned int
+nft_do_chain_ipv6(const struct nf_hook_ops *ops,
+                 struct sk_buff *skb,
+                 const struct net_device *in,
+                 const struct net_device *out,
+                 int (*okfn)(struct sk_buff *))
+{
+       struct nft_pktinfo pkt;
+
+       /* malformed packet, drop it */
+       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+               return NF_DROP;
+
+       return nft_do_chain_pktinfo(&pkt, ops);
+}
+
+static struct nf_chain_type filter_ipv6 = {
+       .family         = NFPROTO_IPV6,
+       .name           = "filter",
+       .type           = NFT_CHAIN_T_DEFAULT,
+       .hook_mask      = (1 << NF_INET_LOCAL_IN) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_FORWARD) |
+                         (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING),
+       .fn             = {
+               [NF_INET_LOCAL_IN]      = nft_do_chain_ipv6,
+               [NF_INET_LOCAL_OUT]     = nft_ipv6_output,
+               [NF_INET_FORWARD]       = nft_do_chain_ipv6,
+               [NF_INET_PRE_ROUTING]   = nft_do_chain_ipv6,
+               [NF_INET_POST_ROUTING]  = nft_do_chain_ipv6,
+       },
+};
+
+static int __init nf_tables_ipv6_init(void)
+{
+       nft_register_chain_type(&filter_ipv6);
+       return register_pernet_subsys(&nf_tables_ipv6_net_ops);
+}
+
+static void __exit nf_tables_ipv6_exit(void)
+{
+       unregister_pernet_subsys(&nf_tables_ipv6_net_ops);
+       nft_unregister_chain_type(&filter_ipv6);
+}
+
+module_init(nf_tables_ipv6_init);
+module_exit(nf_tables_ipv6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_FAMILY(AF_INET6);
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
new file mode 100644 (file)
index 0000000..e86dcd7
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ipv6.h>
+
+/*
+ * IPv6 NAT chains
+ */
+
+static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
+                             struct sk_buff *skb,
+                             const struct net_device *in,
+                             const struct net_device *out,
+                             int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+       struct nf_conn_nat *nat;
+       enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+       __be16 frag_off;
+       int hdrlen;
+       u8 nexthdr;
+       struct nft_pktinfo pkt;
+       unsigned int ret;
+
+       if (ct == NULL || nf_ct_is_untracked(ct))
+               return NF_ACCEPT;
+
+       nat = nfct_nat(ct);
+       if (nat == NULL) {
+               /* Conntrack module was loaded late, can't add extension. */
+               if (nf_ct_is_confirmed(ct))
+                       return NF_ACCEPT;
+               nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
+               if (nat == NULL)
+                       return NF_ACCEPT;
+       }
+
+       switch (ctinfo) {
+       case IP_CT_RELATED:
+       case IP_CT_RELATED + IP_CT_IS_REPLY:
+               nexthdr = ipv6_hdr(skb)->nexthdr;
+               hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+                                         &nexthdr, &frag_off);
+
+               if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
+                       if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
+                                                          ops->hooknum,
+                                                          hdrlen))
+                               return NF_DROP;
+                       else
+                               return NF_ACCEPT;
+               }
+               /* Fall through */
+       case IP_CT_NEW:
+               if (nf_nat_initialized(ct, maniptype))
+                       break;
+
+               nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
+
+               ret = nft_do_chain_pktinfo(&pkt, ops);
+               if (ret != NF_ACCEPT)
+                       return ret;
+               if (!nf_nat_initialized(ct, maniptype)) {
+                       ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+                       if (ret != NF_ACCEPT)
+                               return ret;
+               }
+       default:
+               break;
+       }
+
+       return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+}
+
+static unsigned int nf_nat_ipv6_prerouting(const struct nf_hook_ops *ops,
+                                     struct sk_buff *skb,
+                                     const struct net_device *in,
+                                     const struct net_device *out,
+                                     int (*okfn)(struct sk_buff *))
+{
+       struct in6_addr daddr = ipv6_hdr(skb)->daddr;
+       unsigned int ret;
+
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
+               skb_dst_drop(skb);
+
+       return ret;
+}
+
+static unsigned int nf_nat_ipv6_postrouting(const struct nf_hook_ops *ops,
+                                      struct sk_buff *skb,
+                                      const struct net_device *in,
+                                      const struct net_device *out,
+                                      int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo __maybe_unused;
+       const struct nf_conn *ct __maybe_unused;
+       unsigned int ret;
+
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+#ifdef CONFIG_XFRM
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+                                     &ct->tuplehash[!dir].tuple.dst.u3) ||
+                   (ct->tuplehash[dir].tuple.src.u.all !=
+                    ct->tuplehash[!dir].tuple.dst.u.all))
+                       if (nf_xfrm_me_harder(skb, AF_INET6) < 0)
+                               ret = NF_DROP;
+       }
+#endif
+       return ret;
+}
+
+static unsigned int nf_nat_ipv6_output(const struct nf_hook_ops *ops,
+                                 struct sk_buff *skb,
+                                 const struct net_device *in,
+                                 const struct net_device *out,
+                                 int (*okfn)(struct sk_buff *))
+{
+       enum ip_conntrack_info ctinfo;
+       const struct nf_conn *ct;
+       unsigned int ret;
+
+       ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
+       if (ret != NF_DROP && ret != NF_STOLEN &&
+           (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+               enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+               if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
+                                     &ct->tuplehash[!dir].tuple.src.u3)) {
+                       if (ip6_route_me_harder(skb))
+                               ret = NF_DROP;
+               }
+#ifdef CONFIG_XFRM
+               else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+                        ct->tuplehash[dir].tuple.dst.u.all !=
+                        ct->tuplehash[!dir].tuple.src.u.all)
+                       if (nf_xfrm_me_harder(skb, AF_INET6))
+                               ret = NF_DROP;
+#endif
+       }
+       return ret;
+}
+
+static struct nf_chain_type nft_chain_nat_ipv6 = {
+       .family         = NFPROTO_IPV6,
+       .name           = "nat",
+       .type           = NFT_CHAIN_T_NAT,
+       .hook_mask      = (1 << NF_INET_PRE_ROUTING) |
+                         (1 << NF_INET_POST_ROUTING) |
+                         (1 << NF_INET_LOCAL_OUT) |
+                         (1 << NF_INET_LOCAL_IN),
+       .fn             = {
+               [NF_INET_PRE_ROUTING]   = nf_nat_ipv6_prerouting,
+               [NF_INET_POST_ROUTING]  = nf_nat_ipv6_postrouting,
+               [NF_INET_LOCAL_OUT]     = nf_nat_ipv6_output,
+               [NF_INET_LOCAL_IN]      = nf_nat_ipv6_fn,
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init nft_chain_nat_ipv6_init(void)
+{
+       int err;
+
+       err = nft_register_chain_type(&nft_chain_nat_ipv6);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static void __exit nft_chain_nat_ipv6_exit(void)
+{
+       nft_unregister_chain_type(&nft_chain_nat_ipv6);
+}
+
+module_init(nft_chain_nat_ipv6_init);
+module_exit(nft_chain_nat_ipv6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
diff --git a/net/ipv6/netfilter/nft_chain_route_ipv6.c b/net/ipv6/netfilter/nft_chain_route_ipv6.c
new file mode 100644 (file)
index 0000000..3fe40f0
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_ipv6.h>
+#include <net/route.h>
+
+static unsigned int nf_route_table_hook(const struct nf_hook_ops *ops,
+                                       struct sk_buff *skb,
+                                       const struct net_device *in,
+                                       const struct net_device *out,
+                                       int (*okfn)(struct sk_buff *))
+{
+       unsigned int ret;
+       struct nft_pktinfo pkt;
+       struct in6_addr saddr, daddr;
+       u_int8_t hop_limit;
+       u32 mark, flowlabel;
+
+       /* malformed packet, drop it */
+       if (nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out) < 0)
+               return NF_DROP;
+
+       /* save source/dest address, mark, hoplimit, flowlabel, priority */
+       memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
+       memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
+       mark = skb->mark;
+       hop_limit = ipv6_hdr(skb)->hop_limit;
+
+       /* flowlabel and prio (includes version, which shouldn't change either) */
+       flowlabel = *((u32 *)ipv6_hdr(skb));
+
+       ret = nft_do_chain_pktinfo(&pkt, ops);
+       if (ret != NF_DROP && ret != NF_QUEUE &&
+           (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
+            memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
+            skb->mark != mark ||
+            ipv6_hdr(skb)->hop_limit != hop_limit ||
+            flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
+               return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
+
+       return ret;
+}
+
+static struct nf_chain_type nft_chain_route_ipv6 = {
+       .family         = NFPROTO_IPV6,
+       .name           = "route",
+       .type           = NFT_CHAIN_T_ROUTE,
+       .hook_mask      = (1 << NF_INET_LOCAL_OUT),
+       .fn             = {
+               [NF_INET_LOCAL_OUT]     = nf_route_table_hook,
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init nft_chain_route_init(void)
+{
+       return nft_register_chain_type(&nft_chain_route_ipv6);
+}
+
+static void __exit nft_chain_route_exit(void)
+{
+       nft_unregister_chain_type(&nft_chain_route_ipv6);
+}
+
+module_init(nft_chain_route_init);
+module_exit(nft_chain_route_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_CHAIN(AF_INET6, "route");
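
The single-u32 snapshot taken by nf_route_table_hook() works because the first 32-bit word of the IPv6 header packs version (4 bits), traffic class (8 bits) and flow label (20 bits), so one compare covers all three fields at once. A small illustrative decode follows; the field values are arbitrary examples, and the kernel compares the raw on-wire word rather than decoding it like this.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* version 6, example traffic class 0xb8, example flow label 0x12345 */
	uint32_t first_word = (6u << 28) | (0xb8u << 20) | 0x12345u;

	printf("version=%u tclass=0x%02x flowlabel=0x%05x\n",
	       first_word >> 28,
	       (first_word >> 20) & 0xff,
	       first_word & 0xfffff);
	return 0;
}
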
index 18f19df4189f19734564b5fcfbd46ec22b67a1e8..8815e31a87fed4ba51ebd78a6724df8a3a436b85 100644 (file)
@@ -116,7 +116,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
-               daddr = &np->daddr;
+               daddr = &sk->sk_v6_daddr;
        }
 
        if (!iif)
index a4ed2416399ed52622b9c60460d411b9507e6d85..3c00842b0079240f1788c83bfab346d5879336d8 100644 (file)
@@ -77,20 +77,19 @@ static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
 
        sk_for_each_from(sk)
                if (inet_sk(sk)->inet_num == num) {
-                       struct ipv6_pinfo *np = inet6_sk(sk);
 
                        if (!net_eq(sock_net(sk), net))
                                continue;
 
-                       if (!ipv6_addr_any(&np->daddr) &&
-                           !ipv6_addr_equal(&np->daddr, rmt_addr))
+                       if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+                           !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
                                continue;
 
                        if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
                                continue;
 
-                       if (!ipv6_addr_any(&np->rcv_saddr)) {
-                               if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
+                       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                               if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
                                        goto found;
                                if (is_multicast &&
                                    inet6_mc_check(sk, loc_addr, rmt_addr))
@@ -302,7 +301,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        }
 
        inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
-       np->rcv_saddr = addr->sin6_addr;
+       sk->sk_v6_rcv_saddr = addr->sin6_addr;
        if (!(addr_type & IPV6_ADDR_MULTICAST))
                np->saddr = addr->sin6_addr;
        err = 0;
@@ -804,8 +803,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                 * sk->sk_dst_cache.
                 */
                if (sk->sk_state == TCP_ESTABLISHED &&
-                   ipv6_addr_equal(daddr, &np->daddr))
-                       daddr = &np->daddr;
+                   ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+                       daddr = &sk->sk_v6_daddr;
 
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    sin6->sin6_scope_id &&
@@ -816,7 +815,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
                        return -EDESTADDRREQ;
 
                proto = inet->inet_num;
-               daddr = &np->daddr;
+               daddr = &sk->sk_v6_daddr;
                fl6.flowlabel = np->flow_label;
        }
 
index 1aeb473b2cc695d8d2b0a3696972ec9228455d14..cc85a9ba50101c5abf86cb3e6d493bfb9b680c74 100644 (file)
@@ -82,24 +82,24 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
  * callers should be careful not to use the hash value outside the ipfrag_lock
  * as doing so could race with ipfrag_hash_rnd being recalculated.
  */
-unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
-                            const struct in6_addr *daddr, u32 rnd)
+static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
+                                   const struct in6_addr *daddr)
 {
        u32 c;
 
+       net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
        c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-                        (__force u32)id, rnd);
+                        (__force u32)id, ip6_frags.rnd);
 
        return c & (INETFRAGS_HASHSZ - 1);
 }
-EXPORT_SYMBOL_GPL(inet6_hash_frag);
 
 static unsigned int ip6_hashfn(struct inet_frag_queue *q)
 {
        struct frag_queue *fq;
 
        fq = container_of(q, struct frag_queue, q);
-       return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
+       return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
 }
 
 bool ip6_frag_match(struct inet_frag_queue *q, void *a)
@@ -193,7 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
        arg.ecn = ecn;
 
        read_lock(&ip6_frags.lock);
-       hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
+       hash = inet6_hash_frag(id, src, dst);
 
        q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
        if (IS_ERR_OR_NULL(q)) {
index c979dd96d82a838534ae0105dbfb9e665ad410e6..5dc6ca6b66863a981b807df51341b35bfd93d2b6 100644 (file)
@@ -476,6 +476,24 @@ out:
 }
 
 #ifdef CONFIG_IPV6_ROUTER_PREF
+struct __rt6_probe_work {
+       struct work_struct work;
+       struct in6_addr target;
+       struct net_device *dev;
+};
+
+static void rt6_probe_deferred(struct work_struct *w)
+{
+       struct in6_addr mcaddr;
+       struct __rt6_probe_work *work =
+               container_of(w, struct __rt6_probe_work, work);
+
+       addrconf_addr_solict_mult(&work->target, &mcaddr);
+       ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
+       dev_put(work->dev);
+       kfree(w);
+}
+
 static void rt6_probe(struct rt6_info *rt)
 {
        struct neighbour *neigh;
@@ -499,17 +517,23 @@ static void rt6_probe(struct rt6_info *rt)
 
        if (!neigh ||
            time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
-               struct in6_addr mcaddr;
-               struct in6_addr *target;
+               struct __rt6_probe_work *work;
 
-               if (neigh) {
+               work = kmalloc(sizeof(*work), GFP_ATOMIC);
+
+               if (neigh && work)
                        neigh->updated = jiffies;
+
+               if (neigh)
                        write_unlock(&neigh->lock);
-               }
 
-               target = (struct in6_addr *)&rt->rt6i_gateway;
-               addrconf_addr_solict_mult(target, &mcaddr);
-               ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
+               if (work) {
+                       INIT_WORK(&work->work, rt6_probe_deferred);
+                       work->target = rt->rt6i_gateway;
+                       dev_hold(rt->dst.dev);
+                       work->dev = rt->dst.dev;
+                       schedule_work(&work->work);
+               }
        } else {
 out:
                write_unlock(&neigh->lock);
@@ -851,7 +875,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
                        if (ort->rt6i_dst.plen != 128 &&
                            ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                                rt->rt6i_flags |= RTF_ANYCAST;
-                       rt->rt6i_gateway = *daddr;
                }
 
                rt->rt6i_flags |= RTF_CACHE;
@@ -1137,7 +1160,6 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
-       fl6.flowi6_flags = 0;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
@@ -1236,7 +1258,6 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
-       fl6.flowi6_flags = 0;
        fl6.daddr = iph->daddr;
        fl6.saddr = iph->saddr;
        fl6.flowlabel = ip6_flowinfo(iph);
@@ -1258,7 +1279,6 @@ void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = oif;
        fl6.flowi6_mark = mark;
-       fl6.flowi6_flags = 0;
        fl6.daddr = msg->dest;
        fl6.saddr = iph->daddr;
 
@@ -1338,6 +1358,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        rt->dst.flags |= DST_HOST;
        rt->dst.output  = ip6_output;
        atomic_set(&rt->dst.__refcnt, 1);
+       rt->rt6i_gateway  = fl6->daddr;
        rt->rt6i_dst.addr = fl6->daddr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_idev     = idev;
@@ -1873,7 +1894,10 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
                        in6_dev_hold(rt->rt6i_idev);
                rt->dst.lastuse = jiffies;
 
-               rt->rt6i_gateway = ort->rt6i_gateway;
+               if (ort->rt6i_flags & RTF_GATEWAY)
+                       rt->rt6i_gateway = ort->rt6i_gateway;
+               else
+                       rt->rt6i_gateway = *dest;
                rt->rt6i_flags = ort->rt6i_flags;
                if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
                    (RTF_DEFAULT | RTF_ADDRCONF))
@@ -2160,6 +2184,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
        else
                rt->rt6i_flags |= RTF_LOCAL;
 
+       rt->rt6i_gateway  = *addr;
        rt->rt6i_dst.addr = *addr;
        rt->rt6i_dst.plen = 128;
        rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
@@ -2800,56 +2825,12 @@ static int ip6_route_dev_notify(struct notifier_block *this,
 
 #ifdef CONFIG_PROC_FS
 
-struct rt6_proc_arg
-{
-       char *buffer;
-       int offset;
-       int length;
-       int skip;
-       int len;
-};
-
-static int rt6_info_route(struct rt6_info *rt, void *p_arg)
-{
-       struct seq_file *m = p_arg;
-
-       seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
-
-#ifdef CONFIG_IPV6_SUBTREES
-       seq_printf(m, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
-#else
-       seq_puts(m, "00000000000000000000000000000000 00 ");
-#endif
-       if (rt->rt6i_flags & RTF_GATEWAY) {
-               seq_printf(m, "%pi6", &rt->rt6i_gateway);
-       } else {
-               seq_puts(m, "00000000000000000000000000000000");
-       }
-       seq_printf(m, " %08x %08x %08x %08x %8s\n",
-                  rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
-                  rt->dst.__use, rt->rt6i_flags,
-                  rt->dst.dev ? rt->dst.dev->name : "");
-       return 0;
-}
-
-static int ipv6_route_show(struct seq_file *m, void *v)
-{
-       struct net *net = (struct net *)m->private;
-       fib6_clean_all_ro(net, rt6_info_route, 0, m);
-       return 0;
-}
-
-static int ipv6_route_open(struct inode *inode, struct file *file)
-{
-       return single_open_net(inode, file, ipv6_route_show);
-}
-
 static const struct file_operations ipv6_route_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = ipv6_route_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = single_release_net,
+       .release        = seq_release_net,
 };
 
 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
index 19269453a8eaca1d34eefa648e401ccea7d829ac..3a9038dd818d7588c950c4bca2ff56279683314c 100644 (file)
@@ -933,10 +933,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
                ttl = iph6->hop_limit;
        tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 
-       if (likely(!skb->encapsulation)) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
+       skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
+       if (IS_ERR(skb))
+               goto out;
 
        err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
                            ttl, df, !net_eq(tunnel->net, dev_net(dev)));
@@ -946,8 +945,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 tx_error_icmp:
        dst_link_failure(skb);
 tx_error:
-       dev->stats.tx_errors++;
        dev_kfree_skb(skb);
+out:
+       dev->stats.tx_errors++;
        return NETDEV_TX_OK;
 }
 
@@ -956,13 +956,15 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
        const struct iphdr  *tiph = &tunnel->parms.iph;
 
-       if (likely(!skb->encapsulation)) {
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-       }
+       skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+       if (IS_ERR(skb))
+               goto out;
 
        ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
        return NETDEV_TX_OK;
+out:
+       dev->stats.tx_errors++;
+       return NETDEV_TX_OK;
 }
 
 static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
@@ -1292,6 +1294,12 @@ static void ipip6_dev_free(struct net_device *dev)
        free_netdev(dev);
 }
 
+#define SIT_FEATURES (NETIF_F_SG          | \
+                     NETIF_F_FRAGLIST     | \
+                     NETIF_F_HIGHDMA      | \
+                     NETIF_F_GSO_SOFTWARE | \
+                     NETIF_F_HW_CSUM)
+
 static void ipip6_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &ipip6_netdev_ops;
@@ -1305,6 +1313,8 @@ static void ipip6_tunnel_setup(struct net_device *dev)
        dev->iflink             = 0;
        dev->addr_len           = 4;
        dev->features           |= NETIF_F_LLTX;
+       dev->features           |= SIT_FEATURES;
+       dev->hw_features        |= SIT_FEATURES;
 }
 
 static int ipip6_tunnel_init(struct net_device *dev)
index bf63ac8a49b9df8601a5e0dabc4074e65007f94b..535a3ad262f18d7dcc04586fc55c3f0e9d4afe8b 100644 (file)
 #define COOKIEBITS 24  /* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
 
-/* Table must be sorted. */
+static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS];
+
+/* RFC 2460, Section 8.3:
+ * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
+ *
+ * Due to IPV6_MIN_MTU=1280 the lowest possible MSS is 1220, which allows
+ * using higher values than ipv4 tcp syncookies.
+ * The other values are chosen based on Ethernet (1500 and 9k MTU), plus
+ * one that accounts for common encap (PPPoE) overhead. Table must be sorted.
+ */
 static __u16 const msstab[] = {
-       64,
-       512,
-       536,
-       1280 - 60,
+       1280 - 60, /* IPV6_MIN_MTU - 60 */
        1480 - 60,
        1500 - 60,
-       4460 - 60,
        9000 - 60,
 };
 
-/*
- * This (misnamed) value is the age of syncookie which is permitted.
- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
- * sysctl_tcp_retries1. It's a rather complicated formula (exponential
- * backoff) to compute at runtime so it's currently hardcoded here.
- */
-#define COUNTER_TRIES 4
-
 static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
                                           struct request_sock *req,
                                           struct dst_entry *dst)
@@ -66,14 +63,18 @@ static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
 static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
                       __be16 sport, __be16 dport, u32 count, int c)
 {
-       __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
+       __u32 *tmp;
+
+       net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
+
+       tmp  = __get_cpu_var(ipv6_cookie_scratch);
 
        /*
         * we have 320 bits of information to hash, copy in the remaining
-        * 192 bits required for sha_transform, from the syncookie_secret
+        * 192 bits required for sha_transform, from the syncookie6_secret
         * and overwrite the digest with the secret
         */
-       memcpy(tmp + 10, syncookie_secret[c], 44);
+       memcpy(tmp + 10, syncookie6_secret[c], 44);
        memcpy(tmp, saddr, 16);
        memcpy(tmp + 4, daddr, 16);
        tmp[8] = ((__force u32)sport << 16) + (__force u32)dport;
@@ -86,8 +87,9 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
 static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
                                   const struct in6_addr *daddr,
                                   __be16 sport, __be16 dport, __u32 sseq,
-                                  __u32 count, __u32 data)
+                                  __u32 data)
 {
+       u32 count = tcp_cookie_time();
        return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
                sseq + (count << COOKIEBITS) +
                ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
@@ -96,15 +98,14 @@ static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
 
 static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
                                  const struct in6_addr *daddr, __be16 sport,
-                                 __be16 dport, __u32 sseq, __u32 count,
-                                 __u32 maxdiff)
+                                 __be16 dport, __u32 sseq)
 {
-       __u32 diff;
+       __u32 diff, count = tcp_cookie_time();
 
        cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;
 
        diff = (count - (cookie >> COOKIEBITS)) & ((__u32) -1 >> COOKIEBITS);
-       if (diff >= maxdiff)
+       if (diff >= MAX_SYNCOOKIE_AGE)
                return (__u32)-1;
 
        return (cookie -
@@ -125,8 +126,7 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
        *mssp = msstab[mssind];
 
        return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
-                                    th->dest, ntohl(th->seq),
-                                    jiffies / (HZ * 60), mssind);
+                                    th->dest, ntohl(th->seq), mssind);
 }
 EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
 
@@ -146,8 +146,7 @@ int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
 {
        __u32 seq = ntohl(th->seq) - 1;
        __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
-                                           th->source, th->dest, seq,
-                                           jiffies / (HZ * 60), COUNTER_TRIES);
+                                           th->source, th->dest, seq);
 
        return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
 }
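/*
 * Illustrative companion (not from this patch) to check_tcp_syn_cookie()
 * above, reusing SKETCH_COOKIEBITS/SKETCH_COOKIEMASK from the encoding
 * sketch earlier: undo the encoding, compare the embedded time counter
 * with the current one, and reject cookies whose age reaches the limit.
 * MAX_SYNCOOKIE_AGE replaces the old hardcoded COUNTER_TRIES of 4; the
 * old counter advanced once per 60 seconds (jiffies / (HZ * 60)), and
 * tcp_cookie_time() now supplies a comparable coarse counter.
 */
#include <stdint.h>

#define SKETCH_MAX_AGE 2u       /* placeholder limit for the sketch */

static uint32_t decode_cookie(uint32_t cookie, uint32_t hash1, uint32_t hash2,
                              uint32_t sseq, uint32_t now)
{
        uint32_t diff;

        cookie -= hash1 + sseq;
        diff = (now - (cookie >> SKETCH_COOKIEBITS)) &
               ((uint32_t)-1 >> SKETCH_COOKIEBITS);
        if (diff >= SKETCH_MAX_AGE)
                return (uint32_t)-1;                    /* stale cookie */
        return (cookie - hash2) & SKETCH_COOKIEMASK;    /* recovered data */
}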
@@ -157,7 +156,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_options_received tcp_opt;
        struct inet_request_sock *ireq;
-       struct inet6_request_sock *ireq6;
        struct tcp_request_sock *treq;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -194,7 +192,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                goto out;
 
        ireq = inet_rsk(req);
-       ireq6 = inet6_rsk(req);
        treq = tcp_rsk(req);
        treq->listener = NULL;
 
@@ -202,22 +199,22 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                goto out_free;
 
        req->mss = mss;
-       ireq->rmt_port = th->source;
-       ireq->loc_port = th->dest;
-       ireq6->rmt_addr = ipv6_hdr(skb)->saddr;
-       ireq6->loc_addr = ipv6_hdr(skb)->daddr;
+       ireq->ir_rmt_port = th->source;
+       ireq->ir_num = ntohs(th->dest);
+       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                atomic_inc(&skb->users);
-               ireq6->pktopts = skb;
+               ireq->pktopts = skb;
        }
 
-       ireq6->iif = sk->sk_bound_dev_if;
+       ireq->ir_iif = sk->sk_bound_dev_if;
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
-           ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-               ireq6->iif = inet6_iif(skb);
+           ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               ireq->ir_iif = inet6_iif(skb);
 
        req->expires = 0UL;
        req->num_retrans = 0;
@@ -241,12 +238,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                struct flowi6 fl6;
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_proto = IPPROTO_TCP;
-               fl6.daddr = ireq6->rmt_addr;
+               fl6.daddr = ireq->ir_v6_rmt_addr;
                final_p = fl6_update_dst(&fl6, np->opt, &final);
-               fl6.saddr = ireq6->loc_addr;
+               fl6.saddr = ireq->ir_v6_loc_addr;
                fl6.flowi6_oif = sk->sk_bound_dev_if;
                fl6.flowi6_mark = sk->sk_mark;
-               fl6.fl6_dport = inet_rsk(req)->rmt_port;
+               fl6.fl6_dport = ireq->ir_rmt_port;
                fl6.fl6_sport = inet_sk(sk)->inet_sport;
                security_req_classify_flow(req, flowi6_to_flowi(&fl6));
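/*
 * Sketch (not the kernel's actual layout): the hunk above is part of a
 * series that folds the IPv6-only inet6_request_sock fields into the
 * common inet_request_sock as ir_v6_rmt_addr, ir_v6_loc_addr, ir_iif
 * and pktopts, and renames rmt_port/loc_port to ir_rmt_port/ir_num.
 * Note that ir_num holds the local port in host byte order (hence the
 * ntohs() above) while ir_rmt_port stays in network byte order.  A toy
 * model of such a family-agnostic request structure:
 */
#include <stdint.h>

struct toy_request {
        uint16_t ir_rmt_port;           /* peer port, network byte order */
        uint16_t ir_num;                /* local port, host byte order */
        int      ir_iif;                /* bound interface index */
        int      family;                /* AF_INET or AF_INET6 */
        union {
                uint32_t v4_rmt_addr;
                uint8_t  v6_rmt_addr[16];
        } u;
};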
 
index 5c71501fc917d6271f72cea50cb98b9ad783f1c4..0740f93a114a26ac09150638576f523bcd53694c 100644 (file)
@@ -192,13 +192,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        }
 
        if (tp->rx_opt.ts_recent_stamp &&
-           !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
+           !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
                tp->rx_opt.ts_recent = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                tp->write_seq = 0;
        }
 
-       np->daddr = usin->sin6_addr;
+       sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
        /*
@@ -237,17 +237,17 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                } else {
                        ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
                        ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
-                                              &np->rcv_saddr);
+                                              &sk->sk_v6_rcv_saddr);
                }
 
                return err;
        }
 
-       if (!ipv6_addr_any(&np->rcv_saddr))
-               saddr = &np->rcv_saddr;
+       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+               saddr = &sk->sk_v6_rcv_saddr;
 
        fl6.flowi6_proto = IPPROTO_TCP;
-       fl6.daddr = np->daddr;
+       fl6.daddr = sk->sk_v6_daddr;
        fl6.saddr = saddr ? *saddr : np->saddr;
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.flowi6_mark = sk->sk_mark;
@@ -266,7 +266,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (saddr == NULL) {
                saddr = &fl6.saddr;
-               np->rcv_saddr = *saddr;
+               sk->sk_v6_rcv_saddr = *saddr;
        }
 
        /* set the source address */
@@ -279,7 +279,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        rt = (struct rt6_info *) dst;
        if (tcp_death_row.sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp &&
-           ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
+           ipv6_addr_equal(&rt->rt6i_dst.addr, &sk->sk_v6_daddr))
                tcp_fetch_timewait_stamp(sk, dst);
 
        icsk->icsk_ext_hdr_len = 0;
@@ -298,7 +298,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
-                                                            np->daddr.s6_addr32,
+                                                            sk->sk_v6_daddr.s6_addr32,
                                                             inet->inet_sport,
                                                             inet->inet_dport);
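/*
 * Sketch (not the kernel's layout): throughout this file np->daddr and
 * np->rcv_saddr are replaced by sk->sk_v6_daddr and sk->sk_v6_rcv_saddr,
 * i.e. the IPv6 addresses move from the IPv6-only ipv6_pinfo into the
 * generic struct sock, where family-agnostic lookup code can reach them.
 * A toy version of such a shared bound-address field:
 */
#include <stdint.h>
#include <string.h>

struct toy_sock {
        int      family;                /* AF_INET or AF_INET6 */
        uint32_t rcv_saddr;             /* IPv4 bound address */
        uint8_t  v6_rcv_saddr[16];      /* IPv6 bound address */
};

static int toy_bound_addr_matches(const struct toy_sock *sk,
                                  const void *addr, size_t len)
{
        if (len == sizeof(sk->rcv_saddr))
                return !memcmp(&sk->rcv_saddr, addr, len);
        return len == sizeof(sk->v6_rcv_saddr) &&
               !memcmp(sk->v6_rcv_saddr, addr, len);
}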
 
@@ -465,7 +465,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
                              struct request_sock *req,
                              u16 queue_mapping)
 {
-       struct inet6_request_sock *treq = inet6_rsk(req);
+       struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff * skb;
        int err = -ENOMEM;
@@ -477,9 +477,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
        skb = tcp_make_synack(sk, dst, req, NULL);
 
        if (skb) {
-               __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
+               __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
+                                   &ireq->ir_v6_rmt_addr);
 
-               fl6->daddr = treq->rmt_addr;
+               fl6->daddr = ireq->ir_v6_rmt_addr;
                skb_set_queue_mapping(skb, queue_mapping);
                err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
                err = net_xmit_eval(err);
@@ -502,7 +503,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
-       kfree_skb(inet6_rsk(req)->pktopts);
+       kfree_skb(inet_rsk(req)->pktopts);
 }
 
 #ifdef CONFIG_TCP_MD5SIG
@@ -515,13 +516,13 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
                                                struct sock *addr_sk)
 {
-       return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
+       return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
 }
 
 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
                                                      struct request_sock *req)
 {
-       return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
+       return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr);
 }
 
 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
@@ -621,10 +622,10 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 
        if (sk) {
                saddr = &inet6_sk(sk)->saddr;
-               daddr = &inet6_sk(sk)->daddr;
+               daddr = &sk->sk_v6_daddr;
        } else if (req) {
-               saddr = &inet6_rsk(req)->loc_addr;
-               daddr = &inet6_rsk(req)->rmt_addr;
+               saddr = &inet_rsk(req)->ir_v6_loc_addr;
+               daddr = &inet_rsk(req)->ir_v6_rmt_addr;
        } else {
                const struct ipv6hdr *ip6h = ipv6_hdr(skb);
                saddr = &ip6h->saddr;
@@ -949,7 +950,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_options_received tmp_opt;
        struct request_sock *req;
-       struct inet6_request_sock *treq;
+       struct inet_request_sock *ireq;
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 isn = TCP_SKB_CB(skb)->when;
@@ -994,25 +995,25 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
        tcp_openreq_init(req, &tmp_opt, skb);
 
-       treq = inet6_rsk(req);
-       treq->rmt_addr = ipv6_hdr(skb)->saddr;
-       treq->loc_addr = ipv6_hdr(skb)->daddr;
+       ireq = inet_rsk(req);
+       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
        if (!want_cookie || tmp_opt.tstamp_ok)
                TCP_ECN_create_request(req, skb, sock_net(sk));
 
-       treq->iif = sk->sk_bound_dev_if;
+       ireq->ir_iif = sk->sk_bound_dev_if;
 
        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
-           ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-               treq->iif = inet6_iif(skb);
+           ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               ireq->ir_iif = inet6_iif(skb);
 
        if (!isn) {
                if (ipv6_opt_accepted(sk, skb) ||
                    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
                    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
                        atomic_inc(&skb->users);
-                       treq->pktopts = skb;
+                       ireq->pktopts = skb;
                }
 
                if (want_cookie) {
@@ -1051,7 +1052,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                         * to the moment of synflood.
                         */
                        LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
-                                      &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
+                                      &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
                        goto drop_and_release;
                }
 
@@ -1086,7 +1087,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                          struct request_sock *req,
                                          struct dst_entry *dst)
 {
-       struct inet6_request_sock *treq;
+       struct inet_request_sock *ireq;
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct tcp6_sock *newtcp6sk;
        struct inet_sock *newinet;
@@ -1116,11 +1117,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
                memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
+               ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);
 
                ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
 
-               newnp->rcv_saddr = newnp->saddr;
+               newsk->sk_v6_rcv_saddr = newnp->saddr;
 
                inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
@@ -1151,7 +1152,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                return newsk;
        }
 
-       treq = inet6_rsk(req);
+       ireq = inet_rsk(req);
 
        if (sk_acceptq_is_full(sk))
                goto out_overflow;
@@ -1185,10 +1186,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        memcpy(newnp, np, sizeof(struct ipv6_pinfo));
 
-       newnp->daddr = treq->rmt_addr;
-       newnp->saddr = treq->loc_addr;
-       newnp->rcv_saddr = treq->loc_addr;
-       newsk->sk_bound_dev_if = treq->iif;
+       newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
+       newnp->saddr = ireq->ir_v6_loc_addr;
+       newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
+       newsk->sk_bound_dev_if = ireq->ir_iif;
 
        /* Now IPv6 options...
 
@@ -1203,11 +1204,11 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
-       if (treq->pktopts != NULL) {
-               newnp->pktoptions = skb_clone(treq->pktopts,
+       if (ireq->pktopts != NULL) {
+               newnp->pktoptions = skb_clone(ireq->pktopts,
                                              sk_gfp_atomic(sk, GFP_ATOMIC));
-               consume_skb(treq->pktopts);
-               treq->pktopts = NULL;
+               consume_skb(ireq->pktopts);
+               ireq->pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
@@ -1244,13 +1245,13 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Copy over the MD5 key from the original socket */
-       if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
+       if ((key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr)) != NULL) {
                /* We're using one, so create a matching key
                 * on the newsk structure. If we fail to get
                 * memory, then we end up not copying the key
                 * across. Shucks.
                 */
-               tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
+               tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
                               AF_INET6, key->key, key->keylen,
                               sk_gfp_atomic(sk, GFP_ATOMIC));
        }
@@ -1722,8 +1723,8 @@ static void get_openreq6(struct seq_file *seq,
                         const struct sock *sk, struct request_sock *req, int i, kuid_t uid)
 {
        int ttd = req->expires - jiffies;
-       const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
-       const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
+       const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
+       const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
 
        if (ttd < 0)
                ttd = 0;
@@ -1734,10 +1735,10 @@ static void get_openreq6(struct seq_file *seq,
                   i,
                   src->s6_addr32[0], src->s6_addr32[1],
                   src->s6_addr32[2], src->s6_addr32[3],
-                  ntohs(inet_rsk(req)->loc_port),
+                  inet_rsk(req)->ir_num,
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3],
-                  ntohs(inet_rsk(req)->rmt_port),
+                  ntohs(inet_rsk(req)->ir_rmt_port),
                   TCP_SYN_RECV,
                   0,0, /* could print option size, but that is af dependent. */
                   1,   /* timers active (only the expire timer) */
@@ -1758,10 +1759,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
        const struct inet_sock *inet = inet_sk(sp);
        const struct tcp_sock *tp = tcp_sk(sp);
        const struct inet_connection_sock *icsk = inet_csk(sp);
-       const struct ipv6_pinfo *np = inet6_sk(sp);
 
-       dest  = &np->daddr;
-       src   = &np->rcv_saddr;
+       dest  = &sp->sk_v6_daddr;
+       src   = &sp->sk_v6_rcv_saddr;
        destp = ntohs(inet->inet_dport);
        srcp  = ntohs(inet->inet_sport);
 
@@ -1810,11 +1810,10 @@ static void get_timewait6_sock(struct seq_file *seq,
 {
        const struct in6_addr *dest, *src;
        __u16 destp, srcp;
-       const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
-       long delta = tw->tw_ttd - jiffies;
+       s32 delta = tw->tw_ttd - inet_tw_time_stamp();
 
-       dest = &tw6->tw_v6_daddr;
-       src  = &tw6->tw_v6_rcv_saddr;
+       dest = &tw->tw_v6_daddr;
+       src  = &tw->tw_v6_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);
 
@@ -1834,6 +1833,7 @@ static void get_timewait6_sock(struct seq_file *seq,
 static int tcp6_seq_show(struct seq_file *seq, void *v)
 {
        struct tcp_iter_state *st;
+       struct sock *sk = v;
 
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq,
@@ -1849,14 +1849,14 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
        case TCP_SEQ_STATE_ESTABLISHED:
-               get_tcp6_sock(seq, v, st->num);
+               if (sk->sk_state == TCP_TIME_WAIT)
+                       get_timewait6_sock(seq, v, st->num);
+               else
+                       get_tcp6_sock(seq, v, st->num);
                break;
        case TCP_SEQ_STATE_OPENREQ:
                get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
                break;
-       case TCP_SEQ_STATE_TIME_WAIT:
-               get_timewait6_sock(seq, v, st->num);
-               break;
        }
 out:
        return 0;
@@ -1929,6 +1929,7 @@ struct proto tcpv6_prot = {
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .orphan_count           = &tcp_orphan_count,
+       .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem            = sysctl_tcp_wmem,
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
index 2ec6bf6a0aa002d6c21468d090767736161a5d1b..c1097c79890070e3d04517cecb7f0b263c0a4fc7 100644 (file)
@@ -83,7 +83,7 @@ static int tcp6_gro_complete(struct sk_buff *skb)
 static const struct net_offload tcpv6_offload = {
        .callbacks = {
                .gso_send_check =       tcp_v6_gso_send_check,
-               .gso_segment    =       tcp_tso_segment,
+               .gso_segment    =       tcp_gso_segment,
                .gro_receive    =       tcp6_gro_receive,
                .gro_complete   =       tcp6_gro_complete,
        },
index 72b7eaaf3ca0e3e6b7cad3edea8aa69e71cbe147..f3893e897f721aa4345c82f0ea2b899c44fa0ae4 100644 (file)
 #include <trace/events/skb.h>
 #include "udp_impl.h"
 
+static unsigned int udp6_ehashfn(struct net *net,
+                                 const struct in6_addr *laddr,
+                                 const u16 lport,
+                                 const struct in6_addr *faddr,
+                                 const __be16 fport)
+{
+       static u32 udp6_ehash_secret __read_mostly;
+       static u32 udp_ipv6_hash_secret __read_mostly;
+
+       u32 lhash, fhash;
+
+       net_get_random_once(&udp6_ehash_secret,
+                           sizeof(udp6_ehash_secret));
+       net_get_random_once(&udp_ipv6_hash_secret,
+                           sizeof(udp_ipv6_hash_secret));
+
+       lhash = (__force u32)laddr->s6_addr32[3];
+       fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
+
+       return __inet6_ehashfn(lhash, lport, fhash, fport,
+                              udp_ipv6_hash_secret + net_hash_mix(net));
+}
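/*
 * Sketch (not from this hunk): the hash computed above keys the
 * SO_REUSEPORT selection further down, so that equally-scoring UDP
 * sockets share incoming flows deterministically.  A common way to map
 * a 32-bit hash onto one of `matches` candidates without a modulo:
 */
#include <stdint.h>

static unsigned int pick_reuseport_index(uint32_t hash, unsigned int matches)
{
        /* multiply-shift keeps the choice roughly uniform */
        return (unsigned int)(((uint64_t)hash * matches) >> 32);
}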
+
 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 {
-       const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
        const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-       __be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
-       __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
        int sk_ipv6only = ipv6_only_sock(sk);
        int sk2_ipv6only = inet_v6_ipv6only(sk2);
-       int addr_type = ipv6_addr_type(sk_rcv_saddr6);
+       int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
        int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
 
        /* if both are mapped, treat as IPv4 */
        if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
                return (!sk2_ipv6only &&
-                       (!sk1_rcv_saddr || !sk2_rcv_saddr ||
-                         sk1_rcv_saddr == sk2_rcv_saddr));
+                       (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
+                         sk->sk_rcv_saddr == sk2->sk_rcv_saddr));
 
        if (addr_type2 == IPV6_ADDR_ANY &&
            !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
@@ -79,7 +99,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
                return 1;
 
        if (sk2_rcv_saddr6 &&
-           ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
+           ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
                return 1;
 
        return 0;
@@ -107,7 +127,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
        unsigned int hash2_nulladdr =
                udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
        unsigned int hash2_partial =
-               udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
+               udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
 
        /* precompute partial secondary hash */
        udp_sk(sk)->udp_portaddr_hash = hash2_partial;
@@ -117,7 +137,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
 static void udp_v6_rehash(struct sock *sk)
 {
        u16 new_hash = udp6_portaddr_hash(sock_net(sk),
-                                         &inet6_sk(sk)->rcv_saddr,
+                                         &sk->sk_v6_rcv_saddr,
                                          inet_sk(sk)->inet_num);
 
        udp_lib_rehash(sk, new_hash);
@@ -133,7 +153,6 @@ static inline int compute_score(struct sock *sk, struct net *net,
 
        if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
                        sk->sk_family == PF_INET6) {
-               struct ipv6_pinfo *np = inet6_sk(sk);
                struct inet_sock *inet = inet_sk(sk);
 
                score = 0;
@@ -142,13 +161,13 @@ static inline int compute_score(struct sock *sk, struct net *net,
                                return -1;
                        score++;
                }
-               if (!ipv6_addr_any(&np->rcv_saddr)) {
-                       if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+               if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                       if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
                                return -1;
                        score++;
                }
-               if (!ipv6_addr_any(&np->daddr)) {
-                       if (!ipv6_addr_equal(&np->daddr, saddr))
+               if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
+                       if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
                                return -1;
                        score++;
                }
@@ -171,10 +190,9 @@ static inline int compute_score2(struct sock *sk, struct net *net,
 
        if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
                        sk->sk_family == PF_INET6) {
-               struct ipv6_pinfo *np = inet6_sk(sk);
                struct inet_sock *inet = inet_sk(sk);
 
-               if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
+               if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
                        return -1;
                score = 0;
                if (inet->inet_dport) {
@@ -182,8 +200,8 @@ static inline int compute_score2(struct sock *sk, struct net *net,
                                return -1;
                        score++;
                }
-               if (!ipv6_addr_any(&np->daddr)) {
-                       if (!ipv6_addr_equal(&np->daddr, saddr))
+               if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
+                       if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
                                return -1;
                        score++;
                }
@@ -219,8 +237,8 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               hash = inet6_ehashfn(net, daddr, hnum,
-                                                    saddr, sport);
+                               hash = udp6_ehashfn(net, daddr, hnum,
+                                                   saddr, sport);
                                matches = 1;
                        } else if (score == SCORE2_MAX)
                                goto exact_match;
@@ -300,8 +318,8 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               hash = inet6_ehashfn(net, daddr, hnum,
-                                                    saddr, sport);
+                               hash = udp6_ehashfn(net, daddr, hnum,
+                                                   saddr, sport);
                                matches = 1;
                        }
                } else if (score == badness && reuseport) {
@@ -551,8 +569,10 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
        int rc;
 
-       if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
+       if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
                sock_rps_save_rxhash(sk, skb);
+               sk_mark_napi_id(sk, skb);
+       }
 
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0) {
@@ -690,20 +710,19 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
 
                if (udp_sk(s)->udp_port_hash == num &&
                    s->sk_family == PF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(s);
                        if (inet->inet_dport) {
                                if (inet->inet_dport != rmt_port)
                                        continue;
                        }
-                       if (!ipv6_addr_any(&np->daddr) &&
-                           !ipv6_addr_equal(&np->daddr, rmt_addr))
+                       if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+                           !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
                                continue;
 
                        if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
                                continue;
 
-                       if (!ipv6_addr_any(&np->rcv_saddr)) {
-                               if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
+                       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
+                               if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
                                        continue;
                        }
                        if (!inet6_mc_check(s, loc_addr, rmt_addr))
@@ -846,7 +865,6 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        if (sk != NULL) {
                int ret;
 
-               sk_mark_napi_id(sk, skb);
                ret = udpv6_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
@@ -1064,7 +1082,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
        } else if (!up->pending) {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
-               daddr = &np->daddr;
+               daddr = &sk->sk_v6_daddr;
        } else
                daddr = NULL;
 
@@ -1134,8 +1152,8 @@ do_udp_sendmsg:
                 * sk->sk_dst_cache.
                 */
                if (sk->sk_state == TCP_ESTABLISHED &&
-                   ipv6_addr_equal(daddr, &np->daddr))
-                       daddr = &np->daddr;
+                   ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+                       daddr = &sk->sk_v6_daddr;
 
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    sin6->sin6_scope_id &&
@@ -1146,7 +1164,7 @@ do_udp_sendmsg:
                        return -EDESTADDRREQ;
 
                fl6.fl6_dport = inet->inet_dport;
-               daddr = &np->daddr;
+               daddr = &sk->sk_v6_daddr;
                fl6.flowlabel = np->flow_label;
                connected = 1;
        }
@@ -1225,9 +1243,6 @@ do_udp_sendmsg:
        if (tclass < 0)
                tclass = np->tclass;
 
-       if (dontfrag < 0)
-               dontfrag = np->dontfrag;
-
        if (msg->msg_flags&MSG_CONFIRM)
                goto do_confirm;
 back_from_confirm:
@@ -1246,6 +1261,8 @@ back_from_confirm:
        up->pending = AF_INET6;
 
 do_append_data:
+       if (dontfrag < 0)
+               dontfrag = np->dontfrag;
        up->len += ulen;
        getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
        err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
@@ -1262,8 +1279,8 @@ do_append_data:
        if (dst) {
                if (connected) {
                        ip6_dst_store(sk, dst,
-                                     ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
-                                     &np->daddr : NULL,
+                                     ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
+                                     &sk->sk_v6_daddr : NULL,
 #ifdef CONFIG_IPV6_SUBTREES
                                      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
                                      &np->saddr :
index 4691ed50a9282a108e43ce8e7de56d79c1635c4a..c779c3c90b9d3b90c3b4821508d3fdf31b6d5da6 100644 (file)
@@ -7,33 +7,32 @@
 #include <net/inet_common.h>
 #include <net/transp_v6.h>
 
-extern int     __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int );
-extern void    __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *,
-                              u8 , u8 , int , __be32 , struct udp_table *);
+int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int);
+void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
+                   __be32, struct udp_table *);
 
-extern int     udp_v6_get_port(struct sock *sk, unsigned short snum);
+int udp_v6_get_port(struct sock *sk, unsigned short snum);
 
-extern int     udpv6_getsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, int __user *optlen);
-extern int     udpv6_setsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, unsigned int optlen);
+int udpv6_getsockopt(struct sock *sk, int level, int optname,
+                    char __user *optval, int __user *optlen);
+int udpv6_setsockopt(struct sock *sk, int level, int optname,
+                    char __user *optval, unsigned int optlen);
 #ifdef CONFIG_COMPAT
-extern int     compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
-                                       char __user *optval, unsigned int optlen);
-extern int     compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
-                                      char __user *optval, int __user *optlen);
+int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
+                           char __user *optval, unsigned int optlen);
+int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
+                           char __user *optval, int __user *optlen);
 #endif
-extern int     udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
-                             struct msghdr *msg, size_t len);
-extern int     udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
-                             struct msghdr *msg, size_t len,
-                             int noblock, int flags, int *addr_len);
-extern int     udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
-extern void    udpv6_destroy_sock(struct sock *sk);
+int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                 size_t len);
+int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
+                 size_t len, int noblock, int flags, int *addr_len);
+int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+void udpv6_destroy_sock(struct sock *sk);
 
-extern void udp_v6_clear_sk(struct sock *sk, int size);
+void udp_v6_clear_sk(struct sock *sk, int size);
 
 #ifdef CONFIG_PROC_FS
-extern int     udp6_seq_show(struct seq_file *seq, void *v);
+int udp6_seq_show(struct seq_file *seq, void *v);
 #endif
 #endif /* _UDP6_IMPL_H */
index 60559511bd9c479e7996ff9826c4852e8051df72..08e23b0bf302e93c27c74a3cd8dfc5213c453c6e 100644 (file)
@@ -64,6 +64,8 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                                      SKB_GSO_DODGY |
                                      SKB_GSO_UDP_TUNNEL |
                                      SKB_GSO_GRE |
+                                     SKB_GSO_IPIP |
+                                     SKB_GSO_SIT |
                                      SKB_GSO_MPLS) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;
index 4770d515c2c856684e93416d4696067cae7e3114..cb04f7a16b5e102f2944be051d61511d5645d3f7 100644 (file)
 #include <net/ipv6.h>
 #include <net/xfrm.h>
 
+/* Informational hook. The decap is still done here. */
+static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
+static DEFINE_MUTEX(xfrm6_mode_tunnel_input_mutex);
+
+int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
+{
+       struct xfrm_tunnel_notifier __rcu **pprev;
+       struct xfrm_tunnel_notifier *t;
+       int ret = -EEXIST;
+       int priority = handler->priority;
+
+       mutex_lock(&xfrm6_mode_tunnel_input_mutex);
+
+       for (pprev = &rcv_notify_handlers;
+            (t = rcu_dereference_protected(*pprev,
+            lockdep_is_held(&xfrm6_mode_tunnel_input_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t->priority > priority)
+                       break;
+               if (t->priority == priority)
+                       goto err;
+
+       }
+
+       handler->next = *pprev;
+       rcu_assign_pointer(*pprev, handler);
+
+       ret = 0;
+
+err:
+       mutex_unlock(&xfrm6_mode_tunnel_input_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm6_mode_tunnel_input_register);
+
+int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
+{
+       struct xfrm_tunnel_notifier __rcu **pprev;
+       struct xfrm_tunnel_notifier *t;
+       int ret = -ENOENT;
+
+       mutex_lock(&xfrm6_mode_tunnel_input_mutex);
+       for (pprev = &rcv_notify_handlers;
+            (t = rcu_dereference_protected(*pprev,
+            lockdep_is_held(&xfrm6_mode_tunnel_input_mutex))) != NULL;
+            pprev = &t->next) {
+               if (t == handler) {
+                       *pprev = handler->next;
+                       ret = 0;
+                       break;
+               }
+       }
+       mutex_unlock(&xfrm6_mode_tunnel_input_mutex);
+       synchronize_net();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm6_mode_tunnel_input_deregister);
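/*
 * Hypothetical caller of the API added above (sketch only): the two
 * functions keep a priority-ordered, RCU-protected list of callbacks
 * that run for every tunnel-mode input packet, while decapsulation
 * itself stays in xfrm.  The notifier layout and callback signature are
 * assumed from the fields used here (.handler, .priority, .next).
 */
static int my_ipip6_rcv_cb(struct sk_buff *skb)
{
        /* peek at the still-encapsulated packet, e.g. count it */
        return 0;
}

static struct xfrm_tunnel_notifier my_ipip6_notifier = {
        .handler  = my_ipip6_rcv_cb,
        .priority = 1,
};

/* in module init / exit:
 *      xfrm6_mode_tunnel_input_register(&my_ipip6_notifier);
 *      xfrm6_mode_tunnel_input_deregister(&my_ipip6_notifier);
 */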
+
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
        const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
@@ -63,8 +122,15 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
        return 0;
 }
 
+#define for_each_input_rcu(head, handler)      \
+       for (handler = rcu_dereference(head);   \
+            handler != NULL;                   \
+            handler = rcu_dereference(handler->next))
+
+
 static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 {
+       struct xfrm_tunnel_notifier *handler;
        int err = -EINVAL;
 
        if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6)
@@ -72,6 +138,9 @@ static int xfrm6_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto out;
 
+       for_each_input_rcu(rcv_notify_handlers, handler)
+               handler->handler(skb);
+
        err = skb_unclone(skb, GFP_ATOMIC);
        if (err)
                goto out;
index 23ed03d786c8376cc59f9fa2cf577ee01a4c2c2d..08ed2772b7aa58225bd6be95518ea95c1191dcd9 100644 (file)
@@ -138,6 +138,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 
        memset(fl6, 0, sizeof(struct flowi6));
        fl6->flowi6_mark = skb->mark;
+       fl6->flowi6_oif = skb_dst(skb)->dev->ifindex;
 
        fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
        fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
index 564eb0b8afa350eb5f933c981ab7bd0d68abd1b0..8d65bb9477fce626c7e781c0d38d78b488b5271c 100644 (file)
@@ -509,16 +509,11 @@ typedef struct irnet_ctrl_channel
  */
 
 /* -------------------------- IRDA PART -------------------------- */
-extern int
-       irda_irnet_create(irnet_socket *);      /* Initialise a IrNET socket */
-extern int
-       irda_irnet_connect(irnet_socket *);     /* Try to connect over IrDA */
-extern void
-       irda_irnet_destroy(irnet_socket *);     /* Teardown  a IrNET socket */
-extern int
-       irda_irnet_init(void);          /* Initialise IrDA part of IrNET */
-extern void
-       irda_irnet_cleanup(void);       /* Teardown IrDA part of IrNET */
+int irda_irnet_create(irnet_socket *); /* Initialise an IrNET socket */
+int irda_irnet_connect(irnet_socket *);        /* Try to connect over IrDA */
+void irda_irnet_destroy(irnet_socket *);       /* Teardown an IrNET socket */
+int irda_irnet_init(void);             /* Initialise IrDA part of IrNET */
+void irda_irnet_cleanup(void);         /* Teardown IrDA part of IrNET */
 
 /**************************** VARIABLES ****************************/
 
index 9d585370c5b4d6a1e60728df4dad6c79ca196ee3..911ef03bf8fbf1716672fb503f39de7fe13746d9 100644 (file)
@@ -1098,7 +1098,8 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 
        x->id.proto = proto;
        x->id.spi = sa->sadb_sa_spi;
-       x->props.replay_window = sa->sadb_sa_replay;
+       x->props.replay_window = min_t(unsigned int, sa->sadb_sa_replay,
+                                       (sizeof(x->replay.bitmap) * 8));
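/*
 * Sketch (userspace analogue, not from this patch): a replay window
 * wider than the backing bitmap cannot be represented, so the value
 * supplied over PF_KEY is clamped to the bitmap width in bits, as done
 * above with min_t().
 */
#include <stddef.h>

static unsigned int clamp_replay_window(unsigned int requested,
                                        size_t bitmap_bytes)
{
        unsigned int max_bits = (unsigned int)(bitmap_bytes * 8);

        return requested < max_bits ? requested : max_bits;
}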
        if (sa->sadb_sa_flags & SADB_SAFLAGS_NOECN)
                x->props.flags |= XFRM_STATE_NOECN;
        if (sa->sadb_sa_flags & SADB_SAFLAGS_DECAP_DSCP)
index feae495a0a30accd9eb5a88baf1c23095bb38d66..9af77d9c0ec9b7327c275feeacc6766c7681f54d 100644 (file)
@@ -115,6 +115,11 @@ struct l2tp_net {
 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 
+static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
+{
+       return sk->sk_user_data;
+}
+
 static inline struct l2tp_net *l2tp_pernet(struct net *net)
 {
        BUG_ON(!net);
@@ -504,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
                return 0;
 
 #if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == PF_INET6) {
+       if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
                if (!uh->check) {
                        LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
                        return 1;
@@ -1128,7 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
        /* Queue the packet to IP for output */
        skb->local_df = 1;
 #if IS_ENABLED(CONFIG_IPV6)
-       if (skb->sk->sk_family == PF_INET6)
+       if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
                error = inet6_csk_xmit(skb, NULL);
        else
 #endif
@@ -1176,7 +1181,7 @@ static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
            !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
                __wsum csum = skb_checksum(skb, 0, udp_len, 0);
                skb->ip_summed = CHECKSUM_UNNECESSARY;
-               uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
+               uh->check = csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr, udp_len,
                                            IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
@@ -1184,7 +1189,7 @@ static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
-               uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
+               uh->check = ~csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr,
                                             udp_len, IPPROTO_UDP, 0);
        }
 }
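/*
 * Sketch (not from this patch) of the rule visible above: UDP over IPv6
 * must always carry a checksum, and a computed value of zero is sent as
 * all-ones (CSUM_MANGLED_0) because a zero checksum field would mean
 * "no checksum", which IPv6 forbids.
 */
#include <stdint.h>

static uint16_t udp6_fold_check(uint16_t computed)
{
        return computed == 0 ? 0xffff : computed;
}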
@@ -1255,7 +1260,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 
                /* Calculate UDP checksum if configured to do so */
 #if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == PF_INET6)
+               if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
                        l2tp_xmit_ipv6_csum(sk, skb, udp_len);
                else
 #endif
@@ -1304,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
  */
 static void l2tp_tunnel_destruct(struct sock *sk)
 {
-       struct l2tp_tunnel *tunnel;
+       struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
        struct l2tp_net *pn;
 
-       tunnel = sk->sk_user_data;
        if (tunnel == NULL)
                goto end;
 
@@ -1675,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        }
 
        /* Check if this socket has already been prepped */
-       tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
+       tunnel = l2tp_tunnel(sk);
        if (tunnel != NULL) {
                /* This socket has already been prepped */
                err = -EBUSY;
@@ -1704,6 +1708,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        if (cfg != NULL)
                tunnel->debug = cfg->debug;
 
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == PF_INET6) {
+               struct ipv6_pinfo *np = inet6_sk(sk);
+
+               if (ipv6_addr_v4mapped(&np->saddr) &&
+                   ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
+                       struct inet_sock *inet = inet_sk(sk);
+
+                       tunnel->v4mapped = true;
+                       inet->inet_saddr = np->saddr.s6_addr32[3];
+                       inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
+                       inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
+               } else {
+                       tunnel->v4mapped = false;
+               }
+       }
+#endif
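/*
 * Sketch (userspace analogue, not from this patch): a v4-mapped IPv6
 * address (::ffff:a.b.c.d) carries the IPv4 address in its last four
 * bytes, which is what the block above copies into inet_saddr /
 * inet_rcv_saddr / inet_daddr so a v4-mapped tunnel socket keeps using
 * the IPv4 code paths (see the !tunnel->v4mapped checks elsewhere in
 * this file).
 */
#include <netinet/in.h>
#include <string.h>

static int extract_v4mapped(const struct in6_addr *a6, struct in_addr *a4)
{
        if (!IN6_IS_ADDR_V4MAPPED(a6))
                return 0;
        memcpy(a4, &a6->s6_addr[12], sizeof(*a4));      /* low 32 bits */
        return 1;
}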
+
        /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
        tunnel->encap = encap;
        if (encap == L2TP_ENCAPTYPE_UDP) {
@@ -1712,7 +1734,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
                udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
                udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
 #if IS_ENABLED(CONFIG_IPV6)
-               if (sk->sk_family == PF_INET6)
+               if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
                        udpv6_encap_enable();
                else
 #endif
index 66a559b104b6a7af0bc5c76770c32ad0f816670c..1ee9f6965d6850c94f9ea70a6320836e3d0734f0 100644 (file)
@@ -194,6 +194,9 @@ struct l2tp_tunnel {
        struct sock             *sock;          /* Parent socket */
        int                     fd;             /* Parent fd, if tunnel socket
                                                 * was created by userspace */
+#if IS_ENABLED(CONFIG_IPV6)
+       bool                    v4mapped;
+#endif
 
        struct work_struct      del_work;
 
@@ -235,29 +238,40 @@ out:
        return tunnel;
 }
 
-extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
-extern void l2tp_tunnel_sock_put(struct sock *sk);
-extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
-extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
-extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
-extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
-extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
-
-extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
-extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
-extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
-extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
-extern void __l2tp_session_unhash(struct l2tp_session *session);
-extern int l2tp_session_delete(struct l2tp_session *session);
-extern void l2tp_session_free(struct l2tp_session *session);
-extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
-extern int l2tp_session_queue_purge(struct l2tp_session *session);
-extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
-
-extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
-
-extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
-extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
+struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel);
+void l2tp_tunnel_sock_put(struct sock *sk);
+struct l2tp_session *l2tp_session_find(struct net *net,
+                                      struct l2tp_tunnel *tunnel,
+                                      u32 session_id);
+struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
+struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
+struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
+
+int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
+                      u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+                      struct l2tp_tunnel **tunnelp);
+void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+struct l2tp_session *l2tp_session_create(int priv_size,
+                                        struct l2tp_tunnel *tunnel,
+                                        u32 session_id, u32 peer_session_id,
+                                        struct l2tp_session_cfg *cfg);
+void __l2tp_session_unhash(struct l2tp_session *session);
+int l2tp_session_delete(struct l2tp_session *session);
+void l2tp_session_free(struct l2tp_session *session);
+void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
+                     unsigned char *ptr, unsigned char *optr, u16 hdrflags,
+                     int length, int (*payload_hook)(struct sk_buff *skb));
+int l2tp_session_queue_purge(struct l2tp_session *session);
+int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
+
+int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
+                 int hdr_len);
+
+int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
+                        const struct l2tp_nl_cmd_ops *ops);
+void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
 
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
index 072d7202e182ffa2125fe7de9c54c584626fdeb0..2d6760a2ae347b96d465e30192ab8a7957258d32 100644 (file)
@@ -127,9 +127,10 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
 
 #if IS_ENABLED(CONFIG_IPV6)
                if (tunnel->sock->sk_family == AF_INET6) {
-                       struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+                       const struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
+
                        seq_printf(m, " from %pI6c to %pI6c\n",
-                               &np->saddr, &np->daddr);
+                               &np->saddr, &tunnel->sock->sk_v6_daddr);
                } else
 #endif
                seq_printf(m, " from %pI4 to %pI4\n",
index b8a6039314e868781d3130f06fbb78ced6c4a678..cfd65304be60ae8937449a8c1e4cf77406c11305 100644 (file)
@@ -63,7 +63,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
        struct sock *sk;
 
        sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
-               struct in6_addr *addr = inet6_rcv_saddr(sk);
+               const struct in6_addr *addr = inet6_rcv_saddr(sk);
                struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
                if (l2tp == NULL)
@@ -331,7 +331,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        rcu_read_unlock();
 
        inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
-       np->rcv_saddr = addr->l2tp_addr;
+       sk->sk_v6_rcv_saddr = addr->l2tp_addr;
        np->saddr = addr->l2tp_addr;
 
        l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
@@ -421,14 +421,14 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
                if (!lsk->peer_conn_id)
                        return -ENOTCONN;
                lsa->l2tp_conn_id = lsk->peer_conn_id;
-               lsa->l2tp_addr = np->daddr;
+               lsa->l2tp_addr = sk->sk_v6_daddr;
                if (np->sndflow)
                        lsa->l2tp_flowinfo = np->flow_label;
        } else {
-               if (ipv6_addr_any(&np->rcv_saddr))
+               if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
                        lsa->l2tp_addr = np->saddr;
                else
-                       lsa->l2tp_addr = np->rcv_saddr;
+                       lsa->l2tp_addr = sk->sk_v6_rcv_saddr;
 
                lsa->l2tp_conn_id = lsk->conn_id;
        }
@@ -537,8 +537,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
                 * sk->sk_dst_cache.
                 */
                if (sk->sk_state == TCP_ESTABLISHED &&
-                   ipv6_addr_equal(daddr, &np->daddr))
-                       daddr = &np->daddr;
+                   ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
+                       daddr = &sk->sk_v6_daddr;
 
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    lsa->l2tp_scope_id &&
@@ -548,7 +548,7 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
 
-               daddr = &np->daddr;
+               daddr = &sk->sk_v6_daddr;
                fl6.flowlabel = np->flow_label;
        }
 
index 0825ff26e113f3e2acdbbe2b88bd0f4fbfd92410..be446d517bc96641d413dc0bba09d9150398a81e 100644 (file)
@@ -306,8 +306,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
                if (np) {
                        if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
                                    &np->saddr) ||
-                           nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
-                                   &np->daddr))
+                           nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(sk->sk_v6_daddr),
+                                   &sk->sk_v6_daddr))
                                goto nla_put_failure;
                } else
 #endif
index 5ebee2ded9e9a8f40d2a282600b47f6cb088a910..ffda81ef1a709df605ef128ebc866cb00c75fe32 100644 (file)
@@ -353,7 +353,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
                goto error_put_sess_tun;
        }
 
+       local_bh_disable();
        l2tp_xmit_skb(session, skb, session->hdr_len);
+       local_bh_enable();
 
        sock_put(ps->tunnel_sock);
        sock_put(sk);
@@ -422,7 +424,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        skb->data[0] = ppph[0];
        skb->data[1] = ppph[1];
 
+       local_bh_disable();
        l2tp_xmit_skb(session, skb, session->hdr_len);
+       local_bh_enable();
 
        sock_put(sk_tun);
        sock_put(sk);
@@ -906,8 +910,8 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
 #if IS_ENABLED(CONFIG_IPV6)
        } else if ((tunnel->version == 2) &&
                   (tunnel->sock->sk_family == AF_INET6)) {
-               struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
                struct sockaddr_pppol2tpin6 sp;
+
                len = sizeof(sp);
                memset(&sp, 0, len);
                sp.sa_family    = AF_PPPOX;
@@ -920,13 +924,13 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
                sp.pppol2tp.d_session = session->peer_session_id;
                sp.pppol2tp.addr.sin6_family = AF_INET6;
                sp.pppol2tp.addr.sin6_port = inet->inet_dport;
-               memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
-                      sizeof(np->daddr));
+               memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
+                      sizeof(tunnel->sock->sk_v6_daddr));
                memcpy(uaddr, &sp, len);
        } else if ((tunnel->version == 3) &&
                   (tunnel->sock->sk_family == AF_INET6)) {
-               struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
                struct sockaddr_pppol2tpv3in6 sp;
+
                len = sizeof(sp);
                memset(&sp, 0, len);
                sp.sa_family    = AF_PPPOX;
@@ -939,8 +943,8 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
                sp.pppol2tp.d_session = session->peer_session_id;
                sp.pppol2tp.addr.sin6_family = AF_INET6;
                sp.pppol2tp.addr.sin6_port = inet->inet_dport;
-               memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
-                      sizeof(np->daddr));
+               memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
+                      sizeof(tunnel->sock->sk_v6_daddr));
                memcpy(uaddr, &sp, len);
 #endif
        } else if (tunnel->version == 3) {
index 2e7855a1b10d17198adcfec98d550838daef80bd..b0a651cc389fdab807b2a8b800f63144d0eaeb2f 100644 (file)
@@ -2865,30 +2865,43 @@ void ieee80211_csa_finalize_work(struct work_struct *work)
        if (!ieee80211_sdata_running(sdata))
                return;
 
-       if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
-               return;
-
        sdata->radar_required = sdata->csa_radar_required;
        err = ieee80211_vif_change_channel(sdata, &local->csa_chandef,
                                           &changed);
        if (WARN_ON(err < 0))
                return;
 
-       err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
-       if (err < 0)
-               return;
+       if (!local->use_chanctx) {
+               local->_oper_chandef = local->csa_chandef;
+               ieee80211_hw_config(local, 0);
+       }
 
-       changed |= err;
-       kfree(sdata->u.ap.next_beacon);
-       sdata->u.ap.next_beacon = NULL;
+       ieee80211_bss_info_change_notify(sdata, changed);
+
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_AP:
+               err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
+               if (err < 0)
+                       return;
+               changed |= err;
+               kfree(sdata->u.ap.next_beacon);
+               sdata->u.ap.next_beacon = NULL;
+
+               ieee80211_bss_info_change_notify(sdata, err);
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               ieee80211_ibss_finish_csa(sdata);
+               break;
+       default:
+               WARN_ON(1);
+               return;
+       }
        sdata->vif.csa_active = false;
 
        ieee80211_wake_queues_by_reason(&sdata->local->hw,
                                        IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
 
-       ieee80211_bss_info_change_notify(sdata, changed);
-
        cfg80211_ch_switch_notify(sdata->dev, &local->csa_chandef);
 }
 
@@ -2936,20 +2949,56 @@ static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
        if (sdata->vif.csa_active)
                return -EBUSY;
 
-       /* only handle AP for now. */
        switch (sdata->vif.type) {
        case NL80211_IFTYPE_AP:
+               sdata->csa_counter_offset_beacon =
+                       params->counter_offset_beacon;
+               sdata->csa_counter_offset_presp = params->counter_offset_presp;
+               sdata->u.ap.next_beacon =
+                       cfg80211_beacon_dup(&params->beacon_after);
+               if (!sdata->u.ap.next_beacon)
+                       return -ENOMEM;
+
+               err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
+               if (err < 0) {
+                       kfree(sdata->u.ap.next_beacon);
+                       return err;
+               }
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               if (!sdata->vif.bss_conf.ibss_joined)
+                       return -EINVAL;
+
+               if (params->chandef.width != sdata->u.ibss.chandef.width)
+                       return -EINVAL;
+
+               switch (params->chandef.width) {
+               case NL80211_CHAN_WIDTH_40:
+                       if (cfg80211_get_chandef_type(&params->chandef) !=
+                           cfg80211_get_chandef_type(&sdata->u.ibss.chandef))
+                               return -EINVAL;
+               case NL80211_CHAN_WIDTH_5:
+               case NL80211_CHAN_WIDTH_10:
+               case NL80211_CHAN_WIDTH_20_NOHT:
+               case NL80211_CHAN_WIDTH_20:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               /* changes into another band are not supported */
+               if (sdata->u.ibss.chandef.chan->band !=
+                   params->chandef.chan->band)
+                       return -EINVAL;
+
+               err = ieee80211_ibss_csa_beacon(sdata, params);
+               if (err < 0)
+                       return err;
                break;
        default:
                return -EOPNOTSUPP;
        }
 
-       sdata->u.ap.next_beacon = cfg80211_beacon_dup(&params->beacon_after);
-       if (!sdata->u.ap.next_beacon)
-               return -ENOMEM;
-
-       sdata->csa_counter_offset_beacon = params->counter_offset_beacon;
-       sdata->csa_counter_offset_presp = params->counter_offset_presp;
        sdata->csa_radar_required = params->radar_required;
 
        if (params->block_tx)
@@ -2957,10 +3006,6 @@ static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                                IEEE80211_MAX_QUEUE_MAP,
                                IEEE80211_QUEUE_STOP_REASON_CSA);
 
-       err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
-       if (err < 0)
-               return err;
-
        local->csa_chandef = params->chandef;
        sdata->vif.csa_active = true;
 
@@ -3014,7 +3059,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                        need_offchan = true;
                if (!ieee80211_is_action(mgmt->frame_control) ||
                    mgmt->u.action.category == WLAN_CATEGORY_PUBLIC ||
-                   mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED)
+                   mgmt->u.action.category == WLAN_CATEGORY_SELF_PROTECTED ||
+                   mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT)
                        break;
                rcu_read_lock();
                sta = sta_info_get(sdata, mgmt->da);
@@ -3518,7 +3564,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
                return -EINVAL;
        }
        band = chanctx_conf->def.chan->band;
-       sta = sta_info_get(sdata, peer);
+       sta = sta_info_get_bss(sdata, peer);
        if (sta) {
                qos = test_sta_flag(sta, WLAN_STA_WME);
        } else {
index 3a4764b2869efffdbcc3f90a363cf3f8b095496c..03ba6b5c5373b373d47956518fafeaf1338b2ad0 100644 (file)
@@ -453,11 +453,6 @@ int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
        chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
        drv_change_chanctx(local, ctx, chanctx_changed);
 
-       if (!local->use_chanctx) {
-               local->_oper_chandef = *chandef;
-               ieee80211_hw_config(local, 0);
-       }
-
        ieee80211_recalc_chanctx_chantype(local, ctx);
        ieee80211_recalc_smps_chanctx(local, ctx);
        ieee80211_recalc_radar_chanctx(local, ctx);
index b0e32d6281146231061d34f63088db6b90106fc1..5c090e41d9bbf1ea379307c8ecd6a648cb33f0ac 100644 (file)
@@ -103,54 +103,57 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
        if (!buf)
                return 0;
 
-       sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
+       sf += scnprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
        if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
-               sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
+               sf += scnprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
        if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
-               sf += snprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
+               sf += scnprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
        if (local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)
-               sf += snprintf(buf + sf, mxln - sf,
-                              "HOST_BCAST_PS_BUFFERING\n");
+               sf += scnprintf(buf + sf, mxln - sf,
+                               "HOST_BCAST_PS_BUFFERING\n");
        if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)
-               sf += snprintf(buf + sf, mxln - sf,
-                              "2GHZ_SHORT_SLOT_INCAPABLE\n");
+               sf += scnprintf(buf + sf, mxln - sf,
+                               "2GHZ_SHORT_SLOT_INCAPABLE\n");
        if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)
-               sf += snprintf(buf + sf, mxln - sf,
-                              "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
+               sf += scnprintf(buf + sf, mxln - sf,
+                               "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
        if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
-               sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
+               sf += scnprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
        if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
-               sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
+               sf += scnprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
        if (local->hw.flags & IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC)
-               sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_BEFORE_ASSOC\n");
+               sf += scnprintf(buf + sf, mxln - sf,
+                               "NEED_DTIM_BEFORE_ASSOC\n");
        if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
-               sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
+               sf += scnprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
        if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
-               sf += snprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
+               sf += scnprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
        if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS)
-               sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
+               sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
        if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
-               sf += snprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
+               sf += scnprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
        if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
-               sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
+               sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
        if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
-               sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
+               sf += scnprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
        if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
-               sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
+               sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
        if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
-               sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_SMPS\n");
+               sf += scnprintf(buf + sf, mxln - sf,
+                               "SUPPORTS_DYNAMIC_SMPS\n");
        if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
-               sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
+               sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
        if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
-               sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
+               sf += scnprintf(buf + sf, mxln - sf,
+                               "REPORTS_TX_ACK_STATUS\n");
        if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
-               sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
+               sf += scnprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
        if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
-               sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
+               sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
        if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
-               sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
+               sf += scnprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
        if (local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW)
-               sf += snprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
+               sf += scnprintf(buf + sf, mxln - sf, "TX_AMPDU_SETUP_IN_HW\n");
 
        rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
        kfree(buf);
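
The hwflags_read() hunk above converts every snprintf() call to scnprintf(). The difference matters because the return value is accumulated into the running offset "sf": snprintf() reports the length the output would have had, while scnprintf() reports what was actually stored, so the offset can never run past the buffer. A small self-contained sketch, with the buffer size and strings invented:

#include <linux/kernel.h>

/* Illustrative only: why a running offset built from snprintf() can
 * overshoot the buffer, while one built from scnprintf() cannot.
 */
static void offset_demo(void)
{
        char buf[8];
        size_t off;

        off = snprintf(buf, sizeof(buf), "0123456789");
        /* off == 10 here, larger than sizeof(buf); a following
         * "sizeof(buf) - off" would wrap around as an unsigned value.
         */

        off = scnprintf(buf, sizeof(buf), "0123456789");
        /* off == 7 here: only the bytes actually stored are counted,
         * so the next write still starts inside the buffer.
         */
}
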
index b3ea11f3d526962ddd8190587f82d8d84134067c..5d03c47c0a4cb4fa60e861750956c64bb8c90337 100644 (file)
@@ -1085,4 +1085,31 @@ drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata,
        }
 }
 
+static inline int drv_join_ibss(struct ieee80211_local *local,
+                               struct ieee80211_sub_if_data *sdata)
+{
+       int ret = 0;
+
+       might_sleep();
+       check_sdata_in_driver(sdata);
+
+       trace_drv_join_ibss(local, sdata, &sdata->vif.bss_conf);
+       if (local->ops->join_ibss)
+               ret = local->ops->join_ibss(&local->hw, &sdata->vif);
+       trace_drv_return_int(local, ret);
+       return ret;
+}
+
+static inline void drv_leave_ibss(struct ieee80211_local *local,
+                                 struct ieee80211_sub_if_data *sdata)
+{
+       might_sleep();
+       check_sdata_in_driver(sdata);
+
+       trace_drv_leave_ibss(local, sdata);
+       if (local->ops->leave_ibss)
+               local->ops->leave_ibss(&local->hw, &sdata->vif);
+       trace_drv_return_void(local);
+}
+
 #endif /* __MAC80211_DRIVER_OPS */
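
The drv_join_ibss()/drv_leave_ibss() wrappers added above only call into the driver when the corresponding op is set, so both callbacks stay optional. A hedged sketch of a driver wiring them up (driver name and bodies invented, mandatory ops elided):

#include <net/mac80211.h>

/* Illustrative only: a driver that wants to be told about IBSS join and
 * leave fills in the two new ops; drivers that do not care simply leave
 * the pointers NULL and the inline wrappers above skip the calls.
 */
static int mydrv_join_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
        /* program BSSID, beacon interval etc. from vif->bss_conf */
        return 0;
}

static void mydrv_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
        /* tear the IBSS state back down in the device */
}

static const struct ieee80211_ops mydrv_ops = {
        /* mandatory ops (.tx, .start, .stop, ...) elided for brevity */
        .join_ibss  = mydrv_join_ibss,
        .leave_ibss = mydrv_leave_ibss,
};
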
index a12afe77bb26b5037fe58d6624bbc87939c30cde..21a0b8835cb31d4a27dda9b2243038c2280b8f6a 100644 (file)
@@ -39,7 +39,8 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
                           const int beacon_int, const u32 basic_rates,
                           const u16 capability, u64 tsf,
                           struct cfg80211_chan_def *chandef,
-                          bool *have_higher_than_11mbit)
+                          bool *have_higher_than_11mbit,
+                          struct cfg80211_csa_settings *csa_settings)
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
@@ -59,6 +60,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
                    2 + 8 /* max Supported Rates */ +
                    3 /* max DS params */ +
                    4 /* IBSS params */ +
+                   5 /* Channel Switch Announcement */ +
                    2 + (IEEE80211_MAX_SUPP_RATES - 8) +
                    2 + sizeof(struct ieee80211_ht_cap) +
                    2 + sizeof(struct ieee80211_ht_operation) +
@@ -135,6 +137,16 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
        *pos++ = 0;
        *pos++ = 0;
 
+       if (csa_settings) {
+               *pos++ = WLAN_EID_CHANNEL_SWITCH;
+               *pos++ = 3;
+               *pos++ = csa_settings->block_tx ? 1 : 0;
+               *pos++ = ieee80211_frequency_to_channel(
+                               csa_settings->chandef.chan->center_freq);
+               sdata->csa_counter_offset_beacon = (pos - presp->head);
+               *pos++ = csa_settings->count;
+       }
+
        /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
        if (rates_n > 8) {
                *pos++ = WLAN_EID_EXT_SUPP_RATES;
@@ -217,6 +229,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        struct beacon_data *presp;
        enum nl80211_bss_scan_width scan_width;
        bool have_higher_than_11mbit;
+       int err;
 
        sdata_assert_lock(sdata);
 
@@ -235,6 +248,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                ieee80211_bss_info_change_notify(sdata,
                                                 BSS_CHANGED_IBSS |
                                                 BSS_CHANGED_BEACON_ENABLED);
+               drv_leave_ibss(local, sdata);
        }
 
        presp = rcu_dereference_protected(ifibss->presp,
@@ -276,7 +290,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
        presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
                                           capability, tsf, &chandef,
-                                          &have_higher_than_11mbit);
+                                          &have_higher_than_11mbit, NULL);
        if (!presp)
                return;
 
@@ -317,11 +331,26 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
        else
                sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
 
+       ieee80211_set_wmm_default(sdata, true);
+
        sdata->vif.bss_conf.ibss_joined = true;
        sdata->vif.bss_conf.ibss_creator = creator;
-       ieee80211_bss_info_change_notify(sdata, bss_change);
 
-       ieee80211_set_wmm_default(sdata, true);
+       err = drv_join_ibss(local, sdata);
+       if (err) {
+               sdata->vif.bss_conf.ibss_joined = false;
+               sdata->vif.bss_conf.ibss_creator = false;
+               sdata->vif.bss_conf.enable_beacon = false;
+               sdata->vif.bss_conf.ssid_len = 0;
+               RCU_INIT_POINTER(ifibss->presp, NULL);
+               kfree_rcu(presp, rcu_head);
+               ieee80211_vif_release_channel(sdata);
+               sdata_info(sdata, "Failed to join IBSS, driver failure: %d\n",
+                          err);
+               return;
+       }
+
+       ieee80211_bss_info_change_notify(sdata, bss_change);
 
        ifibss->state = IEEE80211_IBSS_MLME_JOINED;
        mod_timer(&ifibss->timer,
@@ -416,6 +445,169 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
                                  tsf, false);
 }
 
+static int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
+                                    struct cfg80211_csa_settings *csa_settings)
+{
+       struct sk_buff *skb;
+       struct ieee80211_mgmt *mgmt;
+       struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+       struct ieee80211_local *local = sdata->local;
+       int freq;
+       int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.chan_switch) +
+                     sizeof(mgmt->u.action.u.chan_switch);
+       u8 *pos;
+
+       skb = dev_alloc_skb(local->tx_headroom + hdr_len +
+                           5 + /* channel switch announcement element */
+                           3); /* secondary channel offset element */
+       if (!skb)
+               return -1;
+
+       skb_reserve(skb, local->tx_headroom);
+       mgmt = (struct ieee80211_mgmt *)skb_put(skb, hdr_len);
+       memset(mgmt, 0, hdr_len);
+       mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+                                         IEEE80211_STYPE_ACTION);
+
+       eth_broadcast_addr(mgmt->da);
+       memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+       memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
+       mgmt->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT;
+       mgmt->u.action.u.chan_switch.action_code = WLAN_ACTION_SPCT_CHL_SWITCH;
+       pos = skb_put(skb, 5);
+       *pos++ = WLAN_EID_CHANNEL_SWITCH;                       /* EID */
+       *pos++ = 3;                                             /* IE length */
+       *pos++ = csa_settings->block_tx ? 1 : 0;                /* CSA mode */
+       freq = csa_settings->chandef.chan->center_freq;
+       *pos++ = ieee80211_frequency_to_channel(freq);          /* channel */
+       *pos++ = csa_settings->count;                           /* count */
+
+       if (csa_settings->chandef.width == NL80211_CHAN_WIDTH_40) {
+               enum nl80211_channel_type ch_type;
+
+               skb_put(skb, 3);
+               *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET;     /* EID */
+               *pos++ = 1;                                     /* IE length */
+               ch_type = cfg80211_get_chandef_type(&csa_settings->chandef);
+               if (ch_type == NL80211_CHAN_HT40PLUS)
+                       *pos++ = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+               else
+                       *pos++ = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+       }
+
+       ieee80211_tx_skb(sdata, skb);
+       return 0;
+}
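
ieee80211_send_action_csa() above writes the Channel Switch Announcement element byte by byte (plus a Secondary Channel Offset element when switching on an HT40 channel). Purely as an illustration of the 5-byte layout being emitted, not a structure defined by this patch:

#include <linux/types.h>

/* Illustrative only: the Channel Switch Announcement element built by
 * the skb_put()/pos sequence above (IEEE 802.11 element ID 37).
 */
struct csa_element_example {
        u8 eid;         /* WLAN_EID_CHANNEL_SWITCH */
        u8 len;         /* always 3 */
        u8 mode;        /* 1: stop transmitting until the switch */
        u8 new_ch;      /* target channel number */
        u8 count;       /* beacon intervals remaining before the switch */
};
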
+
+int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
+                             struct cfg80211_csa_settings *csa_settings)
+{
+       struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+       struct beacon_data *presp, *old_presp;
+       struct cfg80211_bss *cbss;
+       const struct cfg80211_bss_ies *ies;
+       u16 capability;
+       u64 tsf;
+       int ret = 0;
+
+       sdata_assert_lock(sdata);
+
+       capability = WLAN_CAPABILITY_IBSS;
+
+       if (ifibss->privacy)
+               capability |= WLAN_CAPABILITY_PRIVACY;
+
+       cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
+                               ifibss->bssid, ifibss->ssid,
+                               ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+                               WLAN_CAPABILITY_PRIVACY,
+                               capability);
+
+       if (WARN_ON(!cbss)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       rcu_read_lock();
+       ies = rcu_dereference(cbss->ies);
+       tsf = ies->tsf;
+       rcu_read_unlock();
+       cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
+
+       old_presp = rcu_dereference_protected(ifibss->presp,
+                                         lockdep_is_held(&sdata->wdev.mtx));
+
+       presp = ieee80211_ibss_build_presp(sdata,
+                                          sdata->vif.bss_conf.beacon_int,
+                                          sdata->vif.bss_conf.basic_rates,
+                                          capability, tsf, &ifibss->chandef,
+                                          NULL, csa_settings);
+       if (!presp) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       rcu_assign_pointer(ifibss->presp, presp);
+       if (old_presp)
+               kfree_rcu(old_presp, rcu_head);
+
+       /* The beacon might not be transmitted for a while, so send an
+        * action frame immediately to announce the channel switch.
+        */
+       if (csa_settings)
+               ieee80211_send_action_csa(sdata, csa_settings);
+
+       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ out:
+       return ret;
+}
+
+int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+       struct cfg80211_bss *cbss;
+       int err;
+       u16 capability;
+
+       sdata_lock(sdata);
+       /* update cfg80211 bss information with the new channel */
+       if (!is_zero_ether_addr(ifibss->bssid)) {
+               capability = WLAN_CAPABILITY_IBSS;
+
+               if (ifibss->privacy)
+                       capability |= WLAN_CAPABILITY_PRIVACY;
+
+               cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
+                                       ifibss->chandef.chan,
+                                       ifibss->bssid, ifibss->ssid,
+                                       ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+                                       WLAN_CAPABILITY_PRIVACY,
+                                       capability);
+               /* XXX: should not really modify cfg80211 data */
+               if (cbss) {
+                       cbss->channel = sdata->local->csa_chandef.chan;
+                       cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
+               }
+       }
+
+       ifibss->chandef = sdata->local->csa_chandef;
+
+       /* generate the beacon */
+       err = ieee80211_ibss_csa_beacon(sdata, NULL);
+       sdata_unlock(sdata);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+
+       cancel_work_sync(&ifibss->csa_connection_drop_work);
+}
+
 static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta)
        __acquires(RCU)
 {
@@ -499,6 +691,295 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
        return ieee80211_ibss_finish_sta(sta);
 }
 
+static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       int active = 0;
+       struct sta_info *sta;
+
+       sdata_assert_lock(sdata);
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(sta, &local->sta_list, list) {
+               if (sta->sdata == sdata &&
+                   time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL,
+                              jiffies)) {
+                       active++;
+                       break;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return active;
+}
+
+static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+       struct ieee80211_local *local = sdata->local;
+       struct cfg80211_bss *cbss;
+       struct beacon_data *presp;
+       struct sta_info *sta;
+       int active_ibss;
+       u16 capability;
+
+       active_ibss = ieee80211_sta_active_ibss(sdata);
+
+       if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+               capability = WLAN_CAPABILITY_IBSS;
+
+               if (ifibss->privacy)
+                       capability |= WLAN_CAPABILITY_PRIVACY;
+
+               cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan,
+                                       ifibss->bssid, ifibss->ssid,
+                                       ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+                                       WLAN_CAPABILITY_PRIVACY,
+                                       capability);
+
+               if (cbss) {
+                       cfg80211_unlink_bss(local->hw.wiphy, cbss);
+                       cfg80211_put_bss(sdata->local->hw.wiphy, cbss);
+               }
+       }
+
+       ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
+
+       sta_info_flush(sdata);
+
+       spin_lock_bh(&ifibss->incomplete_lock);
+       while (!list_empty(&ifibss->incomplete_stations)) {
+               sta = list_first_entry(&ifibss->incomplete_stations,
+                                      struct sta_info, list);
+               list_del(&sta->list);
+               spin_unlock_bh(&ifibss->incomplete_lock);
+
+               sta_info_free(local, sta);
+               spin_lock_bh(&ifibss->incomplete_lock);
+       }
+       spin_unlock_bh(&ifibss->incomplete_lock);
+
+       netif_carrier_off(sdata->dev);
+
+       sdata->vif.bss_conf.ibss_joined = false;
+       sdata->vif.bss_conf.ibss_creator = false;
+       sdata->vif.bss_conf.enable_beacon = false;
+       sdata->vif.bss_conf.ssid_len = 0;
+
+       /* remove beacon */
+       presp = rcu_dereference_protected(ifibss->presp,
+                                         lockdep_is_held(&sdata->wdev.mtx));
+       RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
+       if (presp)
+               kfree_rcu(presp, rcu_head);
+
+       clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
+       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
+                                               BSS_CHANGED_IBSS);
+       drv_leave_ibss(local, sdata);
+       ieee80211_vif_release_channel(sdata);
+}
+
+static void ieee80211_csa_connection_drop_work(struct work_struct *work)
+{
+       struct ieee80211_sub_if_data *sdata =
+               container_of(work, struct ieee80211_sub_if_data,
+                            u.ibss.csa_connection_drop_work);
+
+       ieee80211_ibss_disconnect(sdata);
+       synchronize_rcu();
+       skb_queue_purge(&sdata->skb_queue);
+
+       /* trigger a scan to find another IBSS network to join */
+       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+}
+
+static bool
+ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
+                                 struct ieee802_11_elems *elems,
+                                 bool beacon)
+{
+       struct cfg80211_csa_settings params;
+       struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+       struct ieee80211_chanctx_conf *chanctx_conf;
+       struct ieee80211_chanctx *chanctx;
+       enum nl80211_channel_type ch_type;
+       int err, num_chanctx;
+       u32 sta_flags;
+       u8 mode;
+
+       if (sdata->vif.csa_active)
+               return true;
+
+       if (!sdata->vif.bss_conf.ibss_joined)
+               return false;
+
+       sta_flags = IEEE80211_STA_DISABLE_VHT;
+       switch (ifibss->chandef.width) {
+       case NL80211_CHAN_WIDTH_5:
+       case NL80211_CHAN_WIDTH_10:
+       case NL80211_CHAN_WIDTH_20_NOHT:
+               sta_flags |= IEEE80211_STA_DISABLE_HT;
+               /* fall through */
+       case NL80211_CHAN_WIDTH_20:
+               sta_flags |= IEEE80211_STA_DISABLE_40MHZ;
+               break;
+       default:
+               break;
+       }
+
+       memset(&params, 0, sizeof(params));
+       err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon,
+                                          ifibss->chandef.chan->band,
+                                          sta_flags, ifibss->bssid,
+                                          &params.count, &mode,
+                                          &params.chandef);
+
+       /* can't switch to destination channel, fail */
+       if (err < 0)
+               goto disconnect;
+
+       /* did not contain a CSA */
+       if (err)
+               return false;
+
+       if (ifibss->chandef.chan->band != params.chandef.chan->band)
+               goto disconnect;
+
+       switch (ifibss->chandef.width) {
+       case NL80211_CHAN_WIDTH_20_NOHT:
+       case NL80211_CHAN_WIDTH_20:
+       case NL80211_CHAN_WIDTH_40:
+               /* Keep our current HT mode (HT20/HT40+/HT40-), even if
+                * another mode has been announced. The mode is not adopted
+                * within the beacon while doing CSA, so we should keep the
+                * mode we announce.
+                */
+               ch_type = cfg80211_get_chandef_type(&ifibss->chandef);
+               cfg80211_chandef_create(&params.chandef, params.chandef.chan,
+                                       ch_type);
+               break;
+       case NL80211_CHAN_WIDTH_5:
+       case NL80211_CHAN_WIDTH_10:
+               if (params.chandef.width != ifibss->chandef.width) {
+                       sdata_info(sdata,
+                                  "IBSS %pM received channel switch from incompatible channel width (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
+                                  ifibss->bssid,
+                                  params.chandef.chan->center_freq,
+                                  params.chandef.width,
+                                  params.chandef.center_freq1,
+                                  params.chandef.center_freq2);
+                       goto disconnect;
+               }
+               break;
+       default:
+               /* should not happen, sta_flags should prevent VHT modes. */
+               WARN_ON(1);
+               goto disconnect;
+       }
+
+       if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
+                                    IEEE80211_CHAN_DISABLED)) {
+               sdata_info(sdata,
+                          "IBSS %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
+                          ifibss->bssid,
+                          params.chandef.chan->center_freq,
+                          params.chandef.width,
+                          params.chandef.center_freq1,
+                          params.chandef.center_freq2);
+               goto disconnect;
+       }
+
+       err = cfg80211_chandef_dfs_required(sdata->local->hw.wiphy,
+                                           &params.chandef);
+       if (err < 0)
+               goto disconnect;
+       if (err) {
+               params.radar_required = true;
+
+               /* TODO: IBSS-DFS not (yet) supported, disconnect. */
+               goto disconnect;
+       }
+
+       rcu_read_lock();
+       chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+       if (!chanctx_conf) {
+               rcu_read_unlock();
+               goto disconnect;
+       }
+
+       /* don't handle for multi-VIF cases */
+       chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
+       if (chanctx->refcount > 1) {
+               rcu_read_unlock();
+               goto disconnect;
+       }
+       num_chanctx = 0;
+       list_for_each_entry_rcu(chanctx, &sdata->local->chanctx_list, list)
+               num_chanctx++;
+
+       if (num_chanctx > 1) {
+               rcu_read_unlock();
+               goto disconnect;
+       }
+       rcu_read_unlock();
+
+       /* all checks done, now perform the channel switch. */
+       ibss_dbg(sdata,
+                "received channel switch announcement to go to channel %d MHz\n",
+                params.chandef.chan->center_freq);
+
+       params.block_tx = !!mode;
+
+       ieee80211_ibss_csa_beacon(sdata, &params);
+       sdata->csa_radar_required = params.radar_required;
+
+       if (params.block_tx)
+               ieee80211_stop_queues_by_reason(&sdata->local->hw,
+                               IEEE80211_MAX_QUEUE_MAP,
+                               IEEE80211_QUEUE_STOP_REASON_CSA);
+
+       sdata->local->csa_chandef = params.chandef;
+       sdata->vif.csa_active = true;
+
+       ieee80211_bss_info_change_notify(sdata, err);
+       drv_channel_switch_beacon(sdata, &params.chandef);
+
+       return true;
+disconnect:
+       ibss_dbg(sdata, "Can't handle channel switch, disconnect\n");
+       ieee80211_queue_work(&sdata->local->hw,
+                            &ifibss->csa_connection_drop_work);
+
+       return true;
+}
+
+static void
+ieee80211_rx_mgmt_spectrum_mgmt(struct ieee80211_sub_if_data *sdata,
+                               struct ieee80211_mgmt *mgmt, size_t len,
+                               struct ieee80211_rx_status *rx_status,
+                               struct ieee802_11_elems *elems)
+{
+       int required_len;
+
+       if (len < IEEE80211_MIN_ACTION_SIZE + 1)
+               return;
+
+       /* CSA is the only action we handle for now */
+       if (mgmt->u.action.u.measurement.action_code !=
+           WLAN_ACTION_SPCT_CHL_SWITCH)
+               return;
+
+       required_len = IEEE80211_MIN_ACTION_SIZE +
+                      sizeof(mgmt->u.action.u.chan_switch);
+       if (len < required_len)
+               return;
+
+       ieee80211_ibss_process_chanswitch(sdata, elems, false);
+}
+
 static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
                                          struct ieee80211_mgmt *mgmt,
                                          size_t len)
@@ -661,10 +1142,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 
        /* check if we need to merge IBSS */
 
-       /* we use a fixed BSSID */
-       if (sdata->u.ibss.fixed_bssid)
-               goto put_bss;
-
        /* not an IBSS */
        if (!(cbss->capability & WLAN_CAPABILITY_IBSS))
                goto put_bss;
@@ -680,10 +1157,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                                sdata->u.ibss.ssid_len))
                goto put_bss;
 
+       /* process channel switch */
+       if (ieee80211_ibss_process_chanswitch(sdata, elems, true))
+               goto put_bss;
+
        /* same BSSID */
        if (ether_addr_equal(cbss->bssid, sdata->u.ibss.bssid))
                goto put_bss;
 
+       /* we use a fixed BSSID */
+       if (sdata->u.ibss.fixed_bssid)
+               goto put_bss;
+
        if (ieee80211_have_rx_timestamp(rx_status)) {
                /* time when timestamp field was received */
                rx_timestamp =
@@ -775,30 +1260,6 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
        ieee80211_queue_work(&local->hw, &sdata->work);
 }
 
-static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
-{
-       struct ieee80211_local *local = sdata->local;
-       int active = 0;
-       struct sta_info *sta;
-
-       sdata_assert_lock(sdata);
-
-       rcu_read_lock();
-
-       list_for_each_entry_rcu(sta, &local->sta_list, list) {
-               if (sta->sdata == sdata &&
-                   time_after(sta->last_rx + IEEE80211_IBSS_MERGE_INTERVAL,
-                              jiffies)) {
-                       active++;
-                       break;
-               }
-       }
-
-       rcu_read_unlock();
-
-       return active;
-}
-
 static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
@@ -1076,6 +1537,8 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_rx_status *rx_status;
        struct ieee80211_mgmt *mgmt;
        u16 fc;
+       struct ieee802_11_elems elems;
+       int ies_len;
 
        rx_status = IEEE80211_SKB_RXCB(skb);
        mgmt = (struct ieee80211_mgmt *) skb->data;
@@ -1101,6 +1564,27 @@ void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
        case IEEE80211_STYPE_DEAUTH:
                ieee80211_rx_mgmt_deauth_ibss(sdata, mgmt, skb->len);
                break;
+       case IEEE80211_STYPE_ACTION:
+               switch (mgmt->u.action.category) {
+               case WLAN_CATEGORY_SPECTRUM_MGMT:
+                       ies_len = skb->len -
+                                 offsetof(struct ieee80211_mgmt,
+                                          u.action.u.chan_switch.variable);
+
+                       if (ies_len < 0)
+                               break;
+
+                       ieee802_11_parse_elems(
+                               mgmt->u.action.u.chan_switch.variable,
+                               ies_len, true, &elems);
+
+                       if (elems.parse_error)
+                               break;
+
+                       ieee80211_rx_mgmt_spectrum_mgmt(sdata, mgmt, skb->len,
+                                                       rx_status, &elems);
+                       break;
+               }
        }
 
  mgmt_out:
@@ -1167,6 +1651,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
                    (unsigned long) sdata);
        INIT_LIST_HEAD(&ifibss->incomplete_stations);
        spin_lock_init(&ifibss->incomplete_lock);
+       INIT_WORK(&ifibss->csa_connection_drop_work,
+                 ieee80211_csa_connection_drop_work);
 }
 
 /* scan finished notification */
@@ -1265,73 +1751,19 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
-       struct ieee80211_local *local = sdata->local;
-       struct cfg80211_bss *cbss;
-       u16 capability;
-       int active_ibss;
-       struct sta_info *sta;
-       struct beacon_data *presp;
-
-       active_ibss = ieee80211_sta_active_ibss(sdata);
-
-       if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
-               capability = WLAN_CAPABILITY_IBSS;
-
-               if (ifibss->privacy)
-                       capability |= WLAN_CAPABILITY_PRIVACY;
-
-               cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->chandef.chan,
-                                       ifibss->bssid, ifibss->ssid,
-                                       ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
-                                       WLAN_CAPABILITY_PRIVACY,
-                                       capability);
 
-               if (cbss) {
-                       cfg80211_unlink_bss(local->hw.wiphy, cbss);
-                       cfg80211_put_bss(local->hw.wiphy, cbss);
-               }
-       }
-
-       ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
-       memset(ifibss->bssid, 0, ETH_ALEN);
+       ieee80211_ibss_disconnect(sdata);
        ifibss->ssid_len = 0;
-
-       sta_info_flush(sdata);
-
-       spin_lock_bh(&ifibss->incomplete_lock);
-       while (!list_empty(&ifibss->incomplete_stations)) {
-               sta = list_first_entry(&ifibss->incomplete_stations,
-                                      struct sta_info, list);
-               list_del(&sta->list);
-               spin_unlock_bh(&ifibss->incomplete_lock);
-
-               sta_info_free(local, sta);
-               spin_lock_bh(&ifibss->incomplete_lock);
-       }
-       spin_unlock_bh(&ifibss->incomplete_lock);
-
-       netif_carrier_off(sdata->dev);
+       memset(ifibss->bssid, 0, ETH_ALEN);
 
        /* remove beacon */
        kfree(sdata->u.ibss.ie);
-       presp = rcu_dereference_protected(ifibss->presp,
-                                         lockdep_is_held(&sdata->wdev.mtx));
-       RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
 
        /* on the next join, re-program HT parameters */
        memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
        memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask));
 
-       sdata->vif.bss_conf.ibss_joined = false;
-       sdata->vif.bss_conf.ibss_creator = false;
-       sdata->vif.bss_conf.enable_beacon = false;
-       sdata->vif.bss_conf.ssid_len = 0;
-       clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
-       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
-                                               BSS_CHANGED_IBSS);
-       ieee80211_vif_release_channel(sdata);
        synchronize_rcu();
-       kfree(presp);
 
        skb_queue_purge(&sdata->skb_queue);
 
index b6186517ec567e85eb986d77dd6ff72c36a4379d..fe48b093d4dc1cafb420f6e75526f4bf851fa4fe 100644 (file)
@@ -322,7 +322,6 @@ struct ieee80211_roc_work {
 
 /* flags used in struct ieee80211_if_managed.flags */
 enum ieee80211_sta_flags {
-       IEEE80211_STA_BEACON_POLL       = BIT(0),
        IEEE80211_STA_CONNECTION_POLL   = BIT(1),
        IEEE80211_STA_CONTROL_PORT      = BIT(2),
        IEEE80211_STA_DISABLE_HT        = BIT(4),
@@ -335,6 +334,7 @@ enum ieee80211_sta_flags {
        IEEE80211_STA_DISABLE_VHT       = BIT(11),
        IEEE80211_STA_DISABLE_80P80MHZ  = BIT(12),
        IEEE80211_STA_DISABLE_160MHZ    = BIT(13),
+       IEEE80211_STA_DISABLE_WMM       = BIT(14),
 };
 
 struct ieee80211_mgd_auth_data {
@@ -487,6 +487,7 @@ struct ieee80211_if_managed {
 
 struct ieee80211_if_ibss {
        struct timer_list timer;
+       struct work_struct csa_connection_drop_work;
 
        unsigned long last_scan_completed;
 
@@ -893,6 +894,8 @@ struct tpt_led_trigger {
  *     that the scan completed.
  * @SCAN_ABORTED: Set for our scan work function when the driver reported
  *     a scan complete for an aborted scan.
+ * @SCAN_HW_CANCELLED: Set for our scan work function when the scan is being
+ *     cancelled.
  */
 enum {
        SCAN_SW_SCANNING,
@@ -900,6 +903,7 @@ enum {
        SCAN_ONCHANNEL_SCANNING,
        SCAN_COMPLETED,
        SCAN_ABORTED,
+       SCAN_HW_CANCELLED,
 };
 
 /**
@@ -1330,6 +1334,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
 void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata);
 void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
                                   struct sk_buff *skb);
+int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
+                             struct cfg80211_csa_settings *csa_settings);
+int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata);
 
 /* mesh code */
 void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
@@ -1481,6 +1489,29 @@ void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
                                       struct ieee80211_mgmt *mgmt,
                                       size_t len);
+/**
+ * ieee80211_parse_ch_switch_ie - parses channel switch IEs
+ * @sdata: the sdata of the interface which has received the frame
+ * @elems: parsed 802.11 elements received with the frame
+ * @beacon: indicates if the frame was a beacon or probe response
+ * @current_band: indicates the current band
+ * @sta_flags: contains information about own capabilities and restrictions
+ *     to decide which channel switch announcements can be accepted. Only the
+ *     following subset of &enum ieee80211_sta_flags are evaluated:
+ *     %IEEE80211_STA_DISABLE_HT, %IEEE80211_STA_DISABLE_VHT,
+ *     %IEEE80211_STA_DISABLE_40MHZ, %IEEE80211_STA_DISABLE_80P80MHZ,
+ *     %IEEE80211_STA_DISABLE_160MHZ.
+ * @count: to be filled with the counter until the switch (on success only)
+ * @bssid: the currently connected bssid (for reporting)
+ * @mode: to be filled with CSA mode (on success only)
+ * @new_chandef: to be filled with destination chandef (on success only)
+ * Return: 0 on success, <0 on error and >0 if there is nothing to parse.
+ */
+int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
+                                struct ieee802_11_elems *elems, bool beacon,
+                                enum ieee80211_band current_band,
+                                u32 sta_flags, u8 *bssid, u8 *count, u8 *mode,
+                                struct cfg80211_chan_def *new_chandef);
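
A hedged sketch of the calling convention documented above, mirroring the IBSS and managed-mode callers elsewhere in this merge (function and variable names invented, assumed to live inside net/mac80211/):

#include "ieee80211_i.h"

/* Illustrative only: how a caller consumes the tri-state return value
 * (<0 error, 0 success, >0 nothing to parse).
 */
static void example_handle_csa(struct ieee80211_sub_if_data *sdata,
                               struct ieee802_11_elems *elems, bool beacon,
                               enum ieee80211_band band, u32 sta_flags,
                               u8 *bssid)
{
        struct cfg80211_chan_def new_chandef = {};
        u8 count, mode;
        int res;

        res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band,
                                           sta_flags, bssid, &count, &mode,
                                           &new_chandef);
        if (res < 0) {
                /* unable to switch to the announced channel: disconnect */
                return;
        }
        if (res) {
                /* the frame carried no usable CSA information */
                return;
        }
        /* schedule the switch to new_chandef; "count" beacon intervals
         * remain, and a non-zero "mode" asks us to stop transmitting
         * until the switch completes.
         */
}
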
 
 /* Suspend/resume and hw reconfiguration */
 int ieee80211_reconfig(struct ieee80211_local *local);
@@ -1654,6 +1685,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
                                  const struct ieee80211_ht_operation *ht_oper,
                                  struct cfg80211_chan_def *chandef);
+u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
 
 int __must_check
 ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
index fcecd633514e2185b0a31235cac345ab08df9433..e48f103b9adeb5fd773f68c6d12cb633cea33fa5 100644 (file)
@@ -766,6 +766,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        if (sdata->vif.type == NL80211_IFTYPE_STATION)
                ieee80211_mgd_stop(sdata);
 
+       if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+               ieee80211_ibss_stop(sdata);
+
+
        /*
         * Remove all stations associated with this interface.
         *
index 620677e897bd0fc82fd5f4e737156096c05a9eef..3e51dd7d98b34aad114e6b927beae8e4ecdde9fb 100644 (file)
@@ -879,7 +879,7 @@ ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
                                  keyconf->keylen, keyconf->key,
                                  0, NULL);
        if (IS_ERR(key))
-               return ERR_PTR(PTR_ERR(key));
+               return ERR_CAST(key);
 
        if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
                key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
index 86e4ad56b573df27779831e17dce835f67396836..d7bdc4b97dde2d57d79e2131333ff55f4878c9cb 100644 (file)
@@ -145,66 +145,6 @@ static int ecw2cw(int ecw)
        return (1 << ecw) - 1;
 }
 
-static u32 chandef_downgrade(struct cfg80211_chan_def *c)
-{
-       u32 ret;
-       int tmp;
-
-       switch (c->width) {
-       case NL80211_CHAN_WIDTH_20:
-               c->width = NL80211_CHAN_WIDTH_20_NOHT;
-               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
-               break;
-       case NL80211_CHAN_WIDTH_40:
-               c->width = NL80211_CHAN_WIDTH_20;
-               c->center_freq1 = c->chan->center_freq;
-               ret = IEEE80211_STA_DISABLE_40MHZ |
-                     IEEE80211_STA_DISABLE_VHT;
-               break;
-       case NL80211_CHAN_WIDTH_80:
-               tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
-               /* n_P40 */
-               tmp /= 2;
-               /* freq_P40 */
-               c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
-               c->width = NL80211_CHAN_WIDTH_40;
-               ret = IEEE80211_STA_DISABLE_VHT;
-               break;
-       case NL80211_CHAN_WIDTH_80P80:
-               c->center_freq2 = 0;
-               c->width = NL80211_CHAN_WIDTH_80;
-               ret = IEEE80211_STA_DISABLE_80P80MHZ |
-                     IEEE80211_STA_DISABLE_160MHZ;
-               break;
-       case NL80211_CHAN_WIDTH_160:
-               /* n_P20 */
-               tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
-               /* n_P80 */
-               tmp /= 4;
-               c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
-               c->width = NL80211_CHAN_WIDTH_80;
-               ret = IEEE80211_STA_DISABLE_80P80MHZ |
-                     IEEE80211_STA_DISABLE_160MHZ;
-               break;
-       default:
-       case NL80211_CHAN_WIDTH_20_NOHT:
-               WARN_ON_ONCE(1);
-               c->width = NL80211_CHAN_WIDTH_20_NOHT;
-               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
-               break;
-       case NL80211_CHAN_WIDTH_5:
-       case NL80211_CHAN_WIDTH_10:
-               WARN_ON_ONCE(1);
-               /* keep c->width */
-               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
-               break;
-       }
-
-       WARN_ON_ONCE(!cfg80211_chandef_valid(c));
-
-       return ret;
-}
-
 static u32
 ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                             struct ieee80211_supported_band *sband,
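
The chandef_downgrade() body removed above becomes the shared ieee80211_chandef_downgrade() declared in the ieee80211_i.h hunk earlier. As a worked example of its 80 MHz branch, with the channel numbers assumed for illustration:

/* Illustrative only: the 80 MHz case evaluated for a control channel at
 * 5180 MHz (channel 36) with center_freq1 = 5210 MHz.
 */
static void downgrade_example(void)
{
        int center_freq = 5180, center_freq1 = 5210, tmp;

        tmp = (30 + center_freq - center_freq1) / 20;   /* 0: 20 MHz index */
        tmp /= 2;                                       /* 0: 40 MHz index */
        center_freq1 = center_freq1 - 20 + 40 * tmp;    /* 5190 MHz        */
        /* 5190 MHz is the centre of channels 36+40, i.e. the HT40 pair
         * containing the control channel; the returned flags then tell
         * the caller that VHT had to be disabled.
         */
}
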
@@ -352,7 +292,7 @@ out:
                        break;
                }
 
-               ret |= chandef_downgrade(chandef);
+               ret |= ieee80211_chandef_downgrade(chandef);
        }
 
        if (chandef->width != vht_chandef.width && !tracking)
@@ -406,13 +346,13 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
         */
        if (ifmgd->flags & IEEE80211_STA_DISABLE_80P80MHZ &&
            chandef.width == NL80211_CHAN_WIDTH_80P80)
-               flags |= chandef_downgrade(&chandef);
+               flags |= ieee80211_chandef_downgrade(&chandef);
        if (ifmgd->flags & IEEE80211_STA_DISABLE_160MHZ &&
            chandef.width == NL80211_CHAN_WIDTH_160)
-               flags |= chandef_downgrade(&chandef);
+               flags |= ieee80211_chandef_downgrade(&chandef);
        if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ &&
            chandef.width > NL80211_CHAN_WIDTH_20)
-               flags |= chandef_downgrade(&chandef);
+               flags |= ieee80211_chandef_downgrade(&chandef);
 
        if (cfg80211_chandef_identical(&chandef, &sdata->vif.bss_conf.chandef))
                return 0;
@@ -893,8 +833,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
        if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
                IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
 
-       if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
-                           IEEE80211_STA_CONNECTION_POLL))
+       if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
                IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_USE_MINRATE;
 
        ieee80211_tx_skb(sdata, skb);
@@ -937,6 +876,8 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+       u32 changed = 0;
+       int ret;
 
        if (!ieee80211_sdata_running(sdata))
                return;
@@ -945,24 +886,39 @@ static void ieee80211_chswitch_work(struct work_struct *work)
        if (!ifmgd->associated)
                goto out;
 
-       local->_oper_chandef = local->csa_chandef;
+       ret = ieee80211_vif_change_channel(sdata, &local->csa_chandef,
+                                          &changed);
+       if (ret) {
+               sdata_info(sdata,
+                          "vif channel switch failed, disconnecting\n");
+               ieee80211_queue_work(&sdata->local->hw,
+                                    &ifmgd->csa_connection_drop_work);
+               goto out;
+       }
 
-       if (!local->ops->channel_switch) {
-               /* call "hw_config" only if doing sw channel switch */
-               ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
-       } else {
-               /* update the device channel directly */
-               local->hw.conf.chandef = local->_oper_chandef;
+       if (!local->use_chanctx) {
+               local->_oper_chandef = local->csa_chandef;
+               /* Call "hw_config" only if doing sw channel switch.
+                * Otherwise update the channel directly
+                */
+               if (!local->ops->channel_switch)
+                       ieee80211_hw_config(local, 0);
+               else
+                       local->hw.conf.chandef = local->_oper_chandef;
        }
 
        /* XXX: shouldn't really modify cfg80211-owned data! */
-       ifmgd->associated->channel = local->_oper_chandef.chan;
+       ifmgd->associated->channel = local->csa_chandef.chan;
 
        /* XXX: wait for a beacon first? */
        ieee80211_wake_queues_by_reason(&local->hw,
                                        IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
+
+       ieee80211_bss_info_change_notify(sdata, changed);
+
  out:
+       sdata->vif.csa_active = false;
        ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
        sdata_unlock(sdata);
 }
@@ -1000,20 +956,12 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct cfg80211_bss *cbss = ifmgd->associated;
-       struct ieee80211_bss *bss;
        struct ieee80211_chanctx *chanctx;
-       enum ieee80211_band new_band;
-       int new_freq;
-       u8 new_chan_no;
+       enum ieee80211_band current_band;
        u8 count;
        u8 mode;
-       struct ieee80211_channel *new_chan;
        struct cfg80211_chan_def new_chandef = {};
-       struct cfg80211_chan_def new_vht_chandef = {};
-       const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
-       const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
-       const struct ieee80211_ht_operation *ht_oper;
-       int secondary_channel_offset = -1;
+       int res;
 
        sdata_assert_lock(sdata);
 
@@ -1027,162 +975,23 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        if (ifmgd->flags & IEEE80211_STA_CSA_RECEIVED)
                return;
 
-       sec_chan_offs = elems->sec_chan_offs;
-       wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
-       ht_oper = elems->ht_operation;
-
-       if (ifmgd->flags & (IEEE80211_STA_DISABLE_HT |
-                           IEEE80211_STA_DISABLE_40MHZ)) {
-               sec_chan_offs = NULL;
-               wide_bw_chansw_ie = NULL;
-               /* only used for bandwidth here */
-               ht_oper = NULL;
-       }
-
-       if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
-               wide_bw_chansw_ie = NULL;
-
-       if (elems->ext_chansw_ie) {
-               if (!ieee80211_operating_class_to_band(
-                               elems->ext_chansw_ie->new_operating_class,
-                               &new_band)) {
-                       sdata_info(sdata,
-                                  "cannot understand ECSA IE operating class %d, disconnecting\n",
-                                  elems->ext_chansw_ie->new_operating_class);
-                       ieee80211_queue_work(&local->hw,
-                                            &ifmgd->csa_connection_drop_work);
-               }
-               new_chan_no = elems->ext_chansw_ie->new_ch_num;
-               count = elems->ext_chansw_ie->count;
-               mode = elems->ext_chansw_ie->mode;
-       } else if (elems->ch_switch_ie) {
-               new_band = cbss->channel->band;
-               new_chan_no = elems->ch_switch_ie->new_ch_num;
-               count = elems->ch_switch_ie->count;
-               mode = elems->ch_switch_ie->mode;
-       } else {
-               /* nothing here we understand */
-               return;
-       }
-
-       bss = (void *)cbss->priv;
-
-       new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
-       new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
-       if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) {
-               sdata_info(sdata,
-                          "AP %pM switches to unsupported channel (%d MHz), disconnecting\n",
-                          ifmgd->associated->bssid, new_freq);
+       current_band = cbss->channel->band;
+       res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band,
+                                          ifmgd->flags,
+                                          ifmgd->associated->bssid, &count,
+                                          &mode, &new_chandef);
+       if (res < 0)
                ieee80211_queue_work(&local->hw,
                                     &ifmgd->csa_connection_drop_work);
+       if (res)
                return;
-       }
-
-       if (!beacon && sec_chan_offs) {
-               secondary_channel_offset = sec_chan_offs->sec_chan_offs;
-       } else if (beacon && ht_oper) {
-               secondary_channel_offset =
-                       ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
-       } else if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
-               /*
-                * If it's not a beacon, HT is enabled and the IE not present,
-                * it's 20 MHz, 802.11-2012 8.5.2.6:
-                *      This element [the Secondary Channel Offset Element] is
-                *      present when switching to a 40 MHz channel. It may be
-                *      present when switching to a 20 MHz channel (in which
-                *      case the secondary channel offset is set to SCN).
-                */
-               secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
-       }
-
-       switch (secondary_channel_offset) {
-       default:
-               /* secondary_channel_offset was present but is invalid */
-       case IEEE80211_HT_PARAM_CHA_SEC_NONE:
-               cfg80211_chandef_create(&new_chandef, new_chan,
-                                       NL80211_CHAN_HT20);
-               break;
-       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-               cfg80211_chandef_create(&new_chandef, new_chan,
-                                       NL80211_CHAN_HT40PLUS);
-               break;
-       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-               cfg80211_chandef_create(&new_chandef, new_chan,
-                                       NL80211_CHAN_HT40MINUS);
-               break;
-       case -1:
-               cfg80211_chandef_create(&new_chandef, new_chan,
-                                       NL80211_CHAN_NO_HT);
-               /* keep width for 5/10 MHz channels */
-               switch (sdata->vif.bss_conf.chandef.width) {
-               case NL80211_CHAN_WIDTH_5:
-               case NL80211_CHAN_WIDTH_10:
-                       new_chandef.width = sdata->vif.bss_conf.chandef.width;
-                       break;
-               default:
-                       break;
-               }
-               break;
-       }
-
-       if (wide_bw_chansw_ie) {
-               new_vht_chandef.chan = new_chan;
-               new_vht_chandef.center_freq1 =
-                       ieee80211_channel_to_frequency(
-                               wide_bw_chansw_ie->new_center_freq_seg0,
-                               new_band);
-
-               switch (wide_bw_chansw_ie->new_channel_width) {
-               default:
-                       /* hmmm, ignore VHT and use HT if present */
-               case IEEE80211_VHT_CHANWIDTH_USE_HT:
-                       new_vht_chandef.chan = NULL;
-                       break;
-               case IEEE80211_VHT_CHANWIDTH_80MHZ:
-                       new_vht_chandef.width = NL80211_CHAN_WIDTH_80;
-                       break;
-               case IEEE80211_VHT_CHANWIDTH_160MHZ:
-                       new_vht_chandef.width = NL80211_CHAN_WIDTH_160;
-                       break;
-               case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
-                       /* field is otherwise reserved */
-                       new_vht_chandef.center_freq2 =
-                               ieee80211_channel_to_frequency(
-                                       wide_bw_chansw_ie->new_center_freq_seg1,
-                                       new_band);
-                       new_vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
-                       break;
-               }
-               if (ifmgd->flags & IEEE80211_STA_DISABLE_80P80MHZ &&
-                   new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
-                       chandef_downgrade(&new_vht_chandef);
-               if (ifmgd->flags & IEEE80211_STA_DISABLE_160MHZ &&
-                   new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
-                       chandef_downgrade(&new_vht_chandef);
-               if (ifmgd->flags & IEEE80211_STA_DISABLE_40MHZ &&
-                   new_vht_chandef.width > NL80211_CHAN_WIDTH_20)
-                       chandef_downgrade(&new_vht_chandef);
-       }
-
-       /* if VHT data is there validate & use it */
-       if (new_vht_chandef.chan) {
-               if (!cfg80211_chandef_compatible(&new_vht_chandef,
-                                                &new_chandef)) {
-                       sdata_info(sdata,
-                                  "AP %pM CSA has inconsistent channel data, disconnecting\n",
-                                  ifmgd->associated->bssid);
-                       ieee80211_queue_work(&local->hw,
-                                            &ifmgd->csa_connection_drop_work);
-                       return;
-               }
-               new_chandef = new_vht_chandef;
-       }
 
        if (!cfg80211_chandef_usable(local->hw.wiphy, &new_chandef,
                                     IEEE80211_CHAN_DISABLED)) {
                sdata_info(sdata,
                           "AP %pM switches to unsupported channel (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n",
-                          ifmgd->associated->bssid, new_freq,
+                          ifmgd->associated->bssid,
+                          new_chandef.chan->center_freq,
                           new_chandef.width, new_chandef.center_freq1,
                           new_chandef.center_freq2);
                ieee80211_queue_work(&local->hw,
@@ -1191,17 +1000,28 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
        }
 
        ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
+       sdata->vif.csa_active = true;
 
+       mutex_lock(&local->chanctx_mtx);
        if (local->use_chanctx) {
-               sdata_info(sdata,
-                          "not handling channel switch with channel contexts\n");
-               ieee80211_queue_work(&local->hw,
-                                    &ifmgd->csa_connection_drop_work);
-               return;
+               u32 num_chanctx = 0;
+               list_for_each_entry(chanctx, &local->chanctx_list, list)
+                      num_chanctx++;
+
+               if (num_chanctx > 1 ||
+                   !(local->hw.flags & IEEE80211_HW_CHANCTX_STA_CSA)) {
+                       sdata_info(sdata,
+                                  "not handling chan-switch with channel contexts\n");
+                       ieee80211_queue_work(&local->hw,
+                                            &ifmgd->csa_connection_drop_work);
+                       mutex_unlock(&local->chanctx_mtx);
+                       return;
+               }
        }
 
-       mutex_lock(&local->chanctx_mtx);
        if (WARN_ON(!rcu_access_pointer(sdata->vif.chanctx_conf))) {
+               ieee80211_queue_work(&local->hw,
+                                    &ifmgd->csa_connection_drop_work);
                mutex_unlock(&local->chanctx_mtx);
                return;
        }
@@ -1374,8 +1194,7 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
        if (!mgd->associated)
                return false;
 
-       if (mgd->flags & (IEEE80211_STA_BEACON_POLL |
-                         IEEE80211_STA_CONNECTION_POLL))
+       if (mgd->flags & IEEE80211_STA_CONNECTION_POLL)
                return false;
 
        if (!mgd->have_beacon)
@@ -1691,8 +1510,7 @@ static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
 {
        lockdep_assert_held(&sdata->local->mtx);
 
-       sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
-                               IEEE80211_STA_BEACON_POLL);
+       sdata->u.mgd.flags &= ~IEEE80211_STA_CONNECTION_POLL;
        ieee80211_run_deferred_scan(sdata->local);
 }
 
@@ -1954,11 +1772,8 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
 
        mutex_lock(&local->mtx);
-       if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
-                             IEEE80211_STA_CONNECTION_POLL))) {
-               mutex_unlock(&local->mtx);
-               return;
-       }
+       if (!(ifmgd->flags & IEEE80211_STA_CONNECTION_POLL))
+               goto out;
 
        __ieee80211_stop_poll(sdata);
 
@@ -2094,15 +1909,9 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
         * because otherwise we would reset the timer every time and
         * never check whether we received a probe response!
         */
-       if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
-                           IEEE80211_STA_CONNECTION_POLL))
+       if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
                already = true;
 
-       if (beacon)
-               ifmgd->flags |= IEEE80211_STA_BEACON_POLL;
-       else
-               ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
-
        mutex_unlock(&sdata->local->mtx);
 
        if (already)
@@ -2174,6 +1983,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
                               true, frame_buf);
        ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
+       sdata->vif.csa_active = false;
        ieee80211_wake_queues_by_reason(&sdata->local->hw,
                                        IEEE80211_MAX_QUEUE_MAP,
                                        IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -2717,7 +2527,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
         */
        ifmgd->wmm_last_param_set = -1;
 
-       if (elems.wmm_param)
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) && elems.wmm_param)
                ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
                                         elems.wmm_param_len);
        else
@@ -3061,17 +2871,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                }
        }
 
-       if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
+       if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL) {
                mlme_dbg_ratelimited(sdata,
                                     "cancelling AP probe due to a received beacon\n");
-               mutex_lock(&local->mtx);
-               ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
-               ieee80211_run_deferred_scan(local);
-               mutex_unlock(&local->mtx);
-
-               mutex_lock(&local->iflist_mtx);
-               ieee80211_recalc_ps(local, -1);
-               mutex_unlock(&local->iflist_mtx);
+               ieee80211_reset_ap_probe(sdata);
        }
 
        /*
@@ -3152,7 +2955,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
        ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
                                         &elems, true);
 
-       if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_WMM) &&
+           ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
                                     elems.wmm_param_len))
                changed |= BSS_CHANGED_QOS;
 
@@ -3543,8 +3347,7 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
        } else if (ifmgd->assoc_data && ifmgd->assoc_data->timeout_started)
                run_again(sdata, ifmgd->assoc_data->timeout);
 
-       if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
-                           IEEE80211_STA_CONNECTION_POLL) &&
+       if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL &&
            ifmgd->associated) {
                u8 bssid[ETH_ALEN];
                int max_tries;
@@ -3876,7 +3679,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
                return ret;
 
        while (ret && chandef.width != NL80211_CHAN_WIDTH_20_NOHT) {
-               ifmgd->flags |= chandef_downgrade(&chandef);
+               ifmgd->flags |= ieee80211_chandef_downgrade(&chandef);
                ret = ieee80211_vif_use_channel(sdata, &chandef,
                                                IEEE80211_CHANCTX_SHARED);
        }
@@ -4135,6 +3938,44 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        return err;
 }
 
+static bool ieee80211_usable_wmm_params(struct ieee80211_sub_if_data *sdata,
+                                       const u8 *wmm_param, int len)
+{
+       const u8 *pos;
+       size_t left;
+
+       if (len < 8)
+               return false;
+
+       if (wmm_param[5] != 1 /* version */)
+               return false;
+
+       pos = wmm_param + 8;
+       left = len - 8;
+
+       for (; left >= 4; left -= 4, pos += 4) {
+               u8 aifsn = pos[0] & 0x0f;
+               u8 ecwmin = pos[1] & 0x0f;
+               u8 ecwmax = (pos[1] & 0xf0) >> 4;
+               int aci = (pos[0] >> 5) & 0x03;
+
+               if (aifsn < 2) {
+                       sdata_info(sdata,
+                                  "AP has invalid WMM params (AIFSN=%d for ACI %d), disabling WMM\n",
+                                  aifsn, aci);
+                       return false;
+               }
+               if (ecwmin > ecwmax) {
+                       sdata_info(sdata,
+                                  "AP has invalid WMM params (ECWmin/max=%d/%d for ACI %d), disabling WMM\n",
+                                  ecwmin, ecwmax, aci);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
 int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                        struct cfg80211_assoc_request *req)
 {
@@ -4192,9 +4033,45 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        }
 
        /* prepare assoc data */
-       
+
        ifmgd->beacon_crc_valid = false;
 
+       assoc_data->wmm = bss->wmm_used &&
+                         (local->hw.queues >= IEEE80211_NUM_ACS);
+       if (assoc_data->wmm) {
+               /* try to check validity of WMM params IE */
+               const struct cfg80211_bss_ies *ies;
+               const u8 *wp, *start, *end;
+
+               rcu_read_lock();
+               ies = rcu_dereference(req->bss->ies);
+               start = ies->data;
+               end = start + ies->len;
+
+               while (true) {
+                       wp = cfg80211_find_vendor_ie(
+                               WLAN_OUI_MICROSOFT,
+                               WLAN_OUI_TYPE_MICROSOFT_WMM,
+                               start, end - start);
+                       if (!wp)
+                               break;
+                       start = wp + wp[1] + 2;
+                       /* if this IE is too short, try the next */
+                       if (wp[1] <= 4)
+                               continue;
+                       /* if this IE is WMM params, we found what we wanted */
+                       if (wp[6] == 1)
+                               break;
+               }
+
+               if (!wp || !ieee80211_usable_wmm_params(sdata, wp + 2,
+                                                       wp[1] - 2)) {
+                       assoc_data->wmm = false;
+                       ifmgd->flags |= IEEE80211_STA_DISABLE_WMM;
+               }
+               rcu_read_unlock();
+       }
+
        /*
         * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
         * We still associate in non-HT mode (11a/b/g) if any one of these
@@ -4224,18 +4101,22 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        /* Also disable HT if we don't support it or the AP doesn't use WMM */
        sband = local->hw.wiphy->bands[req->bss->channel->band];
        if (!sband->ht_cap.ht_supported ||
-           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used ||
+           ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
-               if (!bss->wmm_used)
+               if (!bss->wmm_used &&
+                   !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM))
                        netdev_info(sdata->dev,
                                    "disabling HT as WMM/QoS is not supported by the AP\n");
        }
 
        /* disable VHT if we don't support it or the AP doesn't use WMM */
        if (!sband->vht_cap.vht_supported ||
-           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
+           local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used ||
+           ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
                ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
-               if (!bss->wmm_used)
+               if (!bss->wmm_used &&
+                   !(ifmgd->flags & IEEE80211_STA_DISABLE_WMM))
                        netdev_info(sdata->dev,
                                    "disabling VHT as WMM/QoS is not supported by the AP\n");
        }
@@ -4264,8 +4145,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
                sdata->smps_mode = ifmgd->req_smps;
 
        assoc_data->capability = req->bss->capability;
-       assoc_data->wmm = bss->wmm_used &&
-                         (local->hw.queues >= IEEE80211_NUM_ACS);
        assoc_data->supp_rates = bss->supp_rates;
        assoc_data->supp_rates_len = bss->supp_rates_len;
 
index acd1f71adc0386ab588f0939309e2367330f2b29..0c2a29484c07cdaaaf716b1f5ba0184d1047ee68 100644 (file)
@@ -394,6 +394,8 @@ void ieee80211_sw_roc_work(struct work_struct *work)
 
                if (started)
                        ieee80211_start_next_roc(local);
+               else if (list_empty(&local->roc_list))
+                       ieee80211_run_deferred_scan(local);
        }
 
  out_unlock:
index e126605cec66baf82aadb835856110d74105795f..22b223f13c9fa22994eba6f68769a27b2cfe4eb3 100644 (file)
@@ -235,7 +235,8 @@ static void rc_send_low_basicrate(s8 *idx, u32 basic_rates,
 static void __rate_control_send_low(struct ieee80211_hw *hw,
                                    struct ieee80211_supported_band *sband,
                                    struct ieee80211_sta *sta,
-                                   struct ieee80211_tx_info *info)
+                                   struct ieee80211_tx_info *info,
+                                   u32 rate_mask)
 {
        int i;
        u32 rate_flags =
@@ -247,6 +248,12 @@ static void __rate_control_send_low(struct ieee80211_hw *hw,
 
        info->control.rates[0].idx = 0;
        for (i = 0; i < sband->n_bitrates; i++) {
+               if (!(rate_mask & BIT(i)))
+                       continue;
+
+               if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+                       continue;
+
                if (!rate_supported(sta, sband->band, i))
                        continue;
 
@@ -274,7 +281,8 @@ bool rate_control_send_low(struct ieee80211_sta *pubsta,
        bool use_basicrate = false;
 
        if (!pubsta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
-               __rate_control_send_low(txrc->hw, sband, pubsta, info);
+               __rate_control_send_low(txrc->hw, sband, pubsta, info,
+                                       txrc->rate_idx_mask);
 
                if (!pubsta && txrc->bss) {
                        mcast_rate = txrc->bss_conf->mcast_rate[sband->band];
@@ -656,7 +664,8 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
                rate_control_apply_mask(sdata, sta, sband, info, dest, max_rates);
 
        if (dest[0].idx < 0)
-               __rate_control_send_low(&sdata->local->hw, sband, sta, info);
+               __rate_control_send_low(&sdata->local->hw, sband, sta, info,
+                                       sdata->rc_rateidx_mask[info->band]);
 
        if (sta)
                rate_fixup_ratelist(vif, sband, info, dest, max_rates);
index 5dedc56c94dbe91a1b9bd6c959094ab3b494be86..505bc0dea074809f1532f322528e222a5052c061 100644 (file)
@@ -144,8 +144,8 @@ void rate_control_deinitialize(struct ieee80211_local *local);
 
 /* Rate control algorithms */
 #ifdef CONFIG_MAC80211_RC_PID
-extern int rc80211_pid_init(void);
-extern void rc80211_pid_exit(void);
+int rc80211_pid_init(void);
+void rc80211_pid_exit(void);
 #else
 static inline int rc80211_pid_init(void)
 {
@@ -157,8 +157,8 @@ static inline void rc80211_pid_exit(void)
 #endif
 
 #ifdef CONFIG_MAC80211_RC_MINSTREL
-extern int rc80211_minstrel_init(void);
-extern void rc80211_minstrel_exit(void);
+int rc80211_minstrel_init(void);
+void rc80211_minstrel_exit(void);
 #else
 static inline int rc80211_minstrel_init(void)
 {
@@ -170,8 +170,8 @@ static inline void rc80211_minstrel_exit(void)
 #endif
 
 #ifdef CONFIG_MAC80211_RC_MINSTREL_HT
-extern int rc80211_minstrel_ht_init(void);
-extern void rc80211_minstrel_ht_exit(void);
+int rc80211_minstrel_ht_init(void);
+void rc80211_minstrel_ht_exit(void);
 #else
 static inline int rc80211_minstrel_ht_init(void)
 {
index 8b5f7ef7c0c9f14db5dba3baf8ab82247500c1c3..7fa1b36e620247ed5c14e97d54b2d72d7a0b43cb 100644 (file)
@@ -203,6 +203,15 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
        memcpy(mi->max_tp_rate, tmp_tp_rate, sizeof(mi->max_tp_rate));
        mi->max_prob_rate = tmp_prob_rate;
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+       /* use fixed index if set */
+       if (mp->fixed_rate_idx != -1) {
+               mi->max_tp_rate[0] = mp->fixed_rate_idx;
+               mi->max_tp_rate[1] = mp->fixed_rate_idx;
+               mi->max_prob_rate = mp->fixed_rate_idx;
+       }
+#endif
+
        /* Reset update timer */
        mi->stats_update = jiffies;
 
@@ -310,6 +319,11 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
        /* increase sum packet counter */
        mi->packet_count++;
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+       if (mp->fixed_rate_idx != -1)
+               return;
+#endif
+
        delta = (mi->packet_count * sampling_ratio / 100) -
                        (mi->sample_count + mi->sample_deferred / 2);
 
index 7c323f27ba230a91599f34a6877caa8c17ff8605..5d60779a0c1be89e987b2cd478af865ec806d4bb 100644 (file)
@@ -365,6 +365,14 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
                }
        }
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+       /* use fixed index if set */
+       if (mp->fixed_rate_idx != -1) {
+               mi->max_tp_rate = mp->fixed_rate_idx;
+               mi->max_tp_rate2 = mp->fixed_rate_idx;
+               mi->max_prob_rate = mp->fixed_rate_idx;
+       }
+#endif
 
        mi->stats_update = jiffies;
 }
@@ -774,6 +782,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
        info->flags |= mi->tx_flags;
        minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);
 
+#ifdef CONFIG_MAC80211_DEBUGFS
+       if (mp->fixed_rate_idx != -1)
+               return;
+#endif
+
        /* Don't use EAPOL frames for sampling on non-mrr hw */
        if (mp->hw->max_rates == 1 &&
            (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
@@ -781,16 +794,6 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
        else
                sample_idx = minstrel_get_sample_rate(mp, mi);
 
-#ifdef CONFIG_MAC80211_DEBUGFS
-       /* use fixed index if set */
-       if (mp->fixed_rate_idx != -1) {
-               mi->max_tp_rate = mp->fixed_rate_idx;
-               mi->max_tp_rate2 = mp->fixed_rate_idx;
-               mi->max_prob_rate = mp->fixed_rate_idx;
-               sample_idx = -1;
-       }
-#endif
-
        mi->total_packets++;
 
        /* wraparound */
index c97a0657c0435bece74ce534e173df74b62a16ae..6ff134650a84c195e033c62c76e9c954ac7dc245 100644 (file)
@@ -167,29 +167,29 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
         * provide large enough buffers. */
        length = length < RC_PID_PRINT_BUF_SIZE ?
                 length : RC_PID_PRINT_BUF_SIZE;
-       p = snprintf(pb, length, "%u %lu ", ev->id, ev->timestamp);
+       p = scnprintf(pb, length, "%u %lu ", ev->id, ev->timestamp);
        switch (ev->type) {
        case RC_PID_EVENT_TYPE_TX_STATUS:
-               p += snprintf(pb + p, length - p, "tx_status %u %u",
-                             !(ev->data.flags & IEEE80211_TX_STAT_ACK),
-                             ev->data.tx_status.status.rates[0].idx);
+               p += scnprintf(pb + p, length - p, "tx_status %u %u",
+                              !(ev->data.flags & IEEE80211_TX_STAT_ACK),
+                              ev->data.tx_status.status.rates[0].idx);
                break;
        case RC_PID_EVENT_TYPE_RATE_CHANGE:
-               p += snprintf(pb + p, length - p, "rate_change %d %d",
-                             ev->data.index, ev->data.rate);
+               p += scnprintf(pb + p, length - p, "rate_change %d %d",
+                              ev->data.index, ev->data.rate);
                break;
        case RC_PID_EVENT_TYPE_TX_RATE:
-               p += snprintf(pb + p, length - p, "tx_rate %d %d",
-                             ev->data.index, ev->data.rate);
+               p += scnprintf(pb + p, length - p, "tx_rate %d %d",
+                              ev->data.index, ev->data.rate);
                break;
        case RC_PID_EVENT_TYPE_PF_SAMPLE:
-               p += snprintf(pb + p, length - p,
-                             "pf_sample %d %d %d %d",
-                             ev->data.pf_sample, ev->data.prop_err,
-                             ev->data.int_err, ev->data.der_err);
+               p += scnprintf(pb + p, length - p,
+                              "pf_sample %d %d %d %d",
+                              ev->data.pf_sample, ev->data.prop_err,
+                              ev->data.int_err, ev->data.der_err);
                break;
        }
-       p += snprintf(pb + p, length - p, "\n");
+       p += scnprintf(pb + p, length - p, "\n");
 
        spin_unlock_irqrestore(&events->lock, status);
 
index 54395d7583ba70e31111b08092c88d2901be7815..0011ac8150972470110b004c4780a8dbc8acd406 100644 (file)
@@ -995,8 +995,9 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
                                rx->sta->num_duplicates++;
                        }
                        return RX_DROP_UNUSABLE;
-               } else
+               } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
                        rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
+               }
        }
 
        if (unlikely(rx->skb->len < 16)) {
@@ -2402,7 +2403,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                return RX_DROP_UNUSABLE;
 
        if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
-           mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED)
+           mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
+           mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
                return RX_DROP_UNUSABLE;
 
        if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
@@ -2566,31 +2568,46 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 
                goto queue;
        case WLAN_CATEGORY_SPECTRUM_MGMT:
-               if (status->band != IEEE80211_BAND_5GHZ)
-                       break;
-
-               if (sdata->vif.type != NL80211_IFTYPE_STATION)
-                       break;
-
                /* verify action_code is present */
                if (len < IEEE80211_MIN_ACTION_SIZE + 1)
                        break;
 
                switch (mgmt->u.action.u.measurement.action_code) {
                case WLAN_ACTION_SPCT_MSR_REQ:
+                       if (status->band != IEEE80211_BAND_5GHZ)
+                               break;
+
                        if (len < (IEEE80211_MIN_ACTION_SIZE +
                                   sizeof(mgmt->u.action.u.measurement)))
                                break;
+
+                       if (sdata->vif.type != NL80211_IFTYPE_STATION)
+                               break;
+
                        ieee80211_process_measurement_req(sdata, mgmt, len);
                        goto handled;
-               case WLAN_ACTION_SPCT_CHL_SWITCH:
-                       if (sdata->vif.type != NL80211_IFTYPE_STATION)
+               case WLAN_ACTION_SPCT_CHL_SWITCH: {
+                       u8 *bssid;
+                       if (len < (IEEE80211_MIN_ACTION_SIZE +
+                                  sizeof(mgmt->u.action.u.chan_switch)))
+                               break;
+
+                       if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+                           sdata->vif.type != NL80211_IFTYPE_ADHOC)
                                break;
 
-                       if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
+                       if (sdata->vif.type == NL80211_IFTYPE_STATION)
+                               bssid = sdata->u.mgd.bssid;
+                       else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+                               bssid = sdata->u.ibss.bssid;
+                       else
+                               break;
+
+                       if (!ether_addr_equal(mgmt->bssid, bssid))
                                break;
 
                        goto queue;
+                       }
                }
                break;
        case WLAN_CATEGORY_SA_QUERY:
@@ -3056,6 +3073,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
        case NL80211_IFTYPE_ADHOC:
                if (!bssid)
                        return 0;
+               if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
+                   ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+                       return 0;
                if (ieee80211_is_beacon(hdr->frame_control)) {
                        return 1;
                } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
index 08afe74b98f4b6cbdda21ed3d8ff68b9a95fa64b..5ad66a83ef7f4d4525de163c2c2f1fe9d6931a04 100644 (file)
@@ -238,6 +238,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
        enum ieee80211_band band;
        int i, ielen, n_chans;
 
+       if (test_bit(SCAN_HW_CANCELLED, &local->scanning))
+               return false;
+
        do {
                if (local->hw_scan_band == IEEE80211_NUM_BANDS)
                        return false;
@@ -391,8 +394,7 @@ static bool ieee80211_can_scan(struct ieee80211_local *local,
                return false;
 
        if (sdata->vif.type == NL80211_IFTYPE_STATION &&
-           sdata->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
-                                 IEEE80211_STA_CONNECTION_POLL))
+           sdata->u.mgd.flags & IEEE80211_STA_CONNECTION_POLL)
                return false;
 
        return true;
@@ -940,7 +942,23 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
        if (!local->scan_req)
                goto out;
 
+       /*
+        * We have a scan running and the driver already reported completion,
+        * but the worker hasn't run yet or is stuck on the mutex - mark it as
+        * cancelled.
+        */
+       if (test_bit(SCAN_HW_SCANNING, &local->scanning) &&
+           test_bit(SCAN_COMPLETED, &local->scanning)) {
+               set_bit(SCAN_HW_CANCELLED, &local->scanning);
+               goto out;
+       }
+
        if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
+               /*
+                * Make sure that __ieee80211_scan_completed doesn't trigger a
+                * scan on another band.
+                */
+               set_bit(SCAN_HW_CANCELLED, &local->scanning);
                if (local->ops->cancel_hw_scan)
                        drv_cancel_hw_scan(local,
                                rcu_dereference_protected(local->scan_sdata,
index 578eea3fc04d27a25eeddc84f244fe6de24dc6a8..921597e279a307857cad5974a434fdd9252ec225 100644 (file)
 #include "sta_info.h"
 #include "wme.h"
 
+int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
+                                struct ieee802_11_elems *elems, bool beacon,
+                                enum ieee80211_band current_band,
+                                u32 sta_flags, u8 *bssid, u8 *count, u8 *mode,
+                                struct cfg80211_chan_def *new_chandef)
+{
+       enum ieee80211_band new_band;
+       int new_freq;
+       u8 new_chan_no;
+       struct ieee80211_channel *new_chan;
+       struct cfg80211_chan_def new_vht_chandef = {};
+       const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
+       const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
+       const struct ieee80211_ht_operation *ht_oper;
+       int secondary_channel_offset = -1;
+
+       sec_chan_offs = elems->sec_chan_offs;
+       wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
+       ht_oper = elems->ht_operation;
+
+       if (sta_flags & (IEEE80211_STA_DISABLE_HT |
+                        IEEE80211_STA_DISABLE_40MHZ)) {
+               sec_chan_offs = NULL;
+               wide_bw_chansw_ie = NULL;
+               /* only used for bandwidth here */
+               ht_oper = NULL;
+       }
+
+       if (sta_flags & IEEE80211_STA_DISABLE_VHT)
+               wide_bw_chansw_ie = NULL;
+
+       if (elems->ext_chansw_ie) {
+               if (!ieee80211_operating_class_to_band(
+                               elems->ext_chansw_ie->new_operating_class,
+                               &new_band)) {
+                       sdata_info(sdata,
+                                  "cannot understand ECSA IE operating class %d, disconnecting\n",
+                                  elems->ext_chansw_ie->new_operating_class);
+                       return -EINVAL;
+               }
+               new_chan_no = elems->ext_chansw_ie->new_ch_num;
+               *count = elems->ext_chansw_ie->count;
+               *mode = elems->ext_chansw_ie->mode;
+       } else if (elems->ch_switch_ie) {
+               new_band = current_band;
+               new_chan_no = elems->ch_switch_ie->new_ch_num;
+               *count = elems->ch_switch_ie->count;
+               *mode = elems->ch_switch_ie->mode;
+       } else {
+               /* nothing here we understand */
+               return 1;
+       }
+
+       new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
+       new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq);
+       if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) {
+               sdata_info(sdata,
+                          "BSS %pM switches to unsupported channel (%d MHz), disconnecting\n",
+                          bssid, new_freq);
+               return -EINVAL;
+       }
+
+       if (!beacon && sec_chan_offs) {
+               secondary_channel_offset = sec_chan_offs->sec_chan_offs;
+       } else if (beacon && ht_oper) {
+               secondary_channel_offset =
+                       ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
+       } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) {
+               /* If it's not a beacon, HT is enabled and the IE not present,
+                * it's 20 MHz, 802.11-2012 8.5.2.6:
+                *      This element [the Secondary Channel Offset Element] is
+                *      present when switching to a 40 MHz channel. It may be
+                *      present when switching to a 20 MHz channel (in which
+                *      case the secondary channel offset is set to SCN).
+                */
+               secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+       }
+
+       switch (secondary_channel_offset) {
+       default:
+               /* secondary_channel_offset was present but is invalid */
+       case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+               cfg80211_chandef_create(new_chandef, new_chan,
+                                       NL80211_CHAN_HT20);
+               break;
+       case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+               cfg80211_chandef_create(new_chandef, new_chan,
+                                       NL80211_CHAN_HT40PLUS);
+               break;
+       case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+               cfg80211_chandef_create(new_chandef, new_chan,
+                                       NL80211_CHAN_HT40MINUS);
+               break;
+       case -1:
+               cfg80211_chandef_create(new_chandef, new_chan,
+                                       NL80211_CHAN_NO_HT);
+               /* keep width for 5/10 MHz channels */
+               switch (sdata->vif.bss_conf.chandef.width) {
+               case NL80211_CHAN_WIDTH_5:
+               case NL80211_CHAN_WIDTH_10:
+                       new_chandef->width = sdata->vif.bss_conf.chandef.width;
+                       break;
+               default:
+                       break;
+               }
+               break;
+       }
+
+       if (wide_bw_chansw_ie) {
+               new_vht_chandef.chan = new_chan;
+               new_vht_chandef.center_freq1 =
+                       ieee80211_channel_to_frequency(
+                               wide_bw_chansw_ie->new_center_freq_seg0,
+                               new_band);
+
+               switch (wide_bw_chansw_ie->new_channel_width) {
+               default:
+                       /* hmmm, ignore VHT and use HT if present */
+               case IEEE80211_VHT_CHANWIDTH_USE_HT:
+                       new_vht_chandef.chan = NULL;
+                       break;
+               case IEEE80211_VHT_CHANWIDTH_80MHZ:
+                       new_vht_chandef.width = NL80211_CHAN_WIDTH_80;
+                       break;
+               case IEEE80211_VHT_CHANWIDTH_160MHZ:
+                       new_vht_chandef.width = NL80211_CHAN_WIDTH_160;
+                       break;
+               case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+                       /* field is otherwise reserved */
+                       new_vht_chandef.center_freq2 =
+                               ieee80211_channel_to_frequency(
+                                       wide_bw_chansw_ie->new_center_freq_seg1,
+                                       new_band);
+                       new_vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
+                       break;
+               }
+               if (sta_flags & IEEE80211_STA_DISABLE_80P80MHZ &&
+                   new_vht_chandef.width == NL80211_CHAN_WIDTH_80P80)
+                       ieee80211_chandef_downgrade(&new_vht_chandef);
+               if (sta_flags & IEEE80211_STA_DISABLE_160MHZ &&
+                   new_vht_chandef.width == NL80211_CHAN_WIDTH_160)
+                       ieee80211_chandef_downgrade(&new_vht_chandef);
+               if (sta_flags & IEEE80211_STA_DISABLE_40MHZ &&
+                   new_vht_chandef.width > NL80211_CHAN_WIDTH_20)
+                       ieee80211_chandef_downgrade(&new_vht_chandef);
+       }
+
+       /* if VHT data is there validate & use it */
+       if (new_vht_chandef.chan) {
+               if (!cfg80211_chandef_compatible(&new_vht_chandef,
+                                                new_chandef)) {
+                       sdata_info(sdata,
+                                  "BSS %pM: CSA has inconsistent channel data, disconnecting\n",
+                                  bssid);
+                       return -EINVAL;
+               }
+               *new_chandef = new_vht_chandef;
+       }
+
+       return 0;
+}
+
 static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_data *sdata,
                                        struct ieee80211_msrment_ie *request_ie,
                                        const u8 *da, const u8 *bssid,
index 368837fe3b800e87f408039ef4d36e84bfaa7069..78dc2e99027e06b8a9fc37f5bdf7f06483476f2b 100644 (file)
@@ -180,6 +180,9 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
 
+       if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
+               sta->last_rx = jiffies;
+
        if (ieee80211_is_data_qos(mgmt->frame_control)) {
                struct ieee80211_hdr *hdr = (void *) skb->data;
                u8 *qc = ieee80211_get_qos_ctl(hdr);
index 1aba645882bd92abbf9e5a4cdd90f7544a173c4a..d4cee98533fdc5a532a8ba74e48d6158faeb5003 100644 (file)
@@ -77,13 +77,13 @@ DECLARE_EVENT_CLASS(local_sdata_addr_evt,
        TP_STRUCT__entry(
                LOCAL_ENTRY
                VIF_ENTRY
-               __array(char, addr, 6)
+               __array(char, addr, ETH_ALEN)
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
                VIF_ASSIGN;
-               memcpy(__entry->addr, sdata->vif.addr, 6);
+               memcpy(__entry->addr, sdata->vif.addr, ETH_ALEN);
        ),
 
        TP_printk(
@@ -1475,6 +1475,41 @@ DEFINE_EVENT(local_sdata_evt, drv_ipv6_addr_change,
 );
 #endif
 
+TRACE_EVENT(drv_join_ibss,
+       TP_PROTO(struct ieee80211_local *local,
+                struct ieee80211_sub_if_data *sdata,
+                struct ieee80211_bss_conf *info),
+
+       TP_ARGS(local, sdata, info),
+
+       TP_STRUCT__entry(
+               LOCAL_ENTRY
+               VIF_ENTRY
+               __field(u8, dtimper)
+               __field(u16, bcnint)
+               __dynamic_array(u8, ssid, info->ssid_len);
+       ),
+
+       TP_fast_assign(
+               LOCAL_ASSIGN;
+               VIF_ASSIGN;
+               __entry->dtimper = info->dtim_period;
+               __entry->bcnint = info->beacon_int;
+               memcpy(__get_dynamic_array(ssid), info->ssid, info->ssid_len);
+       ),
+
+       TP_printk(
+               LOCAL_PR_FMT  VIF_PR_FMT,
+               LOCAL_PR_ARG, VIF_PR_ARG
+       )
+);
+
+DEFINE_EVENT(local_sdata_evt, drv_leave_ibss,
+       TP_PROTO(struct ieee80211_local *local,
+                struct ieee80211_sub_if_data *sdata),
+       TP_ARGS(local, sdata)
+);
+
 /*
  * Tracing for API calls that drivers call.
  */
index 3456c0486b482bfb44a6f797ebaff5d70bde4940..9993fcb19ecdd8b97ea0c1fe1221f916dfd24e7f 100644 (file)
@@ -1120,7 +1120,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
                tx->sta = rcu_dereference(sdata->u.vlan.sta);
                if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
                        return TX_DROP;
-       } else if (info->flags & IEEE80211_TX_CTL_INJECTED ||
+       } else if (info->flags & (IEEE80211_TX_CTL_INJECTED |
+                                 IEEE80211_TX_INTFL_NL80211_FRAME_TX) ||
                   tx->sdata->control_port_protocol == tx->skb->protocol) {
                tx->sta = sta_info_get_bss(sdata, hdr->addr1);
        }
@@ -1981,7 +1982,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
         * EAPOL frames from the local station.
         */
        if (unlikely(!ieee80211_vif_is_mesh(&sdata->vif) &&
-                    !is_multicast_ether_addr(hdr.addr1) && !authorized &&
+                    !multicast && !authorized &&
                     (cpu_to_be16(ethertype) != sdata->control_port_protocol ||
                      !ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -2357,15 +2358,31 @@ static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
        struct probe_resp *resp;
        int counter_offset_beacon = sdata->csa_counter_offset_beacon;
        int counter_offset_presp = sdata->csa_counter_offset_presp;
+       u8 *beacon_data;
+       size_t beacon_data_len;
+
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_AP:
+               beacon_data = beacon->tail;
+               beacon_data_len = beacon->tail_len;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               beacon_data = beacon->head;
+               beacon_data_len = beacon->head_len;
+               break;
+       default:
+               return;
+       }
+       if (WARN_ON(counter_offset_beacon >= beacon_data_len))
+               return;
 
        /* warn if the driver did not check for/react to csa completeness */
-       if (WARN_ON(((u8 *)beacon->tail)[counter_offset_beacon] == 0))
+       if (WARN_ON(beacon_data[counter_offset_beacon] == 0))
                return;
 
-       ((u8 *)beacon->tail)[counter_offset_beacon]--;
+       beacon_data[counter_offset_beacon]--;
 
-       if (sdata->vif.type == NL80211_IFTYPE_AP &&
-           counter_offset_presp) {
+       if (sdata->vif.type == NL80211_IFTYPE_AP && counter_offset_presp) {
                rcu_read_lock();
                resp = rcu_dereference(sdata->u.ap.probe_resp);
 
@@ -2400,6 +2417,15 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
                        goto out;
                beacon_data = beacon->tail;
                beacon_data_len = beacon->tail_len;
+       } else if (vif->type == NL80211_IFTYPE_ADHOC) {
+               struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+
+               beacon = rcu_dereference(ifibss->presp);
+               if (!beacon)
+                       goto out;
+
+               beacon_data = beacon->head;
+               beacon_data_len = beacon->head_len;
        } else {
                WARN_ON(1);
                goto out;
@@ -2484,6 +2510,10 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
                if (!presp)
                        goto out;
 
+               if (sdata->vif.csa_active)
+                       ieee80211_update_csa(sdata, presp);
+
+
                skb = dev_alloc_skb(local->tx_headroom + presp->head_len);
                if (!skb)
                        goto out;
index e1b34a18b24344cb95a642c436fc5322054207ce..aefb9d5b962023eb59b750f867de6733eb1c142b 100644 (file)
@@ -567,18 +567,15 @@ void ieee80211_flush_queues(struct ieee80211_local *local,
                                        IEEE80211_QUEUE_STOP_REASON_FLUSH);
 }
 
-void ieee80211_iterate_active_interfaces(
-       struct ieee80211_hw *hw, u32 iter_flags,
-       void (*iterator)(void *data, u8 *mac,
-                        struct ieee80211_vif *vif),
-       void *data)
+static void __iterate_active_interfaces(struct ieee80211_local *local,
+                                       u32 iter_flags,
+                                       void (*iterator)(void *data, u8 *mac,
+                                               struct ieee80211_vif *vif),
+                                       void *data)
 {
-       struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_sub_if_data *sdata;
 
-       mutex_lock(&local->iflist_mtx);
-
-       list_for_each_entry(sdata, &local->interfaces, list) {
+       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_MONITOR:
                        if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
@@ -597,13 +594,25 @@ void ieee80211_iterate_active_interfaces(
                                 &sdata->vif);
        }
 
-       sdata = rcu_dereference_protected(local->monitor_sdata,
-                                         lockdep_is_held(&local->iflist_mtx));
+       sdata = rcu_dereference_check(local->monitor_sdata,
+                                     lockdep_is_held(&local->iflist_mtx) ||
+                                     lockdep_rtnl_is_held());
        if (sdata &&
            (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
             sdata->flags & IEEE80211_SDATA_IN_DRIVER))
                iterator(data, sdata->vif.addr, &sdata->vif);
+}
 
+void ieee80211_iterate_active_interfaces(
+       struct ieee80211_hw *hw, u32 iter_flags,
+       void (*iterator)(void *data, u8 *mac,
+                        struct ieee80211_vif *vif),
+       void *data)
+{
+       struct ieee80211_local *local = hw_to_local(hw);
+
+       mutex_lock(&local->iflist_mtx);
+       __iterate_active_interfaces(local, iter_flags, iterator, data);
        mutex_unlock(&local->iflist_mtx);
 }
 EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
@@ -615,38 +624,26 @@ void ieee80211_iterate_active_interfaces_atomic(
        void *data)
 {
        struct ieee80211_local *local = hw_to_local(hw);
-       struct ieee80211_sub_if_data *sdata;
 
        rcu_read_lock();
+       __iterate_active_interfaces(local, iter_flags, iterator, data);
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
 
-       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-               switch (sdata->vif.type) {
-               case NL80211_IFTYPE_MONITOR:
-                       if (!(sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE))
-                               continue;
-                       break;
-               case NL80211_IFTYPE_AP_VLAN:
-                       continue;
-               default:
-                       break;
-               }
-               if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
-                   !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
-                       continue;
-               if (ieee80211_sdata_running(sdata))
-                       iterator(data, sdata->vif.addr,
-                                &sdata->vif);
-       }
+void ieee80211_iterate_active_interfaces_rtnl(
+       struct ieee80211_hw *hw, u32 iter_flags,
+       void (*iterator)(void *data, u8 *mac,
+                        struct ieee80211_vif *vif),
+       void *data)
+{
+       struct ieee80211_local *local = hw_to_local(hw);
 
-       sdata = rcu_dereference(local->monitor_sdata);
-       if (sdata &&
-           (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL ||
-            sdata->flags & IEEE80211_SDATA_IN_DRIVER))
-               iterator(data, sdata->vif.addr, &sdata->vif);
+       ASSERT_RTNL();
 
-       rcu_read_unlock();
+       __iterate_active_interfaces(local, iter_flags, iterator, data);
 }
-EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
+EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_rtnl);
 
 /*
  * Nothing should have been stuffed into the workqueue during
@@ -1007,14 +1004,21 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
         */
        enable_qos = (sdata->vif.type != NL80211_IFTYPE_STATION);
 
-       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
-               /* Set defaults according to 802.11-2007 Table 7-37 */
-               aCWmax = 1023;
-               if (use_11b)
-                       aCWmin = 31;
-               else
-                       aCWmin = 15;
+       /* Set defaults according to 802.11-2007 Table 7-37 */
+       aCWmax = 1023;
+       if (use_11b)
+               aCWmin = 31;
+       else
+               aCWmin = 15;
 
+       /* Configure old 802.11b/g medium access rules. */
+       qparam.cw_max = aCWmax;
+       qparam.cw_min = aCWmin;
+       qparam.txop = 0;
+       qparam.aifs = 2;
+
+       for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               /* Update if QoS is enabled. */
                if (enable_qos) {
                        switch (ac) {
                        case IEEE80211_AC_BK:
@@ -1050,12 +1054,6 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
                                qparam.aifs = 2;
                                break;
                        }
-               } else {
-                       /* Confiure old 802.11b/g medium access rules. */
-                       qparam.cw_max = aCWmax;
-                       qparam.cw_min = aCWmin;
-                       qparam.txop = 0;
-                       qparam.aifs = 2;
                }
 
                qparam.uapsd = false;
@@ -1084,8 +1082,8 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_mgmt *mgmt;
        int err;
 
-       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
-                           sizeof(*mgmt) + 6 + extra_len);
+       /* 24 + 6 = header + auth_algo + auth_transaction + status_code */
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len);
        if (!skb)
                return;
 
@@ -2103,7 +2101,7 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_supported_band *sband;
-       int rate, skip, shift;
+       int rate, shift;
        u8 i, exrates, *pos;
        u32 basic_rates = sdata->vif.bss_conf.basic_rates;
        u32 rate_flags;
@@ -2131,14 +2129,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
                pos = skb_put(skb, exrates + 2);
                *pos++ = WLAN_EID_EXT_SUPP_RATES;
                *pos++ = exrates;
-               skip = 0;
                for (i = 8; i < sband->n_bitrates; i++) {
                        u8 basic = 0;
                        if ((rate_flags & sband->bitrates[i].flags)
                            != rate_flags)
                                continue;
-                       if (skip++ < 8)
-                               continue;
                        if (need_basic && basic_rates & BIT(i))
                                basic = 0x80;
                        rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
@@ -2241,6 +2236,10 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
        }
 
        rate = cfg80211_calculate_bitrate(&ri);
+       if (WARN_ONCE(!rate,
+                     "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
+                     status->flag, status->rate_idx, status->vht_nss))
+               return 0;
 
        /* rewind from end of MPDU */
        if (status->flag & RX_FLAG_MACTIME_END)
@@ -2295,3 +2294,63 @@ void ieee80211_radar_detected(struct ieee80211_hw *hw)
        ieee80211_queue_work(hw, &local->radar_detected_work);
 }
 EXPORT_SYMBOL(ieee80211_radar_detected);
+
+u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c)
+{
+       u32 ret;
+       int tmp;
+
+       switch (c->width) {
+       case NL80211_CHAN_WIDTH_20:
+               c->width = NL80211_CHAN_WIDTH_20_NOHT;
+               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+               break;
+       case NL80211_CHAN_WIDTH_40:
+               c->width = NL80211_CHAN_WIDTH_20;
+               c->center_freq1 = c->chan->center_freq;
+               ret = IEEE80211_STA_DISABLE_40MHZ |
+                     IEEE80211_STA_DISABLE_VHT;
+               break;
+       case NL80211_CHAN_WIDTH_80:
+               tmp = (30 + c->chan->center_freq - c->center_freq1)/20;
+               /* n_P40 */
+               tmp /= 2;
+               /* freq_P40 */
+               c->center_freq1 = c->center_freq1 - 20 + 40 * tmp;
+               c->width = NL80211_CHAN_WIDTH_40;
+               ret = IEEE80211_STA_DISABLE_VHT;
+               break;
+       case NL80211_CHAN_WIDTH_80P80:
+               c->center_freq2 = 0;
+               c->width = NL80211_CHAN_WIDTH_80;
+               ret = IEEE80211_STA_DISABLE_80P80MHZ |
+                     IEEE80211_STA_DISABLE_160MHZ;
+               break;
+       case NL80211_CHAN_WIDTH_160:
+               /* n_P20 */
+               tmp = (70 + c->chan->center_freq - c->center_freq1)/20;
+               /* n_P80 */
+               tmp /= 4;
+               c->center_freq1 = c->center_freq1 - 40 + 80 * tmp;
+               c->width = NL80211_CHAN_WIDTH_80;
+               ret = IEEE80211_STA_DISABLE_80P80MHZ |
+                     IEEE80211_STA_DISABLE_160MHZ;
+               break;
+       default:
+       case NL80211_CHAN_WIDTH_20_NOHT:
+               WARN_ON_ONCE(1);
+               c->width = NL80211_CHAN_WIDTH_20_NOHT;
+               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+               break;
+       case NL80211_CHAN_WIDTH_5:
+       case NL80211_CHAN_WIDTH_10:
+               WARN_ON_ONCE(1);
+               /* keep c->width */
+               ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
+               break;
+       }
+
+       WARN_ON_ONCE(!cfg80211_chandef_valid(c));
+
+       return ret;
+}
index 97c289414e32a5d989528dd9bba7c8b77ec982bf..de0112785aae19a8b23b2f8f84b364c80d27e8b3 100644 (file)
@@ -185,13 +185,13 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
                vht_cap->cap |= cap_info &
                                (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
-                                IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX |
                                 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX);
        }
 
        if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
                vht_cap->cap |= cap_info &
-                               IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+                               (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+                                IEEE80211_VHT_CAP_BEAMFORMEE_STS_MAX);
 
        if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
                vht_cap->cap |= cap_info &
index 1bec1219ab815ff37c3970a4e2473e0a4bb62809..851cd880b0c048c4dcd45e1b6652888338490cf9 100644 (file)
@@ -33,6 +33,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
                                  SKB_GSO_DODGY |
                                  SKB_GSO_TCP_ECN |
                                  SKB_GSO_GRE |
+                                 SKB_GSO_IPIP |
                                  SKB_GSO_MPLS)))
                goto out;
 
index 6e839b6dff2b1349f86d87837e0a91bb486cc4bf..48acec17e27a1524ced30bd86ad865f147ef3a3e 100644 (file)
@@ -413,6 +413,58 @@ config NETFILTER_SYNPROXY
 
 endif # NF_CONNTRACK
 
+config NF_TABLES
+       depends on NETFILTER_NETLINK
+       tristate "Netfilter nf_tables support"
+
+config NFT_EXTHDR
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables IPv6 exthdr module"
+
+config NFT_META
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables meta module"
+
+config NFT_CT
+       depends on NF_TABLES
+       depends on NF_CONNTRACK
+       tristate "Netfilter nf_tables conntrack module"
+
+config NFT_RBTREE
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables rbtree set module"
+
+config NFT_HASH
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables hash set module"
+
+config NFT_COUNTER
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables counter module"
+
+config NFT_LOG
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables log module"
+
+config NFT_LIMIT
+       depends on NF_TABLES
+       tristate "Netfilter nf_tables limit module"
+
+config NFT_NAT
+       depends on NF_TABLES
+       depends on NF_CONNTRACK
+       depends on NF_NAT
+       tristate "Netfilter nf_tables nat module"
+
+config NFT_COMPAT
+       depends on NF_TABLES
+       depends on NETFILTER_XTABLES
+       tristate "Netfilter x_tables over nf_tables module"
+       help
+         This is required if you intend to use any of the existing
+         x_tables match/target extensions over the nf_tables
+         framework.
+
 config NETFILTER_XTABLES
        tristate "Netfilter Xtables support (required for ip_tables)"
        default m if NETFILTER_ADVANCED=n
index c3a0a12907f693630b841400d3e47babfd338bde..394483b2c193c53fb08dd98591969db0fa995fa8 100644 (file)
@@ -64,6 +64,24 @@ obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
 # SYNPROXY
 obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
 
+# nf_tables
+nf_tables-objs += nf_tables_core.o nf_tables_api.o
+nf_tables-objs += nft_immediate.o nft_cmp.o nft_lookup.o
+nf_tables-objs += nft_bitwise.o nft_byteorder.o nft_payload.o
+
+obj-$(CONFIG_NF_TABLES)                += nf_tables.o
+obj-$(CONFIG_NFT_COMPAT)       += nft_compat.o
+obj-$(CONFIG_NFT_EXTHDR)       += nft_exthdr.o
+obj-$(CONFIG_NFT_META)         += nft_meta.o
+obj-$(CONFIG_NFT_CT)           += nft_ct.o
+obj-$(CONFIG_NFT_LIMIT)                += nft_limit.o
+obj-$(CONFIG_NFT_NAT)          += nft_nat.o
+#nf_tables-objs                        += nft_meta_target.o
+obj-$(CONFIG_NFT_RBTREE)       += nft_rbtree.o
+obj-$(CONFIG_NFT_HASH)         += nft_hash.o
+obj-$(CONFIG_NFT_COUNTER)      += nft_counter.o
+obj-$(CONFIG_NFT_LOG)          += nft_log.o
+
 # generic X tables 
 obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
 
index 593b16ea45e0d817787c4d06d9d3d703df2fc65b..1fbab0cdd302bdafe199d434fc10f2f6401ef6a8 100644 (file)
@@ -146,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head,
                /* Optimization: we don't need to hold module
                   reference here, since function can't sleep. --RR */
 repeat:
-               verdict = (*elemp)->hook(hook, skb, indev, outdev, okfn);
+               verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn);
                if (verdict != NF_ACCEPT) {
 #ifdef CONFIG_NETFILTER_DEBUG
                        if (unlikely((verdict & NF_VERDICT_MASK)
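
The hunk above hands each netfilter hook its own struct nf_hook_ops instead of the raw hook number. A minimal sketch of a hook written against that updated prototype follows; example_hook and example_ops are invented names, and the exact prototype is assumed from this series rather than quoted from it.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>

static unsigned int example_hook(const struct nf_hook_ops *ops,
				 struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	/* The hook number is now taken from the ops structure. */
	pr_debug("packet seen at hook %u\n", ops->hooknum);
	return NF_ACCEPT;
}

static struct nf_hook_ops example_ops __read_mostly = {
	.hook     = example_hook,
	.owner    = THIS_MODULE,
	.pf       = NFPROTO_IPV4,
	.hooknum  = NF_INET_LOCAL_IN,
	.priority = NF_IP_PRI_FIRST,
};

static int __init example_init(void)
{
	return nf_register_hook(&example_ops);
}

static void __exit example_exit(void)
{
	nf_unregister_hook(&example_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");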
index ba36c283d8372dfb3c890994c57c6bb0d46266b0..a2d6263b6c648e97c663025660ad47320c2c08ff 100644 (file)
@@ -1,7 +1,7 @@
 menuconfig IP_SET
        tristate "IP set support"
        depends on INET && NETFILTER
-       depends on NETFILTER_NETLINK
+       select NETFILTER_NETLINK
        help
          This option adds IP set support to the kernel.
          In order to define and use the sets, you need the userspace utility
@@ -90,6 +90,15 @@ config IP_SET_HASH_IPPORTNET
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_SET_HASH_NETPORTNET
+       tristate "hash:net,port,net set support"
+       depends on IP_SET
+       help
+         This option adds the hash:net,port,net set type support, by which
+         one can store two IPv4/IPv6 subnets and a protocol/port in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_SET_HASH_NET
        tristate "hash:net set support"
        depends on IP_SET
@@ -99,6 +108,15 @@ config IP_SET_HASH_NET
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_SET_HASH_NETNET
+       tristate "hash:net,net set support"
+       depends on IP_SET
+       help
+         This option adds the hash:net,net set type support, by which
+         one can store IPv4/IPv6 network address/prefix pairs in a set.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_SET_HASH_NETPORT
        tristate "hash:net,port set support"
        depends on IP_SET
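
To make the new hash:net,net and hash:net,port,net descriptions concrete, here is a toy user-space matcher for the kind of tuple a hash:net,port,net entry holds (two prefixes plus a protocol/port). It illustrates the matching semantics only; the names and layout are invented for the sketch and are not the kernel's hash set implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct netportnet {
	uint32_t net1, net2;	/* network addresses, host byte order */
	uint8_t  cidr1, cidr2;	/* prefix lengths */
	uint8_t  proto;		/* e.g. 6 for TCP */
	uint16_t port;
};

static bool in_net(uint32_t ip, uint32_t net, uint8_t cidr)
{
	uint32_t mask = cidr ? ~0u << (32 - cidr) : 0;
	return (ip & mask) == (net & mask);
}

static bool match(const struct netportnet *e, uint32_t src, uint32_t dst,
		  uint8_t proto, uint16_t port)
{
	return in_net(src, e->net1, e->cidr1) &&
	       in_net(dst, e->net2, e->cidr2) &&
	       proto == e->proto && port == e->port;
}

int main(void)
{
	/* Roughly "10.0.0.0/24,tcp:80,192.168.1.0/24" */
	struct netportnet e = {
		.net1 = 0x0a000000, .cidr1 = 24,
		.net2 = 0xc0a80100, .cidr2 = 24,
		.proto = 6, .port = 80,
	};

	printf("%d\n", match(&e, 0x0a000005, 0xc0a80142, 6, 80));	/* 1 */
	printf("%d\n", match(&e, 0x0a000105, 0xc0a80142, 6, 80));	/* 0 */
	return 0;
}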
index 6e965ecd5444696b4dc7e09bb98b1d09420ce9ab..44b2d38476faeb75a95c5ac8348330a99aa866fd 100644 (file)
@@ -20,6 +20,8 @@ obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
 obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
 obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
 obj-$(CONFIG_IP_SET_HASH_NETIFACE) += ip_set_hash_netiface.o
+obj-$(CONFIG_IP_SET_HASH_NETNET) += ip_set_hash_netnet.o
+obj-$(CONFIG_IP_SET_HASH_NETPORTNET) += ip_set_hash_netportnet.o
 
 # list types
 obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
index 25243379b887e5724d147cac0afedab58a1dec61..a13e15be7911cfcd2e3103f9790beaeb869bd478 100644 (file)
@@ -8,38 +8,32 @@
 #ifndef __IP_SET_BITMAP_IP_GEN_H
 #define __IP_SET_BITMAP_IP_GEN_H
 
-#define CONCAT(a, b)           a##b
-#define TOKEN(a,b)             CONCAT(a, b)
-
-#define mtype_do_test          TOKEN(MTYPE, _do_test)
-#define mtype_gc_test          TOKEN(MTYPE, _gc_test)
-#define mtype_is_filled                TOKEN(MTYPE, _is_filled)
-#define mtype_do_add           TOKEN(MTYPE, _do_add)
-#define mtype_do_del           TOKEN(MTYPE, _do_del)
-#define mtype_do_list          TOKEN(MTYPE, _do_list)
-#define mtype_do_head          TOKEN(MTYPE, _do_head)
-#define mtype_adt_elem         TOKEN(MTYPE, _adt_elem)
-#define mtype_add_timeout      TOKEN(MTYPE, _add_timeout)
-#define mtype_gc_init          TOKEN(MTYPE, _gc_init)
-#define mtype_kadt             TOKEN(MTYPE, _kadt)
-#define mtype_uadt             TOKEN(MTYPE, _uadt)
-#define mtype_destroy          TOKEN(MTYPE, _destroy)
-#define mtype_flush            TOKEN(MTYPE, _flush)
-#define mtype_head             TOKEN(MTYPE, _head)
-#define mtype_same_set         TOKEN(MTYPE, _same_set)
-#define mtype_elem             TOKEN(MTYPE, _elem)
-#define mtype_test             TOKEN(MTYPE, _test)
-#define mtype_add              TOKEN(MTYPE, _add)
-#define mtype_del              TOKEN(MTYPE, _del)
-#define mtype_list             TOKEN(MTYPE, _list)
-#define mtype_gc               TOKEN(MTYPE, _gc)
+#define mtype_do_test          IPSET_TOKEN(MTYPE, _do_test)
+#define mtype_gc_test          IPSET_TOKEN(MTYPE, _gc_test)
+#define mtype_is_filled                IPSET_TOKEN(MTYPE, _is_filled)
+#define mtype_do_add           IPSET_TOKEN(MTYPE, _do_add)
+#define mtype_ext_cleanup      IPSET_TOKEN(MTYPE, _ext_cleanup)
+#define mtype_do_del           IPSET_TOKEN(MTYPE, _do_del)
+#define mtype_do_list          IPSET_TOKEN(MTYPE, _do_list)
+#define mtype_do_head          IPSET_TOKEN(MTYPE, _do_head)
+#define mtype_adt_elem         IPSET_TOKEN(MTYPE, _adt_elem)
+#define mtype_add_timeout      IPSET_TOKEN(MTYPE, _add_timeout)
+#define mtype_gc_init          IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_kadt             IPSET_TOKEN(MTYPE, _kadt)
+#define mtype_uadt             IPSET_TOKEN(MTYPE, _uadt)
+#define mtype_destroy          IPSET_TOKEN(MTYPE, _destroy)
+#define mtype_flush            IPSET_TOKEN(MTYPE, _flush)
+#define mtype_head             IPSET_TOKEN(MTYPE, _head)
+#define mtype_same_set         IPSET_TOKEN(MTYPE, _same_set)
+#define mtype_elem             IPSET_TOKEN(MTYPE, _elem)
+#define mtype_test             IPSET_TOKEN(MTYPE, _test)
+#define mtype_add              IPSET_TOKEN(MTYPE, _add)
+#define mtype_del              IPSET_TOKEN(MTYPE, _del)
+#define mtype_list             IPSET_TOKEN(MTYPE, _list)
+#define mtype_gc               IPSET_TOKEN(MTYPE, _gc)
 #define mtype                  MTYPE
 
-#define ext_timeout(e, m)      \
-       (unsigned long *)((e) + (m)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, m)      \
-       (struct ip_set_counter *)((e) + (m)->offset[IPSET_OFFSET_COUNTER])
-#define get_ext(map, id)       ((map)->extensions + (map)->dsize * (id))
+#define get_ext(set, map, id)  ((map)->extensions + (set)->dsize * (id))
 
 static void
 mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
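
The renaming above keeps the same two-level token-pasting scheme, now spelled IPSET_TOKEN() and shared by the ipset headers instead of being redefined locally. A small stand-alone demonstration of how such a paste resolves, assuming the usual two-level concatenation and MTYPE set to bitmap_ip:

#include <stdio.h>

#define CONCAT2(a, b)	a##b
#define TOKEN(a, b)	CONCAT2(a, b)	/* two levels so that 'a' expands first */

#define MTYPE		bitmap_ip
#define mtype_do_test	TOKEN(MTYPE, _do_test)	/* -> bitmap_ip_do_test */

static int bitmap_ip_do_test(void) { return 1; }

int main(void)
{
	printf("%d\n", mtype_do_test());	/* calls bitmap_ip_do_test() */
	return 0;
}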
@@ -49,10 +43,21 @@ mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
        init_timer(&map->gc);
        map->gc.data = (unsigned long) set;
        map->gc.function = gc;
-       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&map->gc);
 }
 
+static void
+mtype_ext_cleanup(struct ip_set *set)
+{
+       struct mtype *map = set->data;
+       u32 id;
+
+       for (id = 0; id < map->elements; id++)
+               if (test_bit(id, map->members))
+                       ip_set_ext_destroy(set, get_ext(set, map, id));
+}
+
 static void
 mtype_destroy(struct ip_set *set)
 {
@@ -62,8 +67,11 @@ mtype_destroy(struct ip_set *set)
                del_timer_sync(&map->gc);
 
        ip_set_free(map->members);
-       if (map->dsize)
+       if (set->dsize) {
+               if (set->extensions & IPSET_EXT_DESTROY)
+                       mtype_ext_cleanup(set);
                ip_set_free(map->extensions);
+       }
        kfree(map);
 
        set->data = NULL;
@@ -74,6 +82,8 @@ mtype_flush(struct ip_set *set)
 {
        struct mtype *map = set->data;
 
+       if (set->extensions & IPSET_EXT_DESTROY)
+               mtype_ext_cleanup(set);
        memset(map->members, 0, map->memsize);
 }
 
@@ -91,12 +101,9 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
            nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
                          htonl(sizeof(*map) +
                                map->memsize +
-                               map->dsize * map->elements)) ||
-           (SET_WITH_TIMEOUT(set) &&
-            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
-           (SET_WITH_COUNTER(set) &&
-            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
-                          htonl(IPSET_FLAG_WITH_COUNTERS))))
+                               set->dsize * map->elements)))
+               goto nla_put_failure;
+       if (unlikely(ip_set_put_flags(skb, set)))
                goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
@@ -111,16 +118,16 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 {
        struct mtype *map = set->data;
        const struct mtype_adt_elem *e = value;
-       void *x = get_ext(map, e->id);
-       int ret = mtype_do_test(e, map);
+       void *x = get_ext(set, map, e->id);
+       int ret = mtype_do_test(e, map, set->dsize);
 
        if (ret <= 0)
                return ret;
        if (SET_WITH_TIMEOUT(set) &&
-           ip_set_timeout_expired(ext_timeout(x, map)))
+           ip_set_timeout_expired(ext_timeout(x, set)))
                return 0;
        if (SET_WITH_COUNTER(set))
-               ip_set_update_counter(ext_counter(x, map), ext, mext, flags);
+               ip_set_update_counter(ext_counter(x, set), ext, mext, flags);
        return 1;
 }
 
@@ -130,26 +137,30 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 {
        struct mtype *map = set->data;
        const struct mtype_adt_elem *e = value;
-       void *x = get_ext(map, e->id);
-       int ret = mtype_do_add(e, map, flags);
+       void *x = get_ext(set, map, e->id);
+       int ret = mtype_do_add(e, map, flags, set->dsize);
 
        if (ret == IPSET_ADD_FAILED) {
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(x, map)))
+                   ip_set_timeout_expired(ext_timeout(x, set)))
                        ret = 0;
                else if (!(flags & IPSET_FLAG_EXIST))
                        return -IPSET_ERR_EXIST;
+               /* Element is re-added, clean up extensions */
+               ip_set_ext_destroy(set, x);
        }
 
        if (SET_WITH_TIMEOUT(set))
 #ifdef IP_SET_BITMAP_STORED_TIMEOUT
-               mtype_add_timeout(ext_timeout(x, map), e, ext, map, ret);
+               mtype_add_timeout(ext_timeout(x, set), e, ext, set, map, ret);
 #else
-               ip_set_timeout_set(ext_timeout(x, map), ext->timeout);
+               ip_set_timeout_set(ext_timeout(x, set), ext->timeout);
 #endif
 
        if (SET_WITH_COUNTER(set))
-               ip_set_init_counter(ext_counter(x, map), ext);
+               ip_set_init_counter(ext_counter(x, set), ext);
+       if (SET_WITH_COMMENT(set))
+               ip_set_init_comment(ext_comment(x, set), ext);
        return 0;
 }
 
@@ -159,16 +170,27 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 {
        struct mtype *map = set->data;
        const struct mtype_adt_elem *e = value;
-       const void *x = get_ext(map, e->id);
+       void *x = get_ext(set, map, e->id);
 
-       if (mtype_do_del(e, map) ||
-           (SET_WITH_TIMEOUT(set) &&
-            ip_set_timeout_expired(ext_timeout(x, map))))
+       if (mtype_do_del(e, map))
+               return -IPSET_ERR_EXIST;
+
+       ip_set_ext_destroy(set, x);
+       if (SET_WITH_TIMEOUT(set) &&
+           ip_set_timeout_expired(ext_timeout(x, set)))
                return -IPSET_ERR_EXIST;
 
        return 0;
 }
 
+#ifndef IP_SET_BITMAP_STORED_TIMEOUT
+static inline bool
+mtype_is_filled(const struct mtype_elem *x)
+{
+       return true;
+}
+#endif
+
 static int
 mtype_list(const struct ip_set *set,
           struct sk_buff *skb, struct netlink_callback *cb)
@@ -183,13 +205,13 @@ mtype_list(const struct ip_set *set,
                return -EMSGSIZE;
        for (; cb->args[2] < map->elements; cb->args[2]++) {
                id = cb->args[2];
-               x = get_ext(map, id);
+               x = get_ext(set, map, id);
                if (!test_bit(id, map->members) ||
                    (SET_WITH_TIMEOUT(set) &&
 #ifdef IP_SET_BITMAP_STORED_TIMEOUT
                     mtype_is_filled((const struct mtype_elem *) x) &&
 #endif
-                    ip_set_timeout_expired(ext_timeout(x, map))))
+                    ip_set_timeout_expired(ext_timeout(x, set))))
                        continue;
                nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
                if (!nested) {
@@ -199,23 +221,10 @@ mtype_list(const struct ip_set *set,
                        } else
                                goto nla_put_failure;
                }
-               if (mtype_do_list(skb, map, id))
+               if (mtype_do_list(skb, map, id, set->dsize))
                        goto nla_put_failure;
-               if (SET_WITH_TIMEOUT(set)) {
-#ifdef IP_SET_BITMAP_STORED_TIMEOUT
-                       if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-                                         htonl(ip_set_timeout_stored(map, id,
-                                                       ext_timeout(x, map)))))
-                               goto nla_put_failure;
-#else
-                       if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-                                         htonl(ip_set_timeout_get(
-                                                       ext_timeout(x, map)))))
-                               goto nla_put_failure;
-#endif
-               }
-               if (SET_WITH_COUNTER(set) &&
-                   ip_set_put_counter(skb, ext_counter(x, map)))
+               if (ip_set_put_extensions(skb, set, x,
+                   mtype_is_filled((const struct mtype_elem *) x)))
                        goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
@@ -228,11 +237,11 @@ mtype_list(const struct ip_set *set,
 
 nla_put_failure:
        nla_nest_cancel(skb, nested);
-       ipset_nest_end(skb, adt);
        if (unlikely(id == first)) {
                cb->args[2] = 0;
                return -EMSGSIZE;
        }
+       ipset_nest_end(skb, adt);
        return 0;
 }
 
@@ -241,21 +250,23 @@ mtype_gc(unsigned long ul_set)
 {
        struct ip_set *set = (struct ip_set *) ul_set;
        struct mtype *map = set->data;
-       const void *x;
+       void *x;
        u32 id;
 
        /* We run parallel with other readers (test element)
         * but adding/deleting new entries is locked out */
        read_lock_bh(&set->lock);
        for (id = 0; id < map->elements; id++)
-               if (mtype_gc_test(id, map)) {
-                       x = get_ext(map, id);
-                       if (ip_set_timeout_expired(ext_timeout(x, map)))
+               if (mtype_gc_test(id, map, set->dsize)) {
+                       x = get_ext(set, map, id);
+                       if (ip_set_timeout_expired(ext_timeout(x, set))) {
                                clear_bit(id, map->members);
+                               ip_set_ext_destroy(set, x);
+                       }
                }
        read_unlock_bh(&set->lock);
 
-       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&map->gc);
 }
 
index f1a8128bef01c3f555c23264683d9f1688b06509..6f1f9f4948084e40375ab722565e7bf20b2d972d 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 
-#define REVISION_MIN   0
-#define REVISION_MAX   1       /* Counter support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1          Counter support added */
+#define IPSET_TYPE_REV_MAX     2       /* Comment support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip");
 
 #define MTYPE          bitmap_ip
@@ -44,10 +45,7 @@ struct bitmap_ip {
        u32 elements;           /* number of max elements in the set */
        u32 hosts;              /* number of hosts in a subnet */
        size_t memsize;         /* members size */
-       size_t dsize;           /* extensions struct size */
-       size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
        u8 netmask;             /* subnet netmask */
-       u32 timeout;            /* timeout parameter */
        struct timer_list gc;   /* garbage collection */
 };
 
@@ -65,20 +63,21 @@ ip_to_id(const struct bitmap_ip *m, u32 ip)
 /* Common functions */
 
 static inline int
-bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
+bitmap_ip_do_test(const struct bitmap_ip_adt_elem *e,
+                 struct bitmap_ip *map, size_t dsize)
 {
        return !!test_bit(e->id, map->members);
 }
 
 static inline int
-bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map)
+bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map, size_t dsize)
 {
        return !!test_bit(id, map->members);
 }
 
 static inline int
 bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map,
-                u32 flags)
+                u32 flags, size_t dsize)
 {
        return !!test_and_set_bit(e->id, map->members);
 }
@@ -90,7 +89,8 @@ bitmap_ip_do_del(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map)
 }
 
 static inline int
-bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id)
+bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id,
+                 size_t dsize)
 {
        return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
                        htonl(map->first_ip + id * map->hosts));
@@ -113,7 +113,7 @@ bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct bitmap_ip *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct bitmap_ip_adt_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        u32 ip;
 
        ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
@@ -131,9 +131,9 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
 {
        struct bitmap_ip *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       u32 ip, ip_to;
+       u32 ip = 0, ip_to = 0;
        struct bitmap_ip_adt_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        int ret = 0;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -200,7 +200,7 @@ bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
        return x->first_ip == y->first_ip &&
               x->last_ip == y->last_ip &&
               x->netmask == y->netmask &&
-              x->timeout == y->timeout &&
+              a->timeout == b->timeout &&
               a->extensions == b->extensions;
 }
 
@@ -209,25 +209,6 @@ bitmap_ip_same_set(const struct ip_set *a, const struct ip_set *b)
 struct bitmap_ip_elem {
 };
 
-/* Timeout variant */
-
-struct bitmap_ipt_elem {
-       unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_ipc_elem {
-       struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_ipct_elem {
-       unsigned long timeout;
-       struct ip_set_counter counter;
-};
-
 #include "ip_set_bitmap_gen.h"
 
 /* Create bitmap:ip type of sets */
@@ -240,8 +221,8 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
        map->members = ip_set_alloc(map->memsize);
        if (!map->members)
                return false;
-       if (map->dsize) {
-               map->extensions = ip_set_alloc(map->dsize * elements);
+       if (set->dsize) {
+               map->extensions = ip_set_alloc(set->dsize * elements);
                if (!map->extensions) {
                        kfree(map->members);
                        return false;
@@ -252,7 +233,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
        map->elements = elements;
        map->hosts = hosts;
        map->netmask = netmask;
-       map->timeout = IPSET_NO_TIMEOUT;
+       set->timeout = IPSET_NO_TIMEOUT;
 
        set->data = map;
        set->family = NFPROTO_IPV4;
@@ -261,10 +242,11 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
 }
 
 static int
-bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+                u32 flags)
 {
        struct bitmap_ip *map;
-       u32 first_ip, last_ip, hosts, cadt_flags = 0;
+       u32 first_ip = 0, last_ip = 0, hosts;
        u64 elements;
        u8 netmask = 32;
        int ret;
@@ -336,61 +318,15 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
 
        map->memsize = bitmap_bytes(0, elements - 1);
        set->variant = &bitmap_ip;
-       if (tb[IPSET_ATTR_CADT_FLAGS])
-               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-       if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-               set->extensions |= IPSET_EXT_COUNTER;
-               if (tb[IPSET_ATTR_TIMEOUT]) {
-                       map->dsize = sizeof(struct bitmap_ipct_elem);
-                       map->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct bitmap_ipct_elem, timeout);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_ipct_elem, counter);
-
-                       if (!init_map_ip(set, map, first_ip, last_ip,
-                                        elements, hosts, netmask)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-
-                       map->timeout = ip_set_timeout_uget(
-                               tb[IPSET_ATTR_TIMEOUT]);
-                       set->extensions |= IPSET_EXT_TIMEOUT;
-
-                       bitmap_ip_gc_init(set, bitmap_ip_gc);
-               } else {
-                       map->dsize = sizeof(struct bitmap_ipc_elem);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_ipc_elem, counter);
-
-                       if (!init_map_ip(set, map, first_ip, last_ip,
-                                        elements, hosts, netmask)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-               }
-       } else if (tb[IPSET_ATTR_TIMEOUT]) {
-               map->dsize = sizeof(struct bitmap_ipt_elem);
-               map->offset[IPSET_OFFSET_TIMEOUT] =
-                       offsetof(struct bitmap_ipt_elem, timeout);
-
-               if (!init_map_ip(set, map, first_ip, last_ip,
-                                elements, hosts, netmask)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
-
-               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-               set->extensions |= IPSET_EXT_TIMEOUT;
-
+       set->dsize = ip_set_elem_len(set, tb, 0);
+       if (!init_map_ip(set, map, first_ip, last_ip,
+                        elements, hosts, netmask)) {
+               kfree(map);
+               return -ENOMEM;
+       }
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
                bitmap_ip_gc_init(set, bitmap_ip_gc);
-       } else {
-               map->dsize = 0;
-               if (!init_map_ip(set, map, first_ip, last_ip,
-                                elements, hosts, netmask)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
        }
        return 0;
 }
@@ -401,8 +337,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
        .features       = IPSET_TYPE_IP,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_IPV4,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = bitmap_ip_create,
        .create_policy  = {
                [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
@@ -420,6 +356,7 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index 3b30e0bef890424abbb3bbf71bd2df147a52eab2..740eabededd9754b7db95a9d46d48f8a1f283bf7 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 
-#define REVISION_MIN   0
-#define REVISION_MAX   1       /* Counter support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1          Counter support added */
+#define IPSET_TYPE_REV_MAX     2       /* Comment support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:ip,mac", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:ip,mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:ip,mac");
 
 #define MTYPE          bitmap_ipmac
@@ -48,11 +49,8 @@ struct bitmap_ipmac {
        u32 first_ip;           /* host byte order, included in range */
        u32 last_ip;            /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
-       u32 timeout;            /* timeout value */
-       struct timer_list gc;   /* garbage collector */
        size_t memsize;         /* members size */
-       size_t dsize;           /* size of element */
-       size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
+       struct timer_list gc;   /* garbage collector */
 };
 
 /* ADT structure for generic function args */
@@ -82,13 +80,13 @@ get_elem(void *extensions, u16 id, size_t dsize)
 
 static inline int
 bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
-                    const struct bitmap_ipmac *map)
+                    const struct bitmap_ipmac *map, size_t dsize)
 {
        const struct bitmap_ipmac_elem *elem;
 
        if (!test_bit(e->id, map->members))
                return 0;
-       elem = get_elem(map->extensions, e->id, map->dsize);
+       elem = get_elem(map->extensions, e->id, dsize);
        if (elem->filled == MAC_FILLED)
                return e->ether == NULL ||
                       ether_addr_equal(e->ether, elem->ether);
@@ -97,13 +95,13 @@ bitmap_ipmac_do_test(const struct bitmap_ipmac_adt_elem *e,
 }
 
 static inline int
-bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map)
+bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize)
 {
        const struct bitmap_ipmac_elem *elem;
 
        if (!test_bit(id, map->members))
                return 0;
-       elem = get_elem(map->extensions, id, map->dsize);
+       elem = get_elem(map->extensions, id, dsize);
        /* Timer not started for the incomplete elements */
        return elem->filled == MAC_FILLED;
 }
@@ -117,13 +115,13 @@ bitmap_ipmac_is_filled(const struct bitmap_ipmac_elem *elem)
 static inline int
 bitmap_ipmac_add_timeout(unsigned long *timeout,
                         const struct bitmap_ipmac_adt_elem *e,
-                        const struct ip_set_ext *ext,
+                        const struct ip_set_ext *ext, struct ip_set *set,
                         struct bitmap_ipmac *map, int mode)
 {
        u32 t = ext->timeout;
 
        if (mode == IPSET_ADD_START_STORED_TIMEOUT) {
-               if (t == map->timeout)
+               if (t == set->timeout)
                        /* Timeout was not specified, get stored one */
                        t = *timeout;
                ip_set_timeout_set(timeout, t);
@@ -142,11 +140,11 @@ bitmap_ipmac_add_timeout(unsigned long *timeout,
 
 static inline int
 bitmap_ipmac_do_add(const struct bitmap_ipmac_adt_elem *e,
-                   struct bitmap_ipmac *map, u32 flags)
+                   struct bitmap_ipmac *map, u32 flags, size_t dsize)
 {
        struct bitmap_ipmac_elem *elem;
 
-       elem = get_elem(map->extensions, e->id, map->dsize);
+       elem = get_elem(map->extensions, e->id, dsize);
        if (test_and_set_bit(e->id, map->members)) {
                if (elem->filled == MAC_FILLED) {
                        if (e->ether && (flags & IPSET_FLAG_EXIST))
@@ -178,22 +176,12 @@ bitmap_ipmac_do_del(const struct bitmap_ipmac_adt_elem *e,
        return !test_and_clear_bit(e->id, map->members);
 }
 
-static inline unsigned long
-ip_set_timeout_stored(struct bitmap_ipmac *map, u32 id, unsigned long *timeout)
-{
-       const struct bitmap_ipmac_elem *elem =
-               get_elem(map->extensions, id, map->dsize);
-
-       return elem->filled == MAC_FILLED ? ip_set_timeout_get(timeout) :
-                                           *timeout;
-}
-
 static inline int
 bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map,
-                    u32 id)
+                    u32 id, size_t dsize)
 {
        const struct bitmap_ipmac_elem *elem =
-               get_elem(map->extensions, id, map->dsize);
+               get_elem(map->extensions, id, dsize);
 
        return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
                               htonl(map->first_ip + id)) ||
@@ -216,7 +204,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct bitmap_ipmac *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct bitmap_ipmac_adt_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        u32 ip;
 
        /* MAC can be src only */
@@ -245,8 +233,8 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct bitmap_ipmac *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct bitmap_ipmac_adt_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
-       u32 ip;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0;
        int ret = 0;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -285,43 +273,12 @@ bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
 
        return x->first_ip == y->first_ip &&
               x->last_ip == y->last_ip &&
-              x->timeout == y->timeout &&
+              a->timeout == b->timeout &&
               a->extensions == b->extensions;
 }
 
 /* Plain variant */
 
-/* Timeout variant */
-
-struct bitmap_ipmact_elem {
-       struct {
-               unsigned char ether[ETH_ALEN];
-               unsigned char filled;
-       } __attribute__ ((aligned));
-       unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_ipmacc_elem {
-       struct {
-               unsigned char ether[ETH_ALEN];
-               unsigned char filled;
-       } __attribute__ ((aligned));
-       struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_ipmacct_elem {
-       struct {
-               unsigned char ether[ETH_ALEN];
-               unsigned char filled;
-       } __attribute__ ((aligned));
-       unsigned long timeout;
-       struct ip_set_counter counter;
-};
-
 #include "ip_set_bitmap_gen.h"
 
 /* Create bitmap:ip,mac type of sets */
@@ -330,11 +287,11 @@ static bool
 init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
               u32 first_ip, u32 last_ip, u32 elements)
 {
-       map->members = ip_set_alloc((last_ip - first_ip + 1) * map->dsize);
+       map->members = ip_set_alloc(map->memsize);
        if (!map->members)
                return false;
-       if (map->dsize) {
-               map->extensions = ip_set_alloc(map->dsize * elements);
+       if (set->dsize) {
+               map->extensions = ip_set_alloc(set->dsize * elements);
                if (!map->extensions) {
                        kfree(map->members);
                        return false;
@@ -343,7 +300,7 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
        map->first_ip = first_ip;
        map->last_ip = last_ip;
        map->elements = elements;
-       map->timeout = IPSET_NO_TIMEOUT;
+       set->timeout = IPSET_NO_TIMEOUT;
 
        set->data = map;
        set->family = NFPROTO_IPV4;
@@ -352,10 +309,10 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
 }
 
 static int
-bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
+bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
                    u32 flags)
 {
-       u32 first_ip, last_ip, cadt_flags = 0;
+       u32 first_ip = 0, last_ip = 0;
        u64 elements;
        struct bitmap_ipmac *map;
        int ret;
@@ -399,57 +356,15 @@ bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
 
        map->memsize = bitmap_bytes(0, elements - 1);
        set->variant = &bitmap_ipmac;
-       if (tb[IPSET_ATTR_CADT_FLAGS])
-               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-       if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-               set->extensions |= IPSET_EXT_COUNTER;
-               if (tb[IPSET_ATTR_TIMEOUT]) {
-                       map->dsize = sizeof(struct bitmap_ipmacct_elem);
-                       map->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct bitmap_ipmacct_elem, timeout);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_ipmacct_elem, counter);
-
-                       if (!init_map_ipmac(set, map, first_ip, last_ip,
-                                           elements)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-                       map->timeout = ip_set_timeout_uget(
-                               tb[IPSET_ATTR_TIMEOUT]);
-                       set->extensions |= IPSET_EXT_TIMEOUT;
-                       bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
-               } else {
-                       map->dsize = sizeof(struct bitmap_ipmacc_elem);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_ipmacc_elem, counter);
-
-                       if (!init_map_ipmac(set, map, first_ip, last_ip,
-                                           elements)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-               }
-       } else if (tb[IPSET_ATTR_TIMEOUT]) {
-               map->dsize = sizeof(struct bitmap_ipmact_elem);
-               map->offset[IPSET_OFFSET_TIMEOUT] =
-                       offsetof(struct bitmap_ipmact_elem, timeout);
-
-               if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
-               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-               set->extensions |= IPSET_EXT_TIMEOUT;
+       set->dsize = ip_set_elem_len(set, tb,
+                                    sizeof(struct bitmap_ipmac_elem));
+       if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
+               kfree(map);
+               return -ENOMEM;
+       }
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
                bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
-       } else {
-               map->dsize = sizeof(struct bitmap_ipmac_elem);
-
-               if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
-               set->variant = &bitmap_ipmac;
        }
        return 0;
 }
@@ -460,8 +375,8 @@ static struct ip_set_type bitmap_ipmac_type = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_MAC,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_IPV4,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = bitmap_ipmac_create,
        .create_policy  = {
                [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
@@ -478,6 +393,7 @@ static struct ip_set_type bitmap_ipmac_type = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index 8207d1fda5288dcf252a96b9db60ed5fd543144b..e7603c5b53d737b9de6248bfd0b87d0005c242fd 100644 (file)
 #include <linux/netfilter/ipset/ip_set_bitmap.h>
 #include <linux/netfilter/ipset/ip_set_getport.h>
 
-#define REVISION_MIN   0
-#define REVISION_MAX   1       /* Counter support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1          Counter support added */
+#define IPSET_TYPE_REV_MAX     2       /* Comment support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("bitmap:port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("bitmap:port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_bitmap:port");
 
 #define MTYPE          bitmap_port
@@ -38,9 +39,6 @@ struct bitmap_port {
        u16 last_port;          /* host byte order, included in range */
        u32 elements;           /* number of max elements in the set */
        size_t memsize;         /* members size */
-       size_t dsize;           /* extensions struct size */
-       size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
-       u32 timeout;            /* timeout parameter */
        struct timer_list gc;   /* garbage collection */
 };
 
@@ -59,20 +57,20 @@ port_to_id(const struct bitmap_port *m, u16 port)
 
 static inline int
 bitmap_port_do_test(const struct bitmap_port_adt_elem *e,
-                   const struct bitmap_port *map)
+                   const struct bitmap_port *map, size_t dsize)
 {
        return !!test_bit(e->id, map->members);
 }
 
 static inline int
-bitmap_port_gc_test(u16 id, const struct bitmap_port *map)
+bitmap_port_gc_test(u16 id, const struct bitmap_port *map, size_t dsize)
 {
        return !!test_bit(id, map->members);
 }
 
 static inline int
 bitmap_port_do_add(const struct bitmap_port_adt_elem *e,
-                  struct bitmap_port *map, u32 flags)
+                  struct bitmap_port *map, u32 flags, size_t dsize)
 {
        return !!test_and_set_bit(e->id, map->members);
 }
@@ -85,7 +83,8 @@ bitmap_port_do_del(const struct bitmap_port_adt_elem *e,
 }
 
 static inline int
-bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id)
+bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id,
+                   size_t dsize)
 {
        return nla_put_net16(skb, IPSET_ATTR_PORT,
                             htons(map->first_port + id));
@@ -106,7 +105,7 @@ bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct bitmap_port *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct bitmap_port_adt_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        __be16 __port;
        u16 port = 0;
 
@@ -131,7 +130,7 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
        struct bitmap_port *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct bitmap_port_adt_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port;       /* wraparound */
        u16 port_to;
        int ret = 0;
@@ -191,7 +190,7 @@ bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
 
        return x->first_port == y->first_port &&
               x->last_port == y->last_port &&
-              x->timeout == y->timeout &&
+              a->timeout == b->timeout &&
               a->extensions == b->extensions;
 }
 
@@ -200,25 +199,6 @@ bitmap_port_same_set(const struct ip_set *a, const struct ip_set *b)
 struct bitmap_port_elem {
 };
 
-/* Timeout variant */
-
-struct bitmap_portt_elem {
-       unsigned long timeout;
-};
-
-/* Plain variant with counter */
-
-struct bitmap_portc_elem {
-       struct ip_set_counter counter;
-};
-
-/* Timeout variant with counter */
-
-struct bitmap_portct_elem {
-       unsigned long timeout;
-       struct ip_set_counter counter;
-};
-
 #include "ip_set_bitmap_gen.h"
 
 /* Create bitmap:ip type of sets */
@@ -230,8 +210,8 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
        map->members = ip_set_alloc(map->memsize);
        if (!map->members)
                return false;
-       if (map->dsize) {
-               map->extensions = ip_set_alloc(map->dsize * map->elements);
+       if (set->dsize) {
+               map->extensions = ip_set_alloc(set->dsize * map->elements);
                if (!map->extensions) {
                        kfree(map->members);
                        return false;
@@ -239,7 +219,7 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
        }
        map->first_port = first_port;
        map->last_port = last_port;
-       map->timeout = IPSET_NO_TIMEOUT;
+       set->timeout = IPSET_NO_TIMEOUT;
 
        set->data = map;
        set->family = NFPROTO_UNSPEC;
@@ -248,11 +228,11 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
 }
 
 static int
-bitmap_port_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+                  u32 flags)
 {
        struct bitmap_port *map;
        u16 first_port, last_port;
-       u32 cadt_flags = 0;
 
        if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
                     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -276,53 +256,14 @@ bitmap_port_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
        map->elements = last_port - first_port + 1;
        map->memsize = map->elements * sizeof(unsigned long);
        set->variant = &bitmap_port;
-       if (tb[IPSET_ATTR_CADT_FLAGS])
-               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-       if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-               set->extensions |= IPSET_EXT_COUNTER;
-               if (tb[IPSET_ATTR_TIMEOUT]) {
-                       map->dsize = sizeof(struct bitmap_portct_elem);
-                       map->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct bitmap_portct_elem, timeout);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_portct_elem, counter);
-                       if (!init_map_port(set, map, first_port, last_port)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-
-                       map->timeout =
-                               ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-                       set->extensions |= IPSET_EXT_TIMEOUT;
-                       bitmap_port_gc_init(set, bitmap_port_gc);
-               } else {
-                       map->dsize = sizeof(struct bitmap_portc_elem);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct bitmap_portc_elem, counter);
-                       if (!init_map_port(set, map, first_port, last_port)) {
-                               kfree(map);
-                               return -ENOMEM;
-                       }
-               }
-       } else if (tb[IPSET_ATTR_TIMEOUT]) {
-               map->dsize = sizeof(struct bitmap_portt_elem);
-               map->offset[IPSET_OFFSET_TIMEOUT] =
-                       offsetof(struct bitmap_portt_elem, timeout);
-               if (!init_map_port(set, map, first_port, last_port)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
-
-               map->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-               set->extensions |= IPSET_EXT_TIMEOUT;
+       set->dsize = ip_set_elem_len(set, tb, 0);
+       if (!init_map_port(set, map, first_port, last_port)) {
+               kfree(map);
+               return -ENOMEM;
+       }
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
                bitmap_port_gc_init(set, bitmap_port_gc);
-       } else {
-               map->dsize = 0;
-               if (!init_map_port(set, map, first_port, last_port)) {
-                       kfree(map);
-                       return -ENOMEM;
-               }
-
        }
        return 0;
 }
@@ -333,8 +274,8 @@ static struct ip_set_type bitmap_port_type = {
        .features       = IPSET_TYPE_PORT,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = bitmap_port_create,
        .create_policy  = {
                [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
@@ -349,6 +290,7 @@ static struct ip_set_type bitmap_port_type = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index f2e30fb31e78efa405156ca99ba9aeaded3ea49c..dc9284bdd2dd134fa4f96c7d96bcb845dc3f0269 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/spinlock.h>
 #include <linux/rculist.h>
 #include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include <linux/netfilter.h>
 #include <linux/netfilter/x_tables.h>
@@ -27,8 +29,17 @@ static LIST_HEAD(ip_set_type_list);          /* all registered set types */
 static DEFINE_MUTEX(ip_set_type_mutex);                /* protects ip_set_type_list */
 static DEFINE_RWLOCK(ip_set_ref_lock);         /* protects the set refs */
 
-static struct ip_set * __rcu *ip_set_list;     /* all individual sets */
-static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
+struct ip_set_net {
+       struct ip_set * __rcu *ip_set_list;     /* all individual sets */
+       ip_set_id_t     ip_set_max;     /* max number of sets */
+       int             is_deleted;     /* deleted by ip_set_net_exit */
+};
+static int ip_set_net_id __read_mostly;
+
+static inline struct ip_set_net *ip_set_pernet(struct net *net)
+{
+       return net_generic(net, ip_set_net_id);
+}
 
 #define IP_SET_INC     64
 #define STREQ(a, b)    (strncmp(a, b, IPSET_MAXNAMELEN) == 0)
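
The new ip_set_net structure and ip_set_pernet() helper above follow the kernel's generic per-network-namespace storage pattern. A condensed sketch of that pattern is below; the example_* names are invented for the illustration, while the pernet registration calls are the standard API this patch builds on.

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_net {
	int counter;			/* per-namespace state */
};

static int example_net_id __read_mostly;

static inline struct example_net *example_pernet(struct net *net)
{
	return net_generic(net, example_net_id);
}

static int __net_init example_net_init(struct net *net)
{
	struct example_net *en = example_pernet(net);

	en->counter = 0;		/* storage is allocated by the core (.size) */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* release whatever example_net_init() set up for this namespace */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};

static int __init example_module_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_module_exit(void)
{
	unregister_pernet_subsys(&example_net_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");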
@@ -45,8 +56,8 @@ MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
 /* When the nfnl mutex is held: */
 #define nfnl_dereference(p)            \
        rcu_dereference_protected(p, 1)
-#define nfnl_set(id)                   \
-       nfnl_dereference(ip_set_list)[id]
+#define nfnl_set(inst, id)                     \
+       nfnl_dereference((inst)->ip_set_list)[id]
 
 /*
  * The set types are implemented in modules and registered set types
@@ -315,6 +326,60 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 
+typedef void (*destroyer)(void *);
+/* ipset data extension types, in size order */
+
+const struct ip_set_ext_type ip_set_extensions[] = {
+       [IPSET_EXT_ID_COUNTER] = {
+               .type   = IPSET_EXT_COUNTER,
+               .flag   = IPSET_FLAG_WITH_COUNTERS,
+               .len    = sizeof(struct ip_set_counter),
+               .align  = __alignof__(struct ip_set_counter),
+       },
+       [IPSET_EXT_ID_TIMEOUT] = {
+               .type   = IPSET_EXT_TIMEOUT,
+               .len    = sizeof(unsigned long),
+               .align  = __alignof__(unsigned long),
+       },
+       [IPSET_EXT_ID_COMMENT] = {
+               .type    = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY,
+               .flag    = IPSET_FLAG_WITH_COMMENT,
+               .len     = sizeof(struct ip_set_comment),
+               .align   = __alignof__(struct ip_set_comment),
+               .destroy = (destroyer) ip_set_comment_free,
+       },
+};
+EXPORT_SYMBOL_GPL(ip_set_extensions);
+
+static inline bool
+add_extension(enum ip_set_ext_id id, u32 flags, struct nlattr *tb[])
+{
+       return ip_set_extensions[id].flag ?
+               (flags & ip_set_extensions[id].flag) :
+               !!tb[IPSET_ATTR_TIMEOUT];
+}
+
+size_t
+ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
+{
+       enum ip_set_ext_id id;
+       size_t offset = 0;
+       u32 cadt_flags = 0;
+
+       if (tb[IPSET_ATTR_CADT_FLAGS])
+               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+       for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
+               if (!add_extension(id, cadt_flags, tb))
+                       continue;
+               offset += ALIGN(len + offset, ip_set_extensions[id].align);
+               set->offset[id] = offset;
+               set->extensions |= ip_set_extensions[id].type;
+               offset += ip_set_extensions[id].len;
+       }
+       return len + offset;
+}
+EXPORT_SYMBOL_GPL(ip_set_elem_len);
+
 int
 ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
                      struct ip_set_ext *ext)
@@ -334,6 +399,12 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
                        ext->packets = be64_to_cpu(nla_get_be64(
                                                   tb[IPSET_ATTR_PACKETS]));
        }
+       if (tb[IPSET_ATTR_COMMENT]) {
+               if (!(set->extensions & IPSET_EXT_COMMENT))
+                       return -IPSET_ERR_COMMENT;
+               ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
+       }
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(ip_set_get_extensions);
@@ -374,13 +445,14 @@ __ip_set_put(struct ip_set *set)
  */
 
 static inline struct ip_set *
-ip_set_rcu_get(ip_set_id_t index)
+ip_set_rcu_get(struct net *net, ip_set_id_t index)
 {
        struct ip_set *set;
+       struct ip_set_net *inst = ip_set_pernet(net);
 
        rcu_read_lock();
        /* ip_set_list itself needs to be protected */
-       set = rcu_dereference(ip_set_list)[index];
+       set = rcu_dereference(inst->ip_set_list)[index];
        rcu_read_unlock();
 
        return set;
@@ -390,7 +462,8 @@ int
 ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
            const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-       struct ip_set *set = ip_set_rcu_get(index);
+       struct ip_set *set = ip_set_rcu_get(
+                       dev_net(par->in ? par->in : par->out), index);
        int ret = 0;
 
        BUG_ON(set == NULL);
@@ -428,7 +501,8 @@ int
 ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
           const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-       struct ip_set *set = ip_set_rcu_get(index);
+       struct ip_set *set = ip_set_rcu_get(
+                       dev_net(par->in ? par->in : par->out), index);
        int ret;
 
        BUG_ON(set == NULL);
@@ -450,7 +524,8 @@ int
 ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
           const struct xt_action_param *par, struct ip_set_adt_opt *opt)
 {
-       struct ip_set *set = ip_set_rcu_get(index);
+       struct ip_set *set = ip_set_rcu_get(
+                       dev_net(par->in ? par->in : par->out), index);
        int ret = 0;
 
        BUG_ON(set == NULL);
@@ -474,14 +549,15 @@ EXPORT_SYMBOL_GPL(ip_set_del);
  *
  */
 ip_set_id_t
-ip_set_get_byname(const char *name, struct ip_set **set)
+ip_set_get_byname(struct net *net, const char *name, struct ip_set **set)
 {
        ip_set_id_t i, index = IPSET_INVALID_ID;
        struct ip_set *s;
+       struct ip_set_net *inst = ip_set_pernet(net);
 
        rcu_read_lock();
-       for (i = 0; i < ip_set_max; i++) {
-               s = rcu_dereference(ip_set_list)[i];
+       for (i = 0; i < inst->ip_set_max; i++) {
+               s = rcu_dereference(inst->ip_set_list)[i];
                if (s != NULL && STREQ(s->name, name)) {
                        __ip_set_get(s);
                        index = i;
@@ -501,17 +577,26 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
  * to be valid, after calling this function.
  *
  */
-void
-ip_set_put_byindex(ip_set_id_t index)
+
+static inline void
+__ip_set_put_byindex(struct ip_set_net *inst, ip_set_id_t index)
 {
        struct ip_set *set;
 
        rcu_read_lock();
-       set = rcu_dereference(ip_set_list)[index];
+       set = rcu_dereference(inst->ip_set_list)[index];
        if (set != NULL)
                __ip_set_put(set);
        rcu_read_unlock();
 }
+
+void
+ip_set_put_byindex(struct net *net, ip_set_id_t index)
+{
+       struct ip_set_net *inst = ip_set_pernet(net);
+
+       __ip_set_put_byindex(inst, index);
+}
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
 /*
@@ -522,9 +607,9 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
  *
  */
 const char *
-ip_set_name_byindex(ip_set_id_t index)
+ip_set_name_byindex(struct net *net, ip_set_id_t index)
 {
-       const struct ip_set *set = ip_set_rcu_get(index);
+       const struct ip_set *set = ip_set_rcu_get(net, index);
 
        BUG_ON(set == NULL);
        BUG_ON(set->ref == 0);
@@ -546,14 +631,15 @@ EXPORT_SYMBOL_GPL(ip_set_name_byindex);
  * The nfnl mutex is used in the function.
  */
 ip_set_id_t
-ip_set_nfnl_get(const char *name)
+ip_set_nfnl_get(struct net *net, const char *name)
 {
        ip_set_id_t i, index = IPSET_INVALID_ID;
        struct ip_set *s;
+       struct ip_set_net *inst = ip_set_pernet(net);
 
        nfnl_lock(NFNL_SUBSYS_IPSET);
-       for (i = 0; i < ip_set_max; i++) {
-               s = nfnl_set(i);
+       for (i = 0; i < inst->ip_set_max; i++) {
+               s = nfnl_set(inst, i);
                if (s != NULL && STREQ(s->name, name)) {
                        __ip_set_get(s);
                        index = i;
@@ -573,15 +659,16 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_get);
  * The nfnl mutex is used in the function.
  */
 ip_set_id_t
-ip_set_nfnl_get_byindex(ip_set_id_t index)
+ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
 {
        struct ip_set *set;
+       struct ip_set_net *inst = ip_set_pernet(net);
 
-       if (index > ip_set_max)
+       if (index > inst->ip_set_max)
                return IPSET_INVALID_ID;
 
        nfnl_lock(NFNL_SUBSYS_IPSET);
-       set = nfnl_set(index);
+       set = nfnl_set(inst, index);
        if (set)
                __ip_set_get(set);
        else
@@ -600,13 +687,17 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_get_byindex);
  * The nfnl mutex is used in the function.
  */
 void
-ip_set_nfnl_put(ip_set_id_t index)
+ip_set_nfnl_put(struct net *net, ip_set_id_t index)
 {
        struct ip_set *set;
+       struct ip_set_net *inst = ip_set_pernet(net);
+
        nfnl_lock(NFNL_SUBSYS_IPSET);
-       set = nfnl_set(index);
-       if (set != NULL)
-               __ip_set_put(set);
+       if (!inst->is_deleted) { /* already deleted from ip_set_net_exit() */
+               set = nfnl_set(inst, index);
+               if (set != NULL)
+                       __ip_set_put(set);
+       }
        nfnl_unlock(NFNL_SUBSYS_IPSET);
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -664,14 +755,14 @@ static const struct nla_policy ip_set_create_policy[IPSET_ATTR_CMD_MAX + 1] = {
 };
 
 static struct ip_set *
-find_set_and_id(const char *name, ip_set_id_t *id)
+find_set_and_id(struct ip_set_net *inst, const char *name, ip_set_id_t *id)
 {
        struct ip_set *set = NULL;
        ip_set_id_t i;
 
        *id = IPSET_INVALID_ID;
-       for (i = 0; i < ip_set_max; i++) {
-               set = nfnl_set(i);
+       for (i = 0; i < inst->ip_set_max; i++) {
+               set = nfnl_set(inst, i);
                if (set != NULL && STREQ(set->name, name)) {
                        *id = i;
                        break;
@@ -681,22 +772,23 @@ find_set_and_id(const char *name, ip_set_id_t *id)
 }
 
 static inline struct ip_set *
-find_set(const char *name)
+find_set(struct ip_set_net *inst, const char *name)
 {
        ip_set_id_t id;
 
-       return find_set_and_id(name, &id);
+       return find_set_and_id(inst, name, &id);
 }
 
 static int
-find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
+find_free_id(struct ip_set_net *inst, const char *name, ip_set_id_t *index,
+            struct ip_set **set)
 {
        struct ip_set *s;
        ip_set_id_t i;
 
        *index = IPSET_INVALID_ID;
-       for (i = 0;  i < ip_set_max; i++) {
-               s = nfnl_set(i);
+       for (i = 0;  i < inst->ip_set_max; i++) {
+               s = nfnl_set(inst, i);
                if (s == NULL) {
                        if (*index == IPSET_INVALID_ID)
                                *index = i;
@@ -725,6 +817,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
              const struct nlattr * const attr[])
 {
+       struct net *net = sock_net(ctnl);
+       struct ip_set_net *inst = ip_set_pernet(net);
        struct ip_set *set, *clash = NULL;
        ip_set_id_t index = IPSET_INVALID_ID;
        struct nlattr *tb[IPSET_ATTR_CREATE_MAX+1] = {};
@@ -783,7 +877,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
                goto put_out;
        }
 
-       ret = set->type->create(set, tb, flags);
+       ret = set->type->create(net, set, tb, flags);
        if (ret != 0)
                goto put_out;
 
@@ -794,7 +888,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
         * by the nfnl mutex. Find the first free index in ip_set_list
         * and check clashing.
         */
-       ret = find_free_id(set->name, &index, &clash);
+       ret = find_free_id(inst, set->name, &index, &clash);
        if (ret == -EEXIST) {
                /* If this is the same set and requested, ignore error */
                if ((flags & IPSET_FLAG_EXIST) &&
@@ -807,9 +901,9 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
                goto cleanup;
        } else if (ret == -IPSET_ERR_MAX_SETS) {
                struct ip_set **list, **tmp;
-               ip_set_id_t i = ip_set_max + IP_SET_INC;
+               ip_set_id_t i = inst->ip_set_max + IP_SET_INC;
 
-               if (i < ip_set_max || i == IPSET_INVALID_ID)
+               if (i < inst->ip_set_max || i == IPSET_INVALID_ID)
                        /* Wraparound */
                        goto cleanup;
 
@@ -817,14 +911,14 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
                if (!list)
                        goto cleanup;
                /* nfnl mutex is held, both lists are valid */
-               tmp = nfnl_dereference(ip_set_list);
-               memcpy(list, tmp, sizeof(struct ip_set *) * ip_set_max);
-               rcu_assign_pointer(ip_set_list, list);
+               tmp = nfnl_dereference(inst->ip_set_list);
+               memcpy(list, tmp, sizeof(struct ip_set *) * inst->ip_set_max);
+               rcu_assign_pointer(inst->ip_set_list, list);
                /* Make sure all current packets have passed through */
                synchronize_net();
                /* Use new list */
-               index = ip_set_max;
-               ip_set_max = i;
+               index = inst->ip_set_max;
+               inst->ip_set_max = i;
                kfree(tmp);
                ret = 0;
        } else if (ret)
@@ -834,7 +928,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
         * Finally! Add our shiny new set to the list, and be done.
         */
        pr_debug("create: '%s' created with index %u!\n", set->name, index);
-       nfnl_set(index) = set;
+       nfnl_set(inst, index) = set;
 
        return ret;
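
When find_free_id() reports -IPSET_ERR_MAX_SETS above, the per-namespace array is grown by IP_SET_INC: allocate a larger array, copy the old pointers, publish the new array with rcu_assign_pointer(), wait with synchronize_net(), then free the old one. A simplified userspace sketch of the growth step and its wraparound guard (RCU publication omitted; names are illustrative):

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
        #include <stdint.h>

        #define INC         64
        #define INVALID_ID  UINT16_MAX

        static int grow_list(void ***listp, uint16_t *maxp)
        {
                uint16_t new_max = *maxp + INC;
                void **new_list;

                if (new_max < *maxp || new_max == INVALID_ID)
                        return -1;              /* counter would wrap around */
                new_list = calloc(new_max, sizeof(void *));
                if (!new_list)
                        return -1;
                memcpy(new_list, *listp, sizeof(void *) * *maxp);
                free(*listp);                   /* kernel: RCU-publish, then kfree */
                *listp = new_list;
                *maxp = new_max;
                return 0;
        }

        int main(void)
        {
                uint16_t max = 256;
                void **list = calloc(max, sizeof(void *));

                if (!list || grow_list(&list, &max) < 0)
                        return 1;
                printf("grew to %u slots\n", max);
                free(list);
                return 0;
        }
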
 
@@ -857,12 +951,12 @@ ip_set_setname_policy[IPSET_ATTR_CMD_MAX + 1] = {
 };
 
 static void
-ip_set_destroy_set(ip_set_id_t index)
+ip_set_destroy_set(struct ip_set_net *inst, ip_set_id_t index)
 {
-       struct ip_set *set = nfnl_set(index);
+       struct ip_set *set = nfnl_set(inst, index);
 
        pr_debug("set: %s\n",  set->name);
-       nfnl_set(index) = NULL;
+       nfnl_set(inst, index) = NULL;
 
        /* Must call it without holding any lock */
        set->variant->destroy(set);
@@ -875,6 +969,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
               const struct nlmsghdr *nlh,
               const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *s;
        ip_set_id_t i;
        int ret = 0;
@@ -894,21 +989,22 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
         */
        read_lock_bh(&ip_set_ref_lock);
        if (!attr[IPSET_ATTR_SETNAME]) {
-               for (i = 0; i < ip_set_max; i++) {
-                       s = nfnl_set(i);
+               for (i = 0; i < inst->ip_set_max; i++) {
+                       s = nfnl_set(inst, i);
                        if (s != NULL && s->ref) {
                                ret = -IPSET_ERR_BUSY;
                                goto out;
                        }
                }
                read_unlock_bh(&ip_set_ref_lock);
-               for (i = 0; i < ip_set_max; i++) {
-                       s = nfnl_set(i);
+               for (i = 0; i < inst->ip_set_max; i++) {
+                       s = nfnl_set(inst, i);
                        if (s != NULL)
-                               ip_set_destroy_set(i);
+                               ip_set_destroy_set(inst, i);
                }
        } else {
-               s = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME]), &i);
+               s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
+                                   &i);
                if (s == NULL) {
                        ret = -ENOENT;
                        goto out;
@@ -918,7 +1014,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
                }
                read_unlock_bh(&ip_set_ref_lock);
 
-               ip_set_destroy_set(i);
+               ip_set_destroy_set(inst, i);
        }
        return 0;
 out:
@@ -943,6 +1039,7 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
             const struct nlmsghdr *nlh,
             const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *s;
        ip_set_id_t i;
 
@@ -950,13 +1047,13 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
                return -IPSET_ERR_PROTOCOL;
 
        if (!attr[IPSET_ATTR_SETNAME]) {
-               for (i = 0; i < ip_set_max; i++) {
-                       s = nfnl_set(i);
+               for (i = 0; i < inst->ip_set_max; i++) {
+                       s = nfnl_set(inst, i);
                        if (s != NULL)
                                ip_set_flush_set(s);
                }
        } else {
-               s = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+               s = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
                if (s == NULL)
                        return -ENOENT;
 
@@ -982,6 +1079,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
              const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *set, *s;
        const char *name2;
        ip_set_id_t i;
@@ -992,7 +1090,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
                     attr[IPSET_ATTR_SETNAME2] == NULL))
                return -IPSET_ERR_PROTOCOL;
 
-       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
 
@@ -1003,8 +1101,8 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
        }
 
        name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
-       for (i = 0; i < ip_set_max; i++) {
-               s = nfnl_set(i);
+       for (i = 0; i < inst->ip_set_max; i++) {
+               s = nfnl_set(inst, i);
                if (s != NULL && STREQ(s->name, name2)) {
                        ret = -IPSET_ERR_EXIST_SETNAME2;
                        goto out;
@@ -1031,6 +1129,7 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
            const struct nlmsghdr *nlh,
            const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *from, *to;
        ip_set_id_t from_id, to_id;
        char from_name[IPSET_MAXNAMELEN];
@@ -1040,11 +1139,13 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
                     attr[IPSET_ATTR_SETNAME2] == NULL))
                return -IPSET_ERR_PROTOCOL;
 
-       from = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME]), &from_id);
+       from = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]),
+                              &from_id);
        if (from == NULL)
                return -ENOENT;
 
-       to = find_set_and_id(nla_data(attr[IPSET_ATTR_SETNAME2]), &to_id);
+       to = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME2]),
+                            &to_id);
        if (to == NULL)
                return -IPSET_ERR_EXIST_SETNAME2;
 
@@ -1061,8 +1162,8 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 
        write_lock_bh(&ip_set_ref_lock);
        swap(from->ref, to->ref);
-       nfnl_set(from_id) = to;
-       nfnl_set(to_id) = from;
+       nfnl_set(inst, from_id) = to;
+       nfnl_set(inst, to_id) = from;
        write_unlock_bh(&ip_set_ref_lock);
 
        return 0;
@@ -1081,9 +1182,10 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 static int
 ip_set_dump_done(struct netlink_callback *cb)
 {
+       struct ip_set_net *inst = (struct ip_set_net *)cb->data;
        if (cb->args[2]) {
-               pr_debug("release set %s\n", nfnl_set(cb->args[1])->name);
-               ip_set_put_byindex((ip_set_id_t) cb->args[1]);
+               pr_debug("release set %s\n", nfnl_set(inst, cb->args[1])->name);
+               __ip_set_put_byindex(inst, (ip_set_id_t) cb->args[1]);
        }
        return 0;
 }
@@ -1109,6 +1211,7 @@ dump_init(struct netlink_callback *cb)
        struct nlattr *attr = (void *)nlh + min_len;
        u32 dump_type;
        ip_set_id_t index;
+       struct ip_set_net *inst = (struct ip_set_net *)cb->data;
 
        /* Second pass, so parser can't fail */
        nla_parse(cda, IPSET_ATTR_CMD_MAX,
@@ -1122,7 +1225,7 @@ dump_init(struct netlink_callback *cb)
        if (cda[IPSET_ATTR_SETNAME]) {
                struct ip_set *set;
 
-               set = find_set_and_id(nla_data(cda[IPSET_ATTR_SETNAME]),
+               set = find_set_and_id(inst, nla_data(cda[IPSET_ATTR_SETNAME]),
                                      &index);
                if (set == NULL)
                        return -ENOENT;
@@ -1150,6 +1253,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
        unsigned int flags = NETLINK_CB(cb->skb).portid ? NLM_F_MULTI : 0;
        u32 dump_type, dump_flags;
        int ret = 0;
+       struct ip_set_net *inst = (struct ip_set_net *)cb->data;
 
        if (!cb->args[0]) {
                ret = dump_init(cb);
@@ -1163,18 +1267,18 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
                }
        }
 
-       if (cb->args[1] >= ip_set_max)
+       if (cb->args[1] >= inst->ip_set_max)
                goto out;
 
        dump_type = DUMP_TYPE(cb->args[0]);
        dump_flags = DUMP_FLAGS(cb->args[0]);
-       max = dump_type == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+       max = dump_type == DUMP_ONE ? cb->args[1] + 1 : inst->ip_set_max;
 dump_last:
        pr_debug("args[0]: %u %u args[1]: %ld\n",
                 dump_type, dump_flags, cb->args[1]);
        for (; cb->args[1] < max; cb->args[1]++) {
                index = (ip_set_id_t) cb->args[1];
-               set = nfnl_set(index);
+               set = nfnl_set(inst, index);
                if (set == NULL) {
                        if (dump_type == DUMP_ONE) {
                                ret = -ENOENT;
@@ -1252,8 +1356,8 @@ next_set:
 release_refcount:
        /* If there was an error or set is done, release set */
        if (ret || !cb->args[2]) {
-               pr_debug("release set %s\n", nfnl_set(index)->name);
-               ip_set_put_byindex(index);
+               pr_debug("release set %s\n", nfnl_set(inst, index)->name);
+               __ip_set_put_byindex(inst, index);
                cb->args[2] = 0;
        }
 out:
@@ -1271,6 +1375,8 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
            const struct nlmsghdr *nlh,
            const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
+
        if (unlikely(protocol_failed(attr)))
                return -IPSET_ERR_PROTOCOL;
 
@@ -1278,6 +1384,7 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
                struct netlink_dump_control c = {
                        .dump = ip_set_dump_start,
                        .done = ip_set_dump_done,
+                       .data = (void *)inst
                };
                return netlink_dump_start(ctnl, skb, nlh, &c);
        }
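
The dump side hands the per-namespace instance to the callbacks through netlink_dump_control.data and keeps its position in cb->args[], so a dump that spans several messages can resume where the previous chunk stopped. A simplified, self-contained sketch of that cursor pattern (all names invented for illustration):

        #include <stdio.h>

        #define NSETS      10
        #define PER_CHUNK   3

        struct dump_cursor {
                unsigned long args[2];  /* args[0]: started flag, args[1]: next index */
        };

        static const char *sets[NSETS] = {
                "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9"
        };

        /* Emit at most PER_CHUNK entries per call; return 0 when the dump is
         * finished, 1 when the caller should invoke us again (like a
         * multipart netlink dump).
         */
        static int dump_chunk(struct dump_cursor *cb)
        {
                int emitted = 0;

                if (!cb->args[0])
                        cb->args[0] = 1;        /* one-time init, cf. dump_init() */
                for (; cb->args[1] < NSETS && emitted < PER_CHUNK;
                     cb->args[1]++, emitted++)
                        printf("dumped %s\n", sets[cb->args[1]]);
                return cb->args[1] < NSETS;
        }

        int main(void)
        {
                struct dump_cursor cb = { { 0, 0 } };

                while (dump_chunk(&cb))
                        ;
                return 0;
        }
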
@@ -1356,6 +1463,7 @@ ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
            const struct nlmsghdr *nlh,
            const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *set;
        struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
        const struct nlattr *nla;
@@ -1374,7 +1482,7 @@ ip_set_uadd(struct sock *ctnl, struct sk_buff *skb,
                       attr[IPSET_ATTR_LINENO] == NULL))))
                return -IPSET_ERR_PROTOCOL;
 
-       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
 
@@ -1410,6 +1518,7 @@ ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
            const struct nlmsghdr *nlh,
            const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *set;
        struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
        const struct nlattr *nla;
@@ -1428,7 +1537,7 @@ ip_set_udel(struct sock *ctnl, struct sk_buff *skb,
                       attr[IPSET_ATTR_LINENO] == NULL))))
                return -IPSET_ERR_PROTOCOL;
 
-       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
 
@@ -1464,6 +1573,7 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
             const struct nlmsghdr *nlh,
             const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        struct ip_set *set;
        struct nlattr *tb[IPSET_ATTR_ADT_MAX+1] = {};
        int ret = 0;
@@ -1474,7 +1584,7 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
                     !flag_nested(attr[IPSET_ATTR_DATA])))
                return -IPSET_ERR_PROTOCOL;
 
-       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
 
@@ -1499,6 +1609,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
              const struct nlmsghdr *nlh,
              const struct nlattr * const attr[])
 {
+       struct ip_set_net *inst = ip_set_pernet(sock_net(ctnl));
        const struct ip_set *set;
        struct sk_buff *skb2;
        struct nlmsghdr *nlh2;
@@ -1508,7 +1619,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
                     attr[IPSET_ATTR_SETNAME] == NULL))
                return -IPSET_ERR_PROTOCOL;
 
-       set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
+       set = find_set(inst, nla_data(attr[IPSET_ATTR_SETNAME]));
        if (set == NULL)
                return -ENOENT;
 
@@ -1733,8 +1844,10 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
        unsigned int *op;
        void *data;
        int copylen = *len, ret = 0;
+       struct net *net = sock_net(sk);
+       struct ip_set_net *inst = ip_set_pernet(net);
 
-       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
        if (optval != SO_IP_SET)
                return -EBADF;
@@ -1783,22 +1896,39 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
                }
                req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
                nfnl_lock(NFNL_SUBSYS_IPSET);
-               find_set_and_id(req_get->set.name, &id);
+               find_set_and_id(inst, req_get->set.name, &id);
                req_get->set.index = id;
                nfnl_unlock(NFNL_SUBSYS_IPSET);
                goto copy;
        }
+       case IP_SET_OP_GET_FNAME: {
+               struct ip_set_req_get_set_family *req_get = data;
+               ip_set_id_t id;
+
+               if (*len != sizeof(struct ip_set_req_get_set_family)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+               req_get->set.name[IPSET_MAXNAMELEN - 1] = '\0';
+               nfnl_lock(NFNL_SUBSYS_IPSET);
+               find_set_and_id(inst, req_get->set.name, &id);
+               req_get->set.index = id;
+               if (id != IPSET_INVALID_ID)
+                       req_get->family = nfnl_set(inst, id)->family;
+               nfnl_unlock(NFNL_SUBSYS_IPSET);
+               goto copy;
+       }
        case IP_SET_OP_GET_BYINDEX: {
                struct ip_set_req_get_set *req_get = data;
                struct ip_set *set;
 
                if (*len != sizeof(struct ip_set_req_get_set) ||
-                   req_get->set.index >= ip_set_max) {
+                   req_get->set.index >= inst->ip_set_max) {
                        ret = -EINVAL;
                        goto done;
                }
                nfnl_lock(NFNL_SUBSYS_IPSET);
-               set = nfnl_set(req_get->set.index);
+               set = nfnl_set(inst, req_get->set.index);
                strncpy(req_get->set.name, set ? set->name : "",
                        IPSET_MAXNAMELEN);
                nfnl_unlock(NFNL_SUBSYS_IPSET);
@@ -1827,49 +1957,82 @@ static struct nf_sockopt_ops so_set __read_mostly = {
        .owner          = THIS_MODULE,
 };
 
-static int __init
-ip_set_init(void)
+static int __net_init
+ip_set_net_init(struct net *net)
 {
+       struct ip_set_net *inst = ip_set_pernet(net);
+
        struct ip_set **list;
-       int ret;
 
-       if (max_sets)
-               ip_set_max = max_sets;
-       if (ip_set_max >= IPSET_INVALID_ID)
-               ip_set_max = IPSET_INVALID_ID - 1;
+       inst->ip_set_max = max_sets ? max_sets : CONFIG_IP_SET_MAX;
+       if (inst->ip_set_max >= IPSET_INVALID_ID)
+               inst->ip_set_max = IPSET_INVALID_ID - 1;
 
-       list = kzalloc(sizeof(struct ip_set *) * ip_set_max, GFP_KERNEL);
+       list = kzalloc(sizeof(struct ip_set *) * inst->ip_set_max, GFP_KERNEL);
        if (!list)
                return -ENOMEM;
+       inst->is_deleted = 0;
+       rcu_assign_pointer(inst->ip_set_list, list);
+       pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+       return 0;
+}
+
+static void __net_exit
+ip_set_net_exit(struct net *net)
+{
+       struct ip_set_net *inst = ip_set_pernet(net);
+
+       struct ip_set *set = NULL;
+       ip_set_id_t i;
+
+       inst->is_deleted = 1; /* flag for ip_set_nfnl_put */
+
+       for (i = 0; i < inst->ip_set_max; i++) {
+               set = nfnl_set(inst, i);
+               if (set != NULL)
+                       ip_set_destroy_set(inst, i);
+       }
+       kfree(rcu_dereference_protected(inst->ip_set_list, 1));
+}
+
+static struct pernet_operations ip_set_net_ops = {
+       .init   = ip_set_net_init,
+       .exit   = ip_set_net_exit,
+       .id     = &ip_set_net_id,
+       .size   = sizeof(struct ip_set_net)
+};
+
 
-       rcu_assign_pointer(ip_set_list, list);
-       ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
+static int __init
+ip_set_init(void)
+{
+       int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys);
        if (ret != 0) {
                pr_err("ip_set: cannot register with nfnetlink.\n");
-               kfree(list);
                return ret;
        }
        ret = nf_register_sockopt(&so_set);
        if (ret != 0) {
                pr_err("SO_SET registry failed: %d\n", ret);
                nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-               kfree(list);
                return ret;
        }
-
-       pr_notice("ip_set: protocol %u\n", IPSET_PROTOCOL);
+       ret = register_pernet_subsys(&ip_set_net_ops);
+       if (ret) {
+               pr_err("ip_set: cannot register pernet_subsys.\n");
+               nf_unregister_sockopt(&so_set);
+               nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
+               return ret;
+       }
        return 0;
 }
 
 static void __exit
 ip_set_fini(void)
 {
-       struct ip_set **list = rcu_dereference_protected(ip_set_list, 1);
-
-       /* There can't be any existing set */
+       unregister_pernet_subsys(&ip_set_net_ops);
        nf_unregister_sockopt(&so_set);
        nfnetlink_subsys_unregister(&ip_set_netlink_subsys);
-       kfree(list);
        pr_debug("these are the famous last words\n");
 }
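
With the pernet_operations above, the core allocates one zeroed struct ip_set_net per network namespace (.size) and hands back its slot id (.id); ip_set_pernet() presumably wraps net_generic() on that id. A minimal kernel-side sketch of the same pattern, assuming the 3.12-era API shown here (not a standalone program; names are illustrative):

        #include <net/net_namespace.h>
        #include <net/netns/generic.h>

        static int demo_net_id __read_mostly;

        struct demo_net {
                u32 counter;            /* whatever per-namespace state is needed */
        };

        static inline struct demo_net *demo_pernet(struct net *net)
        {
                return net_generic(net, demo_net_id);
        }

        static int __net_init demo_net_init(struct net *net)
        {
                demo_pernet(net)->counter = 0;  /* area is allocated and zeroed for us */
                return 0;
        }

        static void __net_exit demo_net_exit(struct net *net)
        {
                /* per-net teardown; the generic area itself is freed by the core */
        }

        static struct pernet_operations demo_net_ops = {
                .init = demo_net_init,
                .exit = demo_net_exit,
                .id   = &demo_net_id,
                .size = sizeof(struct demo_net),
        };

        /* register_pernet_subsys(&demo_net_ops) in module init,
         * unregister_pernet_subsys(&demo_net_ops) in module exit.
         */
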
 
index dac156f819ac2f2fe25c876d800aadd2c47c0752..29fb01ddff93b0a0da7ad1dc3691a141376f5007 100644 (file)
@@ -102,9 +102,25 @@ ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
        int protocol = iph->protocol;
 
        /* See comments at tcp_match in ip_tables.c */
-       if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
+       if (protocol <= 0)
                return false;
 
+       if (ntohs(iph->frag_off) & IP_OFFSET)
+               switch (protocol) {
+               case IPPROTO_TCP:
+               case IPPROTO_SCTP:
+               case IPPROTO_UDP:
+               case IPPROTO_UDPLITE:
+               case IPPROTO_ICMP:
+                       /* Port info not available for fragment offset > 0 */
+                       return false;
+               default:
+                       /* Other protocols don't have ports,
+                          so we can match fragments */
+                       *proto = protocol;
+                       return true;
+               }
+
        return get_port(skb, protocol, protooff, src, port, proto);
 }
 EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
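
The change above narrows the fragment bail-out: only protocols that actually carry ports have to give up on non-first fragments; anything else can still be matched on the protocol number alone. A small standalone sketch of that decision (constants redefined locally; the real code goes on to extract the ports via get_port()):

        #include <stdio.h>
        #include <stdbool.h>
        #include <stdint.h>

        #define IP_OFFSET 0x1FFF        /* 13-bit fragment offset mask in frag_off */

        enum { P_ICMP = 1, P_TCP = 6, P_UDP = 17, P_GRE = 47,
               P_SCTP = 132, P_UDPLITE = 136 };

        /* Return true if the packet can still be matched; *has_ports tells
         * the caller whether port information is usable for this header.
         */
        static bool can_match(int protocol, uint16_t frag_off_host, bool *has_ports)
        {
                *has_ports = false;
                if (protocol <= 0)
                        return false;
                if (frag_off_host & IP_OFFSET) {
                        switch (protocol) {
                        case P_TCP: case P_SCTP: case P_UDP:
                        case P_UDPLITE: case P_ICMP:
                                return false;   /* ports live in the first fragment only */
                        default:
                                return true;    /* no ports to look at anyway */
                        }
                }
                *has_ports = true;              /* first fragment: transport header present */
                return true;
        }

        int main(void)
        {
                bool ports, ok;

                ok = can_match(P_TCP, 0x0010, &ports);
                printf("TCP, later fragment: match=%d ports=%d\n", ok, ports);
                ok = can_match(P_GRE, 0x0010, &ports);
                printf("GRE, later fragment: match=%d ports=%d\n", ok, ports);
                ok = can_match(P_TCP, 0, &ports);
                printf("TCP, first fragment: match=%d ports=%d\n", ok, ports);
                return 0;
        }
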
index 707bc520d629f311dd9978d2e0bf222719f71c82..6a80dbd30df7bea89d0279a5752faa668f2f68a7 100644 (file)
@@ -15,8 +15,7 @@
 #define rcu_dereference_bh(p)  rcu_dereference(p)
 #endif
 
-#define CONCAT(a, b)           a##b
-#define TOKEN(a, b)            CONCAT(a, b)
+#define rcu_dereference_bh_nfnl(p)     rcu_dereference_bh_check(p, 1)
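
The private CONCAT/TOKEN pair is dropped here in favour of the shared IPSET_TOKEN macro used throughout the renamed mtype_* definitions below. The indirection matters because the extra expansion step lets MTYPE expand before ## pastes the tokens together; a standalone illustration with made-up macro names:

        #include <stdio.h>

        #define MY_CONCAT(a, b) a##b
        #define MY_TOKEN(a, b)  MY_CONCAT(a, b) /* extra level so 'a' expands first */

        #define MTYPE           hash_ip4
        #define mtype_add       MY_TOKEN(MTYPE, _add)

        /* Defines hash_ip4_add(): mtype_add expands via MY_TOKEN/MY_CONCAT.
         * A single-level paste would produce the literal name MTYPE_add.
         */
        static int mtype_add(int v)
        {
                return v + 1;
        }

        int main(void)
        {
                /* The generated name is an ordinary identifier. */
                printf("%d\n", hash_ip4_add(41));
                return 0;
        }
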
 
 /* Hashing which uses arrays to resolve clashing. The hash table is resized
  * (doubled) when searching becomes too long.
@@ -78,10 +77,14 @@ struct htable {
 
 #define hbucket(h, i)          (&((h)->bucket[i]))
 
+#ifndef IPSET_NET_COUNT
+#define IPSET_NET_COUNT                1
+#endif
+
 /* Book-keeping of the prefixes added to the set */
 struct net_prefixes {
-       u8 cidr;                /* the different cidr values in the set */
-       u32 nets;               /* number of elements per cidr */
+       u32 nets[IPSET_NET_COUNT]; /* number of elements per cidr */
+       u8 cidr[IPSET_NET_COUNT];  /* the different cidr values in the set */
 };
 
 /* Compute the hash table size */
@@ -114,23 +117,6 @@ htable_bits(u32 hashsize)
        return bits;
 }
 
-/* Destroy the hashtable part of the set */
-static void
-ahash_destroy(struct htable *t)
-{
-       struct hbucket *n;
-       u32 i;
-
-       for (i = 0; i < jhash_size(t->htable_bits); i++) {
-               n = hbucket(t, i);
-               if (n->size)
-                       /* FIXME: use slab cache */
-                       kfree(n->value);
-       }
-
-       ip_set_free(t);
-}
-
 static int
 hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 {
@@ -156,30 +142,30 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 }
 
 #ifdef IP_SET_HASH_WITH_NETS
+#if IPSET_NET_COUNT > 1
+#define __CIDR(cidr, i)                (cidr[i])
+#else
+#define __CIDR(cidr, i)                (cidr)
+#endif
 #ifdef IP_SET_HASH_WITH_NETS_PACKED
 /* When cidr is packed with nomatch, cidr - 1 is stored in the entry */
-#define CIDR(cidr)             (cidr + 1)
+#define CIDR(cidr, i)          (__CIDR(cidr, i) + 1)
 #else
-#define CIDR(cidr)             (cidr)
+#define CIDR(cidr, i)          (__CIDR(cidr, i))
 #endif
 
 #define SET_HOST_MASK(family)  (family == AF_INET ? 32 : 128)
 
 #ifdef IP_SET_HASH_WITH_MULTI
-#define NETS_LENGTH(family)    (SET_HOST_MASK(family) + 1)
+#define NLEN(family)           (SET_HOST_MASK(family) + 1)
 #else
-#define NETS_LENGTH(family)    SET_HOST_MASK(family)
+#define NLEN(family)           SET_HOST_MASK(family)
 #endif
 
 #else
-#define NETS_LENGTH(family)    0
+#define NLEN(family)           0
 #endif /* IP_SET_HASH_WITH_NETS */
 
-#define ext_timeout(e, h)      \
-(unsigned long *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, h)      \
-(struct ip_set_counter *)(((void *)(e)) + (h)->offset[IPSET_OFFSET_COUNTER])
-
 #endif /* _IP_SET_HASH_GEN_H */
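
The removed ext_timeout()/ext_counter() helpers computed extension addresses from offsets kept in the hash-specific struct; as the uses further down show, the element size and extension offsets now live in the generic set, so timeout, counter and comment data are reached the same way for every set type. A simplified userspace sketch of that offset scheme (names invented, alignment handling omitted):

        #include <stdio.h>
        #include <stdlib.h>
        #include <stdint.h>

        enum ext_id { EXT_TIMEOUT, EXT_COUNTER, EXT_MAX };

        struct set_layout {
                size_t dsize;                   /* total element size */
                size_t offset[EXT_MAX];         /* extension offsets within an element */
        };

        struct elem_fixed { uint32_t ip; };     /* the type-specific part */

        /* Lay out the element: fixed part first, then each enabled extension. */
        static void layout_init(struct set_layout *l, int with_timeout, int with_counter)
        {
                l->dsize = sizeof(struct elem_fixed);
                if (with_timeout) {
                        l->offset[EXT_TIMEOUT] = l->dsize;
                        l->dsize += sizeof(uint32_t);
                }
                if (with_counter) {
                        l->offset[EXT_COUNTER] = l->dsize;
                        l->dsize += 2 * sizeof(uint64_t);       /* packets + bytes */
                }
        }

        #define ext_ptr(e, l, id)  ((void *)((char *)(e) + (l)->offset[id]))

        int main(void)
        {
                struct set_layout l;
                void *e;

                layout_init(&l, 1, 1);
                e = calloc(1, l.dsize);
                if (!e)
                        return 1;
                *(uint32_t *)ext_ptr(e, &l, EXT_TIMEOUT) = 3600;
                printf("dsize=%zu timeout@%zu counter@%zu\n",
                       l.dsize, l.offset[EXT_TIMEOUT], l.offset[EXT_COUNTER]);
                free(e);
                return 0;
        }
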
 
 /* Family dependent templates */
@@ -194,6 +180,8 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 #undef mtype_data_next
 #undef mtype_elem
 
+#undef mtype_ahash_destroy
+#undef mtype_ext_cleanup
 #undef mtype_add_cidr
 #undef mtype_del_cidr
 #undef mtype_ahash_memsize
@@ -220,41 +208,44 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 
 #undef HKEY
 
-#define mtype_data_equal       TOKEN(MTYPE, _data_equal)
+#define mtype_data_equal       IPSET_TOKEN(MTYPE, _data_equal)
 #ifdef IP_SET_HASH_WITH_NETS
-#define mtype_do_data_match    TOKEN(MTYPE, _do_data_match)
+#define mtype_do_data_match    IPSET_TOKEN(MTYPE, _do_data_match)
 #else
 #define mtype_do_data_match(d) 1
 #endif
-#define mtype_data_set_flags   TOKEN(MTYPE, _data_set_flags)
-#define mtype_data_reset_flags TOKEN(MTYPE, _data_reset_flags)
-#define mtype_data_netmask     TOKEN(MTYPE, _data_netmask)
-#define mtype_data_list                TOKEN(MTYPE, _data_list)
-#define mtype_data_next                TOKEN(MTYPE, _data_next)
-#define mtype_elem             TOKEN(MTYPE, _elem)
-#define mtype_add_cidr         TOKEN(MTYPE, _add_cidr)
-#define mtype_del_cidr         TOKEN(MTYPE, _del_cidr)
-#define mtype_ahash_memsize    TOKEN(MTYPE, _ahash_memsize)
-#define mtype_flush            TOKEN(MTYPE, _flush)
-#define mtype_destroy          TOKEN(MTYPE, _destroy)
-#define mtype_gc_init          TOKEN(MTYPE, _gc_init)
-#define mtype_same_set         TOKEN(MTYPE, _same_set)
-#define mtype_kadt             TOKEN(MTYPE, _kadt)
-#define mtype_uadt             TOKEN(MTYPE, _uadt)
+#define mtype_data_set_flags   IPSET_TOKEN(MTYPE, _data_set_flags)
+#define mtype_data_reset_elem  IPSET_TOKEN(MTYPE, _data_reset_elem)
+#define mtype_data_reset_flags IPSET_TOKEN(MTYPE, _data_reset_flags)
+#define mtype_data_netmask     IPSET_TOKEN(MTYPE, _data_netmask)
+#define mtype_data_list                IPSET_TOKEN(MTYPE, _data_list)
+#define mtype_data_next                IPSET_TOKEN(MTYPE, _data_next)
+#define mtype_elem             IPSET_TOKEN(MTYPE, _elem)
+#define mtype_ahash_destroy    IPSET_TOKEN(MTYPE, _ahash_destroy)
+#define mtype_ext_cleanup      IPSET_TOKEN(MTYPE, _ext_cleanup)
+#define mtype_add_cidr         IPSET_TOKEN(MTYPE, _add_cidr)
+#define mtype_del_cidr         IPSET_TOKEN(MTYPE, _del_cidr)
+#define mtype_ahash_memsize    IPSET_TOKEN(MTYPE, _ahash_memsize)
+#define mtype_flush            IPSET_TOKEN(MTYPE, _flush)
+#define mtype_destroy          IPSET_TOKEN(MTYPE, _destroy)
+#define mtype_gc_init          IPSET_TOKEN(MTYPE, _gc_init)
+#define mtype_same_set         IPSET_TOKEN(MTYPE, _same_set)
+#define mtype_kadt             IPSET_TOKEN(MTYPE, _kadt)
+#define mtype_uadt             IPSET_TOKEN(MTYPE, _uadt)
 #define mtype                  MTYPE
 
-#define mtype_elem             TOKEN(MTYPE, _elem)
-#define mtype_add              TOKEN(MTYPE, _add)
-#define mtype_del              TOKEN(MTYPE, _del)
-#define mtype_test_cidrs       TOKEN(MTYPE, _test_cidrs)
-#define mtype_test             TOKEN(MTYPE, _test)
-#define mtype_expire           TOKEN(MTYPE, _expire)
-#define mtype_resize           TOKEN(MTYPE, _resize)
-#define mtype_head             TOKEN(MTYPE, _head)
-#define mtype_list             TOKEN(MTYPE, _list)
-#define mtype_gc               TOKEN(MTYPE, _gc)
-#define mtype_variant          TOKEN(MTYPE, _variant)
-#define mtype_data_match       TOKEN(MTYPE, _data_match)
+#define mtype_elem             IPSET_TOKEN(MTYPE, _elem)
+#define mtype_add              IPSET_TOKEN(MTYPE, _add)
+#define mtype_del              IPSET_TOKEN(MTYPE, _del)
+#define mtype_test_cidrs       IPSET_TOKEN(MTYPE, _test_cidrs)
+#define mtype_test             IPSET_TOKEN(MTYPE, _test)
+#define mtype_expire           IPSET_TOKEN(MTYPE, _expire)
+#define mtype_resize           IPSET_TOKEN(MTYPE, _resize)
+#define mtype_head             IPSET_TOKEN(MTYPE, _head)
+#define mtype_list             IPSET_TOKEN(MTYPE, _list)
+#define mtype_gc               IPSET_TOKEN(MTYPE, _gc)
+#define mtype_variant          IPSET_TOKEN(MTYPE, _variant)
+#define mtype_data_match       IPSET_TOKEN(MTYPE, _data_match)
 
 #ifndef HKEY_DATALEN
 #define HKEY_DATALEN           sizeof(struct mtype_elem)
@@ -269,13 +260,10 @@ hbucket_elem_add(struct hbucket *n, u8 ahash_max, size_t dsize)
 
 /* The generic hash structure */
 struct htype {
-       struct htable *table;   /* the hash table */
+       struct htable __rcu *table; /* the hash table */
        u32 maxelem;            /* max elements in the hash */
        u32 elements;           /* current element (vs timeout) */
        u32 initval;            /* random jhash init value */
-       u32 timeout;            /* timeout value, if enabled */
-       size_t dsize;           /* data struct size */
-       size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
        struct timer_list gc;   /* garbage collection when timeout enabled */
        struct mtype_elem next; /* temporary storage for uadd */
 #ifdef IP_SET_HASH_WITH_MULTI
@@ -297,49 +285,49 @@ struct htype {
 /* Network cidr size book keeping when the hash stores different
  * sized networks */
 static void
-mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length)
+mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
 {
        int i, j;
 
        /* Add in increasing prefix order, so larger cidr first */
-       for (i = 0, j = -1; i < nets_length && h->nets[i].nets; i++) {
+       for (i = 0, j = -1; i < nets_length && h->nets[i].nets[n]; i++) {
                if (j != -1)
                        continue;
-               else if (h->nets[i].cidr < cidr)
+               else if (h->nets[i].cidr[n] < cidr)
                        j = i;
-               else if (h->nets[i].cidr == cidr) {
-                       h->nets[i].nets++;
+               else if (h->nets[i].cidr[n] == cidr) {
+                       h->nets[i].nets[n]++;
                        return;
                }
        }
        if (j != -1) {
                for (; i > j; i--) {
-                       h->nets[i].cidr = h->nets[i - 1].cidr;
-                       h->nets[i].nets = h->nets[i - 1].nets;
+                       h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
+                       h->nets[i].nets[n] = h->nets[i - 1].nets[n];
                }
        }
-       h->nets[i].cidr = cidr;
-       h->nets[i].nets = 1;
+       h->nets[i].cidr[n] = cidr;
+       h->nets[i].nets[n] = 1;
 }
 
 static void
-mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
+mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
 {
        u8 i, j, net_end = nets_length - 1;
 
        for (i = 0; i < nets_length; i++) {
-               if (h->nets[i].cidr != cidr)
+               if (h->nets[i].cidr[n] != cidr)
                        continue;
-                if (h->nets[i].nets > 1 || i == net_end ||
-                    h->nets[i + 1].nets == 0) {
-                        h->nets[i].nets--;
+                if (h->nets[i].nets[n] > 1 || i == net_end ||
+                    h->nets[i + 1].nets[n] == 0) {
+                        h->nets[i].nets[n]--;
                         return;
                 }
-                for (j = i; j < net_end && h->nets[j].nets; j++) {
-                       h->nets[j].cidr = h->nets[j + 1].cidr;
-                       h->nets[j].nets = h->nets[j + 1].nets;
+                for (j = i; j < net_end && h->nets[j].nets[n]; j++) {
+                       h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
+                       h->nets[j].nets[n] = h->nets[j + 1].nets[n];
                 }
-                h->nets[j].nets = 0;
+                h->nets[j].nets[n] = 0;
                 return;
        }
 }
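
mtype_add_cidr()/mtype_del_cidr() above keep one (cidr, element count) column per network dimension, ordered with the more specific prefixes first, so the lookup path knows which prefix lengths are worth probing. A simplified single-dimension userspace sketch of that bookkeeping:

        #include <stdio.h>
        #include <stdint.h>
        #include <string.h>

        #define NLEN 129                /* room for /0 .. /128 (IPv6 host mask + 1) */

        struct prefix_stat {
                uint8_t  cidr;
                uint32_t count;
        };

        static struct prefix_stat nets[NLEN];

        /* Keep entries sorted by decreasing cidr (most specific first);
         * bump the counter if the prefix length is already recorded.
         */
        static void add_cidr(uint8_t cidr)
        {
                int i;

                for (i = 0; i < NLEN && nets[i].count; i++) {
                        if (nets[i].cidr == cidr) {
                                nets[i].count++;
                                return;
                        }
                        if (nets[i].cidr < cidr)
                                break;          /* insert here, shift the rest right */
                }
                memmove(&nets[i + 1], &nets[i], (NLEN - i - 1) * sizeof(nets[0]));
                nets[i].cidr = cidr;
                nets[i].count = 1;
        }

        static void del_cidr(uint8_t cidr)
        {
                int i;

                for (i = 0; i < NLEN && nets[i].count; i++) {
                        if (nets[i].cidr != cidr)
                                continue;
                        if (--nets[i].count == 0)       /* close the hole, keep order */
                                memmove(&nets[i], &nets[i + 1],
                                        (NLEN - i - 1) * sizeof(nets[0]));
                        return;
                }
        }

        int main(void)
        {
                int i;

                add_cidr(24); add_cidr(16); add_cidr(24); del_cidr(16);
                for (i = 0; i < NLEN && nets[i].count; i++)
                        printf("/%u x %u\n", nets[i].cidr, nets[i].count);
                return 0;
        }
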
@@ -347,10 +335,10 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
 
 /* Calculate the actual memory size of the set data */
 static size_t
-mtype_ahash_memsize(const struct htype *h, u8 nets_length)
+mtype_ahash_memsize(const struct htype *h, const struct htable *t,
+                   u8 nets_length, size_t dsize)
 {
        u32 i;
-       struct htable *t = h->table;
        size_t memsize = sizeof(*h)
                         + sizeof(*t)
 #ifdef IP_SET_HASH_WITH_NETS
@@ -359,35 +347,70 @@ mtype_ahash_memsize(const struct htype *h, u8 nets_length)
                         + jhash_size(t->htable_bits) * sizeof(struct hbucket);
 
        for (i = 0; i < jhash_size(t->htable_bits); i++)
-               memsize += t->bucket[i].size * h->dsize;
+               memsize += t->bucket[i].size * dsize;
 
        return memsize;
 }
 
+/* Get the ith element from the array block n */
+#define ahash_data(n, i, dsize)        \
+       ((struct mtype_elem *)((n)->value + ((i) * (dsize))))
+
+static void
+mtype_ext_cleanup(struct ip_set *set, struct hbucket *n)
+{
+       int i;
+
+       for (i = 0; i < n->pos; i++)
+               ip_set_ext_destroy(set, ahash_data(n, i, set->dsize));
+}
+
 /* Flush a hash type of set: destroy all elements */
 static void
 mtype_flush(struct ip_set *set)
 {
        struct htype *h = set->data;
-       struct htable *t = h->table;
+       struct htable *t;
        struct hbucket *n;
        u32 i;
 
+       t = rcu_dereference_bh_nfnl(h->table);
        for (i = 0; i < jhash_size(t->htable_bits); i++) {
                n = hbucket(t, i);
                if (n->size) {
+                       if (set->extensions & IPSET_EXT_DESTROY)
+                               mtype_ext_cleanup(set, n);
                        n->size = n->pos = 0;
                        /* FIXME: use slab cache */
                        kfree(n->value);
                }
        }
 #ifdef IP_SET_HASH_WITH_NETS
-       memset(h->nets, 0, sizeof(struct net_prefixes)
-                          * NETS_LENGTH(set->family));
+       memset(h->nets, 0, sizeof(struct net_prefixes) * NLEN(set->family));
 #endif
        h->elements = 0;
 }
 
+/* Destroy the hashtable part of the set */
+static void
+mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
+{
+       struct hbucket *n;
+       u32 i;
+
+       for (i = 0; i < jhash_size(t->htable_bits); i++) {
+               n = hbucket(t, i);
+               if (n->size) {
+                       if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
+                               mtype_ext_cleanup(set, n);
+                       /* FIXME: use slab cache */
+                       kfree(n->value);
+               }
+       }
+
+       ip_set_free(t);
+}
+
 /* Destroy a hash type of set */
 static void
 mtype_destroy(struct ip_set *set)
@@ -397,7 +420,7 @@ mtype_destroy(struct ip_set *set)
        if (set->extensions & IPSET_EXT_TIMEOUT)
                del_timer_sync(&h->gc);
 
-       ahash_destroy(h->table);
+       mtype_ahash_destroy(set, rcu_dereference_bh_nfnl(h->table), true);
 #ifdef IP_SET_HASH_WITH_RBTREE
        rbtree_destroy(&h->rbtree);
 #endif
@@ -414,10 +437,10 @@ mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
        init_timer(&h->gc);
        h->gc.data = (unsigned long) set;
        h->gc.function = gc;
-       h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+       h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&h->gc);
        pr_debug("gc initialized, run in every %u\n",
-                IPSET_GC_PERIOD(h->timeout));
+                IPSET_GC_PERIOD(set->timeout));
 }
 
 static bool
@@ -428,37 +451,40 @@ mtype_same_set(const struct ip_set *a, const struct ip_set *b)
 
        /* Resizing changes htable_bits, so we ignore it */
        return x->maxelem == y->maxelem &&
-              x->timeout == y->timeout &&
+              a->timeout == b->timeout &&
 #ifdef IP_SET_HASH_WITH_NETMASK
               x->netmask == y->netmask &&
 #endif
               a->extensions == b->extensions;
 }
 
-/* Get the ith element from the array block n */
-#define ahash_data(n, i, dsize)        \
-       ((struct mtype_elem *)((n)->value + ((i) * (dsize))))
-
 /* Delete expired elements from the hashtable */
 static void
-mtype_expire(struct htype *h, u8 nets_length, size_t dsize)
+mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
 {
-       struct htable *t = h->table;
+       struct htable *t;
        struct hbucket *n;
        struct mtype_elem *data;
        u32 i;
        int j;
+#ifdef IP_SET_HASH_WITH_NETS
+       u8 k;
+#endif
 
+       rcu_read_lock_bh();
+       t = rcu_dereference_bh(h->table);
        for (i = 0; i < jhash_size(t->htable_bits); i++) {
                n = hbucket(t, i);
                for (j = 0; j < n->pos; j++) {
                        data = ahash_data(n, j, dsize);
-                       if (ip_set_timeout_expired(ext_timeout(data, h))) {
+                       if (ip_set_timeout_expired(ext_timeout(data, set))) {
                                pr_debug("expired %u/%u\n", i, j);
 #ifdef IP_SET_HASH_WITH_NETS
-                               mtype_del_cidr(h, CIDR(data->cidr),
-                                              nets_length);
+                               for (k = 0; k < IPSET_NET_COUNT; k++)
+                                       mtype_del_cidr(h, CIDR(data->cidr, k),
+                                                      nets_length, k);
 #endif
+                               ip_set_ext_destroy(set, data);
                                if (j != n->pos - 1)
                                        /* Not last one */
                                        memcpy(data,
@@ -481,6 +507,7 @@ mtype_expire(struct htype *h, u8 nets_length, size_t dsize)
                        n->value = tmp;
                }
        }
+       rcu_read_unlock_bh();
 }
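
mtype_expire() (like the other read paths below) now pins the table with rcu_read_lock_bh()/rcu_dereference_bh() instead of trusting a plain pointer, because resizing publishes a replacement table and must not free the old one while readers still walk it. A generic sketch of that reader/updater pairing, assuming a pointer published like h->table here (the cleanup call is hypothetical):

        /* Reader side (cf. mtype_expire/mtype_test): pin the current table. */
        rcu_read_lock_bh();
        t = rcu_dereference_bh(h->table);
        /* ... walk jhash_size(t->htable_bits) buckets ... */
        rcu_read_unlock_bh();

        /* Updater side: publish the replacement, wait out the readers,
         * and only then release the old table.
         */
        old = rcu_dereference_protected(h->table, 1);   /* updaters serialized by caller */
        rcu_assign_pointer(h->table, new_table);
        synchronize_rcu_bh();           /* waits for rcu_read_lock_bh() readers */
        release_old_table(old);         /* hypothetical cleanup */
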
 
 static void
@@ -491,10 +518,10 @@ mtype_gc(unsigned long ul_set)
 
        pr_debug("called\n");
        write_lock_bh(&set->lock);
-       mtype_expire(h, NETS_LENGTH(set->family), h->dsize);
+       mtype_expire(set, h, NLEN(set->family), set->dsize);
        write_unlock_bh(&set->lock);
 
-       h->gc.expires = jiffies + IPSET_GC_PERIOD(h->timeout) * HZ;
+       h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&h->gc);
 }
 
@@ -505,7 +532,7 @@ static int
 mtype_resize(struct ip_set *set, bool retried)
 {
        struct htype *h = set->data;
-       struct htable *t, *orig = h->table;
+       struct htable *t, *orig = rcu_dereference_bh_nfnl(h->table);
        u8 htable_bits = orig->htable_bits;
 #ifdef IP_SET_HASH_WITH_NETS
        u8 flags;
@@ -520,8 +547,7 @@ mtype_resize(struct ip_set *set, bool retried)
        if (SET_WITH_TIMEOUT(set) && !retried) {
                i = h->elements;
                write_lock_bh(&set->lock);
-               mtype_expire(set->data, NETS_LENGTH(set->family),
-                            h->dsize);
+               mtype_expire(set, set->data, NLEN(set->family), set->dsize);
                write_unlock_bh(&set->lock);
                if (h->elements < i)
                        return 0;
@@ -548,25 +574,25 @@ retry:
        for (i = 0; i < jhash_size(orig->htable_bits); i++) {
                n = hbucket(orig, i);
                for (j = 0; j < n->pos; j++) {
-                       data = ahash_data(n, j, h->dsize);
+                       data = ahash_data(n, j, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
                        flags = 0;
                        mtype_data_reset_flags(data, &flags);
 #endif
                        m = hbucket(t, HKEY(data, h->initval, htable_bits));
-                       ret = hbucket_elem_add(m, AHASH_MAX(h), h->dsize);
+                       ret = hbucket_elem_add(m, AHASH_MAX(h), set->dsize);
                        if (ret < 0) {
 #ifdef IP_SET_HASH_WITH_NETS
                                mtype_data_reset_flags(data, &flags);
 #endif
                                read_unlock_bh(&set->lock);
-                               ahash_destroy(t);
+                               mtype_ahash_destroy(set, t, false);
                                if (ret == -EAGAIN)
                                        goto retry;
                                return ret;
                        }
-                       d = ahash_data(m, m->pos++, h->dsize);
-                       memcpy(d, data, h->dsize);
+                       d = ahash_data(m, m->pos++, set->dsize);
+                       memcpy(d, data, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
                        mtype_data_reset_flags(d, &flags);
 #endif
@@ -581,7 +607,7 @@ retry:
 
        pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
                 orig->htable_bits, orig, t->htable_bits, t);
-       ahash_destroy(orig);
+       mtype_ahash_destroy(set, orig, false);
 
        return 0;
 }
@@ -604,7 +630,7 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 
        if (SET_WITH_TIMEOUT(set) && h->elements >= h->maxelem)
                /* FIXME: when set is full, we slow down here */
-               mtype_expire(h, NETS_LENGTH(set->family), h->dsize);
+               mtype_expire(set, h, NLEN(set->family), set->dsize);
 
        if (h->elements >= h->maxelem) {
                if (net_ratelimit())
@@ -618,11 +644,11 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        key = HKEY(value, h->initval, t->htable_bits);
        n = hbucket(t, key);
        for (i = 0; i < n->pos; i++) {
-               data = ahash_data(n, i, h->dsize);
+               data = ahash_data(n, i, set->dsize);
                if (mtype_data_equal(data, d, &multi)) {
                        if (flag_exist ||
                            (SET_WITH_TIMEOUT(set) &&
-                            ip_set_timeout_expired(ext_timeout(data, h)))) {
+                            ip_set_timeout_expired(ext_timeout(data, set)))) {
                                /* Just the extensions could be overwritten */
                                j = i;
                                goto reuse_slot;
@@ -633,30 +659,37 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
                }
                /* Reuse first timed out entry */
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(data, h)) &&
+                   ip_set_timeout_expired(ext_timeout(data, set)) &&
                    j != AHASH_MAX(h) + 1)
                        j = i;
        }
 reuse_slot:
        if (j != AHASH_MAX(h) + 1) {
                /* Fill out reused slot */
-               data = ahash_data(n, j, h->dsize);
+               data = ahash_data(n, j, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
-               mtype_del_cidr(h, CIDR(data->cidr), NETS_LENGTH(set->family));
-               mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+               for (i = 0; i < IPSET_NET_COUNT; i++) {
+                       mtype_del_cidr(h, CIDR(data->cidr, i),
+                                      NLEN(set->family), i);
+                       mtype_add_cidr(h, CIDR(d->cidr, i),
+                                      NLEN(set->family), i);
+               }
 #endif
+               ip_set_ext_destroy(set, data);
        } else {
                /* Use/create a new slot */
                TUNE_AHASH_MAX(h, multi);
-               ret = hbucket_elem_add(n, AHASH_MAX(h), h->dsize);
+               ret = hbucket_elem_add(n, AHASH_MAX(h), set->dsize);
                if (ret != 0) {
                        if (ret == -EAGAIN)
                                mtype_data_next(&h->next, d);
                        goto out;
                }
-               data = ahash_data(n, n->pos++, h->dsize);
+               data = ahash_data(n, n->pos++, set->dsize);
 #ifdef IP_SET_HASH_WITH_NETS
-               mtype_add_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+               for (i = 0; i < IPSET_NET_COUNT; i++)
+                       mtype_add_cidr(h, CIDR(d->cidr, i), NLEN(set->family),
+                                      i);
 #endif
                h->elements++;
        }
@@ -665,9 +698,11 @@ reuse_slot:
        mtype_data_set_flags(data, flags);
 #endif
        if (SET_WITH_TIMEOUT(set))
-               ip_set_timeout_set(ext_timeout(data, h), ext->timeout);
+               ip_set_timeout_set(ext_timeout(data, set), ext->timeout);
        if (SET_WITH_COUNTER(set))
-               ip_set_init_counter(ext_counter(data, h), ext);
+               ip_set_init_counter(ext_counter(data, set), ext);
+       if (SET_WITH_COMMENT(set))
+               ip_set_init_comment(ext_comment(data, set), ext);
 
 out:
        rcu_read_unlock_bh();
@@ -682,47 +717,60 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
          struct ip_set_ext *mext, u32 flags)
 {
        struct htype *h = set->data;
-       struct htable *t = h->table;
+       struct htable *t;
        const struct mtype_elem *d = value;
        struct mtype_elem *data;
        struct hbucket *n;
-       int i;
+       int i, ret = -IPSET_ERR_EXIST;
+#ifdef IP_SET_HASH_WITH_NETS
+       u8 j;
+#endif
        u32 key, multi = 0;
 
+       rcu_read_lock_bh();
+       t = rcu_dereference_bh(h->table);
        key = HKEY(value, h->initval, t->htable_bits);
        n = hbucket(t, key);
        for (i = 0; i < n->pos; i++) {
-               data = ahash_data(n, i, h->dsize);
+               data = ahash_data(n, i, set->dsize);
                if (!mtype_data_equal(data, d, &multi))
                        continue;
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(data, h)))
-                       return -IPSET_ERR_EXIST;
+                   ip_set_timeout_expired(ext_timeout(data, set)))
+                       goto out;
                if (i != n->pos - 1)
                        /* Not last one */
-                       memcpy(data, ahash_data(n, n->pos - 1, h->dsize),
-                              h->dsize);
+                       memcpy(data, ahash_data(n, n->pos - 1, set->dsize),
+                              set->dsize);
 
                n->pos--;
                h->elements--;
 #ifdef IP_SET_HASH_WITH_NETS
-               mtype_del_cidr(h, CIDR(d->cidr), NETS_LENGTH(set->family));
+               for (j = 0; j < IPSET_NET_COUNT; j++)
+                       mtype_del_cidr(h, CIDR(d->cidr, j), NLEN(set->family),
+                                      j);
 #endif
+               ip_set_ext_destroy(set, data);
                if (n->pos + AHASH_INIT_SIZE < n->size) {
                        void *tmp = kzalloc((n->size - AHASH_INIT_SIZE)
-                                           * h->dsize,
+                                           * set->dsize,
                                            GFP_ATOMIC);
-                       if (!tmp)
-                               return 0;
+                       if (!tmp) {
+                               ret = 0;
+                               goto out;
+                       }
                        n->size -= AHASH_INIT_SIZE;
-                       memcpy(tmp, n->value, n->size * h->dsize);
+                       memcpy(tmp, n->value, n->size * set->dsize);
                        kfree(n->value);
                        n->value = tmp;
                }
-               return 0;
+               ret = 0;
+               goto out;
        }
 
-       return -IPSET_ERR_EXIST;
+out:
+       rcu_read_unlock_bh();
+       return ret;
 }
 
 static inline int
@@ -730,8 +778,7 @@ mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
                 struct ip_set_ext *mext, struct ip_set *set, u32 flags)
 {
        if (SET_WITH_COUNTER(set))
-               ip_set_update_counter(ext_counter(data,
-                                                 (struct htype *)(set->data)),
+               ip_set_update_counter(ext_counter(data, set),
                                      ext, mext, flags);
        return mtype_do_data_match(data);
 }
@@ -745,25 +792,38 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
                 struct ip_set_ext *mext, u32 flags)
 {
        struct htype *h = set->data;
-       struct htable *t = h->table;
+       struct htable *t = rcu_dereference_bh(h->table);
        struct hbucket *n;
        struct mtype_elem *data;
+#if IPSET_NET_COUNT == 2
+       struct mtype_elem orig = *d;
+       int i, j = 0, k;
+#else
        int i, j = 0;
+#endif
        u32 key, multi = 0;
-       u8 nets_length = NETS_LENGTH(set->family);
+       u8 nets_length = NLEN(set->family);
 
        pr_debug("test by nets\n");
-       for (; j < nets_length && h->nets[j].nets && !multi; j++) {
-               mtype_data_netmask(d, h->nets[j].cidr);
+       for (; j < nets_length && h->nets[j].nets[0] && !multi; j++) {
+#if IPSET_NET_COUNT == 2
+               mtype_data_reset_elem(d, &orig);
+               mtype_data_netmask(d, h->nets[j].cidr[0], false);
+               for (k = 0; k < nets_length && h->nets[k].nets[1] && !multi;
+                    k++) {
+                       mtype_data_netmask(d, h->nets[k].cidr[1], true);
+#else
+               mtype_data_netmask(d, h->nets[j].cidr[0]);
+#endif
                key = HKEY(d, h->initval, t->htable_bits);
                n = hbucket(t, key);
                for (i = 0; i < n->pos; i++) {
-                       data = ahash_data(n, i, h->dsize);
+                       data = ahash_data(n, i, set->dsize);
                        if (!mtype_data_equal(data, d, &multi))
                                continue;
                        if (SET_WITH_TIMEOUT(set)) {
                                if (!ip_set_timeout_expired(
-                                                       ext_timeout(data, h)))
+                                               ext_timeout(data, set)))
                                        return mtype_data_match(data, ext,
                                                                mext, set,
                                                                flags);
@@ -774,6 +834,9 @@ mtype_test_cidrs(struct ip_set *set, struct mtype_elem *d,
                                return mtype_data_match(data, ext,
                                                        mext, set, flags);
                }
+#if IPSET_NET_COUNT == 2
+               }
+#endif
        }
        return 0;
 }
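
For two-dimensional sets (IPSET_NET_COUNT == 2, e.g. hash:net,net) the walk above has to try every recorded prefix pair: the outer loop masks the first dimension, the inner loop the second, and the original element is restored before each outer step. A simplified userspace sketch that just enumerates the candidate probes for an IPv4 address pair (each probe would then be hashed and looked up in the table):

        #include <stdio.h>
        #include <stdint.h>

        static uint32_t mask_by_cidr(uint32_t ip, uint8_t cidr)
        {
                return cidr ? ip & ~((1u << (32 - cidr)) - 1) : 0;
        }

        int main(void)
        {
                /* Prefix lengths actually present in the set, per dimension
                 * (what the nets[].cidr[0]/cidr[1] bookkeeping records). */
                uint8_t cidrs0[] = { 24, 16 };
                uint8_t cidrs1[] = { 28 };
                uint32_t ip  = 0xc0a80a07;      /* 192.168.10.7 */
                uint32_t ip2 = 0x0a000105;      /* 10.0.1.5     */
                unsigned i, j;

                for (i = 0; i < sizeof(cidrs0); i++)
                        for (j = 0; j < sizeof(cidrs1); j++)
                                printf("probe %08x/%u,%08x/%u\n",
                                       mask_by_cidr(ip, cidrs0[i]), cidrs0[i],
                                       mask_by_cidr(ip2, cidrs1[j]), cidrs1[j]);
                return 0;
        }
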
@@ -785,30 +848,41 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
           struct ip_set_ext *mext, u32 flags)
 {
        struct htype *h = set->data;
-       struct htable *t = h->table;
+       struct htable *t;
        struct mtype_elem *d = value;
        struct hbucket *n;
        struct mtype_elem *data;
-       int i;
+       int i, ret = 0;
        u32 key, multi = 0;
 
+       rcu_read_lock_bh();
+       t = rcu_dereference_bh(h->table);
 #ifdef IP_SET_HASH_WITH_NETS
        /* If we test an IP address and not a network address,
         * try all possible network sizes */
-       if (CIDR(d->cidr) == SET_HOST_MASK(set->family))
-               return mtype_test_cidrs(set, d, ext, mext, flags);
+       for (i = 0; i < IPSET_NET_COUNT; i++)
+               if (CIDR(d->cidr, i) != SET_HOST_MASK(set->family))
+                       break;
+       if (i == IPSET_NET_COUNT) {
+               ret = mtype_test_cidrs(set, d, ext, mext, flags);
+               goto out;
+       }
 #endif
 
        key = HKEY(d, h->initval, t->htable_bits);
        n = hbucket(t, key);
        for (i = 0; i < n->pos; i++) {
-               data = ahash_data(n, i, h->dsize);
+               data = ahash_data(n, i, set->dsize);
                if (mtype_data_equal(data, d, &multi) &&
                    !(SET_WITH_TIMEOUT(set) &&
-                     ip_set_timeout_expired(ext_timeout(data, h))))
-                       return mtype_data_match(data, ext, mext, set, flags);
+                     ip_set_timeout_expired(ext_timeout(data, set)))) {
+                       ret = mtype_data_match(data, ext, mext, set, flags);
+                       goto out;
+               }
        }
-       return 0;
+out:
+       rcu_read_unlock_bh();
+       return ret;
 }
 
 /* Reply a HEADER request: fill out the header part of the set */
@@ -816,18 +890,18 @@ static int
 mtype_head(struct ip_set *set, struct sk_buff *skb)
 {
        const struct htype *h = set->data;
+       const struct htable *t;
        struct nlattr *nested;
        size_t memsize;
 
-       read_lock_bh(&set->lock);
-       memsize = mtype_ahash_memsize(h, NETS_LENGTH(set->family));
-       read_unlock_bh(&set->lock);
+       t = rcu_dereference_bh_nfnl(h->table);
+       memsize = mtype_ahash_memsize(h, t, NLEN(set->family), set->dsize);
 
        nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
        if (!nested)
                goto nla_put_failure;
        if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
-                         htonl(jhash_size(h->table->htable_bits))) ||
+                         htonl(jhash_size(t->htable_bits))) ||
            nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
                goto nla_put_failure;
 #ifdef IP_SET_HASH_WITH_NETMASK
@@ -836,12 +910,9 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
                goto nla_put_failure;
 #endif
        if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
-           nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
-           ((set->extensions & IPSET_EXT_TIMEOUT) &&
-            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout))) ||
-           ((set->extensions & IPSET_EXT_COUNTER) &&
-            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
-                          htonl(IPSET_FLAG_WITH_COUNTERS))))
+           nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
+               goto nla_put_failure;
+       if (unlikely(ip_set_put_flags(skb, set)))
                goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
@@ -856,7 +927,7 @@ mtype_list(const struct ip_set *set,
           struct sk_buff *skb, struct netlink_callback *cb)
 {
        const struct htype *h = set->data;
-       const struct htable *t = h->table;
+       const struct htable *t = rcu_dereference_bh_nfnl(h->table);
        struct nlattr *atd, *nested;
        const struct hbucket *n;
        const struct mtype_elem *e;
@@ -874,9 +945,9 @@ mtype_list(const struct ip_set *set,
                n = hbucket(t, cb->args[2]);
                pr_debug("cb->args[2]: %lu, t %p n %p\n", cb->args[2], t, n);
                for (i = 0; i < n->pos; i++) {
-                       e = ahash_data(n, i, h->dsize);
+                       e = ahash_data(n, i, set->dsize);
                        if (SET_WITH_TIMEOUT(set) &&
-                           ip_set_timeout_expired(ext_timeout(e, h)))
+                           ip_set_timeout_expired(ext_timeout(e, set)))
                                continue;
                        pr_debug("list hash %lu hbucket %p i %u, data %p\n",
                                 cb->args[2], n, i, e);
@@ -890,13 +961,7 @@ mtype_list(const struct ip_set *set,
                        }
                        if (mtype_data_list(skb, e))
                                goto nla_put_failure;
-                       if (SET_WITH_TIMEOUT(set) &&
-                           nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-                                         htonl(ip_set_timeout_get(
-                                               ext_timeout(e, h)))))
-                               goto nla_put_failure;
-                       if (SET_WITH_COUNTER(set) &&
-                           ip_set_put_counter(skb, ext_counter(e, h)))
+                       if (ip_set_put_extensions(skb, set, e, true))
                                goto nla_put_failure;
                        ipset_nest_end(skb, nested);
                }
@@ -909,24 +974,24 @@ mtype_list(const struct ip_set *set,
 
 nla_put_failure:
        nlmsg_trim(skb, incomplete);
-       ipset_nest_end(skb, atd);
        if (unlikely(first == cb->args[2])) {
                pr_warning("Can't list set %s: one bucket does not fit into "
                           "a message. Please report it!\n", set->name);
                cb->args[2] = 0;
                return -EMSGSIZE;
        }
+       ipset_nest_end(skb, atd);
        return 0;
 }
 
 static int
-TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
-             const struct xt_action_param *par,
-             enum ipset_adt adt, struct ip_set_adt_opt *opt);
+IPSET_TOKEN(MTYPE, _kadt)(struct ip_set *set, const struct sk_buff *skb,
+           const struct xt_action_param *par,
+           enum ipset_adt adt, struct ip_set_adt_opt *opt);
 
 static int
-TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
-             enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
+IPSET_TOKEN(MTYPE, _uadt)(struct ip_set *set, struct nlattr *tb[],
+           enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);
 
 static const struct ip_set_type_variant mtype_variant = {
        .kadt   = mtype_kadt,
@@ -946,16 +1011,17 @@ static const struct ip_set_type_variant mtype_variant = {
 
 #ifdef IP_SET_EMIT_CREATE
 static int
-TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
+IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
+                           struct nlattr *tb[], u32 flags)
 {
        u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
-       u32 cadt_flags = 0;
        u8 hbits;
 #ifdef IP_SET_HASH_WITH_NETMASK
        u8 netmask;
 #endif
        size_t hsize;
        struct HTYPE *h;
+       struct htable *t;
 
        if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
                return -IPSET_ERR_INVALID_FAMILY;
@@ -1005,7 +1071,7 @@ TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
        h->netmask = netmask;
 #endif
        get_random_bytes(&h->initval, sizeof(h->initval));
-       h->timeout = IPSET_NO_TIMEOUT;
+       set->timeout = IPSET_NO_TIMEOUT;
 
        hbits = htable_bits(hashsize);
        hsize = htable_size(hbits);
@@ -1013,91 +1079,37 @@ TOKEN(HTYPE, _create)(struct ip_set *set, struct nlattr *tb[], u32 flags)
                kfree(h);
                return -ENOMEM;
        }
-       h->table = ip_set_alloc(hsize);
-       if (!h->table) {
+       t = ip_set_alloc(hsize);
+       if (!t) {
                kfree(h);
                return -ENOMEM;
        }
-       h->table->htable_bits = hbits;
+       t->htable_bits = hbits;
+       rcu_assign_pointer(h->table, t);
 
        set->data = h;
-       if (set->family ==  NFPROTO_IPV4)
-               set->variant = &TOKEN(HTYPE, 4_variant);
-       else
-               set->variant = &TOKEN(HTYPE, 6_variant);
-
-       if (tb[IPSET_ATTR_CADT_FLAGS])
-               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-       if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-               set->extensions |= IPSET_EXT_COUNTER;
-               if (tb[IPSET_ATTR_TIMEOUT]) {
-                       h->timeout =
-                               ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-                       set->extensions |= IPSET_EXT_TIMEOUT;
-                       if (set->family == NFPROTO_IPV4) {
-                               h->dsize =
-                                       sizeof(struct TOKEN(HTYPE, 4ct_elem));
-                               h->offset[IPSET_OFFSET_TIMEOUT] =
-                                       offsetof(struct TOKEN(HTYPE, 4ct_elem),
-                                                timeout);
-                               h->offset[IPSET_OFFSET_COUNTER] =
-                                       offsetof(struct TOKEN(HTYPE, 4ct_elem),
-                                                counter);
-                               TOKEN(HTYPE, 4_gc_init)(set,
-                                       TOKEN(HTYPE, 4_gc));
-                       } else {
-                               h->dsize =
-                                       sizeof(struct TOKEN(HTYPE, 6ct_elem));
-                               h->offset[IPSET_OFFSET_TIMEOUT] =
-                                       offsetof(struct TOKEN(HTYPE, 6ct_elem),
-                                                timeout);
-                               h->offset[IPSET_OFFSET_COUNTER] =
-                                       offsetof(struct TOKEN(HTYPE, 6ct_elem),
-                                                counter);
-                               TOKEN(HTYPE, 6_gc_init)(set,
-                                       TOKEN(HTYPE, 6_gc));
-                       }
-               } else {
-                       if (set->family == NFPROTO_IPV4) {
-                               h->dsize =
-                                       sizeof(struct TOKEN(HTYPE, 4c_elem));
-                               h->offset[IPSET_OFFSET_COUNTER] =
-                                       offsetof(struct TOKEN(HTYPE, 4c_elem),
-                                                counter);
-                       } else {
-                               h->dsize =
-                                       sizeof(struct TOKEN(HTYPE, 6c_elem));
-                               h->offset[IPSET_OFFSET_COUNTER] =
-                                       offsetof(struct TOKEN(HTYPE, 6c_elem),
-                                                counter);
-                       }
-               }
-       } else if (tb[IPSET_ATTR_TIMEOUT]) {
-               h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
-               set->extensions |= IPSET_EXT_TIMEOUT;
-               if (set->family == NFPROTO_IPV4) {
-                       h->dsize = sizeof(struct TOKEN(HTYPE, 4t_elem));
-                       h->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct TOKEN(HTYPE, 4t_elem),
-                                        timeout);
-                       TOKEN(HTYPE, 4_gc_init)(set, TOKEN(HTYPE, 4_gc));
-               } else {
-                       h->dsize = sizeof(struct TOKEN(HTYPE, 6t_elem));
-                       h->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct TOKEN(HTYPE, 6t_elem),
-                                        timeout);
-                       TOKEN(HTYPE, 6_gc_init)(set, TOKEN(HTYPE, 6_gc));
-               }
+       if (set->family == NFPROTO_IPV4) {
+               set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
+               set->dsize = ip_set_elem_len(set, tb,
+                               sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)));
        } else {
+               set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
+               set->dsize = ip_set_elem_len(set, tb,
+                               sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)));
+       }
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
                if (set->family == NFPROTO_IPV4)
-                       h->dsize = sizeof(struct TOKEN(HTYPE, 4_elem));
+                       IPSET_TOKEN(HTYPE, 4_gc_init)(set,
+                               IPSET_TOKEN(HTYPE, 4_gc));
                else
-                       h->dsize = sizeof(struct TOKEN(HTYPE, 6_elem));
+                       IPSET_TOKEN(HTYPE, 6_gc_init)(set,
+                               IPSET_TOKEN(HTYPE, 6_gc));
        }
 
        pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
-                set->name, jhash_size(h->table->htable_bits),
-                h->table->htable_bits, h->maxelem, set->data, h->table);
+                set->name, jhash_size(t->htable_bits),
+                t->htable_bits, h->maxelem, set->data, t);
 
        return 0;
 }
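With ip_set_elem_len() sizing the elements in the create path above, timeout, counter and comment data now live behind the fixed part of each element and are reached via set->dsize and the ext_*() accessors, which is why those helpers take the set instead of the per-type struct. A rough picture of the resulting layout (the ordering of the extensions here is an assumption; ip_set_elem_len() owns the real offsets):

	/*
	 *	| fixed element, e.g. struct hash_ip4_elem | counter? | timeout? | comment? |
	 *	|<--------------------------- set->dsize ------------------------------------>|
	 */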
index c74e6e14cd933f4f20bbd5496312fdd0056b7e52..e65fc2423d56dd2b21cee513786eec41ceabefc9 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-#define REVISION_MAX   1       /* Counters support */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1          Counters support */
+#define IPSET_TYPE_REV_MAX     2       /* Comments support */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip");
 
 /* Type specific function prefix */
 #define HTYPE          hash_ip
 #define IP_SET_HASH_WITH_NETMASK
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_ip4_elem {
@@ -43,22 +44,6 @@ struct hash_ip4_elem {
        __be32 ip;
 };
 
-struct hash_ip4t_elem {
-       __be32 ip;
-       unsigned long timeout;
-};
-
-struct hash_ip4c_elem {
-       __be32 ip;
-       struct ip_set_counter counter;
-};
-
-struct hash_ip4ct_elem {
-       __be32 ip;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -99,7 +84,7 @@ hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_ip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ip4_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        __be32 ip;
 
        ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip);
@@ -118,8 +103,8 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ip4_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip, ip_to, hosts;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, hosts;
        int ret = 0;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -178,29 +163,13 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 /* Member elements */
 struct hash_ip6_elem {
        union nf_inet_addr ip;
 };
 
-struct hash_ip6t_elem {
-       union nf_inet_addr ip;
-       unsigned long timeout;
-};
-
-struct hash_ip6c_elem {
-       union nf_inet_addr ip;
-       struct ip_set_counter counter;
-};
-
-struct hash_ip6ct_elem {
-       union nf_inet_addr ip;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -253,7 +222,7 @@ hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_ip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ip6_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
        hash_ip6_netmask(&e.ip, h->netmask);
@@ -270,7 +239,7 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ip6_elem e = {};
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -304,8 +273,8 @@ static struct ip_set_type hash_ip_type __read_mostly = {
        .features       = IPSET_TYPE_IP,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_ip_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -324,6 +293,7 @@ static struct ip_set_type hash_ip_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
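Each type source now only defines HTYPE (plus MTYPE, PF and HOST_MASK per variant) before including ip_set_hash_gen.h; the IPSET_TOKEN() macro used throughout the generated code glues those names into identifiers such as hash_ip4_kadt. A minimal stand-alone sketch of that token pasting, assuming the usual two-level expansion (IPSET_CONCAT_ below is an illustrative helper name; the real definition lives in ip_set.h):

	#define IPSET_CONCAT_(a, b)	a##b
	#define IPSET_CONCAT(a, b)	IPSET_CONCAT_(a, b)	/* expand a and b first */
	#define HTYPE			hash_ip
	/* IPSET_CONCAT(HTYPE, _create)   -> hash_ip_create   */
	/* IPSET_CONCAT(HTYPE, 4_variant) -> hash_ip4_variant */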
index 7a2d2bd98d046f7b9eec8bc10d099c3794a1e0f6..525a595dd1fe4bf0efe6db7ca9cc06d995c66430 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    SCTP and UDPLITE support added */
-#define REVISION_MAX   2 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    SCTP and UDPLITE support added */
+/*                             2    Counters support added */
+#define IPSET_TYPE_REV_MAX     3 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port");
 
 /* Type specific function prefix */
 #define HTYPE          hash_ipport
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_ipport4_elem {
@@ -46,31 +47,6 @@ struct hash_ipport4_elem {
        u8 padding;
 };
 
-struct hash_ipport4t_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       unsigned long timeout;
-};
-
-struct hash_ipport4c_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipport4ct_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -116,10 +92,9 @@ hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
                  const struct xt_action_param *par,
                  enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-       const struct hash_ipport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport4_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
                                 &e.port, &e.proto))
@@ -136,8 +111,8 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport4_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip, ip_to, p = 0, port, port_to;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        bool with_ports = false;
        int ret;
 
@@ -222,7 +197,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_ipport6_elem {
        union nf_inet_addr ip;
@@ -231,31 +206,6 @@ struct hash_ipport6_elem {
        u8 padding;
 };
 
-struct hash_ipport6t_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       unsigned long timeout;
-};
-
-struct hash_ipport6c_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipport6ct_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -306,10 +256,9 @@ hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
                  const struct xt_action_param *par,
                  enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-       const struct hash_ipport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport6_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
                                 &e.port, &e.proto))
@@ -326,7 +275,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport6_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port, port_to;
        bool with_ports = false;
        int ret;
@@ -396,8 +345,8 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_ipport_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -419,6 +368,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
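The same per-type boilerplate repeats for each remaining hash type below (hash:ip,port,ip, hash:ip,port,net, hash:net, hash:net,iface): the timeout/counter element variants are dropped, IP_SET_INIT_KEXT/IP_SET_INIT_UEXT take the set, the range variables in the uadt paths are zero-initialised, and the type advertises one more revision for comment support together with an IPSET_ATTR_COMMENT policy entry. In outline:

	/*
	 *	-#define REVISION_MAX	N
	 *	+#define IPSET_TYPE_REV_MAX	N+1	/- Comments support -/
	 *	 ...
	 *	+	[IPSET_ATTR_COMMENT]	= { .type = NLA_NUL_STRING },
	 */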
index 34e8a1acce42219686c897f4f1db30387b733b43..f5636631466eb3ee98509e8b832e46da4ad9a417 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    SCTP and UDPLITE support added */
-#define REVISION_MAX   2 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    SCTP and UDPLITE support added */
+/*                             2    Counters support added */
+#define IPSET_TYPE_REV_MAX     3 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port,ip", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port,ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port,ip");
 
 /* Type specific function prefix */
 #define HTYPE          hash_ipportip
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements  */
 struct hash_ipportip4_elem {
@@ -47,34 +48,6 @@ struct hash_ipportip4_elem {
        u8 padding;
 };
 
-struct hash_ipportip4t_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       unsigned long timeout;
-};
-
-struct hash_ipportip4c_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipportip4ct_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 static inline bool
 hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
                          const struct hash_ipportip4_elem *ip2,
@@ -120,10 +93,9 @@ hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
                    const struct xt_action_param *par,
                    enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-       const struct hash_ipportip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip4_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
                                 &e.port, &e.proto))
@@ -141,8 +113,8 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipportip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip4_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip, ip_to, p = 0, port, port_to;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        bool with_ports = false;
        int ret;
 
@@ -231,7 +203,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_ipportip6_elem {
        union nf_inet_addr ip;
@@ -241,34 +213,6 @@ struct hash_ipportip6_elem {
        u8 padding;
 };
 
-struct hash_ipportip6t_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       unsigned long timeout;
-};
-
-struct hash_ipportip6c_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipportip6ct_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 proto;
-       u8 padding;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -319,10 +263,9 @@ hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
                    const struct xt_action_param *par,
                    enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-       const struct hash_ipportip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip6_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
                                 &e.port, &e.proto))
@@ -340,7 +283,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipportip *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip6_elem e = { };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port, port_to;
        bool with_ports = false;
        int ret;
@@ -414,8 +357,8 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
        .dimension      = IPSET_DIM_THREE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_ipportip_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -437,6 +380,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index f15f3e28b9c338c6e72d3aa949d9d241c0e7cabe..5d87fe8a41ffa4888a70b6d91897360e30c253bc 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    SCTP and UDPLITE support added */
-/*                     2    Range as input support for IPv4 added */
-/*                     3    nomatch flag support added */
-#define REVISION_MAX   4 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    SCTP and UDPLITE support added */
+/*                             2    Range as input support for IPv4 added */
+/*                             3    nomatch flag support added */
+/*                             4    Counters support added */
+#define IPSET_TYPE_REV_MAX     5 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:ip,port,net", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:ip,port,net");
 
 /* Type specific function prefix */
@@ -46,7 +47,7 @@ MODULE_ALIAS("ip_set_hash:ip,port,net");
 #define IP_SET_HASH_WITH_PROTO
 #define IP_SET_HASH_WITH_NETS
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_ipportnet4_elem {
@@ -58,37 +59,6 @@ struct hash_ipportnet4_elem {
        u8 proto;
 };
 
-struct hash_ipportnet4t_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       unsigned long timeout;
-};
-
-struct hash_ipportnet4c_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipportnet4ct_elem {
-       __be32 ip;
-       __be32 ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -170,9 +140,9 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_ipportnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet4_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK - 1;
@@ -195,9 +165,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipportnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip, ip_to, p = 0, port, port_to;
-       u32 ip2_from, ip2_to, ip2_last, ip2;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, p = 0, port, port_to;
+       u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
        bool with_ports = false;
        u8 cidr;
        int ret;
@@ -272,7 +242,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (ip > ip_to)
                        swap(ip, ip_to);
        } else if (tb[IPSET_ATTR_CIDR]) {
-               u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+               cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 
                if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
@@ -306,9 +276,9 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                                                       : port;
                for (; p <= port_to; p++) {
                        e.port = htons(p);
-                       ip2 = retried
-                             && ip == ntohl(h->next.ip)
-                             && p == ntohs(h->next.port)
+                       ip2 = retried &&
+                             ip == ntohl(h->next.ip) &&
+                             p == ntohs(h->next.port)
                                ? ntohl(h->next.ip2) : ip2_from;
                        while (!after(ip2, ip2_to)) {
                                e.ip2 = htonl(ip2);
@@ -328,7 +298,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_ipportnet6_elem {
        union nf_inet_addr ip;
@@ -339,37 +309,6 @@ struct hash_ipportnet6_elem {
        u8 proto;
 };
 
-struct hash_ipportnet6t_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       unsigned long timeout;
-};
-
-struct hash_ipportnet6c_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       struct ip_set_counter counter;
-};
-
-struct hash_ipportnet6ct_elem {
-       union nf_inet_addr ip;
-       union nf_inet_addr ip2;
-       __be16 port;
-       u8 cidr:7;
-       u8 nomatch:1;
-       u8 proto;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -454,9 +393,9 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_ipportnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet6_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK - 1;
@@ -479,7 +418,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_ipportnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port, port_to;
        bool with_ports = false;
        u8 cidr;
@@ -574,8 +513,8 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
                          IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_THREE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_ipportnet_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -600,6 +539,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index 223e9f546d0fa3e414b7a53a839b56d6835093bb..8295cf4f9fdcfdb3d3da4b72792add9fc83156ad 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    Range as input support for IPv4 added */
-/*                     2    nomatch flag support added */
-#define REVISION_MAX   3 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    Range as input support for IPv4 added */
+/*                             2    nomatch flag support added */
+/*                             3    Counters support added */
+#define IPSET_TYPE_REV_MAX     4 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:net");
 
 /* Type specific function prefix */
 #define HTYPE          hash_net
 #define IP_SET_HASH_WITH_NETS
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements  */
 struct hash_net4_elem {
@@ -46,31 +47,6 @@ struct hash_net4_elem {
        u8 cidr;
 };
 
-struct hash_net4t_elem {
-       __be32 ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       unsigned long timeout;
-};
-
-struct hash_net4c_elem {
-       __be32 ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       struct ip_set_counter counter;
-};
-
-struct hash_net4ct_elem {
-       __be32 ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -143,9 +119,9 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_net *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net4_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (e.cidr == 0)
                return -EINVAL;
@@ -165,8 +141,8 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_net *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net4_elem e = { .cidr = HOST_MASK };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip = 0, ip_to, last;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, last;
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -228,7 +204,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_net6_elem {
        union nf_inet_addr ip;
@@ -237,31 +213,6 @@ struct hash_net6_elem {
        u8 cidr;
 };
 
-struct hash_net6t_elem {
-       union nf_inet_addr ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       unsigned long timeout;
-};
-
-struct hash_net6c_elem {
-       union nf_inet_addr ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       struct ip_set_counter counter;
-};
-
-struct hash_net6ct_elem {
-       union nf_inet_addr ip;
-       u16 padding0;
-       u8 nomatch;
-       u8 cidr;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -338,9 +289,9 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_net *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net6_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (e.cidr == 0)
                return -EINVAL;
@@ -357,10 +308,9 @@ static int
 hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
               enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_net *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net6_elem e = { .cidr = HOST_MASK };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        int ret;
 
        if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -406,8 +356,8 @@ static struct ip_set_type hash_net_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_net_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -425,6 +375,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
index 7d798d5d5cd30a66de3d6fade0b4110eeba583f7..3f64a66bf5d9b78bfa551124cbc7bd3d7d8a8857 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    nomatch flag support added */
-/*                     2    /0 support added */
-#define REVISION_MAX   3 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    nomatch flag support added */
+/*                             2    /0 support added */
+/*                             3    Counters support added */
+#define IPSET_TYPE_REV_MAX     4 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net,iface", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:net,iface");
 
 /* Interface name rbtree */
@@ -134,7 +135,7 @@ iface_add(struct rb_root *root, const char **iface)
 
 #define STREQ(a, b)    (strcmp(a, b) == 0)
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 struct hash_netiface4_elem_hashed {
        __be32 ip;
@@ -144,7 +145,7 @@ struct hash_netiface4_elem_hashed {
        u8 elem;
 };
 
-/* Member elements without timeout */
+/* Member elements */
 struct hash_netiface4_elem {
        __be32 ip;
        u8 physdev;
@@ -154,37 +155,6 @@ struct hash_netiface4_elem {
        const char *iface;
 };
 
-struct hash_netiface4t_elem {
-       __be32 ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       unsigned long timeout;
-};
-
-struct hash_netiface4c_elem {
-       __be32 ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       struct ip_set_counter counter;
-};
-
-struct hash_netiface4ct_elem {
-       __be32 ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -265,10 +235,10 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct hash_netiface *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface4_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
                .elem = 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        int ret;
 
        if (e.cidr == 0)
@@ -319,8 +289,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netiface *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 ip = 0, ip_to, last;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, last;
        char iface[IFNAMSIZ];
        int ret;
 
@@ -399,7 +369,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_netiface6_elem_hashed {
        union nf_inet_addr ip;
@@ -418,37 +388,6 @@ struct hash_netiface6_elem {
        const char *iface;
 };
 
-struct hash_netiface6t_elem {
-       union nf_inet_addr ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       unsigned long timeout;
-};
-
-struct hash_netiface6c_elem {
-       union nf_inet_addr ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       struct ip_set_counter counter;
-};
-
-struct hash_netiface6ct_elem {
-       union nf_inet_addr ip;
-       u8 physdev;
-       u8 cidr;
-       u8 nomatch;
-       u8 elem;
-       const char *iface;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -534,10 +473,10 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
        struct hash_netiface *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK,
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
                .elem = 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
        int ret;
 
        if (e.cidr == 0)
@@ -584,7 +523,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
        struct hash_netiface *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        char iface[IFNAMSIZ];
        int ret;
 
@@ -645,8 +584,8 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
                          IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_netiface_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -668,6 +607,7 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
                [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
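The remaining diff adds a new set type, hash:net,net, which stores a pair of network prefixes per element and therefore relies on the IPSET_NET_COUNT == 2 handling introduced in ip_set_hash_gen.h above. Each element keeps its two prefix lengths in a cidr[2]/ccmp union so the pair can be set and compared as one 16-bit value, roughly:

	/*
	 *	e.ccmp = (HOST_MASK << 8) | HOST_MASK;
	 *
	 * writes the host mask into both cidr[0] and cidr[1] with a
	 * single store, and element equality likewise compares ccmp
	 * once instead of the two bytes separately.
	 */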
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
new file mode 100644 (file)
index 0000000..4260327
--- /dev/null
@@ -0,0 +1,483 @@
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ * Copyright (C) 2013 Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+#define IPSET_TYPE_REV_MIN     0
+#define IPSET_TYPE_REV_MAX     0
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
+IP_SET_MODULE_DESC("hash:net,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
+MODULE_ALIAS("ip_set_hash:net,net");
+
+/* Type specific function prefix */
+#define HTYPE          hash_netnet
+#define IP_SET_HASH_WITH_NETS
+#define IPSET_NET_COUNT 2
+
+/* IPv4 variants */
+
+/* Member elements  */
+struct hash_netnet4_elem {
+       union {
+               __be32 ip[2];
+               __be64 ipcmp;
+       };
+       u8 nomatch;
+       union {
+               u8 cidr[2];
+               u16 ccmp;
+       };
+};
+
+/* Common functions */
+
+static inline bool
+hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
+                    const struct hash_netnet4_elem *ip2,
+                    u32 *multi)
+{
+       return ip1->ipcmp == ip2->ipcmp &&
+              ip1->ccmp == ip2->ccmp;
+}
+
+static inline int
+hash_netnet4_do_data_match(const struct hash_netnet4_elem *elem)
+{
+       return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netnet4_data_set_flags(struct hash_netnet4_elem *elem, u32 flags)
+{
+       elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
+}
+
+static inline void
+hash_netnet4_data_reset_flags(struct hash_netnet4_elem *elem, u8 *flags)
+{
+       swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netnet4_data_reset_elem(struct hash_netnet4_elem *elem,
+                         struct hash_netnet4_elem *orig)
+{
+       elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netnet4_data_netmask(struct hash_netnet4_elem *elem, u8 cidr, bool inner)
+{
+       if (inner) {
+               elem->ip[1] &= ip_set_netmask(cidr);
+               elem->cidr[1] = cidr;
+       } else {
+               elem->ip[0] &= ip_set_netmask(cidr);
+               elem->cidr[0] = cidr;
+       }
+}
+
+static bool
+hash_netnet4_data_list(struct sk_buff *skb,
+                   const struct hash_netnet4_elem *data)
+{
+       u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
+           nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static inline void
+hash_netnet4_data_next(struct hash_netnet4_elem *next,
+                   const struct hash_netnet4_elem *d)
+{
+       next->ipcmp = d->ipcmp;
+}
+
+#define MTYPE          hash_netnet4
+#define PF             4
+#define HOST_MASK      32
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+              const struct xt_action_param *par,
+              enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+       const struct hash_netnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netnet4_elem e = {
+               .cidr[0] = h->nets[0].cidr[0] ? h->nets[0].cidr[0] : HOST_MASK,
+               .cidr[1] = h->nets[0].cidr[1] ? h->nets[0].cidr[1] : HOST_MASK,
+       };
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+       if (adt == IPSET_TEST)
+               e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
+
+       ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
+       ip4addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1]);
+       e.ip[0] &= ip_set_netmask(e.cidr[0]);
+       e.ip[1] &= ip_set_netmask(e.cidr[1]);
+
+       return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+              enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+       const struct hash_netnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netnet4_elem e = { .cidr[0] = HOST_MASK,
+                                      .cidr[1] = HOST_MASK };
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, last;
+       u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
+       u8 cidr, cidr2;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
+             ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
+             ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR]) {
+               cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+               if (!cidr || cidr > HOST_MASK)
+                       return -IPSET_ERR_INVALID_CIDR;
+               e.cidr[0] = cidr;
+       }
+
+       if (tb[IPSET_ATTR_CIDR2]) {
+               cidr2 = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+               if (!cidr2 || cidr2 > HOST_MASK)
+                       return -IPSET_ERR_INVALID_CIDR;
+               e.cidr[1] = cidr2;
+       }
+
+       if (tb[IPSET_ATTR_CADT_FLAGS]) {
+               u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+               if (cadt_flags & IPSET_FLAG_NOMATCH)
+                       flags |= (IPSET_FLAG_NOMATCH << 16);
+       }
+
+       if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] &&
+                                  tb[IPSET_ATTR_IP2_TO])) {
+               e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
+               e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
+               ret = adtfn(set, &e, &ext, &ext, flags);
+               return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+                      ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip_to = ip;
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip_to < ip)
+                       swap(ip, ip_to);
+               if (ip + UINT_MAX == ip_to)
+                       return -IPSET_ERR_HASH_RANGE;
+       }
+
+       ip2_to = ip2_from;
+       if (tb[IPSET_ATTR_IP2_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+               if (ret)
+                       return ret;
+               if (ip2_to < ip2_from)
+                       swap(ip2_from, ip2_to);
+               if (ip2_from + UINT_MAX == ip2_to)
+                       return -IPSET_ERR_HASH_RANGE;
+
+       }
+
+       if (retried)
+               ip = ntohl(h->next.ip[0]);
+
+       while (!after(ip, ip_to)) {
+               e.ip[0] = htonl(ip);
+               last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+               e.cidr[0] = cidr;
+               ip2 = (retried &&
+                      ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
+                                                  : ip2_from;
+               while (!after(ip2, ip2_to)) {
+                       e.ip[1] = htonl(ip2);
+                       last2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr2);
+                       e.cidr[1] = cidr2;
+                       ret = adtfn(set, &e, &ext, &ext, flags);
+                       if (ret && !ip_set_eexist(ret, flags))
+                               return ret;
+                       else
+                               ret = 0;
+                       ip2 = last2 + 1;
+               }
+               ip = last + 1;
+       }
+       return ret;
+}
+
+/* IPv6 variants */
+
+struct hash_netnet6_elem {
+       union nf_inet_addr ip[2];
+       u8 nomatch;
+       union {
+               u8 cidr[2];
+               u16 ccmp;
+       };
+};
+
+/* Common functions */
+
+static inline bool
+hash_netnet6_data_equal(const struct hash_netnet6_elem *ip1,
+                    const struct hash_netnet6_elem *ip2,
+                    u32 *multi)
+{
+       return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
+              ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
+              ip1->ccmp == ip2->ccmp;
+}
+
+static inline int
+hash_netnet6_do_data_match(const struct hash_netnet6_elem *elem)
+{
+       return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netnet6_data_set_flags(struct hash_netnet6_elem *elem, u32 flags)
+{
+       elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH;
+}
+
+static inline void
+hash_netnet6_data_reset_flags(struct hash_netnet6_elem *elem, u8 *flags)
+{
+       swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netnet6_data_reset_elem(struct hash_netnet6_elem *elem,
+                         struct hash_netnet6_elem *orig)
+{
+       elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netnet6_data_netmask(struct hash_netnet6_elem *elem, u8 cidr, bool inner)
+{
+       if (inner) {
+               ip6_netmask(&elem->ip[1], cidr);
+               elem->cidr[1] = cidr;
+       } else {
+               ip6_netmask(&elem->ip[0], cidr);
+               elem->cidr[0] = cidr;
+       }
+}
+
+static bool
+hash_netnet6_data_list(struct sk_buff *skb,
+                   const struct hash_netnet6_elem *data)
+{
+       u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
+           nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static inline void
+hash_netnet6_data_next(struct hash_netnet4_elem *next,
+                   const struct hash_netnet6_elem *d)
+{
+}
+
+#undef MTYPE
+#undef PF
+#undef HOST_MASK
+
+#define MTYPE          hash_netnet6
+#define PF             6
+#define HOST_MASK      128
+#define IP_SET_EMIT_CREATE
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+              const struct xt_action_param *par,
+              enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+       const struct hash_netnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netnet6_elem e = {
+               .cidr[0] = h->nets[0].cidr[0] ? h->nets[0].cidr[0] : HOST_MASK,
+               .cidr[1] = h->nets[0].cidr[1] ? h->nets[0].cidr[1] : HOST_MASK
+       };
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+       if (adt == IPSET_TEST)
+               e.ccmp = (HOST_MASK << (sizeof(u8)*8)) | HOST_MASK;
+
+       ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
+       ip6addrptr(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.ip[1].in6);
+       ip6_netmask(&e.ip[0], e.cidr[0]);
+       ip6_netmask(&e.ip[1], e.cidr[1]);
+
+       return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+              enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netnet6_elem e = { .cidr[0] = HOST_MASK,
+                                      .cidr[1] = HOST_MASK };
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+               return -IPSET_ERR_PROTOCOL;
+       if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
+               return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
+             ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
+             ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+       if (tb[IPSET_ATTR_CIDR2])
+               e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+       if (!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
+           e.cidr[1] > HOST_MASK)
+               return -IPSET_ERR_INVALID_CIDR;
+
+       ip6_netmask(&e.ip[0], e.cidr[0]);
+       ip6_netmask(&e.ip[1], e.cidr[1]);
+
+       if (tb[IPSET_ATTR_CADT_FLAGS]) {
+               u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+               if (cadt_flags & IPSET_FLAG_NOMATCH)
+                       flags |= (IPSET_FLAG_NOMATCH << 16);
+       }
+
+       ret = adtfn(set, &e, &ext, &ext, flags);
+
+       return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+              ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+static struct ip_set_type hash_netnet_type __read_mostly = {
+       .name           = "hash:net,net",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_IP2 | IPSET_TYPE_NOMATCH,
+       .dimension      = IPSET_DIM_TWO,
+       .family         = NFPROTO_UNSPEC,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
+       .create         = hash_netnet_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2]        = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2_TO]     = { .type = NLA_NESTED },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_CIDR2]      = { .type = NLA_U8 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+               [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
+               [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_netnet_init(void)
+{
+       return ip_set_type_register(&hash_netnet_type);
+}
+
+static void __exit
+hash_netnet_fini(void)
+{
+       ip_set_type_unregister(&hash_netnet_type);
+}
+
+module_init(hash_netnet_init);
+module_exit(hash_netnet_fini);
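
Several element structures in these new types overlay small fields with one
wider integer (cidr[2]/ccmp here, plus ip[2]/ipcmp in the hash:net,port,net
type later in this series) so that the data_equal() helpers can compare both
halves in a single operation. A minimal standalone sketch of that layout,
using an invented demo_elem type:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct demo_elem {
        union {
                uint8_t  cidr[2];
                uint16_t ccmp;
        };
};

int main(void)
{
        struct demo_elem a, b;

        memset(&a, 0, sizeof(a));
        memset(&b, 0, sizeof(b));
        a.cidr[0] = 24; a.cidr[1] = 16;
        b.cidr[0] = 24; b.cidr[1] = 16;
        /* one 16-bit compare covers both prefix lengths */
        assert(a.ccmp == b.ccmp);

        b.cidr[1] = 32;
        assert(a.ccmp != b.ccmp);
        return 0;
}

The same reasoning lets hash_netportnet4_data_equal() below compare the two
IPv4 addresses through the 64-bit ipcmp member.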
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 09d6690bee6fd33891c4a00bbbb909f2bbf7731b..7097fb0141bf6e1363ca0b0342451e66c34773b4 100644 (file)
 #include <linux/netfilter/ipset/ip_set_getport.h>
 #include <linux/netfilter/ipset/ip_set_hash.h>
 
-#define REVISION_MIN   0
-/*                     1    SCTP and UDPLITE support added */
-/*                     2    Range as input support for IPv4 added */
-/*                     3    nomatch flag support added */
-#define REVISION_MAX   4 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    SCTP and UDPLITE support added */
+/*                             2    Range as input support for IPv4 added */
+/*                             3    nomatch flag support added */
+/*                             4    Counters support added */
+#define IPSET_TYPE_REV_MAX     5 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("hash:net,port", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("hash:net,port", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_hash:net,port");
 
 /* Type specific function prefix */
@@ -45,7 +46,7 @@ MODULE_ALIAS("ip_set_hash:net,port");
  */
 #define IP_SET_HASH_WITH_NETS_PACKED
 
-/* IPv4 variants */
+/* IPv4 variant */
 
 /* Member elements */
 struct hash_netport4_elem {
@@ -56,34 +57,6 @@ struct hash_netport4_elem {
        u8 nomatch:1;
 };
 
-struct hash_netport4t_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       unsigned long timeout;
-};
-
-struct hash_netport4c_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       struct ip_set_counter counter;
-};
-
-struct hash_netport4ct_elem {
-       __be32 ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -162,9 +135,9 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_netport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport4_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK - 1;
@@ -186,8 +159,8 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_netport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
-       u32 port, port_to, p = 0, ip = 0, ip_to, last;
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 port, port_to, p = 0, ip = 0, ip_to = 0, last;
        bool with_ports = false;
        u8 cidr;
        int ret;
@@ -287,7 +260,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        return ret;
 }
 
-/* IPv6 variants */
+/* IPv6 variant */
 
 struct hash_netport6_elem {
        union nf_inet_addr ip;
@@ -297,34 +270,6 @@ struct hash_netport6_elem {
        u8 nomatch:1;
 };
 
-struct hash_netport6t_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       unsigned long timeout;
-};
-
-struct hash_netport6c_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       struct ip_set_counter counter;
-};
-
-struct hash_netport6ct_elem {
-       union nf_inet_addr ip;
-       __be16 port;
-       u8 proto;
-       u8 cidr:7;
-       u8 nomatch:1;
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 /* Common functions */
 
 static inline bool
@@ -407,9 +352,9 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
        const struct hash_netport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport6_elem e = {
-               .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1,
+               .cidr = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK) - 1,
        };
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, h);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK - 1;
@@ -431,7 +376,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct hash_netport *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport6_elem e = { .cidr = HOST_MASK  - 1 };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port, port_to;
        bool with_ports = false;
        u8 cidr;
@@ -518,8 +463,8 @@ static struct ip_set_type hash_netport_type __read_mostly = {
        .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_NOMATCH,
        .dimension      = IPSET_DIM_TWO,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = hash_netport_create,
        .create_policy  = {
                [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
@@ -542,6 +487,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
new file mode 100644 (file)
index 0000000..363fab9
--- /dev/null
@@ -0,0 +1,588 @@
+/* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,port,net type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_getport.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+#define IPSET_TYPE_REV_MIN     0
+#define IPSET_TYPE_REV_MAX     0 /* Comments support added */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
+IP_SET_MODULE_DESC("hash:net,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
+MODULE_ALIAS("ip_set_hash:net,port,net");
+
+/* Type specific function prefix */
+#define HTYPE          hash_netportnet
+#define IP_SET_HASH_WITH_PROTO
+#define IP_SET_HASH_WITH_NETS
+#define IPSET_NET_COUNT 2
+
+/* IPv4 variant */
+
+/* Member elements */
+struct hash_netportnet4_elem {
+       union {
+               __be32 ip[2];
+               __be64 ipcmp;
+       };
+       __be16 port;
+       union {
+               u8 cidr[2];
+               u16 ccmp;
+       };
+       u8 nomatch:1;
+       u8 proto;
+};
+
+/* Common functions */
+
+static inline bool
+hash_netportnet4_data_equal(const struct hash_netportnet4_elem *ip1,
+                          const struct hash_netportnet4_elem *ip2,
+                          u32 *multi)
+{
+       return ip1->ipcmp == ip2->ipcmp &&
+              ip1->ccmp == ip2->ccmp &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline int
+hash_netportnet4_do_data_match(const struct hash_netportnet4_elem *elem)
+{
+       return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netportnet4_data_set_flags(struct hash_netportnet4_elem *elem, u32 flags)
+{
+       elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netportnet4_data_reset_flags(struct hash_netportnet4_elem *elem, u8 *flags)
+{
+       swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netportnet4_data_reset_elem(struct hash_netportnet4_elem *elem,
+                               struct hash_netportnet4_elem *orig)
+{
+       elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netportnet4_data_netmask(struct hash_netportnet4_elem *elem,
+                             u8 cidr, bool inner)
+{
+       if (inner) {
+               elem->ip[1] &= ip_set_netmask(cidr);
+               elem->cidr[1] = cidr;
+       } else {
+               elem->ip[0] &= ip_set_netmask(cidr);
+               elem->cidr[0] = cidr;
+       }
+}
+
+static bool
+hash_netportnet4_data_list(struct sk_buff *skb,
+                         const struct hash_netportnet4_elem *data)
+{
+       u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+       if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip[0]) ||
+           nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip[1]) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static inline void
+hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
+                         const struct hash_netportnet4_elem *d)
+{
+       next->ipcmp = d->ipcmp;
+       next->port = d->port;
+}
+
+#define MTYPE          hash_netportnet4
+#define PF             4
+#define HOST_MASK      32
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+                    const struct xt_action_param *par,
+                    enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+       const struct hash_netportnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netportnet4_elem e = {
+               .cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+               .cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK),
+       };
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+       if (adt == IPSET_TEST)
+               e.ccmp = (HOST_MASK << (sizeof(e.cidr[0]) * 8)) | HOST_MASK;
+
+       if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
+                                &e.port, &e.proto))
+               return -EINVAL;
+
+       ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0]);
+       ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1]);
+       e.ip[0] &= ip_set_netmask(e.cidr[0]);
+       e.ip[1] &= ip_set_netmask(e.cidr[1]);
+
+       return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+                    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+       const struct hash_netportnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netportnet4_elem e = { .cidr[0] = HOST_MASK,
+                                          .cidr[1] = HOST_MASK };
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
+       u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
+       bool with_ports = false;
+       u8 cidr, cidr2;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip) ||
+             ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from) ||
+             ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR]) {
+               cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+               if (!cidr || cidr > HOST_MASK)
+                       return -IPSET_ERR_INVALID_CIDR;
+               e.cidr[0] = cidr;
+       }
+
+       if (tb[IPSET_ATTR_CIDR2]) {
+               cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+               if (!cidr || cidr > HOST_MASK)
+                       return -IPSET_ERR_INVALID_CIDR;
+               e.cidr[1] = cidr;
+       }
+
+       if (tb[IPSET_ATTR_PORT])
+               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+               with_ports = ip_set_proto_with_ports(e.proto);
+
+               if (e.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       if (!(with_ports || e.proto == IPPROTO_ICMP))
+               e.port = 0;
+
+       if (tb[IPSET_ATTR_CADT_FLAGS]) {
+               u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+               if (cadt_flags & IPSET_FLAG_NOMATCH)
+                       flags |= (IPSET_FLAG_NOMATCH << 16);
+       }
+
+       with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
+       if (adt == IPSET_TEST ||
+           !(tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) {
+               e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
+               e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
+               ret = adtfn(set, &e, &ext, &ext, flags);
+               return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+                      ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       ip_to = ip;
+       if (tb[IPSET_ATTR_IP_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+               if (ret)
+                       return ret;
+               if (ip > ip_to)
+                       swap(ip, ip_to);
+               if (unlikely(ip + UINT_MAX == ip_to))
+                       return -IPSET_ERR_HASH_RANGE;
+       }
+
+       port_to = port = ntohs(e.port);
+       if (tb[IPSET_ATTR_PORT_TO]) {
+               port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+               if (port > port_to)
+                       swap(port, port_to);
+       }
+
+       ip2_to = ip2_from;
+       if (tb[IPSET_ATTR_IP2_TO]) {
+               ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+               if (ret)
+                       return ret;
+               if (ip2_from > ip2_to)
+                       swap(ip2_from, ip2_to);
+               if (unlikely(ip2_from + UINT_MAX == ip2_to))
+                       return -IPSET_ERR_HASH_RANGE;
+       }
+
+       if (retried)
+               ip = ntohl(h->next.ip[0]);
+
+       while (!after(ip, ip_to)) {
+               e.ip[0] = htonl(ip);
+               ip_last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+               e.cidr[0] = cidr;
+               p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
+                                                         : port;
+               for (; p <= port_to; p++) {
+                       e.port = htons(p);
+                       ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
+                              p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
+                                                        : ip2_from;
+                       while (!after(ip2, ip2_to)) {
+                               e.ip[1] = htonl(ip2);
+                               ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
+                                                               &cidr2);
+                               e.cidr[1] = cidr2;
+                               ret = adtfn(set, &e, &ext, &ext, flags);
+                               if (ret && !ip_set_eexist(ret, flags))
+                                       return ret;
+                               else
+                                       ret = 0;
+                               ip2 = ip2_last + 1;
+                       }
+               }
+               ip = ip_last + 1;
+       }
+       return ret;
+}
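
When an add is retried after an internal resize ('retried' is true), the three
nested loops above fast-forward to the position saved in h->next rather than
re-adding every element from scratch. The userspace sketch below shows only
the resume pattern; the cursor structure and function names are hypothetical.

#include <stdbool.h>
#include <stdio.h>

struct cursor {
        bool valid;
        unsigned int ip, port, ip2;
};

static void add_all(unsigned int ip_to, unsigned int port_to,
                    unsigned int ip2_to, const struct cursor *next)
{
        unsigned int ip, port, ip2;

        ip = next->valid ? next->ip : 0;
        for (; ip <= ip_to; ip++) {
                /* only the slice matching the saved outer values resumes
                 * midway; every later slice starts from the beginning */
                port = (next->valid && ip == next->ip) ? next->port : 0;
                for (; port <= port_to; port++) {
                        ip2 = (next->valid && ip == next->ip &&
                               port == next->port) ? next->ip2 : 0;
                        for (; ip2 <= ip2_to; ip2++)
                                printf("add %u,%u,%u\n", ip, port, ip2);
                }
        }
}

int main(void)
{
        struct cursor next = { .valid = true, .ip = 1, .port = 2, .ip2 = 1 };

        add_all(2, 3, 2, &next); /* resumes at (1, 2, 1) */
        return 0;
}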
+
+/* IPv6 variant */
+
+struct hash_netportnet6_elem {
+       union nf_inet_addr ip[2];
+       __be16 port;
+       union {
+               u8 cidr[2];
+               u16 ccmp;
+       };
+       u8 nomatch:1;
+       u8 proto;
+};
+
+/* Common functions */
+
+static inline bool
+hash_netportnet6_data_equal(const struct hash_netportnet6_elem *ip1,
+                          const struct hash_netportnet6_elem *ip2,
+                          u32 *multi)
+{
+       return ipv6_addr_equal(&ip1->ip[0].in6, &ip2->ip[0].in6) &&
+              ipv6_addr_equal(&ip1->ip[1].in6, &ip2->ip[1].in6) &&
+              ip1->ccmp == ip2->ccmp &&
+              ip1->port == ip2->port &&
+              ip1->proto == ip2->proto;
+}
+
+static inline int
+hash_netportnet6_do_data_match(const struct hash_netportnet6_elem *elem)
+{
+       return elem->nomatch ? -ENOTEMPTY : 1;
+}
+
+static inline void
+hash_netportnet6_data_set_flags(struct hash_netportnet6_elem *elem, u32 flags)
+{
+       elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH);
+}
+
+static inline void
+hash_netportnet6_data_reset_flags(struct hash_netportnet6_elem *elem, u8 *flags)
+{
+       swap(*flags, elem->nomatch);
+}
+
+static inline void
+hash_netportnet6_data_reset_elem(struct hash_netportnet6_elem *elem,
+                               struct hash_netportnet6_elem *orig)
+{
+       elem->ip[1] = orig->ip[1];
+}
+
+static inline void
+hash_netportnet6_data_netmask(struct hash_netportnet6_elem *elem,
+                             u8 cidr, bool inner)
+{
+       if (inner) {
+               ip6_netmask(&elem->ip[1], cidr);
+               elem->cidr[1] = cidr;
+       } else {
+               ip6_netmask(&elem->ip[0], cidr);
+               elem->cidr[0] = cidr;
+       }
+}
+
+static bool
+hash_netportnet6_data_list(struct sk_buff *skb,
+                         const struct hash_netportnet6_elem *data)
+{
+       u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
+       if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip[0].in6) ||
+           nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip[1].in6) ||
+           nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr[0]) ||
+           nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr[1]) ||
+           nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
+           (flags &&
+            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return 1;
+}
+
+static inline void
+hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
+                         const struct hash_netportnet6_elem *d)
+{
+       next->port = d->port;
+}
+
+#undef MTYPE
+#undef PF
+#undef HOST_MASK
+
+#define MTYPE          hash_netportnet6
+#define PF             6
+#define HOST_MASK      128
+#define IP_SET_EMIT_CREATE
+#include "ip_set_hash_gen.h"
+
+static int
+hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+                    const struct xt_action_param *par,
+                    enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+       const struct hash_netportnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netportnet6_elem e = {
+               .cidr[0] = IP_SET_INIT_CIDR(h->nets[0].cidr[0], HOST_MASK),
+               .cidr[1] = IP_SET_INIT_CIDR(h->nets[0].cidr[1], HOST_MASK),
+       };
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+       if (adt == IPSET_TEST)
+               e.ccmp = (HOST_MASK << (sizeof(u8) * 8)) | HOST_MASK;
+
+       if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
+                                &e.port, &e.proto))
+               return -EINVAL;
+
+       ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip[0].in6);
+       ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip[1].in6);
+       ip6_netmask(&e.ip[0], e.cidr[0]);
+       ip6_netmask(&e.ip[1], e.cidr[1]);
+
+       return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+                    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+       const struct hash_netportnet *h = set->data;
+       ipset_adtfn adtfn = set->variant->adt[adt];
+       struct hash_netportnet6_elem e = { .cidr[0] = HOST_MASK,
+                                          .cidr[1] = HOST_MASK };
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+       u32 port, port_to;
+       bool with_ports = false;
+       int ret;
+
+       if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+                    !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+                    !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+               return -IPSET_ERR_PROTOCOL;
+       if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
+               return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+       if (tb[IPSET_ATTR_LINENO])
+               *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+       ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip[0]) ||
+             ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip[1]) ||
+             ip_set_get_extensions(set, tb, &ext);
+       if (ret)
+               return ret;
+
+       if (tb[IPSET_ATTR_CIDR])
+               e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+
+       if (tb[IPSET_ATTR_CIDR2])
+               e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+
+       if (unlikely(!e.cidr[0] || e.cidr[0] > HOST_MASK || !e.cidr[1] ||
+                    e.cidr[1] > HOST_MASK))
+               return -IPSET_ERR_INVALID_CIDR;
+
+       ip6_netmask(&e.ip[0], e.cidr[0]);
+       ip6_netmask(&e.ip[1], e.cidr[1]);
+
+       if (tb[IPSET_ATTR_PORT])
+               e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
+       else
+               return -IPSET_ERR_PROTOCOL;
+
+       if (tb[IPSET_ATTR_PROTO]) {
+               e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
+               with_ports = ip_set_proto_with_ports(e.proto);
+
+               if (e.proto == 0)
+                       return -IPSET_ERR_INVALID_PROTO;
+       } else
+               return -IPSET_ERR_MISSING_PROTO;
+
+       if (!(with_ports || e.proto == IPPROTO_ICMPV6))
+               e.port = 0;
+
+       if (tb[IPSET_ATTR_CADT_FLAGS]) {
+               u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+               if (cadt_flags & IPSET_FLAG_NOMATCH)
+                       flags |= (IPSET_FLAG_NOMATCH << 16);
+       }
+
+       if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
+               ret = adtfn(set, &e, &ext, &ext, flags);
+               return ip_set_enomatch(ret, flags, adt, set) ? -ret :
+                      ip_set_eexist(ret, flags) ? 0 : ret;
+       }
+
+       port = ntohs(e.port);
+       port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+       if (port > port_to)
+               swap(port, port_to);
+
+       if (retried)
+               port = ntohs(h->next.port);
+       for (; port <= port_to; port++) {
+               e.port = htons(port);
+               ret = adtfn(set, &e, &ext, &ext, flags);
+
+               if (ret && !ip_set_eexist(ret, flags))
+                       return ret;
+               else
+                       ret = 0;
+       }
+       return ret;
+}
+
+static struct ip_set_type hash_netportnet_type __read_mostly = {
+       .name           = "hash:net,port,net",
+       .protocol       = IPSET_PROTOCOL,
+       .features       = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 |
+                         IPSET_TYPE_NOMATCH,
+       .dimension      = IPSET_DIM_THREE,
+       .family         = NFPROTO_UNSPEC,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
+       .create         = hash_netportnet_create,
+       .create_policy  = {
+               [IPSET_ATTR_HASHSIZE]   = { .type = NLA_U32 },
+               [IPSET_ATTR_MAXELEM]    = { .type = NLA_U32 },
+               [IPSET_ATTR_PROBES]     = { .type = NLA_U8 },
+               [IPSET_ATTR_RESIZE]     = { .type = NLA_U8  },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+       },
+       .adt_policy     = {
+               [IPSET_ATTR_IP]         = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP_TO]      = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2]        = { .type = NLA_NESTED },
+               [IPSET_ATTR_IP2_TO]     = { .type = NLA_NESTED },
+               [IPSET_ATTR_PORT]       = { .type = NLA_U16 },
+               [IPSET_ATTR_PORT_TO]    = { .type = NLA_U16 },
+               [IPSET_ATTR_CIDR]       = { .type = NLA_U8 },
+               [IPSET_ATTR_CIDR2]      = { .type = NLA_U8 },
+               [IPSET_ATTR_PROTO]      = { .type = NLA_U8 },
+               [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+               [IPSET_ATTR_TIMEOUT]    = { .type = NLA_U32 },
+               [IPSET_ATTR_LINENO]     = { .type = NLA_U32 },
+               [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
+               [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
+       },
+       .me             = THIS_MODULE,
+};
+
+static int __init
+hash_netportnet_init(void)
+{
+       return ip_set_type_register(&hash_netportnet_type);
+}
+
+static void __exit
+hash_netportnet_fini(void)
+{
+       ip_set_type_unregister(&hash_netportnet_type);
+}
+
+module_init(hash_netportnet_init);
+module_exit(hash_netportnet_fini);
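
Both uadt functions above shift any IPSET_FLAG_NOMATCH bit into the upper 16
bits of the flags word before calling adtfn(), and data_set_flags() later
recovers it with (flags >> 16). The sketch below only demonstrates that
packing convention; DEMO_FLAG_NOMATCH is a placeholder value, not necessarily
the kernel's constant.

#include <assert.h>
#include <stdint.h>

#define DEMO_FLAG_NOMATCH 0x08 /* placeholder bit for illustration */

/* Command flags occupy the low 16 bits; per-element (CADT) flags are
 * shifted into the high 16 bits so one u32 carries both. */
static uint32_t pack_flags(uint16_t cmd_flags, uint16_t cadt_flags)
{
        return ((uint32_t)cadt_flags << 16) | cmd_flags;
}

static int elem_nomatch(uint32_t flags)
{
        return !!((flags >> 16) & DEMO_FLAG_NOMATCH);
}

int main(void)
{
        uint32_t flags = pack_flags(0x0001, DEMO_FLAG_NOMATCH);

        assert(elem_nomatch(flags));
        assert((flags & 0xffff) == 0x0001);
        assert(!elem_nomatch(pack_flags(0x0001, 0)));
        return 0;
}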
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 979b8c90e42201c656faf914e38b4b30bac2d298..ec6f6d15dded36429ee235196b48e828f7842fbc 100644 (file)
 #include <linux/netfilter/ipset/ip_set.h>
 #include <linux/netfilter/ipset/ip_set_list.h>
 
-#define REVISION_MIN   0
-#define REVISION_MAX   1 /* Counters support added */
+#define IPSET_TYPE_REV_MIN     0
+/*                             1    Counters support added */
+#define IPSET_TYPE_REV_MAX     2 /* Comments support added */
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-IP_SET_MODULE_DESC("list:set", REVISION_MIN, REVISION_MAX);
+IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 MODULE_ALIAS("ip_set_list:set");
 
 /* Member elements  */
@@ -28,28 +29,6 @@ struct set_elem {
        ip_set_id_t id;
 };
 
-struct sett_elem {
-       struct {
-               ip_set_id_t id;
-       } __attribute__ ((aligned));
-       unsigned long timeout;
-};
-
-struct setc_elem {
-       struct {
-               ip_set_id_t id;
-       } __attribute__ ((aligned));
-       struct ip_set_counter counter;
-};
-
-struct setct_elem {
-       struct {
-               ip_set_id_t id;
-       } __attribute__ ((aligned));
-       struct ip_set_counter counter;
-       unsigned long timeout;
-};
-
 struct set_adt_elem {
        ip_set_id_t id;
        ip_set_id_t refid;
@@ -58,24 +37,14 @@ struct set_adt_elem {
 
 /* Type structure */
 struct list_set {
-       size_t dsize;           /* element size */
-       size_t offset[IPSET_OFFSET_MAX]; /* Offsets to extensions */
        u32 size;               /* size of set list array */
-       u32 timeout;            /* timeout value */
        struct timer_list gc;   /* garbage collection */
+       struct net *net;        /* namespace */
        struct set_elem members[0]; /* the set members */
 };
 
-static inline struct set_elem *
-list_set_elem(const struct list_set *map, u32 id)
-{
-       return (struct set_elem *)((void *)map->members + id * map->dsize);
-}
-
-#define ext_timeout(e, m)      \
-(unsigned long *)((void *)(e) + (m)->offset[IPSET_OFFSET_TIMEOUT])
-#define ext_counter(e, m)      \
-(struct ip_set_counter *)((void *)(e) + (m)->offset[IPSET_OFFSET_COUNTER])
+#define list_set_elem(set, map, id)    \
+       (struct set_elem *)((void *)(map)->members + (id) * (set)->dsize)
 
 static int
 list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
@@ -92,16 +61,16 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
        if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
                opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE;
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        return 0;
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(e, map)))
+                   ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                ret = ip_set_test(e->id, skb, par, opt);
                if (ret > 0) {
                        if (SET_WITH_COUNTER(set))
-                               ip_set_update_counter(ext_counter(e, map),
+                               ip_set_update_counter(ext_counter(e, set),
                                                      ext, &opt->ext,
                                                      cmdflags);
                        return ret;
@@ -121,11 +90,11 @@ list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
        int ret;
 
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        return 0;
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(e, map)))
+                   ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                ret = ip_set_add(e->id, skb, par, opt);
                if (ret == 0)
@@ -145,11 +114,11 @@ list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
        int ret;
 
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        return 0;
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(e, map)))
+                   ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                ret = ip_set_del(e->id, skb, par, opt);
                if (ret == 0)
@@ -163,8 +132,7 @@ list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
              const struct xt_action_param *par,
              enum ipset_adt adt, struct ip_set_adt_opt *opt)
 {
-       struct list_set *map = set->data;
-       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, map);
+       struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
        switch (adt) {
        case IPSET_TEST:
@@ -188,10 +156,10 @@ id_eq(const struct ip_set *set, u32 i, ip_set_id_t id)
        if (i >= map->size)
                return 0;
 
-       e = list_set_elem(map, i);
+       e = list_set_elem(set, map, i);
        return !!(e->id == id &&
                 !(SET_WITH_TIMEOUT(set) &&
-                  ip_set_timeout_expired(ext_timeout(e, map))));
+                  ip_set_timeout_expired(ext_timeout(e, set))));
 }
 
 static int
@@ -199,28 +167,36 @@ list_set_add(struct ip_set *set, u32 i, struct set_adt_elem *d,
             const struct ip_set_ext *ext)
 {
        struct list_set *map = set->data;
-       struct set_elem *e = list_set_elem(map, i);
+       struct set_elem *e = list_set_elem(set, map, i);
 
        if (e->id != IPSET_INVALID_ID) {
-               if (i == map->size - 1)
+               if (i == map->size - 1) {
                        /* Last element replaced: e.g. add new,before,last */
-                       ip_set_put_byindex(e->id);
-               else {
-                       struct set_elem *x = list_set_elem(map, map->size - 1);
+                       ip_set_put_byindex(map->net, e->id);
+                       ip_set_ext_destroy(set, e);
+               } else {
+                       struct set_elem *x = list_set_elem(set, map,
+                                                          map->size - 1);
 
                        /* Last element pushed off */
-                       if (x->id != IPSET_INVALID_ID)
-                               ip_set_put_byindex(x->id);
-                       memmove(list_set_elem(map, i + 1), e,
-                               map->dsize * (map->size - (i + 1)));
+                       if (x->id != IPSET_INVALID_ID) {
+                               ip_set_put_byindex(map->net, x->id);
+                               ip_set_ext_destroy(set, x);
+                       }
+                       memmove(list_set_elem(set, map, i + 1), e,
+                               set->dsize * (map->size - (i + 1)));
+                       /* Extensions must be initialized to zero */
+                       memset(e, 0, set->dsize);
                }
        }
 
        e->id = d->id;
        if (SET_WITH_TIMEOUT(set))
-               ip_set_timeout_set(ext_timeout(e, map), ext->timeout);
+               ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
        if (SET_WITH_COUNTER(set))
-               ip_set_init_counter(ext_counter(e, map), ext);
+               ip_set_init_counter(ext_counter(e, set), ext);
+       if (SET_WITH_COMMENT(set))
+               ip_set_init_comment(ext_comment(e, set), ext);
        return 0;
 }
 
@@ -228,16 +204,17 @@ static int
 list_set_del(struct ip_set *set, u32 i)
 {
        struct list_set *map = set->data;
-       struct set_elem *e = list_set_elem(map, i);
+       struct set_elem *e = list_set_elem(set, map, i);
 
-       ip_set_put_byindex(e->id);
+       ip_set_put_byindex(map->net, e->id);
+       ip_set_ext_destroy(set, e);
 
        if (i < map->size - 1)
-               memmove(e, list_set_elem(map, i + 1),
-                       map->dsize * (map->size - (i + 1)));
+               memmove(e, list_set_elem(set, map, i + 1),
+                       set->dsize * (map->size - (i + 1)));
 
        /* Last element */
-       e = list_set_elem(map, map->size - 1);
+       e = list_set_elem(set, map, map->size - 1);
        e->id = IPSET_INVALID_ID;
        return 0;
 }
@@ -247,13 +224,16 @@ set_cleanup_entries(struct ip_set *set)
 {
        struct list_set *map = set->data;
        struct set_elem *e;
-       u32 i;
+       u32 i = 0;
 
-       for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+       while (i < map->size) {
+               e = list_set_elem(set, map, i);
                if (e->id != IPSET_INVALID_ID &&
-                   ip_set_timeout_expired(ext_timeout(e, map)))
+                   ip_set_timeout_expired(ext_timeout(e, set)))
                        list_set_del(set, i);
+                       /* Check element moved to position i in next loop */
+               else
+                       i++;
        }
 }
 
@@ -268,11 +248,11 @@ list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        int ret;
 
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        return 0;
                else if (SET_WITH_TIMEOUT(set) &&
-                        ip_set_timeout_expired(ext_timeout(e, map)))
+                        ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                else if (e->id != d->id)
                        continue;
@@ -299,14 +279,14 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        bool flag_exist = flags & IPSET_FLAG_EXIST;
        u32 i, ret = 0;
 
+       if (SET_WITH_TIMEOUT(set))
+               set_cleanup_entries(set);
+
        /* Check already added element */
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        goto insert;
-               else if (SET_WITH_TIMEOUT(set) &&
-                        ip_set_timeout_expired(ext_timeout(e, map)))
-                       continue;
                else if (e->id != d->id)
                        continue;
 
@@ -319,18 +299,22 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
                        /* Can't re-add */
                        return -IPSET_ERR_EXIST;
                /* Update extensions */
+               ip_set_ext_destroy(set, e);
+
                if (SET_WITH_TIMEOUT(set))
-                       ip_set_timeout_set(ext_timeout(e, map), ext->timeout);
+                       ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
                if (SET_WITH_COUNTER(set))
-                       ip_set_init_counter(ext_counter(e, map), ext);
+                       ip_set_init_counter(ext_counter(e, set), ext);
+               if (SET_WITH_COMMENT(set))
+                       ip_set_init_comment(ext_comment(e, set), ext);
                /* Set is already added to the list */
-               ip_set_put_byindex(d->id);
+               ip_set_put_byindex(map->net, d->id);
                return 0;
        }
 insert:
        ret = -IPSET_ERR_LIST_FULL;
        for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        ret = d->before != 0 ? -IPSET_ERR_REF_EXIST
                                : list_set_add(set, i, d, ext);
@@ -355,12 +339,12 @@ list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        u32 i;
 
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        return d->before != 0 ? -IPSET_ERR_REF_EXIST
                                              : -IPSET_ERR_EXIST;
                else if (SET_WITH_TIMEOUT(set) &&
-                        ip_set_timeout_expired(ext_timeout(e, map)))
+                        ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                else if (e->id != d->id)
                        continue;
@@ -386,7 +370,7 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
        struct list_set *map = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
-       struct ip_set_ext ext = IP_SET_INIT_UEXT(map);
+       struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        struct ip_set *s;
        int ret = 0;
 
@@ -403,7 +387,7 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
        ret = ip_set_get_extensions(set, tb, &ext);
        if (ret)
                return ret;
-       e.id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
+       e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
        if (e.id == IPSET_INVALID_ID)
                return -IPSET_ERR_NAME;
        /* "Loop detection" */
@@ -423,7 +407,8 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
        }
 
        if (tb[IPSET_ATTR_NAMEREF]) {
-               e.refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
+               e.refid = ip_set_get_byname(map->net,
+                                           nla_data(tb[IPSET_ATTR_NAMEREF]),
                                            &s);
                if (e.refid == IPSET_INVALID_ID) {
                        ret = -IPSET_ERR_NAMEREF;
@@ -439,9 +424,9 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
 
 finish:
        if (e.refid != IPSET_INVALID_ID)
-               ip_set_put_byindex(e.refid);
+               ip_set_put_byindex(map->net, e.refid);
        if (adt != IPSET_ADD || ret)
-               ip_set_put_byindex(e.id);
+               ip_set_put_byindex(map->net, e.id);
 
        return ip_set_eexist(ret, flags) ? 0 : ret;
 }
@@ -454,9 +439,10 @@ list_set_flush(struct ip_set *set)
        u32 i;
 
        for (i = 0; i < map->size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id != IPSET_INVALID_ID) {
-                       ip_set_put_byindex(e->id);
+                       ip_set_put_byindex(map->net, e->id);
+                       ip_set_ext_destroy(set, e);
                        e->id = IPSET_INVALID_ID;
                }
        }
@@ -485,14 +471,11 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
        if (!nested)
                goto nla_put_failure;
        if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
-           (SET_WITH_TIMEOUT(set) &&
-            nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
-           (SET_WITH_COUNTER(set) &&
-            nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS,
-                          htonl(IPSET_FLAG_WITH_COUNTERS))) ||
            nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
            nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
-                         htonl(sizeof(*map) + map->size * map->dsize)))
+                         htonl(sizeof(*map) + map->size * set->dsize)))
+               goto nla_put_failure;
+       if (unlikely(ip_set_put_flags(skb, set)))
                goto nla_put_failure;
        ipset_nest_end(skb, nested);
 
@@ -515,11 +498,11 @@ list_set_list(const struct ip_set *set,
                return -EMSGSIZE;
        for (; cb->args[2] < map->size; cb->args[2]++) {
                i = cb->args[2];
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                if (e->id == IPSET_INVALID_ID)
                        goto finish;
                if (SET_WITH_TIMEOUT(set) &&
-                   ip_set_timeout_expired(ext_timeout(e, map)))
+                   ip_set_timeout_expired(ext_timeout(e, set)))
                        continue;
                nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
                if (!nested) {
@@ -530,15 +513,9 @@ list_set_list(const struct ip_set *set,
                                goto nla_put_failure;
                }
                if (nla_put_string(skb, IPSET_ATTR_NAME,
-                                  ip_set_name_byindex(e->id)))
-                       goto nla_put_failure;
-               if (SET_WITH_TIMEOUT(set) &&
-                   nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
-                                 htonl(ip_set_timeout_get(
-                                               ext_timeout(e, map)))))
+                                  ip_set_name_byindex(map->net, e->id)))
                        goto nla_put_failure;
-               if (SET_WITH_COUNTER(set) &&
-                   ip_set_put_counter(skb, ext_counter(e, map)))
+               if (ip_set_put_extensions(skb, set, e, true))
                        goto nla_put_failure;
                ipset_nest_end(skb, nested);
        }
@@ -550,11 +527,11 @@ finish:
 
 nla_put_failure:
        nla_nest_cancel(skb, nested);
-       ipset_nest_end(skb, atd);
        if (unlikely(i == first)) {
                cb->args[2] = 0;
                return -EMSGSIZE;
        }
+       ipset_nest_end(skb, atd);
        return 0;
 }
 
@@ -565,7 +542,7 @@ list_set_same_set(const struct ip_set *a, const struct ip_set *b)
        const struct list_set *y = b->data;
 
        return x->size == y->size &&
-              x->timeout == y->timeout &&
+              a->timeout == b->timeout &&
               a->extensions == b->extensions;
 }
 
@@ -594,7 +571,7 @@ list_set_gc(unsigned long ul_set)
        set_cleanup_entries(set);
        write_unlock_bh(&set->lock);
 
-       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&map->gc);
 }
 
@@ -606,43 +583,40 @@ list_set_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
        init_timer(&map->gc);
        map->gc.data = (unsigned long) set;
        map->gc.function = gc;
-       map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
+       map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
        add_timer(&map->gc);
 }
 
 /* Create list:set type of sets */
 
-static struct list_set *
-init_list_set(struct ip_set *set, u32 size, size_t dsize,
-             unsigned long timeout)
+static bool
+init_list_set(struct net *net, struct ip_set *set, u32 size)
 {
        struct list_set *map;
        struct set_elem *e;
        u32 i;
 
-       map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL);
+       map = kzalloc(sizeof(*map) + size * set->dsize, GFP_KERNEL);
        if (!map)
-               return NULL;
+               return false;
 
        map->size = size;
-       map->dsize = dsize;
-       map->timeout = timeout;
+       map->net = net;
        set->data = map;
 
        for (i = 0; i < size; i++) {
-               e = list_set_elem(map, i);
+               e = list_set_elem(set, map, i);
                e->id = IPSET_INVALID_ID;
        }
 
-       return map;
+       return true;
 }
 
 static int
-list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
+               u32 flags)
 {
-       struct list_set *map;
-       u32 size = IP_SET_LIST_DEFAULT_SIZE, cadt_flags = 0;
-       unsigned long timeout = 0;
+       u32 size = IP_SET_LIST_DEFAULT_SIZE;
 
        if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
                     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
@@ -654,45 +628,13 @@ list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
        if (size < IP_SET_LIST_MIN_SIZE)
                size = IP_SET_LIST_MIN_SIZE;
 
-       if (tb[IPSET_ATTR_CADT_FLAGS])
-               cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
-       if (tb[IPSET_ATTR_TIMEOUT])
-               timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
        set->variant = &set_variant;
-       if (cadt_flags & IPSET_FLAG_WITH_COUNTERS) {
-               set->extensions |= IPSET_EXT_COUNTER;
-               if (tb[IPSET_ATTR_TIMEOUT]) {
-                       map = init_list_set(set, size,
-                                       sizeof(struct setct_elem), timeout);
-                       if (!map)
-                               return -ENOMEM;
-                       set->extensions |= IPSET_EXT_TIMEOUT;
-                       map->offset[IPSET_OFFSET_TIMEOUT] =
-                               offsetof(struct setct_elem, timeout);
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct setct_elem, counter);
-                       list_set_gc_init(set, list_set_gc);
-               } else {
-                       map = init_list_set(set, size,
-                                           sizeof(struct setc_elem), 0);
-                       if (!map)
-                               return -ENOMEM;
-                       map->offset[IPSET_OFFSET_COUNTER] =
-                               offsetof(struct setc_elem, counter);
-               }
-       } else if (tb[IPSET_ATTR_TIMEOUT]) {
-               map = init_list_set(set, size,
-                                   sizeof(struct sett_elem), timeout);
-               if (!map)
-                       return -ENOMEM;
-               set->extensions |= IPSET_EXT_TIMEOUT;
-               map->offset[IPSET_OFFSET_TIMEOUT] =
-                       offsetof(struct sett_elem, timeout);
+       set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem));
+       if (!init_list_set(net, set, size))
+               return -ENOMEM;
+       if (tb[IPSET_ATTR_TIMEOUT]) {
+               set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
                list_set_gc_init(set, list_set_gc);
-       } else {
-               map = init_list_set(set, size, sizeof(struct set_elem), 0);
-               if (!map)
-                       return -ENOMEM;
        }
        return 0;
 }
@@ -703,8 +645,8 @@ static struct ip_set_type list_set_type __read_mostly = {
        .features       = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
        .dimension      = IPSET_DIM_ONE,
        .family         = NFPROTO_UNSPEC,
-       .revision_min   = REVISION_MIN,
-       .revision_max   = REVISION_MAX,
+       .revision_min   = IPSET_TYPE_REV_MIN,
+       .revision_max   = IPSET_TYPE_REV_MAX,
        .create         = list_set_create,
        .create_policy  = {
                [IPSET_ATTR_SIZE]       = { .type = NLA_U32 },
@@ -721,6 +663,7 @@ static struct ip_set_type list_set_type __read_mostly = {
                [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
                [IPSET_ATTR_BYTES]      = { .type = NLA_U64 },
                [IPSET_ATTR_PACKETS]    = { .type = NLA_U64 },
+               [IPSET_ATTR_COMMENT]    = { .type = NLA_NUL_STRING },
        },
        .me             = THIS_MODULE,
 };
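
The list:set conversion above removes the per-map dsize/offset bookkeeping and
the sett/setc/setct element variants: the element stride and the extension
offsets now come from the shared framework at create time, and accessors such
as ext_timeout(e, set) reach into the tail of each element. The sketch below
is a rough userspace model of that layout with invented names and a simplified
offset calculation, not the kernel's ip_set_elem_len() logic.

#include <stdalign.h>
#include <stdio.h>
#include <stdlib.h>

struct base_elem { unsigned int id; };          /* stand-in for set_elem */

/* Per-set layout: element stride plus offsets of optional extensions. */
struct layout { size_t dsize, off_timeout; };

static size_t align_up(size_t n, size_t a)
{
        return (n + a - 1) & ~(a - 1);
}

static struct layout make_layout(int with_timeout)
{
        struct layout l = { .dsize = sizeof(struct base_elem) };

        if (with_timeout) {
                l.dsize = align_up(l.dsize, alignof(unsigned long));
                l.off_timeout = l.dsize;
                l.dsize += sizeof(unsigned long);
        }
        return l;
}

#define ELEM(members, l, i) \
        ((struct base_elem *)((char *)(members) + (i) * (l)->dsize))
#define EXT_TIMEOUT(e, l) \
        ((unsigned long *)((char *)(e) + (l)->off_timeout))

int main(void)
{
        struct layout l = make_layout(1);
        void *members = calloc(4, l.dsize);
        struct base_elem *e;

        if (!members)
                return 1;
        e = ELEM(members, &l, 2);
        e->id = 42;
        *EXT_TIMEOUT(e, &l) = 3600;
        printf("dsize=%zu id=%u timeout=%lu\n", l.dsize, e->id,
               *EXT_TIMEOUT(e, &l));
        free(members);
        return 0;
}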
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 74fd00c272100d5271533689abf074c5f9e8c454..34fda62f40f61bd210b93a89a570d1da07f2ede2 100644 (file)
@@ -1239,11 +1239,11 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
  *     Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_reply4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
             const struct net_device *in, const struct net_device *out,
             int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_out(hooknum, skb, AF_INET);
+       return ip_vs_out(ops->hooknum, skb, AF_INET);
 }
 
 /*
@@ -1251,11 +1251,11 @@ ip_vs_reply4(unsigned int hooknum, struct sk_buff *skb,
  *     Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_reply4(const struct nf_hook_ops *ops, struct sk_buff *skb,
                   const struct net_device *in, const struct net_device *out,
                   int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_out(hooknum, skb, AF_INET);
+       return ip_vs_out(ops->hooknum, skb, AF_INET);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1266,11 +1266,11 @@ ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
  *     Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_reply6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
             const struct net_device *in, const struct net_device *out,
             int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_out(hooknum, skb, AF_INET6);
+       return ip_vs_out(ops->hooknum, skb, AF_INET6);
 }
 
 /*
@@ -1278,11 +1278,11 @@ ip_vs_reply6(unsigned int hooknum, struct sk_buff *skb,
  *     Check if packet is reply for established ip_vs_conn.
  */
 static unsigned int
-ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_reply6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                   const struct net_device *in, const struct net_device *out,
                   int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_out(hooknum, skb, AF_INET6);
+       return ip_vs_out(ops->hooknum, skb, AF_INET6);
 }
 
 #endif
@@ -1733,12 +1733,12 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
  *     Schedule and forward packets from remote clients
  */
 static unsigned int
-ip_vs_remote_request4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_remote_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
                      const struct net_device *in,
                      const struct net_device *out,
                      int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_in(hooknum, skb, AF_INET);
+       return ip_vs_in(ops->hooknum, skb, AF_INET);
 }
 
 /*
@@ -1746,11 +1746,11 @@ ip_vs_remote_request4(unsigned int hooknum, struct sk_buff *skb,
  *     Schedule and forward packets from local clients
  */
 static unsigned int
-ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_request4(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_in(hooknum, skb, AF_INET);
+       return ip_vs_in(ops->hooknum, skb, AF_INET);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1760,7 +1760,7 @@ ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
  * Copy info from first fragment, to the rest of them.
  */
 static unsigned int
-ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_preroute_frag6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in,
                     const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
@@ -1792,12 +1792,12 @@ ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
  *     Schedule and forward packets from remote clients
  */
 static unsigned int
-ip_vs_remote_request6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_remote_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                      const struct net_device *in,
                      const struct net_device *out,
                      int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_in(hooknum, skb, AF_INET6);
+       return ip_vs_in(ops->hooknum, skb, AF_INET6);
 }
 
 /*
@@ -1805,11 +1805,11 @@ ip_vs_remote_request6(unsigned int hooknum, struct sk_buff *skb,
  *     Schedule and forward packets from local clients
  */
 static unsigned int
-ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_local_request6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                     const struct net_device *in, const struct net_device *out,
                     int (*okfn)(struct sk_buff *))
 {
-       return ip_vs_in(hooknum, skb, AF_INET6);
+       return ip_vs_in(ops->hooknum, skb, AF_INET6);
 }
 
 #endif
@@ -1825,7 +1825,7 @@ ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
  *      and send them to ip_vs_in_icmp.
  */
 static unsigned int
-ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_forward_icmp(const struct nf_hook_ops *ops, struct sk_buff *skb,
                   const struct net_device *in, const struct net_device *out,
                   int (*okfn)(struct sk_buff *))
 {
@@ -1842,12 +1842,12 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
        if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
                return NF_ACCEPT;
 
-       return ip_vs_in_icmp(skb, &r, hooknum);
+       return ip_vs_in_icmp(skb, &r, ops->hooknum);
 }
 
 #ifdef CONFIG_IP_VS_IPV6
 static unsigned int
-ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
+ip_vs_forward_icmp_v6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                      const struct net_device *in, const struct net_device *out,
                      int (*okfn)(struct sk_buff *))
 {
@@ -1866,7 +1866,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
        if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))
                return NF_ACCEPT;
 
-       return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr);
+       return ip_vs_in_icmp_v6(skb, &r, ops->hooknum, &iphdr);
 }
 #endif
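
[editor's note] Every ip_vs entry point above changes its first parameter from a bare hook number to the registered nf_hook_ops, and reads ops->hooknum from it. A small standalone analogy of that calling-convention change (plain C, not the netfilter API; the struct and names here are invented for illustration):

#include <stdio.h>

struct hook_ops {
        unsigned int hooknum;   /* which hook point this ops was registered on */
        unsigned int priority;
        unsigned int (*hook)(const struct hook_ops *ops, const char *pkt);
};

/* Old style: the dispatcher passed the hook number separately.
 * New style: the callback receives the whole ops it was registered with,
 * so the hook number (and anything else attached to the registration)
 * travels along without extra parameters. */
static unsigned int reply_hook(const struct hook_ops *ops, const char *pkt)
{
        printf("packet %s seen on hook %u\n", pkt, ops->hooknum);
        return 0;       /* accept */
}

int main(void)
{
        struct hook_ops ops = { .hooknum = 4, .priority = 100, .hook = reply_hook };

        return (int)ops.hook(&ops, "demo");
}
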
 
index bdebd03bc8cd448a75ca94db1c88ea461f72093a..70866d192efc9351f2fcafe3ef23ce0c131fc0ba 100644 (file)
@@ -778,8 +778,8 @@ static int callforward_do_filter(const union nf_inet_addr *src,
                                   flowi6_to_flowi(&fl1), false)) {
                        if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
                                           flowi6_to_flowi(&fl2), false)) {
-                               if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
-                                           sizeof(rt1->rt6i_gateway)) &&
+                               if (ipv6_addr_equal(rt6_nexthop(rt1),
+                                                   rt6_nexthop(rt2)) &&
                                    rt1->dst.dev == rt2->dst.dev)
                                        ret = 1;
                                dst_release(&rt2->dst);
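
[editor's note] The h323 helper now compares the two routes' effective next hops with rt6_nexthop()/ipv6_addr_equal() instead of a raw memcmp() on rt6i_gateway. Assuming the usual semantics of such a helper (use the gateway when the route has one, otherwise fall back to the destination itself), a standalone sketch of the comparison; this is an analogy only, not the kernel helpers:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

struct route6 {
        struct in6_addr gateway;        /* meaningful only when has_gateway is set */
        struct in6_addr dest;
        bool has_gateway;
};

/* Effective next hop: the gateway if present, else the destination itself
 * (the assumed on-link case that a plain memcmp on the gateway field
 * would not distinguish). */
static const struct in6_addr *nexthop6(const struct route6 *rt)
{
        return rt->has_gateway ? &rt->gateway : &rt->dest;
}

static bool same_nexthop(const struct route6 *a, const struct route6 *b)
{
        return memcmp(nexthop6(a), nexthop6(b), sizeof(struct in6_addr)) == 0;
}

int main(void)
{
        struct route6 r1 = { .has_gateway = false };
        struct route6 r2 = { .has_gateway = false };

        inet_pton(AF_INET6, "2001:db8::1", &r1.dest);
        inet_pton(AF_INET6, "2001:db8::1", &r2.dest);
        printf("same next hop: %s\n", same_nexthop(&r1, &r2) ? "yes" : "no");
        return 0;
}
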
index e0c4373b47478d4d72899d95166566db34a8bcfa..466410eaa482c2a3940d3768351e92be17a644ef 100644 (file)
@@ -52,66 +52,8 @@ module_param(sip_direct_media, int, 0600);
 MODULE_PARM_DESC(sip_direct_media, "Expect Media streams between signalling "
                                   "endpoints only (default 1)");
 
-unsigned int (*nf_nat_sip_hook)(struct sk_buff *skb, unsigned int protoff,
-                               unsigned int dataoff, const char **dptr,
-                               unsigned int *datalen) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_hook);
-
-void (*nf_nat_sip_seq_adjust_hook)(struct sk_buff *skb, unsigned int protoff,
-                                  s16 off) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_seq_adjust_hook);
-
-unsigned int (*nf_nat_sip_expect_hook)(struct sk_buff *skb,
-                                      unsigned int protoff,
-                                      unsigned int dataoff,
-                                      const char **dptr,
-                                      unsigned int *datalen,
-                                      struct nf_conntrack_expect *exp,
-                                      unsigned int matchoff,
-                                      unsigned int matchlen) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sip_expect_hook);
-
-unsigned int (*nf_nat_sdp_addr_hook)(struct sk_buff *skb, unsigned int protoff,
-                                    unsigned int dataoff,
-                                    const char **dptr,
-                                    unsigned int *datalen,
-                                    unsigned int sdpoff,
-                                    enum sdp_header_types type,
-                                    enum sdp_header_types term,
-                                    const union nf_inet_addr *addr)
-                                    __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_addr_hook);
-
-unsigned int (*nf_nat_sdp_port_hook)(struct sk_buff *skb, unsigned int protoff,
-                                    unsigned int dataoff,
-                                    const char **dptr,
-                                    unsigned int *datalen,
-                                    unsigned int matchoff,
-                                    unsigned int matchlen,
-                                    u_int16_t port) __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_port_hook);
-
-unsigned int (*nf_nat_sdp_session_hook)(struct sk_buff *skb,
-                                       unsigned int protoff,
-                                       unsigned int dataoff,
-                                       const char **dptr,
-                                       unsigned int *datalen,
-                                       unsigned int sdpoff,
-                                       const union nf_inet_addr *addr)
-                                       __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_session_hook);
-
-unsigned int (*nf_nat_sdp_media_hook)(struct sk_buff *skb, unsigned int protoff,
-                                     unsigned int dataoff,
-                                     const char **dptr,
-                                     unsigned int *datalen,
-                                     struct nf_conntrack_expect *rtp_exp,
-                                     struct nf_conntrack_expect *rtcp_exp,
-                                     unsigned int mediaoff,
-                                     unsigned int medialen,
-                                     union nf_inet_addr *rtp_addr)
-                                     __read_mostly;
-EXPORT_SYMBOL_GPL(nf_nat_sdp_media_hook);
+const struct nf_nat_sip_hooks *nf_nat_sip_hooks;
+EXPORT_SYMBOL_GPL(nf_nat_sip_hooks);
 
 static int string_len(const struct nf_conn *ct, const char *dptr,
                      const char *limit, int *shift)
@@ -914,8 +856,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
        int direct_rtp = 0, skip_expect = 0, ret = NF_DROP;
        u_int16_t base_port;
        __be16 rtp_port, rtcp_port;
-       typeof(nf_nat_sdp_port_hook) nf_nat_sdp_port;
-       typeof(nf_nat_sdp_media_hook) nf_nat_sdp_media;
+       const struct nf_nat_sip_hooks *hooks;
 
        saddr = NULL;
        if (sip_direct_media) {
@@ -966,22 +907,23 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
 #endif
                        skip_expect = 1;
        } while (!skip_expect);
-       rcu_read_unlock();
 
        base_port = ntohs(tuple.dst.u.udp.port) & ~1;
        rtp_port = htons(base_port);
        rtcp_port = htons(base_port + 1);
 
        if (direct_rtp) {
-               nf_nat_sdp_port = rcu_dereference(nf_nat_sdp_port_hook);
-               if (nf_nat_sdp_port &&
-                   !nf_nat_sdp_port(skb, protoff, dataoff, dptr, datalen,
+               hooks = rcu_dereference(nf_nat_sip_hooks);
+               if (hooks &&
+                   !hooks->sdp_port(skb, protoff, dataoff, dptr, datalen,
                                     mediaoff, medialen, ntohs(rtp_port)))
                        goto err1;
        }
 
-       if (skip_expect)
+       if (skip_expect) {
+               rcu_read_unlock();
                return NF_ACCEPT;
+       }
 
        rtp_exp = nf_ct_expect_alloc(ct);
        if (rtp_exp == NULL)
@@ -995,10 +937,10 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
        nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr,
                          IPPROTO_UDP, NULL, &rtcp_port);
 
-       nf_nat_sdp_media = rcu_dereference(nf_nat_sdp_media_hook);
-       if (nf_nat_sdp_media && ct->status & IPS_NAT_MASK && !direct_rtp)
-               ret = nf_nat_sdp_media(skb, protoff, dataoff, dptr, datalen,
-                                      rtp_exp, rtcp_exp,
+       hooks = rcu_dereference(nf_nat_sip_hooks);
+       if (hooks && ct->status & IPS_NAT_MASK && !direct_rtp)
+               ret = hooks->sdp_media(skb, protoff, dataoff, dptr,
+                                      datalen, rtp_exp, rtcp_exp,
                                       mediaoff, medialen, daddr);
        else {
                if (nf_ct_expect_related(rtp_exp) == 0) {
@@ -1012,6 +954,7 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
 err2:
        nf_ct_expect_put(rtp_exp);
 err1:
+       rcu_read_unlock();
        return ret;
 }
 
@@ -1051,13 +994,12 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
        unsigned int caddr_len, maddr_len;
        unsigned int i;
        union nf_inet_addr caddr, maddr, rtp_addr;
+       const struct nf_nat_sip_hooks *hooks;
        unsigned int port;
        const struct sdp_media_type *t;
        int ret = NF_ACCEPT;
-       typeof(nf_nat_sdp_addr_hook) nf_nat_sdp_addr;
-       typeof(nf_nat_sdp_session_hook) nf_nat_sdp_session;
 
-       nf_nat_sdp_addr = rcu_dereference(nf_nat_sdp_addr_hook);
+       hooks = rcu_dereference(nf_nat_sip_hooks);
 
        /* Find beginning of session description */
        if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
@@ -1125,10 +1067,11 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
                }
 
                /* Update media connection address if present */
-               if (maddr_len && nf_nat_sdp_addr && ct->status & IPS_NAT_MASK) {
-                       ret = nf_nat_sdp_addr(skb, protoff, dataoff,
+               if (maddr_len && hooks && ct->status & IPS_NAT_MASK) {
+                       ret = hooks->sdp_addr(skb, protoff, dataoff,
                                              dptr, datalen, mediaoff,
-                                             SDP_HDR_CONNECTION, SDP_HDR_MEDIA,
+                                             SDP_HDR_CONNECTION,
+                                             SDP_HDR_MEDIA,
                                              &rtp_addr);
                        if (ret != NF_ACCEPT) {
                                nf_ct_helper_log(skb, ct, "cannot mangle SDP");
@@ -1139,10 +1082,11 @@ static int process_sdp(struct sk_buff *skb, unsigned int protoff,
        }
 
        /* Update session connection and owner addresses */
-       nf_nat_sdp_session = rcu_dereference(nf_nat_sdp_session_hook);
-       if (nf_nat_sdp_session && ct->status & IPS_NAT_MASK)
-               ret = nf_nat_sdp_session(skb, protoff, dataoff,
-                                        dptr, datalen, sdpoff, &rtp_addr);
+       hooks = rcu_dereference(nf_nat_sip_hooks);
+       if (hooks && ct->status & IPS_NAT_MASK)
+               ret = hooks->sdp_session(skb, protoff, dataoff,
+                                        dptr, datalen, sdpoff,
+                                        &rtp_addr);
 
        return ret;
 }
@@ -1242,11 +1186,11 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
        unsigned int matchoff, matchlen;
        struct nf_conntrack_expect *exp;
        union nf_inet_addr *saddr, daddr;
+       const struct nf_nat_sip_hooks *hooks;
        __be16 port;
        u8 proto;
        unsigned int expires = 0;
        int ret;
-       typeof(nf_nat_sip_expect_hook) nf_nat_sip_expect;
 
        /* Expected connections can not register again. */
        if (ct->status & IPS_EXPECTED)
@@ -1309,10 +1253,10 @@ static int process_register_request(struct sk_buff *skb, unsigned int protoff,
        exp->helper = nfct_help(ct)->helper;
        exp->flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE;
 
-       nf_nat_sip_expect = rcu_dereference(nf_nat_sip_expect_hook);
-       if (nf_nat_sip_expect && ct->status & IPS_NAT_MASK)
-               ret = nf_nat_sip_expect(skb, protoff, dataoff, dptr, datalen,
-                                       exp, matchoff, matchlen);
+       hooks = rcu_dereference(nf_nat_sip_hooks);
+       if (hooks && ct->status & IPS_NAT_MASK)
+               ret = hooks->expect(skb, protoff, dataoff, dptr, datalen,
+                                   exp, matchoff, matchlen);
        else {
                if (nf_ct_expect_related(exp) != 0) {
                        nf_ct_helper_log(skb, ct, "cannot add expectation");
@@ -1515,7 +1459,7 @@ static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
                           unsigned int protoff, unsigned int dataoff,
                           const char **dptr, unsigned int *datalen)
 {
-       typeof(nf_nat_sip_hook) nf_nat_sip;
+       const struct nf_nat_sip_hooks *hooks;
        int ret;
 
        if (strnicmp(*dptr, "SIP/2.0 ", strlen("SIP/2.0 ")) != 0)
@@ -1524,9 +1468,9 @@ static int process_sip_msg(struct sk_buff *skb, struct nf_conn *ct,
                ret = process_sip_response(skb, protoff, dataoff, dptr, datalen);
 
        if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
-               nf_nat_sip = rcu_dereference(nf_nat_sip_hook);
-               if (nf_nat_sip && !nf_nat_sip(skb, protoff, dataoff,
-                                             dptr, datalen)) {
+               hooks = rcu_dereference(nf_nat_sip_hooks);
+               if (hooks && !hooks->msg(skb, protoff, dataoff,
+                                        dptr, datalen)) {
                        nf_ct_helper_log(skb, ct, "cannot NAT SIP message");
                        ret = NF_DROP;
                }
@@ -1546,7 +1490,6 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
        s16 diff, tdiff = 0;
        int ret = NF_ACCEPT;
        bool term;
-       typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
 
        if (ctinfo != IP_CT_ESTABLISHED &&
            ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -1610,9 +1553,11 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
        }
 
        if (ret == NF_ACCEPT && ct->status & IPS_NAT_MASK) {
-               nf_nat_sip_seq_adjust = rcu_dereference(nf_nat_sip_seq_adjust_hook);
-               if (nf_nat_sip_seq_adjust)
-                       nf_nat_sip_seq_adjust(skb, protoff, tdiff);
+               const struct nf_nat_sip_hooks *hooks;
+
+               hooks = rcu_dereference(nf_nat_sip_hooks);
+               if (hooks)
+                       hooks->seq_adjust(skb, protoff, tdiff);
        }
 
        return ret;
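
[editor's note] The SIP conntrack changes above replace seven individually exported NAT hook pointers with one RCU-protected pointer to a struct of callbacks, nf_nat_sip_hooks, dereferenced where needed; note also that rcu_read_unlock() in set_expected_rtp_rtcp() moves past the points where the callbacks are invoked, so the structure stays valid while it is used. A small standalone analogy of bundling callbacks into one registerable struct (plain C, with an ordinary pointer standing in for the RCU-protected one; all names below are made up):

#include <stddef.h>
#include <stdio.h>

/* One struct gathers every callback a NAT backend may provide, replacing a
 * collection of individually registered function pointers. */
struct sip_nat_callbacks {
        int (*mangle_msg)(const char *msg);
        int (*mangle_sdp_port)(const char *msg, unsigned short port);
};

/* Plain global pointer here; the kernel code uses an RCU-protected pointer
 * (RCU_INIT_POINTER to register, rcu_dereference to use). */
static const struct sip_nat_callbacks *sip_callbacks;

static int backend_mangle_msg(const char *msg)
{
        printf("mangling \"%s\"\n", msg);
        return 1;
}

static int backend_mangle_sdp_port(const char *msg, unsigned short port)
{
        printf("rewriting SDP port to %u in \"%s\"\n", port, msg);
        return 1;
}

static const struct sip_nat_callbacks backend = {
        .mangle_msg      = backend_mangle_msg,
        .mangle_sdp_port = backend_mangle_sdp_port,
};

int main(void)
{
        sip_callbacks = &backend;                               /* init: register once */

        const struct sip_nat_callbacks *hooks = sip_callbacks;  /* one load per use */
        if (hooks && !hooks->mangle_msg("INVITE"))
                return 1;
        if (hooks && !hooks->mangle_sdp_port("m=audio", 10000))
                return 1;

        sip_callbacks = NULL;                                   /* exit: unregister */
        return 0;
}
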
index 3deec997be89e32770750abc504922761d096935..61a3c927e63cf1c9f0f9b596155e5c8a74bcd71a 100644 (file)
 
 
 /* core.c */
-extern unsigned int nf_iterate(struct list_head *head,
-                               struct sk_buff *skb,
-                               unsigned int hook,
-                               const struct net_device *indev,
-                               const struct net_device *outdev,
-                               struct nf_hook_ops **elemp,
-                               int (*okfn)(struct sk_buff *),
-                               int hook_thresh);
+unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
+                       unsigned int hook, const struct net_device *indev,
+                       const struct net_device *outdev,
+                       struct nf_hook_ops **elemp,
+                       int (*okfn)(struct sk_buff *), int hook_thresh);
 
 /* nf_queue.c */
-extern int nf_queue(struct sk_buff *skb,
-                   struct nf_hook_ops *elem,
-                   u_int8_t pf, unsigned int hook,
-                   struct net_device *indev,
-                   struct net_device *outdev,
-                   int (*okfn)(struct sk_buff *),
-                   unsigned int queuenum);
-extern int __init netfilter_queue_init(void);
+int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf,
+            unsigned int hook, struct net_device *indev,
+            struct net_device *outdev, int (*okfn)(struct sk_buff *),
+            unsigned int queuenum);
+int __init netfilter_queue_init(void);
 
 /* nf_log.c */
-extern int __init netfilter_log_init(void);
+int __init netfilter_log_init(void);
 
 #endif
index 6f0f4f7f68a5f4a7f85c03b09a35b456bedc33a7..63a81540221169fcc6115e8c36cc84500beb809d 100644 (file)
@@ -432,6 +432,26 @@ nf_nat_setup_info(struct nf_conn *ct,
 }
 EXPORT_SYMBOL(nf_nat_setup_info);
 
+unsigned int
+nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+       /* Force range to this IP; let proto decide mapping for
+        * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
+        * Use reply in case it's already been mangled (eg local packet).
+        */
+       union nf_inet_addr ip =
+               (HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+               ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
+               ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
+       struct nf_nat_range range = {
+               .flags          = NF_NAT_RANGE_MAP_IPS,
+               .min_addr       = ip,
+               .max_addr       = ip,
+       };
+       return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+}
+EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
+
 /* Do packet manipulations according to nf_nat_setup_info. */
 unsigned int nf_nat_packet(struct nf_conn *ct,
                           enum ip_conntrack_info ctinfo,
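
[editor's note] nf_nat_alloc_null_binding() above builds a degenerate one-address range (min_addr == max_addr, taken from the reply tuple) and hands it to nf_nat_setup_info(), i.e. a binding that keeps the address as-is and lets the protocol pick the per-proto part. A standalone sketch of the single-address-range idea (illustrative only; the struct, flag and helper below are not the kernel's):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#define RANGE_MAP_IPS 0x1

struct addr_range {
        struct in_addr min_addr;
        struct in_addr max_addr;
        unsigned int flags;
};

/* Build a range that pins exactly one address: min == max, no port part,
 * mirroring the null-binding idea of mapping to the address the reply
 * direction already uses. */
static struct addr_range null_binding(struct in_addr keep)
{
        struct addr_range r;

        memset(&r, 0, sizeof(r));
        r.flags = RANGE_MAP_IPS;
        r.min_addr = keep;
        r.max_addr = keep;
        return r;
}

int main(void)
{
        struct in_addr a;
        char buf[INET_ADDRSTRLEN];

        inet_pton(AF_INET, "192.0.2.1", &a);
        struct addr_range r = null_binding(a);

        inet_ntop(AF_INET, &r.min_addr, buf, sizeof(buf));
        printf("null binding pins %s (flags=0x%x)\n", buf, r.flags);
        return 0;
}
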
index f9790405b7fff5ead6004bf4266fe4e96abbc6cb..b4d691db955ed451954381e01baeed57ca588d11 100644 (file)
@@ -625,33 +625,26 @@ static struct nf_ct_helper_expectfn sip_nat = {
 
 static void __exit nf_nat_sip_fini(void)
 {
-       RCU_INIT_POINTER(nf_nat_sip_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sip_expect_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_addr_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL);
-       RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL);
+       RCU_INIT_POINTER(nf_nat_sip_hooks, NULL);
+
        nf_ct_helper_expectfn_unregister(&sip_nat);
        synchronize_rcu();
 }
 
+static const struct nf_nat_sip_hooks sip_hooks = {
+       .msg            = nf_nat_sip,
+       .seq_adjust     = nf_nat_sip_seq_adjust,
+       .expect         = nf_nat_sip_expect,
+       .sdp_addr       = nf_nat_sdp_addr,
+       .sdp_port       = nf_nat_sdp_port,
+       .sdp_session    = nf_nat_sdp_session,
+       .sdp_media      = nf_nat_sdp_media,
+};
+
 static int __init nf_nat_sip_init(void)
 {
-       BUG_ON(nf_nat_sip_hook != NULL);
-       BUG_ON(nf_nat_sip_seq_adjust_hook != NULL);
-       BUG_ON(nf_nat_sip_expect_hook != NULL);
-       BUG_ON(nf_nat_sdp_addr_hook != NULL);
-       BUG_ON(nf_nat_sdp_port_hook != NULL);
-       BUG_ON(nf_nat_sdp_session_hook != NULL);
-       BUG_ON(nf_nat_sdp_media_hook != NULL);
-       RCU_INIT_POINTER(nf_nat_sip_hook, nf_nat_sip);
-       RCU_INIT_POINTER(nf_nat_sip_seq_adjust_hook, nf_nat_sip_seq_adjust);
-       RCU_INIT_POINTER(nf_nat_sip_expect_hook, nf_nat_sip_expect);
-       RCU_INIT_POINTER(nf_nat_sdp_addr_hook, nf_nat_sdp_addr);
-       RCU_INIT_POINTER(nf_nat_sdp_port_hook, nf_nat_sdp_port);
-       RCU_INIT_POINTER(nf_nat_sdp_session_hook, nf_nat_sdp_session);
-       RCU_INIT_POINTER(nf_nat_sdp_media_hook, nf_nat_sdp_media);
+       BUG_ON(nf_nat_sip_hooks != NULL);
+       RCU_INIT_POINTER(nf_nat_sip_hooks, &sip_hooks);
        nf_ct_helper_expectfn_register(&sip_nat);
        return 0;
 }
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
new file mode 100644 (file)
index 0000000..dcddc49
--- /dev/null
@@ -0,0 +1,3275 @@
+/*
+ * Copyright (c) 2007-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+
+static LIST_HEAD(nf_tables_expressions);
+
+/**
+ *     nft_register_afinfo - register nf_tables address family info
+ *
+ *     @afi: address family info to register
+ *
+ *     Register the address family for use with nf_tables. Returns zero on
+ *     success or a negative errno code otherwise.
+ */
+int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
+{
+       INIT_LIST_HEAD(&afi->tables);
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_add_tail(&afi->list, &net->nft.af_info);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_afinfo);
+
+/**
+ *     nft_unregister_afinfo - unregister nf_tables address family info
+ *
+ *     @afi: address family info to unregister
+ *
+ *     Unregister the address family for use with nf_tables.
+ */
+void nft_unregister_afinfo(struct nft_af_info *afi)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_del(&afi->list);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
+
+static struct nft_af_info *nft_afinfo_lookup(struct net *net, int family)
+{
+       struct nft_af_info *afi;
+
+       list_for_each_entry(afi, &net->nft.af_info, list) {
+               if (afi->family == family)
+                       return afi;
+       }
+       return NULL;
+}
+
+static struct nft_af_info *
+nf_tables_afinfo_lookup(struct net *net, int family, bool autoload)
+{
+       struct nft_af_info *afi;
+
+       afi = nft_afinfo_lookup(net, family);
+       if (afi != NULL)
+               return afi;
+#ifdef CONFIG_MODULES
+       if (autoload) {
+               nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+               request_module("nft-afinfo-%u", family);
+               nfnl_lock(NFNL_SUBSYS_NFTABLES);
+               afi = nft_afinfo_lookup(net, family);
+               if (afi != NULL)
+                       return ERR_PTR(-EAGAIN);
+       }
+#endif
+       return ERR_PTR(-EAFNOSUPPORT);
+}
+
+/*
+ * Tables
+ */
+
+static struct nft_table *nft_table_lookup(const struct nft_af_info *afi,
+                                         const struct nlattr *nla)
+{
+       struct nft_table *table;
+
+       list_for_each_entry(table, &afi->tables, list) {
+               if (!nla_strcmp(nla, table->name))
+                       return table;
+       }
+       return NULL;
+}
+
+static struct nft_table *nf_tables_table_lookup(const struct nft_af_info *afi,
+                                               const struct nlattr *nla)
+{
+       struct nft_table *table;
+
+       if (nla == NULL)
+               return ERR_PTR(-EINVAL);
+
+       table = nft_table_lookup(afi, nla);
+       if (table != NULL)
+               return table;
+
+       return ERR_PTR(-ENOENT);
+}
+
+static inline u64 nf_tables_alloc_handle(struct nft_table *table)
+{
+       return ++table->hgenerator;
+}
+
+static struct nf_chain_type *chain_type[AF_MAX][NFT_CHAIN_T_MAX];
+
+static int __nf_tables_chain_type_lookup(int family, const struct nlattr *nla)
+{
+       int i;
+
+       for (i=0; i<NFT_CHAIN_T_MAX; i++) {
+               if (chain_type[family][i] != NULL &&
+                   !nla_strcmp(nla, chain_type[family][i]->name))
+                       return i;
+       }
+       return -1;
+}
+
+static int nf_tables_chain_type_lookup(const struct nft_af_info *afi,
+                                      const struct nlattr *nla,
+                                      bool autoload)
+{
+       int type;
+
+       type = __nf_tables_chain_type_lookup(afi->family, nla);
+#ifdef CONFIG_MODULES
+       if (type < 0 && autoload) {
+               nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+               request_module("nft-chain-%u-%*.s", afi->family,
+                              nla_len(nla)-1, (const char *)nla_data(nla));
+               nfnl_lock(NFNL_SUBSYS_NFTABLES);
+               type = __nf_tables_chain_type_lookup(afi->family, nla);
+       }
+#endif
+       return type;
+}
+
+static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
+       [NFTA_TABLE_NAME]       = { .type = NLA_STRING },
+       [NFTA_TABLE_FLAGS]      = { .type = NLA_U32 },
+};
+
+static int nf_tables_fill_table_info(struct sk_buff *skb, u32 portid, u32 seq,
+                                    int event, u32 flags, int family,
+                                    const struct nft_table *table)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+
+       event |= NFNL_SUBSYS_NFTABLES << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family     = family;
+       nfmsg->version          = NFNETLINK_V0;
+       nfmsg->res_id           = 0;
+
+       if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
+           nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)))
+               goto nla_put_failure;
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_trim(skb, nlh);
+       return -1;
+}
+
+static int nf_tables_table_notify(const struct sk_buff *oskb,
+                                 const struct nlmsghdr *nlh,
+                                 const struct nft_table *table,
+                                 int event, int family)
+{
+       struct sk_buff *skb;
+       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+       u32 seq = nlh ? nlh->nlmsg_seq : 0;
+       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+       bool report;
+       int err;
+
+       report = nlh ? nlmsg_report(nlh) : false;
+       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+               return 0;
+
+       err = -ENOBUFS;
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb == NULL)
+               goto err;
+
+       err = nf_tables_fill_table_info(skb, portid, seq, event, 0,
+                                       family, table);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto err;
+       }
+
+       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+                            GFP_KERNEL);
+err:
+       if (err < 0)
+               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       return err;
+}
+
+static int nf_tables_dump_tables(struct sk_buff *skb,
+                                struct netlink_callback *cb)
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       unsigned int idx = 0, s_idx = cb->args[0];
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+
+       list_for_each_entry(afi, &net->nft.af_info, list) {
+               if (family != NFPROTO_UNSPEC && family != afi->family)
+                       continue;
+
+               list_for_each_entry(table, &afi->tables, list) {
+                       if (idx < s_idx)
+                               goto cont;
+                       if (idx > s_idx)
+                               memset(&cb->args[1], 0,
+                                      sizeof(cb->args) - sizeof(cb->args[0]));
+                       if (nf_tables_fill_table_info(skb,
+                                                     NETLINK_CB(cb->skb).portid,
+                                                     cb->nlh->nlmsg_seq,
+                                                     NFT_MSG_NEWTABLE,
+                                                     NLM_F_MULTI,
+                                                     afi->family, table) < 0)
+                               goto done;
+cont:
+                       idx++;
+               }
+       }
+done:
+       cb->args[0] = idx;
+       return skb->len;
+}
+
+static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       struct sk_buff *skb2;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+       int err;
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nf_tables_dump_tables,
+               };
+               return netlink_dump_start(nlsk, skb, nlh, &c);
+       }
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!skb2)
+               return -ENOMEM;
+
+       err = nf_tables_fill_table_info(skb2, NETLINK_CB(skb).portid,
+                                       nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
+                                       family, table);
+       if (err < 0)
+               goto err;
+
+       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+       kfree_skb(skb2);
+       return err;
+}
+
+static int nf_tables_table_enable(struct nft_table *table)
+{
+       struct nft_chain *chain;
+       int err, i = 0;
+
+       list_for_each_entry(chain, &table->chains, list) {
+               err = nf_register_hook(&nft_base_chain(chain)->ops);
+               if (err < 0)
+                       goto err;
+
+               i++;
+       }
+       return 0;
+err:
+       list_for_each_entry(chain, &table->chains, list) {
+               if (i-- <= 0)
+                       break;
+
+               nf_unregister_hook(&nft_base_chain(chain)->ops);
+       }
+       return err;
+}
+
+static int nf_tables_table_disable(struct nft_table *table)
+{
+       struct nft_chain *chain;
+
+       list_for_each_entry(chain, &table->chains, list)
+               nf_unregister_hook(&nft_base_chain(chain)->ops);
+
+       return 0;
+}
+
+static int nf_tables_updtable(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[],
+                             struct nft_af_info *afi, struct nft_table *table)
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       int family = nfmsg->nfgen_family, ret = 0;
+
+       if (nla[NFTA_TABLE_FLAGS]) {
+               __be32 flags;
+
+               flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
+               if (flags & ~NFT_TABLE_F_DORMANT)
+                       return -EINVAL;
+
+               if ((flags & NFT_TABLE_F_DORMANT) &&
+                   !(table->flags & NFT_TABLE_F_DORMANT)) {
+                       ret = nf_tables_table_disable(table);
+                       if (ret >= 0)
+                               table->flags |= NFT_TABLE_F_DORMANT;
+               } else if (!(flags & NFT_TABLE_F_DORMANT) &&
+                          table->flags & NFT_TABLE_F_DORMANT) {
+                       ret = nf_tables_table_enable(table);
+                       if (ret >= 0)
+                               table->flags &= ~NFT_TABLE_F_DORMANT;
+               }
+               if (ret < 0)
+                       goto err;
+       }
+
+       nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+err:
+       return ret;
+}
+
+static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nlattr *name;
+       struct nft_af_info *afi;
+       struct nft_table *table;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+
+       afi = nf_tables_afinfo_lookup(net, family, true);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       name = nla[NFTA_TABLE_NAME];
+       table = nf_tables_table_lookup(afi, name);
+       if (IS_ERR(table)) {
+               if (PTR_ERR(table) != -ENOENT)
+                       return PTR_ERR(table);
+               table = NULL;
+       }
+
+       if (table != NULL) {
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
+               if (nlh->nlmsg_flags & NLM_F_REPLACE)
+                       return -EOPNOTSUPP;
+               return nf_tables_updtable(nlsk, skb, nlh, nla, afi, table);
+       }
+
+       table = kzalloc(sizeof(*table) + nla_len(name), GFP_KERNEL);
+       if (table == NULL)
+               return -ENOMEM;
+
+       nla_strlcpy(table->name, name, nla_len(name));
+       INIT_LIST_HEAD(&table->chains);
+       INIT_LIST_HEAD(&table->sets);
+
+       if (nla[NFTA_TABLE_FLAGS]) {
+               __be32 flags;
+
+               flags = ntohl(nla_get_be32(nla[NFTA_TABLE_FLAGS]));
+               if (flags & ~NFT_TABLE_F_DORMANT) {
+                       kfree(table);
+                       return -EINVAL;
+               }
+
+               table->flags |= flags;
+       }
+
+       list_add_tail(&table->list, &afi->tables);
+       nf_tables_table_notify(skb, nlh, table, NFT_MSG_NEWTABLE, family);
+       return 0;
+}
+
+static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       struct nft_af_info *afi;
+       struct nft_table *table;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_TABLE_NAME]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       if (table->use)
+               return -EBUSY;
+
+       list_del(&table->list);
+       nf_tables_table_notify(skb, nlh, table, NFT_MSG_DELTABLE, family);
+       kfree(table);
+       return 0;
+}
+
+int nft_register_chain_type(struct nf_chain_type *ctype)
+{
+       int err = 0;
+
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       if (chain_type[ctype->family][ctype->type] != NULL) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (!try_module_get(ctype->me))
+               goto out;
+
+       chain_type[ctype->family][ctype->type] = ctype;
+out:
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+       return err;
+}
+EXPORT_SYMBOL_GPL(nft_register_chain_type);
+
+void nft_unregister_chain_type(struct nf_chain_type *ctype)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       chain_type[ctype->family][ctype->type] = NULL;
+       module_put(ctype->me);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_chain_type);
+
+/*
+ * Chains
+ */
+
+static struct nft_chain *
+nf_tables_chain_lookup_byhandle(const struct nft_table *table, u64 handle)
+{
+       struct nft_chain *chain;
+
+       list_for_each_entry(chain, &table->chains, list) {
+               if (chain->handle == handle)
+                       return chain;
+       }
+
+       return ERR_PTR(-ENOENT);
+}
+
+static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
+                                               const struct nlattr *nla)
+{
+       struct nft_chain *chain;
+
+       if (nla == NULL)
+               return ERR_PTR(-EINVAL);
+
+       list_for_each_entry(chain, &table->chains, list) {
+               if (!nla_strcmp(nla, chain->name))
+                       return chain;
+       }
+
+       return ERR_PTR(-ENOENT);
+}
+
+static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
+       [NFTA_CHAIN_TABLE]      = { .type = NLA_STRING },
+       [NFTA_CHAIN_HANDLE]     = { .type = NLA_U64 },
+       [NFTA_CHAIN_NAME]       = { .type = NLA_STRING,
+                                   .len = NFT_CHAIN_MAXNAMELEN - 1 },
+       [NFTA_CHAIN_HOOK]       = { .type = NLA_NESTED },
+       [NFTA_CHAIN_POLICY]     = { .type = NLA_U32 },
+       [NFTA_CHAIN_TYPE]       = { .type = NLA_NUL_STRING },
+       [NFTA_CHAIN_COUNTERS]   = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
+       [NFTA_HOOK_HOOKNUM]     = { .type = NLA_U32 },
+       [NFTA_HOOK_PRIORITY]    = { .type = NLA_U32 },
+};
+
+static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
+{
+       struct nft_stats *cpu_stats, total;
+       struct nlattr *nest;
+       int cpu;
+
+       memset(&total, 0, sizeof(total));
+       for_each_possible_cpu(cpu) {
+               cpu_stats = per_cpu_ptr(stats, cpu);
+               total.pkts += cpu_stats->pkts;
+               total.bytes += cpu_stats->bytes;
+       }
+       nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
+       if (nest == NULL)
+               goto nla_put_failure;
+
+       if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts)) ||
+           nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)))
+               goto nla_put_failure;
+
+       nla_nest_end(skb, nest);
+       return 0;
+
+nla_put_failure:
+       return -ENOSPC;
+}
+
+static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq,
+                                    int event, u32 flags, int family,
+                                    const struct nft_table *table,
+                                    const struct nft_chain *chain)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+
+       event |= NFNL_SUBSYS_NFTABLES << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), flags);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family     = family;
+       nfmsg->version          = NFNETLINK_V0;
+       nfmsg->res_id           = 0;
+
+       if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
+               goto nla_put_failure;
+       if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle)))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name))
+               goto nla_put_failure;
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               const struct nft_base_chain *basechain = nft_base_chain(chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
+               struct nlattr *nest;
+
+               nest = nla_nest_start(skb, NFTA_CHAIN_HOOK);
+               if (nest == NULL)
+                       goto nla_put_failure;
+               if (nla_put_be32(skb, NFTA_HOOK_HOOKNUM, htonl(ops->hooknum)))
+                       goto nla_put_failure;
+               if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
+                       goto nla_put_failure;
+               nla_nest_end(skb, nest);
+
+               if (nla_put_be32(skb, NFTA_CHAIN_POLICY,
+                                htonl(basechain->policy)))
+                       goto nla_put_failure;
+
+               if (nla_put_string(skb, NFTA_CHAIN_TYPE,
+                       chain_type[ops->pf][nft_base_chain(chain)->type]->name))
+                               goto nla_put_failure;
+
+               if (nft_dump_stats(skb, nft_base_chain(chain)->stats))
+                       goto nla_put_failure;
+       }
+
+       if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
+               goto nla_put_failure;
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_trim(skb, nlh);
+       return -1;
+}
+
+static int nf_tables_chain_notify(const struct sk_buff *oskb,
+                                 const struct nlmsghdr *nlh,
+                                 const struct nft_table *table,
+                                 const struct nft_chain *chain,
+                                 int event, int family)
+{
+       struct sk_buff *skb;
+       u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
+       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+       u32 seq = nlh ? nlh->nlmsg_seq : 0;
+       bool report;
+       int err;
+
+       report = nlh ? nlmsg_report(nlh) : false;
+       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+               return 0;
+
+       err = -ENOBUFS;
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb == NULL)
+               goto err;
+
+       err = nf_tables_fill_chain_info(skb, portid, seq, event, 0, family,
+                                       table, chain);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto err;
+       }
+
+       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+                            GFP_KERNEL);
+err:
+       if (err < 0)
+               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       return err;
+}
+
+static int nf_tables_dump_chains(struct sk_buff *skb,
+                                struct netlink_callback *cb)
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       const struct nft_chain *chain;
+       unsigned int idx = 0, s_idx = cb->args[0];
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+
+       list_for_each_entry(afi, &net->nft.af_info, list) {
+               if (family != NFPROTO_UNSPEC && family != afi->family)
+                       continue;
+
+               list_for_each_entry(table, &afi->tables, list) {
+                       list_for_each_entry(chain, &table->chains, list) {
+                               if (idx < s_idx)
+                                       goto cont;
+                               if (idx > s_idx)
+                                       memset(&cb->args[1], 0,
+                                              sizeof(cb->args) - sizeof(cb->args[0]));
+                               if (nf_tables_fill_chain_info(skb, NETLINK_CB(cb->skb).portid,
+                                                             cb->nlh->nlmsg_seq,
+                                                             NFT_MSG_NEWCHAIN,
+                                                             NLM_F_MULTI,
+                                                             afi->family, table, chain) < 0)
+                                       goto done;
+cont:
+                               idx++;
+                       }
+               }
+       }
+done:
+       cb->args[0] = idx;
+       return skb->len;
+}
+
+
+static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       const struct nft_chain *chain;
+       struct sk_buff *skb2;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+       int err;
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nf_tables_dump_chains,
+               };
+               return netlink_dump_start(nlsk, skb, nlh, &c);
+       }
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
+       if (IS_ERR(chain))
+               return PTR_ERR(chain);
+
+       skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!skb2)
+               return -ENOMEM;
+
+       err = nf_tables_fill_chain_info(skb2, NETLINK_CB(skb).portid,
+                                       nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
+                                       family, table, chain);
+       if (err < 0)
+               goto err;
+
+       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+       kfree_skb(skb2);
+       return err;
+}
+
+static int
+nf_tables_chain_policy(struct nft_base_chain *chain, const struct nlattr *attr)
+{
+       switch (ntohl(nla_get_be32(attr))) {
+       case NF_DROP:
+               chain->policy = NF_DROP;
+               break;
+       case NF_ACCEPT:
+               chain->policy = NF_ACCEPT;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
+       [NFTA_COUNTER_PACKETS]  = { .type = NLA_U64 },
+       [NFTA_COUNTER_BYTES]    = { .type = NLA_U64 },
+};
+
+static int
+nf_tables_counters(struct nft_base_chain *chain, const struct nlattr *attr)
+{
+       struct nlattr *tb[NFTA_COUNTER_MAX+1];
+       struct nft_stats __percpu *newstats;
+       struct nft_stats *stats;
+       int err;
+
+       err = nla_parse_nested(tb, NFTA_COUNTER_MAX, attr, nft_counter_policy);
+       if (err < 0)
+               return err;
+
+       if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
+               return -EINVAL;
+
+       newstats = alloc_percpu(struct nft_stats);
+       if (newstats == NULL)
+               return -ENOMEM;
+
+       /* Restore old counters on this cpu, no problem. Per-cpu statistics
+        * are not exposed to userspace.
+        */
+       stats = this_cpu_ptr(newstats);
+       stats->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+       stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+
+       if (chain->stats) {
+               /* nfnl_lock is held, add some nfnl function for this, later */
+               struct nft_stats __percpu *oldstats =
+                       rcu_dereference_protected(chain->stats, 1);
+
+               rcu_assign_pointer(chain->stats, newstats);
+               synchronize_rcu();
+               free_percpu(oldstats);
+       } else
+               rcu_assign_pointer(chain->stats, newstats);
+
+       return 0;
+}
+
+static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nlattr * uninitialized_var(name);
+       const struct nft_af_info *afi;
+       struct nft_table *table;
+       struct nft_chain *chain;
+       struct nft_base_chain *basechain = NULL;
+       struct nlattr *ha[NFTA_HOOK_MAX + 1];
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+       u64 handle = 0;
+       int err;
+       bool create;
+
+       create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+       afi = nf_tables_afinfo_lookup(net, family, true);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       if (table->use == UINT_MAX)
+               return -EOVERFLOW;
+
+       chain = NULL;
+       name = nla[NFTA_CHAIN_NAME];
+
+       if (nla[NFTA_CHAIN_HANDLE]) {
+               handle = be64_to_cpu(nla_get_be64(nla[NFTA_CHAIN_HANDLE]));
+               chain = nf_tables_chain_lookup_byhandle(table, handle);
+               if (IS_ERR(chain))
+                       return PTR_ERR(chain);
+       } else {
+               chain = nf_tables_chain_lookup(table, name);
+               if (IS_ERR(chain)) {
+                       if (PTR_ERR(chain) != -ENOENT)
+                               return PTR_ERR(chain);
+                       chain = NULL;
+               }
+       }
+
+       if (chain != NULL) {
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
+               if (nlh->nlmsg_flags & NLM_F_REPLACE)
+                       return -EOPNOTSUPP;
+
+               if (nla[NFTA_CHAIN_HANDLE] && name &&
+                   !IS_ERR(nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME])))
+                       return -EEXIST;
+
+               if (nla[NFTA_CHAIN_POLICY]) {
+                       if (!(chain->flags & NFT_BASE_CHAIN))
+                               return -EOPNOTSUPP;
+
+                       err = nf_tables_chain_policy(nft_base_chain(chain),
+                                                    nla[NFTA_CHAIN_POLICY]);
+                       if (err < 0)
+                               return err;
+               }
+
+               if (nla[NFTA_CHAIN_COUNTERS]) {
+                       if (!(chain->flags & NFT_BASE_CHAIN))
+                               return -EOPNOTSUPP;
+
+                       err = nf_tables_counters(nft_base_chain(chain),
+                                                nla[NFTA_CHAIN_COUNTERS]);
+                       if (err < 0)
+                               return err;
+               }
+
+               if (nla[NFTA_CHAIN_HANDLE] && name)
+                       nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+
+               goto notify;
+       }
+
+       if (nla[NFTA_CHAIN_HOOK]) {
+               struct nf_hook_ops *ops;
+               nf_hookfn *hookfn;
+               u32 hooknum;
+               int type = NFT_CHAIN_T_DEFAULT;
+
+               if (nla[NFTA_CHAIN_TYPE]) {
+                       type = nf_tables_chain_type_lookup(afi,
+                                                          nla[NFTA_CHAIN_TYPE],
+                                                          create);
+                       if (type < 0)
+                               return -ENOENT;
+               }
+
+               err = nla_parse_nested(ha, NFTA_HOOK_MAX, nla[NFTA_CHAIN_HOOK],
+                                      nft_hook_policy);
+               if (err < 0)
+                       return err;
+               if (ha[NFTA_HOOK_HOOKNUM] == NULL ||
+                   ha[NFTA_HOOK_PRIORITY] == NULL)
+                       return -EINVAL;
+
+               hooknum = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+               if (hooknum >= afi->nhooks)
+                       return -EINVAL;
+
+               hookfn = chain_type[family][type]->fn[hooknum];
+               if (hookfn == NULL)
+                       return -EOPNOTSUPP;
+
+               basechain = kzalloc(sizeof(*basechain), GFP_KERNEL);
+               if (basechain == NULL)
+                       return -ENOMEM;
+
+               basechain->type = type;
+               chain = &basechain->chain;
+
+               ops = &basechain->ops;
+               ops->pf         = family;
+               ops->owner      = afi->owner;
+               ops->hooknum    = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
+               ops->priority   = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
+               ops->priv       = chain;
+               ops->hook       = hookfn;
+               if (afi->hooks[ops->hooknum])
+                       ops->hook = afi->hooks[ops->hooknum];
+
+               chain->flags |= NFT_BASE_CHAIN;
+
+               if (nla[NFTA_CHAIN_POLICY]) {
+                       err = nf_tables_chain_policy(basechain,
+                                                    nla[NFTA_CHAIN_POLICY]);
+                       if (err < 0) {
+                               free_percpu(basechain->stats);
+                               kfree(basechain);
+                               return err;
+                       }
+               } else
+                       basechain->policy = NF_ACCEPT;
+
+               if (nla[NFTA_CHAIN_COUNTERS]) {
+                       err = nf_tables_counters(basechain,
+                                                nla[NFTA_CHAIN_COUNTERS]);
+                       if (err < 0) {
+                               free_percpu(basechain->stats);
+                               kfree(basechain);
+                               return err;
+                       }
+               } else {
+                       struct nft_stats __percpu *newstats;
+
+                       newstats = alloc_percpu(struct nft_stats);
+                       if (newstats == NULL)
+                               return -ENOMEM;
+
+                       rcu_assign_pointer(nft_base_chain(chain)->stats,
+                                          newstats);
+               }
+       } else {
+               chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+               if (chain == NULL)
+                       return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&chain->rules);
+       chain->handle = nf_tables_alloc_handle(table);
+       chain->net = net;
+       chain->table = table;
+       nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN);
+
+       if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+           chain->flags & NFT_BASE_CHAIN) {
+               err = nf_register_hook(&nft_base_chain(chain)->ops);
+               if (err < 0) {
+                       free_percpu(basechain->stats);
+                       kfree(basechain);
+                       return err;
+               }
+       }
+       list_add_tail(&chain->list, &table->chains);
+       table->use++;
+notify:
+       nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_NEWCHAIN,
+                              family);
+       return 0;
+}
+
+static void nf_tables_rcu_chain_destroy(struct rcu_head *head)
+{
+       struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
+
+       BUG_ON(chain->use > 0);
+
+       if (chain->flags & NFT_BASE_CHAIN) {
+               free_percpu(nft_base_chain(chain)->stats);
+               kfree(nft_base_chain(chain));
+       } else
+               kfree(chain);
+}
+
+static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
+                             const struct nlmsghdr *nlh,
+                             const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       struct nft_table *table;
+       struct nft_chain *chain;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_CHAIN_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       chain = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME]);
+       if (IS_ERR(chain))
+               return PTR_ERR(chain);
+
+       if (!list_empty(&chain->rules))
+               return -EBUSY;
+
+       list_del(&chain->list);
+       table->use--;
+
+       if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+           chain->flags & NFT_BASE_CHAIN)
+               nf_unregister_hook(&nft_base_chain(chain)->ops);
+
+       nf_tables_chain_notify(skb, nlh, table, chain, NFT_MSG_DELCHAIN,
+                              family);
+
+       /* Make sure all rule references are gone before this is released */
+       call_rcu(&chain->rcu_head, nf_tables_rcu_chain_destroy);
+       return 0;
+}
+
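+/* Populate the common context passed to the rule, expression and set handlers. */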
+static void nft_ctx_init(struct nft_ctx *ctx,
+                        const struct sk_buff *skb,
+                        const struct nlmsghdr *nlh,
+                        const struct nft_af_info *afi,
+                        const struct nft_table *table,
+                        const struct nft_chain *chain,
+                        const struct nlattr * const *nla)
+{
+       ctx->net   = sock_net(skb->sk);
+       ctx->skb   = skb;
+       ctx->nlh   = nlh;
+       ctx->afi   = afi;
+       ctx->table = table;
+       ctx->chain = chain;
+       ctx->nla   = nla;
+}
+
+/*
+ * Expressions
+ */
+
+/**
+ *     nft_register_expr - register nf_tables expr type
+ *     @type: expr type
+ *
+ *     Registers the expr type for use with nf_tables. Returns zero on
+ *     success or a negative errno code otherwise.
+ */
+int nft_register_expr(struct nft_expr_type *type)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_add_tail(&type->list, &nf_tables_expressions);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_expr);
+
+/**
+ *     nft_unregister_expr - unregister nf_tables expr type
+ *     @type: expr type
+ *
+ *     Unregisters the expr type for use with nf_tables.
+ */
+void nft_unregister_expr(struct nft_expr_type *type)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_del(&type->list);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_expr);
+
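+/* Find a registered expression type by name; the caller must hold the
+ * nfnetlink subsystem mutex.
+ */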
+static const struct nft_expr_type *__nft_expr_type_get(struct nlattr *nla)
+{
+       const struct nft_expr_type *type;
+
+       list_for_each_entry(type, &nf_tables_expressions, list) {
+               if (!nla_strcmp(nla, type->name))
+                       return type;
+       }
+       return NULL;
+}
+
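+/*
+ * Look up an expression type and take a reference on its module. If the
+ * type is not registered yet, try to autoload an "nft-expr-<name>" module
+ * and, if that makes the type appear, return -EAGAIN so that the caller
+ * can retry the operation.
+ */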
+static const struct nft_expr_type *nft_expr_type_get(struct nlattr *nla)
+{
+       const struct nft_expr_type *type;
+
+       if (nla == NULL)
+               return ERR_PTR(-EINVAL);
+
+       type = __nft_expr_type_get(nla);
+       if (type != NULL && try_module_get(type->owner))
+               return type;
+
+#ifdef CONFIG_MODULES
+       if (type == NULL) {
+               nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+               request_module("nft-expr-%.*s",
+                              nla_len(nla), (char *)nla_data(nla));
+               nfnl_lock(NFNL_SUBSYS_NFTABLES);
+               if (__nft_expr_type_get(nla))
+                       return ERR_PTR(-EAGAIN);
+       }
+#endif
+       return ERR_PTR(-ENOENT);
+}
+
+static const struct nla_policy nft_expr_policy[NFTA_EXPR_MAX + 1] = {
+       [NFTA_EXPR_NAME]        = { .type = NLA_STRING },
+       [NFTA_EXPR_DATA]        = { .type = NLA_NESTED },
+};
+
+static int nf_tables_fill_expr_info(struct sk_buff *skb,
+                                   const struct nft_expr *expr)
+{
+       if (nla_put_string(skb, NFTA_EXPR_NAME, expr->ops->type->name))
+               goto nla_put_failure;
+
+       if (expr->ops->dump) {
+               struct nlattr *data = nla_nest_start(skb, NFTA_EXPR_DATA);
+               if (data == NULL)
+                       goto nla_put_failure;
+               if (expr->ops->dump(skb, expr) < 0)
+                       goto nla_put_failure;
+               nla_nest_end(skb, data);
+       }
+
+       return skb->len;
+
+nla_put_failure:
+       return -1;
+}
+
+struct nft_expr_info {
+       const struct nft_expr_ops       *ops;
+       struct nlattr                   *tb[NFT_EXPR_MAXATTR + 1];
+};
+
+static int nf_tables_expr_parse(const struct nft_ctx *ctx,
+                               const struct nlattr *nla,
+                               struct nft_expr_info *info)
+{
+       const struct nft_expr_type *type;
+       const struct nft_expr_ops *ops;
+       struct nlattr *tb[NFTA_EXPR_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, NFTA_EXPR_MAX, nla, nft_expr_policy);
+       if (err < 0)
+               return err;
+
+       type = nft_expr_type_get(tb[NFTA_EXPR_NAME]);
+       if (IS_ERR(type))
+               return PTR_ERR(type);
+
+       if (tb[NFTA_EXPR_DATA]) {
+               err = nla_parse_nested(info->tb, type->maxattr,
+                                      tb[NFTA_EXPR_DATA], type->policy);
+               if (err < 0)
+                       goto err1;
+       } else
+               memset(info->tb, 0, sizeof(info->tb[0]) * (type->maxattr + 1));
+
+       if (type->select_ops != NULL) {
+               ops = type->select_ops(ctx,
+                                      (const struct nlattr * const *)info->tb);
+               if (IS_ERR(ops)) {
+                       err = PTR_ERR(ops);
+                       goto err1;
+               }
+       } else
+               ops = type->ops;
+
+       info->ops = ops;
+       return 0;
+
+err1:
+       module_put(type->owner);
+       return err;
+}
+
+static int nf_tables_newexpr(const struct nft_ctx *ctx,
+                            const struct nft_expr_info *info,
+                            struct nft_expr *expr)
+{
+       const struct nft_expr_ops *ops = info->ops;
+       int err;
+
+       expr->ops = ops;
+       if (ops->init) {
+               err = ops->init(ctx, expr, (const struct nlattr **)info->tb);
+               if (err < 0)
+                       goto err1;
+       }
+
+       return 0;
+
+err1:
+       expr->ops = NULL;
+       return err;
+}
+
+static void nf_tables_expr_destroy(struct nft_expr *expr)
+{
+       if (expr->ops->destroy)
+               expr->ops->destroy(expr);
+       module_put(expr->ops->type->owner);
+}
+
+/*
+ * Rules
+ */
+
+static struct nft_rule *__nf_tables_rule_lookup(const struct nft_chain *chain,
+                                               u64 handle)
+{
+       struct nft_rule *rule;
+
+       // FIXME: linear search, this needs a proper lookup structure
+       list_for_each_entry(rule, &chain->rules, list) {
+               if (handle == rule->handle)
+                       return rule;
+       }
+
+       return ERR_PTR(-ENOENT);
+}
+
+static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
+                                             const struct nlattr *nla)
+{
+       if (nla == NULL)
+               return ERR_PTR(-EINVAL);
+
+       return __nf_tables_rule_lookup(chain, be64_to_cpu(nla_get_be64(nla)));
+}
+
+static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
+       [NFTA_RULE_TABLE]       = { .type = NLA_STRING },
+       [NFTA_RULE_CHAIN]       = { .type = NLA_STRING,
+                                   .len = NFT_CHAIN_MAXNAMELEN - 1 },
+       [NFTA_RULE_HANDLE]      = { .type = NLA_U64 },
+       [NFTA_RULE_EXPRESSIONS] = { .type = NLA_NESTED },
+       [NFTA_RULE_COMPAT]      = { .type = NLA_NESTED },
+       [NFTA_RULE_POSITION]    = { .type = NLA_U64 },
+};
+
+static int nf_tables_fill_rule_info(struct sk_buff *skb, u32 portid, u32 seq,
+                                   int event, u32 flags, int family,
+                                   const struct nft_table *table,
+                                   const struct nft_chain *chain,
+                                   const struct nft_rule *rule)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+       const struct nft_expr *expr, *next;
+       struct nlattr *list;
+       const struct nft_rule *prule;
+       int type = event | NFNL_SUBSYS_NFTABLES << 8;
+
+       nlh = nlmsg_put(skb, portid, seq, type, sizeof(struct nfgenmsg),
+                       flags);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family     = family;
+       nfmsg->version          = NFNETLINK_V0;
+       nfmsg->res_id           = 0;
+
+       if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_RULE_CHAIN, chain->name))
+               goto nla_put_failure;
+       if (nla_put_be64(skb, NFTA_RULE_HANDLE, cpu_to_be64(rule->handle)))
+               goto nla_put_failure;
+
+       if ((event != NFT_MSG_DELRULE) && (rule->list.prev != &chain->rules)) {
+               prule = list_entry(rule->list.prev, struct nft_rule, list);
+               if (nla_put_be64(skb, NFTA_RULE_POSITION,
+                                cpu_to_be64(prule->handle)))
+                       goto nla_put_failure;
+       }
+
+       list = nla_nest_start(skb, NFTA_RULE_EXPRESSIONS);
+       if (list == NULL)
+               goto nla_put_failure;
+       nft_rule_for_each_expr(expr, next, rule) {
+               struct nlattr *elem = nla_nest_start(skb, NFTA_LIST_ELEM);
+               if (elem == NULL)
+                       goto nla_put_failure;
+               if (nf_tables_fill_expr_info(skb, expr) < 0)
+                       goto nla_put_failure;
+               nla_nest_end(skb, elem);
+       }
+       nla_nest_end(skb, list);
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_trim(skb, nlh);
+       return -1;
+}
+
+static int nf_tables_rule_notify(const struct sk_buff *oskb,
+                                const struct nlmsghdr *nlh,
+                                const struct nft_table *table,
+                                const struct nft_chain *chain,
+                                const struct nft_rule *rule,
+                                int event, u32 flags, int family)
+{
+       struct sk_buff *skb;
+       u32 portid = NETLINK_CB(oskb).portid;
+       struct net *net = oskb ? sock_net(oskb->sk) : &init_net;
+       u32 seq = nlh->nlmsg_seq;
+       bool report;
+       int err;
+
+       report = nlmsg_report(nlh);
+       if (!report && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+               return 0;
+
+       err = -ENOBUFS;
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb == NULL)
+               goto err;
+
+       err = nf_tables_fill_rule_info(skb, portid, seq, event, flags,
+                                      family, table, chain, rule);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto err;
+       }
+
+       err = nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report,
+                            GFP_KERNEL);
+err:
+       if (err < 0)
+               nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, err);
+       return err;
+}
+
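+/*
+ * Rule updates are staged using a two generation bitmask in rule->genmask:
+ * a clear bit means the rule is active in that generation. Newly added
+ * rules set only the current generation bit (inactive now, active once the
+ * generation cursor flips at commit time), while rules scheduled for
+ * deletion set only the next generation bit (still active until commit).
+ */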
+static inline bool
+nft_rule_is_active(struct net *net, const struct nft_rule *rule)
+{
+       return (rule->genmask & (1 << net->nft.gencursor)) == 0;
+}
+
+static inline int gencursor_next(struct net *net)
+{
+       return net->nft.gencursor+1 == 1 ? 1 : 0;
+}
+
+static inline int
+nft_rule_is_active_next(struct net *net, const struct nft_rule *rule)
+{
+       return (rule->genmask & (1 << gencursor_next(net))) == 0;
+}
+
+static inline void
+nft_rule_activate_next(struct net *net, struct nft_rule *rule)
+{
+       /* Now inactive, will be active in the future */
+       rule->genmask = (1 << net->nft.gencursor);
+}
+
+static inline void
+nft_rule_disactivate_next(struct net *net, struct nft_rule *rule)
+{
+       rule->genmask = (1 << gencursor_next(net));
+}
+
+static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
+{
+       rule->genmask = 0;
+}
+
+static int nf_tables_dump_rules(struct sk_buff *skb,
+                               struct netlink_callback *cb)
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       const struct nft_chain *chain;
+       const struct nft_rule *rule;
+       unsigned int idx = 0, s_idx = cb->args[0];
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+       u8 genctr = ACCESS_ONCE(net->nft.genctr);
+       u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
+
+       list_for_each_entry(afi, &net->nft.af_info, list) {
+               if (family != NFPROTO_UNSPEC && family != afi->family)
+                       continue;
+
+               list_for_each_entry(table, &afi->tables, list) {
+                       list_for_each_entry(chain, &table->chains, list) {
+                               list_for_each_entry(rule, &chain->rules, list) {
+                                       if (!nft_rule_is_active(net, rule))
+                                               goto cont;
+                                       if (idx < s_idx)
+                                               goto cont;
+                                       if (idx > s_idx)
+                                               memset(&cb->args[1], 0,
+                                                      sizeof(cb->args) - sizeof(cb->args[0]));
+                                       if (nf_tables_fill_rule_info(skb, NETLINK_CB(cb->skb).portid,
+                                                                     cb->nlh->nlmsg_seq,
+                                                                     NFT_MSG_NEWRULE,
+                                                                     NLM_F_MULTI | NLM_F_APPEND,
+                                                                     afi->family, table, chain, rule) < 0)
+                                               goto done;
+cont:
+                                       idx++;
+                               }
+                       }
+               }
+       }
+done:
+       /* Invalidate this dump if a transition to a new generation happened */
+       if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
+               return -EBUSY;
+
+       cb->args[0] = idx;
+       return skb->len;
+}
+
+static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
+                            const struct nlmsghdr *nlh,
+                            const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       const struct nft_chain *chain;
+       const struct nft_rule *rule;
+       struct sk_buff *skb2;
+       struct net *net = sock_net(skb->sk);
+       int family = nfmsg->nfgen_family;
+       int err;
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nf_tables_dump_rules,
+               };
+               return netlink_dump_start(nlsk, skb, nlh, &c);
+       }
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+       if (IS_ERR(chain))
+               return PTR_ERR(chain);
+
+       rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+       if (IS_ERR(rule))
+               return PTR_ERR(rule);
+
+       skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (!skb2)
+               return -ENOMEM;
+
+       err = nf_tables_fill_rule_info(skb2, NETLINK_CB(skb).portid,
+                                      nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
+                                      family, table, chain, rule);
+       if (err < 0)
+               goto err;
+
+       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+       kfree_skb(skb2);
+       return err;
+}
+
+static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
+{
+       struct nft_rule *rule = container_of(head, struct nft_rule, rcu_head);
+       struct nft_expr *expr;
+
+       /*
+        * Careful: some expressions might not be initialized in case this
+        * is called on error from nf_tables_newrule().
+        */
+       expr = nft_expr_first(rule);
+       while (expr->ops && expr != nft_expr_last(rule)) {
+               nf_tables_expr_destroy(expr);
+               expr = nft_expr_next(expr);
+       }
+       kfree(rule);
+}
+
+static void nf_tables_rule_destroy(struct nft_rule *rule)
+{
+       call_rcu(&rule->rcu_head, nf_tables_rcu_rule_destroy);
+}
+
+#define NFT_RULE_MAXEXPRS      128
+
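+/*
+ * Scratch area used by nf_tables_newrule() to parse up to NFT_RULE_MAXEXPRS
+ * expressions; access is serialized by the nfnetlink subsystem mutex.
+ */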
+static struct nft_expr_info *info;
+
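+/*
+ * Queue a rule update on the per-netns commit list; the entry is either
+ * applied by nf_tables_commit() or rolled back by nf_tables_abort().
+ */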
+static struct nft_rule_trans *
+nf_tables_trans_add(struct nft_rule *rule, const struct nft_ctx *ctx)
+{
+       struct nft_rule_trans *rupd;
+
+       rupd = kmalloc(sizeof(struct nft_rule_trans), GFP_KERNEL);
+       if (rupd == NULL)
+              return NULL;
+
+       rupd->chain = ctx->chain;
+       rupd->table = ctx->table;
+       rupd->rule = rule;
+       rupd->family = ctx->afi->family;
+       rupd->nlh = ctx->nlh;
+       list_add_tail(&rupd->list, &ctx->net->nft.commit_list);
+
+       return rupd;
+}
+
+static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
+                            const struct nlmsghdr *nlh,
+                            const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       struct net *net = sock_net(skb->sk);
+       struct nft_table *table;
+       struct nft_chain *chain;
+       struct nft_rule *rule, *old_rule = NULL;
+       struct nft_rule_trans *repl = NULL;
+       struct nft_expr *expr;
+       struct nft_ctx ctx;
+       struct nlattr *tmp;
+       unsigned int size, i, n;
+       int err, rem;
+       bool create;
+       u64 handle, pos_handle;
+
+       create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+       afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+       if (IS_ERR(chain))
+               return PTR_ERR(chain);
+
+       if (nla[NFTA_RULE_HANDLE]) {
+               handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE]));
+               rule = __nf_tables_rule_lookup(chain, handle);
+               if (IS_ERR(rule))
+                       return PTR_ERR(rule);
+
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
+               if (nlh->nlmsg_flags & NLM_F_REPLACE)
+                       old_rule = rule;
+               else
+                       return -EOPNOTSUPP;
+       } else {
+               if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
+                       return -EINVAL;
+               handle = nf_tables_alloc_handle(table);
+       }
+
+       if (nla[NFTA_RULE_POSITION]) {
+               if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+                       return -EOPNOTSUPP;
+
+               pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
+               old_rule = __nf_tables_rule_lookup(chain, pos_handle);
+               if (IS_ERR(old_rule))
+                       return PTR_ERR(old_rule);
+       }
+
+       nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+
+       n = 0;
+       size = 0;
+       if (nla[NFTA_RULE_EXPRESSIONS]) {
+               nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) {
+                       err = -EINVAL;
+                       if (nla_type(tmp) != NFTA_LIST_ELEM)
+                               goto err1;
+                       if (n == NFT_RULE_MAXEXPRS)
+                               goto err1;
+                       err = nf_tables_expr_parse(&ctx, tmp, &info[n]);
+                       if (err < 0)
+                               goto err1;
+                       size += info[n].ops->size;
+                       n++;
+               }
+       }
+
+       err = -ENOMEM;
+       rule = kzalloc(sizeof(*rule) + size, GFP_KERNEL);
+       if (rule == NULL)
+               goto err1;
+
+       nft_rule_activate_next(net, rule);
+
+       rule->handle = handle;
+       rule->dlen   = size;
+
+       expr = nft_expr_first(rule);
+       for (i = 0; i < n; i++) {
+               err = nf_tables_newexpr(&ctx, &info[i], expr);
+               if (err < 0)
+                       goto err2;
+               info[i].ops = NULL;
+               expr = nft_expr_next(expr);
+       }
+
+       if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+               if (nft_rule_is_active_next(net, old_rule)) {
+                       repl = nf_tables_trans_add(old_rule, &ctx);
+                       if (repl == NULL) {
+                               err = -ENOMEM;
+                               goto err2;
+                       }
+                       nft_rule_disactivate_next(net, old_rule);
+                       list_add_tail(&rule->list, &old_rule->list);
+               } else {
+                       err = -ENOENT;
+                       goto err2;
+               }
+       } else if (nlh->nlmsg_flags & NLM_F_APPEND) {
+               if (old_rule)
+                       list_add_rcu(&rule->list, &old_rule->list);
+               else
+                       list_add_tail_rcu(&rule->list, &chain->rules);
+       } else {
+               if (old_rule)
+                       list_add_tail_rcu(&rule->list, &old_rule->list);
+               else
+                       list_add_rcu(&rule->list, &chain->rules);
+       }
+
+       if (nf_tables_trans_add(rule, &ctx) == NULL) {
+               err = -ENOMEM;
+               goto err3;
+       }
+       return 0;
+
+err3:
+       list_del_rcu(&rule->list);
+       if (repl) {
+               list_del_rcu(&repl->rule->list);
+               list_del(&repl->list);
+               nft_rule_clear(net, repl->rule);
+               kfree(repl);
+       }
+err2:
+       nf_tables_rule_destroy(rule);
+err1:
+       for (i = 0; i < n; i++) {
+               if (info[i].ops != NULL)
+                       module_put(info[i].ops->type->owner);
+       }
+       return err;
+}
+
+static int
+nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
+{
+       /* You cannot delete the same rule twice */
+       if (nft_rule_is_active_next(ctx->net, rule)) {
+               if (nf_tables_trans_add(rule, ctx) == NULL)
+                       return -ENOMEM;
+               nft_rule_disactivate_next(ctx->net, rule);
+               return 0;
+       }
+       return -ENOENT;
+}
+
+static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
+                            const struct nlmsghdr *nlh,
+                            const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       struct net *net = sock_net(skb->sk);
+       const struct nft_table *table;
+       struct nft_chain *chain;
+       struct nft_rule *rule, *tmp;
+       int family = nfmsg->nfgen_family, err = 0;
+       struct nft_ctx ctx;
+
+       afi = nf_tables_afinfo_lookup(net, family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_RULE_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+       if (IS_ERR(chain))
+               return PTR_ERR(chain);
+
+       nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
+
+       if (nla[NFTA_RULE_HANDLE]) {
+               rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
+               if (IS_ERR(rule))
+                       return PTR_ERR(rule);
+
+               err = nf_tables_delrule_one(&ctx, rule);
+       } else {
+               /* Remove all rules in this chain */
+               list_for_each_entry_safe(rule, tmp, &chain->rules, list) {
+                       err = nf_tables_delrule_one(&ctx, rule);
+                       if (err < 0)
+                               break;
+               }
+       }
+
+       return err;
+}
+
+static int nf_tables_commit(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nft_rule_trans *rupd, *tmp;
+
+       /* Bump generation counter, invalidate any dump in progress */
+       net->nft.genctr++;
+
+       /* A new generation has just started */
+       net->nft.gencursor = gencursor_next(net);
+
+       /* Make sure all packets have left the previous generation before
+        * purging old rules.
+        */
+       synchronize_rcu();
+
+       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
+               /* Delete this rule from the dirty list */
+               list_del(&rupd->list);
+
+               /* This rule was inactive in the past and just became active.
+                * Clear the next bit of the genmask since its meaning has
+                * changed: it now refers to the future generation.
+                */
+               if (nft_rule_is_active(net, rupd->rule)) {
+                       nft_rule_clear(net, rupd->rule);
+                       nf_tables_rule_notify(skb, rupd->nlh, rupd->table,
+                                             rupd->chain, rupd->rule,
+                                             NFT_MSG_NEWRULE, 0,
+                                             rupd->family);
+                       kfree(rupd);
+                       continue;
+               }
+
+               /* This rule is in the past, get rid of it */
+               list_del_rcu(&rupd->rule->list);
+               nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain,
+                                     rupd->rule, NFT_MSG_DELRULE, 0,
+                                     rupd->family);
+               nf_tables_rule_destroy(rupd->rule);
+               kfree(rupd);
+       }
+
+       return 0;
+}
+
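+/*
+ * Roll back all queued updates: rules that never became active are unlinked
+ * and destroyed, while rules that were scheduled for deletion are made
+ * fully active again.
+ */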
+static int nf_tables_abort(struct sk_buff *skb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nft_rule_trans *rupd, *tmp;
+
+       list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
+               /* Delete all rules from the dirty list */
+               list_del(&rupd->list);
+
+               if (!nft_rule_is_active_next(net, rupd->rule)) {
+                       nft_rule_clear(net, rupd->rule);
+                       kfree(rupd);
+                       continue;
+               }
+
+               /* This rule is inactive, get rid of it */
+               list_del_rcu(&rupd->rule->list);
+               nf_tables_rule_destroy(rupd->rule);
+               kfree(rupd);
+       }
+       return 0;
+}
+
+/*
+ * Sets
+ */
+
+static LIST_HEAD(nf_tables_set_ops);
+
+int nft_register_set(struct nft_set_ops *ops)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_add_tail(&ops->list, &nf_tables_set_ops);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_register_set);
+
+void nft_unregister_set(struct nft_set_ops *ops)
+{
+       nfnl_lock(NFNL_SUBSYS_NFTABLES);
+       list_del(&ops->list);
+       nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+}
+EXPORT_SYMBOL_GPL(nft_unregister_set);
+
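+/*
+ * Pick the first registered set backend whose feature mask covers the
+ * requested NFT_SET_INTERVAL/NFT_SET_MAP flags, autoloading a backend
+ * module if none is registered yet. A module reference is taken on the
+ * selected backend.
+ */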
+static const struct nft_set_ops *nft_select_set_ops(const struct nlattr * const nla[])
+{
+       const struct nft_set_ops *ops;
+       u32 features;
+
+#ifdef CONFIG_MODULES
+       if (list_empty(&nf_tables_set_ops)) {
+               nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+               request_module("nft-set");
+               nfnl_lock(NFNL_SUBSYS_NFTABLES);
+               if (!list_empty(&nf_tables_set_ops))
+                       return ERR_PTR(-EAGAIN);
+       }
+#endif
+       features = 0;
+       if (nla[NFTA_SET_FLAGS] != NULL) {
+               features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
+               features &= NFT_SET_INTERVAL | NFT_SET_MAP;
+       }
+
+       // FIXME: implement selection properly
+       list_for_each_entry(ops, &nf_tables_set_ops, list) {
+               if ((ops->features & features) != features)
+                       continue;
+               if (!try_module_get(ops->owner))
+                       continue;
+               return ops;
+       }
+
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
+       [NFTA_SET_TABLE]                = { .type = NLA_STRING },
+       [NFTA_SET_NAME]                 = { .type = NLA_STRING },
+       [NFTA_SET_FLAGS]                = { .type = NLA_U32 },
+       [NFTA_SET_KEY_TYPE]             = { .type = NLA_U32 },
+       [NFTA_SET_KEY_LEN]              = { .type = NLA_U32 },
+       [NFTA_SET_DATA_TYPE]            = { .type = NLA_U32 },
+       [NFTA_SET_DATA_LEN]             = { .type = NLA_U32 },
+};
+
+static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
+                                    const struct sk_buff *skb,
+                                    const struct nlmsghdr *nlh,
+                                    const struct nlattr * const nla[])
+{
+       struct net *net = sock_net(skb->sk);
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table = NULL;
+
+       afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       if (nla[NFTA_SET_TABLE] != NULL) {
+               table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
+               if (IS_ERR(table))
+                       return PTR_ERR(table);
+       }
+
+       nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
+       return 0;
+}
+
+struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+                                    const struct nlattr *nla)
+{
+       struct nft_set *set;
+
+       if (nla == NULL)
+               return ERR_PTR(-EINVAL);
+
+       list_for_each_entry(set, &table->sets, list) {
+               if (!nla_strcmp(nla, set->name))
+                       return set;
+       }
+       return ERR_PTR(-ENOENT);
+}
+
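+/*
+ * Allocate a set name from a template: if the name contains a single "%d"
+ * conversion (e.g. "set%d"), the lowest index not yet used by another set
+ * in this table is substituted. The resulting name must be unique within
+ * the table, otherwise -ENFILE is returned.
+ */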
+static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
+                                   const char *name)
+{
+       const struct nft_set *i;
+       const char *p;
+       unsigned long *inuse;
+       unsigned int n = 0;
+
+       p = strnchr(name, IFNAMSIZ, '%');
+       if (p != NULL) {
+               if (p[1] != 'd' || strchr(p + 2, '%'))
+                       return -EINVAL;
+
+               inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+               if (inuse == NULL)
+                       return -ENOMEM;
+
+               list_for_each_entry(i, &ctx->table->sets, list) {
+                       if (!sscanf(i->name, name, &n))
+                               continue;
+                       if (n >= BITS_PER_BYTE * PAGE_SIZE)
+                               continue;
+                       set_bit(n, inuse);
+               }
+
+               n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
+               free_page((unsigned long)inuse);
+       }
+
+       snprintf(set->name, sizeof(set->name), name, n);
+       list_for_each_entry(i, &ctx->table->sets, list) {
+               if (!strcmp(set->name, i->name))
+                       return -ENFILE;
+       }
+       return 0;
+}
+
+static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
+                             const struct nft_set *set, u16 event, u16 flags)
+{
+       struct nfgenmsg *nfmsg;
+       struct nlmsghdr *nlh;
+       u32 portid = NETLINK_CB(ctx->skb).portid;
+       u32 seq = ctx->nlh->nlmsg_seq;
+
+       event |= NFNL_SUBSYS_NFTABLES << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
+                       flags);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family     = ctx->afi->family;
+       nfmsg->version          = NFNETLINK_V0;
+       nfmsg->res_id           = 0;
+
+       if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_SET_NAME, set->name))
+               goto nla_put_failure;
+       if (set->flags != 0)
+               if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags)))
+                       goto nla_put_failure;
+
+       if (nla_put_be32(skb, NFTA_SET_KEY_TYPE, htonl(set->ktype)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_SET_KEY_LEN, htonl(set->klen)))
+               goto nla_put_failure;
+       if (set->flags & NFT_SET_MAP) {
+               if (nla_put_be32(skb, NFTA_SET_DATA_TYPE, htonl(set->dtype)))
+                       goto nla_put_failure;
+               if (nla_put_be32(skb, NFTA_SET_DATA_LEN, htonl(set->dlen)))
+                       goto nla_put_failure;
+       }
+
+       return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+       nlmsg_trim(skb, nlh);
+       return -1;
+}
+
+static int nf_tables_set_notify(const struct nft_ctx *ctx,
+                               const struct nft_set *set,
+                               int event)
+{
+       struct sk_buff *skb;
+       u32 portid = NETLINK_CB(ctx->skb).portid;
+       bool report;
+       int err;
+
+       report = nlmsg_report(ctx->nlh);
+       if (!report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES))
+               return 0;
+
+       err = -ENOBUFS;
+       skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb == NULL)
+               goto err;
+
+       err = nf_tables_fill_set(skb, ctx, set, event, 0);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto err;
+       }
+
+       err = nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, report,
+                            GFP_KERNEL);
+err:
+       if (err < 0)
+               nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, err);
+       return err;
+}
+
+static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
+                                    struct netlink_callback *cb)
+{
+       const struct nft_set *set;
+       unsigned int idx = 0, s_idx = cb->args[0];
+
+       if (cb->args[1])
+               return skb->len;
+
+       list_for_each_entry(set, &ctx->table->sets, list) {
+               if (idx < s_idx)
+                       goto cont;
+               if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
+                                      NLM_F_MULTI) < 0) {
+                       cb->args[0] = idx;
+                       goto done;
+               }
+cont:
+               idx++;
+       }
+       cb->args[1] = 1;
+done:
+       return skb->len;
+}
+
+static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
+                                  struct netlink_callback *cb)
+{
+       const struct nft_set *set;
+       unsigned int idx = 0, s_idx = cb->args[0];
+       struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
+
+       if (cb->args[1])
+               return skb->len;
+
+       list_for_each_entry(table, &ctx->afi->tables, list) {
+               if (cur_table && cur_table != table)
+                       continue;
+
+               ctx->table = table;
+               list_for_each_entry(set, &ctx->table->sets, list) {
+                       if (idx < s_idx)
+                               goto cont;
+                       if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
+                                              NLM_F_MULTI) < 0) {
+                               cb->args[0] = idx;
+                               cb->args[2] = (unsigned long) table;
+                               goto done;
+                       }
+cont:
+                       idx++;
+               }
+       }
+       cb->args[1] = 1;
+done:
+       return skb->len;
+}
+
+static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
+       struct nlattr *nla[NFTA_SET_MAX + 1];
+       struct nft_ctx ctx;
+       int err, ret;
+
+       err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_MAX,
+                         nft_set_policy);
+       if (err < 0)
+               return err;
+
+       err = nft_ctx_init_from_setattr(&ctx, cb->skb, cb->nlh, (void *)nla);
+       if (err < 0)
+               return err;
+
+       if (ctx.table == NULL)
+               ret = nf_tables_dump_sets_all(&ctx, skb, cb);
+       else
+               ret = nf_tables_dump_sets_table(&ctx, skb, cb);
+
+       return ret;
+}
+
+static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
+                           const struct nlmsghdr *nlh,
+                           const struct nlattr * const nla[])
+{
+       const struct nft_set *set;
+       struct nft_ctx ctx;
+       struct sk_buff *skb2;
+       int err;
+
+       /* Verify existence before starting dump */
+       err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
+       if (err < 0)
+               return err;
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nf_tables_dump_sets,
+               };
+               return netlink_dump_start(nlsk, skb, nlh, &c);
+       }
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+
+       skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+       if (skb2 == NULL)
+               return -ENOMEM;
+
+       err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
+       if (err < 0)
+               goto err;
+
+       return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+
+err:
+       kfree_skb(skb2);
+       return err;
+}
+
+static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
+                           const struct nlmsghdr *nlh,
+                           const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_set_ops *ops;
+       const struct nft_af_info *afi;
+       struct net *net = sock_net(skb->sk);
+       struct nft_table *table;
+       struct nft_set *set;
+       struct nft_ctx ctx;
+       char name[IFNAMSIZ];
+       unsigned int size;
+       bool create;
+       u32 ktype, klen, dlen, dtype, flags;
+       int err;
+
+       if (nla[NFTA_SET_TABLE] == NULL ||
+           nla[NFTA_SET_NAME] == NULL ||
+           nla[NFTA_SET_KEY_LEN] == NULL)
+               return -EINVAL;
+
+       ktype = NFT_DATA_VALUE;
+       if (nla[NFTA_SET_KEY_TYPE] != NULL) {
+               ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
+               if ((ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
+                       return -EINVAL;
+       }
+
+       klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
+       if (klen == 0 || klen > FIELD_SIZEOF(struct nft_data, data))
+               return -EINVAL;
+
+       flags = 0;
+       if (nla[NFTA_SET_FLAGS] != NULL) {
+               flags = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
+               if (flags & ~(NFT_SET_ANONYMOUS | NFT_SET_CONSTANT |
+                             NFT_SET_INTERVAL | NFT_SET_MAP))
+                       return -EINVAL;
+       }
+
+       dtype = 0;
+       dlen  = 0;
+       if (nla[NFTA_SET_DATA_TYPE] != NULL) {
+               if (!(flags & NFT_SET_MAP))
+                       return -EINVAL;
+
+               dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
+               if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
+                   dtype != NFT_DATA_VERDICT)
+                       return -EINVAL;
+
+               if (dtype != NFT_DATA_VERDICT) {
+                       if (nla[NFTA_SET_DATA_LEN] == NULL)
+                               return -EINVAL;
+                       dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
+                       if (dlen == 0 ||
+                           dlen > FIELD_SIZEOF(struct nft_data, data))
+                               return -EINVAL;
+               } else
+                       dlen = sizeof(struct nft_data);
+       } else if (flags & NFT_SET_MAP)
+               return -EINVAL;
+
+       create = nlh->nlmsg_flags & NLM_F_CREATE ? true : false;
+
+       afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, create);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
+
+       set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]);
+       if (IS_ERR(set)) {
+               if (PTR_ERR(set) != -ENOENT)
+                       return PTR_ERR(set);
+               set = NULL;
+       }
+
+       if (set != NULL) {
+               if (nlh->nlmsg_flags & NLM_F_EXCL)
+                       return -EEXIST;
+               if (nlh->nlmsg_flags & NLM_F_REPLACE)
+                       return -EOPNOTSUPP;
+               return 0;
+       }
+
+       if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+               return -ENOENT;
+
+       ops = nft_select_set_ops(nla);
+       if (IS_ERR(ops))
+               return PTR_ERR(ops);
+
+       size = 0;
+       if (ops->privsize != NULL)
+               size = ops->privsize(nla);
+
+       err = -ENOMEM;
+       set = kzalloc(sizeof(*set) + size, GFP_KERNEL);
+       if (set == NULL)
+               goto err1;
+
+       nla_strlcpy(name, nla[NFTA_SET_NAME], sizeof(set->name));
+       err = nf_tables_set_alloc_name(&ctx, set, name);
+       if (err < 0)
+               goto err2;
+
+       INIT_LIST_HEAD(&set->bindings);
+       set->ops   = ops;
+       set->ktype = ktype;
+       set->klen  = klen;
+       set->dtype = dtype;
+       set->dlen  = dlen;
+       set->flags = flags;
+
+       err = ops->init(set, nla);
+       if (err < 0)
+               goto err2;
+
+       list_add_tail(&set->list, &table->sets);
+       nf_tables_set_notify(&ctx, set, NFT_MSG_NEWSET);
+       return 0;
+
+err2:
+       kfree(set);
+err1:
+       module_put(ops->owner);
+       return err;
+}
+
+static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
+{
+       list_del(&set->list);
+       if (!(set->flags & NFT_SET_ANONYMOUS))
+               nf_tables_set_notify(ctx, set, NFT_MSG_DELSET);
+
+       set->ops->destroy(set);
+       module_put(set->ops->owner);
+       kfree(set);
+}
+
+static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
+                           const struct nlmsghdr *nlh,
+                           const struct nlattr * const nla[])
+{
+       struct nft_set *set;
+       struct nft_ctx ctx;
+       int err;
+
+       if (nla[NFTA_SET_TABLE] == NULL)
+               return -EINVAL;
+
+       err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla);
+       if (err < 0)
+               return err;
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+       if (!list_empty(&set->bindings))
+               return -EBUSY;
+
+       nf_tables_set_destroy(&ctx, set);
+       return 0;
+}
+
+static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
+                                       const struct nft_set *set,
+                                       const struct nft_set_iter *iter,
+                                       const struct nft_set_elem *elem)
+{
+       enum nft_registers dreg;
+
+       dreg = nft_type_to_reg(set->dtype);
+       return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+}
+
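+/*
+ * Bind a set to an expression within a chain. For map sets, all stored
+ * data (e.g. jump targets) is validated for the binding chain first;
+ * anonymous sets allow only a single binding and are destroyed if that
+ * validation fails.
+ */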
+int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                      struct nft_set_binding *binding)
+{
+       struct nft_set_binding *i;
+       struct nft_set_iter iter;
+
+       if (!list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+               return -EBUSY;
+
+       if (set->flags & NFT_SET_MAP) {
+               /* If the set is already bound to the same chain, all
+                * jumps are already validated for that chain.
+                */
+               list_for_each_entry(i, &set->bindings, list) {
+                       if (i->chain == binding->chain)
+                               goto bind;
+               }
+
+               iter.skip       = 0;
+               iter.count      = 0;
+               iter.err        = 0;
+               iter.fn         = nf_tables_bind_check_setelem;
+
+               set->ops->walk(ctx, set, &iter);
+               if (iter.err < 0) {
+                       /* Destroy anonymous sets if binding fails */
+                       if (set->flags & NFT_SET_ANONYMOUS)
+                               nf_tables_set_destroy(ctx, set);
+
+                       return iter.err;
+               }
+       }
+bind:
+       binding->chain = ctx->chain;
+       list_add_tail(&binding->list, &set->bindings);
+       return 0;
+}
+
+void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
+                         struct nft_set_binding *binding)
+{
+       list_del(&binding->list);
+
+       if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS)
+               nf_tables_set_destroy(ctx, set);
+}
+
+/*
+ * Set elements
+ */
+
+static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
+       [NFTA_SET_ELEM_KEY]             = { .type = NLA_NESTED },
+       [NFTA_SET_ELEM_DATA]            = { .type = NLA_NESTED },
+       [NFTA_SET_ELEM_FLAGS]           = { .type = NLA_U32 },
+};
+
+static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
+       [NFTA_SET_ELEM_LIST_TABLE]      = { .type = NLA_STRING },
+       [NFTA_SET_ELEM_LIST_SET]        = { .type = NLA_STRING },
+       [NFTA_SET_ELEM_LIST_ELEMENTS]   = { .type = NLA_NESTED },
+};
+
+static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx,
+                                     const struct sk_buff *skb,
+                                     const struct nlmsghdr *nlh,
+                                     const struct nlattr * const nla[])
+{
+       const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+       const struct nft_af_info *afi;
+       const struct nft_table *table;
+       struct net *net = sock_net(skb->sk);
+
+       afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false);
+       if (IS_ERR(afi))
+               return PTR_ERR(afi);
+
+       table = nf_tables_table_lookup(afi, nla[NFTA_SET_ELEM_LIST_TABLE]);
+       if (IS_ERR(table))
+               return PTR_ERR(table);
+
+       nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla);
+       return 0;
+}
+
+static int nf_tables_fill_setelem(struct sk_buff *skb,
+                                 const struct nft_set *set,
+                                 const struct nft_set_elem *elem)
+{
+       unsigned char *b = skb_tail_pointer(skb);
+       struct nlattr *nest;
+
+       nest = nla_nest_start(skb, NFTA_LIST_ELEM);
+       if (nest == NULL)
+               goto nla_put_failure;
+
+       if (nft_data_dump(skb, NFTA_SET_ELEM_KEY, &elem->key, NFT_DATA_VALUE,
+                         set->klen) < 0)
+               goto nla_put_failure;
+
+       if (set->flags & NFT_SET_MAP &&
+           !(elem->flags & NFT_SET_ELEM_INTERVAL_END) &&
+           nft_data_dump(skb, NFTA_SET_ELEM_DATA, &elem->data,
+                         set->dtype == NFT_DATA_VERDICT ? NFT_DATA_VERDICT : NFT_DATA_VALUE,
+                         set->dlen) < 0)
+               goto nla_put_failure;
+
+       if (elem->flags != 0)
+               if (nla_put_be32(skb, NFTA_SET_ELEM_FLAGS, htonl(elem->flags)))
+                       goto nla_put_failure;
+
+       nla_nest_end(skb, nest);
+       return 0;
+
+nla_put_failure:
+       nlmsg_trim(skb, b);
+       return -EMSGSIZE;
+}
+
+struct nft_set_dump_args {
+       const struct netlink_callback   *cb;
+       struct nft_set_iter             iter;
+       struct sk_buff                  *skb;
+};
+
+static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
+                                 const struct nft_set *set,
+                                 const struct nft_set_iter *iter,
+                                 const struct nft_set_elem *elem)
+{
+       struct nft_set_dump_args *args;
+
+       args = container_of(iter, struct nft_set_dump_args, iter);
+       return nf_tables_fill_setelem(args->skb, set, elem);
+}
+
+static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const struct nft_set *set;
+       struct nft_set_dump_args args;
+       struct nft_ctx ctx;
+       struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
+       struct nfgenmsg *nfmsg;
+       struct nlmsghdr *nlh;
+       struct nlattr *nest;
+       u32 portid, seq;
+       int event, err;
+
+       nfmsg = nlmsg_data(cb->nlh);
+       err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_ELEM_LIST_MAX,
+                         nft_set_elem_list_policy);
+       if (err < 0)
+               return err;
+
+       err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla);
+       if (err < 0)
+               return err;
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+
+       event  = NFT_MSG_NEWSETELEM;
+       event |= NFNL_SUBSYS_NFTABLES << 8;
+       portid = NETLINK_CB(cb->skb).portid;
+       seq    = cb->nlh->nlmsg_seq;
+
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg),
+                       NLM_F_MULTI);
+       if (nlh == NULL)
+               goto nla_put_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family = NFPROTO_UNSPEC;
+       nfmsg->version      = NFNETLINK_V0;
+       nfmsg->res_id       = 0;
+
+       if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name))
+               goto nla_put_failure;
+       if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
+               goto nla_put_failure;
+
+       nest = nla_nest_start(skb, NFTA_SET_ELEM_LIST_ELEMENTS);
+       if (nest == NULL)
+               goto nla_put_failure;
+
+       args.cb         = cb;
+       args.skb        = skb;
+       args.iter.skip  = cb->args[0];
+       args.iter.count = 0;
+       args.iter.err   = 0;
+       args.iter.fn    = nf_tables_dump_setelem;
+       set->ops->walk(&ctx, set, &args.iter);
+
+       nla_nest_end(skb, nest);
+       nlmsg_end(skb, nlh);
+
+       if (args.iter.err && args.iter.err != -EMSGSIZE)
+               return args.iter.err;
+       if (args.iter.count == cb->args[0])
+               return 0;
+
+       cb->args[0] = args.iter.count;
+       return skb->len;
+
+nla_put_failure:
+       return -ENOSPC;
+}
+
+static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb,
+                               const struct nlmsghdr *nlh,
+                               const struct nlattr * const nla[])
+{
+       const struct nft_set *set;
+       struct nft_ctx ctx;
+       int err;
+
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       if (err < 0)
+               return err;
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+
+       if (nlh->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = nf_tables_dump_set,
+               };
+               return netlink_dump_start(nlsk, skb, nlh, &c);
+       }
+       return -EOPNOTSUPP;
+}
+
+static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
+                           const struct nlattr *attr)
+{
+       struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+       struct nft_data_desc d1, d2;
+       struct nft_set_elem elem;
+       struct nft_set_binding *binding;
+       enum nft_registers dreg;
+       int err;
+
+       err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
+                              nft_set_elem_policy);
+       if (err < 0)
+               return err;
+
+       if (nla[NFTA_SET_ELEM_KEY] == NULL)
+               return -EINVAL;
+
+       elem.flags = 0;
+       if (nla[NFTA_SET_ELEM_FLAGS] != NULL) {
+               elem.flags = ntohl(nla_get_be32(nla[NFTA_SET_ELEM_FLAGS]));
+               if (elem.flags & ~NFT_SET_ELEM_INTERVAL_END)
+                       return -EINVAL;
+       }
+
+       if (set->flags & NFT_SET_MAP) {
+               if (nla[NFTA_SET_ELEM_DATA] == NULL &&
+                   !(elem.flags & NFT_SET_ELEM_INTERVAL_END))
+                       return -EINVAL;
+       } else {
+               if (nla[NFTA_SET_ELEM_DATA] != NULL)
+                       return -EINVAL;
+       }
+
+       err = nft_data_init(ctx, &elem.key, &d1, nla[NFTA_SET_ELEM_KEY]);
+       if (err < 0)
+               goto err1;
+       err = -EINVAL;
+       if (d1.type != NFT_DATA_VALUE || d1.len != set->klen)
+               goto err2;
+
+       err = -EEXIST;
+       if (set->ops->get(set, &elem) == 0)
+               goto err2;
+
+       if (nla[NFTA_SET_ELEM_DATA] != NULL) {
+               err = nft_data_init(ctx, &elem.data, &d2, nla[NFTA_SET_ELEM_DATA]);
+               if (err < 0)
+                       goto err2;
+
+               err = -EINVAL;
+               if (set->dtype != NFT_DATA_VERDICT && d2.len != set->dlen)
+                       goto err3;
+
+               dreg = nft_type_to_reg(set->dtype);
+               list_for_each_entry(binding, &set->bindings, list) {
+                       struct nft_ctx bind_ctx = {
+                               .afi    = ctx->afi,
+                               .table  = ctx->table,
+                               .chain  = binding->chain,
+                       };
+
+                       err = nft_validate_data_load(&bind_ctx, dreg,
+                                                    &elem.data, d2.type);
+                       if (err < 0)
+                               goto err3;
+               }
+       }
+
+       err = set->ops->insert(set, &elem);
+       if (err < 0)
+               goto err3;
+
+       return 0;
+
+err3:
+       if (nla[NFTA_SET_ELEM_DATA] != NULL)
+               nft_data_uninit(&elem.data, d2.type);
+err2:
+       nft_data_uninit(&elem.key, d1.type);
+err1:
+       return err;
+}
+
+static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
+                               const struct nlmsghdr *nlh,
+                               const struct nlattr * const nla[])
+{
+       const struct nlattr *attr;
+       struct nft_set *set;
+       struct nft_ctx ctx;
+       int rem, err;
+
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       if (err < 0)
+               return err;
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+       if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+               return -EBUSY;
+
+       nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+               err = nft_add_set_elem(&ctx, set, attr);
+               if (err < 0)
+                       return err;
+       }
+       return 0;
+}
+
+static int nft_del_setelem(const struct nft_ctx *ctx, struct nft_set *set,
+                          const struct nlattr *attr)
+{
+       struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
+       struct nft_data_desc desc;
+       struct nft_set_elem elem;
+       int err;
+
+       err = nla_parse_nested(nla, NFTA_SET_ELEM_MAX, attr,
+                              nft_set_elem_policy);
+       if (err < 0)
+               goto err1;
+
+       err = -EINVAL;
+       if (nla[NFTA_SET_ELEM_KEY] == NULL)
+               goto err1;
+
+       err = nft_data_init(ctx, &elem.key, &desc, nla[NFTA_SET_ELEM_KEY]);
+       if (err < 0)
+               goto err1;
+
+       err = -EINVAL;
+       if (desc.type != NFT_DATA_VALUE || desc.len != set->klen)
+               goto err2;
+
+       err = set->ops->get(set, &elem);
+       if (err < 0)
+               goto err2;
+
+       set->ops->remove(set, &elem);
+
+       nft_data_uninit(&elem.key, NFT_DATA_VALUE);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_uninit(&elem.data, set->dtype);
+
+err2:
+       nft_data_uninit(&elem.key, desc.type);
+err1:
+       return err;
+}
+
+static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
+                               const struct nlmsghdr *nlh,
+                               const struct nlattr * const nla[])
+{
+       const struct nlattr *attr;
+       struct nft_set *set;
+       struct nft_ctx ctx;
+       int rem, err;
+
+       err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla);
+       if (err < 0)
+               return err;
+
+       set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+       if (!list_empty(&set->bindings) && set->flags & NFT_SET_CONSTANT)
+               return -EBUSY;
+
+       nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
+               err = nft_del_setelem(&ctx, set, attr);
+               if (err < 0)
+                       return err;
+       }
+       return 0;
+}
+
+static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
+       [NFT_MSG_NEWTABLE] = {
+               .call           = nf_tables_newtable,
+               .attr_count     = NFTA_TABLE_MAX,
+               .policy         = nft_table_policy,
+       },
+       [NFT_MSG_GETTABLE] = {
+               .call           = nf_tables_gettable,
+               .attr_count     = NFTA_TABLE_MAX,
+               .policy         = nft_table_policy,
+       },
+       [NFT_MSG_DELTABLE] = {
+               .call           = nf_tables_deltable,
+               .attr_count     = NFTA_TABLE_MAX,
+               .policy         = nft_table_policy,
+       },
+       [NFT_MSG_NEWCHAIN] = {
+               .call           = nf_tables_newchain,
+               .attr_count     = NFTA_CHAIN_MAX,
+               .policy         = nft_chain_policy,
+       },
+       [NFT_MSG_GETCHAIN] = {
+               .call           = nf_tables_getchain,
+               .attr_count     = NFTA_CHAIN_MAX,
+               .policy         = nft_chain_policy,
+       },
+       [NFT_MSG_DELCHAIN] = {
+               .call           = nf_tables_delchain,
+               .attr_count     = NFTA_CHAIN_MAX,
+               .policy         = nft_chain_policy,
+       },
+       [NFT_MSG_NEWRULE] = {
+               .call_batch     = nf_tables_newrule,
+               .attr_count     = NFTA_RULE_MAX,
+               .policy         = nft_rule_policy,
+       },
+       [NFT_MSG_GETRULE] = {
+               .call           = nf_tables_getrule,
+               .attr_count     = NFTA_RULE_MAX,
+               .policy         = nft_rule_policy,
+       },
+       [NFT_MSG_DELRULE] = {
+               .call_batch     = nf_tables_delrule,
+               .attr_count     = NFTA_RULE_MAX,
+               .policy         = nft_rule_policy,
+       },
+       [NFT_MSG_NEWSET] = {
+               .call           = nf_tables_newset,
+               .attr_count     = NFTA_SET_MAX,
+               .policy         = nft_set_policy,
+       },
+       [NFT_MSG_GETSET] = {
+               .call           = nf_tables_getset,
+               .attr_count     = NFTA_SET_MAX,
+               .policy         = nft_set_policy,
+       },
+       [NFT_MSG_DELSET] = {
+               .call           = nf_tables_delset,
+               .attr_count     = NFTA_SET_MAX,
+               .policy         = nft_set_policy,
+       },
+       [NFT_MSG_NEWSETELEM] = {
+               .call           = nf_tables_newsetelem,
+               .attr_count     = NFTA_SET_ELEM_LIST_MAX,
+               .policy         = nft_set_elem_list_policy,
+       },
+       [NFT_MSG_GETSETELEM] = {
+               .call           = nf_tables_getsetelem,
+               .attr_count     = NFTA_SET_ELEM_LIST_MAX,
+               .policy         = nft_set_elem_list_policy,
+       },
+       [NFT_MSG_DELSETELEM] = {
+               .call           = nf_tables_delsetelem,
+               .attr_count     = NFTA_SET_ELEM_LIST_MAX,
+               .policy         = nft_set_elem_list_policy,
+       },
+};
+
+static const struct nfnetlink_subsystem nf_tables_subsys = {
+       .name           = "nf_tables",
+       .subsys_id      = NFNL_SUBSYS_NFTABLES,
+       .cb_count       = NFT_MSG_MAX,
+       .cb             = nf_tables_cb,
+       .commit         = nf_tables_commit,
+       .abort          = nf_tables_abort,
+};
+
+/*
+ * Loop detection - walk through the ruleset beginning at the destination chain
+ * of a new jump until either the source chain is reached (loop) or all
+ * reachable chains have been traversed.
+ *
+ * The loop check is performed whenever a new jump verdict is added to an
+ * expression or verdict map, or when a verdict map is bound to a new chain.
+ */
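
For illustration, the check implemented below is a depth-first search starting
at the jump target: if the source chain turns out to be reachable again, the
jump would close a cycle and must be rejected (-ELOOP). A minimal standalone
sketch of that idea, using toy chain structures rather than the kernel's (the
only assumption is that a chain records which chains it jumps or goes to):

#include <stdio.h>

#define MAX_JUMPS 4

struct toy_chain {
	const char		*name;
	struct toy_chain	*jumps[MAX_JUMPS];	/* jump/goto targets */
	int			njumps;
};

/* Returns -1 (think -ELOOP) if 'src' is reachable from 'dst'. */
static int check_loops(const struct toy_chain *src, const struct toy_chain *dst)
{
	int i;

	if (src == dst)
		return -1;
	for (i = 0; i < dst->njumps; i++)
		if (check_loops(src, dst->jumps[i]) < 0)
			return -1;
	return 0;
}

int main(void)
{
	struct toy_chain a = { .name = "a" }, b = { .name = "b" }, c = { .name = "c" };

	a.jumps[a.njumps++] = &b;	/* a -> b */
	b.jumps[b.njumps++] = &c;	/* b -> c */

	/* c -> a would close the cycle a -> b -> c -> a. */
	printf("%s -> %s allowed? %s\n", c.name, a.name,
	       check_loops(&c, &a) ? "no (loop)" : "yes");
	/* a -> c only adds a shortcut, no cycle. */
	printf("%s -> %s allowed? %s\n", a.name, c.name,
	       check_loops(&a, &c) ? "no (loop)" : "yes");
	return 0;
}
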
+
+static int nf_tables_check_loops(const struct nft_ctx *ctx,
+                                const struct nft_chain *chain);
+
+static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
+                                       const struct nft_set *set,
+                                       const struct nft_set_iter *iter,
+                                       const struct nft_set_elem *elem)
+{
+       switch (elem->data.verdict) {
+       case NFT_JUMP:
+       case NFT_GOTO:
+               return nf_tables_check_loops(ctx, elem->data.chain);
+       default:
+               return 0;
+       }
+}
+
+static int nf_tables_check_loops(const struct nft_ctx *ctx,
+                                const struct nft_chain *chain)
+{
+       const struct nft_rule *rule;
+       const struct nft_expr *expr, *last;
+       const struct nft_set *set;
+       struct nft_set_binding *binding;
+       struct nft_set_iter iter;
+
+       if (ctx->chain == chain)
+               return -ELOOP;
+
+       list_for_each_entry(rule, &chain->rules, list) {
+               nft_rule_for_each_expr(expr, last, rule) {
+                       const struct nft_data *data = NULL;
+                       int err;
+
+                       if (!expr->ops->validate)
+                               continue;
+
+                       err = expr->ops->validate(ctx, expr, &data);
+                       if (err < 0)
+                               return err;
+
+                       if (data == NULL)
+                               continue;
+
+                       switch (data->verdict) {
+                       case NFT_JUMP:
+                       case NFT_GOTO:
+                               err = nf_tables_check_loops(ctx, data->chain);
+                               if (err < 0)
+                                       return err;
+                       default:
+                               break;
+                       }
+               }
+       }
+
+       list_for_each_entry(set, &ctx->table->sets, list) {
+               if (!(set->flags & NFT_SET_MAP) ||
+                   set->dtype != NFT_DATA_VERDICT)
+                       continue;
+
+               list_for_each_entry(binding, &set->bindings, list) {
+                       if (binding->chain != chain)
+                               continue;
+
+                       iter.skip       = 0;
+                       iter.count      = 0;
+                       iter.err        = 0;
+                       iter.fn         = nf_tables_loop_check_setelem;
+
+                       set->ops->walk(ctx, set, &iter);
+                       if (iter.err < 0)
+                               return iter.err;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ *     nft_validate_input_register - validate an expression's input register
+ *
+ *     @reg: the register number
+ *
+ *     Validate that the input register is one of the general purpose
+ *     registers.
+ */
+int nft_validate_input_register(enum nft_registers reg)
+{
+       if (reg <= NFT_REG_VERDICT)
+               return -EINVAL;
+       if (reg > NFT_REG_MAX)
+               return -ERANGE;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_validate_input_register);
+
+/**
+ *     nft_validate_output_register - validate an expression's output register
+ *
+ *     @reg: the register number
+ *
+ *     Validate that the output register is one of the general purpose
+ *     registers or the verdict register.
+ */
+int nft_validate_output_register(enum nft_registers reg)
+{
+       if (reg < NFT_REG_VERDICT)
+               return -EINVAL;
+       if (reg > NFT_REG_MAX)
+               return -ERANGE;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nft_validate_output_register);
+
+/**
+ *     nft_validate_data_load - validate an expression's data load
+ *
+ *     @ctx: context of the expression performing the load
+ *     @reg: the destination register number
+ *     @data: the data to load
+ *     @type: the data type
+ *
+ *     Validate that a data load uses the appropriate data type for
+ *     the destination register. A value of NULL for the data means
+ *     that it is runtime-gathered data, which is always of type
+ *     NFT_DATA_VALUE.
+ */
+int nft_validate_data_load(const struct nft_ctx *ctx, enum nft_registers reg,
+                          const struct nft_data *data,
+                          enum nft_data_types type)
+{
+       int err;
+
+       switch (reg) {
+       case NFT_REG_VERDICT:
+               if (data == NULL || type != NFT_DATA_VERDICT)
+                       return -EINVAL;
+
+               if (data->verdict == NFT_GOTO || data->verdict == NFT_JUMP) {
+                       err = nf_tables_check_loops(ctx, data->chain);
+                       if (err < 0)
+                               return err;
+
+                       if (ctx->chain->level + 1 > data->chain->level) {
+                               if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
+                                       return -EMLINK;
+                               data->chain->level = ctx->chain->level + 1;
+                       }
+               }
+
+               return 0;
+       default:
+               if (data != NULL && type != NFT_DATA_VALUE)
+                       return -EINVAL;
+               return 0;
+       }
+}
+EXPORT_SYMBOL_GPL(nft_validate_data_load);
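
A minimal standalone sketch of the jump-depth bookkeeping done for verdict
loads above, with toy types and an arbitrary stack size standing in for
NFT_JUMP_STACK_SIZE: a jump raises the target chain's level to the source
chain's level plus one, and the load is refused with -EMLINK once the jump
stack would overflow.

#include <stdio.h>

#define TOY_JUMP_STACK_SIZE	4	/* stands in for NFT_JUMP_STACK_SIZE */
#define TOY_EMLINK		31

struct toy_chain {
	const char	*name;
	unsigned int	level;		/* deepest jump path leading here */
};

static int toy_validate_jump(struct toy_chain *from, struct toy_chain *to)
{
	if (from->level + 1 > to->level) {
		if (from->level + 1 == TOY_JUMP_STACK_SIZE)
			return -TOY_EMLINK;
		to->level = from->level + 1;
	}
	return 0;
}

int main(void)
{
	struct toy_chain c[5] = {
		{ "c0", 0 }, { "c1", 0 }, { "c2", 0 }, { "c3", 0 }, { "c4", 0 },
	};
	int i, err = 0;

	/* Build a linear series of jumps c0 -> c1 -> c2 -> c3 -> c4. */
	for (i = 0; i < 4 && err == 0; i++)
		err = toy_validate_jump(&c[i], &c[i + 1]);

	printf("last attempted jump %s -> %s, err=%d\n",
	       c[i - 1].name, c[i].name, err);	/* c3 -> c4, err=-31 */
	return 0;
}
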
+
+static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
+       [NFTA_VERDICT_CODE]     = { .type = NLA_U32 },
+       [NFTA_VERDICT_CHAIN]    = { .type = NLA_STRING,
+                                   .len = NFT_CHAIN_MAXNAMELEN - 1 },
+};
+
+static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
+                           struct nft_data_desc *desc, const struct nlattr *nla)
+{
+       struct nlattr *tb[NFTA_VERDICT_MAX + 1];
+       struct nft_chain *chain;
+       int err;
+
+       err = nla_parse_nested(tb, NFTA_VERDICT_MAX, nla, nft_verdict_policy);
+       if (err < 0)
+               return err;
+
+       if (!tb[NFTA_VERDICT_CODE])
+               return -EINVAL;
+       data->verdict = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
+
+       switch (data->verdict) {
+       case NF_ACCEPT:
+       case NF_DROP:
+       case NF_QUEUE:
+       case NFT_CONTINUE:
+       case NFT_BREAK:
+       case NFT_RETURN:
+               desc->len = sizeof(data->verdict);
+               break;
+       case NFT_JUMP:
+       case NFT_GOTO:
+               if (!tb[NFTA_VERDICT_CHAIN])
+                       return -EINVAL;
+               chain = nf_tables_chain_lookup(ctx->table,
+                                              tb[NFTA_VERDICT_CHAIN]);
+               if (IS_ERR(chain))
+                       return PTR_ERR(chain);
+               if (chain->flags & NFT_BASE_CHAIN)
+                       return -EOPNOTSUPP;
+
+               chain->use++;
+               data->chain = chain;
+               desc->len = sizeof(data);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       desc->type = NFT_DATA_VERDICT;
+       return 0;
+}
+
+static void nft_verdict_uninit(const struct nft_data *data)
+{
+       switch (data->verdict) {
+       case NFT_JUMP:
+       case NFT_GOTO:
+               data->chain->use--;
+               break;
+       }
+}
+
+static int nft_verdict_dump(struct sk_buff *skb, const struct nft_data *data)
+{
+       struct nlattr *nest;
+
+       nest = nla_nest_start(skb, NFTA_DATA_VERDICT);
+       if (!nest)
+               goto nla_put_failure;
+
+       if (nla_put_be32(skb, NFTA_VERDICT_CODE, htonl(data->verdict)))
+               goto nla_put_failure;
+
+       switch (data->verdict) {
+       case NFT_JUMP:
+       case NFT_GOTO:
+               if (nla_put_string(skb, NFTA_VERDICT_CHAIN, data->chain->name))
+                       goto nla_put_failure;
+       }
+       nla_nest_end(skb, nest);
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static int nft_value_init(const struct nft_ctx *ctx, struct nft_data *data,
+                         struct nft_data_desc *desc, const struct nlattr *nla)
+{
+       unsigned int len;
+
+       len = nla_len(nla);
+       if (len == 0)
+               return -EINVAL;
+       if (len > sizeof(data->data))
+               return -EOVERFLOW;
+
+       nla_memcpy(data->data, nla, sizeof(data->data));
+       desc->type = NFT_DATA_VALUE;
+       desc->len  = len;
+       return 0;
+}
+
+static int nft_value_dump(struct sk_buff *skb, const struct nft_data *data,
+                         unsigned int len)
+{
+       return nla_put(skb, NFTA_DATA_VALUE, len, data->data);
+}
+
+static const struct nla_policy nft_data_policy[NFTA_DATA_MAX + 1] = {
+       [NFTA_DATA_VALUE]       = { .type = NLA_BINARY,
+                                   .len  = FIELD_SIZEOF(struct nft_data, data) },
+       [NFTA_DATA_VERDICT]     = { .type = NLA_NESTED },
+};
+
+/**
+ *     nft_data_init - parse nf_tables data netlink attributes
+ *
+ *     @ctx: context of the expression using the data
+ *     @data: destination struct nft_data
+ *     @desc: data description
+ *     @nla: netlink attribute containing data
+ *
+ *     Parse the netlink data attributes and initialize a struct nft_data.
+ *     The type and length of data are returned in the data description.
+ *
+ *     The caller can indicate that it only wants to accept data of type
+ *     NFT_DATA_VALUE by passing NULL for the ctx argument.
+ */
+int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data,
+                 struct nft_data_desc *desc, const struct nlattr *nla)
+{
+       struct nlattr *tb[NFTA_DATA_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, NFTA_DATA_MAX, nla, nft_data_policy);
+       if (err < 0)
+               return err;
+
+       if (tb[NFTA_DATA_VALUE])
+               return nft_value_init(ctx, data, desc, tb[NFTA_DATA_VALUE]);
+       if (tb[NFTA_DATA_VERDICT] && ctx != NULL)
+               return nft_verdict_init(ctx, data, desc, tb[NFTA_DATA_VERDICT]);
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(nft_data_init);
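
For orientation only: when the nested attribute parsed above carries a plain
value, it is a single NFTA_DATA_VALUE attribute whose payload length becomes
desc->len. The constant value used below is an assumption (check
linux/netfilter/nf_tables.h); a real client would build the message with
libmnl/libnftnl rather than by hand.

#include <stdio.h>
#include <linux/netlink.h>

#define NFTA_DATA_VALUE 1	/* assumed value for illustration */

int main(void)
{
	unsigned int payload = 4;	/* e.g. a 4-byte IPv4 address as the value */
	struct nlattr nla = {
		.nla_len  = NLA_HDRLEN + payload,
		.nla_type = NFTA_DATA_VALUE,
	};

	printf("NFTA_DATA_VALUE attribute: %d byte header, %u byte payload, "
	       "padded to %d bytes\n",
	       NLA_HDRLEN, payload, NLA_ALIGN(nla.nla_len));
	return 0;
}
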
+
+/**
+ *     nft_data_uninit - release a nft_data item
+ *
+ *     @data: struct nft_data to release
+ *     @type: type of data
+ *
+ *     Release a nft_data item. NFT_DATA_VALUE types can be silently discarded,
+ *     all others need to be released by calling this function.
+ */
+void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
+{
+       switch (type) {
+       case NFT_DATA_VALUE:
+               return;
+       case NFT_DATA_VERDICT:
+               return nft_verdict_uninit(data);
+       default:
+               WARN_ON(1);
+       }
+}
+EXPORT_SYMBOL_GPL(nft_data_uninit);
+
+int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
+                 enum nft_data_types type, unsigned int len)
+{
+       struct nlattr *nest;
+       int err;
+
+       nest = nla_nest_start(skb, attr);
+       if (nest == NULL)
+               return -1;
+
+       switch (type) {
+       case NFT_DATA_VALUE:
+               err = nft_value_dump(skb, data, len);
+               break;
+       case NFT_DATA_VERDICT:
+               err = nft_verdict_dump(skb, data);
+               break;
+       default:
+               err = -EINVAL;
+               WARN_ON(1);
+       }
+
+       nla_nest_end(skb, nest);
+       return err;
+}
+EXPORT_SYMBOL_GPL(nft_data_dump);
+
+static int nf_tables_init_net(struct net *net)
+{
+       INIT_LIST_HEAD(&net->nft.af_info);
+       INIT_LIST_HEAD(&net->nft.commit_list);
+       return 0;
+}
+
+static struct pernet_operations nf_tables_net_ops = {
+       .init   = nf_tables_init_net,
+};
+
+static int __init nf_tables_module_init(void)
+{
+       int err;
+
+       info = kmalloc(sizeof(struct nft_expr_info) * NFT_RULE_MAXEXPRS,
+                      GFP_KERNEL);
+       if (info == NULL) {
+               err = -ENOMEM;
+               goto err1;
+       }
+
+       err = nf_tables_core_module_init();
+       if (err < 0)
+               goto err2;
+
+       err = nfnetlink_subsys_register(&nf_tables_subsys);
+       if (err < 0)
+               goto err3;
+
+       pr_info("nf_tables: (c) 2007-2009 Patrick McHardy <kaber@trash.net>\n");
+       return register_pernet_subsys(&nf_tables_net_ops);
+err3:
+       nf_tables_core_module_exit();
+err2:
+       kfree(info);
+err1:
+       return err;
+}
+
+static void __exit nf_tables_module_exit(void)
+{
+       unregister_pernet_subsys(&nf_tables_net_ops);
+       nfnetlink_subsys_unregister(&nf_tables_subsys);
+       nf_tables_core_module_exit();
+       kfree(info);
+}
+
+module_init(nf_tables_module_init);
+module_exit(nf_tables_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFTABLES);
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
new file mode 100644 (file)
index 0000000..cb9e685
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
+
+static void nft_cmp_fast_eval(const struct nft_expr *expr,
+                             struct nft_data data[NFT_REG_MAX + 1])
+{
+       const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+       u32 mask;
+
+       mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - priv->len);
+       if ((data[priv->sreg].data[0] & mask) == priv->data)
+               return;
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
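
A worked example of the mask computed above, assuming priv->len is a length in
bits and priv->data is a single 32-bit word: for a 16-bit comparison,
mask = ~0U >> (32 - 16) = 0x0000ffff, so only the low 16 bits of the first
data word take part in the comparison.

#include <stdio.h>

int main(void)
{
	unsigned int len  = 16;			/* comparison length in bits */
	unsigned int mask = ~0U >> (32 - len);	/* same form as above */

	printf("len=%u bits -> mask=0x%08x\n", len, mask);	/* 0x0000ffff */
	return 0;
}
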
+
+static bool nft_payload_fast_eval(const struct nft_expr *expr,
+                                 struct nft_data data[NFT_REG_MAX + 1],
+                                 const struct nft_pktinfo *pkt)
+{
+       const struct nft_payload *priv = nft_expr_priv(expr);
+       const struct sk_buff *skb = pkt->skb;
+       struct nft_data *dest = &data[priv->dreg];
+       unsigned char *ptr;
+
+       if (priv->base == NFT_PAYLOAD_NETWORK_HEADER)
+               ptr = skb_network_header(skb);
+       else
+               ptr = skb_network_header(skb) + pkt->xt.thoff;
+
+       ptr += priv->offset;
+
+       if (unlikely(ptr + priv->len >= skb_tail_pointer(skb)))
+               return false;
+
+       if (priv->len == 2)
+               *(u16 *)dest->data = *(u16 *)ptr;
+       else if (priv->len == 4)
+               *(u32 *)dest->data = *(u32 *)ptr;
+       else
+               *(u8 *)dest->data = *(u8 *)ptr;
+       return true;
+}
+
+struct nft_jumpstack {
+       const struct nft_chain  *chain;
+       const struct nft_rule   *rule;
+       int                     rulenum;
+};
+
+static inline void
+nft_chain_stats(const struct nft_chain *this, const struct nft_pktinfo *pkt,
+               struct nft_jumpstack *jumpstack, unsigned int stackptr)
+{
+       struct nft_stats __percpu *stats;
+       const struct nft_chain *chain = stackptr ? jumpstack[0].chain : this;
+
+       rcu_read_lock_bh();
+       stats = rcu_dereference(nft_base_chain(chain)->stats);
+       __this_cpu_inc(stats->pkts);
+       __this_cpu_add(stats->bytes, pkt->skb->len);
+       rcu_read_unlock_bh();
+}
+
+enum nft_trace {
+       NFT_TRACE_RULE,
+       NFT_TRACE_RETURN,
+       NFT_TRACE_POLICY,
+};
+
+static const char *const comments[] = {
+       [NFT_TRACE_RULE]        = "rule",
+       [NFT_TRACE_RETURN]      = "return",
+       [NFT_TRACE_POLICY]      = "policy",
+};
+
+static struct nf_loginfo trace_loginfo = {
+       .type = NF_LOG_TYPE_LOG,
+       .u = {
+               .log = {
+                       .level = 4,
+                       .logflags = NF_LOG_MASK,
+               },
+       },
+};
+
+static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
+                                   const struct nft_chain *chain,
+                                   int rulenum, enum nft_trace type)
+{
+       struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+
+       nf_log_packet(net, pkt->xt.family, pkt->hooknum, pkt->skb, pkt->in,
+                     pkt->out, &trace_loginfo, "TRACE: %s:%s:%s:%u ",
+                     chain->table->name, chain->name, comments[type],
+                     rulenum);
+}
+
+unsigned int
+nft_do_chain_pktinfo(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
+{
+       const struct nft_chain *chain = ops->priv;
+       const struct nft_rule *rule;
+       const struct nft_expr *expr, *last;
+       struct nft_data data[NFT_REG_MAX + 1];
+       unsigned int stackptr = 0;
+       struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
+       int rulenum = 0;
+       /*
+        * Cache the generation cursor to avoid problems in case it is
+        * updated while we traverse the ruleset.
+        */
+       unsigned int gencursor = ACCESS_ONCE(chain->net->nft.gencursor);
+
+do_chain:
+       rule = list_entry(&chain->rules, struct nft_rule, list);
+next_rule:
+       data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+       list_for_each_entry_continue_rcu(rule, &chain->rules, list) {
+
+               /* This rule is not active, skip. */
+               if (unlikely(rule->genmask & (1 << gencursor)))
+                       continue;
+
+               rulenum++;
+
+               nft_rule_for_each_expr(expr, last, rule) {
+                       if (expr->ops == &nft_cmp_fast_ops)
+                               nft_cmp_fast_eval(expr, data);
+                       else if (expr->ops != &nft_payload_fast_ops ||
+                                !nft_payload_fast_eval(expr, data, pkt))
+                               expr->ops->eval(expr, data, pkt);
+
+                       if (data[NFT_REG_VERDICT].verdict != NFT_CONTINUE)
+                               break;
+               }
+
+               switch (data[NFT_REG_VERDICT].verdict) {
+               case NFT_BREAK:
+                       data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+                       /* fall through */
+               case NFT_CONTINUE:
+                       continue;
+               }
+               break;
+       }
+
+       switch (data[NFT_REG_VERDICT].verdict) {
+       case NF_ACCEPT:
+       case NF_DROP:
+       case NF_QUEUE:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
+               return data[NFT_REG_VERDICT].verdict;
+       case NFT_JUMP:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RULE);
+
+               BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
+               jumpstack[stackptr].chain = chain;
+               jumpstack[stackptr].rule  = rule;
+               jumpstack[stackptr].rulenum = rulenum;
+               stackptr++;
+               /* fall through */
+       case NFT_GOTO:
+               chain = data[NFT_REG_VERDICT].chain;
+               goto do_chain;
+       case NFT_RETURN:
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, rulenum, NFT_TRACE_RETURN);
+
+               /* fall through */
+       case NFT_CONTINUE:
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       if (stackptr > 0) {
+               if (unlikely(pkt->skb->nf_trace))
+                       nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_RETURN);
+
+               stackptr--;
+               chain = jumpstack[stackptr].chain;
+               rule  = jumpstack[stackptr].rule;
+               rulenum = jumpstack[stackptr].rulenum;
+               goto next_rule;
+       }
+       nft_chain_stats(chain, pkt, jumpstack, stackptr);
+
+       if (unlikely(pkt->skb->nf_trace))
+               nft_trace_packet(pkt, chain, ++rulenum, NFT_TRACE_POLICY);
+
+       return nft_base_chain(chain)->policy;
+}
+EXPORT_SYMBOL_GPL(nft_do_chain_pktinfo);
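
The jump stack above behaves like a small call stack: NFT_JUMP pushes the
current chain and rule position before switching, NFT_GOTO switches without
pushing, and NFT_RETURN (or running off the end of a chain) pops and resumes
after the saved rule. A toy standalone model of that control flow, not kernel
code:

#include <stdio.h>

#define TOY_STACK_SIZE 16

struct toy_frame {
	const char	*chain;
	int		rulenum;
};

int main(void)
{
	struct toy_frame stack[TOY_STACK_SIZE];
	unsigned int sp = 0;

	/* rule 3 of the base chain jumps to "foo": push a return frame */
	stack[sp++] = (struct toy_frame){ "input", 3 };
	printf("jump -> foo (saved %s:%d, depth %u)\n",
	       stack[sp - 1].chain, stack[sp - 1].rulenum, sp);

	/* "foo" issues a goto to "bar": nothing is pushed, so a later
	 * return resumes in "input", not in "foo" */
	printf("goto -> bar (depth still %u)\n", sp);

	/* "bar" returns: pop and continue after input rule 3 */
	sp--;
	printf("return -> resume %s after rule %d (depth %u)\n",
	       stack[sp].chain, stack[sp].rulenum, sp);
	return 0;
}
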
+
+int __init nf_tables_core_module_init(void)
+{
+       int err;
+
+       err = nft_immediate_module_init();
+       if (err < 0)
+               goto err1;
+
+       err = nft_cmp_module_init();
+       if (err < 0)
+               goto err2;
+
+       err = nft_lookup_module_init();
+       if (err < 0)
+               goto err3;
+
+       err = nft_bitwise_module_init();
+       if (err < 0)
+               goto err4;
+
+       err = nft_byteorder_module_init();
+       if (err < 0)
+               goto err5;
+
+       err = nft_payload_module_init();
+       if (err < 0)
+               goto err6;
+
+       return 0;
+
+err6:
+       nft_byteorder_module_exit();
+err5:
+       nft_bitwise_module_exit();
+err4:
+       nft_lookup_module_exit();
+err3:
+       nft_cmp_module_exit();
+err2:
+       nft_immediate_module_exit();
+err1:
+       return err;
+}
+
+void nf_tables_core_module_exit(void)
+{
+       nft_payload_module_exit();
+       nft_byteorder_module_exit();
+       nft_bitwise_module_exit();
+       nft_lookup_module_exit();
+       nft_cmp_module_exit();
+       nft_immediate_module_exit();
+}
index 572d87dc116ffa838d2f9f8838129156add7284e..027f16af51a0f1bde86b1805259166b4befa18ff 100644 (file)
@@ -147,9 +147,6 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        const struct nfnetlink_subsystem *ss;
        int type, err;
 
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
        /* All the messages must at least contain nfgenmsg */
        if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
                return 0;
@@ -217,9 +214,179 @@ replay:
        }
 }
 
+static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
+                               u_int16_t subsys_id)
+{
+       struct sk_buff *nskb, *oskb = skb;
+       struct net *net = sock_net(skb->sk);
+       const struct nfnetlink_subsystem *ss;
+       const struct nfnl_callback *nc;
+       bool success = true, done = false;
+       int err;
+
+       if (subsys_id >= NFNL_SUBSYS_COUNT)
+               return netlink_ack(skb, nlh, -EINVAL);
+replay:
+       nskb = netlink_skb_clone(oskb, GFP_KERNEL);
+       if (!nskb)
+               return netlink_ack(oskb, nlh, -ENOMEM);
+
+       nskb->sk = oskb->sk;
+       skb = nskb;
+
+       nfnl_lock(subsys_id);
+       ss = rcu_dereference_protected(table[subsys_id].subsys,
+                                      lockdep_is_held(&table[subsys_id].mutex));
+       if (!ss) {
+#ifdef CONFIG_MODULES
+               nfnl_unlock(subsys_id);
+               request_module("nfnetlink-subsys-%d", subsys_id);
+               nfnl_lock(subsys_id);
+               ss = rcu_dereference_protected(table[subsys_id].subsys,
+                                              lockdep_is_held(&table[subsys_id].mutex));
+               if (!ss)
+#endif
+               {
+                       nfnl_unlock(subsys_id);
+                       kfree_skb(nskb);
+                       return netlink_ack(skb, nlh, -EOPNOTSUPP);
+               }
+       }
+
+       if (!ss->commit || !ss->abort) {
+               nfnl_unlock(subsys_id);
+               kfree_skb(nskb);
+               return netlink_ack(skb, nlh, -EOPNOTSUPP);
+       }
+
+       while (skb->len >= nlmsg_total_size(0)) {
+               int msglen, type;
+
+               nlh = nlmsg_hdr(skb);
+               err = 0;
+
+               if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+                       err = -EINVAL;
+                       goto ack;
+               }
+
+               /* Only requests are handled by the kernel */
+               if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
+                       err = -EINVAL;
+                       goto ack;
+               }
+
+               type = nlh->nlmsg_type;
+               if (type == NFNL_MSG_BATCH_BEGIN) {
+                       /* Malformed: Batch begin twice */
+                       success = false;
+                       goto done;
+               } else if (type == NFNL_MSG_BATCH_END) {
+                       done = true;
+                       goto done;
+               } else if (type < NLMSG_MIN_TYPE) {
+                       err = -EINVAL;
+                       goto ack;
+               }
+
+               /* We only accept a batch with messages for the same
+                * subsystem.
+                */
+               if (NFNL_SUBSYS_ID(type) != subsys_id) {
+                       err = -EINVAL;
+                       goto ack;
+               }
+
+               nc = nfnetlink_find_client(type, ss);
+               if (!nc) {
+                       err = -EINVAL;
+                       goto ack;
+               }
+
+               {
+                       int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
+                       u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
+                       struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
+                       struct nlattr *attr = (void *)nlh + min_len;
+                       int attrlen = nlh->nlmsg_len - min_len;
+
+                       err = nla_parse(cda, ss->cb[cb_id].attr_count,
+                                       attr, attrlen, ss->cb[cb_id].policy);
+                       if (err < 0)
+                               goto ack;
+
+                       if (nc->call_batch) {
+                               err = nc->call_batch(net->nfnl, skb, nlh,
+                                                    (const struct nlattr **)cda);
+                       }
+
+                       /* The lock was released to autoload some module, we
+                        * have to abort and start from scratch using the
+                        * original skb.
+                        */
+                       if (err == -EAGAIN) {
+                               ss->abort(skb);
+                               nfnl_unlock(subsys_id);
+                               kfree_skb(nskb);
+                               goto replay;
+                       }
+               }
+ack:
+               if (nlh->nlmsg_flags & NLM_F_ACK || err) {
+                       /* We don't stop processing the batch on errors, thus,
+                        * userspace gets all the errors that the batch
+                        * triggers.
+                        */
+                       netlink_ack(skb, nlh, err);
+                       if (err)
+                               success = false;
+               }
+
+               msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+               if (msglen > skb->len)
+                       msglen = skb->len;
+               skb_pull(skb, msglen);
+       }
+done:
+       if (success && done)
+               ss->commit(skb);
+       else
+               ss->abort(skb);
+
+       nfnl_unlock(subsys_id);
+       kfree_skb(nskb);
+}
+
 static void nfnetlink_rcv(struct sk_buff *skb)
 {
-       netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
+       struct nlmsghdr *nlh = nlmsg_hdr(skb);
+       struct net *net = sock_net(skb->sk);
+       int msglen;
+
+       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+               return netlink_ack(skb, nlh, -EPERM);
+
+       if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+           skb->len < nlh->nlmsg_len)
+               return;
+
+       if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN) {
+               struct nfgenmsg *nfgenmsg;
+
+               msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+               if (msglen > skb->len)
+                       msglen = skb->len;
+
+               if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+                   skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
+                       return;
+
+               nfgenmsg = nlmsg_data(nlh);
+               skb_pull(skb, msglen);
+               nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
+       } else {
+               netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
+       }
 }
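
The receive path above treats a stream that opens with NFNL_MSG_BATCH_BEGIN as
a transaction: every request up to NFNL_MSG_BATCH_END runs under one subsystem
lock and is then committed, or aborted if any message failed. A rough
userspace sketch of the framing; the message type values, the res_id
convention and the byte-order handling are assumptions here, and a real
client would use libmnl/libnftnl instead of building buffers by hand.

#include <stdio.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>	/* struct nfgenmsg */

#define TOY_BATCH_BEGIN	0x10	/* assumed NFNL_MSG_BATCH_BEGIN */
#define TOY_BATCH_END	0x11	/* assumed NFNL_MSG_BATCH_END */

static void put_msg(void *buf, __u16 type, __u16 res_id)
{
	struct nlmsghdr *nlh = buf;
	struct nfgenmsg *nfg;

	memset(nlh, 0, NLMSG_SPACE(sizeof(*nfg)));
	nlh->nlmsg_len   = NLMSG_LENGTH(sizeof(*nfg));
	nlh->nlmsg_type  = type;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	nfg = NLMSG_DATA(nlh);
	nfg->res_id = res_id;	/* subsystem id on BATCH_BEGIN; byte order omitted */
}

int main(void)
{
	unsigned char buf[256] __attribute__((aligned(NLMSG_ALIGNTO)));
	unsigned int off = 0;

	put_msg(buf + off, TOY_BATCH_BEGIN, 10 /* assumed nf_tables subsys id */);
	off += NLMSG_SPACE(sizeof(struct nfgenmsg));
	/* ... NFT_MSG_NEWTABLE/NEWCHAIN/NEWRULE requests would go here ... */
	put_msg(buf + off, TOY_BATCH_END, 0);
	off += NLMSG_SPACE(sizeof(struct nfgenmsg));

	printf("batch framing only: %u bytes for begin + end\n", off);
	return 0;
}
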
 
 #ifdef CONFIG_MODULES
index 50580494148d00433c092ee0ad2a7097a1653221..476accd171452fcfbfbe01018dcadd55fee41f67 100644 (file)
@@ -49,10 +49,8 @@ static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
 };
 
 static int
-ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
-                         struct nf_conntrack_l4proto *l4proto,
-                         struct net *net,
-                         const struct nlattr *attr)
+ctnl_timeout_parse_policy(void *timeouts, struct nf_conntrack_l4proto *l4proto,
+                         struct net *net, const struct nlattr *attr)
 {
        int ret = 0;
 
@@ -64,8 +62,7 @@ ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
                if (ret < 0)
                        return ret;
 
-               ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net,
-                                                         &timeout->data);
+               ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net, timeouts);
        }
        return ret;
 }
@@ -123,7 +120,8 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
                                goto err_proto_put;
                        }
 
-                       ret = ctnl_timeout_parse_policy(matching, l4proto, net,
+                       ret = ctnl_timeout_parse_policy(&matching->data,
+                                                       l4proto, net,
                                                        cda[CTA_TIMEOUT_DATA]);
                        return ret;
                }
@@ -138,7 +136,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
                goto err_proto_put;
        }
 
-       ret = ctnl_timeout_parse_policy(timeout, l4proto, net,
+       ret = ctnl_timeout_parse_policy(&timeout->data, l4proto, net,
                                        cda[CTA_TIMEOUT_DATA]);
        if (ret < 0)
                goto err;
@@ -342,6 +340,147 @@ cttimeout_del_timeout(struct sock *ctnl, struct sk_buff *skb,
        return ret;
 }
 
+static int
+cttimeout_default_set(struct sock *ctnl, struct sk_buff *skb,
+                     const struct nlmsghdr *nlh,
+                     const struct nlattr * const cda[])
+{
+       __u16 l3num;
+       __u8 l4num;
+       struct nf_conntrack_l4proto *l4proto;
+       struct net *net = sock_net(skb->sk);
+       unsigned int *timeouts;
+       int ret;
+
+       if (!cda[CTA_TIMEOUT_L3PROTO] ||
+           !cda[CTA_TIMEOUT_L4PROTO] ||
+           !cda[CTA_TIMEOUT_DATA])
+               return -EINVAL;
+
+       l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
+       l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+       /* This protocol is not supported, skip. */
+       if (l4proto->l4proto != l4num) {
+               ret = -EOPNOTSUPP;
+               goto err;
+       }
+
+       timeouts = l4proto->get_timeouts(net);
+
+       ret = ctnl_timeout_parse_policy(timeouts, l4proto, net,
+                                       cda[CTA_TIMEOUT_DATA]);
+       if (ret < 0)
+               goto err;
+
+       nf_ct_l4proto_put(l4proto);
+       return 0;
+err:
+       nf_ct_l4proto_put(l4proto);
+       return ret;
+}
+
+static int
+cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
+                           u32 seq, u32 type, int event,
+                           struct nf_conntrack_l4proto *l4proto)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
+
+       event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
+       if (nlh == NULL)
+               goto nlmsg_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family = AF_UNSPEC;
+       nfmsg->version = NFNETLINK_V0;
+       nfmsg->res_id = 0;
+
+       if (nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(l4proto->l3proto)) ||
+           nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, l4proto->l4proto))
+               goto nla_put_failure;
+
+       if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
+               struct nlattr *nest_parms;
+               unsigned int *timeouts = l4proto->get_timeouts(net);
+               int ret;
+
+               nest_parms = nla_nest_start(skb,
+                                           CTA_TIMEOUT_DATA | NLA_F_NESTED);
+               if (!nest_parms)
+                       goto nla_put_failure;
+
+               ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
+               if (ret < 0)
+                       goto nla_put_failure;
+
+               nla_nest_end(skb, nest_parms);
+       }
+
+       nlmsg_end(skb, nlh);
+       return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -1;
+}
+
+static int cttimeout_default_get(struct sock *ctnl, struct sk_buff *skb,
+                                const struct nlmsghdr *nlh,
+                                const struct nlattr * const cda[])
+{
+       __u16 l3num;
+       __u8 l4num;
+       struct nf_conntrack_l4proto *l4proto;
+       struct net *net = sock_net(skb->sk);
+       struct sk_buff *skb2;
+       int ret, err;
+
+       if (!cda[CTA_TIMEOUT_L3PROTO] || !cda[CTA_TIMEOUT_L4PROTO])
+               return -EINVAL;
+
+       l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
+       l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+       l4proto = nf_ct_l4proto_find_get(l3num, l4num);
+
+       /* This protocol is not supported, skip. */
+       if (l4proto->l4proto != l4num) {
+               err = -EOPNOTSUPP;
+               goto err;
+       }
+
+       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (skb2 == NULL) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       ret = cttimeout_default_fill_info(net, skb2, NETLINK_CB(skb).portid,
+                                         nlh->nlmsg_seq,
+                                         NFNL_MSG_TYPE(nlh->nlmsg_type),
+                                         IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
+                                         l4proto);
+       if (ret <= 0) {
+               kfree_skb(skb2);
+               err = -ENOMEM;
+               goto err;
+       }
+       ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
+       if (ret > 0)
+               ret = 0;
+
+       /* this avoids a loop in nfnetlink. */
+       return ret == -EAGAIN ? -ENOBUFS : ret;
+err:
+       nf_ct_l4proto_put(l4proto);
+       return err;
+}
+
 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 static struct ctnl_timeout *ctnl_timeout_find_get(const char *name)
 {
@@ -384,6 +523,12 @@ static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
        [IPCTNL_MSG_TIMEOUT_DELETE]     = { .call = cttimeout_del_timeout,
                                            .attr_count = CTA_TIMEOUT_MAX,
                                            .policy = cttimeout_nla_policy },
+       [IPCTNL_MSG_TIMEOUT_DEFAULT_SET]= { .call = cttimeout_default_set,
+                                           .attr_count = CTA_TIMEOUT_MAX,
+                                           .policy = cttimeout_nla_policy },
+       [IPCTNL_MSG_TIMEOUT_DEFAULT_GET]= { .call = cttimeout_default_get,
+                                           .attr_count = CTA_TIMEOUT_MAX,
+                                           .policy = cttimeout_nla_policy },
 };
 
 static const struct nfnetlink_subsystem cttimeout_subsys = {
index d92cc317bf8b25a0c371b770688c860169c9dea8..3c4b69e5fe17348b422f390bf79ab4269043fe5d 100644 (file)
@@ -319,7 +319,8 @@ nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags)
 }
 
 static struct sk_buff *
-nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size)
+nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
+                unsigned int pkt_size)
 {
        struct sk_buff *skb;
        unsigned int n;
@@ -328,13 +329,13 @@ nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size)
         * message.  WARNING: has to be <= 128k due to slab restrictions */
 
        n = max(inst_size, pkt_size);
-       skb = nfnetlink_alloc_skb(&init_net, n, peer_portid, GFP_ATOMIC);
+       skb = nfnetlink_alloc_skb(net, n, peer_portid, GFP_ATOMIC);
        if (!skb) {
                if (n > pkt_size) {
                        /* try to allocate only as much as we need for current
                         * packet */
 
-                       skb = nfnetlink_alloc_skb(&init_net, pkt_size,
+                       skb = nfnetlink_alloc_skb(net, pkt_size,
                                                  peer_portid, GFP_ATOMIC);
                        if (!skb)
                                pr_err("nfnetlink_log: can't even alloc %u bytes\n",
@@ -702,8 +703,8 @@ nfulnl_log_packet(struct net *net,
        }
 
        if (!inst->skb) {
-               inst->skb = nfulnl_alloc_skb(inst->peer_portid, inst->nlbufsiz,
-                                            size);
+               inst->skb = nfulnl_alloc_skb(net, inst->peer_portid,
+                                            inst->nlbufsiz, size);
                if (!inst->skb)
                        goto alloc_failure;
        }
index ae2e5c11d01ac9887c5158d0084f193c624373ef..21258cf7009118c3820c7b9575b4a4bd1b331f1f 100644 (file)
@@ -298,7 +298,7 @@ nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
 }
 
 static struct sk_buff *
-nfqnl_build_packet_message(struct nfqnl_instance *queue,
+nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry,
                           __be32 **packet_id_ptr)
 {
@@ -372,7 +372,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
        if (queue->flags & NFQA_CFG_F_CONNTRACK)
                ct = nfqnl_ct_get(entskb, &size, &ctinfo);
 
-       skb = nfnetlink_alloc_skb(&init_net, size, queue->peer_portid,
+       skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
                                  GFP_ATOMIC);
        if (!skb)
                return NULL;
@@ -525,7 +525,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
        __be32 *packet_id_ptr;
        int failopen = 0;
 
-       nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
+       nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
        if (nskb == NULL) {
                err = -ENOMEM;
                goto err_out;
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
new file mode 100644 (file)
index 0000000..4fb6ee2
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_bitwise {
+       enum nft_registers      sreg:8;
+       enum nft_registers      dreg:8;
+       u8                      len;
+       struct nft_data         mask;
+       struct nft_data         xor;
+};
+
+static void nft_bitwise_eval(const struct nft_expr *expr,
+                            struct nft_data data[NFT_REG_MAX + 1],
+                            const struct nft_pktinfo *pkt)
+{
+       const struct nft_bitwise *priv = nft_expr_priv(expr);
+       const struct nft_data *src = &data[priv->sreg];
+       struct nft_data *dst = &data[priv->dreg];
+       unsigned int i;
+
+       for (i = 0; i < DIV_ROUND_UP(priv->len, 4); i++) {
+               dst->data[i] = (src->data[i] & priv->mask.data[i]) ^
+                              priv->xor.data[i];
+       }
+}
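
The single (src & mask) ^ xor form evaluated above is enough to express the
usual boolean operations; assuming a constant c, AND is mask=c/xor=0, OR is
mask=~c/xor=c and NOT is mask=~0/xor=~0, which is why one expression type
covers all of them. A quick standalone check:

#include <assert.h>
#include <stdio.h>

static unsigned int bw(unsigned int src, unsigned int mask, unsigned int xr)
{
	return (src & mask) ^ xr;	/* same form as the eval loop above */
}

int main(void)
{
	unsigned int src = 0x12345678, c = 0x0000ff00;

	assert(bw(src, c, 0)     == (src & c));	/* AND c */
	assert(bw(src, ~c, c)    == (src | c));	/* OR c  */
	assert(bw(src, ~0U, ~0U) == ~src);	/* NOT   */
	printf("AND/OR/NOT encodings check out\n");
	return 0;
}
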
+
+static const struct nla_policy nft_bitwise_policy[NFTA_BITWISE_MAX + 1] = {
+       [NFTA_BITWISE_SREG]     = { .type = NLA_U32 },
+       [NFTA_BITWISE_DREG]     = { .type = NLA_U32 },
+       [NFTA_BITWISE_LEN]      = { .type = NLA_U32 },
+       [NFTA_BITWISE_MASK]     = { .type = NLA_NESTED },
+       [NFTA_BITWISE_XOR]      = { .type = NLA_NESTED },
+};
+
+static int nft_bitwise_init(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nlattr * const tb[])
+{
+       struct nft_bitwise *priv = nft_expr_priv(expr);
+       struct nft_data_desc d1, d2;
+       int err;
+
+       if (tb[NFTA_BITWISE_SREG] == NULL ||
+           tb[NFTA_BITWISE_DREG] == NULL ||
+           tb[NFTA_BITWISE_LEN] == NULL ||
+           tb[NFTA_BITWISE_MASK] == NULL ||
+           tb[NFTA_BITWISE_XOR] == NULL)
+               return -EINVAL;
+
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_BITWISE_SREG]));
+       err = nft_validate_input_register(priv->sreg);
+       if (err < 0)
+               return err;
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_BITWISE_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               return err;
+
+       priv->len = ntohl(nla_get_be32(tb[NFTA_BITWISE_LEN]));
+
+       err = nft_data_init(NULL, &priv->mask, &d1, tb[NFTA_BITWISE_MASK]);
+       if (err < 0)
+               return err;
+       if (d1.len != priv->len)
+               return -EINVAL;
+
+       err = nft_data_init(NULL, &priv->xor, &d2, tb[NFTA_BITWISE_XOR]);
+       if (err < 0)
+               return err;
+       if (d2.len != priv->len)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_bitwise *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_BITWISE_SREG, htonl(priv->sreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BITWISE_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(priv->len)))
+               goto nla_put_failure;
+
+       if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask,
+                         NFT_DATA_VALUE, priv->len) < 0)
+               goto nla_put_failure;
+
+       if (nft_data_dump(skb, NFTA_BITWISE_XOR, &priv->xor,
+                         NFT_DATA_VALUE, priv->len) < 0)
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_bitwise_type;
+static const struct nft_expr_ops nft_bitwise_ops = {
+       .type           = &nft_bitwise_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_bitwise)),
+       .eval           = nft_bitwise_eval,
+       .init           = nft_bitwise_init,
+       .dump           = nft_bitwise_dump,
+};
+
+static struct nft_expr_type nft_bitwise_type __read_mostly = {
+       .name           = "bitwise",
+       .ops            = &nft_bitwise_ops,
+       .policy         = nft_bitwise_policy,
+       .maxattr        = NFTA_BITWISE_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_bitwise_module_init(void)
+{
+       return nft_register_expr(&nft_bitwise_type);
+}
+
+void nft_bitwise_module_exit(void)
+{
+       nft_unregister_expr(&nft_bitwise_type);
+}
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c
new file mode 100644 (file)
index 0000000..c39ed8d
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_byteorder {
+       enum nft_registers      sreg:8;
+       enum nft_registers      dreg:8;
+       enum nft_byteorder_ops  op:8;
+       u8                      len;
+       u8                      size;
+};
+
+static void nft_byteorder_eval(const struct nft_expr *expr,
+                              struct nft_data data[NFT_REG_MAX + 1],
+                              const struct nft_pktinfo *pkt)
+{
+       const struct nft_byteorder *priv = nft_expr_priv(expr);
+       struct nft_data *src = &data[priv->sreg], *dst = &data[priv->dreg];
+       union { u32 u32; u16 u16; } *s, *d;
+       unsigned int i;
+
+       s = (void *)src->data;
+       d = (void *)dst->data;
+
+       switch (priv->size) {
+       case 4:
+               switch (priv->op) {
+               case NFT_BYTEORDER_NTOH:
+                       for (i = 0; i < priv->len / 4; i++)
+                               d[i].u32 = ntohl((__force __be32)s[i].u32);
+                       break;
+               case NFT_BYTEORDER_HTON:
+                       for (i = 0; i < priv->len / 4; i++)
+                               d[i].u32 = (__force __u32)htonl(s[i].u32);
+                       break;
+               }
+               break;
+       case 2:
+               switch (priv->op) {
+               case NFT_BYTEORDER_NTOH:
+                       for (i = 0; i < priv->len / 2; i++)
+                               d[i].u16 = ntohs((__force __be16)s[i].u16);
+                       break;
+               case NFT_BYTEORDER_HTON:
+                       for (i = 0; i < priv->len / 2; i++)
+                               d[i].u16 = (__force __u16)htons(s[i].u16);
+                       break;
+               }
+               break;
+       }
+}
+
+static const struct nla_policy nft_byteorder_policy[NFTA_BYTEORDER_MAX + 1] = {
+       [NFTA_BYTEORDER_SREG]   = { .type = NLA_U32 },
+       [NFTA_BYTEORDER_DREG]   = { .type = NLA_U32 },
+       [NFTA_BYTEORDER_OP]     = { .type = NLA_U32 },
+       [NFTA_BYTEORDER_LEN]    = { .type = NLA_U32 },
+       [NFTA_BYTEORDER_SIZE]   = { .type = NLA_U32 },
+};
+
+static int nft_byteorder_init(const struct nft_ctx *ctx,
+                             const struct nft_expr *expr,
+                             const struct nlattr * const tb[])
+{
+       struct nft_byteorder *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_BYTEORDER_SREG] == NULL ||
+           tb[NFTA_BYTEORDER_DREG] == NULL ||
+           tb[NFTA_BYTEORDER_LEN] == NULL ||
+           tb[NFTA_BYTEORDER_SIZE] == NULL ||
+           tb[NFTA_BYTEORDER_OP] == NULL)
+               return -EINVAL;
+
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SREG]));
+       err = nft_validate_input_register(priv->sreg);
+       if (err < 0)
+               return err;
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               return err;
+
+       priv->op = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_OP]));
+       switch (priv->op) {
+       case NFT_BYTEORDER_NTOH:
+       case NFT_BYTEORDER_HTON:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       priv->len = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_LEN]));
+       if (priv->len == 0 || priv->len > FIELD_SIZEOF(struct nft_data, data))
+               return -EINVAL;
+
+       priv->size = ntohl(nla_get_be32(tb[NFTA_BYTEORDER_SIZE]));
+       switch (priv->size) {
+       case 2:
+       case 4:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int nft_byteorder_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_byteorder *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_BYTEORDER_SREG, htonl(priv->sreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BYTEORDER_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BYTEORDER_OP, htonl(priv->op)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BYTEORDER_LEN, htonl(priv->len)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_BYTEORDER_SIZE, htonl(priv->size)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_byteorder_type;
+static const struct nft_expr_ops nft_byteorder_ops = {
+       .type           = &nft_byteorder_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_byteorder)),
+       .eval           = nft_byteorder_eval,
+       .init           = nft_byteorder_init,
+       .dump           = nft_byteorder_dump,
+};
+
+static struct nft_expr_type nft_byteorder_type __read_mostly = {
+       .name           = "byteorder",
+       .ops            = &nft_byteorder_ops,
+       .policy         = nft_byteorder_policy,
+       .maxattr        = NFTA_BYTEORDER_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_byteorder_module_init(void)
+{
+       return nft_register_expr(&nft_byteorder_type);
+}
+
+void nft_byteorder_module_exit(void)
+{
+       nft_unregister_expr(&nft_byteorder_type);
+}
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
new file mode 100644 (file)
index 0000000..954925d
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_cmp_expr {
+       struct nft_data         data;
+       enum nft_registers      sreg:8;
+       u8                      len;
+       enum nft_cmp_ops        op:8;
+};
+
+static void nft_cmp_eval(const struct nft_expr *expr,
+                        struct nft_data data[NFT_REG_MAX + 1],
+                        const struct nft_pktinfo *pkt)
+{
+       const struct nft_cmp_expr *priv = nft_expr_priv(expr);
+       int d;
+
+       d = nft_data_cmp(&data[priv->sreg], &priv->data, priv->len);
+       switch (priv->op) {
+       case NFT_CMP_EQ:
+               if (d != 0)
+                       goto mismatch;
+               break;
+       case NFT_CMP_NEQ:
+               if (d == 0)
+                       goto mismatch;
+               break;
+       case NFT_CMP_LT:
+               if (d == 0)
+                       goto mismatch;
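+               /* fall through: combined with the LTE check, only d < 0 matches */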
+       case NFT_CMP_LTE:
+               if (d > 0)
+                       goto mismatch;
+               break;
+       case NFT_CMP_GT:
+               if (d == 0)
+                       goto mismatch;
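+               /* fall through: combined with the GTE check, only d > 0 matches */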
+       case NFT_CMP_GTE:
+               if (d < 0)
+                       goto mismatch;
+               break;
+       }
+       return;
+
+mismatch:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
+       [NFTA_CMP_SREG]         = { .type = NLA_U32 },
+       [NFTA_CMP_OP]           = { .type = NLA_U32 },
+       [NFTA_CMP_DATA]         = { .type = NLA_NESTED },
+};
+
+static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                       const struct nlattr * const tb[])
+{
+       struct nft_cmp_expr *priv = nft_expr_priv(expr);
+       struct nft_data_desc desc;
+       int err;
+
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+       priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
+
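+       /* the data attribute was already parsed successfully by nft_cmp_select_ops() */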
+       err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]);
+       BUG_ON(err < 0);
+
+       priv->len = desc.len;
+       return 0;
+}
+
+static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_cmp_expr *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_CMP_SREG, htonl(priv->sreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
+               goto nla_put_failure;
+
+       if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
+                         NFT_DATA_VALUE, priv->len) < 0)
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_cmp_type;
+static const struct nft_expr_ops nft_cmp_ops = {
+       .type           = &nft_cmp_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
+       .eval           = nft_cmp_eval,
+       .init           = nft_cmp_init,
+       .dump           = nft_cmp_dump,
+};
+
+static int nft_cmp_fast_init(const struct nft_ctx *ctx,
+                            const struct nft_expr *expr,
+                            const struct nlattr * const tb[])
+{
+       struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+       struct nft_data_desc desc;
+       struct nft_data data;
+       u32 mask;
+       int err;
+
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+
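+       /* the data attribute was already parsed successfully by nft_cmp_select_ops() */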
+       err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
+       BUG_ON(err < 0);
+       desc.len *= BITS_PER_BYTE;
+
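+       /* mask out the bits beyond desc.len so they cannot affect the comparison */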
+       mask = ~0U >> (sizeof(priv->data) * BITS_PER_BYTE - desc.len);
+       priv->data = data.data[0] & mask;
+       priv->len  = desc.len;
+       return 0;
+}
+
+static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+       struct nft_data data;
+
+       if (nla_put_be32(skb, NFTA_CMP_SREG, htonl(priv->sreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_CMP_OP, htonl(NFT_CMP_EQ)))
+               goto nla_put_failure;
+
+       data.data[0] = priv->data;
+       if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
+                         NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+const struct nft_expr_ops nft_cmp_fast_ops = {
+       .type           = &nft_cmp_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
+       .eval           = NULL, /* inlined */
+       .init           = nft_cmp_fast_init,
+       .dump           = nft_cmp_fast_dump,
+};
+
+static const struct nft_expr_ops *
+nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
+{
+       struct nft_data_desc desc;
+       struct nft_data data;
+       enum nft_registers sreg;
+       enum nft_cmp_ops op;
+       int err;
+
+       if (tb[NFTA_CMP_SREG] == NULL ||
+           tb[NFTA_CMP_OP] == NULL ||
+           tb[NFTA_CMP_DATA] == NULL)
+               return ERR_PTR(-EINVAL);
+
+       sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG]));
+       err = nft_validate_input_register(sreg);
+       if (err < 0)
+               return ERR_PTR(err);
+
+       op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
+       switch (op) {
+       case NFT_CMP_EQ:
+       case NFT_CMP_NEQ:
+       case NFT_CMP_LT:
+       case NFT_CMP_LTE:
+       case NFT_CMP_GT:
+       case NFT_CMP_GTE:
+               break;
+       default:
+               return ERR_PTR(-EINVAL);
+       }
+
+       err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]);
+       if (err < 0)
+               return ERR_PTR(err);
+
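+       /* equality tests on at most 32 bits can use the inlined fast path */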
+       if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ)
+               return &nft_cmp_fast_ops;
+       else
+               return &nft_cmp_ops;
+}
+
+static struct nft_expr_type nft_cmp_type __read_mostly = {
+       .name           = "cmp",
+       .select_ops     = nft_cmp_select_ops,
+       .policy         = nft_cmp_policy,
+       .maxattr        = NFTA_CMP_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_cmp_module_init(void)
+{
+       return nft_register_expr(&nft_cmp_type);
+}
+
+void nft_cmp_module_exit(void)
+{
+       nft_unregister_expr(&nft_cmp_type);
+}
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
new file mode 100644 (file)
index 0000000..4811f76
--- /dev/null
@@ -0,0 +1,768 @@
+/*
+ * (C) 2012-2013 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This software has been sponsored by Sophos Astaro <http://www.sophos.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <linux/netfilter/nf_tables_compat.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <asm/uaccess.h> /* for set_fs */
+#include <net/netfilter/nf_tables.h>
+
+union nft_entry {
+       struct ipt_entry e4;
+       struct ip6t_entry e6;
+};
+
+static inline void
+nft_compat_set_par(struct xt_action_param *par, void *xt, const void *xt_info)
+{
+       par->target     = xt;
+       par->targinfo   = xt_info;
+       par->hotdrop    = false;
+}
+
+static void nft_target_eval(const struct nft_expr *expr,
+                           struct nft_data data[NFT_REG_MAX + 1],
+                           const struct nft_pktinfo *pkt)
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_target *target = expr->ops->data;
+       struct sk_buff *skb = pkt->skb;
+       int ret;
+
+       nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info);
+
+       ret = target->target(skb, &pkt->xt);
+
+       if (pkt->xt.hotdrop)
+               ret = NF_DROP;
+
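+       /* XT_CONTINUE becomes NFT_CONTINUE; any other xtables verdict is passed through */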
+       switch (ret) {
+       case XT_CONTINUE:
+               data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+               break;
+       default:
+               data[NFT_REG_VERDICT].verdict = ret;
+               break;
+       }
+       return;
+}
+
+static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = {
+       [NFTA_TARGET_NAME]      = { .type = NLA_NUL_STRING },
+       [NFTA_TARGET_REV]       = { .type = NLA_U32 },
+       [NFTA_TARGET_INFO]      = { .type = NLA_BINARY },
+};
+
+static void
+nft_target_set_tgchk_param(struct xt_tgchk_param *par,
+                          const struct nft_ctx *ctx,
+                          struct xt_target *target, void *info,
+                          union nft_entry *entry, u8 proto, bool inv)
+{
+       par->net        = &init_net;
+       par->table      = ctx->table->name;
+       switch (ctx->afi->family) {
+       case AF_INET:
+               entry->e4.ip.proto = proto;
+               entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
+               break;
+       case AF_INET6:
+               entry->e6.ipv6.proto = proto;
+               entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
+               break;
+       }
+       par->entryinfo  = entry;
+       par->target     = target;
+       par->targinfo   = info;
+       if (ctx->chain->flags & NFT_BASE_CHAIN) {
+               const struct nft_base_chain *basechain =
+                                               nft_base_chain(ctx->chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
+
+               par->hook_mask = 1 << ops->hooknum;
+       }
+       par->family     = ctx->afi->family;
+}
+
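+/* Copy the target info, converting from the 32-bit compat layout when needed. */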
+static void target_compat_from_user(struct xt_target *t, void *in, void *out)
+{
+#ifdef CONFIG_COMPAT
+       if (t->compat_from_user) {
+               int pad;
+
+               t->compat_from_user(out, in);
+               pad = XT_ALIGN(t->targetsize) - t->targetsize;
+               if (pad > 0)
+                       memset(out + t->targetsize, 0, pad);
+       } else
+#endif
+               memcpy(out, in, XT_ALIGN(t->targetsize));
+}
+
+static inline int nft_compat_target_offset(struct xt_target *target)
+{
+#ifdef CONFIG_COMPAT
+       return xt_compat_target_offset(target);
+#else
+       return 0;
+#endif
+}
+
+static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] = {
+       [NFTA_RULE_COMPAT_PROTO]        = { .type = NLA_U32 },
+       [NFTA_RULE_COMPAT_FLAGS]        = { .type = NLA_U32 },
+};
+
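+/* Extract the L4 protocol number and inversion flag from the rule's compat attributes. */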
+static u8 nft_parse_compat(const struct nlattr *attr, bool *inv)
+{
+       struct nlattr *tb[NFTA_RULE_COMPAT_MAX+1];
+       u32 flags;
+       int err;
+
+       err = nla_parse_nested(tb, NFTA_RULE_COMPAT_MAX, attr,
+                              nft_rule_compat_policy);
+       if (err < 0)
+               return err;
+
+       if (!tb[NFTA_RULE_COMPAT_PROTO] || !tb[NFTA_RULE_COMPAT_FLAGS])
+               return -EINVAL;
+
+       flags = ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_FLAGS]));
+       if (flags & ~NFT_RULE_COMPAT_F_MASK)
+               return -EINVAL;
+       if (flags & NFT_RULE_COMPAT_F_INV)
+               *inv = true;
+
+       return ntohl(nla_get_be32(tb[NFTA_RULE_COMPAT_PROTO]));
+}
+
+static int
+nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+               const struct nlattr * const tb[])
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_target *target = expr->ops->data;
+       struct xt_tgchk_param par;
+       size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
+       u8 proto = 0;
+       bool inv = false;
+       union nft_entry e = {};
+       int ret;
+
+       target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
+
+       if (ctx->nla[NFTA_RULE_COMPAT])
+               proto = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &inv);
+
+       nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
+
+       ret = xt_check_target(&par, size, proto, inv);
+       if (ret < 0)
+               goto err;
+
+       /* The standard target cannot be used */
+       if (target->target == NULL) {
+               ret = -EINVAL;
+               goto err;
+       }
+
+       return 0;
+err:
+       module_put(target->me);
+       return ret;
+}
+
+static void
+nft_target_destroy(const struct nft_expr *expr)
+{
+       struct xt_target *target = expr->ops->data;
+
+       module_put(target->me);
+}
+
+static int
+target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
+{
+       int ret;
+
+#ifdef CONFIG_COMPAT
+       if (t->compat_to_user) {
+               mm_segment_t old_fs;
+               void *out;
+
+               out = kmalloc(XT_ALIGN(t->targetsize), GFP_ATOMIC);
+               if (out == NULL)
+                       return -ENOMEM;
+
+               /* We want to reuse existing compat_to_user */
+               old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               t->compat_to_user(out, in);
+               set_fs(old_fs);
+               ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
+               kfree(out);
+       } else
+#endif
+               ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), in);
+
+       return ret;
+}
+
+static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct xt_target *target = expr->ops->data;
+       void *info = nft_expr_priv(expr);
+
+       if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
+           nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
+           target_dump_info(skb, target, info))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static int nft_target_validate(const struct nft_ctx *ctx,
+                              const struct nft_expr *expr,
+                              const struct nft_data **data)
+{
+       struct xt_target *target = expr->ops->data;
+       unsigned int hook_mask = 0;
+
+       if (ctx->chain->flags & NFT_BASE_CHAIN) {
+               const struct nft_base_chain *basechain =
+                                               nft_base_chain(ctx->chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
+
+               hook_mask = 1 << ops->hooknum;
+               if (hook_mask & target->hooks)
+                       return 0;
+
+               /* This target is being called from an invalid chain */
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void nft_match_eval(const struct nft_expr *expr,
+                          struct nft_data data[NFT_REG_MAX + 1],
+                          const struct nft_pktinfo *pkt)
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_match *match = expr->ops->data;
+       struct sk_buff *skb = pkt->skb;
+       bool ret;
+
+       nft_compat_set_par((struct xt_action_param *)&pkt->xt, match, info);
+
+       ret = match->match(skb, (struct xt_action_param *)&pkt->xt);
+
+       if (pkt->xt.hotdrop) {
+               data[NFT_REG_VERDICT].verdict = NF_DROP;
+               return;
+       }
+
+       if (ret)
+               data[NFT_REG_VERDICT].verdict = NFT_CONTINUE;
+       else
+               data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
+       [NFTA_MATCH_NAME]       = { .type = NLA_NUL_STRING },
+       [NFTA_MATCH_REV]        = { .type = NLA_U32 },
+       [NFTA_MATCH_INFO]       = { .type = NLA_BINARY },
+};
+
+/* struct xt_mtchk_param and xt_tgchk_param look very similar */
+static void
+nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
+                         struct xt_match *match, void *info,
+                         union nft_entry *entry, u8 proto, bool inv)
+{
+       par->net        = &init_net;
+       par->table      = ctx->table->name;
+       switch (ctx->afi->family) {
+       case AF_INET:
+               entry->e4.ip.proto = proto;
+               entry->e4.ip.invflags = inv ? IPT_INV_PROTO : 0;
+               break;
+       case AF_INET6:
+               entry->e6.ipv6.proto = proto;
+               entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0;
+               break;
+       }
+       par->entryinfo  = entry;
+       par->match      = match;
+       par->matchinfo  = info;
+       if (ctx->chain->flags & NFT_BASE_CHAIN) {
+               const struct nft_base_chain *basechain =
+                                               nft_base_chain(ctx->chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
+
+               par->hook_mask = 1 << ops->hooknum;
+       }
+       par->family     = ctx->afi->family;
+}
+
+static void match_compat_from_user(struct xt_match *m, void *in, void *out)
+{
+#ifdef CONFIG_COMPAT
+       if (m->compat_from_user) {
+               int pad;
+
+               m->compat_from_user(out, in);
+               pad = XT_ALIGN(m->matchsize) - m->matchsize;
+               if (pad > 0)
+                       memset(out + m->matchsize, 0, pad);
+       } else
+#endif
+               memcpy(out, in, XT_ALIGN(m->matchsize));
+}
+
+static int
+nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+               const struct nlattr * const tb[])
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_match *match = expr->ops->data;
+       struct xt_mtchk_param par;
+       size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
+       u8 proto = 0;
+       bool inv = false;
+       union nft_entry e = {};
+       int ret;
+
+       match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
+
+       if (ctx->nla[NFTA_RULE_COMPAT])
+               proto = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &inv);
+
+       nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
+
+       ret = xt_check_match(&par, size, proto, inv);
+       if (ret < 0)
+               goto err;
+
+       return 0;
+err:
+       module_put(match->me);
+       return ret;
+}
+
+static void
+nft_match_destroy(const struct nft_expr *expr)
+{
+       struct xt_match *match = expr->ops->data;
+
+       module_put(match->me);
+}
+
+static int
+match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
+{
+       int ret;
+
+#ifdef CONFIG_COMPAT
+       if (m->compat_to_user) {
+               mm_segment_t old_fs;
+               void *out;
+
+               out = kmalloc(XT_ALIGN(m->matchsize), GFP_ATOMIC);
+               if (out == NULL)
+                       return -ENOMEM;
+
+               /* We want to reuse existing compat_to_user */
+               old_fs = get_fs();
+               set_fs(KERNEL_DS);
+               m->compat_to_user(out, in);
+               set_fs(old_fs);
+               ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
+               kfree(out);
+       } else
+#endif
+               ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), in);
+
+       return ret;
+}
+
+static inline int nft_compat_match_offset(struct xt_match *match)
+{
+#ifdef CONFIG_COMPAT
+       return xt_compat_match_offset(match);
+#else
+       return 0;
+#endif
+}
+
+static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       void *info = nft_expr_priv(expr);
+       struct xt_match *match = expr->ops->data;
+
+       if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
+           nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
+           match_dump_info(skb, match, info))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static int nft_match_validate(const struct nft_ctx *ctx,
+                             const struct nft_expr *expr,
+                             const struct nft_data **data)
+{
+       struct xt_match *match = expr->ops->data;
+       unsigned int hook_mask = 0;
+
+       if (ctx->chain->flags & NFT_BASE_CHAIN) {
+               const struct nft_base_chain *basechain =
+                                               nft_base_chain(ctx->chain);
+               const struct nf_hook_ops *ops = &basechain->ops;
+
+               hook_mask = 1 << ops->hooknum;
+               if (hook_mask & match->hooks)
+                       return 0;
+
+               /* This match is being called from an invalid chain */
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int
+nfnl_compat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
+                     int event, u16 family, const char *name,
+                     int rev, int target)
+{
+       struct nlmsghdr *nlh;
+       struct nfgenmsg *nfmsg;
+       unsigned int flags = portid ? NLM_F_MULTI : 0;
+
+       event |= NFNL_SUBSYS_NFT_COMPAT << 8;
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
+       if (nlh == NULL)
+               goto nlmsg_failure;
+
+       nfmsg = nlmsg_data(nlh);
+       nfmsg->nfgen_family = family;
+       nfmsg->version = NFNETLINK_V0;
+       nfmsg->res_id = 0;
+
+       if (nla_put_string(skb, NFTA_COMPAT_NAME, name) ||
+           nla_put_be32(skb, NFTA_COMPAT_REV, htonl(rev)) ||
+           nla_put_be32(skb, NFTA_COMPAT_TYPE, htonl(target)))
+               goto nla_put_failure;
+
+       nlmsg_end(skb, nlh);
+       return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+       nlmsg_cancel(skb, nlh);
+       return -1;
+}
+
+static int
+nfnl_compat_get(struct sock *nfnl, struct sk_buff *skb,
+               const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+       int ret = 0, target;
+       struct nfgenmsg *nfmsg;
+       const char *fmt;
+       const char *name;
+       u32 rev;
+       struct sk_buff *skb2;
+
+       if (tb[NFTA_COMPAT_NAME] == NULL ||
+           tb[NFTA_COMPAT_REV] == NULL ||
+           tb[NFTA_COMPAT_TYPE] == NULL)
+               return -EINVAL;
+
+       name = nla_data(tb[NFTA_COMPAT_NAME]);
+       rev = ntohl(nla_get_be32(tb[NFTA_COMPAT_REV]));
+       target = ntohl(nla_get_be32(tb[NFTA_COMPAT_TYPE]));
+
+       nfmsg = nlmsg_data(nlh);
+
+       switch (nfmsg->nfgen_family) {
+       case AF_INET:
+               fmt = "ipt_%s";
+               break;
+       case AF_INET6:
+               fmt = "ip6t_%s";
+               break;
+       default:
+               pr_err("nft_compat: unsupported protocol %d\n",
+                       nfmsg->nfgen_family);
+               return -EINVAL;
+       }
+
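+       /* find the best revision, autoloading the ipt_/ip6t_ module if necessary */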
+       try_then_request_module(xt_find_revision(nfmsg->nfgen_family, name,
+                                                rev, target, &ret),
+                                                fmt, name);
+
+       if (ret < 0)
+               return ret;
+
+       skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (skb2 == NULL)
+               return -ENOMEM;
+
+       /* include the best revision for this extension in the message */
+       if (nfnl_compat_fill_info(skb2, NETLINK_CB(skb).portid,
+                                 nlh->nlmsg_seq,
+                                 NFNL_MSG_TYPE(nlh->nlmsg_type),
+                                 NFNL_MSG_COMPAT_GET,
+                                 nfmsg->nfgen_family,
+                                 name, ret, target) <= 0) {
+               kfree_skb(skb2);
+               return -ENOSPC;
+       }
+
+       ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+                               MSG_DONTWAIT);
+       if (ret > 0)
+               ret = 0;
+
+       return ret == -EAGAIN ? -ENOBUFS : ret;
+}
+
+static const struct nla_policy nfnl_compat_policy_get[NFTA_COMPAT_MAX+1] = {
+       [NFTA_COMPAT_NAME]      = { .type = NLA_NUL_STRING,
+                                   .len = NFT_COMPAT_NAME_MAX-1 },
+       [NFTA_COMPAT_REV]       = { .type = NLA_U32 },
+       [NFTA_COMPAT_TYPE]      = { .type = NLA_U32 },
+};
+
+static const struct nfnl_callback nfnl_nft_compat_cb[NFNL_MSG_COMPAT_MAX] = {
+       [NFNL_MSG_COMPAT_GET]           = { .call = nfnl_compat_get,
+                                           .attr_count = NFTA_COMPAT_MAX,
+                                           .policy = nfnl_compat_policy_get },
+};
+
+static const struct nfnetlink_subsystem nfnl_compat_subsys = {
+       .name           = "nft-compat",
+       .subsys_id      = NFNL_SUBSYS_NFT_COMPAT,
+       .cb_count       = NFNL_MSG_COMPAT_MAX,
+       .cb             = nfnl_nft_compat_cb,
+};
+
+static LIST_HEAD(nft_match_list);
+
+struct nft_xt {
+       struct list_head        head;
+       struct nft_expr_ops     ops;
+};
+
+static struct nft_expr_type nft_match_type;
+
+static const struct nft_expr_ops *
+nft_match_select_ops(const struct nft_ctx *ctx,
+                    const struct nlattr * const tb[])
+{
+       struct nft_xt *nft_match;
+       struct xt_match *match;
+       char *mt_name;
+       __u32 rev, family;
+
+       if (tb[NFTA_MATCH_NAME] == NULL ||
+           tb[NFTA_MATCH_REV] == NULL ||
+           tb[NFTA_MATCH_INFO] == NULL)
+               return ERR_PTR(-EINVAL);
+
+       mt_name = nla_data(tb[NFTA_MATCH_NAME]);
+       rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV]));
+       family = ctx->afi->family;
+
+       /* Re-use the existing match if it's already loaded. */
+       list_for_each_entry(nft_match, &nft_match_list, head) {
+               struct xt_match *match = nft_match->ops.data;
+
+               if (strcmp(match->name, mt_name) == 0 &&
+                   match->revision == rev && match->family == family)
+                       return &nft_match->ops;
+       }
+
+       match = xt_request_find_match(family, mt_name, rev);
+       if (IS_ERR(match))
+               return ERR_PTR(-ENOENT);
+
+       /* First use of this match: allocate its operations */
+       nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
+       if (nft_match == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       nft_match->ops.type = &nft_match_type;
+       nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize) +
+                                           nft_compat_match_offset(match));
+       nft_match->ops.eval = nft_match_eval;
+       nft_match->ops.init = nft_match_init;
+       nft_match->ops.destroy = nft_match_destroy;
+       nft_match->ops.dump = nft_match_dump;
+       nft_match->ops.validate = nft_match_validate;
+       nft_match->ops.data = match;
+
+       list_add(&nft_match->head, &nft_match_list);
+
+       return &nft_match->ops;
+}
+
+static void nft_match_release(void)
+{
+       struct nft_xt *nft_match, *tmp;
+
+       /* each entry is freed while walking, so use the _safe iterator */
+       list_for_each_entry_safe(nft_match, tmp, &nft_match_list, head)
+               kfree(nft_match);
+}
+
+static struct nft_expr_type nft_match_type __read_mostly = {
+       .name           = "match",
+       .select_ops     = nft_match_select_ops,
+       .policy         = nft_match_policy,
+       .maxattr        = NFTA_MATCH_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static LIST_HEAD(nft_target_list);
+
+static struct nft_expr_type nft_target_type;
+
+static const struct nft_expr_ops *
+nft_target_select_ops(const struct nft_ctx *ctx,
+                     const struct nlattr * const tb[])
+{
+       struct nft_xt *nft_target;
+       struct xt_target *target;
+       char *tg_name;
+       __u32 rev, family;
+
+       if (tb[NFTA_TARGET_NAME] == NULL ||
+           tb[NFTA_TARGET_REV] == NULL ||
+           tb[NFTA_TARGET_INFO] == NULL)
+               return ERR_PTR(-EINVAL);
+
+       tg_name = nla_data(tb[NFTA_TARGET_NAME]);
+       rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
+       family = ctx->afi->family;
+
+       /* Re-use the existing target if it's already loaded. */
+       list_for_each_entry(nft_target, &nft_target_list, head) {
+               struct xt_target *target = nft_target->ops.data;
+
+               if (strcmp(target->name, tg_name) == 0 &&
+                   target->revision == rev && target->family == family)
+                       return &nft_target->ops;
+       }
+
+       target = xt_request_find_target(family, tg_name, rev);
+       if (IS_ERR(target))
+               return ERR_PTR(-ENOENT);
+
+       /* First use of this target: allocate its operations */
+       nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL);
+       if (nft_target == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       nft_target->ops.type = &nft_target_type;
+       nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize) +
+                                            nft_compat_target_offset(target));
+       nft_target->ops.eval = nft_target_eval;
+       nft_target->ops.init = nft_target_init;
+       nft_target->ops.destroy = nft_target_destroy;
+       nft_target->ops.dump = nft_target_dump;
+       nft_target->ops.validate = nft_target_validate;
+       nft_target->ops.data = target;
+
+       list_add(&nft_target->head, &nft_target_list);
+
+       return &nft_target->ops;
+}
+
+static void nft_target_release(void)
+{
+       struct nft_xt *nft_target, *tmp;
+
+       /* each entry is freed while walking, so use the _safe iterator */
+       list_for_each_entry_safe(nft_target, tmp, &nft_target_list, head)
+               kfree(nft_target);
+}
+
+static struct nft_expr_type nft_target_type __read_mostly = {
+       .name           = "target",
+       .select_ops     = nft_target_select_ops,
+       .policy         = nft_target_policy,
+       .maxattr        = NFTA_TARGET_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_compat_module_init(void)
+{
+       int ret;
+
+       ret = nft_register_expr(&nft_match_type);
+       if (ret < 0)
+               return ret;
+
+       ret = nft_register_expr(&nft_target_type);
+       if (ret < 0)
+               goto err_match;
+
+       ret = nfnetlink_subsys_register(&nfnl_compat_subsys);
+       if (ret < 0) {
+               pr_err("nft_compat: cannot register with nfnetlink.\n");
+               goto err_target;
+       }
+
+       pr_info("nf_tables_compat: (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>\n");
+
+       return ret;
+
+err_target:
+       nft_unregister_expr(&nft_target_type);
+err_match:
+       nft_unregister_expr(&nft_match_type);
+       return ret;
+}
+
+static void __exit nft_compat_module_exit(void)
+{
+       nfnetlink_subsys_unregister(&nfnl_compat_subsys);
+       nft_unregister_expr(&nft_target_type);
+       nft_unregister_expr(&nft_match_type);
+       nft_match_release();
+       nft_target_release();
+}
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
+
+module_init(nft_compat_module_init);
+module_exit(nft_compat_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_EXPR("match");
+MODULE_ALIAS_NFT_EXPR("target");
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
new file mode 100644 (file)
index 0000000..c89ee48
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/seqlock.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_counter {
+       seqlock_t       lock;
+       u64             bytes;
+       u64             packets;
+};
+
+static void nft_counter_eval(const struct nft_expr *expr,
+                            struct nft_data data[NFT_REG_MAX + 1],
+                            const struct nft_pktinfo *pkt)
+{
+       struct nft_counter *priv = nft_expr_priv(expr);
+
+       write_seqlock_bh(&priv->lock);
+       priv->bytes += pkt->skb->len;
+       priv->packets++;
+       write_sequnlock_bh(&priv->lock);
+}
+
+static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       struct nft_counter *priv = nft_expr_priv(expr);
+       unsigned int seq;
+       u64 bytes;
+       u64 packets;
+
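+       /* read a consistent snapshot of both counters under the seqlock */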
+       do {
+               seq = read_seqbegin(&priv->lock);
+               bytes   = priv->bytes;
+               packets = priv->packets;
+       } while (read_seqretry(&priv->lock, seq));
+
+       if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
+               goto nla_put_failure;
+       if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
+       [NFTA_COUNTER_PACKETS]  = { .type = NLA_U64 },
+       [NFTA_COUNTER_BYTES]    = { .type = NLA_U64 },
+};
+
+static int nft_counter_init(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nlattr * const tb[])
+{
+       struct nft_counter *priv = nft_expr_priv(expr);
+
+       if (tb[NFTA_COUNTER_PACKETS])
+               priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
+       if (tb[NFTA_COUNTER_BYTES])
+               priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
+
+       seqlock_init(&priv->lock);
+       return 0;
+}
+
+static struct nft_expr_type nft_counter_type;
+static const struct nft_expr_ops nft_counter_ops = {
+       .type           = &nft_counter_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_counter)),
+       .eval           = nft_counter_eval,
+       .init           = nft_counter_init,
+       .dump           = nft_counter_dump,
+};
+
+static struct nft_expr_type nft_counter_type __read_mostly = {
+       .name           = "counter",
+       .ops            = &nft_counter_ops,
+       .policy         = nft_counter_policy,
+       .maxattr        = NFTA_COUNTER_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_counter_module_init(void)
+{
+       return nft_register_expr(&nft_counter_type);
+}
+
+static void __exit nft_counter_module_exit(void)
+{
+       nft_unregister_expr(&nft_counter_type);
+}
+
+module_init(nft_counter_module_init);
+module_exit(nft_counter_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("counter");
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
new file mode 100644 (file)
index 0000000..955f4e6
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+
+struct nft_ct {
+       enum nft_ct_keys        key:8;
+       enum ip_conntrack_dir   dir:8;
+       enum nft_registers      dreg:8;
+       uint8_t                 family;
+};
+
+static void nft_ct_eval(const struct nft_expr *expr,
+                       struct nft_data data[NFT_REG_MAX + 1],
+                       const struct nft_pktinfo *pkt)
+{
+       const struct nft_ct *priv = nft_expr_priv(expr);
+       struct nft_data *dest = &data[priv->dreg];
+       enum ip_conntrack_info ctinfo;
+       const struct nf_conn *ct;
+       const struct nf_conn_help *help;
+       const struct nf_conntrack_tuple *tuple;
+       const struct nf_conntrack_helper *helper;
+       long diff;
+       unsigned int state;
+
+       ct = nf_ct_get(pkt->skb, &ctinfo);
+
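+       /* NFT_CT_STATE is valid even without a conntrack entry; handle it first */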
+       switch (priv->key) {
+       case NFT_CT_STATE:
+               if (ct == NULL)
+                       state = NF_CT_STATE_INVALID_BIT;
+               else if (nf_ct_is_untracked(ct))
+                       state = NF_CT_STATE_UNTRACKED_BIT;
+               else
+                       state = NF_CT_STATE_BIT(ctinfo);
+               dest->data[0] = state;
+               return;
+       }
+
+       if (ct == NULL)
+               goto err;
+
+       switch (priv->key) {
+       case NFT_CT_DIRECTION:
+               dest->data[0] = CTINFO2DIR(ctinfo);
+               return;
+       case NFT_CT_STATUS:
+               dest->data[0] = ct->status;
+               return;
+#ifdef CONFIG_NF_CONNTRACK_MARK
+       case NFT_CT_MARK:
+               dest->data[0] = ct->mark;
+               return;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+       case NFT_CT_SECMARK:
+               dest->data[0] = ct->secmark;
+               return;
+#endif
+       case NFT_CT_EXPIRATION:
+               diff = (long)jiffies - (long)ct->timeout.expires;
+               if (diff < 0)
+                       diff = 0;
+               dest->data[0] = jiffies_to_msecs(diff);
+               return;
+       case NFT_CT_HELPER:
+               if (ct->master == NULL)
+                       goto err;
+               help = nfct_help(ct->master);
+               if (help == NULL)
+                       goto err;
+               helper = rcu_dereference(help->helper);
+               if (helper == NULL)
+                       goto err;
+               if (strlen(helper->name) >= sizeof(dest->data))
+                       goto err;
+               strncpy((char *)dest->data, helper->name, sizeof(dest->data));
+               return;
+       }
+
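+       /* the remaining keys are read from the tuple of the requested direction */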
+       tuple = &ct->tuplehash[priv->dir].tuple;
+       switch (priv->key) {
+       case NFT_CT_L3PROTOCOL:
+               dest->data[0] = nf_ct_l3num(ct);
+               return;
+       case NFT_CT_SRC:
+               memcpy(dest->data, tuple->src.u3.all,
+                      nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
+               return;
+       case NFT_CT_DST:
+               memcpy(dest->data, tuple->dst.u3.all,
+                      nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16);
+               return;
+       case NFT_CT_PROTOCOL:
+               dest->data[0] = nf_ct_protonum(ct);
+               return;
+       case NFT_CT_PROTO_SRC:
+               dest->data[0] = (__force __u16)tuple->src.u.all;
+               return;
+       case NFT_CT_PROTO_DST:
+               dest->data[0] = (__force __u16)tuple->dst.u.all;
+               return;
+       }
+       return;
+err:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_ct_policy[NFTA_CT_MAX + 1] = {
+       [NFTA_CT_DREG]          = { .type = NLA_U32 },
+       [NFTA_CT_KEY]           = { .type = NLA_U32 },
+       [NFTA_CT_DIRECTION]     = { .type = NLA_U8 },
+};
+
+static int nft_ct_init(const struct nft_ctx *ctx,
+                      const struct nft_expr *expr,
+                      const struct nlattr * const tb[])
+{
+       struct nft_ct *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_CT_DREG] == NULL ||
+           tb[NFTA_CT_KEY] == NULL)
+               return -EINVAL;
+
+       priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
+       if (tb[NFTA_CT_DIRECTION] != NULL) {
+               priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
+               switch (priv->dir) {
+               case IP_CT_DIR_ORIGINAL:
+               case IP_CT_DIR_REPLY:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       switch (priv->key) {
+       case NFT_CT_STATE:
+       case NFT_CT_DIRECTION:
+       case NFT_CT_STATUS:
+#ifdef CONFIG_NF_CONNTRACK_MARK
+       case NFT_CT_MARK:
+#endif
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+       case NFT_CT_SECMARK:
+#endif
+       case NFT_CT_EXPIRATION:
+       case NFT_CT_HELPER:
+               if (tb[NFTA_CT_DIRECTION] != NULL)
+                       return -EINVAL;
+               break;
+       case NFT_CT_PROTOCOL:
+       case NFT_CT_SRC:
+       case NFT_CT_DST:
+       case NFT_CT_PROTO_SRC:
+       case NFT_CT_PROTO_DST:
+               if (tb[NFTA_CT_DIRECTION] == NULL)
+                       return -EINVAL;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       err = nf_ct_l3proto_try_module_get(ctx->afi->family);
+       if (err < 0)
+               return err;
+       priv->family = ctx->afi->family;
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_CT_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               goto err1;
+
+       err = nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+       if (err < 0)
+               goto err1;
+       return 0;
+
+err1:
+       nf_ct_l3proto_module_put(ctx->afi->family);
+       return err;
+}
+
+static void nft_ct_destroy(const struct nft_expr *expr)
+{
+       struct nft_ct *priv = nft_expr_priv(expr);
+
+       nf_ct_l3proto_module_put(priv->family);
+}
+
+static int nft_ct_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_ct *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_CT_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
+               goto nla_put_failure;
+       if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_ct_type;
+static const struct nft_expr_ops nft_ct_ops = {
+       .type           = &nft_ct_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
+       .eval           = nft_ct_eval,
+       .init           = nft_ct_init,
+       .destroy        = nft_ct_destroy,
+       .dump           = nft_ct_dump,
+};
+
+static struct nft_expr_type nft_ct_type __read_mostly = {
+       .name           = "ct",
+       .ops            = &nft_ct_ops,
+       .policy         = nft_ct_policy,
+       .maxattr        = NFTA_CT_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_ct_module_init(void)
+{
+       return nft_register_expr(&nft_ct_type);
+}
+
+static void __exit nft_ct_module_exit(void)
+{
+       nft_unregister_expr(&nft_ct_type);
+}
+
+module_init(nft_ct_module_init);
+module_exit(nft_ct_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("ct");
diff --git a/net/netfilter/nft_expr_template.c b/net/netfilter/nft_expr_template.c
new file mode 100644 (file)
index 0000000..b6eed4d
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_template {
+
+};
+
+static void nft_template_eval(const struct nft_expr *expr,
+                             struct nft_data data[NFT_REG_MAX + 1],
+                             const struct nft_pktinfo *pkt)
+{
+       struct nft_template *priv = nft_expr_priv(expr);
+
+}
+
+static const struct nla_policy nft_template_policy[NFTA_TEMPLATE_MAX + 1] = {
+       [NFTA_TEMPLATE_ATTR]            = { .type = NLA_U32 },
+};
+
+static int nft_template_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
+{
+       struct nft_template *priv = nft_expr_priv(expr);
+
+       return 0;
+}
+
+static void nft_template_destroy(const struct nft_expr *expr)
+{
+       struct nft_template *priv = nft_expr_priv(expr);
+
+}
+
+static int nft_template_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_template *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_TEMPLATE_ATTR, htonl(priv->field)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_template_type;
+static const struct nft_expr_ops nft_template_ops = {
+       .type           = &nft_template_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_template)),
+       .eval           = nft_template_eval,
+       .init           = nft_template_init,
+       .destroy        = nft_template_destroy,
+       .dump           = nft_template_dump,
+};
+
+static struct nft_expr_type nft_template_type __read_mostly = {
+       .name           = "template",
+       .ops            = &nft_template_ops,
+       .policy         = nft_template_policy,
+       .maxattr        = NFTA_TEMPLATE_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_template_module_init(void)
+{
+       return nft_register_expr(&nft_template_type);
+}
+
+static void __exit nft_template_module_exit(void)
+{
+       nft_unregister_expr(&nft_template_type);
+}
+
+module_init(nft_template_module_init);
+module_exit(nft_template_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("template");
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
new file mode 100644 (file)
index 0000000..8e0bb75
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+/* FIXME */
+#include <net/ipv6.h>
+
+struct nft_exthdr {
+       u8                      type;
+       u8                      offset;
+       u8                      len;
+       enum nft_registers      dreg:8;
+};
+
+static void nft_exthdr_eval(const struct nft_expr *expr,
+                           struct nft_data data[NFT_REG_MAX + 1],
+                           const struct nft_pktinfo *pkt)
+{
+       struct nft_exthdr *priv = nft_expr_priv(expr);
+       struct nft_data *dest = &data[priv->dreg];
+       unsigned int offset;
+       int err;
+
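+       /* locate the requested IPv6 extension header in the packet */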
+       err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
+       if (err < 0)
+               goto err;
+       offset += priv->offset;
+
+       if (skb_copy_bits(pkt->skb, offset, dest->data, priv->len) < 0)
+               goto err;
+       return;
+err:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
+       [NFTA_EXTHDR_DREG]              = { .type = NLA_U32 },
+       [NFTA_EXTHDR_TYPE]              = { .type = NLA_U8 },
+       [NFTA_EXTHDR_OFFSET]            = { .type = NLA_U32 },
+       [NFTA_EXTHDR_LEN]               = { .type = NLA_U32 },
+};
+
+static int nft_exthdr_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
+{
+       struct nft_exthdr *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_EXTHDR_DREG] == NULL ||
+           tb[NFTA_EXTHDR_TYPE] == NULL ||
+           tb[NFTA_EXTHDR_OFFSET] == NULL ||
+           tb[NFTA_EXTHDR_LEN] == NULL)
+               return -EINVAL;
+
+       priv->type   = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
+       priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET]));
+       priv->len    = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN]));
+       if (priv->len == 0 ||
+           priv->len > FIELD_SIZEOF(struct nft_data, data))
+               return -EINVAL;
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_EXTHDR_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+       return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_exthdr *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_EXTHDR_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+       if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_exthdr_type;
+static const struct nft_expr_ops nft_exthdr_ops = {
+       .type           = &nft_exthdr_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
+       .eval           = nft_exthdr_eval,
+       .init           = nft_exthdr_init,
+       .dump           = nft_exthdr_dump,
+};
+
+static struct nft_expr_type nft_exthdr_type __read_mostly = {
+       .name           = "exthdr",
+       .ops            = &nft_exthdr_ops,
+       .policy         = nft_exthdr_policy,
+       .maxattr        = NFTA_EXTHDR_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_exthdr_module_init(void)
+{
+       return nft_register_expr(&nft_exthdr_type);
+}
+
+static void __exit nft_exthdr_module_exit(void)
+{
+       nft_unregister_expr(&nft_exthdr_type);
+}
+
+module_init(nft_exthdr_module_init);
+module_exit(nft_exthdr_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("exthdr");
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
new file mode 100644 (file)
index 0000000..3d3f8fc
--- /dev/null
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/jhash.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_hash {
+       struct hlist_head       *hash;
+       unsigned int            hsize;
+};
+
+struct nft_hash_elem {
+       struct hlist_node       hnode;
+       struct nft_data         key;
+       struct nft_data         data[];
+};
+
+static u32 nft_hash_rnd __read_mostly;
+static bool nft_hash_rnd_initted __read_mostly;
+
+static unsigned int nft_hash_data(const struct nft_data *data,
+                                 unsigned int hsize, unsigned int len)
+{
+       unsigned int h;
+
+       h = jhash(data->data, len, nft_hash_rnd);
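+       /* scale the 32-bit hash onto [0, hsize) without a modulo */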
+       return ((u64)h * hsize) >> 32;
+}
+
+static bool nft_hash_lookup(const struct nft_set *set,
+                           const struct nft_data *key,
+                           struct nft_data *data)
+{
+       const struct nft_hash *priv = nft_set_priv(set);
+       const struct nft_hash_elem *he;
+       unsigned int h;
+
+       h = nft_hash_data(key, priv->hsize, set->klen);
+       hlist_for_each_entry(he, &priv->hash[h], hnode) {
+               if (nft_data_cmp(&he->key, key, set->klen))
+                       continue;
+               if (set->flags & NFT_SET_MAP)
+                       nft_data_copy(data, he->data);
+               return true;
+       }
+       return false;
+}
+
+static void nft_hash_elem_destroy(const struct nft_set *set,
+                                 struct nft_hash_elem *he)
+{
+       nft_data_uninit(&he->key, NFT_DATA_VALUE);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_uninit(he->data, set->dtype);
+       kfree(he);
+}
+
+static int nft_hash_insert(const struct nft_set *set,
+                          const struct nft_set_elem *elem)
+{
+       struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_elem *he;
+       unsigned int size, h;
+
+       if (elem->flags != 0)
+               return -EINVAL;
+
+       size = sizeof(*he);
+       if (set->flags & NFT_SET_MAP)
+               size += sizeof(he->data[0]);
+
+       he = kzalloc(size, GFP_KERNEL);
+       if (he == NULL)
+               return -ENOMEM;
+
+       nft_data_copy(&he->key, &elem->key);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_copy(he->data, &elem->data);
+
+       h = nft_hash_data(&he->key, priv->hsize, set->klen);
+       hlist_add_head_rcu(&he->hnode, &priv->hash[h]);
+       return 0;
+}
+
+static void nft_hash_remove(const struct nft_set *set,
+                           const struct nft_set_elem *elem)
+{
+       struct nft_hash_elem *he = elem->cookie;
+
+       hlist_del_rcu(&he->hnode);
+       kfree(he);
+}
+
+static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
+{
+       const struct nft_hash *priv = nft_set_priv(set);
+       struct nft_hash_elem *he;
+       unsigned int h;
+
+       h = nft_hash_data(&elem->key, priv->hsize, set->klen);
+       hlist_for_each_entry(he, &priv->hash[h], hnode) {
+               if (nft_data_cmp(&he->key, &elem->key, set->klen))
+                       continue;
+
+               elem->cookie = he;
+               elem->flags  = 0;
+               if (set->flags & NFT_SET_MAP)
+                       nft_data_copy(&elem->data, he->data);
+               return 0;
+       }
+       return -ENOENT;
+}
+
+static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
+                         struct nft_set_iter *iter)
+{
+       const struct nft_hash *priv = nft_set_priv(set);
+       const struct nft_hash_elem *he;
+       struct nft_set_elem elem;
+       unsigned int i;
+
+       for (i = 0; i < priv->hsize; i++) {
+               hlist_for_each_entry(he, &priv->hash[i], hnode) {
+                       if (iter->count < iter->skip)
+                               goto cont;
+
+                       memcpy(&elem.key, &he->key, sizeof(elem.key));
+                       if (set->flags & NFT_SET_MAP)
+                               memcpy(&elem.data, he->data, sizeof(elem.data));
+                       elem.flags = 0;
+
+                       iter->err = iter->fn(ctx, set, iter, &elem);
+                       if (iter->err < 0)
+                               return;
+cont:
+                       iter->count++;
+               }
+       }
+}
+
+static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
+{
+       return sizeof(struct nft_hash);
+}
+
+static int nft_hash_init(const struct nft_set *set,
+                        const struct nlattr * const tb[])
+{
+       struct nft_hash *priv = nft_set_priv(set);
+       unsigned int cnt, i;
+
+       if (unlikely(!nft_hash_rnd_initted)) {
+               get_random_bytes(&nft_hash_rnd, 4);
+               nft_hash_rnd_initted = true;
+       }
+
+       /* Aim for a load factor of 0.75 */
+       /* FIXME: temporarily broken until we have set descriptions */
+       cnt = 100;
+       cnt = cnt * 4 / 3;
+
+       priv->hash = kcalloc(cnt, sizeof(struct hlist_head), GFP_KERNEL);
+       if (priv->hash == NULL)
+               return -ENOMEM;
+       priv->hsize = cnt;
+
+       for (i = 0; i < cnt; i++)
+               INIT_HLIST_HEAD(&priv->hash[i]);
+
+       return 0;
+}
+
+static void nft_hash_destroy(const struct nft_set *set)
+{
+       const struct nft_hash *priv = nft_set_priv(set);
+       const struct hlist_node *next;
+       struct nft_hash_elem *elem;
+       unsigned int i;
+
+       for (i = 0; i < priv->hsize; i++) {
+               hlist_for_each_entry_safe(elem, next, &priv->hash[i], hnode) {
+                       hlist_del(&elem->hnode);
+                       nft_hash_elem_destroy(set, elem);
+               }
+       }
+       kfree(priv->hash);
+}
+
+static struct nft_set_ops nft_hash_ops __read_mostly = {
+       .privsize       = nft_hash_privsize,
+       .init           = nft_hash_init,
+       .destroy        = nft_hash_destroy,
+       .get            = nft_hash_get,
+       .insert         = nft_hash_insert,
+       .remove         = nft_hash_remove,
+       .lookup         = nft_hash_lookup,
+       .walk           = nft_hash_walk,
+       .features       = NFT_SET_MAP,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_hash_module_init(void)
+{
+       return nft_register_set(&nft_hash_ops);
+}
+
+static void __exit nft_hash_module_exit(void)
+{
+       nft_unregister_set(&nft_hash_ops);
+}
+
+module_init(nft_hash_module_init);
+module_exit(nft_hash_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
new file mode 100644 (file)
index 0000000..f169501
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_immediate_expr {
+       struct nft_data         data;
+       enum nft_registers      dreg:8;
+       u8                      dlen;
+};
+
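+/* "immediate" copies a constant, parsed once at rule creation time, into the
+ * destination register on every evaluation.
+ */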
+static void nft_immediate_eval(const struct nft_expr *expr,
+                              struct nft_data data[NFT_REG_MAX + 1],
+                              const struct nft_pktinfo *pkt)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+       nft_data_copy(&data[priv->dreg], &priv->data);
+}
+
+static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = {
+       [NFTA_IMMEDIATE_DREG]   = { .type = NLA_U32 },
+       [NFTA_IMMEDIATE_DATA]   = { .type = NLA_NESTED },
+};
+
+static int nft_immediate_init(const struct nft_ctx *ctx,
+                             const struct nft_expr *expr,
+                             const struct nlattr * const tb[])
+{
+       struct nft_immediate_expr *priv = nft_expr_priv(expr);
+       struct nft_data_desc desc;
+       int err;
+
+       if (tb[NFTA_IMMEDIATE_DREG] == NULL ||
+           tb[NFTA_IMMEDIATE_DATA] == NULL)
+               return -EINVAL;
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_IMMEDIATE_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+
+       err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]);
+       if (err < 0)
+               return err;
+       priv->dlen = desc.len;
+
+       err = nft_validate_data_load(ctx, priv->dreg, &priv->data, desc.type);
+       if (err < 0)
+               goto err1;
+
+       return 0;
+
+err1:
+       nft_data_uninit(&priv->data, desc.type);
+       return err;
+}
+
+static void nft_immediate_destroy(const struct nft_expr *expr)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+       return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg));
+}
+
+static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_IMMEDIATE_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+
+       return nft_data_dump(skb, NFTA_IMMEDIATE_DATA, &priv->data,
+                            nft_dreg_to_type(priv->dreg), priv->dlen);
+
+nla_put_failure:
+       return -1;
+}
+
+static int nft_immediate_validate(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr,
+                                 const struct nft_data **data)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+
+       if (priv->dreg == NFT_REG_VERDICT)
+               *data = &priv->data;
+
+       return 0;
+}
+
+static struct nft_expr_type nft_imm_type;
+static const struct nft_expr_ops nft_imm_ops = {
+       .type           = &nft_imm_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
+       .eval           = nft_immediate_eval,
+       .init           = nft_immediate_init,
+       .destroy        = nft_immediate_destroy,
+       .dump           = nft_immediate_dump,
+       .validate       = nft_immediate_validate,
+};
+
+static struct nft_expr_type nft_imm_type __read_mostly = {
+       .name           = "immediate",
+       .ops            = &nft_imm_ops,
+       .policy         = nft_immediate_policy,
+       .maxattr        = NFTA_IMMEDIATE_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_immediate_module_init(void)
+{
+       return nft_register_expr(&nft_imm_type);
+}
+
+void nft_immediate_module_exit(void)
+{
+       nft_unregister_expr(&nft_imm_type);
+}
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
new file mode 100644 (file)
index 0000000..85da5bd
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+static DEFINE_SPINLOCK(limit_lock);
+
+struct nft_limit {
+       u64             tokens;
+       u64             rate;
+       u64             unit;
+       unsigned long   stamp;
+};
+
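+/* Token bucket: the bucket is refilled to 'rate' tokens once the current
+ * 'unit'-second period has expired, and every evaluated packet consumes one
+ * token; when the bucket is empty the expression returns NFT_BREAK.
+ */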
+static void nft_limit_eval(const struct nft_expr *expr,
+                          struct nft_data data[NFT_REG_MAX + 1],
+                          const struct nft_pktinfo *pkt)
+{
+       struct nft_limit *priv = nft_expr_priv(expr);
+
+       spin_lock_bh(&limit_lock);
+       if (time_after_eq(jiffies, priv->stamp)) {
+               priv->tokens = priv->rate;
+               priv->stamp = jiffies + priv->unit * HZ;
+       }
+
+       if (priv->tokens >= 1) {
+               priv->tokens--;
+               spin_unlock_bh(&limit_lock);
+               return;
+       }
+       spin_unlock_bh(&limit_lock);
+
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_limit_policy[NFTA_LIMIT_MAX + 1] = {
+       [NFTA_LIMIT_RATE]       = { .type = NLA_U64 },
+       [NFTA_LIMIT_UNIT]       = { .type = NLA_U64 },
+};
+
+static int nft_limit_init(const struct nft_ctx *ctx,
+                         const struct nft_expr *expr,
+                         const struct nlattr * const tb[])
+{
+       struct nft_limit *priv = nft_expr_priv(expr);
+
+       if (tb[NFTA_LIMIT_RATE] == NULL ||
+           tb[NFTA_LIMIT_UNIT] == NULL)
+               return -EINVAL;
+
+       priv->rate   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+       priv->unit   = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
+       priv->stamp  = jiffies + priv->unit * HZ;
+       priv->tokens = priv->rate;
+       return 0;
+}
+
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_limit *priv = nft_expr_priv(expr);
+
+       if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate)))
+               goto nla_put_failure;
+       if (nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(priv->unit)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_limit_type;
+static const struct nft_expr_ops nft_limit_ops = {
+       .type           = &nft_limit_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_limit)),
+       .eval           = nft_limit_eval,
+       .init           = nft_limit_init,
+       .dump           = nft_limit_dump,
+};
+
+static struct nft_expr_type nft_limit_type __read_mostly = {
+       .name           = "limit",
+       .ops            = &nft_limit_ops,
+       .policy         = nft_limit_policy,
+       .maxattr        = NFTA_LIMIT_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_limit_module_init(void)
+{
+       return nft_register_expr(&nft_limit_type);
+}
+
+static void __exit nft_limit_module_exit(void)
+{
+       nft_unregister_expr(&nft_limit_type);
+}
+
+module_init(nft_limit_module_init);
+module_exit(nft_limit_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("limit");
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
new file mode 100644 (file)
index 0000000..57cad07
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
+#include <linux/netdevice.h>
+
+static const char *nft_log_null_prefix = "";
+
+struct nft_log {
+       struct nf_loginfo       loginfo;
+       char                    *prefix;
+       int                     family;
+};
+
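+/* Hand the packet to the nf_log infrastructure, using the prefix and the
+ * ulog group/snaplen/queue threshold configured at init time.
+ */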
+static void nft_log_eval(const struct nft_expr *expr,
+                        struct nft_data data[NFT_REG_MAX + 1],
+                        const struct nft_pktinfo *pkt)
+{
+       const struct nft_log *priv = nft_expr_priv(expr);
+       struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+
+       nf_log_packet(net, priv->family, pkt->hooknum, pkt->skb, pkt->in,
+                     pkt->out, &priv->loginfo, "%s", priv->prefix);
+}
+
+static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
+       [NFTA_LOG_GROUP]        = { .type = NLA_U16 },
+       [NFTA_LOG_PREFIX]       = { .type = NLA_STRING },
+       [NFTA_LOG_SNAPLEN]      = { .type = NLA_U32 },
+       [NFTA_LOG_QTHRESHOLD]   = { .type = NLA_U16 },
+};
+
+static int nft_log_init(const struct nft_ctx *ctx,
+                       const struct nft_expr *expr,
+                       const struct nlattr * const tb[])
+{
+       struct nft_log *priv = nft_expr_priv(expr);
+       struct nf_loginfo *li = &priv->loginfo;
+       const struct nlattr *nla;
+
+       priv->family = ctx->afi->family;
+
+       nla = tb[NFTA_LOG_PREFIX];
+       if (nla != NULL) {
+               priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
+               if (priv->prefix == NULL)
+                       return -ENOMEM;
+               nla_strlcpy(priv->prefix, nla, nla_len(nla) + 1);
+       } else
+               priv->prefix = (char *)nft_log_null_prefix;
+
+       li->type = NF_LOG_TYPE_ULOG;
+       if (tb[NFTA_LOG_GROUP] != NULL)
+               li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
+
+       if (tb[NFTA_LOG_SNAPLEN] != NULL)
+               li->u.ulog.copy_len = ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
+       if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
+               li->u.ulog.qthreshold =
+                       ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
+       }
+
+       return 0;
+}
+
+static void nft_log_destroy(const struct nft_expr *expr)
+{
+       struct nft_log *priv = nft_expr_priv(expr);
+
+       if (priv->prefix != nft_log_null_prefix)
+               kfree(priv->prefix);
+}
+
+static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_log *priv = nft_expr_priv(expr);
+       const struct nf_loginfo *li = &priv->loginfo;
+
+       if (priv->prefix != nft_log_null_prefix)
+               if (nla_put_string(skb, NFTA_LOG_PREFIX, priv->prefix))
+                       goto nla_put_failure;
+       if (li->u.ulog.group)
+               if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
+                       goto nla_put_failure;
+       if (li->u.ulog.copy_len)
+               if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
+                                htonl(li->u.ulog.copy_len)))
+                       goto nla_put_failure;
+       if (li->u.ulog.qthreshold)
+               if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
+                                htons(li->u.ulog.qthreshold)))
+                       goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_log_type;
+static const struct nft_expr_ops nft_log_ops = {
+       .type           = &nft_log_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_log)),
+       .eval           = nft_log_eval,
+       .init           = nft_log_init,
+       .destroy        = nft_log_destroy,
+       .dump           = nft_log_dump,
+};
+
+static struct nft_expr_type nft_log_type __read_mostly = {
+       .name           = "log",
+       .ops            = &nft_log_ops,
+       .policy         = nft_log_policy,
+       .maxattr        = NFTA_LOG_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_log_module_init(void)
+{
+       return nft_register_expr(&nft_log_type);
+}
+
+static void __exit nft_log_module_exit(void)
+{
+       nft_unregister_expr(&nft_log_type);
+}
+
+module_init(nft_log_module_init);
+module_exit(nft_log_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("log");
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
new file mode 100644 (file)
index 0000000..8a6116b
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_lookup {
+       struct nft_set                  *set;
+       enum nft_registers              sreg:8;
+       enum nft_registers              dreg:8;
+       struct nft_set_binding          binding;
+};
+
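+/* Look the source register up in the bound set; for map sets a match also
+ * fills the destination register, while a miss yields NFT_BREAK.
+ */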
+static void nft_lookup_eval(const struct nft_expr *expr,
+                           struct nft_data data[NFT_REG_MAX + 1],
+                           const struct nft_pktinfo *pkt)
+{
+       const struct nft_lookup *priv = nft_expr_priv(expr);
+       const struct nft_set *set = priv->set;
+
+       if (set->ops->lookup(set, &data[priv->sreg], &data[priv->dreg]))
+               return;
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
+       [NFTA_LOOKUP_SET]       = { .type = NLA_STRING },
+       [NFTA_LOOKUP_SREG]      = { .type = NLA_U32 },
+       [NFTA_LOOKUP_DREG]      = { .type = NLA_U32 },
+};
+
+static int nft_lookup_init(const struct nft_ctx *ctx,
+                          const struct nft_expr *expr,
+                          const struct nlattr * const tb[])
+{
+       struct nft_lookup *priv = nft_expr_priv(expr);
+       struct nft_set *set;
+       int err;
+
+       if (tb[NFTA_LOOKUP_SET] == NULL ||
+           tb[NFTA_LOOKUP_SREG] == NULL)
+               return -EINVAL;
+
+       set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]);
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+
+       priv->sreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_SREG]));
+       err = nft_validate_input_register(priv->sreg);
+       if (err < 0)
+               return err;
+
+       if (tb[NFTA_LOOKUP_DREG] != NULL) {
+               if (!(set->flags & NFT_SET_MAP))
+                       return -EINVAL;
+
+               priv->dreg = ntohl(nla_get_be32(tb[NFTA_LOOKUP_DREG]));
+               err = nft_validate_output_register(priv->dreg);
+               if (err < 0)
+                       return err;
+
+               if (priv->dreg == NFT_REG_VERDICT) {
+                       if (set->dtype != NFT_DATA_VERDICT)
+                               return -EINVAL;
+               } else if (set->dtype == NFT_DATA_VERDICT)
+                       return -EINVAL;
+       } else if (set->flags & NFT_SET_MAP)
+               return -EINVAL;
+
+       err = nf_tables_bind_set(ctx, set, &priv->binding);
+       if (err < 0)
+               return err;
+
+       priv->set = set;
+       return 0;
+}
+
+static void nft_lookup_destroy(const struct nft_expr *expr)
+{
+       struct nft_lookup *priv = nft_expr_priv(expr);
+
+       nf_tables_unbind_set(NULL, priv->set, &priv->binding);
+}
+
+static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_lookup *priv = nft_expr_priv(expr);
+
+       if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_LOOKUP_SREG, htonl(priv->sreg)))
+               goto nla_put_failure;
+       if (priv->set->flags & NFT_SET_MAP)
+               if (nla_put_be32(skb, NFTA_LOOKUP_DREG, htonl(priv->dreg)))
+                       goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_lookup_type;
+static const struct nft_expr_ops nft_lookup_ops = {
+       .type           = &nft_lookup_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
+       .eval           = nft_lookup_eval,
+       .init           = nft_lookup_init,
+       .destroy        = nft_lookup_destroy,
+       .dump           = nft_lookup_dump,
+};
+
+static struct nft_expr_type nft_lookup_type __read_mostly = {
+       .name           = "lookup",
+       .ops            = &nft_lookup_ops,
+       .policy         = nft_lookup_policy,
+       .maxattr        = NFTA_LOOKUP_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_lookup_module_init(void)
+{
+       return nft_register_expr(&nft_lookup_type);
+}
+
+void nft_lookup_module_exit(void)
+{
+       nft_unregister_expr(&nft_lookup_type);
+}
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
new file mode 100644 (file)
index 0000000..8c28220
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/tcp_states.h> /* for TCP_TIME_WAIT */
+#include <net/netfilter/nf_tables.h>
+
+struct nft_meta {
+       enum nft_meta_keys      key:8;
+       enum nft_registers      dreg:8;
+};
+
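+/* Copy one piece of packet or socket metadata, selected by priv->key, into
+ * the destination register; unavailable data (e.g. no input device) ends
+ * evaluation of this rule with NFT_BREAK.
+ */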
+static void nft_meta_eval(const struct nft_expr *expr,
+                         struct nft_data data[NFT_REG_MAX + 1],
+                         const struct nft_pktinfo *pkt)
+{
+       const struct nft_meta *priv = nft_expr_priv(expr);
+       const struct sk_buff *skb = pkt->skb;
+       const struct net_device *in = pkt->in, *out = pkt->out;
+       struct nft_data *dest = &data[priv->dreg];
+
+       switch (priv->key) {
+       case NFT_META_LEN:
+               dest->data[0] = skb->len;
+               break;
+       case NFT_META_PROTOCOL:
+               *(__be16 *)dest->data = skb->protocol;
+               break;
+       case NFT_META_PRIORITY:
+               dest->data[0] = skb->priority;
+               break;
+       case NFT_META_MARK:
+               dest->data[0] = skb->mark;
+               break;
+       case NFT_META_IIF:
+               if (in == NULL)
+                       goto err;
+               dest->data[0] = in->ifindex;
+               break;
+       case NFT_META_OIF:
+               if (out == NULL)
+                       goto err;
+               dest->data[0] = out->ifindex;
+               break;
+       case NFT_META_IIFNAME:
+               if (in == NULL)
+                       goto err;
+               strncpy((char *)dest->data, in->name, sizeof(dest->data));
+               break;
+       case NFT_META_OIFNAME:
+               if (out == NULL)
+                       goto err;
+               strncpy((char *)dest->data, out->name, sizeof(dest->data));
+               break;
+       case NFT_META_IIFTYPE:
+               if (in == NULL)
+                       goto err;
+               *(u16 *)dest->data = in->type;
+               break;
+       case NFT_META_OIFTYPE:
+               if (out == NULL)
+                       goto err;
+               *(u16 *)dest->data = out->type;
+               break;
+       case NFT_META_SKUID:
+               if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+                       goto err;
+
+               read_lock_bh(&skb->sk->sk_callback_lock);
+               if (skb->sk->sk_socket == NULL ||
+                   skb->sk->sk_socket->file == NULL) {
+                       read_unlock_bh(&skb->sk->sk_callback_lock);
+                       goto err;
+               }
+
+               dest->data[0] =
+                       from_kuid_munged(&init_user_ns,
+                               skb->sk->sk_socket->file->f_cred->fsuid);
+               read_unlock_bh(&skb->sk->sk_callback_lock);
+               break;
+       case NFT_META_SKGID:
+               if (skb->sk == NULL || skb->sk->sk_state == TCP_TIME_WAIT)
+                       goto err;
+
+               read_lock_bh(&skb->sk->sk_callback_lock);
+               if (skb->sk->sk_socket == NULL ||
+                   skb->sk->sk_socket->file == NULL) {
+                       read_unlock_bh(&skb->sk->sk_callback_lock);
+                       goto err;
+               }
+               dest->data[0] =
+                       from_kgid_munged(&init_user_ns,
+                                skb->sk->sk_socket->file->f_cred->fsgid);
+               read_unlock_bh(&skb->sk->sk_callback_lock);
+               break;
+#ifdef CONFIG_NET_CLS_ROUTE
+       case NFT_META_RTCLASSID: {
+               const struct dst_entry *dst = skb_dst(skb);
+
+               if (dst == NULL)
+                       goto err;
+               dest->data[0] = dst->tclassid;
+               break;
+       }
+#endif
+#ifdef CONFIG_NETWORK_SECMARK
+       case NFT_META_SECMARK:
+               dest->data[0] = skb->secmark;
+               break;
+#endif
+       default:
+               WARN_ON(1);
+               goto err;
+       }
+       return;
+
+err:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
+       [NFTA_META_DREG]        = { .type = NLA_U32 },
+       [NFTA_META_KEY]         = { .type = NLA_U32 },
+};
+
+static int nft_meta_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                        const struct nlattr * const tb[])
+{
+       struct nft_meta *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_META_DREG] == NULL ||
+           tb[NFTA_META_KEY] == NULL)
+               return -EINVAL;
+
+       priv->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+       switch (priv->key) {
+       case NFT_META_LEN:
+       case NFT_META_PROTOCOL:
+       case NFT_META_PRIORITY:
+       case NFT_META_MARK:
+       case NFT_META_IIF:
+       case NFT_META_OIF:
+       case NFT_META_IIFNAME:
+       case NFT_META_OIFNAME:
+       case NFT_META_IIFTYPE:
+       case NFT_META_OIFTYPE:
+       case NFT_META_SKUID:
+       case NFT_META_SKGID:
+#ifdef CONFIG_NET_CLS_ROUTE
+       case NFT_META_RTCLASSID:
+#endif
+#ifdef CONFIG_NETWORK_SECMARK
+       case NFT_META_SECMARK:
+#endif
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_META_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+       return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_meta *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_META_DREG, htonl(priv->dreg)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_META_KEY, htonl(priv->key)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_meta_type;
+static const struct nft_expr_ops nft_meta_ops = {
+       .type           = &nft_meta_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+       .eval           = nft_meta_eval,
+       .init           = nft_meta_init,
+       .dump           = nft_meta_dump,
+};
+
+static struct nft_expr_type nft_meta_type __read_mostly = {
+       .name           = "meta",
+       .ops            = &nft_meta_ops,
+       .policy         = nft_meta_policy,
+       .maxattr        = NFTA_META_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_meta_module_init(void)
+{
+       return nft_register_expr(&nft_meta_type);
+}
+
+static void __exit nft_meta_module_exit(void)
+{
+       nft_unregister_expr(&nft_meta_type);
+}
+
+module_init(nft_meta_module_init);
+module_exit(nft_meta_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("meta");
diff --git a/net/netfilter/nft_meta_target.c b/net/netfilter/nft_meta_target.c
new file mode 100644 (file)
index 0000000..71177df
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_meta {
+       enum nft_meta_keys      key;
+};
+
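+/* Target variant of "meta": instead of reading metadata it writes the
+ * supplied value into the packet (mark, priority, nftrace or secmark).
+ */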
+static void nft_meta_eval(const struct nft_expr *expr,
+                         struct nft_data *nfres,
+                         struct nft_data *data,
+                         const struct nft_pktinfo *pkt)
+{
+       const struct nft_meta *meta = nft_expr_priv(expr);
+       struct sk_buff *skb = pkt->skb;
+       u32 val = data->data[0];
+
+       switch (meta->key) {
+       case NFT_META_MARK:
+               skb->mark = val;
+               break;
+       case NFT_META_PRIORITY:
+               skb->priority = val;
+               break;
+       case NFT_META_NFTRACE:
+               skb->nf_trace = val;
+               break;
+#ifdef CONFIG_NETWORK_SECMARK
+       case NFT_META_SECMARK:
+               skb->secmark = val;
+               break;
+#endif
+       default:
+               WARN_ON(1);
+       }
+}
+
+static const struct nla_policy nft_meta_policy[NFTA_META_MAX + 1] = {
+       [NFTA_META_KEY]         = { .type = NLA_U32 },
+};
+
+static int nft_meta_init(const struct nft_expr *expr, struct nlattr *tb[])
+{
+       struct nft_meta *meta = nft_expr_priv(expr);
+
+       if (tb[NFTA_META_KEY] == NULL)
+               return -EINVAL;
+
+       meta->key = ntohl(nla_get_be32(tb[NFTA_META_KEY]));
+       switch (meta->key) {
+       case NFT_META_MARK:
+       case NFT_META_PRIORITY:
+       case NFT_META_NFTRACE:
+#ifdef CONFIG_NETWORK_SECMARK
+       case NFT_META_SECMARK:
+#endif
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int nft_meta_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       struct nft_meta *meta = nft_expr_priv(expr);
+
+       NLA_PUT_BE32(skb, NFTA_META_KEY, htonl(meta->key));
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_ops meta_target __read_mostly = {
+       .name           = "meta",
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
+       .owner          = THIS_MODULE,
+       .eval           = nft_meta_eval,
+       .init           = nft_meta_init,
+       .dump           = nft_meta_dump,
+       .policy         = nft_meta_policy,
+       .maxattr        = NFTA_META_MAX,
+};
+
+static int __init nft_meta_target_init(void)
+{
+       return nft_register_expr(&meta_target);
+}
+
+static void __exit nft_meta_target_exit(void)
+{
+       nft_unregister_expr(&meta_target);
+}
+
+module_init(nft_meta_target_init);
+module_exit(nft_meta_target_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_EXPR("meta");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
new file mode 100644 (file)
index 0000000..b0b87b2
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ * Copyright (c) 2012 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/string.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/ip.h>
+
+struct nft_nat {
+       enum nft_registers      sreg_addr_min:8;
+       enum nft_registers      sreg_addr_max:8;
+       enum nft_registers      sreg_proto_min:8;
+       enum nft_registers      sreg_proto_max:8;
+       int                     family;
+       enum nf_nat_manip_type  type;
+};
+
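+/* Build an nf_nat_range from the optional address and proto source registers
+ * and let nf_nat_setup_info() attach the NAT binding to the conntrack entry;
+ * its return value becomes the verdict.
+ */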
+static void nft_nat_eval(const struct nft_expr *expr,
+                        struct nft_data data[NFT_REG_MAX + 1],
+                        const struct nft_pktinfo *pkt)
+{
+       const struct nft_nat *priv = nft_expr_priv(expr);
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct = nf_ct_get(pkt->skb, &ctinfo);
+       struct nf_nat_range range;
+
+       memset(&range, 0, sizeof(range));
+       if (priv->sreg_addr_min) {
+               if (priv->family == AF_INET) {
+                       range.min_addr.ip = data[priv->sreg_addr_min].data[0];
+                       range.max_addr.ip = data[priv->sreg_addr_max].data[0];
+
+               } else {
+                       memcpy(range.min_addr.ip6,
+                              data[priv->sreg_addr_min].data,
+                              sizeof(struct nft_data));
+                       memcpy(range.max_addr.ip6,
+                              data[priv->sreg_addr_max].data,
+                              sizeof(struct nft_data));
+               }
+               range.flags |= NF_NAT_RANGE_MAP_IPS;
+       }
+
+       if (priv->sreg_proto_min) {
+               range.min_proto.all = data[priv->sreg_proto_min].data[0];
+               range.max_proto.all = data[priv->sreg_proto_max].data[0];
+               range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+       }
+
+       data[NFT_REG_VERDICT].verdict =
+               nf_nat_setup_info(ct, &range, priv->type);
+}
+
+static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
+       [NFTA_NAT_TYPE]          = { .type = NLA_U32 },
+       [NFTA_NAT_FAMILY]        = { .type = NLA_U32 },
+       [NFTA_NAT_REG_ADDR_MIN]  = { .type = NLA_U32 },
+       [NFTA_NAT_REG_ADDR_MAX]  = { .type = NLA_U32 },
+       [NFTA_NAT_REG_PROTO_MIN] = { .type = NLA_U32 },
+       [NFTA_NAT_REG_PROTO_MAX] = { .type = NLA_U32 },
+};
+
+static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
+                       const struct nlattr * const tb[])
+{
+       struct nft_nat *priv = nft_expr_priv(expr);
+       int err;
+
+       if (tb[NFTA_NAT_TYPE] == NULL)
+               return -EINVAL;
+
+       switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) {
+       case NFT_NAT_SNAT:
+               priv->type = NF_NAT_MANIP_SRC;
+               break;
+       case NFT_NAT_DNAT:
+               priv->type = NF_NAT_MANIP_DST;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (tb[NFTA_NAT_FAMILY] == NULL)
+               return -EINVAL;
+
+       priv->family = ntohl(nla_get_be32(tb[NFTA_NAT_FAMILY]));
+       if (priv->family != AF_INET && priv->family != AF_INET6)
+               return -EINVAL;
+
+       if (tb[NFTA_NAT_REG_ADDR_MIN]) {
+               priv->sreg_addr_min = ntohl(nla_get_be32(
+                                               tb[NFTA_NAT_REG_ADDR_MIN]));
+               err = nft_validate_input_register(priv->sreg_addr_min);
+               if (err < 0)
+                       return err;
+       }
+
+       if (tb[NFTA_NAT_REG_ADDR_MAX]) {
+               priv->sreg_addr_max = ntohl(nla_get_be32(
+                                               tb[NFTA_NAT_REG_ADDR_MAX]));
+               err = nft_validate_input_register(priv->sreg_addr_max);
+               if (err < 0)
+                       return err;
+       } else
+               priv->sreg_addr_max = priv->sreg_addr_min;
+
+       if (tb[NFTA_NAT_REG_PROTO_MIN]) {
+               priv->sreg_proto_min = ntohl(nla_get_be32(
+                                               tb[NFTA_NAT_REG_PROTO_MIN]));
+               err = nft_validate_input_register(priv->sreg_proto_min);
+               if (err < 0)
+                       return err;
+       }
+
+       if (tb[NFTA_NAT_REG_PROTO_MAX]) {
+               priv->sreg_proto_max = ntohl(nla_get_be32(
+                                               tb[NFTA_NAT_REG_PROTO_MAX]));
+               err = nft_validate_input_register(priv->sreg_proto_max);
+               if (err < 0)
+                       return err;
+       } else
+               priv->sreg_proto_max = priv->sreg_proto_min;
+
+       return 0;
+}
+
+static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_nat *priv = nft_expr_priv(expr);
+
+       switch (priv->type) {
+       case NF_NAT_MANIP_SRC:
+               if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_SNAT)))
+                       goto nla_put_failure;
+               break;
+       case NF_NAT_MANIP_DST:
+               if (nla_put_be32(skb, NFTA_NAT_TYPE, htonl(NFT_NAT_DNAT)))
+                       goto nla_put_failure;
+               break;
+       }
+
+       if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb,
+                        NFTA_NAT_REG_ADDR_MIN, htonl(priv->sreg_addr_min)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb,
+                        NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb,
+                        NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb,
+                        NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_nat_type;
+static const struct nft_expr_ops nft_nat_ops = {
+       .type           = &nft_nat_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_nat)),
+       .eval           = nft_nat_eval,
+       .init           = nft_nat_init,
+       .dump           = nft_nat_dump,
+};
+
+static struct nft_expr_type nft_nat_type __read_mostly = {
+       .name           = "nat",
+       .ops            = &nft_nat_ops,
+       .policy         = nft_nat_policy,
+       .maxattr        = NFTA_NAT_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_nat_module_init(void)
+{
+       int err;
+
+       err = nft_register_expr(&nft_nat_type);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static void __exit nft_nat_module_exit(void)
+{
+       nft_unregister_expr(&nft_nat_type);
+}
+
+module_init(nft_nat_module_init);
+module_exit(nft_nat_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
+MODULE_ALIAS_NFT_EXPR("nat");
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
new file mode 100644 (file)
index 0000000..a2aeb31
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
+#include <net/netfilter/nf_tables.h>
+
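+/* Copy priv->len bytes, starting priv->offset bytes into the selected header,
+ * from the packet into the destination register; a missing MAC header or a
+ * short packet yields NFT_BREAK.
+ */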
+static void nft_payload_eval(const struct nft_expr *expr,
+                            struct nft_data data[NFT_REG_MAX + 1],
+                            const struct nft_pktinfo *pkt)
+{
+       const struct nft_payload *priv = nft_expr_priv(expr);
+       const struct sk_buff *skb = pkt->skb;
+       struct nft_data *dest = &data[priv->dreg];
+       int offset;
+
+       switch (priv->base) {
+       case NFT_PAYLOAD_LL_HEADER:
+               if (!skb_mac_header_was_set(skb))
+                       goto err;
+               offset = skb_mac_header(skb) - skb->data;
+               break;
+       case NFT_PAYLOAD_NETWORK_HEADER:
+               offset = skb_network_offset(skb);
+               break;
+       case NFT_PAYLOAD_TRANSPORT_HEADER:
+               offset = pkt->xt.thoff;
+               break;
+       default:
+               BUG();
+       }
+       offset += priv->offset;
+
+       if (skb_copy_bits(skb, offset, dest->data, priv->len) < 0)
+               goto err;
+       return;
+err:
+       data[NFT_REG_VERDICT].verdict = NFT_BREAK;
+}
+
+static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
+       [NFTA_PAYLOAD_DREG]     = { .type = NLA_U32 },
+       [NFTA_PAYLOAD_BASE]     = { .type = NLA_U32 },
+       [NFTA_PAYLOAD_OFFSET]   = { .type = NLA_U32 },
+       [NFTA_PAYLOAD_LEN]      = { .type = NLA_U32 },
+};
+
+static int nft_payload_init(const struct nft_ctx *ctx,
+                           const struct nft_expr *expr,
+                           const struct nlattr * const tb[])
+{
+       struct nft_payload *priv = nft_expr_priv(expr);
+       int err;
+
+       priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+       priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+       priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+
+       priv->dreg = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_DREG]));
+       err = nft_validate_output_register(priv->dreg);
+       if (err < 0)
+               return err;
+       return nft_validate_data_load(ctx, priv->dreg, NULL, NFT_DATA_VALUE);
+}
+
+static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+       const struct nft_payload *priv = nft_expr_priv(expr);
+
+       if (nla_put_be32(skb, NFTA_PAYLOAD_DREG, htonl(priv->dreg)) ||
+           nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
+           nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
+           nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
+               goto nla_put_failure;
+       return 0;
+
+nla_put_failure:
+       return -1;
+}
+
+static struct nft_expr_type nft_payload_type;
+static const struct nft_expr_ops nft_payload_ops = {
+       .type           = &nft_payload_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
+       .eval           = nft_payload_eval,
+       .init           = nft_payload_init,
+       .dump           = nft_payload_dump,
+};
+
+const struct nft_expr_ops nft_payload_fast_ops = {
+       .type           = &nft_payload_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_payload)),
+       .eval           = nft_payload_eval,
+       .init           = nft_payload_init,
+       .dump           = nft_payload_dump,
+};
+
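+/* Loads of at most four aligned bytes outside the link-layer header can use
+ * the fast ops; everything else falls back to the generic payload ops.
+ */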
+static const struct nft_expr_ops *
+nft_payload_select_ops(const struct nft_ctx *ctx,
+                      const struct nlattr * const tb[])
+{
+       enum nft_payload_bases base;
+       unsigned int offset, len;
+
+       if (tb[NFTA_PAYLOAD_DREG] == NULL ||
+           tb[NFTA_PAYLOAD_BASE] == NULL ||
+           tb[NFTA_PAYLOAD_OFFSET] == NULL ||
+           tb[NFTA_PAYLOAD_LEN] == NULL)
+               return ERR_PTR(-EINVAL);
+
+       base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+       switch (base) {
+       case NFT_PAYLOAD_LL_HEADER:
+       case NFT_PAYLOAD_NETWORK_HEADER:
+       case NFT_PAYLOAD_TRANSPORT_HEADER:
+               break;
+       default:
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+
+       offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+       len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+       if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data))
+               return ERR_PTR(-EINVAL);
+
+       if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER)
+               return &nft_payload_fast_ops;
+       else
+               return &nft_payload_ops;
+}
+
+static struct nft_expr_type nft_payload_type __read_mostly = {
+       .name           = "payload",
+       .select_ops     = nft_payload_select_ops,
+       .policy         = nft_payload_policy,
+       .maxattr        = NFTA_PAYLOAD_MAX,
+       .owner          = THIS_MODULE,
+};
+
+int __init nft_payload_module_init(void)
+{
+       return nft_register_expr(&nft_payload_type);
+}
+
+void nft_payload_module_exit(void)
+{
+       nft_unregister_expr(&nft_payload_type);
+}
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
new file mode 100644 (file)
index 0000000..ca0c1b2
--- /dev/null
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+struct nft_rbtree {
+       struct rb_root          root;
+};
+
+struct nft_rbtree_elem {
+       struct rb_node          node;
+       u16                     flags;
+       struct nft_data         key;
+       struct nft_data         data[];
+};
+
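+/* Descend the tree comparing keys; the nearest interval-start candidate seen
+ * on the way down is remembered so interval sets can still match the
+ * enclosing range, and elements flagged NFT_SET_ELEM_INTERVAL_END never match.
+ */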
+static bool nft_rbtree_lookup(const struct nft_set *set,
+                             const struct nft_data *key,
+                             struct nft_data *data)
+{
+       const struct nft_rbtree *priv = nft_set_priv(set);
+       const struct nft_rbtree_elem *rbe, *interval = NULL;
+       const struct rb_node *parent = priv->root.rb_node;
+       int d;
+
+       while (parent != NULL) {
+               rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+               d = nft_data_cmp(&rbe->key, key, set->klen);
+               if (d < 0) {
+                       parent = parent->rb_left;
+                       interval = rbe;
+               } else if (d > 0)
+                       parent = parent->rb_right;
+               else {
+found:
+                       if (rbe->flags & NFT_SET_ELEM_INTERVAL_END)
+                               goto out;
+                       if (set->flags & NFT_SET_MAP)
+                               nft_data_copy(data, rbe->data);
+                       return true;
+               }
+       }
+
+       if (set->flags & NFT_SET_INTERVAL && interval != NULL) {
+               rbe = interval;
+               goto found;
+       }
+out:
+       return false;
+}
+
+static void nft_rbtree_elem_destroy(const struct nft_set *set,
+                                   struct nft_rbtree_elem *rbe)
+{
+       nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_uninit(rbe->data, set->dtype);
+       kfree(rbe);
+}
+
+static int __nft_rbtree_insert(const struct nft_set *set,
+                              struct nft_rbtree_elem *new)
+{
+       struct nft_rbtree *priv = nft_set_priv(set);
+       struct nft_rbtree_elem *rbe;
+       struct rb_node *parent, **p;
+       int d;
+
+       parent = NULL;
+       p = &priv->root.rb_node;
+       while (*p != NULL) {
+               parent = *p;
+               rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+               d = nft_data_cmp(&rbe->key, &new->key, set->klen);
+               if (d < 0)
+                       p = &parent->rb_left;
+               else if (d > 0)
+                       p = &parent->rb_right;
+               else
+                       return -EEXIST;
+       }
+       rb_link_node(&new->node, parent, p);
+       rb_insert_color(&new->node, &priv->root);
+       return 0;
+}
+
+static int nft_rbtree_insert(const struct nft_set *set,
+                            const struct nft_set_elem *elem)
+{
+       struct nft_rbtree_elem *rbe;
+       unsigned int size;
+       int err;
+
+       size = sizeof(*rbe);
+       if (set->flags & NFT_SET_MAP)
+               size += sizeof(rbe->data[0]);
+
+       rbe = kzalloc(size, GFP_KERNEL);
+       if (rbe == NULL)
+               return -ENOMEM;
+
+       rbe->flags = elem->flags;
+       nft_data_copy(&rbe->key, &elem->key);
+       if (set->flags & NFT_SET_MAP)
+               nft_data_copy(rbe->data, &elem->data);
+
+       err = __nft_rbtree_insert(set, rbe);
+       if (err < 0)
+               kfree(rbe);
+       return err;
+}
+
+static void nft_rbtree_remove(const struct nft_set *set,
+                             const struct nft_set_elem *elem)
+{
+       struct nft_rbtree *priv = nft_set_priv(set);
+       struct nft_rbtree_elem *rbe = elem->cookie;
+
+       rb_erase(&rbe->node, &priv->root);
+       kfree(rbe);
+}
+
+static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
+{
+       const struct nft_rbtree *priv = nft_set_priv(set);
+       const struct rb_node *parent = priv->root.rb_node;
+       struct nft_rbtree_elem *rbe;
+       int d;
+
+       while (parent != NULL) {
+               rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+
+               d = nft_data_cmp(&rbe->key, &elem->key, set->klen);
+               if (d < 0)
+                       parent = parent->rb_left;
+               else if (d > 0)
+                       parent = parent->rb_right;
+               else {
+                       elem->cookie = rbe;
+                       if (set->flags & NFT_SET_MAP)
+                               nft_data_copy(&elem->data, rbe->data);
+                       elem->flags = rbe->flags;
+                       return 0;
+               }
+       }
+       return -ENOENT;
+}
+
+static void nft_rbtree_walk(const struct nft_ctx *ctx,
+                           const struct nft_set *set,
+                           struct nft_set_iter *iter)
+{
+       const struct nft_rbtree *priv = nft_set_priv(set);
+       const struct nft_rbtree_elem *rbe;
+       struct nft_set_elem elem;
+       struct rb_node *node;
+
+       for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
+               if (iter->count < iter->skip)
+                       goto cont;
+
+               rbe = rb_entry(node, struct nft_rbtree_elem, node);
+               nft_data_copy(&elem.key, &rbe->key);
+               if (set->flags & NFT_SET_MAP)
+                       nft_data_copy(&elem.data, rbe->data);
+               elem.flags = rbe->flags;
+
+               iter->err = iter->fn(ctx, set, iter, &elem);
+               if (iter->err < 0)
+                       return;
+cont:
+               iter->count++;
+       }
+}
+
+static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[])
+{
+       return sizeof(struct nft_rbtree);
+}
+
+static int nft_rbtree_init(const struct nft_set *set,
+                          const struct nlattr * const nla[])
+{
+       struct nft_rbtree *priv = nft_set_priv(set);
+
+       priv->root = RB_ROOT;
+       return 0;
+}
+
+static void nft_rbtree_destroy(const struct nft_set *set)
+{
+       struct nft_rbtree *priv = nft_set_priv(set);
+       struct nft_rbtree_elem *rbe;
+       struct rb_node *node;
+
+       while ((node = priv->root.rb_node) != NULL) {
+               rb_erase(node, &priv->root);
+               rbe = rb_entry(node, struct nft_rbtree_elem, node);
+               nft_rbtree_elem_destroy(set, rbe);
+       }
+}
+
+static struct nft_set_ops nft_rbtree_ops __read_mostly = {
+       .privsize       = nft_rbtree_privsize,
+       .init           = nft_rbtree_init,
+       .destroy        = nft_rbtree_destroy,
+       .insert         = nft_rbtree_insert,
+       .remove         = nft_rbtree_remove,
+       .get            = nft_rbtree_get,
+       .lookup         = nft_rbtree_lookup,
+       .walk           = nft_rbtree_walk,
+       .features       = NFT_SET_INTERVAL | NFT_SET_MAP,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_rbtree_module_init(void)
+{
+       return nft_register_set(&nft_rbtree_ops);
+}
+
+static void __exit nft_rbtree_module_exit(void)
+{
+       nft_unregister_set(&nft_rbtree_ops);
+}
+
+module_init(nft_rbtree_module_init);
+module_exit(nft_rbtree_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_SET();
index 8b03028cca69b616df298e0ffbfdeb247441a341..227aa11e8409bb18be93c0c432a4d71cfb6e8f38 100644 (file)
@@ -845,8 +845,13 @@ xt_replace_table(struct xt_table *table,
                return NULL;
        }
 
-       table->private = newinfo;
        newinfo->initial_entries = private->initial_entries;
+       /*
+        * Ensure contents of newinfo are visible before assigning to
+        * private.
+        */
+       smp_wmb();
+       table->private = newinfo;
 
        /*
         * Even though table entries have now been swapped, other CPU's
index cd24290f3b2fdbff20088a0ffd2692fb6240a9ce..e762de5ee89bfa480b1789ce164cb024ac35e91e 100644 (file)
@@ -43,10 +43,42 @@ optlen(const u_int8_t *opt, unsigned int offset)
                return opt[offset+1];
 }
 
+static u_int32_t tcpmss_reverse_mtu(struct net *net,
+                                   const struct sk_buff *skb,
+                                   unsigned int family)
+{
+       struct flowi fl;
+       const struct nf_afinfo *ai;
+       struct rtable *rt = NULL;
+       u_int32_t mtu     = ~0U;
+
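+       /* Route back towards the packet's source; the MTU of that return
+        * route, if one is found, is reported, otherwise ~0U.
+        */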
+       if (family == PF_INET) {
+               struct flowi4 *fl4 = &fl.u.ip4;
+               memset(fl4, 0, sizeof(*fl4));
+               fl4->daddr = ip_hdr(skb)->saddr;
+       } else {
+               struct flowi6 *fl6 = &fl.u.ip6;
+
+               memset(fl6, 0, sizeof(*fl6));
+               fl6->daddr = ipv6_hdr(skb)->saddr;
+       }
+       rcu_read_lock();
+       ai = nf_get_afinfo(family);
+       if (ai != NULL)
+               ai->route(net, (struct dst_entry **)&rt, &fl, false);
+       rcu_read_unlock();
+
+       if (rt != NULL) {
+               mtu = dst_mtu(&rt->dst);
+               dst_release(&rt->dst);
+       }
+       return mtu;
+}
+
 static int
 tcpmss_mangle_packet(struct sk_buff *skb,
                     const struct xt_action_param *par,
-                    unsigned int in_mtu,
+                    unsigned int family,
                     unsigned int tcphoff,
                     unsigned int minlen)
 {
@@ -76,6 +108,9 @@ tcpmss_mangle_packet(struct sk_buff *skb,
                return -1;
 
        if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
+               struct net *net = dev_net(par->in ? par->in : par->out);
+               unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family);
+
                if (dst_mtu(skb_dst(skb)) <= minlen) {
                        net_err_ratelimited("unknown or invalid path-MTU (%u)\n",
                                            dst_mtu(skb_dst(skb)));
@@ -165,37 +200,6 @@ tcpmss_mangle_packet(struct sk_buff *skb,
        return TCPOLEN_MSS;
 }
 
-static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
-                                   unsigned int family)
-{
-       struct flowi fl;
-       const struct nf_afinfo *ai;
-       struct rtable *rt = NULL;
-       u_int32_t mtu     = ~0U;
-
-       if (family == PF_INET) {
-               struct flowi4 *fl4 = &fl.u.ip4;
-               memset(fl4, 0, sizeof(*fl4));
-               fl4->daddr = ip_hdr(skb)->saddr;
-       } else {
-               struct flowi6 *fl6 = &fl.u.ip6;
-
-               memset(fl6, 0, sizeof(*fl6));
-               fl6->daddr = ipv6_hdr(skb)->saddr;
-       }
-       rcu_read_lock();
-       ai = nf_get_afinfo(family);
-       if (ai != NULL)
-               ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
-       rcu_read_unlock();
-
-       if (rt != NULL) {
-               mtu = dst_mtu(&rt->dst);
-               dst_release(&rt->dst);
-       }
-       return mtu;
-}
-
 static unsigned int
 tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
 {
@@ -204,7 +208,7 @@ tcpmss_tg4(struct sk_buff *skb, const struct xt_action_param *par)
        int ret;
 
        ret = tcpmss_mangle_packet(skb, par,
-                                  tcpmss_reverse_mtu(skb, PF_INET),
+                                  PF_INET,
                                   iph->ihl * 4,
                                   sizeof(*iph) + sizeof(struct tcphdr));
        if (ret < 0)
@@ -233,7 +237,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        if (tcphoff < 0)
                return NF_DROP;
        ret = tcpmss_mangle_packet(skb, par,
-                                  tcpmss_reverse_mtu(skb, PF_INET6),
+                                  PF_INET6,
                                   tcphoff,
                                   sizeof(*ipv6h) + sizeof(struct tcphdr));
        if (ret < 0)
index 5d8a3a3cd5a7cd04b714d92a1366c04ad464e716..ef8a926752a97542f6f2f8eeb378e150958bff3d 100644 (file)
@@ -200,7 +200,7 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
                                     in->ifindex);
                if (sk) {
                        int connected = (sk->sk_state == TCP_ESTABLISHED);
-                       int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
+                       int wildcard = ipv6_addr_any(&sk->sk_v6_rcv_saddr);
 
                        /* NOTE: we return listeners even if bound to
                         * 0.0.0.0, those are filtered out in
index 31790e789e224f8fb758ee87e62f8dd8908e81dd..e7c4e0e01ff5de40fb32cdeddd70cb204b84ea32 100644 (file)
@@ -81,7 +81,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
        struct xt_set_info_match_v0 *info = par->matchinfo;
        ip_set_id_t index;
 
-       index = ip_set_nfnl_get_byindex(info->match_set.index);
+       index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
 
        if (index == IPSET_INVALID_ID) {
                pr_warning("Cannot find set indentified by id %u to match\n",
@@ -91,7 +91,7 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
        if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
                pr_warning("Protocol error: set match dimension "
                           "is over the limit!\n");
-               ip_set_nfnl_put(info->match_set.index);
+               ip_set_nfnl_put(par->net, info->match_set.index);
                return -ERANGE;
        }
 
@@ -106,9 +106,104 @@ set_match_v0_destroy(const struct xt_mtdtor_param *par)
 {
        struct xt_set_info_match_v0 *info = par->matchinfo;
 
-       ip_set_nfnl_put(info->match_set.index);
+       ip_set_nfnl_put(par->net, info->match_set.index);
 }
 
+/* Revision 1 match */
+
+static bool
+set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_set_info_match_v1 *info = par->matchinfo;
+       ADT_OPT(opt, par->family, info->match_set.dim,
+               info->match_set.flags, 0, UINT_MAX);
+
+       if (opt.flags & IPSET_RETURN_NOMATCH)
+               opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
+
+       return match_set(info->match_set.index, skb, par, &opt,
+                        info->match_set.flags & IPSET_INV_MATCH);
+}
+
+static int
+set_match_v1_checkentry(const struct xt_mtchk_param *par)
+{
+       struct xt_set_info_match_v1 *info = par->matchinfo;
+       ip_set_id_t index;
+
+       index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
+
+       if (index == IPSET_INVALID_ID) {
+               pr_warning("Cannot find set identified by id %u to match\n",
+                          info->match_set.index);
+               return -ENOENT;
+       }
+       if (info->match_set.dim > IPSET_DIM_MAX) {
+               pr_warning("Protocol error: set match dimension "
+                          "is over the limit!\n");
+               ip_set_nfnl_put(par->net, info->match_set.index);
+               return -ERANGE;
+       }
+
+       return 0;
+}
+
+static void
+set_match_v1_destroy(const struct xt_mtdtor_param *par)
+{
+       struct xt_set_info_match_v1 *info = par->matchinfo;
+
+       ip_set_nfnl_put(par->net, info->match_set.index);
+}
+
+/* Revision 3 match */
+
+static bool
+match_counter(u64 counter, const struct ip_set_counter_match *info)
+{
+       switch (info->op) {
+       case IPSET_COUNTER_NONE:
+               return true;
+       case IPSET_COUNTER_EQ:
+               return counter == info->value;
+       case IPSET_COUNTER_NE:
+               return counter != info->value;
+       case IPSET_COUNTER_LT:
+               return counter < info->value;
+       case IPSET_COUNTER_GT:
+               return counter > info->value;
+       }
+       return false;
+}
+
+static bool
+set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
+{
+       const struct xt_set_info_match_v3 *info = par->matchinfo;
+       ADT_OPT(opt, par->family, info->match_set.dim,
+               info->match_set.flags, info->flags, UINT_MAX);
+       int ret;
+
+       if (info->packets.op != IPSET_COUNTER_NONE ||
+           info->bytes.op != IPSET_COUNTER_NONE)
+               opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
+
+       ret = match_set(info->match_set.index, skb, par, &opt,
+                       info->match_set.flags & IPSET_INV_MATCH);
+
+       if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
+               return ret;
+
+       if (!match_counter(opt.ext.packets, &info->packets))
+               return 0;
+       return match_counter(opt.ext.bytes, &info->bytes);
+}
+
+#define set_match_v3_checkentry        set_match_v1_checkentry
+#define set_match_v3_destroy   set_match_v1_destroy
+
+/* Revision 0 interface: backward compatible with netfilter/iptables */
+
 static unsigned int
 set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
 {
@@ -133,7 +228,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
        ip_set_id_t index;
 
        if (info->add_set.index != IPSET_INVALID_ID) {
-               index = ip_set_nfnl_get_byindex(info->add_set.index);
+               index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
                if (index == IPSET_INVALID_ID) {
                        pr_warning("Cannot find add_set index %u as target\n",
                                   info->add_set.index);
@@ -142,12 +237,12 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
        }
 
        if (info->del_set.index != IPSET_INVALID_ID) {
-               index = ip_set_nfnl_get_byindex(info->del_set.index);
+               index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
                if (index == IPSET_INVALID_ID) {
                        pr_warning("Cannot find del_set index %u as target\n",
                                   info->del_set.index);
                        if (info->add_set.index != IPSET_INVALID_ID)
-                               ip_set_nfnl_put(info->add_set.index);
+                               ip_set_nfnl_put(par->net, info->add_set.index);
                        return -ENOENT;
                }
        }
@@ -156,9 +251,9 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
                pr_warning("Protocol error: SET target dimension "
                           "is over the limit!\n");
                if (info->add_set.index != IPSET_INVALID_ID)
-                       ip_set_nfnl_put(info->add_set.index);
+                       ip_set_nfnl_put(par->net, info->add_set.index);
                if (info->del_set.index != IPSET_INVALID_ID)
-                       ip_set_nfnl_put(info->del_set.index);
+                       ip_set_nfnl_put(par->net, info->del_set.index);
                return -ERANGE;
        }
 
@@ -175,57 +270,12 @@ set_target_v0_destroy(const struct xt_tgdtor_param *par)
        const struct xt_set_info_target_v0 *info = par->targinfo;
 
        if (info->add_set.index != IPSET_INVALID_ID)
-               ip_set_nfnl_put(info->add_set.index);
+               ip_set_nfnl_put(par->net, info->add_set.index);
        if (info->del_set.index != IPSET_INVALID_ID)
-               ip_set_nfnl_put(info->del_set.index);
+               ip_set_nfnl_put(par->net, info->del_set.index);
 }
 
-/* Revision 1 match and target */
-
-static bool
-set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
-{
-       const struct xt_set_info_match_v1 *info = par->matchinfo;
-       ADT_OPT(opt, par->family, info->match_set.dim,
-               info->match_set.flags, 0, UINT_MAX);
-
-       if (opt.flags & IPSET_RETURN_NOMATCH)
-               opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;
-
-       return match_set(info->match_set.index, skb, par, &opt,
-                        info->match_set.flags & IPSET_INV_MATCH);
-}
-
-static int
-set_match_v1_checkentry(const struct xt_mtchk_param *par)
-{
-       struct xt_set_info_match_v1 *info = par->matchinfo;
-       ip_set_id_t index;
-
-       index = ip_set_nfnl_get_byindex(info->match_set.index);
-
-       if (index == IPSET_INVALID_ID) {
-               pr_warning("Cannot find set indentified by id %u to match\n",
-                          info->match_set.index);
-               return -ENOENT;
-       }
-       if (info->match_set.dim > IPSET_DIM_MAX) {
-               pr_warning("Protocol error: set match dimension "
-                          "is over the limit!\n");
-               ip_set_nfnl_put(info->match_set.index);
-               return -ERANGE;
-       }
-
-       return 0;
-}
-
-static void
-set_match_v1_destroy(const struct xt_mtdtor_param *par)
-{
-       struct xt_set_info_match_v1 *info = par->matchinfo;
-
-       ip_set_nfnl_put(info->match_set.index);
-}
+/* Revision 1 target */
 
 static unsigned int
 set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
@@ -251,7 +301,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
        ip_set_id_t index;
 
        if (info->add_set.index != IPSET_INVALID_ID) {
-               index = ip_set_nfnl_get_byindex(info->add_set.index);
+               index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
                if (index == IPSET_INVALID_ID) {
                        pr_warning("Cannot find add_set index %u as target\n",
                                   info->add_set.index);
@@ -260,12 +310,12 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
        }
 
        if (info->del_set.index != IPSET_INVALID_ID) {
-               index = ip_set_nfnl_get_byindex(info->del_set.index);
+               index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
                if (index == IPSET_INVALID_ID) {
                        pr_warning("Cannot find del_set index %u as target\n",
                                   info->del_set.index);
                        if (info->add_set.index != IPSET_INVALID_ID)
-                               ip_set_nfnl_put(info->add_set.index);
+                               ip_set_nfnl_put(par->net, info->add_set.index);
                        return -ENOENT;
                }
        }
@@ -274,9 +324,9 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
                pr_warning("Protocol error: SET target dimension "
                           "is over the limit!\n");
                if (info->add_set.index != IPSET_INVALID_ID)
-                       ip_set_nfnl_put(info->add_set.index);
+                       ip_set_nfnl_put(par->net, info->add_set.index);
                if (info->del_set.index != IPSET_INVALID_ID)
-                       ip_set_nfnl_put(info->del_set.index);
+                       ip_set_nfnl_put(par->net, info->del_set.index);
                return -ERANGE;
        }
 
@@ -289,9 +339,9 @@ set_target_v1_destroy(const struct xt_tgdtor_param *par)
        const struct xt_set_info_target_v1 *info = par->targinfo;
 
        if (info->add_set.index != IPSET_INVALID_ID)
-               ip_set_nfnl_put(info->add_set.index);
+               ip_set_nfnl_put(par->net, info->add_set.index);
        if (info->del_set.index != IPSET_INVALID_ID)
-               ip_set_nfnl_put(info->del_set.index);
+               ip_set_nfnl_put(par->net, info->del_set.index);
 }
 
 /* Revision 2 target */
@@ -320,52 +370,6 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
 #define set_target_v2_checkentry       set_target_v1_checkentry
 #define set_target_v2_destroy          set_target_v1_destroy
 
-/* Revision 3 match */
-
-static bool
-match_counter(u64 counter, const struct ip_set_counter_match *info)
-{
-       switch (info->op) {
-       case IPSET_COUNTER_NONE:
-               return true;
-       case IPSET_COUNTER_EQ:
-               return counter == info->value;
-       case IPSET_COUNTER_NE:
-               return counter != info->value;
-       case IPSET_COUNTER_LT:
-               return counter < info->value;
-       case IPSET_COUNTER_GT:
-               return counter > info->value;
-       }
-       return false;
-}
-
-static bool
-set_match_v3(const struct sk_buff *skb, struct xt_action_param *par)
-{
-       const struct xt_set_info_match_v3 *info = par->matchinfo;
-       ADT_OPT(opt, par->family, info->match_set.dim,
-               info->match_set.flags, info->flags, UINT_MAX);
-       int ret;
-
-       if (info->packets.op != IPSET_COUNTER_NONE ||
-           info->bytes.op != IPSET_COUNTER_NONE)
-               opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;
-
-       ret = match_set(info->match_set.index, skb, par, &opt,
-                       info->match_set.flags & IPSET_INV_MATCH);
-
-       if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
-               return ret;
-
-       if (!match_counter(opt.ext.packets, &info->packets))
-               return 0;
-       return match_counter(opt.ext.bytes, &info->bytes);
-}
-
-#define set_match_v3_checkentry        set_match_v1_checkentry
-#define set_match_v3_destroy   set_match_v1_destroy
-
 static struct xt_match set_matches[] __read_mostly = {
        {
                .name           = "set",
index 06df2b9110f5f2b9376c19e34241c257b31e5f63..3dd0e374bc2b3952f845470c67e3a8f77391ff23 100644 (file)
@@ -370,7 +370,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
                 */
                wildcard = (!(info->flags & XT_SOCKET_NOWILDCARD) &&
                            sk->sk_state != TCP_TIME_WAIT &&
-                           ipv6_addr_any(&inet6_sk(sk)->rcv_saddr));
+                           ipv6_addr_any(&sk->sk_v6_rcv_saddr));
 
                /* Ignore non-transparent sockets,
                   if XT_SOCKET_TRANSPARENT is used */
index 96a458e12f60ada2186c08ebbafe22c6e68ba06e..dce1bebf7aecb98f4cfae0beb45a9b3c576c21d3 100644 (file)
@@ -817,7 +817,7 @@ int netlbl_req_setattr(struct request_sock *req,
        switch (req->rsk_ops->family) {
        case AF_INET:
                entry = netlbl_domhsh_getentry_af4(secattr->domain,
-                                                  inet_rsk(req)->rmt_addr);
+                                                  inet_rsk(req)->ir_rmt_addr);
                if (entry == NULL) {
                        ret_val = -ENOENT;
                        goto req_setattr_return;
index 5948b2fc72f63d3a9994e962b1366f8913c61ed7..6e0fa0cce1982a8825d36e7966851a46fe1bcb2e 100644 (file)
@@ -14,6 +14,20 @@ menuconfig NFC
          To compile this support as a module, choose M here: the module will
          be called nfc.
 
+config NFC_DIGITAL
+       depends on NFC
+       select CRC_CCITT
+       select CRC_ITU_T
+       tristate "NFC Digital Protocol stack support"
+       default n
+       help
+         Say Y if you want to build NFC digital protocol stack support.
+         This is needed by NFC chipsets whose firmware only implements
+         the NFC analog layer.
+
+         To compile this support as a module, choose M here: the module will
+         be called nfc_digital.
+
 source "net/nfc/nci/Kconfig"
 source "net/nfc/hci/Kconfig"
 
index a76f4533cb6ce5a1b51e2199bbfd3c8bf91a86a4..2555ff8e7219b19ecdc06da62d5bef6ce555ae20 100644 (file)
@@ -5,7 +5,9 @@
 obj-$(CONFIG_NFC) += nfc.o
 obj-$(CONFIG_NFC_NCI) += nci/
 obj-$(CONFIG_NFC_HCI) += hci/
+obj-$(CONFIG_NFC_DIGITAL) += nfc_digital.o
 
 nfc-objs := core.o netlink.o af_nfc.o rawsock.o llcp_core.o llcp_commands.o \
                llcp_sock.o
 
+nfc_digital-objs := digital_core.o digital_technology.o digital_dep.o
index e92923cf3e0374f950417e5039ba2e61369c8e27..872529105abc7c3a2e41b9d579e934e019e8e5ff 100644 (file)
@@ -384,6 +384,19 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
 {
        dev->dep_link_up = true;
 
+       if (!dev->active_target) {
+               struct nfc_target *target;
+
+               target = nfc_find_target(dev, target_idx);
+               if (target == NULL)
+                       return -ENOTCONN;
+
+               dev->active_target = target;
+       }
+
+       dev->polling = false;
+       dev->rf_mode = rf_mode;
+
        nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);
 
        return nfc_genl_dep_link_up_event(dev, target_idx, comm_mode, rf_mode);
@@ -536,7 +549,7 @@ error:
        return rc;
 }
 
-static struct nfc_se *find_se(struct nfc_dev *dev, u32 se_idx)
+struct nfc_se *nfc_find_se(struct nfc_dev *dev, u32 se_idx)
 {
        struct nfc_se *se, *n;
 
@@ -546,6 +559,7 @@ static struct nfc_se *find_se(struct nfc_dev *dev, u32 se_idx)
 
        return NULL;
 }
+EXPORT_SYMBOL(nfc_find_se);
 
 int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
 {
@@ -577,7 +591,7 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
                goto error;
        }
 
-       se = find_se(dev, se_idx);
+       se = nfc_find_se(dev, se_idx);
        if (!se) {
                rc = -EINVAL;
                goto error;
@@ -622,7 +636,7 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
                goto error;
        }
 
-       se = find_se(dev, se_idx);
+       se = nfc_find_se(dev, se_idx);
        if (!se) {
                rc = -EINVAL;
                goto error;
@@ -881,7 +895,7 @@ int nfc_add_se(struct nfc_dev *dev, u32 se_idx, u16 type)
 
        pr_debug("%s se index %d\n", dev_name(&dev->dev), se_idx);
 
-       se = find_se(dev, se_idx);
+       se = nfc_find_se(dev, se_idx);
        if (se)
                return -EALREADY;
 
diff --git a/net/nfc/digital.h b/net/nfc/digital.h
new file mode 100644 (file)
index 0000000..08b29b5
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __DIGITAL_H
+#define __DIGITAL_H
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/digital.h>
+
+#include <linux/crc-ccitt.h>
+#include <linux/crc-itu-t.h>
+
+#define PROTOCOL_ERR(req) pr_err("%d: NFC Digital Protocol error: %s\n", \
+                                __LINE__, req)
+
+#define DIGITAL_CMD_IN_SEND        0
+#define DIGITAL_CMD_TG_SEND        1
+#define DIGITAL_CMD_TG_LISTEN      2
+#define DIGITAL_CMD_TG_LISTEN_MDAA 3
+
+#define DIGITAL_MAX_HEADER_LEN 7
+#define DIGITAL_CRC_LEN        2
+
+#define DIGITAL_SENSF_NFCID2_NFC_DEP_B1 0x01
+#define DIGITAL_SENSF_NFCID2_NFC_DEP_B2 0xFE
+
+#define DIGITAL_SENS_RES_NFC_DEP 0x0100
+#define DIGITAL_SEL_RES_NFC_DEP  0x40
+#define DIGITAL_SENSF_FELICA_SC  0xFFFF
+
+#define DIGITAL_DRV_CAPS_IN_CRC(ddev) \
+       ((ddev)->driver_capabilities & NFC_DIGITAL_DRV_CAPS_IN_CRC)
+#define DIGITAL_DRV_CAPS_TG_CRC(ddev) \
+       ((ddev)->driver_capabilities & NFC_DIGITAL_DRV_CAPS_TG_CRC)
+
+struct digital_data_exch {
+       data_exchange_cb_t cb;
+       void *cb_context;
+};
+
+struct sk_buff *digital_skb_alloc(struct nfc_digital_dev *ddev,
+                                 unsigned int len);
+
+int digital_send_cmd(struct nfc_digital_dev *ddev, u8 cmd_type,
+                    struct sk_buff *skb, struct digital_tg_mdaa_params *params,
+                    u16 timeout, nfc_digital_cmd_complete_t cmd_cb,
+                    void *cb_context);
+
+int digital_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param);
+static inline int digital_in_send_cmd(struct nfc_digital_dev *ddev,
+                                     struct sk_buff *skb, u16 timeout,
+                                     nfc_digital_cmd_complete_t cmd_cb,
+                                     void *cb_context)
+{
+       return digital_send_cmd(ddev, DIGITAL_CMD_IN_SEND, skb, NULL, timeout,
+                               cmd_cb, cb_context);
+}
+
+void digital_poll_next_tech(struct nfc_digital_dev *ddev);
+
+int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech);
+int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+int digital_target_found(struct nfc_digital_dev *ddev,
+                        struct nfc_target *target, u8 protocol);
+
+int digital_in_recv_mifare_res(struct sk_buff *resp);
+
+int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
+                           struct nfc_target *target, __u8 comm_mode, __u8 *gb,
+                           size_t gb_len);
+int digital_in_send_dep_req(struct nfc_digital_dev *ddev,
+                           struct nfc_target *target, struct sk_buff *skb,
+                           struct digital_data_exch *data_exch);
+
+int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param);
+static inline int digital_tg_send_cmd(struct nfc_digital_dev *ddev,
+                       struct sk_buff *skb, u16 timeout,
+                       nfc_digital_cmd_complete_t cmd_cb, void *cb_context)
+{
+       return digital_send_cmd(ddev, DIGITAL_CMD_TG_SEND, skb, NULL, timeout,
+                               cmd_cb, cb_context);
+}
+
+void digital_tg_recv_sens_req(struct nfc_digital_dev *ddev, void *arg,
+                             struct sk_buff *resp);
+
+void digital_tg_recv_sensf_req(struct nfc_digital_dev *ddev, void *arg,
+                              struct sk_buff *resp);
+
+static inline int digital_tg_listen(struct nfc_digital_dev *ddev, u16 timeout,
+                                   nfc_digital_cmd_complete_t cb, void *arg)
+{
+       return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN, NULL, NULL,
+                               timeout, cb, arg);
+}
+
+void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg,
+                            struct sk_buff *resp);
+
+int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb);
+
+int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech);
+int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech);
+
+typedef u16 (*crc_func_t)(u16, const u8 *, size_t);
+
+#define CRC_A_INIT 0x6363
+#define CRC_B_INIT 0xFFFF
+#define CRC_F_INIT 0x0000
+
+void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init,
+                        u8 bitwise_inv, u8 msb_first);
+
+static inline void digital_skb_add_crc_a(struct sk_buff *skb)
+{
+       digital_skb_add_crc(skb, crc_ccitt, CRC_A_INIT, 0, 0);
+}
+
+static inline void digital_skb_add_crc_b(struct sk_buff *skb)
+{
+       digital_skb_add_crc(skb, crc_ccitt, CRC_B_INIT, 1, 0);
+}
+
+static inline void digital_skb_add_crc_f(struct sk_buff *skb)
+{
+       digital_skb_add_crc(skb, crc_itu_t, CRC_F_INIT, 0, 1);
+}
+
+static inline void digital_skb_add_crc_none(struct sk_buff *skb)
+{
+       return;
+}
+
+int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func,
+                         u16 crc_init, u8 bitwise_inv, u8 msb_first);
+
+static inline int digital_skb_check_crc_a(struct sk_buff *skb)
+{
+       return digital_skb_check_crc(skb, crc_ccitt, CRC_A_INIT, 0, 0);
+}
+
+static inline int digital_skb_check_crc_b(struct sk_buff *skb)
+{
+       return digital_skb_check_crc(skb, crc_ccitt, CRC_B_INIT, 1, 0);
+}
+
+static inline int digital_skb_check_crc_f(struct sk_buff *skb)
+{
+       return digital_skb_check_crc(skb, crc_itu_t, CRC_F_INIT, 0, 1);
+}
+
+static inline int digital_skb_check_crc_none(struct sk_buff *skb)
+{
+       return 0;
+}
+
+#endif /* __DIGITAL_H */
diff --git a/net/nfc/digital_core.c b/net/nfc/digital_core.c
new file mode 100644 (file)
index 0000000..09fc954
--- /dev/null
@@ -0,0 +1,737 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "digital: %s: " fmt, __func__
+
+#include <linux/module.h>
+
+#include "digital.h"
+
+#define DIGITAL_PROTO_NFCA_RF_TECH \
+       (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_NFC_DEP_MASK)
+
+#define DIGITAL_PROTO_NFCF_RF_TECH \
+       (NFC_PROTO_FELICA_MASK | NFC_PROTO_NFC_DEP_MASK)
+
+struct digital_cmd {
+       struct list_head queue;
+
+       u8 type;
+       u8 pending;
+
+       u16 timeout;
+       struct sk_buff *req;
+       struct sk_buff *resp;
+       struct digital_tg_mdaa_params *mdaa_params;
+
+       nfc_digital_cmd_complete_t cmd_cb;
+       void *cb_context;
+};
+
+struct sk_buff *digital_skb_alloc(struct nfc_digital_dev *ddev,
+                                 unsigned int len)
+{
+       struct sk_buff *skb;
+
+       skb = alloc_skb(len + ddev->tx_headroom + ddev->tx_tailroom,
+                       GFP_KERNEL);
+       if (skb)
+               skb_reserve(skb, ddev->tx_headroom);
+
+       return skb;
+}
+
+void digital_skb_add_crc(struct sk_buff *skb, crc_func_t crc_func, u16 init,
+                        u8 bitwise_inv, u8 msb_first)
+{
+       u16 crc;
+
+       crc = crc_func(init, skb->data, skb->len);
+
+       if (bitwise_inv)
+               crc = ~crc;
+
+       if (msb_first)
+               crc = __fswab16(crc);
+
+       *skb_put(skb, 1) = crc & 0xFF;
+       *skb_put(skb, 1) = (crc >> 8) & 0xFF;
+}
+
+int digital_skb_check_crc(struct sk_buff *skb, crc_func_t crc_func,
+                         u16 crc_init, u8 bitwise_inv, u8 msb_first)
+{
+       int rc;
+       u16 crc;
+
+       if (skb->len <= 2)
+               return -EIO;
+
+       crc = crc_func(crc_init, skb->data, skb->len - 2);
+
+       if (bitwise_inv)
+               crc = ~crc;
+
+       if (msb_first)
+               crc = __swab16(crc);
+
+       rc = (skb->data[skb->len - 2] != (crc & 0xFF)) ||
+            (skb->data[skb->len - 1] != ((crc >> 8) & 0xFF));
+
+       if (rc)
+               return -EIO;
+
+       skb_trim(skb, skb->len - 2);
+
+       return 0;
+}
+
+static inline void digital_switch_rf(struct nfc_digital_dev *ddev, bool on)
+{
+       ddev->ops->switch_rf(ddev, on);
+}
+
+static inline void digital_abort_cmd(struct nfc_digital_dev *ddev)
+{
+       ddev->ops->abort_cmd(ddev);
+}
+
+static void digital_wq_cmd_complete(struct work_struct *work)
+{
+       struct digital_cmd *cmd;
+       struct nfc_digital_dev *ddev = container_of(work,
+                                                   struct nfc_digital_dev,
+                                                   cmd_complete_work);
+
+       mutex_lock(&ddev->cmd_lock);
+
+       cmd = list_first_entry_or_null(&ddev->cmd_queue, struct digital_cmd,
+                                      queue);
+       if (!cmd) {
+               mutex_unlock(&ddev->cmd_lock);
+               return;
+       }
+
+       list_del(&cmd->queue);
+
+       mutex_unlock(&ddev->cmd_lock);
+
+       if (!IS_ERR(cmd->resp))
+               print_hex_dump_debug("DIGITAL RX: ", DUMP_PREFIX_NONE, 16, 1,
+                                    cmd->resp->data, cmd->resp->len, false);
+
+       cmd->cmd_cb(ddev, cmd->cb_context, cmd->resp);
+
+       kfree(cmd->mdaa_params);
+       kfree(cmd);
+
+       schedule_work(&ddev->cmd_work);
+}
+
+static void digital_send_cmd_complete(struct nfc_digital_dev *ddev,
+                                     void *arg, struct sk_buff *resp)
+{
+       struct digital_cmd *cmd = arg;
+
+       cmd->resp = resp;
+
+       schedule_work(&ddev->cmd_complete_work);
+}
+
+static void digital_wq_cmd(struct work_struct *work)
+{
+       int rc;
+       struct digital_cmd *cmd;
+       struct digital_tg_mdaa_params *params;
+       struct nfc_digital_dev *ddev = container_of(work,
+                                                   struct nfc_digital_dev,
+                                                   cmd_work);
+
+       mutex_lock(&ddev->cmd_lock);
+
+       cmd = list_first_entry_or_null(&ddev->cmd_queue, struct digital_cmd,
+                                      queue);
+       if (!cmd || cmd->pending) {
+               mutex_unlock(&ddev->cmd_lock);
+               return;
+       }
+
+       mutex_unlock(&ddev->cmd_lock);
+
+       if (cmd->req)
+               print_hex_dump_debug("DIGITAL TX: ", DUMP_PREFIX_NONE, 16, 1,
+                                    cmd->req->data, cmd->req->len, false);
+
+       switch (cmd->type) {
+       case DIGITAL_CMD_IN_SEND:
+               rc = ddev->ops->in_send_cmd(ddev, cmd->req, cmd->timeout,
+                                           digital_send_cmd_complete, cmd);
+               break;
+
+       case DIGITAL_CMD_TG_SEND:
+               rc = ddev->ops->tg_send_cmd(ddev, cmd->req, cmd->timeout,
+                                           digital_send_cmd_complete, cmd);
+               break;
+
+       case DIGITAL_CMD_TG_LISTEN:
+               rc = ddev->ops->tg_listen(ddev, cmd->timeout,
+                                         digital_send_cmd_complete, cmd);
+               break;
+
+       case DIGITAL_CMD_TG_LISTEN_MDAA:
+               params = cmd->mdaa_params;
+
+               rc = ddev->ops->tg_listen_mdaa(ddev, params, cmd->timeout,
+                                              digital_send_cmd_complete, cmd);
+               break;
+
+       default:
+               pr_err("Unknown cmd type %d\n", cmd->type);
+               return;
+       }
+
+       if (!rc)
+               return;
+
+       pr_err("in_send_command returned err %d\n", rc);
+
+       mutex_lock(&ddev->cmd_lock);
+       list_del(&cmd->queue);
+       mutex_unlock(&ddev->cmd_lock);
+
+       kfree_skb(cmd->req);
+       kfree(cmd->mdaa_params);
+       kfree(cmd);
+
+       schedule_work(&ddev->cmd_work);
+}
+
+int digital_send_cmd(struct nfc_digital_dev *ddev, u8 cmd_type,
+                    struct sk_buff *skb, struct digital_tg_mdaa_params *params,
+                    u16 timeout, nfc_digital_cmd_complete_t cmd_cb,
+                    void *cb_context)
+{
+       struct digital_cmd *cmd;
+
+       cmd = kzalloc(sizeof(struct digital_cmd), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->type = cmd_type;
+       cmd->timeout = timeout;
+       cmd->req = skb;
+       cmd->mdaa_params = params;
+       cmd->cmd_cb = cmd_cb;
+       cmd->cb_context = cb_context;
+       INIT_LIST_HEAD(&cmd->queue);
+
+       mutex_lock(&ddev->cmd_lock);
+       list_add_tail(&cmd->queue, &ddev->cmd_queue);
+       mutex_unlock(&ddev->cmd_lock);
+
+       schedule_work(&ddev->cmd_work);
+
+       return 0;
+}
+
+int digital_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
+{
+       int rc;
+
+       rc = ddev->ops->in_configure_hw(ddev, type, param);
+       if (rc)
+               pr_err("in_configure_hw failed: %d\n", rc);
+
+       return rc;
+}
+
+int digital_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param)
+{
+       int rc;
+
+       rc = ddev->ops->tg_configure_hw(ddev, type, param);
+       if (rc)
+               pr_err("tg_configure_hw failed: %d\n", rc);
+
+       return rc;
+}
+
+static int digital_tg_listen_mdaa(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+       struct digital_tg_mdaa_params *params;
+
+       params = kzalloc(sizeof(struct digital_tg_mdaa_params), GFP_KERNEL);
+       if (!params)
+               return -ENOMEM;
+
+       params->sens_res = DIGITAL_SENS_RES_NFC_DEP;
+       get_random_bytes(params->nfcid1, sizeof(params->nfcid1));
+       params->sel_res = DIGITAL_SEL_RES_NFC_DEP;
+
+       params->nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1;
+       params->nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2;
+       get_random_bytes(params->nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2);
+       params->sc = DIGITAL_SENSF_FELICA_SC;
+
+       return digital_send_cmd(ddev, DIGITAL_CMD_TG_LISTEN_MDAA, NULL, params,
+                               500, digital_tg_recv_atr_req, NULL);
+}
+
+int digital_target_found(struct nfc_digital_dev *ddev,
+                        struct nfc_target *target, u8 protocol)
+{
+       int rc;
+       u8 framing;
+       u8 rf_tech;
+       int (*check_crc)(struct sk_buff *skb);
+       void (*add_crc)(struct sk_buff *skb);
+
+       rf_tech = ddev->poll_techs[ddev->poll_tech_index].rf_tech;
+
+       switch (protocol) {
+       case NFC_PROTO_JEWEL:
+               framing = NFC_DIGITAL_FRAMING_NFCA_T1T;
+               check_crc = digital_skb_check_crc_b;
+               add_crc = digital_skb_add_crc_b;
+               break;
+
+       case NFC_PROTO_MIFARE:
+               framing = NFC_DIGITAL_FRAMING_NFCA_T2T;
+               check_crc = digital_skb_check_crc_a;
+               add_crc = digital_skb_add_crc_a;
+               break;
+
+       case NFC_PROTO_FELICA:
+               framing = NFC_DIGITAL_FRAMING_NFCF_T3T;
+               check_crc = digital_skb_check_crc_f;
+               add_crc = digital_skb_add_crc_f;
+               break;
+
+       case NFC_PROTO_NFC_DEP:
+               if (rf_tech == NFC_DIGITAL_RF_TECH_106A) {
+                       framing = NFC_DIGITAL_FRAMING_NFCA_NFC_DEP;
+                       check_crc = digital_skb_check_crc_a;
+                       add_crc = digital_skb_add_crc_a;
+               } else {
+                       framing = NFC_DIGITAL_FRAMING_NFCF_NFC_DEP;
+                       check_crc = digital_skb_check_crc_f;
+                       add_crc = digital_skb_add_crc_f;
+               }
+               break;
+
+       default:
+               pr_err("Invalid protocol %d\n", protocol);
+               return -EINVAL;
+       }
+
+       pr_debug("rf_tech=%d, protocol=%d\n", rf_tech, protocol);
+
+       ddev->curr_rf_tech = rf_tech;
+       ddev->curr_protocol = protocol;
+
+       if (DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+               ddev->skb_add_crc = digital_skb_add_crc_none;
+               ddev->skb_check_crc = digital_skb_check_crc_none;
+       } else {
+               ddev->skb_add_crc = add_crc;
+               ddev->skb_check_crc = check_crc;
+       }
+
+       rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, framing);
+       if (rc)
+               return rc;
+
+       target->supported_protocols = (1 << protocol);
+       rc = nfc_targets_found(ddev->nfc_dev, target, 1);
+       if (rc)
+               return rc;
+
+       ddev->poll_tech_count = 0;
+
+       return 0;
+}
+
+void digital_poll_next_tech(struct nfc_digital_dev *ddev)
+{
+       digital_switch_rf(ddev, 0);
+
+       mutex_lock(&ddev->poll_lock);
+
+       if (!ddev->poll_tech_count) {
+               mutex_unlock(&ddev->poll_lock);
+               return;
+       }
+
+       ddev->poll_tech_index = (ddev->poll_tech_index + 1) %
+                               ddev->poll_tech_count;
+
+       mutex_unlock(&ddev->poll_lock);
+
+       schedule_work(&ddev->poll_work);
+}
+
+static void digital_wq_poll(struct work_struct *work)
+{
+       int rc;
+       struct digital_poll_tech *poll_tech;
+       struct nfc_digital_dev *ddev = container_of(work,
+                                                   struct nfc_digital_dev,
+                                                   poll_work);
+       mutex_lock(&ddev->poll_lock);
+
+       if (!ddev->poll_tech_count) {
+               mutex_unlock(&ddev->poll_lock);
+               return;
+       }
+
+       poll_tech = &ddev->poll_techs[ddev->poll_tech_index];
+
+       mutex_unlock(&ddev->poll_lock);
+
+       rc = poll_tech->poll_func(ddev, poll_tech->rf_tech);
+       if (rc)
+               digital_poll_next_tech(ddev);
+}
+
+static void digital_add_poll_tech(struct nfc_digital_dev *ddev, u8 rf_tech,
+                                 digital_poll_t poll_func)
+{
+       struct digital_poll_tech *poll_tech;
+
+       if (ddev->poll_tech_count >= NFC_DIGITAL_POLL_MODE_COUNT_MAX)
+               return;
+
+       poll_tech = &ddev->poll_techs[ddev->poll_tech_count++];
+
+       poll_tech->rf_tech = rf_tech;
+       poll_tech->poll_func = poll_func;
+}
+
+/**
+ * start_poll operation
+ *
+ * For every supported protocol, the corresponding polling function is added
+ * to the table of polling technologies (ddev->poll_techs[]) using
+ * digital_add_poll_tech().
+ * When a polling function fails (on timeout or protocol error), the next one
+ * is scheduled by digital_poll_next_tech() on the poll workqueue
+ * (ddev->poll_work). A standalone sketch of this rotation follows
+ * digital_start_poll() below.
+ */
+static int digital_start_poll(struct nfc_dev *nfc_dev, __u32 im_protocols,
+                             __u32 tm_protocols)
+{
+       struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+       u32 matching_im_protocols, matching_tm_protocols;
+
+       pr_debug("protocols: im 0x%x, tm 0x%x, supported 0x%x\n", im_protocols,
+                tm_protocols, ddev->protocols);
+
+       matching_im_protocols = ddev->protocols & im_protocols;
+       matching_tm_protocols = ddev->protocols & tm_protocols;
+
+       if (!matching_im_protocols && !matching_tm_protocols) {
+               pr_err("Unknown protocol\n");
+               return -EINVAL;
+       }
+
+       if (ddev->poll_tech_count) {
+               pr_err("Already polling\n");
+               return -EBUSY;
+       }
+
+       if (ddev->curr_protocol) {
+               pr_err("A target is already active\n");
+               return -EBUSY;
+       }
+
+       ddev->poll_tech_count = 0;
+       ddev->poll_tech_index = 0;
+
+       if (matching_im_protocols & DIGITAL_PROTO_NFCA_RF_TECH)
+               digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
+                                     digital_in_send_sens_req);
+
+       if (im_protocols & DIGITAL_PROTO_NFCF_RF_TECH) {
+               digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F,
+                                     digital_in_send_sensf_req);
+
+               digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_424F,
+                                     digital_in_send_sensf_req);
+       }
+
+       if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) {
+               if (ddev->ops->tg_listen_mdaa) {
+                       digital_add_poll_tech(ddev, 0,
+                                             digital_tg_listen_mdaa);
+               } else {
+                       digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_106A,
+                                             digital_tg_listen_nfca);
+
+                       digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_212F,
+                                             digital_tg_listen_nfcf);
+
+                       digital_add_poll_tech(ddev, NFC_DIGITAL_RF_TECH_424F,
+                                             digital_tg_listen_nfcf);
+               }
+       }
+
+       if (!ddev->poll_tech_count) {
+               pr_err("Unsupported protocols: im=0x%x, tm=0x%x\n",
+                      matching_im_protocols, matching_tm_protocols);
+               return -EINVAL;
+       }
+
+       schedule_work(&ddev->poll_work);
+
+       return 0;
+}
+
+static void digital_stop_poll(struct nfc_dev *nfc_dev)
+{
+       struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+       mutex_lock(&ddev->poll_lock);
+
+       if (!ddev->poll_tech_count) {
+               pr_err("Polling operation was not running\n");
+               mutex_unlock(&ddev->poll_lock);
+               return;
+       }
+
+       ddev->poll_tech_count = 0;
+
+       mutex_unlock(&ddev->poll_lock);
+
+       cancel_work_sync(&ddev->poll_work);
+
+       digital_abort_cmd(ddev);
+}
+
+static int digital_dev_up(struct nfc_dev *nfc_dev)
+{
+       struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+       digital_switch_rf(ddev, 1);
+
+       return 0;
+}
+
+static int digital_dev_down(struct nfc_dev *nfc_dev)
+{
+       struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+       digital_switch_rf(ddev, 0);
+
+       return 0;
+}
+
+static int digital_dep_link_up(struct nfc_dev *nfc_dev,
+                              struct nfc_target *target,
+                              __u8 comm_mode, __u8 *gb, size_t gb_len)
+{
+       struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+       return digital_in_send_atr_req(ddev, target, comm_mode, gb, gb_len);
+}
+
+static int digital_dep_link_down(struct nfc_dev *nfc_dev)
+{
+       struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+       ddev->curr_protocol = 0;
+
+       return 0;
+}
+
+static int digital_activate_target(struct nfc_dev *nfc_dev,
+                                  struct nfc_target *target, __u32 protocol)
+{
+       return 0;
+}
+
+static void digital_deactivate_target(struct nfc_dev *nfc_dev,
+                                     struct nfc_target *target)
+{
+       struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+
+       ddev->curr_protocol = 0;
+}
+
+static int digital_tg_send(struct nfc_dev *dev, struct sk_buff *skb)
+{
+       struct nfc_digital_dev *ddev = nfc_get_drvdata(dev);
+
+       return digital_tg_send_dep_res(ddev, skb);
+}
+
+static void digital_in_send_complete(struct nfc_digital_dev *ddev, void *arg,
+                                    struct sk_buff *resp)
+{
+       struct digital_data_exch *data_exch = arg;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               goto done;
+       }
+
+       if (ddev->curr_protocol == NFC_PROTO_MIFARE)
+               rc = digital_in_recv_mifare_res(resp);
+       else
+               rc = ddev->skb_check_crc(resp);
+
+       if (rc) {
+               kfree_skb(resp);
+               resp = NULL;
+       }
+
+done:
+       data_exch->cb(data_exch->cb_context, resp, rc);
+
+       kfree(data_exch);
+}
+
+static int digital_in_send(struct nfc_dev *nfc_dev, struct nfc_target *target,
+                          struct sk_buff *skb, data_exchange_cb_t cb,
+                          void *cb_context)
+{
+       struct nfc_digital_dev *ddev = nfc_get_drvdata(nfc_dev);
+       struct digital_data_exch *data_exch;
+
+       data_exch = kzalloc(sizeof(struct digital_data_exch), GFP_KERNEL);
+       if (!data_exch) {
+               pr_err("Failed to allocate data_exch struct\n");
+               return -ENOMEM;
+       }
+
+       data_exch->cb = cb;
+       data_exch->cb_context = cb_context;
+
+       if (ddev->curr_protocol == NFC_PROTO_NFC_DEP)
+               return digital_in_send_dep_req(ddev, target, skb, data_exch);
+
+       ddev->skb_add_crc(skb);
+
+       return digital_in_send_cmd(ddev, skb, 500, digital_in_send_complete,
+                                  data_exch);
+}
+
+static struct nfc_ops digital_nfc_ops = {
+       .dev_up = digital_dev_up,
+       .dev_down = digital_dev_down,
+       .start_poll = digital_start_poll,
+       .stop_poll = digital_stop_poll,
+       .dep_link_up = digital_dep_link_up,
+       .dep_link_down = digital_dep_link_down,
+       .activate_target = digital_activate_target,
+       .deactivate_target = digital_deactivate_target,
+       .tm_send = digital_tg_send,
+       .im_transceive = digital_in_send,
+};
+
+struct nfc_digital_dev *nfc_digital_allocate_device(struct nfc_digital_ops *ops,
+                                           __u32 supported_protocols,
+                                           __u32 driver_capabilities,
+                                           int tx_headroom, int tx_tailroom)
+{
+       struct nfc_digital_dev *ddev;
+
+       if (!ops->in_configure_hw || !ops->in_send_cmd || !ops->tg_listen ||
+           !ops->tg_configure_hw || !ops->tg_send_cmd || !ops->abort_cmd ||
+           !ops->switch_rf)
+               return NULL;
+
+       ddev = kzalloc(sizeof(struct nfc_digital_dev), GFP_KERNEL);
+       if (!ddev)
+               return NULL;
+
+       ddev->driver_capabilities = driver_capabilities;
+       ddev->ops = ops;
+
+       mutex_init(&ddev->cmd_lock);
+       INIT_LIST_HEAD(&ddev->cmd_queue);
+
+       INIT_WORK(&ddev->cmd_work, digital_wq_cmd);
+       INIT_WORK(&ddev->cmd_complete_work, digital_wq_cmd_complete);
+
+       mutex_init(&ddev->poll_lock);
+       INIT_WORK(&ddev->poll_work, digital_wq_poll);
+
+       if (supported_protocols & NFC_PROTO_JEWEL_MASK)
+               ddev->protocols |= NFC_PROTO_JEWEL_MASK;
+       if (supported_protocols & NFC_PROTO_MIFARE_MASK)
+               ddev->protocols |= NFC_PROTO_MIFARE_MASK;
+       if (supported_protocols & NFC_PROTO_FELICA_MASK)
+               ddev->protocols |= NFC_PROTO_FELICA_MASK;
+       if (supported_protocols & NFC_PROTO_NFC_DEP_MASK)
+               ddev->protocols |= NFC_PROTO_NFC_DEP_MASK;
+
+       ddev->tx_headroom = tx_headroom + DIGITAL_MAX_HEADER_LEN;
+       ddev->tx_tailroom = tx_tailroom + DIGITAL_CRC_LEN;
+
+       ddev->nfc_dev = nfc_allocate_device(&digital_nfc_ops, ddev->protocols,
+                                           ddev->tx_headroom,
+                                           ddev->tx_tailroom);
+       if (!ddev->nfc_dev) {
+               pr_err("nfc_allocate_device failed\n");
+               goto free_dev;
+       }
+
+       nfc_set_drvdata(ddev->nfc_dev, ddev);
+
+       return ddev;
+
+free_dev:
+       kfree(ddev);
+
+       return NULL;
+}
+EXPORT_SYMBOL(nfc_digital_allocate_device);
+
+void nfc_digital_free_device(struct nfc_digital_dev *ddev)
+{
+       nfc_free_device(ddev->nfc_dev);
+       kfree(ddev);
+}
+EXPORT_SYMBOL(nfc_digital_free_device);
+
+int nfc_digital_register_device(struct nfc_digital_dev *ddev)
+{
+       return nfc_register_device(ddev->nfc_dev);
+}
+EXPORT_SYMBOL(nfc_digital_register_device);
+
+void nfc_digital_unregister_device(struct nfc_digital_dev *ddev)
+{
+       struct digital_cmd *cmd, *n;
+
+       nfc_unregister_device(ddev->nfc_dev);
+
+       mutex_lock(&ddev->poll_lock);
+       ddev->poll_tech_count = 0;
+       mutex_unlock(&ddev->poll_lock);
+
+       cancel_work_sync(&ddev->poll_work);
+       cancel_work_sync(&ddev->cmd_work);
+       cancel_work_sync(&ddev->cmd_complete_work);
+
+       list_for_each_entry_safe(cmd, n, &ddev->cmd_queue, queue) {
+               list_del(&cmd->queue);
+               kfree(cmd->mdaa_params);
+               kfree(cmd);
+       }
+}
+EXPORT_SYMBOL(nfc_digital_unregister_device);
+
+MODULE_LICENSE("GPL");
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
new file mode 100644 (file)
index 0000000..07bbc24
--- /dev/null
@@ -0,0 +1,729 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "digital: %s: " fmt, __func__
+
+#include "digital.h"
+
+#define DIGITAL_NFC_DEP_FRAME_DIR_OUT 0xD4
+#define DIGITAL_NFC_DEP_FRAME_DIR_IN  0xD5
+
+#define DIGITAL_NFC_DEP_NFCA_SOD_SB   0xF0
+
+#define DIGITAL_CMD_ATR_REQ 0x00
+#define DIGITAL_CMD_ATR_RES 0x01
+#define DIGITAL_CMD_PSL_REQ 0x04
+#define DIGITAL_CMD_PSL_RES 0x05
+#define DIGITAL_CMD_DEP_REQ 0x06
+#define DIGITAL_CMD_DEP_RES 0x07
+
+#define DIGITAL_ATR_REQ_MIN_SIZE 16
+#define DIGITAL_ATR_REQ_MAX_SIZE 64
+
+#define DIGITAL_NFCID3_LEN ((u8)8)
+#define DIGITAL_LR_BITS_PAYLOAD_SIZE_254B 0x30
+#define DIGITAL_GB_BIT 0x02
+
+#define DIGITAL_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0)
+
+#define DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT 0x10
+
+#define DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb) \
+                               ((pfb) & DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT)
+#define DIGITAL_NFC_DEP_MI_BIT_SET(pfb)  ((pfb) & 0x10)
+#define DIGITAL_NFC_DEP_NAD_BIT_SET(pfb) ((pfb) & 0x08)
+#define DIGITAL_NFC_DEP_DID_BIT_SET(pfb) ((pfb) & 0x04)
+#define DIGITAL_NFC_DEP_PFB_PNI(pfb)     ((pfb) & 0x03)
+
+#define DIGITAL_NFC_DEP_PFB_I_PDU          0x00
+#define DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU   0x40
+#define DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU 0x80
+
+struct digital_atr_req {
+       u8 dir;
+       u8 cmd;
+       u8 nfcid3[10];
+       u8 did;
+       u8 bs;
+       u8 br;
+       u8 pp;
+       u8 gb[0];
+} __packed;
+
+struct digital_atr_res {
+       u8 dir;
+       u8 cmd;
+       u8 nfcid3[10];
+       u8 did;
+       u8 bs;
+       u8 br;
+       u8 to;
+       u8 pp;
+       u8 gb[0];
+} __packed;
+
+struct digital_psl_req {
+       u8 dir;
+       u8 cmd;
+       u8 did;
+       u8 brs;
+       u8 fsl;
+} __packed;
+
+struct digital_psl_res {
+       u8 dir;
+       u8 cmd;
+       u8 did;
+} __packed;
+
+struct digital_dep_req_res {
+       u8 dir;
+       u8 cmd;
+       u8 pfb;
+} __packed;
+
+static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
+                                   struct sk_buff *resp);
+
+static void digital_skb_push_dep_sod(struct nfc_digital_dev *ddev,
+                                    struct sk_buff *skb)
+{
+       skb_push(skb, sizeof(u8));
+
+       skb->data[0] = skb->len;
+
+       if (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A)
+               *skb_push(skb, sizeof(u8)) = DIGITAL_NFC_DEP_NFCA_SOD_SB;
+}
+
+static int digital_skb_pull_dep_sod(struct nfc_digital_dev *ddev,
+                                   struct sk_buff *skb)
+{
+       u8 size;
+
+       if (skb->len < 2)
+               return -EIO;
+
+       if (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A)
+               skb_pull(skb, sizeof(u8));
+
+       size = skb->data[0];
+       if (size != skb->len)
+               return -EIO;
+
+       skb_pull(skb, sizeof(u8));
+
+       return 0;
+}
+
+static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg,
+                                struct sk_buff *resp)
+{
+       struct nfc_target *target = arg;
+       struct digital_atr_res *atr_res;
+       u8 gb_len;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       rc = ddev->skb_check_crc(resp);
+       if (rc) {
+               PROTOCOL_ERR("14.4.1.6");
+               goto exit;
+       }
+
+       rc = digital_skb_pull_dep_sod(ddev, resp);
+       if (rc) {
+               PROTOCOL_ERR("14.4.1.2");
+               goto exit;
+       }
+
+       if (resp->len < sizeof(struct digital_atr_res)) {
+               rc = -EIO;
+               goto exit;
+       }
+
+       gb_len = resp->len - sizeof(struct digital_atr_res);
+
+       atr_res = (struct digital_atr_res *)resp->data;
+
+       rc = nfc_set_remote_general_bytes(ddev->nfc_dev, atr_res->gb, gb_len);
+       if (rc)
+               goto exit;
+
+       rc = nfc_dep_link_is_up(ddev->nfc_dev, target->idx, NFC_COMM_ACTIVE,
+                               NFC_RF_INITIATOR);
+
+       ddev->curr_nfc_dep_pni = 0;
+
+exit:
+       dev_kfree_skb(resp);
+
+       if (rc)
+               ddev->curr_protocol = 0;
+}
+
+int digital_in_send_atr_req(struct nfc_digital_dev *ddev,
+                           struct nfc_target *target, __u8 comm_mode, __u8 *gb,
+                           size_t gb_len)
+{
+       struct sk_buff *skb;
+       struct digital_atr_req *atr_req;
+       uint size;
+
+       size = DIGITAL_ATR_REQ_MIN_SIZE + gb_len;
+
+       if (size > DIGITAL_ATR_REQ_MAX_SIZE) {
+               PROTOCOL_ERR("14.6.1.1");
+               return -EINVAL;
+       }
+
+       skb = digital_skb_alloc(ddev, size);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, sizeof(struct digital_atr_req));
+
+       atr_req = (struct digital_atr_req *)skb->data;
+       memset(atr_req, 0, sizeof(struct digital_atr_req));
+
+       atr_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+       atr_req->cmd = DIGITAL_CMD_ATR_REQ;
+       if (target->nfcid2_len)
+               memcpy(atr_req->nfcid3, target->nfcid2,
+                      max(target->nfcid2_len, DIGITAL_NFCID3_LEN));
+       else
+               get_random_bytes(atr_req->nfcid3, DIGITAL_NFCID3_LEN);
+
+       atr_req->did = 0;
+       atr_req->bs = 0;
+       atr_req->br = 0;
+
+       atr_req->pp = DIGITAL_LR_BITS_PAYLOAD_SIZE_254B;
+
+       if (gb_len) {
+               atr_req->pp |= DIGITAL_GB_BIT;
+               memcpy(skb_put(skb, gb_len), gb, gb_len);
+       }
+
+       digital_skb_push_dep_sod(ddev, skb);
+
+       ddev->skb_add_crc(skb);
+
+       digital_in_send_cmd(ddev, skb, 500, digital_in_recv_atr_res, target);
+
+       return 0;
+}
+
+static int digital_in_send_rtox(struct nfc_digital_dev *ddev,
+                               struct digital_data_exch *data_exch, u8 rtox)
+{
+       struct digital_dep_req_res *dep_req;
+       struct sk_buff *skb;
+       int rc;
+
+       skb = digital_skb_alloc(ddev, 1);
+       if (!skb)
+               return -ENOMEM;
+
+       *skb_put(skb, 1) = rtox;
+
+       skb_push(skb, sizeof(struct digital_dep_req_res));
+
+       dep_req = (struct digital_dep_req_res *)skb->data;
+
+       dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+       dep_req->cmd = DIGITAL_CMD_DEP_REQ;
+       dep_req->pfb = DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU |
+                      DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT;
+
+       digital_skb_push_dep_sod(ddev, skb);
+
+       ddev->skb_add_crc(skb);
+
+       rc = digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
+                                data_exch);
+
+       return rc;
+}
+
+static void digital_in_recv_dep_res(struct nfc_digital_dev *ddev, void *arg,
+                                   struct sk_buff *resp)
+{
+       struct digital_data_exch *data_exch = arg;
+       struct digital_dep_req_res *dep_res;
+       u8 pfb;
+       uint size;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       rc = ddev->skb_check_crc(resp);
+       if (rc) {
+               PROTOCOL_ERR("14.4.1.6");
+               goto error;
+       }
+
+       rc = digital_skb_pull_dep_sod(ddev, resp);
+       if (rc) {
+               PROTOCOL_ERR("14.4.1.2");
+               goto exit;
+       }
+
+       dep_res = (struct digital_dep_req_res *)resp->data;
+
+       if (resp->len < sizeof(struct digital_dep_req_res) ||
+           dep_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN ||
+           dep_res->cmd != DIGITAL_CMD_DEP_RES) {
+               rc = -EIO;
+               goto error;
+       }
+
+       pfb = dep_res->pfb;
+
+       switch (DIGITAL_NFC_DEP_PFB_TYPE(pfb)) {
+       case DIGITAL_NFC_DEP_PFB_I_PDU:
+               if (DIGITAL_NFC_DEP_PFB_PNI(pfb) != ddev->curr_nfc_dep_pni) {
+                       PROTOCOL_ERR("14.12.3.3");
+                       rc = -EIO;
+                       goto error;
+               }
+
+               ddev->curr_nfc_dep_pni =
+                       DIGITAL_NFC_DEP_PFB_PNI(ddev->curr_nfc_dep_pni + 1);
+               rc = 0;
+               break;
+
+       case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU:
+               pr_err("Received an ACK/NACK PDU\n");
+               rc = -EIO;
+               goto error;
+
+       case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU:
+               if (!DIGITAL_NFC_DEP_PFB_IS_TIMEOUT(pfb)) {
+                       rc = -EINVAL;
+                       goto error;
+               }
+
+               rc = digital_in_send_rtox(ddev, data_exch, resp->data[3]);
+               if (rc)
+                       goto error;
+
+               kfree_skb(resp);
+               return;
+       }
+
+       if (DIGITAL_NFC_DEP_MI_BIT_SET(pfb)) {
+               pr_err("MI bit set. Chained PDU not supported\n");
+               rc = -EIO;
+               goto error;
+       }
+
+       size = sizeof(struct digital_dep_req_res);
+
+       if (DIGITAL_NFC_DEP_DID_BIT_SET(pfb))
+               size++;
+
+       if (size > resp->len) {
+               rc = -EIO;
+               goto error;
+       }
+
+       skb_pull(resp, size);
+
+exit:
+       data_exch->cb(data_exch->cb_context, resp, rc);
+
+error:
+       kfree(data_exch);
+
+       if (rc)
+               kfree_skb(resp);
+}
+
+int digital_in_send_dep_req(struct nfc_digital_dev *ddev,
+                           struct nfc_target *target, struct sk_buff *skb,
+                           struct digital_data_exch *data_exch)
+{
+       struct digital_dep_req_res *dep_req;
+
+       skb_push(skb, sizeof(struct digital_dep_req_res));
+
+       dep_req = (struct digital_dep_req_res *)skb->data;
+       dep_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+       dep_req->cmd = DIGITAL_CMD_DEP_REQ;
+       dep_req->pfb = ddev->curr_nfc_dep_pni;
+
+       digital_skb_push_dep_sod(ddev, skb);
+
+       ddev->skb_add_crc(skb);
+
+       return digital_in_send_cmd(ddev, skb, 1500, digital_in_recv_dep_res,
+                                  data_exch);
+}
+
+static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
+                                   struct sk_buff *resp)
+{
+       int rc;
+       struct digital_dep_req_res *dep_req;
+       size_t size;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       rc = ddev->skb_check_crc(resp);
+       if (rc) {
+               PROTOCOL_ERR("14.4.1.6");
+               goto exit;
+       }
+
+       rc = digital_skb_pull_dep_sod(ddev, resp);
+       if (rc) {
+               PROTOCOL_ERR("14.4.1.2");
+               goto exit;
+       }
+
+       size = sizeof(struct digital_dep_req_res);
+       dep_req = (struct digital_dep_req_res *)resp->data;
+
+       if (resp->len < size || dep_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT ||
+           dep_req->cmd != DIGITAL_CMD_DEP_REQ) {
+               rc = -EIO;
+               goto exit;
+       }
+
+       if (DIGITAL_NFC_DEP_DID_BIT_SET(dep_req->pfb))
+               size++;
+
+       if (resp->len < size) {
+               rc = -EIO;
+               goto exit;
+       }
+
+       switch (DIGITAL_NFC_DEP_PFB_TYPE(dep_req->pfb)) {
+       case DIGITAL_NFC_DEP_PFB_I_PDU:
+               pr_debug("DIGITAL_NFC_DEP_PFB_I_PDU\n");
+               ddev->curr_nfc_dep_pni = DIGITAL_NFC_DEP_PFB_PNI(dep_req->pfb);
+               break;
+       case DIGITAL_NFC_DEP_PFB_ACK_NACK_PDU:
+               pr_err("Received an ACK/NACK PDU\n");
+               rc = -EINVAL;
+               goto exit;
+       case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU:
+               pr_err("Received a SUPERVISOR PDU\n");
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       skb_pull(resp, size);
+
+       rc = nfc_tm_data_received(ddev->nfc_dev, resp);
+
+exit:
+       if (rc)
+               kfree_skb(resp);
+}
+
+int digital_tg_send_dep_res(struct nfc_digital_dev *ddev, struct sk_buff *skb)
+{
+       struct digital_dep_req_res *dep_res;
+
+       skb_push(skb, sizeof(struct digital_dep_req_res));
+       dep_res = (struct digital_dep_req_res *)skb->data;
+
+       dep_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+       dep_res->cmd = DIGITAL_CMD_DEP_RES;
+       dep_res->pfb = ddev->curr_nfc_dep_pni;
+
+       digital_skb_push_dep_sod(ddev, skb);
+
+       ddev->skb_add_crc(skb);
+
+       return digital_tg_send_cmd(ddev, skb, 1500, digital_tg_recv_dep_req,
+                                  NULL);
+}
+
+static void digital_tg_send_psl_res_complete(struct nfc_digital_dev *ddev,
+                                            void *arg, struct sk_buff *resp)
+{
+       /* The RF tech was smuggled through arg as an ERR_PTR()-encoded scalar. */
+       u8 rf_tech = PTR_ERR(arg);
+
+       if (IS_ERR(resp))
+               return;
+
+       digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+
+       digital_tg_listen(ddev, 1500, digital_tg_recv_dep_req, NULL);
+
+       dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_psl_res(struct nfc_digital_dev *ddev, u8 did,
+                                  u8 rf_tech)
+{
+       struct digital_psl_res *psl_res;
+       struct sk_buff *skb;
+       int rc;
+
+       skb = digital_skb_alloc(ddev, sizeof(struct digital_psl_res));
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, sizeof(struct digital_psl_res));
+
+       psl_res = (struct digital_psl_res *)skb->data;
+
+       psl_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+       psl_res->cmd = DIGITAL_CMD_PSL_RES;
+       psl_res->did = did;
+
+       digital_skb_push_dep_sod(ddev, skb);
+
+       ddev->skb_add_crc(skb);
+
+       rc = digital_tg_send_cmd(ddev, skb, 0, digital_tg_send_psl_res_complete,
+                                ERR_PTR(rf_tech));
+
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
+static void digital_tg_recv_psl_req(struct nfc_digital_dev *ddev, void *arg,
+                                   struct sk_buff *resp)
+{
+       int rc;
+       struct digital_psl_req *psl_req;
+       u8 rf_tech;
+       u8 dsi;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       rc = ddev->skb_check_crc(resp);
+       if (rc) {
+               PROTOCOL_ERR("14.4.1.6");
+               goto exit;
+       }
+
+       rc = digital_skb_pull_dep_sod(ddev, resp);
+       if (rc) {
+               PROTOCOL_ERR("14.4.1.2");
+               goto exit;
+       }
+
+       psl_req = (struct digital_psl_req *)resp->data;
+
+       if (resp->len != sizeof(struct digital_psl_req) ||
+           psl_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT ||
+           psl_req->cmd != DIGITAL_CMD_PSL_REQ) {
+               rc = -EIO;
+               goto exit;
+       }
+
+       dsi = (psl_req->brs >> 3) & 0x07;
+       switch (dsi) {
+       case 0:
+               rf_tech = NFC_DIGITAL_RF_TECH_106A;
+               break;
+       case 1:
+               rf_tech = NFC_DIGITAL_RF_TECH_212F;
+               break;
+       case 2:
+               rf_tech = NFC_DIGITAL_RF_TECH_424F;
+               break;
+       default:
+               pr_err("Unsuported dsi value %d\n", dsi);
+               goto exit;
+       }
+
+       rc = digital_tg_send_psl_res(ddev, psl_req->did, rf_tech);
+
+exit:
+       kfree_skb(resp);
+}
+
+static void digital_tg_send_atr_res_complete(struct nfc_digital_dev *ddev,
+                                            void *arg, struct sk_buff *resp)
+{
+       int offset;
+
+       if (IS_ERR(resp)) {
+               digital_poll_next_tech(ddev);
+               return;
+       }
+
+       offset = 2;
+       if (resp->data[0] == DIGITAL_NFC_DEP_NFCA_SOD_SB)
+               offset++;
+
+       if (resp->data[offset] == DIGITAL_CMD_PSL_REQ)
+               digital_tg_recv_psl_req(ddev, arg, resp);
+       else
+               digital_tg_recv_dep_req(ddev, arg, resp);
+}
+
+static int digital_tg_send_atr_res(struct nfc_digital_dev *ddev,
+                                  struct digital_atr_req *atr_req)
+{
+       struct digital_atr_res *atr_res;
+       struct sk_buff *skb;
+       u8 *gb;
+       size_t gb_len;
+       int rc;
+
+       gb = nfc_get_local_general_bytes(ddev->nfc_dev, &gb_len);
+       if (!gb)
+               gb_len = 0;
+
+       skb = digital_skb_alloc(ddev, sizeof(struct digital_atr_res) + gb_len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, sizeof(struct digital_atr_res));
+       atr_res = (struct digital_atr_res *)skb->data;
+
+       memset(atr_res, 0, sizeof(struct digital_atr_res));
+
+       atr_res->dir = DIGITAL_NFC_DEP_FRAME_DIR_IN;
+       atr_res->cmd = DIGITAL_CMD_ATR_RES;
+       memcpy(atr_res->nfcid3, atr_req->nfcid3, sizeof(atr_req->nfcid3));
+       atr_res->to = 8;
+       atr_res->pp = DIGITAL_LR_BITS_PAYLOAD_SIZE_254B;
+       if (gb_len) {
+               skb_put(skb, gb_len);
+
+               atr_res->pp |= DIGITAL_GB_BIT;
+               memcpy(atr_res->gb, gb, gb_len);
+       }
+
+       digital_skb_push_dep_sod(ddev, skb);
+
+       ddev->skb_add_crc(skb);
+
+       rc = digital_tg_send_cmd(ddev, skb, 999,
+                                digital_tg_send_atr_res_complete, NULL);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
+void digital_tg_recv_atr_req(struct nfc_digital_dev *ddev, void *arg,
+                            struct sk_buff *resp)
+{
+       int rc;
+       struct digital_atr_req *atr_req;
+       size_t gb_len, min_size;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (!resp->len) {
+               rc = -EIO;
+               goto exit;
+       }
+
+       if (resp->data[0] == DIGITAL_NFC_DEP_NFCA_SOD_SB) {
+               min_size = DIGITAL_ATR_REQ_MIN_SIZE + 2;
+
+               ddev->curr_rf_tech = NFC_DIGITAL_RF_TECH_106A;
+               ddev->skb_add_crc = digital_skb_add_crc_a;
+               ddev->skb_check_crc = digital_skb_check_crc_a;
+       } else {
+               min_size = DIGITAL_ATR_REQ_MIN_SIZE + 1;
+
+               ddev->curr_rf_tech = NFC_DIGITAL_RF_TECH_212F;
+               ddev->skb_add_crc = digital_skb_add_crc_f;
+               ddev->skb_check_crc = digital_skb_check_crc_f;
+       }
+
+       if (resp->len < min_size) {
+               rc = -EIO;
+               goto exit;
+       }
+
+       if (DIGITAL_DRV_CAPS_TG_CRC(ddev)) {
+               ddev->skb_add_crc = digital_skb_add_crc_none;
+               ddev->skb_check_crc = digital_skb_check_crc_none;
+       }
+
+       rc = ddev->skb_check_crc(resp);
+       if (rc) {
+               PROTOCOL_ERR("14.4.1.6");
+               goto exit;
+       }
+
+       rc = digital_skb_pull_dep_sod(ddev, resp);
+       if (rc) {
+               PROTOCOL_ERR("14.4.1.2");
+               goto exit;
+       }
+
+       atr_req = (struct digital_atr_req *)resp->data;
+
+       if (atr_req->dir != DIGITAL_NFC_DEP_FRAME_DIR_OUT ||
+           atr_req->cmd != DIGITAL_CMD_ATR_REQ) {
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+                                    NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED);
+       if (rc)
+               goto exit;
+
+       rc = digital_tg_send_atr_res(ddev, atr_req);
+       if (rc)
+               goto exit;
+
+       gb_len = resp->len - sizeof(struct digital_atr_req);
+       rc = nfc_tm_activated(ddev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
+                             NFC_COMM_PASSIVE, atr_req->gb, gb_len);
+       if (rc)
+               goto exit;
+
+       ddev->poll_tech_count = 0;
+
+       rc = 0;
+exit:
+       if (rc)
+               digital_poll_next_tech(ddev);
+
+       dev_kfree_skb(resp);
+}
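
The initiator-side DEP handling above keeps a 2-bit Packet Number Information (PNI) counter: the PNI carried in the PFB byte of a DEP_RES must match the current counter, which is then advanced modulo 4 before the next DEP_REQ is built. A minimal stand-alone sketch of that bookkeeping, assuming only that DIGITAL_NFC_DEP_PFB_PNI() masks the two low-order bits (the macro itself is defined earlier in digital_dep.c and not shown here):

#include <stdint.h>
#include <stdio.h>

#define PFB_PNI(pfb)	((pfb) & 0x03)	/* low 2 bits carry the packet number */

/* Returns 0 if the response PNI matches and advances the counter, -1 otherwise. */
static int check_and_advance_pni(uint8_t *curr_pni, uint8_t resp_pfb)
{
	if (PFB_PNI(resp_pfb) != *curr_pni)
		return -1;	/* the PROTOCOL_ERR("14.12.3.3") case above */

	*curr_pni = PFB_PNI(*curr_pni + 1);	/* wraps 0, 1, 2, 3, 0, ... */
	return 0;
}

int main(void)
{
	uint8_t pni = 0;

	for (int i = 0; i < 6; i++) {
		uint8_t pfb = i & 0x03;	/* a well-behaved peer echoes the PNI */
		printf("resp pni=%u -> %s, next expected %u\n", pfb,
		       check_and_advance_pni(&pni, pfb) ? "err" : "ok", pni);
	}
	return 0;
}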
diff --git a/net/nfc/digital_technology.c b/net/nfc/digital_technology.c
new file mode 100644 (file)
index 0000000..251c8c7
--- /dev/null
@@ -0,0 +1,770 @@
+/*
+ * NFC Digital Protocol stack
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) "digital: %s: " fmt, __func__
+
+#include "digital.h"
+
+#define DIGITAL_CMD_SENS_REQ    0x26
+#define DIGITAL_CMD_ALL_REQ     0x52
+#define DIGITAL_CMD_SEL_REQ_CL1 0x93
+#define DIGITAL_CMD_SEL_REQ_CL2 0x95
+#define DIGITAL_CMD_SEL_REQ_CL3 0x97
+
+#define DIGITAL_SDD_REQ_SEL_PAR 0x20
+
+#define DIGITAL_SDD_RES_CT  0x88
+#define DIGITAL_SDD_RES_LEN 5
+
+#define DIGITAL_SEL_RES_NFCID1_COMPLETE(sel_res) (!((sel_res) & 0x04))
+#define DIGITAL_SEL_RES_IS_T2T(sel_res) (!((sel_res) & 0x60))
+#define DIGITAL_SEL_RES_IS_NFC_DEP(sel_res) ((sel_res) & 0x40)
+
+#define DIGITAL_SENS_RES_IS_T1T(sens_res) (((sens_res) & 0x0C00) == 0x0C00)
+#define DIGITAL_SENS_RES_IS_VALID(sens_res) \
+       ((!((sens_res) & 0x001F) && (((sens_res) & 0x0C00) == 0x0C00)) || \
+       (((sens_res) & 0x001F) && ((sens_res) & 0x0C00) != 0x0C00))
+
+#define DIGITAL_MIFARE_READ_RES_LEN 16
+#define DIGITAL_MIFARE_ACK_RES 0x0A
+
+#define DIGITAL_CMD_SENSF_REQ  0x00
+#define DIGITAL_CMD_SENSF_RES  0x01
+
+#define DIGITAL_SENSF_RES_MIN_LENGTH 17
+#define DIGITAL_SENSF_RES_RD_AP_B1   0x00
+#define DIGITAL_SENSF_RES_RD_AP_B2   0x8F
+
+#define DIGITAL_SENSF_REQ_RC_NONE 0
+#define DIGITAL_SENSF_REQ_RC_SC   1
+#define DIGITAL_SENSF_REQ_RC_AP   2
+
+struct digital_sdd_res {
+       u8 nfcid1[4];
+       u8 bcc;
+} __packed;
+
+struct digital_sel_req {
+       u8 sel_cmd;
+       u8 b2;
+       u8 nfcid1[4];
+       u8 bcc;
+} __packed;
+
+struct digital_sensf_req {
+       u8 cmd;
+       u8 sc1;
+       u8 sc2;
+       u8 rc;
+       u8 tsn;
+} __packed;
+
+struct digital_sensf_res {
+       u8 cmd;
+       u8 nfcid2[8];
+       u8 pad0[2];
+       u8 pad1[3];
+       u8 mrti_check;
+       u8 mrti_update;
+       u8 pad2;
+       u8 rd[2];
+} __packed;
+
+static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
+                                  struct nfc_target *target);
+
+static void digital_in_recv_sel_res(struct nfc_digital_dev *ddev, void *arg,
+                                   struct sk_buff *resp)
+{
+       struct nfc_target *target = arg;
+       int rc;
+       u8 sel_res;
+       u8 nfc_proto;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (!DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+               rc = digital_skb_check_crc_a(resp);
+               if (rc) {
+                       PROTOCOL_ERR("4.4.1.3");
+                       goto exit;
+               }
+       }
+
+       if (!resp->len) {
+               rc = -EIO;
+               goto exit;
+       }
+
+       sel_res = resp->data[0];
+
+       if (!DIGITAL_SEL_RES_NFCID1_COMPLETE(sel_res)) {
+               rc = digital_in_send_sdd_req(ddev, target);
+               if (rc)
+                       goto exit;
+
+               goto exit_free_skb;
+       }
+
+       if (DIGITAL_SEL_RES_IS_T2T(sel_res)) {
+               nfc_proto = NFC_PROTO_MIFARE;
+       } else if (DIGITAL_SEL_RES_IS_NFC_DEP(sel_res)) {
+               nfc_proto = NFC_PROTO_NFC_DEP;
+       } else {
+               rc = -EOPNOTSUPP;
+               goto exit;
+       }
+
+       target->sel_res = sel_res;
+
+       rc = digital_target_found(ddev, target, nfc_proto);
+
+exit:
+       kfree(target);
+
+exit_free_skb:
+       dev_kfree_skb(resp);
+
+       if (rc)
+               digital_poll_next_tech(ddev);
+}
+
+static int digital_in_send_sel_req(struct nfc_digital_dev *ddev,
+                                  struct nfc_target *target,
+                                  struct digital_sdd_res *sdd_res)
+{
+       struct sk_buff *skb;
+       struct digital_sel_req *sel_req;
+       u8 sel_cmd;
+       int rc;
+
+       skb = digital_skb_alloc(ddev, sizeof(struct digital_sel_req));
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, sizeof(struct digital_sel_req));
+       sel_req = (struct digital_sel_req *)skb->data;
+
+       if (target->nfcid1_len <= 4)
+               sel_cmd = DIGITAL_CMD_SEL_REQ_CL1;
+       else if (target->nfcid1_len < 10)
+               sel_cmd = DIGITAL_CMD_SEL_REQ_CL2;
+       else
+               sel_cmd = DIGITAL_CMD_SEL_REQ_CL3;
+
+       sel_req->sel_cmd = sel_cmd;
+       sel_req->b2 = 0x70;
+       memcpy(sel_req->nfcid1, sdd_res->nfcid1, 4);
+       sel_req->bcc = sdd_res->bcc;
+
+       if (DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+               rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+                               NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A);
+               if (rc)
+                       goto exit;
+       } else {
+               digital_skb_add_crc_a(skb);
+       }
+
+       rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sel_res,
+                                target);
+exit:
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
+static void digital_in_recv_sdd_res(struct nfc_digital_dev *ddev, void *arg,
+                                   struct sk_buff *resp)
+{
+       struct nfc_target *target = arg;
+       struct digital_sdd_res *sdd_res;
+       int rc;
+       u8 offset, size;
+       u8 i, bcc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (resp->len < DIGITAL_SDD_RES_LEN) {
+               PROTOCOL_ERR("4.7.2.8");
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       sdd_res = (struct digital_sdd_res *)resp->data;
+
+       for (i = 0, bcc = 0; i < 4; i++)
+               bcc ^= sdd_res->nfcid1[i];
+
+       if (bcc != sdd_res->bcc) {
+               PROTOCOL_ERR("4.7.2.6");
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       if (sdd_res->nfcid1[0] == DIGITAL_SDD_RES_CT) {
+               offset = 1;
+               size = 3;
+       } else {
+               offset = 0;
+               size = 4;
+       }
+
+       memcpy(target->nfcid1 + target->nfcid1_len, sdd_res->nfcid1 + offset,
+              size);
+       target->nfcid1_len += size;
+
+       rc = digital_in_send_sel_req(ddev, target, sdd_res);
+
+exit:
+       dev_kfree_skb(resp);
+
+       if (rc) {
+               kfree(target);
+               digital_poll_next_tech(ddev);
+       }
+}
+
+static int digital_in_send_sdd_req(struct nfc_digital_dev *ddev,
+                                  struct nfc_target *target)
+{
+       int rc;
+       struct sk_buff *skb;
+       u8 sel_cmd;
+
+       rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+                                    NFC_DIGITAL_FRAMING_NFCA_STANDARD);
+       if (rc)
+               return rc;
+
+       skb = digital_skb_alloc(ddev, 2);
+       if (!skb)
+               return -ENOMEM;
+
+       if (target->nfcid1_len == 0)
+               sel_cmd = DIGITAL_CMD_SEL_REQ_CL1;
+       else if (target->nfcid1_len == 3)
+               sel_cmd = DIGITAL_CMD_SEL_REQ_CL2;
+       else
+               sel_cmd = DIGITAL_CMD_SEL_REQ_CL3;
+
+       *skb_put(skb, sizeof(u8)) = sel_cmd;
+       *skb_put(skb, sizeof(u8)) = DIGITAL_SDD_REQ_SEL_PAR;
+
+       return digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sdd_res,
+                                  target);
+}
+
+static void digital_in_recv_sens_res(struct nfc_digital_dev *ddev, void *arg,
+                                    struct sk_buff *resp)
+{
+       struct nfc_target *target = NULL;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (resp->len < sizeof(u16)) {
+               rc = -EIO;
+               goto exit;
+       }
+
+       target = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
+       if (!target) {
+               rc = -ENOMEM;
+               goto exit;
+       }
+
+       target->sens_res = __le16_to_cpu(*(__le16 *)resp->data);
+
+       if (!DIGITAL_SENS_RES_IS_VALID(target->sens_res)) {
+               PROTOCOL_ERR("4.6.3.3");
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       if (DIGITAL_SENS_RES_IS_T1T(target->sens_res))
+               rc = digital_target_found(ddev, target, NFC_PROTO_JEWEL);
+       else
+               rc = digital_in_send_sdd_req(ddev, target);
+
+exit:
+       dev_kfree_skb(resp);
+
+       if (rc) {
+               kfree(target);
+               digital_poll_next_tech(ddev);
+       }
+}
+
+int digital_in_send_sens_req(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+       struct sk_buff *skb;
+       int rc;
+
+       rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+                                    NFC_DIGITAL_RF_TECH_106A);
+       if (rc)
+               return rc;
+
+       rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+                                    NFC_DIGITAL_FRAMING_NFCA_SHORT);
+       if (rc)
+               return rc;
+
+       skb = digital_skb_alloc(ddev, 1);
+       if (!skb)
+               return -ENOMEM;
+
+       *skb_put(skb, sizeof(u8)) = DIGITAL_CMD_SENS_REQ;
+
+       rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sens_res, NULL);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
+int digital_in_recv_mifare_res(struct sk_buff *resp)
+{
+       /* Successful READ command response is 16 data bytes + 2 CRC bytes long.
+        * Since the driver can't differentiate an ACK/NACK response from a valid
+        * READ response, the CRC calculation must be handled at digital level
+        * even if the driver supports it for this technology.
+        */
+       if (resp->len == DIGITAL_MIFARE_READ_RES_LEN + DIGITAL_CRC_LEN) {
+               if (digital_skb_check_crc_a(resp)) {
+                       PROTOCOL_ERR("9.4.1.2");
+                       return -EIO;
+               }
+
+               return 0;
+       }
+
+       /* ACK response (i.e. successful WRITE). */
+       if (resp->len == 1 && resp->data[0] == DIGITAL_MIFARE_ACK_RES) {
+               resp->data[0] = 0;
+               return 0;
+       }
+
+       /* NACK and any other responses are treated as errors. */
+       return -EIO;
+}
+
+static void digital_in_recv_sensf_res(struct nfc_digital_dev *ddev, void *arg,
+                                  struct sk_buff *resp)
+{
+       int rc;
+       u8 proto;
+       struct nfc_target target;
+       struct digital_sensf_res *sensf_res;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (resp->len < DIGITAL_SENSF_RES_MIN_LENGTH) {
+               rc = -EIO;
+               goto exit;
+       }
+
+       if (!DIGITAL_DRV_CAPS_IN_CRC(ddev)) {
+               rc = digital_skb_check_crc_f(resp);
+               if (rc) {
+                       PROTOCOL_ERR("6.4.1.8");
+                       goto exit;
+               }
+       }
+
+       skb_pull(resp, 1);
+
+       memset(&target, 0, sizeof(struct nfc_target));
+
+       sensf_res = (struct digital_sensf_res *)resp->data;
+
+       memcpy(target.sensf_res, sensf_res, resp->len);
+       target.sensf_res_len = resp->len;
+
+       memcpy(target.nfcid2, sensf_res->nfcid2, NFC_NFCID2_MAXSIZE);
+       target.nfcid2_len = NFC_NFCID2_MAXSIZE;
+
+       if (target.nfcid2[0] == DIGITAL_SENSF_NFCID2_NFC_DEP_B1 &&
+           target.nfcid2[1] == DIGITAL_SENSF_NFCID2_NFC_DEP_B2)
+               proto = NFC_PROTO_NFC_DEP;
+       else
+               proto = NFC_PROTO_FELICA;
+
+       rc = digital_target_found(ddev, &target, proto);
+
+exit:
+       dev_kfree_skb(resp);
+
+       if (rc)
+               digital_poll_next_tech(ddev);
+}
+
+int digital_in_send_sensf_req(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+       struct digital_sensf_req *sensf_req;
+       struct sk_buff *skb;
+       int rc;
+       u8 size;
+
+       rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+       if (rc)
+               return rc;
+
+       rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+                                    NFC_DIGITAL_FRAMING_NFCF);
+       if (rc)
+               return rc;
+
+       size = sizeof(struct digital_sensf_req);
+
+       skb = digital_skb_alloc(ddev, size);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, size);
+
+       sensf_req = (struct digital_sensf_req *)skb->data;
+       sensf_req->cmd = DIGITAL_CMD_SENSF_REQ;
+       sensf_req->sc1 = 0xFF;
+       sensf_req->sc2 = 0xFF;
+       sensf_req->rc = 0;
+       sensf_req->tsn = 0;
+
+       *skb_push(skb, 1) = size + 1;
+
+       if (!DIGITAL_DRV_CAPS_IN_CRC(ddev))
+               digital_skb_add_crc_f(skb);
+
+       rc = digital_in_send_cmd(ddev, skb, 30, digital_in_recv_sensf_res,
+                                NULL);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
+static int digital_tg_send_sel_res(struct nfc_digital_dev *ddev)
+{
+       struct sk_buff *skb;
+       int rc;
+
+       skb = digital_skb_alloc(ddev, 1);
+       if (!skb)
+               return -ENOMEM;
+
+       *skb_put(skb, 1) = DIGITAL_SEL_RES_NFC_DEP;
+
+       if (!DIGITAL_DRV_CAPS_TG_CRC(ddev))
+               digital_skb_add_crc_a(skb);
+
+       rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_atr_req,
+                                NULL);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
+static void digital_tg_recv_sel_req(struct nfc_digital_dev *ddev, void *arg,
+                                   struct sk_buff *resp)
+{
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) {
+               rc = digital_skb_check_crc_a(resp);
+               if (rc) {
+                       PROTOCOL_ERR("4.4.1.3");
+                       goto exit;
+               }
+       }
+
+       /* Silently ignore SEL_REQ content and send a SEL_RES for NFC-DEP */
+
+       rc = digital_tg_send_sel_res(ddev);
+
+exit:
+       if (rc)
+               digital_poll_next_tech(ddev);
+
+       dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_sdd_res(struct nfc_digital_dev *ddev)
+{
+       struct sk_buff *skb;
+       struct digital_sdd_res *sdd_res;
+       int rc, i;
+
+       skb = digital_skb_alloc(ddev, sizeof(struct digital_sdd_res));
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, sizeof(struct digital_sdd_res));
+       sdd_res = (struct digital_sdd_res *)skb->data;
+
+       sdd_res->nfcid1[0] = 0x08;
+       get_random_bytes(sdd_res->nfcid1 + 1, 3);
+
+       sdd_res->bcc = 0;
+       for (i = 0; i < 4; i++)
+               sdd_res->bcc ^= sdd_res->nfcid1[i];
+
+       rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sel_req,
+                                NULL);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
+static void digital_tg_recv_sdd_req(struct nfc_digital_dev *ddev, void *arg,
+                                   struct sk_buff *resp)
+{
+       u8 *sdd_req;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       sdd_req = resp->data;
+
+       if (resp->len < 2 || sdd_req[0] != DIGITAL_CMD_SEL_REQ_CL1 ||
+           sdd_req[1] != DIGITAL_SDD_REQ_SEL_PAR) {
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       rc = digital_tg_send_sdd_res(ddev);
+
+exit:
+       if (rc)
+               digital_poll_next_tech(ddev);
+
+       dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_sens_res(struct nfc_digital_dev *ddev)
+{
+       struct sk_buff *skb;
+       u8 *sens_res;
+       int rc;
+
+       skb = digital_skb_alloc(ddev, 2);
+       if (!skb)
+               return -ENOMEM;
+
+       sens_res = skb_put(skb, 2);
+
+       sens_res[0] = (DIGITAL_SENS_RES_NFC_DEP >> 8) & 0xFF;
+       sens_res[1] = DIGITAL_SENS_RES_NFC_DEP & 0xFF;
+
+       rc = digital_tg_send_cmd(ddev, skb, 300, digital_tg_recv_sdd_req,
+                                NULL);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
+void digital_tg_recv_sens_req(struct nfc_digital_dev *ddev, void *arg,
+                             struct sk_buff *resp)
+{
+       u8 sens_req;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (!resp->len) {
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       sens_req = resp->data[0];
+
+       if (sens_req != DIGITAL_CMD_SENS_REQ &&
+           sens_req != DIGITAL_CMD_ALL_REQ) {
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       rc = digital_tg_send_sens_res(ddev);
+
+exit:
+       if (rc)
+               digital_poll_next_tech(ddev);
+
+       dev_kfree_skb(resp);
+}
+
+static int digital_tg_send_sensf_res(struct nfc_digital_dev *ddev,
+                             struct digital_sensf_req *sensf_req)
+{
+       struct sk_buff *skb;
+       u8 size;
+       int rc;
+       struct digital_sensf_res *sensf_res;
+
+       size = sizeof(struct digital_sensf_res);
+
+       if (sensf_req->rc != DIGITAL_SENSF_REQ_RC_NONE)
+               size -= sizeof(sensf_res->rd);
+
+       skb = digital_skb_alloc(ddev, size);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, size);
+
+       sensf_res = (struct digital_sensf_res *)skb->data;
+
+       memset(sensf_res, 0, size);
+
+       sensf_res->cmd = DIGITAL_CMD_SENSF_RES;
+       sensf_res->nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1;
+       sensf_res->nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2;
+       get_random_bytes(&sensf_res->nfcid2[2], 6);
+
+       switch (sensf_req->rc) {
+       case DIGITAL_SENSF_REQ_RC_SC:
+               sensf_res->rd[0] = sensf_req->sc1;
+               sensf_res->rd[1] = sensf_req->sc2;
+               break;
+       case DIGITAL_SENSF_REQ_RC_AP:
+               sensf_res->rd[0] = DIGITAL_SENSF_RES_RD_AP_B1;
+               sensf_res->rd[1] = DIGITAL_SENSF_RES_RD_AP_B2;
+               break;
+       }
+
+       *skb_push(skb, sizeof(u8)) = size + 1;
+
+       if (!DIGITAL_DRV_CAPS_TG_CRC(ddev))
+               digital_skb_add_crc_f(skb);
+
+       rc = digital_tg_send_cmd(ddev, skb, 300,
+                                digital_tg_recv_atr_req, NULL);
+       if (rc)
+               kfree_skb(skb);
+
+       return rc;
+}
+
+void digital_tg_recv_sensf_req(struct nfc_digital_dev *ddev, void *arg,
+                              struct sk_buff *resp)
+{
+       struct digital_sensf_req *sensf_req;
+       int rc;
+
+       if (IS_ERR(resp)) {
+               rc = PTR_ERR(resp);
+               resp = NULL;
+               goto exit;
+       }
+
+       if (!DIGITAL_DRV_CAPS_TG_CRC(ddev)) {
+               rc = digital_skb_check_crc_f(resp);
+               if (rc) {
+                       PROTOCOL_ERR("6.4.1.8");
+                       goto exit;
+               }
+       }
+
+       if (resp->len != sizeof(struct digital_sensf_req) + 1) {
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       skb_pull(resp, 1);
+       sensf_req = (struct digital_sensf_req *)resp->data;
+
+       if (sensf_req->cmd != DIGITAL_CMD_SENSF_REQ) {
+               rc = -EINVAL;
+               goto exit;
+       }
+
+       rc = digital_tg_send_sensf_res(ddev, sensf_req);
+
+exit:
+       if (rc)
+               digital_poll_next_tech(ddev);
+
+       dev_kfree_skb(resp);
+}
+
+int digital_tg_listen_nfca(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+       int rc;
+
+       rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+       if (rc)
+               return rc;
+
+       rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+                                    NFC_DIGITAL_FRAMING_NFCA_NFC_DEP);
+       if (rc)
+               return rc;
+
+       return digital_tg_listen(ddev, 300, digital_tg_recv_sens_req, NULL);
+}
+
+int digital_tg_listen_nfcf(struct nfc_digital_dev *ddev, u8 rf_tech)
+{
+       int rc;
+       u8 *nfcid2;
+
+       rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, rf_tech);
+       if (rc)
+               return rc;
+
+       rc = digital_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+                                    NFC_DIGITAL_FRAMING_NFCF_NFC_DEP);
+       if (rc)
+               return rc;
+
+       nfcid2 = kzalloc(NFC_NFCID2_MAXSIZE, GFP_KERNEL);
+       if (!nfcid2)
+               return -ENOMEM;
+
+       nfcid2[0] = DIGITAL_SENSF_NFCID2_NFC_DEP_B1;
+       nfcid2[1] = DIGITAL_SENSF_NFCID2_NFC_DEP_B2;
+       get_random_bytes(nfcid2 + 2, NFC_NFCID2_MAXSIZE - 2);
+
+       return digital_tg_listen(ddev, 300, digital_tg_recv_sensf_req, nfcid2);
+}
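
The comment in digital_in_recv_mifare_res spells out why response classification is purely length-based: a successful READ is 16 data bytes plus 2 CRC bytes, a successful WRITE is a single 0x0A ACK byte, and anything else is an error. A stand-alone sketch of that classification on a plain byte buffer, with the CRC_A verification stubbed out since digital_skb_check_crc_a lives elsewhere in the stack:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MIFARE_READ_RES_LEN	16
#define CRC_LEN			2
#define MIFARE_ACK_RES		0x0A

/* Stand-in for the real CRC_A check; assume it returns 0 when the CRC is good. */
static int check_crc_a(const uint8_t *buf, size_t len)
{
	(void)buf; (void)len;
	return 0;
}

/* Mirrors digital_in_recv_mifare_res(): 0 for a valid READ or ACK, -1 otherwise. */
static int classify_mifare_res(const uint8_t *buf, size_t len)
{
	if (len == MIFARE_READ_RES_LEN + CRC_LEN)
		return check_crc_a(buf, len);	/* READ response */

	if (len == 1 && buf[0] == MIFARE_ACK_RES)
		return 0;			/* ACK after a WRITE */

	return -1;				/* NACK or anything else */
}

int main(void)
{
	uint8_t ack = MIFARE_ACK_RES;
	uint8_t read_res[MIFARE_READ_RES_LEN + CRC_LEN] = { 0 };

	printf("read: %d, ack: %d, bogus: %d\n",
	       classify_mifare_res(read_res, sizeof(read_res)),
	       classify_mifare_res(&ack, 1),
	       classify_mifare_res(&ack, 2));
	return 0;
}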
index c7cf37ba729819ab49c22f5b462320f633cdab5c..f1d426f10cceac10b0befc9f1c98caa6ad5b7793 100644 (file)
 #include <linux/export.h>
 #include <linux/spi/spi.h>
 #include <linux/crc-ccitt.h>
-#include <linux/nfc.h>
 #include <net/nfc/nci_core.h>
 
-#define NCI_SPI_HDR_LEN                        4
-#define NCI_SPI_CRC_LEN                        2
 #define NCI_SPI_ACK_SHIFT              6
 #define NCI_SPI_MSB_PAYLOAD_MASK       0x3F
 
 
 #define CRC_INIT               0xFFFF
 
-static int nci_spi_open(struct nci_dev *nci_dev)
-{
-       struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
-
-       return ndev->ops->open(ndev);
-}
-
-static int nci_spi_close(struct nci_dev *nci_dev)
-{
-       struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
-
-       return ndev->ops->close(ndev);
-}
-
-static int __nci_spi_send(struct nci_spi_dev *ndev, struct sk_buff *skb)
+static int __nci_spi_send(struct nci_spi *nspi, struct sk_buff *skb,
+                         int cs_change)
 {
        struct spi_message m;
        struct spi_transfer t;
 
-       t.tx_buf = skb->data;
-       t.len = skb->len;
-       t.cs_change = 0;
-       t.delay_usecs = ndev->xfer_udelay;
+       memset(&t, 0, sizeof(struct spi_transfer));
+       /* A NULL skb means we only want the SPI chip select line raised. */
+       if (skb) {
+               t.tx_buf = skb->data;
+               t.len = skb->len;
+       } else {
+               /* Still set tx_buf non-NULL to make the driver happy. */
+               t.tx_buf = &t;
+               t.len = 0;
+       }
+       t.cs_change = cs_change;
+       t.delay_usecs = nspi->xfer_udelay;
 
        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
 
-       return spi_sync(ndev->spi, &m);
+       return spi_sync(nspi->spi, &m);
 }
 
-static int nci_spi_send(struct nci_dev *nci_dev, struct sk_buff *skb)
+int nci_spi_send(struct nci_spi *nspi,
+                struct completion *write_handshake_completion,
+                struct sk_buff *skb)
 {
-       struct nci_spi_dev *ndev = nci_get_drvdata(nci_dev);
        unsigned int payload_len = skb->len;
        unsigned char *hdr;
        int ret;
        long completion_rc;
 
-       ndev->ops->deassert_int(ndev);
-
        /* add the NCI SPI header to the start of the buffer */
        hdr = skb_push(skb, NCI_SPI_HDR_LEN);
        hdr[0] = NCI_SPI_DIRECT_WRITE;
-       hdr[1] = ndev->acknowledge_mode;
+       hdr[1] = nspi->acknowledge_mode;
        hdr[2] = payload_len >> 8;
        hdr[3] = payload_len & 0xFF;
 
-       if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+       if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
                u16 crc;
 
                crc = crc_ccitt(CRC_INIT, skb->data, skb->len);
@@ -96,123 +87,77 @@ static int nci_spi_send(struct nci_dev *nci_dev, struct sk_buff *skb)
                *skb_put(skb, 1) = crc & 0xFF;
        }
 
-       ret = __nci_spi_send(ndev, skb);
+       if (write_handshake_completion) {
+               /* Trick the SPI driver into raising the chip select line. */
+               ret = __nci_spi_send(nspi, NULL, 1);
+               if (ret)
+                       goto done;
 
-       kfree_skb(skb);
-       ndev->ops->assert_int(ndev);
+               /* wait for NFC chip hardware handshake to complete */
+               if (wait_for_completion_timeout(write_handshake_completion,
+                                               msecs_to_jiffies(1000)) == 0) {
+                       ret = -ETIME;
+                       goto done;
+               }
+       }
 
-       if (ret != 0 || ndev->acknowledge_mode == NCI_SPI_CRC_DISABLED)
+       ret = __nci_spi_send(nspi, skb, 0);
+       if (ret != 0 || nspi->acknowledge_mode == NCI_SPI_CRC_DISABLED)
                goto done;
 
-       init_completion(&ndev->req_completion);
-       completion_rc =
-               wait_for_completion_interruptible_timeout(&ndev->req_completion,
-                                                         NCI_SPI_SEND_TIMEOUT);
+       init_completion(&nspi->req_completion);
+       completion_rc = wait_for_completion_interruptible_timeout(
+                                                       &nspi->req_completion,
+                                                       NCI_SPI_SEND_TIMEOUT);
 
-       if (completion_rc <= 0 || ndev->req_result == ACKNOWLEDGE_NACK)
+       if (completion_rc <= 0 || nspi->req_result == ACKNOWLEDGE_NACK)
                ret = -EIO;
 
 done:
+       kfree_skb(skb);
+
        return ret;
 }
-
-static struct nci_ops nci_spi_ops = {
-       .open = nci_spi_open,
-       .close = nci_spi_close,
-       .send = nci_spi_send,
-};
+EXPORT_SYMBOL_GPL(nci_spi_send);
 
 /* ---- Interface to NCI SPI drivers ---- */
 
 /**
- * nci_spi_allocate_device - allocate a new nci spi device
+ * nci_spi_allocate_spi - allocate a new nci spi
  *
  * @spi: SPI device
- * @ops: device operations
- * @supported_protocols: NFC protocols supported by the device
- * @supported_se: NFC Secure Elements supported by the device
- * @acknowledge_mode: Acknowledge mode used by the device
+ * @acknowledge_mode: Acknowledge mode used by the NFC device
  * @delay: delay between transactions in us
+ * @ndev: nci dev to send incoming nci frames to
  */
-struct nci_spi_dev *nci_spi_allocate_device(struct spi_device *spi,
-                                               struct nci_spi_ops *ops,
-                                               u32 supported_protocols,
-                                               u32 supported_se,
-                                               u8 acknowledge_mode,
-                                               unsigned int delay)
+struct nci_spi *nci_spi_allocate_spi(struct spi_device *spi,
+                                    u8 acknowledge_mode, unsigned int delay,
+                                    struct nci_dev *ndev)
 {
-       struct nci_spi_dev *ndev;
-       int tailroom = 0;
+       struct nci_spi *nspi;
 
-       if (!ops->open || !ops->close || !ops->assert_int || !ops->deassert_int)
+       nspi = devm_kzalloc(&spi->dev, sizeof(struct nci_spi), GFP_KERNEL);
+       if (!nspi)
                return NULL;
 
-       if (!supported_protocols)
-               return NULL;
-
-       ndev = devm_kzalloc(&spi->dev, sizeof(struct nci_dev), GFP_KERNEL);
-       if (!ndev)
-               return NULL;
+       nspi->acknowledge_mode = acknowledge_mode;
+       nspi->xfer_udelay = delay;
 
-       ndev->ops = ops;
-       ndev->acknowledge_mode = acknowledge_mode;
-       ndev->xfer_udelay = delay;
+       nspi->spi = spi;
+       nspi->ndev = ndev;
 
-       if (acknowledge_mode == NCI_SPI_CRC_ENABLED)
-               tailroom += NCI_SPI_CRC_LEN;
-
-       ndev->nci_dev = nci_allocate_device(&nci_spi_ops, supported_protocols,
-                                           NCI_SPI_HDR_LEN, tailroom);
-       if (!ndev->nci_dev)
-               return NULL;
-
-       nci_set_drvdata(ndev->nci_dev, ndev);
-
-       return ndev;
+       return nspi;
 }
-EXPORT_SYMBOL_GPL(nci_spi_allocate_device);
+EXPORT_SYMBOL_GPL(nci_spi_allocate_spi);
 
-/**
- * nci_spi_free_device - deallocate nci spi device
- *
- * @ndev: The nci spi device to deallocate
- */
-void nci_spi_free_device(struct nci_spi_dev *ndev)
-{
-       nci_free_device(ndev->nci_dev);
-}
-EXPORT_SYMBOL_GPL(nci_spi_free_device);
-
-/**
- * nci_spi_register_device - register a nci spi device in the nfc subsystem
- *
- * @pdev: The nci spi device to register
- */
-int nci_spi_register_device(struct nci_spi_dev *ndev)
-{
-       return nci_register_device(ndev->nci_dev);
-}
-EXPORT_SYMBOL_GPL(nci_spi_register_device);
-
-/**
- * nci_spi_unregister_device - unregister a nci spi device in the nfc subsystem
- *
- * @dev: The nci spi device to unregister
- */
-void nci_spi_unregister_device(struct nci_spi_dev *ndev)
-{
-       nci_unregister_device(ndev->nci_dev);
-}
-EXPORT_SYMBOL_GPL(nci_spi_unregister_device);
-
-static int send_acknowledge(struct nci_spi_dev *ndev, u8 acknowledge)
+static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge)
 {
        struct sk_buff *skb;
        unsigned char *hdr;
        u16 crc;
        int ret;
 
-       skb = nci_skb_alloc(ndev->nci_dev, 0, GFP_KERNEL);
+       skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
 
        /* add the NCI SPI header to the start of the buffer */
        hdr = skb_push(skb, NCI_SPI_HDR_LEN);
@@ -225,14 +170,14 @@ static int send_acknowledge(struct nci_spi_dev *ndev, u8 acknowledge)
        *skb_put(skb, 1) = crc >> 8;
        *skb_put(skb, 1) = crc & 0xFF;
 
-       ret = __nci_spi_send(ndev, skb);
+       ret = __nci_spi_send(nspi, skb, 0);
 
        kfree_skb(skb);
 
        return ret;
 }
 
-static struct sk_buff *__nci_spi_recv_frame(struct nci_spi_dev *ndev)
+static struct sk_buff *__nci_spi_read(struct nci_spi *nspi)
 {
        struct sk_buff *skb;
        struct spi_message m;
@@ -242,43 +187,49 @@ static struct sk_buff *__nci_spi_recv_frame(struct nci_spi_dev *ndev)
        int ret;
 
        spi_message_init(&m);
+
+       memset(&tx, 0, sizeof(struct spi_transfer));
        req[0] = NCI_SPI_DIRECT_READ;
-       req[1] = ndev->acknowledge_mode;
+       req[1] = nspi->acknowledge_mode;
        tx.tx_buf = req;
        tx.len = 2;
        tx.cs_change = 0;
        spi_message_add_tail(&tx, &m);
+
+       memset(&rx, 0, sizeof(struct spi_transfer));
        rx.rx_buf = resp_hdr;
        rx.len = 2;
        rx.cs_change = 1;
        spi_message_add_tail(&rx, &m);
-       ret = spi_sync(ndev->spi, &m);
 
+       ret = spi_sync(nspi->spi, &m);
        if (ret)
                return NULL;
 
-       if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED)
+       if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED)
                rx_len = ((resp_hdr[0] & NCI_SPI_MSB_PAYLOAD_MASK) << 8) +
                                resp_hdr[1] + NCI_SPI_CRC_LEN;
        else
                rx_len = (resp_hdr[0] << 8) | resp_hdr[1];
 
-       skb = nci_skb_alloc(ndev->nci_dev, rx_len, GFP_KERNEL);
+       skb = nci_skb_alloc(nspi->ndev, rx_len, GFP_KERNEL);
        if (!skb)
                return NULL;
 
        spi_message_init(&m);
+
+       memset(&rx, 0, sizeof(struct spi_transfer));
        rx.rx_buf = skb_put(skb, rx_len);
        rx.len = rx_len;
        rx.cs_change = 0;
-       rx.delay_usecs = ndev->xfer_udelay;
+       rx.delay_usecs = nspi->xfer_udelay;
        spi_message_add_tail(&rx, &m);
-       ret = spi_sync(ndev->spi, &m);
 
+       ret = spi_sync(nspi->spi, &m);
        if (ret)
                goto receive_error;
 
-       if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+       if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
                *skb_push(skb, 1) = resp_hdr[1];
                *skb_push(skb, 1) = resp_hdr[0];
        }
@@ -318,61 +269,53 @@ static u8 nci_spi_get_ack(struct sk_buff *skb)
 }
 
 /**
- * nci_spi_recv_frame - receive frame from NCI SPI drivers
+ * nci_spi_read - read frame from NCI SPI drivers
  *
- * @ndev: The nci spi device
+ * @nspi: The nci spi
  * Context: can sleep
  *
  * This call may only be used from a context that may sleep.  The sleep
  * is non-interruptible, and has no timeout.
  *
- * It returns zero on success, else a negative error code.
+ * It returns an allocated skb containing the frame on success, or NULL.
  */
-int nci_spi_recv_frame(struct nci_spi_dev *ndev)
+struct sk_buff *nci_spi_read(struct nci_spi *nspi)
 {
        struct sk_buff *skb;
-       int ret = 0;
-
-       ndev->ops->deassert_int(ndev);
 
        /* Retrieve frame from SPI */
-       skb = __nci_spi_recv_frame(ndev);
-       if (!skb) {
-               ret = -EIO;
+       skb = __nci_spi_read(nspi);
+       if (!skb)
                goto done;
-       }
 
-       if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
+       if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED) {
                if (!nci_spi_check_crc(skb)) {
-                       send_acknowledge(ndev, ACKNOWLEDGE_NACK);
+                       send_acknowledge(nspi, ACKNOWLEDGE_NACK);
                        goto done;
                }
 
                /* In case of acknowledged mode: if ACK or NACK received,
                 * unblock completion of latest frame sent.
                 */
-               ndev->req_result = nci_spi_get_ack(skb);
-               if (ndev->req_result)
-                       complete(&ndev->req_completion);
+               nspi->req_result = nci_spi_get_ack(skb);
+               if (nspi->req_result)
+                       complete(&nspi->req_completion);
        }
 
        /* If there is no payload (ACK/NACK only frame),
         * free the socket buffer
         */
-       if (skb->len == 0) {
+       if (!skb->len) {
                kfree_skb(skb);
+               skb = NULL;
                goto done;
        }
 
-       if (ndev->acknowledge_mode == NCI_SPI_CRC_ENABLED)
-               send_acknowledge(ndev, ACKNOWLEDGE_ACK);
-
-       /* Forward skb to NCI core layer */
-       ret = nci_recv_frame(ndev->nci_dev, skb);
+       if (nspi->acknowledge_mode == NCI_SPI_CRC_ENABLED)
+               send_acknowledge(nspi, ACKNOWLEDGE_ACK);
 
 done:
-       ndev->ops->assert_int(ndev);
 
-       return ret;
+       return skb;
 }
-EXPORT_SYMBOL_GPL(nci_spi_recv_frame);
+EXPORT_SYMBOL_GPL(nci_spi_read);
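
With this rework the SPI layer no longer registers an nci_dev of its own; a chip driver is expected to allocate its nci_dev, wrap the spi_device with nci_spi_allocate_spi(), push frames with nci_spi_send() and pull them with nci_spi_read() from its interrupt thread. The fragment below is a hypothetical sketch of that glue only; the foo_* names, the probe wiring and the source of the "write ready" handshake completion are assumptions, not part of this patch:

/* Hypothetical chip-driver glue for the reworked NCI SPI helpers. */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/spi/spi.h>
#include <net/nfc/nci_core.h>

struct foo_nfc {
	struct nci_dev *ndev;
	struct nci_spi *nspi;
	struct completion handshake_done;	/* completed from a "write ready" IRQ */
};

static irqreturn_t foo_irq_thread(int irq, void *data)
{
	struct foo_nfc *foo = data;
	struct sk_buff *skb;

	/* nci_spi_read() returns NULL on error or for ACK/NACK-only frames. */
	skb = nci_spi_read(foo->nspi);
	if (skb)
		nci_recv_frame(foo->ndev, skb);

	return IRQ_HANDLED;
}

static int foo_send(struct foo_nfc *foo, struct sk_buff *skb)
{
	/* Pass the completion only if the chip requires the chip-select handshake. */
	return nci_spi_send(foo->nspi, &foo->handshake_done, skb);
}

static int foo_attach_spi(struct foo_nfc *foo, struct spi_device *spi)
{
	init_completion(&foo->handshake_done);

	foo->nspi = nci_spi_allocate_spi(spi, NCI_SPI_CRC_ENABLED, 30,
					 foo->ndev);
	return foo->nspi ? 0 : -ENOMEM;
}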
index 68063b2025da2750519dac5a652cdbbea36dac74..84b7e3ea7b7ad7ce09e9256916589e4724d55969 100644 (file)
@@ -58,6 +58,7 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
        [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
        [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
                                     .len = NFC_FIRMWARE_NAME_MAXSIZE },
+       [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
 };
 
 static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
@@ -1278,6 +1279,91 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
        return 0;
 }
 
+struct se_io_ctx {
+       u32 dev_idx;
+       u32 se_idx;
+};
+
+static void se_io_cb(void *context, u8 *apdu, size_t apdu_len, int err)
+{
+       struct se_io_ctx *ctx = context;
+       struct sk_buff *msg;
+       void *hdr;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg) {
+               kfree(ctx);
+               return;
+       }
+
+       hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+                         NFC_CMD_SE_IO);
+       if (!hdr)
+               goto free_msg;
+
+       if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, ctx->dev_idx) ||
+           nla_put_u32(msg, NFC_ATTR_SE_INDEX, ctx->se_idx) ||
+           nla_put(msg, NFC_ATTR_SE_APDU, apdu_len, apdu))
+               goto nla_put_failure;
+
+       genlmsg_end(msg, hdr);
+
+       genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+       kfree(ctx);
+
+       return;
+
+nla_put_failure:
+       genlmsg_cancel(msg, hdr);
+free_msg:
+       nlmsg_free(msg);
+       kfree(ctx);
+
+       return;
+}
+
+static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
+{
+       struct nfc_dev *dev;
+       struct se_io_ctx *ctx;
+       u32 dev_idx, se_idx;
+       u8 *apdu;
+       size_t apdu_len;
+
+       if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+           !info->attrs[NFC_ATTR_SE_INDEX] ||
+           !info->attrs[NFC_ATTR_SE_APDU])
+               return -EINVAL;
+
+       dev_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+       se_idx = nla_get_u32(info->attrs[NFC_ATTR_SE_INDEX]);
+
+       dev = nfc_get_device(dev_idx);
+       if (!dev)
+               return -ENODEV;
+
+       if (!dev->ops || !dev->ops->se_io)
+               return -ENOTSUPP;
+
+       apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]);
+       if (apdu_len == 0)
+               return -EINVAL;
+
+       apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]);
+       if (!apdu)
+               return -EINVAL;
+
+       ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->dev_idx = dev_idx;
+       ctx->se_idx = se_idx;
+
+       return dev->ops->se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
+}
+
 static struct genl_ops nfc_genl_ops[] = {
        {
                .cmd = NFC_CMD_GET_DEVICE,
@@ -1358,6 +1444,11 @@ static struct genl_ops nfc_genl_ops[] = {
                .done = nfc_genl_dump_ses_done,
                .policy = nfc_genl_policy,
        },
+       {
+               .cmd = NFC_CMD_SE_IO,
+               .doit = nfc_genl_se_io,
+               .policy = nfc_genl_policy,
+       },
 };
 
 
index 313bf1bc848a8c45b755e838fc9cd595e1a833c2..cd958b381f9615911b1acfc6da6aceb39d373401 100644 (file)
@@ -142,11 +142,11 @@ static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
 
        err = rawsock_add_header(skb);
        if (err)
-               goto error;
+               goto error_skb;
 
        err = sock_queue_rcv_skb(sk, skb);
        if (err)
-               goto error;
+               goto error_skb;
 
        spin_lock_bh(&sk->sk_write_queue.lock);
        if (!skb_queue_empty(&sk->sk_write_queue))
@@ -158,6 +158,9 @@ static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
        sock_put(sk);
        return;
 
+error_skb:
+       kfree_skb(skb);
+
 error:
        rawsock_report_error(sk, err);
        sock_put(sk);
index a481c03e2861f7bcc1de1ec65b56cbc142a2f73d..56e22b74cf965f57ceb90ad0be84b378bbac093f 100644 (file)
@@ -173,7 +173,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 
        skb->local_df = 1;
 
-       inet_get_local_port_range(&port_min, &port_max);
+       inet_get_local_port_range(net, &port_min, &port_max);
        src_port = vxlan_src_port(port_min, port_max, skb);
 
        err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
index 642ad42c416ba915680645d8bc3539e5bdee4bf5..378c3a6acf84cab59346ab832d2fffba33e6e543 100644 (file)
@@ -51,10 +51,16 @@ static struct kmem_cache *rds_conn_slab;
 
 static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
 {
+       static u32 rds_hash_secret __read_mostly;
+
+       unsigned long hash;
+
+       net_get_random_once(&rds_hash_secret, sizeof(rds_hash_secret));
+
        /* Pass NULL, don't need struct net for hash */
-       unsigned long hash = inet_ehashfn(NULL,
-                                         be32_to_cpu(laddr), 0,
-                                         be32_to_cpu(faddr), 0);
+       hash = __inet_ehashfn(be32_to_cpu(laddr), 0,
+                             be32_to_cpu(faddr), 0,
+                             rds_hash_secret);
        return &rds_conn_hash[hash & RDS_CONNECTION_HASH_MASK];
 }
 
index ec1d731ecff0e2d103b1933f5997317c2dd1403b..48f8ffc60f8f1cee8e63448fe3aa7aaac0d5d4c1 100644 (file)
@@ -749,7 +749,7 @@ void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
 int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
                    struct cmsghdr *cmsg);
 
-extern void __rds_put_mr_final(struct rds_mr *mr);
+void __rds_put_mr_final(struct rds_mr *mr);
 static inline void rds_mr_put(struct rds_mr *mr)
 {
        if (atomic_dec_and_test(&mr->r_refcount))
index a693aca2ae2eb61a5c258fa21aa07b05e20c8e53..5f43675ee1df3822ecf98e432797f9b498047b58 100644 (file)
@@ -426,17 +426,16 @@ extern struct workqueue_struct *rxrpc_workqueue;
 /*
  * ar-accept.c
  */
-extern void rxrpc_accept_incoming_calls(struct work_struct *);
-extern struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *,
-                                           unsigned long);
-extern int rxrpc_reject_call(struct rxrpc_sock *);
+void rxrpc_accept_incoming_calls(struct work_struct *);
+struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long);
+int rxrpc_reject_call(struct rxrpc_sock *);
 
 /*
  * ar-ack.c
  */
-extern void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
-extern void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
-extern void rxrpc_process_call(struct work_struct *);
+void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
+void rxrpc_process_call(struct work_struct *);
 
 /*
  * ar-call.c
@@ -445,19 +444,18 @@ extern struct kmem_cache *rxrpc_call_jar;
 extern struct list_head rxrpc_calls;
 extern rwlock_t rxrpc_call_lock;
 
-extern struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
-                                               struct rxrpc_transport *,
-                                               struct rxrpc_conn_bundle *,
-                                               unsigned long, int, gfp_t);
-extern struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
-                                             struct rxrpc_connection *,
-                                             struct rxrpc_header *, gfp_t);
-extern struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *,
-                                                unsigned long);
-extern void rxrpc_release_call(struct rxrpc_call *);
-extern void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
-extern void __rxrpc_put_call(struct rxrpc_call *);
-extern void __exit rxrpc_destroy_all_calls(void);
+struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *,
+                                        struct rxrpc_transport *,
+                                        struct rxrpc_conn_bundle *,
+                                        unsigned long, int, gfp_t);
+struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
+                                      struct rxrpc_connection *,
+                                      struct rxrpc_header *, gfp_t);
+struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *, unsigned long);
+void rxrpc_release_call(struct rxrpc_call *);
+void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
+void __rxrpc_put_call(struct rxrpc_call *);
+void __exit rxrpc_destroy_all_calls(void);
 
 /*
  * ar-connection.c
@@ -465,19 +463,16 @@ extern void __exit rxrpc_destroy_all_calls(void);
 extern struct list_head rxrpc_connections;
 extern rwlock_t rxrpc_connection_lock;
 
-extern struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
-                                                 struct rxrpc_transport *,
-                                                 struct key *,
-                                                 __be16, gfp_t);
-extern void rxrpc_put_bundle(struct rxrpc_transport *,
-                            struct rxrpc_conn_bundle *);
-extern int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
-                             struct rxrpc_conn_bundle *, struct rxrpc_call *,
-                             gfp_t);
-extern void rxrpc_put_connection(struct rxrpc_connection *);
-extern void __exit rxrpc_destroy_all_connections(void);
-extern struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
-                                                     struct rxrpc_header *);
+struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *,
+                                          struct rxrpc_transport *,
+                                          struct key *, __be16, gfp_t);
+void rxrpc_put_bundle(struct rxrpc_transport *, struct rxrpc_conn_bundle *);
+int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_transport *,
+                      struct rxrpc_conn_bundle *, struct rxrpc_call *, gfp_t);
+void rxrpc_put_connection(struct rxrpc_connection *);
+void __exit rxrpc_destroy_all_connections(void);
+struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *,
+                                              struct rxrpc_header *);
 extern struct rxrpc_connection *
 rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
                          gfp_t);
@@ -485,15 +480,15 @@ rxrpc_incoming_connection(struct rxrpc_transport *, struct rxrpc_header *,
 /*
  * ar-connevent.c
  */
-extern void rxrpc_process_connection(struct work_struct *);
-extern void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
-extern void rxrpc_reject_packets(struct work_struct *);
+void rxrpc_process_connection(struct work_struct *);
+void rxrpc_reject_packet(struct rxrpc_local *, struct sk_buff *);
+void rxrpc_reject_packets(struct work_struct *);
 
 /*
  * ar-error.c
  */
-extern void rxrpc_UDP_error_report(struct sock *);
-extern void rxrpc_UDP_error_handler(struct work_struct *);
+void rxrpc_UDP_error_report(struct sock *);
+void rxrpc_UDP_error_handler(struct work_struct *);
 
 /*
  * ar-input.c
@@ -501,18 +496,17 @@ extern void rxrpc_UDP_error_handler(struct work_struct *);
 extern unsigned long rxrpc_ack_timeout;
 extern const char *rxrpc_pkts[];
 
-extern void rxrpc_data_ready(struct sock *, int);
-extern int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool,
-                              bool);
-extern void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
+void rxrpc_data_ready(struct sock *, int);
+int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
+void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
 
 /*
  * ar-local.c
  */
 extern rwlock_t rxrpc_local_lock;
-extern struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
-extern void rxrpc_put_local(struct rxrpc_local *);
-extern void __exit rxrpc_destroy_all_locals(void);
+struct rxrpc_local *rxrpc_lookup_local(struct sockaddr_rxrpc *);
+void rxrpc_put_local(struct rxrpc_local *);
+void __exit rxrpc_destroy_all_locals(void);
 
 /*
  * ar-key.c
@@ -520,31 +514,29 @@ extern void __exit rxrpc_destroy_all_locals(void);
 extern struct key_type key_type_rxrpc;
 extern struct key_type key_type_rxrpc_s;
 
-extern int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
-extern int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
-extern int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *,
-                                    time_t, u32);
+int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
+int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
+int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time_t,
+                             u32);
 
 /*
  * ar-output.c
  */
 extern int rxrpc_resend_timeout;
 
-extern int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
-extern int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
-                               struct rxrpc_transport *, struct msghdr *,
-                               size_t);
-extern int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *,
-                               struct msghdr *, size_t);
+int rxrpc_send_packet(struct rxrpc_transport *, struct sk_buff *);
+int rxrpc_client_sendmsg(struct kiocb *, struct rxrpc_sock *,
+                        struct rxrpc_transport *, struct msghdr *, size_t);
+int rxrpc_server_sendmsg(struct kiocb *, struct rxrpc_sock *, struct msghdr *,
+                        size_t);
 
 /*
  * ar-peer.c
  */
-extern struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
-extern void rxrpc_put_peer(struct rxrpc_peer *);
-extern struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *,
-                                         __be32, __be16);
-extern void __exit rxrpc_destroy_all_peers(void);
+struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *, gfp_t);
+void rxrpc_put_peer(struct rxrpc_peer *);
+struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *, __be32, __be16);
+void __exit rxrpc_destroy_all_peers(void);
 
 /*
  * ar-proc.c
@@ -556,38 +548,36 @@ extern const struct file_operations rxrpc_connection_seq_fops;
 /*
  * ar-recvmsg.c
  */
-extern void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
-extern int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *,
-                        size_t, int);
+void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
+int rxrpc_recvmsg(struct kiocb *, struct socket *, struct msghdr *, size_t,
+                 int);
 
 /*
  * ar-security.c
  */
-extern int rxrpc_register_security(struct rxrpc_security *);
-extern void rxrpc_unregister_security(struct rxrpc_security *);
-extern int rxrpc_init_client_conn_security(struct rxrpc_connection *);
-extern int rxrpc_init_server_conn_security(struct rxrpc_connection *);
-extern int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *,
-                              size_t, void *);
-extern int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *,
-                              u32 *);
-extern void rxrpc_clear_conn_security(struct rxrpc_connection *);
+int rxrpc_register_security(struct rxrpc_security *);
+void rxrpc_unregister_security(struct rxrpc_security *);
+int rxrpc_init_client_conn_security(struct rxrpc_connection *);
+int rxrpc_init_server_conn_security(struct rxrpc_connection *);
+int rxrpc_secure_packet(const struct rxrpc_call *, struct sk_buff *, size_t,
+                       void *);
+int rxrpc_verify_packet(const struct rxrpc_call *, struct sk_buff *, u32 *);
+void rxrpc_clear_conn_security(struct rxrpc_connection *);
 
 /*
  * ar-skbuff.c
  */
-extern void rxrpc_packet_destructor(struct sk_buff *);
+void rxrpc_packet_destructor(struct sk_buff *);
 
 /*
  * ar-transport.c
  */
-extern struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
-                                                  struct rxrpc_peer *,
-                                                  gfp_t);
-extern void rxrpc_put_transport(struct rxrpc_transport *);
-extern void __exit rxrpc_destroy_all_transports(void);
-extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
-                                                   struct rxrpc_peer *);
+struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *,
+                                           struct rxrpc_peer *, gfp_t);
+void rxrpc_put_transport(struct rxrpc_transport *);
+void __exit rxrpc_destroy_all_transports(void);
+struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
+                                            struct rxrpc_peer *);
 
 /*
  * debug tracing
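
Note on the hunks above: they only drop the redundant "extern" keyword from function prototypes. For functions (unlike data objects) a declaration with and without "extern" means exactly the same thing, so the cleanup changes no behaviour. A minimal standalone C sketch of that equivalence, using a hypothetical function name:

    #include <stdio.h>

    extern int answer(void);   /* prototype with extern ...             */
    int answer(void);          /* ... and the equivalent one without it */

    int answer(void) { return 42; }

    int main(void)
    {
            printf("%d\n", answer());   /* prints 42 either way */
            return 0;
    }
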
index 189e3c5b3d098a0d224a8fb2a04b1bd76e47b3ac..272d8e924cf6b2467e2772898aca2d7895131545 100644 (file)
@@ -231,14 +231,14 @@ override:
        }
        if (R_tab) {
                police->rate_present = true;
-               psched_ratecfg_precompute(&police->rate, &R_tab->rate);
+               psched_ratecfg_precompute(&police->rate, &R_tab->rate, 0);
                qdisc_put_rtab(R_tab);
        } else {
                police->rate_present = false;
        }
        if (P_tab) {
                police->peak_present = true;
-               psched_ratecfg_precompute(&police->peak, &P_tab->rate);
+               psched_ratecfg_precompute(&police->peak, &P_tab->rate, 0);
                qdisc_put_rtab(P_tab);
        } else {
                police->peak_present = false;
index d76a35d0dc85b82ddd9b8f2cc037b1ff0d31f5d5..636d9131d87016a46597af5cadd3471d21e1798f 100644 (file)
@@ -137,7 +137,7 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
                           struct nlattr **tb,
                           struct nlattr *est)
 {
-       int err = -EINVAL;
+       int err;
        struct tcf_exts e;
        struct tcf_ematch_tree t;
 
index 867b4a3e39800fb44819e3cfd68ad0fb30d6ea05..16006c92c3fde515d9047a4e32f0ec3f0df2f005 100644 (file)
@@ -72,11 +72,11 @@ static void cgrp_attach(struct cgroup_subsys_state *css,
                        struct cgroup_taskset *tset)
 {
        struct task_struct *p;
-       void *v;
+       struct cgroup_cls_state *cs = css_cls_state(css);
+       void *v = (void *)(unsigned long)cs->classid;
 
        cgroup_taskset_for_each(p, css, tset) {
                task_lock(p);
-               v = (void *)(unsigned long)task_cls_classid(p);
                iterate_fd(p->files, 0, update_classid, v);
                task_unlock(p);
        }
index 938b7cbf56278b34c6d5574a64c8c32fa4a1d3a2..527aeb7a3ff0b3b558873d0c7d93fc2c26e100b8 100644 (file)
@@ -24,11 +24,12 @@ static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
 {
        struct xt_set_info *set = data;
        ip_set_id_t index;
+       struct net *net = dev_net(qdisc_dev(tp->q));
 
        if (data_len != sizeof(*set))
                return -EINVAL;
 
-       index = ip_set_nfnl_get_byindex(set->index);
+       index = ip_set_nfnl_get_byindex(net, set->index);
        if (index == IPSET_INVALID_ID)
                return -ENOENT;
 
@@ -37,7 +38,7 @@ static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
        if (em->data)
                return 0;
 
-       ip_set_nfnl_put(index);
+       ip_set_nfnl_put(net, index);
        return -ENOMEM;
 }
 
@@ -45,7 +46,7 @@ static void em_ipset_destroy(struct tcf_proto *p, struct tcf_ematch *em)
 {
        const struct xt_set_info *set = (const void *) em->data;
        if (set) {
-               ip_set_nfnl_put(set->index);
+               ip_set_nfnl_put(dev_net(qdisc_dev(p->q)), set->index);
                kfree((void *) em->data);
        }
 }
index 7c3de6ffa5164db0f7abd3f0e2cc0ca2e92ecda9..e5cef95672256b650249c80bef36930f78be9f13 100644 (file)
@@ -793,8 +793,10 @@ static int em_meta_change(struct tcf_proto *tp, void *data, int len,
                goto errout;
 
        meta = kzalloc(sizeof(*meta), GFP_KERNEL);
-       if (meta == NULL)
+       if (meta == NULL) {
+               err = -ENOMEM;
                goto errout;
+       }
 
        memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
        memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));
index 2adda7fa2d390c4bb2aec883cf049df7e7cef9ef..cd81505662b8a3bcc201a18bce0d6c9bd421b650 100644 (file)
@@ -737,9 +737,11 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        u32 parentid;
+       int drops;
 
        if (n == 0)
                return;
+       drops = max_t(int, n, 0);
        while ((parentid = sch->parent)) {
                if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
                        return;
@@ -756,6 +758,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
                        cops->put(sch, cl);
                }
                sch->q.qlen -= n;
+               sch->qstats.drops += drops;
        }
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
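
The new drops accounting clamps n before adding it to qstats.drops: max_t(int, n, 0) treats a value that is negative when viewed as an int as "no drops", so a negative qlen adjustment cannot inflate the drop counter. A small userspace illustration of that arithmetic (hypothetical helper, not kernel code):

    #include <stdio.h>

    /* same effect as max_t(int, n, 0) on an unsigned argument */
    static int clamp_drops(unsigned int n)
    {
            return (int)n > 0 ? (int)n : 0;
    }

    int main(void)
    {
            printf("%d\n", clamp_drops(3));                 /* 3 */
            printf("%d\n", clamp_drops((unsigned int)-2));  /* 0, not 4294967294 */
            return 0;
    }
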
index a2fef8b10b960c15d29556b7dc832737c848b3b6..a9dfdda9ed1d55d17643f36ba05d9ac04ce1557d 100644 (file)
@@ -472,20 +472,16 @@ begin:
        if (f->credit > 0 || !q->rate_enable)
                goto out;
 
-       if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
-               rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+       rate = q->flow_max_rate;
+       if (skb->sk && skb->sk->sk_state != TCP_TIME_WAIT)
+               rate = min(skb->sk->sk_pacing_rate, rate);
 
-               rate = min(rate, q->flow_max_rate);
-       } else {
-               rate = q->flow_max_rate;
-               if (rate == ~0U)
-                       goto out;
-       }
-       if (rate) {
+       if (rate != ~0U) {
                u32 plen = max(qdisc_pkt_len(skb), q->quantum);
                u64 len = (u64)plen * NSEC_PER_SEC;
 
-               do_div(len, rate);
+               if (likely(rate))
+                       do_div(len, rate);
                /* Since socket rate can change later,
                 * clamp the delay to 125 ms.
                 * TODO: maybe segment the too big skb, as in commit
@@ -656,7 +652,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
                q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
 
        if (tb[TCA_FQ_INITIAL_QUANTUM])
-               q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+               q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
 
        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
@@ -735,12 +731,14 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (opts == NULL)
                goto nla_put_failure;
 
+       /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore,
+        * do not bother giving its value
+        */
        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
-           nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;
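
The dequeue-path hunk above derives an inter-packet gap from the socket pacing rate: the delay in nanoseconds is the packet length times NSEC_PER_SEC divided by the rate in bytes per second, clamped to 125 ms as the in-code comment notes. A self-contained userspace sketch of that arithmetic (illustrative only, not the kernel function):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    static uint64_t pacing_delay_ns(uint32_t plen, uint32_t rate /* bytes/sec */)
    {
            uint64_t len = (uint64_t)plen * NSEC_PER_SEC;

            if (rate)                          /* mirrors the do_div() guard */
                    len /= rate;
            if (len > 125000000ULL)            /* clamp the delay to 125 ms */
                    len = 125000000ULL;
            return len;
    }

    int main(void)
    {
            /* 1500-byte packet paced at 125000 bytes/s (1 Mbit/s): 12 ms gap */
            printf("%llu ns\n", (unsigned long long)pacing_delay_ns(1500, 125000));
            return 0;
    }
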
index a74e278654aa2290a0ef3061d1392c5cf405e5f5..7fc899a943a8fa8368415bc0c6c8a939bd042963 100644 (file)
@@ -829,7 +829,7 @@ void dev_deactivate_many(struct list_head *head)
        struct net_device *dev;
        bool sync_needed = false;
 
-       list_for_each_entry(dev, head, unreg_list) {
+       list_for_each_entry(dev, head, close_list) {
                netdev_for_each_tx_queue(dev, dev_deactivate_queue,
                                         &noop_qdisc);
                if (dev_ingress_queue(dev))
@@ -848,7 +848,7 @@ void dev_deactivate_many(struct list_head *head)
                synchronize_net();
 
        /* Wait for outstanding qdisc_run calls. */
-       list_for_each_entry(dev, head, unreg_list)
+       list_for_each_entry(dev, head, close_list)
                while (some_qdisc_is_busy(dev))
                        yield();
 }
@@ -857,7 +857,7 @@ void dev_deactivate(struct net_device *dev)
 {
        LIST_HEAD(single);
 
-       list_add(&dev->unreg_list, &single);
+       list_add(&dev->close_list, &single);
        dev_deactivate_many(&single);
        list_del(&single);
 }
@@ -910,11 +910,12 @@ void dev_shutdown(struct net_device *dev)
 }
 
 void psched_ratecfg_precompute(struct psched_ratecfg *r,
-                              const struct tc_ratespec *conf)
+                              const struct tc_ratespec *conf,
+                              u64 rate64)
 {
        memset(r, 0, sizeof(*r));
        r->overhead = conf->overhead;
-       r->rate_bytes_ps = conf->rate;
+       r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
        r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
        r->mult = 1;
        /*
index 863846cc5513b5cd136264265d226a811bd868c6..0e1e38b40025fd111f50bfce339a6d2e7cae1252 100644 (file)
@@ -997,6 +997,8 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
        [TCA_HTB_CTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_HTB_RTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
        [TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
+       [TCA_HTB_RATE64] = { .type = NLA_U64 },
+       [TCA_HTB_CEIL64] = { .type = NLA_U64 },
 };
 
 static void htb_work_func(struct work_struct *work)
@@ -1114,6 +1116,12 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
        opt.level = cl->level;
        if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
+       if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
+           nla_put_u64(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps))
+               goto nla_put_failure;
+       if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
+           nla_put_u64(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps))
+               goto nla_put_failure;
 
        nla_nest_end(skb, nest);
        spin_unlock_bh(root_lock);
@@ -1332,6 +1340,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
        struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
        struct nlattr *tb[TCA_HTB_MAX + 1];
        struct tc_htb_opt *hopt;
+       u64 rate64, ceil64;
 
        /* extract all subattrs from opt attr */
        if (!opt)
@@ -1491,8 +1500,12 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                        cl->prio = TC_HTB_NUMPRIO - 1;
        }
 
-       psched_ratecfg_precompute(&cl->rate, &hopt->rate);
-       psched_ratecfg_precompute(&cl->ceil, &hopt->ceil);
+       rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
+
+       ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
+
+       psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
+       psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
 
        cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
        cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
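
TCA_HTB_RATE64 and TCA_HTB_CEIL64 exist because tc_ratespec.rate is a 32-bit bytes-per-second field, which tops out just below 2^32 B/s (about 34 Gbit/s); psched_ratecfg_precompute() now takes the larger of the legacy 32-bit value and the optional 64-bit attribute, as the hunks above show. A small arithmetic illustration (userspace sketch, not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    /* same selection as max_t(u64, conf->rate, rate64) */
    static uint64_t effective_rate(uint32_t rate32, uint64_t rate64)
    {
            return rate32 > rate64 ? rate32 : rate64;
    }

    int main(void)
    {
            uint64_t u32_limit = 1ULL << 32;                  /* 4294967296 B/s */

            printf("u32 ceiling: %.2f Gbit/s\n", u32_limit * 8 / 1e9);
            printf("%llu B/s\n",
                   (unsigned long long)effective_rate(0, 5000000000ULL));
            return 0;
    }
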
index a6d788d45216a6f286e5aaea0cdb0587cd0f7848..b87e83d0747821bc2251c3b099f91a3358c211d9 100644 (file)
@@ -358,6 +358,21 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
        return PSCHED_NS2TICKS(ticks);
 }
 
+static void tfifo_reset(struct Qdisc *sch)
+{
+       struct netem_sched_data *q = qdisc_priv(sch);
+       struct rb_node *p;
+
+       while ((p = rb_first(&q->t_root))) {
+               struct sk_buff *skb = netem_rb_to_skb(p);
+
+               rb_erase(p, &q->t_root);
+               skb->next = NULL;
+               skb->prev = NULL;
+               kfree_skb(skb);
+       }
+}
+
 static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
@@ -520,6 +535,7 @@ static unsigned int netem_drop(struct Qdisc *sch)
                        skb->next = NULL;
                        skb->prev = NULL;
                        len = qdisc_pkt_len(skb);
+                       sch->qstats.backlog -= len;
                        kfree_skb(skb);
                }
        }
@@ -609,6 +625,7 @@ static void netem_reset(struct Qdisc *sch)
        struct netem_sched_data *q = qdisc_priv(sch);
 
        qdisc_reset_queue(sch);
+       tfifo_reset(sch);
        if (q->qdisc)
                qdisc_reset(q->qdisc);
        qdisc_watchdog_cancel(&q->watchdog);
index 1aaf1b6e51a2be238bc47797597ed3bcb76de4b1..b0571224f3c92c00f2899db53baaf04bfdaa45c8 100644 (file)
@@ -341,9 +341,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
        q->tokens = q->buffer;
        q->ptokens = q->mtu;
 
-       psched_ratecfg_precompute(&q->rate, &rtab->rate);
+       psched_ratecfg_precompute(&q->rate, &rtab->rate, 0);
        if (ptab) {
-               psched_ratecfg_precompute(&q->peak, &ptab->rate);
+               psched_ratecfg_precompute(&q->peak, &ptab->rate, 0);
                q->peak_present = true;
        } else {
                q->peak_present = false;
index e7b2d4fe2b6a120c66b3e869d796eb6dba69c2d2..7567e6f1a9205bb0a37a29becc243d9ed6352fe4 100644 (file)
@@ -279,7 +279,9 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port));
                rcu_read_lock();
                list_for_each_entry_rcu(laddr, &bp->address_list, list) {
-                       if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
+                       if (!laddr->valid || laddr->state == SCTP_ADDR_DEL ||
+                           (laddr->state != SCTP_ADDR_SRC &&
+                            !asoc->src_out_of_asoc_ok))
                                continue;
 
                        /* Do not compare against v4 addrs */
@@ -426,20 +428,20 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
 {
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_port = 0;
-       addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr;
+       addr->v6.sin6_addr = sk->sk_v6_rcv_saddr;
 }
 
 /* Initialize sk->sk_rcv_saddr from sctp_addr. */
 static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
 {
        if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) {
-               inet6_sk(sk)->rcv_saddr.s6_addr32[0] = 0;
-               inet6_sk(sk)->rcv_saddr.s6_addr32[1] = 0;
-               inet6_sk(sk)->rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
-               inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
+               sk->sk_v6_rcv_saddr.s6_addr32[0] = 0;
+               sk->sk_v6_rcv_saddr.s6_addr32[1] = 0;
+               sk->sk_v6_rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
+               sk->sk_v6_rcv_saddr.s6_addr32[3] =
                        addr->v4.sin_addr.s_addr;
        } else {
-               inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr;
+               sk->sk_v6_rcv_saddr = addr->v6.sin6_addr;
        }
 }
 
@@ -447,12 +449,12 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
 static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
 {
        if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) {
-               inet6_sk(sk)->daddr.s6_addr32[0] = 0;
-               inet6_sk(sk)->daddr.s6_addr32[1] = 0;
-               inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
-               inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+               sk->sk_v6_daddr.s6_addr32[0] = 0;
+               sk->sk_v6_daddr.s6_addr32[1] = 0;
+               sk->sk_v6_daddr.s6_addr32[2] = htonl(0x0000ffff);
+               sk->sk_v6_daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
        } else {
-               inet6_sk(sk)->daddr = addr->v6.sin6_addr;
+               sk->sk_v6_daddr = addr->v6.sin6_addr;
        }
 }
 
index 0ac3a65daccb71cd78ae5e2c52696c33df7e16e4..319137340d158df36094c45b8eae1fc13df50825 100644 (file)
@@ -536,7 +536,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
         * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
         */
        if (!sctp_checksum_disable) {
-               if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
+               if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
+                   (dst_xfrm(dst) != NULL) || packet->ipfragok) {
                        __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
 
                        /* 3) Put the resultant value into the checksum field in the
index 911b71b26b0e6670bfac1a7f8d450aea510f7333..72046b9729a8a6a669fb9c75446de8ef2fe345c6 100644 (file)
@@ -5890,7 +5890,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
                int low, high, remaining, index;
                unsigned int rover;
 
-               inet_get_local_port_range(&low, &high);
+               inet_get_local_port_range(sock_net(sk), &low, &high);
                remaining = (high - low) + 1;
                rover = net_random() % remaining + low;
 
index ebed4b68f768a1afccea45ea7dc22ef555f3f679..c226aceee65b8b8c59d93d6a133a04c86177ceef 100644 (file)
@@ -1964,6 +1964,16 @@ struct used_address {
        unsigned int name_len;
 };
 
+static int copy_msghdr_from_user(struct msghdr *kmsg,
+                                struct msghdr __user *umsg)
+{
+       if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
+               return -EFAULT;
+       if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
+               return -EINVAL;
+       return 0;
+}
+
 static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
                         struct msghdr *msg_sys, unsigned int flags,
                         struct used_address *used_address)
@@ -1982,8 +1992,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
        if (MSG_CMSG_COMPAT & flags) {
                if (get_compat_msghdr(msg_sys, msg_compat))
                        return -EFAULT;
-       } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-               return -EFAULT;
+       } else {
+               err = copy_msghdr_from_user(msg_sys, msg);
+               if (err)
+                       return err;
+       }
 
        if (msg_sys->msg_iovlen > UIO_FASTIOV) {
                err = -EMSGSIZE;
@@ -2191,8 +2204,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
        if (MSG_CMSG_COMPAT & flags) {
                if (get_compat_msghdr(msg_sys, msg_compat))
                        return -EFAULT;
-       } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
-               return -EFAULT;
+       } else {
+               err = copy_msghdr_from_user(msg_sys, msg);
+               if (err)
+                       return err;
+       }
 
        if (msg_sys->msg_iovlen > UIO_FASTIOV) {
                err = -EMSGSIZE;
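
copy_msghdr_from_user() adds one sanity check on top of the raw copy_from_user(): a msg_namelen larger than sizeof(struct sockaddr_storage) is rejected with -EINVAL before the header is used, and both the sendmsg and recvmsg paths now share that helper. A userspace sketch of the same check (hypothetical helper name, not the kernel function):

    #include <errno.h>
    #include <string.h>
    #include <sys/socket.h>

    static int validate_msghdr(const struct msghdr *msg)
    {
            if (msg->msg_namelen > sizeof(struct sockaddr_storage))
                    return -EINVAL;
            return 0;
    }

    int main(void)
    {
            struct msghdr msg;

            memset(&msg, 0, sizeof(msg));
            msg.msg_namelen = 4096;                 /* oversized address length */
            return validate_msghdr(&msg) ? 1 : 0;   /* exits non-zero here */
    }
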
index 77479606a9716e9526019ba5e5a0dfa4b7d010a2..7352aef8a25444e450e17e38bbe9bcbf81185ddc 100644 (file)
@@ -772,6 +772,8 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
                atomic_inc(&clnt->cl_count);
                if (clnt->cl_softrtry)
                        task->tk_flags |= RPC_TASK_SOFT;
+               if (clnt->cl_noretranstimeo)
+                       task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
                if (sk_memalloc_socks()) {
                        struct rpc_xprt *xprt;
 
@@ -1690,6 +1692,7 @@ call_connect_status(struct rpc_task *task)
        dprint_status(task);
 
        trace_rpc_connect_status(task, status);
+       task->tk_status = 0;
        switch (status) {
                /* if soft mounted, test if we've timed out */
        case -ETIMEDOUT:
@@ -1698,12 +1701,14 @@ call_connect_status(struct rpc_task *task)
        case -ECONNREFUSED:
        case -ECONNRESET:
        case -ENETUNREACH:
+               /* retry with existing socket, after a delay */
+               rpc_delay(task, 3*HZ);
                if (RPC_IS_SOFTCONN(task))
                        break;
-               /* retry with existing socket, after a delay */
-       case 0:
        case -EAGAIN:
-               task->tk_status = 0;
+               task->tk_action = call_bind;
+               return;
+       case 0:
                clnt->cl_stats->netreconn++;
                task->tk_action = call_transmit;
                return;
@@ -1717,13 +1722,14 @@ call_connect_status(struct rpc_task *task)
 static void
 call_transmit(struct rpc_task *task)
 {
+       int is_retrans = RPC_WAS_SENT(task);
+
        dprint_status(task);
 
        task->tk_action = call_status;
        if (task->tk_status < 0)
                return;
-       task->tk_status = xprt_prepare_transmit(task);
-       if (task->tk_status != 0)
+       if (!xprt_prepare_transmit(task))
                return;
        task->tk_action = call_transmit_status;
        /* Encode here so that rpcsec_gss can use correct sequence number. */
@@ -1742,6 +1748,8 @@ call_transmit(struct rpc_task *task)
        xprt_transmit(task);
        if (task->tk_status < 0)
                return;
+       if (is_retrans)
+               task->tk_client->cl_stats->rpcretrans++;
        /*
         * On success, ensure that we call xprt_end_transmit() before sleeping
         * in order to allow access to the socket to other RPC requests.
@@ -1811,8 +1819,7 @@ call_bc_transmit(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
 
-       task->tk_status = xprt_prepare_transmit(task);
-       if (task->tk_status == -EAGAIN) {
+       if (!xprt_prepare_transmit(task)) {
                /*
                 * Could not reserve the transport. Try again after the
                 * transport is released.
@@ -1900,7 +1907,8 @@ call_status(struct rpc_task *task)
                rpc_delay(task, 3*HZ);
        case -ETIMEDOUT:
                task->tk_action = call_timeout;
-               if (task->tk_client->cl_discrtry)
+               if (!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
+                   && task->tk_client->cl_discrtry)
                        xprt_conditional_disconnect(req->rq_xprt,
                                        req->rq_connect_cookie);
                break;
@@ -1982,7 +1990,6 @@ call_timeout(struct rpc_task *task)
        rpcauth_invalcred(task);
 
 retry:
-       clnt->cl_stats->rpcretrans++;
        task->tk_action = call_bind;
        task->tk_status = 0;
 }
@@ -2025,7 +2032,6 @@ call_decode(struct rpc_task *task)
        if (req->rq_rcv_buf.len < 12) {
                if (!RPC_IS_SOFT(task)) {
                        task->tk_action = call_bind;
-                       clnt->cl_stats->rpcretrans++;
                        goto out_retry;
                }
                dprintk("RPC:       %s: too small RPC reply size (%d bytes)\n",
index f94567b45bb3eb9f4f8b0ec82db4ee08a304c48b..d0d14a04dce1eb2e4274e3111d9008b71da918aa 100644 (file)
@@ -519,8 +519,8 @@ static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
        d_add(dentry, inode);
        return 0;
 out_err:
-       printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
-                       __FILE__, __func__, dentry->d_name.name);
+       printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %pd\n",
+                       __FILE__, __func__, dentry);
        dput(dentry);
        return -ENOMEM;
 }
@@ -755,8 +755,8 @@ static int rpc_populate(struct dentry *parent,
 out_bad:
        __rpc_depopulate(parent, files, start, eof);
        mutex_unlock(&dir->i_mutex);
-       printk(KERN_WARNING "%s: %s failed to populate directory %s\n",
-                       __FILE__, __func__, parent->d_name.name);
+       printk(KERN_WARNING "%s: %s failed to populate directory %pd\n",
+                       __FILE__, __func__, parent);
        return err;
 }
 
@@ -852,8 +852,8 @@ out:
        return dentry;
 out_err:
        dentry = ERR_PTR(err);
-       printk(KERN_WARNING "%s: %s() failed to create pipe %s/%s (errno = %d)\n",
-                       __FILE__, __func__, parent->d_name.name, name,
+       printk(KERN_WARNING "%s: %s() failed to create pipe %pd/%s (errno = %d)\n",
+                       __FILE__, __func__, parent, name,
                        err);
        goto out;
 }
index 9c9caaa5e0d3129f8872350ded706321fb2509c2..b6e59f0a9475d665ea1952a8428e56ed6b06b89e 100644 (file)
@@ -291,12 +291,14 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining)
                                &inet_sk(sk)->inet_rcv_saddr,
                                inet_sk(sk)->inet_num);
                break;
+#if IS_ENABLED(CONFIG_IPV6)
        case PF_INET6:
                len = snprintf(buf, remaining, "ipv6 %s %pI6 %d\n",
                                proto_name,
-                               &inet6_sk(sk)->rcv_saddr,
+                               &sk->sk_v6_rcv_saddr,
                                inet_sk(sk)->inet_num);
                break;
+#endif
        default:
                len = snprintf(buf, remaining, "*unknown-%d*\n",
                                sk->sk_family);
index 095363eee764b3ff496a745f56e4fa646f5f02c0..4953550537e0d6284625d39bf46992a792966ace 100644 (file)
@@ -205,10 +205,8 @@ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
                goto out_sleep;
        }
        xprt->snd_task = task;
-       if (req != NULL) {
-               req->rq_bytes_sent = 0;
+       if (req != NULL)
                req->rq_ntrans++;
-       }
 
        return 1;
 
@@ -263,7 +261,6 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
        }
        if (__xprt_get_cong(xprt, task)) {
                xprt->snd_task = task;
-               req->rq_bytes_sent = 0;
                req->rq_ntrans++;
                return 1;
        }
@@ -300,10 +297,8 @@ static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
 
        req = task->tk_rqstp;
        xprt->snd_task = task;
-       if (req) {
-               req->rq_bytes_sent = 0;
+       if (req)
                req->rq_ntrans++;
-       }
        return true;
 }
 
@@ -329,7 +324,6 @@ static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
        }
        if (__xprt_get_cong(xprt, task)) {
                xprt->snd_task = task;
-               req->rq_bytes_sent = 0;
                req->rq_ntrans++;
                return true;
        }
@@ -358,6 +352,11 @@ out_unlock:
 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        if (xprt->snd_task == task) {
+               if (task != NULL) {
+                       struct rpc_rqst *req = task->tk_rqstp;
+                       if (req != NULL)
+                               req->rq_bytes_sent = 0;
+               }
                xprt_clear_locked(xprt);
                __xprt_lock_write_next(xprt);
        }
@@ -375,6 +374,11 @@ EXPORT_SYMBOL_GPL(xprt_release_xprt);
 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 {
        if (xprt->snd_task == task) {
+               if (task != NULL) {
+                       struct rpc_rqst *req = task->tk_rqstp;
+                       if (req != NULL)
+                               req->rq_bytes_sent = 0;
+               }
                xprt_clear_locked(xprt);
                __xprt_lock_write_next_cong(xprt);
        }
@@ -854,24 +858,36 @@ static inline int xprt_has_timer(struct rpc_xprt *xprt)
  * @task: RPC task about to send a request
  *
  */
-int xprt_prepare_transmit(struct rpc_task *task)
+bool xprt_prepare_transmit(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
-       int err = 0;
+       bool ret = false;
 
        dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
 
        spin_lock_bh(&xprt->transport_lock);
-       if (req->rq_reply_bytes_recvd && !req->rq_bytes_sent) {
-               err = req->rq_reply_bytes_recvd;
+       if (!req->rq_bytes_sent) {
+               if (req->rq_reply_bytes_recvd) {
+                       task->tk_status = req->rq_reply_bytes_recvd;
+                       goto out_unlock;
+               }
+               if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
+                   && xprt_connected(xprt)
+                   && req->rq_connect_cookie == xprt->connect_cookie) {
+                       xprt->ops->set_retrans_timeout(task);
+                       rpc_sleep_on(&xprt->pending, task, xprt_timer);
+                       goto out_unlock;
+               }
+       }
+       if (!xprt->ops->reserve_xprt(xprt, task)) {
+               task->tk_status = -EAGAIN;
                goto out_unlock;
        }
-       if (!xprt->ops->reserve_xprt(xprt, task))
-               err = -EAGAIN;
+       ret = true;
 out_unlock:
        spin_unlock_bh(&xprt->transport_lock);
-       return err;
+       return ret;
 }
 
 void xprt_end_transmit(struct rpc_task *task)
@@ -912,7 +928,6 @@ void xprt_transmit(struct rpc_task *task)
        } else if (!req->rq_bytes_sent)
                return;
 
-       req->rq_connect_cookie = xprt->connect_cookie;
        req->rq_xtime = ktime_get();
        status = xprt->ops->send_request(task);
        if (status != 0) {
@@ -938,12 +953,14 @@ void xprt_transmit(struct rpc_task *task)
        /* Don't race with disconnect */
        if (!xprt_connected(xprt))
                task->tk_status = -ENOTCONN;
-       else if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) {
+       else {
                /*
                 * Sleep on the pending queue since
                 * we're expecting a reply.
                 */
-               rpc_sleep_on(&xprt->pending, task, xprt_timer);
+               if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task))
+                       rpc_sleep_on(&xprt->pending, task, xprt_timer);
+               req->rq_connect_cookie = xprt->connect_cookie;
        }
        spin_unlock_bh(&xprt->transport_lock);
 }
@@ -1186,6 +1203,12 @@ static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
        req->rq_xid     = xprt_alloc_xid(xprt);
+       req->rq_connect_cookie = xprt->connect_cookie - 1;
+       req->rq_bytes_sent = 0;
+       req->rq_snd_buf.len = 0;
+       req->rq_snd_buf.buflen = 0;
+       req->rq_rcv_buf.len = 0;
+       req->rq_rcv_buf.buflen = 0;
        req->rq_release_snd_buf = NULL;
        xprt_reset_majortimeo(req);
        dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
index ee03d35677d962a3385d8d01a31968b70fa77b56..9928ba164d62ad344b1b8c487087561904fb381f 100644 (file)
@@ -1511,6 +1511,7 @@ static void xs_tcp_state_change(struct sock *sk)
                        transport->tcp_copied = 0;
                        transport->tcp_flags =
                                TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
+                       xprt->connect_cookie++;
 
                        xprt_wake_pending_tasks(xprt, -EAGAIN);
                }
@@ -2112,6 +2113,19 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 
        if (!transport->inet) {
                struct sock *sk = sock->sk;
+               unsigned int keepidle = xprt->timeout->to_initval / HZ;
+               unsigned int keepcnt = xprt->timeout->to_retries + 1;
+               unsigned int opt_on = 1;
+
+               /* TCP Keepalive options */
+               kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
+                               (char *)&opt_on, sizeof(opt_on));
+               kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
+                               (char *)&keepidle, sizeof(keepidle));
+               kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
+                               (char *)&keepidle, sizeof(keepidle));
+               kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
+                               (char *)&keepcnt, sizeof(keepcnt));
 
                write_lock_bh(&sk->sk_callback_lock);
 
@@ -2151,7 +2165,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
        case 0:
        case -EINPROGRESS:
                /* SYN_SENT! */
-               xprt->connect_cookie++;
                if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
                        xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
        }
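
The xs_tcp_finish_connecting() hunk turns on TCP keepalive for the transport socket and tunes it from the RPC timeout parameters via kernel_setsockopt(). The userspace equivalent uses the same socket options; an illustrative sketch (the values here are arbitrary, not the RPC-derived ones):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static int enable_keepalive(int fd, int idle_s, int intvl_s, int cnt)
    {
            int on = 1;

            if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) ||
                setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle_s, sizeof(idle_s)) ||
                setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl_s, sizeof(intvl_s)) ||
                setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)))
                    return -1;
            return 0;
    }

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            return fd >= 0 ? enable_keepalive(fd, 60, 60, 4) : 1;
    }
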
index 609c30c808165191a69899e750439666addc29f8..3f9707a16d0650d6367ad716ab2a110318a03e96 100644 (file)
@@ -387,7 +387,7 @@ restart:
 
        b_ptr = &tipc_bearers[bearer_id];
        strcpy(b_ptr->name, name);
-       res = m_ptr->enable_bearer(b_ptr);
+       res = m_ptr->enable_media(b_ptr);
        if (res) {
                pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
                        name, -res);
@@ -420,23 +420,15 @@ exit:
 }
 
 /**
- * tipc_block_bearer - Block the bearer with the given name, and reset all its links
+ * tipc_block_bearer - Block the bearer, and reset all its links
  */
-int tipc_block_bearer(const char *name)
+int tipc_block_bearer(struct tipc_bearer *b_ptr)
 {
-       struct tipc_bearer *b_ptr = NULL;
        struct tipc_link *l_ptr;
        struct tipc_link *temp_l_ptr;
 
        read_lock_bh(&tipc_net_lock);
-       b_ptr = tipc_bearer_find(name);
-       if (!b_ptr) {
-               pr_warn("Attempt to block unknown bearer <%s>\n", name);
-               read_unlock_bh(&tipc_net_lock);
-               return -EINVAL;
-       }
-
-       pr_info("Blocking bearer <%s>\n", name);
+       pr_info("Blocking bearer <%s>\n", b_ptr->name);
        spin_lock_bh(&b_ptr->lock);
        b_ptr->blocked = 1;
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
@@ -465,7 +457,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
        pr_info("Disabling bearer <%s>\n", b_ptr->name);
        spin_lock_bh(&b_ptr->lock);
        b_ptr->blocked = 1;
-       b_ptr->media->disable_bearer(b_ptr);
+       b_ptr->media->disable_media(b_ptr);
        list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
                tipc_link_delete(l_ptr);
        }
index 09c869adcfcf4fd7f3e6cac8423c4e3ccd7762c1..e5e04be6fffa3782c01f37db795c7f84c8b71793 100644 (file)
@@ -75,8 +75,8 @@ struct tipc_bearer;
 /**
  * struct tipc_media - TIPC media information available to internal users
  * @send_msg: routine which handles buffer transmission
- * @enable_bearer: routine which enables a bearer
- * @disable_bearer: routine which disables a bearer
+ * @enable_media: routine which enables a media
+ * @disable_media: routine which disables a media
  * @addr2str: routine which converts media address to string
  * @addr2msg: routine which converts media address to protocol message area
  * @msg2addr: routine which converts media address from protocol message area
@@ -91,8 +91,8 @@ struct tipc_media {
        int (*send_msg)(struct sk_buff *buf,
                        struct tipc_bearer *b_ptr,
                        struct tipc_media_addr *dest);
-       int (*enable_bearer)(struct tipc_bearer *b_ptr);
-       void (*disable_bearer)(struct tipc_bearer *b_ptr);
+       int (*enable_media)(struct tipc_bearer *b_ptr);
+       void (*disable_media)(struct tipc_bearer *b_ptr);
        int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size);
        int (*addr2msg)(struct tipc_media_addr *a, char *msg_area);
        int (*msg2addr)(const struct tipc_bearer *b_ptr,
@@ -163,7 +163,7 @@ int tipc_register_media(struct tipc_media *m_ptr);
 
 void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
 
-int  tipc_block_bearer(const char *name);
+int  tipc_block_bearer(struct tipc_bearer *b_ptr);
 void tipc_continue(struct tipc_bearer *tb_ptr);
 
 int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
index be72f8cebc536a425a79d99a5435fa10f8e58b89..94895d4e86abe249f4ef954d527e119647665c92 100644 (file)
@@ -90,21 +90,21 @@ extern int tipc_random __read_mostly;
 /*
  * Routines available to privileged subsystems
  */
-extern int tipc_core_start_net(unsigned long);
-extern int  tipc_handler_start(void);
-extern void tipc_handler_stop(void);
-extern int  tipc_netlink_start(void);
-extern void tipc_netlink_stop(void);
-extern int  tipc_socket_init(void);
-extern void tipc_socket_stop(void);
-extern int tipc_sock_create_local(int type, struct socket **res);
-extern void tipc_sock_release_local(struct socket *sock);
-extern int tipc_sock_accept_local(struct socket *sock,
-                                 struct socket **newsock, int flags);
+int tipc_core_start_net(unsigned long);
+int tipc_handler_start(void);
+void tipc_handler_stop(void);
+int tipc_netlink_start(void);
+void tipc_netlink_stop(void);
+int tipc_socket_init(void);
+void tipc_socket_stop(void);
+int tipc_sock_create_local(int type, struct socket **res);
+void tipc_sock_release_local(struct socket *sock);
+int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
+                          int flags);
 
 #ifdef CONFIG_SYSCTL
-extern int tipc_register_sysctl(void);
-extern void tipc_unregister_sysctl(void);
+int tipc_register_sysctl(void);
+void tipc_unregister_sysctl(void);
 #else
 #define tipc_register_sysctl() 0
 #define tipc_unregister_sysctl()
@@ -201,6 +201,6 @@ static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
        return (struct tipc_msg *)skb->data;
 }
 
-extern struct sk_buff *tipc_buf_acquire(u32 size);
+struct sk_buff *tipc_buf_acquire(u32 size);
 
 #endif
index 40ea40cf6204506f4a579f99d2a0c3d8e79652ca..f80d59f5a161c33199ef4d00494117b8115e2676 100644 (file)
@@ -2,7 +2,7 @@
  * net/tipc/eth_media.c: Ethernet bearer support for TIPC
  *
  * Copyright (c) 2001-2007, Ericsson AB
- * Copyright (c) 2005-2008, 2011, Wind River Systems
+ * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "core.h"
 #include "bearer.h"
 
-#define MAX_ETH_BEARERS                MAX_BEARERS
+#define MAX_ETH_MEDIA          MAX_BEARERS
 
 #define ETH_ADDR_OFFSET        4       /* message header offset of MAC address */
 
 /**
- * struct eth_bearer - Ethernet bearer data structure
+ * struct eth_media - Ethernet bearer data structure
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Ethernet network device
  * @tipc_packet_type: used in binding TIPC to Ethernet driver
  * @setup: work item used when enabling bearer
  * @cleanup: work item used when disabling bearer
  */
-struct eth_bearer {
+struct eth_media {
        struct tipc_bearer *bearer;
        struct net_device *dev;
        struct packet_type tipc_packet_type;
@@ -58,7 +58,7 @@ struct eth_bearer {
 };
 
 static struct tipc_media eth_media_info;
-static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+static struct eth_media eth_media_array[MAX_ETH_MEDIA];
 static int eth_started;
 
 static int recv_notification(struct notifier_block *nb, unsigned long evt,
@@ -100,7 +100,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
        if (!clone)
                return 0;
 
-       dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+       dev = ((struct eth_media *)(tb_ptr->usr_handle))->dev;
        delta = dev->hard_header_len - skb_headroom(buf);
 
        if ((delta > 0) &&
@@ -128,43 +128,43 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
 static int recv_msg(struct sk_buff *buf, struct net_device *dev,
                    struct packet_type *pt, struct net_device *orig_dev)
 {
-       struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
+       struct eth_media *eb_ptr = (struct eth_media *)pt->af_packet_priv;
 
        if (!net_eq(dev_net(dev), &init_net)) {
                kfree_skb(buf);
-               return 0;
+               return NET_RX_DROP;
        }
 
        if (likely(eb_ptr->bearer)) {
                if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
                        buf->next = NULL;
                        tipc_recv_msg(buf, eb_ptr->bearer);
-                       return 0;
+                       return NET_RX_SUCCESS;
                }
        }
        kfree_skb(buf);
-       return 0;
+       return NET_RX_DROP;
 }
 
 /**
- * setup_bearer - setup association between Ethernet bearer and interface
+ * setup_media - setup association between Ethernet bearer and interface
  */
-static void setup_bearer(struct work_struct *work)
+static void setup_media(struct work_struct *work)
 {
-       struct eth_bearer *eb_ptr =
-               container_of(work, struct eth_bearer, setup);
+       struct eth_media *eb_ptr =
+               container_of(work, struct eth_media, setup);
 
        dev_add_pack(&eb_ptr->tipc_packet_type);
 }
 
 /**
- * enable_bearer - attach TIPC bearer to an Ethernet interface
+ * enable_media - attach TIPC bearer to an Ethernet interface
  */
-static int enable_bearer(struct tipc_bearer *tb_ptr)
+static int enable_media(struct tipc_bearer *tb_ptr)
 {
        struct net_device *dev;
-       struct eth_bearer *eb_ptr = &eth_bearers[0];
-       struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+       struct eth_media *eb_ptr = &eth_media_array[0];
+       struct eth_media *stop = &eth_media_array[MAX_ETH_MEDIA];
        char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
        int pending_dev = 0;
 
@@ -188,7 +188,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
        eb_ptr->tipc_packet_type.func = recv_msg;
        eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
        INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
-       INIT_WORK(&eb_ptr->setup, setup_bearer);
+       INIT_WORK(&eb_ptr->setup, setup_media);
        schedule_work(&eb_ptr->setup);
 
        /* Associate TIPC bearer with Ethernet bearer */
@@ -205,14 +205,14 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
 }
 
 /**
- * cleanup_bearer - break association between Ethernet bearer and interface
+ * cleanup_media - break association between Ethernet bearer and interface
  *
  * This routine must be invoked from a work queue because it can sleep.
  */
-static void cleanup_bearer(struct work_struct *work)
+static void cleanup_media(struct work_struct *work)
 {
-       struct eth_bearer *eb_ptr =
-               container_of(work, struct eth_bearer, cleanup);
+       struct eth_media *eb_ptr =
+               container_of(work, struct eth_media, cleanup);
 
        dev_remove_pack(&eb_ptr->tipc_packet_type);
        dev_put(eb_ptr->dev);
@@ -220,18 +220,18 @@ static void cleanup_bearer(struct work_struct *work)
 }
 
 /**
- * disable_bearer - detach TIPC bearer from an Ethernet interface
+ * disable_media - detach TIPC bearer from an Ethernet interface
  *
  * Mark Ethernet bearer as inactive so that incoming buffers are thrown away,
  * then get worker thread to complete bearer cleanup.  (Can't do cleanup
  * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
-static void disable_bearer(struct tipc_bearer *tb_ptr)
+static void disable_media(struct tipc_bearer *tb_ptr)
 {
-       struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
+       struct eth_media *eb_ptr = (struct eth_media *)tb_ptr->usr_handle;
 
        eb_ptr->bearer = NULL;
-       INIT_WORK(&eb_ptr->cleanup, cleanup_bearer);
+       INIT_WORK(&eb_ptr->cleanup, cleanup_media);
        schedule_work(&eb_ptr->cleanup);
 }
 
@@ -245,8 +245,8 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
                             void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct eth_bearer *eb_ptr = &eth_bearers[0];
-       struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+       struct eth_media *eb_ptr = &eth_media_array[0];
+       struct eth_media *stop = &eth_media_array[MAX_ETH_MEDIA];
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
@@ -265,17 +265,17 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
                if (netif_carrier_ok(dev))
                        tipc_continue(eb_ptr->bearer);
                else
-                       tipc_block_bearer(eb_ptr->bearer->name);
+                       tipc_block_bearer(eb_ptr->bearer);
                break;
        case NETDEV_UP:
                tipc_continue(eb_ptr->bearer);
                break;
        case NETDEV_DOWN:
-               tipc_block_bearer(eb_ptr->bearer->name);
+               tipc_block_bearer(eb_ptr->bearer);
                break;
        case NETDEV_CHANGEMTU:
        case NETDEV_CHANGEADDR:
-               tipc_block_bearer(eb_ptr->bearer->name);
+               tipc_block_bearer(eb_ptr->bearer);
                tipc_continue(eb_ptr->bearer);
                break;
        case NETDEV_UNREGISTER:
@@ -327,8 +327,8 @@ static int eth_msg2addr(const struct tipc_bearer *tb_ptr,
  */
 static struct tipc_media eth_media_info = {
        .send_msg       = send_msg,
-       .enable_bearer  = enable_bearer,
-       .disable_bearer = disable_bearer,
+       .enable_media   = enable_media,
+       .disable_media  = disable_media,
        .addr2str       = eth_addr2str,
        .addr2msg       = eth_addr2msg,
        .msg2addr       = eth_msg2addr,
index 9934a32bfa877a8a8eae83d1fc74e7f31a7a8399..c139892974644925c4eca8e225e0c262b99cf4c2 100644 (file)
 #include "core.h"
 #include "bearer.h"
 
-#define MAX_IB_BEARERS         MAX_BEARERS
+#define MAX_IB_MEDIA           MAX_BEARERS
 
 /**
- * struct ib_bearer - Infiniband bearer data structure
+ * struct ib_media - Infiniband media data structure
  * @bearer: ptr to associated "generic" bearer structure
  * @dev: ptr to associated Infiniband network device
  * @tipc_packet_type: used in binding TIPC to Infiniband driver
  * @cleanup: work item used when disabling bearer
  */
 
-struct ib_bearer {
+struct ib_media {
        struct tipc_bearer *bearer;
        struct net_device *dev;
        struct packet_type tipc_packet_type;
@@ -61,7 +61,7 @@ struct ib_bearer {
 };
 
 static struct tipc_media ib_media_info;
-static struct ib_bearer ib_bearers[MAX_IB_BEARERS];
+static struct ib_media ib_media_array[MAX_IB_MEDIA];
 static int ib_started;
 
 /**
@@ -93,7 +93,7 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
        if (!clone)
                return 0;
 
-       dev = ((struct ib_bearer *)(tb_ptr->usr_handle))->dev;
+       dev = ((struct ib_media *)(tb_ptr->usr_handle))->dev;
        delta = dev->hard_header_len - skb_headroom(buf);
 
        if ((delta > 0) &&
@@ -121,43 +121,43 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
 static int recv_msg(struct sk_buff *buf, struct net_device *dev,
                    struct packet_type *pt, struct net_device *orig_dev)
 {
-       struct ib_bearer *ib_ptr = (struct ib_bearer *)pt->af_packet_priv;
+       struct ib_media *ib_ptr = (struct ib_media *)pt->af_packet_priv;
 
        if (!net_eq(dev_net(dev), &init_net)) {
                kfree_skb(buf);
-               return 0;
+               return NET_RX_DROP;
        }
 
        if (likely(ib_ptr->bearer)) {
                if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
                        buf->next = NULL;
                        tipc_recv_msg(buf, ib_ptr->bearer);
-                       return 0;
+                       return NET_RX_SUCCESS;
                }
        }
        kfree_skb(buf);
-       return 0;
+       return NET_RX_DROP;
 }
 
 /**
  * setup_bearer - setup association between InfiniBand bearer and interface
  */
-static void setup_bearer(struct work_struct *work)
+static void setup_media(struct work_struct *work)
 {
-       struct ib_bearer *ib_ptr =
-               container_of(work, struct ib_bearer, setup);
+       struct ib_media *ib_ptr =
+               container_of(work, struct ib_media, setup);
 
        dev_add_pack(&ib_ptr->tipc_packet_type);
 }
 
 /**
- * enable_bearer - attach TIPC bearer to an InfiniBand interface
+ * enable_media - attach TIPC bearer to an InfiniBand interface
  */
-static int enable_bearer(struct tipc_bearer *tb_ptr)
+static int enable_media(struct tipc_bearer *tb_ptr)
 {
        struct net_device *dev;
-       struct ib_bearer *ib_ptr = &ib_bearers[0];
-       struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS];
+       struct ib_media *ib_ptr = &ib_media_array[0];
+       struct ib_media *stop = &ib_media_array[MAX_IB_MEDIA];
        char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
        int pending_dev = 0;
 
@@ -181,7 +181,7 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
        ib_ptr->tipc_packet_type.func = recv_msg;
        ib_ptr->tipc_packet_type.af_packet_priv = ib_ptr;
        INIT_LIST_HEAD(&(ib_ptr->tipc_packet_type.list));
-       INIT_WORK(&ib_ptr->setup, setup_bearer);
+       INIT_WORK(&ib_ptr->setup, setup_media);
        schedule_work(&ib_ptr->setup);
 
        /* Associate TIPC bearer with InfiniBand bearer */
@@ -204,8 +204,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
  */
 static void cleanup_bearer(struct work_struct *work)
 {
-       struct ib_bearer *ib_ptr =
-               container_of(work, struct ib_bearer, cleanup);
+       struct ib_media *ib_ptr =
+               container_of(work, struct ib_media, cleanup);
 
        dev_remove_pack(&ib_ptr->tipc_packet_type);
        dev_put(ib_ptr->dev);
@@ -213,15 +213,15 @@ static void cleanup_bearer(struct work_struct *work)
 }
 
 /**
- * disable_bearer - detach TIPC bearer from an InfiniBand interface
+ * disable_media - detach TIPC bearer from an InfiniBand interface
  *
  * Mark InfiniBand bearer as inactive so that incoming buffers are thrown away,
  * then get worker thread to complete bearer cleanup.  (Can't do cleanup
  * here because cleanup code needs to sleep and caller holds spinlocks.)
  */
-static void disable_bearer(struct tipc_bearer *tb_ptr)
+static void disable_media(struct tipc_bearer *tb_ptr)
 {
-       struct ib_bearer *ib_ptr = (struct ib_bearer *)tb_ptr->usr_handle;
+       struct ib_media *ib_ptr = (struct ib_media *)tb_ptr->usr_handle;
 
        ib_ptr->bearer = NULL;
        INIT_WORK(&ib_ptr->cleanup, cleanup_bearer);
@@ -238,8 +238,8 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
                             void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct ib_bearer *ib_ptr = &ib_bearers[0];
-       struct ib_bearer *stop = &ib_bearers[MAX_IB_BEARERS];
+       struct ib_media *ib_ptr = &ib_media_array[0];
+       struct ib_media *stop = &ib_media_array[MAX_IB_MEDIA];
 
        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;
@@ -258,17 +258,17 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
                if (netif_carrier_ok(dev))
                        tipc_continue(ib_ptr->bearer);
                else
-                       tipc_block_bearer(ib_ptr->bearer->name);
+                       tipc_block_bearer(ib_ptr->bearer);
                break;
        case NETDEV_UP:
                tipc_continue(ib_ptr->bearer);
                break;
        case NETDEV_DOWN:
-               tipc_block_bearer(ib_ptr->bearer->name);
+               tipc_block_bearer(ib_ptr->bearer);
                break;
        case NETDEV_CHANGEMTU:
        case NETDEV_CHANGEADDR:
-               tipc_block_bearer(ib_ptr->bearer->name);
+               tipc_block_bearer(ib_ptr->bearer);
                tipc_continue(ib_ptr->bearer);
                break;
        case NETDEV_UNREGISTER:
@@ -323,8 +323,8 @@ static int ib_msg2addr(const struct tipc_bearer *tb_ptr,
  */
 static struct tipc_media ib_media_info = {
        .send_msg       = send_msg,
-       .enable_bearer  = enable_bearer,
-       .disable_bearer = disable_bearer,
+       .enable_media   = enable_media,
+       .disable_media  = disable_media,
        .addr2str       = ib_addr2str,
        .addr2msg       = ib_addr2msg,
        .msg2addr       = ib_msg2addr,
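
Both the Ethernet and InfiniBand media renames end in the same place: an ops table of function pointers (struct tipc_media) that the bearer core calls through, e.g. m_ptr->enable_media(b_ptr) in the bearer.c hunk earlier. A minimal userspace sketch of that designated-initializer ops pattern (hypothetical types and names, not the TIPC structures):

    #include <stdio.h>

    struct bearer;                      /* opaque in this sketch */

    struct media_ops {
            int  (*enable_media)(struct bearer *b);
            void (*disable_media)(struct bearer *b);
    };

    static int  eth_enable(struct bearer *b)  { (void)b; puts("enable");  return 0; }
    static void eth_disable(struct bearer *b) { (void)b; puts("disable"); }

    static const struct media_ops eth_ops = {
            .enable_media  = eth_enable,
            .disable_media = eth_disable,
    };

    int main(void)
    {
            eth_ops.enable_media(NULL);     /* the core calls through the table */
            eth_ops.disable_media(NULL);
            return 0;
    }
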
index 0cc3d9015c5d5bb6a1629251d7a636f41239db6f..e8153f64d2d6fb3c23d7f24f42e50ee2ea019df6 100644 (file)
@@ -75,20 +75,6 @@ static const char *link_unk_evt = "Unknown link event ";
  */
 #define START_CHANGEOVER 100000u
 
-/**
- * struct tipc_link_name - deconstructed link name
- * @addr_local: network address of node at this end
- * @if_local: name of interface at this end
- * @addr_peer: network address of node at far end
- * @if_peer: name of interface at far end
- */
-struct tipc_link_name {
-       u32 addr_local;
-       char if_local[TIPC_MAX_IF_NAME];
-       u32 addr_peer;
-       char if_peer[TIPC_MAX_IF_NAME];
-};
-
 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
                                       struct sk_buff *buf);
 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
@@ -97,8 +83,7 @@ static int  link_recv_changeover_msg(struct tipc_link **l_ptr,
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
 static int  link_send_sections_long(struct tipc_port *sender,
                                    struct iovec const *msg_sect,
-                                   u32 num_sect, unsigned int total_len,
-                                   u32 destnode);
+                                   unsigned int len, u32 destnode);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
@@ -160,72 +145,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
                (l_ptr->owner->active_links[1] == l_ptr);
 }
 
-/**
- * link_name_validate - validate & (optionally) deconstruct tipc_link name
- * @name: ptr to link name string
- * @name_parts: ptr to area for link name components (or NULL if not needed)
- *
- * Returns 1 if link name is valid, otherwise 0.
- */
-static int link_name_validate(const char *name,
-                               struct tipc_link_name *name_parts)
-{
-       char name_copy[TIPC_MAX_LINK_NAME];
-       char *addr_local;
-       char *if_local;
-       char *addr_peer;
-       char *if_peer;
-       char dummy;
-       u32 z_local, c_local, n_local;
-       u32 z_peer, c_peer, n_peer;
-       u32 if_local_len;
-       u32 if_peer_len;
-
-       /* copy link name & ensure length is OK */
-       name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
-       /* need above in case non-Posix strncpy() doesn't pad with nulls */
-       strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
-       if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
-               return 0;
-
-       /* ensure all component parts of link name are present */
-       addr_local = name_copy;
-       if_local = strchr(addr_local, ':');
-       if (if_local == NULL)
-               return 0;
-       *(if_local++) = 0;
-       addr_peer = strchr(if_local, '-');
-       if (addr_peer == NULL)
-               return 0;
-       *(addr_peer++) = 0;
-       if_local_len = addr_peer - if_local;
-       if_peer = strchr(addr_peer, ':');
-       if (if_peer == NULL)
-               return 0;
-       *(if_peer++) = 0;
-       if_peer_len = strlen(if_peer) + 1;
-
-       /* validate component parts of link name */
-       if ((sscanf(addr_local, "%u.%u.%u%c",
-                   &z_local, &c_local, &n_local, &dummy) != 3) ||
-           (sscanf(addr_peer, "%u.%u.%u%c",
-                   &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
-           (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
-           (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
-           (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
-           (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME))
-               return 0;
-
-       /* return link name components, if necessary */
-       if (name_parts) {
-               name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
-               strcpy(name_parts->if_local, if_local);
-               name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
-               strcpy(name_parts->if_peer, if_peer);
-       }
-       return 1;
-}
-
 /**
  * link_timeout - handle expiration of link timer
  * @l_ptr: pointer to link
@@ -1065,8 +984,7 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
  */
 int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 struct iovec const *msg_sect,
-                                const u32 num_sect, unsigned int total_len,
-                                u32 destaddr)
+                                unsigned int len, u32 destaddr)
 {
        struct tipc_msg *hdr = &sender->phdr;
        struct tipc_link *l_ptr;
@@ -1080,8 +998,7 @@ again:
         * Try building message using port's max_pkt hint.
         * (Must not hold any locks while building message.)
         */
-       res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
-                            sender->max_pkt, &buf);
+       res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
        /* Exit if build request was invalid */
        if (unlikely(res < 0))
                return res;
@@ -1121,8 +1038,7 @@ exit:
                        if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
                                goto again;
 
-                       return link_send_sections_long(sender, msg_sect,
-                                                      num_sect, total_len,
+                       return link_send_sections_long(sender, msg_sect, len,
                                                       destaddr);
                }
                tipc_node_unlock(node);
@@ -1133,8 +1049,8 @@ exit:
        if (buf)
                return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
        if (res >= 0)
-               return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
-                                                total_len, TIPC_ERR_NO_NODE);
+               return tipc_port_reject_sections(sender, hdr, msg_sect,
+                                                len, TIPC_ERR_NO_NODE);
        return res;
 }
 
@@ -1154,18 +1070,17 @@ exit:
  */
 static int link_send_sections_long(struct tipc_port *sender,
                                   struct iovec const *msg_sect,
-                                  u32 num_sect, unsigned int total_len,
-                                  u32 destaddr)
+                                  unsigned int len, u32 destaddr)
 {
        struct tipc_link *l_ptr;
        struct tipc_node *node;
        struct tipc_msg *hdr = &sender->phdr;
-       u32 dsz = total_len;
+       u32 dsz = len;
        u32 max_pkt, fragm_sz, rest;
        struct tipc_msg fragm_hdr;
        struct sk_buff *buf, *buf_chain, *prev;
        u32 fragm_crs, fragm_rest, hsz, sect_rest;
-       const unchar *sect_crs;
+       const unchar __user *sect_crs;
        int curr_sect;
        u32 fragm_no;
        int res = 0;
@@ -1207,7 +1122,7 @@ again:
 
                if (!sect_rest) {
                        sect_rest = msg_sect[++curr_sect].iov_len;
-                       sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
+                       sect_crs = msg_sect[curr_sect].iov_base;
                }
 
                if (sect_rest < fragm_rest)
@@ -1283,8 +1198,8 @@ reject:
                        buf = buf_chain->next;
                        kfree_skb(buf_chain);
                }
-               return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
-                                                total_len, TIPC_ERR_NO_NODE);
+               return tipc_port_reject_sections(sender, hdr, msg_sect,
+                                                len, TIPC_ERR_NO_NODE);
        }
 
        /* Append chain of fragments to send queue & send them */
@@ -2585,25 +2500,21 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
 static struct tipc_link *link_find_link(const char *name,
                                        struct tipc_node **node)
 {
-       struct tipc_link_name link_name_parts;
-       struct tipc_bearer *b_ptr;
        struct tipc_link *l_ptr;
+       struct tipc_node *n_ptr;
+       int i;
 
-       if (!link_name_validate(name, &link_name_parts))
-               return NULL;
-
-       b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
-       if (!b_ptr)
-               return NULL;
-
-       *node = tipc_node_find(link_name_parts.addr_peer);
-       if (!*node)
-               return NULL;
-
-       l_ptr = (*node)->links[b_ptr->identity];
-       if (!l_ptr || strcmp(l_ptr->name, name))
-               return NULL;
-
+       list_for_each_entry(n_ptr, &tipc_node_list, list) {
+               for (i = 0; i < MAX_BEARERS; i++) {
+                       l_ptr = n_ptr->links[i];
+                       if (l_ptr && !strcmp(l_ptr->name, name))
+                               goto found;
+               }
+       }
+       l_ptr = NULL;
+       n_ptr = NULL;
+found:
+       *node = n_ptr;
        return l_ptr;
 }
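
link_find_link() now walks every node's link array and matches on the full link name, so the deleted link_name_validate() parser above is no longer needed. For reference, a standalone sketch of the "Z.C.N:if-Z.C.N:if" split that the removed helper performed; the sample name and buffer size are illustrative, not taken from TIPC headers:

#include <stdio.h>
#include <string.h>

/* split "1.1.1:eth0-1.1.2:eth1" into its four parts, much as the removed
 * link_name_validate() did with strchr() and sscanf() */
static int parse_link_name(const char *name)
{
        char copy[64], dummy;
        unsigned int z1, c1, n1, z2, c2, n2;
        char *if_local, *addr_peer, *if_peer;

        if (strlen(name) >= sizeof(copy))
                return 0;
        strcpy(copy, name);

        if_local = strchr(copy, ':');
        if (!if_local)
                return 0;
        *if_local++ = '\0';
        addr_peer = strchr(if_local, '-');
        if (!addr_peer)
                return 0;
        *addr_peer++ = '\0';
        if_peer = strchr(addr_peer, ':');
        if (!if_peer)
                return 0;
        *if_peer++ = '\0';

        if (sscanf(copy, "%u.%u.%u%c", &z1, &c1, &n1, &dummy) != 3 ||
            sscanf(addr_peer, "%u.%u.%u%c", &z2, &c2, &n2, &dummy) != 3)
                return 0;

        printf("local <%u.%u.%u> on %s, peer <%u.%u.%u> on %s\n",
               z1, c1, n1, if_local, z2, c2, n2, if_peer);
        return 1;
}

int main(void)
{
        return parse_link_name("1.1.1:eth0-1.1.2:eth1") ? 0 : 1;
}

Matching on the stored full name trades the parse-and-hash lookup for a linear scan over MAX_BEARERS links per node, which is acceptable for a management-path helper.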
 
@@ -2646,6 +2557,7 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
        struct tipc_link *l_ptr;
        struct tipc_bearer *b_ptr;
        struct tipc_media *m_ptr;
+       int res = 0;
 
        l_ptr = link_find_link(name, &node);
        if (l_ptr) {
@@ -2668,9 +2580,12 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
                case TIPC_CMD_SET_LINK_WINDOW:
                        tipc_link_set_queue_limits(l_ptr, new_value);
                        break;
+               default:
+                       res = -EINVAL;
+                       break;
                }
                tipc_node_unlock(node);
-               return 0;
+               return res;
        }
 
        b_ptr = tipc_bearer_find(name);
@@ -2678,15 +2593,18 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
                switch (cmd) {
                case TIPC_CMD_SET_LINK_TOL:
                        b_ptr->tolerance = new_value;
-                       return 0;
+                       break;
                case TIPC_CMD_SET_LINK_PRI:
                        b_ptr->priority = new_value;
-                       return 0;
+                       break;
                case TIPC_CMD_SET_LINK_WINDOW:
                        b_ptr->window = new_value;
-                       return 0;
+                       break;
+               default:
+                       res = -EINVAL;
+                       break;
                }
-               return -EINVAL;
+               return res;
        }
 
        m_ptr = tipc_media_find(name);
@@ -2695,15 +2613,18 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
        switch (cmd) {
        case TIPC_CMD_SET_LINK_TOL:
                m_ptr->tolerance = new_value;
-               return 0;
+               break;
        case TIPC_CMD_SET_LINK_PRI:
                m_ptr->priority = new_value;
-               return 0;
+               break;
        case TIPC_CMD_SET_LINK_WINDOW:
                m_ptr->window = new_value;
-               return 0;
+               break;
+       default:
+               res = -EINVAL;
+               break;
        }
-       return -EINVAL;
+       return res;
 }
 
 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
index c048ed1cbd765aa3417c945d2913d0c994cb7d3f..55cf8554a08bd974000a67a116583a86e39bda0f 100644 (file)
@@ -227,9 +227,7 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
 int tipc_link_send_sections_fast(struct tipc_port *sender,
                                 struct iovec const *msg_sect,
-                                const u32 num_sect,
-                                unsigned int total_len,
-                                u32 destnode);
+                                unsigned int len, u32 destnode);
 void tipc_link_recv_bundle(struct sk_buff *buf);
 int  tipc_link_recv_fragment(struct sk_buff **pending,
                             struct sk_buff **fb,
index ced60e2fc4f7fbee929874594ee524c41835cd3e..e525f8ce1dee09ce0d9baf214bd3dd8e13daa9d8 100644 (file)
@@ -73,13 +73,13 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
  * Returns message data size or errno
  */
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-                  u32 num_sect, unsigned int total_len, int max_size,
-                  struct sk_buff **buf)
+                  unsigned int len, int max_size, struct sk_buff **buf)
 {
-       int dsz, sz, hsz, pos, res, cnt;
+       int dsz, sz, hsz;
+       unsigned char *to;
 
-       dsz = total_len;
-       pos = hsz = msg_hdr_sz(hdr);
+       dsz = len;
+       hsz = msg_hdr_sz(hdr);
        sz = hsz + dsz;
        msg_set_size(hdr, sz);
        if (unlikely(sz > max_size)) {
@@ -91,16 +91,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
        if (!(*buf))
                return -ENOMEM;
        skb_copy_to_linear_data(*buf, hdr, hsz);
-       for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
-               skb_copy_to_linear_data_offset(*buf, pos,
-                                              msg_sect[cnt].iov_base,
-                                              msg_sect[cnt].iov_len);
-               pos += msg_sect[cnt].iov_len;
+       to = (*buf)->data + hsz;
+       if (len && memcpy_fromiovecend(to, msg_sect, 0, dsz)) {
+               kfree_skb(*buf);
+               *buf = NULL;
+               return -EFAULT;
        }
-       if (likely(res))
-               return dsz;
-
-       kfree_skb(*buf);
-       *buf = NULL;
-       return -EFAULT;
+       return dsz;
 }
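
With this change tipc_msg_build() copies the whole payload in one memcpy_fromiovecend() call driven by a byte count, which is why its callers now pass only the iovec and a length instead of a section count plus total length. A rough userspace analog of that flattening step (not the kernel helper itself, whose real version also takes an offset and copies from user memory):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* copy 'len' bytes gathered from an iovec into 'to'; a rough userspace
 * analog of what the kernel helper does for the TIPC payload */
static void copy_from_iovec(unsigned char *to, const struct iovec *iov,
                            size_t len)
{
        while (len) {
                size_t chunk = iov->iov_len < len ? iov->iov_len : len;

                memcpy(to, iov->iov_base, chunk);
                to += chunk;
                len -= chunk;
                iov++;
        }
}

int main(void)
{
        char a[] = "hello ", b[] = "world";
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = 6 },
                { .iov_base = b, .iov_len = 5 },
        };
        unsigned char buf[16] = { 0 };

        copy_from_iovec(buf, iov, 11);
        printf("%s\n", (char *)buf);    /* prints "hello world" */
        return 0;
}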
index 5e4ccf5c27df0361aaf26df8023ba79f20057936..559b73a9bf352d9bb7b058a366b81341eae57f15 100644 (file)
@@ -722,6 +722,5 @@ u32 tipc_msg_tot_importance(struct tipc_msg *m);
 void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
                   u32 destnode);
 int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-                  u32 num_sect, unsigned int total_len, int max_size,
-                  struct sk_buff **buf);
+                  unsigned int len, int max_size, struct sk_buff **buf);
 #endif
index b3ed2fcab4fbd3a947b6419856641c9a7b315872..c081a7632302ca0798c193b039214466c3180184 100644 (file)
@@ -90,8 +90,7 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
  * tipc_multicast - send a multicast message to local and remote destinations
  */
 int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
-                  u32 num_sect, struct iovec const *msg_sect,
-                  unsigned int total_len)
+                  struct iovec const *msg_sect, unsigned int len)
 {
        struct tipc_msg *hdr;
        struct sk_buff *buf;
@@ -114,8 +113,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
        msg_set_namelower(hdr, seq->lower);
        msg_set_nameupper(hdr, seq->upper);
        msg_set_hdr_sz(hdr, MCAST_H_SIZE);
-       res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
-                            &buf);
+       res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
        if (unlikely(!buf))
                return res;
 
@@ -436,14 +434,13 @@ exit:
 }
 
 int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
-                             struct iovec const *msg_sect, u32 num_sect,
-                             unsigned int total_len, int err)
+                             struct iovec const *msg_sect, unsigned int len,
+                             int err)
 {
        struct sk_buff *buf;
        int res;
 
-       res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
-                            &buf);
+       res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
        if (!buf)
                return res;
 
@@ -918,15 +915,14 @@ int tipc_port_recv_msg(struct sk_buff *buf)
  *  tipc_port_recv_sections(): Concatenate and deliver sectioned
  *                        message for this node.
  */
-static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
+static int tipc_port_recv_sections(struct tipc_port *sender,
                                   struct iovec const *msg_sect,
-                                  unsigned int total_len)
+                                  unsigned int len)
 {
        struct sk_buff *buf;
        int res;
 
-       res = tipc_msg_build(&sender->phdr, msg_sect, num_sect, total_len,
-                            MAX_MSG_SIZE, &buf);
+       res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
        if (likely(buf))
                tipc_port_recv_msg(buf);
        return res;
@@ -935,8 +931,7 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se
 /**
  * tipc_send - send message sections on connection
  */
-int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
-             unsigned int total_len)
+int tipc_send(u32 ref, struct iovec const *msg_sect, unsigned int len)
 {
        struct tipc_port *p_ptr;
        u32 destnode;
@@ -950,11 +945,10 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
        if (!tipc_port_congested(p_ptr)) {
                destnode = port_peernode(p_ptr);
                if (likely(!in_own_node(destnode)))
-                       res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
-                                                          total_len, destnode);
+                       res = tipc_link_send_sections_fast(p_ptr, msg_sect,
+                                                          len, destnode);
                else
-                       res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
-                                                     total_len);
+                       res = tipc_port_recv_sections(p_ptr, msg_sect, len);
 
                if (likely(res != -ELINKCONG)) {
                        p_ptr->congested = 0;
@@ -965,7 +959,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
        }
        if (port_unreliable(p_ptr)) {
                p_ptr->congested = 0;
-               return total_len;
+               return len;
        }
        return -ELINKCONG;
 }
@@ -974,8 +968,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
  * tipc_send2name - send message sections to port name
  */
 int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
-                  unsigned int num_sect, struct iovec const *msg_sect,
-                  unsigned int total_len)
+                  struct iovec const *msg_sect, unsigned int len)
 {
        struct tipc_port *p_ptr;
        struct tipc_msg *msg;
@@ -999,36 +992,32 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
 
        if (likely(destport || destnode)) {
                if (likely(in_own_node(destnode)))
-                       res = tipc_port_recv_sections(p_ptr, num_sect,
-                                                     msg_sect, total_len);
+                       res = tipc_port_recv_sections(p_ptr, msg_sect, len);
                else if (tipc_own_addr)
                        res = tipc_link_send_sections_fast(p_ptr, msg_sect,
-                                                          num_sect, total_len,
-                                                          destnode);
+                                                          len, destnode);
                else
                        res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
-                                                       num_sect, total_len,
-                                                       TIPC_ERR_NO_NODE);
+                                                       len, TIPC_ERR_NO_NODE);
                if (likely(res != -ELINKCONG)) {
                        if (res > 0)
                                p_ptr->sent++;
                        return res;
                }
                if (port_unreliable(p_ptr)) {
-                       return total_len;
+                       return len;
                }
                return -ELINKCONG;
        }
-       return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
-                                        total_len, TIPC_ERR_NO_NAME);
+       return tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+                                        TIPC_ERR_NO_NAME);
 }
 
 /**
  * tipc_send2port - send message sections to port identity
  */
 int tipc_send2port(u32 ref, struct tipc_portid const *dest,
-                  unsigned int num_sect, struct iovec const *msg_sect,
-                  unsigned int total_len)
+                  struct iovec const *msg_sect, unsigned int len)
 {
        struct tipc_port *p_ptr;
        struct tipc_msg *msg;
@@ -1046,21 +1035,20 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
        msg_set_hdr_sz(msg, BASIC_H_SIZE);
 
        if (in_own_node(dest->node))
-               res =  tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
-                                              total_len);
+               res =  tipc_port_recv_sections(p_ptr, msg_sect, len);
        else if (tipc_own_addr)
-               res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
-                                                  total_len, dest->node);
+               res = tipc_link_send_sections_fast(p_ptr, msg_sect, len,
+                                                  dest->node);
        else
-               res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
-                                               total_len, TIPC_ERR_NO_NODE);
+               res = tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+                                               TIPC_ERR_NO_NODE);
        if (likely(res != -ELINKCONG)) {
                if (res > 0)
                        p_ptr->sent++;
                return res;
        }
        if (port_unreliable(p_ptr)) {
-               return total_len;
+               return len;
        }
        return -ELINKCONG;
 }
index 5a7026b9c3456b716dd8a173b345d11355f1b3be..9122535973430edff99e8ee6070a091975dea457 100644 (file)
@@ -151,24 +151,20 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
  * TIPC messaging routines
  */
 int tipc_port_recv_msg(struct sk_buff *buf);
-int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect,
-             unsigned int total_len);
+int tipc_send(u32 portref, struct iovec const *msg_sect, unsigned int len);
 
 int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
-                  unsigned int num_sect, struct iovec const *msg_sect,
-                  unsigned int total_len);
+                  struct iovec const *msg_sect, unsigned int len);
 
 int tipc_send2port(u32 portref, struct tipc_portid const *dest,
-                  unsigned int num_sect, struct iovec const *msg_sect,
-                  unsigned int total_len);
+                  struct iovec const *msg_sect, unsigned int len);
 
 int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
-                  unsigned int section_count, struct iovec const *msg,
-                  unsigned int total_len);
+                  struct iovec const *msg, unsigned int len);
 
 int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
-                             struct iovec const *msg_sect, u32 num_sect,
-                             unsigned int total_len, int err);
+                             struct iovec const *msg_sect, unsigned int len,
+                             int err);
 struct sk_buff *tipc_port_get_ports(void);
 void tipc_port_recv_proto_msg(struct sk_buff *buf);
 void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp);
index 6cc7ddd2fb7c9cd26eb3e7508adb9f4bc9093866..3906527259d19f2d18d06d3641eda0b755d34968 100644 (file)
@@ -338,7 +338,7 @@ static int release(struct socket *sock)
                buf = __skb_dequeue(&sk->sk_receive_queue);
                if (buf == NULL)
                        break;
-               if (TIPC_SKB_CB(buf)->handle != 0)
+               if (TIPC_SKB_CB(buf)->handle != NULL)
                        kfree_skb(buf);
                else {
                        if ((sock->state == SS_CONNECTING) ||
@@ -622,13 +622,11 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
                        res = tipc_send2name(tport->ref,
                                             &dest->addr.name.name,
                                             dest->addr.name.domain,
-                                            m->msg_iovlen,
                                             m->msg_iov,
                                             total_len);
                } else if (dest->addrtype == TIPC_ADDR_ID) {
                        res = tipc_send2port(tport->ref,
                                             &dest->addr.id,
-                                            m->msg_iovlen,
                                             m->msg_iov,
                                             total_len);
                } else if (dest->addrtype == TIPC_ADDR_MCAST) {
@@ -641,7 +639,6 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
                                break;
                        res = tipc_multicast(tport->ref,
                                             &dest->addr.nameseq,
-                                            m->msg_iovlen,
                                             m->msg_iov,
                                             total_len);
                }
@@ -707,8 +704,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
                        break;
                }
 
-               res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov,
-                               total_len);
+               res = tipc_send(tport->ref, m->msg_iov, total_len);
                if (likely(res != -ELINKCONG))
                        break;
                if (timeout_val <= 0L) {
@@ -1368,7 +1364,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
                return TIPC_ERR_OVERLOAD;
 
        /* Enqueue message */
-       TIPC_SKB_CB(buf)->handle = 0;
+       TIPC_SKB_CB(buf)->handle = NULL;
        __skb_queue_tail(&sk->sk_receive_queue, buf);
        skb_set_owner_r(buf, sk);
 
@@ -1691,7 +1687,7 @@ restart:
                /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
                buf = __skb_dequeue(&sk->sk_receive_queue);
                if (buf) {
-                       if (TIPC_SKB_CB(buf)->handle != 0) {
+                       if (TIPC_SKB_CB(buf)->handle != NULL) {
                                kfree_skb(buf);
                                goto restart;
                        }
index 86de99ad297605d04356f9efd7be174d2f7c7993..c1f403bed683ee8653356870f35457fe6e7f18eb 100644 (file)
@@ -1246,6 +1246,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
        return 0;
 }
 
+static void unix_sock_inherit_flags(const struct socket *old,
+                                   struct socket *new)
+{
+       if (test_bit(SOCK_PASSCRED, &old->flags))
+               set_bit(SOCK_PASSCRED, &new->flags);
+       if (test_bit(SOCK_PASSSEC, &old->flags))
+               set_bit(SOCK_PASSSEC, &new->flags);
+}
+
 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
 {
        struct sock *sk = sock->sk;
@@ -1280,6 +1289,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
        /* attach accepted sock to socket */
        unix_state_lock(tsk);
        newsock->state = SS_CONNECTED;
+       unix_sock_inherit_flags(sock, newsock);
        sock_graft(tsk, newsock);
        unix_state_unlock(tsk);
        return 0;
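
unix_sock_inherit_flags() makes a socket returned by accept() inherit SOCK_PASSCRED and SOCK_PASSSEC from the listener, so credential passing requested once on the listening socket keeps working on accepted connections. A minimal userspace illustration of the listener side; the socket path is illustrative and error handling is abbreviated:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
        int one = 1;
        struct sockaddr_un addr = {
                .sun_family = AF_UNIX,
                .sun_path   = "/tmp/demo.sock",         /* illustrative path */
        };
        int lfd = socket(AF_UNIX, SOCK_STREAM, 0);

        if (lfd < 0)
                return 1;

        /* request SCM_CREDENTIALS on the listener; with the inheritance
         * added above, sockets returned by accept() keep this setting */
        setsockopt(lfd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));

        unlink(addr.sun_path);
        if (bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
            listen(lfd, 1) < 0) {
                perror("bind/listen");
                close(lfd);
                return 1;
        }

        /* accept() and recvmsg() with ancillary data would follow here */
        close(lfd);
        unlink(addr.sun_path);
        return 0;
}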
index d591091603bfe4da1d6a87810287bdd5aaecc6da..86fa0f3b2cafa46d47db9cd03e46dfe89c22d238 100644 (file)
@@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
        rep->udiag_family = AF_UNIX;
        rep->udiag_type = sk->sk_type;
        rep->udiag_state = sk->sk_state;
+       rep->pad = 0;
        rep->udiag_ino = sk_ino;
        sock_diag_save_cookie(sk, rep->udiag_cookie);
 
index 1e743d2148565a1010114b468366b5285a56f02e..5dcd9c067bf0ac72a3c6e1067818053e31f7b748 100644 (file)
@@ -63,11 +63,11 @@ void __wimax_state_set(struct wimax_dev *wimax_dev, enum wimax_st state)
 {
        wimax_dev->state = state;
 }
-extern void __wimax_state_change(struct wimax_dev *, enum wimax_st);
+void __wimax_state_change(struct wimax_dev *, enum wimax_st);
 
 #ifdef CONFIG_DEBUG_FS
-extern int wimax_debugfs_add(struct wimax_dev *);
-extern void wimax_debugfs_rm(struct wimax_dev *);
+int wimax_debugfs_add(struct wimax_dev *);
+void wimax_debugfs_rm(struct wimax_dev *);
 #else
 static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev)
 {
@@ -76,13 +76,13 @@ static inline int wimax_debugfs_add(struct wimax_dev *wimax_dev)
 static inline void wimax_debugfs_rm(struct wimax_dev *wimax_dev) {}
 #endif
 
-extern void wimax_id_table_add(struct wimax_dev *);
-extern struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
-extern void wimax_id_table_rm(struct wimax_dev *);
-extern void wimax_id_table_release(void);
+void wimax_id_table_add(struct wimax_dev *);
+struct wimax_dev *wimax_dev_get_by_genl_info(struct genl_info *, int);
+void wimax_id_table_rm(struct wimax_dev *);
+void wimax_id_table_release(void);
 
-extern int wimax_rfkill_add(struct wimax_dev *);
-extern void wimax_rfkill_rm(struct wimax_dev *);
+int wimax_rfkill_add(struct wimax_dev *);
+void wimax_rfkill_rm(struct wimax_dev *);
 
 extern struct genl_family wimax_gnl_family;
 extern struct genl_multicast_group wimax_gnl_mcg;
index 50f6195c8b70b816ab35ade4075c5f71de105b35..16f3c3a7b2c17747230f62d1318ed152490eac34 100644 (file)
@@ -328,6 +328,7 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
        return cfg80211_get_chans_dfs_required(wiphy, chandef->center_freq2,
                                               width);
 }
+EXPORT_SYMBOL(cfg80211_chandef_dfs_required);
 
 static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
                                        u32 center_freq, u32 bandwidth,
index 67153964aad2059652ffd34d79c956585cc9c1e3..aff959e5a1b360e7cb467cade7f7d617544b3909 100644 (file)
@@ -566,18 +566,13 @@ int wiphy_register(struct wiphy *wiphy)
        /* check and set up bitrates */
        ieee80211_set_bitrate_flags(wiphy);
 
-
+       rtnl_lock();
        res = device_add(&rdev->wiphy.dev);
-       if (res)
-               return res;
-
-       res = rfkill_register(rdev->rfkill);
        if (res) {
-               device_del(&rdev->wiphy.dev);
+               rtnl_unlock();
                return res;
        }
 
-       rtnl_lock();
        /* set up regulatory info */
        wiphy_regulatory_register(wiphy);
 
@@ -606,6 +601,15 @@ int wiphy_register(struct wiphy *wiphy)
 
        rdev->wiphy.registered = true;
        rtnl_unlock();
+
+       res = rfkill_register(rdev->rfkill);
+       if (res) {
+               rfkill_destroy(rdev->rfkill);
+               rdev->rfkill = NULL;
+               wiphy_unregister(&rdev->wiphy);
+               return res;
+       }
+
        return 0;
 }
 EXPORT_SYMBOL(wiphy_register);
@@ -640,7 +644,8 @@ void wiphy_unregister(struct wiphy *wiphy)
                rtnl_unlock();
                __count == 0; }));
 
-       rfkill_unregister(rdev->rfkill);
+       if (rdev->rfkill)
+               rfkill_unregister(rdev->rfkill);
 
        rtnl_lock();
        rdev->wiphy.registered = false;
@@ -953,8 +958,6 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
        case NETDEV_PRE_UP:
                if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
                        return notifier_from_errno(-EOPNOTSUPP);
-               if (rfkill_blocked(rdev->rfkill))
-                       return notifier_from_errno(-ERFKILL);
                ret = cfg80211_can_add_interface(rdev, wdev->iftype);
                if (ret)
                        return notifier_from_errno(ret);
index 9ad43c619c54830f915193daa60eadef92b5bda5..af10e59af2d86f418bd7cc4e69914c9bb31831c7 100644 (file)
@@ -234,10 +234,10 @@ struct cfg80211_beacon_registration {
 };
 
 /* free object */
-extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
+void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
 
-extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
-                              char *newname);
+int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
+                       char *newname);
 
 void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
 
@@ -382,15 +382,6 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
                                 enum cfg80211_chan_mode chanmode,
                                 u8 radar_detect);
 
-/**
- * cfg80211_chandef_dfs_required - checks if radar detection is required
- * @wiphy: the wiphy to validate against
- * @chandef: the channel definition to check
- * Return: 1 if radar detection is required, 0 if it is not, < 0 on error
- */
-int cfg80211_chandef_dfs_required(struct wiphy *wiphy,
-                                 const struct cfg80211_chan_def *c);
-
 void cfg80211_set_dfs_state(struct wiphy *wiphy,
                            const struct cfg80211_chan_def *chandef,
                            enum nl80211_dfs_state dfs_state);
@@ -411,6 +402,9 @@ static inline int
 cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
                           enum nl80211_iftype iftype)
 {
+       if (rfkill_blocked(rdev->rfkill))
+               return -ERFKILL;
+
        return cfg80211_can_change_interface(rdev, NULL, iftype);
 }
 
index 90d0500366248900f2aebcac8d57f295d229d764..454157717efaf8654cae9cdee39e827381dd20c3 100644 (file)
@@ -47,17 +47,19 @@ static int ht_print_chan(struct ieee80211_channel *chan,
                return 0;
 
        if (chan->flags & IEEE80211_CHAN_DISABLED)
-               return snprintf(buf + offset,
-                               buf_size - offset,
-                               "%d Disabled\n",
-                               chan->center_freq);
-
-       return snprintf(buf + offset,
-                       buf_size - offset,
-                       "%d HT40 %c%c\n",
-                       chan->center_freq,
-                       (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-',
-                       (chan->flags & IEEE80211_CHAN_NO_HT40PLUS)  ? ' ' : '+');
+               return scnprintf(buf + offset,
+                                buf_size - offset,
+                                "%d Disabled\n",
+                                chan->center_freq);
+
+       return scnprintf(buf + offset,
+                        buf_size - offset,
+                        "%d HT40 %c%c\n",
+                        chan->center_freq,
+                        (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ?
+                               ' ' : '-',
+                        (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ?
+                               ' ' : '+');
 }
 
 static ssize_t ht40allow_map_read(struct file *file,
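
The switch to scnprintf() matters because snprintf() returns the length the output would have had, so accumulating its return value into an offset can run past the end of the buffer, while scnprintf() returns what was actually stored. Userspace libc has no scnprintf(), but the clamp is easy to sketch under that assumption:

#include <stdarg.h>
#include <stdio.h>

/* userspace stand-in for the kernel's scnprintf(): like snprintf(), but
 * returns the number of characters actually stored (excluding the NUL) */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (ret < 0)
                return 0;
        if ((size_t)ret >= size)
                return size ? (int)size - 1 : 0;
        return ret;
}

int main(void)
{
        char buf[8];
        size_t offset = 0;

        /* with plain snprintf() the second call could push 'offset' past
         * sizeof(buf); with the clamped return value it cannot */
        offset += scnprintf(buf + offset, sizeof(buf) - offset, "%d ", 2412);
        offset += scnprintf(buf + offset, sizeof(buf) - offset, "%s", "HT40 ++");
        printf("wrote %zu bytes: %s\n", offset, buf);
        return 0;
}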
index 9392f8cbb901d605769bbd6bb0fd048b3f884328..42ed274e81f4ca7debc59c4ae17137c35a0993d8 100644 (file)
@@ -46,6 +46,12 @@ BEGIN {
        sub(/:/, "", country)
        printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
        printf "\t.alpha2 = \"%s\",\n", country
+       if ($NF ~ /DFS-ETSI/)
+               printf "\t.dfs_region = NL80211_DFS_ETSI,\n"
+       else if ($NF ~ /DFS-FCC/)
+               printf "\t.dfs_region = NL80211_DFS_FCC,\n"
+       else if ($NF ~ /DFS-JP/)
+               printf "\t.dfs_region = NL80211_DFS_JP,\n"
        printf "\t.reg_rules = {\n"
        active = 1
        regdb = regdb "\t&regdom_" country ",\n"
index 39bff7d367687fbdd4d220d76e34d66e4e586a9c..403fe29c024db86f732b0c0d74b8d97d4cfc84bf 100644 (file)
@@ -263,6 +263,8 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
                                if (chan->flags & IEEE80211_CHAN_DISABLED)
                                        continue;
                                wdev->wext.ibss.chandef.chan = chan;
+                               wdev->wext.ibss.chandef.center_freq1 =
+                                       chan->center_freq;
                                break;
                        }
 
@@ -347,6 +349,7 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
        if (chan) {
                wdev->wext.ibss.chandef.chan = chan;
                wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+               wdev->wext.ibss.chandef.center_freq1 = freq;
                wdev->wext.ibss.channel_fixed = true;
        } else {
                /* cfg80211_ibss_wext_join will pick one if needed */
index af8d84a4a5b2a05fab2de732722e6535c8e699d0..cbbef88a8ebd125fc4797e4493968e5c67108771 100644 (file)
@@ -2421,7 +2421,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
                change = true;
        }
 
-       if (flags && (*flags & NL80211_MNTR_FLAG_ACTIVE) &&
+       if (flags && (*flags & MONITOR_FLAG_ACTIVE) &&
            !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
                return -EOPNOTSUPP;
 
@@ -2483,7 +2483,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
                                  info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
                                  &flags);
 
-       if (!err && (flags & NL80211_MNTR_FLAG_ACTIVE) &&
+       if (!err && (flags & MONITOR_FLAG_ACTIVE) &&
            !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR))
                return -EOPNOTSUPP;
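
The monitor-flag fix swaps NL80211_MNTR_FLAG_ACTIVE, which is an nl80211 attribute index, for MONITOR_FLAG_ACTIVE, the corresponding bit (cfg80211 defines MONITOR_FLAG_* as 1 shifted by the attribute index). Testing the raw index against a bitmask can match unrelated bits; a small sketch with abbreviated stand-in names:

#include <stdio.h>

/* abbreviated stand-ins for the nl80211 attribute indices ... */
enum mntr_flag_attr {
        MNTR_FLAG_INVALID,      /* 0 */
        MNTR_FLAG_FCSFAIL,      /* 1 */
        MNTR_FLAG_PLCPFAIL,     /* 2 */
        MNTR_FLAG_CONTROL,      /* 3 */
        MNTR_FLAG_OTHER_BSS,    /* 4 */
        MNTR_FLAG_COOK_FRAMES,  /* 5 */
        MNTR_FLAG_ACTIVE,       /* 6 */
};

/* ... and for the bitmask actually stored in 'flags' */
#define FLAG_FCSFAIL    (1U << MNTR_FLAG_FCSFAIL)
#define FLAG_ACTIVE     (1U << MNTR_FLAG_ACTIVE)

int main(void)
{
        unsigned int flags = FLAG_FCSFAIL;      /* ACTIVE is *not* set */

        /* buggy test: the index 6 (0b110) overlaps the FCSFAIL/PLCPFAIL bits */
        if (flags & MNTR_FLAG_ACTIVE)
                printf("enum test: spuriously reports 'active'\n");
        /* correct test against the bit */
        if (!(flags & FLAG_ACTIVE))
                printf("bit test: correctly not active\n");
        return 0;
}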
 
@@ -5591,6 +5591,9 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
        if (err)
                return err;
 
+       if (netif_carrier_ok(dev))
+               return -EBUSY;
+
        if (wdev->cac_started)
                return -EBUSY;
 
@@ -5634,15 +5637,26 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1];
        u8 radar_detect_width = 0;
        int err;
+       bool need_new_beacon = false;
 
        if (!rdev->ops->channel_switch ||
            !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
                return -EOPNOTSUPP;
 
-       /* may add IBSS support later */
-       if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-           dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+       switch (dev->ieee80211_ptr->iftype) {
+       case NL80211_IFTYPE_AP:
+       case NL80211_IFTYPE_P2P_GO:
+               need_new_beacon = true;
+
+               /* useless if AP is not running */
+               if (!wdev->beacon_interval)
+                       return -EINVAL;
+               break;
+       case NL80211_IFTYPE_ADHOC:
+               break;
+       default:
                return -EOPNOTSUPP;
+       }
 
        memset(&params, 0, sizeof(params));
 
@@ -5651,15 +5665,16 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
                return -EINVAL;
 
        /* only important for AP, IBSS and mesh create IEs internally */
-       if (!info->attrs[NL80211_ATTR_CSA_IES])
-               return -EINVAL;
-
-       /* useless if AP is not running */
-       if (!wdev->beacon_interval)
+       if (need_new_beacon &&
+           (!info->attrs[NL80211_ATTR_CSA_IES] ||
+            !info->attrs[NL80211_ATTR_CSA_C_OFF_BEACON]))
                return -EINVAL;
 
        params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
 
+       if (!need_new_beacon)
+               goto skip_beacons;
+
        err = nl80211_parse_beacon(info->attrs, &params.beacon_after);
        if (err)
                return err;
@@ -5699,6 +5714,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
        }
 
+skip_beacons:
        err = nl80211_parse_chandef(rdev, info, &params.chandef);
        if (err)
                return err;
@@ -5706,12 +5722,17 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
                return -EINVAL;
 
-       err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
-       if (err < 0) {
-               return err;
-       } else if (err) {
-               radar_detect_width = BIT(params.chandef.width);
-               params.radar_required = true;
+       /* DFS channels are only supported for AP/P2P GO ... for now. */
+       if (dev->ieee80211_ptr->iftype == NL80211_IFTYPE_AP ||
+           dev->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) {
+               err = cfg80211_chandef_dfs_required(wdev->wiphy,
+                                                   &params.chandef);
+               if (err < 0) {
+                       return err;
+               } else if (err) {
+                       radar_detect_width = BIT(params.chandef.width);
+                       params.radar_required = true;
+               }
        }
 
        err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
@@ -10740,7 +10761,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
        wdev_lock(wdev);
 
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
-                   wdev->iftype != NL80211_IFTYPE_P2P_GO))
+                   wdev->iftype != NL80211_IFTYPE_P2P_GO &&
+                   wdev->iftype != NL80211_IFTYPE_ADHOC))
                goto out;
 
        wdev->channel = chandef->chan;
index 7d604c06c3dc38d1155366a52f184971be1197e3..a271c27fac774ce987c0db6f1330ffbfca6dc7f7 100644 (file)
@@ -97,6 +97,10 @@ int ieee80211_radiotap_iterator_init(
        struct ieee80211_radiotap_header *radiotap_header,
        int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
 {
+       /* check the radiotap header can actually be present */
+       if (max_length < sizeof(struct ieee80211_radiotap_header))
+               return -EINVAL;
+
        /* Linux only supports version 0 radiotap format */
        if (radiotap_header->it_version)
                return -EINVAL;
@@ -131,7 +135,8 @@ int ieee80211_radiotap_iterator_init(
                         */
 
                        if ((unsigned long)iterator->_arg -
-                           (unsigned long)iterator->_rtheader >
+                           (unsigned long)iterator->_rtheader +
+                           sizeof(uint32_t) >
                            (unsigned long)iterator->_max_length)
                                return -EINVAL;
                }
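
The added length check refuses to parse a capture buffer that cannot even hold the fixed radiotap header (version, pad, length, present bitmap) before any of its fields are read. A self-contained sketch of the same sanity check on a raw buffer; the struct mirrors the radiotap header layout, and the sample buffers are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* fixed part of a radiotap header: 8 bytes */
struct radiotap_header {
        uint8_t  it_version;    /* always 0 today */
        uint8_t  it_pad;
        uint16_t it_len;        /* total header length, including fields */
        uint32_t it_present;    /* bitmap of present fields */
} __attribute__((packed));

static int radiotap_ok(const uint8_t *buf, size_t buflen)
{
        struct radiotap_header hdr;

        /* the check added above: refuse buffers too short for the header */
        if (buflen < sizeof(hdr))
                return 0;
        memcpy(&hdr, buf, sizeof(hdr));         /* avoid unaligned reads */

        if (hdr.it_version != 0)
                return 0;
        if (hdr.it_len > buflen)                /* header claims more than we have */
                return 0;
        return 1;
}

int main(void)
{
        struct radiotap_header h = { .it_version = 0, .it_len = sizeof(h) };
        uint8_t good[sizeof(h)], runt[4] = { 0 };

        memcpy(good, &h, sizeof(h));
        printf("runt: %d, good: %d\n",
               radiotap_ok(runt, sizeof(runt)),
               radiotap_ok(good, sizeof(good)));
        return 0;
}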
index de06d5d1287f97b6f6c8a1fd6b3e59371d80ae3e..a0ec143ba3dc3593f94fb71a25d649321af9aced 100644 (file)
@@ -172,11 +172,21 @@ static const struct ieee80211_regdomain world_regdom = {
                        NL80211_RRF_NO_IBSS |
                        NL80211_RRF_NO_OFDM),
                /* IEEE 802.11a, channel 36..48 */
-               REG_RULE(5180-10, 5240+10, 80, 6, 20,
+               REG_RULE(5180-10, 5240+10, 160, 6, 20,
                         NL80211_RRF_PASSIVE_SCAN |
                         NL80211_RRF_NO_IBSS),
 
-               /* NB: 5260 MHz - 5700 MHz requires DFS */
+               /* IEEE 802.11a, channel 52..64 - DFS required */
+               REG_RULE(5260-10, 5320+10, 160, 6, 20,
+                       NL80211_RRF_PASSIVE_SCAN |
+                       NL80211_RRF_NO_IBSS |
+                       NL80211_RRF_DFS),
+
+               /* IEEE 802.11a, channel 100..144 - DFS required */
+               REG_RULE(5500-10, 5720+10, 160, 6, 20,
+                       NL80211_RRF_PASSIVE_SCAN |
+                       NL80211_RRF_NO_IBSS |
+                       NL80211_RRF_DFS),
 
                /* IEEE 802.11a, channel 149..165 */
                REG_RULE(5745-10, 5825+10, 80, 6, 20,
@@ -758,23 +768,25 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
 }
 EXPORT_SYMBOL(freq_reg_info);
 
-#ifdef CONFIG_CFG80211_REG_DEBUG
-static const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
+const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
 {
        switch (initiator) {
        case NL80211_REGDOM_SET_BY_CORE:
-               return "Set by core";
+               return "core";
        case NL80211_REGDOM_SET_BY_USER:
-               return "Set by user";
+               return "user";
        case NL80211_REGDOM_SET_BY_DRIVER:
-               return "Set by driver";
+               return "driver";
        case NL80211_REGDOM_SET_BY_COUNTRY_IE:
-               return "Set by country IE";
+               return "country IE";
        default:
                WARN_ON(1);
-               return "Set by bug";
+               return "bug";
        }
 }
+EXPORT_SYMBOL(reg_initiator_name);
+
+#ifdef CONFIG_CFG80211_REG_DEBUG
 
 static void chan_reg_rule_print_dbg(struct ieee80211_channel *chan,
                                    const struct ieee80211_reg_rule *reg_rule)
@@ -969,14 +981,17 @@ static bool ignore_reg_update(struct wiphy *wiphy,
        struct regulatory_request *lr = get_last_request();
 
        if (!lr) {
-               REG_DBG_PRINT("Ignoring regulatory request %s since last_request is not set\n",
+               REG_DBG_PRINT("Ignoring regulatory request set by %s "
+                             "since last_request is not set\n",
                              reg_initiator_name(initiator));
                return true;
        }
 
        if (initiator == NL80211_REGDOM_SET_BY_CORE &&
            wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY) {
-               REG_DBG_PRINT("Ignoring regulatory request %s since the driver uses its own custom regulatory domain\n",
+               REG_DBG_PRINT("Ignoring regulatory request set by %s "
+                             "since the driver uses its own custom "
+                             "regulatory domain\n",
                              reg_initiator_name(initiator));
                return true;
        }
@@ -988,7 +1003,9 @@ static bool ignore_reg_update(struct wiphy *wiphy,
        if (wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY && !wiphy->regd &&
            initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
            !is_world_regdom(lr->alpha2)) {
-               REG_DBG_PRINT("Ignoring regulatory request %s since the driver requires its own regulatory domain to be set first\n",
+               REG_DBG_PRINT("Ignoring regulatory request set by %s "
+                             "since the driver requires its own regulatory "
+                             "domain to be set first\n",
                              reg_initiator_name(initiator));
                return true;
        }
index 65acbebd371127b5eeb70f5bc82a6c1028924c26..b533ed71dafff1d0b3f7c0a6049630d9e2cdda38 100644 (file)
@@ -1,8 +1,8 @@
 #ifndef __WIRELESS_SYSFS_H
 #define __WIRELESS_SYSFS_H
 
-extern int wiphy_sysfs_init(void);
-extern void wiphy_sysfs_exit(void);
+int wiphy_sysfs_init(void);
+void wiphy_sysfs_exit(void);
 
 extern struct class ieee80211_class;
 
index ce090c1c5e4fdb36459c4f6fde6b5af241a75f0e..3c8be6104ba407a9d0b5d997d8c2fe4d659a63e4 100644 (file)
@@ -10,6 +10,7 @@
 #include <net/cfg80211.h>
 #include <net/ip.h>
 #include <net/dsfield.h>
+#include <linux/if_vlan.h>
 #include "core.h"
 #include "rdev-ops.h"
 
@@ -691,6 +692,7 @@ EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
 unsigned int cfg80211_classify8021d(struct sk_buff *skb)
 {
        unsigned int dscp;
+       unsigned char vlan_priority;
 
        /* skb->priority values from 256->263 are magic values to
         * directly indicate a specific 802.1d priority.  This is used
@@ -700,6 +702,13 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb)
        if (skb->priority >= 256 && skb->priority <= 263)
                return skb->priority - 256;
 
+       if (vlan_tx_tag_present(skb)) {
+               vlan_priority = (vlan_tx_tag_get(skb) & VLAN_PRIO_MASK)
+                       >> VLAN_PRIO_SHIFT;
+               if (vlan_priority > 0)
+                       return vlan_priority;
+       }
+
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc;
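
cfg80211_classify8021d() now honours the 802.1Q priority carried in the VLAN tag before falling back to DSCP. The priority (PCP) is the top three bits of the 16-bit TCI, which is exactly what the VLAN_PRIO_MASK / VLAN_PRIO_SHIFT arithmetic extracts; a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* same values as the kernel's <linux/if_vlan.h> */
#define VLAN_PRIO_MASK  0xe000          /* top 3 bits of the TCI */
#define VLAN_PRIO_SHIFT 13

static unsigned int vlan_priority(uint16_t tci)
{
        return (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}

int main(void)
{
        /* TCI = PCP 5, DEI 0, VLAN ID 100 */
        uint16_t tci = (5u << VLAN_PRIO_SHIFT) | 100;

        printf("priority %u, vid %u\n", vlan_priority(tci), tci & 0x0fff);
        return 0;
}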
index 716502ada53ba00b1877c2bc6d140fd50915ee7a..0622d319e1f269c31e8a6af27e4c986832948534 100644 (file)
@@ -130,7 +130,7 @@ static inline unsigned int __addr_hash(const xfrm_address_t *daddr,
        return h & hmask;
 }
 
-extern struct hlist_head *xfrm_hash_alloc(unsigned int sz);
-extern void xfrm_hash_free(struct hlist_head *n, unsigned int sz);
+struct hlist_head *xfrm_hash_alloc(unsigned int sz);
+void xfrm_hash_free(struct hlist_head *n, unsigned int sz);
 
 #endif /* _XFRM_HASH_H */
index 2906d520eea7c2b7636fc94f3a60f5131701f57d..ccfdc7115a83f709e2a5980c5dc0d65cd5859467 100644 (file)
@@ -141,14 +141,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
        const int plen = skb->len;
        int dlen = IPCOMP_SCRATCH_SIZE;
        u8 *start = skb->data;
-       const int cpu = get_cpu();
-       u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
-       struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+       struct crypto_comp *tfm;
+       u8 *scratch;
        int err;
 
        local_bh_disable();
+       scratch = *this_cpu_ptr(ipcomp_scratches);
+       tfm = *this_cpu_ptr(ipcd->tfms);
        err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
-       local_bh_enable();
        if (err)
                goto out;
 
@@ -158,13 +158,13 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
        }
 
        memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
-       put_cpu();
+       local_bh_enable();
 
        pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
        return 0;
 
 out:
-       put_cpu();
+       local_bh_enable();
        return err;
 }
 
@@ -220,8 +220,8 @@ static void ipcomp_free_scratches(void)
 
 static void * __percpu *ipcomp_alloc_scratches(void)
 {
-       int i;
        void * __percpu *scratches;
+       int i;
 
        if (ipcomp_scratch_users++)
                return ipcomp_scratches;
@@ -233,7 +233,9 @@ static void * __percpu *ipcomp_alloc_scratches(void)
        ipcomp_scratches = scratches;
 
        for_each_possible_cpu(i) {
-               void *scratch = vmalloc(IPCOMP_SCRATCH_SIZE);
+               void *scratch;
+
+               scratch = vmalloc_node(IPCOMP_SCRATCH_SIZE, cpu_to_node(i));
                if (!scratch)
                        return NULL;
                *per_cpu_ptr(scratches, i) = scratch;
index ed38d5d81f9e1f4890cf36447713badd48691489..9a91f7431c411b706810a40bccc3167617c74aec 100644 (file)
@@ -334,7 +334,8 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
 
        atomic_inc(&policy->genid);
 
-       del_timer(&policy->polq.hold_timer);
+       if (del_timer(&policy->polq.hold_timer))
+               xfrm_pol_put(policy);
        xfrm_queue_purge(&policy->polq.hold_queue);
 
        if (del_timer(&policy->timer))
@@ -589,7 +590,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
 
        spin_lock_bh(&pq->hold_queue.lock);
        skb_queue_splice_init(&pq->hold_queue, &list);
-       del_timer(&pq->hold_timer);
+       if (del_timer(&pq->hold_timer))
+               xfrm_pol_put(old);
        spin_unlock_bh(&pq->hold_queue.lock);
 
        if (skb_queue_empty(&list))
@@ -600,7 +602,8 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
        spin_lock_bh(&pq->hold_queue.lock);
        skb_queue_splice(&list, &pq->hold_queue);
        pq->timeout = XFRM_QUEUE_TMO_MIN;
-       mod_timer(&pq->hold_timer, jiffies);
+       if (!mod_timer(&pq->hold_timer, jiffies))
+               xfrm_pol_hold(new);
        spin_unlock_bh(&pq->hold_queue.lock);
 }
 
@@ -1769,6 +1772,10 @@ static void xfrm_policy_queue_process(unsigned long arg)
 
        spin_lock(&pq->hold_queue.lock);
        skb = skb_peek(&pq->hold_queue);
+       if (!skb) {
+               spin_unlock(&pq->hold_queue.lock);
+               goto out;
+       }
        dst = skb_dst(skb);
        sk = skb->sk;
        xfrm_decode_session(skb, &fl, dst->ops->family);
@@ -1787,8 +1794,9 @@ static void xfrm_policy_queue_process(unsigned long arg)
                        goto purge_queue;
 
                pq->timeout = pq->timeout << 1;
-               mod_timer(&pq->hold_timer, jiffies + pq->timeout);
-               return;
+               if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
+                       xfrm_pol_hold(pol);
+               goto out;
        }
 
        dst_release(dst);
@@ -1819,11 +1827,14 @@ static void xfrm_policy_queue_process(unsigned long arg)
                err = dst_output(skb);
        }
 
+out:
+       xfrm_pol_put(pol);
        return;
 
 purge_queue:
        pq->timeout = 0;
        xfrm_queue_purge(&pq->hold_queue);
+       xfrm_pol_put(pol);
 }
 
 static int xdst_queue_output(struct sk_buff *skb)
@@ -1831,7 +1842,15 @@ static int xdst_queue_output(struct sk_buff *skb)
        unsigned long sched_next;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
-       struct xfrm_policy_queue *pq = &xdst->pols[0]->polq;
+       struct xfrm_policy *pol = xdst->pols[0];
+       struct xfrm_policy_queue *pq = &pol->polq;
+       const struct sk_buff *fclone = skb + 1;
+
+       if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
+                    fclone->fclone == SKB_FCLONE_CLONE)) {
+               kfree_skb(skb);
+               return 0;
+       }
 
        if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
                kfree_skb(skb);
@@ -1850,10 +1869,12 @@ static int xdst_queue_output(struct sk_buff *skb)
        if (del_timer(&pq->hold_timer)) {
                if (time_before(pq->hold_timer.expires, sched_next))
                        sched_next = pq->hold_timer.expires;
+               xfrm_pol_put(pol);
        }
 
        __skb_queue_tail(&pq->hold_queue, skb);
-       mod_timer(&pq->hold_timer, sched_next);
+       if (!mod_timer(&pq->hold_timer, sched_next))
+               xfrm_pol_hold(pol);
 
        spin_unlock_bh(&pq->hold_queue.lock);
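
The hold-timer changes above keep the policy refcount in step with whether the timer is pending: mod_timer() returns 0 when it arms a previously idle timer, so a reference is taken then, and del_timer() returns nonzero only when it actually deactivated a pending timer, so a reference is dropped then, with the expired callback dropping its own reference on every exit path. A toy model of that invariant using a plain counter and a fake timer (no kernel API involved; all names are made up):

#include <assert.h>
#include <stdio.h>

/* toy stand-ins: a refcount and a timer that is either pending or not */
static int refcount = 1;        /* the object starts with one owner */
static int timer_pending;

static int fake_mod_timer(void) /* returns old pending state, like mod_timer() */
{
        int was_pending = timer_pending;

        timer_pending = 1;
        return was_pending;
}

static int fake_del_timer(void) /* nonzero iff it deactivated a pending timer */
{
        int was_pending = timer_pending;

        timer_pending = 0;
        return was_pending;
}

int main(void)
{
        /* arm: take a reference only if the timer was not already pending */
        if (!fake_mod_timer())
                refcount++;
        /* re-arm: timer already pending, no extra reference */
        if (!fake_mod_timer())
                refcount++;
        /* cancel: drop the timer's reference only if we really stopped it */
        if (fake_del_timer())
                refcount--;

        assert(refcount == 1);  /* back to the single original owner */
        printf("balanced, refcount = %d\n", refcount);
        return 0;
}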
 
index 8dafe6d3c6e41ebd20e5d0347d4ab9b0e6326a34..dab57daae40856030790fa8070068a59d82220af 100644 (file)
@@ -61,9 +61,9 @@ static void xfrm_replay_notify(struct xfrm_state *x, int event)
 
        switch (event) {
        case XFRM_REPLAY_UPDATE:
-               if (x->replay_maxdiff &&
-                   (x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
-                   (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff)) {
+               if (!x->replay_maxdiff ||
+                   ((x->replay.seq - x->preplay.seq < x->replay_maxdiff) &&
+                   (x->replay.oseq - x->preplay.oseq < x->replay_maxdiff))) {
                        if (x->xflags & XFRM_TIME_DEFER)
                                event = XFRM_REPLAY_TIMEOUT;
                        else
@@ -129,8 +129,7 @@ static int xfrm_replay_check(struct xfrm_state *x,
                return 0;
 
        diff = x->replay.seq - seq;
-       if (diff >= min_t(unsigned int, x->props.replay_window,
-                         sizeof(x->replay.bitmap) * 8)) {
+       if (diff >= x->props.replay_window) {
                x->stats.replay_window++;
                goto err;
        }
@@ -302,9 +301,10 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
 
        switch (event) {
        case XFRM_REPLAY_UPDATE:
-               if (x->replay_maxdiff &&
-                   (replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
-                   (replay_esn->oseq - preplay_esn->oseq < x->replay_maxdiff)) {
+               if (!x->replay_maxdiff ||
+                   ((replay_esn->seq - preplay_esn->seq < x->replay_maxdiff) &&
+                   (replay_esn->oseq - preplay_esn->oseq
+                    < x->replay_maxdiff))) {
                        if (x->xflags & XFRM_TIME_DEFER)
                                event = XFRM_REPLAY_TIMEOUT;
                        else
@@ -353,28 +353,30 @@ static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
 
        switch (event) {
        case XFRM_REPLAY_UPDATE:
-               if (!x->replay_maxdiff)
-                       break;
-
-               if (replay_esn->seq_hi == preplay_esn->seq_hi)
-                       seq_diff = replay_esn->seq - preplay_esn->seq;
-               else
-                       seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
-
-               if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
-                       oseq_diff = replay_esn->oseq - preplay_esn->oseq;
-               else
-                       oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
-
-               if (seq_diff < x->replay_maxdiff &&
-                   oseq_diff < x->replay_maxdiff) {
+               if (x->replay_maxdiff) {
+                       if (replay_esn->seq_hi == preplay_esn->seq_hi)
+                               seq_diff = replay_esn->seq - preplay_esn->seq;
+                       else
+                               seq_diff = ~preplay_esn->seq + replay_esn->seq
+                                          + 1;
 
-                       if (x->xflags & XFRM_TIME_DEFER)
-                               event = XFRM_REPLAY_TIMEOUT;
+                       if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
+                               oseq_diff = replay_esn->oseq
+                                           - preplay_esn->oseq;
                        else
-                               return;
+                               oseq_diff = ~preplay_esn->oseq
+                                           + replay_esn->oseq + 1;
+
+                       if (seq_diff >= x->replay_maxdiff ||
+                           oseq_diff >= x->replay_maxdiff)
+                               break;
                }
 
+               if (x->xflags & XFRM_TIME_DEFER)
+                       event = XFRM_REPLAY_TIMEOUT;
+               else
+                       return;
+
                break;
 
        case XFRM_REPLAY_TIMEOUT:
index b9c3f9e943a9159d1617feec49c751055ea4dd55..68c2f357a18389d9debc35401afe1295c4341959 100644 (file)
@@ -468,7 +468,7 @@ expired:
        }
 
        err = __xfrm_state_delete(x);
-       if (!err && x->id.spi)
+       if (!err)
                km_state_expired(x, 1, 0);
 
        xfrm_audit_state_delete(x, err ? 0 : 1,
@@ -815,7 +815,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                        xfrm_state_look_at(pol, x, fl, encap_family,
                                           &best, &acquire_in_progress, &error);
        }
-       if (best)
+       if (best || acquire_in_progress)
                goto found;
 
        h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
@@ -824,7 +824,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
                    !(x->props.flags & XFRM_STATE_WILDRECV) &&
-                   xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
+                   xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
                    tmpl->mode == x->props.mode &&
                    tmpl->id.proto == x->id.proto &&
                    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
index 3f565e495ac68cea83e1d52cf7db7e820fe777ad..f964d4c00ffb53457aa46b24f0225249c1d46b7c 100644 (file)
@@ -446,7 +446,8 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
        memcpy(&x->sel, &p->sel, sizeof(x->sel));
        memcpy(&x->lft, &p->lft, sizeof(x->lft));
        x->props.mode = p->mode;
-       x->props.replay_window = p->replay_window;
+       x->props.replay_window = min_t(unsigned int, p->replay_window,
+                                       sizeof(x->replay.bitmap) * 8);
        x->props.reqid = p->reqid;
        x->props.family = p->family;
        memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
@@ -1856,7 +1857,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
        if (x->km.state != XFRM_STATE_VALID)
                goto out;
 
-       err = xfrm_replay_verify_len(x->replay_esn, rp);
+       err = xfrm_replay_verify_len(x->replay_esn, re);
        if (err)
                goto out;
 
index 8dcdca27d8360107f83c2fdfc70087f9464ccedd..69f0a1417e9a47669f5568af2097f031c247e607 100644 (file)
@@ -79,9 +79,11 @@ modpost = scripts/mod/modpost                    \
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
  $(if $(KBUILD_EXTMOD)$(KBUILD_MODPOST_WARN),-w)
 
+MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS)))
+
 # We can go over command line length here, so be careful.
 quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
-      cmd_modpost = $(MODLISTCMD) | sed 's/\.ko$$/.o/' | $(modpost) -s -T -
+      cmd_modpost = $(MODLISTCMD) | sed 's/\.ko$$/.o/' | $(modpost) $(MODPOST_OPT) -s -T -
 
 PHONY += __modpost
 __modpost: $(modules:.ko=.o) FORCE
diff --git a/scripts/coccinelle/api/devm_request_and_ioremap.cocci b/scripts/coccinelle/api/devm_request_and_ioremap.cocci
deleted file mode 100644 (file)
index 562ec88..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-/// Reimplement a call to devm_request_mem_region followed by a call to ioremap
-/// or ioremap_nocache by a call to devm_request_and_ioremap.
-/// Devm_request_and_ioremap was introduced in
-/// 72f8c0bfa0de64c68ee59f40eb9b2683bffffbb0.  It makes the code much more
-/// concise.
-///
-///
-// Confidence: High
-// Copyright: (C) 2011 Julia Lawall, INRIA/LIP6.  GPLv2.
-// Copyright: (C) 2011 Gilles Muller, INRIA/LiP6.  GPLv2.
-// URL: http://coccinelle.lip6.fr/
-// Comments:
-// Options: --no-includes --include-headers
-
-virtual patch
-virtual org
-virtual report
-virtual context
-
-@nm@
-expression myname;
-identifier i;
-@@
-
-struct platform_driver i = { .driver = { .name = myname } };
-
-@depends on patch@
-expression dev,res,size;
-@@
-
--if (!devm_request_mem_region(dev, res->start, size,
--                              \(res->name\|dev_name(dev)\))) {
--   ...
--   return ...;
--}
-... when != res->start
-(
--devm_ioremap(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-|
--devm_ioremap_nocache(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-)
-... when any
-    when != res->start
-
-// this rule is separate from the previous one, because a single file can
-// have multiple values of myname
-@depends on patch@
-expression dev,res,size;
-expression nm.myname;
-@@
-
--if (!devm_request_mem_region(dev, res->start, size,myname)) {
--   ...
--   return ...;
--}
-... when != res->start
-(
--devm_ioremap(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-|
--devm_ioremap_nocache(dev,res->start,size)
-+devm_request_and_ioremap(dev,res)
-)
-... when any
-    when != res->start
-
-
-@pb depends on org || report || context@
-expression dev,res,size;
-expression nm.myname;
-position p1,p2;
-@@
-
-*if
-  (!devm_request_mem_region@p1(dev, res->start, size,
-                              \(res->name\|dev_name(dev)\|myname\))) {
-   ...
-   return ...;
-}
-... when != res->start
-(
-*devm_ioremap@p2(dev,res->start,size)
-|
-*devm_ioremap_nocache@p2(dev,res->start,size)
-)
-... when any
-    when != res->start
-
-@script:python depends on org@
-p1 << pb.p1;
-p2 << pb.p2;
-@@
-
-cocci.print_main("INFO: replace by devm_request_and_ioremap",p1)
-cocci.print_secs("",p2)
-
-@script:python depends on report@
-p1 << pb.p1;
-p2 << pb.p2;
-@@
-
-msg = "INFO: devm_request_mem_region followed by ioremap on line %s can be replaced by devm_request_and_ioremap" % (p2[0].line)
-coccilib.report.print_report(p1[0],msg)
index 8247979e8f64dd2eef5671544d3b39e8ed3487fc..393706b3777430b0f4f4b29bd814d5602c655421 100644 (file)
@@ -17,6 +17,7 @@
 #include <string.h>
 #include <limits.h>
 #include <stdbool.h>
+#include <errno.h>
 #include "modpost.h"
 #include "../../include/generated/autoconf.h"
 #include "../../include/linux/license.h"
@@ -37,6 +38,8 @@ static int warn_unresolved = 0;
 /* How a symbol is exported */
 static int sec_mismatch_count = 0;
 static int sec_mismatch_verbose = 1;
+/* ignore missing files */
+static int ignore_missing_files;
 
 enum export {
        export_plain,      export_unused,     export_gpl,
@@ -407,6 +410,11 @@ static int parse_elf(struct elf_info *info, const char *filename)
 
        hdr = grab_file(filename, &info->size);
        if (!hdr) {
+               if (ignore_missing_files) {
+                       fprintf(stderr, "%s: %s (ignored)\n", filename,
+                               strerror(errno));
+                       return 0;
+               }
                perror(filename);
                exit(1);
        }
@@ -2119,7 +2127,7 @@ int main(int argc, char **argv)
        struct ext_sym_list *extsym_iter;
        struct ext_sym_list *extsym_start = NULL;
 
-       while ((opt = getopt(argc, argv, "i:I:e:msST:o:awM:K:")) != -1) {
+       while ((opt = getopt(argc, argv, "i:I:e:mnsST:o:awM:K:")) != -1) {
                switch (opt) {
                case 'i':
                        kernel_read = optarg;
@@ -2139,6 +2147,9 @@ int main(int argc, char **argv)
                case 'm':
                        modversions = 1;
                        break;
+               case 'n':
+                       ignore_missing_files = 1;
+                       break;
                case 'o':
                        dump_write = optarg;
                        break;
index 17df3051747a93ee68a75255764408c813f1a14e..e25732b5d701127d904da3b608b825a520fce3ca 100755 (executable)
@@ -13,7 +13,7 @@ import sys
 import string
 
 def usage():
-       print """usage: show_delta [<options>] <filename>
+       print ("""usage: show_delta [<options>] <filename>
 
 This program parses the output from a set of printk message lines which
 have time data prefixed because the CONFIG_PRINTK_TIME option is set, or
@@ -35,7 +35,7 @@ ex: $ dmesg >timefile
 
 will show times relative to the line in the kernel output
 starting with "NET4".
-"""
+""")
        sys.exit(1)
 
 # returns a tuple containing the seconds and text for each message line
@@ -94,11 +94,11 @@ def main():
        try:
                lines = open(filein,"r").readlines()
        except:
-               print "Problem opening file: %s" % filein
+               print ("Problem opening file: %s" % filein)
                sys.exit(1)
 
        if base_str:
-               print 'base= "%s"' % base_str
+               print ('base= "%s"' % base_str)
                # assume a numeric base.  If that fails, try searching
                # for a matching line.
                try:
@@ -117,13 +117,13 @@ def main():
                                        # stop at first match
                                        break
                        if not found:
-                               print 'Couldn\'t find line matching base pattern "%s"' % base_str
+                               print ('Couldn\'t find line matching base pattern "%s"' % base_str)
                                sys.exit(1)
        else:
                base_time = 0.0
 
        for line in lines:
-               print convert_line(line, base_time),
+               print (convert_line(line, base_time),)
 
 main()
 
index 74f02e4dddd29eca8894fe409984ae525d1e2e96..f1bcfc11cc72e366217041ea05039b397e0b8468 100755 (executable)
@@ -151,13 +151,14 @@ exuberant()
        all_target_sources | xargs $1 -a                        \
        -I __initdata,__exitdata,__initconst,__devinitdata      \
        -I __devinitconst,__cpuinitdata,__initdata_memblock     \
-       -I __refdata,__attribute                                \
+       -I __refdata,__attribute,__maybe_unused,__always_unused \
        -I __acquires,__releases,__deprecated                   \
        -I __read_mostly,__aligned,____cacheline_aligned        \
        -I ____cacheline_aligned_in_smp                         \
+       -I __cacheline_aligned,__cacheline_aligned_in_smp       \
        -I ____cacheline_internodealigned_in_smp                \
        -I __used,__packed,__packed2__,__must_check,__must_hold \
-       -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL                      \
+       -I EXPORT_SYMBOL,EXPORT_SYMBOL_GPL,ACPI_EXPORT_SYMBOL   \
        -I DEFINE_TRACE,EXPORT_TRACEPOINT_SYMBOL,EXPORT_TRACEPOINT_SYMBOL_GPL \
        -I static,const                                         \
        --extra=+f --c-kinds=+px                                \
index 95c2b2689a03aa521bc923323f521a8c191d107d..7db9954f1af2c56c42400746f367f5f4d9906ca8 100644 (file)
@@ -580,15 +580,13 @@ static struct aa_namespace *__next_namespace(struct aa_namespace *root,
 
        /* check if the next ns is a sibling, parent, gp, .. */
        parent = ns->parent;
-       while (parent) {
+       while (ns != root) {
                mutex_unlock(&ns->lock);
                next = list_entry_next(ns, base.list);
                if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
                        mutex_lock(&next->lock);
                        return next;
                }
-               if (parent == root)
-                       return NULL;
                ns = parent;
                parent = parent->parent;
        }
index 345bec07a27d9292dbfc1306d1f5e12563eaa891..705c2879d3a94a79e10d1190468483c2f9d7192a 100644 (file)
@@ -610,6 +610,7 @@ void aa_free_profile(struct aa_profile *profile)
        aa_put_dfa(profile->policy.dfa);
        aa_put_replacedby(profile->replacedby);
 
+       kzfree(profile->hash);
        kzfree(profile);
 }
 
index 8d8d97dbb389b1d305352cee29e4dbcc0191af64..234bc2ab450c61b42b1db2b53f631ab72bc48a39 100644 (file)
@@ -302,18 +302,19 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                                                "faddr", "fport");
                                break;
                        }
+#if IS_ENABLED(CONFIG_IPV6)
                        case AF_INET6: {
                                struct inet_sock *inet = inet_sk(sk);
-                               struct ipv6_pinfo *inet6 = inet6_sk(sk);
 
-                               print_ipv6_addr(ab, &inet6->rcv_saddr,
+                               print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
                                                inet->inet_sport,
                                                "laddr", "lport");
-                               print_ipv6_addr(ab, &inet6->daddr,
+                               print_ipv6_addr(ab, &sk->sk_v6_daddr,
                                                inet->inet_dport,
                                                "faddr", "fport");
                                break;
                        }
+#endif
                        case AF_UNIX:
                                u = unix_sk(sk);
                                if (u->path.dentry) {
index 5b5231068516e46bb529efb3df38051124d2ef85..c540795fb3f2647619cb4705281872e93592e21e 100644 (file)
@@ -3928,7 +3928,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
                if (snum) {
                        int low, high;
 
-                       inet_get_local_port_range(&low, &high);
+                       inet_get_local_port_range(sock_net(sk), &low, &high);
 
                        if (snum < max(PROT_SOCK, low) || snum > high) {
                                err = sel_netport_sid(sk->sk_protocol,
@@ -4667,7 +4667,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
        return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_forward(unsigned int hooknum,
+static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
                                         const struct net_device *in,
                                         const struct net_device *out,
@@ -4677,7 +4677,7 @@ static unsigned int selinux_ipv4_forward(unsigned int hooknum,
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_forward(unsigned int hooknum,
+static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
                                         struct sk_buff *skb,
                                         const struct net_device *in,
                                         const struct net_device *out,
@@ -4709,7 +4709,7 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
        return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_output(unsigned int hooknum,
+static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
                                        struct sk_buff *skb,
                                        const struct net_device *in,
                                        const struct net_device *out,
@@ -4836,7 +4836,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
        return NF_ACCEPT;
 }
 
-static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
                                           const struct net_device *in,
                                           const struct net_device *out,
@@ -4846,7 +4846,7 @@ static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
                                           struct sk_buff *skb,
                                           const struct net_device *in,
                                           const struct net_device *out,
index e6f4633b8dd5b6b9cdfebc6409fc6db279393aed..99a466822a7d9eb3d73dd56da74911c3cb3d0173 100644 (file)
@@ -117,8 +117,7 @@ static inline void pxa_ac97_warm_pxa25x(void)
 {
        gsr_bits = 0;
 
-       GCR |= GCR_WARM_RST | GCR_PRIRDY_IEN | GCR_SECRDY_IEN;
-       wait_event_timeout(gsr_wq, gsr_bits & (GSR_PCR | GSR_SCR), 1);
+       GCR |= GCR_WARM_RST;
 }
 
 static inline void pxa_ac97_cold_pxa25x(void)
@@ -129,8 +128,6 @@ static inline void pxa_ac97_cold_pxa25x(void)
        gsr_bits = 0;
 
        GCR = GCR_COLD_RST;
-       GCR |= GCR_CDONE_IE|GCR_SDONE_IE;
-       wait_event_timeout(gsr_wq, gsr_bits & (GSR_PCR | GSR_SCR), 1);
 }
 #endif
 
@@ -149,8 +146,6 @@ static inline void pxa_ac97_warm_pxa27x(void)
 
 static inline void pxa_ac97_cold_pxa27x(void)
 {
-       unsigned int timeout;
-
        GCR &=  GCR_COLD_RST;  /* clear everything but nCRST */
        GCR &= ~GCR_COLD_RST;  /* then assert nCRST */
 
@@ -161,29 +156,20 @@ static inline void pxa_ac97_cold_pxa27x(void)
        udelay(5);
        clk_disable(ac97conf_clk);
        GCR = GCR_COLD_RST | GCR_WARM_RST;
-       timeout = 100;     /* wait for the codec-ready bit to be set */
-       while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
-               mdelay(1);
 }
 #endif
 
 #ifdef CONFIG_PXA3xx
 static inline void pxa_ac97_warm_pxa3xx(void)
 {
-       int timeout = 100;
-
        gsr_bits = 0;
 
        /* Can't use interrupts */
        GCR |= GCR_WARM_RST;
-       while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
-               mdelay(1);
 }
 
 static inline void pxa_ac97_cold_pxa3xx(void)
 {
-       int timeout = 1000;
-
        /* Hold CLKBPB for 100us */
        GCR = 0;
        GCR = GCR_CLKBPB;
@@ -199,14 +185,13 @@ static inline void pxa_ac97_cold_pxa3xx(void)
        GCR &= ~(GCR_PRIRDY_IEN|GCR_SECRDY_IEN);
 
        GCR = GCR_WARM_RST | GCR_COLD_RST;
-       while (!(GSR & (GSR_PCR | GSR_SCR)) && timeout--)
-               mdelay(10);
 }
 #endif
 
 bool pxa2xx_ac97_try_warm_reset(struct snd_ac97 *ac97)
 {
        unsigned long gsr;
+       unsigned int timeout = 100;
 
 #ifdef CONFIG_PXA25x
        if (cpu_is_pxa25x())
@@ -224,6 +209,10 @@ bool pxa2xx_ac97_try_warm_reset(struct snd_ac97 *ac97)
        else
 #endif
                BUG();
+
+       while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+               mdelay(1);
+
        gsr = GSR | gsr_bits;
        if (!(gsr & (GSR_PCR | GSR_SCR))) {
                printk(KERN_INFO "%s: warm reset timeout (GSR=%#lx)\n",
@@ -239,6 +228,7 @@ EXPORT_SYMBOL_GPL(pxa2xx_ac97_try_warm_reset);
 bool pxa2xx_ac97_try_cold_reset(struct snd_ac97 *ac97)
 {
        unsigned long gsr;
+       unsigned int timeout = 1000;
 
 #ifdef CONFIG_PXA25x
        if (cpu_is_pxa25x())
@@ -257,6 +247,9 @@ bool pxa2xx_ac97_try_cold_reset(struct snd_ac97 *ac97)
 #endif
                BUG();
 
+       while (!((GSR | gsr_bits) & (GSR_PCR | GSR_SCR)) && timeout--)
+               mdelay(1);
+
        gsr = GSR | gsr_bits;
        if (!(gsr & (GSR_PCR | GSR_SCR))) {
                printk(KERN_INFO "%s: cold reset timeout (GSR=%#lx)\n",
index 69a2455b447210d42c2bf2c37b23fbe9035eada9..e6c727b317fbd8fa5a1d33fef7133aa601eeddd7 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
 
 #include <sound/core.h>
@@ -83,8 +84,6 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
        .mmap           = pxa2xx_pcm_mmap,
 };
 
-static u64 pxa2xx_pcm_dmamask = 0xffffffff;
-
 int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
                   struct snd_pcm **rpcm)
 {
@@ -100,10 +99,9 @@ int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
        pcm->private_data = client;
        pcm->private_free = pxa2xx_pcm_free_dma_buffers;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &pxa2xx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = 0xffffffff;
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto out;
 
        if (play) {
                int stream = SNDRV_PCM_STREAM_PLAYBACK;
index ea063e1f87221e982558ec892024e5bdb7793b89..b3e274fe4a77438a463766c9d364045dbe8cbaf7 100644 (file)
@@ -11,6 +11,21 @@ config SND_FIREWIRE_LIB
        tristate
        depends on SND_PCM
 
+config SND_DICE
+       tristate "DICE-based DACs (EXPERIMENTAL)"
+       select SND_HWDEP
+       select SND_PCM
+       select SND_FIREWIRE_LIB
+       help
+         Say Y here to include support for many DACs based on the DICE
+         chip family (DICE-II/Jr/Mini) from TC Applied Technologies.
+
+         At the moment, this driver supports playback only.  If you
+         want to use devices that support capturing, use FFADO instead.
+
+         To compile this driver as a module, choose M here: the module
+         will be called snd-dice.
+
 config SND_FIREWIRE_SPEAKERS
        tristate "FireWire speakers"
        select SND_PCM
index 460179df5bb526ee06f90fa7ffd03ef638024e3b..509955061d30f62d44216fb9913282091d0bca93 100644 (file)
@@ -1,10 +1,12 @@
 snd-firewire-lib-objs := lib.o iso-resources.o packets-buffer.o \
                         fcp.o cmp.o amdtp.o
+snd-dice-objs := dice.o
 snd-firewire-speakers-objs := speakers.o
 snd-isight-objs := isight.o
 snd-scs1x-objs := scs1x.o
 
 obj-$(CONFIG_SND_FIREWIRE_LIB) += snd-firewire-lib.o
+obj-$(CONFIG_SND_DICE) += snd-dice.o
 obj-$(CONFIG_SND_FIREWIRE_SPEAKERS) += snd-firewire-speakers.o
 obj-$(CONFIG_SND_ISIGHT) += snd-isight.o
 obj-$(CONFIG_SND_SCS1X) += snd-scs1x.o
index ea995af6d049ec63b86f8fb59cf374640db9cc56..d3226892ad6b44953fd4d980d65874ac40535768 100644 (file)
@@ -42,9 +42,6 @@ static void pcm_period_tasklet(unsigned long data);
 int amdtp_out_stream_init(struct amdtp_out_stream *s, struct fw_unit *unit,
                          enum cip_out_flags flags)
 {
-       if (flags != CIP_NONBLOCKING)
-               return -EINVAL;
-
        s->unit = fw_unit_get(unit);
        s->flags = flags;
        s->context = ERR_PTR(-1);
@@ -62,73 +59,91 @@ EXPORT_SYMBOL(amdtp_out_stream_init);
  */
 void amdtp_out_stream_destroy(struct amdtp_out_stream *s)
 {
-       WARN_ON(!IS_ERR(s->context));
+       WARN_ON(amdtp_out_stream_running(s));
        mutex_destroy(&s->mutex);
        fw_unit_put(s->unit);
 }
 EXPORT_SYMBOL(amdtp_out_stream_destroy);
 
+const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
+       [CIP_SFC_32000]  =  8,
+       [CIP_SFC_44100]  =  8,
+       [CIP_SFC_48000]  =  8,
+       [CIP_SFC_88200]  = 16,
+       [CIP_SFC_96000]  = 16,
+       [CIP_SFC_176400] = 32,
+       [CIP_SFC_192000] = 32,
+};
+EXPORT_SYMBOL(amdtp_syt_intervals);
+
 /**
- * amdtp_out_stream_set_rate - set the sample rate
+ * amdtp_out_stream_set_parameters - set stream parameters
  * @s: the AMDTP output stream to configure
  * @rate: the sample rate
+ * @pcm_channels: the number of PCM samples in each data block, to be encoded
+ *                as AM824 multi-bit linear audio
+ * @midi_ports: the number of MIDI ports (i.e., MPX-MIDI Data Channels)
  *
- * The sample rate must be set before the stream is started, and must not be
+ * The parameters must be set before the stream is started, and must not be
  * changed while the stream is running.
  */
-void amdtp_out_stream_set_rate(struct amdtp_out_stream *s, unsigned int rate)
+void amdtp_out_stream_set_parameters(struct amdtp_out_stream *s,
+                                    unsigned int rate,
+                                    unsigned int pcm_channels,
+                                    unsigned int midi_ports)
 {
-       static const struct {
-               unsigned int rate;
-               unsigned int syt_interval;
-       } rate_info[] = {
-               [CIP_SFC_32000]  = {  32000,  8, },
-               [CIP_SFC_44100]  = {  44100,  8, },
-               [CIP_SFC_48000]  = {  48000,  8, },
-               [CIP_SFC_88200]  = {  88200, 16, },
-               [CIP_SFC_96000]  = {  96000, 16, },
-               [CIP_SFC_176400] = { 176400, 32, },
-               [CIP_SFC_192000] = { 192000, 32, },
+       static const unsigned int rates[] = {
+               [CIP_SFC_32000]  =  32000,
+               [CIP_SFC_44100]  =  44100,
+               [CIP_SFC_48000]  =  48000,
+               [CIP_SFC_88200]  =  88200,
+               [CIP_SFC_96000]  =  96000,
+               [CIP_SFC_176400] = 176400,
+               [CIP_SFC_192000] = 192000,
        };
        unsigned int sfc;
 
-       if (WARN_ON(!IS_ERR(s->context)))
+       if (WARN_ON(amdtp_out_stream_running(s)))
                return;
 
-       for (sfc = 0; sfc < ARRAY_SIZE(rate_info); ++sfc)
-               if (rate_info[sfc].rate == rate) {
-                       s->sfc = sfc;
-                       s->syt_interval = rate_info[sfc].syt_interval;
-                       return;
-               }
+       for (sfc = 0; sfc < CIP_SFC_COUNT; ++sfc)
+               if (rates[sfc] == rate)
+                       goto sfc_found;
        WARN_ON(1);
+       return;
+
+sfc_found:
+       s->dual_wire = (s->flags & CIP_HI_DUALWIRE) && sfc > CIP_SFC_96000;
+       if (s->dual_wire) {
+               sfc -= 2;
+               rate /= 2;
+               pcm_channels *= 2;
+       }
+       s->sfc = sfc;
+       s->data_block_quadlets = pcm_channels + DIV_ROUND_UP(midi_ports, 8);
+       s->pcm_channels = pcm_channels;
+       s->midi_ports = midi_ports;
+
+       s->syt_interval = amdtp_syt_intervals[sfc];
+
+       /* default buffering in the device */
+       s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
+       if (s->flags & CIP_BLOCKING)
+               /* additional buffering needed to adjust for no-data packets */
+               s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
 }
-EXPORT_SYMBOL(amdtp_out_stream_set_rate);
+EXPORT_SYMBOL(amdtp_out_stream_set_parameters);
 
 /**
  * amdtp_out_stream_get_max_payload - get the stream's packet size
  * @s: the AMDTP output stream
  *
  * This function must not be called before the stream has been configured
- * with amdtp_out_stream_set_hw_params(), amdtp_out_stream_set_pcm(), and
- * amdtp_out_stream_set_midi().
+ * with amdtp_out_stream_set_parameters().
  */
 unsigned int amdtp_out_stream_get_max_payload(struct amdtp_out_stream *s)
 {
-       static const unsigned int max_data_blocks[] = {
-               [CIP_SFC_32000]  =  4,
-               [CIP_SFC_44100]  =  6,
-               [CIP_SFC_48000]  =  6,
-               [CIP_SFC_88200]  = 12,
-               [CIP_SFC_96000]  = 12,
-               [CIP_SFC_176400] = 23,
-               [CIP_SFC_192000] = 24,
-       };
-
-       s->data_block_quadlets = s->pcm_channels;
-       s->data_block_quadlets += DIV_ROUND_UP(s->midi_ports, 8);
-
-       return 8 + max_data_blocks[s->sfc] * 4 * s->data_block_quadlets;
+       return 8 + s->syt_interval * s->data_block_quadlets * 4;
 }
 EXPORT_SYMBOL(amdtp_out_stream_get_max_payload);
 
@@ -138,19 +153,26 @@ static void amdtp_write_s16(struct amdtp_out_stream *s,
 static void amdtp_write_s32(struct amdtp_out_stream *s,
                            struct snd_pcm_substream *pcm,
                            __be32 *buffer, unsigned int frames);
+static void amdtp_write_s16_dualwire(struct amdtp_out_stream *s,
+                                    struct snd_pcm_substream *pcm,
+                                    __be32 *buffer, unsigned int frames);
+static void amdtp_write_s32_dualwire(struct amdtp_out_stream *s,
+                                    struct snd_pcm_substream *pcm,
+                                    __be32 *buffer, unsigned int frames);
 
 /**
  * amdtp_out_stream_set_pcm_format - set the PCM format
  * @s: the AMDTP output stream to configure
  * @format: the format of the ALSA PCM device
  *
- * The sample format must be set before the stream is started, and must not be
- * changed while the stream is running.
+ * The sample format must be set after the other parameters (rate/PCM channels/
+ * MIDI) and before the stream is started, and must not be changed while the
+ * stream is running.
  */
 void amdtp_out_stream_set_pcm_format(struct amdtp_out_stream *s,
                                     snd_pcm_format_t format)
 {
-       if (WARN_ON(!IS_ERR(s->context)))
+       if (WARN_ON(amdtp_out_stream_running(s)))
                return;
 
        switch (format) {
@@ -158,10 +180,16 @@ void amdtp_out_stream_set_pcm_format(struct amdtp_out_stream *s,
                WARN_ON(1);
                /* fall through */
        case SNDRV_PCM_FORMAT_S16:
-               s->transfer_samples = amdtp_write_s16;
+               if (s->dual_wire)
+                       s->transfer_samples = amdtp_write_s16_dualwire;
+               else
+                       s->transfer_samples = amdtp_write_s16;
                break;
        case SNDRV_PCM_FORMAT_S32:
-               s->transfer_samples = amdtp_write_s32;
+               if (s->dual_wire)
+                       s->transfer_samples = amdtp_write_s32_dualwire;
+               else
+                       s->transfer_samples = amdtp_write_s32;
                break;
        }
 }
@@ -248,7 +276,7 @@ static unsigned int calculate_syt(struct amdtp_out_stream *s,
        s->last_syt_offset = syt_offset;
 
        if (syt_offset < TICKS_PER_CYCLE) {
-               syt_offset += TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
+               syt_offset += s->transfer_delay;
                syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
                syt += syt_offset % TICKS_PER_CYCLE;
 
@@ -268,7 +296,7 @@ static void amdtp_write_s32(struct amdtp_out_stream *s,
 
        channels = s->pcm_channels;
        src = (void *)runtime->dma_area +
-                       s->pcm_buffer_pointer * (runtime->frame_bits / 8);
+                       frames_to_bytes(runtime, s->pcm_buffer_pointer);
        remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer;
        frame_step = s->data_block_quadlets - channels;
 
@@ -294,7 +322,7 @@ static void amdtp_write_s16(struct amdtp_out_stream *s,
 
        channels = s->pcm_channels;
        src = (void *)runtime->dma_area +
-                       s->pcm_buffer_pointer * (runtime->frame_bits / 8);
+                       frames_to_bytes(runtime, s->pcm_buffer_pointer);
        remaining_frames = runtime->buffer_size - s->pcm_buffer_pointer;
        frame_step = s->data_block_quadlets - channels;
 
@@ -310,6 +338,68 @@ static void amdtp_write_s16(struct amdtp_out_stream *s,
        }
 }
 
+static void amdtp_write_s32_dualwire(struct amdtp_out_stream *s,
+                                    struct snd_pcm_substream *pcm,
+                                    __be32 *buffer, unsigned int frames)
+{
+       struct snd_pcm_runtime *runtime = pcm->runtime;
+       unsigned int channels, frame_adjust_1, frame_adjust_2, i, c;
+       const u32 *src;
+
+       channels = s->pcm_channels;
+       src = (void *)runtime->dma_area +
+                       s->pcm_buffer_pointer * (runtime->frame_bits / 8);
+       frame_adjust_1 = channels - 1;
+       frame_adjust_2 = 1 - (s->data_block_quadlets - channels);
+
+       channels /= 2;
+       for (i = 0; i < frames; ++i) {
+               for (c = 0; c < channels; ++c) {
+                       *buffer = cpu_to_be32((*src >> 8) | 0x40000000);
+                       src++;
+                       buffer += 2;
+               }
+               buffer -= frame_adjust_1;
+               for (c = 0; c < channels; ++c) {
+                       *buffer = cpu_to_be32((*src >> 8) | 0x40000000);
+                       src++;
+                       buffer += 2;
+               }
+               buffer -= frame_adjust_2;
+       }
+}
+
+static void amdtp_write_s16_dualwire(struct amdtp_out_stream *s,
+                                    struct snd_pcm_substream *pcm,
+                                    __be32 *buffer, unsigned int frames)
+{
+       struct snd_pcm_runtime *runtime = pcm->runtime;
+       unsigned int channels, frame_adjust_1, frame_adjust_2, i, c;
+       const u16 *src;
+
+       channels = s->pcm_channels;
+       src = (void *)runtime->dma_area +
+                       s->pcm_buffer_pointer * (runtime->frame_bits / 8);
+       frame_adjust_1 = channels - 1;
+       frame_adjust_2 = 1 - (s->data_block_quadlets - channels);
+
+       channels /= 2;
+       for (i = 0; i < frames; ++i) {
+               for (c = 0; c < channels; ++c) {
+                       *buffer = cpu_to_be32((*src << 8) | 0x40000000);
+                       src++;
+                       buffer += 2;
+               }
+               buffer -= frame_adjust_1;
+               for (c = 0; c < channels; ++c) {
+                       *buffer = cpu_to_be32((*src << 8) | 0x40000000);
+                       src++;
+                       buffer += 2;
+               }
+               buffer -= frame_adjust_2;
+       }
+}
+
 static void amdtp_fill_pcm_silence(struct amdtp_out_stream *s,
                                   __be32 *buffer, unsigned int frames)
 {
@@ -344,8 +434,17 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
                return;
        index = s->packet_index;
 
-       data_blocks = calculate_data_blocks(s);
        syt = calculate_syt(s, cycle);
+       if (!(s->flags & CIP_BLOCKING)) {
+               data_blocks = calculate_data_blocks(s);
+       } else {
+               if (syt != 0xffff) {
+                       data_blocks = s->syt_interval;
+               } else {
+                       data_blocks = 0;
+                       syt = 0xffffff;
+               }
+       }
 
        buffer = s->buffer.packets[index].buffer;
        buffer[0] = cpu_to_be32(ACCESS_ONCE(s->source_node_id_field) |
@@ -386,6 +485,9 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
        s->packet_index = index;
 
        if (pcm) {
+               if (s->dual_wire)
+                       data_blocks *= 2;
+
                ptr = s->pcm_buffer_pointer + data_blocks;
                if (ptr >= pcm->runtime->buffer_size)
                        ptr -= pcm->runtime->buffer_size;
@@ -455,9 +557,8 @@ static int queue_initial_skip_packets(struct amdtp_out_stream *s)
  * @speed: firewire speed code
  *
  * The stream cannot be started until it has been configured with
- * amdtp_out_stream_set_hw_params(), amdtp_out_stream_set_pcm(), and
- * amdtp_out_stream_set_midi(); and it must be started before any
- * PCM or MIDI device can be started.
+ * amdtp_out_stream_set_parameters() and amdtp_out_stream_set_pcm_format(),
+ * and it must be started before any PCM or MIDI device can be started.
  */
 int amdtp_out_stream_start(struct amdtp_out_stream *s, int channel, int speed)
 {
@@ -477,7 +578,7 @@ int amdtp_out_stream_start(struct amdtp_out_stream *s, int channel, int speed)
 
        mutex_lock(&s->mutex);
 
-       if (WARN_ON(!IS_ERR(s->context) ||
+       if (WARN_ON(amdtp_out_stream_running(s) ||
                    (!s->pcm_channels && !s->midi_ports))) {
                err = -EBADFD;
                goto err_unlock;
@@ -573,7 +674,7 @@ void amdtp_out_stream_stop(struct amdtp_out_stream *s)
 {
        mutex_lock(&s->mutex);
 
-       if (IS_ERR(s->context)) {
+       if (!amdtp_out_stream_running(s)) {
                mutex_unlock(&s->mutex);
                return;
        }
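
Taken together, the kernel-doc changes in this file describe a new configuration order: amdtp_out_stream_set_parameters() now receives the rate, PCM channel count and MIDI port count in a single call, amdtp_out_stream_set_pcm_format() must follow it so that the dual-wire sample writers can be selected, and only then may the stream be started. A minimal caller sketch of that order, assuming blocking dual-wire mode; the function name, the 192 kHz rate and the channel/port/speed parameters are illustrative, not taken from a real caller:

        #include <linux/printk.h>
        #include <sound/pcm.h>
        #include "amdtp.h"

        /* Sketch only: the documented configuration order for the reworked API. */
        static int example_configure(struct amdtp_out_stream *s, struct fw_unit *unit,
                                     unsigned int pcm_channels, unsigned int midi_ports,
                                     int iso_channel, int speed)
        {
                int err;

                err = amdtp_out_stream_init(s, unit, CIP_BLOCKING | CIP_HI_DUALWIRE);
                if (err < 0)
                        return err;

                /* rate, PCM channels and MIDI ports first ... */
                amdtp_out_stream_set_parameters(s, 192000, pcm_channels, midi_ports);
                /* ... then the sample format, so the dual-wire writers can be chosen */
                amdtp_out_stream_set_pcm_format(s, SNDRV_PCM_FORMAT_S32);

                /* the payload size is meaningful only after the parameters are set */
                pr_info("max payload: %u\n", amdtp_out_stream_get_max_payload(s));

                return amdtp_out_stream_start(s, iso_channel, speed);
        }
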
index f6103d68c4b1660bf9d61ba8d0c053086eec923b..839ebf812d79e47302e3c4c57b5f2f74695551b7 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef SOUND_FIREWIRE_AMDTP_H_INCLUDED
 #define SOUND_FIREWIRE_AMDTP_H_INCLUDED
 
+#include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
 #include "packets-buffer.h"
  *     sample_rate/8000 samples, with rounding up or down to adjust
  *     for clock skew and left-over fractional samples.  This should
  *     be used if supported by the device.
+ * @CIP_BLOCKING: In blocking mode, each packet contains either zero or
+ *     SYT_INTERVAL samples, with these two types alternating so that
+ *     the overall sample rate comes out right.
+ * @CIP_HI_DUALWIRE: At rates above 96 kHz, pretend that the stream runs
+ *     at half the actual sample rate with twice the number of channels;
+ *     two samples of a channel are stored consecutively in the packet.
+ *     Requires blocking mode and SYT_INTERVAL-aligned PCM buffer size.
  */
 enum cip_out_flags {
-       CIP_NONBLOCKING = 0,
+       CIP_NONBLOCKING = 0x00,
+       CIP_BLOCKING    = 0x01,
+       CIP_HI_DUALWIRE = 0x02,
 };
 
 /**
@@ -27,6 +37,7 @@ enum cip_sfc {
        CIP_SFC_96000  = 4,
        CIP_SFC_176400 = 5,
        CIP_SFC_192000 = 6,
+       CIP_SFC_COUNT
 };
 
 #define AMDTP_OUT_PCM_FORMAT_BITS      (SNDRV_PCM_FMTBIT_S16 | \
@@ -43,6 +54,7 @@ struct amdtp_out_stream {
        struct mutex mutex;
 
        enum cip_sfc sfc;
+       bool dual_wire;
        unsigned int data_block_quadlets;
        unsigned int pcm_channels;
        unsigned int midi_ports;
@@ -51,6 +63,7 @@ struct amdtp_out_stream {
                                 __be32 *buffer, unsigned int frames);
 
        unsigned int syt_interval;
+       unsigned int transfer_delay;
        unsigned int source_node_id_field;
        struct iso_packets_buffer buffer;
 
@@ -74,7 +87,10 @@ int amdtp_out_stream_init(struct amdtp_out_stream *s, struct fw_unit *unit,
                          enum cip_out_flags flags);
 void amdtp_out_stream_destroy(struct amdtp_out_stream *s);
 
-void amdtp_out_stream_set_rate(struct amdtp_out_stream *s, unsigned int rate);
+void amdtp_out_stream_set_parameters(struct amdtp_out_stream *s,
+                                    unsigned int rate,
+                                    unsigned int pcm_channels,
+                                    unsigned int midi_ports);
 unsigned int amdtp_out_stream_get_max_payload(struct amdtp_out_stream *s);
 
 int amdtp_out_stream_start(struct amdtp_out_stream *s, int channel, int speed);
@@ -87,31 +103,11 @@ void amdtp_out_stream_pcm_prepare(struct amdtp_out_stream *s);
 unsigned long amdtp_out_stream_pcm_pointer(struct amdtp_out_stream *s);
 void amdtp_out_stream_pcm_abort(struct amdtp_out_stream *s);
 
-/**
- * amdtp_out_stream_set_pcm - configure format of PCM samples
- * @s: the AMDTP output stream to be configured
- * @pcm_channels: the number of PCM samples in each data block, to be encoded
- *                as AM824 multi-bit linear audio
- *
- * This function must not be called while the stream is running.
- */
-static inline void amdtp_out_stream_set_pcm(struct amdtp_out_stream *s,
-                                           unsigned int pcm_channels)
-{
-       s->pcm_channels = pcm_channels;
-}
+extern const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT];
 
-/**
- * amdtp_out_stream_set_midi - configure format of MIDI data
- * @s: the AMDTP output stream to be configured
- * @midi_ports: the number of MIDI ports (i.e., MPX-MIDI Data Channels)
- *
- * This function must not be called while the stream is running.
- */
-static inline void amdtp_out_stream_set_midi(struct amdtp_out_stream *s,
-                                            unsigned int midi_ports)
+static inline bool amdtp_out_stream_running(struct amdtp_out_stream *s)
 {
-       s->midi_ports = midi_ports;
+       return !IS_ERR(s->context);
 }
 
 /**
index 645cb0ba429310f1a0c2569e1a7845a9bf0c4387..efdbf585e4046f7588f1e838139104beecc90c75 100644 (file)
@@ -48,9 +48,6 @@ static int pcr_modify(struct cmp_connection *c,
                      int (*check)(struct cmp_connection *c, __be32 pcr),
                      enum bus_reset_handling bus_reset_handling)
 {
-       struct fw_device *device = fw_parent_device(c->resources.unit);
-       int generation = c->resources.generation;
-       int rcode, errors = 0;
        __be32 old_arg, buffer[2];
        int err;
 
@@ -59,36 +56,31 @@ static int pcr_modify(struct cmp_connection *c,
                old_arg = buffer[0];
                buffer[1] = modify(c, buffer[0]);
 
-               rcode = fw_run_transaction(
-                               device->card, TCODE_LOCK_COMPARE_SWAP,
-                               device->node_id, generation, device->max_speed,
+               err = snd_fw_transaction(
+                               c->resources.unit, TCODE_LOCK_COMPARE_SWAP,
                                CSR_REGISTER_BASE + CSR_IPCR(c->pcr_index),
-                               buffer, 8);
-
-               if (rcode == RCODE_COMPLETE) {
-                       if (buffer[0] == old_arg) /* success? */
-                               break;
-
-                       if (check) {
-                               err = check(c, buffer[0]);
-                               if (err < 0)
-                                       return err;
-                       }
-               } else if (rcode == RCODE_GENERATION)
-                       goto bus_reset;
-               else if (rcode_is_permanent_error(rcode) || ++errors >= 3)
-                       goto io_error;
+                               buffer, 8,
+                               FW_FIXED_GENERATION | c->resources.generation);
+
+               if (err < 0) {
+                       if (err == -EAGAIN &&
+                           bus_reset_handling == SUCCEED_ON_BUS_RESET)
+                               err = 0;
+                       return err;
+               }
+
+               if (buffer[0] == old_arg) /* success? */
+                       break;
+
+               if (check) {
+                       err = check(c, buffer[0]);
+                       if (err < 0)
+                               return err;
+               }
        }
        c->last_pcr_value = buffer[1];
 
        return 0;
-
-io_error:
-       cmp_error(c, "transaction failed: %s\n", fw_rcode_string(rcode));
-       return -EIO;
-
-bus_reset:
-       return bus_reset_handling == ABORT_ON_BUS_RESET ? -EAGAIN : 0;
 }
 
 
@@ -108,7 +100,7 @@ int cmp_connection_init(struct cmp_connection *c,
 
        err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
                                 CSR_REGISTER_BASE + CSR_IMPR,
-                                &impr_be, 4);
+                                &impr_be, 4, 0);
        if (err < 0)
                return err;
        impr = be32_to_cpu(impr_be);
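
The reworked pcr_modify() above leans on the usual IEEE 1394 lock semantics: a compare/swap request carries the expected old value and the new value, the device updates the register only if it still holds the expected value, and the register's previous contents come back in place of the argument, so losing the race shows up as buffer[0] no longer matching old_arg. A condensed sketch of that retry loop; compute_new() is a hypothetical stand-in for the modify callback, and the initial value of buffer[0], which is not visible in the hunks, is assumed to come from the cached last_pcr_value:

        static __be32 compute_new(struct cmp_connection *c, __be32 pcr);  /* hypothetical */

        static int pcr_modify_sketch(struct cmp_connection *c)
        {
                __be32 old_arg, buffer[2];
                int err;

                buffer[0] = c->last_pcr_value;          /* assumed starting point */
                for (;;) {
                        old_arg = buffer[0];
                        buffer[1] = compute_new(c, buffer[0]);

                        err = snd_fw_transaction(c->resources.unit,
                                                 TCODE_LOCK_COMPARE_SWAP,
                                                 CSR_REGISTER_BASE + CSR_IPCR(c->pcr_index),
                                                 buffer, 8,
                                                 FW_FIXED_GENERATION | c->resources.generation);
                        if (err < 0)
                                return err;             /* -EAGAIN here signals a bus reset */

                        if (buffer[0] == old_arg)       /* register still held old_arg: done */
                                break;
                        /* lost the race: buffer[0] now holds the current value, retry */
                }
                c->last_pcr_value = buffer[1];

                return 0;
        }
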
diff --git a/sound/firewire/dice-interface.h b/sound/firewire/dice-interface.h
new file mode 100644 (file)
index 0000000..27b044f
--- /dev/null
@@ -0,0 +1,371 @@
+#ifndef SOUND_FIREWIRE_DICE_INTERFACE_H_INCLUDED
+#define SOUND_FIREWIRE_DICE_INTERFACE_H_INCLUDED
+
+/*
+ * DICE device interface definitions
+ */
+
+/*
+ * Generally, all registers can be read like memory, i.e., with quadlet read or
+ * block read transactions with at least quadlet-aligned offset and length.
+ * Writes are not allowed except where noted; quadlet-sized registers must be
+ * written with a quadlet write transaction.
+ *
+ * All values are in big endian.  The DICE firmware runs on a little-endian CPU
+ * and just byte-swaps _all_ quadlets on the bus, so values without endianness
+ * (e.g. strings) get scrambled and must be byte-swapped again by the driver.
+ */
+
+/*
+ * Streaming is handled by the "DICE driver" interface.  Its registers are
+ * located in this private address space.
+ */
+#define DICE_PRIVATE_SPACE             0xffffe0000000uLL
+
+/*
+ * The registers are organized in several sections, which are organized
+ * separately to allow them to be extended individually.  Whether a register is
+ * supported can be detected by checking its offset against its section's size.
+ *
+ * The section offset values are relative to DICE_PRIVATE_SPACE; the offset/
+ * size values are measured in quadlets.  Read-only.
+ */
+#define DICE_GLOBAL_OFFSET             0x00
+#define DICE_GLOBAL_SIZE               0x04
+#define DICE_TX_OFFSET                 0x08
+#define DICE_TX_SIZE                   0x0c
+#define DICE_RX_OFFSET                 0x10
+#define DICE_RX_SIZE                   0x14
+#define DICE_EXT_SYNC_OFFSET           0x18
+#define DICE_EXT_SYNC_SIZE             0x1c
+#define DICE_UNUSED2_OFFSET            0x20
+#define DICE_UNUSED2_SIZE              0x24
+
+/*
+ * Global settings.
+ */
+
+/*
+ * Stores the full 64-bit address (node ID and offset in the node's address
+ * space) where the device will send notifications.  Must be changed with
+ * a compare/swap transaction by the owner.  This register is automatically
+ * cleared on a bus reset.
+ */
+#define GLOBAL_OWNER                   0x000
+#define  OWNER_NO_OWNER                        0xffff000000000000uLL
+#define  OWNER_NODE_SHIFT              48
+
+/*
+ * A bitmask with asynchronous events; read-only.  When any event(s) happen,
+ * the bits of previous events are cleared, and the value of this register is
+ * also written to the address stored in the owner register.
+ */
+#define GLOBAL_NOTIFICATION            0x008
+/* Some registers in the Rx/Tx sections may have changed. */
+#define  NOTIFY_RX_CFG_CHG             0x00000001
+#define  NOTIFY_TX_CFG_CHG             0x00000002
+/* Lock status of the current clock source may have changed. */
+#define  NOTIFY_LOCK_CHG               0x00000010
+/* Write to the clock select register has been finished. */
+#define  NOTIFY_CLOCK_ACCEPTED         0x00000020
+/* Lock status of some clock source has changed. */
+#define  NOTIFY_EXT_STATUS             0x00000040
+/* Other bits may be used for device-specific events. */
+
+/*
+ * A name that can be customized for each device; read/write.  Padded with zero
+ * bytes.  Quadlets are byte-swapped.  The encoding is whatever the host driver
+ * happens to be using.
+ */
+#define GLOBAL_NICK_NAME               0x00c
+#define  NICK_NAME_SIZE                        64
+
+/*
+ * The current sample rate and clock source; read/write.  Whether a clock
+ * source or sample rate is supported is device-specific; the internal clock
+ * source is always available.  Low/mid/high = up to 48/96/192 kHz.  This
+ * register can be changed even while streams are running.
+ */
+#define GLOBAL_CLOCK_SELECT            0x04c
+#define  CLOCK_SOURCE_MASK             0x000000ff
+#define  CLOCK_SOURCE_AES1             0x00000000
+#define  CLOCK_SOURCE_AES2             0x00000001
+#define  CLOCK_SOURCE_AES3             0x00000002
+#define  CLOCK_SOURCE_AES4             0x00000003
+#define  CLOCK_SOURCE_AES_ANY          0x00000004
+#define  CLOCK_SOURCE_ADAT             0x00000005
+#define  CLOCK_SOURCE_TDIF             0x00000006
+#define  CLOCK_SOURCE_WC               0x00000007
+#define  CLOCK_SOURCE_ARX1             0x00000008
+#define  CLOCK_SOURCE_ARX2             0x00000009
+#define  CLOCK_SOURCE_ARX3             0x0000000a
+#define  CLOCK_SOURCE_ARX4             0x0000000b
+#define  CLOCK_SOURCE_INTERNAL         0x0000000c
+#define  CLOCK_RATE_MASK               0x0000ff00
+#define  CLOCK_RATE_32000              0x00000000
+#define  CLOCK_RATE_44100              0x00000100
+#define  CLOCK_RATE_48000              0x00000200
+#define  CLOCK_RATE_88200              0x00000300
+#define  CLOCK_RATE_96000              0x00000400
+#define  CLOCK_RATE_176400             0x00000500
+#define  CLOCK_RATE_192000             0x00000600
+#define  CLOCK_RATE_ANY_LOW            0x00000700
+#define  CLOCK_RATE_ANY_MID            0x00000800
+#define  CLOCK_RATE_ANY_HIGH           0x00000900
+#define  CLOCK_RATE_NONE               0x00000a00
+#define  CLOCK_RATE_SHIFT              8
+
+/*
+ * Enable streaming; read/write.  Writing a non-zero value (re)starts all
+ * streams that have a valid iso channel set; zero stops all streams.  The
+ * streams' parameters must be configured before starting.  This register is
+ * automatically cleared on a bus reset.
+ */
+#define GLOBAL_ENABLE                  0x050
+
+/*
+ * Status of the sample clock; read-only.
+ */
+#define GLOBAL_STATUS                  0x054
+/* The current clock source is locked. */
+#define  STATUS_SOURCE_LOCKED          0x00000001
+/* The actual sample rate; CLOCK_RATE_32000-_192000 or _NONE. */
+#define  STATUS_NOMINAL_RATE_MASK      0x0000ff00
+
+/*
+ * Status of all clock sources; read-only.
+ */
+#define GLOBAL_EXTENDED_STATUS         0x058
+/*
+ * The _LOCKED bits always show the current status; any change generates
+ * a notification.
+ */
+#define  EXT_STATUS_AES1_LOCKED                0x00000001
+#define  EXT_STATUS_AES2_LOCKED                0x00000002
+#define  EXT_STATUS_AES3_LOCKED                0x00000004
+#define  EXT_STATUS_AES4_LOCKED                0x00000008
+#define  EXT_STATUS_ADAT_LOCKED                0x00000010
+#define  EXT_STATUS_TDIF_LOCKED                0x00000020
+#define  EXT_STATUS_ARX1_LOCKED                0x00000040
+#define  EXT_STATUS_ARX2_LOCKED                0x00000080
+#define  EXT_STATUS_ARX3_LOCKED                0x00000100
+#define  EXT_STATUS_ARX4_LOCKED                0x00000200
+#define  EXT_STATUS_WC_LOCKED          0x00000400
+/*
+ * The _SLIP bits do not generate notifications; a set bit indicates that an
+ * error occurred since the last time when this register was read with
+ * a quadlet read transaction.
+ */
+#define  EXT_STATUS_AES1_SLIP          0x00010000
+#define  EXT_STATUS_AES2_SLIP          0x00020000
+#define  EXT_STATUS_AES3_SLIP          0x00040000
+#define  EXT_STATUS_AES4_SLIP          0x00080000
+#define  EXT_STATUS_ADAT_SLIP          0x00100000
+#define  EXT_STATUS_TDIF_SLIP          0x00200000
+#define  EXT_STATUS_ARX1_SLIP          0x00400000
+#define  EXT_STATUS_ARX2_SLIP          0x00800000
+#define  EXT_STATUS_ARX3_SLIP          0x01000000
+#define  EXT_STATUS_ARX4_SLIP          0x02000000
+#define  EXT_STATUS_WC_SLIP            0x04000000
+
+/*
+ * The measured rate of the current clock source, in Hz; read-only.
+ */
+#define GLOBAL_SAMPLE_RATE             0x05c
+
+/*
+ * The version of the DICE driver specification that this device conforms to;
+ * read-only.
+ */
+#define GLOBAL_VERSION                 0x060
+
+/* Some old firmware versions do not have the following global registers: */
+
+/*
+ * Supported sample rates and clock sources; read-only.
+ */
+#define GLOBAL_CLOCK_CAPABILITIES      0x064
+#define  CLOCK_CAP_RATE_32000          0x00000001
+#define  CLOCK_CAP_RATE_44100          0x00000002
+#define  CLOCK_CAP_RATE_48000          0x00000004
+#define  CLOCK_CAP_RATE_88200          0x00000008
+#define  CLOCK_CAP_RATE_96000          0x00000010
+#define  CLOCK_CAP_RATE_176400         0x00000020
+#define  CLOCK_CAP_RATE_192000         0x00000040
+#define  CLOCK_CAP_SOURCE_AES1         0x00010000
+#define  CLOCK_CAP_SOURCE_AES2         0x00020000
+#define  CLOCK_CAP_SOURCE_AES3         0x00040000
+#define  CLOCK_CAP_SOURCE_AES4         0x00080000
+#define  CLOCK_CAP_SOURCE_AES_ANY      0x00100000
+#define  CLOCK_CAP_SOURCE_ADAT         0x00200000
+#define  CLOCK_CAP_SOURCE_TDIF         0x00400000
+#define  CLOCK_CAP_SOURCE_WC           0x00800000
+#define  CLOCK_CAP_SOURCE_ARX1         0x01000000
+#define  CLOCK_CAP_SOURCE_ARX2         0x02000000
+#define  CLOCK_CAP_SOURCE_ARX3         0x04000000
+#define  CLOCK_CAP_SOURCE_ARX4         0x08000000
+#define  CLOCK_CAP_SOURCE_INTERNAL     0x10000000
+
+/*
+ * Names of all clock sources; read-only.  Quadlets are byte-swapped.  Names
+ * are separated with one backslash, the list is terminated with two
+ * backslashes.  Unused clock sources are included.
+ */
+#define GLOBAL_CLOCK_SOURCE_NAMES      0x068
+#define  CLOCK_SOURCE_NAMES_SIZE       256
+
+/*
+ * Capture stream settings.  This section includes the number/size registers
+ * and the registers of all streams.
+ */
+
+/*
+ * The number of supported capture streams; read-only.
+ */
+#define TX_NUMBER                      0x000
+
+/*
+ * The size of one stream's register block, in quadlets; read-only.  The
+ * registers of the first stream follow immediately afterwards; the registers
+ * of the following streams are offset by this register's value.
+ */
+#define TX_SIZE                                0x004
+
+/*
+ * The isochronous channel number on which packets are sent, or -1 if the
+ * stream is not to be used; read/write.
+ */
+#define TX_ISOCHRONOUS                 0x008
+
+/*
+ * The number of audio channels; read-only.  There will be one quadlet per
+ * channel; the first channel is the first quadlet in a data block.
+ */
+#define TX_NUMBER_AUDIO                        0x00c
+
+/*
+ * The number of MIDI ports, 0-8; read-only.  If > 0, there will be one
+ * additional quadlet in each data block, following the audio quadlets.
+ */
+#define TX_NUMBER_MIDI                 0x010
+
+/*
+ * The speed at which the packets are sent, SCODE_100-_400; read/write.
+ */
+#define TX_SPEED                       0x014
+
+/*
+ * Names of all audio channels; read-only.  Quadlets are byte-swapped.  Names
+ * are separated with one backslash, the list is terminated with two
+ * backslashes.
+ */
+#define TX_NAMES                       0x018
+#define  TX_NAMES_SIZE                 256
+
+/*
+ * Audio IEC60958 capabilities; read-only.  Bitmask with one bit per audio
+ * channel.
+ */
+#define TX_AC3_CAPABILITIES            0x118
+
+/*
+ * Send audio data with IEC60958 label; read/write.  Bitmask with one bit per
+ * audio channel.  This register can be changed even while the stream is
+ * running.
+ */
+#define TX_AC3_ENABLE                  0x11c
+
+/*
+ * Playback stream settings.  This section includes the number/size registers
+ * and the registers of all streams.
+ */
+
+/*
+ * The number of supported playback streams; read-only.
+ */
+#define RX_NUMBER                      0x000
+
+/*
+ * The size of one stream's register block, in quadlets; read-only.  The
+ * registers of the first stream follow immediately afterwards; the registers
+ * of the following streams are offset by this register's value.
+ */
+#define RX_SIZE                                0x004
+
+/*
+ * The isochronous channel number on which packets are received, or -1 if the
+ * stream is not to be used; read/write.
+ */
+#define RX_ISOCHRONOUS                 0x008
+
+/*
+ * Index of first quadlet to be interpreted; read/write.  If > 0, that many
+ * quadlets at the beginning of each data block will be ignored, and all the
+ * audio and MIDI quadlets will follow.
+ */
+#define RX_SEQ_START                   0x00c
+
+/*
+ * The number of audio channels; read-only.  There will be one quadlet per
+ * channel.
+ */
+#define RX_NUMBER_AUDIO                        0x010
+
+/*
+ * The number of MIDI ports, 0-8; read-only.  If > 0, there will be one
+ * additional quadlet in each data block, following the audio quadlets.
+ */
+#define RX_NUMBER_MIDI                 0x014
+
+/*
+ * Names of all audio channels; read-only.  Quadlets are byte-swapped.  Names
+ * are separated with one backslash, the list is terminated with two
+ * backslashes.
+ */
+#define RX_NAMES                       0x018
+#define  RX_NAMES_SIZE                 256
+
+/*
+ * Audio IEC60958 capabilities; read-only.  Bitmask with one bit per audio
+ * channel.
+ */
+#define RX_AC3_CAPABILITIES            0x118
+
+/*
+ * Receive audio data with IEC60958 label; read/write.  Bitmask with one bit
+ * per audio channel.  This register can be changed even while the stream is
+ * running.
+ */
+#define RX_AC3_ENABLE                  0x11c
+
+/*
+ * Extended synchronization information.
+ * This section can be read completely with a block read request.
+ */
+
+/*
+ * Current clock source; read-only.
+ */
+#define EXT_SYNC_CLOCK_SOURCE          0x000
+
+/*
+ * Clock source is locked (boolean); read-only.
+ */
+#define EXT_SYNC_LOCKED                        0x004
+
+/*
+ * Current sample rate (CLOCK_RATE_* >> CLOCK_RATE_SHIFT), _32000-_192000 or
+ * _NONE; read-only.
+ */
+#define EXT_SYNC_RATE                  0x008
+
+/*
+ * ADAT user data bits; read-only.
+ */
+#define EXT_SYNC_ADAT_USER_DATA                0x00c
+/* The data bits, if available. */
+#define  ADAT_USER_DATA_MASK           0x0f
+/* The data bits are not available. */
+#define  ADAT_USER_DATA_NO_DATA                0x10
+
+#endif
diff --git a/sound/firewire/dice.c b/sound/firewire/dice.c
new file mode 100644 (file)
index 0000000..6feee66
--- /dev/null
+++ b/sound/firewire/dice.c
@@ -0,0 +1,1494 @@
+/*
+ * TC Applied Technologies Digital Interface Communications Engine driver
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include <linux/compat.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firewire.h>
+#include <linux/firewire-constants.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/firewire.h>
+#include <sound/hwdep.h>
+#include <sound/info.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include "amdtp.h"
+#include "iso-resources.h"
+#include "lib.h"
+#include "dice-interface.h"
+
+
+struct dice {
+       struct snd_card *card;
+       struct fw_unit *unit;
+       spinlock_t lock;
+       struct mutex mutex;
+       unsigned int global_offset;
+       unsigned int rx_offset;
+       unsigned int clock_caps;
+       unsigned int rx_channels[3];
+       unsigned int rx_midi_ports[3];
+       struct fw_address_handler notification_handler;
+       int owner_generation;
+       int dev_lock_count; /* > 0 driver, < 0 userspace */
+       bool dev_lock_changed;
+       bool global_enabled;
+       struct completion clock_accepted;
+       wait_queue_head_t hwdep_wait;
+       u32 notification_bits;
+       struct fw_iso_resources resources;
+       struct amdtp_out_stream stream;
+};
+
+MODULE_DESCRIPTION("DICE driver");
+MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+MODULE_LICENSE("GPL v2");
+
+static const unsigned int dice_rates[] = {
+       /* mode 0 */
+       [0] =  32000,
+       [1] =  44100,
+       [2] =  48000,
+       /* mode 1 */
+       [3] =  88200,
+       [4] =  96000,
+       /* mode 2 */
+       [5] = 176400,
+       [6] = 192000,
+};
+
+static unsigned int rate_to_index(unsigned int rate)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(dice_rates); ++i)
+               if (dice_rates[i] == rate)
+                       return i;
+
+       return 0;
+}
+
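+/*
+ * Rate indices 0-2 (32-48 kHz) map to mode 0, 3-4 (88.2/96 kHz) to mode 1,
+ * and 5-6 (176.4/192 kHz) to mode 2.
+ */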
+static unsigned int rate_index_to_mode(unsigned int rate_index)
+{
+       return ((int)rate_index - 1) / 2;
+}
+
+static void dice_lock_changed(struct dice *dice)
+{
+       dice->dev_lock_changed = true;
+       wake_up(&dice->hwdep_wait);
+}
+
+static int dice_try_lock(struct dice *dice)
+{
+       int err;
+
+       spin_lock_irq(&dice->lock);
+
+       if (dice->dev_lock_count < 0) {
+               err = -EBUSY;
+               goto out;
+       }
+
+       if (dice->dev_lock_count++ == 0)
+               dice_lock_changed(dice);
+       err = 0;
+
+out:
+       spin_unlock_irq(&dice->lock);
+
+       return err;
+}
+
+static void dice_unlock(struct dice *dice)
+{
+       spin_lock_irq(&dice->lock);
+
+       if (WARN_ON(dice->dev_lock_count <= 0))
+               goto out;
+
+       if (--dice->dev_lock_count == 0)
+               dice_lock_changed(dice);
+
+out:
+       spin_unlock_irq(&dice->lock);
+}
+
+static inline u64 global_address(struct dice *dice, unsigned int offset)
+{
+       return DICE_PRIVATE_SPACE + dice->global_offset + offset;
+}
+
+// TODO: rx index
+static inline u64 rx_address(struct dice *dice, unsigned int offset)
+{
+       return DICE_PRIVATE_SPACE + dice->rx_offset + offset;
+}
+
+static int dice_owner_set(struct dice *dice)
+{
+       struct fw_device *device = fw_parent_device(dice->unit);
+       __be64 *buffer;
+       int err, errors = 0;
+
+       buffer = kmalloc(2 * 8, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
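+       /*
+        * Try to become the owner: compare-and-swap the OWNER register from
+        * "no owner" to our own node ID and notification handler address.
+        */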
+       for (;;) {
+               buffer[0] = cpu_to_be64(OWNER_NO_OWNER);
+               buffer[1] = cpu_to_be64(
+                       ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
+                       dice->notification_handler.offset);
+
+               dice->owner_generation = device->generation;
+               smp_rmb(); /* node_id vs. generation */
+               err = snd_fw_transaction(dice->unit,
+                                        TCODE_LOCK_COMPARE_SWAP,
+                                        global_address(dice, GLOBAL_OWNER),
+                                        buffer, 2 * 8,
+                                        FW_FIXED_GENERATION |
+                                                       dice->owner_generation);
+
+               if (err == 0) {
+                       if (buffer[0] != cpu_to_be64(OWNER_NO_OWNER)) {
+                               dev_err(&dice->unit->device,
+                                       "device is already in use\n");
+                               err = -EBUSY;
+                       }
+                       break;
+               }
+               if (err != -EAGAIN || ++errors >= 3)
+                       break;
+
+               msleep(20);
+       }
+
+       kfree(buffer);
+
+       return err;
+}
+
+static int dice_owner_update(struct dice *dice)
+{
+       struct fw_device *device = fw_parent_device(dice->unit);
+       __be64 *buffer;
+       int err;
+
+       if (dice->owner_generation == -1)
+               return 0;
+
+       buffer = kmalloc(2 * 8, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       buffer[0] = cpu_to_be64(OWNER_NO_OWNER);
+       buffer[1] = cpu_to_be64(
+               ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
+               dice->notification_handler.offset);
+
+       dice->owner_generation = device->generation;
+       smp_rmb(); /* node_id vs. generation */
+       err = snd_fw_transaction(dice->unit, TCODE_LOCK_COMPARE_SWAP,
+                                global_address(dice, GLOBAL_OWNER),
+                                buffer, 2 * 8,
+                                FW_FIXED_GENERATION | dice->owner_generation);
+
+       if (err == 0) {
+               if (buffer[0] != cpu_to_be64(OWNER_NO_OWNER)) {
+                       dev_err(&dice->unit->device,
+                               "device is already in use\n");
+                       err = -EBUSY;
+               }
+       } else if (err == -EAGAIN) {
+               err = 0; /* try again later */
+       }
+
+       kfree(buffer);
+
+       if (err < 0)
+               dice->owner_generation = -1;
+
+       return err;
+}
+
+static void dice_owner_clear(struct dice *dice)
+{
+       struct fw_device *device = fw_parent_device(dice->unit);
+       __be64 *buffer;
+
+       buffer = kmalloc(2 * 8, GFP_KERNEL);
+       if (!buffer)
+               return;
+
+       buffer[0] = cpu_to_be64(
+               ((u64)device->card->node_id << OWNER_NODE_SHIFT) |
+               dice->notification_handler.offset);
+       buffer[1] = cpu_to_be64(OWNER_NO_OWNER);
+       snd_fw_transaction(dice->unit, TCODE_LOCK_COMPARE_SWAP,
+                          global_address(dice, GLOBAL_OWNER),
+                          buffer, 2 * 8, FW_QUIET |
+                          FW_FIXED_GENERATION | dice->owner_generation);
+
+       kfree(buffer);
+
+       dice->owner_generation = -1;
+}
+
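+/* Tell the device to start streaming by setting the GLOBAL_ENABLE register. */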
+static int dice_enable_set(struct dice *dice)
+{
+       __be32 value;
+       int err;
+
+       value = cpu_to_be32(1);
+       err = snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+                                global_address(dice, GLOBAL_ENABLE),
+                                &value, 4,
+                                FW_FIXED_GENERATION | dice->owner_generation);
+       if (err < 0)
+               return err;
+
+       dice->global_enabled = true;
+
+       return 0;
+}
+
+static void dice_enable_clear(struct dice *dice)
+{
+       __be32 value;
+
+       if (!dice->global_enabled)
+               return;
+
+       value = 0;
+       snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+                          global_address(dice, GLOBAL_ENABLE),
+                          &value, 4, FW_QUIET |
+                          FW_FIXED_GENERATION | dice->owner_generation);
+
+       dice->global_enabled = false;
+}
+
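+/*
+ * The device posts notification bits by writing to the address that was
+ * registered in the OWNER register; collect them and wake up any readers.
+ */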
+static void dice_notification(struct fw_card *card, struct fw_request *request,
+                             int tcode, int destination, int source,
+                             int generation, unsigned long long offset,
+                             void *data, size_t length, void *callback_data)
+{
+       struct dice *dice = callback_data;
+       u32 bits;
+       unsigned long flags;
+
+       if (tcode != TCODE_WRITE_QUADLET_REQUEST) {
+               fw_send_response(card, request, RCODE_TYPE_ERROR);
+               return;
+       }
+       if ((offset & 3) != 0) {
+               fw_send_response(card, request, RCODE_ADDRESS_ERROR);
+               return;
+       }
+
+       bits = be32_to_cpup(data);
+
+       spin_lock_irqsave(&dice->lock, flags);
+       dice->notification_bits |= bits;
+       spin_unlock_irqrestore(&dice->lock, flags);
+
+       fw_send_response(card, request, RCODE_COMPLETE);
+
+       if (bits & NOTIFY_CLOCK_ACCEPTED)
+               complete(&dice->clock_accepted);
+       wake_up(&dice->hwdep_wait);
+}
+
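+/*
+ * The allowed sample rates and channel counts depend on each other: every
+ * rate belongs to one of the three modes, and each mode can have a different
+ * number of channels.
+ */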
+static int dice_rate_constraint(struct snd_pcm_hw_params *params,
+                               struct snd_pcm_hw_rule *rule)
+{
+       struct dice *dice = rule->private;
+       const struct snd_interval *channels =
+               hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+       struct snd_interval *rate =
+               hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+       struct snd_interval allowed_rates = {
+               .min = UINT_MAX, .max = 0, .integer = 1
+       };
+       unsigned int i, mode;
+
+       for (i = 0; i < ARRAY_SIZE(dice_rates); ++i) {
+               mode = rate_index_to_mode(i);
+               if ((dice->clock_caps & (1 << i)) &&
+                   snd_interval_test(channels, dice->rx_channels[mode])) {
+                       allowed_rates.min = min(allowed_rates.min,
+                                               dice_rates[i]);
+                       allowed_rates.max = max(allowed_rates.max,
+                                               dice_rates[i]);
+               }
+       }
+
+       return snd_interval_refine(rate, &allowed_rates);
+}
+
+static int dice_channels_constraint(struct snd_pcm_hw_params *params,
+                                   struct snd_pcm_hw_rule *rule)
+{
+       struct dice *dice = rule->private;
+       const struct snd_interval *rate =
+               hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
+       struct snd_interval *channels =
+               hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+       struct snd_interval allowed_channels = {
+               .min = UINT_MAX, .max = 0, .integer = 1
+       };
+       unsigned int i, mode;
+
+       for (i = 0; i < ARRAY_SIZE(dice_rates); ++i)
+               if ((dice->clock_caps & (1 << i)) &&
+                   snd_interval_test(rate, dice_rates[i])) {
+                       mode = rate_index_to_mode(i);
+                       allowed_channels.min = min(allowed_channels.min,
+                                                  dice->rx_channels[mode]);
+                       allowed_channels.max = max(allowed_channels.max,
+                                                  dice->rx_channels[mode]);
+               }
+
+       return snd_interval_refine(channels, &allowed_channels);
+}
+
+static int dice_open(struct snd_pcm_substream *substream)
+{
+       static const struct snd_pcm_hardware hardware = {
+               .info = SNDRV_PCM_INFO_MMAP |
+                       SNDRV_PCM_INFO_MMAP_VALID |
+                       SNDRV_PCM_INFO_BATCH |
+                       SNDRV_PCM_INFO_INTERLEAVED |
+                       SNDRV_PCM_INFO_BLOCK_TRANSFER,
+               .formats = AMDTP_OUT_PCM_FORMAT_BITS,
+               .channels_min = UINT_MAX,
+               .channels_max = 0,
+               .buffer_bytes_max = 16 * 1024 * 1024,
+               .period_bytes_min = 1,
+               .period_bytes_max = UINT_MAX,
+               .periods_min = 1,
+               .periods_max = UINT_MAX,
+       };
+       struct dice *dice = substream->private_data;
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       unsigned int i;
+       int err;
+
+       err = dice_try_lock(dice);
+       if (err < 0)
+               goto error;
+
+       runtime->hw = hardware;
+
+       for (i = 0; i < ARRAY_SIZE(dice_rates); ++i)
+               if (dice->clock_caps & (1 << i))
+                       runtime->hw.rates |=
+                               snd_pcm_rate_to_rate_bit(dice_rates[i]);
+       snd_pcm_limit_hw_rates(runtime);
+
+       for (i = 0; i < 3; ++i)
+               if (dice->rx_channels[i]) {
+                       runtime->hw.channels_min = min(runtime->hw.channels_min,
+                                                      dice->rx_channels[i]);
+                       runtime->hw.channels_max = max(runtime->hw.channels_max,
+                                                      dice->rx_channels[i]);
+               }
+
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+                                 dice_rate_constraint, dice,
+                                 SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+       if (err < 0)
+               goto err_lock;
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+                                 dice_channels_constraint, dice,
+                                 SNDRV_PCM_HW_PARAM_RATE, -1);
+       if (err < 0)
+               goto err_lock;
+
+       err = snd_pcm_hw_constraint_step(runtime, 0,
+                                        SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
+       if (err < 0)
+               goto err_lock;
+       err = snd_pcm_hw_constraint_step(runtime, 0,
+                                        SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);
+       if (err < 0)
+               goto err_lock;
+
+       err = snd_pcm_hw_constraint_minmax(runtime,
+                                          SNDRV_PCM_HW_PARAM_PERIOD_TIME,
+                                          5000, UINT_MAX);
+       if (err < 0)
+               goto err_lock;
+
+       err = snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
+       if (err < 0)
+               goto err_lock;
+
+       return 0;
+
+err_lock:
+       dice_unlock(dice);
+error:
+       return err;
+}
+
+static int dice_close(struct snd_pcm_substream *substream)
+{
+       struct dice *dice = substream->private_data;
+
+       dice_unlock(dice);
+
+       return 0;
+}
+
+static int dice_stream_start_packets(struct dice *dice)
+{
+       int err;
+
+       if (amdtp_out_stream_running(&dice->stream))
+               return 0;
+
+       err = amdtp_out_stream_start(&dice->stream, dice->resources.channel,
+                                    fw_parent_device(dice->unit)->max_speed);
+       if (err < 0)
+               return err;
+
+       err = dice_enable_set(dice);
+       if (err < 0) {
+               amdtp_out_stream_stop(&dice->stream);
+               return err;
+       }
+
+       return 0;
+}
+
+static int dice_stream_start(struct dice *dice)
+{
+       __be32 channel;
+       int err;
+
+       if (!dice->resources.allocated) {
+               err = fw_iso_resources_allocate(&dice->resources,
+                               amdtp_out_stream_get_max_payload(&dice->stream),
+                               fw_parent_device(dice->unit)->max_speed);
+               if (err < 0)
+                       goto error;
+
+               channel = cpu_to_be32(dice->resources.channel);
+               err = snd_fw_transaction(dice->unit,
+                                        TCODE_WRITE_QUADLET_REQUEST,
+                                        rx_address(dice, RX_ISOCHRONOUS),
+                                        &channel, 4, 0);
+               if (err < 0)
+                       goto err_resources;
+       }
+
+       err = dice_stream_start_packets(dice);
+       if (err < 0)
+               goto err_rx_channel;
+
+       return 0;
+
+err_rx_channel:
+       channel = cpu_to_be32((u32)-1);
+       snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+                          rx_address(dice, RX_ISOCHRONOUS), &channel, 4, 0);
+err_resources:
+       fw_iso_resources_free(&dice->resources);
+error:
+       return err;
+}
+
+static void dice_stream_stop_packets(struct dice *dice)
+{
+       if (amdtp_out_stream_running(&dice->stream)) {
+               dice_enable_clear(dice);
+               amdtp_out_stream_stop(&dice->stream);
+       }
+}
+
+static void dice_stream_stop(struct dice *dice)
+{
+       __be32 channel;
+
+       dice_stream_stop_packets(dice);
+
+       if (!dice->resources.allocated)
+               return;
+
+       channel = cpu_to_be32((u32)-1);
+       snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+                          rx_address(dice, RX_ISOCHRONOUS), &channel, 4, 0);
+
+       fw_iso_resources_free(&dice->resources);
+}
+
+static int dice_change_rate(struct dice *dice, unsigned int clock_rate)
+{
+       __be32 value;
+       int err;
+
+       INIT_COMPLETION(dice->clock_accepted);
+
+       value = cpu_to_be32(clock_rate | CLOCK_SOURCE_ARX1);
+       err = snd_fw_transaction(dice->unit, TCODE_WRITE_QUADLET_REQUEST,
+                                global_address(dice, GLOBAL_CLOCK_SELECT),
+                                &value, 4, 0);
+       if (err < 0)
+               return err;
+
+       if (!wait_for_completion_timeout(&dice->clock_accepted,
+                                        msecs_to_jiffies(100)))
+               dev_warn(&dice->unit->device, "clock change timed out\n");
+
+       return 0;
+}
+
+static int dice_hw_params(struct snd_pcm_substream *substream,
+                         struct snd_pcm_hw_params *hw_params)
+{
+       struct dice *dice = substream->private_data;
+       unsigned int rate_index, mode;
+       int err;
+
+       mutex_lock(&dice->mutex);
+       dice_stream_stop(dice);
+       mutex_unlock(&dice->mutex);
+
+       err = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+                                              params_buffer_bytes(hw_params));
+       if (err < 0)
+               return err;
+
+       rate_index = rate_to_index(params_rate(hw_params));
+       err = dice_change_rate(dice, rate_index << CLOCK_RATE_SHIFT);
+       if (err < 0)
+               return err;
+
+       mode = rate_index_to_mode(rate_index);
+       amdtp_out_stream_set_parameters(&dice->stream,
+                                       params_rate(hw_params),
+                                       params_channels(hw_params),
+                                       dice->rx_midi_ports[mode]);
+       amdtp_out_stream_set_pcm_format(&dice->stream,
+                                       params_format(hw_params));
+
+       return 0;
+}
+
+static int dice_hw_free(struct snd_pcm_substream *substream)
+{
+       struct dice *dice = substream->private_data;
+
+       mutex_lock(&dice->mutex);
+       dice_stream_stop(dice);
+       mutex_unlock(&dice->mutex);
+
+       return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int dice_prepare(struct snd_pcm_substream *substream)
+{
+       struct dice *dice = substream->private_data;
+       int err;
+
+       mutex_lock(&dice->mutex);
+
+       if (amdtp_out_streaming_error(&dice->stream))
+               dice_stream_stop_packets(dice);
+
+       err = dice_stream_start(dice);
+       if (err < 0) {
+               mutex_unlock(&dice->mutex);
+               return err;
+       }
+
+       mutex_unlock(&dice->mutex);
+
+       amdtp_out_stream_pcm_prepare(&dice->stream);
+
+       return 0;
+}
+
+static int dice_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+       struct dice *dice = substream->private_data;
+       struct snd_pcm_substream *pcm;
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+               pcm = substream;
+               break;
+       case SNDRV_PCM_TRIGGER_STOP:
+               pcm = NULL;
+               break;
+       default:
+               return -EINVAL;
+       }
+       amdtp_out_stream_pcm_trigger(&dice->stream, pcm);
+
+       return 0;
+}
+
+static snd_pcm_uframes_t dice_pointer(struct snd_pcm_substream *substream)
+{
+       struct dice *dice = substream->private_data;
+
+       return amdtp_out_stream_pcm_pointer(&dice->stream);
+}
+
+static int dice_create_pcm(struct dice *dice)
+{
+       static struct snd_pcm_ops ops = {
+               .open      = dice_open,
+               .close     = dice_close,
+               .ioctl     = snd_pcm_lib_ioctl,
+               .hw_params = dice_hw_params,
+               .hw_free   = dice_hw_free,
+               .prepare   = dice_prepare,
+               .trigger   = dice_trigger,
+               .pointer   = dice_pointer,
+               .page      = snd_pcm_lib_get_vmalloc_page,
+               .mmap      = snd_pcm_lib_mmap_vmalloc,
+       };
+       struct snd_pcm *pcm;
+       int err;
+
+       err = snd_pcm_new(dice->card, "DICE", 0, 1, 0, &pcm);
+       if (err < 0)
+               return err;
+       pcm->private_data = dice;
+       strcpy(pcm->name, dice->card->shortname);
+       pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->ops = &ops;
+
+       return 0;
+}
+
+static long dice_hwdep_read(struct snd_hwdep *hwdep, char __user *buf,
+                           long count, loff_t *offset)
+{
+       struct dice *dice = hwdep->private_data;
+       DEFINE_WAIT(wait);
+       union snd_firewire_event event;
+
+       spin_lock_irq(&dice->lock);
+
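+       /* Wait until a lock-status change or a device notification arrives. */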
+       while (!dice->dev_lock_changed && dice->notification_bits == 0) {
+               prepare_to_wait(&dice->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
+               spin_unlock_irq(&dice->lock);
+               schedule();
+               finish_wait(&dice->hwdep_wait, &wait);
+               if (signal_pending(current))
+                       return -ERESTARTSYS;
+               spin_lock_irq(&dice->lock);
+       }
+
+       memset(&event, 0, sizeof(event));
+       if (dice->dev_lock_changed) {
+               event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
+               event.lock_status.status = dice->dev_lock_count > 0;
+               dice->dev_lock_changed = false;
+
+               count = min(count, (long)sizeof(event.lock_status));
+       } else {
+               event.dice_notification.type = SNDRV_FIREWIRE_EVENT_DICE_NOTIFICATION;
+               event.dice_notification.notification = dice->notification_bits;
+               dice->notification_bits = 0;
+
+               count = min(count, (long)sizeof(event.dice_notification));
+       }
+
+       spin_unlock_irq(&dice->lock);
+
+       if (copy_to_user(buf, &event, count))
+               return -EFAULT;
+
+       return count;
+}
+
+static unsigned int dice_hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+                                   poll_table *wait)
+{
+       struct dice *dice = hwdep->private_data;
+       unsigned int events;
+
+       poll_wait(file, &dice->hwdep_wait, wait);
+
+       spin_lock_irq(&dice->lock);
+       if (dice->dev_lock_changed || dice->notification_bits != 0)
+               events = POLLIN | POLLRDNORM;
+       else
+               events = 0;
+       spin_unlock_irq(&dice->lock);
+
+       return events;
+}
+
+static int dice_hwdep_get_info(struct dice *dice, void __user *arg)
+{
+       struct fw_device *dev = fw_parent_device(dice->unit);
+       struct snd_firewire_get_info info;
+
+       memset(&info, 0, sizeof(info));
+       info.type = SNDRV_FIREWIRE_TYPE_DICE;
+       info.card = dev->card->index;
+       *(__be32 *)&info.guid[0] = cpu_to_be32(dev->config_rom[3]);
+       *(__be32 *)&info.guid[4] = cpu_to_be32(dev->config_rom[4]);
+       strlcpy(info.device_name, dev_name(&dev->device),
+               sizeof(info.device_name));
+
+       if (copy_to_user(arg, &info, sizeof(info)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int dice_hwdep_lock(struct dice *dice)
+{
+       int err;
+
+       spin_lock_irq(&dice->lock);
+
+       if (dice->dev_lock_count == 0) {
+               dice->dev_lock_count = -1;
+               err = 0;
+       } else {
+               err = -EBUSY;
+       }
+
+       spin_unlock_irq(&dice->lock);
+
+       return err;
+}
+
+static int dice_hwdep_unlock(struct dice *dice)
+{
+       int err;
+
+       spin_lock_irq(&dice->lock);
+
+       if (dice->dev_lock_count == -1) {
+               dice->dev_lock_count = 0;
+               err = 0;
+       } else {
+               err = -EBADFD;
+       }
+
+       spin_unlock_irq(&dice->lock);
+
+       return err;
+}
+
+static int dice_hwdep_release(struct snd_hwdep *hwdep, struct file *file)
+{
+       struct dice *dice = hwdep->private_data;
+
+       spin_lock_irq(&dice->lock);
+       if (dice->dev_lock_count == -1)
+               dice->dev_lock_count = 0;
+       spin_unlock_irq(&dice->lock);
+
+       return 0;
+}
+
+static int dice_hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
+                           unsigned int cmd, unsigned long arg)
+{
+       struct dice *dice = hwdep->private_data;
+
+       switch (cmd) {
+       case SNDRV_FIREWIRE_IOCTL_GET_INFO:
+               return dice_hwdep_get_info(dice, (void __user *)arg);
+       case SNDRV_FIREWIRE_IOCTL_LOCK:
+               return dice_hwdep_lock(dice);
+       case SNDRV_FIREWIRE_IOCTL_UNLOCK:
+               return dice_hwdep_unlock(dice);
+       default:
+               return -ENOIOCTLCMD;
+       }
+}
+
+#ifdef CONFIG_COMPAT
+static int dice_hwdep_compat_ioctl(struct snd_hwdep *hwdep, struct file *file,
+                                  unsigned int cmd, unsigned long arg)
+{
+       return dice_hwdep_ioctl(hwdep, file, cmd,
+                               (unsigned long)compat_ptr(arg));
+}
+#else
+#define dice_hwdep_compat_ioctl NULL
+#endif
+
+static int dice_create_hwdep(struct dice *dice)
+{
+       static const struct snd_hwdep_ops ops = {
+               .read         = dice_hwdep_read,
+               .release      = dice_hwdep_release,
+               .poll         = dice_hwdep_poll,
+               .ioctl        = dice_hwdep_ioctl,
+               .ioctl_compat = dice_hwdep_compat_ioctl,
+       };
+       struct snd_hwdep *hwdep;
+       int err;
+
+       err = snd_hwdep_new(dice->card, "DICE", 0, &hwdep);
+       if (err < 0)
+               return err;
+       strcpy(hwdep->name, "DICE");
+       hwdep->iface = SNDRV_HWDEP_IFACE_FW_DICE;
+       hwdep->ops = ops;
+       hwdep->private_data = dice;
+       hwdep->exclusive = true;
+
+       return 0;
+}
+
+static int dice_proc_read_mem(struct dice *dice, void *buffer,
+                             unsigned int offset_q, unsigned int quadlets)
+{
+       unsigned int i;
+       int err;
+
+       err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
+                                DICE_PRIVATE_SPACE + 4 * offset_q,
+                                buffer, 4 * quadlets, 0);
+       if (err < 0)
+               return err;
+
+       for (i = 0; i < quadlets; ++i)
+               be32_to_cpus(&((u32 *)buffer)[i]);
+
+       return 0;
+}
+
+static const char *str_from_array(const char *const strs[], unsigned int count,
+                                 unsigned int i)
+{
+       if (i < count)
+               return strs[i];
+       else
+               return "(unknown)";
+}
+
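+/*
+ * Fix up the byte order of a name string in place and make sure that it is
+ * NUL-terminated.
+ */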
+static void dice_proc_fixup_string(char *s, unsigned int size)
+{
+       unsigned int i;
+
+       for (i = 0; i < size; i += 4)
+               cpu_to_le32s((u32 *)(s + i));
+
+       for (i = 0; i < size - 2; ++i) {
+               if (s[i] == '\0')
+                       return;
+               if (s[i] == '\\' && s[i + 1] == '\\') {
+                       s[i + 2] = '\0';
+                       return;
+               }
+       }
+       s[size - 1] = '\0';
+}
+
+static void dice_proc_read(struct snd_info_entry *entry,
+                          struct snd_info_buffer *buffer)
+{
+       static const char *const section_names[5] = {
+               "global", "tx", "rx", "ext_sync", "unused2"
+       };
+       static const char *const clock_sources[] = {
+               "aes1", "aes2", "aes3", "aes4", "aes", "adat", "tdif",
+               "wc", "arx1", "arx2", "arx3", "arx4", "internal"
+       };
+       static const char *const rates[] = {
+               "32000", "44100", "48000", "88200", "96000", "176400", "192000",
+               "any low", "any mid", "any high", "none"
+       };
+       struct dice *dice = entry->private_data;
+       u32 sections[ARRAY_SIZE(section_names) * 2];
+       struct {
+               u32 number;
+               u32 size;
+       } tx_rx_header;
+       union {
+               struct {
+                       u32 owner_hi, owner_lo;
+                       u32 notification;
+                       char nick_name[NICK_NAME_SIZE];
+                       u32 clock_select;
+                       u32 enable;
+                       u32 status;
+                       u32 extended_status;
+                       u32 sample_rate;
+                       u32 version;
+                       u32 clock_caps;
+                       char clock_source_names[CLOCK_SOURCE_NAMES_SIZE];
+               } global;
+               struct {
+                       u32 iso;
+                       u32 number_audio;
+                       u32 number_midi;
+                       u32 speed;
+                       char names[TX_NAMES_SIZE];
+                       u32 ac3_caps;
+                       u32 ac3_enable;
+               } tx;
+               struct {
+                       u32 iso;
+                       u32 seq_start;
+                       u32 number_audio;
+                       u32 number_midi;
+                       char names[RX_NAMES_SIZE];
+                       u32 ac3_caps;
+                       u32 ac3_enable;
+               } rx;
+               struct {
+                       u32 clock_source;
+                       u32 locked;
+                       u32 rate;
+                       u32 adat_user_data;
+               } ext_sync;
+       } buf;
+       unsigned int quadlets, stream, i;
+
+       if (dice_proc_read_mem(dice, sections, 0, ARRAY_SIZE(sections)) < 0)
+               return;
+       snd_iprintf(buffer, "sections:\n");
+       for (i = 0; i < ARRAY_SIZE(section_names); ++i)
+               snd_iprintf(buffer, "  %s: offset %u, size %u\n",
+                           section_names[i],
+                           sections[i * 2], sections[i * 2 + 1]);
+
+       quadlets = min_t(u32, sections[1], sizeof(buf.global) / 4);
+       if (dice_proc_read_mem(dice, &buf.global, sections[0], quadlets) < 0)
+               return;
+       snd_iprintf(buffer, "global:\n");
+       snd_iprintf(buffer, "  owner: %04x:%04x%08x\n",
+                   buf.global.owner_hi >> 16,
+                   buf.global.owner_hi & 0xffff, buf.global.owner_lo);
+       snd_iprintf(buffer, "  notification: %08x\n", buf.global.notification);
+       dice_proc_fixup_string(buf.global.nick_name, NICK_NAME_SIZE);
+       snd_iprintf(buffer, "  nick name: %s\n", buf.global.nick_name);
+       snd_iprintf(buffer, "  clock select: %s %s\n",
+                   str_from_array(clock_sources, ARRAY_SIZE(clock_sources),
+                                  buf.global.clock_select & CLOCK_SOURCE_MASK),
+                   str_from_array(rates, ARRAY_SIZE(rates),
+                                  (buf.global.clock_select & CLOCK_RATE_MASK)
+                                  >> CLOCK_RATE_SHIFT));
+       snd_iprintf(buffer, "  enable: %u\n", buf.global.enable);
+       snd_iprintf(buffer, "  status: %slocked %s\n",
+                   buf.global.status & STATUS_SOURCE_LOCKED ? "" : "un",
+                   str_from_array(rates, ARRAY_SIZE(rates),
+                                  (buf.global.status &
+                                   STATUS_NOMINAL_RATE_MASK)
+                                  >> CLOCK_RATE_SHIFT));
+       snd_iprintf(buffer, "  ext status: %08x\n", buf.global.extended_status);
+       snd_iprintf(buffer, "  sample rate: %u\n", buf.global.sample_rate);
+       snd_iprintf(buffer, "  version: %u.%u.%u.%u\n",
+                   (buf.global.version >> 24) & 0xff,
+                   (buf.global.version >> 16) & 0xff,
+                   (buf.global.version >>  8) & 0xff,
+                   (buf.global.version >>  0) & 0xff);
+       if (quadlets >= 90) {
+               snd_iprintf(buffer, "  clock caps:");
+               for (i = 0; i <= 6; ++i)
+                       if (buf.global.clock_caps & (1 << i))
+                               snd_iprintf(buffer, " %s", rates[i]);
+               for (i = 0; i <= 12; ++i)
+                       if (buf.global.clock_caps & (1 << (16 + i)))
+                               snd_iprintf(buffer, " %s", clock_sources[i]);
+               snd_iprintf(buffer, "\n");
+               dice_proc_fixup_string(buf.global.clock_source_names,
+                                      CLOCK_SOURCE_NAMES_SIZE);
+               snd_iprintf(buffer, "  clock source names: %s\n",
+                           buf.global.clock_source_names);
+       }
+
+       if (dice_proc_read_mem(dice, &tx_rx_header, sections[2], 2) < 0)
+               return;
+       quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx) / 4);
+       for (stream = 0; stream < tx_rx_header.number; ++stream) {
+               if (dice_proc_read_mem(dice, &buf.tx, sections[2] + 2 +
+                                      stream * tx_rx_header.size,
+                                      quadlets) < 0)
+                       break;
+               snd_iprintf(buffer, "tx %u:\n", stream);
+               snd_iprintf(buffer, "  iso channel: %d\n", (int)buf.tx.iso);
+               snd_iprintf(buffer, "  audio channels: %u\n",
+                           buf.tx.number_audio);
+               snd_iprintf(buffer, "  midi ports: %u\n", buf.tx.number_midi);
+               snd_iprintf(buffer, "  speed: S%u\n", 100u << buf.tx.speed);
+               if (quadlets >= 68) {
+                       dice_proc_fixup_string(buf.tx.names, TX_NAMES_SIZE);
+                       snd_iprintf(buffer, "  names: %s\n", buf.tx.names);
+               }
+               if (quadlets >= 70) {
+                       snd_iprintf(buffer, "  ac3 caps: %08x\n",
+                                   buf.tx.ac3_caps);
+                       snd_iprintf(buffer, "  ac3 enable: %08x\n",
+                                   buf.tx.ac3_enable);
+               }
+       }
+
+       if (dice_proc_read_mem(dice, &tx_rx_header, sections[4], 2) < 0)
+               return;
+       quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.rx) / 4);
+       for (stream = 0; stream < tx_rx_header.number; ++stream) {
+               if (dice_proc_read_mem(dice, &buf.rx, sections[4] + 2 +
+                                      stream * tx_rx_header.size,
+                                      quadlets) < 0)
+                       break;
+               snd_iprintf(buffer, "rx %u:\n", stream);
+               snd_iprintf(buffer, "  iso channel: %d\n", (int)buf.rx.iso);
+               snd_iprintf(buffer, "  sequence start: %u\n", buf.rx.seq_start);
+               snd_iprintf(buffer, "  audio channels: %u\n",
+                           buf.rx.number_audio);
+               snd_iprintf(buffer, "  midi ports: %u\n", buf.rx.number_midi);
+               if (quadlets >= 68) {
+                       dice_proc_fixup_string(buf.rx.names, RX_NAMES_SIZE);
+                       snd_iprintf(buffer, "  names: %s\n", buf.rx.names);
+               }
+               if (quadlets >= 70) {
+                       snd_iprintf(buffer, "  ac3 caps: %08x\n",
+                                   buf.rx.ac3_caps);
+                       snd_iprintf(buffer, "  ac3 enable: %08x\n",
+                                   buf.rx.ac3_enable);
+               }
+       }
+
+       quadlets = min_t(u32, sections[7], sizeof(buf.ext_sync) / 4);
+       if (quadlets >= 4) {
+               if (dice_proc_read_mem(dice, &buf.ext_sync,
+                                      sections[6], 4) < 0)
+                       return;
+               snd_iprintf(buffer, "ext status:\n");
+               snd_iprintf(buffer, "  clock source: %s\n",
+                           str_from_array(clock_sources,
+                                          ARRAY_SIZE(clock_sources),
+                                          buf.ext_sync.clock_source));
+               snd_iprintf(buffer, "  locked: %u\n", buf.ext_sync.locked);
+               snd_iprintf(buffer, "  rate: %s\n",
+                           str_from_array(rates, ARRAY_SIZE(rates),
+                                          buf.ext_sync.rate));
+               snd_iprintf(buffer, "  adat user data: ");
+               if (buf.ext_sync.adat_user_data & ADAT_USER_DATA_NO_DATA)
+                       snd_iprintf(buffer, "-\n");
+               else
+                       snd_iprintf(buffer, "%x\n",
+                                   buf.ext_sync.adat_user_data);
+       }
+}
+
+static void dice_create_proc(struct dice *dice)
+{
+       struct snd_info_entry *entry;
+
+       if (!snd_card_proc_new(dice->card, "dice", &entry))
+               snd_info_set_text_ops(entry, dice, dice_proc_read);
+}
+
+static void dice_card_free(struct snd_card *card)
+{
+       struct dice *dice = card->private_data;
+
+       amdtp_out_stream_destroy(&dice->stream);
+       fw_core_remove_address_handler(&dice->notification_handler);
+       mutex_destroy(&dice->mutex);
+}
+
+#define OUI_WEISS              0x001c6a
+
+#define DICE_CATEGORY_ID       0x04
+#define WEISS_CATEGORY_ID      0x00
+
+static int dice_interface_check(struct fw_unit *unit)
+{
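+       /*
+        * Minimum acceptable values, in quadlets, for the five (offset, size)
+        * section pointer pairs read below.
+        */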
+       static const int min_values[10] = {
+               10, 0x64 / 4,
+               10, 0x18 / 4,
+               10, 0x18 / 4,
+               0, 0,
+               0, 0,
+       };
+       struct fw_device *device = fw_parent_device(unit);
+       struct fw_csr_iterator it;
+       int key, value, vendor = -1, model = -1, err;
+       unsigned int category, i;
+       __be32 pointers[ARRAY_SIZE(min_values)];
+       __be32 tx_data[4];
+       __be32 version;
+
+       /*
+        * Check that GUID and unit directory are constructed according to DICE
+        * rules, i.e., that the specifier ID is the GUID's OUI, and that the
+        * GUID chip ID consists of the 8-bit category ID, the 10-bit product
+        * ID, and a 22-bit serial number.
+        */
+       fw_csr_iterator_init(&it, unit->directory);
+       while (fw_csr_iterator_next(&it, &key, &value)) {
+               switch (key) {
+               case CSR_SPECIFIER_ID:
+                       vendor = value;
+                       break;
+               case CSR_MODEL:
+                       model = value;
+                       break;
+               }
+       }
+       if (vendor == OUI_WEISS)
+               category = WEISS_CATEGORY_ID;
+       else
+               category = DICE_CATEGORY_ID;
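+       /* config_rom[3]/[4] contain the GUID's high and low quadlets. */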
+       if (device->config_rom[3] != ((vendor << 8) | category) ||
+           device->config_rom[4] >> 22 != model)
+               return -ENODEV;
+
+       /*
+        * Check that the sub address spaces exist and are located inside the
+        * private address space.  The minimum values are chosen so that all
+        * minimally required registers are included.
+        */
+       err = snd_fw_transaction(unit, TCODE_READ_BLOCK_REQUEST,
+                                DICE_PRIVATE_SPACE,
+                                pointers, sizeof(pointers), 0);
+       if (err < 0)
+               return -ENODEV;
+       for (i = 0; i < ARRAY_SIZE(pointers); ++i) {
+               value = be32_to_cpu(pointers[i]);
+               if (value < min_values[i] || value >= 0x40000)
+                       return -ENODEV;
+       }
+
+       /* We support playback only. Let capture devices be handled by FFADO. */
+       err = snd_fw_transaction(unit, TCODE_READ_BLOCK_REQUEST,
+                                DICE_PRIVATE_SPACE +
+                                be32_to_cpu(pointers[2]) * 4,
+                                tx_data, sizeof(tx_data), 0);
+       if (err < 0 || (tx_data[0] && tx_data[3]))
+               return -ENODEV;
+
+       /*
+        * Check that the implemented DICE driver specification major version
+        * number matches.
+        */
+       err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
+                                DICE_PRIVATE_SPACE +
+                                be32_to_cpu(pointers[0]) * 4 + GLOBAL_VERSION,
+                                &version, 4, 0);
+       if (err < 0)
+               return -ENODEV;
+       if ((version & cpu_to_be32(0xff000000)) != cpu_to_be32(0x01000000)) {
+               dev_err(&unit->device,
+                       "unknown DICE version: 0x%08x\n", be32_to_cpu(version));
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int highest_supported_mode_rate(struct dice *dice, unsigned int mode)
+{
+       int i;
+
+       for (i = ARRAY_SIZE(dice_rates) - 1; i >= 0; --i)
+               if ((dice->clock_caps & (1 << i)) &&
+                   rate_index_to_mode(i) == mode)
+                       return i;
+
+       return -1;
+}
+
+static int dice_read_mode_params(struct dice *dice, unsigned int mode)
+{
+       __be32 values[2];
+       int rate_index, err;
+
+       rate_index = highest_supported_mode_rate(dice, mode);
+       if (rate_index < 0) {
+               dice->rx_channels[mode] = 0;
+               dice->rx_midi_ports[mode] = 0;
+               return 0;
+       }
+
+       err = dice_change_rate(dice, rate_index << CLOCK_RATE_SHIFT);
+       if (err < 0)
+               return err;
+
+       err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
+                                rx_address(dice, RX_NUMBER_AUDIO),
+                                values, 2 * 4, 0);
+       if (err < 0)
+               return err;
+
+       dice->rx_channels[mode]   = be32_to_cpu(values[0]);
+       dice->rx_midi_ports[mode] = be32_to_cpu(values[1]);
+
+       return 0;
+}
+
+static int dice_read_params(struct dice *dice)
+{
+       __be32 pointers[6];
+       __be32 value;
+       int mode, err;
+
+       err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
+                                DICE_PRIVATE_SPACE,
+                                pointers, sizeof(pointers), 0);
+       if (err < 0)
+               return err;
+
+       dice->global_offset = be32_to_cpu(pointers[0]) * 4;
+       dice->rx_offset = be32_to_cpu(pointers[4]) * 4;
+
+       /* some very old firmwares don't tell about their clock support */
+       if (be32_to_cpu(pointers[1]) * 4 >= GLOBAL_CLOCK_CAPABILITIES + 4) {
+               err = snd_fw_transaction(
+                               dice->unit, TCODE_READ_QUADLET_REQUEST,
+                               global_address(dice, GLOBAL_CLOCK_CAPABILITIES),
+                               &value, 4, 0);
+               if (err < 0)
+                       return err;
+               dice->clock_caps = be32_to_cpu(value);
+       } else {
+               /* this should be supported by any device */
+               dice->clock_caps = CLOCK_CAP_RATE_44100 |
+                                  CLOCK_CAP_RATE_48000 |
+                                  CLOCK_CAP_SOURCE_ARX1 |
+                                  CLOCK_CAP_SOURCE_INTERNAL;
+       }
+
+       for (mode = 2; mode >= 0; --mode) {
+               err = dice_read_mode_params(dice, mode);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
+static void dice_card_strings(struct dice *dice)
+{
+       struct snd_card *card = dice->card;
+       struct fw_device *dev = fw_parent_device(dice->unit);
+       char vendor[32], model[32];
+       unsigned int i;
+       int err;
+
+       strcpy(card->driver, "DICE");
+
+       strcpy(card->shortname, "DICE");
+       BUILD_BUG_ON(NICK_NAME_SIZE < sizeof(card->shortname));
+       err = snd_fw_transaction(dice->unit, TCODE_READ_BLOCK_REQUEST,
+                                global_address(dice, GLOBAL_NICK_NAME),
+                                card->shortname, sizeof(card->shortname), 0);
+       if (err >= 0) {
+               /* DICE strings are returned in "always-wrong" endianness */
+               BUILD_BUG_ON(sizeof(card->shortname) % 4 != 0);
+               for (i = 0; i < sizeof(card->shortname); i += 4)
+                       swab32s((u32 *)&card->shortname[i]);
+               card->shortname[sizeof(card->shortname) - 1] = '\0';
+       }
+
+       strcpy(vendor, "?");
+       fw_csr_string(dev->config_rom + 5, CSR_VENDOR, vendor, sizeof(vendor));
+       strcpy(model, "?");
+       fw_csr_string(dice->unit->directory, CSR_MODEL, model, sizeof(model));
+       snprintf(card->longname, sizeof(card->longname),
+                "%s %s (serial %u) at %s, S%d",
+                vendor, model, dev->config_rom[4] & 0x3fffff,
+                dev_name(&dice->unit->device), 100 << dev->max_speed);
+
+       strcpy(card->mixername, "DICE");
+}
+
+static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
+{
+       struct snd_card *card;
+       struct dice *dice;
+       __be32 clock_sel;
+       int err;
+
+       err = dice_interface_check(unit);
+       if (err < 0)
+               return err;
+
+       err = snd_card_create(-1, NULL, THIS_MODULE, sizeof(*dice), &card);
+       if (err < 0)
+               return err;
+       snd_card_set_dev(card, &unit->device);
+
+       dice = card->private_data;
+       dice->card = card;
+       spin_lock_init(&dice->lock);
+       mutex_init(&dice->mutex);
+       dice->unit = unit;
+       init_completion(&dice->clock_accepted);
+       init_waitqueue_head(&dice->hwdep_wait);
+
+       dice->notification_handler.length = 4;
+       dice->notification_handler.address_callback = dice_notification;
+       dice->notification_handler.callback_data = dice;
+       err = fw_core_add_address_handler(&dice->notification_handler,
+                                         &fw_high_memory_region);
+       if (err < 0)
+               goto err_mutex;
+
+       err = dice_owner_set(dice);
+       if (err < 0)
+               goto err_notification_handler;
+
+       err = dice_read_params(dice);
+       if (err < 0)
+               goto err_owner;
+
+       err = fw_iso_resources_init(&dice->resources, unit);
+       if (err < 0)
+               goto err_owner;
+       dice->resources.channels_mask = 0x00000000ffffffffuLL;
+
+       err = amdtp_out_stream_init(&dice->stream, unit,
+                                   CIP_BLOCKING | CIP_HI_DUALWIRE);
+       if (err < 0)
+               goto err_resources;
+
+       card->private_free = dice_card_free;
+
+       dice_card_strings(dice);
+
+       err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
+                                global_address(dice, GLOBAL_CLOCK_SELECT),
+                                &clock_sel, 4, 0);
+       if (err < 0)
+               goto error;
+       clock_sel &= cpu_to_be32(~CLOCK_SOURCE_MASK);
+       clock_sel |= cpu_to_be32(CLOCK_SOURCE_ARX1);
+       err = snd_fw_transaction(unit, TCODE_WRITE_QUADLET_REQUEST,
+                                global_address(dice, GLOBAL_CLOCK_SELECT),
+                                &clock_sel, 4, 0);
+       if (err < 0)
+               goto error;
+
+       err = dice_create_pcm(dice);
+       if (err < 0)
+               goto error;
+
+       err = dice_create_hwdep(dice);
+       if (err < 0)
+               goto error;
+
+       dice_create_proc(dice);
+
+       err = snd_card_register(card);
+       if (err < 0)
+               goto error;
+
+       dev_set_drvdata(&unit->device, dice);
+
+       return 0;
+
+err_resources:
+       fw_iso_resources_destroy(&dice->resources);
+err_owner:
+       dice_owner_clear(dice);
+err_notification_handler:
+       fw_core_remove_address_handler(&dice->notification_handler);
+err_mutex:
+       mutex_destroy(&dice->mutex);
+error:
+       snd_card_free(card);
+       return err;
+}
+
+static void dice_remove(struct fw_unit *unit)
+{
+       struct dice *dice = dev_get_drvdata(&unit->device);
+
+       amdtp_out_stream_pcm_abort(&dice->stream);
+
+       snd_card_disconnect(dice->card);
+
+       mutex_lock(&dice->mutex);
+
+       dice_stream_stop(dice);
+       dice_owner_clear(dice);
+
+       mutex_unlock(&dice->mutex);
+
+       snd_card_free_when_closed(dice->card);
+}
+
+static void dice_bus_reset(struct fw_unit *unit)
+{
+       struct dice *dice = dev_get_drvdata(&unit->device);
+
+       /*
+        * On a bus reset, the DICE firmware disables streaming and then goes
+        * off contemplating its own navel for hundreds of milliseconds before
+        * it can react to any of our attempts to reenable streaming.  This
+        * means that we lose synchronization anyway, so we force our streams
+        * to stop so that the application can restart them in an orderly
+        * manner.
+        */
+       amdtp_out_stream_pcm_abort(&dice->stream);
+
+       mutex_lock(&dice->mutex);
+
+       dice->global_enabled = false;
+       dice_stream_stop_packets(dice);
+
+       dice_owner_update(dice);
+
+       fw_iso_resources_update(&dice->resources);
+
+       mutex_unlock(&dice->mutex);
+}
+
+#define DICE_INTERFACE 0x000001
+
+static const struct ieee1394_device_id dice_id_table[] = {
+       {
+               .match_flags = IEEE1394_MATCH_VERSION,
+               .version     = DICE_INTERFACE,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(ieee1394, dice_id_table);
+
+static struct fw_driver dice_driver = {
+       .driver   = {
+               .owner  = THIS_MODULE,
+               .name   = KBUILD_MODNAME,
+               .bus    = &fw_bus_type,
+       },
+       .probe    = dice_probe,
+       .update   = dice_bus_reset,
+       .remove   = dice_remove,
+       .id_table = dice_id_table,
+};
+
+static int __init alsa_dice_init(void)
+{
+       return driver_register(&dice_driver.driver);
+}
+
+static void __exit alsa_dice_exit(void)
+{
+       driver_unregister(&dice_driver.driver);
+}
+
+module_init(alsa_dice_init);
+module_exit(alsa_dice_exit);
diff --git a/sound/firewire/fcp.c b/sound/firewire/fcp.c
index ec578b5ad8da1f45738b76f714574243db76a1db..860c08073c5924ef1fad28d2a3bfed747f947879 100644 (file)
@@ -90,7 +90,7 @@ int fcp_avc_transaction(struct fw_unit *unit,
                                          : TCODE_WRITE_BLOCK_REQUEST;
                ret = snd_fw_transaction(t.unit, tcode,
                                         CSR_REGISTER_BASE + CSR_FCP_COMMAND,
-                                        (void *)command, command_size);
+                                        (void *)command, command_size, 0);
                if (ret < 0)
                        break;
 
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 58a5afefdc69f39ef6720626ad823b5ae55060c9..fd42e6b315e69d90e39deda16d553fc0baf97395 100644 (file)
@@ -217,7 +217,7 @@ static void isight_packet(struct fw_iso_context *context, u32 cycle,
 
 static int isight_connect(struct isight *isight)
 {
-       int ch, err, rcode, errors = 0;
+       int ch, err;
        __be32 value;
 
 retry_after_bus_reset:
@@ -230,27 +230,19 @@ retry_after_bus_reset:
        }
 
        value = cpu_to_be32(ch | (isight->device->max_speed << SPEED_SHIFT));
-       for (;;) {
-               rcode = fw_run_transaction(
-                               isight->device->card,
-                               TCODE_WRITE_QUADLET_REQUEST,
-                               isight->device->node_id,
-                               isight->resources.generation,
-                               isight->device->max_speed,
-                               isight->audio_base + REG_ISO_TX_CONFIG,
-                               &value, 4);
-               if (rcode == RCODE_COMPLETE) {
-                       return 0;
-               } else if (rcode == RCODE_GENERATION) {
-                       fw_iso_resources_free(&isight->resources);
-                       goto retry_after_bus_reset;
-               } else if (rcode_is_permanent_error(rcode) || ++errors >= 3) {
-                       err = -EIO;
-                       goto err_resources;
-               }
-               msleep(5);
+       err = snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST,
+                                isight->audio_base + REG_ISO_TX_CONFIG,
+                                &value, 4, FW_FIXED_GENERATION |
+                                isight->resources.generation);
+       if (err == -EAGAIN) {
+               fw_iso_resources_free(&isight->resources);
+               goto retry_after_bus_reset;
+       } else if (err < 0) {
+               goto err_resources;
        }
 
+       return 0;
+
 err_resources:
        fw_iso_resources_free(&isight->resources);
 error:
@@ -315,17 +307,19 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
 static int reg_read(struct isight *isight, int offset, __be32 *value)
 {
        return snd_fw_transaction(isight->unit, TCODE_READ_QUADLET_REQUEST,
-                                 isight->audio_base + offset, value, 4);
+                                 isight->audio_base + offset, value, 4, 0);
 }
 
 static int reg_write(struct isight *isight, int offset, __be32 value)
 {
        return snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST,
-                                 isight->audio_base + offset, &value, 4);
+                                 isight->audio_base + offset, &value, 4, 0);
 }
 
 static void isight_stop_streaming(struct isight *isight)
 {
+       __be32 value;
+
        if (!isight->context)
                return;
 
@@ -333,7 +327,10 @@ static void isight_stop_streaming(struct isight *isight)
        fw_iso_context_destroy(isight->context);
        isight->context = NULL;
        fw_iso_resources_free(&isight->resources);
-       reg_write(isight, REG_AUDIO_ENABLE, 0);
+       value = 0;
+       snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST,
+                          isight->audio_base + REG_AUDIO_ENABLE,
+                          &value, 4, FW_QUIET);
 }
 
 static int isight_hw_free(struct snd_pcm_substream *substream)
diff --git a/sound/firewire/lib.c b/sound/firewire/lib.c
index 14eb414983720fcc629bb55ae962c43be97e8205..7409edba9f06761b11d8582bd60fb43bcc8a0ff6 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/module.h>
 #include "lib.h"
 
-#define ERROR_RETRY_DELAY_MS   5
+#define ERROR_RETRY_DELAY_MS   20
 
 /**
  * snd_fw_transaction - send a request and wait for its completion
@@ -20,6 +20,9 @@
  * @offset: the address in the target's address space
  * @buffer: input/output data
  * @length: length of @buffer
+ * @flags: use %FW_FIXED_GENERATION and add the generation value to attempt the
+ *         request only in that generation; use %FW_QUIET to suppress error
+ *         messages
  *
  * Submits an asynchronous request to the target device, and waits for the
  * response.  The node ID and the current generation are derived from @unit.
  * Returns zero on success, or a negative error code.
  */
 int snd_fw_transaction(struct fw_unit *unit, int tcode,
-                      u64 offset, void *buffer, size_t length)
+                      u64 offset, void *buffer, size_t length,
+                      unsigned int flags)
 {
        struct fw_device *device = fw_parent_device(unit);
        int generation, rcode, tries = 0;
 
+       generation = flags & FW_GENERATION_MASK;
        for (;;) {
-               generation = device->generation;
-               smp_rmb(); /* node_id vs. generation */
+               if (!(flags & FW_FIXED_GENERATION)) {
+                       generation = device->generation;
+                       smp_rmb(); /* node_id vs. generation */
+               }
                rcode = fw_run_transaction(device->card, tcode,
                                           device->node_id, generation,
                                           device->max_speed, offset,
@@ -43,9 +50,14 @@ int snd_fw_transaction(struct fw_unit *unit, int tcode,
                if (rcode == RCODE_COMPLETE)
                        return 0;
 
+               if (rcode == RCODE_GENERATION && (flags & FW_FIXED_GENERATION))
+                       return -EAGAIN;
+
                if (rcode_is_permanent_error(rcode) || ++tries >= 3) {
-                       dev_err(&unit->device, "transaction failed: %s\n",
-                               fw_rcode_string(rcode));
+                       if (!(flags & FW_QUIET))
+                               dev_err(&unit->device,
+                                       "transaction failed: %s\n",
+                                       fw_rcode_string(rcode));
                        return -EIO;
                }
 
index aef301476ea943bf9bbff8a9bfb8a28b6661a906..02cfabc9c3c4ba53d18e7b827600491fd9c2df84 100644 (file)
@@ -6,8 +6,13 @@
 
 struct fw_unit;
 
+#define FW_GENERATION_MASK     0x00ff
+#define FW_FIXED_GENERATION    0x0100
+#define FW_QUIET               0x0200
+
 int snd_fw_transaction(struct fw_unit *unit, int tcode,
-                      u64 offset, void *buffer, size_t length);
+                      u64 offset, void *buffer, size_t length,
+                      unsigned int flags);
 
 /* returns true if retrying the transaction would not make sense */
 static inline bool rcode_is_permanent_error(int rcode)
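
With the new prototype and flag bits above, a caller combines the flags with the bus generation in the low byte (FW_GENERATION_MASK). A minimal sketch of a quiet write pinned to a known generation, mirroring the generation handling used in scs_update() further below; 'offset' and 'buf' are illustrative placeholders, not from this patch:

static int example_fixed_quiet_write(struct fw_unit *unit, u64 offset, __be32 *buf)
{
        int generation;

        generation = fw_parent_device(unit)->generation;
        smp_rmb(); /* node_id vs. generation */

        /* FW_FIXED_GENERATION: return -EAGAIN on a bus reset instead of retrying;
         * FW_QUIET: do not log the failure */
        return snd_fw_transaction(unit, TCODE_WRITE_QUADLET_REQUEST,
                                  offset, buf, 4,
                                  FW_QUIET | FW_FIXED_GENERATION | generation);
}
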
index 505fc812319958e12e804a2917c6d6cd3b4df88d..858023cf4298c0cbf6022e16824ab26f085f2b59 100644 (file)
@@ -369,7 +369,7 @@ static int scs_init_hss_address(struct scs *scs)
        data = cpu_to_be64(((u64)HSS1394_TAG_CHANGE_ADDRESS << 56) |
                           scs->hss_handler.offset);
        err = snd_fw_transaction(scs->unit, TCODE_WRITE_BLOCK_REQUEST,
-                                HSS1394_ADDRESS, &data, 8);
+                                HSS1394_ADDRESS, &data, 8, 0);
        if (err < 0)
                dev_err(&scs->unit->device, "HSS1394 communication failed\n");
 
@@ -455,12 +455,16 @@ err_card:
 static void scs_update(struct fw_unit *unit)
 {
        struct scs *scs = dev_get_drvdata(&unit->device);
+       int generation;
        __be64 data;
 
        data = cpu_to_be64(((u64)HSS1394_TAG_CHANGE_ADDRESS << 56) |
                           scs->hss_handler.offset);
+       generation = fw_parent_device(unit)->generation;
+       smp_rmb(); /* node_id vs. generation */
        snd_fw_transaction(scs->unit, TCODE_WRITE_BLOCK_REQUEST,
-                          HSS1394_ADDRESS, &data, 8);
+                          HSS1394_ADDRESS, &data, 8,
+                          FW_FIXED_GENERATION | generation);
 }
 
 static void scs_remove(struct fw_unit *unit)
index fe9e6e2f2c5b2a825ae24a432541d45de8f029fb..cc8bc3a51bc147ab7a13c0a38ec84134b55f9da3 100644 (file)
@@ -52,7 +52,6 @@ struct fwspk {
        struct mutex mutex;
        struct cmp_connection connection;
        struct amdtp_out_stream stream;
-       bool stream_running;
        bool mute;
        s16 volume[6];
        s16 volume_min;
@@ -188,10 +187,9 @@ static int fwspk_close(struct snd_pcm_substream *substream)
 
 static void fwspk_stop_stream(struct fwspk *fwspk)
 {
-       if (fwspk->stream_running) {
+       if (amdtp_out_stream_running(&fwspk->stream)) {
                amdtp_out_stream_stop(&fwspk->stream);
                cmp_connection_break(&fwspk->connection);
-               fwspk->stream_running = false;
        }
 }
 
@@ -246,8 +244,10 @@ static int fwspk_hw_params(struct snd_pcm_substream *substream,
        if (err < 0)
                goto error;
 
-       amdtp_out_stream_set_rate(&fwspk->stream, params_rate(hw_params));
-       amdtp_out_stream_set_pcm(&fwspk->stream, params_channels(hw_params));
+       amdtp_out_stream_set_parameters(&fwspk->stream,
+                                       params_rate(hw_params),
+                                       params_channels(hw_params),
+                                       0);
 
        amdtp_out_stream_set_pcm_format(&fwspk->stream,
                                        params_format(hw_params));
@@ -285,7 +285,7 @@ static int fwspk_prepare(struct snd_pcm_substream *substream)
        if (amdtp_out_streaming_error(&fwspk->stream))
                fwspk_stop_stream(fwspk);
 
-       if (!fwspk->stream_running) {
+       if (!amdtp_out_stream_running(&fwspk->stream)) {
                err = cmp_connection_establish(&fwspk->connection,
                        amdtp_out_stream_get_max_payload(&fwspk->stream));
                if (err < 0)
@@ -296,8 +296,6 @@ static int fwspk_prepare(struct snd_pcm_substream *substream)
                                        fwspk->connection.speed);
                if (err < 0)
                        goto err_connection;
-
-               fwspk->stream_running = true;
        }
 
        mutex_unlock(&fwspk->mutex);
@@ -647,7 +645,7 @@ static u32 fwspk_read_firmware_version(struct fw_unit *unit)
        int err;
 
        err = snd_fw_transaction(unit, TCODE_READ_QUADLET_REQUEST,
-                                OXFORD_FIRMWARE_ID_ADDRESS, &data, 4);
+                                OXFORD_FIRMWARE_ID_ADDRESS, &data, 4, 0);
        return err >= 0 ? be32_to_cpu(data) : 0;
 }
 
index c0be085e4a200e4878097138b9294fe0d4a0855c..0e7254bde4c271cb9591724c2c8da0210c0d4426 100644 (file)
@@ -1544,7 +1544,7 @@ static int ess_has_rec_mixer (int submodel)
                return 1;
        default:
                return 0;
-       };
+       }
 };
 
 #ifdef FKS_LOGGING
index dc632cdc38706ec74c18978739e0819257e1d3a8..5f2acd35dcb9f4ccf26d6263e8fcba1f6a984683 100644 (file)
@@ -1913,6 +1913,7 @@ static int snd_asihpi_tuner_band_put(struct snd_kcontrol *kcontrol,
        struct snd_card_asihpi *asihpi = snd_kcontrol_chip(kcontrol);
        */
        u32 h_control = kcontrol->private_value;
+       unsigned int idx;
        u16 band;
        u16 tuner_bands[HPI_TUNER_BAND_LAST];
        u32 num_bands = 0;
@@ -1920,7 +1921,10 @@ static int snd_asihpi_tuner_band_put(struct snd_kcontrol *kcontrol,
        num_bands = asihpi_tuner_band_query(kcontrol, tuner_bands,
                        HPI_TUNER_BAND_LAST);
 
-       band = tuner_bands[ucontrol->value.enumerated.item[0]];
+       idx = ucontrol->value.enumerated.item[0];
+       if (idx >= ARRAY_SIZE(tuner_bands))
+               idx = ARRAY_SIZE(tuner_bands) - 1;
+       band = tuner_bands[idx];
        hpi_handle_error(hpi_tuner_set_band(h_control, band));
 
        return 1;
@@ -2383,7 +2387,8 @@ static int snd_asihpi_clksrc_put(struct snd_kcontrol *kcontrol,
        struct snd_card_asihpi *asihpi =
                        (struct snd_card_asihpi *)(kcontrol->private_data);
        struct clk_cache *clkcache = &asihpi->cc;
-       int change, item;
+       unsigned int item;
+       int change;
        u32 h_control = kcontrol->private_value;
 
        change = 1;
index b46dc9b24dbd70d7c564c9a8853d8855f33a9ce7..9fb03b4ea925cb173e3e898950c9b4736c0ff787 100644 (file)
@@ -671,7 +671,7 @@ static int snd_vortex_new_pcm(vortex_t *chip, int idx, int nr)
                        return err;
                break;
 #endif
-       };
+       }
 
        if (VORTEX_PCM_TYPE(pcm) == VORTEX_PCM_SPDIF) {
                for (i = 0; i < ARRAY_SIZE(snd_vortex_mixer_spdif); i++) {
index 8bef47311e45025aeeb6552054c50dd520a8ad9d..922a84bba2ef4738006e02dcc1c3920525ce8ec6 100644 (file)
@@ -219,7 +219,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
                */
                hwwrite(vortex->mmio, WT_RUN(wt), val);
                return 0xc;
-               break;
        case 1:         /* param 0 */
                /*
                printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
@@ -227,7 +226,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
                */
                hwwrite(vortex->mmio, WT_PARM(wt, 0), val);
                return 0xc;
-               break;
        case 2:         /* param 1 */
                /*
                printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
@@ -235,7 +233,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
                */
                hwwrite(vortex->mmio, WT_PARM(wt, 1), val);
                return 0xc;
-               break;
        case 3:         /* param 2 */
                /*
                printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
@@ -243,7 +240,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
                */
                hwwrite(vortex->mmio, WT_PARM(wt, 2), val);
                return 0xc;
-               break;
        case 4:         /* param 3 */
                /*
                printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
@@ -251,7 +247,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
                */
                hwwrite(vortex->mmio, WT_PARM(wt, 3), val);
                return 0xc;
-               break;
        case 6:         /* mute */
                /*
                printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
@@ -259,20 +254,17 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
                */
                hwwrite(vortex->mmio, WT_MUTE(wt), val);
                return 0xc;
-               break;
        case 0xb:
-               {               /* delay */
-                       /*
-                       printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
-                              WT_DELAY(wt,0), (int)val);
-                       */
-                       hwwrite(vortex->mmio, WT_DELAY(wt, 3), val);
-                       hwwrite(vortex->mmio, WT_DELAY(wt, 2), val);
-                       hwwrite(vortex->mmio, WT_DELAY(wt, 1), val);
-                       hwwrite(vortex->mmio, WT_DELAY(wt, 0), val);
-                       return 0xc;
-               }
-               break;
+                       /* delay */
+               /*
+               printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n",
+                      WT_DELAY(wt,0), (int)val);
+               */
+               hwwrite(vortex->mmio, WT_DELAY(wt, 3), val);
+               hwwrite(vortex->mmio, WT_DELAY(wt, 2), val);
+               hwwrite(vortex->mmio, WT_DELAY(wt, 1), val);
+               hwwrite(vortex->mmio, WT_DELAY(wt, 0), val);
+               return 0xc;
                /* Global WT block parameters */
        case 5:         /* sramp */
                ecx = WT_SRAMP(wt);
@@ -291,7 +283,6 @@ vortex_wt_SetReg(vortex_t * vortex, unsigned char reg, int wt,
                break;
        default:
                return 0;
-               break;
        }
        /*
        printk(KERN_DEBUG "vortex: WT SetReg(0x%x) = 0x%08x\n", ecx, (int)val);
index c8e1216115936015a73e67df84e2c83ecc0d1975..1aef7128f7caa97e0daacaeb312ee60d1ec9225f 100644 (file)
@@ -715,14 +715,14 @@ snd_azf3328_mixer_ac97_read(struct snd_ac97 *ac97, unsigned short reg_ac97)
        const struct snd_azf3328 *chip = ac97->private_data;
        unsigned short reg_azf = snd_azf3328_mixer_ac97_map_reg_idx(reg_ac97);
        unsigned short reg_val = 0;
-       bool unsupported = 0;
+       bool unsupported = false;
 
        snd_azf3328_dbgmixer(
                "snd_azf3328_mixer_ac97_read reg_ac97 %u\n",
                        reg_ac97
        );
        if (reg_azf & AZF_AC97_REG_UNSUPPORTED)
-               unsupported = 1;
+               unsupported = true;
        else {
                if (reg_azf & AZF_AC97_REG_REAL_IO_READ)
                        reg_val = snd_azf3328_mixer_inw(chip,
@@ -759,7 +759,7 @@ snd_azf3328_mixer_ac97_read(struct snd_ac97 *ac97, unsigned short reg_ac97)
                                reg_val = azf_emulated_ac97_vendor_id & 0xffff;
                                break;
                        default:
-                               unsupported = 1;
+                               unsupported = true;
                                break;
                        }
                }
@@ -776,14 +776,14 @@ snd_azf3328_mixer_ac97_write(struct snd_ac97 *ac97,
 {
        const struct snd_azf3328 *chip = ac97->private_data;
        unsigned short reg_azf = snd_azf3328_mixer_ac97_map_reg_idx(reg_ac97);
-       bool unsupported = 0;
+       bool unsupported = false;
 
        snd_azf3328_dbgmixer(
                "snd_azf3328_mixer_ac97_write reg_ac97 %u val %u\n",
                        reg_ac97, val
        );
        if (reg_azf & AZF_AC97_REG_UNSUPPORTED)
-               unsupported = 1;
+               unsupported = true;
        else {
                if (reg_azf & AZF_AC97_REG_REAL_IO_WRITE)
                        snd_azf3328_mixer_outw(
@@ -808,7 +808,7 @@ snd_azf3328_mixer_ac97_write(struct snd_ac97 *ac97,
                                 */
                                break;
                        default:
-                               unsupported = 1;
+                               unsupported = true;
                                break;
                        }
                }
@@ -1559,7 +1559,7 @@ snd_azf3328_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        struct snd_azf3328_codec_data *codec = runtime->private_data;
        int result = 0;
        u16 flags1;
-       bool previously_muted = 0;
+       bool previously_muted = false;
        bool is_main_mixer_playback_codec = (AZF_CODEC_PLAYBACK == codec->type);
 
        snd_azf3328_dbgcalls("snd_azf3328_pcm_trigger cmd %d\n", cmd);
index 0c00eb4088efca6639bf35c554aefa73f9e926e5..84f86bf63b8fc31dc27700013608a65a7af970bb 100644 (file)
@@ -33,7 +33,7 @@ struct daio_rsc_idx {
        unsigned short right;
 };
 
-struct daio_rsc_idx idx_20k1[NUM_DAIOTYP] = {
+static struct daio_rsc_idx idx_20k1[NUM_DAIOTYP] = {
        [LINEO1] = {.left = 0x00, .right = 0x01},
        [LINEO2] = {.left = 0x18, .right = 0x19},
        [LINEO3] = {.left = 0x08, .right = 0x09},
@@ -44,7 +44,7 @@ struct daio_rsc_idx idx_20k1[NUM_DAIOTYP] = {
        [SPDIFI1] = {.left = 0x95, .right = 0x9d},
 };
 
-struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
+static struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
        [LINEO1] = {.left = 0x40, .right = 0x41},
        [LINEO2] = {.left = 0x60, .right = 0x61},
        [LINEO3] = {.left = 0x50, .right = 0x51},
index 0275209ca82e33bb4fd0c69569c31e08993a87d2..1f9c7c4bbcd8b0f3db5fb8552a24fba5dd9467f8 100644 (file)
@@ -1182,15 +1182,20 @@ static int _snd_emu10k1_audigy_init_efx(struct snd_emu10k1 *emu)
        u32 *gpr_map;
        mm_segment_t seg;
 
-       if ((icode = kzalloc(sizeof(*icode), GFP_KERNEL)) == NULL ||
-           (icode->gpr_map = (u_int32_t __user *)
-            kcalloc(512 + 256 + 256 + 2 * 1024, sizeof(u_int32_t),
-                    GFP_KERNEL)) == NULL ||
-           (controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
-                               sizeof(*controls), GFP_KERNEL)) == NULL) {
-               err = -ENOMEM;
-               goto __err;
-       }
+       err = -ENOMEM;
+       icode = kzalloc(sizeof(*icode), GFP_KERNEL);
+       if (!icode)
+               return err;
+
+       icode->gpr_map = (u_int32_t __user *) kcalloc(512 + 256 + 256 + 2 * 1024,
+                                                     sizeof(u_int32_t), GFP_KERNEL);
+       if (!icode->gpr_map)
+               goto __err_gpr;
+       controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
+                          sizeof(*controls), GFP_KERNEL);
+       if (!controls)
+               goto __err_ctrls;
+
        gpr_map = (u32 __force *)icode->gpr_map;
 
        icode->tram_data_map = icode->gpr_map + 512;
@@ -1741,12 +1746,12 @@ A_OP(icode, &ptr, iMAC0, A_GPR(var), A_GPR(var), A_GPR(vol), A_EXTIN(input))
        emu->support_tlv = 0; /* clear again */
        snd_leave_user(seg);
 
- __err:
+__err:
        kfree(controls);
-       if (icode != NULL) {
-               kfree((void __force *)icode->gpr_map);
-               kfree(icode);
-       }
+__err_ctrls:
+       kfree((void __force *)icode->gpr_map);
+__err_gpr:
+       kfree(icode);
        return err;
 }
 
@@ -1813,18 +1818,26 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
        u32 *gpr_map;
        mm_segment_t seg;
 
-       if ((icode = kzalloc(sizeof(*icode), GFP_KERNEL)) == NULL)
-               return -ENOMEM;
-       if ((icode->gpr_map = (u_int32_t __user *)
-            kcalloc(256 + 160 + 160 + 2 * 512, sizeof(u_int32_t),
-                    GFP_KERNEL)) == NULL ||
-            (controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
-                               sizeof(struct snd_emu10k1_fx8010_control_gpr),
-                               GFP_KERNEL)) == NULL ||
-           (ipcm = kzalloc(sizeof(*ipcm), GFP_KERNEL)) == NULL) {
-               err = -ENOMEM;
-               goto __err;
-       }
+       err = -ENOMEM;
+       icode = kzalloc(sizeof(*icode), GFP_KERNEL);
+       if (!icode)
+               return err;
+
+       icode->gpr_map = (u_int32_t __user *) kcalloc(256 + 160 + 160 + 2 * 512,
+                                                     sizeof(u_int32_t), GFP_KERNEL);
+       if (!icode->gpr_map)
+               goto __err_gpr;
+
+       controls = kcalloc(SND_EMU10K1_GPR_CONTROLS,
+                          sizeof(struct snd_emu10k1_fx8010_control_gpr),
+                          GFP_KERNEL);
+       if (!controls)
+               goto __err_ctrls;
+
+       ipcm = kzalloc(sizeof(*ipcm), GFP_KERNEL);
+       if (!ipcm)
+               goto __err_ipcm;
+
        gpr_map = (u32 __force *)icode->gpr_map;
 
        icode->tram_data_map = icode->gpr_map + 256;
@@ -2363,13 +2376,14 @@ static int _snd_emu10k1_init_efx(struct snd_emu10k1 *emu)
        snd_leave_user(seg);
        if (err >= 0)
                err = snd_emu10k1_ipcm_poke(emu, ipcm);
-      __err:
+__err:
        kfree(ipcm);
+__err_ipcm:
        kfree(controls);
-       if (icode != NULL) {
-               kfree((void __force *)icode->gpr_map);
-               kfree(icode);
-       }
+__err_ctrls:
+       kfree((void __force *)icode->gpr_map);
+__err_gpr:
+       kfree(icode);
        return err;
 }
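
The reworked allocation paths above follow the usual staged-unwind shape: each successful allocation gets its own cleanup label, and a failure jumps to the label that frees only what already exists. A generic sketch of that pattern (names are illustrative, not driver code; assumes <linux/slab.h>):

static int example_alloc_two(void **a, void **b)
{
        *a = kzalloc(64, GFP_KERNEL);
        if (!*a)
                return -ENOMEM;

        *b = kzalloc(64, GFP_KERNEL);
        if (!*b)
                goto err_free_a;

        return 0;

err_free_a:
        kfree(*a);
        *a = NULL;
        return -ENOMEM;
}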
 
index 5b6c4e3c92ca40a7000909c1540d7d6787d36946..a1632f4056d49083232928d96e46323818913c34 100644 (file)
@@ -565,7 +565,7 @@ int snd_hda_get_raw_connections(struct hda_codec *codec, hda_nid_t nid,
                range_val = !!(parm & (1 << (shift-1))); /* ranges */
                val = parm & mask;
                if (val == 0 && null_count++) {  /* no second chance */
-                       snd_printk(KERN_WARNING "hda_codec: "
+                       snd_printdd("hda_codec: "
                                   "invalid CONNECT_LIST verb %x[%i]:%x\n",
                                    nid, i, parm);
                        return 0;
@@ -5395,11 +5395,6 @@ int snd_hda_multi_out_analog_prepare(struct hda_codec *codec,
                        snd_hda_codec_setup_stream(codec,
                                                   mout->hp_out_nid[i],
                                                   stream_tag, 0, format);
-       for (i = 0; i < ARRAY_SIZE(mout->extra_out_nid); i++)
-               if (!mout->no_share_stream && mout->extra_out_nid[i])
-                       snd_hda_codec_setup_stream(codec,
-                                                  mout->extra_out_nid[i],
-                                                  stream_tag, 0, format);
 
        /* surrounds */
        for (i = 1; i < mout->num_dacs; i++) {
@@ -5410,6 +5405,20 @@ int snd_hda_multi_out_analog_prepare(struct hda_codec *codec,
                        snd_hda_codec_setup_stream(codec, nids[i], stream_tag,
                                                   0, format);
        }
+
+       /* extra surrounds */
+       for (i = 0; i < ARRAY_SIZE(mout->extra_out_nid); i++) {
+               int ch = 0;
+               if (!mout->extra_out_nid[i])
+                       break;
+               if (chs >= (i + 1) * 2)
+                       ch = i * 2;
+               else if (!mout->no_share_stream)
+                       break;
+               snd_hda_codec_setup_stream(codec, mout->extra_out_nid[i],
+                                          stream_tag, ch, format);
+       }
+
        return 0;
 }
 EXPORT_SYMBOL_HDA(snd_hda_multi_out_analog_prepare);
index d0d7ac1e99d24cc606b441e6ec23470a75800dcd..f62356c2f54ceffbb7b8bf5e18d16fa6bef0729c 100644 (file)
@@ -478,10 +478,9 @@ static void hdmi_print_sad_info(int i, struct cea_sad *a,
                snd_iprintf(buffer, "sad%d_profile\t\t%d\n", i, a->profile);
 }
 
-static void hdmi_print_eld_info(struct snd_info_entry *entry,
-                               struct snd_info_buffer *buffer)
+void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
+                            struct snd_info_buffer *buffer)
 {
-       struct hdmi_eld *eld = entry->private_data;
        struct parsed_hdmi_eld *e = &eld->info;
        char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
        int i;
@@ -500,13 +499,10 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
                [4 ... 7] = "reserved"
        };
 
-       mutex_lock(&eld->lock);
        snd_iprintf(buffer, "monitor_present\t\t%d\n", eld->monitor_present);
        snd_iprintf(buffer, "eld_valid\t\t%d\n", eld->eld_valid);
-       if (!eld->eld_valid) {
-               mutex_unlock(&eld->lock);
+       if (!eld->eld_valid)
                return;
-       }
        snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
        snd_iprintf(buffer, "connection_type\t\t%s\n",
                                eld_connection_type_names[e->conn_type]);
@@ -528,13 +524,11 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
 
        for (i = 0; i < e->sad_count; i++)
                hdmi_print_sad_info(i, e->sad + i, buffer);
-       mutex_unlock(&eld->lock);
 }
 
-static void hdmi_write_eld_info(struct snd_info_entry *entry,
-                               struct snd_info_buffer *buffer)
+void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
+                            struct snd_info_buffer *buffer)
 {
-       struct hdmi_eld *eld = entry->private_data;
        struct parsed_hdmi_eld *e = &eld->info;
        char line[64];
        char name[64];
@@ -542,7 +536,6 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
        long long val;
        unsigned int n;
 
-       mutex_lock(&eld->lock);
        while (!snd_info_get_line(buffer, line, sizeof(line))) {
                if (sscanf(line, "%s %llx", name, &val) != 2)
                        continue;
@@ -594,38 +587,7 @@ static void hdmi_write_eld_info(struct snd_info_entry *entry,
                                e->sad_count = n + 1;
                }
        }
-       mutex_unlock(&eld->lock);
-}
-
-
-int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
-                        int index)
-{
-       char name[32];
-       struct snd_info_entry *entry;
-       int err;
-
-       snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
-       err = snd_card_proc_new(codec->bus->card, name, &entry);
-       if (err < 0)
-               return err;
-
-       snd_info_set_text_ops(entry, eld, hdmi_print_eld_info);
-       entry->c.text.write = hdmi_write_eld_info;
-       entry->mode |= S_IWUSR;
-       eld->proc_entry = entry;
-
-       return 0;
-}
-
-void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld)
-{
-       if (!codec->bus->shutdown && eld->proc_entry) {
-               snd_device_free(codec->bus->card, eld->proc_entry);
-               eld->proc_entry = NULL;
-       }
 }
-
 #endif /* CONFIG_PROC_FS */
 
 /* update PCM info based on ELD */
index ac41e9cdc976a1c190c51684519abb9c475c2c59..26ad4f0aade3fcf2c26940c008fae9a55c63244c 100644 (file)
@@ -3531,7 +3531,7 @@ static int create_capture_mixers(struct hda_codec *codec)
                if (!multi)
                        err = create_single_cap_vol_ctl(codec, n, vol, sw,
                                                        inv_dmic);
-               else if (!multi_cap_vol)
+               else if (!multi_cap_vol && !inv_dmic)
                        err = create_bind_cap_vol_ctl(codec, n, vol, sw);
                else
                        err = create_multi_cap_vol_ctl(codec);
index 2e7493ef8ee0643b1ee51b759198c233d23af6a5..46cddd4c7b72c5c592500a1ee921cf3a7140eff2 100644 (file)
@@ -428,6 +428,7 @@ enum {
        HDA_FIXUP_ACT_PROBE,
        HDA_FIXUP_ACT_INIT,
        HDA_FIXUP_ACT_BUILD,
+       HDA_FIXUP_ACT_FREE,
 };
 
 int snd_hda_add_verbs(struct hda_codec *codec, const struct hda_verb *list);
@@ -751,10 +752,6 @@ struct hdmi_eld {
        int     eld_size;
        char    eld_buffer[ELD_MAX_SIZE];
        struct parsed_hdmi_eld info;
-       struct mutex lock;
-#ifdef CONFIG_PROC_FS
-       struct snd_info_entry *proc_entry;
-#endif
 };
 
 int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid);
@@ -767,20 +764,10 @@ void snd_hdmi_eld_update_pcm_info(struct parsed_hdmi_eld *e,
                              struct hda_pcm_stream *hinfo);
 
 #ifdef CONFIG_PROC_FS
-int snd_hda_eld_proc_new(struct hda_codec *codec, struct hdmi_eld *eld,
-                        int index);
-void snd_hda_eld_proc_free(struct hda_codec *codec, struct hdmi_eld *eld);
-#else
-static inline int snd_hda_eld_proc_new(struct hda_codec *codec,
-                                      struct hdmi_eld *eld,
-                                      int index)
-{
-       return 0;
-}
-static inline void snd_hda_eld_proc_free(struct hda_codec *codec,
-                                        struct hdmi_eld *eld)
-{
-}
+void snd_hdmi_print_eld_info(struct hdmi_eld *eld,
+                            struct snd_info_buffer *buffer);
+void snd_hdmi_write_eld_info(struct hdmi_eld *eld,
+                            struct snd_info_buffer *buffer);
 #endif
 
 #define SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE 80
index 6e9876f27d959a64e14443ceab5efaed9f11d44b..54d14793725a7dd93623083d29f72f41a5ddc4bd 100644 (file)
@@ -759,7 +759,7 @@ struct ca0132_spec {
 /*
  * CA0132 codec access
  */
-unsigned int codec_send_command(struct hda_codec *codec, hda_nid_t nid,
+static unsigned int codec_send_command(struct hda_codec *codec, hda_nid_t nid,
                unsigned int verb, unsigned int parm, unsigned int *res)
 {
        unsigned int response;
index ec68eaea0336a008c78fb05c03a4d6c0e3f99c2a..993b25c17711dbb6b81b81c7f0129a5de2393eec 100644 (file)
@@ -3208,11 +3208,17 @@ static int cx_auto_init(struct hda_codec *codec)
        return 0;
 }
 
+static void cx_auto_free(struct hda_codec *codec)
+{
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_FREE);
+       snd_hda_gen_free(codec);
+}
+
 static const struct hda_codec_ops cx_auto_patch_ops = {
        .build_controls = cx_auto_build_controls,
        .build_pcms = snd_hda_gen_build_pcms,
        .init = cx_auto_init,
-       .free = snd_hda_gen_free,
+       .free = cx_auto_free,
        .unsol_event = snd_hda_jack_unsol_event,
 #ifdef CONFIG_PM
        .check_power_status = snd_hda_gen_check_power_status,
@@ -3232,8 +3238,84 @@ enum {
        CXT_FIXUP_HEADPHONE_MIC_PIN,
        CXT_FIXUP_HEADPHONE_MIC,
        CXT_FIXUP_GPIO1,
+       CXT_FIXUP_THINKPAD_ACPI,
 };
 
+#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
+
+#include <linux/thinkpad_acpi.h>
+
+static int (*led_set_func)(int, bool);
+
+static void update_tpacpi_mute_led(void *private_data, int enabled)
+{
+       struct hda_codec *codec = private_data;
+       struct conexant_spec *spec = codec->spec;
+
+       if (spec->dynamic_eapd)
+               cx_auto_vmaster_hook(private_data, enabled);
+
+       if (led_set_func)
+               led_set_func(TPACPI_LED_MUTE, !enabled);
+}
+
+static void update_tpacpi_micmute_led(struct hda_codec *codec,
+                                     struct snd_ctl_elem_value *ucontrol)
+{
+       if (!ucontrol || !led_set_func)
+               return;
+       if (strcmp("Capture Switch", ucontrol->id.name) == 0 && ucontrol->id.index == 0) {
+               /* TODO: How do I verify if it's a mono or stereo here? */
+               bool val = ucontrol->value.integer.value[0] || ucontrol->value.integer.value[1];
+               led_set_func(TPACPI_LED_MICMUTE, !val);
+       }
+}
+
+static void cxt_fixup_thinkpad_acpi(struct hda_codec *codec,
+                                 const struct hda_fixup *fix, int action)
+{
+       struct conexant_spec *spec = codec->spec;
+
+       bool removefunc = false;
+
+       if (action == HDA_FIXUP_ACT_PROBE) {
+               if (!led_set_func)
+                       led_set_func = symbol_request(tpacpi_led_set);
+               if (!led_set_func) {
+                       snd_printk(KERN_WARNING "Failed to find thinkpad-acpi symbol tpacpi_led_set\n");
+                       return;
+               }
+
+               removefunc = true;
+               if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
+                       spec->gen.vmaster_mute.hook = update_tpacpi_mute_led;
+                       removefunc = false;
+               }
+               if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
+                       if (spec->gen.num_adc_nids > 1)
+                               snd_printdd("Skipping micmute LED control due to several ADCs");
+                       else {
+                               spec->gen.cap_sync_hook = update_tpacpi_micmute_led;
+                               removefunc = false;
+                       }
+               }
+       }
+
+       if (led_set_func && (action == HDA_FIXUP_ACT_FREE || removefunc)) {
+               symbol_put(tpacpi_led_set);
+               led_set_func = NULL;
+       }
+}
+
+#else
+
+static void cxt_fixup_thinkpad_acpi(struct hda_codec *codec,
+                                 const struct hda_fixup *fix, int action)
+{
+}
+
+#endif
+
 static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
                                  const struct hda_fixup *fix, int action)
 {
@@ -3344,6 +3426,8 @@ static const struct hda_fixup cxt_fixups[] = {
        [CXT_PINCFG_LENOVO_TP410] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = cxt_pincfg_lenovo_tp410,
+               .chained = true,
+               .chain_id = CXT_FIXUP_THINKPAD_ACPI,
        },
        [CXT_PINCFG_LEMOTE_A1004] = {
                .type = HDA_FIXUP_PINS,
@@ -3385,6 +3469,10 @@ static const struct hda_fixup cxt_fixups[] = {
                        { }
                },
        },
+       [CXT_FIXUP_THINKPAD_ACPI] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cxt_fixup_thinkpad_acpi,
+       },
 };
 
 static const struct snd_pci_quirk cxt5051_fixups[] = {
@@ -3507,7 +3595,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
        return 0;
 
  error:
-       snd_hda_gen_free(codec);
+       cx_auto_free(codec);
        return err;
 }
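
The cxt_fixup_thinkpad_acpi() hunk above binds to thinkpad-acpi only when that driver is available, taking a reference on the exported tpacpi_led_set via symbol_request() and dropping it with symbol_put() at HDA_FIXUP_ACT_FREE time. The general shape of that optional-symbol pattern, sketched with a hypothetical export (requires <linux/module.h>; 'optional_led_set' is not a real symbol):

extern int optional_led_set(int which, bool on);

static int (*led_hook)(int, bool);

static void example_bind_optional(void)
{
        if (!led_hook)
                led_hook = symbol_request(optional_led_set); /* may load the providing module */
}

static void example_release_optional(void)
{
        if (led_hook) {
                symbol_put(optional_led_set); /* drop the module reference */
                led_hook = NULL;
        }
}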
 
index 7ea0245fc6bd5fe5579d81682a2d39ce6cd06af7..0f9b10322b2a818b869df52e918afd3882b59ca5 100644 (file)
@@ -63,9 +63,11 @@ struct hdmi_spec_per_pin {
        hda_nid_t pin_nid;
        int num_mux_nids;
        hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
+       hda_nid_t cvt_nid;
 
        struct hda_codec *codec;
        struct hdmi_eld sink_eld;
+       struct mutex lock;
        struct delayed_work work;
        struct snd_kcontrol *eld_ctl;
        int repoll_count;
@@ -75,6 +77,9 @@ struct hdmi_spec_per_pin {
        bool chmap_set;         /* channel-map override by ALSA API? */
        unsigned char chmap[8]; /* ALSA API channel-map */
        char pcm_name[8];       /* filled in build_pcm callbacks */
+#ifdef CONFIG_PROC_FS
+       struct snd_info_entry *proc_entry;
+#endif
 };
 
 struct hdmi_spec {
@@ -348,17 +353,19 @@ static int hdmi_eld_ctl_info(struct snd_kcontrol *kcontrol,
 {
        struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
        struct hdmi_spec *spec = codec->spec;
+       struct hdmi_spec_per_pin *per_pin;
        struct hdmi_eld *eld;
        int pin_idx;
 
        uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
 
        pin_idx = kcontrol->private_value;
-       eld = &get_pin(spec, pin_idx)->sink_eld;
+       per_pin = get_pin(spec, pin_idx);
+       eld = &per_pin->sink_eld;
 
-       mutex_lock(&eld->lock);
+       mutex_lock(&per_pin->lock);
        uinfo->count = eld->eld_valid ? eld->eld_size : 0;
-       mutex_unlock(&eld->lock);
+       mutex_unlock(&per_pin->lock);
 
        return 0;
 }
@@ -368,15 +375,17 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
 {
        struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
        struct hdmi_spec *spec = codec->spec;
+       struct hdmi_spec_per_pin *per_pin;
        struct hdmi_eld *eld;
        int pin_idx;
 
        pin_idx = kcontrol->private_value;
-       eld = &get_pin(spec, pin_idx)->sink_eld;
+       per_pin = get_pin(spec, pin_idx);
+       eld = &per_pin->sink_eld;
 
-       mutex_lock(&eld->lock);
+       mutex_lock(&per_pin->lock);
        if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
-               mutex_unlock(&eld->lock);
+               mutex_unlock(&per_pin->lock);
                snd_BUG();
                return -EINVAL;
        }
@@ -386,7 +395,7 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
        if (eld->eld_valid)
                memcpy(ucontrol->value.bytes.data, eld->eld_buffer,
                       eld->eld_size);
-       mutex_unlock(&eld->lock);
+       mutex_unlock(&per_pin->lock);
 
        return 0;
 }
@@ -477,6 +486,68 @@ static void hdmi_set_channel_count(struct hda_codec *codec,
                                    AC_VERB_SET_CVT_CHAN_COUNT, chs - 1);
 }
 
+/*
+ * ELD proc files
+ */
+
+#ifdef CONFIG_PROC_FS
+static void print_eld_info(struct snd_info_entry *entry,
+                          struct snd_info_buffer *buffer)
+{
+       struct hdmi_spec_per_pin *per_pin = entry->private_data;
+
+       mutex_lock(&per_pin->lock);
+       snd_hdmi_print_eld_info(&per_pin->sink_eld, buffer);
+       mutex_unlock(&per_pin->lock);
+}
+
+static void write_eld_info(struct snd_info_entry *entry,
+                          struct snd_info_buffer *buffer)
+{
+       struct hdmi_spec_per_pin *per_pin = entry->private_data;
+
+       mutex_lock(&per_pin->lock);
+       snd_hdmi_write_eld_info(&per_pin->sink_eld, buffer);
+       mutex_unlock(&per_pin->lock);
+}
+
+static int eld_proc_new(struct hdmi_spec_per_pin *per_pin, int index)
+{
+       char name[32];
+       struct hda_codec *codec = per_pin->codec;
+       struct snd_info_entry *entry;
+       int err;
+
+       snprintf(name, sizeof(name), "eld#%d.%d", codec->addr, index);
+       err = snd_card_proc_new(codec->bus->card, name, &entry);
+       if (err < 0)
+               return err;
+
+       snd_info_set_text_ops(entry, per_pin, print_eld_info);
+       entry->c.text.write = write_eld_info;
+       entry->mode |= S_IWUSR;
+       per_pin->proc_entry = entry;
+
+       return 0;
+}
+
+static void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
+{
+       if (!per_pin->codec->bus->shutdown && per_pin->proc_entry) {
+               snd_device_free(per_pin->codec->bus->card, per_pin->proc_entry);
+               per_pin->proc_entry = NULL;
+       }
+}
+#else
+static inline int eld_proc_new(struct hdmi_spec_per_pin *per_pin,
+                              int index)
+{
+       return 0;
+}
+static inline void eld_proc_free(struct hdmi_spec_per_pin *per_pin)
+{
+}
+#endif
 
 /*
  * Channel mapping routines
@@ -595,25 +666,35 @@ static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
                                       bool non_pcm,
                                       int ca)
 {
+       struct cea_channel_speaker_allocation *ch_alloc;
        int i;
        int err;
        int order;
        int non_pcm_mapping[8];
 
        order = get_channel_allocation_order(ca);
+       ch_alloc = &channel_allocations[order];
 
        if (hdmi_channel_mapping[ca][1] == 0) {
-               for (i = 0; i < channel_allocations[order].channels; i++)
-                       hdmi_channel_mapping[ca][i] = i | (i << 4);
-               for (; i < 8; i++)
-                       hdmi_channel_mapping[ca][i] = 0xf | (i << 4);
+               int hdmi_slot = 0;
+               /* fill actual channel mappings in ALSA channel (i) order */
+               for (i = 0; i < ch_alloc->channels; i++) {
+                       while (!ch_alloc->speakers[7 - hdmi_slot] && !WARN_ON(hdmi_slot >= 8))
+                               hdmi_slot++; /* skip zero slots */
+
+                       hdmi_channel_mapping[ca][i] = (i << 4) | hdmi_slot++;
+               }
+               /* fill the rest of the slots with ALSA channel 0xf */
+               for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++)
+                       if (!ch_alloc->speakers[7 - hdmi_slot])
+                               hdmi_channel_mapping[ca][i++] = (0xf << 4) | hdmi_slot;
        }
 
        if (non_pcm) {
-               for (i = 0; i < channel_allocations[order].channels; i++)
-                       non_pcm_mapping[i] = i | (i << 4);
+               for (i = 0; i < ch_alloc->channels; i++)
+                       non_pcm_mapping[i] = (i << 4) | i;
                for (; i < 8; i++)
-                       non_pcm_mapping[i] = 0xf | (i << 4);
+                       non_pcm_mapping[i] = (0xf << 4) | i;
        }
 
        for (i = 0; i < 8; i++) {
@@ -626,25 +707,31 @@ static void hdmi_std_setup_channel_mapping(struct hda_codec *codec,
                        break;
                }
        }
-
-       hdmi_debug_channel_mapping(codec, pin_nid);
 }
 
 struct channel_map_table {
        unsigned char map;              /* ALSA API channel map position */
-       unsigned char cea_slot;         /* CEA slot value */
        int spk_mask;                   /* speaker position bit mask */
 };
 
 static struct channel_map_table map_tables[] = {
-       { SNDRV_CHMAP_FL,       0x00,   FL },
-       { SNDRV_CHMAP_FR,       0x01,   FR },
-       { SNDRV_CHMAP_RL,       0x04,   RL },
-       { SNDRV_CHMAP_RR,       0x05,   RR },
-       { SNDRV_CHMAP_LFE,      0x02,   LFE },
-       { SNDRV_CHMAP_FC,       0x03,   FC },
-       { SNDRV_CHMAP_RLC,      0x06,   RLC },
-       { SNDRV_CHMAP_RRC,      0x07,   RRC },
+       { SNDRV_CHMAP_FL,       FL },
+       { SNDRV_CHMAP_FR,       FR },
+       { SNDRV_CHMAP_RL,       RL },
+       { SNDRV_CHMAP_RR,       RR },
+       { SNDRV_CHMAP_LFE,      LFE },
+       { SNDRV_CHMAP_FC,       FC },
+       { SNDRV_CHMAP_RLC,      RLC },
+       { SNDRV_CHMAP_RRC,      RRC },
+       { SNDRV_CHMAP_RC,       RC },
+       { SNDRV_CHMAP_FLC,      FLC },
+       { SNDRV_CHMAP_FRC,      FRC },
+       { SNDRV_CHMAP_FLH,      FLH },
+       { SNDRV_CHMAP_FRH,      FRH },
+       { SNDRV_CHMAP_FLW,      FLW },
+       { SNDRV_CHMAP_FRW,      FRW },
+       { SNDRV_CHMAP_TC,       TC },
+       { SNDRV_CHMAP_FCH,      FCH },
        {} /* terminator */
 };
 
@@ -660,25 +747,19 @@ static int to_spk_mask(unsigned char c)
 }
 
 /* from ALSA API channel position to CEA slot */
-static int to_cea_slot(unsigned char c)
+static int to_cea_slot(int ordered_ca, unsigned char pos)
 {
-       struct channel_map_table *t = map_tables;
-       for (; t->map; t++) {
-               if (t->map == c)
-                       return t->cea_slot;
-       }
-       return 0x0f;
-}
+       int mask = to_spk_mask(pos);
+       int i;
 
-/* from CEA slot to ALSA API channel position */
-static int from_cea_slot(unsigned char c)
-{
-       struct channel_map_table *t = map_tables;
-       for (; t->map; t++) {
-               if (t->cea_slot == c)
-                       return t->map;
+       if (mask) {
+               for (i = 0; i < 8; i++) {
+                       if (channel_allocations[ordered_ca].speakers[7 - i] == mask)
+                               return i;
+               }
        }
-       return 0;
+
+       return -1;
 }
 
 /* from speaker bit mask to ALSA API channel position */
@@ -692,6 +773,14 @@ static int spk_to_chmap(int spk)
        return 0;
 }
 
+/* from CEA slot to ALSA API channel position */
+static int from_cea_slot(int ordered_ca, unsigned char slot)
+{
+       int mask = channel_allocations[ordered_ca].speakers[7 - slot];
+
+       return spk_to_chmap(mask);
+}
+
 /* get the CA index corresponding to the given ALSA API channel map */
 static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
 {
@@ -718,16 +807,27 @@ static int hdmi_manual_channel_allocation(int chs, unsigned char *map)
 /* set up the channel slots for the given ALSA API channel map */
 static int hdmi_manual_setup_channel_mapping(struct hda_codec *codec,
                                             hda_nid_t pin_nid,
-                                            int chs, unsigned char *map)
+                                            int chs, unsigned char *map,
+                                            int ca)
 {
-       int i;
-       for (i = 0; i < 8; i++) {
+       int ordered_ca = get_channel_allocation_order(ca);
+       int alsa_pos, hdmi_slot;
+       int assignments[8] = {[0 ... 7] = 0xf};
+
+       for (alsa_pos = 0; alsa_pos < chs; alsa_pos++) {
+
+               hdmi_slot = to_cea_slot(ordered_ca, map[alsa_pos]);
+
+               if (hdmi_slot < 0)
+                       continue; /* unassigned channel */
+
+               assignments[hdmi_slot] = alsa_pos;
+       }
+
+       for (hdmi_slot = 0; hdmi_slot < 8; hdmi_slot++) {
                int val, err;
-               if (i < chs)
-                       val = to_cea_slot(map[i]);
-               else
-                       val = 0xf;
-               val |= (i << 4);
+
+               val = (assignments[hdmi_slot] << 4) | hdmi_slot;
                err = snd_hda_codec_write(codec, pin_nid, 0,
                                          AC_VERB_SET_HDMI_CHAN_SLOT, val);
                if (err)
@@ -740,9 +840,10 @@ static int hdmi_manual_setup_channel_mapping(struct hda_codec *codec,
 static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
 {
        int i;
+       int ordered_ca = get_channel_allocation_order(ca);
        for (i = 0; i < 8; i++) {
-               if (i < channel_allocations[ca].channels)
-                       map[i] = from_cea_slot((hdmi_channel_mapping[ca][i] >> 4) & 0x0f);
+               if (i < channel_allocations[ordered_ca].channels)
+                       map[i] = from_cea_slot(ordered_ca, hdmi_channel_mapping[ca][i] & 0x0f);
                else
                        map[i] = 0;
        }
@@ -755,11 +856,13 @@ static void hdmi_setup_channel_mapping(struct hda_codec *codec,
 {
        if (!non_pcm && chmap_set) {
                hdmi_manual_setup_channel_mapping(codec, pin_nid,
-                                                 channels, map);
+                                                 channels, map, ca);
        } else {
                hdmi_std_setup_channel_mapping(codec, pin_nid, non_pcm, ca);
                hdmi_setup_fake_chmap(map, ca);
        }
+
+       hdmi_debug_channel_mapping(codec, pin_nid);
 }
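
In both the standard and the manual mapping paths above, each value written with AC_VERB_SET_HDMI_CHAN_SLOT packs the ALSA channel index (or 0xf for an unused slot) into the high nibble and the HDMI slot number into the low nibble. A minimal illustrative helper, not part of the patch:

static inline int example_chan_slot_val(int alsa_pos, int hdmi_slot)
{
        /* high nibble: ALSA channel position (0xf = unused), low nibble: HDMI slot */
        return (alsa_pos << 4) | hdmi_slot;
}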
 
 /*
@@ -889,8 +992,9 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
 {
        hda_nid_t pin_nid = per_pin->pin_nid;
        int channels = per_pin->channels;
+       int active_channels;
        struct hdmi_eld *eld;
-       int ca;
+       int ca, ordered_ca;
        union audio_infoframe ai;
 
        if (!channels)
@@ -912,6 +1016,11 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
        if (ca < 0)
                ca = 0;
 
+       ordered_ca = get_channel_allocation_order(ca);
+       active_channels = channel_allocations[ordered_ca].channels;
+
+       hdmi_set_channel_count(codec, per_pin->cvt_nid, active_channels);
+
        memset(&ai, 0, sizeof(ai));
        if (eld->info.conn_type == 0) { /* HDMI */
                struct hdmi_audio_infoframe *hdmi_ai = &ai.hdmi;
@@ -919,7 +1028,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
                hdmi_ai->type           = 0x84;
                hdmi_ai->ver            = 0x01;
                hdmi_ai->len            = 0x0a;
-               hdmi_ai->CC02_CT47      = channels - 1;
+               hdmi_ai->CC02_CT47      = active_channels - 1;
                hdmi_ai->CA             = ca;
                hdmi_checksum_audio_infoframe(hdmi_ai);
        } else if (eld->info.conn_type == 1) { /* DisplayPort */
@@ -928,7 +1037,7 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
                dp_ai->type             = 0x84;
                dp_ai->len              = 0x1b;
                dp_ai->ver              = 0x11 << 2;
-               dp_ai->CC02_CT47        = channels - 1;
+               dp_ai->CC02_CT47        = active_channels - 1;
                dp_ai->CA               = ca;
        } else {
                snd_printd("HDMI: unknown connection type at pin %d\n",
@@ -936,6 +1045,14 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
                return;
        }
 
+       /*
+        * always configure channel mapping, it may have been changed by the
+        * user in the meantime
+        */
+       hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
+                                  channels, per_pin->chmap,
+                                  per_pin->chmap_set);
+
        /*
         * sizeof(ai) is used instead of sizeof(*hdmi_ai) or
         * sizeof(*dp_ai) to avoid partial match/update problems when
@@ -944,23 +1061,13 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
        if (!hdmi_infoframe_uptodate(codec, pin_nid, ai.bytes,
                                        sizeof(ai))) {
                snd_printdd("hdmi_setup_audio_infoframe: "
-                           "pin=%d channels=%d\n",
+                           "pin=%d channels=%d ca=0x%02x\n",
                            pin_nid,
-                           channels);
-               hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
-                                          channels, per_pin->chmap,
-                                          per_pin->chmap_set);
+                           active_channels, ca);
                hdmi_stop_infoframe_trans(codec, pin_nid);
                hdmi_fill_audio_infoframe(codec, pin_nid,
                                            ai.bytes, sizeof(ai));
                hdmi_start_infoframe_trans(codec, pin_nid);
-       } else {
-               /* For non-pcm audio switch, setup new channel mapping
-                * accordingly */
-               if (per_pin->non_pcm != non_pcm)
-                       hdmi_setup_channel_mapping(codec, pin_nid, non_pcm, ca,
-                                                  channels, per_pin->chmap,
-                                                  per_pin->chmap_set);
        }
 
        per_pin->non_pcm = non_pcm;
@@ -1219,6 +1326,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
        per_cvt = get_cvt(spec, cvt_idx);
        /* Claim converter */
        per_cvt->assigned = 1;
+       per_pin->cvt_nid = per_cvt->cvt_nid;
        hinfo->nid = per_cvt->cvt_nid;
 
        snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
@@ -1304,6 +1412,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
        bool update_eld = false;
        bool eld_changed = false;
 
+       mutex_lock(&per_pin->lock);
        pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
        if (pin_eld->monitor_present)
                eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
@@ -1333,11 +1442,10 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
                        queue_delayed_work(codec->bus->workq,
                                           &per_pin->work,
                                           msecs_to_jiffies(300));
-                       return;
+                       goto unlock;
                }
        }
 
-       mutex_lock(&pin_eld->lock);
        if (pin_eld->eld_valid && !eld->eld_valid) {
                update_eld = true;
                eld_changed = true;
@@ -1362,12 +1470,13 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
                        hdmi_setup_audio_infoframe(codec, per_pin,
                                                   per_pin->non_pcm);
        }
-       mutex_unlock(&pin_eld->lock);
 
        if (eld_changed)
                snd_ctl_notify(codec->bus->card,
                               SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
                               &per_pin->eld_ctl->id);
+ unlock:
+       mutex_unlock(&per_pin->lock);
 }
 
 static void hdmi_repoll_eld(struct work_struct *work)
@@ -1538,12 +1647,12 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
        bool non_pcm;
 
        non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
+       mutex_lock(&per_pin->lock);
        per_pin->channels = substream->runtime->channels;
        per_pin->setup = true;
 
-       hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
-
        hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
+       mutex_unlock(&per_pin->lock);
 
        return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
 }
@@ -1581,11 +1690,14 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
                per_pin = get_pin(spec, pin_idx);
 
                snd_hda_spdif_ctls_unassign(codec, pin_idx);
+
+               mutex_lock(&per_pin->lock);
                per_pin->chmap_set = false;
                memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
 
                per_pin->setup = false;
                per_pin->channels = 0;
+               mutex_unlock(&per_pin->lock);
        }
 
        return 0;
@@ -1620,8 +1732,6 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
        struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
        struct hda_codec *codec = info->private_data;
        struct hdmi_spec *spec = codec->spec;
-       const unsigned int valid_mask =
-               FL | FR | RL | RR | LFE | FC | RLC | RRC;
        unsigned int __user *dst;
        int chs, count = 0;
 
@@ -1639,8 +1749,6 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag,
                        int chs_bytes = chs * 4;
                        if (cap->channels != chs)
                                continue;
-                       if (cap->spk_mask & ~valid_mask)
-                               continue;
                        if (size < 8)
                                return -ENOMEM;
                        if (put_user(SNDRV_CTL_TLVT_CHMAP_VAR, dst) ||
@@ -1718,10 +1826,12 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
        ca = hdmi_manual_channel_allocation(ARRAY_SIZE(chmap), chmap);
        if (ca < 0)
                return -EINVAL;
+       mutex_lock(&per_pin->lock);
        per_pin->chmap_set = true;
        memcpy(per_pin->chmap, chmap, sizeof(chmap));
        if (prepared)
                hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
+       mutex_unlock(&per_pin->lock);
 
        return 0;
 }
@@ -1838,12 +1948,11 @@ static int generic_hdmi_init_per_pins(struct hda_codec *codec)
 
        for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
-               struct hdmi_eld *eld = &per_pin->sink_eld;
 
                per_pin->codec = codec;
-               mutex_init(&eld->lock);
+               mutex_init(&per_pin->lock);
                INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
-               snd_hda_eld_proc_new(codec, eld, pin_idx);
+               eld_proc_new(per_pin, pin_idx);
        }
        return 0;
 }
@@ -1884,10 +1993,9 @@ static void generic_hdmi_free(struct hda_codec *codec)
 
        for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
-               struct hdmi_eld *eld = &per_pin->sink_eld;
 
                cancel_delayed_work(&per_pin->work);
-               snd_hda_eld_proc_free(codec, eld);
+               eld_proc_free(per_pin);
        }
 
        flush_workqueue(codec->bus->workq);
@@ -2671,6 +2779,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
 { .id = 0x80862807, .name = "Haswell HDMI",    .patch = patch_generic_hdmi },
 { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
+{ .id = 0x80862882, .name = "Valleyview2 HDMI",        .patch = patch_generic_hdmi },
 { .id = 0x808629fb, .name = "Crestline HDMI",  .patch = patch_generic_hdmi },
 {} /* terminator */
 };
@@ -2725,6 +2834,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805");
 MODULE_ALIAS("snd-hda-codec-id:80862806");
 MODULE_ALIAS("snd-hda-codec-id:80862807");
 MODULE_ALIAS("snd-hda-codec-id:80862880");
+MODULE_ALIAS("snd-hda-codec-id:80862882");
 MODULE_ALIAS("snd-hda-codec-id:808629fb");
 
 MODULE_LICENSE("GPL");
index 0e303b99a47ceef3037e7b245ed5d93caf1f7779..bf313bea70858f8a4abd18ef6c60beca6668fea1 100644 (file)
@@ -2819,6 +2819,15 @@ static void alc269_fixup_hweq(struct hda_codec *codec,
        alc_write_coef_idx(codec, 0x1e, coef | 0x80);
 }
 
+static void alc269_fixup_headset_mic(struct hda_codec *codec,
+                                      const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+}
+
 static void alc271_fixup_dmic(struct hda_codec *codec,
                              const struct hda_fixup *fix, int action)
 {
@@ -3496,6 +3505,15 @@ static void alc282_fixup_asus_tx300(struct hda_codec *codec,
        }
 }
 
+static void alc290_fixup_mono_speakers(struct hda_codec *codec,
+                                      const struct hda_fixup *fix, int action)
+{
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               /* Remove DAC node 0x03, as it seems to be
+                  giving mono output */
+               snd_hda_override_wcaps(codec, 0x03, 0);
+}
+
 enum {
        ALC269_FIXUP_SONY_VAIO,
        ALC275_FIXUP_SONY_VAIO_GPIO2,
@@ -3507,6 +3525,7 @@ enum {
        ALC271_FIXUP_DMIC,
        ALC269_FIXUP_PCM_44K,
        ALC269_FIXUP_STEREO_DMIC,
+       ALC269_FIXUP_HEADSET_MIC,
        ALC269_FIXUP_QUANTA_MUTE,
        ALC269_FIXUP_LIFEBOOK,
        ALC269_FIXUP_AMIC,
@@ -3519,9 +3538,11 @@ enum {
        ALC269_FIXUP_HP_GPIO_LED,
        ALC269_FIXUP_INV_DMIC,
        ALC269_FIXUP_LENOVO_DOCK,
+       ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
        ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
        ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
+       ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
        ALC269_FIXUP_HEADSET_MODE,
        ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
        ALC269_FIXUP_ASUS_X101_FUNC,
@@ -3535,6 +3556,7 @@ enum {
        ALC283_FIXUP_CHROME_BOOK,
        ALC282_FIXUP_ASUS_TX300,
        ALC283_FIXUP_INT_MIC,
+       ALC290_FIXUP_MONO_SPEAKERS,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -3603,6 +3625,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc269_fixup_stereo_dmic,
        },
+       [ALC269_FIXUP_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_headset_mic,
+       },
        [ALC269_FIXUP_QUANTA_MUTE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc269_fixup_quanta_mute,
@@ -3712,6 +3738,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
        },
+       [ALC269_FIXUP_DELL3_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+       },
        [ALC269_FIXUP_HEADSET_MODE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_headset_mode,
@@ -3720,6 +3755,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_headset_mode_no_hp_mic,
        },
+       [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
        [ALC269_FIXUP_ASUS_X101_FUNC] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc269_fixup_x101_headset_mic,
@@ -3804,6 +3848,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
        },
+       [ALC290_FIXUP_MONO_SPEAKERS] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc290_fixup_mono_speakers,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -3845,6 +3895,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
        SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -3867,6 +3918,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
+       SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
        SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
        SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
@@ -3952,6 +4004,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC269_FIXUP_STEREO_DMIC, .name = "alc269-dmic"},
        {.id = ALC271_FIXUP_DMIC, .name = "alc271-dmic"},
        {.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"},
+       {.id = ALC269_FIXUP_HEADSET_MIC, .name = "headset-mic"},
        {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
        {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
        {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
@@ -4569,6 +4622,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+       SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
        SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
        SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
index 4f255dfee4504454702bfdec305b1b72820a26ac..f59a321a6d6af04d6dce32751410e4eb3f9d9df0 100644
@@ -4845,6 +4845,7 @@ static int snd_hdsp_hwdep_ioctl(struct snd_hwdep *hw, struct file *file, unsigne
                        if ((err = hdsp_get_iobox_version(hdsp)) < 0)
                                return err;
                }
+               memset(&hdsp_version, 0, sizeof(hdsp_version));
                hdsp_version.io_type = hdsp->io_type;
                hdsp_version.firmware_rev = hdsp->firmware_rev;
                if ((err = copy_to_user(argp, &hdsp_version, sizeof(hdsp_version))))
index 3cde55b753e26086c13569dbee92847db02c8a9c..2907e68150cb1ca1adb0c40a5a573ef7af18394b 100644
@@ -3996,7 +3996,6 @@ static int hdspm_tco_sync_check(struct hdspm *hdspm)
                                        return 1;
                        }
                        return 0;
-                       break;
                case AES32:
                        status = hdspm_read(hdspm, HDSPM_statusRegister);
                        if (status & HDSPM_tcoLockAes) {
@@ -4006,9 +4005,6 @@ static int hdspm_tco_sync_check(struct hdspm *hdspm)
                                        return 1;
                        }
                        return 0;
-
-                       break;
-
                case RayDAT:
                case AIO:
                        status = hdspm_read(hdspm, HDSPM_RD_STATUS_1);
@@ -4018,7 +4014,6 @@ static int hdspm_tco_sync_check(struct hdspm *hdspm)
                        if (status & 0x4000000)
                                return 1; /* Lock */
                        return 0; /* No signal */
-                       break;
 
                default:
                        break;
index 01aecc2b50738353ad8a96f618c01be631981240..0d1c27e911b8dde2cf1b69827e8743a31a67316f 100644
@@ -65,7 +65,7 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter)
         * already bound. If not it means binding failed, and then there
         * is no point in keeping the device instantiated.
         */
-       if (!keywest_ctx->client->driver) {
+       if (!keywest_ctx->client->dev.driver) {
                i2c_unregister_device(keywest_ctx->client);
                keywest_ctx->client = NULL;
                return -ENODEV;
@@ -76,7 +76,7 @@ static int keywest_attach_adapter(struct i2c_adapter *adapter)
         * This is safe because i2c-core holds the core_lock mutex for us.
         */
        list_add_tail(&keywest_ctx->client->detected,
-                     &keywest_ctx->client->driver->clients);
+                     &to_i2c_driver(keywest_ctx->client->dev.driver)->clients);
        return 0;
 }
 
index 61a64d281905e4b8997bc2ae3eb42ce387de9e7c..8b9e70105dd28f5ccceabad1cd2ea414a97640a1 100644
@@ -1,5 +1,5 @@
 snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
-snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o
+snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o
 
 ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
 snd-soc-core-objs += soc-generic-dmaengine-pcm.o
index 3109db7b9017cc08dd23ef6f9c70f9c605bcdf3e..8ae3fa5ac60a5fe9c59460663365c28827548163 100644
@@ -50,7 +50,7 @@ static int atmel_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
        buf->area = dma_alloc_coherent(pcm->card->dev, size,
                        &buf->addr, GFP_KERNEL);
        pr_debug("atmel-pcm: alloc dma buffer: area=%p, addr=%p, size=%zu\n",
-                       (void *)buf->area, (void *)buf->addr, size);
+                       (void *)buf->area, (void *)(long)buf->addr, size);
 
        if (!buf->area)
                return -ENOMEM;
@@ -68,18 +68,15 @@ int atmel_pcm_mmap(struct snd_pcm_substream *substream,
 }
 EXPORT_SYMBOL_GPL(atmel_pcm_mmap);
 
-static u64 atmel_pcm_dmamask = DMA_BIT_MASK(32);
-
 int atmel_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &atmel_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                pr_debug("atmel-pcm: allocating PCM playback DMA buffer\n");
index bb53dea85b17eefc55090dfb4190ee5863337a2d..8697cedccd21240f76b4b5076ba6d0968a394d75 100644
@@ -777,7 +777,7 @@ static int asoc_ssc_init(struct device *dev)
        if (ret) {
                dev_err(dev, "Could not register PCM: %d\n", ret);
                goto err_unregister_dai;
-       };
+       }
 
        return 0;
 
index 7222380131ea60441a6e05335250385b567abb2b..b4e36901a40b0e238c759dd38dd8c455eed61f44 100644
@@ -12,7 +12,6 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
 
 #include <sound/soc.h>
 
@@ -155,15 +154,8 @@ static int atmel_asoc_wm8904_probe(struct platform_device *pdev)
        struct snd_soc_card *card = &atmel_asoc_wm8904_card;
        struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
        struct clk *clk_src;
-       struct pinctrl *pinctrl;
        int id, ret;
 
-       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
-       if (IS_ERR(pinctrl)) {
-               dev_err(&pdev->dev, "failed to request pinctrl\n");
-               return PTR_ERR(pinctrl);
-       }
-
        card->dev = &pdev->dev;
        ret = atmel_asoc_wm8904_dt_init(pdev);
        if (ret) {
index 802717eccbd010d1a43f104e87004a28e56c3f30..f15bff1548f8fceb86c3f466201aab13107d9816 100644
@@ -37,6 +37,7 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
+#include <linux/of.h>
 
 #include <linux/atmel-ssc.h>
 
index 53f84085bf1fbd7c1daadc093ebd60d7f5dc7251..1d4c676eb6cc696e78f6722525923c91e3afe936 100644
@@ -415,19 +415,16 @@ static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
        }
 }
 
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int bf5xx_pcm_ac97_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
        pr_debug("%s enter\n", __func__);
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &bf5xx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = bf5xx_pcm_preallocate_dma_buffer(pcm,
index 9cb4a80df98eee2efa78b9b3a05a99baf64cc8b1..2a5b43417fd5a5810ac36d1cc258f97987e9066a 100644
@@ -323,18 +323,16 @@ static struct snd_pcm_ops bf5xx_pcm_i2s_ops = {
        .silence        = bf5xx_pcm_silence,
 };
 
-static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int bf5xx_pcm_i2s_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        size_t size = bf5xx_pcm_hardware.buffer_bytes_max;
+       int ret;
 
        pr_debug("%s enter\n", __func__);
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &bf5xx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
                                SNDRV_DMA_TYPE_DEV, card->dev, size, size);
index 2c20f01e1f7e8b8d8d6d623bba45288e2bedeb1a..06f938deda15bd0d888e7e6140d54f10ec9331b5 100644
@@ -1,6 +1,6 @@
 config SND_EP93XX_SOC
        tristate "SoC Audio support for the Cirrus Logic EP93xx series"
-       depends on ARCH_EP93XX && SND_SOC
+       depends on (ARCH_EP93XX || COMPILE_TEST) && SND_SOC
        select SND_SOC_GENERIC_DMAENGINE_PCM
        help
          Say Y or M if you want to add support for codecs attached to
index 0e9f56e0d4b2e7e6031cb30ecd007c7da04ba693..cfe517e6800969ab64c91c563c7b5839fc46da66 100644
@@ -57,9 +57,22 @@ static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param)
        return false;
 }
 
+static struct dma_chan *ep93xx_compat_request_channel(
+       struct snd_soc_pcm_runtime *rtd,
+       struct snd_pcm_substream *substream)
+{
+       struct snd_dmaengine_dai_dma_data *dma_data;
+
+       dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+       return snd_dmaengine_pcm_request_channel(ep93xx_pcm_dma_filter,
+                                                dma_data);
+}
+
 static const struct snd_dmaengine_pcm_config ep93xx_dmaengine_pcm_config = {
        .pcm_hardware = &ep93xx_pcm_hardware,
        .compat_filter_fn = ep93xx_pcm_dma_filter,
+       .compat_request_channel = ep93xx_compat_request_channel,
        .prealloc_buffer_size = 131072,
 };
 
index 259d1ac4492fc610a1454567bdabd52f56dbae5a..75d0ad5d2dcb38107934536ba7ef93410025c2f6 100644
@@ -16,6 +16,7 @@
 #include <linux/mfd/88pm860x.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/regmap.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -140,6 +141,7 @@ struct pm860x_priv {
        unsigned int            filter;
        struct snd_soc_codec    *codec;
        struct i2c_client       *i2c;
+       struct regmap           *regmap;
        struct pm860x_chip      *chip;
        struct pm860x_det       det;
 
@@ -269,48 +271,6 @@ static struct st_gain st_table[] = {
        {   -86, 29,  0}, {   -56, 30,  0}, {   -28, 31,  0}, {     0,  0,  0},
 };
 
-static int pm860x_volatile(unsigned int reg)
-{
-       BUG_ON(reg >= REG_CACHE_SIZE);
-
-       switch (reg) {
-       case PM860X_AUDIO_SUPPLIES_2:
-               return 1;
-       }
-
-       return 0;
-}
-
-static unsigned int pm860x_read_reg_cache(struct snd_soc_codec *codec,
-                                         unsigned int reg)
-{
-       unsigned char *cache = codec->reg_cache;
-
-       BUG_ON(reg >= REG_CACHE_SIZE);
-
-       if (pm860x_volatile(reg))
-               return cache[reg];
-
-       reg += REG_CACHE_BASE;
-
-       return pm860x_reg_read(codec->control_data, reg);
-}
-
-static int pm860x_write_reg_cache(struct snd_soc_codec *codec,
-                                 unsigned int reg, unsigned int value)
-{
-       unsigned char *cache = codec->reg_cache;
-
-       BUG_ON(reg >= REG_CACHE_SIZE);
-
-       if (!pm860x_volatile(reg))
-               cache[reg] = (unsigned char)value;
-
-       reg += REG_CACHE_BASE;
-
-       return pm860x_reg_write(codec->control_data, reg, value);
-}
-
 static int snd_soc_get_volsw_2r_st(struct snd_kcontrol *kcontrol,
                                   struct snd_ctl_elem_value *ucontrol)
 {
@@ -1169,6 +1129,7 @@ static int pm860x_i2s_set_dai_fmt(struct snd_soc_dai *codec_dai,
 static int pm860x_set_bias_level(struct snd_soc_codec *codec,
                                 enum snd_soc_bias_level level)
 {
+       struct pm860x_priv *pm860x = snd_soc_codec_get_drvdata(codec);
        int data;
 
        switch (level) {
@@ -1182,17 +1143,17 @@ static int pm860x_set_bias_level(struct snd_soc_codec *codec,
                if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
                        /* Enable Audio PLL & Audio section */
                        data = AUDIO_PLL | AUDIO_SECTION_ON;
-                       pm860x_reg_write(codec->control_data, REG_MISC2, data);
+                       pm860x_reg_write(pm860x->i2c, REG_MISC2, data);
                        udelay(300);
                        data = AUDIO_PLL | AUDIO_SECTION_RESET
                                | AUDIO_SECTION_ON;
-                       pm860x_reg_write(codec->control_data, REG_MISC2, data);
+                       pm860x_reg_write(pm860x->i2c, REG_MISC2, data);
                }
                break;
 
        case SND_SOC_BIAS_OFF:
                data = AUDIO_PLL | AUDIO_SECTION_RESET | AUDIO_SECTION_ON;
-               pm860x_set_bits(codec->control_data, REG_MISC2, data, 0);
+               pm860x_set_bits(pm860x->i2c, REG_MISC2, data, 0);
                break;
        }
        codec->dapm.bias_level = level;
@@ -1322,17 +1283,17 @@ int pm860x_hs_jack_detect(struct snd_soc_codec *codec,
        pm860x->det.lo_shrt = lo_shrt;
 
        if (det & SND_JACK_HEADPHONE)
-               pm860x_set_bits(codec->control_data, REG_HS_DET,
+               pm860x_set_bits(pm860x->i2c, REG_HS_DET,
                                EN_HS_DET, EN_HS_DET);
        /* headset short detect */
        if (hs_shrt) {
                data = CLR_SHORT_HS2 | CLR_SHORT_HS1;
-               pm860x_set_bits(codec->control_data, REG_SHORTS, data, data);
+               pm860x_set_bits(pm860x->i2c, REG_SHORTS, data, data);
        }
        /* Lineout short detect */
        if (lo_shrt) {
                data = CLR_SHORT_LO2 | CLR_SHORT_LO1;
-               pm860x_set_bits(codec->control_data, REG_SHORTS, data, data);
+               pm860x_set_bits(pm860x->i2c, REG_SHORTS, data, data);
        }
 
        /* sync status */
@@ -1350,7 +1311,7 @@ int pm860x_mic_jack_detect(struct snd_soc_codec *codec,
        pm860x->det.mic_det = det;
 
        if (det & SND_JACK_MICROPHONE)
-               pm860x_set_bits(codec->control_data, REG_MIC_DET,
+               pm860x_set_bits(pm860x->i2c, REG_MIC_DET,
                                MICDET_MASK, MICDET_MASK);
 
        /* sync status */
@@ -1366,7 +1327,7 @@ static int pm860x_probe(struct snd_soc_codec *codec)
 
        pm860x->codec = codec;
 
-       codec->control_data = pm860x->i2c;
+       codec->control_data = pm860x->regmap;
 
        for (i = 0; i < 4; i++) {
                ret = request_threaded_irq(pm860x->irq[i], NULL,
@@ -1380,14 +1341,6 @@ static int pm860x_probe(struct snd_soc_codec *codec)
 
        pm860x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
-       ret = pm860x_bulk_read(codec->control_data, REG_CACHE_BASE,
-                              REG_CACHE_SIZE, codec->reg_cache);
-       if (ret < 0) {
-               dev_err(codec->dev, "Failed to fill register cache: %d\n",
-                       ret);
-               goto out;
-       }
-
        return 0;
 
 out:
@@ -1410,10 +1363,6 @@ static int pm860x_remove(struct snd_soc_codec *codec)
 static struct snd_soc_codec_driver soc_codec_dev_pm860x = {
        .probe          = pm860x_probe,
        .remove         = pm860x_remove,
-       .read           = pm860x_read_reg_cache,
-       .write          = pm860x_write_reg_cache,
-       .reg_cache_size = REG_CACHE_SIZE,
-       .reg_word_size  = sizeof(u8),
        .set_bias_level = pm860x_set_bias_level,
 
        .controls = pm860x_snd_controls,
@@ -1439,6 +1388,8 @@ static int pm860x_codec_probe(struct platform_device *pdev)
        pm860x->chip = chip;
        pm860x->i2c = (chip->id == CHIP_PM8607) ? chip->client
                        : chip->companion;
+       pm860x->regmap = (chip->id == CHIP_PM8607) ? chip->regmap
+                       : chip->regmap_companion;
        platform_set_drvdata(pdev, pm860x);
 
        for (i = 0; i < 4; i++) {
index 3364ba4a36071677087b53bce314f8019cdbcb4a..f7282f4f4a7909e3a35e4af6f74d735d90c2f167 100644
 #ifndef __88PM860X_H
 #define __88PM860X_H
 
-/* The offset of these registers are 0xb0 */
-#define PM860X_PCM_IFACE_1             0x00
-#define PM860X_PCM_IFACE_2             0x01
-#define PM860X_PCM_IFACE_3             0x02
-#define PM860X_PCM_RATE                        0x03
-#define PM860X_EC_PATH                 0x04
-#define PM860X_SIDETONE_L_GAIN         0x05
-#define PM860X_SIDETONE_R_GAIN         0x06
-#define PM860X_SIDETONE_SHIFT          0x07
-#define PM860X_ADC_OFFSET_1            0x08
-#define PM860X_ADC_OFFSET_2            0x09
-#define PM860X_DMIC_DELAY              0x0a
+#define PM860X_PCM_IFACE_1             0xb0
+#define PM860X_PCM_IFACE_2             0xb1
+#define PM860X_PCM_IFACE_3             0xb2
+#define PM860X_PCM_RATE                        0xb3
+#define PM860X_EC_PATH                 0xb4
+#define PM860X_SIDETONE_L_GAIN         0xb5
+#define PM860X_SIDETONE_R_GAIN         0xb6
+#define PM860X_SIDETONE_SHIFT          0xb7
+#define PM860X_ADC_OFFSET_1            0xb8
+#define PM860X_ADC_OFFSET_2            0xb9
+#define PM860X_DMIC_DELAY              0xba
 
-#define PM860X_I2S_IFACE_1             0x0b
-#define PM860X_I2S_IFACE_2             0x0c
-#define PM860X_I2S_IFACE_3             0x0d
-#define PM860X_I2S_IFACE_4             0x0e
-#define PM860X_EQUALIZER_N0_1          0x0f
-#define PM860X_EQUALIZER_N0_2          0x10
-#define PM860X_EQUALIZER_N1_1          0x11
-#define PM860X_EQUALIZER_N1_2          0x12
-#define PM860X_EQUALIZER_D1_1          0x13
-#define PM860X_EQUALIZER_D1_2          0x14
-#define PM860X_LOFI_GAIN_LEFT          0x15
-#define PM860X_LOFI_GAIN_RIGHT         0x16
-#define PM860X_HIFIL_GAIN_LEFT         0x17
-#define PM860X_HIFIL_GAIN_RIGHT                0x18
-#define PM860X_HIFIR_GAIN_LEFT         0x19
-#define PM860X_HIFIR_GAIN_RIGHT                0x1a
-#define PM860X_DAC_OFFSET              0x1b
-#define PM860X_OFFSET_LEFT_1           0x1c
-#define PM860X_OFFSET_LEFT_2           0x1d
-#define PM860X_OFFSET_RIGHT_1          0x1e
-#define PM860X_OFFSET_RIGHT_2          0x1f
-#define PM860X_ADC_ANA_1               0x20
-#define PM860X_ADC_ANA_2               0x21
-#define PM860X_ADC_ANA_3               0x22
-#define PM860X_ADC_ANA_4               0x23
-#define PM860X_ANA_TO_ANA              0x24
-#define PM860X_HS1_CTRL                        0x25
-#define PM860X_HS2_CTRL                        0x26
-#define PM860X_LO1_CTRL                        0x27
-#define PM860X_LO2_CTRL                        0x28
-#define PM860X_EAR_CTRL_1              0x29
-#define PM860X_EAR_CTRL_2              0x2a
-#define PM860X_AUDIO_SUPPLIES_1                0x2b
-#define PM860X_AUDIO_SUPPLIES_2                0x2c
-#define PM860X_ADC_EN_1                        0x2d
-#define PM860X_ADC_EN_2                        0x2e
-#define PM860X_DAC_EN_1                        0x2f
-#define PM860X_DAC_EN_2                        0x31
-#define PM860X_AUDIO_CAL_1             0x32
-#define PM860X_AUDIO_CAL_2             0x33
-#define PM860X_AUDIO_CAL_3             0x34
-#define PM860X_AUDIO_CAL_4             0x35
-#define PM860X_AUDIO_CAL_5             0x36
-#define PM860X_ANA_INPUT_SEL_1         0x37
-#define PM860X_ANA_INPUT_SEL_2         0x38
+#define PM860X_I2S_IFACE_1             0xbb
+#define PM860X_I2S_IFACE_2             0xbc
+#define PM860X_I2S_IFACE_3             0xbd
+#define PM860X_I2S_IFACE_4             0xbe
+#define PM860X_EQUALIZER_N0_1          0xbf
+#define PM860X_EQUALIZER_N0_2          0xc0
+#define PM860X_EQUALIZER_N1_1          0xc1
+#define PM860X_EQUALIZER_N1_2          0xc2
+#define PM860X_EQUALIZER_D1_1          0xc3
+#define PM860X_EQUALIZER_D1_2          0xc4
+#define PM860X_LOFI_GAIN_LEFT          0xc5
+#define PM860X_LOFI_GAIN_RIGHT         0xc6
+#define PM860X_HIFIL_GAIN_LEFT         0xc7
+#define PM860X_HIFIL_GAIN_RIGHT                0xc8
+#define PM860X_HIFIR_GAIN_LEFT         0xc9
+#define PM860X_HIFIR_GAIN_RIGHT                0xca
+#define PM860X_DAC_OFFSET              0xcb
+#define PM860X_OFFSET_LEFT_1           0xcc
+#define PM860X_OFFSET_LEFT_2           0xcd
+#define PM860X_OFFSET_RIGHT_1          0xce
+#define PM860X_OFFSET_RIGHT_2          0xcf
+#define PM860X_ADC_ANA_1               0xd0
+#define PM860X_ADC_ANA_2               0xd1
+#define PM860X_ADC_ANA_3               0xd2
+#define PM860X_ADC_ANA_4               0xd3
+#define PM860X_ANA_TO_ANA              0xd4
+#define PM860X_HS1_CTRL                        0xd5
+#define PM860X_HS2_CTRL                        0xd6
+#define PM860X_LO1_CTRL                        0xd7
+#define PM860X_LO2_CTRL                        0xd8
+#define PM860X_EAR_CTRL_1              0xd9
+#define PM860X_EAR_CTRL_2              0xda
+#define PM860X_AUDIO_SUPPLIES_1                0xdb
+#define PM860X_AUDIO_SUPPLIES_2                0xdc
+#define PM860X_ADC_EN_1                        0xdd
+#define PM860X_ADC_EN_2                        0xde
+#define PM860X_DAC_EN_1                        0xdf
+#define PM860X_DAC_EN_2                        0xe1
+#define PM860X_AUDIO_CAL_1             0xe2
+#define PM860X_AUDIO_CAL_2             0xe3
+#define PM860X_AUDIO_CAL_3             0xe4
+#define PM860X_AUDIO_CAL_4             0xe5
+#define PM860X_AUDIO_CAL_5             0xe6
+#define PM860X_ANA_INPUT_SEL_1         0xe7
+#define PM860X_ANA_INPUT_SEL_2         0xe8
 
-#define PM860X_PCM_IFACE_4             0x39
-#define PM860X_I2S_IFACE_5             0x3a
+#define PM860X_PCM_IFACE_4             0xe9
+#define PM860X_I2S_IFACE_5             0xea
 
 #define PM860X_SHORTS                  0x3b
 #define PM860X_PLL_ADJ_1               0x3c
index 80555d7551e68018d5e66485eea1507b10c35b92..a0394a8f2257174738555b5cbf3b5901d5d5fd5b 100644
@@ -126,6 +126,8 @@ struct ab8500_codec_drvdata_dbg {
 
 /* Private data for AB8500 device-driver */
 struct ab8500_codec_drvdata {
+       struct regmap *regmap;
+
        /* Sidetone */
        long *sid_fir_values;
        enum sid_state sid_status;
@@ -166,49 +168,35 @@ static inline const char *amic_type_str(enum amic_type type)
  */
 
 /* Read a register from the audio-bank of AB8500 */
-static unsigned int ab8500_codec_read_reg(struct snd_soc_codec *codec,
-                                       unsigned int reg)
+static int ab8500_codec_read_reg(void *context, unsigned int reg,
+                                unsigned int *value)
 {
+       struct device *dev = context;
        int status;
-       unsigned int value = 0;
 
        u8 value8;
-       status = abx500_get_register_interruptible(codec->dev, AB8500_AUDIO,
-                                               reg, &value8);
-       if (status < 0) {
-               dev_err(codec->dev,
-                       "%s: ERROR: Register (0x%02x:0x%02x) read failed (%d).\n",
-                       __func__, (u8)AB8500_AUDIO, (u8)reg, status);
-       } else {
-               dev_dbg(codec->dev,
-                       "%s: Read 0x%02x from register 0x%02x:0x%02x\n",
-                       __func__, value8, (u8)AB8500_AUDIO, (u8)reg);
-               value = (unsigned int)value8;
-       }
+       status = abx500_get_register_interruptible(dev, AB8500_AUDIO,
+                                                  reg, &value8);
+       *value = (unsigned int)value8;
 
-       return value;
+       return status;
 }
 
 /* Write to a register in the audio-bank of AB8500 */
-static int ab8500_codec_write_reg(struct snd_soc_codec *codec,
-                               unsigned int reg, unsigned int value)
+static int ab8500_codec_write_reg(void *context, unsigned int reg,
+                                 unsigned int value)
 {
-       int status;
-
-       status = abx500_set_register_interruptible(codec->dev, AB8500_AUDIO,
-                                               reg, value);
-       if (status < 0)
-               dev_err(codec->dev,
-                       "%s: ERROR: Register (%02x:%02x) write failed (%d).\n",
-                       __func__, (u8)AB8500_AUDIO, (u8)reg, status);
-       else
-               dev_dbg(codec->dev,
-                       "%s: Wrote 0x%02x into register %02x:%02x\n",
-                       __func__, (u8)value, (u8)AB8500_AUDIO, (u8)reg);
+       struct device *dev = context;
 
-       return status;
+       return abx500_set_register_interruptible(dev, AB8500_AUDIO,
+                                                reg, value);
 }
 
+static const struct regmap_config ab8500_codec_regmap = {
+       .reg_read = ab8500_codec_read_reg,
+       .reg_write = ab8500_codec_write_reg,
+};
+
 /*
  * Controls - DAPM
  */
@@ -2485,9 +2473,13 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
 
        dev_dbg(dev, "%s: Enter.\n", __func__);
 
+       snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
+
        /* Setup AB8500 according to board-settings */
        pdata = dev_get_platdata(dev->parent);
 
+       codec->control_data = drvdata->regmap;
+
        if (np) {
                if (!pdata)
                        pdata = devm_kzalloc(dev,
@@ -2532,12 +2524,10 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
        }
 
        /* Override HW-defaults */
-       ab8500_codec_write_reg(codec,
-                               AB8500_ANACONF5,
-                               BIT(AB8500_ANACONF5_HSAUTOEN));
-       ab8500_codec_write_reg(codec,
-                               AB8500_SHORTCIRCONF,
-                               BIT(AB8500_SHORTCIRCONF_HSZCDDIS));
+       snd_soc_write(codec, AB8500_ANACONF5,
+                     BIT(AB8500_ANACONF5_HSAUTOEN));
+       snd_soc_write(codec, AB8500_SHORTCIRCONF,
+                     BIT(AB8500_SHORTCIRCONF_HSZCDDIS));
 
        /* Add filter controls */
        status = snd_soc_add_codec_controls(codec, ab8500_filter_controls,
@@ -2567,9 +2557,6 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
 
 static struct snd_soc_codec_driver ab8500_codec_driver = {
        .probe =                ab8500_codec_probe,
-       .read =                 ab8500_codec_read_reg,
-       .write =                ab8500_codec_write_reg,
-       .reg_word_size =        sizeof(u8),
        .controls =             ab8500_ctrls,
        .num_controls =         ARRAY_SIZE(ab8500_ctrls),
        .dapm_widgets =         ab8500_dapm_widgets,
@@ -2592,6 +2579,15 @@ static int ab8500_codec_driver_probe(struct platform_device *pdev)
        drvdata->anc_status = ANC_UNCONFIGURED;
        dev_set_drvdata(&pdev->dev, drvdata);
 
+       drvdata->regmap = devm_regmap_init(&pdev->dev, NULL, &pdev->dev,
+                                          &ab8500_codec_regmap);
+       if (IS_ERR(drvdata->regmap)) {
+               status = PTR_ERR(drvdata->regmap);
+               dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
+                       __func__, status);
+               return status;
+       }
+
        dev_dbg(&pdev->dev, "%s: Register codec.\n", __func__);
        status = snd_soc_register_codec(&pdev->dev, &ab8500_codec_driver,
                                ab8500_codec_dai,
@@ -2606,7 +2602,7 @@ static int ab8500_codec_driver_probe(struct platform_device *pdev)
 
 static int ab8500_codec_driver_remove(struct platform_device *pdev)
 {
-       dev_info(&pdev->dev, "%s Enter.\n", __func__);
+       dev_dbg(&pdev->dev, "%s Enter.\n", __func__);
 
        snd_soc_unregister_codec(&pdev->dev);
 
index 1aa10ddf3a614ab73085d883daa06638bfdfb77c..59654b1e7f3fbcd1262dca4c1066e505980aee2b 100644
@@ -32,6 +32,7 @@ struct adau1373_dai {
 };
 
 struct adau1373 {
+       struct regmap *regmap;
        struct adau1373_dai dais[3];
 };
 
@@ -73,7 +74,6 @@ struct adau1373 {
 #define ADAU1373_PLL_CTRL4(x)  (0x2c + (x) * 7)
 #define ADAU1373_PLL_CTRL5(x)  (0x2d + (x) * 7)
 #define ADAU1373_PLL_CTRL6(x)  (0x2e + (x) * 7)
-#define ADAU1373_PLL_CTRL7(x)  (0x2f + (x) * 7)
 #define ADAU1373_HEADDECT      0x36
 #define ADAU1373_ADC_DAC_STATUS        0x37
 #define ADAU1373_ADC_CTRL      0x3c
@@ -152,37 +152,172 @@ struct adau1373 {
 #define ADAU1373_EP_CTRL_MICBIAS1_OFFSET 4
 #define ADAU1373_EP_CTRL_MICBIAS2_OFFSET 2
 
-static const uint8_t adau1373_default_regs[] = {
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* 0x30 */
-       0x00, 0x00, 0x00, 0x80, 0x00, 0x01, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x0a, 0x00, /* 0x40 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, /* 0x50 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0x80 */
-       0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00,
-       0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0x90 */
-       0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00,
-       0x78, 0x18, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, /* 0xa0 */
-       0x00, 0xc0, 0x88, 0x7a, 0xdf, 0x20, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0 */
-       0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0 */
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, /* 0xe0 */
-       0x00, 0x1f, 0x0f, 0x00, 0x00,
+static const struct reg_default adau1373_reg_defaults[] = {
+       { ADAU1373_INPUT_MODE,          0x00 },
+       { ADAU1373_AINL_CTRL(0),        0x00 },
+       { ADAU1373_AINR_CTRL(0),        0x00 },
+       { ADAU1373_AINL_CTRL(1),        0x00 },
+       { ADAU1373_AINR_CTRL(1),        0x00 },
+       { ADAU1373_AINL_CTRL(2),        0x00 },
+       { ADAU1373_AINR_CTRL(2),        0x00 },
+       { ADAU1373_AINL_CTRL(3),        0x00 },
+       { ADAU1373_AINR_CTRL(3),        0x00 },
+       { ADAU1373_LLINE_OUT(0),        0x00 },
+       { ADAU1373_RLINE_OUT(0),        0x00 },
+       { ADAU1373_LLINE_OUT(1),        0x00 },
+       { ADAU1373_RLINE_OUT(1),        0x00 },
+       { ADAU1373_LSPK_OUT,            0x00 },
+       { ADAU1373_RSPK_OUT,            0x00 },
+       { ADAU1373_LHP_OUT,             0x00 },
+       { ADAU1373_RHP_OUT,             0x00 },
+       { ADAU1373_ADC_GAIN,            0x00 },
+       { ADAU1373_LADC_MIXER,          0x00 },
+       { ADAU1373_RADC_MIXER,          0x00 },
+       { ADAU1373_LLINE1_MIX,          0x00 },
+       { ADAU1373_RLINE1_MIX,          0x00 },
+       { ADAU1373_LLINE2_MIX,          0x00 },
+       { ADAU1373_RLINE2_MIX,          0x00 },
+       { ADAU1373_LSPK_MIX,            0x00 },
+       { ADAU1373_RSPK_MIX,            0x00 },
+       { ADAU1373_LHP_MIX,             0x00 },
+       { ADAU1373_RHP_MIX,             0x00 },
+       { ADAU1373_EP_MIX,              0x00 },
+       { ADAU1373_HP_CTRL,             0x00 },
+       { ADAU1373_HP_CTRL2,            0x00 },
+       { ADAU1373_LS_CTRL,             0x00 },
+       { ADAU1373_EP_CTRL,             0x00 },
+       { ADAU1373_MICBIAS_CTRL1,       0x00 },
+       { ADAU1373_MICBIAS_CTRL2,       0x00 },
+       { ADAU1373_OUTPUT_CTRL,         0x00 },
+       { ADAU1373_PWDN_CTRL1,          0x00 },
+       { ADAU1373_PWDN_CTRL2,          0x00 },
+       { ADAU1373_PWDN_CTRL3,          0x00 },
+       { ADAU1373_DPLL_CTRL(0),        0x00 },
+       { ADAU1373_PLL_CTRL1(0),        0x00 },
+       { ADAU1373_PLL_CTRL2(0),        0x00 },
+       { ADAU1373_PLL_CTRL3(0),        0x00 },
+       { ADAU1373_PLL_CTRL4(0),        0x00 },
+       { ADAU1373_PLL_CTRL5(0),        0x00 },
+       { ADAU1373_PLL_CTRL6(0),        0x02 },
+       { ADAU1373_DPLL_CTRL(1),        0x00 },
+       { ADAU1373_PLL_CTRL1(1),        0x00 },
+       { ADAU1373_PLL_CTRL2(1),        0x00 },
+       { ADAU1373_PLL_CTRL3(1),        0x00 },
+       { ADAU1373_PLL_CTRL4(1),        0x00 },
+       { ADAU1373_PLL_CTRL5(1),        0x00 },
+       { ADAU1373_PLL_CTRL6(1),        0x02 },
+       { ADAU1373_HEADDECT,            0x00 },
+       { ADAU1373_ADC_CTRL,            0x00 },
+       { ADAU1373_CLK_SRC_DIV(0),      0x00 },
+       { ADAU1373_CLK_SRC_DIV(1),      0x00 },
+       { ADAU1373_DAI(0),              0x0a },
+       { ADAU1373_DAI(1),              0x0a },
+       { ADAU1373_DAI(2),              0x0a },
+       { ADAU1373_BCLKDIV(0),          0x00 },
+       { ADAU1373_BCLKDIV(1),          0x00 },
+       { ADAU1373_BCLKDIV(2),          0x00 },
+       { ADAU1373_SRC_RATIOA(0),       0x00 },
+       { ADAU1373_SRC_RATIOB(0),       0x00 },
+       { ADAU1373_SRC_RATIOA(1),       0x00 },
+       { ADAU1373_SRC_RATIOB(1),       0x00 },
+       { ADAU1373_SRC_RATIOA(2),       0x00 },
+       { ADAU1373_SRC_RATIOB(2),       0x00 },
+       { ADAU1373_DEEMP_CTRL,          0x00 },
+       { ADAU1373_SRC_DAI_CTRL(0),     0x08 },
+       { ADAU1373_SRC_DAI_CTRL(1),     0x08 },
+       { ADAU1373_SRC_DAI_CTRL(2),     0x08 },
+       { ADAU1373_DIN_MIX_CTRL(0),     0x00 },
+       { ADAU1373_DIN_MIX_CTRL(1),     0x00 },
+       { ADAU1373_DIN_MIX_CTRL(2),     0x00 },
+       { ADAU1373_DIN_MIX_CTRL(3),     0x00 },
+       { ADAU1373_DIN_MIX_CTRL(4),     0x00 },
+       { ADAU1373_DOUT_MIX_CTRL(0),    0x00 },
+       { ADAU1373_DOUT_MIX_CTRL(1),    0x00 },
+       { ADAU1373_DOUT_MIX_CTRL(2),    0x00 },
+       { ADAU1373_DOUT_MIX_CTRL(3),    0x00 },
+       { ADAU1373_DOUT_MIX_CTRL(4),    0x00 },
+       { ADAU1373_DAI_PBL_VOL(0),      0x00 },
+       { ADAU1373_DAI_PBR_VOL(0),      0x00 },
+       { ADAU1373_DAI_PBL_VOL(1),      0x00 },
+       { ADAU1373_DAI_PBR_VOL(1),      0x00 },
+       { ADAU1373_DAI_PBL_VOL(2),      0x00 },
+       { ADAU1373_DAI_PBR_VOL(2),      0x00 },
+       { ADAU1373_DAI_RECL_VOL(0),     0x00 },
+       { ADAU1373_DAI_RECR_VOL(0),     0x00 },
+       { ADAU1373_DAI_RECL_VOL(1),     0x00 },
+       { ADAU1373_DAI_RECR_VOL(1),     0x00 },
+       { ADAU1373_DAI_RECL_VOL(2),     0x00 },
+       { ADAU1373_DAI_RECR_VOL(2),     0x00 },
+       { ADAU1373_DAC1_PBL_VOL,        0x00 },
+       { ADAU1373_DAC1_PBR_VOL,        0x00 },
+       { ADAU1373_DAC2_PBL_VOL,        0x00 },
+       { ADAU1373_DAC2_PBR_VOL,        0x00 },
+       { ADAU1373_ADC_RECL_VOL,        0x00 },
+       { ADAU1373_ADC_RECR_VOL,        0x00 },
+       { ADAU1373_DMIC_RECL_VOL,       0x00 },
+       { ADAU1373_DMIC_RECR_VOL,       0x00 },
+       { ADAU1373_VOL_GAIN1,           0x00 },
+       { ADAU1373_VOL_GAIN2,           0x00 },
+       { ADAU1373_VOL_GAIN3,           0x00 },
+       { ADAU1373_HPF_CTRL,            0x00 },
+       { ADAU1373_BASS1,               0x00 },
+       { ADAU1373_BASS2,               0x00 },
+       { ADAU1373_DRC(0) + 0x0,        0x78 },
+       { ADAU1373_DRC(0) + 0x1,        0x18 },
+       { ADAU1373_DRC(0) + 0x2,        0x00 },
+       { ADAU1373_DRC(0) + 0x3,        0x00 },
+       { ADAU1373_DRC(0) + 0x4,        0x00 },
+       { ADAU1373_DRC(0) + 0x5,        0xc0 },
+       { ADAU1373_DRC(0) + 0x6,        0x00 },
+       { ADAU1373_DRC(0) + 0x7,        0x00 },
+       { ADAU1373_DRC(0) + 0x8,        0x00 },
+       { ADAU1373_DRC(0) + 0x9,        0xc0 },
+       { ADAU1373_DRC(0) + 0xa,        0x88 },
+       { ADAU1373_DRC(0) + 0xb,        0x7a },
+       { ADAU1373_DRC(0) + 0xc,        0xdf },
+       { ADAU1373_DRC(0) + 0xd,        0x20 },
+       { ADAU1373_DRC(0) + 0xe,        0x00 },
+       { ADAU1373_DRC(0) + 0xf,        0x00 },
+       { ADAU1373_DRC(1) + 0x0,        0x78 },
+       { ADAU1373_DRC(1) + 0x1,        0x18 },
+       { ADAU1373_DRC(1) + 0x2,        0x00 },
+       { ADAU1373_DRC(1) + 0x3,        0x00 },
+       { ADAU1373_DRC(1) + 0x4,        0x00 },
+       { ADAU1373_DRC(1) + 0x5,        0xc0 },
+       { ADAU1373_DRC(1) + 0x6,        0x00 },
+       { ADAU1373_DRC(1) + 0x7,        0x00 },
+       { ADAU1373_DRC(1) + 0x8,        0x00 },
+       { ADAU1373_DRC(1) + 0x9,        0xc0 },
+       { ADAU1373_DRC(1) + 0xa,        0x88 },
+       { ADAU1373_DRC(1) + 0xb,        0x7a },
+       { ADAU1373_DRC(1) + 0xc,        0xdf },
+       { ADAU1373_DRC(1) + 0xd,        0x20 },
+       { ADAU1373_DRC(1) + 0xe,        0x00 },
+       { ADAU1373_DRC(1) + 0xf,        0x00 },
+       { ADAU1373_DRC(2) + 0x0,        0x78 },
+       { ADAU1373_DRC(2) + 0x1,        0x18 },
+       { ADAU1373_DRC(2) + 0x2,        0x00 },
+       { ADAU1373_DRC(2) + 0x3,        0x00 },
+       { ADAU1373_DRC(2) + 0x4,        0x00 },
+       { ADAU1373_DRC(2) + 0x5,        0xc0 },
+       { ADAU1373_DRC(2) + 0x6,        0x00 },
+       { ADAU1373_DRC(2) + 0x7,        0x00 },
+       { ADAU1373_DRC(2) + 0x8,        0x00 },
+       { ADAU1373_DRC(2) + 0x9,        0xc0 },
+       { ADAU1373_DRC(2) + 0xa,        0x88 },
+       { ADAU1373_DRC(2) + 0xb,        0x7a },
+       { ADAU1373_DRC(2) + 0xc,        0xdf },
+       { ADAU1373_DRC(2) + 0xd,        0x20 },
+       { ADAU1373_DRC(2) + 0xe,        0x00 },
+       { ADAU1373_DRC(2) + 0xf,        0x00 },
+       { ADAU1373_3D_CTRL1,            0x00 },
+       { ADAU1373_3D_CTRL2,            0x00 },
+       { ADAU1373_FDSP_SEL1,           0x00 },
+       { ADAU1373_FDSP_SEL2,           0x00 },
+       { ADAU1373_FDSP_SEL2,           0x00 },
+       { ADAU1373_FDSP_SEL4,           0x00 },
+       { ADAU1373_DIGMICCTRL,          0x00 },
+       { ADAU1373_DIGEN,               0x00 },
 };
 
 static const unsigned int adau1373_out_tlv[] = {
@@ -418,6 +553,7 @@ static int adau1373_pll_event(struct snd_soc_dapm_widget *w,
        struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = w->codec;
+       struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
        unsigned int pll_id = w->name[3] - '1';
        unsigned int val;
 
@@ -426,7 +562,7 @@ static int adau1373_pll_event(struct snd_soc_dapm_widget *w,
        else
                val = 0;
 
-       snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id),
+       regmap_update_bits(adau1373->regmap, ADAU1373_PLL_CTRL6(pll_id),
                ADAU1373_PLL_CTRL6_PLL_EN, val);
 
        if (SND_SOC_DAPM_EVENT_ON(event))
@@ -938,7 +1074,7 @@ static int adau1373_hw_params(struct snd_pcm_substream *substream,
 
        adau1373_dai->enable_src = (div != 0);
 
-       snd_soc_update_bits(codec, ADAU1373_BCLKDIV(dai->id),
+       regmap_update_bits(adau1373->regmap, ADAU1373_BCLKDIV(dai->id),
                ADAU1373_BCLKDIV_SR_MASK | ADAU1373_BCLKDIV_BCLK_MASK,
                (div << 2) | ADAU1373_BCLKDIV_64);
 
@@ -959,7 +1095,7 @@ static int adau1373_hw_params(struct snd_pcm_substream *substream,
                return -EINVAL;
        }
 
-       return snd_soc_update_bits(codec, ADAU1373_DAI(dai->id),
+       return regmap_update_bits(adau1373->regmap, ADAU1373_DAI(dai->id),
                        ADAU1373_DAI_WLEN_MASK, ctrl);
 }
 
@@ -1016,7 +1152,7 @@ static int adau1373_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                return -EINVAL;
        }
 
-       snd_soc_update_bits(codec, ADAU1373_DAI(dai->id),
+       regmap_update_bits(adau1373->regmap, ADAU1373_DAI(dai->id),
                ~ADAU1373_DAI_WLEN_MASK, ctrl);
 
        return 0;
@@ -1039,7 +1175,7 @@ static int adau1373_set_dai_sysclk(struct snd_soc_dai *dai,
        adau1373_dai->sysclk = freq;
        adau1373_dai->clk_src = clk_id;
 
-       snd_soc_update_bits(dai->codec, ADAU1373_BCLKDIV(dai->id),
+       regmap_update_bits(adau1373->regmap, ADAU1373_BCLKDIV(dai->id),
                ADAU1373_BCLKDIV_SOURCE, clk_id << 5);
 
        return 0;
@@ -1120,6 +1256,7 @@ static struct snd_soc_dai_driver adau1373_dai_driver[] = {
 static int adau1373_set_pll(struct snd_soc_codec *codec, int pll_id,
        int source, unsigned int freq_in, unsigned int freq_out)
 {
+       struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
        unsigned int dpll_div = 0;
        unsigned int x, r, n, m, i, j, mode;
 
@@ -1187,36 +1324,36 @@ static int adau1373_set_pll(struct snd_soc_codec *codec, int pll_id,
 
        if (dpll_div) {
                dpll_div = 11 - dpll_div;
-               snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id),
+               regmap_update_bits(adau1373->regmap, ADAU1373_PLL_CTRL6(pll_id),
                        ADAU1373_PLL_CTRL6_DPLL_BYPASS, 0);
        } else {
-               snd_soc_update_bits(codec, ADAU1373_PLL_CTRL6(pll_id),
+               regmap_update_bits(adau1373->regmap, ADAU1373_PLL_CTRL6(pll_id),
                        ADAU1373_PLL_CTRL6_DPLL_BYPASS,
                        ADAU1373_PLL_CTRL6_DPLL_BYPASS);
        }
 
-       snd_soc_write(codec, ADAU1373_DPLL_CTRL(pll_id),
+       regmap_write(adau1373->regmap, ADAU1373_DPLL_CTRL(pll_id),
                (source << 4) | dpll_div);
-       snd_soc_write(codec, ADAU1373_PLL_CTRL1(pll_id), (m >> 8) & 0xff);
-       snd_soc_write(codec, ADAU1373_PLL_CTRL2(pll_id), m & 0xff);
-       snd_soc_write(codec, ADAU1373_PLL_CTRL3(pll_id), (n >> 8) & 0xff);
-       snd_soc_write(codec, ADAU1373_PLL_CTRL4(pll_id), n & 0xff);
-       snd_soc_write(codec, ADAU1373_PLL_CTRL5(pll_id),
+       regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL1(pll_id), (m >> 8) & 0xff);
+       regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL2(pll_id), m & 0xff);
+       regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL3(pll_id), (n >> 8) & 0xff);
+       regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL4(pll_id), n & 0xff);
+       regmap_write(adau1373->regmap, ADAU1373_PLL_CTRL5(pll_id),
                (r << 3) | (x << 1) | mode);
 
        /* Set sysclk to pll_rate / 4 */
-       snd_soc_update_bits(codec, ADAU1373_CLK_SRC_DIV(pll_id), 0x3f, 0x09);
+       regmap_update_bits(adau1373->regmap, ADAU1373_CLK_SRC_DIV(pll_id), 0x3f, 0x09);
 
        return 0;
 }
 
-static void adau1373_load_drc_settings(struct snd_soc_codec *codec,
+static void adau1373_load_drc_settings(struct adau1373 *adau1373,
        unsigned int nr, uint8_t *drc)
 {
        unsigned int i;
 
        for (i = 0; i < ADAU1373_DRC_SIZE; ++i)
-               snd_soc_write(codec, ADAU1373_DRC(nr) + i, drc[i]);
+               regmap_write(adau1373->regmap, ADAU1373_DRC(nr) + i, drc[i]);
 }
 
 static bool adau1373_valid_micbias(enum adau1373_micbias_voltage micbias)
@@ -1235,13 +1372,14 @@ static bool adau1373_valid_micbias(enum adau1373_micbias_voltage micbias)
 
 static int adau1373_probe(struct snd_soc_codec *codec)
 {
+       struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
        struct adau1373_platform_data *pdata = codec->dev->platform_data;
        bool lineout_differential = false;
        unsigned int val;
        int ret;
        int i;
 
-       ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
+       ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
        if (ret) {
                dev_err(codec->dev, "failed to set cache I/O: %d\n", ret);
                return ret;
@@ -1256,7 +1394,7 @@ static int adau1373_probe(struct snd_soc_codec *codec)
                        return -EINVAL;
 
                for (i = 0; i < pdata->num_drc; ++i) {
-                       adau1373_load_drc_settings(codec, i,
+                       adau1373_load_drc_settings(adau1373, i,
                                pdata->drc_setting[i]);
                }
 
@@ -1268,18 +1406,18 @@ static int adau1373_probe(struct snd_soc_codec *codec)
                        if (pdata->input_differential[i])
                                val |= BIT(i);
                }
-               snd_soc_write(codec, ADAU1373_INPUT_MODE, val);
+               regmap_write(adau1373->regmap, ADAU1373_INPUT_MODE, val);
 
                val = 0;
                if (pdata->lineout_differential)
                        val |= ADAU1373_OUTPUT_CTRL_LDIFF;
                if (pdata->lineout_ground_sense)
                        val |= ADAU1373_OUTPUT_CTRL_LNFBEN;
-               snd_soc_write(codec, ADAU1373_OUTPUT_CTRL, val);
+               regmap_write(adau1373->regmap, ADAU1373_OUTPUT_CTRL, val);
 
                lineout_differential = pdata->lineout_differential;
 
-               snd_soc_write(codec, ADAU1373_EP_CTRL,
+               regmap_write(adau1373->regmap, ADAU1373_EP_CTRL,
                        (pdata->micbias1 << ADAU1373_EP_CTRL_MICBIAS1_OFFSET) |
                        (pdata->micbias2 << ADAU1373_EP_CTRL_MICBIAS2_OFFSET));
        }
@@ -1289,7 +1427,7 @@ static int adau1373_probe(struct snd_soc_codec *codec)
                        ARRAY_SIZE(adau1373_lineout2_controls));
        }
 
-       snd_soc_write(codec, ADAU1373_ADC_CTRL,
+       regmap_write(adau1373->regmap, ADAU1373_ADC_CTRL,
            ADAU1373_ADC_CTRL_RESET_FORCE | ADAU1373_ADC_CTRL_PEAK_DETECT);
 
        return 0;
@@ -1298,17 +1436,19 @@ static int adau1373_probe(struct snd_soc_codec *codec)
 static int adau1373_set_bias_level(struct snd_soc_codec *codec,
        enum snd_soc_bias_level level)
 {
+       struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
+
        switch (level) {
        case SND_SOC_BIAS_ON:
                break;
        case SND_SOC_BIAS_PREPARE:
                break;
        case SND_SOC_BIAS_STANDBY:
-               snd_soc_update_bits(codec, ADAU1373_PWDN_CTRL3,
+               regmap_update_bits(adau1373->regmap, ADAU1373_PWDN_CTRL3,
                        ADAU1373_PWDN_CTRL3_PWR_EN, ADAU1373_PWDN_CTRL3_PWR_EN);
                break;
        case SND_SOC_BIAS_OFF:
-               snd_soc_update_bits(codec, ADAU1373_PWDN_CTRL3,
+               regmap_update_bits(adau1373->regmap, ADAU1373_PWDN_CTRL3,
                        ADAU1373_PWDN_CTRL3_PWR_EN, 0);
                break;
        }
@@ -1324,17 +1464,49 @@ static int adau1373_remove(struct snd_soc_codec *codec)
 
 static int adau1373_suspend(struct snd_soc_codec *codec)
 {
-       return adau1373_set_bias_level(codec, SND_SOC_BIAS_OFF);
+       struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       ret = adau1373_set_bias_level(codec, SND_SOC_BIAS_OFF);
+       regcache_cache_only(adau1373->regmap, true);
+
+       return ret;
 }
 
 static int adau1373_resume(struct snd_soc_codec *codec)
 {
+       struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
+
+       regcache_cache_only(adau1373->regmap, false);
        adau1373_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-       snd_soc_cache_sync(codec);
+       regcache_sync(adau1373->regmap);
 
        return 0;
 }
 
+static bool adau1373_register_volatile(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case ADAU1373_SOFT_RESET:
+       case ADAU1373_ADC_DAC_STATUS:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static const struct regmap_config adau1373_regmap_config = {
+       .val_bits = 8,
+       .reg_bits = 8,
+
+       .volatile_reg = adau1373_register_volatile,
+       .max_register = ADAU1373_SOFT_RESET,
+
+       .cache_type = REGCACHE_RBTREE,
+       .reg_defaults = adau1373_reg_defaults,
+       .num_reg_defaults = ARRAY_SIZE(adau1373_reg_defaults),
+};
+
 static struct snd_soc_codec_driver adau1373_codec_driver = {
        .probe =        adau1373_probe,
        .remove =       adau1373_remove,
@@ -1342,9 +1514,6 @@ static struct snd_soc_codec_driver adau1373_codec_driver = {
        .resume =       adau1373_resume,
        .set_bias_level = adau1373_set_bias_level,
        .idle_bias_off = true,
-       .reg_cache_size = ARRAY_SIZE(adau1373_default_regs),
-       .reg_cache_default = adau1373_default_regs,
-       .reg_word_size = sizeof(uint8_t),
 
        .set_pll = adau1373_set_pll,
 
@@ -1366,6 +1535,13 @@ static int adau1373_i2c_probe(struct i2c_client *client,
        if (!adau1373)
                return -ENOMEM;
 
+       adau1373->regmap = devm_regmap_init_i2c(client,
+               &adau1373_regmap_config);
+       if (IS_ERR(adau1373->regmap))
+               return PTR_ERR(adau1373->regmap);
+
+       regmap_write(adau1373->regmap, ADAU1373_SOFT_RESET, 0x00);
+
        dev_set_drvdata(&client->dev, adau1373);
 
        ret = snd_soc_register_codec(&client->dev, &adau1373_codec_driver,
index 15b012d0f226c48a4178b1c9f4d10468708dcd1d..14a7c169d004ebf97fabbbccfd8df0434327c639 100644
 
 #define ADAV80X_PLL_OUTE_SYSCLKPD(x)           BIT(2 - (x))
 
-static u8 adav80x_default_regs[] = {
-       0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x02, 0x01, 0x80, 0x26, 0x00, 0x00,
-       0x02, 0x40, 0x20, 0x00, 0x09, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x04, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0x92, 0xb1, 0x37,
-       0x48, 0xd2, 0xfb, 0xca, 0xd2, 0x15, 0xe8, 0x29, 0xb9, 0x6a, 0xda, 0x2b,
-       0xb7, 0xc0, 0x11, 0x65, 0x5c, 0xf6, 0xff, 0x8d, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa5, 0x00, 0x00,
-       0x00, 0xe8, 0x46, 0xe1, 0x5b, 0xd3, 0x43, 0x77, 0x93, 0xa7, 0x44, 0xee,
-       0x32, 0x12, 0xc0, 0x11, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x3f,
-       0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x1d, 0x00, 0x00, 0x00, 0x00,
-       0x00, 0x00, 0x00, 0x00, 0x52, 0x00,
+static struct reg_default adav80x_reg_defaults[] = {
+       { ADAV80X_PLAYBACK_CTRL,        0x01 },
+       { ADAV80X_AUX_IN_CTRL,          0x01 },
+       { ADAV80X_REC_CTRL,             0x02 },
+       { ADAV80X_AUX_OUT_CTRL,         0x01 },
+       { ADAV80X_DPATH_CTRL1,          0xc0 },
+       { ADAV80X_DPATH_CTRL2,          0x11 },
+       { ADAV80X_DAC_CTRL1,            0x00 },
+       { ADAV80X_DAC_CTRL2,            0x00 },
+       { ADAV80X_DAC_CTRL3,            0x00 },
+       { ADAV80X_DAC_L_VOL,            0xff },
+       { ADAV80X_DAC_R_VOL,            0xff },
+       { ADAV80X_PGA_L_VOL,            0x00 },
+       { ADAV80X_PGA_R_VOL,            0x00 },
+       { ADAV80X_ADC_CTRL1,            0x00 },
+       { ADAV80X_ADC_CTRL2,            0x00 },
+       { ADAV80X_ADC_L_VOL,            0xff },
+       { ADAV80X_ADC_R_VOL,            0xff },
+       { ADAV80X_PLL_CTRL1,            0x00 },
+       { ADAV80X_PLL_CTRL2,            0x00 },
+       { ADAV80X_ICLK_CTRL1,           0x00 },
+       { ADAV80X_ICLK_CTRL2,           0x00 },
+       { ADAV80X_PLL_CLK_SRC,          0x00 },
+       { ADAV80X_PLL_OUTE,             0x00 },
 };
 
 struct adav80x {
-       enum snd_soc_control_type control_type;
+       struct regmap *regmap;
 
        enum adav80x_clk_src clk_src;
        unsigned int sysclk;
@@ -298,7 +310,7 @@ static int adav80x_set_deemph(struct snd_soc_codec *codec)
                val = ADAV80X_DAC_CTRL2_DEEMPH_NONE;
        }
 
-       return snd_soc_update_bits(codec, ADAV80X_DAC_CTRL2,
+       return regmap_update_bits(adav80x->regmap, ADAV80X_DAC_CTRL2,
                ADAV80X_DAC_CTRL2_DEEMPH_MASK, val);
 }
 
@@ -394,10 +406,11 @@ static int adav80x_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                return -EINVAL;
        }
 
-       snd_soc_update_bits(codec, adav80x_port_ctrl_regs[dai->id][0],
+       regmap_update_bits(adav80x->regmap, adav80x_port_ctrl_regs[dai->id][0],
                ADAV80X_CAPTURE_MODE_MASK | ADAV80X_CAPTURE_MODE_MASTER,
                capture);
-       snd_soc_write(codec, adav80x_port_ctrl_regs[dai->id][1], playback);
+       regmap_write(adav80x->regmap, adav80x_port_ctrl_regs[dai->id][1],
+               playback);
 
        adav80x->dai_fmt[dai->id] = fmt & SND_SOC_DAIFMT_FORMAT_MASK;
 
@@ -407,6 +420,7 @@ static int adav80x_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 static int adav80x_set_adc_clock(struct snd_soc_codec *codec,
                unsigned int sample_rate)
 {
+       struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
        unsigned int val;
 
        if (sample_rate <= 48000)
@@ -414,7 +428,7 @@ static int adav80x_set_adc_clock(struct snd_soc_codec *codec,
        else
                val = ADAV80X_ADC_CTRL1_MODULATOR_64FS;
 
-       snd_soc_update_bits(codec, ADAV80X_ADC_CTRL1,
+       regmap_update_bits(adav80x->regmap, ADAV80X_ADC_CTRL1,
                ADAV80X_ADC_CTRL1_MODULATOR_MASK, val);
 
        return 0;
@@ -423,6 +437,7 @@ static int adav80x_set_adc_clock(struct snd_soc_codec *codec,
 static int adav80x_set_dac_clock(struct snd_soc_codec *codec,
                unsigned int sample_rate)
 {
+       struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
        unsigned int val;
 
        if (sample_rate <= 48000)
@@ -430,7 +445,7 @@ static int adav80x_set_dac_clock(struct snd_soc_codec *codec,
        else
                val = ADAV80X_DAC_CTRL2_DIV2 | ADAV80X_DAC_CTRL2_INTERPOL_128FS;
 
-       snd_soc_update_bits(codec, ADAV80X_DAC_CTRL2,
+       regmap_update_bits(adav80x->regmap, ADAV80X_DAC_CTRL2,
                ADAV80X_DAC_CTRL2_DIV_MASK | ADAV80X_DAC_CTRL2_INTERPOL_MASK,
                val);
 
@@ -440,6 +455,7 @@ static int adav80x_set_dac_clock(struct snd_soc_codec *codec,
 static int adav80x_set_capture_pcm_format(struct snd_soc_codec *codec,
                struct snd_soc_dai *dai, snd_pcm_format_t format)
 {
+       struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
        unsigned int val;
 
        switch (format) {
@@ -459,7 +475,7 @@ static int adav80x_set_capture_pcm_format(struct snd_soc_codec *codec,
                return -EINVAL;
        }
 
-       snd_soc_update_bits(codec, adav80x_port_ctrl_regs[dai->id][0],
+       regmap_update_bits(adav80x->regmap, adav80x_port_ctrl_regs[dai->id][0],
                ADAV80X_CAPTURE_WORD_LEN_MASK, val);
 
        return 0;
@@ -491,7 +507,7 @@ static int adav80x_set_playback_pcm_format(struct snd_soc_codec *codec,
                return -EINVAL;
        }
 
-       snd_soc_update_bits(codec, adav80x_port_ctrl_regs[dai->id][1],
+       regmap_update_bits(adav80x->regmap, adav80x_port_ctrl_regs[dai->id][1],
                ADAV80X_PLAYBACK_MODE_MASK, val);
 
        return 0;
@@ -554,8 +570,10 @@ static int adav80x_set_sysclk(struct snd_soc_codec *codec,
                                        ADAV80X_ICLK_CTRL1_ICLK2_SRC(clk_id);
                        iclk_ctrl2 = ADAV80X_ICLK_CTRL2_ICLK1_SRC(clk_id);
 
-                       snd_soc_write(codec, ADAV80X_ICLK_CTRL1, iclk_ctrl1);
-                       snd_soc_write(codec, ADAV80X_ICLK_CTRL2, iclk_ctrl2);
+                       regmap_write(adav80x->regmap, ADAV80X_ICLK_CTRL1,
+                               iclk_ctrl1);
+                       regmap_write(adav80x->regmap, ADAV80X_ICLK_CTRL2,
+                               iclk_ctrl2);
 
                        snd_soc_dapm_sync(&codec->dapm);
                }
@@ -575,10 +593,12 @@ static int adav80x_set_sysclk(struct snd_soc_codec *codec,
                mask = ADAV80X_PLL_OUTE_SYSCLKPD(clk_id);
 
                if (freq == 0) {
-                       snd_soc_update_bits(codec, ADAV80X_PLL_OUTE, mask, mask);
+                       regmap_update_bits(adav80x->regmap, ADAV80X_PLL_OUTE,
+                               mask, mask);
                        adav80x->sysclk_pd[clk_id] = true;
                } else {
-                       snd_soc_update_bits(codec, ADAV80X_PLL_OUTE, mask, 0);
+                       regmap_update_bits(adav80x->regmap, ADAV80X_PLL_OUTE,
+                               mask, 0);
                        adav80x->sysclk_pd[clk_id] = false;
                }
 
@@ -650,9 +670,9 @@ static int adav80x_set_pll(struct snd_soc_codec *codec, int pll_id,
                return -EINVAL;
        }
 
-       snd_soc_update_bits(codec, ADAV80X_PLL_CTRL1, ADAV80X_PLL_CTRL1_PLLDIV,
-               pll_ctrl1);
-       snd_soc_update_bits(codec, ADAV80X_PLL_CTRL2,
+       regmap_update_bits(adav80x->regmap, ADAV80X_PLL_CTRL1,
+                       ADAV80X_PLL_CTRL1_PLLDIV, pll_ctrl1);
+       regmap_update_bits(adav80x->regmap, ADAV80X_PLL_CTRL2,
                        ADAV80X_PLL_CTRL2_PLL_MASK(pll_id), pll_ctrl2);
 
        if (source != adav80x->pll_src) {
@@ -661,7 +681,7 @@ static int adav80x_set_pll(struct snd_soc_codec *codec, int pll_id,
                else
                        pll_src = ADAV80X_PLL_CLK_SRC_PLL_XIN(pll_id);
 
-               snd_soc_update_bits(codec, ADAV80X_PLL_CLK_SRC,
+               regmap_update_bits(adav80x->regmap, ADAV80X_PLL_CLK_SRC,
                                ADAV80X_PLL_CLK_SRC_PLL_MASK(pll_id), pll_src);
 
                adav80x->pll_src = source;
@@ -675,6 +695,7 @@ static int adav80x_set_pll(struct snd_soc_codec *codec, int pll_id,
 static int adav80x_set_bias_level(struct snd_soc_codec *codec,
                enum snd_soc_bias_level level)
 {
+       struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
        unsigned int mask = ADAV80X_DAC_CTRL1_PD;
 
        switch (level) {
@@ -683,10 +704,12 @@ static int adav80x_set_bias_level(struct snd_soc_codec *codec,
        case SND_SOC_BIAS_PREPARE:
                break;
        case SND_SOC_BIAS_STANDBY:
-               snd_soc_update_bits(codec, ADAV80X_DAC_CTRL1, mask, 0x00);
+               regmap_update_bits(adav80x->regmap, ADAV80X_DAC_CTRL1, mask,
+                       0x00);
                break;
        case SND_SOC_BIAS_OFF:
-               snd_soc_update_bits(codec, ADAV80X_DAC_CTRL1, mask, mask);
+               regmap_update_bits(adav80x->regmap, ADAV80X_DAC_CTRL1, mask,
+                       mask);
                break;
        }
 
@@ -780,7 +803,7 @@ static int adav80x_probe(struct snd_soc_codec *codec)
        int ret;
        struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
 
-       ret = snd_soc_codec_set_cache_io(codec, 7, 9, adav80x->control_type);
+       ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
        if (ret) {
                dev_err(codec->dev, "failed to set cache I/O: %d\n", ret);
                return ret;
@@ -791,23 +814,31 @@ static int adav80x_probe(struct snd_soc_codec *codec)
        snd_soc_dapm_force_enable_pin(&codec->dapm, "PLL2");
 
        /* Power down S/PDIF receiver, since it is currently not supported */
-       snd_soc_write(codec, ADAV80X_PLL_OUTE, 0x20);
+       regmap_write(adav80x->regmap, ADAV80X_PLL_OUTE, 0x20);
        /* Disable DAC zero flag */
-       snd_soc_write(codec, ADAV80X_DAC_CTRL3, 0x6);
+       regmap_write(adav80x->regmap, ADAV80X_DAC_CTRL3, 0x6);
 
        return adav80x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 }
 
 static int adav80x_suspend(struct snd_soc_codec *codec)
 {
-       return adav80x_set_bias_level(codec, SND_SOC_BIAS_OFF);
+       struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       ret = adav80x_set_bias_level(codec, SND_SOC_BIAS_OFF);
+       regcache_cache_only(adav80x->regmap, true);
+
+       return ret;
 }
 
 static int adav80x_resume(struct snd_soc_codec *codec)
 {
+       struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
+
+       regcache_cache_only(adav80x->regmap, false);
        adav80x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-       codec->cache_sync = 1;
-       snd_soc_cache_sync(codec);
+       regcache_sync(adav80x->regmap);
 
        return 0;
 }
@@ -827,10 +858,6 @@ static struct snd_soc_codec_driver adav80x_codec_driver = {
        .set_pll = adav80x_set_pll,
        .set_sysclk = adav80x_set_sysclk,
 
-       .reg_word_size = sizeof(u8),
-       .reg_cache_size = ARRAY_SIZE(adav80x_default_regs),
-       .reg_cache_default = adav80x_default_regs,
-
        .controls = adav80x_controls,
        .num_controls = ARRAY_SIZE(adav80x_controls),
        .dapm_widgets = adav80x_dapm_widgets,
@@ -839,18 +866,21 @@ static struct snd_soc_codec_driver adav80x_codec_driver = {
        .num_dapm_routes = ARRAY_SIZE(adav80x_dapm_routes),
 };
 
-static int adav80x_bus_probe(struct device *dev,
-                            enum snd_soc_control_type control_type)
+static int adav80x_bus_probe(struct device *dev, struct regmap *regmap)
 {
        struct adav80x *adav80x;
        int ret;
 
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
        adav80x = kzalloc(sizeof(*adav80x), GFP_KERNEL);
        if (!adav80x)
                return -ENOMEM;
 
+
        dev_set_drvdata(dev, adav80x);
-       adav80x->control_type = control_type;
+       adav80x->regmap = regmap;
 
        ret = snd_soc_register_codec(dev, &adav80x_codec_driver,
                adav80x_dais, ARRAY_SIZE(adav80x_dais));
@@ -868,6 +898,19 @@ static int adav80x_bus_remove(struct device *dev)
 }
 
 #if defined(CONFIG_SPI_MASTER)
+static const struct regmap_config adav80x_spi_regmap_config = {
+       .val_bits = 8,
+       .pad_bits = 1,
+       .reg_bits = 7,
+       .read_flag_mask = 0x01,
+
+       .max_register = ADAV80X_PLL_OUTE,
+
+       .cache_type = REGCACHE_RBTREE,
+       .reg_defaults = adav80x_reg_defaults,
+       .num_reg_defaults = ARRAY_SIZE(adav80x_reg_defaults),
+};
+
 static const struct spi_device_id adav80x_spi_id[] = {
        { "adav801", 0 },
        { }
@@ -876,7 +919,8 @@ MODULE_DEVICE_TABLE(spi, adav80x_spi_id);
 
 static int adav80x_spi_probe(struct spi_device *spi)
 {
-       return adav80x_bus_probe(&spi->dev, SND_SOC_SPI);
+       return adav80x_bus_probe(&spi->dev,
+               devm_regmap_init_spi(spi, &adav80x_spi_regmap_config));
 }
 
 static int adav80x_spi_remove(struct spi_device *spi)
@@ -896,6 +940,18 @@ static struct spi_driver adav80x_spi_driver = {
 #endif
 
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static const struct regmap_config adav80x_i2c_regmap_config = {
+       .val_bits = 8,
+       .pad_bits = 1,
+       .reg_bits = 7,
+
+       .max_register = ADAV80X_PLL_OUTE,
+
+       .cache_type = REGCACHE_RBTREE,
+       .reg_defaults = adav80x_reg_defaults,
+       .num_reg_defaults = ARRAY_SIZE(adav80x_reg_defaults),
+};
+
 static const struct i2c_device_id adav80x_i2c_id[] = {
        { "adav803", 0 },
        { }
@@ -905,7 +961,8 @@ MODULE_DEVICE_TABLE(i2c, adav80x_i2c_id);
 static int adav80x_i2c_probe(struct i2c_client *client,
                             const struct i2c_device_id *id)
 {
-       return adav80x_bus_probe(&client->dev, SND_SOC_I2C);
+       return adav80x_bus_probe(&client->dev,
+               devm_regmap_init_i2c(client, &adav80x_i2c_regmap_config));
 }
 
 static int adav80x_i2c_remove(struct i2c_client *client)
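
The adav80x hunks above show the recurring pattern in this series: the codec stops going through the legacy snd_soc_write()/snd_soc_update_bits() register cache and instead keeps a struct regmap pointer in its driver data, created with devm_regmap_init_i2c()/devm_regmap_init_spi() from a regmap_config that carries the cache type and register defaults. A minimal sketch of that pattern for a hypothetical I2C codec ("foo"; the register, mask and function names below are made up for illustration and are not part of the patch):

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>

#define FOO_CTRL_REG	0x01
#define FOO_MUTE_MASK	0x01

struct foo_priv {
	struct regmap *regmap;
};

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.cache_type = REGCACHE_RBTREE,
};

static int foo_i2c_probe(struct i2c_client *i2c,
			 const struct i2c_device_id *id)
{
	struct foo_priv *foo;

	foo = devm_kzalloc(&i2c->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	/* The regmap (and its cache) replaces the ASoC register cache. */
	foo->regmap = devm_regmap_init_i2c(i2c, &foo_regmap_config);
	if (IS_ERR(foo->regmap))
		return PTR_ERR(foo->regmap);

	i2c_set_clientdata(i2c, foo);

	/* Register access now goes through the regmap API. */
	return regmap_update_bits(foo->regmap, FOO_CTRL_REG,
				  FOO_MUTE_MASK, FOO_MUTE_MASK);
}

With the regmap kept in driver data, suspend/resume can use regcache_cache_only() plus regcache_sync() instead of the old codec->cache_sync flag, which is exactly what the adav80x suspend/resume hunks above do.
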
index 71059c07ae7be5abc3a9c70fd5443bce27493ced..b4819dcd4f4dc73f781383681d0ed4198ac2f220 100644 (file)
@@ -45,8 +45,6 @@
 #define AK4104_TX_TXE                  (1 << 0)
 #define AK4104_TX_V                    (1 << 1)
 
-#define DRV_NAME "ak4104-codec"
-
 struct ak4104_private {
        struct regmap *regmap;
 };
@@ -291,12 +289,19 @@ static const struct of_device_id ak4104_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ak4104_of_match);
 
+static const struct spi_device_id ak4104_id_table[] = {
+       { "ak4104", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, ak4104_id_table);
+
 static struct spi_driver ak4104_spi_driver = {
        .driver  = {
-               .name   = DRV_NAME,
+               .name   = "ak4104",
                .owner  = THIS_MODULE,
                .of_match_table = ak4104_of_match,
        },
+       .id_table = ak4104_id_table,
        .probe  = ak4104_spi_probe,
        .remove = ak4104_spi_remove,
 };
index 5f9af1fb76e862a1fb2eb4e73176d47cccf4aef8..49cc5f6d6dba41cd1d45712fc15037d8031ddaf5 100644 (file)
@@ -328,7 +328,7 @@ static int ak4641_i2s_hw_params(struct snd_pcm_substream *substream,
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                ak4641->playback_fs = rate;
                ak4641_set_deemph(codec);
-       };
+       }
 
        return 0;
 }
index 2d037870970221b2c2a87f4f96cac4886d606cb9..21c35ed778cccfcc38c250ffb1e6663eb25fba68 100644 (file)
@@ -352,7 +352,6 @@ static int ak4642_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
         */
        default:
                return -EINVAL;
-               break;
        }
        snd_soc_update_bits(codec, MD_CTL1, DIF_MASK, data);
 
@@ -405,7 +404,6 @@ static int ak4642_dai_hw_params(struct snd_pcm_substream *substream,
                break;
        default:
                return -EINVAL;
-               break;
        }
        snd_soc_update_bits(codec, MD_CTL2, FS_MASK, rate);
 
index 657808ba1418d5588e961081c5e1578cbb671400..6f05b17d19657aebb15f5793ec3cb36b07c57422 100644 (file)
@@ -1477,21 +1477,25 @@ static void arizona_enable_fll(struct arizona_fll *fll,
 {
        struct arizona *arizona = fll->arizona;
        int ret;
+       bool use_sync = false;
 
        /*
         * If we have both REFCLK and SYNCCLK then enable both,
         * otherwise apply the SYNCCLK settings to REFCLK.
         */
-       if (fll->ref_src >= 0 && fll->ref_src != fll->sync_src) {
+       if (fll->ref_src >= 0 && fll->ref_freq &&
+           fll->ref_src != fll->sync_src) {
                regmap_update_bits(arizona->regmap, fll->base + 5,
                                   ARIZONA_FLL1_OUTDIV_MASK,
                                   ref->outdiv << ARIZONA_FLL1_OUTDIV_SHIFT);
 
                arizona_apply_fll(arizona, fll->base, ref, fll->ref_src,
                                  false);
-               if (fll->sync_src >= 0)
+               if (fll->sync_src >= 0) {
                        arizona_apply_fll(arizona, fll->base + 0x10, sync,
                                          fll->sync_src, true);
+                       use_sync = true;
+               }
        } else if (fll->sync_src >= 0) {
                regmap_update_bits(arizona->regmap, fll->base + 5,
                                   ARIZONA_FLL1_OUTDIV_MASK,
@@ -1511,7 +1515,7 @@ static void arizona_enable_fll(struct arizona_fll *fll,
         * Increase the bandwidth if we're not using a low frequency
         * sync source.
         */
-       if (fll->sync_src >= 0 && fll->sync_freq > 100000)
+       if (use_sync && fll->sync_freq > 100000)
                regmap_update_bits(arizona->regmap, fll->base + 0x17,
                                   ARIZONA_FLL1_SYNC_BW, 0);
        else
@@ -1526,8 +1530,7 @@ static void arizona_enable_fll(struct arizona_fll *fll,
 
        regmap_update_bits(arizona->regmap, fll->base + 1,
                           ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
-       if (fll->ref_src >= 0 && fll->sync_src >= 0 &&
-           fll->ref_src != fll->sync_src)
+       if (use_sync)
                regmap_update_bits(arizona->regmap, fll->base + 0x11,
                                   ARIZONA_FLL1_SYNC_ENA,
                                   ARIZONA_FLL1_SYNC_ENA);
@@ -1561,10 +1564,12 @@ int arizona_set_fll_refclk(struct arizona_fll *fll, int source,
        if (fll->ref_src == source && fll->ref_freq == Fref)
                return 0;
 
-       if (fll->fout && Fref > 0) {
-               ret = arizona_calc_fll(fll, &ref, Fref, fll->fout);
-               if (ret != 0)
-                       return ret;
+       if (fll->fout) {
+               if (Fref > 0) {
+                       ret = arizona_calc_fll(fll, &ref, Fref, fll->fout);
+                       if (ret != 0)
+                               return ret;
+               }
 
                if (fll->sync_src >= 0) {
                        ret = arizona_calc_fll(fll, &sync, fll->sync_freq,
index 23316c887b19211f7c7473767b956895abda397f..43737a27d79caff3dda074565d0201493d4fb96e 100644 (file)
 #include <sound/soc.h>
 #include <sound/initval.h>
 
-static inline unsigned int cq93vc_read(struct snd_soc_codec *codec,
-                                               unsigned int reg)
-{
-       struct davinci_vc *davinci_vc = codec->control_data;
-
-       return readl(davinci_vc->base + reg);
-}
-
-static inline int cq93vc_write(struct snd_soc_codec *codec, unsigned int reg,
-                      unsigned int value)
-{
-       struct davinci_vc *davinci_vc = codec->control_data;
-
-       writel(value, davinci_vc->base + reg);
-
-       return 0;
-}
-
 static const struct snd_kcontrol_new cq93vc_snd_controls[] = {
        SOC_SINGLE("PGA Capture Volume", DAVINCI_VC_REG05, 0, 0x03, 0),
        SOC_SINGLE("Mono DAC Playback Volume", DAVINCI_VC_REG09, 0, 0x3f, 0),
@@ -64,13 +46,15 @@ static const struct snd_kcontrol_new cq93vc_snd_controls[] = {
 static int cq93vc_mute(struct snd_soc_dai *dai, int mute)
 {
        struct snd_soc_codec *codec = dai->codec;
-       u8 reg = cq93vc_read(codec, DAVINCI_VC_REG09) & ~DAVINCI_VC_REG09_MUTE;
+       u8 reg;
 
        if (mute)
-               cq93vc_write(codec, DAVINCI_VC_REG09,
-                            reg | DAVINCI_VC_REG09_MUTE);
+               reg = DAVINCI_VC_REG09_MUTE;
        else
-               cq93vc_write(codec, DAVINCI_VC_REG09, reg);
+               reg = 0;
+
+       snd_soc_update_bits(codec, DAVINCI_VC_REG09, DAVINCI_VC_REG09_MUTE,
+                           reg);
 
        return 0;
 }
@@ -79,7 +63,7 @@ static int cq93vc_set_dai_sysclk(struct snd_soc_dai *codec_dai,
                                 int clk_id, unsigned int freq, int dir)
 {
        struct snd_soc_codec *codec = codec_dai->codec;
-       struct davinci_vc *davinci_vc = codec->control_data;
+       struct davinci_vc *davinci_vc = codec->dev->platform_data;
 
        switch (freq) {
        case 22579200:
@@ -97,18 +81,18 @@ static int cq93vc_set_bias_level(struct snd_soc_codec *codec,
 {
        switch (level) {
        case SND_SOC_BIAS_ON:
-               cq93vc_write(codec, DAVINCI_VC_REG12,
+               snd_soc_write(codec, DAVINCI_VC_REG12,
                             DAVINCI_VC_REG12_POWER_ALL_ON);
                break;
        case SND_SOC_BIAS_PREPARE:
                break;
        case SND_SOC_BIAS_STANDBY:
-               cq93vc_write(codec, DAVINCI_VC_REG12,
+               snd_soc_write(codec, DAVINCI_VC_REG12,
                             DAVINCI_VC_REG12_POWER_ALL_OFF);
                break;
        case SND_SOC_BIAS_OFF:
                /* force all power off */
-               cq93vc_write(codec, DAVINCI_VC_REG12,
+               snd_soc_write(codec, DAVINCI_VC_REG12,
                             DAVINCI_VC_REG12_POWER_ALL_OFF);
                break;
        }
@@ -154,11 +138,9 @@ static int cq93vc_probe(struct snd_soc_codec *codec)
        struct davinci_vc *davinci_vc = codec->dev->platform_data;
 
        davinci_vc->cq93vc.codec = codec;
-       codec->control_data = davinci_vc;
+       codec->control_data = davinci_vc->regmap;
 
-       /* Set controls */
-       snd_soc_add_codec_controls(codec, cq93vc_snd_controls,
-                            ARRAY_SIZE(cq93vc_snd_controls));
+       snd_soc_codec_set_cache_io(codec, 32, 32, SND_SOC_REGMAP);
 
        /* Off, with power on */
        cq93vc_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -174,12 +156,12 @@ static int cq93vc_remove(struct snd_soc_codec *codec)
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_cq93vc = {
-       .read = cq93vc_read,
-       .write = cq93vc_write,
        .set_bias_level = cq93vc_set_bias_level,
        .probe = cq93vc_probe,
        .remove = cq93vc_remove,
        .resume = cq93vc_resume,
+       .controls = cq93vc_snd_controls,
+       .num_controls = ARRAY_SIZE(cq93vc_snd_controls),
 };
 
 static int cq93vc_platform_probe(struct platform_device *pdev)
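
The cq93vc conversion above removes the driver's open-coded readl()/writel() accessors and instead reuses the register map exposed by the parent DaVinci voice-codec MFD device, selecting regmap-backed I/O through snd_soc_codec_set_cache_io(). A rough sketch of that arrangement, with hypothetical "foo" names (the 32/32 width arguments are taken from the hunk above):

#include <linux/regmap.h>
#include <sound/soc.h>

struct foo_mfd {
	struct regmap *regmap;
};

static int foo_codec_probe(struct snd_soc_codec *codec)
{
	struct foo_mfd *mfd = codec->dev->platform_data;

	/* Hand the parent MFD's regmap to the ASoC core ... */
	codec->control_data = mfd->regmap;

	/* ... and route snd_soc_read()/snd_soc_write() through it. */
	return snd_soc_codec_set_cache_io(codec, 32, 32, SND_SOC_REGMAP);
}

The controls also move from an explicit snd_soc_add_codec_controls() call in probe to the declarative .controls/.num_controls fields of the codec driver, matching the other conversions in this merge.
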
index a20f1bb8f0715011dad1a710cf038a80121e30b9..f6e953454bc0856ffccb87ec6680a670d3e90cbd 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/spi/spi.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <sound/pcm.h>
index 3b20c86cdb01671c283cec22b58aa6fac5acddcd..549d5d6a3fef47f0680934c68b36a2c6a48529d0 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/of_gpio.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/regmap.h>
@@ -28,6 +29,7 @@
 #include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
+#include <sound/cs42l73.h>
 #include "cs42l73.h"
 
 struct sp_config {
@@ -35,6 +37,7 @@ struct sp_config {
        u32 srate;
 };
 struct  cs42l73_private {
+       struct cs42l73_platform_data pdata;
        struct sp_config config[3];
        struct regmap *regmap;
        u32 sysclk;
@@ -310,15 +313,6 @@ static const struct soc_enum ng_delay_enum =
        SOC_ENUM_SINGLE(CS42L73_NGCAB, 0,
                ARRAY_SIZE(cs42l73_ng_delay_text), cs42l73_ng_delay_text);
 
-static const char * const charge_pump_freq_text[] = {
-       "0", "1", "2", "3", "4",
-       "5", "6", "7", "8", "9",
-       "10", "11", "12", "13", "14", "15" };
-
-static const struct soc_enum charge_pump_enum =
-       SOC_ENUM_SINGLE(CS42L73_CPFCHC, 4,
-               ARRAY_SIZE(charge_pump_freq_text), charge_pump_freq_text);
-
 static const char * const cs42l73_mono_mix_texts[] = {
        "Left", "Right", "Mono Mix"};
 
@@ -511,8 +505,6 @@ static const struct snd_kcontrol_new cs42l73_snd_controls[] = {
        SOC_SINGLE("NG Threshold", CS42L73_NGCAB, 2, 7, 0),
        SOC_ENUM("NG Delay", ng_delay_enum),
 
-       SOC_ENUM("Charge Pump Frequency", charge_pump_enum),
-
        SOC_DOUBLE_R_TLV("XSP-IP Volume",
                        CS42L73_XSPAIPAA, CS42L73_XSPBIPBA, 0, 0x3F, 1,
                        attn_tlv),
@@ -1055,11 +1047,11 @@ static int cs42l73_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBM_CFM:
-               mmcc |= MS_MASTER;
+               mmcc |= CS42L73_MS_MASTER;
                break;
 
        case SND_SOC_DAIFMT_CBS_CFS:
-               mmcc &= ~MS_MASTER;
+               mmcc &= ~CS42L73_MS_MASTER;
                break;
 
        default:
@@ -1071,11 +1063,11 @@ static int cs42l73_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 
        switch (format) {
        case SND_SOC_DAIFMT_I2S:
-               spc &= ~SPDIF_PCM;
+               spc &= ~CS42L73_SPDIF_PCM;
                break;
        case SND_SOC_DAIFMT_DSP_A:
        case SND_SOC_DAIFMT_DSP_B:
-               if (mmcc & MS_MASTER) {
+               if (mmcc & CS42L73_MS_MASTER) {
                        dev_err(codec->dev,
                                "PCM format in slave mode only\n");
                        return -EINVAL;
@@ -1085,25 +1077,25 @@ static int cs42l73_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
                                "PCM format is not supported on ASP port\n");
                        return -EINVAL;
                }
-               spc |= SPDIF_PCM;
+               spc |= CS42L73_SPDIF_PCM;
                break;
        default:
                return -EINVAL;
        }
 
-       if (spc & SPDIF_PCM) {
+       if (spc & CS42L73_SPDIF_PCM) {
                /* Clear PCM mode, clear PCM_BIT_ORDER bit for MSB->LSB */
-               spc &= ~(PCM_MODE_MASK | PCM_BIT_ORDER);
+               spc &= ~(CS42L73_PCM_MODE_MASK | CS42L73_PCM_BIT_ORDER);
                switch (format) {
                case SND_SOC_DAIFMT_DSP_B:
                        if (inv == SND_SOC_DAIFMT_IB_IF)
-                               spc |= PCM_MODE0;
+                               spc |= CS42L73_PCM_MODE0;
                        if (inv == SND_SOC_DAIFMT_IB_NF)
-                               spc |= PCM_MODE1;
+                               spc |= CS42L73_PCM_MODE1;
                break;
                case SND_SOC_DAIFMT_DSP_A:
                        if (inv == SND_SOC_DAIFMT_IB_IF)
-                               spc |= PCM_MODE1;
+                               spc |= CS42L73_PCM_MODE1;
                        break;
                default:
                        return -EINVAL;
@@ -1163,7 +1155,7 @@ static int cs42l73_pcm_hw_params(struct snd_pcm_substream *substream,
        int mclk_coeff;
        int srate = params_rate(params);
 
-       if (priv->config[id].mmcc & MS_MASTER) {
+       if (priv->config[id].mmcc & CS42L73_MS_MASTER) {
                /* CS42L73 Master */
                /* MCLK -> srate */
                mclk_coeff =
@@ -1182,13 +1174,13 @@ static int cs42l73_pcm_hw_params(struct snd_pcm_substream *substream,
                priv->config[id].spc &= 0xFC;
                /* Use SCLK=64*Fs if internal MCLK >= 6.4MHz */
                if (priv->mclk >= 6400000)
-                       priv->config[id].spc |= MCK_SCLK_64FS;
+                       priv->config[id].spc |= CS42L73_MCK_SCLK_64FS;
                else
-                       priv->config[id].spc |= MCK_SCLK_MCLK;
+                       priv->config[id].spc |= CS42L73_MCK_SCLK_MCLK;
        } else {
                /* CS42L73 Slave */
                priv->config[id].spc &= 0xFC;
-               priv->config[id].spc |= MCK_SCLK_64FS;
+               priv->config[id].spc |= CS42L73_MCK_SCLK_64FS;
        }
        /* Update ASRCs */
        priv->config[id].srate = srate;
@@ -1208,8 +1200,8 @@ static int cs42l73_set_bias_level(struct snd_soc_codec *codec,
 
        switch (level) {
        case SND_SOC_BIAS_ON:
-               snd_soc_update_bits(codec, CS42L73_DMMCC, MCLKDIS, 0);
-               snd_soc_update_bits(codec, CS42L73_PWRCTL1, PDN, 0);
+               snd_soc_update_bits(codec, CS42L73_DMMCC, CS42L73_MCLKDIS, 0);
+               snd_soc_update_bits(codec, CS42L73_PWRCTL1, CS42L73_PDN, 0);
                break;
 
        case SND_SOC_BIAS_PREPARE:
@@ -1220,11 +1212,11 @@ static int cs42l73_set_bias_level(struct snd_soc_codec *codec,
                        regcache_cache_only(cs42l73->regmap, false);
                        regcache_sync(cs42l73->regmap);
                }
-               snd_soc_update_bits(codec, CS42L73_PWRCTL1, PDN, 1);
+               snd_soc_update_bits(codec, CS42L73_PWRCTL1, CS42L73_PDN, 1);
                break;
 
        case SND_SOC_BIAS_OFF:
-               snd_soc_update_bits(codec, CS42L73_PWRCTL1, PDN, 1);
+               snd_soc_update_bits(codec, CS42L73_PWRCTL1, CS42L73_PDN, 1);
                if (cs42l73->shutdwn_delay > 0) {
                        mdelay(cs42l73->shutdwn_delay);
                        cs42l73->shutdwn_delay = 0;
@@ -1233,7 +1225,7 @@ static int cs42l73_set_bias_level(struct snd_soc_codec *codec,
                                     * down.
                                     */
                }
-               snd_soc_update_bits(codec, CS42L73_DMMCC, MCLKDIS, 1);
+               snd_soc_update_bits(codec, CS42L73_DMMCC, CS42L73_MCLKDIS, 1);
                break;
        }
        codec->dapm.bias_level = level;
@@ -1367,11 +1359,16 @@ static int cs42l73_probe(struct snd_soc_codec *codec)
                return ret;
        }
 
-       regcache_cache_only(cs42l73->regmap, true);
-
        cs42l73_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
-       cs42l73->mclksel = CS42L73_CLKID_MCLK1; /* MCLK1 as master clk */
+       /* Set Charge Pump Frequency */
+       if (cs42l73->pdata.chgfreq)
+               snd_soc_update_bits(codec, CS42L73_CPFCHC,
+                                   CS42L73_CHARGEPUMP_MASK,
+                                       cs42l73->pdata.chgfreq << 4);
+
+       /* MCLK1 as master clk */
+       cs42l73->mclksel = CS42L73_CLKID_MCLK1;
        cs42l73->mclk = 0;
 
        return ret;
@@ -1415,9 +1412,11 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
                             const struct i2c_device_id *id)
 {
        struct cs42l73_private *cs42l73;
+       struct cs42l73_platform_data *pdata = dev_get_platdata(&i2c_client->dev);
        int ret;
        unsigned int devid = 0;
        unsigned int reg;
+       u32 val32;
 
        cs42l73 = devm_kzalloc(&i2c_client->dev, sizeof(struct cs42l73_private),
                               GFP_KERNEL);
@@ -1426,14 +1425,49 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
                return -ENOMEM;
        }
 
-       i2c_set_clientdata(i2c_client, cs42l73);
-
        cs42l73->regmap = devm_regmap_init_i2c(i2c_client, &cs42l73_regmap);
        if (IS_ERR(cs42l73->regmap)) {
                ret = PTR_ERR(cs42l73->regmap);
                dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
                return ret;
        }
+
+       if (pdata) {
+               cs42l73->pdata = *pdata;
+       } else {
+               pdata = devm_kzalloc(&i2c_client->dev,
+                                    sizeof(struct cs42l73_platform_data),
+                               GFP_KERNEL);
+               if (!pdata) {
+                       dev_err(&i2c_client->dev, "could not allocate pdata\n");
+                       return -ENOMEM;
+               }
+               if (i2c_client->dev.of_node) {
+                       if (of_property_read_u32(i2c_client->dev.of_node,
+                               "chgfreq", &val32) >= 0)
+                               pdata->chgfreq = val32;
+               }
+               pdata->reset_gpio = of_get_named_gpio(i2c_client->dev.of_node,
+                                               "reset-gpio", 0);
+               cs42l73->pdata = *pdata;
+       }
+
+       i2c_set_clientdata(i2c_client, cs42l73);
+
+       if (cs42l73->pdata.reset_gpio) {
+               ret = gpio_request_one(cs42l73->pdata.reset_gpio,
+                                      GPIOF_OUT_INIT_HIGH, "CS42L73 /RST");
+               if (ret < 0) {
+                       dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
+                               cs42l73->pdata.reset_gpio, ret);
+                       return ret;
+               }
+               gpio_set_value_cansleep(cs42l73->pdata.reset_gpio, 0);
+               gpio_set_value_cansleep(cs42l73->pdata.reset_gpio, 1);
+       }
+
+       regcache_cache_bypass(cs42l73->regmap, true);
+
        /* initialize codec */
        ret = regmap_read(cs42l73->regmap, CS42L73_DEVID_AB, &reg);
        devid = (reg & 0xFF) << 12;
@@ -1444,7 +1478,6 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
        ret = regmap_read(cs42l73->regmap, CS42L73_DEVID_E, &reg);
        devid |= (reg & 0xF0) >> 4;
 
-
        if (devid != CS42L73_DEVID) {
                ret = -ENODEV;
                dev_err(&i2c_client->dev,
@@ -1462,7 +1495,7 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
        dev_info(&i2c_client->dev,
                 "Cirrus Logic CS42L73, Revision: %02X\n", reg & 0xFF);
 
-       regcache_cache_only(cs42l73->regmap, true);
+       regcache_cache_bypass(cs42l73->regmap, false);
 
        ret =  snd_soc_register_codec(&i2c_client->dev,
                        &soc_codec_dev_cs42l73, cs42l73_dai,
@@ -1478,6 +1511,12 @@ static int cs42l73_i2c_remove(struct i2c_client *client)
        return 0;
 }
 
+static const struct of_device_id cs42l73_of_match[] = {
+       { .compatible = "cirrus,cs42l73", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, cs42l73_of_match);
+
 static const struct i2c_device_id cs42l73_id[] = {
        {"cs42l73", 0},
        {}
@@ -1489,6 +1528,7 @@ static struct i2c_driver cs42l73_i2c_driver = {
        .driver = {
                   .name = "cs42l73",
                   .owner = THIS_MODULE,
+                  .of_match_table = cs42l73_of_match,
                   },
        .id_table = cs42l73_id,
        .probe = cs42l73_i2c_probe,
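
The cs42l73 changes above add device-tree support next to the existing platform data: the charge-pump frequency comes from a "chgfreq" property and the reset line from "reset-gpio" (both names appear in the hunks). A hedged sketch of that DT fallback; everything named "foo" here is hypothetical:

#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/types.h>

struct foo_platform_data {
	u32 chgfreq;
	int reset_gpio;
};

static int foo_parse_dt(struct device *dev, struct foo_platform_data *pdata)
{
	struct device_node *np = dev->of_node;
	u32 val;

	if (!np)
		return -ENODEV;

	/* Optional property: keep the zeroed default when it is absent. */
	if (!of_property_read_u32(np, "chgfreq", &val))
		pdata->chgfreq = val;

	/* Returns a negative errno when no "reset-gpio" is described. */
	pdata->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);

	return 0;
}

After parsing, the probe above toggles the reset GPIO with gpio_set_value_cansleep() and temporarily enables regcache_cache_bypass() so the device-ID registers are read from the hardware rather than the cache.
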
index f30a4c4d62e6a08cbf125d41bd4405657678aeb9..45746186a67820fd40678e452fc7b67a790ac69a 100644 (file)
 /* Bitfield Definitions */
 
 /* CS42L73_PWRCTL1 */
-#define PDN_ADCB               (1 << 7)
-#define PDN_DMICB              (1 << 6)
-#define PDN_ADCA               (1 << 5)
-#define PDN_DMICA              (1 << 4)
-#define PDN_LDO                        (1 << 2)
-#define DISCHG_FILT            (1 << 1)
-#define PDN                    (1 << 0)
+#define CS42L73_PDN_ADCB               (1 << 7)
+#define CS42L73_PDN_DMICB              (1 << 6)
+#define CS42L73_PDN_ADCA               (1 << 5)
+#define CS42L73_PDN_DMICA              (1 << 4)
+#define CS42L73_PDN_LDO                        (1 << 2)
+#define CS42L73_DISCHG_FILT            (1 << 1)
+#define CS42L73_PDN                    (1 << 0)
 
 /* CS42L73_PWRCTL2 */
-#define PDN_MIC2_BIAS          (1 << 7)
-#define PDN_MIC1_BIAS          (1 << 6)
-#define PDN_VSP                        (1 << 4)
-#define PDN_ASP_SDOUT          (1 << 3)
-#define PDN_ASP_SDIN           (1 << 2)
-#define PDN_XSP_SDOUT          (1 << 1)
-#define PDN_XSP_SDIN           (1 << 0)
+#define CS42L73_PDN_MIC2_BIAS          (1 << 7)
+#define CS42L73_PDN_MIC1_BIAS          (1 << 6)
+#define CS42L73_PDN_VSP                        (1 << 4)
+#define CS42L73_PDN_ASP_SDOUT          (1 << 3)
+#define CS42L73_PDN_ASP_SDIN           (1 << 2)
+#define CS42L73_PDN_XSP_SDOUT          (1 << 1)
+#define CS42L73_PDN_XSP_SDIN           (1 << 0)
 
 /* CS42L73_PWRCTL3 */
-#define PDN_THMS               (1 << 5)
-#define PDN_SPKLO              (1 << 4)
-#define PDN_EAR                        (1 << 3)
-#define PDN_SPK                        (1 << 2)
-#define PDN_LO                 (1 << 1)
-#define PDN_HP                 (1 << 0)
+#define CS42L73_PDN_THMS               (1 << 5)
+#define CS42L73_PDN_SPKLO              (1 << 4)
+#define CS42L73_PDN_EAR                        (1 << 3)
+#define CS42L73_PDN_SPK                        (1 << 2)
+#define CS42L73_PDN_LO                 (1 << 1)
+#define CS42L73_PDN_HP                 (1 << 0)
 
 /* Thermal Overload Detect. Requires interrupt ... */
-#define THMOVLD_150C           0
-#define THMOVLD_132C           1
-#define THMOVLD_115C           2
-#define THMOVLD_098C           3
+#define CS42L73_THMOVLD_150C           0
+#define CS42L73_THMOVLD_132C           1
+#define CS42L73_THMOVLD_115C           2
+#define CS42L73_THMOVLD_098C           3
 
+#define CS42L73_CHARGEPUMP_MASK        (0xF0)
 
 /* CS42L73_ASPC, CS42L73_XSPC, CS42L73_VSPC */
-#define        SP_3ST                  (1 << 7)
-#define SPDIF_I2S              (0 << 6)
-#define SPDIF_PCM              (1 << 6)
-#define PCM_MODE0              (0 << 4)
-#define PCM_MODE1              (1 << 4)
-#define PCM_MODE2              (2 << 4)
-#define PCM_MODE_MASK          (3 << 4)
-#define PCM_BIT_ORDER          (1 << 3)
-#define MCK_SCLK_64FS          (0 << 0)
-#define MCK_SCLK_MCLK          (2 << 0)
-#define MCK_SCLK_PREMCLK       (3 << 0)
+#define        CS42L73_SP_3ST                  (1 << 7)
+#define CS42L73_SPDIF_I2S              (0 << 6)
+#define CS42L73_SPDIF_PCM              (1 << 6)
+#define CS42L73_PCM_MODE0              (0 << 4)
+#define CS42L73_PCM_MODE1              (1 << 4)
+#define CS42L73_PCM_MODE2              (2 << 4)
+#define CS42L73_PCM_MODE_MASK          (3 << 4)
+#define CS42L73_PCM_BIT_ORDER          (1 << 3)
+#define CS42L73_MCK_SCLK_64FS          (0 << 0)
+#define CS42L73_MCK_SCLK_MCLK          (2 << 0)
+#define CS42L73_MCK_SCLK_PREMCLK       (3 << 0)
 
 /* CS42L73_xSPMMCC */
-#define MS_MASTER              (1 << 7)
+#define CS42L73_MS_MASTER              (1 << 7)
 
 
 /* CS42L73_DMMCC */
-#define MCLKDIS                        (1 << 0)
-#define MCLKSEL_MCLK2          (1 << 4)
-#define MCLKSEL_MCLK1          (0 << 4)
+#define CS42L73_MCLKDIS                        (1 << 0)
+#define CS42L73_MCLKSEL_MCLK2          (1 << 4)
+#define CS42L73_MCLKSEL_MCLK1          (0 << 4)
 
 /* CS42L73 MCLK derived from MCLK1 or MCLK2 */
 #define CS42L73_CLKID_MCLK1     0
 #define CS42L73_VSP            2
 
 /* IS1, IM1 */
-#define MIC2_SDET              (1 << 6)
-#define THMOVLD                        (1 << 4)
-#define DIGMIXOVFL             (1 << 3)
-#define IPBOVFL                        (1 << 1)
-#define IPAOVFL                        (1 << 0)
+#define CS42L73_MIC2_SDET              (1 << 6)
+#define CS42L73_THMOVLD                        (1 << 4)
+#define CS42L73_DIGMIXOVFL             (1 << 3)
+#define CS42L73_IPBOVFL                        (1 << 1)
+#define CS42L73_IPAOVFL                        (1 << 0)
 
 /* Analog Softramp */
-#define ANLGOSFT               (1 << 0)
+#define CS42L73_ANLGOSFT               (1 << 0)
 
 /* HP A/B Analog Mute */
-#define HPA_MUTE               (1 << 7)
+#define CS42L73_HPA_MUTE               (1 << 7)
 /* LO A/B Analog Mute  */
-#define LOA_MUTE               (1 << 7)
+#define CS42L73_LOA_MUTE               (1 << 7)
 /* Digital Mute */
-#define HLAD_MUTE              (1 << 0)
-#define HLBD_MUTE              (1 << 1)
-#define SPKD_MUTE              (1 << 2)
-#define ESLD_MUTE              (1 << 3)
+#define CS42L73_HLAD_MUTE              (1 << 0)
+#define CS42L73_HLBD_MUTE              (1 << 1)
+#define CS42L73_SPKD_MUTE              (1 << 2)
+#define CS42L73_ESLD_MUTE              (1 << 3)
 
 /* Misc defines for codec */
-#define CS42L73_RESET_GPIO 143
-
 #define CS42L73_DEVID          0x00042A73
 #define CS42L73_MCLKX_MIN      5644800
 #define CS42L73_MCLKX_MAX      38400000
index 566a367c94fa0ce71ac04f86686923ed63ece58d..66ceee22fdadd923225f1b7001b208c3c1b1fde6 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
+#include <linux/regmap.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -38,294 +39,223 @@ struct max98088_cdata {
 };
 
 struct max98088_priv {
-       enum max98088_type devtype;
-       struct max98088_pdata *pdata;
-       unsigned int sysclk;
-       struct max98088_cdata dai[2];
-       int eq_textcnt;
-       const char **eq_texts;
-       struct soc_enum eq_enum;
-       u8 ina_state;
-       u8 inb_state;
-       unsigned int ex_mode;
-       unsigned int digmic;
-       unsigned int mic1pre;
-       unsigned int mic2pre;
-       unsigned int extmic_mode;
+       struct regmap *regmap;
+       enum max98088_type devtype;
+       struct max98088_pdata *pdata;
+       unsigned int sysclk;
+       struct max98088_cdata dai[2];
+       int eq_textcnt;
+       const char **eq_texts;
+       struct soc_enum eq_enum;
+       u8 ina_state;
+       u8 inb_state;
+       unsigned int ex_mode;
+       unsigned int digmic;
+       unsigned int mic1pre;
+       unsigned int mic2pre;
+       unsigned int extmic_mode;
 };
 
-static const u8 max98088_reg[M98088_REG_CNT] = {
-       0x00, /* 00 IRQ status */
-       0x00, /* 01 MIC status */
-       0x00, /* 02 jack status */
-       0x00, /* 03 battery voltage */
-       0x00, /* 04 */
-       0x00, /* 05 */
-       0x00, /* 06 */
-       0x00, /* 07 */
-       0x00, /* 08 */
-       0x00, /* 09 */
-       0x00, /* 0A */
-       0x00, /* 0B */
-       0x00, /* 0C */
-       0x00, /* 0D */
-       0x00, /* 0E */
-       0x00, /* 0F interrupt enable */
-
-       0x00, /* 10 master clock */
-       0x00, /* 11 DAI1 clock mode */
-       0x00, /* 12 DAI1 clock control */
-       0x00, /* 13 DAI1 clock control */
-       0x00, /* 14 DAI1 format */
-       0x00, /* 15 DAI1 clock */
-       0x00, /* 16 DAI1 config */
-       0x00, /* 17 DAI1 TDM */
-       0x00, /* 18 DAI1 filters */
-       0x00, /* 19 DAI2 clock mode */
-       0x00, /* 1A DAI2 clock control */
-       0x00, /* 1B DAI2 clock control */
-       0x00, /* 1C DAI2 format */
-       0x00, /* 1D DAI2 clock */
-       0x00, /* 1E DAI2 config */
-       0x00, /* 1F DAI2 TDM */
-
-       0x00, /* 20 DAI2 filters */
-       0x00, /* 21 data config */
-       0x00, /* 22 DAC mixer */
-       0x00, /* 23 left ADC mixer */
-       0x00, /* 24 right ADC mixer */
-       0x00, /* 25 left HP mixer */
-       0x00, /* 26 right HP mixer */
-       0x00, /* 27 HP control */
-       0x00, /* 28 left REC mixer */
-       0x00, /* 29 right REC mixer */
-       0x00, /* 2A REC control */
-       0x00, /* 2B left SPK mixer */
-       0x00, /* 2C right SPK mixer */
-       0x00, /* 2D SPK control */
-       0x00, /* 2E sidetone */
-       0x00, /* 2F DAI1 playback level */
-
-       0x00, /* 30 DAI1 playback level */
-       0x00, /* 31 DAI2 playback level */
-       0x00, /* 32 DAI2 playbakc level */
-       0x00, /* 33 left ADC level */
-       0x00, /* 34 right ADC level */
-       0x00, /* 35 MIC1 level */
-       0x00, /* 36 MIC2 level */
-       0x00, /* 37 INA level */
-       0x00, /* 38 INB level */
-       0x00, /* 39 left HP volume */
-       0x00, /* 3A right HP volume */
-       0x00, /* 3B left REC volume */
-       0x00, /* 3C right REC volume */
-       0x00, /* 3D left SPK volume */
-       0x00, /* 3E right SPK volume */
-       0x00, /* 3F MIC config */
-
-       0x00, /* 40 MIC threshold */
-       0x00, /* 41 excursion limiter filter */
-       0x00, /* 42 excursion limiter threshold */
-       0x00, /* 43 ALC */
-       0x00, /* 44 power limiter threshold */
-       0x00, /* 45 power limiter config */
-       0x00, /* 46 distortion limiter config */
-       0x00, /* 47 audio input */
-       0x00, /* 48 microphone */
-       0x00, /* 49 level control */
-       0x00, /* 4A bypass switches */
-       0x00, /* 4B jack detect */
-       0x00, /* 4C input enable */
-       0x00, /* 4D output enable */
-       0xF0, /* 4E bias control */
-       0x00, /* 4F DAC power */
-
-       0x0F, /* 50 DAC power */
-       0x00, /* 51 system */
-       0x00, /* 52 DAI1 EQ1 */
-       0x00, /* 53 DAI1 EQ1 */
-       0x00, /* 54 DAI1 EQ1 */
-       0x00, /* 55 DAI1 EQ1 */
-       0x00, /* 56 DAI1 EQ1 */
-       0x00, /* 57 DAI1 EQ1 */
-       0x00, /* 58 DAI1 EQ1 */
-       0x00, /* 59 DAI1 EQ1 */
-       0x00, /* 5A DAI1 EQ1 */
-       0x00, /* 5B DAI1 EQ1 */
-       0x00, /* 5C DAI1 EQ2 */
-       0x00, /* 5D DAI1 EQ2 */
-       0x00, /* 5E DAI1 EQ2 */
-       0x00, /* 5F DAI1 EQ2 */
-
-       0x00, /* 60 DAI1 EQ2 */
-       0x00, /* 61 DAI1 EQ2 */
-       0x00, /* 62 DAI1 EQ2 */
-       0x00, /* 63 DAI1 EQ2 */
-       0x00, /* 64 DAI1 EQ2 */
-       0x00, /* 65 DAI1 EQ2 */
-       0x00, /* 66 DAI1 EQ3 */
-       0x00, /* 67 DAI1 EQ3 */
-       0x00, /* 68 DAI1 EQ3 */
-       0x00, /* 69 DAI1 EQ3 */
-       0x00, /* 6A DAI1 EQ3 */
-       0x00, /* 6B DAI1 EQ3 */
-       0x00, /* 6C DAI1 EQ3 */
-       0x00, /* 6D DAI1 EQ3 */
-       0x00, /* 6E DAI1 EQ3 */
-       0x00, /* 6F DAI1 EQ3 */
-
-       0x00, /* 70 DAI1 EQ4 */
-       0x00, /* 71 DAI1 EQ4 */
-       0x00, /* 72 DAI1 EQ4 */
-       0x00, /* 73 DAI1 EQ4 */
-       0x00, /* 74 DAI1 EQ4 */
-       0x00, /* 75 DAI1 EQ4 */
-       0x00, /* 76 DAI1 EQ4 */
-       0x00, /* 77 DAI1 EQ4 */
-       0x00, /* 78 DAI1 EQ4 */
-       0x00, /* 79 DAI1 EQ4 */
-       0x00, /* 7A DAI1 EQ5 */
-       0x00, /* 7B DAI1 EQ5 */
-       0x00, /* 7C DAI1 EQ5 */
-       0x00, /* 7D DAI1 EQ5 */
-       0x00, /* 7E DAI1 EQ5 */
-       0x00, /* 7F DAI1 EQ5 */
-
-       0x00, /* 80 DAI1 EQ5 */
-       0x00, /* 81 DAI1 EQ5 */
-       0x00, /* 82 DAI1 EQ5 */
-       0x00, /* 83 DAI1 EQ5 */
-       0x00, /* 84 DAI2 EQ1 */
-       0x00, /* 85 DAI2 EQ1 */
-       0x00, /* 86 DAI2 EQ1 */
-       0x00, /* 87 DAI2 EQ1 */
-       0x00, /* 88 DAI2 EQ1 */
-       0x00, /* 89 DAI2 EQ1 */
-       0x00, /* 8A DAI2 EQ1 */
-       0x00, /* 8B DAI2 EQ1 */
-       0x00, /* 8C DAI2 EQ1 */
-       0x00, /* 8D DAI2 EQ1 */
-       0x00, /* 8E DAI2 EQ2 */
-       0x00, /* 8F DAI2 EQ2 */
-
-       0x00, /* 90 DAI2 EQ2 */
-       0x00, /* 91 DAI2 EQ2 */
-       0x00, /* 92 DAI2 EQ2 */
-       0x00, /* 93 DAI2 EQ2 */
-       0x00, /* 94 DAI2 EQ2 */
-       0x00, /* 95 DAI2 EQ2 */
-       0x00, /* 96 DAI2 EQ2 */
-       0x00, /* 97 DAI2 EQ2 */
-       0x00, /* 98 DAI2 EQ3 */
-       0x00, /* 99 DAI2 EQ3 */
-       0x00, /* 9A DAI2 EQ3 */
-       0x00, /* 9B DAI2 EQ3 */
-       0x00, /* 9C DAI2 EQ3 */
-       0x00, /* 9D DAI2 EQ3 */
-       0x00, /* 9E DAI2 EQ3 */
-       0x00, /* 9F DAI2 EQ3 */
-
-       0x00, /* A0 DAI2 EQ3 */
-       0x00, /* A1 DAI2 EQ3 */
-       0x00, /* A2 DAI2 EQ4 */
-       0x00, /* A3 DAI2 EQ4 */
-       0x00, /* A4 DAI2 EQ4 */
-       0x00, /* A5 DAI2 EQ4 */
-       0x00, /* A6 DAI2 EQ4 */
-       0x00, /* A7 DAI2 EQ4 */
-       0x00, /* A8 DAI2 EQ4 */
-       0x00, /* A9 DAI2 EQ4 */
-       0x00, /* AA DAI2 EQ4 */
-       0x00, /* AB DAI2 EQ4 */
-       0x00, /* AC DAI2 EQ5 */
-       0x00, /* AD DAI2 EQ5 */
-       0x00, /* AE DAI2 EQ5 */
-       0x00, /* AF DAI2 EQ5 */
-
-       0x00, /* B0 DAI2 EQ5 */
-       0x00, /* B1 DAI2 EQ5 */
-       0x00, /* B2 DAI2 EQ5 */
-       0x00, /* B3 DAI2 EQ5 */
-       0x00, /* B4 DAI2 EQ5 */
-       0x00, /* B5 DAI2 EQ5 */
-       0x00, /* B6 DAI1 biquad */
-       0x00, /* B7 DAI1 biquad */
-       0x00, /* B8 DAI1 biquad */
-       0x00, /* B9 DAI1 biquad */
-       0x00, /* BA DAI1 biquad */
-       0x00, /* BB DAI1 biquad */
-       0x00, /* BC DAI1 biquad */
-       0x00, /* BD DAI1 biquad */
-       0x00, /* BE DAI1 biquad */
-       0x00, /* BF DAI1 biquad */
-
-       0x00, /* C0 DAI2 biquad */
-       0x00, /* C1 DAI2 biquad */
-       0x00, /* C2 DAI2 biquad */
-       0x00, /* C3 DAI2 biquad */
-       0x00, /* C4 DAI2 biquad */
-       0x00, /* C5 DAI2 biquad */
-       0x00, /* C6 DAI2 biquad */
-       0x00, /* C7 DAI2 biquad */
-       0x00, /* C8 DAI2 biquad */
-       0x00, /* C9 DAI2 biquad */
-       0x00, /* CA */
-       0x00, /* CB */
-       0x00, /* CC */
-       0x00, /* CD */
-       0x00, /* CE */
-       0x00, /* CF */
-
-       0x00, /* D0 */
-       0x00, /* D1 */
-       0x00, /* D2 */
-       0x00, /* D3 */
-       0x00, /* D4 */
-       0x00, /* D5 */
-       0x00, /* D6 */
-       0x00, /* D7 */
-       0x00, /* D8 */
-       0x00, /* D9 */
-       0x00, /* DA */
-       0x70, /* DB */
-       0x00, /* DC */
-       0x00, /* DD */
-       0x00, /* DE */
-       0x00, /* DF */
-
-       0x00, /* E0 */
-       0x00, /* E1 */
-       0x00, /* E2 */
-       0x00, /* E3 */
-       0x00, /* E4 */
-       0x00, /* E5 */
-       0x00, /* E6 */
-       0x00, /* E7 */
-       0x00, /* E8 */
-       0x00, /* E9 */
-       0x00, /* EA */
-       0x00, /* EB */
-       0x00, /* EC */
-       0x00, /* ED */
-       0x00, /* EE */
-       0x00, /* EF */
-
-       0x00, /* F0 */
-       0x00, /* F1 */
-       0x00, /* F2 */
-       0x00, /* F3 */
-       0x00, /* F4 */
-       0x00, /* F5 */
-       0x00, /* F6 */
-       0x00, /* F7 */
-       0x00, /* F8 */
-       0x00, /* F9 */
-       0x00, /* FA */
-       0x00, /* FB */
-       0x00, /* FC */
-       0x00, /* FD */
-       0x00, /* FE */
-       0x00, /* FF */
+static const struct reg_default max98088_reg[] = {
+       {  0xf, 0x00 }, /* 0F interrupt enable */
+
+       { 0x10, 0x00 }, /* 10 master clock */
+       { 0x11, 0x00 }, /* 11 DAI1 clock mode */
+       { 0x12, 0x00 }, /* 12 DAI1 clock control */
+       { 0x13, 0x00 }, /* 13 DAI1 clock control */
+       { 0x14, 0x00 }, /* 14 DAI1 format */
+       { 0x15, 0x00 }, /* 15 DAI1 clock */
+       { 0x16, 0x00 }, /* 16 DAI1 config */
+       { 0x17, 0x00 }, /* 17 DAI1 TDM */
+       { 0x18, 0x00 }, /* 18 DAI1 filters */
+       { 0x19, 0x00 }, /* 19 DAI2 clock mode */
+       { 0x1a, 0x00 }, /* 1A DAI2 clock control */
+       { 0x1b, 0x00 }, /* 1B DAI2 clock control */
+       { 0x1c, 0x00 }, /* 1C DAI2 format */
+       { 0x1d, 0x00 }, /* 1D DAI2 clock */
+       { 0x1e, 0x00 }, /* 1E DAI2 config */
+       { 0x1f, 0x00 }, /* 1F DAI2 TDM */
+
+       { 0x20, 0x00 }, /* 20 DAI2 filters */
+       { 0x21, 0x00 }, /* 21 data config */
+       { 0x22, 0x00 }, /* 22 DAC mixer */
+       { 0x23, 0x00 }, /* 23 left ADC mixer */
+       { 0x24, 0x00 }, /* 24 right ADC mixer */
+       { 0x25, 0x00 }, /* 25 left HP mixer */
+       { 0x26, 0x00 }, /* 26 right HP mixer */
+       { 0x27, 0x00 }, /* 27 HP control */
+       { 0x28, 0x00 }, /* 28 left REC mixer */
+       { 0x29, 0x00 }, /* 29 right REC mixer */
+       { 0x2a, 0x00 }, /* 2A REC control */
+       { 0x2b, 0x00 }, /* 2B left SPK mixer */
+       { 0x2c, 0x00 }, /* 2C right SPK mixer */
+       { 0x2d, 0x00 }, /* 2D SPK control */
+       { 0x2e, 0x00 }, /* 2E sidetone */
+       { 0x2f, 0x00 }, /* 2F DAI1 playback level */
+
+       { 0x30, 0x00 }, /* 30 DAI1 playback level */
+       { 0x31, 0x00 }, /* 31 DAI2 playback level */
+       { 0x32, 0x00 }, /* 32 DAI2 playback level */
+       { 0x33, 0x00 }, /* 33 left ADC level */
+       { 0x34, 0x00 }, /* 34 right ADC level */
+       { 0x35, 0x00 }, /* 35 MIC1 level */
+       { 0x36, 0x00 }, /* 36 MIC2 level */
+       { 0x37, 0x00 }, /* 37 INA level */
+       { 0x38, 0x00 }, /* 38 INB level */
+       { 0x39, 0x00 }, /* 39 left HP volume */
+       { 0x3a, 0x00 }, /* 3A right HP volume */
+       { 0x3b, 0x00 }, /* 3B left REC volume */
+       { 0x3c, 0x00 }, /* 3C right REC volume */
+       { 0x3d, 0x00 }, /* 3D left SPK volume */
+       { 0x3e, 0x00 }, /* 3E right SPK volume */
+       { 0x3f, 0x00 }, /* 3F MIC config */
+
+       { 0x40, 0x00 }, /* 40 MIC threshold */
+       { 0x41, 0x00 }, /* 41 excursion limiter filter */
+       { 0x42, 0x00 }, /* 42 excursion limiter threshold */
+       { 0x43, 0x00 }, /* 43 ALC */
+       { 0x44, 0x00 }, /* 44 power limiter threshold */
+       { 0x45, 0x00 }, /* 45 power limiter config */
+       { 0x46, 0x00 }, /* 46 distortion limiter config */
+       { 0x47, 0x00 }, /* 47 audio input */
+       { 0x48, 0x00 }, /* 48 microphone */
+       { 0x49, 0x00 }, /* 49 level control */
+       { 0x4a, 0x00 }, /* 4A bypass switches */
+       { 0x4b, 0x00 }, /* 4B jack detect */
+       { 0x4c, 0x00 }, /* 4C input enable */
+       { 0x4d, 0x00 }, /* 4D output enable */
+       { 0x4e, 0xF0 }, /* 4E bias control */
+       { 0x4f, 0x00 }, /* 4F DAC power */
+
+       { 0x50, 0x0F }, /* 50 DAC power */
+       { 0x51, 0x00 }, /* 51 system */
+       { 0x52, 0x00 }, /* 52 DAI1 EQ1 */
+       { 0x53, 0x00 }, /* 53 DAI1 EQ1 */
+       { 0x54, 0x00 }, /* 54 DAI1 EQ1 */
+       { 0x55, 0x00 }, /* 55 DAI1 EQ1 */
+       { 0x56, 0x00 }, /* 56 DAI1 EQ1 */
+       { 0x57, 0x00 }, /* 57 DAI1 EQ1 */
+       { 0x58, 0x00 }, /* 58 DAI1 EQ1 */
+       { 0x59, 0x00 }, /* 59 DAI1 EQ1 */
+       { 0x5a, 0x00 }, /* 5A DAI1 EQ1 */
+       { 0x5b, 0x00 }, /* 5B DAI1 EQ1 */
+       { 0x5c, 0x00 }, /* 5C DAI1 EQ2 */
+       { 0x5d, 0x00 }, /* 5D DAI1 EQ2 */
+       { 0x5e, 0x00 }, /* 5E DAI1 EQ2 */
+       { 0x5f, 0x00 }, /* 5F DAI1 EQ2 */
+
+       { 0x60, 0x00 }, /* 60 DAI1 EQ2 */
+       { 0x61, 0x00 }, /* 61 DAI1 EQ2 */
+       { 0x62, 0x00 }, /* 62 DAI1 EQ2 */
+       { 0x63, 0x00 }, /* 63 DAI1 EQ2 */
+       { 0x64, 0x00 }, /* 64 DAI1 EQ2 */
+       { 0x65, 0x00 }, /* 65 DAI1 EQ2 */
+       { 0x66, 0x00 }, /* 66 DAI1 EQ3 */
+       { 0x67, 0x00 }, /* 67 DAI1 EQ3 */
+       { 0x68, 0x00 }, /* 68 DAI1 EQ3 */
+       { 0x69, 0x00 }, /* 69 DAI1 EQ3 */
+       { 0x6a, 0x00 }, /* 6A DAI1 EQ3 */
+       { 0x6b, 0x00 }, /* 6B DAI1 EQ3 */
+       { 0x6c, 0x00 }, /* 6C DAI1 EQ3 */
+       { 0x6d, 0x00 }, /* 6D DAI1 EQ3 */
+       { 0x6e, 0x00 }, /* 6E DAI1 EQ3 */
+       { 0x6f, 0x00 }, /* 6F DAI1 EQ3 */
+
+       { 0x70, 0x00 }, /* 70 DAI1 EQ4 */
+       { 0x71, 0x00 }, /* 71 DAI1 EQ4 */
+       { 0x72, 0x00 }, /* 72 DAI1 EQ4 */
+       { 0x73, 0x00 }, /* 73 DAI1 EQ4 */
+       { 0x74, 0x00 }, /* 74 DAI1 EQ4 */
+       { 0x75, 0x00 }, /* 75 DAI1 EQ4 */
+       { 0x76, 0x00 }, /* 76 DAI1 EQ4 */
+       { 0x77, 0x00 }, /* 77 DAI1 EQ4 */
+       { 0x78, 0x00 }, /* 78 DAI1 EQ4 */
+       { 0x79, 0x00 }, /* 79 DAI1 EQ4 */
+       { 0x7a, 0x00 }, /* 7A DAI1 EQ5 */
+       { 0x7b, 0x00 }, /* 7B DAI1 EQ5 */
+       { 0x7c, 0x00 }, /* 7C DAI1 EQ5 */
+       { 0x7d, 0x00 }, /* 7D DAI1 EQ5 */
+       { 0x7e, 0x00 }, /* 7E DAI1 EQ5 */
+       { 0x7f, 0x00 }, /* 7F DAI1 EQ5 */
+
+       { 0x80, 0x00 }, /* 80 DAI1 EQ5 */
+       { 0x81, 0x00 }, /* 81 DAI1 EQ5 */
+       { 0x82, 0x00 }, /* 82 DAI1 EQ5 */
+       { 0x83, 0x00 }, /* 83 DAI1 EQ5 */
+       { 0x84, 0x00 }, /* 84 DAI2 EQ1 */
+       { 0x85, 0x00 }, /* 85 DAI2 EQ1 */
+       { 0x86, 0x00 }, /* 86 DAI2 EQ1 */
+       { 0x87, 0x00 }, /* 87 DAI2 EQ1 */
+       { 0x88, 0x00 }, /* 88 DAI2 EQ1 */
+       { 0x89, 0x00 }, /* 89 DAI2 EQ1 */
+       { 0x8a, 0x00 }, /* 8A DAI2 EQ1 */
+       { 0x8b, 0x00 }, /* 8B DAI2 EQ1 */
+       { 0x8c, 0x00 }, /* 8C DAI2 EQ1 */
+       { 0x8d, 0x00 }, /* 8D DAI2 EQ1 */
+       { 0x8e, 0x00 }, /* 8E DAI2 EQ2 */
+       { 0x8f, 0x00 }, /* 8F DAI2 EQ2 */
+
+       { 0x90, 0x00 }, /* 90 DAI2 EQ2 */
+       { 0x91, 0x00 }, /* 91 DAI2 EQ2 */
+       { 0x92, 0x00 }, /* 92 DAI2 EQ2 */
+       { 0x93, 0x00 }, /* 93 DAI2 EQ2 */
+       { 0x94, 0x00 }, /* 94 DAI2 EQ2 */
+       { 0x95, 0x00 }, /* 95 DAI2 EQ2 */
+       { 0x96, 0x00 }, /* 96 DAI2 EQ2 */
+       { 0x97, 0x00 }, /* 97 DAI2 EQ2 */
+       { 0x98, 0x00 }, /* 98 DAI2 EQ3 */
+       { 0x99, 0x00 }, /* 99 DAI2 EQ3 */
+       { 0x9a, 0x00 }, /* 9A DAI2 EQ3 */
+       { 0x9b, 0x00 }, /* 9B DAI2 EQ3 */
+       { 0x9c, 0x00 }, /* 9C DAI2 EQ3 */
+       { 0x9d, 0x00 }, /* 9D DAI2 EQ3 */
+       { 0x9e, 0x00 }, /* 9E DAI2 EQ3 */
+       { 0x9f, 0x00 }, /* 9F DAI2 EQ3 */
+
+       { 0xa0, 0x00 }, /* A0 DAI2 EQ3 */
+       { 0xa1, 0x00 }, /* A1 DAI2 EQ3 */
+       { 0xa2, 0x00 }, /* A2 DAI2 EQ4 */
+       { 0xa3, 0x00 }, /* A3 DAI2 EQ4 */
+       { 0xa4, 0x00 }, /* A4 DAI2 EQ4 */
+       { 0xa5, 0x00 }, /* A5 DAI2 EQ4 */
+       { 0xa6, 0x00 }, /* A6 DAI2 EQ4 */
+       { 0xa7, 0x00 }, /* A7 DAI2 EQ4 */
+       { 0xa8, 0x00 }, /* A8 DAI2 EQ4 */
+       { 0xa9, 0x00 }, /* A9 DAI2 EQ4 */
+       { 0xaa, 0x00 }, /* AA DAI2 EQ4 */
+       { 0xab, 0x00 }, /* AB DAI2 EQ4 */
+       { 0xac, 0x00 }, /* AC DAI2 EQ5 */
+       { 0xad, 0x00 }, /* AD DAI2 EQ5 */
+       { 0xae, 0x00 }, /* AE DAI2 EQ5 */
+       { 0xaf, 0x00 }, /* AF DAI2 EQ5 */
+
+       { 0xb0, 0x00 }, /* B0 DAI2 EQ5 */
+       { 0xb1, 0x00 }, /* B1 DAI2 EQ5 */
+       { 0xb2, 0x00 }, /* B2 DAI2 EQ5 */
+       { 0xb3, 0x00 }, /* B3 DAI2 EQ5 */
+       { 0xb4, 0x00 }, /* B4 DAI2 EQ5 */
+       { 0xb5, 0x00 }, /* B5 DAI2 EQ5 */
+       { 0xb6, 0x00 }, /* B6 DAI1 biquad */
+       { 0xb7, 0x00 }, /* B7 DAI1 biquad */
+       { 0xb8, 0x00 }, /* B8 DAI1 biquad */
+       { 0xb9, 0x00 }, /* B9 DAI1 biquad */
+       { 0xba, 0x00 }, /* BA DAI1 biquad */
+       { 0xbb, 0x00 }, /* BB DAI1 biquad */
+       { 0xbc, 0x00 }, /* BC DAI1 biquad */
+       { 0xbd, 0x00 }, /* BD DAI1 biquad */
+       { 0xbe, 0x00 }, /* BE DAI1 biquad */
+       { 0xbf, 0x00 }, /* BF DAI1 biquad */
+
+       { 0xc0, 0x00 }, /* C0 DAI2 biquad */
+       { 0xc1, 0x00 }, /* C1 DAI2 biquad */
+       { 0xc2, 0x00 }, /* C2 DAI2 biquad */
+       { 0xc3, 0x00 }, /* C3 DAI2 biquad */
+       { 0xc4, 0x00 }, /* C4 DAI2 biquad */
+       { 0xc5, 0x00 }, /* C5 DAI2 biquad */
+       { 0xc6, 0x00 }, /* C6 DAI2 biquad */
+       { 0xc7, 0x00 }, /* C7 DAI2 biquad */
+       { 0xc8, 0x00 }, /* C8 DAI2 biquad */
+       { 0xc9, 0x00 }, /* C9 DAI2 biquad */
 };
 
 static struct {
@@ -606,11 +536,28 @@ static struct {
        { 0xFF, 0x00, 1 }, /* FF */
 };
 
-static int max98088_volatile_register(struct snd_soc_codec *codec, unsigned int reg)
+static bool max98088_readable_register(struct device *dev, unsigned int reg)
+{
+       return max98088_access[reg].readable;
+}
+
+static bool max98088_volatile_register(struct device *dev, unsigned int reg)
 {
        return max98088_access[reg].vol;
 }
 
+static const struct regmap_config max98088_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .readable_reg = max98088_readable_register,
+       .volatile_reg = max98088_volatile_register,
+       .max_register = 0xff,
+
+       .reg_defaults = max98088_reg,
+       .num_reg_defaults = ARRAY_SIZE(max98088_reg),
+       .cache_type = REGCACHE_RBTREE,
+};
 
 /*
  * Load equalizer DSP coefficient configurations registers
@@ -1610,58 +1557,34 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)
        return 0;
 }
 
-static void max98088_sync_cache(struct snd_soc_codec *codec)
-{
-       u8 *reg_cache = codec->reg_cache;
-       int i;
-
-       if (!codec->cache_sync)
-               return;
-
-       codec->cache_only = 0;
-
-       /* write back cached values if they're writeable and
-        * different from the hardware default.
-        */
-       for (i = 1; i < codec->driver->reg_cache_size; i++) {
-               if (!max98088_access[i].writable)
-                       continue;
-
-               if (reg_cache[i] == max98088_reg[i])
-                       continue;
-
-               snd_soc_write(codec, i, reg_cache[i]);
-       }
-
-       codec->cache_sync = 0;
-}
-
 static int max98088_set_bias_level(struct snd_soc_codec *codec,
                                   enum snd_soc_bias_level level)
 {
-       switch (level) {
-       case SND_SOC_BIAS_ON:
-               break;
-
-       case SND_SOC_BIAS_PREPARE:
-               break;
-
-       case SND_SOC_BIAS_STANDBY:
-               if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
-                       max98088_sync_cache(codec);
-
-               snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
-                               M98088_MBEN, M98088_MBEN);
-               break;
-
-       case SND_SOC_BIAS_OFF:
-               snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
-                               M98088_MBEN, 0);
-               codec->cache_sync = 1;
-               break;
-       }
-       codec->dapm.bias_level = level;
-       return 0;
+       struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
+
+       switch (level) {
+       case SND_SOC_BIAS_ON:
+               break;
+
+       case SND_SOC_BIAS_PREPARE:
+               break;
+
+       case SND_SOC_BIAS_STANDBY:
+               if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+                       regcache_sync(max98088->regmap);
+
+               snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
+                                  M98088_MBEN, M98088_MBEN);
+               break;
+
+       case SND_SOC_BIAS_OFF:
+               snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
+                                   M98088_MBEN, 0);
+               regcache_mark_dirty(max98088->regmap);
+               break;
+       }
+       codec->dapm.bias_level = level;
+       return 0;
 }
 
 #define MAX98088_RATES SNDRV_PCM_RATE_8000_96000
@@ -1988,9 +1911,9 @@ static int max98088_probe(struct snd_soc_codec *codec)
        struct max98088_cdata *cdata;
        int ret = 0;
 
-       codec->cache_sync = 1;
+       regcache_mark_dirty(max98088->regmap);
 
-       ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
+       ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
        if (ret != 0) {
                dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
                return ret;
@@ -2048,9 +1971,6 @@ static int max98088_probe(struct snd_soc_codec *codec)
 
        max98088_handle_pdata(codec);
 
-       snd_soc_add_codec_controls(codec, max98088_snd_controls,
-                            ARRAY_SIZE(max98088_snd_controls));
-
 err_access:
        return ret;
 }
@@ -2066,15 +1986,13 @@ static int max98088_remove(struct snd_soc_codec *codec)
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_max98088 = {
-       .probe   = max98088_probe,
-       .remove  = max98088_remove,
-       .suspend = max98088_suspend,
-       .resume  = max98088_resume,
-       .set_bias_level = max98088_set_bias_level,
-       .reg_cache_size = ARRAY_SIZE(max98088_reg),
-       .reg_word_size = sizeof(u8),
-       .reg_cache_default = max98088_reg,
-       .volatile_register = max98088_volatile_register,
+       .probe   = max98088_probe,
+       .remove  = max98088_remove,
+       .suspend = max98088_suspend,
+       .resume  = max98088_resume,
+       .set_bias_level = max98088_set_bias_level,
+       .controls = max98088_snd_controls,
+       .num_controls = ARRAY_SIZE(max98088_snd_controls),
        .dapm_widgets = max98088_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(max98088_dapm_widgets),
        .dapm_routes = max98088_audio_map,
@@ -2082,7 +2000,7 @@ static struct snd_soc_codec_driver soc_codec_dev_max98088 = {
 };
 
 static int max98088_i2c_probe(struct i2c_client *i2c,
-                            const struct i2c_device_id *id)
+                             const struct i2c_device_id *id)
 {
        struct max98088_priv *max98088;
        int ret;
@@ -2092,6 +2010,10 @@ static int max98088_i2c_probe(struct i2c_client *i2c,
        if (max98088 == NULL)
                return -ENOMEM;
 
+       max98088->regmap = devm_regmap_init_i2c(i2c, &max98088_regmap);
+       if (IS_ERR(max98088->regmap))
+              return PTR_ERR(max98088->regmap);
+
        max98088->devtype = id->driver_data;
 
        i2c_set_clientdata(i2c, max98088);
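
With the regmap created in the I2C probe as above, the rest of the conversion is largely declarative: the codec probe points the ASoC core at the regmap via SND_SOC_REGMAP, and the controls move from snd_soc_add_codec_controls() calls (removed in the earlier probe hunk) into the snd_soc_codec_driver tables. A rough sketch, again with placeholder foo_* names and an invented register address:

static const struct snd_kcontrol_new foo_snd_controls[] = {
        SOC_SINGLE("Master Playback Switch", 0x02, 0, 1, 0),
};

static int foo_codec_probe(struct snd_soc_codec *codec)
{
        int ret;

        /* Route snd_soc_read()/snd_soc_write() through the regmap that
         * was registered against the I2C device (8-bit addresses,
         * 8-bit values). */
        ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
        if (ret != 0)
                dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);

        return ret;
}

static struct snd_soc_codec_driver soc_codec_dev_foo = {
        .probe        = foo_codec_probe,
        /* The core registers this table itself, so probe() no longer
         * needs an snd_soc_add_codec_controls() call. */
        .controls     = foo_snd_controls,
        .num_controls = ARRAY_SIZE(foo_snd_controls),
};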
index 8dbcacd44e6aa5cbf5de1fba91ebdf16c1592ff8..8fb072455802e0daa82dfd8efce7ecc0758893e8 100644 (file)
@@ -39,6 +39,7 @@ struct max98095_cdata {
 };
 
 struct max98095_priv {
+       struct regmap *regmap;
        enum max98095_type devtype;
        struct max98095_pdata *pdata;
        unsigned int sysclk;
@@ -56,263 +57,145 @@ struct max98095_priv {
        struct snd_soc_jack *mic_jack;
 };
 
-static const u8 max98095_reg_def[M98095_REG_CNT] = {
-       0x00, /* 00 */
-       0x00, /* 01 */
-       0x00, /* 02 */
-       0x00, /* 03 */
-       0x00, /* 04 */
-       0x00, /* 05 */
-       0x00, /* 06 */
-       0x00, /* 07 */
-       0x00, /* 08 */
-       0x00, /* 09 */
-       0x00, /* 0A */
-       0x00, /* 0B */
-       0x00, /* 0C */
-       0x00, /* 0D */
-       0x00, /* 0E */
-       0x00, /* 0F */
-       0x00, /* 10 */
-       0x00, /* 11 */
-       0x00, /* 12 */
-       0x00, /* 13 */
-       0x00, /* 14 */
-       0x00, /* 15 */
-       0x00, /* 16 */
-       0x00, /* 17 */
-       0x00, /* 18 */
-       0x00, /* 19 */
-       0x00, /* 1A */
-       0x00, /* 1B */
-       0x00, /* 1C */
-       0x00, /* 1D */
-       0x00, /* 1E */
-       0x00, /* 1F */
-       0x00, /* 20 */
-       0x00, /* 21 */
-       0x00, /* 22 */
-       0x00, /* 23 */
-       0x00, /* 24 */
-       0x00, /* 25 */
-       0x00, /* 26 */
-       0x00, /* 27 */
-       0x00, /* 28 */
-       0x00, /* 29 */
-       0x00, /* 2A */
-       0x00, /* 2B */
-       0x00, /* 2C */
-       0x00, /* 2D */
-       0x00, /* 2E */
-       0x00, /* 2F */
-       0x00, /* 30 */
-       0x00, /* 31 */
-       0x00, /* 32 */
-       0x00, /* 33 */
-       0x00, /* 34 */
-       0x00, /* 35 */
-       0x00, /* 36 */
-       0x00, /* 37 */
-       0x00, /* 38 */
-       0x00, /* 39 */
-       0x00, /* 3A */
-       0x00, /* 3B */
-       0x00, /* 3C */
-       0x00, /* 3D */
-       0x00, /* 3E */
-       0x00, /* 3F */
-       0x00, /* 40 */
-       0x00, /* 41 */
-       0x00, /* 42 */
-       0x00, /* 43 */
-       0x00, /* 44 */
-       0x00, /* 45 */
-       0x00, /* 46 */
-       0x00, /* 47 */
-       0x00, /* 48 */
-       0x00, /* 49 */
-       0x00, /* 4A */
-       0x00, /* 4B */
-       0x00, /* 4C */
-       0x00, /* 4D */
-       0x00, /* 4E */
-       0x00, /* 4F */
-       0x00, /* 50 */
-       0x00, /* 51 */
-       0x00, /* 52 */
-       0x00, /* 53 */
-       0x00, /* 54 */
-       0x00, /* 55 */
-       0x00, /* 56 */
-       0x00, /* 57 */
-       0x00, /* 58 */
-       0x00, /* 59 */
-       0x00, /* 5A */
-       0x00, /* 5B */
-       0x00, /* 5C */
-       0x00, /* 5D */
-       0x00, /* 5E */
-       0x00, /* 5F */
-       0x00, /* 60 */
-       0x00, /* 61 */
-       0x00, /* 62 */
-       0x00, /* 63 */
-       0x00, /* 64 */
-       0x00, /* 65 */
-       0x00, /* 66 */
-       0x00, /* 67 */
-       0x00, /* 68 */
-       0x00, /* 69 */
-       0x00, /* 6A */
-       0x00, /* 6B */
-       0x00, /* 6C */
-       0x00, /* 6D */
-       0x00, /* 6E */
-       0x00, /* 6F */
-       0x00, /* 70 */
-       0x00, /* 71 */
-       0x00, /* 72 */
-       0x00, /* 73 */
-       0x00, /* 74 */
-       0x00, /* 75 */
-       0x00, /* 76 */
-       0x00, /* 77 */
-       0x00, /* 78 */
-       0x00, /* 79 */
-       0x00, /* 7A */
-       0x00, /* 7B */
-       0x00, /* 7C */
-       0x00, /* 7D */
-       0x00, /* 7E */
-       0x00, /* 7F */
-       0x00, /* 80 */
-       0x00, /* 81 */
-       0x00, /* 82 */
-       0x00, /* 83 */
-       0x00, /* 84 */
-       0x00, /* 85 */
-       0x00, /* 86 */
-       0x00, /* 87 */
-       0x00, /* 88 */
-       0x00, /* 89 */
-       0x00, /* 8A */
-       0x00, /* 8B */
-       0x00, /* 8C */
-       0x00, /* 8D */
-       0x00, /* 8E */
-       0x00, /* 8F */
-       0x00, /* 90 */
-       0x00, /* 91 */
-       0x30, /* 92 */
-       0xF0, /* 93 */
-       0x00, /* 94 */
-       0x00, /* 95 */
-       0x3F, /* 96 */
-       0x00, /* 97 */
-       0x00, /* 98 */
-       0x00, /* 99 */
-       0x00, /* 9A */
-       0x00, /* 9B */
-       0x00, /* 9C */
-       0x00, /* 9D */
-       0x00, /* 9E */
-       0x00, /* 9F */
-       0x00, /* A0 */
-       0x00, /* A1 */
-       0x00, /* A2 */
-       0x00, /* A3 */
-       0x00, /* A4 */
-       0x00, /* A5 */
-       0x00, /* A6 */
-       0x00, /* A7 */
-       0x00, /* A8 */
-       0x00, /* A9 */
-       0x00, /* AA */
-       0x00, /* AB */
-       0x00, /* AC */
-       0x00, /* AD */
-       0x00, /* AE */
-       0x00, /* AF */
-       0x00, /* B0 */
-       0x00, /* B1 */
-       0x00, /* B2 */
-       0x00, /* B3 */
-       0x00, /* B4 */
-       0x00, /* B5 */
-       0x00, /* B6 */
-       0x00, /* B7 */
-       0x00, /* B8 */
-       0x00, /* B9 */
-       0x00, /* BA */
-       0x00, /* BB */
-       0x00, /* BC */
-       0x00, /* BD */
-       0x00, /* BE */
-       0x00, /* BF */
-       0x00, /* C0 */
-       0x00, /* C1 */
-       0x00, /* C2 */
-       0x00, /* C3 */
-       0x00, /* C4 */
-       0x00, /* C5 */
-       0x00, /* C6 */
-       0x00, /* C7 */
-       0x00, /* C8 */
-       0x00, /* C9 */
-       0x00, /* CA */
-       0x00, /* CB */
-       0x00, /* CC */
-       0x00, /* CD */
-       0x00, /* CE */
-       0x00, /* CF */
-       0x00, /* D0 */
-       0x00, /* D1 */
-       0x00, /* D2 */
-       0x00, /* D3 */
-       0x00, /* D4 */
-       0x00, /* D5 */
-       0x00, /* D6 */
-       0x00, /* D7 */
-       0x00, /* D8 */
-       0x00, /* D9 */
-       0x00, /* DA */
-       0x00, /* DB */
-       0x00, /* DC */
-       0x00, /* DD */
-       0x00, /* DE */
-       0x00, /* DF */
-       0x00, /* E0 */
-       0x00, /* E1 */
-       0x00, /* E2 */
-       0x00, /* E3 */
-       0x00, /* E4 */
-       0x00, /* E5 */
-       0x00, /* E6 */
-       0x00, /* E7 */
-       0x00, /* E8 */
-       0x00, /* E9 */
-       0x00, /* EA */
-       0x00, /* EB */
-       0x00, /* EC */
-       0x00, /* ED */
-       0x00, /* EE */
-       0x00, /* EF */
-       0x00, /* F0 */
-       0x00, /* F1 */
-       0x00, /* F2 */
-       0x00, /* F3 */
-       0x00, /* F4 */
-       0x00, /* F5 */
-       0x00, /* F6 */
-       0x00, /* F7 */
-       0x00, /* F8 */
-       0x00, /* F9 */
-       0x00, /* FA */
-       0x00, /* FB */
-       0x00, /* FC */
-       0x00, /* FD */
-       0x00, /* FE */
-       0x00, /* FF */
+static const struct reg_default max98095_reg_def[] = {
+       {  0xf, 0x00 }, /* 0F */
+       { 0x10, 0x00 }, /* 10 */
+       { 0x11, 0x00 }, /* 11 */
+       { 0x12, 0x00 }, /* 12 */
+       { 0x13, 0x00 }, /* 13 */
+       { 0x14, 0x00 }, /* 14 */
+       { 0x15, 0x00 }, /* 15 */
+       { 0x16, 0x00 }, /* 16 */
+       { 0x17, 0x00 }, /* 17 */
+       { 0x18, 0x00 }, /* 18 */
+       { 0x19, 0x00 }, /* 19 */
+       { 0x1a, 0x00 }, /* 1A */
+       { 0x1b, 0x00 }, /* 1B */
+       { 0x1c, 0x00 }, /* 1C */
+       { 0x1d, 0x00 }, /* 1D */
+       { 0x1e, 0x00 }, /* 1E */
+       { 0x1f, 0x00 }, /* 1F */
+       { 0x20, 0x00 }, /* 20 */
+       { 0x21, 0x00 }, /* 21 */
+       { 0x22, 0x00 }, /* 22 */
+       { 0x23, 0x00 }, /* 23 */
+       { 0x24, 0x00 }, /* 24 */
+       { 0x25, 0x00 }, /* 25 */
+       { 0x26, 0x00 }, /* 26 */
+       { 0x27, 0x00 }, /* 27 */
+       { 0x28, 0x00 }, /* 28 */
+       { 0x29, 0x00 }, /* 29 */
+       { 0x2a, 0x00 }, /* 2A */
+       { 0x2b, 0x00 }, /* 2B */
+       { 0x2c, 0x00 }, /* 2C */
+       { 0x2d, 0x00 }, /* 2D */
+       { 0x2e, 0x00 }, /* 2E */
+       { 0x2f, 0x00 }, /* 2F */
+       { 0x30, 0x00 }, /* 30 */
+       { 0x31, 0x00 }, /* 31 */
+       { 0x32, 0x00 }, /* 32 */
+       { 0x33, 0x00 }, /* 33 */
+       { 0x34, 0x00 }, /* 34 */
+       { 0x35, 0x00 }, /* 35 */
+       { 0x36, 0x00 }, /* 36 */
+       { 0x37, 0x00 }, /* 37 */
+       { 0x38, 0x00 }, /* 38 */
+       { 0x39, 0x00 }, /* 39 */
+       { 0x3a, 0x00 }, /* 3A */
+       { 0x3b, 0x00 }, /* 3B */
+       { 0x3c, 0x00 }, /* 3C */
+       { 0x3d, 0x00 }, /* 3D */
+       { 0x3e, 0x00 }, /* 3E */
+       { 0x3f, 0x00 }, /* 3F */
+       { 0x40, 0x00 }, /* 40 */
+       { 0x41, 0x00 }, /* 41 */
+       { 0x42, 0x00 }, /* 42 */
+       { 0x43, 0x00 }, /* 43 */
+       { 0x44, 0x00 }, /* 44 */
+       { 0x45, 0x00 }, /* 45 */
+       { 0x46, 0x00 }, /* 46 */
+       { 0x47, 0x00 }, /* 47 */
+       { 0x48, 0x00 }, /* 48 */
+       { 0x49, 0x00 }, /* 49 */
+       { 0x4a, 0x00 }, /* 4A */
+       { 0x4b, 0x00 }, /* 4B */
+       { 0x4c, 0x00 }, /* 4C */
+       { 0x4d, 0x00 }, /* 4D */
+       { 0x4e, 0x00 }, /* 4E */
+       { 0x4f, 0x00 }, /* 4F */
+       { 0x50, 0x00 }, /* 50 */
+       { 0x51, 0x00 }, /* 51 */
+       { 0x52, 0x00 }, /* 52 */
+       { 0x53, 0x00 }, /* 53 */
+       { 0x54, 0x00 }, /* 54 */
+       { 0x55, 0x00 }, /* 55 */
+       { 0x56, 0x00 }, /* 56 */
+       { 0x57, 0x00 }, /* 57 */
+       { 0x58, 0x00 }, /* 58 */
+       { 0x59, 0x00 }, /* 59 */
+       { 0x5a, 0x00 }, /* 5A */
+       { 0x5b, 0x00 }, /* 5B */
+       { 0x5c, 0x00 }, /* 5C */
+       { 0x5d, 0x00 }, /* 5D */
+       { 0x5e, 0x00 }, /* 5E */
+       { 0x5f, 0x00 }, /* 5F */
+       { 0x60, 0x00 }, /* 60 */
+       { 0x61, 0x00 }, /* 61 */
+       { 0x62, 0x00 }, /* 62 */
+       { 0x63, 0x00 }, /* 63 */
+       { 0x64, 0x00 }, /* 64 */
+       { 0x65, 0x00 }, /* 65 */
+       { 0x66, 0x00 }, /* 66 */
+       { 0x67, 0x00 }, /* 67 */
+       { 0x68, 0x00 }, /* 68 */
+       { 0x69, 0x00 }, /* 69 */
+       { 0x6a, 0x00 }, /* 6A */
+       { 0x6b, 0x00 }, /* 6B */
+       { 0x6c, 0x00 }, /* 6C */
+       { 0x6d, 0x00 }, /* 6D */
+       { 0x6e, 0x00 }, /* 6E */
+       { 0x6f, 0x00 }, /* 6F */
+       { 0x70, 0x00 }, /* 70 */
+       { 0x71, 0x00 }, /* 71 */
+       { 0x72, 0x00 }, /* 72 */
+       { 0x73, 0x00 }, /* 73 */
+       { 0x74, 0x00 }, /* 74 */
+       { 0x75, 0x00 }, /* 75 */
+       { 0x76, 0x00 }, /* 76 */
+       { 0x77, 0x00 }, /* 77 */
+       { 0x78, 0x00 }, /* 78 */
+       { 0x79, 0x00 }, /* 79 */
+       { 0x7a, 0x00 }, /* 7A */
+       { 0x7b, 0x00 }, /* 7B */
+       { 0x7c, 0x00 }, /* 7C */
+       { 0x7d, 0x00 }, /* 7D */
+       { 0x7e, 0x00 }, /* 7E */
+       { 0x7f, 0x00 }, /* 7F */
+       { 0x80, 0x00 }, /* 80 */
+       { 0x81, 0x00 }, /* 81 */
+       { 0x82, 0x00 }, /* 82 */
+       { 0x83, 0x00 }, /* 83 */
+       { 0x84, 0x00 }, /* 84 */
+       { 0x85, 0x00 }, /* 85 */
+       { 0x86, 0x00 }, /* 86 */
+       { 0x87, 0x00 }, /* 87 */
+       { 0x88, 0x00 }, /* 88 */
+       { 0x89, 0x00 }, /* 89 */
+       { 0x8a, 0x00 }, /* 8A */
+       { 0x8b, 0x00 }, /* 8B */
+       { 0x8c, 0x00 }, /* 8C */
+       { 0x8d, 0x00 }, /* 8D */
+       { 0x8e, 0x00 }, /* 8E */
+       { 0x8f, 0x00 }, /* 8F */
+       { 0x90, 0x00 }, /* 90 */
+       { 0x91, 0x00 }, /* 91 */
+       { 0x92, 0x30 }, /* 92 */
+       { 0x93, 0xF0 }, /* 93 */
+       { 0x94, 0x00 }, /* 94 */
+       { 0x95, 0x00 }, /* 95 */
+       { 0x96, 0x3F }, /* 96 */
+       { 0x97, 0x00 }, /* 97 */
+       { 0xff, 0x00 }, /* FF */
 };
 
 static struct {
@@ -577,14 +460,14 @@ static struct {
        { 0xFF, 0x00 }, /* FF */
 };
 
-static int max98095_readable(struct snd_soc_codec *codec, unsigned int reg)
+static bool max98095_readable(struct device *dev, unsigned int reg)
 {
        if (reg >= M98095_REG_CNT)
                return 0;
        return max98095_access[reg].readable != 0;
 }
 
-static int max98095_volatile(struct snd_soc_codec *codec, unsigned int reg)
+static bool max98095_volatile(struct device *dev, unsigned int reg)
 {
        if (reg > M98095_REG_MAX_CACHED)
                return 1;
@@ -611,22 +494,18 @@ static int max98095_volatile(struct snd_soc_codec *codec, unsigned int reg)
        return 0;
 }
 
-/*
- * Filter coefficients are in a separate register segment
- * and they share the address space of the normal registers.
- * The coefficient registers do not need or share the cache.
- */
-static int max98095_hw_write(struct snd_soc_codec *codec, unsigned int reg,
-                            unsigned int value)
-{
-       int ret;
+static const struct regmap_config max98095_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
 
-       codec->cache_bypass = 1;
-       ret = snd_soc_write(codec, reg, value);
-       codec->cache_bypass = 0;
+       .reg_defaults = max98095_reg_def,
+       .num_reg_defaults = ARRAY_SIZE(max98095_reg_def),
+       .max_register = M98095_0FF_REV_ID,
+       .cache_type = REGCACHE_RBTREE,
 
-       return ret ? -EIO : 0;
-}
+       .readable_reg = max98095_readable,
+       .volatile_reg = max98095_volatile,
+};
 
 /*
  * Load equalizer DSP coefficient configuration registers
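
Note the shape change of the default table in the hunk above: the old ASoC cache was a flat array indexed by register address, padded with zeroes for every unused register, while regmap takes explicit { address, default } pairs and simply omits registers it never caches. Schematically (addresses and values are made up):

/* Old ASoC cache: the array index is the register address, so every
 * register up to the last cached one needs a slot. */
static const u8 foo_reg_old[0x10] = {
        [0x02] = 0x40,
        [0x0c] = 0x3f,
};

/* regmap: only registers with a meaningful cached default are listed. */
static const struct reg_default foo_reg_defaults[] = {
        { 0x02, 0x40 },
        { 0x0c, 0x3f },
};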
@@ -648,8 +527,8 @@ static void m98095_eq_band(struct snd_soc_codec *codec, unsigned int dai,
 
        /* Step through the registers and coefs */
        for (i = 0; i < M98095_COEFS_PER_BAND; i++) {
-               max98095_hw_write(codec, eq_reg++, M98095_BYTE1(coefs[i]));
-               max98095_hw_write(codec, eq_reg++, M98095_BYTE0(coefs[i]));
+               snd_soc_write(codec, eq_reg++, M98095_BYTE1(coefs[i]));
+               snd_soc_write(codec, eq_reg++, M98095_BYTE0(coefs[i]));
        }
 }
 
@@ -673,8 +552,8 @@ static void m98095_biquad_band(struct snd_soc_codec *codec, unsigned int dai,
 
        /* Step through the registers and coefs */
        for (i = 0; i < M98095_COEFS_PER_BAND; i++) {
-               max98095_hw_write(codec, bq_reg++, M98095_BYTE1(coefs[i]));
-               max98095_hw_write(codec, bq_reg++, M98095_BYTE0(coefs[i]));
+               snd_soc_write(codec, bq_reg++, M98095_BYTE1(coefs[i]));
+               snd_soc_write(codec, bq_reg++, M98095_BYTE0(coefs[i]));
        }
 }
 
@@ -1285,14 +1164,6 @@ static const struct snd_soc_dapm_route max98095_audio_map[] = {
        {"MIC2 Input", NULL, "MIC2"},
 };
 
-static int max98095_add_widgets(struct snd_soc_codec *codec)
-{
-       snd_soc_add_codec_controls(codec, max98095_snd_controls,
-                            ARRAY_SIZE(max98095_snd_controls));
-
-       return 0;
-}
-
 /* codec mclk clock divider coefficients */
 static const struct {
        u32 rate;
@@ -1748,6 +1619,7 @@ static int max98095_dai3_set_fmt(struct snd_soc_dai *codec_dai,
 static int max98095_set_bias_level(struct snd_soc_codec *codec,
                                   enum snd_soc_bias_level level)
 {
+       struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
        int ret;
 
        switch (level) {
@@ -1759,7 +1631,7 @@ static int max98095_set_bias_level(struct snd_soc_codec *codec,
 
        case SND_SOC_BIAS_STANDBY:
                if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
-                       ret = snd_soc_cache_sync(codec);
+                       ret = regcache_sync(max98095->regmap);
 
                        if (ret != 0) {
                                dev_err(codec->dev, "Failed to sync cache: %d\n", ret);
@@ -1774,7 +1646,7 @@ static int max98095_set_bias_level(struct snd_soc_codec *codec,
        case SND_SOC_BIAS_OFF:
                snd_soc_update_bits(codec, M98095_090_PWR_EN_IN,
                                M98095_MBEN, 0);
-               codec->cache_sync = 1;
+               regcache_mark_dirty(max98095->regmap);
                break;
        }
        codec->dapm.bias_level = level;
@@ -2341,7 +2213,7 @@ static int max98095_reset(struct snd_soc_codec *codec)
        /* Reset to hardware default for registers, as there is no
         * soft reset hardware control register */
        for (i = M98095_010_HOST_INT_CFG; i < M98095_REG_MAX_CACHED; i++) {
-               ret = snd_soc_write(codec, i, max98095_reg_def[i]);
+               ret = snd_soc_write(codec, i, snd_soc_read(codec, i));
                if (ret < 0) {
                        dev_err(codec->dev, "Failed to reset: %d\n", ret);
                        return ret;
@@ -2358,7 +2230,7 @@ static int max98095_probe(struct snd_soc_codec *codec)
        struct i2c_client *client;
        int ret = 0;
 
-       ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
+       ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
        if (ret != 0) {
                dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
                return ret;
@@ -2447,8 +2319,6 @@ static int max98095_probe(struct snd_soc_codec *codec)
        snd_soc_update_bits(codec, M98095_097_PWR_SYS, M98095_SHDNRUN,
                M98095_SHDNRUN);
 
-       max98095_add_widgets(codec);
-
        return 0;
 
 err_irq:
@@ -2480,11 +2350,8 @@ static struct snd_soc_codec_driver soc_codec_dev_max98095 = {
        .suspend = max98095_suspend,
        .resume  = max98095_resume,
        .set_bias_level = max98095_set_bias_level,
-       .reg_cache_size = ARRAY_SIZE(max98095_reg_def),
-       .reg_word_size = sizeof(u8),
-       .reg_cache_default = max98095_reg_def,
-       .readable_register = max98095_readable,
-       .volatile_register = max98095_volatile,
+       .controls = max98095_snd_controls,
+       .num_controls = ARRAY_SIZE(max98095_snd_controls),
        .dapm_widgets     = max98095_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(max98095_dapm_widgets),
        .dapm_routes     = max98095_audio_map,
@@ -2502,6 +2369,13 @@ static int max98095_i2c_probe(struct i2c_client *i2c,
        if (max98095 == NULL)
                return -ENOMEM;
 
+       max98095->regmap = devm_regmap_init_i2c(i2c, &max98095_regmap);
+       if (IS_ERR(max98095->regmap)) {
+               ret = PTR_ERR(max98095->regmap);
+               dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
+               return ret;
+       }
+
        max98095->devtype = id->driver_data;
        i2c_set_clientdata(i2c, max98095);
        max98095->pdata = i2c->dev.platform_data;
index 58c38a5b481ccb7658a8a6a285d5874d13659d50..c5dd61785f8d6318097ae3d0878f881d93e78c29 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/i2c.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include "max9850.h"
 
 struct max9850_priv {
+       struct regmap *regmap;
        unsigned int sysclk;
 };
 
 /* max9850 register cache */
-static const u8 max9850_reg[MAX9850_CACHEREGNUM] = {
-       0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+static const struct reg_default max9850_reg[] = {
+       {  2, 0x0c },
+       {  3, 0x00 },
+       {  4, 0x00 },
+       {  5, 0x00 },
+       {  6, 0x00 },
+       {  7, 0x00 },
+       {  8, 0x00 },
+       {  9, 0x00 },
+       { 10, 0x00 },
 };
 
 /* these registers are not used at the moment but provided for the sake of
  * completeness */
-static int max9850_volatile_register(struct snd_soc_codec *codec,
-               unsigned int reg)
+static bool max9850_volatile_register(struct device *dev, unsigned int reg)
 {
        switch (reg) {
        case MAX9850_STATUSA:
@@ -49,6 +58,15 @@ static int max9850_volatile_register(struct snd_soc_codec *codec,
        }
 }
 
+static const struct regmap_config max9850_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = MAX9850_DIGITAL_AUDIO,
+       .volatile_reg = max9850_volatile_register,
+       .cache_type = REGCACHE_RBTREE,
+};
+
 static const unsigned int max9850_tlv[] = {
        TLV_DB_RANGE_HEAD(4),
        0x18, 0x1f, TLV_DB_SCALE_ITEM(-7450, 400, 0),
@@ -225,6 +243,7 @@ static int max9850_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 static int max9850_set_bias_level(struct snd_soc_codec *codec,
                                  enum snd_soc_bias_level level)
 {
+       struct max9850_priv *max9850 = snd_soc_codec_get_drvdata(codec);
        int ret;
 
        switch (level) {
@@ -234,7 +253,7 @@ static int max9850_set_bias_level(struct snd_soc_codec *codec,
                break;
        case SND_SOC_BIAS_STANDBY:
                if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
-                       ret = snd_soc_cache_sync(codec);
+                       ret = regcache_sync(max9850->regmap);
                        if (ret) {
                                dev_err(codec->dev,
                                        "Failed to sync cache: %d\n", ret);
@@ -295,7 +314,7 @@ static int max9850_probe(struct snd_soc_codec *codec)
 {
        int ret;
 
-       ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_I2C);
+       ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
        if (ret < 0) {
                dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
                return ret;
@@ -316,10 +335,6 @@ static struct snd_soc_codec_driver soc_codec_dev_max9850 = {
        .suspend =      max9850_suspend,
        .resume =       max9850_resume,
        .set_bias_level = max9850_set_bias_level,
-       .reg_cache_size = ARRAY_SIZE(max9850_reg),
-       .reg_word_size = sizeof(u8),
-       .reg_cache_default = max9850_reg,
-       .volatile_register = max9850_volatile_register,
 
        .controls = max9850_controls,
        .num_controls = ARRAY_SIZE(max9850_controls),
@@ -340,6 +355,10 @@ static int max9850_i2c_probe(struct i2c_client *i2c,
        if (max9850 == NULL)
                return -ENOMEM;
 
+       max9850->regmap = devm_regmap_init_i2c(i2c, &max9850_regmap);
+       if (IS_ERR(max9850->regmap))
+               return PTR_ERR(max9850->regmap);
+
        i2c_set_clientdata(i2c, max9850);
 
        ret = snd_soc_register_codec(&i2c->dev,
index ea141e1d6f280733fdc832f7b5e2d054f2e8fff4..bae60164c7b7355408ab77676daa680bcb5946b0 100644 (file)
 #include <sound/soc.h>
 #include <sound/initval.h>
 #include <sound/soc-dapm.h>
+#include <linux/regmap.h>
 
 #include "mc13783.h"
 
-#define MC13783_AUDIO_RX0      36
-#define MC13783_AUDIO_RX1      37
-#define MC13783_AUDIO_TX       38
-#define MC13783_SSI_NETWORK    39
-#define MC13783_AUDIO_CODEC    40
-#define MC13783_AUDIO_DAC      41
-
 #define AUDIO_RX0_ALSPEN               (1 << 5)
 #define AUDIO_RX0_ALSPSEL              (1 << 7)
 #define AUDIO_RX0_ADDCDC               (1 << 21)
 
 struct mc13783_priv {
        struct mc13xxx *mc13xxx;
+       struct regmap *regmap;
 
        enum mc13783_ssi_port adc_ssi_port;
        enum mc13783_ssi_port dac_ssi_port;
 };
 
-static unsigned int mc13783_read(struct snd_soc_codec *codec,
-       unsigned int reg)
-{
-       struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
-       unsigned int value = 0;
-
-       mc13xxx_lock(priv->mc13xxx);
-
-       mc13xxx_reg_read(priv->mc13xxx, reg, &value);
-
-       mc13xxx_unlock(priv->mc13xxx);
-
-       return value;
-}
-
-static int mc13783_write(struct snd_soc_codec *codec,
-       unsigned int reg, unsigned int value)
-{
-       struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
-       int ret;
-
-       mc13xxx_lock(priv->mc13xxx);
-
-       ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
-
-       /* include errata fix for spi audio problems */
-       if (reg == MC13783_AUDIO_CODEC || reg == MC13783_AUDIO_DAC)
-               ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
-
-       mc13xxx_unlock(priv->mc13xxx);
-
-       return ret;
-}
-
 /* Mapping between sample rates and register value */
 static unsigned int mc13783_rates[] = {
        8000, 11025, 12000, 16000,
@@ -382,7 +343,7 @@ static int mc13783_set_tdm_slot_dac(struct snd_soc_dai *dai,
                break;
        default:
                return -EINVAL;
-       };
+       }
 
        snd_soc_update_bits(codec, MC13783_SSI_NETWORK, mask, val);
 
@@ -466,6 +427,29 @@ static const struct snd_kcontrol_new right_input_mux =
 static const struct snd_kcontrol_new samp_ctl =
        SOC_DAPM_SINGLE("Switch", MC13783_AUDIO_RX0, 3, 1, 0);
 
+static const char * const speaker_amp_source_text[] = {
+       "CODEC", "Right"
+};
+static const SOC_ENUM_SINGLE_DECL(speaker_amp_source, MC13783_AUDIO_RX0, 4,
+                                 speaker_amp_source_text);
+static const struct snd_kcontrol_new speaker_amp_source_mux =
+       SOC_DAPM_ENUM("Speaker Amp Source MUX", speaker_amp_source);
+
+static const char * const headset_amp_source_text[] = {
+       "CODEC", "Mixer"
+};
+
+static const SOC_ENUM_SINGLE_DECL(headset_amp_source, MC13783_AUDIO_RX0, 11,
+                                 headset_amp_source_text);
+static const struct snd_kcontrol_new headset_amp_source_mux =
+       SOC_DAPM_ENUM("Headset Amp Source MUX", headset_amp_source);
+
+static const struct snd_kcontrol_new cdcout_ctl =
+       SOC_DAPM_SINGLE("Switch", MC13783_AUDIO_RX0, 18, 1, 0);
+
+static const struct snd_kcontrol_new adc_bypass_ctl =
+       SOC_DAPM_SINGLE("Switch", MC13783_AUDIO_CODEC, 16, 1, 0);
+
 static const struct snd_kcontrol_new lamp_ctl =
        SOC_DAPM_SINGLE("Switch", MC13783_AUDIO_RX0, 5, 1, 0);
 
@@ -503,12 +487,22 @@ static const struct snd_soc_dapm_widget mc13783_dapm_widgets[] = {
        SND_SOC_DAPM_VIRT_MUX("PGA Right Input Mux", SND_SOC_NOPM, 0, 0,
                              &right_input_mux),
 
+       SND_SOC_DAPM_MUX("Speaker Amp Source MUX", SND_SOC_NOPM, 0, 0,
+                        &speaker_amp_source_mux),
+
+       SND_SOC_DAPM_MUX("Headset Amp Source MUX", SND_SOC_NOPM, 0, 0,
+                        &headset_amp_source_mux),
+
        SND_SOC_DAPM_PGA("PGA Left Input", SND_SOC_NOPM, 0, 0, NULL, 0),
        SND_SOC_DAPM_PGA("PGA Right Input", SND_SOC_NOPM, 0, 0, NULL, 0),
 
        SND_SOC_DAPM_ADC("ADC", "Capture", MC13783_AUDIO_CODEC, 11, 0),
        SND_SOC_DAPM_SUPPLY("ADC_Reset", MC13783_AUDIO_CODEC, 15, 0, NULL, 0),
 
+       SND_SOC_DAPM_PGA("Voice CODEC PGA", MC13783_AUDIO_RX1, 0, 0, NULL, 0),
+       SND_SOC_DAPM_SWITCH("Voice CODEC Bypass", MC13783_AUDIO_CODEC, 16, 0,
+                       &adc_bypass_ctl),
+
 /* Output */
        SND_SOC_DAPM_SUPPLY("DAC_E", MC13783_AUDIO_DAC, 11, 0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("DAC_Reset", MC13783_AUDIO_DAC, 15, 0, NULL, 0),
@@ -516,10 +510,15 @@ static const struct snd_soc_dapm_widget mc13783_dapm_widgets[] = {
        SND_SOC_DAPM_OUTPUT("RXOUTR"),
        SND_SOC_DAPM_OUTPUT("HSL"),
        SND_SOC_DAPM_OUTPUT("HSR"),
+       SND_SOC_DAPM_OUTPUT("LSPL"),
        SND_SOC_DAPM_OUTPUT("LSP"),
        SND_SOC_DAPM_OUTPUT("SP"),
+       SND_SOC_DAPM_OUTPUT("CDCOUT"),
 
-       SND_SOC_DAPM_SWITCH("Speaker Amp", MC13783_AUDIO_RX0, 3, 0, &samp_ctl),
+       SND_SOC_DAPM_SWITCH("CDCOUT Switch", MC13783_AUDIO_RX0, 18, 0,
+                       &cdcout_ctl),
+       SND_SOC_DAPM_SWITCH("Speaker Amp Switch", MC13783_AUDIO_RX0, 3, 0,
+                       &samp_ctl),
        SND_SOC_DAPM_SWITCH("Loudspeaker Amp", SND_SOC_NOPM, 0, 0, &lamp_ctl),
        SND_SOC_DAPM_SWITCH("Headset Amp Left", MC13783_AUDIO_RX0, 10, 0,
                        &hlamp_ctl),
@@ -554,20 +553,28 @@ static struct snd_soc_dapm_route mc13783_routes[] = {
        { "ADC", NULL, "PGA Right Input"},
        { "ADC", NULL, "ADC_Reset"},
 
+       { "Voice CODEC PGA", "Voice CODEC Bypass", "ADC" },
+
+       { "Speaker Amp Source MUX", "CODEC", "Voice CODEC PGA"},
+       { "Speaker Amp Source MUX", "Right", "DAC PGA"},
+
+       { "Headset Amp Source MUX", "CODEC", "Voice CODEC PGA"},
+       { "Headset Amp Source MUX", "Mixer", "DAC PGA"},
+
 /* Output */
        { "HSL", NULL, "Headset Amp Left" },
        { "HSR", NULL, "Headset Amp Right"},
        { "RXOUTL", NULL, "Line out Amp Left"},
        { "RXOUTR", NULL, "Line out Amp Right"},
-       { "SP", NULL, "Speaker Amp"},
-       { "Speaker Amp", NULL, "DAC PGA"},
-       { "LSP", NULL, "DAC PGA"},
-       { "Headset Amp Left", NULL, "DAC PGA"},
-       { "Headset Amp Right", NULL, "DAC PGA"},
+       { "SP", "Speaker Amp Switch", "Speaker Amp Source MUX"},
+       { "LSP", "Loudspeaker Amp", "Speaker Amp Source MUX"},
+       { "HSL", "Headset Amp Left", "Headset Amp Source MUX"},
+       { "HSR", "Headset Amp Right", "Headset Amp Source MUX"},
        { "Line out Amp Left", NULL, "DAC PGA"},
        { "Line out Amp Right", NULL, "DAC PGA"},
        { "DAC PGA", NULL, "DAC"},
        { "DAC", NULL, "DAC_E"},
+       { "CDCOUT", "CDCOUT Switch", "Voice CODEC PGA"},
 };
 
 static const char * const mc13783_3d_mixer[] = {"Stereo", "Phase Mix",
@@ -580,15 +587,39 @@ static const struct soc_enum mc13783_enum_3d_mixer =
 static struct snd_kcontrol_new mc13783_control_list[] = {
        SOC_SINGLE("Loudspeaker enable", MC13783_AUDIO_RX0, 5, 1, 0),
        SOC_SINGLE("PCM Playback Volume", MC13783_AUDIO_RX1, 6, 15, 0),
+       SOC_SINGLE("PCM Playback Switch", MC13783_AUDIO_RX1, 5, 1, 0),
        SOC_DOUBLE("PCM Capture Volume", MC13783_AUDIO_TX, 19, 14, 31, 0),
        SOC_ENUM("3D Control", mc13783_enum_3d_mixer),
+
+       SOC_SINGLE("CDCOUT Switch", MC13783_AUDIO_RX0, 18, 1, 0),
+       SOC_SINGLE("Earpiece Amp Switch", MC13783_AUDIO_RX0, 3, 1, 0),
+       SOC_DOUBLE("Headset Amp Switch", MC13783_AUDIO_RX0, 10, 9, 1, 0),
+       SOC_DOUBLE("Line out Amp Switch", MC13783_AUDIO_RX0, 16, 15, 1, 0),
+
+       SOC_SINGLE("PCM Capture Mixin Switch", MC13783_AUDIO_RX0, 22, 1, 0),
+       SOC_SINGLE("Line in Capture Mixin Switch", MC13783_AUDIO_RX0, 23, 1, 0),
+
+       SOC_SINGLE("CODEC Capture Volume", MC13783_AUDIO_RX1, 1, 15, 0),
+       SOC_SINGLE("CODEC Capture Mixin Switch", MC13783_AUDIO_RX0, 21, 1, 0),
+
+       SOC_SINGLE("Line in Capture Volume", MC13783_AUDIO_RX1, 12, 15, 0),
+       SOC_SINGLE("Line in Capture Switch", MC13783_AUDIO_RX1, 10, 1, 0),
+
+       SOC_SINGLE("MC1 Capture Bias Switch", MC13783_AUDIO_TX, 0, 1, 0),
+       SOC_SINGLE("MC2 Capture Bias Switch", MC13783_AUDIO_TX, 1, 1, 0),
 };
 
 static int mc13783_probe(struct snd_soc_codec *codec)
 {
        struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
+       int ret;
 
-       mc13xxx_lock(priv->mc13xxx);
+       codec->control_data = dev_get_regmap(codec->dev->parent, NULL);
+       ret = snd_soc_codec_set_cache_io(codec, 8, 24, SND_SOC_REGMAP);
+       if (ret != 0) {
+               dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+               return ret;
+       }
 
        /* these are the reset values */
        mc13xxx_reg_write(priv->mc13xxx, MC13783_AUDIO_RX0, 0x25893);
@@ -612,8 +643,6 @@ static int mc13783_probe(struct snd_soc_codec *codec)
                mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
                                0, AUDIO_SSI_SEL);
 
-       mc13xxx_unlock(priv->mc13xxx);
-
        return 0;
 }
 
@@ -621,13 +650,9 @@ static int mc13783_remove(struct snd_soc_codec *codec)
 {
        struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec);
 
-       mc13xxx_lock(priv->mc13xxx);
-
        /* Make sure VAUDIOON is off */
        mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_RX0, 0x3, 0);
 
-       mc13xxx_unlock(priv->mc13xxx);
-
        return 0;
 }
 
@@ -717,8 +742,6 @@ static struct snd_soc_dai_driver mc13783_dai_sync[] = {
 static struct snd_soc_codec_driver soc_codec_dev_mc13783 = {
        .probe          = mc13783_probe,
        .remove         = mc13783_remove,
-       .read           = mc13783_read,
-       .write          = mc13783_write,
        .controls       = mc13783_control_list,
        .num_controls   = ARRAY_SIZE(mc13783_control_list),
        .dapm_widgets   = mc13783_dapm_widgets,
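
For MFD children such as mc13783 (and si476x further down) the codec does not create a regmap of its own: the parent MFD driver already owns one, so the custom read()/write() callbacks, including their locking, are dropped and the codec borrows the parent's map at probe time. A sketch of that handoff; the 8/24 widths follow the mc13783 case here, and foo_* is a placeholder:

static int foo_codec_probe(struct snd_soc_codec *codec)
{
        int ret;

        /* Reuse the regmap registered by the parent MFD device. */
        codec->control_data = dev_get_regmap(codec->dev->parent, NULL);
        if (!codec->control_data)
                return -ENODEV;

        /* 8-bit register numbers, 24-bit values on this PMIC. */
        ret = snd_soc_codec_set_cache_io(codec, 8, 24, SND_SOC_REGMAP);
        if (ret != 0)
                dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);

        return ret;
}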
index 651ce09236755ff9938062e3e719926fad856f9a..73f9c3630e2c8e33a56b089fa3a0d5f16e418dd3 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <linux/regmap.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <sound/pcm.h>
@@ -270,7 +271,7 @@ MODULE_DEVICE_TABLE(of, pcm1681_dt_ids);
 static const struct regmap_config pcm1681_regmap = {
        .reg_bits               = 8,
        .val_bits               = 8,
-       .max_register           = ARRAY_SIZE(pcm1681_reg_defaults) + 1,
+       .max_register           = 0x13,
        .reg_defaults           = pcm1681_reg_defaults,
        .num_reg_defaults       = ARRAY_SIZE(pcm1681_reg_defaults),
        .writeable_reg          = pcm1681_writeable_reg,
index 2a8eccf64c76565ab5abb96f3436face0dda4f01..7146653a8e165c942f7107c5ff305dea785578d9 100644 (file)
@@ -28,6 +28,7 @@
 #include <sound/initval.h>
 #include <sound/soc.h>
 #include <sound/tlv.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 
 #include "pcm1792a.h"
@@ -188,7 +189,7 @@ MODULE_DEVICE_TABLE(of, pcm1792a_of_match);
 static const struct regmap_config pcm1792a_regmap = {
        .reg_bits               = 8,
        .val_bits               = 8,
-       .max_register           = 24,
+       .max_register           = 23,
        .reg_defaults           = pcm1792a_reg_defaults,
        .num_reg_defaults       = ARRAY_SIZE(pcm1792a_reg_defaults),
        .writeable_reg          = pcm1792a_writeable_reg,
index c26a8f814b1865269b3869acf60c8c7f7a20349c..4d041d376f3191555503d249a7f0c71a375c729a 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/spi/spi.h>
+#include <linux/acpi.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -926,7 +927,7 @@ static int rt5640_set_dmic2_event(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
-void hp_amp_power_on(struct snd_soc_codec *codec)
+static void hp_amp_power_on(struct snd_soc_codec *codec)
 {
        struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
 
@@ -1609,7 +1610,8 @@ static int rt5640_hw_params(struct snd_pcm_substream *substream,
        rt5640->lrck[dai->id] = params_rate(params);
        pre_div = get_clk_info(rt5640->sysclk, rt5640->lrck[dai->id]);
        if (pre_div < 0) {
-               dev_err(codec->dev, "Unsupported clock setting\n");
+               dev_err(codec->dev, "Unsupported clock setting %d for DAI %d\n",
+                       rt5640->lrck[dai->id], dai->id);
                return -EINVAL;
        }
        frame_size = snd_soc_params_to_frame_size(params);
@@ -1977,13 +1979,20 @@ static int rt5640_suspend(struct snd_soc_codec *codec)
        rt5640_reset(codec);
        regcache_cache_only(rt5640->regmap, true);
        regcache_mark_dirty(rt5640->regmap);
+       if (gpio_is_valid(rt5640->pdata.ldo1_en))
+               gpio_set_value_cansleep(rt5640->pdata.ldo1_en, 0);
 
        return 0;
 }
 
 static int rt5640_resume(struct snd_soc_codec *codec)
 {
-       rt5640_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+       struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+
+       if (gpio_is_valid(rt5640->pdata.ldo1_en)) {
+               gpio_set_value_cansleep(rt5640->pdata.ldo1_en, 1);
+               msleep(400);
+       }
 
        return 0;
 }
@@ -2080,6 +2089,14 @@ static const struct i2c_device_id rt5640_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, rt5640_i2c_id);
 
+#ifdef CONFIG_ACPI
+static struct acpi_device_id rt5640_acpi_match[] = {
+       { "INT33CA", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
+#endif
+
 static int rt5640_parse_dt(struct rt5640_priv *rt5640, struct device_node *np)
 {
        rt5640->pdata.in1_diff = of_property_read_bool(np,
@@ -2199,6 +2216,7 @@ static struct i2c_driver rt5640_i2c_driver = {
        .driver = {
                .name = "rt5640",
                .owner = THIS_MODULE,
+               .acpi_match_table = ACPI_PTR(rt5640_acpi_match),
        },
        .probe = rt5640_i2c_probe,
        .remove   = rt5640_i2c_remove,
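
The rt5640 changes above also add ACPI enumeration alongside the existing I2C id table: an acpi_device_id table guarded by CONFIG_ACPI and referenced through ACPI_PTR(), which compiles away to NULL on non-ACPI kernels. A sketch of the pattern; the _HID string is invented, and foo_i2c_probe/foo_i2c_remove/foo_i2c_id are assumed to exist elsewhere in the driver:

#ifdef CONFIG_ACPI
static const struct acpi_device_id foo_acpi_match[] = {
        { "ABCD0000", 0 },      /* hypothetical ACPI _HID */
        { }
};
MODULE_DEVICE_TABLE(acpi, foo_acpi_match);
#endif

static struct i2c_driver foo_i2c_driver = {
        .driver = {
                .name = "foo",
                .owner = THIS_MODULE,
                /* Evaluates to NULL when CONFIG_ACPI is disabled. */
                .acpi_match_table = ACPI_PTR(foo_acpi_match),
        },
        .probe    = foo_i2c_probe,
        .remove   = foo_i2c_remove,
        .id_table = foo_i2c_id,
};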
index 38f3b105c17dc2da0809c903190dd37644f2508d..52e7cb08434bd281354c4a34f520f16c38b0446f 100644 (file)
@@ -60,48 +60,6 @@ enum si476x_pcm_format {
        SI476X_PCM_FORMAT_S24_LE        = 6,
 };
 
-static unsigned int si476x_codec_read(struct snd_soc_codec *codec,
-                                     unsigned int reg)
-{
-       int err;
-       unsigned int val;
-       struct si476x_core *core = codec->control_data;
-
-       si476x_core_lock(core);
-       if (!si476x_core_is_powered_up(core))
-               regcache_cache_only(core->regmap, true);
-
-       err = regmap_read(core->regmap, reg, &val);
-
-       if (!si476x_core_is_powered_up(core))
-               regcache_cache_only(core->regmap, false);
-       si476x_core_unlock(core);
-
-       if (err < 0)
-               return err;
-
-       return val;
-}
-
-static int si476x_codec_write(struct snd_soc_codec *codec,
-                             unsigned int reg, unsigned int val)
-{
-       int err;
-       struct si476x_core *core = codec->control_data;
-
-       si476x_core_lock(core);
-       if (!si476x_core_is_powered_up(core))
-               regcache_cache_only(core->regmap, true);
-
-       err = regmap_write(core->regmap, reg, val);
-
-       if (!si476x_core_is_powered_up(core))
-               regcache_cache_only(core->regmap, false);
-       si476x_core_unlock(core);
-
-       return err;
-}
-
 static const struct snd_soc_dapm_widget si476x_dapm_widgets[] = {
 SND_SOC_DAPM_OUTPUT("LOUT"),
 SND_SOC_DAPM_OUTPUT("ROUT"),
@@ -115,6 +73,7 @@ static const struct snd_soc_dapm_route si476x_dapm_routes[] = {
 static int si476x_codec_set_dai_fmt(struct snd_soc_dai *codec_dai,
                                    unsigned int fmt)
 {
+       struct si476x_core *core = i2c_mfd_cell_to_core(codec_dai->dev);
        int err;
        u16 format = 0;
 
@@ -178,9 +137,14 @@ static int si476x_codec_set_dai_fmt(struct snd_soc_dai *codec_dai,
                return -EINVAL;
        }
 
+       si476x_core_lock(core);
+
        err = snd_soc_update_bits(codec_dai->codec, SI476X_DIGITAL_IO_OUTPUT_FORMAT,
                                  SI476X_DIGITAL_IO_OUTPUT_FORMAT_MASK,
                                  format);
+
+       si476x_core_unlock(core);
+
        if (err < 0) {
                dev_err(codec_dai->codec->dev, "Failed to set output format\n");
                return err;
@@ -193,6 +157,7 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream,
                                  struct snd_pcm_hw_params *params,
                                  struct snd_soc_dai *dai)
 {
+       struct si476x_core *core = i2c_mfd_cell_to_core(dai->dev);
        int rate, width, err;
 
        rate = params_rate(params);
@@ -218,11 +183,13 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream,
                return -EINVAL;
        }
 
+       si476x_core_lock(core);
+
        err = snd_soc_write(dai->codec, SI476X_DIGITAL_IO_OUTPUT_SAMPLE_RATE,
                            rate);
        if (err < 0) {
                dev_err(dai->codec->dev, "Failed to set sample rate\n");
-               return err;
+               goto out;
        }
 
        err = snd_soc_update_bits(dai->codec, SI476X_DIGITAL_IO_OUTPUT_FORMAT,
@@ -231,15 +198,18 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream,
                                  (width << SI476X_DIGITAL_IO_SAMPLE_SIZE_SHIFT));
        if (err < 0) {
                dev_err(dai->codec->dev, "Failed to set output width\n");
-               return err;
+               goto out;
        }
 
-       return 0;
+out:
+       si476x_core_unlock(core);
+
+       return err;
 }
 
 static int si476x_codec_probe(struct snd_soc_codec *codec)
 {
-       codec->control_data = i2c_mfd_cell_to_core(codec->dev);
+       codec->control_data = dev_get_regmap(codec->dev->parent, NULL);
        return 0;
 }
 
@@ -268,8 +238,6 @@ static struct snd_soc_dai_driver si476x_dai = {
 
 static struct snd_soc_codec_driver soc_codec_dev_si476x = {
        .probe  = si476x_codec_probe,
-       .read   = si476x_codec_read,
-       .write  = si476x_codec_write,
        .dapm_widgets = si476x_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(si476x_dapm_widgets),
        .dapm_routes = si476x_dapm_routes,
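
Since the si476x codec now goes through the MFD core's shared regmap (see the probe change above), register access from the DAI callbacks is bracketed with the core lock, and the error paths in hw_params() funnel through a single unlock. The shape of that change reduced to its essentials; the FOO_* register numbers are placeholders:

#define FOO_RATE_REG    0x0200  /* hypothetical register numbers */
#define FOO_FORMAT_REG  0x0201
#define FOO_FORMAT_MASK 0x00ff
#define FOO_FORMAT_I2S  0x0001

static int foo_codec_hw_params(struct snd_pcm_substream *substream,
                               struct snd_pcm_hw_params *params,
                               struct snd_soc_dai *dai)
{
        struct si476x_core *core = i2c_mfd_cell_to_core(dai->dev);
        int err;

        si476x_core_lock(core);

        err = snd_soc_write(dai->codec, FOO_RATE_REG, params_rate(params));
        if (err < 0)
                goto out;

        err = snd_soc_update_bits(dai->codec, FOO_FORMAT_REG,
                                  FOO_FORMAT_MASK, FOO_FORMAT_I2S);
out:
        /* Single unlock point for both the success and error paths. */
        si476x_core_unlock(core);

        return err < 0 ? err : 0;
}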
index dba26e63844ef901073531bcdb26b1e084bf0a3b..13045f2af4d32a7725961feabc628ac6e088e80c 100644 (file)
@@ -164,30 +164,28 @@ static unsigned int sn95031_get_mic_bias(struct snd_soc_codec *codec)
 }
 /*end - adc helper functions */
 
-static inline unsigned int sn95031_read(struct snd_soc_codec *codec,
-                       unsigned int reg)
+static int sn95031_read(void *ctx, unsigned int reg, unsigned int *val)
 {
        u8 value = 0;
        int ret;
 
        ret = intel_scu_ipc_ioread8(reg, &value);
-       if (ret)
-               pr_err("read of %x failed, err %d\n", reg, ret);
-       return value;
+       if (ret == 0)
+               *val = value;
 
+       return ret;
 }
 
-static inline int sn95031_write(struct snd_soc_codec *codec,
-                       unsigned int reg, unsigned int value)
+static int sn95031_write(void *ctx, unsigned int reg, unsigned int value)
 {
-       int ret;
-
-       ret = intel_scu_ipc_iowrite8(reg, value);
-       if (ret)
-               pr_err("write of %x failed, err %d\n", reg, ret);
-       return ret;
+       return intel_scu_ipc_iowrite8(reg, value);
 }
 
+static const struct regmap_config sn95031_regmap = {
+       .reg_read = sn95031_read,
+       .reg_write = sn95031_write,
+};
+
 static int sn95031_set_vaud_bias(struct snd_soc_codec *codec,
                enum snd_soc_bias_level level)
 {
@@ -827,6 +825,8 @@ static int sn95031_codec_probe(struct snd_soc_codec *codec)
 {
        pr_debug("codec_probe called\n");
 
+       snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
+
        /* PCM interface config
         * This sets the pcm rx slot configuration to max 6 slots
         * for max 4 dais (2 stereo and 2 mono)
@@ -886,8 +886,6 @@ static int sn95031_codec_remove(struct snd_soc_codec *codec)
 static struct snd_soc_codec_driver sn95031_codec = {
        .probe          = sn95031_codec_probe,
        .remove         = sn95031_codec_remove,
-       .read           = sn95031_read,
-       .write          = sn95031_write,
        .set_bias_level = sn95031_set_vaud_bias,
        .idle_bias_off  = true,
        .dapm_widgets   = sn95031_dapm_widgets,
@@ -898,7 +896,14 @@ static struct snd_soc_codec_driver sn95031_codec = {
 
 static int sn95031_device_probe(struct platform_device *pdev)
 {
+       struct regmap *regmap;
+
        pr_debug("codec device probe called for %s\n", dev_name(&pdev->dev));
+
+       regmap = devm_regmap_init(&pdev->dev, NULL, NULL, &sn95031_regmap);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
        return snd_soc_register_codec(&pdev->dev, &sn95031_codec,
                        sn95031_dais, ARRAY_SIZE(sn95031_dais));
 }
index 6d31d88f72040098700a1b57ef3f4d118976a604..a895a5e4bdf207eee6387bc00e8452ae2385233d 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/i2c.h>
 #include <linux/regmap.h>
 #include <linux/spi/spi.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
 #include <sound/pcm.h>
@@ -244,6 +245,8 @@ struct tas5086_private {
        unsigned int    mclk, sclk;
        unsigned int    format;
        bool            deemph;
+       unsigned int    charge_period;
+       unsigned int    pwm_start_mid_z;
        /* Current sample rate for de-emphasis control */
        int             rate;
        /* GPIO driving Reset pin, if any */
@@ -429,7 +432,7 @@ static int tas5086_hw_params(struct snd_pcm_substream *substream,
        default:
                dev_err(codec->dev, "Invalid bit width\n");
                return -EINVAL;
-       };
+       }
 
        ret = regmap_write(priv->regmap, TAS5086_SERIAL_DATA_IF, val);
        if (ret < 0)
@@ -456,6 +459,75 @@ static int tas5086_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
        return regmap_write(priv->regmap, TAS5086_SOFT_MUTE, val);
 }
 
+static void tas5086_reset(struct tas5086_private *priv)
+{
+       if (gpio_is_valid(priv->gpio_nreset)) {
+               /* Reset codec - minimum assertion time is 400ns */
+               gpio_direction_output(priv->gpio_nreset, 0);
+               udelay(1);
+               gpio_set_value(priv->gpio_nreset, 1);
+
+               /* Codec needs ~15ms to wake up */
+               msleep(15);
+       }
+}
+
+/* charge period values in microseconds */
+static const int tas5086_charge_period[] = {
+         13000,  16900,   23400,   31200,   41600,   54600,   72800,   96200,
+        130000, 156000,  234000,  312000,  416000,  546000,  728000,  962000,
+       1300000, 169000, 2340000, 3120000, 4160000, 5460000, 7280000, 9620000,
+};
+
+static int tas5086_init(struct device *dev, struct tas5086_private *priv)
+{
+       int ret, i;
+
+       /*
+        * If any of the channels is configured to start in Mid-Z mode,
+        * configure 'part 1' of the PWM starts to use Mid-Z, and tell
+        * all configured mid-z channels to start start under 'part 1'.
+        */
+       if (priv->pwm_start_mid_z)
+               regmap_write(priv->regmap, TAS5086_PWM_START,
+                            TAS5086_PWM_START_MIDZ_FOR_START_1 |
+                               priv->pwm_start_mid_z);
+
+       /* lookup and set split-capacitor charge period */
+       if (priv->charge_period == 0) {
+               regmap_write(priv->regmap, TAS5086_SPLIT_CAP_CHARGE, 0);
+       } else {
+               i = index_in_array(tas5086_charge_period,
+                                  ARRAY_SIZE(tas5086_charge_period),
+                                  priv->charge_period);
+               if (i >= 0)
+                       regmap_write(priv->regmap, TAS5086_SPLIT_CAP_CHARGE,
+                                    i + 0x08);
+               else
+                       dev_warn(dev,
+                                "Invalid split-cap charge period of %d ns.\n",
+                                priv->charge_period);
+       }
+
+       /* enable factory trim */
+       ret = regmap_write(priv->regmap, TAS5086_OSC_TRIM, 0x00);
+       if (ret < 0)
+               return ret;
+
+       /* start all channels */
+       ret = regmap_write(priv->regmap, TAS5086_SYS_CONTROL_2, 0x20);
+       if (ret < 0)
+               return ret;
+
+       /* mute all channels for now */
+       ret = regmap_write(priv->regmap, TAS5086_SOFT_MUTE,
+                          TAS5086_SOFT_MUTE_ALL);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
 /* TAS5086 controls */
 static const DECLARE_TLV_DB_SCALE(tas5086_dac_tlv, -10350, 50, 1);
 
@@ -691,14 +763,39 @@ static struct snd_soc_dai_driver tas5086_dai = {
 };
 
 #ifdef CONFIG_PM
+static int tas5086_soc_suspend(struct snd_soc_codec *codec)
+{
+       struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       /* Shut down all channels */
+       ret = regmap_write(priv->regmap, TAS5086_SYS_CONTROL_2, 0x60);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
 static int tas5086_soc_resume(struct snd_soc_codec *codec)
 {
        struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
+       tas5086_reset(priv);
+       regcache_mark_dirty(priv->regmap);
+
+       ret = tas5086_init(codec->dev, priv);
+       if (ret < 0)
+               return ret;
+
+       ret = regcache_sync(priv->regmap);
+       if (ret < 0)
+               return ret;
 
-       /* Restore codec state */
-       return regcache_sync(priv->regmap);
+       return 0;
 }
 #else
+#define tas5086_soc_suspend    NULL
 #define tas5086_soc_resume     NULL
 #endif /* CONFIG_PM */
 
@@ -710,23 +807,19 @@ static const struct of_device_id tas5086_dt_ids[] = {
 MODULE_DEVICE_TABLE(of, tas5086_dt_ids);
 #endif
 
-/* charge period values in microseconds */
-static const int tas5086_charge_period[] = {
-         13000,  16900,   23400,   31200,   41600,   54600,   72800,   96200,
-        130000, 156000,  234000,  312000,  416000,  546000,  728000,  962000,
-       1300000, 169000, 2340000, 3120000, 4160000, 5460000, 7280000, 9620000,
-};
-
 static int tas5086_probe(struct snd_soc_codec *codec)
 {
        struct tas5086_private *priv = snd_soc_codec_get_drvdata(codec);
-       int charge_period = 1300000; /* hardware default is 1300 ms */
-       u8 pwm_start_mid_z = 0;
        int i, ret;
 
+       priv->pwm_start_mid_z = 0;
+       priv->charge_period = 1300000; /* hardware default is 1300 ms */
+
        if (of_match_device(of_match_ptr(tas5086_dt_ids), codec->dev)) {
                struct device_node *of_node = codec->dev->of_node;
-               of_property_read_u32(of_node, "ti,charge-period", &charge_period);
+
+               of_property_read_u32(of_node, "ti,charge-period",
+                                    &priv->charge_period);
 
                for (i = 0; i < 6; i++) {
                        char name[25];
@@ -735,43 +828,11 @@ static int tas5086_probe(struct snd_soc_codec *codec)
                                 "ti,mid-z-channel-%d", i + 1);
 
                        if (of_get_property(of_node, name, NULL) != NULL)
-                               pwm_start_mid_z |= 1 << i;
+                               priv->pwm_start_mid_z |= 1 << i;
                }
        }
 
-       /*
-        * If any of the channels is configured to start in Mid-Z mode,
-        * configure 'part 1' of the PWM starts to use Mid-Z, and tell
-        * all configured mid-z channels to start start under 'part 1'.
-        */
-       if (pwm_start_mid_z)
-               regmap_write(priv->regmap, TAS5086_PWM_START,
-                            TAS5086_PWM_START_MIDZ_FOR_START_1 |
-                               pwm_start_mid_z);
-
-       /* lookup and set split-capacitor charge period */
-       if (charge_period == 0) {
-               regmap_write(priv->regmap, TAS5086_SPLIT_CAP_CHARGE, 0);
-       } else {
-               i = index_in_array(tas5086_charge_period,
-                                  ARRAY_SIZE(tas5086_charge_period),
-                                  charge_period);
-               if (i >= 0)
-                       regmap_write(priv->regmap, TAS5086_SPLIT_CAP_CHARGE,
-                                    i + 0x08);
-               else
-                       dev_warn(codec->dev,
-                                "Invalid split-cap charge period of %d ns.\n",
-                                charge_period);
-       }
-
-       /* enable factory trim */
-       ret = regmap_write(priv->regmap, TAS5086_OSC_TRIM, 0x00);
-       if (ret < 0)
-               return ret;
-
-       /* start all channels */
-       ret = regmap_write(priv->regmap, TAS5086_SYS_CONTROL_2, 0x20);
+       ret = tas5086_init(codec->dev, priv);
        if (ret < 0)
                return ret;
 
@@ -780,12 +841,6 @@ static int tas5086_probe(struct snd_soc_codec *codec)
        if (ret < 0)
                return ret;
 
-       /* mute all channels for now */
-       ret = regmap_write(priv->regmap, TAS5086_SOFT_MUTE,
-                          TAS5086_SOFT_MUTE_ALL);
-       if (ret < 0)
-               return ret;
-
        return 0;
 }
 
@@ -803,6 +858,7 @@ static int tas5086_remove(struct snd_soc_codec *codec)
 static struct snd_soc_codec_driver soc_codec_dev_tas5086 = {
        .probe                  = tas5086_probe,
        .remove                 = tas5086_remove,
+       .suspend                = tas5086_soc_suspend,
        .resume                 = tas5086_soc_resume,
        .controls               = tas5086_controls,
        .num_controls           = ARRAY_SIZE(tas5086_controls),
@@ -862,17 +918,8 @@ static int tas5086_i2c_probe(struct i2c_client *i2c,
                if (devm_gpio_request(dev, gpio_nreset, "TAS5086 Reset"))
                        gpio_nreset = -EINVAL;
 
-       if (gpio_is_valid(gpio_nreset)) {
-               /* Reset codec - minimum assertion time is 400ns */
-               gpio_direction_output(gpio_nreset, 0);
-               udelay(1);
-               gpio_set_value(gpio_nreset, 1);
-
-               /* Codec needs ~15ms to wake up */
-               msleep(15);
-       }
-
        priv->gpio_nreset = gpio_nreset;
+       tas5086_reset(priv);
 
        /* The TAS5086 always returns 0x03 in its TAS5086_DEV_ID register */
        ret = regmap_read(priv->regmap, TAS5086_DEV_ID, &i);
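
The tas5086 changes factor the reset and one-time setup into tas5086_reset()/tas5086_init() so that resume can rerun them: suspend shuts the channels down, and resume re-asserts reset, marks the regcache dirty, redoes the init writes and lets regcache_sync() restore the user-visible settings. Roughly, with foo_* helpers standing in for the driver's own and FOO_SYS_CONTROL_2 an assumed register name:

static int foo_soc_suspend(struct snd_soc_codec *codec)
{
        struct foo_priv *priv = snd_soc_codec_get_drvdata(codec);

        /* Put all channels into shutdown before power goes away. */
        return regmap_write(priv->regmap, FOO_SYS_CONTROL_2, 0x60);
}

static int foo_soc_resume(struct snd_soc_codec *codec)
{
        struct foo_priv *priv = snd_soc_codec_get_drvdata(codec);
        int ret;

        foo_hw_reset(priv);                  /* toggle the reset GPIO */
        regcache_mark_dirty(priv->regmap);   /* everything must be rewritten */

        ret = foo_hw_init(codec->dev, priv); /* redo the one-time setup */
        if (ret < 0)
                return ret;

        return regcache_sync(priv->regmap);  /* restore cached settings */
}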
index 31762ebdd774d1947c115542eb4041144345b1b9..5d430cc56f51d42397c3190bf3bfb55b7f850dae 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 /*
  * AIC23 register cache
  */
-static const u16 tlv320aic23_reg[] = {
-       0x0097, 0x0097, 0x00F9, 0x00F9, /* 0 */
-       0x001A, 0x0004, 0x0007, 0x0001, /* 4 */
-       0x0020, 0x0000, 0x0000, 0x0000, /* 8 */
-       0x0000, 0x0000, 0x0000, 0x0000, /* 12 */
+static const struct reg_default tlv320aic23_reg[] = {
+       {  0, 0x0097 },
+       {  1, 0x0097 },
+       {  2, 0x00F9 },
+       {  3, 0x00F9 },
+       {  4, 0x001A },
+       {  5, 0x0004 },
+       {  6, 0x0007 },
+       {  7, 0x0001 },
+       {  8, 0x0020 },
+       {  9, 0x0000 },
+};
+
+static const struct regmap_config tlv320aic23_regmap = {
+       .reg_bits = 7,
+       .val_bits = 9,
+
+       .max_register = TLV320AIC23_RESET,
+       .reg_defaults = tlv320aic23_reg,
+       .num_reg_defaults = ARRAY_SIZE(tlv320aic23_reg),
+       .cache_type = REGCACHE_RBTREE,
 };
 
 static const char *rec_src_text[] = { "Line", "Mic" };
@@ -171,7 +188,7 @@ static const struct snd_soc_dapm_route tlv320aic23_intercon[] = {
 
 /* AIC23 driver data */
 struct aic23 {
-       enum snd_soc_control_type control_type;
+       struct regmap *regmap;
        int mclk;
        int requested_adc;
        int requested_dac;
@@ -532,7 +549,9 @@ static int tlv320aic23_suspend(struct snd_soc_codec *codec)
 
 static int tlv320aic23_resume(struct snd_soc_codec *codec)
 {
-       snd_soc_cache_sync(codec);
+       struct aic23 *aic23 = snd_soc_codec_get_drvdata(codec);
+       regcache_mark_dirty(aic23->regmap);
+       regcache_sync(aic23->regmap);
        tlv320aic23_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
        return 0;
@@ -540,10 +559,9 @@ static int tlv320aic23_resume(struct snd_soc_codec *codec)
 
 static int tlv320aic23_probe(struct snd_soc_codec *codec)
 {
-       struct aic23 *aic23 = snd_soc_codec_get_drvdata(codec);
        int ret;
 
-       ret = snd_soc_codec_set_cache_io(codec, 7, 9, aic23->control_type);
+       ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
        if (ret < 0) {
                dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
                return ret;
@@ -552,16 +570,6 @@ static int tlv320aic23_probe(struct snd_soc_codec *codec)
        /* Reset codec */
        snd_soc_write(codec, TLV320AIC23_RESET, 0);
 
-       /* Write the register default value to cache for reserved registers,
-        * so the write to the these registers are suppressed by the cache
-        * restore code when it skips writes of default registers.
-        */
-       snd_soc_cache_write(codec, 0x0A, 0);
-       snd_soc_cache_write(codec, 0x0B, 0);
-       snd_soc_cache_write(codec, 0x0C, 0);
-       snd_soc_cache_write(codec, 0x0D, 0);
-       snd_soc_cache_write(codec, 0x0E, 0);
-
        /* power on device */
        tlv320aic23_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
@@ -586,9 +594,6 @@ static int tlv320aic23_probe(struct snd_soc_codec *codec)
 
        snd_soc_write(codec, TLV320AIC23_ACTIVE, 0x1);
 
-       snd_soc_add_codec_controls(codec, tlv320aic23_snd_controls,
-                               ARRAY_SIZE(tlv320aic23_snd_controls));
-
        return 0;
 }
 
@@ -599,21 +604,19 @@ static int tlv320aic23_remove(struct snd_soc_codec *codec)
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_tlv320aic23 = {
-       .reg_cache_size = ARRAY_SIZE(tlv320aic23_reg),
-       .reg_word_size = sizeof(u16),
-       .reg_cache_default = tlv320aic23_reg,
        .probe = tlv320aic23_probe,
        .remove = tlv320aic23_remove,
        .suspend = tlv320aic23_suspend,
        .resume = tlv320aic23_resume,
        .set_bias_level = tlv320aic23_set_bias_level,
+       .controls = tlv320aic23_snd_controls,
+       .num_controls = ARRAY_SIZE(tlv320aic23_snd_controls),
        .dapm_widgets = tlv320aic23_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(tlv320aic23_dapm_widgets),
        .dapm_routes = tlv320aic23_intercon,
        .num_dapm_routes = ARRAY_SIZE(tlv320aic23_intercon),
 };
 
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
 /*
  * If the i2c layer weren't so broken, we could pass this kind of data
  * around
@@ -631,8 +634,11 @@ static int tlv320aic23_codec_probe(struct i2c_client *i2c,
        if (aic23 == NULL)
                return -ENOMEM;
 
+       aic23->regmap = devm_regmap_init_i2c(i2c, &tlv320aic23_regmap);
+       if (IS_ERR(aic23->regmap))
+               return PTR_ERR(aic23->regmap);
+
        i2c_set_clientdata(i2c, aic23);
-       aic23->control_type = SND_SOC_I2C;
 
        ret =  snd_soc_register_codec(&i2c->dev,
                        &soc_codec_dev_tlv320aic23, &tlv320aic23_dai, 1);
@@ -660,29 +666,7 @@ static struct i2c_driver tlv320aic23_i2c_driver = {
        .id_table = tlv320aic23_id,
 };
 
-#endif
-
-static int __init tlv320aic23_modinit(void)
-{
-       int ret;
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-       ret = i2c_add_driver(&tlv320aic23_i2c_driver);
-       if (ret != 0) {
-               printk(KERN_ERR "Failed to register TLV320AIC23 I2C driver: %d\n",
-                      ret);
-       }
-#endif
-       return ret;
-}
-module_init(tlv320aic23_modinit);
-
-static void __exit tlv320aic23_exit(void)
-{
-#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-       i2c_del_driver(&tlv320aic23_i2c_driver);
-#endif
-}
-module_exit(tlv320aic23_exit);
+module_i2c_driver(tlv320aic23_i2c_driver);
 
 MODULE_DESCRIPTION("ASoC TLV320AIC23 codec driver");
 MODULE_AUTHOR("Arun KS <arunks@mistralsolutions.com>");
index 7b8f3d965f43c814b9b5f5f918ddc6c8778129d4..94a658fa6d97408e84682cea3e3ecc4c9a01cc1d 100644 (file)
@@ -29,6 +29,7 @@ MODULE_LICENSE("GPL");
 /* AIC26 driver private data */
 struct aic26 {
        struct spi_device *spi;
+       struct regmap *regmap;
        struct snd_soc_codec *codec;
        int master;
        int datfm;
@@ -40,85 +41,6 @@ struct aic26 {
        int keyclick_len;
 };
 
-/* ---------------------------------------------------------------------
- * Register access routines
- */
-static unsigned int aic26_reg_read(struct snd_soc_codec *codec,
-                                  unsigned int reg)
-{
-       struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
-       u16 *cache = codec->reg_cache;
-       u16 cmd, value;
-       u8 buffer[2];
-       int rc;
-
-       if (reg >= AIC26_NUM_REGS) {
-               WARN_ON_ONCE(1);
-               return 0;
-       }
-
-       /* Do SPI transfer; first 16bits are command; remaining is
-        * register contents */
-       cmd = AIC26_READ_COMMAND_WORD(reg);
-       buffer[0] = (cmd >> 8) & 0xff;
-       buffer[1] = cmd & 0xff;
-       rc = spi_write_then_read(aic26->spi, buffer, 2, buffer, 2);
-       if (rc) {
-               dev_err(&aic26->spi->dev, "AIC26 reg read error\n");
-               return -EIO;
-       }
-       value = (buffer[0] << 8) | buffer[1];
-
-       /* Update the cache before returning with the value */
-       cache[reg] = value;
-       return value;
-}
-
-static unsigned int aic26_reg_read_cache(struct snd_soc_codec *codec,
-                                        unsigned int reg)
-{
-       u16 *cache = codec->reg_cache;
-
-       if (reg >= AIC26_NUM_REGS) {
-               WARN_ON_ONCE(1);
-               return 0;
-       }
-
-       return cache[reg];
-}
-
-static int aic26_reg_write(struct snd_soc_codec *codec, unsigned int reg,
-                          unsigned int value)
-{
-       struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
-       u16 *cache = codec->reg_cache;
-       u16 cmd;
-       u8 buffer[4];
-       int rc;
-
-       if (reg >= AIC26_NUM_REGS) {
-               WARN_ON_ONCE(1);
-               return -EINVAL;
-       }
-
-       /* Do SPI transfer; first 16bits are command; remaining is data
-        * to write into register */
-       cmd = AIC26_WRITE_COMMAND_WORD(reg);
-       buffer[0] = (cmd >> 8) & 0xff;
-       buffer[1] = cmd & 0xff;
-       buffer[2] = value >> 8;
-       buffer[3] = value;
-       rc = spi_write(aic26->spi, buffer, 4);
-       if (rc) {
-               dev_err(&aic26->spi->dev, "AIC26 reg read error\n");
-               return -EIO;
-       }
-
-       /* update cache before returning */
-       cache[reg] = value;
-       return 0;
-}
-
 static const struct snd_soc_dapm_widget tlv320aic26_dapm_widgets[] = {
 SND_SOC_DAPM_INPUT("MICIN"),
 SND_SOC_DAPM_INPUT("AUX"),
@@ -195,19 +117,15 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
        snd_soc_write(codec, AIC26_REG_PLL_PROG2, reg);
 
        /* Audio Control 3 (master mode, fsref rate) */
-       reg = aic26_reg_read_cache(codec, AIC26_REG_AUDIO_CTRL3);
-       reg &= ~0xf800;
        if (aic26->master)
-               reg |= 0x0800;
+               reg = 0x0800;
        if (fsref == 48000)
-               reg |= 0x2000;
-       snd_soc_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
+               reg = 0x2000;
+       snd_soc_update_bits(codec, AIC26_REG_AUDIO_CTRL3, 0xf800, reg);
 
        /* Audio Control 1 (FSref divisor) */
-       reg = aic26_reg_read_cache(codec, AIC26_REG_AUDIO_CTRL1);
-       reg &= ~0x0fff;
-       reg |= wlen | aic26->datfm | (divisor << 3) | divisor;
-       snd_soc_write(codec, AIC26_REG_AUDIO_CTRL1, reg);
+       reg = wlen | aic26->datfm | (divisor << 3) | divisor;
+       snd_soc_update_bits(codec, AIC26_REG_AUDIO_CTRL1, 0xfff, reg);
 
        return 0;
 }
@@ -219,16 +137,16 @@ static int aic26_mute(struct snd_soc_dai *dai, int mute)
 {
        struct snd_soc_codec *codec = dai->codec;
        struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
-       u16 reg = aic26_reg_read_cache(codec, AIC26_REG_DAC_GAIN);
+       u16 reg;
 
        dev_dbg(&aic26->spi->dev, "aic26_mute(dai=%p, mute=%i)\n",
                dai, mute);
 
        if (mute)
-               reg |= 0x8080;
+               reg = 0x8080;
        else
-               reg &= ~0x8080;
-       snd_soc_write(codec, AIC26_REG_DAC_GAIN, reg);
+               reg = 0;
+       snd_soc_update_bits(codec, AIC26_REG_DAC_GAIN, 0x8000, reg);
 
        return 0;
 }
@@ -346,7 +264,7 @@ static ssize_t aic26_keyclick_show(struct device *dev,
        struct aic26 *aic26 = dev_get_drvdata(dev);
        int val, amp, freq, len;
 
-       val = aic26_reg_read_cache(aic26->codec, AIC26_REG_AUDIO_CTRL2);
+       val = snd_soc_read(aic26->codec, AIC26_REG_AUDIO_CTRL2);
        amp = (val >> 12) & 0x7;
        freq = (125 << ((val >> 8) & 0x7)) >> 1;
        len = 2 * (1 + ((val >> 4) & 0xf));
@@ -360,11 +278,9 @@ static ssize_t aic26_keyclick_set(struct device *dev,
                                  const char *buf, size_t count)
 {
        struct aic26 *aic26 = dev_get_drvdata(dev);
-       int val;
 
-       val = aic26_reg_read_cache(aic26->codec, AIC26_REG_AUDIO_CTRL2);
-       val |= 0x8000;
-       snd_soc_write(aic26->codec, AIC26_REG_AUDIO_CTRL2, val);
+       snd_soc_update_bits(aic26->codec, AIC26_REG_AUDIO_CTRL2,
+                           0x8000, 0x800);
 
        return count;
 }
@@ -377,7 +293,9 @@ static DEVICE_ATTR(keyclick, 0644, aic26_keyclick_show, aic26_keyclick_set);
 static int aic26_probe(struct snd_soc_codec *codec)
 {
        struct aic26 *aic26 = dev_get_drvdata(codec->dev);
-       int ret, err, i, reg;
+       int ret, reg;
+
+       snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_REGMAP);
 
        aic26->codec = codec;
 
@@ -393,37 +311,30 @@ static int aic26_probe(struct snd_soc_codec *codec)
        reg |= 0x0800; /* set master mode */
        snd_soc_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
 
-       /* Fill register cache */
-       for (i = 0; i < codec->driver->reg_cache_size; i++)
-               snd_soc_read(codec, i);
-
        /* Register the sysfs files for debugging */
        /* Create SysFS files */
        ret = device_create_file(codec->dev, &dev_attr_keyclick);
        if (ret)
                dev_info(codec->dev, "error creating sysfs files\n");
 
-       /* register controls */
-       dev_dbg(codec->dev, "Registering controls\n");
-       err = snd_soc_add_codec_controls(codec, aic26_snd_controls,
-                       ARRAY_SIZE(aic26_snd_controls));
-       WARN_ON(err < 0);
-
        return 0;
 }
 
 static struct snd_soc_codec_driver aic26_soc_codec_dev = {
        .probe = aic26_probe,
-       .read = aic26_reg_read,
-       .write = aic26_reg_write,
-       .reg_cache_size = AIC26_NUM_REGS,
-       .reg_word_size = sizeof(u16),
+       .controls = aic26_snd_controls,
+       .num_controls = ARRAY_SIZE(aic26_snd_controls),
        .dapm_widgets = tlv320aic26_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(tlv320aic26_dapm_widgets),
        .dapm_routes = tlv320aic26_dapm_routes,
        .num_dapm_routes = ARRAY_SIZE(tlv320aic26_dapm_routes),
 };
 
+static const struct regmap_config aic26_regmap = {
+       .reg_bits = 16,
+       .val_bits = 16,
+};
+
 /* ---------------------------------------------------------------------
  * SPI device portion of driver: probe and release routines and SPI
  *                              driver registration.
@@ -440,6 +351,10 @@ static int aic26_spi_probe(struct spi_device *spi)
        if (!aic26)
                return -ENOMEM;
 
+       aic26->regmap = devm_regmap_init_spi(spi, &aic26_regmap);
+       if (IS_ERR(aic26->regmap))
+               return PTR_ERR(aic26->regmap);
+
        /* Initialize the driver data */
        aic26->spi = spi;
        dev_set_drvdata(&spi->dev, aic26);
index 67f19c3bebe6a88a6d86a81f32b2f6dca25c65ec..629b85e75409383df910d448eabb97b2e8b5dc8e 100644 (file)
@@ -9,10 +9,7 @@
 #define _TLV320AIC16_H_
 
 /* AIC26 Registers */
-#define AIC26_READ_COMMAND_WORD(addr)  ((1 << 15) | (addr << 5))
-#define AIC26_WRITE_COMMAND_WORD(addr) ((0 << 15) | (addr << 5))
-#define AIC26_PAGE_ADDR(page, offset)  ((page << 6) | offset)
-#define AIC26_NUM_REGS                 AIC26_PAGE_ADDR(3, 0)
+#define AIC26_PAGE_ADDR(page, offset)  ((page << 11) | offset << 5)
 
 /* Page 0: Auxiliary data registers */
 #define AIC26_REG_BAT1                 AIC26_PAGE_ADDR(0, 0x05)
index 2ed57d4aa4456e4e614ca85765846d1d8aa78cf2..18cdcca9014cc66d394dd9e1df875f6edc2a7874 100644 (file)
@@ -60,9 +60,8 @@ struct aic32x4_rate_divs {
 };
 
 struct aic32x4_priv {
+       struct regmap *regmap;
        u32 sysclk;
-       u8 page_no;
-       void *control_data;
        u32 power_cfg;
        u32 micpga_routing;
        bool swapdacs;
@@ -262,67 +261,25 @@ static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
        {"Right ADC", NULL, "Right Input Mixer"},
 };
 
-static inline int aic32x4_change_page(struct snd_soc_codec *codec,
-                                       unsigned int new_page)
-{
-       struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
-       u8 data[2];
-       int ret;
-
-       data[0] = 0x00;
-       data[1] = new_page & 0xff;
-
-       ret = codec->hw_write(codec->control_data, data, 2);
-       if (ret == 2) {
-               aic32x4->page_no = new_page;
-               return 0;
-       } else {
-               return ret;
-       }
-}
-
-static int aic32x4_write(struct snd_soc_codec *codec, unsigned int reg,
-                               unsigned int val)
-{
-       struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
-       unsigned int page = reg / 128;
-       unsigned int fixed_reg = reg % 128;
-       u8 data[2];
-       int ret;
-
-       /* A write to AIC32X4_PSEL is really a non-explicit page change */
-       if (reg == AIC32X4_PSEL)
-               return aic32x4_change_page(codec, val);
-
-       if (aic32x4->page_no != page) {
-               ret = aic32x4_change_page(codec, page);
-               if (ret != 0)
-                       return ret;
-       }
-
-       data[0] = fixed_reg & 0xff;
-       data[1] = val & 0xff;
-
-       if (codec->hw_write(codec->control_data, data, 2) == 2)
-               return 0;
-       else
-               return -EIO;
-}
+static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
+       {
+               .selector_reg = 0,
+               .selector_mask  = 0xff,
+               .window_start = 0,
+               .window_len = 128,
+               .range_min = AIC32X4_PAGE1,
+               .range_max = AIC32X4_PAGE1 + 127,
+       },
+};
 
-static unsigned int aic32x4_read(struct snd_soc_codec *codec, unsigned int reg)
-{
-       struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
-       unsigned int page = reg / 128;
-       unsigned int fixed_reg = reg % 128;
-       int ret;
+static const struct regmap_config aic32x4_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
 
-       if (aic32x4->page_no != page) {
-               ret = aic32x4_change_page(codec, page);
-               if (ret != 0)
-                       return ret;
-       }
-       return i2c_smbus_read_byte_data(codec->control_data, fixed_reg & 0xff);
-}
+       .max_register = AIC32X4_RMICPGAVOL,
+       .ranges = aic32x4_regmap_pages,
+       .num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
+};
 
 static inline int aic32x4_get_divs(int mclk, int rate)
 {
@@ -617,16 +574,10 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
 {
        struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
        u32 tmp_reg;
-       int ret;
 
-       codec->hw_write = (hw_write_t) i2c_master_send;
-       codec->control_data = aic32x4->control_data;
+       snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
 
        if (aic32x4->rstn_gpio >= 0) {
-               ret = devm_gpio_request_one(codec->dev, aic32x4->rstn_gpio,
-                               GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn");
-               if (ret != 0)
-                       return ret;
                ndelay(10);
                gpio_set_value(aic32x4->rstn_gpio, 1);
        }
@@ -692,8 +643,6 @@ static int aic32x4_remove(struct snd_soc_codec *codec)
 }
 
 static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = {
-       .read = aic32x4_read,
-       .write = aic32x4_write,
        .probe = aic32x4_probe,
        .remove = aic32x4_remove,
        .suspend = aic32x4_suspend,
@@ -720,7 +669,10 @@ static int aic32x4_i2c_probe(struct i2c_client *i2c,
        if (aic32x4 == NULL)
                return -ENOMEM;
 
-       aic32x4->control_data = i2c;
+       aic32x4->regmap = devm_regmap_init_i2c(i2c, &aic32x4_regmap);
+       if (IS_ERR(aic32x4->regmap))
+               return PTR_ERR(aic32x4->regmap);
+
        i2c_set_clientdata(i2c, aic32x4);
 
        if (pdata) {
@@ -735,6 +687,13 @@ static int aic32x4_i2c_probe(struct i2c_client *i2c,
                aic32x4->rstn_gpio = -1;
        }
 
+       if (aic32x4->rstn_gpio >= 0) {
+               ret = devm_gpio_request_one(&i2c->dev, aic32x4->rstn_gpio,
+                               GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn");
+               if (ret != 0)
+                       return ret;
+       }
+
        ret = snd_soc_register_codec(&i2c->dev,
                        &soc_codec_dev_aic32x4, &aic32x4_dai, 1);
        return ret;
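The regmap_range_cfg table above is what lets the aic32x4 driver delete its private page-switching read/write helpers: any access that falls into the declared virtual range makes the regmap core write the page-select register itself. A sketch of the same mechanism for a generic two-page device; the selector register 0 and the 128-register window mirror the hunk, the remaining names are illustrative.

#include <linux/regmap.h>

#define EX_PAGE_LEN	128
#define EX_PAGE1_BASE	EX_PAGE_LEN	/* virtual addresses 128..255 hit page 1 */

static const struct regmap_range_cfg example_pages[] = {
	{
		.selector_reg  = 0,		/* page-select register */
		.selector_mask = 0xff,
		.window_start  = 0,
		.window_len    = EX_PAGE_LEN,
		.range_min     = EX_PAGE1_BASE,
		.range_max     = EX_PAGE1_BASE + EX_PAGE_LEN - 1,
	},
};

static const struct regmap_config example_paged_regmap = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= EX_PAGE1_BASE + EX_PAGE_LEN - 1,
	.ranges		= example_pages,
	.num_ranges	= ARRAY_SIZE(example_pages),
};

/* regmap_write(map, EX_PAGE1_BASE + 0x10, val) now selects page 1 and
 * writes offset 0x10 with no page bookkeeping left in the driver.
 */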
index 6e3f269243e050fe5149e60807c0e41cbf6e4147..546d16b7d38f87cd456e35286dbf3b9a8eab4b33 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/i2c.h>
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
+#include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <linux/slab.h>
 #include <sound/core.h>
@@ -72,9 +73,9 @@ struct aic3x_disable_nb {
 /* codec private data */
 struct aic3x_priv {
        struct snd_soc_codec *codec;
+       struct regmap *regmap;
        struct regulator_bulk_data supplies[AIC3X_NUM_SUPPLIES];
        struct aic3x_disable_nb disable_nb[AIC3X_NUM_SUPPLIES];
-       enum snd_soc_control_type control_type;
        struct aic3x_setup_data *setup;
        unsigned int sysclk;
        struct list_head list;
@@ -90,41 +91,45 @@ struct aic3x_priv {
        enum aic3x_micbias_voltage micbias_vg;
 };
 
-/*
- * AIC3X register cache
- * We can't read the AIC3X register space when we are
- * using 2 wire for device control, so we cache them instead.
- * There is no point in caching the reset register
- */
-static const u8 aic3x_reg[AIC3X_CACHEREGNUM] = {
-       0x00, 0x00, 0x00, 0x10, /* 0 */
-       0x04, 0x00, 0x00, 0x00, /* 4 */
-       0x00, 0x00, 0x00, 0x01, /* 8 */
-       0x00, 0x00, 0x00, 0x80, /* 12 */
-       0x80, 0xff, 0xff, 0x78, /* 16 */
-       0x78, 0x78, 0x78, 0x78, /* 20 */
-       0x78, 0x00, 0x00, 0xfe, /* 24 */
-       0x00, 0x00, 0xfe, 0x00, /* 28 */
-       0x18, 0x18, 0x00, 0x00, /* 32 */
-       0x00, 0x00, 0x00, 0x00, /* 36 */
-       0x00, 0x00, 0x00, 0x80, /* 40 */
-       0x80, 0x00, 0x00, 0x00, /* 44 */
-       0x00, 0x00, 0x00, 0x04, /* 48 */
-       0x00, 0x00, 0x00, 0x00, /* 52 */
-       0x00, 0x00, 0x04, 0x00, /* 56 */
-       0x00, 0x00, 0x00, 0x00, /* 60 */
-       0x00, 0x04, 0x00, 0x00, /* 64 */
-       0x00, 0x00, 0x00, 0x00, /* 68 */
-       0x04, 0x00, 0x00, 0x00, /* 72 */
-       0x00, 0x00, 0x00, 0x00, /* 76 */
-       0x00, 0x00, 0x00, 0x00, /* 80 */
-       0x00, 0x00, 0x00, 0x00, /* 84 */
-       0x00, 0x00, 0x00, 0x00, /* 88 */
-       0x00, 0x00, 0x00, 0x00, /* 92 */
-       0x00, 0x00, 0x00, 0x00, /* 96 */
-       0x00, 0x00, 0x02, 0x00, /* 100 */
-       0x00, 0x00, 0x00, 0x00, /* 104 */
-       0x00, 0x00,             /* 108 */
+static const struct reg_default aic3x_reg[] = {
+       {   0, 0x00 }, {   1, 0x00 }, {   2, 0x00 }, {   3, 0x10 },
+       {   4, 0x04 }, {   5, 0x00 }, {   6, 0x00 }, {   7, 0x00 },
+       {   8, 0x00 }, {   9, 0x00 }, {  10, 0x00 }, {  11, 0x01 },
+       {  12, 0x00 }, {  13, 0x00 }, {  14, 0x00 }, {  15, 0x80 },
+       {  16, 0x80 }, {  17, 0xff }, {  18, 0xff }, {  19, 0x78 },
+       {  20, 0x78 }, {  21, 0x78 }, {  22, 0x78 }, {  23, 0x78 },
+       {  24, 0x78 }, {  25, 0x00 }, {  26, 0x00 }, {  27, 0xfe },
+       {  28, 0x00 }, {  29, 0x00 }, {  30, 0xfe }, {  31, 0x00 },
+       {  32, 0x18 }, {  33, 0x18 }, {  34, 0x00 }, {  35, 0x00 },
+       {  36, 0x00 }, {  37, 0x00 }, {  38, 0x00 }, {  39, 0x00 },
+       {  40, 0x00 }, {  41, 0x00 }, {  42, 0x00 }, {  43, 0x80 },
+       {  44, 0x80 }, {  45, 0x00 }, {  46, 0x00 }, {  47, 0x00 },
+       {  48, 0x00 }, {  49, 0x00 }, {  50, 0x00 }, {  51, 0x04 },
+       {  52, 0x00 }, {  53, 0x00 }, {  54, 0x00 }, {  55, 0x00 },
+       {  56, 0x00 }, {  57, 0x00 }, {  58, 0x04 }, {  59, 0x00 },
+       {  60, 0x00 }, {  61, 0x00 }, {  62, 0x00 }, {  63, 0x00 },
+       {  64, 0x00 }, {  65, 0x04 }, {  66, 0x00 }, {  67, 0x00 },
+       {  68, 0x00 }, {  69, 0x00 }, {  70, 0x00 }, {  71, 0x00 },
+       {  72, 0x04 }, {  73, 0x00 }, {  74, 0x00 }, {  75, 0x00 },
+       {  76, 0x00 }, {  77, 0x00 }, {  78, 0x00 }, {  79, 0x00 },
+       {  80, 0x00 }, {  81, 0x00 }, {  82, 0x00 }, {  83, 0x00 },
+       {  84, 0x00 }, {  85, 0x00 }, {  86, 0x00 }, {  87, 0x00 },
+       {  88, 0x00 }, {  89, 0x00 }, {  90, 0x00 }, {  91, 0x00 },
+       {  92, 0x00 }, {  93, 0x00 }, {  94, 0x00 }, {  95, 0x00 },
+       {  96, 0x00 }, {  97, 0x00 }, {  98, 0x00 }, {  99, 0x00 },
+       { 100, 0x00 }, { 101, 0x00 }, { 102, 0x02 }, { 103, 0x00 },
+       { 104, 0x00 }, { 105, 0x00 }, { 106, 0x00 }, { 107, 0x00 },
+       { 108, 0x00 }, { 109, 0x00 },
+};
+
+static const struct regmap_config aic3x_regmap = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = DAC_ICC_ADJ,
+       .reg_defaults = aic3x_reg,
+       .num_reg_defaults = ARRAY_SIZE(aic3x_reg),
+       .cache_type = REGCACHE_RBTREE,
 };
 
 #define SOC_DAPM_SINGLE_AIC3X(xname, reg, shift, mask, invert) \
@@ -674,6 +679,8 @@ static const struct snd_soc_dapm_route intercon[] = {
        /* Left Input */
        {"Left Line1L Mux", "single-ended", "LINE1L"},
        {"Left Line1L Mux", "differential", "LINE1L"},
+       {"Left Line1R Mux", "single-ended", "LINE1R"},
+       {"Left Line1R Mux", "differential", "LINE1R"},
 
        {"Left Line2L Mux", "single-ended", "LINE2L"},
        {"Left Line2L Mux", "differential", "LINE2L"},
@@ -690,6 +697,8 @@ static const struct snd_soc_dapm_route intercon[] = {
        /* Right Input */
        {"Right Line1R Mux", "single-ended", "LINE1R"},
        {"Right Line1R Mux", "differential", "LINE1R"},
+       {"Right Line1L Mux", "single-ended", "LINE1L"},
+       {"Right Line1L Mux", "differential", "LINE1L"},
 
        {"Right Line2R Mux", "single-ended", "LINE2R"},
        {"Right Line2R Mux", "differential", "LINE2R"},
@@ -824,12 +833,6 @@ static int aic3x_add_widgets(struct snd_soc_codec *codec)
        struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
        struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-       snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
-                                 ARRAY_SIZE(aic3x_dapm_widgets));
-
-       /* set up audio path interconnects */
-       snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
-
        if (aic3x->model == AIC3X_MODEL_3007) {
                snd_soc_dapm_new_controls(dapm, aic3007_dapm_widgets,
                        ARRAY_SIZE(aic3007_dapm_widgets));
@@ -1078,29 +1081,6 @@ static int aic3x_set_dai_fmt(struct snd_soc_dai *codec_dai,
        return 0;
 }
 
-static int aic3x_init_3007(struct snd_soc_codec *codec)
-{
-       u8 tmp1, tmp2, *cache = codec->reg_cache;
-
-       /*
-        * There is no need to cache writes to undocumented page 0xD but
-        * respective page 0 register cache entries must be preserved
-        */
-       tmp1 = cache[0xD];
-       tmp2 = cache[0x8];
-       /* Class-D speaker driver init; datasheet p. 46 */
-       snd_soc_write(codec, AIC3X_PAGE_SELECT, 0x0D);
-       snd_soc_write(codec, 0xD, 0x0D);
-       snd_soc_write(codec, 0x8, 0x5C);
-       snd_soc_write(codec, 0x8, 0x5D);
-       snd_soc_write(codec, 0x8, 0x5C);
-       snd_soc_write(codec, AIC3X_PAGE_SELECT, 0x00);
-       cache[0xD] = tmp1;
-       cache[0x8] = tmp2;
-
-       return 0;
-}
-
 static int aic3x_regulator_event(struct notifier_block *nb,
                                 unsigned long event, void *data)
 {
@@ -1115,7 +1095,7 @@ static int aic3x_regulator_event(struct notifier_block *nb,
                 */
                if (gpio_is_valid(aic3x->gpio_reset))
                        gpio_set_value(aic3x->gpio_reset, 0);
-               aic3x->codec->cache_sync = 1;
+               regcache_mark_dirty(aic3x->regmap);
        }
 
        return 0;
@@ -1124,8 +1104,7 @@ static int aic3x_regulator_event(struct notifier_block *nb,
 static int aic3x_set_power(struct snd_soc_codec *codec, int power)
 {
        struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
-       int i, ret;
-       u8 *cache = codec->reg_cache;
+       int ret;
 
        if (power) {
                ret = regulator_bulk_enable(ARRAY_SIZE(aic3x->supplies),
@@ -1133,12 +1112,6 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
                if (ret)
                        goto out;
                aic3x->power = 1;
-               /*
-                * Reset release and cache sync is necessary only if some
-                * supply was off or if there were cached writes
-                */
-               if (!codec->cache_sync)
-                       goto out;
 
                if (gpio_is_valid(aic3x->gpio_reset)) {
                        udelay(1);
@@ -1146,12 +1119,8 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
                }
 
                /* Sync reg_cache with the hardware */
-               codec->cache_only = 0;
-               for (i = AIC3X_SAMPLE_RATE_SEL_REG; i < ARRAY_SIZE(aic3x_reg); i++)
-                       snd_soc_write(codec, i, cache[i]);
-               if (aic3x->model == AIC3X_MODEL_3007)
-                       aic3x_init_3007(codec);
-               codec->cache_sync = 0;
+               regcache_cache_only(aic3x->regmap, false);
+               regcache_sync(aic3x->regmap);
        } else {
                /*
                 * Do soft reset to this codec instance in order to clear
@@ -1159,10 +1128,10 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
                 * remain on
                 */
                snd_soc_write(codec, AIC3X_RESET, SOFT_RESET);
-               codec->cache_sync = 1;
+               regcache_mark_dirty(aic3x->regmap);
                aic3x->power = 0;
                /* HW writes are needless when bias is off */
-               codec->cache_only = 1;
+               regcache_cache_only(aic3x->regmap, true);
                ret = regulator_bulk_disable(ARRAY_SIZE(aic3x->supplies),
                                             aic3x->supplies);
        }
@@ -1317,7 +1286,6 @@ static int aic3x_init(struct snd_soc_codec *codec)
        snd_soc_write(codec, LINE2R_2_MONOLOPM_VOL, DEFAULT_VOL);
 
        if (aic3x->model == AIC3X_MODEL_3007) {
-               aic3x_init_3007(codec);
                snd_soc_write(codec, CLASSD_CTRL, 0);
        }
 
@@ -1345,29 +1313,12 @@ static int aic3x_probe(struct snd_soc_codec *codec)
        INIT_LIST_HEAD(&aic3x->list);
        aic3x->codec = codec;
 
-       ret = snd_soc_codec_set_cache_io(codec, 8, 8, aic3x->control_type);
+       ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
        if (ret != 0) {
                dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
                return ret;
        }
 
-       if (gpio_is_valid(aic3x->gpio_reset) &&
-           !aic3x_is_shared_reset(aic3x)) {
-               ret = gpio_request(aic3x->gpio_reset, "tlv320aic3x reset");
-               if (ret != 0)
-                       goto err_gpio;
-               gpio_direction_output(aic3x->gpio_reset, 0);
-       }
-
-       for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
-               aic3x->supplies[i].supply = aic3x_supply_names[i];
-
-       ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(aic3x->supplies),
-                                aic3x->supplies);
-       if (ret != 0) {
-               dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
-               goto err_get;
-       }
        for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++) {
                aic3x->disable_nb[i].nb.notifier_call = aic3x_regulator_event;
                aic3x->disable_nb[i].aic3x = aic3x;
@@ -1381,7 +1332,7 @@ static int aic3x_probe(struct snd_soc_codec *codec)
                }
        }
 
-       codec->cache_only = 1;
+       regcache_mark_dirty(aic3x->regmap);
        aic3x_init(codec);
 
        if (aic3x->setup) {
@@ -1392,8 +1343,6 @@ static int aic3x_probe(struct snd_soc_codec *codec)
                              (aic3x->setup->gpio_func[1] & 0xf) << 4);
        }
 
-       snd_soc_add_codec_controls(codec, aic3x_snd_controls,
-                            ARRAY_SIZE(aic3x_snd_controls));
        if (aic3x->model == AIC3X_MODEL_3007)
                snd_soc_add_codec_controls(codec, &aic3x_classd_amp_gain_ctrl, 1);
 
@@ -1424,12 +1373,6 @@ err_notif:
        while (i--)
                regulator_unregister_notifier(aic3x->supplies[i].consumer,
                                              &aic3x->disable_nb[i].nb);
-       regulator_bulk_free(ARRAY_SIZE(aic3x->supplies), aic3x->supplies);
-err_get:
-       if (gpio_is_valid(aic3x->gpio_reset) &&
-           !aic3x_is_shared_reset(aic3x))
-               gpio_free(aic3x->gpio_reset);
-err_gpio:
        return ret;
 }
 
@@ -1440,15 +1383,9 @@ static int aic3x_remove(struct snd_soc_codec *codec)
 
        aic3x_set_bias_level(codec, SND_SOC_BIAS_OFF);
        list_del(&aic3x->list);
-       if (gpio_is_valid(aic3x->gpio_reset) &&
-           !aic3x_is_shared_reset(aic3x)) {
-               gpio_set_value(aic3x->gpio_reset, 0);
-               gpio_free(aic3x->gpio_reset);
-       }
        for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
                regulator_unregister_notifier(aic3x->supplies[i].consumer,
                                              &aic3x->disable_nb[i].nb);
-       regulator_bulk_free(ARRAY_SIZE(aic3x->supplies), aic3x->supplies);
 
        return 0;
 }
@@ -1456,13 +1393,16 @@ static int aic3x_remove(struct snd_soc_codec *codec)
 static struct snd_soc_codec_driver soc_codec_dev_aic3x = {
        .set_bias_level = aic3x_set_bias_level,
        .idle_bias_off = true,
-       .reg_cache_size = ARRAY_SIZE(aic3x_reg),
-       .reg_word_size = sizeof(u8),
-       .reg_cache_default = aic3x_reg,
        .probe = aic3x_probe,
        .remove = aic3x_remove,
        .suspend = aic3x_suspend,
        .resume = aic3x_resume,
+       .controls = aic3x_snd_controls,
+       .num_controls = ARRAY_SIZE(aic3x_snd_controls),
+       .dapm_widgets = aic3x_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(aic3x_dapm_widgets),
+       .dapm_routes = intercon,
+       .num_dapm_routes = ARRAY_SIZE(intercon),
 };
 
 /*
@@ -1479,6 +1419,16 @@ static const struct i2c_device_id aic3x_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
 
+static const struct reg_default aic3007_class_d[] = {
+       /* Class-D speaker driver init; datasheet p. 46 */
+       { AIC3X_PAGE_SELECT, 0x0D },
+       { 0xD, 0x0D },
+       { 0x8, 0x5C },
+       { 0x8, 0x5D },
+       { 0x8, 0x5C },
+       { AIC3X_PAGE_SELECT, 0x00 },
+};
+
 /*
  * If the i2c layer weren't so broken, we could pass this kind of data
  * around
@@ -1490,7 +1440,7 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
        struct aic3x_priv *aic3x;
        struct aic3x_setup_data *ai3x_setup;
        struct device_node *np = i2c->dev.of_node;
-       int ret;
+       int ret, i;
        u32 value;
 
        aic3x = devm_kzalloc(&i2c->dev, sizeof(struct aic3x_priv), GFP_KERNEL);
@@ -1499,7 +1449,13 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
                return -ENOMEM;
        }
 
-       aic3x->control_type = SND_SOC_I2C;
+       aic3x->regmap = devm_regmap_init_i2c(i2c, &aic3x_regmap);
+       if (IS_ERR(aic3x->regmap)) {
+               ret = PTR_ERR(aic3x->regmap);
+               return ret;
+       }
+
+       regcache_cache_only(aic3x->regmap, true);
 
        i2c_set_clientdata(i2c, aic3x);
        if (pdata) {
@@ -1551,14 +1507,54 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
 
        aic3x->model = id->driver_data;
 
+       if (gpio_is_valid(aic3x->gpio_reset) &&
+           !aic3x_is_shared_reset(aic3x)) {
+               ret = gpio_request(aic3x->gpio_reset, "tlv320aic3x reset");
+               if (ret != 0)
+                       goto err;
+               gpio_direction_output(aic3x->gpio_reset, 0);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(aic3x->supplies); i++)
+               aic3x->supplies[i].supply = aic3x_supply_names[i];
+
+       ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(aic3x->supplies),
+                                     aic3x->supplies);
+       if (ret != 0) {
+               dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+               goto err_gpio;
+       }
+
+       if (aic3x->model == AIC3X_MODEL_3007) {
+               ret = regmap_register_patch(aic3x->regmap, aic3007_class_d,
+                                           ARRAY_SIZE(aic3007_class_d));
+               if (ret != 0)
+                       dev_err(&i2c->dev, "Failed to init class D: %d\n",
+                               ret);
+       }
+
        ret = snd_soc_register_codec(&i2c->dev,
                        &soc_codec_dev_aic3x, &aic3x_dai, 1);
        return ret;
+
+err_gpio:
+       if (gpio_is_valid(aic3x->gpio_reset) &&
+           !aic3x_is_shared_reset(aic3x))
+               gpio_free(aic3x->gpio_reset);
+err:
+       return ret;
 }
 
 static int aic3x_i2c_remove(struct i2c_client *client)
 {
+       struct aic3x_priv *aic3x = i2c_get_clientdata(client);
+
        snd_soc_unregister_codec(&client->dev);
+       if (gpio_is_valid(aic3x->gpio_reset) &&
+           !aic3x_is_shared_reset(aic3x)) {
+               gpio_set_value(aic3x->gpio_reset, 0);
+               gpio_free(aic3x->gpio_reset);
+       }
        return 0;
 }
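Two regmap facilities do most of the work in the aic3x conversion: regcache_mark_dirty()/regcache_cache_only()/regcache_sync() replace the driver's hand-maintained cache_sync and cache_only flags around power transitions, and regmap_register_patch() replaces aic3x_init_3007(), since a registered patch is replayed automatically whenever the cache is synced. A hedged sketch of the power flow, with the regulator and reset-GPIO handling omitted:

#include <linux/regmap.h>

/* Supplies going away: the chip loses its settings, so keep writes in
 * the cache and remember that everything must be replayed later.
 */
static void example_power_down(struct regmap *map)
{
	regcache_mark_dirty(map);
	regcache_cache_only(map, true);
}

/* Supplies back: allow hardware writes again and replay the cached
 * registers (plus any sequence registered with regmap_register_patch()).
 */
static void example_power_up(struct regmap *map)
{
	regcache_cache_only(map, false);
	regcache_sync(map);
}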
 
index 1e3884d6b3fbc7202cedb6fe478e5d00e71da49b..dfc51bb425da93b5dc6e2ac55af8ea3125b9e1b9 100644 (file)
 /* TWL4030 PMBR1 Register GPIO6 mux bits */
 #define TWL4030_GPIO6_PWM0_MUTE(value) ((value & 0x03) << 2)
 
-/* Shadow register used by the audio driver */
-#define TWL4030_REG_SW_SHADOW          0x4A
-#define TWL4030_CACHEREGNUM    (TWL4030_REG_SW_SHADOW + 1)
-
-/* TWL4030_REG_SW_SHADOW (0x4A) Fields */
-#define TWL4030_HFL_EN                 0x01
-#define TWL4030_HFR_EN                 0x02
+#define TWL4030_CACHEREGNUM    (TWL4030_REG_MISC_SET_2 + 1)
 
 /*
  * twl4030 register cache & default register settings
@@ -132,7 +126,6 @@ static const u8 twl4030_reg[TWL4030_CACHEREGNUM] = {
        0x00, /* REG_VIBRA_PWM_SET      (0x47)  */
        0x00, /* REG_ANAMIC_GAIN        (0x48)  */
        0x00, /* REG_MISC_SET_2         (0x49)  */
-       0x00, /* REG_SW_SHADOW          (0x4A)  - Shadow, non HW register */
 };
 
 /* codec private data */
@@ -198,42 +191,41 @@ static int twl4030_write(struct snd_soc_codec *codec,
        int write_to_reg = 0;
 
        twl4030_write_reg_cache(codec, reg, value);
-       if (likely(reg < TWL4030_REG_SW_SHADOW)) {
-               /* Decide if the given register can be written */
-               switch (reg) {
-               case TWL4030_REG_EAR_CTL:
-                       if (twl4030->earpiece_enabled)
-                               write_to_reg = 1;
-                       break;
-               case TWL4030_REG_PREDL_CTL:
-                       if (twl4030->predrivel_enabled)
-                               write_to_reg = 1;
-                       break;
-               case TWL4030_REG_PREDR_CTL:
-                       if (twl4030->predriver_enabled)
-                               write_to_reg = 1;
-                       break;
-               case TWL4030_REG_PRECKL_CTL:
-                       if (twl4030->carkitl_enabled)
-                               write_to_reg = 1;
-                       break;
-               case TWL4030_REG_PRECKR_CTL:
-                       if (twl4030->carkitr_enabled)
-                               write_to_reg = 1;
-                       break;
-               case TWL4030_REG_HS_GAIN_SET:
-                       if (twl4030->hsl_enabled || twl4030->hsr_enabled)
-                               write_to_reg = 1;
-                       break;
-               default:
-                       /* All other register can be written */
+       /* Decide if the given register can be written */
+       switch (reg) {
+       case TWL4030_REG_EAR_CTL:
+               if (twl4030->earpiece_enabled)
                        write_to_reg = 1;
-                       break;
-               }
-               if (write_to_reg)
-                       return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
-                                                   value, reg);
+               break;
+       case TWL4030_REG_PREDL_CTL:
+               if (twl4030->predrivel_enabled)
+                       write_to_reg = 1;
+               break;
+       case TWL4030_REG_PREDR_CTL:
+               if (twl4030->predriver_enabled)
+                       write_to_reg = 1;
+               break;
+       case TWL4030_REG_PRECKL_CTL:
+               if (twl4030->carkitl_enabled)
+                       write_to_reg = 1;
+               break;
+       case TWL4030_REG_PRECKR_CTL:
+               if (twl4030->carkitr_enabled)
+                       write_to_reg = 1;
+               break;
+       case TWL4030_REG_HS_GAIN_SET:
+               if (twl4030->hsl_enabled || twl4030->hsr_enabled)
+                       write_to_reg = 1;
+               break;
+       default:
+               /* All other register can be written */
+               write_to_reg = 1;
+               break;
        }
+       if (write_to_reg)
+               return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
+                                           value, reg);
+
        return 0;
 }
 
@@ -532,7 +524,7 @@ SOC_DAPM_ENUM("Route", twl4030_handsfreel_enum);
 
 /* Handsfree Left virtual mute */
 static const struct snd_kcontrol_new twl4030_dapm_handsfreelmute_control =
-       SOC_DAPM_SINGLE("Switch", TWL4030_REG_SW_SHADOW, 0, 1, 0);
+       SOC_DAPM_SINGLE_VIRT("Switch", 1);
 
 /* Handsfree Right */
 static const char *twl4030_handsfreer_texts[] =
@@ -548,7 +540,7 @@ SOC_DAPM_ENUM("Route", twl4030_handsfreer_enum);
 
 /* Handsfree Right virtual mute */
 static const struct snd_kcontrol_new twl4030_dapm_handsfreermute_control =
-       SOC_DAPM_SINGLE("Switch", TWL4030_REG_SW_SHADOW, 1, 1, 0);
+       SOC_DAPM_SINGLE_VIRT("Switch", 1);
 
 /* Vibra */
 /* Vibra audio path selection */
index 3c79dbb6c32323b36bc974616c5ff1157498f281..f2f4bcb2ff71527b6dde8b264be2c99de7bf4c37 100644 (file)
@@ -54,12 +54,7 @@ enum twl6040_dai_id {
 #define TWL6040_OUTHF_0dB 0x03
 #define TWL6040_OUTHF_M52dB 0x1D
 
-/* Shadow register used by the driver */
-#define TWL6040_REG_SW_SHADOW  0x2F
-#define TWL6040_CACHEREGNUM    (TWL6040_REG_SW_SHADOW + 1)
-
-/* TWL6040_REG_SW_SHADOW (0x2F) fields */
-#define TWL6040_EAR_PATH_ENABLE        0x01
+#define TWL6040_CACHEREGNUM    (TWL6040_REG_STATUS + 1)
 
 struct twl6040_jack_data {
        struct snd_soc_jack *jack;
@@ -135,8 +130,6 @@ static const u8 twl6040_reg[TWL6040_CACHEREGNUM] = {
        0x00, /* REG_HFOTRIM    0x2C    */
        0x09, /* REG_ACCCTL     0x2D    */
        0x00, /* REG_STATUS     0x2E (ro) */
-
-       0x00, /* REG_SW_SHADOW  0x2F - Shadow, non HW register */
 };
 
 /* List of registers to be restored after power up */
@@ -220,12 +213,8 @@ static int twl6040_read_reg_volatile(struct snd_soc_codec *codec,
        if (reg >= TWL6040_CACHEREGNUM)
                return -EIO;
 
-       if (likely(reg < TWL6040_REG_SW_SHADOW)) {
-               value = twl6040_reg_read(twl6040, reg);
-               twl6040_write_reg_cache(codec, reg, value);
-       } else {
-               value = twl6040_read_reg_cache(codec, reg);
-       }
+       value = twl6040_reg_read(twl6040, reg);
+       twl6040_write_reg_cache(codec, reg, value);
 
        return value;
 }
@@ -246,7 +235,7 @@ static bool twl6040_is_path_unmuted(struct snd_soc_codec *codec,
                return priv->dl2_unmuted;
        default:
                return 1;
-       };
+       }
 }
 
 /*
@@ -261,8 +250,7 @@ static int twl6040_write(struct snd_soc_codec *codec,
                return -EIO;
 
        twl6040_write_reg_cache(codec, reg, value);
-       if (likely(reg < TWL6040_REG_SW_SHADOW) &&
-           twl6040_is_path_unmuted(codec, reg))
+       if (twl6040_is_path_unmuted(codec, reg))
                return twl6040_reg_write(twl6040, reg, value);
        else
                return 0;
@@ -555,7 +543,7 @@ static const struct snd_kcontrol_new hfr_mux_controls =
        SOC_DAPM_ENUM("Route", twl6040_hf_enum[1]);
 
 static const struct snd_kcontrol_new ep_path_enable_control =
-       SOC_DAPM_SINGLE("Switch", TWL6040_REG_SW_SHADOW, 0, 1, 0);
+       SOC_DAPM_SINGLE_VIRT("Switch", 1);
 
 static const struct snd_kcontrol_new auxl_switch_control =
        SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFLCTL, 6, 1, 0);
@@ -1100,7 +1088,7 @@ static void twl6040_mute_path(struct snd_soc_codec *codec, enum twl6040_dai_id i
                break;
        default:
                break;
-       };
+       }
 }
 
 static int twl6040_digital_mute(struct snd_soc_dai *dai, int mute)
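In both the twl4030 and twl6040 hunks, the shadow register that existed only to back purely virtual switches disappears: SOC_DAPM_SINGLE_VIRT() creates a control bound to SND_SOC_NOPM, so toggling it reads and writes no hardware register. A small illustrative control of that kind:

#include <sound/soc.h>
#include <sound/soc-dapm.h>

/* Gates a DAPM path without touching any hardware register; the old
 * code had to invent a fake register address for the same effect.
 */
static const struct snd_kcontrol_new example_virt_mute =
	SOC_DAPM_SINGLE_VIRT("Switch", 1);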
index d2a092850283f36ed035f20e4e6cd0c51a20412b..48dc7d2fee36fc07c236f6e9a09ea74cdb38d824 100644 (file)
 
 #include "wm8400.h"
 
-/* Fake register for internal state */
-#define WM8400_INTDRIVBITS      (WM8400_REGISTER_COUNT + 1)
-#define WM8400_INMIXL_PWR                      0
-#define WM8400_AINLMUX_PWR                     1
-#define WM8400_INMIXR_PWR                      2
-#define WM8400_AINRMUX_PWR                     3
-
 static struct regulator_bulk_data power[] = {
        {
                .supply = "I2S1VDD",
@@ -74,32 +67,6 @@ struct wm8400_priv {
        int fll_in, fll_out;
 };
 
-static inline unsigned int wm8400_read(struct snd_soc_codec *codec,
-                                      unsigned int reg)
-{
-       struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
-
-       if (reg == WM8400_INTDRIVBITS)
-               return wm8400->fake_register;
-       else
-               return wm8400_reg_read(wm8400->wm8400, reg);
-}
-
-/*
- * write to the wm8400 register space
- */
-static int wm8400_write(struct snd_soc_codec *codec, unsigned int reg,
-       unsigned int value)
-{
-       struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
-
-       if (reg == WM8400_INTDRIVBITS) {
-               wm8400->fake_register = value;
-               return 0;
-       } else
-               return wm8400_set_bits(wm8400->wm8400, reg, 0xffff, value);
-}
-
 static void wm8400_codec_reset(struct snd_soc_codec *codec)
 {
        struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec);
@@ -352,32 +319,6 @@ SOC_SINGLE("RIN34 Mute Switch", WM8400_RIGHT_LINE_INPUT_3_4_VOLUME,
  * _DAPM_ Controls
  */
 
-static int inmixer_event (struct snd_soc_dapm_widget *w,
-       struct snd_kcontrol *kcontrol, int event)
-{
-       u16 reg, fakepower;
-
-       reg = snd_soc_read(w->codec, WM8400_POWER_MANAGEMENT_2);
-       fakepower = snd_soc_read(w->codec, WM8400_INTDRIVBITS);
-
-       if (fakepower & ((1 << WM8400_INMIXL_PWR) |
-               (1 << WM8400_AINLMUX_PWR))) {
-               reg |= WM8400_AINL_ENA;
-       } else {
-               reg &= ~WM8400_AINL_ENA;
-       }
-
-       if (fakepower & ((1 << WM8400_INMIXR_PWR) |
-               (1 << WM8400_AINRMUX_PWR))) {
-               reg |= WM8400_AINR_ENA;
-       } else {
-               reg &= ~WM8400_AINR_ENA;
-       }
-       snd_soc_write(w->codec, WM8400_POWER_MANAGEMENT_2, reg);
-
-       return 0;
-}
-
 static int outmixer_event (struct snd_soc_dapm_widget *w,
        struct snd_kcontrol * kcontrol, int event)
 {
@@ -658,27 +599,26 @@ SND_SOC_DAPM_MIXER("RIN34 PGA", WM8400_POWER_MANAGEMENT_2,
                   0, &wm8400_dapm_rin34_pga_controls[0],
                   ARRAY_SIZE(wm8400_dapm_rin34_pga_controls)),
 
+SND_SOC_DAPM_SUPPLY("INL", WM8400_POWER_MANAGEMENT_2, WM8400_AINL_ENA_SHIFT,
+                   0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("INR", WM8400_POWER_MANAGEMENT_2, WM8400_AINR_ENA_SHIFT,
+                   0, NULL, 0),
+
 /* INMIXL */
-SND_SOC_DAPM_MIXER_E("INMIXL", WM8400_INTDRIVBITS, WM8400_INMIXL_PWR, 0,
+SND_SOC_DAPM_MIXER("INMIXL", SND_SOC_NOPM, 0, 0,
        &wm8400_dapm_inmixl_controls[0],
-       ARRAY_SIZE(wm8400_dapm_inmixl_controls),
-       inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+       ARRAY_SIZE(wm8400_dapm_inmixl_controls)),
 
 /* AINLMUX */
-SND_SOC_DAPM_MUX_E("AILNMUX", WM8400_INTDRIVBITS, WM8400_AINLMUX_PWR, 0,
-       &wm8400_dapm_ainlmux_controls, inmixer_event,
-       SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_MUX("AILNMUX", SND_SOC_NOPM, 0, 0, &wm8400_dapm_ainlmux_controls),
 
 /* INMIXR */
-SND_SOC_DAPM_MIXER_E("INMIXR", WM8400_INTDRIVBITS, WM8400_INMIXR_PWR, 0,
+SND_SOC_DAPM_MIXER("INMIXR", SND_SOC_NOPM, 0, 0,
        &wm8400_dapm_inmixr_controls[0],
-       ARRAY_SIZE(wm8400_dapm_inmixr_controls),
-       inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+       ARRAY_SIZE(wm8400_dapm_inmixr_controls)),
 
 /* AINRMUX */
-SND_SOC_DAPM_MUX_E("AIRNMUX", WM8400_INTDRIVBITS, WM8400_AINRMUX_PWR, 0,
-       &wm8400_dapm_ainrmux_controls, inmixer_event,
-       SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_MUX("AIRNMUX", SND_SOC_NOPM, 0, 0, &wm8400_dapm_ainrmux_controls),
 
 /* Output Side */
 /* DACs */
@@ -789,11 +729,13 @@ static const struct snd_soc_dapm_route wm8400_dapm_routes[] = {
        {"LIN34 PGA", "LIN3 Switch", "LIN3"},
        {"LIN34 PGA", "LIN4 Switch", "LIN4/RXN"},
        /* INMIXL */
+       {"INMIXL", NULL, "INL"},
        {"INMIXL", "Record Left Volume", "LOMIX"},
        {"INMIXL", "LIN2 Volume", "LIN2"},
        {"INMIXL", "LINPGA12 Switch", "LIN12 PGA"},
        {"INMIXL", "LINPGA34 Switch", "LIN34 PGA"},
        /* AILNMUX */
+       {"AILNMUX", NULL, "INL"},
        {"AILNMUX", "INMIXL Mix", "INMIXL"},
        {"AILNMUX", "DIFFINL Mix", "LIN12 PGA"},
        {"AILNMUX", "DIFFINL Mix", "LIN34 PGA"},
@@ -808,12 +750,14 @@ static const struct snd_soc_dapm_route wm8400_dapm_routes[] = {
        /* RIN34 PGA */
        {"RIN34 PGA", "RIN3 Switch", "RIN3"},
        {"RIN34 PGA", "RIN4 Switch", "RIN4/RXP"},
-       /* INMIXL */
+       /* INMIXR */
+       {"INMIXR", NULL, "INR"},
        {"INMIXR", "Record Right Volume", "ROMIX"},
        {"INMIXR", "RIN2 Volume", "RIN2"},
        {"INMIXR", "RINPGA12 Switch", "RIN12 PGA"},
        {"INMIXR", "RINPGA34 Switch", "RIN34 PGA"},
        /* AIRNMUX */
+       {"AIRNMUX", NULL, "INR"},
        {"AIRNMUX", "INMIXR Mix", "INMIXR"},
        {"AIRNMUX", "DIFFINR Mix", "RIN12 PGA"},
        {"AIRNMUX", "DIFFINR Mix", "RIN34 PGA"},
@@ -1365,9 +1309,12 @@ static int wm8400_codec_probe(struct snd_soc_codec *codec)
                return -ENOMEM;
 
        snd_soc_codec_set_drvdata(codec, priv);
-       codec->control_data = priv->wm8400 = wm8400;
+       priv->wm8400 = wm8400;
+       codec->control_data = wm8400->regmap;
        priv->codec = codec;
 
+       snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_REGMAP);
+
        ret = devm_regulator_bulk_get(wm8400->dev,
                                 ARRAY_SIZE(power), &power[0]);
        if (ret != 0) {
@@ -1414,8 +1361,6 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8400 = {
        .remove =       wm8400_codec_remove,
        .suspend =      wm8400_suspend,
        .resume =       wm8400_resume,
-       .read = snd_soc_read,
-       .write = wm8400_write,
        .set_bias_level = wm8400_set_bias_level,
 
        .controls = wm8400_snd_controls,
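The wm8400 change replaces the fake-register bookkeeping and the inmixer_event() handler with DAPM supply widgets: the AINL/AINR enable bits become SND_SOC_DAPM_SUPPLY widgets, and routes declare which input widgets depend on them, so DAPM raises and drops the bits on demand. A hedged sketch with placeholder register and shift names:

#include <sound/soc.h>
#include <sound/soc-dapm.h>

#define EX_PWR_REG	0x02	/* placeholder power-management register */
#define EX_AINL_SHIFT	2	/* placeholder enable-bit position */

static const struct snd_soc_dapm_widget example_widgets[] = {
	/* Real enable bit, modelled as a supply rather than as a fake register */
	SND_SOC_DAPM_SUPPLY("INL", EX_PWR_REG, EX_AINL_SHIFT, 0, NULL, 0),
	/* The mixer itself needs no register bit of its own */
	SND_SOC_DAPM_MIXER("INMIXL", SND_SOC_NOPM, 0, 0, NULL, 0),
};

static const struct snd_soc_dapm_route example_routes[] = {
	{ "INMIXL", NULL, "INL" },	/* mixer active => supply enabled */
};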
index 11d80f3b61372d357870b44432d5e1f1c9f6e25c..2bf9ee7c54078d26854540fc2502ab387f42748c 100644 (file)
@@ -3242,7 +3242,7 @@ static void wm8962_free_beep(struct snd_soc_codec *codec)
 }
 #endif
 
-static void wm8962_set_gpio_mode(struct snd_soc_codec *codec, int gpio)
+static void wm8962_set_gpio_mode(struct wm8962_priv *wm8962, int gpio)
 {
        int mask = 0;
        int val = 0;
@@ -3263,8 +3263,8 @@ static void wm8962_set_gpio_mode(struct snd_soc_codec *codec, int gpio)
        }
 
        if (mask)
-               snd_soc_update_bits(codec, WM8962_ANALOGUE_CLOCKING1,
-                                   mask, val);
+               regmap_update_bits(wm8962->regmap, WM8962_ANALOGUE_CLOCKING1,
+                                  mask, val);
 }
 
 #ifdef CONFIG_GPIOLIB
@@ -3276,7 +3276,6 @@ static inline struct wm8962_priv *gpio_to_wm8962(struct gpio_chip *chip)
 static int wm8962_gpio_request(struct gpio_chip *chip, unsigned offset)
 {
        struct wm8962_priv *wm8962 = gpio_to_wm8962(chip);
-       struct snd_soc_codec *codec = wm8962->codec;
 
        /* The WM8962 GPIOs aren't linearly numbered.  For simplicity
         * we export linear numbers and error out if the unsupported
@@ -3292,7 +3291,7 @@ static int wm8962_gpio_request(struct gpio_chip *chip, unsigned offset)
                return -EINVAL;
        }
 
-       wm8962_set_gpio_mode(codec, offset + 1);
+       wm8962_set_gpio_mode(wm8962, offset + 1);
 
        return 0;
 }
@@ -3376,8 +3375,7 @@ static int wm8962_probe(struct snd_soc_codec *codec)
 {
        int ret;
        struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
-       struct wm8962_pdata *pdata = &wm8962->pdata;
-       int i, trigger, irq_pol;
+       int i;
        bool dmicclk, dmicdat;
 
        wm8962->codec = codec;
@@ -3409,75 +3407,6 @@ static int wm8962_probe(struct snd_soc_codec *codec)
                }
        }
 
-       /* SYSCLK defaults to on; make sure it is off so we can safely
-        * write to registers if the device is declocked.
-        */
-       snd_soc_update_bits(codec, WM8962_CLOCKING2, WM8962_SYSCLK_ENA, 0);
-
-       /* Ensure we have soft control over all registers */
-       snd_soc_update_bits(codec, WM8962_CLOCKING2,
-                           WM8962_CLKREG_OVD, WM8962_CLKREG_OVD);
-
-       /* Ensure that the oscillator and PLLs are disabled */
-       snd_soc_update_bits(codec, WM8962_PLL2,
-                           WM8962_OSC_ENA | WM8962_PLL2_ENA | WM8962_PLL3_ENA,
-                           0);
-
-       /* Apply static configuration for GPIOs */
-       for (i = 0; i < ARRAY_SIZE(pdata->gpio_init); i++)
-               if (pdata->gpio_init[i]) {
-                       wm8962_set_gpio_mode(codec, i + 1);
-                       snd_soc_write(codec, 0x200 + i,
-                                       pdata->gpio_init[i] & 0xffff);
-               }
-
-
-       /* Put the speakers into mono mode? */
-       if (pdata->spk_mono)
-               snd_soc_update_bits(codec, WM8962_CLASS_D_CONTROL_2,
-                               WM8962_SPK_MONO_MASK, WM8962_SPK_MONO);
-
-       /* Micbias setup, detection enable and detection
-        * threasholds. */
-       if (pdata->mic_cfg)
-               snd_soc_update_bits(codec, WM8962_ADDITIONAL_CONTROL_4,
-                                   WM8962_MICDET_ENA |
-                                   WM8962_MICDET_THR_MASK |
-                                   WM8962_MICSHORT_THR_MASK |
-                                   WM8962_MICBIAS_LVL,
-                                   pdata->mic_cfg);
-
-       /* Latch volume update bits */
-       snd_soc_update_bits(codec, WM8962_LEFT_INPUT_VOLUME,
-                           WM8962_IN_VU, WM8962_IN_VU);
-       snd_soc_update_bits(codec, WM8962_RIGHT_INPUT_VOLUME,
-                           WM8962_IN_VU, WM8962_IN_VU);
-       snd_soc_update_bits(codec, WM8962_LEFT_ADC_VOLUME,
-                           WM8962_ADC_VU, WM8962_ADC_VU);
-       snd_soc_update_bits(codec, WM8962_RIGHT_ADC_VOLUME,
-                           WM8962_ADC_VU, WM8962_ADC_VU);
-       snd_soc_update_bits(codec, WM8962_LEFT_DAC_VOLUME,
-                           WM8962_DAC_VU, WM8962_DAC_VU);
-       snd_soc_update_bits(codec, WM8962_RIGHT_DAC_VOLUME,
-                           WM8962_DAC_VU, WM8962_DAC_VU);
-       snd_soc_update_bits(codec, WM8962_SPKOUTL_VOLUME,
-                           WM8962_SPKOUT_VU, WM8962_SPKOUT_VU);
-       snd_soc_update_bits(codec, WM8962_SPKOUTR_VOLUME,
-                           WM8962_SPKOUT_VU, WM8962_SPKOUT_VU);
-       snd_soc_update_bits(codec, WM8962_HPOUTL_VOLUME,
-                           WM8962_HPOUT_VU, WM8962_HPOUT_VU);
-       snd_soc_update_bits(codec, WM8962_HPOUTR_VOLUME,
-                           WM8962_HPOUT_VU, WM8962_HPOUT_VU);
-
-       /* Stereo control for EQ */
-       snd_soc_update_bits(codec, WM8962_EQ1, WM8962_EQ_SHARED_COEFF, 0);
-
-       /* Don't debouce interrupts so we don't need SYSCLK */
-       snd_soc_update_bits(codec, WM8962_IRQ_DEBOUNCE,
-                           WM8962_FLL_LOCK_DB | WM8962_PLL3_LOCK_DB |
-                           WM8962_PLL2_LOCK_DB | WM8962_TEMP_SHUT_DB,
-                           0);
-
        wm8962_add_widgets(codec);
 
        /* Save boards having to disable DMIC when not in use */
@@ -3506,36 +3435,6 @@ static int wm8962_probe(struct snd_soc_codec *codec)
        wm8962_init_beep(codec);
        wm8962_init_gpio(codec);
 
-       if (wm8962->irq) {
-               if (pdata->irq_active_low) {
-                       trigger = IRQF_TRIGGER_LOW;
-                       irq_pol = WM8962_IRQ_POL;
-               } else {
-                       trigger = IRQF_TRIGGER_HIGH;
-                       irq_pol = 0;
-               }
-
-               snd_soc_update_bits(codec, WM8962_INTERRUPT_CONTROL,
-                                   WM8962_IRQ_POL, irq_pol);
-
-               ret = request_threaded_irq(wm8962->irq, NULL, wm8962_irq,
-                                          trigger | IRQF_ONESHOT,
-                                          "wm8962", codec->dev);
-               if (ret != 0) {
-                       dev_err(codec->dev, "Failed to request IRQ %d: %d\n",
-                               wm8962->irq, ret);
-                       wm8962->irq = 0;
-                       /* Non-fatal */
-               } else {
-                       /* Enable some IRQs by default */
-                       snd_soc_update_bits(codec,
-                                           WM8962_INTERRUPT_STATUS_2_MASK,
-                                           WM8962_FLL_LOCK_EINT |
-                                           WM8962_TEMP_SHUT_EINT |
-                                           WM8962_FIFOS_ERR_EINT, 0);
-               }
-       }
-
        return 0;
 }
 
@@ -3544,9 +3443,6 @@ static int wm8962_remove(struct snd_soc_codec *codec)
        struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
        int i;
 
-       if (wm8962->irq)
-               free_irq(wm8962->irq, codec);
-
        cancel_delayed_work_sync(&wm8962->mic_work);
 
        wm8962_free_gpio(codec);
@@ -3619,7 +3515,7 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
        struct wm8962_pdata *pdata = dev_get_platdata(&i2c->dev);
        struct wm8962_priv *wm8962;
        unsigned int reg;
-       int ret, i;
+       int ret, i, irq_pol, trigger;
 
        wm8962 = devm_kzalloc(&i2c->dev, sizeof(struct wm8962_priv),
                              GFP_KERNEL);
@@ -3704,6 +3600,77 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
                goto err_enable;
        }
 
+       /* SYSCLK defaults to on; make sure it is off so we can safely
+        * write to registers if the device is declocked.
+        */
+       regmap_update_bits(wm8962->regmap, WM8962_CLOCKING2,
+                          WM8962_SYSCLK_ENA, 0);
+
+       /* Ensure we have soft control over all registers */
+       regmap_update_bits(wm8962->regmap, WM8962_CLOCKING2,
+                          WM8962_CLKREG_OVD, WM8962_CLKREG_OVD);
+
+       /* Ensure that the oscillator and PLLs are disabled */
+       regmap_update_bits(wm8962->regmap, WM8962_PLL2,
+                          WM8962_OSC_ENA | WM8962_PLL2_ENA | WM8962_PLL3_ENA,
+                          0);
+
+       /* Apply static configuration for GPIOs */
+       for (i = 0; i < ARRAY_SIZE(pdata->gpio_init); i++)
+               if (pdata->gpio_init[i]) {
+                       wm8962_set_gpio_mode(wm8962, i + 1);
+                       regmap_write(wm8962->regmap, 0x200 + i,
+                                    pdata->gpio_init[i] & 0xffff);
+               }
+
+
+       /* Put the speakers into mono mode? */
+       if (pdata->spk_mono)
+               regmap_update_bits(wm8962->regmap, WM8962_CLASS_D_CONTROL_2,
+                                  WM8962_SPK_MONO_MASK, WM8962_SPK_MONO);
+
+       /* Micbias setup, detection enable and detection
+        * threasholds. */
+       if (pdata->mic_cfg)
+               regmap_update_bits(wm8962->regmap, WM8962_ADDITIONAL_CONTROL_4,
+                                  WM8962_MICDET_ENA |
+                                  WM8962_MICDET_THR_MASK |
+                                  WM8962_MICSHORT_THR_MASK |
+                                  WM8962_MICBIAS_LVL,
+                                  pdata->mic_cfg);
+
+       /* Latch volume update bits */
+       regmap_update_bits(wm8962->regmap, WM8962_LEFT_INPUT_VOLUME,
+                          WM8962_IN_VU, WM8962_IN_VU);
+       regmap_update_bits(wm8962->regmap, WM8962_RIGHT_INPUT_VOLUME,
+                          WM8962_IN_VU, WM8962_IN_VU);
+       regmap_update_bits(wm8962->regmap, WM8962_LEFT_ADC_VOLUME,
+                          WM8962_ADC_VU, WM8962_ADC_VU);
+       regmap_update_bits(wm8962->regmap, WM8962_RIGHT_ADC_VOLUME,
+                          WM8962_ADC_VU, WM8962_ADC_VU);
+       regmap_update_bits(wm8962->regmap, WM8962_LEFT_DAC_VOLUME,
+                          WM8962_DAC_VU, WM8962_DAC_VU);
+       regmap_update_bits(wm8962->regmap, WM8962_RIGHT_DAC_VOLUME,
+                          WM8962_DAC_VU, WM8962_DAC_VU);
+       regmap_update_bits(wm8962->regmap, WM8962_SPKOUTL_VOLUME,
+                          WM8962_SPKOUT_VU, WM8962_SPKOUT_VU);
+       regmap_update_bits(wm8962->regmap, WM8962_SPKOUTR_VOLUME,
+                          WM8962_SPKOUT_VU, WM8962_SPKOUT_VU);
+       regmap_update_bits(wm8962->regmap, WM8962_HPOUTL_VOLUME,
+                          WM8962_HPOUT_VU, WM8962_HPOUT_VU);
+       regmap_update_bits(wm8962->regmap, WM8962_HPOUTR_VOLUME,
+                          WM8962_HPOUT_VU, WM8962_HPOUT_VU);
+
+       /* Stereo control for EQ */
+       regmap_update_bits(wm8962->regmap, WM8962_EQ1,
+                          WM8962_EQ_SHARED_COEFF, 0);
+
+       /* Don't debounce interrupts so we don't need SYSCLK */
+       regmap_update_bits(wm8962->regmap, WM8962_IRQ_DEBOUNCE,
+                          WM8962_FLL_LOCK_DB | WM8962_PLL3_LOCK_DB |
+                          WM8962_PLL2_LOCK_DB | WM8962_TEMP_SHUT_DB,
+                          0);
+
        if (wm8962->pdata.in4_dc_measure) {
                ret = regmap_register_patch(wm8962->regmap,
                                            wm8962_dc_measure,
@@ -3714,6 +3681,37 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
                                ret);
        }
 
+       if (wm8962->irq) {
+               if (pdata->irq_active_low) {
+                       trigger = IRQF_TRIGGER_LOW;
+                       irq_pol = WM8962_IRQ_POL;
+               } else {
+                       trigger = IRQF_TRIGGER_HIGH;
+                       irq_pol = 0;
+               }
+
+               regmap_update_bits(wm8962->regmap, WM8962_INTERRUPT_CONTROL,
+                                  WM8962_IRQ_POL, irq_pol);
+
+               ret = devm_request_threaded_irq(&i2c->dev, wm8962->irq, NULL,
+                                               wm8962_irq,
+                                               trigger | IRQF_ONESHOT,
+                                               "wm8962", &i2c->dev);
+               if (ret != 0) {
+                       dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n",
+                               wm8962->irq, ret);
+                       wm8962->irq = 0;
+                       /* Non-fatal */
+               } else {
+                       /* Enable some IRQs by default */
+                       regmap_update_bits(wm8962->regmap,
+                                          WM8962_INTERRUPT_STATUS_2_MASK,
+                                          WM8962_FLL_LOCK_EINT |
+                                          WM8962_TEMP_SHUT_EINT |
+                                          WM8962_FIFOS_ERR_EINT, 0);
+               }
+       }
+
        pm_runtime_enable(&i2c->dev);
        pm_request_idle(&i2c->dev);
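
The hunk above moves the WM8962 interrupt setup into the I2C probe and switches it to devm_request_threaded_irq(), which is why the matching free_irq() disappears from wm8962_remove() earlier in this diff. A minimal sketch of the pattern, not taken from this patch; MYCODEC_IRQ_CTRL, MYCODEC_IRQ_POL and the mycodec_* names are placeholders:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

#define MYCODEC_IRQ_CTRL       0x30            /* hypothetical register */
#define MYCODEC_IRQ_POL        0x0004          /* hypothetical polarity bit */

static irqreturn_t mycodec_irq(int irq, void *data)
{
        /* read and acknowledge the interrupt status over regmap here */
        return IRQ_HANDLED;
}

static int mycodec_request_irq(struct device *dev, struct regmap *map,
                               int irq, bool active_low)
{
        unsigned long trigger = active_low ? IRQF_TRIGGER_LOW
                                           : IRQF_TRIGGER_HIGH;

        /* tell the codec which level its IRQ line should drive */
        regmap_update_bits(map, MYCODEC_IRQ_CTRL, MYCODEC_IRQ_POL,
                           active_low ? MYCODEC_IRQ_POL : 0);

        /* device-managed: no free_irq() needed in the remove path */
        return devm_request_threaded_irq(dev, irq, NULL, mycodec_irq,
                                         trigger | IRQF_ONESHOT,
                                         "mycodec", dev);
}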
 
index c82f89c9475b2f7301ecd1d753c65d85431f3cc6..95970f5db3eca228df66a55f5e357f14b26209c9 100644 (file)
@@ -1,9 +1,10 @@
 config SND_DAVINCI_SOC
-       tristate "SoC Audio for the TI DAVINCI chip"
-       depends on ARCH_DAVINCI
+       tristate "SoC Audio for the TI DAVINCI or AM33XX chip"
+       depends on ARCH_DAVINCI || SOC_AM33XX
        help
+         Platform driver for DaVinci or AM33xx devices.
          Say Y or M if you want to add support for codecs attached to
-         the DAVINCI AC97 or I2S interface. You will also need
+         the DAVINCI AC97, I2S, or McASP interface. You will also need
          to select the audio interfaces to support below.
 
 config SND_DAVINCI_SOC_I2S
@@ -15,6 +16,17 @@ config SND_DAVINCI_SOC_MCASP
 config SND_DAVINCI_SOC_VCIF
        tristate
 
+config SND_AM33XX_SOC_EVM
+       tristate "SoC Audio for the AM33XX chip based boards"
+       depends on SND_DAVINCI_SOC && SOC_AM33XX
+       select SND_SOC_TLV320AIC3X
+       select SND_DAVINCI_SOC_MCASP
+       help
+         Say Y or M if you want to add support for SoC audio on AM33XX
+         boards using McASP and TLV320AIC3X codec. For example AM335X-EVM,
+         AM335X-EVMSK, and BeagleBone with AudioCape boards have this
+         setup.
+
 config SND_DAVINCI_SOC_EVM
        tristate "SoC Audio support for DaVinci DM6446, DM355 or DM365 EVM"
        depends on SND_DAVINCI_SOC
index a396ab6d6d5efaa6f11ec5d1da6716cb4a1db0ac..bc81e79fc301b8357cddd5edf7e707beb33a0723 100644 (file)
@@ -13,6 +13,7 @@ obj-$(CONFIG_SND_DAVINCI_SOC_VCIF) += snd-soc-davinci-vcif.o
 snd-soc-evm-objs := davinci-evm.o
 
 obj-$(CONFIG_SND_DAVINCI_SOC_EVM) += snd-soc-evm.o
+obj-$(CONFIG_SND_AM33XX_SOC_EVM) += snd-soc-evm.o
 obj-$(CONFIG_SND_DM6467_SOC_EVM) += snd-soc-evm.o
 obj-$(CONFIG_SND_DA830_SOC_EVM) += snd-soc-evm.o
 obj-$(CONFIG_SND_DA850_SOC_EVM) += snd-soc-evm.o
index fd7c45b9ed5a3836323a4d54af31df6f2929d050..623eb5e7c08981c08f31f2d51863c0426c1a7416 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 #include <linux/platform_data/edma.h>
 #include <linux/i2c.h>
+#include <linux/of_platform.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
 #include <asm/dma.h>
 #include <asm/mach-types.h>
 
+#include <linux/edma.h>
+
 #include "davinci-pcm.h"
 #include "davinci-i2s.h"
 #include "davinci-mcasp.h"
 
+struct snd_soc_card_drvdata_davinci {
+       unsigned sysclk;
+};
+
 #define AUDIO_FORMAT (SND_SOC_DAIFMT_DSP_B | \
                SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF)
 static int evm_hw_params(struct snd_pcm_substream *substream,
@@ -35,27 +42,11 @@ static int evm_hw_params(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct snd_soc_dai *codec_dai = rtd->codec_dai;
        struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+       struct snd_soc_codec *codec = rtd->codec;
+       struct snd_soc_card *soc_card = codec->card;
        int ret = 0;
-       unsigned sysclk;
-
-       /* ASP1 on DM355 EVM is clocked by an external oscillator */
-       if (machine_is_davinci_dm355_evm() || machine_is_davinci_dm6467_evm() ||
-           machine_is_davinci_dm365_evm())
-               sysclk = 27000000;
-
-       /* ASP0 in DM6446 EVM is clocked by U55, as configured by
-        * board-dm644x-evm.c using GPIOs from U18.  There are six
-        * options; here we "know" we use a 48 KHz sample rate.
-        */
-       else if (machine_is_davinci_evm())
-               sysclk = 12288000;
-
-       else if (machine_is_davinci_da830_evm() ||
-                               machine_is_davinci_da850_evm())
-               sysclk = 24576000;
-
-       else
-               return -EINVAL;
+       unsigned sysclk = ((struct snd_soc_card_drvdata_davinci *)
+                          snd_soc_card_get_drvdata(soc_card))->sysclk;
 
        /* set codec DAI configuration */
        ret = snd_soc_dai_set_fmt(codec_dai, AUDIO_FORMAT);
@@ -133,13 +124,22 @@ static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_codec *codec = rtd->codec;
        struct snd_soc_dapm_context *dapm = &codec->dapm;
+       struct device_node *np = codec->card->dev->of_node;
+       int ret;
 
        /* Add davinci-evm specific widgets */
        snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
                                  ARRAY_SIZE(aic3x_dapm_widgets));
 
-       /* Set up davinci-evm specific audio path audio_map */
-       snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+       if (np) {
+               ret = snd_soc_of_parse_audio_routing(codec->card,
+                                                       "ti,audio-routing");
+               if (ret)
+                       return ret;
+       } else {
+               /* Set up davinci-evm specific audio path audio_map */
+               snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+       }
 
        /* not connected */
        snd_soc_dapm_disable_pin(dapm, "MONO_LOUT");
@@ -243,35 +243,65 @@ static struct snd_soc_dai_link da850_evm_dai = {
 };
 
 /* davinci dm6446 evm audio machine driver */
+/*
+ * ASP0 in DM6446 EVM is clocked by U55, as configured by
+ * board-dm644x-evm.c using GPIOs from U18.  There are six
+ * options; here we "know" we use a 48 KHz sample rate.
+ */
+static struct snd_soc_card_drvdata_davinci dm6446_snd_soc_card_drvdata = {
+       .sysclk = 12288000,
+};
+
 static struct snd_soc_card dm6446_snd_soc_card_evm = {
        .name = "DaVinci DM6446 EVM",
        .owner = THIS_MODULE,
        .dai_link = &dm6446_evm_dai,
        .num_links = 1,
+       .drvdata = &dm6446_snd_soc_card_drvdata,
 };
 
 /* davinci dm355 evm audio machine driver */
+/* ASP1 on DM355 EVM is clocked by an external oscillator */
+static struct snd_soc_card_drvdata_davinci dm355_snd_soc_card_drvdata = {
+       .sysclk = 27000000,
+};
+
 static struct snd_soc_card dm355_snd_soc_card_evm = {
        .name = "DaVinci DM355 EVM",
        .owner = THIS_MODULE,
        .dai_link = &dm355_evm_dai,
        .num_links = 1,
+       .drvdata = &dm355_snd_soc_card_drvdata,
 };
 
 /* davinci dm365 evm audio machine driver */
+static struct snd_soc_card_drvdata_davinci dm365_snd_soc_card_drvdata = {
+       .sysclk = 27000000,
+};
+
 static struct snd_soc_card dm365_snd_soc_card_evm = {
        .name = "DaVinci DM365 EVM",
        .owner = THIS_MODULE,
        .dai_link = &dm365_evm_dai,
        .num_links = 1,
+       .drvdata = &dm365_snd_soc_card_drvdata,
 };
 
 /* davinci dm6467 evm audio machine driver */
+static struct snd_soc_card_drvdata_davinci dm6467_snd_soc_card_drvdata = {
+       .sysclk = 27000000,
+};
+
 static struct snd_soc_card dm6467_snd_soc_card_evm = {
        .name = "DaVinci DM6467 EVM",
        .owner = THIS_MODULE,
        .dai_link = dm6467_evm_dai,
        .num_links = ARRAY_SIZE(dm6467_evm_dai),
+       .drvdata = &dm6467_snd_soc_card_drvdata,
+};
+
+static struct snd_soc_card_drvdata_davinci da830_snd_soc_card_drvdata = {
+       .sysclk = 24576000,
 };
 
 static struct snd_soc_card da830_snd_soc_card = {
@@ -279,6 +309,11 @@ static struct snd_soc_card da830_snd_soc_card = {
        .owner = THIS_MODULE,
        .dai_link = &da830_evm_dai,
        .num_links = 1,
+       .drvdata = &da830_snd_soc_card_drvdata,
+};
+
+static struct snd_soc_card_drvdata_davinci da850_snd_soc_card_drvdata = {
+       .sysclk = 24576000,
 };
 
 static struct snd_soc_card da850_snd_soc_card = {
@@ -286,8 +321,101 @@ static struct snd_soc_card da850_snd_soc_card = {
        .owner = THIS_MODULE,
        .dai_link = &da850_evm_dai,
        .num_links = 1,
+       .drvdata = &da850_snd_soc_card_drvdata,
+};
+
+#if defined(CONFIG_OF)
+
+/*
+ * The struct is used as a placeholder. It will be completely
+ * filled with data from the DT node.
+ */
+static struct snd_soc_dai_link evm_dai_tlv320aic3x = {
+       .name           = "TLV320AIC3X",
+       .stream_name    = "AIC3X",
+       .codec_dai_name = "tlv320aic3x-hifi",
+       .ops            = &evm_ops,
+       .init           = evm_aic3x_init,
+};
+
+static const struct of_device_id davinci_evm_dt_ids[] = {
+       {
+               .compatible = "ti,da830-evm-audio",
+               .data = (void *) &evm_dai_tlv320aic3x,
+       },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, davinci_evm_dt_ids);
+
+/* davinci evm audio machine driver */
+static struct snd_soc_card evm_soc_card = {
+       .owner = THIS_MODULE,
+       .num_links = 1,
 };
 
+static int davinci_evm_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *match =
+               of_match_device(of_match_ptr(davinci_evm_dt_ids), &pdev->dev);
+       struct snd_soc_dai_link *dai = (struct snd_soc_dai_link *) match->data;
+       struct snd_soc_card_drvdata_davinci *drvdata = NULL;
+       int ret = 0;
+
+       evm_soc_card.dai_link = dai;
+
+       dai->codec_of_node = of_parse_phandle(np, "ti,audio-codec", 0);
+       if (!dai->codec_of_node)
+               return -EINVAL;
+
+       dai->cpu_of_node = of_parse_phandle(np, "ti,mcasp-controller", 0);
+       if (!dai->cpu_of_node)
+               return -EINVAL;
+
+       dai->platform_of_node = dai->cpu_of_node;
+
+       evm_soc_card.dev = &pdev->dev;
+       ret = snd_soc_of_parse_card_name(&evm_soc_card, "ti,model");
+       if (ret)
+               return ret;
+
+       drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
+       ret = of_property_read_u32(np, "ti,codec-clock-rate", &drvdata->sysclk);
+       if (ret < 0)
+               return -EINVAL;
+
+       snd_soc_card_set_drvdata(&evm_soc_card, drvdata);
+       ret = devm_snd_soc_register_card(&pdev->dev, &evm_soc_card);
+
+       if (ret)
+               dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
+
+       return ret;
+}
+
+static int davinci_evm_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+       snd_soc_unregister_card(card);
+
+       return 0;
+}
+
+static struct platform_driver davinci_evm_driver = {
+       .probe          = davinci_evm_probe,
+       .remove         = davinci_evm_remove,
+       .driver         = {
+               .name   = "davinci_evm",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(davinci_evm_dt_ids),
+       },
+};
+#endif
+
 static struct platform_device *evm_snd_device;
 
 static int __init evm_init(void)
@@ -296,6 +424,15 @@ static int __init evm_init(void)
        int index;
        int ret;
 
+       /*
+        * If dtb is there, the devices will be created dynamically.
+        * Only register the platform driver structure.
+        */
+#if defined(CONFIG_OF)
+       if (of_have_populated_dt())
+               return platform_driver_register(&davinci_evm_driver);
+#endif
+
        if (machine_is_davinci_evm()) {
                evm_snd_dev_data = &dm6446_snd_soc_card_evm;
                index = 0;
@@ -331,6 +468,13 @@ static int __init evm_init(void)
 
 static void __exit evm_exit(void)
 {
+#if defined(CONFIG_OF)
+       if (of_have_populated_dt()) {
+               platform_driver_unregister(&davinci_evm_driver);
+               return;
+       }
+#endif
+
        platform_device_unregister(evm_snd_device);
 }
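
With the per-board sysclk now carried in struct snd_soc_card_drvdata_davinci (set via .drvdata or snd_soc_card_set_drvdata() in the DT probe above), evm_hw_params() earlier in this file only has to read it back. A minimal sketch of that lookup, not from the patch; the helper name and the example rate are illustrative:

#include <sound/pcm.h>
#include <sound/soc.h>

struct snd_soc_card_drvdata_davinci {
        unsigned sysclk;
};

static unsigned evm_get_sysclk(struct snd_pcm_substream *substream)
{
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct snd_soc_card *card = rtd->codec->card;
        struct snd_soc_card_drvdata_davinci *drvdata =
                snd_soc_card_get_drvdata(card);

        return drvdata->sysclk;         /* e.g. 12288000 on the DM6446 EVM */
}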
 
index 32ddb7fe5034865a7f13dc7243f1f8161482c7f2..71e14bb3a8cd11b4465567d088a05820e0ecdb74 100644 (file)
@@ -1001,18 +1001,40 @@ static const struct snd_soc_component_driver davinci_mcasp_component = {
        .name           = "davinci-mcasp",
 };
 
+/* Some HW specific values and defaults. The rest is filled in from DT. */
+static struct snd_platform_data dm646x_mcasp_pdata = {
+       .tx_dma_offset = 0x400,
+       .rx_dma_offset = 0x400,
+       .asp_chan_q = EVENTQ_0,
+       .version = MCASP_VERSION_1,
+};
+
+static struct snd_platform_data da830_mcasp_pdata = {
+       .tx_dma_offset = 0x2000,
+       .rx_dma_offset = 0x2000,
+       .asp_chan_q = EVENTQ_0,
+       .version = MCASP_VERSION_2,
+};
+
+static struct snd_platform_data omap2_mcasp_pdata = {
+       .tx_dma_offset = 0,
+       .rx_dma_offset = 0,
+       .asp_chan_q = EVENTQ_0,
+       .version = MCASP_VERSION_3,
+};
+
 static const struct of_device_id mcasp_dt_ids[] = {
        {
                .compatible = "ti,dm646x-mcasp-audio",
-               .data = (void *)MCASP_VERSION_1,
+               .data = &dm646x_mcasp_pdata,
        },
        {
                .compatible = "ti,da830-mcasp-audio",
-               .data = (void *)MCASP_VERSION_2,
+               .data = &da830_mcasp_pdata,
        },
        {
-               .compatible = "ti,omap2-mcasp-audio",
-               .data = (void *)MCASP_VERSION_3,
+               .compatible = "ti,am33xx-mcasp-audio",
+               .data = &omap2_mcasp_pdata,
        },
        { /* sentinel */ }
 };
@@ -1025,9 +1047,9 @@ static struct snd_platform_data *davinci_mcasp_set_pdata_from_of(
        struct snd_platform_data *pdata = NULL;
        const struct of_device_id *match =
                        of_match_device(mcasp_dt_ids, &pdev->dev);
+       struct of_phandle_args dma_spec;
 
        const u32 *of_serial_dir32;
-       u8 *of_serial_dir;
        u32 val;
        int i, ret = 0;
 
@@ -1035,20 +1057,13 @@ static struct snd_platform_data *davinci_mcasp_set_pdata_from_of(
                pdata = pdev->dev.platform_data;
                return pdata;
        } else if (match) {
-               pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-               if (!pdata) {
-                       ret = -ENOMEM;
-                       goto nodata;
-               }
+               pdata = (struct snd_platform_data *) match->data;
        } else {
                /* control shouldn't reach here. something is wrong */
                ret = -EINVAL;
                goto nodata;
        }
 
-       if (match->data)
-               pdata->version = (u8)((int)match->data);
-
        ret = of_property_read_u32(np, "op-mode", &val);
        if (ret >= 0)
                pdata->op_mode = val;
@@ -1065,35 +1080,46 @@ static struct snd_platform_data *davinci_mcasp_set_pdata_from_of(
                pdata->tdm_slots = val;
        }
 
-       ret = of_property_read_u32(np, "num-serializer", &val);
-       if (ret >= 0)
-               pdata->num_serializer = val;
-
        of_serial_dir32 = of_get_property(np, "serial-dir", &val);
        val /= sizeof(u32);
-       if (val != pdata->num_serializer) {
-               dev_err(&pdev->dev,
-                               "num-serializer(%d) != serial-dir size(%d)\n",
-                               pdata->num_serializer, val);
-               ret = -EINVAL;
-               goto nodata;
-       }
-
        if (of_serial_dir32) {
-               of_serial_dir = devm_kzalloc(&pdev->dev,
-                                               (sizeof(*of_serial_dir) * val),
-                                               GFP_KERNEL);
+               u8 *of_serial_dir = devm_kzalloc(&pdev->dev,
+                                                (sizeof(*of_serial_dir) * val),
+                                                GFP_KERNEL);
                if (!of_serial_dir) {
                        ret = -ENOMEM;
                        goto nodata;
                }
 
-               for (i = 0; i < pdata->num_serializer; i++)
+               for (i = 0; i < val; i++)
                        of_serial_dir[i] = be32_to_cpup(&of_serial_dir32[i]);
 
+               pdata->num_serializer = val;
                pdata->serial_dir = of_serial_dir;
        }
 
+       ret = of_property_match_string(np, "dma-names", "tx");
+       if (ret < 0)
+               goto nodata;
+
+       ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells", ret,
+                                        &dma_spec);
+       if (ret < 0)
+               goto nodata;
+
+       pdata->tx_dma_channel = dma_spec.args[0];
+
+       ret = of_property_match_string(np, "dma-names", "rx");
+       if (ret < 0)
+               goto nodata;
+
+       ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells", ret,
+                                        &dma_spec);
+       if (ret < 0)
+               goto nodata;
+
+       pdata->rx_dma_channel = dma_spec.args[0];
+
        ret = of_property_read_u32(np, "tx-num-evt", &val);
        if (ret >= 0)
                pdata->txnumevt = val;
@@ -1124,7 +1150,7 @@ nodata:
 static int davinci_mcasp_probe(struct platform_device *pdev)
 {
        struct davinci_pcm_dma_params *dma_data;
-       struct resource *mem, *ioarea, *res;
+       struct resource *mem, *ioarea, *res, *dat;
        struct snd_platform_data *pdata;
        struct davinci_audio_dev *dev;
        int ret;
@@ -1145,10 +1171,15 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
        if (!mem) {
-               dev_err(&pdev->dev, "no mem resource?\n");
-               return -ENODEV;
+               dev_warn(&pdev->dev,
+                        "\"mpu\" mem resource not found, using index 0\n");
+               mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+               if (!mem) {
+                       dev_err(&pdev->dev, "no mem resource?\n");
+                       return -ENODEV;
+               }
        }
 
        ioarea = devm_request_mem_region(&pdev->dev, mem->start,
@@ -1182,40 +1213,36 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
        dev->rxnumevt = pdata->rxnumevt;
        dev->dev = &pdev->dev;
 
+       dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
+       if (!dat)
+               dat = mem;
+
        dma_data = &dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK];
        dma_data->asp_chan_q = pdata->asp_chan_q;
        dma_data->ram_chan_q = pdata->ram_chan_q;
        dma_data->sram_pool = pdata->sram_pool;
        dma_data->sram_size = pdata->sram_size_playback;
-       dma_data->dma_addr = (dma_addr_t) (pdata->tx_dma_offset +
-                                                       mem->start);
+       dma_data->dma_addr = dat->start + pdata->tx_dma_offset;
 
-       /* first TX, then RX */
        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no DMA resource\n");
-               ret = -ENODEV;
-               goto err_release_clk;
-       }
-
-       dma_data->channel = res->start;
+       if (res)
+               dma_data->channel = res->start;
+       else
+               dma_data->channel = pdata->tx_dma_channel;
 
        dma_data = &dev->dma_params[SNDRV_PCM_STREAM_CAPTURE];
        dma_data->asp_chan_q = pdata->asp_chan_q;
        dma_data->ram_chan_q = pdata->ram_chan_q;
        dma_data->sram_pool = pdata->sram_pool;
        dma_data->sram_size = pdata->sram_size_capture;
-       dma_data->dma_addr = (dma_addr_t)(pdata->rx_dma_offset +
-                                                       mem->start);
+       dma_data->dma_addr = dat->start + pdata->rx_dma_offset;
 
        res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-       if (!res) {
-               dev_err(&pdev->dev, "no DMA resource\n");
-               ret = -ENODEV;
-               goto err_release_clk;
-       }
+       if (res)
+               dma_data->channel = res->start;
+       else
+               dma_data->channel = pdata->rx_dma_channel;
 
-       dma_data->channel = res->start;
        dev_set_drvdata(&pdev->dev, dev);
        ret = snd_soc_register_component(&pdev->dev, &davinci_mcasp_component,
                                         &davinci_mcasp_dai[pdata->op_mode], 1);
@@ -1251,12 +1278,51 @@ static int davinci_mcasp_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int davinci_mcasp_suspend(struct device *dev)
+{
+       struct davinci_audio_dev *a = dev_get_drvdata(dev);
+       void __iomem *base = a->base;
+
+       a->context.txfmtctl = mcasp_get_reg(base + DAVINCI_MCASP_TXFMCTL_REG);
+       a->context.rxfmtctl = mcasp_get_reg(base + DAVINCI_MCASP_RXFMCTL_REG);
+       a->context.txfmt = mcasp_get_reg(base + DAVINCI_MCASP_TXFMT_REG);
+       a->context.rxfmt = mcasp_get_reg(base + DAVINCI_MCASP_RXFMT_REG);
+       a->context.aclkxctl = mcasp_get_reg(base + DAVINCI_MCASP_ACLKXCTL_REG);
+       a->context.aclkrctl = mcasp_get_reg(base + DAVINCI_MCASP_ACLKRCTL_REG);
+       a->context.pdir = mcasp_get_reg(base + DAVINCI_MCASP_PDIR_REG);
+
+       return 0;
+}
+
+static int davinci_mcasp_resume(struct device *dev)
+{
+       struct davinci_audio_dev *a = dev_get_drvdata(dev);
+       void __iomem *base = a->base;
+
+       mcasp_set_reg(base + DAVINCI_MCASP_TXFMCTL_REG, a->context.txfmtctl);
+       mcasp_set_reg(base + DAVINCI_MCASP_RXFMCTL_REG, a->context.rxfmtctl);
+       mcasp_set_reg(base + DAVINCI_MCASP_TXFMT_REG, a->context.txfmt);
+       mcasp_set_reg(base + DAVINCI_MCASP_RXFMT_REG, a->context.rxfmt);
+       mcasp_set_reg(base + DAVINCI_MCASP_ACLKXCTL_REG, a->context.aclkxctl);
+       mcasp_set_reg(base + DAVINCI_MCASP_ACLKRCTL_REG, a->context.aclkrctl);
+       mcasp_set_reg(base + DAVINCI_MCASP_PDIR_REG, a->context.pdir);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(davinci_mcasp_pm_ops,
+                 davinci_mcasp_suspend,
+                 davinci_mcasp_resume);
+
 static struct platform_driver davinci_mcasp_driver = {
        .probe          = davinci_mcasp_probe,
        .remove         = davinci_mcasp_remove,
        .driver         = {
                .name   = "davinci-mcasp",
                .owner  = THIS_MODULE,
+               .pm     = &davinci_mcasp_pm_ops,
                .of_match_table = mcasp_dt_ids,
        },
 };
@@ -1266,4 +1332,3 @@ module_platform_driver(davinci_mcasp_driver);
 MODULE_AUTHOR("Steve Chen");
 MODULE_DESCRIPTION("TI DAVINCI McASP SoC Interface");
 MODULE_LICENSE("GPL");
-
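
The davinci_mcasp_set_pdata_from_of() hunk above now derives the EDMA channel numbers from the standard "dmas"/"dma-names" properties instead of requiring IORESOURCE_DMA entries. A minimal sketch of that lookup, not from the patch; mcasp_get_dma_channel() is an illustrative helper name:

#include <linux/of.h>
#include <linux/types.h>

static int mcasp_get_dma_channel(struct device_node *np, const char *name,
                                 u32 *channel)
{
        struct of_phandle_args dma_spec;
        int index, ret;

        /* position of "tx" or "rx" in the dma-names list */
        index = of_property_match_string(np, "dma-names", name);
        if (index < 0)
                return index;

        /* resolve the matching entry of the "dmas" list */
        ret = of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
                                         &dma_spec);
        if (ret < 0)
                return ret;

        *channel = dma_spec.args[0];
        return 0;
}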
index a9ac0c11da71dd784ed26286e46d064f0cca66c9..a2e27e1c32f3b28a8600ea0bb5325a35f7fe9741 100644 (file)
@@ -43,6 +43,18 @@ struct davinci_audio_dev {
        /* McASP FIFO related */
        u8      txnumevt;
        u8      rxnumevt;
+
+#ifdef CONFIG_PM_SLEEP
+       struct {
+               u32     txfmtctl;
+               u32     rxfmtctl;
+               u32     txfmt;
+               u32     rxfmt;
+               u32     aclkxctl;
+               u32     aclkrctl;
+               u32     pdir;
+       } context;
+#endif
 };
 
 #endif /* DAVINCI_MCASP_H */
index 8460edce1c3b6b5cc90dff481e5588c75e2285ad..84a63c660ab93b48f090e4085048cc0959cc6aa2 100644 (file)
@@ -844,18 +844,15 @@ static void davinci_pcm_free(struct snd_pcm *pcm)
        }
 }
 
-static u64 davinci_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int davinci_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &davinci_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = davinci_pcm_preallocate_dma_buffer(pcm,
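
This hunk, and several of the PCM drivers further down (fsl, imx, jz4740, kirkwood, mpc5200, nuc900, omap), replace the open-coded dma_mask/coherent_dma_mask assignment with dma_coerce_mask_and_coherent(), which sets both masks in one call and can actually fail. A minimal sketch of the new shape of such a .pcm_new callback, not from the patch; the 32-bit mask matches the DaVinci case, other drivers use 36 or 64 bits:

#include <linux/dma-mapping.h>
#include <sound/soc.h>

static int example_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
        struct snd_card *card = rtd->card->snd_card;
        int ret;

        /* one call sets the streaming and coherent masks and reports errors */
        ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        /* per-stream buffer preallocation would follow here */
        return 0;
}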
index 9cc5c1f82f093f5b171877e1bd5e5672cf9c46cd..f73c7eff8b237b0d7786cd71005789af12b8c0a3 100644 (file)
@@ -298,14 +298,11 @@ static int fsl_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       static u64 fsl_dma_dmamask = DMA_BIT_MASK(36);
        int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &fsl_dma_dmamask;
-
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = fsl_dma_dmamask;
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(36));
+       if (ret)
+               return ret;
 
        /* Some codecs have separate DAIs for playback and capture, so we
         * should allocate a DMA buffer only for the streams that are valid.
index 3920c3e849ce4dfd6b76f77d877363243d8234d5..ff1f34766ee394119001f9bb417dbe3bf25d9ea3 100644 (file)
@@ -963,7 +963,7 @@ static bool fsl_spdif_readable_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                return false;
-       };
+       }
 }
 
 static bool fsl_spdif_writeable_reg(struct device *dev, unsigned int reg)
@@ -982,7 +982,7 @@ static bool fsl_spdif_writeable_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                return false;
-       };
+       }
 }
 
 static const struct regmap_config fsl_spdif_regmap_config = {
@@ -1172,23 +1172,16 @@ static int fsl_spdif_probe(struct platform_device *pdev)
        /* Register with ASoC */
        dev_set_drvdata(&pdev->dev, spdif_priv);
 
-       ret = snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
-                                        &spdif_priv->cpu_dai_drv, 1);
+       ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
+                                             &spdif_priv->cpu_dai_drv, 1);
        if (ret) {
                dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
                return ret;
        }
 
        ret = imx_pcm_dma_init(pdev);
-       if (ret) {
+       if (ret)
                dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
-               goto error_component;
-       }
-
-       return ret;
-
-error_component:
-       snd_soc_unregister_component(&pdev->dev);
 
        return ret;
 }
@@ -1196,7 +1189,6 @@ error_component:
 static int fsl_spdif_remove(struct platform_device *pdev)
 {
        imx_pcm_dma_exit(pdev);
-       snd_soc_unregister_component(&pdev->dev);
 
        return 0;
 }
index c6b743978d5ecdcdf402ddb43df0b7bd0400c422..35e277379b86ce80d56737e3e0c05a5b3772fb2c 100644 (file)
@@ -469,19 +469,12 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
                         * parameters, then the second stream may be
                         * constrained to the wrong sample rate or size.
                         */
-                       if (!first_runtime->sample_bits) {
-                               dev_err(substream->pcm->card->dev,
-                                       "set sample size in %s stream first\n",
-                                       substream->stream ==
-                                       SNDRV_PCM_STREAM_PLAYBACK
-                                       ? "capture" : "playback");
-                               return -EAGAIN;
-                       }
-
-                       snd_pcm_hw_constraint_minmax(substream->runtime,
-                               SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
+                       if (first_runtime->sample_bits) {
+                               snd_pcm_hw_constraint_minmax(substream->runtime,
+                                               SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
                                first_runtime->sample_bits,
                                first_runtime->sample_bits);
+                       }
                }
 
                ssi_private->second_stream = substream;
@@ -748,7 +741,7 @@ static void fsl_ssi_ac97_init(void)
        fsl_ssi_setup(fsl_ac97_data);
 }
 
-void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
+static void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
                unsigned short val)
 {
        struct ccsr_ssi *ssi = fsl_ac97_data->ssi;
@@ -770,7 +763,7 @@ void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
        udelay(100);
 }
 
-unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
+static unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
                unsigned short reg)
 {
        struct ccsr_ssi *ssi = fsl_ac97_data->ssi;
@@ -936,7 +929,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        ssi_private->ssi_phys = res.start;
 
        ssi_private->irq = irq_of_parse_and_map(np, 0);
-       if (ssi_private->irq == NO_IRQ) {
+       if (!ssi_private->irq) {
                dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
                return -ENXIO;
        }
@@ -1135,7 +1128,6 @@ static int fsl_ssi_remove(struct platform_device *pdev)
        if (ssi_private->ssi_on_imx)
                imx_pcm_dma_exit(pdev);
        snd_soc_unregister_component(&pdev->dev);
-       dev_set_drvdata(&pdev->dev, NULL);
        device_remove_file(&pdev->dev, &ssi_private->dev_attr);
        if (ssi_private->ssi_on_imx)
                clk_disable_unprepare(ssi_private->clk);
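
The fsl_ssi probe above drops the NO_IRQ comparison because irq_of_parse_and_map() returns 0 when no mapping exists, and NO_IRQ is not defined consistently across architectures. A minimal sketch of the check, not from the patch:

#include <linux/errno.h>
#include <linux/of_irq.h>

static int example_get_irq(struct device_node *np)
{
        unsigned int irq = irq_of_parse_and_map(np, 0);

        /* 0 means "no interrupt", so a plain zero test is the right check */
        if (!irq)
                return -ENXIO;

        return irq;
}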
index d3bf71a0ec56aa6ff0ef604b36779eec79e3f23a..ac869931d7f16c9c4049aefaffb4a7d416417e49 100644 (file)
@@ -66,13 +66,10 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
 {
        ssize_t ret;
-       char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       char *buf;
        int port = (int)file->private_data;
        u32 pdcr, ptcr;
 
-       if (!buf)
-               return -ENOMEM;
-
        if (audmux_clk) {
                ret = clk_prepare_enable(audmux_clk);
                if (ret)
@@ -85,6 +82,10 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
        if (audmux_clk)
                clk_disable_unprepare(audmux_clk);
 
+       buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
        ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
                       pdcr, ptcr);
 
index a3d60d4bea4ce8ace84d2860921b5c46d3f00895..a2fd7321b5a9a1bbd321f756af14fd9f071a372f 100644 (file)
@@ -112,7 +112,7 @@ static int imx_mc13783_probe(struct platform_device *pdev)
                return ret;
        }
 
-       if (machine_is_mx31_3ds()) {
+       if (machine_is_mx31_3ds() || machine_is_mx31moboard()) {
                imx_audmux_v2_configure_port(MX31_AUDMUX_PORT4_SSI_PINS_4,
                        IMX_AUDMUX_V2_PTCR_SYN,
                        IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0) |
index 34043c55f2a62f048232b09ff2f6165fd3726116..fd5f2fb955f182cdef9458e1ab3280d5072365a1 100644 (file)
@@ -272,18 +272,16 @@ static int imx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
        return 0;
 }
 
-static u64 imx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int imx_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
+
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &imx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = imx_pcm_preallocate_dma_buffer(pcm,
                        SNDRV_PCM_STREAM_PLAYBACK);
index ca1be1d9dcf0349b608f77367f5330ca7c2c6308..ed6ba1eba557f5b826f3b8abc4f51f25ab81d7df 100644 (file)
@@ -159,7 +159,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
        data->card.dapm_widgets = imx_sgtl5000_dapm_widgets;
        data->card.num_dapm_widgets = ARRAY_SIZE(imx_sgtl5000_dapm_widgets);
 
-       ret = snd_soc_register_card(&data->card);
+       ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
        if (ret) {
                dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
                goto fail;
@@ -186,7 +186,6 @@ static int imx_sgtl5000_remove(struct platform_device *pdev)
 {
        struct imx_sgtl5000_data *data = platform_get_drvdata(pdev);
 
-       snd_soc_unregister_card(&data->card);
        clk_put(data->codec_clk);
 
        return 0;
index 816013b0ebba4f0b16e3fdfddd0e763833d1ee39..8499d5292f088a45e9ea736106ee2db13e7d205a 100644 (file)
@@ -87,7 +87,7 @@ static int imx_spdif_audio_probe(struct platform_device *pdev)
        if (ret)
                goto error_dir;
 
-       ret = snd_soc_register_card(&data->card);
+       ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
        if (ret) {
                dev_err(&pdev->dev, "snd_soc_register_card failed: %d\n", ret);
                goto error_dir;
@@ -119,8 +119,6 @@ static int imx_spdif_audio_remove(struct platform_device *pdev)
        if (data->txdev)
                platform_device_unregister(data->txdev);
 
-       snd_soc_unregister_card(&data->card);
-
        return 0;
 }
 
index f58bcd85c07fbd8b302c0c3bcd97fd3cf6ed4d0c..f5f248c91c16db7c3db5a50654bceedb0e81953f 100644 (file)
@@ -600,22 +600,19 @@ static int imx_ssi_probe(struct platform_device *pdev)
        ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
        ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
 
-       ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
-       if (ret)
-               goto failed_pcm_fiq;
+       ssi->fiq_init = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
+       ssi->dma_init = imx_pcm_dma_init(pdev);
 
-       ret = imx_pcm_dma_init(pdev);
-       if (ret)
-               goto failed_pcm_dma;
+       if (ssi->fiq_init && ssi->dma_init) {
+               ret = ssi->fiq_init;
+               goto failed_pcm;
+       }
 
        return 0;
 
-failed_pcm_dma:
-       imx_pcm_fiq_exit(pdev);
-failed_pcm_fiq:
+failed_pcm:
        snd_soc_unregister_component(&pdev->dev);
 failed_register:
-       release_mem_region(res->start, resource_size(res));
        clk_disable_unprepare(ssi->clk);
 failed_clk:
        snd_soc_set_ac97_ops(NULL);
@@ -625,18 +622,19 @@ failed_clk:
 
 static int imx_ssi_remove(struct platform_device *pdev)
 {
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct imx_ssi *ssi = platform_get_drvdata(pdev);
 
-       imx_pcm_dma_exit(pdev);
-       imx_pcm_fiq_exit(pdev);
+       if (!ssi->dma_init)
+               imx_pcm_dma_exit(pdev);
+
+       if (!ssi->fiq_init)
+               imx_pcm_fiq_exit(pdev);
 
        snd_soc_unregister_component(&pdev->dev);
 
        if (ssi->flags & IMX_SSI_USE_AC97)
                ac97_ssi = NULL;
 
-       release_mem_region(res->start, resource_size(res));
        clk_disable_unprepare(ssi->clk);
        snd_soc_set_ac97_ops(NULL);
 
index fb1616ba8c5967e1892b4ff6c7e180b2b9447047..560c40fc9ebbb50241e3732f3a76ece064178bf2 100644 (file)
@@ -211,6 +211,8 @@ struct imx_ssi {
        struct imx_dma_data filter_data_rx;
        struct imx_pcm_fiq_params fiq_params;
 
+       int fiq_init;
+       int dma_init;
        int enabled;
 };
 
index 722afe69169e904798974115292159a89a08ba18..f84ecbfd1238701326e1e69a53770148b55e3a83 100644 (file)
@@ -215,7 +215,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
                goto fail;
        }
        codec_dev = of_find_i2c_device_by_node(codec_np);
-       if (!codec_dev || !codec_dev->driver) {
+       if (!codec_dev || !codec_dev->dev.driver) {
                dev_err(&pdev->dev, "failed to find codec platform device\n");
                ret = -EINVAL;
                goto fail;
@@ -266,7 +266,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
        data->card.late_probe = imx_wm8962_late_probe;
        data->card.set_bias_level = imx_wm8962_set_bias_level;
 
-       ret = snd_soc_register_card(&data->card);
+       ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
        if (ret) {
                dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
                goto clk_fail;
@@ -296,7 +296,6 @@ static int imx_wm8962_remove(struct platform_device *pdev)
 
        if (!IS_ERR(data->codec_clk))
                clk_disable_unprepare(data->codec_clk);
-       snd_soc_unregister_card(&data->card);
 
        return 0;
 }
index 2a847ca494b5b4dfe00019d1b10c5ae8eccc817b..8fcf2241674054a8d687ae94598a0b1beed26e8a 100644 (file)
@@ -299,7 +299,6 @@ static struct snd_pcm_ops psc_dma_ops = {
        .hw_params      = psc_dma_hw_params,
 };
 
-static u64 psc_dma_dmamask = DMA_BIT_MASK(32);
 static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
@@ -307,15 +306,14 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
        struct snd_pcm *pcm = rtd->pcm;
        struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai);
        size_t size = psc_dma_hardware.buffer_bytes_max;
-       int rc = 0;
+       int rc;
 
        dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n",
                card, dai, pcm);
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &psc_dma_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       rc = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev,
index 8c49147db84c43cf73d5870a4802959e66330ce2..b2fbb7075a6c9900aac6aea847b63242a359a5a7 100644 (file)
@@ -27,6 +27,11 @@ static int __asoc_simple_card_dai_init(struct snd_soc_dai *dai,
        if (!ret && daifmt)
                ret = snd_soc_dai_set_fmt(dai, daifmt);
 
+       if (ret == -ENOTSUPP) {
+               dev_dbg(dai->dev, "ASoC: set_fmt is not supported\n");
+               ret = 0;
+       }
+
        if (!ret && set->sysclk)
                ret = snd_soc_dai_set_sysclk(dai, 0, set->sysclk, 0);
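
The simple-card hunk above stops treating -ENOTSUPP from snd_soc_dai_set_fmt() as fatal, since that code only means the DAI has no set_fmt operation. A minimal sketch of the same tolerance, not from the patch:

#include <linux/errno.h>
#include <sound/soc.h>

static int example_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
        int ret = snd_soc_dai_set_fmt(dai, fmt);

        /* a DAI without a set_fmt callback is not an error */
        if (ret == -ENOTSUPP)
                return 0;

        return ret;
}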
 
index 710059292318878886d00383396413c3ec87c8d1..1d7ef28585e1a4125bb70bc17edd203a7195b422 100644 (file)
@@ -297,19 +297,15 @@ static void jz4740_pcm_free(struct snd_pcm *pcm)
        }
 }
 
-static u64 jz4740_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int jz4740_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
-
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &jz4740_pcm_dmamask;
+       int ret;
 
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = jz4740_pcm_preallocate_dma_buffer(pcm,
index b238434f92b099db8bf3ba613fc596b7006ec96d..4af1936cf0f4fdbbaafe0bee2879872418efadd7 100644 (file)
@@ -29,9 +29,7 @@
 #define KIRKWOOD_FORMATS \
        (SNDRV_PCM_FMTBIT_S16_LE | \
         SNDRV_PCM_FMTBIT_S24_LE | \
-        SNDRV_PCM_FMTBIT_S32_LE | \
-        SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | \
-        SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE)
+        SNDRV_PCM_FMTBIT_S32_LE)
 
 static struct kirkwood_dma_data *kirkwood_priv(struct snd_pcm_substream *subs)
 {
@@ -59,8 +57,6 @@ static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
        .fifo_size              = 0,
 };
 
-static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
-
 static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
 {
        struct kirkwood_dma_data *priv = dev_id;
@@ -161,7 +157,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
                 * Enable Error interrupts. We're only ack'ing them but
                 * it's useful for diagnostics
                 */
-               writel((unsigned long)-1, priv->io + KIRKWOOD_ERR_MASK);
+               writel((unsigned int)-1, priv->io + KIRKWOOD_ERR_MASK);
        }
 
        dram = mv_mbus_dram_info();
@@ -292,10 +288,9 @@ static int kirkwood_dma_new(struct snd_soc_pcm_runtime *rtd)
        struct snd_pcm *pcm = rtd->pcm;
        int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &kirkwood_dma_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = kirkwood_dma_preallocate_dma_buffer(pcm,
index 0f3d73d4ef48ea72d5d9b3c63d72f90e919ba7bc..d0504a2d8c6320d67a7ad8f0dab6b22bcef51548 100644 (file)
@@ -103,7 +103,7 @@ static void kirkwood_set_rate(struct snd_soc_dai *dai,
 {
        uint32_t clks_ctrl;
 
-       if (rate == 44100 || rate == 48000 || rate == 96000) {
+       if (IS_ERR(priv->extclk)) {
                /* use internal dco for the supported rates
                 * defined in kirkwood_i2s_dai */
                dev_dbg(dai->dev, "%s: dco set rate = %lu\n",
@@ -496,7 +496,10 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
                return err;
 
        priv->extclk = devm_clk_get(&pdev->dev, "extclk");
-       if (!IS_ERR(priv->extclk)) {
+       if (IS_ERR(priv->extclk)) {
+               if (PTR_ERR(priv->extclk) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+       } else {
                if (priv->extclk == priv->clk) {
                        devm_clk_put(&pdev->dev, priv->extclk);
                        priv->extclk = ERR_PTR(-EINVAL);
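
The kirkwood-i2s probe above now tells a missing "extclk" apart from one whose provider simply has not probed yet, deferring only in the latter case. A minimal sketch of that distinction, not from the patch; the helper name is illustrative:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>

static struct clk *example_get_optional_clk(struct device *dev,
                                            const char *name)
{
        struct clk *clk = devm_clk_get(dev, name);

        /* the provider may not be ready yet: retry the whole probe later */
        if (IS_ERR(clk) && PTR_ERR(clk) == -EPROBE_DEFER)
                return ERR_PTR(-EPROBE_DEFER);

        /* otherwise: either a usable clock or a non-fatal "not present" */
        return clk;
}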
index f8e1ccc1c58cc76f8ce2a314a1c8a0fcc5e998a5..bf23afbba1d7ba3d6e5e8f3bb22f078e5b2908ae 100644 (file)
 /* need to find where they come from               */
 #define KIRKWOOD_SND_MIN_PERIODS               8
 #define KIRKWOOD_SND_MAX_PERIODS               16
-#define KIRKWOOD_SND_MIN_PERIOD_BYTES          0x4000
-#define KIRKWOOD_SND_MAX_PERIOD_BYTES          0x4000
+#define KIRKWOOD_SND_MIN_PERIOD_BYTES          0x800
+#define KIRKWOOD_SND_MAX_PERIOD_BYTES          0x8000
 #define KIRKWOOD_SND_MAX_BUFFER_BYTES          (KIRKWOOD_SND_MAX_PERIOD_BYTES \
                                                 * KIRKWOOD_SND_MAX_PERIODS)
 
index ee363845759eca555346193ce17d740868d5fd6d..d3d4c32434f7614d944296b34b9b87f6c80a66b3 100644 (file)
@@ -400,7 +400,7 @@ static int snd_mfld_mc_probe(struct platform_device *pdev)
        }
        /* register the soc card */
        snd_soc_card_mfld.dev = &pdev->dev;
-       ret_val = snd_soc_register_card(&snd_soc_card_mfld);
+       ret_val = devm_snd_soc_register_card(&pdev->dev, &snd_soc_card_mfld);
        if (ret_val) {
                pr_debug("snd_soc_register_card failed %d\n", ret_val);
                return ret_val;
@@ -410,20 +410,12 @@ static int snd_mfld_mc_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int snd_mfld_mc_remove(struct platform_device *pdev)
-{
-       pr_debug("snd_mfld_mc_remove called\n");
-       snd_soc_unregister_card(&snd_soc_card_mfld);
-       return 0;
-}
-
 static struct platform_driver snd_mfld_mc_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "msic_audio",
        },
        .probe = snd_mfld_mc_probe,
-       .remove = snd_mfld_mc_remove,
 };
 
 module_platform_driver(snd_mfld_mc_driver);
index b56b8a0e8deb43b6fdca0668fb43677bb1ce47ba..54e622acac330d82a92d95164ae79e89cfb461e4 100644 (file)
@@ -494,6 +494,7 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
        struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
        struct mxs_saif *master_saif;
        u32 delay;
+       int ret;
 
        master_saif = mxs_saif_get_master(saif);
        if (!master_saif)
@@ -503,23 +504,37 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               if (saif->state == MXS_SAIF_STATE_RUNNING)
+                       return 0;
+
                dev_dbg(cpu_dai->dev, "start\n");
 
-               clk_enable(master_saif->clk);
-               if (!master_saif->mclk_in_use)
-                       __raw_writel(BM_SAIF_CTRL_RUN,
-                               master_saif->base + SAIF_CTRL + MXS_SET_ADDR);
+               ret = clk_enable(master_saif->clk);
+               if (ret) {
+                       dev_err(saif->dev, "Failed to enable master clock\n");
+                       return ret;
+               }
 
                /*
                 * If this SAIF is not its own master, we also need to enable
                 * its own clock for its internal basic logic to work.
                 */
                if (saif != master_saif) {
-                       clk_enable(saif->clk);
+                       ret = clk_enable(saif->clk);
+                       if (ret) {
+                               dev_err(saif->dev, "Failed to enable SAIF clock\n");
+                               clk_disable(master_saif->clk);
+                               return ret;
+                       }
+
                        __raw_writel(BM_SAIF_CTRL_RUN,
                                saif->base + SAIF_CTRL + MXS_SET_ADDR);
                }
 
+               if (!master_saif->mclk_in_use)
+                       __raw_writel(BM_SAIF_CTRL_RUN,
+                               master_saif->base + SAIF_CTRL + MXS_SET_ADDR);
+
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                        /*
                         * write data to saif data register to trigger
@@ -543,6 +558,7 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
                }
 
                master_saif->ongoing = 1;
+               saif->state = MXS_SAIF_STATE_RUNNING;
 
                dev_dbg(saif->dev, "CTRL 0x%x STAT 0x%x\n",
                        __raw_readl(saif->base + SAIF_CTRL),
@@ -555,6 +571,9 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               if (saif->state == MXS_SAIF_STATE_STOPPED)
+                       return 0;
+
                dev_dbg(cpu_dai->dev, "stop\n");
 
                /* wait a while for the current sample to complete */
@@ -575,6 +594,7 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
                }
 
                master_saif->ongoing = 0;
+               saif->state = MXS_SAIF_STATE_STOPPED;
 
                break;
        default:
@@ -768,8 +788,8 @@ static int mxs_saif_probe(struct platform_device *pdev)
                        dev_warn(&pdev->dev, "failed to init clocks\n");
        }
 
-       ret = snd_soc_register_component(&pdev->dev, &mxs_saif_component,
-                                        &mxs_saif_dai, 1);
+       ret = devm_snd_soc_register_component(&pdev->dev, &mxs_saif_component,
+                                             &mxs_saif_dai, 1);
        if (ret) {
                dev_err(&pdev->dev, "register DAI failed\n");
                return ret;
@@ -778,21 +798,15 @@ static int mxs_saif_probe(struct platform_device *pdev)
        ret = mxs_pcm_platform_register(&pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "register PCM failed: %d\n", ret);
-               goto failed_pdev_alloc;
+               return ret;
        }
 
        return 0;
-
-failed_pdev_alloc:
-       snd_soc_unregister_component(&pdev->dev);
-
-       return ret;
 }
 
 static int mxs_saif_remove(struct platform_device *pdev)
 {
        mxs_pcm_platform_unregister(&pdev->dev);
-       snd_soc_unregister_component(&pdev->dev);
 
        return 0;
 }
index 53eaa4bf0e2726b04c593d9c083895aece2becd6..fbaf7badfdfb03b7d26eb84fd5c1be5850d06d1a 100644 (file)
@@ -124,6 +124,11 @@ struct mxs_saif {
 
        u32 fifo_underrun;
        u32 fifo_overrun;
+
+       enum {
+               MXS_SAIF_STATE_STOPPED,
+               MXS_SAIF_STATE_RUNNING,
+       } state;
 };
 
 extern int mxs_saif_put_mclk(unsigned int saif_id);
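
The state field added to struct mxs_saif above is what lets mxs_saif_trigger() earlier in this diff ignore repeated START or STOP commands and keep the clk_enable()/clk_disable() calls balanced. A minimal sketch of that guard, not from the patch; the example_* names are placeholders for the real clock and register handling:

#include <linux/errno.h>
#include <sound/pcm.h>

enum example_state { EXAMPLE_STOPPED, EXAMPLE_RUNNING };

static int example_trigger(enum example_state *state, int cmd)
{
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
                if (*state == EXAMPLE_RUNNING)
                        return 0;       /* already running: nothing to do */
                /* enable clocks and set the RUN bit here */
                *state = EXAMPLE_RUNNING;
                return 0;
        case SNDRV_PCM_TRIGGER_STOP:
                if (*state == EXAMPLE_STOPPED)
                        return 0;       /* already stopped: nothing to do */
                /* clear the RUN bit and disable clocks here */
                *state = EXAMPLE_STOPPED;
                return 0;
        default:
                return -EINVAL;
        }
}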
index 4bb273786ff336aef16c08122915520f69467ef6..61822cc53bd3e9b53b63042dd6ea986b681a7d1b 100644 (file)
@@ -122,14 +122,12 @@ static struct snd_soc_card mxs_sgtl5000 = {
        .num_links      = ARRAY_SIZE(mxs_sgtl5000_dai),
 };
 
-static int mxs_sgtl5000_probe_dt(struct platform_device *pdev)
+static int mxs_sgtl5000_probe(struct platform_device *pdev)
 {
+       struct snd_soc_card *card = &mxs_sgtl5000;
+       int ret, i;
        struct device_node *np = pdev->dev.of_node;
        struct device_node *saif_np[2], *codec_np;
-       int i;
-
-       if (!np)
-               return 1; /* no device tree */
 
        saif_np[0] = of_parse_phandle(np, "saif-controllers", 0);
        saif_np[1] = of_parse_phandle(np, "saif-controllers", 1);
@@ -152,18 +150,6 @@ static int mxs_sgtl5000_probe_dt(struct platform_device *pdev)
        of_node_put(saif_np[0]);
        of_node_put(saif_np[1]);
 
-       return 0;
-}
-
-static int mxs_sgtl5000_probe(struct platform_device *pdev)
-{
-       struct snd_soc_card *card = &mxs_sgtl5000;
-       int ret;
-
-       ret = mxs_sgtl5000_probe_dt(pdev);
-       if (ret < 0)
-               return ret;
-
        /*
         * Set an init clock(11.28Mhz) for sgtl5000 initialization(i2c r/w).
         * The Sgtl5000 sysclk is derived from saif0 mclk and it's range
index c894ff0f25809c997197496e810cc52d0299d40a..f588ee45b4fdd610d21c316f469b81b687da40a9 100644 (file)
@@ -314,16 +314,15 @@ static void nuc900_dma_free_dma_buffers(struct snd_pcm *pcm)
        snd_pcm_lib_preallocate_free_for_all(pcm);
 }
 
-static u64 nuc900_pcm_dmamask = DMA_BIT_MASK(32);
 static int nuc900_dma_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
+       int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &nuc900_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                card->dev, 4 * 1024, (4 * 1024) - 1);
index daa78a0095facf5ff35d1eb0b6ca61836b7c9210..4a07f7179690d36e3526cb8bcf9c4fc20e2ca509 100644 (file)
@@ -1,6 +1,6 @@
 config SND_OMAP_SOC
        tristate "SoC Audio for the Texas Instruments OMAP chips"
-       depends on (ARCH_OMAP && DMA_OMAP) || (ARCH_ARM && COMPILE_TEST)
+       depends on (ARCH_OMAP && DMA_OMAP) || (ARM && COMPILE_TEST)
        select SND_DMAENGINE_PCM
 
 config SND_OMAP_SOC_DMIC
@@ -26,7 +26,7 @@ config SND_OMAP_SOC_N810
 
 config SND_OMAP_SOC_RX51
        tristate "SoC Audio support for Nokia RX-51"
-       depends on SND_OMAP_SOC && ARCH_ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
+       depends on SND_OMAP_SOC && ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
        select SND_OMAP_SOC_MCBSP
        select SND_SOC_TLV320AIC3X
        select SND_SOC_TPA6130A2
index 90d2a7cd2563d06170d450b5bb6a9cc8a6615210..cd9ee167959dcc0b34512ffdf32d26208d506c31 100644 (file)
@@ -490,14 +490,9 @@ static int asoc_mcpdm_probe(struct platform_device *pdev)
 
        mcpdm->dev = &pdev->dev;
 
-       return snd_soc_register_component(&pdev->dev, &omap_mcpdm_component,
-                                         &omap_mcpdm_dai, 1);
-}
-
-static int asoc_mcpdm_remove(struct platform_device *pdev)
-{
-       snd_soc_unregister_component(&pdev->dev);
-       return 0;
+       return devm_snd_soc_register_component(&pdev->dev,
+                                              &omap_mcpdm_component,
+                                              &omap_mcpdm_dai, 1);
 }
 
 static const struct of_device_id omap_mcpdm_of_match[] = {
@@ -514,7 +509,6 @@ static struct platform_driver asoc_mcpdm_driver = {
        },
 
        .probe  = asoc_mcpdm_probe,
-       .remove = asoc_mcpdm_remove,
 };
 
 module_platform_driver(asoc_mcpdm_driver);
index a11405de86e82bda8801e837ec48183093bd99db..b8fa9862e54c4a6c6d99c8d3b88e8204d3cd9d63 100644 (file)
@@ -156,8 +156,6 @@ static struct snd_pcm_ops omap_pcm_ops = {
        .mmap           = omap_pcm_mmap,
 };
 
-static u64 omap_pcm_dmamask = DMA_BIT_MASK(64);
-
 static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
        int stream)
 {
@@ -202,12 +200,11 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &omap_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(64);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(64));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = omap_pcm_preallocate_dma_buffer(pcm,
index 2a9324f794d8468172637ff07d28537a4a4cdd11..6a8d6b5f160debac623f79ad72b91dd5c77b1550 100644 (file)
@@ -338,9 +338,9 @@ static int omap_twl4030_probe(struct platform_device *pdev)
        }
 
        snd_soc_card_set_drvdata(card, priv);
-       ret = snd_soc_register_card(card);
+       ret = devm_snd_soc_register_card(&pdev->dev, card);
        if (ret) {
-               dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
+               dev_err(&pdev->dev, "devm_snd_soc_register_card() failed: %d\n",
                        ret);
                return ret;
        }
@@ -357,7 +357,6 @@ static int omap_twl4030_remove(struct platform_device *pdev)
                snd_soc_jack_free_gpios(&priv->hs_jack,
                                        ARRAY_SIZE(hs_jack_gpios),
                                        hs_jack_gpios);
-       snd_soc_unregister_card(card);
 
        return 0;
 }
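omap-twl4030 is likewise converted to devm_snd_soc_register_card(), so its remove path only has to free what devm cannot track (the jack GPIOs). The bare pattern, with a hypothetical card:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

static struct snd_soc_card foo_card = {
	.name  = "foo-card",
	.owner = THIS_MODULE,
};

static int foo_card_probe(struct platform_device *pdev)
{
	int ret;

	foo_card.dev = &pdev->dev;

	/* unregistered automatically on driver unbind */
	ret = devm_snd_soc_register_card(&pdev->dev, &foo_card);
	if (ret)
		dev_err(&pdev->dev, "card registration failed: %d\n", ret);

	return ret;
}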
index 5b7d969f89a96eebd418687ef5b7abc79d74ee97..08acdc236bf8479259b33068716c21ac5aace275 100644 (file)
@@ -163,6 +163,7 @@ static struct platform_driver mmp_driver = {
        .driver         = {
                .name   = "brownstone-audio",
                .owner  = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
        .probe          = brownstone_probe,
        .remove         = brownstone_remove,
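This and the following machine-driver hunks all add the same one-liner: pointing the platform driver's PM ops at snd_soc_pm_ops, the standard dev_pm_ops exported by the ASoC core, so the card is suspended and resumed with the device. A sketch with hypothetical probe/remove callbacks:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/soc.h>

static int foo_machine_probe(struct platform_device *pdev)
{
	/* would set up and register a struct snd_soc_card here */
	return 0;
}

static int foo_machine_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_machine_driver = {
	.driver = {
		.name  = "foo-audio",
		.owner = THIS_MODULE,
		.pm    = &snd_soc_pm_ops,	/* core suspends/resumes the card */
	},
	.probe  = foo_machine_probe,
	.remove = foo_machine_remove,
};
module_platform_driver(foo_machine_driver);

MODULE_LICENSE("GPL");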
index f4cce1e8011242364807ebe21dc15b74acb1e3c7..1853d41034bfa0c17d752f6191e4346e1cc06725 100644 (file)
@@ -329,6 +329,7 @@ static struct platform_driver corgi_driver = {
        .driver         = {
                .name   = "corgi-audio",
                .owner  = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
        .probe          = corgi_probe,
        .remove         = corgi_remove,
index 70d799b13f0d3fed2464c61046eb3e7439ebd8e4..44b5c09d296be62889649c5157dd098d12013deb 100644 (file)
@@ -178,6 +178,7 @@ static struct platform_driver e740_driver = {
        .driver         = {
                .name   = "e740-audio",
                .owner  = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
        .probe          = e740_probe,
        .remove         = e740_remove,
index f94d2ab51351b63932ff6e62d434cb08671dcc54..c34e447eb9911202ca5b6fbcaa78704f1103468a 100644 (file)
@@ -160,6 +160,7 @@ static struct platform_driver e750_driver = {
        .driver         = {
                .name   = "e750-audio",
                .owner  = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
        .probe          = e750_probe,
        .remove         = e750_remove,
index 8768a640dd71953a0c78b43d0df539d0b5c6ad18..3137f800b43f139a2c41adcfb3edc7981aae347a 100644 (file)
@@ -150,6 +150,7 @@ static struct platform_driver e800_driver = {
        .driver         = {
                .name   = "e800-audio",
                .owner  = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
        .probe          = e800_probe,
        .remove         = e800_remove,
index eef1f7b7b38e27c9ffcfb1b340df0f66d3173275..fd2f4eda1fd3f88d3945168f26fc6f5c57616506 100644 (file)
@@ -91,6 +91,7 @@ static struct platform_driver imote2_driver = {
        .driver         = {
                .name   = "imote2-audio",
                .owner  = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
        .probe          = imote2_probe,
        .remove         = imote2_remove,
index bbea7780eac640630803a024d3d0670b01c3c1eb..160c5245448f1e32303b240e47ab1e4616c25325 100644 (file)
@@ -215,6 +215,7 @@ static struct platform_driver mioa701_wm9713_driver = {
        .driver         = {
                .name           = "mioa701-wm9713",
                .owner          = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
 };
 
index 41752a5fe3b07148c5402b2ea66d6f292fdc3133..5bf5f1f7cac5013f461657e0487687ce597a485b 100644 (file)
@@ -455,8 +455,8 @@ static int asoc_mmp_sspa_probe(struct platform_device *pdev)
        priv->dai_fmt = (unsigned int) -1;
        platform_set_drvdata(pdev, priv);
 
-       return snd_soc_register_component(&pdev->dev, &mmp_sspa_component,
-                                         &mmp_sspa_dai, 1);
+       return devm_snd_soc_register_component(&pdev->dev, &mmp_sspa_component,
+                                              &mmp_sspa_dai, 1);
 }
 
 static int asoc_mmp_sspa_remove(struct platform_device *pdev)
@@ -466,7 +466,6 @@ static int asoc_mmp_sspa_remove(struct platform_device *pdev)
        clk_disable(priv->audio_clk);
        clk_put(priv->audio_clk);
        clk_put(priv->sysclk);
-       snd_soc_unregister_component(&pdev->dev);
        return 0;
 }
 
index e1ffcdd9a6492678c604ab045a7a981cae562971..3284c4b901cbfa740e75d633ddb1580698fdafb5 100644 (file)
@@ -181,6 +181,7 @@ static struct platform_driver palm27x_wm9712_driver = {
        .driver         = {
                .name           = "palm27x-asoc",
                .owner          = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
 };
 
index fafe46355c316b20df0f1e3fb114cae8d4cae9e2..c93e138d8dc3403f538c193d2bc1fc52794d9ad6 100644 (file)
@@ -303,6 +303,7 @@ static struct platform_driver poodle_driver = {
        .driver         = {
                .name   = "poodle-audio",
                .owner  = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
        .probe          = poodle_probe,
        .remove         = poodle_remove,
index f1059d999de6d4128791861d2325cd8bf68371dd..ae956e3f4b9dbfb0aea905e74b4c06cc25217291 100644 (file)
@@ -89,33 +89,6 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_mic_mono_in = {
        .filter_data    = &pxa2xx_ac97_pcm_aux_mic_mono_req,
 };
 
-#ifdef CONFIG_PM
-static int pxa2xx_ac97_suspend(struct snd_soc_dai *dai)
-{
-       return pxa2xx_ac97_hw_suspend();
-}
-
-static int pxa2xx_ac97_resume(struct snd_soc_dai *dai)
-{
-       return pxa2xx_ac97_hw_resume();
-}
-
-#else
-#define pxa2xx_ac97_suspend    NULL
-#define pxa2xx_ac97_resume     NULL
-#endif
-
-static int pxa2xx_ac97_probe(struct snd_soc_dai *dai)
-{
-       return pxa2xx_ac97_hw_probe(to_platform_device(dai->dev));
-}
-
-static int pxa2xx_ac97_remove(struct snd_soc_dai *dai)
-{
-       pxa2xx_ac97_hw_remove(to_platform_device(dai->dev));
-       return 0;
-}
-
 static int pxa2xx_ac97_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_pcm_hw_params *params,
                                 struct snd_soc_dai *cpu_dai)
@@ -185,10 +158,6 @@ static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
 {
        .name = "pxa2xx-ac97",
        .ac97_control = 1,
-       .probe = pxa2xx_ac97_probe,
-       .remove = pxa2xx_ac97_remove,
-       .suspend = pxa2xx_ac97_suspend,
-       .resume = pxa2xx_ac97_resume,
        .playback = {
                .stream_name = "AC97 Playback",
                .channels_min = 2,
@@ -246,6 +215,12 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
+       ret = pxa2xx_ac97_hw_probe(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "PXA2xx AC97 hw probe error (%d)\n", ret);
+               return ret;
+       }
+
        ret = snd_soc_set_ac97_ops(&pxa2xx_ac97_ops);
        if (ret != 0)
                return ret;
@@ -262,15 +237,34 @@ static int pxa2xx_ac97_dev_remove(struct platform_device *pdev)
 {
        snd_soc_unregister_component(&pdev->dev);
        snd_soc_set_ac97_ops(NULL);
+       pxa2xx_ac97_hw_remove(pdev);
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int pxa2xx_ac97_dev_suspend(struct device *dev)
+{
+       return pxa2xx_ac97_hw_suspend();
+}
+
+static int pxa2xx_ac97_dev_resume(struct device *dev)
+{
+       return pxa2xx_ac97_hw_resume();
+}
+
+static SIMPLE_DEV_PM_OPS(pxa2xx_ac97_pm_ops,
+               pxa2xx_ac97_dev_suspend, pxa2xx_ac97_dev_resume);
+#endif
+
 static struct platform_driver pxa2xx_ac97_driver = {
        .probe          = pxa2xx_ac97_dev_probe,
        .remove         = pxa2xx_ac97_dev_remove,
        .driver         = {
                .name   = "pxa2xx-ac97",
                .owner  = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
+               .pm     = &pxa2xx_ac97_pm_ops,
+#endif
        },
 };
 
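The pxa2xx-ac97 rework moves suspend/resume out of the DAI ops and into device PM ops built with SIMPLE_DEV_PM_OPS(), whose system-sleep callbacks are only wired up under CONFIG_PM_SLEEP; the surrounding #ifdef keeps the otherwise-unused functions out of !PM_SLEEP builds. A stripped-down sketch with hypothetical foo_* helpers:

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_dev_suspend(struct device *dev)
{
	/* would call the hardware-specific suspend helper */
	return 0;
}

static int foo_dev_resume(struct device *dev)
{
	/* would call the hardware-specific resume helper */
	return 0;
}

/* expands to a const struct dev_pm_ops with the sleep callbacks filled in */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_dev_suspend, foo_dev_resume);
#endif

/* hooked up from the driver as ".pm = &foo_pm_ops" under the same #ifdef */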
index 806da27b8b671ed8596484856ec59f8092c75e8d..d58b09f4f7a426dcba4e7a82c26434161dbe124e 100644 (file)
@@ -87,18 +87,15 @@ static struct snd_pcm_ops pxa2xx_pcm_ops = {
        .mmap           = pxa2xx_pcm_mmap,
 };
 
-static u64 pxa2xx_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &pxa2xx_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = pxa2xx_pcm_preallocate_dma_buffer(pcm,
index a3fe19123f07218bbd8af8bb7bd1cf6a26af8067..1d9c2ed223bc089b233b748485c7b5a2e387ee49 100644 (file)
@@ -275,6 +275,7 @@ static struct platform_driver tosa_driver = {
        .driver         = {
                .name   = "tosa-audio",
                .owner  = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
        .probe          = tosa_probe,
        .remove         = tosa_remove,
index 13c9ee0cb83b988b06cb9b84963128b1cd703032..0b535b570622d3ad1fcffef4a6be13753f817385 100644 (file)
@@ -160,6 +160,7 @@ static struct platform_driver ttc_dkb_driver = {
        .driver         = {
                .name   = "ttc-dkb-audio",
                .owner  = THIS_MODULE,
+               .pm     = &snd_soc_pm_ops,
        },
        .probe          = ttc_dkb_probe,
        .remove         = ttc_dkb_remove,
index d0740a762963d867bf75cce29d20c15c0a4b5097..283620a97fe7b597d2670bd8c985610f5d1161ba 100644 (file)
@@ -444,8 +444,6 @@ static void s6000_pcm_free(struct snd_pcm *pcm)
        snd_pcm_lib_preallocate_free_for_all(pcm);
 }
 
-static u64 s6000_pcm_dmamask = DMA_BIT_MASK(32);
-
 static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
 {
        struct snd_card *card = runtime->card->snd_card;
@@ -456,10 +454,9 @@ static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
        params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
                        pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &s6000_pcm_dmamask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (res)
+               return res;
 
        if (params->dma_in) {
                s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
index 29e246803626baf1ba32f066226e1902cb7e1a86..84f5d8b766791748ca065678d828e767b27f0eb3 100644 (file)
@@ -356,6 +356,7 @@ static struct snd_soc_dapm_widget bells_widgets[] = {
 
 static struct snd_soc_dapm_route bells_routes[] = {
        { "Sub CLK_SYS", NULL, "OPCLK" },
+       { "CLKIN", NULL, "OPCLK" },
 
        { "DMIC", NULL, "MICBIAS2" },
        { "IN2L", NULL, "DMIC" },
index 9338d11e92168c222eae4630228f711bd9cf803c..fe2748b494d4cd38c6db6ae4409d38f56819ff3f 100644 (file)
@@ -406,20 +406,17 @@ static void dma_free_dma_buffers(struct snd_pcm *pcm)
        }
 }
 
-static u64 dma_mask = DMA_BIT_MASK(32);
-
 static int dma_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
        pr_debug("Entered %s\n", __func__);
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &dma_mask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = preallocate_dma_buffer(pcm,
index b302f3b7a587f3e480586419affc8e300c80ad8d..2c4d2505a19e3cce79a36bcb9b52d31df9df067d 100644 (file)
@@ -702,13 +702,6 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
        }
        writel(mod, i2s->addr + I2SMOD);
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               snd_soc_dai_set_dma_data(dai, substream,
-                       (void *)&i2s->dma_playback);
-       else
-               snd_soc_dai_set_dma_data(dai, substream,
-                       (void *)&i2s->dma_capture);
-
        i2s->frmclk = params_rate(params);
 
        return 0;
@@ -970,6 +963,8 @@ static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
        }
        clk_prepare_enable(i2s->clk);
 
+       snd_soc_dai_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
+
        if (other) {
                other->addr = i2s->addr;
                other->clk = i2s->clk;
@@ -1060,7 +1055,7 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
        i2s->i2s_dai_drv.ops = &samsung_i2s_dai_ops;
        i2s->i2s_dai_drv.suspend = i2s_suspend;
        i2s->i2s_dai_drv.resume = i2s_resume;
-       i2s->i2s_dai_drv.playback.channels_min = 2;
+       i2s->i2s_dai_drv.playback.channels_min = 1;
        i2s->i2s_dai_drv.playback.channels_max = 2;
        i2s->i2s_dai_drv.playback.rates = SAMSUNG_I2S_RATES;
        i2s->i2s_dai_drv.playback.formats = SAMSUNG_I2S_FMTS;
@@ -1143,9 +1138,9 @@ static int samsung_i2s_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "Unable to get drvdata\n");
                        return -EFAULT;
                }
-               snd_soc_register_component(&sec_dai->pdev->dev,
-                                          &samsung_i2s_component,
-                                          &sec_dai->i2s_dai_drv, 1);
+               devm_snd_soc_register_component(&sec_dai->pdev->dev,
+                                               &samsung_i2s_component,
+                                               &sec_dai->i2s_dai_drv, 1);
                samsung_asoc_dma_platform_register(&pdev->dev);
                return 0;
        }
@@ -1258,8 +1253,9 @@ static int samsung_i2s_probe(struct platform_device *pdev)
                goto err;
        }
 
-       snd_soc_register_component(&pri_dai->pdev->dev, &samsung_i2s_component,
-                                  &pri_dai->i2s_dai_drv, 1);
+       devm_snd_soc_register_component(&pri_dai->pdev->dev,
+                                       &samsung_i2s_component,
+                                       &pri_dai->i2s_dai_drv, 1);
 
        pm_runtime_enable(&pdev->dev);
 
@@ -1294,7 +1290,6 @@ static int samsung_i2s_remove(struct platform_device *pdev)
        i2s->sec_dai = NULL;
 
        samsung_asoc_dma_platform_unregister(&pdev->dev);
-       snd_soc_unregister_component(&pdev->dev);
 
        return 0;
 }
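The samsung i2s change replaces the per-substream snd_soc_dai_set_dma_data() calls in hw_params() with a single snd_soc_dai_init_dma_data() call at DAI probe time, which fills dai->playback_dma_data and dai->capture_dma_data once. A hypothetical sketch (the real driver uses its own DMA parameter type; snd_dmaengine_dai_dma_data is assumed here for illustration):

#include <sound/dmaengine_pcm.h>
#include <sound/soc.h>

struct foo_i2s {
	struct snd_dmaengine_dai_dma_data dma_playback;
	struct snd_dmaengine_dai_dma_data dma_capture;
};

static int foo_i2s_dai_probe(struct snd_soc_dai *dai)
{
	struct foo_i2s *i2s = snd_soc_dai_get_drvdata(dai);

	/* hand both directions' DMA parameters to the core up front */
	snd_soc_dai_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);

	return 0;
}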
index ce1e1e16f250affafbc333d165c2843e043bfa23..e4f318fc2f82bb048f5047a6773f1a2c8be40902 100644 (file)
@@ -383,18 +383,15 @@ static int preallocate_idma_buffer(struct snd_pcm *pcm, int stream)
        return 0;
 }
 
-static u64 idma_mask = DMA_BIT_MASK(32);
-
 static int idma_new(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_card *card = rtd->card->snd_card;
        struct snd_pcm *pcm = rtd->pcm;
-       int ret = 0;
+       int ret;
 
-       if (!card->dev->dma_mask)
-               card->dev->dma_mask = &idma_mask;
-       if (!card->dev->coherent_dma_mask)
-               card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+       ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
        if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) {
                ret = preallocate_idma_buffer(pcm,
index 5fd7a05a9b9e2fe9730a962bcdca73482a9e5d71..b072bd107b3150f8ca7e013e5190bfc285d78c4e 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "../codecs/wm8994.h"
 #include <sound/pcm_params.h>
+#include <sound/soc.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -193,7 +194,7 @@ static int smdk_audio_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, board);
 
-       ret = snd_soc_register_card(card);
+       ret = devm_snd_soc_register_card(&pdev->dev, card);
 
        if (ret)
                dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret);
@@ -201,23 +202,14 @@ static int smdk_audio_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int smdk_audio_remove(struct platform_device *pdev)
-{
-       struct snd_soc_card *card = platform_get_drvdata(pdev);
-
-       snd_soc_unregister_card(card);
-
-       return 0;
-}
-
 static struct platform_driver smdk_audio_driver = {
        .driver         = {
                .name   = "smdk-audio-wm8894",
                .owner  = THIS_MODULE,
                .of_match_table = of_match_ptr(samsung_wm8994_of_match),
+               .pm     = &snd_soc_pm_ops,
        },
        .probe          = smdk_audio_probe,
-       .remove         = smdk_audio_remove,
 };
 
 module_platform_driver(smdk_audio_driver);
index d80deb7ccf13f7395ef79d78b2e51704a2844e4b..9430097979a580c972a989738bdeb9ada3557e5e 100644 (file)
@@ -8,7 +8,6 @@
  * for more details.
  */
 #include <linux/sh_clk.h>
-#include <mach/clock.h>
 #include "rsnd.h"
 
 #define CLKA   0
@@ -22,6 +21,7 @@ struct rsnd_adg {
 
        int rate_of_441khz_div_6;
        int rate_of_48khz_div_6;
+       u32 ckr;
 };
 
 #define for_each_rsnd_clk(pos, adg, i)         \
@@ -116,6 +116,11 @@ int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *mod, unsigned int rate)
 
 found_clock:
 
+       /* see rsnd_adg_ssi_clk_init() */
+       rsnd_mod_bset(mod, SSICKR, 0x00FF0000, adg->ckr);
+       rsnd_mod_write(mod, BRRA,  0x00000002); /* 1/6 */
+       rsnd_mod_write(mod, BRRB,  0x00000002); /* 1/6 */
+
        /*
         * This "mod" = "ssi" here.
         * we can get "ssi id" from mod
@@ -182,9 +187,7 @@ static void rsnd_adg_ssi_clk_init(struct rsnd_priv *priv, struct rsnd_adg *adg)
                }
        }
 
-       rsnd_priv_bset(priv, SSICKR, 0x00FF0000, ckr);
-       rsnd_priv_write(priv, BRRA,  0x00000002); /* 1/6 */
-       rsnd_priv_write(priv, BRRB,  0x00000002); /* 1/6 */
+       adg->ckr = ckr;
 }
 
 int rsnd_adg_probe(struct platform_device *pdev,
index a35706028514056e162afa491504d3864c6fa5c7..b234ed663073e76068830d56a806567856e4f62d 100644 (file)
  *     rsnd_platform functions
  */
 #define rsnd_platform_call(priv, dai, func, param...)  \
-       (!(priv->info->func) ? -ENODEV :                \
+       (!(priv->info->func) ? 0 :              \
         priv->info->func(param))
 
-
-/*
- *     basic function
- */
-u32 rsnd_read(struct rsnd_priv *priv,
-             struct rsnd_mod *mod, enum rsnd_reg reg)
-{
-       void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
-
-       BUG_ON(!base);
-
-       return ioread32(base);
-}
-
-void rsnd_write(struct rsnd_priv *priv,
-               struct rsnd_mod *mod,
-               enum rsnd_reg reg, u32 data)
-{
-       void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
-       struct device *dev = rsnd_priv_to_dev(priv);
-
-       BUG_ON(!base);
-
-       dev_dbg(dev, "w %p : %08x\n", base, data);
-
-       iowrite32(data, base);
-}
-
-void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
-              enum rsnd_reg reg, u32 mask, u32 data)
-{
-       void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
-       struct device *dev = rsnd_priv_to_dev(priv);
-       u32 val;
-
-       BUG_ON(!base);
-
-       val = ioread32(base);
-       val &= ~mask;
-       val |= data & mask;
-       iowrite32(val, base);
-
-       dev_dbg(dev, "s %p : %08x\n", base, val);
-}
-
 /*
  *     rsnd_mod functions
  */
@@ -363,6 +318,9 @@ int rsnd_dai_id(struct rsnd_priv *priv, struct rsnd_dai *rdai)
 
 struct rsnd_dai *rsnd_dai_get(struct rsnd_priv *priv, int id)
 {
+       if ((id < 0) || (id >= rsnd_dai_nr(priv)))
+               return NULL;
+
        return priv->rdai + id;
 }
 
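The rsnd_platform_call() tweak above turns a missing platform callback into a successful no-op (0) instead of -ENODEV, i.e. the callbacks become optional, and the new rsnd_dai_get() bounds check returns NULL for an out-of-range id. The optional-callback idea in isolation, with a hypothetical ops struct:

struct foo_platform_ops {
	int (*start)(int id);	/* may legitimately be NULL */
};

static int foo_call_start(const struct foo_platform_ops *ops, int id)
{
	/* absent callback == nothing to do == success */
	return ops->start ? ops->start(id) : 0;
}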
index babb203b43b7bca938451d23fa3a5db99b542409..61212ee97c28269a0cd245e424636c4f38efc4c7 100644 (file)
 #include "rsnd.h"
 
 struct rsnd_gen_ops {
+       int (*probe)(struct platform_device *pdev,
+                    struct rcar_snd_info *info,
+                    struct rsnd_priv *priv);
+       void (*remove)(struct platform_device *pdev,
+                     struct rsnd_priv *priv);
        int (*path_init)(struct rsnd_priv *priv,
                         struct rsnd_dai *rdai,
                         struct rsnd_dai_stream *io);
@@ -19,21 +24,97 @@ struct rsnd_gen_ops {
                         struct rsnd_dai_stream *io);
 };
 
-struct rsnd_gen_reg_map {
-       int index;      /* -1 : not supported */
-       u32 offset_id;  /* offset of ssi0, ssi1, ssi2... */
-       u32 offset_adr; /* offset of SSICR, SSISR, ... */
-};
-
 struct rsnd_gen {
        void __iomem *base[RSND_BASE_MAX];
 
-       struct rsnd_gen_reg_map reg_map[RSND_REG_MAX];
        struct rsnd_gen_ops *ops;
+
+       struct regmap *regmap;
+       struct regmap_field *regs[RSND_REG_MAX];
 };
 
 #define rsnd_priv_to_gen(p)    ((struct rsnd_gen *)(p)->gen)
 
+#define RSND_REG_SET(gen, id, reg_id, offset, _id_offset, _id_size)    \
+       [id] = {                                                        \
+               .reg = (unsigned int)gen->base[reg_id] + offset,        \
+               .lsb = 0,                                               \
+               .msb = 31,                                              \
+               .id_size = _id_size,                                    \
+               .id_offset = _id_offset,                                \
+       }
+
+/*
+ *             basic function
+ */
+static int rsnd_regmap_write32(void *context, const void *_data, size_t count)
+{
+       struct rsnd_priv *priv = context;
+       struct device *dev = rsnd_priv_to_dev(priv);
+       u32 *data = (u32 *)_data;
+       u32 val = data[1];
+       void __iomem *reg = (void *)data[0];
+
+       iowrite32(val, reg);
+
+       dev_dbg(dev, "w %p : %08x\n", reg, val);
+
+       return 0;
+}
+
+static int rsnd_regmap_read32(void *context,
+                             const void *_data, size_t reg_size,
+                             void *_val, size_t val_size)
+{
+       struct rsnd_priv *priv = context;
+       struct device *dev = rsnd_priv_to_dev(priv);
+       u32 *data = (u32 *)_data;
+       u32 *val = (u32 *)_val;
+       void __iomem *reg = (void *)data[0];
+
+       *val = ioread32(reg);
+
+       dev_dbg(dev, "r %p : %08x\n", reg, *val);
+
+       return 0;
+}
+
+static struct regmap_bus rsnd_regmap_bus = {
+       .write                          = rsnd_regmap_write32,
+       .read                           = rsnd_regmap_read32,
+       .reg_format_endian_default      = REGMAP_ENDIAN_NATIVE,
+       .val_format_endian_default      = REGMAP_ENDIAN_NATIVE,
+};
+
+u32 rsnd_read(struct rsnd_priv *priv,
+             struct rsnd_mod *mod, enum rsnd_reg reg)
+{
+       struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+       u32 val;
+
+       regmap_fields_read(gen->regs[reg], rsnd_mod_id(mod), &val);
+
+       return val;
+}
+
+void rsnd_write(struct rsnd_priv *priv,
+               struct rsnd_mod *mod,
+               enum rsnd_reg reg, u32 data)
+{
+       struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+       regmap_fields_write(gen->regs[reg], rsnd_mod_id(mod), data);
+}
+
+void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
+              enum rsnd_reg reg, u32 mask, u32 data)
+{
+       struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+       regmap_fields_update_bits(gen->regs[reg], rsnd_mod_id(mod),
+                                 mask, data);
+}
+
 /*
  *             Gen2
  *             will be filled in the future
@@ -98,44 +179,64 @@ static int rsnd_gen1_path_exit(struct rsnd_priv *priv,
        return ret;
 }
 
-static struct rsnd_gen_ops rsnd_gen1_ops = {
-       .path_init      = rsnd_gen1_path_init,
-       .path_exit      = rsnd_gen1_path_exit,
-};
+/* single address mapping */
+#define RSND_GEN1_S_REG(gen, reg, id, offset)  \
+       RSND_REG_SET(gen, RSND_REG_##id, RSND_GEN1_##reg, offset, 0, 9)
 
-#define RSND_GEN1_REG_MAP(g, s, i, oi, oa)                             \
-       do {                                                            \
-               (g)->reg_map[RSND_REG_##i].index  = RSND_GEN1_##s;      \
-               (g)->reg_map[RSND_REG_##i].offset_id = oi;              \
-               (g)->reg_map[RSND_REG_##i].offset_adr = oa;             \
-       } while (0)
+/* multi address mapping */
+#define RSND_GEN1_M_REG(gen, reg, id, offset, _id_offset)      \
+       RSND_REG_SET(gen, RSND_REG_##id, RSND_GEN1_##reg, offset, _id_offset, 9)
 
-static void rsnd_gen1_reg_map_init(struct rsnd_gen *gen)
+static int rsnd_gen1_regmap_init(struct rsnd_priv *priv, struct rsnd_gen *gen)
 {
-       RSND_GEN1_REG_MAP(gen, SRU,     SRC_ROUTE_SEL,  0x0,    0x00);
-       RSND_GEN1_REG_MAP(gen, SRU,     SRC_TMG_SEL0,   0x0,    0x08);
-       RSND_GEN1_REG_MAP(gen, SRU,     SRC_TMG_SEL1,   0x0,    0x0c);
-       RSND_GEN1_REG_MAP(gen, SRU,     SRC_TMG_SEL2,   0x0,    0x10);
-       RSND_GEN1_REG_MAP(gen, SRU,     SRC_CTRL,       0x0,    0xc0);
-       RSND_GEN1_REG_MAP(gen, SRU,     SSI_MODE0,      0x0,    0xD0);
-       RSND_GEN1_REG_MAP(gen, SRU,     SSI_MODE1,      0x0,    0xD4);
-       RSND_GEN1_REG_MAP(gen, SRU,     BUSIF_MODE,     0x4,    0x20);
-       RSND_GEN1_REG_MAP(gen, SRU,     BUSIF_ADINR,    0x40,   0x214);
-
-       RSND_GEN1_REG_MAP(gen, ADG,     BRRA,           0x0,    0x00);
-       RSND_GEN1_REG_MAP(gen, ADG,     BRRB,           0x0,    0x04);
-       RSND_GEN1_REG_MAP(gen, ADG,     SSICKR,         0x0,    0x08);
-       RSND_GEN1_REG_MAP(gen, ADG,     AUDIO_CLK_SEL0, 0x0,    0x0c);
-       RSND_GEN1_REG_MAP(gen, ADG,     AUDIO_CLK_SEL1, 0x0,    0x10);
-       RSND_GEN1_REG_MAP(gen, ADG,     AUDIO_CLK_SEL3, 0x0,    0x18);
-       RSND_GEN1_REG_MAP(gen, ADG,     AUDIO_CLK_SEL4, 0x0,    0x1c);
-       RSND_GEN1_REG_MAP(gen, ADG,     AUDIO_CLK_SEL5, 0x0,    0x20);
-
-       RSND_GEN1_REG_MAP(gen, SSI,     SSICR,          0x40,   0x00);
-       RSND_GEN1_REG_MAP(gen, SSI,     SSISR,          0x40,   0x04);
-       RSND_GEN1_REG_MAP(gen, SSI,     SSITDR,         0x40,   0x08);
-       RSND_GEN1_REG_MAP(gen, SSI,     SSIRDR,         0x40,   0x0c);
-       RSND_GEN1_REG_MAP(gen, SSI,     SSIWSR,         0x40,   0x20);
+       int i;
+       struct device *dev = rsnd_priv_to_dev(priv);
+       struct regmap_config regc;
+       struct reg_field regf[RSND_REG_MAX] = {
+               RSND_GEN1_S_REG(gen, SRU,       SRC_ROUTE_SEL,  0x00),
+               RSND_GEN1_S_REG(gen, SRU,       SRC_TMG_SEL0,   0x08),
+               RSND_GEN1_S_REG(gen, SRU,       SRC_TMG_SEL1,   0x0c),
+               RSND_GEN1_S_REG(gen, SRU,       SRC_TMG_SEL2,   0x10),
+               RSND_GEN1_S_REG(gen, SRU,       SRC_CTRL,       0xc0),
+               RSND_GEN1_S_REG(gen, SRU,       SSI_MODE0,      0xD0),
+               RSND_GEN1_S_REG(gen, SRU,       SSI_MODE1,      0xD4),
+               RSND_GEN1_M_REG(gen, SRU,       BUSIF_MODE,     0x20,   0x4),
+               RSND_GEN1_M_REG(gen, SRU,       BUSIF_ADINR,    0x214,  0x40),
+
+               RSND_GEN1_S_REG(gen, ADG,       BRRA,           0x00),
+               RSND_GEN1_S_REG(gen, ADG,       BRRB,           0x04),
+               RSND_GEN1_S_REG(gen, ADG,       SSICKR,         0x08),
+               RSND_GEN1_S_REG(gen, ADG,       AUDIO_CLK_SEL0, 0x0c),
+               RSND_GEN1_S_REG(gen, ADG,       AUDIO_CLK_SEL1, 0x10),
+               RSND_GEN1_S_REG(gen, ADG,       AUDIO_CLK_SEL3, 0x18),
+               RSND_GEN1_S_REG(gen, ADG,       AUDIO_CLK_SEL4, 0x1c),
+               RSND_GEN1_S_REG(gen, ADG,       AUDIO_CLK_SEL5, 0x20),
+
+               RSND_GEN1_M_REG(gen, SSI,       SSICR,          0x00,   0x40),
+               RSND_GEN1_M_REG(gen, SSI,       SSISR,          0x04,   0x40),
+               RSND_GEN1_M_REG(gen, SSI,       SSITDR,         0x08,   0x40),
+               RSND_GEN1_M_REG(gen, SSI,       SSIRDR,         0x0c,   0x40),
+               RSND_GEN1_M_REG(gen, SSI,       SSIWSR,         0x20,   0x40),
+       };
+
+       memset(&regc, 0, sizeof(regc));
+       regc.reg_bits = 32;
+       regc.val_bits = 32;
+
+       gen->regmap = devm_regmap_init(dev, &rsnd_regmap_bus, priv, &regc);
+       if (IS_ERR(gen->regmap)) {
+               dev_err(dev, "regmap error %ld\n", PTR_ERR(gen->regmap));
+               return PTR_ERR(gen->regmap);
+       }
+
+       for (i = 0; i < RSND_REG_MAX; i++) {
+               gen->regs[i] = devm_regmap_field_alloc(dev, gen->regmap, regf[i]);
+               if (IS_ERR(gen->regs[i]))
+                       return PTR_ERR(gen->regs[i]);
+
+       }
+
+       return 0;
 }
 
 static int rsnd_gen1_probe(struct platform_device *pdev,
@@ -147,6 +248,7 @@ static int rsnd_gen1_probe(struct platform_device *pdev,
        struct resource *sru_res;
        struct resource *adg_res;
        struct resource *ssi_res;
+       int ret;
 
        /*
         * map address
@@ -163,8 +265,9 @@ static int rsnd_gen1_probe(struct platform_device *pdev,
            IS_ERR(gen->base[RSND_GEN1_SSI]))
                return -ENODEV;
 
-       gen->ops = &rsnd_gen1_ops;
-       rsnd_gen1_reg_map_init(gen);
+       ret = rsnd_gen1_regmap_init(priv, gen);
+       if (ret < 0)
+               return ret;
 
        dev_dbg(dev, "Gen1 device probed\n");
        dev_dbg(dev, "SRU : %08x => %p\n",      sru_res->start,
@@ -183,6 +286,13 @@ static void rsnd_gen1_remove(struct platform_device *pdev,
 {
 }
 
+static struct rsnd_gen_ops rsnd_gen1_ops = {
+       .probe          = rsnd_gen1_probe,
+       .remove         = rsnd_gen1_remove,
+       .path_init      = rsnd_gen1_path_init,
+       .path_exit      = rsnd_gen1_path_exit,
+};
+
 /*
  *             Gen
  */
@@ -204,46 +314,12 @@ int rsnd_gen_path_exit(struct rsnd_priv *priv,
        return gen->ops->path_exit(priv, rdai, io);
 }
 
-void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
-                              struct rsnd_mod *mod,
-                              enum rsnd_reg reg)
-{
-       struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
-       struct device *dev = rsnd_priv_to_dev(priv);
-       int index;
-       u32 offset_id, offset_adr;
-
-       if (reg >= RSND_REG_MAX) {
-               dev_err(dev, "rsnd_reg reg error\n");
-               return NULL;
-       }
-
-       index           = gen->reg_map[reg].index;
-       offset_id       = gen->reg_map[reg].offset_id;
-       offset_adr      = gen->reg_map[reg].offset_adr;
-
-       if (index < 0) {
-               dev_err(dev, "unsupported reg access %d\n", reg);
-               return NULL;
-       }
-
-       if (offset_id && mod)
-               offset_id *= rsnd_mod_id(mod);
-
-       /*
-        * index/offset were set on gen1/gen2
-        */
-
-       return gen->base[index] + offset_id + offset_adr;
-}
-
 int rsnd_gen_probe(struct platform_device *pdev,
                   struct rcar_snd_info *info,
                   struct rsnd_priv *priv)
 {
        struct device *dev = rsnd_priv_to_dev(priv);
        struct rsnd_gen *gen;
-       int i;
 
        gen = devm_kzalloc(dev, sizeof(*gen), GFP_KERNEL);
        if (!gen) {
@@ -251,30 +327,23 @@ int rsnd_gen_probe(struct platform_device *pdev,
                return -ENOMEM;
        }
 
-       priv->gen = gen;
-
-       /*
-        * see
-        *      rsnd_reg_get()
-        *      rsnd_gen_probe()
-        */
-       for (i = 0; i < RSND_REG_MAX; i++)
-               gen->reg_map[i].index = -1;
-
-       /*
-        *      init each module
-        */
        if (rsnd_is_gen1(priv))
-               return rsnd_gen1_probe(pdev, info, priv);
+               gen->ops = &rsnd_gen1_ops;
 
-       dev_err(dev, "unknown generation R-Car sound device\n");
+       if (!gen->ops) {
+               dev_err(dev, "unknown generation R-Car sound device\n");
+               return -ENODEV;
+       }
 
-       return -ENODEV;
+       priv->gen = gen;
+
+       return gen->ops->probe(pdev, info, priv);
 }
 
 void rsnd_gen_remove(struct platform_device *pdev,
                     struct rsnd_priv *priv)
 {
-       if (rsnd_is_gen1(priv))
-               rsnd_gen1_remove(pdev, priv);
+       struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+       gen->ops->remove(pdev, priv);
 }
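rcar/gen.c drops its open-coded ioread32()/iowrite32() helpers: each register is now described once as a struct reg_field, devm_regmap_field_alloc() turns that into a handle, and regmap_fields_read()/regmap_fields_write()/regmap_fields_update_bits() select the per-module instance by id. A condensed, hypothetical sketch of the resulting access pattern:

#include <linux/regmap.h>
#include <linux/types.h>

struct foo_gen {
	/* one regmap_field handle per register, as in the patch */
	struct regmap_field *regs[8];
};

static u32 foo_reg_read(struct foo_gen *gen, int reg, int id)
{
	u32 val;

	regmap_fields_read(gen->regs[reg], id, &val);
	return val;
}

static void foo_reg_bset(struct foo_gen *gen, int reg, int id,
			 u32 mask, u32 data)
{
	/* masked read-modify-write, replacing the old hand-rolled bset helper */
	regmap_fields_update_bits(gen->regs[reg], id, mask, data);
}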
index 9cc6986a8cfb347465a9aa442b3b4e79a4f8d9f6..9e463e50e7e62b81fabeaeecf39f50de4206eeaf 100644 (file)
@@ -78,10 +78,6 @@ struct rsnd_dai_stream;
 #define rsnd_mod_bset(m, r, s, d) \
        rsnd_bset(rsnd_mod_to_priv(m), m, RSND_REG_##r, s, d)
 
-#define rsnd_priv_read(p, r)           rsnd_read(p, NULL, RSND_REG_##r)
-#define rsnd_priv_write(p, r, d)       rsnd_write(p, NULL, RSND_REG_##r, d)
-#define rsnd_priv_bset(p, r, s, d)     rsnd_bset(p, NULL, RSND_REG_##r, s, d)
-
 u32 rsnd_read(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg);
 void rsnd_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
                enum rsnd_reg reg, u32 data);
@@ -220,8 +216,8 @@ int rsnd_gen_path_exit(struct rsnd_priv *priv,
 void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
                               struct rsnd_mod *mod,
                               enum rsnd_reg reg);
-#define rsnd_is_gen1(s)                ((s)->info->flags & RSND_GEN1)
-#define rsnd_is_gen2(s)                ((s)->info->flags & RSND_GEN2)
+#define rsnd_is_gen1(s)                (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN1)
+#define rsnd_is_gen2(s)                (((s)->info->flags & RSND_GEN_MASK) == RSND_GEN2)
 
 /*
  *     R-Car ADG
@@ -285,6 +281,7 @@ int rsnd_scu_probe(struct platform_device *pdev,
 void rsnd_scu_remove(struct platform_device *pdev,
                     struct rsnd_priv *priv);
 struct rsnd_mod *rsnd_scu_mod_get(struct rsnd_priv *priv, int id);
+bool rsnd_scu_hpbif_is_enable(struct rsnd_mod *mod);
 #define rsnd_scu_nr(priv) ((priv)->scu_nr)
 
 /*
index 2df2e9150b893498ffbc15991b1a47e2f9dfd47a..1ab1bce6be7f644affa15b4855b3f2f398d125d9 100644 (file)
@@ -146,20 +146,26 @@ static int rsnd_scu_set_hpbif(struct rsnd_priv *priv,
        return 0;
 }
 
+bool rsnd_scu_hpbif_is_enable(struct rsnd_mod *mod)
+{
+       struct rsnd_scu *scu = rsnd_mod_to_scu(mod);
+       u32 flags = rsnd_scu_mode_flags(scu);
+
+       return !!(flags & RSND_SCU_USE_HPBIF);
+}
+
 static int rsnd_scu_start(struct rsnd_mod *mod,
                          struct rsnd_dai *rdai,
                          struct rsnd_dai_stream *io)
 {
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
-       struct rsnd_scu *scu = rsnd_mod_to_scu(mod);
        struct device *dev = rsnd_priv_to_dev(priv);
-       u32 flags = rsnd_scu_mode_flags(scu);
        int ret;
 
        /*
         * SCU will be used if it has RSND_SCU_USE_HPBIF flags
         */
-       if (!(flags & RSND_SCU_USE_HPBIF)) {
+       if (!rsnd_scu_hpbif_is_enable(mod)) {
                /* it uses PIO transfer */
                dev_dbg(dev, "%s%d is not used\n",
                        rsnd_mod_name(mod), rsnd_mod_id(mod));
index fae26d3f79d26bf827cc931bd16fe186800f777d..b71cf9d7dd3febe600465b40cdc5dcaaedb5b6fb 100644 (file)
@@ -101,29 +101,30 @@ struct rsnd_ssiu {
 #define rsnd_ssi_to_ssiu(ssi)\
        (((struct rsnd_ssiu *)((ssi) - rsnd_mod_id(&(ssi)->mod))) - 1)
 
-static void rsnd_ssi_mode_init(struct rsnd_priv *priv,
-                              struct rsnd_ssiu *ssiu)
+static void rsnd_ssi_mode_set(struct rsnd_priv *priv,
+                             struct rsnd_dai *rdai,
+                             struct rsnd_ssi *ssi)
 {
        struct device *dev = rsnd_priv_to_dev(priv);
-       struct rsnd_ssi *ssi;
+       struct rsnd_mod *scu;
+       struct rsnd_ssiu *ssiu = rsnd_ssi_to_ssiu(ssi);
+       int id = rsnd_mod_id(&ssi->mod);
        u32 flags;
        u32 val;
-       int i;
+
+       scu   = rsnd_scu_mod_get(priv, rsnd_mod_id(&ssi->mod));
 
        /*
         * SSI_MODE0
         */
-       ssiu->ssi_mode0 = 0;
-       for_each_rsnd_ssi(ssi, priv, i) {
-               flags = rsnd_ssi_mode_flags(ssi);
-
-               /* see also BUSIF_MODE */
-               if (!(flags & RSND_SSI_DEPENDENT)) {
-                       ssiu->ssi_mode0 |= (1 << i);
-                       dev_dbg(dev, "SSI%d uses INDEPENDENT mode\n", i);
-               } else {
-                       dev_dbg(dev, "SSI%d uses DEPENDENT mode\n", i);
-               }
+
+       /* see also BUSIF_MODE */
+       if (rsnd_scu_hpbif_is_enable(scu)) {
+               ssiu->ssi_mode0 &= ~(1 << id);
+               dev_dbg(dev, "SSI%d uses DEPENDENT mode\n", id);
+       } else {
+               ssiu->ssi_mode0 |= (1 << id);
+               dev_dbg(dev, "SSI%d uses INDEPENDENT mode\n", id);
        }
 
        /*
@@ -132,7 +133,7 @@ static void rsnd_ssi_mode_init(struct rsnd_priv *priv,
 #define ssi_parent_set(p, sync, adg, ext)              \
        do {                                            \
                ssi->parent = ssiu->ssi + p;            \
-               if (flags & RSND_SSI_CLK_FROM_ADG)      \
+               if (rsnd_rdai_is_clk_master(rdai))      \
                        val = adg;                      \
                else                                    \
                        val = ext;                      \
@@ -140,15 +141,11 @@ static void rsnd_ssi_mode_init(struct rsnd_priv *priv,
                        val |= sync;                    \
        } while (0)
 
-       ssiu->ssi_mode1 = 0;
-       for_each_rsnd_ssi(ssi, priv, i) {
-               flags = rsnd_ssi_mode_flags(ssi);
-
-               if (!(flags & RSND_SSI_CLK_PIN_SHARE))
-                       continue;
+       flags = rsnd_ssi_mode_flags(ssi);
+       if (flags & RSND_SSI_CLK_PIN_SHARE) {
 
                val = 0;
-               switch (i) {
+               switch (id) {
                case 1:
                        ssi_parent_set(0, (1 << 4), (0x2 << 0), (0x1 << 0));
                        break;
@@ -165,11 +162,6 @@ static void rsnd_ssi_mode_init(struct rsnd_priv *priv,
 
                ssiu->ssi_mode1 |= val;
        }
-}
-
-static void rsnd_ssi_mode_set(struct rsnd_ssi *ssi)
-{
-       struct rsnd_ssiu *ssiu = rsnd_ssi_to_ssiu(ssi);
 
        rsnd_mod_write(&ssi->mod, SSI_MODE0, ssiu->ssi_mode0);
        rsnd_mod_write(&ssi->mod, SSI_MODE1, ssiu->ssi_mode1);
@@ -379,7 +371,7 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
        ssi->cr_own     = cr;
        ssi->err        = -1; /* ignore 1st error */
 
-       rsnd_ssi_mode_set(ssi);
+       rsnd_ssi_mode_set(priv, rdai, ssi);
 
        dev_dbg(dev, "%s.%d init\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
 
@@ -706,8 +698,6 @@ int rsnd_ssi_probe(struct platform_device *pdev,
                rsnd_mod_init(priv, &ssi->mod, ops, i);
        }
 
-       rsnd_ssi_mode_init(priv, ssiu);
-
        dev_dbg(dev, "ssi probed\n");
 
        return 0;
index e72f55428f0ba8ece44cdea8f6f720efc5c1d868..1b6663f45b3435c40fe751491c2c228121c62e00 100644 (file)
  *  option) any later version.
  */
 
-#include <linux/i2c.h>
-#include <linux/spi/spi.h>
 #include <sound/soc.h>
-#include <linux/bitmap.h>
-#include <linux/rbtree.h>
 #include <linux/export.h>
+#include <linux/slab.h>
 
 #include <trace/events/asoc.h>
 
@@ -66,126 +63,42 @@ static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
        return -1;
 }
 
-static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
+int snd_soc_cache_init(struct snd_soc_codec *codec)
 {
-       int i;
-       int ret;
-       const struct snd_soc_codec_driver *codec_drv;
-       unsigned int val;
+       const struct snd_soc_codec_driver *codec_drv = codec->driver;
+       size_t reg_size;
 
-       codec_drv = codec->driver;
-       for (i = 0; i < codec_drv->reg_cache_size; ++i) {
-               ret = snd_soc_cache_read(codec, i, &val);
-               if (ret)
-                       return ret;
-               if (codec->reg_def_copy)
-                       if (snd_soc_get_cache_val(codec->reg_def_copy,
-                                                 i, codec_drv->reg_word_size) == val)
-                               continue;
+       reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
 
-               WARN_ON(!snd_soc_codec_writable_register(codec, i));
-
-               ret = snd_soc_write(codec, i, val);
-               if (ret)
-                       return ret;
-               dev_dbg(codec->dev, "ASoC: Synced register %#x, value = %#x\n",
-                       i, val);
-       }
-       return 0;
-}
-
-static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
-                                   unsigned int reg, unsigned int value)
-{
-       snd_soc_set_cache_val(codec->reg_cache, reg, value,
-                             codec->driver->reg_word_size);
-       return 0;
-}
-
-static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
-                                  unsigned int reg, unsigned int *value)
-{
-       *value = snd_soc_get_cache_val(codec->reg_cache, reg,
-                                      codec->driver->reg_word_size);
-       return 0;
-}
+       mutex_init(&codec->cache_rw_mutex);
 
-static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
-{
-       if (!codec->reg_cache)
-               return 0;
-       kfree(codec->reg_cache);
-       codec->reg_cache = NULL;
-       return 0;
-}
+       dev_dbg(codec->dev, "ASoC: Initializing cache for %s codec\n",
+                               codec->name);
 
-static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
-{
-       if (codec->reg_def_copy)
-               codec->reg_cache = kmemdup(codec->reg_def_copy,
-                                          codec->reg_size, GFP_KERNEL);
+       if (codec_drv->reg_cache_default)
+               codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
+                                          reg_size, GFP_KERNEL);
        else
-               codec->reg_cache = kzalloc(codec->reg_size, GFP_KERNEL);
+               codec->reg_cache = kzalloc(reg_size, GFP_KERNEL);
        if (!codec->reg_cache)
                return -ENOMEM;
 
        return 0;
 }
 
-/* an array of all supported compression types */
-static const struct snd_soc_cache_ops cache_types[] = {
-       /* Flat *must* be the first entry for fallback */
-       {
-               .id = SND_SOC_FLAT_COMPRESSION,
-               .name = "flat",
-               .init = snd_soc_flat_cache_init,
-               .exit = snd_soc_flat_cache_exit,
-               .read = snd_soc_flat_cache_read,
-               .write = snd_soc_flat_cache_write,
-               .sync = snd_soc_flat_cache_sync
-       },
-};
-
-int snd_soc_cache_init(struct snd_soc_codec *codec)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
-               if (cache_types[i].id == codec->compress_type)
-                       break;
-
-       /* Fall back to flat compression */
-       if (i == ARRAY_SIZE(cache_types)) {
-               dev_warn(codec->dev, "ASoC: Could not match compress type: %d\n",
-                        codec->compress_type);
-               i = 0;
-       }
-
-       mutex_init(&codec->cache_rw_mutex);
-       codec->cache_ops = &cache_types[i];
-
-       if (codec->cache_ops->init) {
-               if (codec->cache_ops->name)
-                       dev_dbg(codec->dev, "ASoC: Initializing %s cache for %s codec\n",
-                               codec->cache_ops->name, codec->name);
-               return codec->cache_ops->init(codec);
-       }
-       return -ENOSYS;
-}
-
 /*
  * NOTE: keep in mind that this function might be called
  * multiple times.
  */
 int snd_soc_cache_exit(struct snd_soc_codec *codec)
 {
-       if (codec->cache_ops && codec->cache_ops->exit) {
-               if (codec->cache_ops->name)
-                       dev_dbg(codec->dev, "ASoC: Destroying %s cache for %s codec\n",
-                               codec->cache_ops->name, codec->name);
-               return codec->cache_ops->exit(codec);
-       }
-       return -ENOSYS;
+       dev_dbg(codec->dev, "ASoC: Destroying cache for %s codec\n",
+                       codec->name);
+       if (!codec->reg_cache)
+               return 0;
+       kfree(codec->reg_cache);
+       codec->reg_cache = NULL;
+       return 0;
 }
 
 /**
@@ -198,18 +111,15 @@ int snd_soc_cache_exit(struct snd_soc_codec *codec)
 int snd_soc_cache_read(struct snd_soc_codec *codec,
                       unsigned int reg, unsigned int *value)
 {
-       int ret;
+       if (!value)
+               return -EINVAL;
 
        mutex_lock(&codec->cache_rw_mutex);
-
-       if (value && codec->cache_ops && codec->cache_ops->read) {
-               ret = codec->cache_ops->read(codec, reg, value);
-               mutex_unlock(&codec->cache_rw_mutex);
-               return ret;
-       }
-
+       *value = snd_soc_get_cache_val(codec->reg_cache, reg,
+                                      codec->driver->reg_word_size);
        mutex_unlock(&codec->cache_rw_mutex);
-       return -ENOSYS;
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_cache_read);
 
@@ -223,20 +133,42 @@ EXPORT_SYMBOL_GPL(snd_soc_cache_read);
 int snd_soc_cache_write(struct snd_soc_codec *codec,
                        unsigned int reg, unsigned int value)
 {
+       mutex_lock(&codec->cache_rw_mutex);
+       snd_soc_set_cache_val(codec->reg_cache, reg, value,
+                             codec->driver->reg_word_size);
+       mutex_unlock(&codec->cache_rw_mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_write);
+
+static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
+{
+       int i;
        int ret;
+       const struct snd_soc_codec_driver *codec_drv;
+       unsigned int val;
 
-       mutex_lock(&codec->cache_rw_mutex);
+       codec_drv = codec->driver;
+       for (i = 0; i < codec_drv->reg_cache_size; ++i) {
+               ret = snd_soc_cache_read(codec, i, &val);
+               if (ret)
+                       return ret;
+               if (codec_drv->reg_cache_default)
+                       if (snd_soc_get_cache_val(codec_drv->reg_cache_default,
+                                                 i, codec_drv->reg_word_size) == val)
+                               continue;
 
-       if (codec->cache_ops && codec->cache_ops->write) {
-               ret = codec->cache_ops->write(codec, reg, value);
-               mutex_unlock(&codec->cache_rw_mutex);
-               return ret;
-       }
+               WARN_ON(!snd_soc_codec_writable_register(codec, i));
 
-       mutex_unlock(&codec->cache_rw_mutex);
-       return -ENOSYS;
+               ret = snd_soc_write(codec, i, val);
+               if (ret)
+                       return ret;
+               dev_dbg(codec->dev, "ASoC: Synced register %#x, value = %#x\n",
+                       i, val);
+       }
+       return 0;
 }
-EXPORT_SYMBOL_GPL(snd_soc_cache_write);
 
 /**
  * snd_soc_cache_sync: Sync the register cache with the hardware.
@@ -249,92 +181,19 @@ EXPORT_SYMBOL_GPL(snd_soc_cache_write);
  */
 int snd_soc_cache_sync(struct snd_soc_codec *codec)
 {
+       const char *name = "flat";
        int ret;
-       const char *name;
 
-       if (!codec->cache_sync) {
+       if (!codec->cache_sync)
                return 0;
-       }
-
-       if (!codec->cache_ops || !codec->cache_ops->sync)
-               return -ENOSYS;
 
-       if (codec->cache_ops->name)
-               name = codec->cache_ops->name;
-       else
-               name = "unknown";
-
-       if (codec->cache_ops->name)
-               dev_dbg(codec->dev, "ASoC: Syncing %s cache for %s codec\n",
-                       codec->cache_ops->name, codec->name);
+       dev_dbg(codec->dev, "ASoC: Syncing cache for %s codec\n",
+               codec->name);
        trace_snd_soc_cache_sync(codec, name, "start");
-       ret = codec->cache_ops->sync(codec);
+       ret = snd_soc_flat_cache_sync(codec);
        if (!ret)
                codec->cache_sync = 0;
        trace_snd_soc_cache_sync(codec, name, "end");
        return ret;
 }
 EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
-
-static int snd_soc_get_reg_access_index(struct snd_soc_codec *codec,
-                                       unsigned int reg)
-{
-       const struct snd_soc_codec_driver *codec_drv;
-       unsigned int min, max, index;
-
-       codec_drv = codec->driver;
-       min = 0;
-       max = codec_drv->reg_access_size - 1;
-       do {
-               index = (min + max) / 2;
-               if (codec_drv->reg_access_default[index].reg == reg)
-                       return index;
-               if (codec_drv->reg_access_default[index].reg < reg)
-                       min = index + 1;
-               else
-                       max = index;
-       } while (min <= max);
-       return -1;
-}
-
-int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
-                                     unsigned int reg)
-{
-       int index;
-
-       if (reg >= codec->driver->reg_cache_size)
-               return 1;
-       index = snd_soc_get_reg_access_index(codec, reg);
-       if (index < 0)
-               return 0;
-       return codec->driver->reg_access_default[index].vol;
-}
-EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register);
-
-int snd_soc_default_readable_register(struct snd_soc_codec *codec,
-                                     unsigned int reg)
-{
-       int index;
-
-       if (reg >= codec->driver->reg_cache_size)
-               return 1;
-       index = snd_soc_get_reg_access_index(codec, reg);
-       if (index < 0)
-               return 0;
-       return codec->driver->reg_access_default[index].read;
-}
-EXPORT_SYMBOL_GPL(snd_soc_default_readable_register);
-
-int snd_soc_default_writable_register(struct snd_soc_codec *codec,
-                                     unsigned int reg)
-{
-       int index;
-
-       if (reg >= codec->driver->reg_cache_size)
-               return 1;
-       index = snd_soc_get_reg_access_index(codec, reg);
-       if (index < 0)
-               return 0;
-       return codec->driver->reg_access_default[index].write;
-}
-EXPORT_SYMBOL_GPL(snd_soc_default_writable_register);
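With the compressed-cache machinery removed, soc-cache.c is reduced to a flat word-per-register cache guarded by cache_rw_mutex, and snd_soc_cache_read()/snd_soc_cache_write() remain the public entry points. A driver-side usage sketch (the register number 0x10 is hypothetical):

#include <sound/soc.h>

static int foo_cached_set_bit(struct snd_soc_codec *codec)
{
	unsigned int val;
	int ret;

	ret = snd_soc_cache_read(codec, 0x10, &val);	/* hypothetical register */
	if (ret)
		return ret;

	/* write back with bit 0 set; only the cache is touched here */
	return snd_soc_cache_write(codec, 0x10, val | 0x1);
}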
index 1a38be0d0ca8fbf0724bbec4087569ee418d39a2..afc3fa8b747091bcf4b4590d71c05b605bb82710 100644 (file)
@@ -1589,17 +1589,13 @@ static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
                soc_remove_codec(codec);
 }
 
-static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
-                                   enum snd_soc_compress_type compress_type)
+static int snd_soc_init_codec_cache(struct snd_soc_codec *codec)
 {
        int ret;
 
        if (codec->cache_init)
                return 0;
 
-       /* override the compress_type if necessary */
-       if (compress_type && codec->compress_type != compress_type)
-               codec->compress_type = compress_type;
        ret = snd_soc_cache_init(codec);
        if (ret < 0) {
                dev_err(codec->dev,
@@ -1614,8 +1610,6 @@ static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
 static int snd_soc_instantiate_card(struct snd_soc_card *card)
 {
        struct snd_soc_codec *codec;
-       struct snd_soc_codec_conf *codec_conf;
-       enum snd_soc_compress_type compress_type;
        struct snd_soc_dai_link *dai_link;
        int ret, i, order, dai_fmt;
 
@@ -1639,19 +1633,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
        list_for_each_entry(codec, &codec_list, list) {
                if (codec->cache_init)
                        continue;
-               /* by default we don't override the compress_type */
-               compress_type = 0;
-               /* check to see if we need to override the compress_type */
-               for (i = 0; i < card->num_configs; ++i) {
-                       codec_conf = &card->codec_conf[i];
-                       if (!strcmp(codec->name, codec_conf->dev_name)) {
-                               compress_type = codec_conf->compress_type;
-                               if (compress_type && compress_type
-                                   != codec->compress_type)
-                                       break;
-                       }
-               }
-               ret = snd_soc_init_codec_cache(codec, compress_type);
+               ret = snd_soc_init_codec_cache(codec);
                if (ret < 0)
                        goto base_error;
        }
@@ -2297,13 +2279,6 @@ unsigned int snd_soc_write(struct snd_soc_codec *codec,
 }
 EXPORT_SYMBOL_GPL(snd_soc_write);
 
-unsigned int snd_soc_bulk_write_raw(struct snd_soc_codec *codec,
-                                   unsigned int reg, const void *data, size_t len)
-{
-       return codec->bulk_write_raw(codec, reg, data, len);
-}
-EXPORT_SYMBOL_GPL(snd_soc_bulk_write_raw);
-
 /**
  * snd_soc_update_bits - update codec register bits
  * @codec: audio codec
@@ -3575,6 +3550,22 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
 }
 EXPORT_SYMBOL_GPL(snd_soc_codec_set_pll);
 
+/**
+ * snd_soc_dai_set_bclk_ratio - configure BCLK to sample rate ratio.
+ * @dai: DAI
+ * @ratio: Ratio of BCLK to sample rate.
+ *
+ * Configures the DAI for a preset BCLK to sample rate ratio.
+ */
+int snd_soc_dai_set_bclk_ratio(struct snd_soc_dai *dai, unsigned int ratio)
+{
+       if (dai->driver && dai->driver->ops->set_bclk_ratio)
+               return dai->driver->ops->set_bclk_ratio(dai, ratio);
+       else
+               return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dai_set_bclk_ratio);
+
 /**
  * snd_soc_dai_set_fmt - configure DAI hardware audio format.
  * @dai: DAI
@@ -4019,6 +4010,113 @@ static void snd_soc_unregister_dais(struct device *dev, size_t count)
                snd_soc_unregister_dai(dev);
 }
 
+/**
+ * snd_soc_register_component - Register a component with the ASoC core
+ *
+ */
+static int
+__snd_soc_register_component(struct device *dev,
+                            struct snd_soc_component *cmpnt,
+                            const struct snd_soc_component_driver *cmpnt_drv,
+                            struct snd_soc_dai_driver *dai_drv,
+                            int num_dai, bool allow_single_dai)
+{
+       int ret;
+
+       dev_dbg(dev, "component register %s\n", dev_name(dev));
+
+       if (!cmpnt) {
+               dev_err(dev, "ASoC: Failed to connect component\n");
+               return -ENOMEM;
+       }
+
+       cmpnt->name = fmt_single_name(dev, &cmpnt->id);
+       if (!cmpnt->name) {
+               dev_err(dev, "ASoC: Failed to simplify name\n");
+               return -ENOMEM;
+       }
+
+       cmpnt->dev      = dev;
+       cmpnt->driver   = cmpnt_drv;
+       cmpnt->dai_drv  = dai_drv;
+       cmpnt->num_dai  = num_dai;
+
+       /*
+        * snd_soc_register_dai() uses fmt_single_name(), and
+        * snd_soc_register_dais() uses fmt_multiple_name()
+        * for dai->name, which is used for name-based matching.
+        *
+        * This function is used from both the CPU and CODEC paths.
+        * The allow_single_dai flag lets the CODEC side, which has not
+        * been reworked yet, keep using snd_soc_register_dais().
+        */
+       if ((1 == num_dai) && allow_single_dai)
+               ret = snd_soc_register_dai(dev, dai_drv);
+       else
+               ret = snd_soc_register_dais(dev, dai_drv, num_dai);
+       if (ret < 0) {
+               dev_err(dev, "ASoC: Failed to register DAIs: %d\n", ret);
+               goto error_component_name;
+       }
+
+       mutex_lock(&client_mutex);
+       list_add(&cmpnt->list, &component_list);
+       mutex_unlock(&client_mutex);
+
+       dev_dbg(cmpnt->dev, "ASoC: Registered component '%s'\n", cmpnt->name);
+
+       return ret;
+
+error_component_name:
+       kfree(cmpnt->name);
+
+       return ret;
+}
+
+int snd_soc_register_component(struct device *dev,
+                              const struct snd_soc_component_driver *cmpnt_drv,
+                              struct snd_soc_dai_driver *dai_drv,
+                              int num_dai)
+{
+       struct snd_soc_component *cmpnt;
+
+       cmpnt = devm_kzalloc(dev, sizeof(*cmpnt), GFP_KERNEL);
+       if (!cmpnt) {
+               dev_err(dev, "ASoC: Failed to allocate memory\n");
+               return -ENOMEM;
+       }
+
+       return __snd_soc_register_component(dev, cmpnt, cmpnt_drv,
+                                           dai_drv, num_dai, true);
+}
+EXPORT_SYMBOL_GPL(snd_soc_register_component);
+
+/**
+ * snd_soc_unregister_component - Unregister a component from the ASoC core
+ *
+ */
+void snd_soc_unregister_component(struct device *dev)
+{
+       struct snd_soc_component *cmpnt;
+
+       list_for_each_entry(cmpnt, &component_list, list) {
+               if (dev == cmpnt->dev)
+                       goto found;
+       }
+       return;
+
+found:
+       snd_soc_unregister_dais(dev, cmpnt->num_dai);
+
+       mutex_lock(&client_mutex);
+       list_del(&cmpnt->list);
+       mutex_unlock(&client_mutex);
+
+       dev_dbg(dev, "ASoC: Unregistered component '%s'\n", cmpnt->name);
+       kfree(cmpnt->name);
+}
+EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
+
 /**
  * snd_soc_add_platform - Add a platform to the ASoC core
  * @dev: The parent device for the platform
@@ -4165,7 +4263,6 @@ int snd_soc_register_codec(struct device *dev,
                           struct snd_soc_dai_driver *dai_drv,
                           int num_dai)
 {
-       size_t reg_size;
        struct snd_soc_codec *codec;
        int ret, i;
 
@@ -4182,11 +4279,6 @@ int snd_soc_register_codec(struct device *dev,
                goto fail_codec;
        }
 
-       if (codec_drv->compress_type)
-               codec->compress_type = codec_drv->compress_type;
-       else
-               codec->compress_type = SND_SOC_FLAT_COMPRESSION;
-
        codec->write = codec_drv->write;
        codec->read = codec_drv->read;
        codec->volatile_register = codec_drv->volatile_register;
@@ -4203,35 +4295,6 @@ int snd_soc_register_codec(struct device *dev,
        codec->num_dai = num_dai;
        mutex_init(&codec->mutex);
 
-       /* allocate CODEC register cache */
-       if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
-               reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
-               codec->reg_size = reg_size;
-               /* it is necessary to make a copy of the default register cache
-                * because in the case of using a compression type that requires
-                * the default register cache to be marked as the
-                * kernel might have freed the array by the time we initialize
-                * the cache.
-                */
-               if (codec_drv->reg_cache_default) {
-                       codec->reg_def_copy = kmemdup(codec_drv->reg_cache_default,
-                                                     reg_size, GFP_KERNEL);
-                       if (!codec->reg_def_copy) {
-                               ret = -ENOMEM;
-                               goto fail_codec_name;
-                       }
-               }
-       }
-
-       if (codec_drv->reg_access_size && codec_drv->reg_access_default) {
-               if (!codec->volatile_register)
-                       codec->volatile_register = snd_soc_default_volatile_register;
-               if (!codec->readable_register)
-                       codec->readable_register = snd_soc_default_readable_register;
-               if (!codec->writable_register)
-                       codec->writable_register = snd_soc_default_writable_register;
-       }
-
        for (i = 0; i < num_dai; i++) {
                fixup_codec_formats(&dai_drv[i].playback);
                fixup_codec_formats(&dai_drv[i].capture);
@@ -4241,10 +4304,12 @@ int snd_soc_register_codec(struct device *dev,
        list_add(&codec->list, &codec_list);
        mutex_unlock(&client_mutex);
 
-       /* register any DAIs */
-       ret = snd_soc_register_dais(dev, dai_drv, num_dai);
+       /* register component */
+       ret = __snd_soc_register_component(dev, &codec->component,
+                                          &codec_drv->component_driver,
+                                          dai_drv, num_dai, false);
        if (ret < 0) {
-               dev_err(codec->dev, "ASoC: Failed to regster DAIs: %d\n", ret);
+               dev_err(codec->dev, "ASoC: Failed to regster component: %d\n", ret);
                goto fail_codec_name;
        }
 
@@ -4279,7 +4344,7 @@ void snd_soc_unregister_codec(struct device *dev)
        return;
 
 found:
-       snd_soc_unregister_dais(dev, codec->num_dai);
+       snd_soc_unregister_component(dev);
 
        mutex_lock(&client_mutex);
        list_del(&codec->list);
@@ -4288,98 +4353,11 @@ found:
        dev_dbg(codec->dev, "ASoC: Unregistered codec '%s'\n", codec->name);
 
        snd_soc_cache_exit(codec);
-       kfree(codec->reg_def_copy);
        kfree(codec->name);
        kfree(codec);
 }
 EXPORT_SYMBOL_GPL(snd_soc_unregister_codec);
 
-
-/**
- * snd_soc_register_component - Register a component with the ASoC core
- *
- */
-int snd_soc_register_component(struct device *dev,
-                        const struct snd_soc_component_driver *cmpnt_drv,
-                        struct snd_soc_dai_driver *dai_drv,
-                        int num_dai)
-{
-       struct snd_soc_component *cmpnt;
-       int ret;
-
-       dev_dbg(dev, "component register %s\n", dev_name(dev));
-
-       cmpnt = devm_kzalloc(dev, sizeof(*cmpnt), GFP_KERNEL);
-       if (!cmpnt) {
-               dev_err(dev, "ASoC: Failed to allocate memory\n");
-               return -ENOMEM;
-       }
-
-       cmpnt->name = fmt_single_name(dev, &cmpnt->id);
-       if (!cmpnt->name) {
-               dev_err(dev, "ASoC: Failed to simplifying name\n");
-               return -ENOMEM;
-       }
-
-       cmpnt->dev      = dev;
-       cmpnt->driver   = cmpnt_drv;
-       cmpnt->num_dai  = num_dai;
-
-       /*
-        * snd_soc_register_dai()  uses fmt_single_name(), and
-        * snd_soc_register_dais() uses fmt_multiple_name()
-        * for dai->name which is used for name based matching
-        */
-       if (1 == num_dai)
-               ret = snd_soc_register_dai(dev, dai_drv);
-       else
-               ret = snd_soc_register_dais(dev, dai_drv, num_dai);
-       if (ret < 0) {
-               dev_err(dev, "ASoC: Failed to regster DAIs: %d\n", ret);
-               goto error_component_name;
-       }
-
-       mutex_lock(&client_mutex);
-       list_add(&cmpnt->list, &component_list);
-       mutex_unlock(&client_mutex);
-
-       dev_dbg(cmpnt->dev, "ASoC: Registered component '%s'\n", cmpnt->name);
-
-       return ret;
-
-error_component_name:
-       kfree(cmpnt->name);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(snd_soc_register_component);
-
-/**
- * snd_soc_unregister_component - Unregister a component from the ASoC core
- *
- */
-void snd_soc_unregister_component(struct device *dev)
-{
-       struct snd_soc_component *cmpnt;
-
-       list_for_each_entry(cmpnt, &component_list, list) {
-               if (dev == cmpnt->dev)
-                       goto found;
-       }
-       return;
-
-found:
-       snd_soc_unregister_dais(dev, cmpnt->num_dai);
-
-       mutex_lock(&client_mutex);
-       list_del(&cmpnt->list);
-       mutex_unlock(&client_mutex);
-
-       dev_dbg(dev, "ASoC: Unregistered component '%s'\n", cmpnt->name);
-       kfree(cmpnt->name);
-}
-EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
-
 /* Retrieve a card's name from device tree */
 int snd_soc_of_parse_card_name(struct snd_soc_card *card,
                               const char *propname)
@@ -4567,6 +4545,60 @@ unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(snd_soc_of_parse_daifmt);
 
+int snd_soc_of_get_dai_name(struct device_node *of_node,
+                           const char **dai_name)
+{
+       struct snd_soc_component *pos;
+       struct of_phandle_args args;
+       int ret;
+
+       ret = of_parse_phandle_with_args(of_node, "sound-dai",
+                                        "#sound-dai-cells", 0, &args);
+       if (ret)
+               return ret;
+
+       ret = -EPROBE_DEFER;
+
+       mutex_lock(&client_mutex);
+       list_for_each_entry(pos, &component_list, list) {
+               if (pos->dev->of_node != args.np)
+                       continue;
+
+               if (pos->driver->of_xlate_dai_name) {
+                       ret = pos->driver->of_xlate_dai_name(pos, &args, dai_name);
+               } else {
+                       int id = -1;
+
+                       switch (args.args_count) {
+                       case 0:
+                               id = 0; /* same as dai_drv[0] */
+                               break;
+                       case 1:
+                               id = args.args[0];
+                               break;
+                       default:
+                               /* not supported */
+                               break;
+                       }
+
+                       if (id < 0 || id >= pos->num_dai) {
+                               ret = -EINVAL;
+                       } else {
+                               *dai_name = pos->dai_drv[id].name;
+                               ret = 0;
+                       }
+               }
+
+               break;
+       }
+       mutex_unlock(&client_mutex);
+
+       of_node_put(args.np);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_name);
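snd_soc_of_get_dai_name() resolves a "sound-dai" phandle (interpreted through "#sound-dai-cells") into the name of a DAI on an already-registered component, returning -EPROBE_DEFER until that component shows up. A minimal usage sketch for a machine driver follows; the function name, the node layout and the dai_link wiring are illustrative assumptions, not taken from this patch.

#include <linux/of.h>
#include <sound/soc.h>

/*
 * Hypothetical helper: fill in the codec side of a DAI link from a card
 * subnode that contains, for example,
 *
 *	sound-dai = <&codec 1>;
 *
 * where the codec node declares #sound-dai-cells = <1>.
 */
static int example_parse_codec_dai(struct device_node *np,
				   struct snd_soc_dai_link *link)
{
	int ret;

	ret = snd_soc_of_get_dai_name(np, &link->codec_dai_name);
	if (ret == -EPROBE_DEFER)
		return ret;	/* matching component not registered yet */

	return ret;		/* 0 on success, -EINVAL on a bad DAI index */
}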
+
 static int __init snd_soc_init(void)
 {
 #ifdef CONFIG_DEBUG_FS
index c17c14c394df88bb0442ccaf51b5735f05455ab5..2fb0b72d8a3c00c7d6266df72b68487a06f31877 100644 (file)
@@ -59,31 +59,31 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 /* dapm power sequences - make this per codec in the future */
 static int dapm_up_seq[] = {
        [snd_soc_dapm_pre] = 0,
-       [snd_soc_dapm_supply] = 1,
        [snd_soc_dapm_regulator_supply] = 1,
        [snd_soc_dapm_clock_supply] = 1,
-       [snd_soc_dapm_micbias] = 2,
+       [snd_soc_dapm_supply] = 2,
+       [snd_soc_dapm_micbias] = 3,
        [snd_soc_dapm_dai_link] = 2,
-       [snd_soc_dapm_dai_in] = 3,
-       [snd_soc_dapm_dai_out] = 3,
-       [snd_soc_dapm_aif_in] = 3,
-       [snd_soc_dapm_aif_out] = 3,
-       [snd_soc_dapm_mic] = 4,
-       [snd_soc_dapm_mux] = 5,
-       [snd_soc_dapm_virt_mux] = 5,
-       [snd_soc_dapm_value_mux] = 5,
-       [snd_soc_dapm_dac] = 6,
-       [snd_soc_dapm_switch] = 7,
-       [snd_soc_dapm_mixer] = 7,
-       [snd_soc_dapm_mixer_named_ctl] = 7,
-       [snd_soc_dapm_pga] = 8,
-       [snd_soc_dapm_adc] = 9,
-       [snd_soc_dapm_out_drv] = 10,
-       [snd_soc_dapm_hp] = 10,
-       [snd_soc_dapm_spk] = 10,
-       [snd_soc_dapm_line] = 10,
-       [snd_soc_dapm_kcontrol] = 11,
-       [snd_soc_dapm_post] = 12,
+       [snd_soc_dapm_dai_in] = 4,
+       [snd_soc_dapm_dai_out] = 4,
+       [snd_soc_dapm_aif_in] = 4,
+       [snd_soc_dapm_aif_out] = 4,
+       [snd_soc_dapm_mic] = 5,
+       [snd_soc_dapm_mux] = 6,
+       [snd_soc_dapm_virt_mux] = 6,
+       [snd_soc_dapm_value_mux] = 6,
+       [snd_soc_dapm_dac] = 7,
+       [snd_soc_dapm_switch] = 8,
+       [snd_soc_dapm_mixer] = 8,
+       [snd_soc_dapm_mixer_named_ctl] = 8,
+       [snd_soc_dapm_pga] = 9,
+       [snd_soc_dapm_adc] = 10,
+       [snd_soc_dapm_out_drv] = 11,
+       [snd_soc_dapm_hp] = 11,
+       [snd_soc_dapm_spk] = 11,
+       [snd_soc_dapm_line] = 11,
+       [snd_soc_dapm_kcontrol] = 12,
+       [snd_soc_dapm_post] = 13,
 };
 
 static int dapm_down_seq[] = {
@@ -109,10 +109,10 @@ static int dapm_down_seq[] = {
        [snd_soc_dapm_dai_in] = 10,
        [snd_soc_dapm_dai_out] = 10,
        [snd_soc_dapm_dai_link] = 11,
-       [snd_soc_dapm_clock_supply] = 12,
-       [snd_soc_dapm_regulator_supply] = 12,
        [snd_soc_dapm_supply] = 12,
-       [snd_soc_dapm_post] = 13,
+       [snd_soc_dapm_clock_supply] = 13,
+       [snd_soc_dapm_regulator_supply] = 13,
+       [snd_soc_dapm_post] = 14,
 };
 
 static void pop_wait(u32 pop_time)
@@ -409,6 +409,12 @@ static inline void soc_widget_unlock(struct snd_soc_dapm_widget *w)
                mutex_unlock(&w->platform->mutex);
 }
 
+static void soc_dapm_async_complete(struct snd_soc_dapm_context *dapm)
+{
+       if (dapm->codec && dapm->codec->using_regmap)
+               regmap_async_complete(dapm->codec->control_data);
+}
+
 static int soc_widget_update_bits_locked(struct snd_soc_dapm_widget *w,
        unsigned short reg, unsigned int mask, unsigned int value)
 {
@@ -417,8 +423,9 @@ static int soc_widget_update_bits_locked(struct snd_soc_dapm_widget *w,
        int ret;
 
        if (w->codec && w->codec->using_regmap) {
-               ret = regmap_update_bits_check(w->codec->control_data,
-                                              reg, mask, value, &change);
+               ret = regmap_update_bits_check_async(w->codec->control_data,
+                                                    reg, mask, value,
+                                                    &change);
                if (ret != 0)
                        return ret;
        } else {
@@ -499,18 +506,22 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
                int val;
                struct soc_mixer_control *mc = (struct soc_mixer_control *)
                        w->kcontrol_news[i].private_value;
-               unsigned int reg = mc->reg;
+               int reg = mc->reg;
                unsigned int shift = mc->shift;
                int max = mc->max;
                unsigned int mask = (1 << fls(max)) - 1;
                unsigned int invert = mc->invert;
 
-               val = soc_widget_read(w, reg);
-               val = (val >> shift) & mask;
-               if (invert)
-                       val = max - val;
+               if (reg != SND_SOC_NOPM) {
+                       val = soc_widget_read(w, reg);
+                       val = (val >> shift) & mask;
+                       if (invert)
+                               val = max - val;
+                       p->connect = !!val;
+               } else {
+                       p->connect = 0;
+               }
 
-               p->connect = !!val;
        }
        break;
        case snd_soc_dapm_mux: {
@@ -1197,6 +1208,8 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
 {
        int ret;
 
+       soc_dapm_async_complete(w->dapm);
+
        if (SND_SOC_DAPM_EVENT_ON(event)) {
                if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
                        ret = regulator_allow_bypass(w->regulator, false);
@@ -1230,6 +1243,8 @@ int dapm_clock_event(struct snd_soc_dapm_widget *w,
        if (!w->clk)
                return -EIO;
 
+       soc_dapm_async_complete(w->dapm);
+
 #ifdef CONFIG_HAVE_CLK
        if (SND_SOC_DAPM_EVENT_ON(event)) {
                return clk_prepare_enable(w->clk);
@@ -1422,6 +1437,7 @@ static void dapm_seq_check_event(struct snd_soc_card *card,
        if (w->event && (w->event_flags & event)) {
                pop_dbg(w->dapm->dev, card->pop_time, "pop test : %s %s\n",
                        w->name, ev_name);
+               soc_dapm_async_complete(w->dapm);
                trace_snd_soc_dapm_widget_event_start(w, event);
                ret = w->event(w, NULL, event);
                trace_snd_soc_dapm_widget_event_done(w, event);
@@ -1494,6 +1510,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
        struct list_head *list, int event, bool power_up)
 {
        struct snd_soc_dapm_widget *w, *n;
+       struct snd_soc_dapm_context *d;
        LIST_HEAD(pending);
        int cur_sort = -1;
        int cur_subseq = -1;
@@ -1524,6 +1541,9 @@ static void dapm_seq_run(struct snd_soc_card *card,
                                                                       cur_subseq);
                        }
 
+                       if (cur_dapm && w->dapm != cur_dapm)
+                               soc_dapm_async_complete(cur_dapm);
+
                        INIT_LIST_HEAD(&pending);
                        cur_sort = -1;
                        cur_subseq = INT_MIN;
@@ -1582,6 +1602,10 @@ static void dapm_seq_run(struct snd_soc_card *card,
                                cur_dapm->seq_notifier(cur_dapm,
                                                       i, cur_subseq);
        }
+
+       list_for_each_entry(d, &card->dapm_list, list) {
+               soc_dapm_async_complete(d);
+       }
 }
 
 static void dapm_widget_update(struct snd_soc_card *card)
@@ -1840,6 +1864,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
                         */
                        switch (w->id) {
                        case snd_soc_dapm_siggen:
+                       case snd_soc_dapm_vmid:
                                break;
                        case snd_soc_dapm_supply:
                        case snd_soc_dapm_regulator_supply:
@@ -2791,7 +2816,7 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
        struct snd_soc_card *card = codec->card;
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;
-       unsigned int reg = mc->reg;
+       int reg = mc->reg;
        unsigned int shift = mc->shift;
        int max = mc->max;
        unsigned int mask = (1 << fls(max)) - 1;
@@ -2804,7 +2829,7 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
                         kcontrol->id.name);
 
        mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-       if (dapm_kcontrol_is_powered(kcontrol))
+       if (dapm_kcontrol_is_powered(kcontrol) && reg != SND_SOC_NOPM)
                val = (snd_soc_read(codec, reg) >> shift) & mask;
        else
                val = dapm_kcontrol_get_value(kcontrol);
@@ -2835,7 +2860,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
        struct snd_soc_card *card = codec->card;
        struct soc_mixer_control *mc =
                (struct soc_mixer_control *)kcontrol->private_value;
-       unsigned int reg = mc->reg;
+       int reg = mc->reg;
        unsigned int shift = mc->shift;
        int max = mc->max;
        unsigned int mask = (1 << fls(max)) - 1;
@@ -2857,19 +2882,24 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
 
        mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
-       dapm_kcontrol_set_value(kcontrol, val);
+       change = dapm_kcontrol_set_value(kcontrol, val);
+
+       if (reg != SND_SOC_NOPM) {
+               mask = mask << shift;
+               val = val << shift;
 
-       mask = mask << shift;
-       val = val << shift;
+               change = snd_soc_test_bits(codec, reg, mask, val);
+       }
 
-       change = snd_soc_test_bits(codec, reg, mask, val);
        if (change) {
-               update.kcontrol = kcontrol;
-               update.reg = reg;
-               update.mask = mask;
-               update.val = val;
+               if (reg != SND_SOC_NOPM) {
+                       update.kcontrol = kcontrol;
+                       update.reg = reg;
+                       update.mask = mask;
+                       update.val = val;
 
-               card->update = &update;
+                       card->update = &update;
+               }
 
                soc_dapm_mixer_update_power(card, kcontrol, connect);
 
diff --git a/sound/soc/soc-devres.c b/sound/soc/soc-devres.c
new file mode 100644 (file)
index 0000000..b1d7322
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * soc-devres.c  --  ALSA SoC Audio Layer devres functions
+ *
+ * Copyright (C) 2013 Linaro Ltd
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <sound/soc.h>
+
+static void devm_component_release(struct device *dev, void *res)
+{
+       snd_soc_unregister_component(*(struct device **)res);
+}
+
+/**
+ * devm_snd_soc_register_component - resource managed component registration
+ * @dev: Device used to manage component
+ * @cmpnt_drv: Component driver
+ * @dai_drv: DAI driver
+ * @num_dai: Number of DAIs to register
+ *
+ * Register a component with automatic unregistration when the device is
+ * unregistered.
+ */
+int devm_snd_soc_register_component(struct device *dev,
+                        const struct snd_soc_component_driver *cmpnt_drv,
+                        struct snd_soc_dai_driver *dai_drv, int num_dai)
+{
+       struct device **ptr;
+       int ret;
+
+       ptr = devres_alloc(devm_component_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       ret = snd_soc_register_component(dev, cmpnt_drv, dai_drv, num_dai);
+       if (ret == 0) {
+               *ptr = dev;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(devm_snd_soc_register_component);
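As a rough sketch of the call pattern this enables (the foo_* symbols are hypothetical; the spdif-in and spdif-out hunks further below show real conversions), a DAI driver can drop its .remove callback entirely:

#include <linux/platform_device.h>
#include <sound/soc.h>

static const struct snd_soc_component_driver foo_component = {
	.name = "foo",
};

static struct snd_soc_dai_driver foo_dai = {
	.name = "foo-dai",
};

static int foo_probe(struct platform_device *pdev)
{
	/* ... clocks, registers and DMA data acquired via other devm_* calls ... */

	/* Unregistered automatically when the driver is unbound. */
	return devm_snd_soc_register_component(&pdev->dev, &foo_component,
					       &foo_dai, 1);
}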
+
+static void devm_card_release(struct device *dev, void *res)
+{
+       snd_soc_unregister_card(*(struct snd_soc_card **)res);
+}
+
+/**
+ * devm_snd_soc_register_card - resource managed card registration
+ * @dev: Device used to manage card
+ * @card: Card to register
+ *
+ * Register a card with automatic unregistration when the device is
+ * unregistered.
+ */
+int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card)
+{
+       struct device **ptr;
+       int ret;
+
+       ptr = devres_alloc(devm_card_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       ret = snd_soc_register_card(card);
+       if (ret == 0) {
+               *ptr = dev;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(devm_snd_soc_register_card);
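A corresponding sketch for a machine driver; the card contents are elided and the foo_* names are hypothetical:

#include <linux/platform_device.h>
#include <sound/soc.h>

static struct snd_soc_card foo_card = {
	.name = "foo-audio",
	/* .dai_link / .num_links filled in as usual */
};

static int foo_card_probe(struct platform_device *pdev)
{
	foo_card.dev = &pdev->dev;

	/* The card is unregistered via devres when the device is unbound. */
	return devm_snd_soc_register_card(&pdev->dev, &foo_card);
}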
index e29ec3cd84b1301febfdf6ecd25d9ed8c2f57c50..99f9495c1c40f95ce4f26bb5c31dd1f1a2007c59 100644 (file)
@@ -36,6 +36,15 @@ static struct dmaengine_pcm *soc_platform_to_pcm(struct snd_soc_platform *p)
        return container_of(p, struct dmaengine_pcm, platform);
 }
 
+static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
+       struct snd_pcm_substream *substream)
+{
+       if (!pcm->chan[substream->stream])
+               return NULL;
+
+       return pcm->chan[substream->stream]->device->dev;
+}
+
 /**
  * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
  * @substream: PCM substream
@@ -75,12 +84,19 @@ static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
        struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
+       int (*prepare_slave_config)(struct snd_pcm_substream *substream,
+                       struct snd_pcm_hw_params *params,
+                       struct dma_slave_config *slave_config);
        struct dma_slave_config slave_config;
        int ret;
 
-       if (pcm->config->prepare_slave_config) {
-               ret = pcm->config->prepare_slave_config(substream, params,
-                               &slave_config);
+       if (!pcm->config)
+               prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
+       else
+               prepare_slave_config = pcm->config->prepare_slave_config;
+
+       if (prepare_slave_config) {
+               ret = prepare_slave_config(substream, params, &slave_config);
                if (ret)
                        return ret;
 
@@ -92,28 +108,54 @@ static int dmaengine_pcm_hw_params(struct snd_pcm_substream *substream,
        return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
 }
 
-static int dmaengine_pcm_open(struct snd_pcm_substream *substream)
+static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substream)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
+       struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
        struct dma_chan *chan = pcm->chan[substream->stream];
+       struct snd_dmaengine_dai_dma_data *dma_data;
+       struct dma_slave_caps dma_caps;
+       struct snd_pcm_hardware hw;
        int ret;
 
-       ret = snd_soc_set_runtime_hwparams(substream,
+       if (pcm->config && pcm->config->pcm_hardware)
+               return snd_soc_set_runtime_hwparams(substream,
                                pcm->config->pcm_hardware);
-       if (ret)
-               return ret;
 
-       return snd_dmaengine_pcm_open(substream, chan);
+       dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+       memset(&hw, 0, sizeof(hw));
+       hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
+                       SNDRV_PCM_INFO_INTERLEAVED;
+       hw.periods_min = 2;
+       hw.periods_max = UINT_MAX;
+       hw.period_bytes_min = 256;
+       hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
+       hw.buffer_bytes_max = SIZE_MAX;
+       hw.fifo_size = dma_data->fifo_size;
+
+       ret = dma_get_slave_caps(chan, &dma_caps);
+       if (ret == 0) {
+               if (dma_caps.cmd_pause)
+                       hw.info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
+       }
+
+       return snd_soc_set_runtime_hwparams(substream, &hw);
 }
 
-static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
-       struct snd_pcm_substream *substream)
+static int dmaengine_pcm_open(struct snd_pcm_substream *substream)
 {
-       if (!pcm->chan[substream->stream])
-               return NULL;
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
+       struct dma_chan *chan = pcm->chan[substream->stream];
+       int ret;
 
-       return pcm->chan[substream->stream]->device->dev;
+       ret = dmaengine_pcm_set_runtime_hwparams(substream);
+       if (ret)
+               return ret;
+
+       return snd_dmaengine_pcm_open(substream, chan);
 }
 
 static void dmaengine_pcm_free(struct snd_pcm *pcm)
@@ -142,9 +184,20 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
        struct dmaengine_pcm *pcm = soc_platform_to_pcm(rtd->platform);
        const struct snd_dmaengine_pcm_config *config = pcm->config;
        struct snd_pcm_substream *substream;
+       size_t prealloc_buffer_size;
+       size_t max_buffer_size;
        unsigned int i;
        int ret;
 
+       if (config && config->prealloc_buffer_size) {
+               prealloc_buffer_size = config->prealloc_buffer_size;
+               max_buffer_size = config->pcm_hardware->buffer_bytes_max;
+       } else {
+               prealloc_buffer_size = 512 * 1024;
+               max_buffer_size = SIZE_MAX;
+       }
+
+
        for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
                substream = rtd->pcm->streams[i].substream;
                if (!substream)
@@ -165,8 +218,8 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
                ret = snd_pcm_lib_preallocate_pages(substream,
                                SNDRV_DMA_TYPE_DEV,
                                dmaengine_dma_dev(pcm, substream),
-                               config->prealloc_buffer_size,
-                               config->pcm_hardware->buffer_bytes_max);
+                               prealloc_buffer_size,
+                               max_buffer_size);
                if (ret)
                        goto err_free;
        }
index 122c0c18b9dd286ce6322f3ce67bcd070387c913..4f11d23f20621971b8806e70618ee8e4e1c3846f 100644 (file)
@@ -65,31 +65,6 @@ static unsigned int hw_read(struct snd_soc_codec *codec, unsigned int reg)
        return val;
 }
 
-/* Primitive bulk write support for soc-cache.  The data pointed to by
- * `data' needs to already be in the form the hardware expects.  Any
- * data written through this function will not go through the cache as
- * it only handles writing to volatile or out of bounds registers.
- *
- * This is currently only supported for devices using the regmap API
- * wrappers.
- */
-static int snd_soc_hw_bulk_write_raw(struct snd_soc_codec *codec,
-                                    unsigned int reg,
-                                    const void *data, size_t len)
-{
-       /* To ensure that we don't get out of sync with the cache, check
-        * whether the base register is volatile or if we've directly asked
-        * to bypass the cache.  Out of bounds registers are considered
-        * volatile.
-        */
-       if (!codec->cache_bypass
-           && !snd_soc_codec_volatile_register(codec, reg)
-           && reg < codec->driver->reg_cache_size)
-               return -EINVAL;
-
-       return regmap_raw_write(codec->control_data, reg, data, len);
-}
-
 /**
  * snd_soc_codec_set_cache_io: Set up standard I/O functions.
  *
@@ -119,7 +94,6 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
        memset(&config, 0, sizeof(config));
        codec->write = hw_write;
        codec->read = hw_read;
-       codec->bulk_write_raw = snd_soc_hw_bulk_write_raw;
 
        config.reg_bits = addr_bits;
        config.val_bits = data_bits;
index 71358e3b54d93ef10c3e402d98f66773b81d27fc..23d43dac91da2c0d9c6ad8bbf792036483da3fab 100644 (file)
@@ -65,6 +65,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
        struct snd_soc_codec *codec;
        struct snd_soc_dapm_context *dapm;
        struct snd_soc_jack_pin *pin;
+       unsigned int sync = 0;
        int enable;
 
        trace_snd_soc_jack_report(jack, mask, status);
@@ -92,12 +93,16 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
                        snd_soc_dapm_enable_pin(dapm, pin->pin);
                else
                        snd_soc_dapm_disable_pin(dapm, pin->pin);
+
+               /* we need to sync for this case only */
+               sync = 1;
        }
 
        /* Report before the DAPM sync to help users updating micbias status */
        blocking_notifier_call_chain(&jack->notifier, jack->status, jack);
 
-       snd_soc_dapm_sync(dapm);
+       if (sync)
+               snd_soc_dapm_sync(dapm);
 
        snd_jack_report(jack->jack, jack->status);
 
index 330c9a6b5cb5bf9810da6afd76a6cc8155b13845..d4498723b375025cfacc21246f6f018be1c8e321 100644 (file)
@@ -721,7 +721,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
        list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients);
        list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients);
 
-       dev_dbg(fe->dev, "  connected new DPCM %s path %s %s %s\n",
+       dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n",
                        stream ? "capture" : "playback",  fe->dai_link->name,
                        stream ? "<-" : "->", be->dai_link->name);
 
@@ -749,7 +749,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
                if (dpcm->fe == fe)
                        continue;
 
-               dev_dbg(fe->dev, "  reparent %s path %s %s %s\n",
+               dev_dbg(fe->dev, "reparent %s path %s %s %s\n",
                        stream ? "capture" : "playback",
                        dpcm->fe->dai_link->name,
                        stream ? "<-" : "->", dpcm->be->dai_link->name);
@@ -773,7 +773,7 @@ static void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
                if (dpcm->state != SND_SOC_DPCM_LINK_STATE_FREE)
                        continue;
 
-               dev_dbg(fe->dev, "  freed DSP %s path %s %s %s\n",
+               dev_dbg(fe->dev, "freed DSP %s path %s %s %s\n",
                        stream ? "capture" : "playback", fe->dai_link->name,
                        stream ? "<-" : "->", dpcm->be->dai_link->name);
 
@@ -2116,7 +2116,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
 
        pcm->private_free = platform->driver->pcm_free;
 out:
-       dev_info(rtd->card->dev, " %s <-> %s mapping ok\n", codec_dai->name,
+       dev_info(rtd->card->dev, "%s <-> %s mapping ok\n", codec_dai->name,
                cpu_dai->name);
        return ret;
 }
index 29b211e9c06020a641b1449099fc163e58770f87..5e633659c1b38154ebd1a75aa0e4f005b4f089a0 100644 (file)
@@ -75,7 +75,11 @@ static const struct snd_pcm_hardware dummy_dma_hardware = {
 
 static int dummy_dma_open(struct snd_pcm_substream *substream)
 {
-       snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+       /* BEs don't need dummy params */
+       if (!rtd->dai_link->no_pcm)
+               snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
 
        return 0;
 }
index 63acfeb4b69d22e91ab9e98cf81d7fbf9ef4cc04..21a8c954af1cd6dfedd65100cc331d54145f4746 100644 (file)
@@ -257,20 +257,12 @@ static int spdif_in_probe(struct platform_device *pdev)
                return ret;
        }
 
-       return snd_soc_register_component(&pdev->dev, &spdif_in_component,
-                                        &spdif_in_dai, 1);
-}
-
-static int spdif_in_remove(struct platform_device *pdev)
-{
-       snd_soc_unregister_component(&pdev->dev);
-
-       return 0;
+       return devm_snd_soc_register_component(&pdev->dev, &spdif_in_component,
+                                              &spdif_in_dai, 1);
 }
 
 static struct platform_driver spdif_in_driver = {
        .probe          = spdif_in_probe,
-       .remove         = spdif_in_remove,
        .driver         = {
                .name   = "spdif-in",
                .owner  = THIS_MODULE,
index 2fdf68c98d22a92fb53f587919123d069b31a43c..b6ef6f78dc781ad8a45c18d594b1cdc4945647f1 100644 (file)
@@ -280,7 +280,6 @@ static int spdif_out_probe(struct platform_device *pdev)
        struct spdif_out_dev *host;
        struct spear_spdif_platform_data *pdata;
        struct resource *res;
-       int ret;
 
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
@@ -307,16 +306,8 @@ static int spdif_out_probe(struct platform_device *pdev)
 
        dev_set_drvdata(&pdev->dev, host);
 
-       ret = snd_soc_register_component(&pdev->dev, &spdif_out_component,
-                                        &spdif_out_dai, 1);
-       return ret;
-}
-
-static int spdif_out_remove(struct platform_device *pdev)
-{
-       snd_soc_unregister_component(&pdev->dev);
-
-       return 0;
+       return devm_snd_soc_register_component(&pdev->dev, &spdif_out_component,
+                                              &spdif_out_dai, 1);
 }
 
 #ifdef CONFIG_PM
@@ -357,7 +348,6 @@ static SIMPLE_DEV_PM_OPS(spdif_out_dev_pm_ops, spdif_out_suspend, \
 
 static struct platform_driver spdif_out_driver = {
        .probe          = spdif_out_probe,
-       .remove         = spdif_out_remove,
        .driver         = {
                .name   = "spdif-out",
                .owner  = THIS_MODULE,
index 52af7f6fb37f6938be2845b70cf55d69a7fd12e3..364bf6a907e1c39a36df89ded1d6b2681172940f 100644 (file)
@@ -297,7 +297,7 @@ static bool tegra20_i2s_wr_rd_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                return false;
-       };
+       }
 }
 
 static bool tegra20_i2s_volatile_reg(struct device *dev, unsigned int reg)
@@ -310,7 +310,7 @@ static bool tegra20_i2s_volatile_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                return false;
-       };
+       }
 }
 
 static bool tegra20_i2s_precious_reg(struct device *dev, unsigned int reg)
@@ -321,7 +321,7 @@ static bool tegra20_i2s_precious_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                return false;
-       };
+       }
 }
 
 static const struct regmap_config tegra20_i2s_regmap_config = {
index 551b3c93ce932c77e9e05de4bcf01f5af63d1c94..08bc6931c7c7fc0477703037098a8e7185d7c69d 100644 (file)
@@ -213,7 +213,7 @@ static bool tegra20_spdif_wr_rd_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                return false;
-       };
+       }
 }
 
 static bool tegra20_spdif_volatile_reg(struct device *dev, unsigned int reg)
@@ -234,7 +234,7 @@ static bool tegra20_spdif_volatile_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                return false;
-       };
+       }
 }
 
 static bool tegra20_spdif_precious_reg(struct device *dev, unsigned int reg)
@@ -247,7 +247,7 @@ static bool tegra20_spdif_precious_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                return false;
-       };
+       }
 }
 
 static const struct regmap_config tegra20_spdif_regmap_config = {
index d554d46d08b550788a1bd0ad706f8ed7948a318a..31154338c1eb742da6a1342310d81fddb1f9ecd4 100644 (file)
@@ -100,6 +100,7 @@ int tegra30_ahub_allocate_rx_fifo(enum tegra30_ahub_rxcif *rxcif,
 {
        int channel;
        u32 reg, val;
+       struct tegra30_ahub_cif_conf cif_conf;
 
        channel = find_first_zero_bit(ahub->rx_usage,
                                      TEGRA30_AHUB_CHANNEL_CTRL_COUNT);
@@ -123,15 +124,21 @@ int tegra30_ahub_allocate_rx_fifo(enum tegra30_ahub_rxcif *rxcif,
               TEGRA30_AHUB_CHANNEL_CTRL_RX_PACK_16;
        tegra30_apbif_write(reg, val);
 
+       cif_conf.threshold = 0;
+       cif_conf.audio_channels = 2;
+       cif_conf.client_channels = 2;
+       cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
+       cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
+       cif_conf.expand = 0;
+       cif_conf.stereo_conv = 0;
+       cif_conf.replicate = 0;
+       cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_RX;
+       cif_conf.truncate = 0;
+       cif_conf.mono_conv = 0;
+
        reg = TEGRA30_AHUB_CIF_RX_CTRL +
              (channel * TEGRA30_AHUB_CIF_RX_CTRL_STRIDE);
-       val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
-             (1 << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
-             (1 << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
-             TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 |
-             TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16 |
-             TEGRA30_AUDIOCIF_CTRL_DIRECTION_RX;
-       tegra30_apbif_write(reg, val);
+       ahub->soc_data->set_audio_cif(ahub->regmap_apbif, reg, &cif_conf);
 
        return 0;
 }
@@ -183,6 +190,7 @@ int tegra30_ahub_allocate_tx_fifo(enum tegra30_ahub_txcif *txcif,
 {
        int channel;
        u32 reg, val;
+       struct tegra30_ahub_cif_conf cif_conf;
 
        channel = find_first_zero_bit(ahub->tx_usage,
                                      TEGRA30_AHUB_CHANNEL_CTRL_COUNT);
@@ -206,15 +214,21 @@ int tegra30_ahub_allocate_tx_fifo(enum tegra30_ahub_txcif *txcif,
               TEGRA30_AHUB_CHANNEL_CTRL_TX_PACK_16;
        tegra30_apbif_write(reg, val);
 
+       cif_conf.threshold = 0;
+       cif_conf.audio_channels = 2;
+       cif_conf.client_channels = 2;
+       cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
+       cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
+       cif_conf.expand = 0;
+       cif_conf.stereo_conv = 0;
+       cif_conf.replicate = 0;
+       cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_TX;
+       cif_conf.truncate = 0;
+       cif_conf.mono_conv = 0;
+
        reg = TEGRA30_AHUB_CIF_TX_CTRL +
              (channel * TEGRA30_AHUB_CIF_TX_CTRL_STRIDE);
-       val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
-             (1 << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
-             (1 << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
-             TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 |
-             TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16 |
-             TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
-       tegra30_apbif_write(reg, val);
+       ahub->soc_data->set_audio_cif(ahub->regmap_apbif, reg, &cif_conf);
 
        return 0;
 }
@@ -346,7 +360,7 @@ static bool tegra30_ahub_apbif_wr_rd_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                break;
-       };
+       }
 
        if (REG_IN_ARRAY(reg, CHANNEL_CTRL) ||
            REG_IN_ARRAY(reg, CHANNEL_CLEAR) ||
@@ -381,7 +395,7 @@ static bool tegra30_ahub_apbif_volatile_reg(struct device *dev,
                return true;
        default:
                break;
-       };
+       }
 
        if (REG_IN_ARRAY(reg, CHANNEL_CLEAR) ||
            REG_IN_ARRAY(reg, CHANNEL_STATUS) ||
@@ -437,13 +451,21 @@ static const struct regmap_config tegra30_ahub_ahub_regmap_config = {
 
 static struct tegra30_ahub_soc_data soc_data_tegra30 = {
        .clk_list_mask = CLK_LIST_MASK_TEGRA30,
+       .set_audio_cif = tegra30_ahub_set_cif,
 };
 
 static struct tegra30_ahub_soc_data soc_data_tegra114 = {
        .clk_list_mask = CLK_LIST_MASK_TEGRA114,
+       .set_audio_cif = tegra30_ahub_set_cif,
+};
+
+static struct tegra30_ahub_soc_data soc_data_tegra124 = {
+       .clk_list_mask = CLK_LIST_MASK_TEGRA114,
+       .set_audio_cif = tegra124_ahub_set_cif,
 };
 
 static const struct of_device_id tegra30_ahub_of_match[] = {
+       { .compatible = "nvidia,tegra124-ahub", .data = &soc_data_tegra124 },
        { .compatible = "nvidia,tegra114-ahub", .data = &soc_data_tegra114 },
        { .compatible = "nvidia,tegra30-ahub",  .data = &soc_data_tegra30 },
        {},
@@ -497,6 +519,7 @@ static int tegra30_ahub_probe(struct platform_device *pdev)
        }
        dev_set_drvdata(&pdev->dev, ahub);
 
+       ahub->soc_data = soc_data;
        ahub->dev = &pdev->dev;
 
        ahub->clk_d_audio = clk_get(&pdev->dev, "d_audio");
@@ -669,6 +692,70 @@ static struct platform_driver tegra30_ahub_driver = {
 };
 module_platform_driver(tegra30_ahub_driver);
 
+void tegra30_ahub_set_cif(struct regmap *regmap, unsigned int reg,
+                         struct tegra30_ahub_cif_conf *conf)
+{
+       unsigned int value;
+
+       value = (conf->threshold <<
+                       TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
+               ((conf->audio_channels - 1) <<
+                       TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
+               ((conf->client_channels - 1) <<
+                       TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
+               (conf->audio_bits <<
+                       TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT) |
+               (conf->client_bits <<
+                       TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT) |
+               (conf->expand <<
+                       TEGRA30_AUDIOCIF_CTRL_EXPAND_SHIFT) |
+               (conf->stereo_conv <<
+                       TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT) |
+               (conf->replicate <<
+                       TEGRA30_AUDIOCIF_CTRL_REPLICATE_SHIFT) |
+               (conf->direction <<
+                       TEGRA30_AUDIOCIF_CTRL_DIRECTION_SHIFT) |
+               (conf->truncate <<
+                       TEGRA30_AUDIOCIF_CTRL_TRUNCATE_SHIFT) |
+               (conf->mono_conv <<
+                       TEGRA30_AUDIOCIF_CTRL_MONO_CONV_SHIFT);
+
+       regmap_write(regmap, reg, value);
+}
+EXPORT_SYMBOL_GPL(tegra30_ahub_set_cif);
+
+void tegra124_ahub_set_cif(struct regmap *regmap, unsigned int reg,
+                          struct tegra30_ahub_cif_conf *conf)
+{
+       unsigned int value;
+
+       value = (conf->threshold <<
+                       TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
+               ((conf->audio_channels - 1) <<
+                       TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
+               ((conf->client_channels - 1) <<
+                       TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
+               (conf->audio_bits <<
+                       TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_SHIFT) |
+               (conf->client_bits <<
+                       TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_SHIFT) |
+               (conf->expand <<
+                       TEGRA30_AUDIOCIF_CTRL_EXPAND_SHIFT) |
+               (conf->stereo_conv <<
+                       TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT) |
+               (conf->replicate <<
+                       TEGRA30_AUDIOCIF_CTRL_REPLICATE_SHIFT) |
+               (conf->direction <<
+                       TEGRA30_AUDIOCIF_CTRL_DIRECTION_SHIFT) |
+               (conf->truncate <<
+                       TEGRA30_AUDIOCIF_CTRL_TRUNCATE_SHIFT) |
+               (conf->mono_conv <<
+                       TEGRA30_AUDIOCIF_CTRL_MONO_CONV_SHIFT);
+
+       regmap_write(regmap, reg, value);
+}
+EXPORT_SYMBOL_GPL(tegra124_ahub_set_cif);
+
 MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
 MODULE_DESCRIPTION("Tegra30 AHUB driver");
 MODULE_LICENSE("GPL v2");
index 09766cdc45ca76abfb05109d8c7784a72d19372c..d67321d90faa1f5e828e0bca0a5ccc331539f47b 100644 (file)
 #define TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK_US   0xf
 #define TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK      (TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK_US << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT)
 
+#define TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT    24
+#define TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK_US  0x3f
+#define TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK     (TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_MASK_US << TEGRA124_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT)
+
 /* Channel count minus 1 */
 #define TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT     24
 #define TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK_US   7
 #define TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK      (TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK_US << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT)
 
+/* Channel count minus 1 */
+#define TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT    20
+#define TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK_US  0xf
+#define TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK     (TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_MASK_US << TEGRA124_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT)
+
 /* Channel count minus 1 */
 #define TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT    16
 #define TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK_US  7
 #define TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK     (TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK_US << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT)
 
+/* Channel count minus 1 */
+#define TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT   16
+#define TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK_US 0xf
+#define TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK    (TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_MASK_US << TEGRA124_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT)
+
 #define TEGRA30_AUDIOCIF_BITS_4                                0
 #define TEGRA30_AUDIOCIF_BITS_8                                1
 #define TEGRA30_AUDIOCIF_BITS_12                       2
 #define TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_CH1          (TEGRA30_AUDIOCIF_STEREO_CONV_CH1 << TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT)
 #define TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_AVG          (TEGRA30_AUDIOCIF_STEREO_CONV_AVG << TEGRA30_AUDIOCIF_CTRL_STEREO_CONV_SHIFT)
 
-#define TEGRA30_AUDIOCIF_CTRL_REPLICATE                        3
+#define TEGRA30_AUDIOCIF_CTRL_REPLICATE_SHIFT          3
 
 #define TEGRA30_AUDIOCIF_DIRECTION_TX                  0
 #define TEGRA30_AUDIOCIF_DIRECTION_RX                  1
@@ -468,8 +482,30 @@ extern int tegra30_ahub_set_rx_cif_source(enum tegra30_ahub_rxcif rxcif,
                                          enum tegra30_ahub_txcif txcif);
 extern int tegra30_ahub_unset_rx_cif_source(enum tegra30_ahub_rxcif rxcif);
 
+struct tegra30_ahub_cif_conf {
+       unsigned int threshold;
+       unsigned int audio_channels;
+       unsigned int client_channels;
+       unsigned int audio_bits;
+       unsigned int client_bits;
+       unsigned int expand;
+       unsigned int stereo_conv;
+       unsigned int replicate;
+       unsigned int direction;
+       unsigned int truncate;
+       unsigned int mono_conv;
+};
+
+void tegra30_ahub_set_cif(struct regmap *regmap, unsigned int reg,
+                         struct tegra30_ahub_cif_conf *conf);
+void tegra124_ahub_set_cif(struct regmap *regmap, unsigned int reg,
+                          struct tegra30_ahub_cif_conf *conf);
+
 struct tegra30_ahub_soc_data {
        u32 clk_list_mask;
+       void (*set_audio_cif)(struct regmap *regmap,
+                             unsigned int reg,
+                             struct tegra30_ahub_cif_conf *conf);
        /*
         * FIXME: There are many more differences in HW, such as:
         * - More APBIF channels.
index 47565fd045057344390592bb2a8d7a5374cc2209..231a785b3921a5bd95d87914a92e7f5f2d266a63 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
@@ -179,6 +180,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
        struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
        unsigned int mask, val, reg;
        int ret, sample_size, srate, i2sclock, bitcnt;
+       struct tegra30_ahub_cif_conf cif_conf;
 
        if (params_channels(params) != 2)
                return -EINVAL;
@@ -217,21 +219,26 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
 
        regmap_write(i2s->regmap, TEGRA30_I2S_TIMING, val);
 
-       val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
-             (1 << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
-             (1 << TEGRA30_AUDIOCIF_CTRL_CLIENT_CHANNELS_SHIFT) |
-             TEGRA30_AUDIOCIF_CTRL_AUDIO_BITS_16 |
-             TEGRA30_AUDIOCIF_CTRL_CLIENT_BITS_16;
+       cif_conf.threshold = 0;
+       cif_conf.audio_channels = 2;
+       cif_conf.client_channels = 2;
+       cif_conf.audio_bits = TEGRA30_AUDIOCIF_BITS_16;
+       cif_conf.client_bits = TEGRA30_AUDIOCIF_BITS_16;
+       cif_conf.expand = 0;
+       cif_conf.stereo_conv = 0;
+       cif_conf.replicate = 0;
+       cif_conf.truncate = 0;
+       cif_conf.mono_conv = 0;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_RX;
+               cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_RX;
                reg = TEGRA30_I2S_CIF_RX_CTRL;
        } else {
-               val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
+               cif_conf.direction = TEGRA30_AUDIOCIF_DIRECTION_TX;
                reg = TEGRA30_I2S_CIF_TX_CTRL;
        }
 
-       regmap_write(i2s->regmap, reg, val);
+       i2s->soc_data->set_audio_cif(i2s->regmap, reg, &cif_conf);
 
        val = (1 << TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_SHIFT) |
              (1 << TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_SHIFT);
@@ -369,7 +376,7 @@ static bool tegra30_i2s_wr_rd_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                return false;
-       };
+       }
 }
 
 static bool tegra30_i2s_volatile_reg(struct device *dev, unsigned int reg)
@@ -382,7 +389,7 @@ static bool tegra30_i2s_volatile_reg(struct device *dev, unsigned int reg)
                return true;
        default:
                return false;
-       };
+       }
 }
 
 static const struct regmap_config tegra30_i2s_regmap_config = {
@@ -396,9 +403,24 @@ static const struct regmap_config tegra30_i2s_regmap_config = {
        .cache_type = REGCACHE_RBTREE,
 };
 
+static const struct tegra30_i2s_soc_data tegra30_i2s_config = {
+       .set_audio_cif = tegra30_ahub_set_cif,
+};
+
+static const struct tegra30_i2s_soc_data tegra124_i2s_config = {
+       .set_audio_cif = tegra124_ahub_set_cif,
+};
+
+static const struct of_device_id tegra30_i2s_of_match[] = {
+       { .compatible = "nvidia,tegra124-i2s", .data = &tegra124_i2s_config },
+       { .compatible = "nvidia,tegra30-i2s", .data = &tegra30_i2s_config },
+       {},
+};
+
 static int tegra30_i2s_platform_probe(struct platform_device *pdev)
 {
        struct tegra30_i2s *i2s;
+       const struct of_device_id *match;
        u32 cif_ids[2];
        struct resource *mem, *memregion;
        void __iomem *regs;
@@ -412,6 +434,14 @@ static int tegra30_i2s_platform_probe(struct platform_device *pdev)
        }
        dev_set_drvdata(&pdev->dev, i2s);
 
+       match = of_match_device(tegra30_i2s_of_match, &pdev->dev);
+       if (!match) {
+               dev_err(&pdev->dev, "Error: No device match found\n");
+               ret = -ENODEV;
+               goto err;
+       }
+       i2s->soc_data = (struct tegra30_i2s_soc_data *)match->data;
+
        i2s->dai = tegra30_i2s_dai_template;
        i2s->dai.name = dev_name(&pdev->dev);
 
@@ -539,11 +569,6 @@ static int tegra30_i2s_resume(struct device *dev)
 }
 #endif
 
-static const struct of_device_id tegra30_i2s_of_match[] = {
-       { .compatible = "nvidia,tegra30-i2s", },
-       {},
-};
-
 static const struct dev_pm_ops tegra30_i2s_pm_ops = {
        SET_RUNTIME_PM_OPS(tegra30_i2s_runtime_suspend,
                           tegra30_i2s_runtime_resume, NULL)
index bea23afe3b9f215f71b3305f065c4902703a74d5..4d0b0a30dbfb341c8a2a93b00485f2d3fb826f18 100644 (file)
 #define TEGRA30_I2S_LCOEF_COEF_MASK_US                 0xffff
 #define TEGRA30_I2S_LCOEF_COEF_MASK                    (TEGRA30_I2S_LCOEF_COEF_MASK_US << TEGRA30_I2S_LCOEF_COEF_SHIFT)
 
+struct tegra30_i2s_soc_data {
+       void (*set_audio_cif)(struct regmap *regmap,
+                             unsigned int reg,
+                             struct tegra30_ahub_cif_conf *conf);
+};
+
 struct tegra30_i2s {
+       const struct tegra30_i2s_soc_data *soc_data;
        struct snd_soc_dai_driver dai;
        int cif_id;
        struct clk *clk_i2s;
index d173880f290d0f762ec0151fb442e815354b8815..1be311c51a181996753c830ac77a902371c0553b 100644 (file)
@@ -182,6 +182,8 @@ int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
                data->soc = TEGRA_ASOC_UTILS_SOC_TEGRA30;
        else if (of_machine_is_compatible("nvidia,tegra114"))
                data->soc = TEGRA_ASOC_UTILS_SOC_TEGRA114;
+       else if (of_machine_is_compatible("nvidia,tegra124"))
+               data->soc = TEGRA_ASOC_UTILS_SOC_TEGRA124;
        else {
                dev_err(data->dev, "SoC unknown to Tegra ASoC utils\n");
                return -EINVAL;
index 19fdcafed32f88eb266d34e7f21c51e5d889f729..9577121ce971247407b6d04957de548a5b3fd67d 100644 (file)
@@ -30,6 +30,7 @@ enum tegra_asoc_utils_soc {
        TEGRA_ASOC_UTILS_SOC_TEGRA20,
        TEGRA_ASOC_UTILS_SOC_TEGRA30,
        TEGRA_ASOC_UTILS_SOC_TEGRA114,
+       TEGRA_ASOC_UTILS_SOC_TEGRA124,
 };
 
 struct tegra_asoc_utils_data {
index f056f632557c1c7dc148ca7ce98338e4a03e381e..7b2d23ba69b3bf397ba3fb963feb2d3d2b5948af 100644 (file)
@@ -56,7 +56,6 @@ static const struct snd_pcm_hardware tegra_pcm_hardware = {
 static const struct snd_dmaengine_pcm_config tegra_dmaengine_pcm_config = {
        .pcm_hardware = &tegra_pcm_hardware,
        .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
-       .compat_filter_fn = NULL,
        .prealloc_buffer_size = PAGE_SIZE * 8,
 };
 
index ae6b50f9ed56a6c2696d38acee7f8f038d3c9a4c..f65fc0987cfb8cc9bdc45ce366a6c3bfa8641f55 100644 (file)
@@ -28,6 +28,7 @@
 #include "control.h"
 
 #define CNT_INTVAL 0x10000
+#define MASCHINE_BANK_SIZE 32
 
 static int control_info(struct snd_kcontrol *kcontrol,
                        struct snd_ctl_elem_info *uinfo)
@@ -105,6 +106,10 @@ static int control_put(struct snd_kcontrol *kcontrol,
                USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1))
                cmd = EP1_CMD_DIMM_LEDS;
 
+       if (cdev->chip.usb_id ==
+               USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER))
+               cmd = EP1_CMD_DIMM_LEDS;
+
        if (pos & CNT_INTVAL) {
                int i = pos & ~CNT_INTVAL;
 
@@ -121,6 +126,20 @@ static int control_put(struct snd_kcontrol *kcontrol,
                                     usb_sndbulkpipe(cdev->chip.dev, 8),
                                     cdev->ep8_out_buf, sizeof(cdev->ep8_out_buf),
                                     &actual_len, 200);
+               } else if (cdev->chip.usb_id ==
+                       USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER)) {
+
+                       int bank = 0;
+                       int offset = 0;
+
+                       if (i >= MASCHINE_BANK_SIZE) {
+                               bank = 0x1e;
+                               offset = MASCHINE_BANK_SIZE;
+                       }
+
+                       snd_usb_caiaq_send_command_bank(cdev, cmd, bank,
+                                       cdev->control_state + offset,
+                                       MASCHINE_BANK_SIZE);
                } else {
                        snd_usb_caiaq_send_command(cdev, cmd,
                                        cdev->control_state, sizeof(cdev->control_state));
@@ -490,6 +509,74 @@ static struct caiaq_controller kontrols4_controller[] = {
        { "LED: FX2: Mode",                     133 | CNT_INTVAL },
 };
 
+static struct caiaq_controller maschine_controller[] = {
+       { "LED: Pad 1",                         3  | CNT_INTVAL },
+       { "LED: Pad 2",                         2  | CNT_INTVAL },
+       { "LED: Pad 3",                         1  | CNT_INTVAL },
+       { "LED: Pad 4",                         0  | CNT_INTVAL },
+       { "LED: Pad 5",                         7  | CNT_INTVAL },
+       { "LED: Pad 6",                         6  | CNT_INTVAL },
+       { "LED: Pad 7",                         5  | CNT_INTVAL },
+       { "LED: Pad 8",                         4  | CNT_INTVAL },
+       { "LED: Pad 9",                         11 | CNT_INTVAL },
+       { "LED: Pad 10",                        10 | CNT_INTVAL },
+       { "LED: Pad 11",                        9  | CNT_INTVAL },
+       { "LED: Pad 12",                        8  | CNT_INTVAL },
+       { "LED: Pad 13",                        15 | CNT_INTVAL },
+       { "LED: Pad 14",                        14 | CNT_INTVAL },
+       { "LED: Pad 15",                        13 | CNT_INTVAL },
+       { "LED: Pad 16",                        12 | CNT_INTVAL },
+
+       { "LED: Mute",                          16 | CNT_INTVAL },
+       { "LED: Solo",                          17 | CNT_INTVAL },
+       { "LED: Select",                        18 | CNT_INTVAL },
+       { "LED: Duplicate",                     19 | CNT_INTVAL },
+       { "LED: Navigate",                      20 | CNT_INTVAL },
+       { "LED: Pad Mode",                      21 | CNT_INTVAL },
+       { "LED: Pattern",                       22 | CNT_INTVAL },
+       { "LED: Scene",                         23 | CNT_INTVAL },
+
+       { "LED: Shift",                         24 | CNT_INTVAL },
+       { "LED: Erase",                         25 | CNT_INTVAL },
+       { "LED: Grid",                          26 | CNT_INTVAL },
+       { "LED: Right Bottom",                  27 | CNT_INTVAL },
+       { "LED: Rec",                           28 | CNT_INTVAL },
+       { "LED: Play",                          29 | CNT_INTVAL },
+       { "LED: Left Bottom",                   32 | CNT_INTVAL },
+       { "LED: Restart",                       33 | CNT_INTVAL },
+
+       { "LED: Group A",                       41 | CNT_INTVAL },
+       { "LED: Group B",                       40 | CNT_INTVAL },
+       { "LED: Group C",                       37 | CNT_INTVAL },
+       { "LED: Group D",                       36 | CNT_INTVAL },
+       { "LED: Group E",                       39 | CNT_INTVAL },
+       { "LED: Group F",                       38 | CNT_INTVAL },
+       { "LED: Group G",                       35 | CNT_INTVAL },
+       { "LED: Group H",                       34 | CNT_INTVAL },
+
+       { "LED: Auto Write",                    42 | CNT_INTVAL },
+       { "LED: Snap",                          43 | CNT_INTVAL },
+       { "LED: Right Top",                     44 | CNT_INTVAL },
+       { "LED: Left Top",                      45 | CNT_INTVAL },
+       { "LED: Sampling",                      46 | CNT_INTVAL },
+       { "LED: Browse",                        47 | CNT_INTVAL },
+       { "LED: Step",                          48 | CNT_INTVAL },
+       { "LED: Control",                       49 | CNT_INTVAL },
+
+       { "LED: Top Button 1",                  57 | CNT_INTVAL },
+       { "LED: Top Button 2",                  56 | CNT_INTVAL },
+       { "LED: Top Button 3",                  55 | CNT_INTVAL },
+       { "LED: Top Button 4",                  54 | CNT_INTVAL },
+       { "LED: Top Button 5",                  53 | CNT_INTVAL },
+       { "LED: Top Button 6",                  52 | CNT_INTVAL },
+       { "LED: Top Button 7",                  51 | CNT_INTVAL },
+       { "LED: Top Button 8",                  50 | CNT_INTVAL },
+
+       { "LED: Note Repeat",                   58 | CNT_INTVAL },
+
+       { "Backlight Display",                  59 | CNT_INTVAL }
+};
+
 static int add_controls(struct caiaq_controller *c, int num,
                        struct snd_usb_caiaqdev *cdev)
 {
@@ -553,6 +640,11 @@ int snd_usb_caiaq_control_init(struct snd_usb_caiaqdev *cdev)
                ret = add_controls(kontrols4_controller,
                        ARRAY_SIZE(kontrols4_controller), cdev);
                break;
+
+       case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER):
+               ret = add_controls(maschine_controller,
+                       ARRAY_SIZE(maschine_controller), cdev);
+               break;
        }
 
        return ret;
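The Maschine handling above splits its LED states into two 32-byte banks: indices below MASCHINE_BANK_SIZE are sent from the start of control_state with bank 0, and everything from index 32 upwards is sent from offset 32 with bank 0x1e. A self-contained sketch of that selection, using example indices from the controller table; the bank values are only what the hunk above shows, nothing beyond it:

#include <stdio.h>

#define MASCHINE_BANK_SIZE 32

/* Mirror of the bank/offset choice made in control_put() for the Maschine:
 * the first 32 LED values go out as bank 0, the rest as bank 0x1e, each as
 * a MASCHINE_BANK_SIZE slice of control_state.
 */
static void maschine_bank(int led_index, int *bank, int *offset)
{
	*bank = 0;
	*offset = 0;
	if (led_index >= MASCHINE_BANK_SIZE) {
		*bank = 0x1e;
		*offset = MASCHINE_BANK_SIZE;
	}
}

int main(void)
{
	int bank, offset;

	maschine_bank(19, &bank, &offset);	/* "LED: Duplicate" */
	printf("led 19 -> bank 0x%02x, offset %d\n", bank, offset);

	maschine_bank(41, &bank, &offset);	/* "LED: Group A" */
	printf("led 41 -> bank 0x%02x, offset %d\n", bank, offset);
	return 0;
}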
index 1a61dd12fe38a9bb881e462f396e92f43a048e36..bc55f708a696d11a3302e87557fa8dfef15ac5f0 100644 (file)
@@ -235,6 +235,31 @@ int snd_usb_caiaq_send_command(struct snd_usb_caiaqdev *cdev,
                           cdev->ep1_out_buf, len+1, &actual_len, 200);
 }
 
+int snd_usb_caiaq_send_command_bank(struct snd_usb_caiaqdev *cdev,
+                              unsigned char command,
+                              unsigned char bank,
+                              const unsigned char *buffer,
+                              int len)
+{
+       int actual_len;
+       struct usb_device *usb_dev = cdev->chip.dev;
+
+       if (!usb_dev)
+               return -EIO;
+
+       if (len > EP1_BUFSIZE - 2)
+               len = EP1_BUFSIZE - 2;
+
+       if (buffer && len > 0)
+               memcpy(cdev->ep1_out_buf+2, buffer, len);
+
+       cdev->ep1_out_buf[0] = command;
+       cdev->ep1_out_buf[1] = bank;
+
+       return usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, 1),
+                          cdev->ep1_out_buf, len+2, &actual_len, 200);
+}
+
 int snd_usb_caiaq_set_audio_params (struct snd_usb_caiaqdev *cdev,
                                    int rate, int depth, int bpp)
 {
index ad102fac694221b89a7b0d95f49a5aa225ae8ba9..ab0f7520a99be7e6f835817d31951d19084f5ea3 100644 (file)
@@ -128,5 +128,10 @@ int snd_usb_caiaq_send_command(struct snd_usb_caiaqdev *cdev,
                               unsigned char command,
                               const unsigned char *buffer,
                               int len);
+int snd_usb_caiaq_send_command_bank(struct snd_usb_caiaqdev *cdev,
+                              unsigned char command,
+                              unsigned char bank,
+                              const unsigned char *buffer,
+                              int len);
 
 #endif /* CAIAQ_DEVICE_H */
index 64952e2d3ed1e94a12ad9e593a15298e8eaceee7..d979050e6a6afbc7daf9b65f666c2de10dbb6269 100644 (file)
@@ -79,7 +79,6 @@ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;/* Enable this card *
 /* Vendor/product IDs for this card */
 static int vid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
 static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
-static int nrpacks = 8;                /* max. number of packets per urb */
 static int device_setup[SNDRV_CARDS]; /* device parameter for this card */
 static bool ignore_ctl_error;
 static bool autoclock = true;
@@ -94,8 +93,6 @@ module_param_array(vid, int, NULL, 0444);
 MODULE_PARM_DESC(vid, "Vendor ID for the USB audio device.");
 module_param_array(pid, int, NULL, 0444);
 MODULE_PARM_DESC(pid, "Product ID for the USB audio device.");
-module_param(nrpacks, int, 0644);
-MODULE_PARM_DESC(nrpacks, "Max. number of packets per URB.");
 module_param_array(device_setup, int, NULL, 0444);
 MODULE_PARM_DESC(device_setup, "Specific device setup (if needed).");
 module_param(ignore_ctl_error, bool, 0444);
@@ -349,6 +346,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
        case USB_SPEED_LOW:
        case USB_SPEED_FULL:
        case USB_SPEED_HIGH:
+       case USB_SPEED_WIRELESS:
        case USB_SPEED_SUPER:
                break;
        default:
@@ -374,7 +372,6 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
        chip->dev = dev;
        chip->card = card;
        chip->setup = device_setup[idx];
-       chip->nrpacks = nrpacks;
        chip->autoclock = autoclock;
        chip->probing = 1;
 
@@ -754,19 +751,4 @@ static struct usb_driver usb_audio_driver = {
        .supports_autosuspend = 1,
 };
 
-static int __init snd_usb_audio_init(void)
-{
-       if (nrpacks < 1 || nrpacks > MAX_PACKS) {
-               printk(KERN_WARNING "invalid nrpacks value.\n");
-               return -EINVAL;
-       }
-       return usb_register(&usb_audio_driver);
-}
-
-static void __exit snd_usb_audio_cleanup(void)
-{
-       usb_deregister(&usb_audio_driver);
-}
-
-module_init(snd_usb_audio_init);
-module_exit(snd_usb_audio_cleanup);
+module_usb_driver(usb_audio_driver);
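The hand-rolled init/exit pair removed here is exactly the boilerplate module_usb_driver() is meant to replace. As a rough, hand-expanded sketch of what the macro generates (the real definitions live in <linux/usb.h> and <linux/device.h>; the nrpacks range check simply disappears along with the module parameter):

/* Approximate expansion of module_usb_driver(usb_audio_driver) */
static int __init usb_audio_driver_init(void)
{
	return usb_register(&usb_audio_driver);
}
module_init(usb_audio_driver_init);

static void __exit usb_audio_driver_exit(void)
{
	usb_deregister(&usb_audio_driver);
}
module_exit(usb_audio_driver_exit);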
index 5ecacaa90b53fc163c9383b18100bb1ce97f1dfe..9867ab866857260df9432b4378d5ba87c1d90834 100644 (file)
@@ -2,11 +2,11 @@
 #define __USBAUDIO_CARD_H
 
 #define MAX_NR_RATES   1024
-#define MAX_PACKS      20
+#define MAX_PACKS      6               /* per URB */
 #define MAX_PACKS_HS   (MAX_PACKS * 8) /* in high speed mode */
-#define MAX_URBS       8
+#define MAX_URBS       12
 #define SYNC_URBS      4       /* always four urbs for sync */
-#define MAX_QUEUE      24      /* try not to exceed this queue length, in ms */
+#define MAX_QUEUE      18      /* try not to exceed this queue length, in ms */
 
 struct audioformat {
        struct list_head list;
@@ -87,6 +87,7 @@ struct snd_usb_endpoint {
        unsigned int phase;             /* phase accumulator */
        unsigned int maxpacksize;       /* max packet size in bytes */
        unsigned int maxframesize;      /* max packet size in frames */
+       unsigned int max_urb_frames;    /* max URB size in frames */
        unsigned int curpacksize;       /* current packet size in bytes (for capture) */
        unsigned int curframesize;      /* current packet size in frames (for capture) */
        unsigned int syncmaxsize;       /* sync endpoint packet size */
@@ -95,7 +96,7 @@ struct snd_usb_endpoint {
        unsigned int syncinterval;      /* P for adaptive mode, 0 otherwise */
        unsigned char silence_value;
        unsigned int stride;
-       int iface, alt_idx;
+       int iface, altsetting;
        int skip_packets;               /* quirks for devices to ignore the first n packets
                                           in a stream */
 
@@ -116,6 +117,8 @@ struct snd_usb_substream {
        unsigned int channels_max;      /* max channels in the all audiofmts */
        unsigned int cur_rate;          /* current rate (for hw_params callback) */
        unsigned int period_bytes;      /* current period bytes (for hw_params callback) */
+       unsigned int period_frames;     /* current frames per period */
+       unsigned int buffer_periods;    /* current periods per buffer */
        unsigned int altset_idx;     /* USB data format: index of alternate setting */
        unsigned int txfr_quirk:1;      /* allow sub-frame alignment */
        unsigned int fmt_type;          /* USB audio format type (1-3) */
@@ -125,6 +128,7 @@ struct snd_usb_substream {
 
        unsigned int hwptr_done;        /* processed byte position in the buffer */
        unsigned int transfer_done;             /* processed frames since last period update */
+       unsigned int frame_limit;       /* limits number of packets in URB */
 
        /* data and sync endpoints for this stream */
        unsigned int ep_num;            /* the endpoint number */
index 93e970f2b3c0ad4d58957faed6b76ec2a1fb2991..b9ba0fcc45df10d4151bb39f8a5d6284dc8ec23e 100644 (file)
@@ -33,7 +33,6 @@
 #include "pcm.h"
 #include "quirks.h"
 
-#define EP_FLAG_ACTIVATED      0
 #define EP_FLAG_RUNNING                1
 #define EP_FLAG_STOPPING       2
 
@@ -426,9 +425,9 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
        list_for_each_entry(ep, &chip->ep_list, list) {
                if (ep->ep_num == ep_num &&
                    ep->iface == alts->desc.bInterfaceNumber &&
-                   ep->alt_idx == alts->desc.bAlternateSetting) {
+                   ep->altsetting == alts->desc.bAlternateSetting) {
                        snd_printdd(KERN_DEBUG "Re-using EP %x in iface %d,%d @%p\n",
-                                       ep_num, ep->iface, ep->alt_idx, ep);
+                                       ep_num, ep->iface, ep->altsetting, ep);
                        goto __exit_unlock;
                }
        }
@@ -447,7 +446,7 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
        ep->type = type;
        ep->ep_num = ep_num;
        ep->iface = alts->desc.bInterfaceNumber;
-       ep->alt_idx = alts->desc.bAlternateSetting;
+       ep->altsetting = alts->desc.bAlternateSetting;
        INIT_LIST_HEAD(&ep->ready_playback_urbs);
        ep_num &= USB_ENDPOINT_NUMBER_MASK;
 
@@ -574,11 +573,14 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
                              snd_pcm_format_t pcm_format,
                              unsigned int channels,
                              unsigned int period_bytes,
+                             unsigned int frames_per_period,
+                             unsigned int periods_per_buffer,
                              struct audioformat *fmt,
                              struct snd_usb_endpoint *sync_ep)
 {
-       unsigned int maxsize, i, urb_packs, total_packs, packs_per_ms;
-       int is_playback = usb_pipeout(ep->pipe);
+       unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
+       unsigned int max_packs_per_period, urbs_per_period, urb_packs;
+       unsigned int max_urbs, i;
        int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels;
 
        if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
@@ -611,58 +613,67 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
        else
                ep->curpacksize = maxsize;
 
-       if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL)
+       if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) {
                packs_per_ms = 8 >> ep->datainterval;
-       else
-               packs_per_ms = 1;
-
-       if (is_playback && !snd_usb_endpoint_implicit_feedback_sink(ep)) {
-               urb_packs = max(ep->chip->nrpacks, 1);
-               urb_packs = min(urb_packs, (unsigned int) MAX_PACKS);
+               max_packs_per_urb = MAX_PACKS_HS;
        } else {
-               urb_packs = 1;
+               packs_per_ms = 1;
+               max_packs_per_urb = MAX_PACKS;
        }
+       if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep))
+               max_packs_per_urb = min(max_packs_per_urb,
+                                       1U << sync_ep->syncinterval);
+       max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);
 
-       urb_packs *= packs_per_ms;
+       /*
+        * Capture endpoints need to use small URBs because there's no way
+        * to tell in advance where the next period will end, and we don't
+        * want the next URB to complete much after the period ends.
+        *
+        * Playback endpoints with implicit sync must use the same parameters
+        * as their corresponding capture endpoint.
+        */
+       if (usb_pipein(ep->pipe) ||
+                       snd_usb_endpoint_implicit_feedback_sink(ep)) {
 
-       if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep))
-               urb_packs = min(urb_packs, 1U << sync_ep->syncinterval);
+               /* make capture URBs <= 1 ms and smaller than a period */
+               urb_packs = min(max_packs_per_urb, packs_per_ms);
+               while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
+                       urb_packs >>= 1;
+               ep->nurbs = MAX_URBS;
 
-       /* decide how many packets to be used */
-       if (is_playback && !snd_usb_endpoint_implicit_feedback_sink(ep)) {
-               unsigned int minsize, maxpacks;
+       /*
+        * Playback endpoints without implicit sync are adjusted so that
+        * a period fits as evenly as possible in the smallest number of
+        * URBs.  The total number of URBs is adjusted to the size of the
+        * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
+        */
+       } else {
                /* determine how small a packet can be */
-               minsize = (ep->freqn >> (16 - ep->datainterval))
-                         * (frame_bits >> 3);
+               minsize = (ep->freqn >> (16 - ep->datainterval)) *
+                               (frame_bits >> 3);
                /* with sync from device, assume it can be 12% lower */
                if (sync_ep)
                        minsize -= minsize >> 3;
                minsize = max(minsize, 1u);
-               total_packs = (period_bytes + minsize - 1) / minsize;
-               /* we need at least two URBs for queueing */
-               if (total_packs < 2) {
-                       total_packs = 2;
-               } else {
-                       /* and we don't want too long a queue either */
-                       maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
-                       total_packs = min(total_packs, maxpacks);
-               }
-       } else {
-               while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
-                       urb_packs >>= 1;
-               total_packs = MAX_URBS * urb_packs;
-       }
 
-       ep->nurbs = (total_packs + urb_packs - 1) / urb_packs;
-       if (ep->nurbs > MAX_URBS) {
-               /* too much... */
-               ep->nurbs = MAX_URBS;
-               total_packs = MAX_URBS * urb_packs;
-       } else if (ep->nurbs < 2) {
-               /* too little - we need at least two packets
-                * to ensure contiguous playback/capture
-                */
-               ep->nurbs = 2;
+               /* how many packets will contain an entire ALSA period? */
+               max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize);
+
+               /* how many URBs will contain a period? */
+               urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
+                               max_packs_per_urb);
+               /* how many packets are needed in each URB? */
+               urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);
+
+               /* limit the number of frames in a single URB */
+               ep->max_urb_frames = DIV_ROUND_UP(frames_per_period,
+                                       urbs_per_period);
+
+               /* try to use enough URBs to contain an entire ALSA buffer */
+               max_urbs = min((unsigned) MAX_URBS,
+                               MAX_QUEUE * packs_per_ms / urb_packs);
+               ep->nurbs = min(max_urbs, urbs_per_period * periods_per_buffer);
        }
 
        /* allocate and initialize data urbs */
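To make the playback-path arithmetic above concrete, here is a standalone rerun of the same DIV_ROUND_UP chain with invented but plausible figures: 48 kHz S16 stereo on a high-speed bus (roughly 6 frames per microframe, standing in for the driver's freqn bookkeeping), 1024-frame periods, 4 periods per buffer, no sync endpoint. The numbers are illustrative only and are not taken from the patch.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX_PACKS_HS	48	/* MAX_PACKS * 8, per the new card.h */
#define MAX_URBS	12
#define MAX_QUEUE	18	/* ms */

int main(void)
{
	/* assumed stream parameters, not from the patch */
	unsigned int bytes_per_frame = 4;		/* S16_LE, stereo */
	unsigned int packs_per_ms = 8;			/* high speed, datainterval 0 */
	unsigned int minsize = 6 * bytes_per_frame;	/* ~48 kHz per microframe */
	unsigned int period_bytes = 1024 * bytes_per_frame;
	unsigned int frames_per_period = 1024, periods_per_buffer = 4;
	unsigned int max_packs_per_urb = MAX_PACKS_HS;
	unsigned int max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize);
	unsigned int urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
						    max_packs_per_urb);
	unsigned int urb_packs = DIV_ROUND_UP(max_packs_per_period,
					      urbs_per_period);
	unsigned int max_urb_frames = DIV_ROUND_UP(frames_per_period,
						   urbs_per_period);
	unsigned int max_urbs = MAX_QUEUE * packs_per_ms / urb_packs;
	unsigned int nurbs;

	if (max_urbs > MAX_URBS)
		max_urbs = MAX_URBS;
	nurbs = urbs_per_period * periods_per_buffer;
	if (nurbs > max_urbs)
		nurbs = max_urbs;

	printf("packs/period=%u urbs/period=%u packs/urb=%u frames/urb=%u nurbs=%u\n",
	       max_packs_per_period, urbs_per_period, urb_packs,
	       max_urb_frames, nurbs);
	return 0;
}

With these inputs a period spreads over four 43-packet URBs of at most 256 frames each, and it is the MAX_QUEUE cap, not MAX_URBS, that ends up bounding the ring at three URBs in flight.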
@@ -670,8 +681,7 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
                struct snd_urb_ctx *u = &ep->urb[i];
                u->index = i;
                u->ep = ep;
-               u->packets = (i + 1) * total_packs / ep->nurbs
-                       - i * total_packs / ep->nurbs;
+               u->packets = urb_packs;
                u->buffer_size = maxsize * u->packets;
 
                if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
@@ -703,8 +713,7 @@ out_of_memory:
 /*
  * configure a sync endpoint
  */
-static int sync_ep_set_params(struct snd_usb_endpoint *ep,
-                             struct audioformat *fmt)
+static int sync_ep_set_params(struct snd_usb_endpoint *ep)
 {
        int i;
 
@@ -748,6 +757,8 @@ out_of_memory:
  * @pcm_format: the audio format.
  * @channels: the number of audio channels.
  * @period_bytes: the number of bytes in one alsa period.
+ * @period_frames: the number of frames in one alsa period.
+ * @buffer_periods: the number of periods in one alsa buffer.
  * @rate: the frame rate.
  * @fmt: the USB audio format information
  * @sync_ep: the sync endpoint to use, if any
@@ -760,6 +771,8 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
                                snd_pcm_format_t pcm_format,
                                unsigned int channels,
                                unsigned int period_bytes,
+                               unsigned int period_frames,
+                               unsigned int buffer_periods,
                                unsigned int rate,
                                struct audioformat *fmt,
                                struct snd_usb_endpoint *sync_ep)
@@ -793,10 +806,11 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
        switch (ep->type) {
        case  SND_USB_ENDPOINT_TYPE_DATA:
                err = data_ep_set_params(ep, pcm_format, channels,
-                                        period_bytes, fmt, sync_ep);
+                                        period_bytes, period_frames,
+                                        buffer_periods, fmt, sync_ep);
                break;
        case  SND_USB_ENDPOINT_TYPE_SYNC:
-               err = sync_ep_set_params(ep, fmt);
+               err = sync_ep_set_params(ep);
                break;
        default:
                err = -EINVAL;
@@ -931,28 +945,21 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep)
  *
  * @ep: the endpoint to deactivate
  *
- * If the endpoint is not currently in use, this functions will select the
- * alternate interface setting 0 for the interface of this endpoint.
+ * If the endpoint is not currently in use, this function will
+ * deactivate its associated URBs.
  *
  * In case of any active users, this function does nothing.
- *
- * Returns an error if usb_set_interface() failed, 0 in all other
- * cases.
  */
-int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
+void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
 {
        if (!ep)
-               return -EINVAL;
-
-       deactivate_urbs(ep, true);
-       wait_clear_urbs(ep);
+               return;
 
        if (ep->use_count != 0)
-               return 0;
-
-       clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
+               return;
 
-       return 0;
+       deactivate_urbs(ep, true);
+       wait_clear_urbs(ep);
 }
 
 /**
index 2287adf5ca597b65156f1dbbffca609774b84c00..1c7e8ee48abc12e3e61cbda6491f20bdb9628ce5 100644 (file)
@@ -12,6 +12,8 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
                                snd_pcm_format_t pcm_format,
                                unsigned int channels,
                                unsigned int period_bytes,
+                               unsigned int period_frames,
+                               unsigned int buffer_periods,
                                unsigned int rate,
                                struct audioformat *fmt,
                                struct snd_usb_endpoint *sync_ep);
@@ -20,7 +22,7 @@ int  snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep);
 void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
-int  snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
+void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_free(struct list_head *head);
 
 int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
index 620902463c6ea2d5b9ba8eec496c33b5c8e78c2a..51ed1ac825fdca80744a1351de727bdc4fddbfbe 100644 (file)
@@ -118,6 +118,7 @@ unsigned char snd_usb_parse_datainterval(struct snd_usb_audio *chip,
 {
        switch (snd_usb_get_speed(chip->dev)) {
        case USB_SPEED_HIGH:
+       case USB_SPEED_WIRELESS:
        case USB_SPEED_SUPER:
                if (get_endpoint(alts, 0)->bInterval >= 1 &&
                    get_endpoint(alts, 0)->bInterval <= 4)
index 95558ef4a7a09c72ed52c4706348d765a40c4314..44b0ba4feab3bd1b43100feb457e82e290ea26dc 100644 (file)
@@ -1151,14 +1151,14 @@ static void check_no_speaker_on_headset(struct snd_kcontrol *kctl,
        const char *names_to_check[] = {
                "Headset", "headset", "Headphone", "headphone", NULL};
        const char **s;
-       bool found = 0;
+       bool found = false;
 
        if (strcmp("Speaker", kctl->id.name))
                return;
 
        for (s = names_to_check; *s; s++)
                if (strstr(card->shortname, *s)) {
-                       found = 1;
+                       found = true;
                        break;
                }
 
index b375d58871e7ce9f7eb0e63a3f73644984c6c5e1..ca3256d6fde3d1206ffe3fc7313305561438f611 100644 (file)
@@ -241,16 +241,17 @@ static int start_endpoints(struct snd_usb_substream *subs, bool can_sleep)
                struct snd_usb_endpoint *ep = subs->sync_endpoint;
 
                if (subs->data_endpoint->iface != subs->sync_endpoint->iface ||
-                   subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) {
+                   subs->data_endpoint->altsetting != subs->sync_endpoint->altsetting) {
                        err = usb_set_interface(subs->dev,
                                                subs->sync_endpoint->iface,
-                                               subs->sync_endpoint->alt_idx);
+                                               subs->sync_endpoint->altsetting);
                        if (err < 0) {
+                               clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags);
                                snd_printk(KERN_ERR
                                           "%d:%d:%d: cannot set interface (%d)\n",
                                           subs->dev->devnum,
                                           subs->sync_endpoint->iface,
-                                          subs->sync_endpoint->alt_idx, err);
+                                          subs->sync_endpoint->altsetting, err);
                                return -EIO;
                        }
                }
@@ -282,22 +283,6 @@ static void stop_endpoints(struct snd_usb_substream *subs, bool wait)
        }
 }
 
-static int deactivate_endpoints(struct snd_usb_substream *subs)
-{
-       int reta, retb;
-
-       reta = snd_usb_endpoint_deactivate(subs->sync_endpoint);
-       retb = snd_usb_endpoint_deactivate(subs->data_endpoint);
-
-       if (reta < 0)
-               return reta;
-
-       if (retb < 0)
-               return retb;
-
-       return 0;
-}
-
 static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
                                     unsigned int altsetting,
                                     struct usb_host_interface **alts,
@@ -595,6 +580,7 @@ static int configure_sync_endpoint(struct snd_usb_substream *subs)
                                                   subs->pcm_format,
                                                   subs->channels,
                                                   subs->period_bytes,
+                                                  0, 0,
                                                   subs->cur_rate,
                                                   subs->cur_audiofmt,
                                                   NULL);
@@ -631,6 +617,7 @@ static int configure_sync_endpoint(struct snd_usb_substream *subs)
                                          subs->pcm_format,
                                          sync_fp->channels,
                                          sync_period_bytes,
+                                         0, 0,
                                          subs->cur_rate,
                                          sync_fp,
                                          NULL);
@@ -653,6 +640,8 @@ static int configure_endpoint(struct snd_usb_substream *subs)
                                          subs->pcm_format,
                                          subs->channels,
                                          subs->period_bytes,
+                                         subs->period_frames,
+                                         subs->buffer_periods,
                                          subs->cur_rate,
                                          subs->cur_audiofmt,
                                          subs->sync_endpoint);
@@ -689,6 +678,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
 
        subs->pcm_format = params_format(hw_params);
        subs->period_bytes = params_period_bytes(hw_params);
+       subs->period_frames = params_period_size(hw_params);
+       subs->buffer_periods = params_periods(hw_params);
        subs->channels = params_channels(hw_params);
        subs->cur_rate = params_rate(hw_params);
 
@@ -730,7 +721,8 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
        down_read(&subs->stream->chip->shutdown_rwsem);
        if (!subs->stream->chip->shutdown) {
                stop_endpoints(subs, true);
-               deactivate_endpoints(subs);
+               snd_usb_endpoint_deactivate(subs->sync_endpoint);
+               snd_usb_endpoint_deactivate(subs->data_endpoint);
        }
        up_read(&subs->stream->chip->shutdown_rwsem);
        return snd_pcm_lib_free_vmalloc_buffer(substream);
@@ -1363,6 +1355,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
        frames = 0;
        urb->number_of_packets = 0;
        spin_lock_irqsave(&subs->lock, flags);
+       subs->frame_limit += ep->max_urb_frames;
        for (i = 0; i < ctx->packets; i++) {
                if (ctx->packet_size[i])
                        counts = ctx->packet_size[i];
@@ -1377,6 +1370,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
                subs->transfer_done += counts;
                if (subs->transfer_done >= runtime->period_size) {
                        subs->transfer_done -= runtime->period_size;
+                       subs->frame_limit = 0;
                        period_elapsed = 1;
                        if (subs->fmt_type == UAC_FORMAT_TYPE_II) {
                                if (subs->transfer_done > 0) {
@@ -1399,8 +1393,10 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
                                break;
                        }
                }
-               if (period_elapsed &&
-                   !snd_usb_endpoint_implicit_feedback_sink(subs->data_endpoint)) /* finish at the period boundary */
+               /* finish at the period boundary or after enough frames */
+               if ((period_elapsed ||
+                               subs->transfer_done >= subs->frame_limit) &&
+                   !snd_usb_endpoint_implicit_feedback_sink(ep))
                        break;
        }
        bytes = frames * ep->stride;
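The frame_limit bookkeeping added above can be seen in isolation with a toy re-enactment: every URB raises the budget by max_urb_frames, and packets keep being appended until either a period boundary is crossed or the budget is used up, so a single URB can no longer balloon when periods are large. All numbers below are invented.

#include <stdio.h>

int main(void)
{
	unsigned int max_urb_frames = 256, period_size = 1024;
	unsigned int transfer_done = 0, frame_limit = 0, counts = 6;
	int urb;

	for (urb = 0; urb < 4; urb++) {
		unsigned int frames = 0;
		int period_elapsed = 0;

		frame_limit += max_urb_frames;
		for (;;) {
			frames += counts;
			transfer_done += counts;
			if (transfer_done >= period_size) {
				transfer_done -= period_size;
				frame_limit = 0;
				period_elapsed = 1;
			}
			/* finish at the period boundary or after enough frames */
			if (period_elapsed || transfer_done >= frame_limit)
				break;
		}
		printf("urb %d: %u frames queued, transfer_done=%u\n",
		       urb, frames, transfer_done);
	}
	return 0;
}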
index caabe9b3af49250460d9a3b7e44e5896fea64c9a..5d2fe0530745a175c24a9034416a29477be96062 100644 (file)
@@ -55,7 +55,6 @@ struct snd_usb_audio {
        struct list_head mixer_list;    /* list of mixer interfaces */
 
        int setup;                      /* from the 'device_setup' module param */
-       int nrpacks;                    /* from the 'nrpacks' module param */
        bool autoclock;                 /* from the 'autoclock' module param */
 
        struct usb_host_interface *ctrl_intf;   /* the audio control interface */
index d0323a693ba20f4719731369f85324e9ba582096..999550bbad40e66f259d9ed2b044ed478e377d72 100644 (file)
@@ -262,7 +262,9 @@ static int usb_stream_hwdep_mmap(struct snd_hwdep *hw,
        }
 
        area->vm_ops = &usb_stream_hwdep_vm_ops;
-       area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       area->vm_flags |= VM_DONTDUMP;
+       if (!read)
+               area->vm_flags |= VM_DONTEXPAND;
        area->vm_private_data = us122l;
        atomic_inc(&us122l->mmap_count);
 out:
index 63fb5219f0f8aad7a844eb1942b5cd728be678dc..6234a51625b1b6a556f252c2fea3a150db1bdbc1 100644 (file)
@@ -299,19 +299,6 @@ static void usX2Y_error_urb_status(struct usX2Ydev *usX2Y,
        usX2Y_clients_stop(usX2Y);
 }
 
-static void usX2Y_error_sequence(struct usX2Ydev *usX2Y,
-                                struct snd_usX2Y_substream *subs, struct urb *urb)
-{
-       snd_printk(KERN_ERR
-"Sequence Error!(hcd_frame=%i ep=%i%s;wait=%i,frame=%i).\n"
-"Most probably some urb of usb-frame %i is still missing.\n"
-"Cause could be too long delays in usb-hcd interrupt handling.\n",
-                  usb_get_current_frame_number(usX2Y->dev),
-                  subs->endpoint, usb_pipein(urb->pipe) ? "in" : "out",
-                  usX2Y->wait_iso_frame, urb->start_frame, usX2Y->wait_iso_frame);
-       usX2Y_clients_stop(usX2Y);
-}
-
 static void i_usX2Y_urb_complete(struct urb *urb)
 {
        struct snd_usX2Y_substream *subs = urb->context;
@@ -328,12 +315,9 @@ static void i_usX2Y_urb_complete(struct urb *urb)
                usX2Y_error_urb_status(usX2Y, subs, urb);
                return;
        }
-       if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
-               subs->completed_urb = urb;
-       else {
-               usX2Y_error_sequence(usX2Y, subs, urb);
-               return;
-       }
+
+       subs->completed_urb = urb;
+
        {
                struct snd_usX2Y_substream *capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE],
                        *playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
index f2a1acdc4d839f22eb7fa83d63b5f35ce367c5cd..814d0e887c62e5c451c3ff7cc4b8f448c3a45007 100644 (file)
@@ -244,13 +244,8 @@ static void i_usX2Y_usbpcm_urb_complete(struct urb *urb)
                usX2Y_error_urb_status(usX2Y, subs, urb);
                return;
        }
-       if (likely((urb->start_frame & 0xFFFF) == (usX2Y->wait_iso_frame & 0xFFFF)))
-               subs->completed_urb = urb;
-       else {
-               usX2Y_error_sequence(usX2Y, subs, urb);
-               return;
-       }
 
+       subs->completed_urb = urb;
        capsubs = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE];
        capsubs2 = usX2Y->subs[SNDRV_PCM_STREAM_CAPTURE + 2];
        playbacksubs = usX2Y->subs[SNDRV_PCM_STREAM_PLAYBACK];
index 3a0ff7fb71b633df77cfd1302ce35e2bf9506142..64c043b7a43848f17a023dcf9479dbd77f96d7be 100644 (file)
@@ -770,6 +770,7 @@ check: $(OUTPUT)common-cmds.h
 install-bin: all
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
        $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
+       $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
        $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
 ifndef NO_LIBPERL
        $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
index 15130b50dfe3be2fea6a975a102283f6d33e4887..fe9b61e322a557b063ecc78eccb9a87c8de73dda 100644 (file)
@@ -2,3 +2,6 @@ ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
+ifndef NO_LIBUNWIND
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
+endif
diff --git a/tools/perf/arch/arm/include/perf_regs.h b/tools/perf/arch/arm/include/perf_regs.h
new file mode 100644 (file)
index 0000000..2a1cfde
--- /dev/null
@@ -0,0 +1,54 @@
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include "../../util/types.h"
+#include <asm/perf_regs.h>
+
+#define PERF_REGS_MASK ((1ULL << PERF_REG_ARM_MAX) - 1)
+#define PERF_REG_IP    PERF_REG_ARM_PC
+#define PERF_REG_SP    PERF_REG_ARM_SP
+
+static inline const char *perf_reg_name(int id)
+{
+       switch (id) {
+       case PERF_REG_ARM_R0:
+               return "r0";
+       case PERF_REG_ARM_R1:
+               return "r1";
+       case PERF_REG_ARM_R2:
+               return "r2";
+       case PERF_REG_ARM_R3:
+               return "r3";
+       case PERF_REG_ARM_R4:
+               return "r4";
+       case PERF_REG_ARM_R5:
+               return "r5";
+       case PERF_REG_ARM_R6:
+               return "r6";
+       case PERF_REG_ARM_R7:
+               return "r7";
+       case PERF_REG_ARM_R8:
+               return "r8";
+       case PERF_REG_ARM_R9:
+               return "r9";
+       case PERF_REG_ARM_R10:
+               return "r10";
+       case PERF_REG_ARM_FP:
+               return "fp";
+       case PERF_REG_ARM_IP:
+               return "ip";
+       case PERF_REG_ARM_SP:
+               return "sp";
+       case PERF_REG_ARM_LR:
+               return "lr";
+       case PERF_REG_ARM_PC:
+               return "pc";
+       default:
+               return NULL;
+       }
+
+       return NULL;
+}
+
+#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/arm/util/unwind.c b/tools/perf/arch/arm/util/unwind.c
new file mode 100644 (file)
index 0000000..da3dc95
--- /dev/null
@@ -0,0 +1,48 @@
+
+#include <errno.h>
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+
+int unwind__arch_reg_id(int regnum)
+{
+       switch (regnum) {
+       case UNW_ARM_R0:
+               return PERF_REG_ARM_R0;
+       case UNW_ARM_R1:
+               return PERF_REG_ARM_R1;
+       case UNW_ARM_R2:
+               return PERF_REG_ARM_R2;
+       case UNW_ARM_R3:
+               return PERF_REG_ARM_R3;
+       case UNW_ARM_R4:
+               return PERF_REG_ARM_R4;
+       case UNW_ARM_R5:
+               return PERF_REG_ARM_R5;
+       case UNW_ARM_R6:
+               return PERF_REG_ARM_R6;
+       case UNW_ARM_R7:
+               return PERF_REG_ARM_R7;
+       case UNW_ARM_R8:
+               return PERF_REG_ARM_R8;
+       case UNW_ARM_R9:
+               return PERF_REG_ARM_R9;
+       case UNW_ARM_R10:
+               return PERF_REG_ARM_R10;
+       case UNW_ARM_R11:
+               return PERF_REG_ARM_FP;
+       case UNW_ARM_R12:
+               return PERF_REG_ARM_IP;
+       case UNW_ARM_R13:
+               return PERF_REG_ARM_SP;
+       case UNW_ARM_R14:
+               return PERF_REG_ARM_LR;
+       case UNW_ARM_R15:
+               return PERF_REG_ARM_PC;
+       default:
+               pr_err("unwind: invalid reg id %d\n", regnum);
+               return -EINVAL;
+       }
+
+       return -EINVAL;
+}
index f686d5ff594e6b93c6e6f732e7a1c97aa18c4e49..5098f144b92defd53e94f9f24184e3ade0672928 100644 (file)
@@ -457,6 +457,7 @@ static int __run_perf_stat(int argc, const char **argv)
                        perror("failed to prepare workload");
                        return -1;
                }
+               child_pid = evsel_list->workload.pid;
        }
 
        if (group)
index 5f6f9b3271bb0657b77206f6723fd8b3786041bd..75b93d7f786010000368e299d214717131c67fa1 100644 (file)
@@ -29,6 +29,10 @@ ifeq ($(ARCH),x86_64)
   NO_PERF_REGS := 0
   LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
 endif
+ifeq ($(ARCH),arm)
+  NO_PERF_REGS := 0
+  LIBUNWIND_LIBS = -lunwind -lunwind-arm
+endif
 
 ifeq ($(NO_PERF_REGS),0)
   CFLAGS += -DHAVE_PERF_REGS
@@ -208,8 +212,7 @@ ifeq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_LIBELF),-DLIBELF_MMAP),y)
 endif # try-cc
 endif # NO_LIBELF
 
-# There's only x86 (both 32 and 64) support for CFI unwind so far
-ifneq ($(ARCH),x86)
+ifeq ($(LIBUNWIND_LIBS),)
   NO_LIBUNWIND := 1
 endif
 
@@ -223,9 +226,13 @@ endif
 
 FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(CFLAGS) $(LIBUNWIND_LDFLAGS) $(LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS)
 ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND),libunwind),y)
-  msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99);
+  msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 1.1);
   NO_LIBUNWIND := 1
 endif # Libunwind support
+ifneq ($(call try-cc,$(SOURCE_LIBUNWIND_DEBUG_FRAME),$(FLAGS_UNWIND),libunwind debug_frame),y)
+  msg := $(warning No debug_frame support found in libunwind);
+CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
+endif # debug_frame support in libunwind
 endif # NO_LIBUNWIND
 
 ifndef NO_LIBUNWIND
index d5a8dd44945fcc6483eabfafde3b5d882b46f6f1..028fe997d5ebcfecce4d013fa43e9c21c7f989a2 100644 (file)
@@ -185,7 +185,6 @@ extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
                                       unw_proc_info_t *pi,
                                       int need_unwind_info, void *arg);
 
-
 #define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
 
 int main(void)
@@ -197,6 +196,26 @@ int main(void)
        return 0;
 }
 endef
+
+define SOURCE_LIBUNWIND_DEBUG_FRAME
+#include <libunwind.h>
+#include <stdlib.h>
+
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+                                unw_word_t ip, unw_word_t segbase,
+                                const char *obj_name, unw_word_t start,
+                                unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
+int main(void)
+{
+       dwarf_find_debug_frame(0, NULL, 0, 0, NULL, 0, 0);
+       return 0;
+}
+endef
+
 endif
 
 ifndef NO_BACKTRACE
@@ -219,7 +238,7 @@ define SOURCE_LIBAUDIT
 
 int main(void)
 {
-       printf(\"error message: %s\n\", audit_errno_to_name(0));
+       printf(\"error message: %s\", audit_errno_to_name(0));
        return audit_open();
 }
 endef
index e23bde19d590872c6567c35d0df39fd5d0d68a87..7defd77105d005f9820c7ca69621e814a3c45c76 100644 (file)
@@ -426,7 +426,7 @@ static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
  * @die_mem: a buffer for result DIE
  *
  * Search a non-inlined function DIE which includes @addr. Stores the
- * DIE to @die_mem and returns it if found. Returns NULl if failed.
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
  */
 Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
                                    Dwarf_Die *die_mem)
@@ -453,16 +453,33 @@ static int __die_find_inline_cb(Dwarf_Die *die_mem, void *data)
        return DIE_FIND_CB_CONTINUE;
 }
 
+/**
+ * die_find_top_inlinefunc - Search the top inlined function at given address
+ * @sp_die: a subprogram DIE which includes @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search an inlined function DIE which includes @addr. Stores the
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
+ * Even if several inlined functions are expanded recursively, this
+ * doesn't trace them down, and returns the topmost one.
+ */
+Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+                                  Dwarf_Die *die_mem)
+{
+       return die_find_child(sp_die, __die_find_inline_cb, &addr, die_mem);
+}
+
 /**
  * die_find_inlinefunc - Search an inlined function at given address
- * @cu_die: a CU DIE which including @addr
+ * @sp_die: a subprogram DIE which includes @addr
  * @addr: target address
  * @die_mem: a buffer for result DIE
  *
  * Search an inlined function DIE which includes @addr. Stores the
- * DIE to @die_mem and returns it if found. Returns NULl if failed.
+ * DIE to @die_mem and returns it if found. Returns NULL if failed.
  * If several inlined functions are expanded recursively, this traces
- * it and returns deepest one.
+ * it down and returns the deepest one.
  */
 Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
                               Dwarf_Die *die_mem)
index 8658d41697d27fbbe06dbd1fbdbc3b9d895b4606..b4fe90c6cb2d4413c3ab96f91a346f3f89549932 100644 (file)
@@ -79,7 +79,11 @@ extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
 extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
                                    Dwarf_Die *die_mem);
 
-/* Search an inlined function including given address */
+/* Search the top inlined function including given address */
+extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
+                                         Dwarf_Die *die_mem);
+
+/* Search the deepest inlined function including given address */
 extern Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
                                      Dwarf_Die *die_mem);
 
index ce69901176d864506d39cc0df4f5dbe9479f9383..c3e5a3b817ab714497dc7f6f39520945dc886b1a 100644 (file)
@@ -2768,6 +2768,18 @@ int perf_session__read_header(struct perf_session *session)
        if (perf_file_header__read(&f_header, header, fd) < 0)
                return -EINVAL;
 
+       /*
+        * Sanity check that perf.data was written cleanly; data size is
+        * initialized to 0 and updated only if the on_exit function is run.
+        * If data size is still 0 then the file contains only partial
+        * information.  Just warn the user and process as much of it as we can.
+        */
+       if (f_header.data.size == 0) {
+               pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
+                          "Was the 'perf record' command properly terminated?\n",
+                          session->filename);
+       }
+
        nr_attrs = f_header.attrs.size / f_header.attr_size;
        lseek(fd, f_header.attrs.offset, SEEK_SET);
 
index 371476cb8ddc17a6643a6298ba887f8c6aa75bcb..c09e0a9fdf4cda118be077fbee1fb382dcc3d5ee 100644 (file)
@@ -1327,8 +1327,8 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
                                struct perf_probe_point *ppt)
 {
        Dwarf_Die cudie, spdie, indie;
-       Dwarf_Addr _addr, baseaddr;
-       const char *fname = NULL, *func = NULL, *tmp;
+       Dwarf_Addr _addr = 0, baseaddr = 0;
+       const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
        int baseline = 0, lineno = 0, ret = 0;
 
        /* Adjust address with bias */
@@ -1349,27 +1349,36 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
        /* Find a corresponding function (name, baseline and baseaddr) */
        if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) {
                /* Get function entry information */
-               tmp = dwarf_diename(&spdie);
-               if (!tmp ||
+               func = basefunc = dwarf_diename(&spdie);
+               if (!func ||
                    dwarf_entrypc(&spdie, &baseaddr) != 0 ||
-                   dwarf_decl_line(&spdie, &baseline) != 0)
+                   dwarf_decl_line(&spdie, &baseline) != 0) {
+                       lineno = 0;
                        goto post;
-               func = tmp;
+               }
 
-               if (addr == (unsigned long)baseaddr)
+               if (addr == (unsigned long)baseaddr) {
                        /* Function entry - Relative line number is 0 */
                        lineno = baseline;
-               else if (die_find_inlinefunc(&spdie, (Dwarf_Addr)addr,
-                                            &indie)) {
+                       fname = dwarf_decl_file(&spdie);
+                       goto post;
+               }
+
+               /* Track down the inline functions step by step */
+               while (die_find_top_inlinefunc(&spdie, (Dwarf_Addr)addr,
+                                               &indie)) {
+                       /* There is an inline function */
                        if (dwarf_entrypc(&indie, &_addr) == 0 &&
-                           _addr == addr)
+                           _addr == addr) {
                                /*
                                 * addr is at an inline function entry.
                                 * In this case, lineno should be the call-site
-                                * line number.
+                                * line number. (overwrite lineinfo)
                                 */
                                lineno = die_get_call_lineno(&indie);
-                       else {
+                               fname = die_get_call_file(&indie);
+                               break;
+                       } else {
                                /*
                                 * addr is in an inline function body.
                                 * Since lineno points one of the lines
@@ -1377,19 +1386,27 @@ int debuginfo__find_probe_point(struct debuginfo *self, unsigned long addr,
                                 * be the entry line of the inline function.
                                 */
                                tmp = dwarf_diename(&indie);
-                               if (tmp &&
-                                   dwarf_decl_line(&spdie, &baseline) == 0)
-                                       func = tmp;
+                               if (!tmp ||
+                                   dwarf_decl_line(&indie, &baseline) != 0)
+                                       break;
+                               func = tmp;
+                               spdie = indie;
                        }
                }
+               /* Verify the lineno and baseline are in the same file */
+               tmp = dwarf_decl_file(&spdie);
+               if (!tmp || strcmp(tmp, fname) != 0)
+                       lineno = 0;
        }
 
 post:
        /* Make a relative line number or an offset */
        if (lineno)
                ppt->line = lineno - baseline;
-       else if (func)
+       else if (basefunc) {
                ppt->offset = addr - (unsigned long)baseaddr;
+               func = basefunc;
+       }
 
        /* Duplicate strings */
        if (func) {
index 70ffa41518f34a1bbfaa82e67c46a8a22b133f67..568b750c01f60b3d81cda29966ed40534a7bc8e1 100644 (file)
@@ -256,6 +256,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
+       if (tool->mmap2 == NULL)
+               tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->fork == NULL)
@@ -1310,7 +1312,7 @@ int __perf_session__process_events(struct perf_session *session,
        file_offset = page_offset;
        head = data_offset - page_offset;
 
-       if (data_offset + data_size < file_size)
+       if (data_size && (data_offset + data_size < file_size))
                file_size = data_offset + data_size;
 
        progress_next = file_size / 16;
index 2f891f7e70bf9251c849780ec008ded14a2776c9..5390d0b8862a680e147cf52dd8bed22cfbba333f 100644 (file)
@@ -39,6 +39,15 @@ UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
 
 #define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
 
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+                                unw_word_t ip,
+                                unw_word_t segbase,
+                                const char *obj_name, unw_word_t start,
+                                unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
 #define DW_EH_PE_FORMAT_MASK   0x0f    /* format of the encoded value */
 #define DW_EH_PE_APPL_MASK     0x70    /* how the value is to be applied */
 
@@ -245,8 +254,9 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
        return 0;
 }
 
-static int read_unwind_spec(struct dso *dso, struct machine *machine,
-                           u64 *table_data, u64 *segbase, u64 *fde_count)
+static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
+                                    u64 *table_data, u64 *segbase,
+                                    u64 *fde_count)
 {
        int ret = -EINVAL, fd;
        u64 offset;
@@ -255,6 +265,7 @@ static int read_unwind_spec(struct dso *dso, struct machine *machine,
        if (fd < 0)
                return -EINVAL;
 
+       /* Check the .eh_frame section for unwinding info */
        offset = elf_section_offset(fd, ".eh_frame_hdr");
        close(fd);
 
@@ -263,10 +274,29 @@ static int read_unwind_spec(struct dso *dso, struct machine *machine,
                                          table_data, segbase,
                                          fde_count);
 
-       /* TODO .debug_frame check if eh_frame_hdr fails */
        return ret;
 }
 
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int read_unwind_spec_debug_frame(struct dso *dso,
+                                       struct machine *machine, u64 *offset)
+{
+       int fd = dso__data_fd(dso, machine);
+
+       if (fd < 0)
+               return -EINVAL;
+
+       /* Check the .debug_frame section for unwinding info */
+       *offset = elf_section_offset(fd, ".debug_frame");
+       close(fd);
+
+       if (*offset)
+               return 0;
+
+       return -EINVAL;
+}
+#endif
+
 static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
 {
        struct addr_location al;
@@ -291,20 +321,33 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
 
        pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
 
-       if (read_unwind_spec(map->dso, ui->machine,
-                            &table_data, &segbase, &fde_count))
-               return -EINVAL;
+       /* Check the .eh_frame section for unwinding info */
+       if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
+                                      &table_data, &segbase, &fde_count)) {
+               memset(&di, 0, sizeof(di));
+               di.format   = UNW_INFO_FORMAT_REMOTE_TABLE;
+               di.start_ip = map->start;
+               di.end_ip   = map->end;
+               di.u.rti.segbase    = map->start + segbase;
+               di.u.rti.table_data = map->start + table_data;
+               di.u.rti.table_len  = fde_count * sizeof(struct table_entry)
+                                     / sizeof(unw_word_t);
+               return dwarf_search_unwind_table(as, ip, &di, pi,
+                                                need_unwind_info, arg);
+       }
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+       /* Check the .debug_frame section for unwinding info */
+       if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+               memset(&di, 0, sizeof(di));
+               dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
+                                      map->start, map->end);
+               return dwarf_search_unwind_table(as, ip, &di, pi,
+                                                need_unwind_info, arg);
+       }
+#endif
 
-       memset(&di, 0, sizeof(di));
-       di.format   = UNW_INFO_FORMAT_REMOTE_TABLE;
-       di.start_ip = map->start;
-       di.end_ip   = map->end;
-       di.u.rti.segbase    = map->start + segbase;
-       di.u.rti.table_data = map->start + table_data;
-       di.u.rti.table_len  = fde_count * sizeof(struct table_entry)
-                             / sizeof(unw_word_t);
-       return dwarf_search_unwind_table(as, ip, &di, pi,
-                                        need_unwind_info, arg);
+       return -EINVAL;
 }
 
 static int access_fpreg(unw_addr_space_t __maybe_unused as,
index fe702076ca46cc2d3d02bab818446c9d15f8c392..2bb8bf506681b4d982546dd9d14b005df815a5fc 100644 (file)
@@ -2,7 +2,7 @@
  * turbostat -- show CPU frequency and C-state residency
  * on modern Intel turbo-capable processors.
  *
- * Copyright (c) 2012 Intel Corporation.
+ * Copyright (c) 2013 Intel Corporation.
  * Len Brown <len.brown@intel.com>
  *
  * This program is free software; you can redistribute it and/or modify it
@@ -47,6 +47,8 @@ unsigned int skip_c1;
 unsigned int do_nhm_cstates;
 unsigned int do_snb_cstates;
 unsigned int do_c8_c9_c10;
+unsigned int do_slm_cstates;
+unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
 unsigned int units = 1000000000;       /* Ghz etc */
@@ -81,6 +83,8 @@ double rapl_joule_counter_range;
 #define RAPL_DRAM      (1 << 3)
 #define RAPL_PKG_PERF_STATUS   (1 << 4)
 #define RAPL_DRAM_PERF_STATUS  (1 << 5)
+#define RAPL_PKG_POWER_INFO    (1 << 6)
+#define RAPL_CORE_POLICY       (1 << 7)
 #define        TJMAX_DEFAULT   100
 
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
@@ -96,7 +100,7 @@ struct thread_data {
        unsigned long long tsc;
        unsigned long long aperf;
        unsigned long long mperf;
-       unsigned long long c1;  /* derived */
+       unsigned long long c1;
        unsigned long long extra_msr64;
        unsigned long long extra_delta64;
        unsigned long long extra_msr32;
@@ -266,7 +270,7 @@ void print_header(void)
                outp += sprintf(outp, "           MSR 0x%03X", extra_msr_offset64);
        if (do_nhm_cstates)
                outp += sprintf(outp, "    %%c1");
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, "    %%c3");
        if (do_nhm_cstates)
                outp += sprintf(outp, "    %%c6");
@@ -280,9 +284,9 @@ void print_header(void)
 
        if (do_snb_cstates)
                outp += sprintf(outp, "   %%pc2");
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, "   %%pc3");
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, "   %%pc6");
        if (do_snb_cstates)
                outp += sprintf(outp, "   %%pc7");
@@ -480,7 +484,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
        if (do_nhm_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
@@ -499,9 +503,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
 
        if (do_snb_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
-       if (do_nhm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
        if (do_snb_cstates)
                outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
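
For context, each residency column above is simply the counter delta for the interval divided by the TSC delta, scaled to a percentage. A minimal standalone sketch of that arithmetic with made-up deltas (illustrative only, not part of turbostat):

#include <stdio.h>

int main(void)
{
	/* Hypothetical counter deltas over one measurement interval. */
	unsigned long long tsc_delta = 2400000000ULL;	/* TSC cycles in the interval */
	unsigned long long c6_delta  =  600000000ULL;	/* C6 residency counter delta */

	/* Each residency column is printed as 100.0 * delta / tsc_delta. */
	printf("%%c6: %6.2f\n", 100.0 * c6_delta / tsc_delta);
	return 0;
}
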
@@ -648,17 +652,24 @@ delta_thread(struct thread_data *new, struct thread_data *old,
        }
 
 
-       /*
-        * As counter collection is not atomic,
-        * it is possible for mperf's non-halted cycles + idle states
-        * to exceed TSC's all cycles: show c1 = 0% in that case.
-        */
-       if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
-               old->c1 = 0;
-       else {
-               /* normal case, derive c1 */
-               old->c1 = old->tsc - old->mperf - core_delta->c3
+       if (use_c1_residency_msr) {
+               /*
+                * Some models have a dedicated C1 residency MSR,
+                * which should be more accurate than the derivation below.
+                */
+       } else {
+               /*
+                * As counter collection is not atomic,
+                * it is possible for mperf's non-halted cycles + idle states
+                * to exceed TSC's all cycles: show c1 = 0% in that case.
+                */
+               if ((old->mperf + core_delta->c3 + core_delta->c6 + core_delta->c7) > old->tsc)
+                       old->c1 = 0;
+               else {
+                       /* normal case, derive c1 */
+                       old->c1 = old->tsc - old->mperf - core_delta->c3
                                - core_delta->c6 - core_delta->c7;
+               }
        }
 
        if (old->mperf == 0) {
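
The else branch above attributes to C1 whatever share of the TSC interval is not covered by non-halted cycles (mperf) or the deeper core C-states, clamping at zero because the counter reads are not atomic. A minimal sketch of the same derivation with made-up deltas:

#include <stdio.h>

int main(void)
{
	/* Hypothetical per-interval deltas; turbostat reads these from MSRs. */
	unsigned long long tsc = 1000, mperf = 400, c3 = 100, c6 = 250, c7 = 50;
	unsigned long long c1;

	/* Counter reads are not atomic, so the sum can exceed tsc; clamp to 0. */
	if (mperf + c3 + c6 + c7 > tsc)
		c1 = 0;
	else
		c1 = tsc - mperf - c3 - c6 - c7;	/* cycles attributed to C1 */

	printf("derived c1 = %llu cycles (%.2f%% of tsc)\n", c1, 100.0 * c1 / tsc);
	return 0;
}
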
@@ -872,13 +883,21 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                if (get_msr(cpu, extra_msr_offset64, &t->extra_msr64))
                        return -5;
 
+       if (use_c1_residency_msr) {
+               if (get_msr(cpu, MSR_CORE_C1_RES, &t->c1))
+                       return -6;
+       }
+
        /* collect core counters only for 1st thread in core */
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                return 0;
 
-       if (do_nhm_cstates) {
+       if (do_nhm_cstates && !do_slm_cstates) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
                        return -6;
+       }
+
+       if (do_nhm_cstates) {
                if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
                        return -7;
        }
@@ -898,7 +917,7 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
                return 0;
 
-       if (do_nhm_cstates) {
+       if (do_nhm_cstates && !do_slm_cstates) {
                if (get_msr(cpu, MSR_PKG_C3_RESIDENCY, &p->pc3))
                        return -9;
                if (get_msr(cpu, MSR_PKG_C6_RESIDENCY, &p->pc6))
@@ -1046,25 +1065,28 @@ print_nhm_turbo_ratio_limits:
 
        switch(msr & 0x7) {
        case 0:
-               fprintf(stderr, "pc0");
+               fprintf(stderr, do_slm_cstates ? "no pkg states" : "pc0");
                break;
        case 1:
-               fprintf(stderr, do_snb_cstates ? "pc2" : "pc0");
+               fprintf(stderr, do_slm_cstates ? "no pkg states" : do_snb_cstates ? "pc2" : "pc0");
                break;
        case 2:
-               fprintf(stderr, do_snb_cstates ? "pc6-noret" : "pc3");
+               fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc6-noret" : "pc3");
                break;
        case 3:
-               fprintf(stderr, "pc6");
+               fprintf(stderr, do_slm_cstates ? "invalid" : "pc6");
                break;
        case 4:
-               fprintf(stderr, "pc7");
+               fprintf(stderr, do_slm_cstates ? "pc4" : "pc7");
                break;
        case 5:
-               fprintf(stderr, do_snb_cstates ? "pc7s" : "invalid");
+               fprintf(stderr, do_slm_cstates ? "invalid" : do_snb_cstates ? "pc7s" : "invalid");
+               break;
+       case 6:
+               fprintf(stderr, do_slm_cstates ? "pc6" : "invalid");
                break;
        case 7:
-               fprintf(stderr, "unlimited");
+               fprintf(stderr, do_slm_cstates ? "pc7" : "unlimited");
                break;
        default:
                fprintf(stderr, "invalid");
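
The switch above decodes the low three bits of the package C-state configuration MSR into the deepest package state the platform will enter, with SLM parts using a different naming than the Nehalem/SNB encodings. A standalone sketch of the same decode as a table lookup; the strings come from the cases above, the MSR value is hypothetical, and the non-SLM column assumes an SNB-class part (do_snb_cstates set):

#include <stdio.h>

/* Deepest-allowed package C-state names, indexed by (msr & 0x7). */
static const char *slm_limit[8] = {
	"no pkg states", "no pkg states", "invalid", "invalid",
	"pc4", "invalid", "pc6", "pc7"
};
static const char *snb_limit[8] = {
	"pc0", "pc2", "pc6-noret", "pc6", "pc7", "pc7s", "invalid", "unlimited"
};

int main(void)
{
	unsigned long long msr = 0x4;	/* hypothetical C-state config MSR value */
	int do_slm_cstates = 1;		/* pretend this is an SLM/AVN part */

	printf("deepest pkg C-state allowed: %s\n",
	       (do_slm_cstates ? slm_limit : snb_limit)[msr & 0x7]);
	return 0;
}
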
@@ -1460,6 +1482,7 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
        case 0x3F:      /* HSW */
        case 0x45:      /* HSW */
        case 0x46:      /* HSW */
+       case 0x4D:      /* AVN */
                return 1;
        case 0x2E:      /* Nehalem-EX Xeon - Beckton */
        case 0x2F:      /* Westmere-EX Xeon - Eagleton */
@@ -1555,11 +1578,14 @@ void rapl_probe(unsigned int family, unsigned int model)
        case 0x3F:      /* HSW */
        case 0x45:      /* HSW */
        case 0x46:      /* HSW */
-               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_GFX;
+               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
                break;
        case 0x2D:
        case 0x3E:
-               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS;
+               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_PKG_PERF_STATUS | RAPL_DRAM_PERF_STATUS | RAPL_PKG_POWER_INFO;
+               break;
+       case 0x4D:      /* AVN */
+       do_rapl = RAPL_PKG | RAPL_CORES;
                break;
        default:
                return;
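
rapl_probe() builds do_rapl as a per-model bitmask of RAPL capabilities, and later code only touches an MSR when its bit is set; the new AVN entry gets only the package and core counters. A minimal sketch of that flag pattern; the bit values for RAPL_PKG, RAPL_CORES and RAPL_GFX are assumed here, the rest mirror the #defines shown earlier in this diff:

#include <stdio.h>

#define RAPL_PKG		(1 << 0)	/* assumed bit position */
#define RAPL_CORES		(1 << 1)	/* assumed bit position */
#define RAPL_GFX		(1 << 2)	/* assumed bit position */
#define RAPL_DRAM		(1 << 3)
#define RAPL_PKG_PERF_STATUS	(1 << 4)
#define RAPL_DRAM_PERF_STATUS	(1 << 5)
#define RAPL_PKG_POWER_INFO	(1 << 6)
#define RAPL_CORE_POLICY	(1 << 7)

int main(void)
{
	unsigned int model = 0x4D;	/* hypothetical: an AVN part */
	unsigned int do_rapl = 0;

	if (model == 0x4D)		/* AVN gets only PKG and CORES above */
		do_rapl = RAPL_PKG | RAPL_CORES;

	/* Later code gates each MSR access on its capability bit. */
	if (do_rapl & RAPL_PKG_POWER_INFO)
		printf("would read MSR_PKG_POWER_INFO\n");
	else
		printf("MSR_PKG_POWER_INFO not probed on this model\n");
	return 0;
}
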
@@ -1573,17 +1599,18 @@ void rapl_probe(unsigned int family, unsigned int model)
        rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
        rapl_time_units = 1.0 / (1 << (msr >> 16 & 0xF));
 
-       /* get TDP to determine energy counter range */
-       if (get_msr(0, MSR_PKG_POWER_INFO, &msr))
-               return;
+       if (do_rapl & RAPL_PKG_POWER_INFO) {
+               /* get TDP to determine energy counter range */
+               if (get_msr(0, MSR_PKG_POWER_INFO, &msr))
+                       return;
 
-       tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
+               tdp = ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
 
-       rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
-
-       if (verbose)
-               fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range);
+               rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
 
+               if (verbose)
+                       fprintf(stderr, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range);
+       }
        return;
 }
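
The hunk above derives the RAPL energy and time units from MSR_RAPL_POWER_UNIT and, when MSR_PKG_POWER_INFO is available, estimates how long the 32-bit energy counter runs before wrapping: 0xFFFFFFFF energy units worth of joules divided by the TDP in watts gives seconds. A worked sketch with a plausible, made-up unit encoding and TDP (the power-unit field extraction is an assumption here):

#include <stdio.h>

int main(void)
{
	/* Hypothetical MSR_RAPL_POWER_UNIT: power unit 1/2^3 W,
	 * energy unit 1/2^16 J, time unit 1/2^10 s. */
	unsigned long long msr = (10ULL << 16) | (16ULL << 8) | 3ULL;
	double rapl_power_units  = 1.0 / (1 << (msr & 0xF));
	double rapl_energy_units = 1.0 / (1 << (msr >> 8 & 0x1F));
	double rapl_time_units   = 1.0 / (1 << (msr >> 16 & 0xF));
	double tdp = 95.0;		/* assume a 95 W package TDP */

	/* 32-bit energy counter wraps after 0xFFFFFFFF units; J / W = s. */
	double range = 0xFFFFFFFF * rapl_energy_units / tdp;

	printf("units: %.3f W, %g J, %g s\n",
	       rapl_power_units, rapl_energy_units, rapl_time_units);
	printf("RAPL: ~%.0f sec. Joule Counter Range at %.0f W TDP\n", range, tdp);
	return 0;
}
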
 
@@ -1702,7 +1729,8 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                        "(%f Watts, %f Joules, %f sec.)\n", cpu, msr,
                        local_rapl_power_units, local_rapl_energy_units, local_rapl_time_units);
        }
-       if (do_rapl & RAPL_PKG) {
+       if (do_rapl & RAPL_PKG_POWER_INFO) {
+
                if (get_msr(cpu, MSR_PKG_POWER_INFO, &msr))
                        return -5;
 
@@ -1714,6 +1742,9 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                        ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
                        ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
 
+       }
+       if (do_rapl & RAPL_PKG) {
+
                if (get_msr(cpu, MSR_PKG_POWER_LIMIT, &msr))
                        return -9;
 
@@ -1749,12 +1780,16 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
 
                print_power_limit_msr(cpu, msr, "DRAM Limit");
        }
-       if (do_rapl & RAPL_CORES) {
+       if (do_rapl & RAPL_CORE_POLICY) {
                if (verbose) {
                        if (get_msr(cpu, MSR_PP0_POLICY, &msr))
                                return -7;
 
                        fprintf(stderr, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu, msr & 0xF);
+               }
+       }
+       if (do_rapl & RAPL_CORES) {
+               if (verbose) {
 
                        if (get_msr(cpu, MSR_PP0_POWER_LIMIT, &msr))
                                return -9;
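
The MSR_PKG_POWER_INFO fields printed a couple of hunks above are masked bit-fields scaled by the RAPL power and time units. A small sketch of that extraction; the 0x7FFF and 0x3F granularity masks, the unit values and the MSR contents are assumptions for illustration, not taken from this diff:

#include <stdio.h>

#define RAPL_POWER_GRANULARITY	0x7FFF	/* assumed 15-bit power fields */
#define RAPL_TIME_GRANULARITY	0x3F	/* assumed 6-bit time-window field */

int main(void)
{
	double rapl_power_units = 0.125;	/* 1/8 W, as in the sketch above */
	double rapl_time_units  = 1.0 / 1024;	/* ~0.98 ms */
	/* Hypothetical MSR_PKG_POWER_INFO contents. */
	unsigned long long msr  = (0x10ULL << 48) | (0x3d0ULL << 32) | 0x2f8ULL;

	printf("TDP %.0f W, max power %.0f W, max time window %.4f s\n",
	       ((msr >>  0) & RAPL_POWER_GRANULARITY) * rapl_power_units,
	       ((msr >> 32) & RAPL_POWER_GRANULARITY) * rapl_power_units,
	       ((msr >> 48) & RAPL_TIME_GRANULARITY) * rapl_time_units);
	return 0;
}
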
@@ -1813,10 +1848,47 @@ int has_c8_c9_c10(unsigned int family, unsigned int model)
 }
 
 
+int is_slm(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+       switch (model) {
+       case 0x4D:      /* AVN */
+               return 1;
+       }
+       return 0;
+}
+
+#define SLM_BCLK_FREQS 5
+double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
+
+double slm_bclk(void)
+{
+       unsigned long long msr = 3;
+       unsigned int i;
+       double freq;
+
+       if (get_msr(0, MSR_FSB_FREQ, &msr))
+               fprintf(stderr, "SLM BCLK: unknown\n");
+
+       i = msr & 0xf;
+       if (i >= SLM_BCLK_FREQS) {
+               fprintf(stderr, "SLM BCLK[%d] invalid\n", i);
+               msr = 3;
+               i = 3;
+       freq = slm_freq_table[i];
+
+       fprintf(stderr, "SLM BCLK: %.1f MHz\n", freq);
+
+       return freq;
+}
+
 double discover_bclk(unsigned int family, unsigned int model)
 {
        if (is_snb(family, model))
                return 100.00;
+       else if (is_slm(family, model))
+               return slm_bclk();
        else
                return 133.33;
 }
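
slm_bclk() reads the low nibble of MSR_FSB_FREQ and uses it as an index into a five-entry bus-clock table. A standalone sketch of just the decode step; the MSR value is made up, and the real code fetches it with get_msr():

#include <stdio.h>

#define SLM_BCLK_FREQS 5
/* Table copied from the hunk above. */
static const double slm_freq_table[SLM_BCLK_FREQS] = {
	83.3, 100.0, 133.3, 116.7, 80.0
};

int main(void)
{
	unsigned long long msr = 0x0;	/* hypothetical MSR_FSB_FREQ value */
	unsigned int i = msr & 0xf;	/* low nibble selects a table entry */

	if (i >= SLM_BCLK_FREQS) {	/* guard the table index */
		fprintf(stderr, "SLM BCLK[%u] invalid\n", i);
		i = 3;			/* fall back to a default entry */
	}

	printf("SLM BCLK: %.1f MHz\n", slm_freq_table[i]);
	return 0;
}
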
@@ -1873,7 +1945,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
                fprintf(stderr, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
                        cpu, msr, target_c_local);
 
-       if (target_c_local < 85 || target_c_local > 120)
+       if (target_c_local < 85 || target_c_local > 127)
                goto guess;
 
        tcc_activation_temp = target_c_local;
@@ -1970,6 +2042,7 @@ void check_cpuid()
        do_smi = do_nhm_cstates;
        do_snb_cstates = is_snb(family, model);
        do_c8_c9_c10 = has_c8_c9_c10(family, model);
+       do_slm_cstates = is_slm(family, model);
        bclk = discover_bclk(family, model);
 
        do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
@@ -2331,7 +2404,7 @@ int main(int argc, char **argv)
        cmdline(argc, argv);
 
        if (verbose)
-               fprintf(stderr, "turbostat v3.4 April 17, 2013"
+               fprintf(stderr, "turbostat v3.5 April 26, 2013"
                        " - Len Brown <lenb@kernel.org>\n");
 
        turbostat_init();
index 46736604c26cda69fb7dc179dbce884c9cf849cc..a1203148dfa18990b1dc30af5db3c09e1e0b7012 100644 (file)
@@ -133,12 +133,6 @@ CROSS = frv-linux
 ARCH = frv
 GCC_VER = 4.5.1
 
-# h8300 - failed make defconfig??
-TEST_START IF ${RUN} == h8300 || ${DO_FAILED}
-CROSS = h8300-elf
-ARCH = h8300
-GCC_VER = 4.5.1
-
 # m68k fails with error?
 TEST_START IF ${RUN} == m68k || ${DO_DEFAULT}
 CROSS = m68k-linux
index 4fa655d68a81c8a1b8dd8b60f885721edc7cfea1..41bd85559d4b01aa8ee0e89615ece47c4115b0d0 100644 (file)
@@ -151,7 +151,7 @@ static int check_timer_create(int which)
        fflush(stdout);
 
        done = 0;
-       timer_create(which, NULL, &id);
+       err = timer_create(which, NULL, &id);
        if (err < 0) {
                perror("Can't create timer\n");
                return -1;